author      Tony Lindgren <tony@atomide.com>    2013-11-25 15:30:10 -0800
committer   Tony Lindgren <tony@atomide.com>    2013-11-25 15:30:10 -0800
commit      f19f8d8ebc0b392c01082e0feb44ec152cc47a30 (patch)
tree        f45f5391e97e29d5ef0003fc90f34bf0e18ba20f /drivers
parent      f984370913d3ba5d13806cc8ac6fc26f8ebd1694 (diff)
parent      6310f3a9362e8fa2e29c8f84aa603b5f86e98442 (diff)
Merge branch 'omap-for-v3.13/fixes-take4' into omap-for-v3.14/board-removal
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig52
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c257
-rw-r--r--drivers/acpi/acpi_extlog.c327
-rw-r--r--drivers/acpi/acpi_ipmi.c580
-rw-r--r--drivers/acpi/acpi_lpss.c21
-rw-r--r--drivers/acpi/acpi_memhotplug.c7
-rw-r--r--drivers/acpi/acpi_platform.c9
-rw-r--r--drivers/acpi/acpi_processor.c28
-rw-r--r--drivers/acpi/acpica/acdebug.h8
-rw-r--r--drivers/acpi/acpica/acevents.h9
-rw-r--r--drivers/acpi/acpica/acglobal.h20
-rw-r--r--drivers/acpi/acpica/aclocal.h11
-rw-r--r--drivers/acpi/acpica/acmacros.h31
-rw-r--r--drivers/acpi/acpica/acnamesp.h6
-rw-r--r--drivers/acpi/acpica/acutils.h17
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c5
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c10
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c3
-rw-r--r--drivers/acpi/acpica/evgpeblk.c6
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c4
-rw-r--r--drivers/acpi/acpica/evhandler.c4
-rw-r--r--drivers/acpi/acpica/evmisc.c14
-rw-r--r--drivers/acpi/acpica/evregion.c29
-rw-r--r--drivers/acpi/acpica/evsci.c79
-rw-r--r--drivers/acpi/acpica/evxface.c148
-rw-r--r--drivers/acpi/acpica/evxfevnt.c3
-rw-r--r--drivers/acpi/acpica/evxfgpe.c9
-rw-r--r--drivers/acpi/acpica/evxfregn.c7
-rw-r--r--drivers/acpi/acpica/excreate.c8
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c8
-rw-r--r--drivers/acpi/acpica/exmisc.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c8
-rw-r--r--drivers/acpi/acpica/exoparg2.c10
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exregion.c1
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c3
-rw-r--r--drivers/acpi/acpica/hwxface.c43
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c7
-rw-r--r--drivers/acpi/acpica/nsaccess.c7
-rw-r--r--drivers/acpi/acpica/nsdump.c143
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c7
-rw-r--r--drivers/acpi/acpica/nseval.c4
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c4
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c3
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c23
-rw-r--r--drivers/acpi/acpica/nsxfname.c7
-rw-r--r--drivers/acpi/acpica/nsxfobj.c7
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psxface.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c3
-rw-r--r--drivers/acpi/acpica/tbinstal.c18
-rw-r--r--drivers/acpi/acpica/tbprint.c18
-rw-r--r--drivers/acpi/acpica/tbutils.c5
-rw-r--r--drivers/acpi/acpica/tbxface.c16
-rw-r--r--drivers/acpi/acpica/tbxfload.c11
-rw-r--r--drivers/acpi/acpica/tbxfroot.c5
-rw-r--r--drivers/acpi/acpica/utalloc.c117
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c4
-rw-r--r--drivers/acpi/acpica/utdebug.c5
-rw-r--r--drivers/acpi/acpica/utdecode.c1
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c3
-rw-r--r--drivers/acpi/acpica/utglobal.c20
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utobject.c28
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c4
-rw-r--r--drivers/acpi/acpica/utstate.c1
-rw-r--r--drivers/acpi/acpica/utstring.c66
-rw-r--r--drivers/acpi/acpica/uttrack.c31
-rw-r--r--drivers/acpi/acpica/utxface.c45
-rw-r--r--drivers/acpi/acpica/utxferror.c3
-rw-r--r--drivers/acpi/acpica/utxfinit.c18
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/apei/Makefile2
-rw-r--r--drivers/acpi/apei/apei-base.c6
-rw-r--r--drivers/acpi/apei/apei-internal.h12
-rw-r--r--drivers/acpi/apei/ghes.c58
-rw-r--r--drivers/acpi/battery.c328
-rw-r--r--drivers/acpi/blacklist.c98
-rw-r--r--drivers/acpi/bus.c21
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/cm_sbs.c105
-rw-r--r--drivers/acpi/device_pm.c22
-rw-r--r--drivers/acpi/dock.c31
-rw-r--r--drivers/acpi/ec.c52
-rw-r--r--drivers/acpi/event.c30
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/glue.c53
-rw-r--r--drivers/acpi/internal.h10
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/osl.c144
-rw-r--r--drivers/acpi/pci_root.c298
-rw-r--r--drivers/acpi/proc.c305
-rw-r--r--drivers/acpi/processor_core.c26
-rw-r--r--drivers/acpi/processor_driver.c4
-rw-r--r--drivers/acpi/processor_idle.c61
-rw-r--r--drivers/acpi/processor_perflib.c22
-rw-r--r--drivers/acpi/sbs.c325
-rw-r--r--drivers/acpi/scan.c178
-rw-r--r--drivers/acpi/sysfs.c18
-rw-r--r--drivers/acpi/thermal.c53
-rw-r--r--drivers/acpi/utils.c21
-rw-r--r--drivers/acpi/video.c538
-rw-r--r--drivers/acpi/video_detect.c12
-rw-r--r--drivers/amba/bus.c6
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_imx.c101
-rw-r--r--drivers/ata/ahci_platform.c3
-rw-r--r--drivers/ata/ata_piix.c19
-rw-r--r--drivers/ata/libahci.c27
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-transport.c16
-rw-r--r--drivers/ata/pata_arasan_cf.c3
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c5
-rw-r--r--drivers/ata/pata_octeon_cf.c5
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/ata/sata_highbank.c8
-rw-r--r--drivers/ata/sata_rcar.c10
-rw-r--r--drivers/atm/firestream.h1
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c3
-rw-r--r--drivers/base/bus.c82
-rw-r--r--drivers/base/class.c29
-rw-r--r--drivers/base/core.c88
-rw-r--r--drivers/base/cpu.c39
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/devres.c31
-rw-r--r--drivers/base/devtmpfs.c6
-rw-r--r--drivers/base/dma-contiguous.c2
-rw-r--r--drivers/base/firmware_class.c38
-rw-r--r--drivers/base/platform.c21
-rw-r--r--drivers/base/power/main.c80
-rw-r--r--drivers/base/power/opp.c115
-rw-r--r--drivers/base/power/runtime.c5
-rw-r--r--drivers/base/regmap/Kconfig5
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache.c19
-rw-r--r--drivers/base/regmap/regmap-debugfs.c57
-rw-r--r--drivers/base/regmap/regmap-irq.c16
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap-spmi.c90
-rw-r--r--drivers/base/regmap/regmap.c366
-rw-r--r--drivers/bcma/host_pci.c8
-rw-r--r--drivers/bcma/main.c23
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/Makefile3
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/cciss.c6
-rw-r--r--drivers/block/drbd/drbd_int.h3
-rw-r--r--drivers/block/drbd/drbd_main.c19
-rw-r--r--drivers/block/drbd/drbd_nl.c6
-rw-r--r--drivers/block/drbd/drbd_receiver.c45
-rw-r--r--drivers/block/drbd/drbd_req.c3
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c21
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c500
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h18
-rw-r--r--drivers/block/null_blk.c635
-rw-r--r--drivers/block/nvme-core.c10
-rw-r--r--drivers/block/pktcdvd.c22
-rw-r--r--drivers/block/rsxx/core.c8
-rw-r--r--drivers/block/rsxx/dev.c8
-rw-r--r--drivers/block/rsxx/dma.c119
-rw-r--r--drivers/block/rsxx/rsxx_priv.h11
-rw-r--r--drivers/block/skd_main.c5432
-rw-r--r--drivers/block/skd_s1120.h330
-rw-r--r--drivers/block/virtio_blk.c404
-rw-r--r--drivers/block/xen-blkback/blkback.c3
-rw-r--r--drivers/block/xen-blkfront.c212
-rw-r--r--drivers/bluetooth/Makefile2
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/bfusb.c31
-rw-r--r--drivers/bluetooth/bluecard_cs.c30
-rw-r--r--drivers/bluetooth/bpa10x.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c30
-rw-r--r--drivers/bluetooth/btmrvl_drv.h12
-rw-r--r--drivers/bluetooth/btmrvl_main.c295
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c23
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h2
-rw-r--r--drivers/bluetooth/btsdio.c6
-rw-r--r--drivers/bluetooth/btuart_cs.c30
-rw-r--r--drivers/bluetooth/btusb.c22
-rw-r--r--drivers/bluetooth/btwilink.c9
-rw-r--r--drivers/bluetooth/dtl1_cs.c30
-rw-r--r--drivers/bluetooth/hci_bcsp.c5
-rw-r--r--drivers/bluetooth/hci_h4.c24
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c12
-rw-r--r--drivers/bluetooth/hci_ll.c14
-rw-r--r--drivers/bluetooth/hci_vhci.c179
-rw-r--r--drivers/bus/arm-cci.c623
-rw-r--r--drivers/char/Kconfig10
-rw-r--r--drivers/char/bsr.c1
-rw-r--r--drivers/char/hpet.c29
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/pasemi-rng.c1
-rw-r--r--drivers/char/hw_random/powernv-rng.c81
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c1
-rw-r--r--drivers/char/hw_random/pseries-rng.c14
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c2
-rw-r--r--drivers/char/hw_random/virtio-rng.c4
-rw-r--r--drivers/char/misc.c20
-rw-r--r--drivers/char/nwbutton.c2
-rw-r--r--drivers/char/random.c646
-rw-r--r--drivers/char/rtc.c5
-rw-r--r--drivers/char/snsc.c3
-rw-r--r--drivers/char/snsc_event.c3
-rw-r--r--drivers/char/tlclk.c2
-rw-r--r--drivers/char/tpm/Kconfig37
-rw-r--r--drivers/char/tpm/Makefile11
-rw-r--r--drivers/char/tpm/tpm-interface.c (renamed from drivers/char/tpm/tpm.c)138
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/char/tpm/tpm_atmel.c2
-rw-r--r--drivers/char/tpm/tpm_eventlog.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c284
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c710
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c12
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c6
-rw-r--r--drivers/char/tpm/tpm_ppi.c4
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/char/virtio_console.c25
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c3
-rw-r--r--drivers/clk/Kconfig14
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/clk-bcm2835.c8
-rw-r--r--drivers/clk/clk-efm32gg.c81
-rw-r--r--drivers/clk/clk-fixed-factor.c2
-rw-r--r--drivers/clk/clk-highbank.c10
-rw-r--r--drivers/clk/clk-nomadik.c187
-rw-r--r--drivers/clk/clk-ppc-corenet.c1
-rw-r--r--drivers/clk/clk-prima2.c29
-rw-r--r--drivers/clk/clk-vt8500.c34
-rw-r--r--drivers/clk/clk-wm831x.c6
-rw-r--r--drivers/clk/clk-xgene.c521
-rw-r--r--drivers/clk/clk.c56
-rw-r--r--drivers/clk/keystone/Makefile1
-rw-r--r--drivers/clk/keystone/gate.c264
-rw-r--r--drivers/clk/keystone/pll.c305
-rw-r--r--drivers/clk/mxs/clk-imx23.c15
-rw-r--r--drivers/clk/mxs/clk-imx28.c16
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c11
-rw-r--r--drivers/clk/ux500/Makefile1
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c559
-rw-r--r--drivers/clk/ux500/u8540_clk.c2
-rw-r--r--drivers/clk/zynq/clkc.c16
-rw-r--r--drivers/clocksource/Kconfig24
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c55
-rw-r--r--drivers/clocksource/arm_global_timer.c3
-rw-r--r--drivers/clocksource/bcm2835_timer.c4
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c5
-rw-r--r--drivers/clocksource/clksrc-of.c1
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c16
-rw-r--r--drivers/clocksource/em_sti.c4
-rw-r--r--drivers/clocksource/mxs_timer.c4
-rw-r--r--drivers/clocksource/nomadik-mtu.c4
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c4
-rw-r--r--drivers/clocksource/sun4i_timer.c12
-rw-r--r--drivers/clocksource/tcb_clksrc.c61
-rw-r--r--drivers/clocksource/tegra20_timer.c8
-rw-r--r--drivers/clocksource/time-armada-370-xp.c4
-rw-r--r--drivers/clocksource/time-efm32.c275
-rw-r--r--drivers/clocksource/timer-prima2.c6
-rw-r--r--drivers/clocksource/vf_pit_timer.c4
-rw-r--r--drivers/clocksource/vt8500_timer.c2
-rw-r--r--drivers/connector/cn_proc.c72
-rw-r--r--drivers/cpufreq/Kconfig11
-rw-r--r--drivers/cpufreq/Kconfig.arm19
-rw-r--r--drivers/cpufreq/Kconfig.powerpc6
-rw-r--r--drivers/cpufreq/Kconfig.x8615
-rw-r--r--drivers/cpufreq/Makefile6
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c52
-rw-r--r--drivers/cpufreq/arm_big_little.c453
-rw-r--r--drivers/cpufreq/arm_big_little.h5
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c2
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c106
-rw-r--r--drivers/cpufreq/blackfin-cpufreq.c54
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c119
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c5
-rw-r--r--drivers/cpufreq/cpufreq.c322
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c10
-rw-r--r--drivers/cpufreq/cpufreq_governor.c4
-rw-r--r--drivers/cpufreq/cpufreq_governor.h5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c1
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c11
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c64
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c61
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c77
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c78
-rw-r--r--drivers/cpufreq/e_powersaver.c59
-rw-r--r--drivers/cpufreq/elanfreq.c88
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c87
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c67
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c69
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c67
-rw-r--r--drivers/cpufreq/freq_table.c59
-rw-r--r--drivers/cpufreq/gx-suspmod.c5
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c3
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c71
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c117
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c75
-rw-r--r--drivers/cpufreq/intel_pstate.c255
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c107
-rw-r--r--drivers/cpufreq/longhaul.c45
-rw-r--r--drivers/cpufreq/longrun.c4
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c57
-rw-r--r--drivers/cpufreq/maple-cpufreq.c56
-rw-r--r--drivers/cpufreq/omap-cpufreq.c144
-rw-r--r--drivers/cpufreq/p4-clockmod.c53
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c52
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c15
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c53
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c70
-rw-r--r--drivers/cpufreq/powernow-k6.c67
-rw-r--r--drivers/cpufreq/powernow-k7.c42
-rw-r--r--drivers/cpufreq/powernow-k8.c52
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c54
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c50
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c70
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c46
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c67
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c27
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c81
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c86
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c49
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c46
-rw-r--r--drivers/cpufreq/sc520_freq.c64
-rw-r--r--drivers/cpufreq/sh-cpufreq.c22
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c42
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c44
-rw-r--r--drivers/cpufreq/spear-cpufreq.c64
-rw-r--r--drivers/cpufreq/speedstep-centrino.c84
-rw-r--r--drivers/cpufreq/speedstep-ich.c85
-rw-r--r--drivers/cpufreq/speedstep-smi.c76
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c70
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c5
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c70
-rw-r--r--drivers/cpuidle/Kconfig.arm27
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-at91.c69
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c61
-rw-r--r--drivers/cpuidle/cpuidle-ux500.c2
-rw-r--r--drivers/cpuidle/cpuidle-zynq.c17
-rw-r--r--drivers/cpuidle/cpuidle.c78
-rw-r--r--drivers/cpuidle/driver.c67
-rw-r--r--drivers/cpuidle/governor.c43
-rw-r--r--drivers/cpuidle/sysfs.c7
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c3
-rw-r--r--drivers/crypto/caam/ctrl.c5
-rw-r--r--drivers/crypto/caam/jr.c4
-rw-r--r--drivers/crypto/ixp4xx_crypto.c48
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/crypto/tegra-aes.c2
-rw-r--r--drivers/devfreq/devfreq.c29
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.c29
-rw-r--r--drivers/devfreq/exynos/exynos5_bus.c57
-rw-r--r--drivers/dma/Kconfig25
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c44
-rw-r--r--drivers/dma/at_hdmac.c28
-rw-r--r--drivers/dma/bestcomm/sram.c1
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/cppi41.c178
-rw-r--r--drivers/dma/dma-jz4740.c2
-rw-r--r--drivers/dma/dmaengine.c264
-rw-r--r--drivers/dma/dmatest.c917
-rw-r--r--drivers/dma/dw/core.c29
-rw-r--r--drivers/dma/dw/platform.c8
-rw-r--r--drivers/dma/edma.c379
-rw-r--r--drivers/dma/ep93xx_dma.c30
-rw-r--r--drivers/dma/fsldma.c28
-rw-r--r--drivers/dma/fsldma.h2
-rw-r--r--drivers/dma/imx-dma.c42
-rw-r--r--drivers/dma/imx-sdma.c14
-rw-r--r--drivers/dma/intel_mid_dma.c4
-rw-r--r--drivers/dma/ioat/dma.c53
-rw-r--r--drivers/dma/ioat/dma.h14
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v2.h1
-rw-r--r--drivers/dma/ioat/dma_v3.c323
-rw-r--r--drivers/dma/ioat/pci.c20
-rw-r--r--drivers/dma/iop-adma.c113
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/mmp_pdma.c7
-rw-r--r--drivers/dma/mmp_tdma.c47
-rw-r--r--drivers/dma/mpc512x_dma.c2
-rw-r--r--drivers/dma/mv_xor.c58
-rw-r--r--drivers/dma/mv_xor.h25
-rw-r--r--drivers/dma/mxs-dma.c178
-rw-r--r--drivers/dma/omap-dma.c2
-rw-r--r--drivers/dma/pl330.c36
-rw-r--r--drivers/dma/ppc4xx/adma.c274
-rw-r--r--drivers/dma/s3c24xx-dma.c1350
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/shdma-base.c2
-rw-r--r--drivers/dma/sh/shdmac.c4
-rw-r--r--drivers/dma/ste_dma40.c7
-rw-r--r--drivers/dma/tegra20-apb-dma.c6
-rw-r--r--drivers/dma/timb_dma.c37
-rw-r--r--drivers/dma/txx9dmac.c29
-rw-r--r--drivers/edac/amd64_edac.c46
-rw-r--r--drivers/edac/amd64_edac.h8
-rw-r--r--drivers/edac/cell_edac.c2
-rw-r--r--drivers/edac/edac_device.c9
-rw-r--r--drivers/edac/edac_mc.c6
-rw-r--r--drivers/edac/edac_pci.c8
-rw-r--r--drivers/edac/ghes_edac.c16
-rw-r--r--drivers/edac/highbank_l2_edac.c33
-rw-r--r--drivers/edac/highbank_mc_edac.c175
-rw-r--r--drivers/edac/mpc85xx_edac.c22
-rw-r--r--drivers/edac/sb_edac.c595
-rw-r--r--drivers/extcon/extcon-adc-jack.c27
-rw-r--r--drivers/extcon/extcon-arizona.c55
-rw-r--r--drivers/extcon/extcon-class.c102
-rw-r--r--drivers/extcon/extcon-gpio.c19
-rw-r--r--drivers/extcon/extcon-max77693.c136
-rw-r--r--drivers/extcon/extcon-max8997.c11
-rw-r--r--drivers/extcon/extcon-palmas.c5
-rw-r--r--drivers/firewire/core-transaction.c2
-rw-r--r--drivers/firmware/dcdbas.c32
-rw-r--r--drivers/firmware/dmi_scan.c61
-rw-r--r--drivers/firmware/efi/Kconfig3
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/cper.c (renamed from drivers/acpi/apei/cper.c)132
-rw-r--r--drivers/firmware/efi/efi-stub-helper.c636
-rw-r--r--drivers/firmware/efi/efi.c140
-rw-r--r--drivers/firmware/efi/efivars.c2
-rw-r--r--drivers/firmware/google/gsmi.c13
-rw-r--r--drivers/fmc/Kconfig2
-rw-r--r--drivers/gpio/Kconfig39
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/devres.c85
-rw-r--r--drivers/gpio/gpio-74x164.c5
-rw-r--r--drivers/gpio/gpio-adnp.c8
-rw-r--r--drivers/gpio/gpio-arizona.c4
-rw-r--r--drivers/gpio/gpio-bcm-kona.c640
-rw-r--r--drivers/gpio/gpio-bt8xx.c2
-rw-r--r--drivers/gpio/gpio-clps711x.c2
-rw-r--r--drivers/gpio/gpio-davinci.c132
-rw-r--r--drivers/gpio/gpio-em.c13
-rw-r--r--drivers/gpio/gpio-ep93xx.c18
-rw-r--r--drivers/gpio/gpio-intel-mid.c471
-rw-r--r--drivers/gpio/gpio-iop.c130
-rw-r--r--drivers/gpio/gpio-langwell.c397
-rw-r--r--drivers/gpio/gpio-lpc32xx.c1
-rw-r--r--drivers/gpio/gpio-lynxpoint.c19
-rw-r--r--drivers/gpio/gpio-mc33880.c3
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c9
-rw-r--r--drivers/gpio/gpio-mxs.c32
-rw-r--r--drivers/gpio/gpio-omap.c20
-rw-r--r--drivers/gpio/gpio-palmas.c104
-rw-r--r--drivers/gpio/gpio-pca953x.c11
-rw-r--r--drivers/gpio/gpio-pcf857x.c105
-rw-r--r--drivers/gpio/gpio-pl061.c10
-rw-r--r--drivers/gpio/gpio-rcar.c13
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-samsung.c42
-rw-r--r--drivers/gpio/gpio-stmpe.c25
-rw-r--r--drivers/gpio/gpio-tb10x.c328
-rw-r--r--drivers/gpio/gpio-tc3589x.c36
-rw-r--r--drivers/gpio/gpio-tegra.c18
-rw-r--r--drivers/gpio/gpio-tnetv107x.c1
-rw-r--r--drivers/gpio/gpio-twl4030.c15
-rw-r--r--drivers/gpio/gpiolib-acpi.c138
-rw-r--r--drivers/gpio/gpiolib-of.c91
-rw-r--r--drivers/gpio/gpiolib.c878
-rw-r--r--drivers/gpu/drm/Kconfig73
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/armada/Kconfig24
-rw-r--r--drivers/gpu/drm/armada/Makefile7
-rw-r--r--drivers/gpu/drm/armada/armada_510.c87
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1098
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h83
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c177
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h113
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c421
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c170
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h24
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c202
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c611
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h52
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h318
-rw-r--r--drivers/gpu/drm/armada/armada_ioctlP.h18
-rw-r--r--drivers/gpu/drm/armada/armada_output.c158
-rw-r--r--drivers/gpu/drm/armada/armada_output.h39
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c477
-rw-r--r--drivers/gpu/drm/armada/armada_slave.c139
-rw-r--r--drivers/gpu/drm/armada/armada_slave.h26
-rw-r--r--drivers/gpu/drm/ast/Kconfig1
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c6
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c11
-rw-r--r--drivers/gpu/drm/drm_context.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c153
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c96
-rw-r--r--drivers/gpu/drm/drm_debugfs.c6
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c16
-rw-r--r--drivers/gpu/drm/drm_drv.c74
-rw-r--r--drivers/gpu/drm/drm_edid.c314
-rw-r--r--drivers/gpu/drm/drm_edid_load.c108
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c8
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c17
-rw-r--r--drivers/gpu/drm/drm_flip_work.c2
-rw-r--r--drivers/gpu/drm/drm_fops.c94
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/drm_global.c2
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c21
-rw-r--r--drivers/gpu/drm/drm_irq.c181
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c43
-rw-r--r--drivers/gpu/drm/drm_pci.c69
-rw-r--r--drivers/gpu/drm/drm_platform.c59
-rw-r--r--drivers/gpu/drm/drm_prime.c3
-rw-r--r--drivers/gpu/drm/drm_stub.c362
-rw-r--r--drivers/gpu/drm/drm_sysfs.c118
-rw-r--r--drivers/gpu/drm/drm_usb.c57
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c16
-rw-r--r--drivers/gpu/drm/gma500/Kconfig1
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c5
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c90
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c433
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c32
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c39
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h58
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c59
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c22
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c11
-rw-r--r--drivers/gpu/drm/i915/Kconfig67
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/dvo.h11
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1205
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c118
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c187
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h438
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c558
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c64
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c50
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c401
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c508
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c46
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1043
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h827
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c44
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c152
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h62
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c202
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h121
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c29
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c271
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1734
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c747
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h565
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c620
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h102
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c427
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h109
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c317
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c28
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c (renamed from drivers/gpu/drm/i915/intel_fb.c)33
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c83
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c64
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c25
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c496
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c9
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c346
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1336
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c275
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h15
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c52
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c79
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c203
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c29
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c449
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c5
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c2
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h42
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h46
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h6
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4.xml.h126
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_crtc.c208
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_format.c16
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c19
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.h58
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_plane.c30
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c60
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h37
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c160
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c56
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c4
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/Makefile48
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c119
-rw-r--r--drivers/gpu/drm/nouveau/core/core/option.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/ctrl.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c218
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c195
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c118
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c68
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c194
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/base.c449
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c143
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c70
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c78
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h91
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h47
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/debug.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/option.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/mpeg.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/perfmon.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h28
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bus.h20
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h111
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h50
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h80
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/volt.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/boost.c127
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/perf.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/timing.c73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/volt.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c145
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h113
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c494
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c183
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c520
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c271
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c404
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c497
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/seq.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h53
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c168
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c344
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c447
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c567
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c1264
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/base.c247
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc151
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc452
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc199
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc219
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h1165
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h1229
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h1229
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h1229
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc57
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c57
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/base.c198
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c56
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c16
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c320
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c185
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c121
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c (renamed from drivers/gpu/drm/nouveau/nouveau_pm.c)560
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwsq.h115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c647
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c416
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h283
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c162
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c250
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c146
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c353
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c855
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c624
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c599
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c17
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c51
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c42
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h127
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c21
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c26
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c6
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c58
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c4
-rw-r--r--drivers/gpu/drm/radeon/cik.c800
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c75
-rw-r--r--drivers/gpu/drm/radeon/cikd.h103
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c66
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c97
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c71
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h70
-rw-r--r--drivers/gpu/drm/radeon/ni.c76
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c19
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c5
-rw-r--r--drivers/gpu/drm/radeon/r600.c66
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c13
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c102
-rw-r--r--drivers/gpu/drm/radeon/r600d.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon.h71
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h53
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c116
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c307
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c125
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c176
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c380
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c77
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c63
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c129
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h60
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c5
-rw-r--r--drivers/gpu/drm/radeon/rs600.c64
-rw-r--r--drivers/gpu/drm/radeon/rs690.c16
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/si.c99
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c31
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/sid.h47
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/radeon/uvd_v3_1.c4
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/tegra/Kconfig (renamed from drivers/gpu/host1x/drm/Kconfig)13
-rw-r--r--drivers/gpu/drm/tegra/Makefile15
-rw-r--r--drivers/gpu/drm/tegra/bus.c76
-rw-r--r--drivers/gpu/drm/tegra/dc.c (renamed from drivers/gpu/host1x/drm/dc.c)108
-rw-r--r--drivers/gpu/drm/tegra/dc.h (renamed from drivers/gpu/host1x/drm/dc.h)5
-rw-r--r--drivers/gpu/drm/tegra/drm.c714
-rw-r--r--drivers/gpu/drm/tegra/drm.h (renamed from drivers/gpu/host1x/drm/drm.h)101
-rw-r--r--drivers/gpu/drm/tegra/fb.c (renamed from drivers/gpu/host1x/drm/fb.c)38
-rw-r--r--drivers/gpu/drm/tegra/gem.c (renamed from drivers/gpu/host1x/drm/gem.c)44
-rw-r--r--drivers/gpu/drm/tegra/gem.h (renamed from drivers/gpu/host1x/drm/gem.h)16
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c227
-rw-r--r--drivers/gpu/drm/tegra/gr2d.h28
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c338
-rw-r--r--drivers/gpu/drm/tegra/gr3d.h27
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c (renamed from drivers/gpu/host1x/drm/hdmi.c)257
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h (renamed from drivers/gpu/host1x/drm/hdmi.h)152
-rw-r--r--drivers/gpu/drm/tegra/output.c (renamed from drivers/gpu/host1x/drm/output.c)64
-rw-r--r--drivers/gpu/drm/tegra/rgb.c (renamed from drivers/gpu/host1x/drm/rgb.c)19
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/ttm/Makefile6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c81
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c35
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c118
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c32
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c254
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c3
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c7
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c379
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c101
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h112
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c153
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c137
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c101
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c30
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/Makefile13
-rw-r--r--drivers/gpu/host1x/bus.c550
-rw-r--r--drivers/gpu/host1x/bus.h (renamed from drivers/gpu/host1x/host1x_client.h)24
-rw-r--r--drivers/gpu/host1x/cdma.c2
-rw-r--r--drivers/gpu/host1x/channel.h6
-rw-r--r--drivers/gpu/host1x/dev.c82
-rw-r--r--drivers/gpu/host1x/dev.h11
-rw-r--r--drivers/gpu/host1x/drm/drm.c647
-rw-r--r--drivers/gpu/host1x/drm/gr2d.c343
-rw-r--r--drivers/gpu/host1x/host1x.h30
-rw-r--r--drivers/gpu/host1x/host1x_bo.h87
-rw-r--r--drivers/gpu/host1x/hw/Makefile6
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c8
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c32
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c16
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c16
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x02.h26
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_uclass.h6
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_channel.h121
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_uclass.h175
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c4
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c4
-rw-r--r--drivers/gpu/host1x/job.c73
-rw-r--r--drivers/gpu/host1x/job.h108
-rw-r--r--drivers/gpu/host1x/syncpt.c92
-rw-r--r--drivers/gpu/host1x/syncpt.h46
-rw-r--r--drivers/hid/Kconfig22
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-apple.c22
-rw-r--r--drivers/hid/hid-axff.c3
-rw-r--r--drivers/hid/hid-core.c25
-rw-r--r--drivers/hid/hid-elo.c35
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h16
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c50
-rw-r--r--drivers/hid/hid-lg.c138
-rw-r--r--drivers/hid/hid-lg2ff.c2
-rw-r--r--drivers/hid/hid-lg4ff.c101
-rw-r--r--drivers/hid/hid-logitech-dj.c14
-rw-r--r--drivers/hid/hid-multitouch.c27
-rw-r--r--drivers/hid/hid-roccat-common.c65
-rw-r--r--drivers/hid/hid-roccat-common.h62
-rw-r--r--drivers/hid/hid-roccat-konepure.c158
-rw-r--r--drivers/hid/hid-roccat-konepure.h72
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c4
-rw-r--r--drivers/hid/hid-roccat-ryos.c241
-rw-r--r--drivers/hid/hid-roccat-savu.c123
-rw-r--r--drivers/hid/hid-roccat-savu.h32
-rw-r--r--drivers/hid/hid-sensor-hub.c58
-rw-r--r--drivers/hid/hid-sony.c63
-rw-r--r--drivers/hid/hid-wiimote-modules.c117
-rw-r--r--drivers/hid/hid-wiimote.h4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c24
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hsi/hsi.c10
-rw-r--r--drivers/hv/channel.c50
-rw-r--r--drivers/hv/channel_mgmt.c5
-rw-r--r--drivers/hv/connection.c21
-rw-r--r--drivers/hv/hv.c2
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h9
-rw-r--r--drivers/hv/vmbus_drv.c488
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/abituguru.c6
-rw-r--r--drivers/hwmon/abituguru3.c2
-rw-r--r--drivers/hwmon/acpi_power_meter.c18
-rw-r--r--drivers/hwmon/adcxx.c2
-rw-r--r--drivers/hwmon/adm1026.c6
-rw-r--r--drivers/hwmon/adt7310.c7
-rw-r--r--drivers/hwmon/adt7462.c5
-rw-r--r--drivers/hwmon/asc7621.c12
-rw-r--r--drivers/hwmon/asus_atk0110.c2
-rw-r--r--drivers/hwmon/atxp1.c3
-rw-r--r--drivers/hwmon/ds1621.c63
-rw-r--r--drivers/hwmon/emc1403.c120
-rw-r--r--drivers/hwmon/f71882fg.c1
-rw-r--r--drivers/hwmon/f75375s.c4
-rw-r--r--drivers/hwmon/gpio-fan.c46
-rw-r--r--drivers/hwmon/hwmon.c185
-rw-r--r--drivers/hwmon/ina209.c46
-rw-r--r--drivers/hwmon/ina2xx.c64
-rw-r--r--drivers/hwmon/jc42.c62
-rw-r--r--drivers/hwmon/jz4740-hwmon.c2
-rw-r--r--drivers/hwmon/lm70.c2
-rw-r--r--drivers/hwmon/lm73.c70
-rw-r--r--drivers/hwmon/lm75.c3
-rw-r--r--drivers/hwmon/lm90.c327
-rw-r--r--drivers/hwmon/lm95234.c138
-rw-r--r--drivers/hwmon/ltc4245.c78
-rw-r--r--drivers/hwmon/ltc4261.c56
-rw-r--r--drivers/hwmon/max16065.c124
-rw-r--r--drivers/hwmon/max6642.c73
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/max6697.c55
-rw-r--r--drivers/hwmon/mc13783-adc.c2
-rw-r--r--drivers/hwmon/nct6775.c234
-rw-r--r--drivers/hwmon/pmbus/lm25066.c91
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c16
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c24
-rw-r--r--drivers/hwmon/tmp401.c92
-rw-r--r--drivers/hwmon/w83791d.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/hwmon/w83793.c5
-rw-r--r--drivers/i2c/busses/Kconfig32
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c909
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c6
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c26
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c769
-rw-r--r--drivers/i2c/busses/i2c-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c342
-rw-r--r--drivers/i2c/busses/i2c-omap.c24
-rw-r--r--drivers/i2c/busses/i2c-pnx.c1
-rw-r--r--drivers/i2c/busses/i2c-powermac.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c65
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-scmi.c6
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c8
-rw-r--r--drivers/i2c/busses/i2c-st.c872
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-wmt.c5
-rw-r--r--drivers/i2c/busses/i2c-xiic.c3
-rw-r--r--drivers/i2c/i2c-core.c46
-rw-r--r--drivers/i2c/i2c-dev.c19
-rw-r--r--drivers/i2c/i2c-smbus.c10
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c1
-rw-r--r--drivers/ide/Kconfig11
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ide/cs5536.c13
-rw-r--r--drivers/ide/ide-acpi.c5
-rw-r--r--drivers/ide/ide-h8300.c109
-rw-r--r--drivers/ide/ide-sysfs.c35
-rw-r--r--drivers/ide/ide.c2
-rw-r--r--drivers/ide/pmac.c52
-rw-r--r--drivers/idle/intel_idle.c44
-rw-r--r--drivers/iio/accel/bma180.c7
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c7
-rw-r--r--drivers/iio/accel/kxsd9.c7
-rw-r--r--drivers/iio/accel/st_accel_buffer.c11
-rw-r--r--drivers/iio/accel/st_accel_core.c30
-rw-r--r--drivers/iio/adc/Kconfig12
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7266.c46
-rw-r--r--drivers/iio/adc/ad7298.c10
-rw-r--r--drivers/iio/adc/ad7476.c16
-rw-r--r--drivers/iio/adc/ad7791.c18
-rw-r--r--drivers/iio/adc/ad7887.c15
-rw-r--r--drivers/iio/adc/ad7923.c10
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c13
-rw-r--r--drivers/iio/adc/at91_adc.c469
-rw-r--r--drivers/iio/adc/max1363.c299
-rw-r--r--drivers/iio/adc/mcp3422.c410
-rw-r--r--drivers/iio/adc/nau7802.c5
-rw-r--r--drivers/iio/adc/ti-adc081c.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c225
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c6
-rw-r--r--drivers/iio/buffer_cb.c27
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c7
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c70
-rw-r--r--drivers/iio/dac/Kconfig2
-rw-r--r--drivers/iio/dac/ad5064.c10
-rw-r--r--drivers/iio/dac/ad5360.c11
-rw-r--r--drivers/iio/dac/ad5380.c44
-rw-r--r--drivers/iio/dac/ad5421.c86
-rw-r--r--drivers/iio/dac/ad5446.c18
-rw-r--r--drivers/iio/dac/ad5449.c7
-rw-r--r--drivers/iio/dac/ad5504.c13
-rw-r--r--drivers/iio/dac/ad5624r_spi.c13
-rw-r--r--drivers/iio/dac/ad5686.c14
-rw-r--r--drivers/iio/dac/ad5755.c26
-rw-r--r--drivers/iio/dac/ad5764.c10
-rw-r--r--drivers/iio/dac/ad5791.c11
-rw-r--r--drivers/iio/dac/ad7303.c1
-rw-r--r--drivers/iio/dac/max517.c17
-rw-r--r--drivers/iio/dac/mcp4725.c19
-rw-r--r--drivers/iio/frequency/adf4350.c1
-rw-r--r--drivers/iio/gyro/adis16080.c7
-rw-r--r--drivers/iio/gyro/adis16130.c8
-rw-r--r--drivers/iio/gyro/adis16260.c1
-rw-r--r--drivers/iio/gyro/adxrs450.c13
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c7
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c5
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c11
-rw-r--r--drivers/iio/gyro/st_gyro_core.c30
-rw-r--r--drivers/iio/iio_core.h8
-rw-r--r--drivers/iio/imu/adis16400_buffer.c9
-rw-r--r--drivers/iio/imu/adis_buffer.c9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c6
-rw-r--r--drivers/iio/industrialio-buffer.c242
-rw-r--r--drivers/iio/industrialio-core.c275
-rw-r--r--drivers/iio/industrialio-event.c234
-rw-r--r--drivers/iio/industrialio-triggered-buffer.c8
-rw-r--r--drivers/iio/kfifo_buf.c49
-rw-r--r--drivers/iio/light/Kconfig43
-rw-r--r--drivers/iio/light/Makefile4
-rw-r--r--drivers/iio/light/adjd_s311.c77
-rw-r--r--drivers/iio/light/apds9300.c53
-rw-r--r--drivers/iio/light/cm36651.c708
-rw-r--r--drivers/iio/light/gp2ap020a00f.c1654
-rw-r--r--drivers/iio/light/hid-sensor-als.c7
-rw-r--r--drivers/iio/light/tcs3472.c367
-rw-r--r--drivers/iio/light/tsl2563.c53
-rw-r--r--drivers/iio/light/tsl4531.c258
-rw-r--r--drivers/iio/light/vcnl4000.c6
-rw-r--r--drivers/iio/magnetometer/Kconfig10
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c7
-rw-r--r--drivers/iio/magnetometer/mag3110.c401
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c11
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c31
-rw-r--r--drivers/iio/pressure/Kconfig2
-rw-r--r--drivers/iio/pressure/st_pressure.h1
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c11
-rw-r--r--drivers/iio/pressure/st_pressure_core.c307
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c1
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c1
-rw-r--r--drivers/iio/temperature/tmp006.c57
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c14
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/cm.c5
-rw-r--r--drivers/infiniband/core/cma.c96
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/uverbs.h36
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c109
-rw-r--r--drivers/infiniband/core/uverbs_main.c128
-rw-r--r--drivers/infiniband/core/verbs.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_sdma.c7
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/main.c22
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c25
-rw-r--r--drivers/infiniband/hw/mlx5/main.c3
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c167
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c21
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h14
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c126
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c24
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c10
-rw-r--r--drivers/infiniband/ulp/isert/Kconfig4
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c99
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h6
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c500
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h21
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c10
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/evdev.c16
-rw-r--r--drivers/input/gameport/gameport.c17
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/keyboard/Kconfig6
-rw-r--r--drivers/input/keyboard/gpio_keys.c1
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c1
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c2
-rw-r--r--drivers/input/keyboard/nspire-keypad.c6
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c1
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c4
-rw-r--r--drivers/input/misc/Kconfig4
-rw-r--r--drivers/input/misc/ad714x-spi.c1
-rw-r--r--drivers/input/misc/cobalt_btns.c2
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c25
-rw-r--r--drivers/input/misc/mma8450.c4
-rw-r--r--drivers/input/misc/mpu3050.c1
-rw-r--r--drivers/input/misc/pwm-beeper.c1
-rw-r--r--drivers/input/misc/rb532_button.c1
-rw-r--r--drivers/input/misc/rotary_encoder.c1
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/misc/uinput.c26
-rw-r--r--drivers/input/mouse/alps.c3
-rw-r--r--drivers/input/mouse/cypress_ps2.c29
-rw-r--r--drivers/input/serio/Kconfig18
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/hyperv-keyboard.c437
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h2
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/serio/serio.c70
-rw-r--r--drivers/input/serio/xilinx_ps2.c8
-rw-r--r--drivers/input/tablet/wacom_sys.c96
-rw-r--r--drivers/input/tablet/wacom_wac.c114
-rw-r--r--drivers/input/tablet/wacom_wac.h8
-rw-r--r--drivers/input/touchscreen/Kconfig13
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ad7877.c2
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c1
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2
-rw-r--r--drivers/input/touchscreen/cyttsp4_spi.c5
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c2
-rw-r--r--drivers/input/touchscreen/htcpen.c2
-rw-r--r--drivers/input/touchscreen/st1232.c1
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c23
-rw-r--r--drivers/input/touchscreen/tsc2005.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c4
-rw-r--r--drivers/input/touchscreen/zforce_ts.c836
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/arm-smmu.c74
-rw-r--r--drivers/iommu/dmar.c6
-rw-r--r--drivers/iommu/intel-iommu.c12
-rw-r--r--drivers/iommu/intel_irq_remapping.c21
-rw-r--r--drivers/iommu/iommu-traces.c27
-rw-r--r--drivers/iommu/iommu.c21
-rw-r--r--drivers/iommu/omap-iopgtable.h2
-rw-r--r--drivers/iommu/tegra-gart.c27
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/ipack/ipack.c22
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c202
-rw-r--r--drivers/irqchip/irq-bcm2835.c22
-rw-r--r--drivers/irqchip/irq-gic.c151
-rw-r--r--drivers/irqchip/irq-vic.c7
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.c2
-rw-r--r--drivers/isdn/icn/icn.c3
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c8
-rw-r--r--drivers/isdn/mISDN/socket.c13
-rw-r--r--drivers/isdn/sc/init.c2
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-blinkm.c3
-rw-r--r--drivers/leds/leds-dac124s085.c3
-rw-r--r--drivers/leds/leds-gpio.c7
-rw-r--r--drivers/leds/leds-lp5523.c1
-rw-r--r--drivers/leds/leds-lp5562.c1
-rw-r--r--drivers/leds/leds-lp55xx-common.c28
-rw-r--r--drivers/leds/leds-lp8501.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/leds/leds-pca9685.c213
-rw-r--r--drivers/leds/leds-pwm.c2
-rw-r--r--drivers/lguest/lguest_device.c3
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/macio_asic.c2
-rw-r--r--drivers/macintosh/rack-meter.c2
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/md/Kconfig22
-rw-r--r--drivers/md/bcache/Kconfig11
-rw-r--r--drivers/md/bcache/alloc.c383
-rw-r--r--drivers/md/bcache/bcache.h327
-rw-r--r--drivers/md/bcache/bset.c289
-rw-r--r--drivers/md/bcache/bset.h93
-rw-r--r--drivers/md/bcache/btree.c1396
-rw-r--r--drivers/md/bcache/btree.h195
-rw-r--r--drivers/md/bcache/closure.c103
-rw-r--r--drivers/md/bcache/closure.h183
-rw-r--r--drivers/md/bcache/debug.c185
-rw-r--r--drivers/md/bcache/debug.h50
-rw-r--r--drivers/md/bcache/journal.c293
-rw-r--r--drivers/md/bcache/journal.h52
-rw-r--r--drivers/md/bcache/movinggc.c87
-rw-r--r--drivers/md/bcache/request.c1102
-rw-r--r--drivers/md/bcache/request.h43
-rw-r--r--drivers/md/bcache/stats.c26
-rw-r--r--drivers/md/bcache/stats.h13
-rw-r--r--drivers/md/bcache/super.c190
-rw-r--r--drivers/md/bcache/sysfs.c42
-rw-r--r--drivers/md/bcache/trace.c1
-rw-r--r--drivers/md/bcache/util.c12
-rw-r--r--drivers/md/bcache/util.h15
-rw-r--r--drivers/md/bcache/writeback.c455
-rw-r--r--drivers/md/bcache/writeback.h46
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/dm-cache-metadata.c104
-rw-r--r--drivers/md/dm-cache-metadata.h5
-rw-r--r--drivers/md/dm-cache-policy-internal.h7
-rw-r--r--drivers/md/dm-cache-policy-mq.c681
-rw-r--r--drivers/md/dm-cache-policy.c4
-rw-r--r--drivers/md/dm-cache-policy.h21
-rw-r--r--drivers/md/dm-cache-target.c687
-rw-r--r--drivers/md/dm-crypt.c216
-rw-r--r--drivers/md/dm-ioctl.c36
-rw-r--r--drivers/md/dm-mpath.c34
-rw-r--r--drivers/md/dm-table.c23
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/dm.h13
-rw-r--r--drivers/md/md.c175
-rw-r--r--drivers/md/md.h3
-rw-r--r--drivers/md/persistent-data/dm-array.c5
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c18
-rw-r--r--drivers/md/raid1.c172
-rw-r--r--drivers/md/raid1.h15
-rw-r--r--drivers/md/raid10.c24
-rw-r--r--drivers/md/raid5.c422
-rw-r--r--drivers/md/raid5.h18
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.c8
-rw-r--r--drivers/media/common/siano/smsdvb-main.c8
-rw-r--r--drivers/media/dvb-core/dmxdev.c4
-rw-r--r--drivers/media/dvb-core/dvb_demux.c17
-rw-r--r--drivers/media/dvb-core/dvbdev.c19
-rw-r--r--drivers/media/dvb-frontends/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9013.c14
-rw-r--r--drivers/media/dvb-frontends/af9033.c23
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c15
-rw-r--r--drivers/media/dvb-frontends/cx24110.c2
-rw-r--r--drivers/media/dvb-frontends/cx24117.c1650
-rw-r--r--drivers/media/dvb-frontends/cx24117.h47
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c25
-rw-r--r--drivers/media/dvb-frontends/dib9000.c4
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c12
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c4
-rw-r--r--drivers/media/dvb-frontends/itd1000.c13
-rw-r--r--drivers/media/dvb-frontends/mt312.c10
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c11
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c14
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c15
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h1
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c11
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c12
-rw-r--r--drivers/media/dvb-frontends/stb6100.c11
-rw-r--r--drivers/media/dvb-frontends/stv0367.c13
-rw-r--r--drivers/media/dvb-frontends/stv090x.c12
-rw-r--r--drivers/media/dvb-frontends/stv6110.c12
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c13
-rw-r--r--drivers/media/dvb-frontends/tda10071.c25
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c14
-rw-r--r--drivers/media/dvb-frontends/tda8083.c4
-rw-r--r--drivers/media/dvb-frontends/ts2020.c7
-rw-r--r--drivers/media/dvb-frontends/ts2020.h1
-rw-r--r--drivers/media/dvb-frontends/zl10039.c12
-rw-r--r--drivers/media/i2c/Kconfig11
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7183.c2
-rw-r--r--drivers/media/i2c/adv7343.c1
-rw-r--r--drivers/media/i2c/lm3560.c488
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c4
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c4
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c2
-rw-r--r--drivers/media/i2c/ths8200.c1
-rw-r--r--drivers/media/i2c/tvp514x.c1
-rw-r--r--drivers/media/i2c/tvp7002.c1
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c23
-rw-r--r--drivers/media/pci/cx23885/Kconfig1
-rw-r--r--drivers/media/pci/cx23885/cimax2.c13
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c108
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885.h3
-rw-r--r--drivers/media/pci/cx25821/cx25821-cards.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-video.c18
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-video.h6
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c29
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c17
-rw-r--r--drivers/media/pci/cx88/cx88-video.c18
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c2
-rw-r--r--drivers/media/pci/dm1105/dm1105.c5
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/mantis/mantis_pci.c2
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/ngene/ngene-core.c4
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c3
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c19
-rw-r--r--drivers/media/pci/zoran/Kconfig1
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/platform/Kconfig21
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c2
-rw-r--r--drivers/media/platform/coda.c283
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c4
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c29
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c13
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c6
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c14
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/platform/mem2mem_testdev.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.h3
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c6
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c46
-rw-r--r--drivers/media/platform/ti-vpe/Makefile5
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c846
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h203
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h641
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2099
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h496
-rw-r--r--drivers/media/platform/timblogiw.c6
-rw-r--r--drivers/media/radio/radio-keene.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c5
-rw-r--r--drivers/media/radio/radio-shark.c2
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c10
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c6
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si4713-i2c.c2
-rw-r--r--drivers/media/radio/tef6862.c20
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/rc/Kconfig10
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/fintek-cir.h4
-rw-r--r--drivers/media/rc/gpio-ir-recv.c1
-rw-r--r--drivers/media/rc/iguanair.c24
-rw-r--r--drivers/media/rc/ir-rx51.c3
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c2
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c2
-rw-r--r--drivers/media/rc/nuvoton-cir.h4
-rw-r--r--drivers/media/rc/st_rc.c395
-rw-r--r--drivers/media/rc/winbond-cir.c2
-rw-r--r--drivers/media/tuners/e4000.c25
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/fc2580.c25
-rw-r--r--drivers/media/tuners/r820t.c22
-rw-r--r--drivers/media/tuners/tda18212.c25
-rw-r--r--drivers/media/tuners/tda18218.c23
-rw-r--r--drivers/media/tuners/tda9887.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028.c12
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c6
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c110
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c29
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c10
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c42
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h1
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c41
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c10
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c103
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c42
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c121
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c63
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c7
-rw-r--r--drivers/media/usb/em28xx/em28xx.h2
-rw-r--r--drivers/media/usb/gspca/conex.c3
-rw-r--r--drivers/media/usb/gspca/cpia1.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c48
-rw-r--r--drivers/media/usb/gspca/gspca.h10
-rw-r--r--drivers/media/usb/gspca/jeilinj.c5
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_mt9m111.c2
-rw-r--r--drivers/media/usb/gspca/mars.c7
-rw-r--r--drivers/media/usb/gspca/mr97310a.c6
-rw-r--r--drivers/media/usb/gspca/nw80x.c11
-rw-r--r--drivers/media/usb/gspca/ov519.c52
-rw-r--r--drivers/media/usb/gspca/ov534.c5
-rw-r--r--drivers/media/usb/gspca/ov534_9.c334
-rw-r--r--drivers/media/usb/gspca/pac207.c4
-rw-r--r--drivers/media/usb/gspca/pac7311.c6
-rw-r--r--drivers/media/usb/gspca/se401.c6
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c6
-rw-r--r--drivers/media/usb/gspca/sonixb.c7
-rw-r--r--drivers/media/usb/gspca/sonixj.c3
-rw-r--r--drivers/media/usb/gspca/spca1528.c3
-rw-r--r--drivers/media/usb/gspca/spca500.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c2
-rw-r--r--drivers/media/usb/gspca/sq930x.c3
-rw-r--r--drivers/media/usb/gspca/stk014.c5
-rw-r--r--drivers/media/usb/gspca/stk1135.c76
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c3
-rw-r--r--drivers/media/usb/gspca/topro.c13
-rw-r--r--drivers/media/usb/gspca/tv8532.c7
-rw-r--r--drivers/media/usb/gspca/vicam.c8
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c28
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c46
-rw-r--r--drivers/media/usb/gspca/zc3xx.c3
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c43
-rw-r--r--drivers/media/usb/tlg2300/pd-main.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c152
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c4
-rw-r--r--drivers/media/usb/uvc/uvc_video.c3
-rw-r--r--drivers/media/v4l2-core/tuner-core.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c30
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c39
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c16
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c19
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c149
-rw-r--r--drivers/memstick/core/memstick.c20
-rw-r--r--drivers/memstick/core/ms_block.c4
-rw-r--r--drivers/memstick/core/ms_block.h2
-rw-r--r--drivers/memstick/core/mspro_block.c4
-rw-r--r--drivers/memstick/host/r592.c2
-rw-r--r--drivers/message/i2o/core.h2
-rw-r--r--drivers/message/i2o/device.c32
-rw-r--r--drivers/message/i2o/driver.c18
-rw-r--r--drivers/mfd/88pm860x-core.c2
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/aat2870-core.c2
-rw-r--r--drivers/mfd/arizona-core.c38
-rw-r--r--drivers/mfd/arizona-i2c.c1
-rw-r--r--drivers/mfd/arizona-spi.c1
-rw-r--r--drivers/mfd/as3711.c1
-rw-r--r--drivers/mfd/as3722.c449
-rw-r--r--drivers/mfd/da9052-i2c.c12
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h1
-rw-r--r--drivers/mfd/ezx-pcap.c8
-rw-r--r--drivers/mfd/lpc_ich.c15
-rw-r--r--drivers/mfd/lpc_sch.c3
-rw-r--r--drivers/mfd/max77686.c1
-rw-r--r--drivers/mfd/max77693-irq.c3
-rw-r--r--drivers/mfd/max77693.c19
-rw-r--r--drivers/mfd/max8907.c1
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/max8997.c1
-rw-r--r--drivers/mfd/mc13xxx-core.c5
-rw-r--r--drivers/mfd/mc13xxx-i2c.c1
-rw-r--r--drivers/mfd/mc13xxx-spi.c5
-rw-r--r--drivers/mfd/mfd-core.c51
-rw-r--r--drivers/mfd/omap-usb-host.c18
-rw-r--r--drivers/mfd/omap-usb-tll.c6
-rw-r--r--drivers/mfd/palmas.c30
-rw-r--r--drivers/mfd/pm8921-core.c9
-rw-r--r--drivers/mfd/rts5249.c48
-rw-r--r--drivers/mfd/rtsx_pcr.c5
-rw-r--r--drivers/mfd/sec-core.c1
-rw-r--r--drivers/mfd/sm501.c4
-rw-r--r--drivers/mfd/stw481x.c250
-rw-r--r--drivers/mfd/tc3589x.c37
-rw-r--r--drivers/mfd/ti-ssp.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c24
-rw-r--r--drivers/mfd/timberdale.c6
-rw-r--r--drivers/mfd/tps6507x.c1
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps6586x.c19
-rw-r--r--drivers/mfd/tps65910.c5
-rw-r--r--drivers/mfd/twl6040.c18
-rw-r--r--drivers/mfd/ucb1x00-core.c1
-rw-r--r--drivers/mfd/wm5102-tables.c1
-rw-r--r--drivers/mfd/wm5110-tables.c47
-rw-r--r--drivers/mfd/wm8994-core.c78
-rw-r--r--drivers/misc/Kconfig14
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/arm-charlcd.c2
-rw-r--r--drivers/misc/atmel_pwm.c6
-rw-r--r--drivers/misc/bh1780gli.c11
-rw-r--r--drivers/misc/bmp085.c2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c2
-rw-r--r--drivers/misc/carma/carma-fpga.c5
-rw-r--r--drivers/misc/cb710/core.c2
-rw-r--r--drivers/misc/eeprom/Kconfig13
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c5
-rw-r--r--drivers/misc/eeprom/at25.c7
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c2
-rw-r--r--drivers/misc/eeprom/sunxi_sid.c158
-rw-r--r--drivers/misc/ep93xx_pwm.c286
-rw-r--r--drivers/misc/ibmasm/module.c4
-rw-r--r--drivers/misc/lkdtm.c107
-rw-r--r--drivers/misc/mei/amthif.c49
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/client.c129
-rw-r--r--drivers/misc/mei/client.h9
-rw-r--r--drivers/misc/mei/hbm.c9
-rw-r--r--drivers/misc/mei/hw-me-regs.h1
-rw-r--r--drivers/misc/mei/init.c21
-rw-r--r--drivers/misc/mei/interrupt.c47
-rw-r--r--drivers/misc/mei/main.c65
-rw-r--r--drivers/misc/mei/mei_dev.h1
-rw-r--r--drivers/misc/mei/nfc.c10
-rw-r--r--drivers/misc/mei/pci-me.c8
-rw-r--r--drivers/misc/mei/wd.c12
-rw-r--r--drivers/misc/mic/Kconfig39
-rw-r--r--drivers/misc/mic/Makefile6
-rw-r--r--drivers/misc/mic/card/Makefile11
-rw-r--r--drivers/misc/mic/card/mic_debugfs.c130
-rw-r--r--drivers/misc/mic/card/mic_device.c305
-rw-r--r--drivers/misc/mic/card/mic_device.h133
-rw-r--r--drivers/misc/mic/card/mic_virtio.c630
-rw-r--r--drivers/misc/mic/card/mic_virtio.h77
-rw-r--r--drivers/misc/mic/card/mic_x100.c256
-rw-r--r--drivers/misc/mic/card/mic_x100.h48
-rw-r--r--drivers/misc/mic/common/mic_dev.h51
-rw-r--r--drivers/misc/mic/host/Makefile14
-rw-r--r--drivers/misc/mic/host/mic_boot.c300
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c491
-rw-r--r--drivers/misc/mic/host/mic_device.h203
-rw-r--r--drivers/misc/mic/host/mic_fops.c222
-rw-r--r--drivers/misc/mic/host/mic_fops.h32
-rw-r--r--drivers/misc/mic/host/mic_intr.c630
-rw-r--r--drivers/misc/mic/host/mic_intr.h137
-rw-r--r--drivers/misc/mic/host/mic_main.c536
-rw-r--r--drivers/misc/mic/host/mic_smpt.c442
-rw-r--r--drivers/misc/mic/host/mic_smpt.h98
-rw-r--r--drivers/misc/mic/host/mic_sysfs.c459
-rw-r--r--drivers/misc/mic/host/mic_virtio.c700
-rw-r--r--drivers/misc/mic/host/mic_virtio.h138
-rw-r--r--drivers/misc/mic/host/mic_x100.c570
-rw-r--r--drivers/misc/mic/host/mic_x100.h98
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/pti.c1
-rw-r--r--drivers/misc/ti-st/st_kim.c12
-rw-r--r--drivers/misc/ti_dac7512.c23
-rw-r--r--drivers/misc/tifm_7xx1.c7
-rw-r--r--drivers/misc/tifm_core.c10
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c21
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/card/queue.c3
-rw-r--r--drivers/mmc/core/bus.c14
-rw-r--r--drivers/mmc/core/core.c154
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/mmc.c127
-rw-r--r--drivers/mmc/core/mmc_ops.c96
-rw-r--r--drivers/mmc/core/sd.c118
-rw-r--r--drivers/mmc/core/sdio.c82
-rw-r--r--drivers/mmc/core/sdio_bus.c24
-rw-r--r--drivers/mmc/host/atmel-mci.c82
-rw-r--r--drivers/mmc/host/au1xmmc.c7
-rw-r--r--drivers/mmc/host/bfin_sdh.c15
-rw-r--r--drivers/mmc/host/cb710-mmc.c10
-rw-r--r--drivers/mmc/host/davinci_mmc.c26
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c291
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c7
-rw-r--r--drivers/mmc/host/dw_mmc-socfpga.c34
-rw-r--r--drivers/mmc/host/dw_mmc.c604
-rw-r--r--drivers/mmc/host/dw_mmc.h55
-rw-r--r--drivers/mmc/host/jz4740_mmc.c4
-rw-r--r--drivers/mmc/host/mmci.c95
-rw-r--r--drivers/mmc/host/mmci.h4
-rw-r--r--drivers/mmc/host/msm_sdcc.c27
-rw-r--r--drivers/mmc/host/mvsdio.c46
-rw-r--r--drivers/mmc/host/mxcmmc.c12
-rw-r--r--drivers/mmc/host/mxs-mmc.c12
-rw-r--r--drivers/mmc/host/omap.c53
-rw-r--r--drivers/mmc/host/omap_hsmmc.c112
-rw-r--r--drivers/mmc/host/pxamci.c32
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c35
-rw-r--r--drivers/mmc/host/s3cmci.c29
-rw-r--r--drivers/mmc/host/sdhci-acpi.c5
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c14
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c8
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c550
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h37
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c35
-rw-r--r--drivers/mmc/host/sdhci-pci.c76
-rw-r--r--drivers/mmc/host/sdhci.c44
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sdricoh_cs.c3
-rw-r--r--drivers/mmc/host/sh_mmcif.c32
-rw-r--r--drivers/mmc/host/tifm_sd.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c9
-rw-r--r--drivers/mmc/host/via-sdmmc.c7
-rw-r--r--drivers/mmc/host/vub300.c18
-rw-r--r--drivers/mmc/host/wbsd.c34
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c57
-rw-r--r--drivers/mtd/bcm47xxpart.c23
-rw-r--r--drivers/mtd/devices/Kconfig7
-rw-r--r--drivers/mtd/devices/block2mtd.c1
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/m25p80.c82
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c4
-rw-r--r--drivers/mtd/devices/phram.c66
-rw-r--r--drivers/mtd/devices/sst25l.c13
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c1
-rw-r--r--drivers/mtd/maps/pci.c1
-rw-r--r--drivers/mtd/maps/plat-ram.c18
-rw-r--r--drivers/mtd/maps/scb2_flash.c1
-rw-r--r--drivers/mtd/mtdblock.c3
-rw-r--r--drivers/mtd/mtdblock_ro.c3
-rw-r--r--drivers/mtd/mtdchar.c7
-rw-r--r--drivers/mtd/mtdcore.c3
-rw-r--r--drivers/mtd/mtdsuper.c1
-rw-r--r--drivers/mtd/nand/Kconfig40
-rw-r--r--drivers/mtd/nand/atmel_nand.c83
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c38
-rw-r--r--drivers/mtd/nand/denali.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c1
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c18
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c135
-rw-r--r--drivers/mtd/nand/fsl_upm.c1
-rw-r--r--drivers/mtd/nand/fsmc_nand.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c13
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c66
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-regs.h3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/mxc_nand.c5
-rw-r--r--drivers/mtd/nand/nand_base.c305
-rw-r--r--drivers/mtd/nand/nand_bbt.c38
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c1
-rw-r--r--drivers/mtd/nand/omap2.c642
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c56
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c15
-rw-r--r--drivers/mtd/nand/tmio_nand.c3
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/onenand/omap2.c32
-rw-r--r--drivers/mtd/onenand/onenand_base.c17
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c2
-rw-r--r--drivers/mtd/tests/pagetest.c2
-rw-r--r--drivers/mtd/tests/subpagetest.c2
-rw-r--r--drivers/mtd/ubi/attach.c11
-rw-r--r--drivers/mtd/ubi/build.c1
-rw-r--r--drivers/mtd/ubi/fastmap.c41
-rw-r--r--drivers/mtd/ubi/wl.c4
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c199
-rw-r--r--drivers/net/bonding/bond_alb.c150
-rw-r--r--drivers/net/bonding/bond_alb.h4
-rw-r--r--drivers/net/bonding/bond_main.c715
-rw-r--r--drivers/net/bonding/bond_netlink.c131
-rw-r--r--drivers/net/bonding/bond_options.c142
-rw-r--r--drivers/net/bonding/bond_procfs.c21
-rw-r--r--drivers/net/bonding/bond_sysfs.c308
-rw-r--r--drivers/net/bonding/bonding.h116
-rw-r--r--drivers/net/caif/caif_virtio.c23
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/c_can.c6
-rw-r--r--drivers/net/can/c_can/c_can_pci.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c4
-rw-r--r--drivers/net/can/dev.c67
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/mscan.h6
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/softing/softing.h24
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c12
-rw-r--r--drivers/net/can/usb/kvaser_usb.c20
-rw-r--r--drivers/net/dummy.c6
-rw-r--r--drivers/net/ethernet/3com/Kconfig4
-rw-r--r--drivers/net/ethernet/3com/typhoon.c1
-rw-r--r--drivers/net/ethernet/8390/8390.h40
-rw-r--r--drivers/net/ethernet/8390/Kconfig7
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c684
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c3
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h2
-rw-r--r--drivers/net/ethernet/amd/7990.h12
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/declance.c19
-rw-r--r--drivers/net/ethernet/amd/lance.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c3
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c1
-rw-r--r--drivers/net/ethernet/arc/emac_main.c17
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c15
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c137
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c32
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c105
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c151
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h7
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c170
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c14
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h43
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h46
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/regs.h35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c1
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h186
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c43
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h332
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c40
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c344
-rw-r--r--drivers/net/ethernet/fealnx.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c31
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c89
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h26
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c1
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig2
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h14
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c17
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h14
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/icplus/ipg.c1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h32
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h45
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c338
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c484
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c433
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h38
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h74
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c145
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c84
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h22
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h22
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h25
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h258
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c109
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c608
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h40
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c234
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h178
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c270
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c6
-rw-r--r--drivers/net/ethernet/jme.h1
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c110
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c5
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c152
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c589
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c196
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h56
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c1
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h9
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h181
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c155
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h18
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c78
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c47
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c184
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h109
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c260
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c67
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c440
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h60
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c127
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c14
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c319
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c12
-rw-r--r--drivers/net/ethernet/sfc/efx.h105
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c5
-rw-r--r--drivers/net/ethernet/sfc/io.h5
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h120
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h26
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h10
-rw-r--r--drivers/net/ethernet/sfc/nic.c73
-rw-r--r--drivers/net/ethernet/sfc/nic.h256
-rw-r--r--drivers/net/ethernet/sfc/phy.h8
-rw-r--r--drivers/net/ethernet/sfc/rx.c90
-rw-r--r--drivers/net/ethernet/sfc/selftest.h15
-rw-r--r--drivers/net/ethernet/sfc/tx.c426
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c1
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/smsc/epic100.c126
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c331
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h2
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c80
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c43
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c237
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c169
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c12
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/Kconfig8
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c161
-rw-r--r--drivers/net/ethernet/ti/cpsw.c195
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.h9
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/tile/tilepro.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h29
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c41
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c12
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h28
-rw-r--r--drivers/net/fddi/skfp/skfddi.c6
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c2
-rw-r--r--drivers/net/ieee802154/mrf24j40.c2
-rw-r--r--drivers/net/ifb.c5
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/bfin_sir.c4
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/sir-dev.h29
-rw-r--r--drivers/net/loopback.c6
-rw-r--r--drivers/net/macvlan.c54
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/netconsole.c75
-rw-r--r--drivers/net/nlmon.c8
-rw-r--r--drivers/net/phy/Kconfig7
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c57
-rw-r--r--drivers/net/phy/marvell.c4
-rw-r--r--drivers/net/phy/mdio-moxart.c201
-rw-r--r--drivers/net/phy/mdio_bus.c10
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/phy/realtek.c15
-rw-r--r--drivers/net/phy/vitesse.c117
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/team/team.c35
-rw-r--r--drivers/net/team/team_mode_loadbalance.c9
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/usb/Kconfig15
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/ax88179_178a.c11
-rw-r--r--drivers/net/usb/catc.c8
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c104
-rw-r--r--drivers/net/usb/cdc_ncm.c507
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c230
-rw-r--r--drivers/net/usb/qmi_wwan.c69
-rw-r--r--drivers/net/usb/r8152.c114
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/veth.c17
-rw-r--r--drivers/net/virtio_net.c293
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c45
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/sbni.c89
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wan/x25_asy.h2
-rw-r--r--drivers/net/wan/z85230.h27
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h27
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h117
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/ath/Kconfig18
-rw-r--r--drivers/net/wireless/ath/Makefile5
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c397
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h126
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c355
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h80
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c157
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c245
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c314
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c287
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h79
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c750
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c467
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h76
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c67
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1277
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1037
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig20
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c92
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c288
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h15
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h258
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h74
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c91
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c564
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c456
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h113
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c149
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c327
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c282
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c197
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c70
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c)23
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h)28
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c)10
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h)2
-rw-r--r--drivers/net/wireless/ath/regd.c141
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c181
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c805
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4657
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1036
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c62
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.h33
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2129
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h127
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h160
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h238
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/atmel.c94
-rw-r--r--drivers/net/wireless/b43/dma.c9
-rw-r--r--drivers/net/wireless/b43/phy_n.c3
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c9
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c186
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c343
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h96
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.h22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.h14
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h20
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h110
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h219
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h371
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h145
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h48
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h16
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_d11.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h44
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h87
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h82
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/iwlegacy/common.h66
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c14
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c638
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c515
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c207
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h149
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h69
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h29
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h55
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h16
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c81
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c256
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h89
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c101
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c62
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c70
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c42
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c793
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h163
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c27
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c462
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c206
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/testmode.h95
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c49
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c127
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c43
-rw-r--r--drivers/net/wireless/libertas/debugfs.c6
-rw-r--r--drivers/net/wireless/libertas/firmware.c5
-rw-r--r--drivers/net/wireless/libertas/if_cs.c9
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c8
-rw-r--r--drivers/net/wireless/libertas/if_spi.c6
-rw-r--r--drivers/net/wireless/libertas/if_usb.c17
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c32
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c23
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/mwifiex/ie.c11
-rw-r--r--drivers/net/wireless/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c32
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c8
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c46
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c5
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c29
-rw-r--r--drivers/net/wireless/mwifiex/usb.c27
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c5
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h24
-rw-r--r--drivers/net/wireless/mwl8k.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h31
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c9
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig28
-rw-r--r--drivers/net/wireless/rt2x00/Makefile2
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h44
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c291
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.c873
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.h165
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c951
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h97
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c263
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c43
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c18
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c122
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h10
-rw-r--r--drivers/net/wireless/rtlwifi/core.c10
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c23
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h29
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.h52
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h52
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c193
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.h49
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c6
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h8
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c18
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c127
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h33
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c70
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h5
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c18
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c232
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c51
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h11
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/common.h13
-rw-r--r--drivers/net/xen-netback/interface.c25
-rw-r--r--drivers/net/xen-netback/netback.c304
-rw-r--r--drivers/net/xen-netback/xenbus.c52
-rw-r--r--drivers/net/xen-netfront.c17
-rw-r--r--drivers/nfc/Kconfig10
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/mei_phy.c6
-rw-r--r--drivers/nfc/microread/i2c.c32
-rw-r--r--drivers/nfc/microread/mei.c4
-rw-r--r--drivers/nfc/microread/microread.c7
-rw-r--r--drivers/nfc/nfcsim.c38
-rw-r--r--drivers/nfc/nfcwilink.c97
-rw-r--r--drivers/nfc/pn533.c604
-rw-r--r--drivers/nfc/pn544/i2c.c42
-rw-r--r--drivers/nfc/pn544/pn544.c129
-rw-r--r--drivers/nfc/port100.c1529
-rw-r--r--drivers/ntb/ntb_transport.c86
-rw-r--r--drivers/of/Kconfig1
-rw-r--r--drivers/of/address.c18
-rw-r--r--drivers/of/base.c96
-rw-r--r--drivers/of/fdt.c138
-rw-r--r--drivers/of/irq.c164
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/of/of_pci_irq.c41
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/platform.c5
-rw-r--r--drivers/of/selftest.c161
-rw-r--r--drivers/parport/Kconfig13
-rw-r--r--drivers/parport/parport_ip32.c4
-rw-r--r--drivers/parport/parport_pc.c8
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/host/Kconfig16
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-exynos.c132
-rw-r--r--drivers/pci/host/pci-imx6.c568
-rw-r--r--drivers/pci/host/pci-mvebu.c248
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c333
-rw-r--r--drivers/pci/host/pci-tegra.c16
-rw-r--r--drivers/pci/host/pcie-designware.c257
-rw-r--r--drivers/pci/host/pcie-designware.h26
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/Makefile2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c11
-rw-r--r--drivers/pci/hotplug/acpiphp.h11
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c49
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c43
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c70
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c2
-rw-r--r--drivers/pci/hotplug/cpcihp_generic.c20
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c22
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.h18
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c10
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c5
-rw-r--r--drivers/pci/hotplug/ibmphp.h12
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c109
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c103
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c24
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c71
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c76
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c8
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c6
-rw-r--r--drivers/pci/hotplug/pciehp_core.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c6
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c8
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp.h6
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c8
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c3
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c19
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c6
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h10
-rw-r--r--drivers/pci/hotplug/shpchp_core.c10
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/ioapic.c2
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/irq.c2
-rw-r--r--drivers/pci/msi.c30
-rw-r--r--drivers/pci/pci-acpi.c11
-rw-r--r--drivers/pci/pci-driver.c56
-rw-r--r--drivers/pci/pci-label.c6
-rw-r--r--drivers/pci/pci-stub.c4
-rw-r--r--drivers/pci/pci-sysfs.c160
-rw-r--r--drivers/pci/pci.c74
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c4
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_bus.c4
-rw-r--r--drivers/pci/pcie/portdrv_core.c17
-rw-r--r--drivers/pci/pcie/portdrv_pci.c7
-rw-r--r--drivers/pci/probe.c14
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c127
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/search.c12
-rw-r--r--drivers/pci/setup-bus.c22
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/syscall.c2
-rw-r--r--drivers/pcmcia/at91_cf.c11
-rw-r--r--drivers/pcmcia/ds.c65
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/pd6729.c13
-rw-r--r--drivers/pcmcia/yenta_socket.c16
-rw-r--r--drivers/phy/Kconfig54
-rw-r--r--drivers/phy/Makefile9
-rw-r--r--drivers/phy/phy-core.c698
-rw-r--r--drivers/phy/phy-exynos-dp-video.c111
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c176
-rw-r--r--drivers/phy/phy-omap-usb2.c (renamed from drivers/usb/phy/phy-omap-usb2.c)72
-rw-r--r--drivers/phy/phy-twl4030-usb.c (renamed from drivers/usb/phy/phy-twl4030-usb.c)71
-rw-r--r--drivers/pinctrl/Kconfig54
-rw-r--r--drivers/pinctrl/Makefile8
-rw-r--r--drivers/pinctrl/core.c14
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c2
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf54x.c592
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf60x.c521
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c1164
-rw-r--r--drivers/pinctrl/pinctrl-adi2.h75
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c630
-rw-r--r--drivers/pinctrl/pinctrl-at91.c37
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c5
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx.c6
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c653
-rw-r--r--drivers/pinctrl/pinctrl-imx1.h73
-rw-r--r--drivers/pinctrl/pinctrl-imx27.c477
-rw-r--r--drivers/pinctrl/pinctrl-imx35.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx50.c426
-rw-r--r--drivers/pinctrl/pinctrl-imx51.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx53.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx6dl.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx6sl.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c5
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c20
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c292
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c384
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c875
-rw-r--r--drivers/pinctrl/pinctrl-vf610.c2
-rw-r--r--drivers/pinctrl/pinmux.c4
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig5
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c9
-rw-r--r--drivers/pinctrl/sh-pfc/core.h1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c180
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c110
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c4214
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c56
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c115
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c8
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.h6
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c4
-rw-r--r--drivers/platform/x86/eeepc-laptop.c8
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c44
-rw-r--r--drivers/platform/x86/ideapad-laptop.c344
-rw-r--r--drivers/platform/x86/intel-rst.c48
-rw-r--r--drivers/platform/x86/intel-smartconnect.c27
-rw-r--r--drivers/platform/x86/intel_menlow.c8
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c28
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c123
-rw-r--r--drivers/platform/x86/topstar-laptop.c8
-rw-r--r--drivers/platform/x86/toshiba_acpi.c44
-rw-r--r--drivers/platform/x86/wmi.c30
-rw-r--r--drivers/pnp/base.h2
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/interface.c43
-rw-r--r--drivers/pnp/pnpacpi/core.c21
-rw-r--r--drivers/power/Kconfig6
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ab8500_charger.c17
-rw-r--r--drivers/power/ab8500_fg.c4
-rw-r--r--drivers/power/bq2415x_charger.c6
-rw-r--r--drivers/power/bq24735-charger.c419
-rw-r--r--drivers/power/charger-manager.c85
-rw-r--r--drivers/power/isp1704_charger.c91
-rw-r--r--drivers/power/jz4740-battery.c2
-rw-r--r--drivers/power/max17042_battery.c373
-rw-r--r--drivers/power/pm2301_charger.c27
-rw-r--r--drivers/power/tps65090-charger.c30
-rw-r--r--drivers/power/twl4030_charger.c47
-rw-r--r--drivers/powercap/Kconfig32
-rw-r--r--drivers/powercap/Makefile2
-rw-r--r--drivers/powercap/intel_rapl.c1395
-rw-r--r--drivers/powercap/powercap_sys.c685
-rw-r--r--drivers/pps/clients/pps-gpio.c2
-rw-r--r--drivers/ptp/ptp_ixp46x.c9
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c4
-rw-r--r--drivers/pwm/pwm-ep93xx.c230
-rw-r--r--drivers/pwm/pwm-imx.c3
-rw-r--r--drivers/pwm/pwm-lpc32xx.c2
-rw-r--r--drivers/pwm/pwm-mxs.c2
-rw-r--r--drivers/pwm/pwm-samsung.c3
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c6
-rw-r--r--drivers/pwm/pwm-twl-led.c1
-rw-r--r--drivers/pwm/pwm-twl.c1
-rw-r--r--drivers/rapidio/rio-driver.c4
-rw-r--r--drivers/rapidio/rio-sysfs.c54
-rw-r--r--drivers/rapidio/rio.h4
-rw-r--r--drivers/regulator/88pm800.c12
-rw-r--r--drivers/regulator/88pm8607.c12
-rw-r--r--drivers/regulator/Kconfig26
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/aat2870-regulator.c11
-rw-r--r--drivers/regulator/ab3100.c3
-rw-r--r--drivers/regulator/ab8500-ext.c26
-rw-r--r--drivers/regulator/ad5398.c19
-rw-r--r--drivers/regulator/anatop-regulator.c7
-rw-r--r--drivers/regulator/arizona-ldo1.c12
-rw-r--r--drivers/regulator/arizona-micsupp.c14
-rw-r--r--drivers/regulator/as3711-regulator.c43
-rw-r--r--drivers/regulator/as3722-regulator.c908
-rw-r--r--drivers/regulator/core.c455
-rw-r--r--drivers/regulator/da903x.c17
-rw-r--r--drivers/regulator/da9052-regulator.c55
-rw-r--r--drivers/regulator/da9055-regulator.c24
-rw-r--r--drivers/regulator/da9063-regulator.c23
-rw-r--r--drivers/regulator/da9210-regulator.c19
-rw-r--r--drivers/regulator/devres.c415
-rw-r--r--drivers/regulator/fan53555.c12
-rw-r--r--drivers/regulator/fixed.c38
-rw-r--r--drivers/regulator/gpio-regulator.c1
-rw-r--r--drivers/regulator/helpers.c6
-rw-r--r--drivers/regulator/internal.h38
-rw-r--r--drivers/regulator/isl6271a-regulator.c24
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/lp872x.c33
-rw-r--r--drivers/regulator/lp8788-buck.c12
-rw-r--r--drivers/regulator/lp8788-ldo.c24
-rw-r--r--drivers/regulator/max1586.c26
-rw-r--r--drivers/regulator/max77686.c23
-rw-r--r--drivers/regulator/max77693.c29
-rw-r--r--drivers/regulator/max8649.c14
-rw-r--r--drivers/regulator/max8660.c30
-rw-r--r--drivers/regulator/max8907-regulator.c23
-rw-r--r--drivers/regulator/max8925-regulator.c12
-rw-r--r--drivers/regulator/max8973-regulator.c11
-rw-r--r--drivers/regulator/max8997.c44
-rw-r--r--drivers/regulator/max8998.c35
-rw-r--r--drivers/regulator/mc13783-regulator.c53
-rw-r--r--drivers/regulator/mc13892-regulator.c22
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c208
-rw-r--r--drivers/regulator/pcap-regulator.c13
-rw-r--r--drivers/regulator/pcf50633-regulator.c13
-rw-r--r--drivers/regulator/rc5t583-regulator.c22
-rw-r--r--drivers/regulator/s2mps11.c23
-rw-r--r--drivers/regulator/s5m8767.c86
-rw-r--r--drivers/regulator/stw481x-vmmc.c111
-rw-r--r--drivers/regulator/ti-abb-regulator.c86
-rw-r--r--drivers/regulator/tps51632-regulator.c11
-rw-r--r--drivers/regulator/tps6105x-regulator.c15
-rw-r--r--drivers/regulator/tps62360-regulator.c17
-rw-r--r--drivers/regulator/tps65023-regulator.c25
-rw-r--r--drivers/regulator/tps6507x-regulator.c23
-rw-r--r--drivers/regulator/tps65090-regulator.c37
-rw-r--r--drivers/regulator/tps65217-regulator.c50
-rw-r--r--drivers/regulator/tps6524x-regulator.c32
-rw-r--r--drivers/regulator/tps6586x-regulator.c33
-rw-r--r--drivers/regulator/tps65910-regulator.c35
-rw-r--r--drivers/regulator/tps65912-regulator.c33
-rw-r--r--drivers/regulator/tps80031-regulator.c30
-rw-r--r--drivers/regulator/twl-regulator.c3
-rw-r--r--drivers/regulator/vexpress.c3
-rw-r--r--drivers/regulator/wm831x-dcdc.c118
-rw-r--r--drivers/regulator/wm831x-isink.c25
-rw-r--r--drivers/regulator/wm831x-ldo.c75
-rw-r--r--drivers/regulator/wm8350-regulator.c12
-rw-r--r--drivers/regulator/wm8400-regulator.c19
-rw-r--r--drivers/regulator/wm8994-regulator.c14
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c3
-rw-r--r--drivers/rtc/Kconfig30
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/interface.c13
-rw-r--r--drivers/rtc/rtc-88pm80x.c8
-rw-r--r--drivers/rtc/rtc-88pm860x.c2
-rw-r--r--drivers/rtc/rtc-as3722.c275
-rw-r--r--drivers/rtc/rtc-at91rm9200.c37
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/rtc/rtc-ds1305.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c10
-rw-r--r--drivers/rtc/rtc-ds2404.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c6
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c28
-rw-r--r--drivers/rtc/rtc-isl1208.c42
-rw-r--r--drivers/rtc/rtc-m48t59.c20
-rw-r--r--drivers/rtc/rtc-m48t86.c8
-rw-r--r--drivers/rtc/rtc-max6900.c9
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mrst.c6
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-pcf2123.c2
-rw-r--r--drivers/rtc/rtc-pl030.c9
-rw-r--r--drivers/rtc/rtc-pl031.c5
-rw-r--r--drivers/rtc/rtc-puv3.c22
-rw-r--r--drivers/rtc/rtc-rs5c348.c4
-rw-r--r--drivers/rtc/rtc-s5m.c635
-rw-r--r--drivers/rtc/rtc-sh.c5
-rw-r--r--drivers/rtc/rtc-sirfsoc.c27
-rw-r--r--drivers/rtc/rtc-snvs.c2
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-tps65910.c1
-rw-r--r--drivers/rtc/rtc-v3020.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c4
-rw-r--r--drivers/rtc/rtc-vt8500.c4
-rw-r--r--drivers/s390/block/dasd.c11
-rw-r--r--drivers/s390/block/scm_blk.c30
-rw-r--r--drivers/s390/block/scm_blk.h2
-rw-r--r--drivers/s390/block/scm_blk_cluster.c2
-rw-r--r--drivers/s390/char/Makefile3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp.h5
-rw-r--r--drivers/s390/char/sclp_cmd.c180
-rw-r--r--drivers/s390/char/sclp_early.c264
-rw-r--r--drivers/s390/char/sclp_sdias.c78
-rw-r--r--drivers/s390/char/sclp_sdias.h46
-rw-r--r--drivers/s390/char/zcore.c42
-rw-r--r--drivers/s390/cio/airq.c19
-rw-r--r--drivers/s390/cio/eadm_sch.c39
-rw-r--r--drivers/s390/cio/eadm_sch.h4
-rw-r--r--drivers/s390/cio/qdio_debug.h8
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/cio/scm.c45
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h12
-rw-r--r--drivers/s390/kvm/kvm_virtio.c8
-rw-r--r--drivers/s390/kvm/virtio_ccw.c5
-rw-r--r--drivers/s390/net/claw.h8
-rw-r--r--drivers/s390/net/ctcm_dbug.c2
-rw-r--r--drivers/s390/net/lcs.h8
-rw-r--r--drivers/s390/net/netiucv.c8
-rw-r--r--drivers/s390/net/qeth_core_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c6
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h4
-rw-r--r--drivers/scsi/BusLogic.c36
-rw-r--r--drivers/scsi/aacraid/commctrl.c3
-rw-r--r--drivers/scsi/advansys.c8
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/be2iscsi/be.h2
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c200
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h65
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c175
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1331
-rw-r--r--drivers/scsi/be2iscsi/be_main.h182
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c290
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h13
-rw-r--r--drivers/scsi/bfa/bfad.c54
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c65
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c14
-rw-r--r--drivers/scsi/csiostor/csio_hw.c22
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/dc395x.c25
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c31
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c1
-rw-r--r--drivers/scsi/dpt_i2o.c35
-rw-r--r--drivers/scsi/dpti.h1
-rw-r--r--drivers/scsi/esas2r/esas2r.h132
-rw-r--r--drivers/scsi/esas2r/esas2r_disc.c55
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c34
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c156
-rw-r--r--drivers/scsi/esas2r/esas2r_int.c97
-rw-r--r--drivers/scsi/esas2r/esas2r_io.c73
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c28
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c34
-rw-r--r--drivers/scsi/esas2r/esas2r_targdb.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c12
-rw-r--r--drivers/scsi/fcoe/fcoe.c25
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c148
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c28
-rw-r--r--drivers/scsi/fnic/fnic.h10
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c390
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c18
-rw-r--r--drivers/scsi/fnic/fnic_isr.c18
-rw-r--r--drivers/scsi/fnic/fnic_main.c20
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c253
-rw-r--r--drivers/scsi/fnic/fnic_stats.h116
-rw-r--r--drivers/scsi/fnic/fnic_trace.c185
-rw-r--r--drivers/scsi/fnic/fnic_trace.h3
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hosts.c7
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c251
-rw-r--r--drivers/scsi/libfc/fc_fcp.c10
-rw-r--r--drivers/scsi/libfc/fc_lport.c4
-rw-r--r--drivers/scsi/libfc/fc_rport.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c131
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c183
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c21
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c153
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h6
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h8
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c152
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c66
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h74
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c523
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h15
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c15
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c56
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h12
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c34
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c472
-rw-r--r--drivers/scsi/scsi.c28
-rw-r--r--drivers/scsi/scsi_debug.c137
-rw-r--r--drivers/scsi/scsi_error.c146
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_pm.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c39
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c26
-rw-r--r--drivers/scsi/scsi_transport_srp.c540
-rw-r--r--drivers/scsi/sd.c85
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h2
-rw-r--r--drivers/scsi/tmscsim.c15
-rw-r--r--drivers/scsi/tmscsim.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c1
-rw-r--r--drivers/scsi/virtio_scsi.c19
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/spi/Kconfig5
-rw-r--r--drivers/spi/spi-altera.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-atmel.c50
-rw-r--r--drivers/spi/spi-au1550.c5
-rw-r--r--drivers/spi/spi-bcm2835.c6
-rw-r--r--drivers/spi/spi-bcm63xx.c6
-rw-r--r--drivers/spi/spi-bfin-sport.c29
-rw-r--r--drivers/spi/spi-bfin-v3.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c48
-rw-r--r--drivers/spi/spi-bitbang.c25
-rw-r--r--drivers/spi/spi-butterfly.c15
-rw-r--r--drivers/spi/spi-clps711x.c9
-rw-r--r--drivers/spi/spi-davinci.c15
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c5
-rw-r--r--drivers/spi/spi-dw-pci.c3
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-efm32.c12
-rw-r--r--drivers/spi/spi-ep93xx.c7
-rw-r--r--drivers/spi/spi-fsl-cpm.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c10
-rw-r--r--drivers/spi/spi-fsl-espi.c14
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-gpio.c6
-rw-r--r--drivers/spi/spi-imx.c35
-rw-r--r--drivers/spi/spi-lm70llp.c2
-rw-r--r--drivers/spi/spi-mpc512x-psc.c7
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mxs.c195
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-octeon.c4
-rw-r--r--drivers/spi/spi-omap-100k.c4
-rw-r--r--drivers/spi/spi-omap-uwire.c5
-rw-r--r--drivers/spi/spi-omap2-mcspi.c19
-rw-r--r--drivers/spi/spi-orion.c10
-rw-r--r--drivers/spi/spi-pl022.c10
-rw-r--r--drivers/spi/spi-ppc4xx.c5
-rw-r--r--drivers/spi/spi-pxa2xx.c41
-rw-r--r--drivers/spi/spi-rspi.c270
-rw-r--r--drivers/spi/spi-s3c24xx.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c260
-rw-r--r--drivers/spi/spi-sh-hspi.c13
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-sh-sci.c2
-rw-r--r--drivers/spi/spi-sirf.c6
-rw-r--r--drivers/spi/spi-tegra114.c96
-rw-r--r--drivers/spi/spi-tegra20-sflash.c7
-rw-r--r--drivers/spi/spi-tegra20-slink.c148
-rw-r--r--drivers/spi/spi-ti-qspi.c46
-rw-r--r--drivers/spi/spi-topcliff-pch.c17
-rw-r--r--drivers/spi/spi-txx9.c11
-rw-r--r--drivers/spi/spi-xilinx.c4
-rw-r--r--drivers/spi/spi.c281
-rw-r--r--drivers/spi/spidev.c7
-rw-r--r--drivers/ssb/main.c25
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/Kconfig7
-rw-r--r--drivers/staging/android/alarm-dev.c8
-rw-r--r--drivers/staging/android/ashmem.c139
-rw-r--r--drivers/staging/android/binder.c9
-rw-r--r--drivers/staging/android/timed_output.h4
-rw-r--r--drivers/staging/bcm/Adapter.h128
-rw-r--r--drivers/staging/bcm/Bcmchar.c49
-rw-r--r--drivers/staging/bcm/Bcmnet.c5
-rw-r--r--drivers/staging/bcm/CmHost.c70
-rw-r--r--drivers/staging/bcm/CmHost.h2
-rw-r--r--drivers/staging/bcm/DDRInit.c2
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c12
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c30
-rw-r--r--drivers/staging/bcm/InterfaceDld.c8
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c4
-rw-r--r--drivers/staging/bcm/InterfaceInit.c20
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c12
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c10
-rw-r--r--drivers/staging/bcm/InterfaceRx.c22
-rw-r--r--drivers/staging/bcm/InterfaceRx.h2
-rw-r--r--drivers/staging/bcm/InterfaceTx.c28
-rw-r--r--drivers/staging/bcm/LeakyBucket.c8
-rw-r--r--drivers/staging/bcm/Misc.c68
-rw-r--r--drivers/staging/bcm/PHSModule.c46
-rw-r--r--drivers/staging/bcm/PHSModule.h4
-rw-r--r--drivers/staging/bcm/Prototypes.h12
-rw-r--r--drivers/staging/bcm/Qos.c102
-rw-r--r--drivers/staging/bcm/Transmit.c4
-rw-r--r--drivers/staging/bcm/Typedefs.h4
-rw-r--r--drivers/staging/bcm/led_control.c38
-rw-r--r--drivers/staging/bcm/nvm.c160
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c2
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h2
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c317
-rw-r--r--drivers/staging/ced1401/ced_ioc.c5
-rw-r--r--drivers/staging/comedi/Kconfig3
-rw-r--r--drivers/staging/comedi/comedi_buf.c8
-rw-r--r--drivers/staging/comedi/comedi_compat32.c3
-rw-r--r--drivers/staging/comedi/comedi_fops.c52
-rw-r--r--drivers/staging/comedi/comedidev.h32
-rw-r--r--drivers/staging/comedi/drivers.c29
-rw-r--r--drivers/staging/comedi/drivers/8253.h9
-rw-r--r--drivers/staging/comedi/drivers/8255.c27
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c7
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c19
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c7
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c8
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c8
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c8
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c8
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c17
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c137
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c13
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c24
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c46
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c32
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c39
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c15
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c33
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c33
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c17
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c40
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c21
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c17
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c21
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c49
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c27
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h2
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c380
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c12
-rw-r--r--drivers/staging/comedi/drivers/das08.c46
-rw-r--r--drivers/staging/comedi/drivers/das08.h1
-rw-r--r--drivers/staging/comedi/drivers/das16.c46
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c46
-rw-r--r--drivers/staging/comedi/drivers/das1800.c75
-rw-r--r--drivers/staging/comedi/drivers/das800.c16
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c70
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c21
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c8
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c51
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c22
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c11
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c19
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c20
-rw-r--r--drivers/staging/comedi/drivers/fl512.c3
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c18
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c7
-rw-r--r--drivers/staging/comedi/drivers/me4000.c41
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c9
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c473
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c538
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c35
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c63
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c21
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h2
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c704
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c592
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c13
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c57
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c25
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c44
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c8
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c7
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c4
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c11
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c62
-rw-r--r--drivers/staging/comedi/drivers/rti800.c8
-rw-r--r--drivers/staging/comedi/drivers/s526.c9
-rw-r--r--drivers/staging/comedi/drivers/s626.c2844
-rw-r--r--drivers/staging/comedi/drivers/s626.h1384
-rw-r--r--drivers/staging/comedi/drivers/skel.c46
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c48
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c41
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c26
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c35
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c8
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c15
-rw-r--r--drivers/staging/cxt1e1/comet.c10
-rw-r--r--drivers/staging/cxt1e1/comet.h2
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c14
-rw-r--r--drivers/staging/cxt1e1/linux.c28
-rw-r--r--drivers/staging/cxt1e1/musycc.c32
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c16
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h4
-rw-r--r--drivers/staging/cxt1e1/sbecrc.c4
-rw-r--r--drivers/staging/cxt1e1/sbeid.c2
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c2
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h24
-rw-r--r--drivers/staging/dgap/Makefile2
-rw-r--r--drivers/staging/dgap/dgap_downld.h2
-rw-r--r--drivers/staging/dgap/dgap_driver.c13
-rw-r--r--drivers/staging/dgap/dgap_driver.h5
-rw-r--r--drivers/staging/dgap/dgap_fep5.c53
-rw-r--r--drivers/staging/dgap/dgap_fep5.h2
-rw-r--r--drivers/staging/dgap/dgap_kcompat.h29
-rw-r--r--drivers/staging/dgap/dgap_parse.c10
-rw-r--r--drivers/staging/dgap/dgap_sysfs.c22
-rw-r--r--drivers/staging/dgap/dgap_tty.c41
-rw-r--r--drivers/staging/dgap/digi.h4
-rw-r--r--drivers/staging/dgap/downld.c4
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c45
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c145
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h12
-rw-r--r--drivers/staging/dgnc/dgnc_kcompat.h29
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c26
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c32
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h2
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c188
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h6
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c337
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h6
-rw-r--r--drivers/staging/dgnc/digi.h4
-rw-r--r--drivers/staging/dgrp/dgrp_sysfs.c30
-rw-r--r--drivers/staging/dwc2/TODO33
-rw-r--r--drivers/staging/dwc2/core.c27
-rw-r--r--drivers/staging/dwc2/core.h9
-rw-r--r--drivers/staging/dwc2/hcd.c54
-rw-r--r--drivers/staging/dwc2/hcd.h3
-rw-r--r--drivers/staging/dwc2/hcd_ddma.c13
-rw-r--r--drivers/staging/dwc2/hcd_intr.c46
-rw-r--r--drivers/staging/dwc2/hcd_queue.c203
-rw-r--r--drivers/staging/dwc2/pci.c1
-rw-r--r--drivers/staging/dwc2/platform.c7
-rw-r--r--drivers/staging/et131x/Module.symvers0
-rw-r--r--drivers/staging/et131x/README7
-rw-r--r--drivers/staging/et131x/et131x.c53
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/boot.h2
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c2
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c574
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c457
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c6
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h2
-rw-r--r--drivers/staging/fwserial/fwserial.c3
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c6
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c12
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c6
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h2
-rw-r--r--drivers/staging/iio/TODO11
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c6
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c66
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c14
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c66
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c19
-rw-r--r--drivers/staging/iio/adc/Kconfig8
-rw-r--r--drivers/staging/iio/adc/ad7192.c15
-rw-r--r--drivers/staging/iio/adc/ad7280a.c21
-rw-r--r--drivers/staging/iio/adc/ad7291.c246
-rw-r--r--drivers/staging/iio/adc/ad7606.h2
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c35
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c8
-rw-r--r--drivers/staging/iio/adc/ad7780.c37
-rw-r--r--drivers/staging/iio/adc/ad7816.c59
-rw-r--r--drivers/staging/iio/adc/ad799x.h16
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c297
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c10
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c51
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c998
-rw-r--r--drivers/staging/iio/adc/spear_adc.c12
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c3
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c3
-rw-r--r--drivers/staging/iio/addac/adt7316.c86
-rw-r--r--drivers/staging/iio/cdc/ad7150.c165
-rw-r--r--drivers/staging/iio/cdc/ad7746.c13
-rw-r--r--drivers/staging/iio/frequency/ad5930.c16
-rw-r--r--drivers/staging/iio/frequency/ad9832.c36
-rw-r--r--drivers/staging/iio/frequency/ad9834.c33
-rw-r--r--drivers/staging/iio/frequency/ad9850.c16
-rw-r--r--drivers/staging/iio/frequency/ad9852.c62
-rw-r--r--drivers/staging/iio/frequency/ad9910.c98
-rw-r--r--drivers/staging/iio/frequency/ad9951.c43
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c49
-rw-r--r--drivers/staging/iio/iio_simple_dummy.h22
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c17
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c49
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c44
-rw-r--r--drivers/staging/iio/light/isl29018.c18
-rw-r--r--drivers/staging/iio/light/tsl2583.c48
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c168
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c669
-rw-r--r--drivers/staging/iio/meter/ade7753.c34
-rw-r--r--drivers/staging/iio/meter/ade7754.c34
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c32
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c18
-rw-r--r--drivers/staging/iio/meter/ade7759.c33
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c4
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c4
-rw-r--r--drivers/staging/iio/meter/ade7854.c22
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c28
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c34
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c16
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c25
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c4
-rw-r--r--drivers/staging/imx-drm/Kconfig1
-rw-r--r--drivers/staging/imx-drm/Makefile2
-rw-r--r--drivers/staging/imx-drm/TODO1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c61
-rw-r--r--drivers/staging/imx-drm/imx-drm.h2
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c9
-rw-r--r--drivers/staging/imx-drm/imx-tve.c6
-rw-r--r--drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h4
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c175
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dc.c13
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c8
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dp.c2
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c215
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c375
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.h55
-rw-r--r--drivers/staging/keucr/usb.c4
-rw-r--r--drivers/staging/line6/driver.c8
-rw-r--r--drivers/staging/line6/midi.c4
-rw-r--r--drivers/staging/line6/playback.c5
-rw-r--r--drivers/staging/line6/toneport.c14
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/bitmap.h110
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h264
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h90
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c5
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c92
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c102
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c352
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c20
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c22
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c91
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c90
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c34
-rw-r--r--drivers/staging/lustre/lustre/Kconfig2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c5
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_errno.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h42
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lite.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h6
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h12
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h18
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c20
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c34
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c250
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/prng.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h36
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c30
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c10
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c36
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c10
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c10
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt.c12
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_lib.c5
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c74
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_test.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c53
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c4
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c8
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c8
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c5
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c109
-rw-r--r--drivers/staging/media/lirc/TODO5
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c37
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c2
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c25
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c12
-rw-r--r--drivers/staging/media/msi3101/Kconfig3
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-disp.c25
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-p2m.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c170
-rw-r--r--drivers/staging/media/solo6x10/solo6x10.h1
-rw-r--r--drivers/staging/mt29f_spinand/Kconfig16
-rw-r--r--drivers/staging/mt29f_spinand/Makefile1
-rw-r--r--drivers/staging/mt29f_spinand/TODO13
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c947
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h107
-rw-r--r--drivers/staging/netlogic/xlr_net.c34
-rw-r--r--drivers/staging/netlogic/xlr_net.h2
-rw-r--r--drivers/staging/nvec/Kconfig2
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/octeon-usb/Makefile4
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c3158
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.h542
-rw-r--r--drivers/staging/octeon-usb/cvmx-usbnx-defs.h885
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3033
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h (renamed from drivers/staging/octeon-usb/cvmx-usbcx-defs.h)331
-rw-r--r--drivers/staging/octeon/ethernet-rx.c15
-rw-r--r--drivers/staging/octeon/ethernet-spi.c92
-rw-r--r--drivers/staging/octeon/ethernet-tx.c71
-rw-r--r--drivers/staging/octeon/ethernet.c24
-rw-r--r--drivers/staging/olpc_dcon/Kconfig3
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/quickstart/quickstart.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c232
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c5
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c112
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.c98
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c6
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c13
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c428
-rw-r--r--drivers/staging/rtl8188eu/Makefile5
-rw-r--r--drivers/staging/rtl8188eu/TODO1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c47
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c8
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c25
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c1761
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c429
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c66
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c8
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c98
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h28
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h2
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h2
-rw-r--r--drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h2
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h16
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h2
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h2
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c23
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c213
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c2
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/dot11d.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c32
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c46
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c17
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c30
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/dot11d.h100
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.h39
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c11
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211_crypt.h86
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.c48
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.h28
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.h4
-rw-r--r--drivers/staging/rtl8192u/r8192U.h12
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c82
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c278
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c200
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTType.h411
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c103
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h230
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c23
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c8
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c12
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8712/usb_intf.c16
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c2
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c4
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h4
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c13
-rw-r--r--drivers/staging/sbe-2t3e3/cpld.c2
-rw-r--r--drivers/staging/sep/sep_crypto.c4
-rw-r--r--drivers/staging/sep/sep_main.c32
-rw-r--r--drivers/staging/silicom/bp_mod.h2
-rw-r--r--drivers/staging/silicom/bpctl_mod.c188
-rw-r--r--drivers/staging/slicoss/slicoss.c172
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c16
-rw-r--r--drivers/staging/speakup/Kconfig4
-rw-r--r--drivers/staging/speakup/kobjects.c80
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c2
-rw-r--r--drivers/staging/speakup/speakup_apollo.c2
-rw-r--r--drivers/staging/speakup/speakup_audptr.c2
-rw-r--r--drivers/staging/speakup/varhandlers.c8
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c2
-rw-r--r--drivers/staging/tidspbridge/core/sync.c4
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/sync.h2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c6
-rw-r--r--drivers/staging/tidspbridge/rmgr/dspdrv.c4
-rw-r--r--drivers/staging/usbip/stub_dev.c26
-rw-r--r--drivers/staging/usbip/stub_main.c4
-rw-r--r--drivers/staging/usbip/userspace/configure.ac1
-rw-r--r--drivers/staging/usbip/userspace/doc/usbip.88
-rw-r--r--drivers/staging/usbip/userspace/doc/usbipd.827
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.c16
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.h1
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c69
-rw-r--r--drivers/staging/usbip/vhci_hcd.c10
-rw-r--r--drivers/staging/vt6655/80211mgr.c16
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c20
-rw-r--r--drivers/staging/vt6655/baseband.c7
-rw-r--r--drivers/staging/vt6655/bssdb.c11
-rw-r--r--drivers/staging/vt6655/device_main.c4
-rw-r--r--drivers/staging/vt6655/dpc.c7
-rw-r--r--drivers/staging/vt6655/hostap.c27
-rw-r--r--drivers/staging/vt6655/iwctl.c6
-rw-r--r--drivers/staging/vt6655/key.c10
-rw-r--r--drivers/staging/vt6655/michael.h10
-rw-r--r--drivers/staging/vt6655/rf.c56
-rw-r--r--drivers/staging/vt6655/tkip.c4
-rw-r--r--drivers/staging/vt6655/vntwifi.c22
-rw-r--r--drivers/staging/vt6655/wcmd.c2
-rw-r--r--drivers/staging/vt6655/wctl.c8
-rw-r--r--drivers/staging/vt6655/wmgr.c3
-rw-r--r--drivers/staging/vt6655/wpa.c12
-rw-r--r--drivers/staging/vt6655/wpactl.c2
-rw-r--r--drivers/staging/vt6655/wroute.c2
-rw-r--r--drivers/staging/vt6655/wroute.h2
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c18
-rw-r--r--drivers/staging/vt6656/bssdb.c43
-rw-r--r--drivers/staging/vt6656/bssdb.h2
-rw-r--r--drivers/staging/vt6656/channel.c2
-rw-r--r--drivers/staging/vt6656/datarate.c370
-rw-r--r--drivers/staging/vt6656/desc.h9
-rw-r--r--drivers/staging/vt6656/device.h18
-rw-r--r--drivers/staging/vt6656/dpc.c31
-rw-r--r--drivers/staging/vt6656/dpc.h4
-rw-r--r--drivers/staging/vt6656/firmware.c92
-rw-r--r--drivers/staging/vt6656/hostap.c4
-rw-r--r--drivers/staging/vt6656/iwctl.c77
-rw-r--r--drivers/staging/vt6656/key.c10
-rw-r--r--drivers/staging/vt6656/main_usb.c52
-rw-r--r--drivers/staging/vt6656/power.c11
-rw-r--r--drivers/staging/vt6656/rxtx.c848
-rw-r--r--drivers/staging/vt6656/rxtx.h47
-rw-r--r--drivers/staging/vt6656/usbpipe.c8
-rw-r--r--drivers/staging/vt6656/wcmd.c78
-rw-r--r--drivers/staging/vt6656/wcmd.h11
-rw-r--r--drivers/staging/vt6656/wctl.c7
-rw-r--r--drivers/staging/vt6656/wmgr.c34
-rw-r--r--drivers/staging/vt6656/wmgr.h3
-rw-r--r--drivers/staging/vt6656/wpactl.c2
-rw-r--r--drivers/staging/winbond/core.h8
-rw-r--r--drivers/staging/winbond/mds.c4
-rw-r--r--drivers/staging/winbond/mto.c7
-rw-r--r--drivers/staging/winbond/mto.h8
-rw-r--r--drivers/staging/winbond/phy_calibration.c369
-rw-r--r--drivers/staging/winbond/reg.c121
-rw-r--r--drivers/staging/winbond/wb35tx.c2
-rw-r--r--drivers/staging/winbond/wbusb.c6
-rw-r--r--drivers/staging/wlags49_h2/hcf.h22
-rw-r--r--drivers/staging/wlags49_h2/sta_h2.c4
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c65
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c90
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c6
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c11
-rw-r--r--drivers/staging/xgifb/vb_table.h2
-rw-r--r--drivers/staging/xillybus/Kconfig4
-rw-r--r--drivers/staging/xillybus/xillybus_core.c153
-rw-r--r--drivers/staging/xillybus/xillybus_of.c27
-rw-r--r--drivers/staging/xillybus/xillybus_pcie.c27
-rw-r--r--drivers/staging/zram/zram_drv.c15
-rw-r--r--drivers/staging/zsmalloc/Kconfig1
-rw-r--r--drivers/target/iscsi/iscsi_target.c90
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c86
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h34
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c42
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c20
-rw-r--r--drivers/target/loopback/tcm_loop.c242
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/sbp/sbp_target.c18
-rw-r--r--drivers/target/target_core_alua.c150
-rw-r--r--drivers/target/target_core_alua.h33
-rw-r--r--drivers/target/target_core_configfs.c123
-rw-r--r--drivers/target/target_core_device.c35
-rw-r--r--drivers/target/target_core_fabric_configfs.c38
-rw-r--r--drivers/target/target_core_file.c2
-rw-r--r--drivers/target/target_core_iblock.c43
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c24
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c12
-rw-r--r--drivers/target/target_core_spc.c17
-rw-r--r--drivers/target/target_core_stat.c16
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_tpg.c41
-rw-r--r--drivers/target/target_core_transport.c244
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/target_core_xcopy.c19
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c18
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c18
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c3
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/cpu_cooling.c4
-rw-r--r--drivers/thermal/intel_powerclamp.c29
-rw-r--r--drivers/thermal/thermal_core.c35
-rw-r--r--drivers/tty/bfin_jtag_comm.c2
-rw-r--r--drivers/tty/ehv_bytechan.c1
-rw-r--r--drivers/tty/hvc/hvc_dcc.c21
-rw-r--r--drivers/tty/hvc/hvc_iucv.c3
-rw-r--r--drivers/tty/hvc/hvc_opal.c4
-rw-r--r--drivers/tty/hvc/hvc_vio.c5
-rw-r--r--drivers/tty/hvc/hvc_xen.c19
-rw-r--r--drivers/tty/hvc/hvsi_lib.c25
-rw-r--r--drivers/tty/metag_da.c2
-rw-r--r--drivers/tty/n_tty.c13
-rw-r--r--drivers/tty/nozomi.c6
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c76
-rw-r--r--drivers/tty/serial/8250/8250_em.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci.c347
-rw-r--r--drivers/tty/serial/Kconfig3
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c3
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c19
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c11
-rw-r--r--drivers/tty/serial/bfin_uart.c21
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c1
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/imx.c82
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mfd.c13
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/mpsc.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c45
-rw-r--r--drivers/tty/serial/mxs-auart.c9
-rw-r--r--drivers/tty/serial/omap-serial.c141
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/pmac_zilog.c4
-rw-r--r--drivers/tty/serial/sa1100.c5
-rw-r--r--drivers/tty/serial/samsung.c22
-rw-r--r--drivers/tty/serial/samsung.h2
-rw-r--r--drivers/tty/serial/sccnxp.c1
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h12
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c551
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/tty/vt/vt.c33
-rw-r--r--drivers/uio/uio.c38
-rw-r--r--drivers/uio/uio_aec.c1
-rw-r--r--drivers/uio/uio_cif.c1
-rw-r--r--drivers/uio/uio_mf624.c5
-rw-r--r--drivers/uio/uio_netx.c1
-rw-r--r--drivers/uio/uio_pdrv_genirq.c34
-rw-r--r--drivers/uio/uio_pruss.c6
-rw-r--r--drivers/uio/uio_sercos3.c1
-rw-r--r--drivers/usb/atm/usbatm.h14
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c2
-rw-r--r--drivers/usb/chipidea/bits.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c28
-rw-r--r--drivers/usb/chipidea/core.c123
-rw-r--r--drivers/usb/chipidea/host.c6
-rw-r--r--drivers/usb/chipidea/udc.c56
-rw-r--r--drivers/usb/class/cdc-wdm.c42
-rw-r--r--drivers/usb/core/Kconfig2
-rw-r--r--drivers/usb/core/devio.c38
-rw-r--r--drivers/usb/core/driver.c7
-rw-r--r--drivers/usb/core/file.c24
-rw-r--r--drivers/usb/core/hcd-pci.c3
-rw-r--r--drivers/usb/core/hcd.c122
-rw-r--r--drivers/usb/core/hub.c184
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c37
-rw-r--r--drivers/usb/core/sysfs.c64
-rw-r--r--drivers/usb/core/urb.c44
-rw-r--r--drivers/usb/core/usb-acpi.c4
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc3/core.c3
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c7
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c6
-rw-r--r--drivers/usb/early/ehci-dbgp.c4
-rw-r--r--drivers/usb/gadget/Kconfig34
-rw-r--r--drivers/usb/gadget/Makefile5
-rw-r--r--drivers/usb/gadget/acm_ms.c125
-rw-r--r--drivers/usb/gadget/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/configfs.c10
-rw-r--r--drivers/usb/gadget/configfs.h6
-rw-r--r--drivers/usb/gadget/dummy_hcd.c6
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c1254
-rw-r--r--drivers/usb/gadget/f_mass_storage.h166
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c1
-rw-r--r--drivers/usb/gadget/g_ffs.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c3
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c4
-rw-r--r--drivers/usb/gadget/mass_storage.c125
-rw-r--r--drivers/usb/gadget/multi.c247
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c4
-rw-r--r--drivers/usb/gadget/net2280.c5
-rw-r--r--drivers/usb/gadget/pch_udc.c1
-rw-r--r--drivers/usb/gadget/rndis.c2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c167
-rw-r--r--drivers/usb/gadget/storage_common.c430
-rw-r--r--drivers/usb/gadget/storage_common.h229
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c22
-rw-r--r--drivers/usb/gadget/udc-core.c3
-rw-r--r--drivers/usb/gadget/zero.c25
-rw-r--r--drivers/usb/host/Kconfig59
-rw-r--r--drivers/usb/host/Makefile11
-rw-r--r--drivers/usb/host/bcma-hcd.c3
-rw-r--r--drivers/usb/host/ehci-atmel.c23
-rw-r--r--drivers/usb/host/ehci-dbg.c113
-rw-r--r--drivers/usb/host/ehci-exynos.c (renamed from drivers/usb/host/ehci-s5p.c)171
-rw-r--r--drivers/usb/host/ehci-fsl.c4
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c42
-rw-r--r--drivers/usb/host/ehci-mem.c4
-rw-r--r--drivers/usb/host/ehci-msm.c20
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-octeon.c6
-rw-r--r--drivers/usb/host/ehci-omap.c10
-rw-r--r--drivers/usb/host/ehci-orion.c7
-rw-r--r--drivers/usb/host/ehci-pci.c24
-rw-r--r--drivers/usb/host/ehci-platform.c10
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c4
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c64
-rw-r--r--drivers/usb/host/ehci-sched.c1033
-rw-r--r--drivers/usb/host/ehci-sead3.c2
-rw-r--r--drivers/usb/host/ehci-sh.c2
-rw-r--r--drivers/usb/host/ehci-spear.c7
-rw-r--r--drivers/usb/host/ehci-sysfs.c15
-rw-r--r--drivers/usb/host/ehci-tegra.c11
-rw-r--r--drivers/usb/host/ehci-tilegx.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c89
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/ehci.h81
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fotg210-hcd.c2
-rw-r--r--drivers/usb/host/fusbh200-hcd.c2
-rw-r--r--drivers/usb/host/hwa-hc.c33
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/ohci-at91.c165
-rw-r--r--drivers/usb/host/ohci-dbg.c2
-rw-r--r--drivers/usb/host/ohci-ep93xx.c184
-rw-r--r--drivers/usb/host/ohci-exynos.c193
-rw-r--r--drivers/usb/host/ohci-hcd.c159
-rw-r--r--drivers/usb/host/ohci-hub.c9
-rw-r--r--drivers/usb/host/ohci-nxp.c128
-rw-r--r--drivers/usb/host/ohci-octeon.c5
-rw-r--r--drivers/usb/host/ohci-omap.c169
-rw-r--r--drivers/usb/host/ohci-omap3.c128
-rw-r--r--drivers/usb/host/ohci-pci.c15
-rw-r--r--drivers/usb/host/ohci-platform.c11
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c257
-rw-r--r--drivers/usb/host/ohci-s3c2410.c136
-rw-r--r--drivers/usb/host/ohci-sa1111.c6
-rw-r--r--drivers/usb/host/ohci-sm501.c11
-rw-r--r--drivers/usb/host/ohci-spear.c147
-rw-r--r--drivers/usb/host/pci-quirks.c137
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/sl811-hcd.c4
-rw-r--r--drivers/usb/host/ssb-hcd.c3
-rw-r--r--drivers/usb/host/uhci-debug.c16
-rw-r--r--drivers/usb/host/uhci-hub.c40
-rw-r--r--drivers/usb/host/uhci-pci.c19
-rw-r--r--drivers/usb/host/uhci-platform.c10
-rw-r--r--drivers/usb/host/whci/hcd.c4
-rw-r--r--drivers/usb/host/xhci-hub.c5
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c324
-rw-r--r--drivers/usb/host/xhci.c182
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/usbtest.c172
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/am35x.c63
-rw-r--r--drivers/usb/musb/blackfin.c13
-rw-r--r--drivers/usb/musb/da8xx.c49
-rw-r--r--drivers/usb/musb/davinci.c59
-rw-r--r--drivers/usb/musb/musb_am335x.c2
-rw-r--r--drivers/usb/musb/musb_core.c19
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_cppi41.c8
-rw-r--r--drivers/usb/musb/musb_dsps.c96
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_virthub.c19
-rw-r--r--drivers/usb/musb/omap2430.c54
-rw-r--r--drivers/usb/musb/tusb6010.c49
-rw-r--r--drivers/usb/musb/ux500.c15
-rw-r--r--drivers/usb/phy/Kconfig35
-rw-r--r--drivers/usb/phy/Makefile3
-rw-r--r--drivers/usb/phy/phy-am335x-control.c44
-rw-r--r--drivers/usb/phy/phy-am335x.c44
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c96
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h4
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c72
-rw-r--r--drivers/usb/phy/phy-fsm-usb.h179
-rw-r--r--drivers/usb/phy/phy-generic.c108
-rw-r--r--drivers/usb/phy/phy-generic.h7
-rw-r--r--drivers/usb/phy/phy-omap-control.c208
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c32
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c248
-rw-r--r--drivers/usb/phy/phy-samsung-usb2.c3
-rw-r--r--drivers/usb/phy/phy-samsung-usb3.c3
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c1
-rw-r--r--drivers/usb/phy/phy-ulpi-viewport.c2
-rw-r--r--drivers/usb/phy/phy.c2
-rw-r--r--drivers/usb/serial/cyberjack.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c10
-rw-r--r--drivers/usb/serial/generic.c82
-rw-r--r--drivers/usb/serial/mos7720.c2
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/wusbcore/cbaf.c7
-rw-r--r--drivers/usb/wusbcore/devconnect.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.c18
-rw-r--r--drivers/usb/wusbcore/wa-hc.h42
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c22
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1261
-rw-r--r--drivers/uwb/lc-dev.c16
-rw-r--r--drivers/uwb/umc-bus.c13
-rw-r--r--drivers/vhost/scsi.c18
-rw-r--r--drivers/video/68328fb.c9
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/acornfb.c4
-rw-r--r--drivers/video/amba-clcd.c9
-rw-r--r--drivers/video/amifb.c6
-rw-r--r--drivers/video/arcfb.c8
-rw-r--r--drivers/video/arkfb.c49
-rw-r--r--drivers/video/asiliantfb.c4
-rw-r--r--drivers/video/atafb.c7
-rw-r--r--drivers/video/atmel_lcdfb.c344
-rw-r--r--drivers/video/aty/aty128fb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c1
-rw-r--r--drivers/video/aty/radeon_base.c5
-rw-r--r--drivers/video/aty/radeon_pm.c22
-rw-r--r--drivers/video/aty/radeonfb.h1
-rw-r--r--drivers/video/au1100fb.c16
-rw-r--r--drivers/video/au1200fb.c16
-rw-r--r--drivers/video/backlight/88pm860x_bl.c18
-rw-r--r--drivers/video/backlight/Kconfig10
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/aat2870_bl.c13
-rw-r--r--drivers/video/backlight/adp5520_bl.c11
-rw-r--r--drivers/video/backlight/adp8860_bl.c17
-rw-r--r--drivers/video/backlight/adp8870_bl.c17
-rw-r--r--drivers/video/backlight/ams369fg06.c24
-rw-r--r--drivers/video/backlight/as3711_bl.c26
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c105
-rw-r--r--drivers/video/backlight/backlight.c31
-rw-r--r--drivers/video/backlight/bd6107.c6
-rw-r--r--drivers/video/backlight/corgi_lcd.c30
-rw-r--r--drivers/video/backlight/cr_bllcd.c13
-rw-r--r--drivers/video/backlight/da903x_bl.c16
-rw-r--r--drivers/video/backlight/da9052_bl.c6
-rw-r--r--drivers/video/backlight/ep93xx_bl.c13
-rw-r--r--drivers/video/backlight/generic_bl.c8
-rw-r--r--drivers/video/backlight/gpio_backlight.c17
-rw-r--r--drivers/video/backlight/hx8357.c20
-rw-r--r--drivers/video/backlight/ili922x.c7
-rw-r--r--drivers/video/backlight/ili9320.c15
-rw-r--r--drivers/video/backlight/kb3886_bl.c20
-rw-r--r--drivers/video/backlight/l4f00242t03.c7
-rw-r--r--drivers/video/backlight/ld9040.c23
-rw-r--r--drivers/video/backlight/ld9040_gamma.h4
-rw-r--r--drivers/video/backlight/lm3533_bl.c12
-rw-r--r--drivers/video/backlight/lm3630_bl.c475
-rw-r--r--drivers/video/backlight/lm3630a_bl.c483
-rw-r--r--drivers/video/backlight/lm3639_bl.c13
-rw-r--r--drivers/video/backlight/lms283gf05.c17
-rw-r--r--drivers/video/backlight/lms501kf03.c8
-rw-r--r--drivers/video/backlight/lp855x_bl.c41
-rw-r--r--drivers/video/backlight/lp8788_bl.c2
-rw-r--r--drivers/video/backlight/ltv350qv.c11
-rw-r--r--drivers/video/backlight/lv5207lp.c9
-rw-r--r--drivers/video/backlight/max8925_bl.c17
-rw-r--r--drivers/video/backlight/omap1_bl.c2
-rw-r--r--drivers/video/backlight/pandora_bl.c12
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c15
-rw-r--r--drivers/video/backlight/platform_lcd.c22
-rw-r--r--drivers/video/backlight/pwm_bl.c168
-rw-r--r--drivers/video/backlight/s6e63m0.c22
-rw-r--r--drivers/video/backlight/tdo24m.c14
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c6
-rw-r--r--drivers/video/backlight/tps65217_bl.c17
-rw-r--r--drivers/video/backlight/wm831x_bl.c21
-rw-r--r--drivers/video/bf54x-lq043fb.c14
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c14
-rw-r--r--drivers/video/broadsheetfb.c19
-rw-r--r--drivers/video/bw2.c2
-rw-r--r--drivers/video/carminefb.c4
-rw-r--r--drivers/video/cfbimgblt.c2
-rw-r--r--drivers/video/cg14.c6
-rw-r--r--drivers/video/cg3.c2
-rw-r--r--drivers/video/cg6.c4
-rw-r--r--drivers/video/cirrusfb.c6
-rw-r--r--drivers/video/cobalt_lcdfb.c17
-rw-r--r--drivers/video/console/sticore.c166
-rw-r--r--drivers/video/controlfb.c4
-rw-r--r--drivers/video/cyber2000fb.c75
-rw-r--r--drivers/video/da8xx-fb.c21
-rw-r--r--drivers/video/efifb.c7
-rw-r--r--drivers/video/ep93xx-fb.c2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/exynos/exynos_dp_core.c132
-rw-r--r--drivers/video/exynos/exynos_dp_core.h110
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c2
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c20
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c7
-rw-r--r--drivers/video/fb-puv3.c5
-rw-r--r--drivers/video/fbmem.c50
-rw-r--r--drivers/video/fbsysfs.c19
-rw-r--r--drivers/video/ffb.c2
-rw-r--r--drivers/video/fm2fb.c2
-rw-r--r--drivers/video/fsl-diu-fb.c4
-rw-r--r--drivers/video/gbefb.c6
-rw-r--r--drivers/video/geode/gx1fb_core.c3
-rw-r--r--drivers/video/geode/gxfb_core.c3
-rw-r--r--drivers/video/geode/lxfb_core.c4
-rw-r--r--drivers/video/grvga.c16
-rw-r--r--drivers/video/gxt4500.c3
-rw-r--r--drivers/video/hecubafb.c19
-rw-r--r--drivers/video/hgafb.c3
-rw-r--r--drivers/video/hitfb.c3
-rw-r--r--drivers/video/hpfb.c3
-rw-r--r--drivers/video/hyperv_fb.c45
-rw-r--r--drivers/video/i740fb.c9
-rw-r--r--drivers/video/i810/i810_main.c1
-rw-r--r--drivers/video/igafb.c5
-rw-r--r--drivers/video/imsttfb.c4
-rw-r--r--drivers/video/imxfb.c6
-rw-r--r--drivers/video/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/jz4740_fb.c29
-rw-r--r--drivers/video/kyro/fbdev.c10
-rw-r--r--drivers/video/leo.c4
-rw-r--r--drivers/video/macfb.c3
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c4
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c6
-rw-r--r--drivers/video/matrox/matroxfb_maven.c14
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c3
-rw-r--r--drivers/video/mbx/mbxfb.c4
-rw-r--r--drivers/video/metronomefb.c17
-rw-r--r--drivers/video/mmp/fb/mmpfb.c34
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c71
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.h5
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/neofb.c9
-rw-r--r--drivers/video/nuc900fb.c9
-rw-r--r--drivers/video/nvidia/nv_hw.c2
-rw-r--r--drivers/video/offb.c3
-rw-r--r--drivers/video/omap/hwa742.c2
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/omap2/displays-new/Kconfig6
-rw-r--r--drivers/video/omap2/displays-new/Makefile1
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c7
-rw-r--r--drivers/video/omap2/displays-new/encoder-tpd12s015.c2
-rw-r--r--drivers/video/omap2/displays-new/panel-dsi-cm.c2
-rw-r--r--drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c480
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/core.c4
-rw-r--r--drivers/video/omap2/dss/dispc.c10
-rw-r--r--drivers/video/omap2/dss/display.c2
-rw-r--r--drivers/video/omap2/dss/dsi.c12
-rw-r--r--drivers/video/omap2/dss/dss.h4
-rw-r--r--drivers/video/omap2/dss/dss_features.c44
-rw-r--r--drivers/video/omap2/dss/dss_features.h8
-rw-r--r--drivers/video/omap2/dss/hdmi.c1184
-rw-r--r--drivers/video/omap2/dss/hdmi.h444
-rw-r--r--drivers/video/omap2/dss/hdmi4.c696
-rw-r--r--drivers/video/omap2/dss/hdmi4_core.c (renamed from drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c)771
-rw-r--r--drivers/video/omap2/dss/hdmi4_core.h (renamed from drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h)303
-rw-r--r--drivers/video/omap2/dss/hdmi_common.c423
-rw-r--r--drivers/video/omap2/dss/hdmi_phy.c160
-rw-r--r--drivers/video/omap2/dss/hdmi_pll.c230
-rw-r--r--drivers/video/omap2/dss/hdmi_wp.c271
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h187
-rw-r--r--drivers/video/p9100.c2
-rw-r--r--drivers/video/platinumfb.c3
-rw-r--r--drivers/video/pm2fb.c5
-rw-r--r--drivers/video/pm3fb.c4
-rw-r--r--drivers/video/pmag-ba-fb.c4
-rw-r--r--drivers/video/pmagb-b-fb.c9
-rw-r--r--drivers/video/pvr2fb.c25
-rw-r--r--drivers/video/pxa168fb.c6
-rw-r--r--drivers/video/pxafb.c16
-rw-r--r--drivers/video/q40fb.c3
-rw-r--r--drivers/video/riva/fbdev.c5
-rw-r--r--drivers/video/s1d13xxxfb.c15
-rw-r--r--drivers/video/s3c-fb.c2
-rw-r--r--drivers/video/s3c2410fb.c6
-rw-r--r--drivers/video/s3fb.c63
-rw-r--r--drivers/video/sa1100fb.c4
-rw-r--r--drivers/video/savage/savagefb_driver.c6
-rw-r--r--drivers/video/sbuslib.c2
-rw-r--r--drivers/video/sgivwfb.c4
-rw-r--r--drivers/video/sh_mobile_hdmi.c19
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c14
-rw-r--r--drivers/video/simplefb.c24
-rw-r--r--drivers/video/sis/init.c5
-rw-r--r--drivers/video/sis/sis_main.c8
-rw-r--r--drivers/video/skeletonfb.c3
-rw-r--r--drivers/video/smscufx.c2
-rw-r--r--drivers/video/ssd1307fb.c2
-rw-r--r--drivers/video/sstfb.c8
-rw-r--r--drivers/video/sticore.h62
-rw-r--r--drivers/video/stifb.c14
-rw-r--r--drivers/video/sunxvr1000.c2
-rw-r--r--drivers/video/svgalib.c4
-rw-r--r--drivers/video/sysimgblt.c2
-rw-r--r--drivers/video/tcx.c6
-rw-r--r--drivers/video/tdfxfb.c1
-rw-r--r--drivers/video/tgafb.c4
-rw-r--r--drivers/video/tmiofb.c13
-rw-r--r--drivers/video/tridentfb.c1
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/uvesafb.c25
-rw-r--r--drivers/video/valkyriefb.c2
-rw-r--r--drivers/video/vesafb.c3
-rw-r--r--drivers/video/vfb.c10
-rw-r--r--drivers/video/vga16fb.c3
-rw-r--r--drivers/video/vt8500lcdfb.c2
-rw-r--r--drivers/video/vt8623fb.c41
-rw-r--r--drivers/video/w100fb.c7
-rw-r--r--drivers/video/wm8505fb.c14
-rw-r--r--drivers/video/wmt_ge_rops.c4
-rw-r--r--drivers/video/xilinxfb.c61
-rw-r--r--drivers/virt/fsl_hypervisor.c1
-rw-r--r--drivers/virtio/virtio.c27
-rw-r--r--drivers/virtio/virtio_balloon.c14
-rw-r--r--drivers/virtio/virtio_mmio.c5
-rw-r--r--drivers/virtio/virtio_pci.c3
-rw-r--r--drivers/virtio/virtio_ring.c34
-rw-r--r--drivers/w1/masters/ds1wm.c14
-rw-r--r--drivers/w1/masters/omap_hdq.c3
-rw-r--r--drivers/w1/masters/w1-gpio.c45
-rw-r--r--drivers/w1/w1.c10
-rw-r--r--drivers/w1/w1.h2
-rw-r--r--drivers/watchdog/Kconfig29
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/acquirewdt.c4
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c1
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c3
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c1
-rw-r--r--drivers/watchdog/bfin_wdt.c1
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/dw_wdt.c36
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/hpwdt.c1
-rw-r--r--drivers/watchdog/i6300esb.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c5
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c5
-rw-r--r--drivers/watchdog/iop_wdt.c1
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/it87_wdt.c1
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c5
-rw-r--r--drivers/watchdog/ks8695_wdt.c1
-rw-r--r--drivers/watchdog/lantiq_wdt.c1
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/mixcomwd.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c165
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/mv64x60_wdt.c3
-rw-r--r--drivers/watchdog/nuc900_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c38
-rw-r--r--drivers/watchdog/orion_wdt.c3
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pcwd_pci.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c16
-rw-r--r--drivers/watchdog/pika_wdt.c3
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/pnx833x_wdt.c1
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--drivers/watchdog/rdc321x_wdt.c3
-rw-r--r--drivers/watchdog/rt2880_wdt.c207
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c1
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c1
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c5
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c226
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/softdog.c1
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c5
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c38
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c3
-rw-r--r--drivers/watchdog/w83627hf_wdt.c341
-rw-r--r--drivers/watchdog/w83697hf_wdt.c1
-rw-r--r--drivers/watchdog/w83697ug_wdt.c1
-rw-r--r--drivers/watchdog/w83877f_wdt.c1
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/wdrtas.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/watchdog/wm831x_wdt.c8
-rw-r--r--drivers/watchdog/xen_wdt.c1
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/balloon.c6
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/grant-table.c19
-rw-r--r--drivers/xen/pci.c53
-rw-r--r--drivers/xen/platform-pci.c2
-rw-r--r--drivers/xen/swiotlb-xen.c119
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c24
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
4336 files changed, 210037 insertions, 92219 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index aa43b911cce..b3138fbb46a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -166,4 +166,8 @@ source "drivers/reset/Kconfig"
source "drivers/fmc/Kconfig"
+source "drivers/phy/Kconfig"
+
+source "drivers/powercap/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index ab93de8297f..3cc8214f9b2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -8,6 +8,8 @@
obj-y += irqchip/
obj-y += bus/
+obj-$(CONFIG_GENERIC_PHY) += phy/
+
# GPIO must come after pinctrl as gpios may need to mux pins etc
obj-y += pinctrl/
obj-y += gpio/
@@ -152,3 +154,4 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
+obj-$(CONFIG_POWERCAP) += powercap/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 6efe2ac6902..5d9248526d7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -56,23 +56,6 @@ config ACPI_PROCFS
Say N to delete /proc/acpi/ files that have moved to /sys/
-config ACPI_PROCFS_POWER
- bool "Deprecated power /proc/acpi directories"
- depends on PROC_FS
- help
- For backwards compatibility, this option allows
- deprecated power /proc/acpi/ directories to exist, even when
- they have been replaced by functions in /sys.
- The deprecated directories (and their replacements) include:
- /proc/acpi/battery/* (/sys/class/power_supply/*)
- /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
- This option has no effect on /proc/acpi/ directories
- and functions, which do not yet exist in /sys
- This option, together with the proc directories, will be
- deleted in 2.6.39.
-
- Say N to delete power /proc/acpi/ directories that have moved to /sys/
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -175,9 +158,10 @@ config ACPI_PROCESSOR
To compile this driver as a module, choose M here:
the module will be called processor.
+
config ACPI_IPMI
tristate "IPMI"
- depends on IPMI_SI && IPMI_HANDLER
+ depends on IPMI_SI
default n
help
This driver enables the ACPI to access the BMC controller. And it
@@ -251,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE
initrd, therefore it's safe to say Y.
See Documentation/acpi/initrd_table_override.txt for details
-config ACPI_BLACKLIST_YEAR
- int "Disable ACPI for systems before Jan 1st this year" if X86_32
- default 0
- help
- Enter a 4-digit year, e.g., 2001, to disable ACPI by default
- on platforms with DMI BIOS date before January 1st that year.
- "acpi=force" can be used to override this mechanism.
-
- Enter 0 to disable this mechanism and allow ACPI to
- run by default no matter what the year. (default)
-
config ACPI_DEBUG
bool "Debug Statements"
default n
@@ -372,4 +345,25 @@ config ACPI_BGRT
source "drivers/acpi/apei/Kconfig"
+config ACPI_EXTLOG
+ tristate "Extended Error Log support"
+ depends on X86_MCE && X86_LOCAL_APIC
+ select EFI
+ select UEFI_CPER
+ default n
+ help
+ Certain usages such as Predictive Failure Analysis (PFA) require
+ more information about the error than what can be described in
+ processor machine check banks. Most server processors log
+ additional information about the error in processor uncore
+ registers. Since the addresses and layout of these registers vary
+ widely from one processor to another, system software cannot
+ readily make use of them. To complicate matters further, some of
+ the additional error information cannot be constructed without
+ detailed knowledge about platform topology.
+
+ Enhanced MCA Logging allows firmware to provide additional error
+ information to system software, synchronous with MCE or CMCI. This
+ driver adds support for that functionality.
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index cdaf68b58b0..0331f91d56e 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,7 +47,6 @@ acpi-y += sysfs.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
ifdef CONFIG_ACPI_VIDEO
acpi-y += video_detect.o
endif
@@ -82,3 +81,5 @@ processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
obj-$(CONFIG_ACPI_APEI) += apei/
+
+obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index f37beaa3275..8711e379716 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,10 +30,7 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
+#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -55,75 +52,30 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
-#endif
-
-static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id ac_device_ids[] = {
- {"ACPI0003", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_ac_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-
static int ac_sleep_before_get_state_ms;
-static struct acpi_driver acpi_ac_driver = {
- .name = "ac",
- .class = ACPI_AC_CLASS,
- .ids = ac_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = acpi_ac_add,
- .remove = acpi_ac_remove,
- .notify = acpi_ac_notify,
- },
- .drv.pm = &acpi_ac_pm,
-};
-
struct acpi_ac {
struct power_supply charger;
- struct acpi_device * device;
+ struct platform_device *pdev;
unsigned long long state;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static const struct file_operations acpi_ac_fops = {
- .owner = THIS_MODULE,
- .open = acpi_ac_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
static int acpi_ac_get_state(struct acpi_ac *ac)
{
- acpi_status status = AE_OK;
-
-
- if (!ac)
- return -EINVAL;
+ acpi_status status;
+ acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
- status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+ status = acpi_evaluate_integer(handle, "_PSR", NULL,
+ &ac->state);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Error reading AC Adapter state"));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
@@ -160,91 +112,14 @@ static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_ac *ac = seq->private;
-
-
- if (!ac)
- return 0;
-
- if (acpi_ac_get_state(ac)) {
- seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
- return 0;
- }
-
- seq_puts(seq, "state: ");
- switch (ac->state) {
- case ACPI_AC_STATUS_OFFLINE:
- seq_puts(seq, "off-line\n");
- break;
- case ACPI_AC_STATUS_ONLINE:
- seq_puts(seq, "on-line\n");
- break;
- default:
- seq_puts(seq, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int acpi_ac_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
-}
-
-static int acpi_ac_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_ac_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'state' [R] */
- entry = proc_create_data(ACPI_AC_FILE_STATE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_ac_fops, acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
-
- remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#endif
-
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
{
- struct acpi_ac *ac = acpi_driver_data(device);
-
+ struct acpi_ac *ac = data;
+ struct acpi_device *adev;
if (!ac)
return;
@@ -267,10 +142,11 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
- acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event,
- (u32) ac->state);
- acpi_notifier_call_chain(device, event, (u32) ac->state);
+ adev = ACPI_COMPANION(&ac->pdev->dev);
+ acpi_bus_generate_netlink_event(adev->pnp.device_class,
+ dev_name(&ac->pdev->dev),
+ event, (u32) ac->state);
+ acpi_notifier_call_chain(adev, event, (u32) ac->state);
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
}
@@ -295,53 +171,54 @@ static struct dmi_system_id ac_dmi_table[] = {
{},
};
-static int acpi_ac_add(struct acpi_device *device)
+static int acpi_ac_probe(struct platform_device *pdev)
{
int result = 0;
struct acpi_ac *ac = NULL;
+ struct acpi_device *adev;
-
- if (!device)
+ if (!pdev)
return -EINVAL;
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return -ENODEV;
+
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
- ac->device = device;
- strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_AC_CLASS);
- device->driver_data = ac;
+ strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+ strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+ ac->pdev = pdev;
+ platform_set_drvdata(pdev, ac);
result = acpi_ac_get_state(ac);
if (result)
goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_ac_add_fs(device);
-#endif
- if (result)
- goto end;
- ac->charger.name = acpi_device_bid(device);
+ ac->charger.name = acpi_device_bid(adev);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
- result = power_supply_register(&ac->device->dev, &ac->charger);
+ result = power_supply_register(&pdev->dev, &ac->charger);
if (result)
goto end;
+ result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+ if (result) {
+ power_supply_unregister(&ac->charger);
+ goto end;
+ }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
+ acpi_device_name(adev), acpi_device_bid(adev),
ac->state ? "on-line" : "off-line");
- end:
- if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(device);
-#endif
+end:
+ if (result)
kfree(ac);
- }
dmi_check_system(ac_dmi_table);
return result;
@@ -356,7 +233,7 @@ static int acpi_ac_resume(struct device *dev)
if (!dev)
return -EINVAL;
- ac = acpi_driver_data(to_acpi_device(dev));
+ ac = platform_get_drvdata(to_platform_device(dev));
if (!ac)
return -EINVAL;
@@ -368,28 +245,44 @@ static int acpi_ac_resume(struct device *dev)
return 0;
}
#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
-static int acpi_ac_remove(struct acpi_device *device)
+static int acpi_ac_remove(struct platform_device *pdev)
{
- struct acpi_ac *ac = NULL;
-
+ struct acpi_ac *ac;
- if (!device || !acpi_driver_data(device))
+ if (!pdev)
return -EINVAL;
- ac = acpi_driver_data(device);
+ acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+ ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(device);
-#endif
kfree(ac);
return 0;
}
+static const struct acpi_device_id acpi_ac_match[] = {
+ { "ACPI0003", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
+
+static struct platform_driver acpi_ac_driver = {
+ .probe = acpi_ac_probe,
+ .remove = acpi_ac_remove,
+ .driver = {
+ .name = "acpi-ac",
+ .owner = THIS_MODULE,
+ .pm = &acpi_ac_pm_ops,
+ .acpi_match_table = ACPI_PTR(acpi_ac_match),
+ },
+};
+
static int __init acpi_ac_init(void)
{
int result;
@@ -397,34 +290,16 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
+ result = platform_driver_register(&acpi_ac_driver);
+ if (result < 0)
return -ENODEV;
-#endif
-
- result = acpi_bus_register_driver(&acpi_ac_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
- return -ENODEV;
- }
return 0;
}
static void __exit acpi_ac_exit(void)
{
-
- acpi_bus_unregister_driver(&acpi_ac_driver);
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-
- return;
+ platform_driver_unregister(&acpi_ac_driver);
}
-
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
new file mode 100644
index 00000000000..a6869e110ce
--- /dev/null
+++ b/drivers/acpi/acpi_extlog.c
@@ -0,0 +1,327 @@
+/*
+ * Extended Error Log driver
+ *
+ * Copyright (C) 2013 Intel Corp.
+ * Author: Chen, Gong <gong.chen@intel.com>
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/cper.h>
+#include <linux/ratelimit.h>
+#include <asm/cpu.h>
+#include <asm/mce.h>
+
+#include "apei/apei-internal.h"
+
+#define EXT_ELOG_ENTRY_MASK GENMASK_ULL(51, 0) /* elog entry address mask */
+
+#define EXTLOG_DSM_REV 0x0
+#define EXTLOG_FN_QUERY 0x0
+#define EXTLOG_FN_ADDR 0x1
+
+#define FLAG_OS_OPTIN BIT(0)
+#define EXTLOG_QUERY_L1_EXIST BIT(1)
+#define ELOG_ENTRY_VALID (1ULL<<63)
+#define ELOG_ENTRY_LEN 0x1000
+
+#define EMCA_BUG \
+ "Can not request iomem region <0x%016llx-0x%016llx> - eMCA disabled\n"
+
+struct extlog_l1_head {
+ u32 ver; /* Header Version */
+ u32 hdr_len; /* Header Length */
+ u64 total_len; /* entire L1 Directory length including this header */
+ u64 elog_base; /* MCA Error Log Directory base address */
+ u64 elog_len; /* MCA Error Log Directory length */
+ u32 flags; /* bit 0 - OS/VMM Opt-in */
+ u8 rev0[12];
+ u32 entries; /* Valid L1 Directory entries per logical processor */
+ u8 rev1[12];
+};
+
+static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";
+
+/* L1 table related physical address */
+static u64 elog_base;
+static size_t elog_size;
+static u64 l1_dirbase;
+static size_t l1_size;
+
+/* L1 table related virtual address */
+static void __iomem *extlog_l1_addr;
+static void __iomem *elog_addr;
+
+static void *elog_buf;
+
+static u64 *l1_entry_base;
+static u32 l1_percpu_entry;
+
+#define ELOG_IDX(cpu, bank) \
+ (cpu_physical_id(cpu) * l1_percpu_entry + (bank))
+
+#define ELOG_ENTRY_DATA(idx) \
+ (*(l1_entry_base + (idx)))
+
+#define ELOG_ENTRY_ADDR(phyaddr) \
+ (phyaddr - elog_base + (u8 *)elog_addr)
+
+static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
+{
+ int idx;
+ u64 data;
+ struct acpi_generic_status *estatus;
+
+ WARN_ON(cpu < 0);
+ idx = ELOG_IDX(cpu, bank);
+ data = ELOG_ENTRY_DATA(idx);
+ if ((data & ELOG_ENTRY_VALID) == 0)
+ return NULL;
+
+ data &= EXT_ELOG_ENTRY_MASK;
+ estatus = (struct acpi_generic_status *)ELOG_ENTRY_ADDR(data);
+
+ /* if no valid data in elog entry, just return */
+ if (estatus->block_status == 0)
+ return NULL;
+
+ return estatus;
+}
+
+static void __print_extlog_rcd(const char *pfx,
+ struct acpi_generic_status *estatus, int cpu)
+{
+ static atomic_t seqno;
+ unsigned int curr_seqno;
+ char pfx_seq[64];
+
+ if (!pfx) {
+ if (estatus->error_severity <= CPER_SEV_CORRECTED)
+ pfx = KERN_INFO;
+ else
+ pfx = KERN_ERR;
+ }
+ curr_seqno = atomic_inc_return(&seqno);
+ snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}", pfx, curr_seqno);
+ printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu);
+ cper_estatus_print(pfx_seq, estatus);
+}
+
+static int print_extlog_rcd(const char *pfx,
+ struct acpi_generic_status *estatus, int cpu)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+ static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+ struct ratelimit_state *ratelimit;
+
+ if (estatus->error_severity == CPER_SEV_CORRECTED ||
+ (estatus->error_severity == CPER_SEV_INFORMATIONAL))
+ ratelimit = &ratelimit_corrected;
+ else
+ ratelimit = &ratelimit_uncorrected;
+ if (__ratelimit(ratelimit)) {
+ __print_extlog_rcd(pfx, estatus, cpu);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int extlog_print(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ int bank = mce->bank;
+ int cpu = mce->extcpu;
+ struct acpi_generic_status *estatus;
+ int rc;
+
+ estatus = extlog_elog_entry_check(cpu, bank);
+ if (estatus == NULL)
+ return NOTIFY_DONE;
+
+ memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
+ /* clear record status to enable BIOS to update it again */
+ estatus->block_status = 0;
+
+ rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
+
+ return NOTIFY_DONE;
+}
+
+static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_object_list input;
+ union acpi_object params[4], *obj;
+ u8 uuid[16];
+ int i;
+
+ acpi_str_to_uuid(extlog_dsm_uuid, uuid);
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = 16;
+ params[0].buffer.pointer = uuid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = rev;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf)))
+ return -1;
+
+ *ret = 0;
+ obj = (union acpi_object *)buf.pointer;
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *ret = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ if (obj->buffer.length <= 8) {
+ for (i = 0; i < obj->buffer.length; i++)
+ *ret |= (obj->buffer.pointer[i] << (i * 8));
+ }
+ }
+ kfree(buf.pointer);
+
+ return 0;
+}
+
+static bool extlog_get_l1addr(void)
+{
+ acpi_handle handle;
+ u64 ret;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
+ return false;
+
+ if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_QUERY, &ret) ||
+ !(ret & EXTLOG_QUERY_L1_EXIST))
+ return false;
+
+ if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_ADDR, &ret))
+ return false;
+
+ l1_dirbase = ret;
+ /* Spec says L1 directory must be 4K aligned, bail out if it isn't */
+ if (l1_dirbase & ((1 << 12) - 1)) {
+ pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n",
+ l1_dirbase);
+ return false;
+ }
+
+ return true;
+}
+static struct notifier_block extlog_mce_dec = {
+ .notifier_call = extlog_print,
+};
+
+static int __init extlog_init(void)
+{
+ struct extlog_l1_head *l1_head;
+ void __iomem *extlog_l1_hdr;
+ size_t l1_hdr_size;
+ struct resource *r;
+ u64 cap;
+ int rc;
+
+ rc = -ENODEV;
+
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+ if (!(cap & MCG_ELOG_P))
+ return rc;
+
+ if (!extlog_get_l1addr())
+ return rc;
+
+ rc = -EINVAL;
+ /* get L1 header to fetch necessary information */
+ l1_hdr_size = sizeof(struct extlog_l1_head);
+ r = request_mem_region(l1_dirbase, l1_hdr_size, "L1 DIR HDR");
+ if (!r) {
+ pr_warn(FW_BUG EMCA_BUG,
+ (unsigned long long)l1_dirbase,
+ (unsigned long long)l1_dirbase + l1_hdr_size);
+ goto err;
+ }
+
+ extlog_l1_hdr = acpi_os_map_memory(l1_dirbase, l1_hdr_size);
+ l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
+ l1_size = l1_head->total_len;
+ l1_percpu_entry = l1_head->entries;
+ elog_base = l1_head->elog_base;
+ elog_size = l1_head->elog_len;
+ acpi_os_unmap_memory(extlog_l1_hdr, l1_hdr_size);
+ release_mem_region(l1_dirbase, l1_hdr_size);
+
+ /* remap L1 header again based on completed information */
+ r = request_mem_region(l1_dirbase, l1_size, "L1 Table");
+ if (!r) {
+ pr_warn(FW_BUG EMCA_BUG,
+ (unsigned long long)l1_dirbase,
+ (unsigned long long)l1_dirbase + l1_size);
+ goto err;
+ }
+ extlog_l1_addr = acpi_os_map_memory(l1_dirbase, l1_size);
+ l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
+
+ /* remap elog table */
+ r = request_mem_region(elog_base, elog_size, "Elog Table");
+ if (!r) {
+ pr_warn(FW_BUG EMCA_BUG,
+ (unsigned long long)elog_base,
+ (unsigned long long)elog_base + elog_size);
+ goto err_release_l1_dir;
+ }
+ elog_addr = acpi_os_map_memory(elog_base, elog_size);
+
+ rc = -ENOMEM;
+ /* allocate buffer to save elog record */
+ elog_buf = kmalloc(ELOG_ENTRY_LEN, GFP_KERNEL);
+ if (elog_buf == NULL)
+ goto err_release_elog;
+
+ mce_register_decode_chain(&extlog_mce_dec);
+ /* enable OS to be involved to take over management from BIOS */
+ ((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;
+
+ return 0;
+
+err_release_elog:
+ if (elog_addr)
+ acpi_os_unmap_memory(elog_addr, elog_size);
+ release_mem_region(elog_base, elog_size);
+err_release_l1_dir:
+ if (extlog_l1_addr)
+ acpi_os_unmap_memory(extlog_l1_addr, l1_size);
+ release_mem_region(l1_dirbase, l1_size);
+err:
+ pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
+ return rc;
+}
+
+static void __exit extlog_exit(void)
+{
+ mce_unregister_decode_chain(&extlog_mce_dec);
+ ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
+ if (extlog_l1_addr)
+ acpi_os_unmap_memory(extlog_l1_addr, l1_size);
+ if (elog_addr)
+ acpi_os_unmap_memory(elog_addr, elog_size);
+ release_mem_region(elog_base, elog_size);
+ release_mem_region(l1_dirbase, l1_size);
+ kfree(elog_buf);
+}
+
+module_init(extlog_init);
+module_exit(extlog_exit);
+
+MODULE_AUTHOR("Chen, Gong <gong.chen@intel.com>");
+MODULE_DESCRIPTION("Extended MCA Error Log Driver");
+MODULE_LICENSE("GPL");
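[Editor's note] The new acpi_extlog.c driver above feeds its decoded error records into the kernel by registering a notifier_block on the x86 machine-check decode chain (mce_register_decode_chain() in extlog_init()). The fragment below is a minimal, hedged sketch of that registration mechanism on its own, assuming hypothetical example_* names; it only demonstrates the notifier hookup, not the extlog L1-directory parsing.

/*
 * Sketch (assumed names): hooking the x86 MCE decode notifier chain,
 * the same mechanism extlog_init()/extlog_exit() use above.
 */
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

static int example_mce_notify(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = data;

	/* Inspect the machine-check record; do not consume it here. */
	pr_info("mce: cpu %u bank %d status 0x%llx\n",
		mce->extcpu, mce->bank,
		(unsigned long long)mce->status);

	return NOTIFY_DONE;
}

static struct notifier_block example_mce_nb = {
	.notifier_call = example_mce_notify,
};

static int __init example_init(void)
{
	mce_register_decode_chain(&example_mce_nb);
	return 0;
}

static void __exit example_exit(void)
{
	mce_unregister_decode_chain(&example_mce_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");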
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index a6977e12d57..ac0f52f6df2 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
/*
* acpi_ipmi.c - ACPI IPMI opregion
*
- * Copyright (C) 2010 Intel Corporation
- * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ * Copyright (C) 2010, 2013 Intel Corporation
+ * Author: Zhao Yakui <yakui.zhao@intel.com>
+ * Lv Zheng <lv.zheng@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -23,60 +24,58 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/ipmi.h>
-#include <linux/device.h>
-#include <linux/pnp.h>
#include <linux/spinlock.h>
MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");
-#define IPMI_FLAGS_HANDLER_INSTALL 0
-
#define ACPI_IPMI_OK 0
#define ACPI_IPMI_TIMEOUT 0x10
#define ACPI_IPMI_UNKNOWN 0x07
/* the IPMI timeout is 5s */
-#define IPMI_TIMEOUT (5 * HZ)
+#define IPMI_TIMEOUT (5000)
+#define ACPI_IPMI_MAX_MSG_LENGTH 64
struct acpi_ipmi_device {
/* the device list attached to driver_data.ipmi_devices */
struct list_head head;
+
/* the IPMI request message list */
struct list_head tx_msg_list;
- spinlock_t tx_msg_lock;
+
+ spinlock_t tx_msg_lock;
acpi_handle handle;
- struct pnp_dev *pnp_dev;
- ipmi_user_t user_interface;
+ struct device *dev;
+ ipmi_user_t user_interface;
int ipmi_ifnum; /* IPMI interface number */
long curr_msgid;
- unsigned long flags;
- struct ipmi_smi_info smi_data;
+ bool dead;
+ struct kref kref;
};
struct ipmi_driver_data {
- struct list_head ipmi_devices;
- struct ipmi_smi_watcher bmc_events;
- struct ipmi_user_hndl ipmi_hndlrs;
- struct mutex ipmi_lock;
+ struct list_head ipmi_devices;
+ struct ipmi_smi_watcher bmc_events;
+ struct ipmi_user_hndl ipmi_hndlrs;
+ struct mutex ipmi_lock;
+
+ /*
+ * NOTE: IPMI System Interface Selection
+ * There is no system interface specified by the IPMI operation
+ * region access. We try to select one system interface with ACPI
+ * handle set. IPMI messages passed from the ACPI codes are sent
+ * to this selected global IPMI system interface.
+ */
+ struct acpi_ipmi_device *selected_smi;
};
struct acpi_ipmi_msg {
struct list_head head;
+
/*
* General speaking the addr type should be SI_ADDR_TYPE. And
* the addr channel should be BMC.
@@ -86,30 +85,31 @@ struct acpi_ipmi_msg {
*/
struct ipmi_addr addr;
long tx_msgid;
+
/* it is used to track whether the IPMI message is finished */
struct completion tx_complete;
+
struct kernel_ipmi_msg tx_message;
- int msg_done;
- /* tx data . And copy it from ACPI object buffer */
- u8 tx_data[64];
- int tx_len;
- u8 rx_data[64];
- int rx_len;
+ int msg_done;
+
+ /* tx/rx data . And copy it from/to ACPI object buffer */
+ u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
+ u8 rx_len;
+
struct acpi_ipmi_device *device;
+ struct kref kref;
};
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
u8 status;
u8 length;
- u8 data[64];
+ u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
static struct ipmi_driver_data driver_data = {
.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -121,29 +121,142 @@ static struct ipmi_driver_data driver_data = {
.ipmi_hndlrs = {
.ipmi_recv_hndl = ipmi_msg_handler,
},
+ .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_device *
+ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
+{
+ struct acpi_ipmi_device *ipmi_device;
+ int err;
+ ipmi_user_t user;
+
+ ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+ if (!ipmi_device)
+ return NULL;
+
+ kref_init(&ipmi_device->kref);
+ INIT_LIST_HEAD(&ipmi_device->head);
+ INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+ spin_lock_init(&ipmi_device->tx_msg_lock);
+ ipmi_device->handle = handle;
+ ipmi_device->dev = get_device(dev);
+ ipmi_device->ipmi_ifnum = iface;
+
+ err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+ ipmi_device, &user);
+ if (err) {
+ put_device(dev);
+ kfree(ipmi_device);
+ return NULL;
+ }
+ ipmi_device->user_interface = user;
+
+ return ipmi_device;
+}
+
+static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
+{
+ ipmi_destroy_user(ipmi_device->user_interface);
+ put_device(ipmi_device->dev);
+ kfree(ipmi_device);
+}
+
+static void ipmi_dev_release_kref(struct kref *kref)
+{
+ struct acpi_ipmi_device *ipmi =
+ container_of(kref, struct acpi_ipmi_device, kref);
+
+ ipmi_dev_release(ipmi);
+}
+
+static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
+{
+ list_del(&ipmi_device->head);
+ if (driver_data.selected_smi == ipmi_device)
+ driver_data.selected_smi = NULL;
+
+ /*
+	 * Always set the dead flag after deleting from the list,
+	 * otherwise the list_for_each_entry() code must be changed.
+ */
+ ipmi_device->dead = true;
+}
+
+static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
+{
+ struct acpi_ipmi_device *ipmi_device = NULL;
+
+ mutex_lock(&driver_data.ipmi_lock);
+ if (driver_data.selected_smi) {
+ ipmi_device = driver_data.selected_smi;
+ kref_get(&ipmi_device->kref);
+ }
+ mutex_unlock(&driver_data.ipmi_lock);
+
+ return ipmi_device;
+}
+
+static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
+{
+ kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
+}
+
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
+ struct acpi_ipmi_device *ipmi;
struct acpi_ipmi_msg *ipmi_msg;
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+ ipmi = acpi_ipmi_dev_get();
+ if (!ipmi)
+ return NULL;
ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
- if (!ipmi_msg) {
- dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+ if (!ipmi_msg) {
+ acpi_ipmi_dev_put(ipmi);
return NULL;
}
+
+ kref_init(&ipmi_msg->kref);
init_completion(&ipmi_msg->tx_complete);
INIT_LIST_HEAD(&ipmi_msg->head);
ipmi_msg->device = ipmi;
+ ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
+
return ipmi_msg;
}
-#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
-#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
-static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
- acpi_physical_address address,
- acpi_integer *value)
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+ acpi_ipmi_dev_put(tx_msg->device);
+ kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+ struct acpi_ipmi_msg *tx_msg =
+ container_of(kref, struct acpi_ipmi_msg, kref);
+
+ ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+ kref_get(&tx_msg->kref);
+
+ return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+ kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
+#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
+static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
+ acpi_physical_address address,
+ acpi_integer *value)
{
struct kernel_ipmi_msg *msg;
struct acpi_ipmi_buffer *buffer;
@@ -151,21 +264,31 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
unsigned long flags;
msg = &tx_msg->tx_message;
+
/*
* IPMI network function and command are encoded in the address
* within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
*/
msg->netfn = IPMI_OP_RGN_NETFN(address);
msg->cmd = IPMI_OP_RGN_CMD(address);
- msg->data = tx_msg->tx_data;
+ msg->data = tx_msg->data;
+
/*
* value is the parameter passed by the IPMI opregion space handler.
* It points to the IPMI request message buffer
*/
buffer = (struct acpi_ipmi_buffer *)value;
+
/* copy the tx message data */
+ if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
+ dev_WARN_ONCE(tx_msg->device->dev, true,
+ "Unexpected request (msg len %d).\n",
+ buffer->length);
+ return -EINVAL;
+ }
msg->data_len = buffer->length;
- memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+ memcpy(tx_msg->data, buffer->data, msg->data_len);
+
/*
* now the default type is SYSTEM_INTERFACE and channel type is BMC.
* If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
@@ -179,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
/* Get the msgid */
device = tx_msg->device;
+
spin_lock_irqsave(&device->tx_msg_lock, flags);
device->curr_msgid++;
tx_msg->tx_msgid = device->curr_msgid;
spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+
+ return 0;
}
static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
- acpi_integer *value, int rem_time)
+ acpi_integer *value)
{
struct acpi_ipmi_buffer *buffer;
@@ -195,110 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
* IPMI message returned by IPMI command.
*/
buffer = (struct acpi_ipmi_buffer *)value;
- if (!rem_time && !msg->msg_done) {
- buffer->status = ACPI_IPMI_TIMEOUT;
- return;
- }
+
/*
- * If the flag of msg_done is not set or the recv length is zero, it
- * means that the IPMI command is not executed correctly.
- * The status code will be ACPI_IPMI_UNKNOWN.
+ * If the flag of msg_done is not set, it means that the IPMI command is
+ * not executed correctly.
*/
- if (!msg->msg_done || !msg->rx_len) {
- buffer->status = ACPI_IPMI_UNKNOWN;
+ buffer->status = msg->msg_done;
+ if (msg->msg_done != ACPI_IPMI_OK)
return;
- }
+
/*
* If the IPMI response message is obtained correctly, the status code
* will be ACPI_IPMI_OK
*/
- buffer->status = ACPI_IPMI_OK;
buffer->length = msg->rx_len;
- memcpy(buffer->data, msg->rx_data, msg->rx_len);
+ memcpy(buffer->data, msg->data, msg->rx_len);
}
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
- struct acpi_ipmi_msg *tx_msg, *temp;
- int count = HZ / 10;
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+ struct acpi_ipmi_msg *tx_msg;
+ unsigned long flags;
+
+ /*
+ * NOTE: On-going ipmi_recv_msg
+	 * ipmi_msg_handler() may still be invoked by ipmi_si after
+	 * flushing, but it is safe to do a fast flush on module_exit()
+	 * without waiting for all ipmi_recv_msg(s) to complete from
+	 * ipmi_msg_handler(), since ipmi_si ensures that all
+	 * ipmi_recv_msg(s) are freed after ipmi_destroy_user() is invoked.
+ */
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+ while (!list_empty(&ipmi->tx_msg_list)) {
+ tx_msg = list_first_entry(&ipmi->tx_msg_list,
+ struct acpi_ipmi_msg,
+ head);
+ list_del(&tx_msg->head);
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
- list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
/* wake up the sleep thread on the Tx msg */
complete(&tx_msg->tx_complete);
+ acpi_ipmi_msg_put(tx_msg);
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
}
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
+
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+ struct acpi_ipmi_msg *msg)
+{
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ bool msg_found = false;
+ unsigned long flags;
- /* wait for about 100ms to flush the tx message list */
- while (count--) {
- if (list_empty(&ipmi->tx_msg_list))
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+ list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+ if (msg == tx_msg) {
+ msg_found = true;
+ list_del(&tx_msg->head);
break;
- schedule_timeout(1);
+ }
}
- if (!list_empty(&ipmi->tx_msg_list))
- dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+ if (msg_found)
+ acpi_ipmi_msg_put(tx_msg);
}
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
struct acpi_ipmi_device *ipmi_device = user_msg_data;
- int msg_found = 0;
- struct acpi_ipmi_msg *tx_msg;
- struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+ bool msg_found = false;
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ struct device *dev = ipmi_device->dev;
unsigned long flags;
if (msg->user != ipmi_device->user_interface) {
- dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
- "returned user %p, expected user %p\n",
- msg->user, ipmi_device->user_interface);
- ipmi_free_recv_msg(msg);
- return;
+ dev_warn(dev,
+ "Unexpected response is returned. returned user %p, expected user %p\n",
+ msg->user, ipmi_device->user_interface);
+ goto out_msg;
}
+
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
- list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+ list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
if (msg->msgid == tx_msg->tx_msgid) {
- msg_found = 1;
+ msg_found = true;
+ list_del(&tx_msg->head);
break;
}
}
-
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+
if (!msg_found) {
- dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
- "returned.\n", msg->msgid);
- ipmi_free_recv_msg(msg);
- return;
+ dev_warn(dev,
+ "Unexpected response (msg id %ld) is returned.\n",
+ msg->msgid);
+ goto out_msg;
}
- if (msg->msg.data_len) {
- /* copy the response data to Rx_data buffer */
- memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
- tx_msg->rx_len = msg->msg.data_len;
- tx_msg->msg_done = 1;
+ /* copy the response data to Rx_data buffer */
+ if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
+ dev_WARN_ONCE(dev, true,
+ "Unexpected response (msg len %d).\n",
+ msg->msg.data_len);
+ goto out_comp;
}
+
+ /* response msg is an error msg */
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
+ msg->msg.data_len == 1) {
+ if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
+ dev_WARN_ONCE(dev, true,
+ "Unexpected response (timeout).\n");
+ tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
+ }
+ goto out_comp;
+ }
+
+ tx_msg->rx_len = msg->msg.data_len;
+ memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
+ tx_msg->msg_done = ACPI_IPMI_OK;
+
+out_comp:
complete(&tx_msg->tx_complete);
+ acpi_ipmi_msg_put(tx_msg);
+out_msg:
ipmi_free_recv_msg(msg);
-};
+}
static void ipmi_register_bmc(int iface, struct device *dev)
{
struct acpi_ipmi_device *ipmi_device, *temp;
- struct pnp_dev *pnp_dev;
- ipmi_user_t user;
int err;
struct ipmi_smi_info smi_data;
acpi_handle handle;
err = ipmi_get_smi_info(iface, &smi_data);
-
if (err)
return;
- if (smi_data.addr_src != SI_ACPI) {
- put_device(smi_data.dev);
- return;
- }
-
+ if (smi_data.addr_src != SI_ACPI)
+ goto err_ref;
handle = smi_data.addr_info.acpi_info.acpi_handle;
+ if (!handle)
+ goto err_ref;
+
+ ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
+ if (!ipmi_device) {
+ dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
+ goto err_ref;
+ }
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -307,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
* to the device list, don't add it again.
*/
if (temp->handle == handle)
- goto out;
+ goto err_lock;
}
-
- ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
-
- if (!ipmi_device)
- goto out;
-
- pnp_dev = to_pnp_dev(smi_data.dev);
- ipmi_device->handle = handle;
- ipmi_device->pnp_dev = pnp_dev;
-
- err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
- ipmi_device, &user);
- if (err) {
- dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
- kfree(ipmi_device);
- goto out;
- }
- acpi_add_ipmi_device(ipmi_device);
- ipmi_device->user_interface = user;
- ipmi_device->ipmi_ifnum = iface;
+ if (!driver_data.selected_smi)
+ driver_data.selected_smi = ipmi_device;
+ list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
mutex_unlock(&driver_data.ipmi_lock);
- memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+
+ put_device(smi_data.dev);
return;
-out:
+err_lock:
mutex_unlock(&driver_data.ipmi_lock);
+ ipmi_dev_release(ipmi_device);
+err_ref:
put_device(smi_data.dev);
return;
}
@@ -342,23 +502,29 @@ out:
static void ipmi_bmc_gone(int iface)
{
struct acpi_ipmi_device *ipmi_device, *temp;
+ bool dev_found = false;
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry_safe(ipmi_device, temp,
- &driver_data.ipmi_devices, head) {
- if (ipmi_device->ipmi_ifnum != iface)
- continue;
-
- acpi_remove_ipmi_device(ipmi_device);
- put_device(ipmi_device->smi_data.dev);
- kfree(ipmi_device);
- break;
+ &driver_data.ipmi_devices, head) {
+ if (ipmi_device->ipmi_ifnum != iface) {
+ dev_found = true;
+ __ipmi_dev_kill(ipmi_device);
+ break;
+ }
}
+ if (!driver_data.selected_smi)
+ driver_data.selected_smi = list_first_entry_or_null(
+ &driver_data.ipmi_devices,
+ struct acpi_ipmi_device, head);
mutex_unlock(&driver_data.ipmi_lock);
+
+ if (dev_found) {
+ ipmi_flush_tx_msg(ipmi_device);
+ acpi_ipmi_dev_put(ipmi_device);
+ }
}
-/* --------------------------------------------------------------------------
- * Address Space Management
- * -------------------------------------------------------------------------- */
+
/*
* This is the IPMI opregion space handler.
* @function: indicates the read/write. In fact as the IPMI message is driven
@@ -371,17 +537,17 @@ static void ipmi_bmc_gone(int iface)
* the response IPMI message returned by IPMI command.
* @handler_context: IPMI device context.
*/
-
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
- u32 bits, acpi_integer *value,
- void *handler_context, void *region_context)
+ u32 bits, acpi_integer *value,
+ void *handler_context, void *region_context)
{
struct acpi_ipmi_msg *tx_msg;
- struct acpi_ipmi_device *ipmi_device = handler_context;
- int err, rem_time;
+ struct acpi_ipmi_device *ipmi_device;
+ int err;
acpi_status status;
unsigned long flags;
+
/*
* IPMI opregion message.
* IPMI message is firstly written to the BMC and system software
@@ -391,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
if ((function & ACPI_IO_MASK) == ACPI_READ)
return AE_TYPE;
- if (!ipmi_device->user_interface)
+ tx_msg = ipmi_msg_alloc();
+ if (!tx_msg)
return AE_NOT_EXIST;
+ ipmi_device = tx_msg->device;
- tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
- if (!tx_msg)
- return AE_NO_MEMORY;
+ if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
+ ipmi_msg_release(tx_msg);
+ return AE_TYPE;
+ }
- acpi_format_ipmi_msg(tx_msg, address, value);
+ acpi_ipmi_msg_get(tx_msg);
+ mutex_lock(&driver_data.ipmi_lock);
+	/* Do not add a tx_msg that cannot be flushed. */
+ if (ipmi_device->dead) {
+ mutex_unlock(&driver_data.ipmi_lock);
+ ipmi_msg_release(tx_msg);
+ return AE_NOT_EXIST;
+ }
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ mutex_unlock(&driver_data.ipmi_lock);
+
err = ipmi_request_settime(ipmi_device->user_interface,
- &tx_msg->addr,
- tx_msg->tx_msgid,
- &tx_msg->tx_message,
- NULL, 0, 0, 0);
+ &tx_msg->addr,
+ tx_msg->tx_msgid,
+ &tx_msg->tx_message,
+ NULL, 0, 0, IPMI_TIMEOUT);
if (err) {
status = AE_ERROR;
- goto end_label;
+ goto out_msg;
}
- rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
- IPMI_TIMEOUT);
- acpi_format_ipmi_response(tx_msg, value, rem_time);
+ wait_for_completion(&tx_msg->tx_complete);
+
+ acpi_format_ipmi_response(tx_msg, value);
status = AE_OK;
-end_label:
- spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
- list_del(&tx_msg->head);
- spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
- kfree(tx_msg);
+out_msg:
+ ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+ acpi_ipmi_msg_put(tx_msg);
return status;
}
-static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
-{
- if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
- return;
-
- acpi_remove_address_space_handler(ipmi->handle,
- ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
-
- clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-}
-
-static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+static int __init acpi_ipmi_init(void)
{
+ int result;
acpi_status status;
- if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+ if (acpi_disabled)
return 0;
- status = acpi_install_address_space_handler(ipmi->handle,
+ status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler,
- NULL, ipmi);
+ NULL, NULL);
if (ACPI_FAILURE(status)) {
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
- dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
- "handle\n");
+ pr_warn("Can't register IPMI opregion space handle\n");
return -EINVAL;
}
- set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
- return 0;
-}
-
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-
- INIT_LIST_HEAD(&ipmi_device->head);
-
- spin_lock_init(&ipmi_device->tx_msg_lock);
- INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
- ipmi_install_space_handler(ipmi_device);
-
- list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
-}
-
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
- /*
- * If the IPMI user interface is created, it should be
- * destroyed.
- */
- if (ipmi_device->user_interface) {
- ipmi_destroy_user(ipmi_device->user_interface);
- ipmi_device->user_interface = NULL;
- }
- /* flush the Tx_msg list */
- if (!list_empty(&ipmi_device->tx_msg_list))
- ipmi_flush_tx_msg(ipmi_device);
-
- list_del(&ipmi_device->head);
- ipmi_remove_space_handler(ipmi_device);
-}
-
-static int __init acpi_ipmi_init(void)
-{
- int result = 0;
-
- if (acpi_disabled)
- return result;
-
- mutex_init(&driver_data.ipmi_lock);
-
result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+ if (result)
+ pr_err("Can't register IPMI system interface watcher\n");
return result;
}
static void __exit acpi_ipmi_exit(void)
{
- struct acpi_ipmi_device *ipmi_device, *temp;
+ struct acpi_ipmi_device *ipmi_device;
if (acpi_disabled)
return;
@@ -516,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
* handler and free it.
*/
mutex_lock(&driver_data.ipmi_lock);
- list_for_each_entry_safe(ipmi_device, temp,
- &driver_data.ipmi_devices, head) {
- acpi_remove_ipmi_device(ipmi_device);
- put_device(ipmi_device->smi_data.dev);
- kfree(ipmi_device);
+ while (!list_empty(&driver_data.ipmi_devices)) {
+ ipmi_device = list_first_entry(&driver_data.ipmi_devices,
+ struct acpi_ipmi_device,
+ head);
+ __ipmi_dev_kill(ipmi_device);
+ mutex_unlock(&driver_data.ipmi_lock);
+
+ ipmi_flush_tx_msg(ipmi_device);
+ acpi_ipmi_dev_put(ipmi_device);
+
+ mutex_lock(&driver_data.ipmi_lock);
}
mutex_unlock(&driver_data.ipmi_lock);
+ acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+ ACPI_ADR_SPACE_IPMI,
+ &acpi_ipmi_space_handler);
}
module_init(acpi_ipmi_init);
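
The acpi_ipmi.c rework above replaces the old synchronous teardown with kref-based
reference counting on both acpi_ipmi_device and acpi_ipmi_msg (acpi_ipmi_dev_get()/
acpi_ipmi_dev_put(), acpi_ipmi_msg_get()/acpi_ipmi_msg_put()), so a queued message can
safely outlive the interface that issued it. A minimal sketch of that get/put pattern,
using invented example_* names rather than the driver's own structures:

/* Illustrative sketch only, not part of the patch above. */
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* payload ... */
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);			/* last reference dropped */
}

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

static struct example_obj *example_get(struct example_obj *obj)
{
	kref_get(&obj->kref);		/* one extra reference per user */
	return obj;
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_release);	/* frees on the final put */
}

In the driver above, the same idea lets ipmi_msg_handler() and the flush/cancel paths
each hold their own reference to a tx_msg; whoever drops the last reference frees it.
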
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fb78bb9ad8f..6745fe137b9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -30,6 +30,7 @@ ACPI_MODULE_NAME("acpi_lpss");
/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
+#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_TX_INT 0x20
@@ -68,11 +69,16 @@ struct lpss_private_data {
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
- unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+ unsigned int offset;
u32 reg;
- reg = readl(pdata->mmio_base + tx_int_offset);
- writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset);
+ offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+ reg = readl(pdata->mmio_base + offset);
+ writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+ offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+ reg = readl(pdata->mmio_base + offset);
+ writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
}
static struct lpss_device_desc lpt_dev_desc = {
@@ -157,6 +163,15 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
{ "INT33B2", },
+ { "INT3430", (unsigned long)&lpt_dev_desc },
+ { "INT3431", (unsigned long)&lpt_dev_desc },
+ { "INT3432", (unsigned long)&lpt_dev_desc },
+ { "INT3433", (unsigned long)&lpt_dev_desc },
+ { "INT3434", (unsigned long)&lpt_uart_dev_desc },
+ { "INT3435", (unsigned long)&lpt_uart_dev_desc },
+ { "INT3436", (unsigned long)&lpt_sdio_dev_desc },
+ { "INT3437", },
+
{ }
};
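
The lpss_uart_setup() change above adds LPSS_GENERAL_UART_RTS_OVRD and sets it with the
same read-modify-write sequence already used for LPSS_TX_INT_MASK. A minimal sketch of
that MMIO pattern; set_mmio_bit() is a hypothetical helper, not part of acpi_lpss.c:

/* Illustrative sketch only. */
#include <linux/io.h>
#include <linux/types.h>

static void set_mmio_bit(void __iomem *base, unsigned int offset, u32 bit)
{
	u32 reg = readl(base + offset);		/* read the current register value */

	writel(reg | bit, base + offset);	/* write it back with the bit set */
}
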
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 999adb5499c..551dad712ff 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -152,8 +152,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
unsigned long long current_status;
/* Get device present/absent information from the _STA */
- if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
- NULL, &current_status)))
+ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+ METHOD_NAME__STA, NULL,
+ &current_status)))
return -ENODEV;
/*
* Check for device status. Device should be
@@ -281,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
if (!info->enabled)
continue;
- if (nid < 0)
+ if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(info->start_addr);
acpi_unbind_memory_blocks(info, handle);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 1bde12708f9..dbfe49e5fd6 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,6 +29,13 @@ ACPI_MODULE_NAME("platform");
static const struct acpi_device_id acpi_platform_device_ids[] = {
{ "PNP0D40" },
+ { "ACPI0003" },
+ { "VPC2004" },
+ { "BCM4752" },
+
+ /* Intel Smart Sound Technology */
+ { "INT33C8" },
+ { "80860F28" },
{ }
};
@@ -104,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
pdevinfo.id = -1;
pdevinfo.res = resources;
pdevinfo.num_res = count;
- pdevinfo.acpi_node.handle = adev->handle;
+ pdevinfo.acpi_node.companion = adev;
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
dev_err(&adev->dev, "platform device creation failed: %ld\n",
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f29e06efa47..3c1d6b0c09a 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -140,15 +140,11 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
return 0;
}
-static int acpi_processor_errata(struct acpi_processor *pr)
+static int acpi_processor_errata(void)
{
int result = 0;
struct pci_dev *dev = NULL;
-
- if (!pr)
- return -EINVAL;
-
/*
* PIIX4
*/
@@ -181,7 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, &pr->id);
+ ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
if (ret)
goto out;
@@ -219,11 +215,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
int cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
+ unsigned long long value;
- if (num_online_cpus() > 1)
- errata.smp = TRUE;
-
- acpi_processor_errata(pr);
+ acpi_processor_errata();
/*
* Check to see if we have bus mastering arbitration control. This
@@ -247,18 +241,12 @@ static int acpi_processor_get_info(struct acpi_device *device)
return -ENODEV;
}
- /*
- * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
- * >>> 'acpi_get_processor_id(acpi_id, &id)' in
- * arch/xxx/acpi.c
- */
pr->acpi_id = object.processor.proc_id;
} else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
*/
- unsigned long long value;
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
@@ -270,7 +258,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
device_declaration = 1;
pr->acpi_id = value;
}
- cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
+ pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
+ pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if (!cpu0_initialized && (cpu_index == -1) &&
@@ -332,9 +322,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
* ensure we get the right value in the "physical id" field
* of /proc/cpuinfo
*/
- status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+ status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
if (ACPI_SUCCESS(status))
- arch_fix_phys_package_id(pr->id, object.integer.value);
+ arch_fix_phys_package_id(pr->id, value);
return 0;
}
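
The _SUN hunk above switches from acpi_evaluate_object() with a result buffer to
acpi_evaluate_integer(), which returns the method's integer result directly. A minimal
sketch of that call pattern; read_sun() is a hypothetical wrapper, not a kernel function:

/* Illustrative sketch only. */
#include <linux/acpi.h>
#include <linux/errno.h>

static int read_sun(acpi_handle handle, unsigned long long *sun)
{
	acpi_status status;

	/* _SUN returns an integer; acpi_evaluate_integer() unpacks it for us. */
	status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	return 0;
}
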
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 9feba08c29f..a9fd0b87206 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -114,10 +114,12 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_db_generate_gpe(char *gpe_arg,
char *block_arg))
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
+
/*
* dbconvert - miscellaneous conversion routines
*/
- acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
@@ -154,6 +156,8 @@ void acpi_db_set_scope(char *name);
void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
+void acpi_db_dump_namespace_paths(void);
+
void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
acpi_status acpi_db_find_name_in_namespace(char *name_arg);
@@ -240,6 +244,8 @@ void acpi_db_display_history(void);
char *acpi_db_get_from_history(char *command_num_arg);
+char *acpi_db_get_history_by_index(u32 commandd_num);
+
/*
* dbinput - user front-end to the AML debugger
*/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ab0e9771038..41abe552c7a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -71,7 +71,8 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
- ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
acpi_status acpi_ev_remove_global_lock_handler(void);
/*
@@ -242,11 +243,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
*/
u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
-u32 acpi_ev_install_sci_handler(void);
+u32 acpi_ev_sci_dispatch(void);
-acpi_status acpi_ev_remove_sci_handler(void);
+u32 acpi_ev_install_sci_handler(void);
-u32 acpi_ev_initialize_SCI(u32 program_SCI);
+acpi_status acpi_ev_remove_all_sci_handlers(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
#endif /* __ACEVENTS_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 90e846f985f..e9f1fc7f99c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -269,6 +269,7 @@ ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
+ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
/* Owner ID support */
@@ -405,7 +406,9 @@ extern u32 acpi_gbl_nesting_level;
/* Event counters */
+ACPI_EXTERN u32 acpi_method_count;
ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_sci_count;
ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
/* Support for dynamic control method tracing mechanism */
@@ -445,13 +448,6 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
ACPI_EXTERN char *acpi_gbl_db_buffer;
ACPI_EXTERN char *acpi_gbl_db_filename;
@@ -459,6 +455,16 @@ ACPI_EXTERN u32 acpi_gbl_db_debug_level;
ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+
/*
* Statistic globals
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0ed00669cd2..53ed1a8ba4f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -398,6 +398,14 @@ struct acpi_simple_repair_info {
*
****************************************************************************/
+/* Dispatch info for each host-installed SCI handler */
+
+struct acpi_sci_handler_info {
+ struct acpi_sci_handler_info *next;
+ acpi_sci_handler address; /* Address of handler */
+ void *context; /* Context to be passed to handler */
+};
+
/* Dispatch info for each GPE -- either a method or handler, cannot be both */
struct acpi_gpe_handler_info {
@@ -1064,7 +1072,7 @@ struct acpi_db_method_info {
char *name;
u32 flags;
u32 num_loops;
- char pathname[128];
+ char pathname[ACPI_DB_LINE_BUFFER_SIZE];
char **args;
acpi_object_type *types;
@@ -1086,6 +1094,7 @@ struct acpi_integrity_info {
u32 objects;
};
+#define ACPI_DB_DISABLE_OUTPUT 0x00
#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
#define ACPI_DB_CONSOLE_OUTPUT 0x02
#define ACPI_DB_DUPLICATE_OUTPUT 0x03
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 530a2f8c125..2a86c65d873 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -410,37 +410,6 @@
#endif
/*
- * Memory allocation tracking (DEBUG ONLY)
- */
-#define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__
-
-#ifndef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Memory allocation */
-
-#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_FREE
-#define ACPI_FREE(a) acpi_os_free(a)
-#endif
-#define ACPI_MEM_TRACKING(a)
-
-#else
-
-/* Memory allocation */
-
-#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
-#define ACPI_MEM_TRACKING(a) a
-
-#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
-
-/*
* Macros used for ACPICA utilities only
*/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 40b04bd5579..e6138ac4a16 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -213,6 +213,12 @@ acpi_ns_dump_objects(acpi_object_type type,
u8 display_type,
u32 max_depth,
acpi_owner_id owner_id, acpi_handle start_handle);
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle);
#endif /* ACPI_FUTURE_USAGE */
/*
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index d5a62a6182b..be8180c17d7 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -628,6 +628,17 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
void acpi_ut_repair_name(char *name);
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length);
+#endif
+
/*
* utmutex - mutex support
*/
@@ -652,12 +663,6 @@ acpi_status
acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
acpi_size required_length);
-void *acpi_ut_allocate(acpi_size size,
- u32 component, const char *module, u32 line);
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
- u32 component, const char *module, u32 line);
-
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
void *acpi_ut_allocate_and_track(acpi_size size,
u32 component, const char *module, u32 line);
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index fb09b08d708..afdc6df17ab 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -158,7 +158,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
walk_state->deferred_node = node;
status = acpi_ps_parse_aml(walk_state);
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index d4bfe7b7f90..2d4c0732257 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -259,7 +259,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
goto cleanup;
}
- cleanup:
+cleanup:
/* Remove local reference to the object */
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index a9ffd44c18f..81a78ba8431 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -292,9 +292,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
* reentered one more time (even if it is the same thread)
*/
obj_desc->method.thread_count++;
+ acpi_method_count++;
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/* On error, must release the method mutex (if present) */
if (obj_desc->method.mutex) {
@@ -424,7 +425,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/* On error, we must terminate the method properly */
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 63f0d220ca3..b1746a68dad 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -240,7 +240,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
- exit:
+exit:
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 1fc1ff114f2..5205edcf2c0 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -257,7 +257,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
(buffer_desc->common.reference_count +
obj_desc->common.reference_count);
- cleanup:
+cleanup:
/* Always delete the operands */
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index c666fc01498..ade44e49deb 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -299,7 +299,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
goto result_used;
}
- result_used:
+result_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Result of [%s] used by Parent [%s] Op=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -308,7 +308,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
return_UINT8(TRUE);
- result_not_used:
+result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Result of [%s] not used by Parent [%s] Op=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -752,7 +752,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/*
* We must undo everything done above; meaning that we must
* pop everything off of the operand stack and delete those
@@ -851,7 +851,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
goto exit;
}
- push_result:
+push_result:
walk_state->result_obj = new_obj_desc;
@@ -863,7 +863,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
op->common.flags |= ACPI_PARSEOP_IN_STACK;
}
- exit:
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 151d924817e..1bbb22fd6fa 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -170,7 +170,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
(void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
- cleanup:
+cleanup:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
walk_state->control_state->common.value,
@@ -335,7 +335,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
- error_exit:
+error_exit:
status = acpi_ds_method_error(status, walk_state);
return_ACPI_STATUS(status);
}
@@ -722,7 +722,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->result_obj = NULL;
}
- cleanup:
+cleanup:
if (walk_state->result_obj) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b1f8f4725c2..7f569d57302 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -728,7 +728,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
break;
}
- cleanup:
+cleanup:
/* Remove the Node pushed at the very beginning */
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fdb0a76e40a..4c67193a9fa 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -173,7 +173,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_gbl_global_lock_pending = FALSE;
- cleanup_and_exit:
+cleanup_and_exit:
acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
return (ACPI_INTERRUPT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index c8a1f7d5931..a9cb4a1a4bb 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -458,7 +458,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
gpe_block = gpe_block->next;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return (int_status);
@@ -522,6 +522,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index c1aa1eda26c..a9e76bc4ad9 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -111,7 +111,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
gpe_block->xrupt_block = gpe_xrupt_block;
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -178,7 +178,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
ACPI_FREE(gpe_block->event_info);
ACPI_FREE(gpe_block);
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -302,7 +302,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
if (gpe_register_info) {
ACPI_FREE(gpe_register_info);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7842700346a..a3e2f38aadf 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -203,7 +203,7 @@ acpi_status acpi_ev_gpe_initialize(void)
goto cleanup;
}
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index b24dbb80fab..d3f5e1e2a2b 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
gpe_xrupt_info = gpe_xrupt_info->next;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -196,7 +196,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
*
* RETURN: A GPE interrupt block
*
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 068af96134b..e3157313eb2 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -129,7 +129,7 @@ acpi_status acpi_ev_install_region_handlers(void)
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -531,6 +531,6 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
acpi_ev_install_handler, NULL,
handler_obj, NULL);
- unlock_and_exit:
+unlock_and_exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 1b111ef7490..a5687540e9a 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -264,13 +264,6 @@ void acpi_ev_terminate(void)
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
- /* Remove SCI handler */
-
- status = acpi_ev_remove_sci_handler();
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
- }
-
status = acpi_ev_remove_global_lock_handler();
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
@@ -280,6 +273,13 @@ void acpi_ev_terminate(void)
acpi_gbl_events_initialized = FALSE;
}
+ /* Remove SCI handlers */
+
+ status = acpi_ev_remove_all_sci_handlers();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+ }
+
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cea14d6fc76..144cbb9b73b 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -217,16 +217,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
- if (region_obj2->extra.region_context) {
-
- /* The handler for this region was already installed */
-
- ACPI_FREE(region_context);
- } else {
- /*
- * Save the returned context for use in all accesses to
- * this particular region
- */
+ /*
+ * Save the returned context for use in all accesses to
+ * the handler for this particular region
+ */
+ if (!(region_obj2->extra.region_context)) {
region_obj2->extra.region_context =
region_context;
}
@@ -402,6 +397,14 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
handler_obj->address_space.
context, region_context);
+ /*
+ * region_context should have been released by the deactivate
+ * operation. We don't need access to it anymore here.
+ */
+ if (region_context) {
+ *region_context = NULL;
+ }
+
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) {
@@ -570,10 +573,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
status = acpi_ns_evaluate(info);
acpi_ut_remove_reference(args[1]);
- cleanup2:
+cleanup2:
acpi_ut_remove_reference(args[0]);
- cleanup1:
+cleanup1:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
@@ -758,7 +761,7 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
- exit:
+exit:
/* We ignore all errors from above, don't care */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index b905acf7aac..9e9e3454d89 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -54,6 +54,50 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
/*******************************************************************************
*
+ * FUNCTION: acpi_ev_sci_dispatch
+ *
+ * PARAMETERS: None
+ *
+ * RETURN:      Status code indicating whether the interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_sci_dispatch(void)
+{
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_NAME(ev_sci_dispatch);
+
+ /* Are there any host-installed SCI handlers? */
+
+ if (!acpi_gbl_sci_handler_list) {
+ return (int_status);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Invoke all host-installed SCI handlers */
+
+ sci_handler = acpi_gbl_sci_handler_list;
+ while (sci_handler) {
+
+ /* Invoke the installed handler (at interrupt level) */
+
+ int_status |= sci_handler->address(sci_handler->context);
+
+ sci_handler = sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ev_sci_xrupt_handler
*
* PARAMETERS: context - Calling Context
@@ -89,6 +133,11 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+ /* Invoke all host-installed SCI handlers */
+
+ interrupt_handled |= acpi_ev_sci_dispatch();
+
+ acpi_sci_count++;
return_UINT32(interrupt_handled);
}
@@ -112,14 +161,13 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
- * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * We are guaranteed by the ACPICA initialization/shutdown code that
* if this interrupt handler is installed, ACPI is enabled.
*/
/* GPEs: Check for and dispatch any GPEs that have occurred */
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
return_UINT32(interrupt_handled);
}
@@ -150,15 +198,15 @@ u32 acpi_ev_install_sci_handler(void)
/******************************************************************************
*
- * FUNCTION: acpi_ev_remove_sci_handler
+ * FUNCTION: acpi_ev_remove_all_sci_handlers
*
* PARAMETERS: none
*
- * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN: AE_OK if handler uninstalled, AE_ERROR if handler was not
* installed to begin with
*
* DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- * taken.
+ * taken. Remove all host-installed SCI handlers.
*
* Note: It doesn't seem important to disable all events or set the event
* enable registers to their original values. The OS should disable
@@ -167,11 +215,13 @@ u32 acpi_ev_install_sci_handler(void)
*
******************************************************************************/
-acpi_status acpi_ev_remove_sci_handler(void)
+acpi_status acpi_ev_remove_all_sci_handlers(void)
{
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
acpi_status status;
- ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+ ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);
/* Just let the OS remove the handler and disable the level */
@@ -179,6 +229,21 @@ acpi_status acpi_ev_remove_sci_handler(void)
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler);
+ if (!acpi_gbl_sci_handler_list) {
+ return (status);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Free all host-installed SCI handlers */
+
+ while (acpi_gbl_sci_handler_list) {
+ sci_handler = acpi_gbl_sci_handler_list;
+ acpi_gbl_sci_handler_list = sci_handler->next;
+ ACPI_FREE(sci_handler);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ca5fba99c33..23a7fadca41 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -374,7 +375,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
acpi_gbl_exception_handler = handler;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -385,6 +386,144 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
+ * FUNCTION: acpi_install_sci_handler
+ *
+ * PARAMETERS: address - Address of the handler
+ * context - Value passed to the handler on each SCI
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context)
+{
+ struct acpi_sci_handler_info *new_sci_handler;
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_sci_handler);
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Allocate and init a handler object */
+
+ new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info));
+ if (!new_sci_handler) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ new_sci_handler->address = address;
+ new_sci_handler->context = context;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Lock list during installation */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ sci_handler = acpi_gbl_sci_handler_list;
+
+ /* Ensure handler does not already exist */
+
+ while (sci_handler) {
+ if (address == sci_handler->address) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ sci_handler = sci_handler->next;
+ }
+
+ /* Install the new handler into the global list (at head) */
+
+ new_sci_handler->next = acpi_gbl_sci_handler_list;
+ acpi_gbl_sci_handler_list = new_sci_handler;
+
+unlock_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+exit:
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(new_sci_handler);
+ }
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_sci_handler
+ *
+ * PARAMETERS: address - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_remove_sci_handler(acpi_sci_handler address)
+{
+ struct acpi_sci_handler_info *prev_sci_handler;
+ struct acpi_sci_handler_info *next_sci_handler;
+ acpi_cpu_flags flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_sci_handler);
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Remove the SCI handler with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ prev_sci_handler = NULL;
+ next_sci_handler = acpi_gbl_sci_handler_list;
+ while (next_sci_handler) {
+ if (next_sci_handler->address == address) {
+
+ /* Unlink and free the SCI handler info block */
+
+ if (prev_sci_handler) {
+ prev_sci_handler->next = next_sci_handler->next;
+ } else {
+ acpi_gbl_sci_handler_list =
+ next_sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ ACPI_FREE(next_sci_handler);
+ goto unlock_and_exit;
+ }
+
+ prev_sci_handler = next_sci_handler;
+ next_sci_handler = next_sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ status = AE_NOT_EXIST;
+
+unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_install_global_event_handler
*
* PARAMETERS: handler - Pointer to the global event handler function
@@ -398,6 +537,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
* Can be used to update event counters, etc.
*
******************************************************************************/
+
acpi_status
acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
{
@@ -426,7 +566,7 @@ acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
acpi_gbl_global_event_handler = handler;
acpi_gbl_global_event_handler_context = context;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -498,7 +638,7 @@ acpi_install_fixed_event_handler(u32 event,
handler));
}
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
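
The evxface.c additions above give the host OS acpi_install_sci_handler() and
acpi_remove_sci_handler(), so it can chain its own handlers into SCI dispatch (invoked
from acpi_ev_sci_dispatch() in evsci.c). A minimal usage sketch, assuming the
acpi_sci_handler callback type and the ACPI_INTERRUPT_* return codes from ACPICA;
my_sci_handler is an invented name:

/* Illustrative sketch only. */
#include <acpi/acpi.h>

static u32 my_sci_handler(void *context)
{
	/* Check the hardware; claim the interrupt only if it was ours. */
	return ACPI_INTERRUPT_NOT_HANDLED;
}

static acpi_status my_sci_register(void)
{
	/* Second argument is the context passed back to the handler on each SCI. */
	return acpi_install_sci_handler(my_sci_handler, NULL);
}

static void my_sci_unregister(void)
{
	acpi_remove_sci_handler(my_sci_handler);
}
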
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7039606a0ba..39d06af5e34 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 7662f1a42ff..5713da77c66 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -471,7 +472,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
*event_status |= ACPI_EVENT_FLAG_HANDLE;
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -624,7 +625,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -679,7 +680,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
obj_desc->device.gpe_block = NULL;
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 80cecf83859..02ed75ac56c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -147,7 +148,7 @@ acpi_install_address_space_handler(acpi_handle device,
status = acpi_ev_execute_reg_methods(node, space_id);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -286,7 +287,7 @@ acpi_remove_address_space_handler(acpi_handle device,
status = AE_NOT_EXIST;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 269e81d86ef..3c2e6dcdad3 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -193,7 +193,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
operands[0], obj_desc, ACPI_TYPE_EVENT);
- cleanup:
+cleanup:
/*
* Remove local reference to the object (on error, will cause deletion
* of both object and semaphore if present.)
@@ -248,7 +248,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
ACPI_TYPE_MUTEX);
- cleanup:
+cleanup:
/*
* Remove local reference to the object (on error, will cause deletion
* of both object and semaphore if present.)
@@ -347,7 +347,7 @@ acpi_ex_create_region(u8 * aml_start,
status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
- cleanup:
+cleanup:
/* Remove local reference to the object */
@@ -520,7 +520,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_ut_remove_reference(obj_desc);
- exit:
+exit:
/* Remove a reference to the operand */
acpi_ut_remove_reference(operand[1]);
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index c2a65aaf29a..cfd87524342 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -197,7 +197,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
- exit:
+exit:
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(buffer_desc);
} else {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 7e0afe72487..49fb742d61b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -123,12 +123,6 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
}
- /* Exit if Address/Length have been disallowed by the host OS */
-
- if (rgn_desc->common.flags & AOPOBJ_INVALID) {
- return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
- }
-
/*
* Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
@@ -1002,7 +996,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
mask, merged_datum,
field_offset);
- exit:
+exit:
/* Free temporary buffer if we used one */
if (new_buffer) {
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 00bf2987757..65d93607f36 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -388,7 +388,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
*actual_return_desc = return_desc;
- cleanup:
+cleanup:
if (local_operand1 != operand1) {
acpi_ut_remove_reference(local_operand1);
}
@@ -718,7 +718,7 @@ acpi_ex_do_logical_op(u16 opcode,
}
}
- cleanup:
+cleanup:
/* New object was created if implicit conversion performed - delete */
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 2cdd41d8ade..d74cea416ca 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -115,7 +115,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
break;
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -234,7 +234,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
return_ACPI_STATUS(status);
}
@@ -551,7 +551,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_store(return_desc, operand[1], walk_state);
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -1054,7 +1054,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d5088f7030c..d6fa0fce1fc 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -215,7 +215,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/*
* Since the remainder is not returned indirectly, remove a reference to
* it. Only the quotient is returned indirectly.
@@ -445,7 +445,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
break;
}
- store_result_to_target:
+store_result_to_target:
if (ACPI_SUCCESS(status)) {
/*
@@ -462,7 +462,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
}
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -553,7 +553,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- store_logical_result:
+store_logical_result:
/*
* Set return value to according to logical_result. logical TRUE (all ones)
* Default is FALSE (zero)
@@ -562,7 +562,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
return_desc->integer.value = ACPI_UINT64_MAX;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 37656f12f20..bc042adf880 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -124,7 +124,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
return_ACPI_STATUS(status);
}
@@ -252,7 +252,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_store(return_desc, operand[3], walk_state);
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 879b6cd8319..4459e32c683 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 303429bb4d5..9d28867e60d 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -400,6 +400,7 @@ acpi_ex_pci_config_space_handler(u32 function,
switch (function) {
case ACPI_READ:
+ *value = 0;
status = acpi_os_read_pci_configuration(pci_id, pci_register,
value, bit_width);
break;
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index ac04278ad28..1606524312e 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -521,7 +521,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
*/
type = obj_desc->common.type;
- exit:
+exit:
/* Convert internal types to external types */
switch (type) {
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 00e5af7129c..be3f66973ee 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -683,7 +683,7 @@ acpi_ex_resolve_operands(u16 opcode,
return_ACPI_STATUS(status);
}
- next_operand:
+next_operand:
/*
* If more operands needed, decrement stack_ptr to point
* to next operand on stack
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 8d2e866be15..12e6cff54f7 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -560,7 +560,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
}
- exit:
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2d7d22ebc78..3c498dc1636 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 5ee7a814cd9..b4b47db2dee 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -83,11 +84,17 @@ acpi_status acpi_reset(void)
* For I/O space, write directly to the OSL. This bypasses the port
* validation mechanism, which may block a valid write to the reset
* register.
- * Spec section 4.7.3.6 requires register width to be 8.
+ *
+ * NOTE:
+ * The ACPI spec requires the reset register width to be 8, so we
+ * hardcode it here and ignore the FADT value. This maintains
+ * compatibility with other ACPI implementations that have allowed
+ * BIOS code with bad register width values to go unnoticed.
*/
status =
acpi_os_write_port((acpi_io_address) reset_reg->address,
- acpi_gbl_FADT.reset_value, 8);
+ acpi_gbl_FADT.reset_value,
+ ACPI_RESET_REGISTER_WIDTH);
} else {
/* Write the reset value to the reset register */
@@ -119,7 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
******************************************************************************/
acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
- u32 value;
+ u32 value_lo;
+ u32 value_hi;
u32 width;
u64 address;
acpi_status status;
@@ -137,13 +145,8 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
return (status);
}
- /* Initialize entire 64-bit return value to zero */
-
- *return_value = 0;
- value = 0;
-
/*
- * Two address spaces supported: Memory or IO. PCI_Config is
+ * Two address spaces supported: Memory or I/O. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -155,29 +158,35 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
}
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+ value_lo = 0;
+ value_hi = 0;
+
width = reg->bit_width;
if (width == 64) {
width = 32; /* Break into two 32-bit transfers */
}
status = acpi_hw_read_port((acpi_io_address)
- address, &value, width);
+ address, &value_lo, width);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value = value;
if (reg->bit_width == 64) {
/* Read the top 32 bits */
status = acpi_hw_read_port((acpi_io_address)
- (address + 4), &value, 32);
+ (address + 4), &value_hi,
+ 32);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value |= ((u64)value << 32);
}
+
+ /* Set the return value only if status is AE_OK */
+
+ *return_value = (value_lo | ((u64)value_hi << 32));
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
@@ -186,7 +195,7 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
- return (status);
+ return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_read)
@@ -561,10 +570,10 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
break;
}
- cleanup1:
+cleanup1:
acpi_ut_remove_reference(info->return_object);
- cleanup:
+cleanup:
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating Sleep State [%s]",
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f2e669db8b6..15dddc10fc9 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
@@ -166,7 +167,7 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+acpi_status acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
@@ -360,7 +361,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index c5316e5bd4a..14f65f6345b 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -240,7 +240,7 @@ acpi_status acpi_ns_root_initialize(void)
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
/* Save a handle to "_GPE", it is always present */
@@ -424,8 +424,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Current scope has no parent scope */
ACPI_ERROR((AE_INFO,
- "ACPI path has too many parent prefixes (^) "
- "- reached beyond root node"));
+ "%s: Path has too many parent prefixes (^) "
+ "- reached beyond root node",
+ pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 7418c77fde8..48b9c6f1264 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -59,6 +59,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
#endif
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#ifdef ACPI_FUTURE_USAGE
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+#endif /* ACPI_FUTURE_USAGE */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_print_pathname
@@ -609,7 +620,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
}
- cleanup:
+cleanup:
acpi_os_printf("\n");
return (AE_OK);
}
@@ -671,6 +682,136 @@ acpi_ns_dump_objects(acpi_object_type type,
}
#endif /* ACPI_FUTURE_USAGE */
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_one_object_path, acpi_ns_get_max_depth
+ *
+ * PARAMETERS: obj_handle - Node to be dumped
+ * level - Nesting level of the handle
+ * context - Passed into walk_namespace
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. acpi_ns_get_max_depth
+ * computes the maximum nesting depth in the namespace tree, in
+ * order to simplify formatting in acpi_ns_dump_one_object_path.
+ * These procedures are user_functions called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ u32 max_level = *((u32 *)context);
+ char *pathname;
+ struct acpi_namespace_node *node;
+ int path_indent;
+
+ if (!obj_handle) {
+ return (AE_OK);
+ }
+
+ node = acpi_ns_validate_handle(obj_handle);
+ if (!node) {
+
+ /* Ignore bad node during namespace walk */
+
+ return (AE_OK);
+ }
+
+ pathname = acpi_ns_get_external_pathname(node);
+
+ path_indent = 1;
+ if (level <= max_level) {
+ path_indent = max_level - level + 1;
+ }
+
+ acpi_os_printf("%2d%*s%-12s%*s",
+ level, level, " ", acpi_ut_get_type_name(node->type),
+ path_indent, " ");
+
+ acpi_os_printf("%s\n", &pathname[1]);
+ ACPI_FREE(pathname);
+ return (AE_OK);
+}
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ u32 *max_level = (u32 *)context;
+
+ if (level > *max_level) {
+ *max_level = level;
+ }
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_object_paths
+ *
+ * PARAMETERS: type - Object type to be dumped
+ * display_type - 0 or ACPI_DISPLAY_SUMMARY
+ * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
+ * for an effectively unlimited depth.
+ * owner_id - Dump only objects owned by this ID. Use
+ * ACPI_UINT32_MAX to match all owners.
+ * start_handle - Where in namespace to start/end search
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ * acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle)
+{
+ acpi_status status;
+ u32 max_level = 0;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Just lock the entire namespace for the duration of the dump.
+ * We don't want any changes to the namespace during this time,
+ * especially the temporary nodes since we are going to display
+ * them also.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not acquire namespace mutex\n");
+ return;
+ }
+
+ /* Get the max depth of the namespace tree, for formatting later */
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_get_max_depth, NULL,
+ (void *)&max_level, NULL);
+
+ /* Now dump the entire namespace */
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_dump_one_object_path, NULL,
+ (void *)&max_level, NULL);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+#endif /* ACPI_FUTURE_USAGE */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_dump_entry
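The new dumper above is compiled only under ACPI_FUTURE_USAGE and is meant to be driven from debugger code. A hedged call sketch, with argument choices taken from the parameter descriptions in the header comment (the helper function and start node are assumptions for illustration):

#include "acnamesp.h"	/* acpi_ns_dump_object_paths(), acpi_gbl_root_node */

static void dump_all_paths_example(void)
{
	/* All object types, unlimited depth, all owners, from the root node */
	acpi_ns_dump_object_paths(ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY,
				  ACPI_UINT32_MAX, ACPI_UINT32_MAX,
				  acpi_gbl_root_node);
}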
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 409ae80824d..283762511b7 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -69,6 +69,7 @@ static acpi_status
acpi_ns_dump_one_device(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
+ struct acpi_buffer buffer;
struct acpi_device_info *info;
acpi_status status;
u32 i;
@@ -78,15 +79,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
- status = acpi_get_object_info(obj_handle, &info);
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_get_object_info(obj_handle, &buffer);
if (ACPI_SUCCESS(status)) {
+ info = buffer.pointer;
for (i = 0; i < level; i++) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
" HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
- info->hardware_id.string,
+ info->hardware_id.value,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
ACPI_FREE(info);
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 18108bc2e51..963ceef063f 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -314,7 +314,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
"*** Completed evaluation of object %s ***\n",
info->relative_pathname));
- cleanup:
+cleanup:
/*
* Namespace was unlocked by the handling acpi_ns* function, so we
* just free the pathname and return
@@ -486,7 +486,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
parent_node->type = (u8)type;
}
- exit:
+exit:
if (parent_obj) {
acpi_ut_remove_reference(parent_obj);
}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index dd2ceae3f71..3a0423af968 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -213,7 +213,7 @@ acpi_status acpi_ns_initialize_devices(void)
return_ACPI_STATUS(status);
- error_exit:
+error_exit:
ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 0a7badc3179..89ec645e773 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -114,7 +114,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
(void)acpi_tb_release_owner_id(table_index);
}
- unlock:
+unlock:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 35dde8151c0..17785734027 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -140,7 +140,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
pass_number));
status = acpi_ps_parse_aml(walk_state);
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(parse_root);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 098e7666cbc..d2855d9857c 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -271,7 +271,7 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info,
return (AE_OK); /* Successful repair */
}
- type_error_exit:
+type_error_exit:
/* Create a string with all expected types for this predefined object */
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 6d55cef7916..3d5391f9bcb 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -330,7 +330,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
return (status);
- package_too_small:
+package_too_small:
/* Error exit for the case with an incorrect package count */
@@ -555,7 +555,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
return (AE_OK);
- package_too_small:
+package_too_small:
/* The sub-package count was smaller than required */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index f8e71ea6031..a05afff50eb 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -263,7 +263,7 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
return (AE_AML_OPERAND_TYPE);
- object_repaired:
+object_repaired:
/* Object was successfully repaired */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index c84603ee83a..6a25d320b16 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -478,7 +478,7 @@ acpi_ns_repair_CST(struct acpi_evaluate_info *info,
removing = TRUE;
}
- remove_element:
+remove_element:
if (removing) {
acpi_ns_remove_element(return_object, i + 1);
outer_element_count--;
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5d43efc53a6..47420faef07 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,8 @@ acpi_ns_search_and_enter(u32 target_name,
/* Node is an object defined by an External() statement */
- if (flags & ACPI_NS_EXTERNAL) {
+ if (flags & ACPI_NS_EXTERNAL ||
+ (walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 08c0b5beec8..cc2fea94c5f 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -722,7 +722,7 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- cleanup:
+cleanup:
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index b38b4b07f86..e973e311f85 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -138,7 +139,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
- ACPI_FREE(return_buffer->pointer);
+ ACPI_FREE_BUFFER(*return_buffer);
return_buffer->pointer = NULL;
}
@@ -441,7 +442,7 @@ acpi_evaluate_object(acpi_handle handle,
acpi_ex_exit_interpreter();
}
- cleanup:
+cleanup:
/* Free the input parameter list (if we created one) */
@@ -605,14 +606,22 @@ acpi_walk_namespace(acpi_object_type type,
goto unlock_and_exit;
}
+ /* Now we can validate the starting node */
+
+ if (!acpi_ns_validate_handle(start_object)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit2;
+ }
+
status = acpi_ns_walk_namespace(type, start_object, max_depth,
ACPI_NS_WALK_UNLOCK,
descending_callback, ascending_callback,
context, return_value);
+unlock_and_exit2:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
return_ACPI_STATUS(status);
}
@@ -856,7 +865,7 @@ acpi_attach_data(acpi_handle obj_handle,
status = acpi_ns_attach_data(node, handler, data);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
@@ -902,7 +911,7 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
status = acpi_ns_detach_data(node, handler);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
@@ -949,7 +958,7 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
status = acpi_ns_get_attached_data(node, handler, data);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
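With the added acpi_ns_validate_handle() check, acpi_walk_namespace() now rejects a bogus start handle with AE_BAD_PARAMETER instead of walking from it. A self-contained caller sketch (the device-counting callback is purely illustrative):

#include <acpi/acpi.h>

static acpi_status count_devices_cb(acpi_handle handle, u32 level,
				    void *context, void **return_value)
{
	(*(u32 *)context)++;
	return (AE_OK);
}

static u32 count_devices_example(void)
{
	u32 count = 0;

	if (ACPI_FAILURE(acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					     ACPI_UINT32_MAX, count_devices_cb,
					     NULL, &count, NULL)))
		return 0;

	return count;
}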
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83c16443458..3a4bd3ff49a 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -208,7 +209,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
status = AE_OK;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
@@ -496,7 +497,7 @@ acpi_get_object_info(acpi_handle handle,
*return_buffer = info;
status = AE_OK;
- cleanup:
+cleanup:
if (hid) {
ACPI_FREE(hid);
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c0853ef294e..0e6d79e462d 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -200,7 +201,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
status = AE_NULL_ENTRY;
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
@@ -280,7 +281,7 @@ acpi_get_next_object(acpi_object_type type,
*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 86198a9139b..79d9a28dede 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -297,7 +297,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
}
}
- cleanup:
+cleanup:
/* Now we can actually delete the subtree rooted at Op */
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 11b99ab20bb..fcb7a840e99 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -142,7 +142,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
}
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
@@ -185,7 +185,7 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
acpi_dbg_level = acpi_gbl_original_dbg_level;
acpi_dbg_layer = acpi_gbl_original_dbg_layer;
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
@@ -323,7 +323,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
/* walk_state was deleted by parse_aml */
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(op);
/* End optional tracing */
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 80d12994e0d..c99cec9cefd 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -440,7 +440,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info++;
}
- exit:
+exit:
if (!flags_mode) {
/* Round the resource struct length up to the next boundary (32 or 64) */
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info++;
}
- exit:
+exit:
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 480b6b40c5e..aef303d56d8 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -784,7 +784,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
acpi_ut_remove_reference(args[0]);
- cleanup:
+cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 94e3517554f..01e476988aa 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 42a13c0d701..634357d51fe 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -80,16 +80,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
}
}
- /* FACS is the odd table, has no standard ACPI header and no checksum */
+ /* Always calculate checksum, ignore bad checksum if requested */
- if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
- /* Always calculate checksum, ignore bad checksum if requested */
-
- status =
- acpi_tb_verify_checksum(table_desc->pointer,
- table_desc->length);
- }
+ status =
+ acpi_tb_verify_checksum(table_desc->pointer, table_desc->length);
return_ACPI_STATUS(status);
}
@@ -237,10 +231,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
goto release;
}
- print_header:
+print_header:
acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
- release:
+release:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -312,7 +306,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
return (NULL); /* There was no override */
- finish_override:
+finish_override:
ACPI_INFO((AE_INFO,
"%4.4s %p %s table override, new table: %p",
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index dc963f823d2..6866e767ba9 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -135,10 +135,10 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%4.4s %p %05X",
+ ACPI_INFO((AE_INFO, "%4.4s %p %06X",
header->signature, ACPI_CAST_PTR(void, address),
header->length));
- } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+ } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
/* RSDP has no common fields */
@@ -147,7 +147,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
+ ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
ACPI_CAST_PTR(void, address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
@@ -162,7 +162,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
+ "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
local_header.signature, ACPI_CAST_PTR(void, address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
@@ -190,6 +190,16 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
{
u8 checksum;
+ /*
+ * FACS/S3PT:
+ * They are the odd tables, have no standard ACPI header and no checksum
+ */
+
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+ return (AE_OK);
+ }
+
/* Compute the checksum on the table */
checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
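For reference, the rule acpi_tb_verify_checksum() enforces is that every byte of the table, including the checksum byte itself, sums to zero modulo 256; FACS and S3PT are exempt because they carry no standard header. A standalone sketch of that computation:

static u8 table_checksum(const u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 i;

	for (i = 0; i < length; i++)
		sum = (u8)(sum + buffer[i]);

	return sum;	/* zero means the table verifies */
}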
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index bffdfc7b832..3d6bb83aa7e 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -350,7 +350,7 @@ acpi_tb_install_table(acpi_physical_address address,
acpi_tb_delete_table(table_desc);
}
- unmap_and_exit:
+unmap_and_exit:
/* Always unmap the table header that we mapped above */
@@ -430,8 +430,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
*
******************************************************************************/
-acpi_status __init
-acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
+acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
{
struct acpi_table_rsdp *rsdp;
u32 table_entry_size;
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index ad11162482f..db826eaadd1 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
@@ -147,6 +148,8 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
+
/*******************************************************************************
*
* FUNCTION: acpi_reallocate_root_table
@@ -161,7 +164,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
* kernel.
*
******************************************************************************/
-acpi_status acpi_reallocate_root_table(void)
+acpi_status __init acpi_reallocate_root_table(void)
{
acpi_status status;
@@ -181,6 +184,8 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_table_header
@@ -356,6 +361,7 @@ acpi_get_table_with_size(char *signature,
return (AE_NOT_FOUND);
}
+
ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
acpi_status
@@ -367,6 +373,7 @@ acpi_get_table(char *signature,
return acpi_get_table_with_size(signature,
instance, out_table, &tbl_size);
}
+
ACPI_EXPORT_SYMBOL(acpi_get_table)
/*******************************************************************************
@@ -424,7 +431,6 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
-
/*******************************************************************************
*
* FUNCTION: acpi_install_table_handler
@@ -465,7 +471,7 @@ acpi_install_table_handler(acpi_table_handler handler, void *context)
acpi_gbl_table_handler = handler;
acpi_gbl_table_handler_context = context;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -506,7 +512,7 @@ acpi_status acpi_remove_table_handler(acpi_table_handler handler)
acpi_gbl_table_handler = NULL;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0ba9e328d5d..60b5a871833 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -65,7 +66,7 @@ static acpi_status acpi_tb_load_namespace(void);
*
******************************************************************************/
-acpi_status acpi_load_tables(void)
+acpi_status __init acpi_load_tables(void)
{
acpi_status status;
@@ -82,7 +83,7 @@ acpi_status acpi_load_tables(void)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
+ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
/*******************************************************************************
*
@@ -200,7 +201,7 @@ static acpi_status acpi_tb_load_namespace(void)
ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -268,7 +269,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
acpi_gbl_table_handler_context);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 948c95e80d4..e4e1468877c 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -68,8 +68,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
* Note: Sometimes there exists more than one RSDP in memory; the valid
* RSDP has a valid checksum, all others have an invalid checksum.
*/
- if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
- sizeof(ACPI_SIG_RSDP) - 1) != 0) {
+ if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
/* Nope, BAD Signature */
@@ -112,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
*
******************************************************************************/
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
+acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
{
u8 *table_ptr;
u8 *mem_rover;
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index e0ffb580f4b..814267f5271 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -48,6 +48,39 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utalloc")
+#if !defined (USE_NATIVE_ALLOCATE_ZEROED)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_allocate_zeroed
+ *
+ * PARAMETERS: size - Size of the allocation
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
+ * This is the default implementation. Can be overridden via the
+ * USE_NATIVE_ALLOCATE_ZEROED flag.
+ *
+ ******************************************************************************/
+void *acpi_os_allocate_zeroed(acpi_size size)
+{
+ void *allocation;
+
+ ACPI_FUNCTION_ENTRY();
+
+ allocation = acpi_os_allocate(size);
+ if (allocation) {
+
+ /* Clear the memory block */
+
+ ACPI_MEMSET(allocation, 0, size);
+ }
+
+ return (allocation);
+}
+
+#endif /* !USE_NATIVE_ALLOCATE_ZEROED */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_caches
@@ -59,6 +92,7 @@ ACPI_MODULE_NAME("utalloc")
* DESCRIPTION: Create all local caches
*
******************************************************************************/
+
acpi_status acpi_ut_create_caches(void)
{
acpi_status status;
@@ -175,10 +209,10 @@ acpi_status acpi_ut_delete_caches(void)
/* Free memory lists */
- ACPI_FREE(acpi_gbl_global_list);
+ acpi_os_free(acpi_gbl_global_list);
acpi_gbl_global_list = NULL;
- ACPI_FREE(acpi_gbl_ns_node_list);
+ acpi_os_free(acpi_gbl_ns_node_list);
acpi_gbl_ns_node_list = NULL;
#endif
@@ -302,82 +336,3 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
ACPI_MEMSET(buffer->pointer, 0, required_length);
return (AE_OK);
}
-
-#ifdef NOT_USED_BY_LINUX
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate
- *
- * PARAMETERS: size - Size of the allocation
- * component - Component type of caller
- * module - Source file name of caller
- * line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate(acpi_size size,
- u32 component, const char *module, u32 line)
-{
- void *allocation;
-
- ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
-
- /* Check for an inadvertent size of zero bytes */
-
- if (!size) {
- ACPI_WARNING((module, line,
- "Attempt to allocate zero bytes, allocating 1 byte"));
- size = 1;
- }
-
- allocation = acpi_os_allocate(size);
- if (!allocation) {
-
- /* Report allocation error */
-
- ACPI_WARNING((module, line,
- "Could not allocate size %u", (u32) size));
-
- return_PTR(NULL);
- }
-
- return_PTR(allocation);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate_zeroed
- *
- * PARAMETERS: size - Size of the allocation
- * component - Component type of caller
- * module - Source file name of caller
- * line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
- u32 component, const char *module, u32 line)
-{
- void *allocation;
-
- ACPI_FUNCTION_ENTRY();
-
- allocation = acpi_ut_allocate(size, component, module, line);
- if (allocation) {
-
- /* Clear the memory block */
-
- ACPI_MEMSET(allocation, 0, size);
- }
-
- return (allocation);
-}
-#endif
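A host OSL that defines USE_NATIVE_ALLOCATE_ZEROED is expected to provide acpi_os_allocate_zeroed() itself; on Linux that would plausibly map onto kzalloc(), roughly as sketched below (an assumption for illustration, not the kernel's actual OSL code):

#ifdef USE_NATIVE_ALLOCATE_ZEROED
#include <linux/slab.h>

void *acpi_os_allocate_zeroed(acpi_size size)
{
	/* Illustrative mapping onto the host's zeroing allocator */
	return kzalloc(size, GFP_KERNEL);
}
#endif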
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index a877a9647fd..366bfec4b77 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -65,7 +65,7 @@ ACPI_MODULE_NAME("utcache")
acpi_status
acpi_os_create_cache(char *cache_name,
u16 object_size,
- u16 max_depth, struct acpi_memory_list ** return_cache)
+ u16 max_depth, struct acpi_memory_list **return_cache)
{
struct acpi_memory_list *cache;
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1731c27c36a..edff4e653d9 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -552,7 +552,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
*ret_internal_object = internal_object;
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
acpi_ut_remove_reference(internal_object);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -899,7 +899,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
return (status);
- error_exit:
+error_exit:
acpi_ut_remove_reference(target_object);
return (status);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5796e11a067..1a67b3944b3 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
@@ -190,7 +191,7 @@ acpi_debug_print(u32 requested_debug_level,
* Display the module name, current line number, thread ID (if requested),
* current procedure nesting level, and the current procedure name
*/
- acpi_os_printf("%8s-%04ld ", module_name, line_number);
+ acpi_os_printf("%9s-%04ld ", module_name, line_number);
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf("[%u] ", (u32)thread_id);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 11e2e02e161..b3f31dd89a4 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -41,7 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index d6b33f29d32..c07d2227ea4 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -649,7 +649,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
return (AE_OK);
- error_exit:
+error_exit:
ACPI_EXCEPTION((AE_INFO, status,
"Could not update object reference count"));
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 4fd68971019..16fb90506db 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -181,7 +181,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
*return_desc = info->return_object;
- cleanup:
+cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index ff6d9e8aa84..3cf7b597edb 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -41,8 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define EXPORT_ACPI_INTERFACES
+
#define ACPI_DEFINE_EXCEPTION_TABLE
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index d6f26bf8a06..81f9a958445 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -41,9 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define EXPORT_ACPI_INTERFACES
#define DEFINE_ACPI_GLOBALS
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
@@ -289,9 +289,19 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+ /* Event counters */
+
+ acpi_method_count = 0;
+ acpi_sci_count = 0;
+ acpi_gpe_count = 0;
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_fixed_event_count[i] = 0;
+ }
+
#if (!ACPI_REDUCED_HARDWARE)
- /* GPE support */
+ /* GPE/SCI support */
acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -300,6 +310,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_current_gpe_count = 0;
acpi_gbl_global_event_handler = NULL;
+ acpi_gbl_sci_handler_list = NULL;
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -377,6 +388,11 @@ acpi_status acpi_ut_init_globals(void)
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
+
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+
+ACPI_EXPORT_SYMBOL(acpi_gpe_count)
+
ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index fa69071db41..bfca7b4b673 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -184,7 +184,7 @@ acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
sub->length = length;
*return_id = sub;
- cleanup:
+cleanup:
/* On exit, we must delete the return object */
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index aa61f66ee86..517af700399 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -180,7 +180,7 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count)
package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
1) * sizeof(void *));
if (!package_elements) {
- acpi_ut_remove_reference(package_desc);
+ ACPI_FREE(package_desc);
return_PTR(NULL);
}
@@ -356,7 +356,7 @@ u8 acpi_ut_valid_internal_object(void *object)
default:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%p is not not an ACPI operand obj [%s]\n",
+ "%p is not an ACPI operand obj [%s]\n",
object, acpi_ut_get_descriptor_name(object)));
break;
}
@@ -396,7 +396,6 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
/* Mark the descriptor type */
- memset(object, 0, sizeof(union acpi_operand_object));
ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
@@ -461,25 +460,28 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
+ /* Start with the length of the (external) Acpi object */
+
+ length = sizeof(union acpi_object);
+
+ /* A NULL object is allowed, can be a legal uninitialized package element */
+
+ if (!internal_object) {
/*
- * Handle a null object (Could be a uninitialized package
- * element -- which is legal)
+ * Object is NULL, just return the length of union acpi_object
+ * (A NULL union acpi_object is an object of all zeroes.)
*/
- if (!internal_object) {
- *obj_length = sizeof(union acpi_object);
+ *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
return_ACPI_STATUS(AE_OK);
}
- /* Start with the length of the Acpi object */
-
- length = sizeof(union acpi_object);
+ /* A Namespace Node should never appear here */
if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
- /* Object is a named object (reference), just return the length */
+ /* A namespace node should never get here */
- *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 835340b26d3..eb3aca76136 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -148,7 +148,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
ACPI_ERROR((AE_INFO,
"Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cb7fa491dec..2c2accb9e53 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -643,7 +643,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
return (AE_OK);
- invalid_resource:
+invalid_resource:
if (walk_state) {
ACPI_ERROR((AE_INFO,
@@ -652,7 +652,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
}
return (AE_AML_INVALID_RESOURCE_TYPE);
- bad_resource_length:
+bad_resource_length:
if (walk_state) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index a6b729d4c1d..03c4c2febd8 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -161,7 +161,6 @@ union acpi_generic_state *acpi_ut_create_generic_state(void)
if (state) {
/* Initialize */
- memset(state, 0, sizeof(union acpi_generic_state));
state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index cb1e9cc32d5..45c0eb26b33 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -310,7 +310,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* All done, normal exit */
- all_done:
+all_done:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
ACPI_FORMAT_UINT64(return_value)));
@@ -318,7 +318,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
*ret_integer = return_value;
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
/* Base was set/validated above */
if (base == 10) {
@@ -584,3 +584,65 @@ void ut_convert_backslashes(char *pathname)
}
}
#endif
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
+ * functions. This is the size of the Destination buffer.
+ *
+ * RETURN: TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ * the result of the operation will not overflow the output string
+ * buffer.
+ *
+ * NOTE: These functions are typically only helpful for processing
+ * user input and command lines. For most ACPICA code, the
+ * required buffer length is precisely calculated before buffer
+ * allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+ if (ACPI_STRLEN(source) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRCPY(dest, source);
+ return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+ if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRCAT(dest, source);
+ return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length)
+{
+ acpi_size actual_transfer_length;
+
+ actual_transfer_length =
+ ACPI_MIN(max_transfer_length, ACPI_STRLEN(source));
+
+ if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRNCAT(dest, source, max_transfer_length);
+ return (FALSE);
+}
+#endif
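Usage of the new safe string helpers follows their return convention: TRUE means the operation would overflow the destination, so TRUE is the error path. A hedged sketch (the path strings are arbitrary examples):

static void build_name_example(void)
{
	char buffer[32];

	if (acpi_ut_safe_strcpy(buffer, sizeof(buffer), "\\_SB.PCI0")) {
		return;		/* source would not fit */
	}

	if (acpi_ut_safe_strcat(buffer, sizeof(buffer), ".LPCB")) {
		return;		/* concatenation would overflow */
	}
}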
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 160f13f4aab..c0027773ccc 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -130,10 +130,23 @@ void *acpi_ut_allocate_and_track(acpi_size size,
struct acpi_debug_mem_block *allocation;
acpi_status status;
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
allocation =
- acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
+ acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header));
if (!allocation) {
+
+ /* Report allocation error */
+
+ ACPI_WARNING((module, line,
+ "Could not allocate size %u", (u32)size));
+
return (NULL);
}
@@ -179,9 +192,17 @@ void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
struct acpi_debug_mem_block *allocation;
acpi_status status;
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
allocation =
- acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
+ acpi_os_allocate_zeroed(size +
+ sizeof(struct acpi_debug_mem_header));
if (!allocation) {
/* Report allocation error */
@@ -409,7 +430,7 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
element->next = allocation;
}
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 03a211e6e26..be322c83643 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdebug.h"
@@ -60,7 +61,7 @@ ACPI_MODULE_NAME("utxface")
* DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
*
******************************************************************************/
-acpi_status acpi_terminate(void)
+acpi_status __init acpi_terminate(void)
{
acpi_status status;
@@ -104,7 +105,7 @@ acpi_status acpi_terminate(void)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_terminate)
+ACPI_EXPORT_SYMBOL_INIT(acpi_terminate)
#ifndef ACPI_ASL_COMPILER
#ifdef ACPI_FUTURE_USAGE
@@ -207,6 +208,44 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
ACPI_EXPORT_SYMBOL(acpi_get_system_info)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_statistics
+ *
+ * PARAMETERS: stats - Where the statistics are returned
+ *
+ * RETURN: status - the status of the call
+ *
+ * DESCRIPTION: Get the contents of the various system counters
+ *
+ ******************************************************************************/
+acpi_status acpi_get_statistics(struct acpi_statistics *stats)
+{
+ ACPI_FUNCTION_TRACE(acpi_get_statistics);
+
+ /* Parameter validation */
+
+ if (!stats) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Various interrupt-based event counters */
+
+ stats->sci_count = acpi_sci_count;
+ stats->gpe_count = acpi_gpe_count;
+
+ ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count,
+ sizeof(acpi_fixed_event_count));
+
+ /* Other counters */
+
+ stats->method_count = acpi_method_count;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_statistics)
+
/*****************************************************************************
*
* FUNCTION: acpi_install_initialization_handler
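A hedged caller sketch for the new acpi_get_statistics() interface; the field names follow the assignments above, and struct acpi_statistics is assumed to expose exactly those members:

static void show_event_counts_example(void)
{
	struct acpi_statistics stats;

	if (ACPI_FAILURE(acpi_get_statistics(&stats)))
		return;

	acpi_os_printf("SCI: %u  GPE: %u  Methods: %u\n",
		       stats.sci_count, stats.gpe_count, stats.method_count);
}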
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index e966a2e47b7..f7edb88f605 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 41ebaaf8bb1..75efea0539c 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -64,7 +65,7 @@ ACPI_MODULE_NAME("utxfinit")
* called, so any early initialization belongs here.
*
******************************************************************************/
-acpi_status acpi_initialize_subsystem(void)
+acpi_status __init acpi_initialize_subsystem(void)
{
acpi_status status;
@@ -124,7 +125,8 @@ acpi_status acpi_initialize_subsystem(void)
ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)
/*******************************************************************************
*
@@ -138,7 +140,7 @@ ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
+acpi_status __init acpi_enable_subsystem(u32 flags)
{
acpi_status status = AE_OK;
@@ -228,7 +230,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem)
/*******************************************************************************
*
@@ -242,7 +245,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
+acpi_status __init acpi_initialize_objects(u32 flags)
{
acpi_status status = AE_OK;
@@ -314,4 +317,5 @@ acpi_status acpi_initialize_objects(u32 flags)
acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_objects)
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f0c1ce95a0e..786294bb682 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,6 +2,8 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
+ select EFI
+ select UEFI_CPER
depends on X86
help
APEI allows to report errors (for example from the chipset)
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index d1d1bc0a4ee..5d575a95594 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
-apei-y := apei-base.o hest.o cper.o erst.o
+apei-y := apei-base.o hest.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 46f80e2c92f..6d2c49b86b7 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -758,9 +758,9 @@ int apei_osc_setup(void)
.cap.pointer = capbuf,
};
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = 1;
- capbuf[OSC_CONTROL_TYPE] = 0;
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_DWORD] = 1;
+ capbuf[OSC_CONTROL_DWORD] = 0;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
|| ACPI_FAILURE(acpi_run_osc(handle, &context)))
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f220d642136..21ba34a7388 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -122,11 +122,11 @@ struct dentry;
struct dentry *apei_get_debugfs_dir(void);
#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ for (section = (struct acpi_generic_data *)(estatus + 1); \
(void *)section - (void *)estatus < estatus->data_length; \
section = (void *)(section+1) + section->error_data_length)
-static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
{
if (estatus->raw_data_length)
return estatus->raw_data_offset + \
@@ -135,10 +135,10 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
return sizeof(*estatus) + estatus->data_length;
}
-void apei_estatus_print(const char *pfx,
- const struct acpi_hest_generic_status *estatus);
-int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
-int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+void cper_estatus_print(const char *pfx,
+ const struct acpi_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_generic_status *estatus);
+int cper_estatus_check(const struct acpi_generic_status *estatus);
int apei_osc_setup(void);
#endif
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 8ec37bbdd69..a30bc313787 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -75,13 +75,13 @@
#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
- ((struct acpi_hest_generic_status *) \
+ ((struct acpi_generic_status *) \
((struct ghes_estatus_cache *)(estatus_cache) + 1))
#define GHES_ESTATUS_NODE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_node) + (estatus_len))
-#define GHES_ESTATUS_FROM_NODE(estatus_node) \
- ((struct acpi_hest_generic_status *) \
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
bool ghes_disable;
@@ -378,17 +378,17 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
ghes->flags |= GHES_TO_CLEAR;
rc = -EIO;
- len = apei_estatus_len(ghes->estatus);
+ len = cper_estatus_len(ghes->estatus);
if (len < sizeof(*ghes->estatus))
goto err_read_block;
if (len > ghes->generic->error_block_length)
goto err_read_block;
- if (apei_estatus_check_header(ghes->estatus))
+ if (cper_estatus_check_header(ghes->estatus))
goto err_read_block;
ghes_copy_tofrom_phys(ghes->estatus + 1,
buf_paddr + sizeof(*ghes->estatus),
len - sizeof(*ghes->estatus), 1);
- if (apei_estatus_check(ghes->estatus))
+ if (cper_estatus_check(ghes->estatus))
goto err_read_block;
rc = 0;
@@ -409,7 +409,7 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
@@ -419,7 +419,7 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
if (sec_sev == GHES_SEV_CORRECTED &&
(gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
- (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)) {
+ (mem_err->validation_bits & CPER_MEM_VALID_PA)) {
pfn = mem_err->physical_addr >> PAGE_SHIFT;
if (pfn_valid(pfn))
memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
@@ -430,7 +430,7 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
}
if (sev == GHES_SEV_RECOVERABLE &&
sec_sev == GHES_SEV_RECOVERABLE &&
- mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ mem_err->validation_bits & CPER_MEM_VALID_PA) {
pfn = mem_err->physical_addr >> PAGE_SHIFT;
memory_failure_queue(pfn, 0, 0);
}
@@ -438,10 +438,10 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
}
static void ghes_do_proc(struct ghes *ghes,
- const struct acpi_hest_generic_status *estatus)
+ const struct acpi_generic_status *estatus)
{
int sev, sec_sev;
- struct acpi_hest_generic_data *gdata;
+ struct acpi_generic_data *gdata;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -496,7 +496,7 @@ static void ghes_do_proc(struct ghes *ghes,
static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_hest_generic_status *estatus)
+ const struct acpi_generic_status *estatus)
{
static atomic_t seqno;
unsigned int curr_seqno;
@@ -513,12 +513,12 @@ static void __ghes_print_estatus(const char *pfx,
snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
pfx_seq, generic->header.source_id);
- apei_estatus_print(pfx_seq, estatus);
+ cper_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_hest_generic_status *estatus)
+ const struct acpi_generic_status *estatus)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -540,15 +540,15 @@ static int ghes_print_estatus(const char *pfx,
* GHES error status reporting throttle, to report more kinds of
* errors, instead of just most frequently occurred errors.
*/
-static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+static int ghes_estatus_cached(struct acpi_generic_status *estatus)
{
u32 len;
int i, cached = 0;
unsigned long long now;
struct ghes_estatus_cache *cache;
- struct acpi_hest_generic_status *cache_estatus;
+ struct acpi_generic_status *cache_estatus;
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
rcu_read_lock();
for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
cache = rcu_dereference(ghes_estatus_caches[i]);
@@ -571,19 +571,19 @@ static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
struct acpi_hest_generic *generic,
- struct acpi_hest_generic_status *estatus)
+ struct acpi_generic_status *estatus)
{
int alloced;
u32 len, cache_len;
struct ghes_estatus_cache *cache;
- struct acpi_hest_generic_status *cache_estatus;
+ struct acpi_generic_status *cache_estatus;
alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
atomic_dec(&ghes_estatus_cache_alloced);
return NULL;
}
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
cache_len = GHES_ESTATUS_CACHE_LEN(len);
cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
if (!cache) {
@@ -603,7 +603,7 @@ static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
u32 len;
- len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+ len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
len = GHES_ESTATUS_CACHE_LEN(len);
gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
atomic_dec(&ghes_estatus_cache_alloced);
@@ -619,7 +619,7 @@ static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
static void ghes_estatus_cache_add(
struct acpi_hest_generic *generic,
- struct acpi_hest_generic_status *estatus)
+ struct acpi_generic_status *estatus)
{
int i, slot = -1, count;
unsigned long long now, duration, period, max_period = 0;
@@ -751,7 +751,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -765,7 +765,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
ghes_do_proc(estatus_node->ghes, estatus);
if (!ghes_estatus_cached(estatus)) {
@@ -784,7 +784,7 @@ static void ghes_print_queued_estatus(void)
struct llist_node *llnode;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -797,7 +797,7 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
@@ -843,7 +843,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
u32 len, node_len;
struct ghes_estatus_node *estatus_node;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
@@ -851,7 +851,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_estatus_cached(ghes->estatus))
goto next;
/* Save estatus for further processing in IRQ context */
- len = apei_estatus_len(ghes->estatus);
+ len = cper_estatus_len(ghes->estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
node_len);
@@ -923,7 +923,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
rc = -EIO;
if (generic->error_block_length <
- sizeof(struct acpi_hest_generic_status)) {
+ sizeof(struct acpi_generic_status)) {
pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
generic->error_block_length,
generic->header.source_id);
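As a rough illustration of the renamed CPER helpers used throughout ghes.c above, the sketch below walks a generic error status block the same way ghes_do_proc() does. It is not part of the patch; the function name and the pr_info() reporting are hypothetical.

static void example_walk_estatus(const struct acpi_generic_status *estatus)
{
	struct acpi_generic_data *gdata;

	/* Reject malformed blocks before touching any section data */
	if (cper_estatus_check(estatus))
		return;

	apei_estatus_for_each_section(estatus, gdata)
		pr_info("GHES: section with %u bytes of error data\n",
			gdata->error_data_length);
}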
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c9958cd7a4..fbf1aceda8b 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,12 +36,6 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/power_supply.h>
@@ -72,19 +66,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
-enum acpi_battery_files {
- info_tag = 0,
- state_tag,
- alarm_tag,
- ACPI_BATTERY_NUMFILES,
-};
-
-#endif
-
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
{"", 0},
@@ -320,14 +301,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-inline char *acpi_battery_units(struct acpi_battery *battery)
-{
- return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
- "mA" : "mW";
-}
-#endif
-
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -741,279 +714,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static int acpi_battery_print_info(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design capacity: unknown\n");
- else
- seq_printf(seq, "design capacity: %d %sh\n",
- battery->design_capacity,
- acpi_battery_units(battery));
-
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "last full capacity: unknown\n");
- else
- seq_printf(seq, "last full capacity: %d %sh\n",
- battery->full_charge_capacity,
- acpi_battery_units(battery));
-
- seq_printf(seq, "battery technology: %srechargeable\n",
- (!battery->technology)?"non-":"");
-
- if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design voltage: unknown\n");
- else
- seq_printf(seq, "design voltage: %d mV\n",
- battery->design_voltage);
- seq_printf(seq, "design capacity warning: %d %sh\n",
- battery->design_capacity_warning,
- acpi_battery_units(battery));
- seq_printf(seq, "design capacity low: %d %sh\n",
- battery->design_capacity_low,
- acpi_battery_units(battery));
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: %d %sh\n",
- battery->capacity_granularity_1,
- acpi_battery_units(battery));
- seq_printf(seq, "capacity granularity 2: %d %sh\n",
- battery->capacity_granularity_2,
- acpi_battery_units(battery));
- seq_printf(seq, "model number: %s\n", battery->model_number);
- seq_printf(seq, "serial number: %s\n", battery->serial_number);
- seq_printf(seq, "battery type: %s\n", battery->type);
- seq_printf(seq, "OEM info: %s\n", battery->oem_info);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery info\n");
- return result;
-}
-
-static int acpi_battery_print_state(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
-
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x04) ? "critical" : "ok");
- if ((battery->state & 0x01) && (battery->state & 0x02))
- seq_printf(seq,
- "charging state: charging/discharging\n");
- else if (battery->state & 0x01)
- seq_printf(seq, "charging state: discharging\n");
- else if (battery->state & 0x02)
- seq_printf(seq, "charging state: charging\n");
- else
- seq_printf(seq, "charging state: charged\n");
-
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present rate: unknown\n");
- else
- seq_printf(seq, "present rate: %d %s\n",
- battery->rate_now, acpi_battery_units(battery));
-
- if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "remaining capacity: unknown\n");
- else
- seq_printf(seq, "remaining capacity: %d %sh\n",
- battery->capacity_now, acpi_battery_units(battery));
- if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present voltage: unknown\n");
- else
- seq_printf(seq, "present voltage: %d mV\n",
- battery->voltage_now);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery state\n");
-
- return result;
-}
-
-static int acpi_battery_print_alarm(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- if (!acpi_battery_present(battery)) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
- seq_printf(seq, "alarm: ");
- if (!battery->alarm)
- seq_printf(seq, "unsupported\n");
- else
- seq_printf(seq, "%u %sh\n", battery->alarm,
- acpi_battery_units(battery));
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery alarm\n");
- return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[12] = { '\0' };
- struct seq_file *m = file->private_data;
- struct acpi_battery *battery = m->private;
-
- if (!battery || (count > sizeof(alarm_string) - 1))
- return -EINVAL;
- if (!acpi_battery_present(battery)) {
- result = -ENODEV;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = '\0';
- battery->alarm = simple_strtol(alarm_string, NULL, 0);
- result = acpi_battery_set_alarm(battery);
- end:
- if (!result)
- return count;
- return result;
-}
-
-typedef int(*print_func)(struct seq_file *seq, int result);
-
-static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
- acpi_battery_print_info,
- acpi_battery_print_state,
- acpi_battery_print_alarm,
-};
-
-static int acpi_battery_read(int fid, struct seq_file *seq)
-{
- struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery);
- return acpi_print_funcs[fid](seq, result);
-}
-
-#define DECLARE_FILE_FUNCTIONS(_name) \
-static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
-{ \
- return acpi_battery_read(_name##_tag, seq); \
-} \
-static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
-}
-
-DECLARE_FILE_FUNCTIONS(info);
-DECLARE_FILE_FUNCTIONS(state);
-DECLARE_FILE_FUNCTIONS(alarm);
-
-#undef DECLARE_FILE_FUNCTIONS
-
-#define FILE_DESCRIPTION_RO(_name) \
- { \
- .name = __stringify(_name), \
- .mode = S_IRUGO, \
- .ops = { \
- .open = acpi_battery_##_name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-#define FILE_DESCRIPTION_RW(_name) \
- { \
- .name = __stringify(_name), \
- .mode = S_IFREG | S_IRUGO | S_IWUSR, \
- .ops = { \
- .open = acpi_battery_##_name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .write = acpi_battery_write_##_name, \
- .release = single_release, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-static const struct battery_file {
- struct file_operations ops;
- umode_t mode;
- const char *name;
-} acpi_battery_file[] = {
- FILE_DESCRIPTION_RO(info),
- FILE_DESCRIPTION_RO(state),
- FILE_DESCRIPTION_RW(alarm),
-};
-
-#undef FILE_DESCRIPTION_RO
-#undef FILE_DESCRIPTION_RW
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
- int i;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_battery_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
- entry = proc_create_data(acpi_battery_file[i].name,
- acpi_battery_file[i].mode,
- acpi_device_dir(device),
- &acpi_battery_file[i].ops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- }
- return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
- int i;
- if (!acpi_device_dir(device))
- return;
- for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
- remove_proc_entry(acpi_battery_file[i].name,
- acpi_device_dir(device));
-
- remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
- acpi_device_dir(device) = NULL;
-}
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1075,15 +775,6 @@ static int acpi_battery_add(struct acpi_device *device)
result = acpi_battery_update(battery);
if (result)
goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_battery_add_fs(device);
-#endif
- if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
- goto fail;
- }
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -1110,9 +801,6 @@ static int acpi_battery_remove(struct acpi_device *device)
return -EINVAL;
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
@@ -1158,18 +846,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
if (acpi_disabled)
return;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir)
- return;
-#endif
- if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
- return;
- }
- return;
+ acpi_bus_register_driver(&acpi_battery_driver);
}
static int __init acpi_battery_init(void)
@@ -1181,9 +858,6 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
}
module_init(acpi_battery_init);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9515f18898b..078c4f7fe2d 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
{""}
};
-#if CONFIG_ACPI_BLACKLIST_YEAR
-
-static int __init blacklist_by_year(void)
-{
- int year;
-
- /* Doesn't exist? Likely an old system */
- if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
- printk(KERN_ERR PREFIX "no DMI BIOS year, "
- "acpi=force is required to enable ACPI\n" );
- return 1;
- }
- /* 0? Likely a buggy new BIOS */
- if (year == 0) {
- printk(KERN_ERR PREFIX "DMI BIOS year==0, "
- "assuming ACPI-capable machine\n" );
- return 0;
- }
- if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
- printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
- "acpi=force is required to enable ACPI\n",
- year, CONFIG_ACPI_BLACKLIST_YEAR);
- return 1;
- }
- return 0;
-}
-#else
-static inline int blacklist_by_year(void)
-{
- return 0;
-}
-#endif
-
int __init acpi_blacklisted(void)
{
int i = 0;
@@ -166,8 +133,6 @@ int __init acpi_blacklisted(void)
}
}
- blacklisted += blacklist_by_year();
-
dmi_check_system(acpi_osi_dmi_table);
return blacklisted;
@@ -274,6 +239,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba NB100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+ },
+ },
+
+ /*
+ * The following machines have broken backlight support when reporting
+ * the Windows 2012 OSI, so disable it until their support is fixed.
+ */
+ {
.callback = dmi_disable_osi_win8,
.ident = "ASUS Zenbook Prime UX31A",
.matches = {
@@ -291,12 +269,60 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_win8,
- .ident = "Lenovo ThinkPad Edge E530",
+ .ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T430",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b587ec8257b..bba9b72e25f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -174,7 +174,7 @@ static void acpi_print_osc_error(acpi_handle handle,
printk("\n");
}
-static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
+acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
{
int i;
static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
@@ -195,6 +195,7 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
}
return AE_OK;
}
+EXPORT_SYMBOL_GPL(acpi_str_to_uuid);
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
{
@@ -255,7 +256,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
acpi_print_osc_error(handle, context,
"_OSC invalid revision");
if (errors & OSC_CAPABILITIES_MASK_ERROR) {
- if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+ if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD]
& OSC_QUERY_ENABLE)
goto out_success;
status = AE_SUPPORT;
@@ -295,30 +296,30 @@ static void acpi_bus_osc_support(void)
};
acpi_handle handle;
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
#endif
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
#endif
#ifdef ACPI_HOTPLUG_OST
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
#endif
if (!ghes_disable)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
u32 *capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_TYPE)
+ if (context.ret.length > OSC_SUPPORT_DWORD)
osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
kfree(context.ret.pointer);
}
/* do we need to check other returned cap? Sounds no */
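A minimal sketch of the renamed _OSC dword indexing used in the two hunks above (illustrative only; handle lookup, UUID conversion and the acpi_run_osc() call are elided, and the helper name is hypothetical):

static void example_fill_osc_capabilities(u32 capbuf[3])
{
	capbuf[OSC_QUERY_DWORD]   = OSC_QUERY_ENABLE;	/* query, do not request control */
	capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT;	/* features the OS supports */
	capbuf[OSC_CONTROL_DWORD] = 0;			/* no control bits requested */
}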
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a55773801c5..c971929d75c 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -383,18 +383,15 @@ static int acpi_button_add(struct acpi_device *device)
switch (button->type) {
case ACPI_BUTTON_TYPE_POWER:
- input->evbit[0] = BIT_MASK(EV_KEY);
- set_bit(KEY_POWER, input->keybit);
+ input_set_capability(input, EV_KEY, KEY_POWER);
break;
case ACPI_BUTTON_TYPE_SLEEP:
- input->evbit[0] = BIT_MASK(EV_KEY);
- set_bit(KEY_SLEEP, input->keybit);
+ input_set_capability(input, EV_KEY, KEY_SLEEP);
break;
case ACPI_BUTTON_TYPE_LID:
- input->evbit[0] = BIT_MASK(EV_SW);
- set_bit(SW_LID, input->swbit);
+ input_set_capability(input, EV_SW, SW_LID);
break;
}
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644
index 6c9ee68e46f..00000000000
--- a/drivers/acpi/cm_sbs.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_BATTERY_CLASS "battery"
-#define _COMPONENT ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_ac_dir)
- acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
- if (acpi_ac_dir) {
- lock_ac_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_AC_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_ac_dir_param)
- lock_ac_dir_cnt--;
- if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
- remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
- acpi_ac_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_battery_dir) {
- acpi_battery_dir =
- proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
- }
- if (acpi_battery_dir) {
- lock_battery_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_BATTERY_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_battery_dir_param)
- lock_battery_dir_cnt--;
- if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
- && acpi_battery_dir) {
- remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
- acpi_battery_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
- return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a94383d1f35..b3480cf7db1 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -22,16 +22,12 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/device.h>
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#include "internal.h"
#define _COMPONENT ACPI_POWER_COMPONENT
@@ -118,9 +114,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
/*
* If we were unsure about the device parent's power state up to this
* point, the fact that the device is in D0 implies that the parent has
- * to be in D0 too.
+ * to be in D0 too, except if ignore_parent is set.
*/
- if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+ if (!device->power.flags.ignore_parent && device->parent
+ && device->parent->power.state == ACPI_STATE_UNKNOWN
&& result == ACPI_STATE_D0)
device->parent->power.state = ACPI_STATE_D0;
@@ -177,7 +174,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
acpi_power_state_string(state));
return -ENODEV;
}
- if (device->parent && (state < device->parent->power.state)) {
+ if (!device->power.flags.ignore_parent &&
+ device->parent && (state < device->parent->power.state)) {
dev_warn(&device->dev,
"Cannot transition to power state %s for parent in %s\n",
acpi_power_state_string(state),
@@ -546,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
*/
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
int ret, d_min, d_max;
@@ -654,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
if (!device_run_wake(phys_dev))
return -EINVAL;
- handle = DEVICE_ACPI_HANDLE(phys_dev);
+ handle = ACPI_HANDLE(phys_dev);
if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
__func__);
@@ -698,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
if (!device_can_wakeup(dev))
return -EINVAL;
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
return -ENODEV;
@@ -720,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
*/
struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 05ea4be01a8..dcd73ccb514 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -441,7 +441,7 @@ static void handle_dock(struct dock_station *ds, int dock)
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ unsigned long long value;
acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
@@ -450,12 +450,10 @@ static void handle_dock(struct dock_station *ds, int dock)
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
- status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+ status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
status);
-
- kfree(buffer.pointer);
}
static inline void dock(struct dock_station *ds)
@@ -671,39 +669,20 @@ static void dock_notify(struct dock_station *ds, u32 event)
}
}
-struct dock_data {
- struct dock_station *ds;
- u32 event;
-};
-
-static void acpi_dock_deferred_cb(void *context)
+static void acpi_dock_deferred_cb(void *data, u32 event)
{
- struct dock_data *data = context;
-
acpi_scan_lock_acquire();
- dock_notify(data->ds, data->event);
+ dock_notify(data, event);
acpi_scan_lock_release();
- kfree(data);
}
static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
{
- struct dock_data *dd;
-
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return;
- dd = kmalloc(sizeof(*dd), GFP_KERNEL);
- if (dd) {
- acpi_status status;
-
- dd->ds = data;
- dd->event = event;
- status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
- if (ACPI_FAILURE(status))
- kfree(dd);
- }
+ acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
}
/**
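The dock.c change above is one user of acpi_hotplug_execute(), which is introduced in the drivers/acpi/osl.c hunks later in this diff. A minimal sketch of the general pattern, with hypothetical handler and callback names: the notify handler stays lightweight and the real work runs from the dedicated hotplug workqueue.

static void example_deferred_cb(void *data, u32 event)
{
	/* Runs from kacpi_hotplug_wq; safe to take the scan lock here */
	acpi_scan_lock_acquire();
	/* ... handle 'event' for the device described by 'data' ... */
	acpi_scan_lock_release();
}

static void example_notify_handler(acpi_handle handle, u32 event, void *data)
{
	/* Never do heavy work in notify context; defer it instead */
	acpi_hotplug_execute(example_deferred_cb, data, event);
}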
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a06d9837470..ba5b56db9d2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -28,6 +28,7 @@
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : EC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -49,9 +50,6 @@
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
#define ACPI_EC_FILE_INFO "info"
-#undef PREFIX
-#define PREFIX "ACPI: EC: "
-
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
@@ -131,26 +129,26 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
- pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+ pr_debug("---> status = 0x%2.2x\n", x);
return x;
}
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
- pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+ pr_debug("---> data = 0x%2.2x\n", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
- pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+ pr_debug("<--- command = 0x%2.2x\n", command);
outb(command, ec->command_addr);
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
- pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+ pr_debug("<--- data = 0x%2.2x\n", data);
outb(data, ec->data_addr);
}
@@ -175,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec)
static void advance_transaction(struct acpi_ec *ec, u8 status)
{
unsigned long flags;
- struct transaction *t = ec->curr;
+ struct transaction *t;
spin_lock_irqsave(&ec->lock, flags);
+ t = ec->curr;
if (!t)
goto unlock;
if (t->wlen > t->wi) {
@@ -241,7 +240,7 @@ static int ec_poll(struct acpi_ec *ec)
}
advance_transaction(ec, acpi_ec_read_status(ec));
} while (time_before(jiffies, delay));
- pr_debug(PREFIX "controller reset, restart transaction\n");
+ pr_debug("controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
@@ -309,12 +308,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
}
}
if (ec_wait_ibf0(ec)) {
- pr_err(PREFIX "input buffer is not empty, "
+ pr_err("input buffer is not empty, "
"aborting transaction\n");
status = -ETIME;
goto end;
}
- pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
@@ -331,12 +330,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
/* It is safe to enable the GPE outside of the transaction. */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ec_storm_threshold) {
- pr_info(PREFIX "GPE storm detected(%d GPEs), "
+ pr_info("GPE storm detected(%d GPEs), "
"transactions will use polling mode\n",
t->irq_count);
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
- pr_debug(PREFIX "transaction end\n");
+ pr_debug("transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
@@ -570,12 +569,12 @@ static void acpi_ec_run(void *cxt)
struct acpi_ec_query_handler *handler = cxt;
if (!handler)
return;
- pr_debug(PREFIX "start query execution\n");
+ pr_debug("start query execution\n");
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
- pr_debug(PREFIX "stop query execution\n");
+ pr_debug("stop query execution\n");
kfree(handler);
}
@@ -593,7 +592,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec)
if (!copy)
return -ENOMEM;
memcpy(copy, handler, sizeof(*copy));
- pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+ pr_debug("push query execution (0x%2x) on queue\n",
+ value);
return acpi_os_execute((copy->func) ?
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
acpi_ec_run, copy);
@@ -616,7 +616,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
- pr_debug(PREFIX "push gpe query to the queue\n");
+ pr_debug("push gpe query to the queue\n");
return acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_ec_gpe_query, ec);
}
@@ -630,7 +630,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
struct acpi_ec *ec = data;
u8 status = acpi_ec_read_status(ec);
- pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+ pr_debug("~~~> interrupt, status:0x%02x\n", status);
advance_transaction(ec, status);
if (ec_transaction_done(ec) &&
@@ -776,7 +776,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
* The AE_NOT_FOUND error will be ignored and OS
* continue to initialize EC.
*/
- printk(KERN_ERR "Fail in evaluating the _REG object"
+ pr_err("Fail in evaluating the _REG object"
" of EC device. Broken bios is suspected.\n");
} else {
acpi_remove_gpe_handler(NULL, ec->gpe,
@@ -795,10 +795,10 @@ static void ec_remove_handlers(struct acpi_ec *ec)
acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
- pr_err(PREFIX "failed to remove space handler\n");
+ pr_err("failed to remove space handler\n");
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
- pr_err(PREFIX "failed to remove gpe handler\n");
+ pr_err("failed to remove gpe handler\n");
clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}
@@ -840,7 +840,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+ pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
ret = ec_install_handlers(ec);
@@ -931,7 +931,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
/* MSI EC needs special treatment, enable it */
static int ec_flag_msi(const struct dmi_system_id *id)
{
- printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+ pr_debug("Detected MSI hardware, enabling workarounds.\n");
EC_FLAGS_MSI = 1;
EC_FLAGS_VALIDATE_ECDT = 1;
return 0;
@@ -1010,7 +1010,7 @@ int __init acpi_ec_ecdt_probe(void)
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_SUCCESS(status)) {
- pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+ pr_info("EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
boot_ec->gpe = ecdt_ptr->gpe;
@@ -1030,7 +1030,7 @@ int __init acpi_ec_ecdt_probe(void)
/* This workaround is needed only on some broken machines,
* which require early EC, but fail to provide ECDT */
- printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+ pr_debug("Look up EC in DSDT\n");
status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
boot_ec, NULL);
/* Check that acpi_get_devices actually find something */
@@ -1042,7 +1042,7 @@ int __init acpi_ec_ecdt_probe(void)
saved_ec->data_addr != boot_ec->data_addr ||
saved_ec->gpe != boot_ec->gpe ||
saved_ec->handle != boot_ec->handle)
- pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+ pr_info("ASUSTek keeps feeding us with broken "
"ECDT tables, which are very hard to workaround. "
"Trying to use DSDT EC info instead. Please send "
"output of acpidump to linux-acpi@vger.kernel.org\n");
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 8247fcdde07..cae3b387b86 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -78,15 +78,17 @@ enum {
#define ACPI_GENL_VERSION 0x01
#define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group"
+static const struct genl_multicast_group acpi_event_mcgrps[] = {
+ { .name = ACPI_GENL_MCAST_GROUP_NAME, },
+};
+
static struct genl_family acpi_event_genl_family = {
.id = GENL_ID_GENERATE,
.name = ACPI_GENL_FAMILY_NAME,
.version = ACPI_GENL_VERSION,
.maxattr = ACPI_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group acpi_event_mcgrp = {
- .name = ACPI_GENL_MCAST_GROUP_NAME,
+ .mcgrps = acpi_event_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps),
};
int acpi_bus_generate_netlink_event(const char *device_class,
@@ -127,11 +129,6 @@ int acpi_bus_generate_netlink_event(const char *device_class,
}
event = nla_data(attr);
- if (!event) {
- nlmsg_free(skb);
- return -EINVAL;
- }
-
memset(event, 0, sizeof(struct acpi_genl_event));
strcpy(event->device_class, device_class);
@@ -146,7 +143,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
return result;
}
- genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
+ genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
return 0;
}
@@ -154,18 +151,7 @@ EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
static int acpi_event_genetlink_init(void)
{
- int result;
-
- result = genl_register_family(&acpi_event_genl_family);
- if (result)
- return result;
-
- result = genl_register_mc_group(&acpi_event_genl_family,
- &acpi_event_mcgrp);
- if (result)
- genl_unregister_family(&acpi_event_genl_family);
-
- return result;
+ return genl_register_family(&acpi_event_genl_family);
}
#else
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 41ade6570bc..ba3da88cee4 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -168,7 +168,7 @@ static int acpi_fan_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- end:
+end:
return result;
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 10f0f40587b..a22a295edb6 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id)
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
- struct acpi_device *acpi_dev;
- acpi_status status;
+ struct acpi_device *acpi_dev = NULL;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
- if (ACPI_HANDLE(dev)) {
+ if (ACPI_COMPANION(dev)) {
if (handle) {
- dev_warn(dev, "ACPI handle is already set\n");
+ dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
} else {
- handle = ACPI_HANDLE(dev);
+ acpi_dev = ACPI_COMPANION(dev);
}
+ } else {
+ acpi_bus_get_device(handle, &acpi_dev);
}
- if (!handle)
+ if (!acpi_dev)
return -EINVAL;
+ get_device(&acpi_dev->dev);
get_device(dev);
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- goto err;
-
physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
@@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
dev_warn(dev, "Already associated with ACPI node\n");
kfree(physical_node);
- if (ACPI_HANDLE(dev) != handle)
+ if (ACPI_COMPANION(dev) != acpi_dev)
goto err;
put_device(dev);
+ put_device(&acpi_dev->dev);
return 0;
}
if (pn->node_id == node_id) {
@@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
- if (!ACPI_HANDLE(dev))
- ACPI_HANDLE_SET(dev, acpi_dev->handle);
+ if (!ACPI_COMPANION(dev))
+ ACPI_COMPANION_SET(dev, acpi_dev);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
return 0;
err:
- ACPI_HANDLE_SET(dev, NULL);
+ ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
+ put_device(&acpi_dev->dev);
return retval;
}
EXPORT_SYMBOL_GPL(acpi_bind_one);
int acpi_unbind_one(struct device *dev)
{
+ struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
struct acpi_device_physical_node *entry;
- struct acpi_device *acpi_dev;
- acpi_status status;
- if (!ACPI_HANDLE(dev))
+ if (!acpi_dev)
return 0;
- status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__);
- return -EINVAL;
- }
-
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
@@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev)
acpi_physnode_link_name(physnode_name, entry->node_id);
sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
- ACPI_HANDLE_SET(dev, NULL);
- /* acpi_bind_one() increase refcnt by one. */
+ ACPI_COMPANION_SET(dev, NULL);
+ /* Drop references taken by acpi_bind_one(). */
put_device(dev);
+ put_device(&acpi_dev->dev);
kfree(entry);
break;
}
@@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
+void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr)
+{
+ struct acpi_device *adev;
+
+ if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev))
+ ACPI_COMPANION_SET(dev, adev);
+}
+EXPORT_SYMBOL_GPL(acpi_preset_companion);
+
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
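acpi_preset_companion(), added above, lets a bus driver attach the ACPI companion of a child device before registering it. A hedged sketch of a caller follows; the wrapper name is hypothetical and the parent handle and _ADR value are assumed to be known from the bus driver's own enumeration.

static void example_attach_companion(struct device *child,
				     acpi_handle parent, u64 adr)
{
	/* Look up the ACPI child by _ADR under 'parent' and, if found,
	 * set it as the ACPI companion of 'child'. */
	acpi_preset_companion(child, parent, adr);
}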
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 20f423337e1..a29739c0ba7 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -26,11 +26,6 @@
acpi_status acpi_os_initialize1(void);
int init_acpi_device_notify(void);
int acpi_scan_init(void);
-#ifdef CONFIG_ACPI_PCI_SLOT
-void acpi_pci_slot_init(void);
-#else
-static inline void acpi_pci_slot_init(void) { }
-#endif
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
void acpi_pci_root_hp_init(void);
@@ -92,6 +87,7 @@ void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
int acpi_bind_one(struct device *dev, acpi_handle handle);
int acpi_unbind_one(struct device *dev);
+void acpi_bus_device_eject(void *data, u32 ost_src);
/* --------------------------------------------------------------------------
Power Resource
@@ -169,9 +165,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
Video
-------------------------------------------------------------------------- */
#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-bool acpi_video_backlight_quirks(void);
-#else
-static inline bool acpi_video_backlight_quirks(void) { return false; }
+bool acpi_osi_is_win8(void);
#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 2e82e5d7693..a2343a1d9e0 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -73,7 +73,7 @@ int acpi_map_pxm_to_node(int pxm)
{
int node = pxm_to_node_map[pxm];
- if (node < 0) {
+ if (node == NUMA_NO_NODE) {
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
@@ -334,7 +334,7 @@ int acpi_get_pxm(acpi_handle h)
int acpi_get_node(acpi_handle *handle)
{
- int pxm, node = -1;
+ int pxm, node = NUMA_NO_NODE;
pxm = acpi_get_pxm(handle);
if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e5f416c7f66..54a20ff4b86 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -61,7 +61,6 @@ struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
struct work_struct work;
- int wait;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -569,8 +568,10 @@ static const char * const table_sigs[] = {
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
void __init acpi_initrd_override(void *data, size_t size)
{
@@ -579,8 +580,6 @@ void __init acpi_initrd_override(void *data, size_t size)
struct acpi_table_header *table;
char cpio_path[32] = "kernel/firmware/acpi/";
struct cpio_data file;
- struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
- char *p;
if (data == NULL || size == 0)
return;
@@ -625,8 +624,8 @@ void __init acpi_initrd_override(void *data, size_t size)
table->signature, cpio_path, file.name, table->length);
all_tables_size += table->length;
- early_initrd_files[table_nr].data = file.data;
- early_initrd_files[table_nr].size = file.size;
+ acpi_initrd_files[table_nr].data = file.data;
+ acpi_initrd_files[table_nr].size = file.size;
table_nr++;
}
if (table_nr == 0)
@@ -652,14 +651,34 @@ void __init acpi_initrd_override(void *data, size_t size)
memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
- p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+ /*
+ * early_ioremap() can only map 256 KiB at a time, so mapping all the
+ * tables at once would exceed that limit. Map and copy them chunk by
+ * chunk instead, the same way relocate_initrd() does.
+ */
for (no = 0; no < table_nr; no++) {
- memcpy(p + total_offset, early_initrd_files[no].data,
- early_initrd_files[no].size);
- total_offset += early_initrd_files[no].size;
+ unsigned char *src_p = acpi_initrd_files[no].data;
+ phys_addr_t size = acpi_initrd_files[no].size;
+ phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+ phys_addr_t slop, clen;
+ char *dest_p;
+
+ total_offset += size;
+
+ while (size) {
+ slop = dest_addr & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAP_CHUNK_SIZE - slop)
+ clen = MAP_CHUNK_SIZE - slop;
+ dest_p = early_ioremap(dest_addr & PAGE_MASK,
+ clen + slop);
+ memcpy(dest_p + slop, src_p, clen);
+ early_iounmap(dest_p, clen + slop);
+ src_p += clen;
+ dest_addr += clen;
+ size -= clen;
+ }
}
- early_iounmap(p, all_tables_size);
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
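For context on the chunked copy above: early_ioremap() is backed by a fixed pool of fixmap slots, so MAP_CHUNK_SIZE works out to NR_FIX_BTMAPS pages per mapping. The sketch below is only illustrative and assumes 4 KiB pages and NR_FIX_BTMAPS == 64, the usual x86 values.

static void __init example_show_chunk_size(void)
{
	/* With 4 KiB pages and 64 btmap slots this prints 262144,
	 * i.e. 256 KiB per early_ioremap() call. */
	pr_info("ACPI override copy chunk: %lu bytes\n",
		(unsigned long)MAP_CHUNK_SIZE);
}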
@@ -820,7 +839,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ msleep(ms);
}
void acpi_os_stall(u32 us)
@@ -1067,9 +1086,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
- if (dpc->wait)
- acpi_os_wait_events_complete();
-
dpc->function(dpc->context);
kfree(dpc);
}
@@ -1089,8 +1105,8 @@ static void acpi_os_execute_deferred(struct work_struct *work)
*
******************************************************************************/
-static acpi_status __acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
@@ -1117,20 +1133,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
dpc->context = context;
/*
- * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
- * because the hotplug code may call driver .remove() functions,
- * which invoke flush_scheduled_work/acpi_os_wait_events_complete
- * to flush these workqueues.
- *
* To prevent lockdep from complaining unnecessarily, make sure that
* there is a different static lockdep key for each workqueue by using
* INIT_WORK() for each of them separately.
*/
- if (hp) {
- queue = kacpi_hotplug_wq;
- dpc->wait = 1;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else if (type == OSL_NOTIFY_HANDLER) {
+ if (type == OSL_NOTIFY_HANDLER) {
queue = kacpi_notify_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
} else {
@@ -1155,28 +1162,59 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
}
return status;
}
+EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context)
+void acpi_os_wait_events_complete(void)
{
- return __acpi_os_execute(type, function, context, 0);
+ flush_workqueue(kacpid_wq);
+ flush_workqueue(kacpi_notify_wq);
}
-EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
- void *context)
+struct acpi_hp_work {
+ struct work_struct work;
+ acpi_hp_callback func;
+ void *data;
+ u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
{
- return __acpi_os_execute(0, function, context, 1);
+ struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+ acpi_os_wait_events_complete();
+ hpw->func(hpw->data, hpw->src);
+ kfree(hpw);
}
-EXPORT_SYMBOL(acpi_os_hotplug_execute);
-void acpi_os_wait_events_complete(void)
+acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
{
- flush_workqueue(kacpid_wq);
- flush_workqueue(kacpi_notify_wq);
+ struct acpi_hp_work *hpw;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Scheduling function [%p(%p, %u)] for deferred execution.\n",
+ func, data, src));
+
+ hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+ if (!hpw)
+ return AE_NO_MEMORY;
+
+ INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+ hpw->func = func;
+ hpw->data = data;
+ hpw->src = src;
+ /*
+ * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+ * the hotplug code may call driver .remove() functions, which may
+ * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+ * these workqueues.
+ */
+ if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+ kfree(hpw);
+ return AE_ERROR;
+ }
+ return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
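acpi_hotplug_execute() above packages the callback and its arguments into a heap-allocated work item and queues it on the dedicated kacpi_hotplug_wq, so hotplug handlers can flush kacpid_wq/kacpi_notify_wq without waiting on themselves. A stripped-down sketch of that defer-to-private-workqueue pattern follows; all example_* names and the error codes are illustrative, and the workqueue is assumed to be created elsewhere.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_work {
	struct work_struct work;
	void (*func)(void *data, u32 src);
	void *data;
	u32 src;
};

static struct workqueue_struct *example_wq;	/* assumed: created with alloc_workqueue() at init */

static void example_work_fn(struct work_struct *work)
{
	struct example_work *ew = container_of(work, struct example_work, work);

	ew->func(ew->data, ew->src);
	kfree(ew);			/* work item owns itself once queued */
}

/* Queue @func(@data, @src) on the private workqueue; 0 on success, -errno otherwise. */
static int example_execute(void (*func)(void *, u32), void *data, u32 src)
{
	struct example_work *ew = kmalloc(sizeof(*ew), GFP_KERNEL);

	if (!ew)
		return -ENOMEM;
	INIT_WORK(&ew->work, example_work_fn);
	ew->func = func;
	ew->data = data;
	ew->src = src;
	if (!queue_work(example_wq, &ew->work)) {
		kfree(ew);		/* already pending: caller keeps ownership of the request */
		return -EBUSY;
	}
	return 0;
}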
@@ -1335,7 +1373,7 @@ static int __init acpi_os_name_setup(char *str)
if (!str || !*str)
return 0;
- for (; count-- && str && *str; str++) {
+ for (; count-- && *str; str++) {
if (isalnum(*str) || *str == ' ' || *str == ':')
*p++ = *str;
else if (*str == '\'' || *str == '"')
@@ -1825,25 +1863,3 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
{
__acpi_os_prepare_extended_sleep = func;
}
-
-
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
- void (*func)(struct work_struct *work))
-{
- struct acpi_hp_work *hp_work;
- int ret;
-
- hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
- if (!hp_work)
- return;
-
- hp_work->handle = handle;
- hp_work->type = type;
- hp_work->context = context;
-
- INIT_WORK(&hp_work->work, func);
- ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
- if (!ret)
- kfree(hp_work);
-}
-EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d3874f42565..0703bff5e60 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -39,6 +39,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/apei.h>
+#include "internal.h"
+
#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PCI_COMPONENT
@@ -49,10 +51,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_pci_root_remove(struct acpi_device *device);
-#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
- | OSC_ACTIVE_STATE_PWR_SUPPORT \
- | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
- | OSC_MSI_SUPPORT)
+#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
+ | OSC_PCI_ASPM_SUPPORT \
+ | OSC_PCI_CLOCK_PM_SUPPORT \
+ | OSC_PCI_MSI_SUPPORT)
static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
@@ -127,6 +129,55 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
return AE_OK;
}
+struct pci_osc_bit_struct {
+ u32 bit;
+ char *desc;
+};
+
+static struct pci_osc_bit_struct pci_osc_support_bit[] = {
+ { OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
+ { OSC_PCI_ASPM_SUPPORT, "ASPM" },
+ { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
+ { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
+ { OSC_PCI_MSI_SUPPORT, "MSI" },
+};
+
+static struct pci_osc_bit_struct pci_osc_control_bit[] = {
+ { OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
+ { OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
+ { OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
+ { OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
+ { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+};
+
+static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
+ struct pci_osc_bit_struct *table, int size)
+{
+ char buf[80];
+ int i, len = 0;
+ struct pci_osc_bit_struct *entry;
+
+ buf[0] = '\0';
+ for (i = 0, entry = table; i < size; i++, entry++)
+ if (word & entry->bit)
+ len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
+ len ? " " : "", entry->desc);
+
+ dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
+}
+
+static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
+{
+ decode_osc_bits(root, msg, word, pci_osc_support_bit,
+ ARRAY_SIZE(pci_osc_support_bit));
+}
+
+static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
+{
+ decode_osc_bits(root, msg, word, pci_osc_control_bit,
+ ARRAY_SIZE(pci_osc_control_bit));
+}
+
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
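decode_osc_bits() above turns an _OSC capability word into a readable list by walking a bit/name table and appending the names of set bits. The same table-driven decode is sketched below in isolation; the example_* names are placeholders, and scnprintf() is used instead of the hunk's snprintf() so the running offset can never overshoot the buffer.

#include <linux/kernel.h>	/* scnprintf() */
#include <linux/types.h>

struct example_bit_name {
	u32 bit;
	const char *name;
};

/* Append the name of every bit set in @word to @buf, space separated. */
static void example_decode_bits(char *buf, size_t len, u32 word,
				const struct example_bit_name *tbl, int n)
{
	int i, used = 0;

	buf[0] = '\0';
	for (i = 0; i < n; i++)
		if (word & tbl[i].bit)
			used += scnprintf(buf + used, len - used, "%s%s",
					  used ? " " : "", tbl[i].name);
}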
@@ -158,14 +209,14 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
support &= OSC_PCI_SUPPORT_MASKS;
support |= root->osc_support_set;
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = support;
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_DWORD] = support;
if (control) {
*control &= OSC_PCI_CONTROL_MASKS;
- capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
} else {
/* Run _OSC query only with existing controls. */
- capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+ capbuf[OSC_CONTROL_DWORD] = root->osc_control_set;
}
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
@@ -180,11 +231,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
{
acpi_status status;
- acpi_handle tmp;
- status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
mutex_lock(&osc_lock);
status = acpi_pci_query_osc(root, flags, NULL);
mutex_unlock(&osc_lock);
@@ -316,9 +363,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
struct acpi_pci_root *root;
- acpi_status status;
+ acpi_status status = AE_OK;
u32 ctrl, capbuf[3];
- acpi_handle tmp;
if (!mask)
return AE_BAD_PARAMETER;
@@ -331,10 +377,6 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
if (!root)
return AE_NOT_EXIST;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
-
mutex_lock(&osc_lock);
*mask = ctrl | root->osc_control_set;
@@ -349,17 +391,21 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
goto out;
if (ctrl == *mask)
break;
+ decode_osc_control(root, "platform does not support",
+ ctrl & ~(*mask));
ctrl = *mask;
}
if ((ctrl & req) != req) {
+ decode_osc_control(root, "not requesting control; platform does not support",
+ req & ~(ctrl));
status = AE_SUPPORT;
goto out;
}
- capbuf[OSC_QUERY_TYPE] = 0;
- capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
- capbuf[OSC_CONTROL_TYPE] = ctrl;
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
+ capbuf[OSC_CONTROL_DWORD] = ctrl;
status = acpi_pci_run_osc(handle, capbuf, mask);
if (ACPI_SUCCESS(status))
root->osc_control_set = *mask;
@@ -369,6 +415,87 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+ int *clear_aspm)
+{
+ u32 support, control, requested;
+ acpi_status status;
+ struct acpi_device *device = root->device;
+ acpi_handle handle = device->handle;
+
+ /*
+ * All supported architectures that use ACPI have support for
+ * PCI domains, so we indicate this in _OSC support capabilities.
+ */
+ support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ if (pci_ext_cfg_avail())
+ support |= OSC_PCI_EXT_CONFIG_SUPPORT;
+ if (pcie_aspm_support_enabled())
+ support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
+ if (pci_msi_enabled())
+ support |= OSC_PCI_MSI_SUPPORT;
+
+ decode_osc_support(root, "OS supports", support);
+ status = acpi_pci_osc_support(root, support);
+ if (ACPI_FAILURE(status)) {
+ dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+ acpi_format_exception(status));
+ *no_aspm = 1;
+ return;
+ }
+
+ if (pcie_ports_disabled) {
+ dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
+ return;
+ }
+
+ if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
+ decode_osc_support(root, "not requesting OS control; OS requires",
+ ACPI_PCIE_REQ_SUPPORT);
+ return;
+ }
+
+ control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
+ | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+ | OSC_PCI_EXPRESS_PME_CONTROL;
+
+ if (pci_aer_available()) {
+ if (aer_acpi_firmware_first())
+ dev_info(&device->dev,
+ "PCIe AER handled by firmware\n");
+ else
+ control |= OSC_PCI_EXPRESS_AER_CONTROL;
+ }
+
+ requested = control;
+ status = acpi_pci_osc_control_set(handle, &control,
+ OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
+ if (ACPI_SUCCESS(status)) {
+ decode_osc_control(root, "OS now controls", control);
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ /*
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+ *clear_aspm = 1;
+ }
+ } else {
+ decode_osc_control(root, "OS requested", requested);
+ decode_osc_control(root, "platform willing to grant", control);
+ dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+ acpi_format_exception(status));
+
+ /*
+ * We want to disable ASPM here, but aspm_disabled
+ * needs to remain in its state from boot so that we
+ * properly handle PCIe 1.1 devices. So we set this
+ * flag here, to defer the action until after the ACPI
+ * root scan.
+ */
+ *no_aspm = 1;
+ }
+}
+
static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used)
{
@@ -376,9 +503,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_status status;
int result;
struct acpi_pci_root *root;
- u32 flags, base_flags;
acpi_handle handle = device->handle;
- bool no_aspm = false, clear_aspm = false;
+ int no_aspm = 0, clear_aspm = 0;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -431,81 +557,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
- /*
- * All supported architectures that use ACPI have support for
- * PCI domains, so we indicate this in _OSC support capabilities.
- */
- flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
- acpi_pci_osc_support(root, flags);
-
- if (pci_ext_cfg_avail())
- flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
- if (pcie_aspm_support_enabled()) {
- flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
- OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
- }
- if (pci_msi_enabled())
- flags |= OSC_MSI_SUPPORT;
- if (flags != base_flags) {
- status = acpi_pci_osc_support(root, flags);
- if (ACPI_FAILURE(status)) {
- dev_info(&device->dev, "ACPI _OSC support "
- "notification failed, disabling PCIe ASPM\n");
- no_aspm = true;
- flags = base_flags;
- }
- }
-
- if (!pcie_ports_disabled
- && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
- flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
- | OSC_PCI_EXPRESS_PME_CONTROL;
-
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_dbg(&device->dev,
- "PCIe errors handled by BIOS.\n");
- else
- flags |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
-
- dev_info(&device->dev,
- "Requesting ACPI _OSC control (0x%02x)\n", flags);
-
- status = acpi_pci_osc_control_set(handle, &flags,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_SUCCESS(status)) {
- dev_info(&device->dev,
- "ACPI _OSC control (0x%02x) granted\n", flags);
- if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
- /*
- * We have ASPM control, but the FADT indicates
- * that it's unsupported. Clear it.
- */
- clear_aspm = true;
- }
- } else {
- dev_info(&device->dev,
- "ACPI _OSC request failed (%s), "
- "returned control mask: 0x%02x\n",
- acpi_format_exception(status), flags);
- dev_info(&device->dev,
- "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
- /*
- * We want to disable ASPM here, but aspm_disabled
- * needs to remain in its state from boot so that we
- * properly handle PCIe 1.1 devices. So we set this
- * flag here, to defer the action until after the ACPI
- * root scan.
- */
- no_aspm = true;
- }
- } else {
- dev_info(&device->dev,
- "Unable to request _OSC control "
- "(_OSC support mask: 0x%02x)\n", flags);
- }
+ negotiate_os_control(root, &no_aspm, &clear_aspm);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
@@ -523,6 +575,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
dev_err(&device->dev,
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, (unsigned int)root->secondary.start);
+ device->driver_data = NULL;
result = -ENODEV;
goto end;
}
@@ -590,39 +643,10 @@ static void handle_root_bridge_insertion(acpi_handle handle)
acpi_handle_err(handle, "cannot add bridge to acpi list\n");
}
-static void handle_root_bridge_removal(struct acpi_device *device)
-{
- acpi_status status;
- struct acpi_eject_event *ej_event;
-
- ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
- if (!ej_event) {
- /* Inform firmware the hot-remove operation has error */
- (void) acpi_evaluate_hotplug_ost(device->handle,
- ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE,
- NULL);
- return;
- }
-
- ej_event->device = device;
- ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
-
- status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
- if (ACPI_FAILURE(status))
- kfree(ej_event);
-}
-
-static void _handle_hotplug_event_root(struct work_struct *work)
+static void hotplug_event_root(void *data, u32 type)
{
+ acpi_handle handle = data;
struct acpi_pci_root *root;
- struct acpi_hp_work *hp_work;
- acpi_handle handle;
- u32 type;
-
- hp_work = container_of(work, struct acpi_hp_work, work);
- handle = hp_work->handle;
- type = hp_work->type;
acpi_scan_lock_acquire();
@@ -652,9 +676,15 @@ static void _handle_hotplug_event_root(struct work_struct *work)
/* request device eject */
acpi_handle_printk(KERN_DEBUG, handle,
"Device eject notify on %s\n", __func__);
- if (root)
- handle_root_bridge_removal(root->device);
- break;
+ if (!root)
+ break;
+
+ get_device(&root->device->dev);
+
+ acpi_scan_lock_release();
+
+ acpi_bus_device_eject(root->device, ACPI_NOTIFY_EJECT_REQUEST);
+ return;
default:
acpi_handle_warn(handle,
"notify_handler: unknown event type 0x%x\n",
@@ -663,14 +693,12 @@ static void _handle_hotplug_event_root(struct work_struct *work)
}
acpi_scan_lock_release();
- kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
}
static void handle_hotplug_event_root(acpi_handle handle, u32 type,
void *context)
{
- alloc_acpi_hp_work(handle, type, context,
- _handle_hotplug_event_root);
+ acpi_hotplug_execute(hotplug_event_root, handle, type);
}
static acpi_status __init
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 04a13784dd2..6a5b152ad4d 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -8,289 +8,17 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
#include "sleep.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
/*
* this file provides support for:
- * /proc/acpi/alarm
* /proc/acpi/wakeup
*/
ACPI_MODULE_NAME("sleep")
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef HAVE_ACPI_LEGACY_ALARM
-
-static u32 cmos_bcd_read(int offset, int rtc_control);
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
- u32 sec, min, hr;
- u32 day, mo, yr, cent = 0;
- u32 today = 0;
- unsigned char rtc_control = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- rtc_control = CMOS_READ(RTC_CONTROL);
- sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
- min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
- hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
-
- /* If we ever get an FACP with proper values... */
- if (acpi_gbl_FADT.day_alarm) {
- /* ACPI spec: only low 6 its should be cared */
- day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- day = bcd2bin(day);
- } else
- day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- if (acpi_gbl_FADT.month_alarm)
- mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
- else {
- mo = cmos_bcd_read(RTC_MONTH, rtc_control);
- today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- }
- if (acpi_gbl_FADT.century)
- cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
-
- yr = cmos_bcd_read(RTC_YEAR, rtc_control);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- /* we're trusting the FADT (see above) */
- if (!acpi_gbl_FADT.century)
- /* If we're not trusting the FADT, we should at least make it
- * right for _this_ century... ehm, what is _this_ century?
- *
- * TBD:
- * ASAP: find piece of code in the kernel, e.g. star tracker driver,
- * which we can trust to determine the century correctly. Atom
- * watch driver would be nice, too...
- *
- * if that has not happened, change for first release in 2050:
- * if (yr<50)
- * yr += 2100;
- * else
- * yr += 2000; // current line of code
- *
- * if that has not happened either, please do on 2099/12/31:23:59:59
- * s/2000/2100
- *
- */
- yr += 2000;
- else
- yr += cent * 100;
-
- /*
- * Show correct dates for alarms up to a month into the future.
- * This solves issues for nearly all situations with the common
- * 30-day alarm clocks in PC hardware.
- */
- if (day < today) {
- if (mo < 12) {
- mo += 1;
- } else {
- mo = 1;
- yr += 1;
- }
- }
-
- seq_printf(seq, "%4.4u-", yr);
- (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
- (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
- (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
- (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
- (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
- return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
-}
-
-static int get_date_field(char **p, u32 * value)
-{
- char *next = NULL;
- char *string_end = NULL;
- int result = -EINVAL;
-
- /*
- * Try to find delimeter, only to insert null. The end of the
- * string won't have one, but is still valid.
- */
- if (*p == NULL)
- return result;
-
- next = strpbrk(*p, "- :");
- if (next)
- *next++ = '\0';
-
- *value = simple_strtoul(*p, &string_end, 10);
-
- /* Signal success if we got a good digit */
- if (string_end != *p)
- result = 0;
-
- if (next)
- *p = next;
- else
- *p = NULL;
-
- return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
- u32 val = CMOS_READ(offset);
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- val = bcd2bin(val);
- return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- val = bin2bcd(val);
- CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
- const char __user * buffer, size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[30] = { '\0' };
- char *p = alarm_string;
- u32 sec, min, hr, day, mo, yr;
- int adjust = 0;
- unsigned char rtc_control = 0;
-
- if (count > sizeof(alarm_string) - 1)
- return -EINVAL;
-
- if (copy_from_user(alarm_string, buffer, count))
- return -EFAULT;
-
- alarm_string[count] = '\0';
-
- /* check for time adjustment */
- if (alarm_string[0] == '+') {
- p++;
- adjust = 1;
- }
-
- if ((result = get_date_field(&p, &yr)))
- goto end;
- if ((result = get_date_field(&p, &mo)))
- goto end;
- if ((result = get_date_field(&p, &day)))
- goto end;
- if ((result = get_date_field(&p, &hr)))
- goto end;
- if ((result = get_date_field(&p, &min)))
- goto end;
- if ((result = get_date_field(&p, &sec)))
- goto end;
-
- spin_lock_irq(&rtc_lock);
-
- rtc_control = CMOS_READ(RTC_CONTROL);
-
- if (adjust) {
- yr += cmos_bcd_read(RTC_YEAR, rtc_control);
- mo += cmos_bcd_read(RTC_MONTH, rtc_control);
- day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- hr += cmos_bcd_read(RTC_HOURS, rtc_control);
- min += cmos_bcd_read(RTC_MINUTES, rtc_control);
- sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
- }
-
- spin_unlock_irq(&rtc_lock);
-
- if (sec > 59) {
- min += sec/60;
- sec = sec%60;
- }
- if (min > 59) {
- hr += min/60;
- min = min%60;
- }
- if (hr > 23) {
- day += hr/24;
- hr = hr%24;
- }
- if (day > 31) {
- mo += day/32;
- day = day%32;
- }
- if (mo > 12) {
- yr += mo/13;
- mo = mo%13;
- }
-
- spin_lock_irq(&rtc_lock);
- /*
- * Disable alarm interrupt before setting alarm timer or else
- * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
- */
- rtc_control &= ~RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- /* write the fields the rtc knows about */
- cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
- cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
- cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
- /*
- * If the system supports an enhanced alarm it will have non-zero
- * offsets into the CMOS RAM here -- which for some reason are pointing
- * to the RTC area of memory.
- */
- if (acpi_gbl_FADT.day_alarm)
- cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
- if (acpi_gbl_FADT.month_alarm)
- cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
- if (acpi_gbl_FADT.century) {
- if (adjust)
- yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
- cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
- }
- /* enable the rtc alarm interrupt */
- rtc_control |= RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- spin_unlock_irq(&rtc_lock);
-
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_enable_event(ACPI_EVENT_RTC, 0);
-
- *ppos += count;
-
- result = 0;
- end:
- return result ? result : count;
-}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
@@ -417,41 +145,8 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
.release = single_release,
};
-#ifdef HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
- .owner = THIS_MODULE,
- .open = acpi_system_alarm_open_fs,
- .read = seq_read,
- .write = acpi_system_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_disable_event(ACPI_EVENT_RTC, 0);
-
- return ACPI_INTERRUPT_HANDLED;
-}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
int __init acpi_sleep_proc_init(void)
{
-#ifdef HAVE_ACPI_LEGACY_ALARM
- /* 'alarm' [R/W] */
- proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_alarm_fops);
-
- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
- /*
- * Disable the RTC event after installing RTC handler.
- * Only when RTC alarm is set will it be enabled.
- */
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
/* 'wakeup device' [R/W] */
proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
acpi_root_dir, &acpi_system_wakeup_device_fops);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index cf34d903f4f..b3171f30b31 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -162,16 +162,23 @@ exit:
return apic_id;
}
-int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
{
-#ifdef CONFIG_SMP
- int i;
-#endif
- int apic_id = -1;
+ int apic_id;
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
+
+ return apic_id;
+}
+
+int acpi_map_cpuid(int apic_id, u32 acpi_id)
+{
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
if (apic_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
@@ -211,6 +218,15 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
#endif
return -1;
}
+
+int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+{
+ int apic_id;
+
+ apic_id = acpi_get_apicid(handle, type, acpi_id);
+
+ return acpi_map_cpuid(apic_id, acpi_id);
+}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
static bool __init processor_physically_present(acpi_handle handle)
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e534ba66d5b..146ab7e2b81 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -153,8 +153,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __refdata acpi_cpu_notifier =
-{
+static struct notifier_block __refdata acpi_cpu_notifier = {
.notifier_call = acpi_cpu_soft_notify,
};
@@ -172,7 +171,6 @@ static int __acpi_processor_start(struct acpi_device *device)
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_load_module(pr);
#endif
acpi_processor_get_throttling_info(pr);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f98dd00b51a..644516d9bde 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -119,17 +119,10 @@ static struct dmi_system_id processor_power_dmi_table[] = {
*/
static void acpi_safe_halt(void)
{
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- if (!need_resched()) {
+ if (!tif_need_resched()) {
safe_halt();
local_irq_disable();
}
- current_thread_info()->status |= TS_POLLING;
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3
@@ -272,9 +265,6 @@ static void tsc_check_state(int state) { return; }
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
- if (!pr)
- return -EINVAL;
-
if (!pr->pblk)
return -ENODEV;
@@ -737,6 +727,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ if (current_set_polling_and_test())
+ return -EINVAL;
+ }
+
lapic_timer_state_broadcast(pr, cx, 1);
acpi_idle_do_entry(cx);
@@ -790,18 +785,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
- if (cx->entry_method != ACPI_CSTATE_FFH) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ if (current_set_polling_and_test())
return -EINVAL;
- }
}
/*
@@ -819,9 +805,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_wakeup_event(0);
- if (cx->entry_method != ACPI_CSTATE_FFH)
- current_thread_info()->status |= TS_POLLING;
-
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
@@ -858,18 +841,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
}
- if (cx->entry_method != ACPI_CSTATE_FFH) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ if (current_set_polling_and_test())
return -EINVAL;
- }
}
acpi_unlazy_tlb(smp_processor_id());
@@ -915,9 +889,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
sched_clock_idle_wakeup_event(0);
- if (cx->entry_method != ACPI_CSTATE_FFH)
- current_thread_info()->status |= TS_POLLING;
-
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
@@ -1076,12 +1047,8 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (!pr)
- return -EINVAL;
-
- if (nocst) {
+ if (nocst)
return -ENODEV;
- }
if (!pr->flags.power_setup_done)
return -ENODEV;
@@ -1108,9 +1075,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (!pr)
- return -EINVAL;
-
if (nocst)
return -ENODEV;
@@ -1183,9 +1147,6 @@ int acpi_processor_power_init(struct acpi_processor *pr)
first_run++;
}
- if (!pr)
- return -EINVAL;
-
if (acpi_gbl_FADT.cst_control && !nocst) {
status =
acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 51d7948611d..60a7c28fc16 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -235,28 +235,6 @@ void acpi_processor_ppc_exit(void)
acpi_processor_ppc_status &= ~PPC_REGISTERED;
}
-/*
- * Do a quick check if the systems looks like it should use ACPI
- * cpufreq. We look at a _PCT method being available, but don't
- * do a whole lot of sanity checks.
- */
-void acpi_processor_load_module(struct acpi_processor *pr)
-{
- static int requested;
- acpi_status status = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- if (!arch_has_acpi_pdc() || requested)
- return;
- status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
- if (!ACPI_FAILURE(status)) {
- printk(KERN_INFO PREFIX "Requesting acpi_cpufreq\n");
- request_module_nowait("acpi_cpufreq");
- requested = 1;
- }
- kfree(buffer.pointer);
-}
-
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index aef7e1cd1e5..d465ae6cdd0 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -30,12 +30,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -67,11 +61,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -84,9 +73,6 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
struct acpi_battery {
struct power_supply bat;
struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- struct proc_dir_entry *proc_entry;
-#endif
unsigned long update_time;
char name[8];
char manufacturer_name[ACPI_SBS_BLOCK_MAX];
@@ -119,9 +105,6 @@ struct acpi_sbs {
struct acpi_device *device;
struct acpi_smb_hc *hc;
struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- struct proc_dir_entry *charger_entry;
-#endif
struct acpi_battery battery[MAX_SBS_BAT];
u8 batteries_supported:4;
u8 manager_present:1;
@@ -482,261 +465,6 @@ static struct device_attribute alarm_attr = {
};
/* --------------------------------------------------------------------------
- FS Interface (/proc/acpi)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* Generic Routines */
-static int
-acpi_sbs_add_fs(struct proc_dir_entry **dir,
- struct proc_dir_entry *parent_dir,
- char *dir_name,
- const struct file_operations *info_fops,
- const struct file_operations *state_fops,
- const struct file_operations *alarm_fops, void *data)
-{
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!*dir) {
- *dir = proc_mkdir(dir_name, parent_dir);
- if (!*dir) {
- return -ENODEV;
- }
- }
-
- /* 'info' [R] */
- if (info_fops)
- proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
- info_fops, data);
-
- /* 'state' [R] */
- if (state_fops)
- proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
- state_fops, data);
-
- /* 'alarm' [R/W] */
- if (alarm_fops)
- proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
- alarm_fops, data);
- return 0;
-}
-
-/* Smart Battery Interface */
-static struct proc_dir_entry *acpi_battery_dir = NULL;
-
-static inline char *acpi_battery_units(struct acpi_battery *battery)
-{
- return acpi_battery_mode(battery) ? " mW" : " mA";
-}
-
-
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int result = 0;
-
- mutex_lock(&sbs->lock);
-
- seq_printf(seq, "present: %s\n",
- (battery->present) ? "yes" : "no");
- if (!battery->present)
- goto end;
-
- seq_printf(seq, "design capacity: %i%sh\n",
- battery->design_capacity * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "last full capacity: %i%sh\n",
- battery->full_charge_capacity * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "battery technology: rechargeable\n");
- seq_printf(seq, "design voltage: %i mV\n",
- battery->design_voltage * acpi_battery_vscale(battery));
- seq_printf(seq, "design capacity warning: unknown\n");
- seq_printf(seq, "design capacity low: unknown\n");
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: unknown\n");
- seq_printf(seq, "capacity granularity 2: unknown\n");
- seq_printf(seq, "model number: %s\n", battery->device_name);
- seq_printf(seq, "serial number: %i\n",
- battery->serial_number);
- seq_printf(seq, "battery type: %s\n",
- battery->device_chemistry);
- seq_printf(seq, "OEM info: %s\n",
- battery->manufacturer_name);
- end:
- mutex_unlock(&sbs->lock);
- return result;
-}
-
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int rate;
-
- mutex_lock(&sbs->lock);
- seq_printf(seq, "present: %s\n",
- (battery->present) ? "yes" : "no");
- if (!battery->present)
- goto end;
-
- acpi_battery_get_state(battery);
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x0010) ? "critical" : "ok");
- seq_printf(seq, "charging state: %s\n",
- (battery->rate_now < 0) ? "discharging" :
- ((battery->rate_now > 0) ? "charging" : "charged"));
- rate = abs(battery->rate_now) * acpi_battery_ipscale(battery);
- rate *= (acpi_battery_mode(battery))?(battery->voltage_now *
- acpi_battery_vscale(battery)/1000):1;
- seq_printf(seq, "present rate: %d%s\n", rate,
- acpi_battery_units(battery));
- seq_printf(seq, "remaining capacity: %i%sh\n",
- battery->capacity_now * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "present voltage: %i mV\n",
- battery->voltage_now * acpi_battery_vscale(battery));
-
- end:
- mutex_unlock(&sbs->lock);
- return 0;
-}
-
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int result = 0;
-
- mutex_lock(&sbs->lock);
-
- if (!battery->present) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
-
- acpi_battery_get_alarm(battery);
- seq_printf(seq, "alarm: ");
- if (battery->alarm_capacity)
- seq_printf(seq, "%i%sh\n",
- battery->alarm_capacity *
- acpi_battery_scale(battery),
- acpi_battery_units(battery));
- else
- seq_printf(seq, "disabled\n");
- end:
- mutex_unlock(&sbs->lock);
- return result;
-}
-
-static ssize_t
-acpi_battery_write_alarm(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *seq = file->private_data;
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- char alarm_string[12] = { '\0' };
- int result = 0;
- mutex_lock(&sbs->lock);
- if (!battery->present) {
- result = -ENODEV;
- goto end;
- }
- if (count > sizeof(alarm_string) - 1) {
- result = -EINVAL;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = 0;
- battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
- acpi_battery_scale(battery);
- acpi_battery_set_alarm(battery);
- end:
- mutex_unlock(&sbs->lock);
- if (result)
- return result;
- return count;
-}
-
-static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_battery_info_fops = {
- .open = acpi_battery_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_fops = {
- .open = acpi_battery_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_fops = {
- .open = acpi_battery_alarm_open_fs,
- .read = seq_read,
- .write = acpi_battery_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-/* Legacy AC Adapter Interface */
-
-static struct proc_dir_entry *acpi_ac_dir = NULL;
-
-static int acpi_ac_read_state(struct seq_file *seq, void *offset)
-{
-
- struct acpi_sbs *sbs = seq->private;
-
- mutex_lock(&sbs->lock);
-
- seq_printf(seq, "state: %s\n",
- sbs->charger_present ? "on-line" : "off-line");
-
- mutex_unlock(&sbs->lock);
- return 0;
-}
-
-static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_ac_state_fops = {
- .open = acpi_ac_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static int acpi_battery_read(struct acpi_battery *battery)
@@ -781,12 +509,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
return result;
sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
- battery->name, &acpi_battery_info_fops,
- &acpi_battery_state_fops, &acpi_battery_alarm_fops,
- battery);
-#endif
battery->bat.name = battery->name;
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
if (!acpi_battery_mode(battery)) {
@@ -822,10 +544,6 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
}
-#ifdef CONFIG_ACPI_PROCFS_POWER
- proc_remove(battery->proc_entry);
- battery->proc_entry = NULL;
-#endif
}
static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -835,13 +553,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
result = acpi_ac_get_present(sbs);
if (result)
goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
- ACPI_AC_DIR_NAME, NULL,
- &acpi_ac_state_fops, NULL, sbs);
- if (result)
- goto end;
-#endif
+
sbs->charger.name = "sbs-charger";
sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
sbs->charger.properties = sbs_ac_props;
@@ -859,10 +571,6 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
{
if (sbs->charger.dev)
power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- proc_remove(sbs->charger_entry);
- sbs->charger_entry = NULL;
-#endif
}
static void acpi_sbs_callback(void *context)
@@ -950,20 +658,6 @@ static int acpi_sbs_remove(struct acpi_device *device)
return 0;
}
-static void acpi_sbs_rmdirs(void)
-{
-#ifdef CONFIG_ACPI_PROCFS_POWER
- if (acpi_ac_dir) {
- acpi_unlock_ac_dir(acpi_ac_dir);
- acpi_ac_dir = NULL;
- }
- if (acpi_battery_dir) {
- acpi_unlock_battery_dir(acpi_battery_dir);
- acpi_battery_dir = NULL;
- }
-#endif
-}
-
#ifdef CONFIG_PM_SLEEP
static int acpi_sbs_resume(struct device *dev)
{
@@ -995,28 +689,17 @@ static int __init acpi_sbs_init(void)
if (acpi_disabled)
return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
- return -ENODEV;
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir) {
- acpi_sbs_rmdirs();
- return -ENODEV;
- }
-#endif
+
result = acpi_bus_register_driver(&acpi_sbs_driver);
- if (result < 0) {
- acpi_sbs_rmdirs();
+ if (result < 0)
return -ENODEV;
- }
+
return 0;
}
static void __exit acpi_sbs_exit(void)
{
acpi_bus_unregister_driver(&acpi_sbs_driver);
- acpi_sbs_rmdirs();
return;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fee8a297c7d..15daa21fcd0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -125,8 +125,8 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
- void *data, void **ret_p)
+static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
+ void **ret_p)
{
struct acpi_device *device = NULL;
struct acpi_device_physical_node *pn;
@@ -136,6 +136,11 @@ static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
if (acpi_bus_get_device(handle, &device))
return AE_OK;
+ if (device->handler && !device->handler->hotplug.enabled) {
+ *ret_p = &device->dev;
+ return AE_SUPPORT;
+ }
+
mutex_lock(&device->physical_node_lock);
list_for_each_entry(pn, &device->physical_node_list, node) {
@@ -168,8 +173,8 @@ static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
return status;
}
-static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl,
- void *data, void **ret_p)
+static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
+ void **ret_p)
{
struct acpi_device *device = NULL;
struct acpi_device_physical_node *pn;
@@ -214,26 +219,32 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
* If the first pass is successful, the second one isn't needed, though.
*/
errdev = NULL;
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- NULL, acpi_bus_offline_companions,
- (void *)false, (void **)&errdev);
- acpi_bus_offline_companions(handle, 0, (void *)false, (void **)&errdev);
+ status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ NULL, acpi_bus_offline, (void *)false,
+ (void **)&errdev);
+ if (status == AE_SUPPORT) {
+ dev_warn(errdev, "Offline disabled.\n");
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_online, NULL, NULL, NULL);
+ put_device(&device->dev);
+ return -EPERM;
+ }
+ acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
if (errdev) {
errdev = NULL;
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- NULL, acpi_bus_offline_companions,
- (void *)true , (void **)&errdev);
+ NULL, acpi_bus_offline, (void *)true,
+ (void **)&errdev);
if (!errdev || acpi_force_hot_remove)
- acpi_bus_offline_companions(handle, 0, (void *)true,
- (void **)&errdev);
+ acpi_bus_offline(handle, 0, (void *)true,
+ (void **)&errdev);
if (errdev && !acpi_force_hot_remove) {
dev_warn(errdev, "Offline failed.\n");
- acpi_bus_online_companions(handle, 0, NULL, NULL);
+ acpi_bus_online(handle, 0, NULL, NULL);
acpi_walk_namespace(ACPI_TYPE_ANY, handle,
- ACPI_UINT32_MAX,
- acpi_bus_online_companions, NULL,
- NULL, NULL);
+ ACPI_UINT32_MAX, acpi_bus_online,
+ NULL, NULL, NULL);
put_device(&device->dev);
return -EBUSY;
}
@@ -274,49 +285,45 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return 0;
}
-static void acpi_bus_device_eject(void *context)
+void acpi_bus_device_eject(void *data, u32 ost_src)
{
- acpi_handle handle = context;
- struct acpi_device *device = NULL;
- struct acpi_scan_handler *handler;
+ struct acpi_device *device = data;
+ acpi_handle handle = device->handle;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
- acpi_bus_get_device(handle, &device);
- if (!device)
- goto err_out;
+ if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
+ acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- handler = device->handler;
- if (!handler || !handler->hotplug.enabled) {
- ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- goto err_out;
- }
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- if (handler->hotplug.mode == AHM_CONTAINER)
+ if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- get_device(&device->dev);
error = acpi_scan_hot_remove(device);
- if (error)
+ if (error == -EPERM) {
+ goto err_support;
+ } else if (error) {
goto err_out;
+ }
out:
mutex_unlock(&acpi_scan_lock);
unlock_device_hotplug();
return;
+ err_support:
+ ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
err_out:
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code,
- NULL);
+ acpi_evaluate_hotplug_ost(handle, ost_src, ost_code, NULL);
goto out;
}
-static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
+static void acpi_scan_bus_device_check(void *data, u32 ost_source)
{
+ acpi_handle handle = data;
struct acpi_device *device = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
@@ -331,8 +338,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
goto out;
}
}
- acpi_evaluate_hotplug_ost(handle, ost_source,
- ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
error = acpi_bus_scan(handle);
if (error) {
acpi_handle_warn(handle, "Namespace scan failure\n");
@@ -353,18 +358,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
unlock_device_hotplug();
}
-static void acpi_scan_bus_check(void *context)
-{
- acpi_scan_bus_device_check((acpi_handle)context,
- ACPI_NOTIFY_BUS_CHECK);
-}
-
-static void acpi_scan_device_check(void *context)
-{
- acpi_scan_bus_device_check((acpi_handle)context,
- ACPI_NOTIFY_DEVICE_CHECK);
-}
-
static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
{
u32 ost_status;
@@ -395,8 +388,8 @@ static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
{
- acpi_osd_exec_callback callback;
struct acpi_scan_handler *handler = data;
+ struct acpi_device *adev;
acpi_status status;
if (!handler->hotplug.enabled)
@@ -405,56 +398,34 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
- callback = acpi_scan_bus_check;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
- callback = acpi_scan_device_check;
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- callback = acpi_bus_device_eject;
- break;
+ if (acpi_bus_get_device(handle, &adev))
+ goto err_out;
+
+ get_device(&adev->dev);
+ status = acpi_hotplug_execute(acpi_bus_device_eject, adev, type);
+ if (ACPI_SUCCESS(status))
+ return;
+
+ put_device(&adev->dev);
+ goto err_out;
default:
/* non-hotplug event; possibly handled by other handler */
return;
}
- status = acpi_os_hotplug_execute(callback, handle);
- if (ACPI_FAILURE(status))
- acpi_evaluate_hotplug_ost(handle, type,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE,
- NULL);
-}
-
-/**
- * acpi_bus_hot_remove_device: hot-remove a device and its children
- * @context: struct acpi_eject_event pointer (freed in this func)
- *
- * Hot-remove a device and its children. This function frees up the
- * memory space passed by arg context, so that the caller may call
- * this function asynchronously through acpi_os_hotplug_execute().
- */
-void acpi_bus_hot_remove_device(void *context)
-{
- struct acpi_eject_event *ej_event = context;
- struct acpi_device *device = ej_event->device;
- acpi_handle handle = device->handle;
- int error;
-
- lock_device_hotplug();
- mutex_lock(&acpi_scan_lock);
-
- error = acpi_scan_hot_remove(device);
- if (error && handle)
- acpi_evaluate_hotplug_ost(handle, ej_event->event,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE,
- NULL);
+ status = acpi_hotplug_execute(acpi_scan_bus_device_check, handle, type);
+ if (ACPI_SUCCESS(status))
+ return;
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
- kfree(context);
+ err_out:
+ acpi_evaluate_hotplug_ost(handle, type,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
-EXPORT_SYMBOL(acpi_bus_hot_remove_device);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
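The eject path above takes a reference on the struct acpi_device with get_device() before queuing the asynchronous work and drops it again only if queuing fails, leaving the deferred side to release it later. A hedged sketch of that reference hand-off is shown below; example_queue_async() and the other example_* names are assumptions standing in for whatever queues the work.

#include <linux/device.h>
#include <linux/errno.h>

/* Assumed helper: queues @fn(@data) for deferred execution, 0 on success. */
extern int example_queue_async(void (*fn)(void *data), void *data);

static void example_async_eject(void *data)
{
	struct device *dev = data;

	/* ... hot-removal work runs here ... */
	put_device(dev);		/* release the reference taken by the requester */
}

static int example_request_eject(struct device *dev)
{
	get_device(dev);		/* keep @dev alive until the async work runs */
	if (example_queue_async(example_async_eject, dev)) {
		put_device(dev);	/* queueing failed: drop the reference now */
		return -EBUSY;
	}
	return 0;
}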
@@ -487,10 +458,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
- struct acpi_eject_event *ej_event;
acpi_object_type not_used;
acpi_status status;
- int ret;
if (!count || buf[0] != '1')
return -EINVAL;
@@ -503,28 +472,18 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
- ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
- if (!ej_event) {
- ret = -ENOMEM;
- goto err_out;
- }
acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- ej_event->device = acpi_device;
- ej_event->event = ACPI_OST_EC_OSPM_EJECT;
get_device(&acpi_device->dev);
- status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+ status = acpi_hotplug_execute(acpi_bus_device_eject, acpi_device,
+ ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
put_device(&acpi_device->dev);
- kfree(ej_event);
- ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
-
- err_out:
acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
- return ret;
+ return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -1676,7 +1635,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
void acpi_device_add_finalize(struct acpi_device *device)
{
- device->flags.match_driver = true;
dev_set_uevent_suppress(&device->dev, false);
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
}
@@ -1915,8 +1873,12 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
ret = acpi_scan_attach_handler(device);
- if (ret)
- return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
+ if (ret < 0)
+ return AE_CTRL_DEPTH;
+
+ device->flags.match_driver = true;
+ if (ret > 0)
+ return AE_OK;
ret = device_attach(&device->dev);
return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
@@ -2027,6 +1989,7 @@ static int acpi_bus_scan_fixed(void)
if (result)
return result;
+ device->flags.match_driver = true;
result = device_attach(&device->dev);
if (result < 0)
return result;
@@ -2043,6 +2006,7 @@ static int acpi_bus_scan_fixed(void)
if (result)
return result;
+ device->flags.match_driver = true;
result = device_attach(&device->dev);
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 05306a59aed..db5293650f6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -564,6 +564,7 @@ static ssize_t counter_set(struct kobject *kobj,
acpi_event_status status;
acpi_handle handle;
int result = 0;
+ unsigned long tmp;
if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
int i;
@@ -596,8 +597,10 @@ static ssize_t counter_set(struct kobject *kobj,
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
+ else if (!kstrtoul(buf, 0, &tmp))
+ all_counters[index].count = tmp;
else
- all_counters[index].count = strtoul(buf, NULL, 0);
+ result = -EINVAL;
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
int event = index - num_gpes;
if (!strcmp(buf, "disable\n") &&
@@ -609,8 +612,10 @@ static ssize_t counter_set(struct kobject *kobj,
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_event(event);
+ else if (!kstrtoul(buf, 0, &tmp))
+ all_counters[index].count = tmp;
else
- all_counters[index].count = strtoul(buf, NULL, 0);
+ result = -EINVAL;
} else
all_counters[index].count = strtoul(buf, NULL, 0);
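Switching from strtoul() to kstrtoul() makes the sysfs store reject malformed input instead of silently storing 0: kstrtoul() returns -EINVAL or -ERANGE on bad strings (and tolerates the trailing newline sysfs writes carry), so the handler can propagate an error. A short sketch of the idiom; example_store() and example_count are hypothetical.

#include <linux/kernel.h>	/* kstrtoul() */
#include <linux/types.h>

static unsigned long example_count;

/* Parse @buf strictly; keep the old value and return -errno on bad input. */
static ssize_t example_store(const char *buf, size_t len)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* base 0: accepts decimal, 0x..., and octal */
	if (ret)
		return ret;		/* -EINVAL or -ERANGE */
	example_count = val;
	return len;
}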
@@ -762,13 +767,8 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
if (!hotplug_kobj)
goto err_out;
- kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
- error = kobject_set_name(&hotplug->kobj, "%s", name);
- if (error)
- goto err_out;
-
- hotplug->kobj.parent = hotplug_kobj;
- error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+ error = kobject_init_and_add(&hotplug->kobj,
+ &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
if (error)
goto err_out;
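kobject_init_and_add() collapses the separate kobject_init()/kobject_set_name()/kobject_add() calls into one, but the usual cleanup rule still applies: once the kobject has been initialized, an error must be unwound with kobject_put() (which invokes the ktype's release), never a bare kfree(). A minimal sketch under that assumption; the example_* names and the standalone kzalloc'd kobject are illustrative only.

#include <linux/kobject.h>
#include <linux/slab.h>

static void example_release(struct kobject *kobj)
{
	kfree(kobj);			/* the ktype's release owns the memory */
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static int example_add_node(struct kobject *parent, const char *name)
{
	struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	int error;

	if (!kobj)
		return -ENOMEM;

	error = kobject_init_and_add(kobj, &example_ktype, parent, "%s", name);
	if (error)
		kobject_put(kobj);	/* init succeeded, so put (not kfree) cleans up */
	return error;
}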
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6a0329340b4..0d9f46b5ae6 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -299,8 +299,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No critical threshold\n"));
} else if (tmp <= 2732) {
- printk(KERN_WARNING FW_BUG "Invalid critical threshold "
- "(%llu)\n", tmp);
+ pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+ tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
@@ -317,8 +317,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* Allow override critical threshold
*/
if (crt_k > tz->trips.critical.temperature)
- printk(KERN_WARNING PREFIX
- "Critical threshold %d C\n", crt);
+ pr_warn(PREFIX "Critical threshold %d C\n",
+ crt);
tz->trips.critical.temperature = crt_k;
}
}
@@ -390,8 +390,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Invalid passive threshold\n");
+ pr_warn(PREFIX "Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
}
else
@@ -453,8 +452,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Invalid active%d threshold\n", i);
+ pr_warn(PREFIX "Invalid active%d threshold\n",
+ i);
tz->trips.active[i].flags.valid = 0;
}
else
@@ -505,7 +504,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
valid |= tz->trips.active[i].flags.valid;
if (!valid) {
- printk(KERN_WARNING FW_BUG "No valid trip found\n");
+ pr_warn(FW_BUG "No valid trip found\n");
return -ENODEV;
}
return 0;
@@ -515,10 +514,9 @@ static void acpi_thermal_check(void *data)
{
struct acpi_thermal *tz = data;
- if (!tz->tz_enabled) {
- pr_warn("thermal zone is disabled \n");
+ if (!tz->tz_enabled)
return;
- }
+
thermal_zone_device_update(tz->thermal_zone);
}
@@ -570,9 +568,10 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
*/
if (mode == THERMAL_DEVICE_ENABLED)
enable = 1;
- else if (mode == THERMAL_DEVICE_DISABLED)
+ else if (mode == THERMAL_DEVICE_DISABLED) {
enable = 0;
- else
+ pr_warn("thermal zone will be disabled\n");
+ } else
return -EINVAL;
if (enable != tz->tz_enabled) {
@@ -923,8 +922,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
acpi_bus_private_data_handler,
tz->thermal_zone);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Error attaching device data\n");
+ pr_err(PREFIX "Error attaching device data\n");
return -ENODEV;
}
@@ -1094,9 +1092,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
- acpi_device_name(device), acpi_device_bid(device),
- KELVIN_TO_CELSIUS(tz->temperature));
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+ acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
goto end;
free_memory:
@@ -1159,24 +1156,24 @@ static int acpi_thermal_resume(struct device *dev)
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all active thermal trip points\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all active thermal trip points\n", d->ident);
act = -1;
}
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all critical thermal trip point actions.\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all critical thermal trip point actions.\n", d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
if (tzp == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "enabling thermal zone polling\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "enabling thermal zone polling\n", d->ident);
tzp = 300; /* 300 dS = 30 Seconds */
}
return 0;
@@ -1184,8 +1181,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
static int thermal_psv(const struct dmi_system_id *d) {
if (psv == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all passive thermal trip points\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all passive thermal trip points\n", d->ident);
psv = -1;
}
return 0;
@@ -1238,7 +1235,7 @@ static int __init acpi_thermal_init(void)
dmi_check_system(thermal_dmi_table);
if (off) {
- printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+ pr_notice(PREFIX "thermal control disabled\n");
return -ENODEV;
}
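
The thermal.c hunks are a mechanical conversion from printk(KERN_WARNING/KERN_ERR/KERN_NOTICE/KERN_INFO PREFIX ...) to the pr_warn()/pr_err()/pr_notice()/pr_info() helpers, which expand to printk() with the matching log level. A trivial sketch of the pattern; the PREFIX definition here is illustrative rather than taken from the driver's headers.

#include <linux/printk.h>

#define PREFIX "ACPI: "	/* illustrative; the driver gets it from its own headers */

static void example_report_bad_trip(int index)
{
	/* pr_warn(fmt, ...) is printk(KERN_WARNING pr_fmt(fmt), ...) */
	pr_warn(PREFIX "Invalid active%d threshold\n", index);
}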
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 552248b0005..6d408bfbbb1 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -121,7 +121,7 @@ acpi_extract_package(union acpi_object *package,
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d]: got number, expecing"
+ " [%d]: got number, expecting"
" [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
@@ -148,7 +148,7 @@ acpi_extract_package(union acpi_object *package,
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d] got string/buffer,"
- " expecing [%c]\n",
+ " expecting [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
@@ -169,11 +169,20 @@ acpi_extract_package(union acpi_object *package,
/*
* Validate output buffer.
*/
- if (buffer->length < size_required) {
+ if (buffer->length == ACPI_ALLOCATE_BUFFER) {
+ buffer->pointer = ACPI_ALLOCATE(size_required);
+ if (!buffer->pointer)
+ return AE_NO_MEMORY;
buffer->length = size_required;
- return AE_BUFFER_OVERFLOW;
- } else if (buffer->length != size_required || !buffer->pointer) {
- return AE_BAD_PARAMETER;
+ memset(buffer->pointer, 0, size_required);
+ } else {
+ if (buffer->length < size_required) {
+ buffer->length = size_required;
+ return AE_BUFFER_OVERFLOW;
+ } else if (buffer->length != size_required ||
+ !buffer->pointer) {
+ return AE_BAD_PARAMETER;
+ }
}
head = buffer->pointer;
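
With the utils.c change above, acpi_extract_package() can allocate the output buffer itself when the caller passes ACPI_ALLOCATE_BUFFER as the length, mirroring acpi_evaluate_object(). A hedged sketch of a caller using that path; the "NN" format and helper name are illustrative, and the buffer is freed with kfree() on the assumption that ACPI_ALLOCATE maps to kmalloc on Linux.

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status example_extract_two_ints(union acpi_object *package,
					    u64 *a, u64 *b)
{
	struct acpi_buffer format = { sizeof("NN"), "NN" };
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	u64 *vals;
	acpi_status status;

	status = acpi_extract_package(package, &format, &out);
	if (ACPI_FAILURE(status))
		return status;

	vals = out.pointer;	/* "NN" packs two u64 values back to back */
	*a = vals[0];
	*b = vals[1];

	kfree(out.pointer);
	return AE_OK;
}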
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index aebcf6355df..995e91bcb97 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,13 +82,15 @@ static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
- * Some BIOSes claim they use minimum backlight at boot,
- * and this may bring dimming screen after boot
+ * For Windows 8 systems: if set to true and the GPU driver has
+ * registered a backlight interface, skip registering ACPI video's.
*/
-static bool use_bios_initial_backlight = 1;
-module_param(use_bios_initial_backlight, bool, 0644);
+static bool use_native_backlight = false;
+module_param(use_native_backlight, bool, 0644);
static int register_count;
+static struct mutex video_list_lock;
+static struct list_head video_bus_head;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -157,6 +159,7 @@ struct acpi_video_bus {
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
+ struct list_head entry;
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
@@ -229,6 +232,14 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_verify_backlight_support(void)
+{
+ if (acpi_osi_is_win8() && use_native_backlight &&
+ backlight_device_registered(BACKLIGHT_RAW))
+ return false;
+ return acpi_video_backlight_support();
+}
+
/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
{
@@ -388,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
-static int video_ignore_initial_backlight(const struct dmi_system_id *d)
-{
- use_bios_initial_backlight = 0;
- return 0;
-}
-
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -438,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Folio 13-2000",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "Fujitsu E753",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Pavilion dm4",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Pavilion g6 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP 1000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Pavilion m4",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
- },
- },
{}
};
@@ -821,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device)
if (!device->cap._BQC)
goto set_level;
- if (use_bios_initial_backlight) {
- level = acpi_video_bqc_value_to_level(device, level_old);
- /*
- * On some buggy laptops, _BQC returns an uninitialized
- * value when invoked for the first time, i.e.
- * level_old is invalid (no matter whether it's a level
- * or an index). Set the backlight to max_level in this case.
- */
- for (i = 2; i < br->count; i++)
- if (level_old == br->levels[i])
- break;
- if (i == br->count)
- level = max_level;
- }
+ level = acpi_video_bqc_value_to_level(device, level_old);
+ /*
+ * On some buggy laptops, _BQC returns an uninitialized
+ * value when invoked for the first time, i.e.
+ * level_old is invalid (no matter whether it's a level
+ * or an index). Set the backlight to max_level in this case.
+ */
+ for (i = 2; i < br->count; i++)
+ if (level == br->levels[i])
+ break;
+ if (i == br->count || !level)
+ level = max_level;
set_level:
result = acpi_video_device_lcd_set_level(device, level);
@@ -884,79 +839,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_has_method(device->dev->handle, "_DDC"))
device->cap._DDC = 1;
-
- if (acpi_video_backlight_support()) {
- struct backlight_properties props;
- struct pci_dev *pdev;
- acpi_handle acpi_parent;
- struct device *parent = NULL;
- int result;
- static int count;
- char *name;
-
- result = acpi_video_init_brightness(device);
- if (result)
- return;
- name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
- if (!name)
- return;
- count++;
-
- acpi_get_parent(device->dev->handle, &acpi_parent);
-
- pdev = acpi_get_pci_dev(acpi_parent);
- if (pdev) {
- parent = &pdev->dev;
- pci_dev_put(pdev);
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_FIRMWARE;
- props.max_brightness = device->brightness->count - 3;
- device->backlight = backlight_device_register(name,
- parent,
- device,
- &acpi_backlight_ops,
- &props);
- kfree(name);
- if (IS_ERR(device->backlight))
- return;
-
- /*
- * Save current brightness level in case we have to restore it
- * before acpi_video_device_lcd_set_level() is called next time.
- */
- device->backlight->props.brightness =
- acpi_video_get_brightness(device->backlight);
-
- device->cooling_dev = thermal_cooling_device_register("LCD",
- device->dev, &video_cooling_ops);
- if (IS_ERR(device->cooling_dev)) {
- /*
- * Set cooling_dev to NULL so we don't crash trying to
- * free it.
- * Also, why the hell we are returning early and
- * not attempt to register video output if cooling
- * device registration failed?
- * -- dtor
- */
- device->cooling_dev = NULL;
- return;
- }
-
- dev_info(&device->dev->dev, "registered as cooling_device%d\n",
- device->cooling_dev->id);
- result = sysfs_create_link(&device->dev->dev.kobj,
- &device->cooling_dev->device.kobj,
- "thermal_cooling");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
- result = sysfs_create_link(&device->cooling_dev->device.kobj,
- &device->dev->dev.kobj, "device");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
-
- }
}
/*
@@ -1143,13 +1025,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
- status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
- acpi_video_device_notify, data);
- if (ACPI_FAILURE(status))
- dev_err(&device->dev, "Error installing notify handler\n");
- else
- data->flags.notify = 1;
-
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
@@ -1333,8 +1208,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
- /* no warning message if acpi_backlight=vendor is used */
- if (!acpi_video_backlight_support())
+ /* no warning message if acpi_backlight=vendor or a quirk is used */
+ if (!acpi_video_verify_backlight_support())
return 0;
if (!device->brightness)
@@ -1454,64 +1329,6 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
return status;
}
-static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
-{
- acpi_status status;
-
- if (!device || !device->video)
- return -ENOENT;
-
- if (device->flags.notify) {
- status = acpi_remove_notify_handler(device->dev->handle,
- ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
- if (ACPI_FAILURE(status))
- dev_err(&device->dev->dev,
- "Can't remove video notify handler\n");
- }
-
- if (device->backlight) {
- backlight_device_unregister(device->backlight);
- device->backlight = NULL;
- }
- if (device->cooling_dev) {
- sysfs_remove_link(&device->dev->dev.kobj,
- "thermal_cooling");
- sysfs_remove_link(&device->cooling_dev->device.kobj,
- "device");
- thermal_cooling_device_unregister(device->cooling_dev);
- device->cooling_dev = NULL;
- }
-
- return 0;
-}
-
-static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
-{
- int status;
- struct acpi_video_device *dev, *next;
-
- mutex_lock(&video->device_list_lock);
-
- list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-
- status = acpi_video_bus_put_one_device(dev);
- if (ACPI_FAILURE(status))
- printk(KERN_WARNING PREFIX
- "hhuuhhuu bug in acpi video driver.\n");
-
- if (dev->brightness) {
- kfree(dev->brightness->levels);
- kfree(dev->brightness);
- }
- list_del(&dev->entry);
- kfree(dev);
- }
-
- mutex_unlock(&video->device_list_lock);
-
- return 0;
-}
-
/* acpi_video interface */
/*
@@ -1521,13 +1338,13 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
- acpi_video_backlight_quirks() ? 1 : 0);
+ acpi_osi_is_win8() ? 1 : 0);
}
static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
- acpi_video_backlight_quirks() ? 0 : 1);
+ acpi_osi_is_win8() ? 0 : 1);
}
static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
@@ -1536,7 +1353,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
struct input_dev *input;
int keycode = 0;
- if (!video)
+ if (!video || !video->input)
return;
input = video->input;
@@ -1691,12 +1508,236 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
return AE_OK;
}
+static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+{
+ if (acpi_video_verify_backlight_support()) {
+ struct backlight_properties props;
+ struct pci_dev *pdev;
+ acpi_handle acpi_parent;
+ struct device *parent = NULL;
+ int result;
+ static int count;
+ char *name;
+
+ result = acpi_video_init_brightness(device);
+ if (result)
+ return;
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
+ if (!name)
+ return;
+ count++;
+
+ acpi_get_parent(device->dev->handle, &acpi_parent);
+
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_FIRMWARE;
+ props.max_brightness = device->brightness->count - 3;
+ device->backlight = backlight_device_register(name,
+ parent,
+ device,
+ &acpi_backlight_ops,
+ &props);
+ kfree(name);
+ if (IS_ERR(device->backlight))
+ return;
+
+ /*
+ * Save current brightness level in case we have to restore it
+ * before acpi_video_device_lcd_set_level() is called next time.
+ */
+ device->backlight->props.brightness =
+ acpi_video_get_brightness(device->backlight);
+
+ device->cooling_dev = thermal_cooling_device_register("LCD",
+ device->dev, &video_cooling_ops);
+ if (IS_ERR(device->cooling_dev)) {
+ /*
+ * Set cooling_dev to NULL so we don't crash trying to
+ * free it.
+ * Also, why the hell we are returning early and
+ * not attempt to register video output if cooling
+ * device registration failed?
+ * -- dtor
+ */
+ device->cooling_dev = NULL;
+ return;
+ }
+
+ dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+ device->cooling_dev->id);
+ result = sysfs_create_link(&device->dev->dev.kobj,
+ &device->cooling_dev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ result = sysfs_create_link(&device->cooling_dev->device.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ }
+}
+
+static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_register_backlight(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ video->pm_nb.notifier_call = acpi_video_resume;
+ video->pm_nb.priority = 0;
+ return register_pm_notifier(&video->pm_nb);
+}
+
+static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
+{
+ if (device->backlight) {
+ backlight_device_unregister(device->backlight);
+ device->backlight = NULL;
+ }
+ if (device->brightness) {
+ kfree(device->brightness->levels);
+ kfree(device->brightness);
+ device->brightness = NULL;
+ }
+ if (device->cooling_dev) {
+ sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
+ thermal_cooling_device_unregister(device->cooling_dev);
+ device->cooling_dev = NULL;
+ }
+}
+
+static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+ int error = unregister_pm_notifier(&video->pm_nb);
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_unregister_backlight(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ return error;
+}
+
+static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
+{
+ acpi_status status;
+ struct acpi_device *adev = device->dev;
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify, device);
+ if (ACPI_FAILURE(status))
+ dev_err(&adev->dev, "Error installing notify handler\n");
+ else
+ device->flags.notify = 1;
+}
+
+static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
+{
+ struct input_dev *input;
+ struct acpi_video_device *dev;
+ int error;
+
+ video->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ error = acpi_video_bus_start_devices(video);
+ if (error)
+ goto err_free_input;
+
+ snprintf(video->phys, sizeof(video->phys),
+ "%s/video/input0", acpi_device_hid(video->device));
+
+ input->name = acpi_device_name(video->device);
+ input->phys = video->phys;
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x06;
+ input->dev.parent = &video->device->dev;
+ input->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+ set_bit(KEY_VIDEO_NEXT, input->keybit);
+ set_bit(KEY_VIDEO_PREV, input->keybit);
+ set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+ set_bit(KEY_BRIGHTNESSUP, input->keybit);
+ set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+ set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+ set_bit(KEY_DISPLAY_OFF, input->keybit);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_stop_dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_add_notify_handler(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+
+err_stop_dev:
+ acpi_video_bus_stop_devices(video);
+err_free_input:
+ input_free_device(input);
+ video->input = NULL;
+out:
+ return error;
+}
+
+static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
+{
+ if (dev->flags.notify) {
+ acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify);
+ dev->flags.notify = 0;
+ }
+}
+
+static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_remove_notify_handler(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ acpi_video_bus_stop_devices(video);
+ input_unregister_device(video->input);
+ video->input = NULL;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev, *next;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+ list_del(&dev->entry);
+ kfree(dev);
+ }
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+}
+
static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
- struct input_dev *input;
int error;
acpi_status status;
@@ -1748,62 +1789,24 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_put_video;
- video->input = input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_put_video;
- }
-
- error = acpi_video_bus_start_devices(video);
- if (error)
- goto err_free_input_dev;
-
- snprintf(video->phys, sizeof(video->phys),
- "%s/video/input0", acpi_device_hid(video->device));
-
- input->name = acpi_device_name(video->device);
- input->phys = video->phys;
- input->id.bustype = BUS_HOST;
- input->id.product = 0x06;
- input->dev.parent = &device->dev;
- input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
- set_bit(KEY_VIDEO_NEXT, input->keybit);
- set_bit(KEY_VIDEO_PREV, input->keybit);
- set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
- set_bit(KEY_BRIGHTNESSUP, input->keybit);
- set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
- set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
- set_bit(KEY_DISPLAY_OFF, input->keybit);
-
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
video->flags.post ? "yes" : "no");
+ mutex_lock(&video_list_lock);
+ list_add_tail(&video->entry, &video_bus_head);
+ mutex_unlock(&video_list_lock);
- video->pm_nb.notifier_call = acpi_video_resume;
- video->pm_nb.priority = 0;
- error = register_pm_notifier(&video->pm_nb);
- if (error)
- goto err_stop_video;
-
- error = input_register_device(input);
- if (error)
- goto err_unregister_pm_notifier;
+ acpi_video_bus_register_backlight(video);
+ acpi_video_bus_add_notify_handler(video);
return 0;
- err_unregister_pm_notifier:
- unregister_pm_notifier(&video->pm_nb);
- err_stop_video:
- acpi_video_bus_stop_devices(video);
- err_free_input_dev:
- input_free_device(input);
- err_put_video:
+err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
- err_free_video:
+err_free_video:
kfree(video);
device->driver_data = NULL;
@@ -1820,12 +1823,14 @@ static int acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device);
- unregister_pm_notifier(&video->pm_nb);
-
- acpi_video_bus_stop_devices(video);
+ acpi_video_bus_remove_notify_handler(video);
+ acpi_video_bus_unregister_backlight(video);
acpi_video_bus_put_devices(video);
- input_unregister_device(video->input);
+ mutex_lock(&video_list_lock);
+ list_del(&video->entry);
+ mutex_unlock(&video_list_lock);
+
kfree(video->attached_array);
kfree(video);
@@ -1874,6 +1879,9 @@ int acpi_video_register(void)
return 0;
}
+ mutex_init(&video_list_lock);
+ INIT_LIST_HEAD(&video_bus_head);
+
result = acpi_bus_register_driver(&acpi_video_bus);
if (result < 0)
return -ENODEV;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 940edbf2fe8..84875fd4c74 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,6 +168,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
{ },
};
@@ -233,11 +241,11 @@ static void acpi_video_caps_check(void)
acpi_video_get_capabilities(NULL);
}
-bool acpi_video_backlight_quirks(void)
+bool acpi_osi_is_win8(void)
{
return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
-EXPORT_SYMBOL(acpi_video_backlight_quirks);
+EXPORT_SYMBOL(acpi_osi_is_win8);
/* Promote the vendor interface instead of the generic video module.
* This function allow DMI blacklists to be implemented by externals
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c6707278a6b..c4876ac9151 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -552,7 +552,6 @@ amba_aphb_device_add(struct device *parent, const char *name,
if (!dev)
return ERR_PTR(-ENOMEM);
- dev->dma_mask = dma_mask;
dev->dev.coherent_dma_mask = dma_mask;
dev->irq[0] = irq1;
dev->irq[1] = irq2;
@@ -619,7 +618,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev_set_name(&dev->dev, "%s", name);
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
- dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->res.name = dev_name(&dev->dev);
}
@@ -663,9 +662,6 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
amba_device_initialize(dev, dev->dev.init_name);
dev->dev.init_name = NULL;
- if (!dev->dev.coherent_dma_mask && dev->dma_mask)
- dev_warn(&dev->dev, "coherent dma mask is unset\n");
-
return amba_device_add(dev, parent);
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8e28f923cf7..e2903d03180 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -292,6 +292,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 11456371f29..2289efdf820 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -339,6 +339,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
.sdev_attrs = ahci_sdev_attrs
extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_platform_ops;
extern struct ata_port_operations ahci_pmp_retry_srst_ops;
unsigned int ahci_dev_classify(struct ata_port *ap);
@@ -368,6 +369,7 @@ irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);
int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
+void ahci_error_handler(struct ata_port *ap);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 58debb0acc3..ae2d73fe321 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1,6 +1,6 @@
/*
+ * copyright (c) 2013 Freescale Semiconductor, Inc.
* Freescale IMX AHCI SATA platform driver
- * Copyright 2013 Freescale Semiconductor, Inc.
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
*
@@ -25,10 +25,13 @@
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/libata.h>
#include "ahci.h"
enum {
- HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+ PORT_PHY_CTL = 0x178, /* Port0 PHY Control */
+ PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */
+ HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
};
struct imx_ahci_priv {
@@ -36,6 +39,56 @@ struct imx_ahci_priv {
struct clk *sata_ref_clk;
struct clk *ahb_clk;
struct regmap *gpr;
+ bool no_device;
+ bool first_time;
+};
+
+static int ahci_imx_hotplug;
+module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+static void ahci_imx_error_handler(struct ata_port *ap)
+{
+ u32 reg_val;
+ struct ata_device *dev;
+ struct ata_host *host = dev_get_drvdata(ap->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+
+ ahci_error_handler(ap);
+
+ if (!(imxpriv->first_time) || ahci_imx_hotplug)
+ return;
+
+ imxpriv->first_time = false;
+
+ ata_for_each_dev(dev, &ap->link, ENABLED)
+ return;
+ /*
+ * Disable link to save power. An imx ahci port can't be recovered
+ * without full reset once the pddq mode is enabled, making it
+ * impossible to use as part of libata LPM.
+ */
+ reg_val = readl(mmio + PORT_PHY_CTL);
+ writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+ imxpriv->no_device = true;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+ .inherits = &ahci_platform_ops,
+ .error_handler = ahci_imx_error_handler,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_imx_ops,
};
static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
@@ -117,9 +170,51 @@ static void imx6q_sata_exit(struct device *dev)
clk_disable_unprepare(imxpriv->sata_ref_clk);
}
+static int imx_ahci_suspend(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ /*
+ * If no_device is set, the clocks have already been gated off during
+ * initialization, so don't do it again here.
+ */
+ if (!imxpriv->no_device) {
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+ }
+
+ return 0;
+}
+
+static int imx_ahci_resume(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ int ret;
+
+ if (!imxpriv->no_device) {
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+ if (ret < 0) {
+ dev_err(dev, "pre-enable sata_ref clock err:%d\n", ret);
+ return ret;
+ }
+
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static struct ahci_platform_data imx6q_sata_pdata = {
.init = imx6q_sata_init,
.exit = imx6q_sata_exit,
+ .ata_port_info = &ahci_imx_port_info,
+ .suspend = imx_ahci_suspend,
+ .resume = imx_ahci_resume,
};
static const struct of_device_id imx_ahci_of_match[] = {
@@ -152,6 +247,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
ahci_dev = &ahci_pdev->dev;
ahci_dev->parent = dev;
+ imxpriv->no_device = false;
+ imxpriv->first_time = true;
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
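
The ahci_imx hunks above build the driver's ata_port_operations by inheriting ahci_platform_ops (made extern elsewhere in this series) and overriding only ->error_handler, which calls the stock ahci_error_handler() before doing platform-specific power-down. A hedged sketch of that libata inheritance idiom, with illustrative names, inside a driver that includes drivers/ata/ahci.h:

#include <linux/libata.h>
#include "ahci.h"

static void example_error_handler(struct ata_port *ap)
{
	ahci_error_handler(ap);		/* run the generic AHCI EH first */
	/* ... platform-specific recovery or link power-down ... */
}

static struct ata_port_operations example_port_ops = {
	.inherits	= &ahci_platform_ops,
	.error_handler	= example_error_handler,
};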
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 7d3b85385bf..f9554318504 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -49,10 +49,11 @@ static struct platform_device_id ahci_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, ahci_devtype);
-static struct ata_port_operations ahci_platform_ops = {
+struct ata_port_operations ahci_platform_ops = {
.inherits = &ahci_ops,
.host_stop = ahci_host_stop,
};
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
static struct ata_port_operations ahci_platform_retry_srst_ops = {
.inherits = &ahci_pmp_retry_srst_ops,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 513ad7ed0c9..6334c8d7c3f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -100,7 +100,7 @@
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
- ICH5_PMR = 0x90, /* port mapping register */
+ ICH5_PMR = 0x90, /* address map register */
ICH5_PCS = 0x92, /* port control and status */
PIIX_SIDPR_BAR = 5,
PIIX_SIDPR_LEN = 16,
@@ -233,7 +233,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
/* 82801GB/GR/GH (ICH7, identical to ICH6) */
{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
- /* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+ /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
/* Enterprise Southbridge 2 (631xESB/632xESB) */
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
@@ -517,7 +517,7 @@ static int ich_pata_cable_detect(struct ata_port *ap)
const struct ich_laptop *lap = &ich_laptop[0];
u8 mask;
- /* Check for specials - Acer Aspire 5602WLMi */
+ /* Check for specials */
while (lap->device) {
if (lap->device == pdev->device &&
lap->subvendor == pdev->subsystem_vendor &&
@@ -1366,38 +1366,39 @@ static const int *piix_init_sata_map(struct pci_dev *pdev,
const int *map;
int i, invalid_map = 0;
u8 map_value;
+ char buf[32];
+ char *p = buf, *end = buf + sizeof(buf);
pci_read_config_byte(pdev, ICH5_PMR, &map_value);
map = map_db->map[map_value & map_db->mask];
- dev_info(&pdev->dev, "MAP [");
for (i = 0; i < 4; i++) {
switch (map[i]) {
case RV:
invalid_map = 1;
- pr_cont(" XX");
+ p += scnprintf(p, end - p, " XX");
break;
case NA:
- pr_cont(" --");
+ p += scnprintf(p, end - p, " --");
break;
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich_pata_100];
i++;
- pr_cont(" IDE IDE");
+ p += scnprintf(p, end - p, " IDE IDE");
break;
default:
- pr_cont(" P%d", map[i]);
+ p += scnprintf(p, end - p, " P%d", map[i]);
if (i & 1)
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
- pr_cont(" ]\n");
+ dev_info(&pdev->dev, "MAP [%s ]\n", buf);
if (invalid_map)
dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
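
The ata_piix hunk above replaces the dev_info()/pr_cont() mix, whose pieces can interleave with output from other CPUs, with a single dev_info() of a string assembled via scnprintf(). A minimal sketch of that buffer-building idiom, with illustrative names:

#include <linux/device.h>
#include <linux/kernel.h>

/* Build one log line piecewise, then emit it in a single dev_info() call. */
static void example_log_map(struct device *dev, const int *map, int n)
{
	char buf[32] = "";
	char *p = buf, *end = buf + sizeof(buf);
	int i;

	for (i = 0; i < n; i++)
		p += scnprintf(p, end - p, " P%d", map[i]);

	dev_info(dev, "MAP [%s ]\n", buf);
}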
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index aaac4fb0d56..c482f8cadd7 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -89,7 +89,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_dev_config(struct ata_device *dev);
#ifdef CONFIG_PM
@@ -189,14 +188,15 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
-int ahci_em_messages = 1;
+static bool ahci_em_messages __read_mostly = true;
EXPORT_SYMBOL_GPL(ahci_em_messages);
-module_param(ahci_em_messages, int, 0444);
+module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
"AHCI Enclosure Management Message control (0 = off, 1 = on)");
-int devslp_idle_timeout = 1000; /* device sleep idle timeout in ms */
+/* device sleep idle timeout in ms */
+static int devslp_idle_timeout __read_mostly = 1000;
module_param(devslp_idle_timeout, int, 0644);
MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
@@ -1275,9 +1275,11 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
const char *reason = NULL;
unsigned long now, msecs;
struct ata_taskfile tf;
+ bool fbs_disabled = false;
int rc;
DPRINTK("ENTER\n");
@@ -1287,6 +1289,16 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
if (rc && rc != -EOPNOTSUPP)
ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+ /*
+ * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
+ * clear PxFBS.EN to '0' prior to issuing software reset to devices
+ * that are attached to a port multiplier.
+ */
+ if (!ata_is_host_link(link) && pp->fbs_enabled) {
+ ahci_disable_fbs(ap);
+ fbs_disabled = true;
+ }
+
ata_tf_init(link->device, &tf);
/* issue the first D2H Register FIS */
@@ -1327,6 +1339,10 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
} else
*class = ahci_dev_classify(ap);
+ /* re-enable FBS if disabled before */
+ if (fbs_disabled)
+ ahci_enable_fbs(ap);
+
DPRINTK("EXIT, class=%u\n", *class);
return 0;
@@ -1989,7 +2005,7 @@ static void ahci_thaw(struct ata_port *ap)
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
-static void ahci_error_handler(struct ata_port *ap)
+void ahci_error_handler(struct ata_port *ap)
{
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
@@ -2002,6 +2018,7 @@ static void ahci_error_handler(struct ata_port *ap)
if (!ata_dev_enabled(ap->link.device))
ahci_stop_engine(ap);
}
+EXPORT_SYMBOL_GPL(ahci_error_handler);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ab714d2ad97..4372cfa883c 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap)
if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
return;
- ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no));
+ acpi_preset_companion(&ap->tdev, host_handle, ap->port_no);
if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
@@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev)
parent_handle = port_handle;
}
- ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr));
+ acpi_preset_companion(&dev->tdev, parent_handle, adr);
register_hotplug_dock_device(ata_dev_acpi_handle(dev),
&ata_acpi_dev_dock_ops, dev, NULL, NULL);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 83b1a9fb2d4..81a94a3919d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4126,6 +4126,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/* Devices we expect to fail diagnostics */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 370462fa8e0..92d7797223b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2293,6 +2293,7 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_IDLE, "IDLE" },
{ ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
{ ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
+ { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
{ ATA_CMD_NOP, "NOP" },
{ ATA_CMD_FLUSH, "FLUSH CACHE" },
{ ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
@@ -2313,6 +2314,8 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
+ { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
{ ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
{ ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
@@ -2339,12 +2342,15 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
{ ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
{ ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
+ { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
{ ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
{ ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
{ ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
{ ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
{ ATA_CMD_PMP_READ, "READ BUFFER" },
+ { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
{ ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
+ { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
{ ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
{ ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
{ ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
@@ -2363,6 +2369,8 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
{ ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
{ ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
+ { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
+ { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
{ ATA_CMD_READ_LONG, "READ LONG (with retries)" },
{ ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
{ ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
@@ -3009,7 +3017,7 @@ static inline void ata_eh_pull_park_action(struct ata_port *ap)
* ourselves at the beginning of each pass over the loop.
*
* Additionally, all write accesses to &ap->park_req_pending
- * through INIT_COMPLETION() (see below) or complete_all()
+ * through reinit_completion() (see below) or complete_all()
* (see ata_scsi_park_store()) are protected by the host lock.
* As a result we have that park_req_pending.done is zero on
* exit from this function, i.e. when ATA_EH_PARK actions for
@@ -3023,7 +3031,7 @@ static inline void ata_eh_pull_park_action(struct ata_port *ap)
*/
spin_lock_irqsave(ap->lock, flags);
- INIT_COMPLETION(ap->park_req_pending);
+ reinit_completion(&ap->park_req_pending);
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ALL) {
struct ata_eh_info *ehi = &link->eh_info;
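
The libata-eh change above is part of the tree-wide rename of INIT_COMPLETION() to reinit_completion(), which only resets the done counter of an already initialised completion so it can be reused for another pass. A hedged sketch of that reuse pattern; the context struct is hypothetical, libata's real object is ap->park_req_pending.

#include <linux/completion.h>

struct example_ctx {
	struct completion pending;
};

static void example_init(struct example_ctx *ctx)
{
	init_completion(&ctx->pending);		/* one-time initialisation */
}

static void example_rearm(struct example_ctx *ctx)
{
	reinit_completion(&ctx->pending);	/* reset ->done before the next pass */
}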
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 150a917f0c3..e3741322822 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -321,25 +321,25 @@ int ata_tport_add(struct device *parent,
/*
* ATA link attributes
*/
+static int noop(int x) { return x; }
-
-#define ata_link_show_linkspeed(field) \
+#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_link *link = transport_class_to_link(dev); \
\
- return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
+ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
}
-#define ata_link_linkspeed_attr(field) \
- ata_link_show_linkspeed(field) \
+#define ata_link_linkspeed_attr(field, format) \
+ ata_link_show_linkspeed(field, format) \
static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
-ata_link_linkspeed_attr(hw_sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd);
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
static DECLARE_TRANSPORT_CLASS(ata_link_class,
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 853f610af28..e88690ebfd8 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
struct dma_async_tx_descriptor *tx;
struct dma_chan *chan = acdev->dma_chan;
dma_cookie_t cookie;
- unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP;
+ unsigned long flags = DMA_PREP_INTERRUPT;
int ret = 0;
tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 1ec53f8ca96..ddf470c2341 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -144,6 +144,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
struct ata_host *host;
struct ata_port *ap;
struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+ int ret;
cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
return -ENOMEM;
/* acquire resources and fill host */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index c51bbb9ea8e..83c4ddb1bc7 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1014,8 +1014,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
}
cf_port->c0 = ap->ioaddr.ctl_addr;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ return rv;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
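
The two CF driver hunks above stop poking dev->coherent_dma_mask and dev->dma_mask by hand and instead call the DMA API helpers (dma_set_coherent_mask() and dma_coerce_mask_and_coherent()), which validate the requested mask. A minimal probe-time sketch of the octeon-style conversion; names other than the helpers are illustrative.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Point dev->dma_mask at the coherent mask and set both to 64 bit,
	 * failing the probe if the platform cannot honour that mask.
	 */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* ... map resources and register the ATA host ... */
	return 0;
}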
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 2e391730e8b..523524b6802 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -31,6 +31,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 851bd3f43ac..fb0b40a191c 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -24,6 +24,8 @@
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
static unsigned int intr_coalescing_count;
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 7f5e5d96327..ea3b3dc10f3 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -343,13 +343,11 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
struct device_node *sata_node = dev->of_node;
int phy_count = 0, phy, port = 0, i;
- void __iomem *cphy_base[CPHY_PHY_COUNT];
- struct device_node *phy_nodes[CPHY_PHY_COUNT];
- u32 tx_atten[CPHY_PORT_COUNT];
+ void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
+ struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
+ u32 tx_atten[CPHY_PORT_COUNT] = {};
memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
- memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
- memset(tx_atten, 0xff, CPHY_PORT_COUNT);
do {
u32 tmp;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c2d95e9fb97..1dae9a9009f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -792,7 +792,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get access to sata clock\n");
return PTR_ERR(priv->clk);
}
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -822,7 +822,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
return 0;
cleanup:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -841,7 +841,7 @@ static int sata_rcar_remove(struct platform_device *pdev)
iowrite32(0, base + SATAINTSTAT_REG);
iowrite32(0x7ff, base + SATAINTMASK_REG);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
@@ -861,7 +861,7 @@ static int sata_rcar_suspend(struct device *dev)
/* mask */
iowrite32(0x7ff, base + SATAINTMASK_REG);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
return ret;
@@ -873,7 +873,7 @@ static int sata_rcar_resume(struct device *dev)
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
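
sata_rcar switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), since with the common clock framework a clock must be prepared (which may sleep) before it can be enabled. A short sketch of the pairing, with the clock handle standing in for priv->clk:

#include <linux/clk.h>

static int example_clock_on(struct clk *clk)
{
	/* prepare (may sleep) and enable in one helper */
	return clk_prepare_enable(clk);
}

static void example_clock_off(struct clk *clk)
{
	clk_disable_unprepare(clk);
}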
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
index 49e783e35ee..364eded3188 100644
--- a/drivers/atm/firestream.h
+++ b/drivers/atm/firestream.h
@@ -420,7 +420,6 @@ struct fs_transmit_config {
#define RC_FLAGS_BFPS_BFP27 (0xd << 17)
#define RC_FLAGS_BFPS_BFP47 (0xe << 17)
-#define RC_FLAGS_BFPS (0x1 << 17)
#define RC_FLAGS_BFPP (0x1 << 21)
#define RC_FLAGS_TEVC (0x1 << 22)
#define RC_FLAGS_TEP (0x1 << 23)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 272f0092776..1bdf104e90b 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev)
tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
if (tmp) {
memcpy(card->atmdev->esi, tmp->dev_addr, 6);
-
+ dev_put(tmp);
printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
}
/*
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index d585735430d..a3874034e2c 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -102,8 +102,7 @@ static int cfag12864bfb_probe(struct platform_device *device)
platform_set_drvdata(device, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 4c289ab9135..73f6c292528 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -591,37 +591,6 @@ void bus_remove_device(struct device *dev)
bus_put(dev->bus);
}
-static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
-{
- int error = 0;
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; bus->drv_attrs[i].attr.name; i++) {
- error = driver_create_file(drv, &bus->drv_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- goto done;
-}
-
-static void driver_remove_attrs(struct bus_type *bus,
- struct device_driver *drv)
-{
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; bus->drv_attrs[i].attr.name; i++)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- }
-}
-
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
@@ -720,16 +689,12 @@ int bus_add_driver(struct device_driver *drv)
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
- error = driver_add_attrs(bus, drv);
+ error = driver_add_groups(drv, bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
- printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
- __func__, drv->name);
- }
- error = driver_add_groups(drv, bus->drv_groups);
- if (error)
printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
__func__, drv->name);
+ }
if (!drv->suppress_bind_attrs) {
error = add_bind_files(drv);
@@ -766,7 +731,6 @@ void bus_remove_driver(struct device_driver *drv)
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
- driver_remove_attrs(drv->bus, drv);
driver_remove_groups(drv, drv->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
@@ -846,42 +810,6 @@ struct bus_type *find_bus(char *name)
}
#endif /* 0 */
-
-/**
- * bus_add_attrs - Add default attributes for this bus.
- * @bus: Bus that has just been registered.
- */
-
-static int bus_add_attrs(struct bus_type *bus)
-{
- int error = 0;
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; bus->bus_attrs[i].attr.name; i++) {
- error = bus_create_file(bus, &bus->bus_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- goto done;
-}
-
-static void bus_remove_attrs(struct bus_type *bus)
-{
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; bus->bus_attrs[i].attr.name; i++)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- }
-}
-
static int bus_add_groups(struct bus_type *bus,
const struct attribute_group **groups)
{
@@ -983,9 +911,6 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_probe_files_fail;
- retval = bus_add_attrs(bus);
- if (retval)
- goto bus_attrs_fail;
retval = bus_add_groups(bus, bus->bus_groups);
if (retval)
goto bus_groups_fail;
@@ -994,8 +919,6 @@ int bus_register(struct bus_type *bus)
return 0;
bus_groups_fail:
- bus_remove_attrs(bus);
-bus_attrs_fail:
remove_probe_files(bus);
bus_probe_files_fail:
kset_unregister(bus->p->drivers_kset);
@@ -1024,7 +947,6 @@ void bus_unregister(struct bus_type *bus)
pr_debug("bus: '%s': unregistering\n", bus->name);
if (bus->dev_root)
device_unregister(bus->dev_root);
- bus_remove_attrs(bus);
bus_remove_groups(bus, bus->bus_groups);
remove_probe_files(bus);
kset_unregister(bus->p->drivers_kset);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8b7818b8005..f96f70419a7 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,18 +47,6 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-static const void *class_attr_namespace(struct kobject *kobj,
- const struct attribute *attr)
-{
- struct class_attribute *class_attr = to_class_attr(attr);
- struct subsys_private *cp = to_subsys_private(kobj);
- const void *ns = NULL;
-
- if (class_attr->namespace)
- ns = class_attr->namespace(cp->class, class_attr);
- return ns;
-}
-
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
@@ -86,7 +74,6 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
- .namespace = class_attr_namespace,
};
static struct kobj_type class_ktype = {
@@ -99,21 +86,23 @@ static struct kobj_type class_ktype = {
static struct kset *class_kset;
-int class_create_file(struct class *cls, const struct class_attribute *attr)
+int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
{
int error;
if (cls)
- error = sysfs_create_file(&cls->p->subsys.kobj,
- &attr->attr);
+ error = sysfs_create_file_ns(&cls->p->subsys.kobj,
+ &attr->attr, ns);
else
error = -EINVAL;
return error;
}
-void class_remove_file(struct class *cls, const struct class_attribute *attr)
+void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
{
if (cls)
- sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr);
+ sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
}
static struct class *class_get(struct class *cls)
@@ -600,8 +589,8 @@ int __init classes_init(void)
return 0;
}
-EXPORT_SYMBOL_GPL(class_create_file);
-EXPORT_SYMBOL_GPL(class_remove_file);
+EXPORT_SYMBOL_GPL(class_create_file_ns);
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
EXPORT_SYMBOL_GPL(class_unregister);
EXPORT_SYMBOL_GPL(class_destroy);
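
The class.c hunks drop the per-attribute ->namespace callback and instead export namespace-aware class_create_file_ns()/class_remove_file_ns(), with the caller supplying the tag directly. A hedged sketch of a non-namespaced caller, passing NULL where no tag applies; the attribute and helper names are illustrative.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t example_show(struct class *cls, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}

static struct class_attribute example_class_attr =
	__ATTR(example, S_IRUGO, example_show, NULL);

static int example_add_class_attr(struct class *cls)
{
	/* a NULL namespace tag gives the old class_create_file() behaviour */
	return class_create_file_ns(cls, &example_class_attr, NULL);
}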
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 34abf4d8a45..67b180d855b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -455,64 +455,6 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(online);
-static int device_add_attributes(struct device *dev,
- struct device_attribute *attrs)
-{
- int error = 0;
- int i;
-
- if (attrs) {
- for (i = 0; attrs[i].attr.name; i++) {
- error = device_create_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_attributes(struct device *dev,
- struct device_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attrs[i].attr.name; i++)
- device_remove_file(dev, &attrs[i]);
-}
-
-static int device_add_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int error = 0;
- int i;
-
- if (attrs) {
- for (i = 0; attrs[i].attr.name; i++) {
- error = device_create_bin_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_bin_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attrs[i].attr.name; i++)
- device_remove_bin_file(dev, &attrs[i]);
-}
-
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
@@ -534,18 +476,12 @@ static int device_add_attrs(struct device *dev)
error = device_add_groups(dev, class->dev_groups);
if (error)
return error;
- error = device_add_attributes(dev, class->dev_attrs);
- if (error)
- goto err_remove_class_groups;
- error = device_add_bin_attributes(dev, class->dev_bin_attrs);
- if (error)
- goto err_remove_class_attrs;
}
if (type) {
error = device_add_groups(dev, type->groups);
if (error)
- goto err_remove_class_bin_attrs;
+ goto err_remove_class_groups;
}
error = device_add_groups(dev, dev->groups);
@@ -563,12 +499,6 @@ static int device_add_attrs(struct device *dev)
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
- err_remove_class_bin_attrs:
- if (class)
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
- err_remove_class_attrs:
- if (class)
- device_remove_attributes(dev, class->dev_attrs);
err_remove_class_groups:
if (class)
device_remove_groups(dev, class->dev_groups);
@@ -587,11 +517,8 @@ static void device_remove_attrs(struct device *dev)
if (type)
device_remove_groups(dev, type->groups);
- if (class) {
- device_remove_attributes(dev, class->dev_attrs);
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
+ if (class)
device_remove_groups(dev, class->dev_groups);
- }
}
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
@@ -1881,6 +1808,7 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
@@ -1888,8 +1816,7 @@ int device_rename(struct device *dev, const char *new_name)
if (!dev)
return -EINVAL;
- pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
- __func__, new_name);
+ dev_dbg(dev, "renaming to %s\n", new_name);
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!old_device_name) {
@@ -1898,13 +1825,14 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->subsys.kobj,
- &dev->kobj, old_device_name, new_name);
+ error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
+ kobj, old_device_name,
+ new_name, kobject_namespace(kobj));
if (error)
goto out;
}
- error = kobject_rename(&dev->kobj, new_name);
+ error = kobject_rename(kobj, new_name);
if (error)
goto out;
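With class->dev_attrs and class->dev_bin_attrs gone, class drivers publish per-device attributes through dev_groups instead. A minimal conversion sketch, with all names invented:

#include <linux/device.h>
#include <linux/module.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_dev_attrs[] = {
        &dev_attr_example.attr,
        NULL,
};
ATTRIBUTE_GROUPS(example_dev);          /* generates example_dev_groups */

static struct class example_class = {
        .name       = "example",
        .owner      = THIS_MODULE,
        .dev_groups = example_dev_groups,       /* replaces .dev_attrs */
};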
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 848ebbd2571..f48370dfc90 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev)
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
int from_nid, to_nid;
- int ret = -ENODEV;
-
- cpu_hotplug_driver_lock();
+ int ret;
from_nid = cpu_to_node(cpuid);
if (from_nid == NUMA_NO_NODE)
- goto out;
+ return -ENODEV;
ret = cpu_up(cpuid);
/*
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev)
if (from_nid != to_nid)
change_cpu_under_node(cpu, from_nid, to_nid);
- out:
- cpu_hotplug_driver_unlock();
return ret;
}
static int cpu_subsys_offline(struct device *dev)
{
- int ret;
-
- cpu_hotplug_driver_lock();
- ret = cpu_down(dev->id);
- cpu_hotplug_driver_unlock();
- return ret;
+ return cpu_down(dev->id);
}
void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_probe(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_probe(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_release(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_release(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 35fa3689891..06051767393 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -499,7 +499,7 @@ static void __device_release_driver(struct device *dev)
BUS_NOTIFY_UNBIND_DRIVER,
dev);
- pm_runtime_put(dev);
+ pm_runtime_put_sync(dev);
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 507379e7b76..545c4de412c 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, tot_size);
+ memset(dr, 0, offsetof(struct devres, data));
+
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
@@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
@@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
return dr->data;
@@ -745,58 +746,62 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
EXPORT_SYMBOL_GPL(devm_remove_action);
/*
- * Managed kzalloc/kfree
+ * Managed kmalloc/kfree
*/
-static void devm_kzalloc_release(struct device *dev, void *res)
+static void devm_kmalloc_release(struct device *dev, void *res)
{
/* noop */
}
-static int devm_kzalloc_match(struct device *dev, void *res, void *data)
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
- * devm_kzalloc - Resource-managed kzalloc
+ * devm_kmalloc - Resource-managed kmalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
- * Managed kzalloc. Memory allocated with this function is
+ * Managed kmalloc. Memory allocated with this function is
* automatically freed on driver detach. Like all other devres
* resources, guaranteed alignment is unsigned long long.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kzalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp);
if (unlikely(!dr))
return NULL;
+ /*
+	 * This is named devm_kzalloc_release for historical reasons: the
+	 * initial implementation supported only kzalloc, not kmalloc.
+ */
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devm_kzalloc);
+EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with devm_kzalloc().
+ * Free memory allocated with devm_kmalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
+ rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
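A short usage sketch of the managed allocators after this change; the driver, struct and sizes are invented, and the matching devm_kmalloc() declaration is assumed to land in <linux/device.h>. devm_kzalloc() keeps its zeroing behaviour via __GFP_ZERO, while devm_kmalloc() hands back an uninitialised buffer with the same device-bound lifetime:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
        void *scratch;
};

static int example_probe(struct platform_device *pdev)
{
        struct example_priv *priv;

        /* zeroed allocation, freed automatically on driver detach */
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* uninitialised buffer, same managed lifetime */
        priv->scratch = devm_kmalloc(&pdev->dev, 512, GFP_KERNEL);
        if (!priv->scratch)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        return 0;
}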
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 7413d065906..0f3820121e0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -216,7 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
+ notify_change(dentry, &newattrs, NULL);
mutex_unlock(&dentry->d_inode->i_mutex);
/* mark as kernel-created inode */
@@ -322,9 +322,9 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
+ notify_change(dentry, &newattrs, NULL);
mutex_unlock(&dentry->d_inode->i_mutex);
- err = vfs_unlink(parent.dentry->d_inode, dentry);
+ err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 99802d6f3c6..165c2c299e5 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -49,7 +49,7 @@ struct cma *dma_contiguous_default_area;
/*
* Default global CMA area size can be defined in kernel's .config.
- * This is usefull mainly for distro maintainers to create a kernel
+ * This is useful mainly for distro maintainers to create a kernel
* that works correctly for most supported systems.
* The size can be set in bytes or as a percentage of the total memory
* in the system.
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 10a4467c63f..eb8fb94ae2c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -282,31 +282,35 @@ static noinline_for_stack long fw_file_size(struct file *file)
return st.size;
}
-static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
long size;
char *buf;
+ int rc;
size = fw_file_size(file);
if (size <= 0)
- return false;
+ return -EINVAL;
buf = vmalloc(size);
if (!buf)
- return false;
- if (kernel_read(file, 0, buf, size) != size) {
+ return -ENOMEM;
+ rc = kernel_read(file, 0, buf, size);
+ if (rc != size) {
+ if (rc > 0)
+ rc = -EIO;
vfree(buf);
- return false;
+ return rc;
}
fw_buf->data = buf;
fw_buf->size = size;
- return true;
+ return 0;
}
-static bool fw_get_filesystem_firmware(struct device *device,
+static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
int i;
- bool success = false;
+ int rc = -ENOENT;
char *path = __getname();
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
@@ -321,14 +325,17 @@ static bool fw_get_filesystem_firmware(struct device *device,
file = filp_open(path, O_RDONLY, 0);
if (IS_ERR(file))
continue;
- success = fw_read_file_contents(file, buf);
+ rc = fw_read_file_contents(file, buf);
fput(file);
- if (success)
+ if (rc)
+			dev_warn(device, "firmware: attempted to load %s, but failed with error %d\n",
+ path, rc);
+ else
break;
}
__putname(path);
- if (success) {
+ if (!rc) {
dev_dbg(device, "firmware: direct-loading firmware %s\n",
buf->fw_id);
mutex_lock(&fw_lock);
@@ -337,7 +344,7 @@ static bool fw_get_filesystem_firmware(struct device *device,
mutex_unlock(&fw_lock);
}
- return success;
+ return rc;
}
/* firmware holds the ownership of pages */
@@ -1086,9 +1093,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
}
}
- if (!fw_get_filesystem_firmware(device, fw->priv))
+ ret = fw_get_filesystem_firmware(device, fw->priv);
+ if (ret) {
+ dev_warn(device, "Direct firmware load failed with error %d\n",
+ ret);
+ dev_warn(device, "Falling back to user helper\n");
ret = fw_load_from_user_helper(fw, name, device,
uevent, nowait, timeout);
+ }
/* don't cache firmware handled without uevent */
if (!ret)
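Driver-visible behaviour is unchanged; a minimal request sketch (firmware path and wrapper are invented) still makes a single request_firmware() call, with the direct filesystem attempt now reporting its errno before the user-mode helper fallback:

#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "example/image.bin", dev);
        if (err)
                return err;     /* both direct load and helper fallback failed */

        /* ... consume fw->data / fw->size ... */

        release_firmware(fw);
        return 0;
}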
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4f8bef3eb5a..3a94b799f16 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
- ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
+ ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
if (pdevinfo->dma_mask) {
/*
@@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full(
ret = platform_device_add(pdev);
if (ret) {
err:
- ACPI_HANDLE_SET(&pdev->dev, NULL);
+ ACPI_COMPANION_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
err_alloc:
@@ -488,6 +488,11 @@ static int platform_drv_probe(struct device *_dev)
if (ret && ACPI_HANDLE(_dev))
acpi_dev_pm_detach(_dev, true);
+ if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+ dev_warn(_dev, "probe deferral not supported\n");
+ ret = -ENXIO;
+ }
+
return ret;
}
@@ -553,8 +558,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
* platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
- * @probe: the driver probe routine, probably from an __init section,
- * must not return -EPROBE_DEFER.
+ * @probe: the driver probe routine, probably from an __init section
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
@@ -565,8 +569,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
* into system-on-chip processors, where the controller devices have been
* configured as part of board setup.
*
- * This is incompatible with deferred probing so probe() must not
- * return -EPROBE_DEFER.
+ * Note that this is incompatible with deferred probing.
*
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
@@ -576,6 +579,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
{
int retval, code;
+ /*
+ * Prevent driver from requesting probe deferral to avoid further
+ * futile probe attempts.
+ */
+ drv->prevent_deferred_probe = true;
+
/* make sure driver won't have bind/unbind attributes */
drv->driver.suppress_bind_attrs = true;
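A hedged sketch of the intended calling pattern (driver and function names are invented): a probe routine registered through platform_driver_probe() must not defer, since the core now turns -EPROBE_DEFER into -ENXIO with a warning.

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
        /* must not return -EPROBE_DEFER */
        return 0;
}

static struct platform_driver example_driver = {
        .driver = {
                .name  = "example",
                .owner = THIS_MODULE,
        },
};

static int __init example_init(void)
{
        /* binds once to an already-registered, non-hotpluggable device */
        return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);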
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9f098a82cf0..1b41fca3d65 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -30,6 +30,8 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
#include "../base.h"
#include "power.h"
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
return error;
}
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+ struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+ struct dpm_watchdog *wd = (void *)data;
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = current;
+
+ init_timer_on_stack(timer);
+ /* use same timeout value for both suspend and resume */
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->function = dpm_watchdog_handler;
+ timer->data = (unsigned long)wd;
+ add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
dpm_wait(dev->parent, async);
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
/*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -687,7 +757,7 @@ void dpm_resume(pm_message_t state)
async_error = 0;
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- INIT_COMPLETION(dev->power.completion);
+ reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
async_schedule(async_resume, dev);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
dpm_wait_for_children(dev, async);
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -1164,7 +1237,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
static int device_suspend(struct device *dev)
{
- INIT_COMPLETION(dev->power.completion);
+ reinit_completion(&dev->power.completion);
if (pm_async_enabled && dev->power.async_suspend) {
get_device(dev);
@@ -1277,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_unlock(dev);
+ if (error)
+ pm_runtime_put(dev);
+
return error;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897c604..fa418741844 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
@@ -42,7 +42,7 @@
*/
/**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
* @node: opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected that only an optimal set of OPPs is
* added to the library by the SoC framework.
@@ -59,7 +59,7 @@
*
* This structure stores the OPP information for a given device.
*/
-struct opp {
+struct dev_pm_opp {
struct list_head node;
bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
}
/**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
*
* Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
*
* Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
* This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
*/
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
{
struct device_opp *dev_opp;
- struct opp *temp_opp;
+ struct dev_pm_opp *temp_opp;
int count = 0;
dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
return count;
}
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
* @available: true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * opp_add() - Add an OPP table from a table definitions
+ * dev_pm_opp_add() - Add an OPP to the device OPP table
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
* This function adds an opp definition to the opp list and returns status.
* The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
struct device_opp *dev_opp = NULL;
- struct opp *opp, *new_opp;
+ struct dev_pm_opp *opp, *new_opp;
struct list_head *head;
/* allocate new OPP node */
- new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
-EXPORT_SYMBOL_GPL(opp_add);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
* opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
- struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
/* keep the node allocated */
- new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
}
/**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
*
* Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP available
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to disable
*
* Disables a provided opp. If the operation is valid, this returns
* 0, else the corresponding error value. It is meant to be a temporary
* control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
-EXPORT_SYMBOL_GPL(opp_disable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
* @dev: device for which we do this operation
 * @table:	Cpufreq table returned to the caller
*
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
* Callers should ensure that this function is *NOT* called under RCU protection
* or in contexts where mutex locking cannot be used.
*/
-int opp_init_cpufreq_table(struct device *dev,
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
struct device_opp *dev_opp;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table;
int i = 0;
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
}
freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
- (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+ (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
if (!freq_table) {
mutex_unlock(&dev_opp_list_lock);
dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
/**
- * opp_free_cpufreq_table() - free the cpufreq table
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
* @dev: device for which we do this operation
* @table: table to free
*
- * Free up the table allocated by opp_init_cpufreq_table
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
-void opp_free_cpufreq_table(struct device *dev,
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
*/
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
struct device_opp *dev_opp = find_device_opp(dev);
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (opp_add(dev, freq, volt)) {
+ if (dev_pm_opp_add(dev, freq, volt)) {
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
continue;
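The renamed lookup helpers keep their RCU calling convention; a minimal sketch under the documented locking rules, with an arbitrary caller-supplied target frequency:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *freq,
                            unsigned long *volt)
{
        struct dev_pm_opp *opp;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq);     /* *freq is rounded up */
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        *volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();      /* opp must not be used past this point */

        return 0;
}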
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 268a3509757..72e00e66ecc 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
* Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
- return retval ? retval : rpm_suspend(dev, rpmflags);
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index f0d30543fcc..4251570610c 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
@@ -15,6 +15,9 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+config REGMAP_SPMI
+ tristate
+
config REGMAP_MMIO
tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index cf129980abd..a7c670b4123 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -3,5 +3,6 @@ obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 57f777835d9..33414b1de20 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -44,7 +44,6 @@ struct regmap_format {
struct regmap_async {
struct list_head list;
- struct work_struct cleanup;
struct regmap *map;
void *work_buf;
};
@@ -64,9 +63,11 @@ struct regmap {
void *bus_context;
const char *name;
+ bool async;
spinlock_t async_lock;
wait_queue_head_t async_waitq;
struct list_head async_list;
+ struct list_head async_free;
int async_ret;
#ifdef CONFIG_DEBUG_FS
@@ -179,6 +180,9 @@ struct regmap_field {
/* lsb */
unsigned int shift;
unsigned int reg;
+
+ unsigned int id_size;
+ unsigned int id_offset;
};
#ifdef CONFIG_DEBUG_FS
@@ -218,7 +222,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async);
+ const void *val, size_t val_len);
void regmap_async_complete_cb(struct regmap_async *async, int ret);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d6c2d691b6e..d4dd7713481 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -307,6 +307,8 @@ int regcache_sync(struct regmap *map)
if (!map->cache_dirty)
goto out;
+ map->async = true;
+
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
@@ -332,11 +334,15 @@ int regcache_sync(struct regmap *map)
map->cache_dirty = false;
out:
- trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
+ map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map->dev, name, "stop");
+
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
@@ -375,17 +381,23 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
if (!map->cache_dirty)
goto out;
+ map->async = true;
+
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, min, max);
else
ret = regcache_default_sync(map, min, max);
out:
- trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
+ map->async = false;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map->dev, name, "stop region");
+
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
@@ -631,8 +643,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
map->cache_bypass = 1;
- ret = _regmap_raw_write(map, base, *data, count * val_bytes,
- false);
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes);
map->cache_bypass = 0;
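Callers of regcache_sync() need no changes; in a typical power-cycle sketch like the one below (the surrounding driver is hypothetical), the sync's writes are now issued through the async path and completed before regcache_sync() returns:

#include <linux/regmap.h>

static int example_resume(struct regmap *map)
{
        regcache_cache_only(map, false);        /* hardware reachable again */
        regcache_mark_dirty(map);               /* whole cache needs writing out */

        return regcache_sync(map);              /* batched, completed on return */
}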
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index de11ecaf383..c5471cd6ebb 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -15,10 +15,19 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
+#include <linux/list.h>
#include "internal.h"
+struct regmap_debugfs_node {
+ struct regmap *map;
+ const char *name;
+ struct list_head link;
+};
+
static struct dentry *regmap_debugfs_root;
+static LIST_HEAD(regmap_debugfs_early_list);
+static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
@@ -465,6 +474,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct rb_node *next;
struct regmap_range_node *range_node;
+ /* If we don't have the debugfs root yet, postpone init */
+ if (!regmap_debugfs_root) {
+ struct regmap_debugfs_node *node;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return;
+ node->map = map;
+ node->name = name;
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_add(&node->link, &regmap_debugfs_early_list);
+ mutex_unlock(&regmap_debugfs_early_lock);
+ return;
+ }
+
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
@@ -519,18 +542,42 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
- debugfs_remove_recursive(map->debugfs);
- mutex_lock(&map->cache_lock);
- regmap_debugfs_free_dump_cache(map);
- mutex_unlock(&map->cache_lock);
- kfree(map->debugfs_name);
+ if (map->debugfs) {
+ debugfs_remove_recursive(map->debugfs);
+ mutex_lock(&map->cache_lock);
+ regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
+ kfree(map->debugfs_name);
+ } else {
+ struct regmap_debugfs_node *node, *tmp;
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
+ link) {
+ if (node->map == map) {
+ list_del(&node->link);
+ kfree(node);
+ }
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
+ }
}
void regmap_debugfs_initcall(void)
{
+ struct regmap_debugfs_node *node, *tmp;
+
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
if (!regmap_debugfs_root) {
pr_warn("regmap: Failed to create debugfs root\n");
return;
}
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
+ regmap_debugfs_init(node->map, node->name);
+ list_del(&node->link);
+ kfree(node);
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d10456ffd81..763c60d3d27 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -105,6 +105,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
"Failed to sync wakes in %x: %d\n",
reg, ret);
}
+
+ if (!d->chip->init_ack_masked)
+ continue;
+ /*
+		 * Ack all the masked interrupts unconditionally; otherwise a
+		 * masked interrupt which hasn't been acked will be ignored in
+		 * the irq handler and may cause an interrupt storm.
+ */
+ if (d->mask_buf[i] && d->chip->ack_base) {
+ reg = d->chip->ack_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_write(map, reg, d->mask_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ }
}
if (d->chip->runtime_pm)
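A hedged sketch of a chip description that opts into the new acking behaviour; register offsets, masks and names are invented, and only the fields relevant here are filled in:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
        { .reg_offset = 0, .mask = BIT(0) },
        { .reg_offset = 0, .mask = BIT(1) },
};

static const struct regmap_irq_chip example_irq_chip = {
        .name            = "example",
        .status_base     = 0x10,
        .mask_base       = 0x20,
        .ack_base        = 0x30,
        .num_regs        = 1,
        .irqs            = example_irqs,
        .num_irqs        = ARRAY_SIZE(example_irqs),
        .init_ack_masked = true,        /* ack masked-but-pending sources */
};

static int example_add_irqchip(struct regmap *map, int irq,
                               struct regmap_irq_chip_data **data)
{
        return regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0,
                                   &example_irq_chip, data);
}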
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4c506bd940f..37f12ae7aad 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -73,7 +73,8 @@ static int regmap_spi_async_write(void *context,
spi_message_init(&async->m);
spi_message_add_tail(&async->t[0], &async->m);
- spi_message_add_tail(&async->t[1], &async->m);
+ if (val)
+ spi_message_add_tail(&async->t[1], &async->m);
async->m.complete = regmap_spi_complete;
async->m.context = async;
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
new file mode 100644
index 00000000000..ac2391013db
--- /dev/null
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -0,0 +1,90 @@
+/*
+ * Register map access API - SPMI support
+ *
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * Based on regmap-i2c.c:
+ * Copyright 2011 Wolfson Microelectronics plc
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_spmi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ BUG_ON(reg_size != 2);
+ return spmi_ext_register_readl(context, *(u16 *)reg,
+ val, val_size);
+}
+
+static int regmap_spmi_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ BUG_ON(reg_size != 2);
+ return spmi_ext_register_writel(context, *(u16 *)reg, val, val_size);
+}
+
+static int regmap_spmi_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 2);
+ return regmap_spmi_gather_write(context, data, 2, data + 2, count - 2);
+}
+
+static struct regmap_bus regmap_spmi = {
+ .read = regmap_spmi_read,
+ .write = regmap_spmi_write,
+ .gather_write = regmap_spmi_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi(): Initialize register map
+ *
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi);
+
+/**
+ * devm_regmap_init_spmi(): Initialise managed register map
+ *
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi);
+
+MODULE_LICENSE("GPL");
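A hedged usage sketch for the new bus glue: the regmap_config below follows the 16-bit register / 8-bit value layout the helpers above assume, while the device, addresses and probe hook are illustrative only and depend on the separate SPMI core series.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spmi.h>

static const struct regmap_config example_regmap_config = {
        .reg_bits     = 16,     /* matches the BUG_ON(reg_size != 2) above */
        .val_bits     = 8,
        .max_register = 0xffff,
};

static int example_spmi_probe(struct spmi_device *sdev)
{
        struct regmap *map;

        map = devm_regmap_init_spmi(sdev, &example_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* ordinary regmap accessors from here on */
        return regmap_write(map, 0x0040, 0x01);
}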
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7d689a15c50..9c021d9cace 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val);
-static void async_cleanup(struct work_struct *work)
-{
- struct regmap_async *async = container_of(work, struct regmap_async,
- cleanup);
-
- kfree(async->work_buf);
- kfree(async);
-}
-
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
+ INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
if (config->read_flag_mask || config->write_flag_mask) {
@@ -821,6 +813,8 @@ static void regmap_field_init(struct regmap_field *rm_field,
rm_field->reg = reg_field.reg;
rm_field->shift = reg_field.lsb;
rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+ rm_field->id_size = reg_field.id_size;
+ rm_field->id_offset = reg_field.id_offset;
}
/**
@@ -942,12 +936,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
*/
void regmap_exit(struct regmap *map)
{
+ struct regmap_async *async;
+
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
+ while (!list_empty(&map->async_free)) {
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ list_del(&async->list);
+ kfree(async->work_buf);
+ kfree(async);
+ }
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1039,7 +1043,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
}
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async)
+ const void *val, size_t val_len)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -1091,7 +1095,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write(map, reg, val, win_residue *
- map->format.val_bytes, async);
+ map->format.val_bytes);
if (ret != 0)
return ret;
@@ -1114,49 +1118,72 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
u8[0] |= map->write_flag_mask;
- if (async && map->bus->async_write) {
- struct regmap_async *async = map->bus->async_alloc();
- if (!async)
- return -ENOMEM;
+ /*
+ * Essentially all I/O mechanisms will be faster with a single
+ * buffer to write. Since register syncs often generate raw
+	 * writes of single registers, optimise that case.
+ */
+ if (val != work_val && val_len == map->format.val_bytes) {
+ memcpy(work_val, val, map->format.val_bytes);
+ val = work_val;
+ }
+
+ if (map->async && map->bus->async_write) {
+ struct regmap_async *async;
trace_regmap_async_write_start(map->dev, reg, val_len);
- async->work_buf = kzalloc(map->format.buf_size,
- GFP_KERNEL | GFP_DMA);
- if (!async->work_buf) {
- kfree(async);
- return -ENOMEM;
+ spin_lock_irqsave(&map->async_lock, flags);
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ if (async)
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ if (!async) {
+ async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
}
- INIT_WORK(&async->cleanup, async_cleanup);
async->map = map;
/* If the caller supplied the value we can use it safely. */
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
map->format.reg_bytes + map->format.val_bytes);
- if (val == work_val)
- val = async->work_buf + map->format.pad_bytes +
- map->format.reg_bytes;
spin_lock_irqsave(&map->async_lock, flags);
list_add_tail(&async->list, &map->async_list);
spin_unlock_irqrestore(&map->async_lock, flags);
- ret = map->bus->async_write(map->bus_context, async->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes,
- val, val_len, async);
+ if (val != work_val)
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+ else
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len, NULL, 0, async);
if (ret != 0) {
dev_err(map->dev, "Failed to schedule write: %d\n",
ret);
spin_lock_irqsave(&map->async_lock, flags);
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
spin_unlock_irqrestore(&map->async_lock, flags);
-
- kfree(async->work_buf);
- kfree(async);
}
return ret;
@@ -1253,7 +1280,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
map->work_buf +
map->format.reg_bytes +
map->format.pad_bytes,
- map->format.val_bytes, false);
+ map->format.val_bytes);
}
static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1318,6 +1345,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
EXPORT_SYMBOL_GPL(regmap_write);
/**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_write(map, reg, val);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
+/**
* regmap_raw_write(): Write raw values to one or more registers
*
* @map: Register map to write to
@@ -1345,7 +1403,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len, false);
+ ret = _regmap_raw_write(map, reg, val, val_len);
map->unlock(map->lock_arg);
@@ -1369,6 +1427,74 @@ int regmap_field_write(struct regmap_field *field, unsigned int val)
}
EXPORT_SYMBOL_GPL(regmap_field_write);
+/**
+ * regmap_field_update_bits(): Perform a read/modify/write cycle
+ * on the register field
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+{
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits(field->regmap, field->reg,
+ mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+
+/**
+ * regmap_fields_write(): Write a value to a single register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+ unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ return regmap_update_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_write);
+
+/**
+ * regmap_fields_update_bits(): Perform a read/modify/write cycle
+ * on the register field
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+
/*
* regmap_bulk_write(): Write multiple registers to the device
*
@@ -1418,16 +1544,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
*/
if (map->use_single_rw) {
for (i = 0; i < val_count; i++) {
- ret = regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ ret = _regmap_raw_write(map,
+ reg + (i * map->reg_stride),
+ val + (i * val_bytes),
+ val_bytes);
if (ret != 0)
return ret;
}
} else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
- false);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
}
if (val_bytes != 1)
@@ -1439,6 +1564,47 @@ out:
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
+/*
+ * regmap_multi_reg_write(): Write multiple registers to the device
+ *
+ * where the set of registers is supplied in any order
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of data
+ * atomically to the device in a single transfer for those I2C client devices
+ * that implement this alternative block write mode.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
+ int num_regs)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+ }
+
+ map->lock(map->lock_arg);
+
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ goto out;
+ }
+out:
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+
/**
* regmap_raw_write_async(): Write raw values to one or more registers
* asynchronously
@@ -1473,7 +1639,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len, true);
+ map->async = true;
+
+ ret = _regmap_raw_write(map, reg, val, val_len);
+
+ map->async = false;
map->unlock(map->lock_arg);
@@ -1677,6 +1847,39 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_field_read);
/**
+ * regmap_fields_read(): Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ ret = regmap_read(field->regmap,
+ field->reg + (field->id_offset * id),
+ &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
* regmap_bulk_read(): Read multiple registers from the device
*
* @map: Register map to write to
@@ -1788,6 +1991,41 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_update_bits);
/**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ * map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ bool change;
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_update_bits(map, reg, mask, val, &change);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
+
+/**
* regmap_update_bits_check: Perform a read/modify/write cycle on the
* register map and report if updated
*
@@ -1812,6 +2050,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ * register map asynchronously and report if
+ * updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_update_bits(map, reg, mask, val, change);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
struct regmap *map = async->map;
@@ -1820,8 +2095,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
trace_regmap_async_io_complete(map->dev);
spin_lock(&map->async_lock);
-
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
wake = list_empty(&map->async_list);
if (ret != 0)
@@ -1829,8 +2103,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
spin_unlock(&map->async_lock);
- schedule_work(&async->cleanup);
-
if (wake)
wake_up(&map->async_waitq);
}
@@ -1906,6 +2178,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
bypass = map->cache_bypass;
map->cache_bypass = true;
+ map->async = true;
/* Write out first; it's useful to apply even if we fail later. */
for (i = 0; i < num_regs; i++) {
@@ -1929,10 +2202,13 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
}
out:
+ map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
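
Editor's note: the asynchronous update helpers added above pair with regmap_async_complete(), which the patch code itself now calls. A minimal usage sketch, assuming a driver that already holds a valid struct regmap *map; the register addresses and masks below are placeholders, not values from this patch:

	#include <linux/regmap.h>

	/* Queue several read/modify/write updates asynchronously, then
	 * block until the bus work for all of them has finished. */
	static int example_apply_settings(struct regmap *map)
	{
		int ret;

		ret = regmap_update_bits_async(map, 0x10, 0x0f, 0x03);
		if (ret)
			return ret;

		ret = regmap_update_bits_async(map, 0x14, 0xff, 0x80);
		if (ret)
			return ret;

		/* Wait for all queued asynchronous writes to complete. */
		return regmap_async_complete(map);
	}

As in regmap_register_patch() above, batching the writes and completing them once tends to matter most on slow buses with cached register maps.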
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a355e63a383..6fb98b53533 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -188,8 +188,11 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
/* SSB needed additional powering up, do we have any AMBA PCI cards? */
- if (!pci_is_pcie(dev))
- bcma_err(bus, "PCI card detected, report problems.\n");
+ if (!pci_is_pcie(dev)) {
+ bcma_err(bus, "PCI card detected, but PCI cards are not supported.\n");
+ err = -ENXIO;
+ goto err_pci_release_regions;
+ }
/* Map MMIO */
err = -ENOMEM;
@@ -269,6 +272,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 90ee350442a..e15430a82e9 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -30,28 +30,37 @@ static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, cha
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.manuf);
}
+static DEVICE_ATTR_RO(manuf);
+
static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.id);
}
+static DEVICE_ATTR_RO(id);
+
static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%02X\n", core->id.rev);
}
+static DEVICE_ATTR_RO(rev);
+
static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%X\n", core->id.class);
}
-static struct device_attribute bcma_device_attrs[] = {
- __ATTR_RO(manuf),
- __ATTR_RO(id),
- __ATTR_RO(rev),
- __ATTR_RO(class),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(class);
+
+static struct attribute *bcma_device_attrs[] = {
+ &dev_attr_manuf.attr,
+ &dev_attr_id.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_class.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(bcma_device);
static struct bus_type bcma_bus_type = {
.name = "bcma",
@@ -59,7 +68,7 @@ static struct bus_type bcma_bus_type = {
.probe = bcma_device_probe,
.remove = bcma_device_remove,
.uevent = bcma_device_uevent,
- .dev_attrs = bcma_device_attrs,
+ .dev_groups = bcma_device_groups,
};
static u16 bcma_cc_core_id(struct bcma_bus *bus)
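
Editor's note: the conversion above follows the generic DEVICE_ATTR_RO()/ATTRIBUTE_GROUPS() pattern that replaces the removed .dev_attrs field: ATTRIBUTE_GROUPS(foo) generates the foo_groups array expected by .dev_groups. A minimal sketch with a hypothetical attribute name, not code from this driver:

	#include <linux/device.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "example\n");
	}
	static DEVICE_ATTR_RO(example);		/* defines dev_attr_example */

	static struct attribute *example_device_attrs[] = {
		&dev_attr_example.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(example_device);	/* defines example_device_groups */

	/* The bus_type then points at the generated array:
	 *	.dev_groups = example_device_groups,
	 */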
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e07a5fd58ad..86b9f37d102 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -15,6 +15,9 @@ menuconfig BLK_DEV
if BLK_DEV
+config BLK_DEV_NULL_BLK
+ tristate "Null test block driver"
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
@@ -107,7 +110,7 @@ source "drivers/block/mtip32xx/Kconfig"
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
- depends on PCI && VIRT_TO_BUS
+ depends on PCI && VIRT_TO_BUS && 0
help
This is the driver for Compaq Smart Array controllers. Everyone
using these boards should say Y here. See the file
@@ -316,6 +319,16 @@ config BLK_DEV_NVME
To compile this driver as a module, choose M here: the
module will be called nvme.
+config BLK_DEV_SKD
+ tristate "STEC S1120 Block Driver"
+ depends on PCI
+ depends on 64BIT
+ ---help---
+ Saying Y or M here will enable support for the
+ STEC, Inc. S1120 PCIe SSD.
+
+ Use device /dev/skd$N and /dev/skd$Np$M.
+
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
@@ -505,7 +518,7 @@ config VIRTIO_BLK
config BLK_DEV_HD
bool "Very old hard disk (MFM/RLL/IDE) driver"
depends on HAVE_IDE
- depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN
+ depends on !ARM || ARCH_RPC || BROKEN
help
This is a very old hard disk driver that lacks the enhanced
functionality of the newer ones.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ca07399a8d9..8cc98cd0d4a 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_BLK_DEV_SKD) += skd.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
@@ -41,6 +42,8 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
nvme-y := nvme-core.o nvme-scsi.o
+skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4ff85b8785e..748dea4f34d 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -343,7 +343,7 @@ static int fd_motor_on(int nr)
unit[nr].motor = 1;
fd_select(nr);
- INIT_COMPLETION(motor_on_completion);
+ reinit_completion(&motor_on_completion);
motor_on_timer.data = nr;
mod_timer(&motor_on_timer, jiffies + HZ/2);
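
Editor's note: this hunk, like the cciss conversions below, replaces the old INIT_COMPLETION() macro with reinit_completion() when a completion object is reused. A stand-alone sketch of the idiom, with an illustrative completion name:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(example_done);

	static void example_wait_again(void)
	{
		/* Re-arm the completion before each reuse. */
		reinit_completion(&example_done);

		/* ... start work that eventually calls complete(&example_done) ... */

		wait_for_completion(&example_done);
	}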
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 9bf4371755f..d91f1a56e86 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -545,7 +545,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
mutex_lock(&brd_devices_mutex);
brd = brd_init_one(MINOR(dev) >> part_shift);
- kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
+ kobj = brd ? get_disk(brd->brd_disk) : NULL;
mutex_unlock(&brd_devices_mutex);
*part = 0;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index edfa2515bc8..b35fc4f5237 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2808,7 +2808,7 @@ resend_cmd2:
/* erase the old error information */
memset(c->err_info, 0, sizeof(ErrorInfo_struct));
return_status = IO_OK;
- INIT_COMPLETION(wait);
+ reinit_completion(&wait);
goto resend_cmd2;
}
@@ -3669,7 +3669,7 @@ static int add_to_scan_list(struct ctlr_info *h)
}
}
if (!found && !h->busy_scanning) {
- INIT_COMPLETION(h->scan_wait);
+ reinit_completion(&h->scan_wait);
list_add_tail(&h->scan_list, &scan_q);
ret = 1;
}
@@ -5183,7 +5183,7 @@ reinit_after_soft_reset:
rebuild_lun_table(h, 1, 0);
cciss_engage_scsi(h);
h->busy_initializing = 0;
- return 1;
+ return 0;
clean4:
cciss_free_cmd_pool(h);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2d7f608d181..0e06f0c5dd1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1474,7 +1474,8 @@ enum determine_dev_size {
DS_ERROR = -1,
DS_UNCHANGED = 0,
DS_SHRUNK = 1,
- DS_GREW = 2
+ DS_GREW = 2,
+ DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55635edf563..9e3818b1bc8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2750,13 +2750,6 @@ int __init drbd_init(void)
return err;
}
- err = drbd_genl_register();
- if (err) {
- printk(KERN_ERR "drbd: unable to register generic netlink family\n");
- goto fail;
- }
-
-
register_reboot_notifier(&drbd_notifier);
/*
@@ -2767,6 +2760,15 @@ int __init drbd_init(void)
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&minors);
+ rwlock_init(&global_state_lock);
+ INIT_LIST_HEAD(&drbd_tconns);
+
+ err = drbd_genl_register();
+ if (err) {
+ printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+ goto fail;
+ }
+
err = drbd_create_mempools();
if (err)
goto fail;
@@ -2778,9 +2780,6 @@ int __init drbd_init(void)
goto fail;
}
- rwlock_init(&global_state_lock);
- INIT_LIST_HEAD(&drbd_tconns);
-
retry.wq = create_singlethread_workqueue("drbd-reissue");
if (!retry.wq) {
printk(KERN_ERR "drbd: unable to create retry workqueue\n");
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cc1e640f48..c706d50a8b0 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -955,7 +955,7 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
}
if (size > la_size_sect)
- rv = DS_GREW;
+ rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
if (size < la_size_sect)
rv = DS_SHRUNK;
@@ -1132,9 +1132,9 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
/* We may ignore peer limits if the peer is modern enough.
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
- if (mdev->state.conn >= C_CONNECTED) {
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
if (mdev->tconn->agreed_pro_version < 94)
- peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
else if (mdev->tconn->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cc29cd3bf78..6fa6673b36b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1890,29 +1890,11 @@ static u32 seq_max(u32 a, u32 b)
return seq_greater(a, b) ? a : b;
}
-static bool need_peer_seq(struct drbd_conf *mdev)
-{
- struct drbd_tconn *tconn = mdev->tconn;
- int tp;
-
- /*
- * We only need to keep track of the last packet_seq number of our peer
- * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
- * handle_write_conflicts().
- */
-
- rcu_read_lock();
- tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
- rcu_read_unlock();
-
- return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
-}
-
static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
unsigned int newest_peer_seq;
- if (need_peer_seq(mdev)) {
+ if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
spin_lock(&mdev->peer_seq_lock);
newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
mdev->peer_seq = newest_peer_seq;
@@ -1972,22 +1954,31 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s
{
DEFINE_WAIT(wait);
long timeout;
- int ret;
+ int ret = 0, tp;
- if (!need_peer_seq(mdev))
+ if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
return 0;
spin_lock(&mdev->peer_seq_lock);
for (;;) {
if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
- ret = 0;
break;
}
+
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+
+ if (!tp)
+ break;
+
+ /* Only need to wait if two_primaries is enabled */
prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&mdev->peer_seq_lock);
rcu_read_lock();
@@ -2228,8 +2219,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
}
goto out_interrupted;
}
- } else
+ } else {
+ update_peer_seq(mdev, peer_seq);
spin_lock_irq(&mdev->tconn->req_lock);
+ }
list_add(&peer_req->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->tconn->req_lock);
@@ -4132,7 +4125,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
(unsigned int)bs.buf_len);
return -EIO;
}
- look_ahead >>= bits;
+ /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
+ if (likely(bits < 64))
+ look_ahead >>= bits;
+ else
+ look_ahead = 0;
have -= bits;
bits = bitstream_get_bits(&bs, &tmp, 64 - have);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c24379ffd4e..fec7bef4499 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1306,6 +1306,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
int backing_limit;
if (bio_size && get_ldev(mdev)) {
+ unsigned int max_hw_sectors = queue_max_hw_sectors(q);
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
@@ -1313,6 +1314,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
limit = min(limit, backing_limit);
}
put_ldev(mdev);
+ if ((limit >> 9) > max_hw_sectors)
+ limit = max_hw_sectors << 9;
}
return limit;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 04ceb7e2fad..000abe2f105 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2886,9 +2886,9 @@ static void do_fd_request(struct request_queue *q)
return;
if (WARN(atomic_read(&usage_count) == 0,
- "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
+ "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
- current_req->cmd_flags))
+ (unsigned long long) current_req->cmd_flags))
return;
if (test_and_set_bit(0, &fdc_busy)) {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 40e715531aa..c8dac730524 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -894,13 +894,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
bio_list_init(&lo->lo_bio_list);
- /*
- * set queue make_request_fn, and add limits based on lower level
- * device
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
- lo->lo_queue->queuedata = lo;
-
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1618,6 +1611,8 @@ static int loop_add(struct loop_device **l, int i)
if (!lo)
goto out;
+ lo->lo_state = Lo_unbound;
+
/* allocate id, if @id >= 0, we're requesting that specific id */
if (i >= 0) {
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
@@ -1633,7 +1628,13 @@ static int loop_add(struct loop_device **l, int i)
err = -ENOMEM;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
- goto out_free_dev;
+ goto out_free_idr;
+
+ /*
+ * set queue make_request_fn
+ */
+ blk_queue_make_request(lo->lo_queue, loop_make_request);
+ lo->lo_queue->queuedata = lo;
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
@@ -1678,6 +1679,8 @@ static int loop_add(struct loop_device **l, int i)
out_free_queue:
blk_cleanup_queue(lo->lo_queue);
+out_free_idr:
+ idr_remove(&loop_index_idr, i);
out_free_dev:
kfree(lo);
out:
@@ -1741,7 +1744,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
if (err < 0)
err = loop_add(&lo, MINOR(dev) >> part_shift);
if (err < 0)
- kobj = ERR_PTR(err);
+ kobj = NULL;
else
kobj = get_disk(lo->lo_disk);
mutex_unlock(&loop_index_mutex);
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 77a60bedd7a..7bc363f1ee8 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -936,7 +936,7 @@ static int mg_probe(struct platform_device *plat_dev)
goto probe_err_3b;
}
err = request_irq(host->irq, mg_irq,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING,
MG_DEV_NAME, host);
if (err) {
printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 952dbfe2212..050c71267f1 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -126,64 +126,30 @@ struct mtip_compat_ide_task_request_s {
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
u16 vendor_id = 0;
+ struct driver_data *dd = pci_get_drvdata(pdev);
+
+ if (dd->sr)
+ return true;
/* Read the vendorID from the configuration space */
pci_read_config_word(pdev, 0x00, &vendor_id);
- if (vendor_id == 0xFFFF)
+ if (vendor_id == 0xFFFF) {
+ dd->sr = true;
+ if (dd->queue)
+ set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
+ else
+ dev_warn(&dd->pdev->dev,
+ "%s: dd->queue is NULL\n", __func__);
+ if (dd->port) {
+ set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ } else
+ dev_warn(&dd->pdev->dev,
+ "%s: dd->port is NULL\n", __func__);
return true; /* device removed */
-
- return false; /* device present */
-}
-
-/*
- * This function is called for clean the pending command in the
- * command slot during the surprise removal of device and return
- * error to the upper layer.
- *
- * @dd Pointer to the DRIVER_DATA structure.
- *
- * return value
- * None
- */
-static void mtip_command_cleanup(struct driver_data *dd)
-{
- int group = 0, commandslot = 0, commandindex = 0;
- struct mtip_cmd *command;
- struct mtip_port *port = dd->port;
- static int in_progress;
-
- if (in_progress)
- return;
-
- in_progress = 1;
-
- for (group = 0; group < 4; group++) {
- for (commandslot = 0; commandslot < 32; commandslot++) {
- if (!(port->allocated[group] & (1 << commandslot)))
- continue;
-
- commandindex = group << 5 | commandslot;
- command = &port->commands[commandindex];
-
- if (atomic_read(&command->active)
- && (command->async_callback)) {
- command->async_callback(command->async_data,
- -ENODEV);
- command->async_callback = NULL;
- command->async_data = NULL;
- }
-
- dma_unmap_sg(&port->dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
- }
}
- up(&port->cmd_slot);
-
- set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
- in_progress = 0;
+ return false; /* device present */
}
/*
@@ -222,10 +188,7 @@ static int get_slot(struct mtip_port *port)
}
dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
- if (mtip_check_surprise_removal(port->dd->pdev)) {
- /* Device not present, clean outstanding commands */
- mtip_command_cleanup(port->dd);
- }
+ mtip_check_surprise_removal(port->dd->pdev);
return -1;
}
@@ -246,6 +209,107 @@ static inline void release_slot(struct mtip_port *port, int tag)
}
/*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command.
+ * @data Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command;
+ struct driver_data *dd = data;
+ int cb_status = status ? -EIO : 0;
+
+ if (unlikely(!dd) || unlikely(!port))
+ return;
+
+ command = &port->commands[tag];
+
+ if (unlikely(status == PORT_IRQ_TF_ERR)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Command tag %d failed due to TFE\n", tag);
+ }
+
+ /* Upper layer callback */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data, cb_status);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /* Clear the allocated and active bits for the command */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+}
+
+/*
+ * This function is called to clean up the pending commands in the
+ * command slots during surprise removal of the device and return an
+ * error to the upper layer.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+ int tag = 0;
+ struct mtip_cmd *cmd;
+ struct mtip_port *port = dd->port;
+ unsigned int num_cmd_slots = dd->slot_groups * 32;
+
+ if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ return;
+
+ if (!port)
+ return;
+
+ cmd = &port->commands[MTIP_TAG_INTERNAL];
+ if (atomic_read(&cmd->active))
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
+ (1 << MTIP_TAG_INTERNAL))
+ if (cmd->comp_func)
+ cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ cmd->comp_data, -ENODEV);
+
+ while (1) {
+ tag = find_next_bit(port->allocated, num_cmd_slots, tag);
+ if (tag >= num_cmd_slots)
+ break;
+
+ cmd = &port->commands[tag];
+ if (atomic_read(&cmd->active))
+ mtip_async_complete(port, tag, dd, -ENODEV);
+ }
+
+ set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+}
+
+/*
* Reset the HBA (without sleeping)
*
* @dd Pointer to the driver data structure.
@@ -584,6 +648,9 @@ static void mtip_timeout_function(unsigned long int data)
if (unlikely(!port))
return;
+ if (unlikely(port->dd->sr))
+ return;
+
if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
mod_timer(&port->cmd_timer,
jiffies + msecs_to_jiffies(30000));
@@ -675,66 +742,6 @@ static void mtip_timeout_function(unsigned long int data)
}
/*
- * IO completion function.
- *
- * This completion function is called by the driver ISR when a
- * command that was issued by the kernel completes. It first calls the
- * asynchronous completion function which normally calls back into the block
- * layer passing the asynchronous callback data, then unmaps the
- * scatter list associated with the completed command, and finally
- * clears the allocated bit associated with the completed command.
- *
- * @port Pointer to the port data structure.
- * @tag Tag of the command.
- * @data Pointer to driver_data.
- * @status Completion status.
- *
- * return value
- * None
- */
-static void mtip_async_complete(struct mtip_port *port,
- int tag,
- void *data,
- int status)
-{
- struct mtip_cmd *command;
- struct driver_data *dd = data;
- int cb_status = status ? -EIO : 0;
-
- if (unlikely(!dd) || unlikely(!port))
- return;
-
- command = &port->commands[tag];
-
- if (unlikely(status == PORT_IRQ_TF_ERR)) {
- dev_warn(&port->dd->pdev->dev,
- "Command tag %d failed due to TFE\n", tag);
- }
-
- /* Upper layer callback */
- if (likely(command->async_callback))
- command->async_callback(command->async_data, cb_status);
-
- command->async_callback = NULL;
- command->comp_func = NULL;
-
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
-
- /* Clear the allocated and active bits for the command */
- atomic_set(&port->commands[tag].active, 0);
- release_slot(port, tag);
-
- if (unlikely(command->unaligned))
- up(&port->cmd_slot_unal);
- else
- up(&port->cmd_slot);
-}
-
-/*
* Internal command completion callback function.
*
* This function is normally called by the driver ISR when an internal
@@ -854,7 +861,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
"Missing completion func for tag %d",
tag);
if (mtip_check_surprise_removal(dd->pdev)) {
- mtip_command_cleanup(dd);
/* don't proceed further */
return;
}
@@ -1018,14 +1024,12 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
command->comp_data,
0);
} else {
- dev_warn(&dd->pdev->dev,
- "Null completion "
- "for tag %d",
+ dev_dbg(&dd->pdev->dev,
+ "Null completion for tag %d",
tag);
if (mtip_check_surprise_removal(
dd->pdev)) {
- mtip_command_cleanup(dd);
return;
}
}
@@ -1145,7 +1149,6 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
if (unlikely(port_stat & PORT_IRQ_ERR)) {
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
- mtip_command_cleanup(dd);
/* don't proceed further */
return IRQ_HANDLED;
}
@@ -2806,34 +2809,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf)
static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
+ struct driver_data *dd = (struct driver_data *)f->private_data;
int size = *offset;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
+ char *buf;
+ int rv = 0;
if (!len || *offset)
return 0;
+ buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: status buffer\n");
+ return -ENOMEM;
+ }
+
size += show_device_status(NULL, buf);
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
- return -EFAULT;
+ rv = -EFAULT;
- return *offset;
+ kfree(buf);
+ return rv ? rv : *offset;
}
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
+ char *buf;
u32 group_allocated;
int size = *offset;
- int n;
+ int n, rv = 0;
if (!len || size)
return 0;
+ buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: register buffer\n");
+ return -ENOMEM;
+ }
+
size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
@@ -2888,21 +2908,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
- return -EFAULT;
+ rv = -EFAULT;
- return *offset;
+ kfree(buf);
+ return rv ? rv : *offset;
}
static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
+ char *buf;
int size = *offset;
+ int rv = 0;
if (!len || size)
return 0;
+ buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: flag buffer\n");
+ return -ENOMEM;
+ }
+
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
@@ -2911,9 +2940,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
- return -EFAULT;
+ rv = -EFAULT;
- return *offset;
+ kfree(buf);
+ return rv ? rv : *offset;
}
static const struct file_operations mtip_device_status_fops = {
@@ -3006,6 +3036,46 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd)
debugfs_remove_recursive(dd->dfs_node);
}
+static int mtip_free_orphan(struct driver_data *dd)
+{
+ struct kobject *kobj;
+
+ if (dd->bdev) {
+ if (dd->bdev->bd_holders >= 1)
+ return -2;
+
+ bdput(dd->bdev);
+ dd->bdev = NULL;
+ }
+
+ mtip_hw_debugfs_exit(dd);
+
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+
+ if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) &&
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+ put_disk(dd->disk);
+ } else {
+ if (dd->disk) {
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+ del_gendisk(dd->disk);
+ dd->disk = NULL;
+ }
+ if (dd->queue) {
+ dd->queue->queuedata = NULL;
+ blk_cleanup_queue(dd->queue);
+ dd->queue = NULL;
+ }
+ }
+ kfree(dd);
+ return 0;
+}
/*
* Perform any init/resume time hardware setup
@@ -3154,6 +3224,7 @@ static int mtip_service_thread(void *data)
unsigned long slot, slot_start, slot_wrap;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
+ int ret;
while (1) {
/*
@@ -3164,13 +3235,18 @@ static int mtip_service_thread(void *data)
!(port->flags & MTIP_PF_PAUSE_IO));
if (kthread_should_stop())
+ goto st_out;
+
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
+ /* If I am an orphan, start self cleanup */
+ if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags))
break;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)))
- break;
+ goto st_out;
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -3201,7 +3277,7 @@ static int mtip_service_thread(void *data)
clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
} else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (!mtip_ftl_rebuild_poll(dd))
+ if (mtip_ftl_rebuild_poll(dd) < 0)
set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
&dd->dd_flag);
clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
@@ -3209,8 +3285,30 @@ static int mtip_service_thread(void *data)
clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+ goto st_out;
+ }
+
+ /* wait for pci remove to exit */
+ while (1) {
+ if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag))
break;
+ msleep_interruptible(1000);
+ if (kthread_should_stop())
+ goto st_out;
+ }
+
+ while (1) {
+ ret = mtip_free_orphan(dd);
+ if (!ret) {
+ /* NOTE: All data structures are invalid, do not
+ * access any here */
+ return 0;
+ }
+ msleep_interruptible(1000);
+ if (kthread_should_stop())
+ goto st_out;
}
+st_out:
return 0;
}
@@ -3437,13 +3535,13 @@ static int mtip_hw_init(struct driver_data *dd)
rv = -EFAULT;
goto out3;
}
+ mtip_dump_identify(dd->port);
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
return MTIP_FTL_REBUILD_MAGIC;
}
- mtip_dump_identify(dd->port);
/* check write protect, over temp and rebuild statuses */
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
@@ -3467,8 +3565,8 @@ static int mtip_hw_init(struct driver_data *dd)
}
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
- "Drive indicates rebuild has failed.\n");
- /* TODO */
+ "Drive is in security locked state.\n");
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
}
}
@@ -3523,9 +3621,8 @@ static int mtip_hw_exit(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
-
- if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
+ if (!dd->sr) {
+ if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
if (mtip_standby_immediate(dd->port))
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
@@ -3551,6 +3648,7 @@ static int mtip_hw_exit(struct driver_data *dd)
dd->port->command_list_dma);
/* Free the memory allocated for the for structure. */
kfree(dd->port);
+ dd->port = NULL;
return 0;
}
@@ -3572,7 +3670,8 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- mtip_standby_immediate(dd->port);
+ if (!dd->sr && dd->port)
+ mtip_standby_immediate(dd->port);
return 0;
}
@@ -3887,6 +3986,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
bio_endio(bio, -ENODATA);
return;
}
+ if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+ bio_endio(bio, -ENXIO);
+ return;
+ }
}
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
@@ -4010,6 +4113,8 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->disk->private_data = dd;
dd->index = index;
+ mtip_hw_debugfs_init(dd);
+
/*
* if rebuild pending, start the service thread, and delay the block
* queue creation and add_disk()
@@ -4068,6 +4173,7 @@ skip_create_disk:
/* Enable the block device and add it to /dev */
add_disk(dd->disk);
+ dd->bdev = bdget_disk(dd->disk, 0);
/*
* Now that the disk is active, initialize any sysfs attributes
* managed by the protocol layer.
@@ -4077,7 +4183,6 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
- mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -4103,7 +4208,8 @@ start_service_thread:
return rv;
kthread_run_error:
- mtip_hw_debugfs_exit(dd);
+ bdput(dd->bdev);
+ dd->bdev = NULL;
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -4112,6 +4218,7 @@ read_capacity_error:
blk_cleanup_queue(dd->queue);
block_queue_alloc_init_error:
+ mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, index);
@@ -4141,40 +4248,48 @@ static int mtip_block_remove(struct driver_data *dd)
{
struct kobject *kobj;
- if (dd->mtip_svc_handler) {
- set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
- wake_up_interruptible(&dd->port->svc_wait);
- kthread_stop(dd->mtip_svc_handler);
- }
+ if (!dd->sr) {
+ mtip_hw_debugfs_exit(dd);
- /* Clean up the sysfs attributes, if created */
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
- kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
- if (kobj) {
- mtip_hw_sysfs_exit(dd, kobj);
- kobject_put(kobj);
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ kthread_stop(dd->mtip_svc_handler);
}
- }
- mtip_hw_debugfs_exit(dd);
- /*
- * Delete our gendisk structure. This also removes the device
- * from /dev
- */
- if (dd->disk) {
- if (dd->disk->queue)
- del_gendisk(dd->disk);
- else
- put_disk(dd->disk);
- }
-
- spin_lock(&rssd_index_lock);
- ida_remove(&rssd_index_ida, dd->index);
- spin_unlock(&rssd_index_lock);
+ /* Clean up the sysfs attributes, if created */
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+ }
+ /*
+ * Delete our gendisk structure. This also removes the device
+ * from /dev
+ */
+ if (dd->bdev) {
+ bdput(dd->bdev);
+ dd->bdev = NULL;
+ }
+ if (dd->disk) {
+ if (dd->disk->queue) {
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->queue = NULL;
+ } else
+ put_disk(dd->disk);
+ }
+ dd->disk = NULL;
- blk_cleanup_queue(dd->queue);
- dd->disk = NULL;
- dd->queue = NULL;
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+ } else {
+ dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ dd->disk->disk_name);
+ }
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
@@ -4490,8 +4605,7 @@ done:
static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
- int counter = 0;
- unsigned long flags;
+ unsigned long flags, to;
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
@@ -4500,17 +4614,22 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_add(&dd->remove_list, &removing_list);
spin_unlock_irqrestore(&dev_lock, flags);
- if (mtip_check_surprise_removal(pdev)) {
- while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
- counter++;
- msleep(20);
- if (counter == 10) {
- /* Cleanup the outstanding commands */
- mtip_command_cleanup(dd);
- break;
- }
- }
+ mtip_check_surprise_removal(pdev);
+ synchronize_irq(dd->pdev->irq);
+
+ /* Spin until workers are done */
+ to = jiffies + msecs_to_jiffies(4000);
+ do {
+ msleep(20);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0) {
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!\n");
}
+ /* Cleanup the outstanding commands */
+ mtip_command_cleanup(dd);
/* Clean up the block layer. */
mtip_block_remove(dd);
@@ -4529,8 +4648,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_del_init(&dd->remove_list);
spin_unlock_irqrestore(&dev_lock, flags);
- kfree(dd);
+ if (!dd->sr)
+ kfree(dd);
+ else
+ set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
+
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+ pci_set_drvdata(pdev, NULL);
+ pci_dev_put(pdev);
+
}
/*
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bb8a295fbe..9be7a1582ad 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -140,6 +140,7 @@ enum {
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
+ MTIP_PF_SR_CLEANUP_BIT = 7,
MTIP_PF_SVC_THD_STOP_BIT = 8,
/* below are bit numbers in 'dd_flag' defined in driver_data */
@@ -147,15 +148,18 @@ enum {
MTIP_DDF_REMOVE_PENDING_BIT = 1,
MTIP_DDF_OVER_TEMP_BIT = 2,
MTIP_DDF_WRITE_PROTECT_BIT = 3,
- MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
- (1 << MTIP_DDF_SEC_LOCK_BIT) |
- (1 << MTIP_DDF_OVER_TEMP_BIT) |
- (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-
+ MTIP_DDF_REMOVE_DONE_BIT = 4,
MTIP_DDF_CLEANUP_BIT = 5,
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+ (1 << MTIP_DDF_SEC_LOCK_BIT) |
+ (1 << MTIP_DDF_OVER_TEMP_BIT) |
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT) |
+ (1 << MTIP_DDF_REBUILD_FAILED_BIT)),
+
};
struct smart_attr {
@@ -499,6 +503,8 @@ struct driver_data {
bool trim_supp; /* flag indicating trim support */
+ bool sr;
+
int numa_node; /* NUMA support */
char workq_name[32];
@@ -511,6 +517,8 @@ struct driver_data {
int isr_binding;
+ struct block_device *bdev;
+
int unal_qdepth; /* qdepth of unaligned IO queue */
struct list_head online_list; /* linkage for online list */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
new file mode 100644
index 00000000000..ea192ec029c
--- /dev/null
+++ b/drivers/block/null_blk.c
@@ -0,0 +1,635 @@
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+
+struct nullb_cmd {
+ struct list_head list;
+ struct llist_node ll_list;
+ struct call_single_data csd;
+ struct request *rq;
+ struct bio *bio;
+ unsigned int tag;
+ struct nullb_queue *nq;
+};
+
+struct nullb_queue {
+ unsigned long *tag_map;
+ wait_queue_head_t wait;
+ unsigned int queue_depth;
+
+ struct nullb_cmd *cmds;
+};
+
+struct nullb {
+ struct list_head list;
+ unsigned int index;
+ struct request_queue *q;
+ struct gendisk *disk;
+ struct hrtimer timer;
+ unsigned int queue_depth;
+ spinlock_t lock;
+
+ struct nullb_queue *queues;
+ unsigned int nr_queues;
+};
+
+static LIST_HEAD(nullb_list);
+static struct mutex lock;
+static int null_major;
+static int nullb_indexes;
+
+struct completion_queue {
+ struct llist_head list;
+ struct hrtimer timer;
+};
+
+/*
+ * These are per-cpu for now, they will need to be configured by the
+ * complete_queues parameter and appropriately mapped.
+ */
+static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+
+enum {
+ NULL_IRQ_NONE = 0,
+ NULL_IRQ_SOFTIRQ = 1,
+ NULL_IRQ_TIMER = 2,
+
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
+static int submit_queues = 1;
+module_param(submit_queues, int, S_IRUGO);
+MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+
+static int home_node = NUMA_NO_NODE;
+module_param(home_node, int, S_IRUGO);
+MODULE_PARM_DESC(home_node, "Home node for the device");
+
+static int queue_mode = NULL_Q_MQ;
+module_param(queue_mode, int, S_IRUGO);
+MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
+
+static int gb = 250;
+module_param(gb, int, S_IRUGO);
+MODULE_PARM_DESC(gb, "Size in GB");
+
+static int bs = 512;
+module_param(bs, int, S_IRUGO);
+MODULE_PARM_DESC(bs, "Block size (in bytes)");
+
+static int nr_devices = 2;
+module_param(nr_devices, int, S_IRUGO);
+MODULE_PARM_DESC(nr_devices, "Number of devices to register");
+
+static int irqmode = NULL_IRQ_SOFTIRQ;
+module_param(irqmode, int, S_IRUGO);
+MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
+
+static int completion_nsec = 10000;
+module_param(completion_nsec, int, S_IRUGO);
+MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
+
+static int hw_queue_depth = 64;
+module_param(hw_queue_depth, int, S_IRUGO);
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
+
+static bool use_per_node_hctx = true;
+module_param(use_per_node_hctx, bool, S_IRUGO);
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+
+static void put_tag(struct nullb_queue *nq, unsigned int tag)
+{
+ clear_bit_unlock(tag, nq->tag_map);
+
+ if (waitqueue_active(&nq->wait))
+ wake_up(&nq->wait);
+}
+
+static unsigned int get_tag(struct nullb_queue *nq)
+{
+ unsigned int tag;
+
+ do {
+ tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
+ if (tag >= nq->queue_depth)
+ return -1U;
+ } while (test_and_set_bit_lock(tag, nq->tag_map));
+
+ return tag;
+}
+
+static void free_cmd(struct nullb_cmd *cmd)
+{
+ put_tag(cmd->nq, cmd->tag);
+}
+
+static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
+{
+ struct nullb_cmd *cmd;
+ unsigned int tag;
+
+ tag = get_tag(nq);
+ if (tag != -1U) {
+ cmd = &nq->cmds[tag];
+ cmd->tag = tag;
+ cmd->nq = nq;
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+{
+ struct nullb_cmd *cmd;
+ DEFINE_WAIT(wait);
+
+ cmd = __alloc_cmd(nq);
+ if (cmd || !can_wait)
+ return cmd;
+
+ do {
+ prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+ cmd = __alloc_cmd(nq);
+ if (cmd)
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&nq->wait, &wait);
+ return cmd;
+}
+
+static void end_cmd(struct nullb_cmd *cmd)
+{
+ if (cmd->rq) {
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_end_io(cmd->rq, 0);
+ else {
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ }
+ } else if (cmd->bio)
+ bio_endio(cmd->bio, 0);
+
+ if (queue_mode != NULL_Q_MQ)
+ free_cmd(cmd);
+}
+
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
+{
+ struct completion_queue *cq;
+ struct llist_node *entry;
+ struct nullb_cmd *cmd;
+
+ cq = &per_cpu(completion_queues, smp_processor_id());
+
+ while ((entry = llist_del_all(&cq->list)) != NULL) {
+ do {
+ cmd = container_of(entry, struct nullb_cmd, ll_list);
+ end_cmd(cmd);
+ entry = entry->next;
+ } while (entry);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void null_cmd_end_timer(struct nullb_cmd *cmd)
+{
+ struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
+
+ cmd->ll_list.next = NULL;
+ if (llist_add(&cmd->ll_list, &cq->list)) {
+ ktime_t kt = ktime_set(0, completion_nsec);
+
+ hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
+ }
+
+ put_cpu();
+}
+
+static void null_softirq_done_fn(struct request *rq)
+{
+ blk_end_request_all(rq, 0);
+}
+
+#ifdef CONFIG_SMP
+
+static void null_ipi_cmd_end_io(void *data)
+{
+ struct completion_queue *cq;
+ struct llist_node *entry, *next;
+ struct nullb_cmd *cmd;
+
+ cq = &per_cpu(completion_queues, smp_processor_id());
+
+ entry = llist_del_all(&cq->list);
+
+ while (entry) {
+ next = entry->next;
+ cmd = llist_entry(entry, struct nullb_cmd, ll_list);
+ end_cmd(cmd);
+ entry = next;
+ }
+}
+
+static void null_cmd_end_ipi(struct nullb_cmd *cmd)
+{
+ struct call_single_data *data = &cmd->csd;
+ int cpu = get_cpu();
+ struct completion_queue *cq = &per_cpu(completion_queues, cpu);
+
+ cmd->ll_list.next = NULL;
+
+ if (llist_add(&cmd->ll_list, &cq->list)) {
+ data->func = null_ipi_cmd_end_io;
+ data->flags = 0;
+ __smp_call_function_single(cpu, data, 0);
+ }
+
+ put_cpu();
+}
+
+#endif /* CONFIG_SMP */
+
+static inline void null_handle_cmd(struct nullb_cmd *cmd)
+{
+ /* Complete IO by inline, softirq or timer */
+ switch (irqmode) {
+ case NULL_IRQ_NONE:
+ end_cmd(cmd);
+ break;
+ case NULL_IRQ_SOFTIRQ:
+#ifdef CONFIG_SMP
+ null_cmd_end_ipi(cmd);
+#else
+ end_cmd(cmd);
+#endif
+ break;
+ case NULL_IRQ_TIMER:
+ null_cmd_end_timer(cmd);
+ break;
+ }
+}
+
+static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
+{
+ int index = 0;
+
+ if (nullb->nr_queues != 1)
+ index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
+
+ return &nullb->queues[index];
+}
+
+static void null_queue_bio(struct request_queue *q, struct bio *bio)
+{
+ struct nullb *nullb = q->queuedata;
+ struct nullb_queue *nq = nullb_to_queue(nullb);
+ struct nullb_cmd *cmd;
+
+ cmd = alloc_cmd(nq, 1);
+ cmd->bio = bio;
+
+ null_handle_cmd(cmd);
+}
+
+static int null_rq_prep_fn(struct request_queue *q, struct request *req)
+{
+ struct nullb *nullb = q->queuedata;
+ struct nullb_queue *nq = nullb_to_queue(nullb);
+ struct nullb_cmd *cmd;
+
+ cmd = alloc_cmd(nq, 0);
+ if (cmd) {
+ cmd->rq = req;
+ req->special = cmd;
+ return BLKPREP_OK;
+ }
+
+ return BLKPREP_DEFER;
+}
+
+static void null_request_fn(struct request_queue *q)
+{
+ struct request *rq;
+
+ while ((rq = blk_fetch_request(q)) != NULL) {
+ struct nullb_cmd *cmd = rq->special;
+
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
+}
+
+static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+ struct nullb_cmd *cmd = rq->special;
+
+ cmd->rq = rq;
+ cmd->nq = hctx->driver_data;
+
+ null_handle_cmd(cmd);
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
+{
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
+ hctx_index);
+}
+
+static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
+{
+ kfree(hctx);
+}
+
+static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int index)
+{
+ struct nullb *nullb = data;
+ struct nullb_queue *nq = &nullb->queues[index];
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+ nullb->nr_queues++;
+ hctx->driver_data = nq;
+
+ return 0;
+}
+
+static struct blk_mq_ops null_mq_ops = {
+ .queue_rq = null_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_hctx = null_init_hctx,
+};
+
+static struct blk_mq_reg null_mq_reg = {
+ .ops = &null_mq_ops,
+ .queue_depth = 64,
+ .cmd_size = sizeof(struct nullb_cmd),
+ .flags = BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void null_del_dev(struct nullb *nullb)
+{
+ list_del_init(&nullb->list);
+
+ del_gendisk(nullb->disk);
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_queue(nullb->q);
+ else
+ blk_cleanup_queue(nullb->q);
+ put_disk(nullb->disk);
+ kfree(nullb);
+}
+
+static int null_open(struct block_device *bdev, fmode_t mode)
+{
+ return 0;
+}
+
+static void null_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
+static const struct block_device_operations null_fops = {
+ .owner = THIS_MODULE,
+ .open = null_open,
+ .release = null_release,
+};
+
+static int setup_commands(struct nullb_queue *nq)
+{
+ struct nullb_cmd *cmd;
+ int i, tag_size;
+
+ nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
+ if (!nq->cmds)
+ return 1;
+
+ tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
+ nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
+ if (!nq->tag_map) {
+ kfree(nq->cmds);
+ return 1;
+ }
+
+ for (i = 0; i < nq->queue_depth; i++) {
+ cmd = &nq->cmds[i];
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->ll_list.next = NULL;
+ cmd->tag = -1U;
+ }
+
+ return 0;
+}
+
+static void cleanup_queue(struct nullb_queue *nq)
+{
+ kfree(nq->tag_map);
+ kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+ int i;
+
+ for (i = 0; i < nullb->nr_queues; i++)
+ cleanup_queue(&nullb->queues[i]);
+
+ kfree(nullb->queues);
+}
+
+static int setup_queues(struct nullb *nullb)
+{
+ struct nullb_queue *nq;
+ int i;
+
+ nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+ if (!nullb->queues)
+ return 1;
+
+ nullb->nr_queues = 0;
+ nullb->queue_depth = hw_queue_depth;
+
+ if (queue_mode == NULL_Q_MQ)
+ return 0;
+
+ for (i = 0; i < submit_queues; i++) {
+ nq = &nullb->queues[i];
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = hw_queue_depth;
+ if (setup_commands(nq))
+ break;
+ nullb->nr_queues++;
+ }
+
+ if (i == submit_queues)
+ return 0;
+
+ cleanup_queues(nullb);
+ return 1;
+}
+
+static int null_add_dev(void)
+{
+ struct gendisk *disk;
+ struct nullb *nullb;
+ sector_t size;
+
+ nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+ if (!nullb)
+ return -ENOMEM;
+
+ spin_lock_init(&nullb->lock);
+
+ if (setup_queues(nullb))
+ goto err;
+
+ if (queue_mode == NULL_Q_MQ) {
+ null_mq_reg.numa_node = home_node;
+ null_mq_reg.queue_depth = hw_queue_depth;
+
+ if (use_per_node_hctx) {
+ null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
+ null_mq_reg.ops->free_hctx = null_free_hctx;
+
+ null_mq_reg.nr_hw_queues = nr_online_nodes;
+ } else {
+ null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
+ null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
+
+ null_mq_reg.nr_hw_queues = submit_queues;
+ }
+
+ nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
+ } else if (queue_mode == NULL_Q_BIO) {
+ nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+ blk_queue_make_request(nullb->q, null_queue_bio);
+ } else {
+ nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+ blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
+ if (nullb->q)
+ blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ }
+
+ if (!nullb->q)
+ goto queue_fail;
+
+ nullb->q->queuedata = nullb;
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+
+ disk = nullb->disk = alloc_disk_node(1, home_node);
+ if (!disk) {
+queue_fail:
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_queue(nullb->q);
+ else
+ blk_cleanup_queue(nullb->q);
+ cleanup_queues(nullb);
+err:
+ kfree(nullb);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&lock);
+ list_add_tail(&nullb->list, &nullb_list);
+ nullb->index = nullb_indexes++;
+ mutex_unlock(&lock);
+
+ blk_queue_logical_block_size(nullb->q, bs);
+ blk_queue_physical_block_size(nullb->q, bs);
+
+ size = gb * 1024 * 1024 * 1024ULL;
+ sector_div(size, bs);
+ set_capacity(disk, size);
+
+ disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->major = null_major;
+ disk->first_minor = nullb->index;
+ disk->fops = &null_fops;
+ disk->private_data = nullb;
+ disk->queue = nullb->q;
+ sprintf(disk->disk_name, "nullb%d", nullb->index);
+ add_disk(disk);
+ return 0;
+}
+
+static int __init null_init(void)
+{
+ unsigned int i;
+
+#if !defined(CONFIG_SMP)
+ if (irqmode == NULL_IRQ_SOFTIRQ) {
+ pr_warn("null_blk: softirq completions not available.\n");
+ pr_warn("null_blk: using direct completions.\n");
+ irqmode = NULL_IRQ_NONE;
+ }
+#endif
+
+ if (submit_queues > nr_cpu_ids)
+ submit_queues = nr_cpu_ids;
+ else if (!submit_queues)
+ submit_queues = 1;
+
+ mutex_init(&lock);
+
+ /* Initialize a separate list for each CPU for issuing softirqs */
+ for_each_possible_cpu(i) {
+ struct completion_queue *cq = &per_cpu(completion_queues, i);
+
+ init_llist_head(&cq->list);
+
+ if (irqmode != NULL_IRQ_TIMER)
+ continue;
+
+ hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cq->timer.function = null_cmd_timer_expired;
+ }
+
+ null_major = register_blkdev(0, "nullb");
+ if (null_major < 0)
+ return null_major;
+
+ for (i = 0; i < nr_devices; i++) {
+ if (null_add_dev()) {
+ unregister_blkdev(null_major, "nullb");
+ return -EINVAL;
+ }
+ }
+
+ pr_info("null: module loaded\n");
+ return 0;
+}
+
+static void __exit null_exit(void)
+{
+ struct nullb *nullb;
+
+ unregister_blkdev(null_major, "nullb");
+
+ mutex_lock(&lock);
+ while (!list_empty(&nullb_list)) {
+ nullb = list_entry(nullb_list.next, struct nullb, list);
+ null_del_dev(nullb);
+ }
+ mutex_unlock(&lock);
+}
+
+module_init(null_init);
+module_exit(null_exit);
+
+MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_LICENSE("GPL");
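
Editor's note: the softirq and timer completion paths in this new driver rely on the return value of llist_add(): only the caller that adds the first node to an empty per-CPU list arms the hrtimer or sends the IPI, so the completion mechanism fires once per batch. A generic sketch of that idiom, with illustrative names that are not part of the driver:

	#include <linux/llist.h>

	struct example_batch {
		struct llist_head pending;
	};

	struct example_item {
		struct llist_node node;
	};

	/* Returns true when the caller must arm the completion mechanism
	 * (timer, IPI, ...), i.e. the list was empty before this add. */
	static bool example_batch_queue(struct example_batch *b,
					struct example_item *it)
	{
		it->node.next = NULL;
		return llist_add(&it->node, &b->pending);
	}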
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da52092980e..26d03fa0bf2 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1949,12 +1949,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (pci_request_selected_regions(pdev, bars, "nvme"))
goto disable_pci;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- else
- goto disable_pci;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto disable;
pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2165,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
+
result = nvme_set_instance(dev);
if (result)
goto free;
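
Editor's note: dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, so the usual "try 64-bit, fall back to 32-bit" probe sequence collapses to the two calls in the hunk above. A generic sketch of the pattern; the struct device pointer is a placeholder:

	#include <linux/dma-mapping.h>

	static int example_set_dma_masks(struct device *dev)
	{
		/* Prefer a 64-bit mask, fall back to 32-bit, else fail. */
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
			return 0;
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}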
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 56188475cfd..ff8668c5efb 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -473,45 +473,31 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
if (!pkt_debugfs_root)
return;
- pd->dfs_f_info = NULL;
pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
- if (IS_ERR(pd->dfs_d_root)) {
- pd->dfs_d_root = NULL;
+ if (!pd->dfs_d_root)
return;
- }
+
pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
pd->dfs_d_root, pd, &debug_fops);
- if (IS_ERR(pd->dfs_f_info)) {
- pd->dfs_f_info = NULL;
- return;
- }
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
if (!pkt_debugfs_root)
return;
- if (pd->dfs_f_info)
- debugfs_remove(pd->dfs_f_info);
+ debugfs_remove(pd->dfs_f_info);
+ debugfs_remove(pd->dfs_d_root);
pd->dfs_f_info = NULL;
- if (pd->dfs_d_root)
- debugfs_remove(pd->dfs_d_root);
pd->dfs_d_root = NULL;
}
static void pkt_debugfs_init(void)
{
pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
- if (IS_ERR(pkt_debugfs_root)) {
- pkt_debugfs_root = NULL;
- return;
- }
}
static void pkt_debugfs_cleanup(void)
{
- if (!pkt_debugfs_root)
- return;
debugfs_remove(pkt_debugfs_root);
pkt_debugfs_root = NULL;
}
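
Editor's note: the simplification above works because debugfs_remove() ignores NULL (and error) dentries, so per-pointer checks before removal add nothing. A sketch of the resulting cleanup idiom, with hypothetical field names:

	#include <linux/debugfs.h>

	struct example_debug {
		struct dentry *dir;
		struct dentry *info;
	};

	static void example_debugfs_remove(struct example_debug *d)
	{
		/* debugfs_remove() is safe to call with NULL dentries. */
		debugfs_remove(d->info);
		debugfs_remove(d->dir);
		d->info = NULL;
		d->dir = NULL;
	}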
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 6e85e21445e..a8de2eec6ff 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
for (i = 0; i < card->n_targets; i++) {
spin_lock_bh(&card->ctrl[i].queue_lock);
cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
- &card->ctrl[i].queue);
+ &card->ctrl[i].queue,
+ COMPLETE_DMA);
spin_unlock_bh(&card->ctrl[i].queue_lock);
cnt += rsxx_dma_cancel(&card->ctrl[i]);
@@ -748,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
card->eeh_state = 0;
- st = rsxx_eeh_remap_dmas(card);
- if (st)
- goto failed_remap_dmas;
-
spin_lock_irqsave(&card->irq_lock, flags);
if (card->n_targets & RSXX_MAX_TARGETS)
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -778,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
failed_hw_buffers_init:
-failed_remap_dmas:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev,
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index d7af441880b..2284f5d3a54 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -295,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
return -ENOMEM;
}
- blk_size = card->config.data.block_size;
+ if (card->config_valid) {
+ blk_size = card->config.data.block_size;
+ blk_queue_dma_alignment(card->queue, blk_size - 1);
+ blk_queue_logical_block_size(card->queue, blk_size);
+ }
blk_queue_make_request(card->queue, rsxx_make_request);
blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
- blk_queue_dma_alignment(card->queue, blk_size - 1);
blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
- blk_queue_logical_block_size(card->queue, blk_size);
blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index bed32f16b08..fc88ba3e1bd 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
}
/*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ }
+ }
+
+ kmem_cache_free(rsxx_dma_pool, dma);
+}
+
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
struct rsxx_dma *dma,
unsigned int status)
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
if (status & DMA_CANCELLED)
ctrl->stats.dma_cancelled++;
- if (dma->dma_addr)
- pci_unmap_page(ctrl->card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
-
if (dma->cb)
dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
- kmem_cache_free(rsxx_dma_pool, dma);
+ rsxx_free_dma(ctrl, dma);
}
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q)
+ struct list_head *q, unsigned int done)
{
struct rsxx_dma *dma;
struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
list_for_each_entry_safe(dma, tmp, q, list) {
list_del(&dma->list);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ if (done & COMPLETE_DMA)
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ else
+ rsxx_free_dma(ctrl, dma);
cnt++;
}
@@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data)
/* Clean up the DMA queue */
spin_lock(&ctrl->queue_lock);
- cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock(&ctrl->queue_lock);
cnt += rsxx_dma_cancel(ctrl);
@@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
int tag;
int cmds_pending = 0;
struct hw_cmd *hw_cmd_buf;
+ int dir;
hw_cmd_buf = ctrl->cmd.buf;
@@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
continue;
}
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ if (dma->cmd == HW_CMD_BLK_WRITE)
+ dir = PCI_DMA_TODEVICE;
+ else
+ dir = PCI_DMA_FROMDEVICE;
+
+ /*
+ * The function pci_map_page is placed here because we
+ * can only, by design, issue up to 255 commands to the
+ * hardware at one time per DMA channel. So the maximum
+ * amount of mapped memory would be 255 * 4 channels *
+ * 4096 Bytes which is less than 2GB, the limit of a x8
+ * Non-HWWD PCIe slot. This way the pci_map_page
+ * function should never fail because of a lack of
+ * mappable memory.
+ */
+ dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+ dma->pg_off, dma->sub_page.cnt << 9, dir);
+ if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ push_tracker(ctrl->trackers, tag);
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ continue;
+ }
+ }
+
set_tracker_dma(ctrl->trackers, tag, dma);
hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
hw_cmd_buf[ctrl->cmd.idx].tag = tag;
@@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
if (!dma)
return -ENOMEM;
- dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
- dir ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (!dma->dma_addr) {
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
-
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
dma->laddr = laddr;
dma->sub_page.off = (dma_off >> 9);
@@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
return 0;
bvec_err:
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
+ for (i = 0; i < card->n_targets; i++)
+ rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+ FREE_DMA);
return st;
}
@@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
/* Clean up the DMA queue */
spin_lock_bh(&ctrl->queue_lock);
- rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock_bh(&ctrl->queue_lock);
rsxx_dma_cancel(ctrl);
@@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
else
card->ctrl[i].stats.reads_issued--;
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ }
+
list_add_tail(&dma->list, &issued_dmas[i]);
push_tracker(card->ctrl[i].trackers, j);
cnt++;
@@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
card->ctrl[i].stats.sw_q_depth += cnt;
card->ctrl[i].e_cnt = 0;
-
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- if (dma->dma_addr)
- pci_unmap_page(card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- }
spin_unlock_bh(&card->ctrl[i].queue_lock);
}
@@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
return 0;
}
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma *dma;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- dma->dma_addr = pci_map_page(card->dev, dma->page,
- dma->pg_off, get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (!dma->dma_addr) {
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
- }
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
-
- return 0;
-}
-
int rsxx_dma_init(void)
{
rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
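The comment added to rsxx_issue_dmas() above argues that mapping pages at issue time cannot exhaust mappable memory because outstanding work is bounded. A minimal standalone C sketch of that bound (illustrative only, not part of the patch; constants are taken from the comment):

#include <stdio.h>

int main(void)
{
	/* at most 255 outstanding commands per DMA channel, 4 channels,
	 * one 4 KiB page mapped per command (values from the comment) */
	unsigned long long worst_case = 255ULL * 4 * 4096;

	/* prints 4177920 bytes (~4 MiB), far below the 2 GiB figure cited
	 * for an x8 non-HWWD PCIe slot */
	printf("worst-case mapped memory: %llu bytes\n", worst_case);
	return 0;
}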
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 5ad5055a410..6bbc64d0f69 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -52,7 +52,7 @@ struct proc_cmd;
#define RS70_PCI_REV_SUPPORTED 4
#define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0"
+#define DRIVER_VERSION "4.0.3.2516"
/* Block size is 4096 */
#define RSXX_HW_BLK_SHIFT 12
@@ -345,6 +345,11 @@ enum rsxx_creg_stat {
CREG_STAT_TAG_MASK = 0x0000ff00,
};
+enum rsxx_dma_finish {
+ FREE_DMA = 0x0,
+ COMPLETE_DMA = 0x1,
+};
+
static inline unsigned int CREG_DATA(int N)
{
return CREG_DATA0 + (N << 2);
@@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
int rsxx_dma_setup(struct rsxx_cardinfo *card);
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+ struct list_head *q,
+ unsigned int done);
int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
void rsxx_dma_cleanup(void);
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
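The rsxx_dma_finish flag declared above decides whether a queued DMA is completed (its callback fires with DMA_CANCELLED) or merely freed. A standalone sketch mirroring the new per-entry branch in rsxx_cleanup_dma_queue() (illustrative only; the stub functions stand in for rsxx_complete_dma()/rsxx_free_dma()):

#include <stdio.h>

enum rsxx_dma_finish { FREE_DMA = 0x0, COMPLETE_DMA = 0x1 };

static void complete_dma(int id) { printf("dma %d completed (cancelled)\n", id); }
static void free_dma(int id)     { printf("dma %d freed, no callback\n", id); }

/* mirrors the per-entry branch in rsxx_cleanup_dma_queue() */
static void cleanup(int id, unsigned int done)
{
	if (done & COMPLETE_DMA)
		complete_dma(id);
	else
		free_dma(id);
}

int main(void)
{
	cleanup(1, COMPLETE_DMA);	/* tear-down / stalled-engine paths */
	cleanup(2, FREE_DMA);		/* rsxx_dma_queue_bio() error path */
	return 0;
}

FREE_DMA is used on the bio queueing error path, where the partially built DMA list is discarded without invoking the completion callbacks.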
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
new file mode 100644
index 00000000000..9199c93be92
--- /dev/null
+++ b/drivers/block/skd_main.c
@@ -0,0 +1,5432 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
+ * Initial Driver Design!
+ * Thomas Swann <tswann@stec-inc.com>
+ * Interrupt handling.
+ * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
+ * biomode implementation.
+ * Akhil Bhansali <abhansali@stec-inc.com>
+ * Added support for DISCARD / FLUSH and FUA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/hdreg.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/scatterlist.h>
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/aer.h>
+#include <linux/ctype.h>
+#include <linux/wait.h>
+#include <linux/uio.h>
+#include <scsi/scsi.h>
+#include <scsi/sg.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+#include "skd_s1120.h"
+
+static int skd_dbg_level;
+static int skd_isr_comp_limit = 4;
+
+enum {
+ STEC_LINK_2_5GTS = 0,
+ STEC_LINK_5GTS = 1,
+ STEC_LINK_8GTS = 2,
+ STEC_LINK_UNKNOWN = 0xFF
+};
+
+enum {
+ SKD_FLUSH_INITIALIZER,
+ SKD_FLUSH_ZERO_SIZE_FIRST,
+ SKD_FLUSH_DATA_SECOND,
+};
+
+#define SKD_ASSERT(expr) \
+ do { \
+ if (unlikely(!(expr))) { \
+ pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+ # expr, __FILE__, __func__, __LINE__); \
+ } \
+ } while (0)
+
+#define DRV_NAME "skd"
+#define DRV_VERSION "2.2.1"
+#define DRV_BUILD_ID "0260"
+#define PFX DRV_NAME ": "
+#define DRV_BIN_VERSION 0x100
+#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
+
+MODULE_AUTHOR("bug-reports: support@stec-inc.com");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
+MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
+
+#define PCI_VENDOR_ID_STEC 0x1B39
+#define PCI_DEVICE_ID_S1120 0x0001
+
+#define SKD_FUA_NV (1 << 1)
+#define SKD_MINORS_PER_DEVICE 16
+
+#define SKD_MAX_QUEUE_DEPTH 200u
+
+#define SKD_PAUSE_TIMEOUT (5 * 1000)
+
+#define SKD_N_FITMSG_BYTES (512u)
+
+#define SKD_N_SPECIAL_CONTEXT 32u
+#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
+
+/* SG elements are 32 bytes, so we can make this 4096 and still be under the
+ * 128KB limit. That allows 4096*4K = 16M xfer size
+ */
+#define SKD_N_SG_PER_REQ_DEFAULT 256u
+#define SKD_N_SG_PER_SPECIAL 256u
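A quick standalone check of the sizing arithmetic in the comment above (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long sg_elems = 4096, sg_desc_bytes = 32, page_bytes = 4096;

	/* 4096 descriptors of 32 bytes each -> 128 KiB descriptor list */
	printf("descriptor list: %lu KiB\n", sg_elems * sg_desc_bytes / 1024);
	/* 4096 elements * 4 KiB pages -> 16 MiB maximum transfer */
	printf("max transfer:    %lu MiB\n", sg_elems * page_bytes / (1024 * 1024));
	return 0;
}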
+
+#define SKD_N_COMPLETION_ENTRY 256u
+#define SKD_N_READ_CAP_BYTES (8u)
+
+#define SKD_N_INTERNAL_BYTES (512u)
+
+/* 5 bits of uniquifier, 0xF800 */
+#define SKD_ID_INCR (0x400)
+#define SKD_ID_TABLE_MASK (3u << 8u)
+#define SKD_ID_RW_REQUEST (0u << 8u)
+#define SKD_ID_INTERNAL (1u << 8u)
+#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
+#define SKD_ID_FIT_MSG (3u << 8u)
+#define SKD_ID_SLOT_MASK 0x00FFu
+#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
+
+#define SKD_N_TIMEOUT_SLOT 4u
+#define SKD_TIMEOUT_SLOT_MASK 3u
+
+#define SKD_N_MAX_SECTORS 2048u
+
+#define SKD_MAX_RETRIES 2u
+
+#define SKD_TIMER_SECONDS(seconds) (seconds)
+#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
+
+#define INQ_STD_NBYTES 36
+#define SKD_DISCARD_CDB_LENGTH 24
+
+enum skd_drvr_state {
+ SKD_DRVR_STATE_LOAD,
+ SKD_DRVR_STATE_IDLE,
+ SKD_DRVR_STATE_BUSY,
+ SKD_DRVR_STATE_STARTING,
+ SKD_DRVR_STATE_ONLINE,
+ SKD_DRVR_STATE_PAUSING,
+ SKD_DRVR_STATE_PAUSED,
+ SKD_DRVR_STATE_DRAINING_TIMEOUT,
+ SKD_DRVR_STATE_RESTARTING,
+ SKD_DRVR_STATE_RESUMING,
+ SKD_DRVR_STATE_STOPPING,
+ SKD_DRVR_STATE_FAULT,
+ SKD_DRVR_STATE_DISAPPEARED,
+ SKD_DRVR_STATE_PROTOCOL_MISMATCH,
+ SKD_DRVR_STATE_BUSY_ERASE,
+ SKD_DRVR_STATE_BUSY_SANITIZE,
+ SKD_DRVR_STATE_BUSY_IMMINENT,
+ SKD_DRVR_STATE_WAIT_BOOT,
+ SKD_DRVR_STATE_SYNCING,
+};
+
+#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
+#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
+#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
+#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
+#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
+#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
+#define SKD_START_WAIT_SECONDS 90u
+
+enum skd_req_state {
+ SKD_REQ_STATE_IDLE,
+ SKD_REQ_STATE_SETUP,
+ SKD_REQ_STATE_BUSY,
+ SKD_REQ_STATE_COMPLETED,
+ SKD_REQ_STATE_TIMEOUT,
+ SKD_REQ_STATE_ABORTED,
+};
+
+enum skd_fit_msg_state {
+ SKD_MSG_STATE_IDLE,
+ SKD_MSG_STATE_BUSY,
+};
+
+enum skd_check_status_action {
+ SKD_CHECK_STATUS_REPORT_GOOD,
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT,
+ SKD_CHECK_STATUS_REQUEUE_REQUEST,
+ SKD_CHECK_STATUS_REPORT_ERROR,
+ SKD_CHECK_STATUS_BUSY_IMMINENT,
+};
+
+struct skd_fitmsg_context {
+ enum skd_fit_msg_state state;
+
+ struct skd_fitmsg_context *next;
+
+ u32 id;
+ u16 outstanding;
+
+ u32 length;
+ u32 offset;
+
+ u8 *msg_buf;
+ dma_addr_t mb_dma_address;
+};
+
+struct skd_request_context {
+ enum skd_req_state state;
+
+ struct skd_request_context *next;
+
+ u16 id;
+ u32 fitmsg_id;
+
+ struct request *req;
+ u8 flush_cmd;
+ u8 discard_page;
+
+ u32 timeout_stamp;
+ u8 sg_data_dir;
+ struct scatterlist *sg;
+ u32 n_sg;
+ u32 sg_byte_count;
+
+ struct fit_sg_descriptor *sksg_list;
+ dma_addr_t sksg_dma_address;
+
+ struct fit_completion_entry_v1 completion;
+
+ struct fit_comp_error_info err_info;
+
+};
+#define SKD_DATA_DIR_HOST_TO_CARD 1
+#define SKD_DATA_DIR_CARD_TO_HOST 2
+#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
+
+struct skd_special_context {
+ struct skd_request_context req;
+
+ u8 orphaned;
+
+ void *data_buf;
+ dma_addr_t db_dma_address;
+
+ u8 *msg_buf;
+ dma_addr_t mb_dma_address;
+};
+
+struct skd_sg_io {
+ fmode_t mode;
+ void __user *argp;
+
+ struct sg_io_hdr sg;
+
+ u8 cdb[16];
+
+ u32 dxfer_len;
+ u32 iovcnt;
+ struct sg_iovec *iov;
+ struct sg_iovec no_iov_iov;
+
+ struct skd_special_context *skspcl;
+};
+
+typedef enum skd_irq_type {
+ SKD_IRQ_LEGACY,
+ SKD_IRQ_MSI,
+ SKD_IRQ_MSIX
+} skd_irq_type_t;
+
+#define SKD_MAX_BARS 2
+
+struct skd_device {
+ volatile void __iomem *mem_map[SKD_MAX_BARS];
+ resource_size_t mem_phys[SKD_MAX_BARS];
+ u32 mem_size[SKD_MAX_BARS];
+
+ skd_irq_type_t irq_type;
+ u32 msix_count;
+ struct skd_msix_entry *msix_entries;
+
+ struct pci_dev *pdev;
+ int pcie_error_reporting_is_enabled;
+
+ spinlock_t lock;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ struct device *class_dev;
+ int gendisk_on;
+ int sync_done;
+
+ atomic_t device_count;
+ u32 devno;
+ u32 major;
+ char name[32];
+ char isr_name[30];
+
+ enum skd_drvr_state state;
+ u32 drive_state;
+
+ u32 in_flight;
+ u32 cur_max_queue_depth;
+ u32 queue_low_water_mark;
+ u32 dev_max_queue_depth;
+
+ u32 num_fitmsg_context;
+ u32 num_req_context;
+
+ u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
+ u32 timeout_stamp;
+ struct skd_fitmsg_context *skmsg_free_list;
+ struct skd_fitmsg_context *skmsg_table;
+
+ struct skd_request_context *skreq_free_list;
+ struct skd_request_context *skreq_table;
+
+ struct skd_special_context *skspcl_free_list;
+ struct skd_special_context *skspcl_table;
+
+ struct skd_special_context internal_skspcl;
+ u32 read_cap_blocksize;
+ u32 read_cap_last_lba;
+ int read_cap_is_valid;
+ int inquiry_is_valid;
+ u8 inq_serial_num[13]; /*12 chars plus null term */
+ u8 id_str[80]; /* holds a composite name (pci + sernum) */
+
+ u8 skcomp_cycle;
+ u32 skcomp_ix;
+ struct fit_completion_entry_v1 *skcomp_table;
+ struct fit_comp_error_info *skerr_table;
+ dma_addr_t cq_dma_address;
+
+ wait_queue_head_t waitq;
+
+ struct timer_list timer;
+ u32 timer_countdown;
+ u32 timer_substate;
+
+ int n_special;
+ int sgs_per_request;
+ u32 last_mtd;
+
+ u32 proto_ver;
+
+ int dbg_level;
+ u32 connect_time_stamp;
+ int connect_retries;
+#define SKD_MAX_CONNECT_RETRIES 16
+ u32 drive_jiffies;
+
+ u32 timo_slot;
+
+
+ struct work_struct completion_worker;
+};
+
+#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
+#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
+#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
+
+static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
+{
+ u32 val;
+
+ if (likely(skdev->dbg_level < 2))
+ return readl(skdev->mem_map[1] + offset);
+ else {
+ barrier();
+ val = readl(skdev->mem_map[1] + offset);
+ barrier();
+ pr_debug("%s:%s:%d offset %x = %x\n",
+ skdev->name, __func__, __LINE__, offset, val);
+ return val;
+ }
+
+}
+
+static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
+ u32 offset)
+{
+ if (likely(skdev->dbg_level < 2)) {
+ writel(val, skdev->mem_map[1] + offset);
+ barrier();
+ } else {
+ barrier();
+ writel(val, skdev->mem_map[1] + offset);
+ barrier();
+ pr_debug("%s:%s:%d offset %x = %x\n",
+ skdev->name, __func__, __LINE__, offset, val);
+ }
+}
+
+static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
+ u32 offset)
+{
+ if (likely(skdev->dbg_level < 2)) {
+ writeq(val, skdev->mem_map[1] + offset);
+ barrier();
+ } else {
+ barrier();
+ writeq(val, skdev->mem_map[1] + offset);
+ barrier();
+ pr_debug("%s:%s:%d offset %x = %016llx\n",
+ skdev->name, __func__, __LINE__, offset, val);
+ }
+}
+
+
+#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
+static int skd_isr_type = SKD_IRQ_DEFAULT;
+
+module_param(skd_isr_type, int, 0444);
+MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
+ " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
+
+#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
+static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
+
+module_param(skd_max_req_per_msg, int, 0444);
+MODULE_PARM_DESC(skd_max_req_per_msg,
+ "Maximum SCSI requests packed in a single message."
+ " (1-14, default==1)");
+
+#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
+#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
+static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
+
+module_param(skd_max_queue_depth, int, 0444);
+MODULE_PARM_DESC(skd_max_queue_depth,
+ "Maximum SCSI requests issued to s1120."
+ " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
+
+static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
+module_param(skd_sgs_per_request, int, 0444);
+MODULE_PARM_DESC(skd_sgs_per_request,
+ "Maximum SG elements per block request."
+ " (1-4096, default==256)");
+
+static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+module_param(skd_max_pass_thru, int, 0444);
+MODULE_PARM_DESC(skd_max_pass_thru,
+ "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
+
+module_param(skd_dbg_level, int, 0444);
+MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
+
+module_param(skd_isr_comp_limit, int, 0444);
+MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
+
+/* Major device number dynamically assigned. */
+static u32 skd_major;
+
+static void skd_destruct(struct skd_device *skdev);
+static const struct block_device_operations skd_blockdev_ops;
+static void skd_send_fitmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg);
+static void skd_send_special_fitmsg(struct skd_device *skdev,
+ struct skd_special_context *skspcl);
+static void skd_request_fn(struct request_queue *rq);
+static void skd_end_request(struct skd_device *skdev,
+ struct skd_request_context *skreq, int error);
+static int skd_preop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq);
+static void skd_postop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq);
+
+static void skd_restart_device(struct skd_device *skdev);
+static int skd_quiesce_dev(struct skd_device *skdev);
+static int skd_unquiesce_dev(struct skd_device *skdev);
+static void skd_release_special(struct skd_device *skdev,
+ struct skd_special_context *skspcl);
+static void skd_disable_interrupts(struct skd_device *skdev);
+static void skd_isr_fwstate(struct skd_device *skdev);
+static void skd_recover_requests(struct skd_device *skdev, int requeue);
+static void skd_soft_reset(struct skd_device *skdev);
+
+static const char *skd_name(struct skd_device *skdev);
+const char *skd_drive_state_to_str(int state);
+const char *skd_skdev_state_to_str(enum skd_drvr_state state);
+static void skd_log_skdev(struct skd_device *skdev, const char *event);
+static void skd_log_skmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg, const char *event);
+static void skd_log_skreq(struct skd_device *skdev,
+ struct skd_request_context *skreq, const char *event);
+
+/*
+ *****************************************************************************
+ * READ/WRITE REQUESTS
+ *****************************************************************************
+ */
+static void skd_fail_all_pending(struct skd_device *skdev)
+{
+ struct request_queue *q = skdev->queue;
+ struct request *req;
+
+ for (;; ) {
+ req = blk_peek_request(q);
+ if (req == NULL)
+ break;
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
+}
+
+static void
+skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
+ int data_dir, unsigned lba,
+ unsigned count)
+{
+ if (data_dir == READ)
+ scsi_req->cdb[0] = 0x28;
+ else
+ scsi_req->cdb[0] = 0x2a;
+
+ scsi_req->cdb[1] = 0;
+ scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
+ scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
+ scsi_req->cdb[4] = (lba & 0xff00) >> 8;
+ scsi_req->cdb[5] = (lba & 0xff);
+ scsi_req->cdb[6] = 0;
+ scsi_req->cdb[7] = (count & 0xff00) >> 8;
+ scsi_req->cdb[8] = count & 0xff;
+ scsi_req->cdb[9] = 0;
+}
+
+static void
+skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
+ struct skd_request_context *skreq)
+{
+ skreq->flush_cmd = 1;
+
+ scsi_req->cdb[0] = 0x35;
+ scsi_req->cdb[1] = 0;
+ scsi_req->cdb[2] = 0;
+ scsi_req->cdb[3] = 0;
+ scsi_req->cdb[4] = 0;
+ scsi_req->cdb[5] = 0;
+ scsi_req->cdb[6] = 0;
+ scsi_req->cdb[7] = 0;
+ scsi_req->cdb[8] = 0;
+ scsi_req->cdb[9] = 0;
+}
+
+static void
+skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
+ struct skd_request_context *skreq,
+ struct page *page,
+ u32 lba, u32 count)
+{
+ char *buf;
+ unsigned long len;
+ struct request *req;
+
+ buf = page_address(page);
+ len = SKD_DISCARD_CDB_LENGTH;
+
+ scsi_req->cdb[0] = UNMAP;
+ scsi_req->cdb[8] = len;
+
+ put_unaligned_be16(6 + 16, &buf[0]);
+ put_unaligned_be16(16, &buf[2]);
+ put_unaligned_be64(lba, &buf[8]);
+ put_unaligned_be32(count, &buf[16]);
+
+ req = skreq->req;
+ blk_add_request_payload(req, page, len);
+ req->buffer = buf;
+}
+
+static void skd_request_fn_not_online(struct request_queue *q);
+
+static void skd_request_fn(struct request_queue *q)
+{
+ struct skd_device *skdev = q->queuedata;
+ struct skd_fitmsg_context *skmsg = NULL;
+ struct fit_msg_hdr *fmh = NULL;
+ struct skd_request_context *skreq;
+ struct request *req = NULL;
+ struct skd_scsi_request *scsi_req;
+ struct page *page;
+ unsigned long io_flags;
+ int error;
+ u32 lba;
+ u32 count;
+ int data_dir;
+ u32 be_lba;
+ u32 be_count;
+ u64 be_dmaa;
+ u64 cmdctxt;
+ u32 timo_slot;
+ void *cmd_ptr;
+ int flush, fua;
+
+ if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ skd_request_fn_not_online(q);
+ return;
+ }
+
+ if (blk_queue_stopped(skdev->queue)) {
+ if (skdev->skmsg_free_list == NULL ||
+ skdev->skreq_free_list == NULL ||
+ skdev->in_flight >= skdev->queue_low_water_mark)
+ /* There is still some kind of shortage */
+ return;
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
+ }
+
+ /*
+ * Stop conditions:
+ * - There are no more native requests
+ * - There are already the maximum number of requests in progress
+ * - There are no more skd_request_context entries
+ * - There are no more FIT msg buffers
+ */
+ for (;; ) {
+
+ flush = fua = 0;
+
+ req = blk_peek_request(q);
+
+ /* Are there any native requests to start? */
+ if (req == NULL)
+ break;
+
+ lba = (u32)blk_rq_pos(req);
+ count = blk_rq_sectors(req);
+ data_dir = rq_data_dir(req);
+ io_flags = req->cmd_flags;
+
+ if (io_flags & REQ_FLUSH)
+ flush++;
+
+ if (io_flags & REQ_FUA)
+ fua++;
+
+ pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
+ "count=%u(0x%x) dir=%d\n",
+ skdev->name, __func__, __LINE__,
+ req, lba, lba, count, count, data_dir);
+
+ /* At this point we know there is a request */
+
+		/* Are too many requests already in progress? */
+ if (skdev->in_flight >= skdev->cur_max_queue_depth) {
+ pr_debug("%s:%s:%d qdepth %d, limit %d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->in_flight, skdev->cur_max_queue_depth);
+ break;
+ }
+
+ /* Is a skd_request_context available? */
+ skreq = skdev->skreq_free_list;
+ if (skreq == NULL) {
+ pr_debug("%s:%s:%d Out of req=%p\n",
+ skdev->name, __func__, __LINE__, q);
+ break;
+ }
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
+ SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
+
+ /* Now we check to see if we can get a fit msg */
+ if (skmsg == NULL) {
+ if (skdev->skmsg_free_list == NULL) {
+ pr_debug("%s:%s:%d Out of msg\n",
+ skdev->name, __func__, __LINE__);
+ break;
+ }
+ }
+
+ skreq->flush_cmd = 0;
+ skreq->n_sg = 0;
+ skreq->sg_byte_count = 0;
+ skreq->discard_page = 0;
+
+ /*
+ * OK to now dequeue request from q.
+ *
+		 * At this point we are committed to either start or reject
+ * the native request. Note that skd_request_context is
+ * available but is still at the head of the free list.
+ */
+ blk_start_request(req);
+ skreq->req = req;
+ skreq->fitmsg_id = 0;
+
+ /* Either a FIT msg is in progress or we have to start one. */
+ if (skmsg == NULL) {
+ /* Are there any FIT msg buffers available? */
+ skmsg = skdev->skmsg_free_list;
+ if (skmsg == NULL) {
+ pr_debug("%s:%s:%d Out of msg skdev=%p\n",
+ skdev->name, __func__, __LINE__,
+ skdev);
+ break;
+ }
+ SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
+ SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
+
+ skdev->skmsg_free_list = skmsg->next;
+
+ skmsg->state = SKD_MSG_STATE_BUSY;
+ skmsg->id += SKD_ID_INCR;
+
+ /* Initialize the FIT msg header */
+ fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
+ memset(fmh, 0, sizeof(*fmh));
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ skmsg->length = sizeof(*fmh);
+ }
+
+ skreq->fitmsg_id = skmsg->id;
+
+ /*
+ * Note that a FIT msg may have just been started
+ * but contains no SoFIT requests yet.
+ */
+
+ /*
+ * Transcode the request, checking as we go. The outcome of
+ * the transcoding is represented by the error variable.
+ */
+ cmd_ptr = &skmsg->msg_buf[skmsg->length];
+ memset(cmd_ptr, 0, 32);
+
+ be_lba = cpu_to_be32(lba);
+ be_count = cpu_to_be32(count);
+ be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
+ cmdctxt = skreq->id + SKD_ID_INCR;
+
+ scsi_req = cmd_ptr;
+ scsi_req->hdr.tag = cmdctxt;
+ scsi_req->hdr.sg_list_dma_address = be_dmaa;
+
+ if (data_dir == READ)
+ skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
+ else
+ skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
+
+ if (io_flags & REQ_DISCARD) {
+ page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!page) {
+ pr_err("request_fn:Page allocation failed.\n");
+ skd_end_request(skdev, skreq, -ENOMEM);
+ break;
+ }
+ skreq->discard_page = 1;
+ skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
+
+ } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+ skd_prep_zerosize_flush_cdb(scsi_req, skreq);
+ SKD_ASSERT(skreq->flush_cmd == 1);
+
+ } else {
+ skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
+ }
+
+ if (fua)
+ scsi_req->cdb[1] |= SKD_FUA_NV;
+
+ if (!req->bio)
+ goto skip_sg;
+
+ error = skd_preop_sg_list(skdev, skreq);
+
+ if (error != 0) {
+ /*
+ * Complete the native request with error.
+ * Note that the request context is still at the
+ * head of the free list, and that the SoFIT request
+ * was encoded into the FIT msg buffer but the FIT
+ * msg length has not been updated. In short, the
+ * only resource that has been allocated but might
+ * not be used is that the FIT msg could be empty.
+ */
+ pr_debug("%s:%s:%d error Out\n",
+ skdev->name, __func__, __LINE__);
+ skd_end_request(skdev, skreq, error);
+ continue;
+ }
+
+skip_sg:
+ scsi_req->hdr.sg_list_len_bytes =
+ cpu_to_be32(skreq->sg_byte_count);
+
+ /* Complete resource allocations. */
+ skdev->skreq_free_list = skreq->next;
+ skreq->state = SKD_REQ_STATE_BUSY;
+ skreq->id += SKD_ID_INCR;
+
+ skmsg->length += sizeof(struct skd_scsi_request);
+ fmh->num_protocol_cmds_coalesced++;
+
+ /*
+ * Update the active request counts.
+ * Capture the timeout timestamp.
+ */
+ skreq->timeout_stamp = skdev->timeout_stamp;
+ timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+ skdev->timeout_slot[timo_slot]++;
+ skdev->in_flight++;
+ pr_debug("%s:%s:%d req=0x%x busy=%d\n",
+ skdev->name, __func__, __LINE__,
+ skreq->id, skdev->in_flight);
+
+ /*
+ * If the FIT msg buffer is full send it.
+ */
+ if (skmsg->length >= SKD_N_FITMSG_BYTES ||
+ fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
+ skd_send_fitmsg(skdev, skmsg);
+ skmsg = NULL;
+ fmh = NULL;
+ }
+ }
+
+ /*
+ * Is a FIT msg in progress? If it is empty put the buffer back
+ * on the free list. If it is non-empty send what we got.
+ * This minimizes latency when there are fewer requests than
+ * what fits in a FIT msg.
+ */
+ if (skmsg != NULL) {
+ /* Bigger than just a FIT msg header? */
+ if (skmsg->length > sizeof(struct fit_msg_hdr)) {
+ pr_debug("%s:%s:%d sending msg=%p, len %d\n",
+ skdev->name, __func__, __LINE__,
+ skmsg, skmsg->length);
+ skd_send_fitmsg(skdev, skmsg);
+ } else {
+ /*
+ * The FIT msg is empty. It means we got started
+ * on the msg, but the requests were rejected.
+ */
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->id += SKD_ID_INCR;
+ skmsg->next = skdev->skmsg_free_list;
+ skdev->skmsg_free_list = skmsg;
+ }
+ skmsg = NULL;
+ fmh = NULL;
+ }
+
+ /*
+ * If req is non-NULL it means there is something to do but
+ * we are out of a resource.
+ */
+ if (req)
+ blk_stop_queue(skdev->queue);
+}
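skd_request_fn() above coalesces requests into FIT messages: each request appends one SoFIT command to the current message, and the message is sent as soon as the buffer fills or the per-message request limit is hit; a non-empty trailing message is flushed after the loop. A standalone sketch of that batching rule (illustrative only; the header and per-request sizes and the raised per-message limit are assumptions, the real default for skd_max_req_per_msg is 1):

#include <stdio.h>

#define FITMSG_BYTES	512u	/* SKD_N_FITMSG_BYTES */
#define HDR_BYTES	64u	/* stand-in for sizeof(struct fit_msg_hdr) */
#define REQ_BYTES	64u	/* stand-in for sizeof(struct skd_scsi_request) */
#define MAX_PER_MSG	4u	/* skd_max_req_per_msg, assumed raised from 1 */

int main(void)
{
	unsigned int length = HDR_BYTES, coalesced = 0;

	for (int req = 1; req <= 10; req++) {
		length += REQ_BYTES;
		coalesced++;
		if (length >= FITMSG_BYTES || coalesced >= MAX_PER_MSG) {
			printf("send msg: %u requests, %u bytes\n",
			       coalesced, length);
			length = HDR_BYTES;
			coalesced = 0;
		}
	}
	if (coalesced)
		printf("flush trailing msg: %u requests\n", coalesced);
	return 0;
}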
+
+static void skd_end_request(struct skd_device *skdev,
+ struct skd_request_context *skreq, int error)
+{
+ struct request *req = skreq->req;
+ unsigned int io_flags = req->cmd_flags;
+
+ if ((io_flags & REQ_DISCARD) &&
+ (skreq->discard_page == 1)) {
+ pr_debug("%s:%s:%d, free the page!",
+ skdev->name, __func__, __LINE__);
+ free_page((unsigned long)req->buffer);
+ req->buffer = NULL;
+ }
+
+ if (unlikely(error)) {
+ struct request *req = skreq->req;
+ char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
+ u32 lba = (u32)blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
+ skd_name(skdev), cmd, lba, count, skreq->id);
+ } else
+ pr_debug("%s:%s:%d id=0x%x error=%d\n",
+ skdev->name, __func__, __LINE__, skreq->id, error);
+
+ __blk_end_request_all(skreq->req, error);
+}
+
+static int skd_preop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ struct request *req = skreq->req;
+ int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
+ int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+ struct scatterlist *sg = &skreq->sg[0];
+ int n_sg;
+ int i;
+
+ skreq->sg_byte_count = 0;
+
+ /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
+ skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+
+ n_sg = blk_rq_map_sg(skdev->queue, req, sg);
+ if (n_sg <= 0)
+ return -EINVAL;
+
+ /*
+ * Map scatterlist to PCI bus addresses.
+ * Note PCI might change the number of entries.
+ */
+ n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+ if (n_sg <= 0)
+ return -EINVAL;
+
+ SKD_ASSERT(n_sg <= skdev->sgs_per_request);
+
+ skreq->n_sg = n_sg;
+
+ for (i = 0; i < n_sg; i++) {
+ struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+ u32 cnt = sg_dma_len(&sg[i]);
+ uint64_t dma_addr = sg_dma_address(&sg[i]);
+
+ sgd->control = FIT_SGD_CONTROL_NOT_LAST;
+ sgd->byte_count = cnt;
+ skreq->sg_byte_count += cnt;
+ sgd->host_side_addr = dma_addr;
+ sgd->dev_side_addr = 0;
+ }
+
+ skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
+ skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skdev->name, __func__, __LINE__,
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ for (i = 0; i < n_sg; i++) {
+ struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+ pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
+ "addr=0x%llx next=0x%llx\n",
+ skdev->name, __func__, __LINE__,
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
+ }
+ }
+
+ return 0;
+}
+
+static void skd_postop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
+ int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+
+ /*
+ * restore the next ptr for next IO request so we
+ * don't have to set it every time.
+ */
+ skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
+ skreq->sksg_dma_address +
+ ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
+ pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
+}
+
+static void skd_request_fn_not_online(struct request_queue *q)
+{
+ struct skd_device *skdev = q->queuedata;
+ int error;
+
+ SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
+
+ skd_log_skdev(skdev, "req_not_online");
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ /* In case of starting, we haven't started the queue,
+ * so we can't get here... but requests are
+ * possibly hanging out waiting for us because we
+ * reported the dev/skd0 already. They'll wait
+ * forever if connect doesn't complete.
+ * What to do??? delay dev/skd0 ??
+ */
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+ return;
+
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ error = -EIO;
+ break;
+ }
+
+	/* If we get here, terminate all pending block requests
+ * with EIO and any scsi pass thru with appropriate sense
+ */
+
+ skd_fail_all_pending(skdev);
+}
+
+/*
+ *****************************************************************************
+ * TIMER
+ *****************************************************************************
+ */
+
+static void skd_timer_tick_not_online(struct skd_device *skdev);
+
+static void skd_timer_tick(ulong arg)
+{
+ struct skd_device *skdev = (struct skd_device *)arg;
+
+ u32 timo_slot;
+ u32 overdue_timestamp;
+ unsigned long reqflags;
+ u32 state;
+
+ if (skdev->state == SKD_DRVR_STATE_FAULT)
+ /* The driver has declared fault, and we want it to
+ * stay that way until driver is reloaded.
+ */
+ return;
+
+ spin_lock_irqsave(&skdev->lock, reqflags);
+
+ state = SKD_READL(skdev, FIT_STATUS);
+ state &= FIT_SR_DRIVE_STATE_MASK;
+ if (state != skdev->drive_state)
+ skd_isr_fwstate(skdev);
+
+ if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ skd_timer_tick_not_online(skdev);
+ goto timer_func_out;
+ }
+ skdev->timeout_stamp++;
+ timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+
+ /*
+ * All requests that happened during the previous use of
+ * this slot should be done by now. The previous use was
+ * over 7 seconds ago.
+ */
+ if (skdev->timeout_slot[timo_slot] == 0)
+ goto timer_func_out;
+
+ /* Something is overdue */
+ overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
+
+ pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->timeout_slot[timo_slot], skdev->in_flight);
+ pr_err("(%s): Overdue IOs (%d), busy %d\n",
+ skd_name(skdev), skdev->timeout_slot[timo_slot],
+ skdev->in_flight);
+
+ skdev->timer_countdown = SKD_DRAINING_TIMO;
+ skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
+ skdev->timo_slot = timo_slot;
+ blk_stop_queue(skdev->queue);
+
+timer_func_out:
+ mod_timer(&skdev->timer, (jiffies + HZ));
+
+ spin_unlock_irqrestore(&skdev->lock, reqflags);
+}
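skd_timer_tick() relies on a small ring of per-second timeout slots: a request is charged to slot (timeout_stamp & SKD_TIMEOUT_SLOT_MASK) when issued, and when the timer comes back around to a slot whose counter is still non-zero, the requests charged there have been outstanding for roughly SKD_N_TIMEOUT_SLOT ticks and are overdue. A standalone sketch of the slot indexing (illustrative only, not driver code):

#include <stdio.h>

#define N_SLOTS   4u	/* SKD_N_TIMEOUT_SLOT */
#define SLOT_MASK 3u	/* SKD_TIMEOUT_SLOT_MASK */

int main(void)
{
	unsigned int timeout_slot[N_SLOTS] = { 0 };
	unsigned int stamp = 0;

	timeout_slot[stamp & SLOT_MASK]++;	/* request issued at tick 0 */

	for (int tick = 1; tick <= 4; tick++) {
		stamp++;			/* one timer tick per second */
		if (timeout_slot[stamp & SLOT_MASK])
			printf("tick %d: slot %u still busy -> overdue\n",
			       tick, stamp & SLOT_MASK);
	}
	return 0;
}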
+
+static void skd_timer_tick_not_online(struct skd_device *skdev)
+{
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_IDLE:
+ case SKD_DRVR_STATE_LOAD:
+ break;
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
+ skdev->name, __func__, __LINE__,
+ skdev->drive_state, skdev->state);
+ /* If we've been in sanitize for 3 seconds, we figure we're not
+		 * going to get any more completions, so recover requests now
+ */
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ skd_recover_requests(skdev, 0);
+ break;
+
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state, skdev->timer_countdown);
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
+ skdev->name, __func__, __LINE__,
+ skdev->state, skdev->timer_countdown);
+ skd_restart_device(skdev);
+ break;
+
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ case SKD_DRVR_STATE_STARTING:
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ /* For now, we fault the drive. Could attempt resets to
+		 * recover at some point. */
+ skdev->state = SKD_DRVR_STATE_FAULT;
+
+ pr_err("(%s): DriveFault Connect Timeout (%x)\n",
+ skd_name(skdev), skdev->drive_state);
+
+ /*start the queue so we can respond with error to requests */
+ /* wakeup anyone waiting for startup complete */
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case SKD_DRVR_STATE_ONLINE:
+ /* shouldn't get here. */
+ break;
+
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ break;
+
+ case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+ pr_debug("%s:%s:%d "
+ "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
+ skdev->name, __func__, __LINE__,
+ skdev->timo_slot,
+ skdev->timer_countdown,
+ skdev->in_flight,
+ skdev->timeout_slot[skdev->timo_slot]);
+ /* if the slot has cleared we can let the I/O continue */
+ if (skdev->timeout_slot[skdev->timo_slot] == 0) {
+ pr_debug("%s:%s:%d Slot drained, starting queue.\n",
+ skdev->name, __func__, __LINE__);
+ skdev->state = SKD_DRVR_STATE_ONLINE;
+ blk_start_queue(skdev->queue);
+ return;
+ }
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ skd_restart_device(skdev);
+ break;
+
+ case SKD_DRVR_STATE_RESTARTING:
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ /* For now, we fault the drive. Could attempt resets to
+		 * recover at some point. */
+ skdev->state = SKD_DRVR_STATE_FAULT;
+ pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
+ skd_name(skdev), skdev->drive_state);
+
+ /*
+ * Recovering does two things:
+ * 1. completes IO with error
+ * 2. reclaims dma resources
+ * When is it safe to recover requests?
+ * - if the drive state is faulted
+	 *  - if the state is still soft reset after our timeout
+ * - if the drive registers are dead (state = FF)
+ * If it is "unsafe", we still need to recover, so we will
+ * disable pci bus mastering and disable our interrupts.
+ */
+
+ if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
+ (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
+ (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
+ /* It never came out of soft reset. Try to
+ * recover the requests and then let them
+ * fail. This is to mitigate hung processes. */
+ skd_recover_requests(skdev, 0);
+ else {
+ pr_err("(%s): Disable BusMaster (%x)\n",
+ skd_name(skdev), skdev->drive_state);
+ pci_disable_device(skdev->pdev);
+ skd_disable_interrupts(skdev);
+ skd_recover_requests(skdev, 0);
+ }
+
+ /*start the queue so we can respond with error to requests */
+ /* wakeup anyone waiting for startup complete */
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case SKD_DRVR_STATE_RESUMING:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ break;
+ }
+}
+
+static int skd_start_timer(struct skd_device *skdev)
+{
+ int rc;
+
+ init_timer(&skdev->timer);
+ setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+
+ rc = mod_timer(&skdev->timer, (jiffies + HZ));
+ if (rc)
+ pr_err("%s: failed to start timer %d\n",
+ __func__, rc);
+ return rc;
+}
+
+static void skd_kill_timer(struct skd_device *skdev)
+{
+ del_timer_sync(&skdev->timer);
+}
+
+/*
+ *****************************************************************************
+ * IOCTL
+ *****************************************************************************
+ */
+static int skd_ioctl_sg_io(struct skd_device *skdev,
+ fmode_t mode, void __user *argp);
+static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_prep_buffering(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_copy_buffer(struct skd_device *skdev,
+ struct skd_sg_io *sksgio, int dxfer_dir);
+static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
+static int skd_sg_io_release_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_put_status(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+
+static void skd_complete_special(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl);
+
+static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
+ uint cmd_in, ulong arg)
+{
+ int rc = 0;
+ struct gendisk *disk = bdev->bd_disk;
+ struct skd_device *skdev = disk->private_data;
+ void __user *p = (void *)arg;
+
+ pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
+ skdev->name, __func__, __LINE__,
+ disk->disk_name, current->comm, mode, cmd_in, arg);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd_in) {
+ case SG_SET_TIMEOUT:
+ case SG_GET_TIMEOUT:
+ case SG_GET_VERSION_NUM:
+ rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
+ break;
+ case SG_IO:
+ rc = skd_ioctl_sg_io(skdev, mode, p);
+ break;
+
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ pr_debug("%s:%s:%d %s: completion rc %d\n",
+ skdev->name, __func__, __LINE__, disk->disk_name, rc);
+ return rc;
+}
+
+static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
+ void __user *argp)
+{
+ int rc;
+ struct skd_sg_io sksgio;
+
+ memset(&sksgio, 0, sizeof(sksgio));
+ sksgio.mode = mode;
+ sksgio.argp = argp;
+ sksgio.iov = &sksgio.no_iov_iov;
+
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_ONLINE:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ break;
+
+ default:
+ pr_debug("%s:%s:%d drive not online\n",
+ skdev->name, __func__, __LINE__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_prep_buffering(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_await(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_put_status(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = 0;
+
+out:
+ skd_sg_io_release_skspcl(skdev, &sksgio);
+
+ if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
+ kfree(sksgio.iov);
+ return rc;
+}
+
+static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct sg_io_hdr *sgp = &sksgio->sg;
+ int i, acc;
+
+ if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
+ pr_debug("%s:%s:%d access sg failed %p\n",
+ skdev->name, __func__, __LINE__, sksgio->argp);
+ return -EFAULT;
+ }
+
+ if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
+ pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
+ skdev->name, __func__, __LINE__, sksgio->argp);
+ return -EFAULT;
+ }
+
+ if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
+ pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
+ skdev->name, __func__, __LINE__, sgp->interface_id);
+ return -EINVAL;
+ }
+
+ if (sgp->cmd_len > sizeof(sksgio->cdb)) {
+ pr_debug("%s:%s:%d cmd_len invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->cmd_len);
+ return -EINVAL;
+ }
+
+ if (sgp->iovec_count > 256) {
+ pr_debug("%s:%s:%d iovec_count invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->iovec_count);
+ return -EINVAL;
+ }
+
+ if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
+ pr_debug("%s:%s:%d dxfer_len invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->dxfer_len);
+ return -EINVAL;
+ }
+
+ switch (sgp->dxfer_direction) {
+ case SG_DXFER_NONE:
+ acc = -1;
+ break;
+
+ case SG_DXFER_TO_DEV:
+ acc = VERIFY_READ;
+ break;
+
+ case SG_DXFER_FROM_DEV:
+ case SG_DXFER_TO_FROM_DEV:
+ acc = VERIFY_WRITE;
+ break;
+
+ default:
+ pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->dxfer_direction);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
+ pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
+ skdev->name, __func__, __LINE__, sgp->cmdp);
+ return -EFAULT;
+ }
+
+ if (sgp->mx_sb_len != 0) {
+ if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
+ pr_debug("%s:%s:%d access sbp failed %p\n",
+ skdev->name, __func__, __LINE__, sgp->sbp);
+ return -EFAULT;
+ }
+ }
+
+ if (sgp->iovec_count == 0) {
+ sksgio->iov[0].iov_base = sgp->dxferp;
+ sksgio->iov[0].iov_len = sgp->dxfer_len;
+ sksgio->iovcnt = 1;
+ sksgio->dxfer_len = sgp->dxfer_len;
+ } else {
+ struct sg_iovec *iov;
+ uint nbytes = sizeof(*iov) * sgp->iovec_count;
+ size_t iov_data_len;
+
+ iov = kmalloc(nbytes, GFP_KERNEL);
+ if (iov == NULL) {
+ pr_debug("%s:%s:%d alloc iovec failed %d\n",
+ skdev->name, __func__, __LINE__,
+ sgp->iovec_count);
+ return -ENOMEM;
+ }
+ sksgio->iov = iov;
+ sksgio->iovcnt = sgp->iovec_count;
+
+ if (copy_from_user(iov, sgp->dxferp, nbytes)) {
+ pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
+ skdev->name, __func__, __LINE__, sgp->dxferp);
+ return -EFAULT;
+ }
+
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov_data_len = 0;
+ for (i = 0; i < sgp->iovec_count; i++) {
+ if (iov_data_len + iov[i].iov_len < iov_data_len)
+ return -EINVAL;
+ iov_data_len += iov[i].iov_len;
+ }
+
+ /* SG_IO howto says that the shorter of the two wins */
+ if (sgp->dxfer_len < iov_data_len) {
+ sksgio->iovcnt = iov_shorten((struct iovec *)iov,
+ sgp->iovec_count,
+ sgp->dxfer_len);
+ sksgio->dxfer_len = sgp->dxfer_len;
+ } else
+ sksgio->dxfer_len = iov_data_len;
+ }
+
+ if (sgp->dxfer_direction != SG_DXFER_NONE) {
+ struct sg_iovec *iov = sksgio->iov;
+ for (i = 0; i < sksgio->iovcnt; i++, iov++) {
+ if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
+ pr_debug("%s:%s:%d access data failed %p/%d\n",
+ skdev->name, __func__, __LINE__,
+ iov->iov_base, (int)iov->iov_len);
+ return -EFAULT;
+ }
+ }
+ }
+
+ return 0;
+}
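Two of the argument checks above are easy to get wrong: the summed iovec length must not wrap, and per the SG_IO interface the shorter of dxfer_len and the iovec total governs the transfer. A standalone sketch of both checks (illustrative only; the values are made up, and the driver does the trimming with iov_shorten()):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t iov_len[] = { 4096, 4096, 512 };
	size_t dxfer_len = 6000, sum = 0;

	for (size_t i = 0; i < sizeof(iov_len) / sizeof(iov_len[0]); i++) {
		if (sum + iov_len[i] < sum)	/* wrapped: reject with -EINVAL */
			return 1;
		sum += iov_len[i];
	}

	/* SG_IO howto: the shorter of the two wins */
	printf("effective length: %zu\n", dxfer_len < sum ? dxfer_len : sum);
	return 0;
}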
+
+static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = NULL;
+ int rc;
+
+ for (;;) {
+ ulong flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ skspcl = skdev->skspcl_free_list;
+ if (skspcl != NULL) {
+ skdev->skspcl_free_list =
+ (struct skd_special_context *)skspcl->req.next;
+ skspcl->req.id += SKD_ID_INCR;
+ skspcl->req.state = SKD_REQ_STATE_SETUP;
+ skspcl->orphaned = 0;
+ skspcl->req.n_sg = 0;
+ }
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ if (skspcl != NULL) {
+ rc = 0;
+ break;
+ }
+
+ pr_debug("%s:%s:%d blocking\n",
+ skdev->name, __func__, __LINE__);
+
+ rc = wait_event_interruptible_timeout(
+ skdev->waitq,
+ (skdev->skspcl_free_list != NULL),
+ msecs_to_jiffies(sksgio->sg.timeout));
+
+ pr_debug("%s:%s:%d unblocking, rc=%d\n",
+ skdev->name, __func__, __LINE__, rc);
+
+ if (rc <= 0) {
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = -EINTR;
+ break;
+ }
+ /*
+ * If we get here rc > 0 meaning the timeout to
+ * wait_event_interruptible_timeout() had time left, hence the
+ * sought event -- non-empty free list -- happened.
+ * Retry the allocation.
+ */
+ }
+ sksgio->skspcl = skspcl;
+
+ return rc;
+}
+
+static int skd_skreq_prep_buffering(struct skd_device *skdev,
+ struct skd_request_context *skreq,
+ u32 dxfer_len)
+{
+ u32 resid = dxfer_len;
+
+ /*
+ * The DMA engine must have aligned addresses and byte counts.
+ */
+ resid += (-resid) & 3;
+ skreq->sg_byte_count = resid;
+
+ skreq->n_sg = 0;
+
+ while (resid > 0) {
+ u32 nbytes = PAGE_SIZE;
+ u32 ix = skreq->n_sg;
+ struct scatterlist *sg = &skreq->sg[ix];
+ struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
+ struct page *page;
+
+ if (nbytes > resid)
+ nbytes = resid;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ return -ENOMEM;
+
+ sg_set_page(sg, page, nbytes, 0);
+
+ /* TODO: This should be going through a pci_???()
+ * routine to do proper mapping. */
+ sksg->control = FIT_SGD_CONTROL_NOT_LAST;
+ sksg->byte_count = nbytes;
+
+ sksg->host_side_addr = sg_phys(sg);
+
+ sksg->dev_side_addr = 0;
+ sksg->next_desc_ptr = skreq->sksg_dma_address +
+ (ix + 1) * sizeof(*sksg);
+
+ skreq->n_sg++;
+ resid -= nbytes;
+ }
+
+ if (skreq->n_sg > 0) {
+ u32 ix = skreq->n_sg - 1;
+ struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
+
+ sksg->control = FIT_SGD_CONTROL_LAST;
+ sksg->next_desc_ptr = 0;
+ }
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ u32 i;
+
+ pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skdev->name, __func__, __LINE__,
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ for (i = 0; i < skreq->n_sg; i++) {
+ struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+
+ pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
+ "addr=0x%llx next=0x%llx\n",
+ skdev->name, __func__, __LINE__,
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
+ }
+ }
+
+ return 0;
+}
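skd_skreq_prep_buffering() above rounds the transfer length up to a multiple of 4 with resid += (-resid) & 3, since the DMA engine needs aligned byte counts. A standalone sketch of that round-up idiom (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int lens[] = { 1, 4, 10, 4097 };

	for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int resid = lens[i];

		resid += (-resid) & 3;	/* round up to the next multiple of 4 */
		printf("%u -> %u\n", lens[i], resid);	/* 1->4, 4->4, 10->12, 4097->4100 */
	}
	return 0;
}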
+
+static int skd_sg_io_prep_buffering(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ struct skd_request_context *skreq = &skspcl->req;
+ u32 dxfer_len = sksgio->dxfer_len;
+ int rc;
+
+ rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
+ /*
+ * Eventually, errors or not, skd_release_special() is called
+ * to recover allocations including partial allocations.
+ */
+ return rc;
+}
+
+static int skd_sg_io_copy_buffer(struct skd_device *skdev,
+ struct skd_sg_io *sksgio, int dxfer_dir)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ u32 iov_ix = 0;
+ struct sg_iovec curiov;
+ u32 sksg_ix = 0;
+ u8 *bufp = NULL;
+ u32 buf_len = 0;
+ u32 resid = sksgio->dxfer_len;
+ int rc;
+
+ curiov.iov_len = 0;
+ curiov.iov_base = NULL;
+
+ if (dxfer_dir != sksgio->sg.dxfer_direction) {
+ if (dxfer_dir != SG_DXFER_TO_DEV ||
+ sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
+ return 0;
+ }
+
+ while (resid > 0) {
+ u32 nbytes = PAGE_SIZE;
+
+ if (curiov.iov_len == 0) {
+ curiov = sksgio->iov[iov_ix++];
+ continue;
+ }
+
+ if (buf_len == 0) {
+ struct page *page;
+ page = sg_page(&skspcl->req.sg[sksg_ix++]);
+ bufp = page_address(page);
+ buf_len = PAGE_SIZE;
+ }
+
+ nbytes = min_t(u32, nbytes, resid);
+ nbytes = min_t(u32, nbytes, curiov.iov_len);
+ nbytes = min_t(u32, nbytes, buf_len);
+
+ if (dxfer_dir == SG_DXFER_TO_DEV)
+ rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
+ else
+ rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
+
+ if (rc)
+ return -EFAULT;
+
+ resid -= nbytes;
+ curiov.iov_len -= nbytes;
+ curiov.iov_base += nbytes;
+ buf_len -= nbytes;
+ }
+
+ return 0;
+}
+
+static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
+ struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
+
+ memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
+
+ /* Initialize the FIT msg header */
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ fmh->num_protocol_cmds_coalesced = 1;
+
+ /* Initialize the SCSI request */
+ if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
+ scsi_req->hdr.sg_list_dma_address =
+ cpu_to_be64(skspcl->req.sksg_dma_address);
+ scsi_req->hdr.tag = skspcl->req.id;
+ scsi_req->hdr.sg_list_len_bytes =
+ cpu_to_be32(skspcl->req.sg_byte_count);
+ memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
+
+ skspcl->req.state = SKD_REQ_STATE_BUSY;
+ skd_send_special_fitmsg(skdev, skspcl);
+
+ return 0;
+}
+
+static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
+{
+ unsigned long flags;
+ int rc;
+
+ rc = wait_event_interruptible_timeout(skdev->waitq,
+ (sksgio->skspcl->req.state !=
+ SKD_REQ_STATE_BUSY),
+ msecs_to_jiffies(sksgio->sg.
+ timeout));
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
+ pr_debug("%s:%s:%d skspcl %p aborted\n",
+ skdev->name, __func__, __LINE__, sksgio->skspcl);
+
+ /* Build check cond, sense and let command finish. */
+ /* For a timeout, we must fabricate completion and sense
+ * data to complete the command */
+ sksgio->skspcl->req.completion.status =
+ SAM_STAT_CHECK_CONDITION;
+
+ memset(&sksgio->skspcl->req.err_info, 0,
+ sizeof(sksgio->skspcl->req.err_info));
+ sksgio->skspcl->req.err_info.type = 0x70;
+ sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
+ sksgio->skspcl->req.err_info.code = 0x44;
+ sksgio->skspcl->req.err_info.qual = 0;
+ rc = 0;
+ } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
+ /* No longer on the adapter. We finish. */
+ rc = 0;
+ else {
+ /* Something's gone wrong. Still busy. Timeout or
+ * user interrupted (control-C). Mark as an orphan
+ * so it will be disposed when completed. */
+ sksgio->skspcl->orphaned = 1;
+ sksgio->skspcl = NULL;
+ if (rc == 0) {
+ pr_debug("%s:%s:%d timed out %p (%u ms)\n",
+ skdev->name, __func__, __LINE__,
+ sksgio, sksgio->sg.timeout);
+ rc = -ETIMEDOUT;
+ } else {
+ pr_debug("%s:%s:%d cntlc %p\n",
+ skdev->name, __func__, __LINE__, sksgio);
+ rc = -EINTR;
+ }
+ }
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ return rc;
+}
+
+static int skd_sg_io_put_status(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct sg_io_hdr *sgp = &sksgio->sg;
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ int resid = 0;
+
+ u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
+
+ sgp->status = skspcl->req.completion.status;
+ resid = sksgio->dxfer_len - nb;
+
+ sgp->masked_status = sgp->status & STATUS_MASK;
+ sgp->msg_status = 0;
+ sgp->host_status = 0;
+ sgp->driver_status = 0;
+ sgp->resid = resid;
+ if (sgp->masked_status || sgp->host_status || sgp->driver_status)
+ sgp->info |= SG_INFO_CHECK;
+
+ pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ sgp->status, sgp->masked_status, sgp->resid);
+
+ if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
+ if (sgp->mx_sb_len > 0) {
+ struct fit_comp_error_info *ei = &skspcl->req.err_info;
+ u32 nbytes = sizeof(*ei);
+
+ nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
+
+ sgp->sb_len_wr = nbytes;
+
+ if (__copy_to_user(sgp->sbp, ei, nbytes)) {
+ pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
+ skdev->name, __func__, __LINE__,
+ sgp->sbp);
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
+ pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
+ skdev->name, __func__, __LINE__, sksgio->argp);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int skd_sg_io_release_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+
+ if (skspcl != NULL) {
+ ulong flags;
+
+ sksgio->skspcl = NULL;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ skd_release_special(skdev, skspcl);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ }
+
+ return 0;
+}
+
+/*
+ *****************************************************************************
+ * INTERNAL REQUESTS -- generated by driver itself
+ *****************************************************************************
+ */
+
+static int skd_format_internal_skspcl(struct skd_device *skdev)
+{
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
+ struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
+ struct fit_msg_hdr *fmh;
+ uint64_t dma_address;
+ struct skd_scsi_request *scsi;
+
+ fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ fmh->num_protocol_cmds_coalesced = 1;
+
+ scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ memset(scsi, 0, sizeof(*scsi));
+ dma_address = skspcl->req.sksg_dma_address;
+ scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
+ sgd->control = FIT_SGD_CONTROL_LAST;
+ sgd->byte_count = 0;
+ sgd->host_side_addr = skspcl->db_dma_address;
+ sgd->dev_side_addr = 0;
+ sgd->next_desc_ptr = 0LL;
+
+ return 1;
+}
+
+#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
+
+static void skd_send_internal_skspcl(struct skd_device *skdev,
+ struct skd_special_context *skspcl,
+ u8 opcode)
+{
+ struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
+ struct skd_scsi_request *scsi;
+ unsigned char *buf = skspcl->data_buf;
+ int i;
+
+ if (skspcl->req.state != SKD_REQ_STATE_IDLE)
+ /*
+ * A refresh is already in progress.
+ * Just wait for it to finish.
+ */
+ return;
+
+ SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
+ skspcl->req.state = SKD_REQ_STATE_BUSY;
+ skspcl->req.id += SKD_ID_INCR;
+
+ scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi->hdr.tag = skspcl->req.id;
+
+ memset(scsi->cdb, 0, sizeof(scsi->cdb));
+
+ switch (opcode) {
+ case TEST_UNIT_READY:
+ scsi->cdb[0] = TEST_UNIT_READY;
+ sgd->byte_count = 0;
+ scsi->hdr.sg_list_len_bytes = 0;
+ break;
+
+ case READ_CAPACITY:
+ scsi->cdb[0] = READ_CAPACITY;
+ sgd->byte_count = SKD_N_READ_CAP_BYTES;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ break;
+
+ case INQUIRY:
+ scsi->cdb[0] = INQUIRY;
+ scsi->cdb[1] = 0x01; /* evpd */
+ scsi->cdb[2] = 0x80; /* serial number page */
+ scsi->cdb[4] = 0x10;
+ sgd->byte_count = 16;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ break;
+
+ case SYNCHRONIZE_CACHE:
+ scsi->cdb[0] = SYNCHRONIZE_CACHE;
+ sgd->byte_count = 0;
+ scsi->hdr.sg_list_len_bytes = 0;
+ break;
+
+ case WRITE_BUFFER:
+ scsi->cdb[0] = WRITE_BUFFER;
+ scsi->cdb[1] = 0x02;
+ scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
+ scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
+ sgd->byte_count = WR_BUF_SIZE;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ /* fill incrementing byte pattern */
+ for (i = 0; i < sgd->byte_count; i++)
+ buf[i] = i & 0xFF;
+ break;
+
+ case READ_BUFFER:
+ scsi->cdb[0] = READ_BUFFER;
+ scsi->cdb[1] = 0x02;
+ scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
+ scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
+ sgd->byte_count = WR_BUF_SIZE;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ memset(skspcl->data_buf, 0, sgd->byte_count);
+ break;
+
+ default:
+ SKD_ASSERT("Don't know what to send");
+ return;
+
+ }
+ skd_send_special_fitmsg(skdev, skspcl);
+}
+
+static void skd_refresh_device_data(struct skd_device *skdev)
+{
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
+
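+	/*
+	 * TEST_UNIT_READY starts a chain of internal commands; each
+	 * completion in skd_complete_internal() issues the next step
+	 * (write/read buffer check, read capacity, inquiry).
+	 */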
+ skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
+}
+
+static int skd_chk_read_buf(struct skd_device *skdev,
+ struct skd_special_context *skspcl)
+{
+ unsigned char *buf = skspcl->data_buf;
+ int i;
+
+ /* check for incrementing byte pattern */
+ for (i = 0; i < WR_BUF_SIZE; i++)
+ if (buf[i] != (i & 0xFF))
+ return 1;
+
+ return 0;
+}
+
+static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
+ u8 code, u8 qual, u8 fruc)
+{
+ /* If the check condition is of special interest, log a message */
+ if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
+ && (code == 0x04) && (qual == 0x06)) {
+ pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
+ "ascq/fruc %02x/%02x/%02x/%02x\n",
+ skd_name(skdev), key, code, qual, fruc);
+ }
+}
+
+static void skd_complete_internal(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl)
+{
+ u8 *buf = skspcl->data_buf;
+ u8 status;
+ int i;
+ struct skd_scsi_request *scsi =
+ (struct skd_scsi_request *)&skspcl->msg_buf[64];
+
+ SKD_ASSERT(skspcl == &skdev->internal_skspcl);
+
+ pr_debug("%s:%s:%d complete internal %x\n",
+ skdev->name, __func__, __LINE__, scsi->cdb[0]);
+
+ skspcl->req.completion = *skcomp;
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+ skspcl->req.id += SKD_ID_INCR;
+
+ status = skspcl->req.completion.status;
+
+ skd_log_check_status(skdev, status, skerr->key, skerr->code,
+ skerr->qual, skerr->fruc);
+
+ switch (scsi->cdb[0]) {
+ case TEST_UNIT_READY:
+ if (status == SAM_STAT_GOOD)
+ skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
+ else if ((status == SAM_STAT_CHECK_CONDITION) &&
+ (skerr->key == MEDIUM_ERROR))
+ skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
+ else {
+ if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+ pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return;
+ }
+ pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ }
+ break;
+
+ case WRITE_BUFFER:
+ if (status == SAM_STAT_GOOD)
+ skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
+ else {
+ if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+ pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return;
+ }
+ pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ }
+ break;
+
+ case READ_BUFFER:
+ if (status == SAM_STAT_GOOD) {
+ if (skd_chk_read_buf(skdev, skspcl) == 0)
+ skd_send_internal_skspcl(skdev, skspcl,
+ READ_CAPACITY);
+ else {
+ pr_err(
+ "(%s):*** W/R Buffer mismatch %d ***\n",
+ skd_name(skdev), skdev->connect_retries);
+ if (skdev->connect_retries <
+ SKD_MAX_CONNECT_RETRIES) {
+ skdev->connect_retries++;
+ skd_soft_reset(skdev);
+ } else {
+ pr_err(
+ "(%s): W/R Buffer Connect Error\n",
+ skd_name(skdev));
+ return;
+ }
+ }
+
+ } else {
+ if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+ pr_debug("%s:%s:%d "
+ "read buffer failed, don't send anymore state 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return;
+ }
+ pr_debug("%s:%s:%d "
+ "**** read buffer failed, retry skerr\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ }
+ break;
+
+ case READ_CAPACITY:
+ skdev->read_cap_is_valid = 0;
+ if (status == SAM_STAT_GOOD) {
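+			/* READ CAPACITY(10) data: bytes 0-3 last LBA, bytes 4-7 block length, big-endian */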
+ skdev->read_cap_last_lba =
+ (buf[0] << 24) | (buf[1] << 16) |
+ (buf[2] << 8) | buf[3];
+ skdev->read_cap_blocksize =
+ (buf[4] << 24) | (buf[5] << 16) |
+ (buf[6] << 8) | buf[7];
+
+ pr_debug("%s:%s:%d last lba %d, bs %d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->read_cap_last_lba,
+ skdev->read_cap_blocksize);
+
+ set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
+
+ skdev->read_cap_is_valid = 1;
+
+ skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
+ } else if ((status == SAM_STAT_CHECK_CONDITION) &&
+ (skerr->key == MEDIUM_ERROR)) {
+ skdev->read_cap_last_lba = ~0;
+ set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
+ pr_debug("%s:%s:%d "
+ "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
+ } else {
+ pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
+ }
+ break;
+
+ case INQUIRY:
+ skdev->inquiry_is_valid = 0;
+ if (status == SAM_STAT_GOOD) {
+ skdev->inquiry_is_valid = 1;
+
+ for (i = 0; i < 12; i++)
+ skdev->inq_serial_num[i] = buf[i + 4];
+ skdev->inq_serial_num[12] = 0;
+ }
+
+ if (skd_unquiesce_dev(skdev) < 0)
+ pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
+ skdev->name, __func__, __LINE__);
+ /* connection is complete */
+ skdev->connect_retries = 0;
+ break;
+
+ case SYNCHRONIZE_CACHE:
+ if (status == SAM_STAT_GOOD)
+ skdev->sync_done = 1;
+ else
+ skdev->sync_done = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ default:
+ SKD_ASSERT("we didn't send this");
+ }
+}
+
+/*
+ *****************************************************************************
+ * FIT MESSAGES
+ *****************************************************************************
+ */
+
+static void skd_send_fitmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg)
+{
+ u64 qcmd;
+ struct fit_msg_hdr *fmh;
+
+ pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
+ skdev->name, __func__, __LINE__,
+ skmsg->mb_dma_address, skdev->in_flight);
+ pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
+ skdev->name, __func__, __LINE__,
+ skmsg->msg_buf, skmsg->offset);
+
+ qcmd = skmsg->mb_dma_address;
+ qcmd |= FIT_QCMD_QID_NORMAL;
+
+ fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
+ skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ u8 *bp = (u8 *)skmsg->msg_buf;
+ int i;
+ for (i = 0; i < skmsg->length; i += 8) {
+ pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ skdev->name, __func__, __LINE__,
+ i, bp[i + 0], bp[i + 1], bp[i + 2],
+ bp[i + 3], bp[i + 4], bp[i + 5],
+ bp[i + 6], bp[i + 7]);
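+			/* after the first 8 bytes, skip over the rest of the 64-byte FIT header */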
+ if (i == 0)
+ i = 64 - 8;
+ }
+ }
+
+ if (skmsg->length > 256)
+ qcmd |= FIT_QCMD_MSGSIZE_512;
+ else if (skmsg->length > 128)
+ qcmd |= FIT_QCMD_MSGSIZE_256;
+ else if (skmsg->length > 64)
+ qcmd |= FIT_QCMD_MSGSIZE_128;
+ else
+ /*
+ * This makes no sense because the FIT msg header is
+ * 64 bytes. If the msg is only 64 bytes long it has
+ * no payload.
+ */
+ qcmd |= FIT_QCMD_MSGSIZE_64;
+
+	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+}
+
+static void skd_send_special_fitmsg(struct skd_device *skdev,
+ struct skd_special_context *skspcl)
+{
+ u64 qcmd;
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ u8 *bp = (u8 *)skspcl->msg_buf;
+ int i;
+
+ for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
+ pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ skdev->name, __func__, __LINE__, i,
+ bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
+ bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+ if (i == 0)
+ i = 64 - 8;
+ }
+
+ pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ skdev->name, __func__, __LINE__,
+ skspcl, skspcl->req.id, skspcl->req.sksg_list,
+ skspcl->req.sksg_dma_address);
+ for (i = 0; i < skspcl->req.n_sg; i++) {
+ struct fit_sg_descriptor *sgd =
+ &skspcl->req.sksg_list[i];
+
+ pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
+ "addr=0x%llx next=0x%llx\n",
+ skdev->name, __func__, __LINE__,
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
+ }
+ }
+
+ /*
+ * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
+ * and one 64-byte SSDI command.
+ */
+ qcmd = skspcl->mb_dma_address;
+	qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;
+
+ SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+}
+
+/*
+ *****************************************************************************
+ * COMPLETION QUEUE
+ *****************************************************************************
+ */
+
+static void skd_complete_other(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr);
+
+struct sns_info {
+ u8 type;
+ u8 stat;
+ u8 key;
+ u8 asc;
+ u8 ascq;
+ u8 mask;
+ enum skd_check_status_action action;
+};
+
+static struct sns_info skd_chkstat_table[] = {
+ /* Good */
+ { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
+ SKD_CHECK_STATUS_REPORT_GOOD },
+
+ /* Smart alerts */
+ { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+ { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+ { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+
+ /* Retry (with limits) */
+ { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+ { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+ { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+ { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+
+ /* Busy (or about to be) */
+ { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
+ SKD_CHECK_STATUS_BUSY_IMMINENT },
+};
+
+/*
+ * Look up status and sense data to decide how to handle the error
+ * from the device.
+ * mask says which fields must match e.g., mask=0x18 means check
+ * type and stat, ignore key, asc, ascq.
+ */
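+/*
+ * Mask bits: 0x10 type, 0x08 stat, 0x04 key, 0x02 asc, 0x01 ascq.
+ * For example, the DMA-error entry's mask=0x1C compares type, stat
+ * and key only.
+ */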
+
+static enum skd_check_status_action
+skd_check_status(struct skd_device *skdev,
+ u8 cmp_status, volatile struct fit_comp_error_info *skerr)
+{
+ int i, n;
+
+ pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ skd_name(skdev), skerr->key, skerr->code, skerr->qual,
+ skerr->fruc);
+
+ pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
+ skdev->name, __func__, __LINE__, skerr->type, cmp_status,
+ skerr->key, skerr->code, skerr->qual, skerr->fruc);
+
+ /* Does the info match an entry in the good category? */
+	n = ARRAY_SIZE(skd_chkstat_table);
+ for (i = 0; i < n; i++) {
+ struct sns_info *sns = &skd_chkstat_table[i];
+
+ if (sns->mask & 0x10)
+ if (skerr->type != sns->type)
+ continue;
+
+ if (sns->mask & 0x08)
+ if (cmp_status != sns->stat)
+ continue;
+
+ if (sns->mask & 0x04)
+ if (skerr->key != sns->key)
+ continue;
+
+ if (sns->mask & 0x02)
+ if (skerr->code != sns->asc)
+ continue;
+
+ if (sns->mask & 0x01)
+ if (skerr->qual != sns->ascq)
+ continue;
+
+ if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
+ pr_err("(%s): SMART Alert: sense key/asc/ascq "
+ "%02x/%02x/%02x\n",
+ skd_name(skdev), skerr->key,
+ skerr->code, skerr->qual);
+ }
+ return sns->action;
+ }
+
+ /* No other match, so nonzero status means error,
+ * zero status means good
+ */
+ if (cmp_status) {
+ pr_debug("%s:%s:%d status check: error\n",
+ skdev->name, __func__, __LINE__);
+ return SKD_CHECK_STATUS_REPORT_ERROR;
+ }
+
+ pr_debug("%s:%s:%d status check good default\n",
+ skdev->name, __func__, __LINE__);
+ return SKD_CHECK_STATUS_REPORT_GOOD;
+}
+
+static void skd_resolve_req_exception(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ u8 cmp_status = skreq->completion.status;
+
+ switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
+ case SKD_CHECK_STATUS_REPORT_GOOD:
+ case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
+ skd_end_request(skdev, skreq, 0);
+ break;
+
+ case SKD_CHECK_STATUS_BUSY_IMMINENT:
+ skd_log_skreq(skdev, skreq, "retry(busy)");
+ blk_requeue_request(skdev->queue, skreq->req);
+ pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
+ skdev->timer_countdown = SKD_TIMER_MINUTES(20);
+ skd_quiesce_dev(skdev);
+ break;
+
+ case SKD_CHECK_STATUS_REQUEUE_REQUEST:
+ if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ skd_log_skreq(skdev, skreq, "retry");
+ blk_requeue_request(skdev->queue, skreq->req);
+ break;
+ }
+ /* fall through to report error */
+
+ case SKD_CHECK_STATUS_REPORT_ERROR:
+ default:
+ skd_end_request(skdev, skreq, -EIO);
+ break;
+ }
+}
+
+/* assume spinlock is already held */
+static void skd_release_skreq(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ u32 msg_slot;
+ struct skd_fitmsg_context *skmsg;
+
+ u32 timo_slot;
+
+ /*
+ * Reclaim the FIT msg buffer if this is
+ * the first of the requests it carried to
+ * be completed. The FIT msg buffer used to
+ * send this request cannot be reused until
+ * we are sure the s1120 card has copied
+ * it to its memory. The FIT msg might have
+ * contained several requests. As soon as
+ * any of them are completed we know that
+ * the entire FIT msg was transferred.
+ * Only the first completed request will
+ * match the FIT msg buffer id. The FIT
+ * msg buffer id is immediately updated.
+ * When subsequent requests complete the FIT
+ * msg buffer id won't match, so we know
+ * quite cheaply that it is already done.
+ */
+ msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
+ SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
+
+ skmsg = &skdev->skmsg_table[msg_slot];
+ if (skmsg->id == skreq->fitmsg_id) {
+ SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
+ SKD_ASSERT(skmsg->outstanding > 0);
+ skmsg->outstanding--;
+ if (skmsg->outstanding == 0) {
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->id += SKD_ID_INCR;
+ skmsg->next = skdev->skmsg_free_list;
+ skdev->skmsg_free_list = skmsg;
+ }
+ }
+
+ /*
+ * Decrease the number of active requests.
+ * Also decrements the count in the timeout slot.
+ */
+ SKD_ASSERT(skdev->in_flight > 0);
+ skdev->in_flight -= 1;
+
+ timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+ SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
+ skdev->timeout_slot[timo_slot] -= 1;
+
+ /*
+ * Reset backpointer
+ */
+ skreq->req = NULL;
+
+ /*
+ * Reclaim the skd_request_context
+ */
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->id += SKD_ID_INCR;
+ skreq->next = skdev->skreq_free_list;
+ skdev->skreq_free_list = skreq;
+}
+
+#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
+
+static void skd_do_inq_page_00(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ uint8_t *cdb, uint8_t *buf)
+{
+ uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
+
+ /* Caller requested "supported pages". The driver needs to insert
+ * its page.
+ */
+ pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
+ skdev->name, __func__, __LINE__);
+
+ /* If the device rejected the request because the CDB was
+ * improperly formed, then just leave.
+ */
+ if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
+ skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
+ return;
+
+ /* Get the amount of space the caller allocated */
+ max_bytes = (cdb[3] << 8) | cdb[4];
+
+ /* Get the number of pages actually returned by the device */
+ drive_pages = (buf[2] << 8) | buf[3];
+ drive_bytes = drive_pages + 4;
+ new_size = drive_pages + 1;
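+	/* one extra page will be listed once the driver's page is inserted */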
+
+ /* Supported pages must be in numerical order, so find where
+ * the driver page needs to be inserted into the list of
+ * pages returned by the device.
+ */
+ for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
+ if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
+			return; /* device already uses this page code, abort */
+ else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
+ break;
+ }
+
+ if (insert_pt < max_bytes) {
+ uint16_t u;
+
+ /* Shift everything up one byte to make room. */
+ for (u = new_size + 3; u > insert_pt; u--)
+ buf[u] = buf[u - 1];
+ buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
+
+		/* SCSI byte order increment of num_returned_bytes by 1 */
+		skcomp->num_returned_bytes =
+			cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
+ }
+
+ /* update page length field to reflect the driver's page too */
+ buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
+ buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
+}
+
+static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
+{
+ int pcie_reg;
+ u16 pci_bus_speed;
+ u8 pci_lanes;
+
+ pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_reg) {
+ u16 linksta;
+ pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
+
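+		/* current link speed (bits 3:0) and negotiated link width (bits 9:4) */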
+ pci_bus_speed = linksta & 0xF;
+ pci_lanes = (linksta & 0x3F0) >> 4;
+ } else {
+ *speed = STEC_LINK_UNKNOWN;
+ *width = 0xFF;
+ return;
+ }
+
+ switch (pci_bus_speed) {
+ case 1:
+ *speed = STEC_LINK_2_5GTS;
+ break;
+ case 2:
+ *speed = STEC_LINK_5GTS;
+ break;
+ case 3:
+ *speed = STEC_LINK_8GTS;
+ break;
+ default:
+ *speed = STEC_LINK_UNKNOWN;
+ break;
+ }
+
+ if (pci_lanes <= 0x20)
+ *width = pci_lanes;
+ else
+ *width = 0xFF;
+}
+
+static void skd_do_inq_page_da(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ uint8_t *cdb, uint8_t *buf)
+{
+ struct pci_dev *pdev = skdev->pdev;
+ unsigned max_bytes;
+ struct driver_inquiry_data inq;
+ u16 val;
+
+ pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
+ skdev->name, __func__, __LINE__);
+
+ memset(&inq, 0, sizeof(inq));
+
+ inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
+
+ skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
+ inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
+ inq.pcie_device_number = PCI_SLOT(pdev->devfn);
+ inq.pcie_function_number = PCI_FUNC(pdev->devfn);
+
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
+ inq.pcie_vendor_id = cpu_to_be16(val);
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
+ inq.pcie_device_id = cpu_to_be16(val);
+
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
+ inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
+
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
+ inq.pcie_subsystem_device_id = cpu_to_be16(val);
+
+	/* Driver version, fixed length, padded with spaces on the right */
+ inq.driver_version_length = sizeof(inq.driver_version);
+ memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
+ memcpy(inq.driver_version, DRV_VER_COMPL,
+ min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
+
+ inq.page_length = cpu_to_be16((sizeof(inq) - 4));
+
+ /* Clear the error set by the device */
+ skcomp->status = SAM_STAT_GOOD;
+ memset((void *)skerr, 0, sizeof(*skerr));
+
+ /* copy response into output buffer */
+ max_bytes = (cdb[3] << 8) | cdb[4];
+ memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
+
+	skcomp->num_returned_bytes =
+		cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
+}
+
+static void skd_do_driver_inq(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ uint8_t *cdb, uint8_t *buf)
+{
+ if (!buf)
+ return;
+ else if (cdb[0] != INQUIRY)
+ return; /* Not an INQUIRY */
+ else if ((cdb[1] & 1) == 0)
+ return; /* EVPD not set */
+ else if (cdb[2] == 0)
+ /* Need to add driver's page to supported pages list */
+ skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
+ else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
+ /* Caller requested driver's page */
+ skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
+}
+
+static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
+{
+ if (!sg)
+ return NULL;
+ if (!sg_page(sg))
+ return NULL;
+ return sg_virt(sg);
+}
+
+static void skd_process_scsi_inq(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl)
+{
+ uint8_t *buf;
+ struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
+ struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
+
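+	/* make the DMA'd INQUIRY data visible to the CPU before patching it */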
+ dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
+ skspcl->req.sg_data_dir);
+ buf = skd_sg_1st_page_ptr(skspcl->req.sg);
+
+ if (buf)
+ skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
+}
+
+
+static int skd_isr_completion_posted(struct skd_device *skdev,
+ int limit, int *enqueued)
+{
+ volatile struct fit_completion_entry_v1 *skcmp = NULL;
+ volatile struct fit_comp_error_info *skerr;
+ u16 req_id;
+ u32 req_slot;
+ struct skd_request_context *skreq;
+ u16 cmp_cntxt = 0;
+ u8 cmp_status = 0;
+ u8 cmp_cycle = 0;
+ u32 cmp_bytes = 0;
+ int rc = 0;
+ int processed = 0;
+
+ for (;; ) {
+ SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
+
+ skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
+ cmp_cycle = skcmp->cycle;
+ cmp_cntxt = skcmp->tag;
+ cmp_status = skcmp->status;
+ cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
+
+ skerr = &skdev->skerr_table[skdev->skcomp_ix];
+
+ pr_debug("%s:%s:%d "
+ "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
+ "busy=%d rbytes=0x%x proto=%d\n",
+ skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
+ skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
+ skdev->in_flight, cmp_bytes, skdev->proto_ver);
+
+ if (cmp_cycle != skdev->skcomp_cycle) {
+ pr_debug("%s:%s:%d end of completions\n",
+ skdev->name, __func__, __LINE__);
+ break;
+ }
+ /*
+ * Update the completion queue head index and possibly
+ * the completion cycle count. 8-bit wrap-around.
+ */
+ skdev->skcomp_ix++;
+ if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
+ skdev->skcomp_ix = 0;
+ skdev->skcomp_cycle++;
+ }
+
+ /*
+ * The command context is a unique 32-bit ID. The low order
+ * bits help locate the request. The request is usually a
+ * r/w request (see skd_start() above) or a special request.
+ */
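+		/*
+		 * The SKD_ID_INCR bits act as a generation count; a stale
+		 * completion for a recycled slot is caught by the id
+		 * mismatch check below.
+		 */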
+ req_id = cmp_cntxt;
+ req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+
+ /* Is this other than a r/w request? */
+ if (req_slot >= skdev->num_req_context) {
+ /*
+ * This is not a completion for a r/w request.
+ */
+ skd_complete_other(skdev, skcmp, skerr);
+ continue;
+ }
+
+ skreq = &skdev->skreq_table[req_slot];
+
+ /*
+ * Make sure the request ID for the slot matches.
+ */
+ if (skreq->id != req_id) {
+ pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
+ skdev->name, __func__, __LINE__,
+ req_id, skreq->id);
+ {
+ u16 new_id = cmp_cntxt;
+ pr_err("(%s): Completion mismatch "
+ "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
+ skd_name(skdev), req_id,
+ skreq->id, new_id);
+
+ continue;
+ }
+ }
+
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
+
+ if (skreq->state == SKD_REQ_STATE_ABORTED) {
+ pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
+ skdev->name, __func__, __LINE__,
+ skreq, skreq->id);
+ /* a previously timed out command can
+ * now be cleaned up */
+ skd_release_skreq(skdev, skreq);
+ continue;
+ }
+
+ skreq->completion = *skcmp;
+ if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
+ skreq->err_info = *skerr;
+ skd_log_check_status(skdev, cmp_status, skerr->key,
+ skerr->code, skerr->qual,
+ skerr->fruc);
+ }
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
+
+ if (!skreq->req) {
+ pr_debug("%s:%s:%d NULL backptr skdreq %p, "
+ "req=0x%x req_id=0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skreq, skreq->id, req_id);
+ } else {
+ /*
+ * Capture the outcome and post it back to the
+ * native request.
+ */
+ if (likely(cmp_status == SAM_STAT_GOOD))
+ skd_end_request(skdev, skreq, 0);
+ else
+ skd_resolve_req_exception(skdev, skreq);
+ }
+
+ /*
+ * Release the skreq, its FIT msg (if one), timeout slot,
+ * and queue depth.
+ */
+ skd_release_skreq(skdev, skreq);
+
+		/* skd_isr_comp_limit equal to zero means no limit */
+ if (limit) {
+ if (++processed >= limit) {
+ rc = 1;
+ break;
+ }
+ }
+ }
+
+ if ((skdev->state == SKD_DRVR_STATE_PAUSING)
+ && (skdev->in_flight) == 0) {
+ skdev->state = SKD_DRVR_STATE_PAUSED;
+ wake_up_interruptible(&skdev->waitq);
+ }
+
+ return rc;
+}
+
+static void skd_complete_other(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr)
+{
+ u32 req_id = 0;
+ u32 req_table;
+ u32 req_slot;
+ struct skd_special_context *skspcl;
+
+ req_id = skcomp->tag;
+ req_table = req_id & SKD_ID_TABLE_MASK;
+ req_slot = req_id & SKD_ID_SLOT_MASK;
+
+ pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
+ skdev->name, __func__, __LINE__,
+ req_table, req_id, req_slot);
+
+ /*
+ * Based on the request id, determine how to dispatch this completion.
+	 * This switch/case finds the good cases and forwards the
+ * completion entry. Errors are reported below the switch.
+ */
+ switch (req_table) {
+ case SKD_ID_RW_REQUEST:
+ /*
+		 * The caller, skd_isr_completion_posted() above,
+ * handles r/w requests. The only way we get here
+ * is if the req_slot is out of bounds.
+ */
+ break;
+
+ case SKD_ID_SPECIAL_REQUEST:
+ /*
+ * Make sure the req_slot is in bounds and that the id
+ * matches.
+ */
+ if (req_slot < skdev->n_special) {
+ skspcl = &skdev->skspcl_table[req_slot];
+ if (skspcl->req.id == req_id &&
+ skspcl->req.state == SKD_REQ_STATE_BUSY) {
+ skd_complete_special(skdev,
+ skcomp, skerr, skspcl);
+ return;
+ }
+ }
+ break;
+
+ case SKD_ID_INTERNAL:
+ if (req_slot == 0) {
+ skspcl = &skdev->internal_skspcl;
+ if (skspcl->req.id == req_id &&
+ skspcl->req.state == SKD_REQ_STATE_BUSY) {
+ skd_complete_internal(skdev,
+ skcomp, skerr, skspcl);
+ return;
+ }
+ }
+ break;
+
+ case SKD_ID_FIT_MSG:
+ /*
+ * These id's should never appear in a completion record.
+ */
+ break;
+
+ default:
+ /*
+		 * These id's should never appear anywhere.
+ */
+ break;
+ }
+
+ /*
+ * If we get here it is a bad or stale id.
+ */
+}
+
+static void skd_complete_special(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl)
+{
+ pr_debug("%s:%s:%d completing special request %p\n",
+ skdev->name, __func__, __LINE__, skspcl);
+ if (skspcl->orphaned) {
+ /* Discard orphaned request */
+ /* ?: Can this release directly or does it need
+ * to use a worker? */
+ pr_debug("%s:%s:%d release orphaned %p\n",
+ skdev->name, __func__, __LINE__, skspcl);
+ skd_release_special(skdev, skspcl);
+ return;
+ }
+
+ skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
+
+ skspcl->req.state = SKD_REQ_STATE_COMPLETED;
+ skspcl->req.completion = *skcomp;
+ skspcl->req.err_info = *skerr;
+
+ skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
+ skerr->code, skerr->qual, skerr->fruc);
+
+ wake_up_interruptible(&skdev->waitq);
+}
+
+/* assume spinlock is already held */
+static void skd_release_special(struct skd_device *skdev,
+ struct skd_special_context *skspcl)
+{
+ int i, was_depleted;
+
+ for (i = 0; i < skspcl->req.n_sg; i++) {
+ struct page *page = sg_page(&skspcl->req.sg[i]);
+ __free_page(page);
+ }
+
+ was_depleted = (skdev->skspcl_free_list == NULL);
+
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+ skspcl->req.id += SKD_ID_INCR;
+ skspcl->req.next =
+ (struct skd_request_context *)skdev->skspcl_free_list;
+ skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
+
+ if (was_depleted) {
+ pr_debug("%s:%s:%d skspcl was depleted\n",
+ skdev->name, __func__, __LINE__);
+		/* Free list was depleted. There might be waiters. */
+ wake_up_interruptible(&skdev->waitq);
+ }
+}
+
+static void skd_reset_skcomp(struct skd_device *skdev)
+{
+ u32 nbytes;
+ struct fit_completion_entry_v1 *skcomp;
+
+ nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
+ nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+
+ memset(skdev->skcomp_table, 0, nbytes);
+
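+	/*
+	 * Start expecting cycle 1; skd_isr_completion_posted() stops
+	 * consuming entries once the cycle field no longer matches.
+	 */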
+ skdev->skcomp_ix = 0;
+ skdev->skcomp_cycle = 1;
+}
+
+/*
+ *****************************************************************************
+ * INTERRUPTS
+ *****************************************************************************
+ */
+static void skd_completion_worker(struct work_struct *work)
+{
+ struct skd_device *skdev =
+ container_of(work, struct skd_device, completion_worker);
+ unsigned long flags;
+ int flush_enqueued = 0;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ /*
+	 * Pass in limit=0, which means no limit:
+	 * process everything in the completion queue.
+ */
+ skd_isr_completion_posted(skdev, 0, &flush_enqueued);
+ skd_request_fn(skdev->queue);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+}
+
+static void skd_isr_msg_from_dev(struct skd_device *skdev);
+
+static irqreturn_t
+skd_isr(int irq, void *ptr)
+{
+ struct skd_device *skdev;
+ u32 intstat;
+ u32 ack;
+ int rc = 0;
+ int deferred = 0;
+ int flush_enqueued = 0;
+
+ skdev = (struct skd_device *)ptr;
+ spin_lock(&skdev->lock);
+
+ for (;; ) {
+ intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
+
+ ack = FIT_INT_DEF_MASK;
+ ack &= intstat;
+
+ pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
+ skdev->name, __func__, __LINE__, intstat, ack);
+
+		/* As long as there is an interrupt pending on the device, keep
+		 * looping. When none remain, get out; if nothing was ever
+		 * processed, still defer a run of the completion handler.
+		 */
+ if (ack == 0) {
+			/* No interrupt was pending; if the device is online and
+			 * we handled nothing, run the completion processor anyway.
+			 */
+ if (rc == 0)
+ if (likely (skdev->state
+ == SKD_DRVR_STATE_ONLINE))
+ deferred = 1;
+ break;
+ }
+
+ rc = IRQ_HANDLED;
+
+ SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
+
+ if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
+ (skdev->state != SKD_DRVR_STATE_STOPPING))) {
+ if (intstat & FIT_ISH_COMPLETION_POSTED) {
+ /*
+ * If we have already deferred completion
+ * processing, don't bother running it again
+ */
+ if (deferred == 0)
+ deferred =
+ skd_isr_completion_posted(skdev,
+ skd_isr_comp_limit, &flush_enqueued);
+ }
+
+ if (intstat & FIT_ISH_FW_STATE_CHANGE) {
+ skd_isr_fwstate(skdev);
+ if (skdev->state == SKD_DRVR_STATE_FAULT ||
+ skdev->state ==
+ SKD_DRVR_STATE_DISAPPEARED) {
+ spin_unlock(&skdev->lock);
+ return rc;
+ }
+ }
+
+ if (intstat & FIT_ISH_MSG_FROM_DEV)
+ skd_isr_msg_from_dev(skdev);
+ }
+ }
+
+ if (unlikely(flush_enqueued))
+ skd_request_fn(skdev->queue);
+
+ if (deferred)
+ schedule_work(&skdev->completion_worker);
+ else if (!flush_enqueued)
+ skd_request_fn(skdev->queue);
+
+ spin_unlock(&skdev->lock);
+
+ return rc;
+}
+
+static void skd_drive_fault(struct skd_device *skdev)
+{
+ skdev->state = SKD_DRVR_STATE_FAULT;
+ pr_err("(%s): Drive FAULT\n", skd_name(skdev));
+}
+
+static void skd_drive_disappeared(struct skd_device *skdev)
+{
+ skdev->state = SKD_DRVR_STATE_DISAPPEARED;
+ pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
+}
+
+static void skd_isr_fwstate(struct skd_device *skdev)
+{
+ u32 sense;
+ u32 state;
+ u32 mtd;
+ int prev_driver_state = skdev->state;
+
+ sense = SKD_READL(skdev, FIT_STATUS);
+ state = sense & FIT_SR_DRIVE_STATE_MASK;
+
+ pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
+ skd_name(skdev),
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_drive_state_to_str(state), state);
+
+ skdev->drive_state = state;
+
+ switch (skdev->drive_state) {
+ case FIT_SR_DRIVE_INIT:
+ if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
+ skd_disable_interrupts(skdev);
+ break;
+ }
+ if (skdev->state == SKD_DRVR_STATE_RESTARTING)
+ skd_recover_requests(skdev, 0);
+ if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
+ skdev->timer_countdown = SKD_STARTING_TIMO;
+ skdev->state = SKD_DRVR_STATE_STARTING;
+ skd_soft_reset(skdev);
+ break;
+ }
+ mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_SR_DRIVE_ONLINE:
+ skdev->cur_max_queue_depth = skd_max_queue_depth;
+ if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
+ skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
+
+ skdev->queue_low_water_mark =
+ skdev->cur_max_queue_depth * 2 / 3 + 1;
+ if (skdev->queue_low_water_mark < 1)
+ skdev->queue_low_water_mark = 1;
+ pr_info(
+ "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
+ skd_name(skdev),
+ skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+
+ skd_refresh_device_data(skdev);
+ break;
+
+ case FIT_SR_DRIVE_BUSY:
+ skdev->state = SKD_DRVR_STATE_BUSY;
+ skdev->timer_countdown = SKD_BUSY_TIMO;
+ skd_quiesce_dev(skdev);
+ break;
+ case FIT_SR_DRIVE_BUSY_SANITIZE:
+ /* set timer for 3 seconds, we'll abort any unfinished
+ * commands after that expires
+ */
+ skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
+ skdev->timer_countdown = SKD_TIMER_SECONDS(3);
+ blk_start_queue(skdev->queue);
+ break;
+ case FIT_SR_DRIVE_BUSY_ERASE:
+ skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
+ skdev->timer_countdown = SKD_BUSY_TIMO;
+ break;
+ case FIT_SR_DRIVE_OFFLINE:
+ skdev->state = SKD_DRVR_STATE_IDLE;
+ break;
+ case FIT_SR_DRIVE_SOFT_RESET:
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ /* Expected by a caller of skd_soft_reset() */
+ break;
+ default:
+ skdev->state = SKD_DRVR_STATE_RESTARTING;
+ break;
+ }
+ break;
+ case FIT_SR_DRIVE_FW_BOOTING:
+ pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
+ skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_DEGRADED:
+ case FIT_SR_PCIE_LINK_DOWN:
+ case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
+ break;
+
+ case FIT_SR_DRIVE_FAULT:
+ skd_drive_fault(skdev);
+ skd_recover_requests(skdev, 0);
+ blk_start_queue(skdev->queue);
+ break;
+
+ /* PCIe bus returned all Fs? */
+ case 0xFF:
+ pr_info("(%s): state=0x%x sense=0x%x\n",
+ skd_name(skdev), state, sense);
+ skd_drive_disappeared(skdev);
+ skd_recover_requests(skdev, 0);
+ blk_start_queue(skdev->queue);
+ break;
+ default:
+ /*
+		 * Unknown FW state. Wait for a state we recognize.
+ */
+ break;
+ }
+ pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
+ skd_name(skdev),
+ skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+}
+
+static void skd_recover_requests(struct skd_device *skdev, int requeue)
+{
+ int i;
+
+ for (i = 0; i < skdev->num_req_context; i++) {
+ struct skd_request_context *skreq = &skdev->skreq_table[i];
+
+ if (skreq->state == SKD_REQ_STATE_BUSY) {
+ skd_log_skreq(skdev, skreq, "recover");
+
+ SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
+ SKD_ASSERT(skreq->req != NULL);
+
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
+
+ if (requeue &&
+ (unsigned long) ++skreq->req->special <
+ SKD_MAX_RETRIES)
+ blk_requeue_request(skdev->queue, skreq->req);
+ else
+ skd_end_request(skdev, skreq, -EIO);
+
+ skreq->req = NULL;
+
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->id += SKD_ID_INCR;
+ }
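+		/* re-chain every entry so the table becomes the free list again */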
+ if (i > 0)
+ skreq[-1].next = skreq;
+ skreq->next = NULL;
+ }
+ skdev->skreq_free_list = skdev->skreq_table;
+
+ for (i = 0; i < skdev->num_fitmsg_context; i++) {
+ struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
+
+ if (skmsg->state == SKD_MSG_STATE_BUSY) {
+ skd_log_skmsg(skdev, skmsg, "salvaged");
+ SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->id += SKD_ID_INCR;
+ }
+ if (i > 0)
+ skmsg[-1].next = skmsg;
+ skmsg->next = NULL;
+ }
+ skdev->skmsg_free_list = skdev->skmsg_table;
+
+ for (i = 0; i < skdev->n_special; i++) {
+ struct skd_special_context *skspcl = &skdev->skspcl_table[i];
+
+		/* If orphaned, reclaim it because it has already been reported
+		 * to the process as an error (it was just waiting for
+		 * a completion that didn't come, and now it never will).
+		 * If busy, change to a state that will cause it to error
+		 * out in the wait routine and let it do the normal
+		 * reporting and reclaiming.
+		 */
+ if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
+ if (skspcl->orphaned) {
+ pr_debug("%s:%s:%d orphaned %p\n",
+ skdev->name, __func__, __LINE__,
+ skspcl);
+ skd_release_special(skdev, skspcl);
+ } else {
+ pr_debug("%s:%s:%d not orphaned %p\n",
+ skdev->name, __func__, __LINE__,
+ skspcl);
+ skspcl->req.state = SKD_REQ_STATE_ABORTED;
+ }
+ }
+ }
+ skdev->skspcl_free_list = skdev->skspcl_table;
+
+ for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
+ skdev->timeout_slot[i] = 0;
+
+ skdev->in_flight = 0;
+}
+
+static void skd_isr_msg_from_dev(struct skd_device *skdev)
+{
+ u32 mfd;
+ u32 mtd;
+ u32 data;
+
+ mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
+
+ pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
+ skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
+
+ /* ignore any mtd that is an ack for something we didn't send */
+ if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
+ return;
+
+ switch (FIT_MXD_TYPE(mfd)) {
+ case FIT_MTD_FITFW_INIT:
+ skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
+
+ if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
+ pr_err("(%s): protocol mismatch\n",
+ skdev->name);
+ pr_err("(%s): got=%d support=%d\n",
+ skdev->name, skdev->proto_ver,
+ FIT_PROTOCOL_VERSION_1);
+ pr_err("(%s): please upgrade driver\n",
+ skdev->name);
+ skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
+ skd_soft_reset(skdev);
+ break;
+ }
+ mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_GET_CMDQ_DEPTH:
+ skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
+ mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
+ SKD_N_COMPLETION_ENTRY);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_SET_COMPQ_DEPTH:
+ SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
+ mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_SET_COMPQ_ADDR:
+ skd_reset_skcomp(skdev);
+ mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_CMD_LOG_HOST_ID:
+ skdev->connect_time_stamp = get_seconds();
+ data = skdev->connect_time_stamp & 0xFFFF;
+ mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
+ skdev->drive_jiffies = FIT_MXD_DATA(mfd);
+ data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
+ mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
+ skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
+ mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+
+ pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
+ skd_name(skdev),
+ skdev->connect_time_stamp, skdev->drive_jiffies);
+ break;
+
+ case FIT_MTD_ARM_QUEUE:
+ skdev->last_mtd = 0;
+ /*
+ * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
+ */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void skd_disable_interrupts(struct skd_device *skdev)
+{
+ u32 sense;
+
+ sense = SKD_READL(skdev, FIT_CONTROL);
+ sense &= ~FIT_CR_ENABLE_INTERRUPTS;
+ SKD_WRITEL(skdev, sense, FIT_CONTROL);
+ pr_debug("%s:%s:%d sense 0x%x\n",
+ skdev->name, __func__, __LINE__, sense);
+
+	/* Note that all 1s are written. A 1 bit means
+ * disable, a 0 means enable.
+ */
+ SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
+}
+
+static void skd_enable_interrupts(struct skd_device *skdev)
+{
+ u32 val;
+
+ /* unmask interrupts first */
+	val = FIT_ISH_FW_STATE_CHANGE |
+	      FIT_ISH_COMPLETION_POSTED | FIT_ISH_MSG_FROM_DEV;
+
+	/* Note that the complement of the mask is written. A 1 bit means
+ * disable, a 0 means enable. */
+ SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
+ pr_debug("%s:%s:%d interrupt mask=0x%x\n",
+ skdev->name, __func__, __LINE__, ~val);
+
+ val = SKD_READL(skdev, FIT_CONTROL);
+ val |= FIT_CR_ENABLE_INTERRUPTS;
+ pr_debug("%s:%s:%d control=0x%x\n",
+ skdev->name, __func__, __LINE__, val);
+ SKD_WRITEL(skdev, val, FIT_CONTROL);
+}
+
+/*
+ *****************************************************************************
+ * START, STOP, RESTART, QUIESCE, UNQUIESCE
+ *****************************************************************************
+ */
+
+static void skd_soft_reset(struct skd_device *skdev)
+{
+ u32 val;
+
+ val = SKD_READL(skdev, FIT_CONTROL);
+ val |= (FIT_CR_SOFT_RESET);
+ pr_debug("%s:%s:%d control=0x%x\n",
+ skdev->name, __func__, __LINE__, val);
+ SKD_WRITEL(skdev, val, FIT_CONTROL);
+}
+
+static void skd_start_device(struct skd_device *skdev)
+{
+ unsigned long flags;
+ u32 sense;
+ u32 state;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ /* ack all ghost interrupts */
+ SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+
+ sense = SKD_READL(skdev, FIT_STATUS);
+
+ pr_debug("%s:%s:%d initial status=0x%x\n",
+ skdev->name, __func__, __LINE__, sense);
+
+ state = sense & FIT_SR_DRIVE_STATE_MASK;
+ skdev->drive_state = state;
+ skdev->last_mtd = 0;
+
+ skdev->state = SKD_DRVR_STATE_STARTING;
+ skdev->timer_countdown = SKD_STARTING_TIMO;
+
+ skd_enable_interrupts(skdev);
+
+ switch (skdev->drive_state) {
+ case FIT_SR_DRIVE_OFFLINE:
+ pr_err("(%s): Drive offline...\n", skd_name(skdev));
+ break;
+
+ case FIT_SR_DRIVE_FW_BOOTING:
+ pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
+ skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_BUSY_SANITIZE:
+ pr_info("(%s): Start: BUSY_SANITIZE\n",
+ skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
+ skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_BUSY_ERASE:
+ pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
+ skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_INIT:
+ case FIT_SR_DRIVE_ONLINE:
+ skd_soft_reset(skdev);
+ break;
+
+ case FIT_SR_DRIVE_BUSY:
+ pr_err("(%s): Drive Busy...\n", skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY;
+ skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_SOFT_RESET:
+ pr_err("(%s) drive soft reset in prog\n",
+ skd_name(skdev));
+ break;
+
+ case FIT_SR_DRIVE_FAULT:
+ /* Fault state is bad...soft reset won't do it...
+ * Hard reset, maybe, but does it work on device?
+ * For now, just fault so the system doesn't hang.
+ */
+ skd_drive_fault(skdev);
+		/* start the queue so we can respond with error to requests */
+ pr_debug("%s:%s:%d starting %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case 0xFF:
+ /* Most likely the device isn't there or isn't responding
+ * to the BAR1 addresses. */
+ skd_drive_disappeared(skdev);
+		/* start the queue so we can respond with error to requests */
+ pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ default:
+ pr_err("(%s) Start: unknown state %x\n",
+ skd_name(skdev), skdev->drive_state);
+ break;
+ }
+
+ state = SKD_READL(skdev, FIT_CONTROL);
+ pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
+ pr_debug("%s:%s:%d Intr Status=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_INT_MASK_HOST);
+ pr_debug("%s:%s:%d Intr Mask=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
+ pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_HW_VERSION);
+ pr_debug("%s:%s:%d HW version=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+}
+
+static void skd_stop_device(struct skd_device *skdev)
+{
+ unsigned long flags;
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
+ u32 dev_state;
+ int i;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ pr_err("(%s): skd_stop_device not online no sync\n",
+ skd_name(skdev));
+ goto stop_out;
+ }
+
+ if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
+ pr_err("(%s): skd_stop_device no special\n",
+ skd_name(skdev));
+ goto stop_out;
+ }
+
+ skdev->state = SKD_DRVR_STATE_SYNCING;
+ skdev->sync_done = 0;
+
+ skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ wait_event_interruptible_timeout(skdev->waitq,
+ (skdev->sync_done), (10 * HZ));
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ switch (skdev->sync_done) {
+ case 0:
+ pr_err("(%s): skd_stop_device no sync\n",
+ skd_name(skdev));
+ break;
+ case 1:
+ pr_err("(%s): skd_stop_device sync done\n",
+ skd_name(skdev));
+ break;
+ default:
+ pr_err("(%s): skd_stop_device sync error\n",
+ skd_name(skdev));
+ }
+
+stop_out:
+ skdev->state = SKD_DRVR_STATE_STOPPING;
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ skd_kill_timer(skdev);
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ skd_disable_interrupts(skdev);
+
+ /* ensure all ints on device are cleared */
+ /* soft reset the device to unload with a clean slate */
+ SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+ SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ /* poll every 100ms, 1 second timeout */
+ for (i = 0; i < 10; i++) {
+ dev_state =
+ SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
+ if (dev_state == FIT_SR_DRIVE_INIT)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(100));
+ }
+
+ if (dev_state != FIT_SR_DRIVE_INIT)
+ pr_err("(%s): skd_stop_device state error 0x%02x\n",
+ skd_name(skdev), dev_state);
+}
+
+/* assume spinlock is held */
+static void skd_restart_device(struct skd_device *skdev)
+{
+ u32 state;
+
+ /* ack all ghost interrupts */
+ SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+
+ state = SKD_READL(skdev, FIT_STATUS);
+
+ pr_debug("%s:%s:%d drive status=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state &= FIT_SR_DRIVE_STATE_MASK;
+ skdev->drive_state = state;
+ skdev->last_mtd = 0;
+
+ skdev->state = SKD_DRVR_STATE_RESTARTING;
+ skdev->timer_countdown = SKD_RESTARTING_TIMO;
+
+ skd_soft_reset(skdev);
+}
+
+/* assume spinlock is held */
+static int skd_quiesce_dev(struct skd_device *skdev)
+{
+ int rc = 0;
+
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ pr_debug("%s:%s:%d stopping %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_stop_queue(skdev->queue);
+ break;
+ case SKD_DRVR_STATE_ONLINE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_RESUMING:
+ default:
+ rc = -EINVAL;
+ pr_debug("%s:%s:%d state [%d] not implemented\n",
+ skdev->name, __func__, __LINE__, skdev->state);
+ }
+ return rc;
+}
+
+/* assume spinlock is held */
+static int skd_unquiesce_dev(struct skd_device *skdev)
+{
+ int prev_driver_state = skdev->state;
+
+ skd_log_skdev(skdev, "unquiesce");
+ if (skdev->state == SKD_DRVR_STATE_ONLINE) {
+ pr_debug("%s:%s:%d **** device already ONLINE\n",
+ skdev->name, __func__, __LINE__);
+ return 0;
+ }
+ if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
+ /*
+		 * If there has been a state change to something other than
+ * ONLINE, we will rely on controller state change
+ * to come back online and restart the queue.
+ * The BUSY state means that driver is ready to
+ * continue normal processing but waiting for controller
+ * to become available.
+ */
+ skdev->state = SKD_DRVR_STATE_BUSY;
+ pr_debug("%s:%s:%d drive BUSY state\n",
+ skdev->name, __func__, __LINE__);
+ return 0;
+ }
+
+ /*
+ * Drive has just come online, driver is either in startup,
+	 * paused performing a task, or busy waiting for hardware.
+ */
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_IDLE:
+ case SKD_DRVR_STATE_LOAD:
+ skdev->state = SKD_DRVR_STATE_ONLINE;
+ pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
+ skd_name(skdev),
+ skd_skdev_state_to_str(prev_driver_state),
+ prev_driver_state, skd_skdev_state_to_str(skdev->state),
+ skdev->state);
+ pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
+ skdev->name, __func__, __LINE__);
+ pr_debug("%s:%s:%d starting %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = 1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
+ *****************************************************************************
+ * PCIe MSI/MSI-X INTERRUPT HANDLERS
+ *****************************************************************************
+ */
+
+static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
+ irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
+ skd_isr_fwstate(skdev);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+ int flush_enqueued = 0;
+ int deferred;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
+ deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
+ &flush_enqueued);
+ if (flush_enqueued)
+ skd_request_fn(skdev->queue);
+
+ if (deferred)
+ schedule_work(&skdev->completion_worker);
+ else if (!flush_enqueued)
+ skd_request_fn(skdev->queue);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
+ skd_isr_msg_from_dev(skdev);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ *****************************************************************************
+ * PCIe MSI/MSI-X SETUP
+ *****************************************************************************
+ */
+
+struct skd_msix_entry {
+ int have_irq;
+ u32 vector;
+ u32 entry;
+ struct skd_device *rsp;
+ char isr_name[30];
+};
+
+struct skd_init_msix_entry {
+ const char *name;
+ irq_handler_t handler;
+};
+
+#define SKD_MAX_MSIX_COUNT 13
+#define SKD_MIN_MSIX_COUNT 7
+#define SKD_BASE_MSIX_IRQ 4
+
+static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
+ { "(DMA 0)", skd_reserved_isr },
+ { "(DMA 1)", skd_reserved_isr },
+ { "(DMA 2)", skd_reserved_isr },
+ { "(DMA 3)", skd_reserved_isr },
+ { "(State Change)", skd_statec_isr },
+ { "(COMPL_Q)", skd_comp_q },
+ { "(MSG)", skd_msg_isr },
+ { "(Reserved)", skd_reserved_isr },
+ { "(Reserved)", skd_reserved_isr },
+ { "(Queue Full 0)", skd_qfull_isr },
+ { "(Queue Full 1)", skd_qfull_isr },
+ { "(Queue Full 2)", skd_qfull_isr },
+ { "(Queue Full 3)", skd_qfull_isr },
+};
+
+static void skd_release_msix(struct skd_device *skdev)
+{
+ struct skd_msix_entry *qentry;
+ int i;
+
+ if (skdev->msix_entries == NULL)
+ return;
+ for (i = 0; i < skdev->msix_count; i++) {
+ qentry = &skdev->msix_entries[i];
+ skdev = qentry->rsp;
+
+ if (qentry->have_irq)
+ devm_free_irq(&skdev->pdev->dev,
+ qentry->vector, qentry->rsp);
+ }
+ pci_disable_msix(skdev->pdev);
+ kfree(skdev->msix_entries);
+ skdev->msix_count = 0;
+ skdev->msix_entries = NULL;
+}
+
+static int skd_acquire_msix(struct skd_device *skdev)
+{
+ int i, rc;
+ struct pci_dev *pdev;
+ struct msix_entry *entries = NULL;
+ struct skd_msix_entry *qentry;
+
+ pdev = skdev->pdev;
+ skdev->msix_count = SKD_MAX_MSIX_COUNT;
+ entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
+ entries[i].entry = i;
+
+ rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
+ if (rc < 0)
+ goto msix_out;
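+	/*
+	 * A positive return is the number of vectors the device/system
+	 * can actually provide, so retry with that smaller count.
+	 */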
+ if (rc) {
+ if (rc < SKD_MIN_MSIX_COUNT) {
+ pr_err("(%s): failed to enable MSI-X %d\n",
+ skd_name(skdev), rc);
+ goto msix_out;
+ }
+ pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
+ skdev->name, __func__, __LINE__,
+ pci_name(pdev), skdev->name, rc);
+
+ skdev->msix_count = rc;
+ rc = pci_enable_msix(pdev, entries, skdev->msix_count);
+ if (rc) {
+ pr_err("(%s): failed to enable MSI-X "
+ "support (%d) %d\n",
+ skd_name(skdev), skdev->msix_count, rc);
+ goto msix_out;
+ }
+ }
+ skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
+ skdev->msix_count, GFP_KERNEL);
+ if (!skdev->msix_entries) {
+ rc = -ENOMEM;
+ skdev->msix_count = 0;
+ pr_err("(%s): msix table allocation error\n",
+ skd_name(skdev));
+ goto msix_out;
+ }
+
+ qentry = skdev->msix_entries;
+ for (i = 0; i < skdev->msix_count; i++) {
+ qentry->vector = entries[i].vector;
+ qentry->entry = entries[i].entry;
+ qentry->rsp = NULL;
+ qentry->have_irq = 0;
+ pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
+ skdev->name, __func__, __LINE__,
+ pci_name(pdev), skdev->name,
+ i, qentry->vector, qentry->entry);
+ qentry++;
+ }
+
+ /* Enable MSI-X vectors for the base queue */
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ qentry = &skdev->msix_entries[i];
+ snprintf(qentry->isr_name, sizeof(qentry->isr_name),
+ "%s%d-msix %s", DRV_NAME, skdev->devno,
+ msix_entries[i].name);
+ rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
+ msix_entries[i].handler, 0,
+ qentry->isr_name, skdev);
+ if (rc) {
+ pr_err("(%s): Unable to register(%d) MSI-X "
+ "handler %d: %s\n",
+ skd_name(skdev), rc, i, qentry->isr_name);
+ goto msix_out;
+ } else {
+ qentry->have_irq = 1;
+ qentry->rsp = skdev;
+ }
+ }
+ pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
+ skdev->name, __func__, __LINE__,
+ pci_name(pdev), skdev->name, skdev->msix_count);
+ return 0;
+
+msix_out:
+	kfree(entries);
+ skd_release_msix(skdev);
+ return rc;
+}
+
+static int skd_acquire_irq(struct skd_device *skdev)
+{
+ int rc;
+ struct pci_dev *pdev;
+
+ pdev = skdev->pdev;
+ skdev->msix_count = 0;
+
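+	/* Fall back from MSI-X to MSI to legacy INTx if the preferred type cannot be enabled. */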
+RETRY_IRQ_TYPE:
+ switch (skdev->irq_type) {
+ case SKD_IRQ_MSIX:
+ rc = skd_acquire_msix(skdev);
+ if (!rc)
+ pr_info("(%s): MSI-X %d irqs enabled\n",
+ skd_name(skdev), skdev->msix_count);
+ else {
+ pr_err(
+ "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+ skd_name(skdev), rc);
+ skdev->irq_type = SKD_IRQ_MSI;
+ goto RETRY_IRQ_TYPE;
+ }
+ break;
+ case SKD_IRQ_MSI:
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
+ DRV_NAME, skdev->devno);
+ rc = pci_enable_msi(pdev);
+ if (!rc) {
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
+ skdev->isr_name, skdev);
+ if (rc) {
+ pci_disable_msi(pdev);
+ pr_err(
+ "(%s): failed to allocate the MSI interrupt %d\n",
+ skd_name(skdev), rc);
+ goto RETRY_IRQ_LEGACY;
+ }
+ pr_info("(%s): MSI irq %d enabled\n",
+ skd_name(skdev), pdev->irq);
+ } else {
+RETRY_IRQ_LEGACY:
+ pr_err(
+ "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
+ skd_name(skdev), rc);
+ skdev->irq_type = SKD_IRQ_LEGACY;
+ goto RETRY_IRQ_TYPE;
+ }
+ break;
+ case SKD_IRQ_LEGACY:
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name),
+ "%s%d-legacy", DRV_NAME, skdev->devno);
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+ IRQF_SHARED, skdev->isr_name, skdev);
+ if (!rc)
+ pr_info("(%s): LEGACY irq %d enabled\n",
+ skd_name(skdev), pdev->irq);
+ else
+ pr_err("(%s): request LEGACY irq error %d\n",
+ skd_name(skdev), rc);
+ break;
+ default:
+ pr_info("(%s): irq_type %d invalid, re-set to %d\n",
+ skd_name(skdev), skdev->irq_type, SKD_IRQ_LEGACY);
+ skdev->irq_type = SKD_IRQ_LEGACY;
+ goto RETRY_IRQ_TYPE;
+ }
+ return rc;
+}
+
+static void skd_release_irq(struct skd_device *skdev)
+{
+ switch (skdev->irq_type) {
+ case SKD_IRQ_MSIX:
+ skd_release_msix(skdev);
+ break;
+ case SKD_IRQ_MSI:
+ devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
+ pci_disable_msi(skdev->pdev);
+ break;
+ case SKD_IRQ_LEGACY:
+ devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
+ break;
+ default:
+ pr_err("(%s): wrong irq type %d!",
+ skd_name(skdev), skdev->irq_type);
+ break;
+ }
+}
+
+/*
+ *****************************************************************************
+ * CONSTRUCT
+ *****************************************************************************
+ */
+
+static int skd_cons_skcomp(struct skd_device *skdev)
+{
+ int rc = 0;
+ struct fit_completion_entry_v1 *skcomp;
+ u32 nbytes;
+
+ nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
+ nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+
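+ /*
+ * One coherent allocation holds the completion ring followed by a
+ * parallel table of error-info entries, one per completion slot.
+ */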
+ pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
+ skdev->name, __func__, __LINE__,
+ nbytes, SKD_N_COMPLETION_ENTRY);
+
+ skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skdev->cq_dma_address);
+
+ if (skcomp == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skcomp, 0, nbytes);
+
+ skdev->skcomp_table = skcomp;
+ skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
+ sizeof(*skcomp) *
+ SKD_N_COMPLETION_ENTRY);
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_skmsg(struct skd_device *skdev)
+{
+ int rc = 0;
+ u32 i;
+
+ pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
+ skdev->name, __func__, __LINE__,
+ sizeof(struct skd_fitmsg_context),
+ skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
+
+ skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
+ *skdev->num_fitmsg_context, GFP_KERNEL);
+ if (skdev->skmsg_table == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < skdev->num_fitmsg_context; i++) {
+ struct skd_fitmsg_context *skmsg;
+
+ skmsg = &skdev->skmsg_table[i];
+
+ skmsg->id = i + SKD_ID_FIT_MSG;
+
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
+ SKD_N_FITMSG_BYTES + 64,
+ &skmsg->mb_dma_address);
+
+ if (skmsg->msg_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
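+ /*
+ * Round the message buffer up to the next 64-byte boundary; the
+ * allocation above includes 64 spare bytes for this. Keeping the low
+ * six bits of mb_dma_address clear lets the address be OR'd with the
+ * FIT_QCMD_* queue-id and message-size flags when it is posted to the
+ * FIT_Q_COMMAND register.
+ */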
+ skmsg->offset = (u32)((u64)skmsg->msg_buf &
+ (~FIT_QCMD_BASE_ADDRESS_MASK));
+ skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
+ skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
+ FIT_QCMD_BASE_ADDRESS_MASK);
+ skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
+ skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
+ memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
+
+ skmsg->next = &skmsg[1];
+ }
+
+ /* Free list is in order starting with the 0th entry. */
+ skdev->skmsg_table[i - 1].next = NULL;
+ skdev->skmsg_free_list = skdev->skmsg_table;
+
+err_out:
+ return rc;
+}
+
+static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
+ u32 n_sg,
+ dma_addr_t *ret_dma_addr)
+{
+ struct fit_sg_descriptor *sg_list;
+ u32 nbytes;
+
+ nbytes = sizeof(*sg_list) * n_sg;
+
+ sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+
+ if (sg_list != NULL) {
+ uint64_t dma_address = *ret_dma_addr;
+ u32 i;
+
+ memset(sg_list, 0, nbytes);
+
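+ /*
+ * Pre-link the descriptors into a chain through next_desc_ptr; the
+ * last descriptor terminates the chain with a null pointer.
+ */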
+ for (i = 0; i < n_sg - 1; i++) {
+ uint64_t ndp_off;
+ ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
+
+ sg_list[i].next_desc_ptr = dma_address + ndp_off;
+ }
+ sg_list[i].next_desc_ptr = 0LL;
+ }
+
+ return sg_list;
+}
+
+static int skd_cons_skreq(struct skd_device *skdev)
+{
+ int rc = 0;
+ u32 i;
+
+ pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
+ skdev->name, __func__, __LINE__,
+ sizeof(struct skd_request_context),
+ skdev->num_req_context,
+ sizeof(struct skd_request_context) * skdev->num_req_context);
+
+ skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
+ * skdev->num_req_context, GFP_KERNEL);
+ if (skdev->skreq_table == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
+ skdev->name, __func__, __LINE__,
+ skdev->sgs_per_request, sizeof(struct scatterlist),
+ skdev->sgs_per_request * sizeof(struct scatterlist));
+
+ for (i = 0; i < skdev->num_req_context; i++) {
+ struct skd_request_context *skreq;
+
+ skreq = &skdev->skreq_table[i];
+
+ skreq->id = i + SKD_ID_RW_REQUEST;
+ skreq->state = SKD_REQ_STATE_IDLE;
+
+ skreq->sg = kzalloc(sizeof(struct scatterlist) *
+ skdev->sgs_per_request, GFP_KERNEL);
+ if (skreq->sg == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ sg_init_table(skreq->sg, skdev->sgs_per_request);
+
+ skreq->sksg_list = skd_cons_sg_list(skdev,
+ skdev->sgs_per_request,
+ &skreq->sksg_dma_address);
+
+ if (skreq->sksg_list == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skreq->next = &skreq[1];
+ }
+
+ /* Free list is in order starting with the 0th entry. */
+ skdev->skreq_table[i - 1].next = NULL;
+ skdev->skreq_free_list = skdev->skreq_table;
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_skspcl(struct skd_device *skdev)
+{
+ int rc = 0;
+ u32 i, nbytes;
+
+ pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
+ skdev->name, __func__, __LINE__,
+ sizeof(struct skd_special_context),
+ skdev->n_special,
+ sizeof(struct skd_special_context) * skdev->n_special);
+
+ skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
+ * skdev->n_special, GFP_KERNEL);
+ if (skdev->skspcl_table == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < skdev->n_special; i++) {
+ struct skd_special_context *skspcl;
+
+ skspcl = &skdev->skspcl_table[i];
+
+ skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+
+ skspcl->req.next = &skspcl[1].req;
+
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+
+ skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skspcl->mb_dma_address);
+ if (skspcl->msg_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skspcl->msg_buf, 0, nbytes);
+
+ skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
+ SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
+ if (skspcl->req.sg == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skspcl->req.sksg_list = skd_cons_sg_list(skdev,
+ SKD_N_SG_PER_SPECIAL,
+ &skspcl->req.sksg_dma_address);
+ if (skspcl->req.sksg_list == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ /* Free list is in order starting with the 0th entry. */
+ skdev->skspcl_table[i - 1].req.next = NULL;
+ skdev->skspcl_free_list = skdev->skspcl_table;
+
+ return rc;
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_sksb(struct skd_device *skdev)
+{
+ int rc = 0;
+ struct skd_special_context *skspcl;
+ u32 nbytes;
+
+ skspcl = &skdev->internal_skspcl;
+
+ skspcl->req.id = 0 + SKD_ID_INTERNAL;
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+
+ nbytes = SKD_N_INTERNAL_BYTES;
+
+ skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skspcl->db_dma_address);
+ if (skspcl->data_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skspcl->data_buf, 0, nbytes);
+
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+ skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skspcl->mb_dma_address);
+ if (skspcl->msg_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skspcl->msg_buf, 0, nbytes);
+
+ skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
+ &skspcl->req.sksg_dma_address);
+ if (skspcl->req.sksg_list == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ if (!skd_format_internal_skspcl(skdev)) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_disk(struct skd_device *skdev)
+{
+ int rc = 0;
+ struct gendisk *disk;
+ struct request_queue *q;
+ unsigned long flags;
+
+ disk = alloc_disk(SKD_MINORS_PER_DEVICE);
+ if (!disk) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skdev->disk = disk;
+ sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
+
+ disk->major = skdev->major;
+ disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
+ disk->fops = &skd_blockdev_ops;
+ disk->private_data = skdev;
+
+ q = blk_init_queue(skd_request_fn, &skdev->lock);
+ if (!q) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skdev->queue = q;
+ disk->queue = q;
+ q->queuedata = skdev;
+
+ blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_max_segments(q, skdev->sgs_per_request);
+ blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
+
+ /* set sysfs optimal_io_size to 8K */
+ blk_queue_io_opt(q, 8192);
+
+ /* DISCARD Flag initialization. */
+ q->limits.discard_granularity = 8192;
+ q->limits.discard_alignment = 0;
+ q->limits.max_discard_sectors = UINT_MAX >> 9;
+ q->limits.discard_zeroes_data = 1;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d stopping %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_stop_queue(skdev->queue);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+err_out:
+ return rc;
+}
+
+#define SKD_N_DEV_TABLE 16u
+static u32 skd_next_devno;
+
+static struct skd_device *skd_construct(struct pci_dev *pdev)
+{
+ struct skd_device *skdev;
+ int blk_major = skd_major;
+ int rc;
+
+ skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
+
+ if (!skdev) {
+ pr_err(PFX "(%s): memory alloc failure\n",
+ pci_name(pdev));
+ return NULL;
+ }
+
+ skdev->state = SKD_DRVR_STATE_LOAD;
+ skdev->pdev = pdev;
+ skdev->devno = skd_next_devno++;
+ skdev->major = blk_major;
+ skdev->irq_type = skd_isr_type;
+ sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
+ skdev->dev_max_queue_depth = 0;
+
+ skdev->num_req_context = skd_max_queue_depth;
+ skdev->num_fitmsg_context = skd_max_queue_depth;
+ skdev->n_special = skd_max_pass_thru;
+ skdev->cur_max_queue_depth = 1;
+ skdev->queue_low_water_mark = 1;
+ skdev->proto_ver = 99;
+ skdev->sgs_per_request = skd_sgs_per_request;
+ skdev->dbg_level = skd_dbg_level;
+
+ atomic_set(&skdev->device_count, 0);
+
+ spin_lock_init(&skdev->lock);
+
+ INIT_WORK(&skdev->completion_worker, skd_completion_worker);
+
+ pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skcomp(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skmsg(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skreq(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skspcl(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_sksb(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_disk(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
+ return skdev;
+
+err_out:
+ pr_debug("%s:%s:%d construct failed\n",
+ skdev->name, __func__, __LINE__);
+ skd_destruct(skdev);
+ return NULL;
+}
+
+/*
+ *****************************************************************************
+ * DESTRUCT (FREE)
+ *****************************************************************************
+ */
+
+static void skd_free_skcomp(struct skd_device *skdev)
+{
+ if (skdev->skcomp_table != NULL) {
+ u32 nbytes;
+
+ nbytes = sizeof(skdev->skcomp_table[0]) *
+ SKD_N_COMPLETION_ENTRY;
+ pci_free_consistent(skdev->pdev, nbytes,
+ skdev->skcomp_table, skdev->cq_dma_address);
+ }
+
+ skdev->skcomp_table = NULL;
+ skdev->cq_dma_address = 0;
+}
+
+static void skd_free_skmsg(struct skd_device *skdev)
+{
+ u32 i;
+
+ if (skdev->skmsg_table == NULL)
+ return;
+
+ for (i = 0; i < skdev->num_fitmsg_context; i++) {
+ struct skd_fitmsg_context *skmsg;
+
+ skmsg = &skdev->skmsg_table[i];
+
+ if (skmsg->msg_buf != NULL) {
+ skmsg->msg_buf += skmsg->offset;
+ skmsg->mb_dma_address += skmsg->offset;
+ pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
+ skmsg->msg_buf,
+ skmsg->mb_dma_address);
+ }
+ skmsg->msg_buf = NULL;
+ skmsg->mb_dma_address = 0;
+ }
+
+ kfree(skdev->skmsg_table);
+ skdev->skmsg_table = NULL;
+}
+
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ u32 n_sg, dma_addr_t dma_addr)
+{
+ if (sg_list != NULL) {
+ u32 nbytes;
+
+ nbytes = sizeof(*sg_list) * n_sg;
+
+ pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
+ }
+}
+
+static void skd_free_skreq(struct skd_device *skdev)
+{
+ u32 i;
+
+ if (skdev->skreq_table == NULL)
+ return;
+
+ for (i = 0; i < skdev->num_req_context; i++) {
+ struct skd_request_context *skreq;
+
+ skreq = &skdev->skreq_table[i];
+
+ skd_free_sg_list(skdev, skreq->sksg_list,
+ skdev->sgs_per_request,
+ skreq->sksg_dma_address);
+
+ skreq->sksg_list = NULL;
+ skreq->sksg_dma_address = 0;
+
+ kfree(skreq->sg);
+ }
+
+ kfree(skdev->skreq_table);
+ skdev->skreq_table = NULL;
+}
+
+static void skd_free_skspcl(struct skd_device *skdev)
+{
+ u32 i;
+ u32 nbytes;
+
+ if (skdev->skspcl_table == NULL)
+ return;
+
+ for (i = 0; i < skdev->n_special; i++) {
+ struct skd_special_context *skspcl;
+
+ skspcl = &skdev->skspcl_table[i];
+
+ if (skspcl->msg_buf != NULL) {
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+ pci_free_consistent(skdev->pdev, nbytes,
+ skspcl->msg_buf,
+ skspcl->mb_dma_address);
+ }
+
+ skspcl->msg_buf = NULL;
+ skspcl->mb_dma_address = 0;
+
+ skd_free_sg_list(skdev, skspcl->req.sksg_list,
+ SKD_N_SG_PER_SPECIAL,
+ skspcl->req.sksg_dma_address);
+
+ skspcl->req.sksg_list = NULL;
+ skspcl->req.sksg_dma_address = 0;
+
+ kfree(skspcl->req.sg);
+ }
+
+ kfree(skdev->skspcl_table);
+ skdev->skspcl_table = NULL;
+}
+
+static void skd_free_sksb(struct skd_device *skdev)
+{
+ struct skd_special_context *skspcl;
+ u32 nbytes;
+
+ skspcl = &skdev->internal_skspcl;
+
+ if (skspcl->data_buf != NULL) {
+ nbytes = SKD_N_INTERNAL_BYTES;
+
+ pci_free_consistent(skdev->pdev, nbytes,
+ skspcl->data_buf, skspcl->db_dma_address);
+ }
+
+ skspcl->data_buf = NULL;
+ skspcl->db_dma_address = 0;
+
+ if (skspcl->msg_buf != NULL) {
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+ pci_free_consistent(skdev->pdev, nbytes,
+ skspcl->msg_buf, skspcl->mb_dma_address);
+ }
+
+ skspcl->msg_buf = NULL;
+ skspcl->mb_dma_address = 0;
+
+ skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
+ skspcl->req.sksg_dma_address);
+
+ skspcl->req.sksg_list = NULL;
+ skspcl->req.sksg_dma_address = 0;
+}
+
+static void skd_free_disk(struct skd_device *skdev)
+{
+ struct gendisk *disk = skdev->disk;
+
+ if (disk != NULL) {
+ struct request_queue *q = disk->queue;
+
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+ if (q)
+ blk_cleanup_queue(q);
+ put_disk(disk);
+ }
+ skdev->disk = NULL;
+}
+
+static void skd_destruct(struct skd_device *skdev)
+{
+ if (skdev == NULL)
+ return;
+
+ pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ skd_free_disk(skdev);
+
+ pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ skd_free_sksb(skdev);
+
+ pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
+ skd_free_skspcl(skdev);
+
+ pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
+ skd_free_skreq(skdev);
+
+ pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ skd_free_skmsg(skdev);
+
+ pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ skd_free_skcomp(skdev);
+
+ pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
+ kfree(skdev);
+}
+
+/*
+ *****************************************************************************
+ * BLOCK DEVICE (BDEV) GLUE
+ *****************************************************************************
+ */
+
+static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct skd_device *skdev;
+ u64 capacity;
+
+ skdev = bdev->bd_disk->private_data;
+
+ pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
+ skdev->name, __func__, __LINE__,
+ bdev->bd_disk->disk_name, current->comm);
+
+ if (skdev->read_cap_is_valid) {
+ capacity = get_capacity(skdev->disk);
+ geo->heads = 64;
+ geo->sectors = 255;
+ geo->cylinders = (capacity) / (255 * 64);
+
+ return 0;
+ }
+ return -EIO;
+}
+
+static int skd_bdev_attach(struct skd_device *skdev)
+{
+ pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
+ add_disk(skdev->disk);
+ return 0;
+}
+
+static const struct block_device_operations skd_blockdev_ops = {
+ .owner = THIS_MODULE,
+ .ioctl = skd_bdev_ioctl,
+ .getgeo = skd_bdev_getgeo,
+};
+
+
+/*
+ *****************************************************************************
+ * PCIe DRIVER GLUE
+ *****************************************************************************
+ */
+
+static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
+ { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { 0 } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
+
+static char *skd_pci_info(struct skd_device *skdev, char *str)
+{
+ int pcie_reg;
+
+ strcpy(str, "PCIe (");
+ pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
+
+ if (pcie_reg) {
+
+ char lwstr[6];
+ uint16_t pcie_lstat, lspeed, lwidth;
+
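+ /*
+ * 0x12 is the Link Status register offset within the PCI Express
+ * capability (PCI_EXP_LNKSTA): bits 3:0 carry the link speed code,
+ * bits 9:4 the negotiated link width.
+ */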
+ pcie_reg += 0x12;
+ pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
+ lspeed = pcie_lstat & (0xF);
+ lwidth = (pcie_lstat & 0x3F0) >> 4;
+
+ if (lspeed == 1)
+ strcat(str, "2.5GT/s ");
+ else if (lspeed == 2)
+ strcat(str, "5.0GT/s ");
+ else
+ strcat(str, "<unknown> ");
+ snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
+ strcat(str, lwstr);
+ }
+ return str;
+}
+
+static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int i;
+ int rc = 0;
+ char pci_str[32];
+ struct skd_device *skdev;
+
+ pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
+ DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
+ pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
+ pci_name(pdev), pdev->vendor, pdev->device);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!rc) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ pr_err("(%s): consistent DMA mask error %d\n",
+ pci_name(pdev), rc);
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+
+ pr_err("(%s): DMA mask error %d\n",
+ pci_name(pdev), rc);
+ goto err_out_regions;
+ }
+ }
+
+ if (!skd_major) {
+ rc = register_blkdev(0, DRV_NAME);
+ if (rc < 0)
+ goto err_out_regions;
+ BUG_ON(!rc);
+ skd_major = rc;
+ }
+
+ skdev = skd_construct(pdev);
+ if (skdev == NULL) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ skd_pci_info(skdev, pci_str);
+ pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
+
+ pci_set_master(pdev);
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc) {
+ pr_err(
+ "(%s): bad enable of PCIe error reporting rc=%d\n",
+ skd_name(skdev), rc);
+ skdev->pcie_error_reporting_is_enabled = 0;
+ } else
+ skdev->pcie_error_reporting_is_enabled = 1;
+
+ pci_set_drvdata(pdev, skdev);
+
+ skdev->disk->driverfs_dev = &pdev->dev;
+
+ for (i = 0; i < SKD_MAX_BARS; i++) {
+ skdev->mem_phys[i] = pci_resource_start(pdev, i);
+ skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
+ skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
+ skdev->mem_size[i]);
+ if (!skdev->mem_map[i]) {
+ pr_err("(%s): Unable to map adapter memory!\n",
+ skd_name(skdev));
+ rc = -ENODEV;
+ goto err_out_iounmap;
+ }
+ pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->mem_map[i],
+ (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ }
+
+ rc = skd_acquire_irq(skdev);
+ if (rc) {
+ pr_err("(%s): interrupt resource error %d\n",
+ skd_name(skdev), rc);
+ goto err_out_iounmap;
+ }
+
+ rc = skd_start_timer(skdev);
+ if (rc)
+ goto err_out_timer;
+
+ init_waitqueue_head(&skdev->waitq);
+
+ skd_start_device(skdev);
+
+ rc = wait_event_interruptible_timeout(skdev->waitq,
+ (skdev->gendisk_on),
+ (SKD_START_WAIT_SECONDS * HZ));
+ if (skdev->gendisk_on > 0) {
+ /* device came on-line after reset */
+ skd_bdev_attach(skdev);
+ rc = 0;
+ } else {
+ /*
+ * we timed out, something is wrong with the device,
+ * don't add the disk structure
+ */
+ pr_err(
+ "(%s): error: waiting for s1120 timed out %d!\n",
+ skd_name(skdev), rc);
+ /* if we timed out without an error, return -ENXIO */
+ if (!rc)
+ rc = -ENXIO;
+ goto err_out_timer;
+ }
+
+#ifdef SKD_VMK_POLL_HANDLER
+ if (skdev->irq_type == SKD_IRQ_MSIX) {
+ /* MSIX completion handler is being used for coredump */
+ vmklnx_scsi_register_poll_handler(skdev->scsi_host,
+ skdev->msix_entries[5].vector,
+ skd_comp_q, skdev);
+ } else {
+ vmklnx_scsi_register_poll_handler(skdev->scsi_host,
+ skdev->pdev->irq, skd_isr,
+ skdev);
+ }
+#endif /* SKD_VMK_POLL_HANDLER */
+
+ return rc;
+
+err_out_timer:
+ skd_stop_device(skdev);
+ skd_release_irq(skdev);
+
+err_out_iounmap:
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap(skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+ skd_destruct(skdev);
+
+err_out_regions:
+ pci_release_regions(pdev);
+
+err_out:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return rc;
+}
+
+static void skd_pci_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct skd_device *skdev;
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return;
+ }
+ skd_stop_device(skdev);
+ skd_release_irq(skdev);
+
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap(skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+ skd_destruct(skdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct skd_device *skdev;
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return -EIO;
+ }
+
+ skd_stop_device(skdev);
+
+ skd_release_irq(skdev);
+
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap(skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_release_regions(pdev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int skd_pci_resume(struct pci_dev *pdev)
+{
+ int i;
+ int rc = 0;
+ struct skd_device *skdev;
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return -1;
+ }
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!rc) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ pr_err("(%s): consistent DMA mask error %d\n",
+ pci_name(pdev), rc);
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+
+ pr_err("(%s): DMA mask error %d\n",
+ pci_name(pdev), rc);
+ goto err_out_regions;
+ }
+ }
+
+ pci_set_master(pdev);
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc) {
+ pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
+ skdev->name, rc);
+ skdev->pcie_error_reporting_is_enabled = 0;
+ } else
+ skdev->pcie_error_reporting_is_enabled = 1;
+
+ for (i = 0; i < SKD_MAX_BARS; i++) {
+
+ skdev->mem_phys[i] = pci_resource_start(pdev, i);
+ skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
+ skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
+ skdev->mem_size[i]);
+ if (!skdev->mem_map[i]) {
+ pr_err("(%s): Unable to map adapter memory!\n",
+ skd_name(skdev));
+ rc = -ENODEV;
+ goto err_out_iounmap;
+ }
+ pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->mem_map[i],
+ (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ }
+ rc = skd_acquire_irq(skdev);
+ if (rc) {
+
+ pr_err("(%s): interrupt resource error %d\n",
+ pci_name(pdev), rc);
+ goto err_out_iounmap;
+ }
+
+ rc = skd_start_timer(skdev);
+ if (rc)
+ goto err_out_timer;
+
+ init_waitqueue_head(&skdev->waitq);
+
+ skd_start_device(skdev);
+
+ return rc;
+
+err_out_timer:
+ skd_stop_device(skdev);
+ skd_release_irq(skdev);
+
+err_out_iounmap:
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap(skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+err_out_regions:
+ pci_release_regions(pdev);
+
+err_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void skd_pci_shutdown(struct pci_dev *pdev)
+{
+ struct skd_device *skdev;
+
+ pr_err("skd_pci_shutdown called\n");
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return;
+ }
+
+ pr_err("%s: calling stop\n", skd_name(skdev));
+ skd_stop_device(skdev);
+}
+
+static struct pci_driver skd_driver = {
+ .name = DRV_NAME,
+ .id_table = skd_pci_tbl,
+ .probe = skd_pci_probe,
+ .remove = skd_pci_remove,
+ .suspend = skd_pci_suspend,
+ .resume = skd_pci_resume,
+ .shutdown = skd_pci_shutdown,
+};
+
+/*
+ *****************************************************************************
+ * LOGGING SUPPORT
+ *****************************************************************************
+ */
+
+static const char *skd_name(struct skd_device *skdev)
+{
+ memset(skdev->id_str, 0, sizeof(skdev->id_str));
+
+ if (skdev->inquiry_is_valid)
+ snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
+ skdev->name, skdev->inq_serial_num,
+ pci_name(skdev->pdev));
+ else
+ snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
+ skdev->name, pci_name(skdev->pdev));
+
+ return skdev->id_str;
+}
+
+const char *skd_drive_state_to_str(int state)
+{
+ switch (state) {
+ case FIT_SR_DRIVE_OFFLINE:
+ return "OFFLINE";
+ case FIT_SR_DRIVE_INIT:
+ return "INIT";
+ case FIT_SR_DRIVE_ONLINE:
+ return "ONLINE";
+ case FIT_SR_DRIVE_BUSY:
+ return "BUSY";
+ case FIT_SR_DRIVE_FAULT:
+ return "FAULT";
+ case FIT_SR_DRIVE_DEGRADED:
+ return "DEGRADED";
+ case FIT_SR_PCIE_LINK_DOWN:
+ return "INK_DOWN";
+ case FIT_SR_DRIVE_SOFT_RESET:
+ return "SOFT_RESET";
+ case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
+ return "NEED_FW";
+ case FIT_SR_DRIVE_INIT_FAULT:
+ return "INIT_FAULT";
+ case FIT_SR_DRIVE_BUSY_SANITIZE:
+ return "BUSY_SANITIZE";
+ case FIT_SR_DRIVE_BUSY_ERASE:
+ return "BUSY_ERASE";
+ case FIT_SR_DRIVE_FW_BOOTING:
+ return "FW_BOOTING";
+ default:
+ return "???";
+ }
+}
+
+const char *skd_skdev_state_to_str(enum skd_drvr_state state)
+{
+ switch (state) {
+ case SKD_DRVR_STATE_LOAD:
+ return "LOAD";
+ case SKD_DRVR_STATE_IDLE:
+ return "IDLE";
+ case SKD_DRVR_STATE_BUSY:
+ return "BUSY";
+ case SKD_DRVR_STATE_STARTING:
+ return "STARTING";
+ case SKD_DRVR_STATE_ONLINE:
+ return "ONLINE";
+ case SKD_DRVR_STATE_PAUSING:
+ return "PAUSING";
+ case SKD_DRVR_STATE_PAUSED:
+ return "PAUSED";
+ case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+ return "DRAINING_TIMEOUT";
+ case SKD_DRVR_STATE_RESTARTING:
+ return "RESTARTING";
+ case SKD_DRVR_STATE_RESUMING:
+ return "RESUMING";
+ case SKD_DRVR_STATE_STOPPING:
+ return "STOPPING";
+ case SKD_DRVR_STATE_SYNCING:
+ return "SYNCING";
+ case SKD_DRVR_STATE_FAULT:
+ return "FAULT";
+ case SKD_DRVR_STATE_DISAPPEARED:
+ return "DISAPPEARED";
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ return "BUSY_ERASE";
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ return "BUSY_SANITIZE";
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ return "BUSY_IMMINENT";
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ return "WAIT_BOOT";
+
+ default:
+ return "???";
+ }
+}
+
+const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+{
+ switch (state) {
+ case SKD_MSG_STATE_IDLE:
+ return "IDLE";
+ case SKD_MSG_STATE_BUSY:
+ return "BUSY";
+ default:
+ return "???";
+ }
+}
+
+const char *skd_skreq_state_to_str(enum skd_req_state state)
+{
+ switch (state) {
+ case SKD_REQ_STATE_IDLE:
+ return "IDLE";
+ case SKD_REQ_STATE_SETUP:
+ return "SETUP";
+ case SKD_REQ_STATE_BUSY:
+ return "BUSY";
+ case SKD_REQ_STATE_COMPLETED:
+ return "COMPLETED";
+ case SKD_REQ_STATE_TIMEOUT:
+ return "TIMEOUT";
+ case SKD_REQ_STATE_ABORTED:
+ return "ABORTED";
+ default:
+ return "???";
+ }
+}
+
+static void skd_log_skdev(struct skd_device *skdev, const char *event)
+{
+ pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
+ skdev->name, __func__, __LINE__, skdev->name, skdev, event);
+ pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
+ skdev->name, __func__, __LINE__,
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+ pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->in_flight, skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
+}
+
+static void skd_log_skmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg, const char *event)
+{
+ pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
+ skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
+ pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
+ skdev->name, __func__, __LINE__,
+ skd_skmsg_state_to_str(skmsg->state), skmsg->state,
+ skmsg->id, skmsg->length);
+}
+
+static void skd_log_skreq(struct skd_device *skdev,
+ struct skd_request_context *skreq, const char *event)
+{
+ pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
+ skdev->name, __func__, __LINE__, skdev->name, skreq, event);
+ pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
+ skdev->name, __func__, __LINE__,
+ skd_skreq_state_to_str(skreq->state), skreq->state,
+ skreq->id, skreq->fitmsg_id);
+ pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
+ skdev->name, __func__, __LINE__,
+ skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
+
+ if (skreq->req != NULL) {
+ struct request *req = skreq->req;
+ u32 lba = (u32)blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ pr_debug("%s:%s:%d "
+ "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
+ skdev->name, __func__, __LINE__,
+ req, lba, lba, count, count,
+ (int)rq_data_dir(req));
+ } else
+ pr_debug("%s:%s:%d req=NULL\n",
+ skdev->name, __func__, __LINE__);
+}
+
+/*
+ *****************************************************************************
+ * MODULE GLUE
+ *****************************************************************************
+ */
+
+static int __init skd_init(void)
+{
+ pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
+
+ switch (skd_isr_type) {
+ case SKD_IRQ_LEGACY:
+ case SKD_IRQ_MSI:
+ case SKD_IRQ_MSIX:
+ break;
+ default:
+ pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
+ skd_isr_type, SKD_IRQ_DEFAULT);
+ skd_isr_type = SKD_IRQ_DEFAULT;
+ }
+
+ if (skd_max_queue_depth < 1 ||
+ skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
+ pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
+ skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
+ skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
+ }
+
+ if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
+ pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
+ skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
+ skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
+ }
+
+ if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
+ pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
+ skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
+ skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
+ }
+
+ if (skd_dbg_level < 0 || skd_dbg_level > 2) {
+ pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
+ skd_dbg_level, 0);
+ skd_dbg_level = 0;
+ }
+
+ if (skd_isr_comp_limit < 0) {
+ pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
+ skd_isr_comp_limit, 0);
+ skd_isr_comp_limit = 0;
+ }
+
+ if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
+ pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
+ skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
+ skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+ }
+
+ return pci_register_driver(&skd_driver);
+}
+
+static void __exit skd_exit(void)
+{
+ pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
+
+ pci_unregister_driver(&skd_driver);
+
+ if (skd_major)
+ unregister_blkdev(skd_major, DRV_NAME);
+}
+
+module_init(skd_init);
+module_exit(skd_exit);
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
new file mode 100644
index 00000000000..61c757ff016
--- /dev/null
+++ b/drivers/block/skd_s1120.h
@@ -0,0 +1,330 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ */
+
+
+#ifndef SKD_S1120_H
+#define SKD_S1120_H
+
+#pragma pack(push, s1120_h, 1)
+
+/*
+ * Q-channel, 64-bit r/w
+ */
+#define FIT_Q_COMMAND 0x400u
+#define FIT_QCMD_QID_MASK (0x3 << 1)
+#define FIT_QCMD_QID0 (0x0 << 1)
+#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
+#define FIT_QCMD_QID1 (0x1 << 1)
+#define FIT_QCMD_QID2 (0x2 << 1)
+#define FIT_QCMD_QID3 (0x3 << 1)
+#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
+#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
+#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
+#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
+#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
+#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
+#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
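+
+/*
+ * Illustrative sketch (not driver code): a queue command is the
+ * 64-byte-aligned DMA address of a FIT message OR'd with a queue id
+ * and a message-size code; with msg_dma and reg_base as hypothetical
+ * placeholders:
+ *
+ *	u64 qcmd = (msg_dma & FIT_QCMD_BASE_ADDRESS_MASK) |
+ *		   FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_64;
+ *	writeq(qcmd, reg_base + FIT_Q_COMMAND);
+ */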
+
+/*
+ * Control, 32-bit r/w
+ */
+#define FIT_CONTROL 0x500u
+#define FIT_CR_HARD_RESET (1u << 0u)
+#define FIT_CR_SOFT_RESET (1u << 1u)
+#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
+#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
+
+/*
+ * Status, 32-bit, r/o
+ */
+#define FIT_STATUS 0x510u
+#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu
+#define FIT_SR_SIGNATURE (0xFF << 8)
+#define FIT_SR_PIO_DMA (1 << 16)
+#define FIT_SR_DRIVE_OFFLINE 0x00
+#define FIT_SR_DRIVE_INIT 0x01
+/* #define FIT_SR_DRIVE_READY 0x02 */
+#define FIT_SR_DRIVE_ONLINE 0x03
+#define FIT_SR_DRIVE_BUSY 0x04
+#define FIT_SR_DRIVE_FAULT 0x05
+#define FIT_SR_DRIVE_DEGRADED 0x06
+#define FIT_SR_PCIE_LINK_DOWN 0x07
+#define FIT_SR_DRIVE_SOFT_RESET 0x08
+#define FIT_SR_DRIVE_INIT_FAULT 0x09
+#define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A
+#define FIT_SR_DRIVE_BUSY_ERASE 0x0B
+#define FIT_SR_DRIVE_FW_BOOTING 0x0C
+#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE
+#define FIT_SR_DEVICE_MISSING 0xFF
+#define FIT_SR__RESERVED 0xFFFFFF00u
+
+/*
+ * FIT_STATUS - Status register data definition
+ */
+#define FIT_SR_STATE_MASK (0xFF << 0)
+#define FIT_SR_SIGNATURE (0xFF << 8)
+#define FIT_SR_PIO_DMA (1 << 16)
+
+/*
+ * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
+ */
+#define FIT_INT_STATUS_HOST 0x520u
+#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
+#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
+#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
+#define FIT_ISH_UNDEFINED_3 (1u << 3u)
+#define FIT_ISH_UNDEFINED_4 (1u << 4u)
+#define FIT_ISH_Q0_FULL (1u << 5u)
+#define FIT_ISH_Q1_FULL (1u << 6u)
+#define FIT_ISH_Q2_FULL (1u << 7u)
+#define FIT_ISH_Q3_FULL (1u << 8u)
+#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
+#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
+
+#define FIT_INT_DEF_MASK \
+ (FIT_ISH_FW_STATE_CHANGE | \
+ FIT_ISH_COMPLETION_POSTED | \
+ FIT_ISH_MSG_FROM_DEV | \
+ FIT_ISH_Q0_FULL | \
+ FIT_ISH_Q1_FULL | \
+ FIT_ISH_Q2_FULL | \
+ FIT_ISH_Q3_FULL | \
+ FIT_ISH_QCMD_FIFO_OVERRUN | \
+ FIT_ISH_BAD_EXP_ROM_READ)
+
+#define FIT_INT_QUEUE_FULL \
+ (FIT_ISH_Q0_FULL | \
+ FIT_ISH_Q1_FULL | \
+ FIT_ISH_Q2_FULL | \
+ FIT_ISH_Q3_FULL)
+
+#define MSI_MSG_NWL_ERROR_0 0x00000000
+#define MSI_MSG_NWL_ERROR_1 0x00000001
+#define MSI_MSG_NWL_ERROR_2 0x00000002
+#define MSI_MSG_NWL_ERROR_3 0x00000003
+#define MSI_MSG_STATE_CHANGE 0x00000004
+#define MSI_MSG_COMPLETION_POSTED 0x00000005
+#define MSI_MSG_MSG_FROM_DEV 0x00000006
+#define MSI_MSG_RESERVED_0 0x00000007
+#define MSI_MSG_RESERVED_1 0x00000008
+#define MSI_MSG_QUEUE_0_FULL 0x00000009
+#define MSI_MSG_QUEUE_1_FULL 0x0000000A
+#define MSI_MSG_QUEUE_2_FULL 0x0000000B
+#define MSI_MSG_QUEUE_3_FULL 0x0000000C
+
+#define FIT_INT_RESERVED_MASK \
+ (FIT_ISH_UNDEFINED_3 | \
+ FIT_ISH_UNDEFINED_4)
+
+/*
+ * Interrupt mask, 32-bit r/w
+ * Bit definitions are the same as FIT_INT_STATUS_HOST
+ */
+#define FIT_INT_MASK_HOST 0x528u
+
+/*
+ * Message to device, 32-bit r/w
+ */
+#define FIT_MSG_TO_DEVICE 0x540u
+
+/*
+ * Message from device, 32-bit, r/o
+ */
+#define FIT_MSG_FROM_DEVICE 0x548u
+
+/*
+ * 32-bit messages to/from device, composition/extraction macros
+ */
+#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
+ ((((TYPE) & 0xFFu) << 24u) | \
+ (((PARAM) & 0xFFu) << 16u) | \
+ (((DATA) & 0xFFFFu) << 0u))
+#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
+#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
+#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
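+
+/*
+ * Usage sketch (illustrative only): a message word packs an 8-bit type,
+ * an 8-bit parameter and 16 bits of data, and is written to
+ * FIT_MSG_TO_DEVICE or decoded from FIT_MSG_FROM_DEVICE, e.g.:
+ *
+ *	u32 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
+ *	// FIT_MXD_TYPE(mtd) == FIT_MTD_ARM_QUEUE
+ *	// FIT_MXD_PARAM(mtd) == 0, FIT_MXD_DATA(mtd) == 0
+ */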
+
+/*
+ * Types of messages to/from device
+ */
+#define FIT_MTD_FITFW_INIT 0x01u
+#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
+#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
+#define FIT_MTD_SET_COMPQ_ADDR 0x04u
+#define FIT_MTD_ARM_QUEUE 0x05u
+#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
+#define FIT_MFD_SMART_EXCEEDED 0x10u
+#define FIT_MFD_POWER_DOWN 0x11u
+#define FIT_MFD_OFFLINE 0x12u
+#define FIT_MFD_ONLINE 0x13u
+#define FIT_MFD_FW_RESTARTING 0x14u
+#define FIT_MFD_PM_ACTIVE 0x15u
+#define FIT_MFD_PM_STANDBY 0x16u
+#define FIT_MFD_PM_SLEEP 0x17u
+#define FIT_MFD_CMD_PROGRESS 0x18u
+
+#define FIT_MTD_DEBUG 0xFEu
+#define FIT_MFD_DEBUG 0xFFu
+
+#define FIT_MFD_MASK (0xFFu)
+#define FIT_MFD_DATA_MASK (0xFFu)
+#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK)
+#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK)
+
+/*
+ * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
+ * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
+ * (was Response buffer in docs)
+ */
+#define FIT_MSG_TO_DEVICE_ARG 0x580u
+
+/*
+ * Hardware (ASIC) version, 32-bit r/o
+ */
+#define FIT_HW_VERSION 0x588u
+
+/*
+ * Scatter/gather list descriptor.
+ * 32-bytes and must be aligned on a 32-byte boundary.
+ * All fields are in little endian order.
+ */
+struct fit_sg_descriptor {
+ uint32_t control;
+ uint32_t byte_count;
+ uint64_t host_side_addr;
+ uint64_t dev_side_addr;
+ uint64_t next_desc_ptr;
+};
+
+#define FIT_SGD_CONTROL_NOT_LAST 0x000u
+#define FIT_SGD_CONTROL_LAST 0x40Eu
+
+/*
+ * Header at the beginning of a FIT message. The header
+ * is followed by SSDI requests, each 64 bytes long.
+ * A FIT message can be up to 512 bytes long and must start
+ * on a 64-byte boundary.
+ */
+struct fit_msg_hdr {
+ uint8_t protocol_id;
+ uint8_t num_protocol_cmds_coalesced;
+ uint8_t _reserved[62];
+};
+
+#define FIT_PROTOCOL_ID_FIT 1
+#define FIT_PROTOCOL_ID_SSDI 2
+#define FIT_PROTOCOL_ID_SOFIT 3
+
+
+#define FIT_PROTOCOL_MINOR_VER(mtd_val) (((mtd_val) >> 16) & 0xF)
+#define FIT_PROTOCOL_MAJOR_VER(mtd_val) (((mtd_val) >> 20) & 0xF)
+
+/*
+ * Format of a completion entry. The completion queue is circular
+ * and must have at least as many entries as the maximum number
+ * of commands that may be issued to the device.
+ *
+ * There are no head/tail pointers. The cycle value is used to
+ * infer the presence of new completion records.
+ * Initially the cycle in all entries is 0, the index is 0, and
+ * the cycle value to expect is 1. When completions are added
+ * their cycle values are set to 1. When the index wraps the
+ * cycle value to expect is incremented.
+ *
+ * Command_context is opaque and taken verbatim from the SSDI command.
+ * All other fields are big endian.
+ */
+#define FIT_PROTOCOL_VERSION_0 0
+
+/*
+ * Protocol major version 1 completion entry.
+ * The major protocol version is found in bits
+ * 20-23 of the FIT_MTD_FITFW_INIT response.
+ */
+struct fit_completion_entry_v1 {
+ uint32_t num_returned_bytes;
+ uint16_t tag;
+ uint8_t status; /* SCSI status */
+ uint8_t cycle;
+};
+#define FIT_PROTOCOL_VERSION_1 1
+#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
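+
+/*
+ * Consumer-side sketch (illustrative only, reusing the driver's
+ * skcomp_table/skcomp_ix/skcomp_cycle names): new completions are
+ * detected purely from the cycle value, with no head/tail pointers:
+ *
+ *	while (skcomp_table[skcomp_ix].cycle == skcomp_cycle) {
+ *		// ... consume skcomp_table[skcomp_ix] ...
+ *		if (++skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
+ *			skcomp_ix = 0;
+ *			skcomp_cycle++;	// next expected cycle value
+ *		}
+ *	}
+ */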
+
+struct fit_comp_error_info {
+ uint8_t type:7; /* 00: Bits 0-6 indicate the type of sense data. */
+ uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */
+ uint8_t reserved0; /* 01: Obsolete field */
+ uint8_t key:4; /* 02: Bits 0-3 indicate the sense key. */
+ uint8_t reserved2:1; /* 02: Reserved bit. */
+ uint8_t bad_length:1; /* 02: Incorrect Length Indicator */
+ uint8_t end_medium:1; /* 02: End of Medium */
+ uint8_t file_mark:1; /* 02: Filemark */
+ uint8_t info[4]; /* 03: */
+ uint8_t reserved1; /* 07: Additional Sense Length */
+ uint8_t cmd_spec[4]; /* 08: Command Specific Information */
+ uint8_t code; /* 0C: Additional Sense Code */
+ uint8_t qual; /* 0D: Additional Sense Code Qualifier */
+ uint8_t fruc; /* 0E: Field Replaceable Unit Code */
+ uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */
+ uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */
+ uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
+ uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
+ uint16_t uec; /* 14: Additional Sense Bytes */
+ uint64_t per; /* 16: Additional Sense Bytes */
+ uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
+};
+
+
+/* Task management constants */
+#define SOFT_TASK_SIMPLE 0x00
+#define SOFT_TASK_HEAD_OF_QUEUE 0x01
+#define SOFT_TASK_ORDERED 0x02
+
+/*
+ * Version zero has the last 32 bits reserved;
+ * version one uses them as sg_list_len_bytes.
+ */
+struct skd_command_header {
+ uint64_t sg_list_dma_address;
+ uint16_t tag;
+ uint8_t attribute;
+ uint8_t add_cdb_len; /* In 32 bit words */
+ uint32_t sg_list_len_bytes;
+};
+
+struct skd_scsi_request {
+ struct skd_command_header hdr;
+ unsigned char cdb[16];
+/* unsigned char _reserved[16]; */
+};
+
+struct driver_inquiry_data {
+ uint8_t peripheral_device_type:5;
+ uint8_t qualifier:3;
+ uint8_t page_code;
+ uint16_t page_length;
+ uint16_t pcie_bus_number;
+ uint8_t pcie_device_number;
+ uint8_t pcie_function_number;
+ uint8_t pcie_link_speed;
+ uint8_t pcie_link_lanes;
+ uint16_t pcie_vendor_id;
+ uint16_t pcie_device_id;
+ uint16_t pcie_subsystem_vendor_id;
+ uint16_t pcie_subsystem_device_id;
+ uint8_t reserved1[2];
+ uint8_t reserved2[3];
+ uint8_t driver_version_length;
+ uint8_t driver_version[0x14];
+};
+
+#pragma pack(pop, s1120_h)
+
+#endif /* SKD_S1120_H */
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5cdf88b7ad9..6a680d4de7f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -11,12 +11,11 @@
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include <linux/numa.h>
#define PART_BITS 4
-static bool use_bio;
-module_param(use_bio, bool, S_IRUGO);
-
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -26,13 +25,11 @@ struct virtio_blk
{
struct virtio_device *vdev;
struct virtqueue *vq;
- wait_queue_head_t queue_wait;
+ spinlock_t vq_lock;
/* The disk structure for the kernel. */
struct gendisk *disk;
- mempool_t *pool;
-
/* Process context for config space updates */
struct work_struct config_work;
@@ -47,31 +44,17 @@ struct virtio_blk
/* Ida index - used to track minor number allocations. */
int index;
-
- /* Scatterlist: can be too big for stack. */
- struct scatterlist sg[/*sg_elems*/];
};
struct virtblk_req
{
struct request *req;
- struct bio *bio;
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
- struct work_struct work;
- struct virtio_blk *vblk;
- int flags;
u8 status;
struct scatterlist sg[];
};
-enum {
- VBLK_IS_FLUSH = 1,
- VBLK_REQ_FLUSH = 2,
- VBLK_REQ_DATA = 4,
- VBLK_REQ_FUA = 8,
-};
-
static inline int virtblk_result(struct virtblk_req *vbr)
{
switch (vbr->status) {
@@ -84,22 +67,6 @@ static inline int virtblk_result(struct virtblk_req *vbr)
}
}
-static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
- gfp_t gfp_mask)
-{
- struct virtblk_req *vbr;
-
- vbr = mempool_alloc(vblk->pool, gfp_mask);
- if (!vbr)
- return NULL;
-
- vbr->vblk = vblk;
- if (use_bio)
- sg_init_table(vbr->sg, vblk->sg_elems);
-
- return vbr;
-}
-
static int __virtblk_add_req(struct virtqueue *vq,
struct virtblk_req *vbr,
struct scatterlist *data_sg,
@@ -143,83 +110,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
-{
- struct virtio_blk *vblk = vbr->vblk;
- DEFINE_WAIT(wait);
- int ret;
-
- spin_lock_irq(vblk->disk->queue->queue_lock);
- while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
- have_data)) < 0)) {
- prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
- TASK_UNINTERRUPTIBLE);
-
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- io_schedule();
- spin_lock_irq(vblk->disk->queue->queue_lock);
-
- finish_wait(&vblk->queue_wait, &wait);
- }
-
- virtqueue_kick(vblk->vq);
- spin_unlock_irq(vblk->disk->queue->queue_lock);
-}
-
-static void virtblk_bio_send_flush(struct virtblk_req *vbr)
-{
- vbr->flags |= VBLK_IS_FLUSH;
- vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = 0;
-
- virtblk_add_req(vbr, false);
-}
-
-static void virtblk_bio_send_data(struct virtblk_req *vbr)
-{
- struct virtio_blk *vblk = vbr->vblk;
- struct bio *bio = vbr->bio;
- bool have_data;
-
- vbr->flags &= ~VBLK_IS_FLUSH;
- vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = bio->bi_sector;
- vbr->out_hdr.ioprio = bio_prio(bio);
-
- if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
- have_data = true;
- if (bio->bi_rw & REQ_WRITE)
- vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- else
- vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- } else
- have_data = false;
-
- virtblk_add_req(vbr, have_data);
-}
-
-static void virtblk_bio_send_data_work(struct work_struct *work)
-{
- struct virtblk_req *vbr;
-
- vbr = container_of(work, struct virtblk_req, work);
-
- virtblk_bio_send_data(vbr);
-}
-
-static void virtblk_bio_send_flush_work(struct work_struct *work)
-{
- struct virtblk_req *vbr;
-
- vbr = container_of(work, struct virtblk_req, work);
-
- virtblk_bio_send_flush(vbr);
-}
-
static inline void virtblk_request_done(struct virtblk_req *vbr)
{
- struct virtio_blk *vblk = vbr->vblk;
struct request *req = vbr->req;
int error = virtblk_result(vbr);
@@ -231,90 +123,45 @@ static inline void virtblk_request_done(struct virtblk_req *vbr)
req->errors = (error != 0);
}
- __blk_end_request_all(req, error);
- mempool_free(vbr, vblk->pool);
-}
-
-static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
-{
- struct virtio_blk *vblk = vbr->vblk;
-
- if (vbr->flags & VBLK_REQ_DATA) {
- /* Send out the actual write data */
- INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
- queue_work(virtblk_wq, &vbr->work);
- } else {
- bio_endio(vbr->bio, virtblk_result(vbr));
- mempool_free(vbr, vblk->pool);
- }
-}
-
-static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
-{
- struct virtio_blk *vblk = vbr->vblk;
-
- if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
- /* Send out a flush before end the bio */
- vbr->flags &= ~VBLK_REQ_DATA;
- INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
- queue_work(virtblk_wq, &vbr->work);
- } else {
- bio_endio(vbr->bio, virtblk_result(vbr));
- mempool_free(vbr, vblk->pool);
- }
-}
-
-static inline void virtblk_bio_done(struct virtblk_req *vbr)
-{
- if (unlikely(vbr->flags & VBLK_IS_FLUSH))
- virtblk_bio_flush_done(vbr);
- else
- virtblk_bio_data_done(vbr);
+ blk_mq_end_io(req, error);
}
static void virtblk_done(struct virtqueue *vq)
{
struct virtio_blk *vblk = vq->vdev->priv;
- bool bio_done = false, req_done = false;
+ bool req_done = false;
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
- spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+ spin_lock_irqsave(&vblk->vq_lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- if (vbr->bio) {
- virtblk_bio_done(vbr);
- bio_done = true;
- } else {
- virtblk_request_done(vbr);
- req_done = true;
- }
+ virtblk_request_done(vbr);
+ req_done = true;
}
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
} while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
/* In case queue is stopped waiting for more buffers. */
if (req_done)
- blk_start_queue(vblk->disk->queue);
- spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
-
- if (bio_done)
- wake_up(&vblk->queue_wait);
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue);
}
-static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
- struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtblk_req *vbr = req->special;
+ unsigned long flags;
unsigned int num;
- struct virtblk_req *vbr;
+ const bool last = (req->cmd_flags & REQ_END) != 0;
- vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
- if (!vbr)
- /* When another request finishes we'll try again. */
- return false;
+ BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
vbr->req = req;
- vbr->bio = NULL;
if (req->cmd_flags & REQ_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
vbr->out_hdr.sector = 0;
@@ -342,7 +189,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- num = blk_rq_map_sg(q, vbr->req, vblk->sg);
+ num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
if (num) {
if (rq_data_dir(vbr->req) == WRITE)
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
@@ -350,63 +197,19 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
}
- if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
- mempool_free(vbr, vblk->pool);
- return false;
- }
-
- return true;
-}
-
-static void virtblk_request(struct request_queue *q)
-{
- struct virtio_blk *vblk = q->queuedata;
- struct request *req;
- unsigned int issued = 0;
-
- while ((req = blk_peek_request(q)) != NULL) {
- BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
- /* If this request fails, stop queue and wait for something to
- finish to restart it. */
- if (!do_req(q, vblk, req)) {
- blk_stop_queue(q);
- break;
- }
- blk_start_request(req);
- issued++;
+ spin_lock_irqsave(&vblk->vq_lock, flags);
+ if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+ virtqueue_kick(vblk->vq);
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
+ blk_mq_stop_hw_queue(hctx);
+ return BLK_MQ_RQ_QUEUE_BUSY;
}
- if (issued)
+ if (last)
virtqueue_kick(vblk->vq);
-}
-static void virtblk_make_request(struct request_queue *q, struct bio *bio)
-{
- struct virtio_blk *vblk = q->queuedata;
- struct virtblk_req *vbr;
-
- BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
-
- vbr = virtblk_alloc_req(vblk, GFP_NOIO);
- if (!vbr) {
- bio_endio(bio, -ENOMEM);
- return;
- }
-
- vbr->bio = bio;
- vbr->flags = 0;
- if (bio->bi_rw & REQ_FLUSH)
- vbr->flags |= VBLK_REQ_FLUSH;
- if (bio->bi_rw & REQ_FUA)
- vbr->flags |= VBLK_REQ_FUA;
- if (bio->bi_size)
- vbr->flags |= VBLK_REQ_DATA;
-
- if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
- virtblk_bio_send_flush(vbr);
- else
- virtblk_bio_send_data(vbr);
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
+ return BLK_MQ_RQ_QUEUE_OK;
}
/* return id (s/n) string for *disk to *id_str
@@ -456,18 +259,15 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
- struct virtio_blk_geometry vgeo;
- int err;
/* see if the host passed in geometry config */
- err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
- offsetof(struct virtio_blk_config, geometry),
- &vgeo);
-
- if (!err) {
- geo->heads = vgeo.heads;
- geo->sectors = vgeo.sectors;
- geo->cylinders = vgeo.cylinders;
+ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ geometry.cylinders, &geo->cylinders);
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ geometry.heads, &geo->heads);
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ geometry.sectors, &geo->sectors);
} else {
/* some standard values, similar to sd */
geo->heads = 1 << 6;
@@ -529,8 +329,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
goto done;
/* Host must always specify the capacity. */
- vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
- &capacity, sizeof(capacity));
+ virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
/* If capacity is too big, truncate with warning. */
if ((sector_t)capacity != capacity) {
@@ -608,9 +407,9 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
u8 writeback;
int err;
- err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
- offsetof(struct virtio_blk_config, wce),
- &writeback);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
+ struct virtio_blk_config, wce,
+ &writeback);
if (err)
writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
@@ -642,7 +441,6 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
int i;
- u8 writeback;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
@@ -652,11 +450,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
if (i < 0)
return -EINVAL;
- writeback = i;
- vdev->config->set(vdev,
- offsetof(struct virtio_blk_config, wce),
- &writeback, sizeof(writeback));
-
+ virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
return count;
}
@@ -680,12 +474,35 @@ static const struct device_attribute dev_attr_cache_type_rw =
__ATTR(cache_type, S_IRUGO|S_IWUSR,
virtblk_cache_type_show, virtblk_cache_type_store);
+static struct blk_mq_ops virtio_mq_ops = {
+ .queue_rq = virtio_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .alloc_hctx = blk_mq_alloc_single_hw_queue,
+ .free_hctx = blk_mq_free_single_hw_queue,
+};
+
+static struct blk_mq_reg virtio_mq_reg = {
+ .ops = &virtio_mq_ops,
+ .nr_hw_queues = 1,
+ .queue_depth = 64,
+ .numa_node = NUMA_NO_NODE,
+ .flags = BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+ struct request *rq, unsigned int nr)
+{
+ struct virtio_blk *vblk = data;
+ struct virtblk_req *vbr = rq->special;
+
+ sg_init_table(vbr->sg, vblk->sg_elems);
+}
+
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct request_queue *q;
int err, index;
- int pool_size;
u64 cap;
u32 v, blk_size, sg_elems, opt_io_size;
@@ -699,9 +516,9 @@ static int virtblk_probe(struct virtio_device *vdev)
index = err;
/* We need to know how many segments before we allocate. */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
- offsetof(struct virtio_blk_config, seg_max),
- &sg_elems);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
+ struct virtio_blk_config, seg_max,
+ &sg_elems);
/* We need at least one SG element, whatever they say. */
if (err || !sg_elems)
@@ -709,17 +526,14 @@ static int virtblk_probe(struct virtio_device *vdev)
/* We need an extra sg elements at head and tail. */
sg_elems += 2;
- vdev->priv = vblk = kmalloc(sizeof(*vblk) +
- sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
+ vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
if (!vblk) {
err = -ENOMEM;
goto out_free_index;
}
- init_waitqueue_head(&vblk->queue_wait);
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
- sg_init_table(vblk->sg, vblk->sg_elems);
mutex_init(&vblk->config_lock);
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -728,31 +542,27 @@ static int virtblk_probe(struct virtio_device *vdev)
err = init_vq(vblk);
if (err)
goto out_free_vblk;
-
- pool_size = sizeof(struct virtblk_req);
- if (use_bio)
- pool_size += sizeof(struct scatterlist) * sg_elems;
- vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
- if (!vblk->pool) {
- err = -ENOMEM;
- goto out_free_vq;
- }
+ spin_lock_init(&vblk->vq_lock);
/* FIXME: How many partitions? How long is a piece of string? */
vblk->disk = alloc_disk(1 << PART_BITS);
if (!vblk->disk) {
err = -ENOMEM;
- goto out_mempool;
+ goto out_free_vq;
}
- q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
+ virtio_mq_reg.cmd_size =
+ sizeof(struct virtblk_req) +
+ sizeof(struct scatterlist) * sg_elems;
+
+ q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
if (!q) {
err = -ENOMEM;
goto out_put_disk;
}
- if (use_bio)
- blk_queue_make_request(q, virtblk_make_request);
+ blk_mq_init_commands(q, virtblk_init_vbr, vblk);
+
q->queuedata = vblk;
virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -772,8 +582,7 @@ static int virtblk_probe(struct virtio_device *vdev)
set_disk_ro(vblk->disk, 1);
/* Host must always specify the capacity. */
- vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
- &cap, sizeof(cap));
+ virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
/* If capacity is too big, truncate with warning. */
if ((sector_t)cap != cap) {
@@ -794,46 +603,45 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Host can optionally specify maximum segment size and number of
* segments. */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
- offsetof(struct virtio_blk_config, size_max),
- &v);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
+ struct virtio_blk_config, size_max, &v);
if (!err)
blk_queue_max_segment_size(q, v);
else
blk_queue_max_segment_size(q, -1U);
/* Host can optionally specify the block size of the device */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
- offsetof(struct virtio_blk_config, blk_size),
- &blk_size);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+ struct virtio_blk_config, blk_size,
+ &blk_size);
if (!err)
blk_queue_logical_block_size(q, blk_size);
else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, physical_block_exp),
- &physical_block_exp);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, physical_block_exp,
+ &physical_block_exp);
if (!err && physical_block_exp)
blk_queue_physical_block_size(q,
blk_size * (1 << physical_block_exp));
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, alignment_offset),
- &alignment_offset);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, alignment_offset,
+ &alignment_offset);
if (!err && alignment_offset)
blk_queue_alignment_offset(q, blk_size * alignment_offset);
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, min_io_size),
- &min_io_size);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, min_io_size,
+ &min_io_size);
if (!err && min_io_size)
blk_queue_io_min(q, blk_size * min_io_size);
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, opt_io_size),
- &opt_io_size);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, opt_io_size,
+ &opt_io_size);
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
@@ -857,8 +665,6 @@ out_del_disk:
blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
put_disk(vblk->disk);
-out_mempool:
- mempool_destroy(vblk->pool);
out_free_vq:
vdev->config->del_vqs(vdev);
out_free_vblk:
@@ -890,7 +696,6 @@ static void virtblk_remove(struct virtio_device *vdev)
refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
put_disk(vblk->disk);
- mempool_destroy(vblk->pool);
vdev->config->del_vqs(vdev);
kfree(vblk);
@@ -899,7 +704,7 @@ static void virtblk_remove(struct virtio_device *vdev)
ida_simple_remove(&vd_index_ida, index);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
@@ -914,10 +719,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
flush_work(&vblk->config_work);
- spin_lock_irq(vblk->disk->queue->queue_lock);
- blk_stop_queue(vblk->disk->queue);
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- blk_sync_queue(vblk->disk->queue);
+ blk_mq_stop_hw_queues(vblk->disk->queue);
vdev->config->del_vqs(vdev);
return 0;
@@ -930,11 +732,9 @@ static int virtblk_restore(struct virtio_device *vdev)
vblk->config_enable = true;
ret = init_vq(vdev->priv);
- if (!ret) {
- spin_lock_irq(vblk->disk->queue->queue_lock);
- blk_start_queue(vblk->disk->queue);
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- }
+ if (!ret)
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+
return ret;
}
#endif
@@ -959,7 +759,7 @@ static struct virtio_driver virtio_blk = {
.probe = virtblk_probe,
.remove = virtblk_remove,
.config_changed = virtblk_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bf4b9d282c0..6620b73d049 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -887,6 +887,8 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
unsigned long secure;
struct phys_req preq;
+ xen_blkif_get(blkif);
+
preq.sector_number = req->u.discard.sector_number;
preq.nr_sects = req->u.discard.nr_sectors;
@@ -899,7 +901,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
}
blkif->st_ds_req++;
- xen_blkif_get(blkif);
secure = (blkif->vbd.discard_secure &&
(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
BLKDEV_DISCARD_SECURE : 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a4660bbee8a..432db1b59b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -121,7 +121,8 @@ struct blkfront_info
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
- struct list_head persistent_gnts;
+ struct list_head grants;
+ struct list_head indirect_pages;
unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
@@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
if (!gnt_list_entry)
goto out_of_memory;
- granted_page = alloc_page(GFP_NOIO);
- if (!granted_page) {
- kfree(gnt_list_entry);
- goto out_of_memory;
+ if (info->feature_persistent) {
+ granted_page = alloc_page(GFP_NOIO);
+ if (!granted_page) {
+ kfree(gnt_list_entry);
+ goto out_of_memory;
+ }
+ gnt_list_entry->pfn = page_to_pfn(granted_page);
}
- gnt_list_entry->pfn = page_to_pfn(granted_page);
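+ /* Without persistent grants the pfn is filled in later by get_grant(),
+ * using the page that backs the actual request.
+ */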
gnt_list_entry->gref = GRANT_INVALID_REF;
- list_add(&gnt_list_entry->node, &info->persistent_gnts);
+ list_add(&gnt_list_entry->node, &info->grants);
i++;
}
@@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
- &info->persistent_gnts, node) {
+ &info->grants, node) {
list_del(&gnt_list_entry->node);
- __free_page(pfn_to_page(gnt_list_entry->pfn));
+ if (info->feature_persistent)
+ __free_page(pfn_to_page(gnt_list_entry->pfn));
kfree(gnt_list_entry);
i--;
}
@@ -227,13 +231,14 @@ out_of_memory:
}
static struct grant *get_grant(grant_ref_t *gref_head,
+ unsigned long pfn,
struct blkfront_info *info)
{
struct grant *gnt_list_entry;
unsigned long buffer_mfn;
- BUG_ON(list_empty(&info->persistent_gnts));
- gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+ BUG_ON(list_empty(&info->grants));
+ gnt_list_entry = list_first_entry(&info->grants, struct grant,
node);
list_del(&gnt_list_entry->node);
@@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
+ if (!info->feature_persistent) {
+ BUG_ON(!pfn);
+ gnt_list_entry->pfn = pfn;
+ }
buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
info->xbdev->otherend_id,
@@ -400,10 +409,13 @@ static int blkif_queue_request(struct request *req)
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
- max_grefs = info->max_indirect_segments ?
- info->max_indirect_segments +
- INDIRECT_GREFS(info->max_indirect_segments) :
- BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ max_grefs = req->nr_phys_segments;
+ if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ /*
+ * If we are using indirect segments we need to account
+ * for the indirect grefs used in the request.
+ */
+ max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
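+ /* INDIRECT_GREFS() accounts for the extra pages that hold the indirect
+ * descriptors themselves, one per SEGS_PER_INDIRECT_FRAME data segments
+ * (assuming the usual round-up definition of that macro).
+ */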
/* Check if we have enough grants to allocate a request */
if (info->persistent_gnts_c < max_grefs) {
@@ -477,22 +489,34 @@ static int blkif_queue_request(struct request *req)
if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
(i % SEGS_PER_INDIRECT_FRAME == 0)) {
+ unsigned long pfn;
+
if (segments)
kunmap_atomic(segments);
n = i / SEGS_PER_INDIRECT_FRAME;
- gnt_list_entry = get_grant(&gref_head, info);
+ if (!info->feature_persistent) {
+ struct page *indirect_page;
+
+ /* Fetch a pre-allocated page to use for indirect grefs */
+ BUG_ON(list_empty(&info->indirect_pages));
+ indirect_page = list_first_entry(&info->indirect_pages,
+ struct page, lru);
+ list_del(&indirect_page->lru);
+ pfn = page_to_pfn(indirect_page);
+ }
+ gnt_list_entry = get_grant(&gref_head, pfn, info);
info->shadow[id].indirect_grants[n] = gnt_list_entry;
segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
}
- gnt_list_entry = get_grant(&gref_head, info);
+ gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
ref = gnt_list_entry->gref;
info->shadow[id].grants_used[i] = gnt_list_entry;
- if (rq_data_dir(req)) {
+ if (rq_data_dir(req) && info->feature_persistent) {
char *bvec_data;
void *shared_data;
@@ -904,21 +928,36 @@ static void blkif_free(struct blkfront_info *info, int suspend)
blk_stop_queue(info->rq);
/* Remove all persistent grants */
- if (!list_empty(&info->persistent_gnts)) {
+ if (!list_empty(&info->grants)) {
list_for_each_entry_safe(persistent_gnt, n,
- &info->persistent_gnts, node) {
+ &info->grants, node) {
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
0, 0UL);
info->persistent_gnts_c--;
}
- __free_page(pfn_to_page(persistent_gnt->pfn));
+ if (info->feature_persistent)
+ __free_page(pfn_to_page(persistent_gnt->pfn));
kfree(persistent_gnt);
}
}
BUG_ON(info->persistent_gnts_c != 0);
+ /*
+ * Remove indirect pages; this only happens when using indirect
+ * descriptors but not persistent grants
+ */
+ if (!list_empty(&info->indirect_pages)) {
+ struct page *indirect_page, *n;
+
+ BUG_ON(info->feature_persistent);
+ list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+ list_del(&indirect_page->lru);
+ __free_page(indirect_page);
+ }
+ }
+
for (i = 0; i < BLK_RING_SIZE; i++) {
/*
* Clear persistent grants present in requests already
@@ -933,7 +972,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
for (j = 0; j < segs; j++) {
persistent_gnt = info->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
- __free_page(pfn_to_page(persistent_gnt->pfn));
+ if (info->feature_persistent)
+ __free_page(pfn_to_page(persistent_gnt->pfn));
kfree(persistent_gnt);
}
@@ -992,7 +1032,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
nseg = s->req.operation == BLKIF_OP_INDIRECT ?
s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
- if (bret->operation == BLKIF_OP_READ) {
+ if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
/*
* Copy the data received from the backend into the bvec.
* Since bv_offset can be different than 0, and bv_len different
@@ -1013,13 +1053,51 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < nseg; i++) {
- list_add(&s->grants_used[i]->node, &info->persistent_gnts);
- info->persistent_gnts_c++;
+ if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+ /*
+ * If the grant is still mapped by the backend (the
+ * backend has chosen to make this grant persistent)
+ * we add it at the head of the list, so it will be
+ * reused first.
+ */
+ if (!info->feature_persistent)
+ pr_alert_ratelimited("backend has not unmapped grant: %u\n",
+ s->grants_used[i]->gref);
+ list_add(&s->grants_used[i]->node, &info->grants);
+ info->persistent_gnts_c++;
+ } else {
+ /*
+ * If the grant is not mapped by the backend we end the
+ * foreign access and add it to the tail of the list,
+ * so it will not be picked again unless we run out of
+ * persistent grants.
+ */
+ gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
+ s->grants_used[i]->gref = GRANT_INVALID_REF;
+ list_add_tail(&s->grants_used[i]->node, &info->grants);
+ }
}
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
- list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
- info->persistent_gnts_c++;
+ if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
+ if (!info->feature_persistent)
+ pr_alert_ratelimited("backend has not unmapped grant: %u\n",
+ s->indirect_grants[i]->gref);
+ list_add(&s->indirect_grants[i]->node, &info->grants);
+ info->persistent_gnts_c++;
+ } else {
+ struct page *indirect_page;
+
+ gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+ /*
+ * Add the used indirect page back to the list of
+ * available pages for indirect grefs.
+ */
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+ list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+ }
}
}
}
@@ -1313,7 +1391,8 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
- INIT_LIST_HEAD(&info->persistent_gnts);
+ INIT_LIST_HEAD(&info->grants);
+ INIT_LIST_HEAD(&info->indirect_pages);
info->persistent_gnts_c = 0;
info->connected = BLKIF_STATE_DISCONNECTED;
INIT_WORK(&info->work, blkif_restart_queue);
@@ -1336,57 +1415,6 @@ static int blkfront_probe(struct xenbus_device *dev,
return 0;
}
-/*
- * This is a clone of md_trim_bio, used to split a bio into smaller ones
- */
-static void trim_bio(struct bio *bio, int offset, int size)
-{
- /* 'bio' is a cloned bio which we need to trim to match
- * the given offset and size.
- * This requires adjusting bi_sector, bi_size, and bi_io_vec
- */
- int i;
- struct bio_vec *bvec;
- int sofar = 0;
-
- size <<= 9;
- if (offset == 0 && size == bio->bi_size)
- return;
-
- bio->bi_sector += offset;
- bio->bi_size = size;
- offset <<= 9;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
- while (bio->bi_idx < bio->bi_vcnt &&
- bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
- /* remove this whole bio_vec */
- offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
- bio->bi_idx++;
- }
- if (bio->bi_idx < bio->bi_vcnt) {
- bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
- bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
- }
- /* avoid any complications with bi_idx being non-zero*/
- if (bio->bi_idx) {
- memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
- (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
- bio->bi_vcnt -= bio->bi_idx;
- bio->bi_idx = 0;
- }
- /* Make sure vcnt and last bv are not too big */
- bio_for_each_segment(bvec, bio, i) {
- if (sofar + bvec->bv_len > size)
- bvec->bv_len = size - sofar;
- if (bvec->bv_len == 0) {
- bio->bi_vcnt = i;
- break;
- }
- sofar += bvec->bv_len;
- }
-}
-
static void split_bio_end(struct bio *bio, int error)
{
struct split_bio *split_bio = bio->bi_private;
@@ -1522,7 +1550,7 @@ static int blkif_recover(struct blkfront_info *info)
(unsigned int)(bio->bi_size >> 9) - offset);
cloned_bio = bio_clone(bio, GFP_NOIO);
BUG_ON(cloned_bio == NULL);
- trim_bio(cloned_bio, offset, size);
+ bio_trim(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
submit_bio(cloned_bio->bi_rw, cloned_bio);
@@ -1660,6 +1688,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
if (err)
goto out_of_memory;
+ if (!info->feature_persistent && info->max_indirect_segments) {
+ /*
+ * We are using indirect descriptors but not persistent
+ * grants, so we need to allocate a set of pages that can be
+ * used for mapping indirect grefs
+ */
+ int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
+
+ BUG_ON(!list_empty(&info->indirect_pages));
+ for (i = 0; i < num; i++) {
+ struct page *indirect_page = alloc_page(GFP_NOIO);
+ if (!indirect_page)
+ goto out_of_memory;
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ }
+ }
+
for (i = 0; i < BLK_RING_SIZE; i++) {
info->shadow[i].grants_used = kzalloc(
sizeof(info->shadow[i].grants_used[0]) * segs,
@@ -1690,6 +1735,13 @@ out_of_memory:
kfree(info->shadow[i].indirect_grants);
info->shadow[i].indirect_grants = NULL;
}
+ if (!list_empty(&info->indirect_pages)) {
+ struct page *indirect_page, *n;
+ list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+ list_del(&indirect_page->lru);
+ __free_page(indirect_page);
+ }
+ }
return -ENOMEM;
}
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 4afae20df51..9fe8a875a82 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -30,3 +30,5 @@ hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 0a327f4154a..6bfc1bb318f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -57,7 +57,7 @@ struct ath3k_version {
unsigned char reserved[0x07];
};
-static struct usb_device_id ath3k_table[] = {
+static const struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 */
{ USB_DEVICE(0x0CF3, 0x3000) },
@@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
#define BTUSB_ATH3012 0x80
/* This table is used to load patch and sysconfig files
* for AR3012 */
-static struct usb_device_id ath3k_blist_tbl[] = {
+static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 995aee9cba2..31386998c9a 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -42,7 +42,7 @@
static struct usb_driver bfusb_driver;
-static struct usb_device_id bfusb_table[] = {
+static const struct usb_device_id bfusb_table[] = {
/* AVM BlueFRITZ! USB */
{ USB_DEVICE(0x057c, 0x2200) },
@@ -318,7 +318,6 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
return -ENOMEM;
}
- skb->dev = (void *) data->hdev;
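+ /* hci_recv_frame() now takes the hci_dev explicitly, so the receive path
+ * no longer stashes it in skb->dev; the same conversion is applied to the
+ * other drivers below.
+ */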
bt_cb(skb)->pkt_type = pkt_type;
data->reassembly = skb;
@@ -333,7 +332,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
memcpy(skb_put(data->reassembly, len), buf, len);
if (hdr & 0x08) {
- hci_recv_frame(data->reassembly);
+ hci_recv_frame(data->hdev, data->reassembly);
data->reassembly = NULL;
}
@@ -465,26 +464,18 @@ static int bfusb_close(struct hci_dev *hdev)
return 0;
}
-static int bfusb_send_frame(struct sk_buff *skb)
+static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct bfusb_data *data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
struct sk_buff *nskb;
unsigned char buf[3];
int sent = 0, size, count;
BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- data = hci_get_drvdata(hdev);
-
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@@ -544,11 +535,6 @@ static int bfusb_send_frame(struct sk_buff *skb)
return 0;
}
-static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
static int bfusb_load_firmware(struct bfusb_data *data,
const unsigned char *firmware, int count)
{
@@ -699,11 +685,10 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hci_set_drvdata(hdev, data);
SET_HCIDEV_DEV(hdev, &intf->dev);
- hdev->open = bfusb_open;
- hdev->close = bfusb_close;
- hdev->flush = bfusb_flush;
- hdev->send = bfusb_send_frame;
- hdev->ioctl = bfusb_ioctl;
+ hdev->open = bfusb_open;
+ hdev->close = bfusb_close;
+ hdev->flush = bfusb_flush;
+ hdev->send = bfusb_send_frame;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6c3e3d43c71..57427de864a 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -399,7 +399,6 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = buf[i];
switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -477,7 +476,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
break;
case RECV_WAIT_DATA:
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
info->rx_skb = NULL;
break;
@@ -659,17 +658,9 @@ static int bluecard_hci_close(struct hci_dev *hdev)
}
-static int bluecard_hci_send_frame(struct sk_buff *skb)
+static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- bluecard_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -693,12 +684,6 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
}
-static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -734,11 +719,10 @@ static int bluecard_open(bluecard_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = bluecard_hci_open;
- hdev->close = bluecard_hci_close;
- hdev->flush = bluecard_hci_flush;
- hdev->send = bluecard_hci_send_frame;
- hdev->ioctl = bluecard_hci_ioctl;
+ hdev->open = bluecard_hci_open;
+ hdev->close = bluecard_hci_close;
+ hdev->flush = bluecard_hci_flush;
+ hdev->send = bluecard_hci_send_frame;
id = inb(iobase + 0x30);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 2fe4a803134..8a319913c9a 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -37,7 +37,7 @@
#define VERSION "0.10"
-static struct usb_device_id bpa10x_table[] = {
+static const struct usb_device_id bpa10x_table[] = {
/* Tektronix BPA 100/105 (Digianswer) */
{ USB_DEVICE(0x08fd, 0x0002) },
@@ -129,8 +129,6 @@ static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
return -ENOMEM;
}
- skb->dev = (void *) hdev;
-
data->rx_skb[queue] = skb;
scb = (void *) skb->cb;
@@ -155,7 +153,7 @@ static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
data->rx_skb[queue] = NULL;
bt_cb(skb)->pkt_type = scb->type;
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
}
count -= len; buf += len;
@@ -352,9 +350,8 @@ static int bpa10x_flush(struct hci_dev *hdev)
return 0;
}
-static int bpa10x_send_frame(struct sk_buff *skb)
+static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct bpa10x_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
@@ -366,6 +363,8 @@ static int bpa10x_send_frame(struct sk_buff *skb)
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
+ skb->dev = (void *) hdev;
+
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a1aaa3ba2a4..73d87994d02 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -247,7 +247,6 @@ static void bt3c_receive(bt3c_info_t *info)
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
inb(iobase + DATA_H);
//printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
@@ -318,7 +317,7 @@ static void bt3c_receive(bt3c_info_t *info)
break;
case RECV_WAIT_DATA:
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
info->rx_skb = NULL;
break;
@@ -416,19 +415,11 @@ static int bt3c_hci_close(struct hci_dev *hdev)
}
-static int bt3c_hci_send_frame(struct sk_buff *skb)
+static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- bt3c_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+ bt3c_info_t *info = hci_get_drvdata(hdev);
unsigned long flags;
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
-
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@@ -455,12 +446,6 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
}
-static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -577,11 +562,10 @@ static int bt3c_open(bt3c_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = bt3c_hci_open;
- hdev->close = bt3c_hci_close;
- hdev->flush = bt3c_hci_flush;
- hdev->send = bt3c_hci_send_frame;
- hdev->ioctl = bt3c_hci_ioctl;
+ hdev->open = bt3c_hci_open;
+ hdev->close = bt3c_hci_close;
+ hdev->flush = bt3c_hci_flush;
+ hdev->send = bt3c_hci_send_frame;
/* Load firmware */
err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 27068d14938..f9d183387f4 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,6 +23,8 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
+#include <linux/ctype.h>
+#include <linux/firmware.h>
#define BTM_HEADER_LEN 4
#define BTM_UPLD_SIZE 2312
@@ -41,6 +43,8 @@ struct btmrvl_thread {
struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
+ struct device *dev;
+ const char *cal_data;
u8 dev_type;
@@ -91,6 +95,7 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_CONFIG 0x59
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
+#define BT_CMD_LOAD_CONFIG_DATA 0x61
/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
@@ -116,11 +121,8 @@ struct btmrvl_private {
#define PS_SLEEP 0x01
#define PS_AWAKE 0x00
-struct btmrvl_cmd {
- __le16 ocf_ogf;
- u8 length;
- u8 data[4];
-} __packed;
+#define BT_CMD_DATA_SIZE 32
+#define BT_CAL_DATA_SIZE 28
struct btmrvl_event {
u8 ec; /* event counter */
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 9a9f51875df..5cf31c4fe6d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -57,8 +57,7 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
ocf = hci_opcode_ocf(opcode);
ogf = hci_opcode_ogf(opcode);
- if (ocf == BT_CMD_MODULE_CFG_REQ &&
- priv->btmrvl_dev.sendcmdflag) {
+ if (priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
@@ -116,7 +115,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
adapter->hs_state = HS_ACTIVATED;
if (adapter->psmode)
adapter->ps_state = PS_SLEEP;
- wake_up_interruptible(&adapter->cmd_wait_q);
BT_DBG("HS ACTIVATED!");
} else {
BT_DBG("HS Enable failed");
@@ -168,45 +166,50 @@ exit:
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
+ const void *param, u8 len)
{
struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
- int ret = 0;
+ struct hci_command_hdr *hdr;
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+ skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
- cmd->length = 1;
- cmd->data[0] = subcmd;
+ hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
+ hdr->plen = len;
+
+ if (len)
+ memcpy(skb_put(skb, len), param, len);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
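+ /* The command skb is pushed to the head of the tx queue; the caller then
+ * sleeps on cmd_wait_q until btmrvl_check_evtpkt() marks the command
+ * complete or the wait times out.
+ */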
skb_queue_head(&priv->adapter->tx_queue, skb);
priv->btmrvl_dev.sendcmdflag = true;
priv->adapter->cmd_complete = false;
- BT_DBG("Queue module cfg Command");
-
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->cmd_complete,
- msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
- ret = -ETIMEDOUT;
- BT_ERR("module_cfg_cmd(%x): timeout: %d",
- subcmd, priv->btmrvl_dev.sendcmdflag);
- }
+ msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
- BT_DBG("module cfg Command done");
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+{
+ int ret;
+
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
+ if (ret)
+ BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
return ret;
}
@@ -214,61 +217,36 @@ EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
{
- struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (!skb) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
- BT_CMD_HOST_SLEEP_CONFIG));
- cmd->length = 2;
- cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
- cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+ int ret;
+ u8 param[2];
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+ param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+ param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
+ BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
+ param[0], param[1]);
- BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
- cmd->data[1]);
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
+ if (ret)
+ BT_ERR("HSCFG command failed\n");
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
- struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
- BT_CMD_AUTO_SLEEP_MODE));
- cmd->length = 1;
+ int ret;
+ u8 param;
if (priv->btmrvl_dev.psmode)
- cmd->data[0] = BT_PS_ENABLE;
+ param = BT_PS_ENABLE;
else
- cmd->data[0] = BT_PS_DISABLE;
+ param = BT_PS_DISABLE;
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
-
- BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
+ if (ret)
+ BT_ERR("PSMODE command failed\n");
return 0;
}
@@ -276,37 +254,11 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
int btmrvl_enable_hs(struct btmrvl_private *priv)
{
- struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
- int ret = 0;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
- cmd->length = 0;
-
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
-
- BT_DBG("Queue hs enable Command");
-
- wake_up_interruptible(&priv->main_thread.wait_q);
+ int ret;
- if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
- priv->adapter->hs_state,
- msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
- ret = -ETIMEDOUT;
- BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
- priv->adapter->ps_state,
- priv->adapter->wakeup_tries);
- }
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
+ if (ret)
+ BT_ERR("Host sleep enable command failed\n");
return ret;
}
@@ -403,26 +355,12 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
priv->adapter = NULL;
}
-static int btmrvl_ioctl(struct hci_dev *hdev,
- unsigned int cmd, unsigned long arg)
+static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- return -ENOIOCTLCMD;
-}
-
-static int btmrvl_send_frame(struct sk_buff *skb)
-{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btmrvl_private *priv = NULL;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device");
- return -ENODEV;
- }
-
- priv = hci_get_drvdata(hdev);
-
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
@@ -480,6 +418,137 @@ static int btmrvl_open(struct hci_dev *hdev)
}
/*
+ * This function parses the provided calibration data. It should contain
+ * hex bytes separated by spaces or newlines. Here is an example:
+ * 00 1C 01 37 FF FF FF FF 02 04 7F 01
+ * CE BA 00 00 00 2D C6 C0 00 00 00 00
+ * 00 F0 00 00
+ */
+static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
+{
+ const u8 *s = src;
+ u8 *d = dst;
+ int ret;
+ u8 tmp[3];
+
+ tmp[2] = '\0';
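+ /* tmp holds one two-digit hex byte at a time, NUL-terminated for kstrtou8() */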
+ while ((s - src) <= len - 2) {
+ if (isspace(*s)) {
+ s++;
+ continue;
+ }
+
+ if (isxdigit(*s)) {
+ if ((d - dst) >= dst_size) {
+ BT_ERR("calibration data file too big!!!");
+ return -EINVAL;
+ }
+
+ memcpy(tmp, s, 2);
+
+ ret = kstrtou8(tmp, 16, d++);
+ if (ret < 0)
+ return ret;
+
+ s += 2;
+ } else {
+ return -EINVAL;
+ }
+ }
+ if (d == dst)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int btmrvl_load_cal_data(struct btmrvl_private *priv,
+ u8 *config_data)
+{
+ int i, ret;
+ u8 data[BT_CMD_DATA_SIZE];
+
+ data[0] = 0x00;
+ data[1] = 0x00;
+ data[2] = 0x00;
+ data[3] = BT_CMD_DATA_SIZE - 4;
+
+ /* Swap cal-data bytes. Each four bytes are swapped. Considering 4
+ * byte SDIO header offset, mapping of input and output bytes will be
+ * {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
+ * {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
+ for (i = 4; i < BT_CMD_DATA_SIZE; i++)
+ data[i] = config_data[(i / 4) * 8 - 1 - i];
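+ /* e.g. data[4..7] = config_data[3,2,1,0], data[8..11] = config_data[7,6,5,4] */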
+
+ print_hex_dump_bytes("Calibration data: ",
+ DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
+
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
+ BT_CMD_DATA_SIZE);
+ if (ret)
+ BT_ERR("Failed to download caibration data\n");
+
+ return 0;
+}
+
+static int
+btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
+{
+ u8 cal_data[BT_CAL_DATA_SIZE];
+ int ret;
+
+ ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
+ if (ret)
+ return ret;
+
+ ret = btmrvl_load_cal_data(priv, cal_data);
+ if (ret) {
+ BT_ERR("Fail to load calibrate data");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int btmrvl_cal_data_config(struct btmrvl_private *priv)
+{
+ const struct firmware *cfg;
+ int ret;
+ const char *cal_data = priv->btmrvl_dev.cal_data;
+
+ if (!cal_data)
+ return 0;
+
+ ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
+ if (ret < 0) {
+ BT_DBG("Failed to get %s file, skipping cal data download",
+ cal_data);
+ return 0;
+ }
+
+ ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
+ release_firmware(cfg);
+ return ret;
+}
+
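+/* hdev->setup callback: bring the module up, push optional calibration data,
+ * then apply the power-save and host-sleep defaults that used to be set in
+ * btmrvl_sdio_probe() (see the hunk removed from btmrvl_sdio.c below).
+ */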
+static int btmrvl_setup(struct hci_dev *hdev)
+{
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
+
+ btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+ if (btmrvl_cal_data_config(priv))
+ BT_ERR("Set cal data failed");
+
+ priv->btmrvl_dev.psmode = 1;
+ btmrvl_enable_ps(priv);
+
+ priv->btmrvl_dev.gpio_gap = 0xffff;
+ btmrvl_send_hscfg_cmd(priv);
+
+ return 0;
+}
+
+/*
* This function handles the event generated by firmware, rx data
* received from firmware, and tx data sent from kernel.
*/
@@ -566,14 +635,12 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
priv->btmrvl_dev.hcidev = hdev;
hci_set_drvdata(hdev, priv);
- hdev->bus = HCI_SDIO;
- hdev->open = btmrvl_open;
+ hdev->bus = HCI_SDIO;
+ hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
hdev->flush = btmrvl_flush;
- hdev->send = btmrvl_send_frame;
- hdev->ioctl = btmrvl_ioctl;
-
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ hdev->send = btmrvl_send_frame;
+ hdev->setup = btmrvl_setup;
hdev->dev_type = priv->btmrvl_dev.dev_type;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 00da6df9f71..fabcf5bb48a 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -18,7 +18,6 @@
* this warranty disclaimer.
**/
-#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/mmc/sdio_ids.h>
@@ -102,6 +101,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
+ .cal_data = NULL,
.reg = &btmrvl_reg_8688,
.sd_blksz_fw_dl = 64,
};
@@ -109,6 +109,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
+ .cal_data = NULL,
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -116,6 +117,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.helper = NULL,
.firmware = "mrvl/sd8797_uapsta.bin",
+ .cal_data = "mrvl/sd8797_caldata.conf",
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -123,6 +125,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.helper = NULL,
.firmware = "mrvl/sd8897_uapsta.bin",
+ .cal_data = NULL,
.reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
@@ -597,15 +600,14 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
case HCI_SCODATA_PKT:
case HCI_EVENT_PKT:
bt_cb(skb)->pkt_type = type;
- skb->dev = (void *)hdev;
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
if (type == HCI_EVENT_PKT) {
if (btmrvl_check_evtpkt(priv, skb))
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
} else {
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
}
hdev->stat.byte_rx += buf_len;
@@ -613,12 +615,11 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
case MRVL_VENDOR_PKT:
bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
- skb->dev = (void *)hdev;
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
if (btmrvl_process_event(priv, skb))
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
hdev->stat.byte_rx += buf_len;
break;
@@ -1006,6 +1007,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
struct btmrvl_sdio_device *data = (void *) id->driver_data;
card->helper = data->helper;
card->firmware = data->firmware;
+ card->cal_data = data->cal_data;
card->reg = data->reg;
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
}
@@ -1034,6 +1036,8 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
}
card->priv = priv;
+ priv->btmrvl_dev.dev = &card->func->dev;
+ priv->btmrvl_dev.cal_data = card->cal_data;
/* Initialize the interface specific function pointers */
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@@ -1046,12 +1050,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
goto disable_host_int;
}
- priv->btmrvl_dev.psmode = 1;
- btmrvl_enable_ps(priv);
-
- priv->btmrvl_dev.gpio_gap = 0xffff;
- btmrvl_send_hscfg_cmd(priv);
-
return 0;
disable_host_int:
@@ -1222,4 +1220,5 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a609ca..6872d9ecac0 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -85,6 +85,7 @@ struct btmrvl_sdio_card {
u32 ioport;
const char *helper;
const char *firmware;
+ const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
u8 rx_unit;
@@ -94,6 +95,7 @@ struct btmrvl_sdio_card {
struct btmrvl_sdio_device {
const char *helper;
const char *firmware;
+ const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
};
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 4a990971387..b61440aaee6 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -157,10 +157,9 @@ static int btsdio_rx_packet(struct btsdio_data *data)
data->hdev->stat.byte_rx += len;
- skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = hdr[3];
- err = hci_recv_frame(skb);
+ err = hci_recv_frame(data->hdev, skb);
if (err < 0)
return err;
@@ -255,9 +254,8 @@ static int btsdio_flush(struct hci_dev *hdev)
return 0;
}
-static int btsdio_send_frame(struct sk_buff *skb)
+static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index beb262f2dc4..a03ecc22a56 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -198,7 +198,6 @@ static void btuart_receive(btuart_info_t *info)
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -265,7 +264,7 @@ static void btuart_receive(btuart_info_t *info)
break;
case RECV_WAIT_DATA:
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
info->rx_skb = NULL;
break;
@@ -424,17 +423,9 @@ static int btuart_hci_close(struct hci_dev *hdev)
}
-static int btuart_hci_send_frame(struct sk_buff *skb)
+static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- btuart_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
+ btuart_info_t *info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -458,12 +449,6 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
}
-static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -495,11 +480,10 @@ static int btuart_open(btuart_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = btuart_hci_open;
- hdev->close = btuart_hci_close;
- hdev->flush = btuart_hci_flush;
- hdev->send = btuart_hci_send_frame;
- hdev->ioctl = btuart_hci_ioctl;
+ hdev->open = btuart_hci_open;
+ hdev->close = btuart_hci_close;
+ hdev->flush = btuart_hci_flush;
+ hdev->send = btuart_hci_send_frame;
spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f3dfc0a88fd..c0ff34f2d2d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -50,7 +50,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_ATH3012 0x80
#define BTUSB_INTEL 0x100
-static struct usb_device_id btusb_table[] = {
+static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
@@ -121,7 +121,7 @@ static struct usb_device_id btusb_table[] = {
MODULE_DEVICE_TABLE(usb, btusb_table);
-static struct usb_device_id blacklist_table[] = {
+static const struct usb_device_id blacklist_table[] = {
/* CSR BlueCore devices */
{ USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR },
@@ -716,9 +716,8 @@ static int btusb_flush(struct hci_dev *hdev)
return 0;
}
-static int btusb_send_frame(struct sk_buff *skb)
+static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
@@ -730,6 +729,8 @@ static int btusb_send_frame(struct sk_buff *skb)
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
+ skb->dev = (void *) hdev;
+
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -774,7 +775,7 @@ static int btusb_send_frame(struct sk_buff *skb)
break;
case HCI_SCODATA_PKT:
- if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
+ if (!data->isoc_tx_ep || hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
@@ -833,8 +834,8 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
BT_DBG("%s evt %d", hdev->name, evt);
- if (hdev->conn_hash.sco_num != data->sco_num) {
- data->sco_num = hdev->conn_hash.sco_num;
+ if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
+ data->sco_num = hci_conn_num(hdev, SCO_LINK);
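+ /* cache the SCO link count so btusb_work() below can pick the isoc
+ * altsetting without touching hdev->conn_hash directly
+ */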
schedule_work(&data->work);
}
}
@@ -889,7 +890,7 @@ static void btusb_work(struct work_struct *work)
int new_alts;
int err;
- if (hdev->conn_hash.sco_num > 0) {
+ if (data->sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
if (err < 0) {
@@ -903,9 +904,9 @@ static void btusb_work(struct work_struct *work)
if (hdev->voice_setting & 0x0020) {
static const int alts[3] = { 2, 4, 5 };
- new_alts = alts[hdev->conn_hash.sco_num - 1];
+ new_alts = alts[data->sco_num - 1];
} else {
- new_alts = hdev->conn_hash.sco_num;
+ new_alts = data->sco_num;
}
if (data->isoc_altsetting != new_alts) {
@@ -1628,7 +1629,6 @@ static struct usb_driver btusb_driver = {
#ifdef CONFIG_PM
.suspend = btusb_suspend,
.resume = btusb_resume,
- .reset_resume = btusb_resume,
#endif
.id_table = btusb_table,
.supports_autosuspend = 1,
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 60abf596f60..f038dba19e3 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -108,10 +108,8 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
return -EFAULT;
}
- skb->dev = (void *) lhst->hdev;
-
/* Forward skb to HCI core layer */
- err = hci_recv_frame(skb);
+ err = hci_recv_frame(lhst->hdev, skb);
if (err < 0) {
BT_ERR("Unable to push skb to HCI core(%d)", err);
return err;
@@ -253,14 +251,11 @@ static int ti_st_close(struct hci_dev *hdev)
return err;
}
-static int ti_st_send_frame(struct sk_buff *skb)
+static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev;
struct ti_st *hst;
long len;
- hdev = (struct hci_dev *)skb->dev;
-
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 33f3a6950c0..52eed1f3565 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -256,9 +256,8 @@ static void dtl1_receive(dtl1_info_t *info)
case 0x83:
case 0x84:
/* send frame to the HCI layer */
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type &= 0x0f;
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
break;
default:
/* unknown packet */
@@ -383,20 +382,12 @@ static int dtl1_hci_close(struct hci_dev *hdev)
}
-static int dtl1_hci_send_frame(struct sk_buff *skb)
+static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- dtl1_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+ dtl1_info_t *info = hci_get_drvdata(hdev);
struct sk_buff *s;
nsh_t nsh;
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
-
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@@ -438,12 +429,6 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
}
-static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -477,11 +462,10 @@ static int dtl1_open(dtl1_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = dtl1_hci_open;
- hdev->close = dtl1_hci_close;
- hdev->flush = dtl1_hci_flush;
- hdev->send = dtl1_hci_send_frame;
- hdev->ioctl = dtl1_hci_ioctl;
+ hdev->open = dtl1_hci_open;
+ hdev->close = dtl1_hci_close;
+ hdev->flush = dtl1_hci_flush;
+ hdev->send = dtl1_hci_send_frame;
spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 57e502e0608..0bc87f7abd9 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -522,7 +522,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
- hci_recv_frame(bcsp->rx_skb);
+ hci_recv_frame(hu->hdev, bcsp->rx_skb);
} else {
BT_ERR ("Packet for unknown channel (%u %s)",
bcsp->rx_skb->data[1] & 0x0f,
@@ -536,7 +536,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
/* Pull out BCSP hdr */
skb_pull(bcsp->rx_skb, 4);
- hci_recv_frame(bcsp->rx_skb);
+ hci_recv_frame(hu->hdev, bcsp->rx_skb);
}
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -655,7 +655,6 @@ static int bcsp_recv(struct hci_uart *hu, void *data, int count)
bcsp->rx_count = 0;
return 0;
}
- bcsp->rx_skb->dev = (void *) hu->hdev;
break;
}
break;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 8ae9f1ea2bb..7048a583fe5 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -124,30 +124,6 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
-static inline int h4_check_data_len(struct h4_struct *h4, int len)
-{
- int room = skb_tailroom(h4->rx_skb);
-
- BT_DBG("len %d room %d", len, room);
-
- if (!len) {
- hci_recv_frame(h4->rx_skb);
- } else if (len > room) {
- BT_ERR("Data length is too large");
- kfree_skb(h4->rx_skb);
- } else {
- h4->rx_state = H4_W4_DATA;
- h4->rx_count = len;
- return len;
- }
-
- h4->rx_state = H4_W4_PACKET_TYPE;
- h4->rx_skb = NULL;
- h4->rx_count = 0;
-
- return 0;
-}
-
/* Recv data */
static int h4_recv(struct hci_uart *hu, void *data, int count)
{
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index b6154d5a07a..f6f49745056 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -340,7 +340,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
/* Remove Three-wire header */
skb_pull(h5->rx_skb, 4);
- hci_recv_frame(h5->rx_skb);
+ hci_recv_frame(hu->hdev, h5->rx_skb);
h5->rx_skb = NULL;
break;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index bc68a440d43..6e06f6f6915 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -234,21 +234,13 @@ static int hci_uart_close(struct hci_dev *hdev)
}
/* Send frames from HCI layer */
-static int hci_uart_send_frame(struct sk_buff *skb)
+static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev* hdev = (struct hci_dev *) skb->dev;
- struct hci_uart *hu;
-
- if (!hdev) {
- BT_ERR("Frame for unknown device (hdev=NULL)");
- return -ENODEV;
- }
+ struct hci_uart *hu = hci_get_drvdata(hdev);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- hu = hci_get_drvdata(hdev);
-
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
hu->proto->enqueue(hu, skb);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index cfc76793858..69a90b1b5ff 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -110,7 +110,6 @@ static int send_hcill_cmd(u8 cmd, struct hci_uart *hu)
/* prepare packet */
hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
hcill_packet->cmd = cmd;
- skb->dev = (void *) hu->hdev;
/* send packet */
skb_queue_tail(&ll->txq, skb);
@@ -346,14 +345,14 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
-static inline int ll_check_data_len(struct ll_struct *ll, int len)
+static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll, int len)
{
int room = skb_tailroom(ll->rx_skb);
BT_DBG("len %d room %d", len, room);
if (!len) {
- hci_recv_frame(ll->rx_skb);
+ hci_recv_frame(hdev, ll->rx_skb);
} else if (len > room) {
BT_ERR("Data length is too large");
kfree_skb(ll->rx_skb);
@@ -395,7 +394,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
switch (ll->rx_state) {
case HCILL_W4_DATA:
BT_DBG("Complete data");
- hci_recv_frame(ll->rx_skb);
+ hci_recv_frame(hu->hdev, ll->rx_skb);
ll->rx_state = HCILL_W4_PACKET_TYPE;
ll->rx_skb = NULL;
@@ -406,7 +405,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
- ll_check_data_len(ll, eh->plen);
+ ll_check_data_len(hu->hdev, ll, eh->plen);
continue;
case HCILL_W4_ACL_HDR:
@@ -415,7 +414,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_DBG("ACL header: dlen %d", dlen);
- ll_check_data_len(ll, dlen);
+ ll_check_data_len(hu->hdev, ll, dlen);
continue;
case HCILL_W4_SCO_HDR:
@@ -423,7 +422,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_DBG("SCO header: dlen %d", sh->dlen);
- ll_check_data_len(ll, sh->dlen);
+ ll_check_data_len(hu->hdev, ll, sh->dlen);
continue;
}
}
@@ -494,7 +493,6 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
return -ENOMEM;
}
- ll->rx_skb->dev = (void *) hu->hdev;
bt_cb(ll->rx_skb)->pkt_type = type;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index d8b7aed6e4a..7b167385a1c 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -39,17 +40,17 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define VERSION "1.3"
+#define VERSION "1.4"
static bool amp;
struct vhci_data {
struct hci_dev *hdev;
- unsigned long flags;
-
wait_queue_head_t read_wait;
struct sk_buff_head readq;
+
+ struct delayed_work open_timeout;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -80,35 +81,73 @@ static int vhci_flush(struct hci_dev *hdev)
return 0;
}
-static int vhci_send_frame(struct sk_buff *skb)
+static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev* hdev = (struct hci_dev *) skb->dev;
- struct vhci_data *data;
+ struct vhci_data *data = hci_get_drvdata(hdev);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -EBUSY;
+
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ skb_queue_tail(&data->readq, skb);
+
+ wake_up_interruptible(&data->read_wait);
+ return 0;
+}
+static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
+{
+ struct hci_dev *hdev;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdev = hci_alloc_dev();
if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
+ kfree_skb(skb);
+ return -ENOMEM;
}
- if (!test_bit(HCI_RUNNING, &hdev->flags))
+ data->hdev = hdev;
+
+ hdev->bus = HCI_VIRTUAL;
+ hdev->dev_type = dev_type;
+ hci_set_drvdata(hdev, data);
+
+ hdev->open = vhci_open_dev;
+ hdev->close = vhci_close_dev;
+ hdev->flush = vhci_flush;
+ hdev->send = vhci_send_frame;
+
+ if (hci_register_dev(hdev) < 0) {
+ BT_ERR("Can't register HCI device");
+ hci_free_dev(hdev);
+ data->hdev = NULL;
+ kfree_skb(skb);
return -EBUSY;
+ }
- data = hci_get_drvdata(hdev);
+ bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
- memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ *skb_put(skb, 1) = 0xff;
+ *skb_put(skb, 1) = dev_type;
+ put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
wake_up_interruptible(&data->read_wait);
-
return 0;
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
- const char __user *buf, size_t count)
+ const char __user *buf, size_t count)
{
struct sk_buff *skb;
+ __u8 pkt_type, dev_type;
+ int ret;
- if (count > HCI_MAX_FRAME_SIZE)
+ if (count < 2 || count > HCI_MAX_FRAME_SIZE)
return -EINVAL;
skb = bt_skb_alloc(count, GFP_KERNEL);
@@ -120,27 +159,69 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
return -EFAULT;
}
- skb->dev = (void *) data->hdev;
- bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
+ pkt_type = *((__u8 *) skb->data);
skb_pull(skb, 1);
- hci_recv_frame(skb);
+ switch (pkt_type) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ if (!data->hdev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ bt_cb(skb)->pkt_type = pkt_type;
+
+ ret = hci_recv_frame(data->hdev, skb);
+ break;
- return count;
+ case HCI_VENDOR_PKT:
+ if (data->hdev) {
+ kfree_skb(skb);
+ return -EBADFD;
+ }
+
+ cancel_delayed_work_sync(&data->open_timeout);
+
+ dev_type = *((__u8 *) skb->data);
+ skb_pull(skb, 1);
+
+ if (skb->len > 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ kfree_skb(skb);
+
+ if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+ return -EINVAL;
+
+ ret = vhci_create_device(data, dev_type);
+ break;
+
+ default:
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ return (ret < 0) ? ret : count;
}
static inline ssize_t vhci_put_user(struct vhci_data *data,
- struct sk_buff *skb, char __user *buf, int count)
+ struct sk_buff *skb,
+ char __user *buf, int count)
{
char __user *ptr = buf;
- int len, total = 0;
+ int len;
len = min_t(unsigned int, skb->len, count);
if (copy_to_user(ptr, skb->data, len))
return -EFAULT;
- total += len;
+ if (!data->hdev)
+ return len;
data->hdev->stat.byte_tx += len;
@@ -148,21 +229,19 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
case HCI_COMMAND_PKT:
data->hdev->stat.cmd_tx++;
break;
-
case HCI_ACLDATA_PKT:
data->hdev->stat.acl_tx++;
break;
-
case HCI_SCODATA_PKT:
data->hdev->stat.sco_tx++;
break;
}
- return total;
+ return len;
}
static ssize_t vhci_read(struct file *file,
- char __user *buf, size_t count, loff_t *pos)
+ char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
struct sk_buff *skb;
@@ -185,7 +264,7 @@ static ssize_t vhci_read(struct file *file,
}
ret = wait_event_interruptible(data->read_wait,
- !skb_queue_empty(&data->readq));
+ !skb_queue_empty(&data->readq));
if (ret < 0)
break;
}
@@ -194,7 +273,7 @@ static ssize_t vhci_read(struct file *file,
}
static ssize_t vhci_write(struct file *file,
- const char __user *buf, size_t count, loff_t *pos)
+ const char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
@@ -213,10 +292,17 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
return POLLOUT | POLLWRNORM;
}
+static void vhci_open_timeout(struct work_struct *work)
+{
+ struct vhci_data *data = container_of(work, struct vhci_data,
+ open_timeout.work);
+
+ vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
+}
+
static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
- struct hci_dev *hdev;
data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
if (!data)
@@ -225,35 +311,13 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);
- hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->hdev = hdev;
-
- hdev->bus = HCI_VIRTUAL;
- hci_set_drvdata(hdev, data);
-
- if (amp)
- hdev->dev_type = HCI_AMP;
-
- hdev->open = vhci_open_dev;
- hdev->close = vhci_close_dev;
- hdev->flush = vhci_flush;
- hdev->send = vhci_send_frame;
-
- if (hci_register_dev(hdev) < 0) {
- BT_ERR("Can't register HCI device");
- kfree(data);
- hci_free_dev(hdev);
- return -EBUSY;
- }
+ INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
file->private_data = data;
nonseekable_open(inode, file);
+ schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+
return 0;
}
@@ -262,8 +326,12 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
+ cancel_delayed_work_sync(&data->open_timeout);
+
+ if (hdev) {
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
file->private_data = NULL;
kfree(data);
@@ -309,3 +377,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("devname:vhci");
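The vhci rework above turns the first write into a small control protocol: if the first frame is an HCI_VENDOR_PKT carrying a device type, the controller is created on demand (otherwise a default one appears after the one-second open timeout), and the driver answers with a vendor packet holding the new controller index. A hypothetical user-space sketch of that handshake follows; the /dev/vhci node name and the raw 0xff/0x00 values (taken to be HCI_VENDOR_PKT and HCI_BREDR) are assumptions based on this diff rather than on a shipped header.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint8_t create[2] = { 0xff, 0x00 };	/* HCI_VENDOR_PKT, then HCI_BREDR */
	uint8_t resp[16];
	ssize_t n;
	int fd = open("/dev/vhci", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, create, sizeof(create)) != sizeof(create))
		return 1;

	n = read(fd, resp, sizeof(resp));	/* expect: 0xff, dev_type, le16 index */
	if (n >= 4)
		printf("created hci%d\n", resp[2] | (resp[3] << 8));

	close(fd);
	return 0;
}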
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 20092669977..b6739cb78e3 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -18,11 +18,21 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
#include <asm/smp_plat.h>
+#define DRIVER_NAME "CCI-400"
+#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
+#define PMU_NAME "CCI_400"
+
#define CCI_PORT_CTRL 0x0
#define CCI_CTRL_STATUS 0xc
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports;
static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI_PMCR 0x0100
+#define CCI_PID2 0x0fe8
+
+#define CCI_PMCR_CEN 0x00000001
+#define CCI_PMCR_NCNT_MASK 0x0000f800
+#define CCI_PMCR_NCNT_SHIFT 11
+
+#define CCI_PID2_REV_MASK 0xf0
+#define CCI_PID2_REV_SHIFT 4
+
+/* Port ids */
+#define CCI_PORT_S0 0
+#define CCI_PORT_S1 1
+#define CCI_PORT_S2 2
+#define CCI_PORT_S3 3
+#define CCI_PORT_S4 4
+#define CCI_PORT_M0 5
+#define CCI_PORT_M1 6
+#define CCI_PORT_M2 7
+
+#define CCI_REV_R0 0
+#define CCI_REV_R1 1
+#define CCI_REV_R0_P4 4
+#define CCI_REV_R1_P2 6
+
+#define CCI_PMU_EVT_SEL 0x000
+#define CCI_PMU_CNTR 0x004
+#define CCI_PMU_CNTR_CTRL 0x008
+#define CCI_PMU_OVRFLW 0x00c
+
+#define CCI_PMU_OVRFLW_FLAG 1
+
+#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+ CCI_PMU_CYCLES = 0xff
+};
+
+#define CCI_PMU_EVENT_MASK 0xff
+#define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event) (event & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI_PMU_CYCLE_CNTR_IDX 0
+#define CCI_PMU_CNTR0_IDX 1
+#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
+
+struct pmu_port_event_ranges {
+ u8 slave_min;
+ u8 slave_max;
+ u8 master_min;
+ u8 master_max;
+};
+
+static struct pmu_port_event_ranges port_event_range[] = {
+ [CCI_REV_R0] = {
+ .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
+ .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
+ .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
+ .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
+ },
+ [CCI_REV_R1] = {
+ .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
+ .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
+ .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
+ .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
+ },
+};
+
+struct cci_pmu_drv_data {
+ void __iomem *base;
+ struct arm_pmu *cci_pmu;
+ int nr_irqs;
+ int irqs[CCI_PMU_MAX_HW_EVENTS];
+ unsigned long active_irqs;
+ struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+ struct pmu_port_event_ranges *port_ranges;
+ struct pmu_hw_events hw_events;
+};
+static struct cci_pmu_drv_data *pmu;
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++)
+ if (irq == irqs[i])
+ return true;
+
+ return false;
+}
+
+static int probe_cci_revision(void)
+{
+ int rev;
+ rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+ rev >>= CCI_PID2_REV_SHIFT;
+
+ if (rev <= CCI_REV_R0_P4)
+ return CCI_REV_R0;
+ else if (rev <= CCI_REV_R1_P2)
+ return CCI_REV_R1;
+
+ return -ENOENT;
+}
+
+static struct pmu_port_event_ranges *port_range_by_rev(void)
+{
+ int rev = probe_cci_revision();
+
+ if (rev < 0)
+ return NULL;
+
+ return &port_event_range[rev];
+}
+
+static int pmu_is_valid_slave_event(u8 ev_code)
+{
+ return pmu->port_ranges->slave_min <= ev_code &&
+ ev_code <= pmu->port_ranges->slave_max;
+}
+
+static int pmu_is_valid_master_event(u8 ev_code)
+{
+ return pmu->port_ranges->master_min <= ev_code &&
+ ev_code <= pmu->port_ranges->master_max;
+}
+
+static int pmu_validate_hw_event(u8 hw_event)
+{
+ u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+ u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+
+ switch (ev_source) {
+ case CCI_PORT_S0:
+ case CCI_PORT_S1:
+ case CCI_PORT_S2:
+ case CCI_PORT_S3:
+ case CCI_PORT_S4:
+ /* Slave Interface */
+ if (pmu_is_valid_slave_event(ev_code))
+ return hw_event;
+ break;
+ case CCI_PORT_M0:
+ case CCI_PORT_M1:
+ case CCI_PORT_M2:
+ /* Master Interface */
+ if (pmu_is_valid_master_event(ev_code))
+ return hw_event;
+ break;
+ }
+
+ return -ENOENT;
+}
+
+static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+{
+ return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+ idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+ return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+ return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+ pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+ pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+ event &= CCI_PMU_EVENT_MASK;
+ pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+ u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+ CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+ /* add 1 for cycle counter */
+ return n_cnts + 1;
+}
+
+static struct pmu_hw_events *pmu_get_hw_events(void)
+{
+ return &pmu->hw_events;
+}
+
+static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_event = &event->hw;
+ unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+ int idx;
+
+ if (cci_event == CCI_PMU_CYCLES) {
+ if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+ return -EAGAIN;
+
+ return CCI_PMU_CYCLE_CNTR_IDX;
+ }
+
+ for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+ if (!test_and_set_bit(idx, hw->used_mask))
+ return idx;
+
+ /* No counters available */
+ return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+ int mapping;
+ u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+
+ if (event->attr.type < PERF_TYPE_MAX)
+ return -ENOENT;
+
+ if (config == CCI_PMU_CYCLES)
+ mapping = config;
+ else
+ mapping = pmu_validate_hw_event(config);
+
+ return mapping;
+}
+
+static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+ int i;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ if (unlikely(!pmu_device))
+ return -ENODEV;
+
+ if (pmu->nr_irqs < 1) {
+ dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Register all available CCI PMU interrupts. In the interrupt handler
+ * we iterate over the counters checking for interrupt source (the
+ * overflowing counter) and clear it.
+ *
+ * This should allow handling of non-unique interrupt for the counters.
+ */
+ for (i = 0; i < pmu->nr_irqs; i++) {
+ int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+ "arm-cci-pmu", cci_pmu);
+ if (err) {
+ dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+ pmu->irqs[i]);
+ return err;
+ }
+
+ set_bit(i, &pmu->active_irqs);
+ }
+
+ return 0;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ int idx, handled = IRQ_NONE;
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ regs = get_irq_regs();
+ /*
+ * Iterate over counters and update the corresponding perf events.
+ * This should work regardless of whether we have per-counter overflow
+ * interrupt or a combined overflow interrupt.
+ */
+ for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+ struct perf_event *event = events->events[idx];
+ struct hw_perf_event *hw_counter;
+
+ if (!event)
+ continue;
+
+ hw_counter = &event->hw;
+
+ /* Did this counter overflow? */
+ if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG))
+ continue;
+
+ pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+ handled = IRQ_HANDLED;
+
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hw_counter->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ cci_pmu->disable(event);
+ }
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+ int i;
+
+ for (i = 0; i < pmu->nr_irqs; i++) {
+ if (!test_and_clear_bit(i, &pmu->active_irqs))
+ continue;
+
+ free_irq(pmu->irqs[i], cci_pmu);
+ }
+}
+
+static void pmu_enable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Configure the event to count, unless you are counting cycles */
+ if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+ pmu_set_event(idx, hw_counter->config_base);
+
+ pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_disable_event(struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ pmu_disable_counter(idx);
+}
+
+static void pmu_start(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Enable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_stop(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+ u32 value;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return 0;
+ }
+ value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+ return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ else
+ pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+{
+ *cci_pmu = (struct arm_pmu){
+ .name = PMU_NAME,
+ .max_period = (1LLU << 32) - 1,
+ .get_hw_events = pmu_get_hw_events,
+ .get_event_idx = pmu_get_event_idx,
+ .map_event = pmu_map_event,
+ .request_irq = pmu_request_irq,
+ .handle_irq = pmu_handle_irq,
+ .free_irq = pmu_free_irq,
+ .enable = pmu_enable_event,
+ .disable = pmu_disable_event,
+ .start = pmu_start,
+ .stop = pmu_stop,
+ .read_counter = pmu_read_counter,
+ .write_counter = pmu_write_counter,
+ };
+
+ cci_pmu->plat_device = pdev;
+ cci_pmu->num_events = pmu_get_max_counters();
+
+ return armpmu_register(cci_pmu, -1);
+}
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+ {
+ .compatible = "arm,cci-400-pmu",
+ },
+ {},
+};
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int i, ret, irq;
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmu->base))
+ return -ENOMEM;
+
+ /*
+ * CCI PMU has 5 overflow signals - one per counter; but some may be tied
+ * together to a common interrupt.
+ */
+ pmu->nr_irqs = 0;
+ for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+
+ if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+ continue;
+
+ pmu->irqs[pmu->nr_irqs++] = irq;
+ }
+
+ /*
+ * Ensure that the device tree has as many interrupts as the number
+ * of counters.
+ */
+ if (i < CCI_PMU_MAX_HW_EVENTS) {
+ dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
+ i, CCI_PMU_MAX_HW_EVENTS);
+ return -EINVAL;
+ }
+
+ pmu->port_ranges = port_range_by_rev();
+ if (!pmu->port_ranges) {
+ dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+ return -EINVAL;
+ }
+
+ pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
+ if (!pmu->cci_pmu)
+ return -ENOMEM;
+
+ pmu->hw_events.events = pmu->events;
+ pmu->hw_events.used_mask = pmu->used_mask;
+ raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+
+ ret = cci_pmu_init(pmu->cci_pmu, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+ if (!cci_probed())
+ return -ENODEV;
+
+ return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
struct cpu_port {
u64 mpidr;
u32 port;
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);
-static void __init cci_ace_init_ports(void)
+static void cci_ace_init_ports(void)
{
int port, cpu;
struct device_node *cpun;
@@ -280,7 +852,7 @@ asmlinkage void __naked cci_enable_port_for_self(void)
/* Enable the CCI port */
" ldr r0, [r0, %[offsetof_port_phys]] \n"
-" mov r3, #"__stringify(CCI_ENABLE_REQ)" \n"
+" mov r3, %[cci_enable_req]\n"
" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
/* poll the status reg for completion */
@@ -288,7 +860,7 @@ asmlinkage void __naked cci_enable_port_for_self(void)
" ldr r0, [r1] \n"
" ldr r0, [r0, r1] @ cci_ctrl_base \n"
"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
-" tst r1, #1 \n"
+" tst r1, %[cci_control_status_bits] \n"
" bne 4b \n"
" mov r0, #0 \n"
@@ -301,6 +873,8 @@ asmlinkage void __naked cci_enable_port_for_self(void)
"7: .word cci_ctrl_phys - . \n"
: :
[sizeof_cpu_port] "i" (sizeof(cpu_port)),
+ [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+ [cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
@@ -386,7 +960,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = {
{},
};
-static int __init cci_probe(void)
+static int cci_probe(void)
{
struct cci_nb_ports const *cci_config;
int ret, i, nb_ace = 0, nb_ace_lite = 0;
@@ -490,7 +1064,7 @@ memalloc_err:
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);
-static int __init cci_init(void)
+static int cci_init(void)
{
if (cci_init_status != -EAGAIN)
return cci_init_status;
@@ -502,18 +1076,55 @@ static int __init cci_init(void)
return cci_init_status;
}
+#ifdef CONFIG_HW_PERF_EVENTS
+static struct platform_driver cci_pmu_driver = {
+ .driver = {
+ .name = DRIVER_NAME_PMU,
+ .of_match_table = arm_cci_pmu_matches,
+ },
+ .probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cci_pmu_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&cci_platform_driver);
+}
+
+#else
+
+static int __init cci_platform_init(void)
+{
+ return 0;
+}
+
+#endif
/*
 * To sort out early init call ordering, a helper function is provided to
 * check whether the CCI driver has been initialized. The function checks if the driver
 * has been initialized and, if not, calls the init function that probes
* the driver and updates the return value.
*/
-bool __init cci_probed(void)
+bool cci_probed(void)
{
return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);
early_initcall(cci_init);
+core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 14219972c74..fa3243d71c7 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -522,10 +522,16 @@ config HPET_MMAP
If you say Y here, user applications will be able to mmap
the HPET registers.
+config HPET_MMAP_DEFAULT
+ bool "Enable HPET MMAP access by default"
+ default y
+ depends on HPET_MMAP
+ help
In some hardware implementations, the page containing HPET
registers may also contain other things that shouldn't be
- exposed to the user. If this applies to your hardware,
- say N here.
+ exposed to the user. This option selects the default (if
+ kernel parameter hpet_mmap is not set) user access to the
+ registers for applications that require it.
config HANGCHECK_TIMER
tristate "Hangcheck timer"
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 0671e45daa5..8fedbc25041 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 448ce5e29c5..5d9c31dfc90 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -367,12 +367,29 @@ static unsigned int hpet_poll(struct file *file, poll_table * wait)
return 0;
}
+#ifdef CONFIG_HPET_MMAP
+#ifdef CONFIG_HPET_MMAP_DEFAULT
+static int hpet_mmap_enabled = 1;
+#else
+static int hpet_mmap_enabled = 0;
+#endif
+
+static __init int hpet_mmap_enable(char *str)
+{
+ get_option(&str, &hpet_mmap_enabled);
+ pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
+ return 1;
+}
+__setup("hpet_mmap", hpet_mmap_enable);
+
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
-#ifdef CONFIG_HPET_MMAP
struct hpet_dev *devp;
unsigned long addr;
+ if (!hpet_mmap_enabled)
+ return -EACCES;
+
devp = file->private_data;
addr = devp->hd_hpets->hp_hpet_phys;
@@ -381,10 +398,13 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return vm_iomap_memory(vma, addr, PAGE_SIZE);
+}
#else
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
return -ENOSYS;
-#endif
}
+#endif
static int hpet_fasync(int fd, struct file *file, int on)
{
@@ -486,8 +506,7 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
}
sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
- irq_flags = devp->hd_flags & HPET_SHARED_IRQ
- ? IRQF_SHARED : IRQF_DISABLED;
+ irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
if (request_irq(irq, hpet_interrupt, irq_flags,
devp->hd_name, (void *)devp)) {
printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
@@ -971,8 +990,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
struct acpi_resource_fixed_memory32 *fixmem32;
fixmem32 = &res->data.fixed_memory32;
- if (!fixmem32)
- return AE_NO_MEMORY;
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
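With the hpet.c change above, mmap() of the HPET registers becomes a runtime decision: CONFIG_HPET_MMAP_DEFAULT picks the default and the hpet_mmap= boot parameter overrides it, with disabled access reported as -EACCES. A hypothetical user-space probe for that case, assuming the usual /dev/hpet node and a 4 KiB register page:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/hpet", O_RDONLY);
	void *p;

	if (fd < 0)
		return 1;

	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED && errno == EACCES)
		printf("HPET register mmap disabled (try booting with hpet_mmap=1)\n");
	else if (p != MAP_FAILED)
		munmap(p, 4096);

	close(fd);
	return 0;
}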
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 0aa9d91daef..c206de2951f 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -290,6 +290,19 @@ config HW_RANDOM_PSERIES
If unsure, say Y.
+config HW_RANDOM_POWERNV
+ tristate "PowerNV Random Number Generator support"
+ depends on HW_RANDOM && PPC_POWERNV
+ default HW_RANDOM
+ ---help---
+ This is the driver for Random Number Generator hardware found
+ in POWER7+ and above machines on the PowerNV platform.
+
+ To compile this driver as a module, choose M here: the
+ module will be called powernv-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index bed467c9300..d7d2435ff7f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
+obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c6df5b29af0..c66279bb6ef 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
new file mode 100644
index 00000000000..3f4f6320456
--- /dev/null
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ unsigned long *buf;
+ int i, len;
+
+ /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */
+ len = max / sizeof(unsigned long);
+
+ buf = (unsigned long *)data;
+
+ for (i = 0; i < len; i++)
+ powernv_get_random_long(buf++);
+
+ return len * sizeof(unsigned long);
+}
+
+static struct hwrng powernv_hwrng = {
+ .name = "powernv-rng",
+ .read = powernv_rng_read,
+};
+
+static int powernv_rng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&powernv_hwrng);
+
+ return 0;
+}
+
+static int powernv_rng_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ rc = hwrng_register(&powernv_hwrng);
+ if (rc) {
+ /* We only register one device, ignore any others */
+ if (rc == -EEXIST)
+ rc = -ENODEV;
+
+ return rc;
+ }
+
+ pr_info("Registered powernv hwrng.\n");
+
+ return 0;
+}
+
+static struct of_device_id powernv_rng_match[] = {
+ { .compatible = "ibm,power-rng",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, powernv_rng_match);
+
+static struct platform_driver powernv_rng_driver = {
+ .driver = {
+ .name = "powernv_rng",
+ .of_match_table = powernv_rng_match,
+ },
+ .probe = powernv_rng_probe,
+ .remove = powernv_rng_remove,
+};
+module_platform_driver(powernv_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index 732c330805f..521f76b0934 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 5f1197929f0..b761459a343 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -17,6 +17,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hw_random.h>
#include <asm/vio.h>
@@ -25,10 +28,15 @@
static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
{
- if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
- printk(KERN_ERR "pseries rng hcall error\n");
- return 0;
+ int rc;
+
+ rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+ if (rc != H_SUCCESS) {
+ pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
+ return -EIO;
}
+
+ /* The hypervisor interface returns 64 bits */
return 8;
}
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index d2120ba8f3f..73ce739f8e1 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -79,7 +79,7 @@ static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
priv->expires = cur + delay;
priv->present = 0;
- INIT_COMPLETION(priv->completion);
+ reinit_completion(&priv->completion);
mod_timer(&priv->timer, priv->expires);
return 4;
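The timeriomem-rng change above is part of a tree-wide rename in which the INIT_COMPLETION(x) macro becomes reinit_completion(&x) when an already-initialized completion is reused. A minimal sketch of the reuse pattern, with a hypothetical example_ name and assuming only <linux/completion.h>:

#include <linux/completion.h>

static DECLARE_COMPLETION(example_done);

static void example_wait_again(void)
{
	/* reset the done counter before arming the next wait */
	reinit_completion(&example_done);
	wait_for_completion(&example_done);
}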
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index ef46a9cfd83..c12398d1517 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -133,7 +133,7 @@ static void virtrng_remove(struct virtio_device *vdev)
remove_common(vdev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtrng_freeze(struct virtio_device *vdev)
{
remove_common(vdev);
@@ -157,7 +157,7 @@ static struct virtio_driver virtio_rng_driver = {
.id_table = id_table,
.probe = virtrng_probe,
.remove = virtrng_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtrng_freeze,
.restore = virtrng_restore,
#endif
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 190d4423653..ffa97d261cf 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -114,7 +114,7 @@ static int misc_open(struct inode * inode, struct file * file)
int minor = iminor(inode);
struct miscdevice *c;
int err = -ENODEV;
- const struct file_operations *old_fops, *new_fops = NULL;
+ const struct file_operations *new_fops = NULL;
mutex_lock(&misc_mtx);
@@ -141,17 +141,11 @@ static int misc_open(struct inode * inode, struct file * file)
}
err = 0;
- old_fops = file->f_op;
- file->f_op = new_fops;
+ replace_fops(file, new_fops);
if (file->f_op->open) {
file->private_data = c;
- err=file->f_op->open(inode,file);
- if (err) {
- fops_put(file->f_op);
- file->f_op = fops_get(old_fops);
- }
+ err = file->f_op->open(inode,file);
}
- fops_put(old_fops);
fail:
mutex_unlock(&misc_mtx);
return err;
@@ -193,8 +187,8 @@ int misc_register(struct miscdevice * misc)
if (misc->minor == MISC_DYNAMIC_MINOR) {
int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
if (i >= DYNAMIC_MINORS) {
- mutex_unlock(&misc_mtx);
- return -EBUSY;
+ err = -EBUSY;
+ goto out;
}
misc->minor = DYNAMIC_MINORS - i - 1;
set_bit(i, misc_minors);
@@ -203,8 +197,8 @@ int misc_register(struct miscdevice * misc)
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
- mutex_unlock(&misc_mtx);
- return -EBUSY;
+ err = -EBUSY;
+ goto out;
}
}
}
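The misc_open() rework above leans on the replace_fops() helper used elsewhere in this tree, which installs the new file_operations and drops the reference on the old ones in one step, removing the save/restore dance around ->open(). A minimal sketch of that demultiplexing-open pattern with a hypothetical example_fops:

#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations example_fops;	/* the real ops to switch to */

static int example_demux_open(struct inode *inode, struct file *file)
{
	const struct file_operations *new_fops = fops_get(&example_fops);

	if (!new_fops)
		return -ENODEV;

	replace_fops(file, new_fops);		/* takes over the reference */
	if (file->f_op->open)
		return file->f_op->open(inode, file);
	return 0;
}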
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index cfdfe493c6a..1fd00dc0689 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -220,7 +220,7 @@ static int __init nwbutton_init(void)
return -EBUSY;
}
- if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, IRQF_DISABLED,
+ if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, 0,
"nwbutton", NULL)) {
printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n",
IRQ_NETWINDER_BUTTON);
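The random.c rework that follows keeps entropy_count in fractional units of 1/8 bit (ENTROPY_SHIFT == 3) so partial-bit credits can be tracked without floating point. A stand-alone sketch of that fixed-point bookkeeping, using only definitions visible in the diff:

#define ENTROPY_SHIFT 3

static int entropy_count;			/* in 1/8th-bit units */

static inline int entropy_bits(void)
{
	return entropy_count >> ENTROPY_SHIFT;	/* whole bits, rounded down */
}

static inline void credit_whole_bits(int nbits)
{
	entropy_count += nbits << ENTROPY_SHIFT;	/* bits -> eighths */
}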
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7a744d39175..429b75bb60e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -255,6 +255,7 @@
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/kmemcheck.h>
+#include <linux/workqueue.h>
#include <linux/irq.h>
#include <asm/processor.h>
@@ -269,14 +270,28 @@
/*
* Configuration information
*/
-#define INPUT_POOL_WORDS 128
-#define OUTPUT_POOL_WORDS 32
-#define SEC_XFER_SIZE 512
-#define EXTRACT_SIZE 10
+#define INPUT_POOL_SHIFT 12
+#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
+#define OUTPUT_POOL_SHIFT 10
+#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
+#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
+
+#define DEBUG_RANDOM_BOOT 0
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
/*
+ * To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits.
+ *
+ * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+ * credit_entropy_bits() needs to be 64 bits wide.
+ */
+#define ENTROPY_SHIFT 3
+#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+
+/*
* The minimum number of bits of entropy before we wake up a read on
* /dev/random. Should be enough to do a significant reseed.
*/
@@ -287,108 +302,100 @@ static int random_read_wakeup_thresh = 64;
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
-static int random_write_wakeup_thresh = 128;
+static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
/*
- * When the input pool goes over trickle_thresh, start dropping most
- * samples to avoid wasting CPU time and reduce lock contention.
+ * The minimum number of seconds between urandom pool reseeding. We
+ * do this to limit the amount of entropy that can be drained from the
+ * input pool even if there are heavy demands on /dev/urandom.
*/
-
-static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
-
-static DEFINE_PER_CPU(int, trickle_count);
+static int random_min_urandom_seed = 60;
/*
- * A pool of size .poolwords is stirred with a primitive polynomial
- * of degree .poolwords over GF(2). The taps for various sizes are
- * defined below. They are chosen to be evenly spaced (minimum RMS
- * distance from evenly spaced; the numbers in the comments are a
- * scaled squared error sum) except for the last tap, which is 1 to
- * get the twisting happening as fast as possible.
+ * Originally, we used a primitive polynomial of degree .poolwords
+ * over GF(2). The taps for various sizes are defined below. They
+ * were chosen to be evenly spaced except for the last tap, which is 1
+ * to get the twisting happening as fast as possible.
+ *
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a (modified) twisted Generalized Feedback Shift
+ * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
+ * generators. ACM Transactions on Modeling and Computer Simulation
+ * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
+ * GFSR generators II. ACM Transactions on Modeling and Computer
+ * Simulation 4:254-266)
+ *
+ * Thanks to Colin Plumb for suggesting this.
+ *
+ * The mixing operation is much less sensitive than the output hash,
+ * where we use SHA-1. All that we want of the mixing operation is that
+ * it be a good non-cryptographic hash; i.e. it not produce collisions
+ * when fed "random" data of the sort we expect to see. As long as
+ * the pool state differs for different inputs, we have preserved the
+ * input entropy and done a good job. The fact that an intelligent
+ * attacker can construct inputs that will produce controlled
+ * alterations to the pool's state is not important because we don't
+ * consider such inputs to contribute any randomness. The only
+ * property we need with respect to them is that the attacker can't
+ * increase his/her knowledge of the pool's state. Since all
+ * additions are reversible (knowing the final state and the input,
+ * you can reconstruct the initial state), if an attacker has any
+ * uncertainty about the initial state, he/she can only shuffle that
+ * uncertainty about, but never cause any collisions (which would
+ * decrease the uncertainty).
+ *
+ * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
+ * Videau in their paper, "The Linux Pseudorandom Number Generator
+ * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
+ * paper, they point out that we are not using a true Twisted GFSR,
+ * since Matsumoto & Kurita used a trinomial feedback polynomial (that
+ * is, with only three taps, instead of the six that we are using).
+ * As a result, the resulting polynomial is neither primitive nor
+ * irreducible, and hence does not have a maximal period over
+ * GF(2**32). They suggest a slight change to the generator
+ * polynomial which improves the resulting TGFSR polynomial to be
+ * irreducible, which we have made here.
*/
static struct poolinfo {
- int poolwords;
+ int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
- /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
- { 128, 103, 76, 51, 25, 1 },
- /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
- { 32, 26, 20, 14, 7, 1 },
+ /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
+ { S(128), 104, 76, 51, 25, 1 },
+ /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
+ /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
+ { S(32), 26, 19, 14, 7, 1 },
#if 0
/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
- { 2048, 1638, 1231, 819, 411, 1 },
+ { S(2048), 1638, 1231, 819, 411, 1 },
/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
- { 1024, 817, 615, 412, 204, 1 },
+ { S(1024), 817, 615, 412, 204, 1 },
/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
- { 1024, 819, 616, 410, 207, 2 },
+ { S(1024), 819, 616, 410, 207, 2 },
/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
- { 512, 411, 308, 208, 104, 1 },
+ { S(512), 411, 308, 208, 104, 1 },
/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
- { 512, 409, 307, 206, 102, 2 },
+ { S(512), 409, 307, 206, 102, 2 },
/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
- { 512, 409, 309, 205, 103, 2 },
+ { S(512), 409, 309, 205, 103, 2 },
/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
- { 256, 205, 155, 101, 52, 1 },
+ { S(256), 205, 155, 101, 52, 1 },
/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
- { 128, 103, 78, 51, 27, 2 },
+ { S(128), 103, 78, 51, 27, 2 },
/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
- { 64, 52, 39, 26, 14, 1 },
+ { S(64), 52, 39, 26, 14, 1 },
#endif
};
-#define POOLBITS poolwords*32
-#define POOLBYTES poolwords*4
-
-/*
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a twisted Generalized Feedback Shift Reigster
- *
- * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
- * Transactions on Modeling and Computer Simulation 2(3):179-194.
- * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
- * II. ACM Transactions on Mdeling and Computer Simulation 4:254-266)
- *
- * Thanks to Colin Plumb for suggesting this.
- *
- * We have not analyzed the resultant polynomial to prove it primitive;
- * in fact it almost certainly isn't. Nonetheless, the irreducible factors
- * of a random large-degree polynomial over GF(2) are more than large enough
- * that periodicity is not a concern.
- *
- * The input hash is much less sensitive than the output hash. All
- * that we want of it is that it be a good non-cryptographic hash;
- * i.e. it not produce collisions when fed "random" data of the sort
- * we expect to see. As long as the pool state differs for different
- * inputs, we have preserved the input entropy and done a good job.
- * The fact that an intelligent attacker can construct inputs that
- * will produce controlled alterations to the pool's state is not
- * important because we don't consider such inputs to contribute any
- * randomness. The only property we need with respect to them is that
- * the attacker can't increase his/her knowledge of the pool's state.
- * Since all additions are reversible (knowing the final state and the
- * input, you can reconstruct the initial state), if an attacker has
- * any uncertainty about the initial state, he/she can only shuffle
- * that uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
- *
- * The chosen system lets the state of the pool be (essentially) the input
- * modulo the generator polymnomial. Now, for random primitive polynomials,
- * this is a universal class of hash functions, meaning that the chance
- * of a collision is limited by the attacker's knowledge of the generator
- * polynomail, so if it is chosen at random, an attacker can never force
- * a collision. Here, we use a fixed polynomial, but we *can* assume that
- * ###--> it is unknown to the processes generating the input entropy. <-###
- * Because of this important property, this is a good, collision-resistant
- * hash; hash collisions will occur no more often than chance.
- */
-
/*
* Static global variables
*/
@@ -396,17 +403,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
-static bool debug;
-module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { \
- if (debug) \
- printk(KERN_DEBUG "random %04d %04d %04d: " \
- fmt,\
- input_pool.entropy_count,\
- blocking_pool.entropy_count,\
- nonblocking_pool.entropy_count,\
- ## arg); } while (0)
-
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -417,23 +413,26 @@ module_param(debug, bool, 0644);
struct entropy_store;
struct entropy_store {
/* read-only data: */
- struct poolinfo *poolinfo;
+ const struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
struct entropy_store *pull;
- int limit;
+ struct work_struct push_work;
/* read-write data: */
+ unsigned long last_pulled;
spinlock_t lock;
- unsigned add_ptr;
- unsigned input_rotate;
+ unsigned short add_ptr;
+ unsigned short input_rotate;
int entropy_count;
int entropy_total;
unsigned int initialized:1;
- bool last_data_init;
+ unsigned int limit:1;
+ unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
};
+static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
@@ -452,7 +451,9 @@ static struct entropy_store blocking_pool = {
.limit = 1,
.pull = &input_pool,
.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
- .pool = blocking_pool_data
+ .pool = blocking_pool_data,
+ .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
+ push_to_pool),
};
static struct entropy_store nonblocking_pool = {
@@ -460,7 +461,9 @@ static struct entropy_store nonblocking_pool = {
.name = "nonblocking",
.pull = &input_pool,
.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
- .pool = nonblocking_pool_data
+ .pool = nonblocking_pool_data,
+ .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
+ push_to_pool),
};
static __u32 const twist_table[8] = {
@@ -498,7 +501,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
- w = rol32(*bytes++, input_rotate & 31);
+ w = rol32(*bytes++, input_rotate);
i = (i - 1) & wordmask;
/* XOR in the various taps */
@@ -518,7 +521,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
* rotation, so that successive passes spread the
* input bits across the pool evenly.
*/
- input_rotate += i ? 7 : 14;
+ input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
}
ACCESS_ONCE(r->input_rotate) = input_rotate;
@@ -561,62 +564,151 @@ struct fast_pool {
* collector. It's hardcoded for an 128 bit pool and assumes that any
* locks that might be needed are taken by the caller.
*/
-static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+static void fast_mix(struct fast_pool *f, __u32 input[4])
{
- const char *bytes = in;
__u32 w;
- unsigned i = f->count;
unsigned input_rotate = f->rotate;
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
- f->pool[(i + 1) & 3];
- f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
- input_rotate += (i++ & 3) ? 7 : 14;
- }
- f->count = i;
+ w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
+ f->pool[0] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 14) & 31;
+ w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
+ f->pool[1] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 7) & 31;
+ w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
+ f->pool[2] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 7) & 31;
+ w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
+ f->pool[3] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 7) & 31;
+
f->rotate = input_rotate;
+ f->count++;
}
/*
- * Credit (or debit) the entropy store with n bits of entropy
+ * Credit (or debit) the entropy store with n bits of entropy.
+ * Use credit_entropy_bits_safe() if the value comes from userspace
+ * or otherwise should be checked for extreme values.
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
int entropy_count, orig;
+ const int pool_size = r->poolinfo->poolfracbits;
+ int nfrac = nbits << ENTROPY_SHIFT;
if (!nbits)
return;
- DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
- entropy_count += nbits;
+ if (nfrac < 0) {
+ /* Debit */
+ entropy_count += nfrac;
+ } else {
+ /*
+ * Credit: we have to account for the possibility of
+ * overwriting already present entropy. Even in the
+ * ideal case of pure Shannon entropy, new contributions
+ * approach the full value asymptotically:
+ *
+ * entropy <- entropy + (pool_size - entropy) *
+ * (1 - exp(-add_entropy/pool_size))
+ *
+ * For add_entropy <= pool_size/2 then
+ * (1 - exp(-add_entropy/pool_size)) >=
+ * (add_entropy/pool_size)*0.7869...
+ * so we can approximate the exponential with
+ * 3/4*add_entropy/pool_size and still be on the
+ * safe side by adding at most pool_size/2 at a time.
+ *
+ * The use of pool_size-2 in the while statement is to
+ * prevent rounding artifacts from making the loop
+ * arbitrarily long; this limits the loop to log2(pool_size)*2
+ * turns no matter how large nbits is.
+ */
+ int pnfrac = nfrac;
+ const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+ /* The +2 corresponds to the /4 in the denominator */
+
+ do {
+ unsigned int anfrac = min(pnfrac, pool_size/2);
+ unsigned int add =
+ ((pool_size - entropy_count)*anfrac*3) >> s;
+
+ entropy_count += add;
+ pnfrac -= anfrac;
+ } while (unlikely(entropy_count < pool_size-2 && pnfrac));
+ }
if (entropy_count < 0) {
- DEBUG_ENT("negative entropy/overflow\n");
+ pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+ r->name, entropy_count);
+ WARN_ON(1);
entropy_count = 0;
- } else if (entropy_count > r->poolinfo->POOLBITS)
- entropy_count = r->poolinfo->POOLBITS;
+ } else if (entropy_count > pool_size)
+ entropy_count = pool_size;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- if (!r->initialized && nbits > 0) {
- r->entropy_total += nbits;
- if (r->entropy_total > 128)
- r->initialized = 1;
+ r->entropy_total += nbits;
+ if (!r->initialized && r->entropy_total > 128) {
+ r->initialized = 1;
+ r->entropy_total = 0;
+ if (r == &nonblocking_pool) {
+ prandom_reseed_late();
+ pr_notice("random: %s pool is initialized\n", r->name);
+ }
}
- trace_credit_entropy_bits(r->name, nbits, entropy_count,
+ trace_credit_entropy_bits(r->name, nbits,
+ entropy_count >> ENTROPY_SHIFT,
r->entropy_total, _RET_IP_);
- /* should we wake readers? */
- if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
- wake_up_interruptible(&random_read_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
+ if (r == &input_pool) {
+ int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
+
+ /* should we wake readers? */
+ if (entropy_bytes >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+ /* If the input pool is getting full, send some
+ * entropy to the two output pools, flipping back and
+ * forth between them, until the output pools are 75%
+ * full.
+ */
+ if (entropy_bytes > random_write_wakeup_thresh &&
+ r->initialized &&
+ r->entropy_total >= 2*random_read_wakeup_thresh) {
+ static struct entropy_store *last = &blocking_pool;
+ struct entropy_store *other = &blocking_pool;
+
+ if (last == &blocking_pool)
+ other = &nonblocking_pool;
+ if (other->entropy_count <=
+ 3 * other->poolinfo->poolfracbits / 4)
+ last = other;
+ if (last->entropy_count <=
+ 3 * last->poolinfo->poolfracbits / 4) {
+ schedule_work(&last->push_work);
+ r->entropy_total = 0;
+ }
+ }
}
}
+static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+{
+ const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+
+ /* Cap the value to avoid overflows */
+ nbits = min(nbits, nbits_max);
+ nbits = max(nbits, -nbits_max);
+
+ credit_entropy_bits(r, nbits);
+}
+
/*********************************************************************
*
* Entropy input management
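The credit path added in the hunk above approximates entropy += (pool_size - entropy) * (1 - exp(-add/pool_size)) with 3/4 * add * (pool_size - entropy) / pool_size, never adding more than pool_size/2 per step so the approximation stays on the safe side. Below is a stand-alone rendering of that loop on plain integers; the shift parameter stands in for poolbitshift + ENTROPY_SHIFT + 2 from the diff, and all values are in the same fractional-bit units.

static int approx_credit(int entropy_count, int nfrac, int pool_size, int shift)
{
	while (nfrac > 0 && entropy_count < pool_size - 2) {
		int anfrac = nfrac < pool_size / 2 ? nfrac : pool_size / 2;
		/* add ~= 3/4 * anfrac * (pool_size - entropy) / pool_size */
		int add = (int)(((long long)(pool_size - entropy_count) *
				 anfrac * 3) >> shift);

		entropy_count += add;
		nfrac -= anfrac;
	}
	return entropy_count < pool_size ? entropy_count : pool_size;
}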
@@ -630,6 +722,8 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
+#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
+
/*
* Add device- or boot-specific data to the input and nonblocking
* pools to help initialize them to unique values.
@@ -641,15 +735,22 @@ struct timer_rand_state {
void add_device_randomness(const void *buf, unsigned int size)
{
unsigned long time = random_get_entropy() ^ jiffies;
+ unsigned long flags;
- mix_pool_bytes(&input_pool, buf, size, NULL);
- mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
- mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
- mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+ trace_add_device_randomness(size, _RET_IP_);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&input_pool, buf, size, NULL);
+ _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+
+ spin_lock_irqsave(&nonblocking_pool.lock, flags);
+ _mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+ _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+ spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
-static struct timer_rand_state input_timer_state;
+static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
/*
* This function adds entropy to the entropy "pool" by using timing
@@ -663,6 +764,7 @@ static struct timer_rand_state input_timer_state;
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
+ struct entropy_store *r;
struct {
long jiffies;
unsigned cycles;
@@ -671,15 +773,12 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
long delta, delta2, delta3;
preempt_disable();
- /* if over the trickle threshold, use only 1 in 4096 samples */
- if (input_pool.entropy_count > trickle_thresh &&
- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
- goto out;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ mix_pool_bytes(r, &sample, sizeof(sample), NULL);
/*
* Calculate number of bits of randomness we probably added.
@@ -713,10 +812,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
 * and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(&input_pool,
- min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
-out:
preempt_enable();
}
@@ -729,10 +826,10 @@ void add_input_randomness(unsigned int type, unsigned int code,
if (value == last_value)
return;
- DEBUG_ENT("input event\n");
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
+ trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -744,20 +841,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
- __u32 input[4], cycles = random_get_entropy();
-
- input[0] = cycles ^ jiffies;
- input[1] = irq;
- if (regs) {
- __u64 ip = instruction_pointer(regs);
- input[2] = ip;
- input[3] = ip >> 32;
- }
+ cycles_t cycles = random_get_entropy();
+ __u32 input[4], c_high, j_high;
+ __u64 ip;
- fast_mix(fast_pool, input, sizeof(input));
+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+ input[0] = cycles ^ j_high ^ irq;
+ input[1] = now ^ c_high;
+ ip = regs ? instruction_pointer(regs) : _RET_IP_;
+ input[2] = ip;
+ input[3] = ip >> 32;
- if ((fast_pool->count & 1023) &&
- !time_after(now, fast_pool->last + HZ))
+ fast_mix(fast_pool, input);
+
+ if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
return;
fast_pool->last = now;
@@ -786,10 +884,8 @@ void add_disk_randomness(struct gendisk *disk)
if (!disk || !disk->random)
return;
/* first major is 1, so we get >= 0x200 here */
- DEBUG_ENT("disk event %d:%d\n",
- MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
-
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+ trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
#endif
@@ -807,30 +903,58 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
* from the primary pool to the secondary extraction pool. We make
* sure we pull enough for a 'catastrophic reseed'.
*/
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
- __u32 tmp[OUTPUT_POOL_WORDS];
+ if (r->limit == 0 && random_min_urandom_seed) {
+ unsigned long now = jiffies;
- if (r->pull && r->entropy_count < nbytes * 8 &&
- r->entropy_count < r->poolinfo->POOLBITS) {
- /* If we're limited, always leave two wakeup worth's BITS */
- int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
- int bytes = nbytes;
-
- /* pull at least as many as BYTES as wakeup BITS */
- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
- /* but never more than the buffer size */
- bytes = min_t(int, bytes, sizeof(tmp));
-
- DEBUG_ENT("going to reseed %s with %d bits "
- "(%zu of %d requested)\n",
- r->name, bytes * 8, nbytes * 8, r->entropy_count);
-
- bytes = extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes, NULL);
- credit_entropy_bits(r, bytes*8);
+ if (time_before(now,
+ r->last_pulled + random_min_urandom_seed * HZ))
+ return;
+ r->last_pulled = now;
}
+ if (r->pull &&
+ r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
+ r->entropy_count < r->poolinfo->poolfracbits)
+ _xfer_secondary_pool(r, nbytes);
+}
+
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+ __u32 tmp[OUTPUT_POOL_WORDS];
+
+ /* For /dev/random's pool, always leave two wakeups' worth of BITS */
+ int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+ int bytes = nbytes;
+
+ /* pull at least as many BYTES as wakeup BITS */
+ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* but never more than the buffer size */
+ bytes = min_t(int, bytes, sizeof(tmp));
+
+ trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+ ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+ mix_pool_bytes(r, tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
+}
+
+/*
+ * Used as a workqueue function so that when the input pool is getting
+ * full, we can "spill over" some entropy to the output pools. That
+ * way the output pools can store some of the excess entropy instead
+ * of letting it go to waste.
+ */
+static void push_to_pool(struct work_struct *work)
+{
+ struct entropy_store *r = container_of(work, struct entropy_store,
+ push_work);
+ BUG_ON(!r);
+ _xfer_secondary_pool(r, random_read_wakeup_thresh/8);
+ trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
+ r->pull->entropy_count >> ENTROPY_SHIFT);
}
/*
@@ -850,50 +974,48 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
{
unsigned long flags;
int wakeup_write = 0;
+ int have_bytes;
+ int entropy_count, orig;
+ size_t ibytes;
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, flags);
- BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
- DEBUG_ENT("trying to extract %zu bits from %s\n",
- nbytes * 8, r->name);
+ BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
/* Can we pull enough? */
- if (r->entropy_count / 8 < min + reserved) {
- nbytes = 0;
- } else {
- int entropy_count, orig;
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+ ibytes = nbytes;
+ if (have_bytes < min + reserved) {
+ ibytes = 0;
+ } else {
/* If limited, never pull more than available */
- if (r->limit && nbytes + reserved >= entropy_count / 8)
- nbytes = entropy_count/8 - reserved;
-
- if (entropy_count / 8 >= nbytes + reserved) {
- entropy_count -= nbytes*8;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
- } else {
- entropy_count = reserved;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
- }
+ if (r->limit && ibytes + reserved >= have_bytes)
+ ibytes = have_bytes - reserved;
- if (entropy_count < random_write_wakeup_thresh)
- wakeup_write = 1;
- }
+ if (have_bytes >= ibytes + reserved)
+ entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+ else
+ entropy_count = reserved << (ENTROPY_SHIFT + 3);
- DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
- nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ if ((r->entropy_count >> ENTROPY_SHIFT)
+ < random_write_wakeup_thresh)
+ wakeup_write = 1;
+ }
spin_unlock_irqrestore(&r->lock, flags);
+ trace_debit_entropy(r->name, 8 * ibytes);
if (wakeup_write) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
- return nbytes;
+ return ibytes;
}
static void extract_buf(struct entropy_store *r, __u8 *out)
@@ -901,7 +1023,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
int i;
union {
__u32 w[5];
- unsigned long l[LONGS(EXTRACT_SIZE)];
+ unsigned long l[LONGS(20)];
} hash;
__u32 workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
@@ -914,6 +1036,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
+	 * If we have an architectural hardware random number
+ * generator, mix that in, too.
+ */
+ for (i = 0; i < LONGS(20); i++) {
+ unsigned long v;
+ if (!arch_get_random_long(&v))
+ break;
+ hash.l[i] ^= v;
+ }
+
+ /*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
@@ -942,17 +1075,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
hash.w[1] ^= hash.w[4];
hash.w[2] ^= rol32(hash.w[2], 16);
- /*
- * If we have a architectural hardware random number
- * generator, mix that in, too.
- */
- for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- hash.l[i] ^= v;
- }
-
memcpy(out, &hash, EXTRACT_SIZE);
memset(&hash, 0, sizeof(hash));
}
@@ -968,10 +1090,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!r->last_data_init) {
- r->last_data_init = true;
+ r->last_data_init = 1;
spin_unlock_irqrestore(&r->lock, flags);
trace_extract_entropy(r->name, EXTRACT_SIZE,
- r->entropy_count, _RET_IP_);
+ ENTROPY_BITS(r), _RET_IP_);
xfer_secondary_pool(r, EXTRACT_SIZE);
extract_buf(r, tmp);
spin_lock_irqsave(&r->lock, flags);
@@ -980,7 +1102,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
spin_unlock_irqrestore(&r->lock, flags);
}
- trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
+ trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -1013,7 +1135,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
- trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
+ trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -1053,6 +1175,14 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
*/
void get_random_bytes(void *buf, int nbytes)
{
+#if DEBUG_RANDOM_BOOT > 0
+ if (unlikely(nonblocking_pool.initialized == 0))
+ printk(KERN_NOTICE "random: %pF get_random_bytes called "
+ "with %d bits of entropy available\n",
+ (void *) _RET_IP_,
+ nonblocking_pool.entropy_total);
+#endif
+ trace_get_random_bytes(nbytes, _RET_IP_);
extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
@@ -1071,7 +1201,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
{
char *p = buf;
- trace_get_random_bytes(nbytes, _RET_IP_);
+ trace_get_random_bytes_arch(nbytes, _RET_IP_);
while (nbytes) {
unsigned long v;
int chunk = min(nbytes, (int)sizeof(unsigned long));
@@ -1105,13 +1235,11 @@ static void init_std_data(struct entropy_store *r)
ktime_t now = ktime_get_real();
unsigned long rv;
- r->entropy_count = 0;
- r->entropy_total = 0;
- r->last_data_init = false;
+ r->last_pulled = jiffies;
mix_pool_bytes(r, &now, sizeof(now), NULL);
- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+ for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_long(&rv))
- break;
+ rv = random_get_entropy();
mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
@@ -1134,7 +1262,7 @@ static int rand_initialize(void)
init_std_data(&nonblocking_pool);
return 0;
}
-module_init(rand_initialize);
+early_initcall(rand_initialize);
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
@@ -1146,8 +1274,10 @@ void rand_initialize_disk(struct gendisk *disk)
* source.
*/
state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state)
+ if (state) {
+ state->last_time = INITIAL_JIFFIES;
disk->random = state;
+ }
}
#endif
@@ -1164,8 +1294,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (n > SEC_XFER_SIZE)
n = SEC_XFER_SIZE;
- DEBUG_ENT("reading %zu bits\n", n*8);
-
n = extract_entropy_user(&blocking_pool, buf, n);
if (n < 0) {
@@ -1173,8 +1301,9 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
break;
}
- DEBUG_ENT("read got %zd bits (%zd still needed)\n",
- n*8, (nbytes-n)*8);
+ trace_random_read(n*8, (nbytes-n)*8,
+ ENTROPY_BITS(&blocking_pool),
+ ENTROPY_BITS(&input_pool));
if (n == 0) {
if (file->f_flags & O_NONBLOCK) {
@@ -1182,13 +1311,9 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
break;
}
- DEBUG_ENT("sleeping?\n");
-
wait_event_interruptible(random_read_wait,
- input_pool.entropy_count >=
- random_read_wakeup_thresh);
-
- DEBUG_ENT("awake\n");
+ ENTROPY_BITS(&input_pool) >=
+ random_read_wakeup_thresh);
if (signal_pending(current)) {
retval = -ERESTARTSYS;
@@ -1211,7 +1336,18 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- return extract_entropy_user(&nonblocking_pool, buf, nbytes);
+ int ret;
+
+ if (unlikely(nonblocking_pool.initialized == 0))
+ printk_once(KERN_NOTICE "random: %s urandom read "
+ "with %d bits of entropy available\n",
+ current->comm, nonblocking_pool.entropy_total);
+
+ ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
+
+ trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
+ ENTROPY_BITS(&input_pool));
+ return ret;
}
static unsigned int
@@ -1222,9 +1358,9 @@ random_poll(struct file *file, poll_table * wait)
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
- if (input_pool.entropy_count >= random_read_wakeup_thresh)
+ if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
mask |= POLLIN | POLLRDNORM;
- if (input_pool.entropy_count < random_write_wakeup_thresh)
+ if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
@@ -1275,7 +1411,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
- if (put_user(input_pool.entropy_count, p))
+ ent_count = ENTROPY_BITS(&input_pool);
+ if (put_user(ent_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1283,7 +1420,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_bits(&input_pool, ent_count);
+ credit_entropy_bits_safe(&input_pool, ent_count);
return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
@@ -1298,14 +1435,19 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
- credit_entropy_bits(&input_pool, ent_count);
+ credit_entropy_bits_safe(&input_pool, ent_count);
return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
- /* Clear the entropy pool counters. */
+ /*
+ * Clear the entropy pool counters. We no longer clear
+ * the entropy pool, as that's silly.
+ */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- rand_initialize();
+ input_pool.entropy_count = 0;
+ nonblocking_pool.entropy_count = 0;
+ blocking_pool.entropy_count = 0;
return 0;
default:
return -EINVAL;
@@ -1405,6 +1547,23 @@ static int proc_do_uuid(struct ctl_table *table, int write,
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
+/*
+ * Return entropy available scaled to integral bits
+ */
+static int proc_do_entropy(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ ctl_table fake_table;
+ int entropy_count;
+
+ entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+
+ fake_table.data = &entropy_count;
+ fake_table.maxlen = sizeof(entropy_count);
+
+ return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+}
+
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
@@ -1419,7 +1578,7 @@ struct ctl_table random_table[] = {
.procname = "entropy_avail",
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_do_entropy,
.data = &input_pool.entropy_count,
},
{
@@ -1441,6 +1600,13 @@ struct ctl_table random_table[] = {
.extra2 = &max_write_thresh,
},
{
+ .procname = "urandom_min_reseed_secs",
+ .data = &random_min_urandom_seed,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "boot_id",
.data = &sysctl_bootid,
.maxlen = 16,
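
Aside (not part of the patch): the random.c hunks above convert entropy_count to a fixed-point count of fractional bits, credited/debited with shifts and read back through ENTROPY_BITS()/proc_do_entropy(). Below is a minimal standalone sketch of that bookkeeping, assuming ENTROPY_SHIFT is 3 (1/8th-bit units) as in this series; the names are illustrative only, not the kernel's.

#include <stdio.h>

#define ENTROPY_SHIFT 3                         /* assumed: 1/8th-bit units */
#define ENTROPY_BITS(frac) ((frac) >> ENTROPY_SHIFT)

int main(void)
{
	int entropy_count = 0;                  /* fractional bits, like r->entropy_count */

	entropy_count += 200 << ENTROPY_SHIFT;          /* credit 200 whole bits */
	entropy_count -= 16 << (ENTROPY_SHIFT + 3);     /* debit 16 bytes (128 bits) */

	printf("available: %d bits\n", ENTROPY_BITS(entropy_count));    /* prints 72 */
	return 0;
}
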
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index c0cbbd429bd..35259961cc3 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -227,7 +227,7 @@ static inline unsigned char rtc_is_updating(void)
#ifdef RTC_IRQ
/*
- * A very tiny interrupt handler. It runs with IRQF_DISABLED set,
+ * A very tiny interrupt handler. It runs with interrupts disabled,
* but there is possibility of conflicting with the set_rtc_mmss()
* call (the rtc irq and the timer irq can easily run at the same
* time in two different CPUs). So we need to serialize
@@ -1040,8 +1040,7 @@ no_irq:
rtc_int_handler_ptr = rtc_interrupt;
}
- if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
- "rtc", NULL)) {
+ if (request_irq(RTC_IRQ, rtc_int_handler_ptr, 0, "rtc", NULL)) {
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
rtc_has_irq = 0;
printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 5816b39ff5a..8bab59292a0 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -108,8 +108,7 @@ scdrv_open(struct inode *inode, struct file *file)
/* hook this subchannel up to the system controller interrupt */
mutex_lock(&scdrv_mutex);
rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
- IRQF_SHARED | IRQF_DISABLED,
- SYSCTL_BASENAME, sd);
+ IRQF_SHARED, SYSCTL_BASENAME, sd);
if (rv) {
ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
kfree(sd);
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index ee156948b9f..59bcefd6ec7 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -292,8 +292,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
/* hook event subchannel up to the system controller interrupt */
rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
- IRQF_SHARED | IRQF_DISABLED,
- "system controller events", event_sd);
+ IRQF_SHARED, "system controller events", event_sd);
if (rv) {
printk(KERN_WARNING "%s: irq request failed (%d)\n",
__func__, rv);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index e95e0ab0bd8..100cd1de993 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -222,7 +222,7 @@ static int tlclk_open(struct inode *inode, struct file *filp)
/* This device is wired through the FPGA IO space of the ATCA blade
* we can't share this IRQ */
result = request_irq(telclk_interrupt, &tlclk_interrupt,
- IRQF_DISABLED, "telco_clock", tlclk_interrupt);
+ 0, "telco_clock", tlclk_interrupt);
if (result == -EBUSY)
printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
else
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 94c0c74434e..1a65838888c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -33,6 +33,15 @@ config TCG_TIS
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_tis.
+config TCG_TIS_I2C_ATMEL
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
+ depends on I2C
+ ---help---
+ If you have an Atmel I2C TPM security chip say Yes and it will be
+ accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will
+ be called tpm_tis_i2c_atmel.
+
config TCG_TIS_I2C_INFINEON
tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
depends on I2C
@@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON
Specification 0.20 say Yes and it will be accessible from within
Linux.
To compile this driver as a module, choose M here; the module
- will be called tpm_tis_i2c_infineon.
+ will be called tpm_i2c_infineon.
+
+config TCG_TIS_I2C_NUVOTON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
+ depends on I2C
+ ---help---
+ If you have a TPM security chip with an I2C interface from
+ Nuvoton Technology Corp. say Yes and it will be accessible
+ from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_nuvoton.
config TCG_NSC
tristate "National Semiconductor TPM Interface"
@@ -82,14 +101,14 @@ config TCG_IBMVTPM
as a module, choose M here; the module will be called tpm_ibmvtpm.
config TCG_ST33_I2C
- tristate "STMicroelectronics ST33 I2C TPM"
- depends on I2C
- depends on GPIOLIB
- ---help---
- If you have a TPM security chip from STMicroelectronics working with
- an I2C bus say Yes and it will be accessible from within Linux.
- To compile this driver as a module, choose M here; the module will be
- called tpm_stm_st33_i2c.
+ tristate "STMicroelectronics ST33 I2C TPM"
+ depends on I2C
+ depends on GPIOLIB
+ ---help---
+ If you have a TPM security chip from STMicroelectronics working with
+ an I2C bus say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_stm_st33_i2c.
config TCG_XEN
tristate "XEN TPM Interface"
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index eb41ff97d0a..b80a4000dae 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,17 +2,20 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
+tpm-y := tpm-interface.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o
+
ifdef CONFIG_ACPI
- obj-$(CONFIG_TCG_TPM) += tpm_bios.o
- tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o
+ tpm-y += tpm_eventlog.o tpm_acpi.o
else
ifdef CONFIG_TCG_IBMVTPM
- obj-$(CONFIG_TCG_TPM) += tpm_bios.o
- tpm_bios-objs += tpm_eventlog.o tpm_of.o
+ tpm-y += tpm_eventlog.o tpm_of.o
endif
endif
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c
index e3c974a6c52..6ae41d33763 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -10,13 +10,13 @@
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
+ * Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
- *
+ *
* Note, the TPM chip is not interrupt driven (only polling)
* and can have very long timeouts (minutes!). Hence the unusual
* calls to msleep.
@@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->dev,
- "invalid count value %x %zx \n", count, bufsiz);
+ "invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
mutex_lock(&chip->tpm_mutex);
- if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
+ rc = chip->vendor.send(chip, (u8 *) buf, count);
+ if (rc < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
@@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
{
int err;
- len = tpm_transmit(chip,(u8 *) cmd, len);
+ len = tpm_transmit(chip, (u8 *) cmd, len);
if (len < 0)
return len;
else if (len < TPM_HEADER_SIZE)
@@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
return rc;
}
-ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
-ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_active);
-ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
-ssize_t tpm_show_temp_deactivated(struct device * dev,
- struct device_attribute * attr, char *buf)
+ssize_t tpm_show_temp_deactivated(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
cap_t cap;
ssize_t rc;
@@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
/**
* tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
+ * @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * @res_buf: TPM_PCR value
+ * size of res_buf is 20 bytes (or NULL if you don't care)
*
* The TPM driver should be built-in, but for whatever reason it
* isn't, protect against the chip disappearing, by incrementing
@@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or AN&
+ * @chip_num: tpm idx # or AN&
* @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * @hash: hash value used to extend pcr value
*
* The TPM driver should be built-in, but for whatever reason it
* isn't, protect against the chip disappearing, by incrementing
@@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
unsigned long duration;
struct tpm_cmd_t cmd;
- duration = tpm_calc_ordinal_duration(chip,
- TPM_ORD_CONTINUE_SELFTEST);
+ duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
loops = jiffies_to_msecs(duration) / delay_msec;
@@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
if (err)
goto out;
- /*
+ /*
ignore header 10 bytes
algorithm 32 bits (1 == RSA )
encscheme 16 bits
sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
+ parameters (RSA 12->bytes: keybit, #primes, expbit)
keylenbytes 32 bits
256 byte modulus
ignore checksum 20 bytes
@@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
- rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major, cap.tpm_version.Minor,
- cap.tpm_version.revMajor, cap.tpm_version.revMinor);
- return str - buf;
-}
-EXPORT_SYMBOL_GPL(tpm_show_caps);
-
-ssize_t tpm_show_caps_1_2(struct device * dev,
- struct device_attribute * attr, char *buf)
-{
- cap_t cap;
- ssize_t rc;
- char *str = buf;
-
- rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
- "attempting to determine the manufacturer");
- if (rc)
- return 0;
- str += sprintf(str, "Manufacturer: 0x%x\n",
- be32_to_cpu(cap.manufacturer_id));
+ /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
+ if (!rc) {
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version_1_2.Major,
+ cap.tpm_version_1_2.Minor,
+ cap.tpm_version_1_2.revMajor,
+ cap.tpm_version_1_2.revMinor);
+ } else {
+ /* Otherwise just use TPM_STRUCT_VER */
+ rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version");
+ if (rc)
+ return 0;
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version.Major,
+ cap.tpm_version.Minor,
+ cap.tpm_version.revMajor,
+ cap.tpm_version.revMinor);
+ }
+
return str - buf;
}
-EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel,
- bool *canceled)
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
{
u8 status = chip->vendor.status(chip);
@@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
*/
int tpm_open(struct inode *inode, struct file *file)
{
- int minor = iminor(inode);
- struct tpm_chip *chip = NULL, *pos;
-
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (pos->vendor.miscdev.minor == minor) {
- chip = pos;
- get_device(chip->dev);
- break;
- }
- }
- rcu_read_unlock();
-
- if (!chip)
- return -ENODEV;
+ struct miscdevice *misc = file->private_data;
+ struct tpm_chip *chip = container_of(misc, struct tpm_chip,
+ vendor.miscdev);
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
- put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
- put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
+ get_device(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);
@@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip)
chip->vendor.release(chip->dev);
clear_bit(chip->dev_num, dev_mask);
- kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
@@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
- * Called from tpm_<specific>.c probe function only for devices
+ * Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim. Prior to calling
* this function the specific probe function has called pci_enable_device
* upon errant exit from this function specific probe function should call
@@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release);
struct tpm_chip *tpm_register_hardware(struct device *dev,
const struct tpm_vendor_specific *entry)
{
-#define DEVNAME_SIZE 7
-
- char *devname;
struct tpm_chip *chip;
/* Driver specific per-device data */
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
- if (chip == NULL || devname == NULL)
- goto out_free;
+ if (chip == NULL)
+ return NULL;
mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
@@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
set_bit(chip->dev_num, dev_mask);
- scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
- chip->vendor.miscdev.name = devname;
+ scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm",
+ chip->dev_num);
+ chip->vendor.miscdev.name = chip->devname;
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
@@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
goto put_device;
}
- chip->bios_dir = tpm_bios_log_setup(devname);
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
/* Make chip available */
spin_lock(&driver_lock);
@@ -1571,7 +1544,6 @@ put_device:
put_device(chip->dev);
out_free:
kfree(chip);
- kfree(devname);
return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7bfc176ed4..f3284787219 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
char *);
-extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr,
- char *);
extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
const char *, size_t);
extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr,
@@ -122,6 +120,7 @@ struct tpm_chip {
struct device *dev; /* Device stuff */
int dev_num; /* /dev/tpm# */
+ char devname[7];
unsigned long is_open; /* only one allowed */
int time_expired;
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 99d6820c611..c9a528d25d2 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -202,7 +202,7 @@ static int __init init_atmel(void)
have_region =
(atmel_request_region
- (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+ (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
if (IS_ERR(pdev)) {
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index 84ddc557b8f..59f7cb28260 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -406,7 +406,6 @@ out_tpm:
out:
return NULL;
}
-EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
void tpm_bios_log_teardown(struct dentry **lst)
{
@@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst)
for (i = 0; i < 3; i++)
securityfs_remove(lst[i]);
}
-EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 00000000000..c3cd7fe481a
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,284 @@
+/*
+ * ATMEL I2C TPM AT97SC3204T
+ *
+ * Copyright (C) 2012 V Lab Technologies
+ * Teddy Reed <teddy@prosauce.org>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ * Device driver for ATMEL I2C TPMs.
+ *
+ * Teddy Reed determined the basic I2C command flow: unlike other I2C TPM
+ * devices, the raw TCG formatted TPM command data is written via I2C and then
+ * raw TCG formatted TPM command data is returned via I2C.
+ *
+ * TCG status/locality/etc functions seen in the LPC implementation do not
+ * seem to be present.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+#define I2C_DRIVER_NAME "tpm_i2c_atmel"
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+#define ATMEL_STS_OK 1
+
+struct priv_data {
+ size_t len;
+ /* This is the amount we read on the first try. 25 was chosen to fit a
+ * fair number of read responses in the buffer so a 2nd retry can be
+ * avoided in small message cases. */
+ u8 buffer[sizeof(struct tpm_output_header) + 25];
+};
+
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+
+ priv->len = 0;
+
+ if (len <= 2)
+ return -EIO;
+
+ status = i2c_master_send(client, buf, len);
+
+ dev_dbg(chip->dev,
+ "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+ (int)min_t(size_t, 64, len), buf, len, status);
+ return status;
+}
+
+static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ struct tpm_output_header *hdr =
+ (struct tpm_output_header *)priv->buffer;
+ u32 expected_len;
+ int rc;
+
+ if (priv->len == 0)
+ return -EIO;
+
+ /* Get the message size from the message header; if we didn't get the
+ * whole message in read_status then we need to re-read the
+ * message. */
+ expected_len = be32_to_cpu(hdr->length);
+ if (expected_len > count)
+ return -ENOMEM;
+
+ if (priv->len >= expected_len) {
+ dev_dbg(chip->dev,
+ "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ memcpy(buf, priv->buffer, expected_len);
+ return expected_len;
+ }
+
+ rc = i2c_master_recv(client, buf, expected_len);
+ dev_dbg(chip->dev,
+ "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ return rc;
+}
+
+static void i2c_atmel_cancel(struct tpm_chip *chip)
+{
+ dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported");
+}
+
+static u8 i2c_atmel_read_status(struct tpm_chip *chip)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ int rc;
+
+ /* The TPM fails the I2C read until it is ready, so we do the entire
+ * transfer here and buffer it locally. This way the common code can
+ * properly handle the timeouts. */
+ priv->len = 0;
+ memset(priv->buffer, 0, sizeof(priv->buffer));
+
+
+ /* Once the TPM has completed the command the command remains readable
+ * until another command is issued. */
+ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
+ dev_dbg(chip->dev,
+ "%s: sts=%d", __func__, rc);
+ if (rc <= 0)
+ return 0;
+
+ priv->len = rc;
+
+ return ATMEL_STS_OK;
+}
+
+static const struct file_operations i2c_atmel_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *i2c_atmel_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_atmel_attr_grp = {
+ .attrs = i2c_atmel_attrs
+};
+
+static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return 0;
+}
+
+static const struct tpm_vendor_specific i2c_atmel = {
+ .status = i2c_atmel_read_status,
+ .recv = i2c_atmel_recv,
+ .send = i2c_atmel_send,
+ .cancel = i2c_atmel_cancel,
+ .req_complete_mask = ATMEL_STS_OK,
+ .req_complete_val = ATMEL_STS_OK,
+ .req_canceled = i2c_atmel_req_canceled,
+ .attr_group = &i2c_atmel_attr_grp,
+ .miscdev.fops = &i2c_atmel_ops,
+};
+
+static int i2c_atmel_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpm_register_hardware(dev, &i2c_atmel);
+ if (!chip) {
+ dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
+ return -ENODEV;
+ }
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.irq = 0;
+
+ /* There is no known way to probe for this device, and all version
+ * information seems to be read via TPM commands. Thus we rely on the
+ * TPM startup process in the common code to detect the device. */
+ if (tpm_get_timeouts(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+
+static int i2c_atmel_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip)
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(dev);
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id i2c_atmel_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_atmel_of_match[] = {
+ {.compatible = "atmel,at97sc3204t"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_atmel_driver = {
+ .id_table = i2c_atmel_id,
+ .probe = i2c_atmel_probe,
+ .remove = i2c_atmel_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &i2c_atmel_pm_ops,
+ .of_match_table = of_match_ptr(i2c_atmel_of_match),
+ },
+};
+
+module_i2c_driver(i2c_atmel_driver);
+
+MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
+MODULE_DESCRIPTION("Atmel TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index b8735de8ce9..fefd2aa5c81 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
@@ -685,7 +685,6 @@ out_vendor:
chip->dev->release = NULL;
chip->release = NULL;
tpm_dev.client = NULL;
- dev_set_drvdata(chip->dev, chip);
out_err:
return rc;
}
@@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client)
chip->dev->release = NULL;
chip->release = NULL;
tpm_dev.client = NULL;
- dev_set_drvdata(chip->dev, chip);
return 0;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 00000000000..6276fea01ff
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,710 @@
+/******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501,
+ * based on the TCG TPM Interface Spec version 1.2.
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2011, Nuvoton Technology Corporation.
+ * Dan Morav <dan.morav@nuvoton.com>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Nuvoton contact information: APC.Support@nuvoton.com
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+/* I2C interface offsets */
+#define TPM_STS 0x00
+#define TPM_BURST_COUNT 0x01
+#define TPM_DATA_FIFO_W 0x20
+#define TPM_DATA_FIFO_R 0x40
+#define TPM_VID_DID_RID 0x60
+/* TPM command header size */
+#define TPM_HEADER_SIZE 10
+#define TPM_RETRY 5
+/*
+ * I2C bus device maximum buffer size w/o counting I2C address or command
+ * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
+ */
+#define TPM_I2C_MAX_BUF_SIZE 32
+#define TPM_I2C_RETRY_COUNT 32
+#define TPM_I2C_BUS_DELAY 1 /* msec */
+#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
+#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
+
+#define I2C_DRIVER_NAME "tpm_i2c_nuvoton"
+
+struct priv_data {
+ unsigned int intrs;
+};
+
+static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+#define TPM_STS_VALID 0x80
+#define TPM_STS_COMMAND_READY 0x40
+#define TPM_STS_GO 0x20
+#define TPM_STS_DATA_AVAIL 0x10
+#define TPM_STS_EXPECT 0x08
+#define TPM_STS_RESPONSE_RETRY 0x02
+#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+/* read TPM_STS register */
+static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+ u8 data;
+
+ status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
+ if (status <= 0) {
+ dev_err(chip->dev, "%s() error return %d\n", __func__,
+ status);
+ data = TPM_STS_ERR_VAL;
+ }
+
+ return data;
+}
+
+/* write byte to TPM_STS register */
+static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
+{
+ s32 status;
+ int i;
+
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+ msleep(TPM_I2C_BUS_DELAY);
+ }
+ return status;
+}
+
+/* write commandReady to TPM_STS register */
+static void i2c_nuvoton_ready(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+
+ /* this causes the current command to be aborted */
+ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
+ if (status < 0)
+ dev_err(chip->dev,
+ "%s() fail to write TPM_STS.commandReady\n", __func__);
+}
+
+/* read burstCount field from TPM_STS register
+ * return -1 on fail to read */
+static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
+ struct tpm_chip *chip)
+{
+ unsigned long stop = jiffies + chip->vendor.timeout_d;
+ s32 status;
+ int burst_count = -1;
+ u8 data;
+
+ /* wait for burstcount to be non-zero */
+ do {
+ /* in I2C burstCount is 1 byte */
+ status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
+ &data);
+ if (status > 0 && data > 0) {
+ burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+ break;
+ }
+ msleep(TPM_I2C_BUS_DELAY);
+ } while (time_before(jiffies, stop));
+
+ return burst_count;
+}
+
+/*
+ * WPCT301/NPCT501 SINT# supports only dataAvail
+ * any call to this function which is not waiting for dataAvail will
+ * set queue to NULL to avoid waiting for interrupt
+ */
+static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
+{
+ u8 status = i2c_nuvoton_read_status(chip);
+ return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
+}
+
+static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
+ u32 timeout, wait_queue_head_t *queue)
+{
+ if (chip->vendor.irq && queue) {
+ s32 rc;
+ DEFINE_WAIT(wait);
+ struct priv_data *priv = chip->vendor.priv;
+ unsigned int cur_intrs = priv->intrs;
+
+ enable_irq(chip->vendor.irq);
+ rc = wait_event_interruptible_timeout(*queue,
+ cur_intrs != priv->intrs,
+ timeout);
+ if (rc > 0)
+ return 0;
+ /* At this point we know that the SINT pin is asserted, so we
+ * do not need to do i2c_nuvoton_check_status */
+ } else {
+ unsigned long ten_msec, stop;
+ bool status_valid;
+
+ /* check current status */
+ status_valid = i2c_nuvoton_check_status(chip, mask, value);
+ if (status_valid)
+ return 0;
+
+ /* use polling to wait for the event */
+ ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+ stop = jiffies + timeout;
+ do {
+ if (time_before(jiffies, ten_msec))
+ msleep(TPM_I2C_RETRY_DELAY_SHORT);
+ else
+ msleep(TPM_I2C_RETRY_DELAY_LONG);
+ status_valid = i2c_nuvoton_check_status(chip, mask,
+ value);
+ if (status_valid)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ value);
+ return -ETIMEDOUT;
+}
+
+/* wait for dataAvail field to be set in the TPM_STS register */
+static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
+ wait_queue_head_t *queue)
+{
+ return i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ timeout, queue);
+}
+
+/* Read @count bytes into @buf from TPM_RD_FIFO register */
+static int i2c_nuvoton_recv_data(struct i2c_client *client,
+ struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ s32 rc;
+ int burst_count, bytes2read, size = 0;
+
+ while (size < count &&
+ i2c_nuvoton_wait_for_data_avail(chip,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue) == 0) {
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(chip->dev,
+ "%s() fail to read burstCount=%d\n", __func__,
+ burst_count);
+ return -EIO;
+ }
+ bytes2read = min_t(size_t, burst_count, count - size);
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
+ bytes2read, &buf[size]);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "%s() fail on i2c_nuvoton_read_buf()=%d\n",
+ __func__, rc);
+ return -EIO;
+ }
+ dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read);
+ size += bytes2read;
+ }
+
+ return size;
+}
+
+/* Read TPM command results */
+static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct device *dev = chip->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ s32 rc;
+ int expected, status, burst_count, retries, size = 0;
+
+ if (count < TPM_HEADER_SIZE) {
+ i2c_nuvoton_ready(chip); /* return to idle */
+ dev_err(dev, "%s() count < header size\n", __func__);
+ return -EIO;
+ }
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ if (retries > 0) {
+ /* if this is not the first trial, set responseRetry */
+ i2c_nuvoton_write_status(client,
+ TPM_STS_RESPONSE_RETRY);
+ }
+ /*
+ * read first available (> 10 bytes), including:
+ * tag, paramsize, and result
+ */
+ status = i2c_nuvoton_wait_for_data_avail(
+ chip, chip->vendor.timeout_c, &chip->vendor.read_queue);
+ if (status != 0) {
+ dev_err(dev, "%s() timeout on dataAvail\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail to get burstCount\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ size = i2c_nuvoton_recv_data(client, chip, buf,
+ burst_count);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(dev, "%s() fail to read header\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ /*
+ * convert number of expected bytes field from big endian 32 bit
+ * to machine native
+ */
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ dev_err(dev, "%s() expected > count\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
+ expected - size);
+ size += rc;
+ if (rc < 0 || size < expected) {
+ dev_err(dev, "%s() fail to read remainder of result\n",
+ __func__);
+ size = -EIO;
+ continue;
+ }
+ if (i2c_nuvoton_wait_for_stat(
+ chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
+ TPM_STS_VALID, chip->vendor.timeout_c,
+ NULL)) {
+ dev_err(dev, "%s() error left over data\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ i2c_nuvoton_ready(chip);
+ dev_dbg(chip->dev, "%s() -> %d\n", __func__, size);
+ return size;
+}
+
+/*
+ * Send TPM command.
+ *
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct device *dev = chip->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u32 ordinal;
+ size_t count = 0;
+ int burst_count, bytes2write, retries, rc = -EIO;
+
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ i2c_nuvoton_ready(chip);
+ if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->vendor.timeout_b, NULL)) {
+ dev_err(dev, "%s() timeout on commandReady\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ rc = 0;
+ while (count < len - 1) {
+ burst_count = i2c_nuvoton_get_burstcount(client,
+ chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail get burstCount\n",
+ __func__);
+ rc = -EIO;
+ break;
+ }
+ bytes2write = min_t(size_t, burst_count,
+ len - 1 - count);
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
+ bytes2write, &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail i2cWriteBuf\n",
+ __func__);
+ break;
+ }
+ dev_dbg(dev, "%s(%d):", __func__, bytes2write);
+ count += bytes2write;
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ chip->vendor.timeout_c,
+ NULL);
+ if (rc < 0) {
+ dev_err(dev, "%s() timeout on Expect\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (rc < 0)
+ continue;
+
+ /* write last byte */
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
+ &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write last byte\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID | TPM_STS_EXPECT,
+ TPM_STS_VALID,
+ chip->vendor.timeout_c, NULL);
+ if (rc) {
+ dev_err(dev, "%s() timeout on Expect to clear\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ if (rc < 0) {
+ /* retries == TPM_RETRY */
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ /* execute the TPM command */
+ rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write Go\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ rc = i2c_nuvoton_wait_for_data_avail(chip,
+ tpm_calc_ordinal_duration(chip,
+ ordinal),
+ &chip->vendor.read_queue);
+ if (rc) {
+ dev_err(dev, "%s() timeout command duration\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+
+ dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+ return len;
+}
+
+static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct file_operations i2c_nuvoton_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *i2c_nuvoton_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_nuvoton_attr_grp = {
+ .attrs = i2c_nuvoton_attrs
+};
+
+static const struct tpm_vendor_specific tpm_i2c = {
+ .status = i2c_nuvoton_read_status,
+ .recv = i2c_nuvoton_recv,
+ .send = i2c_nuvoton_send,
+ .cancel = i2c_nuvoton_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = i2c_nuvoton_req_canceled,
+ .attr_group = &i2c_nuvoton_attr_grp,
+ .miscdev.fops = &i2c_nuvoton_ops,
+};
+
+/* The only purpose for the handler is to signal to any waiting threads that
+ * the interrupt is currently being asserted. The driver does not do any
+ * processing triggered by interrupts, and the chip provides no way to mask at
+ * the source (plus that would be slow over I2C). Run the IRQ as a one-shot,
+ * this means it cannot be shared. */
+static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct priv_data *priv = chip->vendor.priv;
+
+ priv->intrs++;
+ wake_up(&chip->vendor.read_queue);
+ disable_irq_nosync(chip->vendor.irq);
+ return IRQ_HANDLED;
+}
+
+static int get_vid(struct i2c_client *client, u32 *res)
+{
+ static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
+ u32 temp;
+ s32 rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+ rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
+ /*
+ * f/w rev 2.81 has an issue where the VID_DID_RID is not
+ * reporting the right value. so give it another chance at
+ * offset 0x20 (FIFO_W).
+ */
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
+ (u8 *) (&temp));
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value,
+ sizeof(vid_did_rid_value)))
+ return -ENODEV;
+ }
+
+ *res = temp;
+ return 0;
+}
+
+static int i2c_nuvoton_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ u32 vid = 0;
+
+ rc = get_vid(client, &vid);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
+ (u8) (vid >> 16), (u8) (vid >> 24));
+
+ chip = tpm_register_hardware(dev, &tpm_i2c);
+ if (!chip) {
+ dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
+ return -ENODEV;
+ }
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
+ init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&chip->vendor.int_queue);
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ /*
+ * I2C intfcaps (interrupt capabilities) in the chip are hard-coded to:
+ * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
+ * The IRQ should be set in the i2c_board_info (which is done
+ * automatically in of_i2c_register_devices for device tree users). */
+ chip->vendor.irq = client->irq;
+
+ if (chip->vendor.irq) {
+ dev_dbg(dev, "%s() chip-vendor.irq\n", __func__);
+ rc = devm_request_irq(dev, chip->vendor.irq,
+ i2c_nuvoton_int_handler,
+ IRQF_TRIGGER_LOW,
+ chip->vendor.miscdev.name,
+ chip);
+ if (rc) {
+ dev_err(dev, "%s() Unable to request irq: %d for use\n",
+ __func__, chip->vendor.irq);
+ chip->vendor.irq = 0;
+ } else {
+ /* Clear any pending interrupt */
+ i2c_nuvoton_ready(chip);
+ /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->vendor.timeout_b,
+ NULL);
+ if (rc == 0) {
+ /*
+ * TIS is in ready state
+ * write dummy byte to enter reception state
+ * TPM_DATA_FIFO_W <- rc (0)
+ */
+ rc = i2c_nuvoton_write_buf(client,
+ TPM_DATA_FIFO_W,
+ 1, (u8 *) (&rc));
+ if (rc < 0)
+ goto out_err;
+ /* TPM_STS <- 0x40 (commandReady) */
+ i2c_nuvoton_ready(chip);
+ } else {
+ /*
+ * timeout_b reached - command was
+ * aborted. TIS should now be in idle state -
+ * only TPM_STS_VALID should be set
+ */
+ if (i2c_nuvoton_read_status(chip) !=
+ TPM_STS_VALID) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+ }
+ }
+
+ if (tpm_get_timeouts(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+
+static int i2c_nuvoton_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip)
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(dev);
+ kfree(chip);
+ return 0;
+}
+
+
+static const struct i2c_device_id i2c_nuvoton_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_nuvoton_of_match[] = {
+ {.compatible = "nuvoton,npct501"},
+ {.compatible = "winbond,wpct301"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_nuvoton_driver = {
+ .id_table = i2c_nuvoton_id,
+ .probe = i2c_nuvoton_probe,
+ .remove = i2c_nuvoton_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &i2c_nuvoton_pm_ops,
+ .of_match_table = of_match_ptr(i2c_nuvoton_of_match),
+ },
+};
+
+module_i2c_driver(i2c_nuvoton_driver);
+
+MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5bb8e2ddd3b..a0d6ceb5d00 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static struct attribute *stm_tpm_attrs[] = {
@@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
tpm_get_timeouts(chip);
- i2c_set_clientdata(client, chip);
-
dev_info(chip->dev, "TPM I2C Initialized\n");
return 0;
_irq_set:
@@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
/*
* tpm_st33_i2c_pm_suspend suspend the TPM device
- * Added: Work around when suspend and no tpm application is running, suspend
- * may fail because chip->data_buffer is not set (only set in tpm_open in Linux
- * TPM core)
* @param: client, the i2c_client drescription (TPM I2C description).
* @param: mesg, the power management message.
* @return: 0 in case of success.
*/
static int tpm_st33_i2c_pm_suspend(struct device *dev)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
struct st33zp24_platform_data *pin_infos = dev->platform_data;
int ret = 0;
if (power_mgt) {
gpio_set_value(pin_infos->io_lpcpd, 0);
} else {
- if (chip->data_buffer == NULL)
- chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
ret = tpm_pm_suspend(dev);
}
return ret;
@@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev)
TPM_STS_VALID) == TPM_STS_VALID,
chip->vendor.timeout_b);
} else {
- if (chip->data_buffer == NULL)
- chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
ret = tpm_pm_resume(dev);
if (!ret)
tpm_do_selftest(chip);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 56b07c35a13..2783a42aa73 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if (count < len) {
dev_err(ibmvtpm->dev,
- "Invalid size in recv: count=%ld, crq_size=%d\n",
+ "Invalid size in recv: count=%zd, crq_size=%d\n",
count, len);
return -EIO;
}
@@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
if (count > ibmvtpm->rtce_size) {
dev_err(ibmvtpm->dev,
- "Invalid size in send: count=%ld, rtce_size=%d\n",
+ "Invalid size in send: count=%zd, rtce_size=%d\n",
count, ibmvtpm->rtce_size);
return -EIO;
}
@@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 2168d15bc72..8e562dc6560 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent)
{
return sysfs_create_group(parent, &ppi_attr_grp);
}
-EXPORT_SYMBOL_GPL(tpm_add_ppi);
void tpm_remove_ppi(struct kobject *parent)
{
sysfs_remove_group(parent, &ppi_attr_grp);
}
-EXPORT_SYMBOL_GPL(tpm_remove_ppi);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 5796d0157ce..1b74459c072 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 94c280d36e8..c8ff4df8177 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
tpm_get_timeouts(priv->chip);
- dev_set_drvdata(&dev->dev, priv->chip);
-
return rv;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b79cf3e1b79..feea87cc6b8 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -577,7 +577,8 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
spin_lock(&portdev->c_ovq_lock);
if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
- while (!virtqueue_get_buf(vq, &len))
+ while (!virtqueue_get_buf(vq, &len)
+ && !virtqueue_is_broken(vq))
cpu_relax();
}
spin_unlock(&portdev->c_ovq_lock);
@@ -650,7 +651,8 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
* we need to kmalloc a GFP_ATOMIC buffer each time the
* console driver writes something out.
*/
- while (!virtqueue_get_buf(out_vq, &len))
+ while (!virtqueue_get_buf(out_vq, &len)
+ && !virtqueue_is_broken(out_vq))
cpu_relax();
done:
spin_unlock_irqrestore(&port->outvq_lock, flags);
@@ -1837,12 +1839,8 @@ static void config_intr(struct virtio_device *vdev)
struct port *port;
u16 rows, cols;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &cols, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &rows, sizeof(u16));
+ virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+ virtio_cread(vdev, struct virtio_console_config, rows, &rows);
port = find_port_by_id(portdev, 0);
set_console_size(port, rows, cols);
@@ -2014,10 +2012,9 @@ static int virtcons_probe(struct virtio_device *vdev)
/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
if (!is_rproc_serial(vdev) &&
- virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
- offsetof(struct virtio_console_config,
- max_nr_ports),
- &portdev->config.max_nr_ports) == 0) {
+ virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ struct virtio_console_config, max_nr_ports,
+ &portdev->config.max_nr_ports) == 0) {
multiport = true;
}
@@ -2142,7 +2139,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
static unsigned int rproc_serial_features[] = {
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtcons_freeze(struct virtio_device *vdev)
{
struct ports_device *portdev;
@@ -2220,7 +2217,7 @@ static struct virtio_driver virtio_console = {
.probe = virtcons_probe,
.remove = virtcons_remove,
.config_changed = config_intr,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtcons_freeze,
.restore = virtcons_restore,
#endif
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 5224da5202d..f6345f932e4 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -721,7 +721,7 @@ static int hwicap_remove(struct device *dev)
{
struct hwicap_drvdata *drvdata;
- drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev);
+ drvdata = dev_get_drvdata(dev);
if (!drvdata)
return 0;
@@ -731,7 +731,6 @@ static int hwicap_remove(struct device *dev)
iounmap(drvdata->base_address);
release_mem_region(drvdata->mem_start, drvdata->mem_size);
kfree(drvdata);
- dev_set_drvdata(dev, NULL);
mutex_lock(&icap_sem);
probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 279407a3639..5c51115081b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -93,6 +93,20 @@ config CLK_PPC_CORENET
This adds the clock driver support for Freescale PowerPC corenet
platforms using common clock framework.
+config COMMON_CLK_XGENE
+ bool "Clock driver for APM XGene SoC"
+ default y
+ depends on ARM64
+ ---help---
+ Support for the APM X-Gene SoC reference, PLL, and device clocks.
+
+config COMMON_CLK_KEYSTONE
+ tristate "Clock drivers for Keystone based SOCs"
+ depends on ARCH_KEYSTONE && OF
+ ---help---
+ Supports clock drivers for Keystone based SOCs. These SOCs have a local
+ power sleep control module that gates the clocks to the IPs and PLLs.
+
endmenu
source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 7b111062ccb..7a10bc9a23e 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-composite.o
# SoCs specific
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
+obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
@@ -32,6 +33,8 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
+obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
obj-$(CONFIG_X86) += x86/
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 5fb4ff53d08..6b950ca8b71 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -20,14 +20,8 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/bcm2835.h>
-#include <linux/clk-provider.h>
#include <linux/of.h>
-static const struct of_device_id clk_match[] __initconst = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { }
-};
-
/*
* These are fixed clocks. They're probably not all root clocks and it may
* be possible to turn them on and off but until this is mapped out better
@@ -63,6 +57,4 @@ void __init bcm2835_init_clocks(void)
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
if (ret)
pr_err("uart1_pclk alias not registered\n");
-
- of_clk_init(clk_match);
}
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
new file mode 100644
index 00000000000..bac2ddf49d0
--- /dev/null
+++ b/drivers/clk/clk-efm32gg.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/efm32-cmu.h>
+
+#define CMU_HFPERCLKEN0 0x44
+
+static struct clk *clk[37];
+static struct clk_onecell_data clk_data = {
+ .clks = clk,
+ .clk_num = ARRAY_SIZE(clk),
+};
+
+static int __init efm32gg_cmu_init(struct device_node *np)
+{
+ int i;
+ void __iomem *base;
+
+ for (i = 0; i < ARRAY_SIZE(clk); ++i)
+ clk[i] = ERR_PTR(-ENOENT);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("Failed to map address range for efm32gg,cmu node\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
+ CLK_IS_ROOT, 48000000);
+
+ clk[clk_HFPERCLKUSART0] = clk_register_gate(NULL, "HFPERCLK.USART0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
+ clk[clk_HFPERCLKUSART1] = clk_register_gate(NULL, "HFPERCLK.USART1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 1, 0, NULL);
+ clk[clk_HFPERCLKUSART2] = clk_register_gate(NULL, "HFPERCLK.USART2",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 2, 0, NULL);
+ clk[clk_HFPERCLKUART0] = clk_register_gate(NULL, "HFPERCLK.UART0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 3, 0, NULL);
+ clk[clk_HFPERCLKUART1] = clk_register_gate(NULL, "HFPERCLK.UART1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 4, 0, NULL);
+ clk[clk_HFPERCLKTIMER0] = clk_register_gate(NULL, "HFPERCLK.TIMER0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 5, 0, NULL);
+ clk[clk_HFPERCLKTIMER1] = clk_register_gate(NULL, "HFPERCLK.TIMER1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 6, 0, NULL);
+ clk[clk_HFPERCLKTIMER2] = clk_register_gate(NULL, "HFPERCLK.TIMER2",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 7, 0, NULL);
+ clk[clk_HFPERCLKTIMER3] = clk_register_gate(NULL, "HFPERCLK.TIMER3",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 8, 0, NULL);
+ clk[clk_HFPERCLKACMP0] = clk_register_gate(NULL, "HFPERCLK.ACMP0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 9, 0, NULL);
+ clk[clk_HFPERCLKACMP1] = clk_register_gate(NULL, "HFPERCLK.ACMP1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 10, 0, NULL);
+ clk[clk_HFPERCLKI2C0] = clk_register_gate(NULL, "HFPERCLK.I2C0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 11, 0, NULL);
+ clk[clk_HFPERCLKI2C1] = clk_register_gate(NULL, "HFPERCLK.I2C1",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 12, 0, NULL);
+ clk[clk_HFPERCLKGPIO] = clk_register_gate(NULL, "HFPERCLK.GPIO",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 13, 0, NULL);
+ clk[clk_HFPERCLKVCMP] = clk_register_gate(NULL, "HFPERCLK.VCMP",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 14, 0, NULL);
+ clk[clk_HFPERCLKPRS] = clk_register_gate(NULL, "HFPERCLK.PRS",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 15, 0, NULL);
+ clk[clk_HFPERCLKADC0] = clk_register_gate(NULL, "HFPERCLK.ADC0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 16, 0, NULL);
+ clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0",
+ "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
+
+ return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 0e1d89b4321..d9e3f671c2e 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -117,7 +117,7 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
}
if (of_property_read_u32(node, "clock-mult", &mult)) {
- pr_err("%s Fixed factor clock <%s> must have a clokc-mult property\n",
+ pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n",
__func__, node->name);
return;
}
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 2e08cb00193..2e7e9d9798c 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -20,8 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
-
-extern void __iomem *sregs_base;
+#include <linux/of_address.h>
#define HB_PLL_LOCK_500 0x20000000
#define HB_PLL_LOCK 0x10000000
@@ -280,6 +279,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
const char *clk_name = node->name;
const char *parent_name;
struct clk_init_data init;
+ struct device_node *srnp;
int rc;
rc = of_property_read_u32(node, "reg", &reg);
@@ -290,7 +290,11 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
if (WARN_ON(!hb_clk))
return NULL;
- hb_clk->reg = sregs_base + reg;
+ /* Map system registers */
+ srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
+ hb_clk->reg = of_iomap(srnp, 0);
+ BUG_ON(!hb_clk->reg);
+ hb_clk->reg += reg;
of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 4d978a3c88f..6a934a5296b 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -62,6 +62,79 @@ static DEFINE_SPINLOCK(src_lock);
/* Base address of the SRC */
static void __iomem *src_base;
+static int nomadik_clk_reboot_handler(struct notifier_block *this,
+ unsigned long code,
+ void *unused)
+{
+ u32 val;
+
+ /* The main crystal needs to be enabled for reboot to work */
+ val = readl(src_base + SRC_XTALCR);
+ val &= ~SRC_XTALCR_MXTALOVER;
+ val |= SRC_XTALCR_MXTALEN;
+ pr_crit("force-enabling MXTALO\n");
+ writel(val, src_base + SRC_XTALCR);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nomadik_clk_reboot_notifier = {
+ .notifier_call = nomadik_clk_reboot_handler,
+};
+
+static const struct of_device_id nomadik_src_match[] __initconst = {
+ { .compatible = "stericsson,nomadik-src" },
+ { /* sentinel */ }
+};
+
+static void __init nomadik_src_init(void)
+{
+ struct device_node *np;
+ u32 val;
+
+ np = of_find_matching_node(NULL, nomadik_src_match);
+ if (!np) {
+ pr_crit("no matching node for SRC, aborting clock init\n");
+ return;
+ }
+ src_base = of_iomap(np, 0);
+ if (!src_base) {
+ pr_err("%s: must have src parent node with REGS (%s)\n",
+ __func__, np->name);
+ return;
+ }
+
+ /* Set all timers to use the 2.4 MHz TIMCLK */
+ val = readl(src_base + SRC_CR);
+ val |= SRC_CR_T0_ENSEL;
+ val |= SRC_CR_T1_ENSEL;
+ val |= SRC_CR_T2_ENSEL;
+ val |= SRC_CR_T3_ENSEL;
+ val |= SRC_CR_T4_ENSEL;
+ val |= SRC_CR_T5_ENSEL;
+ val |= SRC_CR_T6_ENSEL;
+ val |= SRC_CR_T7_ENSEL;
+ writel(val, src_base + SRC_CR);
+
+ val = readl(src_base + SRC_XTALCR);
+ pr_info("SXTALO is %s\n",
+ (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ pr_info("MXTAL is %s\n",
+ (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ if (of_property_read_bool(np, "disable-sxtalo")) {
+ /* The machine uses an external oscillator circuit */
+ val |= SRC_XTALCR_SXTALDIS;
+ pr_info("disabling SXTALO\n");
+ }
+ if (of_property_read_bool(np, "disable-mxtalo")) {
+ /* Disable this too: also run by external oscillator */
+ val |= SRC_XTALCR_MXTALOVER;
+ val &= ~SRC_XTALCR_MXTALEN;
+ pr_info("disabling MXTALO\n");
+ }
+ writel(val, src_base + SRC_XTALCR);
+ register_reboot_notifier(&nomadik_clk_reboot_notifier);
+}
+
/**
* struct clk_pll1 - Nomadik PLL1 clock
* @hw: corresponding clock hardware entry
@@ -439,6 +512,9 @@ static void __init of_nomadik_pll_setup(struct device_node *np)
const char *parent_name;
u32 pll_id;
+ if (!src_base)
+ nomadik_src_init();
+
if (of_property_read_u32(np, "pll-id", &pll_id)) {
pr_err("%s: PLL \"%s\" missing pll-id property\n",
__func__, clk_name);
@@ -449,6 +525,8 @@ static void __init of_nomadik_pll_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(nomadik_pll_clk,
+ "st,nomadik-pll-clock", of_nomadik_pll_setup);
static void __init of_nomadik_hclk_setup(struct device_node *np)
{
@@ -456,6 +534,9 @@ static void __init of_nomadik_hclk_setup(struct device_node *np)
const char *clk_name = np->name;
const char *parent_name;
+ if (!src_base)
+ nomadik_src_init();
+
parent_name = of_clk_get_parent_name(np, 0);
/*
* The HCLK divides PLL1 with 1 (passthru), 2, 3 or 4.
@@ -468,6 +549,8 @@ static void __init of_nomadik_hclk_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(nomadik_hclk_clk,
+ "st,nomadik-hclk-clock", of_nomadik_hclk_setup);
static void __init of_nomadik_src_clk_setup(struct device_node *np)
{
@@ -476,6 +559,9 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
const char *parent_name;
u32 clk_id;
+ if (!src_base)
+ nomadik_src_init();
+
if (of_property_read_u32(np, "clock-id", &clk_id)) {
pr_err("%s: SRC clock \"%s\" missing clock-id property\n",
__func__, clk_name);
@@ -486,102 +572,5 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-
-static const struct of_device_id nomadik_src_match[] __initconst = {
- { .compatible = "stericsson,nomadik-src" },
- { /* sentinel */ }
-};
-
-static const struct of_device_id nomadik_src_clk_match[] __initconst = {
- {
- .compatible = "fixed-clock",
- .data = of_fixed_clk_setup,
- },
- {
- .compatible = "fixed-factor-clock",
- .data = of_fixed_factor_clk_setup,
- },
- {
- .compatible = "st,nomadik-pll-clock",
- .data = of_nomadik_pll_setup,
- },
- {
- .compatible = "st,nomadik-hclk-clock",
- .data = of_nomadik_hclk_setup,
- },
- {
- .compatible = "st,nomadik-src-clock",
- .data = of_nomadik_src_clk_setup,
- },
- { /* sentinel */ }
-};
-
-static int nomadik_clk_reboot_handler(struct notifier_block *this,
- unsigned long code,
- void *unused)
-{
- u32 val;
-
- /* The main chrystal need to be enabled for reboot to work */
- val = readl(src_base + SRC_XTALCR);
- val &= ~SRC_XTALCR_MXTALOVER;
- val |= SRC_XTALCR_MXTALEN;
- pr_crit("force-enabling MXTALO\n");
- writel(val, src_base + SRC_XTALCR);
- return NOTIFY_OK;
-}
-
-static struct notifier_block nomadik_clk_reboot_notifier = {
- .notifier_call = nomadik_clk_reboot_handler,
-};
-
-void __init nomadik_clk_init(void)
-{
- struct device_node *np;
- u32 val;
-
- np = of_find_matching_node(NULL, nomadik_src_match);
- if (!np) {
- pr_crit("no matching node for SRC, aborting clock init\n");
- return;
- }
- src_base = of_iomap(np, 0);
- if (!src_base) {
- pr_err("%s: must have src parent node with REGS (%s)\n",
- __func__, np->name);
- return;
- }
-
- /* Set all timers to use the 2.4 MHz TIMCLK */
- val = readl(src_base + SRC_CR);
- val |= SRC_CR_T0_ENSEL;
- val |= SRC_CR_T1_ENSEL;
- val |= SRC_CR_T2_ENSEL;
- val |= SRC_CR_T3_ENSEL;
- val |= SRC_CR_T4_ENSEL;
- val |= SRC_CR_T5_ENSEL;
- val |= SRC_CR_T6_ENSEL;
- val |= SRC_CR_T7_ENSEL;
- writel(val, src_base + SRC_CR);
-
- val = readl(src_base + SRC_XTALCR);
- pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
- pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
- if (of_property_read_bool(np, "disable-sxtalo")) {
- /* The machine uses an external oscillator circuit */
- val |= SRC_XTALCR_SXTALDIS;
- pr_info("disabling SXTALO\n");
- }
- if (of_property_read_bool(np, "disable-mxtalo")) {
- /* Disable this too: also run by external oscillator */
- val |= SRC_XTALCR_MXTALOVER;
- val &= ~SRC_XTALCR_MXTALEN;
- pr_info("disabling MXTALO\n");
- }
- writel(val, src_base + SRC_XTALCR);
- register_reboot_notifier(&nomadik_clk_reboot_notifier);
-
- of_clk_init(nomadik_src_clk_match);
-}
+CLK_OF_DECLARE(nomadik_src_clk,
+ "st,nomadik-src-clock", of_nomadik_src_clk_setup);
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index e9587073bd3..c4f76ed914b 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index 5ab95f1ad57..6c15e331613 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1015,16 +1015,6 @@ static struct clk_std clk_usb1 = {
},
};
-static struct of_device_id clkc_ids[] = {
- { .compatible = "sirf,prima2-clkc" },
- {},
-};
-
-static struct of_device_id rsc_ids[] = {
- { .compatible = "sirf,prima2-rsc" },
- {},
-};
-
enum prima2_clk_index {
/* 0 1 2 3 4 5 6 7 8 9 */
rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
@@ -1082,24 +1072,16 @@ static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
static struct clk *prima2_clks[maxclk];
static struct clk_onecell_data clk_data;
-void __init sirfsoc_of_clk_init(void)
+static void __init sirfsoc_clk_init(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *rscnp;
int i;
- np = of_find_matching_node(NULL, rsc_ids);
- if (!np)
- panic("unable to find compatible rsc node in dtb\n");
-
- sirfsoc_rsc_vbase = of_iomap(np, 0);
+ rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
+ sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
if (!sirfsoc_rsc_vbase)
panic("unable to map rsc registers\n");
-
- of_node_put(np);
-
- np = of_find_matching_node(NULL, clkc_ids);
- if (!np)
- return;
+ of_node_put(rscnp);
sirfsoc_clk_vbase = of_iomap(np, 0);
if (!sirfsoc_clk_vbase)
@@ -1124,3 +1106,4 @@ void __init sirfsoc_of_clk_init(void)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
+CLK_OF_DECLARE(sirfsoc_clk, "sirf,prima2-clkc", sirfsoc_clk_init);
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 82306f5fb9c..7fd5c5e9e25 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -15,11 +15,14 @@
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#define LEGACY_PMC_BASE 0xD8130000
+
/* All clocks share the same lock as none can be changed concurrently */
static DEFINE_SPINLOCK(_lock);
@@ -53,6 +56,21 @@ struct clk_pll {
static void __iomem *pmc_base;
+static __init void vtwm_set_pmc_base(void)
+{
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "via,vt8500-pmc");
+
+ if (np)
+ pmc_base = of_iomap(np, 0);
+ else
+ pmc_base = ioremap(LEGACY_PMC_BASE, 0x1000);
+ of_node_put(np);
+
+ if (!pmc_base)
+ pr_err("%s:of_iomap(pmc) failed\n", __func__);
+}
+
#define to_clk_device(_hw) container_of(_hw, struct clk_device, hw)
#define VT8500_PMC_BUSY_MASK 0x18
@@ -222,6 +240,9 @@ static __init void vtwm_device_clk_init(struct device_node *node)
int rc;
int clk_init_flags = 0;
+ if (!pmc_base)
+ vtwm_set_pmc_base();
+
dev_clk = kzalloc(sizeof(*dev_clk), GFP_KERNEL);
if (WARN_ON(!dev_clk))
return;
@@ -636,6 +657,9 @@ static __init void vtwm_pll_clk_init(struct device_node *node, int pll_type)
struct clk_init_data init;
int rc;
+ if (!pmc_base)
+ vtwm_set_pmc_base();
+
rc = of_property_read_u32(node, "reg", &reg);
if (WARN_ON(rc))
return;
@@ -694,13 +718,3 @@ static void __init wm8850_pll_init(struct device_node *node)
vtwm_pll_clk_init(node, PLL_TYPE_WM8850);
}
CLK_OF_DECLARE(wm8850_pll, "wm,wm8850-pll-clock", wm8850_pll_init);
-
-void __init vtwm_clk_init(void __iomem *base)
-{
- if (!base)
- return;
-
- pmc_base = base;
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 805b4c34400..b131041c8f4 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -391,14 +391,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
return 0;
}
-static int wm831x_clk_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver wm831x_clk_driver = {
.probe = wm831x_clk_probe,
- .remove = wm831x_clk_remove,
.driver = {
.name = "wm831x-clk",
.owner = THIS_MODULE,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
new file mode 100644
index 00000000000..dd8a62d8f11
--- /dev/null
+++ b/drivers/clk/clk-xgene.c
@@ -0,0 +1,521 @@
+/*
+ * clk-xgene.c - AppliedMicro X-Gene Clock Interface
+ *
+ * Copyright (c) 2013, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <asm/setup.h>
+
+/* Register SCU_PCPPLL bit fields */
+#define N_DIV_RD(src) (((src) & 0x000001ff))
+
+/* Register SCU_SOCPLL bit fields */
+#define CLKR_RD(src) (((src) & 0x07000000)>>24)
+#define CLKOD_RD(src) (((src) & 0x00300000)>>20)
+#define REGSPEC_RESET_F1_MASK 0x00010000
+#define CLKF_RD(src) (((src) & 0x000001ff))
+
+#define XGENE_CLK_DRIVER_VER "0.1"
+
+static DEFINE_SPINLOCK(clk_lock);
+
+static inline u32 xgene_clk_read(void *csr)
+{
+ return readl_relaxed(csr);
+}
+
+static inline void xgene_clk_write(u32 data, void *csr)
+{
+ return writel_relaxed(data, csr);
+}
+
+/* PLL Clock */
+enum xgene_pll_type {
+ PLL_TYPE_PCP = 0,
+ PLL_TYPE_SOC = 1,
+};
+
+struct xgene_clk_pll {
+ struct clk_hw hw;
+ const char *name;
+ void __iomem *reg;
+ spinlock_t *lock;
+ u32 pll_offset;
+ enum xgene_pll_type type;
+};
+
+#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
+
+static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
+ u32 data;
+
+ data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
+ pr_debug("%s pll %s\n", pllclk->name,
+ data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
+
+ return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
+}
+
+static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
+ unsigned long fref;
+ unsigned long fvco;
+ u32 pll;
+ u32 nref;
+ u32 nout;
+ u32 nfb;
+
+ pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
+
+ if (pllclk->type == PLL_TYPE_PCP) {
+ /*
+ * PLL VCO = Reference clock * NF
+ * PCP PLL = PLL_VCO / 2
+ */
+ nout = 2;
+ fvco = parent_rate * (N_DIV_RD(pll) + 4);
+ } else {
+ /*
+ * Fref = Reference Clock / NREF;
+ * Fvco = Fref * NFB;
+ * Fout = Fvco / NOUT;
+ */
+ nref = CLKR_RD(pll) + 1;
+ nout = CLKOD_RD(pll) + 1;
+ nfb = CLKF_RD(pll);
+ fref = parent_rate / nref;
+ fvco = fref * nfb;
+ }
+ pr_debug("%s pll recalc rate %ld parent %ld\n", pllclk->name,
+ fvco / nout, parent_rate);
+
+ return fvco / nout;
+}
+
+const struct clk_ops xgene_clk_pll_ops = {
+ .is_enabled = xgene_clk_pll_is_enabled,
+ .recalc_rate = xgene_clk_pll_recalc_rate,
+};
+
+static struct clk *xgene_register_clk_pll(struct device *dev,
+ const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u32 pll_offset,
+ u32 type, spinlock_t *lock)
+{
+ struct xgene_clk_pll *apmclk;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the APM clock structure */
+ apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
+ if (!apmclk) {
+ pr_err("%s: could not allocate APM clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &xgene_clk_pll_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ apmclk->name = name;
+ apmclk->reg = reg;
+ apmclk->lock = lock;
+ apmclk->pll_offset = pll_offset;
+ apmclk->type = type;
+ apmclk->hw.init = &init;
+
+ /* Register the clock */
+ clk = clk_register(dev, &apmclk->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: could not register clk %s\n", __func__, name);
+ kfree(apmclk);
+ return NULL;
+ }
+ return clk;
+}
+
+static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
+{
+ const char *clk_name = np->full_name;
+ struct clk *clk;
+ void *reg;
+
+ reg = of_iomap(np, 0);
+ if (reg == NULL) {
+ pr_err("Unable to map CSR register for %s\n", np->full_name);
+ return;
+ }
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ clk = xgene_register_clk_pll(NULL,
+ clk_name, of_clk_get_parent_name(np, 0),
+ CLK_IS_ROOT, reg, 0, pll_type, &clk_lock);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ pr_debug("Add %s clock PLL\n", clk_name);
+ }
+}
+
+static void xgene_socpllclk_init(struct device_node *np)
+{
+ xgene_pllclk_init(np, PLL_TYPE_SOC);
+}
+
+static void xgene_pcppllclk_init(struct device_node *np)
+{
+ xgene_pllclk_init(np, PLL_TYPE_PCP);
+}
+
+/* IP Clock */
+struct xgene_dev_parameters {
+ void __iomem *csr_reg; /* CSR for IP clock */
+ u32 reg_clk_offset; /* Offset to clock enable CSR */
+ u32 reg_clk_mask; /* Mask bit for clock enable */
+ u32 reg_csr_offset; /* Offset to CSR reset */
+ u32 reg_csr_mask; /* Mask bit for disable CSR reset */
+ void __iomem *divider_reg; /* CSR for divider */
+ u32 reg_divider_offset; /* Offset to divider register */
+ u32 reg_divider_shift; /* Bit shift to divider field */
+ u32 reg_divider_width; /* Width of the bit to divider field */
+};
+
+struct xgene_clk {
+ struct clk_hw hw;
+ const char *name;
+ spinlock_t *lock;
+ struct xgene_dev_parameters param;
+};
+
+#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)
+
+static int xgene_clk_enable(struct clk_hw *hw)
+{
+ struct xgene_clk *pclk = to_xgene_clk(hw);
+ unsigned long flags = 0;
+ u32 data;
+
+ if (pclk->lock)
+ spin_lock_irqsave(pclk->lock, flags);
+
+ if (pclk->param.csr_reg != NULL) {
+ pr_debug("%s clock enabled\n", pclk->name);
+ /* First enable the clock */
+ data = xgene_clk_read(pclk->param.csr_reg +
+ pclk->param.reg_clk_offset);
+ data |= pclk->param.reg_clk_mask;
+ xgene_clk_write(data, pclk->param.csr_reg +
+ pclk->param.reg_clk_offset);
+ pr_debug("%s clock PADDR base 0x%016LX clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+ pclk->name, __pa(pclk->param.csr_reg),
+ pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
+ data);
+
+ /* Second enable the CSR */
+ data = xgene_clk_read(pclk->param.csr_reg +
+ pclk->param.reg_csr_offset);
+ data &= ~pclk->param.reg_csr_mask;
+ xgene_clk_write(data, pclk->param.csr_reg +
+ pclk->param.reg_csr_offset);
+ pr_debug("%s CSR RESET PADDR base 0x%016LX csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+ pclk->name, __pa(pclk->param.csr_reg),
+ pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
+ data);
+ }
+
+ if (pclk->lock)
+ spin_unlock_irqrestore(pclk->lock, flags);
+
+ return 0;
+}
+
+static void xgene_clk_disable(struct clk_hw *hw)
+{
+ struct xgene_clk *pclk = to_xgene_clk(hw);
+ unsigned long flags = 0;
+ u32 data;
+
+ if (pclk->lock)
+ spin_lock_irqsave(pclk->lock, flags);
+
+ if (pclk->param.csr_reg != NULL) {
+ pr_debug("%s clock disabled\n", pclk->name);
+ /* First put the CSR in reset */
+ data = xgene_clk_read(pclk->param.csr_reg +
+ pclk->param.reg_csr_offset);
+ data |= pclk->param.reg_csr_mask;
+ xgene_clk_write(data, pclk->param.csr_reg +
+ pclk->param.reg_csr_offset);
+
+ /* Second disable the clock */
+ data = xgene_clk_read(pclk->param.csr_reg +
+ pclk->param.reg_clk_offset);
+ data &= ~pclk->param.reg_clk_mask;
+ xgene_clk_write(data, pclk->param.csr_reg +
+ pclk->param.reg_clk_offset);
+ }
+
+ if (pclk->lock)
+ spin_unlock_irqrestore(pclk->lock, flags);
+}
+
+static int xgene_clk_is_enabled(struct clk_hw *hw)
+{
+ struct xgene_clk *pclk = to_xgene_clk(hw);
+ u32 data = 0;
+
+ if (pclk->param.csr_reg != NULL) {
+ pr_debug("%s clock checking\n", pclk->name);
+ data = xgene_clk_read(pclk->param.csr_reg +
+ pclk->param.reg_clk_offset);
+ pr_debug("%s clock is %s\n", pclk->name,
+ data & pclk->param.reg_clk_mask ? "enabled" :
+ "disabled");
+ }
+
+ if (pclk->param.csr_reg == NULL)
+ return 1;
+ return data & pclk->param.reg_clk_mask ? 1 : 0;
+}
+
+static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct xgene_clk *pclk = to_xgene_clk(hw);
+ u32 data;
+
+ if (pclk->param.divider_reg) {
+ data = xgene_clk_read(pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+ data >>= pclk->param.reg_divider_shift;
+ data &= (1 << pclk->param.reg_divider_width) - 1;
+
+ pr_debug("%s clock recalc rate %ld parent %ld\n",
+ pclk->name, parent_rate / data, parent_rate);
+ return parent_rate / data;
+ } else {
+ pr_debug("%s clock recalc rate %ld parent %ld\n",
+ pclk->name, parent_rate, parent_rate);
+ return parent_rate;
+ }
+}
+
+static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct xgene_clk *pclk = to_xgene_clk(hw);
+ unsigned long flags = 0;
+ u32 data;
+ u32 divider;
+ u32 divider_save;
+
+ if (pclk->lock)
+ spin_lock_irqsave(pclk->lock, flags);
+
+ if (pclk->param.divider_reg) {
+ /* Let's compute the divider */
+ if (rate > parent_rate)
+ rate = parent_rate;
+ divider_save = divider = parent_rate / rate; /* Rounded down */
+ divider &= (1 << pclk->param.reg_divider_width) - 1;
+ divider <<= pclk->param.reg_divider_shift;
+
+ /* Set new divider */
+ data = xgene_clk_read(pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+ data &= ~((1 << pclk->param.reg_divider_width) - 1);
+ data |= divider;
+ xgene_clk_write(data, pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+ pr_debug("%s clock set rate %ld\n", pclk->name,
+ parent_rate / divider_save);
+ } else {
+ divider_save = 1;
+ }
+
+ if (pclk->lock)
+ spin_unlock_irqrestore(pclk->lock, flags);
+
+ return parent_rate / divider_save;
+}
+
+static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct xgene_clk *pclk = to_xgene_clk(hw);
+ unsigned long parent_rate = *prate;
+ u32 divider;
+
+ if (pclk->param.divider_reg) {
+ /* Let's compute the divider */
+ if (rate > parent_rate)
+ rate = parent_rate;
+ divider = parent_rate / rate; /* Rounded down */
+ } else {
+ divider = 1;
+ }
+
+ return parent_rate / divider;
+}
+
+const struct clk_ops xgene_clk_ops = {
+ .enable = xgene_clk_enable,
+ .disable = xgene_clk_disable,
+ .is_enabled = xgene_clk_is_enabled,
+ .recalc_rate = xgene_clk_recalc_rate,
+ .set_rate = xgene_clk_set_rate,
+ .round_rate = xgene_clk_round_rate,
+};
+
+static struct clk *xgene_register_clk(struct device *dev,
+ const char *name, const char *parent_name,
+ struct xgene_dev_parameters *parameters, spinlock_t *lock)
+{
+ struct xgene_clk *apmclk;
+ struct clk *clk;
+ struct clk_init_data init;
+ int rc;
+
+ /* allocate the APM clock structure */
+ apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
+ if (!apmclk) {
+ pr_err("%s: could not allocate APM clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &xgene_clk_ops;
+ init.flags = 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ apmclk->name = name;
+ apmclk->lock = lock;
+ apmclk->hw.init = &init;
+ apmclk->param = *parameters;
+
+ /* Register the clock */
+ clk = clk_register(dev, &apmclk->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: could not register clk %s\n", __func__, name);
+ kfree(apmclk);
+ return clk;
+ }
+
+ /* Register the clock for lookup */
+ rc = clk_register_clkdev(clk, name, NULL);
+ if (rc != 0) {
+ pr_err("%s: could not register lookup clk %s\n",
+ __func__, name);
+ }
+ return clk;
+}
+
+static void __init xgene_devclk_init(struct device_node *np)
+{
+ const char *clk_name = np->full_name;
+ struct clk *clk;
+ struct resource res;
+ int rc;
+ struct xgene_dev_parameters parameters;
+ int i;
+
+ /* Check if the entry is disabled */
+ if (!of_device_is_available(np))
+ return;
+
+ /* Parse the DTS register for resource */
+ parameters.csr_reg = NULL;
+ parameters.divider_reg = NULL;
+ for (i = 0; i < 2; i++) {
+ void *map_res;
+ rc = of_address_to_resource(np, i, &res);
+ if (rc != 0) {
+ if (i == 0) {
+ pr_err("no DTS register for %s\n",
+ np->full_name);
+ return;
+ }
+ break;
+ }
+ map_res = of_iomap(np, i);
+ if (map_res == NULL) {
+ pr_err("Unable to map resource %d for %s\n",
+ i, np->full_name);
+ goto err;
+ }
+ if (strcmp(res.name, "div-reg") == 0)
+ parameters.divider_reg = map_res;
+ else /* if (strcmp(res->name, "csr-reg") == 0) */
+ parameters.csr_reg = map_res;
+ }
+ if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
+ parameters.reg_csr_offset = 0;
+ if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
+ parameters.reg_csr_mask = 0xF;
+ if (of_property_read_u32(np, "enable-offset",
+ &parameters.reg_clk_offset))
+ parameters.reg_clk_offset = 0x8;
+ if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
+ parameters.reg_clk_mask = 0xF;
+ if (of_property_read_u32(np, "divider-offset",
+ &parameters.reg_divider_offset))
+ parameters.reg_divider_offset = 0;
+ if (of_property_read_u32(np, "divider-width",
+ &parameters.reg_divider_width))
+ parameters.reg_divider_width = 0;
+ if (of_property_read_u32(np, "divider-shift",
+ &parameters.reg_divider_shift))
+ parameters.reg_divider_shift = 0;
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ clk = xgene_register_clk(NULL, clk_name,
+ of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
+ if (IS_ERR(clk))
+ goto err;
+ pr_debug("Add %s clock\n", clk_name);
+ rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (rc != 0)
+ pr_err("%s: could register provider clk %s\n", __func__,
+ np->full_name);
+
+ return;
+
+err:
+ if (parameters.csr_reg)
+ iounmap(parameters.csr_reg);
+ if (parameters.divider_reg)
+ iounmap(parameters.divider_reg);
+}
+
+CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
+CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
+CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index a004769528e..2cf2ea6b77a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1080,13 +1080,16 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_rate);
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
- u8 i;
+ int i;
- if (!clk->parents)
- clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
- GFP_KERNEL);
+ if (!clk->parents) {
+ clk->parents = kcalloc(clk->num_parents,
+ sizeof(struct clk *), GFP_KERNEL);
+ if (!clk->parents)
+ return -ENOMEM;
+ }
/*
* find index of new parent clock using cached parent ptrs,
@@ -1094,16 +1097,19 @@ static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
* them now to avoid future calls to __clk_lookup.
*/
for (i = 0; i < clk->num_parents; i++) {
- if (clk->parents && clk->parents[i] == parent)
- break;
- else if (!strcmp(clk->parent_names[i], parent->name)) {
- if (clk->parents)
- clk->parents[i] = __clk_lookup(parent->name);
- break;
+ if (clk->parents[i] == parent)
+ return i;
+
+ if (clk->parents[i])
+ continue;
+
+ if (!strcmp(clk->parent_names[i], parent->name)) {
+ clk->parents[i] = __clk_lookup(parent->name);
+ return i;
}
}
- return i;
+ return -EINVAL;
}
static void clk_reparent(struct clk *clk, struct clk *new_parent)
@@ -1265,7 +1271,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
struct clk *old_parent, *parent;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
- u8 p_index = 0;
+ int p_index = 0;
/* sanity */
if (IS_ERR_OR_NULL(clk))
@@ -1306,7 +1312,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(clk, parent);
- if (p_index == clk->num_parents) {
+ if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, clk->name);
return NULL;
@@ -1532,7 +1538,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
if (!clk->parents)
clk->parents =
- kzalloc((sizeof(struct clk*) * clk->num_parents),
+ kcalloc(clk->num_parents, sizeof(struct clk *),
GFP_KERNEL);
ret = clk_get_parent_by_index(clk, index);
@@ -1568,7 +1574,7 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
- u8 p_index = 0;
+ int p_index = 0;
unsigned long p_rate = 0;
if (!clk)
@@ -1597,10 +1603,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
if (parent) {
p_index = clk_fetch_parent_index(clk, parent);
p_rate = parent->rate;
- if (p_index == clk->num_parents) {
+ if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, clk->name);
- ret = -EINVAL;
+ ret = p_index;
goto out;
}
}
@@ -1689,8 +1695,8 @@ int __clk_init(struct device *dev, struct clk *clk)
* for clock drivers to statically initialize clk->parents.
*/
if (clk->num_parents > 1 && !clk->parents) {
- clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
- GFP_KERNEL);
+ clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
+ GFP_KERNEL);
/*
* __clk_lookup returns NULL for parents that have not been
* clk_init'd; thus any access to clk->parents[] must check
@@ -1830,8 +1836,8 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
hw->clk = clk;
/* allocate local copy in case parent_names is __initdata */
- clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
- GFP_KERNEL);
+ clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
+ GFP_KERNEL);
if (!clk->parent_names) {
pr_err("%s: could not allocate clk->parent_names\n", __func__);
@@ -2196,6 +2202,12 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
return clk;
}
+int of_clk_get_parent_count(struct device_node *np)
+{
+ return of_count_phandle_with_args(np, "clocks", "#clock-cells");
+}
+EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
+
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
struct of_phandle_args clkspec;
diff --git a/drivers/clk/keystone/Makefile b/drivers/clk/keystone/Makefile
new file mode 100644
index 00000000000..0477cf63f13
--- /dev/null
+++ b/drivers/clk/keystone/Makefile
@@ -0,0 +1 @@
+obj-y += pll.o gate.o
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
new file mode 100644
index 00000000000..1f333bcfc22
--- /dev/null
+++ b/drivers/clk/keystone/gate.c
@@ -0,0 +1,264 @@
+/*
+ * Clock driver for Keystone 2 based devices
+ *
+ * Copyright (C) 2013 Texas Instruments.
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/module.h>
+
+/* PSC register offsets */
+#define PTCMD 0x120
+#define PTSTAT 0x128
+#define PDSTAT 0x200
+#define PDCTL 0x300
+#define MDSTAT 0x800
+#define MDCTL 0xa00
+
+/* PSC module states */
+#define PSC_STATE_SWRSTDISABLE 0
+#define PSC_STATE_SYNCRST 1
+#define PSC_STATE_DISABLE 2
+#define PSC_STATE_ENABLE 3
+
+#define MDSTAT_STATE_MASK 0x3f
+#define MDSTAT_MCKOUT BIT(12)
+#define PDSTAT_STATE_MASK 0x1f
+#define MDCTL_FORCE BIT(31)
+#define MDCTL_LRESET BIT(8)
+#define PDCTL_NEXT BIT(0)
+
+/* Maximum loop count to wait for a module state transition before bailing out */
+#define STATE_TRANS_MAX_COUNT 0xffff
+
+static void __iomem *domain_transition_base;
+
+/**
+ * struct clk_psc_data - PSC data
+ * @control_base: Base address for a PSC control
+ * @domain_base: Base address for a PSC domain
+ * @domain_id: PSC domain id number
+ */
+struct clk_psc_data {
+ void __iomem *control_base;
+ void __iomem *domain_base;
+ u32 domain_id;
+};
+
+/**
+ * struct clk_psc - PSC clock structure
+ * @hw: clk_hw for the psc
+ * @psc_data: PSC driver specific data
+ * @lock: Spinlock used by the driver
+ */
+struct clk_psc {
+ struct clk_hw hw;
+ struct clk_psc_data *psc_data;
+ spinlock_t *lock;
+};
+
+static DEFINE_SPINLOCK(psc_lock);
+
+#define to_clk_psc(_hw) container_of(_hw, struct clk_psc, hw)
+
+static void psc_config(void __iomem *control_base, void __iomem *domain_base,
+ u32 next_state, u32 domain_id)
+{
+ u32 ptcmd, pdstat, pdctl, mdstat, mdctl, ptstat;
+ u32 count = STATE_TRANS_MAX_COUNT;
+
+ mdctl = readl(control_base + MDCTL);
+ mdctl &= ~MDSTAT_STATE_MASK;
+ mdctl |= next_state;
+ /* For disable, we always put the module in local reset */
+ if (next_state == PSC_STATE_DISABLE)
+ mdctl &= ~MDCTL_LRESET;
+ writel(mdctl, control_base + MDCTL);
+
+ pdstat = readl(domain_base + PDSTAT);
+ if (!(pdstat & PDSTAT_STATE_MASK)) {
+ pdctl = readl(domain_base + PDCTL);
+ pdctl |= PDCTL_NEXT;
+ writel(pdctl, domain_base + PDCTL);
+ }
+
+ ptcmd = 1 << domain_id;
+ writel(ptcmd, domain_transition_base + PTCMD);
+ do {
+ ptstat = readl(domain_transition_base + PTSTAT);
+ } while (((ptstat >> domain_id) & 1) && count--);
+
+ count = STATE_TRANS_MAX_COUNT;
+ do {
+ mdstat = readl(control_base + MDSTAT);
+ } while (!((mdstat & MDSTAT_STATE_MASK) == next_state) && count--);
+}
+
+static int keystone_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_psc *psc = to_clk_psc(hw);
+ struct clk_psc_data *data = psc->psc_data;
+ u32 mdstat = readl(data->control_base + MDSTAT);
+
+ return (mdstat & MDSTAT_MCKOUT) ? 1 : 0;
+}
+
+static int keystone_clk_enable(struct clk_hw *hw)
+{
+ struct clk_psc *psc = to_clk_psc(hw);
+ struct clk_psc_data *data = psc->psc_data;
+ unsigned long flags = 0;
+
+ if (psc->lock)
+ spin_lock_irqsave(psc->lock, flags);
+
+ psc_config(data->control_base, data->domain_base,
+ PSC_STATE_ENABLE, data->domain_id);
+
+ if (psc->lock)
+ spin_unlock_irqrestore(psc->lock, flags);
+
+ return 0;
+}
+
+static void keystone_clk_disable(struct clk_hw *hw)
+{
+ struct clk_psc *psc = to_clk_psc(hw);
+ struct clk_psc_data *data = psc->psc_data;
+ unsigned long flags = 0;
+
+ if (psc->lock)
+ spin_lock_irqsave(psc->lock, flags);
+
+ psc_config(data->control_base, data->domain_base,
+ PSC_STATE_DISABLE, data->domain_id);
+
+ if (psc->lock)
+ spin_unlock_irqrestore(psc->lock, flags);
+}
+
+static const struct clk_ops clk_psc_ops = {
+ .enable = keystone_clk_enable,
+ .disable = keystone_clk_disable,
+ .is_enabled = keystone_clk_is_enabled,
+};
+
+/**
+ * clk_register_psc - register psc clock
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @psc_data: platform data to configure this clock
+ * @lock: spinlock used by this clock
+ */
+static struct clk *clk_register_psc(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ struct clk_psc_data *psc_data,
+ spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_psc *psc;
+ struct clk *clk;
+
+ psc = kzalloc(sizeof(*psc), GFP_KERNEL);
+ if (!psc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_psc_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ psc->psc_data = psc_data;
+ psc->lock = lock;
+ psc->hw.init = &init;
+
+ clk = clk_register(NULL, &psc->hw);
+ if (IS_ERR(clk))
+ kfree(psc);
+
+ return clk;
+}
+
+/**
+ * of_psc_clk_init - initialize psc clock through DT
+ * @node: device tree node for this clock
+ * @lock: spinlock used by this clock
+ */
+static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
+{
+ const char *clk_name = node->name;
+ const char *parent_name;
+ struct clk_psc_data *data;
+ struct clk *clk;
+ int i;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ pr_err("%s: Out of memory\n", __func__);
+ return;
+ }
+
+ i = of_property_match_string(node, "reg-names", "control");
+ data->control_base = of_iomap(node, i);
+ if (!data->control_base) {
+ pr_err("%s: control ioremap failed\n", __func__);
+ goto out;
+ }
+
+ i = of_property_match_string(node, "reg-names", "domain");
+ data->domain_base = of_iomap(node, i);
+ if (!data->domain_base) {
+ pr_err("%s: domain ioremap failed\n", __func__);
+ iounmap(data->control_base);
+ goto out;
+ }
+
+ of_property_read_u32(node, "domain-id", &data->domain_id);
+
+ /* Domain transition registers at fixed address space of domain_id 0 */
+ if (!domain_transition_base && !data->domain_id)
+ domain_transition_base = data->domain_base;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%s: Parent clock not found\n", __func__);
+ goto out;
+ }
+
+ clk = clk_register_psc(NULL, clk_name, parent_name, data, lock);
+ if (clk) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return;
+ }
+
+ pr_err("%s: error registering clk %s\n", __func__, node->name);
+out:
+ kfree(data);
+ return;
+}
+
+/**
+ * of_keystone_psc_clk_init - initialize psc clock through DT
+ * @node: device tree node for this clock
+ */
+static void __init of_keystone_psc_clk_init(struct device_node *node)
+{
+ of_psc_clk_init(node, &psc_lock);
+}
+CLK_OF_DECLARE(keystone_gate_clk, "ti,keystone,psc-clock",
+ of_keystone_psc_clk_init);
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
new file mode 100644
index 00000000000..47a1bd9f172
--- /dev/null
+++ b/drivers/clk/keystone/pll.c
@@ -0,0 +1,305 @@
+/*
+ * PLL clock driver for Keystone devices
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/module.h>
+
+#define PLLM_LOW_MASK 0x3f
+#define PLLM_HIGH_MASK 0x7ffc0
+#define MAIN_PLLM_HIGH_MASK 0x7f000
+#define PLLM_HIGH_SHIFT 6
+#define PLLD_MASK 0x3f
+
+/**
+ * struct clk_pll_data - pll data structure
+ * @has_pllctrl: If set to non-zero, the lower 6 bits of the multiplier are in
+ * the pllm register of the pll controller, else they are in pll_ctrl0 (bits 11-6)
+ * @phy_pllm: Physical address of PLLM in the pll controller. Used when
+ * has_pllctrl is non-zero.
+ * @phy_pll_ctl0: Physical address of PLL ctrl0. This could be that of the
+ * Main PLL or any other PLL in the device, such as the ARM PLL, DDR PLL
+ * or PA PLL available on keystone2. These PLLs are controlled by
+ * this register. The Main PLL is controlled by a PLL controller.
+ * @pllm: PLL register map address
+ * @pll_ctl0: PLL controller map address
+ * @pllm_lower_mask: multiplier lower mask
+ * @pllm_upper_mask: multiplier upper mask
+ * @pllm_upper_shift: multiplier upper shift
+ * @plld_mask: divider mask
+ * @postdiv: Post divider
+ */
+struct clk_pll_data {
+ bool has_pllctrl;
+ u32 phy_pllm;
+ u32 phy_pll_ctl0;
+ void __iomem *pllm;
+ void __iomem *pll_ctl0;
+ u32 pllm_lower_mask;
+ u32 pllm_upper_mask;
+ u32 pllm_upper_shift;
+ u32 plld_mask;
+ u32 postdiv;
+};
+
+/**
+ * struct clk_pll - Main pll clock
+ * @hw: clk_hw for the pll
+ * @pll_data: PLL driver specific data
+ */
+struct clk_pll {
+ struct clk_hw hw;
+ struct clk_pll_data *pll_data;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct clk_pll_data *pll_data = pll->pll_data;
+ unsigned long rate = parent_rate;
+ u32 mult = 0, prediv, postdiv, val;
+
+ /*
+ * Get bits 0-5 of the multiplier from the pllctrl PLLM register
+ * if has_pllctrl is non-zero
+ */
+ if (pll_data->has_pllctrl) {
+ val = readl(pll_data->pllm);
+ mult = (val & pll_data->pllm_lower_mask);
+ }
+
+ /* bits 6-12 of PLLM are in the Main PLL control register */
+ val = readl(pll_data->pll_ctl0);
+ mult |= ((val & pll_data->pllm_upper_mask)
+ >> pll_data->pllm_upper_shift);
+ prediv = (val & pll_data->plld_mask);
+ postdiv = pll_data->postdiv;
+
+ rate /= (prediv + 1);
+ rate = (rate * (mult + 1));
+ rate /= postdiv;
+
+ return rate;
+}
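
A minimal standalone sketch of the rate formula implemented by clk_pllclk_recalc() above, i.e. rate = parent / (plld + 1) * (pllm + 1) / postdiv. The parent rate and register field values are made-up illustrations, not taken from this patch or any Keystone datasheet.

/* Illustrative only: mirrors the recalc arithmetic with assumed sample values. */
#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 122880000;	/* assumed 122.88 MHz reference */
	unsigned int mult = 15;			/* PLLM field: multiply by mult + 1 */
	unsigned int prediv = 0;		/* PLLD field: divide by prediv + 1 */
	unsigned int postdiv = 2;		/* fixed post divider */
	unsigned long rate = parent_rate;

	rate /= (prediv + 1);
	rate = rate * (mult + 1);
	rate /= postdiv;

	printf("pll rate = %lu Hz\n", rate);	/* prints 983040000 */
	return 0;
}
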
+
+static const struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pllclk_recalc,
+};
+
+static struct clk *clk_register_pll(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ struct clk_pll_data *pll_data)
+{
+ struct clk_init_data init;
+ struct clk_pll *pll;
+ struct clk *clk;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_pll_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->pll_data = pll_data;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ goto out;
+
+ return clk;
+out:
+ kfree(pll);
+ return NULL;
+}
+
+/**
+ * _of_pll_clk_init - PLL initialisation via DT
+ * @node: device tree node for this clock
+ * @pllctrl: If true, the lower 6 bits of the multiplier are in the pllm
+ * register of the pll controller, else they are in control register 0 (bits 11-6)
+ */
+static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
+{
+ struct clk_pll_data *pll_data;
+ const char *parent_name;
+ struct clk *clk;
+ int i;
+
+ pll_data = kzalloc(sizeof(*pll_data), GFP_KERNEL);
+ if (!pll_data) {
+ pr_err("%s: Out of memory\n", __func__);
+ return;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv))
+ goto out;
+
+ i = of_property_match_string(node, "reg-names", "control");
+ pll_data->pll_ctl0 = of_iomap(node, i);
+ if (!pll_data->pll_ctl0) {
+ pr_err("%s: ioremap failed\n", __func__);
+ goto out;
+ }
+
+ pll_data->pllm_lower_mask = PLLM_LOW_MASK;
+ pll_data->pllm_upper_shift = PLLM_HIGH_SHIFT;
+ pll_data->plld_mask = PLLD_MASK;
+ pll_data->has_pllctrl = pllctrl;
+ if (!pll_data->has_pllctrl) {
+ pll_data->pllm_upper_mask = PLLM_HIGH_MASK;
+ } else {
+ pll_data->pllm_upper_mask = MAIN_PLLM_HIGH_MASK;
+ i = of_property_match_string(node, "reg-names", "multiplier");
+ pll_data->pllm = of_iomap(node, i);
+ if (!pll_data->pllm) {
+ iounmap(pll_data->pll_ctl0);
+ goto out;
+ }
+ }
+
+ clk = clk_register_pll(NULL, node->name, parent_name, pll_data);
+ if (clk) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return;
+ }
+
+out:
+ pr_err("%s: error initializing pll %s\n", __func__, node->name);
+ kfree(pll_data);
+}
+
+/**
+ * of_keystone_pll_clk_init - PLL initialisation DT wrapper
+ * @node: device tree node for this clock
+ */
+static void __init of_keystone_pll_clk_init(struct device_node *node)
+{
+ _of_pll_clk_init(node, false);
+}
+CLK_OF_DECLARE(keystone_pll_clock, "ti,keystone,pll-clock",
+ of_keystone_pll_clk_init);
+
+/**
+ * of_keystone_main_pll_clk_init - Main PLL initialisation DT wrapper
+ * @node: device tree node for this clock
+ */
+static void __init of_keystone_main_pll_clk_init(struct device_node *node)
+{
+ _of_pll_clk_init(node, true);
+}
+CLK_OF_DECLARE(keystone_main_pll_clock, "ti,keystone,main-pll-clock",
+ of_keystone_main_pll_clk_init);
+
+/**
+ * of_pll_div_clk_init - PLL divider setup function
+ * @node: device tree node for this clock
+ */
+static void __init of_pll_div_clk_init(struct device_node *node)
+{
+ const char *parent_name;
+ void __iomem *reg;
+ u32 shift, mask;
+ struct clk *clk;
+ const char *clk_name = node->name;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("%s: ioremap failed\n", __func__);
+ return;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%s: missing parent clock\n", __func__);
+ return;
+ }
+
+ if (of_property_read_u32(node, "bit-shift", &shift)) {
+ pr_err("%s: missing 'shift' property\n", __func__);
+ return;
+ }
+
+ if (of_property_read_u32(node, "bit-mask", &mask)) {
+ pr_err("%s: missing 'bit-mask' property\n", __func__);
+ return;
+ }
+
+ clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
+ mask, 0, NULL);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ else
+ pr_err("%s: error registering divider %s\n", __func__, clk_name);
+}
+CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
+
+/**
+ * of_pll_mux_clk_init - PLL mux setup function
+ * @node: device tree node for this clock
+ */
+static void __init of_pll_mux_clk_init(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 shift, mask;
+ struct clk *clk;
+ const char *parents[2];
+ const char *clk_name = node->name;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("%s: ioremap failed\n", __func__);
+ return;
+ }
+
+ parents[0] = of_clk_get_parent_name(node, 0);
+ parents[1] = of_clk_get_parent_name(node, 1);
+ if (!parents[0] || !parents[1]) {
+ pr_err("%s: missing parent clocks\n", __func__);
+ return;
+ }
+
+ if (of_property_read_u32(node, "bit-shift", &shift)) {
+ pr_err("%s: missing 'shift' property\n", __func__);
+ return;
+ }
+
+ if (of_property_read_u32(node, "bit-mask", &mask)) {
+ pr_err("%s: missing 'bit-mask' property\n", __func__);
+ return;
+ }
+
+ clk = clk_register_mux(NULL, clk_name, parents,
+ ARRAY_SIZE(parents), 0, reg, shift, mask,
+ 0, NULL);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ else
+ pr_err("%s: error registering mux %s\n", __func__, clk_name);
+}
+CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index c396fe36158..9fc9359f513 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -100,16 +101,16 @@ static enum imx23_clk clks_init_on[] __initdata = {
cpu, hbus, xbus, emi, uart,
};
-int __init mx23_clocks_init(void)
+static void __init mx23_clocks_init(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *dcnp;
u32 i;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx23-digctl");
- digctrl = of_iomap(np, 0);
+ dcnp = of_find_compatible_node(NULL, NULL, "fsl,imx23-digctl");
+ digctrl = of_iomap(dcnp, 0);
WARN_ON(!digctrl);
+ of_node_put(dcnp);
- np = of_find_compatible_node(NULL, NULL, "fsl,imx23-clkctrl");
clkctrl = of_iomap(np, 0);
WARN_ON(!clkctrl);
@@ -162,7 +163,7 @@ int __init mx23_clocks_init(void)
if (IS_ERR(clks[i])) {
pr_err("i.MX23 clk %d: register failed with %ld\n",
i, PTR_ERR(clks[i]));
- return PTR_ERR(clks[i]);
+ return;
}
clk_data.clks = clks;
@@ -172,5 +173,5 @@ int __init mx23_clocks_init(void)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
- return 0;
}
+CLK_OF_DECLARE(imx23_clkctrl, "fsl,imx23-clkctrl", mx23_clocks_init);
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 4faf0afc44c..a6c35010e4e 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -154,16 +155,16 @@ static enum imx28_clk clks_init_on[] __initdata = {
cpu, hbus, xbus, emi, uart,
};
-int __init mx28_clocks_init(void)
+static void __init mx28_clocks_init(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *dcnp;
u32 i;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl");
- digctrl = of_iomap(np, 0);
+ dcnp = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl");
+ digctrl = of_iomap(dcnp, 0);
WARN_ON(!digctrl);
+ of_node_put(dcnp);
- np = of_find_compatible_node(NULL, NULL, "fsl,imx28-clkctrl");
clkctrl = of_iomap(np, 0);
WARN_ON(!clkctrl);
@@ -239,7 +240,7 @@ int __init mx28_clocks_init(void)
if (IS_ERR(clks[i])) {
pr_err("i.MX28 clk %d: register failed with %ld\n",
i, PTR_ERR(clks[i]));
- return PTR_ERR(clks[i]);
+ return;
}
clk_data.clks = clks;
@@ -250,6 +251,5 @@ int __init mx28_clocks_init(void)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
-
- return 0;
}
+CLK_OF_DECLARE(imx28_clkctrl, "fsl,imx28-clkctrl", mx28_clocks_init);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 3413380086d..8eb4799237f 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,6 +8,4 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
-ifdef CONFIG_COMMON_CLK
obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
-endif
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 34ee69f4d50..9bbd0351454 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -16,7 +16,6 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/clk/sunxi.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -617,11 +616,8 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
}
}
-void __init sunxi_init_clocks(void)
+static void __init sunxi_init_clocks(struct device_node *np)
{
- /* Register all the simple and basic clocks on DT */
- of_clk_init(NULL);
-
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
@@ -634,3 +630,8 @@ void __init sunxi_init_clocks(void)
/* Register gate clocks */
of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
}
+CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sunxi_init_clocks);
+CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sunxi_init_clocks);
+CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sunxi_init_clocks);
+CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sunxi_init_clocks);
+CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sunxi_init_clocks);
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index c6a806ed0e8..521483f0ba3 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-prcmu.o
obj-y += clk-sysctrl.o
# Clock definitions
+obj-y += u8500_of_clk.o
obj-y += u8500_clk.o
obj-y += u9540_clk.o
obj-y += u8540_clk.o
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
new file mode 100644
index 00000000000..cdeff299de2
--- /dev/null
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -0,0 +1,559 @@
+/*
+ * Clock definitions for u8500 platform.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/platform_data/clk-ux500.h>
+#include "clk.h"
+
+#define PRCC_NUM_PERIPH_CLUSTERS 6
+#define PRCC_PERIPHS_PER_CLUSTER 32
+
+static struct clk *prcmu_clk[PRCMU_NUM_CLKS];
+static struct clk *prcc_pclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
+static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
+
+#define PRCC_SHOW(clk, base, bit) \
+ clk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit]
+#define PRCC_PCLK_STORE(clk, base, bit) \
+ prcc_pclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk
+#define PRCC_KCLK_STORE(clk, base, bit) \
+ prcc_kclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk
+
+struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clk **clk_data = data;
+ unsigned int base, bit;
+
+ if (clkspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ base = clkspec->args[0];
+ bit = clkspec->args[1];
+
+ if (base != 1 && base != 2 && base != 3 && base != 5 && base != 6) {
+ pr_err("%s: invalid PRCC base %d\n", __func__, base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return PRCC_SHOW(clk_data, base, bit);
+}
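
A minimal standalone sketch of the index arithmetic behind ux500_twocell_get() and the PRCC_*_STORE macros above: a two-cell specifier (base, bit) selects entry base * PRCC_PERIPHS_PER_CLUSTER + bit of the flat clock arrays. The sample specifiers correspond to registrations later in this file; everything else is illustrative.

#include <stdio.h>

#define PRCC_PERIPHS_PER_CLUSTER 32

static unsigned int prcc_index(unsigned int base, unsigned int bit)
{
	return base * PRCC_PERIPHS_PER_CLUSTER + bit;
}

int main(void)
{
	/* e.g. <1 5> resolves to p1_pclk5, <3 4> to p3_sdi2_kclk below */
	printf("base 1, bit 5 -> index %u\n", prcc_index(1, 5));	/* 37 */
	printf("base 3, bit 4 -> index %u\n", prcc_index(3, 4));	/* 100 */
	return 0;
}
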
+
+static const struct of_device_id u8500_clk_of_match[] = {
+ { .compatible = "stericsson,u8500-clks", },
+ { },
+};
+
+void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base)
+{
+ struct prcmu_fw_version *fw_version;
+ struct device_node *np = NULL;
+ struct device_node *child = NULL;
+ const char *sgaclk_parent = NULL;
+ struct clk *clk, *rtc_clk, *twd_clk;
+
+ if (of_have_populated_dt())
+ np = of_find_matching_node(NULL, u8500_clk_of_match);
+ if (!np) {
+ pr_err("Either DT or U8500 Clock node not found\n");
+ return;
+ }
+
+ /* Clock sources */
+ clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_PLLSOC0] = clk;
+
+ clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_PLLSOC1] = clk;
+
+ clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_PLLDDR] = clk;
+
+ /* FIXME: Add sys, ulp and int clocks here. */
+
+ rtc_clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL",
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ 32768);
+
+ /* PRCMU clocks */
+ fw_version = prcmu_get_fw_version();
+ if (fw_version != NULL) {
+ switch (fw_version->project) {
+ case PRCMU_FW_PROJECT_U8500_C2:
+ case PRCMU_FW_PROJECT_U8520:
+ case PRCMU_FW_PROJECT_U8420:
+ sgaclk_parent = "soc0_pll";
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (sgaclk_parent)
+ clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
+ PRCMU_SGACLK, 0);
+ else
+ clk = clk_reg_prcmu_gate("sgclk", NULL,
+ PRCMU_SGACLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_SGACLK] = clk;
+
+ clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_UARTCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_MSP02CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_MSP1CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_I2CCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_SLIMCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER1CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER2CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER3CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER5CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER6CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER7CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_LCDCLK] = clk;
+
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_BMLCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_HSITXCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_HSIRXCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_HDMICLK] = clk;
+
+ clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_APEATCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK,
+ CLK_IS_ROOT);
+ prcmu_clk[PRCMU_APETRACECLK] = clk;
+
+ clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_MCDECLK] = clk;
+
+ clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
+ CLK_IS_ROOT);
+ prcmu_clk[PRCMU_IPI2CCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
+ CLK_IS_ROOT);
+ prcmu_clk[PRCMU_DSIALTCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_DMACLK] = clk;
+
+ clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_B2R2CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_TVCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_SSPCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_RNGCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_UICCCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_TIMCLK] = clk;
+
+ clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
+ 100000000,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_SDMMCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
+ PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_PLLDSI] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
+ PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI0CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
+ PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI1CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
+ PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI0ESCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
+ PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI1ESCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
+ PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI2ESCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable_rate("armss", NULL,
+ PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_ARMSS] = clk;
+
+ twd_clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
+ CLK_IGNORE_UNUSED, 1, 2);
+
+ /*
+ * FIXME: Add specially handled PRCMU clocks here:
+ * 1. clkout0yuv, uses PRCMU as parent + needs regulator + pinctrl.
+ * 2. ab9540_clkout1yuv, see clkout0yuv
+ */
+
+ /* PRCC P-clocks */
+ clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 1, 0);
+
+ clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 1, 1);
+
+ clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 1, 2);
+
+ clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 1, 3);
+
+ clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 1, 4);
+
+ clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 1, 5);
+
+ clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 1, 6);
+
+ clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 1, 7);
+
+ clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
+ BIT(8), 0);
+ PRCC_PCLK_STORE(clk, 1, 8);
+
+ clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
+ BIT(9), 0);
+ PRCC_PCLK_STORE(clk, 1, 9);
+
+ clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
+ BIT(10), 0);
+ PRCC_PCLK_STORE(clk, 1, 10);
+
+ clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
+ BIT(11), 0);
+ PRCC_PCLK_STORE(clk, 1, 11);
+
+ clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 2, 0);
+
+ clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 2, 1);
+
+ clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 2, 2);
+
+ clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 2, 3);
+
+ clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 2, 4);
+
+ clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 2, 5);
+
+ clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 2, 6);
+
+ clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 2, 7);
+
+ clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
+ BIT(8), 0);
+ PRCC_PCLK_STORE(clk, 2, 8);
+
+ clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
+ BIT(9), 0);
+ PRCC_PCLK_STORE(clk, 2, 9);
+
+ clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
+ BIT(10), 0);
+ PRCC_PCLK_STORE(clk, 2, 10);
+
+ clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
+ BIT(11), 0);
+ PRCC_PCLK_STORE(clk, 2, 11);
+
+ clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
+ BIT(12), 0);
+ PRCC_PCLK_STORE(clk, 2, 12);
+
+ clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 3, 0);
+
+ clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 3, 1);
+
+ clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 3, 2);
+
+ clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 3, 3);
+
+ clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 3, 4);
+
+ clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 3, 5);
+
+ clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 3, 6);
+
+ clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 3, 7);
+
+ clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
+ BIT(8), 0);
+ PRCC_PCLK_STORE(clk, 3, 8);
+
+ clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 5, 0);
+
+ clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 5, 1);
+
+ clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 6, 0);
+
+ clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 6, 1);
+
+ clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 6, 2);
+
+ clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 6, 3);
+
+ clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 6, 4);
+
+ clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 6, 5);
+
+ clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 6, 6);
+
+ clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 6, 7);
+
+ /* PRCC K-clocks
+ *
+ * FIXME: Some drivers require PERIPH[n] to be automatically enabled
+ * by enabling just the K-clock, even if it is not a valid parent to
+ * the K-clock. Until drivers get fixed we might need some kind of
+ * "parent muxed join".
+ */
+
+ /* Periph1 */
+ clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
+ clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 0);
+
+ clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
+ clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 1);
+
+ clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
+ clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 2);
+
+ clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
+ clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 3);
+
+ clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
+ clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 4);
+
+ clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
+ clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 5);
+
+ clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
+ clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 6);
+
+ clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
+ clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 8);
+
+ clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
+ clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 9);
+
+ clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
+ clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 10);
+
+ /* Periph2 */
+ clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
+ clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 0);
+
+ clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
+ clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 2);
+
+ clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
+ clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 3);
+
+ clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
+ clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 4);
+
+ clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
+ clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 5);
+
+ /* Note that rate is received from parent. */
+ clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
+ clkrst2_base, BIT(6),
+ CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
+ PRCC_KCLK_STORE(clk, 2, 6);
+
+ clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
+ clkrst2_base, BIT(7),
+ CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
+ PRCC_KCLK_STORE(clk, 2, 7);
+
+ /* Periph3 */
+ clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
+ clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 1);
+
+ clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
+ clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 2);
+
+ clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
+ clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 3);
+
+ clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
+ clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 4);
+
+ clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
+ clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 5);
+
+ clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
+ clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 6);
+
+ clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
+ clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 7);
+
+ /* Periph6 */
+ clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
+ clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 6, 0);
+
+ for_each_child_of_node(np, child) {
+ static struct clk_onecell_data clk_data;
+
+ if (!of_node_cmp(child->name, "prcmu-clock")) {
+ clk_data.clks = prcmu_clk;
+ clk_data.clk_num = ARRAY_SIZE(prcmu_clk);
+ of_clk_add_provider(child, of_clk_src_onecell_get, &clk_data);
+ }
+ if (!of_node_cmp(child->name, "prcc-periph-clock"))
+ of_clk_add_provider(child, ux500_twocell_get, prcc_pclk);
+
+ if (!of_node_cmp(child->name, "prcc-kernel-clock"))
+ of_clk_add_provider(child, ux500_twocell_get, prcc_kclk);
+
+ if (!of_node_cmp(child->name, "rtc32k-clock"))
+ of_clk_add_provider(child, of_clk_src_simple_get, rtc_clk);
+
+ if (!of_node_cmp(child->name, "smp-twd-clock"))
+ of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
+ }
+}
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index f26258869de..20c8add90d1 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -83,7 +83,7 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
clk_register_clkdev(clk, NULL, "lcd");
clk_register_clkdev(clk, "lcd", "mcde");
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BML8580CLK,
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK,
CLK_IS_ROOT);
clk_register_clkdev(clk, NULL, "bml");
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index cc40fe64f2d..10772aa72e4 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -117,13 +117,19 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
goto err;
fclk_gate_lock = kmalloc(sizeof(*fclk_gate_lock), GFP_KERNEL);
if (!fclk_gate_lock)
- goto err;
+ goto err_fclk_gate_lock;
spin_lock_init(fclk_lock);
spin_lock_init(fclk_gate_lock);
mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name);
+ if (!mux_name)
+ goto err_mux_name;
div0_name = kasprintf(GFP_KERNEL, "%s_div0", clk_name);
+ if (!div0_name)
+ goto err_div0_name;
div1_name = kasprintf(GFP_KERNEL, "%s_div1", clk_name);
+ if (!div1_name)
+ goto err_div1_name;
clk = clk_register_mux(NULL, mux_name, parents, 4,
CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
@@ -147,6 +153,14 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
return;
+err_div1_name:
+ kfree(div0_name);
+err_div0_name:
+ kfree(mux_name);
+err_mux_name:
+ kfree(fclk_gate_lock);
+err_fclk_gate_lock:
+ kfree(fclk_lock);
err:
clks[fclk] = ERR_PTR(-ENOMEM);
}
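
The error unwinding added to zynq_clk_register_fclk() above follows the usual reverse-order goto pattern: each later failure releases exactly what the earlier steps allocated. A minimal standalone sketch of the same pattern, reduced to malloc/free with illustrative names:

#include <stdlib.h>

static int make_three(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;
	c = malloc(16);
	if (!c)
		goto err_free_b;

	/* ... use a, b and c, then release them in reverse order ... */
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return make_three() ? EXIT_FAILURE : EXIT_SUCCESS;
}
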
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 971d796e071..bdb953e15d2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -34,6 +34,7 @@ config ORION_TIMER
bool
config SUN4I_TIMER
+ select CLKSRC_MMIO
bool
config VT8500_TIMER
@@ -71,10 +72,33 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
help
Use the always on PRCMU Timer as sched_clock
+config CLKSRC_EFM32
+ bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
+ depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ default ARCH_EFM32
+ help
+ Support for using the timers of EFM32 SoCs as clock source and
+ clock event device.
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
+config ARM_ARCH_TIMER_EVTSTREAM
+ bool "Support for ARM architected timer event stream generation"
+ default y if ARM_ARCH_TIMER
+ help
+ This option enables support for event stream generation based on
+ the ARM architected timer. It is used for waking up CPUs executing
+ the wfe instruction at a frequency represented as a power-of-2
+ divisor of the clock rate.
+ The main use of the event stream is wfe-based timeouts of userspace
+ locking implementations. It might also be useful for imposing a timeout
+ on wfe to safeguard against any programming errors in case an expected
+ event is not generated.
+ This must be disabled for hardware validation purposes to detect any
+ hardware anomalies such as missing events.
+
config ARM_GLOBAL_TIMER
bool
select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 704d6d342ad..33621efb914 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
+obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fbd9ccd5e11..95fb944e15e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -13,12 +13,14 @@
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/sched_clock.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -294,6 +296,19 @@ static void __arch_timer_setup(unsigned type,
clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
+static void arch_timer_configure_evtstream(void)
+{
+ int evt_stream_div, pos;
+
+ /* Find the closest power of two to the divisor */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
+ pos = fls(evt_stream_div);
+ if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
+ pos--;
+ /* enable event stream */
+ arch_timer_evtstrm_enable(min(pos, 15));
+}
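
A standalone sketch of the power-of-two rounding done by arch_timer_configure_evtstream() above: fls() finds the highest set bit of the divisor, and the next lower bit decides whether to round down. The timer rate and target stream frequency are assumed sample values (the value of ARCH_TIMER_EVT_STREAM_FREQ is not shown in this hunk), and __builtin_clz stands in for the kernel's fls().

#include <stdio.h>

static int fls_demo(unsigned int x)	/* 1-based index of the most significant set bit */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int timer_rate = 24000000;	/* assumed 24 MHz counter */
	unsigned int stream_freq = 10000;	/* assumed ~100us event period */
	unsigned int div = timer_rate / stream_freq;	/* 2400 */
	int pos = fls_demo(div);		/* 12, i.e. 2^11 <= 2400 < 2^12 */

	/* round down when the lower power of two is the closer one */
	if (pos > 1 && !(div & (1 << (pos - 2))))
		pos--;

	printf("divisor %u rounds to 2^%d = %u\n", div, pos, 1u << pos);
	return 0;
}
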
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
@@ -307,6 +322,8 @@ static int arch_timer_setup(struct clock_event_device *clk)
}
arch_counter_set_user_access();
+ if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ arch_timer_configure_evtstream();
return 0;
}
@@ -389,7 +406,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static struct cyclecounter cyclecounter = {
@@ -419,6 +436,9 @@ static void __init arch_counter_register(unsigned type)
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&timecounter, &cyclecounter, start_count);
+
+ /* 56 bits minimum, so we assume worst case rollover */
+ sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
@@ -460,6 +480,33 @@ static struct notifier_block arch_timer_cpu_nb = {
.notifier_call = arch_timer_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static unsigned int saved_cntkctl;
+static int arch_timer_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_PM_ENTER)
+ saved_cntkctl = arch_timer_get_cntkctl();
+ else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+ arch_timer_set_cntkctl(saved_cntkctl);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_pm_notifier = {
+ .notifier_call = arch_timer_cpu_pm_notify,
+};
+
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
+}
+#else
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return 0;
+}
+#endif
+
static int __init arch_timer_register(void)
{
int err;
@@ -499,11 +546,17 @@ static int __init arch_timer_register(void)
if (err)
goto out_free_irq;
+ err = arch_timer_cpu_pm_init();
+ if (err)
+ goto out_unreg_notify;
+
/* Immediately configure the timer on the boot CPU */
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
return 0;
+out_unreg_notify:
+ unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
if (arch_timer_use_virtual)
free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index b66c1f36066..c639b1a9e99 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -169,7 +169,8 @@ static int gt_clockevents_init(struct clock_event_device *clk)
int cpu = smp_processor_id();
clk->name = "arm_global_timer";
- clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERCPU;
clk->set_mode = gt_clockevent_set_mode;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 07ea7ce900d..26ed331b1aa 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -49,7 +49,7 @@ struct bcm2835_timer {
static void __iomem *system_clock __read_mostly;
-static u32 notrace bcm2835_sched_read(void)
+static u64 notrace bcm2835_sched_read(void)
{
return readl_relaxed(system_clock);
}
@@ -110,7 +110,7 @@ static void __init bcm2835_timer_init(struct device_node *node)
panic("Can't read clock-frequency");
system_clock = base + REG_COUNTER_LO;
- setup_sched_clock(bcm2835_sched_read, 32, freq);
+ sched_clock_register(bcm2835_sched_read, 32, freq);
clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
freq, 300, 32, clocksource_mmio_readl_up);
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index a9fd4ad2567..b375106844d 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -53,7 +53,7 @@ static struct clocksource clocksource_dbx500_prcmu = {
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static u32 notrace dbx500_prcmu_sched_clock_read(void)
+static u64 notrace dbx500_prcmu_sched_clock_read(void)
{
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
@@ -81,8 +81,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- setup_sched_clock(dbx500_prcmu_sched_clock_read,
- 32, RATE_32K);
+ sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index b9ddd9e3a2f..35639cf4e5a 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,5 +35,6 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
+ of_node_put(np);
}
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 4cbae4f762b..45ba8aecc72 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -23,7 +23,7 @@
#include <linux/clk.h>
#include <linux/sched_clock.h>
-static void timer_get_base_and_rate(struct device_node *np,
+static void __init timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
struct clk *timer_clk;
@@ -55,11 +55,11 @@ static void timer_get_base_and_rate(struct device_node *np,
try_clock_freq:
if (of_property_read_u32(np, "clock-freq", rate) &&
- of_property_read_u32(np, "clock-frequency", rate))
+ of_property_read_u32(np, "clock-frequency", rate))
panic("No clock nor clock-frequency property for %s", np->name);
}
-static void add_clockevent(struct device_node *event_timer)
+static void __init add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
@@ -82,7 +82,7 @@ static void add_clockevent(struct device_node *event_timer)
static void __iomem *sched_io_base;
static u32 sched_rate;
-static void add_clocksource(struct device_node *source_timer)
+static void __init add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
@@ -106,7 +106,7 @@ static void add_clocksource(struct device_node *source_timer)
sched_rate = rate;
}
-static u32 read_sched_clock(void)
+static u64 read_sched_clock(void)
{
return __raw_readl(sched_io_base);
}
@@ -117,7 +117,7 @@ static const struct of_device_id sptimer_ids[] __initconst = {
{ /* Sentinel */ },
};
-static void init_sched_clock(void)
+static void __init init_sched_clock(void)
{
struct device_node *sched_timer;
@@ -128,7 +128,7 @@ static void init_sched_clock(void)
of_node_put(sched_timer);
}
- setup_sched_clock(read_sched_clock, 32, sched_rate);
+ sched_clock_register(read_sched_clock, 32, sched_rate);
}
static int num_called;
@@ -138,12 +138,10 @@ static void __init dw_apb_timer_init(struct device_node *timer)
case 0:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
- of_node_put(timer);
break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
- of_node_put(timer);
init_sched_clock();
break;
default:
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 3a5909c12d4..9d170834fcf 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -78,7 +78,7 @@ static int em_sti_enable(struct em_sti_priv *p)
int ret;
/* enable clock */
- ret = clk_enable(p->clk);
+ ret = clk_prepare_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
@@ -107,7 +107,7 @@ static void em_sti_disable(struct em_sti_priv *p)
em_sti_write(p, STI_INTENCLR, 3);
/* stop clock */
- clk_disable(p->clk);
+ clk_disable_unprepare(p->clk);
}
static cycle_t em_sti_count(struct em_sti_priv *p)
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 0f5e65f74dc..445b68a01dc 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -222,7 +222,7 @@ static struct clocksource clocksource_mxs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u32 notrace mxs_read_sched_clock_v2(void)
+static u64 notrace mxs_read_sched_clock_v2(void)
{
return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
}
@@ -236,7 +236,7 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
else {
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
- setup_sched_clock(mxs_read_sched_clock_v2, 32, c);
+ sched_clock_register(mxs_read_sched_clock_v2, 32, c);
}
return 0;
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 1b74bea1238..ed7b73b508e 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -76,7 +76,7 @@ static struct delay_timer mtu_delay_timer;
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
-static u32 notrace nomadik_read_sched_clock(void)
+static u64 notrace nomadik_read_sched_clock(void)
{
if (unlikely(!mtu_base))
return 0;
@@ -231,7 +231,7 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,
"mtu_0");
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
- setup_sched_clock(nomadik_read_sched_clock, 32, rate);
+ sched_clock_register(nomadik_read_sched_clock, 32, rate);
#endif
/* Timer 1 is used for events, register irq and clockevents */
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index ab29476ee5f..85082e8d305 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -331,7 +331,7 @@ static struct clocksource samsung_clocksource = {
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by U300 implementation.)
*/
-static u32 notrace samsung_read_sched_clock(void)
+static u64 notrace samsung_read_sched_clock(void)
{
return samsung_clocksource_read(NULL);
}
@@ -357,7 +357,7 @@ static void __init samsung_clocksource_init(void)
else
pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
- setup_sched_clock(samsung_read_sched_clock,
+ sched_clock_register(samsung_read_sched_clock,
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 8ead0258740..2fb4695a28d 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -37,6 +37,8 @@
#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14)
#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18)
+#define TIMER_SYNC_TICKS 3
+
static void __iomem *timer_base;
static u32 ticks_per_jiffy;
@@ -50,7 +52,7 @@ static void sun4i_clkevt_sync(void)
{
u32 old = readl(timer_base + TIMER_CNTVAL_REG(1));
- while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3)
+ while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
@@ -104,7 +106,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
struct clock_event_device *unused)
{
sun4i_clkevt_time_stop(0);
- sun4i_clkevt_time_setup(0, evt);
+ sun4i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
sun4i_clkevt_time_start(0, false);
return 0;
@@ -131,7 +133,7 @@ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
static struct irqaction sun4i_timer_irq = {
.name = "sun4i_timer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sun4i_timer_interrupt,
.dev_id = &sun4i_clockevent,
};
@@ -187,8 +189,8 @@ static void __init sun4i_timer_init(struct device_node *node)
sun4i_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
- 0xffffffff);
+ clockevents_config_and_register(&sun4i_clockevent, rate,
+ TIMER_SYNC_TICKS, 0xffffffff);
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 8a6187225dd..00fdd117028 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
+ clk_disable_unprepare(tcd->clk);
}
switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
break;
case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -180,15 +180,22 @@ static irqreturn_t ch2_irq(int irq, void *handle)
static struct irqaction tc_irqaction = {
.name = "tc_clkevt",
- .flags = IRQF_TIMER | IRQF_DISABLED,
+ .flags = IRQF_TIMER,
.handler = ch2_irq,
};
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
+ int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
+ /* try to enable t2 clk to avoid future errors in mode change */
+ ret = clk_prepare_enable(t2_clk);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(t2_clk);
+
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
@@ -197,16 +204,21 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.clkevt.cpumask = cpumask_of(0);
+ ret = setup_irq(irq, &tc_irqaction);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
- setup_irq(irq, &tc_irqaction);
+ return ret;
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
/* NOTHING */
+ return 0;
}
#endif
@@ -265,6 +277,7 @@ static int __init tcb_clksrc_init(void)
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
int i;
+ int ret;
tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
if (!tc) {
@@ -275,7 +288,11 @@ static int __init tcb_clksrc_init(void)
pdev = tc->pdev;
t0_clk = tc->clk[0];
- clk_enable(t0_clk);
+ ret = clk_prepare_enable(t0_clk);
+ if (ret) {
+ pr_debug("can't enable T0 clk\n");
+ goto err_free_tc;
+ }
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
@@ -313,17 +330,39 @@ static int __init tcb_clksrc_init(void)
/* tclib will give us three clocks no matter what the
* underlying platform supports.
*/
- clk_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc->clk[1]);
+ if (ret) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_disable_t0;
+ }
/* setup both channel 0 & 1 */
tcb_setup_dual_chan(tc, best_divisor_idx);
}
/* and away we go! */
- clocksource_register_hz(&clksrc, divided_rate);
+ ret = clocksource_register_hz(&clksrc, divided_rate);
+ if (ret)
+ goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(tc, clk32k_divisor_idx);
+ if (ret)
+ goto err_unregister_clksrc;
return 0;
+
+err_unregister_clksrc:
+ clocksource_unregister(&clksrc);
+
+err_disable_t1:
+ if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
+ clk_disable_unprepare(tc->clk[1]);
+
+err_disable_t0:
+ clk_disable_unprepare(t0_clk);
+
+err_free_tc:
+ atmel_tc_free(tc);
+ return ret;
}
arch_initcall(tcb_clksrc_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 93961703b88..642849256d8 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -98,7 +98,7 @@ static struct clock_event_device tegra_clockevent = {
.set_mode = tegra_timer_set_mode,
};
-static u32 notrace tegra_read_sched_clock(void)
+static u64 notrace tegra_read_sched_clock(void)
{
return timer_readl(TIMERUS_CNTR_1US);
}
@@ -181,8 +181,6 @@ static void __init tegra20_init_timer(struct device_node *np)
rate = clk_get_rate(clk);
}
- of_node_put(np);
-
switch (rate) {
case 12000000:
timer_writel(0x000b, TIMERUS_USEC_CFG);
@@ -200,7 +198,7 @@ static void __init tegra20_init_timer(struct device_node *np)
WARN(1, "Unknown clock rate");
}
- setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+ sched_clock_register(tegra_read_sched_clock, 32, 1000000);
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
@@ -241,8 +239,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- of_node_put(np);
-
register_persistent_clock(NULL, tegra_read_persistent_clock);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 0198504ef6b..d8e47e50278 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -96,7 +96,7 @@ static void local_timer_ctrl_clrset(u32 clr, u32 set)
local_base + TIMER_CTRL_OFF);
}
-static u32 notrace armada_370_xp_read_sched_clock(void)
+static u64 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
}
@@ -258,7 +258,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
/*
* Set scale and timer for sched_clock.
*/
- setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
+ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
/*
* Setup free-running clocksource timer (interrupts
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
new file mode 100644
index 00000000000..1a6205b7bed
--- /dev/null
+++ b/drivers/clocksource/time-efm32.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+
+#define TIMERn_CTRL 0x00
+#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24)
+#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10)
+#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16)
+#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0)
+#define TIMERn_CTRL_OSMEN 0x00000010
+#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0)
+#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0)
+#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1)
+
+#define TIMERn_CMD 0x04
+#define TIMERn_CMD_START 0x00000001
+#define TIMERn_CMD_STOP 0x00000002
+
+#define TIMERn_IEN 0x0c
+#define TIMERn_IF 0x10
+#define TIMERn_IFS 0x14
+#define TIMERn_IFC 0x18
+#define TIMERn_IRQ_UF 0x00000002
+
+#define TIMERn_TOP 0x1c
+#define TIMERn_CNT 0x24
+
+struct efm32_clock_event_ddata {
+ struct clock_event_device evtdev;
+ void __iomem *base;
+ unsigned periodic_top;
+};
+
+static void efm32_clock_event_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_OSMEN |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static int efm32_clock_event_set_next_event(unsigned long evt,
+ struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(evt, ddata->base + TIMERn_CNT);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+
+ return 0;
+}
+
+static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
+{
+ struct efm32_clock_event_ddata *ddata = dev_id;
+
+ writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);
+
+ ddata->evtdev.event_handler(&ddata->evtdev);
+
+ return IRQ_HANDLED;
+}
+
+static struct efm32_clock_event_ddata clock_event_ddata = {
+ .evtdev = {
+ .name = "efm32 clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = efm32_clock_event_set_mode,
+ .set_next_event = efm32_clock_event_set_next_event,
+ .rating = 200,
+ },
+};
+
+static struct irqaction efm32_clock_event_irq = {
+ .name = "efm32 clockevent",
+ .flags = IRQF_TIMER,
+ .handler = efm32_clock_event_handler,
+ .dev_id = &clock_event_ddata,
+};
+
+static int __init efm32_clocksource_init(struct device_node *np)
+{
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long rate;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clocksource (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable timer clock for clocksource (%d)\n",
+ ret);
+ goto err_clk_enable;
+ }
+ rate = clk_get_rate(clk);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map registers for clocksource\n");
+ goto err_iomap;
+ }
+
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);
+
+ ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
+ DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
+ clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("failed to init clocksource (%d)\n", ret);
+ goto err_clocksource_init;
+ }
+
+ return 0;
+
+err_clocksource_init:
+
+ iounmap(base);
+err_iomap:
+
+ clk_disable_unprepare(clk);
+err_clk_enable:
+
+ clk_put(clk);
+err_clk_get:
+
+ return ret;
+}
+
+static int __init efm32_clockevent_init(struct device_node *np)
+{
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long rate;
+ int irq;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clockevent (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable timer clock for clockevent (%d)\n",
+ ret);
+ goto err_clk_enable;
+ }
+ rate = clk_get_rate(clk);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map registers for clockevent\n");
+ goto err_iomap;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENOENT;
+ pr_err("failed to get irq for clockevent\n");
+ goto err_get_irq;
+ }
+
+ writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);
+
+ clock_event_ddata.base = base;
+ clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
+
+ setup_irq(irq, &efm32_clock_event_irq);
+
+ clockevents_config_and_register(&clock_event_ddata.evtdev,
+ DIV_ROUND_CLOSEST(rate, 1024),
+ 0xf, 0xffff);
+
+ return 0;
+
+err_get_irq:
+
+ iounmap(base);
+err_iomap:
+
+ clk_disable_unprepare(clk);
+err_clk_enable:
+
+ clk_put(clk);
+err_clk_get:
+
+ return ret;
+}
+
+/*
+ * This function makes sure that we end up with exactly one clocksource and
+ * one clock_event_device in the end.
+ */
+static void __init efm32_timer_init(struct device_node *np)
+{
+ static int has_clocksource, has_clockevent;
+ int ret;
+
+ if (!has_clocksource) {
+ ret = efm32_clocksource_init(np);
+ if (!ret) {
+ has_clocksource = 1;
+ return;
+ }
+ }
+
+ if (!has_clockevent) {
+ ret = efm32_clockevent_init(np);
+ if (!ret) {
+ has_clockevent = 1;
+ return;
+ }
+ }
+}
+CLOCKSOURCE_OF_DECLARE(efm32, "efm32,timer", efm32_timer_init);
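The arithmetic used above for the clocksource rate and the periodic reload value is easy to check in isolation. The sketch below is a standalone userspace model, not driver code; the 48 MHz HFPERCLK and HZ value of 100 are assumptions made only for illustration.

/* Standalone model of the efm32 timer arithmetic (not kernel code). */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long rate = 48000000UL;	/* assumed HFPERCLK rate */
	unsigned int hz = 100;			/* assumed CONFIG_HZ */

	/* The counter runs from HFPERCLK through the /1024 prescaler. */
	unsigned long tick_rate = DIV_ROUND_CLOSEST(rate, 1024);

	/* Reload value for one tick of length 1/HZ in periodic mode. */
	unsigned long periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * hz);

	printf("tick rate: %lu Hz, periodic top: %lu\n",
	       tick_rate, periodic_top);	/* 46875 Hz, 469 */
	return 0;
}

With these assumed numbers the 16-bit counter comfortably holds the periodic reload value, which is why the driver can register the clockevent with a 0xffff maximum delta.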
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index ef3cfb269d8..8a492d34ff9 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -165,9 +165,9 @@ static struct irqaction sirfsoc_timer_irq = {
};
/* Overwrite weak default sched_clock with more precise one */
-static u32 notrace sirfsoc_read_sched_clock(void)
+static u64 notrace sirfsoc_read_sched_clock(void)
{
- return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
+ return sirfsoc_timer_read(NULL);
}
static void __init sirfsoc_clockevent_init(void)
@@ -206,7 +206,7 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
- setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
+ sched_clock_register(sirfsoc_read_sched_clock, 64, CLOCK_TICK_RATE);
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 587e0202a70..02821b06a39 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void)
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
-static unsigned int pit_read_sched_clock(void)
+static u64 pit_read_sched_clock(void)
{
return __raw_readl(clksrc_base + PITCVAL);
}
@@ -64,7 +64,7 @@ static int __init pit_clocksource_init(unsigned long rate)
__raw_writel(~0UL, clksrc_base + PITLDVAL);
__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
- setup_sched_clock(pit_read_sched_clock, 32, rate);
+ sched_clock_register(pit_read_sched_clock, 32, rate);
return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
300, 32, clocksource_mmio_readl_down);
}
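The prima2 and PIT hunks above are part of the same 3.13 conversion from the 32-bit setup_sched_clock() interface to sched_clock_register(), which takes a u64-returning read callback plus the counter width. The width matters because it determines how often the sched_clock core has to handle a rollover. A standalone back-of-the-envelope check, assuming a hypothetical 1 MHz timer (the rate is an assumption, not taken from either driver):

/* Wraparound period of an N-bit free-running counter (illustration only). */
#include <stdio.h>
#include <stdint.h>

static double wrap_seconds(unsigned int bits, double rate_hz)
{
	/* 2^bits counts before the counter rolls over. */
	return (double)(1ULL << (bits - 1)) * 2.0 / rate_hz;
}

int main(void)
{
	double rate = 1000000.0;	/* assumed 1 MHz timer */

	printf("32-bit counter wraps every %.0f s\n", wrap_seconds(32, rate));
	printf("64-bit counter wraps every %.2e s\n", wrap_seconds(64, rate));
	return 0;
}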
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 64f553f04fa..ad3c0e83a77 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -137,14 +137,12 @@ static void __init vt8500_timer_init(struct device_node *np)
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
- of_node_put(np);
return;
}
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
- of_node_put(np);
return;
}
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index c73fc2b74de..18c5b9b1664 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -32,11 +32,23 @@
#include <linux/atomic.h>
#include <linux/pid_namespace.h>
-#include <asm/unaligned.h>
-
#include <linux/cn_proc.h>
-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
+/*
+ * Size of a cn_msg followed by a proc_event structure. Since the
+ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
+ * add one 4-byte word to the size here, and then start the actual
+ * cn_msg structure 4 bytes into the stack buffer. The result is that
+ * the immediately following proc_event structure is aligned to 8 bytes.
+ */
+#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
+
+/* See comment above; we test our assumption about sizeof struct cn_msg here. */
+static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
+{
+ BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
+ return (struct cn_msg *)(buffer + 4);
+}
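The comment above can be checked mechanically: with a 20-byte struct cn_msg, starting the message 4 bytes into an 8-byte-aligned buffer puts the trailing proc_event at offset 24, which is again 8-byte aligned. Below is a standalone model of that arithmetic; the struct layouts are stand-ins invented for illustration, not the real connector structures.

/* Illustration of the 4-byte skew trick; stand-in types, not the kernel ones. */
#include <stdio.h>
#include <stdint.h>

struct fake_cn_msg { uint8_t bytes[20]; };		/* assumed 20 bytes */
struct fake_proc_event { uint64_t timestamp_ns; };	/* wants 8-byte alignment */

int main(void)
{
	/* Same sizing rule as CN_PROC_MSG_SIZE: msg + event + one spare word. */
	_Alignas(8) uint8_t buffer[sizeof(struct fake_cn_msg) +
				   sizeof(struct fake_proc_event) + 4];
	struct fake_cn_msg *msg = (struct fake_cn_msg *)(buffer + 4);
	uintptr_t event_addr = (uintptr_t)(msg + 1);

	printf("event offset in buffer: %zu (8-aligned: %s)\n",
	       (size_t)(event_addr - (uintptr_t)buffer),
	       event_addr % 8 == 0 ? "yes" : "no");
	return 0;
}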
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
@@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
parent = rcu_dereference(task->real_parent);
@@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task)
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
ev->event_data.exec.process_tgid = task->tgid;
@@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
const struct cred *cred;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->what = which_id;
@@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
rcu_read_unlock();
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
@@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task)
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
ev->event_data.sid.process_tgid = task->tgid;
@@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
ev->event_data.ptrace.process_tgid = task->tgid;
@@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task)
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
ev->event_data.comm.process_tgid = task->tgid;
@@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
ev->event_data.coredump.process_tgid = task->tgid;
@@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
ev->event_data.exit.process_tgid = task->tgid;
@@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->cpu = -1;
ev->what = PROC_EVENT_NONE;
ev->event_data.ack.err = err;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb82515..38093e27237 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -17,15 +17,11 @@ config CPU_FREQ
if CPU_FREQ
-config CPU_FREQ_TABLE
- tristate
-
config CPU_FREQ_GOV_COMMON
bool
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
- select CPU_FREQ_TABLE
default y
help
This driver exports CPU frequency statistics information through sysfs
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
- select CPU_FREQ_TABLE
select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
- select CPU_FREQ_TABLE
help
This adds a generic cpufreq driver for CPU0 frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -223,7 +217,6 @@ depends on IA64
config IA64_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
- select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
@@ -240,7 +233,6 @@ depends on MIPS
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
- select CPU_FREQ_TABLE
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers"
depends on SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-III processors.
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ
config SPARC_US2E_CPUFREQ
tristate "UltraSPARC-IIe CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling"
depends on SUPERH
config SH_CPU_FREQ
tristate "SuperH CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the cpufreq driver for SuperH. Any CPU that supports
clock rate rounding through the clock framework can use this
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0fa204b244b..ce52ed94924 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,6 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
- select CPU_FREQ_TABLE
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ
config ARM_EXYNOS_CPUFREQ
bool
- select CPU_FREQ_TABLE
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ
depends on SOC_EXYNOS5440
depends on HAVE_CLK && PM_OPP && OF
default y
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Samsung EXYNOS5440
SoC. The nature of exynos5440 clock controller is
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6Q cpufreq support"
depends on SOC_IMX6Q
depends on REGULATOR_ANATOP
- select CPU_FREQ_TABLE
help
This adds cpufreq driver support for Freescale i.MX6Q SOC.
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR
config ARM_KIRKWOOD_CPUFREQ
def_bool ARCH_KIRKWOOD && OF
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
- select CPU_FREQ_TABLE
config ARM_S3C_CPUFREQ
bool
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ
config ARM_S3C2416_CPUFREQ
bool "S3C2416 CPU Frequency scaling support"
depends on CPU_S3C2416
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for the Samsung S3C2416 and
S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -231,7 +221,14 @@ config ARM_SPEAR_CPUFREQ
config ARM_TEGRA_CPUFREQ
bool "TEGRA CPUFreq support"
depends on ARCH_TEGRA
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 25ca9db62e0..ca0021a96e1 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,7 +1,6 @@
config CPU_FREQ_CBE
tristate "CBE frequency scaling"
depends on CBE_RAS && PPC_CELL
- select CPU_FREQ_TABLE
default m
help
This adds the cpufreq driver for Cell BE processors.
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI
config CPU_FREQ_MAPLE
bool "Support for Maple 970FX Evaluation Board"
depends on PPC_MAPLE
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Maple 970FX
Evaluation Board and compatible boards (IBM JS2x blades).
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE
config PPC_CORENET_CPUFREQ
tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
depends on PPC_E500MC && OF && COMMON_CLK
- select CPU_FREQ_TABLE
select CLK_PPC_CORENET
help
This adds the CPUFreq driver support for Freescale e500mc,
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple PowerBooks,
this currently includes some models of iBook & Titanium
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on PPC_PMAC && PPC64
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64
config PPC_PASEMI_CPUFREQ
bool "Support for PA Semi PWRficient"
depends on PPC_PASEMI
- select CPU_FREQ_TABLE
default y
help
This adds the support for frequency switching on PA Semi
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index e2b6eabef22..d369349eeaa 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
- select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
- select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC400 and SC410
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ
config SC520_CPUFREQ
tristate "AMD Elan SC520"
- select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC520 processor.
@@ -88,7 +85,6 @@ config SC520_CPUFREQ
config X86_POWERNOW_K6
tristate "AMD Mobile K6-2/K6-3 PowerNow!"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6
config X86_POWERNOW_K7
tristate "AMD Mobile Athlon/Duron PowerNow!"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K7 mobile processors.
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
- select CPU_FREQ_TABLE
depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
@@ -132,11 +126,10 @@ config X86_POWERNOW_K8
config X86_AMD_FREQ_SENSITIVITY
tristate "AMD frequency sensitivity feedback powersave bias"
depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
- select CPU_FREQ_TABLE
help
This adds AMD-specific powersave bias function to the ondemand
governor, which allows it to make more power-conscious frequency
- change decisions based on feedback from hardware (availble on AMD
+ change decisions based on feedback from hardware (available on AMD
Family 16h and above).
Hardware feedback tells software how "sensitive" to frequency changes
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD
config X86_SPEEDSTEP_CENTRINO
tristate "Intel Enhanced SpeedStep (deprecated)"
- select CPU_FREQ_TABLE
select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
help
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE
config X86_SPEEDSTEP_ICH
tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI
config X86_P4_CLOCKMOD
tristate "Intel Pentium 4 clock modulation"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Intel Pentium 4 / XEON
processors. When enabled it will lower CPU temperature by skipping
@@ -259,7 +248,6 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
- select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
@@ -272,7 +260,6 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
- select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ad5866c2ada..74945652dd7 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@
# CPUfreq core
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
-# CPUfreq cross-arch helpers
-obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
-
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
@@ -77,6 +74,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 506fd23c755..caf41ebea18 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -424,34 +424,21 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
}
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
- struct cpufreq_freqs freqs;
struct drv_cmd cmd;
- unsigned int next_state = 0; /* Index into freq_table */
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
- pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
-
if (unlikely(data == NULL ||
data->acpi_data == NULL || data->freq_table == NULL)) {
return -ENODEV;
}
perf = data->acpi_data;
- result = cpufreq_frequency_table_target(policy,
- data->freq_table,
- target_freq,
- relation, &next_state);
- if (unlikely(result)) {
- result = -ENODEV;
- goto out;
- }
-
- next_perf_state = data->freq_table[next_state].driver_data;
+ next_perf_state = data->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
@@ -492,23 +479,17 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
else
cmd.mask = cpumask_of(policy->cpu);
- freqs.old = perf->states[perf->state].core_frequency * 1000;
- freqs.new = data->freq_table[next_state].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
drv_write(&cmd);
if (acpi_pstate_strict) {
- if (!check_freqs(cmd.mask, freqs.new, data)) {
+ if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+ data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
- freqs.new = freqs.old;
}
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
if (!result)
perf->state = next_perf_state;
@@ -516,15 +497,6 @@ out:
return result;
}
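The hunk above is the ->target() to ->target_index() switch from this series: the cpufreq core now resolves target_freq/relation into a validated frequency-table index and issues the PRECHANGE/POSTCHANGE notifications itself, which is why the table lookup and the cpufreq_notify_transition() calls disappear from the driver. The toy below models the kind of index resolution the core performs; the table values and the TABLE_END sentinel are invented for illustration, not the kernel's definitions.

/* Toy model of resolving a target frequency to a table index. */
#include <stdio.h>

#define TABLE_END 0u	/* stand-in sentinel, not CPUFREQ_TABLE_END */

static const unsigned int freq_table_khz[] = {	/* invented example values */
	800000, 1200000, 1600000, TABLE_END,
};

/* Pick the lowest table frequency at or above the request. */
static int resolve_index(unsigned int target_khz)
{
	int i, best = -1;

	for (i = 0; freq_table_khz[i] != TABLE_END; i++)
		if (freq_table_khz[i] >= target_khz &&
		    (best < 0 || freq_table_khz[i] < freq_table_khz[best]))
			best = i;
	return best;
}

int main(void)
{
	printf("1 GHz request -> index %d\n", resolve_index(1000000));
	return 0;
}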
-static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
-{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
-
- pr_debug("acpi_cpufreq_verify\n");
-
- return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
@@ -837,7 +809,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result)
goto err_freqfree;
@@ -846,12 +818,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
- /* Current speed is unknown and not detectable by IO port */
+ /*
+ * The core will not set policy->cur, because
+ * cpufreq_driver->get is NULL, so we need to set it here.
+ * However, we have to guess it, because the current speed is
+ * unknown and not detectable via IO ports.
+ */
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
- policy->cur = get_cur_freq_on_cpu(cpu);
break;
default:
break;
@@ -868,8 +844,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
@@ -929,8 +903,8 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
};
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 3549f0784af..5519933813e 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -24,110 +24,323 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
+#include <asm/bL_switcher.h>
#include "arm_big_little.h"
/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
#define MAX_CLUSTERS 2
+#ifdef CONFIG_BL_SWITCHER
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
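The two macros above implement the switcher's single virtual cluster: A7 frequencies are halved when exported to cpufreq and doubled again before being programmed, a rough factor-of-two model of the per-clock performance gap between the clusters. A quick standalone round-trip check (the 600 MHz A7 rate is an assumption made for illustration):

/* Round-trip check of the virtual <-> actual frequency mapping. */
#include <stdio.h>

#define A15_CLUSTER 0
#define A7_CLUSTER  1

#define ACTUAL_FREQ(cluster, freq) ((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)   ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

int main(void)
{
	unsigned int a7_hw_khz = 600000;	/* assumed A7 hardware rate */
	unsigned int virt = VIRT_FREQ(A7_CLUSTER, a7_hw_khz);

	printf("A7 %u kHz exported as %u kHz virtual\n", a7_hw_khz, virt);
	printf("virtual %u kHz programmed back as %u kHz\n",
	       virt, ACTUAL_FREQ(A7_CLUSTER, virt));
	return 0;
}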
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
-static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min; /* (Big) clock frequencies */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-static unsigned int bL_cpufreq_get(unsigned int cpu)
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
{
- u32 cur_cluster = cpu_to_cluster(cpu);
+ return topology_physical_package_id(cpu);
+}
- return clk_get_rate(clk[cur_cluster]) / 1000;
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
-/* Validate policy frequency range */
-static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static unsigned int find_cluster_maxfreq(int cluster)
{
- u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if ((cluster == per_cpu(physical_cluster, j)) &&
+ (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
+
+ pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+ max_freq);
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+ cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled()) {
+ pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
+ cpu));
+
+ return per_cpu(cpu_last_req_freq, cpu);
+ } else {
+ return clk_get_cpu_rate(cpu);
+ }
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
- return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+ __func__, cpu, old_cluster, new_cluster, new_rate);
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (WARN_ON(ret)) {
+ pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+ new_cluster);
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+ __func__, cpu, old_cluster, new_cluster);
+
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+ __func__, old_cluster, new_rate);
+
+ if (clk_set_rate(clk[old_cluster], new_rate * 1000))
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
}
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
- int ret = 0;
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs_new < clk_big_min)) {
+ new_cluster = A7_CLUSTER;
+ } else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs_new > clk_little_max)) {
+ new_cluster = A15_CLUSTER;
+ }
+ }
- cur_cluster = cpu_to_cluster(policy->cpu);
+ return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+}
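In switcher mode the target routine above also decides whether a request should migrate the CPU to the other physical cluster: a request below the big cluster's minimum moves it to the A7, and one above the LITTLE cluster's maximum moves it to the A15. The standalone model below captures just that decision; clk_big_min and clk_little_max are invented values, not taken from any platform.

/* Toy model of the big/LITTLE cluster choice (limits invented). */
#include <stdio.h>

enum { A15_CLUSTER, A7_CLUSTER };

static int pick_cluster(int cur, unsigned int freq_khz,
			unsigned int clk_big_min, unsigned int clk_little_max)
{
	if (cur == A15_CLUSTER && freq_khz < clk_big_min)
		return A7_CLUSTER;
	if (cur == A7_CLUSTER && freq_khz > clk_little_max)
		return A15_CLUSTER;
	return cur;
}

int main(void)
{
	/* Assumed limits: big >= 800 MHz, LITTLE <= 600 MHz (virtual). */
	unsigned int big_min = 800000, little_max = 600000;

	printf("A15 @ 500 MHz -> cluster %d\n",
	       pick_cluster(A15_CLUSTER, 500000, big_min, little_max));
	printf("A7 @ 700 MHz -> cluster %d\n",
	       pick_cluster(A7_CLUSTER, 700000, big_min, little_max));
	return 0;
}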
- freqs.old = bL_cpufreq_get(policy->cpu);
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
- /* Determine valid target frequency using freq_table */
- cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
- target_freq, relation, &freq_tab_idx);
- freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
- pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
- __func__, cpu, cur_cluster, freqs.old, target_freq,
- freqs.new);
+ return count;
+}
- if (freqs.old == freqs.new)
- return 0;
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t min_freq = ~0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency < min_freq)
+ min_freq = table[i].frequency;
+ return min_freq;
+}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t max_freq = 0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency > max_freq)
+ max_freq = table[i].frequency;
+ return max_freq;
+}
- ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
- if (ret) {
- pr_err("clk_set_rate failed: %d\n", ret);
- freqs.new = freqs.old;
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ table[k].frequency = VIRT_FREQ(i,
+ freq_table[i][j].frequency);
+ pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+ table[k].frequency);
+ k++;
+ }
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
- return ret;
+ pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
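merge_cluster_tables() above builds the virtual cluster's table by appending the per-cluster tables in reverse cluster order (A7 first, then A15), converting each A7 entry with VIRT_FREQ(); under the driver's assumption that the halved LITTLE range sits below the big range, the result comes out in increasing order. A small standalone model with two invented per-cluster tables:

/* Toy model of merging two per-cluster tables into one virtual table. */
#include <stdio.h>

#define TABLE_END 0u	/* stand-in sentinel */
#define MAX_CLUSTERS 2
#define VIRT_FREQ(cluster, freq) ((cluster) == 1 ? (freq) >> 1 : (freq))

int main(void)
{
	/* Invented tables in kHz: cluster 0 = A15, cluster 1 = A7. */
	static const unsigned int tables[MAX_CLUSTERS][4] = {
		{ 800000, 1000000, 1200000, TABLE_END },
		{ 350000, 500000, 600000, TABLE_END },
	};
	unsigned int merged[8];
	int i, j, k = 0;

	/* Reverse cluster order so the halved A7 entries come first. */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--)
		for (j = 0; tables[i][j] != TABLE_END; j++)
			merged[k++] = VIRT_FREQ(i, tables[i][j]);

	for (i = 0; i < k; i++)
		printf("index %d: %u kHz\n", i, merged[i]);
	return 0;
}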
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev);
- if (!atomic_dec_return(&cluster_usage[cluster])) {
- clk_put(clk[cluster]);
- opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
- dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
}
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
}
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
char name[14] = "cpu-cluster.";
int ret;
- if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ if (freq_table[cluster])
return 0;
ret = arm_bL_ops->init_opp_table(cpu_dev);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto atomic_dec;
+ goto out;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (ret) {
dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto atomic_dec;
+ goto out;
}
name[12] = cluster + '0';
- clk[cluster] = clk_get_sys(name, NULL);
+ clk[cluster] = clk_get(cpu_dev, name);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
@@ -138,15 +351,74 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
__func__, cpu_dev->id, cluster);
ret = PTR_ERR(clk[cluster]);
- opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-atomic_dec:
- atomic_dec(&cluster_usage[cluster]);
+out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ ret = _get_cluster_clk_and_freq_table(cdev);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[0]);
+ clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+ pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+ __func__, cluster, clk_big_min, clk_little_max);
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
+}
+
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -165,7 +437,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
policy->cpu, cur_cluster);
@@ -173,7 +445,14 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
@@ -181,9 +460,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = bL_cpufreq_get(policy->cpu);
-
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
@@ -200,33 +478,60 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
+ cpufreq_frequency_table_put_attr(policy->cpu);
put_cluster_clk_and_freq_table(cpu_dev);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
}
-/* Export freq_table to sysfs */
-static struct freq_attr *bL_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
- .flags = CPUFREQ_STICKY,
- .verify = bL_cpufreq_verify_policy,
- .target = bL_cpufreq_set_target,
- .get = bL_cpufreq_get,
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bL_cpufreq_set_target,
+ .get = bL_cpufreq_get_rate,
.init = bL_cpufreq_init,
.exit = bL_cpufreq_exit,
- .have_governor_per_policy = true,
- .attr = bL_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
+};
+
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
- int ret;
+ int ret, i;
if (arm_bL_ops) {
pr_debug("%s: Already registered: %s, exiting\n", __func__,
@@ -241,16 +546,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
ret = cpufreq_register_driver(&bL_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
- pr_info("%s: Registered platform driver: %s\n", __func__,
- ops->name);
+ ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ if (ret) {
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ops->name);
+ }
}
+ bL_switcher_put_enabled();
return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
@@ -263,7 +581,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
return;
}
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&bL_switcher_notifier);
cpufreq_unregister_driver(&bL_cpufreq_driver);
+ bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
arm_bL_ops->name);
arm_bL_ops = NULL;
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 79b2ce17884..70f18fc12d4 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops {
int (*init_opp_table)(struct device *cpu_dev);
};
-static inline int cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 480c0bd0468..8d9d5910890 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -24,7 +24,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index e0c38d93899..856ad80418a 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -19,18 +19,10 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/slab.h>
static struct clk *cpuclk;
-
-static int at32_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
+static struct cpufreq_frequency_table *freq_table;
static unsigned int at32_get_speed(unsigned int cpu)
{
@@ -43,74 +35,94 @@ static unsigned int at32_get_speed(unsigned int cpu)
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
-static int at32_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- long freq;
-
- /* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
-
- /* Check if policy->min <= new_freq <= policy->max */
- if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+ unsigned int old_freq, new_freq;
- freqs.old = at32_get_speed(0);
- freqs.new = (freq + 500) / 1000;
- freqs.flags = 0;
+ old_freq = at32_get_speed(0);
+ new_freq = freq_table[index].frequency;
if (!ref_freq) {
- ref_freq = freqs.old;
+ ref_freq = old_freq;
loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- if (freqs.old < freqs.new)
+ if (old_freq < new_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, freqs.new);
- clk_set_rate(cpuclk, freq);
- if (freqs.new < freqs.old)
+ loops_per_jiffy_ref, ref_freq, new_freq);
+ clk_set_rate(cpuclk, new_freq * 1000);
+ if (new_freq < old_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, freqs.new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+ loops_per_jiffy_ref, ref_freq, new_freq);
return 0;
}
static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
+ unsigned int frequency, rate, min_freq;
+ int retval, steps, i;
+
if (policy->cpu != 0)
return -EINVAL;
cpuclk = clk_get(NULL, "cpu");
if (IS_ERR(cpuclk)) {
pr_debug("cpufreq: could not get CPU clk\n");
- return PTR_ERR(cpuclk);
+ retval = PTR_ERR(cpuclk);
+ goto out_err;
}
- policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
- policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
policy->cpuinfo.transition_latency = 0;
- policy->cur = at32_get_speed(0);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- printk("cpufreq: AT32AP CPU frequency driver\n");
+ /*
+ * The AVR32 CPU frequency scales in powers of two between the maximum
+ * and minimum rate; also leave room for the table end marker.
+ *
+ * Further validate that the frequency is usable, and append it to the
+ * frequency table.
+ */
+ steps = fls(frequency / min_freq) + 1;
+ freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ retval = -ENOMEM;
+ goto out_err_put_clk;
+ }
- return 0;
+ for (i = 0; i < (steps - 1); i++) {
+ rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
+
+ if (rate != frequency)
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_table[i].frequency = frequency;
+
+ frequency /= 2;
+ }
+
+ freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+ retval = cpufreq_table_validate_and_show(policy, freq_table);
+ if (!retval) {
+ printk("cpufreq: AT32AP CPU frequency driver\n");
+ return 0;
+ }
+
+ kfree(freq_table);
+out_err_put_clk:
+ clk_put(cpuclk);
+out_err:
+ return retval;
}
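The init path above sizes the table from the fact that the core clock scales in powers of two between the minimum and maximum rounded rates: fls(max/min) gives the number of halving steps, plus one slot for the end marker. The standalone model below reproduces only that sizing and fill loop; the clk_round_rate() validation is omitted, and the 140 MHz / 17.5 MHz limits are assumptions for illustration.

/* Toy model of the power-of-two frequency table sizing (values assumed). */
#include <stdio.h>

static int fls_u32(unsigned int x)
{
	int bits = 0;

	while (x) {		/* index of the highest set bit, 1-based */
		x >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned int max_khz = 140000, min_khz = 17500;	/* assumed limits */
	int steps = fls_u32(max_khz / min_khz) + 1;	/* entries + end marker */
	unsigned int freq = max_khz;
	int i;

	printf("table needs %d slots\n", steps);
	for (i = 0; i < steps - 1; i++, freq /= 2)
		printf("entry %d: %u kHz\n", i, freq);
	return 0;
}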
static struct cpufreq_driver at32_driver = {
.name = "at32ap",
.init = at32_cpufreq_driver_init,
- .verify = at32_verify_speed,
- .target = at32_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = at32_set_target,
.get = at32_get_speed,
.flags = CPUFREQ_STICKY,
};
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index ef05978a723..e9e63fc9c2c 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -127,41 +127,28 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new)
}
#endif
-static int bfin_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
{
#ifndef CONFIG_BF60x
unsigned int plldiv;
#endif
- unsigned int index;
- unsigned long cclk_hz;
- struct cpufreq_freqs freqs;
static unsigned long lpj_ref;
static unsigned int lpj_ref_freq;
+ unsigned int old_freq, new_freq;
int ret = 0;
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
#endif
- if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
- relation, &index))
- return -EINVAL;
+ old_freq = bfin_getfreq_khz(0);
+ new_freq = bfin_freq_table[index].frequency;
- cclk_hz = bfin_freq_table[index].frequency;
-
- freqs.old = bfin_getfreq_khz(0);
- freqs.new = cclk_hz;
-
- pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
- cclk_hz, target_freq, freqs.old);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifndef CONFIG_BF60x
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
bfin_write_PLL_DIV(plldiv);
#else
- ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
+ ret = cpu_set_cclk(policy->cpu, new_freq * 1000);
if (ret != 0) {
WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
return ret;
@@ -177,25 +164,16 @@ static int bfin_target(struct cpufreq_policy *policy,
#endif
if (!lpj_ref_freq) {
lpj_ref = loops_per_jiffy;
- lpj_ref_freq = freqs.old;
+ lpj_ref_freq = old_freq;
}
- if (freqs.new != freqs.old) {
+ if (new_freq != old_freq) {
loops_per_jiffy = cpufreq_scale(lpj_ref,
- lpj_ref_freq, freqs.new);
+ lpj_ref_freq, new_freq);
}
- /* TODO: just test case for cycles clock source, remove later */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: done\n");
return ret;
}
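The lpj adjustment above relies on cpufreq_scale(), which rescales the reference loops_per_jiffy by the ratio of the new core clock to the reference clock so that delay loops stay roughly calibrated across frequency changes. The arithmetic, modelled standalone with invented reference values:

/* Standalone model of the lpj rescaling: lpj scales with the clock ratio. */
#include <stdio.h>
#include <stdint.h>

static unsigned long scale_lpj(unsigned long lpj_ref,
			       unsigned int ref_khz, unsigned int new_khz)
{
	return (unsigned long)((uint64_t)lpj_ref * new_khz / ref_khz);
}

int main(void)
{
	unsigned long lpj_ref = 2000000;	/* assumed boot-time value */
	unsigned int ref_khz = 500000;		/* assumed boot clock */

	printf("at 250 MHz: lpj = %lu\n", scale_lpj(lpj_ref, ref_khz, 250000));
	printf("at 600 MHz: lpj = %lu\n", scale_lpj(lpj_ref, ref_khz, 600000));
	return 0;
}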
-static int bfin_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, bfin_freq_table);
-}
-
static int __bfin_cpu_init(struct cpufreq_policy *policy)
{
@@ -209,23 +187,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
- policy->cur = cclk;
- cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+ return cpufreq_table_validate_and_show(policy, bfin_freq_table);
}
-static struct freq_attr *bfin_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver bfin_driver = {
- .verify = bfin_verify_speed,
- .target = bfin_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
+ .exit = cpufreq_generic_exit,
.name = "bfin cpufreq",
- .attr = bfin_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init bfin_cpu_init(void)
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index c522a95c0e1..d4585ce2346 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -17,7 +17,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -30,73 +30,51 @@ static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
-static int cpu0_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int cpu0_get_speed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
}
-static int cpu0_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
- unsigned int index;
int ret;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (ret) {
- pr_err("failed to match target freqency %d: %d\n",
- target_freq, ret);
- return ret;
- }
-
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
- freq_exact = freq_Hz;
- freqs.new = freq_Hz / 1000;
- freqs.old = clk_get_rate(cpu_clk) / 1000;
- if (freqs.old == freqs.new)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ freq_exact = freq_Hz;
+ new_freq = freq_Hz / 1000;
+ old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
- opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
- freqs.new = freqs.old;
- ret = PTR_ERR(opp);
- goto post_notify;
+ return PTR_ERR(opp);
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
- freqs.new / 1000, volt ? volt / 1000 : -1);
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
+ if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
@@ -105,72 +83,35 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
pr_err("failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
+ if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
- clk_set_rate(cpu_clk, freqs.old * 1000);
- freqs.new = freqs.old;
+ clk_set_rate(cpu_clk, old_freq * 1000);
}
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return ret;
}
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (ret) {
- pr_err("invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = clk_get_rate(cpu_clk) / 1000;
-
- /*
- * The driver only supports the SMP configuartion where all processors
- * share the clock and voltage and clock. Use cpufreq affected_cpus
- * interface to have all CPUs scaled together.
- */
- cpumask_setall(policy->cpus);
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
}
-static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
-
- return 0;
-}
-
-static struct freq_attr *cpu0_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
- .verify = cpu0_verify_speed,
- .target = cpu0_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpu0_set_target,
.get = cpu0_get_speed,
.init = cpu0_cpufreq_init,
- .exit = cpu0_cpufreq_exit,
+ .exit = cpufreq_generic_exit,
.name = "generic_cpu0",
- .attr = cpu0_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int cpu0_cpufreq_probe(struct platform_device *pdev)
@@ -218,7 +159,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_put_node;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
goto out_put_node;
@@ -230,7 +171,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long min_uV, max_uV;
int i;
@@ -242,12 +183,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
rcu_read_lock();
- opp = opp_find_freq_exact(cpu_dev,
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
- min_uV = opp_get_voltage(opp);
- opp = opp_find_freq_exact(cpu_dev,
+ min_uV = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
- max_uV = opp_get_voltage(opp);
+ max_uV = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
@@ -264,7 +205,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
return 0;
out_free_table:
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
return ret;
@@ -273,7 +214,7 @@ out_put_node:
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
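The cpufreq-cpu0 hunk above also carries the OPP rename that recurs later in exynos5440: <linux/opp.h> becomes <linux/pm_opp.h>, struct opp becomes struct dev_pm_opp, and the opp_* helpers gain a dev_pm_ prefix, with the RCU-protected calling convention unchanged. A sketch of the lookup idiom used above, with a hypothetical cpu_dev device pointer:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Return the voltage (in uV) of the lowest OPP at or above *freq_hz. */
static int sketch_opp_voltage(struct device *cpu_dev, unsigned long freq_hz,
			      unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();		/* OPP entries are RCU protected here */
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*volt_uv = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}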
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index b83d45f6857..a05b876f375 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy)
if (policy->min < (fsb_pol_max * fid * 100))
policy->max = (fsb_pol_max + 1) * fid * 100;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = nforce2_get(policy->cpu);
return 0;
}
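nforce2 (and davinci further down) also pick up cpufreq_verify_within_cpu_limits(), which replaces the explicit three-argument call against policy->cpuinfo. Judging from the lines it replaces, the helper is presumably just this shorthand (a sketch, not the actual header definition):

#include <linux/cpufreq.h>

static inline void sketch_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	/* clamp policy->min/max to the limits advertised in policy->cpuinfo */
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}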
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04548f7023a..02d534da22d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -47,49 +47,11 @@ static LIST_HEAD(cpufreq_policy_list);
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- * do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- * the policy altogether (eg. CPU hotplug), will hold this lock in write
- * mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- * take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode(int cpu) \
-{ \
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
- BUG_ON(!policy); \
- down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
- \
- return 0; \
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu) \
-static void unlock_policy_rwsem_##mode(int cpu) \
-{ \
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
- BUG_ON(!policy); \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
+static inline bool has_target(void)
+{
+ return cpufreq_driver->target_index || cpufreq_driver->target;
}
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
/*
* rwsem to guarantee that cpufreq driver module doesn't unload during critical
* sections
@@ -135,7 +97,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
- return cpufreq_driver->have_governor_per_policy;
+ return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
@@ -183,6 +145,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the frequency table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency)
+{
+ int ret;
+
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ /*
+ * The driver only supports the SMP configuration where all processors
+ * share the same clock and voltage.
+ */
+ cpumask_setall(policy->cpus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
@@ -363,7 +356,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
}
- } else if (cpufreq_driver->target) {
+ } else if (has_target()) {
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
@@ -414,7 +407,7 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
/**
@@ -435,7 +428,7 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
- ret = __cpufreq_set_policy(policy, &new_policy); \
+ ret = cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
\
return ret ? ret : count; \
@@ -493,11 +486,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
&new_policy.governor))
return -EINVAL;
- /*
- * Do not use cpufreq_set_policy here or the user_policy.max
- * will be wrongly overridden
- */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -525,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
ssize_t i = 0;
struct cpufreq_governor *t;
- if (!cpufreq_driver->target) {
+ if (!has_target()) {
i += sprintf(buf, "performance powersave");
goto out;
}
@@ -653,24 +642,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EINVAL;
+ ssize_t ret;
if (!down_read_trylock(&cpufreq_rwsem))
- goto exit;
+ return -EINVAL;
- if (lock_policy_rwsem_read(policy->cpu) < 0)
- goto up_read;
+ down_read(&policy->rwsem);
if (fattr->show)
ret = fattr->show(policy, buf);
else
ret = -EIO;
- unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
+ up_read(&policy->rwsem);
up_read(&cpufreq_rwsem);
-exit:
+
return ret;
}
@@ -689,17 +675,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!down_read_trylock(&cpufreq_rwsem))
goto unlock;
- if (lock_policy_rwsem_write(policy->cpu) < 0)
- goto up_read;
+ down_write(&policy->rwsem);
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-up_read:
up_read(&cpufreq_rwsem);
unlock:
put_online_cpus();
@@ -815,7 +799,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
if (ret)
goto err_out_kobj_put;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
goto err_out_kobj_put;
@@ -844,11 +828,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
- /* assure that the starting sequence is run in __cpufreq_set_policy */
+ /* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -864,10 +848,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev,
bool frozen)
{
- int ret = 0, has_target = !!cpufreq_driver->target;
+ int ret = 0;
unsigned long flags;
- if (has_target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
@@ -875,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- lock_policy_rwsem_write(policy->cpu);
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -883,9 +867,9 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
per_cpu(cpufreq_cpu_data, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
- if (has_target) {
+ if (has_target()) {
if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n", __func__);
@@ -930,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
goto err_free_cpumask;
INIT_LIST_HEAD(&policy->policy_list);
+ init_rwsem(&policy->rwsem);
+
return policy;
err_free_cpumask:
@@ -949,26 +935,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
- if (cpu == policy->cpu)
+ if (WARN_ON(cpu == policy->cpu))
return;
- /*
- * Take direct locks as lock_policy_rwsem_write wouldn't work here.
- * Also lock for last cpu is enough here as contention will happen only
- * after policy->cpu is changed and after it is changed, other threads
- * will try to acquire lock for new cpu. And policy is already updated
- * by then.
- */
- down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+ down_write(&policy->rwsem);
policy->last_cpu = policy->cpu;
policy->cpu = cpu;
- up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+ up_write(&policy->rwsem);
-#ifdef CONFIG_CPU_FREQ_TABLE
cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
@@ -1053,6 +1030,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
+ if (cpufreq_driver->get) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ pr_err("%s: ->get() failed\n", __func__);
+ goto err_get_freq;
+ }
+ }
+
/* related cpus should atleast have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
@@ -1107,6 +1092,9 @@ err_out_unregister:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+err_get_freq:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
err_set_policy_cpu:
cpufreq_policy_free(policy);
nomem_out:
@@ -1147,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
if (ret) {
pr_err("%s: Failed to move kobj: %d", __func__, ret);
- WARN_ON(lock_policy_rwsem_write(old_cpu));
+ down_write(&policy->rwsem);
cpumask_set_cpu(old_cpu, policy->cpus);
- unlock_policy_rwsem_write(old_cpu);
+ up_write(&policy->rwsem);
ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
@@ -1186,7 +1174,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
return -EINVAL;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
@@ -1200,22 +1188,21 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy->governor->name, CPUFREQ_NAME_LEN);
#endif
- lock_policy_rwsem_read(cpu);
+ down_read(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
- unlock_policy_rwsem_read(cpu);
+ up_read(&policy->rwsem);
if (cpu != policy->cpu) {
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
-
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
if (!frozen) {
- pr_debug("%s: policy Kobject moved to cpu: %d "
- "from: %d\n",__func__, new_cpu, cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
}
}
}
@@ -1243,16 +1230,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
return -EINVAL;
}
- WARN_ON(lock_policy_rwsem_write(cpu));
+ down_write(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
if (cpus > 1)
cpumask_clear_cpu(cpu, policy->cpus);
- unlock_policy_rwsem_write(cpu);
+ up_write(&policy->rwsem);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
@@ -1263,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
if (!frozen) {
- lock_policy_rwsem_read(cpu);
+ down_read(&policy->rwsem);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
- unlock_policy_rwsem_read(cpu);
+ up_read(&policy->rwsem);
kobject_put(kobj);
/*
@@ -1295,7 +1282,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
if (!frozen)
cpufreq_policy_free(policy);
} else {
- if (cpufreq_driver->target) {
+ if (has_target()) {
if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n",
@@ -1310,36 +1297,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
/**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
*/
-static inline int __cpufreq_remove_dev(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
-{
- int ret;
-
- ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
- if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
- return ret;
-}
-
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
- int retval;
+ int ret;
if (cpu_is_offline(cpu))
return 0;
- retval = __cpufreq_remove_dev(dev, sif, false);
- return retval;
+ ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+ if (!ret)
+ ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+ return ret;
}
static void handle_update(struct work_struct *work)
@@ -1458,22 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
unsigned int ret_freq = 0;
if (cpufreq_disabled() || !cpufreq_driver)
return -ENOENT;
+ BUG_ON(!policy);
+
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
- if (unlikely(lock_policy_rwsem_read(cpu)))
- goto out_policy;
+ down_read(&policy->rwsem);
ret_freq = __cpufreq_get(cpu);
- unlock_policy_rwsem_read(cpu);
-
-out_policy:
+ up_read(&policy->rwsem);
up_read(&cpufreq_rwsem);
return ret_freq;
@@ -1681,12 +1656,75 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
+ /*
+ * This might look like a redundant call as we are checking it again
+ * after finding the index. But it is left intentionally for cases where
+ * exactly the same freq is requested again, so we can save a few function
+ * calls.
+ */
if (target_freq == policy->cur)
return 0;
if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
+ else if (cpufreq_driver->target_index) {
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_freqs freqs;
+ bool notify;
+ int index;
+
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ goto out;
+ }
+
+ retval = cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ goto out;
+ }
+
+ if (freq_table[index].frequency == policy->cur) {
+ retval = 0;
+ goto out;
+ }
+
+ notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+
+ if (notify) {
+ freqs.old = policy->cur;
+ freqs.new = freq_table[index].frequency;
+ freqs.flags = 0;
+
+ pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+ __func__, policy->cpu, freqs.old,
+ freqs.new);
+
+ cpufreq_notify_transition(policy, &freqs,
+ CPUFREQ_PRECHANGE);
+ }
+
+ retval = cpufreq_driver->target_index(policy, index);
+ if (retval)
+ pr_err("%s: Failed to change cpu frequency: %d\n",
+ __func__, retval);
+
+ if (notify) {
+ /*
+ * Notify with old freq in case we failed to change
+ * frequency
+ */
+ if (retval)
+ freqs.new = freqs.old;
+
+ cpufreq_notify_transition(policy, &freqs,
+ CPUFREQ_POSTCHANGE);
+ }
+ }
+out:
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1697,14 +1735,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
{
int ret = -EINVAL;
- if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- goto fail;
+ down_write(&policy->rwsem);
ret = __cpufreq_driver_target(policy, target_freq, relation);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-fail:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1871,10 +1907,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get_policy);
/*
- * data : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
*/
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
int ret = 0, failed = 1;
@@ -1934,10 +1970,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
/* end old governor */
if (policy->governor) {
__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- unlock_policy_rwsem_write(new_policy->cpu);
+ up_write(&policy->rwsem);
__cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(new_policy->cpu);
+ down_write(&policy->rwsem);
}
/* start new governor */
@@ -1946,10 +1982,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
failed = 0;
} else {
- unlock_policy_rwsem_write(new_policy->cpu);
+ up_write(&policy->rwsem);
__cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(new_policy->cpu);
+ down_write(&policy->rwsem);
}
}
@@ -1995,10 +2031,7 @@ int cpufreq_update_policy(unsigned int cpu)
goto no_policy;
}
- if (unlikely(lock_policy_rwsem_write(cpu))) {
- ret = -EINVAL;
- goto fail;
- }
+ down_write(&policy->rwsem);
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
@@ -2017,17 +2050,16 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
policy->cur = new_policy.cur;
} else {
- if (policy->cur != new_policy.cur && cpufreq_driver->target)
+ if (policy->cur != new_policy.cur && has_target())
cpufreq_out_of_sync(cpu, policy->cur,
new_policy.cur);
}
}
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
- unlock_policy_rwsem_write(cpu);
+ up_write(&policy->rwsem);
-fail:
cpufreq_cpu_put(policy);
no_policy:
return ret;
@@ -2096,7 +2128,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -ENODEV;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- ((!driver_data->setpolicy) && (!driver_data->target)))
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2183,14 +2216,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
- int cpu;
-
if (cpufreq_disabled())
return -ENODEV;
- for_each_possible_cpu(cpu)
- init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
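From the driver side, the net effect of the cpufreq.c changes above is a much smaller contract for .target_index(): the core looks up the table entry, skips a switch to the current frequency, and (unless the driver sets CPUFREQ_ASYNC_NOTIFICATION) wraps the call in CPUFREQ_PRECHANGE/POSTCHANGE notifications, re-reporting the old frequency if the switch fails. A sketch of what a driver hook is left with, using a hypothetical clock handle and table:

#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *sketch_clk;				/* hypothetical clock */
static struct cpufreq_frequency_table *sketch_table;	/* hypothetical table */

/*
 * No cpufreq_freqs bookkeeping and no notifier calls here: with
 * ->target_index the core's __cpufreq_driver_target() does both.
 */
static int sketch_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	return clk_set_rate(sketch_clk, sketch_table[index].frequency * 1000);
}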
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index f62d822048e..25a70d06c5b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load)
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
+ if (dbs_info->requested_freq > policy->max)
+ dbs_info->requested_freq = policy->max;
+
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_H);
return;
@@ -80,13 +83,18 @@ static void cs_check_cpu(int cpu, unsigned int load)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
return;
- dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
+ freq_target = get_freq_target(cs_tuners, policy);
+ if (dbs_info->requested_freq > freq_target)
+ dbs_info->requested_freq -= freq_target;
+ else
+ dbs_info->requested_freq = policy->min;
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_L);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0806c31e576..e6be63561fa 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_data->cdata->gov_dbs_timer);
}
- /*
- * conservative does not implement micro like ondemand
- * governor, thus we are bound to jiffes/HZ
- */
if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
cs_dbs_info->down_skip = 0;
cs_dbs_info->enable = 1;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 88cd39f7b0e..b5f2b861894 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -191,7 +191,10 @@ struct common_dbs_data {
struct attribute_group *attr_group_gov_sys; /* one governor - system */
struct attribute_group *attr_group_gov_pol; /* one governor - policy */
- /* Common data for platforms that don't set have_governor_per_policy */
+ /*
+ * Common data for platforms that don't set
+ * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+ */
struct dbs_data *gdbs_data;
struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 32f26f6e17c..18d40918909 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -168,7 +168,6 @@ static void od_check_cpu(int cpu, unsigned int load)
dbs_info->rate_mult =
od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
- return;
} else {
/* Calculate the next frequency proportional to load */
unsigned int freq_next;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 03078090b5f..4dbf1db16ac 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
- /*
- * We're safe from concurrent calls to ->target() here
- * as we hold the userspace_mutex lock. If we were calling
- * cpufreq_driver_target, a deadlock situation might occur:
- * A: cpufreq_set (lock userspace_mutex) ->
- * cpufreq_driver_target(lock policy->lock)
- * B: cpufreq_set_policy(lock policy->lock) ->
- * __cpufreq_governor ->
- * cpufreq_governor_userspace (lock userspace_mutex)
- */
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
-
err:
mutex_unlock(&userspace_mutex);
return ret;
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index cb8276dd19c..86559040c54 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
return clk_ctrl.pll ? 200000 : 6000;
}
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
reg_clkgen_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
- freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
- freqs.new = cris_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -51,67 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, cris_freq_table,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- cris_freq_set_cpu_state(policy, newstate);
-
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = cris_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
- if (result)
- return (result);
-
- cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
- return 0;
-}
-
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
-
-static struct freq_attr *cris_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
- .verify = cris_freq_verify,
- .target = cris_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cris_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cris_freq",
- .attr = cris_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 72328f77dc5..26d940d40b1 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
return clk_ctrl.pll ? 200000 : 6000;
}
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
reg_config_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
- freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
- freqs.new = cris_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -51,64 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target
- (policy, cris_freq_table, target_freq, relation, &newstate))
- return -EINVAL;
-
- cris_freq_set_cpu_state(policy, newstate);
-
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = cris_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
- if (result)
- return (result);
-
- cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *cris_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
- .verify = cris_freq_verify,
- .target = cris_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cris_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cris_freq",
- .attr = cris_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 551dd655c6f..5e8a854381b 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -68,58 +66,38 @@ static unsigned int davinci_getspeed(unsigned int cpu)
return clk_get_rate(cpufreq.armclk) / 1000;
}
-static int davinci_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
- int ret = 0;
- unsigned int idx;
- struct cpufreq_freqs freqs;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
struct clk *armclk = cpufreq.armclk;
+ unsigned int old_freq, new_freq;
+ int ret = 0;
- freqs.old = davinci_getspeed(0);
- freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
-
- if (freqs.old == freqs.new)
- return ret;
-
- dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
-
- ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
- freqs.new, relation, &idx);
- if (ret)
- return -EINVAL;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq = davinci_getspeed(0);
+ new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
- if (pdata->set_voltage && freqs.new > freqs.old) {
+ if (pdata->set_voltage && new_freq > old_freq) {
ret = pdata->set_voltage(idx);
if (ret)
- goto out;
+ return ret;
}
ret = clk_set_rate(armclk, idx);
if (ret)
- goto out;
+ return ret;
if (cpufreq.asyncclk) {
ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
if (ret)
- goto out;
+ return ret;
}
/* if moving to lower freq, lower the voltage after lowering freq */
- if (pdata->set_voltage && freqs.new < freqs.old)
+ if (pdata->set_voltage && new_freq < old_freq)
pdata->set_voltage(idx);
-out:
- if (ret)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
+ return 0;
}
static int davinci_cpu_init(struct cpufreq_policy *policy)
@@ -138,47 +116,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
return result;
}
- policy->cur = davinci_getspeed(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (result) {
- pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
- __func__);
- return result;
- }
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
/*
* Time measurement across the target() function yields ~1500-1800us
* time taken with no drivers on notification list.
* Setting the latency to 2000 us to accommodate addition of drivers
* to pre/post change notification list.
*/
- policy->cpuinfo.transition_latency = 2000 * 1000;
- return 0;
+ return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
}
-static int davinci_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *davinci_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_STICKY,
.verify = davinci_verify_speed,
- .target = davinci_target,
+ .target_index = davinci_target,
.get = davinci_getspeed,
.init = davinci_cpu_init,
- .exit = davinci_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "davinci",
- .attr = davinci_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
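davinci, dbx500, cris and the exynos drivers all funnel their init paths into cpufreq_generic_init(), whose body is added in cpufreq.c above (table validation, transition latency, cpumask_setall() on policy->cpus). A converted init() is then a one-liner; a sketch, assuming a driver-owned table and a measured latency in nanoseconds:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table *sketch_freq_table;	/* hypothetical */

static int sketch_cpu_init(struct cpufreq_policy *policy)
{
	/* validate & expose the table, set latency, mark all CPUs as shared */
	return cpufreq_generic_init(policy, sketch_freq_table, 2000 * 1000);
}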
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 26321cdc194..0e67ab96321 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -19,51 +19,11 @@
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *dbx500_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- unsigned int idx;
- int ret;
-
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &idx))
- return -EINVAL;
-
- freqs.old = policy->cur;
- freqs.new = freq_table[idx].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- /* pre-change notification */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* update armss clk frequency */
- ret = clk_set_rate(armss_clk, freqs.new * 1000);
-
- if (ret) {
- pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
- freqs.new * 1000, ret);
- freqs.new = freqs.old;
- }
-
- /* post change notification */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
+ return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
}
static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
@@ -84,43 +44,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int res;
-
- /* get policy fields based on the table */
- res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!res)
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- else {
- pr_err("dbx500-cpufreq: Failed to read policy table\n");
- return res;
- }
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /*
- * FIXME : Need to take time measurement across the target()
- * function with no/some/all drivers in the notification
- * list.
- */
- policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
- /* policy sharing between dual CPUs */
- cpumask_setall(policy->cpus);
-
- return 0;
+ return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
static struct cpufreq_driver dbx500_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
- .verify = dbx500_cpufreq_verify_speed,
- .target = dbx500_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = dbx500_cpufreq_target,
.get = dbx500_cpufreq_getspeed,
.init = dbx500_cpufreq_init,
.name = "DBX500",
- .attr = dbx500_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int dbx500_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 09f64cc8301..9012b8bb6b6 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
struct cpufreq_policy *policy,
u32 dest_state)
{
- struct cpufreq_freqs freqs;
u32 lo, hi;
- int err = 0;
int i;
- freqs.old = eps_get(policy->cpu);
- freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i = 0;
@@ -124,8 +118,7 @@ static int eps_set_state(struct eps_cpu_data *centaur,
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
- err = -ENODEV;
- goto postchange;
+ return -ENODEV;
}
}
/* Set new multiplier and voltage */
@@ -137,16 +130,10 @@ static int eps_set_state(struct eps_cpu_data *centaur,
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
- err = -ENODEV;
- goto postchange;
+ return -ENODEV;
}
} while (lo & ((1 << 16) | (1 << 17)));
- /* Return current frequency */
-postchange:
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
-
#ifdef DEBUG
{
u8 current_multiplier, current_voltage;
@@ -161,19 +148,12 @@ postchange:
current_multiplier);
}
#endif
- if (err)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return err;
+ return 0;
}
-static int eps_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int eps_target(struct cpufreq_policy *policy, unsigned int index)
{
struct eps_cpu_data *centaur;
- unsigned int newstate = 0;
unsigned int cpu = policy->cpu;
unsigned int dest_state;
int ret;
@@ -182,28 +162,14 @@ static int eps_target(struct cpufreq_policy *policy,
return -ENODEV;
centaur = eps_cpu[cpu];
- if (unlikely(cpufreq_frequency_table_target(policy,
- &eps_cpu[cpu]->freq_table[0],
- target_freq,
- relation,
- &newstate))) {
- return -EINVAL;
- }
-
/* Make frequency transition */
- dest_state = centaur->freq_table[newstate].driver_data & 0xffff;
+ dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
return ret;
}
-static int eps_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &eps_cpu[policy->cpu]->freq_table[0]);
-}
-
static int eps_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -401,15 +367,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
}
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
- policy->cur = fsb * current_multiplier;
- ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+ ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
if (ret) {
kfree(centaur);
return ret;
}
- cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
return 0;
}
@@ -424,19 +388,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *eps_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver eps_driver = {
- .verify = eps_verify,
- .target = eps_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = eps_target,
.init = eps_cpu_init,
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = eps_attr,
+ .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 823a400d98f..de08acff510 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -105,32 +105,9 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
}
-/**
- * elanfreq_set_cpu_frequency: Change the CPU core frequency
- * @cpu: cpu number
- * @freq: frequency in kHz
- *
- * This function takes a frequency value and changes the CPU frequency
- * according to this. Note that the frequency has to be checked by
- * elanfreq_validatespeed() for correctness!
- *
- * There is no return value.
- */
-
-static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int elanfreq_target(struct cpufreq_policy *policy,
+ unsigned int state)
{
- struct cpufreq_freqs freqs;
-
- freqs.old = elanfreq_get_cpu_frequency(0);
- freqs.new = elan_multiplier[state].clock;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
- elan_multiplier[state].clock);
-
-
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
@@ -161,39 +138,8 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
udelay(10000);
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-
-/**
- * elanfreq_validatespeed: test if frequency range is valid
- * @policy: the policy to validate
- *
- * This function checks if a given frequency range in kHz is valid
- * for the hardware supported by the driver.
- */
-
-static int elanfreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
-}
-
-static int elanfreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- elanfreq_set_cpu_state(policy, newstate);
-
return 0;
}
-
-
/*
* Module init and exit code
*/
@@ -202,7 +148,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int i;
- int result;
/* capability check */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -221,21 +166,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = elanfreq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
- if (result)
- return result;
- cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
- return 0;
-}
-
-
-static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, elanfreq_table);
}
@@ -261,20 +193,14 @@ __setup("elanfreq=", elanfreq_setup);
#endif
-static struct freq_attr *elanfreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
- .verify = elanfreq_verify,
- .target = elanfreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = elanfreq_target,
.init = elanfreq_cpu_init,
- .exit = elanfreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "elanfreq",
- .attr = elanfreq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 0fac34439e3..f3c22874da7 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -25,18 +25,11 @@
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
-static struct cpufreq_freqs freqs;
static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- exynos_info->freq_table);
-}
-
static unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
@@ -65,21 +58,18 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+ unsigned int old_freq;
int index, old_index;
int ret = 0;
- freqs.old = policy->cur;
- freqs.new = target_freq;
-
- if (freqs.new == freqs.old)
- goto out;
+ old_freq = policy->cur;
/*
* The policy max have been changed so that we cannot get proper
* old_index with cpufreq_frequency_table_target(). Thus, ignore
- * policy and get the index from the raw freqeuncy table.
+ * policy and get the index from the raw frequency table.
*/
- old_index = exynos_cpufreq_get_index(freqs.old);
+ old_index = exynos_cpufreq_get_index(old_freq);
if (old_index < 0) {
ret = old_index;
goto out;
@@ -104,17 +94,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
}
arm_volt = volt_table[index];
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* When the new frequency is higher than current frequency */
- if ((freqs.new > freqs.old) && !safe_arm_volt) {
+ if ((target_freq > old_freq) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, arm_volt);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
@@ -124,24 +111,17 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, safe_arm_volt);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
exynos_info->set_freq(old_index, index);
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- if (ret)
- goto out;
-
/* When the new frequency is lower than current frequency */
- if ((freqs.new < freqs.old) ||
- ((freqs.new > freqs.old) && safe_arm_volt)) {
+ if ((target_freq < old_freq) ||
+ ((target_freq > old_freq) && safe_arm_volt)) {
/* down the voltage after frequency change */
- regulator_set_voltage(arm_regulator, arm_volt,
+ ret = regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
@@ -151,19 +131,14 @@ post_notify:
}
out:
-
cpufreq_cpu_put(policy);
return ret;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- unsigned int index;
- unsigned int new_freq;
int ret = 0;
mutex_lock(&cpufreq_lock);
@@ -171,15 +146,7 @@ static int exynos_target(struct cpufreq_policy *policy,
if (frequency_locked)
goto out;
- if (cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
- goto out;
- }
-
- new_freq = freq_table[index].frequency;
-
- ret = exynos_cpufreq_scale(new_freq);
+ ret = exynos_cpufreq_scale(freq_table[index].frequency);
out:
mutex_unlock(&cpufreq_lock);
@@ -247,38 +214,18 @@ static struct notifier_block exynos_cpufreq_nb = {
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
-
- /* set the transition latency value */
- policy->cpuinfo.transition_latency = 100000;
-
- cpumask_setall(policy->cpus);
-
- return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
-}
-
-static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
-static struct freq_attr *exynos_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
- .verify = exynos_verify_speed,
- .target = exynos_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
- .exit = exynos_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
- .attr = exynos_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index add7fbec4fc..f2c75065ce1 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
static void exynos4210_set_apll(unsigned int index)
{
- unsigned int tmp;
+ unsigned int tmp, freq = apll_freq_4210[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_4210[index].mps;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
+ clk_set_rate(mout_apll, freq * 1000);
- /* 4. wait_lock_time */
- do {
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
-}
-
static void exynos4210_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos4210_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
-
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4210[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- exynos4210_set_apll(new_index);
- }
+ exynos4210_set_clkdiv(new_index);
+ exynos4210_set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos4210_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4210[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- /* 2. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- exynos4210_set_apll(new_index);
- /* 2. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- }
+ exynos4210_set_apll(new_index);
+ exynos4210_set_clkdiv(new_index);
}
}
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
info->set_freq = exynos4210_set_frequency;
- info->need_apll_change = exynos4210_pms_change;
return 0;
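The exynos4210 hunk above (and the exynos4x12 one below) also drops the hand-rolled APLL reprogramming: instead of writing the P/M/S fields of APLL_CON0 and spinning on the lock bit, the driver re-parents the core mux to the MPLL, hands the rate change to the common clock framework (which presumably performs the lock wait in its own set_rate path), and switches the mux back. Stripped of the mux status polling, the sequence is essentially this sketch:

#include <linux/clk.h>

/* Sketch of the simplified exynos4210_set_apll() flow shown above. */
static void sketch_set_apll(struct clk *moutcore, struct clk *mout_mpll,
			    struct clk *mout_apll, unsigned int freq_khz)
{
	/* Park ARMCLK on the MPLL while the APLL is reprogrammed. */
	clk_set_parent(moutcore, mout_mpll);

	/* Let the clock framework set the APLL rate (kHz -> Hz). */
	clk_set_rate(mout_apll, freq_khz * 1000);

	/* Switch ARMCLK back to the APLL. */
	clk_set_parent(moutcore, mout_apll);
}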
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 08b7477b0aa..8683304ce62 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
static void exynos4x12_set_apll(unsigned int index)
{
- unsigned int tmp, pdiv;
+ unsigned int tmp, freq = apll_freq_4x12[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
+ clk_set_rate(mout_apll, freq * 1000);
- __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_4x12[index].mps;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- /* 4. wait_lock_time */
- do {
- cpu_relax();
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
-}
-
static void exynos4x12_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos4x12_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4x12[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- exynos4x12_set_apll(new_index);
- }
+ exynos4x12_set_clkdiv(new_index);
+ exynos4x12_set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos4x12_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4x12[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
- /* 2. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- exynos4x12_set_apll(new_index);
- /* 2. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- }
+ exynos4x12_set_apll(new_index);
+ exynos4x12_set_clkdiv(new_index);
}
}
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
info->set_freq = exynos4x12_set_frequency;
- info->need_apll_change = exynos4x12_pms_change;
return 0;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index be5380ecdcd..76bef8b078c 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -118,12 +118,12 @@ static int init_div_table(void)
struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
unsigned int tmp, clk_div, ema_div, freq, volt_id;
int i = 0;
- struct opp *opp;
+ struct dev_pm_opp *opp;
rcu_read_lock();
for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
- opp = opp_find_freq_exact(dvfs_info->dev,
+ opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
freq_tbl[i].frequency * 1000, true);
if (IS_ERR(opp)) {
rcu_read_unlock();
@@ -142,7 +142,7 @@ static int init_div_table(void)
<< P0_7_CSCLKDEV_SHIFT;
/* Calculate EMA */
- volt_id = opp_get_voltage(opp);
+ volt_id = dev_pm_opp_get_voltage(opp);
volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
if (volt_id < PMIC_HIGH_VOLT) {
ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
@@ -209,38 +209,22 @@ static void exynos_enable_dvfs(void)
dvfs_info->base + XMU_DVFS_CTRL);
}
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- dvfs_info->freq_table);
-}
-
static unsigned int exynos_getspeed(unsigned int cpu)
{
return dvfs_info->cur_frequency;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int index, tmp;
- int ret = 0, i;
+ unsigned int tmp;
+ int i;
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
mutex_lock(&cpufreq_lock);
- ret = cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index);
- if (ret)
- goto out;
-
freqs.old = dvfs_info->cur_frequency;
freqs.new = freq_table[index].frequency;
- if (freqs.old == freqs.new)
- goto out;
-
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Set the target frequency in all C0_3_PSTATE register */
@@ -251,9 +235,8 @@ static int exynos_target(struct cpufreq_policy *policy,
__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
}
-out:
mutex_unlock(&cpufreq_lock);
- return ret;
+ return 0;
}
static void exynos_cpufreq_work(struct work_struct *work)
@@ -324,30 +307,19 @@ static void exynos_sort_descend_freq_table(void)
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
- if (ret) {
- dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cur = dvfs_info->cur_frequency;
- policy->cpuinfo.transition_latency = dvfs_info->latency;
- cpumask_setall(policy->cpus);
-
- cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, dvfs_info->freq_table,
+ dvfs_info->latency);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = exynos_verify_speed,
- .target = exynos_target,
+ .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
+ .exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
+ .attr = cpufreq_generic_attr,
};
static const struct of_device_id exynos_cpufreq_match[] = {
@@ -399,13 +371,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_put_node;
}
- ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+ &dvfs_info->freq_table);
if (ret) {
dev_err(dvfs_info->dev,
"failed to init cpufreq table: %d\n", ret);
goto err_put_node;
}
- dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+ dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
exynos_sort_descend_freq_table();
if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
@@ -454,7 +427,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
return 0;
err_free_table:
- opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_put_node:
of_node_put(np);
dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -464,7 +437,7 @@ err_put_node:
static int exynos_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&exynos_driver);
- opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
return 0;
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f111454a7ae..3458d27f63b 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- unsigned int next_larger = ~0;
- unsigned int i;
- unsigned int count = 0;
+ unsigned int next_larger = ~0, freq, i = 0;
+ bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- unsigned int freq = table[i].frequency;
+ for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
- if ((freq >= policy->min) && (freq <= policy->max))
- count++;
- else if ((next_larger > freq) && (freq > policy->max))
+ if ((freq >= policy->min) && (freq <= policy->max)) {
+ found = true;
+ break;
+ }
+
+ if ((next_larger > freq) && (freq > policy->max))
next_larger = freq;
}
- if (!count)
+ if (!found) {
policy->max = next_larger;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
+ }
pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
+/*
+ * Generic routine to verify policy & frequency table, requires driver to call
+ * cpufreq_frequency_table_get_attr() prior to it.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!table)
+ return -ENODEV;
+
+ return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+struct freq_attr *cpufreq_generic_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
/*
* if you use these, you must assure that the frequency table is valid
* all the time between get_attr and put_attr!
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+ if (!ret)
+ cpufreq_frequency_table_get_attr(table, policy->cpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
{
pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 70442c7b5e7..d83e8266a58 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int maxfreq, curfreq;
+ unsigned int maxfreq;
if (!policy || policy->cpu != 0)
return -ENODEV;
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
stock_freq = maxfreq;
- curfreq = gx_get_cpuspeed(0);
pr_debug("cpu max frequency is %d.\n", maxfreq);
- pr_debug("cpu current frequency is %dkHz.\n", curfreq);
/* setup basic struct for cpufreq API */
policy->cpu = 0;
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
else
policy->min = maxfreq / POLICY_MIN_DIV;
policy->max = maxfreq;
- policy->cur = curfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 794123fcf3e..bf8902a0866 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
struct device_node *np;
int ret;
- if (!of_machine_is_compatible("calxeda,highbank"))
+ if ((!of_machine_is_compatible("calxeda,highbank")) &&
+ (!of_machine_is_compatible("calxeda,ecx-2000")))
return -ENODEV;
cpu_dev = get_cpu_device(0);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 3e14f031717..53c6ac637e1 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -141,7 +141,6 @@ processor_set_freq (
{
int ret = 0;
u32 value = 0;
- struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int retval;
@@ -168,13 +167,6 @@ processor_set_freq (
pr_debug("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
- /* cpufreq frequency struct */
- cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
- cpufreq_freqs.new = data->freq_table[state].frequency;
-
- /* notify cpufreq */
- cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
-
/*
* First we write the target state's 'control' value to the
* control_register.
@@ -186,22 +178,11 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
- unsigned int tmp = cpufreq_freqs.new;
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_POSTCHANGE);
- cpufreq_freqs.new = cpufreq_freqs.old;
- cpufreq_freqs.old = tmp;
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_POSTCHANGE);
printk(KERN_WARNING "Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
- cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
data->acpi_data.state = state;
retval = 0;
@@ -227,42 +208,11 @@ acpi_cpufreq_get (
static int
acpi_cpufreq_target (
struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
- unsigned int next_state = 0;
- unsigned int result = 0;
-
- pr_debug("acpi_cpufreq_setpolicy\n");
-
- result = cpufreq_frequency_table_target(policy,
- data->freq_table, target_freq, relation, &next_state);
- if (result)
- return (result);
-
- result = processor_set_freq(data, policy, next_state);
-
- return (result);
-}
-
-
-static int
-acpi_cpufreq_verify (
- struct cpufreq_policy *policy)
+ unsigned int index)
{
- unsigned int result = 0;
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
- pr_debug("acpi_cpufreq_verify\n");
-
- result = cpufreq_frequency_table_verify(policy,
- data->freq_table);
-
- return (result);
+ return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
}
-
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -321,7 +271,6 @@ acpi_cpufreq_cpu_init (
data->acpi_data.states[i].transition_latency * 1000;
}
}
- policy->cur = processor_get_freq(data, policy->cpu);
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
@@ -335,7 +284,7 @@ acpi_cpufreq_cpu_init (
}
}
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result) {
goto err_freqfree;
}
@@ -356,8 +305,6 @@ acpi_cpufreq_cpu_init (
(u32) data->acpi_data.states[i].status,
(u32) data->acpi_data.states[i].control);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
/* the first call to ->target() should result in us actually
* writing something to the appropriate registers. */
data->resume = 1;
@@ -396,20 +343,14 @@ acpi_cpufreq_cpu_exit (
}
-static struct freq_attr* acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
.get = acpi_cpufreq_get,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq",
- .attr = acpi_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c3fd2a101ca..4b3f18e5f36 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -35,73 +35,52 @@ static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
-static int imx6q_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int imx6q_get_speed(unsigned int cpu)
{
return clk_get_rate(arm_clk) / 1000;
}
-static int imx6q_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
- unsigned int index;
+ unsigned int old_freq, new_freq;
int ret;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (ret) {
- dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
- target_freq, ret);
- return ret;
- }
-
- freqs.new = freq_table[index].frequency;
- freq_hz = freqs.new * 1000;
- freqs.old = clk_get_rate(arm_clk) / 1000;
-
- if (freqs.old == freqs.new)
- return 0;
+ new_freq = freq_table[index].frequency;
+ freq_hz = new_freq * 1000;
+ old_freq = clk_get_rate(arm_clk) / 1000;
rcu_read_lock();
- opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
return PTR_ERR(opp);
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old / 1000,
- freqs.new / 1000, volt / 1000);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq / 1000, volt_old / 1000,
+ new_freq / 1000, volt / 1000);
/* scaling up? scale voltage before frequency */
- if (freqs.new > freqs.old) {
+ if (new_freq > old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/*
* Need to increase vddpu and vddsoc for safety
* if we are about to run at 1.2 GHz.
*/
- if (freqs.new == FREQ_1P2_GHZ / 1000) {
+ if (new_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_HIGH, 0);
regulator_set_voltage_tol(soc_reg,
@@ -121,21 +100,20 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
clk_set_parent(step_clk, pll2_pfd2_396m_clk);
clk_set_parent(pll1_sw_clk, step_clk);
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+ clk_set_rate(pll1_sys_clk, new_freq * 1000);
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, freqs.new * 1000);
+ ret = clk_set_rate(arm_clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/* scaling down? scale voltage after frequency */
- if (freqs.new < freqs.old) {
+ if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_warn(cpu_dev,
@@ -143,7 +121,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
ret = 0;
}
- if (freqs.old == FREQ_1P2_GHZ / 1000) {
+ if (old_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_NORMAL, 0);
regulator_set_voltage_tol(soc_reg,
@@ -151,55 +129,28 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
}
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
-}
-
-static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (ret) {
- dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = clk_get_rate(arm_clk) / 1000;
- cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
return 0;
}
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
}
-static struct freq_attr *imx6q_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver imx6q_cpufreq_driver = {
- .verify = imx6q_verify_speed,
- .target = imx6q_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = imx6q_set_target,
.get = imx6q_get_speed,
.init = imx6q_cpufreq_init,
- .exit = imx6q_cpufreq_exit,
+ .exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
- .attr = imx6q_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
@@ -237,14 +188,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
/* We expect an OPP table supplied by platform */
- num = opp_get_opp_count(cpu_dev);
+ num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto put_node;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto put_node;
@@ -259,12 +210,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
* same order.
*/
rcu_read_lock();
- opp = opp_find_freq_exact(cpu_dev,
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
- min_volt = opp_get_voltage(opp);
- opp = opp_find_freq_exact(cpu_dev,
+ min_volt = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[--num].frequency * 1000, true);
- max_volt = opp_get_voltage(opp);
+ max_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
if (ret > 0)
@@ -292,7 +243,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return 0;
free_freq_table:
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
of_node_put(np);
return ret;
@@ -301,7 +252,7 @@ put_node:
static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index f7c99df0880..7d8ab000d31 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -15,18 +15,19 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
#include <asm/mach-types.h>
#include <asm/hardware/icst.h>
-static struct cpufreq_driver integrator_driver;
+static void __iomem *cm_base;
+/* The cpufreq driver only uses the OSC register */
+#define INTEGRATOR_HDR_OSC_OFFSET 0x08
+#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
-#define CM_ID __io_address(INTEGRATOR_HDR_ID)
-#define CM_OSC __io_address(INTEGRATOR_HDR_OSC)
-#define CM_STAT __io_address(INTEGRATOR_HDR_STAT)
-#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK)
+static struct cpufreq_driver integrator_driver;
static const struct icst_params lclk_params = {
.ref = 24000000,
@@ -59,9 +60,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
{
struct icst_vco vco;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
policy->max = icst_hz(&cclk_params, vco) / 1000;
@@ -69,10 +68,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
policy->min = icst_hz(&cclk_params, vco) / 1000;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -100,7 +96,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
BUG_ON(cpu != smp_processor_id());
/* get current setting */
- cm_osc = __raw_readl(CM_OSC);
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
if (machine_is_integrator()) {
vco.s = (cm_osc >> 8) & 7;
@@ -128,7 +124,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cm_osc = __raw_readl(CM_OSC);
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
if (machine_is_integrator()) {
cm_osc &= 0xfffff800;
@@ -138,9 +134,9 @@ static int integrator_set_target(struct cpufreq_policy *policy,
}
cm_osc |= vco.v;
- __raw_writel(0xa05f, CM_LOCK);
- __raw_writel(cm_osc, CM_OSC);
- __raw_writel(0, CM_LOCK);
+ __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
+ __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET);
+ __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
/*
* Restore the CPUs allowed mask.
@@ -165,7 +161,7 @@ static unsigned int integrator_get(unsigned int cpu)
BUG_ON(cpu != smp_processor_id());
/* detect memory etc. */
- cm_osc = __raw_readl(CM_OSC);
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
if (machine_is_integrator()) {
vco.s = (cm_osc >> 8) & 7;
@@ -186,10 +182,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
{
/* set default policy and cpuinfo */
- policy->cpuinfo.max_freq = 160000;
- policy->cpuinfo.min_freq = 12000;
+ policy->max = policy->cpuinfo.max_freq = 160000;
+ policy->min = policy->cpuinfo.min_freq = 12000;
policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
- policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
return 0;
}
@@ -202,19 +197,43 @@ static struct cpufreq_driver integrator_driver = {
.name = "integrator",
};
-static int __init integrator_cpu_init(void)
+static int __init integrator_cpufreq_probe(struct platform_device *pdev)
{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!cm_base)
+ return -ENODEV;
+
return cpufreq_register_driver(&integrator_driver);
}
-static void __exit integrator_cpu_exit(void)
+static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&integrator_driver);
}
+static const struct of_device_id integrator_cpufreq_match[] = {
+ { .compatible = "arm,core-module-integrator"},
+ { },
+};
+
+static struct platform_driver integrator_cpufreq_driver = {
+ .driver = {
+ .name = "integrator-cpufreq",
+ .owner = THIS_MODULE,
+ .of_match_table = integrator_cpufreq_match,
+ },
+ .remove = __exit_p(integrator_cpufreq_remove),
+};
+
+module_platform_driver_probe(integrator_cpufreq_driver,
+ integrator_cpufreq_probe);
+
MODULE_AUTHOR ("Russell M. King");
MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
MODULE_LICENSE ("GPL");
-
-module_init(integrator_cpu_init);
-module_exit(integrator_cpu_exit);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index eb3fdc75500..5f1cbae3696 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
+#include <linux/acpi.h>
#include <trace/events/power.h>
#include <asm/div64.h>
@@ -33,6 +34,8 @@
#define SAMPLE_COUNT 3
+#define BYT_RATIOS 0x66a
+
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
@@ -78,7 +81,6 @@ struct cpudata {
struct timer_list timer;
- struct pstate_adjust_policy *pstate_policy;
struct pstate_data pstate;
struct _pid pid;
@@ -100,15 +102,21 @@ struct pstate_adjust_policy {
int i_gain_pct;
};
-static struct pstate_adjust_policy default_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
+struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
+ void (*set)(int pstate);
+};
+
+struct cpu_defaults {
+ struct pstate_adjust_policy pid_policy;
+ struct pstate_funcs funcs;
};
+static struct pstate_adjust_policy pid_params;
+static struct pstate_funcs pstate_funcs;
+
struct perf_limits {
int no_turbo;
int max_perf_pct;
@@ -185,14 +193,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
- pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
- pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
- pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+ pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
+ pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
+ pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
pid_reset(&cpu->pid,
- cpu->pstate_policy->setpoint,
+ pid_params.setpoint,
100,
- cpu->pstate_policy->deadband,
+ pid_params.deadband,
0);
}
@@ -226,12 +234,12 @@ struct pid_param {
};
static struct pid_param pid_files[] = {
- {"sample_rate_ms", &default_policy.sample_rate_ms},
- {"d_gain_pct", &default_policy.d_gain_pct},
- {"i_gain_pct", &default_policy.i_gain_pct},
- {"deadband", &default_policy.deadband},
- {"setpoint", &default_policy.setpoint},
- {"p_gain_pct", &default_policy.p_gain_pct},
+ {"sample_rate_ms", &pid_params.sample_rate_ms},
+ {"d_gain_pct", &pid_params.d_gain_pct},
+ {"i_gain_pct", &pid_params.i_gain_pct},
+ {"deadband", &pid_params.deadband},
+ {"setpoint", &pid_params.setpoint},
+ {"p_gain_pct", &pid_params.p_gain_pct},
{NULL, NULL}
};
@@ -336,33 +344,92 @@ static void intel_pstate_sysfs_expose_params(void)
}
/************************** sysfs end ************************/
+static int byt_get_min_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_RATIOS, value);
+ return value & 0xFF;
+}
-static int intel_pstate_min_pstate(void)
+static int byt_get_max_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_RATIOS, value);
+ return (value >> 16) & 0xFF;
+}
+
+static int core_get_min_pstate(void)
{
u64 value;
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
-static int intel_pstate_max_pstate(void)
+static int core_get_max_pstate(void)
{
u64 value;
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
-static int intel_pstate_turbo_pstate(void)
+static int core_get_turbo_pstate(void)
{
u64 value;
int nont, ret;
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
- nont = intel_pstate_max_pstate();
+ nont = core_get_max_pstate();
ret = ((value) & 255);
if (ret <= nont)
ret = nont;
return ret;
}
+static void core_set_pstate(int pstate)
+{
+ u64 val;
+
+ val = pstate << 8;
+ if (limits.no_turbo)
+ val |= (u64)1 << 32;
+
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+}
+
+static struct cpu_defaults core_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+static struct cpu_defaults byt_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 14,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+ },
+ .funcs = {
+ .get_max = byt_get_max_pstate,
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_max_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
@@ -383,7 +450,6 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
int max_perf, min_perf;
- u64 val;
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
@@ -395,11 +461,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
trace_cpu_frequency(pstate * 100000, cpu->cpu);
cpu->pstate.current_pstate = pstate;
- val = pstate << 8;
- if (limits.no_turbo)
- val |= (u64)1 << 32;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ pstate_funcs.set(pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -421,9 +484,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
sprintf(cpu->name, "Intel 2nd generation core");
- cpu->pstate.min_pstate = intel_pstate_min_pstate();
- cpu->pstate.max_pstate = intel_pstate_max_pstate();
- cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
/*
* goto max pstate so we don't slow up boot if we are built-in if we are
@@ -465,7 +528,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
int sample_time, delay;
- sample_time = cpu->pstate_policy->sample_rate_ms;
+ sample_time = pid_params.sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
@@ -521,14 +584,15 @@ static void intel_pstate_timer_func(unsigned long __data)
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(0x2a, default_policy),
- ICPU(0x2d, default_policy),
- ICPU(0x3a, default_policy),
- ICPU(0x3c, default_policy),
- ICPU(0x3e, default_policy),
- ICPU(0x3f, default_policy),
- ICPU(0x45, default_policy),
- ICPU(0x46, default_policy),
+ ICPU(0x2a, core_params),
+ ICPU(0x2d, core_params),
+ ICPU(0x37, byt_params),
+ ICPU(0x3a, core_params),
+ ICPU(0x3c, core_params),
+ ICPU(0x3e, core_params),
+ ICPU(0x3f, core_params),
+ ICPU(0x45, core_params),
+ ICPU(0x46, core_params),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -552,8 +616,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_get_cpu_pstates(cpu);
cpu->cpu = cpunum;
- cpu->pstate_policy =
- (struct pstate_adjust_policy *)id->driver_data;
+
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
cpu->timer.data =
@@ -613,9 +676,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
@@ -683,9 +744,9 @@ static int intel_pstate_msrs_not_valid(void)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- if (!intel_pstate_min_pstate() ||
- !intel_pstate_max_pstate() ||
- !intel_pstate_turbo_pstate())
+ if (!pstate_funcs.get_max() ||
+ !pstate_funcs.get_min() ||
+ !pstate_funcs.get_turbo())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
@@ -698,10 +759,96 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
+
+static void copy_pid_params(struct pstate_adjust_policy *policy)
+{
+ pid_params.sample_rate_ms = policy->sample_rate_ms;
+ pid_params.p_gain_pct = policy->p_gain_pct;
+ pid_params.i_gain_pct = policy->i_gain_pct;
+ pid_params.d_gain_pct = policy->d_gain_pct;
+ pid_params.deadband = policy->deadband;
+ pid_params.setpoint = policy->setpoint;
+}
+
+static void copy_cpu_funcs(struct pstate_funcs *funcs)
+{
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
+ pstate_funcs.set = funcs->set;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+
+static bool intel_pstate_no_acpi_pss(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ acpi_status status;
+ union acpi_object *pss;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+
+ status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ pss = buffer.pointer;
+ if (pss && pss->type == ACPI_TYPE_PACKAGE) {
+ kfree(pss);
+ return false;
+ }
+
+ kfree(pss);
+ }
+
+ return true;
+}
+
+struct hw_vendor_info {
+ u16 valid;
+ char oem_id[ACPI_OEM_ID_SIZE];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+};
+
+/* Hardware vendor-specific info that has its own power management modes */
+static struct hw_vendor_info vendor_info[] = {
+ {1, "HP ", "ProLiant"},
+ {0, "", ""},
+};
+
+static bool intel_pstate_platform_pwr_mgmt_exists(void)
+{
+ struct acpi_table_header hdr;
+ struct hw_vendor_info *v_info;
+
+ if (acpi_disabled
+ || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ return false;
+
+ for (v_info = vendor_info; v_info->valid; v_info++) {
+ if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
+ && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
+ && intel_pstate_no_acpi_pss())
+ return true;
+ }
+
+ return false;
+}
+#else /* CONFIG_ACPI not enabled */
+static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+#endif /* CONFIG_ACPI */
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
const struct x86_cpu_id *id;
+ struct cpu_defaults *cpu_info;
if (no_load)
return -ENODEV;
@@ -710,6 +857,18 @@ static int __init intel_pstate_init(void)
if (!id)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
+ cpu_info = (struct cpu_defaults *)id->driver_data;
+
+ copy_pid_params(&cpu_info->pid_policy);
+ copy_cpu_funcs(&cpu_info->funcs);
+
if (intel_pstate_msrs_not_valid())
return -ENODEV;
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index ba10658a939..0767a4e29df 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,69 +55,37 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
return kirkwood_freq_table[0].frequency;
}
-static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int index)
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
unsigned int state = kirkwood_freq_table[index].driver_data;
unsigned long reg;
- freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
- freqs.new = kirkwood_freq_table[index].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
- kirkwood_freq_table[index].frequency);
- dev_dbg(priv.dev, "old frequency was %i KHz\n",
- kirkwood_cpufreq_get_cpu_frequency(0));
-
- if (freqs.old != freqs.new) {
- local_irq_disable();
-
- /* Disable interrupts to the CPU */
- reg = readl_relaxed(priv.base);
- reg |= CPU_SW_INT_BLK;
- writel_relaxed(reg, priv.base);
-
- switch (state) {
- case STATE_CPU_FREQ:
- clk_disable(priv.powersave_clk);
- break;
- case STATE_DDR_FREQ:
- clk_enable(priv.powersave_clk);
- break;
- }
+ local_irq_disable();
- /* Wait-for-Interrupt, while the hardware changes frequency */
- cpu_do_idle();
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
- /* Enable interrupts to the CPU */
- reg = readl_relaxed(priv.base);
- reg &= ~CPU_SW_INT_BLK;
- writel_relaxed(reg, priv.base);
-
- local_irq_enable();
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_disable(priv.powersave_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_enable(priv.powersave_clk);
+ break;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
-}
-static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int index = 0;
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
- if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
- kirkwood_cpufreq_set_cpu_state(policy, index);
+ local_irq_enable();
return 0;
}
@@ -125,40 +93,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
/* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 5000; /* 5uS */
- policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
}
-static struct freq_attr *kirkwood_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver kirkwood_cpufreq_driver = {
.get = kirkwood_cpufreq_get_cpu_frequency,
- .verify = kirkwood_cpufreq_verify,
- .target = kirkwood_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
- .exit = kirkwood_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "kirkwood-cpufreq",
- .attr = kirkwood_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4ada1cccb05..45bafddfd8e 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -625,28 +625,13 @@ static void longhaul_setup_voltagescaling(void)
}
-static int longhaul_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, longhaul_table);
-}
-
-
static int longhaul_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int table_index)
{
- unsigned int table_index = 0;
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
- if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
- relation, &table_index))
- return -EINVAL;
-
- /* Don't set same frequency again */
- if (longhaul_index == table_index)
- return 0;
-
if (!can_scale_voltage)
longhaul_setstate(policy, table_index);
else {
@@ -919,36 +904,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_setup_voltagescaling();
policy->cpuinfo.transition_latency = 200000; /* nsec */
- policy->cur = calc_speed(longhaul_get_cpu_mult());
-
- ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
- if (ret)
- return ret;
-
- cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, longhaul_table);
}
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *longhaul_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver longhaul_driver = {
- .verify = longhaul_verify,
- .target = longhaul_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = longhaul_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "longhaul",
- .attr = longhaul_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 5aa031612d5..074971b1263 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
return -EINVAL;
policy->cpu = 0;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 7bc3c44d34e..a4360921810 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -53,51 +53,24 @@ static unsigned int loongson2_cpufreq_get(unsigned int cpu)
* Here we notify other drivers of the proposed change and the final change.
*/
static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
unsigned int cpu = policy->cpu;
- unsigned int newstate = 0;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
unsigned int freq;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
- if (cpufreq_frequency_table_target
- (policy, &loongson2_clockmod_table[0], target_freq, relation,
- &newstate))
- return -EINVAL;
-
freq =
((cpu_clock_freq / 1000) *
- loongson2_clockmod_table[newstate].driver_data) / 8;
- if (freq < policy->min || freq > policy->max)
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
-
- freqs.old = loongson2_cpufreq_get(cpu);
- freqs.new = freq;
- freqs.flags = 0;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ loongson2_clockmod_table[index].driver_data) / 8;
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %u kHz\n", freq);
-
return 0;
}
@@ -131,40 +104,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
- policy->cur = loongson2_cpufreq_get(policy->cpu);
-
- cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
- policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- &loongson2_clockmod_table[0]);
-}
-
-static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &loongson2_clockmod_table[0]);
+ return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
+ cpufreq_frequency_table_put_attr(policy->cpu);
clk_put(cpuclk);
return 0;
}
-static struct freq_attr *loongson2_table_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
- .verify = loongson2_cpufreq_verify,
- .target = loongson2_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = loongson2_cpufreq_target,
.get = loongson2_cpufreq_get,
.exit = loongson2_cpufreq_exit,
- .attr = loongson2_table_attr,
+ .attr = cpufreq_generic_attr,
};
static struct platform_device_id platform_device_ids[] = {
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 6168d77b296..c4dfa42a75a 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -64,18 +64,11 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr *maple_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
static int maple_pmode_cur;
-static DEFINE_MUTEX(maple_switch_mutex);
-
static const u32 *maple_pmode_data;
static int maple_pmode_max;
@@ -135,37 +128,10 @@ static int maple_scom_query_freq(void)
* Common interface to the cpufreq core
*/
-static int maple_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
-}
-
static int maple_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
- int rc;
-
- if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- if (maple_pmode_cur == newstate)
- return 0;
-
- mutex_lock(&maple_switch_mutex);
-
- freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
- freqs.new = maple_cpu_freqs[newstate].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- rc = maple_scom_switch_freq(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- mutex_unlock(&maple_switch_mutex);
-
- return rc;
+ return maple_scom_switch_freq(index);
}
static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
@@ -175,27 +141,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cpuinfo.transition_latency = 12000;
- policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
- /* secondary CPUs are tied to the primary one by the
- * cpufreq core if in the secondary policy we tell it that
- * it actually must be one policy together with all others. */
- cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- maple_cpu_freqs);
+ return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
}
-
static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple",
.flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init,
- .verify = maple_cpufreq_verify,
- .target = maple_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = maple_cpufreq_target,
.get = maple_cpufreq_get_speed,
- .attr = maple_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init maple_cpufreq_init(void)
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index f31fcfcad51..a0acd0bfba4 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -40,13 +40,6 @@ static struct clk *mpu_clk;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
- if (!freq_table)
- return -EINVAL;
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int omap_getspeed(unsigned int cpu)
{
unsigned long rate;
@@ -58,42 +51,17 @@ static unsigned int omap_getspeed(unsigned int cpu)
return rate;
}
-static int omap_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int i;
- int r, ret = 0;
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ int r, ret;
+ struct dev_pm_opp *opp;
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
- if (!freq_table) {
- dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
- policy->cpu);
- return -EINVAL;
- }
+ old_freq = omap_getspeed(policy->cpu);
+ new_freq = freq_table[index].frequency;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &i);
- if (ret) {
- dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
- __func__, policy->cpu, target_freq, ret);
- return ret;
- }
- freqs.new = freq_table[i].frequency;
- if (!freqs.new) {
- dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
- policy->cpu, target_freq);
- return -EINVAL;
- }
-
- freqs.old = omap_getspeed(policy->cpu);
-
- if (freqs.old == freqs.new && policy->cur == freqs.new)
- return ret;
-
- freq = freqs.new * 1000;
+ freq = new_freq * 1000;
ret = clk_round_rate(mpu_clk, freq);
if (IS_ERR_VALUE(ret)) {
dev_warn(mpu_dev,
@@ -105,143 +73,103 @@ static int omap_target(struct cpufreq_policy *policy,
if (mpu_reg) {
rcu_read_lock();
- opp = opp_find_freq_ceil(mpu_dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
- __func__, freqs.new);
+ __func__, new_freq);
return -EINVAL;
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
- freqs.new / 1000, volt ? volt / 1000 : -1);
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (mpu_reg && (freqs.new > freqs.old)) {
+ if (mpu_reg && (new_freq > old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
__func__);
- freqs.new = freqs.old;
- goto done;
+ return r;
}
}
- ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(mpu_clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
- if (mpu_reg && (freqs.new < freqs.old)) {
+ if (mpu_reg && (new_freq < old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
- ret = clk_set_rate(mpu_clk, freqs.old * 1000);
- freqs.new = freqs.old;
- goto done;
+ clk_set_rate(mpu_clk, old_freq * 1000);
+ return r;
}
}
- freqs.new = omap_getspeed(policy->cpu);
-
-done:
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return ret;
}
static inline void freq_table_free(void)
{
if (atomic_dec_and_test(&freq_table_users))
- opp_free_cpufreq_table(mpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
}
static int omap_cpu_init(struct cpufreq_policy *policy)
{
- int result = 0;
+ int result;
mpu_clk = clk_get(NULL, "cpufreq_ck");
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
- if (policy->cpu >= NR_CPUS) {
- result = -EINVAL;
- goto fail_ck;
- }
-
- policy->cur = omap_getspeed(policy->cpu);
-
- if (!freq_table)
- result = opp_init_cpufreq_table(mpu_dev, &freq_table);
-
- if (result) {
- dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+ if (!freq_table) {
+ result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+ if (result) {
+ dev_err(mpu_dev,
+ "%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result);
- goto fail_ck;
+ goto fail;
+ }
}
atomic_inc_return(&freq_table_users);
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (result)
- goto fail_table;
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
- policy->cur = omap_getspeed(policy->cpu);
-
- /*
- * On OMAP SMP configuartion, both processors share the voltage
- * and clock. So both CPUs needs to be scaled together and hence
- * needs software co-ordination. Use cpufreq affected_cpus
- * interface to handle this scenario. Additional is_smp() check
- * is to keep SMP_ON_UP build working.
- */
- if (is_smp())
- cpumask_setall(policy->cpus);
-
/* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- return 0;
+ result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (!result)
+ return 0;
-fail_table:
freq_table_free();
-fail_ck:
+fail:
clk_put(mpu_clk);
return result;
}
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
+ cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
clk_put(mpu_clk);
return 0;
}
-static struct freq_attr *omap_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver omap_driver = {
.flags = CPUFREQ_STICKY,
- .verify = omap_verify_speed,
- .target = omap_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = omap_target,
.get = omap_getspeed,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
- .attr = omap_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 2f0a2a65c37..3d1cba9fd5f 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -105,47 +105,21 @@ static struct cpufreq_frequency_table p4clockmod_table[] = {
};
-static int cpufreq_p4_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = DC_RESV;
- struct cpufreq_freqs freqs;
int i;
- if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- freqs.old = cpufreq_p4_get(policy->cpu);
- freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
for_each_cpu(i, policy->cpus)
- cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data);
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
return 0;
}
-static int cpufreq_p4_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
-}
-
-
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
@@ -230,25 +204,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
- cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
- policy->cur = stock_freq;
- return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+ return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
}
-static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
u32 l, h;
@@ -267,19 +233,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
return stock_freq;
}
-static struct freq_attr *p4clockmod_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver p4clockmod_driver = {
- .verify = cpufreq_p4_verify,
- .target = cpufreq_p4_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
- .exit = cpufreq_p4_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = p4clockmod_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
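p4-clockmod now leans on two helpers added to the core for this series: cpufreq_table_validate_and_show() replaces the old cpuinfo/get_attr pair in ->init(), and cpufreq_generic_exit() is the matching teardown used for ->exit(). Their behaviour is roughly the following (a paraphrase for reading the hunks, not the exact 3.13 source):

int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	int ret = cpufreq_frequency_table_cpuinfo(policy, table);

	if (!ret)
		cpufreq_frequency_table_get_attr(table, policy->cpu);

	return ret;
}

int cpufreq_generic_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}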
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 534e43a60d1..0426008380d 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -28,6 +28,7 @@
#include <linux/cpufreq.h>
#include <linux/timer.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
@@ -51,8 +52,6 @@
static void __iomem *sdcpwr_mapbase;
static void __iomem *sdcasr_mapbase;
-static DEFINE_MUTEX(pas_switch_mutex);
-
/* Current astate, is used when waking up from power savings on
* one core, in case the other core has switched states during
* the idle time.
@@ -69,11 +68,6 @@ static struct cpufreq_frequency_table pas_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr *pas_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/*
* hardware specific functions
*/
@@ -209,22 +203,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
}
- policy->cpuinfo.transition_latency = get_gizmo_latency();
-
cur_astate = get_cur_astate(policy->cpu);
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
- cpumask_copy(policy->cpus, cpu_online_mask);
-
ppc_proc_freq = policy->cur * 1000ul;
- cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
-
- /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
- * are set correctly
- */
- return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
out_unmap_sdcpwr:
iounmap(sdcpwr_mapbase);
@@ -253,31 +238,11 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static int pas_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pas_freqs);
-}
-
static int pas_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int pas_astate_new)
{
- struct cpufreq_freqs freqs;
- int pas_astate_new;
int i;
- cpufreq_frequency_table_target(policy,
- pas_freqs,
- target_freq,
- relation,
- &pas_astate_new);
-
- freqs.old = policy->cur;
- freqs.new = pas_freqs[pas_astate_new].frequency;
-
- mutex_lock(&pas_switch_mutex);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
pas_freqs[pas_astate_new].frequency,
@@ -288,10 +253,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&pas_switch_mutex);
-
- ppc_proc_freq = freqs.new * 1000ul;
+ ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
return 0;
}
@@ -300,9 +262,9 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit,
- .verify = pas_cpufreq_verify,
- .target = pas_cpufreq_target,
- .attr = pas_cpu_freqs_attr,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pas_cpufreq_target,
+ .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index d81c4e5ea0a..e2b4f40ff69 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -396,15 +395,14 @@ static int __init pcc_cpufreq_probe(void)
struct pcc_memory_resource *mem_resource;
struct pcc_register_resource *reg_resource;
union acpi_object *out_obj, *member;
- acpi_handle handle, osc_handle, pcch_handle;
+ acpi_handle handle, osc_handle;
int ret = 0;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
- status = acpi_get_handle(handle, "PCCH", &pcch_handle);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "PCCH"))
return -ENODEV;
status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -560,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
ioread32(&pcch_hdr->nominal) * 1000;
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
- policy->cur = pcc_get_freq(cpu);
-
- if (!policy->cur) {
- pr_debug("init: Unable to get current CPU frequency\n");
- result = -EINVAL;
- goto out;
- }
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index a096cd3fa23..cf55d202f33 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr* pmac_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static inline void local_delay(unsigned long ms)
{
if (no_schedule)
@@ -336,21 +331,11 @@ static int pmu_set_cpu_speed(int low_speed)
return 0;
}
-static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
- int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
{
- struct cpufreq_freqs freqs;
unsigned long l3cr;
static unsigned long prev_l3cr;
- freqs.old = cur_freq;
- freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
-
- if (freqs.old == freqs.new)
- return 0;
-
- if (notify)
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
@@ -366,8 +351,6 @@ static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
_set_L3CR(prev_l3cr);
}
- if (notify)
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
@@ -378,23 +361,12 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
return cur_freq;
}
-static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
-}
-
static int pmac_cpufreq_target( struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- unsigned int newstate = 0;
int rc;
- if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- rc = do_set_cpu_speed(policy, newstate, 1);
+ rc = do_set_cpu_speed(policy, index);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
@@ -402,14 +374,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -ENODEV;
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = cur_freq;
-
- cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+ return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
}
static u32 read_gpio(struct device_node *np)
@@ -443,7 +408,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
- do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
+ do_set_cpu_speed(policy, CPUFREQ_HIGH);
return 0;
}
@@ -460,7 +425,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
* probably high speed due to our suspend() routine
*/
do_set_cpu_speed(policy, sleep_freq == low_freq ?
- CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+ CPUFREQ_LOW : CPUFREQ_HIGH);
ppc_proc_freq = cur_freq * 1000ul;
@@ -469,14 +434,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pmac_cpufreq_driver = {
- .verify = pmac_cpufreq_verify,
- .target = pmac_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pmac_cpufreq_target,
.get = pmac_cpufreq_get_speed,
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_PM_NO_WARN,
- .attr = pmac_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
.name = "powermac",
};
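Most of the ->init() routines in this series (pasemi, pmac32, pmac64, s3c24xx, s3c2416, s3c64xx, s5pv210, sa1100, sa1110) collapse into a single cpufreq_generic_init() call. Its effect is approximately the following sketch of the 3.13-era helper (error printing omitted):

int cpufreq_generic_init(struct cpufreq_policy *policy,
			 struct cpufreq_frequency_table *table,
			 unsigned int transition_latency)
{
	int ret = cpufreq_table_validate_and_show(policy, table);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * Platforms using this helper have all CPUs sharing one clock and
	 * voltage domain, so every CPU ends up in the same policy.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}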
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 3a51ad7e47c..6a338f8c386 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr* g5_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
@@ -84,8 +79,6 @@ static void (*g5_switch_volt)(int speed_mode);
static int (*g5_switch_freq)(int speed_mode);
static int (*g5_query_freq)(void);
-static DEFINE_MUTEX(g5_switch_mutex);
-
static unsigned long transition_latency;
#ifdef CONFIG_PMAC_SMU
@@ -142,7 +135,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
pmf_call_one(pfunc_vdnap0_complete, &args);
if (done)
break;
- msleep(1);
+ usleep_range(1000, 1000);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -241,7 +234,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
if (pfunc_cpu1_volt_low)
pmf_call_one(pfunc_cpu1_volt_low, NULL);
}
- msleep(10); /* should be faster , to fix */
+ usleep_range(10000, 10000); /* should be faster, to fix */

}
/*
@@ -286,7 +279,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
pmf_call_one(pfunc_slewing_done, &args);
if (done)
break;
- msleep(1);
+ usleep_range(500, 500);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
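The msleep() -> usleep_range() changes in this file follow the usual guidance for short waits: msleep() is jiffy-based and can overshoot a 1 ms request by several milliseconds, while usleep_range() is hrtimer-based. Illustration only (the slack value is an example; the patch itself uses equal bounds):

#include <linux/delay.h>

static void poll_interval_sketch(void)
{
	/*
	 * Roughly a 1 ms poll interval; the 500 us of slack in the upper
	 * bound lets the kernel coalesce the wakeup with other timers.
	 */
	usleep_range(1000, 1500);
}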
@@ -317,37 +310,9 @@ static int g5_pfunc_query_freq(void)
* Common interface to the cpufreq core
*/
-static int g5_cpufreq_verify(struct cpufreq_policy *policy)
+static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
{
- return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
-}
-
-static int g5_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
- int rc;
-
- if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- if (g5_pmode_cur == newstate)
- return 0;
-
- mutex_lock(&g5_switch_mutex);
-
- freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
- freqs.new = g5_cpu_freqs[newstate].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- rc = g5_switch_freq(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- mutex_unlock(&g5_switch_mutex);
-
- return rc;
+ return g5_switch_freq(index);
}
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
@@ -357,27 +322,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
- /* secondary CPUs are tied to the primary one by the
- * cpufreq core if in the secondary policy we tell it that
- * it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, cpu_online_mask);
- cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- g5_cpu_freqs);
+ return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
}
-
static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac",
.flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init,
- .verify = g5_cpufreq_verify,
- .target = g5_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = g5_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
};
@@ -397,7 +352,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
/* Check supported platforms */
if (of_machine_is_compatible("PowerMac8,1") ||
of_machine_is_compatible("PowerMac8,2") ||
- of_machine_is_compatible("PowerMac9,1"))
+ of_machine_is_compatible("PowerMac9,1") ||
+ of_machine_is_compatible("PowerMac12,1"))
use_volts_smu = 1;
else if (of_machine_is_compatible("PowerMac11,2"))
use_volts_vdnap = 1;
@@ -647,8 +603,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = min_freq;
+ /* Based on a measurement on Xserve G5, rounded up. */
+ transition_latency = 10 * NSEC_PER_MSEC;
+
/* Set callbacks */
- transition_latency = CPUFREQ_ETERNAL;
g5_switch_volt = g5_pfunc_switch_volt;
g5_switch_freq = g5_pfunc_switch_freq;
g5_query_freq = g5_pfunc_query_freq;
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 85f1c8c25dd..643e7952cad 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -63,12 +63,12 @@ static int powernow_k6_get_cpu_multiplier(void)
/**
- * powernow_k6_set_state - set the PowerNow! multiplier
+ * powernow_k6_target - set the PowerNow! multiplier
* @best_i: clock_ratio[best_i] is the target multiplier
*
* Tries to change the PowerNow! multiplier
*/
-static void powernow_k6_set_state(struct cpufreq_policy *policy,
+static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
unsigned long outvalue = 0, invalue = 0;
@@ -77,7 +77,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
- return;
+ return -EINVAL;
}
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
@@ -100,44 +100,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return;
-}
-
-
-/**
- * powernow_k6_verify - verifies a new CPUfreq policy
- * @policy: new policy
- *
- * Policy must be within lowest and highest possible CPU Frequency,
- * and at least one possible state must be within min and max.
- */
-static int powernow_k6_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
-}
-
-
-/**
- * powernow_k6_setpolicy - sets a new CPUFreq policy
- * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
- *
- * sets a new CPUFreq policy
- */
-static int powernow_k6_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- powernow_k6_set_state(policy, newstate);
-
return 0;
}
@@ -145,7 +107,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
- int result;
if (policy->cpu != 0)
return -ENODEV;
@@ -165,15 +126,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 200000;
- policy->cur = busfreq * max_multiplier;
-
- result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, clock_ratio);
}
@@ -182,7 +136,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
unsigned int i;
for (i = 0; i < 8; i++) {
if (i == max_multiplier)
- powernow_k6_set_state(policy, i);
+ powernow_k6_target(policy, i);
}
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
@@ -195,19 +149,14 @@ static unsigned int powernow_k6_get(unsigned int cpu)
return ret;
}
-static struct freq_attr *powernow_k6_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver powernow_k6_driver = {
- .verify = powernow_k6_verify,
- .target = powernow_k6_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = powernow_k6_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 14ce480be8a..946708a1d74 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@ static void change_VID(int vid)
}
-static void change_speed(struct cpufreq_policy *policy, unsigned int index)
+static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
{
u8 fid, vid;
struct cpufreq_freqs freqs;
@@ -291,6 +291,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index)
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
}
@@ -533,27 +535,6 @@ static int powernow_decode_bios(int maxfid, int startvid)
}
-static int powernow_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate;
-
- if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
- relation, &newstate))
- return -EINVAL;
-
- change_speed(policy, newstate);
-
- return 0;
-}
-
-
-static int powernow_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, powernow_table);
-}
-
/*
* We use the fact that the bus frequency is somehow
* a multiple of 100000/3 khz, then we compute sgtc according
@@ -678,11 +659,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
- policy->cur = powernow_get(0);
-
- cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
+ return cpufreq_table_validate_and_show(policy, powernow_table);
}
static int powernow_cpu_exit(struct cpufreq_policy *policy)
@@ -701,14 +678,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *powernow_table_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver powernow_driver = {
- .verify = powernow_verify,
- .target = powernow_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_target,
.get = powernow_get,
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
.bios_limit = acpi_processor_get_bios_limit,
@@ -716,7 +688,7 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = powernow_table_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 2344a9ed17f..0023c7d40a5 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -977,20 +977,17 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
struct powernowk8_target_arg {
struct cpufreq_policy *pol;
- unsigned targfreq;
- unsigned relation;
+ unsigned newstate;
};
static long powernowk8_target_fn(void *arg)
{
struct powernowk8_target_arg *pta = arg;
struct cpufreq_policy *pol = pta->pol;
- unsigned targfreq = pta->targfreq;
- unsigned relation = pta->relation;
+ unsigned newstate = pta->newstate;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
- unsigned int newstate;
int ret;
if (!data)
@@ -1004,8 +1001,9 @@ static long powernowk8_target_fn(void *arg)
return -EIO;
}
- pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
- pol->cpu, targfreq, pol->min, pol->max, relation);
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
+ pol->cpu, data->powernow_table[newstate].frequency, pol->min,
+ pol->max);
if (query_current_values_with_pending_wait(data))
return -EIO;
@@ -1021,10 +1019,6 @@ static long powernowk8_target_fn(void *arg)
checkvid, data->currvid);
}
- if (cpufreq_frequency_table_target(pol, data->powernow_table,
- targfreq, relation, &newstate))
- return -EIO;
-
mutex_lock(&fidvid_mutex);
powernow_k8_acpi_pst_values(data, newstate);
@@ -1044,26 +1038,13 @@ static long powernowk8_target_fn(void *arg)
}
/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
- unsigned targfreq, unsigned relation)
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
{
- struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
- .relation = relation };
+ struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
-/* Driver entry point to verify the policy and range of frequencies */
-static int powernowk8_verify(struct cpufreq_policy *pol)
-{
- struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
-
- if (!data)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(pol, data->powernow_table);
-}
-
struct init_on_cpu {
struct powernow_k8_data *data;
int rc;
@@ -1152,11 +1133,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
data->available_cores = pol->cpus;
- pol->cur = find_khz_freq_from_fid(data->currfid);
- pr_debug("policy current frequency %d kHz\n", pol->cur);
-
/* min/max the cpu is capable of */
- if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
+ if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
powernow_k8_cpu_exit_acpi(data);
kfree(data->powernow_table);
@@ -1164,8 +1142,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
- cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1227,20 +1203,16 @@ out:
return khz;
}
-static struct freq_attr *powernow_k8_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cpufreq_amd64_driver = {
- .verify = powernowk8_verify,
- .target = powernowk8_target,
+ .flags = CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = powernowk8_cpu_init,
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = powernow_k8_attr,
+ .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
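powernow-k8 is the one driver in this set that keeps sending its own transition notifications (its frequency switch runs via work_on_cpu()), so it sets CPUFREQ_ASYNC_NOTIFICATION to stop the core from also wrapping ->target_index() with PRE/POSTCHANGE. A sketch of such a driver declaration (my_async_target() is a placeholder):

#include <linux/cpufreq.h>

static int my_async_target(struct cpufreq_policy *policy, unsigned int index);

static struct cpufreq_driver my_async_driver = {
	.flags		= CPUFREQ_ASYNC_NOTIFICATION,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= my_async_target,	/* posts its own notifications */
	.name		= "async-sketch",
};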
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 60e81d524ea..3f7be46d2b2 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -69,8 +69,6 @@ static const struct soc_data sdata[] = {
static u32 min_cpufreq;
static const u32 *fmask;
-/* serialize frequency changes */
-static DEFINE_MUTEX(cpufreq_lock);
static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
/* cpumask in a cluster */
@@ -202,7 +200,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
table[i].frequency = CPUFREQ_TABLE_END;
/* set the min and max frequency properly */
- ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ ret = cpufreq_table_validate_and_show(policy, table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
goto err_nomem1;
@@ -217,9 +215,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(cpu_data, i) = data;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = corenet_cpufreq_get_speed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(table, cpu);
of_node_put(np);
return 0;
@@ -253,60 +248,25 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *table =
- per_cpu(cpu_data, policy->cpu)->table;
-
- return cpufreq_frequency_table_verify(policy, table);
-}
-
static int corenet_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- unsigned int new;
struct clk *parent;
- int ret;
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
- cpufreq_frequency_table_target(policy, data->table,
- target_freq, relation, &new);
-
- if (policy->cur == data->table[new].frequency)
- return 0;
-
- freqs.old = policy->cur;
- freqs.new = data->table[new].frequency;
-
- mutex_lock(&cpufreq_lock);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- parent = of_clk_get(data->parent, data->table[new].driver_data);
- ret = clk_set_parent(data->clk, parent);
- if (ret)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cpufreq_lock);
-
- return ret;
+ parent = of_clk_get(data->parent, data->table[index].driver_data);
+ return clk_set_parent(data->clk, parent);
}
-static struct freq_attr *corenet_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit),
- .verify = corenet_cpufreq_verify,
- .target = corenet_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = corenet_cpufreq_target,
.get = corenet_cpufreq_get_speed,
- .attr = corenet_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct of_device_id node_matches[] __initdata = {
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 2e448f0bbdc..e42ca9c31ce 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -30,9 +30,6 @@
#include "ppc_cbe_cpufreq.h"
-static DEFINE_MUTEX(cbe_switch_mutex);
-
-
/* the CBE supports an 8 step frequency scaling */
static struct cpufreq_frequency_table cbe_freqs[] = {
{1, 0},
@@ -123,63 +120,28 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
- cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
/* this ensures that policy->cpuinfo_min
* and policy->cpuinfo_max are set correctly */
- return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, cbe_freqs);
+ return cpufreq_table_validate_and_show(policy, cbe_freqs);
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int cbe_pmode_new)
{
- int rc;
- struct cpufreq_freqs freqs;
- unsigned int cbe_pmode_new;
-
- cpufreq_frequency_table_target(policy,
- cbe_freqs,
- target_freq,
- relation,
- &cbe_pmode_new);
-
- freqs.old = policy->cur;
- freqs.new = cbe_freqs[cbe_pmode_new].frequency;
-
- mutex_lock(&cbe_switch_mutex);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
pr_debug("setting frequency for cpu %d to %d kHz, " \
"1/%d of max frequency\n",
policy->cpu,
cbe_freqs[cbe_pmode_new].frequency,
cbe_freqs[cbe_pmode_new].driver_data);
- rc = set_pmode(policy->cpu, cbe_pmode_new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cbe_switch_mutex);
-
- return rc;
+ return set_pmode(policy->cpu, cbe_pmode_new);
}
static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cbe_cpufreq_verify,
- .target = cbe_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 8749eaf1879..0a0f4369636 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -262,36 +262,15 @@ static u32 mdrefr_dri(unsigned int freq)
return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
-/* find a valid frequency point */
-static int pxa_verify_policy(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pxa_freqs_table;
- pxa_freqs_t *pxa_freqs;
- int ret;
-
- find_freq_tables(&pxa_freqs_table, &pxa_freqs);
- ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
-
- if (freq_debug)
- pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
- policy->min, policy->max);
-
- return ret;
-}
-
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
return get_clk_frequency_khz(0);
}
-static int pxa_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freq_settings;
- struct cpufreq_freqs freqs;
- unsigned int idx;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@@ -300,32 +279,19 @@ static int pxa_set_target(struct cpufreq_policy *policy,
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
- target_freq, relation, &idx)) {
- return -EINVAL;
- }
-
new_freq_cpu = pxa_freq_settings[idx].khz;
new_freq_mem = pxa_freq_settings[idx].membus;
- freqs.old = policy->cur;
- freqs.new = new_freq_cpu;
if (freq_debug)
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
- freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
+ new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
- if (vcc_core && freqs.new > freqs.old)
+ if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
- if (ret)
- return ret;
- /*
- * Tell everyone what we're about to do...
- * you should add a notify client with any platform specific
- * Vcc changing capability
- */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ if (ret)
+ return ret;
+ }
/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
* we need to preset the smaller DRI before the change. If we're
@@ -376,13 +342,6 @@ static int pxa_set_target(struct cpufreq_policy *policy,
local_irq_restore(flags);
/*
- * Tell everyone what we've just done...
- * you should add a notify client with any platform specific
- * SDRAM refresh timer adjustments
- */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- /*
* Even if voltage setting fails, we don't report it, as the frequency
* change succeeded. The voltage reduction is not a critical failure,
* only power savings will suffer from this.
@@ -391,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
* bug is triggered (seems a deadlock). Should anybody find out where,
* the "return 0" should become a "return ret".
*/
- if (vcc_core && freqs.new < freqs.old)
+ if (vcc_core && new_freq_cpu < policy->cur)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
return 0;
@@ -414,8 +373,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
- policy->cur = get_clk_frequency_khz(0); /* current freq */
- policy->min = policy->max = policy->cur;
/* Generate pxa25x the run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
@@ -453,10 +410,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("PXA255 cpufreq using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
- cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+
+ cpufreq_table_validate_and_show(policy, pxa255_freq_table);
+ }
+ else if (cpu_is_pxa27x()) {
+ cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
- else if (cpu_is_pxa27x())
- cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
printk(KERN_INFO "PXA CPU frequency change support initialized\n");
@@ -464,9 +423,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa_cpufreq_driver = {
- .verify = pxa_verify_policy,
- .target = pxa_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa_set_target,
.init = pxa_cpufreq_init,
+ .exit = cpufreq_generic_exit,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index d26306fb00d..93840048dd1 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
pxa3xx_freqs_num = num;
pxa3xx_freqs_table = table;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static void __update_core_freq(struct pxa3xx_freq_info *info)
@@ -150,54 +150,26 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info)
cpu_relax();
}
-static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
-}
-
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
{
return pxa3xx_get_clk_frequency_khz(0);
}
-static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
{
struct pxa3xx_freq_info *next;
- struct cpufreq_freqs freqs;
unsigned long flags;
- int idx;
if (policy->cpu != 0)
return -EINVAL;
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
- target_freq, relation, &idx))
- return -EINVAL;
-
- next = &pxa3xx_freqs[idx];
-
- freqs.old = policy->cur;
- freqs.new = next->cpufreq_mhz * 1000;
-
- pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
- freqs.old / 1000, freqs.new / 1000,
- (freqs.old == freqs.new) ? " (skipped)" : "");
-
- if (freqs.old == target_freq)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ next = &pxa3xx_freqs[index];
local_irq_save(flags);
__update_core_freq(next);
__update_bus_freq(next);
local_irq_restore(flags);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return 0;
}
@@ -206,11 +178,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
int ret = -EINVAL;
/* set default policy and cpuinfo */
- policy->cpuinfo.min_freq = 104000;
- policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+ policy->min = policy->cpuinfo.min_freq = 104000;
+ policy->max = policy->cpuinfo.max_freq =
+ (cpu_is_pxa320()) ? 806000 : 624000;
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
- policy->max = pxa3xx_get_clk_frequency_khz(0);
- policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, pxa300_freqs,
@@ -230,9 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
- .verify = pxa3xx_cpufreq_verify,
- .target = pxa3xx_cpufreq_set,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
+ .exit = cpufreq_generic_exit,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 22dcb81ef9d..8d904a00027 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = {
{ 0, CPUFREQ_TABLE_END },
};
-static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
-}
-
static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
@@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
}
static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_freqs freqs;
+ unsigned int new_freq;
int idx, ret, to_dvs = 0;
- unsigned int i;
mutex_lock(&cpufreq_lock);
- pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation);
-
- ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- goto out;
-
- idx = s3c_freq->freq_table[i].driver_data;
+ idx = s3c_freq->freq_table[index].driver_data;
if (idx == SOURCE_HCLK)
to_dvs = 1;
@@ -256,24 +237,13 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
goto out;
}
- freqs.flags = 0;
- freqs.old = s3c_freq->is_dvs ? FREQ_DVS
- : clk_get_rate(s3c_freq->armclk) / 1000;
-
/* When leavin dvs mode, always switch the armdiv to the hclk rate
* The S3C2416 has stability issues when switching directly to
* higher frequencies.
*/
- freqs.new = (s3c_freq->is_dvs && !to_dvs)
+ new_freq = (s3c_freq->is_dvs && !to_dvs)
? clk_get_rate(s3c_freq->hclk) / 1000
- : s3c_freq->freq_table[i].frequency;
-
- pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- if (!to_dvs && freqs.old == freqs.new)
- goto out;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ : s3c_freq->freq_table[index].frequency;
if (to_dvs) {
pr_debug("cpufreq: enter dvs\n");
@@ -282,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
pr_debug("cpufreq: leave dvs\n");
ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
} else {
- pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
+ pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
out:
mutex_unlock(&cpufreq_lock);
@@ -486,20 +454,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
- policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
-
/* Datasheet says PLL stabalisation time must be at least 300us,
* so but add some fudge. (reference in LOCKCON0 register description)
*/
- policy->cpuinfo.transition_latency = (500 * 1000) +
- s3c_freq->regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
+ ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+ (500 * 1000) + s3c_freq->regulator_latency);
if (ret)
goto err_freq_table;
- cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
-
register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
return 0;
@@ -518,19 +480,14 @@ err_hclk:
return ret;
}
-static struct freq_attr *s3c2416_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver s3c2416_cpufreq_driver = {
.flags = 0,
- .verify = s3c2416_cpufreq_verify_speed,
- .target = s3c2416_cpufreq_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c2416_cpufreq_set_target,
.get = s3c2416_cpufreq_get_speed,
.init = s3c2416_cpufreq_driver_init,
.name = "s3c2416",
- .attr = s3c2416_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init s3c2416_cpufreq_init(void)
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index b0f343fcb7e..48508825335 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
- printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- policy->cur = s3c_cpufreq_get(0);
- policy->min = policy->cpuinfo.min_freq = 0;
- policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /* feed the latency information from the cpu driver */
- policy->cpuinfo.transition_latency = cpu_cur.info->latency;
-
- if (ftab)
- cpufreq_frequency_table_cpuinfo(policy, ftab);
-
- return 0;
+ return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
}
static int __init s3c_cpufreq_initclks(void)
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void)
return 0;
}
-static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return 0;
-}
-
#ifdef CONFIG_PM
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c24xx_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s3c_cpufreq_verify,
.target = s3c_cpufreq_target,
.get = s3c_cpufreq_get,
.init = s3c_cpufreq_init,
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 15631f92ab7..67e302eeefe 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
};
#endif
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
if (cpu != 0)
@@ -71,66 +63,48 @@ static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
}
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- int ret;
- unsigned int i;
- struct cpufreq_freqs freqs;
struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq, new_freq;
+ int ret;
- ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- return ret;
-
- freqs.old = clk_get_rate(armclk) / 1000;
- freqs.new = s3c64xx_freq_table[i].frequency;
- freqs.flags = 0;
- dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data];
-
- if (freqs.old == freqs.new)
- return 0;
-
- pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq = clk_get_rate(armclk) / 1000;
+ new_freq = s3c64xx_freq_table[index].frequency;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new > freqs.old) {
+ if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- freqs.new = freqs.old;
- goto post_notify;
+ new_freq, ret);
+ return ret;
}
}
#endif
- ret = clk_set_rate(armclk, freqs.new * 1000);
+ ret = clk_set_rate(armclk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
- freqs.new, ret);
- freqs.new = freqs.old;
+ new_freq, ret);
+ return ret;
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- if (ret)
- goto err;
-
#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new < freqs.old) {
+ if (vddarm && new_freq < old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- goto err_clk;
+ new_freq, ret);
+ if (clk_set_rate(armclk, old_freq * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+
+ return ret;
}
}
#endif
@@ -139,14 +113,6 @@ post_notify:
clk_get_rate(armclk) / 1000);
return 0;
-
-err_clk:
- if (clk_set_rate(armclk, freqs.old * 1000) < 0)
- pr_err("Failed to restore original clock rate\n");
-err:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
}
#ifdef CONFIG_REGULATOR
@@ -243,15 +209,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
- policy->cur = clk_get_rate(armclk) / 1000;
-
/* Datasheet says PLL stabalisation time (if we were to use
* the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge.
*/
- policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+ (500 * 1000) + regulator_latency);
if (ret != 0) {
pr_err("Failed to configure frequency table: %d\n",
ret);
@@ -264,8 +227,8 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.flags = 0,
- .verify = s3c64xx_cpufreq_verify_speed,
- .target = s3c64xx_cpufreq_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c64xx_cpufreq_set_target,
.get = s3c64xx_cpufreq_get_speed,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
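The s3c64xx target routine above, like the omap one earlier in the series, keeps the usual DVFS ordering even after losing its notifier boilerplate: raise the supply before raising the clock, lower it only after the clock has dropped, and try to restore the old rate if the post-scale regulator write fails. In outline (all handles and values are placeholders):

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

static int dvfs_set_rate_sketch(struct regulator *supply, struct clk *clk,
				unsigned int old_khz, unsigned int new_khz,
				int uv_min, int uv_max)
{
	int ret;

	if (new_khz > old_khz) {		/* scaling up: voltage first */
		ret = regulator_set_voltage(supply, uv_min, uv_max);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(clk, new_khz * 1000UL);
	if (ret)
		return ret;

	if (new_khz < old_khz) {		/* scaling down: voltage after */
		ret = regulator_set_voltage(supply, uv_min, uv_max);
		if (ret) {
			/* best-effort restore of the previous rate */
			clk_set_rate(clk, old_khz * 1000UL);
			return ret;
		}
	}

	return 0;
}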
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5c775707379..e3973dae28a 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -26,7 +26,6 @@
static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
-static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);
/* APLL M,P,S values for 1G/800Mhz */
@@ -36,16 +35,7 @@ static DEFINE_MUTEX(set_freq_lock);
/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ (800 * 1000)
-/*
- * relation has an additional symantics other than the standard of cpufreq
- * DISALBE_FURTHER_CPUFREQ: disable further access to target
- * ENABLE_FURTUER_CPUFREQ: enable access to target
- */
-enum cpufreq_access {
- DISABLE_FURTHER_CPUFREQ = 0x10,
- ENABLE_FURTHER_CPUFREQ = 0x20,
-};
-
+/* Tracks whether cpu frequency can still be updated */
static bool no_cpufreq_access;
/*
@@ -174,14 +164,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
__raw_writel(tmp1, reg);
}
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
static unsigned int s5pv210_getspeed(unsigned int cpu)
{
if (cpu)
@@ -190,22 +172,18 @@ static unsigned int s5pv210_getspeed(unsigned int cpu)
return clk_get_rate(cpu_clk) / 1000;
}
-static int s5pv210_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long reg;
- unsigned int index, priv_index;
+ unsigned int priv_index;
unsigned int pll_changing = 0;
unsigned int bus_speed_changing = 0;
+ unsigned int old_freq, new_freq;
int arm_volt, int_volt;
int ret = 0;
mutex_lock(&set_freq_lock);
- if (relation & ENABLE_FURTHER_CPUFREQ)
- no_cpufreq_access = false;
-
if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
pr_err("%s:%d denied access to %s as it is disabled"
@@ -215,27 +193,13 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- if (relation & DISABLE_FURTHER_CPUFREQ)
- no_cpufreq_access = true;
-
- relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
-
- freqs.old = s5pv210_getspeed(0);
-
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
- goto exit;
- }
-
- freqs.new = s5pv210_freq_table[index].frequency;
-
- if (freqs.new == freqs.old)
- goto exit;
+ old_freq = s5pv210_getspeed(0);
+ new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- freqs.old, relation, &priv_index)) {
+ old_freq, CPUFREQ_RELATION_H,
+ &priv_index)) {
ret = -EINVAL;
goto exit;
}
@@ -243,7 +207,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
- if (freqs.new > freqs.old) {
+ if (new_freq > old_freq) {
ret = regulator_set_voltage(arm_regulator,
arm_volt, arm_volt_max);
if (ret)
@@ -255,8 +219,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* Check if there need to change PLL */
if ((index == L0) || (priv_index == L0))
pll_changing = 1;
@@ -467,9 +429,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- if (freqs.new < freqs.old) {
+ if (new_freq < old_freq) {
regulator_set_voltage(int_regulator,
int_volt, int_volt_max);
@@ -551,13 +511,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
- policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
- cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
- policy->cpuinfo.transition_latency = 40000;
-
- return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+ return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
out_dmc1:
clk_put(dmc0_clk);
@@ -573,16 +527,18 @@ static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
switch (event) {
case PM_SUSPEND_PREPARE:
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- DISABLE_FURTHER_CPUFREQ);
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
+ /* Disable further cpu frequency updates */
+ no_cpufreq_access = true;
return NOTIFY_OK;
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
- cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- ENABLE_FURTHER_CPUFREQ);
+ /* Re-enable cpu frequency updates */
+ no_cpufreq_access = false;
+ cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
return NOTIFY_OK;
}
@@ -595,18 +551,18 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
{
int ret;
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- DISABLE_FURTHER_CPUFREQ);
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
+ no_cpufreq_access = true;
return NOTIFY_DONE;
}
static struct cpufreq_driver s5pv210_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s5pv210_verify_speed,
- .target = s5pv210_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s5pv210_target,
.get = s5pv210_getspeed,
.init = s5pv210_cpu_init,
.name = "s5pv210",
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index cff18e87ca5..623da742f8e 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -177,60 +177,33 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed)
}
}
-static int sa1100_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
unsigned int cur = sa11x0_getspeed(0);
- unsigned int new_ppcr;
- struct cpufreq_freqs freqs;
-
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
- switch (relation) {
- case CPUFREQ_RELATION_L:
- if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
- new_ppcr--;
- break;
- case CPUFREQ_RELATION_H:
- if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
- (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
- new_ppcr--;
- break;
- }
-
- freqs.old = cur;
- freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
+ unsigned int new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ new_freq = sa11x0_freq_table[ppcr].frequency;
- if (freqs.new > cur)
- sa1100_update_dram_timings(cur, freqs.new);
+ if (new_freq > cur)
+ sa1100_update_dram_timings(cur, new_freq);
- PPCR = new_ppcr;
+ PPCR = ppcr;
- if (freqs.new < cur)
- sa1100_update_dram_timings(cur, freqs.new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ if (new_freq < cur)
+ sa1100_update_dram_timings(cur, new_freq);
return 0;
}
static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -EINVAL;
- policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
- policy->cpuinfo.min_freq = 59000;
- policy->cpuinfo.max_freq = 287000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- return 0;
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
static struct cpufreq_driver sa1100_driver __refdata = {
.flags = CPUFREQ_STICKY,
- .verify = sa11x0_verify_speed,
- .target = sa1100_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1100_target,
.get = sa11x0_getspeed,
.init = sa1100_cpu_init,
.name = "sa1100",
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 39c90b6f428..2c2b2e601d1 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -229,36 +229,14 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
/*
* Ok, set the CPU frequency.
*/
-static int sa1110_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
struct sdram_params *sdram = &sdram_params;
- struct cpufreq_freqs freqs;
struct sdram_info sd;
unsigned long flags;
- unsigned int ppcr, unused;
-
- switch (relation) {
- case CPUFREQ_RELATION_L:
- ppcr = sa11x0_freq_to_ppcr(target_freq);
- if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
- ppcr--;
- break;
- case CPUFREQ_RELATION_H:
- ppcr = sa11x0_freq_to_ppcr(target_freq);
- if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
- (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
- ppcr--;
- break;
- default:
- return -EINVAL;
- }
-
- freqs.old = sa11x0_getspeed(0);
- freqs.new = sa11x0_ppcr_to_freq(ppcr);
+ unsigned int unused;
- sdram_calculate_timing(&sd, freqs.new, sdram);
+ sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
#if 0
/*
@@ -277,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy,
sd.mdcas[2] = 0xaaaaaaaa;
#endif
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/*
* The clock could be going away for some time. Set the SDRAMs
* to refresh rapidly (every 64 memory clock cycles). To get
@@ -323,30 +299,22 @@ static int sa1110_target(struct cpufreq_policy *policy,
/*
* Now, return the SDRAM refresh back to normal.
*/
- sdram_update_refresh(freqs.new, sdram);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
return 0;
}
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -EINVAL;
- policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
- policy->cpuinfo.min_freq = 59000;
- policy->cpuinfo.max_freq = 287000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- return 0;
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
.flags = CPUFREQ_STICKY,
- .verify = sa11x0_verify_speed,
- .target = sa1110_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1110_target,
.get = sa11x0_getspeed,
.init = sa1110_cpu_init,
.name = "sa1110",
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index d6f6c6f4efa..6adb354e359 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,21 +53,11 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
}
}
-static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
u8 clockspeed_reg;
- freqs.old = sc520_freq_get_cpu_frequency(0);
- freqs.new = sc520_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- pr_debug("attempting to set frequency to %i kHz\n",
- sc520_freq_table[state].frequency);
-
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
@@ -75,30 +65,9 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
-static int sc520_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, sc520_freq_table,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- sc520_freq_set_cpu_state(policy, newstate);
-
return 0;
}
-
/*
* Module init and exit code
*/
@@ -106,7 +75,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
- int result;
/* capability check */
if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,39 +83,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = sc520_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, sc520_freq_table);
}
-static struct freq_attr *sc520_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
- .verify = sc520_freq_verify,
- .target = sc520_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
- .exit = sc520_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "sc520_freq",
- .attr = sc520_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index ffc6d24b0cf..387af12503a 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
if (freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(cpuclk);
}
- policy->cur = sh_cpufreq_get(cpu);
-
freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
if (freq_table) {
int result;
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!result)
- cpufreq_frequency_table_get_attr(freq_table, cpu);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
+ if (result)
+ return result;
} else {
dev_notice(dev, "no frequency table found, falling back "
"to rate rounding.\n");
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *sh_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.get = sh_cpufreq_get,
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = sh_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index cf5bc2ca16f..62aa23e219d 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -245,14 +245,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
return clock_tick / estar_to_divisor(estar);
}
-static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
- unsigned int index)
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq;
unsigned long clock_tick, divisor, old_divisor, estar;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -266,41 +264,15 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
old_divisor = estar_to_divisor(estar);
- freqs.old = clock_tick / old_divisor;
- freqs.new = new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
if (old_divisor != divisor)
us2e_transition(estar, new_bits, clock_tick * 1000,
old_divisor, divisor);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us2e_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int new_index = 0;
-
- if (cpufreq_frequency_table_target(policy,
- &us2e_freq_table[policy->cpu].table[0],
- target_freq, relation, &new_index))
- return -EINVAL;
-
- us2e_set_cpu_divider_index(policy, new_index);
return 0;
}
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &us2e_freq_table[policy->cpu].table[0]);
-}
-
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -324,13 +296,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us2e_driver)
- us2e_set_cpu_divider_index(policy, 0);
+ if (cpufreq_us2e_driver) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ us2e_freq_target(policy, 0);
+ }
return 0;
}
@@ -361,8 +335,8 @@ static int __init us2e_freq_init(void)
goto err_out;
driver->init = us2e_freq_cpu_init;
- driver->verify = us2e_freq_verify;
- driver->target = us2e_freq_target;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us2e_freq_target;
driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
strcpy(driver->name, "UltraSPARC-IIe");
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index ac76b489979..724ffbd7105 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -93,13 +93,11 @@ static unsigned int us3_freq_get(unsigned int cpu)
return ret;
}
-static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
- unsigned int index)
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq, reg;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -125,43 +123,15 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
reg = read_safari_cfg();
- freqs.old = get_current_freq(cpu, reg);
- freqs.new = new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
reg &= ~SAFARI_CFG_DIV_MASK;
reg |= new_bits;
write_safari_cfg(reg);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us3_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int new_index = 0;
-
- if (cpufreq_frequency_table_target(policy,
- &us3_freq_table[policy->cpu].table[0],
- target_freq,
- relation,
- &new_index))
- return -EINVAL;
-
- us3_set_cpu_divider_index(policy, new_index);
return 0;
}
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &us3_freq_table[policy->cpu].table[0]);
-}
-
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -181,13 +151,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us3_driver)
- us3_set_cpu_divider_index(policy, 0);
+ if (cpufreq_us3_driver) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ us3_freq_target(policy, 0);
+ }
return 0;
}
@@ -222,8 +194,8 @@ static int __init us3_freq_init(void)
goto err_out;
driver->init = us3_freq_cpu_init;
- driver->verify = us3_freq_verify;
- driver->target = us3_freq_target;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us3_freq_target;
driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
strcpy(driver->name, "UltraSPARC-III");
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 3f418166ce0..d02ccd19c9c 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,11 +30,6 @@ static struct {
u32 cnt;
} spear_cpufreq;
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
static unsigned int spear_cpufreq_get(unsigned int cpu)
{
return clk_get_rate(spear_cpufreq.clk) / 1000;
@@ -110,20 +105,14 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
}
static int spear_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
long newfreq;
struct clk *srcclk;
- int index, ret, mult = 1;
-
- if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
- target_freq, relation, &index))
- return -EINVAL;
-
- freqs.old = spear_cpufreq_get(0);
+ int ret, mult = 1;
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
if (of_machine_is_compatible("st,spear1340")) {
/*
* SPEAr1340 is special in the sense that due to the possibility
@@ -154,65 +143,32 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
return newfreq;
}
- freqs.new = newfreq / 1000;
- freqs.new /= mult;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
else
ret = clk_set_rate(spear_cpufreq.clk, newfreq);
- /* Get current rate after clk_set_rate, in case of failure */
- if (ret) {
+ if (ret)
pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
- freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
- }
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
- if (ret) {
- pr_err("cpufreq_frequency_table_cpuinfo() failed");
- return ret;
- }
-
- cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
- policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
- policy->cur = spear_cpufreq_get(0);
-
- cpumask_setall(policy->cpus);
-
- return 0;
-}
-
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+ spear_cpufreq.transition_latency);
}
-static struct freq_attr *spear_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
.flags = CPUFREQ_STICKY,
- .verify = spear_cpufreq_verify,
- .target = spear_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = spear_cpufreq_target,
.get = spear_cpufreq_get,
.init = spear_cpufreq_init,
- .exit = spear_cpufreq_exit,
- .attr = spear_cpufreq_attr,
+ .exit = cpufreq_generic_exit,
+ .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_driver_init(void)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index f897d510584..4e1daca5ce3 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
- unsigned freq;
unsigned l, h;
- int ret;
int i;
/* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (centrino_cpu_init_table(policy)) {
+ if (centrino_cpu_init_table(policy))
return -ENODEV;
- }
/* Check to see if Enhanced SpeedStep is enabled, and try to
enable it if not. */
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
}
}
- freq = get_cur_freq(policy->cpu);
policy->cpuinfo.transition_latency = 10000;
/* 10uS transition latency */
- policy->cur = freq;
-
- pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
- ret = cpufreq_frequency_table_cpuinfo(policy,
+ return cpufreq_table_validate_and_show(policy,
per_cpu(centrino_model, policy->cpu)->op_points);
- if (ret)
- return (ret);
-
- cpufreq_frequency_table_get_attr(
- per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
- return 0;
}
static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -428,36 +414,18 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
}
/**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
-/**
* centrino_setpolicy - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
-static int centrino_target (struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
- struct cpufreq_freqs freqs;
int retval = 0;
- unsigned int j, first_cpu, tmp;
+ unsigned int j, first_cpu;
+ struct cpufreq_frequency_table *op_points;
cpumask_var_t covered_cpus;
if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -468,16 +436,8 @@ static int centrino_target (struct cpufreq_policy *policy,
goto out;
}
- if (unlikely(cpufreq_frequency_table_target(policy,
- per_cpu(centrino_model, cpu)->op_points,
- target_freq,
- relation,
- &newstate))) {
- retval = -EINVAL;
- goto out;
- }
-
first_cpu = 1;
+ op_points = &per_cpu(centrino_model, cpu)->op_points[index];
for_each_cpu(j, policy->cpus) {
int good_cpu;
@@ -501,7 +461,7 @@ static int centrino_target (struct cpufreq_policy *policy,
break;
}
- msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data;
+ msr = op_points->driver_data;
if (first_cpu) {
rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
@@ -512,15 +472,6 @@ static int centrino_target (struct cpufreq_policy *policy,
goto out;
}
- freqs.old = extract_clock(oldmsr, cpu, 0);
- freqs.new = extract_clock(msr, cpu, 0);
-
- pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
- target_freq, freqs.old, freqs.new, msr);
-
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_PRECHANGE);
-
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
@@ -535,8 +486,6 @@ static int centrino_target (struct cpufreq_policy *policy,
cpumask_set_cpu(j, covered_cpus);
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
@@ -547,12 +496,6 @@ static int centrino_target (struct cpufreq_policy *policy,
for_each_cpu(j, covered_cpus)
wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
-
- tmp = freqs.new;
- freqs.new = freqs.old;
- freqs.old = tmp;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
retval = 0;
@@ -561,20 +504,15 @@ out:
return retval;
}
-static struct freq_attr* centrino_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver centrino_driver = {
.name = "centrino", /* should be speedstep-centrino,
but there's a 16 char limit */
.init = centrino_cpu_init,
.exit = centrino_cpu_exit,
- .verify = centrino_verify,
- .target = centrino_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = centrino_target,
.get = get_cur_freq,
- .attr = centrino_attr,
+ .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 5355abb69af..7639b2be2a9 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -251,56 +251,23 @@ static unsigned int speedstep_get(unsigned int cpu)
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
-static int speedstep_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0, policy_cpu;
- struct cpufreq_freqs freqs;
-
- if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
- target_freq, relation, &newstate))
- return -EINVAL;
+ unsigned int policy_cpu;
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
- freqs.old = speedstep_get(policy_cpu);
- freqs.new = speedstep_freqs[newstate].frequency;
-
- pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
-
- /* no transition necessary */
- if (freqs.old == freqs.new)
- return 0;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+ smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
true);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return 0;
}
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
struct get_freqs {
struct cpufreq_policy *policy;
int ret;
@@ -320,8 +287,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
- int result;
- unsigned int policy_cpu, speed;
+ unsigned int policy_cpu;
struct get_freqs gf;
/* only run on CPU to be set, or on its sibling */
@@ -336,49 +302,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
if (gf.ret)
return gf.ret;
- /* get current speed setting */
- speed = speedstep_get(policy_cpu);
- if (!speed)
- return -EIO;
-
- pr_debug("currently at %s speed setting - %i MHz\n",
- (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
- ? "low" : "high",
- (speed / 1000));
-
- /* cpuinfo and default policy values */
- policy->cur = speed;
-
- result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
- return 0;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *speedstep_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-ich",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = speedstep_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = speedstep_get,
- .attr = speedstep_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index abfba4f731e..0f5326d6f79 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -235,52 +235,21 @@ static void speedstep_set_state(unsigned int state)
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: new freq
- * @relation:
+ * @index: index of new freq
*
* Sets a new CPUFreq policy/freq.
*/
-static int speedstep_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
-
- if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
- freqs.new = speedstep_freqs[newstate].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- speedstep_set_state(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ speedstep_set_state(index);
return 0;
}
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
- unsigned int speed, state;
unsigned int *low, *high;
/* capability check */
@@ -316,32 +285,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
pr_debug("workaround worked.\n");
}
- /* get current speed setting */
- state = speedstep_get_state();
- speed = speedstep_freqs[state].frequency;
-
- pr_debug("currently at %s speed setting - %i MHz\n",
- (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
- ? "low" : "high",
- (speed / 1000));
-
- /* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = speed;
-
- result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
- return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +307,15 @@ static int speedstep_resume(struct cpufreq_policy *policy)
return result;
}
-static struct freq_attr *speedstep_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = speedstep_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = speedstep_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index a7b876fdc1d..f42df7ec03c 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int tegra_getspeed(unsigned int cpu)
{
unsigned long rate;
@@ -107,12 +102,8 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
unsigned long rate)
{
int ret = 0;
- struct cpufreq_freqs freqs;
- freqs.old = tegra_getspeed(0);
- freqs.new = rate;
-
- if (freqs.old == freqs.new)
+ if (tegra_getspeed(0) == rate)
return ret;
/*
@@ -126,21 +117,10 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
else
clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
- printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
- freqs.old, freqs.new);
-#endif
-
- ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
- if (ret) {
- pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
- freqs.new);
- freqs.new = freqs.old;
- }
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ ret = tegra_cpu_clk_set_rate(rate * 1000);
+ if (ret)
+ pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
+ rate);
return ret;
}
@@ -155,11 +135,8 @@ static unsigned long tegra_cpu_highest_speed(void)
return rate;
}
-static int tegra_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int idx;
unsigned int freq;
int ret = 0;
@@ -170,10 +147,7 @@ static int tegra_target(struct cpufreq_policy *policy,
goto out;
}
- cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &idx);
-
- freq = freq_table[idx].frequency;
+ freq = freq_table[index].frequency;
target_cpu_speed[policy->cpu] = freq;
@@ -209,21 +183,23 @@ static struct notifier_block tegra_cpu_pm_notifier = {
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
+ int ret;
+
if (policy->cpu >= NUM_CPUS)
return -EINVAL;
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- policy->cur = tegra_getspeed(policy->cpu);
- target_cpu_speed[policy->cpu] = policy->cur;
+ target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
/* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- cpumask_copy(policy->cpus, cpu_possible_mask);
+ ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (ret) {
+ clk_disable_unprepare(cpu_clk);
+ clk_disable_unprepare(emc_clk);
+ return ret;
+ }
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
@@ -233,24 +209,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return 0;
}
-static struct freq_attr *tegra_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver tegra_cpufreq_driver = {
- .verify = tegra_verify_speed,
- .target = tegra_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = tegra_target,
.get = tegra_getspeed,
.init = tegra_cpu_init,
.exit = tegra_cpu_exit,
.name = "tegra",
- .attr = tegra_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index b225f04d8ae..653ae2955b5 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
- policy->cur = ucv2_getspeed(0);
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 00000000000..7f7c9c01b44
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,70 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+ /*
+ * platform specific SPC code must initialise the opp table
+ * so just check if the OPP count is non-zero
+ */
+ return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+ return 1000000; /* 1 ms */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+ .name = "vexpress-spc",
+ .get_transition_latency = ve_spc_get_transition_latency,
+ .init_opp_table = ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+ return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+ bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+ .driver = {
+ .name = "vexpress-spc-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = ve_spc_cpufreq_probe,
+ .remove = ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 8e366032230..d988948a89a 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -2,9 +2,20 @@
# ARM CPU Idle drivers
#
+config ARM_BIG_LITTLE_CPUIDLE
+ bool "Support for ARM big.LITTLE processors"
+ depends on ARCH_VEXPRESS_TC2_PM
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+ Select this option to enable CPU idle driver for big.LITTLE based
+ ARM systems. The driver manages CPU coordination through MCPM and
+ defines different C-states for little and big cores through the
+ multiple CPU idle drivers infrastructure.
+
config ARM_HIGHBANK_CPUIDLE
bool "CPU Idle Driver for Calxeda processors"
- depends on ARCH_HIGHBANK
+ depends on ARM_PSCI
select ARM_CPU_SUSPEND
help
Select this to enable cpuidle on Calxeda processors.
@@ -27,13 +38,9 @@ config ARM_U8500_CPUIDLE
help
Select this to enable cpuidle for ST-E u8500 processors
-config CPU_IDLE_BIG_LITTLE
- bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM
- select ARM_CPU_SUSPEND
- select CPU_IDLE_MULTIPLE_DRIVERS
+config ARM_AT91_CPUIDLE
+ bool "Cpu Idle Driver for the AT91 processors"
+ default y
+ depends on ARCH_AT91
help
- Select this option to enable CPU idle driver for big.LITTLE based
- ARM systems. Driver manages CPUs coordination through MCPM and
- define different C-states for little and big cores through the
- multiple CPU idle drivers infrastructure.
+ Select this to enable cpuidle for AT91 processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cea5ef58876..527be28e5c1 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,8 +7,9 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
-obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o
+obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index f8a86364c6b..e952936418d 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -147,7 +147,7 @@ static cpumask_t cpuidle_coupled_poked;
* has returned from this function, the barrier is immediately available for
* reuse.
*
- * The atomic variable a must be initialized to 0 before any cpu calls
+ * The atomic variable must be initialized to 0 before any cpu calls
* this function, will be reset to 0 before any cpu returns from this function.
*
* Must only be called from within a coupled idle state handler
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c
new file mode 100644
index 00000000000..a0774370c6b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -0,0 +1,69 @@
+/*
+ * based on arch/arm/mach-kirkwood/cpuidle.c
+ *
+ * CPU idle support for AT91 SoC
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The cpu idle uses wait-for-interrupt and RAM self refresh in order
+ * to implement two idle states -
+ * #1 wait-for-interrupt
+ * #2 wait-for-interrupt and RAM self refresh
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
+
+#define AT91_MAX_STATES 2
+
+static void (*at91_standby)(void);
+
+/* Actual code that puts the SoC in different idle states */
+static int at91_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ at91_standby();
+ return index;
+}
+
+static struct cpuidle_driver at91_idle_driver = {
+ .name = "at91_idle",
+ .owner = THIS_MODULE,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = at91_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 10000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "RAM_SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = AT91_MAX_STATES,
+};
+
+/* Initialize CPU idle by registering the idle states */
+static int at91_cpuidle_probe(struct platform_device *dev)
+{
+ at91_standby = (void *)(dev->dev.platform_data);
+
+ return cpuidle_register(&at91_idle_driver, NULL);
+}
+
+static struct platform_driver at91_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-at91",
+ .owner = THIS_MODULE,
+ },
+ .probe = at91_cpuidle_probe,
+};
+
+module_platform_driver(at91_cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 34605847957..36795639df0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -21,53 +21,30 @@
*/
#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/suspend.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
-#include <asm/smp_scu.h>
#include <asm/suspend.h>
-#include <asm/cacheflush.h>
-#include <asm/cp15.h>
-
-extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
-extern void __iomem *scu_base_addr;
-
-static noinline void calxeda_idle_restore(void)
-{
- set_cr(get_cr() | CR_C);
- set_auxcr(get_auxcr() | 0x40);
- scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
-}
+#include <asm/psci.h>
static int calxeda_idle_finish(unsigned long val)
{
- /* Already flushed cache, but do it again as the outer cache functions
- * dirty the cache with spinlocks */
- flush_cache_all();
-
- set_auxcr(get_auxcr() & ~0x40);
- set_cr(get_cr() & ~CR_C);
-
- scu_power_mode(scu_base_addr, SCU_PM_DORMANT);
-
- cpu_do_idle();
-
- /* Restore things if we didn't enter power-gating */
- calxeda_idle_restore();
- return 1;
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+ return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
}
static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- highbank_set_cpu_jump(smp_processor_id(), cpu_resume);
+ cpu_pm_enter();
cpu_suspend(0, calxeda_idle_finish);
+ cpu_pm_exit();
+
return index;
}
@@ -88,11 +65,17 @@ static struct cpuidle_driver calxeda_idle_driver = {
.state_count = 2,
};
-static int __init calxeda_cpuidle_init(void)
+static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
{
- if (!of_machine_is_compatible("calxeda,highbank"))
- return -ENODEV;
-
return cpuidle_register(&calxeda_idle_driver, NULL);
}
-module_init(calxeda_cpuidle_init);
+
+static struct platform_driver calxeda_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-calxeda",
+ .owner = THIS_MODULE,
+ },
+ .probe = calxeda_cpuidle_probe,
+};
+
+module_platform_driver(calxeda_cpuidle_plat_driver);
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index e0564652af3..5e35804b1a9 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
.state_count = 2,
};
-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
+static int dbx500_cpuidle_probe(struct platform_device *pdev)
{
/* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 38e03a18359..aded7592802 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
@@ -70,14 +70,19 @@ static struct cpuidle_driver zynq_idle_driver = {
};
/* Initialize CPU idle by registering the idle states */
-static int __init zynq_cpuidle_init(void)
+static int zynq_cpuidle_probe(struct platform_device *pdev)
{
- if (!of_machine_is_compatible("xlnx,zynq-7000"))
- return -ENODEV;
-
pr_info("Xilinx Zynq CpuIdle Driver started\n");
return cpuidle_register(&zynq_idle_driver, NULL);
}
-device_initcall(zynq_cpuidle_init);
+static struct platform_driver zynq_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-zynq",
+ .owner = THIS_MODULE,
+ },
+ .probe = zynq_cpuidle_probe,
+};
+
+module_platform_driver(zynq_cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d75040ddd2b..2a991e468f7 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -118,11 +118,9 @@ int cpuidle_idle_call(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv;
int next_state, entered_state;
+ bool broadcast;
- if (off)
- return -ENODEV;
-
- if (!initialized)
+ if (off || !initialized)
return -ENODEV;
/* check if the device is ready */
@@ -144,9 +142,10 @@ int cpuidle_idle_call(void)
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
- &dev->cpu);
+ broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+ if (broadcast)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
if (cpuidle_state_is_coupled(dev, drv, next_state))
entered_state = cpuidle_enter_state_coupled(dev, drv,
@@ -154,9 +153,8 @@ int cpuidle_idle_call(void)
else
entered_state = cpuidle_enter_state(dev, drv, next_state);
- if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
- &dev->cpu);
+ if (broadcast)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -228,45 +226,6 @@ void cpuidle_resume(void)
mutex_unlock(&cpuidle_lock);
}
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- ktime_t t1, t2;
- s64 diff;
-
- t1 = ktime_get();
- local_irq_enable();
- while (!need_resched())
- cpu_relax();
-
- t2 = ktime_get();
- diff = ktime_to_us(ktime_sub(t2, t1));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
- return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
- struct cpuidle_state *state = &drv->states[0];
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
- state->exit_latency = 0;
- state->target_residency = 0;
- state->power_usage = -1;
- state->flags = 0;
- state->enter = poll_idle;
- state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
/**
* cpuidle_enable_device - enables idle PM for a CPU
* @dev: the CPU
@@ -296,8 +255,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (!dev->state_count)
dev->state_count = drv->state_count;
- poll_idle_init(drv);
-
ret = cpuidle_add_device_sysfs(dev);
if (ret)
return ret;
@@ -358,12 +315,10 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
module_put(drv->owner);
}
-static int __cpuidle_device_init(struct cpuidle_device *dev)
+static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
-
- return 0;
}
/**
@@ -385,13 +340,12 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
- if (ret) {
+ if (ret)
__cpuidle_unregister_device(dev);
- return ret;
- }
+ else
+ dev->registered = 1;
- dev->registered = 1;
- return 0;
+ return ret;
}
/**
@@ -410,9 +364,7 @@ int cpuidle_register_device(struct cpuidle_device *dev)
if (dev->registered)
goto out_unlock;
- ret = __cpuidle_device_init(dev);
- if (ret)
- goto out_unlock;
+ __cpuidle_device_init(dev);
ret = __cpuidle_register_device(dev);
if (ret)
@@ -516,7 +468,7 @@ int cpuidle_register(struct cpuidle_driver *drv,
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
/*
- * On multiplatform for ARM, the coupled idle states could
+ * On multiplatform for ARM, the coupled idle states could be
* enabled in the kernel even if the cpuidle driver does not
* use it. Note, coupled_cpus is a struct copy.
*/
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 6e11701f0fc..06dbe7c8619 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
@@ -56,7 +57,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
}
/**
- * __cpuidle_set_driver - set per CPU driver variables the the given driver.
+ * __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
* For each CPU in the driver's cpumask, unset the registered driver per CPU
@@ -132,7 +133,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* cpuidle_setup_broadcast_timer - enable/disable the broadcast timer
* @arg: a void pointer used to match the SMP cross call API
*
- * @arg is used as a value of type 'long' with on of the two values:
+ * @arg is used as a value of type 'long' with one of the two values:
* - CLOCK_EVT_NOTIFY_BROADCAST_ON
* - CLOCK_EVT_NOTIFY_BROADCAST_OFF
*
@@ -149,10 +150,8 @@ static void cpuidle_setup_broadcast_timer(void *arg)
/**
* __cpuidle_driver_init - initialize the driver's internal data
* @drv: a valid pointer to a struct cpuidle_driver
- *
- * Returns 0 on success, a negative error code otherwise.
*/
-static int __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
@@ -169,20 +168,55 @@ static int __cpuidle_driver_init(struct cpuidle_driver *drv)
/*
* Look for the timer stop flag in the different states, so that we know
* if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually on of the the deeper states has this flag set.
+ * order, because usually one of the deeper states has this flag set.
*/
for (i = drv->state_count - 1; i >= 0 ; i--) {
+ if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ drv->bctimer = 1;
+ break;
+ }
+ }
+}
- if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
- continue;
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ ktime_t t1, t2;
+ s64 diff;
- drv->bctimer = 1;
- break;
- }
+ t1 = ktime_get();
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
- return 0;
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
}
+static void poll_idle_init(struct cpuidle_driver *drv)
+{
+ struct cpuidle_state *state = &drv->states[0];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->flags = 0;
+ state->enter = poll_idle;
+ state->disabled = false;
+}
+#else
+static void poll_idle_init(struct cpuidle_driver *drv) {}
+#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
+
/**
* __cpuidle_register_driver: register the driver
* @drv: a valid pointer to a struct cpuidle_driver
@@ -206,9 +240,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
if (cpuidle_disabled())
return -ENODEV;
- ret = __cpuidle_driver_init(drv);
- if (ret)
- return ret;
+ __cpuidle_driver_init(drv);
ret = __cpuidle_set_driver(drv);
if (ret)
@@ -218,6 +250,8 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+ poll_idle_init(drv);
+
return 0;
}
@@ -346,10 +380,11 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
*/
void cpuidle_driver_unref(void)
{
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
+ drv = cpuidle_get_driver();
if (drv && !WARN_ON(drv->refcnt <= 0))
drv->refcnt--;
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index ea2f8e7aa24..ca89412f512 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -96,46 +96,3 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
return ret;
}
-
-/**
- * cpuidle_replace_governor - find a replacement governor
- * @exclude_rating: the rating that will be skipped while looking for
- * new governor.
- */
-static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
-{
- struct cpuidle_governor *gov;
- struct cpuidle_governor *ret_gov = NULL;
- unsigned int max_rating = 0;
-
- list_for_each_entry(gov, &cpuidle_governors, governor_list) {
- if (gov->rating == exclude_rating)
- continue;
- if (gov->rating > max_rating) {
- max_rating = gov->rating;
- ret_gov = gov;
- }
- }
-
- return ret_gov;
-}
-
-/**
- * cpuidle_unregister_governor - unregisters a governor
- * @gov: the governor
- */
-void cpuidle_unregister_governor(struct cpuidle_governor *gov)
-{
- if (!gov)
- return;
-
- mutex_lock(&cpuidle_lock);
- if (gov == cpuidle_curr_governor) {
- struct cpuidle_governor *new_gov;
- new_gov = cpuidle_replace_governor(gov->rating);
- cpuidle_switch_governor(new_gov);
- }
- list_del(&gov->governor_list);
- mutex_unlock(&cpuidle_lock);
-}
-
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 8739cc05228..e918b6d0caf 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -52,11 +52,12 @@ static ssize_t show_current_driver(struct device *dev,
char *buf)
{
ssize_t ret;
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
- if (cpuidle_driver)
- ret = sprintf(buf, "%s\n", cpuidle_driver->name);
+ drv = cpuidle_get_driver();
+ if (drv)
+ ret = sprintf(buf, "%s\n", drv->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index f88e3d8f6b6..efaf6302405 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -27,6 +27,9 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index b010d42a180..bc6d820812b 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,9 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#include "compat.h"
#include "regs.h"
#include "intern.h"
@@ -224,7 +227,7 @@ static int caam_probe(struct platform_device *pdev)
topregs = (struct caam_full __iomem *)ctrl;
/* Get the IRQ of the controller (for security violations only) */
- ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+ ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 105ba4da618..bdb786d5a5e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -5,6 +5,8 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/of_irq.h>
+
#include "compat.h"
#include "regs.h"
#include "jr.h"
@@ -403,7 +405,7 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
dma_set_mask(jrdev, DMA_BIT_MASK(32));
/* Identify the interrupt */
- jrpriv->irq = of_irq_to_resource(np, 0, NULL);
+ jrpriv->irq = irq_of_parse_and_map(np, 0);
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6..214357e12dc 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
static int support_aes = 1;
-static void dev_release(struct device *dev)
-{
- return;
-}
-
#define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
- .name = DRIVER_NAME,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .release = dev_release,
- }
-};
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
+ struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
static void one_packet(dma_addr_t phys)
{
+ struct device *dev = &pdev->dev;
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
tasklet_schedule(&crypto_done_tasklet);
}
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
{
int ret = -ENODEV;
u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
return ret;
}
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
struct buffer_desc src_hook;
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
unsigned int cryptlen;
struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1418,20 +1408,30 @@ static struct ixp_alg ixp4xx_algos[] = {
} };
#define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i,err ;
+ int i, err ;
- if (platform_device_register(&pseudo_dev))
- return -ENODEV;
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ dev = &pdev->dev;
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
- err = init_ixp_crypto();
+ err = init_ixp_crypto(&pdev->dev);
if (err) {
- platform_device_unregister(&pseudo_dev);
+ platform_device_unregister(pdev);
return err;
}
for (i=0; i< num; i++) {
@@ -1495,8 +1495,8 @@ static void __exit ixp_module_exit(void)
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
- release_ixp_crypto();
- platform_device_unregister(&pseudo_dev);
+ release_ixp_crypto(&pdev->dev);
+ platform_device_unregister(pdev);
}
module_init(ixp_module_init);
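The ixp4xx hunk above replaces a hand-rolled static platform_device with platform_device_register_full(). A minimal sketch of that pattern, under the assumption of a hypothetical "example-dev" device; the dma_mask field makes the core allocate and set the device DMA masks, which is why the old .coherent_dma_mask initialiser and empty release callback can go away:

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static struct platform_device *example_pdev;

/* Sketch: register a pseudo platform device instead of a static one. */
static int __init example_init(void)
{
	const struct platform_device_info info = {
		.name     = "example-dev",	/* hypothetical name */
		.id       = 0,
		.dma_mask = DMA_BIT_MASK(32),	/* core sets up dev->dma_mask */
	};

	example_pdev = platform_device_register_full(&info);
	if (IS_ERR(example_pdev))
		return PTR_ERR(example_pdev);

	return 0;
}

static void __exit example_exit(void)
{
	platform_device_unregister(example_pdev);
}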
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 8bdde57f6bb..e28104b4aab 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1818,7 +1818,7 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->irq = of_irq_to_resource(node, 0, NULL);
+ dd->irq = irq_of_parse_and_map(node, 0);
if (!dd->irq) {
dev_err(dev, "can't translate OF irq value\n");
err = -EINVAL;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 661dc3eb1d6..6cd0e603858 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -32,6 +32,8 @@
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 2d58da972ae..fa05e3c329b 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -268,7 +268,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);
aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
- INIT_COMPLETION(dd->op_complete);
+ reinit_completion(&dd->op_complete);
for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
do {
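The tegra-aes hunk swaps the old INIT_COMPLETION() macro for reinit_completion(). A short sketch of the rearm-before-reuse pattern, assuming a hypothetical driver structure with an embedded completion:

#include <linux/completion.h>

struct example_dev {
	struct completion op_complete;	/* signalled from the IRQ handler */
};

/* Sketch: rearm the completion before starting a new hardware operation. */
static void example_start_op(struct example_dev *dd)
{
	/* Clears the 'done' count without re-initialising the waitqueue. */
	reinit_completion(&dd->op_complete);

	/* ... start hardware; the IRQ path later calls complete(&dd->op_complete); */
}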
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c99c00d35d3..a0b2f7e0eed 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d,
{
struct devfreq *df = to_devfreq(d);
struct device *dev = df->dev.parent;
- struct opp *opp;
+ struct dev_pm_opp *opp;
ssize_t count = 0;
unsigned long freq = 0;
rcu_read_lock();
do {
- opp = opp_find_freq_ceil(dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp))
break;
@@ -993,10 +993,10 @@ static int __init devfreq_init(void)
}
devfreq_wq = create_freezable_workqueue("devfreq_wq");
- if (IS_ERR(devfreq_wq)) {
+ if (!devfreq_wq) {
class_destroy(devfreq_class);
pr_err("%s: couldn't create workqueue\n", __FILE__);
- return PTR_ERR(devfreq_wq);
+ return -ENOMEM;
}
devfreq_class->dev_groups = devfreq_groups;
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
- u32 flags)
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq,
+ u32 flags)
{
- struct opp *opp;
+ struct dev_pm_opp *opp;
if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
/* The freq is an upper bound. opp should be lower */
- opp = opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor(dev, freq);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, freq);
} else {
/* The freq is an lower bound. opp should be higher */
- opp = opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, freq);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor(dev, freq);
}
return opp;
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
int ret = 0;
rcu_read_lock();
- nh = opp_get_notifier(dev);
+ nh = dev_pm_opp_get_notifier(dev);
if (IS_ERR(nh))
ret = PTR_ERR(nh);
rcu_read_unlock();
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
int ret = 0;
rcu_read_lock();
- nh = opp_get_notifier(dev);
+ nh = dev_pm_opp_get_notifier(dev);
if (IS_ERR(nh))
ret = PTR_ERR(nh);
rcu_read_unlock();
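The devfreq and exynos hunks rename the OPP helpers from opp_* to dev_pm_opp_* and the opaque type from struct opp to struct dev_pm_opp. A sketch of the lookup pattern as it appears above, assuming a caller-supplied struct device and the RCU locking these 3.13-era OPP helpers required:

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>
#include <linux/err.h>

/* Sketch: find the highest OPP at or below *freq and read back its values. */
static int example_pick_opp(struct device *dev, unsigned long *freq,
			    unsigned long *volt)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();			/* OPP objects are RCU protected here */
	opp = dev_pm_opp_find_freq_floor(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*freq = dev_pm_opp_get_freq(opp);
	*volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}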
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index c5f86d8caca..cede6f71cd6 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq;
unsigned long old_freq = data->curr_oppinfo.rate;
struct busfreq_opp_info new_oppinfo;
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
rcu_read_unlock();
return PTR_ERR(opp);
}
- new_oppinfo.rate = opp_get_freq(opp);
- new_oppinfo.volt = opp_get_voltage(opp);
+ new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
freq = new_oppinfo.rate;
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data)
exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
for (i = LV_0; i < EX4210_LV_NUM; i++) {
- err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
exynos4210_busclk_table[i].volt);
if (err) {
dev_err(data->dev, "Cannot add opp entries.\n");
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data)
}
for (i = 0; i < EX4x12_LV_NUM; i++) {
- ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
exynos4x12_mifclk_table[i].volt);
if (ret) {
dev_err(data->dev, "Fail to add opp entries.\n");
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
{
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
rcu_read_lock();
- opp = opp_find_freq_floor(data->dev, &maxfreq);
+ opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(data->dev, "%s: unable to find a min freq\n",
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
mutex_unlock(&data->lock);
return PTR_ERR(opp);
}
- new_oppinfo.rate = opp_get_freq(opp);
- new_oppinfo.volt = opp_get_voltage(opp);
+ new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
err = exynos4_bus_setvolt(data, &new_oppinfo,
@@ -1020,7 +1020,7 @@ unlock:
static int exynos4_busfreq_probe(struct platform_device *pdev)
{
struct busfreq_data *data;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct device *dev = &pdev->dev;
int err = 0;
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
}
rcu_read_lock();
- opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ opp = dev_pm_opp_find_freq_floor(dev,
+ &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos4_devfreq_profile.initial_freq);
return PTR_ERR(opp);
}
- data->curr_oppinfo.rate = opp_get_freq(opp);
- data->curr_oppinfo.volt = opp_get_voltage(opp);
+ data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
platform_set_drvdata(pdev, data);
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
index 574b16b59be..a60da3c1c48 100644
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ b/drivers/devfreq/exynos/exynos5_bus.c
@@ -15,10 +15,9 @@
#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/suspend.h>
-#include <linux/opp.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -132,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long old_freq, freq;
unsigned long volt;
@@ -144,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
return PTR_ERR(opp);
}
- freq = opp_get_freq(opp);
- volt = opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
old_freq = data->curr_freq;
@@ -246,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data)
int i, err = 0;
for (i = LV_0; i < _LV_END; i++) {
- err = opp_add(data->dev, exynos5_int_opp_table[i].clk,
+ err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
exynos5_int_opp_table[i].volt);
if (err) {
dev_err(data->dev, "Cannot add opp entries.\n");
@@ -262,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
{
struct busfreq_data_int *data = container_of(this,
struct busfreq_data_int, pm_notifier);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long maxfreq = ULONG_MAX;
unsigned long freq;
unsigned long volt;
@@ -276,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
rcu_read_lock();
- opp = opp_find_freq_floor(data->dev, &maxfreq);
+ opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
if (IS_ERR(opp)) {
rcu_read_unlock();
err = PTR_ERR(opp);
goto unlock;
}
- freq = opp_get_freq(opp);
- volt = opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
err = exynos5_int_setvolt(data, volt);
@@ -316,7 +315,7 @@ unlock:
static int exynos5_busfreq_int_probe(struct platform_device *pdev)
{
struct busfreq_data_int *data;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct device *dev = &pdev->dev;
struct device_node *np;
unsigned long initial_freq;
@@ -351,46 +350,43 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
err = exynos5250_init_int_tables(data);
if (err)
- goto err_regulator;
+ return err;
- data->vdd_int = regulator_get(dev, "vdd_int");
+ data->vdd_int = devm_regulator_get(dev, "vdd_int");
if (IS_ERR(data->vdd_int)) {
dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- err = PTR_ERR(data->vdd_int);
- goto err_regulator;
+ return PTR_ERR(data->vdd_int);
}
- data->int_clk = clk_get(dev, "int_clk");
+ data->int_clk = devm_clk_get(dev, "int_clk");
if (IS_ERR(data->int_clk)) {
dev_err(dev, "Cannot get clock \"int_clk\"\n");
- err = PTR_ERR(data->int_clk);
- goto err_clock;
+ return PTR_ERR(data->int_clk);
}
rcu_read_lock();
- opp = opp_find_freq_floor(dev,
+ opp = dev_pm_opp_find_freq_floor(dev,
&exynos5_devfreq_int_profile.initial_freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos5_devfreq_int_profile.initial_freq);
- err = PTR_ERR(opp);
- goto err_opp_add;
+ return PTR_ERR(opp);
}
- initial_freq = opp_get_freq(opp);
- initial_volt = opp_get_voltage(opp);
+ initial_freq = dev_pm_opp_get_freq(opp);
+ initial_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
data->curr_freq = initial_freq;
err = clk_set_rate(data->int_clk, initial_freq * 1000);
if (err) {
dev_err(dev, "Failed to set initial frequency\n");
- goto err_opp_add;
+ return err;
}
err = exynos5_int_setvolt(data, initial_volt);
if (err)
- goto err_opp_add;
+ return err;
platform_set_drvdata(pdev, data);
@@ -419,12 +415,6 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
err_devfreq_add:
devfreq_remove_device(data->devfreq);
- platform_set_drvdata(pdev, NULL);
-err_opp_add:
- clk_put(data->int_clk);
-err_clock:
- regulator_put(data->vdd_int);
-err_regulator:
return err;
}
@@ -435,9 +425,6 @@ static int exynos5_busfreq_int_remove(struct platform_device *pdev)
pm_qos_remove_request(&data->int_req);
unregister_pm_notifier(&data->pm_notifier);
devfreq_remove_device(data->devfreq);
- regulator_put(data->vdd_int);
- clk_put(data->int_clk);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -479,7 +466,7 @@ static int __init exynos5_busfreq_int_init(void)
exynos5_devfreq_pdev =
platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
- if (IS_ERR_OR_NULL(exynos5_devfreq_pdev)) {
+ if (IS_ERR(exynos5_devfreq_pdev)) {
ret = PTR_ERR(exynos5_devfreq_pdev);
goto out1;
}
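The exynos5_bus hunk drops the regulator_put()/clk_put() error unwinding by switching to the managed devm_* getters. A sketch of the resulting probe-side shape, with hypothetical supply and clock names; managed resources are released automatically on probe failure and on driver detach, so the goto labels disappear:

#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
#include <linux/err.h>

/* Sketch: devm-managed regulator and clock lookup with no unwind labels. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regulator *vdd;
	struct clk *clk;

	vdd = devm_regulator_get(dev, "vdd_int");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	clk = devm_clk_get(dev, "int_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}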
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd3384..446687cc233 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,14 +89,15 @@ config AT_HDMAC
Support the Atmel AHB DMA controller.
config FSL_DMA
- tristate "Freescale Elo and Elo Plus DMA support"
+ tristate "Freescale Elo series DMA support"
depends on FSL_SOC
select DMA_ENGINE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
- Enable support for the Freescale Elo and Elo Plus DMA controllers.
- The Elo is the DMA controller on some 82xx and 83xx parts, and the
- Elo Plus is the DMA controller on 85xx and 86xx parts.
+ Enable support for the Freescale Elo series DMA controllers.
+ The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
+ EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
+ some Txxx and Bxxx parts.
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"
@@ -154,6 +155,18 @@ config TEGRA20_APB_DMA
This DMA controller transfers data from memory to peripheral fifo
or vice versa. It does not support memory to memory data transfer.
+config S3C24XX_DMAC
+ tristate "Samsung S3C24XX DMA support"
+ depends on ARCH_S3C24XX && !S3C24XX_DMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Samsung S3C24XX DMA controller driver. The
+ DMA controller has multiple DMA channels which can be
+ configured for different peripherals like audio, UART, SPI.
+ The DMA controller can transfer data from memory to peripheral,
+ peripheral to memory, peripheral to peripheral and memory to memory.
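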
+
source "drivers/dma/sh/Kconfig"
config COH901318
@@ -195,7 +208,7 @@ config SIRF_DMA
config TI_EDMA
bool "TI EDMA support"
- depends on ARCH_DAVINCI || ARCH_OMAP
+ depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_PRIV_EDMA
@@ -301,7 +314,7 @@ config MMP_PDMA
depends on (ARCH_MMP || ARCH_PXA)
select DMA_ENGINE
help
- Support the MMP PDMA engine for PXA and MMP platfrom.
+ Support the MMP PDMA engine for PXA and MMP platform.
config DMA_JZ4740
tristate "JZ4740 DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035b362..0ce2da97e42 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5bf1c..16a2aa28f85 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
- struct device *dev = txd->vd.tx.chan->device->dev;
- struct pl08x_sg *dsg;
-
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- else {
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- }
- }
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- else
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- }
-}
-
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
- if (!plchan->slave)
- pl08x_unmap_buffers(txd);
-
+ dma_descriptor_unmap(txd);
if (!txd->done)
pl08x_release_mux(plchan);
@@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
/*
@@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&plchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
+ if (ret != DMA_COMPLETE) {
vd = vchan_find_desc(&plchan->vc, cookie);
if (vd) {
/* On the issued list, so hasn't been processed yet */
@@ -2055,6 +2025,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
return ret;
+ /* Ensure that we can do DMA */
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out_no_pl08x;
+
/* Create the driver state holder */
pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
if (!pl08x) {
@@ -2133,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
- ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
- DRIVER_NAME, pl08x);
+ ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
__func__, adev->irq[0]);
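Several of the DMA hunks (pl08x, at_hdmac, coh901318, cppi41, jz4740) replace DMA_SUCCESS with DMA_COMPLETE when checking dma_cookie_status(). A sketch of a typical tx_status callback using the new name, assuming the driver-local dmaengine.h helpers and a hypothetical residue calculation:

#include <linux/dmaengine.h>
#include "dmaengine.h"		/* driver-local: dma_cookie_status(), dma_set_residue() */

/* Sketch: only compute a residue when the transfer is not already complete. */
static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* spelled DMA_SUCCESS before this series */
		return ret;

	/* ... hardware-specific residue calculation would go here ... */
	dma_set_residue(txstate, 0);

	return ret;
}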
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c787f38a186..e2c04dc81e2 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
- /* unmap dma addresses (not on slave channels) */
- if (!atchan->chan_common.private) {
- struct device *parent = chan2parent(&atchan->chan_common);
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(parent,
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(parent,
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(parent,
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- else
- dma_unmap_page(parent,
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- }
- }
-
+ dma_descriptor_unmap(txd);
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
@@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan,
int bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
/*
* There's no point calculating the residue if there's
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index 5e2ed30ba2c..2074e0e3fa2 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/mmu.h>
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 31011d2a26f..3c6716e0b78 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+ err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
"coh901318", base);
if (err)
return err;
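The pl08x and coh901318 hunks drop IRQF_DISABLED from request_irq()/devm_request_irq(); the flag has long been a no-op, so handlers are simply requested with flags = 0. A small sketch, with a hypothetical wrapper:

#include <linux/interrupt.h>

/* Sketch: request a (managed) IRQ with no special flags. */
static int example_request(struct device *dev, unsigned int irq,
			   irq_handler_t handler, void *ctx)
{
	return devm_request_irq(dev, irq, handler, 0, "example", ctx);
}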
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 7c82b92f9b1..c29dacff66f 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -141,6 +141,9 @@ struct cppi41_dd {
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
+
+ /* context for suspend/resume */
+ unsigned int dma_tdfdq;
};
#define FIST_COMPLETION_QUEUE 93
@@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val)
return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}
+static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
+{
+ u32 desc;
+
+ desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
+ desc &= ~0x1f;
+ return desc;
+}
+
static irqreturn_t cppi41_irq(int irq, void *data)
{
struct cppi41_dd *cdd = data;
@@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
q_num = __fls(val);
val &= ~(1 << q_num);
q_num += 32 * i;
- desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
- desc &= ~0x1f;
+ desc = cppi41_pop_desc(cdd, q_num);
c = desc_to_chan(cdd, desc);
if (WARN_ON(!c)) {
pr_err("%s() q %d desc %08x\n", __func__,
@@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
/* lock */
ret = dma_cookie_status(chan, cookie, txstate);
- if (txstate && ret == DMA_SUCCESS)
+ if (txstate && ret == DMA_COMPLETE)
txstate->residue = c->residue;
/* unlock */
@@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d)
d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
-static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
-{
- u32 desc;
-
- desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
- desc &= ~0x1f;
- return desc;
-}
-
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
struct cppi41_dd *cdd = c->cdd;
@@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
c->td_retry = 100;
}
- if (!c->td_seen) {
- unsigned td_comp_queue;
+ if (!c->td_seen || !c->td_desc_seen) {
- if (c->is_tx)
- td_comp_queue = cdd->td_queue.complete;
- else
- td_comp_queue = c->q_comp_num;
+ desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
+ if (!desc_phys)
+ desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
- desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
- if (desc_phys) {
- __iormb();
+ if (desc_phys == c->desc_phys) {
+ c->td_desc_seen = 1;
+
+ } else if (desc_phys == td_desc_phys) {
+ u32 pd0;
- if (desc_phys == td_desc_phys) {
- u32 pd0;
- pd0 = td->pd0;
- WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
- WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
- WARN_ON((pd0 & 0x1f) != c->port_num);
- } else {
- WARN_ON_ONCE(1);
- }
- c->td_seen = 1;
- }
- }
- if (!c->td_desc_seen) {
- desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
- if (desc_phys) {
__iormb();
- WARN_ON(c->desc_phys != desc_phys);
- c->td_desc_seen = 1;
+ pd0 = td->pd0;
+ WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
+ WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
+ WARN_ON((pd0 & 0x1f) != c->port_num);
+ c->td_seen = 1;
+ } else if (desc_phys) {
+ WARN_ON_ONCE(1);
}
}
c->td_retry--;
@@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
WARN_ON(!c->td_retry);
if (!c->td_desc_seen) {
- desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+ desc_phys = cppi41_pop_desc(cdd, c->q_num);
WARN_ON(!desc_phys);
}
@@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
}
}
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
struct cppi41_channel *cchan;
int i;
int ret;
u32 n_chans;
- ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+ ret = of_property_read_u32(dev->of_node, "#dma-channels",
&n_chans);
if (ret)
return ret;
@@ -719,7 +711,7 @@ err:
return -ENOMEM;
}
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int mem_decs;
int i;
@@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
- dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+ dma_free_coherent(dev, mem_decs, cdd->cd,
cdd->descs_phys);
}
}
@@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd)
cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
disable_sched(cdd);
- purge_descs(pdev, cdd);
+ purge_descs(dev, cdd);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
- dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+ dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
cdd->scratch_phys);
}
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int desc_size;
unsigned int mem_decs;
@@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
reg |= ilog2(ALLOC_DECS_NUM) - 5;
BUILD_BUG_ON(DESCS_AREAS != 1);
- cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+ cdd->cd = dma_alloc_coherent(dev, mem_decs,
&cdd->descs_phys, GFP_KERNEL);
if (!cdd->cd)
return -ENOMEM;
@@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd)
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
int ret;
BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
- cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+ cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
&cdd->scratch_phys, GFP_KERNEL);
if (!cdd->qmgr_scratch)
return -ENOMEM;
@@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
- ret = init_descs(pdev, cdd);
+ ret = init_descs(dev, cdd);
if (ret)
goto err_td;
@@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
init_sched(cdd);
return 0;
err_td:
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(dev, cdd);
return ret;
}
@@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
const struct of_device_id *of_id;
- of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+ of_id = of_match_node(cppi41_dma_ids, dev->of_node);
if (!of_id)
return NULL;
return of_id->data;
@@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
static int cppi41_dma_probe(struct platform_device *pdev)
{
struct cppi41_dd *cdd;
+ struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
int irq;
int ret;
- glue_info = get_glue_info(pdev);
+ glue_info = get_glue_info(dev);
if (!glue_info)
return -EINVAL;
@@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
cdd->ddev.device_control = cppi41_dma_control;
- cdd->ddev.dev = &pdev->dev;
+ cdd->ddev.dev = dev;
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
- cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
- cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
- cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
- cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+ cdd->usbss_mem = of_iomap(dev->of_node, 0);
+ cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+ cdd->sched_mem = of_iomap(dev->of_node, 2);
+ cdd->qmgr_mem = of_iomap(dev->of_node, 3);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem) {
@@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
goto err_remap;
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
goto err_get_sync;
cdd->queues_rx = glue_info->queues_rx;
cdd->queues_tx = glue_info->queues_tx;
cdd->td_queue = glue_info->td_queue;
- ret = init_cppi41(pdev, cdd);
+ ret = init_cppi41(dev, cdd);
if (ret)
goto err_init_cppi;
- ret = cppi41_add_chans(pdev, cdd);
+ ret = cppi41_add_chans(dev, cdd);
if (ret)
goto err_chans;
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
goto err_irq;
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
- dev_name(&pdev->dev), cdd);
+ dev_name(dev), cdd);
if (ret)
goto err_irq;
cdd->irq = irq;
@@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_dma_reg;
- ret = of_dma_controller_register(pdev->dev.of_node,
+ ret = of_dma_controller_register(dev->of_node,
cppi41_dma_xlate, &cpp41_dma_info);
if (ret)
goto err_of;
@@ -1009,11 +1002,11 @@ err_irq:
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
cleanup_chans(cdd);
err_chans:
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(dev, cdd);
err_init_cppi:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put(dev);
err_get_sync:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
free_irq(cdd->irq, cdd);
cleanup_chans(cdd);
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(&pdev->dev, cdd);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+ cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ disable_sched(cdd);
+
+ return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ struct cppi41_channel *c;
+ int i;
+
+ for (i = 0; i < DESCS_AREAS; i++)
+ cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+ list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
+ if (!c->is_tx)
+ cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+
+ init_sched(cdd);
+
+ cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
+ cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+ cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+
+ cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
.remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.owner = THIS_MODULE,
+ .pm = &cppi41_pm_ops,
.of_match_table = of_match_ptr(cppi41_dma_ids),
},
};
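The cppi41 hunk above adds suspend/resume callbacks and wires them up through SIMPLE_DEV_PM_OPS. A sketch of that wiring, with hypothetical callbacks; the #ifdef mirrors the hunk so the functions are not defined unused when CONFIG_PM_SLEEP is off:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* save hardware context, mask interrupts, stop the scheduler */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore the context saved in example_suspend() */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm   = &example_pm_ops,
	},
};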
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index b0c0c8268d4..94c380f0753 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
unsigned long flags;
status = dma_cookie_status(c, cookie, state);
- if (status == DMA_SUCCESS || !state)
+ if (status == DMA_COMPLETE || !state)
return status;
spin_lock_irqsave(&chan->vchan.lock, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18..ea806bdc12e 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,6 +65,7 @@
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
+#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
- void *src, size_t len)
-{
- struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- unsigned long flags;
+struct dmaengine_unmap_pool {
+ struct kmem_cache *cache;
+ const char *name;
+ mempool_t *pool;
+ size_t size;
+};
- dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
- dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
- flags = DMA_CTRL_ACK |
- DMA_COMPL_SRC_UNMAP_SINGLE |
- DMA_COMPL_DEST_UNMAP_SINGLE;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
+static struct dmaengine_unmap_pool unmap_pool[] = {
+ __UNMAP_POOL(2),
+ #if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+ __UNMAP_POOL(16),
+ __UNMAP_POOL(128),
+ __UNMAP_POOL(256),
+ #endif
+};
- if (!tx) {
- dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
- return -ENOMEM;
+static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+{
+ int order = get_count_order(nr);
+
+ switch (order) {
+ case 0 ... 1:
+ return &unmap_pool[0];
+ case 2 ... 4:
+ return &unmap_pool[1];
+ case 5 ... 7:
+ return &unmap_pool[2];
+ case 8:
+ return &unmap_pool[3];
+ default:
+ BUG();
+ return NULL;
}
+}
- tx->callback = NULL;
- cookie = tx->tx_submit(tx);
+static void dmaengine_unmap(struct kref *kref)
+{
+ struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
+ struct device *dev = unmap->dev;
+ int cnt, i;
+
+ cnt = unmap->to_cnt;
+ for (i = 0; i < cnt; i++)
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_TO_DEVICE);
+ cnt += unmap->from_cnt;
+ for (; i < cnt; i++)
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_FROM_DEVICE);
+ cnt += unmap->bidi_cnt;
+ for (; i < cnt; i++) {
+ if (unmap->addr[i] == 0)
+ continue;
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_BIDIRECTIONAL);
+ }
+ mempool_free(unmap, __get_unmap_pool(cnt)->pool);
+}
- preempt_disable();
- __this_cpu_add(chan->local->bytes_transferred, len);
- __this_cpu_inc(chan->local->memcpy_count);
- preempt_enable();
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+ if (unmap)
+ kref_put(&unmap->kref, dmaengine_unmap);
+}
+EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
- return cookie;
+static void dmaengine_destroy_unmap_pool(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+ struct dmaengine_unmap_pool *p = &unmap_pool[i];
+
+ if (p->pool)
+ mempool_destroy(p->pool);
+ p->pool = NULL;
+ if (p->cache)
+ kmem_cache_destroy(p->cache);
+ p->cache = NULL;
+ }
}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
- unsigned int offset, void *kdata, size_t len)
+static int __init dmaengine_init_unmap_pool(void)
{
- struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- unsigned long flags;
+ int i;
- dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
- dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
- flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+ for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+ struct dmaengine_unmap_pool *p = &unmap_pool[i];
+ size_t size;
- if (!tx) {
- dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
- return -ENOMEM;
+ size = sizeof(struct dmaengine_unmap_data) +
+ sizeof(dma_addr_t) * p->size;
+
+ p->cache = kmem_cache_create(p->name, size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!p->cache)
+ break;
+ p->pool = mempool_create_slab_pool(1, p->cache);
+ if (!p->pool)
+ break;
}
- tx->callback = NULL;
- cookie = tx->tx_submit(tx);
+ if (i == ARRAY_SIZE(unmap_pool))
+ return 0;
- preempt_disable();
- __this_cpu_add(chan->local->bytes_transferred, len);
- __this_cpu_inc(chan->local->memcpy_count);
- preempt_enable();
+ dmaengine_destroy_unmap_pool();
+ return -ENOMEM;
+}
- return cookie;
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ struct dmaengine_unmap_data *unmap;
+
+ unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
+ if (!unmap)
+ return NULL;
+
+ memset(unmap, 0, sizeof(*unmap));
+ kref_init(&unmap->kref);
+ unmap->dev = dev;
+
+ return unmap;
}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+EXPORT_SYMBOL(dmaengine_get_unmap_data);
/**
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
@@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
+ struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
unsigned long flags;
- dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
- dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
- DMA_FROM_DEVICE);
+ unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+ if (!unmap)
+ return -ENOMEM;
+
+ unmap->to_cnt = 1;
+ unmap->from_cnt = 1;
+ unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
+ DMA_TO_DEVICE);
+ unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
+ DMA_FROM_DEVICE);
+ unmap->len = len;
flags = DMA_CTRL_ACK;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+ tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
+ len, flags);
if (!tx) {
- dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+ dmaengine_unmap_put(unmap);
return -ENOMEM;
}
- tx->callback = NULL;
+ dma_set_unmap(tx, unmap);
cookie = tx->tx_submit(tx);
+ dmaengine_unmap_put(unmap);
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
@@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+ void *src, size_t len)
+{
+ return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
+ (unsigned long) dest & ~PAGE_MASK,
+ virt_to_page(src),
+ (unsigned long) src & ~PAGE_MASK, len);
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
+ unsigned int offset, void *kdata, size_t len)
+{
+ return dma_async_memcpy_pg_to_pg(chan, page, offset,
+ virt_to_page(kdata),
+ (unsigned long) kdata & ~PAGE_MASK, len);
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
@@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
if (!tx)
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
@@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
+ int err = dmaengine_init_unmap_pool();
+
+ if (err)
+ return err;
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
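The dmaengine.c hunk introduces reference-counted unmap data drawn from mempools, replacing the per-call dma_unmap_* bookkeeping in the memcpy helpers. A simplified sketch of how a client such as dma_async_memcpy_pg_to_pg() above uses it; the mapping error handling is trimmed and the function name is hypothetical:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Sketch: page-to-page memcpy where the core unmaps buffers on completion. */
static dma_cookie_t example_memcpy(struct dma_chan *chan,
				   struct page *dst, struct page *src,
				   size_t len)
{
	struct dma_device *dev = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_CTRL_ACK);
	if (!tx) {
		dmaengine_unmap_put(unmap);	/* drops the mappings immediately */
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);		/* descriptor now holds a reference */
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);		/* drop our own reference */

	return cookie;
}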
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 92f796cdc6a..20f9a3aaf92 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -8,6 +8,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -19,10 +21,6 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
@@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
-/* Maximum amount of mismatched bytes in buffer to print */
-#define MAX_ERROR_COUNT 32
-
-/*
- * Initialization patterns. All bytes in the source buffer has bit 7
- * set, all bytes in the destination buffer has bit 7 cleared.
- *
- * Bit 6 is set for all bytes which are to be copied by the DMA
- * engine. Bit 5 is set for all bytes which are to be overwritten by
- * the DMA engine.
- *
- * The remaining bits are the inverse of a counter which increments by
- * one for each byte address.
- */
-#define PATTERN_SRC 0x80
-#define PATTERN_DST 0x00
-#define PATTERN_COPY 0x40
-#define PATTERN_OVERWRITE 0x20
-#define PATTERN_COUNT_MASK 0x1f
-
-enum dmatest_error_type {
- DMATEST_ET_OK,
- DMATEST_ET_MAP_SRC,
- DMATEST_ET_MAP_DST,
- DMATEST_ET_PREP,
- DMATEST_ET_SUBMIT,
- DMATEST_ET_TIMEOUT,
- DMATEST_ET_DMA_ERROR,
- DMATEST_ET_DMA_IN_PROGRESS,
- DMATEST_ET_VERIFY,
- DMATEST_ET_VERIFY_BUF,
-};
-
-struct dmatest_verify_buffer {
- unsigned int index;
- u8 expected;
- u8 actual;
-};
-
-struct dmatest_verify_result {
- unsigned int error_count;
- struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
- u8 pattern;
- bool is_srcbuf;
-};
-
-struct dmatest_thread_result {
- struct list_head node;
- unsigned int n;
- unsigned int src_off;
- unsigned int dst_off;
- unsigned int len;
- enum dmatest_error_type type;
- union {
- unsigned long data;
- dma_cookie_t cookie;
- enum dma_status status;
- int error;
- struct dmatest_verify_result *vr;
- };
-};
-
-struct dmatest_result {
- struct list_head node;
- char *name;
- struct list_head results;
-};
-
-struct dmatest_info;
-
-struct dmatest_thread {
- struct list_head node;
- struct dmatest_info *info;
- struct task_struct *task;
- struct dma_chan *chan;
- u8 **srcs;
- u8 **dsts;
- enum dma_transaction_type type;
- bool done;
-};
+static bool noverify;
+module_param(noverify, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
-struct dmatest_chan {
- struct list_head node;
- struct dma_chan *chan;
- struct list_head threads;
-};
+static bool verbose;
+module_param(verbose, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
/**
* struct dmatest_params - test parameters.
@@ -177,6 +96,7 @@ struct dmatest_params {
unsigned int xor_sources;
unsigned int pq_sources;
int timeout;
+ bool noverify;
};
/**
@@ -184,7 +104,7 @@ struct dmatest_params {
* @params: test parameters
* @lock: access protection to the fields of this structure
*/
-struct dmatest_info {
+static struct dmatest_info {
/* Test parameters */
struct dmatest_params params;
@@ -192,16 +112,95 @@ struct dmatest_info {
struct list_head channels;
unsigned int nr_channels;
struct mutex lock;
+ bool did_init;
+} test_info = {
+ .channels = LIST_HEAD_INIT(test_info.channels),
+ .lock = __MUTEX_INITIALIZER(test_info.lock),
+};
+
+static int dmatest_run_set(const char *val, const struct kernel_param *kp);
+static int dmatest_run_get(char *val, const struct kernel_param *kp);
+static struct kernel_param_ops run_ops = {
+ .set = dmatest_run_set,
+ .get = dmatest_run_get,
+};
+static bool dmatest_run;
+module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(run, "Run the test (default: false)");
+
+/* Maximum amount of mismatched bytes in buffer to print */
+#define MAX_ERROR_COUNT 32
+
+/*
+ * Initialization patterns. All bytes in the source buffer has bit 7
+ * set, all bytes in the destination buffer has bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
- /* debugfs related stuff */
- struct dentry *root;
+struct dmatest_thread {
+ struct list_head node;
+ struct dmatest_info *info;
+ struct task_struct *task;
+ struct dma_chan *chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
- /* Test results */
- struct list_head results;
- struct mutex results_lock;
+struct dmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
};
-static struct dmatest_info test_info;
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static bool wait;
+
+static bool is_threaded_test_run(struct dmatest_info *info)
+{
+ struct dmatest_chan *dtc;
+
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int dmatest_wait_get(char *val, const struct kernel_param *kp)
+{
+ struct dmatest_info *info = &test_info;
+ struct dmatest_params *params = &info->params;
+
+ if (params->iterations)
+ wait_event(thread_wait, !is_threaded_test_run(info));
+ wait = true;
+ return param_get_bool(val, kp);
+}
+
+static struct kernel_param_ops wait_ops = {
+ .get = dmatest_wait_get,
+ .set = param_set_bool,
+};
+module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
static bool dmatest_match_channel(struct dmatest_params *params,
struct dma_chan *chan)
@@ -223,7 +222,7 @@ static unsigned long dmatest_random(void)
{
unsigned long buf;
- get_random_bytes(&buf, sizeof(buf));
+ prandom_bytes(&buf, sizeof(buf));
return buf;
}
@@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
}
}
-static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
- unsigned int start, unsigned int end, unsigned int counter,
- u8 pattern, bool is_srcbuf)
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
@@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
u8 expected;
u8 *buf;
unsigned int counter_orig = counter;
- struct dmatest_verify_buffer *vb;
for (; (buf = *bufs); bufs++) {
counter = counter_orig;
@@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
actual = buf[i];
expected = pattern | (~counter & PATTERN_COUNT_MASK);
if (actual != expected) {
- if (error_count < MAX_ERROR_COUNT && vr) {
- vb = &vr->data[error_count];
- vb->index = i;
- vb->expected = expected;
- vb->actual = actual;
- }
+ if (error_count < MAX_ERROR_COUNT)
+ dmatest_mismatch(actual, pattern, i,
+ counter, is_srcbuf);
error_count++;
}
counter++;
@@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
}
if (error_count > MAX_ERROR_COUNT)
- pr_warning("%s: %u errors suppressed\n",
+ pr_warn("%s: %u errors suppressed\n",
current->comm, error_count - MAX_ERROR_COUNT);
return error_count;
@@ -313,20 +330,6 @@ static void dmatest_callback(void *arg)
wake_up_all(done->wait);
}
-static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
- unsigned int count)
-{
- while (count--)
- dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
-}
-
-static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
- unsigned int count)
-{
- while (count--)
- dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
-}
-
static unsigned int min_odd(unsigned int x, unsigned int y)
{
unsigned int val = min(x, y);
@@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
return val % 2 ? val : val - 1;
}
-static char *verify_result_get_one(struct dmatest_verify_result *vr,
- unsigned int i)
+static void result(const char *err, unsigned int n, unsigned int src_off,
+ unsigned int dst_off, unsigned int len, unsigned long data)
{
- struct dmatest_verify_buffer *vb = &vr->data[i];
- u8 diff = vb->actual ^ vr->pattern;
- static char buf[512];
- char *msg;
-
- if (vr->is_srcbuf)
- msg = "srcbuf overwritten!";
- else if ((vr->pattern & PATTERN_COPY)
- && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
- msg = "dstbuf not copied!";
- else if (diff & PATTERN_SRC)
- msg = "dstbuf was copied!";
- else
- msg = "dstbuf mismatch!";
-
- snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
- vb->index, vb->expected, vb->actual);
-
- return buf;
+ pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+ current->comm, n, err, src_off, dst_off, len, data);
}
-static char *thread_result_get(const char *name,
- struct dmatest_thread_result *tr)
+static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
+ unsigned int dst_off, unsigned int len,
+ unsigned long data)
{
- static const char * const messages[] = {
- [DMATEST_ET_OK] = "No errors",
- [DMATEST_ET_MAP_SRC] = "src mapping error",
- [DMATEST_ET_MAP_DST] = "dst mapping error",
- [DMATEST_ET_PREP] = "prep error",
- [DMATEST_ET_SUBMIT] = "submit error",
- [DMATEST_ET_TIMEOUT] = "test timed out",
- [DMATEST_ET_DMA_ERROR] =
- "got completion callback (DMA_ERROR)",
- [DMATEST_ET_DMA_IN_PROGRESS] =
- "got completion callback (DMA_IN_PROGRESS)",
- [DMATEST_ET_VERIFY] = "errors",
- [DMATEST_ET_VERIFY_BUF] = "verify errors",
- };
- static char buf[512];
-
- snprintf(buf, sizeof(buf) - 1,
- "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
- name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
- tr->len, tr->data);
-
- return buf;
+ pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+ current->comm, n, err, src_off, dst_off, len, data);
}
-static int thread_result_add(struct dmatest_info *info,
- struct dmatest_result *r, enum dmatest_error_type type,
- unsigned int n, unsigned int src_off, unsigned int dst_off,
- unsigned int len, unsigned long data)
-{
- struct dmatest_thread_result *tr;
-
- tr = kzalloc(sizeof(*tr), GFP_KERNEL);
- if (!tr)
- return -ENOMEM;
-
- tr->type = type;
- tr->n = n;
- tr->src_off = src_off;
- tr->dst_off = dst_off;
- tr->len = len;
- tr->data = data;
+#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
+ if (verbose) \
+ result(err, n, src_off, dst_off, len, data); \
+ else \
+ dbg_result(err, n, src_off, dst_off, len, data); \
+})
- mutex_lock(&info->results_lock);
- list_add_tail(&tr->node, &r->results);
- mutex_unlock(&info->results_lock);
-
- if (tr->type == DMATEST_ET_OK)
- pr_debug("%s\n", thread_result_get(r->name, tr));
- else
- pr_warn("%s\n", thread_result_get(r->name, tr));
-
- return 0;
-}
-
-static unsigned int verify_result_add(struct dmatest_info *info,
- struct dmatest_result *r, unsigned int n,
- unsigned int src_off, unsigned int dst_off, unsigned int len,
- u8 **bufs, int whence, unsigned int counter, u8 pattern,
- bool is_srcbuf)
+static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
- struct dmatest_verify_result *vr;
- unsigned int error_count;
- unsigned int buf_off = is_srcbuf ? src_off : dst_off;
- unsigned int start, end;
-
- if (whence < 0) {
- start = 0;
- end = buf_off;
- } else if (whence > 0) {
- start = buf_off + len;
- end = info->params.buf_size;
- } else {
- start = buf_off;
- end = buf_off + len;
- }
+ unsigned long long per_sec = 1000000;
- vr = kmalloc(sizeof(*vr), GFP_KERNEL);
- if (!vr) {
- pr_warn("dmatest: No memory to store verify result\n");
- return dmatest_verify(NULL, bufs, start, end, counter, pattern,
- is_srcbuf);
- }
-
- vr->pattern = pattern;
- vr->is_srcbuf = is_srcbuf;
-
- error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
- is_srcbuf);
- if (error_count) {
- vr->error_count = error_count;
- thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
- dst_off, len, (unsigned long)vr);
- return error_count;
- }
-
- kfree(vr);
- return 0;
-}
-
-static void result_free(struct dmatest_info *info, const char *name)
-{
- struct dmatest_result *r, *_r;
-
- mutex_lock(&info->results_lock);
- list_for_each_entry_safe(r, _r, &info->results, node) {
- struct dmatest_thread_result *tr, *_tr;
-
- if (name && strcmp(r->name, name))
- continue;
-
- list_for_each_entry_safe(tr, _tr, &r->results, node) {
- if (tr->type == DMATEST_ET_VERIFY_BUF)
- kfree(tr->vr);
- list_del(&tr->node);
- kfree(tr);
- }
+ if (runtime <= 0)
+ return 0;
- kfree(r->name);
- list_del(&r->node);
- kfree(r);
+	/* drop precision until the runtime fits in 32 bits */
+ while (runtime > UINT_MAX) {
+ runtime >>= 1;
+ per_sec <<= 1;
}
- mutex_unlock(&info->results_lock);
+ per_sec *= val;
+ do_div(per_sec, runtime);
+ return per_sec;
}
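
A minimal user-space sketch of the scaling trick in dmatest_persec() above, not taken from the patch (the helper name and sample numbers are illustrative): the 64-bit runtime in microseconds is shifted down until it fits in 32 bits while the per-second scale factor is shifted up by the same amount, keeping the ratio intact before the final division.

#include <stdint.h>

static uint64_t ops_per_sec(int64_t runtime_us, unsigned int ops)
{
	uint64_t per_sec = 1000000;

	if (runtime_us <= 0)
		return 0;
	/* drop precision until the runtime fits in 32 bits */
	while (runtime_us > UINT32_MAX) {
		runtime_us >>= 1;
		per_sec <<= 1;
	}
	/* e.g. 2000 operations over 500000 us -> 4000 ops/s */
	return per_sec * ops / (uint64_t)runtime_us;
}
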
-static struct dmatest_result *result_init(struct dmatest_info *info,
- const char *name)
+static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
- struct dmatest_result *r;
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (r) {
- r->name = kstrdup(name, GFP_KERNEL);
- INIT_LIST_HEAD(&r->results);
- mutex_lock(&info->results_lock);
- list_add_tail(&r->node, &info->results);
- mutex_unlock(&info->results_lock);
- }
- return r;
+ return dmatest_persec(runtime, len >> 10);
}
/*
@@ -525,7 +405,6 @@ static int dmatest_func(void *data)
struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
- const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
unsigned int failed_tests = 0;
@@ -538,9 +417,10 @@ static int dmatest_func(void *data)
int src_cnt;
int dst_cnt;
int i;
- struct dmatest_result *result;
+ ktime_t ktime;
+ s64 runtime = 0;
+ unsigned long long total_len = 0;
- thread_name = current->comm;
set_freezable();
ret = -ENOMEM;
@@ -570,10 +450,6 @@ static int dmatest_func(void *data)
} else
goto err_thread_type;
- result = result_init(info, thread_name);
- if (!result)
- goto err_srcs;
-
thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
@@ -597,17 +473,17 @@ static int dmatest_func(void *data)
set_user_nice(current, 10);
/*
- * src buffers are freed by the DMAEngine code with dma_unmap_single()
- * dst buffers are freed by ourselves below
+ * src and dst buffers are freed by ourselves below
*/
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
- | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ ktime = ktime_get();
while (!kthread_should_stop()
&& !(params->iterations && total_tests >= params->iterations)) {
struct dma_async_tx_descriptor *tx = NULL;
- dma_addr_t dma_srcs[src_cnt];
- dma_addr_t dma_dsts[dst_cnt];
+ struct dmaengine_unmap_data *um;
+ dma_addr_t srcs[src_cnt];
+ dma_addr_t *dsts;
u8 align = 0;
total_tests++;
@@ -626,81 +502,103 @@ static int dmatest_func(void *data)
break;
}
- len = dmatest_random() % params->buf_size + 1;
+ if (params->noverify) {
+ len = params->buf_size;
+ src_off = 0;
+ dst_off = 0;
+ } else {
+ len = dmatest_random() % params->buf_size + 1;
+ len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ src_off = dmatest_random() % (params->buf_size - len + 1);
+ dst_off = dmatest_random() % (params->buf_size - len + 1);
+
+ src_off = (src_off >> align) << align;
+ dst_off = (dst_off >> align) << align;
+
+ dmatest_init_srcs(thread->srcs, src_off, len,
+ params->buf_size);
+ dmatest_init_dsts(thread->dsts, dst_off, len,
+ params->buf_size);
+ }
+
len = (len >> align) << align;
if (!len)
len = 1 << align;
- src_off = dmatest_random() % (params->buf_size - len + 1);
- dst_off = dmatest_random() % (params->buf_size - len + 1);
+ total_len += len;
- src_off = (src_off >> align) << align;
- dst_off = (dst_off >> align) << align;
-
- dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
- dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
+ um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+ GFP_KERNEL);
+ if (!um) {
+ failed_tests++;
+ result("unmap data NULL", total_tests,
+ src_off, dst_off, len, ret);
+ continue;
+ }
+ um->len = params->buf_size;
for (i = 0; i < src_cnt; i++) {
- u8 *buf = thread->srcs[i] + src_off;
-
- dma_srcs[i] = dma_map_single(dev->dev, buf, len,
- DMA_TO_DEVICE);
- ret = dma_mapping_error(dev->dev, dma_srcs[i]);
+ unsigned long buf = (unsigned long) thread->srcs[i];
+ struct page *pg = virt_to_page(buf);
+ unsigned pg_off = buf & ~PAGE_MASK;
+
+ um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
+ um->len, DMA_TO_DEVICE);
+ srcs[i] = um->addr[i] + src_off;
+ ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
- unmap_src(dev->dev, dma_srcs, len, i);
- thread_result_add(info, result,
- DMATEST_ET_MAP_SRC,
- total_tests, src_off, dst_off,
- len, ret);
+ dmaengine_unmap_put(um);
+ result("src mapping error", total_tests,
+ src_off, dst_off, len, ret);
failed_tests++;
continue;
}
+ um->to_cnt++;
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
+ dsts = &um->addr[src_cnt];
for (i = 0; i < dst_cnt; i++) {
- dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
- params->buf_size,
- DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev->dev, dma_dsts[i]);
+ unsigned long buf = (unsigned long) thread->dsts[i];
+ struct page *pg = virt_to_page(buf);
+ unsigned pg_off = buf & ~PAGE_MASK;
+
+ dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
- unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, params->buf_size,
- i);
- thread_result_add(info, result,
- DMATEST_ET_MAP_DST,
- total_tests, src_off, dst_off,
- len, ret);
+ dmaengine_unmap_put(um);
+ result("dst mapping error", total_tests,
+ src_off, dst_off, len, ret);
failed_tests++;
continue;
}
+ um->bidi_cnt++;
}
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
- dma_dsts[0] + dst_off,
- dma_srcs[0], len,
- flags);
+ dsts[0] + dst_off,
+ srcs[0], len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
- dma_dsts[0] + dst_off,
- dma_srcs, src_cnt,
+ dsts[0] + dst_off,
+ srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
for (i = 0; i < dst_cnt; i++)
- dma_pq[i] = dma_dsts[i] + dst_off;
- tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
+ dma_pq[i] = dsts[i] + dst_off;
+ tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
src_cnt, pq_coefs,
len, flags);
}
if (!tx) {
- unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, params->buf_size,
- dst_cnt);
- thread_result_add(info, result, DMATEST_ET_PREP,
- total_tests, src_off, dst_off,
- len, 0);
+ dmaengine_unmap_put(um);
+ result("prep error", total_tests, src_off,
+ dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
@@ -712,9 +610,9 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- thread_result_add(info, result, DMATEST_ET_SUBMIT,
- total_tests, src_off, dst_off,
- len, cookie);
+ dmaengine_unmap_put(um);
+ result("submit error", total_tests, src_off,
+ dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
@@ -735,59 +633,59 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
- thread_result_add(info, result, DMATEST_ET_TIMEOUT,
- total_tests, src_off, dst_off,
- len, 0);
+ dmaengine_unmap_put(um);
+ result("test timed out", total_tests, src_off, dst_off,
+ len, 0);
failed_tests++;
continue;
- } else if (status != DMA_SUCCESS) {
- enum dmatest_error_type type = (status == DMA_ERROR) ?
- DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
- thread_result_add(info, result, type,
- total_tests, src_off, dst_off,
- len, status);
+ } else if (status != DMA_COMPLETE) {
+ dmaengine_unmap_put(um);
+ result(status == DMA_ERROR ?
+ "completion error status" :
+ "completion busy status", total_tests, src_off,
+ dst_off, len, ret);
failed_tests++;
continue;
}
- /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
+ dmaengine_unmap_put(um);
- error_count = 0;
+ if (params->noverify) {
+ verbose_result("test passed", total_tests, src_off,
+ dst_off, len, 0);
+ continue;
+ }
- pr_debug("%s: verifying source buffer...\n", thread_name);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->srcs, -1,
+ pr_debug("%s: verifying source buffer...\n", current->comm);
+ error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->srcs, 0,
- src_off, PATTERN_SRC | PATTERN_COPY, true);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->srcs, 1,
- src_off + len, PATTERN_SRC, true);
-
- pr_debug("%s: verifying dest buffer...\n", thread_name);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->dsts, -1,
+ error_count += dmatest_verify(thread->srcs, src_off,
+ src_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, true);
+ error_count += dmatest_verify(thread->srcs, src_off + len,
+ params->buf_size, src_off + len,
+ PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n", current->comm);
+ error_count += dmatest_verify(thread->dsts, 0, dst_off,
0, PATTERN_DST, false);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->dsts, 0,
- src_off, PATTERN_SRC | PATTERN_COPY, false);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->dsts, 1,
- dst_off + len, PATTERN_DST, false);
+ error_count += dmatest_verify(thread->dsts, dst_off,
+ dst_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, false);
+ error_count += dmatest_verify(thread->dsts, dst_off + len,
+ params->buf_size, dst_off + len,
+ PATTERN_DST, false);
if (error_count) {
- thread_result_add(info, result, DMATEST_ET_VERIFY,
- total_tests, src_off, dst_off,
- len, error_count);
+ result("data error", total_tests, src_off, dst_off,
+ len, error_count);
failed_tests++;
} else {
- thread_result_add(info, result, DMATEST_ET_OK,
- total_tests, src_off, dst_off,
- len, 0);
+ verbose_result("test passed", total_tests, src_off,
+ dst_off, len, 0);
}
}
+ runtime = ktime_us_delta(ktime_get(), ktime);
ret = 0;
for (i = 0; thread->dsts[i]; i++)
@@ -802,20 +700,17 @@ err_srcbuf:
err_srcs:
kfree(pq_coefs);
err_thread_type:
- pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
- thread_name, total_tests, failed_tests, ret);
+ pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
+ current->comm, total_tests, failed_tests,
+ dmatest_persec(runtime, total_tests),
+ dmatest_KBs(runtime, total_len), ret);
/* terminate all transfers on specified channels */
if (ret)
dmaengine_terminate_all(chan);
thread->done = true;
-
- if (params->iterations > 0)
- while (!kthread_should_stop()) {
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
- interruptible_sleep_on(&wait_dmatest_exit);
- }
+ wake_up(&thread_wait);
return ret;
}
@@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
ret = kthread_stop(thread->task);
- pr_debug("dmatest: thread %s exited with status %d\n",
- thread->task->comm, ret);
+ pr_debug("thread %s exited with status %d\n",
+ thread->task->comm, ret);
list_del(&thread->node);
+ put_task_struct(thread->task);
kfree(thread);
}
@@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info,
for (i = 0; i < params->threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
- pr_warning("dmatest: No memory for %s-%s%u\n",
- dma_chan_name(chan), op, i);
-
+ pr_warn("No memory for %s-%s%u\n",
+ dma_chan_name(chan), op, i);
break;
}
thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
smp_wmb();
- thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
+ thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);
if (IS_ERR(thread->task)) {
- pr_warning("dmatest: Failed to run thread %s-%s%u\n",
- dma_chan_name(chan), op, i);
+ pr_warn("Failed to create thread %s-%s%u\n",
+ dma_chan_name(chan), op, i);
kfree(thread);
break;
}
/* srcbuf and dstbuf are allocated by the thread itself */
-
+ get_task_struct(thread->task);
list_add_tail(&thread->node, &dtc->threads);
+ wake_up_process(thread->task);
}
return i;
@@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) {
- pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+ pr_warn("No memory for %s\n", dma_chan_name(chan));
return -ENOMEM;
}
@@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
thread_count += cnt > 0 ? cnt : 0;
}
- pr_info("dmatest: Started %u threads using %s\n",
+ pr_info("Started %u threads using %s\n",
thread_count, dma_chan_name(chan));
list_add_tail(&dtc->node, &info->channels);
@@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static int __run_threaded_test(struct dmatest_info *info)
+static void request_channels(struct dmatest_info *info,
+ enum dma_transaction_type type)
{
dma_cap_mask_t mask;
- struct dma_chan *chan;
- struct dmatest_params *params = &info->params;
- int err = 0;
dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(type, mask);
for (;;) {
+ struct dmatest_params *params = &info->params;
+ struct dma_chan *chan;
+
chan = dma_request_channel(mask, filter, params);
if (chan) {
- err = dmatest_add_channel(info, chan);
- if (err) {
+ if (dmatest_add_channel(info, chan)) {
dma_release_channel(chan);
break; /* add_channel failed, punt */
}
@@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info)
info->nr_channels >= params->max_channels)
break; /* we have all we need */
}
- return err;
}
-#ifndef MODULE
-static int run_threaded_test(struct dmatest_info *info)
+static void run_threaded_test(struct dmatest_info *info)
{
- int ret;
+ struct dmatest_params *params = &info->params;
- mutex_lock(&info->lock);
- ret = __run_threaded_test(info);
- mutex_unlock(&info->lock);
- return ret;
+ /* Copy test parameters */
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strlcpy(params->device, strim(test_device), sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
+ params->noverify = noverify;
+
+ request_channels(info, DMA_MEMCPY);
+ request_channels(info, DMA_XOR);
+ request_channels(info, DMA_PQ);
}
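
The block above snapshots the module parameters once per run, so values written while a test is in flight only take effect on the next run. As a hedged usage sketch (assuming the module parameter names match the variables copied above, plus the run control handled further down): modprobe dmatest channel=dma0chan0 iterations=5 noverify=1 run=1.
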
-#endif
-static void __stop_threaded_test(struct dmatest_info *info)
+static void stop_threaded_test(struct dmatest_info *info)
{
struct dmatest_chan *dtc, *_dtc;
struct dma_chan *chan;
@@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info)
list_del(&dtc->node);
chan = dtc->chan;
dmatest_cleanup_channel(dtc);
- pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
+ pr_debug("dropped channel %s\n", dma_chan_name(chan));
dma_release_channel(chan);
}
info->nr_channels = 0;
}
-static void stop_threaded_test(struct dmatest_info *info)
+static void restart_threaded_test(struct dmatest_info *info, bool run)
{
- mutex_lock(&info->lock);
- __stop_threaded_test(info);
- mutex_unlock(&info->lock);
-}
-
-static int __restart_threaded_test(struct dmatest_info *info, bool run)
-{
- struct dmatest_params *params = &info->params;
+	/* we might be called early to set the run= parameter; defer running
+	 * until all parameters have been evaluated
+ */
+ if (!info->did_init)
+ return;
/* Stop any running test first */
- __stop_threaded_test(info);
-
- if (run == false)
- return 0;
-
- /* Clear results from previous run */
- result_free(info, NULL);
-
- /* Copy test parameters */
- params->buf_size = test_buf_size;
- strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
- strlcpy(params->device, strim(test_device), sizeof(params->device));
- params->threads_per_chan = threads_per_chan;
- params->max_channels = max_channels;
- params->iterations = iterations;
- params->xor_sources = xor_sources;
- params->pq_sources = pq_sources;
- params->timeout = timeout;
+ stop_threaded_test(info);
/* Run test with new parameters */
- return __run_threaded_test(info);
-}
-
-static bool __is_threaded_test_run(struct dmatest_info *info)
-{
- struct dmatest_chan *dtc;
-
- list_for_each_entry(dtc, &info->channels, node) {
- struct dmatest_thread *thread;
-
- list_for_each_entry(thread, &dtc->threads, node) {
- if (!thread->done)
- return true;
- }
- }
-
- return false;
+ run_threaded_test(info);
}
-static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
- struct dmatest_info *info = file->private_data;
- char buf[3];
+ struct dmatest_info *info = &test_info;
mutex_lock(&info->lock);
-
- if (__is_threaded_test_run(info)) {
- buf[0] = 'Y';
+ if (is_threaded_test_run(info)) {
+ dmatest_run = true;
} else {
- __stop_threaded_test(info);
- buf[0] = 'N';
+ stop_threaded_test(info);
+ dmatest_run = false;
}
-
mutex_unlock(&info->lock);
- buf[1] = '\n';
- buf[2] = 0x00;
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- char buf[16];
- bool bv;
- int ret = 0;
- if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
- return -EFAULT;
-
- if (strtobool(buf, &bv) == 0) {
- mutex_lock(&info->lock);
-
- if (__is_threaded_test_run(info))
- ret = -EBUSY;
- else
- ret = __restart_threaded_test(info, bv);
-
- mutex_unlock(&info->lock);
- }
-
- return ret ? ret : count;
+ return param_get_bool(val, kp);
}
-static const struct file_operations dtf_run_fops = {
- .read = dtf_read_run,
- .write = dtf_write_run,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static int dtf_results_show(struct seq_file *sf, void *data)
+static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
- struct dmatest_info *info = sf->private;
- struct dmatest_result *result;
- struct dmatest_thread_result *tr;
- unsigned int i;
+ struct dmatest_info *info = &test_info;
+ int ret;
- mutex_lock(&info->results_lock);
- list_for_each_entry(result, &info->results, node) {
- list_for_each_entry(tr, &result->results, node) {
- seq_printf(sf, "%s\n",
- thread_result_get(result->name, tr));
- if (tr->type == DMATEST_ET_VERIFY_BUF) {
- for (i = 0; i < tr->vr->error_count; i++) {
- seq_printf(sf, "\t%s\n",
- verify_result_get_one(tr->vr, i));
- }
- }
- }
+ mutex_lock(&info->lock);
+ ret = param_set_bool(val, kp);
+ if (ret) {
+ mutex_unlock(&info->lock);
+ return ret;
}
- mutex_unlock(&info->results_lock);
- return 0;
-}
-
-static int dtf_results_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dtf_results_show, inode->i_private);
-}
-
-static const struct file_operations dtf_results_fops = {
- .open = dtf_results_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int dmatest_register_dbgfs(struct dmatest_info *info)
-{
- struct dentry *d;
-
- d = debugfs_create_dir("dmatest", NULL);
- if (IS_ERR(d))
- return PTR_ERR(d);
- if (!d)
- goto err_root;
+ if (is_threaded_test_run(info))
+ ret = -EBUSY;
+ else if (dmatest_run)
+ restart_threaded_test(info, dmatest_run);
- info->root = d;
-
- /* Run or stop threaded test */
- debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
- &dtf_run_fops);
-
- /* Results of test in progress */
- debugfs_create_file("results", S_IRUGO, info->root, info,
- &dtf_results_fops);
-
- return 0;
+ mutex_unlock(&info->lock);
-err_root:
- pr_err("dmatest: Failed to initialize debugfs\n");
- return -ENOMEM;
+ return ret;
}
static int __init dmatest_init(void)
{
struct dmatest_info *info = &test_info;
- int ret;
-
- memset(info, 0, sizeof(*info));
+ struct dmatest_params *params = &info->params;
- mutex_init(&info->lock);
- INIT_LIST_HEAD(&info->channels);
+ if (dmatest_run) {
+ mutex_lock(&info->lock);
+ run_threaded_test(info);
+ mutex_unlock(&info->lock);
+ }
- mutex_init(&info->results_lock);
- INIT_LIST_HEAD(&info->results);
+ if (params->iterations && wait)
+ wait_event(thread_wait, !is_threaded_test_run(info));
- ret = dmatest_register_dbgfs(info);
- if (ret)
- return ret;
+	/* module parameters are stable and init-time tests have been started;
+	 * let userspace take over 'run' control
+ */
+ info->did_init = true;
-#ifdef MODULE
return 0;
-#else
- return run_threaded_test(info);
-#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);
@@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void)
{
struct dmatest_info *info = &test_info;
- debugfs_remove_recursive(info->root);
+ mutex_lock(&info->lock);
stop_threaded_test(info);
- result_free(info, NULL);
+ mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 89eb89f2228..7516be4677c 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
-static struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
@@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- if (!is_slave_direction(dwc->direction)) {
- struct device *parent = chan2parent(&dwc->chan);
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(parent, desc->lli.dar,
- desc->total_len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(parent, desc->lli.dar,
- desc->total_len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(parent, desc->lli.sar,
- desc->total_len, DMA_TO_DEVICE);
- else
- dma_unmap_page(parent, desc->lli.sar,
- desc->total_len, DMA_TO_DEVICE);
- }
- }
-
+ dma_descriptor_unmap(txd);
spin_unlock_irqrestore(&dwc->lock, flags);
if (callback)
@@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS)
+ if (ret != DMA_COMPLETE)
dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused && ret == DMA_IN_PROGRESS)
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index e35d9759031..453822cc4f9 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -191,11 +191,9 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- /* Apply default dma_mask if needed */
- if (!dev->dma_mask) {
- dev->dma_mask = &dev->coherent_dma_mask;
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- }
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
pdata = dev_get_platdata(dev);
if (!pdata)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 10b577fcf48..2539ea0cbc6 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -46,14 +46,21 @@
#define EDMA_CHANS 64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG 16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots.
+ * Also note that MAX_NR_SG should be at least the number of periods
+ * required for ASoC, otherwise DMA prep calls will fail. Today
+ * davinci-pcm is the only user of this driver and requires at least
+ * 17 slots, so we set the default to 20.
+ */
+#define MAX_NR_SG 20
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
struct edma_desc {
struct virt_dma_desc vdesc;
struct list_head node;
+ int cyclic;
int absync;
int pset_nr;
int processed;
@@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan)
* then setup a link to the dummy slot, this results in all future
* events being absorbed and that's OK because we're done
*/
- if (edesc->processed == edesc->pset_nr)
- edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+ if (edesc->processed == edesc->pset_nr) {
+ if (edesc->cyclic)
+ edma_link(echan->slot[nslots-1], echan->slot[1]);
+ else
+ edma_link(echan->slot[nslots-1],
+ echan->ecc->dummy_slot);
+ }
edma_resume(echan->ch_num);
@@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return ret;
}
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and setup.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: Maximum burst length to send, in units of dev_width
+ * @dev_width: Width of each device access (slave bus width)
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+ dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+ enum dma_slave_buswidth dev_width, unsigned int dma_length,
+ enum dma_transfer_direction direction)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ int acnt, bcnt, ccnt, cidx;
+ int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int absync;
+
+ acnt = dev_width;
+ /*
+ * If the maxburst is equal to the fifo width, use
+ * A-synced transfers. This allows for large contiguous
+ * buffer transfers using only one PaRAM set.
+ */
+ if (burst == 1) {
+ /*
+		 * For the A-sync case, bcnt and ccnt are the remainder and
+		 * quotient, respectively, of (dma_length / acnt) divided by
+		 * (SZ_64K - 1), so that if bcnt overflows we still have
+		 * ccnt to use.
+		 * Note: bcntrld is used only for A-sync transfers, and only
+		 * matters when sg_dma_len(sg) >= SZ_64K. In that case the
+		 * first frame uses the remainder computed below as its bcnt,
+		 * and every successive frame uses SZ_64K - 1; this is
+		 * assured because bcntrld is set to 0xffff at the end of
+		 * this function.
+ */
+ absync = false;
+ ccnt = dma_length / acnt / (SZ_64K - 1);
+ bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+ /*
+ * If bcnt is non-zero, we have a remainder and hence an
+ * extra frame to transfer, so increment ccnt.
+ */
+ if (bcnt)
+ ccnt++;
+ else
+ bcnt = SZ_64K - 1;
+ cidx = acnt;
+ } else {
+ /*
+ * If maxburst is greater than the fifo address_width,
+ * use AB-synced transfers where A count is the fifo
+ * address_width and B count is the maxburst. In this
+ * case, we are limited to transfers of C count frames
+ * of (address_width * maxburst) where C count is limited
+ * to SZ_64K-1. This places an upper bound on the length
+ * of an SG segment that can be handled.
+ */
+ absync = true;
+ bcnt = burst;
+ ccnt = dma_length / (acnt * bcnt);
+ if (ccnt > (SZ_64K - 1)) {
+ dev_err(dev, "Exceeded max SG segment size\n");
+ return -EINVAL;
+ }
+ cidx = acnt * bcnt;
+ }
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = 0;
+ dst_cidx = 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ src_bidx = 0;
+ src_cidx = 0;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
+ } else {
+ dev_err(dev, "%s: direction not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+
+ pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ /* Configure A or AB synchronized transfers */
+ if (absync)
+ pset->opt |= SYNCDIM;
+
+ pset->src = src_addr;
+ pset->dst = dst_addr;
+
+ pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+ pset->a_b_cnt = bcnt << 16 | acnt;
+ pset->ccnt = ccnt;
+ /*
+	 * The only time (bcntrld) auto-reload is required is the A-sync
+	 * case, and there the only reload value needed is SZ_64K - 1.
+	 * 'link' is initially set to NULL and is populated later by
+	 * edma_execute.
+ */
+ pset->link_bcntrld = 0xffffffff;
+ return absync;
+}
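
A stand-alone illustration of the A-sync counter split implemented above; the helper, main() and sample sizes are the editor's assumptions, not part of the patch.

#include <stdio.h>

#define SZ_64K 0x10000

/* Split an A-synced transfer (burst == 1) into acnt/bcnt/ccnt */
static void edma_async_counts(unsigned int dma_length, unsigned int acnt)
{
	unsigned int ccnt = dma_length / acnt / (SZ_64K - 1);
	unsigned int bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);

	if (bcnt)
		ccnt++;			/* remainder occupies one extra frame */
	else
		bcnt = SZ_64K - 1;

	printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
}

int main(void)
{
	/* 1 MiB with a 4-byte FIFO width: prints acnt=4 bcnt=4 ccnt=5.
	 * The first frame moves the 4-element remainder; bcntrld=0xffff
	 * then reloads bcnt to 65535 for the remaining four frames. */
	edma_async_counts(1 << 20, 4);
	return 0;
}
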
+
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
- dma_addr_t dev_addr;
+ dma_addr_t src_addr = 0, dst_addr = 0;
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int acnt, bcnt, ccnt, src, dst, cidx;
- int src_bidx, dst_bidx, src_cidx, dst_cidx;
- int i, nslots;
+ int i, nslots, ret;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
if (direction == DMA_DEV_TO_MEM) {
- dev_addr = echan->cfg.src_addr;
+ src_addr = echan->cfg.src_addr;
dev_width = echan->cfg.src_addr_width;
burst = echan->cfg.src_maxburst;
} else if (direction == DMA_MEM_TO_DEV) {
- dev_addr = echan->cfg.dst_addr;
+ dst_addr = echan->cfg.dst_addr;
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
@@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (echan->slot[i] < 0) {
kfree(edesc);
dev_err(dev, "Failed to allocate slot\n");
- kfree(edesc);
return NULL;
}
}
@@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure PaRAM sets for each SG */
for_each_sg(sgl, sg, sg_len, i) {
-
- acnt = dev_width;
-
- /*
- * If the maxburst is equal to the fifo width, use
- * A-synced transfers. This allows for large contiguous
- * buffer transfers using only one PaRAM set.
- */
- if (burst == 1) {
- edesc->absync = false;
- ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
- bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
- if (bcnt)
- ccnt++;
- else
- bcnt = SZ_64K - 1;
- cidx = acnt;
- /*
- * If maxburst is greater than the fifo address_width,
- * use AB-synced transfers where A count is the fifo
- * address_width and B count is the maxburst. In this
- * case, we are limited to transfers of C count frames
- * of (address_width * maxburst) where C count is limited
- * to SZ_64K-1. This places an upper bound on the length
- * of an SG segment that can be handled.
- */
- } else {
- edesc->absync = true;
- bcnt = burst;
- ccnt = sg_dma_len(sg) / (acnt * bcnt);
- if (ccnt > (SZ_64K - 1)) {
- dev_err(dev, "Exceeded max SG segment size\n");
- kfree(edesc);
- return NULL;
- }
- cidx = acnt * bcnt;
- }
-
- if (direction == DMA_MEM_TO_DEV) {
- src = sg_dma_address(sg);
- dst = dev_addr;
- src_bidx = acnt;
- src_cidx = cidx;
- dst_bidx = 0;
- dst_cidx = 0;
- } else {
- src = dev_addr;
- dst = sg_dma_address(sg);
- src_bidx = 0;
- src_cidx = 0;
- dst_bidx = acnt;
- dst_cidx = cidx;
+ /* Get address for each SG */
+ if (direction == DMA_DEV_TO_MEM)
+ dst_addr = sg_dma_address(sg);
+ else
+ src_addr = sg_dma_address(sg);
+
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+ dst_addr, burst, dev_width,
+ sg_dma_len(sg), direction);
+ if (ret < 0) {
+ kfree(edesc);
+ return NULL;
}
- edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
- /* Configure A or AB synchronized transfers */
- if (edesc->absync)
- edesc->pset[i].opt |= SYNCDIM;
+ edesc->absync = ret;
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
@@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
+ }
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ struct edma_desc *edesc;
+ dma_addr_t src_addr, dst_addr;
+ enum dma_slave_buswidth dev_width;
+ u32 burst;
+ int i, ret, nslots;
+
+ if (unlikely(!echan || !buf_len || !period_len))
+ return NULL;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src_addr = echan->cfg.src_addr;
+ dst_addr = buf_addr;
+ dev_width = echan->cfg.src_addr_width;
+ burst = echan->cfg.src_maxburst;
+ } else if (direction == DMA_MEM_TO_DEV) {
+ src_addr = buf_addr;
+ dst_addr = echan->cfg.dst_addr;
+ dev_width = echan->cfg.dst_addr_width;
+ burst = echan->cfg.dst_maxburst;
+ } else {
+ dev_err(dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+ dev_err(dev, "Undefined slave buswidth\n");
+ return NULL;
+ }
+
+ if (unlikely(buf_len % period_len)) {
+ dev_err(dev, "Period should be multiple of Buffer length\n");
+ return NULL;
+ }
+
+ nslots = (buf_len / period_len) + 1;
+
+ /*
+	 * Cyclic DMA users such as audio cannot tolerate delays introduced
+	 * when the number of periods exceeds the maximum number of SGs the
+	 * EDMA driver can handle at a time. For DMA types such as slave SG,
+	 * such delays are tolerable and synchronized, but that
+	 * synchronization is difficult to achieve with cyclic transfers and
+	 * cannot be guaranteed, so we error out early.
+ */
+ if (nslots > MAX_NR_SG)
+ return NULL;
+
+ edesc = kzalloc(sizeof(*edesc) + nslots *
+ sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (!edesc) {
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
+ return NULL;
+ }
+
+ edesc->cyclic = 1;
+ edesc->pset_nr = nslots;
+
+ dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
+ dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
+ dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+
+ for (i = 0; i < nslots; i++) {
+ /* Allocate a PaRAM slot, if needed */
+ if (echan->slot[i] < 0) {
+ echan->slot[i] =
+ edma_alloc_slot(EDMA_CTLR(echan->ch_num),
+ EDMA_SLOT_ANY);
+ if (echan->slot[i] < 0) {
+ dev_err(dev, "Failed to allocate slot\n");
+ return NULL;
+ }
+ }
- edesc->pset[i].src = src;
- edesc->pset[i].dst = dst;
+ if (i == nslots - 1) {
+ memcpy(&edesc->pset[i], &edesc->pset[0],
+ sizeof(edesc->pset[0]));
+ break;
+ }
- edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
- edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+ dst_addr, burst, dev_width, period_len,
+ direction);
+ if (ret < 0)
+ return NULL;
- edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
- edesc->pset[i].ccnt = ccnt;
- edesc->pset[i].link_bcntrld = 0xffffffff;
+ if (direction == DMA_DEV_TO_MEM)
+ dst_addr += period_len;
+ else
+ src_addr += period_len;
+ dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+ dev_dbg(dev,
+ "\n pset[%d]:\n"
+ " chnum\t%d\n"
+ " slot\t%d\n"
+ " opt\t%08x\n"
+ " src\t%08x\n"
+ " dst\t%08x\n"
+ " abcnt\t%08x\n"
+ " ccnt\t%08x\n"
+ " bidx\t%08x\n"
+ " cidx\t%08x\n"
+ " lkrld\t%08x\n",
+ i, echan->ch_num, echan->slot[i],
+ edesc->pset[i].opt,
+ edesc->pset[i].src,
+ edesc->pset[i].dst,
+ edesc->pset[i].a_b_cnt,
+ edesc->pset[i].ccnt,
+ edesc->pset[i].src_dst_bidx,
+ edesc->pset[i].src_dst_cidx,
+ edesc->pset[i].link_bcntrld);
+
+ edesc->absync = ret;
+
+ /*
+ * Enable interrupts for every period because callback
+ * has to be called for every period.
+ */
+ edesc->pset[i].opt |= TCINTEN;
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
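
A worked example of the slot accounting above, for illustration only: with buf_len = 48000 and period_len = 12000 there are four periods, so nslots = 4 + 1 = 5. pset[4] is a copy of pset[0], and with the edma_execute() change earlier in this patch the last slot is linked back to slot[1], so the transfer wraps around instead of terminating on the dummy slot, while TCINTEN on each period's set fires the cyclic callback once per period.
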
@@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
unsigned long flags;
struct edmacc_param p;
- /* Pause the channel */
- edma_pause(echan->ch_num);
+ edesc = echan->edesc;
+
+ /* Pause the channel for non-cyclic */
+	if (!edesc || !edesc->cyclic)
+ edma_pause(echan->ch_num);
switch (ch_status) {
- case DMA_COMPLETE:
+ case EDMA_DMA_COMPLETE:
spin_lock_irqsave(&echan->vchan.lock, flags);
- edesc = echan->edesc;
if (edesc) {
- if (edesc->processed == edesc->pset_nr) {
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ } else if (edesc->processed == edesc->pset_nr) {
dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
edma_stop(echan->ch_num);
vchan_cookie_complete(&edesc->vdesc);
+ edma_execute(echan);
} else {
dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+ edma_execute(echan);
}
-
- edma_execute(echan);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
- case DMA_CC_ERROR:
+ case EDMA_DMA_CC_ERROR:
spin_lock_irqsave(&echan->vchan.lock, flags);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS || !txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
@@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
struct device *dev)
{
dma->device_prep_slave_sg = edma_prep_slave_sg;
+ dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;
@@ -634,6 +837,10 @@ static int edma_probe(struct platform_device *pdev)
struct edma_cc *ecc;
int ret;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc) {
dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -705,11 +912,13 @@ static struct platform_device *pdev0, *pdev1;
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
+ .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
@@ -723,8 +932,6 @@ static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
- pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
- pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
@@ -734,8 +941,6 @@ static int edma_init(void)
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
- pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
- pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 591cd8c63ab..cb4bf682a70 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
spin_unlock_irqrestore(&edmac->lock, flags);
}
-static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
-{
- struct device *dev = desc->txd.chan->device->dev;
-
- if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, desc->src_addr, desc->size,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, desc->src_addr, desc->size,
- DMA_TO_DEVICE);
- }
- if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, desc->dst_addr, desc->size,
- DMA_FROM_DEVICE);
- else
- dma_unmap_page(dev, desc->dst_addr, desc->size,
- DMA_FROM_DEVICE);
- }
-}
-
static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
@@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
- /*
- * For the memcpy channels the API requires us to unmap the
- * buffers unless requested otherwise.
- */
- if (!edmac->chan.private)
- ep93xx_dma_unmap_buffers(desc);
-
+ dma_descriptor_unmap(&desc->txd);
ep93xx_dma_desc_put(edmac, desc);
}
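
The manual unmap paths deleted here (and in the dw, fsldma and ioat hunks nearby) are all replaced by one call into the dmaengine core. As rough orientation only, reconstructed by the editor from how the call is used in these hunks rather than quoted from the patch, dma_descriptor_unmap() amounts to dropping the reference-counted unmap data attached to the descriptor:

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		/* drop the reference taken when the client attached it */
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}
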
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b3f3e90054f..7086a16a55f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -33,6 +33,8 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include "dmaengine.h"
@@ -868,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
/* Run any dependencies */
dma_run_dependencies(txd);
- /* Unmap the dst buffer, if requested */
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
- }
-
- /* Unmap the src buffer, if requested */
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
- }
-
+ dma_descriptor_unmap(txd);
#ifdef FSL_DMA_LD_DEBUG
chan_dbg(chan, "LD %p free\n", desc);
#endif
@@ -1253,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
WARN_ON(fdev->feature != chan->feature);
chan->dev = fdev->dev;
- chan->id = ((res.start - 0x100) & 0xfff) >> 7;
+ chan->id = (res.start & 0xfff) < 0x300 ?
+ ((res.start - 0x100) & 0xfff) >> 7 :
+ ((res.start - 0x200) & 0xfff) >> 7;
if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL;
@@ -1426,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op)
}
static const struct of_device_id fsldma_of_ids[] = {
+ { .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{}
@@ -1447,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = {
static __init int fsldma_init(void)
{
- pr_info("Freescale Elo / Elo Plus DMA driver\n");
+ pr_info("Freescale Elo series DMA driver\n");
return platform_driver_register(&fsldma_of_driver);
}
@@ -1459,5 +1449,5 @@ static void __exit fsldma_exit(void)
subsys_initcall(fsldma_init);
module_exit(fsldma_exit);
-MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
+MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index f5c38791fc7..1ffc24484d2 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -112,7 +112,7 @@ struct fsldma_chan_regs {
};
struct fsldma_chan;
-#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
+#define FSL_DMA_MAX_CHANS_PER_DEVICE 8
struct fsldma_device {
void __iomem *regs; /* DGSR register base */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 55852c02679..6f9ac2022ab 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
- "dma_length=%d\n", __func__, imxdmac->channel,
- d->dest, d->src, d->len);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
+ __func__, imxdmac->channel,
+ (unsigned long long)d->dest,
+ (unsigned long long)d->src, d->len);
break;
/* Cyclic transfer is the same as slave_sg with special sg configuration. */
@@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
DMA_CCR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
- "total length=%d dev_addr=0x%08x (dev2mem)\n",
- __func__, imxdmac->channel, d->sg, d->sgcount,
- d->len, imxdmac->per_address);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
+ __func__, imxdmac->channel,
+ d->sg, d->sgcount, d->len,
+ (unsigned long long)imxdmac->per_address);
} else if (d->direction == DMA_MEM_TO_DEV) {
imx_dmav1_writel(imxdma, imxdmac->per_address,
DMA_DAR(imxdmac->channel));
imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
DMA_CCR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
- "total length=%d dev_addr=0x%08x (mem2dev)\n",
- __func__, imxdmac->channel, d->sg, d->sgcount,
- d->len, imxdmac->per_address);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
+ __func__, imxdmac->channel,
+ d->sg, d->sgcount, d->len,
+ (unsigned long long)imxdmac->per_address);
} else {
dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
__func__, imxdmac->channel);
@@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
desc->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */
desc->desc.flags = DMA_CTRL_ACK;
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
list_add_tail(&desc->node, &imxdmac->ld_free);
imxdmac->descs_allocated++;
@@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
int i;
unsigned int periods = buf_len / period_len;
- dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
+ dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
__func__, imxdmac->channel, buf_len, period_len);
if (list_empty(&imxdmac->ld_free) ||
@@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
- dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
- __func__, imxdmac->channel, src, dest, len);
+ dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
+ __func__, imxdmac->channel, (unsigned long long)src,
+ (unsigned long long)dest, len);
if (list_empty(&imxdmac->ld_free) ||
imxdma_chan_is_doing_cyclic(imxdmac))
@@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
- dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
- " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
- imxdmac->channel, xt->src_start, xt->dst_start,
+ dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
+ " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
+ imxdmac->channel, (unsigned long long)xt->src_start,
+ (unsigned long long) xt->dst_start,
xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
xt->numf, xt->frame_size);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fc43603cf0b..c75679d4202 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (error)
sdmac->status = DMA_ERROR;
else
- sdmac->status = DMA_SUCCESS;
+ sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
if (sdmac->desc.callback)
@@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
param &= ~BD_CONT;
}
- dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
- i, count, sg->dma_address,
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
+ i, count, (u64)sg->dma_address,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
if (i + 1 == num_periods)
param |= BD_WRAP;
- dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
- i, period_len, dma_addr,
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
+ i, period_len, (u64)dma_addr,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@@ -1432,6 +1432,10 @@ static int __init sdma_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
return -ENOMEM;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a975ebebea8..1aab8130efa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
callback_txd(param_txd);
}
if (midc->raw_tfr) {
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
if (desc->lli != NULL) {
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
+ if (ret != DMA_COMPLETE) {
spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5ff6fc1819d..1a49c777607 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
- size_t len, struct ioat_dma_descriptor *hw)
-{
- struct pci_dev *pdev = chan->device->pdev;
- size_t offset = len - hw->size;
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
- ioat_unmap(pdev, hw->dst_addr - offset, len,
- PCI_DMA_FROMDEVICE, flags, 1);
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
- ioat_unmap(pdev, hw->src_addr - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
-}
-
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
dma_addr_t phys_complete;
@@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
dma_cookie_complete(tx);
- ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+ dma_descriptor_unmap(tx);
ioat->active -= desc->hw->tx_cnt;
if (tx->callback) {
tx->callback(tx->callback_param);
@@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
device->cleanup_fn((unsigned long) c);
@@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
- DMA_PREP_INTERRUPT;
+ flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
if (!tx) {
@@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (tmo == 0 ||
dma->device_tx_status(dma_chan, cookie, NULL)
- != DMA_SUCCESS) {
+ != DMA_COMPLETE) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto unmap_dma;
@@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
- "set ioat interrupt style: msix (default), "
- "msix-single-vector, msi, intx)");
+ "set ioat interrupt style: msix (default), msi, intx");
/**
* ioat_dma_setup_interrupts - setup interrupt handler
@@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
if (!strcmp(ioat_interrupt_style, "msix"))
goto msix;
- if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
- goto msix_single_vector;
if (!strcmp(ioat_interrupt_style, "msi"))
goto msi;
if (!strcmp(ioat_interrupt_style, "intx"))
@@ -920,10 +901,8 @@ msix:
device->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
- if (err < 0)
+ if (err)
goto msi;
- if (err > 0)
- goto msix_single_vector;
for (i = 0; i < msixcnt; i++) {
msix = &device->msix_entries[i];
@@ -937,29 +916,13 @@ msix:
chan = ioat_chan_by_index(device, j);
devm_free_irq(dev, msix->vector, chan);
}
- goto msix_single_vector;
+ goto msi;
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
device->irq_mode = IOAT_MSIX;
goto done;
-msix_single_vector:
- msix = &device->msix_entries[0];
- msix->entry = 0;
- err = pci_enable_msix(pdev, device->msix_entries, 1);
- if (err)
- goto msi;
-
- err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
- "ioat-msix", device);
- if (err) {
- pci_disable_msix(pdev);
- goto msi;
- }
- device->irq_mode = IOAT_MSIX_SINGLE;
- goto done;
-
msi:
err = pci_enable_msi(pdev);
if (err)
@@ -971,7 +934,7 @@ msi:
pci_disable_msi(pdev);
goto intx;
}
- device->irq_mode = IOAT_MSIX;
+ device->irq_mode = IOAT_MSI;
goto done;
intx:
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 54fb7b9ff9a..11fb877ddca 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -52,7 +52,6 @@
enum ioat_irq_mode {
IOAT_NOIRQ = 0,
IOAT_MSIX,
- IOAT_MSIX_SINGLE,
IOAT_MSI,
IOAT_INTX
};
@@ -83,7 +82,6 @@ struct ioatdma_device {
struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
- struct kmem_cache *sed_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
@@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
- int direction, enum dma_ctrl_flags flags, bool dst)
-{
- if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
- (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
- pci_unmap_single(pdev, addr, len, direction);
- else
- pci_unmap_page(pdev, addr, len, direction);
-}
-
int ioat_probe(struct ioatdma_device *device);
int ioat_register(struct ioatdma_device *device);
int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
- size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b925e1b1d13..5d3affe7e97 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
- ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+ dma_descriptor_unmap(tx);
dma_cookie_complete(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 212d584fe42..470292767e6 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-void ioat3_dma_remove(struct ioatdma_device *dev);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d8ececaf1b5..820817e97e6 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -67,6 +67,8 @@
#include "dma.h"
#include "dma_v2.h"
+extern struct kmem_cache *ioat3_sed_cache;
+
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
@@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6 };
-/*
- * technically sources 1 and 2 do not require SED, but the op will have
- * at least 9 descriptors so that's irrelevant.
- */
-static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1 };
-
static void ioat3_eh(struct ioat2_dma_chan *ioat);
-static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
- struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
- return raw->field[xor_idx_to_field[idx]];
-}
-
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, int idx)
{
@@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
pq->coef[idx] = coef;
}
-static int sed_get_pq16_pool_idx(int src_cnt)
-{
-
- return pq16_idx_to_sed[src_cnt];
-}
-
static bool is_jf_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
@@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
struct ioat_sed_ent *sed;
gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
- sed = kmem_cache_alloc(device->sed_pool, flags);
+ sed = kmem_cache_alloc(ioat3_sed_cache, flags);
if (!sed)
return NULL;
@@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
flags, &sed->dma);
if (!sed->hw) {
- kmem_cache_free(device->sed_pool, sed);
+ kmem_cache_free(ioat3_sed_cache, sed);
return NULL;
}
@@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s
return;
dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
- kmem_cache_free(device->sed_pool, sed);
-}
-
-static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
- struct ioat_ring_ent *desc, int idx)
-{
- struct ioat_chan_common *chan = &ioat->base;
- struct pci_dev *pdev = chan->device->pdev;
- size_t len = desc->len;
- size_t offset = len - desc->hw->size;
- struct dma_async_tx_descriptor *tx = &desc->txd;
- enum dma_ctrl_flags flags = tx->flags;
-
- switch (desc->hw->ctl_f.op) {
- case IOAT_OP_COPY:
- if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
- ioat_dma_unmap(chan, flags, len, desc->hw);
- break;
- case IOAT_OP_XOR_VAL:
- case IOAT_OP_XOR: {
- struct ioat_xor_descriptor *xor = desc->xor;
- struct ioat_ring_ent *ext;
- struct ioat_xor_ext_descriptor *xor_ex = NULL;
- int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
- struct ioat_raw_descriptor *descs[2];
- int i;
-
- if (src_cnt > 5) {
- ext = ioat2_get_ring_ent(ioat, idx + 1);
- xor_ex = ext->xor_ex;
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- descs[0] = (struct ioat_raw_descriptor *) xor;
- descs[1] = (struct ioat_raw_descriptor *) xor_ex;
- for (i = 0; i < src_cnt; i++) {
- dma_addr_t src = xor_get_src(descs, i);
-
- ioat_unmap(pdev, src - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
- }
-
- /* dest is a source in xor validate operations */
- if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
- ioat_unmap(pdev, xor->dst_addr - offset, len,
- PCI_DMA_TODEVICE, flags, 1);
- break;
- }
- }
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
- ioat_unmap(pdev, xor->dst_addr - offset, len,
- PCI_DMA_FROMDEVICE, flags, 1);
- break;
- }
- case IOAT_OP_PQ_VAL:
- case IOAT_OP_PQ: {
- struct ioat_pq_descriptor *pq = desc->pq;
- struct ioat_ring_ent *ext;
- struct ioat_pq_ext_descriptor *pq_ex = NULL;
- int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
- struct ioat_raw_descriptor *descs[2];
- int i;
-
- if (src_cnt > 3) {
- ext = ioat2_get_ring_ent(ioat, idx + 1);
- pq_ex = ext->pq_ex;
- }
-
- /* in the 'continue' case don't unmap the dests as sources */
- if (dmaf_p_disabled_continue(flags))
- src_cnt--;
- else if (dmaf_continue(flags))
- src_cnt -= 3;
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- descs[0] = (struct ioat_raw_descriptor *) pq;
- descs[1] = (struct ioat_raw_descriptor *) pq_ex;
- for (i = 0; i < src_cnt; i++) {
- dma_addr_t src = pq_get_src(descs, i);
-
- ioat_unmap(pdev, src - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
- }
-
- /* the dests are sources in pq validate operations */
- if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset,
- len, PCI_DMA_TODEVICE, flags, 0);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset,
- len, PCI_DMA_TODEVICE, flags, 0);
- break;
- }
- }
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- }
- break;
- }
- case IOAT_OP_PQ_16S:
- case IOAT_OP_PQ_VAL_16S: {
- struct ioat_pq_descriptor *pq = desc->pq;
- int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
- struct ioat_raw_descriptor *descs[4];
- int i;
-
- /* in the 'continue' case don't unmap the dests as sources */
- if (dmaf_p_disabled_continue(flags))
- src_cnt--;
- else if (dmaf_continue(flags))
- src_cnt -= 3;
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- descs[0] = (struct ioat_raw_descriptor *)pq;
- descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
- descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
- for (i = 0; i < src_cnt; i++) {
- dma_addr_t src = pq16_get_src(descs, i);
-
- ioat_unmap(pdev, src - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
- }
-
- /* the dests are sources in pq validate operations */
- if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset,
- len, PCI_DMA_TODEVICE,
- flags, 0);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset,
- len, PCI_DMA_TODEVICE,
- flags, 0);
- break;
- }
- }
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- }
- break;
- }
- default:
- dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
- __func__, desc->hw->ctl_f.op);
- }
+ kmem_cache_free(ioat3_sed_cache, sed);
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
@@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
- ioat3_dma_unmap(ioat, desc, idx + i);
+ dma_descriptor_unmap(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
@@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
ioat3_cleanup(ioat);
@@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
u8 op;
int i, s, idx, num_descs;
- /* this function only handles src_cnt 9 - 16 */
- BUG_ON(src_cnt < 9);
-
/* this function is only called with 9-16 sources */
op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
@@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
descs[0] = (struct ioat_raw_descriptor *) pq;
- desc->sed = ioat3_alloc_sed(device,
- sed_get_pq16_pool_idx(src_cnt));
+ desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
if (!desc->sed) {
dev_err(to_dev(chan),
"%s: no free sed entries\n", __func__);
@@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
return &desc->txd;
}
+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+ if (dmaf_p_disabled_continue(flags))
+ return src_cnt + 1;
+ else if (dmaf_continue(flags))
+ return src_cnt + 3;
+ else
+ return src_cnt;
+}
+
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
- struct dma_device *dma = chan->device;
-
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
@@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
2, single_source_coef, len,
flags) :
@@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef, len, flags);
} else {
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
@@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
- struct dma_device *dma = chan->device;
-
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
@@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
*/
*pqres = 0;
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
@@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
- struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
@@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
- struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
-
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
@@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
- DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
@@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
+ &xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
@@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
+ memset(page_address(dest), 0, PAGE_SIZE);
+
/* test for non-zero parity sum */
op = IOAT_OP_XOR_VAL;
@@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
+ &xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
@@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
- int msixcnt = device->common.chancnt;
struct pci_dev *pdev = device->pdev;
- int i;
- struct msix_entry *msix;
- struct ioat_chan_common *chan;
- int err = 0;
+ int irq = pdev->irq, i;
+
+ if (!is_bwd_ioat(pdev))
+ return 0;
switch (device->irq_mode) {
case IOAT_MSIX:
+ for (i = 0; i < device->common.chancnt; i++) {
+ struct msix_entry *msix = &device->msix_entries[i];
+ struct ioat_chan_common *chan;
- for (i = 0; i < msixcnt; i++) {
- msix = &device->msix_entries[i];
chan = ioat_chan_by_index(device, i);
devm_free_irq(&pdev->dev, msix->vector, chan);
}
pci_disable_msix(pdev);
break;
-
- case IOAT_MSIX_SINGLE:
- msix = &device->msix_entries[0];
- chan = ioat_chan_by_index(device, 0);
- devm_free_irq(&pdev->dev, msix->vector, chan);
- pci_disable_msix(pdev);
- break;
-
case IOAT_MSI:
- chan = ioat_chan_by_index(device, 0);
- devm_free_irq(&pdev->dev, pdev->irq, chan);
pci_disable_msi(pdev);
- break;
-
+ /* fall through */
case IOAT_INTX:
- chan = ioat_chan_by_index(device, 0);
- devm_free_irq(&pdev->dev, pdev->irq, chan);
+ devm_free_irq(&pdev->dev, irq, device);
break;
-
default:
return 0;
}
-
device->irq_mode = IOAT_NOIRQ;
- err = ioat_dma_setup_interrupts(device);
-
- return err;
+ return ioat_dma_setup_interrupts(device);
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
@@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
}
err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
- if (err) {
- dev_err(&pdev->dev, "Failed to reset!\n");
- return err;
- }
-
- if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+ if (!err)
err = ioat3_irq_reinit(device);
+ if (err)
+ dev_err(&pdev->dev, "Failed to reset: %d\n", err);
+
return err;
}
@@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
char pool_name[14];
int i;
- /* allocate sw descriptor pool for SED */
- device->sed_pool = kmem_cache_create("ioat_sed",
- sizeof(struct ioat_sed_ent), 0, 0, NULL);
- if (!device->sed_pool)
- return -ENOMEM;
-
for (i = 0; i < MAX_SED_POOLS; i++) {
snprintf(pool_name, 14, "ioat_hw%d_sed", i);
/* allocate SED DMA pool */
- device->sed_hw_pool[i] = dma_pool_create(pool_name,
+ device->sed_hw_pool[i] = dmam_pool_create(pool_name,
&pdev->dev,
SED_SIZE * (i + 1), 64, 0);
if (!device->sed_hw_pool[i])
- goto sed_pool_cleanup;
+ return -ENOMEM;
}
}
@@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
-
-sed_pool_cleanup:
- if (device->sed_pool) {
- int i;
- kmem_cache_destroy(device->sed_pool);
-
- for (i = 0; i < MAX_SED_POOLS; i++)
- if (device->sed_hw_pool[i])
- dma_pool_destroy(device->sed_hw_pool[i]);
- }
-
- return -ENOMEM;
-}
-
-void ioat3_dma_remove(struct ioatdma_device *device)
-{
- if (device->sed_pool) {
- int i;
- kmem_cache_destroy(device->sed_pool);
-
- for (i = 0; i < MAX_SED_POOLS; i++)
- if (device->sed_hw_pool[i])
- dma_pool_destroy(device->sed_hw_pool[i]);
- }
}
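Switching the hardware descriptor pools to dmam_pool_create() and the software descriptors to a global kmem_cache is what lets the sed_pool_cleanup unwind path and ioat3_dma_remove() above disappear: devres frees the pools when the device is unbound. A minimal sketch of the pattern, with a made-up pool name and sizes:

	struct dma_pool *pool;

	/* released automatically when &pdev->dev is unbound */
	pool = dmam_pool_create("example_sed", &pdev->dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* dma_pool_alloc()/dma_pool_free() are used exactly as before;
	 * only the explicit dma_pool_destroy() on teardown goes away. */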
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 2c8d560e633..1d051cd045d 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
struct kmem_cache *ioat2_cache;
+struct kmem_cache *ioat3_sed_cache;
#define DRV_NAME "ioatdma"
@@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
- if (device->version >= IOAT_VER_3_0)
- ioat3_dma_remove(device);
-
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
@@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev)
static int __init ioat_init_module(void)
{
- int err;
+ int err = -ENOMEM;
pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
DRV_NAME, IOAT_DMA_VERSION);
@@ -231,9 +229,21 @@ static int __init ioat_init_module(void)
if (!ioat2_cache)
return -ENOMEM;
+ ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+ if (!ioat3_sed_cache)
+ goto err_ioat2_cache;
+
err = pci_register_driver(&ioat_pci_driver);
if (err)
- kmem_cache_destroy(ioat2_cache);
+ goto err_ioat3_cache;
+
+ return 0;
+
+ err_ioat3_cache:
+ kmem_cache_destroy(ioat3_sed_cache);
+
+ err_ioat2_cache:
+ kmem_cache_destroy(ioat2_cache);
return err;
}
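KMEM_CACHE(), used above to create ioat3_sed_cache, is a convenience macro that derives the cache name, object size and alignment from the struct definition. A rough equivalence, using a hypothetical struct purely for illustration:

	struct example_ent { int x; };

	/* these two calls create essentially the same cache */
	cache = KMEM_CACHE(example_ent, 0);
	cache = kmem_cache_create("example_ent", sizeof(struct example_ent),
				  __alignof__(struct example_ent), 0, NULL);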
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index dd8b44a56e5..c56137bc386 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
}
}
-static void
-iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
- struct dma_async_tx_descriptor *tx = &desc->async_tx;
- struct iop_adma_desc_slot *unmap = desc->group_head;
- struct device *dev = &iop_chan->device->pdev->dev;
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = tx->flags;
- u32 src_cnt;
- dma_addr_t addr;
- dma_addr_t dest;
-
- src_cnt = unmap->unmap_src_cnt;
- dest = iop_desc_get_dest_addr(unmap, iop_chan);
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- enum dma_data_direction dir;
-
- if (src_cnt > 1) /* is xor? */
- dir = DMA_BIDIRECTIONAL;
- else
- dir = DMA_FROM_DEVICE;
-
- dma_unmap_page(dev, dest, len, dir);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
- if (addr == dest)
- continue;
- dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
- }
- }
- desc->group_head = NULL;
-}
-
-static void
-iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
- struct dma_async_tx_descriptor *tx = &desc->async_tx;
- struct iop_adma_desc_slot *unmap = desc->group_head;
- struct device *dev = &iop_chan->device->pdev->dev;
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = tx->flags;
- u32 src_cnt = unmap->unmap_src_cnt;
- dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
- dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
- int i;
-
- if (tx->flags & DMA_PREP_CONTINUE)
- src_cnt -= 3;
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
- dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
- dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- dma_addr_t addr;
-
- for (i = 0; i < src_cnt; i++) {
- addr = iop_desc_get_src_addr(unmap, iop_chan, i);
- dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
- }
- if (desc->pq_check_result) {
- dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
- dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
- }
- }
-
- desc->group_head = NULL;
-}
-
-
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
if (tx->callback)
tx->callback(tx->callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- */
- if (desc->group_head && desc->unmap_len) {
- if (iop_desc_is_pq(desc))
- iop_desc_unmap_pq(iop_chan, desc);
- else
- iop_desc_unmap(iop_chan, desc);
- }
+ dma_descriptor_unmap(tx);
+ if (desc->group_head)
+ desc->group_head = NULL;
}
/* run dependent operations */
@@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
if (sw_desc) {
grp_start = sw_desc->group_head;
iop_desc_init_interrupt(grp_start, iop_chan);
- grp_start->unmap_len = 0;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
iop_desc_set_memcpy_src_addr(grp_start, dma_src);
- sw_desc->unmap_src_cnt = 1;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_xor_src_addr(grp_start, src_cnt,
@@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__func__, grp_start->xor_check_result);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
dst[0] = dst[1] & 0x7;
iop_desc_set_pq_addr(g, dst);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
for (i = 0; i < src_cnt; i++)
iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
g->pq_check_result = pqres;
pr_debug("\t%s: g->pq_check_result: %p\n",
__func__, g->pq_check_result);
- sw_desc->unmap_src_cnt = src_cnt+2;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
@@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
int ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
msleep(1);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1158,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb9c0bc317e..128ca143486 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
descnew = desc;
- dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
- irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+ dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
+ irq, (u64)sg_dma_address(*sg),
+ sgnext ? (u64)sg_dma_address(sgnext) : 0,
+ ichan->active_buffer, curbuf);
/* Find the descriptor of sgnext */
sgnew = idmac_sg_next(ichan, &descnew, *sg);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a2c330f5f95..e26075408e9 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
irq = platform_get_irq(op, 0);
ret = devm_request_irq(&op->dev, irq,
- k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+ k3_dma_int_handler, 0, DRIVER_NAME, d);
if (ret)
return ret;
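The DMA_SUCCESS -> DMA_COMPLETE changes in this and the surrounding drivers are mechanical (the enum value is being renamed tree-wide), and the dropped IRQF_DISABLED flag has been a no-op for some time. For reference, a hedged sketch of the status idiom these hunks touch, with a made-up function name:

	static enum dma_status example_tx_status(struct dma_chan *chan,
						 dma_cookie_t cookie,
						 struct dma_tx_state *state)
	{
		enum dma_status ret;

		ret = dma_cookie_status(chan, cookie, state);
		if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
			return ret;

		/* otherwise report residue / run driver-specific cleanup */
		return ret;
	}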
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index ff8d7827f8c..dcb1e05149a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
* move the descriptors to a temporary list so we can drop
* the lock during the entire cleanup operation
*/
- list_del(&desc->node);
- list_add(&desc->node, &chain_cleanup);
+ list_move(&desc->node, &chain_cleanup);
/*
* Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
if (irq) {
ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+ mmp_pdma_chan_handler, 0, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+ mmp_pdma_int_handler, 0, "pdma", pdev);
if (ret)
return ret;
}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 38cb517fb2e..3ddacc14a73 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -62,6 +62,11 @@
#define TDCR_BURSTSZ_16B (0x3 << 6)
#define TDCR_BURSTSZ_32B (0x6 << 6)
#define TDCR_BURSTSZ_64B (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
#define TDCR_BURSTSZ_128B (0x5 << 6)
#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
/* disable irq */
writel(0, tdmac->reg_base + TDIMR);
- tdmac->status = DMA_SUCCESS;
+ tdmac->status = DMA_COMPLETE;
}
static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
return -EINVAL;
}
} else if (tdmac->type == PXA910_SQU) {
- tdcr |= TDCR_BURSTSZ_SQU_32B;
tdcr |= TDCR_SSPMOD;
+
+ switch (tdmac->burst_sz) {
+ case 1:
+ tdcr |= TDCR_BURSTSZ_SQU_1B;
+ break;
+ case 2:
+ tdcr |= TDCR_BURSTSZ_SQU_2B;
+ break;
+ case 4:
+ tdcr |= TDCR_BURSTSZ_SQU_4B;
+ break;
+ case 8:
+ tdcr |= TDCR_BURSTSZ_SQU_8B;
+ break;
+ case 16:
+ tdcr |= TDCR_BURSTSZ_SQU_16B;
+ break;
+ case 32:
+ tdcr |= TDCR_BURSTSZ_SQU_32B;
+ break;
+ default:
+ dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ return -EINVAL;
+ }
}
writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
if (tdmac->irq) {
ret = devm_request_irq(tdmac->dev, tdmac->irq,
- mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+ mmp_tdma_chan_handler, 0, "tdma", tdmac);
if (ret)
return ret;
}
@@ -350,12 +378,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
if (!gpool)
return NULL;
- tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
- if (!tdmac->desc_arr)
- return NULL;
-
- tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
- (unsigned long)tdmac->desc_arr);
+ tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);
return tdmac->desc_arr;
}
@@ -370,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
int num_periods = buf_len / period_len;
int i = 0, buf = 0;
- if (tdmac->status != DMA_SUCCESS)
+ if (tdmac->status != DMA_COMPLETE)
return NULL;
if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -504,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->idx = idx;
tdmac->type = type;
tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
- tdmac->status = DMA_SUCCESS;
+ tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -559,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
- mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+ mmp_tdma_int_handler, 0, "tdma", tdev);
if (ret)
return ret;
}
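The new TDCR_BURSTSZ_SQU_* cases above are selected from the burst size the peripheral driver requested through its slave configuration. A hedged consumer-side sketch; the FIFO address and burst value are illustrative, and how the value is interpreted (bytes vs. bus words) is driver-specific and unchanged by this patch:

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,	/* hypothetical FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 32,
	};

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;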
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2fe43537733..448750da440 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -39,7 +39,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/random.h>
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 536dcb8ba5f..7807f0ef4e2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
return hw_desc->phy_dest_addr;
}
-static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
- int src_idx)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
-}
-
-
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
desc->async_tx.callback(
desc->async_tx.callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- */
- if (desc->group_head && desc->unmap_len) {
- struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev = mv_chan_to_devp(mv_chan);
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = desc->async_tx.flags;
- u32 src_cnt;
- dma_addr_t addr;
- dma_addr_t dest;
-
- src_cnt = unmap->unmap_src_cnt;
- dest = mv_desc_get_dest_addr(unmap);
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- enum dma_data_direction dir;
-
- if (src_cnt > 1) /* is xor ? */
- dir = DMA_BIDIRECTIONAL;
- else
- dir = DMA_FROM_DEVICE;
- dma_unmap_page(dev, dest, len, dir);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = mv_desc_get_src_addr(unmap,
- src_cnt);
- if (addr == dest)
- continue;
- dma_unmap_page(dev, addr, len,
- DMA_TO_DEVICE);
- }
- }
+ dma_descriptor_unmap(&desc->async_tx);
+ if (desc->group_head)
desc->group_head = NULL;
- }
}
/* run dependent operations */
@@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS) {
+ if (ret == DMA_COMPLETE) {
mv_xor_clean_completed_slots(mv_chan);
return ret;
}
@@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
msleep(1);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
msleep(8);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
}
mv_chan->mmr_base = xordev->xor_base;
- if (!mv_chan->mmr_base) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ mv_chan->mmr_high_base = xordev->xor_high_base;
tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
mv_chan);
@@ -1138,7 +1094,7 @@ static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
- void __iomem *base = xordev->xor_base;
+ void __iomem *base = xordev->xor_high_base;
u32 win_enable = 0;
int i;
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 06b067f24c9..d0749229c87 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -34,13 +34,13 @@
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_DESCRIPTOR_SWAP BIT(14)
-#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
-#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
-#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
-#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
-#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
-#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
-#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
+#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
+#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
+#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
+#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
+#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
+#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0)
+#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4)
#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
@@ -50,11 +50,11 @@
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE 0x3F5
-#define WINDOW_BASE(w) (0x250 + ((w) << 2))
-#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
-#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
-#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
-#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
+#define WINDOW_BASE(w) (0x50 + ((w) << 2))
+#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
+#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2))
+#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))
struct mv_xor_device {
void __iomem *xor_base;
@@ -82,6 +82,7 @@ struct mv_xor_chan {
int pending;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
+ void __iomem *mmr_high_base;
unsigned int idx;
int irq;
enum dma_transaction_type current_type;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ccd13df841d..ead491346da 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
+#include <linux/list.h>
#include <asm/irq.h>
@@ -57,6 +58,9 @@
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
+#define HW_APBHX_CHn_BAR(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
+#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
/*
* ccw bits definitions
@@ -115,7 +119,9 @@ struct mxs_dma_chan {
int desc_count;
enum dma_status status;
unsigned int flags;
+ bool reset;
#define MXS_DMA_SG_LOOP (1 << 0)
+#define MXS_DMA_USE_SEMAPHORE (1 << 1)
};
#define MXS_DMA_CHANNELS 16
@@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
+ /*
+ * mxs dma channel resets can cause a channel stall. To recover from a
+ * channel stall, we have to reset the whole DMA engine. To avoid this,
+	 * we use cyclic DMA with semaphores that are incremented in
+ * mxs_dma_int_handler. To reset the channel, we can simply stop writing
+ * into the semaphore counter.
+ */
+ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
+ mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ mxs_chan->reset = true;
+ } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
- else
+ } else {
+ unsigned long elapsed = 0;
+ const unsigned long max_wait = 50000; /* 50ms */
+ void __iomem *reg_dbg1 = mxs_dma->base +
+ HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
+
+ /*
+ * On i.MX28 APBX, the DMA channel can stop working if we reset
+ * the channel while it is in READ_FLUSH (0x08) state.
+ * We wait here until we leave the state. Then we trigger the
+	 * reset. We cap the wait at 50ms so that this loop cannot stall
+	 * the kernel indefinitely.
+ */
+ while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
+ udelay(100);
+ elapsed += 100;
+ }
+
+ if (elapsed >= max_wait)
+ dev_err(&mxs_chan->mxs_dma->pdev->dev,
+ "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
+ chan_id);
+
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
+ }
+
+ mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
- writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
+ mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ /* A cyclic DMA consists of at least 2 segments, so initialize
+ * the semaphore with 2 so we have enough time to add 1 to the
+ * semaphore if we need to */
+ writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+ } else {
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+ }
+ mxs_chan->reset = false;
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- mxs_chan->status = DMA_SUCCESS;
+ mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data)
mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}
+static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
+{
+ int i;
+
+ for (i = 0; i != mxs_dma->nr_channels; ++i)
+ if (mxs_dma->mxs_chans[i].chan_irq == irq)
+ return i;
+
+ return -EINVAL;
+}
+
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
struct mxs_dma_engine *mxs_dma = dev_id;
- u32 stat1, stat2;
+ struct mxs_dma_chan *mxs_chan;
+ u32 completed;
+ u32 err;
+ int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
+
+ if (chan < 0)
+ return IRQ_NONE;
/* completion status */
- stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
- stat1 &= MXS_DMA_CHANNELS_MASK;
- writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
+ completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
+ completed = (completed >> chan) & 0x1;
+
+ /* Clear interrupt */
+ writel((1 << chan),
+ mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
- stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
- writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
+ err = readl(mxs_dma->base + HW_APBHX_CTRL2);
+ err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);
+
+ /*
+ * error status bit is in the upper 16 bits, error irq bit in the lower
+ * 16 bits. We transform it into a simpler error code:
+ * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
+ */
+ err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
+
+ /* Clear error irq */
+ writel((1 << chan),
+ mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handle here in case of either it's (1) a bus
- * error or (2) a termination error with no completion.
+ * an error we need to handle here in case of either it's a bus
+ * error or a termination error with no completion. 0x01 is termination
+ * error, so we can subtract err & completed to get the real error case.
*/
- stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
- (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
-
- /* combine error and completion status for checking */
- stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
- while (stat1) {
- int channel = fls(stat1) - 1;
- struct mxs_dma_chan *mxs_chan =
- &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
-
- if (channel >= MXS_DMA_CHANNELS) {
- dev_dbg(mxs_dma->dma_device.dev,
- "%s: error in channel %d\n", __func__,
- channel - MXS_DMA_CHANNELS);
- mxs_chan->status = DMA_ERROR;
- mxs_dma_reset_chan(mxs_chan);
- } else {
- if (mxs_chan->flags & MXS_DMA_SG_LOOP)
- mxs_chan->status = DMA_IN_PROGRESS;
- else
- mxs_chan->status = DMA_SUCCESS;
- }
+ err -= err & completed;
- stat1 &= ~(1 << channel);
+ mxs_chan = &mxs_dma->mxs_chans[chan];
- if (mxs_chan->status == DMA_SUCCESS)
- dma_cookie_complete(&mxs_chan->desc);
+ if (err) {
+ dev_dbg(mxs_dma->dma_device.dev,
+ "%s: error in channel %d\n", __func__,
+ chan);
+ mxs_chan->status = DMA_ERROR;
+ mxs_dma_reset_chan(mxs_chan);
+ } else if (mxs_chan->status != DMA_COMPLETE) {
+ if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ mxs_chan->status = DMA_IN_PROGRESS;
+ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
+ writel(1, mxs_dma->base +
+ HW_APBHX_CHn_SEMA(mxs_dma, chan));
+ } else {
+ mxs_chan->status = DMA_COMPLETE;
+ }
+ }
- /* schedule tasklet on this channel */
- tasklet_schedule(&mxs_chan->tasklet);
+ if (mxs_chan->status == DMA_COMPLETE) {
+ if (mxs_chan->reset)
+ return IRQ_HANDLED;
+ dma_cookie_complete(&mxs_chan->desc);
}
+ /* schedule tasklet on this channel */
+ tasklet_schedule(&mxs_chan->tasklet);
+
return IRQ_HANDLED;
}
@@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
mxs_chan->status = DMA_IN_PROGRESS;
mxs_chan->flags |= MXS_DMA_SG_LOOP;
+ mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;
if (num_periods > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
@@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= CCW_DEC_SEM;
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
@@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ u32 residue = 0;
+
+ if (mxs_chan->status == DMA_IN_PROGRESS &&
+ mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ struct mxs_dma_ccw *last_ccw;
+ u32 bar;
+
+ last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
+ residue = last_ccw->xfer_bytes + last_ccw->bufaddr;
+
+ bar = readl(mxs_dma->base +
+ HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
+ residue -= bar;
+ }
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+ residue);
return mxs_chan->status;
}
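With the BAR-based residue calculation added above, cyclic users of this controller (audio being the typical case) can query the current position through the standard status call. A hedged consumer-side sketch; buf_len is whatever the client programmed for its cyclic buffer:

	struct dma_tx_state state;
	enum dma_status status;
	unsigned int bytes_done;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS)
		bytes_done = buf_len - state.residue;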
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd916..2f66cf4e54f 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS || !txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24d20b..cdf0483b8f2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
list_move_tail(&desc->node, &pch->dmac->desc_pool);
}
+ dma_descriptor_unmap(&desc->txd);
+
if (callback) {
spin_unlock_irqrestore(&pch->lock, flags);
callback(callback_param);
@@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
return false;
peri_id = chan->private;
- return *peri_id == (unsigned)param;
+ return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
@@ -2903,6 +2905,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = dev_get_platdata(&adev->dev);
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
@@ -2922,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
- irq = adev->irq[0];
- ret = request_irq(irq, pl330_irq_handler, 0,
- dev_name(&adev->dev), pi);
- if (ret)
- return ret;
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = adev->irq[i];
+ if (irq) {
+ ret = devm_request_irq(&adev->dev, irq,
+ pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ return ret;
+ } else {
+ break;
+ }
+ }
pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
- goto probe_err1;
+ return ret;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -3029,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
probe_err3:
- amba_set_drvdata(adev, NULL);
-
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
chan.device_node) {
@@ -3044,8 +3055,6 @@ probe_err3:
}
probe_err2:
pl330_del(pi);
-probe_err1:
- free_irq(irq, pi);
return ret;
}
@@ -3055,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- int irq;
if (!pdmac)
return 0;
@@ -3064,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev)
of_dma_controller_free(adev->dev.of_node);
dma_async_device_unregister(&pdmac->ddma);
- amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
@@ -3082,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev)
pl330_del(pi);
- irq = adev->irq[0];
- free_irq(irq, pi);
-
return 0;
}
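dma_set_mask_and_coherent(), newly called in the pl330 probe above, sets the streaming and coherent DMA masks in one go. A rough equivalence, for illustration only:

	/* single call ... */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	/* ... roughly the same as the older two-step pattern */
	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));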
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 370ff826563..8da48c6b2a3 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -42,6 +42,8 @@
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
@@ -802,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
}
/**
- * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
- */
-static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, int src_idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
- /* May have 0, 1, 2, or 3 sources */
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DFILL128:
- return 0;
- case DMA_CDB_OPC_DCHECK128:
- if (unlikely(src_idx)) {
- printk(KERN_ERR "%s: try to get %d source for"
- " DCHECK128\n", __func__, src_idx);
- BUG();
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- case DMA_CDB_OPC_MULTICAST:
- case DMA_CDB_OPC_MV_SG1_SG2:
- if (unlikely(src_idx > 2)) {
- printk(KERN_ERR "%s: try to get %d source from"
- " DMA descr\n", __func__, src_idx);
- BUG();
- }
- if (src_idx) {
- if (le32_to_cpu(dma_hw_desc->sg1u) &
- DMA_CUED_XOR_WIN_MSK) {
- u8 region;
-
- if (src_idx == 1)
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- desc->unmap_len;
-
- region = (le32_to_cpu(
- dma_hw_desc->sg1u)) >>
- DMA_CUED_REGION_OFF;
-
- region &= DMA_CUED_REGION_MSK;
- switch (region) {
- case DMA_RXOR123:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len << 1);
- case DMA_RXOR124:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len * 3);
- case DMA_RXOR125:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len << 2);
- default:
- printk(KERN_ERR
- "%s: try to"
- " get src3 for region %02x"
- "PPC440SPE_DESC_RXOR12?\n",
- __func__, region);
- BUG();
- }
- } else {
- printk(KERN_ERR
- "%s: try to get %d"
- " source for non-cued descr\n",
- __func__, src_idx);
- BUG();
- }
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- case PPC440SPE_XOR_ID:
- /* May have up to 16 sources */
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->ops[src_idx].l;
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_dest_addr - extract the destination address from the
- * descriptor
- */
-static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, int idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- if (likely(!idx))
- return le32_to_cpu(dma_hw_desc->sg2l);
- return le32_to_cpu(dma_hw_desc->sg3l);
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->cbtal;
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_src_num - extract the number of source addresses from
- * the descriptor
- */
-static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DFILL128:
- return 0;
- case DMA_CDB_OPC_DCHECK128:
- return 1;
- case DMA_CDB_OPC_MV_SG1_SG2:
- case DMA_CDB_OPC_MULTICAST:
- /*
- * Only for RXOR operations we have more than
- * one source
- */
- if (le32_to_cpu(dma_hw_desc->sg1u) &
- DMA_CUED_XOR_WIN_MSK) {
- /* RXOR op, there are 2 or 3 sources */
- if (((le32_to_cpu(dma_hw_desc->sg1u) >>
- DMA_CUED_REGION_OFF) &
- DMA_CUED_REGION_MSK) == DMA_RXOR12) {
- /* RXOR 1-2 */
- return 2;
- } else {
- /* RXOR 1-2-3/1-2-4/1-2-5 */
- return 3;
- }
- }
- return 1;
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- case PPC440SPE_XOR_ID:
- /* up to 16 sources */
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
- default:
- BUG();
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_dst_num - get the number of destination addresses in
- * this descriptor
- */
-static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_cdb *dma_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* May be 1 or 2 destinations */
- dma_hw_desc = desc->hw_desc;
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DCHECK128:
- return 0;
- case DMA_CDB_OPC_MV_SG1_SG2:
- case DMA_CDB_OPC_DFILL128:
- return 1;
- case DMA_CDB_OPC_MULTICAST:
- if (desc->dst_cnt == 2)
- return 2;
- else
- return 1;
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- case PPC440SPE_XOR_ID:
- /* Always only 1 destination */
- return 1;
- default:
- BUG();
- }
- return 0;
-}
-
-/**
* ppc440spe_desc_get_link - get the address of the descriptor that
* follows this one
*/
@@ -1705,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
}
}
-static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *desc)
-{
- u32 src_cnt, dst_cnt;
- dma_addr_t addr;
-
- /*
- * get the number of sources & destination
- * included in this descriptor and unmap
- * them all
- */
- src_cnt = ppc440spe_desc_get_src_num(desc, chan);
- dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
-
- /* unmap destinations */
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- while (dst_cnt--) {
- addr = ppc440spe_desc_get_dest_addr(
- desc, chan, dst_cnt);
- dma_unmap_page(chan->device->dev,
- addr, desc->unmap_len,
- DMA_FROM_DEVICE);
- }
- }
-
- /* unmap sources */
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = ppc440spe_desc_get_src_addr(
- desc, chan, src_cnt);
- dma_unmap_page(chan->device->dev,
- addr, desc->unmap_len,
- DMA_TO_DEVICE);
- }
- }
-}
-
/**
* ppc440spe_adma_run_tx_complete_actions - call functions to be called
* upon completion
@@ -1765,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
desc->async_tx.callback(
desc->async_tx.callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- *
- * actually, ppc's dma_unmap_page() functions are empty, so
- * the following code is just for the sake of completeness
- */
- if (chan && chan->needs_unmap && desc->group_head &&
- desc->unmap_len) {
- struct ppc440spe_adma_desc_slot *unmap =
- desc->group_head;
- /* assume 1 slot per op always */
- u32 slot_count = unmap->slot_cnt;
-
- /* Run through the group list and unmap addresses */
- for (i = 0; i < slot_count; i++) {
- BUG_ON(!unmap);
- ppc440spe_adma_unmap(chan, unmap);
- unmap = unmap->hw_next;
- }
- }
+ dma_descriptor_unmap(&desc->async_tx);
}
/* run dependent operations */
@@ -3891,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
ppc440spe_adma_slot_cleanup(ppc440spe_chan);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
new file mode 100644
index 00000000000..4cb12797863
--- /dev/null
+++ b/drivers/dma/s3c24xx-dma.c
@@ -0,0 +1,1350 @@
+/*
+ * S3C24XX DMA handling
+ *
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on amba-pl08x.c
+ *
+ * Copyright (c) 2006 ARM Ltd.
+ * Copyright (c) 2010 ST-Ericsson SA
+ *
+ * Author: Peter Pearse <peter.pearse@arm.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
+ * that can be routed to any of the 4 to 8 hardware-channels.
+ *
+ * Therefore on these DMA controllers the number of channels
+ * and the number of incoming DMA signals are two totally different things.
+ * It is usually not possible to handle all physical signals at the same
+ * time, so a multiplexing scheme with possible denial of use is necessary.
+ *
+ * Open items:
+ * - bursts
+ */
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_data/dma-s3c24xx.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define MAX_DMA_CHANNELS 8
+
+#define S3C24XX_DISRC 0x00
+#define S3C24XX_DISRCC 0x04
+#define S3C24XX_DISRCC_INC_INCREMENT 0
+#define S3C24XX_DISRCC_INC_FIXED BIT(0)
+#define S3C24XX_DISRCC_LOC_AHB 0
+#define S3C24XX_DISRCC_LOC_APB BIT(1)
+
+#define S3C24XX_DIDST 0x08
+#define S3C24XX_DIDSTC 0x0c
+#define S3C24XX_DIDSTC_INC_INCREMENT 0
+#define S3C24XX_DIDSTC_INC_FIXED BIT(0)
+#define S3C24XX_DIDSTC_LOC_AHB 0
+#define S3C24XX_DIDSTC_LOC_APB BIT(1)
+#define S3C24XX_DIDSTC_INT_TC0 0
+#define S3C24XX_DIDSTC_INT_RELOAD BIT(2)
+
+#define S3C24XX_DCON 0x10
+
+#define S3C24XX_DCON_TC_MASK 0xfffff
+#define S3C24XX_DCON_DSZ_BYTE (0 << 20)
+#define S3C24XX_DCON_DSZ_HALFWORD (1 << 20)
+#define S3C24XX_DCON_DSZ_WORD (2 << 20)
+#define S3C24XX_DCON_DSZ_MASK (3 << 20)
+#define S3C24XX_DCON_DSZ_SHIFT 20
+#define S3C24XX_DCON_AUTORELOAD 0
+#define S3C24XX_DCON_NORELOAD BIT(22)
+#define S3C24XX_DCON_HWTRIG BIT(23)
+#define S3C24XX_DCON_HWSRC_SHIFT 24
+#define S3C24XX_DCON_SERV_SINGLE 0
+#define S3C24XX_DCON_SERV_WHOLE BIT(27)
+#define S3C24XX_DCON_TSZ_UNIT 0
+#define S3C24XX_DCON_TSZ_BURST4 BIT(28)
+#define S3C24XX_DCON_INT BIT(29)
+#define S3C24XX_DCON_SYNC_PCLK 0
+#define S3C24XX_DCON_SYNC_HCLK BIT(30)
+#define S3C24XX_DCON_DEMAND 0
+#define S3C24XX_DCON_HANDSHAKE BIT(31)
+
+#define S3C24XX_DSTAT 0x14
+#define S3C24XX_DSTAT_STAT_BUSY BIT(20)
+#define S3C24XX_DSTAT_CURRTC_MASK 0xfffff
+
+#define S3C24XX_DMASKTRIG 0x20
+#define S3C24XX_DMASKTRIG_SWTRIG BIT(0)
+#define S3C24XX_DMASKTRIG_ON BIT(1)
+#define S3C24XX_DMASKTRIG_STOP BIT(2)
+
+#define S3C24XX_DMAREQSEL 0x24
+#define S3C24XX_DMAREQSEL_HW BIT(0)
+
+/*
+ * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
+ * for a DMA source. Instead only specific channels are valid.
+ * All of these SoCs have 4 physical channels and the number of request
+ * source bits is 3. Additionally we also need 1 bit to mark the channel
+ * as valid.
+ * Therefore we separate the chansel element of the channel data into 4
+ * parts of 4 bits each, to hold the information if the channel is valid
+ * and the hw request source to use.
+ *
+ * Example:
+ * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
+ * For it the chansel field would look like
+ *
+ * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
+ * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
+ * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2
+ */
+#define S3C24XX_CHANSEL_WIDTH 4
+#define S3C24XX_CHANSEL_VALID BIT(3)
+#define S3C24XX_CHANSEL_REQ_MASK 7
+
+/*
+ * struct soc_data - vendor-specific config parameters for individual SoCs
+ * @stride: spacing between the registers of each channel
+ * @has_reqsel: does the controller use the newer request-selection mechanism
+ * @has_clocks: are controllable dma-clocks present
+ */
+struct soc_data {
+ int stride;
+ bool has_reqsel;
+ bool has_clocks;
+};
+
+/*
+ * enum s3c24xx_dma_chan_state - holds the virtual channel states
+ * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
+ * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum s3c24xx_dma_chan_state {
+ S3C24XX_DMA_CHAN_IDLE,
+ S3C24XX_DMA_CHAN_RUNNING,
+ S3C24XX_DMA_CHAN_WAITING,
+};
+
+/*
+ * struct s3c24xx_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct s3c24xx_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ size_t len;
+ struct list_head node;
+};
+
+/*
+ * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @at: sg currently being transferred
+ * @width: transfer width
+ * @disrcc: value for source control register
+ * @didstc: value for destination control register
+ * @dcon: base value for dcon register
+ */
+struct s3c24xx_txd {
+ struct virt_dma_desc vd;
+ struct list_head dsg_list;
+ struct list_head *at;
+ u8 width;
+ u32 disrcc;
+ u32 didstc;
+ u32 dcon;
+};
+
+struct s3c24xx_dma_chan;
+
+/*
+ * struct s3c24xx_dma_phy - holder for the physical channels
+ * @id: physical index to this channel
+ * @valid: does the channel have all required elements
+ * @base: virtual memory base (remapped) for this channel
+ * @irq: interrupt for this channel
+ * @clk: clock for this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: virtual channel currently being served by this physical channel
+ * @host: a pointer to the host (internal use)
+ */
+struct s3c24xx_dma_phy {
+ unsigned int id;
+ bool valid;
+ void __iomem *base;
+ unsigned int irq;
+ struct clk *clk;
+ spinlock_t lock;
+ struct s3c24xx_dma_chan *serving;
+ struct s3c24xx_dma_engine *host;
+};
+
+/*
+ * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
+ * @id: the id of the channel
+ * @name: name of the channel
+ * @vc: wrapped virtual channel
+ * @phy: the physical channel utilized by this channel, if there is one
+ * @cfg: slave DMA configuration for this channel
+ * @at: active transaction on this channel
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ */
+struct s3c24xx_dma_chan {
+ int id;
+ const char *name;
+ struct virt_dma_chan vc;
+ struct s3c24xx_dma_phy *phy;
+ struct dma_slave_config cfg;
+ struct s3c24xx_txd *at;
+ struct s3c24xx_dma_engine *host;
+ enum s3c24xx_dma_chan_state state;
+ bool slave;
+};
+
+/*
+ * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
+ * @pdev: the corresponding platform device
+ * @pdata: platform data passed in from the platform/machine
+ * @sdata: SoC-specific data for the controller variant in use
+ * @base: virtual memory base (remapped)
+ * @slave: slave engine for this instance
+ * @memcpy: memcpy engine for this instance
+ * @phy_chans: array of data for the physical channels
+ */
+struct s3c24xx_dma_engine {
+ struct platform_device *pdev;
+ const struct s3c24xx_dma_platdata *pdata;
+ struct soc_data *sdata;
+ void __iomem *base;
+ struct dma_device slave;
+ struct dma_device memcpy;
+ struct s3c24xx_dma_phy *phy_chans;
+};
+
+/*
+ * Physical channel handling
+ */
+
+/*
+ * Check whether a certain channel is busy or not.
+ */
+static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
+{
+ unsigned int val = readl(phy->base + S3C24XX_DSTAT);
+ return val & S3C24XX_DSTAT_STAT_BUSY;
+}
+
+static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
+ struct s3c24xx_dma_phy *phy)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+ int phyvalid;
+
+ /* every phy is valid for memcpy channels */
+ if (!s3cchan->slave)
+ return true;
+
+ /* On newer variants all phys can be used for all virtual channels */
+ if (s3cdma->sdata->has_reqsel)
+ return true;
+
+ phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
+ return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
+}
+
+/*
+ * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
+ */
+static
+struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata;
+ struct s3c24xx_dma_phy *phy = NULL;
+ unsigned long flags;
+ int i;
+ int ret;
+
+ if (s3cchan->slave)
+ cdata = &pdata->channels[s3cchan->id];
+
+ for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+ phy = &s3cdma->phy_chans[i];
+
+ if (!phy->valid)
+ continue;
+
+ if (!s3c24xx_dma_phy_valid(s3cchan, phy))
+ continue;
+
+ spin_lock_irqsave(&phy->lock, flags);
+
+ if (!phy->serving) {
+ phy->serving = s3cchan;
+ spin_unlock_irqrestore(&phy->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&phy->lock, flags);
+ }
+
+ /* No physical channel available, cope with it */
+ if (i == s3cdma->pdata->num_phy_channels) {
+ dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
+ return NULL;
+ }
+
+ /* start the phy clock */
+ if (s3cdma->sdata->has_clocks) {
+ ret = clk_enable(phy->clk);
+ if (ret) {
+ dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
+ phy->id, ret);
+ phy->serving = NULL;
+ return NULL;
+ }
+ }
+
+ return phy;
+}
+
+/*
+ * Mark the physical channel as free.
+ *
+ * This drops the link between the physical and virtual channel.
+ */
+static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
+{
+ struct s3c24xx_dma_engine *s3cdma = phy->host;
+
+ if (s3cdma->sdata->has_clocks)
+ clk_disable(phy->clk);
+
+ phy->serving = NULL;
+}
+
+/*
+ * Stops the channel by writing the stop bit.
+ * This should not be used for an on-going transfer, but as a method of
+ * shutting down a channel (eg, when it's no longer used) or terminating a
+ * transfer.
+ */
+static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
+{
+ writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
+}
+
+/*
+ * Virtual channel handling
+ */
+
+static inline
+struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
+}
+
+static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_phy *phy = s3cchan->phy;
+ struct s3c24xx_txd *txd = s3cchan->at;
+ u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
+
+ return tc * txd->width;
+}
+
+static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
+ struct dma_slave_config *config)
+{
+ if (!s3cchan->slave)
+ return -EINVAL;
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ s3cchan->cfg = *config;
+
+ return 0;
+}
+
+/*
+ * Transfer handling
+ */
+
+static inline
+struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct s3c24xx_txd, vd.tx);
+}
+
+static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
+{
+ struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+
+ if (txd) {
+ INIT_LIST_HEAD(&txd->dsg_list);
+ txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
+ }
+
+ return txd;
+}
+
+static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
+{
+ struct s3c24xx_sg *dsg, *_dsg;
+
+ list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+ list_del(&dsg->node);
+ kfree(dsg);
+ }
+
+ kfree(txd);
+}
+
+static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
+ struct s3c24xx_txd *txd)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_dma_phy *phy = s3cchan->phy;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
+ u32 dcon = txd->dcon;
+ u32 val;
+
+ /* transfer-size and -count from len and width */
+ switch (txd->width) {
+ case 1:
+ dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
+ break;
+ case 2:
+ dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
+ break;
+ case 4:
+ dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
+ break;
+ }
+
+ if (s3cchan->slave) {
+ struct s3c24xx_dma_channel *cdata =
+ &pdata->channels[s3cchan->id];
+
+ if (s3cdma->sdata->has_reqsel) {
+ writel_relaxed((cdata->chansel << 1) |
+ S3C24XX_DMAREQSEL_HW,
+ phy->base + S3C24XX_DMAREQSEL);
+ } else {
+ int csel = cdata->chansel >> (phy->id *
+ S3C24XX_CHANSEL_WIDTH);
+
+ csel &= S3C24XX_CHANSEL_REQ_MASK;
+ dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
+ dcon |= S3C24XX_DCON_HWTRIG;
+ }
+ } else {
+ if (s3cdma->sdata->has_reqsel)
+ writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
+ }
+
+ writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
+ writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
+ writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
+ writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
+ writel_relaxed(dcon, phy->base + S3C24XX_DCON);
+
+ val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
+ val &= ~S3C24XX_DMASKTRIG_STOP;
+ val |= S3C24XX_DMASKTRIG_ON;
+
+ /* trigger the dma operation for memcpy transfers */
+ if (!s3cchan->slave)
+ val |= S3C24XX_DMASKTRIG_SWTRIG;
+
+ writel(val, phy->base + S3C24XX_DMASKTRIG);
+}
+
+/*
+ * Set the initial DMA register values and start first sg.
+ */
+static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_phy *phy = s3cchan->phy;
+ struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
+ struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
+
+ list_del(&txd->vd.node);
+
+ s3cchan->at = txd;
+
+ /* Wait for channel inactive */
+ while (s3c24xx_dma_phy_busy(phy))
+ cpu_relax();
+
+ /* point to the first element of the sg list */
+ txd->at = txd->dsg_list.next;
+ s3c24xx_dma_start_next_sg(s3cchan, txd);
+}
+
+static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
+ struct s3c24xx_dma_chan *s3cchan)
+{
+ LIST_HEAD(head);
+
+ vchan_get_all_descriptors(&s3cchan->vc, &head);
+ vchan_dma_desc_free_list(&s3cchan->vc, &head);
+}
+
+/*
+ * Try to allocate a physical channel. When successful, assign it to
+ * this virtual channel, and initiate the next descriptor. The
+ * virtual channel lock must be held at this point.
+ */
+static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_dma_phy *phy;
+
+ phy = s3c24xx_dma_get_phy(s3cchan);
+ if (!phy) {
+ dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
+ s3cchan->name);
+ s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
+ return;
+ }
+
+ dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
+ phy->id, s3cchan->name);
+
+ s3cchan->phy = phy;
+ s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
+
+ s3c24xx_dma_start_next_txd(s3cchan);
+}
+
+static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
+ struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+
+ dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
+ phy->id, s3cchan->name);
+
+ /*
+ * We do this without taking the lock; we're really only concerned
+ * about whether this pointer is NULL or not, and we're guaranteed
+ * that this will only be called when it _already_ is non-NULL.
+ */
+ phy->serving = s3cchan;
+ s3cchan->phy = phy;
+ s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
+ s3c24xx_dma_start_next_txd(s3cchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_dma_chan *p, *next;
+
+retry:
+ next = NULL;
+
+ /* Find a waiting virtual channel for the next transfer. */
+ list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
+ if (p->state == S3C24XX_DMA_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+
+ if (!next) {
+ list_for_each_entry(p, &s3cdma->slave.channels,
+ vc.chan.device_node)
+ if (p->state == S3C24XX_DMA_CHAN_WAITING &&
+ s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
+ next = p;
+ break;
+ }
+ }
+
+ /* Ensure that the physical channel is stopped */
+ s3c24xx_dma_terminate_phy(s3cchan->phy);
+
+ if (next) {
+ bool success;
+
+ /*
+ * Eww. We know this isn't going to deadlock
+ * but lockdep probably doesn't.
+ */
+ spin_lock(&next->vc.lock);
+ /* Re-check the state now that we have the lock */
+ success = next->state == S3C24XX_DMA_CHAN_WAITING;
+ if (success)
+ s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
+ spin_unlock(&next->vc.lock);
+
+ /* If the state changed, try to find another channel */
+ if (!success)
+ goto retry;
+ } else {
+ /* No more jobs, so free up the physical channel */
+ s3c24xx_dma_put_phy(s3cchan->phy);
+ }
+
+ s3cchan->phy = NULL;
+ s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+}
+
+static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
+{
+ struct device *dev = txd->vd.tx.chan->device->dev;
+ struct s3c24xx_sg *dsg;
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ else
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
+{
+ struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
+
+ if (!s3cchan->slave)
+ s3c24xx_dma_unmap_buffers(txd);
+
+ s3c24xx_dma_free_txd(txd);
+}
+
+static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
+{
+ struct s3c24xx_dma_phy *phy = data;
+ struct s3c24xx_dma_chan *s3cchan = phy->serving;
+ struct s3c24xx_txd *txd;
+
+ dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
+
+ /*
+ * Interrupts happen to notify the completion of a transfer and the
+ * channel should have moved into its stop state already on its own.
+ * Therefore interrupts on channels not bound to a virtual channel
+ * should never happen. Nevertheless send a terminate command to the
+ * channel if the unlikely case happens.
+ */
+ if (unlikely(!s3cchan)) {
+ dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
+ phy->id);
+
+ s3c24xx_dma_terminate_phy(phy);
+
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&s3cchan->vc.lock);
+ txd = s3cchan->at;
+ if (txd) {
+ /* when more sg's are in this txd, start the next one */
+ if (!list_is_last(txd->at, &txd->dsg_list)) {
+ txd->at = txd->at->next;
+ s3c24xx_dma_start_next_sg(s3cchan, txd);
+ } else {
+ s3cchan->at = NULL;
+ vchan_cookie_complete(&txd->vd);
+
+ /*
+ * And start the next descriptor (if any),
+ * otherwise free this channel.
+ */
+ if (vchan_next_desc(&s3cchan->vc))
+ s3c24xx_dma_start_next_txd(s3cchan);
+ else
+ s3c24xx_dma_phy_free(s3cchan);
+ }
+ }
+ spin_unlock(&s3cchan->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * The DMA ENGINE API
+ */
+
+static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&s3cchan->vc.lock, flags);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = s3c24xx_dma_set_runtime_config(s3cchan,
+ (struct dma_slave_config *)arg);
+ break;
+ case DMA_TERMINATE_ALL:
+ if (!s3cchan->phy && !s3cchan->at) {
+ dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
+ s3cchan->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+
+ /* Mark physical channel as free */
+ if (s3cchan->phy)
+ s3c24xx_dma_phy_free(s3cchan);
+
+ /* Dequeue current job */
+ if (s3cchan->at) {
+ s3c24xx_dma_desc_free(&s3cchan->at->vd);
+ s3cchan->at = NULL;
+ }
+
+ /* Dequeue jobs not yet fired as well */
+ s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+ break;
+ default:
+ /* Unknown command */
+ ret = -ENXIO;
+ break;
+ }
+
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ return ret;
+}
+
+static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ spin_lock_irqsave(&s3cchan->vc.lock, flags);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS) {
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+ return ret;
+ }
+
+ /*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate) {
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+ return ret;
+ }
+
+ vd = vchan_find_desc(&s3cchan->vc, cookie);
+ if (vd) {
+ /* On the issued list, so hasn't been processed yet */
+ txd = to_s3c24xx_txd(&vd->tx);
+
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+ } else {
+ /*
+ * Currently running, so sum over the pending sg's and
+ * the currently active one.
+ */
+ txd = s3cchan->at;
+
+ dsg = list_entry(txd->at, struct s3c24xx_sg, node);
+ list_for_each_entry_from(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+
+ bytes += s3c24xx_dma_getbytes_chan(s3cchan);
+ }
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ /*
+ * This cookie not complete yet
+ * Get number of bytes left in the active transactions and queue
+ */
+ dma_set_residue(txstate, bytes);
+
+ /* Whether waiting or running, we're in progress */
+ return ret;
+}
+
+/*
+ * Initialize a descriptor to be used by memcpy submit
+ */
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ int src_mod, dest_mod;
+
+ dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
+ len, s3cchan->name);
+
+ if ((len & S3C24XX_DCON_TC_MASK) != len) {
+ dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
+ return NULL;
+ }
+
+ txd = s3c24xx_dma_get_txd();
+ if (!txd)
+ return NULL;
+
+ dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+ if (!dsg) {
+ s3c24xx_dma_free_txd(txd);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->src_addr = src;
+ dsg->dst_addr = dest;
+ dsg->len = len;
+
+ /*
+ * Determine a suitable transfer width.
+ * The DMA controller cannot fetch/store information which is not
+ * naturally aligned on the bus, i.e., a 4 byte fetch must start at
+ * an address divisible by 4 - more generally addr % width must be 0.
+ */
+ src_mod = src % 4;
+ dest_mod = dest % 4;
+ switch (len % 4) {
+ case 0:
+ txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
+ break;
+ case 2:
+ txd->width = ((src_mod == 2 || src_mod == 0) &&
+ (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
+ break;
+ default:
+ txd->width = 1;
+ break;
+ }
+
+ txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
+ txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
+ txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
+ S3C24XX_DCON_SERV_WHOLE;
+
+ return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ struct scatterlist *sg;
+ dma_addr_t slave_addr;
+ u32 hwcfg = 0;
+ int tmp;
+
+ dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
+ sg_dma_len(sgl), s3cchan->name);
+
+ txd = s3c24xx_dma_get_txd();
+ if (!txd)
+ return NULL;
+
+ if (cdata->handshake)
+ txd->dcon |= S3C24XX_DCON_HANDSHAKE;
+
+ switch (cdata->bus) {
+ case S3C24XX_DMA_APB:
+ txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_APB;
+ break;
+ case S3C24XX_DMA_AHB:
+ txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_AHB;
+ break;
+ }
+
+ /*
+ * Always assume our peripheral destination is a fixed
+ * address in memory.
+ */
+ hwcfg |= S3C24XX_DISRCC_INC_FIXED;
+
+ /*
+ * Individual dma operations are requested by the slave,
+ * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
+ */
+ txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
+ S3C24XX_DISRCC_INC_INCREMENT;
+ txd->didstc = hwcfg;
+ slave_addr = s3cchan->cfg.dst_addr;
+ txd->width = s3cchan->cfg.dst_addr_width;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ txd->disrcc = hwcfg;
+ txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
+ S3C24XX_DIDSTC_INC_INCREMENT;
+ slave_addr = s3cchan->cfg.src_addr;
+ txd->width = s3cchan->cfg.src_addr_width;
+ } else {
+ s3c24xx_dma_free_txd(txd);
+ dev_err(&s3cdma->pdev->dev,
+ "direction %d unsupported\n", direction);
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, tmp) {
+ dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+ if (!dsg) {
+ s3c24xx_dma_free_txd(txd);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = sg_dma_len(sg);
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = sg_dma_address(sg);
+ dsg->dst_addr = slave_addr;
+ } else { /* DMA_DEV_TO_MEM */
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = sg_dma_address(sg);
+ }
+ }
+
+ return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
+
+/*
+ * Slave transactions callback to the slave device to allow
+ * synchronization of slave DMA signals with the DMAC enable
+ */
+static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&s3cchan->vc.lock, flags);
+ if (vchan_issue_pending(&s3cchan->vc)) {
+ if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
+ s3c24xx_dma_phy_alloc_and_start(s3cchan);
+ }
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+}
+
+/*
+ * Bringup and teardown
+ */
+
+/*
+ * Initialise the DMAC memcpy/slave channels.
+ * Make a local wrapper to hold required data
+ */
+static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
+ struct dma_device *dmadev, unsigned int channels, bool slave)
+{
+ struct s3c24xx_dma_chan *chan;
+ int i;
+
+ INIT_LIST_HEAD(&dmadev->channels);
+
+ /*
+ * Register as many memcpy channels as we have physical channels;
+ * we won't always be able to use them all but the code will have
+ * to cope with that situation.
+ */
+ for (i = 0; i < channels; i++) {
+ chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(dmadev->dev,
+ "%s no memory for channel\n", __func__);
+ return -ENOMEM;
+ }
+
+ chan->id = i;
+ chan->host = s3cdma;
+ chan->state = S3C24XX_DMA_CHAN_IDLE;
+
+ if (slave) {
+ chan->slave = true;
+ chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
+ if (!chan->name)
+ return -ENOMEM;
+ } else {
+ chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
+ if (!chan->name)
+ return -ENOMEM;
+ }
+ dev_dbg(dmadev->dev,
+ "initialize virtual channel \"%s\"\n",
+ chan->name);
+
+ chan->vc.desc_free = s3c24xx_dma_desc_free;
+ vchan_init(&chan->vc, dmadev);
+ }
+ dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
+ i, slave ? "slave" : "memcpy");
+ return i;
+}
+
+static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
+{
+ struct s3c24xx_dma_chan *chan = NULL;
+ struct s3c24xx_dma_chan *next;
+
+ list_for_each_entry_safe(chan,
+ next, &dmadev->channels, vc.chan.device_node)
+ list_del(&chan->vc.chan.device_node);
+}
+
+/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
+static struct soc_data soc_s3c2410 = {
+ .stride = 0x40,
+ .has_reqsel = false,
+ .has_clocks = false,
+};
+
+/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
+static struct soc_data soc_s3c2412 = {
+ .stride = 0x40,
+ .has_reqsel = true,
+ .has_clocks = true,
+};
+
+/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
+static struct soc_data soc_s3c2443 = {
+ .stride = 0x100,
+ .has_reqsel = true,
+ .has_clocks = true,
+};
+
+static struct platform_device_id s3c24xx_dma_driver_ids[] = {
+ {
+ .name = "s3c2410-dma",
+ .driver_data = (kernel_ulong_t)&soc_s3c2410,
+ }, {
+ .name = "s3c2412-dma",
+ .driver_data = (kernel_ulong_t)&soc_s3c2412,
+ }, {
+ .name = "s3c2443-dma",
+ .driver_data = (kernel_ulong_t)&soc_s3c2443,
+ },
+ { },
+};
+
+static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
+{
+ return (struct soc_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c24xx_dma_probe(struct platform_device *pdev)
+{
+ const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct s3c24xx_dma_engine *s3cdma;
+ struct soc_data *sdata;
+ struct resource *res;
+ int ret;
+ int i;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ /* Basic sanity check */
+ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
+ dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
+ pdata->num_phy_channels, MAX_DMA_CHANNELS);
+ return -EINVAL;
+ }
+
+ sdata = s3c24xx_dma_get_soc_data(pdev);
+ if (!sdata)
+ return -EINVAL;
+
+ s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
+ if (!s3cdma)
+ return -ENOMEM;
+
+ s3cdma->pdev = pdev;
+ s3cdma->pdata = pdata;
+ s3cdma->sdata = sdata;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(s3cdma->base))
+ return PTR_ERR(s3cdma->base);
+
+ s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
+ sizeof(struct s3c24xx_dma_phy) *
+ pdata->num_phy_channels,
+ GFP_KERNEL);
+ if (!s3cdma->phy_chans)
+ return -ENOMEM;
+
+ /* acquire irqs and clocks for all physical channels */
+ for (i = 0; i < pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+ char clk_name[6];
+
+ phy->id = i;
+ phy->base = s3cdma->base + (i * sdata->stride);
+ phy->host = s3cdma;
+
+ phy->irq = platform_get_irq(pdev, i);
+ if (phy->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
+ i, phy->irq);
+ continue;
+ }
+
+ ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
+ 0, pdev->name, phy);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
+ i, ret);
+ continue;
+ }
+
+ if (sdata->has_clocks) {
+ sprintf(clk_name, "dma.%d", i);
+ phy->clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(phy->clk)) {
+ dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
+ i, PTR_ERR(phy->clk));
+ continue;
+ }
+
+ ret = clk_prepare(phy->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
+ i, ret);
+ continue;
+ }
+ }
+
+ spin_lock_init(&phy->lock);
+ phy->valid = true;
+
+ dev_dbg(&pdev->dev, "physical channel %d is %s\n",
+ i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
+ }
+
+ /* Initialize memcpy engine */
+ dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
+ dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
+ s3cdma->memcpy.dev = &pdev->dev;
+ s3cdma->memcpy.device_alloc_chan_resources =
+ s3c24xx_dma_alloc_chan_resources;
+ s3cdma->memcpy.device_free_chan_resources =
+ s3c24xx_dma_free_chan_resources;
+ s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
+ s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
+ s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
+ s3cdma->memcpy.device_control = s3c24xx_dma_control;
+
+ /* Initialize slave engine for SoC internal dedicated peripherals */
+ dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
+ dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
+ s3cdma->slave.dev = &pdev->dev;
+ s3cdma->slave.device_alloc_chan_resources =
+ s3c24xx_dma_alloc_chan_resources;
+ s3cdma->slave.device_free_chan_resources =
+ s3c24xx_dma_free_chan_resources;
+ s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
+ s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
+ s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
+ s3cdma->slave.device_control = s3c24xx_dma_control;
+
+ /* Register as many memcpy channels as there are physical channels */
+ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
+ pdata->num_phy_channels, false);
+ if (ret <= 0) {
+ dev_warn(&pdev->dev,
+ "%s failed to enumerate memcpy channels - %d\n",
+ __func__, ret);
+ goto err_memcpy;
+ }
+
+ /* Register slave channels */
+ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
+ pdata->num_channels, true);
+ if (ret <= 0) {
+ dev_warn(&pdev->dev,
+ "%s failed to enumerate slave channels - %d\n",
+ __func__, ret);
+ goto err_slave;
+ }
+
+ ret = dma_async_device_register(&s3cdma->memcpy);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "%s failed to register memcpy as an async device - %d\n",
+ __func__, ret);
+ goto err_memcpy_reg;
+ }
+
+ ret = dma_async_device_register(&s3cdma->slave);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "%s failed to register slave as an async device - %d\n",
+ __func__, ret);
+ goto err_slave_reg;
+ }
+
+ platform_set_drvdata(pdev, s3cdma);
+ dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
+ pdata->num_phy_channels);
+
+ return 0;
+
+err_slave_reg:
+ dma_async_device_unregister(&s3cdma->memcpy);
+err_memcpy_reg:
+ s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
+err_slave:
+ s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
+err_memcpy:
+ if (sdata->has_clocks)
+ for (i = 0; i < pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+ if (phy->valid)
+ clk_unprepare(phy->clk);
+ }
+
+ return ret;
+}
+
+static int s3c24xx_dma_remove(struct platform_device *pdev)
+{
+ const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
+ struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
+ int i;
+
+ dma_async_device_unregister(&s3cdma->slave);
+ dma_async_device_unregister(&s3cdma->memcpy);
+
+ s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
+ s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
+
+ if (sdata->has_clocks)
+ for (i = 0; i < pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+ if (phy->valid)
+ clk_unprepare(phy->clk);
+ }
+
+ return 0;
+}
+
+static struct platform_driver s3c24xx_dma_driver = {
+ .driver = {
+ .name = "s3c24xx-dma",
+ .owner = THIS_MODULE,
+ },
+ .id_table = s3c24xx_dma_driver_ids,
+ .probe = s3c24xx_dma_probe,
+ .remove = s3c24xx_dma_remove,
+};
+
+module_platform_driver(s3c24xx_dma_driver);
+
+bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct s3c24xx_dma_chan *s3cchan;
+
+ if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
+ return false;
+
+ s3cchan = to_s3c24xx_dma_chan(chan);
+
+ return s3cchan->id == (int)param;
+}
+EXPORT_SYMBOL(s3c24xx_dma_filter);
+
+MODULE_DESCRIPTION("S3C24XX DMA Driver");
+MODULE_AUTHOR("Heiko Stuebner");
+MODULE_LICENSE("GPL v2");
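To make the chansel encoding documented near the top of this new file concrete, here is a small stand-alone sketch that decodes the SDI example from that comment; the constants simply mirror the S3C24XX_CHANSEL_* defines and everything else is illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHANSEL_WIDTH		4		/* mirrors S3C24XX_CHANSEL_WIDTH */
#define CHANSEL_VALID		(1u << 3)	/* mirrors S3C24XX_CHANSEL_VALID */
#define CHANSEL_REQ_MASK	7u		/* mirrors S3C24XX_CHANSEL_REQ_MASK */

/* SDI example from the comment: phy 3 (req 1), phy 2 (req 2), phy 0 (req 2) */
static const uint32_t sdi_chansel =
	((CHANSEL_VALID | 1) << (3 * CHANSEL_WIDTH)) |
	((CHANSEL_VALID | 2) << (2 * CHANSEL_WIDTH)) |
	((CHANSEL_VALID | 2) << (0 * CHANSEL_WIDTH));

int main(void)
{
	unsigned int phy;

	for (phy = 0; phy < 4; phy++) {
		/* same shift/test the driver does in s3c24xx_dma_phy_valid() */
		uint32_t nibble = sdi_chansel >> (phy * CHANSEL_WIDTH);

		printf("phy %u: %s, hw request source %u\n", phy,
		       (nibble & CHANSEL_VALID) ? "valid" : "invalid",
		       nibble & CHANSEL_REQ_MASK);
	}
	return 0;
}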
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 461a91ab70b..ab26d46bbe1 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
if (!state)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1..2e7b394def8 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
* If we don't find cookie on the queue, it has been aborted and we have
* to report error
*/
- if (status != DMA_SUCCESS) {
+ if (status != DMA_COMPLETE) {
struct shdma_desc *sdesc;
status = DMA_ERROR;
list_for_each_entry(sdesc, &schan->ld_queue, node)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f2..0d765c0e21e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
const struct sh_dmae_pdata *pdata;
- unsigned long irqflags = IRQF_DISABLED,
+ unsigned long irqflags = 0,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
else
- chan_flag[irq_cnt] = IRQF_DISABLED;
+ chan_flag[irq_cnt] = 0;
dev_dbg(&pdev->dev,
"Found IRQ %d for channel %d\n",
i, irq_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 82d2b97ad94..b8c031b7de4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
}
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS)
+ if (ret != DMA_COMPLETE)
dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
- ((src_addr_width > 1) && (src_addr_width & 1)) ||
- ((dst_addr_width > 1) && (dst_addr_width & 1)))
+ !is_power_of_2(src_addr_width) ||
+ !is_power_of_2(dst_addr_width))
return -EINVAL;
cfg->src_info.data_width = src_addr_width;
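The is_power_of_2() checks above replace the open-coded odd-width test with a power-of-two check. A minimal sketch of the resulting validation, using only the bus-width constants from linux/dmaengine.h (the helper name is made up for illustration):

#include <linux/dmaengine.h>
#include <linux/log2.h>

/* Illustrative helper: accept only 1/2/4/8-byte bus widths, as above. */
static bool foo_buswidths_valid(enum dma_slave_buswidth src,
				enum dma_slave_buswidth dst)
{
	return src > DMA_SLAVE_BUSWIDTH_UNDEFINED &&
	       src <= DMA_SLAVE_BUSWIDTH_8_BYTES &&
	       dst > DMA_SLAVE_BUSWIDTH_UNDEFINED &&
	       dst <= DMA_SLAVE_BUSWIDTH_8_BYTES &&
	       is_power_of_2(src) && is_power_of_2(dst);
}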
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 5d4986e5f5f..73654e33f13 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_del(&sgreq->node);
if (sgreq->last_sg) {
- dma_desc->dma_status = DMA_SUCCESS;
+ dma_desc->dma_status = DMA_COMPLETE;
dma_cookie_complete(&dma_desc->txd);
if (!dma_desc->cb_count)
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned int residual;
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return &dma_desc->txd;
}
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 28af214fce0..4506a7b4f97 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
return done;
}
-static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
- bool single)
-{
- dma_addr_t addr;
- int len;
-
- addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
- dma_desc[4];
-
- len = (dma_desc[3] << 8) | dma_desc[2];
-
- if (single)
- dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- DMA_TO_DEVICE);
-}
-
-static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
-{
- struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
- struct timb_dma_chan, chan);
- u8 *descs;
-
- for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
- __td_unmap_desc(td_chan, descs, single);
- if (descs[0] & 0x02)
- break;
- }
-}
-
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
struct scatterlist *sg, bool last)
{
@@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
list_move(&td_desc->desc_node, &td_chan->free_list);
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
- __td_unmap_descs(td_desc,
- txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
-
+ dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 71e8e775189..bae6c29f550 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
list_splice_init(&desc->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
- if (!ds) {
- dma_addr_t dmaaddr;
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- dmaaddr = is_dmac64(dc) ?
- desc->hwdesc.DAR : desc->hwdesc32.DAR;
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- dmaaddr = is_dmac64(dc) ?
- desc->hwdesc.SAR : desc->hwdesc32.SAR;
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_TO_DEVICE);
- else
- dma_unmap_page(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_TO_DEVICE);
- }
- }
-
+ dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
@@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
- return DMA_SUCCESS;
+ if (ret == DMA_COMPLETE)
+ return DMA_COMPLETE;
spin_lock_bh(&dc->lock);
txx9dmac_scan_descriptors(dc);
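As in the ppc440spe and timb_dma hunks earlier, the open-coded source/destination unmap loops are dropped in favour of the new core helper dma_descriptor_unmap(). Under that scheme the completion path of such a driver reduces to roughly the following sketch; struct foo_desc and foo_descriptor_complete() are placeholders, not txx9dmac code:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_complete(), private drivers/dma helper */

/* Placeholder driver descriptor wrapping the generic async_tx descriptor. */
struct foo_desc {
	struct dma_async_tx_descriptor txd;
};

static void foo_descriptor_complete(struct foo_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dma_cookie_complete(txd);
	/* any unmap data attached to the descriptor is released centrally */
	dma_descriptor_unmap(txd);

	if (txd->callback)
		txd->callback(txd->callback_param);
}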
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3c9e4e98c65..b53d0de17e1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -339,8 +339,8 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow];
- base_bits = GENMASK(21, 31) | GENMASK(9, 15);
- mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
+ mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
addr_shift = 4;
/*
@@ -352,16 +352,16 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow >> 1];
- *base = (csbase & GENMASK(5, 15)) << 6;
- *base |= (csbase & GENMASK(19, 30)) << 8;
+ *base = (csbase & GENMASK_ULL(15, 5)) << 6;
+ *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
*mask = ~0ULL;
/* poke holes for the csmask */
- *mask &= ~((GENMASK(5, 15) << 6) |
- (GENMASK(19, 30) << 8));
+ *mask &= ~((GENMASK_ULL(15, 5) << 6) |
+ (GENMASK_ULL(30, 19) << 8));
- *mask |= (csmask & GENMASK(5, 15)) << 6;
- *mask |= (csmask & GENMASK(19, 30)) << 8;
+ *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
+ *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
return;
} else {
@@ -370,9 +370,11 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
addr_shift = 8;
if (pvt->fam == 0x15)
- base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ base_bits = mask_bits =
+ GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
else
- base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ base_bits = mask_bits =
+ GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
}
*base = (csbase & base_bits) << addr_shift;
@@ -561,7 +563,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
+ dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
(unsigned long)sys_addr, (unsigned long)dram_addr);
@@ -597,7 +599,7 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* concerning translating a DramAddr to an InputAddr.
*/
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
(dram_addr & 0xfff);
edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
@@ -849,7 +851,7 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
end_bit = 39;
}
- addr = m->addr & GENMASK(start_bit, end_bit);
+ addr = m->addr & GENMASK_ULL(end_bit, start_bit);
/*
* Erratum 637 workaround
@@ -861,7 +863,7 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
u16 mce_nid;
u8 intlv_en;
- if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
+ if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
return addr;
mce_nid = amd_get_nb_id(m->extcpu);
@@ -871,7 +873,7 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
intlv_en = tmp >> 21 & 0x7;
/* add [47:27] + 3 trailing bits */
- cc6_base = (tmp & GENMASK(0, 20)) << 3;
+ cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
/* reverse and add DramIntlvEn */
cc6_base |= intlv_en ^ 0x7;
@@ -880,18 +882,18 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
cc6_base <<= 24;
if (!intlv_en)
- return cc6_base | (addr & GENMASK(0, 23));
+ return cc6_base | (addr & GENMASK_ULL(23, 0));
amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
/* faster log2 */
- tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
+ tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
/* OR DramIntlvSel into bits [14:12] */
- tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
+ tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
/* add remaining [11:0] bits from original MC4_ADDR */
- tmp_addr |= addr & GENMASK(0, 11);
+ tmp_addr |= addr & GENMASK_ULL(11, 0);
return cc6_base | tmp_addr;
}
@@ -952,12 +954,12 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+ pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
/* {[39:27],111b} */
pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+ pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
/* [47:40] */
pvt->ranges[range].lim.hi |= llim >> 13;
@@ -1330,7 +1332,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
chan_off = dram_base;
}
- return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
+ return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}
/*
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index d2443cfa069..6dc1fcc25af 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -160,14 +160,6 @@
#define OFF false
/*
- * Create a contiguous bitmask starting at bit position @lo and ending at
- * position @hi. For example
- *
- * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
-
-/*
* PCI-defined configuration space registers
*/
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
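The driver-local GENMASK(lo, hi) macro deleted above is replaced throughout amd64_edac.c by the generic GENMASK_ULL(hi, lo) from linux/bitops.h, which takes its arguments in the opposite order but builds the same contiguous mask. A stand-alone check of the example from the deleted comment (the macro body below only mirrors the kernel semantics so the snippet compiles outside the tree):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's GENMASK_ULL(h, l): bits l..h inclusive. */
#define GENMASK_ULL(h, l) \
	((~(uint64_t)0 << (l)) & (~(uint64_t)0 >> (63 - (h))))

int main(void)
{
	/* The deleted comment's example: bits 21..39 -> 0x000000ffffe00000 */
	printf("0x%016" PRIx64 "\n", GENMASK_ULL(39, 21));
	return 0;
}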
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index c2eaf334b90..374b57fc596 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/io.h>
+#include <linux/of_address.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
@@ -162,6 +163,7 @@ static void cell_edac_init_csrows(struct mem_ctl_info *mci)
csrow->first_page, nr_pages);
break;
}
+ of_node_put(np);
}
static int cell_edac_probe(struct platform_device *pdev)
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 211021dfec7..10267434603 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -530,12 +530,9 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* Report action taken */
edac_device_printk(edac_dev, KERN_INFO,
- "Giving out device to module '%s' controller "
- "'%s': DEV '%s' (%s)\n",
- edac_dev->mod_name,
- edac_dev->ctl_name,
- edac_dev_name(edac_dev),
- edac_op_state_to_string(edac_dev->op_state));
+ "Giving out device to module %s controller %s: DEV %s (%s)\n",
+ edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
+ edac_op_state_to_string(edac_dev->op_state));
mutex_unlock(&device_ctls_mutex);
return 0;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 89e109022d7..e8c9ef03495 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -788,8 +788,10 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
}
/* Report action taken */
- edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
- " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+ edac_mc_printk(mci, KERN_INFO,
+ "Giving out device to module %s controller %s: DEV %s (%s)\n",
+ mci->mod_name, mci->ctl_name, mci->dev_name,
+ edac_op_state_to_string(mci->op_state));
edac_mc_owner = mci->mod_name;
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index dd370f92ace..2cf44b4db80 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -358,11 +358,9 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
}
edac_pci_printk(pci, KERN_INFO,
- "Giving out device to module '%s' controller '%s':"
- " DEV '%s' (%s)\n",
- pci->mod_name,
- pci->ctl_name,
- edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
+ "Giving out device to module %s controller %s: DEV %s (%s)\n",
+ pci->mod_name, pci->ctl_name, pci->dev_name,
+ edac_op_state_to_string(pci->op_state));
mutex_unlock(&edac_pci_ctls_mutex);
return 0;
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index bb534670ec0..d5a98a45c06 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -297,15 +297,14 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
}
/* Error address */
- if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
}
/* Error grain */
- if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) {
+ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
- }
/* Memory error location, mapped on e->location */
p = e->location;
@@ -315,6 +314,8 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
p += sprintf(p, "card:%d ", mem_err->card);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
p += sprintf(p, "module:%d ", mem_err->module);
+ if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+ p += sprintf(p, "rank:%d ", mem_err->rank);
if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
p += sprintf(p, "bank:%d ", mem_err->bank);
if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
@@ -323,6 +324,15 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
p += sprintf(p, "col:%d ", mem_err->column);
if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
+ if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
+ const char *bank = NULL, *device = NULL;
+ dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
+ if (bank != NULL && device != NULL)
+ p += sprintf(p, "DIMM location:%s %s ", bank, device);
+ else
+ p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
+ mem_err->mem_dev_handle);
+ }
if (p > e->location)
*(p - 1) = '\0';
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index c2bd8c6a434..2f193668ebc 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -50,8 +50,15 @@ static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static const struct of_device_id hb_l2_err_of_match[] = {
+ { .compatible = "calxeda,hb-sregs-l2-ecc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
+
static int highbank_l2_err_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
struct edac_device_ctl_info *dci;
struct hb_l2_drvdata *drvdata;
struct resource *r;
@@ -90,28 +97,32 @@ static int highbank_l2_err_probe(struct platform_device *pdev)
goto err;
}
+ id = of_match_device(hb_l2_err_of_match, &pdev->dev);
+ dci->mod_name = pdev->dev.driver->name;
+ dci->ctl_name = id ? id->compatible : "unknown";
+ dci->dev_name = dev_name(&pdev->dev);
+
+ if (edac_device_add_device(dci))
+ goto err;
+
drvdata->db_irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev, drvdata->db_irq,
highbank_l2_err_handler,
0, dev_name(&pdev->dev), dci);
if (res < 0)
- goto err;
+ goto err2;
drvdata->sb_irq = platform_get_irq(pdev, 1);
res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
highbank_l2_err_handler,
0, dev_name(&pdev->dev), dci);
if (res < 0)
- goto err;
-
- dci->mod_name = dev_name(&pdev->dev);
- dci->dev_name = dev_name(&pdev->dev);
-
- if (edac_device_add_device(dci))
- goto err;
+ goto err2;
devres_close_group(&pdev->dev, NULL);
return 0;
+err2:
+ edac_device_del_device(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
edac_device_free_ctl_info(dci);
@@ -127,12 +138,6 @@ static int highbank_l2_err_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id hb_l2_err_of_match[] = {
- { .compatible = "calxeda,hb-sregs-l2-ecc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
-
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
.remove = highbank_l2_err_remove,
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 4695dd2d71f..f784de1dc79 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -26,31 +26,40 @@
#include "edac_module.h"
/* DDR Ctrlr Error Registers */
-#define HB_DDR_ECC_OPT 0x128
-#define HB_DDR_ECC_U_ERR_ADDR 0x130
-#define HB_DDR_ECC_U_ERR_STAT 0x134
-#define HB_DDR_ECC_U_ERR_DATAL 0x138
-#define HB_DDR_ECC_U_ERR_DATAH 0x13c
-#define HB_DDR_ECC_C_ERR_ADDR 0x140
-#define HB_DDR_ECC_C_ERR_STAT 0x144
-#define HB_DDR_ECC_C_ERR_DATAL 0x148
-#define HB_DDR_ECC_C_ERR_DATAH 0x14c
-#define HB_DDR_ECC_INT_STATUS 0x180
-#define HB_DDR_ECC_INT_ACK 0x184
-#define HB_DDR_ECC_U_ERR_ID 0x424
-#define HB_DDR_ECC_C_ERR_ID 0x428
-#define HB_DDR_ECC_INT_STAT_CE 0x8
-#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
-#define HB_DDR_ECC_INT_STAT_UE 0x20
-#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
+#define HB_DDR_ECC_ERR_BASE 0x128
+#define MW_DDR_ECC_ERR_BASE 0x1b4
+
+#define HB_DDR_ECC_OPT 0x00
+#define HB_DDR_ECC_U_ERR_ADDR 0x08
+#define HB_DDR_ECC_U_ERR_STAT 0x0c
+#define HB_DDR_ECC_U_ERR_DATAL 0x10
+#define HB_DDR_ECC_U_ERR_DATAH 0x14
+#define HB_DDR_ECC_C_ERR_ADDR 0x18
+#define HB_DDR_ECC_C_ERR_STAT 0x1c
+#define HB_DDR_ECC_C_ERR_DATAL 0x20
+#define HB_DDR_ECC_C_ERR_DATAH 0x24
#define HB_DDR_ECC_OPT_MODE_MASK 0x3
#define HB_DDR_ECC_OPT_FWC 0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT 16
+/* DDR Ctrlr Interrupt Registers */
+
+#define HB_DDR_ECC_INT_BASE 0x180
+#define MW_DDR_ECC_INT_BASE 0x218
+
+#define HB_DDR_ECC_INT_STATUS 0x00
+#define HB_DDR_ECC_INT_ACK 0x04
+
+#define HB_DDR_ECC_INT_STAT_CE 0x8
+#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
+#define HB_DDR_ECC_INT_STAT_UE 0x20
+#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
+
struct hb_mc_drvdata {
- void __iomem *mc_vbase;
+ void __iomem *mc_err_base;
+ void __iomem *mc_int_base;
};
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
@@ -60,10 +69,10 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
u32 status, err_addr;
/* Read the interrupt status register */
- status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS);
+ status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);
if (status & HB_DDR_ECC_INT_STAT_UE) {
- err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR);
+ err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, 0,
@@ -71,9 +80,9 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
mci->ctl_name, "");
}
if (status & HB_DDR_ECC_INT_STAT_CE) {
- u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT);
+ u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
syndrome = (syndrome >> 8) & 0xff;
- err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR);
+ err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, syndrome,
@@ -82,66 +91,79 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
}
/* clear the error, clears the interrupt */
- writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK);
+ writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
return IRQ_HANDLED;
}
-#ifdef CONFIG_EDAC_DEBUG
-static ssize_t highbank_mc_err_inject_write(struct file *file,
- const char __user *data,
- size_t count, loff_t *ppos)
+static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
- struct mem_ctl_info *mci = file->private_data;
struct hb_mc_drvdata *pdata = mci->pvt_info;
- char buf[32];
- size_t buf_size;
u32 reg;
+
+ reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
+ reg &= HB_DDR_ECC_OPT_MODE_MASK;
+ reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
+ writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static ssize_t highbank_mc_inject_ctrl(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
u8 synd;
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, data, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
+ if (kstrtou8(buf, 16, &synd))
+ return -EINVAL;
- if (!kstrtou8(buf, 16, &synd)) {
- reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT);
- reg &= HB_DDR_ECC_OPT_MODE_MASK;
- reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
- writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT);
- }
+ highbank_mc_err_inject(mci, synd);
return count;
}
-static const struct file_operations highbank_mc_debug_inject_fops = {
- .open = simple_open,
- .write = highbank_mc_err_inject_write,
- .llseek = generic_file_llseek,
+static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);
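/*
 * Editor's note (illustrative, not part of the patch): inject_ctrl is now a
 * sysfs attribute on the mci device rather than a debugfs file, and the store
 * hook parses the syndrome with kstrtou8(buf, 16, ...), i.e. as hex.  A hedged
 * user-space sketch of poking it; the exact sysfs path is an assumption and
 * depends on the EDAC core's device layout on the running system:
 */
#include <stdio.h>

int main(void)
{
	/* assumed location of the first memory controller's attribute */
	const char *path = "/sys/devices/system/edac/mc/mc0/inject_ctrl";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "a5\n");	/* request an injection with syndrome 0xa5 */
	fclose(f);
	return 0;
}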
+
+struct hb_mc_settings {
+ int err_offset;
+ int int_offset;
};
-static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
-{
- if (mci->debugfs)
- debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
- &highbank_mc_debug_inject_fops);
-;
-}
-#else
-static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
-{}
-#endif
+static struct hb_mc_settings hb_settings = {
+ .err_offset = HB_DDR_ECC_ERR_BASE,
+ .int_offset = HB_DDR_ECC_INT_BASE,
+};
+
+static struct hb_mc_settings mw_settings = {
+ .err_offset = MW_DDR_ECC_ERR_BASE,
+ .int_offset = MW_DDR_ECC_INT_BASE,
+};
+
+static struct of_device_id hb_ddr_ctrl_of_match[] = {
+ { .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings },
+ { .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
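/*
 * Editor's note (illustrative, not part of the patch): the match table above
 * hangs a per-SoC hb_mc_settings block off .data, and the probe adds those
 * offsets to a single ioremap'd base to locate the error and interrupt
 * register groups.  A small user-space model of that lookup (names and the
 * fake base address are hypothetical):
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct soc_settings { unsigned int err_offset, int_offset; };

static const struct soc_settings hb_model = { 0x128, 0x180 };
static const struct soc_settings mw_model = { 0x1b4, 0x218 };

static const struct {
	const char *compatible;
	const struct soc_settings *data;
} match_model[] = {
	{ "calxeda,hb-ddr-ctrl",       &hb_model },
	{ "calxeda,ecx-2000-ddr-ctrl", &mw_model },
};

int main(void)
{
	const char *compat = "calxeda,ecx-2000-ddr-ctrl";
	uintptr_t base = 0x10000000;	/* pretend result of devm_ioremap() */
	size_t i;

	for (i = 0; i < sizeof(match_model) / sizeof(match_model[0]); i++) {
		if (strcmp(match_model[i].compatible, compat))
			continue;
		printf("err regs @ 0x%lx, int regs @ 0x%lx\n",
		       (unsigned long)(base + match_model[i].data->err_offset),
		       (unsigned long)(base + match_model[i].data->int_offset));
	}
	return 0;
}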
static int highbank_mc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
+ const struct hb_mc_settings *settings;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct hb_mc_drvdata *drvdata;
struct dimm_info *dimm;
struct resource *r;
+ void __iomem *base;
u32 control;
int irq;
int res = 0;
+ id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = true;
@@ -174,35 +196,31 @@ static int highbank_mc_probe(struct platform_device *pdev)
goto err;
}
- drvdata->mc_vbase = devm_ioremap(&pdev->dev,
- r->start, resource_size(r));
- if (!drvdata->mc_vbase) {
+ base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!base) {
dev_err(&pdev->dev, "Unable to map regs\n");
res = -ENOMEM;
goto err;
}
- control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3;
+ settings = id->data;
+ drvdata->mc_err_base = base + settings->err_offset;
+ drvdata->mc_int_base = base + settings->int_offset;
+
+ control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
if (!control || (control == 0x2)) {
dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
res = -ENODEV;
goto err;
}
- irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
- 0, dev_name(&pdev->dev), mci);
- if (res < 0) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- goto err;
- }
-
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = dev_name(&pdev->dev);
+ mci->mod_name = pdev->dev.driver->name;
mci->mod_ver = "1";
- mci->ctl_name = dev_name(&pdev->dev);
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
/* Only a single 4GB DIMM is supported */
@@ -217,10 +235,20 @@ static int highbank_mc_probe(struct platform_device *pdev)
if (res < 0)
goto err;
- highbank_mc_create_debugfs_nodes(mci);
+ irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
+ 0, dev_name(&pdev->dev), mci);
+ if (res < 0) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err2;
+ }
+
+ device_create_file(&mci->dev, &dev_attr_inject_ctrl);
devres_close_group(&pdev->dev, NULL);
return 0;
+err2:
+ edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
edac_mc_free(mci);
@@ -231,17 +259,12 @@ static int highbank_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+ device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
-static const struct of_device_id hb_ddr_ctrl_of_match[] = {
- { .compatible = "calxeda,hb-ddr-ctrl", },
- {},
-};
-MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
-
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
.remove = highbank_mc_remove,
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 3eb32f62d72..fd46b0bd5f2 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -327,28 +327,6 @@ err:
}
EXPORT_SYMBOL(mpc85xx_pci_err_probe);
-static int mpc85xx_pci_err_remove(struct platform_device *op)
-{
- struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
- struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
-
- edac_dbg(0, "\n");
-
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
- orig_pci_err_cap_dr);
-
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
-
- edac_pci_del_device(pci->dev);
-
- if (edac_op_state == EDAC_OPSTATE_INT)
- irq_dispose_mapping(pdata->irq);
-
- edac_pci_free_ctl_info(pci);
-
- return 0;
-}
-
#endif /* CONFIG_PCI */
/**************************** L2 Err device ***************************/
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e04462b6075..8472405c558 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -34,7 +34,7 @@ static int probed;
/*
* Alter this version for the module when modifications are made
*/
-#define SBRIDGE_REVISION " Ver: 1.0.0 "
+#define SBRIDGE_REVISION " Ver: 1.1.0 "
#define EDAC_MOD_STR "sbridge_edac"
/*
@@ -50,7 +50,7 @@ static int probed;
* Get a bit field at register value <v>, from bit <lo> to bit <hi>
*/
#define GET_BITFIELD(v, lo, hi) \
- (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
+ (((v) & GENMASK_ULL(hi, lo)) >> (lo))
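/*
 * Editor's note (illustrative, not part of the patch): GENMASK_ULL(hi, lo)
 * yields the same contiguous bit mask the old open-coded expression built.  A
 * standalone equivalence check; the local GENMASK_ULL below is only a
 * stand-in mirroring the kernel's definition for hi < 64:
 */
#include <assert.h>
#include <stdio.h>

#define GENMASK_ULL(hi, lo)	(((~0ULL) << (lo)) & (~0ULL >> (63 - (hi))))

#define OLD_GET_BITFIELD(v, lo, hi) \
	(((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
#define NEW_GET_BITFIELD(v, lo, hi) \
	(((v) & GENMASK_ULL(hi, lo)) >> (lo))

int main(void)
{
	unsigned long long v = 0x123456789abcdef0ULL;

	assert(OLD_GET_BITFIELD(v, 6, 25) == NEW_GET_BITFIELD(v, 6, 25));
	assert(OLD_GET_BITFIELD(v, 0, 0) == NEW_GET_BITFIELD(v, 0, 0));
	assert(OLD_GET_BITFIELD(v, 16, 31) == NEW_GET_BITFIELD(v, 16, 31));
	printf("bits 6..25 of 0x%llx = 0x%llx\n", v,
	       NEW_GET_BITFIELD(v, 6, 25));
	return 0;
}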
/*
* sbridge Memory Controller Registers
@@ -83,11 +83,17 @@ static int probed;
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
-static const u32 dram_rule[] = {
+static const u32 sbridge_dram_rule[] = {
0x80, 0x88, 0x90, 0x98, 0xa0,
0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};
-#define MAX_SAD ARRAY_SIZE(dram_rule)
+
+static const u32 ibridge_dram_rule[] = {
+ 0x60, 0x68, 0x70, 0x78, 0x80,
+ 0x88, 0x90, 0x98, 0xa0, 0xa8,
+ 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
+ 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
+};
#define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
#define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
@@ -108,43 +114,50 @@ static char *get_dram_attr(u32 reg)
}
}
-static const u32 interleave_list[] = {
+static const u32 sbridge_interleave_list[] = {
0x84, 0x8c, 0x94, 0x9c, 0xa4,
0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};
-#define MAX_INTERLEAVE ARRAY_SIZE(interleave_list)
-
-#define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2)
-#define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5)
-#define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10)
-#define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13)
-#define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18)
-#define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21)
-#define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26)
-#define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29)
-
-static inline int sad_pkg(u32 reg, int interleave)
+
+static const u32 ibridge_interleave_list[] = {
+ 0x64, 0x6c, 0x74, 0x7c, 0x84,
+ 0x8c, 0x94, 0x9c, 0xa4, 0xac,
+ 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
+ 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
+};
+
+struct interleave_pkg {
+ unsigned char start;
+ unsigned char end;
+};
+
+static const struct interleave_pkg sbridge_interleave_pkg[] = {
+ { 0, 2 },
+ { 3, 5 },
+ { 8, 10 },
+ { 11, 13 },
+ { 16, 18 },
+ { 19, 21 },
+ { 24, 26 },
+ { 27, 29 },
+};
+
+static const struct interleave_pkg ibridge_interleave_pkg[] = {
+ { 0, 3 },
+ { 4, 7 },
+ { 8, 11 },
+ { 12, 15 },
+ { 16, 19 },
+ { 20, 23 },
+ { 24, 27 },
+ { 28, 31 },
+};
+
+static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
+ int interleave)
{
- switch (interleave) {
- case 0:
- return SAD_PKG0(reg);
- case 1:
- return SAD_PKG1(reg);
- case 2:
- return SAD_PKG2(reg);
- case 3:
- return SAD_PKG3(reg);
- case 4:
- return SAD_PKG4(reg);
- case 5:
- return SAD_PKG5(reg);
- case 6:
- return SAD_PKG6(reg);
- case 7:
- return SAD_PKG7(reg);
- default:
- return -EINVAL;
- }
+ return GET_BITFIELD(reg, table[interleave].start,
+ table[interleave].end);
}
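/*
 * Editor's note (illustrative, not part of the patch): the table-driven
 * sad_pkg() above replaces the fixed SAD_PKG0..SAD_PKG7 macros so that Ivy
 * Bridge can use different interleave bit positions.  A standalone check that
 * the Sandy Bridge table decodes a sample register the same way the removed
 * macros did (GET_BITFIELD here is a simplified 32-bit stand-in):
 */
#include <assert.h>
#include <stdio.h>

#define GET_BITFIELD(v, lo, hi) \
	(((v) >> (lo)) & ((1u << ((hi) - (lo) + 1)) - 1))

struct interleave_pkg { unsigned char start, end; };

static const struct interleave_pkg sbridge_pkg_model[] = {
	{ 0, 2 }, { 3, 5 }, { 8, 10 }, { 11, 13 },
	{ 16, 18 }, { 19, 21 }, { 24, 26 }, { 27, 29 },
};

static unsigned int sad_pkg_model(const struct interleave_pkg *t,
				  unsigned int reg, int interleave)
{
	return GET_BITFIELD(reg, t[interleave].start, t[interleave].end);
}

int main(void)
{
	unsigned int reg = 0x12345678;	/* arbitrary sample value */

	/* old SAD_PKG0(reg) was GET_BITFIELD(reg, 0, 2), SAD_PKG3 was 11..13 */
	assert(sad_pkg_model(sbridge_pkg_model, reg, 0) == GET_BITFIELD(reg, 0, 2));
	assert(sad_pkg_model(sbridge_pkg_model, reg, 3) == GET_BITFIELD(reg, 11, 13));
	printf("interleave 0 of 0x%x -> package %u\n", reg,
	       sad_pkg_model(sbridge_pkg_model, reg, 0));
	return 0;
}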
/* Devices 12 Function 7 */
@@ -262,7 +275,9 @@ static const u32 correrrthrsld[] = {
/* Device 17, function 0 */
-#define RANK_CFG_A 0x0328
+#define SB_RANK_CFG_A 0x0328
+
+#define IB_RANK_CFG_A 0x0320
#define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11)
@@ -273,8 +288,23 @@ static const u32 correrrthrsld[] = {
#define NUM_CHANNELS 4
#define MAX_DIMMS 3 /* Max DIMMS per channel */
+enum type {
+ SANDY_BRIDGE,
+ IVY_BRIDGE,
+};
+
+struct sbridge_pvt;
struct sbridge_info {
- u32 mcmtr;
+ enum type type;
+ u32 mcmtr;
+ u32 rankcfgr;
+ u64 (*get_tolm)(struct sbridge_pvt *pvt);
+ u64 (*get_tohm)(struct sbridge_pvt *pvt);
+ const u32 *dram_rule;
+ const u32 *interleave_list;
+ const struct interleave_pkg *interleave_pkg;
+ u8 max_sad;
+ u8 max_interleave;
};
struct sbridge_channel {
@@ -305,8 +335,9 @@ struct sbridge_dev {
struct sbridge_pvt {
struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
- struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0;
- struct pci_dev *pci_br;
+ struct pci_dev *pci_sad0, *pci_sad1;
+ struct pci_dev *pci_ha0, *pci_ha1;
+ struct pci_dev *pci_br0, *pci_br1;
struct pci_dev *pci_tad[NUM_CHANNELS];
struct sbridge_dev *sbridge_dev;
@@ -364,11 +395,75 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
{0,} /* 0 terminated list. */
};
+/* This changes depending on whether it is 1HA or 2HA:
+ * 1HA:
+ * 0x0eb8 (17.0) is DDRIO0
+ * 2HA:
+ * 0x0ebc (17.4) is DDRIO0
+ */
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
+
+/* pci ids */
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
+#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
+
+static const struct pci_id_descr pci_dev_descr_ibridge[] = {
+ /* Processor Home Agent */
+ { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
+
+ /* Memory controller */
+ { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
+ { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
+ { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
+ { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
+ { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
+ { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
+
+ /* System Address Decoder */
+ { PCI_DESCR(22, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
+
+ /* Broadcast Registers */
+ { PCI_DESCR(22, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
+ { PCI_DESCR(22, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
+
+ /* Optional, mode 2HA */
+ { PCI_DESCR(28, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
+#if 0
+ { PCI_DESCR(29, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
+ { PCI_DESCR(29, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
+#endif
+ { PCI_DESCR(29, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
+ { PCI_DESCR(29, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
+
+ { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
+ { PCI_DESCR(17, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
+};
+
+static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+ {0,} /* 0 terminated list. */
+};
+
/*
* pci_device_id table for which devices we are looking for
*/
static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
{0,} /* 0 terminated list. */
};
@@ -458,6 +553,52 @@ static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
kfree(sbridge_dev);
}
+static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+
+ /* Address range is 32:28 */
+ pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
+ return GET_TOLM(reg);
+}
+
+static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
+ return GET_TOHM(reg);
+}
+
+static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
+
+ return GET_TOLM(reg);
+}
+
+static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
+
+ return GET_TOHM(reg);
+}
+
+static inline u8 sad_pkg_socket(u8 pkg)
+{
+ /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
+ return (pkg >> 3) | (pkg & 0x3);
+}
+
+static inline u8 sad_pkg_ha(u8 pkg)
+{
+ return (pkg >> 2) & 0x1;
+}
+
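/*
 * Editor's note (illustrative, not part of the patch): a standalone run of the
 * two helpers above, showing how a raw interleave package value splits into a
 * socket number and a home-agent index on Ivy Bridge:
 */
#include <stdio.h>

static unsigned char sad_pkg_socket(unsigned char pkg)
{
	/* same expression as above: nodeID is SASS, A = HA, S = socket bits */
	return (pkg >> 3) | (pkg & 0x3);
}

static unsigned char sad_pkg_ha(unsigned char pkg)
{
	return (pkg >> 2) & 0x1;
}

int main(void)
{
	unsigned char pkg;

	for (pkg = 0; pkg < 16; pkg++)
		printf("pkg %2u -> socket %u, HA %u\n",
		       pkg, sad_pkg_socket(pkg), sad_pkg_ha(pkg));
	return 0;
}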
/****************************************************************************
Memory check routines
****************************************************************************/
@@ -520,10 +661,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
enum edac_type mode;
enum mem_type mtype;
- pci_read_config_dword(pvt->pci_br, SAD_TARGET, &reg);
+ pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
pvt->sbridge_dev->source_id = SOURCE_ID(reg);
- pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
+ pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
pvt->sbridge_dev->node_id = NODE_ID(reg);
edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
pvt->sbridge_dev->mc,
@@ -558,7 +699,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
}
if (pvt->pci_ddrio) {
- pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
+ pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
+ &reg);
if (IS_RDIMM_ENABLED(reg)) {
/* FIXME: Can also be LRDIMM */
edac_dbg(0, "Memory is registered\n");
@@ -629,19 +771,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
* Step 1) Get TOLM/TOHM ranges
*/
- /* Address range is 32:28 */
- pci_read_config_dword(pvt->pci_sad1, TOLM,
- &reg);
- pvt->tolm = GET_TOLM(reg);
+ pvt->tolm = pvt->info.get_tolm(pvt);
tmp_mb = (1 + pvt->tolm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
/* Address range is already 45:25 */
- pci_read_config_dword(pvt->pci_sad1, TOHM,
- &reg);
- pvt->tohm = GET_TOHM(reg);
+ pvt->tohm = pvt->info.get_tohm(pvt);
tmp_mb = (1 + pvt->tohm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
@@ -654,9 +791,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
* algorithm below.
*/
prv = 0;
- for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
+ for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
/* SAD_LIMIT Address range is 45:26 */
- pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
+ pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
&reg);
limit = SAD_LIMIT(reg);
@@ -677,15 +814,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
reg);
prv = limit;
- pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
+ pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
&reg);
- sad_interl = sad_pkg(reg, 0);
+ sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
for (j = 0; j < 8; j++) {
- if (j > 0 && sad_interl == sad_pkg(reg, j))
+ u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
+ if (j > 0 && sad_interl == pkg)
break;
edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
- n_sads, j, sad_pkg(reg, j));
+ n_sads, j, pkg);
}
}
@@ -797,12 +935,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pci_ha;
int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
int interleave_mode;
- unsigned sad_interleave[MAX_INTERLEAVE];
+ unsigned sad_interleave[pvt->info.max_interleave];
u32 reg;
- u8 ch_way,sck_way;
+ u8 ch_way, sck_way, pkg, sad_ha = 0;
u32 tad_offset;
u32 rir_way;
u32 mb, kb;
@@ -828,8 +967,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
/*
* Step 1) Get socket
*/
- for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
- pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
+ for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
+ pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
&reg);
if (!DRAM_RULE_ENABLE(reg))
@@ -844,53 +983,65 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
prv = limit;
}
- if (n_sads == MAX_SAD) {
+ if (n_sads == pvt->info.max_sad) {
sprintf(msg, "Can't discover the memory socket");
return -EINVAL;
}
*area_type = get_dram_attr(reg);
interleave_mode = INTERLEAVE_MODE(reg);
- pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
+ pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
&reg);
- sad_interl = sad_pkg(reg, 0);
- for (sad_way = 0; sad_way < 8; sad_way++) {
- if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
+
+ if (pvt->info.type == SANDY_BRIDGE) {
+ sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
+ for (sad_way = 0; sad_way < 8; sad_way++) {
+ u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
+ if (sad_way > 0 && sad_interl == pkg)
+ break;
+ sad_interleave[sad_way] = pkg;
+ edac_dbg(0, "SAD interleave #%d: %d\n",
+ sad_way, sad_interleave[sad_way]);
+ }
+ edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
+ pvt->sbridge_dev->mc,
+ n_sads,
+ addr,
+ limit,
+ sad_way + 7,
+ !interleave_mode ? "" : "XOR[18:16]");
+ if (interleave_mode)
+ idx = ((addr >> 6) ^ (addr >> 16)) & 7;
+ else
+ idx = (addr >> 6) & 7;
+ switch (sad_way) {
+ case 1:
+ idx = 0;
break;
- sad_interleave[sad_way] = sad_pkg(reg, sad_way);
- edac_dbg(0, "SAD interleave #%d: %d\n",
- sad_way, sad_interleave[sad_way]);
- }
- edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
- pvt->sbridge_dev->mc,
- n_sads,
- addr,
- limit,
- sad_way + 7,
- interleave_mode ? "" : "XOR[18:16]");
- if (interleave_mode)
- idx = ((addr >> 6) ^ (addr >> 16)) & 7;
- else
+ case 2:
+ idx = idx & 1;
+ break;
+ case 4:
+ idx = idx & 3;
+ break;
+ case 8:
+ break;
+ default:
+ sprintf(msg, "Can't discover socket interleave");
+ return -EINVAL;
+ }
+ *socket = sad_interleave[idx];
+ edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
+ idx, sad_way, *socket);
+ } else {
+ /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
idx = (addr >> 6) & 7;
- switch (sad_way) {
- case 1:
- idx = 0;
- break;
- case 2:
- idx = idx & 1;
- break;
- case 4:
- idx = idx & 3;
- break;
- case 8:
- break;
- default:
- sprintf(msg, "Can't discover socket interleave");
- return -EINVAL;
+ pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
+ *socket = sad_pkg_socket(pkg);
+ sad_ha = sad_pkg_ha(pkg);
+ edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
+ idx, *socket, sad_ha);
}
- *socket = sad_interleave[idx];
- edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
- idx, sad_way, *socket);
/*
* Move to the proper node structure, in order to access the
@@ -909,9 +1060,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
* Step 2) Get memory channel
*/
prv = 0;
+ if (pvt->info.type == SANDY_BRIDGE)
+ pci_ha = pvt->pci_ha0;
+ else {
+ if (sad_ha)
+ pci_ha = pvt->pci_ha1;
+ else
+ pci_ha = pvt->pci_ha0;
+ }
for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
- pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
- &reg);
+ pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
limit = TAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel");
@@ -921,14 +1079,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
prv = limit;
}
+ if (n_tads == MAX_TAD) {
+ sprintf(msg, "Can't discover the memory channel");
+ return -EINVAL;
+ }
+
ch_way = TAD_CH(reg) + 1;
sck_way = TAD_SOCK(reg) + 1;
- /*
- * FIXME: Is it right to always use channel 0 for offsets?
- */
- pci_read_config_dword(pvt->pci_tad[0],
- tad_ch_nilv_offset[n_tads],
- &tad_offset);
if (ch_way == 3)
idx = addr >> 6;
@@ -958,6 +1115,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
*channel_mask = 1 << base_ch;
+ pci_read_config_dword(pvt->pci_tad[base_ch],
+ tad_ch_nilv_offset[n_tads],
+ &tad_offset);
+
if (pvt->is_mirrored) {
*channel_mask |= 1 << ((base_ch + 2) % 4);
switch(ch_way) {
@@ -1091,12 +1252,6 @@ static void sbridge_put_all_devices(void)
}
}
-/*
- * sbridge_get_all_devices Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver
- *
- * Need to 'get' device 16 func 1 and func 2
- */
static int sbridge_get_onedevice(struct pci_dev **prev,
u8 *num_mc,
const struct pci_id_table *table,
@@ -1198,11 +1353,21 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
return 0;
}
-static int sbridge_get_all_devices(u8 *num_mc)
+/*
+ * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver.
+ * Need to 'get' device 16 func 1 and func 2.
+ * @num_mc: pointer to the memory controllers count, to be incremented in case
+ * of success.
+ * @table: model specific table
+ *
+ * returns 0 in case of success or error code
+ */
+static int sbridge_get_all_devices(u8 *num_mc,
+ const struct pci_id_table *table)
{
int i, rc;
struct pci_dev *pdev = NULL;
- const struct pci_id_table *table = pci_dev_descr_sbridge_table;
while (table && table->descr) {
for (i = 0; i < table->n_devs; i++) {
@@ -1226,8 +1391,8 @@ static int sbridge_get_all_devices(u8 *num_mc)
return 0;
}
-static int mci_bind_devs(struct mem_ctl_info *mci,
- struct sbridge_dev *sbridge_dev)
+static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
+ struct sbridge_dev *sbridge_dev)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
@@ -1255,7 +1420,7 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
case 13:
switch (func) {
case 6:
- pvt->pci_br = pdev;
+ pvt->pci_br0 = pdev;
break;
default:
goto error;
@@ -1329,6 +1494,131 @@ error:
return -EINVAL;
}
+static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
+ struct sbridge_dev *sbridge_dev)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev, *tmp;
+ int i, func, slot;
+ bool mode_2ha = false;
+
+ tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL);
+ if (tmp) {
+ mode_2ha = true;
+ pci_dev_put(tmp);
+ }
+
+ for (i = 0; i < sbridge_dev->n_devs; i++) {
+ pdev = sbridge_dev->pdev[i];
+ if (!pdev)
+ continue;
+ slot = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+
+ switch (slot) {
+ case 14:
+ if (func == 0) {
+ pvt->pci_ha0 = pdev;
+ break;
+ }
+ goto error;
+ case 15:
+ switch (func) {
+ case 0:
+ pvt->pci_ta = pdev;
+ break;
+ case 1:
+ pvt->pci_ras = pdev;
+ break;
+ case 4:
+ case 5:
+ /* if we have 2 HAs active, channels 2 and 3
+ * are in other device */
+ if (mode_2ha)
+ break;
+ /* fall through */
+ case 2:
+ case 3:
+ pvt->pci_tad[func - 2] = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 17:
+ if (func == 4) {
+ pvt->pci_ddrio = pdev;
+ break;
+ } else if (func == 0) {
+ if (!mode_2ha)
+ pvt->pci_ddrio = pdev;
+ break;
+ }
+ goto error;
+ case 22:
+ switch (func) {
+ case 0:
+ pvt->pci_sad0 = pdev;
+ break;
+ case 1:
+ pvt->pci_br0 = pdev;
+ break;
+ case 2:
+ pvt->pci_br1 = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 28:
+ if (func == 0) {
+ pvt->pci_ha1 = pdev;
+ break;
+ }
+ goto error;
+ case 29:
+ /* we shouldn't have this device if we have just one
+ * HA present */
+ WARN_ON(!mode_2ha);
+ if (func == 2 || func == 3) {
+ pvt->pci_tad[func] = pdev;
+ break;
+ }
+ goto error;
+ default:
+ goto error;
+ }
+
+ edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
+ sbridge_dev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev);
+ }
+
+ /* Check if everything was registered */
+ if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
+ !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras ||
+ !pvt->pci_ta)
+ goto enodev;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!pvt->pci_tad[i])
+ goto enodev;
+ }
+ return 0;
+
+enodev:
+ sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
+ return -ENODEV;
+
+error:
+ sbridge_printk(KERN_ERR,
+ "Device %d, function %d is out of the expected range\n",
+ slot, func);
+ return -EINVAL;
+}
+
/****************************************************************************
Error check routines
****************************************************************************/
@@ -1349,7 +1639,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
- bool recoverable = GET_BITFIELD(m->status, 56, 56);
+ bool recoverable;
u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
u32 mscod = GET_BITFIELD(m->status, 16, 31);
u32 errcode = GET_BITFIELD(m->status, 0, 15);
@@ -1360,6 +1650,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
int rc, dimm;
char *area_type = NULL;
+ if (pvt->info.type == IVY_BRIDGE)
+ recoverable = true;
+ else
+ recoverable = GET_BITFIELD(m->status, 56, 56);
+
if (uncorrected_error) {
if (ripv) {
type = "FATAL";
@@ -1409,6 +1704,10 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
}
}
+ /* Only decode errors with a valid address (ADDRV) */
+ if (!GET_BITFIELD(m->status, 58, 58))
+ return;
+
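/*
 * Editor's note (illustrative, not part of the patch): the new test above
 * skips address decoding when ADDRV (bit 58 of the machine-check status word)
 * is clear.  A standalone sketch pulling out the same flags, using only the
 * bit positions already referenced in this function:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MC_BITFIELD(v, lo, hi) \
	(((v) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

int main(void)
{
	uint64_t status = (1ULL << 61) | (1ULL << 58);	/* made-up sample */
	bool uncorrected = MC_BITFIELD(status, 61, 61);
	bool overflow    = MC_BITFIELD(status, 62, 62);
	bool addr_valid  = MC_BITFIELD(status, 58, 58);	/* ADDRV */
	bool recoverable = MC_BITFIELD(status, 56, 56);	/* Sandy Bridge path */

	printf("UC=%d OVER=%d ADDRV=%d RECOV=%d\n",
	       uncorrected, overflow, addr_valid, recoverable);
	return 0;
}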
rc = get_memory_error_data(mci, m->addr, &socket,
&channel_mask, &rank, &area_type, msg);
if (rc < 0)
@@ -1614,11 +1913,12 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
sbridge_dev->mci = NULL;
}
-static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
+static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct sbridge_pvt *pvt;
+ struct pci_dev *pdev = sbridge_dev->pdev[0];
int rc;
/* Check the number of active and not disabled channels */
@@ -1640,7 +1940,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
return -ENOMEM;
edac_dbg(0, "MC: mci = %p, dev = %p\n",
- mci, &sbridge_dev->pdev[0]->dev);
+ mci, &pdev->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
@@ -1654,24 +1954,52 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "sbridge_edac.c";
mci->mod_ver = SBRIDGE_REVISION;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
- mci->dev_name = pci_name(sbridge_dev->pdev[0]);
+ mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
/* Set the function pointer to an actual operation function */
mci->edac_check = sbridge_check_error;
- /* Store pci devices at mci for faster access */
- rc = mci_bind_devs(mci, sbridge_dev);
- if (unlikely(rc < 0))
- goto fail0;
+ pvt->info.type = type;
+ if (type == IVY_BRIDGE) {
+ pvt->info.rankcfgr = IB_RANK_CFG_A;
+ pvt->info.get_tolm = ibridge_get_tolm;
+ pvt->info.get_tohm = ibridge_get_tohm;
+ pvt->info.dram_rule = ibridge_dram_rule;
+ pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
+ pvt->info.interleave_list = ibridge_interleave_list;
+ pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
+ pvt->info.interleave_pkg = ibridge_interleave_pkg;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
+
+ /* Store pci devices at mci for faster access */
+ rc = ibridge_mci_bind_devs(mci, sbridge_dev);
+ if (unlikely(rc < 0))
+ goto fail0;
+ } else {
+ pvt->info.rankcfgr = SB_RANK_CFG_A;
+ pvt->info.get_tolm = sbridge_get_tolm;
+ pvt->info.get_tohm = sbridge_get_tohm;
+ pvt->info.dram_rule = sbridge_dram_rule;
+ pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
+ pvt->info.interleave_list = sbridge_interleave_list;
+ pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
+ pvt->info.interleave_pkg = sbridge_interleave_pkg;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
+
+ /* Store pci devices at mci for faster access */
+ rc = sbridge_mci_bind_devs(mci, sbridge_dev);
+ if (unlikely(rc < 0))
+ goto fail0;
+ }
+
/* Get dimm basic config and the memory layout */
get_dimm_config(mci);
get_memory_layout(mci);
/* record ptr to the generic device */
- mci->pdev = &sbridge_dev->pdev[0]->dev;
+ mci->pdev = &pdev->dev;
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
@@ -1702,6 +2030,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int rc;
u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev;
+ enum type type;
/* get the pci devices we want to reserve for our use */
mutex_lock(&sbridge_edac_lock);
@@ -1715,7 +2044,13 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
probed++;
- rc = sbridge_get_all_devices(&num_mc);
+ if (pdev->device == PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA) {
+ rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
+ type = IVY_BRIDGE;
+ } else {
+ rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
+ type = SANDY_BRIDGE;
+ }
if (unlikely(rc < 0))
goto fail0;
mc = 0;
@@ -1724,7 +2059,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
edac_dbg(0, "Registering MC#%d (%d of %d)\n",
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev);
+ rc = sbridge_register_mci(sbridge_dev, type);
if (unlikely(rc < 0))
goto fail1;
}
@@ -1839,5 +2174,5 @@ MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
-MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - "
+MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
SBRIDGE_REVISION);
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 5985807e52c..e23f1c2e505 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -27,16 +27,16 @@
/**
* struct adc_jack_data - internal data for adc_jack device driver
- * @edev - extcon device.
- * @cable_names - list of supported cables.
- * @num_cables - size of cable_names.
- * @adc_conditions - list of adc value conditions.
- * @num_conditions - size of adc_conditions.
- * @irq - irq number of attach/detach event (0 if not exist).
- * @handling_delay - interrupt handler will schedule extcon event
- * handling at handling_delay jiffies.
- * @handler - extcon event handler called by interrupt handler.
- * @chan - iio channel being queried.
+ * @edev: extcon device.
+ * @cable_names: list of supported cables.
+ * @num_cables: size of cable_names.
+ * @adc_conditions: list of adc value conditions.
+ * @num_conditions: size of adc_conditions.
+ * @irq: irq number of attach/detach event (0 if not exist).
+ * @handling_delay: interrupt handler will schedule extcon event
+ * handling at handling_delay jiffies.
+ * @handler: extcon event handler called by interrupt handler.
+ * @chan: iio channel being queried.
*/
struct adc_jack_data {
struct extcon_dev edev;
@@ -64,7 +64,7 @@ static void adc_jack_handler(struct work_struct *work)
ret = iio_read_channel_raw(data->chan, &adc_val);
if (ret < 0) {
- dev_err(data->edev.dev, "read channel() error: %d\n", ret);
+ dev_err(&data->edev.dev, "read channel() error: %d\n", ret);
return;
}
@@ -95,7 +95,7 @@ static irqreturn_t adc_jack_irq_thread(int irq, void *_data)
static int adc_jack_probe(struct platform_device *pdev)
{
struct adc_jack_data *data;
- struct adc_jack_pdata *pdata = pdev->dev.platform_data;
+ struct adc_jack_pdata *pdata = dev_get_platdata(&pdev->dev);
int i, err = 0;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -110,6 +110,7 @@ static int adc_jack_probe(struct platform_device *pdev)
goto out;
}
+ data->edev.dev.parent = &pdev->dev;
data->edev.supported_cable = pdata->cable_names;
/* Check the length of array and set num_cables */
@@ -148,7 +149,7 @@ static int adc_jack_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- err = extcon_dev_register(&data->edev, &pdev->dev);
+ err = extcon_dev_register(&data->edev);
if (err)
goto out;
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index e55713083c7..3c55ec856e3 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -86,8 +86,8 @@ struct arizona_extcon_info {
};
static const struct arizona_micd_config micd_default_modes[] = {
- { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
- { 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
+ { ARIZONA_ACCDET_SRC, 1, 0 },
+ { 0, 2, 1 },
};
static const struct arizona_micd_range micd_default_ranges[] = {
@@ -182,7 +182,8 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
info->micd_modes[mode].gpio);
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_BIAS_SRC_MASK,
- info->micd_modes[mode].bias);
+ info->micd_modes[mode].bias <<
+ ARIZONA_MICD_BIAS_SRC_SHIFT);
regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
ARIZONA_ACCDET_SRC, info->micd_modes[mode].src);
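/*
 * Editor's note (illustrative, not part of the patch): micd_modes[].bias now
 * holds the raw bias-source number and the field shift is applied only at the
 * regmap_update_bits() call above.  A standalone model of why the shift
 * matters -- the mask and shift values below are hypothetical placeholders,
 * not the real ARIZONA_MICD_BIAS_SRC_* definitions:
 */
#include <stdio.h>

#define DEMO_BIAS_SRC_SHIFT	12
#define DEMO_BIAS_SRC_MASK	(0x3 << DEMO_BIAS_SRC_SHIFT)

/* same read-modify-write semantics as regmap_update_bits() */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0xffff, bias = 2;

	printf("unshifted value: 0x%04x (field cleared, bias lost)\n",
	       update_bits(reg, DEMO_BIAS_SRC_MASK, bias));
	printf("shifted value:   0x%04x (field set to %u)\n",
	       update_bits(reg, DEMO_BIAS_SRC_MASK, bias << DEMO_BIAS_SRC_SHIFT),
	       bias);
	return 0;
}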
@@ -193,7 +194,7 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
static const char *arizona_extcon_get_micbias(struct arizona_extcon_info *info)
{
- switch (info->micd_modes[0].bias >> ARIZONA_MICD_BIAS_SRC_SHIFT) {
+ switch (info->micd_modes[0].bias) {
case 1:
return "MICBIAS1";
case 2:
@@ -388,7 +389,7 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
>> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
- (val < 100 || val > 0x3fb)) {
+ (val < 100 || val >= 0x3fb)) {
range++;
dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
range);
@@ -401,7 +402,7 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
}
/* If we go out of range report top of range */
- if (val < 100 || val > 0x3fb) {
+ if (val < 100 || val >= 0x3fb) {
dev_dbg(arizona->dev, "Measurement out of range\n");
return ARIZONA_HPDET_MAX;
}
@@ -514,7 +515,7 @@ static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading,
}
/*
- * If we measure the mic as
+ * If we measure the mic as high impedance
*/
if (!id_gpio || info->hpdet_res[1] > 50) {
dev_dbg(arizona->dev, "Detected mic\n");
@@ -564,11 +565,10 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
}
ret = arizona_hpdet_read(info);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN)
goto out;
- } else if (ret < 0) {
+ else if (ret < 0)
goto done;
- }
reading = ret;
/* Reset back to starting range */
@@ -578,11 +578,10 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
0);
ret = arizona_hpdet_do_id(info, &reading, &mic);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN)
goto out;
- } else if (ret < 0) {
+ else if (ret < 0)
goto done;
- }
/* Report high impedance cables as line outputs */
if (reading >= 5000)
@@ -738,8 +737,8 @@ err:
static void arizona_micd_timeout_work(struct work_struct *work)
{
struct arizona_extcon_info *info = container_of(work,
- struct arizona_extcon_info,
- micd_timeout_work.work);
+ struct arizona_extcon_info,
+ micd_timeout_work.work);
mutex_lock(&info->lock);
@@ -756,8 +755,8 @@ static void arizona_micd_timeout_work(struct work_struct *work)
static void arizona_micd_detect(struct work_struct *work)
{
struct arizona_extcon_info *info = container_of(work,
- struct arizona_extcon_info,
- micd_detect_work.work);
+ struct arizona_extcon_info,
+ micd_detect_work.work);
struct arizona *arizona = info->arizona;
unsigned int val = 0, lvl;
int ret, i, key;
@@ -769,7 +768,8 @@ static void arizona_micd_detect(struct work_struct *work)
for (i = 0; i < 10 && !(val & 0x7fc); i++) {
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
- dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
+ dev_err(arizona->dev,
+ "Failed to read MICDET: %d\n", ret);
mutex_unlock(&info->lock);
return;
}
@@ -777,7 +777,8 @@ static void arizona_micd_detect(struct work_struct *work)
dev_dbg(arizona->dev, "MICDET: %x\n", val);
if (!(val & ARIZONA_MICD_VALID)) {
- dev_warn(arizona->dev, "Microphone detection state invalid\n");
+ dev_warn(arizona->dev,
+ "Microphone detection state invalid\n");
mutex_unlock(&info->lock);
return;
}
@@ -925,8 +926,8 @@ static irqreturn_t arizona_micdet(int irq, void *data)
static void arizona_hpdet_work(struct work_struct *work)
{
struct arizona_extcon_info *info = container_of(work,
- struct arizona_extcon_info,
- hpdet_work.work);
+ struct arizona_extcon_info,
+ hpdet_work.work);
mutex_lock(&info->lock);
arizona_start_hpdet_acc_id(info);
@@ -973,10 +974,13 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
&info->hpdet_work,
msecs_to_jiffies(HPDET_DEBOUNCE));
- if (cancelled_mic)
+ if (cancelled_mic) {
+ int micd_timeout = info->micd_timeout;
+
queue_delayed_work(system_power_efficient_wq,
&info->micd_timeout_work,
- msecs_to_jiffies(info->micd_timeout));
+ msecs_to_jiffies(micd_timeout));
+ }
goto out;
}
@@ -1039,6 +1043,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
else
info->micd_timeout = DEFAULT_MICD_TIMEOUT;
+out:
/* Clear trig_sts to make sure DCVDD is not forced up */
regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
ARIZONA_MICD_CLAMP_FALL_TRIG_STS |
@@ -1046,7 +1051,6 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
ARIZONA_JD1_FALL_TRIG_STS |
ARIZONA_JD1_RISE_TRIG_STS);
-out:
mutex_unlock(&info->lock);
pm_runtime_mark_last_busy(info->dev);
@@ -1129,9 +1133,10 @@ static int arizona_extcon_probe(struct platform_device *pdev)
}
info->edev.name = "Headset Jack";
+ info->edev.dev.parent = arizona->dev;
info->edev.supported_cable = arizona_cable;
- ret = extcon_dev_register(&info->edev, arizona->dev);
+ ret = extcon_dev_register(&info->edev);
if (ret < 0) {
dev_err(arizona->dev, "extcon_dev_register() failed: %d\n",
ret);
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 148382faded..15443d3b6be 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -74,7 +74,7 @@ static DEFINE_MUTEX(extcon_dev_list_lock);
/**
* check_mutually_exclusive - Check if new_state violates mutually_exclusive
- * condition.
+ * condition.
* @edev: the extcon device
* @new_state: new cable attach status for @edev
*
@@ -105,7 +105,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int i, count = 0;
- struct extcon_dev *edev = (struct extcon_dev *) dev_get_drvdata(dev);
+ struct extcon_dev *edev = dev_get_drvdata(dev);
if (edev->print_state) {
int ret = edev->print_state(edev, buf);
@@ -129,13 +129,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
return count;
}
-int extcon_set_state(struct extcon_dev *edev, u32 state);
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 state;
ssize_t ret = 0;
- struct extcon_dev *edev = (struct extcon_dev *) dev_get_drvdata(dev);
+ struct extcon_dev *edev = dev_get_drvdata(dev);
ret = sscanf(buf, "0x%x", &state);
if (ret == 0)
@@ -153,7 +152,7 @@ static DEVICE_ATTR_RW(state);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct extcon_dev *edev = (struct extcon_dev *) dev_get_drvdata(dev);
+ struct extcon_dev *edev = dev_get_drvdata(dev);
/* Optional callback given by the user */
if (edev->print_name) {
@@ -162,7 +161,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
return ret;
}
- return sprintf(buf, "%s\n", dev_name(edev->dev));
+ return sprintf(buf, "%s\n", dev_name(&edev->dev));
}
static DEVICE_ATTR_RO(name);
@@ -189,7 +188,7 @@ static ssize_t cable_state_show(struct device *dev,
/**
* extcon_update_state() - Update the cable attach states of the extcon device
- * only for the masked bits.
+ * only for the masked bits.
* @edev: the extcon device
* @mask: the bit mask to designate updated bits.
* @state: new cable attach status for @edev
@@ -227,11 +226,10 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
edev->state |= state & mask;
raw_notifier_call_chain(&edev->nh, old_state, edev);
-
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (prop_buf) {
- length = name_show(edev->dev, NULL, prop_buf);
+ length = name_show(&edev->dev, NULL, prop_buf);
if (length > 0) {
if (prop_buf[length - 1] == '\n')
prop_buf[length - 1] = 0;
@@ -239,7 +237,7 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
"NAME=%s", prop_buf);
envp[env_offset++] = name_buf;
}
- length = state_show(edev->dev, NULL, prop_buf);
+ length = state_show(&edev->dev, NULL, prop_buf);
if (length > 0) {
if (prop_buf[length - 1] == '\n')
prop_buf[length - 1] = 0;
@@ -251,14 +249,14 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
/* Unlock early before uevent */
spin_unlock_irqrestore(&edev->lock, flags);
- kobject_uevent_env(&edev->dev->kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&edev->dev.kobj, KOBJ_CHANGE, envp);
free_page((unsigned long)prop_buf);
} else {
/* Unlock early before uevent */
spin_unlock_irqrestore(&edev->lock, flags);
- dev_err(edev->dev, "out of memory in extcon_set_state\n");
- kobject_uevent(&edev->dev->kobj, KOBJ_CHANGE);
+ dev_err(&edev->dev, "out of memory in extcon_set_state\n");
+ kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
}
} else {
/* No changes */
@@ -339,8 +337,9 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state);
/**
* extcon_set_cable_state_() - Set the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @index: cable index that can be retrieved by extcon_find_cable_index().
+ * @edev: the extcon device that has the cable.
+ * @index: cable index that can be retrieved by
+ * extcon_find_cable_index().
* @cable_state: the new cable status. The default semantics is
* true: attached / false: detached.
*/
@@ -359,8 +358,8 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
/**
* extcon_set_cable_state() - Set the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @cable_name: cable name.
+ * @edev: the extcon device that has the cable.
+ * @cable_name: cable name.
* @cable_state: the new cable status. The default semantics is
* true: attached / false: detached.
*
@@ -419,14 +418,14 @@ static int _call_per_cable(struct notifier_block *nb, unsigned long val,
/**
* extcon_register_interest() - Register a notifier for a state change of a
- * specific cable, not an entier set of cables of a
- * extcon device.
- * @obj: an empty extcon_specific_cable_nb object to be returned.
+ * specific cable, not an entier set of cables of a
+ * extcon device.
+ * @obj: an empty extcon_specific_cable_nb object to be returned.
* @extcon_name: the name of extcon device.
* if NULL, extcon_register_interest will register
* every cable with the target cable_name given.
* @cable_name: the target cable name.
- * @nb: the notifier block to get notified.
+ * @nb: the notifier block to get notified.
*
* Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets
* the struct for you.
@@ -452,7 +451,8 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
if (!obj->edev)
return -ENODEV;
- obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
+ obj->cable_index = extcon_find_cable_index(obj->edev,
+ cable_name);
if (obj->cable_index < 0)
return obj->cable_index;
@@ -460,7 +460,8 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
obj->internal_nb.notifier_call = _call_per_cable;
- return raw_notifier_chain_register(&obj->edev->nh, &obj->internal_nb);
+ return raw_notifier_chain_register(&obj->edev->nh,
+ &obj->internal_nb);
} else {
struct class_dev_iter iter;
struct extcon_dev *extd;
@@ -470,7 +471,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
return -ENODEV;
class_dev_iter_init(&iter, extcon_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
- extd = (struct extcon_dev *)dev_get_drvdata(dev);
+ extd = dev_get_drvdata(dev);
if (extcon_find_cable_index(extd, cable_name) < 0)
continue;
@@ -487,7 +488,7 @@ EXPORT_SYMBOL_GPL(extcon_register_interest);
/**
* extcon_unregister_interest() - Unregister the notifier registered by
- * extcon_register_interest().
+ * extcon_register_interest().
* @obj: the extcon_specific_cable_nb object returned by
* extcon_register_interest().
*/
@@ -502,7 +503,7 @@ EXPORT_SYMBOL_GPL(extcon_unregister_interest);
/**
* extcon_register_notifier() - Register a notifiee to get notified by
- * any attach status changes from the extcon.
+ * any attach status changes from the extcon.
* @edev: the extcon device.
* @nb: a notifier block to be registered.
*
@@ -556,7 +557,6 @@ static int create_extcon_class(void)
static void extcon_dev_release(struct device *dev)
{
- kfree(dev);
}
static const char *muex_name = "mutually_exclusive";
@@ -567,14 +567,13 @@ static void dummy_sysfs_dev_release(struct device *dev)
/**
* extcon_dev_register() - Register a new extcon device
* @edev : the new extcon device (should be allocated before calling)
- * @dev : the parent device for this extcon device.
*
* Among the members of edev struct, please set the "user initializing data"
* in any case and set the "optional callbacks" if required. However, please
* do not set the values of "internal data", which are initialized by
* this function.
*/
-int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
+int extcon_dev_register(struct extcon_dev *edev)
{
int ret, index = 0;
@@ -594,19 +593,20 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
}
if (index > SUPPORTED_CABLE_MAX) {
- dev_err(edev->dev, "extcon: maximum number of supported cables exceeded.\n");
+ dev_err(&edev->dev, "extcon: maximum number of supported cables exceeded.\n");
return -EINVAL;
}
- edev->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!edev->dev)
- return -ENOMEM;
- edev->dev->parent = dev;
- edev->dev->class = extcon_class;
- edev->dev->release = extcon_dev_release;
+ edev->dev.class = extcon_class;
+ edev->dev.release = extcon_dev_release;
- edev->name = edev->name ? edev->name : dev_name(dev);
- dev_set_name(edev->dev, "%s", edev->name);
+ edev->name = edev->name ? edev->name : dev_name(edev->dev.parent);
+ if (IS_ERR_OR_NULL(edev->name)) {
+ dev_err(&edev->dev,
+ "extcon device name is null\n");
+ return -EINVAL;
+ }
+ dev_set_name(&edev->dev, "%s", edev->name);
if (edev->max_supported) {
char buf[10];
@@ -714,7 +714,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
goto err_alloc_groups;
}
- edev->extcon_dev_type.name = dev_name(edev->dev);
+ edev->extcon_dev_type.name = dev_name(&edev->dev);
edev->extcon_dev_type.release = dummy_sysfs_dev_release;
for (index = 0; index < edev->max_supported; index++)
@@ -724,25 +724,24 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
edev->extcon_dev_type.groups[index] =
&edev->attr_g_muex;
- edev->dev->type = &edev->extcon_dev_type;
+ edev->dev.type = &edev->extcon_dev_type;
}
- ret = device_register(edev->dev);
+ ret = device_register(&edev->dev);
if (ret) {
- put_device(edev->dev);
+ put_device(&edev->dev);
goto err_dev;
}
#if defined(CONFIG_ANDROID)
if (switch_class)
- ret = class_compat_create_link(switch_class, edev->dev,
- NULL);
+ ret = class_compat_create_link(switch_class, &edev->dev, NULL);
#endif /* CONFIG_ANDROID */
spin_lock_init(&edev->lock);
RAW_INIT_NOTIFIER_HEAD(&edev->nh);
- dev_set_drvdata(edev->dev, edev);
+ dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
mutex_lock(&extcon_dev_list_lock);
@@ -768,7 +767,6 @@ err_alloc_cables:
if (edev->max_supported)
kfree(edev->cables);
err_sysfs_alloc:
- kfree(edev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_dev_register);
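/*
 * Editor's note (illustrative, not part of the patch): struct extcon_dev now
 * embeds its struct device, so a client sets edev.dev.parent itself and calls
 * extcon_dev_register() with no parent argument, as the adc-jack, arizona and
 * gpio conversions above do.  A hedged sketch of the new calling convention
 * (all "demo" names are hypothetical):
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/extcon.h>

static const char *demo_cables[] = { "USB", "USB-Host", NULL };

static int demo_extcon_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = devm_kzalloc(&pdev->dev, sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	edev->name = "demo-jack";
	edev->dev.parent = &pdev->dev;	/* was extcon_dev_register()'s 2nd arg */
	edev->supported_cable = demo_cables;

	platform_set_drvdata(pdev, edev);
	return extcon_dev_register(edev);
}

static int demo_extcon_remove(struct platform_device *pdev)
{
	extcon_dev_unregister(platform_get_drvdata(pdev));
	return 0;
}

static struct platform_driver demo_extcon_driver = {
	.probe	= demo_extcon_probe,
	.remove	= demo_extcon_remove,
	.driver	= { .name = "demo-extcon" },
};
module_platform_driver(demo_extcon_driver);
MODULE_LICENSE("GPL");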
@@ -788,9 +786,9 @@ void extcon_dev_unregister(struct extcon_dev *edev)
list_del(&edev->entry);
mutex_unlock(&extcon_dev_list_lock);
- if (IS_ERR_OR_NULL(get_device(edev->dev))) {
- dev_err(edev->dev, "Failed to unregister extcon_dev (%s)\n",
- dev_name(edev->dev));
+ if (IS_ERR_OR_NULL(get_device(&edev->dev))) {
+ dev_err(&edev->dev, "Failed to unregister extcon_dev (%s)\n",
+ dev_name(&edev->dev));
return;
}
@@ -812,10 +810,10 @@ void extcon_dev_unregister(struct extcon_dev *edev)
#if defined(CONFIG_ANDROID)
if (switch_class)
- class_compat_remove_link(switch_class, edev->dev, NULL);
+ class_compat_remove_link(switch_class, &edev->dev, NULL);
#endif
- device_unregister(edev->dev);
- put_device(edev->dev);
+ device_unregister(&edev->dev);
+ put_device(&edev->dev);
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index f874c30ddbf..7e0dff58e49 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -34,6 +34,7 @@
struct gpio_extcon_data {
struct extcon_dev edev;
unsigned gpio;
+ bool gpio_active_low;
const char *state_on;
const char *state_off;
int irq;
@@ -49,6 +50,8 @@ static void gpio_extcon_work(struct work_struct *work)
work);
state = gpio_get_value(data->gpio);
+ if (data->gpio_active_low)
+ state = !state;
extcon_set_state(&data->edev, state);
}
@@ -78,9 +81,9 @@ static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
static int gpio_extcon_probe(struct platform_device *pdev)
{
- struct gpio_extcon_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct gpio_extcon_data *extcon_data;
- int ret = 0;
+ int ret;
if (!pdata)
return -EBUSY;
@@ -95,14 +98,22 @@ static int gpio_extcon_probe(struct platform_device *pdev)
return -ENOMEM;
extcon_data->edev.name = pdata->name;
+ extcon_data->edev.dev.parent = &pdev->dev;
extcon_data->gpio = pdata->gpio;
+ extcon_data->gpio_active_low = pdata->gpio_active_low;
extcon_data->state_on = pdata->state_on;
extcon_data->state_off = pdata->state_off;
if (pdata->state_on && pdata->state_off)
extcon_data->edev.print_state = extcon_gpio_print_state;
- extcon_data->debounce_jiffies = msecs_to_jiffies(pdata->debounce);
+ if (pdata->debounce) {
+ ret = gpio_set_debounce(extcon_data->gpio,
+ pdata->debounce * 1000);
+ if (ret < 0)
+ extcon_data->debounce_jiffies =
+ msecs_to_jiffies(pdata->debounce);
+ }
- ret = extcon_dev_register(&extcon_data->edev, &pdev->dev);
+ ret = extcon_dev_register(&extcon_data->edev);
if (ret < 0)
return ret;
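/*
 * Editor's note (illustrative, not part of the patch): the probe now asks the
 * pin controller for hardware debounce first (gpio_set_debounce() takes
 * microseconds, hence the * 1000) and only keeps the jiffies-based software
 * debounce as a fallback.  A standalone model of that decision, with a stub
 * standing in for the controller call:
 */
#include <stdio.h>

/* stand-in for gpio_set_debounce(); returns <0 if unsupported */
static int hw_debounce_stub(unsigned int gpio, unsigned int usec)
{
	(void)gpio;
	(void)usec;
	return -1;
}

int main(void)
{
	unsigned int debounce_ms = 20;
	unsigned int sw_debounce_ms = 0;

	if (debounce_ms && hw_debounce_stub(42, debounce_ms * 1000) < 0)
		sw_debounce_ms = debounce_ms;	/* msecs_to_jiffies() in the driver */

	printf("software debounce: %u ms\n", sw_debounce_ms);
	return 0;
}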
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index b56bdaa27d4..da268fbc901 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -189,14 +189,17 @@ enum max77693_muic_acc_type {
/* The below accessories have same ADC value so ADCLow and
ADC1K bit is used to separate specific accessory */
- MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, VBVolot:0, ADCLow:0, ADC1K:0 */
- MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* ADC:0x0, VBVolot:1, ADCLow:0, ADC1K:0 */
- MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, VBVolot:0, ADCLow:1, ADC1K:0 */
- MAX77693_MUIC_GND_MHL = 0x103, /* ADC:0x0, VBVolot:0, ADCLow:1, ADC1K:1 */
- MAX77693_MUIC_GND_MHL_VB = 0x107, /* ADC:0x0, VBVolot:1, ADCLow:1, ADC1K:1 */
+ /* ADC|VBVolot|ADCLow|ADC1K| */
+ MAX77693_MUIC_GND_USB_OTG = 0x100, /* 0x0| 0| 0| 0| */
+ MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* 0x0| 1| 0| 0| */
+ MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* 0x0| 0| 1| 0| */
+ MAX77693_MUIC_GND_MHL = 0x103, /* 0x0| 0| 1| 1| */
+ MAX77693_MUIC_GND_MHL_VB = 0x107, /* 0x0| 1| 1| 1| */
};
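/*
 * Editor's note (illustrative, not part of the patch): judging by the bit
 * table above and the cable_type expression later in this file, the GND_*
 * values are composed as (0x1 << 8) | (vbvolt << 2) | (adclow << 1) | adc1k,
 * which is why cables sharing ADC 0x0 still get distinct codes.  A standalone
 * check against the values listed in the enum:
 */
#include <assert.h>
#include <stdio.h>

static unsigned int gnd_cable_type(unsigned int vbvolt, unsigned int adclow,
				   unsigned int adc1k)
{
	return (0x1 << 8) | (vbvolt << 2) | (adclow << 1) | adc1k;
}

int main(void)
{
	assert(gnd_cable_type(0, 0, 0) == 0x100);	/* USB_OTG */
	assert(gnd_cable_type(1, 0, 0) == 0x104);	/* USB_OTG_VB */
	assert(gnd_cable_type(0, 1, 0) == 0x102);	/* AV_CABLE_LOAD */
	assert(gnd_cable_type(0, 1, 1) == 0x103);	/* MHL */
	assert(gnd_cable_type(1, 1, 1) == 0x107);	/* MHL_VB */
	printf("all GND cable codes match\n");
	return 0;
}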
-/* MAX77693 MUIC device support below list of accessories(external connector) */
+/*
+ * MAX77693 MUIC device support below list of accessories(external connector)
+ */
enum {
EXTCON_CABLE_USB = 0,
EXTCON_CABLE_USB_HOST,
@@ -395,12 +398,12 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
vbvolt >>= STATUS2_VBVOLT_SHIFT;
/**
- * [0x1][VBVolt][ADCLow][ADC1K]
- * [0x1 0 0 0 ] : USB_OTG
- * [0x1 1 0 0 ] : USB_OTG_VB
- * [0x1 0 1 0 ] : Audio Video Cable with load
- * [0x1 0 1 1 ] : MHL without charging connector
- * [0x1 1 1 1 ] : MHL with charging connector
+ * [0x1|VBVolt|ADCLow|ADC1K]
+ * [0x1| 0| 0| 0] USB_OTG
+ * [0x1| 1| 0| 0] USB_OTG_VB
+ * [0x1| 0| 1| 0] Audio Video cable with load
+ * [0x1| 0| 1| 1] MHL without charging cable
+ * [0x1| 1| 1| 1] MHL with charging cable
*/
cable_type = ((0x1 << 8)
| (vbvolt << 2)
@@ -723,11 +726,11 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
break;
- case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
- case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
- case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
- case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
- case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
/*
* Button of DOCK device
* - the Prev/Next/Volume Up/Volume Down/Play-Pause button
@@ -815,19 +818,21 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
case MAX77693_MUIC_GND_MHL_VB:
/*
* MHL cable with MHL_TA(USB/TA) cable
- * - MHL cable include two port(HDMI line and separate micro-
- * usb port. When the target connect MHL cable, extcon driver
- * check whether MHL_TA(USB/TA) cable is connected. If MHL_TA
- * cable is connected, extcon driver notify state to notifiee
- * for charging battery.
+ * - MHL cable include two port(HDMI line and separate
+ * micro-usb port. When the target connect MHL cable,
+ * extcon driver check whether MHL_TA(USB/TA) cable is
+ * connected. If MHL_TA cable is connected, extcon
+ * driver notify state to notifiee for charging battery.
*
* Features of 'MHL_TA(USB/TA) with MHL cable'
* - Support MHL
- * - Support charging through micro-usb port without data connection
+ * - Support charging through micro-usb port without
+ * data connection
*/
extcon_set_cable_state(info->edev, "MHL_TA", attached);
if (!cable_attached)
- extcon_set_cable_state(info->edev, "MHL", cable_attached);
+ extcon_set_cable_state(info->edev,
+ "MHL", cable_attached);
break;
}
@@ -839,47 +844,51 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
/*
* Dock-Audio device with USB/TA cable
- * - Dock device include two port(Dock-Audio and micro-usb
- * port). When the target connect Dock-Audio device, extcon
- * driver check whether USB/TA cable is connected. If USB/TA
- * cable is connected, extcon driver notify state to notifiee
- * for charging battery.
+ * - The Dock device includes two ports (Dock-Audio and a
+ * micro-usb port). When the target connects a Dock-Audio
+ * device, the extcon driver checks whether a USB/TA cable
+ * is connected. If it is, the extcon driver notifies the
+ * notifiee of the state for charging the battery.
*
* Features of 'USB/TA cable with Dock-Audio device'
* - Support external output feature of audio.
- * - Support charging through micro-usb port without data
- * connection.
+ * - Support charging through micro-usb port without
+ * data connection.
*/
extcon_set_cable_state(info->edev, "USB", attached);
if (!cable_attached)
- extcon_set_cable_state(info->edev, "Dock-Audio", cable_attached);
+ extcon_set_cable_state(info->edev, "Dock-Audio",
+ cable_attached);
break;
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
/*
* Dock-Smart device with USB/TA cable
* - Dock-Desk device include three type of cable which
* are HDMI, USB for mouse/keyboard and micro-usb port
- * for USB/TA cable. Dock-Smart device need always exteranl
- * power supply(USB/TA cable through micro-usb cable). Dock-
- * Smart device support screen output of target to separate
- * monitor and mouse/keyboard for desktop mode.
+ * for the USB/TA cable. The Dock-Smart device always needs
+ * an external power supply (a USB/TA cable through the
+ * micro-usb port). The Dock-Smart device supports screen
+ * output from the target to a separate monitor plus a
+ * mouse/keyboard for desktop mode.
*
* Features of 'USB/TA cable with Dock-Smart device'
* - Support MHL
* - Support external output feature of audio
- * - Support charging through micro-usb port without data
- * connection if TA cable is connected to target.
- * - Support charging and data connection through micro-usb port
- * if USB cable is connected between target and host
- * device.
+ * - Support charging through micro-usb port without
+ * data connection if TA cable is connected to target.
+ * - Support charging and data connection through micro-
+ * usb port if USB cable is connected between target
+ * and host device
* - Support OTG device (Mouse/Keyboard)
*/
- ret = max77693_muic_set_path(info, info->path_usb, attached);
+ ret = max77693_muic_set_path(info, info->path_usb,
+ attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "Dock-Smart",
+ attached);
extcon_set_cable_state(info->edev, "MHL", attached);
break;
@@ -889,25 +898,28 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
switch (chg_type) {
case MAX77693_CHARGER_TYPE_NONE:
/*
- * When MHL(with USB/TA cable) or Dock-Audio with USB/TA cable
- * is attached, muic device happen below two interrupt.
- * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting MHL/Dock-Audio.
- * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting USB/TA cable
- * connected to MHL or Dock-Audio.
- * Always, happen eariler MAX77693_MUIC_IRQ_INT1_ADC interrupt
- * than MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt.
+ * When MHL (with a USB/TA cable) or Dock-Audio with a
+ * USB/TA cable is attached, the muic device raises the
+ * below two irqs:
+ * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting
+ * MHL/Dock-Audio.
+ * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting the
+ * USB/TA cable connected to MHL or Dock-Audio.
+ * The MAX77693_MUIC_IRQ_INT1_ADC irq always happens
+ * earlier than the MAX77693_MUIC_IRQ_INT2_CHGTYP irq.
*
- * If user attach MHL (with USB/TA cable and immediately detach
- * MHL with USB/TA cable before MAX77693_MUIC_IRQ_INT2_CHGTYP
- * interrupt is happened, USB/TA cable remain connected state to
- * target. But USB/TA cable isn't connected to target. The user
- * be face with unusual action. So, driver should check this
- * situation in spite of, that previous charger type is N/A.
+ * If the user attaches MHL (with a USB/TA cable) and
+ * immediately detaches it before the MAX77693_MUIC_IRQ
+ * _INT2_CHGTYP irq has happened, the USB/TA cable is
+ * still reported as connected to the target even though
+ * it isn't, and the user sees unusual behavior. So the
+ * driver should check for this situation even though the
+ * previous charger type is N/A.
*/
break;
case MAX77693_CHARGER_TYPE_USB:
/* Only USB cable, PATH:AP_USB */
- ret = max77693_muic_set_path(info, info->path_usb, attached);
+ ret = max77693_muic_set_path(info, info->path_usb,
+ attached);
if (ret < 0)
return ret;
@@ -953,7 +965,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
mutex_lock(&info->mutex);
- for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
if (info->irq == muic_irqs[i].virq)
irq_type = muic_irqs[i].irq;
@@ -1171,8 +1183,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
goto err_irq;
}
info->edev->name = DEV_NAME;
+ info->edev->dev.parent = &pdev->dev;
info->edev->supported_cable = max77693_extcon_cable;
- ret = extcon_dev_register(info->edev, NULL);
+ ret = extcon_dev_register(info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
goto err_irq;
@@ -1188,7 +1201,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
num_init_data = ARRAY_SIZE(default_init_data);
}
- for (i = 0 ; i < num_init_data ; i++) {
+ for (i = 0; i < num_init_data; i++) {
enum max77693_irq_source irq_src
= MAX77693_IRQ_GROUP_NR;
@@ -1214,7 +1227,8 @@ static int max77693_muic_probe(struct platform_device *pdev)
}
if (pdata->muic_data) {
- struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
+ struct max77693_muic_platform_data *muic_pdata
+ = pdata->muic_data;
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
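As a reading aid for the ground-accessory table above: each cable_type value is just the four fields from the comment packed into one integer. A minimal sketch (the helper is hypothetical, not part of this patch):

static unsigned int max77693_gnd_cable_type(unsigned int vbvolt,
					    unsigned int adclow,
					    unsigned int adc1k)
{
	/* bit 8 set marks the ADC == 0x0 (ground accessory) group */
	return (0x1 << 8) | (vbvolt << 2) | (adclow << 1) | adc1k;
}

/* e.g. max77693_gnd_cable_type(1, 1, 1) == 0x107 == MAX77693_MUIC_GND_MHL_VB */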
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 67d6738d85a..6a00464658c 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -426,7 +426,8 @@ static int max8997_muic_adc_handler(struct max8997_muic_info *info)
break;
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
+ ret = max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, attached);
if (ret < 0)
return ret;
break;
@@ -504,7 +505,8 @@ static int max8997_muic_chg_handler(struct max8997_muic_info *info)
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", attached);
+ extcon_set_cable_state(info->edev,
+ "Charge-downstream", attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_cable_state(info->edev, "TA", attached);
@@ -537,7 +539,7 @@ static void max8997_muic_irq_work(struct work_struct *work)
mutex_lock(&info->mutex);
- for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
if (info->irq == muic_irqs[i].virq)
irq_type = muic_irqs[i].irq;
@@ -705,8 +707,9 @@ static int max8997_muic_probe(struct platform_device *pdev)
goto err_irq;
}
info->edev->name = DEV_NAME;
+ info->edev->dev.parent = &pdev->dev;
info->edev->supported_cable = max8997_extcon_cable;
- ret = extcon_dev_register(info->edev, NULL);
+ ret = extcon_dev_register(info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
goto err_irq;
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 89fdd05c5fd..6c91976dd82 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -135,7 +135,7 @@ static void palmas_enable_irq(struct palmas_usb *palmas_usb)
static int palmas_usb_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
- struct palmas_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct palmas_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct palmas_usb *palmas_usb;
int status;
@@ -178,9 +178,10 @@ static int palmas_usb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, palmas_usb);
palmas_usb->edev.supported_cable = palmas_extcon_cable;
+ palmas_usb->edev.dev.parent = palmas_usb->dev;
palmas_usb->edev.mutually_exclusive = mutually_exclusive;
- status = extcon_dev_register(&palmas_usb->edev, palmas_usb->dev);
+ status = extcon_dev_register(&palmas_usb->edev);
if (status) {
dev_err(&pdev->dev, "failed to register extcon device\n");
return status;
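All three extcon drivers touched above switch to setting edev->dev.parent and calling the single-argument extcon_dev_register(). A minimal probe sketch of that pattern (driver name and cable list are assumptions):

static int foo_extcon_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = devm_kzalloc(&pdev->dev, sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	edev->name = "foo-extcon";		/* hypothetical */
	edev->dev.parent = &pdev->dev;		/* replaces the old second argument */
	edev->supported_cable = foo_cables;	/* assumed NULL-terminated name array */

	return extcon_dev_register(edev);
}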
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index e5af0e3a26e..0e799516a2a 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -477,7 +477,7 @@ void fw_send_phy_config(struct fw_card *card,
phy_config_packet.header[1] = data;
phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
- INIT_COMPLETION(phy_config_done);
+ reinit_completion(&phy_config_done);
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
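The INIT_COMPLETION() -> reinit_completion() change above is mechanical; a short sketch of the idiom for reusing one completion across requests (names are illustrative):

static DECLARE_COMPLETION(req_done);

static void send_and_wait(void)
{
	reinit_completion(&req_done);	/* re-arm; takes a pointer, unlike the old macro */
	/* ... issue the request; the handler calls complete(&req_done) ... */
	wait_for_completion_timeout(&req_done, msecs_to_jiffies(100));
}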
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ff080ee2019..1b5e8e46226 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -545,12 +545,15 @@ static int dcdbas_probe(struct platform_device *dev)
host_control_action = HC_ACTION_NONE;
host_control_smi_type = HC_SMITYPE_NONE;
+ dcdbas_pdev = dev;
+
/*
* BIOS SMI calls require buffer addresses be in 32-bit address space.
* This is done by setting the DMA mask below.
*/
- dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
+ error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+ if (error)
+ return error;
error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
if (error)
@@ -581,6 +584,14 @@ static struct platform_driver dcdbas_driver = {
.remove = dcdbas_remove,
};
+static const struct platform_device_info dcdbas_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = -1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
/**
* dcdbas_init: initialize driver
*/
@@ -592,20 +603,14 @@ static int __init dcdbas_init(void)
if (error)
return error;
- dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1);
- if (!dcdbas_pdev) {
- error = -ENOMEM;
+ dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+ if (IS_ERR(dcdbas_pdev_reg)) {
+ error = PTR_ERR(dcdbas_pdev_reg);
goto err_unregister_driver;
}
- error = platform_device_add(dcdbas_pdev);
- if (error)
- goto err_free_device;
-
return 0;
- err_free_device:
- platform_device_put(dcdbas_pdev);
err_unregister_driver:
platform_driver_unregister(&dcdbas_driver);
return error;
@@ -628,8 +633,9 @@ static void __exit dcdbas_exit(void)
* all sysfs attributes belonging to this module have been
* released.
*/
- smi_data_buf_free();
- platform_device_unregister(dcdbas_pdev);
+ if (dcdbas_pdev)
+ smi_data_buf_free();
+ platform_device_unregister(dcdbas_pdev_reg);
platform_driver_unregister(&dcdbas_driver);
}
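dcdbas (and gsmi further below) now let the platform core set up the DMA mask at registration time. A condensed sketch of that pattern, with hypothetical names:

static const struct platform_device_info foo_dev_info = {
	.name		= "foo",
	.id		= -1,
	.dma_mask	= DMA_BIT_MASK(32),	/* copied into dev.dma_mask for us */
};

static struct platform_device *foo_pdev;

static int __init foo_init(void)
{
	foo_pdev = platform_device_register_full(&foo_dev_info);
	if (IS_ERR(foo_pdev))
		return PTR_ERR(foo_pdev);
	return 0;
}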
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fa0affb699b..c7e81ff8f3e 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -8,6 +8,7 @@
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/dmi.h>
+#include <asm/unaligned.h>
/*
* DMI stands for "Desktop Management Interface". It is part
@@ -25,6 +26,13 @@ static int dmi_initialized;
/* DMI system identification string used during boot */
static char dmi_ids_string[128] __initdata;
+static struct dmi_memdev_info {
+ const char *device;
+ const char *bank;
+ u16 handle;
+} *dmi_memdev;
+static int dmi_memdev_nr;
+
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
@@ -322,6 +330,42 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1)));
}
+static void __init count_mem_devices(const struct dmi_header *dm, void *v)
+{
+ if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ dmi_memdev_nr++;
+}
+
+static void __init save_mem_devices(const struct dmi_header *dm, void *v)
+{
+ const char *d = (const char *)dm;
+ static int nr;
+
+ if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ if (nr >= dmi_memdev_nr) {
+ pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
+ return;
+ }
+ dmi_memdev[nr].handle = get_unaligned(&dm->handle);
+ dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
+ dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ nr++;
+}
+
+void __init dmi_memdev_walk(void)
+{
+ if (!dmi_available)
+ return;
+
+ if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
+ dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
+ if (dmi_memdev)
+ dmi_walk_early(save_mem_devices);
+ }
+}
+
/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
@@ -815,3 +859,20 @@ bool dmi_match(enum dmi_field f, const char *str)
return !strcmp(info, str);
}
EXPORT_SYMBOL_GPL(dmi_match);
+
+void dmi_memdev_name(u16 handle, const char **bank, const char **device)
+{
+ int n;
+
+ if (dmi_memdev == NULL)
+ return;
+
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle) {
+ *bank = dmi_memdev[n].bank;
+ *device = dmi_memdev[n].device;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_name);
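The new dmi_memdev_name() lookup is what the relocated cper code below uses to turn an SMBIOS module handle into a readable DIMM location. A minimal caller sketch (the function name is hypothetical):

static void report_dimm(u16 handle)
{
	const char *bank = NULL, *device = NULL;

	dmi_memdev_name(handle, &bank, &device);	/* leaves both NULL if unknown */
	if (bank && device)
		pr_info("DIMM location: %s %s\n", bank, device);
	else
		pr_info("DIMM DMI handle: 0x%.4x\n", handle);
}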
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index b0fc7c79dfb..3150aa4874e 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,4 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
+config UEFI_CPER
+ def_bool n
+
endmenu
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 99245ab5a79..9ba156d3c77 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -4,3 +4,4 @@
obj-y += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
+obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/acpi/apei/cper.c b/drivers/firmware/efi/cper.c
index 33dc6a00480..1491dd4f08f 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -5,10 +5,10 @@
* Author: Huang Ying <ying.huang@intel.com>
*
* CPER is the format used to describe platform hardware error by
- * various APEI tables, such as ERST, BERT and HEST etc.
+ * various tables, such as ERST, BERT and HEST etc.
*
* For more information about CPER, please refer to Appendix N of UEFI
- * Specification version 2.3.
+ * Specification version 2.4.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
@@ -28,10 +28,12 @@
#include <linux/module.h>
#include <linux/time.h>
#include <linux/cper.h>
+#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/aer.h>
+#define INDENT_SP " "
/*
* CPER record ID need to be unique even after reboot, because record
* ID is used as index for ERST storage, while CPER records from
@@ -73,7 +75,7 @@ static const char *cper_severity_str(unsigned int severity)
* printed, with @pfx is printed at the beginning of each line.
*/
void cper_print_bits(const char *pfx, unsigned int bits,
- const char *strs[], unsigned int strs_size)
+ const char * const strs[], unsigned int strs_size)
{
int i, len = 0;
const char *str;
@@ -98,32 +100,32 @@ void cper_print_bits(const char *pfx, unsigned int bits,
printk("%s\n", buf);
}
-static const char *cper_proc_type_strs[] = {
+static const char * const cper_proc_type_strs[] = {
"IA32/X64",
"IA64",
};
-static const char *cper_proc_isa_strs[] = {
+static const char * const cper_proc_isa_strs[] = {
"IA32",
"IA64",
"X64",
};
-static const char *cper_proc_error_type_strs[] = {
+static const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
"micro-architectural error",
};
-static const char *cper_proc_op_strs[] = {
+static const char * const cper_proc_op_strs[] = {
"unknown or generic",
"data read",
"data write",
"instruction execution",
};
-static const char *cper_proc_flag_strs[] = {
+static const char * const cper_proc_flag_strs[] = {
"restartable",
"precise IP",
"overflow",
@@ -191,46 +193,58 @@ static const char *cper_mem_err_type_strs[] = {
"memory sparing",
"scrub corrected error",
"scrub uncorrected error",
+ "physical memory map-out event",
};
static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
{
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
- if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
+ if (mem->validation_bits & CPER_MEM_VALID_PA)
printk("%s""physical_address: 0x%016llx\n",
pfx, mem->physical_addr);
- if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
+ if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
printk("%s""physical_address_mask: 0x%016llx\n",
pfx, mem->physical_addr_mask);
if (mem->validation_bits & CPER_MEM_VALID_NODE)
- printk("%s""node: %d\n", pfx, mem->node);
+ pr_debug("node: %d\n", mem->node);
if (mem->validation_bits & CPER_MEM_VALID_CARD)
- printk("%s""card: %d\n", pfx, mem->card);
+ pr_debug("card: %d\n", mem->card);
if (mem->validation_bits & CPER_MEM_VALID_MODULE)
- printk("%s""module: %d\n", pfx, mem->module);
+ pr_debug("module: %d\n", mem->module);
+ if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+ pr_debug("rank: %d\n", mem->rank);
if (mem->validation_bits & CPER_MEM_VALID_BANK)
- printk("%s""bank: %d\n", pfx, mem->bank);
+ pr_debug("bank: %d\n", mem->bank);
if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
- printk("%s""device: %d\n", pfx, mem->device);
+ pr_debug("device: %d\n", mem->device);
if (mem->validation_bits & CPER_MEM_VALID_ROW)
- printk("%s""row: %d\n", pfx, mem->row);
+ pr_debug("row: %d\n", mem->row);
if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
- printk("%s""column: %d\n", pfx, mem->column);
+ pr_debug("column: %d\n", mem->column);
if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
- printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
+ pr_debug("bit_position: %d\n", mem->bit_pos);
if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
- printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
+ pr_debug("requestor_id: 0x%016llx\n", mem->requestor_id);
if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
- printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
+ pr_debug("responder_id: 0x%016llx\n", mem->responder_id);
if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
- printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
+ pr_debug("target_id: 0x%016llx\n", mem->target_id);
if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
u8 etype = mem->error_type;
printk("%s""error_type: %d, %s\n", pfx, etype,
etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
cper_mem_err_type_strs[etype] : "unknown");
}
+ if (mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
+ const char *bank = NULL, *device = NULL;
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ if (bank != NULL && device != NULL)
+ printk("%s""DIMM location: %s %s", pfx, bank, device);
+ else
+ printk("%s""DIMM DMI handle: 0x%.4x",
+ pfx, mem->mem_dev_handle);
+ }
}
static const char *cper_pcie_port_type_strs[] = {
@@ -248,7 +262,7 @@ static const char *cper_pcie_port_type_strs[] = {
};
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
- const struct acpi_hest_generic_data *gdata)
+ const struct acpi_generic_data *gdata)
{
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -283,55 +297,45 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
}
-static const char *apei_estatus_section_flag_strs[] = {
- "primary",
- "containment warning",
- "reset",
- "threshold exceeded",
- "resource not accessible",
- "latent error",
-};
-
-static void apei_estatus_print_section(
- const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
+static void cper_estatus_print_section(
+ const char *pfx, const struct acpi_generic_data *gdata, int sec_no)
{
uuid_le *sec_type = (uuid_le *)gdata->section_type;
__u16 severity;
+ char newpfx[64];
severity = gdata->error_severity;
- printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
+ printk("%s""Error %d, type: %s\n", pfx, sec_no,
cper_severity_str(severity));
- printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
- cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
- ARRAY_SIZE(apei_estatus_section_flag_strs));
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
- printk("%s""section_type: general processor error\n", pfx);
+ printk("%s""section_type: general processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*proc_err))
- cper_print_proc_generic(pfx, proc_err);
+ cper_print_proc_generic(newpfx, proc_err);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
- printk("%s""section_type: memory error\n", pfx);
+ printk("%s""section_type: memory error\n", newpfx);
if (gdata->error_data_length >= sizeof(*mem_err))
- cper_print_mem(pfx, mem_err);
+ cper_print_mem(newpfx, mem_err);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie = (void *)(gdata + 1);
- printk("%s""section_type: PCIe error\n", pfx);
+ printk("%s""section_type: PCIe error\n", newpfx);
if (gdata->error_data_length >= sizeof(*pcie))
- cper_print_pcie(pfx, pcie, gdata);
+ cper_print_pcie(newpfx, pcie, gdata);
else
goto err_section_too_small;
} else
- printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
+ printk("%s""section type: unknown, %pUl\n", newpfx, sec_type);
return;
@@ -339,34 +343,38 @@ err_section_too_small:
pr_err(FW_WARN "error section length is too small\n");
}
-void apei_estatus_print(const char *pfx,
- const struct acpi_hest_generic_status *estatus)
+void cper_estatus_print(const char *pfx,
+ const struct acpi_generic_status *estatus)
{
- struct acpi_hest_generic_data *gdata;
+ struct acpi_generic_data *gdata;
unsigned int data_len, gedata_len;
int sec_no = 0;
+ char newpfx[64];
__u16 severity;
- printk("%s""APEI generic hardware error status\n", pfx);
severity = estatus->error_severity;
- printk("%s""severity: %d, %s\n", pfx, severity,
- cper_severity_str(severity));
+ if (severity == CPER_SEV_CORRECTED)
+ printk("%s%s\n", pfx,
+ "It has been corrected by h/w "
+ "and requires no further action");
+ printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
- while (data_len > sizeof(*gdata)) {
+ gdata = (struct acpi_generic_data *)(estatus + 1);
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+ while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
- apei_estatus_print_section(pfx, gdata, sec_no);
+ cper_estatus_print_section(newpfx, gdata, sec_no);
data_len -= gedata_len + sizeof(*gdata);
gdata = (void *)(gdata + 1) + gedata_len;
sec_no++;
}
}
-EXPORT_SYMBOL_GPL(apei_estatus_print);
+EXPORT_SYMBOL_GPL(cper_estatus_print);
-int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+int cper_estatus_check_header(const struct acpi_generic_status *estatus)
{
if (estatus->data_length &&
- estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ estatus->data_length < sizeof(struct acpi_generic_data))
return -EINVAL;
if (estatus->raw_data_length &&
estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
@@ -374,19 +382,19 @@ int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
return 0;
}
-EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+EXPORT_SYMBOL_GPL(cper_estatus_check_header);
-int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+int cper_estatus_check(const struct acpi_generic_status *estatus)
{
- struct acpi_hest_generic_data *gdata;
+ struct acpi_generic_data *gdata;
unsigned int data_len, gedata_len;
int rc;
- rc = apei_estatus_check_header(estatus);
+ rc = cper_estatus_check_header(estatus);
if (rc)
return rc;
data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ gdata = (struct acpi_generic_data *)(estatus + 1);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
@@ -399,4 +407,4 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
return 0;
}
-EXPORT_SYMBOL_GPL(apei_estatus_check);
+EXPORT_SYMBOL_GPL(cper_estatus_check);
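The newpfx/INDENT_SP changes above implement a simple prefix-nesting scheme for the error dump. A stand-alone sketch of the idiom (not driver code):

#define INDENT_SP	" "

static void print_one_level(const char *pfx)
{
	char newpfx[64];

	snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
	printk("%sevent severity: corrected\n", pfx);		/* outer level */
	printk("%ssection_type: memory error\n", newpfx);	/* indented one level */
}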
diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
new file mode 100644
index 00000000000..b6bffbfd3be
--- /dev/null
+++ b/drivers/firmware/efi/efi-stub-helper.c
@@ -0,0 +1,636 @@
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available
+ * under the terms of the GNU General Public License version 2.
+ *
+ */
+#define EFI_READ_CHUNK_SIZE (1024 * 1024)
+
+struct file_info {
+ efi_file_handle_t *handle;
+ u64 size;
+};
+
+
+
+
+static void efi_char16_printk(efi_system_table_t *sys_table_arg,
+ efi_char16_t *str)
+{
+ struct efi_simple_text_output_protocol *out;
+
+ out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
+ efi_call_phys2(out->output_string, out, str);
+}
+
+static void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+{
+ char *s8;
+
+ for (s8 = str; *s8; s8++) {
+ efi_char16_t ch[2] = { 0 };
+
+ ch[0] = *s8;
+ if (*s8 == '\n') {
+ efi_char16_t nl[2] = { '\r', 0 };
+ efi_char16_printk(sys_table_arg, nl);
+ }
+
+ efi_char16_printk(sys_table_arg, ch);
+ }
+}
+
+
+static efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+ efi_memory_desc_t **map,
+ unsigned long *map_size,
+ unsigned long *desc_size,
+ u32 *desc_ver,
+ unsigned long *key_ptr)
+{
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+ *map_size = sizeof(*m) * 32;
+again:
+ /*
+ * Add an additional efi_memory_desc_t because we're doing an
+ * allocation which may be in a new descriptor region.
+ */
+ *map_size += sizeof(*m);
+ status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
+ EFI_LOADER_DATA, *map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = efi_call_phys5(sys_table_arg->boottime->get_memory_map,
+ map_size, m, &key, desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ goto again;
+ }
+
+ if (status != EFI_SUCCESS)
+ efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ if (key_ptr && status == EFI_SUCCESS)
+ *key_ptr = key;
+ if (desc_ver && status == EFI_SUCCESS)
+ *desc_ver = desc_version;
+
+fail:
+ *map = m;
+ return status;
+}
+
+/*
+ * Allocate at the highest possible address that is not above 'max'.
+ */
+static efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long max)
+{
+ unsigned long map_size, desc_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ u64 max_addr = 0;
+ int i;
+
+ status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+ NULL, NULL);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI requires when requesting
+ * a specific address. We are doing page-based allocations,
+ * so we must be aligned to a page.
+ */
+ if (align < EFI_PAGE_SIZE)
+ align = EFI_PAGE_SIZE;
+
+ nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+again:
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = (efi_memory_desc_t *)(m + (i * desc_size));
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+
+ if ((start + size) > end || (start + size) > max)
+ continue;
+
+ if (end - size > max)
+ end = max;
+
+ if (round_down(end - size, align) < start)
+ continue;
+
+ start = round_down(end - size, align);
+
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL.
+ */
+ if (start == 0x0)
+ continue;
+
+ if (start > max_addr)
+ max_addr = start;
+ }
+
+ if (!max_addr)
+ status = EFI_NOT_FOUND;
+ else {
+ status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ nr_pages, &max_addr);
+ if (status != EFI_SUCCESS) {
+ max = max_addr;
+ max_addr = 0;
+ goto again;
+ }
+
+ *addr = max_addr;
+ }
+
+ efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+
+fail:
+ return status;
+}
+
+/*
+ * Allocate at the lowest possible address.
+ */
+static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr)
+{
+ unsigned long map_size, desc_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+
+ status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+ NULL, NULL);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI requires when requesting
+ * a specific address. We are doing page-based allocations,
+ * so we must be aligned to a page.
+ */
+ if (align < EFI_PAGE_SIZE)
+ align = EFI_PAGE_SIZE;
+
+ nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = (efi_memory_desc_t *)(m + (i * desc_size));
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL. Skip the first 8
+ * bytes so we start at a nice even number.
+ */
+ if (start == 0x0)
+ start += 8;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+fail:
+ return status;
+}
+
+static void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
+ unsigned long addr)
+{
+ unsigned long nr_pages;
+
+ if (!size)
+ return;
+
+ nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ efi_call_phys2(sys_table_arg->boottime->free_pages, addr, nr_pages);
+}
+
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */
+static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ char *cmd_line, char *option_string,
+ unsigned long max_addr,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ struct file_info *files;
+ unsigned long file_addr;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ u64 file_size_total;
+ efi_file_io_interface_t *io;
+ efi_file_handle_t *fh;
+ efi_status_t status;
+ int nr_files;
+ char *str;
+ int i, j, k;
+
+ file_addr = 0;
+ file_size_total = 0;
+
+ str = cmd_line;
+
+ j = 0; /* See close_handles */
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ *load_addr = 0;
+ *load_size = 0;
+
+ if (!str || !*str)
+ return EFI_SUCCESS;
+
+ for (nr_files = 0; *str; nr_files++) {
+ str = strstr(str, option_string);
+ if (!str)
+ break;
+
+ str += strlen(option_string);
+
+ /* Skip any leading slashes */
+ while (*str == '/' || *str == '\\')
+ str++;
+
+ while (*str && *str != ' ' && *str != '\n')
+ str++;
+ }
+
+ if (!nr_files)
+ return EFI_SUCCESS;
+
+ status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
+ EFI_LOADER_DATA,
+ nr_files * sizeof(*files),
+ (void **)&files);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc mem for file handle list\n");
+ goto fail;
+ }
+
+ str = cmd_line;
+ for (i = 0; i < nr_files; i++) {
+ struct file_info *file;
+ efi_file_handle_t *h;
+ efi_file_info_t *info;
+ efi_char16_t filename_16[256];
+ unsigned long info_sz;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_char16_t *p;
+ u64 file_sz;
+
+ str = strstr(str, option_string);
+ if (!str)
+ break;
+
+ str += strlen(option_string);
+
+ file = &files[i];
+ p = filename_16;
+
+ /* Skip any leading slashes */
+ while (*str == '/' || *str == '\\')
+ str++;
+
+ while (*str && *str != ' ' && *str != '\n') {
+ if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
+ break;
+
+ if (*str == '/') {
+ *p++ = '\\';
+ str++;
+ } else {
+ *p++ = *str++;
+ }
+ }
+
+ *p = '\0';
+
+ /* Only open the volume once. */
+ if (!i) {
+ efi_boot_services_t *boottime;
+
+ boottime = sys_table_arg->boottime;
+
+ status = efi_call_phys3(boottime->handle_protocol,
+ image->device_handle, &fs_proto,
+ (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ goto free_files;
+ }
+
+ status = efi_call_phys2(io->open_volume, io, &fh);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to open volume\n");
+ goto free_files;
+ }
+ }
+
+ status = efi_call_phys5(fh->open, fh, &h, filename_16,
+ EFI_FILE_MODE_READ, (u64)0);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to open file: ");
+ efi_char16_printk(sys_table_arg, filename_16);
+ efi_printk(sys_table_arg, "\n");
+ goto close_handles;
+ }
+
+ file->handle = h;
+
+ info_sz = 0;
+ status = efi_call_phys4(h->get_info, h, &info_guid,
+ &info_sz, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ efi_printk(sys_table_arg, "Failed to get file info size\n");
+ goto close_handles;
+ }
+
+grow:
+ status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
+ EFI_LOADER_DATA, info_sz,
+ (void **)&info);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+ goto close_handles;
+ }
+
+ status = efi_call_phys4(h->get_info, h, &info_guid,
+ &info_sz, info);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_phys1(sys_table_arg->boottime->free_pool,
+ info);
+ goto grow;
+ }
+
+ file_sz = info->file_size;
+ efi_call_phys1(sys_table_arg->boottime->free_pool, info);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to get file info\n");
+ goto close_handles;
+ }
+
+ file->size = file_sz;
+ file_size_total += file_sz;
+ }
+
+ if (file_size_total) {
+ unsigned long addr;
+
+ /*
+ * Multiple files need to be at consecutive addresses in memory,
+ * so allocate enough memory for all the files. This is used
+ * for loading multiple files.
+ */
+ status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
+ &file_addr, max_addr);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc highmem for files\n");
+ goto close_handles;
+ }
+
+ /* We've run out of free low memory. */
+ if (file_addr > max_addr) {
+ efi_printk(sys_table_arg, "We've run out of free low memory\n");
+ status = EFI_INVALID_PARAMETER;
+ goto free_file_total;
+ }
+
+ addr = file_addr;
+ for (j = 0; j < nr_files; j++) {
+ unsigned long size;
+
+ size = files[j].size;
+ while (size) {
+ unsigned long chunksize;
+ if (size > EFI_READ_CHUNK_SIZE)
+ chunksize = EFI_READ_CHUNK_SIZE;
+ else
+ chunksize = size;
+ status = efi_call_phys3(fh->read,
+ files[j].handle,
+ &chunksize,
+ (void *)addr);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to read file\n");
+ goto free_file_total;
+ }
+ addr += chunksize;
+ size -= chunksize;
+ }
+
+ efi_call_phys1(fh->close, files[j].handle);
+ }
+
+ }
+
+ efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+
+ *load_addr = file_addr;
+ *load_size = file_size_total;
+
+ return status;
+
+free_file_total:
+ efi_free(sys_table_arg, file_size_total, file_addr);
+
+close_handles:
+ for (k = j; k < i; k++)
+ efi_call_phys1(fh->close, files[k].handle);
+free_files:
+ efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+fail:
+ *load_addr = 0;
+ *load_size = 0;
+
+ return status;
+}
+/*
+ * Relocate a kernel image, either compressed or uncompressed.
+ * In the ARM64 case, all kernel images are currently
+ * uncompressed, and as such when we relocate it we need to
+ * allocate additional space for the BSS segment. Any low
+ * memory that this function should avoid needs to be
+ * unavailable in the EFI memory map, as if the preferred
+ * address is not available the lowest available address will
+ * be used.
+ */
+static efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If preferred address allocation failed allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
+ &new_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "ERROR: Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
+
+/*
+ * Convert the unicode UEFI command line to ASCII to pass to kernel.
+ * The size of the allocated memory is returned in *cmd_line_len.
+ * Returns NULL on error.
+ */
+static char *efi_convert_cmdline_to_ascii(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ int *cmd_line_len)
+{
+ u16 *s2;
+ u8 *s1 = NULL;
+ unsigned long cmdline_addr = 0;
+ int load_options_size = image->load_options_size / 2; /* ASCII */
+ void *options = image->load_options;
+ int options_size = 0;
+ efi_status_t status;
+ int i;
+ u16 zero = 0;
+
+ if (options) {
+ s2 = options;
+ while (*s2 && *s2 != '\n' && options_size < load_options_size) {
+ s2++;
+ options_size++;
+ }
+ }
+
+ if (options_size == 0) {
+ /* No command line options, so return empty string */
+ options_size = 1;
+ options = &zero;
+ }
+
+ options_size++; /* NUL termination */
+#ifdef CONFIG_ARM
+ /*
+ * For ARM, allocate at a high address to avoid reserved
+ * regions at low addresses that we don't know the specifics of
+ * at the time we are processing the command line.
+ */
+ status = efi_high_alloc(sys_table_arg, options_size, 0,
+ &cmdline_addr, 0xfffff000);
+#else
+ status = efi_low_alloc(sys_table_arg, options_size, 0,
+ &cmdline_addr);
+#endif
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ s1 = (u8 *)cmdline_addr;
+ s2 = (u16 *)options;
+
+ for (i = 0; i < options_size - 1; i++)
+ *s1++ = *s2++;
+
+ *s1 = '\0';
+
+ *cmd_line_len = options_size;
+ return (char *)cmdline_addr;
+}
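A sketch of how an architecture stub might call the relocation helper above; the preferred address, BSS allowance and alignment are illustrative, not taken from this patch:

static efi_status_t relocate_my_kernel(efi_system_table_t *sys_table,
				       unsigned long *image_addr,
				       unsigned long image_size,
				       unsigned long bss_size)
{
	/* Try the preferred address first; the helper falls back to a low allocation. */
	return efi_relocate_kernel(sys_table, image_addr, image_size,
				   image_size + bss_size,	/* extra room for BSS */
				   0x80000,			/* assumed preferred address */
				   EFI_PAGE_SIZE);		/* assumed alignment */
}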
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5145fa344ad..2e2fbdec084 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -13,11 +13,27 @@
* This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/efi.h>
+#include <linux/io.h>
+
+struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+ .acpi20 = EFI_INVALID_TABLE_ADDR,
+ .smbios = EFI_INVALID_TABLE_ADDR,
+ .sal_systab = EFI_INVALID_TABLE_ADDR,
+ .boot_info = EFI_INVALID_TABLE_ADDR,
+ .hcdp = EFI_INVALID_TABLE_ADDR,
+ .uga = EFI_INVALID_TABLE_ADDR,
+ .uv_systab = EFI_INVALID_TABLE_ADDR,
+};
+EXPORT_SYMBOL(efi);
static struct kobject *efi_kobj;
static struct kobject *efivars_kobj;
@@ -132,3 +148,127 @@ err_put:
}
subsys_initcall(efisubsys_init);
+
+
+/*
+ * We can't ioremap data in EFI boot services RAM, because we've already mapped
+ * it as RAM. So, look it up in the existing EFI memory map instead. Only
+ * callable after efi_enter_virtual_mode and before efi_free_boot_services.
+ */
+void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
+{
+ struct efi_memory_map *map;
+ void *p;
+ map = efi.memmap;
+ if (!map)
+ return NULL;
+ if (WARN_ON(!map->map))
+ return NULL;
+ for (p = map->map; p < map->map_end; p += map->desc_size) {
+ efi_memory_desc_t *md = p;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 end = md->phys_addr + size;
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+ if (!md->virt_addr)
+ continue;
+ if (phys_addr >= md->phys_addr && phys_addr < end) {
+ phys_addr += md->virt_addr - md->phys_addr;
+ return (__force void __iomem *)(unsigned long)phys_addr;
+ }
+ }
+ return NULL;
+}
+
+static __initdata efi_config_table_type_t common_tables[] = {
+ {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
+ {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
+ {HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
+ {MPS_TABLE_GUID, "MPS", &efi.mps},
+ {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
+ {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
+ {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
+ {NULL_GUID, NULL, 0},
+};
+
+static __init int match_config_table(efi_guid_t *guid,
+ unsigned long table,
+ efi_config_table_type_t *table_types)
+{
+ u8 str[EFI_VARIABLE_GUID_LEN + 1];
+ int i;
+
+ if (table_types) {
+ efi_guid_unparse(guid, str);
+
+ for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+ efi_guid_unparse(&table_types[i].guid, str);
+
+ if (!efi_guidcmp(*guid, table_types[i].guid)) {
+ *(table_types[i].ptr) = table;
+ pr_cont(" %s=0x%lx ",
+ table_types[i].name, table);
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int __init efi_config_init(efi_config_table_type_t *arch_tables)
+{
+ void *config_tables, *tablep;
+ int i, sz;
+
+ if (efi_enabled(EFI_64BIT))
+ sz = sizeof(efi_config_table_64_t);
+ else
+ sz = sizeof(efi_config_table_32_t);
+
+ /*
+ * Let's see what config tables the firmware passed to us.
+ */
+ config_tables = early_memremap(efi.systab->tables,
+ efi.systab->nr_tables * sz);
+ if (config_tables == NULL) {
+ pr_err("Could not map Configuration table!\n");
+ return -ENOMEM;
+ }
+
+ tablep = config_tables;
+ pr_info("");
+ for (i = 0; i < efi.systab->nr_tables; i++) {
+ efi_guid_t guid;
+ unsigned long table;
+
+ if (efi_enabled(EFI_64BIT)) {
+ u64 table64;
+ guid = ((efi_config_table_64_t *)tablep)->guid;
+ table64 = ((efi_config_table_64_t *)tablep)->table;
+ table = table64;
+#ifndef CONFIG_64BIT
+ if (table64 >> 32) {
+ pr_cont("\n");
+ pr_err("Table located above 4GB, disabling EFI.\n");
+ early_iounmap(config_tables,
+ efi.systab->nr_tables * sz);
+ return -EINVAL;
+ }
+#endif
+ } else {
+ guid = ((efi_config_table_32_t *)tablep)->guid;
+ table = ((efi_config_table_32_t *)tablep)->table;
+ }
+
+ if (!match_config_table(&guid, table, common_tables))
+ match_config_table(&guid, table, arch_tables);
+
+ tablep += sz;
+ }
+ pr_cont("\n");
+ early_iounmap(config_tables, efi.systab->nr_tables * sz);
+ return 0;
+}
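efi_config_init() accepts an optional per-architecture table list that is matched after common_tables. A hedged sketch (the GUID value and symbol names are placeholders, not real kernel identifiers):

/* Placeholder GUID purely for illustration. */
#define MY_TABLE_GUID \
	EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78)

static unsigned long my_table_phys = EFI_INVALID_TABLE_ADDR;

static __initdata efi_config_table_type_t my_arch_tables[] = {
	{MY_TABLE_GUID, "MYTAB", &my_table_phys},	/* arch-specific entry */
	{NULL_GUID, NULL, 0},				/* sentinel, as in common_tables */
};

static int __init my_efi_init(void)
{
	return efi_config_init(my_arch_tables);		/* or efi_config_init(NULL) */
}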
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 8a7432a4b41..933eb027d52 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -564,7 +564,7 @@ static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
return 0;
}
-void efivars_sysfs_exit(void)
+static void efivars_sysfs_exit(void)
{
/* Remove all entries and destroy */
__efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list, NULL, NULL);
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 6eb535ffedd..e5a67b24587 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -764,6 +764,13 @@ static __init int gsmi_system_valid(void)
static struct kobject *gsmi_kobj;
static struct efivars efivars;
+static const struct platform_device_info gsmi_dev_info = {
+ .name = "gsmi",
+ .id = -1,
+ /* SMI callbacks require 32bit addresses */
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static __init int gsmi_init(void)
{
unsigned long flags;
@@ -776,7 +783,7 @@ static __init int gsmi_init(void)
gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
/* register device */
- gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
+ gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@ static __init int gsmi_init(void)
/* SMI access needs to be serialized */
spin_lock_init(&gsmi_dev.lock);
- /* SMI callbacks require 32bit addresses */
- gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- gsmi_dev.pdev->dev.dma_mask =
- &gsmi_dev.pdev->dev.coherent_dma_mask;
ret = -ENOMEM;
gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
diff --git a/drivers/fmc/Kconfig b/drivers/fmc/Kconfig
index c01cf45bc3d..3a75f4256d0 100644
--- a/drivers/fmc/Kconfig
+++ b/drivers/fmc/Kconfig
@@ -46,6 +46,6 @@ config FMC_CHARDEV
This driver matches every mezzanine device and allows user
space to read and write registers using a char device. It
can be used to write user-space drivers, or just get
- aquainted with a mezzanine before writing its specific driver.
+ acquainted with a mezzanine before writing its specific driver.
endif # FMC
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b6ed304863e..0f0444475bf 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -30,10 +30,6 @@ config ARCH_REQUIRE_GPIOLIB
Selecting this from the architecture code will cause the gpiolib
code to always get built in.
-config GPIO_DEVRES
- def_bool y
- depends on HAS_IOMEM
-
menuconfig GPIOLIB
bool "GPIO Support"
@@ -47,6 +43,10 @@ menuconfig GPIOLIB
if GPIOLIB
+config GPIO_DEVRES
+ def_bool y
+ depends on HAS_IOMEM
+
config OF_GPIO
def_bool y
depends on OF
@@ -129,7 +129,7 @@ config GPIO_IT8761E
config GPIO_EM
tristate "Emma Mobile GPIO"
- depends on ARM
+ depends on ARM && OF_GPIO
help
Say yes here to support GPIO on Renesas Emma Mobile SoCs.
@@ -213,7 +213,7 @@ config GPIO_OCTEON
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
- depends on ARM && ARM_AMBA
+ depends on ARM_AMBA
select GENERIC_IRQ_CHIP
help
Say yes here to support the PrimeCell PL061 GPIO device
@@ -320,6 +320,15 @@ config GPIO_ICH
If unsure, say N.
+config GPIO_IOP
+ tristate "Intel IOP GPIO"
+ depends on ARM && (ARCH_IOP32X || ARCH_IOP33X)
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ IOP32X or IOP33X I/O processors.
+
+ If unsure, say N.
+
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on PCI
@@ -360,6 +369,10 @@ config GPIO_GRGPIO
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
+config GPIO_TB10X
+ bool
+ select OF_GPIO
+
comment "I2C GPIO expanders:"
config GPIO_ARIZONA
@@ -612,12 +625,12 @@ config GPIO_AMD8111
If unsure, say N
-config GPIO_LANGWELL
- bool "Intel Langwell/Penwell GPIO support"
+config GPIO_INTEL_MID
+ bool "Intel Mid GPIO support"
depends on PCI && X86
select IRQ_DOMAIN
help
- Say Y here to support Intel Langwell/Penwell GPIO.
+ Say Y here to support Intel Mid GPIO.
config GPIO_PCH
tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
@@ -703,7 +716,7 @@ config GPIO_74X164
comment "AC97 GPIO expanders:"
config GPIO_UCB1400
- bool "Philips UCB1400 GPIO"
+ tristate "Philips UCB1400 GPIO"
depends on UCB1400_CORE
help
This enables support for the Philips UCB1400 GPIO pins.
@@ -759,6 +772,12 @@ config GPIO_MSIC
Enable support for GPIO on intel MSIC controllers found in
intel MID devices
+config GPIO_BCM_KONA
+ bool "Broadcom Kona GPIO"
+ depends on OF_GPIO
+ help
+ Turn on GPIO support for Broadcom "Kona" chips.
+
comment "USB GPIO expanders:"
config GPIO_VIPERBOARD
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 98e23ebba2c..7971e36b8b1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
+obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
@@ -28,11 +29,12 @@ obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
+obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
-obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
+obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
@@ -71,6 +73,7 @@ obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
+obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 3e7812f0405..307464fd015 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -15,10 +15,95 @@
*/
#include <linux/module.h>
+#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/gfp.h>
+static void devm_gpiod_release(struct device *dev, void *res)
+{
+ struct gpio_desc **desc = res;
+
+ gpiod_put(*desc);
+}
+
+static int devm_gpiod_match(struct device *dev, void *res, void *data)
+{
+ struct gpio_desc **this = res, **gpio = data;
+
+ return *this == *gpio;
+}
+
+/**
+ * devm_gpiod_get - Resource-managed gpiod_get()
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ *
+ * Managed gpiod_get(). GPIO descriptors returned from this function are
+ * automatically disposed on driver detach. See gpiod_get() for detailed
+ * information about behavior and return values.
+ */
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
+ const char *con_id)
+{
+ return devm_gpiod_get_index(dev, con_id, 0);
+}
+EXPORT_SYMBOL(devm_gpiod_get);
+
+/**
+ * devm_gpiod_get_index - Resource-managed gpiod_get_index()
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ *
+ * Managed gpiod_get_index(). GPIO descriptors returned from this function are
+ * automatically disposed on driver detach. See gpiod_get_index() for detailed
+ * information about behavior and return values.
+ */
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx)
+{
+ struct gpio_desc **dr;
+ struct gpio_desc *desc;
+
+ dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = gpiod_get_index(dev, con_id, idx);
+ if (IS_ERR(desc)) {
+ devres_free(dr);
+ return desc;
+ }
+
+ *dr = desc;
+ devres_add(dev, dr);
+
+ return desc;
+}
+EXPORT_SYMBOL(devm_gpiod_get_index);
+
+/**
+ * devm_gpiod_put - Resource-managed gpiod_put()
+ * @dev: GPIO consumer
+ * @desc: GPIO descriptor to dispose of
+ *
+ * Dispose of a GPIO descriptor obtained with devm_gpiod_get() or
+ * devm_gpiod_get_index(). Normally this function will not be called as the GPIO
+ * will be disposed of by the resource management code.
+ */
+void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
+{
+ WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
+ &desc));
+}
+EXPORT_SYMBOL(devm_gpiod_put);
+
+
+
+
static void devm_gpio_release(struct device *dev, void *res)
{
unsigned *gpio = res;
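A minimal consumer sketch for the new devm_gpiod_* helpers above (the driver and the "reset" function name are assumptions):

static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(&pdev->dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_direction_output(reset, 0);	/* released automatically on driver detach */
	return 0;
}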
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 5d518d5db7a..1e04bf91328 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -176,7 +176,6 @@ static int gen_74x164_probe(struct spi_device *spi)
return ret;
exit_destroy:
- spi_set_drvdata(spi, NULL);
mutex_destroy(&chip->lock);
return ret;
}
@@ -190,8 +189,6 @@ static int gen_74x164_remove(struct spi_device *spi)
if (chip == NULL)
return -ENODEV;
- spi_set_drvdata(spi, NULL);
-
ret = gpiochip_remove(&chip->gpio_chip);
if (!ret)
mutex_destroy(&chip->lock);
@@ -212,7 +209,7 @@ static struct spi_driver gen_74x164_driver = {
.driver = {
.name = "74x164",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(gen_74x164_dt_ids),
+ .of_match_table = gen_74x164_dt_ids,
},
.probe = gen_74x164_probe,
.remove = gen_74x164_remove,
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index c0f3fc44ab0..b204033acae 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -325,9 +325,9 @@ static irqreturn_t adnp_irq(int irq, void *data)
pending &= isr & ier;
for_each_set_bit(bit, &pending, 8) {
- unsigned int virq;
- virq = irq_find_mapping(adnp->domain, base + bit);
- handle_nested_irq(virq);
+ unsigned int child_irq;
+ child_irq = irq_find_mapping(adnp->domain, base + bit);
+ handle_nested_irq(child_irq);
}
}
@@ -594,7 +594,7 @@ static struct i2c_driver adnp_i2c_driver = {
.driver = {
.name = "gpio-adnp",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(adnp_of_match),
+ .of_match_table = adnp_of_match,
},
.probe = adnp_i2c_probe,
.remove = adnp_i2c_remove,
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index fa8b6a76276..dceb5dcf9d1 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -109,10 +109,14 @@ static int arizona_gpio_probe(struct platform_device *pdev)
arizona_gpio->arizona = arizona;
arizona_gpio->gpio_chip = template_chip;
arizona_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ arizona_gpio->gpio_chip.of_node = arizona->dev->of_node;
+#endif
switch (arizona->type) {
case WM5102:
case WM5110:
+ case WM8997:
arizona_gpio->gpio_chip.ngpio = 5;
break;
default:
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
new file mode 100644
index 00000000000..72c927dc3be
--- /dev/null
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright (C) 2012-2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define BCM_GPIO_PASSWD 0x00a5a501
+#define GPIO_PER_BANK 32
+#define GPIO_MAX_BANK_NUM 8
+
+#define GPIO_BANK(gpio) ((gpio) >> 5)
+#define GPIO_BIT(gpio) ((gpio) & (GPIO_PER_BANK - 1))
+
+#define GPIO_OUT_STATUS(bank) (0x00000000 + ((bank) << 2))
+#define GPIO_IN_STATUS(bank) (0x00000020 + ((bank) << 2))
+#define GPIO_OUT_SET(bank) (0x00000040 + ((bank) << 2))
+#define GPIO_OUT_CLEAR(bank) (0x00000060 + ((bank) << 2))
+#define GPIO_INT_STATUS(bank) (0x00000080 + ((bank) << 2))
+#define GPIO_INT_MASK(bank) (0x000000a0 + ((bank) << 2))
+#define GPIO_INT_MSKCLR(bank) (0x000000c0 + ((bank) << 2))
+#define GPIO_CONTROL(bank) (0x00000100 + ((bank) << 2))
+#define GPIO_PWD_STATUS(bank) (0x00000500 + ((bank) << 2))
+
+#define GPIO_GPPWR_OFFSET 0x00000520
+
+#define GPIO_GPCTR0_DBR_SHIFT 5
+#define GPIO_GPCTR0_DBR_MASK 0x000001e0
+
+#define GPIO_GPCTR0_ITR_SHIFT 3
+#define GPIO_GPCTR0_ITR_MASK 0x00000018
+#define GPIO_GPCTR0_ITR_CMD_RISING_EDGE 0x00000001
+#define GPIO_GPCTR0_ITR_CMD_FALLING_EDGE 0x00000002
+#define GPIO_GPCTR0_ITR_CMD_BOTH_EDGE 0x00000003
+
+#define GPIO_GPCTR0_IOTR_MASK 0x00000001
+#define GPIO_GPCTR0_IOTR_CMD_0UTPUT 0x00000000
+#define GPIO_GPCTR0_IOTR_CMD_INPUT 0x00000001
+
+#define GPIO_GPCTR0_DB_ENABLE_MASK 0x00000100
+
+#define LOCK_CODE 0xffffffff
+#define UNLOCK_CODE 0x00000000
+
+struct bcm_kona_gpio {
+ void __iomem *reg_base;
+ int num_bank;
+ spinlock_t lock;
+ struct gpio_chip gpio_chip;
+ struct irq_domain *irq_domain;
+ struct bcm_kona_gpio_bank *banks;
+ struct platform_device *pdev;
+};
+
+struct bcm_kona_gpio_bank {
+ int id;
+ int irq;
+ /* Used in the interrupt handler */
+ struct bcm_kona_gpio *kona_gpio;
+};
+
+static inline struct bcm_kona_gpio *to_kona_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct bcm_kona_gpio, gpio_chip);
+}
+
+static void bcm_kona_gpio_set_lockcode_bank(void __iomem *reg_base,
+ int bank_id, int lockcode)
+{
+ writel(BCM_GPIO_PASSWD, reg_base + GPIO_GPPWR_OFFSET);
+ writel(lockcode, reg_base + GPIO_PWD_STATUS(bank_id));
+}
+
+static inline void bcm_kona_gpio_lock_bank(void __iomem *reg_base, int bank_id)
+{
+ bcm_kona_gpio_set_lockcode_bank(reg_base, bank_id, LOCK_CODE);
+}
+
+static inline void bcm_kona_gpio_unlock_bank(void __iomem *reg_base,
+ int bank_id)
+{
+ bcm_kona_gpio_set_lockcode_bank(reg_base, bank_id, UNLOCK_CODE);
+}
+
+static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ u32 val, reg_offset;
+ unsigned long flags;
+
+ kona_gpio = to_kona_gpio(chip);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ /* determine the GPIO pin direction */
+ val = readl(reg_base + GPIO_CONTROL(gpio));
+ val &= GPIO_GPCTR0_IOTR_MASK;
+
+ /* this function only applies to output pin */
+ if (GPIO_GPCTR0_IOTR_CMD_INPUT == val)
+ goto out;
+
+ reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
+
+ val = readl(reg_base + reg_offset);
+ val |= BIT(bit);
+ writel(val, reg_base + reg_offset);
+
+out:
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+}
+
+static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ u32 val, reg_offset;
+ unsigned long flags;
+
+ kona_gpio = to_kona_gpio(chip);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ /* determine the GPIO pin direction */
+ val = readl(reg_base + GPIO_CONTROL(gpio));
+ val &= GPIO_GPCTR0_IOTR_MASK;
+
+ /* read the GPIO bank status */
+ reg_offset = (GPIO_GPCTR0_IOTR_CMD_INPUT == val) ?
+ GPIO_IN_STATUS(bank_id) : GPIO_OUT_STATUS(bank_id);
+ val = readl(reg_base + reg_offset);
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+
+ /* return the specified bit status */
+ return !!(val & BIT(bit));
+}
+
+static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ u32 val;
+ unsigned long flags;
+ int bank_id = GPIO_BANK(gpio);
+
+ kona_gpio = to_kona_gpio(chip);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_CONTROL(gpio));
+ val &= ~GPIO_GPCTR0_IOTR_MASK;
+ val |= GPIO_GPCTR0_IOTR_CMD_INPUT;
+ writel(val, reg_base + GPIO_CONTROL(gpio));
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+
+ return 0;
+}
+
+static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ u32 val, reg_offset;
+ unsigned long flags;
+
+ kona_gpio = to_kona_gpio(chip);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_CONTROL(gpio));
+ val &= ~GPIO_GPCTR0_IOTR_MASK;
+ val |= GPIO_GPCTR0_IOTR_CMD_0UTPUT;
+ writel(val, reg_base + GPIO_CONTROL(gpio));
+ reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
+
+ val = readl(reg_base + reg_offset);
+ val |= BIT(bit);
+ writel(val, reg_base + reg_offset);
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+
+ return 0;
+}
+
+static int bcm_kona_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcm_kona_gpio *kona_gpio;
+
+ kona_gpio = to_kona_gpio(chip);
+ if (gpio >= kona_gpio->gpio_chip.ngpio)
+ return -ENXIO;
+ return irq_create_mapping(kona_gpio->irq_domain, gpio);
+}
+
+static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
+ unsigned debounce)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ u32 val, res;
+ unsigned long flags;
+ int bank_id = GPIO_BANK(gpio);
+
+ kona_gpio = to_kona_gpio(chip);
+ reg_base = kona_gpio->reg_base;
+ /* debounce must be 1-128ms (or 0) */
+ if ((debounce > 0 && debounce < 1000) || debounce > 128000) {
+ dev_err(chip->dev, "Debounce value %u not in range\n",
+ debounce);
+ return -EINVAL;
+ }
+
+ /* calculate debounce bit value */
+ if (debounce != 0) {
+ /* Convert to ms */
+ debounce /= 1000;
+ /* find the MSB */
+ res = fls(debounce) - 1;
+ /* Check if MSB-1 is set (round up or down) */
+ if (res > 0 && (debounce & BIT(res - 1)))
+ res++;
+ }
+
+ /* spin lock for read-modify-write of the GPIO register */
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_CONTROL(gpio));
+ val &= ~GPIO_GPCTR0_DBR_MASK;
+
+ if (debounce == 0) {
+ /* disable debounce */
+ val &= ~GPIO_GPCTR0_DB_ENABLE_MASK;
+ } else {
+ val |= GPIO_GPCTR0_DB_ENABLE_MASK |
+ (res << GPIO_GPCTR0_DBR_SHIFT);
+ }
+
+ writel(val, reg_base + GPIO_CONTROL(gpio));
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+
+ return 0;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "bcm-kona-gpio",
+ .owner = THIS_MODULE,
+ .direction_input = bcm_kona_gpio_direction_input,
+ .get = bcm_kona_gpio_get,
+ .direction_output = bcm_kona_gpio_direction_output,
+ .set = bcm_kona_gpio_set,
+ .set_debounce = bcm_kona_gpio_set_debounce,
+ .to_irq = bcm_kona_gpio_to_irq,
+ .base = 0,
+};
+
+static void bcm_kona_gpio_irq_ack(struct irq_data *d)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int gpio = d->hwirq;
+ int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ u32 val;
+ unsigned long flags;
+
+ kona_gpio = irq_data_get_irq_chip_data(d);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_INT_STATUS(bank_id));
+ val |= BIT(bit);
+ writel(val, reg_base + GPIO_INT_STATUS(bank_id));
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+}
+
+static void bcm_kona_gpio_irq_mask(struct irq_data *d)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int gpio = d->hwirq;
+ int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ u32 val;
+ unsigned long flags;
+
+ kona_gpio = irq_data_get_irq_chip_data(d);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_INT_MASK(bank_id));
+ val |= BIT(bit);
+ writel(val, reg_base + GPIO_INT_MASK(bank_id));
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+}
+
+static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int gpio = d->hwirq;
+ int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ u32 val;
+ unsigned long flags;
+
+ kona_gpio = irq_data_get_irq_chip_data(d);
+ reg_base = kona_gpio->reg_base;
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
+ val |= BIT(bit);
+ writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+}
+
+static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct bcm_kona_gpio *kona_gpio;
+ void __iomem *reg_base;
+ int gpio = d->hwirq;
+ u32 lvl_type;
+ u32 val;
+ unsigned long flags;
+ int bank_id = GPIO_BANK(gpio);
+
+ kona_gpio = irq_data_get_irq_chip_data(d);
+ reg_base = kona_gpio->reg_base;
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ lvl_type = GPIO_GPCTR0_ITR_CMD_RISING_EDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ lvl_type = GPIO_GPCTR0_ITR_CMD_FALLING_EDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ lvl_type = GPIO_GPCTR0_ITR_CMD_BOTH_EDGE;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ /* BCM GPIO doesn't support level triggering */
+ default:
+ dev_err(kona_gpio->gpio_chip.dev,
+ "Invalid BCM GPIO irq type 0x%x\n", type);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&kona_gpio->lock, flags);
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ val = readl(reg_base + GPIO_CONTROL(gpio));
+ val &= ~GPIO_GPCTR0_ITR_MASK;
+ val |= lvl_type << GPIO_GPCTR0_ITR_SHIFT;
+ writel(val, reg_base + GPIO_CONTROL(gpio));
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+ spin_unlock_irqrestore(&kona_gpio->lock, flags);
+
+ return 0;
+}
+
+static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *reg_base;
+ int bit, bank_id;
+ unsigned long sta;
+ struct bcm_kona_gpio_bank *bank = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ /*
+ * For bank interrupts, we can't use chip_data to store the kona_gpio
+ * pointer, since GIC needs it for its own purposes. Therefore, we get
+ * our pointer from the bank structure.
+ */
+ reg_base = bank->kona_gpio->reg_base;
+ bank_id = bank->id;
+ bcm_kona_gpio_unlock_bank(reg_base, bank_id);
+
+ while ((sta = readl(reg_base + GPIO_INT_STATUS(bank_id)) &
+ (~(readl(reg_base + GPIO_INT_MASK(bank_id)))))) {
+ for_each_set_bit(bit, &sta, 32) {
+ int hwirq = GPIO_PER_BANK * bank_id + bit;
+ int child_irq =
+ irq_find_mapping(bank->kona_gpio->irq_domain,
+ hwirq);
+ /*
+ * Clear the interrupt before the handler is called so that
+ * we don't miss any interrupt that occurs while the handlers
+ * are running.
+ */
+ writel(readl(reg_base + GPIO_INT_STATUS(bank_id)) |
+ BIT(bit), reg_base + GPIO_INT_STATUS(bank_id));
+ /* Invoke interrupt handler */
+ generic_handle_irq(child_irq);
+ }
+ }
+
+ bcm_kona_gpio_lock_bank(reg_base, bank_id);
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip bcm_gpio_irq_chip = {
+ .name = "bcm-kona-gpio",
+ .irq_ack = bcm_kona_gpio_irq_ack,
+ .irq_mask = bcm_kona_gpio_irq_mask,
+ .irq_unmask = bcm_kona_gpio_irq_unmask,
+ .irq_set_type = bcm_kona_gpio_irq_set_type,
+};
+
+static const struct of_device_id bcm_kona_gpio_of_match[] = {
+ { .compatible = "brcm,kona-gpio" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, bcm_kona_gpio_of_match);
+
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
+static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ int ret;
+
+ ret = irq_set_chip_data(irq, d->host_data);
+ if (ret < 0)
+ return ret;
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return 0;
+}
+
+static void bcm_kona_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static struct irq_domain_ops bcm_kona_irq_ops = {
+ .map = bcm_kona_gpio_irq_map,
+ .unmap = bcm_kona_gpio_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
+{
+ void __iomem *reg_base;
+ int i;
+
+ reg_base = kona_gpio->reg_base;
+ /* disable interrupts and clear status */
+ for (i = 0; i < kona_gpio->num_bank; i++) {
+ bcm_kona_gpio_unlock_bank(reg_base, i);
+ writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
+ writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
+ bcm_kona_gpio_lock_bank(reg_base, i);
+ }
+}
+
+static int bcm_kona_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct resource *res;
+ struct bcm_kona_gpio_bank *bank;
+ struct bcm_kona_gpio *kona_gpio;
+ struct gpio_chip *chip;
+ int ret;
+ int i;
+
+ match = of_match_device(bcm_kona_gpio_of_match, dev);
+ if (!match) {
+ dev_err(dev, "Failed to find gpio controller\n");
+ return -ENODEV;
+ }
+
+ kona_gpio = devm_kzalloc(dev, sizeof(*kona_gpio), GFP_KERNEL);
+ if (!kona_gpio)
+ return -ENOMEM;
+
+ kona_gpio->gpio_chip = template_chip;
+ chip = &kona_gpio->gpio_chip;
+ kona_gpio->num_bank = of_irq_count(dev->of_node);
+ if (kona_gpio->num_bank == 0) {
+ dev_err(dev, "Couldn't determine # GPIO banks\n");
+ return -ENOENT;
+ }
+ if (kona_gpio->num_bank > GPIO_MAX_BANK_NUM) {
+ dev_err(dev, "Too many GPIO banks configured (max=%d)\n",
+ GPIO_MAX_BANK_NUM);
+ return -ENXIO;
+ }
+ kona_gpio->banks = devm_kzalloc(dev,
+ kona_gpio->num_bank *
+ sizeof(*kona_gpio->banks), GFP_KERNEL);
+ if (!kona_gpio->banks)
+ return -ENOMEM;
+
+ kona_gpio->pdev = pdev;
+ platform_set_drvdata(pdev, kona_gpio);
+ chip->of_node = dev->of_node;
+ chip->ngpio = kona_gpio->num_bank * GPIO_PER_BANK;
+
+ kona_gpio->irq_domain = irq_domain_add_linear(dev->of_node,
+ chip->ngpio,
+ &bcm_kona_irq_ops,
+ kona_gpio);
+ if (!kona_gpio->irq_domain) {
+ dev_err(dev, "Couldn't allocate IRQ domain\n");
+ return -ENXIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ kona_gpio->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(kona_gpio->reg_base)) {
+ ret = -ENXIO;
+ goto err_irq_domain;
+ }
+
+ for (i = 0; i < kona_gpio->num_bank; i++) {
+ bank = &kona_gpio->banks[i];
+ bank->id = i;
+ bank->irq = platform_get_irq(pdev, i);
+ bank->kona_gpio = kona_gpio;
+ if (bank->irq < 0) {
+ dev_err(dev, "Couldn't get IRQ for bank %d", i);
+ ret = -ENOENT;
+ goto err_irq_domain;
+ }
+ }
+
+ dev_info(&pdev->dev, "Setting up Kona GPIO\n");
+
+ bcm_kona_gpio_reset(kona_gpio);
+
+ ret = gpiochip_add(chip);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
+ goto err_irq_domain;
+ }
+ for (i = 0; i < chip->ngpio; i++) {
+ int irq = bcm_kona_gpio_to_irq(chip, i);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip,
+ handle_simple_irq);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ }
+ for (i = 0; i < kona_gpio->num_bank; i++) {
+ bank = &kona_gpio->banks[i];
+ irq_set_chained_handler(bank->irq, bcm_kona_gpio_irq_handler);
+ irq_set_handler_data(bank->irq, bank);
+ }
+
+ spin_lock_init(&kona_gpio->lock);
+
+ return 0;
+
+err_irq_domain:
+ irq_domain_remove(kona_gpio->irq_domain);
+
+ return ret;
+}
+
+static struct platform_driver bcm_kona_gpio_driver = {
+ .driver = {
+ .name = "bcm-kona-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_kona_gpio_of_match,
+ },
+ .probe = bcm_kona_gpio_probe,
+};
+
+module_platform_driver(bcm_kona_gpio_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
+MODULE_LICENSE("GPL v2");
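To make the bank/bit arithmetic (GPIO_BANK()/GPIO_BIT()) and the debounce rounding in bcm_kona_gpio_set_debounce() above concrete, here is a small stand-alone sketch in plain user-space C, with __builtin_clz() standing in for the kernel's fls(); the GPIO number and debounce value are arbitrary examples:

#include <stdio.h>

int main(void)
{
	unsigned int gpio = 37;
	unsigned int bank = gpio >> 5;			/* GPIO_BANK(37) == 1 */
	unsigned int bit  = gpio & 31;			/* GPIO_BIT(37)  == 5 */

	unsigned int debounce = 12000;			/* requested value in us */
	unsigned int ms = debounce / 1000;		/* 12 ms */
	unsigned int res = 31 - __builtin_clz(ms);	/* fls(ms) - 1 == 3 */

	/* MSB-1 is set in 12 (0b1100), so the value is rounded up to 4. */
	if (res > 0 && (ms & (1u << (res - 1))))
		res++;

	printf("gpio %u -> bank %u, bit %u; DBR field %u\n",
	       gpio, bank, bit, res);
	return 0;
}

So GPIO 37 lives in bank 1, bit 5 (its level is read from GPIO_IN_STATUS(1) at offset 0x24), and a 12 ms debounce request is rounded up toward the next power of two, giving a DBR field value of 4.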
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 8369e71ebe4..9dfe36fd8ba 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -228,7 +228,6 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
err_release_mem:
release_mem_region(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
- pci_set_drvdata(dev, NULL);
err_disable:
pci_disable_device(dev);
err_freebg:
@@ -252,7 +251,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(bg);
}
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 0edaf2ce926..0924f20fa47 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -87,7 +87,7 @@ static struct platform_driver clps711x_gpio_driver = {
.driver = {
.name = "clps711x-gpio",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(clps711x_gpio_ids),
+ .of_match_table = clps711x_gpio_ids,
},
.probe = clps711x_gpio_probe,
.remove = clps711x_gpio_remove,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 17df6db5dca..8847adf392b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -15,8 +15,9 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-
-#include <asm/mach/irq.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/gpio-davinci.h>
struct davinci_gpio_regs {
u32 dir;
@@ -31,13 +32,14 @@ struct davinci_gpio_regs {
u32 intstat;
};
+#define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */
+
#define chip2controller(chip) \
container_of(chip, struct davinci_gpio_controller, chip)
-static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
static void __iomem *gpio_base;
-static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio)
+static struct davinci_gpio_regs __iomem *gpio2regs(unsigned gpio)
{
void __iomem *ptr;
@@ -65,7 +67,7 @@ static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
return g;
}
-static int __init davinci_gpio_irq_setup(void);
+static int davinci_gpio_irq_setup(struct platform_device *pdev);
/*--------------------------------------------------------------------------*/
@@ -131,33 +133,53 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
__raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
}
-static int __init davinci_gpio_setup(void)
+static int davinci_gpio_probe(struct platform_device *pdev)
{
int i, base;
unsigned ngpio;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct davinci_gpio_regs *regs;
-
- if (soc_info->gpio_type != GPIO_TYPE_DAVINCI)
- return 0;
+ struct davinci_gpio_controller *chips;
+ struct davinci_gpio_platform_data *pdata;
+ struct davinci_gpio_regs __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ pdata = dev->platform_data;
+ if (!pdata) {
+ dev_err(dev, "No platform data found\n");
+ return -EINVAL;
+ }
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
* bit index that's valid.
*/
- ngpio = soc_info->gpio_num;
+ ngpio = pdata->ngpio;
if (ngpio == 0) {
- pr_err("GPIO setup: how many GPIOs?\n");
+ dev_err(dev, "How many GPIOs?\n");
return -EINVAL;
}
if (WARN_ON(DAVINCI_N_GPIO < ngpio))
ngpio = DAVINCI_N_GPIO;
- gpio_base = ioremap(soc_info->gpio_base, SZ_4K);
- if (WARN_ON(!gpio_base))
+ chips = devm_kzalloc(dev,
+ ngpio * sizeof(struct davinci_gpio_controller),
+ GFP_KERNEL);
+ if (!chips) {
+ dev_err(dev, "Memory allocation failed\n");
return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Invalid memory resource\n");
+ return -EBUSY;
+ }
+
+ gpio_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gpio_base))
+ return PTR_ERR(gpio_base);
for (i = 0, base = 0; base < ngpio; i++, base += 32) {
chips[i].chip.label = "DaVinci";
@@ -183,13 +205,10 @@ static int __init davinci_gpio_setup(void)
gpiochip_add(&chips[i].chip);
}
- soc_info->gpio_ctlrs = chips;
- soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
-
- davinci_gpio_irq_setup();
+ platform_set_drvdata(pdev, chips);
+ davinci_gpio_irq_setup(pdev);
return 0;
}
-pure_initcall(davinci_gpio_setup);
/*--------------------------------------------------------------------------*/
/*
@@ -302,13 +321,14 @@ static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
{
- struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct davinci_gpio_controller *d = chip2controller(chip);
- /* NOTE: we assume for now that only irqs in the first gpio_chip
+ /*
+ * NOTE: we assume for now that only irqs in the first gpio_chip
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
- if (offset < soc_info->gpio_unbanked)
- return soc_info->gpio_irq + offset;
+ if (offset < d->irq_base)
+ return d->gpio_irq + offset;
else
return -ENODEV;
}
@@ -317,12 +337,11 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
{
struct davinci_gpio_controller *d;
struct davinci_gpio_regs __iomem *g;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
u32 mask;
d = (struct davinci_gpio_controller *)data->handler_data;
g = (struct davinci_gpio_regs __iomem *)d->regs;
- mask = __gpio_mask(data->irq - soc_info->gpio_irq);
+ mask = __gpio_mask(data->irq - d->gpio_irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
@@ -343,24 +362,33 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
* (dm6446) can be set appropriately for GPIOV33 pins.
*/
-static int __init davinci_gpio_irq_setup(void)
+static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
unsigned gpio, irq, bank;
struct clk *clk;
u32 binten = 0;
unsigned ngpio, bank_irq;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct davinci_gpio_regs __iomem *g;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
+ struct davinci_gpio_platform_data *pdata = dev->platform_data;
+ struct davinci_gpio_regs __iomem *g;
- ngpio = soc_info->gpio_num;
+ ngpio = pdata->ngpio;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "Invalid IRQ resource\n");
+ return -EBUSY;
+ }
- bank_irq = soc_info->gpio_irq;
- if (bank_irq == 0) {
- printk(KERN_ERR "Don't know first GPIO bank IRQ.\n");
- return -EINVAL;
+ bank_irq = res->start;
+
+ if (!bank_irq) {
+ dev_err(dev, "Invalid IRQ resource\n");
+ return -ENODEV;
}
- clk = clk_get(NULL, "gpio");
+ clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
printk(KERN_ERR "Error %ld getting gpio clock?\n",
PTR_ERR(clk));
@@ -368,16 +396,17 @@ static int __init davinci_gpio_irq_setup(void)
}
clk_prepare_enable(clk);
- /* Arrange gpio_to_irq() support, handling either direct IRQs or
+ /*
+ * Arrange gpio_to_irq() support, handling either direct IRQs or
* banked IRQs. Having GPIOs in the first GPIO bank use direct
* IRQs, while the others use banked IRQs, would need some setup
* tweaks to recognize hardware which can do that.
*/
for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
chips[bank].chip.to_irq = gpio_to_irq_banked;
- chips[bank].irq_base = soc_info->gpio_unbanked
+ chips[bank].irq_base = pdata->gpio_unbanked
? -EINVAL
- : (soc_info->intc_irq_num + gpio);
+ : (pdata->intc_irq_num + gpio);
}
/*
@@ -385,7 +414,7 @@ static int __init davinci_gpio_irq_setup(void)
* controller only handling trigger modes. We currently assume no
* IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
*/
- if (soc_info->gpio_unbanked) {
+ if (pdata->gpio_unbanked) {
static struct irq_chip_type gpio_unbanked;
/* pass "bank 0" GPIO IRQs to AINTC */
@@ -405,7 +434,7 @@ static int __init davinci_gpio_irq_setup(void)
__raw_writel(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
- for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
+ for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
irq_set_chip(irq, &gpio_unbanked.chip);
irq_set_handler_data(irq, &chips[gpio / 32]);
irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
@@ -450,12 +479,31 @@ static int __init davinci_gpio_irq_setup(void)
}
done:
- /* BINTEN -- per-bank interrupt enable. genirq would also let these
+ /*
+ * BINTEN -- per-bank interrupt enable. genirq would also let these
* bits be set/cleared dynamically.
*/
- __raw_writel(binten, gpio_base + 0x08);
+ __raw_writel(binten, gpio_base + BINTEN);
printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
return 0;
}
+
+static struct platform_driver davinci_gpio_driver = {
+ .probe = davinci_gpio_probe,
+ .driver = {
+ .name = "davinci_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * GPIO driver registration needs to be done before machine_init functions
+ * access GPIO. Hence davinci_gpio_drv_reg() is a postcore_initcall.
+ */
+static int __init davinci_gpio_drv_reg(void)
+{
+ return platform_driver_register(&davinci_gpio_driver);
+}
+postcore_initcall(davinci_gpio_drv_reg);
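Since the DaVinci GPIO block is now probed as a platform driver, board or SoC code has to register a matching "davinci_gpio" platform device carrying the register window, the bank IRQ and a struct davinci_gpio_platform_data. A rough board-file sketch follows; the register base, IRQ numbers and GPIO counts are placeholders rather than values for any particular SoC, and only the platform-data fields referenced by the driver above are set:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-davinci.h>

static struct davinci_gpio_platform_data board_gpio_pdata = {
	.ngpio		= 144,	/* total GPIO lines on this SoC */
	.gpio_unbanked	= 0,	/* no direct-mapped AINTC IRQs */
	.intc_irq_num	= 48,	/* base of the banked GPIO IRQ range */
};

static struct resource board_gpio_resources[] = {
	DEFINE_RES_MEM(0x01e26000, 0x1000),	/* GPIO register window */
	DEFINE_RES_IRQ(42),			/* first bank IRQ */
};

static struct platform_device board_gpio_device = {
	.name		= "davinci_gpio",
	.id		= -1,
	.resource	= board_gpio_resources,
	.num_resources	= ARRAY_SIZE(board_gpio_resources),
	.dev		= {
		.platform_data	= &board_gpio_pdata,
	},
};

The device would then be registered early (before machine_init code touches GPIOs) with platform_device_register(&board_gpio_device), matching the postcore_initcall ordering noted above.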
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index c6e1f086efe..ec190361bf2 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -232,16 +232,16 @@ static void em_gio_free(struct gpio_chip *chip, unsigned offset)
em_gio_direction_input(chip, offset);
}
-static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
+static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct em_gio_priv *p = h->host_data;
- pr_debug("gio: map hw irq = %d, virq = %d\n", (int)hw, virq);
+ pr_debug("gio: map hw irq = %d, irq = %d\n", (int)hwirq, irq);
- irq_set_chip_data(virq, h->host_data);
- irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID); /* kill me now */
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID); /* kill me now */
return 0;
}
@@ -319,6 +319,7 @@ static int em_gio_probe(struct platform_device *pdev)
}
gpio_chip = &p->gpio_chip;
+ gpio_chip->of_node = pdev->dev.of_node;
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 56b98eebe1f..80829f3c654 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -51,15 +51,15 @@ static void ep93xx_gpio_update_int_params(unsigned port)
{
BUG_ON(port > 2);
- __raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
+ writeb_relaxed(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
- __raw_writeb(gpio_int_type2[port],
+ writeb_relaxed(gpio_int_type2[port],
EP93XX_GPIO_REG(int_type2_register_offset[port]));
- __raw_writeb(gpio_int_type1[port],
+ writeb_relaxed(gpio_int_type1[port],
EP93XX_GPIO_REG(int_type1_register_offset[port]));
- __raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
+ writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
EP93XX_GPIO_REG(int_en_register_offset[port]));
}
@@ -74,7 +74,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
else
gpio_int_debounce[port] &= ~port_mask;
- __raw_writeb(gpio_int_debounce[port],
+ writeb(gpio_int_debounce[port],
EP93XX_GPIO_REG(int_debounce_register_offset[port]));
}
@@ -83,7 +83,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
unsigned char status;
int i;
- status = __raw_readb(EP93XX_GPIO_A_INT_STATUS);
+ status = readb(EP93XX_GPIO_A_INT_STATUS);
for (i = 0; i < 8; i++) {
if (status & (1 << i)) {
int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
@@ -91,7 +91,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
- status = __raw_readb(EP93XX_GPIO_B_INT_STATUS);
+ status = readb(EP93XX_GPIO_B_INT_STATUS);
for (i = 0; i < 8; i++) {
if (status & (1 << i)) {
int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
@@ -124,7 +124,7 @@ static void ep93xx_gpio_irq_ack(struct irq_data *d)
ep93xx_gpio_update_int_params(port);
}
- __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
+ writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
@@ -139,7 +139,7 @@ static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
gpio_int_unmasked[port] &= ~port_mask;
ep93xx_gpio_update_int_params(port);
- __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
+ writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
new file mode 100644
index 00000000000..be803af658a
--- /dev/null
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -0,0 +1,471 @@
+/*
+ * Moorestown platform Langwell chip GPIO driver
+ *
+ * Copyright (c) 2008, 2009, 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
+ * Clovertrail platform Cloverview chip.
+ * Merrifield platform Tangier chip.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/irqdomain.h>
+
+#define INTEL_MID_IRQ_TYPE_EDGE (1 << 0)
+#define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1)
+
+/*
+ * The Langwell chip has 64 pins, so two 32-bit registers control each
+ * feature, while the Penwell chip has 96 pins per block and needs three
+ * 32-bit registers per feature. We therefore only define the register
+ * order here instead of a structure, and compute the bit offset for a
+ * pin as follows (using GPDR as an example):
+ *
+ * nreg = ngpio / 32;
+ * reg = offset / 32;
+ * bit = offset % 32;
+ * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
+ *
+ * Bit "bit" of the register at reg_addr then controls the GPDR feature
+ * of the pin at "offset".
+ */
+
+enum GPIO_REG {
+ GPLR = 0, /* pin level read-only */
+ GPDR, /* pin direction */
+ GPSR, /* pin set */
+ GPCR, /* pin clear */
+ GRER, /* rising edge detect */
+ GFER, /* falling edge detect */
+ GEDR, /* edge detect result */
+ GAFR, /* alt function */
+};
+
+/* intel_mid gpio driver data */
+struct intel_mid_gpio_ddata {
+ u16 ngpio; /* number of gpio pins */
+ u32 gplr_offset; /* offset of first GPLR register from base */
+ u32 flis_base; /* base address of FLIS registers */
+ u32 flis_len; /* length of FLIS registers */
+ u32 (*get_flis_offset)(int gpio);
+ u32 chip_irq_type; /* chip interrupt type */
+};
+
+struct intel_mid_gpio {
+ struct gpio_chip chip;
+ void __iomem *reg_base;
+ spinlock_t lock;
+ struct pci_dev *pdev;
+ struct irq_domain *domain;
+};
+
+#define to_intel_gpio_priv(chip) container_of(chip, struct intel_mid_gpio, chip)
+
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
+ enum GPIO_REG reg_type)
+{
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
+ unsigned nreg = chip->ngpio / 32;
+ u8 reg = offset / 32;
+
+ return priv->reg_base + reg_type * nreg * 4 + reg * 4;
+}
+
+static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
+ enum GPIO_REG reg_type)
+{
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
+ unsigned nreg = chip->ngpio / 32;
+ u8 reg = offset / 16;
+
+ return priv->reg_base + reg_type * nreg * 4 + reg * 4;
+}
+
+static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
+ u32 value = readl(gafr);
+ int shift = (offset % 16) << 1, af = (value >> shift) & 3;
+
+ if (af) {
+ value &= ~(3 << shift);
+ writel(value, gafr);
+ }
+ return 0;
+}
+
+static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gplr = gpio_reg(chip, offset, GPLR);
+
+ return readl(gplr) & BIT(offset % 32);
+}
+
+static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ void __iomem *gpsr, *gpcr;
+
+ if (value) {
+ gpsr = gpio_reg(chip, offset, GPSR);
+ writel(BIT(offset % 32), gpsr);
+ } else {
+ gpcr = gpio_reg(chip, offset, GPCR);
+ writel(BIT(offset % 32), gpcr);
+ }
+}
+
+static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
+ u32 value;
+ unsigned long flags;
+
+ if (priv->pdev)
+ pm_runtime_get(&priv->pdev->dev);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ value = readl(gpdr);
+ value &= ~BIT(offset % 32);
+ writel(value, gpdr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (priv->pdev)
+ pm_runtime_put(&priv->pdev->dev);
+
+ return 0;
+}
+
+static int intel_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
+ unsigned long flags;
+
+ intel_gpio_set(chip, offset, value);
+
+ if (priv->pdev)
+ pm_runtime_get(&priv->pdev->dev);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ value = readl(gpdr);
+ value |= BIT(offset % 32);
+ writel(value, gpdr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (priv->pdev)
+ pm_runtime_put(&priv->pdev->dev);
+
+ return 0;
+}
+
+static int intel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
+ return irq_create_mapping(priv->domain, offset);
+}
+
+static int intel_mid_irq_type(struct irq_data *d, unsigned type)
+{
+ struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 value;
+ void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);
+
+ if (gpio >= priv->chip.ngpio)
+ return -EINVAL;
+
+ if (priv->pdev)
+ pm_runtime_get(&priv->pdev->dev);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value = readl(grer) | BIT(gpio % 32);
+ else
+ value = readl(grer) & (~BIT(gpio % 32));
+ writel(value, grer);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = readl(gfer) | BIT(gpio % 32);
+ else
+ value = readl(gfer) & (~BIT(gpio % 32));
+ writel(value, gfer);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (priv->pdev)
+ pm_runtime_put(&priv->pdev->dev);
+
+ return 0;
+}
+
+static void intel_mid_irq_unmask(struct irq_data *d)
+{
+}
+
+static void intel_mid_irq_mask(struct irq_data *d)
+{
+}
+
+static struct irq_chip intel_mid_irqchip = {
+ .name = "INTEL_MID-GPIO",
+ .irq_mask = intel_mid_irq_mask,
+ .irq_unmask = intel_mid_irq_unmask,
+ .irq_set_type = intel_mid_irq_type,
+};
+
+static const struct intel_mid_gpio_ddata gpio_lincroft = {
+ .ngpio = 64,
+};
+
+static const struct intel_mid_gpio_ddata gpio_penwell_aon = {
+ .ngpio = 96,
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+};
+
+static const struct intel_mid_gpio_ddata gpio_penwell_core = {
+ .ngpio = 96,
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+};
+
+static const struct intel_mid_gpio_ddata gpio_cloverview_aon = {
+ .ngpio = 96,
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE | INTEL_MID_IRQ_TYPE_LEVEL,
+};
+
+static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
+ .ngpio = 96,
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+};
+
+static const struct intel_mid_gpio_ddata gpio_tangier = {
+ .ngpio = 192,
+ .gplr_offset = 4,
+ .flis_base = 0xff0c0000,
+ .flis_len = 0x8000,
+ .get_flis_offset = NULL,
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(intel_gpio_ids) = {
+ {
+ /* Lincroft */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
+ .driver_data = (kernel_ulong_t)&gpio_lincroft,
+ },
+ {
+ /* Penwell AON */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
+ .driver_data = (kernel_ulong_t)&gpio_penwell_aon,
+ },
+ {
+ /* Penwell Core */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
+ .driver_data = (kernel_ulong_t)&gpio_penwell_core,
+ },
+ {
+ /* Cloverview Aon */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
+ .driver_data = (kernel_ulong_t)&gpio_cloverview_aon,
+ },
+ {
+ /* Cloverview Core */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+ .driver_data = (kernel_ulong_t)&gpio_cloverview_core,
+ },
+ {
+ /* Tangier */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+ .driver_data = (kernel_ulong_t)&gpio_tangier,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
+
+static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct intel_mid_gpio *priv = irq_data_get_irq_handler_data(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ u32 base, gpio, mask;
+ unsigned long pending;
+ void __iomem *gedr;
+
+ /* check the GPIO controller to see which pin triggered the interrupt */
+ for (base = 0; base < priv->chip.ngpio; base += 32) {
+ gedr = gpio_reg(&priv->chip, base, GEDR);
+ while ((pending = readl(gedr))) {
+ gpio = __ffs(pending);
+ mask = BIT(gpio);
+ /* Clear before handling so we can't lose an edge */
+ writel(mask, gedr);
+ generic_handle_irq(irq_find_mapping(priv->domain,
+ base + gpio));
+ }
+ }
+
+ chip->irq_eoi(data);
+}
+
+static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+{
+ void __iomem *reg;
+ unsigned base;
+
+ for (base = 0; base < priv->chip.ngpio; base += 32) {
+ /* Clear the rising-edge detect register */
+ reg = gpio_reg(&priv->chip, base, GRER);
+ writel(0, reg);
+ /* Clear the falling-edge detect register */
+ reg = gpio_reg(&priv->chip, base, GFER);
+ writel(0, reg);
+ /* Clear the edge detect status register */
+ reg = gpio_reg(&priv->chip, base, GEDR);
+ writel(~0, reg);
+ }
+}
+
+static int intel_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct intel_mid_gpio *priv = d->host_data;
+
+ irq_set_chip_and_handler_name(irq, &intel_mid_irqchip,
+ handle_simple_irq, "demux");
+ irq_set_chip_data(irq, priv);
+ irq_set_irq_type(irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intel_gpio_irq_ops = {
+ .map = intel_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int intel_gpio_runtime_idle(struct device *dev)
+{
+ pm_schedule_suspend(dev, 500);
+ return -EBUSY;
+}
+
+static const struct dev_pm_ops intel_gpio_pm_ops = {
+ SET_RUNTIME_PM_OPS(NULL, NULL, intel_gpio_runtime_idle)
+};
+
+static int intel_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem *base;
+ struct intel_mid_gpio *priv;
+ u32 gpio_base;
+ u32 irq_base;
+ int retval;
+ struct intel_mid_gpio_ddata *ddata =
+ (struct intel_mid_gpio_ddata *)id->driver_data;
+
+ retval = pcim_enable_device(pdev);
+ if (retval)
+ return retval;
+
+ retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
+ if (retval) {
+ dev_err(&pdev->dev, "I/O memory mapping error\n");
+ return retval;
+ }
+
+ base = pcim_iomap_table(pdev)[1];
+
+ irq_base = readl(base);
+ gpio_base = readl(sizeof(u32) + base);
+
+ /* release the I/O mapping, since we already got the info from BAR1 */
+ pcim_iounmap_regions(pdev, 1 << 1);
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "can't allocate chip data\n");
+ return -ENOMEM;
+ }
+
+ priv->reg_base = pcim_iomap_table(pdev)[0];
+ priv->chip.label = dev_name(&pdev->dev);
+ priv->chip.request = intel_gpio_request;
+ priv->chip.direction_input = intel_gpio_direction_input;
+ priv->chip.direction_output = intel_gpio_direction_output;
+ priv->chip.get = intel_gpio_get;
+ priv->chip.set = intel_gpio_set;
+ priv->chip.to_irq = intel_gpio_to_irq;
+ priv->chip.base = gpio_base;
+ priv->chip.ngpio = ddata->ngpio;
+ priv->chip.can_sleep = 0;
+ priv->pdev = pdev;
+
+ spin_lock_init(&priv->lock);
+
+ priv->domain = irq_domain_add_simple(pdev->dev.of_node, ddata->ngpio,
+ irq_base, &intel_gpio_irq_ops, priv);
+ if (!priv->domain)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, priv);
+ retval = gpiochip_add(&priv->chip);
+ if (retval) {
+ dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
+ return retval;
+ }
+
+ intel_mid_irq_init_hw(priv);
+
+ irq_set_handler_data(pdev->irq, priv);
+ irq_set_chained_handler(pdev->irq, intel_mid_irq_handler);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+}
+
+static struct pci_driver intel_gpio_driver = {
+ .name = "intel_mid_gpio",
+ .id_table = intel_gpio_ids,
+ .probe = intel_gpio_probe,
+ .driver = {
+ .pm = &intel_gpio_pm_ops,
+ },
+};
+
+static int __init intel_gpio_init(void)
+{
+ return pci_register_driver(&intel_gpio_driver);
+}
+
+device_initcall(intel_gpio_init);
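As a concrete reading of the register-layout comment at the top of gpio-intel-mid.c: for a Penwell-style block with ngpio = 96, nreg = 96 / 32 = 3. Taking pin offset 70 and the direction feature GPDR (enum value 1), reg = 70 / 32 = 2 and bit = 70 % 32 = 6, so reg_addr = reg_base + 1 * 3 * 4 + 2 * 4 = reg_base + 0x14, and bit 6 of that register controls the direction of pin 70. This is exactly what gpio_reg() computes for the one-bit-per-pin registers; gpio_reg_2bit() differs only in using offset / 16, because GAFR packs two bits per pin.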
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
new file mode 100644
index 00000000000..c22a61be3a9
--- /dev/null
+++ b/drivers/gpio/gpio-iop.c
@@ -0,0 +1,130 @@
+/*
+ * arch/arm/plat-iop/gpio.c
+ * GPIO handling for Intel IOP3xx processors.
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+#define IOP3XX_N_GPIOS 8
+
+#define GPIO_IN 0
+#define GPIO_OUT 1
+#define GPIO_LOW 0
+#define GPIO_HIGH 1
+
+/* Memory base offset */
+static void __iomem *base;
+
+#define IOP3XX_GPIO_REG(reg) (base + (reg))
+#define IOP3XX_GPOE IOP3XX_GPIO_REG(0x0000)
+#define IOP3XX_GPID IOP3XX_GPIO_REG(0x0004)
+#define IOP3XX_GPOD IOP3XX_GPIO_REG(0x0008)
+
+static void gpio_line_config(int line, int direction)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = readl(IOP3XX_GPOE);
+ if (direction == GPIO_IN) {
+ val |= BIT(line);
+ } else if (direction == GPIO_OUT) {
+ val &= ~BIT(line);
+ }
+ writel(val, IOP3XX_GPOE);
+ local_irq_restore(flags);
+}
+
+static int gpio_line_get(int line)
+{
+ return !!(readl(IOP3XX_GPID) & BIT(line));
+}
+
+static void gpio_line_set(int line, int value)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = readl(IOP3XX_GPOD);
+ if (value == GPIO_LOW) {
+ val &= ~BIT(line);
+ } else if (value == GPIO_HIGH) {
+ val |= BIT(line);
+ }
+ writel(val, IOP3XX_GPOD);
+ local_irq_restore(flags);
+}
+
+static int iop3xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ gpio_line_config(gpio, GPIO_IN);
+ return 0;
+}
+
+static int iop3xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
+{
+ gpio_line_set(gpio, level);
+ gpio_line_config(gpio, GPIO_OUT);
+ return 0;
+}
+
+static int iop3xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ return gpio_line_get(gpio);
+}
+
+static void iop3xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ gpio_line_set(gpio, value);
+}
+
+static struct gpio_chip iop3xx_chip = {
+ .label = "iop3xx",
+ .direction_input = iop3xx_gpio_direction_input,
+ .get = iop3xx_gpio_get_value,
+ .direction_output = iop3xx_gpio_direction_output,
+ .set = iop3xx_gpio_set_value,
+ .base = 0,
+ .ngpio = IOP3XX_N_GPIOS,
+};
+
+static int iop3xx_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return gpiochip_add(&iop3xx_chip);
+}
+
+static struct platform_driver iop3xx_gpio_driver = {
+ .driver = {
+ .name = "gpio-iop",
+ .owner = THIS_MODULE,
+ },
+ .probe = iop3xx_gpio_probe,
+};
+
+static int __init iop3xx_gpio_init(void)
+{
+ return platform_driver_register(&iop3xx_gpio_driver);
+}
+arch_initcall(iop3xx_gpio_init);
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
deleted file mode 100644
index bfa1af1b519..00000000000
--- a/drivers/gpio/gpio-langwell.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Moorestown platform Langwell chip GPIO driver
- *
- * Copyright (c) 2008, 2009, 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* Supports:
- * Moorestown platform Langwell chip.
- * Medfield platform Penwell chip.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/stddef.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-#include <linux/irqdomain.h>
-
-/*
- * Langwell chip has 64 pins and thus there are 2 32bit registers to control
- * each feature, while Penwell chip has 96 pins for each block, and need 3 32bit
- * registers to control them, so we only define the order here instead of a
- * structure, to get a bit offset for a pin (use GPDR as an example):
- *
- * nreg = ngpio / 32;
- * reg = offset / 32;
- * bit = offset % 32;
- * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
- *
- * so the bit of reg_addr is to control pin offset's GPDR feature
-*/
-
-enum GPIO_REG {
- GPLR = 0, /* pin level read-only */
- GPDR, /* pin direction */
- GPSR, /* pin set */
- GPCR, /* pin clear */
- GRER, /* rising edge detect */
- GFER, /* falling edge detect */
- GEDR, /* edge detect result */
- GAFR, /* alt function */
-};
-
-struct lnw_gpio {
- struct gpio_chip chip;
- void __iomem *reg_base;
- spinlock_t lock;
- struct pci_dev *pdev;
- struct irq_domain *domain;
-};
-
-#define to_lnw_priv(chip) container_of(chip, struct lnw_gpio, chip)
-
-static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
- enum GPIO_REG reg_type)
-{
- struct lnw_gpio *lnw = to_lnw_priv(chip);
- unsigned nreg = chip->ngpio / 32;
- u8 reg = offset / 32;
-
- return lnw->reg_base + reg_type * nreg * 4 + reg * 4;
-}
-
-static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
- enum GPIO_REG reg_type)
-{
- struct lnw_gpio *lnw = to_lnw_priv(chip);
- unsigned nreg = chip->ngpio / 32;
- u8 reg = offset / 16;
-
- return lnw->reg_base + reg_type * nreg * 4 + reg * 4;
-}
-
-static int lnw_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
- u32 value = readl(gafr);
- int shift = (offset % 16) << 1, af = (value >> shift) & 3;
-
- if (af) {
- value &= ~(3 << shift);
- writel(value, gafr);
- }
- return 0;
-}
-
-static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *gplr = gpio_reg(chip, offset, GPLR);
-
- return readl(gplr) & BIT(offset % 32);
-}
-
-static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- void __iomem *gpsr, *gpcr;
-
- if (value) {
- gpsr = gpio_reg(chip, offset, GPSR);
- writel(BIT(offset % 32), gpsr);
- } else {
- gpcr = gpio_reg(chip, offset, GPCR);
- writel(BIT(offset % 32), gpcr);
- }
-}
-
-static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct lnw_gpio *lnw = to_lnw_priv(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- u32 value;
- unsigned long flags;
-
- if (lnw->pdev)
- pm_runtime_get(&lnw->pdev->dev);
-
- spin_lock_irqsave(&lnw->lock, flags);
- value = readl(gpdr);
- value &= ~BIT(offset % 32);
- writel(value, gpdr);
- spin_unlock_irqrestore(&lnw->lock, flags);
-
- if (lnw->pdev)
- pm_runtime_put(&lnw->pdev->dev);
-
- return 0;
-}
-
-static int lnw_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct lnw_gpio *lnw = to_lnw_priv(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- unsigned long flags;
-
- lnw_gpio_set(chip, offset, value);
-
- if (lnw->pdev)
- pm_runtime_get(&lnw->pdev->dev);
-
- spin_lock_irqsave(&lnw->lock, flags);
- value = readl(gpdr);
- value |= BIT(offset % 32);
- writel(value, gpdr);
- spin_unlock_irqrestore(&lnw->lock, flags);
-
- if (lnw->pdev)
- pm_runtime_put(&lnw->pdev->dev);
-
- return 0;
-}
-
-static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct lnw_gpio *lnw = to_lnw_priv(chip);
- return irq_create_mapping(lnw->domain, offset);
-}
-
-static int lnw_irq_type(struct irq_data *d, unsigned type)
-{
- struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
- u32 gpio = irqd_to_hwirq(d);
- unsigned long flags;
- u32 value;
- void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
- void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
-
- if (gpio >= lnw->chip.ngpio)
- return -EINVAL;
-
- if (lnw->pdev)
- pm_runtime_get(&lnw->pdev->dev);
-
- spin_lock_irqsave(&lnw->lock, flags);
- if (type & IRQ_TYPE_EDGE_RISING)
- value = readl(grer) | BIT(gpio % 32);
- else
- value = readl(grer) & (~BIT(gpio % 32));
- writel(value, grer);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = readl(gfer) | BIT(gpio % 32);
- else
- value = readl(gfer) & (~BIT(gpio % 32));
- writel(value, gfer);
- spin_unlock_irqrestore(&lnw->lock, flags);
-
- if (lnw->pdev)
- pm_runtime_put(&lnw->pdev->dev);
-
- return 0;
-}
-
-static void lnw_irq_unmask(struct irq_data *d)
-{
-}
-
-static void lnw_irq_mask(struct irq_data *d)
-{
-}
-
-static struct irq_chip lnw_irqchip = {
- .name = "LNW-GPIO",
- .irq_mask = lnw_irq_mask,
- .irq_unmask = lnw_irq_unmask,
- .irq_set_type = lnw_irq_type,
-};
-
-static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb), .driver_data = 96 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = 96 },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
-
-static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- u32 base, gpio, mask;
- unsigned long pending;
- void __iomem *gedr;
-
- /* check GPIO controller to check which pin triggered the interrupt */
- for (base = 0; base < lnw->chip.ngpio; base += 32) {
- gedr = gpio_reg(&lnw->chip, base, GEDR);
- while ((pending = readl(gedr))) {
- gpio = __ffs(pending);
- mask = BIT(gpio);
- /* Clear before handling so we can't lose an edge */
- writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(lnw->domain,
- base + gpio));
- }
- }
-
- chip->irq_eoi(data);
-}
-
-static void lnw_irq_init_hw(struct lnw_gpio *lnw)
-{
- void __iomem *reg;
- unsigned base;
-
- for (base = 0; base < lnw->chip.ngpio; base += 32) {
- /* Clear the rising-edge detect register */
- reg = gpio_reg(&lnw->chip, base, GRER);
- writel(0, reg);
- /* Clear the falling-edge detect register */
- reg = gpio_reg(&lnw->chip, base, GFER);
- writel(0, reg);
- /* Clear the edge detect status register */
- reg = gpio_reg(&lnw->chip, base, GEDR);
- writel(~0, reg);
- }
-}
-
-static int lnw_gpio_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct lnw_gpio *lnw = d->host_data;
-
- irq_set_chip_and_handler_name(virq, &lnw_irqchip, handle_simple_irq,
- "demux");
- irq_set_chip_data(virq, lnw);
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static const struct irq_domain_ops lnw_gpio_irq_ops = {
- .map = lnw_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int lnw_gpio_runtime_idle(struct device *dev)
-{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
-}
-
-static const struct dev_pm_ops lnw_gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(NULL, NULL, lnw_gpio_runtime_idle)
-};
-
-static int lnw_gpio_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- void __iomem *base;
- struct lnw_gpio *lnw;
- u32 gpio_base;
- u32 irq_base;
- int retval;
- int ngpio = id->driver_data;
-
- retval = pcim_enable_device(pdev);
- if (retval)
- return retval;
-
- retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
- if (retval) {
- dev_err(&pdev->dev, "I/O memory mapping error\n");
- return retval;
- }
-
- base = pcim_iomap_table(pdev)[1];
-
- irq_base = readl(base);
- gpio_base = readl(sizeof(u32) + base);
-
- /* release the IO mapping, since we already get the info from bar1 */
- pcim_iounmap_regions(pdev, 1 << 1);
-
- lnw = devm_kzalloc(&pdev->dev, sizeof(*lnw), GFP_KERNEL);
- if (!lnw) {
- dev_err(&pdev->dev, "can't allocate chip data\n");
- return -ENOMEM;
- }
-
- lnw->reg_base = pcim_iomap_table(pdev)[0];
- lnw->chip.label = dev_name(&pdev->dev);
- lnw->chip.request = lnw_gpio_request;
- lnw->chip.direction_input = lnw_gpio_direction_input;
- lnw->chip.direction_output = lnw_gpio_direction_output;
- lnw->chip.get = lnw_gpio_get;
- lnw->chip.set = lnw_gpio_set;
- lnw->chip.to_irq = lnw_gpio_to_irq;
- lnw->chip.base = gpio_base;
- lnw->chip.ngpio = ngpio;
- lnw->chip.can_sleep = 0;
- lnw->pdev = pdev;
-
- spin_lock_init(&lnw->lock);
-
- lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
- &lnw_gpio_irq_ops, lnw);
- if (!lnw->domain)
- return -ENOMEM;
-
- pci_set_drvdata(pdev, lnw);
- retval = gpiochip_add(&lnw->chip);
- if (retval) {
- dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
- return retval;
- }
-
- lnw_irq_init_hw(lnw);
-
- irq_set_handler_data(pdev->irq, lnw);
- irq_set_chained_handler(pdev->irq, lnw_irq_handler);
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static struct pci_driver lnw_gpio_driver = {
- .name = "langwell_gpio",
- .id_table = lnw_gpio_ids,
- .probe = lnw_gpio_probe,
- .driver = {
- .pm = &lnw_gpio_pm_ops,
- },
-};
-
-static int __init lnw_gpio_init(void)
-{
- return pci_register_driver(&lnw_gpio_driver);
-}
-
-device_initcall(lnw_gpio_init);
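
The Langwell demux loop removed above used the usual "ack before handle" pattern: the latched edge status (GEDR) is cleared before the mapped child interrupt is dispatched, so an edge arriving while a pin is being serviced latches again rather than being lost. A minimal stand-alone sketch of that loop, with invented register values and helper names rather than the real Langwell registers:

#include <stdio.h>
#include <stdint.h>

static uint32_t gedr = 0x00000014;	/* pretend pins 2 and 4 latched an edge */

static uint32_t read_gedr(void) { return gedr; }
static void write_gedr(uint32_t mask) { gedr &= ~mask; }	/* write-1-to-clear */

int main(void)
{
	uint32_t pending;

	while ((pending = read_gedr())) {
		int pin = __builtin_ffs(pending) - 1;	/* lowest pending pin */

		write_gedr(1u << pin);			/* ack first ... */
		printf("dispatch hwirq %d\n", pin);	/* ... then handle */
	}
	return 0;
}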
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 90a80eb688a..2d5555decf0 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 41b5913ddab..a0804740a0b 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -242,14 +242,13 @@ static int lp_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return irq_create_mapping(lg->domain, offset);
}
-static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin, mask;
unsigned long reg, ena, pending;
- unsigned virq;
/* check from GPIO controller which pin triggered the interrupt */
for (base = 0; base < lg->chip.ngpio; base += 32) {
@@ -257,12 +256,14 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
while ((pending = (inl(reg) & inl(ena)))) {
+ unsigned irq;
+
pin = __ffs(pending);
mask = BIT(pin);
/* Clear before handling so we don't lose an edge */
outl(mask, reg);
- virq = irq_find_mapping(lg->domain, base + pin);
- generic_handle_irq(virq);
+ irq = irq_find_mapping(lg->domain, base + pin);
+ generic_handle_irq(irq);
}
}
chip->irq_eoi(data);
@@ -325,15 +326,15 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
}
}
-static int lp_gpio_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
+static int lp_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct lp_gpio *lg = d->host_data;
- irq_set_chip_and_handler_name(virq, &lp_irqchip, handle_simple_irq,
+ irq_set_chip_and_handler_name(irq, &lp_irqchip, handle_simple_irq,
"demux");
- irq_set_chip_data(virq, lg);
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_chip_data(irq, lg);
+ irq_set_irq_type(irq, IRQ_TYPE_NONE);
return 0;
}
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 3fd2caa4a2e..c0b7835f513 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -142,7 +142,6 @@ static int mc33880_probe(struct spi_device *spi)
return ret;
exit_destroy:
- spi_set_drvdata(spi, NULL);
mutex_destroy(&mc->lock);
return ret;
}
@@ -156,8 +155,6 @@ static int mc33880_remove(struct spi_device *spi)
if (mc == NULL)
return -ENODEV;
- spi_set_drvdata(spi, NULL);
-
ret = gpiochip_remove(&mc->chip);
if (!ret)
mutex_destroy(&mc->lock);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index a0b33a216d4..914e859e3ed 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/irq.h>
@@ -282,16 +283,16 @@ static struct irq_chip mpc8xxx_irq_chip = {
.irq_set_type = mpc8xxx_irq_set_type,
};
-static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
+static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
if (mpc8xxx_gc->of_dev_id_data)
mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
- irq_set_chip_data(virq, h->host_data);
- irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index f8e6af20dfb..532bcb336ef 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -254,7 +254,6 @@ static int mxs_gpio_probe(struct platform_device *pdev)
struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
- struct resource *iores = NULL;
int irq_base;
int err;
@@ -262,16 +261,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- if (np) {
- port->id = of_alias_get_id(np, "gpio");
- if (port->id < 0)
- return port->id;
- port->devid = (enum mxs_gpio_id) of_id->data;
- } else {
- port->id = pdev->id;
- port->devid = pdev->id_entry->driver_data;
- }
-
+ port->id = of_alias_get_id(np, "gpio");
+ if (port->id < 0)
+ return port->id;
+ port->devid = (enum mxs_gpio_id) of_id->data;
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
return port->irq;
@@ -281,18 +274,11 @@ static int mxs_gpio_probe(struct platform_device *pdev)
* share the same one
*/
if (!base) {
- if (np) {
- parent = of_get_parent(np);
- base = of_iomap(parent, 0);
- of_node_put(parent);
- if (!base)
- return -EADDRNOTAVAIL;
- } else {
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, iores);
- if (IS_ERR(base))
- return PTR_ERR(base);
- }
+ parent = of_get_parent(np);
+ base = of_iomap(parent, 0);
+ of_node_put(parent);
+ if (!base)
+ return -EADDRNOTAVAIL;
}
port->base = base;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 89675f86230..f319c9ffd4a 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -514,6 +514,14 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
return -EINVAL;
}
+ retval = gpio_lock_as_irq(&bank->chip, offset);
+ if (retval) {
+ dev_err(bank->dev, "unable to lock offset %d for IRQ\n",
+ offset);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return retval;
+ }
+
bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
spin_unlock_irqrestore(&bank->lock, flags);
@@ -797,6 +805,7 @@ static void gpio_irq_shutdown(struct irq_data *d)
unsigned offset = GPIO_INDEX(bank, gpio);
spin_lock_irqsave(&bank->lock, flags);
+ gpio_unlock_as_irq(&bank->chip, offset);
bank->irq_usage &= ~(1 << offset);
_disable_gpio_module(bank, offset);
_reset_gpio(bank, gpio);
@@ -957,22 +966,13 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_bank *bank;
unsigned long flags;
- int retval = 0;
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
-
- if (LINE_USED(bank->irq_usage, offset)) {
- retval = -EINVAL;
- goto exit;
- }
-
bank->set_dataout(bank, offset, value);
_set_gpio_direction(bank, offset, 0);
-
-exit:
spin_unlock_irqrestore(&bank->lock, flags);
- return retval;
+ return 0;
}
static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
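
The gpio-omap hunks above pair gpio_lock_as_irq() in the trigger-type path with gpio_unlock_as_irq() in the irq_chip shutdown path, letting gpiolib refuse a later attempt to drive the same line as an output (replacing the -EINVAL check dropped from gpio_output()). A hedged sketch of that pairing in a generic driver; the example_* names are illustrative, only gpio_lock_as_irq()/gpio_unlock_as_irq() are real gpiolib calls, and the chip data is assumed to be the gpio_chip:

#include <linux/gpio.h>
#include <linux/irq.h>

static int example_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	int ret;

	/* mark the line as IRQ-owned; fails if it was requested as output */
	ret = gpio_lock_as_irq(chip, d->hwirq);
	if (ret)
		return ret;

	/* ... program edge/level detection for d->hwirq here ... */
	return 0;
}

static void example_irq_shutdown(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	/* ... disable detection, then release the line ... */
	gpio_unlock_as_irq(chip, d->hwirq);
}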
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 8588af0f766..11801e986dd 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -31,6 +31,10 @@ struct palmas_gpio {
struct palmas *palmas;
};
+struct palmas_device_data {
+ int ngpio;
+};
+
static inline struct palmas_gpio *to_palmas_gpio(struct gpio_chip *chip)
{
return container_of(chip, struct palmas_gpio, gpio_chip);
@@ -42,23 +46,26 @@ static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
struct palmas *palmas = pg->palmas;
unsigned int val;
int ret;
+ unsigned int reg;
+ int gpio16 = (offset/8);
+
+ offset %= 8;
+ reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;
- ret = palmas_read(palmas, PALMAS_GPIO_BASE, PALMAS_GPIO_DATA_DIR, &val);
+ ret = palmas_read(palmas, PALMAS_GPIO_BASE, reg, &val);
if (ret < 0) {
- dev_err(gc->dev, "GPIO_DATA_DIR read failed, err = %d\n", ret);
+ dev_err(gc->dev, "Reg 0x%02x read failed, %d\n", reg, ret);
return ret;
}
- if (val & (1 << offset)) {
- ret = palmas_read(palmas, PALMAS_GPIO_BASE,
- PALMAS_GPIO_DATA_OUT, &val);
- } else {
- ret = palmas_read(palmas, PALMAS_GPIO_BASE,
- PALMAS_GPIO_DATA_IN, &val);
- }
+ if (val & BIT(offset))
+ reg = (gpio16) ? PALMAS_GPIO_DATA_OUT2 : PALMAS_GPIO_DATA_OUT;
+ else
+ reg = (gpio16) ? PALMAS_GPIO_DATA_IN2 : PALMAS_GPIO_DATA_IN;
+
+ ret = palmas_read(palmas, PALMAS_GPIO_BASE, reg, &val);
if (ret < 0) {
- dev_err(gc->dev, "GPIO_DATA_IN/OUT read failed, err = %d\n",
- ret);
+ dev_err(gc->dev, "Reg 0x%02x read failed, %d\n", reg, ret);
return ret;
}
return !!(val & BIT(offset));
@@ -70,17 +77,20 @@ static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
struct palmas_gpio *pg = to_palmas_gpio(gc);
struct palmas *palmas = pg->palmas;
int ret;
+ unsigned int reg;
+ int gpio16 = (offset/8);
- if (value)
- ret = palmas_write(palmas, PALMAS_GPIO_BASE,
- PALMAS_GPIO_SET_DATA_OUT, BIT(offset));
+ offset %= 8;
+ if (gpio16)
+ reg = (value) ?
+ PALMAS_GPIO_SET_DATA_OUT2 : PALMAS_GPIO_CLEAR_DATA_OUT2;
else
- ret = palmas_write(palmas, PALMAS_GPIO_BASE,
- PALMAS_GPIO_CLEAR_DATA_OUT, BIT(offset));
+ reg = (value) ?
+ PALMAS_GPIO_SET_DATA_OUT : PALMAS_GPIO_CLEAR_DATA_OUT;
+
+ ret = palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
if (ret < 0)
- dev_err(gc->dev, "%s write failed, err = %d\n",
- (value) ? "GPIO_SET_DATA_OUT" : "GPIO_CLEAR_DATA_OUT",
- ret);
+ dev_err(gc->dev, "Reg 0x%02x write failed, %d\n", reg, ret);
}
static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -89,14 +99,19 @@ static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
struct palmas_gpio *pg = to_palmas_gpio(gc);
struct palmas *palmas = pg->palmas;
int ret;
+ unsigned int reg;
+ int gpio16 = (offset/8);
+
+ offset %= 8;
+ reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;
/* Set the initial value */
palmas_gpio_set(gc, offset, value);
- ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE,
- PALMAS_GPIO_DATA_DIR, BIT(offset), BIT(offset));
+ ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg,
+ BIT(offset), BIT(offset));
if (ret < 0)
- dev_err(gc->dev, "GPIO_DATA_DIR write failed, err = %d\n", ret);
+ dev_err(gc->dev, "Reg 0x%02x update failed, %d\n", reg, ret);
return ret;
}
@@ -105,11 +120,15 @@ static int palmas_gpio_input(struct gpio_chip *gc, unsigned offset)
struct palmas_gpio *pg = to_palmas_gpio(gc);
struct palmas *palmas = pg->palmas;
int ret;
+ unsigned int reg;
+ int gpio16 = (offset/8);
+
+ offset %= 8;
+ reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;
- ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE,
- PALMAS_GPIO_DATA_DIR, BIT(offset), 0);
+ ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg, BIT(offset), 0);
if (ret < 0)
- dev_err(gc->dev, "GPIO_DATA_DIR write failed, err = %d\n", ret);
+ dev_err(gc->dev, "Reg 0x%02x update failed, %d\n", reg, ret);
return ret;
}
@@ -121,12 +140,36 @@ static int palmas_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return palmas_irq_get_virq(palmas, PALMAS_GPIO_0_IRQ + offset);
}
+static const struct palmas_device_data palmas_dev_data = {
+ .ngpio = 8,
+};
+
+static const struct palmas_device_data tps80036_dev_data = {
+ .ngpio = 16,
+};
+
+static struct of_device_id of_palmas_gpio_match[] = {
+ { .compatible = "ti,palmas-gpio", .data = &palmas_dev_data,},
+ { .compatible = "ti,tps65913-gpio", .data = &palmas_dev_data,},
+ { .compatible = "ti,tps65914-gpio", .data = &palmas_dev_data,},
+ { .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
+
static int palmas_gpio_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_platform_data *palmas_pdata;
struct palmas_gpio *palmas_gpio;
int ret;
+ const struct of_device_id *match;
+ const struct palmas_device_data *dev_data;
+
+ match = of_match_device(of_palmas_gpio_match, &pdev->dev);
+ dev_data = match->data;
+ if (!dev_data)
+ dev_data = &palmas_dev_data;
palmas_gpio = devm_kzalloc(&pdev->dev,
sizeof(*palmas_gpio), GFP_KERNEL);
@@ -138,7 +181,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->palmas = palmas;
palmas_gpio->gpio_chip.owner = THIS_MODULE;
palmas_gpio->gpio_chip.label = dev_name(&pdev->dev);
- palmas_gpio->gpio_chip.ngpio = 8;
+ palmas_gpio->gpio_chip.ngpio = dev_data->ngpio;
palmas_gpio->gpio_chip.can_sleep = 1;
palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
@@ -172,15 +215,6 @@ static int palmas_gpio_remove(struct platform_device *pdev)
return gpiochip_remove(&palmas_gpio->gpio_chip);
}
-static struct of_device_id of_palmas_gpio_match[] = {
- { .compatible = "ti,palmas-gpio"},
- { .compatible = "ti,tps65913-gpio"},
- { .compatible = "ti,tps65914-gpio"},
- { .compatible = "ti,tps80036-gpio"},
- { },
-};
-MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
-
static struct platform_driver palmas_gpio_driver = {
.driver.name = "palmas-gpio",
.driver.owner = THIS_MODULE,
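
The Palmas changes above extend the driver from 8 to 16 GPIOs by splitting every access into a register bank (offset / 8 selects the "...2" register set) and a bit index (offset % 8). A stand-alone sketch of just that arithmetic, with placeholder register addresses rather than the real PALMAS_* constants:

#include <stdio.h>

#define GPIO_DATA_DIR	0x00	/* placeholder, not the real PALMAS register */
#define GPIO_DATA_DIR2	0x10	/* placeholder for the second bank */

int main(void)
{
	for (unsigned int offset = 0; offset < 16; offset++) {
		int second_bank = offset / 8;	/* 0 for GPIO 0-7, 1 for GPIO 8-15 */
		unsigned int reg = second_bank ? GPIO_DATA_DIR2 : GPIO_DATA_DIR;
		unsigned int bit = offset % 8;	/* bit within the selected register */

		printf("gpio %2u -> reg 0x%02x, bit %u\n", offset, reg, bit);
	}
	return 0;
}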
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index cdd1aa12b89..6e48c07e3d8 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -683,17 +683,6 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
int ret;
u8 val[MAX_BANK];
- /* Let every port in proper state, that could save power */
- memset(val, 0, NBANK(chip));
- pca953x_write_regs(chip, PCA957X_PUPD, val);
- memset(val, 0xFF, NBANK(chip));
- pca953x_write_regs(chip, PCA957X_CFG, val);
- memset(val, 0, NBANK(chip));
- pca953x_write_regs(chip, PCA957X_OUT, val);
-
- ret = pca953x_read_regs(chip, PCA957X_IN, val);
- if (ret)
- goto out;
ret = pca953x_read_regs(chip, PCA957X_OUT, chip->reg_output);
if (ret)
goto out;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 9e61bb0719d..1535686e74e 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -26,9 +26,10 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
static const struct i2c_device_id pcf857x_id[] = {
@@ -50,6 +51,27 @@ static const struct i2c_device_id pcf857x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pcf857x_id);
+#ifdef CONFIG_OF
+static const struct of_device_id pcf857x_of_table[] = {
+ { .compatible = "nxp,pcf8574" },
+ { .compatible = "nxp,pcf8574a" },
+ { .compatible = "nxp,pca8574" },
+ { .compatible = "nxp,pca9670" },
+ { .compatible = "nxp,pca9672" },
+ { .compatible = "nxp,pca9674" },
+ { .compatible = "nxp,pcf8575" },
+ { .compatible = "nxp,pca8575" },
+ { .compatible = "nxp,pca9671" },
+ { .compatible = "nxp,pca9673" },
+ { .compatible = "nxp,pca9675" },
+ { .compatible = "maxim,max7328" },
+ { .compatible = "maxim,max7329" },
+ { .compatible = "ti,tca9554" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcf857x_of_table);
+#endif
+
/*
* The pcf857x, pca857x, and pca967x chips only expose one read and one
* write register. Writing a "one" bit (to match the reset state) lets
@@ -66,12 +88,11 @@ struct pcf857x {
struct gpio_chip chip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
- struct work_struct work; /* irq demux work */
struct irq_domain *irq_domain; /* for irq demux */
spinlock_t slock; /* protect irq demux */
unsigned out; /* software latch */
unsigned status; /* current status */
- int irq; /* real irq number */
+ unsigned irq_mapped; /* mapped gpio irqs */
int (*write)(struct i2c_client *client, unsigned data);
int (*read)(struct i2c_client *client);
@@ -164,48 +185,54 @@ static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
static int pcf857x_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ int ret;
- return irq_create_mapping(gpio->irq_domain, offset);
+ ret = irq_create_mapping(gpio->irq_domain, offset);
+ if (ret > 0)
+ gpio->irq_mapped |= (1 << offset);
+
+ return ret;
}
-static void pcf857x_irq_demux_work(struct work_struct *work)
+static irqreturn_t pcf857x_irq(int irq, void *data)
{
- struct pcf857x *gpio = container_of(work,
- struct pcf857x,
- work);
+ struct pcf857x *gpio = data;
unsigned long change, i, status, flags;
status = gpio->read(gpio->client);
spin_lock_irqsave(&gpio->slock, flags);
- change = gpio->status ^ status;
+ /*
+	 * Call the interrupt handler only if the GPIO is used as an
+	 * interrupt source, to avoid spurious interrupts.
+ */
+
+ change = ((gpio->status ^ status) & gpio->irq_mapped);
for_each_set_bit(i, &change, gpio->chip.ngpio)
generic_handle_irq(irq_find_mapping(gpio->irq_domain, i));
gpio->status = status;
spin_unlock_irqrestore(&gpio->slock, flags);
-}
-
-static irqreturn_t pcf857x_irq_demux(int irq, void *data)
-{
- struct pcf857x *gpio = data;
-
- /*
- * pcf857x can't read/write data here,
- * since i2c data access might go to sleep.
- */
- schedule_work(&gpio->work);
return IRQ_HANDLED;
}
-static int pcf857x_irq_domain_map(struct irq_domain *domain, unsigned int virq,
+static int pcf857x_irq_domain_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hw)
{
- irq_set_chip_and_handler(virq,
+ struct pcf857x *gpio = domain->host_data;
+
+ irq_set_chip_and_handler(irq,
&dummy_irq_chip,
handle_level_irq);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ gpio->irq_mapped |= (1 << hw);
+
return 0;
}
@@ -218,8 +245,6 @@ static void pcf857x_irq_domain_cleanup(struct pcf857x *gpio)
if (gpio->irq_domain)
irq_domain_remove(gpio->irq_domain);
- if (gpio->irq)
- free_irq(gpio->irq, gpio);
}
static int pcf857x_irq_domain_init(struct pcf857x *gpio,
@@ -230,20 +255,21 @@ static int pcf857x_irq_domain_init(struct pcf857x *gpio,
gpio->irq_domain = irq_domain_add_linear(client->dev.of_node,
gpio->chip.ngpio,
&pcf857x_irq_domain_ops,
- NULL);
+ gpio);
if (!gpio->irq_domain)
goto fail;
/* enable real irq */
- status = request_irq(client->irq, pcf857x_irq_demux, 0,
- dev_name(&client->dev), gpio);
+ status = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf857x_irq, IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING,
+ dev_name(&client->dev), gpio);
+
if (status)
goto fail;
/* enable gpio_to_irq() */
- INIT_WORK(&gpio->work, pcf857x_irq_demux_work);
gpio->chip.to_irq = pcf857x_to_irq;
- gpio->irq = client->irq;
return 0;
@@ -257,14 +283,18 @@ fail:
static int pcf857x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pcf857x_platform_data *pdata;
+ struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device_node *np = client->dev.of_node;
struct pcf857x *gpio;
+ unsigned int n_latch = 0;
int status;
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
+ if (IS_ENABLED(CONFIG_OF) && np)
+ of_property_read_u32(np, "lines-initial-states", &n_latch);
+ else if (pdata)
+ n_latch = pdata->n_latch;
+ else
dev_dbg(&client->dev, "no platform data\n");
- }
/* Allocate, initialize, and register this gpio_chip. */
gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
@@ -357,11 +387,11 @@ static int pcf857x_probe(struct i2c_client *client,
* may cause transient glitching since it can't know the last value
* written (some pins may need to be driven low).
*
- * Using pdata->n_latch avoids that trouble. When left initialized
- * to zero, our software copy of the "latch" then matches the chip's
- * all-ones reset state. Otherwise it flags pins to be driven low.
+ * Using n_latch avoids that trouble. When left initialized to zero,
+ * our software copy of the "latch" then matches the chip's all-ones
+ * reset state. Otherwise it flags pins to be driven low.
*/
- gpio->out = pdata ? ~pdata->n_latch : ~0;
+ gpio->out = ~n_latch;
gpio->status = gpio->out;
status = gpiochip_add(&gpio->chip);
@@ -423,6 +453,7 @@ static struct i2c_driver pcf857x_driver = {
.driver = {
.name = "pcf857x",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcf857x_of_table),
},
.probe = pcf857x_probe,
.remove = pcf857x_remove,
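
The pcf857x conversion above drops the schedule_work() bounce and registers a threaded interrupt instead: the hard handler is NULL, so all work runs in the IRQ thread where sleeping I2C transfers are allowed, and IRQF_ONESHOT keeps the line masked until the thread returns. A hedged sketch of that request pattern; example_thread_fn() and example_setup_irq() are illustrative names:

#include <linux/interrupt.h>
#include <linux/i2c.h>

static irqreturn_t example_thread_fn(int irq, void *data)
{
	/* runs in thread context: sleeping bus access is fine here */
	return IRQ_HANDLED;
}

static int example_setup_irq(struct i2c_client *client, void *priv)
{
	/* NULL hard handler + IRQF_ONESHOT: thread-only handling */
	return devm_request_threaded_irq(&client->dev, client->irq,
					 NULL, example_thread_fn,
					 IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
					 dev_name(&client->dev), priv);
}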
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 4274e2e70ef..f22f7f3e2e5 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,15 +238,15 @@ static struct irq_chip pl061_irqchip = {
.irq_set_type = pl061_irq_type,
};
-static int pl061_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
+static int pl061_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct pl061_gpio *chip = d->host_data;
- irq_set_chip_and_handler_name(virq, &pl061_irqchip, handle_simple_irq,
+ irq_set_chip_and_handler_name(irq, &pl061_irqchip, handle_simple_irq,
"pl061");
- irq_set_chip_data(virq, chip);
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_chip_data(irq, chip);
+ irq_set_irq_type(irq, IRQ_TYPE_NONE);
return 0;
}
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 6038966ab04..d3f15ae93bd 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/gpio-rcar.h>
#include <linux/platform_device.h>
@@ -266,16 +267,16 @@ static int gpio_rcar_to_irq(struct gpio_chip *chip, unsigned offset)
return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
}
-static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
+static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct gpio_rcar_priv *p = h->host_data;
- dev_dbg(&p->pdev->dev, "map hw irq = %d, virq = %d\n", (int)hw, virq);
+ dev_dbg(&p->pdev->dev, "map hw irq = %d, irq = %d\n", (int)hwirq, irq);
- irq_set_chip_data(virq, h->host_data);
- irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID); /* kill me now */
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID); /* kill me now */
return 0;
}
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 8ea3b33d4b4..a90be34e4d5 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -10,7 +10,7 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>
-
+#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 358a21c2d81..76e02b9460e 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -1033,7 +1033,7 @@ static int s3c24xx_gpiolib_fbank_to_irq(struct gpio_chip *chip, unsigned offset)
}
#endif
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
static int s3c64xx_gpiolib_mbank_to_irq(struct gpio_chip *chip, unsigned pin)
{
return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
@@ -1174,7 +1174,7 @@ struct samsung_gpio_chip s3c24xx_gpios[] = {
*/
static struct samsung_gpio_chip s3c64xx_gpios_4bit[] = {
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
{
.chip = {
.base = S3C64XX_GPA(0),
@@ -1227,7 +1227,7 @@ static struct samsung_gpio_chip s3c64xx_gpios_4bit[] = {
};
static struct samsung_gpio_chip s3c64xx_gpios_4bit2[] = {
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
{
.base = S3C64XX_GPH_BASE + 0x4,
.chip = {
@@ -1257,7 +1257,7 @@ static struct samsung_gpio_chip s3c64xx_gpios_4bit2[] = {
};
static struct samsung_gpio_chip s3c64xx_gpios_2bit[] = {
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
{
.base = S3C64XX_GPF_BASE,
.config = &samsung_gpio_cfgs[6],
@@ -2082,34 +2082,14 @@ static __init int samsung_gpiolib_init(void)
int i, nr_chips;
int group = 0;
-#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
/*
- * This gpio driver includes support for device tree support and there
- * are platforms using it. In order to maintain compatibility with those
- * platforms, and to allow non-dt Exynos4210 platforms to use this
- * gpiolib support, a check is added to find out if there is a active
- * pin-controller driver support available. If it is available, this
- * gpiolib support is ignored and the gpiolib support available in
- * pin-controller driver is used. This is a temporary check and will go
- * away when all of the Exynos4210 platforms have switched to using
- * device tree and the pin-ctrl driver.
- */
- struct device_node *pctrl_np;
- static const struct of_device_id exynos_pinctrl_ids[] = {
- { .compatible = "samsung,s3c2412-pinctrl", },
- { .compatible = "samsung,s3c2416-pinctrl", },
- { .compatible = "samsung,s3c2440-pinctrl", },
- { .compatible = "samsung,s3c2450-pinctrl", },
- { .compatible = "samsung,exynos4210-pinctrl", },
- { .compatible = "samsung,exynos4x12-pinctrl", },
- { .compatible = "samsung,exynos5250-pinctrl", },
- { .compatible = "samsung,exynos5440-pinctrl", },
- { }
- };
- for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
- if (pctrl_np && of_device_is_available(pctrl_np))
- return -ENODEV;
-#endif
+ * Currently there are two drivers that can provide GPIO support for
+ * Samsung SoCs. For device tree enabled platforms, the new
+ * pinctrl-samsung driver is used, providing both GPIO and pin control
+ * interfaces. For legacy (non-DT) platforms this driver is used.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index b33bad1bb4d..2647e243d47 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -254,9 +254,10 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = bank * 8 + bit;
- int virq = irq_find_mapping(stmpe_gpio->domain, line);
+ int child_irq = irq_find_mapping(stmpe_gpio->domain,
+ line);
- handle_nested_irq(virq);
+ handle_nested_irq(child_irq);
stat &= ~(1 << bit);
}
@@ -271,7 +272,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int stmpe_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+static int stmpe_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct stmpe_gpio *stmpe_gpio = d->host_data;
@@ -279,26 +280,26 @@ static int stmpe_gpio_irq_map(struct irq_domain *d, unsigned int virq,
if (!stmpe_gpio)
return -EINVAL;
- irq_set_chip_data(hwirq, stmpe_gpio);
- irq_set_chip_and_handler(hwirq, &stmpe_gpio_irq_chip,
+ irq_set_chip_data(irq, stmpe_gpio);
+ irq_set_chip_and_handler(irq, &stmpe_gpio_irq_chip,
handle_simple_irq);
- irq_set_nested_thread(hwirq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(hwirq, IRQF_VALID);
+ set_irq_flags(irq, IRQF_VALID);
#else
- irq_set_noprobe(hwirq);
+ irq_set_noprobe(irq);
#endif
return 0;
}
-static void stmpe_gpio_irq_unmap(struct irq_domain *d, unsigned int virq)
+static void stmpe_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
#ifdef CONFIG_ARM
- set_irq_flags(virq, 0);
+ set_irq_flags(irq, 0);
#endif
- irq_set_chip_and_handler(virq, NULL, NULL);
- irq_set_chip_data(virq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
new file mode 100644
index 00000000000..0502b9a041a
--- /dev/null
+++ b/drivers/gpio/gpio-tb10x.c
@@ -0,0 +1,328 @@
+/* Abilis Systems MODULE DESCRIPTION
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Authors: Sascha Leuenberger <sascha.leuenberger@abilis.com>
+ * Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/pinctrl/consumer.h>
+
+#define TB10X_GPIO_DIR_IN (0x00000000)
+#define TB10X_GPIO_DIR_OUT (0x00000001)
+#define OFFSET_TO_REG_DDR (0x00)
+#define OFFSET_TO_REG_DATA (0x04)
+#define OFFSET_TO_REG_INT_EN (0x08)
+#define OFFSET_TO_REG_CHANGE (0x0C)
+#define OFFSET_TO_REG_WRMASK (0x10)
+#define OFFSET_TO_REG_INT_TYPE (0x14)
+
+
+/**
+ * @spinlock: used for atomic read/modify/write of registers
+ * @base: register base address
+ * @domain: IRQ domain of GPIO generated interrupts managed by this controller
+ * @irq: Interrupt line of parent interrupt controller
+ * @gc: gpio_chip structure associated to this GPIO controller
+ */
+struct tb10x_gpio {
+ spinlock_t spinlock;
+ void __iomem *base;
+ struct irq_domain *domain;
+ int irq;
+ struct gpio_chip gc;
+};
+
+static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
+{
+ return ioread32(gpio->base + offs);
+}
+
+static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
+ u32 val)
+{
+ iowrite32(val, gpio->base + offs);
+}
+
+static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
+ u32 mask, u32 val)
+{
+ u32 r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio->spinlock, flags);
+
+ r = tb10x_reg_read(gpio, offs);
+ r = (r & ~mask) | (val & mask);
+
+ tb10x_reg_write(gpio, offs, r);
+
+ spin_unlock_irqrestore(&gpio->spinlock, flags);
+}
+
+static inline struct tb10x_gpio *to_tb10x_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tb10x_gpio, gc);
+}
+
+static int tb10x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
+ int mask = BIT(offset);
+ int val = TB10X_GPIO_DIR_IN << offset;
+
+ tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
+
+ return 0;
+}
+
+static int tb10x_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
+ int val;
+
+ val = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_DATA);
+
+ if (val & BIT(offset))
+ return 1;
+ else
+ return 0;
+}
+
+static void tb10x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
+ int mask = BIT(offset);
+ int val = value << offset;
+
+ tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DATA, mask, val);
+}
+
+static int tb10x_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
+ int mask = BIT(offset);
+ int val = TB10X_GPIO_DIR_OUT << offset;
+
+ tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
+
+ return 0;
+}
+
+static int tb10x_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void tb10x_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
+
+ return irq_create_mapping(tb10x_gpio->domain, offset);
+}
+
+static int tb10x_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQF_TRIGGER_MASK) != IRQ_TYPE_EDGE_BOTH) {
+ pr_err("Only (both) edge triggered interrupts supported.\n");
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(data, type);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
+{
+ struct tb10x_gpio *tb10x_gpio = data;
+ u32 r = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_CHANGE);
+ u32 m = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_INT_EN);
+ const unsigned long bits = r & m;
+ int i;
+
+ for_each_set_bit(i, &bits, 32)
+ generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, i));
+
+ return IRQ_HANDLED;
+}
+
+static int tb10x_gpio_probe(struct platform_device *pdev)
+{
+ struct tb10x_gpio *tb10x_gpio;
+ struct resource *mem;
+ struct device_node *dn = pdev->dev.of_node;
+ int ret = -EBUSY;
+ u32 ngpio;
+
+ if (!dn)
+ return -EINVAL;
+
+ if (of_property_read_u32(dn, "abilis,ngpio", &ngpio))
+ return -EINVAL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource defined.\n");
+ return -EINVAL;
+ }
+
+ tb10x_gpio = devm_kzalloc(&pdev->dev, sizeof(*tb10x_gpio), GFP_KERNEL);
+ if (tb10x_gpio == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&tb10x_gpio->spinlock);
+
+ tb10x_gpio->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(tb10x_gpio->base))
+ return PTR_ERR(tb10x_gpio->base);
+
+ tb10x_gpio->gc.label = of_node_full_name(dn);
+ tb10x_gpio->gc.dev = &pdev->dev;
+ tb10x_gpio->gc.owner = THIS_MODULE;
+ tb10x_gpio->gc.direction_input = tb10x_gpio_direction_in;
+ tb10x_gpio->gc.get = tb10x_gpio_get;
+ tb10x_gpio->gc.direction_output = tb10x_gpio_direction_out;
+ tb10x_gpio->gc.set = tb10x_gpio_set;
+ tb10x_gpio->gc.request = tb10x_gpio_request;
+ tb10x_gpio->gc.free = tb10x_gpio_free;
+ tb10x_gpio->gc.base = -1;
+ tb10x_gpio->gc.ngpio = ngpio;
+ tb10x_gpio->gc.can_sleep = 0;
+
+
+ ret = gpiochip_add(&tb10x_gpio->gc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not add gpiochip.\n");
+ goto fail_gpiochip_registration;
+ }
+
+ platform_set_drvdata(pdev, tb10x_gpio);
+
+ if (of_find_property(dn, "interrupt-controller", NULL)) {
+ struct irq_chip_generic *gc;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "No interrupt specified.\n");
+ goto fail_get_irq;
+ }
+
+ tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
+ tb10x_gpio->irq = ret;
+
+ ret = devm_request_irq(&pdev->dev, ret, tb10x_gpio_irq_cascade,
+ IRQF_TRIGGER_NONE | IRQF_SHARED,
+ dev_name(&pdev->dev), tb10x_gpio);
+ if (ret != 0)
+ goto fail_request_irq;
+
+ tb10x_gpio->domain = irq_domain_add_linear(dn,
+ tb10x_gpio->gc.ngpio,
+ &irq_generic_chip_ops, NULL);
+ if (!tb10x_gpio->domain) {
+ ret = -ENOMEM;
+ goto fail_irq_domain;
+ }
+
+ ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
+ tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
+ handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret)
+ goto fail_irq_domain;
+
+ gc = tb10x_gpio->domain->gc->gc[0];
+ gc->reg_base = tb10x_gpio->base;
+ gc->chip_types[0].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_set_type = tb10x_gpio_irq_set_type;
+ gc->chip_types[0].regs.ack = OFFSET_TO_REG_CHANGE;
+ gc->chip_types[0].regs.mask = OFFSET_TO_REG_INT_EN;
+ }
+
+ return 0;
+
+fail_irq_domain:
+fail_request_irq:
+fail_get_irq:
+ gpiochip_remove(&tb10x_gpio->gc);
+fail_gpiochip_registration:
+fail_ioremap:
+ return ret;
+}
+
+static int __exit tb10x_gpio_remove(struct platform_device *pdev)
+{
+ struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ if (tb10x_gpio->gc.to_irq) {
+ irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
+ BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0);
+ kfree(tb10x_gpio->domain->gc);
+ irq_domain_remove(tb10x_gpio->domain);
+ free_irq(tb10x_gpio->irq, tb10x_gpio);
+ }
+ ret = gpiochip_remove(&tb10x_gpio->gc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id tb10x_gpio_dt_ids[] = {
+ { .compatible = "abilis,tb10x-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tb10x_gpio_dt_ids);
+
+static struct platform_driver tb10x_gpio_driver = {
+ .probe = tb10x_gpio_probe,
+ .remove = tb10x_gpio_remove,
+ .driver = {
+ .name = "tb10x-gpio",
+ .of_match_table = of_match_ptr(tb10x_gpio_dt_ids),
+ .owner = THIS_MODULE,
+ }
+};
+
+module_platform_driver(tb10x_gpio_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("tb10x gpio.");
+MODULE_VERSION("0.0.1");
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 4a5de273c23..ddb5fefaa71 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -96,27 +96,27 @@ static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
}
/**
- * tc3589x_gpio_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ * tc3589x_gpio_irq_get_irq(): Map a hardware IRQ on a chip to a Linux IRQ
*
* @tc3589x_gpio: tc3589x_gpio_irq controller to operate on.
- * @irq: index of the interrupt requested in the chip IRQs
+ * @irq: index of the hardware interrupt requested in the chip IRQs
*
* Useful for drivers to request their own IRQs.
*/
-static int tc3589x_gpio_irq_get_virq(struct tc3589x_gpio *tc3589x_gpio,
- int irq)
+static int tc3589x_gpio_irq_get_irq(struct tc3589x_gpio *tc3589x_gpio,
+ int hwirq)
{
if (!tc3589x_gpio)
return -EINVAL;
- return irq_create_mapping(tc3589x_gpio->domain, irq);
+ return irq_create_mapping(tc3589x_gpio->domain, hwirq);
}
static int tc3589x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
- return tc3589x_gpio_irq_get_virq(tc3589x_gpio, offset);
+ return tc3589x_gpio_irq_get_irq(tc3589x_gpio, offset);
}
static struct gpio_chip template_chip = {
@@ -242,9 +242,9 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = i * 8 + bit;
- int virq = tc3589x_gpio_irq_get_virq(tc3589x_gpio, line);
+ int irq = tc3589x_gpio_irq_get_irq(tc3589x_gpio, line);
- handle_nested_irq(virq);
+ handle_nested_irq(irq);
stat &= ~(1 << bit);
}
@@ -254,31 +254,31 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct tc3589x *tc3589x_gpio = d->host_data;
- irq_set_chip_data(virq, tc3589x_gpio);
- irq_set_chip_and_handler(virq, &tc3589x_gpio_irq_chip,
+ irq_set_chip_data(irq, tc3589x_gpio);
+ irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip,
handle_simple_irq);
- irq_set_nested_thread(virq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
+ set_irq_flags(irq, IRQF_VALID);
#else
- irq_set_noprobe(virq);
+ irq_set_noprobe(irq);
#endif
return 0;
}
-static void tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int virq)
+static void tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
#ifdef CONFIG_ARM
- set_irq_flags(virq, 0);
+ set_irq_flags(irq, 0);
#endif
- irq_set_chip_and_handler(virq, NULL, NULL);
- irq_set_chip_data(virq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
static struct irq_domain_ops tc3589x_irq_ops = {
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9a62672f1be..cfd3b9037bc 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -75,6 +75,7 @@ struct tegra_gpio_bank {
#endif
};
+static struct device *dev;
static struct irq_domain *irq_domain;
static void __iomem *regs;
static u32 tegra_gpio_bank_count;
@@ -205,6 +206,7 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
int lvl_type;
int val;
unsigned long flags;
+ int ret;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -231,6 +233,12 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
+ ret = gpio_lock_as_irq(&tegra_gpio_chip, gpio);
+ if (ret) {
+ dev_err(dev, "unable to lock Tegra GPIO %d as IRQ\n", gpio);
+ return ret;
+ }
+
spin_lock_irqsave(&bank->lvl_lock[port], flags);
val = tegra_gpio_readl(GPIO_INT_LVL(gpio));
@@ -251,6 +259,13 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void tegra_gpio_irq_shutdown(struct irq_data *d)
+{
+ int gpio = d->hwirq;
+
+ gpio_unlock_as_irq(&tegra_gpio_chip, gpio);
+}
+
static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct tegra_gpio_bank *bank;
@@ -368,6 +383,7 @@ static struct irq_chip tegra_gpio_irq_chip = {
.irq_mask = tegra_gpio_irq_mask,
.irq_unmask = tegra_gpio_irq_unmask,
.irq_set_type = tegra_gpio_irq_set_type,
+ .irq_shutdown = tegra_gpio_irq_shutdown,
#ifdef CONFIG_PM_SLEEP
.irq_set_wake = tegra_gpio_irq_set_wake,
#endif
@@ -413,6 +429,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
int i;
int j;
+ dev = &pdev->dev;
+
match = of_match_device(tegra_gpio_of_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c
index 3fa3e2867e1..58445bb6910 100644
--- a/drivers/gpio/gpio-tnetv107x.c
+++ b/drivers/gpio/gpio-tnetv107x.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <mach/common.h>
#include <mach/tnetv107x.h>
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index d8e4f6efcb2..b97d6a6577b 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -354,17 +354,18 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+ int ret = -EINVAL;
mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
- twl4030_set_gpio_dataout(offset, value);
+ ret = twl4030_set_gpio_direction(offset, 0);
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
twl_set(chip, offset, value);
- return 0;
+ return ret;
}
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -435,7 +436,8 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
static int gpio_twl4030_remove(struct platform_device *pdev);
-static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
+static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
+ struct twl4030_gpio_platform_data *pdata)
{
struct twl4030_gpio_platform_data *omap_twl_info;
@@ -443,6 +445,9 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
if (!omap_twl_info)
return NULL;
+ if (pdata)
+ *omap_twl_info = *pdata;
+
omap_twl_info->use_leds = of_property_read_bool(dev->of_node,
"ti,use-leds");
@@ -500,7 +505,7 @@ no_irqs:
mutex_init(&priv->mutex);
if (node)
- pdata = of_gpio_twl4030(&pdev->dev);
+ pdata = of_gpio_twl4030(&pdev->dev, pdata);
if (pdata == NULL) {
dev_err(&pdev->dev, "Platform data is missing\n");
@@ -594,7 +599,7 @@ static struct platform_driver gpio_twl4030_driver = {
.driver = {
.name = "twl4030_gpio",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(twl_gpio_match),
+ .of_match_table = twl_gpio_match,
},
.probe = gpio_twl4030_probe,
.remove = gpio_twl4030_remove,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 5c1ef2b3ef1..ae0ffdce8bd 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -11,7 +11,7 @@
*/
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/export.h>
#include <linux/acpi_gpio.h>
#include <linux/acpi.h>
@@ -33,14 +33,15 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
}
/**
- * acpi_get_gpio() - Translate ACPI GPIO pin to GPIO number usable with GPIO API
+ * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
* @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
* @pin: ACPI GPIO pin number (0-based, controller-relative)
*
- * Returns GPIO number to use with Linux generic GPIO API, or errno error value
+ * Returns GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR
+ * error value
*/
-int acpi_get_gpio(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
{
struct gpio_chip *chip;
acpi_handle handle;
@@ -48,18 +49,17 @@ int acpi_get_gpio(char *path, int pin)
status = acpi_get_handle(NULL, path, &handle);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
chip = gpiochip_find(handle, acpi_gpiochip_find);
if (!chip)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
- if (!gpio_is_valid(chip->base + pin))
- return -EINVAL;
+ if (pin < 0 || pin > chip->ngpio)
+ return ERR_PTR(-EINVAL);
- return chip->base + pin;
+ return gpio_to_desc(chip->base + pin);
}
-EXPORT_SYMBOL_GPL(acpi_get_gpio);
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
@@ -73,15 +73,8 @@ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
{
struct acpi_gpio_evt_pin *evt_pin = data;
- struct acpi_object_list args;
- union acpi_object arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = evt_pin->pin;
- args.count = 1;
- args.pointer = &arg;
-
- acpi_evaluate_object(evt_pin->evt_handle, NULL, &args, NULL);
+ acpi_execute_simple_method(evt_pin->evt_handle, NULL, evt_pin->pin);
return IRQ_HANDLED;
}
@@ -201,10 +194,48 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
}
EXPORT_SYMBOL(acpi_gpiochip_request_interrupts);
+/**
+ * acpi_gpiochip_free_interrupts() - Free GPIO _EVT ACPI event interrupts.
+ * @chip: gpio chip
+ *
+ * Free interrupts associated with the _EVT method for the given GPIO chip.
+ *
+ * The remaining ACPI event interrupts associated with the chip are freed
+ * automatically.
+ */
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct list_head *evt_pins;
+ struct acpi_gpio_evt_pin *evt_pin, *ep;
+
+ if (!chip->dev || !chip->to_irq)
+ return;
+
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_evt_dh, (void **)&evt_pins);
+ if (ACPI_FAILURE(status))
+ return;
+
+ list_for_each_entry_safe_reverse(evt_pin, ep, evt_pins, node) {
+ devm_free_irq(chip->dev, evt_pin->irq, evt_pin);
+ list_del(&evt_pin->node);
+ kfree(evt_pin);
+ }
+
+ acpi_detach_data(handle, acpi_gpio_evt_dh);
+ kfree(evt_pins);
+}
+EXPORT_SYMBOL(acpi_gpiochip_free_interrupts);
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
- int gpio;
+ struct gpio_desc *desc;
int n;
};
@@ -215,37 +246,39 @@ static int acpi_find_gpio(struct acpi_resource *ares, void *data)
if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
return 1;
- if (lookup->n++ == lookup->index && lookup->gpio < 0) {
+ if (lookup->n++ == lookup->index && !lookup->desc) {
const struct acpi_resource_gpio *agpio = &ares->data.gpio;
- lookup->gpio = acpi_get_gpio(agpio->resource_source.string_ptr,
- agpio->pin_table[0]);
+ lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
+ agpio->pin_table[0]);
lookup->info.gpioint =
agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+ lookup->info.active_low =
+ agpio->polarity == ACPI_ACTIVE_LOW;
}
return 1;
}
/**
- * acpi_get_gpio_by_index() - get a GPIO number from device resources
+ * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
* @dev: pointer to a device to get GPIO from
* @index: index of GpioIo/GpioInt resource (starting from %0)
* @info: info pointer to fill in (optional)
*
* Function goes through ACPI resources for @dev and based on @index looks
- * up a GpioIo/GpioInt resource, translates it to the Linux GPIO number,
+ * up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor,
* and returns it. @index matches GpioIo/GpioInt resources only so if there
* are total %3 GPIO resources, the index goes from %0 to %2.
*
- * If the GPIO cannot be translated or there is an error, negative errno is
+ * If the GPIO cannot be translated or there is an error an ERR_PTR is
* returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
* function only returns the first.
*/
-int acpi_get_gpio_by_index(struct device *dev, int index,
- struct acpi_gpio_info *info)
+struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info)
{
struct acpi_gpio_lookup lookup;
struct list_head resource_list;
@@ -254,65 +287,26 @@ int acpi_get_gpio_by_index(struct device *dev, int index,
int ret;
if (!dev)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
handle = ACPI_HANDLE(dev);
if (!handle || acpi_bus_get_device(handle, &adev))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
memset(&lookup, 0, sizeof(lookup));
lookup.index = index;
- lookup.gpio = -ENODEV;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, acpi_find_gpio,
&lookup);
if (ret < 0)
- return ret;
+ return ERR_PTR(ret);
acpi_dev_free_resource_list(&resource_list);
- if (lookup.gpio >= 0 && info)
+ if (lookup.desc && info)
*info = lookup.info;
- return lookup.gpio;
+ return lookup.desc ? lookup.desc : ERR_PTR(-ENODEV);
}
-EXPORT_SYMBOL_GPL(acpi_get_gpio_by_index);
-
-/**
- * acpi_gpiochip_free_interrupts() - Free GPIO _EVT ACPI event interrupts.
- * @chip: gpio chip
- *
- * Free interrupts associated with the _EVT method for the given GPIO chip.
- *
- * The remaining ACPI event interrupts associated with the chip are freed
- * automatically.
- */
-void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
-{
- acpi_handle handle;
- acpi_status status;
- struct list_head *evt_pins;
- struct acpi_gpio_evt_pin *evt_pin, *ep;
-
- if (!chip->dev || !chip->to_irq)
- return;
-
- handle = ACPI_HANDLE(chip->dev);
- if (!handle)
- return;
-
- status = acpi_get_data(handle, acpi_gpio_evt_dh, (void **)&evt_pins);
- if (ACPI_FAILURE(status))
- return;
-
- list_for_each_entry_safe_reverse(evt_pin, ep, evt_pins, node) {
- devm_free_irq(chip->dev, evt_pin->irq, evt_pin);
- list_del(&evt_pin->node);
- kfree(evt_pin);
- }
-
- acpi_detach_data(handle, acpi_gpio_evt_dh);
- kfree(evt_pins);
-}
-EXPORT_SYMBOL(acpi_gpiochip_free_interrupts);
+EXPORT_SYMBOL_GPL(acpi_get_gpiod_by_index);
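
With the gpiolib-acpi change above, lookups return a struct gpio_desc * and report failure through the ERR_PTR convention instead of a negative GPIO number. A hedged sketch of how a caller is expected to check the result; example_probe() is an illustrative name, the call itself matches the signature introduced above:

#include <linux/acpi_gpio.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	struct acpi_gpio_info info;
	struct gpio_desc *desc;

	desc = acpi_get_gpiod_by_index(dev, 0, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* e.g. -ENODEV or -EINVAL */

	/* use desc through the gpiod_* consumer API from here on */
	return 0;
}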
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 0dfaf20e4da..e0a98f581f5 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -15,19 +15,21 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
+struct gpio_desc;
+
/* Private data structure for of_gpiochip_find_and_xlate */
struct gg_data {
enum of_gpio_flags *flags;
struct of_phandle_args gpiospec;
- int out_gpio;
+ struct gpio_desc *out_gpio;
};
/* Private function for resolving node pointer to gpio_chip */
@@ -45,28 +47,31 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
if (ret < 0)
return false;
- gg_data->out_gpio = ret + gc->base;
+ gg_data->out_gpio = gpio_to_desc(ret + gc->base);
return true;
}
/**
- * of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
+ * of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API
* @np: device node to get GPIO from
* @propname: property name containing gpio specifier(s)
* @index: index of the GPIO
* @flags: a flags pointer to fill in
*
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
+ * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
-int of_get_named_gpio_flags(struct device_node *np, const char *propname,
- int index, enum of_gpio_flags *flags)
+struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+ const char *propname, int index, enum of_gpio_flags *flags)
{
/* Return -EPROBE_DEFER to support probe() functions to be called
* later when the GPIO actually becomes available
*/
- struct gg_data gg_data = { .flags = flags, .out_gpio = -EPROBE_DEFER };
+ struct gg_data gg_data = {
+ .flags = flags,
+ .out_gpio = ERR_PTR(-EPROBE_DEFER)
+ };
int ret;
/* .of_xlate might decide to not fill in the flags, so clear it. */
@@ -78,16 +83,17 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
if (ret) {
pr_debug("%s: can't parse gpios property of node '%s[%d]'\n",
__func__, np->full_name, index);
- return ret;
+ return ERR_PTR(ret);
}
gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
of_node_put(gg_data.gpiospec.np);
- pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
+ pr_debug("%s exited with status %d\n", __func__,
+ PTR_RET(gg_data.out_gpio));
return gg_data.out_gpio;
}
-EXPORT_SYMBOL(of_get_named_gpio_flags);
+EXPORT_SYMBOL(of_get_named_gpiod_flags);
/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
@@ -190,10 +196,15 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
struct of_phandle_args pinspec;
struct pinctrl_dev *pctldev;
int index = 0, ret;
+ const char *name;
+ static const char group_names_propname[] = "gpio-ranges-group-names";
+ struct property *group_names;
if (!np)
return;
+ group_names = of_find_property(np, group_names_propname, NULL);
+
for (;; index++) {
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
index, &pinspec);
@@ -204,14 +215,56 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!pctldev)
break;
- ret = gpiochip_add_pin_range(chip,
- pinctrl_dev_get_devname(pctldev),
- pinspec.args[0],
- pinspec.args[1],
- pinspec.args[2]);
-
- if (ret)
- break;
+ if (pinspec.args[2]) {
+ if (group_names) {
+ ret = of_property_read_string_index(np,
+ group_names_propname,
+ index, &name);
+ if (strlen(name)) {
+ pr_err("%s: Group name of numeric GPIO ranges must be the empty string.\n",
+ np->full_name);
+ break;
+ }
+ }
+ /* npins != 0: linear range */
+ ret = gpiochip_add_pin_range(chip,
+ pinctrl_dev_get_devname(pctldev),
+ pinspec.args[0],
+ pinspec.args[1],
+ pinspec.args[2]);
+ if (ret)
+ break;
+ } else {
+ /* npins == 0: special range */
+ if (pinspec.args[1]) {
+ pr_err("%s: Illegal gpio-range format.\n",
+ np->full_name);
+ break;
+ }
+
+ if (!group_names) {
+ pr_err("%s: GPIO group range requested but no %s property.\n",
+ np->full_name, group_names_propname);
+ break;
+ }
+
+ ret = of_property_read_string_index(np,
+ group_names_propname,
+ index, &name);
+ if (ret)
+ break;
+
+ if (!strlen(name)) {
+ pr_err("%s: Group name of GPIO group range cannot be the empty string.\n",
+ np->full_name);
+ break;
+ }
+
+ ret = gpiochip_add_pingroup_range(chip, pctldev,
+ pinspec.args[0], name);
+ if (ret)
+ break;
+ }
}
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0dee0e0c247..4e10b10d3dd 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -10,22 +10,19 @@
#include <linux/seq_file.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/acpi_gpio.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
-/* Optional implementation infrastructure for GPIO interfaces.
+/* Implementation infrastructure for GPIO interfaces.
*
- * Platforms may want to use this if they tend to use very many GPIOs
- * that aren't part of a System-On-Chip core; or across I2C/SPI/etc.
- *
- * When kernel footprint or instruction count is an issue, simpler
- * implementations may be preferred. The GPIO programming interface
- * allows for inlining speed-critical get/set operations for common
- * cases, so that access to SOC-integrated GPIOs can sometimes cost
- * only an instruction or two per bit.
+ * The GPIO programming interface allows for inlining speed-critical
+ * get/set operations for common cases, so that access to SOC-integrated
+ * GPIOs can sometimes cost only an instruction or two per bit.
*/
@@ -57,9 +54,10 @@ struct gpio_desc {
#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL 4 /* trigger on falling edge */
#define FLAG_TRIG_RISE 5 /* trigger on rising edge */
-#define FLAG_ACTIVE_LOW 6 /* sysfs value has active low */
+#define FLAG_ACTIVE_LOW 6 /* value has active low */
#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
+#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define ID_SHIFT 16 /* add new flags before this one */
@@ -74,34 +72,50 @@ static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
+static DEFINE_MUTEX(gpio_lookup_lock);
+static LIST_HEAD(gpio_lookup_list);
static LIST_HEAD(gpio_chips);
#ifdef CONFIG_GPIO_SYSFS
static DEFINE_IDR(dirent_idr);
#endif
-/*
- * Internal gpiod_* API using descriptors instead of the integer namespace.
- * Most of this should eventually go public.
- */
static int gpiod_request(struct gpio_desc *desc, const char *label);
static void gpiod_free(struct gpio_desc *desc);
-static int gpiod_direction_input(struct gpio_desc *desc);
-static int gpiod_direction_output(struct gpio_desc *desc, int value);
-static int gpiod_get_direction(const struct gpio_desc *desc);
-static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
-static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-static int gpiod_get_value(const struct gpio_desc *desc);
-static void gpiod_set_value(struct gpio_desc *desc, int value);
-static int gpiod_cansleep(const struct gpio_desc *desc);
-static int gpiod_to_irq(const struct gpio_desc *desc);
-static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
-static int gpiod_export_link(struct device *dev, const char *name,
- struct gpio_desc *desc);
-static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
-static void gpiod_unexport(struct gpio_desc *desc);
+#ifdef CONFIG_DEBUG_FS
+#define gpiod_emerg(desc, fmt, ...) \
+ pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+ ##__VA_ARGS__)
+#define gpiod_crit(desc, fmt, ...) \
+ pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+ ##__VA_ARGS__)
+#define gpiod_err(desc, fmt, ...) \
+ pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+ ##__VA_ARGS__)
+#define gpiod_warn(desc, fmt, ...) \
+ pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+ ##__VA_ARGS__)
+#define gpiod_info(desc, fmt, ...) \
+ pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+ ##__VA_ARGS__)
+#define gpiod_dbg(desc, fmt, ...) \
+ pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+ ##__VA_ARGS__)
+#else
+#define gpiod_emerg(desc, fmt, ...) \
+ pr_emerg("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
+#define gpiod_crit(desc, fmt, ...) \
+ pr_crit("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
+#define gpiod_err(desc, fmt, ...) \
+ pr_err("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
+#define gpiod_warn(desc, fmt, ...) \
+ pr_warn("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
+#define gpiod_info(desc, fmt, ...) \
+ pr_info("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
+#define gpiod_dbg(desc, fmt, ...) \
+ pr_debug("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
+#endif
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
@@ -121,23 +135,36 @@ static int gpio_chip_hwgpio(const struct gpio_desc *desc)
/**
* Convert a GPIO number to its descriptor
*/
-static struct gpio_desc *gpio_to_desc(unsigned gpio)
+struct gpio_desc *gpio_to_desc(unsigned gpio)
{
if (WARN(!gpio_is_valid(gpio), "invalid GPIO %d\n", gpio))
return NULL;
else
return &gpio_desc[gpio];
}
+EXPORT_SYMBOL_GPL(gpio_to_desc);
+
+/**
+ * Convert an offset on a certain chip to a corresponding descriptor
+ */
+static struct gpio_desc *gpiochip_offset_to_desc(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ unsigned int gpio = chip->base + offset;
+
+ return gpio_to_desc(gpio);
+}
/**
* Convert a GPIO descriptor to the integer namespace.
* This should disappear in the future but is needed since we still
* use GPIO numbers for error messages and sysfs nodes
*/
-static int desc_to_gpio(const struct gpio_desc *desc)
+int desc_to_gpio(const struct gpio_desc *desc)
{
return desc - &gpio_desc[0];
}
+EXPORT_SYMBOL_GPL(desc_to_gpio);
/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
@@ -172,16 +199,15 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
return 0;
}
-static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
+/**
+ * gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs
+ * @desc: descriptor to return the chip of
+ */
+struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
return desc ? desc->chip : NULL;
}
-
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-struct gpio_chip *gpio_to_chip(unsigned gpio)
-{
- return gpiod_to_chip(gpio_to_desc(gpio));
-}
+EXPORT_SYMBOL_GPL(gpiod_to_chip);
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
static int gpiochip_find_base(int ngpio)
@@ -207,8 +233,15 @@ static int gpiochip_find_base(int ngpio)
}
}
-/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
-static int gpiod_get_direction(const struct gpio_desc *desc)
+/**
+ * gpiod_get_direction - return the current direction of a GPIO
+ * @desc: GPIO to get the direction of
+ *
+ * Return GPIOF_DIR_IN or GPIOF_DIR_OUT, or an error code in case of error.
+ *
+ * This function may sleep if gpiod_cansleep() is true.
+ */
+int gpiod_get_direction(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;
@@ -234,6 +267,7 @@ static int gpiod_get_direction(const struct gpio_desc *desc)
}
return status;
}
+EXPORT_SYMBOL_GPL(gpiod_get_direction);
#ifdef CONFIG_GPIO_SYSFS
@@ -318,17 +352,10 @@ static ssize_t gpio_value_show(struct device *dev,
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
status = -EIO;
- } else {
- int value;
-
- value = !!gpiod_get_value_cansleep(desc);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
-
- status = sprintf(buf, "%d\n", value);
- }
+ else
+ status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
mutex_unlock(&sysfs_lock);
return status;
@@ -351,9 +378,7 @@ static ssize_t gpio_value_store(struct device *dev,
status = kstrtol(buf, 0, &value);
if (status == 0) {
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- gpiod_set_value_cansleep(desc, value != 0);
+ gpiod_set_value_cansleep(desc, value);
status = size;
}
}
@@ -395,6 +420,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
desc->flags &= ~GPIO_TRIGGER_MASK;
if (!gpio_flags) {
+ gpiod_unlock_as_irq(desc);
ret = 0;
goto free_id;
}
@@ -408,7 +434,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (!value_sd) {
- value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
+ value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
if (!value_sd) {
ret = -ENODEV;
goto err_out;
@@ -433,6 +459,12 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
if (ret < 0)
goto free_id;
+ ret = gpiod_lock_as_irq(desc);
+ if (ret < 0) {
+ gpiod_warn(desc, "failed to flag the GPIO for IRQ\n");
+ goto free_id;
+ }
+
desc->flags |= gpio_flags;
return 0;
@@ -736,7 +768,7 @@ static struct class gpio_class = {
/**
- * gpio_export - export a GPIO through sysfs
+ * gpiod_export - export a GPIO through sysfs
* @gpio: gpio to make available, already requested
* @direction_may_change: true if userspace may change gpio direction
* Context: arch_initcall or later
@@ -750,7 +782,7 @@ static struct class gpio_class = {
*
* Returns zero on success, else an error.
*/
-static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
unsigned long flags;
int status;
@@ -828,12 +860,7 @@ fail_unlock:
status);
return status;
}
-
-int gpio_export(unsigned gpio, bool direction_may_change)
-{
- return gpiod_export(gpio_to_desc(gpio), direction_may_change);
-}
-EXPORT_SYMBOL_GPL(gpio_export);
+EXPORT_SYMBOL_GPL(gpiod_export);
static int match_export(struct device *dev, const void *data)
{
@@ -841,7 +868,7 @@ static int match_export(struct device *dev, const void *data)
}
/**
- * gpio_export_link - create a sysfs link to an exported GPIO node
+ * gpiod_export_link - create a sysfs link to an exported GPIO node
* @dev: device under which to create symlink
* @name: name of the symlink
* @gpio: gpio to create symlink to, already exported
@@ -851,8 +878,8 @@ static int match_export(struct device *dev, const void *data)
*
* Returns zero on success, else an error.
*/
-static int gpiod_export_link(struct device *dev, const char *name,
- struct gpio_desc *desc)
+int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
{
int status = -EINVAL;
@@ -883,15 +910,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
return status;
}
-
-int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
-{
- return gpiod_export_link(dev, name, gpio_to_desc(gpio));
-}
-EXPORT_SYMBOL_GPL(gpio_export_link);
+EXPORT_SYMBOL_GPL(gpiod_export_link);
/**
- * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
+ * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value
* @gpio: gpio to change
* @value: non-zero to use active low, i.e. inverted values
*
@@ -902,7 +924,7 @@ EXPORT_SYMBOL_GPL(gpio_export_link);
*
* Returns zero on success, else an error.
*/
-static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
{
struct device *dev = NULL;
int status = -EINVAL;
@@ -933,20 +955,15 @@ unlock:
return status;
}
-
-int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
-}
-EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
+EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low);
/**
- * gpio_unexport - reverse effect of gpio_export()
+ * gpiod_unexport - reverse effect of gpiod_export()
* @gpio: gpio to make unavailable
*
* This is implicit on gpio_free().
*/
-static void gpiod_unexport(struct gpio_desc *desc)
+void gpiod_unexport(struct gpio_desc *desc)
{
int status = 0;
struct device *dev = NULL;
@@ -979,12 +996,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
}
-
-void gpio_unexport(unsigned gpio)
-{
- gpiod_unexport(gpio_to_desc(gpio));
-}
-EXPORT_SYMBOL_GPL(gpio_unexport);
+EXPORT_SYMBOL_GPL(gpiod_unexport);
static int gpiochip_export(struct gpio_chip *chip)
{
@@ -1091,27 +1103,6 @@ static inline void gpiochip_unexport(struct gpio_chip *chip)
{
}
-static inline int gpiod_export(struct gpio_desc *desc,
- bool direction_may_change)
-{
- return -ENOSYS;
-}
-
-static inline int gpiod_export_link(struct device *dev, const char *name,
- struct gpio_desc *desc)
-{
- return -ENOSYS;
-}
-
-static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
-{
- return -ENOSYS;
-}
-
-static inline void gpiod_unexport(struct gpio_desc *desc)
-{
-}
-
#endif /* CONFIG_GPIO_SYSFS */
/*
@@ -1320,6 +1311,53 @@ EXPORT_SYMBOL_GPL(gpiochip_find);
#ifdef CONFIG_PINCTRL
/**
+ * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
+ * @chip: the gpiochip to add the range for
+ * @pctldev: the pin controller to map to
+ * @gpio_offset: the start offset in the current gpio_chip number space
+ * @pin_group: name of the pin group inside the pin controller
+ */
+int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ struct gpio_pin_range *pin_range;
+ int ret;
+
+ pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
+ if (!pin_range) {
+ pr_err("%s: GPIO chip: failed to allocate pin ranges\n",
+ chip->label);
+ return -ENOMEM;
+ }
+
+ /* Use local offset as range ID */
+ pin_range->range.id = gpio_offset;
+ pin_range->range.gc = chip;
+ pin_range->range.name = chip->label;
+ pin_range->range.base = chip->base + gpio_offset;
+ pin_range->pctldev = pctldev;
+
+ ret = pinctrl_get_group_pins(pctldev, pin_group,
+ &pin_range->range.pins,
+ &pin_range->range.npins);
+ if (ret < 0) {
+ kfree(pin_range);
+ return ret;
+ }
+
+ pinctrl_add_gpio_range(pctldev, &pin_range->range);
+
+ pr_debug("GPIO chip %s: created GPIO range %d->%d ==> %s PINGRP %s\n",
+ chip->label, gpio_offset,
+ gpio_offset + pin_range->range.npins - 1,
+ pinctrl_dev_get_devname(pctldev), pin_group);
+
+ list_add_tail(&pin_range->node, &chip->pin_ranges);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
+
+/**
* gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
* @chip: the gpiochip to add the range for
* @pinctrl_name: the dev_name() of the pin controller to map to
@@ -1623,7 +1661,16 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* rely on gpio_request() having been called beforehand.
*/
-static int gpiod_direction_input(struct gpio_desc *desc)
+/**
+ * gpiod_direction_input - set the GPIO direction to input
+ * @desc: GPIO to set to input
+ *
+ * Set the direction of the passed GPIO to input, so that gpiod_get_value() can
+ * be called safely on it.
+ *
+ * Return 0 in case of success, else an error code.
+ */
+int gpiod_direction_input(struct gpio_desc *desc)
{
unsigned long flags;
struct gpio_chip *chip;
@@ -1637,8 +1684,9 @@ static int gpiod_direction_input(struct gpio_desc *desc)
chip = desc->chip;
if (!chip->get || !chip->direction_input) {
- pr_warn("%s: missing get() or direction_input() operations\n",
- __func__);
+ gpiod_warn(desc,
+ "%s: missing get() or direction_input() operations\n",
+ __func__);
return -EIO;
}
@@ -1658,8 +1706,7 @@ static int gpiod_direction_input(struct gpio_desc *desc)
if (status) {
status = chip->request(chip, offset);
if (status < 0) {
- pr_debug("GPIO-%d: chip request fail, %d\n",
- desc_to_gpio(desc), status);
+ gpiod_dbg(desc, "chip request fail, %d\n", status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1677,18 +1724,22 @@ lose:
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status)
- pr_debug("%s: gpio-%d status %d\n", __func__,
- desc_to_gpio(desc), status);
+ gpiod_dbg(desc, "%s status %d\n", __func__, status);
return status;
}
+EXPORT_SYMBOL_GPL(gpiod_direction_input);
-int gpio_direction_input(unsigned gpio)
-{
- return gpiod_direction_input(gpio_to_desc(gpio));
-}
-EXPORT_SYMBOL_GPL(gpio_direction_input);
-
-static int gpiod_direction_output(struct gpio_desc *desc, int value)
+/**
+ * gpiod_direction_output - set the GPIO direction to output
+ * @desc: GPIO to set to output
+ * @value: initial output value of the GPIO
+ *
+ * Set the direction of the passed GPIO to output, so that gpiod_set_value() can
+ * be called safely on it. The initial value of the output must be specified.
+ *
+ * Return 0 in case of success, else an error code.
+ */
+int gpiod_direction_output(struct gpio_desc *desc, int value)
{
unsigned long flags;
struct gpio_chip *chip;
@@ -1700,6 +1751,14 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EINVAL;
}
+ /* GPIOs used for IRQs shall not be set as output */
+ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
+ gpiod_err(desc,
+ "%s: tried to set a GPIO tied to an IRQ as output\n",
+ __func__);
+ return -EIO;
+ }
+
/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
return gpiod_direction_input(desc);
@@ -1710,8 +1769,9 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
chip = desc->chip;
if (!chip->set || !chip->direction_output) {
- pr_warn("%s: missing set() or direction_output() operations\n",
- __func__);
+ gpiod_warn(desc,
+ "%s: missing set() or direction_output() operations\n",
+ __func__);
return -EIO;
}
@@ -1731,8 +1791,7 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
if (status) {
status = chip->request(chip, offset);
if (status < 0) {
- pr_debug("GPIO-%d: chip request fail, %d\n",
- desc_to_gpio(desc), status);
+ gpiod_dbg(desc, "chip request fail, %d\n", status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1750,26 +1809,20 @@ lose:
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status)
- pr_debug("%s: gpio-%d status %d\n", __func__,
- desc_to_gpio(desc), status);
+ gpiod_dbg(desc, "%s: gpio status %d\n", __func__, status);
return status;
}
-
-int gpio_direction_output(unsigned gpio, int value)
-{
- return gpiod_direction_output(gpio_to_desc(gpio), value);
-}
-EXPORT_SYMBOL_GPL(gpio_direction_output);
+EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
- * gpio_set_debounce - sets @debounce time for a @gpio
+ * gpiod_set_debounce - sets @debounce time for a @gpio
* @gpio: the gpio to set debounce time
* @debounce: debounce time is microseconds
*
* returns -ENOTSUPP if the controller does not support setting
* debounce.
*/
-static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
unsigned long flags;
struct gpio_chip *chip;
@@ -1783,8 +1836,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
chip = desc->chip;
if (!chip->set || !chip->set_debounce) {
- pr_debug("%s: missing set() or set_debounce() operations\n",
- __func__);
+ gpiod_dbg(desc,
+ "%s: missing set() or set_debounce() operations\n",
+ __func__);
return -ENOTSUPP;
}
@@ -1806,17 +1860,23 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status)
- pr_debug("%s: gpio-%d status %d\n", __func__,
- desc_to_gpio(desc), status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
+EXPORT_SYMBOL_GPL(gpiod_set_debounce);
-int gpio_set_debounce(unsigned gpio, unsigned debounce)
+/**
+ * gpiod_is_active_low - test whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to test
+ *
+ * Returns 1 if the GPIO is active-low, 0 otherwise.
+ */
+int gpiod_is_active_low(const struct gpio_desc *desc)
{
- return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
+ return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
}
-EXPORT_SYMBOL_GPL(gpio_set_debounce);
+EXPORT_SYMBOL_GPL(gpiod_is_active_low);
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
@@ -1840,42 +1900,68 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* that the GPIO was actually requested.
*/
-/**
- * __gpio_get_value() - return a gpio's value
- * @gpio: gpio whose value will be returned
- * Context: any
- *
- * This is used directly or indirectly to implement gpio_get_value().
- * It returns the zero or nonzero value provided by the associated
- * gpio_chip.get() method; or zero if no such method is provided.
- */
-static int gpiod_get_value(const struct gpio_desc *desc)
+static int _gpiod_get_raw_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
- if (!desc)
- return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
- /* Should be using gpio_get_value_cansleep() */
- WARN_ON(chip->can_sleep);
value = chip->get ? chip->get(chip, offset) : 0;
trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
-int __gpio_get_value(unsigned gpio)
+/**
+ * gpiod_get_raw_value() - return a gpio's raw value
+ * @desc: gpio whose value will be returned
+ *
+ * Return the GPIO's raw value, i.e. the value of the physical line disregarding
+ * its ACTIVE_LOW status.
+ *
+ * This function should be called from contexts where we cannot sleep, and will
+ * complain if the GPIO chip functions potentially sleep.
+ */
+int gpiod_get_raw_value(const struct gpio_desc *desc)
+{
+ if (!desc)
+ return 0;
+ /* Should be using gpio_get_value_cansleep() */
+ WARN_ON(desc->chip->can_sleep);
+ return _gpiod_get_raw_value(desc);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
+
+/**
+ * gpiod_get_value() - return a gpio's value
+ * @desc: gpio whose value will be returned
+ *
+ * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * account.
+ *
+ * This function should be called from contexts where we cannot sleep, and will
+ * complain if the GPIO chip functions potentially sleep.
+ */
+int gpiod_get_value(const struct gpio_desc *desc)
{
- return gpiod_get_value(gpio_to_desc(gpio));
+ int value;
+ if (!desc)
+ return 0;
+ /* Should be using gpio_get_value_cansleep() */
+ WARN_ON(desc->chip->can_sleep);
+
+ value = _gpiod_get_raw_value(desc);
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+
+ return value;
}
-EXPORT_SYMBOL_GPL(__gpio_get_value);
+EXPORT_SYMBOL_GPL(gpiod_get_value);
/*
* _gpio_set_open_drain_value() - Set the open drain gpio's value.
- * @gpio: Gpio whose state need to be set.
- * @chip: Gpio chip.
+ * @desc: gpio descriptor whose state needs to be set.
* @value: Non-zero for setting it HIGH otherise it will set to LOW.
*/
static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
@@ -1895,14 +1981,14 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
}
trace_gpio_direction(desc_to_gpio(desc), value, err);
if (err < 0)
- pr_err("%s: Error in set_value for open drain gpio%d err %d\n",
- __func__, desc_to_gpio(desc), err);
+ gpiod_err(desc,
+ "%s: Error in set_value for open drain err %d\n",
+ __func__, err);
}
/*
- * _gpio_set_open_source() - Set the open source gpio's value.
- * @gpio: Gpio whose state need to be set.
- * @chip: Gpio chip.
+ * _gpio_set_open_source_value() - Set the open source gpio's value.
+ * @desc: gpio descriptor whose state needs to be set.
* @value: Non-zero for setting it HIGH otherise it will set to LOW.
*/
static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
@@ -1922,28 +2008,16 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
}
trace_gpio_direction(desc_to_gpio(desc), !value, err);
if (err < 0)
- pr_err("%s: Error in set_value for open source gpio%d err %d\n",
- __func__, desc_to_gpio(desc), err);
+ gpiod_err(desc,
+ "%s: Error in set_value for open source err %d\n",
+ __func__, err);
}
-/**
- * __gpio_set_value() - assign a gpio's value
- * @gpio: gpio whose value will be assigned
- * @value: value to assign
- * Context: any
- *
- * This is used directly or indirectly to implement gpio_set_value().
- * It invokes the associated gpio_chip.set() method.
- */
-static void gpiod_set_value(struct gpio_desc *desc, int value)
+static void _gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
- if (!desc)
- return;
chip = desc->chip;
- /* Should be using gpio_set_value_cansleep() */
- WARN_ON(chip->can_sleep);
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
_gpio_set_open_drain_value(desc, value);
@@ -1953,44 +2027,71 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
chip->set(chip, gpio_chip_hwgpio(desc), value);
}
-void __gpio_set_value(unsigned gpio, int value)
+/**
+ * gpiod_set_raw_value() - assign a gpio's raw value
+ * @desc: gpio whose value will be assigned
+ * @value: value to assign
+ *
+ * Set the raw value of the GPIO, i.e. the value of its physical line without
+ * regard for its ACTIVE_LOW status.
+ *
+ * This function should be called from contexts where we cannot sleep, and will
+ * complain if the GPIO chip functions potentially sleep.
+ */
+void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
- return gpiod_set_value(gpio_to_desc(gpio), value);
+ if (!desc)
+ return;
+ /* Should be using gpio_set_value_cansleep() */
+ WARN_ON(desc->chip->can_sleep);
+ _gpiod_set_raw_value(desc, value);
}
-EXPORT_SYMBOL_GPL(__gpio_set_value);
+EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
/**
- * __gpio_cansleep() - report whether gpio value access will sleep
- * @gpio: gpio in question
- * Context: any
+ * gpiod_set_value() - assign a gpio's value
+ * @desc: gpio whose value will be assigned
+ * @value: value to assign
+ *
+ * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
+ * account.
*
- * This is used directly or indirectly to implement gpio_cansleep(). It
- * returns nonzero if access reading or writing the GPIO value can sleep.
+ * This function should be called from contexts where we cannot sleep, and will
+ * complain if the GPIO chip functions potentially sleep.
*/
-static int gpiod_cansleep(const struct gpio_desc *desc)
+void gpiod_set_value(struct gpio_desc *desc, int value)
{
if (!desc)
- return 0;
- /* only call this on GPIOs that are valid! */
- return desc->chip->can_sleep;
+ return;
+ /* Should be using gpio_set_value_cansleep() */
+ WARN_ON(desc->chip->can_sleep);
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ _gpiod_set_raw_value(desc, value);
}
+EXPORT_SYMBOL_GPL(gpiod_set_value);
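A short editor's illustration, not part of the patch, of how the logical accessors above differ from the new _raw ones once FLAG_ACTIVE_LOW is set on a descriptor. "led" is a hypothetical, already-requested descriptor on a chip that does not sleep and that reads back the level it drives:

static void example_active_low(struct gpio_desc *led)
{
	/* logical 1 on an active-low line drives the wire low */
	gpiod_set_value(led, 1);

	/* the raw accessor reports the physical level ... */
	WARN_ON(gpiod_get_raw_value(led) != 0);
	/* ... while the logical accessor undoes the inversion */
	WARN_ON(gpiod_get_value(led) != 1);
}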
-int __gpio_cansleep(unsigned gpio)
+/**
+ * gpiod_cansleep() - report whether gpio value access may sleep
+ * @desc: gpio to check
+ *
+ */
+int gpiod_cansleep(const struct gpio_desc *desc)
{
- return gpiod_cansleep(gpio_to_desc(gpio));
+ if (!desc)
+ return 0;
+ return desc->chip->can_sleep;
}
-EXPORT_SYMBOL_GPL(__gpio_cansleep);
+EXPORT_SYMBOL_GPL(gpiod_cansleep);
/**
- * __gpio_to_irq() - return the IRQ corresponding to a GPIO
- * @gpio: gpio whose IRQ will be returned (already requested)
- * Context: any
+ * gpiod_to_irq() - return the IRQ corresponding to a GPIO
+ * @desc: gpio whose IRQ will be returned (already requested)
*
- * This is used directly or indirectly to implement gpio_to_irq().
- * It returns the number of the IRQ signaled by this (input) GPIO,
- * or a negative errno.
+ * Return the IRQ corresponding to the passed GPIO, or an error code in case of
+ * error.
*/
-static int gpiod_to_irq(const struct gpio_desc *desc)
+int gpiod_to_irq(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;
@@ -2001,62 +2102,366 @@ static int gpiod_to_irq(const struct gpio_desc *desc)
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
}
+EXPORT_SYMBOL_GPL(gpiod_to_irq);
-int __gpio_to_irq(unsigned gpio)
+/**
+ * gpiod_lock_as_irq() - lock a GPIO to be used as IRQ
+ * @desc: the GPIO descriptor to lock as used for IRQ
+ *
+ * This is used directly by GPIO drivers that want to lock down
+ * a certain GPIO line to be used as IRQs, for example in the
+ * .to_irq() callback of their gpio_chip, or in the .irq_enable()
+ * of its irq_chip implementation if the GPIO is known from that
+ * code.
+ */
+int gpiod_lock_as_irq(struct gpio_desc *desc)
{
- return gpiod_to_irq(gpio_to_desc(gpio));
+ if (!desc)
+ return -EINVAL;
+
+ if (test_bit(FLAG_IS_OUT, &desc->flags)) {
+ gpiod_err(desc,
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
+ return -EIO;
+ }
+
+ set_bit(FLAG_USED_AS_IRQ, &desc->flags);
+ return 0;
}
-EXPORT_SYMBOL_GPL(__gpio_to_irq);
+EXPORT_SYMBOL_GPL(gpiod_lock_as_irq);
+int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ return gpiod_lock_as_irq(gpiochip_offset_to_desc(chip, offset));
+}
+EXPORT_SYMBOL_GPL(gpio_lock_as_irq);
-/* There's no value in making it easy to inline GPIO calls that may sleep.
- * Common examples include ones connected to I2C or SPI chips.
+/**
+ * gpiod_unlock_as_irq() - unlock a GPIO used as IRQ
+ * @desc: the GPIO descriptor to unlock from IRQ usage
+ *
+ * This is used directly by GPIO drivers that want to indicate
+ * that a certain GPIO is no longer used exclusively for IRQ.
*/
+void gpiod_unlock_as_irq(struct gpio_desc *desc)
+{
+ if (!desc)
+ return;
-static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
+ clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_unlock_as_irq);
+
+void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ return gpiod_unlock_as_irq(gpiochip_offset_to_desc(chip, offset));
+}
+EXPORT_SYMBOL_GPL(gpio_unlock_as_irq);
+
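As the kerneldoc above suggests, GPIO drivers that route lines to interrupts can mark them from their irq_chip callbacks. A hedged sketch, not part of the patch; the chip-data retrieval and the hardware mask/unmask steps are placeholders:

static void example_irq_enable(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	/* from now on gpiod_direction_output() on this line returns -EIO */
	if (gpio_lock_as_irq(chip, d->hwirq))
		pr_err("GPIO %lu is configured as output\n", d->hwirq);

	/* ... unmask the interrupt in hardware ... */
}

static void example_irq_disable(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	/* ... mask the interrupt in hardware ... */

	gpio_unlock_as_irq(chip, d->hwirq);
}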
+/**
+ * gpiod_get_raw_value_cansleep() - return a gpio's raw value
+ * @desc: gpio whose value will be returned
+ *
+ * Return the GPIO's raw value, i.e. the value of the physical line disregarding
+ * its ACTIVE_LOW status.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+{
+ might_sleep_if(extra_checks);
+ if (!desc)
+ return 0;
+ return _gpiod_get_raw_value(desc);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
+
+/**
+ * gpiod_get_value_cansleep() - return a gpio's value
+ * @desc: gpio whose value will be returned
+ *
+ * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * account.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
- struct gpio_chip *chip;
int value;
- int offset;
might_sleep_if(extra_checks);
if (!desc)
return 0;
- chip = desc->chip;
- offset = gpio_chip_hwgpio(desc);
- value = chip->get ? chip->get(chip, offset) : 0;
- trace_gpio_value(desc_to_gpio(desc), 1, value);
+
+ value = _gpiod_get_raw_value(desc);
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+
return value;
}
+EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
-int gpio_get_value_cansleep(unsigned gpio)
+/**
+ * gpiod_set_raw_value_cansleep() - assign a gpio's raw value
+ * @desc: gpio whose value will be assigned
+ * @value: value to assign
+ *
+ * Set the raw value of the GPIO, i.e. the value of its physical line without
+ * regard for its ACTIVE_LOW status.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
- return gpiod_get_value_cansleep(gpio_to_desc(gpio));
+ might_sleep_if(extra_checks);
+ if (!desc)
+ return;
+ _gpiod_set_raw_value(desc, value);
}
-EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
+EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
-static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+/**
+ * gpiod_set_value_cansleep() - assign a gpio's value
+ * @desc: gpio whose value will be assigned
+ * @value: value to assign
+ *
+ * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
+ * account
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
- struct gpio_chip *chip;
-
might_sleep_if(extra_checks);
if (!desc)
return;
- chip = desc->chip;
- trace_gpio_value(desc_to_gpio(desc), 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- _gpio_set_open_drain_value(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- _gpio_set_open_source_value(desc, value);
+
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ _gpiod_set_raw_value(desc, value);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
+
+/**
+ * gpiod_add_table() - register GPIO device consumers
+ * @table: array of consumers to register
+ * @size: number of consumers in table
+ */
+void gpiod_add_table(struct gpiod_lookup *table, size_t size)
+{
+ mutex_lock(&gpio_lookup_lock);
+
+ while (size--) {
+ list_add_tail(&table->list, &gpio_lookup_list);
+ table++;
+ }
+
+ mutex_unlock(&gpio_lookup_lock);
+}
+
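A sketch, not part of the patch, of the kind of board-file table gpiod_add_table() expects. The field names mirror what gpiod_find() dereferences below; the chip label, device name, function name and flag are invented, and the exact struct layout lives in the accompanying header:

static struct gpiod_lookup example_gpios[] = {
	{
		.chip_label = "gpio-bank0",	/* must match chip->label */
		.chip_hwnum = 11,		/* offset within that chip */
		.dev_id     = "leds-example",	/* consumer dev_name() */
		.con_id     = "power",		/* consumer function */
		.idx        = 0,
		.flags      = GPIOF_ACTIVE_LOW,
	},
};

static void __init example_register_gpios(void)
{
	gpiod_add_table(example_gpios, ARRAY_SIZE(example_gpios));
}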
+/*
+ * Caller must have acquired gpio_lookup_lock
+ */
+static struct gpio_chip *find_chip_by_name(const char *name)
+{
+ struct gpio_chip *chip = NULL;
+
+ list_for_each_entry(chip, &gpio_lookup_list, list) {
+ if (chip->label == NULL)
+ continue;
+ if (!strcmp(chip->label, name))
+ break;
+ }
+
+ return chip;
+}
+
+#ifdef CONFIG_OF
+static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
+ unsigned int idx, unsigned long *flags)
+{
+ char prop_name[32]; /* 32 is max size of property name */
+ enum of_gpio_flags of_flags;
+ struct gpio_desc *desc;
+
+ if (con_id)
+ snprintf(prop_name, 32, "%s-gpios", con_id);
else
- chip->set(chip, gpio_chip_hwgpio(desc), value);
+ snprintf(prop_name, 32, "gpios");
+
+ desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
+ &of_flags);
+
+ if (IS_ERR(desc))
+ return desc;
+
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ *flags |= GPIOF_ACTIVE_LOW;
+
+ return desc;
+}
+#else
+static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
+ unsigned int idx, unsigned long *flags)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+ unsigned int idx, unsigned long *flags)
+{
+ struct acpi_gpio_info info;
+ struct gpio_desc *desc;
+
+ desc = acpi_get_gpiod_by_index(dev, idx, &info);
+ if (IS_ERR(desc))
+ return desc;
+
+ if (info.gpioint && info.active_low)
+ *flags |= GPIOF_ACTIVE_LOW;
+
+ return desc;
}
-void gpio_set_value_cansleep(unsigned gpio, int value)
+static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
+ unsigned int idx, unsigned long *flags)
{
- return gpiod_set_value_cansleep(gpio_to_desc(gpio), value);
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct gpio_desc *desc = ERR_PTR(-ENODEV);
+ unsigned int match, best = 0;
+ struct gpiod_lookup *p;
+
+ mutex_lock(&gpio_lookup_lock);
+
+ list_for_each_entry(p, &gpio_lookup_list, list) {
+ match = 0;
+
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+
+ match += 2;
+ }
+
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+
+ match += 1;
+ }
+
+ if (p->idx != idx)
+ continue;
+
+ if (match > best) {
+ struct gpio_chip *chip;
+
+ chip = find_chip_by_name(p->chip_label);
+
+ if (!chip) {
+ dev_warn(dev, "cannot find GPIO chip %s\n",
+ p->chip_label);
+ continue;
+ }
+
+ if (chip->ngpio <= p->chip_hwnum) {
+ dev_warn(dev, "GPIO chip %s has %d GPIOs\n",
+ chip->label, chip->ngpio);
+ continue;
+ }
+
+ desc = gpio_to_desc(chip->base + p->chip_hwnum);
+ *flags = p->flags;
+
+ if (match != 3)
+ best = match;
+ else
+ break;
+ }
+ }
+
+ mutex_unlock(&gpio_lookup_lock);
+
+ return desc;
+}
+
+/**
+ * gpiod_get - obtain a GPIO for a given GPIO function
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ *
+ * Return the GPIO descriptor corresponding to the function con_id of device
+ * dev, or an IS_ERR() condition if an error occurred.
+ */
+struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id)
+{
+ return gpiod_get_index(dev, con_id, 0);
+}
+EXPORT_SYMBOL_GPL(gpiod_get);
+
+/**
+ * gpiod_get_index - obtain a GPIO from a multi-index GPIO function
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ *
+ * This variant of gpiod_get() allows access to GPIOs other than the first
+ * defined one for functions that define several GPIOs.
+ *
+ * Return a valid GPIO descriptor, or an IS_ERR() condition in case of error.
+ */
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx)
+{
+ struct gpio_desc *desc;
+ int status;
+ unsigned long flags = 0;
+
+ dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
+
+ /* Using device tree? */
+ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) {
+ dev_dbg(dev, "using device tree for GPIO lookup\n");
+ desc = of_find_gpio(dev, con_id, idx, &flags);
+ } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
+ dev_dbg(dev, "using ACPI for GPIO lookup\n");
+ desc = acpi_find_gpio(dev, con_id, idx, &flags);
+ } else {
+ dev_dbg(dev, "using lookup tables for GPIO lookup");
+ desc = gpiod_find(dev, con_id, idx, &flags);
+ }
+
+ if (IS_ERR(desc)) {
+ dev_warn(dev, "lookup for GPIO %s failed\n", con_id);
+ return desc;
+ }
+
+ status = gpiod_request(desc, con_id);
+
+ if (status < 0)
+ return ERR_PTR(status);
+
+ if (flags & GPIOF_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(gpiod_get_index);
+
+/**
+ * gpiod_put - dispose of a GPIO descriptor
+ * @desc: GPIO descriptor to dispose of
+ *
+ * No descriptor can be used after gpiod_put() has been called on it.
+ */
+void gpiod_put(struct gpio_desc *desc)
+{
+ gpiod_free(desc);
}
-EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
+EXPORT_SYMBOL_GPL(gpiod_put);
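A sketch of the consumer-side flow, not part of the patch: "reset" is a hypothetical function name, resolved through a "reset-gpios" DT property, an ACPI GPIO resource or a lookup table, depending on which path gpiod_get_index() takes above:

static int example_probe(struct device *dev)
{
	struct gpio_desc *reset;

	reset = gpiod_get(dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/*
	 * Logical values: active-low polarity is handled by gpiolib.
	 * Assumes a chip that does not sleep; otherwise use the
	 * _cansleep variants.
	 */
	gpiod_direction_output(reset, 1);	/* assert reset */
	gpiod_set_value(reset, 0);		/* release it again */

	gpiod_put(reset);
	return 0;
}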
#ifdef CONFIG_DEBUG_FS
@@ -2066,6 +2471,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
unsigned gpio = chip->base;
struct gpio_desc *gdesc = &chip->desc[0];
int is_out;
+ int is_irq;
for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
@@ -2073,12 +2479,14 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
gpiod_get_direction(gdesc);
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
- seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
+ is_irq = test_bit(FLAG_USED_AS_IRQ, &gdesc->flags);
+ seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s",
gpio, gdesc->label,
is_out ? "out" : "in ",
chip->get
? (chip->get(chip, i) ? "hi" : "lo")
- : "? ");
+ : "? ",
+ is_irq ? "IRQ" : " ");
seq_printf(s, "\n");
}
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 955555d6ec8..f8642759116 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@ config DRM_USB
config DRM_KMS_HELPER
tristate
depends on DRM
+ help
+ CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+ bool
+ depends on DRM_KMS_HELPER
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
help
- FB and CRTC helpers for KMS drivers.
+ FBDEV helpers for KMS drivers.
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
config DRM_KMS_CMA_HELPER
bool
select DRM_GEM_CMA_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
selected, the module will be called i810. AGP support is required
for this driver to work.
-config DRM_I915
- tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
- depends on DRM
- depends on AGP
- depends on AGP_INTEL
- # we need shmfs for the swappable backing store, and in particular
- # the shmem_readpage() which depends upon tmpfs
- select SHMEM
- select TMPFS
- select DRM_KMS_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- # i915 depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_LCD_SUPPORT if ACPI
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
- select INPUT if ACPI
- select THERMAL if ACPI
- select ACPI_VIDEO if ACPI
- select ACPI_BUTTON if ACPI
- help
- Choose this option if you have a system that has "Intel Graphics
- Media Accelerator" or "HD Graphics" integrated graphics,
- including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
- G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
- Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
-
- Note that the older i810/i815 chipsets require the use of the
- i810 driver instead, and the Atom z5xx series has an entirely
- different implementation.
-
-config DRM_I915_KMS
- bool "Enable modesetting on intel by default"
- depends on DRM_I915
- help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain. Note that this causes
- the driver to bind to PCI devices, which precludes loading things
- like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
- depends on DRM_I915
- help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
-
- If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
config DRM_MGA
tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
+source "drivers/gpu/drm/armada/Kconfig"
+
source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f089adfe70e..cc08b845f96 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 00000000000..40d371521fe
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,24 @@
+config DRM_ARMADA
+ tristate "DRM support for Marvell Armada SoCs"
+ depends on DRM && HAVE_CLK && ARM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ help
+ Support the "LCD" controllers found on the Marvell Armada 510
+ devices. There are two controllers on the device, and each controller
+ supports graphics and video overlays.
+
+ This driver provides no built-in acceleration; acceleration is
+ performed by other IP found on the SoC. This driver provides
+ kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+ bool "Support TDA1998X HDMI output"
+ depends on DRM_ARMADA != n
+ depends on I2C && DRM_I2C_NXP_TDA998X = y
+ default y
+ help
+ Support the TDA1998x HDMI output device found on the Solid-Run
+ CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 00000000000..d6f43e06150
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
+armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+ armada_gem.o armada_output.o armada_overlay.o \
+ armada_slave.o
+armada-y += armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 00000000000..59948eff609
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+ priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+ if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+ priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+ return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+ /* Lower the watermark to eliminate jitter at higher bandwidths */
+ armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that. The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We are currently pretty rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and returning an error for LCD1. This needs
+ * improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode, uint32_t *sclk)
+{
+ struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+ struct clk *clk = priv->extclk[0];
+ int ret;
+
+ if (dcrtc->num == 1)
+ return -EINVAL;
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (dcrtc->clk != clk) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+ dcrtc->clk = clk;
+ }
+
+ if (sclk) {
+ uint32_t rate, ref, div;
+
+ rate = mode->clock * 1000;
+ ref = clk_round_rate(clk, rate);
+ div = DIV_ROUND_UP(ref, rate);
+ if (div < 1)
+ div = 1;
+
+ clk_set_rate(clk, ref);
+ *sclk = div | SCLK_510_EXTCLK1;
+ }
+
+ return 0;
+}
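+
+/*
+ * Editor's worked example, not from the patch and with invented numbers:
+ * a 74.250 MHz pixel clock gives rate = 74250000.  If clk_round_rate()
+ * on ext_ref_clk_1 returns 148500000, then div = DIV_ROUND_UP(148500000,
+ * 74250000) = 2 and *sclk = 2 | SCLK_510_EXTCLK1.  The rounded rate
+ * depends entirely on the board's clock tree.
+ */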
+
+const struct armada_variant armada510_ops = {
+ .has_spu_adv_reg = true,
+ .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ .init = armada510_init,
+ .crtc_init = armada510_crtc_init,
+ .crtc_compute_clock = armada510_crtc_compute_clock,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 00000000000..d8e398275ca
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1098 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+ struct drm_pending_vblank_event *event;
+ struct armada_regs regs[4];
+ struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+ CSC_AUTO = 0,
+ CSC_YUV_CCIR601 = 1,
+ CSC_YUV_CCIR709 = 2,
+ CSC_RGB_COMPUTER = 1,
+ CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing. Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
+ * 1920 2448 2492 2640 1080 1084 1094 1125
+ * Which get translated to:
+ * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
+ * 1920 2448 2492 2640 540 542 547 562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC. Total clocks per
+ * line: 2640. In the odd frame, the first active line is line 21; in
+ * the even frame, the first active line is line 584.
+ *
+ * LN: 560 561 562 563 567 568 569
+ * DE: ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ * 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN: 1123 1124 1125 1 5 6 7
+ * DE: ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ * 23 blanking lines
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ * Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ * Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ * vtotal = mode->crtc_vtotal + 1;
+ * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ * vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ * vtotal = mode->crtc_vtotal;
+ * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ * vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
+
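An editor's aid, not part of the patch, plugging the 1080i timings from the table above into the per-frame formulas just given:

/*
 * Worked example: with crtc_vdisplay = 540, crtc_vsync_start = 542,
 * crtc_vsync_end = 547, crtc_vtotal = 562, crtc_hsync_start = 2448 and
 * crtc_htotal = 2640:
 *
 *   odd frame:  vtotal = 563, vbackporch = 3, vhorizpos = 2448 - 1320 = 1128
 *   even frame: vtotal = 562, vbackporch = 2, vhorizpos = 2448
 *   vfrontporch = 562 - 547 = 15 in both cases
 */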
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+ while (regs->offset != ~0) {
+ void __iomem *reg = dcrtc->base + regs->offset;
+ uint32_t val;
+
+ val = regs->mask;
+ if (val != 0)
+ val &= readl_relaxed(reg);
+ writel_relaxed(val | regs->val, reg);
+ ++regs;
+ }
+}
+
+#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+ uint32_t dumb_ctrl;
+
+ dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+ if (!dpms_blanked(dcrtc->dpms))
+ dumb_ctrl |= CFG_DUMB_ENA;
+
+ /*
+ * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+ * be using SPI or GPIO. If we set this to DUMB_BLANK, we will
+ * force LCD_D[23:0] to output blank color, overriding the GPIO or
+ * SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
+ */
+ if (dpms_blanked(dcrtc->dpms) &&
+ (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ dumb_ctrl &= ~DUMB_MASK;
+ dumb_ctrl |= DUMB_BLANK;
+ }
+
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals is. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+ dumb_ctrl |= CFG_INV_CSYNC;
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+ dumb_ctrl |= CFG_INV_HSYNC;
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+ dumb_ctrl |= CFG_INV_VSYNC;
+
+ if (dcrtc->dumb_ctrl != dumb_ctrl) {
+ dcrtc->dumb_ctrl = dumb_ctrl;
+ writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+ }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+ int x, int y, struct armada_regs *regs, bool interlaced)
+{
+ struct armada_gem_object *obj = drm_fb_obj(fb);
+ unsigned pitch = fb->pitches[0];
+ unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+ uint32_t addr_odd, addr_even;
+ unsigned i = 0;
+
+ DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+ pitch, x, y, fb->bits_per_pixel);
+
+ addr_odd = addr_even = obj->dev_addr + offset;
+
+ if (interlaced) {
+ addr_even += pitch;
+ pitch *= 2;
+ }
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+ struct armada_frame_work *work)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+ unsigned long flags;
+ int ret;
+
+ ret = drm_vblank_get(dev, dcrtc->num);
+ if (ret) {
+ DRM_ERROR("failed to acquire vblank counter\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (!dcrtc->frame_work)
+ dcrtc->frame_work = work;
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (ret)
+ drm_vblank_put(dev, dcrtc->num);
+
+ return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+ struct armada_frame_work *work = dcrtc->frame_work;
+
+ dcrtc->frame_work = NULL;
+
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+ if (work->event)
+ drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+ drm_vblank_put(dev, dcrtc->num);
+
+ /* Finally, queue the process-half of the cleanup. */
+ __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+ kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+ struct drm_framebuffer *fb, bool force)
+{
+ struct armada_frame_work *work;
+
+ if (!fb)
+ return;
+
+ if (force) {
+ /* Display is disabled, so just drop the old fb */
+ drm_framebuffer_unreference(fb);
+ return;
+ }
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (work) {
+ int i = 0;
+ work->event = NULL;
+ work->old_fb = fb;
+ armada_reg_queue_end(work->regs, i);
+
+ if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+ return;
+
+ kfree(work);
+ }
+
+ /*
+ * Oops - just drop the reference immediately and hope for
+ * the best. The worst that will happen is the buffer gets
+ * reused before it has finished being displayed.
+ */
+ drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+
+ /*
+ * Tell the DRM core that vblank IRQs aren't going to happen for
+ * a while. This cleans up any pending vblank events for us.
+ */
+ drm_vblank_off(dev, dcrtc->num);
+
+ /* Handle any pending flip event. */
+ spin_lock_irq(&dev->event_lock);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (dpms_blanked(dpms))
+ armada_drm_vblank_off(dcrtc);
+ }
+}
+
+/*
+ * Prepare for a mode set. Turn off overlay to ensure that we don't end
+ * up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_plane *plane;
+
+ /*
+ * If we have an overlay plane associated with this CRTC, disable
+ * it before the modeset to avoid its coordinates being outside
+ * the new mode parameters. DRM doesn't provide help with this.
+ */
+ plane = dcrtc->plane;
+ if (plane) {
+ struct drm_framebuffer *fb = plane->fb;
+
+ plane->funcs->disable_plane(plane);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ drm_framebuffer_unreference(fb);
+ }
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+ dcrtc->dpms = DRM_MODE_DPMS_ON;
+ armada_drm_crtc_update(dcrtc);
+ }
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ int ret;
+
+ /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+ if (!priv->variant->has_spu_adv_reg &&
+ adj->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ /* Check whether the display mode is possible */
+ ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+ struct armada_vbl_event *e, *n;
+ void __iomem *base = dcrtc->base;
+
+ if (stat & DMA_FF_UNDERFLOW)
+ DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+ if (stat & GRA_FF_UNDERFLOW)
+ DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+ if (stat & VSYNC_IRQ)
+ drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+ spin_lock(&dcrtc->irq_lock);
+
+ list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+ list_del_init(&e->node);
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ e->fn(dcrtc, e->data);
+ }
+
+ if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+ int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+ uint32_t val;
+
+ writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+ writel_relaxed(dcrtc->v[i].spu_v_h_total,
+ base + LCD_SPUT_V_H_TOTAL);
+
+ val = readl_relaxed(base + LCD_SPU_ADV_REG);
+ val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+ val |= dcrtc->v[i].spu_adv_reg;
+ writel_relaxed(val, base + LCD_SPU_ADV_REG);
+ }
+
+ if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ }
+
+ spin_unlock(&dcrtc->irq_lock);
+
+ if (stat & GRA_FRAME_IRQ) {
+ struct drm_device *dev = dcrtc->crtc.dev;
+
+ spin_lock(&dev->event_lock);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock(&dev->event_lock);
+
+ wake_up(&dcrtc->frame_wait);
+ }
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+ if (dcrtc->irq_ena & mask) {
+ dcrtc->irq_ena &= ~mask;
+ writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ }
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+ if ((dcrtc->irq_ena & mask) != mask) {
+ dcrtc->irq_ena |= mask;
+ writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ }
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+ struct drm_display_mode *adj = &dcrtc->crtc.mode;
+ uint32_t val = 0;
+
+ if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+ val |= CFG_CSC_YUV_CCIR709;
+ if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+ val |= CFG_CSC_RGB_STUDIO;
+
+ /*
+ * In auto mode, set the colorimetry, based upon the HDMI spec.
+ * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+ * ITU601. It may be more appropriate to set this depending on
+ * the source - but what if the graphic frame is YUV and the
+ * video frame is RGB?
+ */
+ if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+ !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+ (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+ if (dcrtc->csc_yuv_mode == CSC_AUTO)
+ val |= CFG_CSC_YUV_CCIR709;
+ }
+
+ /*
+ * We assume we're connected to a TV-like device, so the YUV->RGB
+ * conversion should produce a limited range. We should set this
+ * depending on the connectors attached to this CRTC, and what
+ * kind of device they report being connected.
+ */
+ if (dcrtc->csc_rgb_mode == CSC_AUTO)
+ val |= CFG_CSC_RGB_STUDIO;
+
+ return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode, struct drm_display_mode *adj,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[17];
+ uint32_t lm, rm, tm, bm, val, sclk;
+ unsigned long flags;
+ unsigned i;
+ bool interlaced;
+
+ drm_framebuffer_reference(crtc->fb);
+
+ interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+ i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+
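+ /* Derive the left/right and top/bottom margins from the sync timings. */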
+ rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+ lm = adj->crtc_htotal - adj->crtc_hsync_end;
+ bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+ tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+ DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+ adj->crtc_hdisplay,
+ adj->crtc_hsync_start,
+ adj->crtc_hsync_end,
+ adj->crtc_htotal, lm, rm);
+ DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+ adj->crtc_vdisplay,
+ adj->crtc_vsync_start,
+ adj->crtc_vsync_end,
+ adj->crtc_vtotal, tm, bm);
+
+ /* Wait for pending flips to complete */
+ wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+ drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+ crtc->mode = *adj;
+
+ val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+ if (val != dcrtc->dumb_ctrl) {
+ dcrtc->dumb_ctrl = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+ }
+
+ /* Now compute the divider for real */
+ priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+ /* Ensure graphic fifo is enabled */
+ armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
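+ /*
+ * Take or drop a vblank reference as we enter or leave interlaced mode
+ * (the IRQ handler reprograms the field registers on every frame while
+ * interlaced).
+ */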
+ if (interlaced ^ dcrtc->interlaced) {
+ if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+ drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ else
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ dcrtc->interlaced = interlaced;
+ }
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+ /* Even interlaced/progressive frame */
+ dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+ adj->crtc_htotal;
+ dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+ val = adj->crtc_hsync_start;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+ priv->variant->spu_adv_reg;
+
+ if (interlaced) {
+ /* Odd interlaced frame */
+ dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+ (1 << 16);
+ dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+ val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+ priv->variant->spu_adv_reg;
+ } else {
+ dcrtc->v[0] = dcrtc->v[1];
+ }
+
+ val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+ armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+ armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+ armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+ armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+ LCD_SPUT_V_H_TOTAL);
+
+ if (priv->variant->has_spu_adv_reg) {
+ armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+ ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+ ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+ }
+
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ if (interlaced)
+ val |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+
+ val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+ armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+ val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ armada_reg_queue_end(regs, i);
+
+ armada_drm_crtc_update_regs(dcrtc, regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+ armada_drm_crtc_update(dcrtc);
+
+ drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+ armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+ return 0;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[4];
+ unsigned i;
+
+ i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+ dcrtc->interlaced);
+ armada_reg_queue_end(regs, i);
+
+ /* Wait for pending flips to complete */
+ wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+ /* Take a reference to the new fb as we're using it */
+ drm_framebuffer_reference(crtc->fb);
+
+ /* Update the base in the CRTC */
+ armada_drm_crtc_update_regs(dcrtc, regs);
+
+ /* Drop our previously held reference */
+ armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+ return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+
+ /* Power down most RAMs and FIFOs */
+ writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+ .dpms = armada_drm_crtc_dpms,
+ .prepare = armada_drm_crtc_prepare,
+ .commit = armada_drm_crtc_commit,
+ .mode_fixup = armada_drm_crtc_mode_fixup,
+ .mode_set = armada_drm_crtc_mode_set,
+ .mode_set_base = armada_drm_crtc_mode_set_base,
+ .load_lut = armada_drm_crtc_load_lut,
+ .disable = armada_drm_crtc_disable,
+};
+
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+ unsigned stride, unsigned width, unsigned height)
+{
+ uint32_t addr;
+ unsigned y;
+
+ addr = SRAM_HWC32_RAM1;
+ for (y = 0; y < height; y++) {
+ uint32_t *p = &pix[y * stride];
+ unsigned x;
+
+ for (x = 0; x < width; x++, p++) {
+ uint32_t val = *p;
+
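+ /* Swap the red and blue channels before writing the pixel to the cursor SRAM. */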
+ val = (val & 0xff00ff00) |
+ (val & 0x000000ff) << 16 |
+ (val & 0x00ff0000) >> 16;
+
+ writel_relaxed(val,
+ base + LCD_SPU_SRAM_WRDAT);
+ writel_relaxed(addr | SRAM_WRITE,
+ base + LCD_SPU_SRAM_CTRL);
+ addr += 1;
+ if ((addr & 0x00ff) == 0)
+ addr += 0xf00;
+ if ((addr & 0x30ff) == 0)
+ addr = SRAM_HWC32_RAM2;
+ }
+ }
+}
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+ unsigned addr;
+
+ for (addr = 0; addr < 256; addr++) {
+ /* write the default value */
+ writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+ writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+ base + LCD_SPU_SRAM_CTRL);
+ }
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+ uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+ uint32_t yoff, yscr, h = dcrtc->cursor_h;
+ uint32_t para1;
+
+ /*
+ * Calculate the visible width and height of the cursor,
+ * screen position, and the position in the cursor bitmap.
+ */
+ if (dcrtc->cursor_x < 0) {
+ xoff = -dcrtc->cursor_x;
+ xscr = 0;
+ w -= min(xoff, w);
+ } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+ xoff = 0;
+ xscr = dcrtc->cursor_x;
+ w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+ } else {
+ xoff = 0;
+ xscr = dcrtc->cursor_x;
+ }
+
+ if (dcrtc->cursor_y < 0) {
+ yoff = -dcrtc->cursor_y;
+ yscr = 0;
+ h -= min(yoff, h);
+ } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+ yoff = 0;
+ yscr = dcrtc->cursor_y;
+ h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+ } else {
+ yoff = 0;
+ yscr = dcrtc->cursor_y;
+ }
+
+ /* On interlaced modes, the vertical cursor size must be halved */
+ s = dcrtc->cursor_w;
+ if (dcrtc->interlaced) {
+ s *= 2;
+ yscr /= 2;
+ h /= 2;
+ }
+
+ if (!dcrtc->cursor_obj || !h || !w) {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ dcrtc->cursor_update = false;
+ armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ return 0;
+ }
+
+ para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+ armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+ /*
+ * Initialize the transparency if the SRAM was powered down.
+ * We must also reload the cursor data.
+ */
+ if (!(para1 & CFG_CSB_256x32)) {
+ armada_drm_crtc_cursor_tran(dcrtc->base);
+ reload = true;
+ }
+
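+ /* If the cursor size changed, disable the hardware cursor and force a data reload. */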
+ if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ dcrtc->cursor_update = false;
+ armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ reload = true;
+ }
+ if (reload) {
+ struct armada_gem_object *obj = dcrtc->cursor_obj;
+ uint32_t *pix;
+ /* Set the top-left corner of the cursor image */
+ pix = obj->addr;
+ pix += yoff * s + xoff;
+ armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+ }
+
+ /* Reload the cursor position, size and enable in the IRQ handler */
+ spin_lock_irq(&dcrtc->irq_lock);
+ dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+ dcrtc->cursor_hw_sz = h << 16 | w;
+ dcrtc->cursor_update = true;
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+
+ return 0;
+}
+
+static void cursor_update(void *data)
+{
+ armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_gem_object *obj = NULL;
+ int ret;
+
+ /* If no cursor support, replicate drm's return value */
+ if (!priv->variant->has_spu_adv_reg)
+ return -ENXIO;
+
+ if (handle && w > 0 && h > 0) {
+ /* maximum size is 64x32 or 32x64 */
+ if (w > 64 || h > 64 || (w > 32 && h > 32))
+ return -ENOMEM;
+
+ obj = armada_gem_object_lookup(dev, file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ /* Must be a kernel-mapped object */
+ if (!obj->addr) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -EINVAL;
+ }
+
+ if (obj->obj.size < w * h * 4) {
+ DRM_ERROR("buffer is too small\n");
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (dcrtc->cursor_obj) {
+ dcrtc->cursor_obj->update = NULL;
+ dcrtc->cursor_obj->update_data = NULL;
+ drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+ }
+ dcrtc->cursor_obj = obj;
+ dcrtc->cursor_w = w;
+ dcrtc->cursor_h = h;
+ ret = armada_drm_crtc_cursor_update(dcrtc, true);
+ if (obj) {
+ obj->update_data = dcrtc;
+ obj->update = cursor_update;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+ int ret;
+
+ /* If no cursor support, replicate drm's return value */
+ if (!priv->variant->has_spu_adv_reg)
+ return -EFAULT;
+
+ mutex_lock(&dev->struct_mutex);
+ dcrtc->cursor_x = x;
+ dcrtc->cursor_y = y;
+ ret = armada_drm_crtc_cursor_update(dcrtc, false);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+
+ if (dcrtc->cursor_obj)
+ drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+ priv->dcrtc[dcrtc->num] = NULL;
+ drm_crtc_cleanup(&dcrtc->crtc);
+
+ if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+
+ kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_frame_work *work;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ unsigned i;
+ int ret;
+
+ /* We don't support changing the pixel format */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->event = event;
+ work->old_fb = dcrtc->crtc.fb;
+
+ i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+ dcrtc->interlaced);
+ armada_reg_queue_end(work->regs, i);
+
+ /*
+ * Hold the old framebuffer for the work - DRM appears to drop our
+ * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+ */
+ drm_framebuffer_reference(work->old_fb);
+
+ ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+ if (ret) {
+ /*
+ * Undo our reference above; DRM does not drop the reference
+ * to this object on error, so that's okay.
+ */
+ drm_framebuffer_unreference(work->old_fb);
+ kfree(work);
+ return ret;
+ }
+
+ /*
+ * Don't take a reference on the new framebuffer;
+ * drm_mode_page_flip_ioctl() has already grabbed a reference and
+ * will _not_ drop that reference on successful return from this
+ * function. Simply mark this new framebuffer as the current one.
+ */
+ dcrtc->crtc.fb = fb;
+
+ /*
+ * Finally, if the display is blanked, we won't receive an
+ * interrupt, so complete it now.
+ */
+ if (dpms_blanked(dcrtc->dpms)) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ bool update_csc = false;
+
+ if (property == priv->csc_yuv_prop) {
+ dcrtc->csc_yuv_mode = val;
+ update_csc = true;
+ } else if (property == priv->csc_rgb_prop) {
+ dcrtc->csc_rgb_mode = val;
+ update_csc = true;
+ }
+
+ if (update_csc) {
+ uint32_t val;
+
+ val = dcrtc->spu_iopad_ctrl |
+ armada_drm_crtc_calculate_csc(dcrtc);
+ writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ }
+
+ return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+ .cursor_set = armada_drm_crtc_cursor_set,
+ .cursor_move = armada_drm_crtc_cursor_move,
+ .destroy = armada_drm_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = armada_drm_crtc_page_flip,
+ .set_property = armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+ { CSC_AUTO, "Auto" },
+ { CSC_YUV_CCIR601, "CCIR601" },
+ { CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+ { CSC_AUTO, "Auto" },
+ { CSC_RGB_COMPUTER, "Computer system" },
+ { CSC_RGB_STUDIO, "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->csc_yuv_prop)
+ return 0;
+
+ priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+ "CSC_YUV", armada_drm_csc_yuv_enum_list,
+ ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+ priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+ "CSC_RGB", armada_drm_csc_rgb_enum_list,
+ ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+ if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+ struct resource *res)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc;
+ void __iomem *base;
+ int ret;
+
+ ret = armada_drm_crtc_create_properties(dev);
+ if (ret)
+ return ret;
+
+ base = devm_request_and_ioremap(dev->dev, res);
+ if (!base) {
+ DRM_ERROR("failed to ioremap register\n");
+ return -ENOMEM;
+ }
+
+ dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+ if (!dcrtc) {
+ DRM_ERROR("failed to allocate Armada crtc\n");
+ return -ENOMEM;
+ }
+
+ dcrtc->base = base;
+ dcrtc->num = num;
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ dcrtc->csc_yuv_mode = CSC_AUTO;
+ dcrtc->csc_rgb_mode = CSC_AUTO;
+ dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+ dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+ spin_lock_init(&dcrtc->irq_lock);
+ dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+ INIT_LIST_HEAD(&dcrtc->vbl_list);
+ init_waitqueue_head(&dcrtc->frame_wait);
+
+ /* Initialize some registers which we don't otherwise set */
+ writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+ writel_relaxed(dcrtc->spu_iopad_ctrl,
+ dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+ writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+ writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+ if (priv->variant->crtc_init) {
+ ret = priv->variant->crtc_init(dcrtc);
+ if (ret) {
+ kfree(dcrtc);
+ return ret;
+ }
+ }
+
+ /* Ensure AXI pipeline is enabled */
+ armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+ priv->dcrtc[dcrtc->num] = dcrtc;
+
+ drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+ drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+ drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+ dcrtc->csc_yuv_mode);
+ drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+ dcrtc->csc_rgb_mode);
+
+ return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 00000000000..9c10a07e749
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t val;
+};
+
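+/*
+ * Helpers to build a table of register updates: armada_reg_queue_mod
+ * queues an update of only the bits in _m, armada_reg_queue_set replaces
+ * the whole register, and armada_reg_queue_end appends a sentinel entry
+ * (offset ~0) marking the end of the table.
+ */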
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \
+ do { \
+ struct armada_regs *__reg = _r; \
+ __reg[_i].offset = _o; \
+ __reg[_i].mask = ~(_m); \
+ __reg[_i].val = _v; \
+ _i++; \
+ } while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o) \
+ armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i) \
+ armada_reg_queue_mod(_r, _i, 0, 0, ~0)
+
+struct armada_frame_work;
+
+struct armada_crtc {
+ struct drm_crtc crtc;
+ unsigned num;
+ void __iomem *base;
+ struct clk *clk;
+ struct {
+ uint32_t spu_v_h_total;
+ uint32_t spu_v_porch;
+ uint32_t spu_adv_reg;
+ } v[2];
+ bool interlaced;
+ bool cursor_update;
+ uint8_t csc_yuv_mode;
+ uint8_t csc_rgb_mode;
+
+ struct drm_plane *plane;
+
+ struct armada_gem_object *cursor_obj;
+ int cursor_x;
+ int cursor_y;
+ uint32_t cursor_hw_pos;
+ uint32_t cursor_hw_sz;
+ uint32_t cursor_w;
+ uint32_t cursor_h;
+
+ int dpms;
+ uint32_t cfg_dumb_ctrl;
+ uint32_t dumb_ctrl;
+ uint32_t spu_iopad_ctrl;
+
+ wait_queue_head_t frame_wait;
+ struct armada_frame_work *frame_work;
+
+ spinlock_t irq_lock;
+ uint32_t irq_ena;
+ struct list_head vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 00000000000..471e45627f1
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct armada_private *priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mm_dump_table(m, &priv->linear);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct armada_private *priv = dev->dev_private;
+ int n, i;
+
+ if (priv) {
+ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ struct armada_crtc *dcrtc = priv->dcrtc[n];
+ if (!dcrtc)
+ continue;
+
+ for (i = 0x84; i <= 0x1c4; i += 4) {
+ uint32_t v = readl_relaxed(dcrtc->base + i);
+ seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+ .owner = THIS_MODULE,
+ .open = armada_debugfs_reg_r_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int armada_debugfs_write(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct drm_device *dev = file->private_data;
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+ char buf[32], *p;
+ uint32_t reg, val;
+ int ret;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
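+ /* Expect "<reg> <value>" in hex; only registers 0x84-0x1c4 are written. */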
+ reg = simple_strtoul(buf, &p, 16);
+ if (!isspace(*p))
+ return -EINVAL;
+ val = simple_strtoul(p + 1, NULL, 16);
+
+ if (reg >= 0x84 && reg <= 0x1c4)
+ writel(val, dcrtc->base + reg);
+
+ return len;
+}
+
+static const struct file_operations fops_reg_w = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = armada_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+static struct drm_info_list armada_debugfs_list[] = {
+ { "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
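+/*
+ * Track a bare debugfs dentry on the DRM minor's debugfs list, keyed by
+ * the fops pointer, so it can be removed later via drm_debugfs_remove_files().
+ */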
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+ const char *name, umode_t mode, const struct file_operations *fops)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+ return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+ int ret;
+
+ ret = drm_debugfs_create_files(armada_debugfs_list,
+ ARMADA_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
+ ret = armada_debugfs_create(minor->debugfs_root, minor,
+ "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+ if (ret)
+ goto err_1;
+
+ ret = armada_debugfs_create(minor->debugfs_root, minor,
+ "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+ if (ret)
+ goto err_2;
+ return ret;
+
+ err_2:
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+ drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+ minor);
+ return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 00000000000..eef09ec9a5f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+ uint32_t ov, v;
+
+ ov = v = readl_relaxed(ptr);
+ v = (v & ~mask) | val;
+ if (ov != v)
+ writel_relaxed(v, ptr);
+}
+
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
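+ /* 4bpp formats pack two pixels per byte; others round bpp up to whole bytes. */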
+ uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+ /* 88AP510 spec recommends pitch be a multiple of 128 */
+ return ALIGN(pitch, 128);
+}
+
+struct armada_vbl_event {
+ struct list_head node;
+ void *data;
+ void (*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+ struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+ struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+ struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do { \
+ struct armada_vbl_event *__e = _e; \
+ INIT_LIST_HEAD(&__e->node); \
+ __e->data = _d; \
+ __e->fn = _f; \
+} while (0)
+
+
+struct armada_private;
+
+struct armada_variant {
+ bool has_spu_adv_reg;
+ uint32_t spu_adv_reg;
+ int (*init)(struct armada_private *, struct device *);
+ int (*crtc_init)(struct armada_crtc *);
+ int (*crtc_compute_clock)(struct armada_crtc *,
+ const struct drm_display_mode *,
+ uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+ const struct armada_variant *variant;
+ struct work_struct fb_unref_work;
+ DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+ struct drm_fb_helper *fbdev;
+ struct armada_crtc *dcrtc[2];
+ struct drm_mm linear;
+ struct clk *extclk[2];
+ struct drm_property *csc_yuv_prop;
+ struct drm_property *csc_rgb_prop;
+ struct drm_property *colorkey_prop;
+ struct drm_property *colorkey_min_prop;
+ struct drm_property *colorkey_max_prop;
+ struct drm_property *colorkey_val_prop;
+ struct drm_property *colorkey_alpha_prop;
+ struct drm_property *colorkey_mode_prop;
+ struct drm_property *brightness_prop;
+ struct drm_property *contrast_prop;
+ struct drm_property *saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+ struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+ struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 00000000000..4f2b2835491
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+ /* With 0x24, there is no translation between vp_out and int_vp
+ FB LCD out Pins VIP Int Vp
+ R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
+ G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
+ B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
+ */
+ .swap_a = 2,
+ .swap_b = 3,
+ .swap_c = 4,
+ .swap_d = 5,
+ .swap_e = 0,
+ .swap_f = 1,
+ .audio_cfg = BIT(2),
+ .audio_frame[1] = 1,
+ .audio_format = AFMT_SPDIF,
+ .audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+ .i2c_adapter_id = 0,
+ .crtcs = 1 << 0, /* Only LCD0 at the moment */
+ .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+ .interlace_allowed = true,
+ .info = {
+ .type = "tda998x",
+ .addr = 0x70,
+ .platform_data = &params,
+ },
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+ struct armada_private *priv =
+ container_of(work, struct armada_private, fb_unref_work);
+ struct drm_framebuffer *fb;
+
+ while (kfifo_get(&priv->fb_unref, &fb))
+ drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ /*
+ * Yes, we really must jump through these hoops just to store a
+ * _pointer_ to something into the kfifo. This is utterly insane
+ * and idiotic, because kfifo requires the _data_ pointed to by
+ * the pointer to be const, not the pointer itself. Not only that, but
+ * you have to pass a pointer _to_ the pointer you want stored.
+ */
+ const struct drm_framebuffer *silly_api_alert = fb;
+ WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ schedule_work(&priv->fb_unref_work);
+}
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ __armada_drm_queue_unref_work(dev, fb);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ const struct platform_device_id *id;
+ struct armada_private *priv;
+ struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+ struct resource *mem = NULL;
+ int ret, n, i;
+
+ memset(res, 0, sizeof(res));
+
+ for (n = i = 0; ; n++) {
+ struct resource *r = platform_get_resource(dev->platformdev,
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else if (i < ARRAY_SIZE(priv->dcrtc))
+ res[i++] = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!res[0] || !mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev->dev, mem->start,
+ resource_size(mem), "armada-drm"))
+ return -EBUSY;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate private\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ /* Get the implementation specific driver data. */
+ id = platform_get_device_id(dev->platformdev);
+ if (!id)
+ return -ENXIO;
+
+ priv->variant = (struct armada_variant *)id->driver_data;
+
+ ret = priv->variant->init(priv, dev->dev);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = 320;
+ dev->mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920 due to the
+ * 1920 by 3 lines RAM
+ */
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+ /* Create all LCD controllers */
+ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ if (!res[n])
+ break;
+
+ ret = armada_drm_crtc_create(dev, n, res[n]);
+ if (ret)
+ goto err_kms;
+ }
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+ ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+ if (ret)
+ goto err_kms;
+#endif
+
+ ret = drm_vblank_init(dev, n);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto err_kms;
+
+ dev->vblank_disable_allowed = 1;
+
+ ret = armada_fbdev_init(dev);
+ if (ret)
+ goto err_irq;
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+ err_irq:
+ drm_irq_uninstall(dev);
+ err_kms:
+ drm_mode_config_cleanup(dev);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_kms_helper_poll_fini(dev);
+ armada_fbdev_fini(dev);
+ drm_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ if (list_empty(&evt->node)) {
+ list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+ drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ }
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ if (!list_empty(&evt->node)) {
+ list_del_init(&evt->node);
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ }
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_vbl_event_remove(dcrtc, evt);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct armada_private *priv = dev->dev_private;
+ armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+ return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct armada_private *priv = dev->dev_private;
+ armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+ uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+ irqreturn_t handled = IRQ_NONE;
+
+ /*
+ * This is ridiculous - rather than writing bits to clear, we
+ * have to set the actual status register value. This is racy.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /* Mask out those interrupts we haven't enabled */
+ v = stat & dcrtc->irq_ena;
+
+ if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+ armada_drm_crtc_irq(dcrtc, stat);
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+ spin_lock_irq(&dev->vbl_lock);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ spin_unlock_irq(&dev->vbl_lock);
+
+ return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+ DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+ DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+ DRM_UNLOCKED),
+};
+
+static const struct file_operations armada_drm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = drm_read,
+ .poll = drm_poll,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .open = drm_open,
+ .release = drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+ .load = armada_drm_load,
+ .open = NULL,
+ .preclose = NULL,
+ .postclose = NULL,
+ .lastclose = NULL,
+ .unload = armada_drm_unload,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = armada_drm_enable_vblank,
+ .disable_vblank = armada_drm_disable_vblank,
+ .irq_handler = armada_drm_irq_handler,
+ .irq_postinstall = armada_drm_irq_postinstall,
+ .irq_uninstall = armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = armada_drm_debugfs_init,
+ .debugfs_cleanup = armada_drm_debugfs_cleanup,
+#endif
+ .gem_free_object = armada_gem_free_object,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = armada_gem_prime_export,
+ .gem_prime_import = armada_gem_prime_import,
+ .dumb_create = armada_gem_dumb_create,
+ .dumb_map_offset = armada_gem_dumb_map_offset,
+ .dumb_destroy = armada_gem_dumb_destroy,
+ .gem_vm_ops = &armada_gem_vm_ops,
+ .major = 1,
+ .minor = 0,
+ .name = "armada-drm",
+ .desc = "Armada SoC DRM",
+ .date = "20120730",
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ .ioctls = armada_ioctls,
+ .fops = &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&armada_drm_driver, pdev);
+ return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+ {
+ .name = "armada-drm",
+ .driver_data = (unsigned long)&armada510_ops,
+ }, {
+ .name = "armada-510-drm",
+ .driver_data = (unsigned long)&armada510_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+ .probe = armada_drm_probe,
+ .remove = armada_drm_remove,
+ .driver = {
+ .name = "armada-drm",
+ .owner = THIS_MODULE,
+ },
+ .id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+ armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+ return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+ platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 00000000000..1c90969def3
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+ drm_framebuffer_cleanup(&dfb->fb);
+ drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+ kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *dfile, unsigned int *handle)
+{
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+ return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+ .destroy = armada_fb_destroy,
+ .create_handle = armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+ struct armada_framebuffer *dfb;
+ uint8_t format, config;
+ int ret;
+
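+ /* Translate the DRM fourcc into the LCD controller format and swap/convert flags. */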
+ switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod) \
+ case DRM_FORMAT_##drm: \
+ format = CFG_##fmt; \
+ config = mod; \
+ break
+ FMT(RGB565, 565, CFG_SWAPRB);
+ FMT(BGR565, 565, 0);
+ FMT(ARGB1555, 1555, CFG_SWAPRB);
+ FMT(ABGR1555, 1555, 0);
+ FMT(RGB888, 888PACK, CFG_SWAPRB);
+ FMT(BGR888, 888PACK, 0);
+ FMT(XRGB8888, X888, CFG_SWAPRB);
+ FMT(XBGR8888, X888, 0);
+ FMT(ARGB8888, 8888, CFG_SWAPRB);
+ FMT(ABGR8888, 8888, 0);
+ FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+ FMT(UYVY, 422PACK, CFG_YUV2RGB);
+ FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
+ FMT(YUV422, 422, CFG_YUV2RGB);
+ FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(YUV420, 420, CFG_YUV2RGB);
+ FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(C8, PSEUDO8, 0);
+#undef FMT
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+ if (!dfb) {
+ DRM_ERROR("failed to allocate Armada fb object\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dfb->fmt = format;
+ dfb->mod = config;
+ dfb->obj = obj;
+
+ drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+ ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+ if (ret) {
+ kfree(dfb);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Take a reference on our object as we're successful - the
+ * caller already holds a reference, which keeps us safe for
+ * the above call, but the caller will drop their reference
+ * to it. Hence we need to take our own reference.
+ */
+ drm_gem_object_reference(&obj->obj);
+
+ return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+ struct armada_gem_object *obj;
+ struct armada_framebuffer *dfb;
+ int ret;
+
+ DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+ mode->width, mode->height, mode->pixel_format,
+ mode->flags, mode->pitches[0], mode->pitches[1],
+ mode->pitches[2]);
+
+ /* We can only handle a single plane at the moment */
+ if (drm_format_num_planes(mode->pixel_format) > 1 &&
+ (mode->handles[0] != mode->handles[1] ||
+ mode->handles[0] != mode->handles[2])) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+ if (!obj) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (obj->obj.import_attach && !obj->sgt) {
+ ret = armada_gem_map_import(obj);
+ if (ret)
+ goto err_unref;
+ }
+
+ /* Framebuffer objects must have a valid device address for scanout */
+ if (obj->dev_addr == DMA_ERROR_CODE) {
+ ret = -EINVAL;
+ goto err_unref;
+ }
+
+ dfb = armada_framebuffer_create(dev, mode, obj);
+ if (IS_ERR(dfb)) {
+ ret = PTR_ERR(dfb);
+ goto err;
+ }
+
+ drm_gem_object_unreference_unlocked(&obj->obj);
+
+ return &dfb->fb;
+
+ err_unref:
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+ DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh = priv->fbdev;
+
+ if (fbh)
+ drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = armada_output_poll_changed,
+};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 00000000000..ce3f12ebfc5
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+ struct drm_framebuffer fb;
+ struct armada_gem_object *obj;
+ uint8_t fmt;
+ uint8_t mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+ container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 00000000000..dd5ea77dac9
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fbh->dev;
+ struct drm_mode_fb_cmd2 mode;
+ struct armada_framebuffer *dfb;
+ struct armada_gem_object *obj;
+ struct fb_info *info;
+ int size, ret;
+ void *ptr;
+
+ memset(&mode, 0, sizeof(mode));
+ mode.width = sizes->surface_width;
+ mode.height = sizes->surface_height;
+ mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+ mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode.pitches[0] * mode.height;
+ obj = armada_gem_alloc_private_object(dev, size);
+ if (!obj) {
+ DRM_ERROR("failed to allocate fb memory\n");
+ return -ENOMEM;
+ }
+
+ ret = armada_gem_linear_back(dev, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return ret;
+ }
+
+ ptr = armada_gem_map_object(dev, obj);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -ENOMEM;
+ }
+
+ dfb = armada_framebuffer_create(dev, &mode, obj);
+
+ /*
+ * A reference is now held by the framebuffer object if
+ * successful, otherwise this drops the ref for the error path.
+ */
+ drm_gem_object_unreference_unlocked(&obj->obj);
+
+ if (IS_ERR(dfb))
+ return PTR_ERR(dfb);
+
+ info = framebuffer_alloc(0, dev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err_fballoc;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_fbcmap;
+ }
+
+ strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+ info->par = fbh;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &armada_fb_ops;
+ info->fix.smem_start = obj->phys_addr;
+ info->fix.smem_len = obj->obj.size;
+ info->screen_size = obj->obj.size;
+ info->screen_base = ptr;
+ fbh->fb = &dfb->fb;
+ fbh->fbdev = info;
+ drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+ drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
+ dfb->fb.width, dfb->fb.height,
+ dfb->fb.bits_per_pixel, obj->phys_addr);
+
+ return 0;
+
+ err_fbcmap:
+ framebuffer_release(info);
+ err_fballoc:
+ dfb->fb.funcs->destroy(&dfb->fb);
+ return ret;
+}
+
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ if (!fbh->fb) {
+ ret = armada_fb_create(fbh, sizes);
+ if (ret == 0)
+ ret = 1;
+ }
+ return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+ .gamma_set = armada_drm_crtc_gamma_set,
+ .gamma_get = armada_drm_crtc_gamma_get,
+ .fb_probe = armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh;
+ int ret;
+
+ fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+ if (!fbh)
+ return -ENOMEM;
+
+ priv->fbdev = fbh;
+
+ fbh->funcs = &armada_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, fbh, 1, 1);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm fb helper\n");
+ goto err_fb_helper;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fbh);
+ if (ret) {
+ DRM_ERROR("failed to add fb connectors\n");
+ goto err_fb_setup;
+ }
+
+ ret = drm_fb_helper_initial_config(fbh, 32);
+ if (ret) {
+ DRM_ERROR("failed to set initial config\n");
+ goto err_fb_setup;
+ }
+
+ return 0;
+ err_fb_setup:
+ drm_fb_helper_fini(fbh);
+ err_fb_helper:
+ priv->fbdev = NULL;
+ return ret;
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh = priv->fbdev;
+
+ if (fbh) {
+ struct fb_info *info = fbh->fbdev;
+
+ if (info) {
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (fbh->fb)
+ fbh->fb->funcs->destroy(fbh->fb);
+
+ drm_fb_helper_fini(fbh);
+
+ priv->fbdev = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 00000000000..9f2356bae7f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
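+/*
+ * Fault handler for mmap()ed GEM objects: map in the page of the object's
+ * contiguous physical memory that corresponds to the faulting address.
+ */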
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+ unsigned long addr = (unsigned long)vmf->virtual_address;
+ unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+ int ret;
+
+ pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vma, addr, pfn);
+
+ switch (ret) {
+ case 0:
+ case -EBUSY:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+ .fault = armada_gem_vm_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+ return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+ DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+ drm_gem_free_mmap_offset(&dobj->obj);
+
+ if (dobj->page) {
+ /* page backed memory */
+ unsigned int order = get_order(dobj->obj.size);
+ __free_pages(dobj->page, order);
+ } else if (dobj->linear) {
+ /* linear backed memory */
+ drm_mm_remove_node(dobj->linear);
+ kfree(dobj->linear);
+ if (dobj->addr)
+ iounmap(dobj->addr);
+ }
+
+ if (dobj->obj.import_attach) {
+ /* We only ever display imported data */
+ dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+ DMA_TO_DEVICE);
+ drm_prime_gem_destroy(&dobj->obj, NULL);
+ }
+
+ drm_gem_object_release(&dobj->obj);
+
+ kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+ struct armada_private *priv = dev->dev_private;
+ size_t size = obj->obj.size;
+
+ if (obj->page || obj->linear)
+ return 0;
+
+ /*
+ * If it is a small allocation (typically cursor, which will
+ * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+ * Framebuffers will never be this small (our minimum size for
+ * framebuffers is larger than this anyway.) Such objects are
+ * only accessed by the CPU so we don't need any special handling
+ * here.
+ */
+ if (size <= 8192) {
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (p) {
+ obj->addr = page_address(p);
+ obj->phys_addr = page_to_phys(p);
+ obj->page = p;
+
+ memset(obj->addr, 0, PAGE_ALIGN(size));
+ }
+ }
+
+ /*
+ * We could grab something from CMA if it's enabled, but that
+ * involves building in a problem:
+ *
+ * CMA's interface uses dma_alloc_coherent(), which provides us
+ * with a CPU virtual address and a device address.
+ *
+ * The CPU virtual address may be either an address in the kernel
+ * direct mapped region (for example, as it would be on x86) or
+ * it may be remapped into another part of kernel memory space
+ * (eg, as it would be on ARM.) This means virt_to_phys() on the
+ * returned virtual address is invalid depending on the architecture
+ * implementation.
+ *
+ * The device address may also not be a physical address; it may
+ * be that there is some kind of remapping between the device and
+ * system RAM, which also makes the device address unsafe to
+ * re-use as a physical address.
+ *
+ * This makes DRM usage of dma_alloc_coherent() in a generic way
+ * at best very questionable and unsafe.
+ */
+
+ /* Otherwise, grab it from our linear allocation */
+ if (!obj->page) {
+ struct drm_mm_node *node;
+ unsigned align = min_t(unsigned, size, SZ_2M);
+ void __iomem *ptr;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOSPC;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mm_insert_node(&priv->linear, node, size, align,
+ DRM_MM_SEARCH_DEFAULT);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ obj->linear = node;
+
+ /* Ensure that the memory we're returning is cleared. */
+ ptr = ioremap_wc(obj->linear->start, size);
+ if (!ptr) {
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_remove_node(obj->linear);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(obj->linear);
+ obj->linear = NULL;
+ return -ENOMEM;
+ }
+
+ memset_io(ptr, 0, size);
+ iounmap(ptr);
+
+ obj->phys_addr = obj->linear->start;
+ obj->dev_addr = obj->linear->start;
+ }
+
+ DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
+ obj, obj->phys_addr, obj->dev_addr);
+
+ return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+ /* only linear objects need to be ioremap'd */
+ if (!dobj->addr && dobj->linear)
+ dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+ return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+ struct armada_gem_object *obj;
+
+ size = roundup_gem_size(size);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->obj, size);
+ obj->dev_addr = DMA_ERROR_CODE;
+
+ DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+ return obj;
+}
+
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+ size_t size)
+{
+ struct armada_gem_object *obj;
+ struct address_space *mapping;
+
+ size = roundup_gem_size(size);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ if (drm_gem_object_init(dev, &obj->obj, size)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->dev_addr = DMA_ERROR_CODE;
+
+ mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+ DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+ return obj;
+}
+
+/* Dumb alloc support */
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct armada_gem_object *dobj;
+ u32 handle;
+ size_t size;
+ int ret;
+
+ args->pitch = armada_pitch(args->width, args->bpp);
+ args->size = size = args->pitch * args->height;
+
+ dobj = armada_gem_alloc_private_object(dev, size);
+ if (dobj == NULL)
+ return -ENOMEM;
+
+ ret = armada_gem_linear_back(dev, dobj);
+ if (ret)
+ goto err;
+
+ ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+ if (ret)
+ goto err;
+
+ args->handle = handle;
+
+ /* drop reference from allocate - handle holds it now */
+ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct armada_gem_object *obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = armada_gem_object_lookup(dev, file, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* Don't allow imported objects to be mapped */
+ if (obj->obj.import_attach) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(&obj->obj);
+ if (ret == 0) {
+ *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+ DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+ }
+
+ drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_create *args = data;
+ struct armada_gem_object *dobj;
+ size_t size;
+ u32 handle;
+ int ret;
+
+ if (args->size == 0)
+ return -ENOMEM;
+
+ size = args->size;
+
+ dobj = armada_gem_alloc_object(dev, size);
+ if (dobj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+ if (ret)
+ goto err;
+
+ args->handle = handle;
+
+ /* drop reference from allocate - handle holds it now */
+ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_mmap *args = data;
+ struct armada_gem_object *dobj;
+ unsigned long addr;
+
+ dobj = armada_gem_object_lookup(dev, file, args->handle);
+ if (dobj == NULL)
+ return -ENOENT;
+
+ if (!dobj->obj.filp) {
+ drm_gem_object_unreference(&dobj->obj);
+ return -EINVAL;
+ }
+
+ addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, args->offset);
+ drm_gem_object_unreference(&dobj->obj);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ args->addr = addr;
+
+ return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_pwrite *args = data;
+ struct armada_gem_object *dobj;
+ char __user *ptr;
+ int ret;
+
+ DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+ args->handle, args->offset, args->size, args->ptr);
+
+ if (args->size == 0)
+ return 0;
+
+ ptr = (char __user *)(uintptr_t)args->ptr;
+
+ if (!access_ok(VERIFY_READ, ptr, args->size))
+ return -EFAULT;
+
+ ret = fault_in_multipages_readable(ptr, args->size);
+ if (ret)
+ return ret;
+
+ dobj = armada_gem_object_lookup(dev, file, args->handle);
+ if (dobj == NULL)
+ return -ENOENT;
+
+ /* Must be a kernel-mapped object */
+ if (!dobj->addr) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ if (args->offset > dobj->obj.size ||
+ args->size > dobj->obj.size - args->offset) {
+ DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+ ret = -EFAULT;
+ } else if (dobj->update) {
+ dobj->update(dobj->update_data);
+ ret = 0;
+ }
+
+ unref:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+/* Prime support */
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ int i, num;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (dobj->obj.filp) {
+ struct address_space *mapping;
+ gfp_t gfp;
+ int count;
+
+ count = dobj->obj.size / PAGE_SIZE;
+ if (sg_alloc_table(sgt, count, GFP_KERNEL))
+ goto free_sgt;
+
+ mapping = file_inode(dobj->obj.filp)->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+
+ for_each_sg(sgt->sgl, sg, count, i) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ num = i;
+ goto release;
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+ num = sgt->nents;
+ goto release;
+ }
+ } else if (dobj->page) {
+ /* Single contiguous page */
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free_sgt;
+
+ sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free_table;
+ } else if (dobj->linear) {
+ /* Single contiguous physical region - no struct page */
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free_sgt;
+ sg_dma_address(sgt->sgl) = dobj->dev_addr;
+ sg_dma_len(sgt->sgl) = dobj->obj.size;
+ } else {
+ goto free_sgt;
+ }
+ return sgt;
+
+ release:
+ for_each_sg(sgt->sgl, sg, num, i)
+ page_cache_release(sg_page(sg));
+ free_table:
+ sg_free_table(sgt);
+ free_sgt:
+ kfree(sgt);
+ return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ int i;
+
+ if (!dobj->linear)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ if (dobj->obj.filp) {
+ struct scatterlist *sg;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ page_cache_release(sg_page(sg));
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+ return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+ .map_dma_buf = armada_gem_prime_map_dma_buf,
+ .unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap_atomic = armada_gem_dmabuf_no_kmap,
+ .kunmap_atomic = armada_gem_dmabuf_no_kunmap,
+ .kmap = armada_gem_dmabuf_no_kmap,
+ .kunmap = armada_gem_dmabuf_no_kunmap,
+ .mmap = armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+ int flags)
+{
+ return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+ O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+ struct dma_buf_attachment *attach;
+ struct armada_gem_object *dobj;
+
+ if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing our own dmabuf(s) increases the
+ * refcount on the gem object itself.
+ */
+ drm_gem_object_reference(obj);
+ dma_buf_put(buf);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ dobj = armada_gem_alloc_private_object(dev, buf->size);
+ if (!dobj) {
+ dma_buf_detach(buf, attach);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dobj->obj.import_attach = attach;
+
+ /*
+ * Don't call dma_buf_map_attachment() here - it maps the
+ * scatterlist immediately for DMA, and this is not always
+ * an appropriate thing to do.
+ */
+ return &dobj->obj;
+}
+
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+ int ret;
+
+ dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+ DMA_TO_DEVICE);
+ if (!dobj->sgt) {
+ DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+ return -EINVAL;
+ }
+ if (IS_ERR(dobj->sgt)) {
+ ret = PTR_ERR(dobj->sgt);
+ dobj->sgt = NULL;
+ DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+ return ret;
+ }
+ if (dobj->sgt->nents > 1) {
+ DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
+ return -EINVAL;
+ }
+ if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+ DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
+ return -EINVAL;
+ }
+ dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+ return 0;
+}
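
The dumb-buffer hooks above (armada_gem_dumb_create(), armada_gem_dumb_map_offset() and the vm_fault handler) implement the generic DRM dumb-buffer protocol, so a KMS client needs no Armada-specific ioctls to obtain a CPU-mappable scanout buffer. A minimal userspace sketch of that flow, assuming libdrm headers, a device node at /dev/dri/card0 and that the driver routes GEM mmap through drm_gem_mmap() as is usual for GEM drivers (error handling omitted):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>		/* pulls in drm.h / drm_mode.h from libdrm */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	struct drm_mode_create_dumb creq = {
		.width = 1024, .height = 768, .bpp = 32,
	};
	struct drm_mode_map_dumb mreq = { 0 };
	void *map;

	/* serviced by armada_gem_dumb_create(): linear-backed, cleared buffer */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);

	/* serviced by armada_gem_dumb_map_offset(): fake mmap offset */
	mreq.handle = creq.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);

	/* pages are inserted on demand by armada_gem_vm_fault() */
	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, mreq.offset);
	memset(map, 0, creq.size);
	return 0;
}
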
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 00000000000..00b6cd461a0
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
+struct armada_gem_object {
+ struct drm_gem_object obj;
+ void *addr;
+ phys_addr_t phys_addr;
+ resource_size_t dev_addr;
+ struct drm_mm_node *linear; /* for linear backed */
+ struct page *page; /* for page backed */
+ struct sg_table *sgt; /* for imported */
+ void (*update)(void *);
+ void *update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+ size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+ struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
+static inline struct armada_gem_object *armada_gem_object_lookup(
+ struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+ struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+ return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 00000000000..27319a8335e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ * LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ * LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ * LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+ LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */
+ LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
+ LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
+ LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
+ LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
+ LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
+ LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
+ LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
+ LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
+ LCD_SPU_DMA_PITCH_YC = 0x00e0,
+ LCD_SPU_DMA_PITCH_UV = 0x00e4,
+ LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
+ LCD_SPU_DMA_HPXL_VLN = 0x00ec,
+ LCD_SPU_DZM_HPXL_VLN = 0x00f0,
+ LCD_CFG_GRA_START_ADDR0 = 0x00f4,
+ LCD_CFG_GRA_START_ADDR1 = 0x00f8,
+ LCD_CFG_GRA_PITCH = 0x00fc,
+ LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
+ LCD_SPU_GRA_HPXL_VLN = 0x0104,
+ LCD_SPU_GZM_HPXL_VLN = 0x0108,
+ LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
+ LCD_SPU_HWC_HPXL_VLN = 0x0110,
+ LCD_SPUT_V_H_TOTAL = 0x0114,
+ LCD_SPU_V_H_ACTIVE = 0x0118,
+ LCD_SPU_H_PORCH = 0x011c,
+ LCD_SPU_V_PORCH = 0x0120,
+ LCD_SPU_BLANKCOLOR = 0x0124,
+ LCD_SPU_ALPHA_COLOR1 = 0x0128,
+ LCD_SPU_ALPHA_COLOR2 = 0x012c,
+ LCD_SPU_COLORKEY_Y = 0x0130,
+ LCD_SPU_COLORKEY_U = 0x0134,
+ LCD_SPU_COLORKEY_V = 0x0138,
+ LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */
+ LCD_SPU_SPI_RXDATA = 0x0140,
+ LCD_SPU_ISA_RXDATA = 0x0144,
+ LCD_SPU_HWC_RDDAT = 0x0158,
+ LCD_SPU_GAMMA_RDDAT = 0x015c,
+ LCD_SPU_PALETTE_RDDAT = 0x0160,
+ LCD_SPU_IOPAD_IN = 0x0178,
+ LCD_CFG_RDREG5F = 0x017c,
+ LCD_SPU_SPI_CTRL = 0x0180,
+ LCD_SPU_SPI_TXDATA = 0x0184,
+ LCD_SPU_SMPN_CTRL = 0x0188,
+ LCD_SPU_DMA_CTRL0 = 0x0190,
+ LCD_SPU_DMA_CTRL1 = 0x0194,
+ LCD_SPU_SRAM_CTRL = 0x0198,
+ LCD_SPU_SRAM_WRDAT = 0x019c,
+ LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */
+ LCD_SPU_SRAM_PARA1 = 0x01a4,
+ LCD_CFG_SCLK_DIV = 0x01a8,
+ LCD_SPU_CONTRAST = 0x01ac,
+ LCD_SPU_SATURATION = 0x01b0,
+ LCD_SPU_CBSH_HUE = 0x01b4,
+ LCD_SPU_DUMB_CTRL = 0x01b8,
+ LCD_SPU_IOPAD_CONTROL = 0x01bc,
+ LCD_SPU_IRQ_ENA = 0x01c0,
+ LCD_SPU_IRQ_ISR = 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+ ADV_VSYNC_L_OFF = 0xfff << 20,
+ ADV_GRACOLORKEY = 1 << 19,
+ ADV_VIDCOLORKEY = 1 << 18,
+ ADV_HWC32BLEND = 1 << 15,
+ ADV_HWC32ARGB = 1 << 14,
+ ADV_HWC32ENABLE = 1 << 13,
+ ADV_VSYNCOFFEN = 1 << 12,
+ ADV_VSYNC_H_OFF = 0xfff << 0,
+};
+
+enum {
+ CFG_565 = 0,
+ CFG_1555 = 1,
+ CFG_888PACK = 2,
+ CFG_X888 = 3,
+ CFG_8888 = 4,
+ CFG_422PACK = 5,
+ CFG_422 = 6,
+ CFG_420 = 7,
+ CFG_PSEUDO4 = 9,
+ CFG_PSEUDO8 = 10,
+ CFG_SWAPRB = 1 << 4,
+ CFG_SWAPUV = 1 << 3,
+ CFG_SWAPYU = 1 << 2,
+ CFG_YUV2RGB = 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+ CFG_NOBLENDING = 1 << 31,
+ CFG_GAMMA_ENA = 1 << 30,
+ CFG_CBSH_ENA = 1 << 29,
+ CFG_PALETTE_ENA = 1 << 28,
+ CFG_ARBFAST_ENA = 1 << 27,
+ CFG_HWC_1BITMOD = 1 << 26,
+ CFG_HWC_1BITENA = 1 << 25,
+ CFG_HWC_ENA = 1 << 24,
+ CFG_DMAFORMAT = 0xf << 20,
+#define CFG_DMA_FMT(x) ((x) << 20)
+ CFG_GRAFORMAT = 0xf << 16,
+#define CFG_GRA_FMT(x) ((x) << 16)
+#define CFG_GRA_MOD(x) ((x) << 8)
+ CFG_GRA_FTOGGLE = 1 << 15,
+ CFG_GRA_HSMOOTH = 1 << 14,
+ CFG_GRA_TSTMODE = 1 << 13,
+ CFG_GRA_ENA = 1 << 8,
+#define CFG_DMA_MOD(x) ((x) << 0)
+ CFG_DMA_FTOGGLE = 1 << 7,
+ CFG_DMA_HSMOOTH = 1 << 6,
+ CFG_DMA_TSTMODE = 1 << 5,
+ CFG_DMA_ENA = 1 << 0,
+};
+
+enum {
+ CKMODE_DISABLE = 0,
+ CKMODE_Y = 1,
+ CKMODE_U = 2,
+ CKMODE_RGB = 3,
+ CKMODE_V = 4,
+ CKMODE_R = 5,
+ CKMODE_G = 6,
+ CKMODE_B = 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+ CFG_FRAME_TRIG = 1 << 31,
+ CFG_VSYNC_INV = 1 << 27,
+ CFG_CKMODE_MASK = 0x7 << 24,
+#define CFG_CKMODE(x) ((x) << 24)
+ CFG_CARRY = 1 << 23,
+ CFG_GATED_CLK = 1 << 21,
+ CFG_PWRDN_ENA = 1 << 20,
+ CFG_DSCALE_MASK = 0x3 << 18,
+ CFG_DSCALE_NONE = 0x0 << 18,
+ CFG_DSCALE_HALF = 0x1 << 18,
+ CFG_DSCALE_QUAR = 0x2 << 18,
+ CFG_ALPHAM_MASK = 0x3 << 16,
+ CFG_ALPHAM_VIDEO = 0x0 << 16,
+ CFG_ALPHAM_GRA = 0x1 << 16,
+ CFG_ALPHAM_CFG = 0x2 << 16,
+ CFG_ALPHA_MASK = 0xff << 8,
+ CFG_PIXCMD_MASK = 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+ SRAM_READ = 0 << 14,
+ SRAM_WRITE = 2 << 14,
+ SRAM_INIT = 3 << 14,
+ SRAM_HWC32_RAM1 = 0xc << 8,
+ SRAM_HWC32_RAM2 = 0xd << 8,
+ SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
+ SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
+ SRAM_HWC32_RAMB = 0xe << 8,
+ SRAM_HWC32_TRAN = 0xf << 8,
+ SRAM_HWC = 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+ CFG_CSB_256x32 = 1 << 15, /* cursor */
+ CFG_CSB_256x24 = 1 << 14, /* palette */
+ CFG_CSB_256x8 = 1 << 13, /* gamma */
+ CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */
+ CFG_PDWN256x32 = 1 << 7, /* power down cursor */
+ CFG_PDWN256x24 = 1 << 6, /* power down palette */
+ CFG_PDWN256x8 = 1 << 5, /* power down gamma */
+ CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */
+ CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */
+ CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */
+ CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */
+ CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+ /* Armada 510 */
+ SCLK_510_AXI = 0x0 << 30,
+ SCLK_510_EXTCLK0 = 0x1 << 30,
+ SCLK_510_PLL = 0x2 << 30,
+ SCLK_510_EXTCLK1 = 0x3 << 30,
+ SCLK_510_DIV_CHANGE = 1 << 29,
+ SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
+ SCLK_510_INT_DIV_MASK = 0xffff << 0,
+
+ /* Armada 16x */
+ SCLK_16X_AHB = 0x0 << 28,
+ SCLK_16X_PCLK = 0x1 << 28,
+ SCLK_16X_AXI = 0x4 << 28,
+ SCLK_16X_PLL = 0x8 << 28,
+ SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
+ SCLK_16X_INT_DIV_MASK = 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+ DUMB16_RGB565_0 = 0x0 << 28,
+ DUMB16_RGB565_1 = 0x1 << 28,
+ DUMB18_RGB666_0 = 0x2 << 28,
+ DUMB18_RGB666_1 = 0x3 << 28,
+ DUMB12_RGB444_0 = 0x4 << 28,
+ DUMB12_RGB444_1 = 0x5 << 28,
+ DUMB24_RGB888_0 = 0x6 << 28,
+ DUMB_BLANK = 0x7 << 28,
+ DUMB_MASK = 0xf << 28,
+ CFG_BIAS_OUT = 1 << 8,
+ CFG_REV_RGB = 1 << 7,
+ CFG_INV_CBLANK = 1 << 6,
+ CFG_INV_CSYNC = 1 << 5, /* Normally active high */
+ CFG_INV_HENA = 1 << 4,
+ CFG_INV_VSYNC = 1 << 3, /* Normally active high */
+ CFG_INV_HSYNC = 1 << 2, /* Normally active high */
+ CFG_INV_PCLK = 1 << 1,
+ CFG_DUMB_ENA = 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+ CFG_VSCALE_LN_EN = 3 << 18,
+ CFG_GRA_VM_ENA = 1 << 15,
+ CFG_DMA_VM_ENA = 1 << 13,
+ CFG_CMD_VM_ENA = 1 << 11,
+ CFG_CSC_MASK = 3 << 8,
+ CFG_CSC_YUV_CCIR709 = 1 << 9,
+ CFG_CSC_YUV_CCIR601 = 0 << 9,
+ CFG_CSC_RGB_STUDIO = 1 << 8,
+ CFG_CSC_RGB_COMPUTER = 0 << 8,
+ CFG_IOPAD_MASK = 0xf << 0,
+ CFG_IOPAD_DUMB24 = 0x0 << 0,
+ CFG_IOPAD_DUMB18SPI = 0x1 << 0,
+ CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
+ CFG_IOPAD_DUMB16SPI = 0x3 << 0,
+ CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
+ CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
+ CFG_IOPAD_SMART18 = 0x6 << 0,
+ CFG_IOPAD_SMART16 = 0x7 << 0,
+ CFG_IOPAD_SMART8 = 0x8 << 0,
+};
+
+#define IOPAD_DUMB24 0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+ DMA_FRAME_IRQ0_ENA = 1 << 31,
+ DMA_FRAME_IRQ1_ENA = 1 << 30,
+ DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+ DMA_FF_UNDERFLOW_ENA = 1 << 29,
+ GRA_FRAME_IRQ0_ENA = 1 << 27,
+ GRA_FRAME_IRQ1_ENA = 1 << 26,
+ GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+ GRA_FF_UNDERFLOW_ENA = 1 << 25,
+ VSYNC_IRQ_ENA = 1 << 23,
+ DUMB_FRAMEDONE_ENA = 1 << 22,
+ TWC_FRAMEDONE_ENA = 1 << 21,
+ HWC_FRAMEDONE_ENA = 1 << 20,
+ SLV_IRQ_ENA = 1 << 19,
+ SPI_IRQ_ENA = 1 << 18,
+ PWRDN_IRQ_ENA = 1 << 17,
+ ERR_IRQ_ENA = 1 << 16,
+ CLEAN_SPU_IRQ_ISR = 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+ DMA_FRAME_IRQ0 = 1 << 31,
+ DMA_FRAME_IRQ1 = 1 << 30,
+ DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+ DMA_FF_UNDERFLOW = 1 << 29,
+ GRA_FRAME_IRQ0 = 1 << 27,
+ GRA_FRAME_IRQ1 = 1 << 26,
+ GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+ GRA_FF_UNDERFLOW = 1 << 25,
+ VSYNC_IRQ = 1 << 23,
+ DUMB_FRAMEDONE = 1 << 22,
+ TWC_FRAMEDONE = 1 << 21,
+ HWC_FRAMEDONE = 1 << 20,
+ SLV_IRQ = 1 << 19,
+ SPI_IRQ = 1 << 18,
+ PWRDN_IRQ = 1 << 17,
+ ERR_IRQ = 1 << 16,
+ DMA_FRAME_IRQ0_LEVEL = 1 << 15,
+ DMA_FRAME_IRQ1_LEVEL = 1 << 14,
+ DMA_FRAME_CNT_ISR = 3 << 12,
+ GRA_FRAME_IRQ0_LEVEL = 1 << 11,
+ GRA_FRAME_IRQ1_LEVEL = 1 << 10,
+ GRA_FRAME_CNT_ISR = 3 << 8,
+ VSYNC_IRQ_LEVEL = 1 << 7,
+ DUMB_FRAMEDONE_LEVEL = 1 << 6,
+ TWC_FRAMEDONE_LEVEL = 1 << 5,
+ HWC_FRAMEDONE_LEVEL = 1 << 4,
+ SLV_FF_EMPTY = 1 << 3,
+ DMA_FF_ALLEMPTY = 1 << 2,
+ GRA_FF_ALLEMPTY = 1 << 1,
+ PWRDN_IRQ_LEVEL = 1 << 0,
+};
+
+#endif
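
The CFG_*_FMT(), CFG_*_MOD() and CFG_CKMODE() helpers above shift a multi-bit field into its position within the control registers; a full register value is then just the OR of the shifted fields and the single-bit enables, which is how armada_plane_update() builds its ctrl0 word. An illustrative composition (the particular format and modifier values are chosen arbitrarily, and dcrtc->base is assumed to be the ioremapped LCD register block):

/* Illustration only: 32bpp BGRA graphics plane plus a packed-YUYV video
 * plane converted to RGB, using the field helpers defined above. */
u32 ctrl0 = CFG_GRA_FMT(CFG_8888) |	/* graphics format, bits 19:16 */
	    CFG_GRA_MOD(CFG_SWAPRB) |	/* graphics modifier, shifted by 8 */
	    CFG_GRA_ENA |
	    CFG_DMA_FMT(CFG_422PACK) |	/* video format, bits 23:20 */
	    CFG_DMA_MOD(CFG_YUV2RGB) |	/* video modifier, shifted by 0 */
	    CFG_DMA_HSMOOTH | CFG_DMA_ENA;

/* LCD_SPU_DMA_CTRL0 is listed above as written from IRQ context, so the
 * driver queues such updates via armada_reg_queue_*() rather than writing
 * directly; a direct write would look like: */
writel_relaxed(ctrl0, dcrtc->base + LCD_SPU_DMA_CTRL0);
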
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 00000000000..bd8c4562066
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
+
+#endif
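
ARMADA_IOCTL_PROTO() only stamps out the handler prototype for each driver-private ioctl so that armada_drv.c (not part of this excerpt) can reference it from its ioctl table. For instance, ARMADA_IOCTL_PROTO(gem_create); expands to the declaration below; the table entry that follows is a hypothetical sketch of how such a handler is presumably wired up, not a line from this patch:

/* Expansion of ARMADA_IOCTL_PROTO(gem_create): */
extern int armada_gem_create_ioctl(struct drm_device *, void *,
	struct drm_file *);

/* Hypothetical ioctl-table entry in armada_drv.c (not shown here): */
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, DRM_UNLOCKED),
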
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 00000000000..d685a542148
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+ struct drm_connector conn;
+ const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+ struct drm_encoder *enc = conn->encoder;
+
+ return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+ struct drm_connector *conn, bool force)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (dconn->type->detect) {
+ status = dconn->type->detect(conn, force);
+ } else {
+ struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+ if (enc)
+ status = encoder_helper_funcs(enc)->detect(enc, conn);
+ }
+
+ return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+ drm_sysfs_connector_remove(conn);
+ drm_connector_cleanup(conn);
+ kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+ if (!dconn->type->set_property)
+ return -EINVAL;
+
+ return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = armada_drm_connector_detect,
+ .destroy = armada_drm_connector_destroy,
+ .set_property = armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+ encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+ encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+ int valid = MODE_BAD;
+
+ if (encoder) {
+ struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+ valid = slave->slave_funcs->mode_valid(encoder, mode);
+ }
+ return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value)
+{
+ struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+ int rc = -EINVAL;
+
+ if (encoder) {
+ struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+ rc = slave->slave_funcs->set_property(encoder, conn, property,
+ value);
+ }
+ return rc;
+}
+
+int armada_output_create(struct drm_device *dev,
+ const struct armada_output_type *type, const void *data)
+{
+ struct armada_connector *dconn;
+ int ret;
+
+ dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+ if (!dconn)
+ return -ENOMEM;
+
+ dconn->type = type;
+
+ ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+ type->connector_type);
+ if (ret) {
+ DRM_ERROR("unable to init connector\n");
+ goto err_destroy_dconn;
+ }
+
+ ret = type->create(&dconn->conn, data);
+ if (ret)
+ goto err_conn;
+
+ ret = drm_sysfs_connector_add(&dconn->conn);
+ if (ret)
+ goto err_sysfs;
+
+ return 0;
+
+ err_sysfs:
+ if (dconn->conn.encoder)
+ dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+ drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+ kfree(dconn);
+ return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 00000000000..4126d43b505
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+ ((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
+struct armada_output_type {
+ int connector_type;
+ enum drm_connector_status (*detect)(struct drm_connector *, bool);
+ int (*create)(struct drm_connector *, const void *);
+ int (*set_property)(struct drm_connector *, struct drm_property *,
+ uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+ const struct armada_output_type *type, const void *data);
+
+#endif
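
struct armada_output_type is the driver's small abstraction for connector back-ends: armada_output_create() registers the connector and defers detection, creation and property handling to the type's callbacks, falling back to the attached encoder's helpers when ->detect is absent. A hypothetical fixed-LVDS output type, purely to illustrate the shape of the interface (these names are invented here and are not part of the patch):

static int armada_lvds_create(struct drm_connector *conn, const void *data)
{
	/* e.g. register fixed modes from platform data */
	return 0;
}

static enum drm_connector_status
armada_lvds_detect(struct drm_connector *conn, bool force)
{
	return connector_status_connected;	/* panel is always present */
}

static const struct armada_output_type armada_lvds_output = {
	.connector_type	= DRM_MODE_CONNECTOR_LVDS,
	.detect		= armada_lvds_detect,
	.create		= armada_lvds_create,
	/* .set_property is optional; without it the property ioctl fails */
};

/* registered from the driver's load path:
 *	armada_output_create(dev, &armada_lvds_output, pdata);
 */
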
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 00000000000..c5b06fdb459
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+ uint32_t colorkey_yr;
+ uint32_t colorkey_ug;
+ uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+ int16_t brightness;
+ uint16_t contrast;
+ uint16_t saturation;
+ uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+ struct drm_plane base;
+ spinlock_t lock;
+ struct drm_framebuffer *old_fb;
+ uint32_t src_hw;
+ uint32_t dst_hw;
+ uint32_t dst_yx;
+ uint32_t ctrl0;
+ struct {
+ struct armada_vbl_event update;
+ struct armada_regs regs[13];
+ wait_queue_head_t wait;
+ } vbl;
+ struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+ struct armada_crtc *dcrtc)
+{
+ writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+ writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+ writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+ writel_relaxed(prop->brightness << 16 | prop->contrast,
+ dcrtc->base + LCD_SPU_CONTRAST);
+ /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+ writel_relaxed(prop->saturation << 16,
+ dcrtc->base + LCD_SPU_SATURATION);
+ writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+ armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+ spin_unlock_irq(&dcrtc->irq_lock);
+}
+
+/* === Plane support === */
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+ struct armada_plane *dplane = data;
+ struct drm_framebuffer *fb;
+
+ armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+ spin_lock(&dplane->lock);
+ fb = dplane->old_fb;
+ dplane->old_fb = NULL;
+ spin_unlock(&dplane->lock);
+
+ if (fb)
+ armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+ int end = start + size;
+ if (end < 0)
+ return 0;
+ if (start < 0)
+ start = 0;
+ return (unsigned)end > max ? max - start : end - start;
+}
+
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ uint32_t val, ctrl0;
+ unsigned idx = 0;
+ int ret;
+
+ crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+ crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+ ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+ CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+ /* Does the position/size result in nothing to display? */
+ if (crtc_w == 0 || crtc_h == 0) {
+ ctrl0 &= ~CFG_DMA_ENA;
+ }
+
+ /*
+ * FIXME: if the starting point is off screen, we need to
+ * adjust src_x, src_y, src_w, src_h appropriately, and
+ * according to the scale.
+ */
+
+ if (!dcrtc->plane) {
+ dcrtc->plane = plane;
+ armada_ovl_update_attr(&dplane->prop, dcrtc);
+ }
+
+ /* FIXME: overlay on an interlaced display */
+ /* Just updating the position/size? */
+ if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ val = (src_h & 0xffff0000) | src_w >> 16;
+ dplane->src_hw = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+ val = crtc_h << 16 | crtc_w;
+ dplane->dst_hw = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+ val = crtc_y << 16 | crtc_x;
+ dplane->dst_yx = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+ return 0;
+ } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+ }
+
+ ret = wait_event_timeout(dplane->vbl.wait,
+ list_empty(&dplane->vbl.update.node),
+ HZ/25);
+ if (ret < 0)
+ return ret;
+
+ if (plane->fb != fb) {
+ struct armada_gem_object *obj = drm_fb_obj(fb);
+ uint32_t sy, su, sv;
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ if (plane->fb) {
+ struct drm_framebuffer *older_fb;
+
+ spin_lock_irq(&dplane->lock);
+ older_fb = dplane->old_fb;
+ dplane->old_fb = plane->fb;
+ spin_unlock_irq(&dplane->lock);
+ if (older_fb)
+ armada_drm_queue_unref_work(dcrtc->crtc.dev,
+ older_fb);
+ }
+
+ src_y >>= 16;
+ src_x >>= 16;
+ sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+ src_x * fb->bits_per_pixel / 8;
+ su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+ src_x;
+ sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+ src_x;
+
+ armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ LCD_SPU_DMA_START_ADDR_Y0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ LCD_SPU_DMA_START_ADDR_U0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ LCD_SPU_DMA_START_ADDR_V0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ LCD_SPU_DMA_START_ADDR_Y1);
+ armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ LCD_SPU_DMA_START_ADDR_U1);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ LCD_SPU_DMA_START_ADDR_V1);
+
+ val = fb->pitches[0] << 16 | fb->pitches[0];
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_PITCH_YC);
+ val = fb->pitches[1] << 16 | fb->pitches[2];
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_PITCH_UV);
+ }
+
+ val = (src_h & 0xffff0000) | src_w >> 16;
+ if (dplane->src_hw != val) {
+ dplane->src_hw = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_HPXL_VLN);
+ }
+ val = crtc_h << 16 | crtc_w;
+ if (dplane->dst_hw != val) {
+ dplane->dst_hw = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DZM_HPXL_VLN);
+ }
+ val = crtc_y << 16 | crtc_x;
+ if (dplane->dst_yx != val) {
+ dplane->dst_yx = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_OVSA_HPXL_VLN);
+ }
+ if (dplane->ctrl0 != ctrl0) {
+ dplane->ctrl0 = ctrl0;
+ armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+ CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+ CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+ CFG_YUV2RGB) | CFG_DMA_ENA,
+ LCD_SPU_DMA_CTRL0);
+ }
+ if (idx) {
+ armada_reg_queue_end(dplane->vbl.regs, idx);
+ armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+ }
+ return 0;
+}
+
+static int armada_plane_disable(struct drm_plane *plane)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct drm_framebuffer *fb;
+ struct armada_crtc *dcrtc;
+
+ if (!dplane->base.crtc)
+ return 0;
+
+ dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+ dcrtc->plane = NULL;
+
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+ armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ dplane->ctrl0 = 0;
+ spin_unlock_irq(&dcrtc->irq_lock);
+
+ /* Power down the Y/U/V FIFOs */
+ armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ spin_lock_irq(&dplane->lock);
+ fb = dplane->old_fb;
+ dplane->old_fb = NULL;
+ spin_unlock_irq(&dplane->lock);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
+ return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+ kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ bool update_attr = false;
+
+ if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+ dplane->prop.colorkey_yr = CCC(K2R(val));
+ dplane->prop.colorkey_ug = CCC(K2G(val));
+ dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+ update_attr = true;
+ } else if (property == priv->colorkey_min_prop) {
+ dplane->prop.colorkey_yr &= ~0x00ff0000;
+ dplane->prop.colorkey_yr |= K2R(val) << 16;
+ dplane->prop.colorkey_ug &= ~0x00ff0000;
+ dplane->prop.colorkey_ug |= K2G(val) << 16;
+ dplane->prop.colorkey_vb &= ~0x00ff0000;
+ dplane->prop.colorkey_vb |= K2B(val) << 16;
+ update_attr = true;
+ } else if (property == priv->colorkey_max_prop) {
+ dplane->prop.colorkey_yr &= ~0xff000000;
+ dplane->prop.colorkey_yr |= K2R(val) << 24;
+ dplane->prop.colorkey_ug &= ~0xff000000;
+ dplane->prop.colorkey_ug |= K2G(val) << 24;
+ dplane->prop.colorkey_vb &= ~0xff000000;
+ dplane->prop.colorkey_vb |= K2B(val) << 24;
+ update_attr = true;
+ } else if (property == priv->colorkey_val_prop) {
+ dplane->prop.colorkey_yr &= ~0x0000ff00;
+ dplane->prop.colorkey_yr |= K2R(val) << 8;
+ dplane->prop.colorkey_ug &= ~0x0000ff00;
+ dplane->prop.colorkey_ug |= K2G(val) << 8;
+ dplane->prop.colorkey_vb &= ~0x0000ff00;
+ dplane->prop.colorkey_vb |= K2B(val) << 8;
+ update_attr = true;
+ } else if (property == priv->colorkey_alpha_prop) {
+ dplane->prop.colorkey_yr &= ~0x000000ff;
+ dplane->prop.colorkey_yr |= K2R(val);
+ dplane->prop.colorkey_ug &= ~0x000000ff;
+ dplane->prop.colorkey_ug |= K2G(val);
+ dplane->prop.colorkey_vb &= ~0x000000ff;
+ dplane->prop.colorkey_vb |= K2B(val);
+ update_attr = true;
+ } else if (property == priv->colorkey_mode_prop) {
+ dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+ dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+ update_attr = true;
+ } else if (property == priv->brightness_prop) {
+ dplane->prop.brightness = val - 256;
+ update_attr = true;
+ } else if (property == priv->contrast_prop) {
+ dplane->prop.contrast = val;
+ update_attr = true;
+ } else if (property == priv->saturation_prop) {
+ dplane->prop.saturation = val;
+ update_attr = true;
+ }
+
+ if (update_attr && dplane->base.crtc)
+ armada_ovl_update_attr(&dplane->prop,
+ drm_to_armada_crtc(dplane->base.crtc));
+
+ return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+ .update_plane = armada_plane_update,
+ .disable_plane = armada_plane_disable,
+ .destroy = armada_plane_destroy,
+ .set_property = armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+ { CKMODE_DISABLE, "disabled" },
+ { CKMODE_Y, "Y component" },
+ { CKMODE_U, "U component" },
+ { CKMODE_V, "V component" },
+ { CKMODE_RGB, "RGB" },
+ { CKMODE_R, "R component" },
+ { CKMODE_G, "G component" },
+ { CKMODE_B, "B component" },
+};
+
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->colorkey_prop)
+ return 0;
+
+ priv->colorkey_prop = drm_property_create_range(dev, 0,
+ "colorkey", 0, 0xffffff);
+ priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+ "colorkey_min", 0, 0xffffff);
+ priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+ "colorkey_max", 0, 0xffffff);
+ priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+ "colorkey_val", 0, 0xffffff);
+ priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+ "colorkey_alpha", 0, 0xffffff);
+ priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+ "colorkey_mode",
+ armada_drm_colorkey_enum_list,
+ ARRAY_SIZE(armada_drm_colorkey_enum_list));
+ priv->brightness_prop = drm_property_create_range(dev, 0,
+ "brightness", 0, 256 + 255);
+ priv->contrast_prop = drm_property_create_range(dev, 0,
+ "contrast", 0, 0x7fff);
+ priv->saturation_prop = drm_property_create_range(dev, 0,
+ "saturation", 0, 0x7fff);
+
+ if (!priv->colorkey_prop)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_mode_object *mobj;
+ struct armada_plane *dplane;
+ int ret;
+
+ ret = armada_overlay_create_properties(dev);
+ if (ret)
+ return ret;
+
+ dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+ if (!dplane)
+ return -ENOMEM;
+
+ spin_lock_init(&dplane->lock);
+ init_waitqueue_head(&dplane->vbl.wait);
+ armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+ dplane);
+
+ drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+ armada_formats, ARRAY_SIZE(armada_formats), false);
+
+ dplane->prop.colorkey_yr = 0xfefefe00;
+ dplane->prop.colorkey_ug = 0x01010100;
+ dplane->prop.colorkey_vb = 0x01010100;
+ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+ dplane->prop.brightness = 0;
+ dplane->prop.contrast = 0x4000;
+ dplane->prop.saturation = 0x4000;
+
+ mobj = &dplane->base.base;
+ drm_object_attach_property(mobj, priv->colorkey_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_min_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_max_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_val_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+ 0x000000);
+ drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+ CKMODE_RGB);
+ drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->contrast_prop,
+ dplane->prop.contrast);
+ drm_object_attach_property(mobj, priv->saturation_prop,
+ dplane->prop.saturation);
+
+ return 0;
+}
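
The colorkey handling packs one 8-bit channel into three byte lanes of each per-channel register: K2R/K2G/K2B extract R, G and B from the property value (R in the low byte), and CCC() replicates a channel into the max (31:24), min (23:16) and val (15:8) lanes while leaving the alpha lane clear. A worked example using the default "colorkey" value attached above:

/* Property value 0x0101fe -> R = 0xfe, G = 0x01, B = 0x01 via K2R/K2G/K2B.
 * Replicating each channel as armada_plane_set_property() does with CCC(): */
u32 key = 0x0101fe;
u32 colorkey_yr = K2R(key) << 24 | K2R(key) << 16 | K2R(key) << 8; /* 0xfefefe00 */
u32 colorkey_ug = K2G(key) << 24 | K2G(key) << 16 | K2G(key) << 8; /* 0x01010100 */
u32 colorkey_vb = K2B(key) << 24 | K2B(key) << 16 | K2B(key) << 8; /* 0x01010100 */
/* These match the reset values programmed in armada_overlay_plane_create(). */
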
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 00000000000..00d0facb42f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+ struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+ int count = 0;
+
+ if (enc) {
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+ count = slave->slave_funcs->get_modes(enc, conn);
+ }
+
+ return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+ struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+ if (slave->slave_funcs)
+ slave->slave_funcs->destroy(enc);
+ if (client)
+ i2c_put_adapter(client->adapter);
+
+ drm_encoder_cleanup(&slave->base);
+ kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+ .destroy = armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+ .get_modes = armada_drm_slave_get_modes,
+ .mode_valid = armada_drm_slave_encoder_mode_valid,
+ .best_encoder = armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+ .dpms = drm_i2c_encoder_dpms,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = drm_i2c_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .detect = drm_i2c_encoder_detect,
+};
+
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+ const struct armada_drm_slave_config *config = data;
+ struct drm_encoder_slave *slave;
+ struct i2c_adapter *adap;
+ int ret;
+
+ conn->interlace_allowed = config->interlace_allowed;
+ conn->doublescan_allowed = config->doublescan_allowed;
+ conn->polled = config->polled;
+
+ drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave)
+ return -ENOMEM;
+
+ slave->base.possible_crtcs = config->crtcs;
+
+ adap = i2c_get_adapter(config->i2c_adapter_id);
+ if (!adap) {
+ kfree(slave);
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_encoder_init(conn->dev, &slave->base,
+ &armada_drm_slave_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret) {
+ DRM_ERROR("unable to init encoder\n");
+ i2c_put_adapter(adap);
+ kfree(slave);
+ return ret;
+ }
+
+ ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+ i2c_put_adapter(adap);
+ if (ret) {
+ DRM_ERROR("unable to init encoder slave\n");
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+ ret = slave->slave_funcs->create_resources(&slave->base, conn);
+ if (ret) {
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+ if (ret) {
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ conn->encoder = &slave->base;
+
+ return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ .create = armada_drm_conn_slave_create,
+ .set_property = armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+ const struct armada_drm_slave_config *config)
+{
+ return armada_output_create(dev, &armada_drm_conn_slave, config);
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 00000000000..bf2374c96fc
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+ int i2c_adapter_id;
+ uint32_t crtcs;
+ uint8_t polled;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+ const struct armada_drm_slave_config *);
+
+#endif
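
struct armada_drm_slave_config is filled in by board or platform code to describe the external i2c encoder (on the Dove boards this is typically a TDA998x HDMI transmitter) and handed to armada_drm_connector_slave_create(), which then creates the DRM_MODE_CONNECTOR_HDMIA connector above. A sketch of such a configuration; the i2c bus number, slave address and encoder driver name are illustrative assumptions, not taken from this patch:

static const struct armada_drm_slave_config armada_hdmi_config = {
	.i2c_adapter_id		= 0,		/* assumed: bus hosting the encoder */
	.crtcs			= 1 << 0,	/* attach to the first CRTC */
	.polled			= DRM_CONNECTOR_POLL_CONNECT |
				  DRM_CONNECTOR_POLL_DISCONNECT,
	.interlace_allowed	= true,
	.info			= {
		I2C_BOARD_INFO("tda998x", 0x70),	/* assumed encoder */
	},
};

/* from the DRM driver's load path:
 *	ret = armada_drm_connector_slave_create(dev, &armada_hdmi_config);
 * which returns -EPROBE_DEFER until the i2c adapter is available
 * (see armada_drm_conn_slave_create() above).
 */
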
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51eae82..8a784c460c8 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@ config DRM_AST
select FB_SYS_FILLRECT
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270dc714..5137f15dba1 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_init_object = ast_gem_init_object,
.gem_free_object = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e873..9833a1b1acc 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
extern int ast_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d374c..af0b868a9df 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
- return 0;
-}
-
void ast_bo_unref(struct ast_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22723f..9864559e5fb 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d9178..953fc8aea69 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_init_object = cirrus_gem_init_object,
.gem_free_object = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb9184af..b6aded73838 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
struct pci_dev *pdev,
uint32_t flags);
void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
void cirrus_gem_free_object(struct drm_gem_object *obj);
int cirrus_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a533a51..78e76f24343 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
- return 0;
-}
-
void cirrus_bo_unref(struct cirrus_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b21cc3..adabc3daaa5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
int cirrus_vga_get_modes(struct drm_connector *connector)
{
- /* Just add a static list of modes */
- drm_add_modes_noedid(connector, 640, 480);
- drm_add_modes_noedid(connector, 800, 600);
- drm_add_modes_noedid(connector, 1024, 768);
- drm_add_modes_noedid(connector, 1280, 1024);
+ int count;
- return 4;
+ /* Just add a static list of modes */
+ count = drm_add_modes_noedid(connector, 1280, 1024);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return count;
}
static int cirrus_vga_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff965bcf..a4b017b6849 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
- ++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (pos->handle == ctx->handle) {
list_del(&pos->head);
kfree(pos);
- --dev->ctx_count;
}
}
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa941f6..d6cf77c472e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+ { DRM_MODE_CONNECTOR_DSI, "DSI" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+ { DRM_MODE_ENCODER_DSI, "DSI" },
};
void drm_connector_ida_init(void)
@@ -1301,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
}
/**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
+ if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+ return -EINVAL;
+
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1552,7 +1557,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@@ -1579,6 +1584,19 @@ out:
return ret;
}
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+ const struct drm_file *file_priv)
+{
+ /*
+ * If user-space hasn't configured the driver to expose the stereo 3D
+ * modes, don't expose them.
+ */
+ if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+ return false;
+
+ return true;
+}
+
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@@ -1623,7 +1641,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head)
- mode_count++;
+ if (drm_mode_expose_to_userspace(mode, file_priv))
+ mode_count++;
out_resp->connector_id = connector->base.id;
out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
+ if (!drm_mode_expose_to_userspace(mode, file_priv))
+ continue;
+
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
@@ -1735,7 +1757,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
encoder = obj_to_encoder(obj);
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb)
+
+{
+ int hdisplay, vdisplay;
+
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (drm_mode_is_stereo(mode)) {
+ struct drm_display_mode adjusted = *mode;
+
+ drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+ hdisplay = adjusted.crtc_hdisplay;
+ vdisplay = adjusted.crtc_vdisplay;
+ }
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ x > fb->width - hdisplay ||
+ y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, x, y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
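drm_crtc_check_viewport() also covers stereo modes by letting drm_mode_set_crtcinfo(CRTC_STEREO_DOUBLE) expand the mode to its transmitted size before comparing it against the framebuffer. A rough sketch of the arithmetic, assuming the frame-packing rule used by the stereo handling in drm_modes.c (that rule is not part of this hunk):

/* Frame packing transmits both eyes plus the original vblank in one frame. */
static int frame_packing_height(int vdisplay, int vtotal)
{
	/* e.g. 1920x1080 with vtotal 1125: 1080 + 1125 = 2205 active lines */
	return vdisplay + vtotal;
}

So a "1080p" frame-packed mode only passes the check if the framebuffer is at least 1920x2205.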
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: drm device for the ioctl
@@ -2080,14 +2141,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
- int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
@@ -2104,7 +2164,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
}
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- hdisplay = mode->hdisplay;
- vdisplay = mode->vdisplay;
-
- if (crtc->invert_dimensions)
- swap(hdisplay, vdisplay);
-
- if (hdisplay > fb->width ||
- vdisplay > fb->height ||
- crtc_req->x > fb->width - hdisplay ||
- crtc_req->y > fb->height - vdisplay) {
- DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
- fb->width, fb->height,
- hdisplay, vdisplay, crtc_req->x, crtc_req->y,
- crtc->invert_dimensions ? " (inverted)" : "");
- ret = -ENOSPC;
+ ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+ mode, fb);
+ if (ret)
goto out;
- }
+
}
if (crtc_req->count_connectors == 0 && mode) {
@@ -2184,7 +2232,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!obj) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
@@ -2232,7 +2280,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
crtc = obj_to_crtc(obj);
@@ -2441,6 +2489,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
case DRM_FORMAT_YVU444:
return 0;
default:
+ DRM_DEBUG_KMS("invalid pixel format %s\n",
+ drm_get_format_name(r->pixel_format));
return -EINVAL;
}
}
@@ -2606,7 +2656,7 @@ fail_lookup:
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
- return -EINVAL;
+ return -ENOENT;
}
/**
@@ -2634,7 +2684,7 @@ int drm_mode_getfb(struct drm_device *dev,
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
- return -EINVAL;
+ return -ENOENT;
r->height = fb->height;
r->width = fb->width;
@@ -2679,7 +2729,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
- return -EINVAL;
+ return -ENOENT;
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -3011,7 +3061,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto done;
}
property = obj_to_property(obj);
@@ -3140,7 +3190,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto done;
}
blob = obj_to_blob(obj);
@@ -3301,7 +3351,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
if (!obj->properties) {
@@ -3354,8 +3404,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
- if (!arg_obj)
+ if (!arg_obj) {
+ ret = -ENOENT;
goto out;
+ }
if (!arg_obj->properties)
goto out;
@@ -3368,8 +3420,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
- if (!prop_obj)
+ if (!prop_obj) {
+ ret = -ENOENT;
goto out;
+ }
property = obj_to_property(prop_obj);
if (!drm_property_change_is_valid(property, arg->value))
@@ -3454,7 +3508,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@@ -3513,7 +3567,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@@ -3556,7 +3610,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
- int hdisplay, vdisplay;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3568,7 +3621,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
- return -EINVAL;
+ return -ENOENT;
crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
@@ -3585,25 +3638,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
- if (!fb)
+ if (!fb) {
+ ret = -ENOENT;
goto out;
+ }
- hdisplay = crtc->mode.hdisplay;
- vdisplay = crtc->mode.vdisplay;
-
- if (crtc->invert_dimensions)
- swap(hdisplay, vdisplay);
-
- if (hdisplay > fb->width ||
- vdisplay > fb->height ||
- crtc->x > fb->width - hdisplay ||
- crtc->y > fb->height - vdisplay) {
- DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
- fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
- crtc->invert_dimensions ? " (inverted)" : "");
- ret = -ENOSPC;
+ ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+ if (ret)
goto out;
- }
if (crtc->fb->pixel_format != fb->pixel_format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
@@ -3788,7 +3830,8 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
*bpp = 32;
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format\n");
+ DRM_DEBUG_KMS("unsupported pixel format %s\n",
+ drm_get_format_name(format));
*depth = 0;
*bpp = 0;
break;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b5404..01361aba033 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
@@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
{
struct drm_display_mode *mode;
- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+ DRM_MODE_FLAG_3D_MASK))
return;
list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+ !(flags & DRM_MODE_FLAG_3D_MASK))
+ mode->status = MODE_NO_STEREO;
}
return;
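Stereo modes only survive this validation pass on connectors that opt in via connector->stereo_allowed. A hypothetical driver would set the flag when it registers an HDMI connector, roughly like this (the foo_* names are placeholders, not from this patch):

#include <drm/drm_crtc.h>

static int foo_hdmi_connector_init(struct drm_device *dev,
				   struct drm_connector *connector,
				   const struct drm_connector_funcs *funcs)
{
	int ret;

	ret = drm_connector_init(dev, connector, funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;

	connector->interlace_allowed = true;
	connector->stereo_allowed = true;	/* expose DRM_MODE_FLAG_3D_* modes */
	return 0;
}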
@@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (connector->stereo_allowed)
+ mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
@@ -395,22 +405,25 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
+ bool saved_enabled;
struct drm_encoder *encoder;
bool ret = true;
+ saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
adjusted_mode = drm_mode_duplicate(dev, mode);
- if (!adjusted_mode)
+ if (!adjusted_mode) {
+ crtc->enabled = saved_enabled;
return false;
+ }
- saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -529,7 +542,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
- crtc->hwmode = saved_hwmode;
+ crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@@ -557,6 +570,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
continue;
connector->encoder = NULL;
+
+ /*
+ * drm_helper_disable_unused_functions() ought to be
+ * doing this, but since we've decoupled the encoder
+ * from the connector above, the required connection
+ * between them is henceforth no longer available.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
}
}
@@ -583,9 +604,8 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+ struct drm_crtc *new_crtc;
struct drm_encoder *save_encoders, *new_encoder, *encoder;
- struct drm_framebuffer *old_fb = NULL;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
@@ -621,38 +641,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
dev = set->crtc->dev;
- /* Allocate space for the backup of all (non-pointer) crtc, encoder and
- * connector data. */
- save_crtcs = kzalloc(dev->mode_config.num_crtc *
- sizeof(struct drm_crtc), GFP_KERNEL);
- if (!save_crtcs)
- return -ENOMEM;
-
+ /*
+ * Allocate space for the backup of all (non-pointer) encoder and
+ * connector data.
+ */
save_encoders = kzalloc(dev->mode_config.num_encoder *
sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders) {
- kfree(save_crtcs);
+ if (!save_encoders)
return -ENOMEM;
- }
save_connectors = kzalloc(dev->mode_config.num_connector *
sizeof(struct drm_connector), GFP_KERNEL);
if (!save_connectors) {
- kfree(save_crtcs);
kfree(save_encoders);
return -ENOMEM;
}
- /* Copy data. Note that driver private data is not affected.
+ /*
+ * Copy data. Note that driver private data is not affected.
* Should anything bad happen, only the expected state is
* restored, not the driver's personal bookkeeping.
*/
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- save_crtcs[count++] = *crtc;
- }
-
- count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
save_encoders[count++] = *encoder;
}
@@ -775,19 +785,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
if (mode_changed) {
- set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
- if (set->crtc->enabled) {
+ if (drm_helper_crtc_in_use(set->crtc)) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
- old_fb = set->crtc->fb;
set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
- old_fb)) {
+ save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
- set->crtc->fb = old_fb;
+ set->crtc->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
@@ -802,31 +810,24 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
-
- old_fb = set->crtc->fb;
- if (set->crtc->fb != set->fb)
- set->crtc->fb = set->fb;
+ set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
- set->x, set->y, old_fb);
+ set->x, set->y, save_set.fb);
if (ret != 0) {
- set->crtc->fb = old_fb;
+ set->crtc->x = save_set.x;
+ set->crtc->y = save_set.y;
+ set->crtc->fb = save_set.fb;
goto fail;
}
}
kfree(save_connectors);
kfree(save_encoders);
- kfree(save_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- *crtc = save_crtcs[count++];
- }
-
- count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
*encoder = save_encoders[count++];
}
@@ -844,7 +845,6 @@ fail:
kfree(save_connectors);
kfree(save_encoders);
- kfree(save_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1125,14 +1125,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
-void drm_helper_hpd_irq_event(struct drm_device *dev)
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
if (!dev->mode_config.poll_enabled)
- return;
+ return false;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1157,5 +1157,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
if (changed)
drm_kms_helper_hotplug_event(dev);
+
+ return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
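drm_helper_hpd_irq_event() now reports whether any connector actually changed state, so drivers can skip follow-up work on spurious hotplug pulses. A hedged sketch of a caller (the threaded handler name is hypothetical):

#include <linux/interrupt.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
{
	struct drm_device *dev = arg;

	if (drm_helper_hpd_irq_event(dev))
		DRM_DEBUG_KMS("hotplug: connector status changed\n");
	/* false: nothing was re-detected differently, nothing to do */

	return IRQ_HANDLED;
}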
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index a05087cf846..b4b51d46f33 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -42,7 +42,7 @@
* Initialization, etc.
**************************************************/
-static struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
@@ -84,7 +84,7 @@ static const struct file_operations drm_debugfs_fops = {
* Create a given set of debugfs files represented by an array of
* struct drm_info_list entries in the given root directory.
*/
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
+int drm_debugfs_create_files(const struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
@@ -188,7 +188,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
*
* Remove all debugfs entries created by debugfs_init().
*/
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e19662716..9e978aae897 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
/* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
return (l >> s) & 0xf;
}
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fe58d0833a1..d9137e49c4e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
-
- DRM_DEBUG("\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
-
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
- drm_legacy_dma_takedown(dev);
-
- dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_dev_reinit(dev);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
-}
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
return -ENODEV;
atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -473,7 +403,7 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
- DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->device),
file_priv->authenticated, cmd, nr);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 830f7501cb4..fb7cf0e796f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -458,6 +458,15 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
+/*
+ * These more or less come from the DMT spec. The 720x400 modes are
+ * inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
+ * modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
+ * should be 1152x870, again for the Mac, but instead we use the x864 DMT
+ * mode.
+ *
+ * The DMT modes have been fact-checked; the rest are mild guesses.
+ */
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
@@ -560,7 +569,7 @@ static const struct minimode est3_modes[] = {
{ 1600, 1200, 75, 0 },
{ 1600, 1200, 85, 0 },
{ 1792, 1344, 60, 0 },
- { 1792, 1344, 85, 0 },
+ { 1792, 1344, 75, 0 },
{ 1856, 1392, 60, 0 },
{ 1856, 1392, 75, 0 },
{ 1920, 1200, 60, 1 },
@@ -1264,6 +1273,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+ return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
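drm_edid_duplicate() is a thin convenience around kmemdup() covering the base block plus all reported extension blocks. A hedged usage sketch for a driver that wants to keep its own copy of a sink's EDID (the wrapper is illustrative):

/* Cache a private copy of @edid; free it later with kfree(). */
static struct edid *foo_cache_edid(const struct edid *edid)
{
	struct edid *copy = drm_edid_duplicate(edid);

	if (!copy)
		return NULL;	/* allocation failure */
	return copy;	/* (edid->extensions + 1) * EDID_LENGTH bytes copied */
}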
/*** EDID parsing ***/
/**
@@ -1308,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
}
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
/**
* edid_fixup_preferred - set preferred modes based on quirk list
@@ -1323,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
{
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
+ int cur_vrefresh, preferred_vrefresh;
if (list_empty(&connector->probed_modes))
return;
@@ -1345,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
+ cur_vrefresh = cur_mode->vrefresh ?
+ cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+ preferred_vrefresh = preferred_mode->vrefresh ?
+ preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
- MODE_REFRESH_DIFF(cur_mode, target_refresh) <
- MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+ MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
preferred_mode = cur_mode;
}
}
@@ -2068,7 +2094,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
u8 *est = ((u8 *)timing) + 5;
for (i = 0; i < 6; i++) {
- for (j = 7; j > 0; j--) {
+ for (j = 7; j >= 0; j--) {
m = (i * 8) + (7 - j);
if (m >= ARRAY_SIZE(est3_modes))
break;
@@ -2404,7 +2430,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks(to_match, cea_mode))
+ drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
return mode + 1;
}
return 0;
@@ -2453,7 +2479,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks(to_match, hdmi_mode))
+ drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
return mode + 1;
}
return 0;
@@ -2507,6 +2533,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
if (!newmode)
continue;
+ /* Carry over the stereo flags */
+ newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
/*
* The current mode could be either variant. Make
* sure to pick the "other" clock for the new mode.
@@ -2553,20 +2582,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
return modes;
}
+struct stereo_mandatory_mode {
+ int width, height, vrefresh;
+ unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+ { 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+ { 1920, 1080, 50,
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+ { 1920, 1080, 60,
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+ { 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+ { 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+ const struct stereo_mandatory_mode *stereo_mode)
+{
+ unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ return mode->hdisplay == stereo_mode->width &&
+ mode->vdisplay == stereo_mode->height &&
+ interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ const struct drm_display_mode *mode;
+ struct list_head stereo_modes;
+ int modes = 0, i;
+
+ INIT_LIST_HEAD(&stereo_modes);
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+ const struct stereo_mandatory_mode *mandatory;
+ struct drm_display_mode *new_mode;
+
+ if (!stereo_match_mandatory(mode,
+ &stereo_mandatory_modes[i]))
+ continue;
+
+ mandatory = &stereo_mandatory_modes[i];
+ new_mode = drm_mode_duplicate(dev, mode);
+ if (!new_mode)
+ continue;
+
+ new_mode->flags |= mandatory->flags;
+ list_add_tail(&new_mode->head, &stereo_modes);
+ modes++;
+ }
+ }
+
+ list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+ return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *newmode;
+
+ vic--; /* VICs start at 1 */
+ if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+ DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+ return 0;
+ }
+
+ newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+ if (!newmode)
+ return 0;
+
+ drm_mode_probed_add(connector, newmode);
+
+ return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+ const u8 *video_db, u8 video_len, u8 video_index)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *newmode;
+ int modes = 0;
+ u8 cea_mode;
+
+ if (video_db == NULL || video_index > video_len)
+ return 0;
+
+ /* CEA modes are numbered 1..127 */
+ cea_mode = (video_db[video_index] & 127) - 1;
+ if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+ return 0;
+
+ if (structure & (1 << 0)) {
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ if (structure & (1 << 6)) {
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ if (structure & (1 << 8)) {
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ return modes;
+}
+
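add_3d_struct_modes() only inspects bits 0, 6 and 8 of 3D_Structure_ALL, which correspond to frame packing, top-and-bottom and side-by-side (half) in the HDMI 1.4a VSDB layout. A small worked decode, assuming that layout (the helper is purely illustrative):

#include <linux/types.h>

/* e.g. structure_all == 0x0141 sets bits 0, 6 and 8: all three layouts apply */
static unsigned int foo_count_3d_structures(u16 structure_all)
{
	unsigned int n = 0;

	if (structure_all & (1 << 0))	/* frame packing */
		n++;
	if (structure_all & (1 << 6))	/* top-and-bottom */
		n++;
	if (structure_all & (1 << 8))	/* side-by-side (half) */
		n++;
	return n;
}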
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
* @db: start of the CEA vendor specific block
* @len: length of the CEA block payload, ie. one can access up to db[len]
*
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
*/
static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+ const u8 *video_db, u8 video_len)
{
- struct drm_device *dev = connector->dev;
- int modes = 0, offset = 0, i;
- u8 vic_len;
+ int modes = 0, offset = 0, i, multi_present = 0;
+ u8 vic_len, hdmi_3d_len = 0;
+ u16 mask;
+ u16 structure_all;
if (len < 8)
goto out;
@@ -2585,30 +2745,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
/* the declared length is not long enough for the first 2 bytes
* of additional video format capabilities */
- offset += 2;
- if (len < (8 + offset))
+ if (len < (8 + offset + 2))
goto out;
+ /* 3D_Present */
+ offset++;
+ if (db[8 + offset] & (1 << 7)) {
+ modes += add_hdmi_mandatory_stereo_modes(connector);
+
+ /* 3D_Multi_present */
+ multi_present = (db[8 + offset] & 0x60) >> 5;
+ }
+
+ offset++;
vic_len = db[8 + offset] >> 5;
+ hdmi_3d_len = db[8 + offset] & 0x1f;
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
- struct drm_display_mode *newmode;
u8 vic;
vic = db[9 + offset + i];
+ modes += add_hdmi_mode(connector, vic);
+ }
+ offset += 1 + vic_len;
- vic--; /* VICs start at 1 */
- if (vic >= ARRAY_SIZE(edid_4k_modes)) {
- DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
- continue;
- }
+ if (!(multi_present == 1 || multi_present == 2))
+ goto out;
- newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
- if (!newmode)
- continue;
+ if ((multi_present == 1 && len < (9 + offset)) ||
+ (multi_present == 2 && len < (11 + offset)))
+ goto out;
- drm_mode_probed_add(connector, newmode);
- modes++;
+ if ((multi_present == 1 && hdmi_3d_len < 2) ||
+ (multi_present == 2 && hdmi_3d_len < 4))
+ goto out;
+
+ /* 3D_Structure_ALL */
+ structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+ /* check if 3D_MASK is present */
+ if (multi_present == 2)
+ mask = (db[10 + offset] << 8) | db[11 + offset];
+ else
+ mask = 0xffff;
+
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i))
+ modes += add_3d_struct_modes(connector,
+ structure_all,
+ video_db,
+ video_len, i);
}
out:
@@ -2668,8 +2854,8 @@ static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
const u8 *cea = drm_find_cea_extension(edid);
- const u8 *db;
- u8 dbl;
+ const u8 *db, *hdmi = NULL, *video = NULL;
+ u8 dbl, hdmi_len, video_len = 0;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2868,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
db = &cea[i];
dbl = cea_db_payload_len(db);
- if (cea_db_tag(db) == VIDEO_BLOCK)
- modes += do_cea_modes(connector, db + 1, dbl);
- else if (cea_db_is_hdmi_vsdb(db))
- modes += do_hdmi_vsdb_modes(connector, db, dbl);
+ if (cea_db_tag(db) == VIDEO_BLOCK) {
+ video = db + 1;
+ video_len = dbl;
+ modes += do_cea_modes(connector, video, dbl);
+ }
+ else if (cea_db_is_hdmi_vsdb(db)) {
+ hdmi = db;
+ hdmi_len = dbl;
+ }
}
}
+ /*
+ * We parse the HDMI VSDB after having added the cea modes as we will
+ * be patching their flags when the sink supports stereo 3D.
+ */
+ if (hdmi)
+ modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+ video_len);
+
return modes;
}
@@ -3288,6 +3487,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (drm_mode_width(mode) == hpref &&
+ drm_mode_height(mode) == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@@ -3321,6 +3533,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+ u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ return HDMI_3D_STRUCTURE_FRAME_PACKING;
+ case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+ case DRM_MODE_FLAG_3D_L_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH;
+ case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+ case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+ return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+ default:
+ return HDMI_3D_STRUCTURE_INVALID;
+ }
+}
+
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
@@ -3338,20 +3577,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode)
{
int err;
+ u32 s3d_flags;
u8 vic;
if (!frame || !mode)
return -EINVAL;
vic = drm_match_hdmi_mode(mode);
- if (!vic)
+ s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ if (!vic && !s3d_flags)
+ return -EINVAL;
+
+ if (vic && s3d_flags)
return -EINVAL;
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
- frame->vic = vic;
+ if (vic)
+ frame->vic = vic;
+ else
+ frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
}
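Once the infoframe is filled in, a driver typically packs it into the raw header-plus-payload wire format with the helpers from <linux/hdmi.h> before writing it to hardware. A hedged sketch (buffer management and the function name are illustrative):

#include <linux/hdmi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

static ssize_t foo_pack_vendor_infoframe(const struct drm_display_mode *mode,
					 u8 *buf, size_t len)
{
	struct hdmi_vendor_infoframe frame;
	int ret;

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
	if (ret < 0)
		return ret;	/* neither an HDMI 4k VIC nor a stereo mode */

	return hdmi_vendor_infoframe_pack(&frame, buf, len);	/* bytes packed */
}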
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42bbfb7..9081172ef05 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
#define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1920x1080.bin",
};
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
},
};
+static int edid_size(const u8 *edid, int data_size)
+{
+ if (data_size < EDID_LENGTH)
+ return 0;
+
+ return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
static u8 *edid_load(struct drm_connector *connector, const char *name,
const char *connector_name)
{
- const struct firmware *fw;
- struct platform_device *pdev;
- u8 *fwdata = NULL, *edid, *new_edid;
- int fwsize, expected;
- int builtin = 0, err = 0;
+ const struct firmware *fw = NULL;
+ const u8 *fwdata;
+ u8 *edid;
+ int fwsize, builtin;
int i, valid_extensions = 0;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
- pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
- if (IS_ERR(pdev)) {
- DRM_ERROR("Failed to register EDID firmware platform device "
- "for connector \"%s\"\n", connector_name);
- err = -EINVAL;
- goto out;
- }
-
- err = request_firmware(&fw, name, &pdev->dev);
- platform_device_unregister(pdev);
-
- if (err) {
- i = 0;
- while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
- i++;
- if (i < GENERIC_EDIDS) {
- err = 0;
- builtin = 1;
+ builtin = 0;
+ for (i = 0; i < GENERIC_EDIDS; i++) {
+ if (strcmp(name, generic_edid_name[i]) == 0) {
fwdata = generic_edid[i];
fwsize = sizeof(generic_edid[i]);
+ builtin = 1;
+ break;
}
}
+ if (!builtin) {
+ struct platform_device *pdev;
+ int err;
- if (err) {
- DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
- name, err);
- goto out;
- }
+ pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ DRM_ERROR("Failed to register EDID firmware platform device "
+ "for connector \"%s\"\n", connector_name);
+ return ERR_CAST(pdev);
+ }
+
+ err = request_firmware(&fw, name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+ name, err);
+ return ERR_PTR(err);
+ }
- if (fwdata == NULL) {
- fwdata = (u8 *) fw->data;
+ fwdata = fw->data;
fwsize = fw->size;
}
- expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
- if (expected != fwsize) {
+ if (edid_size(fwdata, fwsize) != fwsize) {
DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
- "(expected %d, got %d)\n", name, expected, (int) fwsize);
- err = -EINVAL;
- goto relfw_out;
+ "(expected %d, got %d\n", name,
+ edid_size(fwdata, fwsize), (int)fwsize);
+ edid = ERR_PTR(-EINVAL);
+ goto out;
}
edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
if (edid == NULL) {
- err = -ENOMEM;
- goto relfw_out;
+ edid = ERR_PTR(-ENOMEM);
+ goto out;
}
if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
- err = -EINVAL;
- goto relfw_out;
+ edid = ERR_PTR(-EINVAL);
+ goto out;
}
for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
}
if (valid_extensions != edid[0x7e]) {
+ u8 *new_edid;
+
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
DRM_INFO("Found %d valid extensions instead of %d in EDID data "
"\"%s\" for connector \"%s\"\n", valid_extensions,
edid[0x7e], name, connector_name);
edid[0x7e] = valid_extensions;
+
new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
- GFP_KERNEL);
- if (new_edid == NULL) {
- err = -ENOMEM;
- kfree(edid);
- goto relfw_out;
- }
- edid = new_edid;
+ GFP_KERNEL);
+ if (new_edid)
+ edid = new_edid;
}
DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
name, connector_name);
-relfw_out:
- release_firmware(fw);
-
out:
- if (err)
- return ERR_PTR(err);
-
+ if (fw)
+ release_firmware(fw);
return edid;
}
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 0cfb60f5476..d18b88b755c 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
goto fail;
}
- if (!client->driver) {
+ if (!client->dev.driver) {
err = -ENODEV;
goto fail_unregister;
}
- module = client->driver->driver.owner;
+ module = client->dev.driver->owner;
if (!try_module_get(module)) {
err = -ENODEV;
goto fail_unregister;
@@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
encoder->bus_priv = client;
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+ encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
@@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
+ struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
encoder->bus_priv = NULL;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257..0a19401aff8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
static LIST_HEAD(kernel_fb_helper_list);
/**
@@ -844,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
- struct drm_crtc *crtc;
int ret = 0;
int i;
@@ -855,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
}
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *best_crtc;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
connector = fb_helper_conn->connector;
best_crtcs[n] = NULL;
- best_crtc = NULL;
best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
@@ -1413,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
- best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
- int count = 0;
- u32 max_width, max_height, bpp_sel;
+ u32 max_width, max_height;
if (!fb_helper->fb)
return 0;
@@ -1596,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
max_width = fb_helper->fb->width;
max_height = fb_helper->fb->height;
- bpp_sel = fb_helper->fb->bits_per_pixel;
- count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
- max_height);
+ drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index e788882d902..f9c7fa3d001 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -34,7 +34,7 @@
*/
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
- if (kfifo_put(&work->fifo, (const void **)&val)) {
+ if (kfifo_put(&work->fifo, val)) {
atomic_inc(&work->pending);
} else {
DRM_ERROR("%s fifo full!\n", work->name);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277d703..c5b929c3f77 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(inode, filp, dev);
if (retcode)
goto err_undo;
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
if (need_setup) {
retcode = drm_setup(dev);
if (retcode)
@@ -148,7 +147,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int minor_id = iminor(inode);
int err = -ENODEV;
- const struct file_operations *old_fops;
+ const struct file_operations *new_fops;
DRM_DEBUG("\n");
@@ -163,18 +162,13 @@ int drm_stub_open(struct inode *inode, struct file *filp)
if (drm_device_is_unplugged(dev))
goto out;
- old_fops = filp->f_op;
- filp->f_op = fops_get(dev->driver->fops);
- if (filp->f_op == NULL) {
- filp->f_op = old_fops;
+ new_fops = fops_get(dev->driver->fops);
+ if (!new_fops)
goto out;
- }
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
+ replace_fops(filp, new_fops);
+ if (filp->f_op->open)
+ err = filp->f_op->open(inode, filp);
out:
mutex_unlock(&drm_global_mutex);
return err;
@@ -240,7 +234,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
+ priv->always_authenticated = capable(CAP_SYS_ADMIN);
+ priv->authenticated = priv->always_authenticated;
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
@@ -379,13 +374,80 @@ static void drm_events_release(struct drm_file *file_priv)
}
/* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+ list_del(&e->link);
e->destroy(e);
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+ struct drm_vma_entry *vma, *vma_temp;
+
+ DRM_DEBUG("\n");
+
+ if (dev->driver->lastclose)
+ dev->driver->lastclose(dev);
+ DRM_DEBUG("driver lastclose completed\n");
+
+ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+
+ /* Clear vma list (only built for debugging) */
+ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+ list_del(&vma->head);
+ kfree(vma);
+ }
+
+ drm_legacy_dma_takedown(dev);
+
+ dev->dev_mapping = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_legacy_dev_reinit(dev);
+
+ DRM_DEBUG("lastclose completed\n");
+ return 0;
+}
+
+/**
* Release file.
*
* \param inode device inode
@@ -454,7 +516,6 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&pos->head);
kfree(pos);
- --dev->ctx_count;
}
}
}
@@ -468,7 +529,7 @@ int drm_release(struct inode *inode, struct file *filp)
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
- temp->authenticated = 0;
+ temp->authenticated = temp->always_authenticated;
}
/**
@@ -516,7 +577,6 @@ int drm_release(struct inode *inode, struct file *filp)
* End inline drm_release
*/
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc972..4761adedad2 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
- struct drm_gem_object *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- goto free;
-
- if (drm_gem_object_init(dev, obj, size) != 0)
- goto free;
-
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
- goto fput;
- }
- return obj;
-fput:
- /* Object_init mangles the global counters - readjust them. */
- fput(obj->filp);
-free:
- kfree(obj);
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f7311162a61..3d2e91c4d78 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
struct drm_global_item *item = &glob[ref->global_type];
- void *object;
mutex_lock(&item->mutex);
if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
}
++item->refcount;
ref->object = item->object;
- object = item->object;
mutex_unlock(&item->mutex);
return 0;
out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 53298320080..7d5a152eeb0 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
+ crtc, atomic_read(&dev->vblank[crtc].refcount));
seq_printf(m, "CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
+ crtc, dev->vblank[crtc].last_wait);
seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
+ crtc, dev->vblank[crtc].inmodeset);
}
mutex_unlock(&dev->struct_mutex);
return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2855a..dffc836144c 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/**
+ * Set client capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_set_client_cap *req = data;
+
+ switch (req->capability) {
+ case DRM_CLIENT_CAP_STEREO_3D:
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->stereo_allowed = req->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* Setversion ioctl.
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a32f0..64c34d5876f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) ( \
- (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
- ((count) % DRM_VBLANKTIME_RBSIZE)])
+#define vblanktimestamp(dev, crtc, count) \
+ ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
*/
static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
{
- memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
- DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+ memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
}
/*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
dev->driver->disable_vblank(dev, crtc);
- dev->vblank_enabled[crtc] = 0;
+ dev->vblank[crtc].enabled = false;
/* No further vblank irq's will be processed after
* this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* delayed gpu counter increment.
*/
do {
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+ } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
if (!count)
vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
*/
- vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ vblcount = atomic_read(&dev->vblank[crtc].count);
diff_ns = timeval_to_ns(&tvblank) -
timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hope for the best.
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
- atomic_inc(&dev->_vblank_count[crtc]);
+ atomic_inc(&dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
}
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
for (i = 0; i < dev->num_crtcs; i++) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- dev->vblank_enabled[i]) {
+ if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+ dev->vblank[i].enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
vblank_disable_and_save(dev, i);
}
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
vblank_disable_fn((unsigned long)dev);
- kfree(dev->vbl_queue);
- kfree(dev->_vblank_count);
- kfree(dev->vblank_refcount);
- kfree(dev->vblank_enabled);
- kfree(dev->last_vblank);
- kfree(dev->last_vblank_wait);
- kfree(dev->vblank_inmodeset);
- kfree(dev->_vblank_time);
+ kfree(dev->vblank);
dev->num_crtcs = 0;
}
@@ -221,42 +212,14 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
dev->num_crtcs = num_crtcs;
- dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vbl_queue)
+ dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
goto err;
- dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
- if (!dev->_vblank_count)
- goto err;
-
- dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vblank_refcount)
- goto err;
-
- dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_enabled)
- goto err;
+ for (i = 0; i < num_crtcs; i++)
+ init_waitqueue_head(&dev->vblank[i].queue);
- dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank)
- goto err;
-
- dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank_wait)
- goto err;
-
- dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_inmodeset)
- goto err;
-
- dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
- sizeof(struct timeval), GFP_KERNEL);
- if (!dev->_vblank_time)
- goto err;
-
- DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+ DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
/* Driver specific high-precision vblank timestamping supported? */
if (dev->driver->get_vblank_timestamp)
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
else
DRM_INFO("No driver support for vblank timestamp query.\n");
- /* Zero per-crtc vblank stuff */
- for (i = 0; i < num_crtcs; i++) {
- init_waitqueue_head(&dev->vbl_queue[i]);
- atomic_set(&dev->_vblank_count[i], 0);
- atomic_set(&dev->vblank_refcount[i], 0);
- }
+ dev->vblank_disable_allowed = false;
- dev->vblank_disable_allowed = 0;
return 0;
err:
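The per-CRTC arrays replaced throughout this file are presumably folded into a single array of structs declared in drmP.h (not part of this hunk); a sketch consistent with the field accesses above:

	struct drm_vblank_crtc {
		wait_queue_head_t queue;	/* vblank wait queue, was dev->vbl_queue[i] */
		struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* timestamp ring, was _vblank_time */
		atomic_t count;			/* vblank counter, was _vblank_count */
		atomic_t refcount;		/* number of users of vblank interrupts */
		u32 last;			/* last counter value, for wraparound handling */
		u32 last_wait;			/* last vblank seqno waited on */
		unsigned int inmodeset;		/* display driver is setting mode */
		bool enabled;			/* so we don't call enable more than once per disable */
	};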
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
- dev->irq_enabled = 1;
+ dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
- int irq_enabled, i;
+ bool irq_enabled;
+ int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
/*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] =
+ DRM_WAKEUP(&dev->vblank[i].queue);
+ dev->vblank[i].enabled = false;
+ dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -497,8 +455,8 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
- /* Fields of interlaced scanout modes are only halve a frame duration.
- * Double the dotclock to get halve the frame-/line-/pixelduration.
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixelduration.
*/
if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
dotclock *= 2;
@@ -628,24 +586,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* code gets preempted or delayed for some reason.
*/
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
- /* Disable preemption to make it very likely to
- * succeed in the first iteration even on PREEMPT_RT kernel.
+ /*
+ * Get vertical and horizontal scanout position vpos, hpos,
+ * and bounding timestamps stime, etime, pre/post query.
*/
- preempt_disable();
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+ &hpos, &stime, &etime);
- /* Get system timestamp before query. */
- stime = ktime_get();
-
- /* Get vertical and horizontal scanout pos. vpos, hpos. */
- vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
-
- /* Get system timestamp after query. */
- etime = ktime_get();
+ /*
+ * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
+ * CLOCK_REALTIME is requested.
+ */
if (!drm_timestamp_monotonic)
mono_time_offset = ktime_get_monotonic_offset();
- preempt_enable();
-
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -653,6 +607,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
+ /* Compute uncertainty in timestamp of scanout position query. */
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
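Note the widened driver hook implied above: the bounding timestamps stime/etime are now taken by the driver itself rather than around the call, which is what makes the preempt_disable() dance unnecessary. The presumed new prototype in struct drm_driver (declared in drmP.h, not shown in this diff):

	int (*get_scanout_position)(struct drm_device *dev, int crtc,
				    int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime);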
@@ -795,7 +750,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
- return atomic_read(&dev->_vblank_count[crtc]);
+ return atomic_read(&dev->vblank[crtc].count);
}
EXPORT_SYMBOL(drm_vblank_count);
@@ -824,10 +779,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
* a seqlock.
*/
do {
- cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ cur_vblank = atomic_read(&dev->vblank[crtc].count);
*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
smp_rmb();
- } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+ } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
return cur_vblank;
}
@@ -914,12 +869,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
/* Deal with counter wrap */
- diff = cur_vblank - dev->last_vblank[crtc];
- if (cur_vblank < dev->last_vblank[crtc]) {
+ diff = cur_vblank - dev->vblank[crtc].last;
+ if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +885,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* reinitialize delayed at next vblank interrupt in that case.
*/
if (rc) {
- tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ tslot = atomic_read(&dev->vblank[crtc].count) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
smp_mb__before_atomic_inc();
- atomic_add(diff, &dev->_vblank_count[crtc]);
+ atomic_add(diff, &dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
}
@@ -957,9 +912,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
- if (!dev->vblank_enabled[crtc]) {
+ if (!dev->vblank[crtc].enabled) {
/* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
@@ -970,16 +925,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
crtc, ret);
if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
+ atomic_dec(&dev->vblank[crtc].refcount);
else {
- dev->vblank_enabled[crtc] = 1;
+ dev->vblank[crtc].enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
} else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
+ if (!dev->vblank[crtc].enabled) {
+ atomic_dec(&dev->vblank[crtc].refcount);
ret = -EINVAL;
}
}
@@ -999,10 +954,10 @@ EXPORT_SYMBOL(drm_vblank_get);
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+ BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank_disable_timer,
jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +980,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ DRM_WAKEUP(&dev->vblank[crtc].queue);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1022,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 0x1;
+ if (!dev->vblank[crtc].inmodeset) {
+ dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank_inmodeset[crtc] |= 0x2;
+ dev->vblank[crtc].inmodeset |= 0x2;
}
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1038,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
if (!dev->num_crtcs)
return;
- if (dev->vblank_inmodeset[crtc]) {
+ if (dev->vblank[crtc].inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
+ dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- if (dev->vblank_inmodeset[crtc] & 0x2)
+ if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
- dev->vblank_inmodeset[crtc] = 0;
+ dev->vblank[crtc].inmodeset = 0;
}
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1243,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
- dev->last_vblank_wait[crtc] = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+ dev->vblank[crtc].last_wait = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled));
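The unsigned comparison above is the usual wraparound-safe "sequence reached" test: the difference is computed modulo 2^32 and accepted if it is at most 2^23. As a worked example, if the requested sequence is 0xfffffff0 and the counter has since wrapped to 0x00000005, the difference is 0x15 (21), which is <= 1 << 23, so the wait completes even though the raw counter value is numerically smaller than the request.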
@@ -1367,7 +1322,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/* Vblank irq handling disabled. Nothing to do. */
- if (!dev->vblank_enabled[crtc]) {
+ if (!dev->vblank[crtc].enabled) {
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return false;
}
@@ -1377,7 +1332,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
*/
/* Get current timestamp and count. */
- vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ vblcount = atomic_read(&dev->vblank[crtc].count);
drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1356,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* the timestamp computed above.
*/
smp_mb__before_atomic_inc();
- atomic_inc(&dev->_vblank_count[crtc]);
+ atomic_inc(&dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
}
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ DRM_WAKEUP(&dev->vblank[crtc].queue);
drm_handle_vblank_events(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96d609..f6452682141 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
if (drm_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb62b75..85071a1c454 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
*
* LOCKING:
* None.
*
* Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ * interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ * buffers containing two eyes (the timings are only adjusted when needed,
+ * e.g. for "frame packing" or "side by side full").
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
+ p->crtc_clock = p->clock;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vtotal *= p->vscan;
}
+ if (adjust_flags & CRTC_STEREO_DOUBLE) {
+ unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ p->crtc_clock *= 2;
+ p->crtc_vdisplay += p->crtc_vtotal;
+ p->crtc_vsync_start += p->crtc_vtotal;
+ p->crtc_vsync_end += p->crtc_vtotal;
+ p->crtc_vtotal += p->crtc_vtotal;
+ break;
+ }
+ }
+
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
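As a worked example (not part of the patch), expanding a probed 1080p24 mode for HDMI frame packing would go through the new branch above; foo_make_frame_packed() and probed_mode are hypothetical placeholders for however a driver obtains and stores the mode:

	/* Hypothetical helper: derive frame-packed CRTC timings from a 2D mode. */
	static struct drm_display_mode *
	foo_make_frame_packed(struct drm_device *dev,
			      const struct drm_display_mode *probed_mode)
	{
		struct drm_display_mode *mode = drm_mode_duplicate(dev, probed_mode);

		if (!mode)
			return NULL;

		mode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
		drm_mode_set_crtcinfo(mode, CRTC_STEREO_DOUBLE);
		/* in:  clock 74250, vdisplay 1080, vtotal 1125 */
		/* out: crtc_clock 148500, crtc_vdisplay 2205, crtc_vtotal 2250 */
		return mode;
	}

which matches the HDMI 1.4a frame-packing timings for 1080p24.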
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock)
return false;
- return drm_mode_equal_no_clocks(mode1, mode2);
+ if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+ (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+ return false;
+
+ return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
/**
- * drm_mode_equal_no_clocks - test modes for equality
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
* None.
*
* Check to see if @mode1 and @mode2 are equivalent, but
- * don't check the pixel clocks.
+ * don't check the pixel clocks nor the stereo layout.
*
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
{
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
- mode1->flags == mode2->flags)
+ (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+ (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
return false;
}
-EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
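A hedged usage sketch: with the stereo-agnostic helper, a caller can ask whether one mode is merely a 3D packing of another; mode_3d and mode_2d are placeholder variables:

	if ((mode_3d->flags & DRM_MODE_FLAG_3D_MASK) &&
	    drm_mode_equal_no_clocks_no_stereo(mode_3d, mode_2d))
		DRM_DEBUG_KMS("stereo variant of an existing 2D mode\n");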
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1014,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type |= pmode->type;
+ mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee6eee..02679793c9e 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -80,7 +80,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
+ SetPageReserved(virt_to_page((void *)addr));
}
return dmah;
@@ -103,7 +103,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
+ ClearPageReserved(virt_to_page((void *)addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
DRM_DEBUG("\n");
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = drm_dev_alloc(driver, &pdev->dev);
if (!dev)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret)
- goto err_g1;
+ goto err_free;
dev->pdev = pdev;
- dev->dev = &pdev->dev;
-
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
- mutex_lock(&drm_global_mutex);
-
- if ((ret = drm_fill_in_dev(dev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_g21;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_pci;
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
- mutex_unlock(&drm_global_mutex);
return 0;
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (dev->render)
- drm_put_minor(&dev->render);
-err_g21:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
+err_pci:
pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
+err_free:
+ drm_dev_free(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6ba4c..fc24fee8ec8 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
DRM_DEBUG("\n");
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = drm_dev_alloc(driver, &platdev->dev);
if (!dev)
return -ENOMEM;
dev->platformdev = platdev;
- dev->dev = &platdev->dev;
- mutex_lock(&drm_global_mutex);
-
- ret = drm_fill_in_dev(dev, NULL, driver);
-
- if (ret) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g1;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g1;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_g11;
- }
-
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ ret = drm_dev_register(dev, 0);
if (ret)
- goto err_g2;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, 0);
- if (ret)
- goto err_g3;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g3;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- mutex_unlock(&drm_global_mutex);
+ goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
return 0;
-err_g3:
- drm_put_minor(&dev->primary);
-err_g2:
- if (dev->render)
- drm_put_minor(&dev->render);
-err_g11:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
+err_free:
+ drm_dev_free(dev);
return ret;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470f7b3..56805c39c90 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len, offset;
+ u32 len;
int pg_index;
dma_addr_t addr;
pg_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
len = sg->length;
- offset = sg->offset;
page = sg_page(sg);
addr = sg_dma_address(sg);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d864576be..f53d5246979 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,81 +254,21 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int drm_fill_in_dev(struct drm_device *dev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- int retcode;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-
- if (drm_ht_create(&dev->map_hash, 12)) {
- return -ENOMEM;
- }
-
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- dev->driver = driver;
-
- if (dev->driver->bus->agp_init) {
- retcode = dev->driver->bus->agp_init(dev);
- if (retcode)
- goto error_out_unreg;
- }
-
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error_out_unreg;
- }
- }
-
- return 0;
-
- error_out_unreg:
- drm_lastclose(dev);
- return retcode;
-}
-EXPORT_SYMBOL(drm_fill_in_dev);
-
-
/**
- * Get a secondary minor number.
+ * drm_get_minor - Allocate and register new DRM minor
+ * @dev: DRM device
+ * @minor: Pointer to where new minor is stored
+ * @type: Type of minor
*
- * \param dev device data structure
- * \param sec-minor structure to hold the assigned minor
- * \return negative number on failure.
+ * Allocate a new minor of the given type and register it. A pointer to the new
+ * minor is returned in @minor.
+ * Caller must hold the global DRM mutex.
*
- * Search an empty entry and initialize it to the given parameters. This
- * routines assigns minor numbers to secondary heads of multi-headed cards
+ * RETURNS:
+ * 0 on success, negative error code on failure.
*/
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
+ int type)
{
struct drm_minor *new_minor;
int ret;
@@ -385,37 +325,48 @@ err_idr:
*minor = NULL;
return ret;
}
-EXPORT_SYMBOL(drm_get_minor);
/**
- * Put a secondary minor number.
+ * drm_unplug_minor - Unplug DRM minor
+ * @minor: Minor to unplug
*
- * \param sec_minor - structure to be released
- * \return always zero
+ * Unplugs the given DRM minor but keeps the object. After this returns,
+ * minor->dev is still valid, so existing open files can still access it to get
+ * device information from their drm_file objects.
+ * If the minor is already unplugged or if @minor is NULL, nothing is done.
+ * The global DRM mutex must be held by the caller.
*/
-int drm_put_minor(struct drm_minor **minor_p)
+static void drm_unplug_minor(struct drm_minor *minor)
{
- struct drm_minor *minor = *minor_p;
-
- DRM_DEBUG("release secondary minor %d\n", minor->index);
+ if (!minor || !minor->kdev)
+ return;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(minor);
#endif
drm_sysfs_device_remove(minor);
-
idr_remove(&drm_minors_idr, minor->index);
-
- kfree(minor);
- *minor_p = NULL;
- return 0;
}
-EXPORT_SYMBOL(drm_put_minor);
-static void drm_unplug_minor(struct drm_minor *minor)
+/**
+ * drm_put_minor - Destroy DRM minor
+ * @minor: Minor to destroy
+ *
+ * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
+ * is done if @minor is NULL. It is fine to call this on already unplugged
+ * minors.
+ * The global DRM mutex must be held by the caller.
+ */
+static void drm_put_minor(struct drm_minor *minor)
{
- drm_sysfs_device_remove(minor);
+ if (!minor)
+ return;
+
+ DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+ drm_unplug_minor(minor);
+ kfree(minor);
}
/**
@@ -427,66 +378,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
*/
void drm_put_dev(struct drm_device *dev)
{
- struct drm_driver *driver;
- struct drm_map_list *r_list, *list_temp;
-
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
- driver = dev->driver;
- drm_lastclose(dev);
+ drm_dev_unregister(dev);
+ drm_dev_free(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
- if (dev->driver->unload)
- dev->driver->unload(dev);
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_unplug_minor(dev->control);
+ if (dev->render)
+ drm_unplug_minor(dev->render);
+ drm_unplug_minor(dev->primary);
- if (dev->driver->bus->agp_destroy)
- dev->driver->bus->agp_destroy(dev);
+ mutex_lock(&drm_global_mutex);
- drm_vblank_cleanup(dev);
+ drm_device_set_unplugged(dev);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
- drm_ht_remove(&dev->map_hash);
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
- drm_ctxbitmap_cleanup(dev);
+/**
+ * drm_dev_alloc - Allocate new drm device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user-space and register it
+ * with other core subsystems.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
+{
+ struct drm_device *dev;
+ int ret;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
- if (dev->render)
- drm_put_minor(&dev->render);
+ dev->dev = parent;
+ dev->driver = driver;
- if (driver->driver_features & DRIVER_GEM)
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+
+ spin_lock_init(&dev->count_lock);
+ spin_lock_init(&dev->event_lock);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_free;
+
+ ret = drm_ctxbitmap_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto err_ht;
+ }
+
+ if (driver->driver_features & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto err_ctxbitmap;
+ }
+ }
+
+ return dev;
+
+err_ctxbitmap:
+ drm_ctxbitmap_cleanup(dev);
+err_ht:
+ drm_ht_remove(&dev->map_hash);
+err_free:
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+/**
+ * drm_dev_free - Free DRM device
+ * @dev: DRM device to free
+ *
+ * Free a DRM device that has previously been allocated via drm_dev_alloc().
+ * You must not use kfree() instead or you will leak memory.
+ *
+ * This must not be called once the device got registered. Use drm_put_dev()
+ * instead, which then calls drm_dev_free().
+ */
+void drm_dev_free(struct drm_device *dev)
+{
+ drm_put_minor(dev->control);
+ drm_put_minor(dev->render);
+ drm_put_minor(dev->primary);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
- drm_put_minor(&dev->primary);
+ drm_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
- list_del(&dev->driver_item);
kfree(dev->devname);
kfree(dev);
}
-EXPORT_SYMBOL(drm_put_dev);
+EXPORT_SYMBOL(drm_dev_free);
-void drm_unplug_dev(struct drm_device *dev)
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ *
+ * Register the DRM device @dev with the system, advertise the device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
- /* for a USB device */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_unplug_minor(dev->control);
- if (dev->render)
- drm_unplug_minor(dev->render);
- drm_unplug_minor(dev->primary);
+ int ret;
mutex_lock(&drm_global_mutex);
- drm_device_set_unplugged(dev);
+ if (dev->driver->bus->agp_init) {
+ ret = dev->driver->bus->agp_init(dev);
+ if (ret)
+ goto out_unlock;
+ }
- if (dev->open_count == 0) {
- drm_put_dev(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_agp;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_control_node;
+ }
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_render_node;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, flags);
+ if (ret)
+ goto err_primary_node;
}
+
+ /* setup grouping for legacy outputs */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_unload;
+ }
+
+ list_add_tail(&dev->driver_item, &dev->driver->device_list);
+
+ ret = 0;
+ goto out_unlock;
+
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+err_primary_node:
+ drm_put_minor(dev->primary);
+err_render_node:
+ drm_put_minor(dev->render);
+err_control_node:
+ drm_put_minor(dev->control);
+err_agp:
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
+out_unlock:
mutex_unlock(&drm_global_mutex);
+ return ret;
}
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_free() to free all resources.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ drm_lastclose(dev);
+
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
+
+ drm_vblank_cleanup(dev);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_rmmap(dev, r_list->map);
+
+ drm_unplug_minor(dev->control);
+ drm_unplug_minor(dev->render);
+ drm_unplug_minor(dev->primary);
+
+ list_del(&dev->driver_item);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
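Taken together with drm_dev_alloc()/drm_dev_free() above, bus glue now presumably reduces to the shape below; foo_probe()/foo_remove() are hypothetical stand-ins for the PCI, platform and USB paths rewritten earlier in this patch:

	static int foo_probe(struct device *parent, struct drm_driver *driver,
			     unsigned long flags)
	{
		struct drm_device *dev;
		int ret;

		dev = drm_dev_alloc(driver, parent);
		if (!dev)
			return -ENOMEM;

		ret = drm_dev_register(dev, flags);
		if (ret)
			drm_dev_free(dev);	/* never registered, free directly */
		return ret;
	}

	static void foo_remove(struct drm_device *dev)
	{
		drm_put_dev(dev);		/* drm_dev_unregister() + drm_dev_free() */
	}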
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b7383..bd2bca39579 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
#include <drm/drm_core.h>
#include <drm/drmP.h>
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)
static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
drm_class = NULL;
}
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff. But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
- memset(dev, 0, sizeof(struct device));
- return;
-}
-
/*
* Connector properties
*/
@@ -380,11 +366,6 @@ static struct bin_attribute edid_attr = {
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
- *
- * Note:
- * This routine should only be called *once* for each registered connector.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
{
@@ -394,29 +375,25 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
int i;
int ret;
- /* We shouldn't get called more than once for the same connector */
- BUG_ON(device_is_registered(&connector->kdev));
-
- connector->kdev.parent = &dev->primary->kdev;
- connector->kdev.class = drm_class;
- connector->kdev.release = drm_sysfs_device_release;
+ if (connector->kdev)
+ return 0;
+ connector->kdev = device_create(drm_class, dev->primary->kdev,
+ 0, connector, "card%d-%s",
+ dev->primary->index, drm_get_connector_name(connector));
DRM_DEBUG("adding \"%s\" to sysfs\n",
drm_get_connector_name(connector));
- dev_set_name(&connector->kdev, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
- ret = device_register(&connector->kdev);
-
- if (ret) {
- DRM_ERROR("failed to register connector device: %d\n", ret);
+ if (IS_ERR(connector->kdev)) {
+ DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+ ret = PTR_ERR(connector->kdev);
goto out;
}
/* Standard attributes */
for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+ ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
@@ -433,7 +410,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+ ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
@@ -442,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
break;
}
- ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+ ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
if (ret)
goto err_out_files;
@@ -453,10 +430,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
err_out_files:
for (i = 0; i < opt_cnt; i++)
- device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+ device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
for (i = 0; i < attr_cnt; i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- device_unregister(&connector->kdev);
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+ device_unregister(connector->kdev);
out:
return ret;
@@ -480,16 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
- if (!connector->kdev.parent)
+ if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
drm_get_connector_name(connector));
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
- device_unregister(&connector->kdev);
- connector->kdev.parent = NULL;
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+ sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+ device_unregister(connector->kdev);
+ connector->kdev = NULL;
}
EXPORT_SYMBOL(drm_sysfs_connector_remove);
@@ -508,10 +485,15 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
DRM_DEBUG("generating hotplug event\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@@ -523,15 +505,9 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
*/
int drm_sysfs_device_add(struct drm_minor *minor)
{
- int err;
char *minor_str;
+ int r;
- minor->kdev.parent = minor->dev->dev;
-
- minor->kdev.class = drm_class;
- minor->kdev.release = drm_sysfs_device_release;
- minor->kdev.devt = minor->device;
- minor->kdev.type = &drm_sysfs_device_minor;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
- dev_set_name(&minor->kdev, minor_str, minor->index);
-
- err = device_register(&minor->kdev);
- if (err) {
- DRM_ERROR("device add failed: %d\n", err);
- goto err_out;
+ minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
+ if (!minor->kdev) {
+ r = -ENOMEM;
+ goto error;
}
+ device_initialize(minor->kdev);
+ minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ minor->kdev->class = drm_class;
+ minor->kdev->type = &drm_sysfs_device_minor;
+ minor->kdev->parent = minor->dev->dev;
+ minor->kdev->release = drm_sysfs_release;
+ dev_set_drvdata(minor->kdev, minor);
+
+ r = dev_set_name(minor->kdev, minor_str, minor->index);
+ if (r < 0)
+ goto error;
+
+ r = device_add(minor->kdev);
+ if (r < 0)
+ goto error;
+
return 0;
-err_out:
- return err;
+error:
+ DRM_ERROR("device create failed %d\n", r);
+ put_device(minor->kdev);
+ return r;
}
/**
@@ -562,9 +554,9 @@ err_out:
*/
void drm_sysfs_device_remove(struct drm_minor *minor)
{
- if (minor->kdev.parent)
- device_unregister(&minor->kdev);
- minor->kdev.parent = NULL;
+ if (minor->kdev)
+ device_unregister(minor->kdev);
+ minor->kdev = NULL;
}
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 87664723b9c..b179b70e785 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
struct drm_driver *driver)
{
struct drm_device *dev;
- struct usb_device *usbdev;
int ret;
DRM_DEBUG("\n");
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = drm_dev_alloc(driver, &interface->dev);
if (!dev)
return -ENOMEM;
- usbdev = interface_to_usbdev(interface);
- dev->usbdev = usbdev;
- dev->dev = &interface->dev;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_fill_in_dev(dev, NULL, driver);
- if (ret) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g1;
- }
-
+ dev->usbdev = interface_to_usbdev(interface);
usb_set_intfdata(interface, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g1;
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_g11;
- }
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ ret = drm_dev_register(dev, 0);
if (ret)
- goto err_g2;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, 0);
- if (ret)
- goto err_g3;
- }
-
- /* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g3;
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- mutex_unlock(&drm_global_mutex);
+ goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
return 0;
-err_g3:
- drm_put_minor(&dev->primary);
-err_g2:
- if (dev->render)
- drm_put_minor(&dev->render);
-err_g11:
- drm_put_minor(&dev->control);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
+err_free:
+ drm_dev_free(dev);
return ret;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index b5c5af7328d..93e95d7efd5 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -301,7 +301,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+ page = virt_to_page((void *)dma->pagelist[page_nr]);
get_page(page);
vmf->page = page;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef59596..f227f544aa3 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@ config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef78ca8..b676006a95a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .gem_init_object = exynos_drm_gem_init_object,
.gem_free_object = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
@@ -286,7 +285,11 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
return drm_platform_init(&exynos_drm_driver, pdev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d5299..23da72b5eae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
/*
* enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
+ * - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
- drm_dev->irq_enabled = 1;
+ drm_dev->irq_enabled = true;
/*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- drm_dev->vblank_disable_allowed = 1;
+ drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(drm_dev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3271fd4b172..7bccedca487 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -383,6 +383,8 @@ out:
g2d_userptr->npages,
g2d_userptr->vma);
+ exynos_gem_put_vma(g2d_userptr->vma);
+
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd23275..1ade191d84f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
- return 0;
-}
-
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555afdfe9..702ec3abe85 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
-/* initialize gem object. */
-int exynos_drm_gem_init_object(struct drm_gem_object *obj);
-
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330e444..ddaaedde173 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
{
struct vidi_context *ctx = get_vidi_context(dev);
struct edid *edid;
- int edid_len;
/*
* the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
return ERR_PTR(-EFAULT);
}
- edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
- edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ edid = drm_edid_duplicate(ctx->raw_edid);
if (!edid) {
DRM_DEBUG_KMS("failed to allocate edid\n");
return ERR_PTR(-ENOMEM);
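drm_edid_duplicate() is introduced elsewhere in this series (drm_edid.c, not shown in this diff); presumably it is a thin wrapper around the same kmemdup() pattern being removed here, roughly:

	struct edid *drm_edid_duplicate(const struct edid *edid)
	{
		return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
	}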
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
/*
* enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
+ * - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
- drm_dev->irq_enabled = 1;
+ drm_dev->irq_enabled = true;
/*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- drm_dev->vblank_disable_allowed = 1;
+ drm_dev->vblank_disable_allowed = true;
return 0;
}
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
- int edid_len;
if (!vidi) {
DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
}
- edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
- ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+ ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2dfaaea..508cf99a292 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@ config DRM_GMA500
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 162f686c532..5a9a6a3063a 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -634,6 +634,7 @@ const struct psb_ops cdv_chip_ops = {
.crtcs = 2,
.hdmi_mask = (1 << 0) | (1 << 1),
.lvds_mask = (1 << 1),
+ .sdvo_mask = (1 << 0),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = cdv_chip_setup,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb43573ca..f88a1815d87 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = &connector->base.kdev;
+ intel_dp->adapter.dev.parent = connector->base.kdev;
if (is_edp(encoder))
cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 01dd7d22576..94b3fec22c2 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -714,7 +714,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
break;
case INTEL_OUTPUT_SDVO:
- crtc_mask = ((1 << 0) | (1 << 1));
+ crtc_mask = dev_priv->ops->sdvo_mask;
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c52d06..e2db48a81ed 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
#include <drm/drm_vma_manager.h>
#include "psb_drv.h"
-int psb_gem_init_object(struct drm_gem_object *obj)
-{
- return -EINVAL;
-}
-
void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 62cd42e88f2..566d330aaee 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -51,6 +51,9 @@
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
+#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
+
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 20
@@ -71,7 +74,8 @@ struct intel_gpio {
void
gma_intel_i2c_reset(struct drm_device *dev)
{
- REG_WRITE(GMBUS0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ GMBUS_REG_WRITE(GMBUS0, 0);
}
static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
@@ -98,11 +102,10 @@ static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gpio *gpio)
{
struct drm_psb_private *dev_priv = gpio->dev_priv;
- struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- reserved = REG_READ(gpio->reg) &
+ reserved = GMBUS_REG_READ(gpio->reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -113,29 +116,26 @@ static int get_clock(void *data)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
- struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
- REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- REG_WRITE(gpio->reg, reserved);
- return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ GMBUS_REG_WRITE(gpio->reg, reserved);
+ return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
- struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
- REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- REG_WRITE(gpio->reg, reserved);
- return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ GMBUS_REG_WRITE(gpio->reg, reserved);
+ return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
- struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
u32 clock_bits;
@@ -145,15 +145,14 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- REG_WRITE(gpio->reg, reserved | clock_bits);
- REG_READ(gpio->reg); /* Posting */
+ GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
+ GMBUS_REG_READ(gpio->reg); /* Posting */
}
static void set_data(void *data, int state_high)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
- struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
u32 data_bits;
@@ -163,8 +162,8 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- REG_WRITE(gpio->reg, reserved | data_bits);
- REG_READ(gpio->reg);
+ GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
+ GMBUS_REG_READ(gpio->reg);
}
static struct i2c_adapter *
@@ -251,7 +250,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_psb_private *dev_priv = adapter->algo_data;
- struct drm_device *dev = dev_priv->dev;
int i, reg_offset;
if (bus->force_bit)
@@ -260,28 +258,30 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = 0;
- REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+ GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
u16 len = msgs[i].len;
u8 *buf = msgs[i].buf;
if (msgs[i].flags & I2C_M_RD) {
- REG_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
- REG_READ(GMBUS2+reg_offset);
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ GMBUS_REG_READ(GMBUS2+reg_offset);
do {
u32 val, loop = 0;
- if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+ (GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
- if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
- val = REG_READ(GMBUS3 + reg_offset);
+ val = GMBUS_REG_READ(GMBUS3 + reg_offset);
do {
*buf++ = val & 0xff;
val >>= 8;
@@ -295,18 +295,20 @@ gmbus_xfer(struct i2c_adapter *adapter,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- REG_WRITE(GMBUS3 + reg_offset, val);
- REG_WRITE(GMBUS1 + reg_offset,
+ GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset,
(i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
- REG_READ(GMBUS2+reg_offset);
+ GMBUS_REG_READ(GMBUS2+reg_offset);
while (len) {
- if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+ (GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
- if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
+ GMBUS_SATOER)
goto clear_err;
val = loop = 0;
@@ -314,14 +316,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- REG_WRITE(GMBUS3 + reg_offset, val);
- REG_READ(GMBUS2+reg_offset);
+ GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+ GMBUS_REG_READ(GMBUS2+reg_offset);
}
}
- if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
goto timeout;
- if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
}
@@ -332,20 +334,20 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
- REG_WRITE(GMBUS1 + reg_offset, 0);
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
done:
/* Mark the GMBUS interface as disabled. We will re-enable it at the
* start of the next xfer, till then let it sleep.
*/
- REG_WRITE(GMBUS0 + reg_offset, 0);
+ GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
return i;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
bus->reg0 & 0xff, bus->adapter.name);
- REG_WRITE(GMBUS0 + reg_offset, 0);
+ GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
@@ -399,6 +401,11 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
if (dev_priv->gmbus == NULL)
return -ENOMEM;
+ if (IS_MRST(dev))
+ dev_priv->gmbus_reg = dev_priv->aux_reg;
+ else
+ dev_priv->gmbus_reg = dev_priv->vdc_reg;
+
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
@@ -487,6 +494,7 @@ void gma_intel_teardown_gmbus(struct drm_device *dev)
i2c_del_adapter(&bus->adapter);
}
+ dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
kfree(dev_priv->gmbus);
dev_priv->gmbus = NULL;
}
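
The GMBUS hunks above replace the bare REG_READ()/REG_WRITE() accessors with GMBUS_REG_READ()/GMBUS_REG_WRITE() so the same transfer code can target whichever register block gma_intel_setup_gmbus() selected (the auxiliary VDC on Moorestown, the primary VDC otherwise). The helper definitions are not part of this hunk; the sketch below only shows the assumed shape, resolving against dev_priv->gmbus_reg, and is not the verbatim code from intel_i2c.c:

	/* Sketch only -- assumed expansion of the GMBUS_REG_* helpers
	 * used above, not the actual definitions. */
	static inline u32 gmbus_reg_read(struct drm_psb_private *dev_priv,
					 u32 reg)
	{
		return ioread32(dev_priv->gmbus_reg + reg);
	}

	static inline void gmbus_reg_write(struct drm_psb_private *dev_priv,
					   u32 reg, u32 val)
	{
		iowrite32(val, dev_priv->gmbus_reg + reg);
	}
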
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 45d5af0546b..5b646c1f0c3 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -39,7 +39,7 @@
#include "psb_intel_reg.h"
#include "mdfld_output.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 54c98962b73..8195e859210 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -26,24 +26,10 @@
#include "gma_display.h"
#include "power.h"
-struct psb_intel_range_t {
- int min, max;
-};
-
-struct oaktrail_limit_t {
- struct psb_intel_range_t dot, m, p1;
-};
-
-struct oaktrail_clock_t {
- /* derived values */
- int dot;
- int m;
- int p1;
-};
-
-#define MRST_LIMIT_LVDS_100L 0
-#define MRST_LIMIT_LVDS_83 1
-#define MRST_LIMIT_LVDS_100 2
+#define MRST_LIMIT_LVDS_100L 0
+#define MRST_LIMIT_LVDS_83 1
+#define MRST_LIMIT_LVDS_100 2
+#define MRST_LIMIT_SDVO 3
#define MRST_DOT_MIN 19750
#define MRST_DOT_MAX 120000
@@ -57,21 +43,40 @@ struct oaktrail_clock_t {
#define MRST_P1_MAX_0 7
#define MRST_P1_MAX_1 8
-static const struct oaktrail_limit_t oaktrail_limits[] = {
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
+
+static const struct gma_limit_t mrst_limits[] = {
{ /* MRST_LIMIT_LVDS_100L */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ .find_pll = mrst_lvds_find_best_pll,
},
{ /* MRST_LIMIT_LVDS_83L */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+ .find_pll = mrst_lvds_find_best_pll,
},
{ /* MRST_LIMIT_LVDS_100 */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ .find_pll = mrst_lvds_find_best_pll,
+ },
+ { /* MRST_LIMIT_SDVO */
+ .vco = {.min = 1400000, .max = 2800000},
+ .n = {.min = 3, .max = 7},
+ .m = {.min = 80, .max = 137},
+ .p1 = {.min = 1, .max = 2},
+ .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = mrst_sdvo_find_best_pll,
},
};
@@ -82,9 +87,10 @@ static const u32 oaktrail_m_converts[] = {
0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
};
-static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
+ int refclk)
{
- const struct oaktrail_limit_t *limit = NULL;
+ const struct gma_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -92,45 +98,100 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
|| gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
switch (dev_priv->core_freq) {
case 100:
- limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
break;
case 166:
- limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
break;
case 200:
- limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
break;
}
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &mrst_limits[MRST_LIMIT_SDVO];
} else {
limit = NULL;
- dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+ dev_err(dev->dev, "mrst_limit: wrong display type.\n");
}
return limit;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
{
clock->dot = (refclk * clock->m) / (14 * clock->p1);
}
-static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+static void mrst_print_pll(struct gma_clock_t *clock)
{
- pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
- prefix, clock->dot, clock->m, clock->p1);
+ DRM_DEBUG_DRIVER("dotclock=%d, m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
+ clock->dot, clock->m, clock->m1, clock->m2, clock->n,
+ clock->p1, clock->p2);
+}
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock)
+{
+ struct gma_clock_t clock;
+ u32 target_vco, actual_freq;
+ s32 freq_error, min_error = 100000;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
+ clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ /* p2 value always stored in p2_slow on SDVO */
+ clock.p = clock.p1 * limit->p2.p2_slow;
+ target_vco = target * clock.p;
+
+ /* VCO will increase at this point so break */
+ if (target_vco > limit->vco.max)
+ break;
+
+ if (target_vco < limit->vco.min)
+ continue;
+
+ actual_freq = (refclk * clock.m) /
+ (clock.n * clock.p);
+ freq_error = 10000 -
+ ((target * 10000) / actual_freq);
+
+ if (freq_error < -min_error) {
+ /* freq_error will start to decrease at
+ this point so break */
+ break;
+ }
+
+ if (freq_error < 0)
+ freq_error = -freq_error;
+
+ if (freq_error < min_error) {
+ min_error = freq_error;
+ *best_clock = clock;
+ }
+ }
+ }
+ if (min_error == 0)
+ break;
+ }
+
+ return min_error == 0;
}
/**
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
-static bool
-mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
- struct oaktrail_clock_t *best_clock)
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock)
{
- struct oaktrail_clock_t clock;
- const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+ struct gma_clock_t clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -140,7 +201,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
clock.p1++) {
int this_err;
- oaktrail_clock(refclk, &clock);
+ mrst_lvds_clock(refclk, &clock);
this_err = abs(clock.dot - target);
if (this_err < err) {
@@ -149,7 +210,6 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
}
}
}
- dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
return err != target;
}
@@ -167,8 +227,10 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
+ int i;
+ int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
- if (pipe == 1) {
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
oaktrail_crtc_hdmi_dpms(crtc, mode);
return;
}
@@ -183,35 +245,45 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
+ for (i = 0; i <= need_aux; i++) {
+ /* Enable the DPLL */
+ temp = REG_READ_WITH_AUX(map->dpll, i);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE_WITH_AUX(map->dpll, temp, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE_WITH_AUX(map->dpll,
+ temp | DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE_WITH_AUX(map->dpll,
+ temp | DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ_WITH_AUX(map->conf, i);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE_WITH_AUX(map->conf,
+ temp | PIPEACONF_ENABLE, i);
+ }
+
+ /* Enable the plane */
+ temp = REG_READ_WITH_AUX(map->cntr, i);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE_WITH_AUX(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE,
+ i);
+ /* Flush the plane changes */
+ REG_WRITE_WITH_AUX(map->base,
+ REG_READ_WITH_AUX(map->base, i), i);
+ }
+ }
gma_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
@@ -223,48 +295,52 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
+ for (i = 0; i <= need_aux; i++) {
+ /* Disable the VGA plane that we never use */
+ REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
+ /* Disable display plane */
+ temp = REG_READ_WITH_AUX(map->cntr, i);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE_WITH_AUX(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE, i);
+ /* Flush the plane changes */
+ REG_WRITE_WITH_AUX(map->base,
+ REG_READ(map->base), i);
+ REG_READ_WITH_AUX(map->base, i);
+ }
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
- REG_READ(map->conf);
- }
- /* Wait for for the pipe disable to take effect. */
- gma_wait_for_vblank(dev);
+ /* Next, disable display pipes */
+ temp = REG_READ_WITH_AUX(map->conf, i);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE_WITH_AUX(map->conf,
+ temp & ~PIPEACONF_ENABLE, i);
+ REG_READ_WITH_AUX(map->conf, i);
+ }
+ /* Wait for the pipe disable to take effect. */
+ gma_wait_for_vblank(dev);
+
+ temp = REG_READ_WITH_AUX(map->dpll, i);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE_WITH_AUX(map->dpll,
+ temp & ~DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ }
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
+ /* Wait for the clocks to turn off. */
+ udelay(150);
}
-
- /* Wait for the clocks to turn off. */
- udelay(150);
break;
}
- /*Set FIFO Watermarks*/
- REG_WRITE(DSPARB, 0x3FFF);
- REG_WRITE(DSPFW1, 0x3F88080A);
- REG_WRITE(DSPFW2, 0x0b060808);
+ /* Set FIFO Watermarks (values taken from EMGD) */
+ REG_WRITE(DSPARB, 0x3f80);
+ REG_WRITE(DSPFW1, 0x3f8f0404);
+ REG_WRITE(DSPFW2, 0x04040f04);
REG_WRITE(DSPFW3, 0x0);
- REG_WRITE(DSPFW4, 0x08030404);
+ REG_WRITE(DSPFW4, 0x04040404);
REG_WRITE(DSPFW5, 0x04040404);
REG_WRITE(DSPFW6, 0x78);
- REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
- /* Must write Bit 14 of the Chicken Bit Register */
+ REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
gma_power_end(dev);
}
@@ -297,7 +373,8 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
- struct oaktrail_clock_t clock;
+ struct gma_clock_t clock;
+ const struct gma_limit_t *limit;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false;
@@ -306,8 +383,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
+ int i;
+ int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
- if (pipe == 1)
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
if (!gma_power_begin(dev, true))
@@ -340,15 +419,17 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
}
/* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+ for (i = 0; i <= need_aux; i++)
+ REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
/* Disable the panel fitter if it was on our pipe */
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
- REG_WRITE(map->src,
- ((mode->crtc_hdisplay - 1) << 16) |
- (mode->crtc_vdisplay - 1));
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
+ (mode->crtc_vdisplay - 1), i);
+ }
if (gma_encoder)
drm_object_property_get_value(&connector->base,
@@ -365,35 +446,39 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(map->hblank,
- (adjusted_mode->crtc_hblank_start - offsetX - 1) |
- ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(map->hsync,
- (adjusted_mode->crtc_hsync_start - offsetX - 1) |
- ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(map->vblank,
- (adjusted_mode->crtc_vblank_start - offsetY - 1) |
- ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(map->vsync,
- (adjusted_mode->crtc_vsync_start - offsetY - 1) |
- ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hblank,
+ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hsync,
+ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vblank,
+ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vsync,
+ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
+ }
} else {
- REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16), i);
+ }
}
/* Flush the plane changes */
@@ -418,21 +503,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (is_mipi)
goto oaktrail_crtc_mode_set_exit;
- refclk = dev_priv->core_freq * 1000;
dpll = 0; /*BIT16 = 0 for 100MHz reference */
- ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+ refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
+ limit = mrst_limit(crtc, refclk);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
+ refclk, &clock);
- if (!ok) {
- dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
- } else {
- dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
- "m = %x, p1 = %x.\n", clock.dot, clock.m,
- clock.p1);
+ if (is_sdvo) {
+ /* Convert calculated values to register values */
+ clock.p1 = (1L << (clock.p1 - 1));
+ clock.m -= 2;
+ clock.n = (1L << (clock.n - 1));
}
- fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+ if (!ok)
+ DRM_ERROR("Failed to find proper PLL settings");
+
+ mrst_print_pll(&clock);
+
+ if (is_sdvo)
+ fp = clock.n << 16 | clock.m;
+ else
+ fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
dpll |= DPLL_VGA_MODE_DIS;
@@ -456,38 +550,43 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 2)) << 17;
+ if (is_sdvo)
+ dpll |= clock.p1 << 16; /* dpll |= (1 << (clock.p1 - 1)) << 16; */
+ else
+ dpll |= (1 << (clock.p1 - 2)) << 17;
dpll |= DPLL_VCO_ENABLE;
- mrstPrintPll("chosen", &clock);
-
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(map->fp0, fp);
- REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Check the DPLLA lock bit PIPEACONF[29] */
- udelay(150);
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->fp0, fp, i);
+ REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Check the DPLLA lock bit PIPEACONF[29] */
+ udelay(150);
+ }
}
- REG_WRITE(map->fp0, fp);
- REG_WRITE(map->dpll, dpll);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->fp0, fp, i);
+ REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
- /* write it again -- the BIOS does, after all */
- REG_WRITE(map->dpll, dpll);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
- REG_WRITE(map->conf, pipeconf);
- REG_READ(map->conf);
- gma_wait_for_vblank(dev);
+ REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
+ REG_READ_WITH_AUX(map->conf, i);
+ gma_wait_for_vblank(dev);
- REG_WRITE(map->cntr, dspcntr);
- gma_wait_for_vblank(dev);
+ REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
+ gma_wait_for_vblank(dev);
+ }
oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
@@ -565,3 +664,9 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.commit = gma_crtc_commit,
};
+/* Not used yet */
+const struct gma_clock_funcs mrst_clock_funcs = {
+ .clock = mrst_lvds_clock,
+ .limit = mrst_limit,
+ .pll_is_valid = gma_pll_is_valid,
+};
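
mrst_sdvo_find_best_pll() above walks every (m, n, p1) combination allowed by the MRST_LIMIT_SDVO table, rejects candidates whose VCO falls outside the 1.4-2.8 GHz window, and scores the rest by relative frequency error in units of 0.01%, keeping the closest match and stopping early on an exact one. A small standalone illustration of that scoring step, using a hypothetical candidate and the 96 MHz SDVO reference clock the mode-set path passes in (values are illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int refclk = 96000;       /* kHz, SDVO reference */
		unsigned int target = 100000;      /* requested dot clock, kHz */
		unsigned int m = 100, n = 5, p = 2 * 10; /* p = p1 * p2_slow */

		/* Same integer arithmetic as the driver, all in kHz */
		unsigned int actual = (refclk * m) / (n * p);
		long error = 10000 - (long)((target * 10000ULL) / actual);

		if (error < 0)
			error = -error;

		/* error is in 0.01% units; the search keeps the smallest one */
		printf("actual = %u kHz, error = %ld (0.01%% units)\n",
		       actual, error);
		return 0;
	}
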
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 08747fd7105..368a03ae301 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -26,7 +26,7 @@
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include "mid_bios.h"
#include "intel_bios.h"
@@ -40,6 +40,9 @@ static int oaktrail_output_init(struct drm_device *dev)
dev_err(dev->dev, "DSI is not supported\n");
if (dev_priv->hdmi_priv)
oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+
+ psb_intel_sdvo_init(dev, SDVOB);
+
return 0;
}
@@ -526,6 +529,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
}
+ gma_intel_setup_gmbus(dev);
oaktrail_hdmi_setup(dev);
return 0;
}
@@ -534,6 +538,7 @@ static void oaktrail_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ gma_intel_teardown_gmbus(dev);
oaktrail_hdmi_teardown(dev);
if (!dev_priv->has_gct)
psb_intel_destroy_bios(dev);
@@ -546,6 +551,7 @@ const struct psb_ops oaktrail_chip_ops = {
.crtcs = 2,
.hdmi_mask = (1 << 1),
.lvds_mask = (1 << 0),
+ .sdvo_mask = (1 << 1),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 1eb86c79523..e2810706114 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -99,7 +99,7 @@ static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
i2c_dev->status = I2C_STAT_INIT;
i2c_dev->msg = pmsg;
i2c_dev->buf_offset = 0;
- INIT_COMPLETION(i2c_dev->complete);
+ reinit_completion(&i2c_dev->complete);
/* Enable I2C transaction */
temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e77d7214fca..5e069786273 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -22,7 +22,7 @@
#include <linux/i2c.h>
#include <drm/drmP.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include "intel_bios.h"
#include "psb_drv.h"
@@ -218,30 +218,6 @@ static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
.commit = oaktrail_lvds_commit,
};
-static struct drm_display_mode lvds_configuration_modes[] = {
- /* hard coded fixed mode for TPO LTPS LPJ040K001A */
- { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
- 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
- /* hard coded fixed mode for LVDS 800x480 */
- { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
- 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
- /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
- { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
- 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
- /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
- { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
- 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
- /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
- { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
- 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
- /* hard coded fixed mode for LVDS 1024x768 */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
- /* hard coded fixed mode for LVDS 1366x768 */
- { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
- 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
-};
-
/* Returns the panel fixed mode from configuration. */
static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
@@ -303,10 +279,10 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev,
dev_priv->lfp_lvds_vbt_mode);
- /* Then guess */
+
+ /* If we still got no mode then bail */
if (mode_dev->panel_fixed_mode == NULL)
- mode_dev->panel_fixed_mode
- = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+ return;
drm_mode_set_name(mode_dev->panel_fixed_mode);
drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 697678619bd..23fb33f1471 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -373,6 +373,7 @@ const struct psb_ops psb_chip_ops = {
.crtcs = 2,
.hdmi_mask = (1 << 0),
.lvds_mask = (1 << 1),
+ .sdvo_mask = (1 << 0),
.cursor_needs_phys = 1,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9ff1f2..1199180667c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -251,6 +251,12 @@ static int psb_driver_unload(struct drm_device *dev)
iounmap(dev_priv->sgx_reg);
dev_priv->sgx_reg = NULL;
}
+ if (dev_priv->aux_reg) {
+ iounmap(dev_priv->aux_reg);
+ dev_priv->aux_reg = NULL;
+ }
+ if (dev_priv->aux_pdev)
+ pci_dev_put(dev_priv->aux_pdev);
/* Destroy VBT data */
psb_intel_destroy_bios(dev);
@@ -266,7 +272,7 @@ static int psb_driver_unload(struct drm_device *dev)
static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct drm_psb_private *dev_priv;
- unsigned long resource_start;
+ unsigned long resource_start, resource_len;
unsigned long irqflags;
int ret = -ENOMEM;
struct drm_connector *connector;
@@ -296,6 +302,30 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->sgx_reg)
goto out_err;
+ if (IS_MRST(dev)) {
+ dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
+
+ if (dev_priv->aux_pdev) {
+ resource_start = pci_resource_start(dev_priv->aux_pdev,
+ PSB_AUX_RESOURCE);
+ resource_len = pci_resource_len(dev_priv->aux_pdev,
+ PSB_AUX_RESOURCE);
+ dev_priv->aux_reg = ioremap_nocache(resource_start,
+ resource_len);
+ if (!dev_priv->aux_reg)
+ goto out_err;
+
+ DRM_DEBUG_KMS("Found aux vdc");
+ } else {
+ /* Couldn't find the aux vdc so map to primary vdc */
+ dev_priv->aux_reg = dev_priv->vdc_reg;
+ DRM_DEBUG_KMS("Couldn't find aux pci device");
+ }
+ dev_priv->gmbus_reg = dev_priv->aux_reg;
+ } else {
+ dev_priv->gmbus_reg = dev_priv->vdc_reg;
+ }
+
psb_intel_opregion_setup(dev);
ret = dev_priv->ops->chip_setup(dev);
@@ -359,7 +389,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_irq_install(dev);
- dev->vblank_disable_allowed = 1;
+ dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -449,7 +479,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
dev_dbg(dev->dev, "Invalid Connector object.\n");
- return -EINVAL;
+ return -ENOENT;
}
connector = obj_to_connector(obj);
@@ -491,7 +521,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto mode_op_out;
}
@@ -646,7 +676,6 @@ static struct drm_driver driver = {
.preclose = psb_driver_preclose,
.postclose = psb_driver_close,
- .gem_init_object = psb_gem_init_object,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7708f..b59e6588c34 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@ enum {
CHIP_MFLD_0130 = 3, /* Medfield */
};
-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
/*
* Driver definitions
@@ -75,6 +75,7 @@ enum {
* PCI resource identifiers
*/
#define PSB_MMIO_RESOURCE 0
+#define PSB_AUX_RESOURCE 0
#define PSB_GATT_RESOURCE 2
#define PSB_GTT_RESOURCE 3
/*
@@ -455,6 +456,7 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
+ struct pci_dev *aux_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
const struct psb_offset *regmap;
@@ -486,6 +488,7 @@ struct drm_psb_private {
uint8_t __iomem *sgx_reg;
uint8_t __iomem *vdc_reg;
+ uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
uint32_t gatt_free_offset;
/*
@@ -532,6 +535,7 @@ struct drm_psb_private {
/* gmbus */
struct intel_gmbus *gmbus;
+ uint8_t __iomem *gmbus_reg;
/* Used by SDVO */
int crt_ddc_pin;
@@ -672,6 +676,7 @@ struct psb_ops {
int sgx_offset; /* Base offset of SGX device */
int hdmi_mask; /* Mask of HDMI CRTCs */
int lvds_mask; /* Mask of LVDS CRTCs */
+ int sdvo_mask; /* Mask of SDVO CRTCs */
int cursor_needs_phys; /* If cursor base reg need physical address */
/* Sub functions */
@@ -837,7 +842,6 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
-extern int psb_gem_init_object(struct drm_gem_object *obj);
extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -928,16 +932,58 @@ static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
return ioread32(dev_priv->vdc_reg + reg);
}
+static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return ioread32(dev_priv->aux_reg + reg);
+}
+
#define REG_READ(reg) REGISTER_READ(dev, (reg))
+#define REG_READ_AUX(reg) REGISTER_READ_AUX(dev, (reg))
+
+/* Useful for post reads */
+static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
+ uint32_t reg, int aux)
+{
+ uint32_t val;
+
+ if (aux)
+ val = REG_READ_AUX(reg);
+ else
+ val = REG_READ(reg);
+
+ return val;
+}
+
+#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
- uint32_t val)
+ uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
iowrite32((val), dev_priv->vdc_reg + (reg));
}
+static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite32((val), dev_priv->aux_reg + (reg));
+}
+
#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
+#define REG_WRITE_AUX(reg, val) REGISTER_WRITE_AUX(dev, (reg), (val))
+
+static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
+ uint32_t val, int aux)
+{
+ if (aux)
+ REG_WRITE_AUX(reg, val);
+ else
+ REG_WRITE(reg, val);
+}
+
+#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)
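
The REG_{READ,WRITE}_WITH_AUX() wrappers added here are what let the Oaktrail CRTC and SDVO code above program a register once for the primary VDC and, when the pipe drives SDVO on Moorestown, a second time for the auxiliary block. A condensed, hypothetical usage sketch of that idiom (mirroring the for-loop pattern in oaktrail_crtc.c; 'map' and 'need_aux' as used there):

	static void write_dpll_mirrored(struct drm_device *dev,
					const struct psb_offset *map,
					u32 dpll, int need_aux)
	{
		int i;

		/* i == 0 hits the primary VDC; i == 1 (only when need_aux
		 * is set) hits the auxiliary mapping from psb_driver_load(). */
		for (i = 0; i <= need_aux; i++) {
			REG_WRITE_WITH_AUX(map->dpll, dpll, i);
			REG_READ_WITH_AUX(map->dpll, i);	/* posting read */
		}
	}
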
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 97f8a03fee4..c8841ac6c8f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -572,7 +572,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
if (!drmmode_obj) {
dev_err(dev->dev, "no such CRTC id\n");
- return -EINVAL;
+ return -ENOENT;
}
crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 6f01cdf5e12..07d3a9e6d79 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -228,24 +228,26 @@ static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u3
{
struct drm_device *dev = psb_intel_sdvo->base.base.dev;
u32 bval = val, cval = val;
- int i;
+ int i, j;
+ int need_aux = IS_MRST(dev) ? 1 : 0;
- if (psb_intel_sdvo->sdvo_reg == SDVOB) {
- cval = REG_READ(SDVOC);
- } else {
- bval = REG_READ(SDVOB);
- }
- /*
- * Write the registers twice for luck. Sometimes,
- * writing them only once doesn't appear to 'stick'.
- * The BIOS does this too. Yay, magic
- */
- for (i = 0; i < 2; i++)
- {
- REG_WRITE(SDVOB, bval);
- REG_READ(SDVOB);
- REG_WRITE(SDVOC, cval);
- REG_READ(SDVOC);
+ for (j = 0; j <= need_aux; j++) {
+ if (psb_intel_sdvo->sdvo_reg == SDVOB)
+ cval = REG_READ_WITH_AUX(SDVOC, j);
+ else
+ bval = REG_READ_WITH_AUX(SDVOB, j);
+
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++) {
+ REG_WRITE_WITH_AUX(SDVOB, bval, j);
+ REG_READ_WITH_AUX(SDVOB, j);
+ REG_WRITE_WITH_AUX(SDVOC, cval, j);
+ REG_READ_WITH_AUX(SDVOC, j);
+ }
}
}
@@ -995,6 +997,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
struct psb_intel_sdvo_dtd input_dtd;
int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
int rate;
+ int need_aux = IS_MRST(dev) ? 1 : 0;
if (!mode)
return;
@@ -1060,7 +1063,11 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
return;
/* Set the SDVO control regs. */
- sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if (need_aux)
+ sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+ else
+ sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+
switch (psb_intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
@@ -1090,6 +1097,8 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
u32 temp;
+ int i;
+ int need_aux = IS_MRST(dev) ? 1 : 0;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -1108,19 +1117,27 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if (need_aux)
+ temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+ else
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
if ((temp & SDVO_ENABLE) != 0) {
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
}
}
} else {
bool input1, input2;
- int i;
u8 status;
- temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if (need_aux)
+ temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+ else
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
if ((temp & SDVO_ENABLE) == 0)
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+
for (i = 0; i < 2; i++)
gma_wait_for_vblank(dev);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf3013..ba4830342d3 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
if (gma_power_is_on(dev))
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank_enabled[0])
+ if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
/* FIXME: Handle Medfield irq mask
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
- if (dev->vblank_enabled[2])
+ if (dev->vblank[2].enabled)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
*/
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank_enabled[0])
+ if (dev->vblank[0].enabled)
psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[2])
+ if (dev->vblank[2].enabled)
psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank_enabled[0])
+ if (dev->vblank[0].enabled)
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[2])
+ if (dev->vblank[2].enabled)
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
{
unsigned int cur_vblank;
int ret = 0;
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60e84043aa3..400b0c4a10f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -17,6 +17,7 @@
+#include <linux/hdmi.h>
#include <linux/module.h>
#include <drm/drmP.h>
@@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
buf[HB(0)] = 0x82;
buf[HB(1)] = 0x02;
buf[HB(2)] = 13;
+ buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+ buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
buf[PB(4)] = drm_match_cea_mode(mode);
tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb107..249fdff305c 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
dma->buflist[vertex->idx],
vertex->discard, vertex->used);
- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
mc->last_render);
- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
- /* i810 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
pci_set_master(dev->pdev);
return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 00000000000..6199d0b5b95
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
+config DRM_I915
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on AGP
+ depends on AGP_INTEL
+ # we need shmfs for the swappable backing store, and in particular
+ # the shmem_readpage() which depends upon tmpfs
+ select SHMEM
+ select TMPFS
+ select DRM_KMS_HELPER
+ # i915 depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select BACKLIGHT_LCD_SUPPORT if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ select ACPI_BUTTON if ACPI
+ help
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
+
+config DRM_I915_KMS
+ bool "Enable modesetting on intel by default"
+ depends on DRM_I915
+ help
+ Choose this option if you want kernel modesetting enabled by default,
+ and you have a new enough userspace to support this. Running old
+ userspaces with this enabled will cause pain. Note that this causes
+ the driver to bind to PCI devices, which precludes loading things
+ like intelfb.
+
+config DRM_I915_FBDEV
+ bool "Enable legacy fbdev support for the modesettting intel driver"
+ depends on DRM_I915
+ select DRM_KMS_FB_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev
+ support. Note that this support also provides the Linux console
+ support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+ bool "Enable preliminary support for prerelease Intel hardware by default"
+ depends on DRM_I915
+ help
+ Choose this option if you have prerelease Intel hardware and want the
+ i915 driver to support it by default. You can enable such support at
+ runtime with the module option i915.preliminary_hw_support=1; this
+ option changes the default for that module option.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a84a0d..41838eaa799 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
+ intel_dsi.o \
+ intel_dsi_cmd.o \
+ intel_dsi_pll.o \
intel_bios.o \
intel_ddi.o \
intel_dp.o \
@@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_panel.o \
intel_pm.o \
intel_i2c.o \
- intel_fb.o \
intel_tv.o \
intel_dvo.o \
intel_ringbuffer.o \
@@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_ACPI) += intel_acpi.o
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad8010..312163379db 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@ struct intel_dvo_dev_ops {
struct drm_display_mode *mode);
/*
- * Callback to adjust the mode to be set in the CRTC.
- *
- * This allows an output to adjust the clock or even the entire set of
- * timings, which is used for panels with fixed timings or for
- * buses with clock limitations.
- */
- bool (*mode_fixup)(struct intel_dvo_device *dvo,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5af18..6ed45a98423 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
*/
#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -38,9 +40,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DRM_I915_RING_DEBUG 1
-
-
#if defined(CONFIG_DEBUG_FS)
enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
return v ? "yes" : "no";
}
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", obj->ring->name);
}
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+ seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+ seq_putc(m, ' ');
+}
+
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -554,7 +586,53 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 8) {
+ int i;
+ seq_printf(m, "Master Interrupt Control:\t%08x\n",
+ I915_READ(GEN8_MASTER_IRQ));
+
+ for (i = 0; i < 4; i++) {
+ seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IMR(i)));
+ seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IIR(i)));
+ seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IER(i)));
+ }
+
+ for_each_pipe(i) {
+ seq_printf(m, "Pipe %c IMR:\t%08x\n",
+ pipe_name(i),
+ I915_READ(GEN8_DE_PIPE_IMR(i)));
+ seq_printf(m, "Pipe %c IIR:\t%08x\n",
+ pipe_name(i),
+ I915_READ(GEN8_DE_PIPE_IIR(i)));
+ seq_printf(m, "Pipe %c IER:\t%08x\n",
+ pipe_name(i),
+ I915_READ(GEN8_DE_PIPE_IER(i)));
+ }
+
+ seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IMR));
+ seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IIR));
+ seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IER));
+
+ seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IMR));
+ seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IIR));
+ seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IER));
+
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
+ } else if (IS_VALLEYVIEW(dev)) {
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
@@ -626,7 +704,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
for_each_ring(ring, dev_priv, i) {
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
ring->name, I915_READ_IMR(ring));
@@ -843,6 +921,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
if (IS_GEN5(dev)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1401,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1395,12 +1477,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_fbdev *ifbdev;
+ struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
- int ret;
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
@@ -1416,10 +1498,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
describe_obj(m, fb->obj);
seq_putc(m, '\n');
mutex_unlock(&dev->mode_config.mutex);
+#endif
mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
- if (&fb->base == ifbdev->helper.fb)
+ if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1525,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ struct i915_hw_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1544,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
- for_each_ring(ring, dev_priv, i) {
- if (ring->default_context) {
- seq_printf(m, "HW default context %s ring ", ring->name);
- describe_obj(m, ring->default_context->obj);
- seq_putc(m, '\n');
- }
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ seq_puts(m, "HW context ");
+ describe_ctx(m, ctx);
+ for_each_ring(ring, dev_priv, i)
+ if (ring->default_context == ctx)
+ seq_printf(m, "(default context %s) ", ring->name);
+
+ describe_obj(m, ctx->obj);
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1536,7 +1623,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
I915_READ16(C1DRB3));
- } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ } else if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
I915_READ(MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -1545,8 +1632,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- seq_printf(m, "ARB_MODE = 0x%08x\n",
- I915_READ(ARB_MODE));
+ if (IS_GEN8(dev))
+ seq_printf(m, "GAMTARBMODE = 0x%08x\n",
+ I915_READ(GAMTARBMODE));
+ else
+ seq_printf(m, "ARB_MODE = 0x%08x\n",
+ I915_READ(ARB_MODE));
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
@@ -1555,18 +1646,37 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_ppgtt_info(struct seq_file *m, void *data)
+static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- int i, ret;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int unused, i;
+ if (!ppgtt)
+ return;
+
+ seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
+ seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+ for_each_ring(ring, dev_priv, unused) {
+ seq_printf(m, "%s\n", ring->name);
+ for (i = 0; i < 4; i++) {
+ u32 offset = 0x270 + i * 8;
+ u64 pdp = I915_READ(ring->mmio_base + offset + 4);
+ pdp <<= 32;
+ pdp |= I915_READ(ring->mmio_base + offset);
+ for (i = 0; i < 4; i++)
+ seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+ }
+ }
+}
+
+static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
@@ -1585,6 +1695,22 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ int ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ gen8_ppgtt_info(m, dev);
+ else if (INTEL_INFO(dev)->gen >= 6)
+ gen6_ppgtt_info(m, dev);
+
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1610,27 +1736,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
- vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+ vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1655,126 +1781,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 psrstat, psrperf;
-
- if (!IS_HASWELL(dev)) {
- seq_puts(m, "PSR not supported on this platform\n");
- } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
- seq_puts(m, "PSR enabled\n");
- } else {
- seq_puts(m, "PSR disabled: ");
- switch (dev_priv->no_psr_reason) {
- case PSR_NO_SOURCE:
- seq_puts(m, "not supported on this platform");
- break;
- case PSR_NO_SINK:
- seq_puts(m, "not supported by panel");
- break;
- case PSR_MODULE_PARAM:
- seq_puts(m, "disabled by flag");
- break;
- case PSR_CRTC_NOT_ACTIVE:
- seq_puts(m, "crtc not active");
- break;
- case PSR_PWR_WELL_ENABLED:
- seq_puts(m, "power well enabled");
- break;
- case PSR_NOT_TILED:
- seq_puts(m, "not tiled");
- break;
- case PSR_SPRITE_ENABLED:
- seq_puts(m, "sprite enabled");
- break;
- case PSR_S3D_ENABLED:
- seq_puts(m, "stereo 3d enabled");
- break;
- case PSR_INTERLACED_ENABLED:
- seq_puts(m, "interlaced enabled");
- break;
- case PSR_HSW_NOT_DDIA:
- seq_puts(m, "HSW ties PSR to DDI A (eDP)");
- break;
- default:
- seq_puts(m, "unknown reason");
- }
- seq_puts(m, "\n");
- return 0;
- }
-
- psrstat = I915_READ(EDP_PSR_STATUS_CTL);
-
- seq_puts(m, "PSR Current State: ");
- switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
- case EDP_PSR_STATUS_STATE_IDLE:
- seq_puts(m, "Reset state\n");
- break;
- case EDP_PSR_STATUS_STATE_SRDONACK:
- seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
- break;
- case EDP_PSR_STATUS_STATE_SRDENT:
- seq_puts(m, "SRD entry\n");
- break;
- case EDP_PSR_STATUS_STATE_BUFOFF:
- seq_puts(m, "Wait for buffer turn off\n");
- break;
- case EDP_PSR_STATUS_STATE_BUFON:
- seq_puts(m, "Wait for buffer turn on\n");
- break;
- case EDP_PSR_STATUS_STATE_AUXACK:
- seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
- break;
- case EDP_PSR_STATUS_STATE_SRDOFFACK:
- seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_puts(m, "Link Status: ");
- switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
- case EDP_PSR_STATUS_LINK_FULL_OFF:
- seq_puts(m, "Link is fully off\n");
- break;
- case EDP_PSR_STATUS_LINK_FULL_ON:
- seq_puts(m, "Link is fully on\n");
- break;
- case EDP_PSR_STATUS_LINK_STANDBY:
- seq_puts(m, "Link is in standby\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "PSR Entry Count: %u\n",
- psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
- EDP_PSR_STATUS_COUNT_MASK);
-
- seq_printf(m, "Max Sleep Timer Counter: %u\n",
- psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
- EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+ u32 psrperf = 0;
+ bool enabled = false;
- seq_printf(m, "Had AUX error: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+ seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+ seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
- seq_printf(m, "Sending AUX: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+ enabled = HAS_PSR(dev) &&
+ I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ seq_printf(m, "Enabled: %s\n", yesno(enabled));
- seq_printf(m, "Sending Idle: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
-
- seq_printf(m, "Sending TP2 TP3: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
-
- seq_printf(m, "Sending TP1: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
-
- seq_printf(m, "Idle Count: %u\n",
- psrstat & EDP_PSR_STATUS_IDLE_MASK);
-
- psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance Counter: %u\n", psrperf);
+ if (HAS_PSR(dev))
+ psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+ EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
return 0;
}
@@ -1825,6 +1845,751 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
return 0;
}
+struct pipe_crc_info {
+ const char *name;
+ struct drm_device *dev;
+ enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+
+ if (pipe_crc->opened) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EBUSY; /* already open */
+ }
+
+ pipe_crc->opened = true;
+ filep->private_data = inode->i_private;
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->opened = false;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
+/* account for the '\0' terminator */
+#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+ assert_spin_locked(&pipe_crc->lock);
+ return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR);
+}
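Note on the helper above: CIRC_CNT() comes from <linux/circ_buf.h> and computes how many entries sit between tail and head in a power-of-two sized ring via masking, which is also why the read path below carries a BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR) check.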
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+ loff_t *pos)
+{
+ struct pipe_crc_info *info = filep->private_data;
+ struct drm_device *dev = info->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ char buf[PIPE_CRC_BUFFER_LEN];
+ int head, tail, n_entries, n;
+ ssize_t bytes_read;
+
+ /*
+ * Don't allow user space to provide buffers not big enough to hold
+ * a line of data.
+ */
+ if (count < PIPE_CRC_LINE_LEN)
+ return -EINVAL;
+
+ if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+ return 0;
+
+ /* nothing to read */
+ spin_lock_irq(&pipe_crc->lock);
+ while (pipe_crc_data_count(pipe_crc) == 0) {
+ int ret;
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+ pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+ if (ret) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return ret;
+ }
+ }
+
+ /* We now have one or more entries to read */
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
+ n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+ count / PIPE_CRC_LINE_LEN);
+ spin_unlock_irq(&pipe_crc->lock);
+
+ bytes_read = 0;
+ n = 0;
+ do {
+ struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+ int ret;
+
+ bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+ "%8u %8x %8x %8x %8x %8x\n",
+ entry->frame, entry->crc[0],
+ entry->crc[1], entry->crc[2],
+ entry->crc[3], entry->crc[4]);
+
+ ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+ buf, PIPE_CRC_LINE_LEN);
+ if (ret == PIPE_CRC_LINE_LEN)
+ return -EFAULT;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ n++;
+ } while (--n_entries);
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->tail = tail;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return bytes_read;
+}
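The snprintf format used above ("%8u %8x %8x %8x %8x %8x\n") emits exactly six 8-character fields, five separating spaces and a newline, i.e. 6 * 8 + 5 + 1 = 54 bytes, which is what PIPE_CRC_LINE_LEN accounts for; copy_to_user() therefore always transfers whole lines.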
+
+static const struct file_operations i915_pipe_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_pipe_crc_open,
+ .read = i915_pipe_crc_read,
+ .release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+ {
+ .name = "i915_pipe_A_crc",
+ .pipe = PIPE_A,
+ },
+ {
+ .name = "i915_pipe_B_crc",
+ .pipe = PIPE_B,
+ },
+ {
+ .name = "i915_pipe_C_crc",
+ .pipe = PIPE_C,
+ },
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+ enum pipe pipe)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+ struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+ info->dev = dev;
+ ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+ &i915_pipe_crc_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+ "none",
+ "plane1",
+ "plane2",
+ "pf",
+ "pipe",
+ "TV",
+ "DP-B",
+ "DP-C",
+ "DP-D",
+ "auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+ return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ seq_printf(m, "%c %s\n", pipe_name(i),
+ pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+ return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+ enum intel_pipe_crc_source *source)
+{
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ struct intel_digital_port *dig_port;
+ int ret = 0;
+
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->pipe != pipe)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_TVOUT:
+ *source = INTEL_PIPE_CRC_SOURCE_TV;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_EDP:
+ dig_port = enc_to_dig_port(&encoder->base);
+ switch (dig_port->port) {
+ case PORT_B:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+ break;
+ case PORT_C:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+ break;
+ case PORT_D:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+ break;
+ default:
+ WARN(1, "nonexisting DP port %c\n",
+ port_name(dig_port->port));
+ break;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev));
+
+ tmp |= DC_BALANCE_RESET_VLV;
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ if (!SUPPORTS_TV(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev));
+
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+ tmp &= ~DC_BALANCE_RESET_VLV;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+ }
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PF;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+ enum intel_pipe_crc_source source)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ u32 val;
+ int ret;
+
+ if (pipe_crc->source == source)
+ return 0;
+
+ /* forbid changing the source without going back to 'none' */
+ if (pipe_crc->source && source)
+ return -EINVAL;
+
+ if (IS_GEN2(dev))
+ ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+ else if (INTEL_INFO(dev)->gen < 5)
+ ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ else if (IS_VALLEYVIEW(dev))
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ret = ilk_pipe_crc_ctl_reg(&source, &val);
+ else
+ ret = ivb_pipe_crc_ctl_reg(&source, &val);
+
+ if (ret != 0)
+ return ret;
+
+ /* none -> real source transition */
+ if (source) {
+ DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+ pipe_name(pipe), pipe_crc_source_name(source));
+
+ pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+ INTEL_PIPE_CRC_ENTRIES_NR,
+ GFP_KERNEL);
+ if (!pipe_crc->entries)
+ return -ENOMEM;
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+ }
+
+ pipe_crc->source = source;
+
+ I915_WRITE(PIPE_CRC_CTL(pipe), val);
+ POSTING_READ(PIPE_CRC_CTL(pipe));
+
+ /* real source -> none transition */
+ if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+ struct intel_pipe_crc_entry *entries;
+
+ DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+ pipe_name(pipe));
+
+ intel_wait_for_vblank(dev, pipe);
+
+ spin_lock_irq(&pipe_crc->lock);
+ entries = pipe_crc->entries;
+ pipe_crc->entries = NULL;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ kfree(entries);
+
+ if (IS_G4X(dev))
+ g4x_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_undo_pipe_scramble_reset(dev, pipe);
+ }
+
+ return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ * command: wsp* object wsp+ name wsp+ source wsp*
+ * object: 'pipe'
+ * name: (A | B | C)
+ * source: (none | plane1 | plane2 | pf)
+ * wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
+ * "pipe A none" -> Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+ int n_words = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* skip leading white space */
+ buf = skip_spaces(buf);
+ if (!*buf)
+ break; /* end of buffer */
+
+ /* find end of word */
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+
+ if (n_words == max_words) {
+ DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+ max_words);
+ return -EINVAL; /* ran out of words[] before bytes */
+ }
+
+ if (*end)
+ *end++ = '\0';
+ words[n_words++] = buf;
+ buf = end;
+ }
+
+ return n_words;
+}
+
+enum intel_pipe_crc_object {
+ PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+ "pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+ if (!strcmp(buf, pipe_crc_objects[i])) {
+ *o = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+ const char name = buf[0];
+
+ if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ return -EINVAL;
+
+ *pipe = name - 'A';
+
+ return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+ if (!strcmp(buf, pipe_crc_sources[i])) {
+ *s = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+ int n_words;
+ char *words[N_WORDS];
+ enum pipe pipe;
+ enum intel_pipe_crc_object object;
+ enum intel_pipe_crc_source source;
+
+ n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+ if (n_words != N_WORDS) {
+ DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+ N_WORDS);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+ DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+ return -EINVAL;
+ }
+
+ return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+ char *tmpbuf;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(tmpbuf, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ tmpbuf[len] = '\0';
+
+ ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+ kfree(tmpbuf);
+ if (ret < 0)
+ return ret;
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = display_crc_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = display_crc_ctl_write
+};
+
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -1885,6 +2650,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
i915_ring_stop_get, i915_ring_stop_set,
"0x%08llx\n");
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.missed_irq_rings;
+ return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Lock against concurrent debugfs callers */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ dev_priv->gpu_error.missed_irq_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+ i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+ "0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.test_irq_rings;
+
+ return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+ /* Lock against concurrent debugfs callers */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ dev_priv->gpu_error.test_irq_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+ i915_ring_test_irq_get, i915_ring_test_irq_set,
+ "0x%08llx\n");
+
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
@@ -1972,6 +2803,8 @@ i915_max_freq_get(void *data, u64 *val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1996,6 +2829,8 @@ i915_max_freq_set(void *data, u64 val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2869,8 @@ i915_min_freq_get(void *data, u64 *val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -2058,6 +2895,8 @@ i915_min_freq_set(void *data, u64 val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2975,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
- struct dentry *ent,
- const void *key)
-{
- struct drm_info_node *node;
-
- node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
- debugfs_remove(ent);
- return -ENOMEM;
- }
-
- node->minor = minor;
- node->dent = ent;
- node->info_ent = (void *) key;
-
- mutex_lock(&minor->debugfs_lock);
- list_add(&node->list, &minor->debugfs_list);
- mutex_unlock(&minor->debugfs_lock);
-
- return 0;
-}
-
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -2227,7 +3040,7 @@ static int i915_debugfs_create(struct dentry *root,
return drm_add_fake_info_node(minor, ent, fops);
}
-static struct drm_info_list i915_debugfs_list[] = {
+static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
@@ -2269,7 +3082,7 @@ static struct drm_info_list i915_debugfs_list[] = {
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
-static struct i915_debugfs_files {
+static const struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {
@@ -2278,11 +3091,28 @@ static struct i915_debugfs_files {
{"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_ring_stop", &i915_ring_stop_fops},
+ {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+ {"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
{"i915_error_state", &i915_error_state_fops},
{"i915_next_seqno", &i915_next_seqno_fops},
+ {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
};
+void intel_display_crc_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+ pipe_crc->opened = false;
+ spin_lock_init(&pipe_crc->lock);
+ init_waitqueue_head(&pipe_crc->wq);
+ }
+}
+
int i915_debugfs_init(struct drm_minor *minor)
{
int ret, i;
@@ -2291,6 +3121,12 @@ int i915_debugfs_init(struct drm_minor *minor)
if (ret)
return ret;
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ret = i915_debugfs_create(minor->debugfs_root, minor,
i915_debugfs_files[i].name,
@@ -2310,8 +3146,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
+
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
+
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *)&i915_pipe_crc_data[i];
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
+
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
struct drm_info_list *info_list =
(struct drm_info_list *) i915_debugfs_files[i].fops;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5c784d4867..0cab2d04513 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
- intel_ring_advance(LP_RING(dev_priv))
+ __intel_ring_advance(LP_RING(dev_priv))
/**
* Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
- sizeof(struct drm_clip_rect),
+ sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
- sizeof(struct drm_clip_rect), GFP_KERNEL);
+ sizeof(*cliprects), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
- value = dev->pci_device;
+ value = dev->pdev->device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
@@ -1311,13 +1311,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ intel_power_domains_init_hw(dev);
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = i915_gem_init(dev);
if (ret)
- goto cleanup_irq;
+ goto cleanup_power;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
@@ -1325,9 +1327,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
- dev->vblank_disable_allowed = 1;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ dev->vblank_disable_allowed = true;
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ intel_display_power_put(dev, POWER_DOMAIN_VGA);
return 0;
+ }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1362,7 +1366,8 @@ cleanup_gem:
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_irq:
+cleanup_power:
+ intel_display_power_put(dev, POWER_DOMAIN_VGA);
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@@ -1398,6 +1403,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
+#ifdef CONFIG_DRM_I915_FBDEV
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@@ -1418,6 +1424,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
@@ -1459,17 +1470,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
/* Refuse to load on gen6+ without kms enabled. */
- if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+ DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
return -ENODEV;
+ }
- /* i915 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -1494,6 +1501,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+ intel_display_crc_init(dev);
+
i915_dump_device_info(dev_priv);
/* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1540,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_early_sanitize(dev);
- if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
- /* The docs do not explain exactly how the calculation can be
- * made. It is somewhat guessable, but for now, it's always
- * 128MB.
- * NB: We can't write IDICR yet because we do not have gt funcs
- * set up */
- dev_priv->ellc_size = 128;
- DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
- }
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
+ intel_uncore_init(dev);
ret = i915_gem_gtt_init(dev);
if (ret)
- goto put_bridge;
+ goto out_regs;
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1576,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
aperture_size);
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
- goto out_rmmap;
+ goto out_gtt;
}
dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev);
-
intel_irq_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);
- intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1640,13 +1640,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
if (HAS_POWER_WELL(dev))
- i915_init_power_well(dev);
+ intel_power_domains_init(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_gem_unload;
+ goto out_power_well;
}
} else {
/* Start out suspended in ums mode. */
@@ -1666,6 +1666,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
+out_power_well:
+ if (HAS_POWER_WELL(dev))
+ intel_power_domains_remove(dev);
+ drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1683,18 @@ out_gem_unload:
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+ list_del(&dev_priv->gtt.base.global_link);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
-out_rmmap:
+out_regs:
+ intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
kfree(dev_priv);
return ret;
}
@@ -1700,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
- intel_set_power_well(dev, true);
- i915_remove_power_well(dev);
+ intel_display_set_init_power(dev, true);
+ intel_power_domains_remove(dev);
}
i915_teardown_sysfs(dev);
@@ -1709,15 +1719,9 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
- mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
- i915_gem_retire_requests(dev);
- mutex_unlock(&dev->struct_mutex);
-
- /* Cancel the retire work handler, which should be idle now. */
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1778,8 @@ int i915_driver_unload(struct drm_device *dev)
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_mm_takedown(&dev_priv->gtt.base.mm);
- if (dev_priv->regs != NULL)
- pci_iounmap(dev->pdev, dev_priv->regs);
+
+ drm_vblank_cleanup(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1785,6 +1789,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+ intel_uncore_fini(dev);
+ if (dev_priv->regs != NULL)
+ pci_iounmap(dev->pdev, dev_priv->regs);
+
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@@ -1796,19 +1804,11 @@ int i915_driver_unload(struct drm_device *dev)
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- spin_lock_init(&file_priv->mm.lock);
- INIT_LIST_HEAD(&file_priv->mm.request_list);
+ int ret;
- idr_init(&file_priv->context_idr);
+ ret = i915_gem_open(dev, file);
+ if (ret)
+ return ret;
return 0;
}
@@ -1836,7 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
return;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_fb_restore_mode(dev);
+ intel_fbdev_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ad27880cd0..989be12cdd6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_845g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i85x_info = {
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i865g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i965g_info = {
.gen = 4, .is_broadwater = 1, .num_pipes = 2,
.has_hotplug = 1,
.has_overlay = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_g33_info = {
.gen = 3, .is_g33 = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
- .has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
- .has_force_wake = 1,
};
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
- .has_bsd_ring = 1, \
- .has_blt_ring = 1, \
- .has_llc = 1, \
- .has_force_wake = 1
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_llc = 1
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
- .has_vebox_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,25 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
- .has_vebox_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+};
+
+static const struct intel_device_info intel_broadwell_d_info = {
+ .is_preliminary = 1,
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+ .is_preliminary = 1,
+ .gen = 8, .is_mobile = 1, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
};
/*
@@ -362,7 +385,9 @@ static const struct intel_device_info intel_haswell_m_info = {
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
- INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
+ INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
+ INTEL_BDW_D_IDS(&intel_broadwell_d_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -416,13 +441,19 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
- DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
+ } else if (IS_BROADWELL(dev)) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->pch_id =
+ INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ DRM_DEBUG_KMS("This is Broadwell, assuming "
+ "LynxPoint LP PCH\n");
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
@@ -447,6 +478,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return 0;
+ /* Until we get further testing... */
+ if (IS_GEN8(dev)) {
+ WARN_ON(!i915_preliminary_hw_support);
+ return 0;
+ }
+
if (i915_semaphores >= 0)
return i915_semaphores;
@@ -472,7 +509,7 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
- intel_set_power_well(dev, true);
+ intel_display_set_init_power(dev, true);
drm_kms_helper_poll_disable(dev);
@@ -482,9 +519,7 @@ static int i915_drm_freeze(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
- mutex_lock(&dev->struct_mutex);
- error = i915_gem_idle(dev);
- mutex_unlock(&dev->struct_mutex);
+ error = i915_gem_suspend(dev);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
@@ -578,11 +613,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
drm_helper_hpd_irq_event(dev);
}
-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
+ intel_uncore_early_sanitize(dev);
+
+ intel_uncore_sanitize(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ restore_gtt_mappings) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ intel_power_domains_init_hw(dev);
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -642,20 +690,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
static int i915_drm_thaw(struct drm_device *dev)
{
- int error = 0;
-
- intel_uncore_sanitize(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- } else if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_check_and_clear_faults(dev);
- __i915_drm_thaw(dev);
-
- return error;
+ return __i915_drm_thaw(dev, true);
}
int i915_resume(struct drm_device *dev)
@@ -671,20 +709,12 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_uncore_sanitize(dev);
-
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
- * earlier) need this since the BIOS might clear all our scratch PTEs.
+ * earlier) need to restore the GTT mappings since the BIOS might clear
+ * all our scratch PTEs.
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
- !dev_priv->opregion.header) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- ret = __i915_drm_thaw(dev);
+ ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (ret)
return ret;
@@ -722,24 +752,19 @@ int i915_reset(struct drm_device *dev)
simulated = dev_priv->gpu_error.stop_rings != 0;
- if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
- DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
- ret = -ENODEV;
- } else {
- ret = intel_gpu_reset(dev);
-
- /* Also reset the gpu hangman. */
- if (simulated) {
- DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->gpu_error.stop_rings = 0;
- if (ret == -ENODEV) {
- DRM_ERROR("Reset not implemented, but ignoring "
- "error for simulated gpu hangs\n");
- ret = 0;
- }
- } else
- dev_priv->gpu_error.last_reset = get_seconds();
+ ret = intel_gpu_reset(dev);
+
+ /* Also reset the gpu hangman. */
+ if (simulated) {
+ DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->gpu_error.stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
}
+
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@@ -762,30 +787,17 @@ int i915_reset(struct drm_device *dev)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
- struct intel_ring_buffer *ring;
- int i;
-
+ bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
dev_priv->ums.mm_suspended = 0;
- i915_gem_init_swizzling(dev);
-
- for_each_ring(ring, dev_priv, i)
- ring->init(ring);
-
- i915_gem_context_init(dev);
- if (dev_priv->mm.aliasing_ppgtt) {
- ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
- if (ret)
- i915_gem_cleanup_aliasing_ppgtt(dev);
- }
-
- /*
- * It would make sense to re-init all the other hw state, at
- * least the rps/rc6/emon init done within modeset_init_hw. For
- * some unknown reason, this blows up my ilk, so don't.
- */
-
+ ret = i915_gem_init_hw(dev);
+ if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
+ DRM_ERROR("HW contexts didn't survive reset\n");
mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("Failed hw init on reset %d\n", ret);
+ return ret;
+ }
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -802,6 +814,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
+ if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+ DRM_INFO("This hardware requires preliminary hardware support.\n"
+ "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ return -ENODEV;
+ }
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
@@ -949,7 +967,6 @@ static struct drm_driver driver = {
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
- .gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ab0f2c0a440..ccdbecca070 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
#define DRIVER_DATE "20080730"
enum pipe {
+ INVALID_PIPE = -1,
PIPE_A = 0,
PIPE_B,
PIPE_C,
@@ -98,13 +99,29 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
};
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP))
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
enum hpd_pin {
HPD_NONE = 0,
@@ -225,9 +242,12 @@ struct intel_opregion {
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
+ struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)
@@ -285,6 +305,7 @@ struct drm_i915_error_state {
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
+ u32 bbstate[I915_NUM_RINGS];
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +342,13 @@ struct drm_i915_error_state {
u32 dirty:1;
u32 purgeable:1;
s32 ring:4;
- u32 cache_level:2;
+ u32 cache_level:3;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
+ int hangcheck_score[I915_NUM_RINGS];
+ enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_crtc_config;
@@ -357,7 +380,7 @@ struct drm_i915_display_funcs {
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
- void (*update_wm)(struct drm_device *dev);
+ void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
@@ -367,7 +390,6 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
- void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@@ -375,7 +397,8 @@ struct drm_i915_display_funcs {
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
- struct drm_crtc *crtc);
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +418,20 @@ struct drm_i915_display_funcs {
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+ uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+ uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+ uint32_t val, bool trace);
+ void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+ uint64_t val, bool trace);
};
struct intel_uncore {
@@ -404,6 +441,8 @@ struct intel_uncore {
unsigned fifo_count;
unsigned forcewake_count;
+
+ struct delayed_work force_wake_work;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +459,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
- func(has_force_wake) sep \
+ func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
@@ -428,9 +467,6 @@ struct intel_uncore {
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
- func(has_bsd_ring) sep \
- func(has_blt_ring) sep \
- func(has_vebox_ring) sep \
func(has_llc) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
@@ -442,6 +478,7 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 gen;
+ u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
@@ -542,10 +579,21 @@ struct i915_gtt {
struct i915_hw_ppgtt {
struct i915_address_space base;
unsigned num_pd_entries;
- struct page **pt_pages;
- uint32_t pd_offset;
- dma_addr_t *pt_dma_addr;
-
+ union {
+ struct page **pt_pages;
+ struct page *gen8_pt_pages;
+ };
+ struct page *pd_pages;
+ int num_pd_pages;
+ int num_pt_pages;
+ union {
+ uint32_t pd_offset;
+ dma_addr_t pd_dma_addr[4];
+ };
+ union {
+ dma_addr_t *pt_dma_addr;
+ dma_addr_t *gen8_pt_dma_addr[4];
+ };
int (*enable)(struct drm_device *dev);
};
@@ -570,6 +618,13 @@ struct i915_vma {
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
};
struct i915_ctx_hang_stats {
@@ -578,6 +633,12 @@ struct i915_ctx_hang_stats {
/* This context had batch active when hang was declared */
unsigned batch_active;
+
+ /* Time when this context was last blamed for a GPU reset */
+ unsigned long guilty_ts;
+
+ /* This context is banned to submit more work */
+ bool banned;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -586,10 +647,13 @@ struct i915_hw_context {
struct kref ref;
int id;
bool is_initialized;
+ uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
+
+ struct list_head link;
};
struct i915_fbc {
@@ -623,17 +687,9 @@ struct i915_fbc {
} no_fbc_reason;
};
-enum no_psr_reason {
- PSR_NO_SOURCE, /* Not supported on platform */
- PSR_NO_SINK, /* Not supported by panel */
- PSR_MODULE_PARAM,
- PSR_CRTC_NOT_ACTIVE,
- PSR_PWR_WELL_ENABLED,
- PSR_NOT_TILED,
- PSR_SPRITE_ENABLED,
- PSR_S3D_ENABLED,
- PSR_INTERLACED_ENABLED,
- PSR_HSW_NOT_DDIA,
+struct i915_psr {
+ bool sink_support;
+ bool source_ok;
};
enum intel_pch {
@@ -704,6 +760,9 @@ struct i915_suspend_saved_registers {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_HIST_CTL_B;
+ u32 saveBLC_PWM_CTL_B;
+ u32 saveBLC_PWM_CTL2_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -823,17 +882,20 @@ struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
- /* On vlv we need to manually drop to Vmin with a delayed work. */
- struct delayed_work vlv_work;
-
/* The below variables an all the rps hw state are protected by
* dev->struct mutext. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
+ u8 rp1_delay;
+ u8 rp0_delay;
u8 hw_max;
+ int last_adj;
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ bool enabled;
struct delayed_work delayed_resume_work;
/*
@@ -870,11 +932,21 @@ struct intel_ilk_power_mgmt {
/* Power well structure for haswell */
struct i915_power_well {
- struct drm_device *device;
- spinlock_t lock;
/* power well enable/disable usage count */
int count;
- int i915_request;
+};
+
+#define I915_MAX_POWER_WELLS 1
+
+struct i915_power_domains {
+ /*
+ * Power wells needed for initialization at driver init and suspend
+ * time are on. They are kept on until after the first modeset.
+ */
+ bool init_power_on;
+
+ struct mutex lock;
+ struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
};
struct i915_dri1_state {
@@ -902,9 +974,11 @@ struct i915_ums_state {
int mm_suspended;
};
+#define MAX_L3_SLICES 2
struct intel_l3_parity {
- u32 *remap_info;
+ u32 *remap_info[MAX_L3_SLICES];
struct work_struct error_work;
+ int which_slice;
};
struct i915_gem_mm {
@@ -942,6 +1016,15 @@ struct i915_gem_mm {
struct delayed_work retire_work;
/**
+ * When we detect an idle GPU, we want to turn on
+ * powersaving features. So once we see that there
+ * are no more requests outstanding and no more
+ * arrive within a small period of time, we fire
+ * off the idle_work.
+ */
+ struct delayed_work idle_work;
+
+ /**
* Are we in a non-interruptible section of code like
* modesetting?
*/
@@ -979,6 +1062,9 @@ struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ /* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
struct timer_list hangcheck_timer;
/* For reset and error_state handling. */
@@ -987,7 +1073,8 @@ struct i915_gpu_error {
struct drm_i915_error_state *first_error;
struct work_struct work;
- unsigned long last_reset;
+
+ unsigned long missed_irq_rings;
/**
* State variable and reset counter controlling the reset flow
@@ -1027,6 +1114,9 @@ struct i915_gpu_error {
/* For gpu hang simulation. */
unsigned int stop_rings;
+
+ /* For missed irq/seqno simulation. */
+ unsigned int test_irq_rings;
};
enum modeset_restore {
@@ -1035,6 +1125,14 @@ enum modeset_restore {
MODESET_SUSPENDED,
};
+struct ddi_vbt_port_info {
+ uint8_t hdmi_level_shift;
+
+ uint8_t supports_dvi:1;
+ uint8_t supports_hdmi:1;
+ uint8_t supports_dp:1;
+};
+
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1060,10 +1158,17 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
+ /* MIPI DSI */
+ struct {
+ u16 panel_id;
+ } dsi;
+
int crt_ddc_pin;
int child_dev_num;
- struct child_device_config *child_dev;
+ union child_device_config *child_dev;
+
+ struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};
enum intel_ddb_partitioning {
@@ -1079,6 +1184,15 @@ struct intel_wm_level {
uint32_t fbc_val;
};
+struct hsw_wm_values {
+ uint32_t wm_pipe[3];
+ uint32_t wm_lp[3];
+ uint32_t wm_lp_spr[3];
+ uint32_t wm_linetime[3];
+ bool enable_fbc_wm;
+ enum intel_ddb_partitioning partitioning;
+};
+
/*
* This struct tracks the state needed for the Package C8+ feature.
*
@@ -1148,6 +1262,36 @@ struct i915_package_c8 {
} regsave;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PF,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+ uint32_t frame;
+ uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ bool opened; /* exclusive access to the result file */
+ struct intel_pipe_crc_entry *entries;
+ enum intel_pipe_crc_source source;
+ int head, tail;
+ wait_queue_head_t wq;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1193,7 +1337,10 @@ typedef struct drm_i915_private {
struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask;
+ union {
+ u32 irq_mask;
+ u32 de_irq_mask[I915_MAX_PIPES];
+ };
u32 gt_irq_mask;
u32 pm_irq_mask;
@@ -1272,6 +1419,10 @@ typedef struct drm_i915_private {
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
@@ -1297,17 +1448,18 @@ typedef struct drm_i915_private {
* mchdev_lock in intel_pm.c */
struct intel_ilk_power_mgmt ips;
- /* Haswell power well */
- struct i915_power_well power_well;
+ struct i915_power_domains power_domains;
- enum no_psr_reason no_psr_reason;
+ struct i915_psr psr;
struct i915_gpu_error gpu_error;
struct drm_i915_gem_object *vlv_pctx;
+#ifdef CONFIG_DRM_I915_FBDEV
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+#endif
/*
* The console may be contended at resume, but we don't
@@ -1320,6 +1472,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
+ struct list_head context_list;
u32 fdi_rx_config;
@@ -1337,6 +1490,9 @@ typedef struct drm_i915_private {
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
+
+ /* current hardware state */
+ struct hsw_wm_values hw;
} wm;
struct i915_package_c8 pc8;
@@ -1400,8 +1556,6 @@ struct drm_i915_gem_object {
struct list_head ring_list;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
- /** This object's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
/**
* This is set if the object is on the active lists (has pending
@@ -1487,13 +1641,6 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
@@ -1505,11 +1652,14 @@ struct drm_i915_gem_object {
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
/** User space pin count and filp owning the pin */
- uint32_t user_pin_count;
+ unsigned long user_pin_count;
struct drm_file *pin_filp;
/** for phy allocated objects */
@@ -1560,48 +1710,56 @@ struct drm_i915_gem_request {
};
struct drm_i915_file_private {
+ struct drm_i915_private *dev_priv;
+
struct {
spinlock_t lock;
struct list_head request_list;
+ struct delayed_work idle_work;
} mm;
struct idr context_idr;
struct i915_ctx_hang_stats hang_stats;
+ atomic_t rps_wait_boost;
};
#define INTEL_INFO(dev) (to_i915(dev)->info)
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
+#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
- (dev)->pci_device == 0x0152 || \
- (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
- (dev)->pci_device == 0x0106 || \
- (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
+ (dev)->pdev->device == 0x0152 || \
+ (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
+ (dev)->pdev->device == 0x0106 || \
+ (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
+#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- ((dev)->pci_device & 0xFF00) == 0x0C00)
+ ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
- ((dev)->pci_device & 0xFF00) == 0x0A00)
+ ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
+ ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
* The genX designation typically refers to the render engine, so render
@@ -1615,10 +1773,15 @@ struct drm_i915_file_private {
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
+#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
+
+#define RENDER_RING (1<<RCS)
+#define BSD_RING (1<<VCS)
+#define BLT_RING (1<<BCS)
+#define VEBOX_RING (1<<VECS)
+#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -1640,7 +1803,6 @@ struct drm_i915_file_private {
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@@ -1648,11 +1810,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_IPS(dev) (IS_ULT(dev))
+#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1668,35 +1832,14 @@ struct drm_i915_file_private {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
-#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
#include "i915_trace.h"
-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE (1<<0)
-#define INTEL_RC6p_ENABLE (1<<1)
-#define INTEL_RC6pp_ENABLE (1<<2)
-
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
@@ -1767,12 +1910,13 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1824,14 +1968,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1870,9 +2011,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -1913,7 +2053,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
}
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
@@ -1933,11 +2073,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
@@ -1964,6 +2104,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
@@ -1995,6 +2136,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2031,7 +2175,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
-#undef obj_to_ggtt
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
@@ -2094,6 +2237,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
unsigned cache_level,
bool mappable,
bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
@@ -2133,6 +2277,11 @@ int i915_verify_lists(struct drm_device *dev);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
/* i915_gpu_error.c */
__printf(2, 3)
@@ -2186,15 +2335,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
+struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+ pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+ return 0;
+}
#endif
/* intel_acpi.c */
@@ -2256,8 +2420,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2266,37 +2438,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
-#define __i915_read(x) \
- u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
- void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
+#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
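
The hunk above drops the fixed i915_read##x()/i915_write##x() helpers and routes I915_READ*/I915_WRITE* through per-platform function pointers held in dev_priv->uncore.funcs. A rough standalone illustration of that vtable dispatch pattern follows; every name in it (fake_device, plain_readl, FAKE_READ, etc.) is invented for the sketch and is not part of the driver:

#include <stdint.h>
#include <stdio.h>

struct mmio_funcs {
	uint32_t (*readl)(void *dev, uint32_t reg, int trace);
	void (*writel)(void *dev, uint32_t reg, uint32_t val, int trace);
};

struct fake_device {
	struct mmio_funcs funcs;   /* filled in per "platform" at init time */
	uint32_t regs[256];        /* stand-in for the real MMIO BAR */
};

static uint32_t plain_readl(void *dev, uint32_t reg, int trace)
{
	struct fake_device *d = dev;

	if (trace)
		printf("read  0x%02x\n", (unsigned int)reg);
	return d->regs[reg];
}

static void plain_writel(void *dev, uint32_t reg, uint32_t val, int trace)
{
	struct fake_device *d = dev;

	if (trace)
		printf("write 0x%02x = 0x%08x\n", (unsigned int)reg, (unsigned int)val);
	d->regs[reg] = val;
}

/* analogous to I915_READ()/I915_WRITE() after this patch */
#define FAKE_READ(d, reg)       ((d)->funcs.readl((d), (reg), 1))
#define FAKE_WRITE(d, reg, val) ((d)->funcs.writel((d), (reg), (val), 1))

int main(void)
{
	struct fake_device dev = { .funcs = { plain_readl, plain_writel } };

	FAKE_WRITE(&dev, 0x10, 0xdeadbeefu);
	printf("readback: 0x%08x\n", (unsigned int)FAKE_READ(&dev, 0x10));
	return 0;
}

Swapping the function pointers at init time is what lets a single set of register macros pick tracing, forcewake or platform-specific accessors without touching every call site.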
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da0e4c..12bbd5eac70 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force);
static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly);
+static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
@@ -61,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args)
{
/* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
args->size, &args->handle);
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
- if (i915_gem_obj_bound_any(obj)) {
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
}
ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
needs_clflush_after = cpu_write_needs_clflush(obj);
- if (i915_gem_obj_bound_any(obj)) {
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
ret = 0;
- if (seqno == ring->outstanding_lazy_request)
+ if (seqno == ring->outstanding_lazy_seqno)
ret = i915_add_request(ring, NULL);
return ret;
}
+static void fake_irq(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+ if (file_priv == NULL)
+ return true;
+
+ return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
/**
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned reset_counter,
- bool interruptible, struct timespec *timeout)
+ bool interruptible,
+ struct timespec *timeout,
+ struct drm_i915_file_private *file_priv)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- struct timespec before, now, wait_time={1,0};
- unsigned long timeout_jiffies;
- long end;
- bool wait_forever = true;
+ struct timespec before, now;
+ DEFINE_WAIT(wait);
+ long timeout_jiffies;
int ret;
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- trace_i915_gem_request_wait_begin(ring, seqno);
+ timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
- if (timeout != NULL) {
- wait_time = *timeout;
- wait_forever = false;
+ if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+ gen6_rps_boost(dev_priv);
+ if (file_priv)
+ mod_delayed_work(dev_priv->wq,
+ &file_priv->mm.idle_work,
+ msecs_to_jiffies(100));
}
- timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
- if (WARN_ON(!ring->irq_get(ring)))
+ if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+ WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
- /* Record current time in case interrupted by signal, or wedged * */
+ /* Record current time in case interrupted by signal, or wedged */
+ trace_i915_gem_request_wait_begin(ring, seqno);
getrawmonotonic(&before);
+ for (;;) {
+ struct timer_list timer;
+ unsigned long expire;
-#define EXIT_COND \
- (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- i915_reset_in_progress(&dev_priv->gpu_error) || \
- reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
- do {
- if (interruptible)
- end = wait_event_interruptible_timeout(ring->irq_queue,
- EXIT_COND,
- timeout_jiffies);
- else
- end = wait_event_timeout(ring->irq_queue, EXIT_COND,
- timeout_jiffies);
+ prepare_to_wait(&ring->irq_queue, &wait,
+ interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
- end = -EAGAIN;
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+ /* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truly gone. */
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ if (ret == 0)
+ ret = -EAGAIN;
+ break;
+ }
- /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
- * gone. */
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret)
- end = ret;
- } while (end == 0 && wait_forever);
+ if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ ret = 0;
+ break;
+ }
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ if (timeout_jiffies <= 0) {
+ ret = -ETIME;
+ break;
+ }
+
+ timer.function = NULL;
+ if (timeout || missed_irq(dev_priv, ring)) {
+ setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+ expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+ mod_timer(&timer, expire);
+ }
+
+ io_schedule();
+
+ if (timeout)
+ timeout_jiffies = expire - jiffies;
+
+ if (timer.function) {
+ del_singleshot_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
+ }
+ }
getrawmonotonic(&now);
+ trace_i915_gem_request_wait_end(ring, seqno);
ring->irq_put(ring);
- trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+ finish_wait(&ring->irq_queue, &wait);
if (timeout) {
struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
set_normalized_timespec(timeout, 0, 0);
}
- switch (end) {
- case -EIO:
- case -EAGAIN: /* Wedged */
- case -ERESTARTSYS: /* Signal */
- return (int)end;
- case 0: /* Timeout */
- return -ETIME;
- default: /* Completed */
- WARN_ON(end < 0); /* We're not aware of other errors */
- return 0;
- }
+ return ret;
}
/**
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
return __wait_seqno(ring, seqno,
atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL);
+ interruptible, NULL, NULL);
}
static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ struct drm_file *file,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
if (ret)
goto unref;
@@ -1690,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-static long
+static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
struct list_head still_bound_list;
struct drm_i915_gem_object *obj, *next;
- long count = 0;
+ unsigned long count = 0;
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
return count;
}
-static long
+static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
return __i915_gem_shrink(dev_priv, target, true);
}
-static long
+static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list) {
- if (obj->pages_pin_count == 0)
+ if (i915_gem_object_put_pages(obj) == 0)
freed += obj->base.size >> PAGE_SHIFT;
- i915_gem_object_put_pages(obj);
}
return freed;
}
@@ -1865,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg->length += PAGE_SIZE;
}
last_pfn = page_to_pfn(page);
+
+ /* Check that the i965g/gm workaround works. */
+ WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
#ifdef CONFIG_SWIOTLB
if (!swiotlb_nr_tbl())
@@ -1918,7 +1958,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
-void
+static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -1957,6 +1997,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
}
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_ring_buffer *ring)
+{
+ list_move_tail(&vma->mm_list, &vma->vm->active_list);
+ return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
@@ -2078,11 +2125,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
+ request = ring->preallocated_lazy_request;
+ if (WARN_ON(request == NULL))
return -ENOMEM;
-
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring);
- if (ret) {
- kfree(request);
+ if (ret)
return ret;
- }
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
- request->ctx = ring->last_context;
- request->batch_obj = obj;
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
+ request->batch_obj = obj;
+ /* Hold a reference to the current context so that we can inspect
+ * it later in case a hangcheck error event fires.
+ */
+ request->ctx = ring->last_context;
if (request->ctx)
i915_gem_context_reference(request->ctx);
@@ -2129,12 +2176,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
}
trace_i915_gem_request_add(ring, request->seqno);
- ring->outstanding_lazy_request = 0;
+ ring->outstanding_lazy_seqno = 0;
+ ring->preallocated_lazy_request = NULL;
if (!dev_priv->ums.mm_suspended) {
i915_queue_hangcheck(ring->dev);
if (was_empty) {
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
return;
spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
- list_del(&request->client_list);
- request->file_priv = NULL;
- }
+ list_del(&request->client_list);
+ request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
}
@@ -2224,6 +2271,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
return false;
}
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+ const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+ if (hs->banned)
+ return true;
+
+ if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+ DRM_ERROR("context hanging too fast, declaring banned!\n");
+ return true;
+ }
+
+ return false;
+}
+
static void i915_set_reset_status(struct intel_ring_buffer *ring,
struct drm_i915_gem_request *request,
u32 acthd)
@@ -2260,10 +2322,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
hs = &request->file_priv->hang_stats;
if (hs) {
- if (guilty)
+ if (guilty) {
+ hs->banned = i915_context_is_banned(hs);
hs->batch_active++;
- else
+ hs->guilty_ts = get_seconds();
+ } else {
hs->batch_pending++;
+ }
}
}
@@ -2341,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_cleanup_ringbuffer(dev);
+
i915_gem_restore_fences(dev);
}
@@ -2405,57 +2472,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
WARN_ON(i915_verify_lists(ring->dev));
}
-void
+bool
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ bool idle = true;
int i;
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
+ idle &= list_empty(&ring->request_list);
+ }
+
+ if (idle)
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->mm.idle_work,
+ msecs_to_jiffies(100));
+
+ return idle;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
- struct intel_ring_buffer *ring;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), mm.retire_work.work);
+ struct drm_device *dev = dev_priv->dev;
bool idle;
- int i;
-
- dev_priv = container_of(work, drm_i915_private_t,
- mm.retire_work.work);
- dev = dev_priv->dev;
/* Come back later if the device is busy... */
- if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- return;
- }
-
- i915_gem_retire_requests(dev);
-
- /* Send a periodic flush down the ring so we don't hold onto GEM
- * objects indefinitely.
- */
- idle = true;
- for_each_ring(ring, dev_priv, i) {
- if (ring->gpu_caches_dirty)
- i915_add_request(ring, NULL);
-
- idle &= list_empty(&ring->request_list);
+ idle = false;
+ if (mutex_trylock(&dev->struct_mutex)) {
+ idle = i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
}
-
- if (!dev_priv->ums.mm_suspended && !idle)
+ if (!idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
- if (idle)
- intel_mark_idle(dev);
+}
- mutex_unlock(&dev->struct_mutex);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+ intel_mark_idle(dev_priv->dev);
}
/**
@@ -2553,7 +2616,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
if (timeout)
args->timeout_ns = timespec_to_ns(timeout);
return ret;
@@ -2600,6 +2663,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ trace_i915_gem_ring_sync_to(from, to, seqno);
ret = to->sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_is_singular(&obj->vma_list));
+
if (list_empty(&vma->vma_link))
return 0;
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ i915_gem_vma_destroy(vma);
+
+ return 0;
+ }
if (obj->pin_count)
return -EBUSY;
@@ -2685,13 +2755,10 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_mm_remove_node(&vma->node);
-destroy:
i915_gem_vma_destroy(vma);
/* Since the unbound list is global, only move to that list if
- * no more VMAs exist.
- * NB: Until we have real VMAs there will only ever be one */
- WARN_ON(!list_empty(&obj->vma_list));
+ * no more VMAs exist. */
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
@@ -2887,6 +2954,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
obj->stride, obj->tiling_mode);
switch (INTEL_INFO(dev)->gen) {
+ case 8:
case 7:
case 6:
case 5:
@@ -3389,8 +3457,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj)) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
if (vma)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3828,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3865,6 +3932,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ if (obj->user_pin_count == ULONG_MAX) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (obj->user_pin_count == 0) {
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
@@ -4015,7 +4087,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
- INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
@@ -4087,13 +4158,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return obj;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4211,20 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
+
+ return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -4169,76 +4244,103 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
return vma;
}
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
+
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
+
+ /* Keep the vma as a placeholder in the execbuffer reservation lists */
+ if (!list_empty(&vma->exec_list))
+ return;
+
list_del(&vma->vma_link);
+
kfree(vma);
}
int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
- if (dev_priv->ums.mm_suspended) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->ums.mm_suspended)
+ goto err;
ret = i915_gpu_idle(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto err;
+
i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
- /* Cancel the retire work handler, which should be idle now. */
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound ums.mm_suspended!
+ */
+ dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+ DRIVER_MODESET);
+ mutex_unlock(&dev->struct_mutex);
+
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
return 0;
+
+err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 misccpctl;
- int i;
-
- if (!HAS_L3_GPU_CACHE(dev))
- return;
+ u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+ u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+ int i, ret;
- if (!dev_priv->l3_parity.remap_info)
- return;
+ if (!HAS_L3_DPF(dev) || !remap_info)
+ return 0;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- POSTING_READ(GEN7_MISCCPCTL);
+ ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+ if (ret)
+ return ret;
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
- u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
- if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
- DRM_DEBUG("0x%x was already programmed to %x\n",
- GEN7_L3LOG_BASE + i, remap);
- if (remap && !dev_priv->l3_parity.remap_info[i/4])
- DRM_DEBUG_DRIVER("Clearing remapped register\n");
- I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, reg_base + i);
+ intel_ring_emit(ring, remap_info[i/4]);
}
- /* Make sure all the writes land before disabling dop clock gating */
- POSTING_READ(GEN7_L3LOG_BASE);
+ intel_ring_advance(ring);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ return ret;
}
void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4260,6 +4362,8 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (IS_GEN7(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else if (IS_GEN8(dev))
+ I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
@@ -4330,7 +4434,7 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
@@ -4338,20 +4442,26 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+ if (IS_HSW_GT3(dev))
+ I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+ else
+ I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
}
- i915_gem_l3_remap(dev);
-
i915_gem_init_swizzling(dev);
ret = i915_gem_init_rings(dev);
if (ret)
return ret;
+ for (i = 0; i < NUM_L3_SLICES(dev); i++)
+ i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
@@ -4454,26 +4564,12 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
drm_irq_uninstall(dev);
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_idle(dev);
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound ums.mm_suspended!
- */
- if (ret != 0)
- dev_priv->ums.mm_suspended = 1;
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return i915_gem_suspend(dev);
}
void
@@ -4484,11 +4580,9 @@ i915_gem_lastclose(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_idle(dev);
+ ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
- mutex_unlock(&dev->struct_mutex);
}
static void
@@ -4523,6 +4617,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
+ INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4627,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+ i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4679,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
if (dev_priv->mm.phys_objs[id - 1] || !size)
return 0;
- phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+ phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
if (!phys_obj)
return -ENOMEM;
@@ -4756,6 +4853,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
@@ -4773,6 +4872,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+ struct drm_i915_file_private *file_priv =
+ container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+ atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+ file_priv->dev_priv = dev->dev_private;
+
+ spin_lock_init(&file_priv->mm.lock);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
+ INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+ i915_gem_file_idle_work_handler);
+
+ idr_init(&file_priv->context_idr);
+
+ return 0;
+}
+
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
@@ -4823,6 +4954,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
if (unlock)
mutex_unlock(&dev->struct_mutex);
+
return count;
}
@@ -4859,11 +4991,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
- struct i915_address_space *vm;
+ struct i915_vma *vma;
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- if (i915_gem_obj_bound(o, vm))
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
return true;
return false;
@@ -4895,7 +5026,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
- int nr_to_scan = sc->nr_to_scan;
unsigned long freed;
bool unlock = true;
@@ -4909,38 +5039,30 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
unlock = false;
}
- freed = i915_gem_purge(dev_priv, nr_to_scan);
- if (freed < nr_to_scan)
- freed += __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (freed < nr_to_scan)
+ freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+ if (freed < sc->nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ false);
+ if (freed < sc->nr_to_scan)
freed += i915_gem_shrink_all(dev_priv);
if (unlock)
mutex_unlock(&dev->struct_mutex);
+
return freed;
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm)
- return vma;
- return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
+ if (WARN_ON(list_empty(&obj->vma_list)))
+ return NULL;
- vma = i915_gem_obj_to_vma(obj, vm);
- if (!vma)
- vma = i915_gem_vma_create(obj, vm);
+ vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+ if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+ return NULL;
return vma;
}
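
In the i915_gem.c changes above, i915_context_is_banned() bans a context that is blamed for a second GPU hang within DRM_I915_CTX_BAN_PERIOD seconds of its previous guilty hang (DIV_ROUND_UP(8 * 1500, 1000), i.e. 12 s at the default hangcheck period). A rough standalone sketch of that sliding-window check, using illustrative names (hang_stats, record_guilty_hang) rather than the driver's own:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BAN_PERIOD_SECONDS 12  /* DIV_ROUND_UP(8 * DRM_I915_HANGCHECK_PERIOD, 1000) */

struct hang_stats {
	unsigned long guilty_ts;   /* seconds; when this context was last blamed */
	unsigned int batch_active; /* hangs with this context's batch active */
	bool banned;
};

static bool context_is_banned(const struct hang_stats *hs, unsigned long now)
{
	if (hs->banned)
		return true;

	/* a second guilty hang inside the window bans further submission */
	return now - hs->guilty_ts <= BAN_PERIOD_SECONDS;
}

static void record_guilty_hang(struct hang_stats *hs, unsigned long now)
{
	hs->banned = context_is_banned(hs, now);
	hs->batch_active++;
	hs->guilty_ts = now;
}

int main(void)
{
	struct hang_stats hs = { 0 };
	unsigned long t = (unsigned long)time(NULL);

	record_guilty_hang(&hs, t);      /* first hang: well outside the window */
	printf("after 1st hang: banned=%d\n", hs.banned);

	record_guilty_hang(&hs, t + 5);  /* second hang 5 s later: banned */
	printf("after 2nd hang: banned=%d\n", hs.banned);
	return 0;
}
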
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c2a7d..72a3df32292 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
- * GPU. The GPU has loaded it's state already and has stored away the gtt
+ * GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
@@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
+ case 8:
+ ret = GEN8_CXT_TOTAL_SIZE;
+ break;
default:
BUG();
}
@@ -129,6 +132,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
@@ -147,6 +151,7 @@ create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +171,7 @@ create_hw_context(struct drm_device *dev,
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
+ list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
@@ -178,6 +184,10 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
ctx->id = ret;
+ /* NB: Mark all slices as needing a remap so that when the context first
+ * loads it will restore whatever remap state already exists. If there
+ * is no remap info, it will be a NOP. */
+ ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
return ctx;
@@ -213,7 +223,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
- dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +235,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
goto err_unpin;
}
+ dev_priv->ring[RCS].default_context = ctx;
+
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
@@ -281,16 +292,24 @@ void i915_gem_context_fini(struct drm_device *dev)
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
- i915_gem_object_unpin(dctx->obj);
-
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
- drm_gem_object_unreference(&dctx->obj->base);
+ WARN_ON(!dev_priv->ring[RCS].last_context);
+ if (dev_priv->ring[RCS].last_context == dctx) {
+ /* Fake switch to NULL context */
+ WARN_ON(dctx->obj->active);
+ i915_gem_object_unpin(dctx->obj);
+ i915_gem_context_unreference(dctx);
+ }
+
+ i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].default_context = NULL;
+ dev_priv->ring[RCS].last_context = NULL;
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +412,11 @@ static int do_switch(struct i915_hw_context *to)
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
- int ret;
+ int ret, i;
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
- if (from == to)
+ if (from == to && !to->remap_slice)
return 0;
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +439,6 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
- else if (WARN_ON_ONCE(from == to)) /* not yet expected */
- hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
@@ -429,6 +446,18 @@ static int do_switch(struct i915_hw_context *to)
return ret;
}
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to->remap_slice & (1<<i)))
+ continue;
+
+ ret = i915_gem_l3_remap(ring, i);
+ /* If it failed, try again next round */
+ if (ret)
+ DRM_DEBUG_DRIVER("L3 remapping failed\n");
+ else
+ to->remap_slice &= ~(1<<i);
+ }
+
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
@@ -436,11 +465,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
- i915_gem_object_move_to_active(from->obj, ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -451,17 +477,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
- ret = i915_add_request(ring, NULL);
- if (ret) {
- /* Too late, we've already scheduled a context switch.
- * Try to undo the change so that the hw state is
- * consistent with out tracking. In case of emergency,
- * scream.
- */
- WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
- return ret;
- }
-
+ /* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
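The remap_slice changes above mark every L3 slice as pending at context creation and clear one bit per successful remap during do_switch(), so a failed slice is simply retried on the next switch. A small userspace sketch of that bookkeeping, with NUM_SLICES and try_l3_remap() standing in for NUM_L3_SLICES()/i915_gem_l3_remap():

/* Sketch of the pending-slice bitmask: bits are only cleared on success. */
#include <stdio.h>

#define NUM_SLICES 2

/* Pretend the remap of slice 1 fails on the first pass. */
static int try_l3_remap(int slice, int attempt)
{
	return (slice == 1 && attempt == 0) ? -1 : 0;
}

int main(void)
{
	unsigned int remap_slice = (1u << NUM_SLICES) - 1;  /* all slices pending */
	int attempt;

	for (attempt = 0; remap_slice && attempt < 3; attempt++) {
		int i;

		for (i = 0; i < NUM_SLICES; i++) {
			if (!(remap_slice & (1u << i)))
				continue;                    /* already remapped */
			if (try_l3_remap(i, attempt) == 0)
				remap_slice &= ~(1u << i);   /* success: clear bit */
			/* on failure the bit stays set and is retried next switch */
		}
		printf("after switch %d: pending mask = 0x%x\n", attempt, remap_slice);
	}
	return 0;
}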
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b70015585..b7376533633 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
if (vma->obj->pin_count)
return false;
+ if (WARN_ON(!list_empty(&vma->exec_list)))
+ return false;
+
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
@@ -113,7 +116,7 @@ none:
}
/* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_everything() is unnecessary.
+ * So calling i915_gem_evict_vm() is unnecessary.
*/
return -ENOSPC;
@@ -152,12 +155,48 @@ found:
return ret;
}
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants
+ * fine-grained eviction, see i915_gem_evict_something() for more details. In
+ * terms of freeing up actual system memory, this function may not accomplish
+ * the desired result. An object may be shared across multiple address spaces,
+ * and this function will not ensure those objects are freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires
+ * and inactivates the current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+ struct i915_vma *vma, *next;
+ int ret;
+
+ trace_i915_gem_evict_vm(vm);
+
+ if (do_idle) {
+ ret = i915_gpu_idle(vm->dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(vm->dev);
+ }
+
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ if (vma->obj->pin_count == 0)
+ WARN_ON(i915_vma_unbind(vma));
+
+ return 0;
+}
+
int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm;
- struct i915_vma *vma, *next;
bool lists_empty = true;
int ret;
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
- if (vma->obj->pin_count == 0)
- WARN_ON(i915_vma_unbind(vma));
- }
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ WARN_ON(i915_gem_evict_vm(vm, false));
return 0;
}
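The new i915_gem_evict_vm() above has a simple ordering: optionally idle the GPU and retire requests first, then unbind every inactive VMA that is not pinned. A userspace sketch of that order (invented structures, not driver code):

/* Sketch: quiesce first (optional), then unbind only unpinned inactive VMAs. */
#include <stdio.h>

struct fake_vma {
	const char *name;
	int pin_count;
	int bound;
};

static void fake_unbind(struct fake_vma *vma)
{
	vma->bound = 0;
	printf("unbound %s\n", vma->name);
}

static void evict_vm(struct fake_vma *inactive, int n, int do_idle)
{
	int i;

	if (do_idle)
		printf("idle GPU and retire requests first\n");

	for (i = 0; i < n; i++) {
		if (inactive[i].pin_count == 0 && inactive[i].bound)
			fake_unbind(&inactive[i]);   /* pinned VMAs are left alone */
	}
}

int main(void)
{
	struct fake_vma vmas[] = {
		{ "batch",   0, 1 },
		{ "scanout", 1, 1 },    /* pinned: must survive eviction */
		{ "texture", 0, 1 },
	};

	evict_vm(vmas, 3, 1);
	return 0;
}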
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf345777ae9..885d595e0e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
-struct eb_objects {
- struct list_head objects;
+struct eb_vmas {
+ struct list_head vmas;
int and;
union {
- struct drm_i915_gem_object *lut[0];
+ struct i915_vma *lut[0];
struct hlist_head buckets[0];
};
};
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
{
- struct eb_objects *eb = NULL;
+ struct eb_vmas *eb = NULL;
if (args->flags & I915_EXEC_HANDLE_LUT) {
- int size = args->buffer_count;
- size *= sizeof(struct drm_i915_gem_object *);
- size += sizeof(struct eb_objects);
+ unsigned size = args->buffer_count;
+ size *= sizeof(struct i915_vma *);
+ size += sizeof(struct eb_vmas);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
if (eb == NULL) {
- int size = args->buffer_count;
- int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ unsigned size = args->buffer_count;
+ unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
- sizeof(struct eb_objects),
+ sizeof(struct eb_vmas),
GFP_TEMPORARY);
if (eb == NULL)
return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
} else
eb->and = -args->buffer_count;
- INIT_LIST_HEAD(&eb->objects);
+ INIT_LIST_HEAD(&eb->vmas);
return eb;
}
static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static int
-eb_lookup_objects(struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- const struct drm_i915_gem_execbuffer2 *args,
- struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct i915_address_space *vm,
+ struct drm_file *file)
{
- int i;
+ struct drm_i915_gem_object *obj;
+ struct list_head objects;
+ int i, ret = 0;
+ INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
+ /* Grab a reference to the object and release the lock so we can lookup
+ * or create the VMA without using GFP_ATOMIC */
for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out;
}
- if (!list_empty(&obj->exec_list)) {
+ if (!list_empty(&obj->obj_exec_link)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
drm_gem_object_reference(&obj->base);
- list_add_tail(&obj->exec_list, &eb->objects);
+ list_add_tail(&obj->obj_exec_link, &objects);
+ }
+ spin_unlock(&file->table_lock);
- obj->exec_entry = &exec[i];
+ i = 0;
+ list_for_each_entry(obj, &objects, obj_exec_link) {
+ struct i915_vma *vma;
+
+ /*
+ * NOTE: We can leak any vmas created here when something fails
+ * later on. But that's no issue since vma_unbind can deal with
+ * vmas which are not actually bound. And since only
+ * lookup_or_create exists as an interface to get at the vma
+ * from the (obj, vm) we don't run the risk of creating
+ * duplicated vmas for the same vm.
+ */
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG("Failed to lookup VMA\n");
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ list_add_tail(&vma->exec_list, &eb->vmas);
+
+ vma->exec_entry = &exec[i];
if (eb->and < 0) {
- eb->lut[i] = obj;
+ eb->lut[i] = vma;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
- obj->exec_handle = handle;
- hlist_add_head(&obj->exec_node,
+ vma->exec_handle = handle;
+ hlist_add_head(&vma->exec_node,
&eb->buckets[handle & eb->and]);
}
+ ++i;
}
- spin_unlock(&file->table_lock);
- return 0;
+
+out:
+ while (!list_empty(&objects)) {
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+ list_del_init(&obj->obj_exec_link);
+ if (ret)
+ drm_gem_object_unreference(&obj->base);
+ }
+ return ret;
}
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
- if (obj->exec_handle == handle)
- return obj;
+ vma = hlist_entry(node, struct i915_vma, exec_node);
+ if (vma->exec_handle == handle)
+ return vma;
}
return NULL;
}
}
-static void
-eb_destroy(struct eb_objects *eb)
-{
- while (!list_empty(&eb->objects)) {
- struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb)
+{
+ while (!list_empty(&eb->vmas)) {
+ struct i915_vma *vma;
- obj = list_first_entry(&eb->objects,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&eb->vmas,
+ struct i915_vma,
exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ list_del_init(&vma->exec_list);
+ drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
- return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ return (HAS_LLC(obj->base.dev) ||
+ obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
!obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -175,17 +212,31 @@ static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc)
{
+ struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
int ret = -EINVAL;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
return ret;
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ *(uint32_t *)(vaddr + page_offset) = 0;
+ }
+
kunmap_atomic(vaddr);
return 0;
@@ -216,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
iowrite32(reloc->delta, reloc_entry);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ reloc_entry += 1;
+
+ if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+ io_mapping_unmap_atomic(reloc_page);
+ reloc_page = io_mapping_map_atomic_wc(
+ dev_priv->gtt.mappable,
+ reloc->offset + sizeof(uint32_t));
+ reloc_entry = reloc_page;
+ }
+
+ iowrite32(0, reloc_entry);
+ }
+
io_mapping_unmap_atomic(reloc_page);
return 0;
@@ -223,22 +289,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
+ struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
+ struct i915_vma *target_vma;
uint32_t target_offset;
int ret = -EINVAL;
/* we've already hold a reference to all valid objects */
- target_obj = &eb_get_object(eb, reloc->target_handle)->base;
- if (unlikely(target_obj == NULL))
+ target_vma = eb_get_vma(eb, reloc->target_handle);
+ if (unlikely(target_vma == NULL))
return -ENOENT;
+ target_i915_obj = target_vma->obj;
+ target_obj = &target_vma->obj->base;
- target_i915_obj = to_intel_bo(target_obj);
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -284,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return 0;
/* Check that the relocation address is valid... */
- if (unlikely(reloc->offset > obj->base.size - 4)) {
+ if (unlikely(reloc->offset >
+ obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
@@ -320,14 +389,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+ struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +414,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
- vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+ vma->vm);
if (ret)
return ret;
@@ -368,17 +436,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_relocation_entry *relocs)
{
- const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
- vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+ vma->vm);
if (ret)
return ret;
}
@@ -387,10 +454,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
struct i915_address_space *vm)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +468,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
* lockdep complains vehemently.
*/
pagefault_disable();
- list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
@@ -415,31 +482,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
{
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- return entry->relocation_count && !use_cpu_reloc(obj);
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+ i915_is_ggtt(vma->vm);
}
static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct i915_address_space *vm,
- bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
+ struct drm_i915_gem_object *obj = vma->obj;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
- ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+ ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
@@ -467,8 +535,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != i915_gem_obj_offset(obj, vm)) {
- entry->offset = i915_gem_obj_offset(obj, vm);
+ if (entry->offset != vma->node.start) {
+ entry->offset = vma->node.start;
*need_reloc = true;
}
@@ -485,14 +553,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
- if (!i915_gem_obj_bound_any(obj))
+ if (!drm_mm_node_allocated(&vma->node))
return;
- entry = obj->exec_entry;
+ entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
@@ -505,41 +574,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
- struct list_head *objects,
- struct i915_address_space *vm,
+ struct list_head *vmas,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
- struct list_head ordered_objects;
+ struct i915_vma *vma;
+ struct i915_address_space *vm;
+ struct list_head ordered_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
- INIT_LIST_HEAD(&ordered_objects);
- while (!list_empty(objects)) {
+ if (list_empty(vmas))
+ return 0;
+
+ vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+ INIT_LIST_HEAD(&ordered_vmas);
+ while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
- obj = list_first_entry(objects,
- struct drm_i915_gem_object,
- exec_list);
- entry = obj->exec_entry;
+ vma = list_first_entry(vmas, struct i915_vma, exec_list);
+ obj = vma->obj;
+ entry = vma->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
if (need_mappable)
- list_move(&obj->exec_list, &ordered_objects);
+ list_move(&vma->exec_list, &ordered_vmas);
else
- list_move_tail(&obj->exec_list, &ordered_objects);
+ list_move_tail(&vma->exec_list, &ordered_vmas);
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
- list_splice(&ordered_objects, objects);
+ list_splice(&ordered_vmas, vmas);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
@@ -558,52 +632,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
int ret = 0;
/* Unbind any ill-fitting objects or pin. */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;
- u32 obj_offset;
- if (!i915_gem_obj_bound(obj, vm))
+ obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
continue;
- obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
WARN_ON((need_mappable || need_fence) &&
- !i915_is_ggtt(vm));
+ !i915_is_ggtt(vma->vm));
if ((entry->alignment &&
- obj_offset & (entry->alignment - 1)) ||
+ vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+ ret = i915_vma_unbind(vma);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
- list_for_each_entry(obj, objects, exec_list) {
- if (i915_gem_obj_bound(obj, vm))
+ list_for_each_entry(vma, vmas, exec_list) {
+ if (drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
err: /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_execbuffer_unreserve_object(obj);
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
if (ret != -ENOSPC || retry++)
return ret;
- ret = i915_gem_evict_everything(ring->dev);
+ ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
} while (1);
@@ -614,24 +688,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
- struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- struct i915_address_space *vm)
+ struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
- struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
- int count = args->buffer_count;
+ unsigned count = args->buffer_count;
+
+ if (WARN_ON(list_empty(&eb->vmas)))
+ return 0;
+
+ vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(&eb->objects)) {
- obj = list_first_entry(&eb->objects,
- struct drm_i915_gem_object,
- exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ while (!list_empty(&eb->vmas)) {
+ vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+ list_del_init(&vma->exec_list);
+ drm_gem_object_unreference(&vma->obj->base);
}
mutex_unlock(&dev->struct_mutex);
@@ -695,20 +772,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
- list_for_each_entry(obj, &eb->objects, exec_list) {
- int offset = obj->exec_entry - exec;
- ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset],
- vm);
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ int offset = vma->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+ reloc + reloc_offset[offset]);
if (ret)
goto err;
}
@@ -727,14 +803,15 @@ err:
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
- struct list_head *objects)
+ struct list_head *vmas)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
@@ -771,8 +848,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
- int relocs_total = 0;
- int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+ unsigned relocs_total = 0;
+ unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +886,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
}
static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
@@ -825,9 +902,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- /* FIXME: This lookup gets fixed later <-- danvet */
- list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
- i915_gem_object_move_to_active(obj, ring);
+ i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +960,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct eb_objects *eb;
+ struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ struct i915_ctx_hang_stats *hs;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
@@ -1000,7 +1076,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
@@ -1025,7 +1102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args);
+ eb = eb_create(args, vm);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -1033,18 +1110,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(eb->objects.prev,
- struct drm_i915_gem_object,
- exec_list);
+ batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
@@ -1054,7 +1129,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec, vm);
+ eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1071,15 +1146,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
- * hsw should have this fixed, but let's be paranoid and do it
- * unconditionally for now. */
+ * hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
+ hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+ if (IS_ERR(hs)) {
+ ret = PTR_ERR(hs);
+ goto err;
+ }
+
+ if (hs->banned) {
+ ret = -EIO;
+ goto err;
+ }
+
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1131,7 +1216,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+ i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
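On gen8 the relocation paths above write a 64-bit value as two dwords (the delta, then a zero upper dword), so the bounds check shrinks to size - 8 and the second dword can land on the next page, which the GTT path handles by remapping. A standalone sketch of that arithmetic over a plain buffer; PAGE_SIZE and the buffer are illustrative, not the driver's:

/* Sketch: 8-byte relocation with a page-crossing check on the high dword. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((off) & (PAGE_SIZE - 1))

static int relocate_64bit(uint8_t *obj, size_t obj_size,
			  uint32_t offset, uint32_t delta)
{
	if (offset > obj_size - 8)	/* gen8: need room for both dwords */
		return -1;

	memcpy(obj + offset, &delta, sizeof(delta));             /* low dword */
	memset(obj + offset + sizeof(delta), 0, sizeof(delta));  /* high dword */

	/* The case the kernel handles by mapping the following page. */
	if (offset_in_page(offset + sizeof(delta)) == 0)
		printf("offset 0x%x: high dword starts a new page\n",
		       (unsigned)offset);
	return 0;
}

int main(void)
{
	static uint8_t obj[2 * PAGE_SIZE];

	relocate_64bit(obj, sizeof(obj), 100, 0x1000);            /* same page */
	relocate_64bit(obj, sizeof(obj), PAGE_SIZE - 4, 0x2000);  /* crosses */
	return 0;
}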
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7b4caefb6..3620a1b0a73 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,8 @@
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+typedef uint64_t gen8_gtt_pte_t;
+typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -57,6 +59,41 @@
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
+#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPS 4
+
+#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
+#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+
+static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ pte |= addr;
+ if (level != I915_CACHE_NONE)
+ pte |= PPAT_CACHED_INDEX;
+ else
+ pte |= PPAT_UNCACHED_INDEX;
+ return pte;
+}
+
+static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ pde |= addr;
+ if (level != I915_CACHE_NONE)
+ pde |= PPAT_CACHED_PDE_INDEX;
+ else
+ pde |= PPAT_UNCACHED_INDEX;
+ return pde;
+}
+
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
@@ -158,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+/* Broadwell Page Directory Pointer Descriptors */
+static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+ uint64_t val)
+{
+ int ret;
+
+ BUG_ON(entry >= 4);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+ intel_ring_emit(ring, (u32)(val >> 32));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+ intel_ring_emit(ring, (u32)(val));
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen8_ppgtt_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int i, j, ret;
+
+ /* bit of a hack to find the actual last used pd */
+ int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+
+ for_each_ring(ring, dev_priv, j) {
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ }
+
+ for (i = used_pd - 1; i >= 0; i--) {
+ dma_addr_t addr = ppgtt->pd_dma_addr[i];
+ for_each_ring(ring, dev_priv, j) {
+ ret = gen8_write_pdp(ring, i, addr);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ unsigned first_entry,
+ unsigned num_entries,
+ bool use_scratch)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
+ unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+ unsigned last_pte, i;
+
+ scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+ I915_CACHE_LLC, use_scratch);
+
+ while (num_entries) {
+ struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+
+ last_pte = first_pte + num_entries;
+ if (last_pte > GEN8_PTES_PER_PAGE)
+ last_pte = GEN8_PTES_PER_PAGE;
+
+ pt_vaddr = kmap_atomic(page_table);
+
+ for (i = first_pte; i < last_pte; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pt++;
+ }
+}
+
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *pages,
+ unsigned first_entry,
+ enum i915_cache_level cache_level)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_gtt_pte_t *pt_vaddr;
+ unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
+ unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+ struct sg_page_iter sg_iter;
+
+ pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ dma_addr_t page_addr;
+
+ page_addr = sg_dma_address(sg_iter.sg) +
+ (sg_iter.sg_pgoffset << PAGE_SHIFT);
+ pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
+ true);
+ if (++act_pte == GEN8_PTES_PER_PAGE) {
+ kunmap_atomic(pt_vaddr);
+ act_pt++;
+ pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ act_pte = 0;
+
+ }
+ }
+ kunmap_atomic(pt_vaddr);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ int i, j;
+
+ for (i = 0; i < ppgtt->num_pd_pages ; i++) {
+ if (ppgtt->pd_dma_addr[i]) {
+ pci_unmap_page(ppgtt->base.dev->pdev,
+ ppgtt->pd_dma_addr[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ if (addr)
+ pci_unmap_page(ppgtt->base.dev->pdev,
+ addr,
+ PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ }
+ }
+ kfree(ppgtt->gen8_pt_dma_addr[i]);
+ }
+
+ __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
+ __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
+ * net effect resembling a 2-level page table in normal x86 terms. Each PDP
+ * represents 1GB of memory; 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes =
+ * 4GB of legacy 32b address space.
+ *
+ * TODO: Do something with the size parameter
+ **/
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+ struct page *pt_pages;
+ int i, j, ret = -ENOMEM;
+ const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+ const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+
+ if (size % (1<<30))
+ DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+ /* FIXME: split allocation into smaller pieces. For now we only ever do
+ * this once, but with full PPGTT, the multiple contiguous allocations
+ * will be bad.
+ */
+ ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
+ if (!ppgtt->pd_pages)
+ return -ENOMEM;
+
+ pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
+ if (!pt_pages) {
+ __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
+ return -ENOMEM;
+ }
+
+ ppgtt->gen8_pt_pages = pt_pages;
+ ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+ ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
+ ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+ ppgtt->enable = gen8_ppgtt_enable;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+ /*
+ * - Create a mapping for the page directories.
+ * - For each page directory:
+ * allocate space for page table mappings.
+ * map each page table
+ */
+ for (i = 0; i < max_pdp; i++) {
+ dma_addr_t temp;
+ temp = pci_map_page(ppgtt->base.dev->pdev,
+ &ppgtt->pd_pages[i], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
+ goto err_out;
+
+ ppgtt->pd_dma_addr[i] = temp;
+
+ ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
+ if (!ppgtt->gen8_pt_dma_addr[i])
+ goto err_out;
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
+ temp = pci_map_page(ppgtt->base.dev->pdev,
+ p, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
+ goto err_out;
+
+ ppgtt->gen8_pt_dma_addr[i][j] = temp;
+ }
+ }
+
+ /* For now, the PPGTT helper functions all require that the PDEs are
+ * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
+ * will never need to touch the PDEs again */
+ for (i = 0; i < max_pdp; i++) {
+ gen8_ppgtt_pde_t *pd_vaddr;
+ pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
+ I915_CACHE_LLC);
+ }
+ kunmap_atomic(pd_vaddr);
+ }
+
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
+ true);
+
+ DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
+ ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
+ DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
+ ppgtt->num_pt_pages,
+ (ppgtt->num_pt_pages - num_pt_pages) +
+ size % (1<<30));
+ return 0;
+
+err_out:
+ ppgtt->base.cleanup(&ppgtt->base);
+ return ret;
+}
+
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -342,7 +630,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
- ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+ ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
return -ENOMEM;
@@ -353,7 +641,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
goto err_pt_alloc;
}
- ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+ ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
@@ -410,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
+ else if (IS_GEN8(dev))
+ ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
@@ -573,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
+static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+{
+#ifdef writeq
+ writeq(pte, addr);
+#else
+ iowrite32((u32)pte, addr);
+ iowrite32(pte >> 32, addr + 4);
+#endif
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level level)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ gen8_gtt_pte_t __iomem *gtt_entries =
+ (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ int i = 0;
+ struct sg_page_iter sg_iter;
+ dma_addr_t addr;
+
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+ addr = sg_dma_address(sg_iter.sg) +
+ (sg_iter.sg_pgoffset << PAGE_SHIFT);
+ gen8_set_pte(&gtt_entries[i],
+ gen8_pte_encode(addr, level, true));
+ i++;
+ }
+
+ /*
+ * XXX: This serves as a posting read to make sure that the PTE has
+ * actually been updated. There is some concern that, even though the
+ * registers and PTEs live within the same BAR, they may be subject to
+ * different access patterns (e.g. NUMA effects). Therefore, even with the
+ * way we assume the hardware should work, we keep this posting read for
+ * paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readq(&gtt_entries[i-1])
+ != gen8_pte_encode(addr, level, true));
+
+#if 0 /* TODO: Still needed on GEN8? */
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+#endif
+}
+
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
@@ -615,6 +956,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ unsigned int first_entry,
+ unsigned int num_entries,
+ bool use_scratch)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
+ (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = gen8_pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC,
+ use_scratch);
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+ readl(gtt_base);
+}
+
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
@@ -638,7 +1003,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
readl(gtt_base);
}
-
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
@@ -720,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
*end -= 4096;
}
}
+
void i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
@@ -817,7 +1182,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->gtt.base.mm);
- gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
+ if (INTEL_INFO(dev)->gen < 8)
+ gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -867,6 +1233,15 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 20;
}
+static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+ return bdw_gmch_ctl << 20;
+}
+
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -874,6 +1249,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 25; /* 32 MB units */
}
+static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
+ return bdw_gmch_ctl << 25; /* 32 MB units */
+}
+
+static int ggtt_probe_common(struct drm_device *dev,
+ size_t gtt_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ phys_addr_t gtt_bus_addr;
+ int ret;
+
+ /* For Modern GENs the PTEs and register space are split in the BAR */
+ gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+ (pci_resource_len(dev->pdev, 0) / 2);
+
+ dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ if (!dev_priv->gtt.gsm) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ return -ENOMEM;
+ }
+
+ ret = setup_scratch_page(dev);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(dev_priv->gtt.gsm);
+ }
+
+ return ret;
+}
+
+/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases. */
+static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+/* FIXME(BDW): Bspec is completely confused about cache control bits. */
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_AGE(x) (x<<4)
+#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+ uint64_t pat;
+
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+ /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
+ * write would work. */
+ I915_WRITE(GEN8_PRIVATE_PAT, pat);
+ I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
+static int gen8_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int gtt_size;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ *mappable_base = pci_resource_start(dev->pdev, 2);
+ *mappable_end = pci_resource_len(dev->pdev, 2);
+
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+
+ gen8_setup_private_ppat(dev_priv);
+
+ ret = ggtt_probe_common(dev, gtt_size);
+
+ dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+
+ return ret;
+}
+
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
@@ -881,7 +1358,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
@@ -901,24 +1377,13 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
-
- /* For Modern GENs the PTEs and register space are split in the BAR */
- gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
- (pci_resource_len(dev->pdev, 0) / 2);
- dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
- if (!dev_priv->gtt.gsm) {
- DRM_ERROR("Failed to map the gtt page table\n");
- return -ENOMEM;
- }
+ gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
- ret = setup_scratch_page(dev);
- if (ret)
- DRM_ERROR("Scratch setup failed\n");
+ ret = ggtt_probe_common(dev, gtt_size);
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@@ -972,7 +1437,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 5) {
gtt->gtt_probe = i915_gmch_probe;
gtt->base.cleanup = i915_gmch_remove;
- } else {
+ } else if (INTEL_INFO(dev)->gen < 8) {
gtt->gtt_probe = gen6_gmch_probe;
gtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
@@ -985,6 +1450,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = snb_pte_encode;
+ } else {
+ dev_priv->gtt.gtt_probe = gen8_gmch_probe;
+ dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
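The gen8 probe above decodes the GGMS field as 1 << n MiB of PTE space, with each 8-byte PTE mapping one 4 KiB page. A worked sketch of that sizing math; the shift semantics mirror gen8_get_total_gtt_size() in the diff and should be treated as illustrative rather than register documentation:

/* Sketch: GGMS field -> bytes of PTEs -> bytes of mappable GGTT space. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static uint64_t gen8_total_gtt_bytes(unsigned int ggms_field)
{
	uint64_t gtt_size = (uint64_t)(1u << ggms_field) << 20; /* bytes of PTEs */

	return (gtt_size / sizeof(uint64_t)) << PAGE_SHIFT;     /* mapped bytes */
}

int main(void)
{
	unsigned int ggms;

	for (ggms = 1; ggms <= 3; ggms++)
		printf("GGMS=%u -> %llu MiB of PTEs maps %llu GiB\n", ggms,
		       (unsigned long long)(1u << ggms),
		       (unsigned long long)(gen8_total_gtt_bytes(ggms) >> 30));
	return 0;
}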
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d90037..d284d892ed9 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_vma_create(obj, ggtt);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
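The stolen-memory path above now goes through i915_gem_obj_lookup_or_create_vma(): reuse an existing (object, address space) mapping if one exists, otherwise create it, so a given pair never ends up with two VMAs. A userspace sketch of that pattern with invented toy_* names:

/* Sketch: return the cached mapping when present, allocate only on miss. */
#include <stdio.h>
#include <stdlib.h>

struct toy_vma {
	int obj_id;
	int vm_id;
	struct toy_vma *next;
};

static struct toy_vma *vma_list;

static struct toy_vma *lookup_or_create_vma(int obj_id, int vm_id)
{
	struct toy_vma *vma;

	for (vma = vma_list; vma; vma = vma->next)
		if (vma->obj_id == obj_id && vma->vm_id == vm_id)
			return vma;              /* already have a mapping */

	vma = calloc(1, sizeof(*vma));           /* create it on first use */
	if (!vma)
		return NULL;
	vma->obj_id = obj_id;
	vma->vm_id = vm_id;
	vma->next = vma_list;
	vma_list = vma;
	return vma;
}

int main(void)
{
	struct toy_vma *a = lookup_or_create_vma(1, 0);
	struct toy_vma *b = lookup_or_create_vma(1, 0);

	printf("second lookup reused the first vma: %s\n", a == b ? "yes" : "no");
	return 0;
}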
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef9c89..b1390534804 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (obj->pin_count) {
+ if (obj->pin_count || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
- obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
int i;
if (obj->bit_17 == NULL) {
- obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+ sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f0028..79dcb8f896c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+ switch (a) {
+ case HANGCHECK_IDLE:
+ return "idle";
+ case HANGCHECK_WAIT:
+ return "wait";
+ case HANGCHECK_ACTIVE:
+ return "active";
+ case HANGCHECK_KICK:
+ return "kick";
+ case HANGCHECK_HUNG:
+ return "hung";
+ }
+
+ return "unknown";
+}
+
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
-
+ if (INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
if (INTEL_INFO(dev)->gen >= 4)
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+ err_printf(m, " hangcheck: %s [%d]\n",
+ hangcheck_action_to_str(error->hangcheck_action[ring]),
+ error->hangcheck_score[ring]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
- err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
+ err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -601,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 8:
case 7:
case 6:
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -703,6 +727,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
+ error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +745,9 @@ static void i915_record_ring_state(struct drm_device *dev,
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
+
+ error->hangcheck_score[ring->id] = ring->hangcheck.score;
+ error->hangcheck_action[ring->id] = ring->hangcheck.action;
}
@@ -769,7 +797,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].num_requests = count;
error->ring[i].requests =
- kmalloc(count*sizeof(struct drm_i915_error_request),
+ kcalloc(count, sizeof(*error->ring[i].requests),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
@@ -811,7 +839,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
- active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+ active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
if (active_bo)
pinned_bo = active_bo + error->active_bo_count[ndx];
}
@@ -885,8 +913,12 @@ void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("capturing error event; look for more information in "
- "/sys/class/drm/card%d/error\n", dev->primary->index);
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ dev->primary->index);
+ DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
kref_init(&error->ref);
error->eir = I915_READ(EIR);
@@ -988,6 +1020,7 @@ const char *i915_cache_level_str(int type)
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
default: return "";
}
}
@@ -1012,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
+ case 8:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228fd9b..5d1dedc02f1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
+#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -269,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable)
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+ else
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
@@ -381,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (IS_GEN8(dev))
+ broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -441,7 +459,7 @@ done:
void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +476,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
}
void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +504,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -518,6 +537,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
}
}
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ /* Gen2 doesn't have a hardware frame counter */
+ return 0;
+}
+
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
@@ -526,7 +551,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low;
+ u32 high1, high2, low, pixel, vbl_start;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +559,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return 0;
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ const struct drm_display_mode *mode =
+ &intel_crtc->config.adjusted_mode;
+
+ vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+ } else {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ u32 htotal;
+
+ htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+ vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+ vbl_start *= htotal;
+ }
+
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
@@ -544,13 +587,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
*/
do {
high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
- low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ low = I915_READ(low_frame);
high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ pixel = low & PIPE_PIXEL_MASK;
low >>= PIPE_FRAME_LOW_SHIFT;
- return (high1 << 8) | low;
+
+ /*
+ * The frame counter increments at beginning of active.
+ * Cook up a vblank counter by also checking the pixel
+ * counter against vblank start.
+ */
+ return ((high1 << 8) | low) + (pixel >= vbl_start);
}
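/*
 * Illustrative sketch (hypothetical helper and values, not part of this
 * patch): the correction applied above in isolation.  With hypothetical
 * timings of crtc_htotal = 2200 and crtc_vblank_start = 1080, vbl_start
 * is 2,376,000 pixels; a latched pixel count of 2,376,050 has already
 * crossed vblank start, so the hardware frame count is bumped by one,
 * while a mid-frame value such as 1,000,000 leaves it unchanged.
 */
static u32 cook_vblank_count(u32 hw_frame, u32 pixel, u32 vbl_start)
{
	/* hw_frame flips at start of active; advance it once the pixel
	 * counter passes vblank start so the cooked value flips there. */
	return hw_frame + (pixel >= vbl_start);
}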
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -567,66 +617,163 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+
+static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t status;
+ int reg;
+
+ if (IS_VALLEYVIEW(dev)) {
+ status = pipe == PIPE_A ?
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ reg = VLV_ISR;
+ } else if (IS_GEN2(dev)) {
+ status = pipe == PIPE_A ?
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ reg = ISR;
+ } else if (INTEL_INFO(dev)->gen < 5) {
+ status = pipe == PIPE_A ?
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ reg = ISR;
+ } else if (INTEL_INFO(dev)->gen < 7) {
+ status = pipe == PIPE_A ?
+ DE_PIPEA_VBLANK :
+ DE_PIPEB_VBLANK;
+
+ reg = DEISR;
+ } else {
+ switch (pipe) {
+ default:
+ case PIPE_A:
+ status = DE_PIPEA_VBLANK_IVB;
+ break;
+ case PIPE_B:
+ status = DE_PIPEB_VBLANK_IVB;
+ break;
+ case PIPE_C:
+ status = DE_PIPEC_VBLANK_IVB;
+ break;
+ }
+
+ reg = DEISR;
+ }
+
+ if (IS_GEN2(dev))
+ return __raw_i915_read16(dev_priv, reg) & status;
+ else
+ return __raw_i915_read32(dev_priv, reg) & status;
+}
+
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
- int *vpos, int *hpos)
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 vbl = 0, position = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ int position;
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
+ unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe)) {
+ if (!intel_crtc->active) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
- /* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+ htotal = mode->crtc_htotal;
+ vtotal = mode->crtc_vtotal;
+ vbl_start = mode->crtc_vblank_start;
+ vbl_end = mode->crtc_vblank_end;
- if (INTEL_INFO(dev)->gen >= 4) {
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /*
+ * Lock uncore.lock, as we will do multiple timing critical raw
+ * register reads, potentially with preemption disabled, so the
+ * following code must not block on uncore.lock.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- position = I915_READ(PIPEDSL(pipe));
+ if (IS_GEN2(dev))
+ position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ else
+ position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- /* Decode into vertical scanout position. Don't have
- * horizontal scanout position.
+ /*
+ * The scanline counter increments at the leading edge
+ * of hsync, ie. it completely misses the active portion
+ * of the line. Fix up the counter at both edges of vblank
+ * to get a more accurate picture whether we're in vblank
+ * or not.
*/
- *vpos = position & 0x1fff;
- *hpos = 0;
+ in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && position == vbl_start - 1) ||
+ (!in_vbl && position == vbl_end - 1))
+ position = (position + 1) % vtotal;
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
* scanout position.
*/
- position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+ position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
- *vpos = position / htotal;
- *hpos = position - (*vpos * htotal);
+ /* convert to pixel counts */
+ vbl_start *= htotal;
+ vbl_end *= htotal;
+ vtotal *= htotal;
}
- /* Query vblank area. */
- vbl = I915_READ(VBLANK(cpu_transcoder));
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
- /* Test position against vblank region. */
- vbl_start = vbl & 0x1fff;
- vbl_end = (vbl >> 16) & 0x1fff;
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- if ((*vpos < vbl_start) || (*vpos > vbl_end))
- in_vbl = false;
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- /* Inside "upper part" of vblank area? Apply corrective offset: */
- if (in_vbl && (*vpos >= vbl_start))
- *vpos = *vpos - vtotal;
+ in_vbl = position >= vbl_start && position < vbl_end;
- /* Readouts valid? */
- if (vbl > 0)
- ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+ /*
+ * While in vblank, position will be negative
+ * counting up towards 0 at vbl_end. And outside
+ * vblank, position will be positive counting
+ * up since vbl_end.
+ */
+ if (position >= vbl_start)
+ position -= vbl_end;
+ else
+ position += vtotal - vbl_end;
+
+ if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ *vpos = position;
+ *hpos = 0;
+ } else {
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
/* In vblank? */
if (in_vbl)
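/*
 * Worked example for the sign convention above (hypothetical timings,
 * not from this patch): with vbl_start = 1084, vbl_end = 1120 and
 * vtotal = 1125, a raw scanline of 1100 lies inside vblank and becomes
 * 1100 - 1120 = -20, i.e. 20 lines before vblank ends, while a raw
 * scanline of 500 becomes 500 + (1125 - 1120) = 505 lines since the
 * previous vblank end.
 */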
@@ -665,7 +812,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
crtc);
}
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+ struct drm_connector *connector)
{
enum drm_connector_status old_status;
@@ -673,11 +821,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ if (old_status == connector->status)
+ return false;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
- old_status, connector->status);
- return (old_status != connector->status);
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ return true;
}
/*
@@ -801,7 +954,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
- trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+ trace_i915_gem_request_complete(ring);
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
@@ -812,7 +965,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
u32 pm_iir;
- u8 new_delay;
+ int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +982,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- new_delay = dev_priv->rps.cur_delay + 1;
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+ new_delay = dev_priv->rps.cur_delay + adj;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
- if (IS_VALLEYVIEW(dev_priv->dev) &&
- dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+ if (new_delay < dev_priv->rps.rpe_delay)
+ new_delay = dev_priv->rps.rpe_delay;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
- } else
- new_delay = dev_priv->rps.cur_delay - 1;
+ else
+ new_delay = dev_priv->rps.min_delay;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+ new_delay = dev_priv->rps.cur_delay + adj;
+ } else { /* unknown event */
+ new_delay = dev_priv->rps.cur_delay;
+ }
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
- if (new_delay >= dev_priv->rps.min_delay &&
- new_delay <= dev_priv->rps.max_delay) {
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, new_delay);
- else
- gen6_set_rps(dev_priv->dev, new_delay);
- }
-
- if (IS_VALLEYVIEW(dev_priv->dev)) {
- /*
- * On VLV, when we enter RC6 we may not be at the minimum
- * voltage level, so arm a timer to check. It should only
- * fire when there's activity or once after we've entered
- * RC6, and then won't be re-armed until the next RPS interrupt.
- */
- mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
- msecs_to_jiffies(100));
- }
+ if (new_delay < (int)dev_priv->rps.min_delay)
+ new_delay = dev_priv->rps.min_delay;
+ if (new_delay > (int)dev_priv->rps.max_delay)
+ new_delay = dev_priv->rps.max_delay;
+ dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ valleyview_set_rps(dev_priv->dev, new_delay);
+ else
+ gen6_set_rps(dev_priv->dev, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
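/*
 * Minimal sketch of the step logic above (hypothetical helper, not part
 * of this patch; the last_adj bookkeeping and the actual frequency write
 * are left out).  Consecutive up/down events double the previous step,
 * a down-timeout snaps to RPe (or the floor), and the result is clamped
 * to the [min, max] range.
 */
static int rps_pick_new_delay(int cur, int last_adj, int min_delay,
			      int max_delay, int rpe_delay, u32 pm_iir)
{
	int adj = last_adj, new_delay;

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		adj = adj > 0 ? adj * 2 : 1;
		/* for better performance, jump straight to RPe if below */
		new_delay = max(cur + adj, rpe_delay);
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		new_delay = cur > rpe_delay ? rpe_delay : min_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		adj = adj < 0 ? adj * 2 : -1;
		new_delay = cur + adj;
	} else {
		new_delay = cur;	/* unknown event */
	}

	return clamp(new_delay, min_delay, max_delay);
}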
@@ -882,9 +1044,10 @@ static void ivybridge_parity_work(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
l3_parity.error_work);
u32 error_status, row, bank, subbank;
- char *parity_event[5];
+ char *parity_event[6];
uint32_t misccpctl;
unsigned long flags;
+ uint8_t slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1055,81 @@ static void ivybridge_parity_work(struct work_struct *work)
*/
mutex_lock(&dev_priv->dev->struct_mutex);
+ /* If we've screwed up tracking, just let the interrupt fire again */
+ if (WARN_ON(!dev_priv->l3_parity.which_slice))
+ goto out;
+
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
- error_status = I915_READ(GEN7_L3CDERRST1);
- row = GEN7_PARITY_ERROR_ROW(error_status);
- bank = GEN7_PARITY_ERROR_BANK(error_status);
- subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+ while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+ u32 reg;
- I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
- GEN7_L3CDERRST1_ENABLE);
- POSTING_READ(GEN7_L3CDERRST1);
+ slice--;
+ if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+ break;
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ dev_priv->l3_parity.which_slice &= ~(1<<slice);
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ reg = GEN7_L3CDERRST1 + (slice * 0x200);
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ error_status = I915_READ(reg);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
- parity_event[0] = I915_L3_PARITY_UEVENT "=1";
- parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
- parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
- parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
- parity_event[4] = NULL;
+ I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(reg);
+
+ parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+ parity_event[5] = NULL;
+
+ kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+ KOBJ_CHANGE, parity_event);
+
+ DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+ slice, row, bank, subbank);
+
+ kfree(parity_event[4]);
+ kfree(parity_event[3]);
+ kfree(parity_event[2]);
+ kfree(parity_event[1]);
+ }
- kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
- KOBJ_CHANGE, parity_event);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
- DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
- row, bank, subbank);
+out:
+ WARN_ON(dev_priv->l3_parity.which_slice);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- kfree(parity_event[3]);
- kfree(parity_event[2]);
- kfree(parity_event[1]);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (!HAS_L3_GPU_CACHE(dev))
+ if (!HAS_L3_DPF(dev))
return;
spin_lock(&dev_priv->irq_lock);
- ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
+ iir &= GT_PARITY_ERROR(dev);
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ dev_priv->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ dev_priv->l3_parity.which_slice |= 1 << 0;
+
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
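/*
 * Worked example (not from this patch) of the which_slice handshake
 * between the two functions above: if both slices flag a parity error,
 * the IRQ handler sets which_slice = 0x3; in the work function the first
 * ffs() pass returns 1, so slice 0 is serviced and its bit cleared, and
 * the second pass returns 2, servicing slice 1, after which the loop
 * exits with which_slice == 0 and the parity interrupt is re-enabled.
 */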
@@ -975,8 +1164,58 @@ static void snb_gt_irq_handler(struct drm_device *dev,
i915_handle_error(dev, false);
}
- if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- ivybridge_parity_error_irq_handler(dev);
+ if (gt_iir & GT_PARITY_ERROR(dev))
+ ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 master_ctl)
+{
+ u32 rcs, bcs, vcs;
+ uint32_t tmp = 0;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ tmp = I915_READ(GEN8_GT_IIR(0));
+ if (tmp) {
+ ret = IRQ_HANDLED;
+ rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+ bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ if (rcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (bcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (GT0)!\n");
+ }
+
+ if (master_ctl & GEN8_GT_VCS1_IRQ) {
+ tmp = I915_READ(GEN8_GT_IIR(1));
+ if (tmp) {
+ ret = IRQ_HANDLED;
+ vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+ if (vcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (GT1)!\n");
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ tmp = I915_READ(GEN8_GT_IIR(3));
+ if (tmp) {
+ ret = IRQ_HANDLED;
+ vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+ if (vcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VECS]);
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (GT3)!\n");
+ }
+
+ return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1289,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
wake_up_all(&dev_priv->gmbus_wait_queue);
}
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+ uint32_t crc0, uint32_t crc1,
+ uint32_t crc2, uint32_t crc3,
+ uint32_t crc4)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_pipe_crc_entry *entry;
+ int head, tail;
+
+ spin_lock(&pipe_crc->lock);
+
+ if (!pipe_crc->entries) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("spurious interrupt\n");
+ return;
+ }
+
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
+
+ if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("CRC buffer overflowing\n");
+ return;
+ }
+
+ entry = &pipe_crc->entries[head];
+
+ entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+ entry->crc[0] = crc0;
+ entry->crc[1] = crc1;
+ entry->crc[2] = crc2;
+ entry->crc[3] = crc3;
+ entry->crc[4] = crc4;
+
+ head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ pipe_crc->head = head;
+
+ spin_unlock(&pipe_crc->lock);
+
+ wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+ uint32_t crc0, uint32_t crc1,
+ uint32_t crc2, uint32_t crc3,
+ uint32_t crc4) {}
+#endif
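/*
 * Generic sketch of the head/tail arithmetic used above (illustrative
 * only, not part of this patch): a power-of-two ring where the producer
 * advances head and the reader advances tail, with CIRC_SPACE() from
 * <linux/circ_buf.h> reporting how many free slots remain.
 */
#include <linux/circ_buf.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_RING_SIZE 128		/* must be a power of two */

static int demo_ring_push(u32 *slots, int *head, int tail, u32 value)
{
	if (CIRC_SPACE(*head, tail, DEMO_RING_SIZE) < 1)
		return -ENOSPC;		/* reader has fallen behind */

	slots[*head] = value;
	*head = (*head + 1) & (DEMO_RING_SIZE - 1);
	return 0;
}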
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ 0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t res1, res2;
+
+ if (INTEL_INFO(dev)->gen >= 3)
+ res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ else
+ res1 = 0;
+
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ else
+ res2 = 0;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_RED(pipe)),
+ I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+ I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ res1, res2);
+}
+
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
@@ -1117,13 +1452,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
for_each_pipe(pipe) {
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
drm_handle_vblank(dev, pipe);
if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip(dev, pipe);
}
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
}
/* Consume port. Then clear IIR or we'll miss events */
@@ -1212,21 +1550,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
+ enum pipe pipe;
if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n");
- if (err_int & ERR_INT_FIFO_UNDERRUN_A)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
- if (err_int & ERR_INT_FIFO_UNDERRUN_B)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+ for_each_pipe(pipe) {
+ if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- if (err_int & ERR_INT_FIFO_UNDERRUN_C)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
- DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+ if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+ if (IS_IVYBRIDGE(dev))
+ ivb_pipe_crc_irq_handler(dev, pipe);
+ else
+ hsw_pipe_crc_irq_handler(dev, pipe);
+ }
+ }
I915_WRITE(GEN7_ERR_INT, err_int);
}
@@ -1297,6 +1640,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
@@ -1304,31 +1648,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE)
intel_opregion_asle_intr(dev);
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
- if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+ for_each_pipe(pipe) {
+ if (de_iir & DE_PIPE_VBLANK(pipe))
+ drm_handle_vblank(dev, pipe);
- if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+ if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
- if (de_iir & DE_PLANEA_FLIP_DONE) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
+ if (de_iir & DE_PIPE_CRC_DONE(pipe))
+ i9xx_pipe_crc_irq_handler(dev, pipe);
- if (de_iir & DE_PLANEB_FLIP_DONE) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
+ /* plane/pipes map 1:1 on ilk+ */
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
}
/* check event from PCH */
@@ -1351,7 +1690,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ enum pipe i;
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
@@ -1362,10 +1701,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
- for (i = 0; i < 3; i++) {
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ for_each_pipe(i) {
+ if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
drm_handle_vblank(dev, i);
- if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+
+ /* plane/pipes map 1:1 on ilk+ */
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
@@ -1388,7 +1729,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- bool err_int_reenable = false;
atomic_inc(&dev_priv->irq_received);
@@ -1412,17 +1752,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
- /* On Haswell, also mask ERR_INT because we don't want to risk
- * generating "unclaimed register" interrupts from inside the interrupt
- * handler. */
- if (IS_HASWELL(dev)) {
- spin_lock(&dev_priv->irq_lock);
- err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
- if (err_int_reenable)
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
- spin_unlock(&dev_priv->irq_lock);
- }
-
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1781,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
}
- if (err_int_reenable) {
- spin_lock(&dev_priv->irq_lock);
- if (ivb_can_enable_err_int(dev))
- ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
- spin_unlock(&dev_priv->irq_lock);
- }
-
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
@@ -1469,6 +1791,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl;
+ irqreturn_t ret = IRQ_NONE;
+ uint32_t tmp = 0;
+ enum pipe pipe;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ master_ctl = I915_READ(GEN8_MASTER_IRQ);
+ master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+ if (!master_ctl)
+ return IRQ_NONE;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+ if (master_ctl & GEN8_DE_MISC_IRQ) {
+ tmp = I915_READ(GEN8_DE_MISC_IIR);
+ if (tmp & GEN8_DE_MISC_GSE)
+ intel_opregion_asle_intr(dev);
+ else if (tmp)
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
+ else
+ DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+
+ if (tmp) {
+ I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ if (master_ctl & GEN8_DE_PORT_IRQ) {
+ tmp = I915_READ(GEN8_DE_PORT_IIR);
+ if (tmp & GEN8_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+ else if (tmp)
+ DRM_ERROR("Unexpected DE Port interrupt\n");
+ else
+ DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+
+ if (tmp) {
+ I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ for_each_pipe(pipe) {
+ uint32_t pipe_iir;
+
+ if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
+ continue;
+
+ pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ if (pipe_iir & GEN8_PIPE_VBLANK)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
+
+ if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
+
+ if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
+ pipe_name(pipe),
+ pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+ }
+
+ if (pipe_iir) {
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ } else
+ DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ }
+
+ if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+ /*
+ * FIXME(BDW): Assume for now that the new interrupt handling
+ * scheme also closed the SDE interrupt handling race we've seen
+ * on older pch-split platforms. But this needs testing.
+ */
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ if (pch_iir) {
+ I915_WRITE(SDEIIR, pch_iir);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ return ret;
+}
+
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
@@ -1516,7 +1949,7 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
/*
* Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1963,7 @@ static void i915_error_work_func(struct work_struct *work)
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
/*
@@ -1557,7 +1990,7 @@ static void i915_error_work_func(struct work_struct *work)
smp_mb__before_atomic_inc();
atomic_inc(&dev_priv->gpu_error.reset_counter);
- kobject_uevent_env(&dev->primary->kdev.kobj,
+ kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2220,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK_ILK(pipe);
+ DE_PIPE_VBLANK(pipe);
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
@@ -1810,7 +2243,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
imr = I915_READ(VLV_IMR);
- if (pipe == 0)
+ if (pipe == PIPE_A)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1822,6 +2255,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ return 0;
+}
+
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -1845,7 +2294,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK_ILK(pipe);
+ DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2311,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
- if (pipe == 0)
+ if (pipe == PIPE_A)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1870,6 +2319,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
@@ -1965,6 +2429,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
+ i915_handle_error(dev, false);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
}
@@ -1976,6 +2441,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
+ i915_handle_error(dev, false);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
case 0:
@@ -2021,12 +2487,21 @@ static void i915_hangcheck_elapsed(unsigned long data)
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
+ ring->hangcheck.action = HANGCHECK_IDLE;
+
if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
- DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
- ring->name);
- wake_up_all(&ring->irq_queue);
- ring->hangcheck.score += HUNG;
+ if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+ if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
+ else
+ DRM_INFO("Fake missed irq on %s\n",
+ ring->name);
+ wake_up_all(&ring->irq_queue);
+ }
+ /* Safeguard against driver failure */
+ ring->hangcheck.score += BUSY;
} else
busy = false;
} else {
@@ -2049,6 +2524,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd);
switch (ring->hangcheck.action) {
+ case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
@@ -2064,6 +2540,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
}
} else {
+ ring->hangcheck.action = HANGCHECK_ACTIVE;
+
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
@@ -2190,6 +2668,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
}
+static void gen8_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ /* IIR can theoretically queue up two events. Be paranoid */
+#define GEN8_IRQ_INIT_NDX(type, which) do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IMR(which)); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ } while (0)
+
+#define GEN8_IRQ_INIT(type) do { \
+ I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IMR); \
+ I915_WRITE(GEN8_##type##_IER, 0); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+ } while (0)
+
+ GEN8_IRQ_INIT_NDX(GT, 0);
+ GEN8_IRQ_INIT_NDX(GT, 1);
+ GEN8_IRQ_INIT_NDX(GT, 2);
+ GEN8_IRQ_INIT_NDX(GT, 3);
+
+ for_each_pipe(pipe) {
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
+ }
+
+ GEN8_IRQ_INIT(DE_PORT);
+ GEN8_IRQ_INIT(DE_MISC);
+ GEN8_IRQ_INIT(PCU);
+#undef GEN8_IRQ_INIT
+#undef GEN8_IRQ_INIT_NDX
+
+ POSTING_READ(GEN8_PCU_IIR);
+}
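/*
 * For reference (expansion shown for illustration, not added by this
 * patch): GEN8_IRQ_INIT_NDX(GT, 0) above pastes the tokens together and
 * emits the following sequence: mask everything, disable, then clear
 * IIR twice because it can queue up two events.
 *
 *	I915_WRITE(GEN8_GT_IMR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IMR(0));
 *	I915_WRITE(GEN8_GT_IER(0), 0);
 *	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IIR(0));
 *	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 */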
+
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2254,10 +2779,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_GPU_CACHE(dev)) {
+ if (HAS_L3_DPF(dev)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+ gt_irqs |= GT_PARITY_ERROR(dev);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2831,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
- DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+ DE_AUX_CHANNEL_A |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+ DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
}
@@ -2341,7 +2868,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+ PIPE_CRC_DONE_ENABLE;
unsigned long irqflags;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2899,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, pipestat_enable);
- i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
- i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+ i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2392,6 +2920,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ /* These are interrupts we'll toggle with the ring mask register */
+ uint32_t gt_interrupts[] = {
+ GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ 0,
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+ };
+
+ for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
+ u32 tmp = I915_READ(GEN8_GT_IIR(i));
+ if (tmp)
+ DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+ i, tmp);
+ I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
+ I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
+ }
+ POSTING_READ(GEN8_GT_IER(0));
+}
+
+static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
+ GEN8_PIPE_CDCLK_CRC_DONE |
+ GEN8_PIPE_FIFO_UNDERRUN |
+ GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+ int pipe;
+ dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
+ dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
+ dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+
+ for_each_pipe(pipe) {
+ u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ if (tmp)
+ DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+ pipe, tmp);
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
+ }
+ POSTING_READ(GEN8_DE_PIPE_ISR(0));
+
+ I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
+ I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
+ POSTING_READ(GEN8_DE_PORT_IER);
+}
+
+static int gen8_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen8_gt_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(dev_priv);
+
+ ibx_irq_postinstall(dev);
+
+ I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ return 0;
+}
+
+static void gen8_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+#define GEN8_IRQ_FINI_NDX(type, which) do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ } while (0)
+
+#define GEN8_IRQ_FINI(type) do { \
+ I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER, 0); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+ } while (0)
+
+ GEN8_IRQ_FINI_NDX(GT, 0);
+ GEN8_IRQ_FINI_NDX(GT, 1);
+ GEN8_IRQ_FINI_NDX(GT, 2);
+ GEN8_IRQ_FINI_NDX(GT, 3);
+
+ for_each_pipe(pipe) {
+ GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
+ }
+
+ GEN8_IRQ_FINI(DE_PORT);
+ GEN8_IRQ_FINI(DE_MISC);
+ GEN8_IRQ_FINI(PCU);
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+ POSTING_READ(GEN8_PCU_IIR);
+}
+
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2464,6 +3103,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +3124,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT);
POSTING_READ16(IER);
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
@@ -2570,13 +3217,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
- if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, 0, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i8xx_handle_vblank(dev, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
- if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, 1, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+ }
iir = new_iir;
}
@@ -2623,6 +3271,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
+ unsigned long irqflags;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2658,6 +3307,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
i915_enable_asle_pipestat(dev);
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
@@ -2769,6 +3425,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3526,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
@@ -3013,6 +3674,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
}
@@ -3122,18 +3786,21 @@ void intel_irq_init(struct drm_device *dev)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev)) {
+ dev->max_vblank_count = 0;
+ dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ } else {
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
}
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
- else
- dev->driver->get_vblank_timestamp = NULL;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+ }
if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
@@ -3143,6 +3810,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (IS_GEN8(dev)) {
+ dev->driver->irq_handler = gen8_irq_handler;
+ dev->driver->irq_preinstall = gen8_irq_preinstall;
+ dev->driver->irq_postinstall = gen8_irq_postinstall;
+ dev->driver->irq_uninstall = gen8_irq_uninstall;
+ dev->driver->enable_vblank = gen8_enable_vblank;
+ dev->driver->disable_vblank = gen8_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ef9b35479f0..f9eafb6ed52 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -109,6 +110,9 @@
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
#define PP_DIR_DCLV_2G 0xffffffff
+#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
+#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
+
#define GAM_ECOCHK 0x4090
#define ECOCHK_SNB_BIT (1<<10)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
@@ -246,6 +250,7 @@
#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -264,6 +269,11 @@
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+
+#define MI_PREDICATE_RESULT_2 (0x2214)
+#define LOWER_SLICE_ENABLED (1<<0)
+#define LOWER_SLICE_DISABLED (0<<0)
+
/*
* 3D instructions used by the kernel
*/
@@ -346,12 +356,25 @@
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
+#define IOSF_PORT_GPIO_NC 0x13
+#define IOSF_PORT_CCK 0x14
+#define IOSF_PORT_CCU 0xA9
+#define IOSF_PORT_GPS_CORE 0x48
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
+#define PUNIT_REG_PWRGT_CTRL 0x60
+#define PUNIT_REG_PWRGT_STATUS 0x61
+#define PUNIT_CLK_GATE 1
+#define PUNIT_PWR_RESET 2
+#define PUNIT_PWR_GATE 3
+#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
+#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
+#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
+
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
@@ -372,6 +395,40 @@
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+/* vlv2 north clock has */
+#define CCK_FUSE_REG 0x8
+#define CCK_FUSE_HPLL_FREQ_MASK 0x3
+#define CCK_REG_DSI_PLL_FUSE 0x44
+#define CCK_REG_DSI_PLL_CONTROL 0x48
+#define DSI_PLL_VCO_EN (1 << 31)
+#define DSI_PLL_LDO_GATE (1 << 30)
+#define DSI_PLL_P1_POST_DIV_SHIFT 17
+#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
+#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
+#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
+#define DSI_PLL_MUX_MASK (3 << 9)
+#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
+#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
+#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
+#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
+#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
+#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
+#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
+#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
+#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
+#define DSI_PLL_LOCK (1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER 0x4c
+#define DSI_PLL_LFSR (1 << 31)
+#define DSI_PLL_FRACTION_EN (1 << 30)
+#define DSI_PLL_FRAC_COUNTER_SHIFT 27
+#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
+#define DSI_PLL_USYNC_CNT_SHIFT 18
+#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
+#define DSI_PLL_N1_DIV_SHIFT 16
+#define DSI_PLL_N1_DIV_MASK (3 << 16)
+#define DSI_PLL_M1_DIV_SHIFT 0
+#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
+
/*
* DPIO - a special bus for various display related registers to hide behind
*
@@ -387,11 +444,11 @@
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
-#define DPIO_RESET (1<<0)
+#define DPIO_CMNRST (1<<0)
#define _DPIO_TX3_SWING_CTL4_A 0x690
#define _DPIO_TX3_SWING_CTL4_B 0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
_DPIO_TX3_SWING_CTL4_B)
/*
@@ -602,6 +659,9 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define GAMTARBMODE 0x04a08
+#define ARB_MODE_BWGTLB_DISABLE (1<<9)
+#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define RING_FAULT_GTTSEL_MASK (1<<11)
@@ -609,6 +669,7 @@
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
#define RING_FAULT_VALID (1<<0)
#define DONE_REG 0x40b0
+#define GEN8_PRIVATE_PAT 0x40e0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
@@ -669,13 +730,18 @@
#define NOPID 0x02094
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
+#define RING_BBSTATE(base) ((base)+0x110)
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERR_INT_POISON (1<<31)
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
+#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
+#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
@@ -683,6 +749,7 @@
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define DERRMR 0x44050
+/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
@@ -716,6 +783,7 @@
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@@ -890,6 +958,7 @@
#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -900,6 +969,10 @@
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
+#define GT_PARITY_ERROR(dev) \
+ (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+ (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
@@ -1048,9 +1121,6 @@
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)
-#define HSW_CLKGATE_DISABLE_PART_1 0x46500
-#define HSW_DPFC_GATING_DISABLE (1<<23)
-
/*
* GPIO regs
*/
@@ -1387,6 +1457,12 @@
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
+#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
+
/*
* Palette regs
*/
@@ -1404,13 +1480,15 @@
* device 0 function 0's pci config register 0x44 or 0x48 and matches it in
* every way. It is not accessible from the CP register read instructions.
*
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
*/
#define MCHBAR_MIRROR_BASE 0x10000
#define MCHBAR_MIRROR_BASE_SNB 0x140000
/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
-#define DCLK 0x5e04
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
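As the comment above notes, the MCHBAR mirror is read-only from Haswell onwards, so registers relocated onto MCHBAR_MIRROR_BASE_SNB (such as DCLK here) should only ever be read through it. A minimal sketch, with a hypothetical helper name:

/* Read-only access through the SNB+ MCHBAR mirror; never I915_WRITE() these
 * registers on hsw+.  Illustrative only. */
static u32 read_dclk(struct drm_i915_private *dev_priv)
{
        return I915_READ(DCLK);
}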
@@ -1705,9 +1783,9 @@
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
-#define GEN6_GT_PERF_STATUS 0x145948
-#define GEN6_RP_STATE_LIMITS 0x145994
-#define GEN6_RP_STATE_CAP 0x145998
+#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
/*
* Logical Context regs
@@ -1752,6 +1830,12 @@
* on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
+
+
+#define VLV_CLK_CTL2 0x101104
+#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
/*
* Overlay regs
@@ -1771,6 +1855,83 @@
* Display engine regs
*/
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
+#define PIPE_CRC_ENABLE (1 << 31)
+/* ivb+ source selection */
+#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
+#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
+#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
+/* ilk+ source selection */
+#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
+#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
+#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
+#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
+#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
+#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
+/* with DP port the pipe source is invalid */
+#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
+#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
+#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
+/* gen3+ source selection */
+#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
+#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
+#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
+#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
+#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
+#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
+#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
+/* gen2 doesn't have source selection bits */
+#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB 0x60064
+#define _PIPE_CRC_RES_2_A_IVB 0x60068
+#define _PIPE_CRC_RES_3_A_IVB 0x6006c
+#define _PIPE_CRC_RES_4_A_IVB 0x60070
+#define _PIPE_CRC_RES_5_A_IVB 0x60074
+
+#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
+#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
+#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
+#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
+#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB 0x61064
+#define _PIPE_CRC_RES_2_B_IVB 0x61068
+#define _PIPE_CRC_RES_3_B_IVB 0x6106c
+#define _PIPE_CRC_RES_4_B_IVB 0x61070
+#define _PIPE_CRC_RES_5_B_IVB 0x61074
+
+#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_RES_1_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+#define PIPE_CRC_RES_GREEN(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+#define PIPE_CRC_RES_BLUE(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+
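The source-select fields above are written together with the enable bit into PIPE_CRC_CTL; once armed, the hardware latches one result per frame into the PIPE_CRC_RES_* registers and signals completion through the CRC-done interrupt bits defined earlier. A hedged sketch for an ivb-style pipe, with a hypothetical helper name:

static void enable_pipe_crc_on_primary_ivb(struct drm_i915_private *dev_priv,
                                           enum pipe pipe)
{
        /* select the primary plane as CRC source (field value 0) and arm */
        I915_WRITE(PIPE_CRC_CTL(pipe),
                   PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB);
        POSTING_READ(PIPE_CRC_CTL(pipe));
}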
/* Pipe A timing regs */
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
@@ -1793,7 +1954,6 @@
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
-
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1803,8 +1963,9 @@
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
-/* HSW eDP PSR registers */
-#define EDP_PSR_CTL 0x64800
+/* HSW+ eDP PSR registers */
+#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
+#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -1827,16 +1988,16 @@
#define EDP_PSR_TP1_TIME_0us (3<<4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
-#define EDP_PSR_AUX_CTL 0x64810
-#define EDP_PSR_AUX_DATA1 0x64814
+#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
#define EDP_PSR_DPCD_COMMAND 0x80060000
-#define EDP_PSR_AUX_DATA2 0x64818
+#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
-#define EDP_PSR_AUX_DATA3 0x6481c
-#define EDP_PSR_AUX_DATA4 0x64820
-#define EDP_PSR_AUX_DATA5 0x64824
+#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
-#define EDP_PSR_STATUS_CTL 0x64840
+#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -1860,10 +2021,10 @@
#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
-#define EDP_PSR_PERF_CNT 0x64844
+#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG_CTL 0x64860
+#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
@@ -2006,6 +2167,14 @@
#define PCH_HDMIC 0xe1150
#define PCH_HDMID 0xe1160
+#define PORT_DFT_I9XX 0x61150
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X 0x61154
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
+#define PIPE_B_SCRAMBLE_RESET (1 << 1)
+#define PIPE_A_SCRAMBLE_RESET (1 << 0)
+
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
@@ -2034,6 +2203,7 @@
/* Gen 4 SDVO/HDMI bits: */
#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
#define SDVO_ENCODING_SDVO (0 << 10)
#define SDVO_ENCODING_HDMI (2 << 10)
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
@@ -2238,6 +2408,21 @@
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+ _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+ _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+ _VLV_BLC_HIST_CTL_B)
+
/* Backlight control */
#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
@@ -2986,6 +3171,7 @@
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
@@ -3068,6 +3254,18 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define _PIPE_MISC_A 0x70030
+#define _PIPE_MISC_B 0x71030
+#define PIPEMISC_DITHER_BPC_MASK (7<<5)
+#define PIPEMISC_DITHER_8_BPC (0<<5)
+#define PIPEMISC_DITHER_10_BPC (1<<5)
+#define PIPEMISC_DITHER_6_BPC (2<<5)
+#define PIPEMISC_DITHER_12_BPC (3<<5)
+#define PIPEMISC_DITHER_ENABLE (1<<4)
+#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
+#define PIPEMISC_DITHER_TYPE_SP (0<<2)
+#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
@@ -3184,11 +3382,11 @@
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
-#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_MASK (0xffff<<16)
#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_MASK (0xff<<8)
#define WM0_PIPE_SPRITE_SHIFT 8
-#define WM0_PIPE_CURSOR_MASK (0x1f)
+#define WM0_PIPE_CURSOR_MASK (0xff)
#define WM0_PIPEB_ILK 0x45104
#define WM0_PIPEC_IVB 0x45200
@@ -3198,9 +3396,10 @@
#define WM1_LP_LATENCY_MASK (0x7f<<24)
#define WM1_LP_FBC_MASK (0xf<<20)
#define WM1_LP_FBC_SHIFT 20
-#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_FBC_SHIFT_BDW 19
+#define WM1_LP_SR_MASK (0x7ff<<8)
#define WM1_LP_SR_SHIFT 8
-#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK 0x4510c
#define WM2_LP_EN (1<<31)
#define WM3_LP_ILK 0x45110
@@ -3281,17 +3480,17 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45 0x70040
-#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
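The read sequence described in the comment above, for platforms that still use the split high/low counter rather than the GM45 frame counter, looks roughly like the sketch below. The helper name is hypothetical and it assumes the existing PIPEFRAME()/PIPEFRAMEPIXEL() per-pipe macros.

static u32 read_legacy_frame_counter(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        u32 high1, high2, low;

        /* re-read until the high word is stable across the low-word read */
        do {
                high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
                low = I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        return ((high1 >> PIPE_FRAME_HIGH_SHIFT) << 8) |
                (low >> PIPE_FRAME_LOW_SHIFT);
}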
/* Cursor A & B regs */
@@ -3422,10 +3621,10 @@
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
-#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
-#define _PIPEB_FRMCOUNT_GM45 0x71040
-#define _PIPEB_FLIPCOUNT_GM45 0x71044
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
/* Display B control */
@@ -3780,6 +3979,7 @@
#define DE_SPRITEA_FLIP_DONE (1 << 28)
#define DE_PLANEB_FLIP_DONE (1 << 27)
#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
#define DE_PCU_EVENT (1 << 25)
#define DE_GTT_FAULT (1 << 24)
#define DE_POISON (1 << 23)
@@ -3793,13 +3993,18 @@
#define DE_PIPEB_ODD_FIELD (1 << 13)
#define DE_PIPEB_LINE_COMPARE (1 << 12)
#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_CRC_DONE (1 << 2)
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
/* More Ivybridge lolz */
#define DE_ERR_INT_IVB (1<<30)
@@ -3815,9 +4020,8 @@
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
#define DE_PIPEA_VBLANK_IVB (1<<0)
-
-#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
@@ -3833,6 +4037,71 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define GEN8_MASTER_IRQ 0x44200
+#define GEN8_MASTER_IRQ_CONTROL (1<<31)
+#define GEN8_PCU_IRQ (1<<30)
+#define GEN8_DE_PCH_IRQ (1<<23)
+#define GEN8_DE_MISC_IRQ (1<<22)
+#define GEN8_DE_PORT_IRQ (1<<20)
+#define GEN8_DE_PIPE_C_IRQ (1<<18)
+#define GEN8_DE_PIPE_B_IRQ (1<<17)
+#define GEN8_DE_PIPE_A_IRQ (1<<16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
+#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_VCS2_IRQ (1<<3)
+#define GEN8_GT_VCS1_IRQ (1<<2)
+#define GEN8_GT_BCS_IRQ (1<<1)
+#define GEN8_GT_RCS_IRQ (1<<0)
+
+#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
+
+#define GEN8_BCS_IRQ_SHIFT 16
+#define GEN8_RCS_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VECS_IRQ_SHIFT 0
+
+#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
+#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
+#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
+#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
+#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
+#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
+#define GEN8_PIPE_FLIP_DONE (1 << 4)
+#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
+#define GEN8_PIPE_VSYNC (1 << 1)
+#define GEN8_PIPE_VBLANK (1 << 0)
+#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN8_PIPE_CURSOR_FAULT | \
+ GEN8_PIPE_SPRITE_FAULT | \
+ GEN8_PIPE_PRIMARY_FAULT)
+
+#define GEN8_DE_PORT_ISR 0x44440
+#define GEN8_DE_PORT_IMR 0x44444
+#define GEN8_DE_PORT_IIR 0x44448
+#define GEN8_DE_PORT_IER 0x4444c
+#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
+#define GEN8_AUX_CHANNEL_A (1 << 0)
+
+#define GEN8_DE_MISC_ISR 0x44460
+#define GEN8_DE_MISC_IMR 0x44464
+#define GEN8_DE_MISC_IIR 0x44468
+#define GEN8_DE_MISC_IER 0x4446c
+#define GEN8_DE_MISC_GSE (1 << 27)
+
+#define GEN8_PCU_ISR 0x444e0
+#define GEN8_PCU_IMR 0x444e4
+#define GEN8_PCU_IIR 0x444e8
+#define GEN8_PCU_IER 0x444ec
+
#define ILK_DISPLAY_CHICKEN2 0x42004
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -3858,8 +4127,14 @@
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 0x42080
+#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define _CHICKEN_PIPESL_1_A 0x420b0
+#define _CHICKEN_PIPESL_1_B 0x420b4
+#define DPRS_MASK_VBLANK_SRD (1 << 0)
+#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
@@ -3870,6 +4145,8 @@
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+#define COMMON_SLICE_CHICKEN2 0x7014
+# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
@@ -4416,6 +4693,8 @@
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
+#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
@@ -4447,7 +4726,6 @@
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
-#define EDP_PANEL (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
@@ -4456,11 +4734,6 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
-#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
-#define PANEL_POWER_PORT_LVDS (0 << 30)
-#define PANEL_POWER_PORT_DP_A (1 << 30)
-#define PANEL_POWER_PORT_DP_C (2 << 30)
-#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4638,7 +4911,7 @@
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
-#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4683,6 +4956,10 @@
GEN6_PM_RP_DOWN_TIMEOUT)
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define VLV_COUNTER_CONTROL 0x138104
+#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
+#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
@@ -4694,8 +4971,11 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_PCODE_READ_D_COMP 0x10
+#define GEN6_PCODE_WRITE_D_COMP 0x11
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define DISPLAY_IPS_CONTROL 0x19
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -4713,6 +4993,7 @@
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
@@ -4726,11 +5007,13 @@
#define GEN7_L3CDERRST1_ENABLE (1<<7)
#define GEN7_L3LOG_BASE 0xB070
+#define HSW_L3LOG_BASE_SLICE1 0xB270
#define GEN7_L3LOG_SIZE 0x80
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
#define GEN7_ROW_CHICKEN2 0xe4f4
@@ -4740,6 +5023,10 @@
#define HSW_ROW_CHICKEN3 0xe49c
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+#define HALF_SLICE_CHICKEN3 0xe184
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4781,6 +5068,18 @@
CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
+#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ VLV_HDMIW_HDMIEDID_A, \
+ VLV_HDMIW_HDMIEDID_B)
+#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ VLV_AUD_CNTL_ST_A, \
+ VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
+
/* These are the 4 32-bit write offset registers for each stream
* output buffer. It determines the offset from the
* 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4797,6 +5096,12 @@
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
CPT_AUD_CONFIG_A, \
CPT_AUD_CONFIG_B)
+#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
+ VLV_AUD_CONFIG_A, \
+ VLV_AUD_CONFIG_B)
+
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4804,7 +5109,17 @@
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
@@ -4929,6 +5244,7 @@
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
+/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -4938,6 +5254,16 @@
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+/* Broadwell */
+#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
@@ -5047,6 +5373,9 @@
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CLK_FREQ_MASK (3<<26)
#define LCPLL_CLK_FREQ_450 (0<<26)
+#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
+#define LCPLL_CLK_FREQ_675_BDW (3<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
@@ -5128,4 +5457,414 @@
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define DPI_ENABLE (1 << 31) /* A + B */
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_MASK (1 << 26)
+#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
+#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
+#define DITHERING_ENABLE (1 << 25) /* A + B */
+#define FLOPPED_HSTX (1 << 23)
+#define DE_INVERT (1 << 19) /* XXX */
+#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
+#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
+#define AFE_LATCHOUT (1 << 17)
+#define LP_OUTPUT_HOLD (1 << 16)
+#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define CSB_SHIFT 9
+#define CSB_MASK (3 << 9)
+#define CSB_20MHZ (0 << 9)
+#define CSB_10MHZ (1 << 9)
+#define CSB_40MHZ (2 << 9)
+#define BANDGAP_MASK (1 << 8)
+#define BANDGAP_PNW_CIRCUIT (0 << 8)
+#define BANDGAP_LNC_CIRCUIT (1 << 8)
+#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
+#define TEARING_EFFECT_SHIFT 2 /* A + B */
+#define TEARING_EFFECT_MASK (3 << 2)
+#define TEARING_EFFECT_OFF (0 << 2)
+#define TEARING_EFFECT_DSI (1 << 2)
+#define TEARING_EFFECT_GPIO (2 << 2)
+#define LANE_CONFIGURATION_SHIFT 0
+#define LANE_CONFIGURATION_MASK (3 << 0)
+#define LANE_CONFIGURATION_4LANE (0 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
+
+#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define TEARING_EFFECT_DELAY_SHIFT 0
+#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
+#define ULPS_STATE_MASK (3 << 1)
+#define ULPS_STATE_ENTER (2 << 1)
+#define ULPS_STATE_EXIT (1 << 1)
+#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
+#define DEVICE_READY (1 << 0)
+
+#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define TEARING_EFFECT (1 << 31)
+#define SPL_PKT_SENT_INTERRUPT (1 << 30)
+#define GEN_READ_DATA_AVAIL (1 << 29)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define RX_PROT_VIOLATION (1 << 26)
+#define RX_INVALID_TX_LENGTH (1 << 25)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define LP_RX_TIMEOUT (1 << 22)
+#define HS_TX_TIMEOUT (1 << 21)
+#define DPI_FIFO_UNDERRUN (1 << 20)
+#define LOW_CONTENTION (1 << 19)
+#define HIGH_CONTENTION (1 << 18)
+#define TXDSI_VC_ID_INVALID (1 << 17)
+#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
+#define TXCHECKSUM_ERROR (1 << 15)
+#define TXECC_MULTIBIT_ERROR (1 << 14)
+#define TXECC_SINGLE_BIT_ERROR (1 << 13)
+#define TXFALSE_CONTROL_ERROR (1 << 12)
+#define RXDSI_VC_ID_INVALID (1 << 11)
+#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
+#define RXCHECKSUM_ERROR (1 << 9)
+#define RXECC_MULTIBIT_ERROR (1 << 8)
+#define RXECC_SINGLE_BIT_ERROR (1 << 7)
+#define RXFALSE_CONTROL_ERROR (1 << 6)
+#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RXEOT_SYNC_ERROR (1 << 2)
+#define RXSOT_SYNC_ERROR (1 << 1)
+#define RXSOT_ERROR (1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
+#define CMD_MODE_NOT_SUPPORTED (0 << 13)
+#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
+#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
+#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
+#define VID_MODE_FORMAT_MASK (0xf << 7)
+#define VID_MODE_NOT_SUPPORTED (0 << 7)
+#define VID_MODE_FORMAT_RGB565 (1 << 7)
+#define VID_MODE_FORMAT_RGB666 (2 << 7)
+#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
+#define VID_MODE_FORMAT_RGB888 (4 << 7)
+#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
+#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
+#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
+#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
+#define DATA_LANES_PRG_REG_SHIFT 0
+#define DATA_LANES_PRG_REG_MASK (7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define TURN_AROUND_TIMEOUT_MASK 0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define DEVICE_RESET_TIMER_MASK 0xffff
+
+#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define VERTICAL_ADDRESS_SHIFT 16
+#define VERTICAL_ADDRESS_MASK (0xffff << 16)
+#define HORIZONTAL_ADDRESS_SHIFT 0
+#define HORIZONTAL_ADDRESS_MASK 0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define DBI_FIFO_EMPTY_HALF (0 << 0)
+#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
+#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define DPI_LP_MODE (1 << 6)
+#define BACKLIGHT_OFF (1 << 5)
+#define BACKLIGHT_ON (1 << 4)
+#define COLOR_MODE_OFF (1 << 3)
+#define COLOR_MODE_ON (1 << 2)
+#define TURN_ON (1 << 1)
+#define SHUTDOWN (1 << 0)
+
+#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define COMMAND_BYTE_SHIFT 0
+#define COMMAND_BYTE_MASK (0x3f << 0)
+
+#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define MASTER_INIT_TIMER_SHIFT 0
+#define MASTER_INIT_TIMER_MASK (0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define MAX_RETURN_PKT_SIZE_SHIFT 0
+#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
+#define DISABLE_VIDEO_BTA (1 << 3)
+#define IP_TG_CONFIG (1 << 2)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
+#define VIDEO_MODE_BURST (3 << 0)
+
+#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
+#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
+#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
+#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
+#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
+#define CLOCKSTOP (1 << 1)
+#define EOT_DISABLE (1 << 0)
+
+#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define LP_BYTECLK_SHIFT 0
+#define LP_BYTECLK_MASK (0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define LONG_PACKET_WORD_COUNT_SHIFT 8
+#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
+#define SHORT_PACKET_PARAM_SHIFT 8
+#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
+#define VIRTUAL_CHANNEL_SHIFT 6
+#define VIRTUAL_CHANNEL_MASK (3 << 6)
+#define DATA_TYPE_SHIFT 0
+#define  DATA_TYPE_MASK					(0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_FULL (1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define DBI_HS_LP_MODE_MASK (1 << 0)
+#define DBI_LP_MODE (1 << 0)
+#define DBI_HS_MODE (0 << 0)
+
+#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define EXIT_ZERO_COUNT_SHIFT 24
+#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
+#define TRAIL_COUNT_SHIFT 16
+#define TRAIL_COUNT_MASK (0x1f << 16)
+#define CLK_ZERO_COUNT_SHIFT 8
+#define CLK_ZERO_COUNT_MASK (0xff << 8)
+#define PREPARE_COUNT_SHIFT 0
+#define PREPARE_COUNT_MASK (0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define LP_HS_SSW_CNT_SHIFT 16
+#define LP_HS_SSW_CNT_MASK (0xffff << 16)
+#define HS_LP_PWR_SW_CNT_SHIFT 0
+#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define STOP_STATE_STALL_COUNTER_SHIFT 0
+#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define RX_CONTENTION_DETECTED (1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define DBI_TYPEC_ENABLE (1 << 31)
+#define DBI_TYPEC_WIP (1 << 30)
+#define DBI_TYPEC_OPTION_SHIFT 28
+#define DBI_TYPEC_OPTION_MASK (3 << 28)
+#define DBI_TYPEC_FREQ_SHIFT 24
+#define DBI_TYPEC_FREQ_MASK (0xf << 24)
+#define DBI_TYPEC_OVERRIDE (1 << 8)
+#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
+#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
+#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
+#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
+#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
+#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
+#define READ_REQUEST_PRIORITY_SHIFT 3
+#define READ_REQUEST_PRIORITY_MASK (3 << 3)
+#define READ_REQUEST_PRIORITY_LOW (0 << 3)
+#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
+#define RGB_FLIP_TO_BGR (1 << 2)
+
+#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define DATA_MEM_ADDRESS_SHIFT 5
+#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define DATA_VALID (1 << 0)
+
+#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define DATA_LENGTH_SHIFT 0
+#define DATA_LENGTH_MASK (0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define COMMAND_MEM_ADDRESS_SHIFT 5
+#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define AUTO_PWG_ENABLE (1 << 2)
+#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
+#define COMMAND_VALID (1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
+#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+ (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define READ_DATA_VALID(n) (1 << (n))
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618989c..98790c7cccb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -214,6 +214,22 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+
+ dev_priv->regfile.saveBLC_PWM_CTL =
+ I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ dev_priv->regfile.saveBLC_HIST_CTL =
+ I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ dev_priv->regfile.saveBLC_PWM_CTL_B =
+ I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
+ dev_priv->regfile.saveBLC_HIST_CTL_B =
+ I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
+ dev_priv->regfile.saveBLC_PWM_CTL2_B =
+ I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -302,6 +318,19 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
+ dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
+ dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
+ dev_priv->regfile.saveBLC_PWM_CTL2);
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL_B);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_HIST_CTL_B);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL2_B);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
@@ -340,7 +369,9 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_read_config_byte(dev->pdev, LBB,
+ &dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
@@ -367,7 +398,8 @@ int i915_save_state(struct drm_device *dev)
intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +422,9 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_write_config_byte(dev->pdev, LBB,
+ dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
@@ -414,7 +448,9 @@ int i915_restore_state(struct drm_device *dev)
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+ if (INTEL_INFO(dev)->gen < 7)
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+ 0xffff0000);
/* Memory arbitration state */
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112de11..cef38fd320a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,30 +32,50 @@
#include "intel_drv.h"
#include "i915_drv.h"
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
+ u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
if (!intel_enable_rc6(dev))
return 0;
- raw_time = I915_READ(reg) * 128ULL;
- return DIV_ROUND_UP_ULL(raw_time, 100000);
+ /* On VLV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(dev)) {
+ u32 clkctl2;
+
+ clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+ CLK_CTL2_CZCOUNT_30NS_SHIFT;
+ if (!clkctl2) {
+ WARN(!clkctl2, "bogus CZ count value");
+ return 0;
+ }
+ units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ units <<= 8;
+
+ div = 1000000ULL * bias;
+ }
+
+ raw_time = I915_READ(reg) * units;
+ return DIV_ROUND_UP_ULL(raw_time, div);
}
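To make the unit conversion above concrete, here is a standalone, compilable sketch with hypothetical register values: a CZCOUNT_30NS field of 1 (roughly a 33 MHz CZ clock) and a raw counter reading of 1,000,000.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
        uint64_t czcount_30ns = 1;      /* hypothetical CLK_CTL2[31:28] value */
        uint64_t raw_count = 1000000;   /* hypothetical RC6 counter reading */
        uint64_t bias = 100;
        uint64_t units = DIV_ROUND_UP_ULL(30 * bias, czcount_30ns); /* 3000 */
        uint64_t div = 1000000 * bias;                              /* 10^8 */

        /* 1000000 * 3000 / 100000000 == 30, i.e. 30 ms of residency */
        printf("%llu ms\n",
               (unsigned long long)DIV_ROUND_UP_ULL(raw_count * units, div));
        return 0;
}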
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}
static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
@@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ if (IS_VALLEYVIEW(dminor->dev))
+ rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ if (IS_VALLEYVIEW(dminor->dev))
+ rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
@@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
- if (!HAS_L3_GPU_CACHE(dev))
+ if (!HAS_L3_DPF(dev))
return -EPERM;
if (offset % 4 != 0)
@@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
- uint32_t misccpctl;
- int i, ret;
+ int slice = (int)(uintptr_t)attr->private;
+ int ret;
+
+ count = round_down(count, 4);
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
+ count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
- for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
- *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
-
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ if (dev_priv->l3_parity.remap_info[slice])
+ memcpy(buf,
+ dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ count);
+ else
+ memset(buf, 0, count);
mutex_unlock(&drm_dev->struct_mutex);
- return i - offset;
+ return count;
}
static ssize_t
@@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct i915_hw_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
+ int slice = (int)(uintptr_t)attr->private;
int ret;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
+ if (dev_priv->hw_contexts_disabled)
+ return -ENXIO;
+
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
- if (!dev_priv->l3_parity.remap_info) {
+ if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
- dev_priv->l3_parity.remap_info = temp;
+ dev_priv->l3_parity.remap_info[slice] = temp;
- memcpy(dev_priv->l3_parity.remap_info + (offset/4),
- buf + (offset/4),
- count);
+ memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
- i915_gem_l3_remap(drm_dev);
+ /* NB: We defer the remapping until we switch to the context */
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
+ ctx->remap_slice |= (1<<slice);
mutex_unlock(&drm_dev->struct_mutex);
@@ -200,17 +232,29 @@ static struct bin_attribute dpf_attrs = {
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
- .mmap = NULL
+ .mmap = NULL,
+ .private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+ .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+ .size = GEN7_L3LOG_SIZE,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
+ .mmap = NULL,
+ .private = (void *)1
};
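With the second slice attribute registered, userspace sees two binary files under the card's sysfs directory. A hypothetical userspace sketch (the path is an assumption for a typical single-card system) that reads back the cached slice-1 remap table:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint32_t log[0x80 / 4];         /* GEN7_L3LOG_SIZE worth of entries */
        int fd = open("/sys/class/drm/card0/l3_parity_slice_1", O_RDONLY);
        ssize_t n;

        if (fd < 0)
                return 1;
        n = read(fd, log, sizeof(log));
        if (n > 0)
                printf("read %zd bytes of remap data\n", n);
        close(fd);
        return 0;
}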
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
@@ -227,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -238,11 +282,13 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap;
@@ -436,7 +488,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = container_of(kobj, struct device, kobj);
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
@@ -501,27 +553,34 @@ void i915_setup_sysfs(struct drm_device *dev)
#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
- ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
#endif
- if (HAS_L3_GPU_CACHE(dev)) {
- ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+ if (HAS_L3_DPF(dev)) {
+ ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
+
+ if (NUM_L3_SLICES(dev) > 1) {
+ ret = device_create_bin_file(dev->primary->kdev,
+ &dpf_attrs_1);
+ if (ret)
+ DRM_ERROR("l3 parity slice 1 setup failed\n");
+ }
}
ret = 0;
if (IS_VALLEYVIEW(dev))
- ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+ ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
else if (INTEL_INFO(dev)->gen >= 6)
- ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+ ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
&error_state_attr);
if (ret)
DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
- sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+ sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
if (IS_VALLEYVIEW(dev))
- sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+ sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
else
- sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
- device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+ sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+ device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
+ device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
- sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}
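
The sysfs hunks above replace the open-coded container_of(kdev, struct drm_minor, kdev) lookups with a dev_to_drm_minor() helper and turn dev->primary->kdev into a pointer. A minimal sketch of such a helper, assuming the drm_minor is attached as the struct device's driver data (the actual definition lives elsewhere in i915_sysfs.c and is not part of these hunks):

	static inline struct drm_minor *dev_to_drm_minor(struct device *kdev)
	{
		/* Sketch only: assumes the minor was stored with dev_set_drvdata()
		 * when the sysfs device was created. */
		return dev_get_drvdata(kdev);
	}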
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6f619..6e580c98ded 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
TP_printk("dev=%d", __entry->dev)
);
+TRACE_EVENT(i915_gem_evict_vm,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ ),
+
+ TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+ TP_PROTO(struct intel_ring_buffer *from,
+ struct intel_ring_buffer *to,
+ u32 seqno),
+ TP_ARGS(from, to, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, sync_from)
+ __field(u32, sync_to)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = from->dev->primary->index;
+ __entry->sync_from = from->id;
+ __entry->sync_to = to->id;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ __entry->dev,
+ __entry->sync_from, __entry->sync_to,
+ __entry->seqno)
+);
+
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_ARGS(ring, seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+TRACE_EVENT(i915_gem_request_complete,
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ring)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->seqno = ring->get_seqno(ring, false);
+ ),
+
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
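
The TRACE_EVENT blocks added above only define the events; the kernel tracing macros generate matching trace_<name>() stubs that the rest of the driver calls. A hedged usage sketch (the real call sites live in the GEM eviction and request code, outside this hunk):

	/* Illustrative call sites for the new and changed tracepoints. */
	trace_i915_gem_evict_vm(vm);			/* vm is a struct i915_address_space * */
	trace_i915_gem_ring_sync_to(from, to, seqno);	/* from/to are struct intel_ring_buffer * */
	trace_i915_gem_request_complete(ring);		/* seqno is now read via ring->get_seqno() */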
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 57fe1ae32a0..dfff0907f70 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -193,16 +193,14 @@ out:
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, intel_handle;
- acpi_status status;
+ acpi_handle dhandle;
int ret;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM")) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
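
The intel_acpi.c hunk swaps an explicit acpi_get_handle() probe for acpi_has_method(), which performs the same check. Roughly, and only as a sketch of the equivalence:

	/* acpi_has_method(dhandle, "_DSM") behaves like the removed code: */
	static bool has_dsm(acpi_handle dhandle)
	{
		acpi_handle tmp;

		return ACPI_SUCCESS(acpi_get_handle(dhandle, "_DSM", &tmp));
	}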
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed8bc5..e4fba39631a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i, child_device_num, count;
u16 block_size;
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->old.device_type) {
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->slave_addr != SLAVE_ADDR1 &&
- p_child->slave_addr != SLAVE_ADDR2) {
+ if (p_child->old.slave_addr != SLAVE_ADDR1 &&
+ p_child->old.slave_addr != SLAVE_ADDR2) {
/*
* If the slave address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
}
- if (p_child->dvo_port != DEVICE_PORT_DVOB &&
- p_child->dvo_port != DEVICE_PORT_DVOC) {
+ if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
+ p_child->old.dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
" %s port\n",
- p_child->slave_addr,
- (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ p_child->old.slave_addr,
+ (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
if (!p_mapping->initialized) {
- p_mapping->dvo_port = p_child->dvo_port;
- p_mapping->slave_addr = p_child->slave_addr;
- p_mapping->dvo_wiring = p_child->dvo_wiring;
- p_mapping->ddc_pin = p_child->ddc_pin;
- p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->dvo_port = p_child->old.dvo_port;
+ p_mapping->slave_addr = p_child->old.slave_addr;
+ p_mapping->dvo_wiring = p_child->old.dvo_wiring;
+ p_mapping->ddc_pin = p_child->old.ddc_pin;
+ p_mapping->i2c_pin = p_child->old.i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->slave2_addr) {
+ if (p_child->old.slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
parse_driver_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct drm_device *dev = dev_priv->dev;
struct bdb_driver_features *driver;
driver = find_section(bdb, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (SUPPORTS_EDP(dev) &&
- driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->vbt.edp_support = 1;
if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
+ if (dev_priv->vbt.edp_support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
@@ -569,11 +567,149 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
static void
+parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_mipi *mipi;
+
+ mipi = find_section(bdb, BDB_MIPI);
+ if (!mipi) {
+ DRM_DEBUG_KMS("No MIPI BDB found");
+ return;
+ }
+
+ /* XXX: add more info */
+ dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ struct bdb_header *bdb)
+{
+ union child_device_config *it, *child = NULL;
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+ uint8_t hdmi_level_shift;
+ int i, j;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ uint8_t aux_channel;
+ /* Each DDI port can have more than one value on the "DVO Port" field,
+ * so look for all the possible values for each port and abort if more
+ * than one is found. */
+ int dvo_ports[][2] = {
+ {DVO_PORT_HDMIA, DVO_PORT_DPA},
+ {DVO_PORT_HDMIB, DVO_PORT_DPB},
+ {DVO_PORT_HDMIC, DVO_PORT_DPC},
+ {DVO_PORT_HDMID, DVO_PORT_DPD},
+ {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+ };
+
+ /* Find the child device to use, abort if more than one found. */
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ it = dev_priv->vbt.child_dev + i;
+
+ for (j = 0; j < 2; j++) {
+ if (dvo_ports[port][j] == -1)
+ break;
+
+ if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (child) {
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+ port_name(port));
+ return;
+ }
+ child = it;
+ }
+ }
+ }
+ if (!child)
+ return;
+
+ aux_channel = child->raw[25];
+
+ is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+ is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+
+ info->supports_dvi = is_dvi;
+ info->supports_hdmi = is_hdmi;
+ info->supports_dp = is_dp;
+
+ DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+ port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+ if (is_edp && is_dvi)
+ DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+ port_name(port));
+ if (is_crt && port != PORT_E)
+ DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+ if (is_crt && (is_dvi || is_dp))
+ DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+ port_name(port));
+ if (is_dvi && (port == PORT_A || port == PORT_E))
+ DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
+ if (!is_dvi && !is_dp && !is_crt)
+ DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+ port_name(port));
+ if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+ DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+ if (is_dvi) {
+ if (child->common.ddc_pin == 0x05 && port != PORT_B)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+ if (child->common.ddc_pin == 0x04 && port != PORT_C)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+ if (child->common.ddc_pin == 0x06 && port != PORT_D)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ }
+
+ if (is_dp) {
+ if (aux_channel == 0x40 && port != PORT_A)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+ if (aux_channel == 0x10 && port != PORT_B)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+ if (aux_channel == 0x20 && port != PORT_C)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+ if (aux_channel == 0x30 && port != PORT_D)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ }
+
+ if (bdb->version >= 158) {
+ /* The VBT HDMI level shift values match the table we have. */
+ hdmi_level_shift = child->raw[7] & 0xF;
+ if (hdmi_level_shift < 0xC) {
+ DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
+ info->hdmi_level_shift = hdmi_level_shift;
+ }
+ }
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum port port;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return;
+
+ if (bdb->version < 155)
+ return;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++)
+ parse_ddi_port(dev_priv, port, bdb);
+}
+
+static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *p_defs;
- struct child_device_config *p_child, *child_dev_ptr;
+ union child_device_config *p_child, *child_dev_ptr;
int i, child_device_num, count;
u16 block_size;
@@ -601,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
@@ -621,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
@@ -637,6 +773,7 @@ static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ enum port port;
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
@@ -653,8 +790,25 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
- dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ /*
+ * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+ * clock for LVDS.
+ */
+ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+ !HAS_PCH_SPLIT(dev));
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+
+ /* Recommended BSpec default: 800mV 0dB. */
+ info->hdmi_level_shift = 6;
+
+ info->supports_dvi = (port != PORT_A && port != PORT_E);
+ info->supports_hdmi = info->supports_dvi;
+ info->supports_dp = (port != PORT_E);
+ }
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +899,8 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
+ parse_mipi(dev_priv, bdb);
+ parse_ddi_ports(dev_priv, bdb);
if (bios)
pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f0956..f580a2b0ddd 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
+#define BDB_MIPI 50
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
@@ -201,7 +202,10 @@ struct bdb_general_features {
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
-struct child_device_config {
+/* We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. */
+struct old_child_dev_config {
u16 handle;
u16 device_type;
u8 device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@ struct child_device_config {
u8 dvo_function;
} __attribute__((packed));
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+struct common_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 not_common1[12];
+ u8 dvo_port;
+ u8 not_common2[2];
+ u8 ddc_pin;
+ u16 edid_ptr;
+} __attribute__((packed));
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+ /* This one is safe to be used anywhere, but the code should still check
+ * the BDB version. */
+ u8 raw[33];
+ /* This one should only be kept for legacy code. */
+ struct old_child_dev_config old;
+ /* This one should also be safe to use anywhere, even without version
+ * checks. */
+ struct common_child_dev_config common;
+};
+
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
- struct child_device_config devices[0];
+ union child_device_config devices[0];
} __attribute__((packed));
struct bdb_lvds_options {
@@ -608,6 +638,40 @@ int intel_parse_bios(struct drm_device *dev);
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_eDP 0x78C6
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_NOT_HDMI_OUTPUT | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
@@ -618,4 +682,57 @@ int intel_parse_bios(struct drm_device *dev);
#define PORT_IDPC 8
#define PORT_IDPD 9
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+
+/* MIPI DSI panel info */
+struct bdb_mipi {
+ u16 panel_id;
+ u16 bridge_revision;
+
+ /* General params */
+ u32 dithering:1;
+ u32 bpp_pixel_format:1;
+ u32 rsvd1:1;
+ u32 dphy_valid:1;
+ u32 resvd2:28;
+
+ u16 port_info;
+ u16 rsvd3:2;
+ u16 num_lanes:2;
+ u16 rsvd4:12;
+
+ /* DSI config */
+ u16 virt_ch_num:2;
+ u16 vtm:2;
+ u16 rsvd5:12;
+
+ u32 dsi_clock;
+ u32 bridge_ref_clk;
+ u16 rsvd_pwr;
+
+ /* Dphy Params */
+ u32 prepare_cnt:5;
+ u32 rsvd6:3;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd7:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd8:2;
+
+ u32 hl_switch_cnt;
+ u32 lp_byte_clk;
+ u32 clk_lane_switch_cnt;
+} __attribute__((packed));
+
#endif /* _I830_BIOS_H_ */
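
The union child_device_config defined above is intended to be read according to the BDB version: offsets that are stable across versions go through .common, the legacy SDVO parsing keeps using .old, and genuinely version-dependent bytes are read through .raw. A minimal, hypothetical access pattern (only device_type, dvo_port, ddc_pin and raw[] come from the definitions above; the surrounding variables are assumptions):

	union child_device_config *child = &p_defs->devices[i];

	if (!child->common.device_type)		/* offset stable in every BDB version */
		continue;

	dvo_port = child->common.dvo_port;	/* also stable, used by parse_ddi_port() */
	aux_ch   = child->raw[25];		/* version-dependent byte, read raw */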
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 10d1de5bce6..b5b1b9b23ad 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -107,7 +107,17 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
+ int dotclock;
+
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -264,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder)
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
- if (HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -366,9 +376,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- /* FIXME: debug force function and remove */
- ret = true;
-
return ret;
}
@@ -670,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
static void intel_crt_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -776,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
if (!crt)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(crt);
return;
@@ -816,16 +822,15 @@ void intel_crt_init(struct drm_device *dev)
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- if (IS_HASWELL(dev))
- crt->base.get_config = hsw_crt_get_config;
- else
- crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev)) {
+ crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
- else
+ } else {
+ crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
+ }
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b53fff84a7d..330077bcd0b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
0x80C30FFF, 0x000B0000,
0x00FFFFFF, 0x00040006,
0x80D75FFF, 0x000B0000,
- 0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,64 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00C30FFF, 0x001E0000,
0x00FFFFFF, 0x00060006,
0x00D75FFF, 0x001E0000,
- 0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
-static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const u32 hsw_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff T mV diff db */
+ 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
+ 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
+ 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
+ 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
+ 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
+ 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
+ 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
+ 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
+ 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
+ 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
+ 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
+ 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
+};
+
+static const u32 bdw_ddi_translations_edp[] = {
+ 0x00FFFFFF, 0x00000012, /* DP parameters */
+ 0x00EBAFFF, 0x00020011,
+ 0x00C71FFF, 0x0006000F,
+ 0x00FFFFFF, 0x00020011,
+ 0x00DB6FFF, 0x0005000F,
+ 0x00BEEFFF, 0x000A000C,
+ 0x00FFFFFF, 0x0005000F,
+ 0x00DB6FFF, 0x000A000C,
+ 0x00FFFFFF, 0x000A000C,
+ 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+static const u32 bdw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0007000E, /* DP parameters */
+ 0x00D75FFF, 0x000E000A,
+ 0x00BEFFFF, 0x00140006,
+ 0x00FFFFFF, 0x000E000A,
+ 0x00D75FFF, 0x00180004,
+ 0x80CB2FFF, 0x001B0002,
+ 0x00F7DFFF, 0x00180004,
+ 0x80D75FFF, 0x001B0002,
+ 0x80FFFFFF, 0x001B0002,
+ 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+static const u32 bdw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0001000E, /* FDI parameters */
+ 0x00D75FFF, 0x0004000A,
+ 0x00C30FFF, 0x00070006,
+ 0x00AAAFFF, 0x000C0000,
+ 0x00FFFFFF, 0x0004000A,
+ 0x00D75FFF, 0x00090004,
+ 0x00C30FFF, 0x000C0000,
+ 0x00FFFFFF, 0x00070006,
+ 0x00D75FFF, 0x000C0000,
+ 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
@@ -78,8 +131,9 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
}
}
-/* On Haswell, DDI port buffers must be programmed with correct values
- * in advance. The buffer values are different for FDI and DP modes,
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
@@ -89,15 +143,58 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
- const u32 *ddi_translations = (port == PORT_E) ?
- hsw_ddi_translations_fdi :
- hsw_ddi_translations_dp;
+ int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ const u32 *ddi_translations_fdi;
+ const u32 *ddi_translations_dp;
+ const u32 *ddi_translations_edp;
+ const u32 *ddi_translations;
+
+ if (IS_BROADWELL(dev)) {
+ ddi_translations_fdi = bdw_ddi_translations_fdi;
+ ddi_translations_dp = bdw_ddi_translations_dp;
+ ddi_translations_edp = bdw_ddi_translations_edp;
+ } else if (IS_HASWELL(dev)) {
+ ddi_translations_fdi = hsw_ddi_translations_fdi;
+ ddi_translations_dp = hsw_ddi_translations_dp;
+ ddi_translations_edp = hsw_ddi_translations_dp;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ ddi_translations_edp = bdw_ddi_translations_dp;
+ ddi_translations_fdi = bdw_ddi_translations_fdi;
+ ddi_translations_dp = bdw_ddi_translations_dp;
+ }
+
+ switch (port) {
+ case PORT_A:
+ ddi_translations = ddi_translations_edp;
+ break;
+ case PORT_B:
+ case PORT_C:
+ ddi_translations = ddi_translations_dp;
+ break;
+ case PORT_D:
+ if (intel_dpd_is_edp(dev))
+ ddi_translations = ddi_translations_edp;
+ else
+ ddi_translations = ddi_translations_dp;
+ break;
+ case PORT_E:
+ ddi_translations = ddi_translations_fdi;
+ break;
+ default:
+ BUG();
+ }
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
+ /* Entry 9 is for HDMI: */
+ for (i = 0; i < 2; i++) {
+ I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+ reg += 4;
+ }
}
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +393,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(&encoder->base, adjusted_mode);
}
-
- intel_dp_init_link_config(intel_dp);
-
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
@@ -739,7 +833,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -767,18 +862,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
- /* Can only use the always-on power well for eDP when
- * not using the panel fitter, and when not using motion
- * blur mitigation (which we don't support). */
- if (intel_crtc->config.pch_pfit.enabled)
+ /* On Haswell, can only use the always-on power well for
+ * eDP when not using the panel fitter, and when not
+ * using motion blur mitigation (which we don't
+ * support). */
+ if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1139,18 +1235,29 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
- if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ if (lcpll & LCPLL_CD_SOURCE_FCLK) {
return 800000;
- else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
return 450000;
- else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
+ } else if (freq == LCPLL_CLK_FREQ_450) {
return 450000;
- else if (IS_ULT(dev_priv->dev))
- return 337500;
- else
- return 540000;
+ } else if (IS_HASWELL(dev)) {
+ if (IS_ULT(dev))
+ return 337500;
+ else
+ return 540000;
+ } else {
+ if (freq == LCPLL_CLK_FREQ_54O_BDW)
+ return 540000;
+ else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+ return 337500;
+ else
+ return 675000;
+ }
}
void intel_ddi_pll_init(struct drm_device *dev)
@@ -1202,7 +1309,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
@@ -1285,6 +1392,40 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
default:
break;
}
+
+ switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ case TRANS_DDI_MODE_SELECT_FDI:
+ break;
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->has_dp_encoder = true;
+ intel_dp_get_m_n(intel_crtc, pipe_config);
+ break;
+ default:
+ break;
+ }
+
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1314,6 +1455,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->port;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ if (!intel_dp_init_connector(intel_dig_port, connector)) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->port;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(intel_dig_port, connector);
+
+ return connector;
+}
+
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1322,17 +1498,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct drm_encoder *encoder;
struct intel_connector *hdmi_connector = NULL;
struct intel_connector *dp_connector = NULL;
+ bool init_hdmi, init_dp;
+
+ init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+ dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+ init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+ if (!init_dp && !init_hdmi) {
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ port_name(port));
+ init_hdmi = true;
+ init_dp = true;
+ }
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
- dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!dp_connector) {
- kfree(intel_dig_port);
- return;
- }
-
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
@@ -1352,28 +1533,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
- intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_ddi_hot_plug;
- if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
- drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
- kfree(dp_connector);
- return;
- }
+ if (init_dp)
+ dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
- if (intel_encoder->type != INTEL_OUTPUT_EDP) {
- hdmi_connector = kzalloc(sizeof(struct intel_connector),
- GFP_KERNEL);
- if (!hdmi_connector) {
- return;
- }
+ /* In theory we don't need the encoder->type check, but leave it just in
+ * case we have some really bad VBTs... */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
+ hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
- intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ if (!dp_connector && !hdmi_connector) {
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
}
}
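
For reference, each entry in the translation tables above is a pair of 32-bit values written to consecutive DDI_BUF_TRANS(port) registers: the nine DP/eDP/FDI pairs first, then one HDMI pair selected by the VBT hdmi_level_shift. A sketch of the resulting layout, assuming the 4-byte register stride used by the loop in intel_prepare_ddi_buffers():

	/* Pair i of the selected table lands at (illustrative): */
	u32 lo_reg = DDI_BUF_TRANS(port) + i * 8;	/* first word, e.g. 0x00FFFFFF */
	u32 hi_reg = DDI_BUF_TRANS(port) + i * 8 + 4;	/* second word, e.g. 0x0006000E */
	/* Pairs 0-8 come from the DP/eDP/FDI table; pair 9 is
	 * hsw_ddi_translations_hdmi[hdmi_level * 2] and [hdmi_level * 2 + 1]. */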
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d78d33f9337..7ec8b488bb1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@ struct intel_limit {
intel_p2_t p2;
};
-/* FDI */
-#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
-
int
intel_pch_rawclk(struct drm_device *dev)
{
@@ -313,44 +309,44 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_vlv_dac = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m = { .min = 22, .max = 450 }, /* guess */
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3 },
- .p2 = { .dot_limit = 270000,
- .p2_slow = 2, .p2_fast = 20 },
-};
-
-static const intel_limit_t intel_limits_vlv_hdmi = {
- .dot = { .min = 25000, .max = 270000 },
+static const intel_limit_t intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
- .m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
- .p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
- .p2 = { .dot_limit = 270000,
- .p2_slow = 2, .p2_fast = 20 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
-static const intel_limit_t intel_limits_vlv_dp = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m = { .min = 22, .max = 450 },
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3 },
- .p2 = { .dot_limit = 270000,
- .p2_slow = 2, .p2_fast = 20 },
-};
+static void vlv_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
+ return true;
+
+ return false;
+}
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
@@ -412,12 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
else
limit = &intel_limits_pineview_sdvo;
} else if (IS_VALLEYVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
- limit = &intel_limits_vlv_dac;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
- limit = &intel_limits_vlv_hdmi;
- else
- limit = &intel_limits_vlv_dp;
+ limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / clock->n;
- clock->dot = clock->vco / clock->p;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / (clock->n + 2);
- clock->dot = clock->vco / clock->p;
-}
-
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->type == type)
- return true;
-
- return false;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -481,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
const intel_limit_t *limit,
const intel_clock_t *clock)
{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
INTELPllInvalid("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
- INTELPllInvalid("m1 <= m2\n");
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
+
+ if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+ if (clock->m1 <= clock->m2)
+ INTELPllInvalid("m1 <= m2\n");
+
+ if (!IS_VALLEYVIEW(dev)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ }
+
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
INTELPllInvalid("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
- u32 m, n, fastclk;
- u32 updrate, minupdate, p;
- unsigned long bestppm, ppm, absppm;
- int dotclk, flag;
-
- flag = 0;
- dotclk = target * 1000;
- bestppm = 1000000;
- ppm = absppm = 0;
- fastclk = dotclk / (2*100);
- updrate = 0;
- minupdate = 19200;
- n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
- bestm1 = bestm2 = bestp1 = bestp2 = 0;
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ target *= 5; /* fast clock */
+
+ memset(best_clock, 0, sizeof(*best_clock));
/* based on hardware requirement, prefer smaller n to precision */
- for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
- updrate = refclk / n;
- for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
- for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
- if (p2 > 10)
- p2 = p2 - 1;
- p = p1 * p2;
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
- for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
- m2 = (((2*(fastclk * p * n / m1 )) +
- refclk) / (2*refclk));
- m = m1 * m2;
- vco = updrate * m;
- if (vco >= limit->vco.min && vco < limit->vco.max) {
- ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
- absppm = (ppm > 0) ? ppm : (-ppm);
- if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
- bestppm = 0;
- flag = 1;
- }
- if (absppm < bestppm - 10) {
- bestppm = absppm;
- flag = 1;
- }
- if (flag) {
- bestn = n;
- bestm1 = m1;
- bestm2 = m2;
- bestp1 = p1;
- bestp2 = p2;
- flag = 0;
- }
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm, diff;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_clock(refclk, &clock);
+
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+
+ diff = abs(clock.dot - target);
+ ppm = div_u64(1000000ULL * diff, target);
+
+ if (ppm < 100 && clock.p > best_clock->p) {
+ bestppm = 0;
+ *best_clock = clock;
+ found = true;
+ }
+
+ if (bestppm >= 10 && ppm < bestppm - 10) {
+ bestppm = ppm;
+ *best_clock = clock;
+ found = true;
}
}
}
}
}
- best_clock->n = bestn;
- best_clock->m1 = bestm1;
- best_clock->m2 = bestm2;
- best_clock->p1 = bestp1;
- best_clock->p2 = bestp2;
- return true;
+ return found;
+}
+
+bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ */
+ return intel_crtc->active && crtc->fb &&
+ intel_crtc->config.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
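
The rewritten vlv_find_best_dpll() above works in "fast clock" units (five times the dot clock) and scores each candidate by its deviation from the target in parts per million. A small worked example of that scoring, using the same arithmetic as the loop (numbers are illustrative only):

	/* Target dot clock 270000 kHz -> fast-clock target = 270000 * 5 = 1350000.
	 * A candidate whose clock.dot comes out at 1349460 gives:
	 *   diff = 540
	 *   ppm  = 1000000 * 540 / 1350000 = 400
	 * It replaces the current best only if it improves on it by more than
	 * 10 ppm, unless it is already under 100 ppm, in which case the
	 * candidate with the largest p wins. */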
@@ -812,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
DRM_DEBUG_KMS("vblank wait timed out\n");
}
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPEDSL(pipe);
+ u32 line1, line2;
+ u32 line_mask;
+
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
+ line1 = I915_READ(reg) & line_mask;
+ mdelay(5);
+ line2 = I915_READ(reg) & line_mask;
+
+ return line1 == line2;
+}
+
/*
* intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
@@ -843,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
100))
WARN(1, "pipe_off wait timed out\n");
} else {
- u32 last_line, line_mask;
- int reg = PIPEDSL(pipe);
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
- if (IS_GEN2(dev))
- line_mask = DSL_LINEMASK_GEN2;
- else
- line_mask = DSL_LINEMASK_GEN3;
-
/* Wait for the display line to settle */
- do {
- last_line = I915_READ(reg) & line_mask;
- mdelay(5);
- } while (((I915_READ(reg) & line_mask) != last_line) &&
- time_after(timeout, jiffies));
- if (time_after(jiffies, timeout))
+ if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -929,6 +922,24 @@ void assert_pll(struct drm_i915_private *dev_priv,
state_string(state), state_string(cur_state));
}
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+ u32 val;
+ bool cur_state;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ cur_state = val & DSI_PLL_VCO_EN;
+ WARN(cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
@@ -1069,6 +1080,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
+static void assert_cursor(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ struct drm_device *dev = dev_priv->dev;
+ bool cur_state;
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+ else if (IS_845G(dev) || IS_I865G(dev))
+ cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+ else
+ cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ WARN(cur_state != state,
+ "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
@@ -1323,6 +1354,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
+static void intel_init_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all be set
+ * to 0.
+ *
+ * This should only be done on init and resume from S3 with both
+ * PLLs disabled, or we risk losing DPIO and PLL synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
static void vlv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(DPLL(pipe));
}
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 val = 0;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* Leave integrated clock source enabled */
+ if (pipe == PIPE_B)
+ val = DPLL_INTEGRATED_CRI_CLK_VLV;
+ I915_WRITE(DPLL(pipe), val);
+ POSTING_READ(DPLL(pipe));
+}
+
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
{
u32 port_mask;
@@ -1661,7 +1726,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
* returning.
*/
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
- bool pch_port)
+ bool pch_port, bool dsi)
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
@@ -1670,6 +1735,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 val;
assert_planes_disabled(dev_priv, pipe);
+ assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
- assert_pll_enabled(dev_priv, pipe);
+ if (dsi)
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* or we might hang the display.
*/
assert_planes_disabled(dev_priv, pipe);
+ assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
-void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
{
- if (dev_priv->info->gen >= 4)
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
- else
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+
+ I915_WRITE(reg, I915_READ(reg));
+ POSTING_READ(reg);
}
/**
- * intel_enable_plane - enable a display plane on a given pipe
+ * intel_enable_primary_plane - enable the primary plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
*
* Enable @plane on @pipe, making sure that @pipe is running first.
*/
-static void intel_enable_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
{
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
+ WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+
+ intel_crtc->primary_enabled = true;
+
reg = DSPCNTR(plane);
val = I915_READ(reg);
if (val & DISPLAY_PLANE_ENABLE)
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, plane);
+ intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
- * intel_disable_plane - disable a display plane
+ * intel_disable_primary_plane - disable the primary plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
*
* Disable @plane; should be an independent operation.
*/
-static void intel_disable_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
{
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
+ WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+
+ intel_crtc->primary_enabled = false;
+
reg = DSPCNTR(plane);
val = I915_READ(reg);
if ((val & DISPLAY_PLANE_ENABLE) == 0)
return;
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, plane);
+ intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
@@ -1839,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
alignment = 0;
break;
case I915_TILING_Y:
- /* Despite that we check this in framebuffer_init userspace can
- * screw us over and change the tiling after the fact. Only
- * pinned buffers can't change their tiling. */
- DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+ WARN(1, "Y tiled bo slipped through, driver bug!\n");
return -EINVAL;
default:
BUG();
@@ -2077,7 +2156,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
else
dspcntr &= ~DISPPLANE_TILED;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
else
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -2097,7 +2176,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2244,11 +2323,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /* Update pipe size and adjust fitter if needed */
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ *
+ * To fix this properly, we need to hoist the checks up into
+ * compute_mode_changes (or above), check the actual pfit state and
+ * whether the platform allows pfit disable with pipe active, and only
+ * then update the pipesrc and pfit state, even on the flip path.
+ */
if (i915_fastboot) {
+ const struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+
I915_WRITE(PIPESRC(intel_crtc->pipe),
- ((crtc->mode.hdisplay - 1) << 16) |
- (crtc->mode.vdisplay - 1));
+ ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+ (adjusted_mode->crtc_vdisplay - 1));
if (!intel_crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2873,6 +2967,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -2890,14 +2985,14 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
SBI_ICLK);
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
- if (crtc->mode.clock == 20000) {
+ if (clock == 20000) {
auxdiv = 1;
divsel = 0x41;
phaseinc = 0x20;
} else {
/* The iCLK virtual clock root frequency is in MHz,
- * but the crtc->mode.clock in in KHz. To get the divisors,
- * it is necessary to divide one by another, so we
+ * but the adjusted_mode->crtc_clock is in KHz. To get the
+ * divisors, it is necessary to divide one by another, so we
* convert the virtual clock precision to KHz here for higher
* precision.
*/
@@ -2905,7 +3000,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
u32 iclk_pi_range = 64;
u32 desired_divisor, msb_divisor_value, pi_value;
- desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ desired_divisor = (iclk_virtual_root_freq / clock);
msb_divisor_value = desired_divisor / iclk_pi_range;
pi_value = desired_divisor % iclk_pi_range;
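
The hunk above only swaps the clock source for the SSC divisor math; the split of desired_divisor into a coarse divisor plus a phase-increment remainder is unchanged. A minimal standalone sketch of that split with a hypothetical pixel clock; the 172.8 MHz virtual root frequency constant is an assumption taken from the surrounding comment and is not visible in this hunk.

/* Standalone sketch of the divisor split above; compile with any C compiler.
 * iclk_virtual_root_freq is assumed (172.8 MHz with extra kHz precision). */
#include <stdio.h>

int main(void)
{
	unsigned int iclk_virtual_root_freq = 172800 * 1000; /* assumed constant */
	unsigned int iclk_pi_range = 64;
	unsigned int clock = 148500; /* hypothetical pixel clock in kHz */
	unsigned int desired_divisor, msb_divisor_value, pi_value;

	desired_divisor = iclk_virtual_root_freq / clock;    /* 1163 for 148.5 MHz */
	msb_divisor_value = desired_divisor / iclk_pi_range; /* coarse divisor part */
	pi_value = desired_divisor % iclk_pi_range;          /* fractional phase step */

	printf("divisor=%u msb=%u pi=%u\n",
	       desired_divisor, msb_divisor_value, pi_value);
	return 0;
}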
@@ -2921,7 +3016,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- crtc->mode.clock,
+ clock,
auxdiv,
divsel,
phasedir,
@@ -3286,6 +3381,108 @@ static void intel_disable_planes(struct drm_crtc *crtc)
intel_plane_disable(&intel_plane->base);
}
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ /* We can only enable IPS after we enable a plane and wait for a vblank.
+ * We guarantee that the plane is enabled by calling intel_enable_ips
+ * only after intel_enable_plane. And intel_enable_plane already waits
+ * for a vblank, so all we need to do here is to enable the IPS bit. */
+ assert_plane_enabled(dev_priv, crtc->plane);
+ if (IS_BROADWELL(crtc->base.dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ /* Quoting Art Runyan: "its not safe to expect any particular
+ * value in IPS_CTL bit 31 after enabling IPS through the
+ * mailbox." Therefore we need to defer waiting on the state
+ * change.
+ * TODO: need to fix this for state checker
+ */
+ } else {
+ I915_WRITE(IPS_CTL, IPS_ENABLE);
+ /* The bit only becomes 1 in the next vblank, so this wait here
+ * is essentially intel_wait_for_vblank. If we don't have this
+ * and don't wait for vblanks until the end of crtc_enable, then
+ * the HW state readout code will complain that the expected
+ * IPS_CTL value is not the one we read. */
+ if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+ DRM_ERROR("Timed out waiting for IPS enable\n");
+ }
+}
+
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ assert_plane_enabled(dev_priv, crtc->plane);
+ if (IS_BROADWELL(crtc->base.dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ } else
+ I915_WRITE(IPS_CTL, 0);
+ POSTING_READ(IPS_CTL);
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int palreg = PALETTE(pipe);
+ int i;
+ bool reenable_ips = false;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
+ }
+
+ /* use legacy palette for Ironlake */
+ if (HAS_PCH_SPLIT(dev))
+ palreg = LGC_PALETTE(pipe);
+
+ /* Workaround : Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ */
+ if (intel_crtc->config.ips_enabled &&
+ ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(intel_crtc);
+ reenable_ips = true;
+ }
+
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+
+ if (reenable_ips)
+ hsw_enable_ips(intel_crtc);
+}
+
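
The relocated intel_crtc_load_lut() above packs each 8-bit R/G/B triple into one 32-bit palette register at palreg + 4 * i. A minimal standalone sketch of that packing, using a hypothetical LUT entry:

#include <stdint.h>
#include <stdio.h>

/* Pack one gamma LUT entry the same way the loop above does:
 * red in bits 23:16, green in bits 15:8, blue in bits 7:0. */
static uint32_t pack_lut_entry(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	/* Hypothetical mid-ramp entry 128/128/128 packs to 0x00808080 */
	printf("0x%08x\n", pack_lut_entry(128, 128, 128));
	return 0;
}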
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3305,8 +3502,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
- intel_update_watermarks(dev);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@@ -3329,9 +3524,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
*/
intel_crtc_load_lut(crtc);
+ intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
- intel_crtc->config.has_pch_encoder);
- intel_enable_plane(dev_priv, plane, pipe);
+ intel_crtc->config.has_pch_encoder, false);
+ intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -3365,34 +3561,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
-static void hsw_enable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
- if (!crtc->config.ips_enabled)
- return;
+ intel_enable_primary_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
- /* We can only enable IPS after we enable a plane and wait for a vblank.
- * We guarantee that the plane is enabled by calling intel_enable_ips
- * only after intel_enable_plane. And intel_enable_plane already waits
- * for a vblank, so all we need to do here is to enable the IPS bit. */
- assert_plane_enabled(dev_priv, crtc->plane);
- I915_WRITE(IPS_CTL, IPS_ENABLE);
+ hsw_enable_ips(intel_crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
-static void hsw_disable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
- if (!crtc->config.ips_enabled)
- return;
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
- assert_plane_enabled(dev_priv, crtc->plane);
- I915_WRITE(IPS_CTL, 0);
+ /* FBC must be disabled before disabling the plane on HSW. */
+ if (dev_priv->fbc.plane == plane)
+ intel_disable_fbc(dev);
- /* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev, crtc->pipe);
+ hsw_disable_ips(intel_crtc);
+
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_primary_plane(dev_priv, plane, pipe);
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_crtc *crtc_it, *other_active_crtc = NULL;
+
+ /* We want to get the other_active_crtc only if there's only 1 other
+ * active crtc. */
+ list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+ if (!crtc_it->active || crtc_it == crtc)
+ continue;
+
+ if (other_active_crtc)
+ return;
+
+ other_active_crtc = crtc_it;
+ }
+ if (!other_active_crtc)
+ return;
+
+ intel_wait_for_vblank(dev, other_active_crtc->pipe);
+ intel_wait_for_vblank(dev, other_active_crtc->pipe);
}
static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3402,7 +3638,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
@@ -3415,8 +3650,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
- intel_update_watermarks(dev);
-
if (intel_crtc->config.has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
@@ -3437,23 +3670,22 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_set_pipe_settings(crtc);
intel_ddi_enable_transcoder_func(crtc);
+ intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
- intel_crtc->config.has_pch_encoder);
- intel_enable_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
-
- hsw_enable_ips(intel_crtc);
+ intel_crtc->config.has_pch_encoder, false);
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
+ intel_opregion_notify_encoder(encoder, true);
+ }
+
+ /* If we change the relative order between pipe/planes enabling, we need
+ * to change the workaround. */
+ haswell_mode_set_planes_workaround(intel_crtc);
+ haswell_crtc_enable_planes(crtc);
/*
* There seems to be a race in PCH platform hw (at least on some
@@ -3506,7 +3738,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_primary_plane(dev_priv, plane, pipe);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3547,7 +3779,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
intel_crtc->active = false;
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
@@ -3561,27 +3793,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (!intel_crtc->active)
return;
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
-
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
-
- /* FBC must be disabled before disabling the plane on HSW. */
- if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
+ haswell_crtc_disable_planes(crtc);
- hsw_disable_ips(intel_crtc);
-
- intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
- intel_disable_plane(dev_priv, plane, pipe);
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ intel_opregion_notify_encoder(encoder, false);
+ encoder->disable(encoder);
+ }
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3604,7 +3826,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
}
intel_crtc->active = false;
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
@@ -3696,6 +3918,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ bool is_dsi;
WARN_ON(!crtc->enabled);
@@ -3703,13 +3926,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
return;
intel_crtc->active = true;
- intel_update_watermarks(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- vlv_enable_pll(intel_crtc);
+ is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+ if (!is_dsi)
+ vlv_enable_pll(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -3719,8 +3944,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
- intel_enable_pipe(dev_priv, pipe, false);
- intel_enable_plane(dev_priv, plane, pipe);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+ intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -3745,7 +3971,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
return;
intel_crtc->active = true;
- intel_update_watermarks(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -3757,8 +3982,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
- intel_enable_pipe(dev_priv, pipe, false);
- intel_enable_plane(dev_priv, plane, pipe);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(dev_priv, pipe, false, false);
+ intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
@@ -3814,7 +4040,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_primary_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -3824,11 +4050,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- i9xx_disable_pll(dev_priv, pipe);
+ if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ vlv_disable_pll(dev_priv, pipe);
+ else if (!IS_VALLEYVIEW(dev))
+ i9xx_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
+ intel_update_watermarks(crtc);
+
intel_update_fbc(dev);
- intel_update_watermarks(dev);
}
static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3902,6 +4132,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
if (crtc->fb) {
@@ -4029,7 +4260,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return false;
}
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
@@ -4091,8 +4322,7 @@ retry:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
- fdi_dotclock = adjusted_mode->clock;
- fdi_dotclock /= pipe_config->pixel_multiplier;
+ fdi_dotclock = adjusted_mode->crtc_clock;
lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
pipe_config->pipe_bpp);
@@ -4134,13 +4364,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- if (HAS_PCH_SPLIT(dev)) {
- /* FDI link clock is fixed at 2.7G */
- if (pipe_config->requested_mode.clock * 3
- > IRONLAKE_FDI_FREQ * 4)
+ /* FIXME should check pixel clock limits on all platforms */
+ if (INTEL_INFO(dev)->gen < 4) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int clock_limit =
+ dev_priv->display.get_display_clock_speed(dev);
+
+ /*
+ * Enable pixel doubling when the dot clock
+ * is > 90% of the (display) core speed.
+ *
+ * GDG double wide on either pipe,
+ * otherwise pipe A only.
+ */
+ if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+ adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+ clock_limit *= 2;
+ pipe_config->double_wide = true;
+ }
+
+ if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
return -EINVAL;
}
+ /*
+ * Pipe horizontal size must be even in:
+ * - DVO ganged mode
+ * - LVDS dual channel mode
+ * - Double wide pipe
+ */
+ if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+ pipe_config->pipe_src_w &= ~1;
+
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
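
For the gen < 4 pixel-clock check added a few lines up, double-wide mode kicks in once the dot clock exceeds 90% of the display core clock, which in turn doubles the limit before the final rejection test. A small standalone sketch with hypothetical clock numbers; in the driver the limit comes from get_display_clock_speed() and the "can double" condition is the pipe A / i915g check shown above.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the gen < 4 logic above: enable double wide past 90% of the core
 * clock, then reject anything still above the (possibly doubled) limit. */
static bool clock_ok(int crtc_clock, int core_clock, bool can_double,
		     bool *double_wide)
{
	int clock_limit = core_clock;

	*double_wide = false;
	if (can_double && crtc_clock > clock_limit * 9 / 10) {
		clock_limit *= 2;
		*double_wide = true;
	}
	return crtc_clock <= clock_limit * 9 / 10;
}

int main(void)
{
	bool dw;

	/* Hypothetical 133 MHz core clock, 132 MHz dot clock:
	 * double wide is enabled and the mode is accepted. */
	printf("ok=%d double_wide=%d\n", clock_ok(132000, 133000, true, &dw), dw);
	return 0;
}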
@@ -4304,28 +4560,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int vlv_get_refclk(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk = 27000; /* for DP & HDMI */
-
- return 100000; /* only one validated so far */
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
- refclk = 96000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv))
- refclk = 100000;
- else
- refclk = 96000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
- refclk = 100000;
- }
-
- return refclk;
-}
-
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -4333,7 +4567,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
int refclk;
if (IS_VALLEYVIEW(dev)) {
- refclk = vlv_get_refclk(crtc);
+ refclk = 100000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4391,7 +4625,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
+ pipe)
{
u32 reg_val;
@@ -4399,24 +4634,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
reg_val &= 0x8cffffff;
reg_val = 0x8c000000;
- vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
- reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
}
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4482,18 +4717,18 @@ static void vlv_update_pll(struct intel_crtc *crtc)
/* PLL B needs special handling */
if (pipe)
- vlv_pllb_recal_opamp(dev_priv);
+ vlv_pllb_recal_opamp(dev_priv, pipe);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+ vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4507,55 +4742,55 @@ static void vlv_update_pll(struct intel_crtc *crtc)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
0x00d0000f);
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+ coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+ vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
- vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- if (pipe)
+ /* We should never disable this, set it here for state tracking */
+ if (pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
@@ -4693,7 +4928,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
/* We need to be careful not to changed the adjusted mode, for otherwise
@@ -4746,7 +4980,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ ((intel_crtc->config.pipe_src_w - 1) << 16) |
+ (intel_crtc->config.pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4784,8 +5019,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
}
tmp = I915_READ(PIPESRC(crtc->pipe));
- pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
- pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+ pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+ pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4805,7 +5043,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
crtc->mode.flags = pipe_config->adjusted_mode.flags;
- crtc->mode.clock = pipe_config->adjusted_mode.clock;
+ crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
}
@@ -4821,17 +5059,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
pipeconf |= PIPECONF_ENABLE;
- if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
- /* Enable pixel doubling when the dot clock is > 90% of the (display)
- * core speed.
- *
- * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
- * pipe == 0 check?
- */
- if (intel_crtc->config.requested_mode.clock >
- dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
- }
+ if (intel_crtc->config.double_wide)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4885,14 +5114,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr;
bool ok, has_reduced_clock = false;
- bool is_lvds = false;
+ bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
@@ -4902,42 +5130,49 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
+ case INTEL_OUTPUT_DSI:
+ is_dsi = true;
+ break;
}
num_connectors++;
}
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ if (is_dsi)
+ goto skip_dpll;
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc,
- intel_crtc->config.port_clock,
- refclk, NULL, &clock);
- if (!ok && !intel_crtc->config.clock_set) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
+ if (!intel_crtc->config.clock_set) {
+ refclk = i9xx_get_refclk(crtc, num_connectors);
- if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk, &clock,
- &reduced_clock);
- }
- /* Compat-code for transition, will disappear. */
- if (!intel_crtc->config.clock_set) {
+ * Returns a set of divisors for the desired target clock with
+ * the given refclk, or FALSE. The returned values represent
+ * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
+ * 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc,
+ intel_crtc->config.port_clock,
+ refclk, NULL, &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target
+ * clock. If the clocks don't match, we can't switch
+ * the display clock by using the FP0/FP1. In such case
+ * we will disable the LVDS downclock feature.
+ */
+ has_reduced_clock =
+ dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk, &clock,
+ &reduced_clock);
+ }
+ /* Compat-code for transition, will disappear. */
intel_crtc->config.dpll.n = clock.n;
intel_crtc->config.dpll.m1 = clock.m1;
intel_crtc->config.dpll.m2 = clock.m2;
@@ -4945,17 +5180,19 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->config.dpll.p2 = clock.p2;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev)) {
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
- else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
- else
+ } else {
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
+ }
+skip_dpll:
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4972,8 +5209,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((mode->vdisplay - 1) << 16) |
- (mode->hdisplay - 1));
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
i9xx_set_pipeconf(intel_crtc);
@@ -4983,8 +5220,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
ret = intel_pipe_set_base(crtc, x, y, fb);
- intel_update_watermarks(dev);
-
return ret;
}
@@ -5015,6 +5250,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = pipe_config->cpu_transcoder;
+ intel_clock_t clock;
+ u32 mdiv;
+ int refclk = 100000;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+ vlv_clock(refclk, &clock);
+
+ /* clock.dot is the fast clock */
+ pipe_config->port_clock = clock.dot / 5;
+}
+
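
vlv_crtc_clock_get() above unpacks the divider fields from the DPIO DIV register and reports port_clock as one fifth of the computed fast clock. A rough standalone sketch with made-up divider values, assuming the conventional vco = refclk * m1 * m2 / n and dot = vco / (p1 * p2) relationship; vlv_clock() itself (and its exact rounding) is not shown in this hunk.

#include <stdio.h>

/* Rough sketch only: the PLL relationship below is an assumption, and the
 * divider values are made up for illustration. */
int main(void)
{
	int refclk = 100000;                        /* kHz, as in the function above */
	int m1 = 2, m2 = 27, n = 2, p1 = 3, p2 = 2; /* hypothetical dividers */
	long long vco = (long long)refclk * m1 * m2 / n;
	long long dot = vco / (p1 * p2);

	/* port_clock is the fast clock divided by 5, per the comment above */
	printf("vco=%lld dot=%lld port_clock=%lld\n", vco, dot, dot / 5);
	return 0;
}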
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -5045,6 +5306,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
+ if (INTEL_INFO(dev)->gen < 4)
+ pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
intel_get_pipe_timings(crtc, pipe_config);
i9xx_get_pfit_config(crtc, pipe_config);
@@ -5077,6 +5341,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
+ if (IS_VALLEYVIEW(dev))
+ vlv_crtc_clock_get(crtc, pipe_config);
+ else
+ i9xx_crtc_clock_get(crtc, pipe_config);
+
return true;
}
@@ -5565,14 +5834,16 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t val;
val = 0;
- if (intel_crtc->config.dither)
+ if (IS_HASWELL(dev) && intel_crtc->config.dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -5585,6 +5856,33 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+
+ if (IS_BROADWELL(dev)) {
+ val = 0;
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ val |= PIPEMISC_DITHER_6_BPC;
+ break;
+ case 24:
+ val |= PIPEMISC_DITHER_8_BPC;
+ break;
+ case 30:
+ val |= PIPEMISC_DITHER_10_BPC;
+ break;
+ case 36:
+ val |= PIPEMISC_DITHER_12_BPC;
+ break;
+ default:
+ /* Case prevented by pipe_config_set_bpp. */
+ BUG();
+ }
+
+ if (intel_crtc->config.dither)
+ val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+ I915_WRITE(PIPEMISC(pipe), val);
+ }
}
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
@@ -5819,11 +6117,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
intel_crtc->lowfreq_avail = false;
- if (intel_crtc->config.has_pch_encoder) {
- pll = intel_crtc_to_shared_dpll(intel_crtc);
-
- }
-
intel_set_pipe_timings(intel_crtc);
if (intel_crtc->config.has_pch_encoder) {
@@ -5839,25 +6132,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ret = intel_pipe_set_base(crtc, x, y, fb);
- intel_update_watermarks(dev);
-
return ret;
}
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
+
+ m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+ m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+ m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+ m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder transcoder = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
- pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
- pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
- pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
- & ~TU_SIZE_MASK;
- pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
- pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
- & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ if (INTEL_INFO(dev)->gen >= 5) {
+ m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+ m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+ m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+ m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ } else {
+ m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+ m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+ m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+ m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ }
+}
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ if (crtc->config.has_pch_encoder)
+ intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+ else
+ intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+ &pipe_config->dp_m_n);
+}
+
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+ &pipe_config->fdi_m_n);
}
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5946,6 +6281,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+ ironlake_pch_clock_get(crtc, pipe_config);
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -6002,8 +6339,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
-void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down)
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
{
uint32_t val;
@@ -6031,7 +6368,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
val = I915_READ(D_COMP);
val |= D_COMP_COMP_DISABLE;
- I915_WRITE(D_COMP, val);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_ERROR("Failed to disable D_COMP\n");
+ mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
ndelay(100);
@@ -6050,7 +6390,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
-void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
@@ -6073,7 +6413,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val = I915_READ(D_COMP);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
- I915_WRITE(D_COMP, val);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_ERROR("Failed to enable D_COMP\n");
+ mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
val = I915_READ(LCPLL_CTL);
@@ -6175,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6182,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6219,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
if (!i915_enable_pc8)
return;
@@ -6242,36 +6594,103 @@ done:
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
- hsw_enable_package_c8(dev_priv);
+ __hsw_enable_package_c8(dev_priv);
}
+ mutex_unlock(&dev_priv->pc8.lock);
}
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
- hsw_disable_package_c8(dev_priv);
+ __hsw_disable_package_c8(dev_priv);
}
+ mutex_unlock(&dev_priv->pc8.lock);
}
-static void haswell_modeset_global_resources(struct drm_device *dev)
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
+static unsigned long get_pipe_power_domains(struct drm_device *dev,
+ enum pipe pipe, bool pfit_enabled)
{
- bool enable = false;
+ unsigned long mask;
+ enum transcoder transcoder;
+
+ transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+ mask = BIT(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ if (pfit_enabled)
+ mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+ return mask;
+}
+
+void intel_display_set_init_power(struct drm_device *dev, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_power_wells(struct drm_device *dev)
+{
+ unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
+ /*
+ * First get all needed power domains, then put all unneeded, to avoid
+ * any unnecessary toggling of the power wells.
+ */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
if (!crtc->base.enabled)
continue;
- if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
- crtc->config.cpu_transcoder != TRANSCODER_EDP)
- enable = true;
+ pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
+ crtc->pipe,
+ crtc->config.pch_pfit.enabled);
+
+ for_each_power_domain(domain, pipe_domains[crtc->pipe])
+ intel_display_power_get(dev, domain);
}
- intel_set_power_well(dev, enable);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+ for_each_power_domain(domain, crtc->enabled_power_domains)
+ intel_display_power_put(dev, domain);
+
+ crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+ }
+
+ intel_display_set_init_power(dev, false);
+}
+
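
modeset_update_power_wells() above deliberately takes all newly required domain references before dropping the old ones, so a well that stays in use never bounces off and back on. A minimal refcount sketch (plain C, not the driver's API) illustrating why that ordering matters:

#include <stdio.h>

/* Toy refcount for one power well: prints whenever it would toggle. */
static int well_refcount;

static void power_get(void)
{
	if (well_refcount++ == 0)
		printf("well ON\n");
}

static void power_put(void)
{
	if (--well_refcount == 0)
		printf("well OFF\n");
}

int main(void)
{
	power_get();	/* old configuration holds a reference */

	/* New configuration still needs the well: get first, then put,
	 * so the count never reaches zero and the well never toggles. */
	power_get();
	power_put();

	power_put();	/* final teardown actually turns it off */
	return 0;
}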
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ modeset_update_power_wells(dev);
hsw_update_package_c8(dev);
}
@@ -6310,8 +6729,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
ret = intel_pipe_set_base(crtc, x, y, fb);
- intel_update_watermarks(dev);
-
return ret;
}
@@ -6419,6 +6836,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
+static struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (mode->clock == hdmi_audio_clock[i].clock)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+ i = 1;
+ }
+
+ DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
+
+ return hdmi_audio_clock[i].config;
+}
+
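
The table above encodes the NTSC-style 1000/1001 pixel clocks by rounding up, so for example 74.25 MHz / 1.001 lands on the 74176 kHz entry. A quick standalone check of that arithmetic, using the same DIV_ROUND_UP definition as the kernel macro:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* 1000/1001 variants from the table above, all in kHz */
	printf("%d\n", DIV_ROUND_UP(25200 * 1000, 1001));	/* 25175  */
	printf("%d\n", DIV_ROUND_UP(74250 * 1000, 1001));	/* 74176  */
	printf("%d\n", DIV_ROUND_UP(148500 * 1000, 1001));	/* 148352 */
	return 0;
}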
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
@@ -6449,7 +6904,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
}
static void g4x_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
@@ -6489,7 +6945,8 @@ static void g4x_write_eld(struct drm_connector *connector,
}
static void haswell_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
@@ -6542,8 +6999,9 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else
- I915_WRITE(aud_config, 0);
+ } else {
+ I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+ }
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
@@ -6576,7 +7034,8 @@ static void haswell_write_eld(struct drm_connector *connector,
}
static void ironlake_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
@@ -6594,6 +7053,11 @@ static void ironlake_write_eld(struct drm_connector *connector,
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(connector->dev)) {
+ hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
aud_config = CPT_AUD_CFG(pipe);
@@ -6603,8 +7067,19 @@ static void ironlake_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
- i = I915_READ(aud_cntl_st);
- i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ if (IS_VALLEYVIEW(connector->dev)) {
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+
+ intel_encoder = intel_attached_encoder(connector);
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ i = intel_dig_port->port;
+ } else {
+ i = I915_READ(aud_cntl_st);
+ i = (i >> 29) & DIP_PORT_SEL_MASK;
+ /* DIP_Port_Select, 0x1 = PortB */
+ }
+
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
@@ -6620,8 +7095,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else
- I915_WRITE(aud_config, 0);
+ } else {
+ I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+ }
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
@@ -6671,50 +7147,7 @@ void intel_write_eld(struct drm_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
if (dev_priv->display.write_eld)
- dev_priv->display.write_eld(connector, crtc);
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- int palreg = PALETTE(pipe);
- int i;
- bool reenable_ips = false;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled || !intel_crtc->active)
- return;
-
- if (!HAS_PCH_SPLIT(dev_priv->dev))
- assert_pll_enabled(dev_priv, pipe);
-
- /* use legacy palette for Ironlake */
- if (HAS_PCH_SPLIT(dev))
- palreg = LGC_PALETTE(pipe);
-
- /* Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- */
- if (intel_crtc->config.ips_enabled &&
- ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
- GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc);
- reenable_ips = true;
- }
-
- for (i = 0; i < 256; i++) {
- I915_WRITE(palreg + 4 * i,
- (intel_crtc->lut_r[i] << 16) |
- (intel_crtc->lut_g[i] << 8) |
- intel_crtc->lut_b[i]);
- }
-
- if (reenable_ips)
- hsw_enable_ips(intel_crtc);
+ dev_priv->display.write_eld(connector, crtc, mode);
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6770,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6790,7 +7225,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
}
@@ -6799,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR_IVB(pipe));
I915_WRITE(CURBASE_IVB(pipe), base);
+ POSTING_READ(CURBASE_IVB(pipe));
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -6812,23 +7249,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
- u32 base, pos;
+ u32 base = 0, pos = 0;
bool visible;
- pos = 0;
-
- if (on && crtc->enabled && crtc->fb) {
+ if (on)
base = intel_crtc->cursor_addr;
- if (x > (int) crtc->fb->width)
- base = 0;
- if (y > (int) crtc->fb->height)
- base = 0;
- } else
+ if (x >= intel_crtc->config.pipe_src_w)
+ base = 0;
+
+ if (y >= intel_crtc->config.pipe_src_h)
base = 0;
if (x < 0) {
- if (x + intel_crtc->cursor_width < 0)
+ if (x + intel_crtc->cursor_width <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6837,7 +7271,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
- if (y + intel_crtc->cursor_height < 0)
+ if (y + intel_crtc->cursor_height <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -6849,7 +7283,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
@@ -6980,8 +7414,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_crtc->cursor_x = x;
- intel_crtc->cursor_y = y;
+ intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
+ intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
@@ -6989,27 +7423,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-/** Sets the color ramps on behalf of RandR */
-void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->lut_r[regno] = red >> 8;
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
-}
-
-void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- *red = intel_crtc->lut_r[regno] << 8;
- *green = intel_crtc->lut_g[regno] << 8;
- *blue = intel_crtc->lut_b[regno] << 8;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -7045,14 +7458,21 @@ intel_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err;
+
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_unreference_unlocked(&obj->base);
- kfree(intel_fb);
- return ERR_PTR(ret);
- }
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ goto err;
return &intel_fb->base;
+err:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ kfree(intel_fb);
+
+ return ERR_PTR(ret);
}
static u32
@@ -7095,6 +7515,7 @@ static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
+#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
@@ -7115,6 +7536,9 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
return fb;
+#else
+ return NULL;
+#endif
}
bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7258,6 +7682,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
mutex_unlock(&crtc->mutex);
}
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->vbt.lvds_ssc_freq * 1000;
+ else if (HAS_PCH_SPLIT(dev))
+ return 120000;
+ else if (!IS_GEN2(dev))
+ return 96000;
+ else
+ return 48000;
+}
+
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
@@ -7265,14 +7705,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
- u32 dpll = I915_READ(DPLL(pipe));
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = I915_READ(FP0(pipe));
+ fp = pipe_config->dpll_hw_state.fp0;
else
- fp = I915_READ(FP1(pipe));
+ fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@@ -7303,14 +7744,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
- pipe_config->adjusted_mode.clock = 0;
return;
}
if (IS_PINEVIEW(dev))
- pineview_clock(96000, &clock);
+ pineview_clock(refclk, &clock);
else
- i9xx_clock(96000, &clock);
+ i9xx_clock(refclk, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
@@ -7318,13 +7758,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
clock.p2 = 14;
-
- if ((dpll & PLL_REF_INPUT_MASK) ==
- PLLB_REF_INPUT_SPREADSPECTRUMIN) {
- /* XXX: might not be 66MHz */
- i9xx_clock(66000, &clock);
- } else
- i9xx_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -7336,59 +7769,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.p2 = 4;
else
clock.p2 = 2;
-
- i9xx_clock(48000, &clock);
}
+
+ i9xx_clock(refclk, &clock);
}
- pipe_config->adjusted_mode.clock = clock.dot;
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = clock.dot;
}
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+int intel_dotclock_calculate(int link_freq,
+ const struct intel_link_m_n *m_n)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- int link_freq, repeat;
- u64 clock;
- u32 link_m, link_n;
-
- repeat = pipe_config->pixel_multiplier;
-
/*
* The calculation for the data clock is:
- * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+ * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
* But we want to avoid losing precison if possible, so:
- * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+ * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and the link clock is simpler:
- * link_clock = (m * link_clock * repeat) / n
+ * link_clock = (m * link_clock) / n
*/
- /*
- * We need to get the FDI or DP link clock here to derive
- * the M/N dividers.
- *
- * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
- * For DP, it's either 1.62GHz or 2.7GHz.
- * We do our calculations in 10*MHz since we don't need much precison.
- */
- if (pipe_config->has_pch_encoder)
- link_freq = intel_fdi_link_freq(dev) * 10000;
- else
- link_freq = pipe_config->port_clock;
+ if (!m_n->link_n)
+ return 0;
- link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
- link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+ return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
- if (!link_m || !link_n)
- return;
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
- clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
- do_div(clock, link_n);
+ /* read out port_clock from the DPLL */
+ i9xx_crtc_clock_get(crtc, pipe_config);
- pipe_config->adjusted_mode.clock = clock;
+ /*
+ * This value does not include pixel_multiplier.
+ * We will check that port_clock and adjusted_mode.crtc_clock
+ * agree once we know their relationship in the encoder's
+ * get_config() function.
+ */
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+ &pipe_config->fdi_m_n);
}
/** Returns the currently programmed mode of the given pipe. */
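
intel_dotclock_calculate() above recovers the pixel clock from the link M/N pair as link_m * link_freq / link_n, widening to 64 bits before the divide to keep precision. A standalone sketch with hypothetical M/N values; the real values come from the PIPE_LINK/PIPE_DATA registers read out in intel_cpu_transcoder_get_m_n().

#include <stdint.h>
#include <stdio.h>

/* Same computation as intel_dotclock_calculate(): widen to 64 bits so
 * link_m * link_freq cannot overflow before the divide. */
static int dotclock_calculate(int link_freq, uint32_t link_m, uint32_t link_n)
{
	if (!link_n)
		return 0;
	return (int)((uint64_t)link_m * link_freq / link_n);
}

int main(void)
{
	/* Hypothetical values: a 2.7 GHz link expressed in kHz with
	 * link_m/link_n = 22/40 recovers a 148500 kHz dot clock. */
	printf("%d\n", dotclock_calculate(270000, 22, 40));
	return 0;
}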
@@ -7404,6 +7833,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
int vsync = I915_READ(VSYNC(cpu_transcoder));
+ enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -7416,11 +7846,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
- pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+ pipe_config.cpu_transcoder = (enum transcoder) pipe;
pipe_config.pixel_multiplier = 1;
+ pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+ pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+ pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
i9xx_crtc_clock_get(intel_crtc, &pipe_config);
- mode->clock = pipe_config.adjusted_mode.clock;
+ mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7526,6 +7959,9 @@ void intel_mark_idle(struct drm_device *dev)
intel_decrease_pllclock(crtc);
}
+
+ if (dev_priv->info->gen >= 6)
+ gen6_rps_idle(dev->dev_private);
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7714,7 +8150,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7756,7 +8192,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7805,7 +8241,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7850,7 +8286,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7929,7 +8365,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7974,7 +8410,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -8209,6 +8645,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return bpp;
}
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
const char *context)
@@ -8225,10 +8672,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
+ DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ pipe_config->has_dp_encoder,
+ pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+ pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+ pipe_config->dp_m_n.tu);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+ DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+ DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -8238,6 +8694,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->pch_pfit.size,
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+ DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8281,6 +8738,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
+
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8307,13 +8765,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (plane_bpp < 0)
goto fail;
+ /*
+ * Determine the real pipe dimensions. Note that stereo modes can
+ * increase the actual pipe size due to the frame doubling and
+ * insertion of additional space for blanks between the frame. This
+ * insertion of additional space for blanks between the frames. This
+ * computation to clearly distinguish it from the adjusted mode, which
+ * can be changed by the connectors in the below retry loop.
+ */
+ drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
+ pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
+ pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+
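The comment above deserves a concrete number. For the standard HDMI 1080p24 frame-packing layout, the transmitted frame is the logical 1080 lines plus a full vtotal of 1125 lines for the second eye and the gap between them, at twice the clock; the pipe really scans out that enlarged frame, which is why pipe_src_w/h come from the crtc_* timings of the requested mode rather than from its logical size. A small arithmetic sketch; the exact CRTC_STEREO_DOUBLE behaviour lives in drm_mode_set_crtcinfo() outside this patch, so treat the breakdown below as illustrative.

#include <stdio.h>

/* Frame-packing arithmetic sketch, based on the HDMI 1080p24 3D layout. */
int main(void)
{
	int vdisplay = 1080, vtotal = 1125, clock = 74250;  /* logical mode, kHz */

	int crtc_vdisplay = vdisplay + vtotal;  /* 2205 active lines on the wire */
	int crtc_clock    = clock * 2;          /* 148500 kHz */

	/* The pipe scans out the packed frame, so that is the pipe source size. */
	int pipe_src_h = crtc_vdisplay;

	printf("logical %d lines -> pipe_src_h %d, crtc_clock %d kHz\n",
	       vdisplay, pipe_src_h, crtc_clock);
	return 0;
}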
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+ drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
@@ -8334,7 +8804,8 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->adjusted_mode.clock;
+ pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+ * pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret < 0) {
@@ -8521,13 +8992,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
}
-static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
- struct intel_crtc_config *new)
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
- int clock1, clock2, diff;
-
- clock1 = cur->adjusted_mode.clock;
- clock2 = new->adjusted_mode.clock;
+ int diff;
if (clock1 == clock2)
return true;
@@ -8581,6 +9048,15 @@ intel_pipe_config_compare(struct drm_device *dev,
return false; \
}
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+ if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
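PIPE_CONF_CHECK_CLOCK_FUZZY follows the same pattern as the other PIPE_CONF_CHECK_* macros: stringify the member with #name so the error message identifies the field, but compare through the now two-argument intel_fuzzy_clock_check(). A self-contained sketch of that shape; the exact tolerance sits in the unchanged tail of intel_fuzzy_clock_check() (not shown in this hunk), so the roughly 1% used below is an assumption.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Fuzzy compare: the ~1%-of-average tolerance here is illustrative. */
static bool fuzzy_clock_check(int clock1, int clock2)
{
	if (clock1 == clock2)
		return true;
	if (!clock1 || !clock2)
		return false;
	return abs(clock1 - clock2) * 200 < clock1 + clock2;
}

/* Same shape as PIPE_CONF_CHECK_CLOCK_FUZZY: #name names the field. */
#define CHECK_CLOCK_FUZZY(cur, hw, name)                                \
	do {                                                            \
		if (!fuzzy_clock_check((cur)->name, (hw)->name))        \
			fprintf(stderr, "mismatch in " #name            \
				" (expected %i, found %i)\n",           \
				(cur)->name, (hw)->name);               \
	} while (0)

struct config { int port_clock; };

int main(void)
{
	struct config sw = { .port_clock = 270000 };
	struct config hw = { .port_clock = 270020 };  /* well within tolerance */

	CHECK_CLOCK_FUZZY(&sw, &hw, port_clock);
	return 0;
}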
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -8594,6 +9070,13 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(fdi_m_n.link_n);
PIPE_CONF_CHECK_I(fdi_m_n.tu);
+ PIPE_CONF_CHECK_I(has_dp_encoder);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m_n.link_m);
+ PIPE_CONF_CHECK_I(dp_m_n.link_n);
+ PIPE_CONF_CHECK_I(dp_m_n.tu);
+
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8624,8 +9107,8 @@ intel_pipe_config_compare(struct drm_device *dev,
DRM_MODE_FLAG_NVSYNC);
}
- PIPE_CONF_CHECK_I(requested_mode.hdisplay);
- PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+ PIPE_CONF_CHECK_I(pipe_src_w);
+ PIPE_CONF_CHECK_I(pipe_src_h);
PIPE_CONF_CHECK_I(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8640,6 +9123,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(ips_enabled);
+ PIPE_CONF_CHECK_I(double_wide);
+
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -8649,20 +9134,17 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
+ if (!IS_HASWELL(dev)) {
+ PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
- if (!IS_HASWELL(dev)) {
- if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
- DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
- current_config->adjusted_mode.clock,
- pipe_config->adjusted_mode.clock);
- return false;
- }
- }
-
return true;
}
@@ -8789,14 +9271,10 @@ check_crtc_state(struct drm_device *dev)
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config &&
- encoder->get_hw_state(encoder, &pipe))
+ if (encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
- if (dev_priv->display.get_clock)
- dev_priv->display.get_clock(crtc, &pipe_config);
-
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
@@ -8871,6 +9349,18 @@ intel_modeset_check_state(struct drm_device *dev)
check_shared_dpll_state(dev);
}
+void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ int dotclock)
+{
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ pipe_config->adjusted_mode.crtc_clock, dotclock);
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
@@ -8883,7 +9373,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
- saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+ saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
saved_hwmode = saved_mode + 1;
@@ -9422,7 +9912,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc;
int i;
- intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
@@ -9451,6 +9941,18 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
+{
+ struct drm_encoder *encoder = connector->base.encoder;
+
+ WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
+
+ if (!encoder)
+ return INVALID_PIPE;
+
+ return to_intel_crtc(encoder->crtc)->pipe;
+}
+
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -9466,7 +9968,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
if (!drmmode_obj) {
DRM_ERROR("no such CRTC id\n");
- return -EINVAL;
+ return -ENOENT;
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
@@ -9573,7 +10075,13 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
- /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+ PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+ }
+
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
@@ -9582,12 +10090,7 @@ static void intel_setup_outputs(struct drm_device *dev)
PORT_C);
}
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
- intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
- PORT_B);
- if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
- }
+ intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -9643,6 +10146,7 @@ static void intel_setup_outputs(struct drm_device *dev)
void intel_framebuffer_fini(struct intel_framebuffer *fb)
{
drm_framebuffer_cleanup(&fb->base);
+ WARN_ON(!fb->obj->framebuffer_references--);
drm_gem_object_unreference_unlocked(&fb->obj->base);
}
@@ -9674,9 +10178,12 @@ int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
+ int aligned_height, tile_height;
int pitch_limit;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
@@ -9765,8 +10272,16 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
+ tile_height = IS_GEN2(dev) ? 16 : 8;
+ aligned_height = ALIGN(mode_cmd->height,
+ obj->tiling_mode ? tile_height : 1);
+ /* FIXME drm helper for size checks (especially planar formats)? */
+ if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+ return -EINVAL;
+
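The new size check is straightforward arithmetic; here is a worked example with assumed numbers (a 1920-wide XRGB8888 framebuffer with a 4-byte pixel and a deliberately unaligned height), since none of those values come from the patch itself.

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

/* Minimum-object-size check from above, in userspace form. */
int main(void)
{
	int gen2 = 0;
	int tile_height = gen2 ? 16 : 8;
	int tiled = 1;                        /* X-tiled */

	unsigned int height = 1085;           /* not a multiple of tile_height */
	unsigned int pitch  = 1920 * 4;       /* bytes per row */

	unsigned int aligned_height = ALIGN_UP(height, tiled ? tile_height : 1);
	unsigned long min_size = (unsigned long)aligned_height * pitch;

	printf("aligned_height=%u, minimum object size=%lu bytes\n",
	       aligned_height, min_size);
	return 0;
}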
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
+ intel_fb->obj->framebuffer_references++;
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
@@ -9792,9 +10307,15 @@ intel_user_framebuffer_create(struct drm_device *dev,
return intel_framebuffer_create(dev, mode_cmd, obj);
}
+#ifndef CONFIG_DRM_I915_FBDEV
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+#endif
+
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .output_poll_changed = intel_fb_output_poll_changed,
+ .output_poll_changed = intel_fbdev_output_poll_changed,
};
/* Set up chip specific display functions */
@@ -9820,7 +10341,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
- dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9828,7 +10348,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9836,7 +10355,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9886,7 +10404,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
dev_priv->display.modeset_global_resources =
@@ -9894,7 +10412,8 @@ static void intel_init_display(struct drm_device *dev)
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- }
+ } else if (IS_VALLEYVIEW(dev))
+ dev_priv->display.write_eld = ironlake_write_eld;
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -9917,6 +10436,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_gen6_queue_flip;
break;
case 7:
+ case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
}
@@ -10012,8 +10532,7 @@ static struct intel_quirk intel_quirks[] = {
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
- /* 830/845 need to leave pipe A & dpll A up */
- { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ /* 830 needs to leave pipe A & dpll A up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
@@ -10022,20 +10541,11 @@ static struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
- /* Acer Aspire 5734Z must invert backlight brightness */
- { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
- /* Acer/eMachines G725 */
- { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
- /* Acer/eMachines e725 */
- { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
- /* Acer/Packard Bell NCL20 */
- { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
- /* Acer Aspire 4736Z */
- { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+ /*
+ * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
+ * seem to use inverted backlight PWM.
+ */
+ { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
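The consolidated entry works because the quirk matching loop treats PCI_ANY_ID as a wildcard, so one 0x2a42/0x1025 row now covers every Acer/eMachines/Packard Bell subsystem ID that the removed rows listed individually. A trimmed sketch of that matching; the field order is inferred from the table entries shown here, and the real struct and loop live elsewhere in intel_display.c.

#include <stdio.h>
#include <stdbool.h>

#define ANY_ID (~0)      /* stand-in for PCI_ANY_ID */

struct quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(void);
};

static void invert_brightness(void) { puts("applying invert_brightness quirk"); }

static const struct quirk quirks[] = {
	/* one wildcard row replaces the five removed per-model rows */
	{ 0x2a42, 0x1025, ANY_ID, invert_brightness },
};

static bool matches(const struct quirk *q, int dev, int sub_ven, int sub_dev)
{
	return q->device == dev &&
	       (q->subsystem_vendor == sub_ven || q->subsystem_vendor == ANY_ID) &&
	       (q->subsystem_device == sub_dev || q->subsystem_device == ANY_ID);
}

int main(void)
{
	/* e.g. the old Acer Aspire 5734Z entry: 0x2a42 / 0x1025 / 0x0459 */
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (matches(&quirks[i], 0x2a42, 0x1025, 0x0459))
			quirks[i].hook();
	return 0;
}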
@@ -10084,12 +10594,19 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- intel_init_power_well(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
intel_prepare_ddi(dev);
intel_init_clock_gating(dev);
+ /* Enable the CRI clock source so we can get at the display */
+ if (IS_VALLEYVIEW(dev))
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_INTEGRATED_CRI_CLK_VLV);
+
+ intel_init_dpio(dev);
+
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
@@ -10357,7 +10874,7 @@ void i915_redisable_vga(struct drm_device *dev)
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
- if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
@@ -10380,6 +10897,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
&crtc->config);
crtc->base.enabled = crtc->active;
+ crtc->primary_enabled = crtc->active;
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
@@ -10413,27 +10931,17 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- if (encoder->get_config)
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}
encoder->connectors_active = false;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
encoder->base.crtc ? "enabled" : "disabled",
- pipe);
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
- if (!crtc->active)
- continue;
- if (dev_priv->display.get_clock)
- dev_priv->display.get_clock(crtc,
- &crtc->config);
+ pipe_name(pipe));
}
list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10460,7 +10968,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;
@@ -10507,7 +11014,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
+ if (IS_HASWELL(dev))
+ ilk_wm_get_hw_state(dev);
+
if (force_restore) {
+ i915_redisable_vga(dev);
+
/*
* We need to use raw interfaces for restoring state to avoid
* checking (bogus) intermediate states.
@@ -10519,10 +11031,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->fb);
}
- list_for_each_entry(plane, &dev->mode_config.plane_list, head)
- intel_plane_restore(plane);
-
- i915_redisable_vga(dev);
} else {
intel_modeset_update_staged_output_state(dev);
}
@@ -10545,6 +11053,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ struct drm_connector *connector;
/*
* Interrupts and polling as the first thing to avoid creating havoc.
@@ -10585,6 +11094,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* destroy backlight, if any, before the connectors */
intel_panel_destroy_backlight(dev);
+ /* destroy the sysfs files before encoders/connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+
drm_mode_config_cleanup(dev);
intel_cleanup_overlay(dev);
@@ -10680,7 +11193,7 @@ intel_display_capture_error_state(struct drm_device *dev)
if (INTEL_INFO(dev)->num_pipes == 0)
return NULL;
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
@@ -10688,6 +11201,9 @@ intel_display_capture_error_state(struct drm_device *dev)
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
+ if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+ continue;
+
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10721,6 +11237,10 @@ intel_display_capture_error_state(struct drm_device *dev)
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
+ if (!intel_display_power_enabled(dev,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+ continue;
+
error->transcoder[i].cpu_transcoder = cpu_transcoder;
error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -10732,12 +11252,6 @@ intel_display_capture_error_state(struct drm_device *dev)
error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
- /* In the code above we read the registers without checking if the power
- * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
- * prevent the next I915_WRITE from detecting it and printing an error
- * message. */
- intel_uncore_clear_errors(dev);
-
return error;
}
@@ -10782,7 +11296,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
}
for (i = 0; i < error->num_transcoders; i++) {
- err_printf(m, " CPU transcoder: %c\n",
+ err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a431377d83..0b2e842fef0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+struct dp_link_dpll {
+ int link_bw;
+ struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
}
}
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ enum pipe pipe;
+
+ /* modeset should have pipe */
+ if (crtc)
+ return to_intel_crtc(crtc)->pipe;
+
+ /* init time, try to find a pipe with this port selected */
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+ if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+ return pipe;
+ if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+ return pipe;
+ }
+
+ /* shrug */
+ return PIPE_A;
+}
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (HAS_PCH_SPLIT(dev))
+ return PCH_PP_CONTROL;
+ else
+ return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (HAS_PCH_SPLIT(dev))
+ return PCH_PP_STATUS;
+ else
+ return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_stat_reg;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+ return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_ctrl_reg;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
- return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+ return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
static void
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_stat_reg, pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ I915_READ(_pp_stat_reg(intel_dp)),
+ I915_READ(_pp_ctrl_reg(intel_dp)));
}
}
@@ -330,6 +405,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t status;
int try, precharge, clock = 0;
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+ uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
@@ -344,6 +420,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
precharge = 5;
+ if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
intel_aux_display_runtime_get(dev_priv);
/* Try to wait for any previous AUX channel activity */
@@ -361,6 +442,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
+ /* Only 5 data registers! */
+ if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
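The 20-byte limit enforced above comes straight from the hardware layout: five 32-bit AUX data registers per channel, each carrying four message bytes. A sketch of the MSB-first packing, mirroring the pack helper defined earlier in intel_dp.c (not visible in this hunk); the sample header bytes follow the native-write header format visible further down in intel_dp_aux_native_write() (command, address high, address low, length minus one) and are otherwise arbitrary.

#include <stdio.h>
#include <stdint.h>

/* Four message bytes per 32-bit data register, most significant first. */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (int i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

int main(void)
{
	/* Hypothetical native-write header for DPCD address 0x00100, 4 bytes. */
	uint8_t msg[4] = { 0x80, 0x01, 0x00, 0x03 };

	printf("data register 1 <- 0x%08x\n", pack_aux(msg, (int)sizeof(msg)));
	printf("maximum payload: 5 registers * 4 bytes = %d bytes\n", 5 * 4);
	return 0;
}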
while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
@@ -373,7 +460,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
- DP_AUX_CH_CTL_TIME_OUT_400us |
+ timeout |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
@@ -451,9 +538,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
int msg_bytes;
uint8_t ack;
+ if (WARN_ON(send_bytes > 16))
+ return -E2BIG;
+
intel_dp_check_edp(intel_dp);
- if (send_bytes > 16)
- return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
@@ -494,6 +582,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint8_t ack;
int ret;
+ if (WARN_ON(recv_bytes > 19))
+ return -E2BIG;
+
intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
@@ -538,6 +629,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
int reply_bytes;
int ret;
+ ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
@@ -569,13 +661,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
- for (retry = 0; retry < 5; retry++) {
+ /*
+ * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
+ * required to retry at least seven times upon receiving AUX_DEFER
+ * before giving up the AUX transaction.
+ */
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
- return ret;
+ goto out;
}
switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +683,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
case AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
case AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
@@ -604,7 +702,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +711,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
- return reply_bytes - 1;
+ ret = reply_bytes - 1;
+ goto out;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
}
DRM_ERROR("too many retries, giving up\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+
+out:
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
}
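The new retry count comes directly from the DP 1.2 requirement quoted in the comment: a source must retry at least seven times on AUX_DEFER before giving up, and every exit path now funnels through the out: label so the VDD reference taken at the top of the function is always dropped. A compact sketch of that retry/defer shape with a stubbed transaction; the stub and its defer pattern are purely illustrative.

#include <stdio.h>

enum reply { REPLY_ACK, REPLY_NACK, REPLY_DEFER };

/* Stand-in for the real AUX transaction: defers a few times, then acks. */
static enum reply do_aux(int attempt)
{
	return attempt < 3 ? REPLY_DEFER : REPLY_ACK;
}

int main(void)
{
	/* DP 1.2, 2.7.7.1.5.6.1: retry at least seven times on AUX_DEFER. */
	for (int retry = 0; retry < 7; retry++) {
		switch (do_aux(retry)) {
		case REPLY_ACK:
			printf("ack after %d attempt(s)\n", retry + 1);
			return 0;
		case REPLY_NACK:
			fprintf(stderr, "nack, giving up\n");
			return 1;
		case REPLY_DEFER:
			/* the driver also sleeps briefly before retrying */
			continue;
		}
	}
	fprintf(stderr, "too many retries, giving up\n");
	return 1;
}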
static int
@@ -647,11 +753,9 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+ intel_dp->adapter.dev.parent = intel_connector->base.kdev;
- ironlake_edp_panel_vdd_on(intel_dp);
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
@@ -660,41 +764,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
+ const struct dp_link_dpll *divisor = NULL;
+ int i, count = 0;
if (IS_G4X(dev)) {
- if (link_bw == DP_LINK_BW_1_62) {
- pipe_config->dpll.p1 = 2;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.n = 2;
- pipe_config->dpll.m1 = 23;
- pipe_config->dpll.m2 = 8;
- } else {
- pipe_config->dpll.p1 = 1;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.n = 1;
- pipe_config->dpll.m1 = 14;
- pipe_config->dpll.m2 = 2;
- }
- pipe_config->clock_set = true;
+ divisor = gen4_dpll;
+ count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
- if (link_bw == DP_LINK_BW_1_62) {
- pipe_config->dpll.n = 1;
- pipe_config->dpll.p1 = 2;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.m1 = 12;
- pipe_config->dpll.m2 = 9;
- } else {
- pipe_config->dpll.n = 2;
- pipe_config->dpll.p1 = 1;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.m1 = 14;
- pipe_config->dpll.m2 = 8;
- }
- pipe_config->clock_set = true;
+ divisor = pch_dpll;
+ count = ARRAY_SIZE(pch_dpll);
} else if (IS_VALLEYVIEW(dev)) {
- /* FIXME: Need to figure out optimized DP clocks for vlv. */
+ divisor = vlv_dpll;
+ count = ARRAY_SIZE(vlv_dpll);
+ }
+
+ if (divisor && count) {
+ for (i = 0; i < count; i++) {
+ if (link_bw == divisor[i].link_bw) {
+ pipe_config->dpll = divisor[i].dpll;
+ pipe_config->clock_set = true;
+ break;
+ }
+ }
}
}
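This rewrite replaces three per-platform if/else ladders with per-platform tables keyed on the DPCD link-bandwidth code plus one generic lookup loop; adding a platform now means adding a table rather than another branch. A self-contained sketch of the same lookup, with the struct and the PCH values copied from the tables added at the top of this diff (the two DP_LINK_BW_* codes are the standard DPCD values 0x06 and 0x0a).

#include <stdio.h>
#include <stddef.h>

struct dpll { int p1, p2, n, m1, m2; };

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

#define DP_LINK_BW_1_62  0x06
#define DP_LINK_BW_2_7   0x0a

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62, { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,  { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } },
};

int main(void)
{
	int link_bw = DP_LINK_BW_2_7;

	for (size_t i = 0; i < sizeof(pch_dpll) / sizeof(pch_dpll[0]); i++) {
		if (pch_dpll[i].link_bw == link_bw) {
			const struct dpll *d = &pch_dpll[i].dpll;
			printf("p1=%d p2=%d n=%d m1=%d m2=%d\n",
			       d->p1, d->p2, d->n, d->m1, d->m2);
			break;
		}
	}
	return 0;
}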
@@ -737,19 +830,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock], adjusted_mode->clock);
+ max_lane_count, bws[max_clock],
+ adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+ dev_priv->vbt.edp_bpp < bpp) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
- bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+ bpp = dev_priv->vbt.edp_bpp;
}
for (; bpp >= 6*3; bpp -= 2*3) {
- mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +890,8 @@ found:
mode_rate, link_avail);
intel_link_compute_m_n(bpp, lane_count,
- adjusted_mode->clock, pipe_config->port_clock,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
&pipe_config->dp_m_n);
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
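The bpp/clock/lane walk above is ordinary bandwidth arithmetic, so a worked example helps: a 148.5 MHz, 24 bpp mode needs about 3.56 Gbit/s of pixel data, while one HBR lane carries 2.16 Gbit/s of payload after 8b/10b coding, so two lanes are the first configuration that fits. The kernel helpers (intel_dp_link_required()/intel_dp_max_data_rate()) perform the equivalent comparison in their own units; the sketch below sticks to kbit/s, and the 1080p60 numbers are illustrative rather than taken from this patch.

#include <stdio.h>

int main(void)
{
	const int pixel_clock = 148500;  /* kHz */
	const int bpp = 24;
	const int link_rate = 270000;    /* kHz symbol clock for DP_LINK_BW_2_7 */

	long mode_rate = (long)pixel_clock * bpp;   /* pixel data, kbit/s */

	for (int lanes = 1; lanes <= 4; lanes <<= 1) {
		/* 8 payload bits per 10-bit symbol, per lane */
		long link_avail = (long)link_rate * 8 * lanes;

		printf("%d lane(s): %ld kbit/s available vs %ld kbit/s needed -> %s\n",
		       lanes, link_avail, mode_rate,
		       link_avail >= mode_rate ? "ok" : "not enough");
	}
	return 0;
}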
@@ -802,21 +899,6 @@ found:
return true;
}
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
-}
-
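The open-coded DPCD test removed above is what the new drm_dp_enhanced_frame_cap() call is expected to perform: enhanced framing needs DPCD revision 1.1 or later plus the DP_ENHANCED_FRAME_CAP bit in the max-lane-count byte. A sketch based on the removed lines (not on the helper's source); the DPCD register offsets are the standard DisplayPort values.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DP_DPCD_REV            0x000
#define DP_MAX_LANE_COUNT      0x002
#define DP_ENHANCED_FRAME_CAP  (1 << 7)

/* Equivalent of the check the removed intel_dp_init_link_config() did. */
static bool enhanced_frame_cap(const uint8_t *dpcd)
{
	return dpcd[DP_DPCD_REV] >= 0x11 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}

int main(void)
{
	uint8_t dpcd[16] = { 0 };

	dpcd[DP_DPCD_REV] = 0x12;                        /* DPCD 1.2 sink */
	dpcd[DP_MAX_LANE_COUNT] = 0x04 | DP_ENHANCED_FRAME_CAP;

	printf("enhanced framing supported: %s\n",
	       enhanced_frame_cap(dpcd) ? "yes" : "no");
	return 0;
}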
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +971,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_write_eld(&encoder->base, adjusted_mode);
}
- intel_dp_init_link_config(intel_dp);
-
/* Split out the IBX/CPU vs CPT settings */
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +980,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +994,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (crtc->pipe == 1)
@@ -944,8 +1024,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
mask, value,
@@ -987,11 +1067,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 control;
- u32 pp_ctrl_reg;
-
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
- control = I915_READ(pp_ctrl_reg);
+ control = I915_READ(_pp_ctrl_reg(intel_dp));
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
return control;
@@ -1006,17 +1083,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP VDD on\n");
WARN(intel_dp->want_panel_vdd,
"eDP VDD already requested on\n");
intel_dp->want_panel_vdd = true;
- if (ironlake_edp_have_panel_vdd(intel_dp)) {
- DRM_DEBUG_KMS("eDP VDD already on\n");
+ if (ironlake_edp_have_panel_vdd(intel_dp))
return;
- }
+
+ DRM_DEBUG_KMS("Turning eDP VDD on\n");
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1100,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1126,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ DRM_DEBUG_KMS("Turning eDP VDD off\n");
+
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1160,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
intel_dp->want_panel_vdd = false;
@@ -1119,20 +1196,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
ironlake_wait_panel_power_cycle(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
}
pp |= POWER_TARGET_ON;
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1140,8 +1216,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
}
}
@@ -1164,7 +1240,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1179,7 +1255,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
u32 pp_ctrl_reg;
@@ -1197,12 +1272,12 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_panel_enable_backlight(dev, pipe);
+ intel_panel_enable_backlight(intel_dp->attached_connector);
}
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1215,13 +1290,13 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- intel_panel_disable_backlight(dev);
+ intel_panel_disable_backlight(intel_dp->attached_connector);
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1443,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int dotclock;
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);
@@ -1395,13 +1471,25 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
- if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+ pipe_config->has_dp_encoder = true;
+
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ if (port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
}
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
@@ -1423,20 +1511,21 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static bool is_edp_psr(struct drm_device *dev)
{
- return is_edp(intel_dp) &&
- intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return dev_priv->psr.sink_support;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev))
+ if (!HAS_PSR(dev))
return false;
- return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1486,7 +1575,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
/* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
intel_dp->psr_setup_done = true;
@@ -1511,9 +1600,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
DP_PSR_MAIN_LINK_ACTIVE);
/* Setup AUX registers */
- I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
- I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
- I915_WRITE(EDP_PSR_AUX_CTL,
+ I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+ I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1527,6 +1616,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
+ const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
val |= EDP_PSR_LINK_STANDBY;
@@ -1536,8 +1626,8 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
} else
val |= EDP_PSR_LINK_DISABLE;
- I915_WRITE(EDP_PSR_CTL, val |
- EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+ I915_WRITE(EDP_PSR_CTL(dev), val |
+ IS_BROADWELL(dev) ? 0 : link_entry_time |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
@@ -1553,42 +1643,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- if (!IS_HASWELL(dev)) {
+ dev_priv->psr.source_ok = false;
+
+ if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
- dev_priv->no_psr_reason = PSR_NO_SOURCE;
return false;
}
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
(dig_port->port != PORT_A)) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
- dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
- return false;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- dev_priv->no_psr_reason = PSR_NO_SINK;
return false;
}
if (!i915_enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
- dev_priv->no_psr_reason = PSR_MODULE_PARAM;
return false;
}
crtc = dig_port->base.base.crtc;
if (crtc == NULL) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
- dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+ if (!intel_crtc_active(crtc)) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
- dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
@@ -1596,29 +1677,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
- dev_priv->no_psr_reason = PSR_NOT_TILED;
return false;
}
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
- dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
return false;
}
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- dev_priv->no_psr_reason = PSR_S3D_ENABLED;
return false;
}
- if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
return false;
}
+ dev_priv->psr.source_ok = true;
return true;
}
@@ -1657,10 +1735,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
if (!intel_edp_is_psr_enabled(dev))
return;
- I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
@@ -1674,7 +1753,7 @@ void intel_edp_psr_update(struct drm_device *dev)
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
- if (!is_edp_psr(intel_dp))
+ if (!is_edp_psr(dev))
return;
if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1695,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
@@ -1733,14 +1812,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_enable_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}
static void vlv_enable_dp(struct intel_encoder *encoder)
{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ ironlake_edp_backlight_on(intel_dp);
}
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1758,53 +1847,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
+ struct edp_power_seq power_seq;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
intel_enable_dp(encoder);
vlv_wait_port_ready(dev_priv, port);
}
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
-
- if (!IS_VALLEYVIEW(dev))
- return;
+ int pipe = intel_crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1869,7 +1964,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1885,7 +1980,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (HAS_DDI(dev)) {
+ if (IS_BROADWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_HASWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -1939,10 +2045,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dport->base.base.crtc);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
@@ -2018,21 +2127,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
}
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+ const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
@@ -2193,6 +2303,41 @@ intel_hsw_signal_levels(uint8_t train_set)
}
}
+static uint32_t
+intel_bdw_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
+
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
+ }
+}
+
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2203,7 +2348,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev)) {
+ if (IS_BROADWELL(dev)) {
+ signal_levels = intel_bdw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_HASWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_VALLEYVIEW(dev)) {
@@ -2227,14 +2375,15 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
- uint32_t dp_reg_value,
+ uint32_t *DP,
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
- int ret;
+ uint8_t buf[sizeof(intel_dp->train_set) + 1];
+ int ret, len;
if (HAS_DDI(dev)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2263,62 +2412,93 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(DP_TP_CTL(port), temp);
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
- dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+ *DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ *DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
- dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ *DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
- dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
- dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
} else {
- dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+ *DP &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- dp_reg_value |= DP_LINK_TRAIN_OFF;
+ *DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
- dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ *DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
- dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ *DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
- dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ *DP |= DP_LINK_TRAIN_PAT_2;
break;
}
}
- I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
- DP_TRAINING_PATTERN_SET,
- dp_train_pat);
-
- if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ buf[0] = dp_train_pat;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
- ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
- return false;
+ /* don't write DP_TRAINING_LANEx_SET on disable */
+ len = 1;
+ } else {
+ /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
+ memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+ len = intel_dp->lane_count + 1;
}
- return true;
+ ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
+ buf, len);
+
+ return ret == len;
+}
+
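/*
 * Editor's note -- an illustrative sketch, not the driver's code. The
 * rewrite above folds DP_TRAINING_PATTERN_SET and the per-lane drive
 * settings into one AUX transfer because their DPCD addresses are
 * contiguous: 0x102 is immediately followed by DP_TRAINING_LANE0_SET..
 * LANE3_SET at 0x103..0x106. send_training_block() and its aux_write
 * callback are hypothetical names standing in for the driver's helpers;
 * the DP_* macros and memcpy() come from the headers intel_dp.c already
 * includes.
 */
static bool send_training_block(uint8_t dp_train_pat,
				const uint8_t *train_set, int lane_count,
				int (*aux_write)(uint16_t addr,
						 const uint8_t *buf, int len))
{
	uint8_t buf[1 + 4];	/* pattern byte + up to four lane settings */
	int len = 1;

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		memcpy(buf + 1, train_set, lane_count);
		len += lane_count;
	}

	/* one write covers 0x102 and, while training, 0x103..0x102+lanes */
	return aux_write(DP_TRAINING_PATTERN_SET, buf, len) == len;
}
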
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+ uint8_t dp_train_pat)
+{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ intel_dp_set_signal_levels(intel_dp, DP);
+ return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+ const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_set_signal_levels(intel_dp, DP);
+
+ I915_WRITE(intel_dp->output_reg, *DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+
+ return ret == intel_dp->lane_count;
}
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -2362,32 +2542,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t voltage;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
+ uint8_t link_config[2];
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ link_config[0] = intel_dp->link_bw;
+ link_config[1] = intel_dp->lane_count;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+
+ link_config[0] = 0;
+ link_config[1] = DP_SET_ANSI_8B10B;
+ intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
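/*
 * Editor's note, not part of the patch: each two-byte write above relies on
 * DPCD register adjacency -- DP_LINK_BW_SET (0x100) is immediately followed
 * by DP_LANE_COUNT_SET (0x101), and DP_DOWNSPREAD_CTRL (0x107) by
 * DP_MAIN_LINK_CHANNEL_CODING_SET (0x108, where DP_SET_ANSI_8B10B lives),
 * so a single AUX transfer programs each pair.
 */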
DP |= DP_PORT_EN;
- memset(intel_dp->train_set, 0, 4);
+ /* clock recovery */
+ if (!intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to enable link training\n");
+ return;
+ }
+
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- intel_dp_set_signal_levels(intel_dp, &DP);
-
- /* Set training pattern 1 */
- if (!intel_dp_set_link_train(intel_dp, DP,
- DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE))
- break;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2407,10 +2592,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
- DRM_DEBUG_KMS("too many full retries, give up\n");
+ DRM_ERROR("too many full retries, give up\n");
break;
}
- memset(intel_dp->train_set, 0, 4);
+ intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
@@ -2419,15 +2606,18 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
- DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ /* Update training set as requested by target */
+ if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
}
intel_dp->DP = DP;
@@ -2441,11 +2631,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
uint32_t DP = intel_dp->DP;
/* channel equalization */
+ if (!intel_dp_set_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to start channel equalization\n");
+ return;
+ }
+
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
@@ -2453,21 +2650,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- intel_dp_set_signal_levels(intel_dp, &DP);
-
- /* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, DP,
- DP_TRAINING_PATTERN_2 |
- DP_LINK_SCRAMBLING_DISABLE))
- break;
-
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
break;
+ }
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
+ intel_dp_set_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
@@ -2481,13 +2675,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (tries > 5) {
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
+ intel_dp_set_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ /* Update training set as requested by target */
+ if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
++tries;
}
@@ -2502,7 +2702,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
- intel_dp_set_link_train(intel_dp, intel_dp->DP,
+ intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_DISABLE);
}
@@ -2589,6 +2789,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2604,11 +2808,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
- intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
- if (is_edp_psr(intel_dp))
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ if (is_edp(intel_dp)) {
+ intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ dev_priv->psr.sink_support = true;
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ }
+ }
+
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2728,7 +2937,6 @@ static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
uint8_t *dpcd = intel_dp->dpcd;
- bool hpd;
uint8_t type;
if (!intel_dp_get_dpcd(intel_dp))
@@ -2739,8 +2947,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
- hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
- if (hpd) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
@@ -2754,9 +2962,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* Well we tried, say unknown for unreliable port types */
- type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
- if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
- return connector_status_unknown;
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA ||
+ type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+ } else {
+ type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_TYPE_MASK;
+ if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+ type == DP_DWN_STRM_PORT_TYPE_OTHER)
+ return connector_status_unknown;
+ }
/* Anything else is out of spec, warn and ignore */
DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@@ -2830,19 +3047,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
/* use cached edid if we have one */
if (intel_connector->edid) {
- struct edid *edid;
- int size;
-
/* invalid edid */
if (IS_ERR(intel_connector->edid))
return NULL;
- size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
- edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
- if (!edid)
- return NULL;
-
- return edid;
+ return drm_edid_duplicate(intel_connector->edid);
}
return drm_get_edid(connector, adapter);
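/*
 * Editor's note -- a sketch of what drm_edid_duplicate() is expected to do
 * in place of the removed open-coded copy (assumed behaviour, not quoted
 * from the DRM core): duplicate the base EDID block plus all extension
 * blocks, returning NULL on allocation failure.
 */
static struct edid *edid_duplicate_sketch(const struct edid *edid)
{
	size_t size = (edid->extensions + 1) * EDID_LENGTH; /* 128 bytes/block */

	return kmemdup(edid, size, GFP_KERNEL);
}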
@@ -3050,7 +3259,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -3121,7 +3329,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
bool intel_dpd_is_edp(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i;
if (!dev_priv->vbt.child_dev_num)
@@ -3130,8 +3338,9 @@ bool intel_dpd_is_edp(struct drm_device *dev)
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
- if (p_child->dvo_port == PORT_IDPD &&
- p_child->device_type == DEVICE_TYPE_eDP)
+ if (p_child->common.dvo_port == PORT_IDPD &&
+ (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
}
return false;
@@ -3164,24 +3373,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
u32 pp_on, pp_off, pp_div, pp;
- int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+ int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
if (HAS_PCH_SPLIT(dev)) {
- pp_control_reg = PCH_PP_CONTROL;
+ pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
- pp_control_reg = PIPEA_PP_CONTROL;
- pp_on_reg = PIPEA_PP_ON_DELAYS;
- pp_off_reg = PIPEA_PP_OFF_DELAYS;
- pp_div_reg = PIPEA_PP_DIVISOR;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp = ironlake_get_pp_control(intel_dp);
- I915_WRITE(pp_control_reg, pp);
+ I915_WRITE(pp_ctrl_reg, pp);
pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);
@@ -3269,9 +3480,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
- pp_on_reg = PIPEA_PP_ON_DELAYS;
- pp_off_reg = PIPEA_PP_OFF_DELAYS;
- pp_div_reg = PIPEA_PP_DIVISOR;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
/* And finally store the new values in the power sequencer. */
@@ -3288,12 +3501,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
- port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+ if (dp_to_dig_port(intel_dp)->port == PORT_B)
+ port_sel = PANEL_PORT_SELECT_DPB_VLV;
+ else
+ port_sel = PANEL_PORT_SELECT_DPC_VLV;
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_A)
- port_sel = PANEL_POWER_PORT_DP_A;
+ port_sel = PANEL_PORT_SELECT_DPA;
else
- port_sel = PANEL_POWER_PORT_DP_D;
+ port_sel = PANEL_PORT_SELECT_DPD;
}
pp_on |= port_sel;
@@ -3346,7 +3562,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
- ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
@@ -3378,8 +3593,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
- ironlake_edp_panel_vdd_off(intel_dp, false);
-
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
@@ -3536,11 +3749,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
@@ -3559,12 +3772,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
- intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+ intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
} else {
- intel_encoder->pre_enable = intel_pre_enable_dp;
- intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
}
intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7f2b384ac93..1e49aa8f537 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
/* the i915, i945 have a single sDVO i2c bus - which is different */
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
-#define INTELFB_CONN_LIMIT 4
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
+#define INTEL_DSI_COMMAND_MODE 0
+#define INTEL_DSI_VIDEO_MODE 1
+
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
+ /* User requested mode, only valid as a starting point to
+ * compute adjusted_mode, except in the case of (S)DVO where
+ * it's also for the output timings of the (S)DVO chip.
+ * adjusted_mode will then correspond to the (S)DVO chip's
+ * preferred input timings. */
struct drm_display_mode requested_mode;
+ /* Actual pipe timings ie. what we program into the pipe timing
+ * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
struct drm_display_mode adjusted_mode;
+
+ /* Pipe source size (ie. panel fitter input size)
+ * All planes will be positioned inside this space,
+ * and get clipped at the edges. */
+ int pipe_src_w, pipe_src_h;
+
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@@ -262,7 +278,8 @@ struct intel_crtc_config {
/*
* Frequency the dpll for the port should run at. Differs from the
- * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+ * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * already multiplied by pixel_multiplier.
*/
int port_clock;
@@ -288,6 +305,14 @@ struct intel_crtc_config {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
+
+ bool double_wide;
+};
+
+struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+ uint32_t linetime;
+ bool fbc_wm_enabled;
};
struct intel_crtc {
@@ -301,8 +326,9 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
+ unsigned long enabled_power_domains;
bool eld_vld;
- bool primary_disabled; /* is the crtc obscured by a plane? */
+ bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@ struct intel_crtc {
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
+
+ /* per-pipe watermark state */
+ struct {
+ /* watermarks currently being used */
+ struct intel_pipe_wm active;
+ } wm;
};
struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@ struct intel_hdmi {
};
#define DP_MAX_DOWNSTREAM_PORTS 0x10
-#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
@@ -495,80 +525,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-int intel_pch_rawclk(struct drm_device *dev);
-
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-
-extern void intel_attach_force_audio_property(struct drm_connector *connector);
-extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-
-extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev,
- int hdmi_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
-extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
- bool is_sdvob);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
-extern void intel_mark_idle(struct drm_device *dev);
-extern void intel_lvds_init(struct drm_device *dev);
-extern bool intel_is_dual_link_lvds(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int output_reg,
- enum port port);
-extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
-extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane);
-
-/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
-extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
- int fitting_mode);
-extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
- int fitting_mode);
-extern void intel_panel_set_backlight(struct drm_device *dev,
- u32 level, u32 max);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
-extern void intel_panel_enable_backlight(struct drm_device *dev,
- enum pipe pipe);
-extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
-extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@ struct intel_set_config {
bool mode_changed;
};
-extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_connector_dpms(struct drm_connector *, int mode);
-extern bool intel_connector_get_hw_state(struct intel_connector *connector);
-extern void intel_modeset_check_state(struct drm_device *dev);
-extern void intel_plane_restore(struct drm_plane *plane);
-extern void intel_plane_disable(struct drm_plane *plane);
-
+struct intel_load_detect_pipe {
+ struct drm_framebuffer *release_fb;
+ bool load_detect_temp;
+ int dpms_mode;
+};
-static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
{
return to_intel_connector(connector)->encoder;
}
@@ -616,73 +568,95 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void hsw_pc8_disable_interrupts(struct drm_device *dev);
+void hsw_pc8_restore_interrupts(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
-
-extern void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
-
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
-
-struct intel_load_detect_pipe {
- struct drm_framebuffer *release_fb;
- bool load_detect_temp;
- int dpms_mode;
-};
-extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
-
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
-
-extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-
-extern int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj);
-extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern void intel_fb_output_poll_changed(struct drm_device *dev);
-extern void intel_fb_restore_mode(struct drm_device *dev);
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+void intel_framebuffer_fini(struct intel_framebuffer *fb);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
@@ -696,104 +670,199 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
- bool state);
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
+void intel_display_handle_reset(struct drm_device *dev);
+void hsw_enable_pc8_work(struct work_struct *__work);
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ int dotclock);
+bool intel_crtc_active(struct drm_crtc *crtc);
+void i915_disable_vga_mem(struct drm_device *dev);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+void intel_display_set_init_power(struct drm_device *dev, bool enable);
+
+
+/* intel_dp.c */
+void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_check_link_status(struct intel_dp *intel_dp);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+bool intel_dpd_is_edp(struct drm_device *dev);
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_psr_enable(struct intel_dp *intel_dp);
+void intel_edp_psr_disable(struct intel_dp *intel_dp);
+void intel_edp_psr_update(struct drm_device *dev);
+
+
+/* intel_dsi.c */
+bool intel_dsi_init(struct drm_device *dev);
+
+
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
+
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_I915_FBDEV
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
-extern void intel_init_clock_gating(struct drm_device *dev);
-extern void intel_suspend_hw(struct drm_device *dev);
-extern void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
-extern void intel_prepare_ddi(struct drm_device *dev);
-extern void hsw_fdi_link_train(struct drm_crtc *crtc);
-extern void intel_ddi_init(struct drm_device *dev, enum port port);
-
-/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled);
-
-extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
- unsigned int tiling_mode,
- unsigned int bpp,
- unsigned int pitch);
-
-extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/* Power-related functions, located in intel_pm.c */
-extern void intel_init_pm(struct drm_device *dev);
-/* FBC */
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_update_fbc(struct drm_device *dev);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
-
-/* Power well */
-extern int i915_init_power_well(struct drm_device *dev);
-extern void i915_remove_power_well(struct drm_device *dev);
-
-extern bool intel_display_power_enabled(struct drm_device *dev,
- enum intel_display_power_domain domain);
-extern void intel_init_power_well(struct drm_device *dev);
-extern void intel_set_power_well(struct drm_device *dev, bool enable);
-extern void intel_enable_gt_powersave(struct drm_device *dev);
-extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void ironlake_teardown_rc6(struct drm_device *dev);
+static inline void intel_fbdev_initial_config(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+ u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector);
+void intel_panel_enable_backlight(struct intel_connector *connector);
+void intel_panel_disable_backlight(struct intel_connector *connector);
+void intel_panel_destroy_backlight(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_update_fbc(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+int intel_power_domains_init(struct drm_device *dev);
+void intel_power_domains_remove(struct drm_device *dev);
+bool intel_display_power_enabled(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+void intel_power_domains_init_hw(struct drm_device *dev);
+void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_device *dev);
+
+
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+void intel_plane_restore(struct drm_plane *plane);
+void intel_plane_disable(struct drm_plane *plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
-extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe);
-extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
-extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
-extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder);
-extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
-extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
-extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
-extern bool
-intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-extern void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
-
-extern void intel_display_handle_reset(struct drm_device *dev);
-extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable);
-extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable);
-
-extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_update(struct drm_device *dev);
-extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down);
-extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
-extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
- uint32_t mask);
-extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask);
-extern void hsw_enable_pc8_work(struct work_struct *__work);
-extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
-extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
-extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 00000000000..d257b093ca6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+};
+
+
+static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
+ u32 mask)
+{
+ u32 tmp = vlv_cck_read(dev_priv, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ vlv_cck_write(dev_priv, reg, tmp);
+}
+
+static void band_gap_wa(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Enable bandgap fix in GOP driver */
+ vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
+ msleep(20);
+
+ /* Turn Display Trunk on */
+ vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
+ msleep(20);
+
+ vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
+ msleep(20);
+
+ vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Need huge delay, otherwise clock is not stable */
+ msleep(100);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *config)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+ struct drm_display_mode *mode = &config->requested_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (fixed_mode)
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+ if (intel_dsi->dev.dev_ops->mode_fixup)
+ return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+ mode, adjusted_mode);
+
+ return true;
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ vlv_enable_dsi_pll(encoder);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ DRM_DEBUG_KMS("\n");
+
+ temp = I915_READ(MIPI_DEVICE_READY(pipe));
+ if ((temp & DEVICE_READY) == 0) {
+ temp &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
+ } else if (temp & ULPS_STATE_MASK) {
+ temp &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
+ /*
+ * We need to ensure that there is a minimum of 1 ms time
+ * available before clearing the ULPS exit state.
+ */
+ msleep(2);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+ }
+
+ if (is_cmd_mode(intel_dsi))
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+
+ if (is_vid_mode(intel_dsi)) {
+ msleep(20); /* XXX */
+ dpi_send_cmd(intel_dsi, TURN_ON);
+ msleep(100);
+
+ /* assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(pipe));
+ }
+
+ intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+ if (is_vid_mode(intel_dsi)) {
+ dpi_send_cmd(intel_dsi, SHUTDOWN);
+ msleep(10);
+
+ /* de-assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+ msleep(2);
+ }
+
+ temp = I915_READ(MIPI_DEVICE_READY(pipe));
+ if (temp & DEVICE_READY) {
+ temp &= ~DEVICE_READY;
+ temp &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+ }
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ vlv_disable_dsi_pll(encoder);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 port, func;
+ enum pipe p;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* XXX: this only works for one DSI output */
+ for (p = PIPE_A; p <= PIPE_B; p++) {
+ port = I915_READ(MIPI_PORT_CTRL(p));
+ func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+ if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+ if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+ *pipe = p;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ DRM_DEBUG_KMS("\n");
+
+ /* XXX: read flags, set to adjusted_mode */
+}
+
+static int intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+ return MODE_NO_DBLESCAN;
+ }
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+ switch (divider) {
+ case ESCAPE_CLOCK_DIVIDER_1:
+ default:
+ return 20 * us;
+ case ESCAPE_CLOCK_DIVIDER_2:
+ return 10 * us;
+ case ESCAPE_CLOCK_DIVIDER_4:
+ return 5 * us;
+ }
+}
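/*
 * Editor's note, a worked example (the divider setting is taken from the
 * mode_set code below, the 20 MHz figure from its comment): with
 * ESCAPE_CLOCK_DIVIDER_1 the escape clock runs at roughly 20 MHz, i.e.
 * 20 cycles per microsecond, so txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100)
 * yields the 2000 escape-clock cycles programmed into MIPI_INIT_COUNT
 * for a 100 us initialization period.
 */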
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
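/*
 * Editor's note, a worked example with assumed numbers (RGB888, 24 bpp,
 * four lanes -- values not stated by the patch): a 640 pixel span is
 * 640 * 24 / 8 = 1920 payload bytes spread across 4 lanes, so
 *
 *	txbyteclkhs(640, 24, 4) == DIV_ROUND_UP(DIV_ROUND_UP(640 * 24, 8), 4)
 *			       == DIV_ROUND_UP(1920, 4) == 480
 *
 * high-speed byte clocks; the horizontal timing registers below are
 * programmed in this unit rather than in pixels.
 */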
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int pipe = intel_crtc->pipe;
+ unsigned int bpp = intel_crtc->config.pipe_bpp;
+ unsigned int lane_count = intel_dsi->lane_count;
+
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+ hactive = mode->hdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsync = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* horizontal values are in terms of high speed byte clock */
+ hactive = txbyteclkhs(hactive, bpp, lane_count);
+ hfp = txbyteclkhs(hfp, bpp, lane_count);
+ hsync = txbyteclkhs(hsync, bpp, lane_count);
+ hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+ I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+ I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+ /* only meaningful in video mode with non-burst sync pulses; can be zero
+ * for non-burst sync events and burst modes */
+ I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+ I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+ /* vertical values are in terms of lines */
+ I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+ I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+ I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
+
+static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ int pipe = intel_crtc->pipe;
+ unsigned int bpp = intel_crtc->config.pipe_bpp;
+ u32 val, tmp;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ /* Update the DSI PLL */
+ vlv_enable_dsi_pll(intel_encoder);
+
+ /* XXX: Location of the call */
+ band_gap_wa(dev_priv);
+
+ /* escape clock divider, 20MHz, shared for A and C. device ready must be
+ * off when doing this! txclkesc? */
+ tmp = I915_READ(MIPI_CTRL(0));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+ /* read request priority is per pipe */
+ tmp = I915_READ(MIPI_CTRL(pipe));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+ /* XXX: why here, why like this? handling in irq handler?! */
+ I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+ I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+ I915_WRITE(MIPI_DPHY_PARAM(pipe),
+ 0x3c << EXIT_ZERO_COUNT_SHIFT |
+ 0x1f << TRAIL_COUNT_SHIFT |
+ 0xc5 << CLK_ZERO_COUNT_SHIFT |
+ 0x1f << PREPARE_COUNT_SHIFT);
+
+ I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+ adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+ adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+ set_dsi_timings(encoder, adjusted_mode);
+
+ val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+ if (is_cmd_mode(intel_dsi)) {
+ val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+ val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+ } else {
+ val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+ /* XXX: cross-check bpp vs. pixel format? */
+ val |= intel_dsi->pixel_format;
+ }
+ I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+ /* timeouts for recovery. one frame, if I understand correctly. if the
+ * counter expires, send EOT and enter stop state. */
+
+ /*
+ * In burst mode, program a value greater than one DPI line time in
+ * byte clocks (txbyteclkhs); 1 more than that value is recommended so
+ * the timer can expire.
+ *
+ * In non-burst mode, program a value greater than one DPI frame time
+ * in byte clocks (txbyteclkhs); again, 1 more than that value is
+ * recommended.
+ *
+ * In DBI only mode, program a value greater than one DBI frame time in
+ * byte clocks (txbyteclkhs); again, 1 more than that value is
+ * recommended.
+ */
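+
+ /*
+ * e.g. for a hypothetical 2200 pixel htotal at 24 bpp on 4 lanes, one
+ * DPI line is txbyteclkhs(2200, 24, 4) = 1650 byte clocks, so the
+ * burst mode timeout programmed below would be 1651.
+ */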
+
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+ txbyteclkhs(adjusted_mode->htotal, bpp,
+ intel_dsi->lane_count) + 1);
+ } else {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+ txbyteclkhs(adjusted_mode->vtotal *
+ adjusted_mode->htotal,
+ bpp, intel_dsi->lane_count) + 1);
+ }
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+
+ /* dphy stuff */
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+
+ /* recovery disables */
+ I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+
+ /* XXX: low power clock equivalence in terms of byte clock. the number
+ * of byte clocks occupied in one low power clock. based on txbyteclkhs
+ * and txclkesc. txclkesc time / txbyteclk time * (105 +
+ * MIPI_STOP_STATE_STALL) / 105.???
+ */
+ I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+
+ /* This register is programmed, in byte clocks, with the bandwidth
+ * needed to transmit 16 long packets of 252 bytes each for the DCS
+ * write memory command. The time taken to transmit 16 long packets in
+ * a DSI stream varies with the DSI transfer rate and the number of
+ * lanes configured. */
+ I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+
+ I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+ 0xa << LP_HS_SSW_CNT_SHIFT |
+ 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi))
+ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+ intel_dsi->video_mode_format);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+ DRM_DEBUG_KMS("\n");
+ return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!intel_connector->panel.fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ return 0;
+ }
+
+ mode = drm_mode_duplicate(connector->dev,
+ intel_connector->panel.fixed_mode);
+ if (!mode) {
+ DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ DRM_DEBUG_KMS("\n");
+ intel_panel_fini(&intel_connector->panel);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dsi_detect,
+ .destroy = intel_dsi_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ const struct intel_dsi_device *dsi;
+ unsigned int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return false;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return false;
+ }
+
+ intel_encoder = &intel_dsi->base;
+ encoder = &intel_encoder->base;
+ intel_dsi->attached_connector = intel_connector;
+
+ connector = &intel_connector->base;
+
+ drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+ /* XXX: very likely not all of these are needed */
+ intel_encoder->hot_plug = intel_dsi_hot_plug;
+ intel_encoder->compute_config = intel_dsi_compute_config;
+ intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+ intel_encoder->pre_enable = intel_dsi_pre_enable;
+ intel_encoder->enable = intel_dsi_enable;
+ intel_encoder->mode_set = intel_dsi_mode_set;
+ intel_encoder->disable = intel_dsi_disable;
+ intel_encoder->post_disable = intel_dsi_post_disable;
+ intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+ intel_encoder->get_config = intel_dsi_get_config;
+
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+ dsi = &intel_dsi_devices[i];
+ intel_dsi->dev = *dsi;
+
+ if (dsi->dev_ops->init(&intel_dsi->dev))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(intel_dsi_devices)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+ intel_encoder->cloneable = false;
+ drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ drm_sysfs_connector_add(connector);
+
+ fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+ if (!fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ goto err;
+ }
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+
+ return true;
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 00000000000..c7765f33d52
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+ unsigned int panel_id;
+ const char *name;
+ int type;
+ const struct intel_dsi_dev_ops *dev_ops;
+ void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+ bool (*init)(struct intel_dsi_device *dsi);
+
+ /* This callback must be able to assume DSI commands can be sent */
+ void (*enable)(struct intel_dsi_device *dsi);
+
+ /* This callback must be able to assume DSI commands can be sent */
+ void (*disable)(struct intel_dsi_device *dsi);
+
+ int (*mode_valid)(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode);
+
+ bool (*mode_fixup)(struct intel_dsi_device *dsi,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ void (*mode_set)(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+ bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+ struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+ void (*destroy) (struct intel_dsi_device *dsi);
+};
+
+struct intel_dsi {
+ struct intel_encoder base;
+
+ struct intel_dsi_device dev;
+
+ struct intel_connector *attached_connector;
+
+ /* if true, use HS mode, otherwise LP */
+ bool hs;
+
+ /* virtual channel */
+ int channel;
+
+ /* number of DSI lanes */
+ unsigned int lane_count;
+
+ /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+ u32 pixel_format;
+
+ /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+ u32 video_mode_format;
+
+ /* eot for MIPI_EOT_DISABLE register */
+ u32 eot_disable;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 00000000000..7c40f981d2c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/*
+ * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
+ * MIPI_COMMAND_ADDRESS registers.
+ *
+ * Apparently these registers provide a MIPI adapter level way to send (lots of)
+ * commands and data to the receiver, without having to write the commands and
+ * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
+ *
+ * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
+ * framebuffer in command mode displays) these are just an optimization that can
+ * come later.
+ *
+ * For memory writes, these should probably be used for performance.
+ */
+
+static void print_stat(struct intel_dsi *intel_dsi)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ val = I915_READ(MIPI_INTR_STAT(pipe));
+
+#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
+ DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+ "\n", pipe, val,
+ STAT_BIT(val, TEARING_EFFECT),
+ STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
+ STAT_BIT(val, GEN_READ_DATA_AVAIL),
+ STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
+ STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
+ STAT_BIT(val, RX_PROT_VIOLATION),
+ STAT_BIT(val, RX_INVALID_TX_LENGTH),
+ STAT_BIT(val, ACK_WITH_NO_ERROR),
+ STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
+ STAT_BIT(val, LP_RX_TIMEOUT),
+ STAT_BIT(val, HS_TX_TIMEOUT),
+ STAT_BIT(val, DPI_FIFO_UNDERRUN),
+ STAT_BIT(val, LOW_CONTENTION),
+ STAT_BIT(val, HIGH_CONTENTION),
+ STAT_BIT(val, TXDSI_VC_ID_INVALID),
+ STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
+ STAT_BIT(val, TXCHECKSUM_ERROR),
+ STAT_BIT(val, TXECC_MULTIBIT_ERROR),
+ STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
+ STAT_BIT(val, TXFALSE_CONTROL_ERROR),
+ STAT_BIT(val, RXDSI_VC_ID_INVALID),
+ STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
+ STAT_BIT(val, RXCHECKSUM_ERROR),
+ STAT_BIT(val, RXECC_MULTIBIT_ERROR),
+ STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
+ STAT_BIT(val, RXFALSE_CONTROL_ERROR),
+ STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
+ STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
+ STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
+ STAT_BIT(val, RXEOT_SYNC_ERROR),
+ STAT_BIT(val, RXSOT_SYNC_ERROR),
+ STAT_BIT(val, RXSOT_ERROR));
+#undef STAT_BIT
+}
+
+enum dsi_type {
+ DSI_DCS,
+ DSI_GENERIC,
+};
+
+/* enable or disable command mode hs transmissions */
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 temp;
+ u32 mask = DBI_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+
+ temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+ temp &= DBI_HS_LP_MODE_MASK;
+ I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
+
+ intel_dsi->hs = enable;
+}
+
+static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
+ u8 data_type, u16 data)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 ctrl_reg;
+ u32 ctrl;
+ u32 mask;
+
+ DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
+ channel, data_type, data);
+
+ if (intel_dsi->hs) {
+ ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+ mask = HS_CTRL_FIFO_FULL;
+ } else {
+ ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+ mask = LP_CTRL_FIFO_FULL;
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+ DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ print_stat(intel_dsi);
+ }
+
+ /*
+ * Note: This function is also used for long packets, with length passed
+ * as data, since SHORT_PACKET_PARAM_SHIFT ==
+ * LONG_PACKET_WORD_COUNT_SHIFT.
+ */
+ ctrl = data << SHORT_PACKET_PARAM_SHIFT |
+ channel << VIRTUAL_CHANNEL_SHIFT |
+ data_type << DATA_TYPE_SHIFT;
+
+ I915_WRITE(ctrl_reg, ctrl);
+
+ return 0;
+}
+
+static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
+ u8 data_type, const u8 *data, int len)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 data_reg;
+ int i, j, n;
+ u32 mask;
+
+ DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
+ channel, data_type, len);
+
+ if (intel_dsi->hs) {
+ data_reg = MIPI_HS_GEN_DATA(pipe);
+ mask = HS_DATA_FIFO_FULL;
+ } else {
+ data_reg = MIPI_LP_GEN_DATA(pipe);
+ mask = LP_DATA_FIFO_FULL;
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+ for (i = 0; i < len; i += n) {
+ u32 val = 0;
+ n = min_t(int, len - i, 4);
+
+ for (j = 0; j < n; j++)
+ val |= *data++ << 8 * j;
+
+ I915_WRITE(data_reg, val);
+ /* XXX: check for data fifo full, once that is set, write 4
+ * dwords, then wait for not set, then continue. */
+ }
+
+ return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+}
+
+static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
+ int channel, const u8 *data, int len,
+ enum dsi_type type)
+{
+ int ret;
+
+ if (len == 0) {
+ BUG_ON(type == DSI_GENERIC);
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
+ 0);
+ } else if (len == 1) {
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+ } else if (len == 2) {
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE_PARAM,
+ (data[1] << 8) | data[0]);
+ } else {
+ ret = dsi_vc_send_long(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_LONG_WRITE :
+ MIPI_DSI_DCS_LONG_WRITE, data, len);
+ }
+
+ return ret;
+}
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len)
+{
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+}
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len)
+{
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+}
+
+static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd)
+{
+ return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
+ dcs_cmd);
+}
+
+static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
+ int channel, u8 *reqdata,
+ int reqlen)
+{
+ u16 data;
+ u8 data_type;
+
+ switch (reqlen) {
+ case 0:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+ data = 0;
+ break;
+ case 1:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+ data = reqdata[0];
+ break;
+ case 2:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+ data = (reqdata[1] << 8) | reqdata[0];
+ break;
+ default:
+ BUG();
+ }
+
+ return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+}
+
+static int dsi_read_data_return(struct intel_dsi *intel_dsi,
+ u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int i, len = 0;
+ u32 data_reg, val;
+
+ if (intel_dsi->hs) {
+ data_reg = MIPI_HS_GEN_DATA(pipe);
+ } else {
+ data_reg = MIPI_LP_GEN_DATA(pipe);
+ }
+
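+ /* each FIFO read returns up to 4 bytes, least significant byte first */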
+ while (len < buflen) {
+ val = I915_READ(data_reg);
+ for (i = 0; i < 4 && len < buflen; i++, len++)
+ buf[len] = val >> 8 * i;
+ }
+
+ return len;
+}
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+ int ret;
+
+ /*
+ * XXX: should issue multiple read requests and reads if request is
+ * longer than MIPI_MAX_RETURN_PKT_SIZE
+ */
+
+ I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+ ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+ if (ret)
+ return ret;
+
+ mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ if (ret < 0)
+ return ret;
+
+ if (ret != buflen)
+ return -EIO;
+
+ return 0;
+}
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+ int ret;
+
+ /*
+ * XXX: should issue multiple read requests and reads if request is
+ * longer than MIPI_MAX_RETURN_PKT_SIZE
+ */
+
+ I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+ ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
+ reqlen);
+ if (ret)
+ return ret;
+
+ mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ if (ret < 0)
+ return ret;
+
+ if (ret != buflen)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (intel_dsi->hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* DPI virtual channel?! */
+
+ mask = DPI_FIFO_EMPTY;
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
+
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+ I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 00000000000..54c8a234a2e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#ifndef _INTEL_DSI_DSI_H
+#define _INTEL_DSI_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len);
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len);
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen);
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen);
+
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+
+/* XXX: questionable write helpers */
+static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd)
+{
+ return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+}
+
+static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd, u8 param)
+{
+ u8 buf[2] = { dcs_cmd, param };
+ return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+}
+
+static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
+ int channel)
+{
+ return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+}
+
+static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
+ int channel, u8 param)
+{
+ return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+}
+
+static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
+ int channel, u8 param1, u8 param2)
+{
+ u8 buf[2] = { param1, param2 };
+ return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+}
+
+/* XXX: questionable read helpers */
+static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
+ int channel, u8 *buf, int buflen)
+{
+ return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
+ int channel, u8 param, u8 *buf,
+ int buflen)
+{
+ return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
+ int channel, u8 param1, u8 param2,
+ u8 *buf, int buflen)
+{
+ u8 req[2] = { param1, param2 };
+
+ return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+}
+
+
+#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 00000000000..44279b2ade8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Shobhit Kumar <shobhit.kumar@intel.com>
+ * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE 4
+#define DSI_HSE_PACKET_SIZE 4
+#define DSI_HSA_PACKET_EXTRA_SIZE 6
+#define DSI_HBP_PACKET_EXTRA_SIZE 6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
+#define DSI_HFP_PACKET_EXTRA_SIZE 6
+#define DSI_EOTP_PACKET_SIZE 4
+
+struct dsi_mnp {
+ u32 dsi_pll_ctrl;
+ u32 dsi_pll_div;
+};
+
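+/* map the PLL feedback divider m (62..92) to the LFSR seed programmed into
+ * the M1 divider field */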
+static const u32 lfsr_converts[] = {
+ 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35 /* 91 - 92 */
+};
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+ int pixel_format, int video_mode_format,
+ int lane_count, bool eotp)
+{
+ u32 bpp;
+ u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+ u32 bytes_per_line, bytes_per_frame;
+ u32 num_frames;
+ u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+ u32 dsi_bit_clock_hz;
+ u32 dsi_clk;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ hactive = mode->hdisplay;
+ vactive = mode->vdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsync = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+ hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+ hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+ hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+ bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+ DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+ hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+ hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+ hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+ /*
+ * XXX: Need to accurately calculate LP to HS transition timeout and add
+ * it to bytes_per_line/bytes_per_frame.
+ */
+
+ if (eotp && video_mode_format == VIDEO_MODE_BURST)
+ bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+ bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+ vactive * bytes_per_line + vfp * bytes_per_line;
+
+ if (eotp &&
+ (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+ video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+ bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
+ num_frames = drm_mode_vrefresh(mode);
+ bytes_per_x_frames = num_frames * bytes_per_frame;
+
+ bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+ /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+ dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+ dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+
+ if (eotp && video_mode_format == VIDEO_MODE_BURST)
+ dsi_clk *= 2;
+
+ return dsi_clk;
+}
+
+#ifdef MNP_FROM_TABLE
+
+struct dsi_clock_table {
+ u32 freq;
+ u8 m;
+ u8 p;
+};
+
+static const struct dsi_clock_table dsi_clk_tbl[] = {
+ {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
+ {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
+ {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
+ {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
+ {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
+ {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
+ {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
+ {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
+ {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
+ {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
+ {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
+ {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
+ {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
+ {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
+ {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
+ {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
+ {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
+ {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
+ {1000, 80, 2}, /* dsi clock frequency in MHz */
+};
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+ unsigned int i;
+ u8 m;
+ u8 n;
+ u8 p;
+ u32 m_seed;
+
+ if (dsi_clk < 300 || dsi_clk > 1000)
+ return -ECHRNG;
+
+ for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
+ if (dsi_clk_tbl[i].freq > dsi_clk)
+ break;
+ }
+
+ m = dsi_clk_tbl[i].m;
+ p = dsi_clk_tbl[i].p;
+ m_seed = lfsr_converts[m - 62];
+ n = 1;
+ dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
+ dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+ m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+ return 0;
+}
+
+#else
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+ u32 m, n, p;
+ u32 ref_clk;
+ u32 error;
+ u32 tmp_error;
+ u32 target_dsi_clk;
+ u32 calc_dsi_clk;
+ u32 calc_m;
+ u32 calc_p;
+ u32 m_seed;
+
+ if (dsi_clk < 300 || dsi_clk > 1150) {
+ DRM_ERROR("DSI CLK Out of Range\n");
+ return -ECHRNG;
+ }
+
+ ref_clk = 25000;
+ target_dsi_clk = dsi_clk * 1000;
+ error = 0xFFFFFFFF;
+ calc_m = 0;
+ calc_p = 0;
+
+ for (m = 62; m <= 92; m++) {
+ for (p = 2; p <= 6; p++) {
+
+ calc_dsi_clk = (m * ref_clk) / p;
+ if (calc_dsi_clk >= target_dsi_clk) {
+ tmp_error = calc_dsi_clk - target_dsi_clk;
+ if (tmp_error < error) {
+ error = tmp_error;
+ calc_m = m;
+ calc_p = p;
+ }
+ }
+ }
+ }
+
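+ /*
+ * e.g. a 600 MHz target with the 25 MHz reference ends up with m = 72,
+ * p = 3 (72 * 25 / 3 = 600 MHz), matching the {600, 72, 3} entry in
+ * dsi_clk_tbl above.
+ */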
+ m_seed = lfsr_converts[calc_m - 62];
+ n = 1;
+ dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+ dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+ m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+ return 0;
+}
+
+#endif
+
+/*
+ * XXX: The muxing and gating are hard coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int ret;
+ struct dsi_mnp dsi_mnp;
+ u32 dsi_clk;
+
+ dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
+ intel_dsi->video_mode_format,
+ intel_dsi->lane_count, !intel_dsi->eot_disable);
+
+ ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+ if (ret) {
+ DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+ return;
+ }
+
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+ dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ vlv_configure_dsi_pll(encoder);
+
+ /* wait at least 0.5 us after ungating before enabling VCO */
+ usleep_range(1, 10);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp |= DSI_PLL_VCO_EN;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+ DRM_ERROR("DSI PLL lock failed\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp &= ~DSI_PLL_VCO_EN;
+ tmp |= DSI_PLL_LDO_GATE;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df546c1..3c773654685 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -171,11 +173,16 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &crtc->config.requested_mode,
+ &crtc->config.adjusted_mode);
+
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -184,6 +191,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
+ struct intel_crtc_config *config;
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
@@ -204,10 +212,16 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
+ config = &to_intel_crtc(crtc)->config;
+
intel_dvo->base.connectors_active = true;
intel_crtc_update_dpms(crtc);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &config->requested_mode,
+ &config->adjusted_mode);
+
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -267,11 +281,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
- if (intel_dvo->dev.dev_ops->mode_fixup)
- return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
- &pipe_config->requested_mode,
- adjusted_mode);
-
return true;
}
@@ -299,10 +308,6 @@ static void intel_dvo_mode_set(struct intel_encoder *encoder)
break;
}
- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &crtc->config.requested_mode,
- adjusted_mode);
-
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
@@ -370,7 +375,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -451,11 +455,11 @@ void intel_dvo_init(struct drm_device *dev)
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+ intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dvo);
return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c
index bc2100007b2..895fcb4fbd9 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
- 8), 64);
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
@@ -184,6 +184,27 @@ out:
return ret;
}
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return -ENOMEM;
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes,
- INTELFB_CONN_LIMIT);
+ 4);
if (ret) {
kfree(ifbdev);
return ret;
@@ -278,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
MODULE_LICENSE("GPL and additional rights");
-void intel_fb_output_poll_changed(struct drm_device *dev)
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
-void intel_fb_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_device *dev)
{
int ret;
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc85bf7..03f9ca70530 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp, flags = 0;
+ int dotclock;
tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+ dotclock = pipe_config->port_clock * 2 / 3;
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
}
static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -836,7 +847,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
if (IS_G4X(dev))
return 165000;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
else
return 225000;
@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+ int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
@@ -904,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->clock > portclock_limit) {
+ if (adjusted_mode->crtc_clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
@@ -1063,7 +1074,7 @@ done:
return 0;
}
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
0x2b245f5f);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
0x5578b83a);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
0x0c782040);
- vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
0x2b247878);
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
0x00002000);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
/* Program lane clock */
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
@@ -1116,55 +1127,60 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, port);
}
-static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
if (!IS_VALLEYVIEW(dev))
return;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
0x00002000);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
-static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1211,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
+ connector->stereo_allowed = 1;
switch (port) {
case PORT_B:
@@ -1275,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
@@ -1296,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
- intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
- intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
- intel_encoder->post_disable = intel_hdmi_post_disable;
+ intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->enable = intel_enable_hdmi;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7f26..2ca17b14b6c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+enum disp_clk {
+ CDCLK,
+ CZCLK
+};
+
struct gmbus_port {
const char *name;
int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
+static int get_disp_clk_div(struct drm_i915_private *dev_priv,
+ enum disp_clk clk)
+{
+ u32 reg_val;
+ int clk_ratio;
+
+ reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
+
+ if (clk == CDCLK)
+ clk_ratio =
+ ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
+ else
+ clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
+
+ return clk_ratio;
+}
+
+static void gmbus_set_freq(struct drm_i915_private *dev_priv)
+{
+ int vco_freq[] = { 800, 1600, 2000, 2400 };
+ int gmbus_freq = 0, cdclk_div, hpll_freq;
+
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+ /* Skip setting the gmbus freq if BIOS has already programmed it */
+ if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
+ return;
+
+ /* Obtain SKU information */
+ mutex_lock(&dev_priv->dpio_lock);
+ hpll_freq =
+ vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Get the CDCLK divide ratio */
+ cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ if (cdclk_div)
+ gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+
+ if (WARN_ON(gmbus_freq == 0))
+ return;
+
+ I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
+}
+
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * In BIOS-less system, program the correct gmbus frequency
+ * before reading edid.
+ */
+ if (IS_VALLEYVIEW(dev))
+ gmbus_set_freq(dev_priv);
+
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b8af94a5be3..c3b4da7895e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
+ int dotclock;
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
+
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -198,7 +206,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
@@ -217,13 +226,15 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
- intel_panel_enable_backlight(dev, intel_crtc->pipe);
+ intel_panel_enable_backlight(intel_connector);
}
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
@@ -235,7 +246,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
stat_reg = PP_STATUS;
}
- intel_panel_disable_backlight(dev);
+ intel_panel_disable_backlight(intel_connector);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
@@ -466,7 +477,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
intel_panel_fini(&lvds_connector->base.panel);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -802,7 +812,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- struct child_device_config *child = dev_priv->vbt.child_dev + i;
+ union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+ struct old_child_dev_config *child = &uchild->old;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -956,11 +967,11 @@ void intel_lvds_init(struct drm_device *dev)
}
}
- lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+ lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
- lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+ lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771ff46a..6d69a9bad86 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
#include "i915_drv.h"
#include "intel_drv.h"
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+#define PCI_SWSCI 0xe8
+#define PCI_SWSCI_SCISEL (1 << 15)
+#define PCI_SWSCI_GSSCIE (1 << 0)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
@@ -107,25 +110,38 @@ struct opregion_asle {
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
- u8 rsvd[102];
+ u32 cddv; /* color correction default values */
+ u32 pcft; /* power conservation features */
+ u32 srot; /* supported rotation angles */
+ u32 iuer; /* IUER events */
+ u8 rsvd[86];
} __attribute__((packed));
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM (1 << 0)
-#define ASLE_SET_BACKLIGHT (1 << 1)
-#define ASLE_SET_PFIT (1 << 2)
-#define ASLE_SET_PWM_FREQ (1 << 3)
-#define ASLE_REQ_MSK 0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAILED (1<<10)
-#define ASLE_BACKLIGHT_FAILED (1<<12)
-#define ASLE_PFIT_FAILED (1<<14)
-#define ASLE_PWM_FREQ_FAILED (1<<16)
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM (1 << 0)
+#define ASLC_SET_BACKLIGHT (1 << 1)
+#define ASLC_SET_PFIT (1 << 2)
+#define ASLC_SET_PWM_FREQ (1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
+#define ASLC_BUTTON_ARRAY (1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
+#define ASLC_DOCKING_INDICATOR (1 << 7)
+#define ASLC_ISCT_STATE_CHANGE (1 << 8)
+#define ASLC_REQ_MSK 0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED (1 << 10)
+#define ASLC_BACKLIGHT_FAILED (1 << 12)
+#define ASLC_PFIT_FAILED (1 << 14)
+#define ASLC_PWM_FREQ_FAILED (1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
+#define ASLC_CONVERTIBLE_FAILED (1 << 22)
+#define ASLC_DOCKING_FAILED (1 << 24)
+#define ASLC_ISCT_STATE_FAILED (1 << 26)
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
@@ -151,6 +167,60 @@ struct opregion_asle {
#define ASLE_CBLV_VALID (1<<31)
+/* IUER */
+#define ASLE_IUER_DOCKING (1 << 7)
+#define ASLE_IUER_CONVERTIBLE (1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
+#define ASLE_IUER_WINDOWS_BTN (1 << 1)
+#define ASLE_IUER_POWER_BTN (1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR (1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
+#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+ ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+ (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA 4
+#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB 6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
#define ACPI_OTHER_OUTPUT (0<<8)
#define ACPI_VGA_OUTPUT (1<<8)
#define ACPI_TV_OUTPUT (2<<8)
@@ -158,24 +228,224 @@ struct opregion_asle {
#define ACPI_LVDS_OUTPUT (4<<8)
#ifdef CONFIG_ACPI
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
+ u32 main_function, sub_function, scic;
+ u16 pci_swsci;
+ u32 dslp;
+
+ if (!swsci)
+ return -ENODEV;
+
+ main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+ SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+ sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+ SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+ /* Check if we can call the function. See swsci_setup for details. */
+ if (main_function == SWSCI_SBCB) {
+ if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ } else if (main_function == SWSCI_GBDA) {
+ if ((dev_priv->opregion.swsci_gbda_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ }
+
+ /* Driver sleep timeout in ms. */
+ dslp = ioread32(&swsci->dslp);
+ if (!dslp) {
+ /* The spec says 2ms should be the default, but it's too small
+ * for some machines. */
+ dslp = 50;
+ } else if (dslp > 500) {
+ /* Hey bios, trust must be earned. */
+		WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
+ dslp = 500;
+ }
+
+ /* The spec tells us to do this, but we are the only user... */
+ scic = ioread32(&swsci->scic);
+ if (scic & SWSCI_SCIC_INDICATOR) {
+ DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+ return -EBUSY;
+ }
+
+ scic = function | SWSCI_SCIC_INDICATOR;
+
+ iowrite32(parm, &swsci->parm);
+ iowrite32(scic, &swsci->scic);
+
+ /* Ensure SCI event is selected and event trigger is cleared. */
+ pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+ if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+ pci_swsci |= PCI_SWSCI_SCISEL;
+ pci_swsci &= ~PCI_SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ }
+
+ /* Use event trigger to tell bios to check the mail. */
+ pci_swsci |= PCI_SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+ /* Poll for the result. */
+#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
+ if (wait_for(C, dslp)) {
+ DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+ SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+ /* Note: scic == 0 is an error! */
+ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+ DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+ return -EIO;
+ }
+
+ if (parm_out)
+ *parm_out = ioread32(&swsci->parm);
+
+ return 0;
+
+#undef C
+}
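
Editor's note: the function-code encoding that swsci() unpacks can be exercised in isolation. The sketch below re-uses the same shift/mask values as the SWSCI_SCIC_* defines above to encode a (main, sub) pair and decode it back; it is illustrative only and not part of the driver.

#include <stdio.h>

/* Same shift/mask values as the SWSCI_SCIC_* defines above. */
#define SCIC_MAIN_SHIFT	1
#define SCIC_MAIN_MASK	(0xf << 1)
#define SCIC_SUB_SHIFT	8
#define SCIC_SUB_MASK	(0xff << 8)

static unsigned int function_code(unsigned int main_fn, unsigned int sub_fn)
{
	return (main_fn << SCIC_MAIN_SHIFT) | (sub_fn << SCIC_SUB_SHIFT);
}

int main(void)
{
	unsigned int fn = function_code(6, 8);	/* SBCB, "display power state" */
	unsigned int main_fn = (fn & SCIC_MAIN_MASK) >> SCIC_MAIN_SHIFT;
	unsigned int sub_fn = (fn & SCIC_SUB_MASK) >> SCIC_SUB_SHIFT;

	printf("fn=0x%04x main=%u sub=%u\n", fn, main_fn, sub_fn);
	return 0;
}
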
+
+#define DISPLAY_TYPE_CRT 0
+#define DISPLAY_TYPE_TV 1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 parm = 0;
+ u32 type = 0;
+ u32 port;
+
+ /* don't care about old stuff for now */
+ if (!HAS_DDI(dev))
+ return 0;
+
+ port = intel_ddi_get_encoder_port(intel_encoder);
+ if (port == PORT_E) {
+ port = 0;
+ } else {
+ parm |= 1 << port;
+ port++;
+ }
+
+ if (!enable)
+ parm |= 4 << 8;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ type = DISPLAY_TYPE_CRT;
+ break;
+ case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+ break;
+ case INTEL_OUTPUT_EDP:
+ type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+ break;
+ default:
+ WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
+ return -EINVAL;
+ }
+
+ parm |= type << (16 + port * 3);
+
+ return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
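
Editor's note: the parm word built by intel_opregion_notify_encoder() packs a per-port valid bit in the low word, a (4 << 8) marker when the display is being disabled, and a 3-bit display type field starting at bit 16, with PORT_E occupying slot 0. The standalone sketch below repeats that encoding with hypothetical port/type inputs so the layout can be checked by hand.

#include <stdio.h>

/* Hypothetical re-implementation of the parm encoding described above. */
static unsigned int encode_parm(int port, int is_port_e, unsigned int type,
				int enable)
{
	unsigned int parm = 0;

	if (is_port_e) {
		port = 0;
	} else {
		parm |= 1u << port;	/* per-port valid bit */
		port++;			/* type slots start after PORT_E's */
	}

	if (!enable)
		parm |= 4 << 8;		/* display being disabled */

	parm |= type << (16 + port * 3);
	return parm;
}

int main(void)
{
	/* e.g. an internal flat panel (type 3) on the first DDI port, enabling */
	printf("parm = 0x%08x\n", encode_parm(0, 0, 3, 1));
	return 0;
}
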
+
+static const struct {
+ pci_power_t pci_power_state;
+ u32 parm;
+} power_state_map[] = {
+ { PCI_D0, 0x00 },
+ { PCI_D1, 0x01 },
+ { PCI_D2, 0x02 },
+ { PCI_D3hot, 0x04 },
+ { PCI_D3cold, 0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+ int i;
+
+ if (!HAS_DDI(dev))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+ if (state == power_state_map[i].pci_power_state)
+ return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ power_state_map[i].parm, NULL);
+ }
+
+ return -EINVAL;
+}
+
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector = NULL;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+ u32 ret = 0;
+ bool found = false;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
+ return ASLC_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
if (bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
+ return ASLC_BACKLIGHT_FAILED;
+
+ mutex_lock(&dev->mode_config.mutex);
+ /*
+ * Could match the OpRegion connector here instead, but we'd also need
+ * to verify the connector could handle a backlight call.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ ret = ASLC_BACKLIGHT_FAILED;
+ goto out;
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ intel_connector = to_intel_connector(connector);
+
+ if (!intel_connector) {
+ ret = ASLC_BACKLIGHT_FAILED;
+ goto out;
+ }
- intel_panel_set_backlight(dev, bclp, 255);
+ DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+ intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
- return 0;
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
}
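
Editor's note: the cblv write at the end of asle_set_backlight() converts the requested 0-255 BCLP level into a rounded-up percentage and sets the valid bit. A couple of hypothetical input levels make the rounding visible; this is a standalone sketch, not driver code.

#include <stdio.h>

#define CBLV_VALID	(1u << 31)

/* DIV_ROUND_UP(bclp * 100, 255) | valid bit, as written to asle->cblv */
static unsigned int bclp_to_cblv(unsigned int bclp)
{
	return ((bclp * 100 + 254) / 255) | CBLV_VALID;
}

int main(void)
{
	unsigned int levels[] = { 1, 128, 255 };

	for (int i = 0; i < 3; i++)
		printf("bclp=%3u -> cblv=0x%08x (%u%%)\n", levels[i],
		       bclp_to_cblv(levels[i]),
		       bclp_to_cblv(levels[i]) & ~CBLV_VALID);
	return 0;
}
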
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@ -183,13 +453,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
DRM_DEBUG_DRIVER("Illum is not supported\n");
- return ASLE_ALS_ILLUM_FAILED;
+ return ASLC_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
- return ASLE_PWM_FREQ_FAILED;
+ return ASLC_PWM_FREQ_FAILED;
}
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +467,118 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
DRM_DEBUG_DRIVER("Pfit is not supported\n");
- return ASLE_PFIT_FAILED;
+ return ASLC_PFIT_FAILED;
}
-void intel_opregion_asle_intr(struct drm_device *dev)
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ DRM_DEBUG_DRIVER("SROT is not supported\n");
+ return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+ if (!iuer)
+ DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+ if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+ if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+ if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+ if (iuer & ASLE_IUER_WINDOWS_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+ if (iuer & ASLE_IUER_POWER_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+ return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+ if (iuer & ASLE_IUER_CONVERTIBLE)
+ DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+ else
+ DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+ return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+ if (iuer & ASLE_IUER_DOCKING)
+ DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+ else
+ DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+ return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+ DRM_DEBUG_DRIVER("ISCT is not supported\n");
+ return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+ struct intel_opregion *opregion =
+ container_of(work, struct intel_opregion, asle_work);
+ struct drm_i915_private *dev_priv =
+ container_of(opregion, struct drm_i915_private, opregion);
+ struct drm_device *dev = dev_priv->dev;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
- u32 asle_stat = 0;
- u32 asle_req;
+ u32 aslc_stat = 0;
+ u32 aslc_req;
if (!asle)
return;
- asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
+ aslc_req = ioread32(&asle->aslc);
- if (!asle_req) {
- DRM_DEBUG_DRIVER("non asle set request??\n");
+ if (!(aslc_req & ASLC_REQ_MSK)) {
+ DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+ aslc_req);
return;
}
- if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+ if (aslc_req & ASLC_SET_ALS_ILLUM)
+ aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+ if (aslc_req & ASLC_SET_BACKLIGHT)
+ aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+ if (aslc_req & ASLC_SET_PFIT)
+ aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+ if (aslc_req & ASLC_SET_PWM_FREQ)
+ aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+ if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+ aslc_stat |= asle_set_supported_rotation_angles(dev,
+ ioread32(&asle->srot));
- if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+ if (aslc_req & ASLC_BUTTON_ARRAY)
+ aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
- if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+ if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+ aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
- iowrite32(asle_stat, &asle->aslc);
+ if (aslc_req & ASLC_DOCKING_INDICATOR)
+ aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
+
+ if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+ aslc_stat |= asle_isct_state(dev);
+
+ iowrite32(aslc_stat, &asle->aslc);
+}
+
+void intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->opregion.asle)
+ schedule_work(&dev_priv->opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -289,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp;
int i = 0;
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
@@ -432,6 +781,8 @@ void intel_opregion_fini(struct drm_device *dev)
if (opregion->asle)
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
+ cancel_work_sync(&dev_priv->opregion.asle_work);
+
if (opregion->acpi) {
iowrite32(0, &opregion->acpi->drdy);
@@ -446,8 +797,68 @@ void intel_opregion_fini(struct drm_device *dev)
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->vbt = NULL;
+ opregion->lid_state = NULL;
}
-#endif
+
+static void swsci_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ bool requested_callbacks = false;
+ u32 tmp;
+
+ /* Sub-function code 0 is okay, let's allow them. */
+ opregion->swsci_gbda_sub_functions = 1;
+ opregion->swsci_sbcb_sub_functions = 1;
+
+ /* We use GBDA to ask for supported GBDA calls. */
+ if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ tmp <<= 1;
+ opregion->swsci_gbda_sub_functions |= tmp;
+ }
+
+ /*
+ * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+ * must not call interfaces that are not specifically requested by the
+ * bios.
+ */
+ if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ /* here, the bits already match sub-function codes */
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ requested_callbacks = true;
+ }
+
+ /*
+ * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
+ * the callback is _requested_. But we still can't call interfaces that
+ * are not requested.
+ */
+ if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ u32 low = tmp & 0x7ff;
+ u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+ tmp = (high << 4) | (low << 1) | 1;
+
+ /* best guess what to do with supported wrt requested */
+ if (requested_callbacks) {
+ u32 req = opregion->swsci_sbcb_sub_functions;
+ if ((req & tmp) != req)
+ DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+ /* XXX: for now, trust the requested callbacks */
+ /* opregion->swsci_sbcb_sub_functions &= tmp; */
+ } else {
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
+}
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif /* CONFIG_ACPI */
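
Editor's note: the bit surgery in swsci_setup() that turns the SBCB "supported callbacks" word into a per-sub-function bitmask (bits 0-10 shift up by one, bits 12 and up shift up by four around the reserved bit 11, and sub-function 0 is always allowed) is easy to misread. The standalone sketch below performs the same transform on a made-up input word.

#include <stdio.h>

/* Same transform as in swsci_setup() above, on a hypothetical input. */
static unsigned int sbcb_supported_to_subfns(unsigned int tmp)
{
	unsigned int low = tmp & 0x7ff;
	unsigned int high = tmp & ~0xfffu;	/* bit 11 is reserved */

	return (high << 4) | (low << 1) | 1;
}

int main(void)
{
	unsigned int tmp = 0x000010ff;	/* hypothetical BIOS-reported word */

	printf("subfns = 0x%08x\n", sbcb_supported_to_subfns(tmp));
	return 0;
}
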
int intel_opregion_setup(struct drm_device *dev)
{
@@ -465,6 +876,10 @@ int intel_opregion_setup(struct drm_device *dev)
return -ENOTSUPP;
}
+#ifdef CONFIG_ACPI
+ INIT_WORK(&opregion->asle_work, asle_work);
+#endif
+
base = acpi_os_ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
@@ -490,6 +905,7 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ swsci_setup(dev);
}
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0aefe0c..a98a990fbab 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-
if (!crtc->active)
return -EINVAL;
/* can't use the overlay with double wide pipe */
- if (INTEL_INFO(overlay->dev)->gen < 4 &&
- (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ if (crtc->config.double_wide)
return -EINVAL;
return 0;
@@ -1056,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return ret;
}
- params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -1323,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!HAS_OVERLAY(dev))
return;
- overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a2896..f161ac02c4f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
- struct drm_display_mode *mode, *adjusted_mode;
+ struct drm_display_mode *adjusted_mode;
int x, y, width, height;
- mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
x = y = width = height = 0;
/* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay)
+ if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+ adjusted_mode->vdisplay == pipe_config->pipe_src_h)
goto done;
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
- width = mode->hdisplay;
- height = mode->vdisplay;
+ width = pipe_config->pipe_src_w;
+ height = pipe_config->pipe_src_h;
x = (adjusted_mode->hdisplay - width + 1)/2;
y = (adjusted_mode->vdisplay - height + 1)/2;
break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
{
- u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
- u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ u32 scaled_width = adjusted_mode->hdisplay
+ * pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w
+ * adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / mode->vdisplay;
+ width = scaled_height / pipe_config->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / mode->hdisplay;
+ height = scaled_width / pipe_config->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+ u32 *pfit_control)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ u32 scaled_width = adjusted_mode->hdisplay *
+ pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w *
+ adjusted_mode->vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+ *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+ u32 *pfit_control, u32 *pfit_pgm_ratios,
+ u32 *border)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ u32 scaled_width = adjusted_mode->hdisplay *
+ pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w *
+ adjusted_mode->vdisplay;
+ u32 bits;
+
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode,
+ scaled_height /
+ pipe_config->pipe_src_h);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+ bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+ adjusted_mode->vdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode,
+ scaled_width /
+ pipe_config->pipe_src_w);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+ bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+ adjusted_mode->hdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else {
+ /* Aspects match, Let hw scale both directions */
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+}
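
Editor's note: both scale-aspect helpers above decide between pillarbox and letterbox by comparing the two cross products, and the pre-gen4 path then feeds the remaining axis through the fixed-point ratio in panel_fitter_scaling(). The sketch below walks that arithmetic for one hypothetical mode, purely as an illustration.

#include <stdio.h>

#define ACCURACY 12
#define FACTOR	(1 << ACCURACY)

/* Same fixed-point ratio as panel_fitter_scaling() in intel_panel.c */
static unsigned int panel_fitter_scaling(unsigned int source, unsigned int target)
{
	unsigned int ratio = source * FACTOR / target;

	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
	/* hypothetical pipe source size and panel (adjusted) mode */
	unsigned int src_w = 1024, src_h = 768;
	unsigned int adj_w = 1920, adj_h = 1080;

	unsigned int scaled_w = adj_w * src_h;
	unsigned int scaled_h = src_w * adj_h;

	if (scaled_w > scaled_h)
		printf("pillarbox, vertical ratio = %u\n",
		       panel_fitter_scaling(src_h, adj_h));
	else if (scaled_w < scaled_h)
		printf("letterbox, horizontal ratio = %u\n",
		       panel_fitter_scaling(src_w, adj_w));
	else
		printf("aspects match, scale both axes\n");
	return 0;
}
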
+
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *mode, *adjusted_mode;
+ struct drm_display_mode *adjusted_mode;
- mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
/* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay)
+ if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+ adjusted_mode->vdisplay == pipe_config->pipe_src_h)
goto out;
switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- centre_horizontally(adjusted_mode, mode->hdisplay);
- centre_vertically(adjusted_mode, mode->vdisplay);
+ centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+ centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4) {
- u32 scaled_width = adjusted_mode->hdisplay *
- mode->vdisplay;
- u32 scaled_height = mode->hdisplay *
- adjusted_mode->vdisplay;
-
- /* 965+ is easy, it does everything in hw */
- if (scaled_width > scaled_height)
- pfit_control |= PFIT_ENABLE |
- PFIT_SCALING_PILLAR;
- else if (scaled_width < scaled_height)
- pfit_control |= PFIT_ENABLE |
- PFIT_SCALING_LETTER;
- else if (adjusted_mode->hdisplay != mode->hdisplay)
- pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
- } else {
- u32 scaled_width = adjusted_mode->hdisplay *
- mode->vdisplay;
- u32 scaled_height = mode->hdisplay *
- adjusted_mode->vdisplay;
- /*
- * For earlier chips we have to calculate the scaling
- * ratio by hand and program it into the
- * PFIT_PGM_RATIO register
- */
- if (scaled_width > scaled_height) { /* pillar */
- centre_horizontally(adjusted_mode,
- scaled_height /
- mode->vdisplay);
-
- border = LVDS_BORDER_ENABLE;
- if (mode->vdisplay != adjusted_mode->vdisplay) {
- u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
- pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
- bits << PFIT_VERT_SCALE_SHIFT);
- pfit_control |= (PFIT_ENABLE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- } else if (scaled_width < scaled_height) { /* letter */
- centre_vertically(adjusted_mode,
- scaled_width /
- mode->hdisplay);
-
- border = LVDS_BORDER_ENABLE;
- if (mode->hdisplay != adjusted_mode->hdisplay) {
- u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
- pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
- bits << PFIT_VERT_SCALE_SHIFT);
- pfit_control |= (PFIT_ENABLE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- } else {
- /* Aspects match, Let hw scale both directions */
- pfit_control |= (PFIT_ENABLE |
- VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- }
+ if (INTEL_INFO(dev)->gen >= 4)
+ i965_scale_aspect(pipe_config, &pfit_control);
+ else
+ i9xx_scale_aspect(pipe_config, &pfit_control,
+ &pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- if (mode->vdisplay != adjusted_mode->vdisplay ||
- mode->hdisplay != adjusted_mode->hdisplay) {
+ if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+ pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (IS_GEN4(dev))
return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
if (IS_GEN2(dev))
@@ -320,7 +341,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
/* XXX: query mode clock or hardware clock and program max PWM appropriately
* when it's 0.
*/
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
@@ -337,6 +358,21 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
val = dev_priv->regfile.saveBLC_PWM_CTL2;
I915_WRITE(BLC_PWM_PCH_CTL2, val);
}
+ } else if (IS_VALLEYVIEW(dev)) {
+ val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ } else if (val == 0) {
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
+ dev_priv->regfile.saveBLC_PWM_CTL2);
+ }
+
+ if (!val)
+ val = 0x0f42ffff;
} else {
val = I915_READ(BLC_PWM_CTL);
if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
@@ -356,11 +392,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
return val;
}
-static u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 intel_panel_get_max_backlight(struct drm_device *dev,
+ enum pipe pipe)
{
u32 max;
- max = i915_read_blc_pwm_ctl(dev);
+ max = i915_read_blc_pwm_ctl(dev, pipe);
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -386,7 +423,8 @@ MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+static u32 intel_panel_compute_brightness(struct drm_device *dev,
+ enum pipe pipe, u32 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -395,7 +433,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- u32 max = intel_panel_get_max_backlight(dev);
+ u32 max = intel_panel_get_max_backlight(dev, pipe);
if (max)
return max - val;
}
@@ -403,18 +441,25 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
}
-static u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev,
+ enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
+ int reg;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
- val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (IS_VALLEYVIEW(dev))
+ reg = VLV_BLC_PWM_CTL(pipe);
+ else
+ reg = BLC_PWM_CTL;
+
+ val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
@@ -426,7 +471,7 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
}
}
- val = intel_panel_compute_brightness(dev, val);
+ val = intel_panel_compute_brightness(dev, pipe, val);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
@@ -441,19 +486,21 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
I915_WRITE(BLC_PWM_CPU_CTL, val | level);
}
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+ enum pipe pipe, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
+ int reg;
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
- level = intel_panel_compute_brightness(dev, level);
+ level = intel_panel_compute_brightness(dev, pipe, level);
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
if (is_backlight_combination_mode(dev)) {
- u32 max = intel_panel_get_max_backlight(dev);
+ u32 max = intel_panel_get_max_backlight(dev, pipe);
u8 lbpc;
/* we're screwed, but keep behaviour backwards compatible */
@@ -465,23 +512,34 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
}
- tmp = I915_READ(BLC_PWM_CTL);
+ if (IS_VALLEYVIEW(dev))
+ reg = VLV_BLC_PWM_CTL(pipe);
+ else
+ reg = BLC_PWM_CTL;
+
+ tmp = I915_READ(reg);
if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CTL, tmp | level);
+ I915_WRITE(reg, tmp | level);
}
/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+ u32 max)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 freq;
unsigned long flags;
+ if (pipe == INVALID_PIPE)
+ return;
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- freq = intel_panel_get_max_backlight(dev);
+ freq = intel_panel_get_max_backlight(dev, pipe);
if (!freq) {
/* we are screwed, bail out */
goto out;
@@ -498,16 +556,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
dev_priv->backlight.device->props.brightness = level;
if (dev_priv->backlight.enabled)
- intel_panel_actually_set_backlight(dev, level);
+ intel_panel_actually_set_backlight(dev, pipe, level);
out:
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
-void intel_panel_disable_backlight(struct drm_device *dev)
+void intel_panel_disable_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
+ if (pipe == INVALID_PIPE)
+ return;
+
/*
* Do not disable backlight on the vgaswitcheroo path. When switching
* away from i915, the other client may depend on i915 to handle the
@@ -522,12 +585,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
- intel_panel_actually_set_backlight(dev, 0);
+ intel_panel_actually_set_backlight(dev, pipe, 0);
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
- reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+ if (HAS_PCH_SPLIT(dev))
+ reg = BLC_PWM_CPU_CTL2;
+ else if (IS_VALLEYVIEW(dev))
+ reg = VLV_BLC_PWM_CTL2(pipe);
+ else
+ reg = BLC_PWM_CTL2;
I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
@@ -541,18 +609,25 @@ void intel_panel_disable_backlight(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
-void intel_panel_enable_backlight(struct drm_device *dev,
- enum pipe pipe)
+void intel_panel_enable_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
unsigned long flags;
+ if (pipe == INVALID_PIPE)
+ return;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (dev_priv->backlight.level == 0) {
- dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+ dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
+ pipe);
if (dev_priv->backlight.device)
dev_priv->backlight.device->props.brightness =
dev_priv->backlight.level;
@@ -561,8 +636,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
- reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
-
+ if (HAS_PCH_SPLIT(dev))
+ reg = BLC_PWM_CPU_CTL2;
+ else if (IS_VALLEYVIEW(dev))
+ reg = VLV_BLC_PWM_CTL2(pipe);
+ else
+ reg = BLC_PWM_CTL2;
tmp = I915_READ(reg);
@@ -602,16 +681,41 @@ set_level:
* registers are set.
*/
dev_priv->backlight.enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
+ intel_panel_actually_set_backlight(dev, pipe,
+ dev_priv->backlight.level);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
+/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
+static void intel_panel_init_backlight_regs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ enum pipe pipe;
+
+ for_each_pipe(pipe) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+ /* Skip if the modulation freq is already set */
+ if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+ continue;
+
+ cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ cur_val);
+ }
+ }
+}
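
Editor's note: intel_panel_init_backlight_regs() relies on the VLV_BLC_PWM_CTL layout visible in the code above: the low 16 bits carry the duty cycle and the high 16 bits the modulation frequency, 0xf42 being the default frequency the driver falls back to (hence the 0x0f42ffff value). A tiny packing sketch, with hypothetical values:

#include <stdio.h>

#define DUTY_CYCLE_MASK	0xffffu

/* Pack/unpack the PWM control word as the VLV paths above treat it. */
static unsigned int pack_pwm_ctl(unsigned int mod_freq, unsigned int duty)
{
	return (mod_freq << 16) | (duty & DUTY_CYCLE_MASK);
}

int main(void)
{
	unsigned int val = pack_pwm_ctl(0xf42, 0xffff);	/* the 0x0f42ffff fallback */

	printf("ctl=0x%08x freq=0x%x duty=0x%x\n", val,
	       val >> 16, val & DUTY_CYCLE_MASK);
	return 0;
}
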
+
static void intel_panel_init_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->backlight.level = intel_panel_get_backlight(dev);
+ intel_panel_init_backlight_regs(dev);
+
+ dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
}
@@ -637,19 +741,34 @@ intel_panel_detect(struct drm_device *dev)
}
}
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_panel_update_status(struct backlight_device *bd)
{
- struct drm_device *dev = bl_get_data(bd);
- intel_panel_set_backlight(dev, bd->props.brightness,
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
+ intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness);
+ mutex_unlock(&dev->mode_config.mutex);
return 0;
}
static int intel_panel_get_brightness(struct backlight_device *bd)
{
- struct drm_device *dev = bl_get_data(bd);
- return intel_panel_get_backlight(dev);
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+ enum pipe pipe;
+
+ mutex_lock(&dev->mode_config.mutex);
+ pipe = intel_get_pipe_from_connector(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ if (pipe == INVALID_PIPE)
+ return 0;
+
+ return intel_panel_get_backlight(connector->base.dev, pipe);
}
static const struct backlight_ops intel_panel_bl_ops = {
@@ -674,7 +793,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
props.brightness = dev_priv->backlight.level;
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- props.max_brightness = intel_panel_get_max_backlight(dev);
+ props.max_brightness = intel_panel_get_max_backlight(dev, 0);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
if (props.max_brightness == 0) {
@@ -683,7 +802,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
}
dev_priv->backlight.device =
backlight_device_register("intel_backlight",
- &connector->kdev, dev,
+ connector->kdev,
+ to_intel_connector(connector),
&intel_panel_bl_ops, &props);
if (IS_ERR(dev_priv->backlight.device)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3e985..caf2ee4e544 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
#include <linux/module.h>
#include <drm/i915_powerwell.h>
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while in this stage. The
+ * stage is entered automatically whenever the GPU is idle and RC6 support
+ * is enabled; as soon as a new workload arises the GPU wakes up again.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in each state.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Hardware support for each varies with the GPU,
+ * BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * that brings the most power savings; deeper states save more power, but
+ * require higher latency to switch to and wake up from.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reduce the power packet.
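
Editor's note: the INTEL_RC6*_ENABLE flags introduced above are meant to be OR-ed into a single mask describing which RC6 states may be used. The sketch below is a hypothetical policy helper, not the driver's actual RC6 sanitising logic; the flag values are repeated so it stands alone.

#include <stdio.h>

#define INTEL_RC6_ENABLE	(1 << 0)
#define INTEL_RC6p_ENABLE	(1 << 1)
#define INTEL_RC6pp_ENABLE	(1 << 2)

/* Hypothetical policy: map a "how deep may we sleep" level to an RC6 mask. */
static int rc6_mask_for_level(int level)
{
	int mask = 0;

	if (level >= 1)
		mask |= INTEL_RC6_ENABLE;	/* plain RC6: safest, biggest win */
	if (level >= 2)
		mask |= INTEL_RC6p_ENABLE;	/* deep RC6 */
	if (level >= 3)
		mask |= INTEL_RC6pp_ENABLE;	/* deepest RC6 */

	return mask;
}

int main(void)
{
	printf("rc6 mask for level 2 = 0x%x\n", rc6_mask_for_level(2));
	return 0;
}
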
@@ -43,14 +64,6 @@
* i915.i915_enable_fbc parameter
*/
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- */
- return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- if (IS_IVYBRIDGE(dev))
- /* WaFbcDisableDpfcClockGating:ivb */
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) &
- ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
- if (IS_HASWELL(dev))
- /* WaFbcDisableDpfcClockGating:hsw */
- I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
- I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
- ~HSW_DPFC_GATING_DISABLE);
-
DRM_DEBUG_KMS("disabled FBC\n");
}
}
@@ -282,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
- /* WaFbcDisableDpfcClockGating:ivb */
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
- ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw */
I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
HSW_BYPASS_FBC_QUEUE);
- /* WaFbcDisableDpfcClockGating:hsw */
- I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
- I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
- HSW_DPFC_GATING_DISABLE);
}
I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
intel_cancel_fbc_work(dev_priv);
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- unsigned int max_hdisplay, max_vdisplay;
+ const struct drm_display_mode *adjusted_mode;
+ unsigned int max_width, max_height;
if (!I915_HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@ void intel_update_fbc(struct drm_device *dev)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (intel_crtc_active(tmp_crtc) &&
- !to_intel_crtc(tmp_crtc)->primary_disabled) {
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@ void intel_update_fbc(struct drm_device *dev)
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915_enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@ void intel_update_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
- if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
- (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
@@ -523,14 +518,14 @@ void intel_update_fbc(struct drm_device *dev)
}
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- max_hdisplay = 4096;
- max_vdisplay = 2048;
+ max_width = 4096;
+ max_height = 2048;
} else {
- max_hdisplay = 2048;
- max_vdisplay = 1536;
+ max_width = 2048;
+ max_height = 1536;
}
- if ((crtc->mode.hdisplay > max_hdisplay) ||
- (crtc->mode.vdisplay > max_vdisplay)) {
+ if (intel_crtc->config.pipe_src_w > max_width ||
+ intel_crtc->config.pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
@@ -1087,8 +1082,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
return enabled;
}
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@ static void pineview_update_wm(struct drm_device *dev)
crtc = single_enabled_crtc(dev);
if (crtc) {
- int clock = crtc->mode.clock;
+ const struct drm_display_mode *adjusted_mode;
int pixel_size = crtc->fb->bits_per_pixel / 8;
+ int clock;
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int *cursor_wm)
{
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
int htotal, hdisplay, clock, pixel_size;
int line_time_us, line_count;
int entries, tlb_miss;
@@ -1177,9 +1178,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
return false;
}
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
int hdisplay, htotal, pixel_size, clock;
unsigned long line_time_us;
int line_count, line_size;
@@ -1262,9 +1265,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
line_time_us = (htotal * 1000) / clock;
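
Editor's note: several watermark helpers above derive the per-scanline time from the adjusted mode as (htotal * 1000) / crtc_clock, with the clock in kHz, giving microseconds. One hypothetical timing shows the integer result these functions work with.

#include <stdio.h>

/* line time in microseconds, clock in kHz, as used by the srwm helpers */
static unsigned long line_time_us(int htotal, int clock_khz)
{
	return ((unsigned long)htotal * 1000) / clock_khz;
}

int main(void)
{
	/* hypothetical 1080p-ish timing: htotal 2200, pixel clock 148500 kHz */
	printf("line_time_us = %lu\n", line_time_us(2200, 148500));
	return 0;
}
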
@@ -1303,7 +1307,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
if (!intel_crtc_active(crtc))
return false;
- clock = crtc->mode.clock; /* VESA DOT Clock */
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
#define single_plane_enabled(mask) is_power_of_2(mask)
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@ static void valleyview_update_wm(struct drm_device *dev)
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@ static void g4x_update_wm(struct drm_device *dev)
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
int srwm = 1;
@@ -1488,9 +1495,11 @@ static void i965_update_wm(struct drm_device *dev)
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- int clock = crtc->mode.clock;
- int htotal = crtc->mode.htotal;
- int hdisplay = crtc->mode.hdisplay;
+ const struct drm_display_mode *adjusted_mode =
+ &to_intel_crtc(crtc)->config.adjusted_mode;
+ int clock = adjusted_mode->crtc_clock;
+ int htotal = adjusted_mode->htotal;
+ int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1541,8 +1550,9 @@ static void i965_update_wm(struct drm_device *dev)
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
+ const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
- planea_wm = intel_calculate_wm(crtc->mode.clock,
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
@@ -1576,11 +1588,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
+ const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
- planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
@@ -1607,9 +1621,11 @@ static void i9xx_update_wm(struct drm_device *dev)
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- int clock = enabled->mode.clock;
- int htotal = enabled->mode.htotal;
- int hdisplay = enabled->mode.hdisplay;
+ const struct drm_display_mode *adjusted_mode =
+ &to_intel_crtc(enabled)->config.adjusted_mode;
+ int clock = adjusted_mode->crtc_clock;
+ int htotal = adjusted_mode->htotal;
+ int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1658,10 +1674,12 @@ static void i9xx_update_wm(struct drm_device *dev)
}
}
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
@@ -1669,7 +1687,9 @@ static void i830_update_wm(struct drm_device *dev)
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+ &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
unsigned long line_time_us;
int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
@@ -1753,9 +1774,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
display, cursor);
}
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -1868,8 +1891,9 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
@@ -1970,8 +1994,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
@@ -2098,7 +2123,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config.adjusted_mode.clock;
+ pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
- pipe_w = intel_crtc->config.requested_mode.hdisplay;
- pipe_h = intel_crtc->config.requested_mode.vdisplay;
+ pipe_w = intel_crtc->config.pipe_src_w;
+ pipe_h = intel_crtc->config.pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -2176,27 +2201,18 @@ struct hsw_wm_maximums {
uint16_t fbc;
};
-struct hsw_wm_values {
- uint32_t wm_pipe[3];
- uint32_t wm_lp[3];
- uint32_t wm_lp_spr[3];
- uint32_t wm_linetime[3];
- bool enable_fbc_wm;
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
- bool fbc_wm_enabled;
};
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
@@ -2225,7 +2241,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
@@ -2248,7 +2264,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
@@ -2275,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_INFO(dev)->gen >= 8)
+ return 3072;
+ else if (INTEL_INFO(dev)->gen >= 7)
return 768;
else
return 512;
@@ -2320,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_INFO(dev)->gen >= 8)
+ max = level == 0 ? 255 : 2047;
+ else if (INTEL_INFO(dev)->gen >= 7)
/* IVB/HSW primary/sprite plane watermarks */
max = level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2350,27 +2370,30 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
}
/* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(void)
+static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
{
/* max that registers can hold */
- return 15;
+ if (INTEL_INFO(dev)->gen >= 8)
+ return 31;
+ else
+ return 15;
}
-static void ilk_wm_max(struct drm_device *dev,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct hsw_wm_maximums *max)
{
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_max();
+ max->fbc = ilk_fbc_wm_max(dev);
}
-static bool ilk_check_wm(int level,
- const struct hsw_wm_maximums *max,
- struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+ const struct hsw_wm_maximums *max,
+ struct intel_wm_level *result)
{
bool ret;
@@ -2406,14 +2429,12 @@ static bool ilk_check_wm(int level,
result->enable = true;
}
- DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
return ret;
}
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
int level,
- struct hsw_pipe_wm_parameters *p,
+ const struct hsw_pipe_wm_parameters *p,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2455,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
result->enable = true;
}
-static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
- int level, struct hsw_wm_maximums *max,
- struct hsw_pipe_wm_parameters *params,
- struct intel_wm_level *result)
-{
- enum pipe pipe;
- struct intel_wm_level res[3];
-
- for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
- ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
-
- result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
- result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
- result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
- result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
- result->enable = true;
-
- return ilk_check_wm(level, max, result);
-}
-
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- struct hsw_pipe_wm_parameters *params)
-{
- uint32_t pri_val, cur_val, spr_val;
- /* WM0 latency values stored in 0.1us units */
- uint16_t pri_latency = dev_priv->wm.pri_latency[0];
- uint16_t spr_latency = dev_priv->wm.spr_latency[0];
- uint16_t cur_latency = dev_priv->wm.cur_latency[0];
-
- pri_val = ilk_compute_pri_wm(params, pri_latency, false);
- spr_val = ilk_compute_spr_wm(params, spr_latency);
- cur_val = ilk_compute_cur_wm(params, cur_latency);
-
- WARN(pri_val > 127,
- "Primary WM error, mode not supported for pipe %c\n",
- pipe_name(pipe));
- WARN(spr_val > 127,
- "Sprite WM error, mode not supported for pipe %c\n",
- pipe_name(pipe));
- WARN(cur_val > 63,
- "Cursor WM error, mode not supported for pipe %c\n",
- pipe_name(pipe));
-
- return (pri_val << WM0_PIPE_PLANE_SHIFT) |
- (spr_val << WM0_PIPE_SPRITE_SHIFT) |
- cur_val;
-}
-
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
@@ -2554,19 +2526,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
wm[3] *= 2;
}
-static void intel_print_wm_latency(struct drm_device *dev,
- const char *name,
- const uint16_t wm[5])
+static int ilk_wm_max_level(const struct drm_device *dev)
{
- int level, max_level;
-
/* how many WM levels are we expecting */
if (IS_HASWELL(dev))
- max_level = 4;
+ return 4;
else if (INTEL_INFO(dev)->gen >= 6)
- max_level = 3;
+ return 3;
else
- max_level = 2;
+ return 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+ const char *name,
+ const uint16_t wm[5])
+{
+ int level, max_level = ilk_wm_max_level(dev);
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
@@ -2606,218 +2581,321 @@ static void intel_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
-static void hsw_compute_wm_parameters(struct drm_device *dev,
- struct hsw_pipe_wm_parameters *params,
- struct hsw_wm_maximums *lp_max_1_2,
- struct hsw_wm_maximums *lp_max_5_6)
+static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
+ struct hsw_pipe_wm_parameters *p,
+ struct intel_wm_config *config)
{
- struct drm_crtc *crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- enum pipe pipe;
- struct intel_wm_config config = {};
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct hsw_pipe_wm_parameters *p;
-
- pipe = intel_crtc->pipe;
- p = &params[pipe];
-
- p->active = intel_crtc_active(crtc);
- if (!p->active)
- continue;
-
- config.num_pipes_active++;
+ p->active = intel_crtc_active(crtc);
+ if (p->active) {
p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
- p->pri.horiz_pixels =
- intel_crtc->config.requested_mode.hdisplay;
+ p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
p->cur.horiz_pixels = 64;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
p->cur.enabled = true;
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ config->num_pipes_active += intel_crtc_active(crtc);
+
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct hsw_pipe_wm_parameters *p;
- pipe = intel_plane->pipe;
- p = &params[pipe];
+ if (intel_plane->pipe == pipe)
+ p->spr = intel_plane->wm;
- p->spr = intel_plane->wm;
-
- config.sprites_enabled |= p->spr.enabled;
- config.sprites_scaled |= p->spr.scaled;
+ config->sprites_enabled |= intel_plane->wm.enabled;
+ config->sprites_scaled |= intel_plane->wm.scaled;
}
+}
+
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+ const struct hsw_pipe_wm_parameters *params,
+ struct intel_pipe_wm *pipe_wm)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int level, max_level = ilk_wm_max_level(dev);
+ /* LP0 watermark maximums depend on this pipe alone */
+ struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = params->spr.enabled,
+ .sprites_scaled = params->spr.scaled,
+ };
+ struct hsw_wm_maximums max;
- ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
- /* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
- ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
- else
- *lp_max_5_6 = *lp_max_1_2;
+ for (level = 0; level <= max_level; level++)
+ ilk_compute_wm_level(dev_priv, level, params,
+ &pipe_wm->wm[level]);
+
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+ /* At least LP0 must be valid */
+ return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
}
-static void hsw_compute_wm_results(struct drm_device *dev,
- struct hsw_pipe_wm_parameters *params,
- struct hsw_wm_maximums *lp_maximums,
- struct hsw_wm_values *results)
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+ int level,
+ struct intel_wm_level *ret_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct intel_wm_level lp_results[4] = {};
- enum pipe pipe;
- int level, max_level, wm_lp;
+ const struct intel_crtc *intel_crtc;
- for (level = 1; level <= 4; level++)
- if (!hsw_compute_lp_wm(dev_priv, level,
- lp_maximums, params,
- &lp_results[level - 1]))
- break;
- max_level = level - 1;
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ const struct intel_wm_level *wm =
+ &intel_crtc->wm.active.wm[level];
+
+ if (!wm->enable)
+ return;
+
+ ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+ ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+ ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+ ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+ }
+
+ ret_wm->enable = true;
+}
- memset(results, 0, sizeof(*results));
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+ const struct hsw_wm_maximums *max,
+ struct intel_pipe_wm *merged)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+
+ merged->fbc_wm_enabled = true;
- /* The spec says it is preferred to disable FBC WMs instead of disabling
- * a WM level. */
- results->enable_fbc_wm = true;
+ /* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
- if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
- results->enable_fbc_wm = false;
- lp_results[level - 1].fbc_val = 0;
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ ilk_merge_wm_level(dev, level, wm);
+
+ if (!ilk_validate_wm_level(level, max, wm))
+ break;
+
+ /*
+ * The spec says it is preferred to disable
+ * FBC WMs instead of disabling a WM level.
+ */
+ if (wm->fbc_val > max->fbc) {
+ merged->fbc_wm_enabled = false;
+ wm->fbc_val = 0;
}
}
+}
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+ /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
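Aside: the wm_lp-to-level mapping above is a one-liner and easy to misread. A standalone sketch (not part of the patch) that mirrors the same expression and prints the two possible mappings:

#include <stdbool.h>
#include <stdio.h>

/* Standalone mirror of ilk_wm_lp_to_level(): with four WM levels the LP1-3
 * registers map to levels 1,2,3; when a fifth level (index 4) is available,
 * LP2 and LP3 skip ahead and the mapping becomes 1,3,4. */
static int wm_lp_to_level(int wm_lp, bool have_level_4)
{
	return wm_lp + (wm_lp >= 2 && have_level_4);
}

int main(void)
{
	for (int wm_lp = 1; wm_lp <= 3; wm_lp++)
		printf("LP%d -> level %d (4 levels) / level %d (5 levels)\n",
		       wm_lp, wm_lp_to_level(wm_lp, false),
		       wm_lp_to_level(wm_lp, true));
	return 0;	/* prints 1/1, 2/3, 3/4 */
}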
+static void hsw_compute_wm_results(struct drm_device *dev,
+ const struct intel_pipe_wm *merged,
+ enum intel_ddb_partitioning partitioning,
+ struct hsw_wm_values *results)
+{
+ struct intel_crtc *intel_crtc;
+ int level, wm_lp;
+
+ results->enable_fbc_wm = merged->fbc_wm_enabled;
+ results->partitioning = partitioning;
+
+ /* LP1+ register values */
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
const struct intel_wm_level *r;
- level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
- if (level > max_level)
+ level = ilk_wm_lp_to_level(wm_lp, merged);
+
+ r = &merged->wm[level];
+ if (!r->enable)
break;
- r = &lp_results[level - 1];
- results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
- r->fbc_val,
- r->pri_val,
- r->cur_val);
+ results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+ ((level * 2) << WM1_LP_LATENCY_SHIFT) |
+ (r->pri_val << WM1_LP_SR_SHIFT) |
+ r->cur_val;
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ results->wm_lp[wm_lp - 1] |=
+ r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+ else
+ results->wm_lp[wm_lp - 1] |=
+ r->fbc_val << WM1_LP_FBC_SHIFT;
+
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
- for_each_pipe(pipe)
- results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
- &params[pipe]);
+ /* LP0 register values */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ enum pipe pipe = intel_crtc->pipe;
+ const struct intel_wm_level *r =
+ &intel_crtc->wm.active.wm[0];
- for_each_pipe(pipe) {
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+ if (WARN_ON(!r->enable))
+ continue;
+
+ results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+ results->wm_pipe[pipe] =
+ (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+ (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+ r->cur_val;
}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
- struct hsw_wm_values *r2)
+static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
{
- int i, val_r1 = 0, val_r2 = 0;
+ int level, max_level = ilk_wm_max_level(dev);
+ int level1 = 0, level2 = 0;
- for (i = 0; i < 3; i++) {
- if (r1->wm_lp[i] & WM3_LP_EN)
- val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
- if (r2->wm_lp[i] & WM3_LP_EN)
- val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+ for (level = 1; level <= max_level; level++) {
+ if (r1->wm[level].enable)
+ level1 = level;
+ if (r2->wm[level].enable)
+ level2 = level;
}
- if (val_r1 == val_r2) {
- if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+ if (level1 == level2) {
+ if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
return r2;
else
return r1;
- } else if (val_r1 > val_r2) {
+ } else if (level1 > level2) {
return r1;
} else {
return r2;
}
}
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+ const struct hsw_wm_values *old,
+ const struct hsw_wm_values *new)
+{
+ unsigned int dirty = 0;
+ enum pipe pipe;
+ int wm_lp;
+
+ for_each_pipe(pipe) {
+ if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+ dirty |= WM_DIRTY_LINETIME(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+ dirty |= WM_DIRTY_PIPE(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+ }
+
+ if (old->enable_fbc_wm != new->enable_fbc_wm) {
+ dirty |= WM_DIRTY_FBC;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->partitioning != new->partitioning) {
+ dirty |= WM_DIRTY_DDB;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ /* LP1+ watermarks already deemed dirty, no need to continue */
+ if (dirty & WM_DIRTY_LP_ALL)
+ return dirty;
+
+ /* Find the lowest numbered LP1+ watermark in need of an update... */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+ old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+ break;
+ }
+
+ /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+ for (; wm_lp <= 3; wm_lp++)
+ dirty |= WM_DIRTY_LP(wm_lp);
+
+ return dirty;
+}
+
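Aside: the WM_DIRTY_* layout above gives pipes bits 0-2, linetime bits 8-10, LP1-3 bits 16-18, and FBC/DDB bits 24-25, and the final scan marks the lowest changed LP register plus everything above it. A standalone sketch (not part of the patch; the SK_* macros merely mirror the layout above) of that LP scan:

#include <stdint.h>
#include <stdio.h>

#define SK_WM_DIRTY_PIPE(p)     (1u << (p))
#define SK_WM_DIRTY_LINETIME(p) (1u << (8 + (p)))
#define SK_WM_DIRTY_LP(lp)      (1u << (15 + (lp)))
#define SK_WM_DIRTY_FBC         (1u << 24)
#define SK_WM_DIRTY_DDB         (1u << 25)

int main(void)
{
	uint32_t old_lp[3] = { 0x1000, 0x2000, 0x3000 };
	uint32_t new_lp[3] = { 0x1000, 0x2222, 0x3000 };
	unsigned int dirty = 0;
	int lp;

	/* Find the lowest LP register that changed... */
	for (lp = 1; lp <= 3; lp++)
		if (old_lp[lp - 1] != new_lp[lp - 1])
			break;
	/* ...and mark it plus every higher LP register dirty, since the
	 * higher watermarks must be disabled while rewriting lower ones. */
	for (; lp <= 3; lp++)
		dirty |= SK_WM_DIRTY_LP(lp);

	printf("dirty = 0x%08x\n", dirty);	/* 0x00060000: LP2 and LP3 */
	return 0;
}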
/*
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
- struct hsw_wm_values *results,
- enum intel_ddb_partitioning partitioning)
+ struct hsw_wm_values *results)
{
- struct hsw_wm_values previous;
+ struct hsw_wm_values *previous = &dev_priv->wm.hw;
+ unsigned int dirty;
uint32_t val;
- enum intel_ddb_partitioning prev_partitioning;
- bool prev_enable_fbc_wm;
-
- previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
- previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
- previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
- previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
- previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
- previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
- previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
- previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
- previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
- previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
- prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
- prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
- if (memcmp(results->wm_pipe, previous.wm_pipe,
- sizeof(results->wm_pipe)) == 0 &&
- memcmp(results->wm_lp, previous.wm_lp,
- sizeof(results->wm_lp)) == 0 &&
- memcmp(results->wm_lp_spr, previous.wm_lp_spr,
- sizeof(results->wm_lp_spr)) == 0 &&
- memcmp(results->wm_linetime, previous.wm_linetime,
- sizeof(results->wm_linetime)) == 0 &&
- partitioning == prev_partitioning &&
- results->enable_fbc_wm == prev_enable_fbc_wm)
+
+ dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+ if (!dirty)
return;
- if (previous.wm_lp[2] != 0)
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, 0);
- if (previous.wm_lp[1] != 0)
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, 0);
- if (previous.wm_lp[0] != 0)
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, 0);
- if (previous.wm_pipe[0] != results->wm_pipe[0])
+ if (dirty & WM_DIRTY_PIPE(PIPE_A))
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
- if (previous.wm_pipe[1] != results->wm_pipe[1])
+ if (dirty & WM_DIRTY_PIPE(PIPE_B))
I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
- if (previous.wm_pipe[2] != results->wm_pipe[2])
+ if (dirty & WM_DIRTY_PIPE(PIPE_C))
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
- if (previous.wm_linetime[0] != results->wm_linetime[0])
+ if (dirty & WM_DIRTY_LINETIME(PIPE_A))
I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
- if (previous.wm_linetime[1] != results->wm_linetime[1])
+ if (dirty & WM_DIRTY_LINETIME(PIPE_B))
I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
- if (previous.wm_linetime[2] != results->wm_linetime[2])
+ if (dirty & WM_DIRTY_LINETIME(PIPE_C))
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
- if (prev_partitioning != partitioning) {
+ if (dirty & WM_DIRTY_DDB) {
val = I915_READ(WM_MISC);
- if (partitioning == INTEL_DDB_PART_1_2)
+ if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
I915_WRITE(WM_MISC, val);
}
- if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+ if (dirty & WM_DIRTY_FBC) {
val = I915_READ(DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2904,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(DISP_ARB_CTL, val);
}
- if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
- if (results->wm_lp[0] != 0)
+ if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
- if (results->wm_lp[1] != 0)
+ if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
- if (results->wm_lp[2] != 0)
+ if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+ dev_priv->wm.hw = *results;
}
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
- struct hsw_pipe_wm_parameters params[3];
- struct hsw_wm_values results_1_2, results_5_6, *best_results;
+ struct hsw_wm_maximums max;
+ struct hsw_pipe_wm_parameters params = {};
+ struct hsw_wm_values results = {};
enum intel_ddb_partitioning partitioning;
+ struct intel_pipe_wm pipe_wm = {};
+ struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ struct intel_wm_config config = {};
- hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
+ hsw_compute_wm_parameters(crtc, &params, &config);
+
+ intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+ if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+ return;
- hsw_compute_wm_results(dev, params,
- &lp_max_1_2, &results_1_2);
- if (lp_max_1_2.pri != lp_max_5_6.pri) {
- hsw_compute_wm_results(dev, params,
- &lp_max_5_6, &results_5_6);
- best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+ intel_crtc->wm.active = pipe_wm;
+
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev, &max, &lp_wm_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (INTEL_INFO(dev)->gen >= 7 &&
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev, &max, &lp_wm_5_6);
+
+ best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
- best_results = &results_1_2;
+ best_lp_wm = &lp_wm_1_2;
}
- partitioning = (best_results == &results_1_2) ?
+ partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- hsw_write_wm_values(dev_priv, best_results, partitioning);
+ hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+ hsw_write_wm_values(dev_priv, &results);
}
static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2977,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
- haswell_update_wm(plane->dev);
+ haswell_update_wm(crtc);
}
static bool
@@ -2898,7 +2996,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
return false;
}
- clock = crtc->mode.clock;
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
/* Use the small buffer method to calculate the sprite watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3031,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
if (!clock) {
*sprite_wm = 0;
return false;
@@ -3044,6 +3142,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_wm *active = &intel_crtc->wm.active;
+ enum pipe pipe = intel_crtc->pipe;
+ static const unsigned int wm0_pipe_reg[] = {
+ [PIPE_A] = WM0_PIPEA_ILK,
+ [PIPE_B] = WM0_PIPEB_ILK,
+ [PIPE_C] = WM0_PIPEC_IVB,
+ };
+
+ hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+ if (intel_crtc_active(crtc)) {
+ u32 tmp = hw->wm_pipe[pipe];
+
+ /*
+ * For active pipes LP0 watermark is marked as
+	 * enabled, and LP1+ watermarks as disabled since
+ * we can't really reverse compute them in case
+ * multiple pipes are active.
+ */
+ active->wm[0].enable = true;
+ active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+ active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+ active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+ active->linetime = hw->wm_linetime[pipe];
+ } else {
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /*
+ * For inactive pipes, all watermark levels
+ * should be marked as enabled but zeroed,
+ * which is what we'd compute them to.
+ */
+ for (level = 0; level <= max_level; level++)
+ active->wm[level].enable = true;
+ }
+}
+
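Aside: the WM0 pipe registers written by hsw_compute_wm_results() and read back here pack the primary, sprite and cursor values into one word, so the readback is just the inverse of the pack. A standalone sketch of that symmetry (not part of the patch; the shift values below are illustrative placeholders, not the real register layout):

#include <stdint.h>
#include <stdio.h>

#define SK_PLANE_SHIFT  16	/* placeholder, not the hardware value */
#define SK_SPRITE_SHIFT  8	/* placeholder, not the hardware value */

static uint32_t pack_wm0(uint32_t pri, uint32_t spr, uint32_t cur)
{
	return (pri << SK_PLANE_SHIFT) | (spr << SK_SPRITE_SHIFT) | cur;
}

static void unpack_wm0(uint32_t reg, uint32_t *pri, uint32_t *spr, uint32_t *cur)
{
	*pri = (reg >> SK_PLANE_SHIFT) & 0xff;
	*spr = (reg >> SK_SPRITE_SHIFT) & 0xff;
	*cur = reg & 0xff;
}

int main(void)
{
	uint32_t reg = pack_wm0(42, 17, 6), pri, spr, cur;

	unpack_wm0(reg, &pri, &spr, &cur);
	printf("pri=%u spr=%u cur=%u\n", pri, spr, cur);	/* 42 17 6 */
	return 0;
}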
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ ilk_pipe_wm_get_hw_state(crtc);
+
+ hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+ hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+ hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+ hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+ hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+ hw->enable_fbc_wm =
+ !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3076,12 +3242,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(dev);
+ dev_priv->display.update_wm(crtc);
}
void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3453,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
return limits;
}
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+ int new_power;
+
+ new_power = dev_priv->rps.power;
+ switch (dev_priv->rps.power) {
+ case LOW_POWER:
+ if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+ new_power = LOW_POWER;
+ else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val == dev_priv->rps.min_delay)
+ new_power = LOW_POWER;
+ if (val == dev_priv->rps.max_delay)
+ new_power = HIGH_POWER;
+ if (new_power == dev_priv->rps.power)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ I915_WRITE(GEN6_RP_UP_EI, 12500);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+ /* Downclock if less than 85% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ I915_WRITE(GEN6_RP_UP_EI, 10250);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+ /* Downclock if less than 75% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ I915_WRITE(GEN6_RP_UP_EI, 8000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+ /* Downclock if less than 60% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+ }
+
+ dev_priv->rps.power = new_power;
+ dev_priv->rps.last_adj = 0;
+}
+
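Aside: the evaluation-interval and threshold registers above are programmed in 1280 ns units, so GEN6_RP_UP_EI = 12500 is roughly 16 ms and a threshold of 11800 is about 94% busy, matching the in-code comments. A standalone sketch (not part of the patch) that reproduces those conversions for a few of the values programmed above:

#include <stdio.h>

int main(void)
{
	const double unit_ns = 1280.0;	/* register unit per the comment above */
	struct { const char *name; unsigned ei, thresh; } rows[] = {
		{ "LOW_POWER  up",   12500, 11800 },
		{ "LOW_POWER  down", 25000, 21250 },
		{ "BETWEEN    up",   10250,  9225 },
		{ "HIGH_POWER up",    8000,  6800 },
	};

	for (unsigned i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
		printf("%-16s EI %.1f ms, threshold %.0f%% busy\n",
		       rows[i].name,
		       rows[i].ei * unit_ns / 1e6,
		       100.0 * rows[i].thresh / rows[i].ei);
	return 0;
}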
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3557,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val == dev_priv->rps.cur_delay)
return;
+ gen6_set_rps_thresholds(dev_priv, val);
+
if (IS_HASWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
@@ -3320,6 +3580,32 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->info->is_valleyview)
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ else
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ dev_priv->rps.last_adj = 0;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->info->is_valleyview)
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ else
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ dev_priv->rps.last_adj = 0;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
/*
* Wait until the previous freq change has completed,
* or the timeout elapsed, and then update our notion
@@ -3415,6 +3701,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
}
}
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+ if (IS_GEN6(dev))
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+ if (IS_HASWELL(dev))
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
int intel_enable_rc6(const struct drm_device *dev)
{
/* No RC6 before Ironlake */
@@ -3429,18 +3729,13 @@ int intel_enable_rc6(const struct drm_device *dev)
if (INTEL_INFO(dev)->gen == 5)
return 0;
- if (IS_HASWELL(dev)) {
- DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+ if (IS_HASWELL(dev))
return INTEL_RC6_ENABLE;
- }
/* snb/ivb have more than one rc6 state. */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ if (INTEL_INFO(dev)->gen == 6)
return INTEL_RC6_ENABLE;
- }
- DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
@@ -3467,6 +3762,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
+static void gen8_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ uint32_t rc6_mask = 0, rp_state_cap;
+ int unused;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* 1c & 1d: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+
+ /* 2b: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_ring(ring, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /* 3: Enable RC6 */
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
+
+	/* 4: Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
+ I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
+
+ /* Docs recommend 900MHz, and 300 MHz respectively */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
+ I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 5: Enable RPS */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+	/* 6: Ring frequency + overclocking (our driver does this later) */
+
+ gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+
+ gen6_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
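Aside: the RPNSWREQ/RC_VIDEO_FREQ requests and the GEN6_GT_PERF_STATUS readback above all use 50 MHz units, which is why HSW_FREQUENCY(10) is commented as a 500 MHz request. A standalone sketch of the unit conversion (not part of the patch; the helper names are illustrative, not the driver macros):

#include <stdint.h>
#include <stdio.h>

/* 50 MHz frequency units, per the "In units of 50MHz" comment elsewhere
 * in this file and the trace call that multiplies by 50. */
static unsigned int units_to_mhz(uint8_t val)
{
	return val * 50;
}

static uint8_t perf_status_to_units(uint32_t gt_perf_status)
{
	return (gt_perf_status & 0xff00) >> 8;
}

int main(void)
{
	printf("HSW_FREQUENCY(10) -> %u MHz\n", units_to_mhz(10));	/* 500 */
	printf("HSW_FREQUENCY(12) -> %u MHz\n", units_to_mhz(12));	/* 600 */
	printf("GT_PERF_STATUS 0x1200 -> %u MHz\n",
	       units_to_mhz(perf_status_to_units(0x1200)));		/* 900 */
	return 0;
}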
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3501,7 +3868,10 @@ static void gen6_enable_rps(struct drm_device *dev)
/* In units of 50MHz */
dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
- dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+ dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
dev_priv->rps.cur_delay = 0;
/* disable the counters and set deterministic thresholds */
@@ -3518,7 +3888,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -3539,48 +3909,16 @@ static void gen6_enable_rps(struct drm_device *dev)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ intel_print_rc6_info(dev, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
- if (IS_HASWELL(dev)) {
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(10));
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(12));
- } else {
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
- }
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_delay << 24 |
- dev_priv->rps.min_delay << 16);
-
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
+ /* Power down if completely idle for over 50ms */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
if (!ret) {
@@ -3596,7 +3934,8 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
- gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
gen6_enable_rps_interrupts(dev);
@@ -3624,23 +3963,28 @@ void gen6_update_ring_freq(struct drm_device *dev)
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
int scaling_factor = 180;
+ struct cpufreq_policy *policy;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_ia_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to measured freq if none found, PCU will ensure we
+ * don't go over
+ */
max_ia_freq = tsc_khz;
+ }
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
- min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
- /* convert DDR frequency from units of 133.3MHz to bandwidth */
- min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+ min_ring_freq = I915_READ(DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ min_ring_freq = mult_frac(min_ring_freq, 8, 3);
/*
* For each potential GPU frequency, load a ring frequency we'd like
@@ -3652,8 +3996,11 @@ void gen6_update_ring_freq(struct drm_device *dev)
int diff = dev_priv->rps.max_delay - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_HASWELL(dev)) {
- ring_freq = (gpu_freq * 5 + 3) / 4;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(dev)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
} else {
@@ -3709,24 +4056,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
-static void vlv_rps_timer_work(struct work_struct *work)
-{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps.vlv_work.work);
-
- /*
- * Timer fired, we must be idle. Drop to min voltage state.
- * Note: we use RPe here since it should match the
- * Vmin we were shooting for. That should give us better
- * perf when we come back out of RC6 than if we used the
- * min freq available.
- */
- mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4102,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 gtfifodbg, val;
+ u32 gtfifodbg, val, rc6_mode = 0;
int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
@@ -3812,9 +4142,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
/* allows RC6 residency counter to work */
- I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
- I915_WRITE(GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE);
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+ intel_print_rc6_info(dev, rc6_mode);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
@@ -3985,6 +4322,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+ intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4942,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
} else if (INTEL_INFO(dev)->gen >= 6) {
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
cancel_work_sync(&dev_priv->rps.work);
- if (IS_VALLEYVIEW(dev))
- cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
+ dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
@@ -4625,10 +4963,14 @@ static void intel_gen6_powersave_work(struct work_struct *work)
if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
+ } else if (IS_BROADWELL(dev)) {
+ gen8_enable_rps(dev);
+ gen6_update_ring_freq(dev);
} else {
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
}
+ dev_priv->rps.enabled = true;
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -4672,7 +5014,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
+ intel_flush_primary_plane(dev_priv, pipe);
}
}
@@ -4932,6 +5274,50 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void gen8_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe i;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* FIXME(BDW): Check all the w/a, some might only apply to
+ * pre-production hw. */
+
+	WARN(!i915_preliminary_hw_support,
+	     "GEN8_CENTROID_PIXEL_OPT_DIS should not be needed for production\n");
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
+
+ I915_WRITE(_3D_CHICKEN3,
+ _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+
+ I915_WRITE(COMMON_SLICE_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
+
+ /* WaSwitchSolVfFArbitrationPriority */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+ /* WaPsrDPAMaskVBlankInSRD */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+
+ /* WaPsrDPRSUnmaskVBlankInSRD */
+	for_each_pipe(i) {
+		I915_WRITE(CHICKEN_PIPESL_1(i),
+			   I915_READ(CHICKEN_PIPESL_1(i)) |
+			   DPRS_MASK_VBLANK_SRD);
+	}
+}
+
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5255,6 +5641,25 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
+static bool is_always_on_power_domain(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ unsigned long always_on_domains;
+
+ BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+ if (IS_BROADWELL(dev)) {
+ always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
+ } else if (IS_HASWELL(dev)) {
+ always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+ } else {
+ WARN_ON(1);
+ return true;
+ }
+
+ return BIT(domain) & always_on_domains;
+}
+
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -5268,23 +5673,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
if (!HAS_POWER_WELL(dev))
return true;
- switch (domain) {
- case POWER_DOMAIN_PIPE_A:
- case POWER_DOMAIN_TRANSCODER_EDP:
+ if (is_always_on_power_domain(dev, domain))
return true;
- case POWER_DOMAIN_PIPE_B:
- case POWER_DOMAIN_PIPE_C:
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- case POWER_DOMAIN_TRANSCODER_A:
- case POWER_DOMAIN_TRANSCODER_B:
- case POWER_DOMAIN_TRANSCODER_C:
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
- default:
- BUG();
- }
}
static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5328,83 +5721,136 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for_each_pipe(p)
if (p != PIPE_A)
- dev->last_vblank[p] = 0;
+ dev->vblank[p].last = 0;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
-static struct i915_power_well *hsw_pwr;
+static void __intel_power_well_get(struct drm_device *dev,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ __intel_set_power_well(dev, true);
+}
+
+static void __intel_power_well_put(struct drm_device *dev,
+ struct i915_power_well *power_well)
+{
+ WARN_ON(!power_well->count);
+ if (!--power_well->count && i915_disable_power_well)
+ __intel_set_power_well(dev, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+
+ if (!HAS_POWER_WELL(dev))
+ return;
+
+ if (is_always_on_power_domain(dev, domain))
+ return;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_power_well_get(dev, &power_domains->power_wells[0]);
+ mutex_unlock(&power_domains->lock);
+}
+
+void intel_display_power_put(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+
+ if (!HAS_POWER_WELL(dev))
+ return;
+
+ if (is_always_on_power_domain(dev, domain))
+ return;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_power_well_put(dev, &power_domains->power_wells[0]);
+ mutex_unlock(&power_domains->lock);
+}
+
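Aside: __intel_power_well_get()/__intel_power_well_put() above implement a plain refcount: the first reference powers the well on, and it may be powered off only when the last reference drops (and only if i915_disable_power_well allows it). A standalone refcounting sketch (not part of the patch; the sketch_* names are illustrative, not the driver API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_power_well { int count; bool hw_enabled; };

static void sketch_get(struct sketch_power_well *pw)
{
	if (!pw->count++)
		pw->hw_enabled = true;	/* first user powers the well on */
}

static void sketch_put(struct sketch_power_well *pw, bool allow_disable)
{
	assert(pw->count > 0);
	if (!--pw->count && allow_disable)
		pw->hw_enabled = false;	/* last user may power it off */
}

int main(void)
{
	struct sketch_power_well pw = { 0, false };

	sketch_get(&pw);		/* display code takes a reference */
	sketch_get(&pw);		/* audio driver takes another */
	sketch_put(&pw, true);
	printf("after one put: count=%d enabled=%d\n", pw.count, pw.hw_enabled);
	sketch_put(&pw, true);
	printf("after both puts: count=%d enabled=%d\n", pw.count, pw.hw_enabled);
	return 0;
}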
+static struct i915_power_domains *hsw_pwr;
/* Display audio driver power well request */
void i915_request_power_well(void)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!hsw_pwr))
return;
- spin_lock_irq(&hsw_pwr->lock);
- if (!hsw_pwr->count++ &&
- !hsw_pwr->i915_request)
- __intel_set_power_well(hsw_pwr->device, true);
- spin_unlock_irq(&hsw_pwr->lock);
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ mutex_lock(&hsw_pwr->lock);
+ __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
+ mutex_unlock(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
void i915_release_power_well(void)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!hsw_pwr))
return;
- spin_lock_irq(&hsw_pwr->lock);
- WARN_ON(!hsw_pwr->count);
- if (!--hsw_pwr->count &&
- !hsw_pwr->i915_request)
- __intel_set_power_well(hsw_pwr->device, false);
- spin_unlock_irq(&hsw_pwr->lock);
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ mutex_lock(&hsw_pwr->lock);
+ __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
+ mutex_unlock(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
-int i915_init_power_well(struct drm_device *dev)
+int intel_power_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
- hsw_pwr = &dev_priv->power_well;
+ mutex_init(&power_domains->lock);
+ hsw_pwr = power_domains;
- hsw_pwr->device = dev;
- spin_lock_init(&hsw_pwr->lock);
- hsw_pwr->count = 0;
+ power_well = &power_domains->power_wells[0];
+ power_well->count = 0;
return 0;
}
-void i915_remove_power_well(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_device *dev)
{
hsw_pwr = NULL;
}
-void intel_set_power_well(struct drm_device *dev, bool enable)
+static void intel_power_domains_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_power_well *power_well = &dev_priv->power_well;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
if (!HAS_POWER_WELL(dev))
return;
- if (!i915_disable_power_well && !enable)
- return;
+ mutex_lock(&power_domains->lock);
- spin_lock_irq(&power_well->lock);
- power_well->i915_request = enable;
+ power_well = &power_domains->power_wells[0];
+ __intel_set_power_well(dev, power_well->count > 0);
- /* only reject "disable" power well request */
- if (power_well->count && !enable) {
- spin_unlock_irq(&power_well->lock);
- return;
- }
-
- __intel_set_power_well(dev, enable);
- spin_unlock_irq(&power_well->lock);
+ mutex_unlock(&power_domains->lock);
}
/*
@@ -5413,7 +5859,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
-void intel_init_power_well(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5421,7 +5867,8 @@ void intel_init_power_well(struct drm_device *dev)
return;
/* For now, we need the power well to be always enabled. */
- intel_set_power_well(dev, true);
+ intel_display_set_init_power(dev, true);
+ intel_power_domains_resume(dev);
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
@@ -5525,6 +5972,8 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ } else if (INTEL_INFO(dev)->gen == 8) {
+ dev_priv->display.init_clock_gating = gen8_init_clock_gating;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
@@ -5686,7 +6135,4 @@ void intel_pm_init(struct drm_device *dev)
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
-
- INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
}
-
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee1026fc..b620337e6d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ ring->tail &= ring->size - 1;
+ if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+ return;
+ ring->write_tail(ring, ring->tail);
+}
+
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -350,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
+ int ret;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+
+}
+
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -385,8 +436,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
int ret = 0;
u32 head;
- if (HAS_FORCE_WAKE(dev))
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
@@ -459,8 +509,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- if (HAS_FORCE_WAKE(dev))
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
return ret;
}
@@ -559,8 +608,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (HAS_L3_GPU_CACHE(dev))
- I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ if (HAS_L3_DPF(dev))
+ I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
return ret;
}
@@ -593,7 +642,7 @@ update_mboxes(struct intel_ring_buffer *ring,
#define MBOX_UPDATE_DWORDS 4
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_NOOP);
}
@@ -629,9 +678,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
}
@@ -723,7 +772,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +791,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
}
@@ -963,9 +1012,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
}
@@ -987,10 +1036,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ if (HAS_L3_DPF(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+ GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1058,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
- I915_WRITE_IMR(ring,
- ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ if (HAS_L3_DPF(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1059,6 +1107,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && ring->id == RCS) {
+ I915_WRITE_IMR(ring,
+ ~(ring->irq_enable_mask |
+ GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+ } else {
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ }
+ POSTING_READ(RING_IMR(ring->mmio_base));
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && ring->id == RCS) {
+ I915_WRITE_IMR(ring,
+ ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ } else {
+ I915_WRITE_IMR(ring, ~0);
+ }
+ POSTING_READ(RING_IMR(ring->mmio_base));
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
@@ -1317,7 +1411,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_ring_idle(ring);
- if (ret)
+ if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
@@ -1328,6 +1422,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
if (ring->cleanup)
ring->cleanup(ring);
@@ -1414,6 +1510,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
if (ret != -ENOSPC)
return ret;
+ /* force the tail write in case we have been skipping them */
+ __intel_ring_advance(ring);
+
trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1574,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_request) {
+ if (ring->outstanding_lazy_seqno) {
ret = i915_add_request(ring, NULL);
if (ret)
return ret;
@@ -1495,10 +1594,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
- if (ring->outstanding_lazy_request)
+ if (ring->outstanding_lazy_seqno)
return 0;
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+ if (ring->preallocated_lazy_request == NULL) {
+ struct drm_i915_gem_request *request;
+
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ring->preallocated_lazy_request = request;
+ }
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1654,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- BUG_ON(ring->outstanding_lazy_request);
+ BUG_ON(ring->outstanding_lazy_seqno);
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1667,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
ring->hangcheck.seqno = seqno;
}
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
- ring->tail &= ring->size - 1;
- if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
- return;
- ring->write_tail(ring, ring->tail);
-}
-
-
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -1613,6 +1711,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ cmd += 1;
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
@@ -1624,9 +1724,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
+ } else {
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ }
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+ !(flags & I915_DISPATCH_SECURE);
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ /* FIXME(BDW): Address space and security selectors. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_ring_emit(ring, offset);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
+
return 0;
}
@@ -1686,6 +1815,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ cmd += 1;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
@@ -1697,8 +1828,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
+ } else {
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ }
intel_ring_advance(ring);
if (IS_GEN7(dev) && flush)
@@ -1721,8 +1857,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->flush = gen8_render_ring_flush;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ } else {
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ }
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
@@ -1764,6 +1906,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->write_tail = ring_write_tail;
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (IS_GEN8(dev))
+ ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
@@ -1877,7 +2021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->id = VCS;
ring->write_tail = ring_write_tail;
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (INTEL_INFO(dev)->gen >= 6) {
ring->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
@@ -1886,10 +2030,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer =
+ gen8_ring_dispatch_execbuffer;
+ } else {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer =
+ gen6_ring_dispatch_execbuffer;
+ }
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1935,10 +2089,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ } else {
+ ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ }
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1967,10 +2129,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- ring->irq_get = hsw_vebox_get_irq;
- ring->irq_put = hsw_vebox_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ } else {
+ ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ ring->irq_get = hsw_vebox_get_irq;
+ ring->irq_put = hsw_vebox_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ }
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974d5..71a73f4fe25 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
enum intel_ring_hangcheck_action {
+ HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
- u32 outstanding_lazy_request;
+ struct drm_i915_gem_request *preallocated_lazy_request;
+ u32 outstanding_lazy_seqno;
bool gpu_caches_dirty;
bool fbc_dirty;
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-void intel_ring_advance(struct intel_ring_buffer *ring);
+static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ ring->tail &= ring->size - 1;
+}
+void __intel_ring_advance(struct intel_ring_buffer *ring);
+
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
- BUG_ON(ring->outstanding_lazy_request == 0);
- return ring->outstanding_lazy_request;
+ BUG_ON(ring->outstanding_lazy_seqno == 0);
+ return ring->outstanding_lazy_seqno;
}
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd5b76..a583e8f718a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
while ((status == SDVO_CMD_STATUS_PENDING ||
- status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
if (retry < 10)
msleep(15);
else
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
{
- unsigned dotclock = pipe_config->adjusted_mode.clock;
+ unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
/* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
*/
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
- adjusted_mode->clock *= pipe_config->pixel_multiplier;
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
!intel_sdvo_set_tv_format(intel_sdvo))
return;
- /* We have tried to get input timing in mode_fixup, and filled into
- * adjusted_mode.
- */
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- input_dtd.part1.clock /= crtc->config.pixel_multiplier;
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
+ int dotclock;
u32 flags = 0, sdvox;
u8 val;
bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
+ dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
&val, 1)) {
@@ -1770,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct edid *edid;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
/* set the bus switch and get the modes */
edid = intel_sdvo_get_edid(connector);
@@ -1865,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
uint32_t reply = 0, format_map = 0;
int i;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
/* Read the list of supported input resolutions for the selected TV
* format.
*/
@@ -1899,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
break;
}
}
-
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_connector->tv_format);
intel_sdvo_destroy_enhance_property(connector);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
@@ -2394,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2442,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2467,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
+ drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2479,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2510,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2534,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
+ drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2605,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
- if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+ drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
+ }
}
}
@@ -2876,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
- intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5ea54..9944d8135e8 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -25,7 +25,10 @@
#include "i915_drv.h"
#include "intel_drv.h"
-/* IOSF sideband */
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
@@ -101,19 +104,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
return val;
}
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
- DPIO_OPCODE_REG_READ, reg, &val);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+static u32 vlv_get_phy_port(enum pipe pipe)
+{
+ u32 port = IOSF_PORT_DPIO;
+
+ WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
+
+ return port;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+ DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
DPIO_OPCODE_REG_WRITE, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b3900..b9fabf826f7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,14 +260,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dev_priv->sprite_scaling_enabled |= 1 << pipe;
if (!scaling_was_enabled) {
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -306,7 +306,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
}
static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
}
static int
@@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
- if (!intel_crtc->primary_disabled)
+ if (intel_crtc->primary_enabled)
return;
- intel_crtc->primary_disabled = false;
- intel_update_fbc(dev);
+ intel_crtc->primary_enabled = true;
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ if (intel_crtc->config.ips_enabled) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ hsw_enable_ips(intel_crtc);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
static void
@@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
- if (intel_crtc->primary_disabled)
+ if (!intel_crtc->primary_enabled)
return;
- I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+ intel_crtc->primary_enabled = false;
- intel_crtc->primary_disabled = true;
- intel_update_fbc(dev);
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->fbc.plane == intel_crtc->plane)
+ intel_disable_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_disable_ips(intel_crtc);
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
static int
@@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj, *old_obj;
- int pipe = intel_plane->pipe;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int ret = 0;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int ret;
bool disable_primary = false;
bool visible;
int hscale, vscale;
@@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
- .x2 = crtc->mode.hdisplay,
- .y2 = crtc->mode.vdisplay,
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ const struct {
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ } orig = {
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
};
-
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- old_obj = intel_plane->obj;
-
- intel_plane->crtc_x = crtc_x;
- intel_plane->crtc_y = crtc_y;
- intel_plane->crtc_w = crtc_w;
- intel_plane->crtc_h = crtc_h;
- intel_plane->src_x = src_x;
- intel_plane->src_y = src_y;
- intel_plane->src_w = src_w;
- intel_plane->src_h = src_h;
-
- /* Pipe must be running... */
- if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
- DRM_DEBUG_KMS("Pipe disabled\n");
- return -EINVAL;
- }
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* we can disable the primary and save power.
*/
disable_primary = drm_rect_equals(&dst, &clip);
- WARN_ON(disable_primary && !visible);
+ WARN_ON(disable_primary && !visible && intel_crtc->active);
mutex_lock(&dev->struct_mutex);
@@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* the sprite planes only require 128KiB alignment and 32 PTE padding.
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret)
- goto out_unlock;
- intel_plane->obj = obj;
-
- /*
- * Be sure to re-enable the primary before the sprite is no longer
- * covering it fully.
- */
- if (!disable_primary)
- intel_enable_primary(crtc);
+ mutex_unlock(&dev->struct_mutex);
- if (visible)
- intel_plane->update_plane(plane, crtc, fb, obj,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- else
- intel_plane->disable_plane(plane, crtc);
+ if (ret)
+ return ret;
+
+ intel_plane->crtc_x = orig.crtc_x;
+ intel_plane->crtc_y = orig.crtc_y;
+ intel_plane->crtc_w = orig.crtc_w;
+ intel_plane->crtc_h = orig.crtc_h;
+ intel_plane->src_x = orig.src_x;
+ intel_plane->src_y = orig.src_y;
+ intel_plane->src_w = orig.src_w;
+ intel_plane->src_h = orig.src_h;
+ intel_plane->obj = obj;
- if (disable_primary)
- intel_disable_primary(crtc);
+ if (intel_crtc->active) {
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary)
+ intel_enable_primary(crtc);
+
+ if (visible)
+ intel_plane->update_plane(plane, crtc, fb, obj,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ else
+ intel_plane->disable_plane(plane, crtc);
+
+ if (disable_primary)
+ intel_disable_primary(crtc);
+ }
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
@@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
- if (old_obj != obj) {
- mutex_unlock(&dev->struct_mutex);
- intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
- mutex_lock(&dev->struct_mutex);
- }
+ if (old_obj != obj && intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
+ mutex_unlock(&dev->struct_mutex);
}
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
static int
@@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int ret = 0;
+ struct intel_crtc *intel_crtc;
if (!plane->fb)
return 0;
@@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane)
if (WARN_ON(!plane->crtc))
return -EINVAL;
- intel_enable_primary(plane->crtc);
- intel_plane->disable_plane(plane, plane->crtc);
+ intel_crtc = to_intel_crtc(plane->crtc);
- if (!intel_plane->obj)
- goto out;
+ if (intel_crtc->active) {
+ intel_enable_primary(plane->crtc);
+ intel_plane->disable_plane(plane, plane->crtc);
+ }
- intel_wait_for_vblank(dev, intel_plane->pipe);
+ if (intel_plane->obj) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_plane->pipe);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_plane->obj);
- intel_plane->obj = NULL;
- mutex_unlock(&dev->struct_mutex);
-out:
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(intel_plane->obj);
+ mutex_unlock(&dev->struct_mutex);
- return ret;
+ intel_plane->obj = NULL;
+ }
+
+ return 0;
}
static void intel_destroy_plane(struct drm_plane *plane)
@@ -921,7 +955,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out_unlock;
}
@@ -950,7 +984,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out_unlock;
}
@@ -1034,7 +1068,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
- intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+ intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
@@ -1058,6 +1092,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
break;
case 7:
+ case 8:
if (IS_IVYBRIDGE(dev)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84bf6c2..22cf0f4ba24 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
}
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@@ -912,7 +919,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
return false;
- pipe_config->adjusted_mode.clock = tv_mode->clock;
+ pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
@@ -1044,7 +1051,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (dev->pci_device < 0x2772)
+ if (dev->pdev->device < 0x2772)
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1101,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1123,7 +1130,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
j = 0;
@@ -1433,7 +1440,6 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1518,7 +1524,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
static int tv_is_present_in_vbt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i, ret;
if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1536,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
/*
* If the device type is not TV, continue.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
- p_child->device_type != DEVICE_TYPE_TV)
+ if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
+ p_child->old.device_type != DEVICE_TYPE_TV)
continue;
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
- if (p_child->addin_offset) {
+ if (p_child->old.addin_offset) {
ret = 1;
break;
}
@@ -1590,12 +1596,12 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+ intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
if (!intel_tv) {
return;
}
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_tv);
return;
@@ -1622,6 +1628,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->get_config = intel_tv_get_config;
intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c36b0..0b02078a0b8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -93,7 +93,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
- if (IS_HASWELL(dev_priv->dev))
+ if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
@@ -112,7 +112,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ if (INTEL_INFO(dev_priv->dev)->gen < 8)
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -204,61 +205,16 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_uncore_early_sanitize(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
-void intel_uncore_init(struct drm_device *dev)
+static void gen6_force_wake_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
- } else if (IS_HASWELL(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
- } else if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* IVB configs may use multi-threaded forcewake */
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+ unsigned long irqflags;
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- } else {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
- }
- } else if (IS_GEN6(dev)) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
- }
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void intel_uncore_forcewake_reset(struct drm_device *dev)
@@ -274,12 +230,49 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
}
}
+void intel_uncore_early_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+ if (IS_HASWELL(dev) &&
+ (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+ /* The docs do not explain exactly how the calculation can be
+ * made. It is somewhat guessable, but for now, it's always
+ * 128MB.
+ * NB: We can't write IDICR yet because we do not have gt funcs
+ * set up */
+ dev_priv->ellc_size = 128;
+ DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ }
+
+ intel_uncore_forcewake_reset(dev);
+}
+
void intel_uncore_sanitize(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg_val;
+
intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
+
+ /* Turn off the power gate; required especially on BIOS-less systems */
+ if (IS_VALLEYVIEW(dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
+
+ if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ }
}
/*
@@ -292,6 +285,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
@@ -305,17 +301,22 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
+ if (!dev_priv->uncore.funcs.force_wake_put)
+ return;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ if (--dev_priv->uncore.forcewake_count == 0) {
+ dev_priv->uncore.forcewake_count++;
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->uncore.force_wake_work,
+ 1);
+ }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000 && (reg) != FORCEWAKE)
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -329,8 +330,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
@@ -340,20 +340,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unclaimed write to %x\n", reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
-#define __i915_read(x) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- if (dev_priv->info->gen == 5) \
- ilk_dummy_write(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_READ_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val
+
+#define __gen4_read(x) \
+static u##x \
+gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ REG_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ ilk_dummy_write(dev_priv); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ REG_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv); \
@@ -363,28 +386,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
+ REG_READ_FOOTER; \
+}
+
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen4_read(8)
+__gen4_read(16)
+__gen4_read(32)
+__gen4_read(64)
+
+#undef __gen6_read
+#undef __gen5_read
+#undef __gen4_read
+#undef REG_READ_FOOTER
+#undef REG_READ_HEADER
+
+#define REG_WRITE_HEADER \
+ unsigned long irqflags; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define __gen4_write(x) \
+static void \
+gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ __raw_i915_write##x(dev_priv, reg, val); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
- return val; \
}
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ ilk_dummy_write(dev_priv); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
-#define __i915_write(x) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
- unsigned long irqflags; \
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_HEADER; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ u32 __fifo_ret = 0; \
+ REG_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- if (dev_priv->info->gen == 5) \
- ilk_dummy_write(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
@@ -393,11 +461,185 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr
hsw_unclaimed_reg_check(dev_priv, reg); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
+
+static const u32 gen8_shadowed_regs[] = {
+ FORCEWAKE_MT,
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+ if (reg == gen8_shadowed_regs[i])
+ return true;
+
+ return false;
+}
+
+#define __gen8_write(x) \
+static void \
+gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
+ REG_WRITE_HEADER; \
+ if (__needs_put) { \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ } \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (__needs_put) { \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ } \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+__gen8_write(8)
+__gen8_write(16)
+__gen8_write(32)
+__gen8_write(64)
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen4_write(8)
+__gen4_write(16)
+__gen4_write(32)
+__gen4_write(64)
+
+#undef __gen8_write
+#undef __hsw_write
+#undef __gen6_write
+#undef __gen5_write
+#undef __gen4_write
+#undef REG_WRITE_HEADER
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
+ gen6_force_wake_work);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+
+ switch (INTEL_INFO(dev)->gen) {
+ default:
+ dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen8_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen8_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ break;
+ case 7:
+ case 6:
+ if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
+ dev_priv->uncore.funcs.mmio_writew = hsw_write16;
+ dev_priv->uncore.funcs.mmio_writel = hsw_write32;
+ dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen6_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen6_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+ }
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ break;
+ case 5:
+ dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen5_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen5_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen5_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen5_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen5_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+ break;
+ case 4:
+ case 3:
+ case 2:
+ dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen4_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen4_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen4_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen4_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen4_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+ break;
+ }
+}
+
+void intel_uncore_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ flush_delayed_work(&dev_priv->uncore.force_wake_work);
+
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_uncore_sanitize(dev);
+}
static const struct register_whitelist {
uint64_t offset;
@@ -445,36 +687,6 @@ int i915_reg_read_ioctl(struct drm_device *dev,
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_I85X(dev))
- return -ENODEV;
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- if (IS_I830(dev) || IS_845G(dev)) {
- I915_WRITE(DEBUG_RESET_I830,
- DEBUG_RESET_DISPLAY |
- DEBUG_RESET_RENDER |
- DEBUG_RESET_FULL);
- POSTING_READ(DEBUG_RESET_I830);
- msleep(1);
-
- I915_WRITE(DEBUG_RESET_I830, 0);
- POSTING_READ(DEBUG_RESET_I830);
- }
-
- msleep(1);
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- return 0;
-}
-
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
@@ -576,7 +788,6 @@ int intel_gpu_reset(struct drm_device *dev)
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
case 4: return i965_do_reset(dev);
- case 2: return i8xx_do_reset(dev);
default: return -ENODEV;
}
}
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166dd445..087db33f6cf 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
- dev->counters += 3;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
-
ret = drm_vblank_init(dev, 1);
if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281def0..2b0ceb8dc11 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
}
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cdec5ee..3a1c5fbae54 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@ config DRM_MGAG200
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2f801..f15ea3c4a90 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_init_object = mgag200_gem_init_object,
.gem_free_object = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae19332e..cf11ee68a6d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
int mgag200_gem_create(struct drm_device *dev,
u32 size, bool iskernel,
struct drm_gem_object **obj);
-int mgag200_gem_init_object(struct drm_gem_object *obj);
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861b10b..b1120cb1db6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-int mgag200_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
- return 0;
-}
-
void mgag200_bo_unref(struct mgag200_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 503a414cbda..ee6ed633b7b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -765,8 +765,6 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
}
mgag200_bo_unreserve(bo);
- DRM_INFO("mga base %llx\n", gpu_addr);
-
mga_set_start_address(crtc, (u32)gpu_addr);
return 0;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19cc56f..f39ab7554fc 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@ config DRM_MSM
config DRM_MSM_FBDEV
bool "Enable legacy fbdev support for MSM modesetting driver"
depends on DRM_MSM
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e17914889e5..e5fa12b0d21 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -21,6 +21,7 @@ msm-y := \
msm_drv.o \
msm_fb.o \
msm_gem.o \
+ msm_gem_prime.o \
msm_gem_submit.o \
msm_gpu.o \
msm_ringbuffer.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 35463864b95..9588098741b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -4,16 +4,16 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -317,6 +317,38 @@ static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
+#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
+#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
+#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
+#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
+#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
+#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
+#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+
#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
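The REG_A2XX_MH_ARBITER_CONFIG block added above follows the usual rules-ng-ng pattern: each multi-bit field gets a MASK/SHIFT pair plus an inline packer, and single-bit fields are plain defines. As a standalone illustration only (the two packers below are restated from the hunk above, not pulled from the generated header), composing a register value looks like this:

	#include <stdint.h>
	#include <stdio.h>

	/* Restated from the generated header above, for illustration only. */
	#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK	0x0000003f
	#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT	0
	static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
	{
		return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) &
		       A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
	}

	#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK	0x00001c00
	#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT	10
	static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
	{
		return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) &
		       A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
	}

	#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE	0x00000080
	#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE	0x00400000

	int main(void)
	{
		/* same-page limit 31, page-size code 1, L1 arbiter + CP client enabled */
		uint32_t cfg = A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(31) |
			       A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
			       A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
			       A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE;
		printf("MH_ARBITER_CONFIG = 0x%08x\n", (unsigned)cfg); /* 0x0040049f */
		return 0;
	}
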
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d183516067b..d4afdf65755 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -4,16 +4,16 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -637,11 +637,12 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
-#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
-#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
-static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
+#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
{
- return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+ return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
@@ -745,6 +746,7 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
@@ -767,7 +769,19 @@ static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
}
-#define REG_A3XX_UNKNOWN_20C3 0x000020c3
+#define REG_A3XX_RB_ALPHA_REF 0x000020c3
+#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
+#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
+static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
+}
+#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
+#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
+}
static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
@@ -1002,7 +1016,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
-#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
@@ -1038,7 +1052,8 @@ static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
-#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
@@ -2074,6 +2089,7 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
@@ -2134,6 +2150,12 @@ static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
{
return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
}
+#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
+}
#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
#define A3XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
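The notable a3xx changes above are decoded fields rather than simple renames: LINEHALFWIDTH now takes a float and packs it as unsigned fixed point with two fractional bits (val * 4.0) starting at bit 3, and the former UNKNOWN_20C3 register is decoded as RB_ALPHA_REF with a uint8 and a half-float reference value. A standalone check of the fixed-point packer, restated from the hunk purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Restated from the generated header above, for illustration only. */
	#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK	0x000007f8
	#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT	3
	static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
	{
		return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) &
		       A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
	}

	int main(void)
	{
		/* a 3-pixel-wide line has a half-width of 1.5 -> 6 in u6.2, placed at bit 3 */
		printf("0x%03x\n", (unsigned)A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(1.5f)); /* 0x030 */
		return 0;
	}
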
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 61979d458ac..33dcc606c7c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -4,16 +4,16 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 94c13f418e7..259ad709b0c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -4,16 +4,16 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6f8396be431..6d4c62bf70d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index aefc1b8feae..d1df38bf574 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index a225e8170b2..0030a111302 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index f5fa4865e05..4e939f82918 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index bee36363bcd..dbde4f6339b 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
index bbeeebe2db5..9908ffe1c3a 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
@@ -42,28 +42,28 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum mpd4_bpc {
+enum mdp4_bpc {
BPC1 = 0,
BPC5 = 1,
BPC6 = 2,
BPC8 = 3,
};
-enum mpd4_bpc_alpha {
+enum mdp4_bpc_alpha {
BPC1A = 0,
BPC4A = 1,
BPC6A = 2,
BPC8A = 3,
};
-enum mpd4_alpha_type {
+enum mdp4_alpha_type {
FG_CONST = 0,
BG_CONST = 1,
FG_PIXEL = 2,
BG_PIXEL = 3,
};
-enum mpd4_pipe {
+enum mdp4_pipe {
VG1 = 0,
VG2 = 1,
RGB1 = 2,
@@ -73,13 +73,13 @@ enum mpd4_pipe {
VG4 = 6,
};
-enum mpd4_mixer {
+enum mdp4_mixer {
MIXER0 = 0,
MIXER1 = 1,
MIXER2 = 2,
};
-enum mpd4_mixer_stage_id {
+enum mdp4_mixer_stage_id {
STAGE_UNUSED = 0,
STAGE_BASE = 1,
STAGE0 = 2,
@@ -194,56 +194,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
}
@@ -254,56 +254,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id va
#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
}
@@ -369,7 +369,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
{
return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
}
@@ -377,7 +377,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
{
return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
}
@@ -472,19 +472,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
}
#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
}
#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
}
@@ -601,9 +601,9 @@ static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) {
static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -617,7 +617,7 @@ static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
@@ -631,7 +631,7 @@ static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
@@ -645,7 +645,7 @@ static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
#define MDP4_PIPE_DST_XY_Y__SHIFT 16
static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
@@ -659,13 +659,13 @@ static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -679,7 +679,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -693,7 +693,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
@@ -707,28 +707,28 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
}
@@ -750,7 +750,7 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
-static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -776,7 +776,7 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
}
-static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
@@ -789,36 +789,36 @@ static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020
#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
-static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
-static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
#define REG_MDP4_LCDC 0x000c0000
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
index de6bea297cd..019d530187f 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -26,6 +26,7 @@ struct mdp4_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
+ struct drm_plane *planes[8];
int id;
int ovlp;
enum mdp4_dma dma;
@@ -50,7 +51,11 @@ struct mdp4_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct work_struct pageflip_work;
+ struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
/* the fb that we currently hold a scanout ref to: */
struct drm_framebuffer *fb;
@@ -92,7 +97,8 @@ static void update_fb(struct drm_crtc *crtc, bool async,
}
}
-static void complete_flip(struct drm_crtc *crtc, bool canceled)
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -102,11 +108,14 @@ static void complete_flip(struct drm_crtc *crtc, bool canceled)
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp4_crtc->event;
if (event) {
- mdp4_crtc->event = NULL;
- if (canceled)
- event->base.destroy(&event->base);
- else
+ /* if regular vblank case (!file) or if cancel-flip from
+ * preclose on file that requested flip, then send the
+ * event:
+ */
+ if (!file || (event->base.file_priv == file)) {
+ mdp4_crtc->event = NULL;
drm_send_vblank_event(dev, mdp4_crtc->id, event);
+ }
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -115,9 +124,15 @@ static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t flush = 0;
+ uint32_t i, flush = 0;
- flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
flush |= ovlp2flush(mdp4_crtc->ovlp);
DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -125,17 +140,29 @@ static void crtc_flush(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
-static void pageflip_worker(struct work_struct *work)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
- container_of(work, struct mdp4_crtc, pageflip_work);
+ container_of(cb, struct mdp4_crtc, pageflip_cb);
struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct drm_framebuffer *fb = crtc->fb;
- mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
+ if (!fb)
+ return;
+
+ mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
crtc_flush(crtc);
/* enable vblank to complete flip: */
- mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+ request_pending(crtc, PENDING_FLIP);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -205,67 +232,69 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp4_kms *mdp4_kms = get_kms(crtc);
int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0;
-
- /*
- * This probably would also need to be triggered by any attached
- * plane when it changes.. for now since we are only using a single
- * private plane, the configuration is hard-coded:
- */
+ static const enum mdp4_mixer_stage_id stages[] = {
+ STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+ };
+ /* statically (for now) map planes to mixer stage (z-order): */
+ static const int idxs[] = {
+ [VG1] = 1,
+ [VG2] = 2,
+ [RGB1] = 0,
+ [RGB2] = 0,
+ [RGB3] = 0,
+ [VG3] = 3,
+ [VG4] = 4,
+
+ };
+ bool alpha[4]= { false, false, false, false };
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+ /* TODO single register for all CRTCs, so this won't work properly
+ * when multiple CRTCs are active..
+ */
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ if (idx > 0) {
+ const struct mdp4_format *format =
+ to_mdp4_format(msm_framebuffer_format(plane->fb));
+ alpha[idx-1] = format->alpha_enable;
+ }
+ mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
+ }
+ }
+
+ /* this shouldn't happen.. and seems to cause underflow: */
+ WARN_ON(!mixer_cfg);
+
for (i = 0; i < 4; i++) {
- mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
- mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
- mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
- MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
- MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
- mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
+ uint32_t op;
+
+ if (alpha[i]) {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
+ } else {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
}
- /* TODO single register for all CRTCs, so this won't work properly
- * when multiple CRTCs are active..
- */
- switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
- case VG1:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
- break;
- case VG2:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
- break;
- case RGB1:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
- break;
- case RGB2:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
- break;
- case RGB3:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
- break;
- case VG3:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
- break;
- case VG4:
- mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
- COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
- break;
- default:
- WARN_ON("invalid pipe");
- break;
- }
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
@@ -377,6 +406,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_object *obj;
+ unsigned long flags;
if (mdp4_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
@@ -385,11 +415,13 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
obj = msm_framebuffer_bo(new_fb, 0);
+ spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
update_fb(crtc, true, new_fb);
- return msm_gem_queue_inactive_work(obj,
- &mdp4_crtc->pageflip_work);
+ return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -498,6 +530,8 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ request_pending(crtc, PENDING_CURSOR);
+
return 0;
fail:
@@ -542,13 +576,21 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
struct drm_crtc *crtc = &mdp4_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
- update_cursor(crtc);
- complete_flip(crtc, false);
mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
- drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
- drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ pending = atomic_xchg(&mdp4_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+ }
+
+ if (pending & PENDING_CURSOR) {
+ update_cursor(crtc);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ }
}
static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
@@ -565,9 +607,10 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
return mdp4_crtc->vblank.irqmask;
}
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
- complete_flip(crtc, true);
+ DBG("cancel: %p", file);
+ complete_flip(crtc, file);
}
/* set dma config, ie. the format the encoder wants. */
@@ -622,6 +665,32 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
+static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
+ struct drm_plane *plane)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
+
+ if (mdp4_crtc->planes[pipe_id] == plane)
+ return;
+
+ mdp4_crtc->planes[pipe_id] = plane;
+ blend_setup(crtc);
+ if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
+ crtc_flush(crtc);
+}
+
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp4_plane_pipe(plane), plane);
+}
+
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp4_plane_pipe(plane), NULL);
+}
+
static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
@@ -644,7 +713,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
- mdp4_crtc->plane->crtc = crtc;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
@@ -668,7 +736,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
"unref cursor", unref_cursor_worker);
- INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
+ INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
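The mdp4_crtc.c rework above replaces the unconditional cursor/flip handling in the vblank irq with a small "pending bits" scheme: requesters OR PENDING_CURSOR/PENDING_FLIP into an atomic word (and arm the vblank irq), and the irq handler drains everything in one pass with atomic_xchg. A rough userspace sketch of that pattern, with illustrative names only (this is not driver code):

	#include <stdatomic.h>
	#include <stdio.h>

	#define PENDING_CURSOR 0x1
	#define PENDING_FLIP   0x2

	static atomic_uint pending = 0;

	static void request_pending(unsigned bits)
	{
		/* producers just accumulate request bits; the real driver
		 * also registers for the vblank irq here */
		atomic_fetch_or(&pending, bits);
	}

	static void vblank_handler(void)
	{
		/* consumer takes all accumulated bits at once and resets them */
		unsigned p = atomic_exchange(&pending, 0);

		if (p & PENDING_FLIP)
			printf("complete page flip\n");
		if (p & PENDING_CURSOR)
			printf("update cursor\n");
	}

	int main(void)
	{
		request_pending(PENDING_FLIP);
		request_pending(PENDING_CURSOR);
		vblank_handler();	/* handles both requests in one pass */
		vblank_handler();	/* nothing left to do */
		return 0;
	}
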
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
index 7b645f2e837..17330b0927b 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -44,6 +44,22 @@ static const struct mdp4_format formats[] = {
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
};
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp4_format *f = &formats[i];
+
+ if (i == max_formats)
+ break;
+
+ pixel_formats[i] = f->base.pixel_format;
+ }
+
+ return i;
+}
+
const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
{
int i;
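mdp4_get_formats() added above copies at most max_formats pixel formats out of the static table and returns how many were written; in the driver the destination would be a plane's formats[]/nformats pair (see struct mdp4_plane further down). A self-contained sketch of that fill-and-count contract, using stand-in values rather than the real DRM fourccs:

	#include <stdint.h>
	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* stand-in format table; the driver's table holds DRM fourcc codes */
	static const uint32_t table[] = { 1, 2, 3 };

	static uint32_t get_formats(uint32_t *out, uint32_t max)
	{
		uint32_t i;
		for (i = 0; i < ARRAY_SIZE(table); i++) {
			if (i == max)
				break;		/* caller's array is full */
			out[i] = table[i];
		}
		return i;			/* number of entries written */
	}

	int main(void)
	{
		uint32_t formats[32];
		uint32_t n = get_formats(formats, ARRAY_SIZE(formats));
		printf("%u formats\n", (unsigned)n);	/* 3 */
		return 0;
	}
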
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index bc7fd11ad8b..8972ac35a43 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -135,7 +135,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
unsigned i;
for (i = 0; i < priv->num_crtcs; i++)
- mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
+ mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
static void mdp4_destroy(struct msm_kms *kms)
@@ -196,6 +196,23 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
* for more than just RGB1->DMA_E->DTV->HDMI
*/
+ /* construct non-private planes: */
+ plane = mdp4_plane_init(dev, VG1, false);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for VG1\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ plane = mdp4_plane_init(dev, VG2, false);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for VG2\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
/* the CRTCs get constructed with a private plane: */
plane = mdp4_plane_init(dev, RGB1, true);
if (IS_ERR(plane)) {
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
index 1e83554955f..eb015c83408 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -75,8 +75,8 @@ struct mdp4_platform_config {
struct mdp4_format {
struct msm_format base;
- enum mpd4_bpc bpc_r, bpc_g, bpc_b;
- enum mpd4_bpc_alpha bpc_a;
+ enum mdp4_bpc bpc_r, bpc_g, bpc_b;
+ enum mdp4_bpc_alpha bpc_a;
uint8_t unpack[4];
bool alpha_enable, unpack_tight;
uint8_t cpp, unpack_count;
@@ -93,7 +93,7 @@ static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
return msm_readl(mdp4_kms->mmio + reg);
}
-static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
+static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
{
switch (pipe) {
case VG1: return MDP4_OVERLAY_FLUSH_VG1;
@@ -133,6 +133,48 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
}
}
+static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
+ enum mdp4_mixer_stage_id stage)
+{
+ uint32_t mixer_cfg = 0;
+
+ switch (pipe) {
+ case VG1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ break;
+ case VG2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ break;
+ case RGB1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ break;
+ case RGB2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ break;
+ case RGB3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ break;
+ case VG3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ break;
+ case VG4:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ break;
+ default:
+ WARN_ON("invalid pipe");
+ break;
+ }
+
+ return mixer_cfg;
+}
+
int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);
@@ -146,6 +188,8 @@ void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
+ uint32_t max_formats);
const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
void mdp4_plane_install_properties(struct drm_plane *plane,
@@ -158,14 +202,16 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
-enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
- enum mpd4_pipe pipe_id, bool private_plane);
+ enum mdp4_pipe pipe_id, bool private_plane);
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
enum mdp4_dma dma_id);
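
The new mixercfg() inline above packs a single pipe's mixer-stage assignment into the LAYERMIXER_IN_CFG bitfield. A minimal sketch of how a caller might OR several pipes into one register value; the stage identifiers STAGE_BASE and STAGE0 are assumed here (they come from the generated mdp4.xml.h header, not from this hunk), and the register name in the comment is only indicative.

/* Hedged sketch, not part of the patch: layer-mixer config for mixer 0
 * with a base RGB pipe plus one VG overlay.  STAGE_BASE and STAGE0 are
 * assumed enum mdp4_mixer_stage_id values from mdp4.xml.h. */
static uint32_t example_mixer_cfg(void)
{
	uint32_t cfg = 0;

	cfg |= mixercfg(0, RGB1, STAGE_BASE);	/* private/base plane */
	cfg |= mixercfg(0, VG1, STAGE0);	/* first overlay stage */

	/* the crtc code would write this to the LAYERMIXER_IN_CFG register */
	return cfg;
}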
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
index 3468229d58b..0f0af243f6f 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -22,7 +22,7 @@ struct mdp4_plane {
struct drm_plane base;
const char *name;
- enum mpd4_pipe pipe;
+ enum mdp4_pipe pipe;
uint32_t nformats;
uint32_t formats[32];
@@ -61,7 +61,9 @@ static int mdp4_plane_update(struct drm_plane *plane,
static int mdp4_plane_disable(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- DBG("%s: TODO", mdp4_plane->name); // XXX
+ DBG("%s: disable", mdp4_plane->name);
+ if (plane->crtc)
+ mdp4_crtc_detach(plane->crtc, plane);
return 0;
}
@@ -101,7 +103,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
- enum mpd4_pipe pipe = mdp4_plane->pipe;
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
uint32_t iova;
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -129,7 +131,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
- enum mpd4_pipe pipe = mdp4_plane->pipe;
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
const struct mdp4_format *format;
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -141,6 +143,10 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
src_w = src_w >> 16;
src_h = src_h >> 16;
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
if (src_w != crtc_w) {
op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
/* TODO calc phasex_step */
@@ -191,7 +197,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
- plane->crtc = crtc;
+ /* TODO detach from old crtc (if we had more than one) */
+ mdp4_crtc_attach(crtc, plane);
return 0;
}
@@ -202,7 +209,7 @@ static const char *pipe_names[] = {
"VG3", "VG4",
};
-enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
return mdp4_plane->pipe;
@@ -210,9 +217,8 @@ enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
- enum mpd4_pipe pipe_id, bool private_plane)
+ enum mdp4_pipe pipe_id, bool private_plane)
{
- struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane = NULL;
struct mdp4_plane *mdp4_plane;
int ret;
@@ -228,8 +234,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
mdp4_plane->pipe = pipe_id;
mdp4_plane->name = pipe_names[pipe_id];
- drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
- mdp4_plane->formats, mdp4_plane->nformats, private_plane);
+ mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
+ ARRAY_SIZE(mdp4_plane->formats));
+
+ drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats,
+ private_plane);
mdp4_plane_install_properties(plane, &plane->base);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b3a2f162904..86537692e45 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -187,6 +187,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
init_waitqueue_head(&priv->fence_event);
INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->fence_cbs);
drm_mode_config_init(dev);
@@ -539,15 +540,36 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}
-/* call under struct_mutex */
+/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
- if (fence > priv->completed_fence) {
- priv->completed_fence = fence;
- wake_up_all(&priv->fence_event);
+ mutex_lock(&dev->struct_mutex);
+ priv->completed_fence = max(fence, priv->completed_fence);
+
+ while (!list_empty(&priv->fence_cbs)) {
+ struct msm_fence_cb *cb;
+
+ cb = list_first_entry(&priv->fence_cbs,
+ struct msm_fence_cb, work.entry);
+
+ if (cb->fence > priv->completed_fence)
+ break;
+
+ list_del_init(&cb->work.entry);
+ queue_work(priv->wq, &cb->work);
}
+
+ mutex_unlock(&dev->struct_mutex);
+
+ wake_up_all(&priv->fence_event);
+}
+
+void __msm_fence_worker(struct work_struct *work)
+{
+ struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
+ cb->func(cb);
}
/*
@@ -650,13 +672,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}
static const struct drm_ioctl_desc msm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -680,7 +702,11 @@ static const struct file_operations fops = {
};
static struct drm_driver msm_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .driver_features = DRIVER_HAVE_IRQ |
+ DRIVER_GEM |
+ DRIVER_PRIME |
+ DRIVER_RENDER |
+ DRIVER_MODESET,
.load = msm_load,
.unload = msm_unload,
.open = msm_open,
@@ -698,6 +724,16 @@ static struct drm_driver msm_driver = {
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = msm_gem_prime_pin,
+ .gem_prime_unpin = msm_gem_prime_unpin,
+ .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+ .gem_prime_vmap = msm_gem_prime_vmap,
+ .gem_prime_vunmap = msm_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index df8f1d084bc..d39f0862b19 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -73,10 +73,16 @@ struct msm_drm_private {
struct workqueue_struct *wq;
+ /* callbacks deferred until bo is inactive: */
+ struct list_head fence_cbs;
+
/* registered IOMMU domains: */
unsigned int num_iommus;
struct iommu_domain *iommus[NUM_DOMAINS];
+ unsigned int num_planes;
+ struct drm_plane *planes[8];
+
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
@@ -94,6 +100,20 @@ struct msm_format {
uint32_t pixel_format;
};
+/* callback from wq once fence has passed: */
+struct msm_fence_cb {
+ struct work_struct work;
+ uint32_t fence;
+ void (*func)(struct msm_fence_cb *cb);
+};
+
+void __msm_fence_worker(struct work_struct *work);
+
+#define INIT_FENCE_CB(_cb, _func) do { \
+ INIT_WORK(&(_cb)->work, __msm_fence_worker); \
+ (_cb)->func = _func; \
+ } while (0)
+
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
@@ -141,17 +161,24 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+struct page **msm_gem_get_pages(struct drm_gem_object *obj);
+void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+void *msm_gem_prime_vmap(struct drm_gem_object *obj);
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size, struct sg_table *sg);
+int msm_gem_prime_pin(struct drm_gem_object *obj);
+void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
- struct work_struct *work);
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+ struct msm_fence_cb *cb);
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
@@ -163,6 +190,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ uint32_t size, struct sg_table *sgt);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
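
The msm_drv.c and msm_drv.h hunks above replace the old per-object inactive_work list with a driver-global list of msm_fence_cb callbacks keyed by fence number, dispatched from msm_update_fence() via the workqueue. A minimal sketch of the intended usage pattern, mirroring the mdp4_crtc pageflip conversion earlier in this diff; the context struct and callback body are placeholders, only INIT_FENCE_CB() and msm_gem_queue_inactive_cb() are taken from the hunks above.

/* Hedged sketch of the new fence-callback API (see the mdp4_crtc.c hunk
 * near the top of this diff for the real in-tree user).  The callback
 * runs on priv->wq once the fence covering 'obj' has been reached. */
struct example_ctx {
	struct msm_fence_cb cb;
};

static void example_fence_cb(struct msm_fence_cb *cb)
{
	struct example_ctx *ctx = container_of(cb, struct example_ctx, cb);
	/* bo is no longer busy; safe to scan out, unref, etc. */
	(void)ctx;
}

static void example_setup(struct example_ctx *ctx, struct drm_gem_object *obj)
{
	INIT_FENCE_CB(&ctx->cb, example_fence_cb);
	/* queues immediately if the bo is already inactive; otherwise
	 * msm_update_fence() queues it once cb->fence has passed */
	msm_gem_queue_inactive_cb(obj, &ctx->cb);
}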
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2bae46c66a3..e587d251c59 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
#include "msm_drv.h"
#include "msm_gem.h"
@@ -77,6 +78,21 @@ static void put_pages(struct drm_gem_object *obj)
}
}
+struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct page **p;
+ mutex_lock(&dev->struct_mutex);
+ p = get_pages(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return p;
+}
+
+void msm_gem_put_pages(struct drm_gem_object *obj)
+{
+ /* when we start tracking the pin count, then do something here */
+}
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -162,6 +178,11 @@ out:
case 0:
case -ERESTARTSYS:
case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@@ -293,7 +314,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
+
+ /* this is safe right now because we don't unmap until the
+ * bo is deleted:
+ */
+ if (msm_obj->domain[id].iova) {
+ *iova = msm_obj->domain[id].iova;
+ return 0;
+ }
+
mutex_lock(&obj->dev->struct_mutex);
ret = msm_gem_get_iova_locked(obj, id, iova);
mutex_unlock(&obj->dev->struct_mutex);
@@ -363,8 +394,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
return ret;
}
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
- struct work_struct *work)
+/* setup callback for when bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+ struct msm_fence_cb *cb)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -372,12 +406,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
int ret = 0;
mutex_lock(&dev->struct_mutex);
- if (!list_empty(&work->entry)) {
+ if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
} else if (is_active(msm_obj)) {
- list_add_tail(&work->entry, &msm_obj->inactive_work);
+ cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+ list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
- queue_work(priv->wq, work);
+ queue_work(priv->wq, &cb->work);
}
mutex_unlock(&dev->struct_mutex);
@@ -410,16 +445,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
msm_obj->write_fence = 0;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
- while (!list_empty(&msm_obj->inactive_work)) {
- struct work_struct *work;
-
- work = list_first_entry(&msm_obj->inactive_work,
- struct work_struct, entry);
-
- list_del_init(&work->entry);
- queue_work(priv->wq, work);
- }
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -510,10 +535,21 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_gem_free_mmap_offset(obj);
- if (msm_obj->vaddr)
- vunmap(msm_obj->vaddr);
+ if (obj->import_attach) {
+ if (msm_obj->vaddr)
+ dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
- put_pages(obj);
+ /* Don't drop the pages for imported dmabuf, as they are not
+ * ours, just free the array we allocated:
+ */
+ if (msm_obj->pages)
+ drm_free_large(msm_obj->pages);
+
+ } else {
+ if (msm_obj->vaddr)
+ vunmap(msm_obj->vaddr);
+ put_pages(obj);
+ }
if (msm_obj->resv == &msm_obj->_resv)
reservation_object_fini(msm_obj->resv);
@@ -549,17 +585,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags)
+static int msm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags,
+ struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- struct drm_gem_object *obj = NULL;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- size = PAGE_ALIGN(size);
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -569,21 +600,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
default:
dev_err(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
- ret = -EINVAL;
- goto fail;
+ return -EINVAL;
}
msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
- if (!msm_obj) {
- ret = -ENOMEM;
- goto fail;
- }
-
- obj = &msm_obj->base;
-
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
+ if (!msm_obj)
+ return -ENOMEM;
msm_obj->flags = flags;
@@ -591,9 +613,69 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
reservation_object_init(msm_obj->resv);
INIT_LIST_HEAD(&msm_obj->submit_entry);
- INIT_LIST_HEAD(&msm_obj->inactive_work);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ *obj = &msm_obj->base;
+
+ return 0;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ size = PAGE_ALIGN(size);
+
+ ret = msm_gem_new_impl(dev, size, flags, &obj);
+ if (ret)
+ goto fail;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+
+ return obj;
+
+fail:
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ uint32_t size, struct sg_table *sgt)
+{
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj;
+ int ret, npages;
+
+ size = PAGE_ALIGN(size);
+
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ if (ret)
+ goto fail;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ npages = size / PAGE_SIZE;
+
+ msm_obj = to_msm_bo(obj);
+ msm_obj->sgt = sgt;
+ msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!msm_obj->pages) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+ if (ret)
+ goto fail;
+
return obj;
fail:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 0676f32e2c6..f4f23a578d9 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -45,9 +45,6 @@ struct msm_gem_object {
*/
struct list_head submit_entry;
- /* work defered until bo is inactive: */
- struct list_head inactive_work;
-
struct page **pages;
struct sg_table *sgt;
void *vaddr;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
new file mode 100644
index 00000000000..d48f9fc5129
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ BUG_ON(!msm_obj->sgt); /* should have already pinned! */
+ return msm_obj->sgt;
+}
+
+void *msm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ return msm_gem_vaddr(obj);
+}
+
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* TODO msm_gem_vunmap() */
+}
+
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size, struct sg_table *sg)
+{
+ return msm_gem_import(dev, size, sg);
+}
+
+int msm_gem_prime_pin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach)
+ msm_gem_get_pages(obj);
+ return 0;
+}
+
+void msm_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach)
+ msm_gem_put_pages(obj);
+}
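
The new file above supplies the pin/vmap/sg_table hooks that the generic drm_gem_prime helpers call into; the helpers themselves are wired up in the msm_driver structure earlier in this diff. A minimal sketch of the import side, assuming only the standard drm_gem_prime_import() entry point; the wrapper function is illustrative.

/* Hedged sketch, not part of the patch: importing an external dma_buf.
 * drm_gem_prime_import() ends up in .gem_prime_import_sg_table
 * (msm_gem_prime_import_sg_table above), which wraps the pages in a new
 * GEM object via msm_gem_import(). */
static struct drm_gem_object *example_import(struct drm_device *dev,
		struct dma_buf *buf)
{
	return drm_gem_prime_import(dev, buf);
}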
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3bab937965d..4583d61556f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -268,6 +268,8 @@ static void retire_worker(struct work_struct *work)
struct drm_device *dev = gpu->dev;
uint32_t fence = gpu->funcs->last_fence(gpu);
+ msm_update_fence(gpu->dev, fence);
+
mutex_lock(&dev->struct_mutex);
while (!list_empty(&gpu->active_list)) {
@@ -287,8 +289,6 @@ static void retire_worker(struct work_struct *work)
}
}
- msm_update_fence(gpu->dev, fence);
-
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12480e..7cf787d697b 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@ config DRM_NOUVEAU
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d939a1da320..edcf801613e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -28,7 +28,9 @@ nouveau-y += core/subdev/bar/nv50.o
nouveau-y += core/subdev/bar/nvc0.o
nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/boost.o
nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/cstep.o
nouveau-y += core/subdev/bios/dcb.o
nouveau-y += core/subdev/bios/disp.o
nouveau-y += core/subdev/bios/dp.o
@@ -39,17 +41,26 @@ nouveau-y += core/subdev/bios/init.o
nouveau-y += core/subdev/bios/mxm.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/timing.o
nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/vmap.o
+nouveau-y += core/subdev/bios/volt.o
nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/hwsq.o
nouveau-y += core/subdev/bus/nv04.o
nouveau-y += core/subdev/bus/nv31.o
nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nv94.o
nouveau-y += core/subdev/bus/nvc0.o
+nouveau-y += core/subdev/clock/base.o
nouveau-y += core/subdev/clock/nv04.o
nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nv84.o
nouveau-y += core/subdev/clock/nva3.o
nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/nve0.o
nouveau-y += core/subdev/clock/pllnv04.o
nouveau-y += core/subdev/clock/pllnva3.o
nouveau-y += core/subdev/devinit/base.o
@@ -78,7 +89,12 @@ nouveau-y += core/subdev/fb/nv47.o
nouveau-y += core/subdev/fb/nv49.o
nouveau-y += core/subdev/fb/nv4e.o
nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nv84.o
+nouveau-y += core/subdev/fb/nva3.o
+nouveau-y += core/subdev/fb/nvaa.o
+nouveau-y += core/subdev/fb/nvaf.o
nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/fb/nve0.o
nouveau-y += core/subdev/fb/ramnv04.o
nouveau-y += core/subdev/fb/ramnv10.o
nouveau-y += core/subdev/fb/ramnv1a.o
@@ -89,7 +105,12 @@ nouveau-y += core/subdev/fb/ramnv44.o
nouveau-y += core/subdev/fb/ramnv49.o
nouveau-y += core/subdev/fb/ramnv4e.o
nouveau-y += core/subdev/fb/ramnv50.o
+nouveau-y += core/subdev/fb/ramnva3.o
+nouveau-y += core/subdev/fb/ramnvaa.o
nouveau-y += core/subdev/fb/ramnvc0.o
+nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr5.o
nouveau-y += core/subdev/gpio/base.o
nouveau-y += core/subdev/gpio/nv10.o
nouveau-y += core/subdev/gpio/nv50.o
@@ -113,13 +134,22 @@ nouveau-y += core/subdev/instmem/nv50.o
nouveau-y += core/subdev/ltcg/nvc0.o
nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv40.o
nouveau-y += core/subdev/mc/nv44.o
nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mc/nvc3.o
nouveau-y += core/subdev/mxm/base.o
nouveau-y += core/subdev/mxm/mxms.o
nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/pwr/base.o
+nouveau-y += core/subdev/pwr/memx.o
+nouveau-y += core/subdev/pwr/nva3.o
+nouveau-y += core/subdev/pwr/nvc0.o
+nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/nv108.o
nouveau-y += core/subdev/therm/base.o
nouveau-y += core/subdev/therm/fan.o
nouveau-y += core/subdev/therm/fannil.o
@@ -140,6 +170,9 @@ nouveau-y += core/subdev/vm/nv41.o
nouveau-y += core/subdev/vm/nv44.o
nouveau-y += core/subdev/vm/nv50.o
nouveau-y += core/subdev/vm/nvc0.o
+nouveau-y += core/subdev/volt/base.o
+nouveau-y += core/subdev/volt/gpio.o
+nouveau-y += core/subdev/volt/nv40.o
nouveau-y += core/engine/falcon.o
nouveau-y += core/engine/xtensa.o
@@ -158,6 +191,7 @@ nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/ctrl.o
nouveau-y += core/engine/device/nv04.o
nouveau-y += core/engine/device/nv10.o
nouveau-y += core/engine/device/nv20.o
@@ -227,8 +261,18 @@ nouveau-y += core/engine/graph/nve4.o
nouveau-y += core/engine/graph/nvf0.o
nouveau-y += core/engine/mpeg/nv31.o
nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv44.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/perfmon/base.o
+nouveau-y += core/engine/perfmon/daemon.o
+nouveau-y += core/engine/perfmon/nv40.o
+nouveau-y += core/engine/perfmon/nv50.o
+nouveau-y += core/engine/perfmon/nv84.o
+nouveau-y += core/engine/perfmon/nva3.o
+nouveau-y += core/engine/perfmon/nvc0.o
+nouveau-y += core/engine/perfmon/nve0.o
+nouveau-y += core/engine/perfmon/nvf0.o
nouveau-y += core/engine/ppp/nv98.o
nouveau-y += core/engine/ppp/nvc0.o
nouveau-y += core/engine/software/nv04.o
@@ -260,9 +304,7 @@ include $(src)/dispnv04/Makefile
nouveau-y += nv50_display.o
# drm/pm
-nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
-nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
-nouveau-y += nouveau_mem.o
+nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
# other random bits
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 7eb81c1b6fa..3f3c76581a9 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -23,62 +23,114 @@
#include <core/os.h>
#include <core/event.h>
-static void
-nouveau_event_put_locked(struct nouveau_event *event, int index,
- struct nouveau_eventh *handler)
+void
+nouveau_event_put(struct nouveau_eventh *handler)
{
- if (!--event->index[index].refs) {
- if (event->disable)
- event->disable(event, index);
+ struct nouveau_event *event = handler->event;
+ unsigned long flags;
+ if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ if (!--event->index[handler->index].refs) {
+ if (event->disable)
+ event->disable(event, handler->index);
+ }
+ spin_unlock_irqrestore(&event->refs_lock, flags);
}
- list_del(&handler->head);
}
void
-nouveau_event_put(struct nouveau_event *event, int index,
- struct nouveau_eventh *handler)
+nouveau_event_get(struct nouveau_eventh *handler)
{
+ struct nouveau_event *event = handler->event;
unsigned long flags;
+ if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ if (!event->index[handler->index].refs++) {
+ if (event->enable)
+ event->enable(event, handler->index);
+ }
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ }
+}
- spin_lock_irqsave(&event->lock, flags);
- if (index < event->index_nr)
- nouveau_event_put_locked(event, index, handler);
- spin_unlock_irqrestore(&event->lock, flags);
+static void
+nouveau_event_fini(struct nouveau_eventh *handler)
+{
+ struct nouveau_event *event = handler->event;
+ unsigned long flags;
+ nouveau_event_put(handler);
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_del(&handler->head);
+ spin_unlock_irqrestore(&event->list_lock, flags);
}
-void
-nouveau_event_get(struct nouveau_event *event, int index,
- struct nouveau_eventh *handler)
+static int
+nouveau_event_init(struct nouveau_event *event, int index,
+ int (*func)(void *, int), void *priv,
+ struct nouveau_eventh *handler)
{
unsigned long flags;
- spin_lock_irqsave(&event->lock, flags);
- if (index < event->index_nr) {
- list_add(&handler->head, &event->index[index].list);
- if (!event->index[index].refs++) {
- if (event->enable)
- event->enable(event, index);
- }
+ if (index >= event->index_nr)
+ return -EINVAL;
+
+ handler->event = event;
+ handler->flags = 0;
+ handler->index = index;
+ handler->func = func;
+ handler->priv = priv;
+
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_add_tail(&handler->head, &event->index[index].list);
+ spin_unlock_irqrestore(&event->list_lock, flags);
+ return 0;
+}
+
+int
+nouveau_event_new(struct nouveau_event *event, int index,
+ int (*func)(void *, int), void *priv,
+ struct nouveau_eventh **phandler)
+{
+ struct nouveau_eventh *handler;
+ int ret = -ENOMEM;
+
+ handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (handler) {
+ ret = nouveau_event_init(event, index, func, priv, handler);
+ if (ret)
+ kfree(handler);
}
- spin_unlock_irqrestore(&event->lock, flags);
+
+ return ret;
+}
+
+void
+nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
+{
+ BUG_ON(handler != NULL);
+ if (*ref) {
+ nouveau_event_fini(*ref);
+ kfree(*ref);
+ }
+ *ref = handler;
}
void
nouveau_event_trigger(struct nouveau_event *event, int index)
{
- struct nouveau_eventh *handler, *temp;
+ struct nouveau_eventh *handler;
unsigned long flags;
- if (index >= event->index_nr)
+ if (WARN_ON(index >= event->index_nr))
return;
- spin_lock_irqsave(&event->lock, flags);
- list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
- if (handler->func(handler, index) == NVKM_EVENT_DROP) {
- nouveau_event_put_locked(event, index, handler);
- }
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_for_each_entry(handler, &event->index[index].list, head) {
+ if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
+ handler->func(handler->priv, index) == NVKM_EVENT_DROP)
+ nouveau_event_put(handler);
}
- spin_unlock_irqrestore(&event->lock, flags);
+ spin_unlock_irqrestore(&event->list_lock, flags);
}
void
@@ -102,7 +154,8 @@ nouveau_event_create(int index_nr, struct nouveau_event **pevent)
if (!event)
return -ENOMEM;
- spin_lock_init(&event->lock);
+ spin_lock_init(&event->list_lock);
+ spin_lock_init(&event->refs_lock);
for (i = 0; i < index_nr; i++)
INIT_LIST_HEAD(&event->index[i].list);
event->index_nr = index_nr;
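
The event.c rework above moves enable/disable refcounting from the (event, index) pair onto each handler: handlers are allocated with nouveau_event_new(), armed and disarmed with nouveau_event_get()/nouveau_event_put(), and unlinked and freed through nouveau_event_ref(NULL, &handler). A minimal sketch of that lifecycle with a placeholder handler; the function signatures are taken from the hunks above, while NVKM_EVENT_KEEP is assumed to be the "keep handler" return value from core/event.h.

/* Hedged sketch of the reworked handler lifecycle. */
static int example_handler(void *priv, int index)
{
	/* returning NVKM_EVENT_DROP would auto-disable this handler */
	return NVKM_EVENT_KEEP;	/* assumed constant from core/event.h */
}

static int example_use(struct nouveau_event *event, int index, void *priv)
{
	struct nouveau_eventh *handler = NULL;
	int ret;

	ret = nouveau_event_new(event, index, example_handler, priv, &handler);
	if (ret)
		return ret;

	nouveau_event_get(handler);		/* enable delivery */
	/* ... triggers on 'index' now invoke example_handler() ... */
	nouveau_event_put(handler);		/* disable delivery */

	nouveau_event_ref(NULL, &handler);	/* unlink and free */
	return 0;
}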
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
index 62a432ea39e..9f6fcc5f66c 100644
--- a/drivers/gpu/drm/nouveau/core/core/option.c
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -25,15 +25,6 @@
#include <core/option.h>
#include <core/debug.h>
-/* compares unterminated string 'str' with zero-terminated string 'cmp' */
-static inline int
-strncasecmpz(const char *str, const char *cmp, size_t len)
-{
- if (strlen(cmp) != len)
- return len;
- return strncasecmp(str, cmp, len);
-}
-
const char *
nouveau_stropt(const char *optstr, const char *opt, int *arglen)
{
@@ -105,7 +96,7 @@ nouveau_dbgopt(const char *optstr, const char *sub)
else if (!strncasecmpz(optstr, "warn", len))
level = NV_DBG_WARN;
else if (!strncasecmpz(optstr, "info", len))
- level = NV_DBG_INFO;
+ level = NV_DBG_INFO_NORMAL;
else if (!strncasecmpz(optstr, "debug", len))
level = NV_DBG_DEBUG;
else if (!strncasecmpz(optstr, "trace", len))
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 52fb2aa129e..03e0060b13d 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,16 +27,38 @@
#include <core/subdev.h>
#include <core/printk.h>
-int nv_printk_suspend_level = NV_DBG_DEBUG;
+int nv_info_debug_level = NV_DBG_INFO_NORMAL;
void
-nv_printk_(struct nouveau_object *object, const char *pfx, int level,
- const char *fmt, ...)
+nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
{
static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+ const char *pfx;
char mfmt[256];
va_list args;
+ switch (level) {
+ case NV_DBG_FATAL:
+ pfx = KERN_CRIT;
+ break;
+ case NV_DBG_ERROR:
+ pfx = KERN_ERR;
+ break;
+ case NV_DBG_WARN:
+ pfx = KERN_WARNING;
+ break;
+ case NV_DBG_INFO_NORMAL:
+ pfx = KERN_INFO;
+ break;
+ case NV_DBG_DEBUG:
+ case NV_DBG_PARANOIA:
+ case NV_DBG_TRACE:
+ case NV_DBG_SPAM:
+ default:
+ pfx = KERN_DEBUG;
+ break;
+ }
+
if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
struct nouveau_object *device = object;
struct nouveau_object *subdev = object;
@@ -74,20 +96,3 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
vprintk(mfmt, args);
va_end(args);
}
-
-#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
-
-const char *nv_printk_level_to_pfx(int level)
-{
- switch (level) {
- CONV_LEVEL(FATAL);
- CONV_LEVEL(ERROR);
- CONV_LEVEL(WARN);
- CONV_LEVEL(INFO);
- CONV_LEVEL(DEBUG);
- CONV_LEVEL(PARANOIA);
- CONV_LEVEL(TRACE);
- CONV_LEVEL(SPAM);
- }
- return NV_PRINTK_DEBUG;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 4c72571655a..9135b25a29d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
#include <core/class.h>
-#include <engine/device.h>
+#include "priv.h"
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
@@ -75,7 +75,9 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE,
[NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
[NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
[NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
@@ -87,7 +89,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
[NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
- [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
+ [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC,
[NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
@@ -119,10 +121,12 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
return -ENODEV;
}
- ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
+ ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
+ nouveau_control_oclass,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_FIFO) |
- (1ULL << NVDEV_ENGINE_DISP), &devobj);
+ (1ULL << NVDEV_ENGINE_DISP) |
+ (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
*pobject = nv_object(devobj);
if (ret)
return ret;
@@ -158,22 +162,29 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
iounmap(map);
/* determine chipset and derive architecture from it */
- if ((boot0 & 0x0f000000) > 0) {
- device->chipset = (boot0 & 0xff00000) >> 20;
- switch (device->chipset & 0xf0) {
- case 0x10: device->card_type = NV_10; break;
- case 0x20: device->card_type = NV_20; break;
- case 0x30: device->card_type = NV_30; break;
- case 0x40:
- case 0x60: device->card_type = NV_40; break;
- case 0x50:
- case 0x80:
- case 0x90:
- case 0xa0: device->card_type = NV_50; break;
- case 0xc0: device->card_type = NV_C0; break;
- case 0xd0: device->card_type = NV_D0; break;
- case 0xe0:
- case 0xf0: device->card_type = NV_E0; break;
+ if ((boot0 & 0x1f000000) > 0) {
+ device->chipset = (boot0 & 0x1ff00000) >> 20;
+ switch (device->chipset & 0x1f0) {
+ case 0x010: {
+ if (0x461 & (1 << (device->chipset & 0xf)))
+ device->card_type = NV_10;
+ else
+ device->card_type = NV_11;
+ break;
+ }
+ case 0x020: device->card_type = NV_20; break;
+ case 0x030: device->card_type = NV_30; break;
+ case 0x040:
+ case 0x060: device->card_type = NV_40; break;
+ case 0x050:
+ case 0x080:
+ case 0x090:
+ case 0x0a0: device->card_type = NV_50; break;
+ case 0x0c0: device->card_type = NV_C0; break;
+ case 0x0d0: device->card_type = NV_D0; break;
+ case 0x0e0:
+ case 0x0f0:
+ case 0x100: device->card_type = NV_E0; break;
default:
break;
}
@@ -188,7 +199,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
switch (device->card_type) {
case NV_04: ret = nv04_identify(device); break;
- case NV_10: ret = nv10_identify(device); break;
+ case NV_10:
+ case NV_11: ret = nv10_identify(device); break;
case NV_20: ret = nv20_identify(device); break;
case NV_30: ret = nv30_identify(device); break;
case NV_40: ret = nv40_identify(device); break;
@@ -212,7 +224,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
nv_info(device, "Family : NV%02X\n", device->card_type);
/* determine frequency of timing crystal */
- if ( device->chipset < 0x17 ||
+ if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
(device->chipset >= 0x20 && device->chipset < 0x25))
strap &= 0x00000040;
else
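
The widened boot0 masks above make room for the new 0x100 chipset range (handled as NV_E0), and the 0x461 bitmask splits the 0x01x family: bits 0, 5, 6 and 10 select chipsets 0x10, 0x15, 0x16 and 0x1a as NV_10, while the remaining 0x01x chipsets (0x11, 0x17, 0x18, 0x1f, ...) fall into the new NV_11 card type. A small stand-alone check of that decode, for illustration only:

/* Hedged illustration, not part of the patch: which 0x01x chipsets the
 * 0x461 mask classifies as NV_10 (bit n set => chipset 0x10 + n). */
#include <stdio.h>

int main(void)
{
	const unsigned mask = 0x461;	/* bits 0, 5, 6, 10 */
	unsigned chipset;

	for (chipset = 0x10; chipset <= 0x1f; chipset++)
		printf("chipset 0x%02x -> %s\n", chipset,
		       (mask & (1u << (chipset & 0xf))) ? "NV_10" : "NV_11");
	return 0;
}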
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
new file mode 100644
index 00000000000..4b69bf56ed0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+static int
+nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_clock *clk = nouveau_clock(object);
+ struct nv_control_pstate_info *args = data;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ if (clk) {
+ args->count = clk->state_nr;
+ args->ustate = clk->ustate;
+ args->pstate = clk->pstate;
+ } else {
+ args->count = 0;
+ args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
+ args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_clock *clk = nouveau_clock(object);
+ struct nv_control_pstate_attr *args = data;
+ struct nouveau_clocks *domain;
+ struct nouveau_pstate *pstate;
+ struct nouveau_cstate *cstate;
+ int i = 0, j = -1;
+ u32 lo, hi;
+
+ if ((size < sizeof(*args)) || !clk ||
+ (args->state >= 0 && args->state >= clk->state_nr))
+ return -EINVAL;
+ domain = clk->domains;
+
+ while (domain->name != nv_clk_src_max) {
+ if (domain->mname && ++j == args->index)
+ break;
+ domain++;
+ }
+
+ if (domain->name == nv_clk_src_max)
+ return -EINVAL;
+
+ if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+ list_for_each_entry(pstate, &clk->states, head) {
+ if (i++ == args->state)
+ break;
+ }
+
+ lo = pstate->base.domain[domain->name];
+ hi = lo;
+ list_for_each_entry(cstate, &pstate->list, head) {
+ lo = min(lo, cstate->domain[domain->name]);
+ hi = max(hi, cstate->domain[domain->name]);
+ }
+
+ args->state = pstate->pstate;
+ } else {
+ lo = max(clk->read(clk, domain->name), 0);
+ hi = lo;
+ }
+
+ snprintf(args->name, sizeof(args->name), "%s", domain->mname);
+ snprintf(args->unit, sizeof(args->unit), "MHz");
+ args->min = lo / domain->mdiv;
+ args->max = hi / domain->mdiv;
+
+ args->index = 0;
+ while ((++domain)->name != nv_clk_src_max) {
+ if (domain->mname) {
+ args->index = ++j;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_clock *clk = nouveau_clock(object);
+ struct nv_control_pstate_user *args = data;
+
+ if (size < sizeof(*args) || !clk)
+ return -EINVAL;
+
+ return nouveau_clock_ustate(clk, args->state);
+}
+
+struct nouveau_oclass
+nouveau_control_oclass[] = {
+ { .handle = NV_CONTROL_CLASS,
+ .ofuncs = &nouveau_object_ofuncs,
+ .omthds = (struct nouveau_omthds[]) {
+ { NV_CONTROL_PSTATE_INFO,
+ NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
+ { NV_CONTROL_PSTATE_ATTR,
+ NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
+ { NV_CONTROL_PSTATE_USER,
+ NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
+ {},
+ },
+ },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index a0284cf09c0..dbd2dde7b7e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -50,15 +50,15 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -68,15 +68,15 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 1b7809a095c..6e03dd6abee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -52,10 +52,10 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -69,15 +69,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -88,15 +88,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -107,15 +107,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -126,15 +126,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -145,15 +145,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -164,15 +164,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -183,15 +183,15 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 12a4005fa61..dcde53b9f07 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -53,15 +53,15 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -72,15 +72,15 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -91,15 +91,15 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -110,15 +110,15 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index cef0f1ea4c2..7b8662ef4f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -53,15 +53,15 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -72,15 +72,15 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
@@ -91,15 +91,15 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
@@ -111,15 +111,15 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
@@ -131,15 +131,15 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1719cb0ee59..c8c41e93695 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -35,6 +35,7 @@
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <subdev/volt.h>

#include <engine/device.h>
#include <engine/dmaobj.h>
@@ -43,6 +44,7 @@
#include <engine/graph.h>
#include <engine/mpeg.h>
#include <engine/disp.h>
+#include <engine/perfmon.h>

int
nv40_identify(struct nouveau_device *device)
@@ -56,18 +58,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x41:
device->cname = "NV41";
@@ -77,18 +81,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x42:
device->cname = "NV42";
@@ -98,18 +104,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x43:
device->cname = "NV43";
@@ -119,18 +127,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x45:
device->cname = "NV45";
@@ -140,18 +150,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x47:
device->cname = "G70";
@@ -161,18 +173,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x49:
device->cname = "G71";
@@ -182,18 +196,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4b:
device->cname = "G73";
@@ -203,18 +219,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x44:
device->cname = "NV44";
@@ -224,18 +242,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x46:
device->cname = "G72";
@@ -245,18 +265,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4a:
device->cname = "NV44A";
@@ -266,18 +288,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4c:
device->cname = "C61";
@@ -287,18 +311,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x4e:
device->cname = "C51";
@@ -308,18 +334,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x63:
device->cname = "C73";
@@ -329,18 +357,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x67:
device->cname = "C67";
@@ -350,18 +380,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
case 0x68:
device->cname = "C68";
@@ -371,18 +403,20 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
break;
default:
nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ffc18b80c5d..db139827047 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -36,6 +36,8 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>

#include <engine/device.h>
#include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
#include <engine/ppp.h>
#include <engine/copy.h>
#include <engine/disp.h>
+#include <engine/perfmon.h>

int
nv50_identify(struct nouveau_device *device)
@@ -59,257 +62,277 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass;
break;
case 0x84:
device->cname = "G84";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x86:
device->cname = "G86";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x92:
device->cname = "G92";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x94:
device->cname = "G94";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x96:
device->cname = "G96";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0x98:
device->cname = "G98";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xa0:
device->cname = "G200";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xaa:
device->cname = "MCP77/MCP78";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xac:
device->cname = "MCP79/MCP7A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
break;
case 0xa3:
device->cname = "GT215";
@@ -320,16 +343,18 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
@@ -337,6 +362,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
case 0xa5:
device->cname = "GT216";
@@ -347,22 +373,25 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
case 0xa8:
device->cname = "GT218";
@@ -373,22 +402,25 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
case 0xaf:
device->cname = "MCP89";
@@ -399,22 +431,25 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
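
The identify tables above drop the leading '&' on several entries (MC, BUS, FB, FIFO, SW, PERFMON) because those subdev/engine symbols are now exported as pointers initialised from compound literals, as the fifo hunks further down show for nv84_fifo_oclass and friends. A minimal user-space sketch of that C idiom follows; the names (my_oclass, my_ofuncs, hello) are illustrative, not nouveau symbols.

#include <stdio.h>

struct my_ofuncs {
	void (*init)(void);
};

struct my_oclass {
	unsigned handle;
	const struct my_ofuncs *ofuncs;
};

static void hello(void) { puts("init"); }

/* old style: an exported object; tables store &old_oclass */
static struct my_oclass old_oclass = {
	.handle = 0x50,
	.ofuncs = &(struct my_ofuncs) { .init = hello },
};

/* new style: the exported symbol is already a pointer, initialised from
 * an anonymous compound literal, so tables assign it without '&' */
static struct my_oclass *new_oclass = &(struct my_oclass) {
	.handle = 0x84,
	.ofuncs = &(struct my_ofuncs) { .init = hello },
};

int main(void)
{
	struct my_oclass *table[] = { &old_oclass, new_oclass };
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		table[i]->ofuncs->init();
	return 0;
}
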
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 418f51f50d7..8d06eef2b9e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -38,6 +38,8 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
#include <engine/device.h>
#include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
#include <engine/ppp.h>
#include <engine/copy.h>
#include <engine/disp.h>
+#include <engine/perfmon.h>
int
nvc0_identify(struct nouveau_device *device)
@@ -63,18 +66,20 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -82,6 +87,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc4:
device->cname = "GF104";
@@ -92,18 +98,20 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -111,6 +119,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc3:
device->cname = "GF106";
@@ -121,24 +130,27 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xce:
device->cname = "GF114";
@@ -149,18 +161,20 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -168,6 +182,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xcf:
device->cname = "GF116";
@@ -178,18 +193,20 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -197,6 +214,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc1:
device->cname = "GF108";
@@ -207,24 +225,27 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xc8:
device->cname = "GF110";
@@ -235,18 +256,20 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -254,6 +277,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xd9:
device->cname = "GF119";
@@ -264,24 +288,27 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
case 0xd7:
device->cname = "GF117";
@@ -292,24 +319,25 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
default:
nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 7aca1877add..3900104976f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -38,6 +38,8 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
#include <engine/device.h>
#include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
#include <engine/bsp.h>
#include <engine/vp.h>
#include <engine/ppp.h>
+#include <engine/perfmon.h>
int
nve0_identify(struct nouveau_device *device)
@@ -59,22 +62,24 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -83,28 +88,31 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
break;
case 0xe7:
device->cname = "GK107";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -113,28 +121,31 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
break;
case 0xe6:
device->cname = "GK106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -143,28 +154,31 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
break;
case 0xf0:
device->cname = "GK110";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -175,6 +189,43 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
#endif
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+ break;
+ case 0x108:
+ device->cname = "GK208";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
new file mode 100644
index 00000000000..035fd5b9cfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_DEVICE_PRIV_H__
+#define __NVKM_DEVICE_PRIV_H__
+
+#include <engine/device.h>
+
+extern struct nouveau_oclass nouveau_control_oclass[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 054d9cff4f5..1bd4c63369c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -70,17 +70,10 @@ dp_set_link_config(struct dp_state *dp)
};
u32 lnkcmp;
u8 sink[2];
+ int ret;
DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
- /* set desired link configuration on the sink */
- sink[0] = dp->link_bw / 27000;
- sink[1] = dp->link_nr;
- if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
- sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
-
- nv_wraux(dp->aux, DPCD_LC00, sink, 2);
-
/* set desired link configuration on the source */
if ((lnkcmp = dp->info.lnkcmp)) {
if (dp->version < 0x30) {
@@ -96,10 +89,22 @@ dp_set_link_config(struct dp_state *dp)
nvbios_exec(&init);
}
- return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
- dp->link_nr, dp->link_bw / 27000,
- dp->dpcd[DPCD_RC02] &
- DPCD_RC02_ENHANCED_FRAME_CAP);
+ ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+ dp->link_nr, dp->link_bw / 27000,
+ dp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
+ if (ret) {
+ ERR("lnk_ctl failed with %d\n", ret);
+ return ret;
+ }
+
+ /* set desired link configuration on the sink */
+ sink[0] = dp->link_bw / 27000;
+ sink[1] = dp->link_nr;
+ if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+ return nv_wraux(dp->aux, DPCD_LC00, sink, 2);
}
static void
@@ -294,8 +299,17 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
if (ret) {
+ /* it's possible the display has been unplugged before we
+ * get here. we still need to execute the full set of
+ * vbios scripts, and program the OR at a high enough
+ * frequency to satisfy the target mode. failure to do
+ * so results at best in an UPDATE hanging, and at worst
+ * with PDISP running away to join the circus.
+ */
+ dp->dpcd[1] = link_bw[0] / 27000;
+ dp->dpcd[2] = 4;
+ dp->dpcd[3] = 0x00;
ERR("failed to read DPCD\n");
- return ret;
}
/* adjust required bandwidth for 8B/10B coding overhead */
@@ -308,7 +322,7 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
while (*link_bw > (dp->dpcd[1] * 27000))
link_bw++;
- while (link_bw[0]) {
+ while ((ret = -EIO) && link_bw[0]) {
/* find minimum required lane count at this link rate */
dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
while ((dp->link_nr >> 1) * link_bw[0] > datarate)
@@ -328,8 +342,10 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
!dp_link_train_eq(dp))
break;
} else
- if (ret >= 1) {
- /* dp_set_link_config() handled training */
+ if (ret) {
+ /* dp_set_link_config() handled training, or
+ * we failed to communicate with the sink.
+ */
break;
}
@@ -339,8 +355,10 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
/* finish link training */
dp_set_training_pattern(dp, 0);
+ if (ret < 0)
+ ERR("link training failed\n");
/* execute post-train script from vbios */
dp_link_train_fini(dp);
- return true;
+ return (ret < 0) ? false : true;
}
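
The dport.c hunks above reorder link configuration so the source side is programmed, and the lnk_ctl return code checked, before the sink's DPCD registers are written, rather than discarding the error. A generic sketch of that ordering/error-propagation pattern, assuming hypothetical configure_source()/configure_sink() helpers:

#include <stdio.h>

/* hypothetical stand-ins for the source/sink programming steps */
static int configure_source(int lanes, int bw_khz) { return 0; }
static int configure_sink(int lanes, int bw_khz)   { return 0; }

static int set_link_config(int lanes, int bw_khz)
{
	/* program the source first; bail out before touching the sink */
	int ret = configure_source(lanes, bw_khz);
	if (ret) {
		fprintf(stderr, "source link config failed: %d\n", ret);
		return ret;
	}
	return configure_sink(lanes, bw_khz);
}

int main(void)
{
	return set_link_config(4, 270000) ? 1 : 0;
}
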
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 05e903f08a3..a0bc8a89b69 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -59,6 +59,7 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
struct nv04_disp_priv *priv = (void *)subdev;
u32 crtc0 = nv_rd32(priv, 0x600100);
u32 crtc1 = nv_rd32(priv, 0x602100);
+ u32 pvideo;
if (crtc0 & 0x00000001) {
nouveau_event_trigger(priv->base.vblank, 0);
@@ -69,6 +70,14 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
+
+ if (nv_device(priv)->chipset >= 0x10 &&
+ nv_device(priv)->chipset <= 0x40) {
+ pvideo = nv_rd32(priv, 0x8100);
+ if (pvideo & ~0x11)
+ nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
+ nv_wr32(priv, 0x8100, pvideo);
+ }
}
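
The nv04.c hunk above adds PVIDEO interrupt handling on NV10..NV40: read the status register, report anything outside the expected 0x11 bits, then write the value back to acknowledge it. A small sketch of that read/report/ack pattern against a fake write-1-to-clear register; the accessors are stand-ins, not the driver's MMIO helpers.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_pvideo = 0x00000101;	/* pretend status register */

static uint32_t reg_rd(void)       { return fake_pvideo; }
static void     reg_wr(uint32_t v) { fake_pvideo &= ~v; }	/* write-1-to-clear */

static void pvideo_intr(void)
{
	uint32_t stat = reg_rd();

	/* 0x11 covers the routinely expected sources; log anything else */
	if (stat & ~0x11u)
		printf("PVIDEO intr: %08x\n", stat);

	reg_wr(stat);	/* acknowledge everything we saw */
}

int main(void)
{
	pvideo_intr();
	return 0;
}
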
static int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 52dd7a1db72..378a015091d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -541,6 +541,15 @@ nvd0_disp_base_init(struct nouveau_object *object)
nv_wr32(priv, 0x6100a0, 0x00000000);
nv_wr32(priv, 0x6100b0, 0x00000307);
+ /* disable underflow reporting, preventing an intermittent issue
+ * on some nve4 boards where the production vbios left this
+ * setting enabled by default.
+ *
+ * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+ */
+ for (i = 0; i < priv->head.nr; i++)
+ nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 7ec4ee83fb6..eea3ef59693 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -97,8 +97,9 @@ nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
{
struct nouveau_bios *bios = nouveau_bios(disp);
struct nv50_disp_priv *priv = (void *)disp;
+ const u32 shift = nv94_sor_dp_lane_map(priv, lane);
const u32 loff = nv94_sor_loff(outp);
- u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+ u32 addr, data[3];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
@@ -113,9 +114,12 @@ nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
if (!addr)
return -EINVAL;
- nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
- nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
- nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
+ nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
+ nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
+ nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 9e1d435d728..d2df572f16a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -93,8 +93,9 @@ nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
{
struct nouveau_bios *bios = nouveau_bios(disp);
struct nv50_disp_priv *priv = (void *)disp;
+ const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
const u32 loff = nvd0_sor_loff(outp);
- u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+ u32 addr, data[3];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
@@ -109,9 +110,12 @@ nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
if (!addr)
return -EINVAL;
- nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
- nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
- nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
+ nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
+ nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
+ nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
return 0;
}
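
Both sornv94.c and sornvd0.c replace three independent masked writes with a sequence that reads all three registers first and only then writes the merged values back to back. A user-space sketch of that snapshot-then-write pattern over a fake register file; the field layout mirrors the hunks above, but the accessors are purely illustrative.

#include <stdint.h>

static uint32_t regs[3];	/* fake 0x61c118 / 0x61c120 / 0x61c130 */

static uint32_t reg_rd(int i)             { return regs[i]; }
static void     reg_wr(int i, uint32_t v) { regs[i] = v; }

static void dp_drv_ctl(unsigned shift, uint32_t drv, uint32_t pre, uint32_t unk)
{
	uint32_t data[3];

	/* snapshot all registers and clear the fields being updated ... */
	data[0] = reg_rd(0) & ~(0x000000ffu << shift);
	data[1] = reg_rd(1) & ~(0x000000ffu << shift);
	data[2] = reg_rd(2) & ~0x0000ff00u;

	/* ... then perform the writes back to back */
	reg_wr(0, data[0] | (drv << shift));
	reg_wr(1, data[1] | (pre << shift));
	reg_wr(2, data[2] | (unk << 8));
}

int main(void)
{
	dp_drv_ctl(8, 0x3f, 0x11, 0x05);
	return 0;
}
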
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index f877bd524a9..54f26cc801c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -632,8 +632,8 @@ nv04_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv04_fifo_oclass = {
+struct nouveau_oclass *
+nv04_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2c927c1d173..571a22aa1ae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -159,8 +159,8 @@ nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv10_fifo_oclass = {
+struct nouveau_oclass *
+nv10_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0x10),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv10_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index a9cb51d38c5..f2576020931 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -196,8 +196,8 @@ nv17_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv17_fifo_oclass = {
+struct nouveau_oclass *
+nv17_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0x17),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv17_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 5c7433d5069..343487ed223 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -337,8 +337,8 @@ nv40_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv40_fifo_oclass = {
+struct nouveau_oclass *
+nv40_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7e5dff51d3c..5f555788121 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -502,8 +502,8 @@ nv50_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nv50_fifo_oclass = {
+struct nouveau_oclass *
+nv50_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 91a87cd7195..0908dc834c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -144,7 +144,7 @@ nv84_fifo_object_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
case NVDEV_ENGINE_VP : context |= 0x00400000; break;
case NVDEV_ENGINE_CRYPT :
- case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+ case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
default:
return -EINVAL;
@@ -180,7 +180,7 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
(1ULL << NVDEV_ENGINE_BSP) |
(1ULL << NVDEV_ENGINE_PPP) |
(1ULL << NVDEV_ENGINE_COPY0) |
- (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_VIC), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -243,7 +243,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
(1ULL << NVDEV_ENGINE_BSP) |
(1ULL << NVDEV_ENGINE_PPP) |
(1ULL << NVDEV_ENGINE_COPY0) |
- (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_VIC), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -435,8 +435,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-struct nouveau_oclass
-nv84_fifo_oclass = {
+struct nouveau_oclass *
+nv84_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ce92f289e75..9ac94d4e564 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -494,13 +494,6 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
- if (stat & 0x00200000) {
- if (mthd == 0x0054) {
- if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
- show &= ~0x00200000;
- }
- }
-
if (stat & 0x00800000) {
if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
@@ -720,8 +713,8 @@ nvc0_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nvc0_fifo_oclass = {
+struct nouveau_oclass *
+nvc0_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 8e8121abe31..04f412922d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -481,13 +481,6 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
- if (stat & 0x00200000) {
- if (mthd == 0x0054) {
- if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
- show &= ~0x00200000;
- }
- }
-
if (stat & 0x00800000) {
if (!nve0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
@@ -675,8 +668,8 @@ nve0_fifo_init(struct nouveau_object *object)
return 0;
}
-struct nouveau_oclass
-nve0_fifo_oclass = {
+struct nouveau_oclass *
+nve0_fifo_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(FIFO, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 64dca260912..fe67415c3e1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -1039,7 +1039,7 @@ nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
} while (!tpcnr[gpc]);
tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
- tpc_set |= 1 << ((gpc * 8) + tpc);
+ tpc_set |= 1ULL << ((gpc * 8) + tpc);
}
nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
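
The ctxnvc0.c hunk above is a shift-width fix: tpc_set is a 64-bit accumulator, but "1 << n" is evaluated as a 32-bit int, so GPC/TPC combinations with a bit index of 32 or more were lost (and the shift itself is undefined). A short illustration of the corrected form, with made-up gpc/tpc values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tpc_set = 0;
	int gpc = 5, tpc = 3;		/* bit index (5 * 8) + 3 = 43 */
	int bit = (gpc * 8) + tpc;

	/* with a plain int constant the shift would be done in 32 bits:
	 *	tpc_set |= 1 << bit;	// undefined for bit >= 32
	 * widening the constant keeps the high bits:
	 */
	tpc_set |= 1ULL << bit;

	printf("tpc_set = %#llx\n", (unsigned long long)tpc_set);
	return 0;
}
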
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index e5be3ee7f17..71b4283f7fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -587,6 +587,7 @@ nvc1_grctx_init_unk58xx[] = {
{ 0x405870, 4, 0x04, 0x00000001 },
{ 0x405a00, 2, 0x04, 0x00000000 },
{ 0x405a18, 1, 0x04, 0x00000000 },
+ {}
};
static struct nvc0_graph_init
@@ -598,6 +599,7 @@ nvc1_grctx_init_rop[] = {
{ 0x408904, 1, 0x04, 0x62000001 },
{ 0x408908, 1, 0x04, 0x00c80929 },
{ 0x408980, 1, 0x04, 0x0000011d },
+ {}
};
static struct nvc0_graph_init
@@ -671,6 +673,7 @@ nvc1_grctx_init_gpc_0[] = {
{ 0x419000, 1, 0x04, 0x00000780 },
{ 0x419004, 2, 0x04, 0x00000000 },
{ 0x419014, 1, 0x04, 0x00000004 },
+ {}
};
static struct nvc0_graph_init
@@ -717,6 +720,7 @@ nvc1_grctx_init_tpc[] = {
{ 0x419e98, 1, 0x04, 0x00000000 },
{ 0x419ee0, 1, 0x04, 0x00011110 },
{ 0x419f30, 11, 0x04, 0x00000000 },
+ {}
};
void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 438e7841080..c4740d52853 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -258,6 +258,7 @@ nvd7_grctx_init_hub[] = {
nvc0_grctx_init_unk78xx,
nvc0_grctx_init_unk80xx,
nvd9_grctx_init_rop,
+ NULL
};
struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index 818a4751df4..a1102cbf2fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -466,6 +466,7 @@ nvd9_grctx_init_hub[] = {
nvc0_grctx_init_unk78xx,
nvc0_grctx_init_unk80xx,
nvd9_grctx_init_rop,
+ NULL
};
struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 23c143aaa55..4532f7e5618 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -945,7 +945,8 @@ nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
- if (nv_device(priv)->chipset >= 0x17) {
+ if (nv_device(priv)->card_type >= NV_11 &&
+ nv_device(priv)->chipset >= 0x17) {
for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
}
@@ -970,7 +971,8 @@ nv10_graph_unload_context(struct nv10_graph_chan *chan)
for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
- if (nv_device(priv)->chipset >= 0x17) {
+ if (nv_device(priv)->card_type >= NV_11 &&
+ nv_device(priv)->chipset >= 0x17) {
for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
}
@@ -1052,7 +1054,8 @@ nv10_graph_context_ctor(struct nouveau_object *parent,
NV_WRITE_CTX(0x00400e14, 0x00001000);
NV_WRITE_CTX(0x00400e30, 0x00080008);
NV_WRITE_CTX(0x00400e34, 0x00080008);
- if (nv_device(priv)->chipset >= 0x17) {
+ if (nv_device(priv)->card_type >= NV_11 &&
+ nv_device(priv)->chipset >= 0x17) {
/* is it really needed ??? */
NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
@@ -1231,7 +1234,7 @@ nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv10_graph_sclass;
else
if (nv_device(priv)->chipset < 0x17 ||
- nv_device(priv)->chipset == 0x1a)
+ nv_device(priv)->card_type < NV_11)
nv_engine(priv)->sclass = nv15_graph_sclass;
else
nv_engine(priv)->sclass = nv17_graph_sclass;
@@ -1270,7 +1273,8 @@ nv10_graph_init(struct nouveau_object *object)
nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
- if (nv_device(priv)->chipset >= 0x17) {
+ if (nv_device(priv)->card_type >= NV_11 &&
+ nv_device(priv)->chipset >= 0x17) {
nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
nv_wr32(priv, 0x400a10, 0x03ff3fb6);
nv_wr32(priv, 0x400838, 0x002f8684);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 3f4f35cc384..434bb4b0fa2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1138,7 +1138,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->unit = 0x18001000;
+ nv_subdev(priv)->unit = 0x08001000;
nv_subdev(priv)->intr = nvc0_graph_intr;
priv->base.units = nvc0_graph_units;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index c1900430130..7eb6d94c84e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -34,16 +34,7 @@
#include <engine/fifo.h>
#include <engine/mpeg.h>
-#include <engine/graph/nv40.h>
-
-struct nv31_mpeg_priv {
- struct nouveau_mpeg base;
- atomic_t refcount;
-};
-
-struct nv31_mpeg_chan {
- struct nouveau_object base;
-};
+#include <engine/mpeg/nv31.h>
/*******************************************************************************
* MPEG object classes
@@ -89,18 +80,18 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
if (mthd == 0x0190) {
/* DMA_CMD */
- nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+ nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
nv_wr32(priv, 0x00b334, base);
nv_wr32(priv, 0x00b324, size);
} else
if (mthd == 0x01a0) {
/* DMA_DATA */
- nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+ nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
nv_wr32(priv, 0x00b360, base);
nv_wr32(priv, 0x00b364, size);
} else {
/* DMA_IMAGE, VRAM only */
- if (dma0 & 0x000c0000)
+ if (dma0 & 0x00030000)
return -EINVAL;
nv_wr32(priv, 0x00b370, base);
@@ -110,7 +101,7 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
return 0;
}
-static struct nouveau_ofuncs
+struct nouveau_ofuncs
nv31_mpeg_ofuncs = {
.ctor = nv31_mpeg_object_ctor,
.dtor = _nouveau_gpuobj_dtor,
@@ -146,16 +137,23 @@ nv31_mpeg_context_ctor(struct nouveau_object *parent,
{
struct nv31_mpeg_priv *priv = (void *)engine;
struct nv31_mpeg_chan *chan;
+ unsigned long flags;
int ret;
- if (!atomic_add_unless(&priv->refcount, 1, 1))
- return -EBUSY;
-
ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
+ spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+ if (priv->chan) {
+ spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
+ nouveau_object_destroy(&chan->base);
+ *pobject = NULL;
+ return -EBUSY;
+ }
+ priv->chan = chan;
+ spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
return 0;
}
@@ -164,11 +162,15 @@ nv31_mpeg_context_dtor(struct nouveau_object *object)
{
struct nv31_mpeg_priv *priv = (void *)object->engine;
struct nv31_mpeg_chan *chan = (void *)object;
- atomic_dec(&priv->refcount);
+ unsigned long flags;
+
+ spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+ priv->chan = NULL;
+ spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
nouveau_object_destroy(&chan->base);
}
-static struct nouveau_oclass
+struct nouveau_oclass
nv31_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x31),
.ofuncs = &(struct nouveau_ofuncs) {
@@ -197,21 +199,19 @@ nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
void
nv31_mpeg_intr(struct nouveau_subdev *subdev)
{
+ struct nv31_mpeg_priv *priv = (void *)subdev;
struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
struct nouveau_handle *handle;
- struct nv31_mpeg_priv *priv = (void *)subdev;
- u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+ struct nouveau_object *engctx;
u32 stat = nv_rd32(priv, 0x00b100);
u32 type = nv_rd32(priv, 0x00b230);
u32 mthd = nv_rd32(priv, 0x00b234);
u32 data = nv_rd32(priv, 0x00b238);
u32 show = stat;
- int chid;
+ unsigned long flags;
- engctx = nouveau_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
+ spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+ engctx = nv_object(priv->chan);
if (stat & 0x01000000) {
/* happens on initial binding of the object */
@@ -220,7 +220,7 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
show &= ~0x01000000;
}
- if (type == 0x00000010) {
+ if (type == 0x00000010 && engctx) {
handle = nouveau_handle_get_class(engctx, 0x3174);
if (handle && !nv_call(handle->object, mthd, data))
show &= ~0x01000000;
@@ -232,13 +232,12 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b230, 0x00000001);
if (show) {
- nv_error(priv,
- "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst << 4, nouveau_client_name(engctx), stat,
- type, mthd, data);
+ nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ pfifo->chid(pfifo, engctx),
+ nouveau_client_name(engctx), stat, type, mthd, data);
}
- nouveau_engctx_put(engctx);
+ spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
}
static int
@@ -284,10 +283,7 @@ nv31_mpeg_init(struct nouveau_object *object)
/* PMPEG init */
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
- if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
- nv_wr32(priv, 0x00b220, 0x00000044);
- else
- nv_wr32(priv, 0x00b220, 0x00000031);
+ nv_wr32(priv, 0x00b220, 0x00000031);
nv_wr32(priv, 0x00b300, 0x02001ec1);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
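
The nv31.c changes above drop the atomic refcount and instead track the single active PMPEG context through a lock-protected chan pointer, which the interrupt handler can also consult under the same lock. A user-space sketch of that single-owner-slot pattern, using a pthread mutex in place of the engine spinlock; all names are illustrative.

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct chan { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct chan *owner;		/* at most one active context */

static int chan_attach(struct chan *chan)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (owner)
		ret = -EBUSY;		/* someone else already owns the engine */
	else
		owner = chan;
	pthread_mutex_unlock(&lock);
	return ret;
}

static void chan_detach(struct chan *chan)
{
	pthread_mutex_lock(&lock);
	if (owner == chan)
		owner = NULL;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct chan a = { 1 }, b = { 2 };
	int ret;

	ret = chan_attach(&a);		/* succeeds */
	ret = chan_attach(&b);		/* -EBUSY while 'a' is attached */
	chan_detach(&a);
	return ret == -EBUSY ? 0 : 1;
}
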
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
new file mode 100644
index 00000000000..d08629d0b6a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
@@ -0,0 +1,15 @@
+#ifndef __NV31_MPEG_H__
+#define __NV31_MPEG_H__
+
+#include <engine/mpeg.h>
+
+struct nv31_mpeg_chan {
+ struct nouveau_object base;
+};
+
+struct nv31_mpeg_priv {
+ struct nouveau_mpeg base;
+ struct nv31_mpeg_chan *chan;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index dd6196072e9..d4e7ec0ba68 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -31,66 +31,63 @@
#include <subdev/instmem.h>
#include <engine/mpeg.h>
-#include <engine/graph/nv40.h>
-
-struct nv40_mpeg_priv {
- struct nouveau_mpeg base;
-};
-
-struct nv40_mpeg_chan {
- struct nouveau_mpeg_chan base;
-};
+#include <engine/mpeg/nv31.h>
/*******************************************************************************
- * PMPEG context
+ * MPEG object classes
******************************************************************************/
static int
-nv40_mpeg_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
{
- struct nv40_mpeg_chan *chan;
- int ret;
-
- ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
- 264 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
+ struct nouveau_instmem *imem = nouveau_instmem(object);
+ struct nv31_mpeg_priv *priv = (void *)object->engine;
+ u32 inst = *(u32 *)arg << 4;
+ u32 dma0 = nv_ro32(imem, inst + 0);
+ u32 dma1 = nv_ro32(imem, inst + 4);
+ u32 dma2 = nv_ro32(imem, inst + 8);
+ u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+ u32 size = dma1 + 1;
+
+ /* only allow linear DMA objects */
+ if (!(dma0 & 0x00002000))
+ return -EINVAL;
+
+ if (mthd == 0x0190) {
+ /* DMA_CMD */
+ nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+ nv_wr32(priv, 0x00b334, base);
+ nv_wr32(priv, 0x00b324, size);
+ } else
+ if (mthd == 0x01a0) {
+ /* DMA_DATA */
+ nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+ nv_wr32(priv, 0x00b360, base);
+ nv_wr32(priv, 0x00b364, size);
+ } else {
+ /* DMA_IMAGE, VRAM only */
+ if (dma0 & 0x00030000)
+ return -EINVAL;
+
+ nv_wr32(priv, 0x00b370, base);
+ nv_wr32(priv, 0x00b374, size);
+ }
- nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
return 0;
}
-static int
-nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
-{
-
- struct nv40_mpeg_priv *priv = (void *)object->engine;
- struct nv40_mpeg_chan *chan = (void *)object;
- u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
-
- nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(priv, 0x00b318) == inst)
- nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
- return 0;
-}
+static struct nouveau_omthds
+nv40_mpeg_omthds[] = {
+ { 0x0190, 0x0190, nv40_mpeg_mthd_dma },
+ { 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
+ { 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
+ {}
+};
-static struct nouveau_oclass
-nv40_mpeg_cclass = {
- .handle = NV_ENGCTX(MPEG, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_mpeg_context_ctor,
- .dtor = _nouveau_mpeg_context_dtor,
- .init = _nouveau_mpeg_context_init,
- .fini = nv40_mpeg_context_fini,
- .rd32 = _nouveau_mpeg_context_rd32,
- .wr32 = _nouveau_mpeg_context_wr32,
- },
+struct nouveau_oclass
+nv40_mpeg_sclass[] = {
+ { 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
+ {}
};
/*******************************************************************************
@@ -100,7 +97,7 @@ nv40_mpeg_cclass = {
static void
nv40_mpeg_intr(struct nouveau_subdev *subdev)
{
- struct nv40_mpeg_priv *priv = (void *)subdev;
+ struct nv31_mpeg_priv *priv = (void *)subdev;
u32 stat;
if ((stat = nv_rd32(priv, 0x00b100)))
@@ -117,7 +114,7 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv40_mpeg_priv *priv;
+ struct nv31_mpeg_priv *priv;
int ret;
ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
@@ -127,8 +124,8 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->unit = 0x00000002;
nv_subdev(priv)->intr = nv40_mpeg_intr;
- nv_engine(priv)->cclass = &nv40_mpeg_cclass;
- nv_engine(priv)->sclass = nv31_mpeg_sclass;
+ nv_engine(priv)->cclass = &nv31_mpeg_cclass;
+ nv_engine(priv)->sclass = nv40_mpeg_sclass;
nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
new file mode 100644
index 00000000000..3d8c2133e0e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/fifo.h>
+#include <engine/mpeg.h>
+
+struct nv44_mpeg_priv {
+ struct nouveau_mpeg base;
+};
+
+struct nv44_mpeg_chan {
+ struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv44_mpeg_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv44_mpeg_chan *chan;
+ int ret;
+
+ ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
+ 264 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
+ return 0;
+}
+
+static int
+nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+{
+
+ struct nv44_mpeg_priv *priv = (void *)object->engine;
+ struct nv44_mpeg_chan *chan = (void *)object;
+ u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+
+ nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
+ if (nv_rd32(priv, 0x00b318) == inst)
+ nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv44_mpeg_cclass = {
+ .handle = NV_ENGCTX(MPEG, 0x44),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv44_mpeg_context_ctor,
+ .dtor = _nouveau_mpeg_context_dtor,
+ .init = _nouveau_mpeg_context_init,
+ .fini = nv44_mpeg_context_fini,
+ .rd32 = _nouveau_mpeg_context_rd32,
+ .wr32 = _nouveau_mpeg_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv44_mpeg_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ struct nouveau_handle *handle;
+ struct nv44_mpeg_priv *priv = (void *)subdev;
+ u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+ u32 stat = nv_rd32(priv, 0x00b100);
+ u32 type = nv_rd32(priv, 0x00b230);
+ u32 mthd = nv_rd32(priv, 0x00b234);
+ u32 data = nv_rd32(priv, 0x00b238);
+ u32 show = stat;
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat & 0x01000000) {
+ /* happens on initial binding of the object */
+ if (type == 0x00000020 && mthd == 0x0000) {
+ nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+ show &= ~0x01000000;
+ }
+
+ if (type == 0x00000010) {
+ handle = nouveau_handle_get_class(engctx, 0x3174);
+ if (handle && !nv_call(handle->object, mthd, data))
+ show &= ~0x01000000;
+ nouveau_handle_put(handle);
+ }
+ }
+
+ nv_wr32(priv, 0x00b100, stat);
+ nv_wr32(priv, 0x00b230, 0x00000001);
+
+ if (show) {
+ nv_error(priv,
+ "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), stat,
+ type, mthd, data);
+ }
+
+ nouveau_engctx_put(engctx);
+}
+
+static void
+nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
+{
+ struct nv44_mpeg_priv *priv = (void *)subdev;
+ u32 stat;
+
+ if ((stat = nv_rd32(priv, 0x00b100)))
+ nv44_mpeg_intr(subdev);
+
+ if ((stat = nv_rd32(priv, 0x00b800))) {
+ nv_error(priv, "PMSRCH 0x%08x\n", stat);
+ nv_wr32(priv, 0x00b800, stat);
+ }
+}
+
+static int
+nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv44_mpeg_priv *priv;
+ int ret;
+
+ ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_subdev(priv)->intr = nv44_mpeg_me_intr;
+ nv_engine(priv)->cclass = &nv44_mpeg_cclass;
+ nv_engine(priv)->sclass = nv40_mpeg_sclass;
+ nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+ return 0;
+}
+
+struct nouveau_oclass
+nv44_mpeg_oclass = {
+ .handle = NV_ENGINE(MPEG, 0x44),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv44_mpeg_ctor,
+ .dtor = _nouveau_mpeg_dtor,
+ .init = nv31_mpeg_init,
+ .fini = _nouveau_mpeg_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
new file mode 100644
index 00000000000..e9c5e51943e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+#define QUAD_MASK 0x0f
+#define QUAD_FREE 0x01
+
+static struct nouveau_perfsig *
+nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
+{
+ char path[64];
+ int i;
+
+ if (name[0] != '/') {
+ for (i = 0; i < dom->signal_nr; i++) {
+ if ( dom->signal[i].name &&
+ !strncmp(name, dom->signal[i].name, size))
+ return &dom->signal[i];
+ }
+ } else {
+ for (i = 0; i < dom->signal_nr; i++) {
+ snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
+ if (!strncmp(name, path, size))
+ return &dom->signal[i];
+ }
+ }
+
+ return NULL;
+}
+
+struct nouveau_perfsig *
+nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
+ struct nouveau_perfdom **pdom)
+{
+ struct nouveau_perfdom *dom = *pdom;
+ struct nouveau_perfsig *sig;
+
+ if (dom == NULL) {
+ list_for_each_entry(dom, &ppm->domains, head) {
+ sig = nouveau_perfsig_find_(dom, name, size);
+ if (sig) {
+ *pdom = dom;
+ return sig;
+ }
+ }
+
+ return NULL;
+ }
+
+ return nouveau_perfsig_find_(dom, name, size);
+}
+
+struct nouveau_perfctr *
+nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
+ struct nouveau_perfdom **pdom)
+{
+ struct nouveau_perfsig *sig;
+ struct nouveau_perfctr *ctr;
+
+ sig = nouveau_perfsig_find(ppm, name, strlen(name), pdom);
+ if (!sig)
+ return NULL;
+
+ ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+ if (ctr) {
+ ctr->signal[0] = sig;
+ ctr->logic_op = 0xaaaa;
+ }
+
+ return ctr;
+}
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+static int
+nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_perfmon *ppm = (void *)object->engine;
+ struct nouveau_perfdom *dom = NULL, *chk;
+ struct nv_perfctr_query *args = data;
+ const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
+ const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
+ const char *name;
+ int tmp = 0, di, si;
+ char path[64];
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ di = (args->iter & 0xff000000) >> 24;
+ si = (args->iter & 0x00ffffff) - 1;
+
+ list_for_each_entry(chk, &ppm->domains, head) {
+ if (tmp++ == di) {
+ dom = chk;
+ break;
+ }
+ }
+
+ if (dom == NULL || si >= (int)dom->signal_nr)
+ return -EINVAL;
+
+ if (si >= 0) {
+ if (raw || !(name = dom->signal[si].name)) {
+ snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
+ name = path;
+ }
+
+ if (args->name)
+ strncpy(args->name, name, args->size);
+ args->size = strlen(name) + 1;
+ }
+
+ do {
+ while (++si < dom->signal_nr) {
+ if (all || dom->signal[si].name) {
+ args->iter = (di << 24) | ++si;
+ return 0;
+ }
+ }
+ si = -1;
+ di = di + 1;
+ dom = list_entry(dom->head.next, typeof(*dom), head);
+ } while (&dom->head != &ppm->domains);
+
+ args->iter = 0xffffffff;
+ return 0;
+}
+
+static int
+nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_perfmon *ppm = (void *)object->engine;
+ struct nouveau_perfctr *ctr, *tmp;
+ struct nouveau_perfdom *dom;
+ struct nv_perfctr_sample *args = data;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+ ppm->sequence++;
+
+ list_for_each_entry(dom, &ppm->domains, head) {
+ /* sample previous batch of counters */
+ if (dom->quad != QUAD_MASK) {
+ dom->func->next(ppm, dom);
+ tmp = NULL;
+ while (!list_empty(&dom->list)) {
+ ctr = list_first_entry(&dom->list,
+ typeof(*ctr), head);
+ if (ctr->slot < 0) break;
+ if ( tmp && tmp == ctr) break;
+ if (!tmp) tmp = ctr;
+ dom->func->read(ppm, dom, ctr);
+ ctr->slot = -1;
+ list_move_tail(&ctr->head, &dom->list);
+ }
+ }
+
+ dom->quad = QUAD_MASK;
+
+ /* setup next batch of counters for sampling */
+ list_for_each_entry(ctr, &dom->list, head) {
+ ctr->slot = ffs(dom->quad) - 1;
+ if (ctr->slot < 0)
+ break;
+ dom->quad &= ~(QUAD_FREE << ctr->slot);
+ dom->func->init(ppm, dom, ctr);
+ }
+
+ if (dom->quad != QUAD_MASK)
+ dom->func->next(ppm, dom);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_perfctr *ctr = (void *)object;
+ struct nv_perfctr_read *args = data;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+ if (!ctr->clk)
+ return -EAGAIN;
+
+ args->clk = ctr->clk;
+ args->ctr = ctr->ctr;
+ return 0;
+}
+
+static void
+nouveau_perfctr_dtor(struct nouveau_object *object)
+{
+ struct nouveau_perfctr *ctr = (void *)object;
+ if (ctr->head.next)
+ list_del(&ctr->head);
+ nouveau_object_destroy(&ctr->base);
+}
+
+static int
+nouveau_perfctr_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_perfmon *ppm = (void *)engine;
+ struct nouveau_perfdom *dom = NULL;
+ struct nouveau_perfsig *sig[4] = {};
+ struct nouveau_perfctr *ctr;
+ struct nv_perfctr_class *args = data;
+ int ret, i;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
+ sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
+ args->signal[i].size, &dom);
+ if (!sig[i])
+ return -EINVAL;
+ }
+
+ ret = nouveau_object_create(parent, engine, oclass, 0, &ctr);
+ *pobject = nv_object(ctr);
+ if (ret)
+ return ret;
+
+ ctr->slot = -1;
+ ctr->logic_op = args->logic_op;
+ ctr->signal[0] = sig[0];
+ ctr->signal[1] = sig[1];
+ ctr->signal[2] = sig[2];
+ ctr->signal[3] = sig[3];
+ if (dom)
+ list_add_tail(&ctr->head, &dom->list);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nouveau_perfctr_ofuncs = {
+ .ctor = nouveau_perfctr_ctor,
+ .dtor = nouveau_perfctr_dtor,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+};
+
+static struct nouveau_omthds
+nouveau_perfctr_omthds[] = {
+ { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
+ { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
+ { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
+ {}
+};
+
+struct nouveau_oclass
+nouveau_perfmon_sclass[] = {
+ { .handle = NV_PERFCTR_CLASS,
+ .ofuncs = &nouveau_perfctr_ofuncs,
+ .omthds = nouveau_perfctr_omthds,
+ },
+ {},
+};
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+static void
+nouveau_perfctx_dtor(struct nouveau_object *object)
+{
+ struct nouveau_perfmon *ppm = (void *)object->engine;
+ mutex_lock(&nv_subdev(ppm)->mutex);
+ ppm->context = NULL;
+ mutex_unlock(&nv_subdev(ppm)->mutex);
+}
+
+static int
+nouveau_perfctx_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_perfmon *ppm = (void *)engine;
+ struct nouveau_perfctx *ctx;
+ int ret;
+
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+ 0, 0, 0, &ctx);
+ *pobject = nv_object(ctx);
+ if (ret)
+ return ret;
+
+ mutex_lock(&nv_subdev(ppm)->mutex);
+ if (ppm->context == NULL)
+ ppm->context = ctx;
+ mutex_unlock(&nv_subdev(ppm)->mutex);
+
+ if (ctx != ppm->context)
+ return -EBUSY;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nouveau_perfmon_cclass = {
+ .handle = NV_ENGCTX(PERFMON, 0x00),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nouveau_perfctx_ctor,
+ .dtor = nouveau_perfctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ },
+};
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+int
+nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
+ u32 base, u32 size_unit, u32 size_domain,
+ const struct nouveau_specdom *spec)
+{
+ const struct nouveau_specdom *sdom;
+ const struct nouveau_specsig *ssig;
+ struct nouveau_perfdom *dom;
+ int i;
+
+ for (i = 0; i == 0 || mask; i++) {
+ u32 addr = base + (i * size_unit);
+ if (i && !(mask & (1 << i)))
+ continue;
+
+ sdom = spec;
+ while (sdom->signal_nr) {
+ dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
+ sizeof(*dom->signal), GFP_KERNEL);
+ if (!dom)
+ return -ENOMEM;
+
+ if (mask) {
+ snprintf(dom->name, sizeof(dom->name),
+ "%s/%02x/%02x", name, i,
+ (int)(sdom - spec));
+ } else {
+ snprintf(dom->name, sizeof(dom->name),
+ "%s/%02x", name, (int)(sdom - spec));
+ }
+
+ list_add_tail(&dom->head, &ppm->domains);
+ INIT_LIST_HEAD(&dom->list);
+ dom->func = sdom->func;
+ dom->addr = addr;
+ dom->quad = QUAD_MASK;
+ dom->signal_nr = sdom->signal_nr;
+
+ ssig = (sdom++)->signal;
+ while (ssig->name) {
+ dom->signal[ssig->signal].name = ssig->name;
+ ssig++;
+ }
+
+ addr += size_domain;
+ }
+
+ mask &= ~(1 << i);
+ }
+
+ return 0;
+}
+
+int
+_nouveau_perfmon_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_perfmon *ppm = (void *)object;
+ return nouveau_engine_fini(&ppm->base, suspend);
+}
+
+int
+_nouveau_perfmon_init(struct nouveau_object *object)
+{
+ struct nouveau_perfmon *ppm = (void *)object;
+ return nouveau_engine_init(&ppm->base);
+}
+
+void
+_nouveau_perfmon_dtor(struct nouveau_object *object)
+{
+ struct nouveau_perfmon *ppm = (void *)object;
+ struct nouveau_perfdom *dom, *tmp;
+
+ list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+ list_del(&dom->head);
+ kfree(dom);
+ }
+
+ nouveau_engine_destroy(&ppm->base);
+}
+
+int
+nouveau_perfmon_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int length, void **pobject)
+{
+ struct nouveau_perfmon *ppm;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true, "PPM",
+ "perfmon", length, pobject);
+ ppm = *pobject;
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&ppm->domains);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
new file mode 100644
index 00000000000..50696cc7b7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static void
+pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+ struct nouveau_perfctr *ctr)
+{
+ u32 mask = 0x00000000;
+ u32 ctrl = 0x00000001;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
+ mask |= 1 << (ctr->signal[i] - dom->signal);
+
+ nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
+ nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
+ nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
+}
+
+static void
+pwr_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+ struct nouveau_perfctr *ctr)
+{
+ ctr->ctr = ppm->pwr[ctr->slot];
+ ctr->clk = ppm->pwr[ppm->last];
+}
+
+static void
+pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+ int i;
+
+ for (i = 0; i <= ppm->last; i++) {
+ ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
+ nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
+ }
+}
+
+static const struct nouveau_funcdom
+pwr_perfctr_func = {
+ .init = pwr_perfctr_init,
+ .read = pwr_perfctr_read,
+ .next = pwr_perfctr_next,
+};
+
+const struct nouveau_specdom
+nva3_perfmon_pwr[] = {
+ { 0x20, (const struct nouveau_specsig[]) {
+ { 0x00, "pwr_gr_idle" },
+ { 0x04, "pwr_bsp_idle" },
+ { 0x05, "pwr_vp_idle" },
+ { 0x06, "pwr_ppp_idle" },
+ { 0x13, "pwr_ce0_idle" },
+ {}
+ }, &pwr_perfctr_func },
+ {}
+};
+
+const struct nouveau_specdom
+nvc0_perfmon_pwr[] = {
+ { 0x20, (const struct nouveau_specsig[]) {
+ { 0x00, "pwr_gr_idle" },
+ { 0x04, "pwr_bsp_idle" },
+ { 0x05, "pwr_vp_idle" },
+ { 0x06, "pwr_ppp_idle" },
+ { 0x13, "pwr_ce0_idle" },
+ { 0x14, "pwr_ce1_idle" },
+ {}
+ }, &pwr_perfctr_func },
+ {}
+};
+
+const struct nouveau_specdom
+nve0_perfmon_pwr[] = {
+ { 0x20, (const struct nouveau_specsig[]) {
+ { 0x00, "pwr_gr_idle" },
+ { 0x04, "pwr_bsp_idle" },
+ { 0x05, "pwr_vp_idle" },
+ { 0x06, "pwr_ppp_idle" },
+ { 0x13, "pwr_ce0_idle" },
+ { 0x14, "pwr_ce1_idle" },
+ { 0x15, "pwr_ce2_idle" },
+ {}
+ }, &pwr_perfctr_func },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
new file mode 100644
index 00000000000..b2a10785adb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+ struct nouveau_perfctr *ctr)
+{
+ struct nv40_perfmon_priv *priv = (void *)ppm;
+ struct nv40_perfmon_cntr *cntr = (void *)ctr;
+ u32 log = ctr->logic_op;
+ u32 src = 0x00000000;
+ int i;
+
+ for (i = 0; i < 4 && ctr->signal[i]; i++)
+ src |= (ctr->signal[i] - dom->signal) << (i * 8);
+
+ nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
+ nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
+ nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+}
+
+static void
+nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+ struct nouveau_perfctr *ctr)
+{
+ struct nv40_perfmon_priv *priv = (void *)ppm;
+ struct nv40_perfmon_cntr *cntr = (void *)ctr;
+
+ switch (cntr->base.slot) {
+ case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
+ case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
+ case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
+ case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+ }
+ cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+}
+
+static void
+nv40_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+ struct nv40_perfmon_priv *priv = (void *)ppm;
+ if (priv->sequence != ppm->sequence) {
+ nv_wr32(priv, 0x400084, 0x00000020);
+ priv->sequence = ppm->sequence;
+ }
+}
+
+const struct nouveau_funcdom
+nv40_perfctr_func = {
+ .init = nv40_perfctr_init,
+ .read = nv40_perfctr_read,
+ .next = nv40_perfctr_next,
+};
+
+static const struct nouveau_specdom
+nv40_perfmon[] = {
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+int
+nv40_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv40_perfmon_oclass *mclass = (void *)oclass;
+ struct nv40_perfmon_priv *priv;
+ int ret;
+
+ ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+ nv_engine(priv)->sclass = nouveau_perfmon_sclass;
+ return 0;
+}
+
+struct nouveau_oclass *
+nv40_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+ .base.handle = NV_ENGINE(PERFMON, 0x40),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = _nouveau_perfmon_fini,
+ },
+ .doms = nv40_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
new file mode 100644
index 00000000000..1b5792d1df1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_PM_NV40_H__
+#define __NVKM_PM_NV40_H__
+
+#include "priv.h"
+
+struct nv40_perfmon_oclass {
+ struct nouveau_oclass base;
+ const struct nouveau_specdom *doms;
+};
+
+struct nv40_perfmon_priv {
+ struct nouveau_perfmon base;
+ u32 sequence;
+};
+
+int nv40_perfmon_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *data, u32 size,
+ struct nouveau_object **pobject);
+
+struct nv40_perfmon_cntr {
+ struct nouveau_perfctr base;
+};
+
+extern const struct nouveau_funcdom nv40_perfctr_func;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
new file mode 100644
index 00000000000..94217691fe6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nv50_perfmon[] = {
+ { 0x040, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x100, (const struct nouveau_specsig[]) {
+ { 0xc8, "gr_idle" },
+ {}
+ }, &nv40_perfctr_func },
+ { 0x100, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x020, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x040, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+struct nouveau_oclass *
+nv50_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+ .base.handle = NV_ENGINE(PERFMON, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = _nouveau_perfmon_fini,
+ },
+ .doms = nv50_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
new file mode 100644
index 00000000000..9232c7fc625
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nv84_perfmon[] = {
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+struct nouveau_oclass *
+nv84_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+ .base.handle = NV_ENGINE(PERFMON, 0x84),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = _nouveau_perfmon_fini,
+ },
+ .doms = nv84_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
new file mode 100644
index 00000000000..6197ebdeb64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nva3_perfmon[] = {
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nouveau_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+static int
+nva3_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **object)
+{
+ int ret = nv40_perfmon_ctor(parent, engine, oclass, data, size, object);
+ if (ret == 0) {
+ struct nv40_perfmon_priv *priv = (void *)*object;
+ ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+ nva3_perfmon_pwr);
+ if (ret)
+ return ret;
+
+ priv->base.last = 3;
+ }
+ return ret;
+}
+
+struct nouveau_oclass *
+nva3_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+ .base.handle = NV_ENGINE(PERFMON, 0xa3),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = _nouveau_perfmon_fini,
+ },
+ .doms = nva3_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
new file mode 100644
index 00000000000..74b24104250
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nvc0_perfmon_hub[] = {
+ {}
+};
+
+static const struct nouveau_specdom
+nvc0_perfmon_gpc[] = {
+ {}
+};
+
+static const struct nouveau_specdom
+nvc0_perfmon_part[] = {
+ {}
+};
+
+static void
+nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+ struct nouveau_perfctr *ctr)
+{
+ struct nvc0_perfmon_priv *priv = (void *)ppm;
+ struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+ u32 log = ctr->logic_op;
+ u32 src = 0x00000000;
+ int i;
+
+ for (i = 0; i < 4 && ctr->signal[i]; i++)
+ src |= (ctr->signal[i] - dom->signal) << (i * 8);
+
+ nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
+ nv_wr32(priv, dom->addr + 0x100, 0x00000000);
+ nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
+ nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
+}
+
+static void
+nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+ struct nouveau_perfctr *ctr)
+{
+ struct nvc0_perfmon_priv *priv = (void *)ppm;
+ struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+
+ switch (cntr->base.slot) {
+ case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
+ case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
+ case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
+ case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
+ }
+ cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+}
+
+static void
+nvc0_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+ struct nvc0_perfmon_priv *priv = (void *)ppm;
+ nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+ nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+}
+
+const struct nouveau_funcdom
+nvc0_perfctr_func = {
+ .init = nvc0_perfctr_init,
+ .read = nvc0_perfctr_read,
+ .next = nvc0_perfctr_next,
+};
+
+int
+nvc0_perfmon_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvc0_perfmon_priv *priv = (void *)object;
+ nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
+ nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
+ return nouveau_perfmon_fini(&priv->base, suspend);
+}
+
+static int
+nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_perfmon_priv *priv;
+ u32 mask;
+ int ret;
+
+ ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+ nvc0_perfmon_pwr);
+ if (ret)
+ return ret;
+
+ /* HUB */
+ ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+ nvc0_perfmon_hub);
+ if (ret)
+ return ret;
+
+ /* GPC */
+ mask = (1 << nv_rd32(priv, 0x022430)) - 1;
+ mask &= ~nv_rd32(priv, 0x022504);
+ mask &= ~nv_rd32(priv, 0x022584);
+
+ ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+ 0x1000, 0x200, nvc0_perfmon_gpc);
+ if (ret)
+ return ret;
+
+ /* PART */
+ mask = (1 << nv_rd32(priv, 0x022438)) - 1;
+ mask &= ~nv_rd32(priv, 0x022548);
+ mask &= ~nv_rd32(priv, 0x0225c8);
+
+ ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+ 0x1000, 0x200, nvc0_perfmon_part);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+ nv_engine(priv)->sclass = nouveau_perfmon_sclass;
+ priv->base.last = 7;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_perfmon_oclass = {
+ .handle = NV_ENGINE(PERFMON, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = nvc0_perfmon_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
new file mode 100644
index 00000000000..f66bca48426
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_PM_NVC0_H__
+#define __NVKM_PM_NVC0_H__
+
+#include "priv.h"
+
+struct nvc0_perfmon_priv {
+ struct nouveau_perfmon base;
+};
+
+struct nvc0_perfmon_cntr {
+ struct nouveau_perfctr base;
+};
+
+extern const struct nouveau_funcdom nvc0_perfctr_func;
+int nvc0_perfmon_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
new file mode 100644
index 00000000000..71d718c1207
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nve0_perfmon_hub[] = {
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "hub00_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x40, (const struct nouveau_specsig[]) {
+ { 0x27, "hub01_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "hub02_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "hub03_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x40, (const struct nouveau_specsig[]) {
+ { 0x03, "host_mmio_rd" },
+ { 0x27, "hub04_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "hub05_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0xc0, (const struct nouveau_specsig[]) {
+ { 0x74, "host_fb_rd3x" },
+ { 0x75, "host_fb_rd3x_2" },
+ { 0xa7, "hub06_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "hub07_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ {}
+};
+
+static const struct nouveau_specdom
+nve0_perfmon_gpc[] = {
+ { 0xe0, (const struct nouveau_specsig[]) {
+ { 0xc7, "gpc00_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ {}
+};
+
+static const struct nouveau_specdom
+nve0_perfmon_part[] = {
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "part00_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ { 0x60, (const struct nouveau_specsig[]) {
+ { 0x47, "part01_user_0" },
+ {}
+ }, &nvc0_perfctr_func },
+ {}
+};
+
+static int
+nve0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_perfmon_priv *priv;
+ u32 mask;
+ int ret;
+
+ ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ /* PDAEMON */
+ ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+ nve0_perfmon_pwr);
+ if (ret)
+ return ret;
+
+ /* HUB */
+ ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+ nve0_perfmon_hub);
+ if (ret)
+ return ret;
+
+ /* GPC */
+ mask = (1 << nv_rd32(priv, 0x022430)) - 1;
+ mask &= ~nv_rd32(priv, 0x022504);
+ mask &= ~nv_rd32(priv, 0x022584);
+
+ ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+ 0x1000, 0x200, nve0_perfmon_gpc);
+ if (ret)
+ return ret;
+
+ /* PART */
+ mask = (1 << nv_rd32(priv, 0x022438)) - 1;
+ mask &= ~nv_rd32(priv, 0x022548);
+ mask &= ~nv_rd32(priv, 0x0225c8);
+
+ ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+ 0x1000, 0x200, nve0_perfmon_part);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+ nv_engine(priv)->sclass = nouveau_perfmon_sclass;
+ priv->base.last = 7;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_perfmon_oclass = {
+ .handle = NV_ENGINE(PERFMON, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = nvc0_perfmon_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
new file mode 100644
index 00000000000..47256f78a89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvf0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_perfmon_priv *priv;
+ int ret;
+
+ ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+ nve0_perfmon_pwr);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+ nv_engine(priv)->sclass = nouveau_perfmon_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvf0_perfmon_oclass = {
+ .handle = NV_ENGINE(PERFMON, 0xf0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvf0_perfmon_ctor,
+ .dtor = _nouveau_perfmon_dtor,
+ .init = _nouveau_perfmon_init,
+ .fini = nvc0_perfmon_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
new file mode 100644
index 00000000000..0ac8714fe0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
@@ -0,0 +1,91 @@
+#ifndef __NVKM_PERFMON_PRIV_H__
+#define __NVKM_PERFMON_PRIV_H__
+
+#include <engine/perfmon.h>
+
+struct nouveau_perfctr {
+ struct nouveau_object base;
+ struct list_head head;
+ struct nouveau_perfsig *signal[4];
+ int slot;
+ u32 logic_op;
+ u32 clk;
+ u32 ctr;
+};
+
+extern struct nouveau_oclass nouveau_perfmon_sclass[];
+
+struct nouveau_perfctx {
+ struct nouveau_engctx base;
+};
+
+extern struct nouveau_oclass nouveau_perfmon_cclass;
+
+struct nouveau_specsig {
+ u8 signal;
+ const char *name;
+};
+
+struct nouveau_perfsig {
+ const char *name;
+};
+
+struct nouveau_perfdom;
+struct nouveau_perfctr *
+nouveau_perfsig_wrap(struct nouveau_perfmon *, const char *,
+ struct nouveau_perfdom **);
+
+struct nouveau_specdom {
+ u16 signal_nr;
+ const struct nouveau_specsig *signal;
+ const struct nouveau_funcdom *func;
+};
+
+extern const struct nouveau_specdom nva3_perfmon_pwr[];
+extern const struct nouveau_specdom nvc0_perfmon_pwr[];
+extern const struct nouveau_specdom nve0_perfmon_pwr[];
+
+struct nouveau_perfdom {
+ struct list_head head;
+ struct list_head list;
+ const struct nouveau_funcdom *func;
+ char name[32];
+ u32 addr;
+ u8 quad;
+ u32 signal_nr;
+ struct nouveau_perfsig signal[];
+};
+
+struct nouveau_funcdom {
+ void (*init)(struct nouveau_perfmon *, struct nouveau_perfdom *,
+ struct nouveau_perfctr *);
+ void (*read)(struct nouveau_perfmon *, struct nouveau_perfdom *,
+ struct nouveau_perfctr *);
+ void (*next)(struct nouveau_perfmon *, struct nouveau_perfdom *);
+};
+
+int nouveau_perfdom_new(struct nouveau_perfmon *, const char *, u32,
+ u32, u32, u32, const struct nouveau_specdom *);
+
+#define nouveau_perfmon_create(p,e,o,d) \
+ nouveau_perfmon_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_perfmon_dtor(p) ({ \
+ struct nouveau_perfmon *c = (p); \
+ _nouveau_perfmon_dtor(nv_object(c)); \
+})
+#define nouveau_perfmon_init(p) ({ \
+ struct nouveau_perfmon *c = (p); \
+ _nouveau_perfmon_init(nv_object(c)); \
+})
+#define nouveau_perfmon_fini(p,s) ({ \
+ struct nouveau_perfmon *c = (p); \
+ _nouveau_perfmon_fini(nv_object(c), (s)); \
+})
+
+int nouveau_perfmon_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_perfmon_dtor(struct nouveau_object *);
+int _nouveau_perfmon_init(struct nouveau_object *);
+int _nouveau_perfmon_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 2a859a31c30..c571758e4a2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -135,8 +135,8 @@ nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}

-struct nouveau_oclass
-nv04_software_oclass = {
+struct nouveau_oclass *
+nv04_software_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(SW, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a019364b1e1..a62f11a7843 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -117,8 +117,8 @@ nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}

-struct nouveau_oclass
-nv10_software_oclass = {
+struct nouveau_oclass *
+nv10_software_oclass = &(struct nouveau_oclass) {
.handle = NV_ENGINE(SW, 0x10),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv10_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index c48e7495377..b574dd4bb82 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -32,16 +32,9 @@
#include <subdev/bar.h>

-#include <engine/software.h>
#include <engine/disp.h>

-struct nv50_software_priv {
- struct nouveau_software base;
-};
-
-struct nv50_software_chan {
- struct nouveau_software_chan base;
-};
+#include "nv50.h"

/*******************************************************************************
* software object classes
@@ -62,7 +55,7 @@ nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
- chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+ chan->vblank.ctxdma = gpuobj->node->offset >> 4;
ret = 0;
}
nouveau_namedb_put(handle);
@@ -74,34 +67,33 @@ nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
void *args, u32 size)
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
- chan->base.vblank.offset = *(u32 *)args;
+ chan->vblank.offset = *(u32 *)args;
return 0;
}

-static int
+int
nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
void *args, u32 size)
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
- chan->base.vblank.value = *(u32 *)args;
+ chan->vblank.value = *(u32 *)args;
return 0;
}

-static int
+int
nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
void *args, u32 size)
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
- struct nouveau_disp *disp = nouveau_disp(object);
- u32 crtc = *(u32 *)args;
- if (crtc > 1)
+ u32 head = *(u32 *)args;
+ if (head >= chan->vblank.nr_event)
return -EINVAL;

- nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
+ nouveau_event_get(chan->vblank.event[head]);
return 0;
}

-static int
+int
nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
void *args, u32 size)
{
@@ -132,10 +124,9 @@ nv50_software_sclass[] = {
 ******************************************************************************/

static int
-nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+nv50_software_vblsem_release(void *data, int head)
{
- struct nouveau_software_chan *chan =
- container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nv50_software_chan *chan = data;
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
struct nouveau_bar *bar = nouveau_bar(priv);
@@ -154,45 +145,76 @@ nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
return NVKM_EVENT_DROP;
}
-static int
+void
+nv50_software_context_dtor(struct nouveau_object *object)
+{
+ struct nv50_software_chan *chan = (void *)object;
+ int i;
+
+ if (chan->vblank.event) {
+ for (i = 0; i < chan->vblank.nr_event; i++)
+ nouveau_event_ref(NULL, &chan->vblank.event[i]);
+ kfree(chan->vblank.event);
+ }
+
+ nouveau_software_context_destroy(&chan->base);
+}
+
+int
nv50_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ struct nouveau_disp *pdisp = nouveau_disp(parent);
+ struct nv50_software_cclass *pclass = (void *)oclass;
struct nv50_software_chan *chan;
- int ret;
+ int ret, i;
ret = nouveau_software_context_create(parent, engine, oclass, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
- chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
- chan->base.vblank.event.func = nv50_software_vblsem_release;
+ chan->vblank.nr_event = pdisp->vblank->index_nr;
+ chan->vblank.event = kzalloc(chan->vblank.nr_event *
+ sizeof(*chan->vblank.event), GFP_KERNEL);
+ if (!chan->vblank.event)
+ return -ENOMEM;
+
+ for (i = 0; i < chan->vblank.nr_event; i++) {
+ ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
+ chan, &chan->vblank.event[i]);
+ if (ret)
+ return ret;
+ }
+
+ chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
return 0;
}
-static struct nouveau_oclass
+static struct nv50_software_cclass
nv50_software_cclass = {
- .handle = NV_ENGCTX(SW, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+ .base.handle = NV_ENGCTX(SW, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_software_context_ctor,
.dtor = _nouveau_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
+ .vblank = nv50_software_vblsem_release,
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
-static int
+int
nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ struct nv50_software_oclass *pclass = (void *)oclass;
struct nv50_software_priv *priv;
int ret;
@@ -201,19 +223,21 @@ nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->cclass = &nv50_software_cclass;
- nv_engine(priv)->sclass = nv50_software_sclass;
+ nv_engine(priv)->cclass = pclass->cclass;
+ nv_engine(priv)->sclass = pclass->sclass;
nv_subdev(priv)->intr = nv04_software_intr;
return 0;
}
-struct nouveau_oclass
-nv50_software_oclass = {
- .handle = NV_ENGINE(SW, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_software_oclass = &(struct nv50_software_oclass) {
+ .base.handle = NV_ENGINE(SW, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_software_ctor,
.dtor = _nouveau_software_dtor,
.init = _nouveau_software_init,
.fini = _nouveau_software_fini,
},
-};
+ .cclass = &nv50_software_cclass.base,
+ .sclass = nv50_software_sclass,
+}.base;
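
The `nv50_software_oclass = &(struct nv50_software_oclass) { ... }.base;` construction replaces a plain `struct nouveau_oclass` definition with a pointer to the `base` member of a file-scope compound literal, so chipset variants can carry extra fields (here `cclass`/`sclass`) behind the same exported symbol. Below is a minimal stand-alone sketch of that idiom only; the type and field names are hypothetical stand-ins, not nouveau's.

#include <stdio.h>

/* simplified stand-ins for nouveau_oclass and a chipset-specific wrapper */
struct base_class {
	unsigned handle;
};

struct wrapped_class {
	struct base_class base;
	const char *extra;		/* extra per-chipset data */
};

/* exported symbol points at the embedded base of a file-scope compound
 * literal, which has static storage duration in C99/C11 */
struct base_class *example_oclass = &(struct wrapped_class) {
	.base.handle = 0x50,
	.extra = "chipset-specific payload",
}.base;

int main(void)
{
	/* consumers see only the base; the implementation casts back up
	 * because base is the first member */
	struct wrapped_class *impl = (struct wrapped_class *)example_oclass;
	printf("handle=0x%x extra=%s\n", example_oclass->handle, impl->extra);
	return 0;
}
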
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
new file mode 100644
index 00000000000..2de370c2127
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -0,0 +1,47 @@
+#ifndef __NVKM_SW_NV50_H__
+#define __NVKM_SW_NV50_H__
+
+#include <engine/software.h>
+
+struct nv50_software_oclass {
+ struct nouveau_oclass base;
+ struct nouveau_oclass *cclass;
+ struct nouveau_oclass *sclass;
+};
+
+struct nv50_software_priv {
+ struct nouveau_software base;
+};
+
+int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+struct nv50_software_cclass {
+ struct nouveau_oclass base;
+ int (*vblank)(void *, int);
+};
+
+struct nv50_software_chan {
+ struct nouveau_software_chan base;
+ struct {
+ struct nouveau_eventh **event;
+ int nr_event;
+ u32 channel;
+ u32 ctxdma;
+ u64 offset;
+ u32 value;
+ } vblank;
+};
+
+int nv50_software_context_ctor(struct nouveau_object *,
+ struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv50_software_context_dtor(struct nouveau_object *);
+
+int nv50_software_mthd_vblsem_value(struct nouveau_object *, u32, void *, u32);
+int nv50_software_mthd_vblsem_release(struct nouveau_object *, u32, void *, u32);
+int nv50_software_mthd_flip(struct nouveau_object *, u32, void *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index d698e710ddd..f9430c1bf3e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -32,13 +32,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-struct nvc0_software_priv {
- struct nouveau_software base;
-};
-
-struct nvc0_software_chan {
- struct nouveau_software_chan base;
-};
+#include "nv50.h"
/*******************************************************************************
* software object classes
@@ -48,58 +42,24 @@ static int
nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
void *args, u32 size)
{
- struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
u64 data = *(u32 *)args;
if (mthd == 0x0400) {
- chan->base.vblank.offset &= 0x00ffffffffULL;
- chan->base.vblank.offset |= data << 32;
+ chan->vblank.offset &= 0x00ffffffffULL;
+ chan->vblank.offset |= data << 32;
} else {
- chan->base.vblank.offset &= 0xff00000000ULL;
- chan->base.vblank.offset |= data;
+ chan->vblank.offset &= 0xff00000000ULL;
+ chan->vblank.offset |= data;
}
return 0;
}
static int
-nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
- chan->base.vblank.value = *(u32 *)args;
- return 0;
-}
-
-static int
-nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
- struct nouveau_disp *disp = nouveau_disp(object);
- u32 crtc = *(u32 *)args;
-
- if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
- return -EINVAL;
-
- nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
- return 0;
-}
-
-static int
-nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
- if (chan->base.flip)
- return chan->base.flip(chan->base.flip_data);
- return -EINVAL;
-}
-
-static int
nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
void *args, u32 size)
{
- struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
- struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
u32 data = *(u32 *)args;
switch (mthd) {
@@ -124,9 +84,9 @@ static struct nouveau_omthds
nvc0_software_omthds[] = {
{ 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
{ 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
- { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
- { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
- { 0x0500, 0x0500, nvc0_software_mthd_flip },
+ { 0x0408, 0x0408, nv50_software_mthd_vblsem_value },
+ { 0x040c, 0x040c, nv50_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_software_mthd_flip },
{ 0x0600, 0x0600, nvc0_software_mthd_mp_control },
{ 0x0644, 0x0644, nvc0_software_mthd_mp_control },
{ 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
@@ -144,11 +104,10 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
-nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+nvc0_software_vblsem_release(void *data, int head)
{
- struct nouveau_software_chan *chan =
- container_of(event, struct nouveau_software_chan, vblank.event);
- struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nv50_software_chan *chan = data;
+ struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
struct nouveau_bar *bar = nouveau_bar(priv);
nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
@@ -160,66 +119,31 @@ nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
return NVKM_EVENT_DROP;
}
-static int
-nvc0_software_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_software_chan *chan;
- int ret;
-
- ret = nouveau_software_context_create(parent, engine, oclass, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
- chan->base.vblank.event.func = nvc0_software_vblsem_release;
- return 0;
-}
-
-static struct nouveau_oclass
+static struct nv50_software_cclass
nvc0_software_cclass = {
- .handle = NV_ENGCTX(SW, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_software_context_ctor,
+ .base.handle = NV_ENGCTX(SW, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_software_context_ctor,
.dtor = _nouveau_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
+ .vblank = nvc0_software_vblsem_release,
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
-static int
-nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_software_priv *priv;
- int ret;
-
- ret = nouveau_software_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_engine(priv)->cclass = &nvc0_software_cclass;
- nv_engine(priv)->sclass = nvc0_software_sclass;
- nv_subdev(priv)->intr = nv04_software_intr;
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_software_oclass = {
- .handle = NV_ENGINE(SW, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_software_ctor,
+struct nouveau_oclass *
+nvc0_software_oclass = &(struct nv50_software_oclass) {
+ .base.handle = NV_ENGINE(SW, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_software_ctor,
.dtor = _nouveau_software_dtor,
.init = _nouveau_software_init,
.fini = _nouveau_software_fini,
},
-};
+ .cclass = &nvc0_software_cclass.base,
+ .sclass = nvc0_software_sclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 5a5961b6a6a..560c3593dae 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -22,7 +22,7 @@
#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
-#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL
#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
struct nv_device_class {
@@ -98,6 +98,77 @@ struct nv_dma_class {
u32 conf0;
};
+/* Perfmon counter class
+ *
+ * XXXX: NV_PERFCTR
+ */
+#define NV_PERFCTR_CLASS 0x0000ffff
+#define NV_PERFCTR_QUERY 0x00000000
+#define NV_PERFCTR_SAMPLE 0x00000001
+#define NV_PERFCTR_READ 0x00000002
+
+struct nv_perfctr_class {
+ u16 logic_op;
+ struct {
+ char __user *name; /*XXX: use cfu when exposed to userspace */
+ u32 size;
+ } signal[4];
+};
+
+struct nv_perfctr_query {
+ u32 iter;
+ u32 size;
+ char __user *name; /*XXX: use ctu when exposed to userspace */
+};
+
+struct nv_perfctr_sample {
+};
+
+struct nv_perfctr_read {
+ u32 ctr;
+ u32 clk;
+};
+
+/* Device control class
+ *
+ * XXXX: NV_CONTROL
+ */
+#define NV_CONTROL_CLASS 0x0000fffe
+
+#define NV_CONTROL_PSTATE_INFO 0x00000000
+#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1)
+#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2)
+#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1)
+#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2)
+#define NV_CONTROL_PSTATE_ATTR 0x00000001
+#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1)
+#define NV_CONTROL_PSTATE_USER 0x00000002
+#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1)
+#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2)
+
+struct nv_control_pstate_info {
+ u32 count; /* out: number of power states */
+ s32 ustate; /* out: current target pstate index */
+ u32 pstate; /* out: current pstate index */
+};
+
+struct nv_control_pstate_attr {
+ s32 state; /* in: index of pstate to query
+ * out: pstate identifier
+ */
+ u32 index; /* in: index of attribute to query
+ * out: index of next attribute, or 0 if no more
+ */
+ char name[32];
+ char unit[16];
+ u32 min;
+ u32 max;
+};
+
+struct nv_control_pstate_user {
+ s32 state; /* in: pstate identifier */
+};
+
/* DMA FIFO channel classes
*
* 006b: NV03_CHANNEL_DMA
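
The `nv_control_pstate_attr` layout encodes a cursor-style walk: the caller passes an attribute `index` in, and the reply overwrites it with the index of the next attribute, 0 terminating the walk. How the struct is actually submitted to the control object is not part of this hunk, so the `query_pstate_attr()` helper below is purely hypothetical; the loop only illustrates the in/out cursor convention documented in the header, using a simplified mirror of the struct.

#include <stdio.h>

struct pstate_attr {			/* simplified mirror of nv_control_pstate_attr */
	int state;			/* in: pstate to query, out: pstate identifier */
	unsigned index;			/* in: attribute index, out: next index, 0 = no more */
	char name[32];
	char unit[16];
	unsigned min, max;
};

/* hypothetical stand-in for whatever method call submits the query */
static int query_pstate_attr(struct pstate_attr *a)
{
	static const char *names[] = { "core", "memory" };
	if (a->index >= 2)
		return -1;
	snprintf(a->name, sizeof(a->name), "%s", names[a->index]);
	snprintf(a->unit, sizeof(a->unit), "MHz");
	a->min = 100;
	a->max = 1000;
	a->index = (a->index + 1 < 2) ? a->index + 1 : 0;
	return 0;
}

int main(void)
{
	struct pstate_attr attr = { .state = 0, .index = 0 };
	do {
		if (query_pstate_attr(&attr))
			break;
		printf("%s: %u-%u %s\n", attr.name, attr.min, attr.max, attr.unit);
	} while (attr.index);		/* 0 terminates the walk */
	return 0;
}
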
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
index 9ea18dfcb4d..8092e2e9032 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/debug.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -1,13 +1,20 @@
#ifndef __NOUVEAU_DEBUG_H__
#define __NOUVEAU_DEBUG_H__
+extern int nv_info_debug_level;
+
#define NV_DBG_FATAL 0
#define NV_DBG_ERROR 1
#define NV_DBG_WARN 2
-#define NV_DBG_INFO 3
+#define NV_DBG_INFO nv_info_debug_level
#define NV_DBG_DEBUG 4
#define NV_DBG_TRACE 5
#define NV_DBG_PARANOIA 6
#define NV_DBG_SPAM 7
+#define NV_DBG_INFO_NORMAL 3
+#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
+
+#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
+
#endif
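
With `NV_DBG_INFO` now expanding to the runtime variable `nv_info_debug_level`, the `nv_debug_level(SILENT)` / `nv_debug_level(NORMAL)` macro can demote info messages to debug verbosity temporarily (replacing the removed nv_suspend machinery in printk.h further below). A small stand-alone mirror of the scheme, with made-up macro names, kept in the same `f,a...` macro style the driver uses:

#include <stdio.h>

/* INFO's level is a runtime variable; everything else is a constant */
static int info_level = 3;		/* analogue of NV_DBG_INFO_NORMAL */
#define DBG_ERROR 1
#define DBG_INFO  info_level
#define DBG_DEBUG 4
#define MAX_LEVEL 3			/* build-time verbosity cap */

#define log(l,f,a...) do { \
	if (DBG_##l <= MAX_LEVEL) \
		printf(f, ##a); \
} while (0)

int main(void)
{
	log(INFO, "printed: info at normal level\n");
	info_level = DBG_DEBUG;		/* nv_debug_level(SILENT) analogue */
	log(INFO, "suppressed: info demoted to debug\n");
	info_level = 3;			/* nv_debug_level(NORMAL) analogue */
	log(INFO, "printed again\n");
	return 0;
}
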
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 99b6600fe80..ac2881d1776 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -33,9 +33,10 @@ enum nv_subdev_type {
NVDEV_SUBDEV_INSTMEM,
NVDEV_SUBDEV_VM,
NVDEV_SUBDEV_BAR,
+ NVDEV_SUBDEV_PWR,
NVDEV_SUBDEV_VOLT,
- NVDEV_SUBDEV_CLOCK,
NVDEV_SUBDEV_THERM,
+ NVDEV_SUBDEV_CLOCK,
NVDEV_ENGINE_DMAOBJ,
NVDEV_ENGINE_FIFO,
@@ -50,9 +51,10 @@ enum nv_subdev_type {
NVDEV_ENGINE_COPY0,
NVDEV_ENGINE_COPY1,
NVDEV_ENGINE_COPY2,
- NVDEV_ENGINE_UNK1C1,
+ NVDEV_ENGINE_VIC,
NVDEV_ENGINE_VENC,
NVDEV_ENGINE_DISP,
+ NVDEV_ENGINE_PERFMON,
NVDEV_SUBDEV_NR,
};
@@ -72,6 +74,7 @@ struct nouveau_device {
enum {
NV_04 = 0x04,
NV_10 = 0x10,
+ NV_11 = 0x11,
NV_20 = 0x20,
NV_30 = 0x30,
NV_40 = 0x40,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 9e094408f14..5d539ebff3e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -5,13 +5,21 @@
#define NVKM_EVENT_DROP 0
#define NVKM_EVENT_KEEP 1
+/* nouveau_eventh.flags bit #s */
+#define NVKM_EVENT_ENABLE 0
+
struct nouveau_eventh {
+ struct nouveau_event *event;
struct list_head head;
- int (*func)(struct nouveau_eventh *, int index);
+ unsigned long flags;
+ int index;
+ int (*func)(void *, int);
+ void *priv;
};
struct nouveau_event {
- spinlock_t lock;
+ spinlock_t list_lock;
+ spinlock_t refs_lock;
void *priv;
void (*enable)(struct nouveau_event *, int index);
@@ -28,9 +36,11 @@ int nouveau_event_create(int index_nr, struct nouveau_event **);
void nouveau_event_destroy(struct nouveau_event **);
void nouveau_event_trigger(struct nouveau_event *, int index);
-void nouveau_event_get(struct nouveau_event *, int index,
- struct nouveau_eventh *);
-void nouveau_event_put(struct nouveau_event *, int index,
- struct nouveau_eventh *);
+int nouveau_event_new(struct nouveau_event *, int index,
+ int (*func)(void *, int), void *,
+ struct nouveau_eventh **);
+void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
+void nouveau_event_get(struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_eventh *);
#endif
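
The event API moves from a `struct nouveau_eventh` embedded in each user to reference-counted handlers: `nouveau_event_new()` allocates one bound to a func/priv pair and an index, `nouveau_event_ref(NULL, &handle)` drops it (as the new nv50 software context dtor does), and `nouveau_event_get()`/`put()` now take only the handler. The snippet below is an illustrative userspace model of that lifecycle, not the kernel implementation; it omits locking and enabling/disabling of the event source.

#include <stdio.h>
#include <stdlib.h>

struct eventh {				/* simplified model of nouveau_eventh */
	int index;
	int refs;
	int (*func)(void *, int);
	void *priv;
};

static int event_new(int index, int (*func)(void *, int), void *priv,
		     struct eventh **pevent)
{
	struct eventh *h = calloc(1, sizeof(*h));
	if (!h)
		return -1;
	h->index = index;
	h->refs = 1;
	h->func = func;
	h->priv = priv;
	*pevent = h;
	return 0;
}

static void event_ref(struct eventh *h, struct eventh **ref)
{
	if (h)
		h->refs++;
	if (*ref && --(*ref)->refs == 0)	/* event_ref(NULL, &p) drops p */
		free(*ref);
	*ref = h;
}

static int handler(void *priv, int head)
{
	printf("vblank on head %d (%s)\n", head, (const char *)priv);
	return 0;
}

int main(void)
{
	struct eventh *ev = NULL;
	if (event_new(0, handler, (void *)"chan0", &ev))
		return 1;
	ev->func(ev->priv, ev->index);	/* what trigger would invoke */
	event_ref(NULL, &ev);		/* release, as the nv50 dtor does */
	return 0;
}
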
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
index 27074957fd2..ed055847887 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/option.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -8,4 +8,13 @@ bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
int nouveau_dbgopt(const char *optstr, const char *sub);
+/* compares unterminated string 'str' with zero-terminated string 'cmp' */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+ if (strlen(cmp) != len)
+ return len;
+ return strncasecmp(str, cmp, len);
+}
+
#endif
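
`strncasecmpz()` lets option parsing compare a length-delimited token (a slice of the option string, not NUL-terminated) against a NUL-terminated keyword, rejecting bare prefix matches by checking the lengths first. A small stand-alone check of that behaviour, using a local copy of the helper since `strncasecmp()` is its only libc dependency:

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* local copy of the helper: 'str' is not NUL-terminated, 'cmp' is */
static int strncasecmpz(const char *str, const char *cmp, size_t len)
{
	if (strlen(cmp) != len)
		return len;
	return strncasecmp(str, cmp, len);
}

int main(void)
{
	const char *opts = "debug,TRACE,warn";	/* e.g. a comma-separated option string */
	const char *tok = opts + 6;		/* points into "TRACE,warn" */
	size_t len = 5;				/* length of the token only */

	printf("%d\n", strncasecmpz(tok, "trace", len) == 0);	/* 1: case-insensitive match */
	printf("%d\n", strncasecmpz(tok, "tra", len) == 0);	/* 0: prefix does not match */
	return 0;
}
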
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index d87836e3a70..0f9a37bd32b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -6,27 +6,12 @@
struct nouveau_object;
-#define NV_PRINTK_FATAL KERN_CRIT
-#define NV_PRINTK_ERROR KERN_ERR
-#define NV_PRINTK_WARN KERN_WARNING
-#define NV_PRINTK_INFO KERN_INFO
-#define NV_PRINTK_DEBUG KERN_DEBUG
-#define NV_PRINTK_PARANOIA KERN_DEBUG
-#define NV_PRINTK_TRACE KERN_DEBUG
-#define NV_PRINTK_SPAM KERN_DEBUG
-
-extern int nv_printk_suspend_level;
-
-#define NV_DBG_SUSPEND (nv_printk_suspend_level)
-#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
-
-const char *nv_printk_level_to_pfx(int level);
-void __printf(4, 5)
-nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+void __printf(3, 4)
+nv_printk_(struct nouveau_object *, int, const char *, ...);
#define nv_printk(o,l,f,a...) do { \
if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
- nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a); \
+ nv_printk_(nv_object(o), NV_DBG_##l, f, ##a); \
} while(0)
#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
@@ -37,16 +22,9 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
-#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
-
-static inline void nv_suspend_set_printk_level(int level)
-{
- nv_printk_suspend_level = level;
-}
-
#define nv_assert(f,a...) do { \
if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
- nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
+ nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \
BUG_ON(1); \
} while(0)
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 633c2f80648..8c32cf4d83c 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -101,14 +101,14 @@ nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
#define _nouveau_fifo_init _nouveau_engine_init
#define _nouveau_fifo_fini _nouveau_engine_fini
-extern struct nouveau_oclass nv04_fifo_oclass;
-extern struct nouveau_oclass nv10_fifo_oclass;
-extern struct nouveau_oclass nv17_fifo_oclass;
-extern struct nouveau_oclass nv40_fifo_oclass;
-extern struct nouveau_oclass nv50_fifo_oclass;
-extern struct nouveau_oclass nv84_fifo_oclass;
-extern struct nouveau_oclass nvc0_fifo_oclass;
-extern struct nouveau_oclass nve0_fifo_oclass;
+extern struct nouveau_oclass *nv04_fifo_oclass;
+extern struct nouveau_oclass *nv10_fifo_oclass;
+extern struct nouveau_oclass *nv17_fifo_oclass;
+extern struct nouveau_oclass *nv40_fifo_oclass;
+extern struct nouveau_oclass *nv50_fifo_oclass;
+extern struct nouveau_oclass *nv84_fifo_oclass;
+extern struct nouveau_oclass *nvc0_fifo_oclass;
+extern struct nouveau_oclass *nve0_fifo_oclass;
void nv04_fifo_intr(struct nouveau_subdev *);
int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
index 1d1a89a06ee..9b0d938199f 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -42,10 +42,13 @@ struct nouveau_mpeg {
extern struct nouveau_oclass nv31_mpeg_oclass;
extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv44_mpeg_oclass;
extern struct nouveau_oclass nv50_mpeg_oclass;
extern struct nouveau_oclass nv84_mpeg_oclass;
-
+extern struct nouveau_ofuncs nv31_mpeg_ofuncs;
+extern struct nouveau_oclass nv31_mpeg_cclass;
extern struct nouveau_oclass nv31_mpeg_sclass[];
+extern struct nouveau_oclass nv40_mpeg_sclass[];
void nv31_mpeg_intr(struct nouveau_subdev *);
void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
int nv31_mpeg_init(struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
new file mode 100644
index 00000000000..49b0024910f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_PERFMON_H__
+#define __NVKM_PERFMON_H__
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/class.h>
+
+struct nouveau_perfdom;
+struct nouveau_perfctr;
+struct nouveau_perfmon {
+ struct nouveau_engine base;
+
+ struct nouveau_perfctx *context;
+ void *profile_data;
+
+ struct list_head domains;
+ u32 sequence;
+
+ /*XXX: temp for daemon backend */
+ u32 pwr[8];
+ u32 last;
+};
+
+static inline struct nouveau_perfmon *
+nouveau_perfmon(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_PERFMON];
+}
+
+extern struct nouveau_oclass *nv40_perfmon_oclass;
+extern struct nouveau_oclass *nv50_perfmon_oclass;
+extern struct nouveau_oclass *nv84_perfmon_oclass;
+extern struct nouveau_oclass *nva3_perfmon_oclass;
+extern struct nouveau_oclass nvc0_perfmon_oclass;
+extern struct nouveau_oclass nve0_perfmon_oclass;
+extern struct nouveau_oclass nvf0_perfmon_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index 45799487e57..23a462b50d0 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,19 +3,10 @@
#include <core/engine.h>
#include <core/engctx.h>
-#include <core/event.h>
struct nouveau_software_chan {
struct nouveau_engctx base;
- struct {
- struct nouveau_eventh event;
- u32 channel;
- u32 ctxdma;
- u64 offset;
- u32 value;
- } vblank;
-
int (*flip)(void *);
void *flip_data;
};
@@ -50,10 +41,10 @@ struct nouveau_software {
#define _nouveau_software_init _nouveau_engine_init
#define _nouveau_software_fini _nouveau_engine_fini
-extern struct nouveau_oclass nv04_software_oclass;
-extern struct nouveau_oclass nv10_software_oclass;
-extern struct nouveau_oclass nv50_software_oclass;
-extern struct nouveau_oclass nvc0_software_oclass;
+extern struct nouveau_oclass *nv04_software_oclass;
+extern struct nouveau_oclass *nv10_software_oclass;
+extern struct nouveau_oclass *nv50_software_oclass;
+extern struct nouveau_oclass *nvc0_software_oclass;
void nv04_software_intr(struct nouveau_subdev *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
new file mode 100644
index 00000000000..662b2072685
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
@@ -0,0 +1,29 @@
+#ifndef __NVBIOS_BOOST_H__
+#define __NVBIOS_BOOST_H__
+
+u16 nvbios_boostTe(struct nouveau_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+
+struct nvbios_boostE {
+ u8 pstate;
+ u32 min;
+ u32 max;
+};
+
+u16 nvbios_boostEe(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u16 nvbios_boostEp(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+ struct nvbios_boostE *);
+u16 nvbios_boostEm(struct nouveau_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+ struct nvbios_boostE *);
+
+struct nvbios_boostS {
+ u8 domain;
+ u8 percent;
+ u32 min;
+ u32 max;
+};
+
+u16 nvbios_boostSe(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8);
+u16 nvbios_boostSp(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8,
+ struct nvbios_boostS *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
new file mode 100644
index 00000000000..a80a4380988
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
@@ -0,0 +1,28 @@
+#ifndef __NVBIOS_CSTEP_H__
+#define __NVBIOS_CSTEP_H__
+
+u16 nvbios_cstepTe(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
+
+struct nvbios_cstepE {
+ u8 pstate;
+ u8 index;
+};
+
+u16 nvbios_cstepEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_cstepE *);
+u16 nvbios_cstepEm(struct nouveau_bios *, u8 pstate, u8 *ver, u8 *hdr,
+ struct nvbios_cstepE *);
+
+struct nvbios_cstepX {
+ u32 freq;
+ u8 unkn[2];
+ u8 voltage;
+};
+
+u16 nvbios_cstepXe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepXp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_cstepX *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 96d3364f6db..c7b2e586be0 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -7,7 +7,15 @@ enum dcb_gpio_func_name {
DCB_GPIO_TVDAC1 = 0x2d,
DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
- DCB_GPIO_UNUSED = 0xff
+ DCB_GPIO_UNUSED = 0xff,
+ DCB_GPIO_VID0 = 0x04,
+ DCB_GPIO_VID1 = 0x05,
+ DCB_GPIO_VID2 = 0x06,
+ DCB_GPIO_VID3 = 0x1a,
+ DCB_GPIO_VID4 = 0x73,
+ DCB_GPIO_VID5 = 0x74,
+ DCB_GPIO_VID6 = 0x75,
+ DCB_GPIO_VID7 = 0x76,
};
#define DCB_GPIO_LOG_DIR 0x02
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
index 0b285e99be5..16ff06ec2a8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -3,6 +3,39 @@
struct nouveau_bios;
+u16 nvbios_perf_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+struct nvbios_perfE {
+ u8 pstate;
+ u8 fanspeed;
+ u8 voltage;
+ u32 core;
+ u32 shader;
+ u32 memory;
+ u32 vdec;
+ u32 disp;
+ u32 script;
+};
+
+u16 nvbios_perf_entry(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_perfEp(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
+
+struct nvbios_perfS {
+ union {
+ struct {
+ u32 freq;
+ } v40;
+ };
+};
+
+u32 nvbios_perfSe(struct nouveau_bios *, u32 data, int idx,
+ u8 *ver, u8 *hdr, u8 cnt, u8 len);
+u32 nvbios_perfSp(struct nouveau_bios *, u32 data, int idx,
+ u8 *ver, u8 *hdr, u8 cnt, u8 len, struct nvbios_perfS *);
+
struct nvbios_perf_fan {
u32 pwm_divisor;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
new file mode 100644
index 00000000000..bc15e032087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -0,0 +1,11 @@
+#ifndef __NVBIOS_RAMMAP_H__
+#define __NVBIOS_RAMMAP_H__
+
+u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
new file mode 100644
index 00000000000..963694b5422
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -0,0 +1,8 @@
+#ifndef __NVBIOS_TIMING_H__
+#define __NVBIOS_TIMING_H__
+
+u16 nvbios_timing_table(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
new file mode 100644
index 00000000000..ad5a8f20e11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
@@ -0,0 +1,25 @@
+#ifndef __NVBIOS_VMAP_H__
+#define __NVBIOS_VMAP_H__
+
+struct nouveau_bios;
+
+struct nvbios_vmap {
+};
+
+u16 nvbios_vmap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_vmap_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_vmap *);
+
+struct nvbios_vmap_entry {
+ u8 unk0;
+ u8 link;
+ u32 min;
+ u32 max;
+ s32 arg[6];
+};
+
+u16 nvbios_vmap_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_vmap_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
+ struct nvbios_vmap_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
new file mode 100644
index 00000000000..6a11dcd5977
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_VOLT_H__
+#define __NVBIOS_VOLT_H__
+
+struct nouveau_bios;
+
+struct nvbios_volt {
+ u8 vidmask;
+ u32 min;
+ u32 max;
+ u32 base;
+ s16 step;
+};
+
+u16 nvbios_volt_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_volt_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_volt *);
+
+struct nvbios_volt_entry {
+ u32 voltage;
+ u8 vid;
+};
+
+u16 nvbios_volt_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_volt_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
+ struct nvbios_volt_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
index 7d88ec4a6d0..697f7ce70aa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -11,6 +11,8 @@ struct nouveau_bus_intr {
struct nouveau_bus {
struct nouveau_subdev base;
+ int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
+ u32 hwsq_size;
};
static inline struct nouveau_bus *
@@ -33,9 +35,19 @@ nouveau_bus(void *obj)
#define _nouveau_bus_init _nouveau_subdev_init
#define _nouveau_bus_fini _nouveau_subdev_fini
-extern struct nouveau_oclass nv04_bus_oclass;
-extern struct nouveau_oclass nv31_bus_oclass;
-extern struct nouveau_oclass nv50_bus_oclass;
-extern struct nouveau_oclass nvc0_bus_oclass;
+extern struct nouveau_oclass *nv04_bus_oclass;
+extern struct nouveau_oclass *nv31_bus_oclass;
+extern struct nouveau_oclass *nv50_bus_oclass;
+extern struct nouveau_oclass *nv94_bus_oclass;
+extern struct nouveau_oclass *nvc0_bus_oclass;
+
+/* interface to sequencer */
+struct nouveau_hwsq;
+int nouveau_hwsq_init(struct nouveau_bus *, struct nouveau_hwsq **);
+int nouveau_hwsq_fini(struct nouveau_hwsq **, bool exec);
+void nouveau_hwsq_wr32(struct nouveau_hwsq *, u32 addr, u32 data);
+void nouveau_hwsq_setf(struct nouveau_hwsq *, u8 flag, int data);
+void nouveau_hwsq_wait(struct nouveau_hwsq *, u8 flag, u8 data);
+void nouveau_hwsq_nsec(struct nouveau_hwsq *, u32 nsec);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 89ee289097a..e2675bc0edb 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -7,9 +7,78 @@
struct nouveau_pll_vals;
struct nvbios_pll;
+enum nv_clk_src {
+ nv_clk_src_crystal,
+ nv_clk_src_href,
+
+ nv_clk_src_hclk,
+ nv_clk_src_hclkm3,
+ nv_clk_src_hclkm3d2,
+
+ nv_clk_src_host,
+
+ nv_clk_src_sppll0,
+ nv_clk_src_sppll1,
+
+ nv_clk_src_mpllsrcref,
+ nv_clk_src_mpllsrc,
+ nv_clk_src_mpll,
+ nv_clk_src_mdiv,
+
+ nv_clk_src_core,
+ nv_clk_src_shader,
+
+ nv_clk_src_mem,
+
+ nv_clk_src_gpc,
+ nv_clk_src_rop,
+ nv_clk_src_hubk01,
+ nv_clk_src_hubk06,
+ nv_clk_src_hubk07,
+ nv_clk_src_copy,
+ nv_clk_src_daemon,
+ nv_clk_src_disp,
+ nv_clk_src_vdec,
+
+ nv_clk_src_dom6,
+
+ nv_clk_src_max,
+};
+
+struct nouveau_cstate {
+ struct list_head head;
+ u8 voltage;
+ u32 domain[nv_clk_src_max];
+};
+
+struct nouveau_pstate {
+ struct list_head head;
+ struct list_head list; /* c-states */
+ struct nouveau_cstate base;
+ u8 pstate;
+ u8 fanspeed;
+};
+
struct nouveau_clock {
struct nouveau_subdev base;
+ struct nouveau_clocks *domains;
+ struct nouveau_pstate bstate;
+
+ struct list_head states;
+ int state_nr;
+
+ int pstate; /* current */
+ int ustate; /* user-requested (-1 disabled, -2 perfmon) */
+ int astate; /* perfmon adjustment (base) */
+ int tstate; /* thermal adjustment (max-) */
+ int dstate; /* display adjustment (min+) */
+
+ int (*read)(struct nouveau_clock *, enum nv_clk_src);
+ int (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
+ int (*prog)(struct nouveau_clock *);
+ void (*tidy)(struct nouveau_clock *);
+
/*XXX: die, these are here *only* to support the completely
* bat-shit insane what-was-nouveau_hw.c code
*/
@@ -25,27 +94,42 @@ nouveau_clock(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
}
-#define nouveau_clock_create(p,e,o,d) \
- nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
-#define nouveau_clock_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_clock_init(p) \
- nouveau_subdev_init(&(p)->base)
+struct nouveau_clocks {
+ enum nv_clk_src name;
+ u8 bios; /* 0xff for none */
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+ u8 flags;
+ const char *mname;
+ int mdiv;
+};
+
+#define nouveau_clock_create(p,e,o,i,d) \
+ nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
+#define nouveau_clock_destroy(p) ({ \
+ struct nouveau_clock *clk = (p); \
+ _nouveau_clock_dtor(nv_object(clk)); \
+})
+#define nouveau_clock_init(p) ({ \
+ struct nouveau_clock *clk = (p); \
+ _nouveau_clock_init(nv_object(clk)); \
+})
#define nouveau_clock_fini(p,s) \
nouveau_subdev_fini(&(p)->base, (s))
int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *, u32, int, void **);
-
-#define _nouveau_clock_dtor _nouveau_subdev_dtor
-#define _nouveau_clock_init _nouveau_subdev_init
+ struct nouveau_oclass *,
+ struct nouveau_clocks *, int, void **);
+void _nouveau_clock_dtor(struct nouveau_object *);
+int _nouveau_clock_init(struct nouveau_object *);
#define _nouveau_clock_fini _nouveau_subdev_fini
extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass;
-extern struct nouveau_oclass nv50_clock_oclass;
+extern struct nouveau_oclass *nv50_clock_oclass;
+extern struct nouveau_oclass *nv84_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass;
+extern struct nouveau_oclass nve0_clock_oclass;
int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -55,4 +139,9 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
int clk, struct nouveau_pll_vals *);
+int nouveau_clock_ustate(struct nouveau_clock *, int req);
+int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
+int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
+int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
+
#endif
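
Each `struct nouveau_clocks` entry describes one clock domain of a chipset: which `nv_clk_src` it is, the id used by the VBIOS performance tables (0xff when the domain has no BIOS counterpart), flags, a user-visible name and a divider. The real per-chipset tables live in the clock subdev sources and are not part of this hunk, so the values below are purely hypothetical; the snippet only illustrates the table shape, using simplified mirrors of the enum and struct from the header.

#include <stdio.h>

enum clk_src { clk_src_core, clk_src_shader, clk_src_mem, clk_src_max };

struct clk_domain {			/* simplified mirror of nouveau_clocks */
	enum clk_src name;
	unsigned char bios;		/* 0xff for none */
	unsigned char flags;
	const char *mname;
	int mdiv;
};

/* hypothetical domain table; real tables are chipset-specific */
static const struct clk_domain domains[] = {
	{ clk_src_core,   0x00, 0x01, "core",   1000 },
	{ clk_src_shader, 0x01, 0x00, "shader", 1000 },
	{ clk_src_mem,    0x03, 0x00, "memory", 1000 },
	{ clk_src_max }
};

int main(void)
{
	const struct clk_domain *d;
	for (d = domains; d->name != clk_src_max; d++)
		printf("%-8s bios=0x%02x div=%d\n", d->mname, d->bios, d->mdiv);
	return 0;
}
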
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 2e740508426..8541aa382ff 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -78,23 +78,28 @@ nouveau_fb(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
}
-extern struct nouveau_oclass nv04_fb_oclass;
-extern struct nouveau_oclass nv10_fb_oclass;
-extern struct nouveau_oclass nv1a_fb_oclass;
-extern struct nouveau_oclass nv20_fb_oclass;
-extern struct nouveau_oclass nv25_fb_oclass;
-extern struct nouveau_oclass nv30_fb_oclass;
-extern struct nouveau_oclass nv35_fb_oclass;
-extern struct nouveau_oclass nv36_fb_oclass;
-extern struct nouveau_oclass nv40_fb_oclass;
-extern struct nouveau_oclass nv41_fb_oclass;
-extern struct nouveau_oclass nv44_fb_oclass;
-extern struct nouveau_oclass nv46_fb_oclass;
-extern struct nouveau_oclass nv47_fb_oclass;
-extern struct nouveau_oclass nv49_fb_oclass;
-extern struct nouveau_oclass nv4e_fb_oclass;
-extern struct nouveau_oclass nv50_fb_oclass;
-extern struct nouveau_oclass nvc0_fb_oclass;
+extern struct nouveau_oclass *nv04_fb_oclass;
+extern struct nouveau_oclass *nv10_fb_oclass;
+extern struct nouveau_oclass *nv1a_fb_oclass;
+extern struct nouveau_oclass *nv20_fb_oclass;
+extern struct nouveau_oclass *nv25_fb_oclass;
+extern struct nouveau_oclass *nv30_fb_oclass;
+extern struct nouveau_oclass *nv35_fb_oclass;
+extern struct nouveau_oclass *nv36_fb_oclass;
+extern struct nouveau_oclass *nv40_fb_oclass;
+extern struct nouveau_oclass *nv41_fb_oclass;
+extern struct nouveau_oclass *nv44_fb_oclass;
+extern struct nouveau_oclass *nv46_fb_oclass;
+extern struct nouveau_oclass *nv47_fb_oclass;
+extern struct nouveau_oclass *nv49_fb_oclass;
+extern struct nouveau_oclass *nv4e_fb_oclass;
+extern struct nouveau_oclass *nv50_fb_oclass;
+extern struct nouveau_oclass *nv84_fb_oclass;
+extern struct nouveau_oclass *nva3_fb_oclass;
+extern struct nouveau_oclass *nvaa_fb_oclass;
+extern struct nouveau_oclass *nvaf_fb_oclass;
+extern struct nouveau_oclass *nvc0_fb_oclass;
+extern struct nouveau_oclass *nve0_fb_oclass;
struct nouveau_ram {
struct nouveau_object base;
@@ -121,6 +126,17 @@ struct nouveau_ram {
int (*get)(struct nouveau_fb *, u64 size, u32 align,
u32 size_nc, u32 type, struct nouveau_mem **);
void (*put)(struct nouveau_fb *, struct nouveau_mem **);
+
+ int (*calc)(struct nouveau_fb *, u32 freq);
+ int (*prog)(struct nouveau_fb *);
+ void (*tidy)(struct nouveau_fb *);
+ struct {
+ u8 version;
+ u32 data;
+ u8 size;
+ } rammap, ramcfg, timing;
+ u32 freq;
+ u32 mr[16];
};
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7e4e2775f24..9fa5da72387 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -60,13 +60,18 @@ void _nouveau_i2c_port_dtor(struct nouveau_object *);
#define _nouveau_i2c_port_init nouveau_object_init
#define _nouveau_i2c_port_fini nouveau_object_fini
+struct nouveau_i2c_board_info {
+ struct i2c_board_info dev;
+ u8 udelay; /* set to 0 to use the standard delay */
+};
+
struct nouveau_i2c {
struct nouveau_subdev base;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
int (*identify)(struct nouveau_i2c *, int index,
- const char *what, struct i2c_board_info *,
+ const char *what, struct nouveau_i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
struct i2c_board_info *));
struct list_head ports;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index ce6569f365a..adc88b73d91 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -11,7 +11,6 @@ struct nouveau_mc_intr {
struct nouveau_mc {
struct nouveau_subdev base;
- const struct nouveau_mc_intr *intr_map;
bool use_msi;
};
@@ -21,8 +20,8 @@ nouveau_mc(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,m,d) \
- nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,d) \
+ nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nouveau_mc_destroy(p) ({ \
struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
})
@@ -34,20 +33,24 @@ nouveau_mc(void *obj)
})
int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, const struct nouveau_mc_intr *,
- int, void **);
+ struct nouveau_oclass *, int, void **);
void _nouveau_mc_dtor(struct nouveau_object *);
int _nouveau_mc_init(struct nouveau_object *);
int _nouveau_mc_fini(struct nouveau_object *, bool);
-extern struct nouveau_oclass nv04_mc_oclass;
-extern struct nouveau_oclass nv44_mc_oclass;
-extern struct nouveau_oclass nv50_mc_oclass;
-extern struct nouveau_oclass nv98_mc_oclass;
-extern struct nouveau_oclass nvc0_mc_oclass;
+struct nouveau_mc_oclass {
+ struct nouveau_oclass base;
+ const struct nouveau_mc_intr *intr;
+ void (*msi_rearm)(struct nouveau_mc *);
+};
-extern const struct nouveau_mc_intr nv04_mc_intr[];
-int nv04_mc_init(struct nouveau_object *);
-int nv50_mc_init(struct nouveau_object *);
+extern struct nouveau_oclass *nv04_mc_oclass;
+extern struct nouveau_oclass *nv40_mc_oclass;
+extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv50_mc_oclass;
+extern struct nouveau_oclass *nv94_mc_oclass;
+extern struct nouveau_oclass *nv98_mc_oclass;
+extern struct nouveau_oclass *nvc0_mc_oclass;
+extern struct nouveau_oclass *nvc3_mc_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
new file mode 100644
index 00000000000..c5c92cbed33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -0,0 +1,80 @@
+#ifndef __NOUVEAU_PWR_H__
+#define __NOUVEAU_PWR_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_pwr {
+ struct nouveau_subdev base;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } code;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } data;
+
+ struct {
+ u32 base;
+ u32 size;
+ } send;
+
+ struct {
+ u32 base;
+ u32 size;
+
+ struct work_struct work;
+ wait_queue_head_t wait;
+ u32 process;
+ u32 message;
+ u32 data[2];
+ } recv;
+
+ int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+};
+
+static inline struct nouveau_pwr *
+nouveau_pwr(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
+}
+
+#define nouveau_pwr_create(p, e, o, d) \
+ nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_pwr_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_pwr_init(p) ({ \
+ struct nouveau_pwr *ppwr = (p); \
+ _nouveau_pwr_init(nv_object(ppwr)); \
+})
+#define nouveau_pwr_fini(p,s) ({ \
+ struct nouveau_pwr *ppwr = (p); \
+ _nouveau_pwr_fini(nv_object(ppwr), (s)); \
+})
+
+int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+#define _nouveau_pwr_dtor _nouveau_subdev_dtor
+int _nouveau_pwr_init(struct nouveau_object *);
+int _nouveau_pwr_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nva3_pwr_oclass;
+extern struct nouveau_oclass nvc0_pwr_oclass;
+extern struct nouveau_oclass nvd0_pwr_oclass;
+extern struct nouveau_oclass nv108_pwr_oclass;
+
+/* interface to MEMX process running on PPWR */
+struct nouveau_memx;
+int nouveau_memx_init(struct nouveau_pwr *, struct nouveau_memx **);
+int nouveau_memx_fini(struct nouveau_memx **, bool exec);
+void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
+void nouveau_memx_wait(struct nouveau_memx *,
+ u32 addr, u32 mask, u32 data, u32 nsec);
+void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index c075998d82e..69891d4a3fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -71,6 +71,8 @@ void _nouveau_therm_dtor(struct nouveau_object *);
int _nouveau_therm_init(struct nouveau_object *);
int _nouveau_therm_fini(struct nouveau_object *, bool);
+int nouveau_therm_cstate(struct nouveau_therm *, int, int);
+
extern struct nouveau_oclass nv40_therm_oclass;
extern struct nouveau_oclass nv50_therm_oclass;
extern struct nouveau_oclass nv84_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
new file mode 100644
index 00000000000..820b62ffd75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_VOLT_H__
+#define __NOUVEAU_VOLT_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_voltage {
+ u32 uv;
+ u8 id;
+};
+
+struct nouveau_volt {
+ struct nouveau_subdev base;
+
+ int (*vid_get)(struct nouveau_volt *);
+ int (*get)(struct nouveau_volt *);
+ int (*vid_set)(struct nouveau_volt *, u8 vid);
+ int (*set)(struct nouveau_volt *, u32 uv);
+ int (*set_id)(struct nouveau_volt *, u8 id, int condition);
+
+ u8 vid_mask;
+ u8 vid_nr;
+ struct {
+ u32 uv;
+ u8 vid;
+ } vid[256];
+};
+
+static inline struct nouveau_volt *
+nouveau_volt(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VOLT];
+}
+
+#define nouveau_volt_create(p, e, o, d) \
+ nouveau_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_volt_destroy(p) ({ \
+ struct nouveau_volt *v = (p); \
+ _nouveau_volt_dtor(nv_object(v)); \
+})
+#define nouveau_volt_init(p) ({ \
+ struct nouveau_volt *v = (p); \
+ _nouveau_volt_init(nv_object(v)); \
+})
+#define nouveau_volt_fini(p,s) \
+ nouveau_subdev_fini((p), (s))
+
+int nouveau_volt_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_volt_dtor(struct nouveau_object *);
+int _nouveau_volt_init(struct nouveau_object *);
+#define _nouveau_volt_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv40_volt_oclass;
+
+int nouveau_voltgpio_init(struct nouveau_volt *);
+int nouveau_voltgpio_get(struct nouveau_volt *);
+int nouveau_voltgpio_set(struct nouveau_volt *, u8);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
new file mode 100644
index 00000000000..c1835e591c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/boost.h>
+
+u16
+nvbios_boostTe(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+ struct bit_entry bit_P;
+ u16 boost = 0x0000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ boost = nv_ro16(bios, bit_P.offset + 0x30);
+
+ if (boost) {
+ *ver = nv_ro08(bios, boost + 0);
+ switch (*ver) {
+ case 0x11:
+ *hdr = nv_ro08(bios, boost + 1);
+ *cnt = nv_ro08(bios, boost + 5);
+ *len = nv_ro08(bios, boost + 2);
+ *snr = nv_ro08(bios, boost + 4);
+ *ssz = nv_ro08(bios, boost + 3);
+ return boost;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_boostEe(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u8 snr, ssz;
+ u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ if (data && idx < *cnt) {
+ data = data + *hdr + (idx * (*len + (snr * ssz)));
+ *hdr = *len;
+ *cnt = snr;
+ *len = ssz;
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_boostEp(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
+{
+ u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
+ info->min = nv_ro16(bios, data + 0x02) * 1000;
+ info->max = nv_ro16(bios, data + 0x04) * 1000;
+ }
+ return data;
+}
+
+u16
+nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
+{
+ u32 data, idx = 0;
+ while ((data = nvbios_boostEp(bios, idx++, ver, hdr, cnt, len, info))) {
+ if (info->pstate == pstate)
+ break;
+ }
+ return data;
+}
+
+u16
+nvbios_boostSe(struct nouveau_bios *bios, int idx,
+ u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+ if (data && idx < cnt) {
+ data = data + *hdr + (idx * len);
+ *hdr = len;
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_boostSp(struct nouveau_bios *bios, int idx,
+ u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
+ struct nvbios_boostS *info)
+{
+ data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->domain = nv_ro08(bios, data + 0x00);
+ info->percent = nv_ro08(bios, data + 0x01);
+ info->min = nv_ro16(bios, data + 0x02) * 1000;
+ info->max = nv_ro16(bios, data + 0x04) * 1000;
+ }
+ return data;
+}
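
The boost table parsers above all assume one layout: a header of `hdr` bytes, then `cnt` entries of `len` bytes, each immediately followed by its own `snr` sub-entries of `ssz` bytes. That is why `nvbios_boostEe()` computes `data + hdr + idx * (len + snr * ssz)` and then rewrites hdr/cnt/len so the S-functions can walk that entry's sub-entries. A small stand-alone check of the offset arithmetic; the geometry values are made up for illustration only.

#include <stdio.h>

int main(void)
{
	/* hypothetical geometry: 5-byte header, 3 entries of 6 bytes,
	 * each followed by 2 sub-entries of 4 bytes */
	unsigned base = 0x1000, hdr = 5, cnt = 3, len = 6, snr = 2, ssz = 4;
	unsigned idx, sub;

	for (idx = 0; idx < cnt; idx++) {
		unsigned entry = base + hdr + idx * (len + snr * ssz);
		printf("entry %u at 0x%04x\n", idx, entry);
		/* sub-entries follow the entry body with stride ssz */
		for (sub = 0; sub < snr; sub++)
			printf("  sub %u at 0x%04x\n", sub, entry + len + sub * ssz);
	}
	return 0;
}
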
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
new file mode 100644
index 00000000000..d3b15327fbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/cstep.h>
+
+u16
+nvbios_cstepTe(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
+{
+ struct bit_entry bit_P;
+ u16 cstep = 0x0000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ cstep = nv_ro16(bios, bit_P.offset + 0x34);
+
+ if (cstep) {
+ *ver = nv_ro08(bios, cstep + 0);
+ switch (*ver) {
+ case 0x10:
+ *hdr = nv_ro08(bios, cstep + 1);
+ *cnt = nv_ro08(bios, cstep + 3);
+ *len = nv_ro08(bios, cstep + 2);
+ *xnr = nv_ro08(bios, cstep + 5);
+ *xsz = nv_ro08(bios, cstep + 4);
+ return cstep;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+ u8 cnt, len, xnr, xsz;
+ u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ if (data && idx < cnt) {
+ data = data + *hdr + (idx * len);
+ *hdr = len;
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_cstepE *info)
+{
+ u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
+ info->index = nv_ro08(bios, data + 0x03);
+ }
+ return data;
+}
+
+u16
+nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
+ struct nvbios_cstepE *info)
+{
+ u32 data, idx = 0;
+ while ((data = nvbios_cstepEp(bios, idx++, ver, hdr, info))) {
+ if (info->pstate == pstate)
+ break;
+ }
+ return data;
+}
+
+u16
+nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+ u8 cnt, len, xnr, xsz;
+ u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ if (data && idx < xnr) {
+ data = data + *hdr + (cnt * len) + (idx * xsz);
+ *hdr = xsz;
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_cstepXp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_cstepX *info)
+{
+ u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->freq = nv_ro16(bios, data + 0x00) * 1000;
+ info->unkn[0] = nv_ro08(bios, data + 0x02);
+ info->unkn[1] = nv_ro08(bios, data + 0x03);
+ info->voltage = nv_ro08(bios, data + 0x04);
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 663853bcca8..7628fe75922 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -89,6 +89,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
struct nvbios_dpout *info)
{
u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
if (data && *ver) {
info->type = nv_ro16(bios, data + 0x00);
info->mask = nv_ro16(bios, data + 0x02);
@@ -99,9 +100,12 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
info->script[0] = nv_ro16(bios, data + 0x06);
info->script[1] = nv_ro16(bios, data + 0x08);
info->lnkcmp = nv_ro16(bios, data + 0x0a);
- info->script[2] = nv_ro16(bios, data + 0x0c);
- info->script[3] = nv_ro16(bios, data + 0x0e);
- info->script[4] = nv_ro16(bios, data + 0x10);
+ if (*len >= 0x0f) {
+ info->script[2] = nv_ro16(bios, data + 0x0c);
+ info->script[3] = nv_ro16(bios, data + 0x0e);
+ }
+ if (*len >= 0x11)
+ info->script[4] = nv_ro16(bios, data + 0x10);
break;
case 0x40:
info->flags = nv_ro08(bios, data + 0x04);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 57cda2a1437..420908cb82b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2180,7 +2180,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
u16 data;
if (execute)
- nv_suspend(bios, "running init tables\n");
+ nv_info(bios, "running init tables\n");
while (!ret && (data = (init_script(bios, ++i)))) {
struct nvbios_init init = {
.subdev = subdev,
@@ -2210,5 +2210,5 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
ret = nvbios_exec(&init);
}
- return 0;
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
index bcbb056c288..675e221680a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -26,8 +26,9 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/perf.h>
-static u16
-perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u16
+nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
u16 perf = 0x0000;
@@ -38,10 +39,22 @@ perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (perf) {
*ver = nv_ro08(bios, perf + 0);
*hdr = nv_ro08(bios, perf + 1);
+ if (*ver >= 0x40 && *ver < 0x41) {
+ *cnt = nv_ro08(bios, perf + 5);
+ *len = nv_ro08(bios, perf + 2);
+ *snr = nv_ro08(bios, perf + 4);
+ *ssz = nv_ro08(bios, perf + 3);
+ return perf;
+ } else
+ if (*ver >= 0x20 && *ver < 0x40) {
+ *cnt = nv_ro08(bios, perf + 2);
+ *len = nv_ro08(bios, perf + 3);
+ *snr = nv_ro08(bios, perf + 4);
+ *ssz = nv_ro08(bios, perf + 5);
+ return perf;
+ }
}
- } else
- nv_error(bios, "unknown offset for perf in BIT P %d\n",
- bit_P.version);
+ }
}
if (bios->bmp_offset) {
@@ -50,19 +63,132 @@ perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (perf) {
*hdr = nv_ro08(bios, perf + 0);
*ver = nv_ro08(bios, perf + 1);
+ *cnt = nv_ro08(bios, perf + 2);
+ *len = nv_ro08(bios, perf + 3);
+ *snr = 0;
+ *ssz = 0;
+ return perf;
}
}
}
+ return 0x0000;
+}
+
+u16
+nvbios_perf_entry(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u8 snr, ssz;
+ u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ if (perf && idx < *cnt) {
+ perf = perf + *hdr + (idx * (*len + (snr * ssz)));
+ *hdr = *len;
+ *cnt = snr;
+ *len = ssz;
+ return perf;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_perfEp(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_perfE *info)
+{
+ u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ info->pstate = nv_ro08(bios, perf + 0x00);
+ switch (!!perf * *ver) {
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ info->core = nv_ro32(bios, perf + 0x01) * 10;
+ info->memory = nv_ro32(bios, perf + 0x05) * 20;
+ info->fanspeed = nv_ro08(bios, perf + 0x37);
+ if (*hdr > 0x38)
+ info->voltage = nv_ro08(bios, perf + 0x38);
+ break;
+ case 0x21:
+ case 0x23:
+ case 0x24:
+ info->fanspeed = nv_ro08(bios, perf + 0x04);
+ info->voltage = nv_ro08(bios, perf + 0x05);
+ info->shader = nv_ro16(bios, perf + 0x06) * 1000;
+ info->core = info->shader + (signed char)
+ nv_ro08(bios, perf + 0x08) * 1000;
+ switch (nv_device(bios)->chipset) {
+ case 0x49:
+ case 0x4b:
+ info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+ break;
+ default:
+ info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+ break;
+ }
+ break;
+ case 0x25:
+ info->fanspeed = nv_ro08(bios, perf + 0x04);
+ info->voltage = nv_ro08(bios, perf + 0x05);
+ info->core = nv_ro16(bios, perf + 0x06) * 1000;
+ info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
+ info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
+ break;
+ case 0x30:
+ info->script = nv_ro16(bios, perf + 0x02);
+ case 0x35:
+ info->fanspeed = nv_ro08(bios, perf + 0x06);
+ info->voltage = nv_ro08(bios, perf + 0x07);
+ info->core = nv_ro16(bios, perf + 0x08) * 1000;
+ info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
+ info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
+ info->vdec = nv_ro16(bios, perf + 0x10) * 1000;
+ info->disp = nv_ro16(bios, perf + 0x14) * 1000;
+ break;
+ case 0x40:
+ info->voltage = nv_ro08(bios, perf + 0x02);
+ break;
+ default:
+ return 0x0000;
+ }
return perf;
}
+u32
+nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
+ u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+ u32 data = 0x00000000;
+ if (idx < cnt) {
+ data = perfE + *hdr + (idx * len);
+ *hdr = len;
+ }
+ return data;
+}
+
+u32
+nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
+ u8 *ver, u8 *hdr, u8 cnt, u8 len,
+ struct nvbios_perfS *info)
+{
+ u32 data = nvbios_perfSe(bios, perfE, idx, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x40:
+ info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+ break;
+ default:
+ break;
+ }
+ return data;
+}
+
int
nvbios_perf_fan_parse(struct nouveau_bios *bios,
struct nvbios_perf_fan *fan)
{
- u8 ver = 0, hdr = 0, cnt = 0, len = 0;
- u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
+ u8 ver, hdr, cnt, len, snr, ssz;
+ u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!perf)
return -ENODEV;
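
A minimal consumer of the accessors above might enumerate the P-states as in
the sketch below.  This is an illustration only, assuming a valid
struct nouveau_bios *bios and the nvbios_perfE declaration from
<subdev/bios/perf.h>; the real consumer added by this series is
nouveau_pstate_new() in subdev/clock/base.c further down.

	struct nvbios_perfE perfE;
	u8 ver, hdr, cnt, len;
	u16 data;
	int idx = 0;

	/* nvbios_perfEp() returns 0x0000 once idx runs past the entry count */
	while ((data = nvbios_perfEp(bios, idx++, &ver, &hdr, &cnt, &len, &perfE))) {
		if (perfE.pstate == 0xff)	/* skipped by the clock code too */
			continue;
		nv_debug(bios, "P%02x: core %u KHz\n", perfE.pstate, perfE.core);
	}
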
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index f835501203e..1f76de597d4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -114,6 +114,7 @@ pll_map(struct nouveau_bios *bios)
switch (nv_device(bios)->card_type) {
case NV_04:
case NV_10:
+ case NV_11:
case NV_20:
case NV_30:
return nv04_pll_mapping;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
new file mode 100644
index 00000000000..916fa9d302b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/rammap.h>
+
+u16
+nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+ struct bit_entry bit_P;
+ u16 rammap = 0x0000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ rammap = nv_ro16(bios, bit_P.offset + 4);
+
+ if (rammap) {
+ *ver = nv_ro08(bios, rammap + 0);
+ switch (*ver) {
+ case 0x10:
+ case 0x11:
+ *hdr = nv_ro08(bios, rammap + 1);
+ *cnt = nv_ro08(bios, rammap + 5);
+ *len = nv_ro08(bios, rammap + 2);
+ *snr = nv_ro08(bios, rammap + 4);
+ *ssz = nv_ro08(bios, rammap + 3);
+ return rammap;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u8 snr, ssz;
+ u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ if (rammap && idx < *cnt) {
+ rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
+ *hdr = *len;
+ *cnt = snr;
+ *len = ssz;
+ return rammap;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ int idx = 0;
+ u32 data;
+ while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+ if (khz >= nv_ro16(bios, data + 0x00) &&
+ khz <= nv_ro16(bios, data + 0x02))
+ break;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
new file mode 100644
index 00000000000..151c2d6aaee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/timing.h>
+
+u16
+nvbios_timing_table(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_P;
+ u16 timing = 0x0000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 1)
+ timing = nv_ro16(bios, bit_P.offset + 4);
+ else
+ if (bit_P.version == 2)
+ timing = nv_ro16(bios, bit_P.offset + 8);
+
+ if (timing) {
+ *ver = nv_ro08(bios, timing + 0);
+ switch (*ver) {
+ case 0x10:
+ *hdr = nv_ro08(bios, timing + 1);
+ *cnt = nv_ro08(bios, timing + 2);
+ *len = nv_ro08(bios, timing + 3);
+ return timing;
+ case 0x20:
+ *hdr = nv_ro08(bios, timing + 1);
+ *cnt = nv_ro08(bios, timing + 3);
+ *len = nv_ro08(bios, timing + 2);
+ return timing;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+ u8 hdr, cnt;
+ u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
+ if (timing && idx < cnt)
+ return timing + hdr + (idx * *len);
+ return 0x0000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
new file mode 100644
index 00000000000..f343a1b060e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vmap.h>
+
+u16
+nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_P;
+ u16 vmap = 0x0000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 2) {
+ vmap = nv_ro16(bios, bit_P.offset + 0x20);
+ if (vmap) {
+ *ver = nv_ro08(bios, vmap + 0);
+ switch (*ver) {
+ case 0x10:
+ case 0x20:
+ *hdr = nv_ro08(bios, vmap + 1);
+ *cnt = nv_ro08(bios, vmap + 3);
+ *len = nv_ro08(bios, vmap + 2);
+ return vmap;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_vmap *info)
+{
+ u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!vmap * *ver) {
+ case 0x10:
+ case 0x20:
+ break;
+ }
+ return vmap;
+}
+
+u16
+nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+ u8 hdr, cnt;
+ u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
+ if (vmap && idx < cnt) {
+ vmap = vmap + hdr + (idx * *len);
+ return vmap;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_vmap_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+ struct nvbios_vmap_entry *info)
+{
+ u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!vmap * *ver) {
+ case 0x10:
+ info->link = 0xff;
+ info->min = nv_ro32(bios, vmap + 0x00);
+ info->max = nv_ro32(bios, vmap + 0x04);
+ info->arg[0] = nv_ro32(bios, vmap + 0x08);
+ info->arg[1] = nv_ro32(bios, vmap + 0x0c);
+ info->arg[2] = nv_ro32(bios, vmap + 0x10);
+ break;
+ case 0x20:
+ info->unk0 = nv_ro08(bios, vmap + 0x00);
+ info->link = nv_ro08(bios, vmap + 0x01);
+ info->min = nv_ro32(bios, vmap + 0x02);
+ info->max = nv_ro32(bios, vmap + 0x06);
+ info->arg[0] = nv_ro32(bios, vmap + 0x0a);
+ info->arg[1] = nv_ro32(bios, vmap + 0x0e);
+ info->arg[2] = nv_ro32(bios, vmap + 0x12);
+ info->arg[3] = nv_ro32(bios, vmap + 0x16);
+ info->arg[4] = nv_ro32(bios, vmap + 0x1a);
+ info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+ break;
+ }
+ return vmap;
+}
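
For reference, the reads in nvbios_vmap_entry_parse() above imply a minimum
entry size of 0x10 + 0x04 = 0x14 bytes for the version 0x10 layout (min/max
plus three 32-bit coefficients) and 0x1e + 0x04 = 0x22 bytes for version 0x20
(unk0/link, min/max, then six 32-bit coefficients).
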
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
new file mode 100644
index 00000000000..bb590de4ecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/volt.h>
+
+u16
+nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_P;
+ u16 volt = 0x0000;
+
+ if (!bit_entry(bios, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ volt = nv_ro16(bios, bit_P.offset + 0x0c);
+ else
+ if (bit_P.version == 1)
+ volt = nv_ro16(bios, bit_P.offset + 0x10);
+
+ if (volt) {
+ *ver = nv_ro08(bios, volt + 0);
+ switch (*ver) {
+ case 0x12:
+ *hdr = 5;
+ *cnt = nv_ro08(bios, volt + 2);
+ *len = nv_ro08(bios, volt + 1);
+ return volt;
+ case 0x20:
+ *hdr = nv_ro08(bios, volt + 1);
+ *cnt = nv_ro08(bios, volt + 2);
+ *len = nv_ro08(bios, volt + 3);
+ return volt;
+ case 0x30:
+ case 0x40:
+ case 0x50:
+ *hdr = nv_ro08(bios, volt + 1);
+ *cnt = nv_ro08(bios, volt + 3);
+ *len = nv_ro08(bios, volt + 2);
+ return volt;
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_volt *info)
+{
+ u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!volt * *ver) {
+ case 0x12:
+ info->vidmask = nv_ro08(bios, volt + 0x04);
+ break;
+ case 0x20:
+ info->vidmask = nv_ro08(bios, volt + 0x05);
+ break;
+ case 0x30:
+ info->vidmask = nv_ro08(bios, volt + 0x04);
+ break;
+ case 0x40:
+ info->base = nv_ro32(bios, volt + 0x04);
+ info->step = nv_ro16(bios, volt + 0x08);
+ info->vidmask = nv_ro08(bios, volt + 0x0b);
+ /*XXX*/
+ info->min = 0;
+ info->max = info->base;
+ break;
+ case 0x50:
+ info->vidmask = nv_ro08(bios, volt + 0x06);
+ info->min = nv_ro32(bios, volt + 0x0a);
+ info->max = nv_ro32(bios, volt + 0x0e);
+ info->base = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
+ info->step = nv_ro16(bios, volt + 0x16);
+ break;
+ }
+ return volt;
+}
+
+u16
+nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+ u8 hdr, cnt;
+ u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
+ if (volt && idx < cnt) {
+ volt = volt + hdr + (idx * *len);
+ return volt;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_volt_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+ struct nvbios_volt_entry *info)
+{
+ u16 volt = nvbios_volt_entry(bios, idx, ver, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!volt * *ver) {
+ case 0x12:
+ case 0x20:
+ info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
+ info->vid = nv_ro08(bios, volt + 0x01);
+ break;
+ case 0x30:
+ info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
+ info->vid = nv_ro08(bios, volt + 0x01) >> 2;
+ break;
+ case 0x40:
+ case 0x50:
+ break;
+ }
+ return volt;
+}
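
As a usage sketch only (assuming a valid struct nouveau_bios *bios and the
declarations from <subdev/bios/volt.h>), the entry accessors above can dump a
version 0x12/0x20/0x30 VID table as below; the 0x40/0x50 layouts decode no
per-entry fields and are instead described by the base/step values filled in
by nvbios_volt_parse().

	struct nvbios_volt_entry entry;
	u8 ver, len;
	int idx = 0;

	/* nvbios_volt_entry() returns 0x0000 once idx reaches the entry count */
	while (nvbios_volt_entry_parse(bios, idx++, &ver, &len, &entry))
		nv_debug(bios, "vid %02x = %u\n", entry.vid, entry.voltage);
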
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
new file mode 100644
index 00000000000..f757470e228
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/timer.h>
+#include <subdev/bus.h>
+
+struct nouveau_hwsq {
+ struct nouveau_bus *pbus;
+ u32 addr;
+ u32 data;
+ struct {
+ u8 data[512];
+ u8 size;
+ } c;
+};
+
+static void
+hwsq_cmd(struct nouveau_hwsq *hwsq, int size, u8 data[])
+{
+ memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
+ hwsq->c.size += size;
+}
+
+int
+nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
+{
+ struct nouveau_hwsq *hwsq;
+
+ hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
+ if (hwsq) {
+ hwsq->pbus = pbus;
+ hwsq->addr = ~0;
+ hwsq->data = ~0;
+ memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
+ hwsq->c.size = 0;
+ }
+
+ return hwsq ? 0 : -ENOMEM;
+}
+
+int
+nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
+{
+ struct nouveau_hwsq *hwsq = *phwsq;
+ int ret = 0, i;
+ if (hwsq) {
+ struct nouveau_bus *pbus = hwsq->pbus;
+ hwsq->c.size = (hwsq->c.size + 4) / 4;
+ if (hwsq->c.size <= pbus->hwsq_size) {
+ if (exec)
+ ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
+ hwsq->c.size);
+ if (ret)
+ nv_error(pbus, "hwsq exec failed: %d\n", ret);
+ } else {
+ nv_error(pbus, "hwsq ucode too large\n");
+ ret = -ENOSPC;
+ }
+
+ for (i = 0; ret && i < hwsq->c.size; i++)
+ nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+
+ *phwsq = NULL;
+ kfree(hwsq);
+ }
+ return ret;
+}
+
+void
+nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
+{
+ nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+
+ if (hwsq->data != data) {
+ if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
+ hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
+ data >> 16, data >> 24 });
+ } else {
+ hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
+ }
+ }
+
+ if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
+ hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
+ addr >> 16, addr >> 24 });
+ } else {
+ hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
+ }
+
+ hwsq->addr = addr;
+ hwsq->data = data;
+}
+
+void
+nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
+{
+ nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+ flag += 0x80;
+ if (data >= 0)
+ flag += 0x20;
+ if (data >= 1)
+ flag += 0x20;
+ hwsq_cmd(hwsq, 1, (u8[]){ flag });
+}
+
+void
+nouveau_hwsq_wait(struct nouveau_hwsq *hwsq, u8 flag, u8 data)
+{
+ nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+ hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
+}
+
+void
+nouveau_hwsq_nsec(struct nouveau_hwsq *hwsq, u32 nsec)
+{
+ u8 shift = 0, usec = nsec / 1000;
+ while (usec & ~3) {
+ usec >>= 2;
+ shift++;
+ }
+
+ nv_debug(hwsq->pbus, " DELAY = %d ns\n", nsec);
+ hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
+}
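
A minimal usage sketch of the builder above (not part of the patch; the
register, data and delay values are made up, and it assumes a struct
nouveau_bus *pbus whose implementation provides hwsq_exec/hwsq_size, as the
nv50 and nv94 code below does):

	static int
	example_hwsq_run(struct nouveau_bus *pbus)
	{
		struct nouveau_hwsq *hwsq;
		int ret = nouveau_hwsq_init(pbus, &hwsq);
		if (ret)
			return ret;

		nouveau_hwsq_wr32(hwsq, 0x004008, 0x80000000);	/* hypothetical write */
		nouveau_hwsq_nsec(hwsq, 2000);			/* ~2us delay opcode */
		nouveau_hwsq_wait(hwsq, 0x00, 0x01);		/* hypothetical wait */

		/* upload the accumulated ucode and execute it via pbus->hwsq_exec() */
		return nouveau_hwsq_fini(&hwsq, true);
	}
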
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
new file mode 100644
index 00000000000..12176f9c1bc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
@@ -0,0 +1,113 @@
+#ifndef __NVKM_BUS_HWSQ_H__
+#define __NVKM_BUS_HWSQ_H__
+
+#include <subdev/bus.h>
+
+struct hwsq {
+ struct nouveau_subdev *subdev;
+ struct nouveau_hwsq *hwsq;
+ int sequence;
+};
+
+struct hwsq_reg {
+ int sequence;
+ bool force;
+ u32 addr[2];
+ u32 data;
+};
+
+static inline struct hwsq_reg
+hwsq_reg2(u32 addr1, u32 addr2)
+{
+ return (struct hwsq_reg) {
+ .sequence = 0,
+ .force = 0,
+ .addr = { addr1, addr2 },
+ .data = 0xdeadbeef,
+ };
+}
+
+static inline struct hwsq_reg
+hwsq_reg(u32 addr)
+{
+ return hwsq_reg2(addr, addr);
+}
+
+static inline int
+hwsq_init(struct hwsq *ram, struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ int ret;
+
+ ret = nouveau_hwsq_init(pbus, &ram->hwsq);
+ if (ret)
+ return ret;
+
+ ram->sequence++;
+ ram->subdev = subdev;
+ return 0;
+}
+
+static inline int
+hwsq_exec(struct hwsq *ram, bool exec)
+{
+ int ret = 0;
+ if (ram->subdev) {
+ ret = nouveau_hwsq_fini(&ram->hwsq, exec);
+ ram->subdev = NULL;
+ }
+ return ret;
+}
+
+static inline u32
+hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
+{
+ if (reg->sequence != ram->sequence)
+ reg->data = nv_rd32(ram->subdev, reg->addr[0]);
+ return reg->data;
+}
+
+static inline void
+hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
+{
+ reg->sequence = ram->sequence;
+ reg->data = data;
+ if (reg->addr[0] != reg->addr[1])
+ nouveau_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
+ nouveau_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
+}
+
+static inline void
+hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
+{
+ reg->force = true;
+}
+
+static inline u32
+hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
+{
+ u32 temp = hwsq_rd32(ram, reg);
+ if (temp != ((temp & ~mask) | data) || reg->force)
+ hwsq_wr32(ram, reg, (temp & ~mask) | data);
+ return temp;
+}
+
+static inline void
+hwsq_setf(struct hwsq *ram, u8 flag, int data)
+{
+ nouveau_hwsq_setf(ram->hwsq, flag, data);
+}
+
+static inline void
+hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
+{
+ nouveau_hwsq_wait(ram->hwsq, flag, data);
+}
+
+static inline void
+hwsq_nsec(struct hwsq *ram, u32 nsec)
+{
+ nouveau_hwsq_nsec(ram->hwsq, nsec);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
index 8c7f8057a18..23921b5351d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -23,11 +23,7 @@
* Ben Skeggs
*/
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
- struct nouveau_bus base;
-};
+#include "nv04.h"
static void
nv04_bus_intr(struct nouveau_subdev *subdev)
@@ -56,10 +52,22 @@ nv04_bus_intr(struct nouveau_subdev *subdev)
}
static int
+nv04_bus_init(struct nouveau_object *object)
+{
+ struct nv04_bus_priv *priv = (void *)object;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00000111);
+
+ return nouveau_bus_init(&priv->base);
+}
+
+int
nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ struct nv04_bus_impl *impl = (void *)oclass;
struct nv04_bus_priv *priv;
int ret;
@@ -68,28 +76,20 @@ nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->intr = nv04_bus_intr;
+ nv_subdev(priv)->intr = impl->intr;
+ priv->base.hwsq_exec = impl->hwsq_exec;
+ priv->base.hwsq_size = impl->hwsq_size;
return 0;
}
-static int
-nv04_bus_init(struct nouveau_object *object)
-{
- struct nv04_bus_priv *priv = (void *)object;
-
- nv_wr32(priv, 0x001100, 0xffffffff);
- nv_wr32(priv, 0x001140, 0x00000111);
-
- return nouveau_bus_init(&priv->base);
-}
-
-struct nouveau_oclass
-nv04_bus_oclass = {
- .handle = NV_SUBDEV(BUS, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_bus_oclass = &(struct nv04_bus_impl) {
+ .base.handle = NV_SUBDEV(BUS, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_bus_ctor,
.dtor = _nouveau_bus_dtor,
.init = nv04_bus_init,
.fini = _nouveau_bus_fini,
},
-};
+ .intr = nv04_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
new file mode 100644
index 00000000000..4d7602450a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_BUS_NV04_H__
+#define __NVKM_BUS_NV04_H__
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+ struct nouveau_bus base;
+};
+
+int nv04_bus_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+int nv50_bus_init(struct nouveau_object *);
+void nv50_bus_intr(struct nouveau_subdev *);
+
+struct nv04_bus_impl {
+ struct nouveau_oclass base;
+ void (*intr)(struct nouveau_subdev *);
+ int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
+ u32 hwsq_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
index 34132aef34e..94da46f6162 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -23,11 +23,7 @@
* Ben Skeggs
*/
-#include <subdev/bus.h>
-
-struct nv31_bus_priv {
- struct nouveau_bus base;
-};
+#include "nv04.h"
static void
nv31_bus_intr(struct nouveau_subdev *subdev)
@@ -71,7 +67,7 @@ nv31_bus_intr(struct nouveau_subdev *subdev)
static int
nv31_bus_init(struct nouveau_object *object)
{
- struct nv31_bus_priv *priv = (void *)object;
+ struct nv04_bus_priv *priv = (void *)object;
int ret;
ret = nouveau_bus_init(&priv->base);
@@ -83,30 +79,14 @@ nv31_bus_init(struct nouveau_object *object)
return 0;
}
-static int
-nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv31_bus_priv *priv;
- int ret;
-
- ret = nouveau_bus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->intr = nv31_bus_intr;
- return 0;
-}
-
-struct nouveau_oclass
-nv31_bus_oclass = {
- .handle = NV_SUBDEV(BUS, 0x31),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv31_bus_ctor,
+struct nouveau_oclass *
+nv31_bus_oclass = &(struct nv04_bus_impl) {
+ .base.handle = NV_SUBDEV(BUS, 0x31),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
.dtor = _nouveau_bus_dtor,
.init = nv31_bus_init,
.fini = _nouveau_bus_fini,
},
-};
+ .intr = nv31_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
index f5b2117fa8c..11918f7e2ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -23,13 +23,27 @@
* Ben Skeggs
*/
-#include <subdev/bus.h>
+#include <subdev/timer.h>
-struct nv50_bus_priv {
- struct nouveau_bus base;
-};
+#include "nv04.h"
-static void
+static int
+nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+ struct nv50_bus_priv *priv = (void *)pbus;
+ int i;
+
+ nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+ nv_wr32(pbus, 0x001304, 0x00000000);
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x001400 + (i * 4), data[i]);
+ nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+ nv_wr32(pbus, 0x00130c, 0x00000003);
+
+ return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+void
nv50_bus_intr(struct nouveau_subdev *subdev)
{
struct nouveau_bus *pbus = nouveau_bus(subdev);
@@ -61,10 +75,10 @@ nv50_bus_intr(struct nouveau_subdev *subdev)
}
}
-static int
+int
nv50_bus_init(struct nouveau_object *object)
{
- struct nv50_bus_priv *priv = (void *)object;
+ struct nv04_bus_priv *priv = (void *)object;
int ret;
ret = nouveau_bus_init(&priv->base);
@@ -76,30 +90,16 @@ nv50_bus_init(struct nouveau_object *object)
return 0;
}
-static int
-nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_bus_priv *priv;
- int ret;
-
- ret = nouveau_bus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->intr = nv50_bus_intr;
- return 0;
-}
-
-struct nouveau_oclass
-nv50_bus_oclass = {
- .handle = NV_SUBDEV(BUS, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_bus_ctor,
+struct nouveau_oclass *
+nv50_bus_oclass = &(struct nv04_bus_impl) {
+ .base.handle = NV_SUBDEV(BUS, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
.dtor = _nouveau_bus_dtor,
.init = nv50_bus_init,
.fini = _nouveau_bus_fini,
},
-};
+ .intr = nv50_bus_intr,
+ .hwsq_exec = nv50_bus_hwsq_exec,
+ .hwsq_size = 64,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
new file mode 100644
index 00000000000..d3659055fa4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/timer.h>
+
+#include "nv04.h"
+
+static int
+nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+ struct nv50_bus_priv *priv = (void *)pbus;
+ int i;
+
+ nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+ nv_wr32(pbus, 0x001304, 0x00000000);
+ nv_wr32(pbus, 0x001318, 0x00000000);
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x080000 + (i * 4), data[i]);
+ nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+ nv_wr32(pbus, 0x00130c, 0x00000001);
+
+ return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+struct nouveau_oclass *
+nv94_bus_oclass = &(struct nv04_bus_impl) {
+ .base.handle = NV_SUBDEV(BUS, 0x94),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv50_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+ .intr = nv50_bus_intr,
+ .hwsq_exec = nv94_bus_hwsq_exec,
+ .hwsq_size = 128,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
index b192d624636..73839d7151a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -23,11 +23,7 @@
* Ben Skeggs
*/
-#include <subdev/bus.h>
-
-struct nvc0_bus_priv {
- struct nouveau_bus base;
-};
+#include "nv04.h"
static void
nvc0_bus_intr(struct nouveau_subdev *subdev)
@@ -60,7 +56,7 @@ nvc0_bus_intr(struct nouveau_subdev *subdev)
static int
nvc0_bus_init(struct nouveau_object *object)
{
- struct nvc0_bus_priv *priv = (void *)object;
+ struct nv04_bus_priv *priv = (void *)object;
int ret;
ret = nouveau_bus_init(&priv->base);
@@ -72,30 +68,14 @@ nvc0_bus_init(struct nouveau_object *object)
return 0;
}
-static int
-nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_bus_priv *priv;
- int ret;
-
- ret = nouveau_bus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->intr = nvc0_bus_intr;
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_bus_oclass = {
- .handle = NV_SUBDEV(BUS, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_bus_ctor,
+struct nouveau_oclass *
+nvc0_bus_oclass = &(struct nv04_bus_impl) {
+ .base.handle = NV_SUBDEV(BUS, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
.dtor = _nouveau_bus_dtor,
.init = nvc0_bus_init,
.fini = _nouveau_bus_fini,
},
-};
+ .intr = nvc0_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
new file mode 100644
index 00000000000..e2938a21b06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/volt.h>
+#include <subdev/fb.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/boost.h>
+#include <subdev/bios/cstep.h>
+#include <subdev/bios/perf.h>
+
+/******************************************************************************
+ * misc
+ *****************************************************************************/
+static u32
+nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
+ u8 pstate, u8 domain, u32 input)
+{
+ struct nouveau_bios *bios = nouveau_bios(clk);
+ struct nvbios_boostE boostE;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+
+ data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
+ if (data) {
+ struct nvbios_boostS boostS;
+ u8 idx = 0, sver, shdr;
+ u16 subd;
+
+ input = max(boostE.min, input);
+ input = min(boostE.max, input);
+ do {
+ sver = ver;
+ shdr = hdr;
+ subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
+ cnt, len, &boostS);
+ if (subd && boostS.domain == domain) {
+ if (adjust)
+ input = input * boostS.percent / 100;
+ input = max(boostS.min, input);
+ input = min(boostS.max, input);
+ break;
+ }
+ } while (subd);
+ }
+
+ return input;
+}
+
+/******************************************************************************
+ * C-States
+ *****************************************************************************/
+static int
+nouveau_cstate_prog(struct nouveau_clock *clk,
+ struct nouveau_pstate *pstate, int cstatei)
+{
+ struct nouveau_therm *ptherm = nouveau_therm(clk);
+ struct nouveau_volt *volt = nouveau_volt(clk);
+ struct nouveau_cstate *cstate;
+ int ret;
+
+ if (!list_empty(&pstate->list)) {
+ cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+ } else {
+ cstate = &pstate->base;
+ }
+
+ ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
+ if (ret && ret != -ENODEV) {
+ nv_error(clk, "failed to raise fan speed: %d\n", ret);
+ return ret;
+ }
+
+ ret = volt->set_id(volt, cstate->voltage, +1);
+ if (ret && ret != -ENODEV) {
+ nv_error(clk, "failed to raise voltage: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk->calc(clk, cstate);
+ if (ret == 0) {
+ ret = clk->prog(clk);
+ clk->tidy(clk);
+ }
+
+ ret = volt->set_id(volt, cstate->voltage, -1);
+ if (ret && ret != -ENODEV)
+ nv_error(clk, "failed to lower voltage: %d\n", ret);
+
+ ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
+ if (ret && ret != -ENODEV)
+ nv_error(clk, "failed to lower fan speed: %d\n", ret);
+
+ return 0;
+}
+
+static void
+nouveau_cstate_del(struct nouveau_cstate *cstate)
+{
+ list_del(&cstate->head);
+ kfree(cstate);
+}
+
+static int
+nouveau_cstate_new(struct nouveau_clock *clk, int idx,
+ struct nouveau_pstate *pstate)
+{
+ struct nouveau_bios *bios = nouveau_bios(clk);
+ struct nouveau_clocks *domain = clk->domains;
+ struct nouveau_cstate *cstate = NULL;
+ struct nvbios_cstepX cstepX;
+ u8 ver, hdr;
+ u16 data;
+
+ data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
+ if (!data)
+ return -ENOENT;
+
+ cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (!cstate)
+ return -ENOMEM;
+
+ *cstate = pstate->base;
+ cstate->voltage = cstepX.voltage;
+
+ while (domain && domain->name != nv_clk_src_max) {
+ if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
+ u32 freq = nouveau_clock_adjust(clk, true,
+ pstate->pstate,
+ domain->bios,
+ cstepX.freq);
+ cstate->domain[domain->name] = freq;
+ }
+ domain++;
+ }
+
+ list_add(&cstate->head, &pstate->list);
+ return 0;
+}
+
+/******************************************************************************
+ * P-States
+ *****************************************************************************/
+static int
+nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
+{
+ struct nouveau_fb *pfb = nouveau_fb(clk);
+ struct nouveau_pstate *pstate;
+ int ret, idx = 0;
+
+ list_for_each_entry(pstate, &clk->states, head) {
+ if (idx++ == pstatei)
+ break;
+ }
+
+ nv_debug(clk, "setting performance state %d\n", pstatei);
+ clk->pstate = pstatei;
+
+ if (pfb->ram->calc) {
+ ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
+ if (ret == 0)
+ ret = pfb->ram->prog(pfb);
+ pfb->ram->tidy(pfb);
+ }
+
+ return nouveau_cstate_prog(clk, pstate, 0);
+}
+
+static int
+nouveau_pstate_calc(struct nouveau_clock *clk)
+{
+ int pstate, ret = 0;
+
+ nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
+ clk->ustate, clk->astate, clk->tstate, clk->dstate);
+
+ if (clk->state_nr && clk->ustate != -1) {
+ pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
+ pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
+ pstate = max(pstate, clk->dstate);
+ } else {
+ pstate = clk->pstate = -1;
+ }
+
+ nv_trace(clk, "-> %d\n", pstate);
+ if (pstate != clk->pstate)
+ ret = nouveau_pstate_prog(clk, pstate);
+ return ret;
+}
+
+static void
+nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
+{
+ struct nouveau_clocks *clock = clk->domains - 1;
+ struct nouveau_cstate *cstate;
+ char info[3][32] = { "", "", "" };
+ char name[4] = "--";
+ int i = -1;
+
+ if (pstate->pstate != 0xff)
+ snprintf(name, sizeof(name), "%02x", pstate->pstate);
+
+ while ((++clock)->name != nv_clk_src_max) {
+ u32 lo = pstate->base.domain[clock->name];
+ u32 hi = lo;
+ if (hi == 0)
+ continue;
+
+ nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+ list_for_each_entry(cstate, &pstate->list, head) {
+ u32 freq = cstate->domain[clock->name];
+ lo = min(lo, freq);
+ hi = max(hi, freq);
+ nv_debug(clk, "%10d KHz\n", freq);
+ }
+
+ if (clock->mname && ++i < ARRAY_SIZE(info)) {
+ lo /= clock->mdiv;
+ hi /= clock->mdiv;
+ if (lo == hi) {
+ snprintf(info[i], sizeof(info[i]), "%s %d MHz",
+ clock->mname, lo);
+ } else {
+ snprintf(info[i], sizeof(info[i]),
+ "%s %d-%d MHz", clock->mname, lo, hi);
+ }
+ }
+ }
+
+ nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+}
+
+static void
+nouveau_pstate_del(struct nouveau_pstate *pstate)
+{
+ struct nouveau_cstate *cstate, *temp;
+
+ list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
+ nouveau_cstate_del(cstate);
+ }
+
+ list_del(&pstate->head);
+ kfree(pstate);
+}
+
+static int
+nouveau_pstate_new(struct nouveau_clock *clk, int idx)
+{
+ struct nouveau_bios *bios = nouveau_bios(clk);
+ struct nouveau_clocks *domain = clk->domains - 1;
+ struct nouveau_pstate *pstate;
+ struct nouveau_cstate *cstate;
+ struct nvbios_cstepE cstepE;
+ struct nvbios_perfE perfE;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+
+ data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
+ if (!data)
+ return -EINVAL;
+ if (perfE.pstate == 0xff)
+ return 0;
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ cstate = &pstate->base;
+ if (!pstate)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pstate->list);
+
+ pstate->pstate = perfE.pstate;
+ pstate->fanspeed = perfE.fanspeed;
+ cstate->voltage = perfE.voltage;
+ cstate->domain[nv_clk_src_core] = perfE.core;
+ cstate->domain[nv_clk_src_shader] = perfE.shader;
+ cstate->domain[nv_clk_src_mem] = perfE.memory;
+ cstate->domain[nv_clk_src_vdec] = perfE.vdec;
+ cstate->domain[nv_clk_src_dom6] = perfE.disp;
+
+ while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
+ struct nvbios_perfS perfS;
+ u8 sver = ver, shdr = hdr;
+ u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
+ &sver, &shdr, cnt, len, &perfS);
+ if (perfSe == 0 || sver != 0x40)
+ continue;
+
+ if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
+ perfS.v40.freq = nouveau_clock_adjust(clk, false,
+ pstate->pstate,
+ domain->bios,
+ perfS.v40.freq);
+ }
+
+ cstate->domain[domain->name] = perfS.v40.freq;
+ }
+
+ data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
+ if (data) {
+ int idx = cstepE.index;
+ do {
+ nouveau_cstate_new(clk, idx, pstate);
+ } while(idx--);
+ }
+
+ nouveau_pstate_info(clk, pstate);
+ list_add_tail(&pstate->head, &clk->states);
+ clk->state_nr++;
+ return 0;
+}
+
+/******************************************************************************
+ * Adjustment triggers
+ *****************************************************************************/
+static int
+nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
+{
+ struct nouveau_pstate *pstate;
+ int i = 0;
+
+ /* YKW repellant */
+ return -ENOSYS;
+
+ if (req != -1 && req != -2) {
+ list_for_each_entry(pstate, &clk->states, head) {
+ if (pstate->pstate == req)
+ break;
+ i++;
+ }
+
+ if (pstate->pstate != req)
+ return -EINVAL;
+ req = i;
+ }
+
+ clk->ustate = req;
+ return 0;
+}
+
+int
+nouveau_clock_ustate(struct nouveau_clock *clk, int req)
+{
+ int ret = nouveau_clock_ustate_update(clk, req);
+ if (ret)
+ return ret;
+ return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
+{
+ if (!rel) clk->astate = req;
+ if ( rel) clk->astate += rel;
+ clk->astate = min(clk->astate, clk->state_nr - 1);
+ clk->astate = max(clk->astate, 0);
+ return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
+{
+ if (!rel) clk->tstate = req;
+ if ( rel) clk->tstate += rel;
+ clk->tstate = min(clk->tstate, 0);
+ clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
+ return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
+{
+ if (!rel) clk->dstate = req;
+ if ( rel) clk->dstate += rel;
+ clk->dstate = min(clk->dstate, clk->state_nr - 1);
+ clk->dstate = max(clk->dstate, 0);
+ return nouveau_pstate_calc(clk);
+}
+
+/******************************************************************************
+ * subdev base class implementation
+ *****************************************************************************/
+int
+_nouveau_clock_init(struct nouveau_object *object)
+{
+ struct nouveau_clock *clk = (void *)object;
+ struct nouveau_clocks *clock = clk->domains;
+ int ret;
+
+ memset(&clk->bstate, 0x00, sizeof(clk->bstate));
+ INIT_LIST_HEAD(&clk->bstate.list);
+ clk->bstate.pstate = 0xff;
+
+ while (clock->name != nv_clk_src_max) {
+ ret = clk->read(clk, clock->name);
+ if (ret < 0) {
+ nv_error(clk, "%02x freq unknown\n", clock->name);
+ return ret;
+ }
+ clk->bstate.base.domain[clock->name] = ret;
+ clock++;
+ }
+
+ nouveau_pstate_info(clk, &clk->bstate);
+
+ clk->astate = clk->state_nr - 1;
+ clk->tstate = 0;
+ clk->dstate = 0;
+ clk->pstate = -1;
+ nouveau_pstate_calc(clk);
+ return 0;
+}
+
+void
+_nouveau_clock_dtor(struct nouveau_object *object)
+{
+ struct nouveau_clock *clk = (void *)object;
+ struct nouveau_pstate *pstate, *temp;
+
+ list_for_each_entry_safe(pstate, temp, &clk->states, head) {
+ nouveau_pstate_del(pstate);
+ }
+
+ nouveau_subdev_destroy(&clk->base);
+}
+
+int
+nouveau_clock_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct nouveau_clocks *clocks,
+ int length, void **object)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_clock *clk;
+ int ret, idx, arglen;
+ const char *mode;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "CLK",
+ "clock", length, object);
+ clk = *object;
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&clk->states);
+ clk->domains = clocks;
+ clk->ustate = -1;
+
+ idx = 0;
+ do {
+ ret = nouveau_pstate_new(clk, idx++);
+ } while (ret == 0);
+
+ mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
+ if (mode) {
+ if (!strncasecmpz(mode, "disabled", arglen)) {
+ clk->ustate = -1;
+ } else {
+ char save = mode[arglen];
+ long v;
+
+ ((char *)mode)[arglen] = '\0';
+ if (!kstrtol(mode, 0, &v))
+ nouveau_clock_ustate_update(clk, v);
+ ((char *)mode)[arglen] = save;
+ }
+ }
+
+ return 0;
+}
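
To make nouveau_clock_adjust() above concrete with purely hypothetical numbers:
if the matching boost E entry clamps to [405000, 1200000] KHz and the domain's
S sub-entry reads percent = 90, min = 540000, max = 1080000, then an input of
800000 KHz with adjust = true survives the E clamp unchanged, is scaled to
800000 * 90 / 100 = 720000, and already lies inside [540000, 1080000], so
720000 KHz is returned.
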
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index a1427758659..da50c1b1292 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -77,7 +77,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 0db5dbfd91b..db7346f7908 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -23,11 +23,188 @@
*/
#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
struct nv40_clock_priv {
struct nouveau_clock base;
+ u32 ctrl;
+ u32 npll_ctrl;
+ u32 npll_coef;
+ u32 spll;
+};
+
+static struct nouveau_clocks
+nv40_domain[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_max }
};
+static u32
+read_pll_1(struct nv40_clock_priv *priv, u32 reg)
+{
+ u32 ctrl = nv_rd32(priv, reg + 0x00);
+ int P = (ctrl & 0x00070000) >> 16;
+ int N = (ctrl & 0x0000ff00) >> 8;
+ int M = (ctrl & 0x000000ff) >> 0;
+ u32 ref = 27000, clk = 0;
+
+ if (ctrl & 0x80000000)
+ clk = ref * N / M;
+
+ return clk >> P;
+}
+
+static u32
+read_pll_2(struct nv40_clock_priv *priv, u32 reg)
+{
+ u32 ctrl = nv_rd32(priv, reg + 0x00);
+ u32 coef = nv_rd32(priv, reg + 0x04);
+ int N2 = (coef & 0xff000000) >> 24;
+ int M2 = (coef & 0x00ff0000) >> 16;
+ int N1 = (coef & 0x0000ff00) >> 8;
+ int M1 = (coef & 0x000000ff) >> 0;
+ int P = (ctrl & 0x00070000) >> 16;
+ u32 ref = 27000, clk = 0;
+
+ if ((ctrl & 0x80000000) && M1) {
+ clk = ref * N1 / M1;
+ if ((ctrl & 0x40000100) == 0x40000000) {
+ if (M2)
+ clk = clk * N2 / M2;
+ else
+ clk = 0;
+ }
+ }
+
+ return clk >> P;
+}
+
+static u32
+read_clk(struct nv40_clock_priv *priv, u32 src)
+{
+ switch (src) {
+ case 3:
+ return read_pll_2(priv, 0x004000);
+ case 2:
+ return read_pll_1(priv, 0x004008);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nv40_clock_priv *priv = (void *)clk;
+ u32 mast = nv_rd32(priv, 0x00c040);
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(priv)->crystal;
+ case nv_clk_src_href:
+ return 100000; /*XXX: PCIE/AGP differ*/
+ case nv_clk_src_core:
+ return read_clk(priv, (mast & 0x00000003) >> 0);
+ case nv_clk_src_shader:
+ return read_clk(priv, (mast & 0x00000030) >> 4);
+ case nv_clk_src_mem:
+ return read_pll_2(priv, 0x4020);
+ default:
+ break;
+ }
+
+ nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ return -EINVAL;
+}
+
+static int
+nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
+ int *N1, int *M1, int *N2, int *M2, int *log2P)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll pll;
+ int ret;
+
+ ret = nvbios_pll_parse(bios, reg, &pll);
+ if (ret)
+ return ret;
+
+ if (clk < pll.vco1.max_freq)
+ pll.vco2.max_freq = 0;
+
+ ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+ if (ret == 0)
+ return -ERANGE;
+ return ret;
+}
+
+static int
+nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nv40_clock_priv *priv = (void *)clk;
+ int gclk = cstate->domain[nv_clk_src_core];
+ int sclk = cstate->domain[nv_clk_src_shader];
+ int N1, M1, N2, M2, log2P;
+ int ret;
+
+ /* core/geometric clock */
+ ret = nv40_clock_calc_pll(priv, 0x004000, gclk,
+ &N1, &M1, &N2, &M2, &log2P);
+ if (ret < 0)
+ return ret;
+
+ if (N2 == M2) {
+ priv->npll_ctrl = 0x80000100 | (log2P << 16);
+ priv->npll_coef = (N1 << 8) | M1;
+ } else {
+ priv->npll_ctrl = 0xc0000000 | (log2P << 16);
+ priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+ }
+
+ /* use the second pll for shader/rop clock, if it differs from core */
+ if (sclk && sclk != gclk) {
+ ret = nv40_clock_calc_pll(priv, 0x004008, sclk,
+ &N1, &M1, NULL, NULL, &log2P);
+ if (ret < 0)
+ return ret;
+
+ priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+ priv->ctrl = 0x00000223;
+ } else {
+ priv->spll = 0x00000000;
+ priv->ctrl = 0x00000333;
+ }
+
+ return 0;
+}
+
+static int
+nv40_clock_prog(struct nouveau_clock *clk)
+{
+ struct nv40_clock_priv *priv = (void *)clk;
+ nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
+ nv_wr32(priv, 0x004004, priv->npll_coef);
+ nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
+ nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+ mdelay(5);
+ nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+ return 0;
+}
+
+static void
+nv40_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
static int
nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -36,13 +213,17 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv40_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.pll_calc = nv04_clock_pll_calc;
priv->base.pll_prog = nv04_clock_pll_prog;
+ priv->base.read = nv40_clock_read;
+ priv->base.calc = nv40_clock_calc;
+ priv->base.prog = nv40_clock_prog;
+ priv->base.tidy = nv40_clock_tidy;
return 0;
}
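
As a worked example of the readback math in read_pll_1()/read_pll_2() above
(the coefficients are hypothetical): with the fixed 27000 KHz reference,
N1 = 100, M1 = 9, P = 1 and the second stage disabled, the clock is
27000 * 100 / 9 = 300000 KHz, shifted right by P to give 150000 KHz; when the
second stage is enabled ((ctrl & 0x40000100) == 0x40000000), the intermediate
value is additionally scaled by N2 / M2 before the shift.
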
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index d09d3e78040..250a6d96016 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -22,40 +22,538 @@
* Authors: Ben Skeggs
*/
-#include <subdev/clock.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
+#include "nv50.h"
#include "pll.h"
+#include "seq.h"
-struct nv50_clock_priv {
- struct nouveau_clock base;
-};
+static u32
+read_div(struct nv50_clock_priv *priv)
+{
+ switch (nv_device(priv)->chipset) {
+ case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ case 0xa0:
+ return nv_rd32(priv, 0x004700);
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ return nv_rd32(priv, 0x004800);
+ default:
+ return 0x00000000;
+ }
+}
+
+static u32
+read_pll_src(struct nv50_clock_priv *priv, u32 base)
+{
+ struct nouveau_clock *clk = &priv->base;
+ u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
+ u32 rsel = nv_rd32(priv, 0x00e18c);
+ int P, N, M, id;
+
+ switch (nv_device(priv)->chipset) {
+ case 0x50:
+ case 0xa0:
+ switch (base) {
+ case 0x4020:
+ case 0x4028: id = !!(rsel & 0x00000004); break;
+ case 0x4008: id = !!(rsel & 0x00000008); break;
+ case 0x4030: id = 0; break;
+ default:
+ nv_error(priv, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+ ref *= (coef & 0x01000000) ? 2 : 4;
+ P = (coef & 0x00070000) >> 16;
+ N = ((coef & 0x0000ff00) >> 8) + 1;
+ M = ((coef & 0x000000ff) >> 0) + 1;
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ coef = nv_rd32(priv, 0x00e81c);
+ P = (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ rsel = nv_rd32(priv, 0x00c050);
+ switch (base) {
+ case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+ case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+ case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+ case 0x4030: rsel = 3; break;
+ default:
+ nv_error(priv, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ switch (rsel) {
+ case 0: id = 1; break;
+ case 1: return clk->read(clk, nv_clk_src_crystal);
+ case 2: return clk->read(clk, nv_clk_src_href);
+ case 3: id = 0; break;
+ }
+
+ coef = nv_rd32(priv, 0x00e81c + (id * 0x28));
+ P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+ P += (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (M)
+ return (ref * N / M) >> P;
+ return 0;
+}
+
+static u32
+read_pll_ref(struct nv50_clock_priv *priv, u32 base)
+{
+ struct nouveau_clock *clk = &priv->base;
+ u32 src, mast = nv_rd32(priv, 0x00c040);
+
+ switch (base) {
+ case 0x004028:
+ src = !!(mast & 0x00200000);
+ break;
+ case 0x004020:
+ src = !!(mast & 0x00400000);
+ break;
+ case 0x004008:
+ src = !!(mast & 0x00010000);
+ break;
+ case 0x004030:
+ src = !!(mast & 0x02000000);
+ break;
+ case 0x00e810:
+ return clk->read(clk, nv_clk_src_crystal);
+ default:
+ nv_error(priv, "bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ if (src)
+ return clk->read(clk, nv_clk_src_href);
+ return read_pll_src(priv, base);
+}
+
+static u32
+read_pll(struct nv50_clock_priv *priv, u32 base)
+{
+ struct nouveau_clock *clk = &priv->base;
+ u32 mast = nv_rd32(priv, 0x00c040);
+ u32 ctrl = nv_rd32(priv, base + 0);
+ u32 coef = nv_rd32(priv, base + 4);
+ u32 ref = read_pll_ref(priv, base);
+ u32 freq = 0;
+ int N1, N2, M1, M2;
+
+ if (base == 0x004028 && (mast & 0x00100000)) {
+ /* wtf, appears to only disable post-divider on nva0 */
+ if (nv_device(priv)->chipset != 0xa0)
+ return clk->read(clk, nv_clk_src_dom6);
+ }
+
+ N2 = (coef & 0xff000000) >> 24;
+ M2 = (coef & 0x00ff0000) >> 16;
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ freq = ref * N1 / M1;
+ if ((ctrl & 0x40000100) == 0x40000000) {
+ if (M2)
+ freq = freq * N2 / M2;
+ else
+ freq = 0;
+ }
+ }
+
+ return freq;
+}
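For reference, the arithmetic in read_pll() above recovers a two-stage PLL output: ref * N1 / M1 from the first VCO, optionally multiplied by N2 / M2 when the control word enables the second stage. Below is a minimal standalone sketch of that calculation; the bit-field layout is copied from the function above, while the register values in main() are made-up examples.

#include <stdio.h>
#include <stdint.h>

/* two-stage PLL readback as in read_pll() above: N2/M2 live in the high
 * half of the coefficient word, N1/M1 in the low half */
static uint32_t pll_freq(uint32_t ref, uint32_t ctrl, uint32_t coef)
{
        uint32_t N2 = (coef & 0xff000000) >> 24;
        uint32_t M2 = (coef & 0x00ff0000) >> 16;
        uint32_t N1 = (coef & 0x0000ff00) >> 8;
        uint32_t M1 = (coef & 0x000000ff);
        uint32_t freq = 0;

        if ((ctrl & 0x80000000) && M1) {
                freq = ref * N1 / M1;                    /* first VCO stage */
                if ((ctrl & 0x40000100) == 0x40000000)
                        freq = M2 ? freq * N2 / M2 : 0;  /* second stage */
        }
        return freq;
}

int main(void)
{
        /* hypothetical: 27000 kHz reference, N1 = 75, M1 = 2, PLL enabled */
        printf("%u kHz\n", pll_freq(27000, 0x80000000, 0x00004b02));
        return 0;
}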
static int
+nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nv50_clock_priv *priv = (void *)clk;
+ u32 mast = nv_rd32(priv, 0x00c040);
+ u32 P = 0;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(priv)->crystal;
+ case nv_clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case nv_clk_src_hclk:
+ return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+ case nv_clk_src_hclkm3:
+ return clk->read(clk, nv_clk_src_hclk) * 3;
+ case nv_clk_src_hclkm3d2:
+ return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+ case nv_clk_src_host:
+ switch (mast & 0x30000000) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x10000000: break;
+ case 0x20000000: /* !0x50 */
+ case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+ }
+ break;
+ case nv_clk_src_core:
+ if (!(mast & 0x00100000))
+ P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+ switch (mast & 0x00000003) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
+ case 0x00000002: return read_pll(priv, 0x004020) >> P;
+ case 0x00000003: return read_pll(priv, 0x004028) >> P;
+ }
+ break;
+ case nv_clk_src_shader:
+ P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000080)
+ return clk->read(clk, nv_clk_src_host) >> P;
+ return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(priv, 0x004028) >> P;
+ case 0x00000030: return read_pll(priv, 0x004020) >> P;
+ }
+ break;
+ case nv_clk_src_mem:
+ P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
+ if (nv_rd32(priv, 0x004008) & 0x00000200) {
+ switch (mast & 0x0000c000) {
+ case 0x00000000:
+ return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00008000:
+ case 0x0000c000:
+ return clk->read(clk, nv_clk_src_href) >> P;
+ }
+ } else {
+ return read_pll(priv, 0x004008) >> P;
+ }
+ break;
+ case nv_clk_src_vdec:
+ P = (read_div(priv) & 0x00000700) >> 8;
+ switch (nv_device(priv)->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ if (nv_device(priv)->chipset == 0xa0) /* unexpectedly uses the core clock here */
+ return clk->read(clk, nv_clk_src_core) >> P;
+ return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ if (mast & 0x01000000)
+ return read_pll(priv, 0x004028) >> P;
+ return read_pll(priv, 0x004030) >> P;
+ case 0x00000c00:
+ return clk->read(clk, nv_clk_src_core) >> P;
+ }
+ break;
+ case 0x98:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ return clk->read(clk, nv_clk_src_core) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+ case 0x00000c00:
+ return clk->read(clk, nv_clk_src_mem) >> P;
+ }
+ break;
+ }
+ break;
+ case nv_clk_src_dom6:
+ switch (nv_device(priv)->chipset) {
+ case 0x50:
+ case 0xa0:
+ return read_pll(priv, 0x00e810) >> 2;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ P = (read_div(priv) & 0x00000007) >> 0;
+ switch (mast & 0x0c000000) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x04000000: break;
+ case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+ case 0x0c000000:
+ return clk->read(clk, nv_clk_src_hclkm3) >> P;
+ }
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ return -EINVAL;
+}
+
+static u32
+calc_pll(struct nv50_clock_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll pll;
+ int ret;
+
+ ret = nvbios_pll_parse(bios, reg, &pll);
+ if (ret)
+ return 0;
+
+ pll.vco2.max_freq = 0;
+ pll.refclk = read_pll_ref(priv, reg);
+ if (!pll.refclk)
+ return 0;
+
+ return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
+ }
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
+
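calc_div() above only considers power-of-two post-dividers (shifts of 0..7): it halves the source until it drops at or below the target, then returns whichever of the two bracketing candidates is closer, writing the chosen shift back through *div. As a worked example with hypothetical values, a 1000000 kHz source and a 300000 kHz target give clk0 = 250000 at *div = 2 and a bracketing clk1 = 500000; 250000 is closer to 300000, so 250000 is returned with *div = 2.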
+static inline u32
+clk_same(u32 a, u32 b)
+{
+ return ((a / 1000) == (b / 1000));
+}
+
+static int
+nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nv50_clock_priv *priv = (void *)clk;
+ struct nv50_clock_hwsq *hwsq = &priv->hwsq;
+ const int shader = cstate->domain[nv_clk_src_shader];
+ const int core = cstate->domain[nv_clk_src_core];
+ const int vdec = cstate->domain[nv_clk_src_vdec];
+ const int dom6 = cstate->domain[nv_clk_src_dom6];
+ u32 mastm = 0, mastv = 0;
+ u32 divsm = 0, divsv = 0;
+ int N, M, P1, P2;
+ int freq, out;
+
+ /* prepare a hwsq script from which we'll perform the reclock */
+ out = clk_init(hwsq, nv_subdev(clk));
+ if (out)
+ return out;
+
+ clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
+ clk_nsec(hwsq, 8000);
+ clk_setf(hwsq, 0x10, 0x00); /* disable fb */
+ clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+
+ /* vdec: avoid modifying xpll until we know exactly how the other
+ * clock domains work; I suspect at least some of them can also be
+ * tied to xpll.
+ */
+ if (vdec) {
+ /* see how close we can get using nvclk as a source */
+ freq = calc_div(core, vdec, &P1);
+
+ /* see how close we can get using xpll/hclk as a source */
+ if (nv_device(priv)->chipset != 0x98)
+ out = read_pll(priv, 0x004030);
+ else
+ out = clk->read(clk, nv_clk_src_hclkm3d2);
+ out = calc_div(out, vdec, &P2);
+
+ /* select whichever gets us closest */
+ if (abs(vdec - freq) <= abs(vdec - out)) {
+ if (nv_device(priv)->chipset != 0x98)
+ mastv |= 0x00000c00;
+ divsv |= P1 << 8;
+ } else {
+ mastv |= 0x00000800;
+ divsv |= P2 << 8;
+ }
+
+ mastm |= 0x00000c00;
+ divsm |= 0x00000700;
+ }
+
+ /* dom6: unclear what this domain is, but we're limited to various
+ * combinations of the host clock frequency
+ */
+ if (dom6) {
+ if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+ mastv |= 0x00000000;
+ } else
+ if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+ mastv |= 0x08000000;
+ } else {
+ freq = clk->read(clk, nv_clk_src_hclk) * 3;
+ freq = calc_div(freq, dom6, &P1);
+
+ mastv |= 0x0c000000;
+ divsv |= P1;
+ }
+
+ mastm |= 0x0c000000;
+ divsm |= 0x00000007;
+ }
+
+ /* vdec/dom6: switch to "safe" clocks temporarily, update dividers
+ * and then switch to target clocks
+ */
+ clk_mask(hwsq, mast, mastm, 0x00000000);
+ clk_mask(hwsq, divs, divsm, divsv);
+ clk_mask(hwsq, mast, mastm, mastv);
+
+ /* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
+ * sclk to hclk) before reprogramming
+ */
+ if (nv_device(priv)->chipset < 0x92)
+ clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
+ else
+ clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
+
+ /* core: for the moment at least, always use nvpll */
+ freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+ if (freq == 0)
+ return -ERANGE;
+
+ clk_mask(hwsq, nvpll[0], 0xc03f0100,
+ 0x80000000 | (P1 << 19) | (P1 << 16));
+ clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);
+
+ /* shader: tie to nvclk if possible, otherwise use spll. Have to be
+ * very careful that the shader clock is at least twice the core, or
+ * some chipsets will be very unhappy. I expect most or all of these
+ * cases will be handled by tying to nvclk, but there may be corner
+ * cases.
+ */
+ if (P1-- && shader == (core << 1)) {
+ clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
+ clk_mask(hwsq, mast, 0x00100033, 0x00000023);
+ } else {
+ freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+ if (freq == 0)
+ return -ERANGE;
+
+ clk_mask(hwsq, spll[0], 0xc03f0100,
+ 0x80000000 | (P1 << 19) | (P1 << 16));
+ clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
+ clk_mask(hwsq, mast, 0x00100033, 0x00000033);
+ }
+
+ /* restore normal operation */
+ clk_setf(hwsq, 0x10, 0x01); /* enable fb */
+ clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
+ clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
+ return 0;
+}
+
+static int
+nv50_clock_prog(struct nouveau_clock *clk)
+{
+ struct nv50_clock_priv *priv = (void *)clk;
+ return clk_exec(&priv->hwsq, true);
+}
+
+static void
+nv50_clock_tidy(struct nouveau_clock *clk)
+{
+ struct nv50_clock_priv *priv = (void *)clk;
+ clk_exec(&priv->hwsq, false);
+}
+
+int
nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ struct nv50_clock_oclass *pclass = (void *)oclass;
struct nv50_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.pll_calc = nv04_clock_pll_calc;
+ priv->hwsq.r_fifo = hwsq_reg(0x002504);
+ priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
+ priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
+ priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+ priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+ switch (nv_device(priv)->chipset) {
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ priv->hwsq.r_divs = hwsq_reg(0x004800);
+ break;
+ default:
+ priv->hwsq.r_divs = hwsq_reg(0x004700);
+ break;
+ }
+ priv->hwsq.r_mast = hwsq_reg(0x00c040);
+
+ priv->base.read = nv50_clock_read;
+ priv->base.calc = nv50_clock_calc;
+ priv->base.prog = nv50_clock_prog;
+ priv->base.tidy = nv50_clock_tidy;
return 0;
}
-struct nouveau_oclass
-nv50_clock_oclass = {
- .handle = NV_SUBDEV(CLOCK, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+static struct nouveau_clocks
+nv50_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_max }
+};
+
+struct nouveau_oclass *
+nv50_clock_oclass = &(struct nv50_clock_oclass) {
+ .base.handle = NV_SUBDEV(CLOCK, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_clock_ctor,
.dtor = _nouveau_clock_dtor,
.init = _nouveau_clock_init,
.fini = _nouveau_clock_fini,
},
-};
+ .domains = nv50_domains,
+}.base;
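The constructor is now shared between chipsets by embedding struct nouveau_oclass inside struct nv50_clock_oclass and exporting a pointer to the embedded base; nv50_clock_ctor() casts the oclass back to recover the per-chipset domain list (nv84.c below reuses it the same way). Here is a standalone sketch of that embed-and-cast idiom, using made-up names and relying on GCC's handling of file-scope compound literals just as the code above does.

#include <stdio.h>

/* the embed-the-base idiom used by nv50_clock_oclass above, reduced to
 * a standalone example with made-up names */
struct base_class { const char *name; };
struct ext_class  { struct base_class base; int extra; };

static void ctor(struct base_class *oclass)
{
        /* safe because 'base' is the first member of ext_class */
        struct ext_class *pclass = (struct ext_class *)oclass;
        printf("%s: extra = %d\n", pclass->base.name, pclass->extra);
}

static struct base_class *example_oclass = &(struct ext_class) {
        .base.name = "nv50-style oclass",
        .extra     = 42,
}.base;

int main(void)
{
        ctor(example_oclass);
        return 0;
}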
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
new file mode 100644
index 00000000000..f10917d789e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_CLK_NV50_H__
+#define __NVKM_CLK_NV50_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+#include <subdev/clock.h>
+
+struct nv50_clock_hwsq {
+ struct hwsq base;
+ struct hwsq_reg r_fifo;
+ struct hwsq_reg r_spll[2];
+ struct hwsq_reg r_nvpll[2];
+ struct hwsq_reg r_divs;
+ struct hwsq_reg r_mast;
+};
+
+struct nv50_clock_priv {
+ struct nouveau_clock base;
+ struct nv50_clock_hwsq hwsq;
+};
+
+int nv50_clock_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+struct nv50_clock_oclass {
+ struct nouveau_oclass base;
+ struct nouveau_clocks *domains;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
new file mode 100644
index 00000000000..b0b7c1437f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nv50.h"
+
+static struct nouveau_clocks
+nv84_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0xff },
+ { nv_clk_src_max }
+};
+
+struct nouveau_oclass *
+nv84_clock_oclass = &(struct nv50_clock_oclass) {
+ .base.handle = NV_SUBDEV(CLOCK, 0x84),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_clock_ctor,
+ .dtor = _nouveau_clock_dtor,
+ .init = _nouveau_clock_init,
+ .fini = _nouveau_clock_fini,
+ },
+ .domains = nv84_domains,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index f074cd20bc9..4f5a1373f00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -22,33 +22,277 @@
* Authors: Ben Skeggs
*/
-#include <subdev/clock.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
#include "pll.h"
+#include "nva3.h"
+
struct nva3_clock_priv {
struct nouveau_clock base;
+ struct nva3_clock_info eng[nv_clk_src_max];
};
+static u32 read_clk(struct nva3_clock_priv *, int, bool);
+static u32 read_pll(struct nva3_clock_priv *, int, u32);
+
+static u32
+read_vco(struct nva3_clock_priv *priv, int clk)
+{
+ u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+ if ((sctl & 0x00000030) != 0x00000030)
+ return read_pll(priv, 0x41, 0x00e820);
+ return read_pll(priv, 0x42, 0x00e8a0);
+}
+
+static u32
+read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
+{
+ u32 sctl, sdiv, sclk;
+
+ /* refclk for the 0xe8xx plls is a fixed frequency */
+ if (clk >= 0x40) {
+ if (nv_device(priv)->chipset == 0xaf) {
+ /* surprisingly, the reference clock really is read from this register on 0xaf */
+ return nv_rd32(priv, 0x00471c) * 1000;
+ }
+
+ return nv_device(priv)->crystal;
+ }
+
+ sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+ if (!ignore_en && !(sctl & 0x00000100))
+ return 0;
+
+ switch (sctl & 0x00003000) {
+ case 0x00000000:
+ return nv_device(priv)->crystal;
+ case 0x00002000:
+ if (sctl & 0x00000040)
+ return 108000;
+ return 100000;
+ case 0x00003000:
+ sclk = read_vco(priv, clk);
+ sdiv = ((sctl & 0x003f0000) >> 16) + 2;
+ return (sclk * 2) / sdiv;
+ default:
+ return 0;
+ }
+}
+
+static u32
+read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
+{
+ u32 ctrl = nv_rd32(priv, pll + 0);
+ u32 sclk = 0, P = 1, N = 1, M = 1;
+
+ if (!(ctrl & 0x00000008)) {
+ if (ctrl & 0x00000001) {
+ u32 coef = nv_rd32(priv, pll + 4);
+ M = (coef & 0x000000ff) >> 0;
+ N = (coef & 0x0000ff00) >> 8;
+ P = (coef & 0x003f0000) >> 16;
+
+ /* no post-divider on these.. */
+ if ((pll & 0x00ff00) == 0x00e800)
+ P = 1;
+
+ sclk = read_clk(priv, 0x00 + clk, false);
+ }
+ } else {
+ sclk = read_clk(priv, 0x10 + clk, false);
+ }
+
+ if (M * P)
+ return sclk * N / (M * P);
+ return 0;
+}
+
+static int
+nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nva3_clock_priv *priv = (void *)clk;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(priv)->crystal;
+ case nv_clk_src_href:
+ return 100000;
+ case nv_clk_src_core:
+ return read_pll(priv, 0x00, 0x4200);
+ case nv_clk_src_shader:
+ return read_pll(priv, 0x01, 0x4220);
+ case nv_clk_src_mem:
+ return read_pll(priv, 0x02, 0x4000);
+ case nv_clk_src_disp:
+ return read_clk(priv, 0x20, false);
+ case nv_clk_src_vdec:
+ return read_clk(priv, 0x21, false);
+ case nv_clk_src_daemon:
+ return read_clk(priv, 0x25, false);
+ default:
+ nv_error(clk, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
int
-nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
- int clk, struct nouveau_pll_vals *pv)
+nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
+ struct nva3_clock_info *info)
{
- int ret, N, M, P;
+ struct nouveau_bios *bios = nouveau_bios(clock);
+ struct nva3_clock_priv *priv = (void *)clock;
+ struct nvbios_pll limits;
+ u32 oclk, sclk, sdiv;
+ int P, N, M, diff;
+ int ret;
+
+ info->pll = 0;
+ info->clk = 0;
+
+ switch (khz) {
+ case 27000:
+ info->clk = 0x00000100;
+ return khz;
+ case 100000:
+ info->clk = 0x00002100;
+ return khz;
+ case 108000:
+ info->clk = 0x00002140;
+ return khz;
+ default:
+ sclk = read_vco(priv, clk);
+ sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
+ /* if the clock has a PLL attached, and the divider gets us
+ * within [-2, 3) MHz of the target, we'll disable the PLL and
+ * use the divider instead.
+ *
+ * the divider can go as low as 2, but is limited here because
+ * NVIDIA and the VBIOS on my NVA8 seem to prefer using the PLL
+ * for 810MHz - is there a good reason?
+ */
+ if (sdiv > 4) {
+ oclk = (sclk * 2) / sdiv;
+ diff = khz - oclk;
+ if (!pll || (diff >= -2000 && diff < 3000)) {
+ info->clk = (((sdiv - 2) << 16) | 0x00003100);
+ return oclk;
+ }
+ }
+
+ if (!pll)
+ return -ERANGE;
+ break;
+ }
- ret = nva3_pll_calc(nv_subdev(clock), info, clk, &N, NULL, &M, &P);
+ ret = nvbios_pll_parse(bios, pll, &limits);
+ if (ret)
+ return ret;
+
+ limits.refclk = read_clk(priv, clk - 0x10, true);
+ if (!limits.refclk)
+ return -EINVAL;
- if (ret > 0) {
- pv->refclk = info->refclk;
- pv->N1 = N;
- pv->M1 = M;
- pv->log2P = P;
+ ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+ if (ret >= 0) {
+ info->clk = nv_rd32(priv, 0x4120 + (clk * 4));
+ info->pll = (P << 16) | (N << 8) | M;
}
+
+ return ret ? ret : -ERANGE;
+}
+
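The divider path in nva3_clock_info() computes sdiv = min((vco * 2) / (khz - 2999), 65) and yields (vco * 2) / sdiv, so the divided clock comes out no more than roughly 3 MHz below the target; the result is only used if it lands within the [-2000, 3000) kHz window checked above. A minimal standalone sketch of that arithmetic, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

/* the nva3 source-divider arithmetic from nva3_clock_info() above */
static uint32_t nva3_div(uint32_t vco_khz, uint32_t target_khz, uint32_t *sdiv)
{
        *sdiv = (vco_khz * 2) / (target_khz - 2999);
        if (*sdiv > 65)
                *sdiv = 65;
        return (vco_khz * 2) / *sdiv;
}

int main(void)
{
        /* hypothetical values: 2 GHz VCO, 400 MHz target (both in kHz) */
        uint32_t sdiv;
        uint32_t out = nva3_div(2000000, 400000, &sdiv);
        printf("sdiv=%u -> %u kHz (diff %d kHz)\n", sdiv, out,
               (int)(400000 - out));
        return 0;
}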
+static int
+calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
+ int clk, u32 pll, int idx)
+{
+ int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx],
+ &priv->eng[idx]);
+ if (ret >= 0)
+ return 0;
return ret;
}
+static void
+prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
+{
+ struct nva3_clock_info *info = &priv->eng[idx];
+ const u32 src0 = 0x004120 + (clk * 4);
+ const u32 src1 = 0x004160 + (clk * 4);
+ const u32 ctrl = pll + 0;
+ const u32 coef = pll + 4;
+
+ if (info->pll) {
+ nv_mask(priv, src0, 0x00000101, 0x00000101);
+ nv_wr32(priv, coef, info->pll);
+ nv_mask(priv, ctrl, 0x00000015, 0x00000015);
+ nv_mask(priv, ctrl, 0x00000010, 0x00000000);
+ nv_wait(priv, ctrl, 0x00020000, 0x00020000);
+ nv_mask(priv, ctrl, 0x00000010, 0x00000010);
+ nv_mask(priv, ctrl, 0x00000008, 0x00000000);
+ nv_mask(priv, src1, 0x00000100, 0x00000000);
+ nv_mask(priv, src1, 0x00000001, 0x00000000);
+ } else {
+ nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
+ nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+ udelay(20);
+ nv_mask(priv, ctrl, 0x00000001, 0x00000000);
+ nv_mask(priv, src0, 0x00000100, 0x00000000);
+ nv_mask(priv, src0, 0x00000001, 0x00000000);
+ }
+}
+
+static void
+prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
+{
+ struct nva3_clock_info *info = &priv->eng[idx];
+ nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+}
+
+static int
+nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nva3_clock_priv *priv = (void *)clk;
+ int ret;
+
+ if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+ (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+ (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+ (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)))
+ return ret;
+
+ return 0;
+}
+
+static int
+nva3_clock_prog(struct nouveau_clock *clk)
+{
+ struct nva3_clock_priv *priv = (void *)clk;
+ prog_pll(priv, 0x00, 0x004200, nv_clk_src_core);
+ prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
+ prog_clk(priv, 0x20, nv_clk_src_disp);
+ prog_clk(priv, 0x21, nv_clk_src_vdec);
+ return 0;
+}
+
+static void
+nva3_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static struct nouveau_clocks
+nva3_domain[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0x00, 0, "core", 1000 },
+ { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0x03 },
+ { nv_clk_src_disp , 0x04 },
+ { nv_clk_src_max }
+};
static int
nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -58,12 +302,15 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.pll_calc = nva3_clock_pll_calc;
+ priv->base.read = nva3_clock_read;
+ priv->base.calc = nva3_clock_calc;
+ priv->base.prog = nva3_clock_prog;
+ priv->base.tidy = nva3_clock_tidy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
new file mode 100644
index 00000000000..6229a509b42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_CLK_NVA3_H__
+#define __NVKM_CLK_NVA3_H__
+
+#include <subdev/clock.h>
+
+struct nva3_clock_info {
+ u32 clk;
+ u32 pll;
+};
+
+int nva3_clock_info(struct nouveau_clock *, int, u32, u32,
+ struct nva3_clock_info *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 439d81c2613..c3105720ed2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -25,11 +25,408 @@
#include <subdev/clock.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
#include "pll.h"
+struct nvc0_clock_info {
+ u32 freq;
+ u32 ssel;
+ u32 mdiv;
+ u32 dsrc;
+ u32 ddiv;
+ u32 coef;
+};
+
struct nvc0_clock_priv {
struct nouveau_clock base;
+ struct nvc0_clock_info eng[16];
+};
+
+static u32 read_div(struct nvc0_clock_priv *, int, u32, u32);
+
+static u32
+read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
+{
+ struct nouveau_clock *clk = &priv->base;
+ u32 ssrc = nv_rd32(priv, dsrc);
+ if (!(ssrc & 0x00000100))
+ return clk->read(clk, nv_clk_src_sppll0);
+ return clk->read(clk, nv_clk_src_sppll1);
+}
+
+static u32
+read_pll(struct nvc0_clock_priv *priv, u32 pll)
+{
+ struct nouveau_clock *clk = &priv->base;
+ u32 ctrl = nv_rd32(priv, pll + 0x00);
+ u32 coef = nv_rd32(priv, pll + 0x04);
+ u32 P = (coef & 0x003f0000) >> 16;
+ u32 N = (coef & 0x0000ff00) >> 8;
+ u32 M = (coef & 0x000000ff) >> 0;
+ u32 sclk;
+
+ if (!(ctrl & 0x00000001))
+ return 0;
+
+ switch (pll) {
+ case 0x00e800:
+ case 0x00e820:
+ sclk = nv_device(priv)->crystal;
+ P = 1;
+ break;
+ case 0x132000:
+ sclk = clk->read(clk, nv_clk_src_mpllsrc);
+ break;
+ case 0x132020:
+ sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+ break;
+ case 0x137000:
+ case 0x137020:
+ case 0x137040:
+ case 0x1370e0:
+ sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+ break;
+ default:
+ return 0;
+ }
+
+ return sclk * N / M / P;
+}
+
+static u32
+read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+{
+ u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
+ u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+
+ switch (ssrc & 0x00000003) {
+ case 0:
+ if ((ssrc & 0x00030000) != 0x00030000)
+ return nv_device(priv)->crystal;
+ return 108000;
+ case 2:
+ return 100000;
+ case 3:
+ if (sctl & 0x80000000) {
+ u32 sclk = read_vco(priv, dsrc + (doff * 4));
+ u32 sdiv = (sctl & 0x0000003f) + 2;
+ return (sclk * 2) / sdiv;
+ }
+
+ return read_vco(priv, dsrc + (doff * 4));
+ default:
+ return 0;
+ }
+}
+
+static u32
+read_clk(struct nvc0_clock_priv *priv, int clk)
+{
+ u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+ u32 ssel = nv_rd32(priv, 0x137100);
+ u32 sclk, sdiv;
+
+ if (ssel & (1 << clk)) {
+ if (clk < 7)
+ sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+ else
+ sclk = read_pll(priv, 0x1370e0);
+ sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+ } else {
+ sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+ }
+
+ if (sctl & 0x80000000)
+ return (sclk * 2) / sdiv;
+
+ return sclk;
+}
+
+static int
+nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nouveau_device *device = nv_device(clk);
+ struct nvc0_clock_priv *priv = (void *)clk;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return device->crystal;
+ case nv_clk_src_href:
+ return 100000;
+ case nv_clk_src_sppll0:
+ return read_pll(priv, 0x00e800);
+ case nv_clk_src_sppll1:
+ return read_pll(priv, 0x00e820);
+
+ case nv_clk_src_mpllsrcref:
+ return read_div(priv, 0, 0x137320, 0x137330);
+ case nv_clk_src_mpllsrc:
+ return read_pll(priv, 0x132020);
+ case nv_clk_src_mpll:
+ return read_pll(priv, 0x132000);
+ case nv_clk_src_mdiv:
+ return read_div(priv, 0, 0x137300, 0x137310);
+ case nv_clk_src_mem:
+ if (nv_rd32(priv, 0x1373f0) & 0x00000002)
+ return clk->read(clk, nv_clk_src_mpll);
+ return clk->read(clk, nv_clk_src_mdiv);
+
+ case nv_clk_src_gpc:
+ return read_clk(priv, 0x00);
+ case nv_clk_src_rop:
+ return read_clk(priv, 0x01);
+ case nv_clk_src_hubk07:
+ return read_clk(priv, 0x02);
+ case nv_clk_src_hubk06:
+ return read_clk(priv, 0x07);
+ case nv_clk_src_hubk01:
+ return read_clk(priv, 0x08);
+ case nv_clk_src_copy:
+ return read_clk(priv, 0x09);
+ case nv_clk_src_daemon:
+ return read_clk(priv, 0x0c);
+ case nv_clk_src_vdec:
+ return read_clk(priv, 0x0e);
+ default:
+ nv_error(clk, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static u32
+calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+ u32 div = min((ref * 2) / freq, (u32)65);
+ if (div < 2)
+ div = 2;
+
+ *ddiv = div - 2;
+ return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+ u32 sclk;
+
+ /* use one of the fixed frequencies if possible */
+ *ddiv = 0x00000000;
+ switch (freq) {
+ case 27000:
+ case 108000:
+ *dsrc = 0x00000000;
+ if (freq == 108000)
+ *dsrc |= 0x00030000;
+ return freq;
+ case 100000:
+ *dsrc = 0x00000002;
+ return freq;
+ default:
+ *dsrc = 0x00000003;
+ break;
+ }
+
+ /* otherwise, calculate the closest divider */
+ sclk = read_vco(priv, 0x137160 + (clk * 4));
+ if (clk < 7)
+ sclk = calc_div(priv, clk, sclk, freq, ddiv);
+ return sclk;
+}
+
+static u32
+calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll limits;
+ int N, M, P, ret;
+
+ ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+ if (ret)
+ return 0;
+
+ limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+ if (!limits.refclk)
+ return 0;
+
+ ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ if (ret <= 0)
+ return 0;
+
+ *coef = (P << 16) | (N << 8) | M;
+ return ret;
+}
+
+static int
+calc_clk(struct nvc0_clock_priv *priv,
+ struct nouveau_cstate *cstate, int clk, int dom)
+{
+ struct nvc0_clock_info *info = &priv->eng[clk];
+ u32 freq = cstate->domain[dom];
+ u32 src0, div0, div1D, div1P = 0;
+ u32 clk0, clk1 = 0;
+
+ /* invalid clock domain */
+ if (!freq)
+ return 0;
+
+ /* first possible path, using only dividers */
+ clk0 = calc_src(priv, clk, freq, &src0, &div0);
+ clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+
+ /* see if we can get any closer using PLLs */
+ if (clk0 != freq && (0x00004387 & (1 << clk))) {
+ if (clk <= 7)
+ clk1 = calc_pll(priv, clk, freq, &info->coef);
+ else
+ clk1 = cstate->domain[nv_clk_src_hubk06];
+ clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+ }
+
+ /* select the method which gets closest to target freq */
+ if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+ info->dsrc = src0;
+ if (div0) {
+ info->ddiv |= 0x80000000;
+ info->ddiv |= div0 << 8;
+ info->ddiv |= div0;
+ }
+ if (div1D) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1D;
+ }
+ info->ssel = info->coef = 0;
+ info->freq = clk0;
+ } else {
+ if (div1P) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1P << 8;
+ }
+ info->ssel = (1 << clk);
+ info->freq = clk1;
+ }
+
+ return 0;
+}
+
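calc_clk() above evaluates two candidate paths for each domain: a divider-only route (clk0, via calc_src()/calc_div()) and, for the domains in the 0x00004387 mask, a PLL route (clk1), keeping whichever lands closer to the requested frequency. The chosen path determines which of dsrc/ddiv or ssel/coef the prog stages below will actually write. For example, if a hypothetical 700000 kHz request can only reach 675000 kHz through the dividers but 702000 kHz through the PLL, the PLL path wins and ssel gets the bit for that clock set.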
+static int
+nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nvc0_clock_priv *priv = (void *)clk;
+ int ret;
+
+ if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
+ (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
+ (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
+ (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
+ (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
+ (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
+ (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
+ (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+ return ret;
+
+ return 0;
+}
+
+static void
+nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
+{
+ struct nvc0_clock_info *info = &priv->eng[clk];
+ if (clk < 7 && !info->ssel) {
+ nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+ }
+}
+
+static void
+nvc0_clock_prog_1(struct nvc0_clock_priv *priv, int clk)
+{
+ nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
+ nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+}
+
+static void
+nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
+{
+ struct nvc0_clock_info *info = &priv->eng[clk];
+ const u32 addr = 0x137000 + (clk * 0x20);
+ if (clk <= 7) {
+ nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
+ nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+ if (info->coef) {
+ nv_wr32(priv, addr + 0x04, info->coef);
+ nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
+ nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
+ nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+ }
+ }
+}
+
+static void
+nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
+{
+ struct nvc0_clock_info *info = &priv->eng[clk];
+ if (info->ssel) {
+ nv_mask(priv, 0x137100, (1 << clk), info->ssel);
+ nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+ }
+}
+
+static void
+nvc0_clock_prog_4(struct nvc0_clock_priv *priv, int clk)
+{
+ struct nvc0_clock_info *info = &priv->eng[clk];
+ nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static int
+nvc0_clock_prog(struct nouveau_clock *clk)
+{
+ struct nvc0_clock_priv *priv = (void *)clk;
+ struct {
+ void (*exec)(struct nvc0_clock_priv *, int);
+ } stage[] = {
+ { nvc0_clock_prog_0 }, /* div programming */
+ { nvc0_clock_prog_1 }, /* select div mode */
+ { nvc0_clock_prog_2 }, /* (maybe) program pll */
+ { nvc0_clock_prog_3 }, /* (maybe) select pll mode */
+ { nvc0_clock_prog_4 }, /* final divider */
+ };
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(stage); i++) {
+ for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+ if (!priv->eng[j].freq)
+ continue;
+ stage[i].exec(priv, j);
+ }
+ }
+
+ return 0;
+}
+
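nvc0_clock_prog() above applies the state computed by calc() in a fixed order for every engaged clock: bypass divider and source first, then switching the clock onto the divider path, then (re)programming the PLL, then selecting the PLL path where ssel requests it, and finally the output divider. Presumably the point of parking each clock on the bypass divider before its PLL is touched is that the engine keeps a valid clock while the PLL relocks.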
+static void
+nvc0_clock_tidy(struct nouveau_clock *clk)
+{
+ struct nvc0_clock_priv *priv = (void *)clk;
+ memset(priv->eng, 0x00, sizeof(priv->eng));
+}
+
+static struct nouveau_clocks
+nvc0_domain[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_hubk06 , 0x00 },
+ { nv_clk_src_hubk01 , 0x01 },
+ { nv_clk_src_copy , 0x02 },
+ { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_rop , 0x04 },
+ { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0x06 },
+ { nv_clk_src_daemon , 0x0a },
+ { nv_clk_src_hubk07 , 0x0b },
+ { nv_clk_src_max }
};
static int
@@ -40,12 +437,15 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.pll_calc = nva3_clock_pll_calc;
+ priv->base.read = nvc0_clock_read;
+ priv->base.calc = nvc0_clock_calc;
+ priv->base.prog = nvc0_clock_prog;
+ priv->base.tidy = nvc0_clock_tidy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
new file mode 100644
index 00000000000..4c62e84b96f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nve0_clock_info {
+ u32 freq;
+ u32 ssel;
+ u32 mdiv;
+ u32 dsrc;
+ u32 ddiv;
+ u32 coef;
+};
+
+struct nve0_clock_priv {
+ struct nouveau_clock base;
+ struct nve0_clock_info eng[16];
+};
+
+static u32 read_div(struct nve0_clock_priv *, int, u32, u32);
+static u32 read_pll(struct nve0_clock_priv *, u32);
+
+static u32
+read_vco(struct nve0_clock_priv *priv, u32 dsrc)
+{
+ u32 ssrc = nv_rd32(priv, dsrc);
+ if (!(ssrc & 0x00000100))
+ return read_pll(priv, 0x00e800);
+ return read_pll(priv, 0x00e820);
+}
+
+static u32
+read_pll(struct nve0_clock_priv *priv, u32 pll)
+{
+ u32 ctrl = nv_rd32(priv, pll + 0x00);
+ u32 coef = nv_rd32(priv, pll + 0x04);
+ u32 P = (coef & 0x003f0000) >> 16;
+ u32 N = (coef & 0x0000ff00) >> 8;
+ u32 M = (coef & 0x000000ff) >> 0;
+ u32 sclk;
+ u16 fN = 0xf000;
+
+ if (!(ctrl & 0x00000001))
+ return 0;
+
+ switch (pll) {
+ case 0x00e800:
+ case 0x00e820:
+ sclk = nv_device(priv)->crystal;
+ P = 1;
+ break;
+ case 0x132000:
+ sclk = read_pll(priv, 0x132020);
+ P = (coef & 0x10000000) ? 2 : 1;
+ break;
+ case 0x132020:
+ sclk = read_div(priv, 0, 0x137320, 0x137330);
+ fN = nv_rd32(priv, pll + 0x10) >> 16;
+ break;
+ case 0x137000:
+ case 0x137020:
+ case 0x137040:
+ case 0x1370e0:
+ sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+ break;
+ default:
+ return 0;
+ }
+
+ if (P == 0)
+ P = 1;
+
+ sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
+ return sclk / (M * P);
+}
+
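Unlike the other PLLs handled here, the 0x132020 PLL carries a 16-bit fractional-N field at offset +0x10, and read_pll() folds it in as ((fN + 4096) * sclk) >> 13 before dividing by M * P; the default fN of 0xf000 wraps to 0 in u16, so the fractional term vanishes for the rest. A minimal standalone sketch of that arithmetic with made-up values:

#include <stdio.h>
#include <stdint.h>

/* the fractional-N arithmetic used by nve0's read_pll() for 0x132020 */
static uint32_t nve0_pll(uint32_t ref, uint32_t N, uint16_t fN,
                         uint32_t M, uint32_t P)
{
        uint32_t sclk = (ref * N) + (((uint16_t)(fN + 4096) * ref) >> 13);
        return sclk / (M * P);
}

int main(void)
{
        /* hypothetical: 27000 kHz reference, N = 37, fN = 0x2000, M = P = 1 */
        printf("%u kHz\n", nve0_pll(27000, 37, 0x2000, 1, 1));
        return 0;
}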
+static u32
+read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+{
+ u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
+ u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+
+ switch (ssrc & 0x00000003) {
+ case 0:
+ if ((ssrc & 0x00030000) != 0x00030000)
+ return nv_device(priv)->crystal;
+ return 108000;
+ case 2:
+ return 100000;
+ case 3:
+ if (sctl & 0x80000000) {
+ u32 sclk = read_vco(priv, dsrc + (doff * 4));
+ u32 sdiv = (sctl & 0x0000003f) + 2;
+ return (sclk * 2) / sdiv;
+ }
+
+ return read_vco(priv, dsrc + (doff * 4));
+ default:
+ return 0;
+ }
+}
+
+static u32
+read_mem(struct nve0_clock_priv *priv)
+{
+ switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
+ case 1: return read_pll(priv, 0x132020);
+ case 2: return read_pll(priv, 0x132000);
+ default:
+ return 0;
+ }
+}
+
+static u32
+read_clk(struct nve0_clock_priv *priv, int clk)
+{
+ u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+ u32 sclk, sdiv;
+
+ if (clk < 7) {
+ u32 ssel = nv_rd32(priv, 0x137100);
+ if (ssel & (1 << clk)) {
+ sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+ sdiv = 1;
+ } else {
+ sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sdiv = 0;
+ }
+ } else {
+ u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+ if ((ssrc & 0x00000003) == 0x00000003) {
+ sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ if (ssrc & 0x00000100) {
+ if (ssrc & 0x40000000)
+ sclk = read_pll(priv, 0x1370e0);
+ sdiv = 1;
+ } else {
+ sdiv = 0;
+ }
+ } else {
+ sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sdiv = 0;
+ }
+ }
+
+ if (sctl & 0x80000000) {
+ if (sdiv)
+ sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+ else
+ sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+ return (sclk * 2) / sdiv;
+ }
+
+ return sclk;
+}
+
+static int
+nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nouveau_device *device = nv_device(clk);
+ struct nve0_clock_priv *priv = (void *)clk;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return device->crystal;
+ case nv_clk_src_href:
+ return 100000;
+ case nv_clk_src_mem:
+ return read_mem(priv);
+ case nv_clk_src_gpc:
+ return read_clk(priv, 0x00);
+ case nv_clk_src_rop:
+ return read_clk(priv, 0x01);
+ case nv_clk_src_hubk07:
+ return read_clk(priv, 0x02);
+ case nv_clk_src_hubk06:
+ return read_clk(priv, 0x07);
+ case nv_clk_src_hubk01:
+ return read_clk(priv, 0x08);
+ case nv_clk_src_daemon:
+ return read_clk(priv, 0x0c);
+ case nv_clk_src_vdec:
+ return read_clk(priv, 0x0e);
+ default:
+ nv_error(clk, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static u32
+calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+ u32 div = min((ref * 2) / freq, (u32)65);
+ if (div < 2)
+ div = 2;
+
+ *ddiv = div - 2;
+ return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+ u32 sclk;
+
+ /* use one of the fixed frequencies if possible */
+ *ddiv = 0x00000000;
+ switch (freq) {
+ case 27000:
+ case 108000:
+ *dsrc = 0x00000000;
+ if (freq == 108000)
+ *dsrc |= 0x00030000;
+ return freq;
+ case 100000:
+ *dsrc = 0x00000002;
+ return freq;
+ default:
+ *dsrc = 0x00000003;
+ break;
+ }
+
+ /* otherwise, calculate the closest divider */
+ sclk = read_vco(priv, 0x137160 + (clk * 4));
+ if (clk < 7)
+ sclk = calc_div(priv, clk, sclk, freq, ddiv);
+ return sclk;
+}
+
+static u32
+calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll limits;
+ int N, M, P, ret;
+
+ ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+ if (ret)
+ return 0;
+
+ limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+ if (!limits.refclk)
+ return 0;
+
+ ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ if (ret <= 0)
+ return 0;
+
+ *coef = (P << 16) | (N << 8) | M;
+ return ret;
+}
+
+static int
+calc_clk(struct nve0_clock_priv *priv,
+ struct nouveau_cstate *cstate, int clk, int dom)
+{
+ struct nve0_clock_info *info = &priv->eng[clk];
+ u32 freq = cstate->domain[dom];
+ u32 src0, div0, div1D, div1P = 0;
+ u32 clk0, clk1 = 0;
+
+ /* invalid clock domain */
+ if (!freq)
+ return 0;
+
+ /* first possible path, using only dividers */
+ clk0 = calc_src(priv, clk, freq, &src0, &div0);
+ clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+
+ /* see if we can get any closer using PLLs */
+ if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
+ if (clk <= 7)
+ clk1 = calc_pll(priv, clk, freq, &info->coef);
+ else
+ clk1 = cstate->domain[nv_clk_src_hubk06];
+ clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+ }
+
+ /* select the method which gets closest to target freq */
+ if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+ info->dsrc = src0;
+ if (div0) {
+ info->ddiv |= 0x80000000;
+ info->ddiv |= div0 << 8;
+ info->ddiv |= div0;
+ }
+ if (div1D) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1D;
+ }
+ info->ssel = 0;
+ info->freq = clk0;
+ } else {
+ if (div1P) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1P << 8;
+ }
+ info->ssel = (1 << clk);
+ info->dsrc = 0x40000100;
+ info->freq = clk1;
+ }
+
+ return 0;
+}
+
+static int
+nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nve0_clock_priv *priv = (void *)clk;
+ int ret;
+
+ if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
+ (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
+ (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
+ (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
+ (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
+ (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
+ (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+ return ret;
+
+ return 0;
+}
+
+static void
+nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
+{
+ struct nve0_clock_info *info = &priv->eng[clk];
+ if (!info->ssel) {
+ nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+ }
+}
+
+static void
+nve0_clock_prog_1_0(struct nve0_clock_priv *priv, int clk)
+{
+ nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
+ nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+}
+
+static void
+nve0_clock_prog_1_1(struct nve0_clock_priv *priv, int clk)
+{
+ nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+}
+
+static void
+nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
+{
+ struct nve0_clock_info *info = &priv->eng[clk];
+ const u32 addr = 0x137000 + (clk * 0x20);
+ nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
+ nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+ if (info->coef) {
+ nv_wr32(priv, addr + 0x04, info->coef);
+ nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
+ nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
+ nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+ }
+}
+
+static void
+nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
+{
+ struct nve0_clock_info *info = &priv->eng[clk];
+ nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static void
+nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
+{
+ struct nve0_clock_info *info = &priv->eng[clk];
+ if (info->ssel) {
+ nv_mask(priv, 0x137100, (1 << clk), info->ssel);
+ nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+ }
+}
+
+static void
+nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
+{
+ struct nve0_clock_info *info = &priv->eng[clk];
+ if (info->ssel) {
+ nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
+ nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+ }
+}
+
+static int
+nve0_clock_prog(struct nouveau_clock *clk)
+{
+ struct nve0_clock_priv *priv = (void *)clk;
+ struct {
+ u32 mask;
+ void (*exec)(struct nve0_clock_priv *, int);
+ } stage[] = {
+ { 0x007f, nve0_clock_prog_0 }, /* div programming */
+ { 0x007f, nve0_clock_prog_1_0 }, /* select div mode */
+ { 0xff80, nve0_clock_prog_1_1 },
+ { 0x00ff, nve0_clock_prog_2 }, /* (maybe) program pll */
+ { 0xff80, nve0_clock_prog_3 }, /* final divider */
+ { 0x007f, nve0_clock_prog_4_0 }, /* (maybe) select pll mode */
+ { 0xff80, nve0_clock_prog_4_1 },
+ };
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(stage); i++) {
+ for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+ if (!(stage[i].mask & (1 << j)))
+ continue;
+ if (!priv->eng[j].freq)
+ continue;
+ stage[i].exec(priv, j);
+ }
+ }
+
+ return 0;
+}
+
+static void
+nve0_clock_tidy(struct nouveau_clock *clk)
+{
+ struct nve0_clock_priv *priv = (void *)clk;
+ memset(priv->eng, 0x00, sizeof(priv->eng));
+}
+
+static struct nouveau_clocks
+nve0_domain[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_mem , 0x03, 0, "memory", 1000 },
+ { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_hubk01 , 0x05 },
+ { nv_clk_src_vdec , 0x06 },
+ { nv_clk_src_daemon , 0x07 },
+ { nv_clk_src_max }
+};
+
+static int
+nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_clock_priv *priv;
+ int ret;
+
+ ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.read = nve0_clock_read;
+ priv->base.calc = nve0_clock_calc;
+ priv->base.prog = nve0_clock_prog;
+ priv->base.tidy = nve0_clock_tidy;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_clock_oclass = {
+ .handle = NV_SUBDEV(CLOCK, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_clock_ctor,
+ .dtor = _nouveau_clock_dtor,
+ .init = _nouveau_clock_init,
+ .fini = _nouveau_clock_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
index cf1ed0dc9bc..b47d543ab2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -38,7 +38,7 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- int cv = nouveau_bios(subdev)->version.chip;
+ struct nouveau_bios *bios = nouveau_bios(subdev);
int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
int minM = info->vco1.min_m, maxM = info->vco1.max_m;
int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -54,18 +54,21 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
/* possibly correlated with introduction of 27MHz crystal */
- if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
- if (clk > 250000)
- maxM = 6;
- if (clk > 340000)
- maxM = 2;
- } else if (cv < 0x40) {
- if (clk > 150000)
- maxM = 6;
- if (clk > 200000)
- maxM = 4;
- if (clk > 340000)
- maxM = 2;
+ if (bios->version.major < 0x60) {
+ int cv = bios->version.chip;
+ if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+ if (clk > 250000)
+ maxM = 6;
+ if (clk > 340000)
+ maxM = 2;
+ } else if (cv < 0x40) {
+ if (clk > 150000)
+ maxM = 6;
+ if (clk > 200000)
+ maxM = 4;
+ if (clk > 340000)
+ maxM = 2;
+ }
}
P = 1 << maxP;
@@ -227,10 +230,12 @@ nv04_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info, u32 freq,
{
int ret;
- if (!info->vco2.max_freq) {
+ if (!info->vco2.max_freq || !N2) {
ret = getMNP_single(subdev, info, freq, N1, M1, P);
- *N2 = 1;
- *M2 = 1;
+ if (N2) {
+ *N2 = 1;
+ *M2 = 1;
+ }
} else {
ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
}
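This change lets callers that only need a single-stage result pass NULL for N2/M2: nv04_pll_calc() now takes the getMNP_single() path in that case instead of writing through NULL pointers. The nv50 calc_pll() earlier in this series relies on it, passing NULL, NULL after forcing vco2.max_freq to zero.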
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 2fe1f712eef..8eca457c281 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -45,6 +45,7 @@ nva3_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info,
lM = max(lM, (int)info->vco1.min_m);
hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
hM = min(hM, (int)info->vco1.max_m);
+ lM = min(lM, hM);
for (M = lM; M <= hM; M++) {
u32 tmp = freq * *P * M;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
new file mode 100644
index 00000000000..fb33f06ebd5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_CLK_SEQ_H__
+#define __NVKM_CLK_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
+#define clk_init(s,p) hwsq_init(&(s)->base, (p))
+#define clk_exec(s,e) hwsq_exec(&(s)->base, (e))
+#define clk_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define clk_rd32(s,r) hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define clk_wr32(s,r,d) hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define clk_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define clk_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
+#define clk_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
+#define clk_nsec(s,n) hwsq_nsec(&(s)->base, (n))
+
+#endif
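These clk_* macros are thin wrappers around the hwsq (hardware sequencer) helpers: rather than touching registers immediately, they append writes, masks, flag changes and delays to a script held in the embedded struct hwsq. In the nv50 code above, calc() records the whole reclock with clk_wr32()/clk_mask()/clk_setf()/clk_nsec(), prog() replays the script via clk_exec(&priv->hwsq, true), and tidy() discards it with clk_exec(&priv->hwsq, false).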
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index b22357d9b82..27c8235f1a8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -168,7 +168,8 @@ setPLL_single(struct nouveau_devinit *devinit, u32 reg,
/* downclock -- write new NM first */
nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
- if (chip_version < 0x17 && chip_version != 0x11)
+ if ((chip_version < 0x17 || chip_version == 0x1a) &&
+ chip_version != 0x11)
/* wait a bit on older chips */
msleep(64);
nv_rd32(devinit, reg);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 463b08fa096..8d274dba1ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -38,12 +38,18 @@ static void
nv10_devinit_meminit(struct nouveau_devinit *devinit)
{
struct nv10_devinit_priv *priv = (void *)devinit;
- const int mem_width[] = { 0x10, 0x00, 0x20 };
- const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
+ static const int mem_width[] = { 0x10, 0x00, 0x20 };
+ int mem_width_count;
uint32_t patt = 0xdeadbeef;
struct io_mapping *fb;
int i, j, k;
+ if (nv_device(priv)->card_type >= NV_11 &&
+ nv_device(priv)->chipset >= 0x17)
+ mem_width_count = 3;
+ else
+ mem_width_count = 2;
+
/* Map the framebuffer aperture */
fb = fbmem_init(nv_device(priv)->pdev);
if (!fb) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index 821cd75b86a..f009d8a39d9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -22,9 +22,10 @@
* Authors: Ben Skeggs
*/
-#include "subdev/fb.h"
-#include "subdev/bios.h"
-#include "subdev/bios/bit.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+
+#include "priv.h"
int
nouveau_fb_bios_memtype(struct nouveau_bios *bios)
@@ -106,9 +107,9 @@ _nouveau_fb_dtor(struct nouveau_object *object)
int
nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, struct nouveau_oclass *ramcls,
- int length, void **pobject)
+ struct nouveau_oclass *oclass, int length, void **pobject)
{
+ struct nouveau_fb_impl *impl = (void *)oclass;
static const char *name[] = {
[NV_MEM_TYPE_UNKNOWN] = "unknown",
[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
@@ -132,8 +133,10 @@ nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ pfb->memtype_valid = impl->memtype;
+
ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb),
- ramcls, NULL, 0, &ram);
+ impl->ram, NULL, 0, &ram);
if (ret) {
nv_fatal(pfb, "error detecting memory configuration!!\n");
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
new file mode 100644
index 00000000000..34f9605ffee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+int
+nouveau_gddr5_calc(struct nouveau_ram *ram)
+{
+ struct nouveau_bios *bios = nouveau_bios(ram);
+ int pd, lf, xd, vh, vr, vo;
+ int WL, CL, WR, at, dt, ds;
+ int rq = ram->freq < 1000000; /* XXX */
+
+ switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+ case 0x11:
+ pd = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
+ lf = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
+ xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
+ vh = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
+ vr = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
+ vo = nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ switch (!!ram->timing.data * ram->timing.version) {
+ case 0x20:
+ WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+ CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+ WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+ at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
+ dt = nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
+ ds = nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
+ return -EINVAL;
+ CL -= 5;
+ WR -= 4;
+
+ ram->mr[0] &= ~0xf7f;
+ ram->mr[0] |= (WR & 0x0f) << 8;
+ ram->mr[0] |= (CL & 0x0f) << 3;
+ ram->mr[0] |= (WL & 0x07) << 0;
+
+ ram->mr[1] &= ~0x0bf;
+ ram->mr[1] |= (xd & 0x01) << 7;
+ ram->mr[1] |= (at & 0x03) << 4;
+ ram->mr[1] |= (dt & 0x03) << 2;
+ ram->mr[1] |= (ds & 0x03) << 0;
+
+ ram->mr[3] &= ~0x020;
+ ram->mr[3] |= (rq & 0x01) << 5;
+
+ if (!vo)
+ vo = (ram->mr[6] & 0xff0) >> 4;
+ if (ram->mr[6] & 0x001)
+ pd = 1; /* binary driver does this.. bug? */
+ ram->mr[6] &= ~0xff1;
+ ram->mr[6] |= (vo & 0xff) << 4;
+ ram->mr[6] |= (pd & 0x01) << 0;
+
+ if (!(ram->mr[7] & 0x100))
+ vr = 0; /* binary driver does this.. bug? */
+ ram->mr[7] &= ~0x188;
+ ram->mr[7] |= (vr & 0x01) << 8;
+ ram->mr[7] |= (vh & 0x01) << 7;
+ ram->mr[7] |= (lf & 0x01) << 3;
+ return 0;
+}
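nouveau_gddr5_calc() above packs the VBIOS-derived timings into the GDDR5 mode registers; MR0, for instance, carries WR - 4 in bits 11:8, CL - 5 in bits 6:3 and WL in bits 2:0. A minimal standalone sketch of just that MR0 packing, using hypothetical timing values:

#include <stdio.h>
#include <stdint.h>

/* MR0 packing as done by nouveau_gddr5_calc() above: WR and CL are
 * stored with their GDDR5 offsets (-4 and -5) already applied */
static uint32_t gddr5_mr0(uint32_t mr0, int WL, int CL, int WR)
{
        CL -= 5;
        WR -= 4;
        mr0 &= ~0xf7f;
        mr0 |= (WR & 0x0f) << 8;
        mr0 |= (CL & 0x0f) << 3;
        mr0 |= (WL & 0x07) << 0;
        return mr0;
}

int main(void)
{
        /* hypothetical timings: WL = 5, CL = 12, WR = 14 */
        printf("MR0 = 0x%03x\n", gddr5_mr0(0, 5, 12, 14));
        return 0;
}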
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index 1f103c7b89f..8309fe33fe8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -22,14 +22,10 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv04.h"
#define NV04_PFB_CFG0 0x00100200
-struct nv04_fb_priv {
- struct nouveau_fb base;
-};
-
bool
nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
{
@@ -57,30 +53,37 @@ nv04_fb_init(struct nouveau_object *object)
return 0;
}
-static int
+int
nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ struct nv04_fb_impl *impl = (void *)oclass;
struct nv04_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &nv04_ram_oclass, &priv);
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.tile.regions = impl->tile.regions;
+ priv->base.tile.init = impl->tile.init;
+ priv->base.tile.comp = impl->tile.comp;
+ priv->base.tile.fini = impl->tile.fini;
+ priv->base.tile.prog = impl->tile.prog;
return 0;
}
-struct nouveau_oclass
-nv04_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x04),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv04_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv04_ram_oclass,
+}.base.base;
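
The registration style introduced here exports the address of the base member embedded in a file-scope compound literal, so each oclass carries per-implementation data (ram oclass, memtype hook, tile hooks) that the shared constructor recovers by casting the oclass back to the derived type. A stand-alone sketch of the idiom, with invented names:

struct example_base {
        const char *name;
};

struct example_impl {
        struct example_base base;       /* embedded base, first member */
        int regions;                    /* extra per-implementation data */
};

/* compound literals at file scope have static storage duration, so taking
 * the address of their embedded base is a valid constant expression
 */
struct example_base *example_oclass = &(struct example_impl) {
        .base.name = "example",
        .regions   = 8,
}.base;

int
example_ctor(struct example_base *oclass)
{
        struct example_impl *impl = (struct example_impl *)oclass; /* downcast */
        return impl->regions;
}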
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
new file mode 100644
index 00000000000..06ce71f87a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
@@ -0,0 +1,55 @@
+#ifndef __NVKM_FB_NV04_H__
+#define __NVKM_FB_NV04_H__
+
+#include "priv.h"
+
+struct nv04_fb_priv {
+ struct nouveau_fb base;
+};
+
+int nv04_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+struct nv04_fb_impl {
+ struct nouveau_fb_impl base;
+ struct {
+ int regions;
+ void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+ void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
+ void (*fini)(struct nouveau_fb *, int i,
+ struct nouveau_fb_tile *);
+ void (*prog)(struct nouveau_fb *, int i,
+ struct nouveau_fb_tile *);
+ } tile;
+};
+
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv30_fb_init(struct nouveau_object *);
+void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
+
+int nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+#endif
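
As a usage illustration only (this block is not part of the patch), a further chip variant would now be described purely as data against the declarations above; the handle 0x99 is invented and the nv10 hooks are placeholders:

struct nouveau_oclass *
nvxx_fb_oclass = &(struct nv04_fb_impl) {
        .base.base.handle = NV_SUBDEV(FB, 0x99),        /* invented handle */
        .base.base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv04_fb_ctor,
                .dtor = _nouveau_fb_dtor,
                .init = _nouveau_fb_init,
                .fini = _nouveau_fb_fini,
        },
        .base.memtype = nv04_fb_memtype_valid,
        .base.ram     = &nv10_ram_oclass,
        .tile.regions = 8,
        .tile.init    = nv10_fb_tile_init,
        .tile.fini    = nv10_fb_tile_fini,
        .tile.prog    = nv10_fb_tile_prog,
}.base.base;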
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index be069b5306b..ffb7ec6d97a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv10_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
void
nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -57,34 +53,19 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_rd32(pfb, 0x100240 + (i * 0x10));
}
-static int
-nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv10_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv10_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv10_fb_tile_init;
- priv->base.tile.fini = nv10_fb_tile_fini;
- priv->base.tile.prog = nv10_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv10_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_fb_ctor,
+struct nouveau_oclass *
+nv10_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x10),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = _nouveau_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv10_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv10_fb_tile_init,
+ .tile.fini = nv10_fb_tile_fini,
+ .tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 57a2af0079b..9159a5ccee9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -24,40 +24,21 @@
*
*/
-#include "priv.h"
+#include "nv04.h"
-struct nv1a_fb_priv {
- struct nouveau_fb base;
-};
-
-static int
-nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv1a_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv1a_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv10_fb_tile_init;
- priv->base.tile.fini = nv10_fb_tile_fini;
- priv->base.tile.prog = nv10_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv1a_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x1a),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv1a_fb_ctor,
+struct nouveau_oclass *
+nv1a_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x1a),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = _nouveau_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv10_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv10_fb_tile_init,
+ .tile.fini = nv10_fb_tile_fini,
+ .tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index b18c4e63bb4..f003c1b1893 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv20_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
void
nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -80,35 +76,20 @@ nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
-static int
-nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv20_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv20_fb_tile_init;
- priv->base.tile.comp = nv20_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv20_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv20_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x20),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv20_fb_ctor,
+struct nouveau_oclass *
+nv20_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x20),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = _nouveau_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv20_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv20_fb_tile_init,
+ .tile.comp = nv20_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
index 32ccabf10c4..f34f4223210 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv25_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
static void
nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -46,35 +42,20 @@ nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
}
-static int
-nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv25_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv20_fb_tile_init;
- priv->base.tile.comp = nv25_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv20_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv25_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x25),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv25_fb_ctor,
+struct nouveau_oclass *
+nv25_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x25),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = _nouveau_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv20_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv20_fb_tile_init,
+ .tile.comp = nv25_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index bef756d43d3..69093f7151f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv30_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
void
nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -67,7 +63,7 @@ nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
static int
-calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
{
struct nouveau_device *device = nv_device(priv);
int b = (device->chipset > 0x30 ?
@@ -78,7 +74,7 @@ calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
}
static int
-calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
{
int j, x = 0;
@@ -95,7 +91,7 @@ int
nv30_fb_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
- struct nv30_fb_priv *priv = (void *)object;
+ struct nv04_fb_priv *priv = (void *)object;
int ret, i, j;
ret = nouveau_fb_init(&priv->base);
@@ -124,35 +120,20 @@ nv30_fb_init(struct nouveau_object *object)
return 0;
}
-static int
-nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv30_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv30_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv20_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv30_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x30),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv30_fb_ctor,
+struct nouveau_oclass *
+nv30_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x30),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv30_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv20_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv30_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
index 097d8e3824f..161b06e8fc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv35_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
static void
nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@ nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
}
-static int
-nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv35_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv35_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv20_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv35_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x35),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv35_fb_ctor,
+struct nouveau_oclass *
+nv35_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x35),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv30_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv20_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv35_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
index 9d6d9df896d..2dd3d0aab6b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv36_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
static void
nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@ nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
}
}
-static int
-nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv36_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv36_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv20_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv36_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x36),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv36_fb_ctor,
+struct nouveau_oclass *
+nv36_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x36),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv30_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv20_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv36_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 33b4393a782..95a115ab0c8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv40_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
void
nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -50,7 +46,7 @@ nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
static int
nv40_fb_init(struct nouveau_object *object)
{
- struct nv40_fb_priv *priv = (void *)object;
+ struct nv04_fb_priv *priv = (void *)object;
int ret;
ret = nouveau_fb_init(&priv->base);
@@ -61,36 +57,20 @@ nv40_fb_init(struct nouveau_object *object)
return 0;
}
-static int
-nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv40_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv40_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 8;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv40_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv20_fb_tile_prog;
- return 0;
-}
-
-
-struct nouveau_oclass
-nv40_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv40_fb_ctor,
+struct nouveau_oclass *
+nv40_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x40),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv40_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv40_ram_oclass,
+ .tile.regions = 8,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv40_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
new file mode 100644
index 00000000000..581f808527f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_FB_NV40_H__
+#define __NVKM_FB_NV40_H__
+
+#include "priv.h"
+
+struct nv40_ram {
+ struct nouveau_ram base;
+ u32 ctrl;
+ u32 coef;
+};
+
+
+int nv40_ram_calc(struct nouveau_fb *, u32);
+int nv40_ram_prog(struct nouveau_fb *);
+void nv40_ram_tidy(struct nouveau_fb *);
+
+#endif
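
For reference (illustrative, not in the patch): the two words cached in struct nv40_ram are filled in by nv40_ram_calc() later in this diff; a hypothetical decode helper makes the layout explicit (bits 20..22 of ctrl carry the capped bias_p + log2P value):

static void
example_decode_mpll(const struct nv40_ram *ram, int *log2P, int *N1, int *M1,
                    int *N2, int *M2)
{
        *log2P = (ram->ctrl >> 16) & 0x7;           /* post divider */
        *M1    =  ram->coef        & 0xff;
        *N1    = (ram->coef >>  8) & 0xff;
        if (ram->ctrl & 0x40000000) {               /* two-stage PLL */
                *M2 = (ram->coef >> 16) & 0xff;
                *N2 = (ram->coef >> 24) & 0xff;
        } else {                                    /* ctrl bit 8: N2 == M2 */
                *M2 = *N2 = 1;
        }
}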
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
index 02cd83789cd..b239a861559 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv41_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
void
nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
@@ -43,7 +39,7 @@ nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
int
nv41_fb_init(struct nouveau_object *object)
{
- struct nv41_fb_priv *priv = (void *)object;
+ struct nv04_fb_priv *priv = (void *)object;
int ret;
ret = nouveau_fb_init(&priv->base);
@@ -54,36 +50,20 @@ nv41_fb_init(struct nouveau_object *object)
return 0;
}
-static int
-nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv41_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 12;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv40_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv41_fb_tile_prog;
- return 0;
-}
-
-
-struct nouveau_oclass
-nv41_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x41),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv41_fb_ctor,
+struct nouveau_oclass *
+nv41_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x41),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv41_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv41_ram_oclass,
+ .tile.regions = 12,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv40_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
index c5246c29f29..d8478208a68 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv44_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
static void
nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -52,7 +48,7 @@ nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
int
nv44_fb_init(struct nouveau_object *object)
{
- struct nv44_fb_priv *priv = (void *)object;
+ struct nv04_fb_priv *priv = (void *)object;
int ret;
ret = nouveau_fb_init(&priv->base);
@@ -64,35 +60,19 @@ nv44_fb_init(struct nouveau_object *object)
return 0;
}
-static int
-nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv44_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 12;
- priv->base.tile.init = nv44_fb_tile_init;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv44_fb_tile_prog;
- return 0;
-}
-
-
-struct nouveau_oclass
-nv44_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x44),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv44_fb_ctor,
+struct nouveau_oclass *
+nv44_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x44),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv44_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv44_ram_oclass,
+ .tile.regions = 12,
+ .tile.init = nv44_fb_tile_init,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
index e2b57909bfc..a5b77514d35 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -24,11 +24,7 @@
*
*/
-#include "priv.h"
-
-struct nv46_fb_priv {
- struct nouveau_fb base;
-};
+#include "nv04.h"
void
nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -44,35 +40,19 @@ nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-static int
-nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv46_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 15;
- priv->base.tile.init = nv46_fb_tile_init;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv44_fb_tile_prog;
- return 0;
-}
-
-
-struct nouveau_oclass
-nv46_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x46),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv46_fb_ctor,
+struct nouveau_oclass *
+nv46_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x46),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv44_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv44_ram_oclass,
+ .tile.regions = 15,
+ .tile.init = nv46_fb_tile_init,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
index fe6a2278621..3bea142376b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -24,42 +24,22 @@
*
*/
-#include "priv.h"
+#include "nv04.h"
-struct nv47_fb_priv {
- struct nouveau_fb base;
-};
-
-static int
-nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv47_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 15;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv40_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv41_fb_tile_prog;
- return 0;
-}
-
-
-struct nouveau_oclass
-nv47_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x47),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv47_fb_ctor,
+struct nouveau_oclass *
+nv47_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x47),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv41_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv41_ram_oclass,
+ .tile.regions = 15,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv40_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
index 5eca99b8c7e..666cbd5d47f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -24,42 +24,22 @@
*
*/
-#include "priv.h"
+#include "nv04.h"
-struct nv49_fb_priv {
- struct nouveau_fb base;
-};
-
-static int
-nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv49_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv49_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 15;
- priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.comp = nv40_fb_tile_comp;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv41_fb_tile_prog;
- return 0;
-}
-
-
-struct nouveau_oclass
-nv49_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x49),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv49_fb_ctor,
+struct nouveau_oclass *
+nv49_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x49),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv41_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv49_ram_oclass,
+ .tile.regions = 15,
+ .tile.init = nv30_fb_tile_init,
+ .tile.comp = nv40_fb_tile_comp,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
index 1190b78a1e9..42e64f364ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -24,40 +24,21 @@
*
*/
-#include "priv.h"
+#include "nv04.h"
-struct nv4e_fb_priv {
- struct nouveau_fb base;
-};
-
-static int
-nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv4e_fb_priv *priv;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &nv4e_ram_oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.memtype_valid = nv04_fb_memtype_valid;
- priv->base.tile.regions = 12;
- priv->base.tile.init = nv46_fb_tile_init;
- priv->base.tile.fini = nv20_fb_tile_fini;
- priv->base.tile.prog = nv44_fb_tile_prog;
- return 0;
-}
-
-struct nouveau_oclass
-nv4e_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x4e),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv4e_fb_ctor,
+struct nouveau_oclass *
+nv4e_fb_oclass = &(struct nv04_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x4e),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv44_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv04_fb_memtype_valid,
+ .base.ram = &nv4e_ram_oclass,
+ .tile.regions = 12,
+ .tile.init = nv46_fb_tile_init,
+ .tile.fini = nv20_fb_tile_fini,
+ .tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index da614ec5564..cbc7f00c127 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -27,14 +27,9 @@
#include <core/engctx.h>
#include <core/object.h>
-#include "priv.h"
#include <subdev/bios.h>
-struct nv50_fb_priv {
- struct nouveau_fb base;
- struct page *r100c08_page;
- dma_addr_t r100c08;
-};
+#include "nv50.h"
int
nv50_fb_memtype[0x80] = {
@@ -48,7 +43,7 @@ nv50_fb_memtype[0x80] = {
1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
-static bool
+bool
nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
{
return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
@@ -239,7 +234,7 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
pr_cont("0x%08x\n", st1);
}
-static int
+int
nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -248,7 +243,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &nv50_ram_oclass, &priv);
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -264,12 +259,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_warn(priv, "failed 0x100c08 page alloc\n");
}
- priv->base.memtype_valid = nv50_fb_memtype_valid;
nv_subdev(priv)->intr = nv50_fb_intr;
return 0;
}
-static void
+void
nv50_fb_dtor(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
@@ -284,10 +278,10 @@ nv50_fb_dtor(struct nouveau_object *object)
nouveau_fb_destroy(&priv->base);
}
-static int
+int
nv50_fb_init(struct nouveau_object *object)
{
- struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_impl *impl = (void *)object->oclass;
struct nv50_fb_priv *priv = (void *)object;
int ret;
@@ -303,33 +297,20 @@ nv50_fb_init(struct nouveau_object *object)
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
- switch (device->chipset) {
- case 0x50:
- nv_wr32(priv, 0x100c90, 0x000707ff);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_wr32(priv, 0x100c90, 0x000d0fff);
- break;
- case 0xaf:
- nv_wr32(priv, 0x100c90, 0x089d1fff);
- break;
- default:
- nv_wr32(priv, 0x100c90, 0x001d07ff);
- break;
- }
-
+ nv_wr32(priv, 0x100c90, impl->trap);
return 0;
}
-struct nouveau_oclass
-nv50_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_fb_oclass = &(struct nv50_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x50),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_fb_ctor,
.dtor = nv50_fb_dtor,
.init = nv50_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .base.memtype = nv50_fb_memtype_valid,
+ .base.ram = &nv50_ram_oclass,
+ .trap = 0x000707ff,
+}.base.base;
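
For reference, the chipset switch removed above is now expressed as per-implementation data; the .trap values supplied by the oclasses defined later in this diff are:

/*   nv50_fb_oclass -> .trap = 0x000707ff
 *   nv84_fb_oclass -> .trap = 0x001d07ff   (the old 'default' value)
 *   nva3_fb_oclass -> .trap = 0x000d0fff
 *   nvaa_fb_oclass -> .trap = 0x001d07ff
 *   nvaf_fb_oclass -> .trap = 0x089d1fff
 */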
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
new file mode 100644
index 00000000000..c5e5a888c60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FB_NV50_H__
+#define __NVKM_FB_NV50_H__
+
+#include "priv.h"
+
+struct nv50_fb_priv {
+ struct nouveau_fb base;
+ struct page *r100c08_page;
+ dma_addr_t r100c08;
+};
+
+int nv50_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv50_fb_dtor(struct nouveau_object *);
+int nv50_fb_init(struct nouveau_object *);
+
+struct nv50_fb_impl {
+ struct nouveau_fb_impl base;
+ u32 trap;
+};
+
+#define nv50_ram_create(p,e,o,d) \
+ nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int nv50_ram_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+int nv50_ram_get(struct nouveau_fb *, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nouveau_mem **);
+void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
+extern int nv50_fb_memtype[0x80];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
new file mode 100644
index 00000000000..cf0e767d383
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nv84_fb_oclass = &(struct nv50_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0x84),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .base.memtype = nv50_fb_memtype_valid,
+ .base.ram = &nv50_ram_oclass,
+ .trap = 0x001d07ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
new file mode 100644
index 00000000000..dab6e1c63d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nva3_fb_oclass = &(struct nv50_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0xa3),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .base.memtype = nv50_fb_memtype_valid,
+ .base.ram = &nva3_ram_oclass,
+ .trap = 0x000d0fff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
new file mode 100644
index 00000000000..cba8e681803
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nvaa_fb_oclass = &(struct nv50_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0xaa),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .base.memtype = nv50_fb_memtype_valid,
+ .base.ram = &nvaa_ram_oclass,
+ .trap = 0x001d07ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
new file mode 100644
index 00000000000..5423faa2c09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nvaf_fb_oclass = &(struct nv50_fb_impl) {
+ .base.base.handle = NV_SUBDEV(FB, 0xaf),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .base.memtype = nv50_fb_memtype_valid,
+ .base.ram = &nvaa_ram_oclass,
+ .trap = 0x089d1fff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index f35d76fd746..e5fc37c4caa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -22,24 +22,18 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
-
-struct nvc0_fb_priv {
- struct nouveau_fb base;
- struct page *r100c10_page;
- dma_addr_t r100c10;
-};
+#include "nvc0.h"
extern const u8 nvc0_pte_storage_type_map[256];
-static bool
+bool
nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
}
-static int
+int
nvc0_fb_init(struct nouveau_object *object)
{
struct nvc0_fb_priv *priv = (void *)object;
@@ -54,7 +48,7 @@ nvc0_fb_init(struct nouveau_object *object)
return 0;
}
-static void
+void
nvc0_fb_dtor(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
@@ -69,7 +63,7 @@ nvc0_fb_dtor(struct nouveau_object *object)
nouveau_fb_destroy(&priv->base);
}
-static int
+int
nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -78,13 +72,11 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_fb_priv *priv;
int ret;
- ret = nouveau_fb_create(parent, engine, oclass, &nvc0_ram_oclass, &priv);
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.memtype_valid = nvc0_fb_memtype_valid;
-
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c10_page) {
priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
@@ -97,14 +89,15 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-
-struct nouveau_oclass
-nvc0_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_fb_oclass = &(struct nouveau_fb_impl) {
+ .base.handle = NV_SUBDEV(FB, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_fb_ctor,
.dtor = nvc0_fb_dtor,
.init = nvc0_fb_init,
.fini = _nouveau_fb_fini,
},
-};
+ .memtype = nvc0_fb_memtype_valid,
+ .ram = &nvc0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
new file mode 100644
index 00000000000..9e1931eb746
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -0,0 +1,29 @@
+#ifndef __NVKM_RAM_NVC0_H__
+#define __NVKM_RAM_NVC0_H__
+
+#include "priv.h"
+#include "nv50.h"
+
+struct nvc0_fb_priv {
+ struct nouveau_fb base;
+ struct page *r100c10_page;
+ dma_addr_t r100c10;
+};
+
+int nvc0_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nvc0_fb_dtor(struct nouveau_object *);
+int nvc0_fb_init(struct nouveau_object *);
+bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
+
+
+#define nvc0_ram_create(p,e,o,d) \
+ nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
+ struct nouveau_mem **);
+void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
new file mode 100644
index 00000000000..595db50cfef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+struct nouveau_oclass *
+nve0_fb_oclass = &(struct nouveau_fb_impl) {
+ .base.handle = NV_SUBDEV(FB, 0xe0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_fb_ctor,
+ .dtor = nvc0_fb_dtor,
+ .init = nvc0_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .memtype = nvc0_fb_memtype_valid,
+ .ram = &nve0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index db9d6ddde52..493125214e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -12,6 +12,8 @@
#define nouveau_ram_fini(p,s) \
nouveau_object_fini(&(p)->base, (s))
+#define nouveau_ram_create_(p,e,o,s,d) \
+ nouveau_object_create_((p), (e), (o), 0, (s), (void **)d)
#define _nouveau_ram_dtor nouveau_object_destroy
#define _nouveau_ram_init nouveau_object_init
#define _nouveau_ram_fini nouveau_object_fini
@@ -26,10 +28,16 @@ extern struct nouveau_oclass nv44_ram_oclass;
extern struct nouveau_oclass nv49_ram_oclass;
extern struct nouveau_oclass nv4e_ram_oclass;
extern struct nouveau_oclass nv50_ram_oclass;
+extern struct nouveau_oclass nva3_ram_oclass;
+extern struct nouveau_oclass nvaa_ram_oclass;
extern struct nouveau_oclass nvc0_ram_oclass;
+extern struct nouveau_oclass nve0_ram_oclass;
-#define nouveau_fb_create(p,e,c,r,d) \
- nouveau_fb_create_((p), (e), (c), (r), sizeof(**d), (void **)d)
+int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram);
+
+#define nouveau_fb_create(p,e,c,d) \
+ nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
#define nouveau_fb_destroy(p) ({ \
struct nouveau_fb *pfb = (p); \
_nouveau_fb_dtor(nv_object(pfb)); \
@@ -44,44 +52,21 @@ extern struct nouveau_oclass nvc0_ram_oclass;
})
int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, struct nouveau_oclass *,
- int length, void **pobject);
+ struct nouveau_oclass *, int, void **);
void _nouveau_fb_dtor(struct nouveau_object *);
int _nouveau_fb_init(struct nouveau_object *);
int _nouveau_fb_fini(struct nouveau_object *, bool);
-struct nouveau_bios;
-int nouveau_fb_bios_memtype(struct nouveau_bios *);
+struct nouveau_fb_impl {
+ struct nouveau_oclass base;
+ struct nouveau_oclass *ram;
+ bool (*memtype)(struct nouveau_fb *, u32);
+};
bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+bool nv50_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
-void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int nv30_fb_init(struct nouveau_object *);
-void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
- struct nouveau_fb_tile *);
-
-int nv41_fb_init(struct nouveau_object *);
-void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int nv44_fb_init(struct nouveau_object *);
-void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
-extern int nv50_fb_memtype[0x80];
+struct nouveau_bios;
+int nouveau_fb_bios_memtype(struct nouveau_bios *);
#endif
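
Two conventions above are worth spelling out (the sketch below is illustrative and not part of the patch). First, the create macros pass sizeof(**d) because d is a pointer to a pointer to the derived priv structure, so the generic *_create_() helper allocates the full derived size. Second, nouveau_fb_impl now supplies the memtype hook and ram oclass that the constructors used to set by hand; the common code in fb/base.c (not shown in this diff) is assumed to copy them into the nouveau_fb, after which callers use the hook as before:

/* example_check_memtype() is an invented caller name */
static int
example_check_memtype(struct nouveau_fb *pfb, u32 tile_flags)
{
        /* memtype_valid is assumed to be filled in from impl->memtype */
        if (!pfb->memtype_valid(pfb, tile_flags))
                return -EINVAL;
        return 0;
}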
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
new file mode 100644
index 00000000000..0f57fcfe0bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -0,0 +1,118 @@
+#ifndef __NVKM_FBRAM_FUC_H__
+#define __NVKM_FBRAM_FUC_H__
+
+#include <subdev/pwr.h>
+
+struct ramfuc {
+ struct nouveau_memx *memx;
+ struct nouveau_fb *pfb;
+ int sequence;
+};
+
+struct ramfuc_reg {
+ int sequence;
+ bool force;
+ u32 addr[2];
+ u32 data;
+};
+
+static inline struct ramfuc_reg
+ramfuc_reg2(u32 addr1, u32 addr2)
+{
+ return (struct ramfuc_reg) {
+ .sequence = 0,
+ .addr = { addr1, addr2 },
+ .data = 0xdeadbeef,
+ };
+}
+
+static inline struct ramfuc_reg
+ramfuc_reg(u32 addr)
+{
+ return ramfuc_reg2(addr, addr);
+}
+
+static inline int
+ramfuc_init(struct ramfuc *ram, struct nouveau_fb *pfb)
+{
+ struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+ int ret;
+
+ ret = nouveau_memx_init(ppwr, &ram->memx);
+ if (ret)
+ return ret;
+
+ ram->sequence++;
+ ram->pfb = pfb;
+ return 0;
+}
+
+static inline int
+ramfuc_exec(struct ramfuc *ram, bool exec)
+{
+ int ret = 0;
+ if (ram->pfb) {
+ ret = nouveau_memx_fini(&ram->memx, exec);
+ ram->pfb = NULL;
+ }
+ return ret;
+}
+
+static inline u32
+ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
+{
+ if (reg->sequence != ram->sequence)
+ reg->data = nv_rd32(ram->pfb, reg->addr[0]);
+ return reg->data;
+}
+
+static inline void
+ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
+{
+ reg->sequence = ram->sequence;
+ reg->data = data;
+ if (reg->addr[0] != reg->addr[1])
+ nouveau_memx_wr32(ram->memx, reg->addr[1], reg->data);
+ nouveau_memx_wr32(ram->memx, reg->addr[0], reg->data);
+}
+
+static inline void
+ramfuc_nuke(struct ramfuc *ram, struct ramfuc_reg *reg)
+{
+ reg->force = true;
+}
+
+static inline u32
+ramfuc_mask(struct ramfuc *ram, struct ramfuc_reg *reg, u32 mask, u32 data)
+{
+ u32 temp = ramfuc_rd32(ram, reg);
+ if (temp != ((temp & ~mask) | data) || reg->force) {
+ ramfuc_wr32(ram, reg, (temp & ~mask) | data);
+ reg->force = false;
+ }
+ return temp;
+}
+
+static inline void
+ramfuc_wait(struct ramfuc *ram, u32 addr, u32 mask, u32 data, u32 nsec)
+{
+ nouveau_memx_wait(ram->memx, addr, mask, data, nsec);
+}
+
+static inline void
+ramfuc_nsec(struct ramfuc *ram, u32 nsec)
+{
+ nouveau_memx_nsec(ram->memx, nsec);
+}
+
+#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
+#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
+#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
+#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
+#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
+#define ram_mask(s,r,m,d) ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
+#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
+
+#endif
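
The helpers above shadow register contents and queue the resulting writes and waits into a PWR memx script instead of touching the hardware directly; the ram_*() macros expect the enclosing structure to embed struct ramfuc as 'base' and to name each tracked register with an 'r_' prefix. A hedged usage sketch with an invented structure (the offsets 0x100210/0x1002d0 are the refresh controls also poked by nv40_ram_prog() below):

struct example_ramfuc {
        struct ramfuc base;             /* must be named 'base' for ram_*() */
        struct ramfuc_reg r_0x100210;   /* 'r_' + register offset convention */
        struct ramfuc_reg r_0x1002d0;
};

static int
example_build_script(struct example_ramfuc *fuc, struct nouveau_fb *pfb)
{
        int ret;

        fuc->r_0x100210 = ramfuc_reg(0x100210);
        fuc->r_0x1002d0 = ramfuc_reg(0x1002d0);

        ret = ram_init(fuc, pfb);                        /* open memx stream */
        if (ret)
                return ret;

        ram_mask(fuc, 0x100210, 0x80000000, 0x00000000); /* auto-refresh off */
        ram_wr32(fuc, 0x1002d0, 0x00000001);             /* manual refresh */
        ram_nsec(fuc, 1000);                             /* settle for 1us */
        ram_mask(fuc, 0x100210, 0x80000000, 0x80000000); /* auto-refresh on */

        return ram_exec(fuc, true);                      /* submit the script */
}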
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
index ee49ac4dbdb..7648beb1119 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
@@ -22,7 +22,154 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
+#include "nv40.h"
+
+int
+nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nv40_ram *ram = (void *)pfb->ram;
+ struct nvbios_pll pll;
+ int N1, M1, N2, M2;
+ int log2P, ret;
+
+ ret = nvbios_pll_parse(bios, 0x04, &pll);
+ if (ret) {
+ nv_error(pfb, "mclk pll data not found\n");
+ return ret;
+ }
+
+ ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
+ &N1, &M1, &N2, &M2, &log2P);
+ if (ret < 0)
+ return ret;
+
+ ram->ctrl = 0x80000000 | (log2P << 16);
+ ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20;
+ if (N2 == M2) {
+ ram->ctrl |= 0x00000100;
+ ram->coef = (N1 << 8) | M1;
+ } else {
+ ram->ctrl |= 0x40000000;
+ ram->coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+ }
+
+ return 0;
+}
+
+int
+nv40_ram_prog(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nv40_ram *ram = (void *)pfb->ram;
+ struct bit_entry M;
+ u32 crtc_mask = 0;
+ u8 sr1[2];
+ int i;
+
+ /* determine which CRTCs are active, fetch VGA_SR1 for each */
+ for (i = 0; i < 2; i++) {
+ u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+ u32 cnt = 0;
+ do {
+ if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
+ nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+ sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+ if (!(sr1[i] & 0x20))
+ crtc_mask |= (1 << i);
+ break;
+ }
+ udelay(1);
+ } while (cnt++ < 32);
+ }
+
+ /* wait for vblank start on active crtcs, disable memory access */
+ for (i = 0; i < 2; i++) {
+ if (!(crtc_mask & (1 << i)))
+ continue;
+ nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+ nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+ nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+ nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+ }
+
+ /* prepare ram for reclocking */
+ nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
+ nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
+ nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
+ nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+ nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+ /* change the PLL of each memory partition */
+ nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
+ switch (nv_device(pfb)->chipset) {
+ case 0x40:
+ case 0x45:
+ case 0x41:
+ case 0x42:
+ case 0x47:
+ nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
+ nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
+ nv_wr32(pfb, 0x004048, ram->coef);
+ nv_wr32(pfb, 0x004030, ram->coef);
+ case 0x43:
+ case 0x49:
+ case 0x4b:
+ nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
+ nv_wr32(pfb, 0x00403c, ram->coef);
+ default:
+ nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
+ nv_wr32(pfb, 0x004024, ram->coef);
+ break;
+ }
+ udelay(100);
+ nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+
+ /* re-enable normal operation of memory controller */
+ nv_wr32(pfb, 0x1002dc, 0x00000000);
+ nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+ udelay(100);
+
+ /* execute memory reset script from vbios */
+ if (!bit_entry(bios, 'M', &M)) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(pfb),
+ .bios = bios,
+ .offset = nv_ro16(bios, M.offset + 0x00),
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ /* make sure we're in vblank (hopefully the same one as before), and
+ * then re-enable crtc memory access
+ */
+ for (i = 0; i < 2; i++) {
+ if (!(crtc_mask & (1 << i)))
+ continue;
+ nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+ nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+ nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+ }
+
+ return 0;
+}
+
+void
+nv40_ram_tidy(struct nouveau_fb *pfb)
+{
+}
static int
nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +177,7 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nv40_ram *ram;
u32 pbus1218 = nv_rd32(pfb, 0x001218);
int ret;
@@ -40,15 +187,18 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
switch (pbus1218 & 0x00000300) {
- case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: ram->type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
}
- ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->tags = nv_rd32(pfb, 0x100320);
+ ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ ram->base.tags = nv_rd32(pfb, 0x100320);
+ ram->base.calc = nv40_ram_calc;
+ ram->base.prog = nv40_ram_prog;
+ ram->base.tidy = nv40_ram_tidy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
index 1dab7e12aba..d64498a4d9e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv40.h"
static int
nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nv40_ram *ram;
u32 pfb474 = nv_rd32(pfb, 0x100474);
int ret;
@@ -40,15 +40,18 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
if (pfb474 & 0x00000004)
- ram->type = NV_MEM_TYPE_GDDR3;
+ ram->base.type = NV_MEM_TYPE_GDDR3;
if (pfb474 & 0x00000002)
- ram->type = NV_MEM_TYPE_DDR2;
+ ram->base.type = NV_MEM_TYPE_DDR2;
if (pfb474 & 0x00000001)
- ram->type = NV_MEM_TYPE_DDR1;
+ ram->base.type = NV_MEM_TYPE_DDR1;
- ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->tags = nv_rd32(pfb, 0x100320);
+ ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ ram->base.tags = nv_rd32(pfb, 0x100320);
+ ram->base.calc = nv40_ram_calc;
+ ram->base.prog = nv40_ram_prog;
+ ram->base.tidy = nv40_ram_tidy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
index 25fff842e5c..089acac810c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv40.h"
static int
nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nv40_ram *ram;
u32 pfb474 = nv_rd32(pfb, 0x100474);
int ret;
@@ -40,13 +40,16 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
if (pfb474 & 0x00000004)
- ram->type = NV_MEM_TYPE_GDDR3;
+ ram->base.type = NV_MEM_TYPE_GDDR3;
if (pfb474 & 0x00000002)
- ram->type = NV_MEM_TYPE_DDR2;
+ ram->base.type = NV_MEM_TYPE_DDR2;
if (pfb474 & 0x00000001)
- ram->type = NV_MEM_TYPE_DDR1;
+ ram->base.type = NV_MEM_TYPE_DDR1;
- ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->base.calc = nv40_ram_calc;
+ ram->base.prog = nv40_ram_prog;
+ ram->base.tidy = nv40_ram_tidy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index ab7ef0ac9e3..baa013afa57 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv40.h"
static int
nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nv40_ram *ram;
u32 pfb914 = nv_rd32(pfb, 0x100914);
int ret;
@@ -40,15 +40,18 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
switch (pfb914 & 0x00000003) {
- case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
case 0x00000003: break;
}
- ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->tags = nv_rd32(pfb, 0x100320);
+ ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ ram->base.tags = nv_rd32(pfb, 0x100320);
+ ram->base.calc = nv40_ram_calc;
+ ram->base.prog = nv40_ram_prog;
+ ram->base.tidy = nv40_ram_tidy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 903baff77fd..76762a17d89 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -23,8 +23,215 @@
*/
#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/timing.h>
+#include <subdev/clock/pll.h>
+#include <subdev/fb.h>
+
+#include <core/option.h>
#include <core/mm.h>
-#include "priv.h"
+
+#include "ramseq.h"
+
+#include "nv50.h"
+
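+/* shadows of the registers touched during reclock; the hwsq (hardware
+ * sequencer) helpers record accesses to them into a script which is
+ * executed later by nv50_ram_prog()
+ */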
+struct nv50_ramseq {
+ struct hwsq base;
+ struct hwsq_reg r_0x002504;
+ struct hwsq_reg r_0x004008;
+ struct hwsq_reg r_0x00400c;
+ struct hwsq_reg r_0x00c040;
+ struct hwsq_reg r_0x100210;
+ struct hwsq_reg r_0x1002d0;
+ struct hwsq_reg r_0x1002d4;
+ struct hwsq_reg r_0x1002dc;
+ struct hwsq_reg r_0x100da0[8];
+ struct hwsq_reg r_0x100e20;
+ struct hwsq_reg r_0x100e24;
+ struct hwsq_reg r_0x611200;
+ struct hwsq_reg r_timing[9];
+ struct hwsq_reg r_mr[4];
+};
+
+struct nv50_ram {
+ struct nouveau_ram base;
+ struct nv50_ramseq hwsq;
+};
+
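+/* presumably "Quadro FX 5800 (NVA0)": gates some extra, not yet
+ * understood register writes apparently needed on that board (see the
+ * XXX comments below)
+ */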
+#define QFX5800NVA0 1
+
+static int
+nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nv50_ram *ram = (void *)pfb->ram;
+ struct nv50_ramseq *hwsq = &ram->hwsq;
+ struct nvbios_perfE perfE;
+ struct nvbios_pll mpll;
+ struct bit_entry M;
+ struct {
+ u32 data;
+ u8 size;
+ } ramcfg, timing;
+ u8 ver, hdr, cnt, strap;
+ u32 data;
+ int N1, M1, N2, M2, P;
+ int ret, i;
+
+ /* lookup closest matching performance table entry for frequency */
+ i = 0;
+ do {
+ ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+ &ramcfg.size, &perfE);
+ if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
+ (ramcfg.size < 2)) {
+ nv_error(pfb, "invalid/missing perftab entry\n");
+ return -EINVAL;
+ }
+ } while (perfE.memory < freq);
+
+ /* locate specific data set for the attached memory */
+ if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
+ nv_error(pfb, "invalid/missing memory table\n");
+ return -EINVAL;
+ }
+
+ strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+ data = nv_ro16(bios, M.offset + 3);
+ if (data)
+ strap = nv_ro08(bios, data + strap);
+
+ if (strap >= cnt) {
+ nv_error(pfb, "invalid ramcfg strap\n");
+ return -EINVAL;
+ }
+
+ ramcfg.data += hdr + (strap * ramcfg.size);
+
+ /* lookup memory timings, if bios says they're present */
+ strap = nv_ro08(bios, ramcfg.data + 0x01);
+ if (strap != 0xff) {
+ timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+ if (!timing.data || ver != 0x10 || hdr < 0x12) {
+ nv_error(pfb, "invalid/missing timing entry "
+ "%02x %04x %02x %02x\n",
+ strap, timing.data, ver, hdr);
+ return -EINVAL;
+ }
+ } else {
+ timing.data = 0;
+ }
+
+ ret = ram_init(hwsq, nv_subdev(pfb));
+ if (ret)
+ return ret;
+
+ ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
+ ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
+ ram_wr32(hwsq, 0x611200, 0x00003300);
+ ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
+ ram_nsec(hwsq, 8000);
+ ram_setf(hwsq, 0x10, 0x00); /* disable fb */
+ ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+
+ ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
+ ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
+ ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
+ ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
+ ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+ ret = nvbios_pll_parse(bios, 0x004008, &mpll);
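+ /* zeroing vco2.max_freq below forces a single-stage PLL calculation */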
+ mpll.vco2.max_freq = 0;
+ if (ret == 0) {
+ ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+ &N1, &M1, &N2, &M2, &P);
+ if (ret == 0)
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
+ ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+ ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
+ ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
+ (P << 22) | (P << 16));
+#if QFX5800NVA0
+ for (i = 0; i < 8; i++)
+ ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
+#endif
+ ram_nsec(hwsq, 96000); /*XXX*/
+ ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
+
+ ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+ ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
+
+ ram_nsec(hwsq, 12000);
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ ram_nuke(hwsq, mr[0]); /* force update */
+ ram_mask(hwsq, mr[0], 0x000, 0x000);
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ ram_mask(hwsq, mr[2], 0x000, 0x000);
+ ram_nuke(hwsq, mr[0]); /* force update */
+ ram_mask(hwsq, mr[0], 0x000, 0x000);
+ break;
+ default:
+ break;
+ }
+
+ ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
+ ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
+
+ ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+
+#if QFX5800NVA0
+ ram_nuke(hwsq, 0x100e24);
+ ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
+ ram_nuke(hwsq, 0x100e20);
+ ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
+#endif
+
+ ram_mask(hwsq, mr[0], 0x100, 0x100);
+ ram_mask(hwsq, mr[0], 0x100, 0x000);
+
+ ram_setf(hwsq, 0x10, 0x01); /* enable fb */
+ ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
+ ram_wr32(hwsq, 0x611200, 0x00003330);
+ ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+ return 0;
+}
+
+static int
+nv50_ram_prog(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nv50_ram *ram = (void *)pfb->ram;
+ struct nv50_ramseq *hwsq = &ram->hwsq;
+
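+ /* the script is only actually executed if the user has enabled the
+ * NvMemExec option; by default this is a dry run
+ */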
+ ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ return 0;
+}
+
+static void
+nv50_ram_tidy(struct nouveau_fb *pfb)
+{
+ struct nv50_ram *ram = (void *)pfb->ram;
+ struct nv50_ramseq *hwsq = &ram->hwsq;
+ ram_exec(hwsq, false);
+}
void
__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
@@ -57,7 +264,7 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
kfree(mem);
}
-static int
+int
nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
{
@@ -160,77 +367,114 @@ nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
return rblock_size;
}
-static int
-nv50_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 datasize,
- struct nouveau_object **pobject)
+int
+nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int length, void **pobject)
{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_device *device = nv_device(pfb);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nouveau_ram *ram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 size;
+ struct nouveau_bios *bios = nouveau_bios(parent);
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_ram *ram;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nouveau_ram_create_(parent, engine, oclass, length, pobject);
+ ram = *pobject;
if (ret)
return ret;
ram->size = nv_rd32(pfb, 0x10020c);
- ram->size = (ram->size & 0xffffff00) |
- ((ram->size & 0x000000ff) << 32);
-
- size = (ram->size >> 12) - rsvd_head - rsvd_tail;
- switch (device->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf: /* IGPs, no reordering, no real VRAM */
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
- if (ret)
- return ret;
+ ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
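+ /* the low byte of 0x10020c appears to hold bits 32:39 of the total
+ * vram size, hence the shuffle above
+ */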
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+ case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+ ram->type = NV_MEM_TYPE_DDR3;
+ else
+ ram->type = NV_MEM_TYPE_DDR2;
break;
+ case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
+ case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
+ case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
default:
- switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
- case 0: ram->type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
- ram->type = NV_MEM_TYPE_DDR3;
- else
- ram->type = NV_MEM_TYPE_DDR2;
- break;
- case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
- case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
- case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
-
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
- nv50_fb_vram_rblock(pfb, ram) >> 12);
- if (ret)
- return ret;
-
- ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
- ram->tags = nv_rd32(pfb, 0x100320);
break;
}
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+ (rsvd_head + rsvd_tail),
+ nv50_fb_vram_rblock(pfb, ram) >> 12);
+ if (ret)
+ return ret;
+
+ ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+ ram->tags = nv_rd32(pfb, 0x100320);
ram->get = nv50_ram_get;
ram->put = nv50_ram_put;
return 0;
}
+static int
+nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 datasize,
+ struct nouveau_object **pobject)
+{
+ struct nv50_ram *ram;
+ int ret, i;
+
+ ret = nv50_ram_create(parent, engine, oclass, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ case NV_MEM_TYPE_GDDR3:
+ ram->base.calc = nv50_ram_calc;
+ ram->base.prog = nv50_ram_prog;
+ ram->base.tidy = nv50_ram_tidy;
+ break;
+ default:
+ nv_warn(ram, "reclocking of this ram type unsupported\n");
+ return 0;
+ }
+
+ ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
+ ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
+ ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
+ ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+ ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+ ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
+ ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
+ ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
+ for (i = 0; i < 8; i++)
+ ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+ ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
+ ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
+ ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
+
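+ /* memory timing registers 0x100220..0x100240 */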
+ for (i = 0; i < 9; i++)
+ ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));
+
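+ /* mode registers; dual-rank boards have a second copy of each MR
+ * register at +0x08
+ */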
+ if (ram->base.ranks > 1) {
+ ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
+ ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
+ ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
+ ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
+ } else {
+ ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
+ ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
+ ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
+ ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
+ }
+
+ return 0;
+}
+
struct nouveau_oclass
nv50_ram_oclass = {
- .handle = 0,
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_ram_create,
+ .ctor = nv50_ram_ctor,
.dtor = _nouveau_ram_dtor,
.init = _nouveau_ram_init,
.fini = _nouveau_ram_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
new file mode 100644
index 00000000000..f6292cd9207
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+
+#include <subdev/clock/nva3.h>
+#include <subdev/clock/pll.h>
+
+#include <core/option.h>
+
+#include "ramfuc.h"
+
+#include "nv50.h"
+
+struct nva3_ramfuc {
+ struct ramfuc base;
+ struct ramfuc_reg r_0x004000;
+ struct ramfuc_reg r_0x004004;
+ struct ramfuc_reg r_0x004018;
+ struct ramfuc_reg r_0x004128;
+ struct ramfuc_reg r_0x004168;
+ struct ramfuc_reg r_0x100200;
+ struct ramfuc_reg r_0x100210;
+ struct ramfuc_reg r_0x100220[9];
+ struct ramfuc_reg r_0x1002d0;
+ struct ramfuc_reg r_0x1002d4;
+ struct ramfuc_reg r_0x1002dc;
+ struct ramfuc_reg r_0x10053c;
+ struct ramfuc_reg r_0x1005a0;
+ struct ramfuc_reg r_0x1005a4;
+ struct ramfuc_reg r_0x100714;
+ struct ramfuc_reg r_0x100718;
+ struct ramfuc_reg r_0x10071c;
+ struct ramfuc_reg r_0x100760;
+ struct ramfuc_reg r_0x1007a0;
+ struct ramfuc_reg r_0x1007e0;
+ struct ramfuc_reg r_0x10f804;
+ struct ramfuc_reg r_0x1110e0;
+ struct ramfuc_reg r_0x111100;
+ struct ramfuc_reg r_0x111104;
+ struct ramfuc_reg r_0x611200;
+ struct ramfuc_reg r_mr[4];
+};
+
+struct nva3_ram {
+ struct nouveau_ram base;
+ struct nva3_ramfuc fuc;
+};
+
+static int
+nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nva3_ramfuc *fuc = &ram->fuc;
+ struct nva3_clock_info mclk;
+ struct bit_entry M;
+ u8 ver, cnt, strap;
+ u32 data;
+ struct {
+ u32 data;
+ u8 size;
+ } rammap, ramcfg, timing;
+ u32 r004018, r100760, ctrl;
+ u32 unk714, unk718, unk71c;
+ int ret;
+
+ /* lookup memory config data relevant to the target frequency */
+ rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
+ &cnt, &ramcfg.size);
+ if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
+ nv_error(pfb, "invalid/missing rammap entry\n");
+ return -EINVAL;
+ }
+
+ /* locate specific data set for the attached memory */
+ if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+ nv_error(pfb, "invalid/missing memory table\n");
+ return -EINVAL;
+ }
+
+ strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+ data = nv_ro16(bios, M.offset + 1);
+ if (data)
+ strap = nv_ro08(bios, data + strap);
+
+ if (strap >= cnt) {
+ nv_error(pfb, "invalid ramcfg strap\n");
+ return -EINVAL;
+ }
+
+ ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
+ if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
+ nv_error(pfb, "invalid/missing ramcfg entry\n");
+ return -EINVAL;
+ }
+
+ /* lookup memory timings, if bios says they're present */
+ strap = nv_ro08(bios, ramcfg.data + 0x01);
+ if (strap != 0xff) {
+ timing.data = nvbios_timing_entry(bios, strap, &ver,
+ &timing.size);
+ if (!timing.data || ver != 0x10 || timing.size < 0x19) {
+ nv_error(pfb, "invalid/missing timing entry\n");
+ return -EINVAL;
+ }
+ } else {
+ timing.data = 0;
+ }
+
+ ret = nva3_clock_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
+ if (ret < 0) {
+ nv_error(pfb, "failed mclk calculation\n");
+ return ret;
+ }
+
+ ret = ram_init(fuc, pfb);
+ if (ret)
+ return ret;
+
+ /* XXX: where does the 750MHz threshold come from? */
+ if (freq <= 750000) {
+ r004018 = 0x10000000;
+ r100760 = 0x22222222;
+ } else {
+ r004018 = 0x00000000;
+ r100760 = 0x00000000;
+ }
+
+ ctrl = ram_rd32(fuc, 0x004000);
+ if (ctrl & 0x00000008) {
+ if (mclk.pll) {
+ ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
+ ram_wr32(fuc, 0x004004, mclk.pll);
+ ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
+ ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
+ ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
+ ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
+ ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
+ ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
+ }
+ } else {
+ u32 ssel = 0x00000101;
+ if (mclk.clk)
+ ssel |= mclk.clk;
+ else
+ ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+ ram_mask(fuc, 0x004168, 0x003f3141, ssel);
+ }
+
+ if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
+ ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
+ } else {
+ ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
+ ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
+ }
+
+ if (!(nv_ro08(bios, rammap.data + 0x04) & 0x02))
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+ ram_wr32(fuc, 0x611200, 0x00003300);
+ if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x10))
+ ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+
+ ram_wr32(fuc, 0x1002d4, 0x00000001);
+ ram_wr32(fuc, 0x1002d0, 0x00000001);
+ ram_wr32(fuc, 0x1002d0, 0x00000001);
+ ram_wr32(fuc, 0x100210, 0x00000000);
+ ram_wr32(fuc, 0x1002dc, 0x00000001);
+ ram_nsec(fuc, 2000);
+
+ ctrl = ram_rd32(fuc, 0x004000);
+ if (!(ctrl & 0x00000008) && mclk.pll) {
+ ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+ ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
+ ram_wr32(fuc, 0x004018, 0x00001000);
+ ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
+ ram_wr32(fuc, 0x004004, mclk.pll);
+ ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
+ udelay(64);
+ ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
+ udelay(20);
+ } else
+ if (!mclk.pll) {
+ ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
+ ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+ ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
+ ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+ }
+
+ if ( (nv_ro08(bios, rammap.data + 0x04) & 0x08)) {
+ u32 unk5a0 = (nv_ro16(bios, ramcfg.data + 0x05) << 8) |
+ nv_ro08(bios, ramcfg.data + 0x05);
+ u32 unk5a4 = (nv_ro16(bios, ramcfg.data + 0x07));
+ u32 unk804 = (nv_ro08(bios, ramcfg.data + 0x09) & 0xf0) << 16 |
+ (nv_ro08(bios, ramcfg.data + 0x03) & 0x0f) << 16 |
+ (nv_ro08(bios, ramcfg.data + 0x09) & 0x0f) |
+ 0x80000000;
+ ram_wr32(fuc, 0x1005a0, unk5a0);
+ ram_wr32(fuc, 0x1005a4, unk5a4);
+ ram_wr32(fuc, 0x10f804, unk804);
+ ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
+ } else {
+ ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
+ ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
+ ram_mask(fuc, 0x100760, 0x22222222, r100760);
+ ram_mask(fuc, 0x1007a0, 0x22222222, r100760);
+ ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
+ }
+
+ if (mclk.pll) {
+ ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
+ ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+ }
+
+ /*XXX: LEAVE */
+ ram_wr32(fuc, 0x1002dc, 0x00000000);
+ ram_wr32(fuc, 0x1002d4, 0x00000001);
+ ram_wr32(fuc, 0x100210, 0x80000000);
+ ram_nsec(fuc, 1000);
+ ram_nsec(fuc, 1000);
+
+ ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
+ ram_nsec(fuc, 1000);
+ ram_nuke(fuc, mr[0]);
+ ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
+ ram_nsec(fuc, 1000);
+
+ ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
+ ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+
+ data = (nv_ro08(bios, ramcfg.data + 0x02) & 0x08) ? 0x00000000 : 0x00001000;
+ ram_mask(fuc, 0x100200, 0x00001000, data);
+
+ unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
+ unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
+ unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+ if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x20))
+ unk714 |= 0xf0000000;
+ if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x04))
+ unk714 |= 0x00000010;
+ ram_wr32(fuc, 0x100714, unk714);
+
+ if (nv_ro08(bios, ramcfg.data + 0x02) & 0x01)
+ unk71c |= 0x00000100;
+ ram_wr32(fuc, 0x10071c, unk71c);
+
+ if (nv_ro08(bios, ramcfg.data + 0x02) & 0x02)
+ unk718 |= 0x00000100;
+ ram_wr32(fuc, 0x100718, unk718);
+
+ if (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)
+ ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+
+ ram_mask(fuc, mr[0], 0x100, 0x100);
+ ram_nsec(fuc, 1000);
+ ram_mask(fuc, mr[0], 0x100, 0x000);
+ ram_nsec(fuc, 1000);
+
+ ram_nsec(fuc, 2000);
+ ram_nsec(fuc, 12000);
+
+ ram_wr32(fuc, 0x611200, 0x00003330);
+ if ( (nv_ro08(bios, rammap.data + 0x04) & 0x02))
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
+ if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
+ ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
+ ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
+ } else {
+ ram_mask(fuc, 0x111104, 0x00000600, 0x00000600);
+ }
+
+ if (mclk.pll) {
+ ram_mask(fuc, 0x004168, 0x00000001, 0x00000000);
+ ram_mask(fuc, 0x004168, 0x00000100, 0x00000000);
+ } else {
+ ram_mask(fuc, 0x004000, 0x00000001, 0x00000000);
+ ram_mask(fuc, 0x004128, 0x00000001, 0x00000000);
+ ram_mask(fuc, 0x004128, 0x00000100, 0x00000000);
+ }
+
+ return 0;
+}
+
+static int
+nva3_ram_prog(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nva3_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ return 0;
+}
+
+static void
+nva3_ram_tidy(struct nouveau_fb *pfb)
+{
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nva3_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, false);
+}
+
+static int
+nva3_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object->parent;
+ struct nva3_ram *ram = (void *)object;
+ int ret, i;
+
+ ret = nouveau_ram_init(&ram->base);
+ if (ret)
+ return ret;
+
+ /* prepare for ddr link training, and load training patterns */
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR3: {
+ static const u32 pattern[16] = {
+ 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+ 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+ 0x33333333, 0x55555555, 0x77777777, 0x66666666,
+ 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+ };
+
+ nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
+ nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+ nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+ nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 datasize,
+ struct nouveau_object **pobject)
+{
+ struct nva3_ram *ram;
+ int ret, i;
+
+ ret = nv50_ram_create(parent, engine, oclass, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ ram->base.calc = nva3_ram_calc;
+ ram->base.prog = nva3_ram_prog;
+ ram->base.tidy = nva3_ram_tidy;
+ break;
+ default:
+ nv_warn(ram, "reclocking of this ram type unsupported\n");
+ return 0;
+ }
+
+ ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
+ ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
+ ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
+ ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
+ ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
+ ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
+ ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
+ for (i = 0; i < 9; i++)
+ ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
+ ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
+ ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
+ ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
+ ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
+ ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
+ ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
+ ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
+ ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
+ ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
+ ram->fuc.r_0x100760 = ramfuc_reg(0x100760);
+ ram->fuc.r_0x1007a0 = ramfuc_reg(0x1007a0);
+ ram->fuc.r_0x1007e0 = ramfuc_reg(0x1007e0);
+ ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
+ ram->fuc.r_0x1110e0 = ramfuc_reg(0x1110e0);
+ ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
+ ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
+ ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
+
+ if (ram->base.ranks > 1) {
+ ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
+ ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
+ ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
+ ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
+ } else {
+ ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
+ ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
+ ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
+ ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_ram_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_ram_ctor,
+ .dtor = _nouveau_ram_dtor,
+ .init = nva3_ram_init,
+ .fini = _nouveau_ram_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
new file mode 100644
index 00000000000..00f2ca7e44a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static int
+nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 datasize,
+ struct nouveau_object **pobject)
+{
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_ram *ram;
+ int ret;
+
+ ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+
+ ram->size = nv_rd32(pfb, 0x10020c);
+ ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+ (rsvd_head + rsvd_tail), 1);
+ if (ret)
+ return ret;
+
+ ram->type = NV_MEM_TYPE_STOLEN;
+ ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ ram->get = nv50_ram_get;
+ ram->put = nv50_ram_put;
+ return 0;
+}
+
+struct nouveau_oclass
+nvaa_ram_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvaa_ram_ctor,
+ .dtor = _nouveau_ram_dtor,
+ .init = _nouveau_ram_init,
+ .fini = _nouveau_ram_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index cf97c4de4a6..f464547c6ba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,9 +23,414 @@
*/
#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
#include <subdev/ltcg.h>
-#include "priv.h"
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+
+#include <core/option.h>
+
+#include "ramfuc.h"
+
+#include "nvc0.h"
+
+struct nvc0_ramfuc {
+ struct ramfuc base;
+
+ struct ramfuc_reg r_0x10fe20;
+ struct ramfuc_reg r_0x10fe24;
+ struct ramfuc_reg r_0x137320;
+ struct ramfuc_reg r_0x137330;
+
+ struct ramfuc_reg r_0x132000;
+ struct ramfuc_reg r_0x132004;
+ struct ramfuc_reg r_0x132100;
+
+ struct ramfuc_reg r_0x137390;
+
+ struct ramfuc_reg r_0x10f290;
+ struct ramfuc_reg r_0x10f294;
+ struct ramfuc_reg r_0x10f298;
+ struct ramfuc_reg r_0x10f29c;
+ struct ramfuc_reg r_0x10f2a0;
+
+ struct ramfuc_reg r_0x10f300;
+ struct ramfuc_reg r_0x10f338;
+ struct ramfuc_reg r_0x10f340;
+ struct ramfuc_reg r_0x10f344;
+ struct ramfuc_reg r_0x10f348;
+
+ struct ramfuc_reg r_0x10f910;
+ struct ramfuc_reg r_0x10f914;
+
+ struct ramfuc_reg r_0x100b0c;
+ struct ramfuc_reg r_0x10f050;
+ struct ramfuc_reg r_0x10f090;
+ struct ramfuc_reg r_0x10f200;
+ struct ramfuc_reg r_0x10f210;
+ struct ramfuc_reg r_0x10f310;
+ struct ramfuc_reg r_0x10f314;
+ struct ramfuc_reg r_0x10f610;
+ struct ramfuc_reg r_0x10f614;
+ struct ramfuc_reg r_0x10f800;
+ struct ramfuc_reg r_0x10f808;
+ struct ramfuc_reg r_0x10f824;
+ struct ramfuc_reg r_0x10f830;
+ struct ramfuc_reg r_0x10f988;
+ struct ramfuc_reg r_0x10f98c;
+ struct ramfuc_reg r_0x10f990;
+ struct ramfuc_reg r_0x10f998;
+ struct ramfuc_reg r_0x10f9b0;
+ struct ramfuc_reg r_0x10f9b4;
+ struct ramfuc_reg r_0x10fb04;
+ struct ramfuc_reg r_0x10fb08;
+ struct ramfuc_reg r_0x137300;
+ struct ramfuc_reg r_0x137310;
+ struct ramfuc_reg r_0x137360;
+ struct ramfuc_reg r_0x1373ec;
+ struct ramfuc_reg r_0x1373f0;
+ struct ramfuc_reg r_0x1373f8;
+
+ struct ramfuc_reg r_0x61c140;
+ struct ramfuc_reg r_0x611200;
+
+ struct ramfuc_reg r_0x13d8f4;
+};
+
+struct nvc0_ram {
+ struct nouveau_ram base;
+ struct nvc0_ramfuc fuc;
+ struct nvbios_pll refpll;
+ struct nvbios_pll mempll;
+};
+
+static void
+nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
+{
+ struct nvc0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct nouveau_fb *pfb = nouveau_fb(ram);
+ u32 part = nv_rd32(pfb, 0x022438), i;
+ u32 mask = nv_rd32(pfb, 0x022554);
+ u32 addr = 0x110974;
+
+ ram_wr32(fuc, 0x10f910, magic);
+ ram_wr32(fuc, 0x10f914, magic);
+
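+ /* wait for each enabled partition to report training complete;
+ * 0x022438 looks to be the partition count and 0x022554 a mask of
+ * disabled partitions
+ */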
+ for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
+ if (mask & (1 << i))
+ continue;
+ ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
+ }
+}
+
+static int
+nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_clock *clk = nouveau_clock(pfb);
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nvc0_ram *ram = (void *)pfb->ram;
+ struct nvc0_ramfuc *fuc = &ram->fuc;
+ struct bit_entry M;
+ u8 ver, cnt, strap;
+ u32 data;
+ struct {
+ u32 data;
+ u8 size;
+ } rammap, ramcfg, timing;
+ int ref, div, out;
+ int from, mode;
+ int N1, M1, P;
+ int ret;
+
+ /* lookup memory config data relevant to the target frequency */
+ rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
+ &cnt, &ramcfg.size);
+ if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
+ nv_error(pfb, "invalid/missing rammap entry\n");
+ return -EINVAL;
+ }
+
+ /* locate specific data set for the attached memory */
+ if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+ nv_error(pfb, "invalid/missing memory table\n");
+ return -EINVAL;
+ }
+
+ strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+ data = nv_ro16(bios, M.offset + 1);
+ if (data)
+ strap = nv_ro08(bios, data + strap);
+
+ if (strap >= cnt) {
+ nv_error(pfb, "invalid ramcfg strap\n");
+ return -EINVAL;
+ }
+
+ ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
+ if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
+ nv_error(pfb, "invalid/missing ramcfg entry\n");
+ return -EINVAL;
+ }
+
+ /* lookup memory timings, if bios says they're present */
+ strap = nv_ro08(bios, ramcfg.data + 0x01);
+ if (strap != 0xff) {
+ timing.data = nvbios_timing_entry(bios, strap, &ver,
+ &timing.size);
+ if (!timing.data || ver != 0x10 || timing.size < 0x19) {
+ nv_error(pfb, "invalid/missing timing entry\n");
+ return -EINVAL;
+ }
+ } else {
+ timing.data = 0;
+ }
+
+ ret = ram_init(fuc, pfb);
+ if (ret)
+ return ret;
+
+ /* determine current mclk configuration */
+ from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */
+
+ /* determine target mclk configuration */
+ if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
+ ref = clk->read(clk, nv_clk_src_sppll0);
+ else
+ ref = clk->read(clk, nv_clk_src_sppll1);
+ div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
+ out = (ref * 2) / (div + 2);
+ mode = freq != out;
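+ /* mode 1: the divider alone cannot hit the target, so mclk has to
+ * be driven from mempll instead
+ */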
+
+ ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);
+
+ if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
+ ram_nuke(fuc, 0x132000);
+ ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
+ ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
+ }
+
+ if (mode == 1) {
+ ram_nuke(fuc, 0x10fe20);
+ ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
+ ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
+ }
+
+// 0x00020034 // 0x0000000a
+ ram_wr32(fuc, 0x132100, 0x00000001);
+
+ if (mode == 1 && from == 0) {
+ /* calculate refpll */
+ ret = nva3_pll_calc(nv_subdev(pfb), &ram->refpll,
+ ram->mempll.refclk, &N1, NULL, &M1, &P);
+ if (ret <= 0) {
+ nv_error(pfb, "unable to calc refpll\n");
+ return ret ? ret : -ERANGE;
+ }
+
+ ram_wr32(fuc, 0x10fe20, 0x20010000);
+ ram_wr32(fuc, 0x137320, 0x00000003);
+ ram_wr32(fuc, 0x137330, 0x81200006);
+ ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
+ ram_wr32(fuc, 0x10fe20, 0x20010001);
+ ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+
+ /* calculate mempll */
+ ret = nva3_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+ &N1, NULL, &M1, &P);
+ if (ret <= 0) {
+ nv_error(pfb, "unable to calc refpll\n");
+ return ret ? ret : -ERANGE;
+ }
+
+ ram_wr32(fuc, 0x10fe20, 0x20010005);
+ ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
+ ram_wr32(fuc, 0x132000, 0x18010101);
+ ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
+ } else
+ if (mode == 0) {
+ ram_wr32(fuc, 0x137300, 0x00000003);
+ }
+
+ if (from == 0) {
+ ram_nuke(fuc, 0x10fb04);
+ ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
+ ram_nuke(fuc, 0x10fb08);
+ ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
+ ram_wr32(fuc, 0x10f988, 0x2004ff00);
+ ram_wr32(fuc, 0x10f98c, 0x003fc040);
+ ram_wr32(fuc, 0x10f990, 0x20012001);
+ ram_wr32(fuc, 0x10f998, 0x00011a00);
+ ram_wr32(fuc, 0x13d8f4, 0x00000000);
+ } else {
+ ram_wr32(fuc, 0x10f988, 0x20010000);
+ ram_wr32(fuc, 0x10f98c, 0x00000000);
+ ram_wr32(fuc, 0x10f990, 0x20012001);
+ ram_wr32(fuc, 0x10f998, 0x00010a00);
+ }
+
+ if (from == 0) {
+// 0x00020039 // 0x000000ba
+ }
+
+// 0x0002003a // 0x00000002
+ ram_wr32(fuc, 0x100b0c, 0x00080012);
+// 0x00030014 // 0x00000000 // 0x02b5f070
+// 0x00030014 // 0x00010000 // 0x02b5f070
+ ram_wr32(fuc, 0x611200, 0x00003300);
+// 0x00020034 // 0x0000000a
+// 0x00030020 // 0x00000001 // 0x00000000
+
+ ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+ ram_wr32(fuc, 0x10f210, 0x00000000);
+ ram_nsec(fuc, 1000);
+ if (mode == 0)
+ nvc0_ram_train(fuc, 0x000c1001);
+ ram_wr32(fuc, 0x10f310, 0x00000001);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f090, 0x00000061);
+ ram_wr32(fuc, 0x10f090, 0xc000007f);
+ ram_nsec(fuc, 1000);
+
+ if (from == 0) {
+ ram_wr32(fuc, 0x10f824, 0x00007fd4);
+ } else {
+ ram_wr32(fuc, 0x1373ec, 0x00020404);
+ }
+
+ if (mode == 0) {
+ ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
+ ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+ ram_wr32(fuc, 0x10f830, 0x41500010);
+ ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+ ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
+ ram_wr32(fuc, 0x10f050, 0xff000090);
+ ram_wr32(fuc, 0x1373ec, 0x00020f0f);
+ ram_wr32(fuc, 0x1373f0, 0x00000003);
+ ram_wr32(fuc, 0x137310, 0x81201616);
+ ram_wr32(fuc, 0x132100, 0x00000001);
+// 0x00020039 // 0x000000ba
+ ram_wr32(fuc, 0x10f830, 0x00300017);
+ ram_wr32(fuc, 0x1373f0, 0x00000001);
+ ram_wr32(fuc, 0x10f824, 0x00007e77);
+ ram_wr32(fuc, 0x132000, 0x18030001);
+ ram_wr32(fuc, 0x10f090, 0x4000007e);
+ ram_nsec(fuc, 2000);
+ ram_wr32(fuc, 0x10f314, 0x00000001);
+ ram_wr32(fuc, 0x10f210, 0x80000000);
+ ram_wr32(fuc, 0x10f338, 0x00300220);
+ ram_wr32(fuc, 0x10f300, 0x0000011d);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f290, 0x02060505);
+ ram_wr32(fuc, 0x10f294, 0x34208288);
+ ram_wr32(fuc, 0x10f298, 0x44050411);
+ ram_wr32(fuc, 0x10f29c, 0x0000114c);
+ ram_wr32(fuc, 0x10f2a0, 0x42e10069);
+ ram_wr32(fuc, 0x10f614, 0x40044f77);
+ ram_wr32(fuc, 0x10f610, 0x40044f77);
+ ram_wr32(fuc, 0x10f344, 0x00600009);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f348, 0x00700008);
+ ram_wr32(fuc, 0x61c140, 0x19240000);
+ ram_wr32(fuc, 0x10f830, 0x00300017);
+ nvc0_ram_train(fuc, 0x80021001);
+ nvc0_ram_train(fuc, 0x80081001);
+ ram_wr32(fuc, 0x10f340, 0x00500004);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f830, 0x01300017);
+ ram_wr32(fuc, 0x10f830, 0x00300017);
+// 0x00030020 // 0x00000000 // 0x00000000
+// 0x00020034 // 0x0000000b
+ ram_wr32(fuc, 0x100b0c, 0x00080028);
+ ram_wr32(fuc, 0x611200, 0x00003330);
+ } else {
+ ram_wr32(fuc, 0x10f800, 0x00001800);
+ ram_wr32(fuc, 0x13d8f4, 0x00000000);
+ ram_wr32(fuc, 0x1373ec, 0x00020404);
+ ram_wr32(fuc, 0x1373f0, 0x00000003);
+ ram_wr32(fuc, 0x10f830, 0x40700010);
+ ram_wr32(fuc, 0x10f830, 0x40500010);
+ ram_wr32(fuc, 0x13d8f4, 0x00000000);
+ ram_wr32(fuc, 0x1373f8, 0x00000000);
+ ram_wr32(fuc, 0x132100, 0x00000101);
+ ram_wr32(fuc, 0x137310, 0x89201616);
+ ram_wr32(fuc, 0x10f050, 0xff000090);
+ ram_wr32(fuc, 0x1373ec, 0x00030404);
+ ram_wr32(fuc, 0x1373f0, 0x00000002);
+ // 0x00020039 // 0x00000011
+ ram_wr32(fuc, 0x132100, 0x00000001);
+ ram_wr32(fuc, 0x1373f8, 0x00002000);
+ ram_nsec(fuc, 2000);
+ ram_wr32(fuc, 0x10f808, 0x7aaa0050);
+ ram_wr32(fuc, 0x10f830, 0x00500010);
+ ram_wr32(fuc, 0x10f200, 0x00ce1000);
+ ram_wr32(fuc, 0x10f090, 0x4000007e);
+ ram_nsec(fuc, 2000);
+ ram_wr32(fuc, 0x10f314, 0x00000001);
+ ram_wr32(fuc, 0x10f210, 0x80000000);
+ ram_wr32(fuc, 0x10f338, 0x00300200);
+ ram_wr32(fuc, 0x10f300, 0x0000084d);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f290, 0x0b343825);
+ ram_wr32(fuc, 0x10f294, 0x3483028e);
+ ram_wr32(fuc, 0x10f298, 0x440c0600);
+ ram_wr32(fuc, 0x10f29c, 0x0000214c);
+ ram_wr32(fuc, 0x10f2a0, 0x42e20069);
+ ram_wr32(fuc, 0x10f200, 0x00ce0000);
+ ram_wr32(fuc, 0x10f614, 0x60044e77);
+ ram_wr32(fuc, 0x10f610, 0x60044e77);
+ ram_wr32(fuc, 0x10f340, 0x00500000);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f344, 0x00600228);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f348, 0x00700000);
+ ram_wr32(fuc, 0x13d8f4, 0x00000000);
+ ram_wr32(fuc, 0x61c140, 0x09a40000);
+
+ nvc0_ram_train(fuc, 0x800e1008);
+
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f800, 0x00001804);
+ // 0x00030020 // 0x00000000 // 0x00000000
+ // 0x00020034 // 0x0000000b
+ ram_wr32(fuc, 0x13d8f4, 0x00000000);
+ ram_wr32(fuc, 0x100b0c, 0x00080028);
+ ram_wr32(fuc, 0x611200, 0x00003330);
+ ram_nsec(fuc, 100000);
+ ram_wr32(fuc, 0x10f9b0, 0x05313f41);
+ ram_wr32(fuc, 0x10f9b4, 0x00002f50);
+
+ nvc0_ram_train(fuc, 0x010c1001);
+ }
+
+ ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
+// 0x00020016 // 0x00000000
+
+ if (mode == 0)
+ ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+ return 0;
+}
+
+static int
+nvc0_ram_prog(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nvc0_ram *ram = (void *)pfb->ram;
+ struct nvc0_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ return 0;
+}
+
+static void
+nvc0_ram_tidy(struct nouveau_fb *pfb)
+{
+ struct nvc0_ram *ram = (void *)pfb->ram;
+ struct nvc0_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, false);
+}
extern const u8 nvc0_pte_storage_type_map[256];
@@ -110,10 +515,9 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
return 0;
}
-static int
-nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+int
+nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int size, void **pobject)
{
struct nouveau_fb *pfb = nouveau_fb(parent);
struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -127,8 +531,8 @@ nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
bool uniform = true;
int ret, part;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nouveau_ram_create_(parent, engine, oclass, size, pobject);
+ ram = *pobject;
if (ret)
return ret;
@@ -182,13 +586,158 @@ nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
+static int
+nvc0_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object->parent;
+ struct nvc0_ram *ram = (void *)object;
+ int ret, i;
+
+ ret = nouveau_ram_init(&ram->base);
+ if (ret)
+ return ret;
+
+ /* prepare for ddr link training, and load training patterns */
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_GDDR5: {
+ static const u8 train0[] = {
+ 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+ 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+ };
+ static const u32 train1[] = {
+ 0x00000000, 0xffffffff,
+ 0x55555555, 0xaaaaaaaa,
+ 0x33333333, 0xcccccccc,
+ 0xf0f0f0f0, 0x0f0f0f0f,
+ 0x00ff00ff, 0xff00ff00,
+ 0x0000ffff, 0xffff0000,
+ };
+
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
+ nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
+ nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f918, train1[i % 12]);
+ nv_wr32(pfb, 0x10f91c, train1[i % 12]);
+ nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f918, train1[i % 12]);
+ nv_wr32(pfb, 0x10f91c, train1[i % 12]);
+ }
+ } break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bios *bios = nouveau_bios(parent);
+ struct nvc0_ram *ram;
+ int ret;
+
+ ret = nvc0_ram_create(parent, engine, oclass, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+
+ ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
+ if (ret) {
+ nv_error(ram, "mclk refpll data not found\n");
+ return ret;
+ }
+
+ ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
+ if (ret) {
+ nv_error(ram, "mclk pll data not found\n");
+ return ret;
+ }
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_GDDR5:
+ ram->base.calc = nvc0_ram_calc;
+ ram->base.prog = nvc0_ram_prog;
+ ram->base.tidy = nvc0_ram_tidy;
+ break;
+ default:
+ nv_warn(ram, "reclocking of this ram type unsupported\n");
+ return 0;
+ }
+
+ ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
+ ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
+ ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
+ ram->fuc.r_0x137330 = ramfuc_reg(0x137330);
+
+ ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
+ ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
+ ram->fuc.r_0x132100 = ramfuc_reg(0x132100);
+
+ ram->fuc.r_0x137390 = ramfuc_reg(0x137390);
+
+ ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
+ ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
+ ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
+ ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
+ ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
+
+ ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
+ ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
+ ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
+ ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
+ ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);
+
+ ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
+ ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
+
+ ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
+ ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
+ ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
+ ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
+ ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
+ ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
+ ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
+ ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
+ ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
+ ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
+ ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
+ ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
+ ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
+ ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
+ ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
+ ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
+ ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
+ ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
+ ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
+ ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
+ ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
+ ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
+ ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
+ ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
+ ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
+ ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
+ ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);
+
+ ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
+ ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
+
+ ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
+ return 0;
+}
+
struct nouveau_oclass
nvc0_ram_oclass = {
.handle = 0,
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_ram_create,
+ .ctor = nvc0_ram_ctor,
.dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
+ .init = nvc0_ram_init,
.fini = _nouveau_ram_fini,
}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
new file mode 100644
index 00000000000..bc86cfd084f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+
+#include <subdev/timer.h>
+
+#include <core/option.h>
+
+#include "nvc0.h"
+
+#include "ramfuc.h"
+
+struct nve0_ramfuc {
+ struct ramfuc base;
+
+ struct nvbios_pll refpll;
+ struct nvbios_pll mempll;
+
+ struct ramfuc_reg r_gpioMV;
+ u32 r_funcMV[2];
+ struct ramfuc_reg r_gpio2E;
+ u32 r_func2E[2];
+ struct ramfuc_reg r_gpiotrig;
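+ /* gpio lines (resolved from vbios function ids) toggled during the
+ * transition; "MV" presumably means memory voltage
+ */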
+
+ struct ramfuc_reg r_0x132020;
+ struct ramfuc_reg r_0x132028;
+ struct ramfuc_reg r_0x132024;
+ struct ramfuc_reg r_0x132030;
+ struct ramfuc_reg r_0x132034;
+ struct ramfuc_reg r_0x132000;
+ struct ramfuc_reg r_0x132004;
+ struct ramfuc_reg r_0x132040;
+
+ struct ramfuc_reg r_0x10f248;
+ struct ramfuc_reg r_0x10f290;
+ struct ramfuc_reg r_0x10f294;
+ struct ramfuc_reg r_0x10f298;
+ struct ramfuc_reg r_0x10f29c;
+ struct ramfuc_reg r_0x10f2a0;
+ struct ramfuc_reg r_0x10f2a4;
+ struct ramfuc_reg r_0x10f2a8;
+ struct ramfuc_reg r_0x10f2ac;
+ struct ramfuc_reg r_0x10f2cc;
+ struct ramfuc_reg r_0x10f2e8;
+ struct ramfuc_reg r_0x10f250;
+ struct ramfuc_reg r_0x10f24c;
+ struct ramfuc_reg r_0x10fec4;
+ struct ramfuc_reg r_0x10fec8;
+ struct ramfuc_reg r_0x10f604;
+ struct ramfuc_reg r_0x10f614;
+ struct ramfuc_reg r_0x10f610;
+ struct ramfuc_reg r_0x100770;
+ struct ramfuc_reg r_0x100778;
+ struct ramfuc_reg r_0x10f224;
+
+ struct ramfuc_reg r_0x10f870;
+ struct ramfuc_reg r_0x10f698;
+ struct ramfuc_reg r_0x10f694;
+ struct ramfuc_reg r_0x10f6b8;
+ struct ramfuc_reg r_0x10f808;
+ struct ramfuc_reg r_0x10f670;
+ struct ramfuc_reg r_0x10f60c;
+ struct ramfuc_reg r_0x10f830;
+ struct ramfuc_reg r_0x1373ec;
+ struct ramfuc_reg r_0x10f800;
+ struct ramfuc_reg r_0x10f82c;
+
+ struct ramfuc_reg r_0x10f978;
+ struct ramfuc_reg r_0x10f910;
+ struct ramfuc_reg r_0x10f914;
+
+ struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
+
+ struct ramfuc_reg r_0x62c000;
+ struct ramfuc_reg r_0x10f200;
+ struct ramfuc_reg r_0x10f210;
+ struct ramfuc_reg r_0x10f310;
+ struct ramfuc_reg r_0x10f314;
+ struct ramfuc_reg r_0x10f318;
+ struct ramfuc_reg r_0x10f090;
+ struct ramfuc_reg r_0x10f69c;
+ struct ramfuc_reg r_0x10f824;
+ struct ramfuc_reg r_0x1373f0;
+ struct ramfuc_reg r_0x1373f4;
+ struct ramfuc_reg r_0x137320;
+ struct ramfuc_reg r_0x10f65c;
+ struct ramfuc_reg r_0x10f6bc;
+ struct ramfuc_reg r_0x100710;
+ struct ramfuc_reg r_0x10f750;
+};
+
+struct nve0_ram {
+ struct nouveau_ram base;
+ struct nve0_ramfuc fuc;
+ int from;
+ int mode;
+ int N1, fN1, M1, P1;
+ int N2, M2, P2;
+};
+
+/*******************************************************************************
+ * GDDR5
+ ******************************************************************************/
+static void
+train(struct nve0_ramfuc *fuc, u32 magic)
+{
+ struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct nouveau_fb *pfb = nouveau_fb(ram);
+ const int mc = nv_rd32(pfb, 0x02243c);
+ int i;
+
+ ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
+ ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
+ for (i = 0; i < mc; i++) {
+ const u32 addr = 0x110974 + (i * 0x1000);
+ ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
+ }
+}
+
+static void
+r1373f4_init(struct nve0_ramfuc *fuc)
+{
+ struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
+ const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
+ const u32 runk0 = ram->fN1 << 16;
+ const u32 runk1 = ram->fN1;
+
+ if (ram->from == 2) {
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
+ } else {
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
+ }
+
+ ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
+ ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
+
+ /* (re)program refpll, if required */
+ if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
+ (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
+ ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+ ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
+ ram_wr32(fuc, 0x137320, 0x00000000);
+ ram_mask(fuc, 0x132030, 0xffff0000, runk0);
+ ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
+ ram_wr32(fuc, 0x132024, rcoef);
+ ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
+ ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
+ ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+ ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
+ }
+
+ /* (re)program mempll, if required */
+ if (ram->mode == 2) {
+ ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+ ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+ ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
+ ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
+ ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
+ } else {
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
+ }
+
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
+}
+
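+/* finish the clock switch: updates 0x1373ec/0x1373f0/0x1373f4 for the new
+ * mode and 0x10f800 from the ramcfg strap bytes.
+ */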
+static void
+r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+{
+ struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+ struct nouveau_bios *bios = nouveau_bios(ram);
+ u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
+ u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+ u32 tmp;
+
+ tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
+ ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
+ ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
+ if (ram->mode == 2) {
+ ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
+ ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
+ } else {
+ ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
+ ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+ }
+ ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
+}
+
+static int
+nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ const u32 rammap = ram->base.rammap.data;
+ const u32 ramcfg = ram->base.ramcfg.data;
+ const u32 timing = ram->base.timing.data;
+ int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
+ int mv = 1; /*XXX*/
+ u32 mask, data;
+
+ ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ /* MR1: turn termination on early, for some reason.. */
+ if ((ram->base.mr[1] & 0x03c) != 0x030)
+ ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+
+ if (vc == 1 && ram_have(fuc, gpio2E)) {
+ u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
+ if (temp != ram_rd32(fuc, gpio2E)) {
+ ram_wr32(fuc, gpiotrig, 1);
+ ram_nsec(fuc, 20000);
+ }
+ }
+
+ ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+
+ ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
+ ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+
+ ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+ ram_nsec(fuc, 1000);
+
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+ ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+ ram_wr32(fuc, 0x10f090, 0x00000061);
+ ram_wr32(fuc, 0x10f090, 0xc000007f);
+ ram_nsec(fuc, 1000);
+
+ ram_wr32(fuc, 0x10f698, 0x00000000);
+ ram_wr32(fuc, 0x10f69c, 0x00000000);
+
+ /*XXX: there does appear to be some kind of condition here; simply
+ * modifying these bits in the vbios from the default pl0
+ * entries shows no change. however, the data does appear to
+ * be correct and may be required for the transition back.
+ */
+ mask = 0x800f07e0;
+ data = 0x00030000;
+ if (ram_rd32(fuc, 0x10f978) & 0x00800000)
+ data |= 0x00040000;
+
+ if (1) {
+ data |= 0x800807e0;
+ switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
+ case 0xc0: data &= ~0x00000040; break;
+ case 0x80: data &= ~0x00000100; break;
+ case 0x40: data &= ~0x80000000; break;
+ case 0x00: data &= ~0x00000400; break;
+ }
+
+ switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
+ case 0x30: data &= ~0x00000020; break;
+ case 0x20: data &= ~0x00000080; break;
+ case 0x10: data &= ~0x00080000; break;
+ case 0x00: data &= ~0x00000200; break;
+ }
+ }
+
+ if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+ mask |= 0x03000000;
+ if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+ mask |= 0x00002000;
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+ mask |= 0x00004000;
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+ mask |= 0x00000003;
+ else {
+ mask |= 0x34000000;
+ if (ram_rd32(fuc, 0x10f978) & 0x00800000)
+ mask |= 0x40000000;
+ }
+ ram_mask(fuc, 0x10f824, mask, data);
+
+ ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
+
+ if (ram->from == 2 && ram->mode != 2) {
+ ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
+ ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+ ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
+ ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
+ ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+ r1373f4_init(fuc);
+ ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
+ r1373f4_fini(fuc, ramcfg);
+ ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
+ } else
+ if (ram->from != 2 && ram->mode != 2) {
+ r1373f4_init(fuc);
+ r1373f4_fini(fuc, ramcfg);
+ }
+
+ if (ram_have(fuc, gpioMV)) {
+ u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
+ if (temp != ram_rd32(fuc, gpioMV)) {
+ ram_wr32(fuc, gpiotrig, 1);
+ ram_nsec(fuc, 64000);
+ }
+ }
+
+ if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
+ (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+ ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
+ ram_nsec(fuc, 20000);
+ }
+
+ if (ram->from != 2 && ram->mode == 2) {
+ ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
+ ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
+ ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
+ r1373f4_init(fuc);
+ r1373f4_fini(fuc, ramcfg);
+ ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
+ ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
+ } else
+ if (ram->from == 2 && ram->mode == 2) {
+ ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
+ r1373f4_init(fuc);
+ r1373f4_fini(fuc, ramcfg);
+ }
+
+ if (ram->mode != 2) /*XXX*/ {
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+ ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
+ }
+
+ data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
+ ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
+ ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+ ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+
+ data = nv_ro08(bios, ramcfg + 0x04);
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+ ram_wr32(fuc, 0x10f698, 0x01010101 * data);
+ ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+ }
+
+ if (ram->mode != 2) {
+ u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
+ ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+ }
+
+ if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+ data = 0x00000080;
+ else
+ data = 0x00000000;
+ ram_mask(fuc, 0x10f60c, 0x00000080, data);
+
+ mask = 0x00070000;
+ data = 0x00000000;
+ if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+ data |= 0x03000000;
+ if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+ data |= 0x00002000;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+ data |= 0x00004000;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+ data |= 0x00000003;
+ else
+ data |= 0x74000000;
+ ram_mask(fuc, 0x10f824, mask, data);
+
+ if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+ data = 0x00000000;
+ else
+ data = 0x00001000;
+ ram_mask(fuc, 0x10f200, 0x00001000, data);
+
+ if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
+ ram_nsec(fuc, 10000);
+ ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
+ }
+
+ if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+ data = 0x00100000;
+ else
+ data = 0x00000000;
+ ram_mask(fuc, 0x10f82c, 0x00100000, data);
+
+ data = 0x00000000;
+ if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+ data |= 0x00002000;
+ if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+ data |= 0x00001000;
+ if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+ data |= 0x00004000;
+ ram_mask(fuc, 0x10f830, 0x00007000, data);
+
+ /* PFB timing */
+ ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
+ ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
+ ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
+ ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
+ ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
+ ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
+ ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
+ ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
+ ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
+ ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
+ ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+
+ data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
+ if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
+ data |= 0x70000000;
+ ram_mask(fuc, 0x10f604, 0x70000300, data);
+
+ data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
+ if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+ data |= 0x00000100;
+ ram_mask(fuc, 0x10f614, 0x70000000, data);
+
+ data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
+ if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
+ data |= 0x00000100;
+ ram_mask(fuc, 0x10f610, 0x70000000, data);
+
+ mask = 0x33f00000;
+ data = 0x00000000;
+ if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+ data |= 0x20200000;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ data |= 0x12800000;
+ /*XXX: see note above about there probably being some condition
+ * for the 10f824 stuff that uses ramcfg 3...
+ */
+ if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
+ if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ mask |= 0x00000020;
+ else
+ data |= 0x00000020;
+ mask |= 0x00000004;
+ }
+ } else {
+ mask |= 0x40000020;
+ data |= 0x00000004;
+ }
+
+ ram_mask(fuc, 0x10f808, mask, data);
+
+ data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
+ ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+
+ data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
+ if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
+ data |= 0x00000004;
+ if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
+ ram_wr32(fuc, 0x10f750, 0x04000009);
+ ram_wr32(fuc, 0x100710, 0x00000000);
+ ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
+ }
+ ram_mask(fuc, 0x100770, 0x00000007, data);
+
+ data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
+ if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+ data |= 0x80000000;
+ ram_mask(fuc, 0x100778, 0x00000700, data);
+
+ data = nv_ro16(bios, timing + 0x2c);
+ ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
+ ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
+
+ data = nv_ro08(bios, timing + 0x30);
+ ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+
+ data = nv_ro16(bios, timing + 0x31);
+ ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
+ (data & 0x0780) << 10 |
+ (data & 0x0078) << 5 |
+ (data & 0x0007));
+ ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
+ (data & 0x7000) >> 12);
+
+ ram_wr32(fuc, 0x10f090, 0x4000007e);
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+ ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+ ram_nsec(fuc, 2000);
+ ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
+
+ if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+ u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
+ train(fuc, 0xa4010000); /*XXX*/
+ ram_nsec(fuc, 1000);
+ ram_wr32(fuc, 0x10f294, temp);
+ }
+
+ ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
+ ram_wr32(fuc, mr[0], ram->base.mr[0]);
+ ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
+ ram_nsec(fuc, 1000);
+ ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
+ ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+ ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
+ ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
+
+ if (vc == 0 && ram_have(fuc, gpio2E)) {
+ u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
+ if (temp != ram_rd32(fuc, gpio2E)) {
+ ram_wr32(fuc, gpiotrig, 1);
+ ram_nsec(fuc, 20000);
+ }
+ }
+
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+ ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+ ram_nsec(fuc, 1000);
+
+ data = ram_rd32(fuc, 0x10f978);
+ data &= ~0x00046144;
+ data |= 0x0000000b;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+ data |= 0x0000200c;
+ else
+ data |= 0x00000000;
+ } else {
+ data |= 0x00040044;
+ }
+ ram_wr32(fuc, 0x10f978, data);
+
+ if (ram->mode == 1) {
+ data = ram_rd32(fuc, 0x10f830) | 0x00000001;
+ ram_wr32(fuc, 0x10f830, data);
+ }
+
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+ data = 0x88020000;
+ if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+ data |= 0x10000000;
+ if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+ data |= 0x00080000;
+ } else {
+ data = 0xa40e0000;
+ }
+ train(fuc, data);
+ ram_nsec(fuc, 1000);
+
+ if (ram->mode == 2) { /*XXX*/
+ ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
+ }
+
+ /* MR5: (re)enable LP3 if necessary
+ * XXX: need to find the switch, keeping off for now
+ */
+ ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+
+ if (ram->mode != 2) {
+ ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
+ ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+ }
+
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
+ ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
+ ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
+ }
+
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nv_ro08(bios, rammap + 0x08) & 0x01)
+ data = 0x00000800;
+ else
+ data = 0x00000000;
+ ram_mask(fuc, 0x10f200, 0x00000800, data);
+ return 0;
+}
+
+/*******************************************************************************
+ * DDR3
+ ******************************************************************************/
+
+static int
+nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
+ const u32 runk0 = ram->fN1 << 16;
+ const u32 runk1 = ram->fN1;
+ const u32 rammap = ram->base.rammap.data;
+ const u32 ramcfg = ram->base.ramcfg.data;
+ const u32 timing = ram->base.timing.data;
+ int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
+ int mv = 1; /*XXX*/
+ u32 mask, data;
+
+ ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (vc == 1 && ram_have(fuc, gpio2E)) {
+ u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
+ if (temp != ram_rd32(fuc, gpio2E)) {
+ ram_wr32(fuc, gpiotrig, 1);
+ ram_nsec(fuc, 20000);
+ }
+ }
+
+ ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+ if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+ ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
+
+ ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+ ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
+ ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+ ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+ ram_nsec(fuc, 1000);
+
+ ram_wr32(fuc, 0x10f090, 0x00000060);
+ ram_wr32(fuc, 0x10f090, 0xc000007e);
+
+ /*XXX: there does appear to be some kind of condition here; simply
+ * modifying these bits in the vbios from the default pl0
+ * entries shows no change. however, the data does appear to
+ * be correct and may be required for the transition back.
+ */
+ mask = 0x00010000;
+ data = 0x00010000;
+
+ if (1) {
+ mask |= 0x800807e0;
+ data |= 0x800807e0;
+ switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
+ case 0xc0: data &= ~0x00000040; break;
+ case 0x80: data &= ~0x00000100; break;
+ case 0x40: data &= ~0x80000000; break;
+ case 0x00: data &= ~0x00000400; break;
+ }
+
+ switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
+ case 0x30: data &= ~0x00000020; break;
+ case 0x20: data &= ~0x00000080; break;
+ case 0x10: data &= ~0x00080000; break;
+ case 0x00: data &= ~0x00000200; break;
+ }
+ }
+
+ if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+ mask |= 0x03000000;
+ if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+ mask |= 0x00002000;
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+ mask |= 0x00004000;
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+ mask |= 0x00000003;
+ else
+ mask |= 0x14000000;
+ ram_mask(fuc, 0x10f824, mask, data);
+
+ ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
+
+ ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
+ data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
+ data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+ ram_wr32(fuc, 0x1373ec, data);
+ ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
+ ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
+
+ /* (re)program refpll, if required */
+ if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
+ (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
+ ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+ ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
+ ram_wr32(fuc, 0x137320, 0x00000000);
+ ram_mask(fuc, 0x132030, 0xffff0000, runk0);
+ ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
+ ram_wr32(fuc, 0x132024, rcoef);
+ ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
+ ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
+ ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+ ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
+ }
+
+ ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
+ ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
+ ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+
+ if (ram_have(fuc, gpioMV)) {
+ u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
+ if (temp != ram_rd32(fuc, gpioMV)) {
+ ram_wr32(fuc, gpiotrig, 1);
+ ram_nsec(fuc, 64000);
+ }
+ }
+
+ if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
+ (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+ ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
+ ram_nsec(fuc, 20000);
+ }
+
+ if (ram->mode != 2) /*XXX*/ {
+ if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+ ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
+ }
+
+ data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
+ ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
+ ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+ ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+
+ mask = 0x00010000;
+ data = 0x00000000;
+ if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+ data |= 0x03000000;
+ if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+ data |= 0x00002000;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+ data |= 0x00004000;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+ data |= 0x00000003;
+ else
+ data |= 0x14000000;
+ ram_mask(fuc, 0x10f824, mask, data);
+ ram_nsec(fuc, 1000);
+
+ if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+ data = 0x00100000;
+ else
+ data = 0x00000000;
+ ram_mask(fuc, 0x10f82c, 0x00100000, data);
+
+ /* PFB timing */
+ ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
+ ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
+ ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
+ ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
+ ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
+ ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
+ ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
+ ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
+ ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
+ ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
+ ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+
+ mask = 0x33f00000;
+ data = 0x00000000;
+ if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+ data |= 0x20200000;
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ data |= 0x12800000;
+ /*XXX: see note above about there probably being some condition
+ * for the 10f824 stuff that uses ramcfg 3...
+ */
+ if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
+ if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
+ if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ mask |= 0x00000020;
+ else
+ data |= 0x00000020;
+ mask |= 0x08000004;
+ }
+ data |= 0x04000000;
+ } else {
+ mask |= 0x44000020;
+ data |= 0x08000004;
+ }
+
+ ram_mask(fuc, 0x10f808, mask, data);
+
+ data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
+ ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+
+ data = nv_ro16(bios, timing + 0x2c);
+ ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
+
+ if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6) >
+ ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
+ data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6;
+ else
+ data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+ ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+
+ data = nv_ro08(bios, timing + 0x30);
+ ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+
+ ram_wr32(fuc, 0x10f090, 0x4000007f);
+ ram_nsec(fuc, 1000);
+
+ ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+ ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+ ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
+ ram_nsec(fuc, 1000);
+
+ ram_nuke(fuc, mr[0]);
+ ram_mask(fuc, mr[0], 0x100, 0x100);
+ ram_mask(fuc, mr[0], 0x100, 0x000);
+
+ ram_mask(fuc, mr[2], 0xfff, ram->base.mr[2]);
+ ram_wr32(fuc, mr[0], ram->base.mr[0]);
+ ram_nsec(fuc, 1000);
+
+ ram_nuke(fuc, mr[0]);
+ ram_mask(fuc, mr[0], 0x100, 0x100);
+ ram_mask(fuc, mr[0], 0x100, 0x000);
+
+ if (vc == 0 && ram_have(fuc, gpio2E)) {
+ u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
+ if (temp != ram_rd32(fuc, gpio2E)) {
+ ram_wr32(fuc, gpiotrig, 1);
+ ram_nsec(fuc, 20000);
+ }
+ }
+
+ if (ram->mode != 2) {
+ ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
+ ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+ }
+
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+ ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
+ ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+ ram_nsec(fuc, 1000);
+
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nv_ro08(bios, rammap + 0x08) & 0x01)
+ data = 0x00000800;
+ else
+ data = 0x00000000;
+ ram_mask(fuc, 0x10f200, 0x00000800, data);
+ return 0;
+}
+
+/*******************************************************************************
+ * main hooks
+ ******************************************************************************/
+
+static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ struct bit_entry M;
+ int ret, refclk, strap, i;
+ u32 data;
+ u8 cnt;
+
+ /* lookup memory config data relevant to the target frequency */
+ ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
+ &ram->base.rammap.version,
+ &ram->base.rammap.size, &cnt,
+ &ram->base.ramcfg.size);
+ if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
+ ram->base.rammap.size < 0x09) {
+ nv_error(pfb, "invalid/missing rammap entry\n");
+ return -EINVAL;
+ }
+
+ /* locate specific data set for the attached memory */
+ if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+ nv_error(pfb, "invalid/missing memory table\n");
+ return -EINVAL;
+ }
+
+ strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+ data = nv_ro16(bios, M.offset + 1);
+ if (data)
+ strap = nv_ro08(bios, data + strap);
+
+ if (strap >= cnt) {
+ nv_error(pfb, "invalid ramcfg strap\n");
+ return -EINVAL;
+ }
+
+ ram->base.ramcfg.version = ram->base.rammap.version;
+ ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
+ (ram->base.ramcfg.size * strap);
+ if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
+ ram->base.ramcfg.size < 0x08) {
+ nv_error(pfb, "invalid/missing ramcfg entry\n");
+ return -EINVAL;
+ }
+
+ /* lookup memory timings, if bios says they're present */
+ strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
+ if (strap != 0xff) {
+ ram->base.timing.data =
+ nvbios_timing_entry(bios, strap,
+ &ram->base.timing.version,
+ &ram->base.timing.size);
+ if (!ram->base.timing.data ||
+ ram->base.timing.version != 0x20 ||
+ ram->base.timing.size < 0x33) {
+ nv_error(pfb, "invalid/missing timing entry\n");
+ return -EINVAL;
+ }
+ } else {
+ ram->base.timing.data = 0;
+ }
+
+ ret = ram_init(fuc, pfb);
+ if (ret)
+ return ret;
+
+ ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+ ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
+
+ /* XXX: this is *not* what nvidia do. on fermi nvidia generally
+ * select, based on some unknown condition, one of the two possible
+ * reference frequencies listed in the vbios table for mempll and
+ * program refpll to that frequency.
+ *
+ * so far, i've seen very weird values being chosen by nvidia on
+ * kepler boards, no idea how/why they're chosen.
+ */
+ refclk = freq;
+ if (ram->mode == 2)
+ refclk = fuc->mempll.refclk;
+
+ /* calculate refpll coefficients */
+ ret = nva3_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+ &ram->fN1, &ram->M1, &ram->P1);
+ fuc->mempll.refclk = ret;
+ if (ret <= 0) {
+ nv_error(pfb, "unable to calc refpll\n");
+ return -EINVAL;
+ }
+
+ /* calculate mempll coefficients, if we're using it */
+ if (ram->mode == 2) {
+ /* post-divider doesn't work... the reg takes the value but
+ * appears to completely ignore it. there *is* a bit (bit 28)
+ * that appears to divide the clock by 2 if set.
+ */
+ fuc->mempll.min_p = 1;
+ fuc->mempll.max_p = 2;
+
+ ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+ &ram->N2, NULL, &ram->M2, &ram->P2);
+ if (ret <= 0) {
+ nv_error(pfb, "unable to calc mempll\n");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
+ if (ram_have(fuc, mr[i]))
+ ram->base.mr[i] = ram_rd32(fuc, mr[i]);
+ }
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ ret = nouveau_sddr3_calc(&ram->base);
+ if (ret == 0)
+ ret = nve0_ram_calc_sddr3(pfb, freq);
+ break;
+ case NV_MEM_TYPE_GDDR5:
+ ret = nouveau_gddr5_calc(&ram->base);
+ if (ret == 0)
+ ret = nve0_ram_calc_gddr5(pfb, freq);
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+nve0_ram_prog(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ return 0;
+}
+
+static void
+nve0_ram_tidy(struct nouveau_fb *pfb)
+{
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ ram_exec(fuc, false);
+}
+
+static int
+nve0_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object->parent;
+ struct nve0_ram *ram = (void *)object;
+ struct nouveau_bios *bios = nouveau_bios(pfb);
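+ /* presumably the data patterns written to the GDDR5 link-training
+ * registers in the loop below.
+ */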
+ static const u8 train0[] = {
+ 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+ };
+ static const u32 train1[] = {
+ 0x00000000, 0xffffffff,
+ 0x55555555, 0xaaaaaaaa,
+ 0x33333333, 0xcccccccc,
+ 0xf0f0f0f0, 0x0f0f0f0f,
+ 0x00ff00ff, 0xff00ff00,
+ 0x0000ffff, 0xffff0000,
+ };
+ u8 ver, hdr, cnt, len, snr, ssz;
+ u32 data, save;
+ int ret, i;
+
+ ret = nouveau_ram_init(&ram->base);
+ if (ret)
+ return ret;
+
+ /* run a bunch of scripts from the rammap table. there are actually
+ * individual pointers for each rammap entry too, but nvidia seem to
+ * just run the last two entries' scripts early on in their init, and
+ * never again.. we'll just run them all once for now.
+ *
+ * i strongly suspect that each script is for a separate mode
+ * (likely selected by 0x10f65c's lower bits?), and that the
+ * binary driver skips the one that's already been set up by
+ * the init tables.
+ */
+ data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ if (!data || hdr < 0x15)
+ return -EINVAL;
+
+ cnt = nv_ro08(bios, data + 0x14); /* guess at count */
+ data = nv_ro32(bios, data + 0x10); /* guess u32... */
+ save = nv_rd32(pfb, 0x10f65c);
+ for (i = 0; i < cnt; i++) {
+ nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+ nvbios_exec(&(struct nvbios_init) {
+ .subdev = nv_subdev(pfb),
+ .bios = bios,
+ .offset = nv_ro32(bios, data), /* guess u32 */
+ .execute = 1,
+ });
+ data += 4;
+ }
+ nv_wr32(pfb, 0x10f65c, save);
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_GDDR5:
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
+ nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f918, train1[i % 12]);
+ nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f918, train1[i % 12]);
+
+ nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
+ nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f91c, train1[i % 12]);
+ nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
+ nv_wr32(pfb, 0x10f91c, train1[i % 12]);
+ }
+
+ for (i = 0; i < 0x100; i++) {
+ nv_wr32(pfb, 0x10f968, i);
+ nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
+ }
+
+ for (i = 0; i < 0x100; i++) {
+ nv_wr32(pfb, 0x10f96c, i);
+ nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+ struct dcb_gpio_func func;
+ struct nve0_ram *ram;
+ int ret;
+
+ ret = nvc0_ram_create(parent, engine, oclass, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ case NV_MEM_TYPE_GDDR5:
+ ram->base.calc = nve0_ram_calc;
+ ram->base.prog = nve0_ram_prog;
+ ram->base.tidy = nve0_ram_tidy;
+ break;
+ default:
+ nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
+ break;
+ }
+
+ /* parse bios data for both plls */
+ ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
+ if (ret) {
+ nv_error(pfb, "mclk refpll data not found\n");
+ return ret;
+ }
+
+ ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
+ if (ret) {
+ nv_error(pfb, "mclk pll data not found\n");
+ return ret;
+ }
+
+ ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+ if (ret == 0) {
+ ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
+ ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
+ ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
+ }
+
+ ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ if (ret == 0) {
+ ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
+ ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
+ ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
+ }
+
+ ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
+
+ ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
+ ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
+ ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
+ ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
+ ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
+ ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
+ ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
+ ram->fuc.r_0x132040 = ramfuc_reg(0x132040);
+
+ ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
+ ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
+ ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
+ ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
+ ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
+ ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
+ ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
+ ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
+ ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
+ ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
+ ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
+ ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
+ ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
+ ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
+ ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
+ ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
+ ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
+ ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
+ ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
+ ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
+ ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);
+
+ ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
+ ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
+ ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
+ ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
+ ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
+ ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
+ ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
+ ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
+ ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
+ ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
+ ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);
+
+ ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
+ ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
+ ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_GDDR5:
+ ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
+ ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
+ ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
+ ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
+ ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
+ ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
+ ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
+ ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
+ ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
+ ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
+ break;
+ case NV_MEM_TYPE_DDR3:
+ ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
+ ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
+ break;
+ default:
+ break;
+ }
+
+ ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
+ ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
+ ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
+ ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
+ ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
+ ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
+ ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
+ ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
+ ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
+ ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
+ ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
+ ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
+ ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
+ ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
+ ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
+ ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_ram_oclass = {
+ .handle = 0,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_ram_ctor,
+ .dtor = _nouveau_ram_dtor,
+ .init = nve0_ram_init,
+ .fini = _nouveau_ram_fini,
+ }
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
new file mode 100644
index 00000000000..571077e3907
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_FBRAM_SEQ_H__
+#define __NVKM_FBRAM_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
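+/* convenience wrappers around the hwsq sequencer: each takes the sequencer
+ * state 's' and operates on its 'base' hwsq context and, where applicable,
+ * the corresponding r_<name> register member of 's'.
+ */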
+#define ram_init(s,p) hwsq_init(&(s)->base, (p))
+#define ram_exec(s,e) hwsq_exec(&(s)->base, (e))
+#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define ram_rd32(s,r) hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define ram_wr32(s,r,d) hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define ram_nuke(s,r) hwsq_nuke(&(s)->base, &(s)->r_##r)
+#define ram_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define ram_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
+#define ram_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
+#define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n))
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
new file mode 100644
index 00000000000..ebd4cd9c35d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+struct ramxlat {
+ int id;
+ u8 enc;
+};
+
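+/* look up 'id' in a { id, enc } table terminated by a negative id, returning
+ * the encoded value or -EINVAL if not present.
+ */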
+static inline int
+ramxlat(const struct ramxlat *xlat, int id)
+{
+ while (xlat->id >= 0) {
+ if (xlat->id == id)
+ return xlat->enc;
+ xlat++;
+ }
+ return -EINVAL;
+}
+
+static const struct ramxlat
+ramddr3_cl[] = {
+ { 5, 2 }, { 6, 4 }, { 7, 6 }, { 8, 8 }, { 9, 10 }, { 10, 12 },
+ { 11, 14 },
+ /* the below are mentioned in some, but not all, ddr3 docs */
+ { 12, 1 }, { 13, 3 }, { 14, 5 },
+ { -1 }
+};
+
+static const struct ramxlat
+ramddr3_wr[] = {
+ { 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
+ /* the below are mentioned in some, but not all, ddr3 docs */
+ { 14, 7 }, { 16, 0 },
+ { -1 }
+};
+
+static const struct ramxlat
+ramddr3_cwl[] = {
+ { 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
+ /* the below are mentioned in some, but not all, ddr3 docs */
+ { 9, 4 },
+ { -1 }
+};
+
+int
+nouveau_sddr3_calc(struct nouveau_ram *ram)
+{
+ struct nouveau_bios *bios = nouveau_bios(ram);
+ int WL, CL, WR;
+
+ switch (!!ram->timing.data * ram->timing.version) {
+ case 0x20:
+ WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+ CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+ WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ WL = ramxlat(ramddr3_cwl, WL);
+ CL = ramxlat(ramddr3_cl, CL);
+ WR = ramxlat(ramddr3_wr, WR);
+ if (WL < 0 || CL < 0 || WR < 0)
+ return -EINVAL;
+
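+ /* pack the encoded values into the mode registers: WR into MR0[11:9],
+ * CL into MR0[6:4] and MR0[2], CWL into MR2[5:3].
+ */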
+ ram->mr[0] &= ~0xe74;
+ ram->mr[0] |= (WR & 0x07) << 9;
+ ram->mr[0] |= (CL & 0x0e) << 3;
+ ram->mr[0] |= (CL & 0x01) << 2;
+
+ ram->mr[2] &= ~0x038;
+ ram->mr[2] |= (WL & 0x07) << 3;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index d422acc9af1..f572c2804c3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -67,7 +67,7 @@ nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
}
}
- return -EINVAL;
+ return -ENOENT;
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2895c19bb15..041fd5edaeb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -195,7 +195,7 @@ nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
static int
nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
- struct i2c_board_info *info,
+ struct nouveau_i2c_board_info *info,
bool (*match)(struct nouveau_i2c_port *,
struct i2c_board_info *))
{
@@ -208,12 +208,29 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
- for (i = 0; info[i].addr; i++) {
- if (nv_probe_i2c(port, info[i].addr) &&
- (!match || match(port, &info[i]))) {
- nv_info(i2c, "detected %s: %s\n", what, info[i].type);
+ for (i = 0; info[i].dev.addr; i++) {
+ u8 orig_udelay = 0;
+
+ if ((port->adapter.algo == &i2c_bit_algo) &&
+ (info[i].udelay != 0)) {
+ struct i2c_algo_bit_data *algo = port->adapter.algo_data;
+ nv_debug(i2c, "using custom udelay %d instead of %d\n",
+ info[i].udelay, algo->udelay);
+ orig_udelay = algo->udelay;
+ algo->udelay = info[i].udelay;
+ }
+
+ if (nv_probe_i2c(port, info[i].dev.addr) &&
+ (!match || match(port, &info[i].dev))) {
+ nv_info(i2c, "detected %s: %s\n", what,
+ info[i].dev.type);
return i;
}
+
+ if (orig_udelay) {
+ struct i2c_algo_bit_data *algo = port->adapter.algo_data;
+ algo->udelay = orig_udelay;
+ }
}
nv_debug(i2c, "no devices found.\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index e290cfa4ace..b4b9943773b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -25,38 +25,48 @@
#include <subdev/mc.h>
#include <core/option.h>
+static inline u32
+nouveau_mc_intr_mask(struct nouveau_mc *pmc)
+{
+ u32 intr = nv_rd32(pmc, 0x000100);
+ if (intr == 0xffffffff) /* likely fallen off the bus */
+ intr = 0x00000000;
+ return intr;
+}
+
static irqreturn_t
nouveau_mc_intr(int irq, void *arg)
{
struct nouveau_mc *pmc = arg;
- const struct nouveau_mc_intr *map = pmc->intr_map;
- struct nouveau_device *device = nv_device(pmc);
+ const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
+ const struct nouveau_mc_intr *map = oclass->intr;
struct nouveau_subdev *unit;
- u32 stat, intr;
-
- intr = stat = nv_rd32(pmc, 0x000100);
- if (intr == 0xffffffff)
- return IRQ_NONE;
- while (stat && map->stat) {
- if (stat & map->stat) {
- unit = nouveau_subdev(pmc, map->unit);
- if (unit && unit->intr)
- unit->intr(unit);
- intr &= ~map->stat;
- }
- map++;
- }
+ u32 intr;
+ nv_wr32(pmc, 0x000140, 0x00000000);
+ nv_rd32(pmc, 0x000140);
+ intr = nouveau_mc_intr_mask(pmc);
if (pmc->use_msi)
- nv_wr08(pmc->base.base.parent, 0x00088068, 0xff);
+ oclass->msi_rearm(pmc);
if (intr) {
- nv_error(pmc, "unknown intr 0x%08x\n", stat);
+ u32 stat = intr = nouveau_mc_intr_mask(pmc);
+ while (map->stat) {
+ if (intr & map->stat) {
+ unit = nouveau_subdev(pmc, map->unit);
+ if (unit && unit->intr)
+ unit->intr(unit);
+ stat &= ~map->stat;
+ }
+ map++;
+ }
+
+ if (stat)
+ nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
- if (stat == IRQ_HANDLED)
- pm_runtime_mark_last_busy(&device->pdev->dev);
- return stat ? IRQ_HANDLED : IRQ_NONE;
+ nv_wr32(pmc, 0x000140, 0x00000001);
+ return intr ? IRQ_HANDLED : IRQ_NONE;
}
int
@@ -91,37 +101,42 @@ _nouveau_mc_dtor(struct nouveau_object *object)
int
nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- const struct nouveau_mc_intr *intr_map,
- int length, void **pobject)
+ struct nouveau_oclass *bclass, int length, void **pobject)
{
+ const struct nouveau_mc_oclass *oclass = (void *)bclass;
struct nouveau_device *device = nv_device(parent);
struct nouveau_mc *pmc;
int ret;
- ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+ ret = nouveau_subdev_create_(parent, engine, bclass, 0, "PMC",
"master", length, pobject);
pmc = *pobject;
if (ret)
return ret;
- pmc->intr_map = intr_map;
-
switch (device->pdev->device & 0x0ff0) {
- case 0x00f0: /* BR02? */
- case 0x02e0: /* BR02? */
- pmc->use_msi = false;
+ case 0x00f0:
+ case 0x02e0:
+ /* BR02? not yet clear exactly how these would be handled */
break;
default:
- pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
+ switch (device->chipset) {
+ case 0xaa: break; /* reported broken, nv also disable it */
+ default:
+ pmc->use_msi = true;
+ break;
+ }
+ }
+
+ pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi);
+ if (pmc->use_msi && oclass->msi_rearm) {
+ pmc->use_msi = pci_enable_msi(device->pdev) == 0;
if (pmc->use_msi) {
- pmc->use_msi = pci_enable_msi(device->pdev) == 0;
- if (pmc->use_msi) {
- nv_info(pmc, "MSI interrupts enabled\n");
- nv_wr08(device, 0x00088068, 0xff);
- }
+ nv_info(pmc, "MSI interrupts enabled\n");
+ oclass->msi_rearm(pmc);
}
- break;
+ } else {
+ pmc->use_msi = false;
}
ret = request_irq(device->pdev->irq, nouveau_mc_intr,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 64aa4edb0d9..2d787e4dfef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -22,17 +22,14 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
-
-struct nv04_mc_priv {
- struct nouveau_mc base;
-};
+#include "nv04.h"
const struct nouveau_mc_intr
nv04_mc_intr[] = {
{ 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
+ { 0x00010000, NVDEV_ENGINE_DISP },
{ 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
@@ -42,7 +39,18 @@ nv04_mc_intr[] = {
{}
};
-static int
+int
+nv04_mc_init(struct nouveau_object *object)
+{
+ struct nv04_mc_priv *priv = (void *)object;
+
+ nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+ nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
+
+ return nouveau_mc_init(&priv->base);
+}
+
+int
nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -50,7 +58,7 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -58,24 +66,14 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-int
-nv04_mc_init(struct nouveau_object *object)
-{
- struct nv04_mc_priv *priv = (void *)object;
-
- nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
- nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
-
- return nouveau_mc_init(&priv->base);
-}
-
-struct nouveau_oclass
-nv04_mc_oclass = {
- .handle = NV_SUBDEV(MC, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_mc_ctor,
.dtor = _nouveau_mc_dtor,
.init = nv04_mc_init,
.fini = _nouveau_mc_fini,
},
-};
+ .intr = nv04_mc_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
new file mode 100644
index 00000000000..b0d5c31606c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_MC_NV04_H__
+#define __NVKM_MC_NV04_H__
+
+#include <subdev/mc.h>
+
+struct nv04_mc_priv {
+ struct nouveau_mc base;
+};
+
+int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+extern const struct nouveau_mc_intr nv04_mc_intr[];
+int nv04_mc_init(struct nouveau_object *);
+void nv40_mc_msi_rearm(struct nouveau_mc *);
+int nv50_mc_init(struct nouveau_object *);
+extern const struct nouveau_mc_intr nv50_mc_intr[];
+extern const struct nouveau_mc_intr nvc0_mc_intr[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
new file mode 100644
index 00000000000..5b1faecfed2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+void
+nv40_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+ struct nv04_mc_priv *priv = (void *)pmc;
+ nv_wr08(priv, 0x088068, 0xff);
+}
+
+struct nouveau_oclass *
+nv40_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x40),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv04_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nv04_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index d9891782bf2..3bfee5c6c4f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -22,32 +22,12 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
-
-struct nv44_mc_priv {
- struct nouveau_mc base;
-};
-
-static int
-nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv44_mc_priv *priv;
- int ret;
-
- ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
+#include "nv04.h"
static int
nv44_mc_init(struct nouveau_object *object)
{
- struct nv44_mc_priv *priv = (void *)object;
+ struct nv04_mc_priv *priv = (void *)object;
u32 tmp = nv_rd32(priv, 0x10020c);
nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
@@ -60,13 +40,15 @@ nv44_mc_init(struct nouveau_object *object)
return nouveau_mc_init(&priv->base);
}
-struct nouveau_oclass
-nv44_mc_oclass = {
- .handle = NV_SUBDEV(MC, 0x44),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv44_mc_ctor,
+struct nouveau_oclass *
+nv44_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x44),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
.dtor = _nouveau_mc_dtor,
.init = nv44_mc_init,
.fini = _nouveau_mc_fini,
},
-};
+ .intr = nv04_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 2b1afe225db..e8822a934c4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -22,13 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
+#include "nv04.h"
-struct nv50_mc_priv {
- struct nouveau_mc base;
-};
-
-static const struct nouveau_mc_intr
+const struct nouveau_mc_intr
nv50_mc_intr[] = {
{ 0x00000001, NVDEV_ENGINE_MPEG },
{ 0x00000100, NVDEV_ENGINE_FIFO },
@@ -45,37 +41,30 @@ nv50_mc_intr[] = {
{},
};
-static int
-nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+static void
+nv50_mc_msi_rearm(struct nouveau_mc *pmc)
{
- struct nv50_mc_priv *priv;
- int ret;
-
- ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
+ struct nouveau_device *device = nv_device(pmc);
+ pci_write_config_byte(device->pdev, 0x68, 0xff);
}
int
nv50_mc_init(struct nouveau_object *object)
{
- struct nv50_mc_priv *priv = (void *)object;
+ struct nv04_mc_priv *priv = (void *)object;
nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
return nouveau_mc_init(&priv->base);
}
-struct nouveau_oclass
-nv50_mc_oclass = {
- .handle = NV_SUBDEV(MC, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_mc_ctor,
+struct nouveau_oclass *
+nv50_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
.dtor = _nouveau_mc_dtor,
.init = nv50_mc_init,
.fini = _nouveau_mc_fini,
},
-};
+ .intr = nv50_mc_intr,
+ .msi_rearm = nv50_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
new file mode 100644
index 00000000000..5f4541105e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+nv94_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x94),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv50_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nv50_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 06710419a59..f8a6f18e2d3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -22,11 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
-
-struct nv98_mc_priv {
- struct nouveau_mc base;
-};
+#include "nv04.h"
static const struct nouveau_mc_intr
nv98_mc_intr[] = {
@@ -36,6 +32,7 @@ nv98_mc_intr[] = {
{ 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
{ 0x00008000, NVDEV_ENGINE_BSP },
{ 0x00020000, NVDEV_ENGINE_VP },
+ { 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */
{ 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -47,29 +44,15 @@ nv98_mc_intr[] = {
{},
};
-static int
-nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_mc_priv *priv;
- int ret;
-
- ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_oclass
-nv98_mc_oclass = {
- .handle = NV_SUBDEV(MC, 0x98),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_mc_ctor,
+struct nouveau_oclass *
+nv98_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x98),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
.dtor = _nouveau_mc_dtor,
.init = nv50_mc_init,
.fini = _nouveau_mc_fini,
},
-};
+ .intr = nv98_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 104175c5a2d..c02b4763a2d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -22,13 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
+#include "nv04.h"
-struct nvc0_mc_priv {
- struct nouveau_mc base;
-};
-
-static const struct nouveau_mc_intr
+const struct nouveau_mc_intr
nvc0_mc_intr[] = {
{ 0x00000001, NVDEV_ENGINE_PPP },
{ 0x00000020, NVDEV_ENGINE_COPY0 },
@@ -41,6 +37,7 @@ nvc0_mc_intr[] = {
{ 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
+ { 0x01000000, NVDEV_SUBDEV_PWR },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
@@ -49,29 +46,22 @@ nvc0_mc_intr[] = {
{},
};
-static int
-nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+static void
+nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
{
- struct nvc0_mc_priv *priv;
- int ret;
-
- ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
+ struct nv04_mc_priv *priv = (void *)pmc;
+ nv_wr32(priv, 0x088704, 0x00000000);
}
-struct nouveau_oclass
-nvc0_mc_oclass = {
- .handle = NV_SUBDEV(MC, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_mc_ctor,
+struct nouveau_oclass *
+nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
.dtor = _nouveau_mc_dtor,
.init = nv50_mc_init,
.fini = _nouveau_mc_fini,
},
-};
+ .intr = nvc0_mc_intr,
+ .msi_rearm = nvc0_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
new file mode 100644
index 00000000000..837e545aeb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0xc3),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv50_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nvc0_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index e286e132c7e..129120473f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
acpi_handle handle;
int ret;
- handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
+ handle = ACPI_HANDLE(&device->pdev->dev);
if (!handle)
return false;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
new file mode 100644
index 00000000000..d4fd3bc9c66
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+#include <subdev/timer.h>
+
+static int
+nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ struct nouveau_subdev *subdev = nv_subdev(ppwr);
+ u32 addr;
+
+ /* wait for a free slot in the fifo */
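+ /* (the ring holds 8 entries; it is full while GET == PUT ^ 8) */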
+ addr = nv_rd32(ppwr, 0x10a4a0);
+ if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
+ return -EBUSY;
+
+ /* we currently only support a single process at a time waiting
+ * on a synchronous reply; take the PPWR mutex and tell the
+ * receive handler what we're waiting for
+ */
+ if (reply) {
+ mutex_lock(&subdev->mutex);
+ ppwr->recv.message = message;
+ ppwr->recv.process = process;
+ }
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(ppwr, 0x10a580, 0x00000001);
+ } while (nv_rd32(ppwr, 0x10a580) != 0x00000001);
+
+ /* write the packet */
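+ /* slots are 16 bytes each; (PUT & 7) << 4 addresses the slot inside the
+ * ucode's fifo_queue buffer at send.base (layout defined in fuc/host.fuc)
+ */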
+ nv_wr32(ppwr, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+ ppwr->send.base));
+ nv_wr32(ppwr, 0x10a1c4, process);
+ nv_wr32(ppwr, 0x10a1c4, message);
+ nv_wr32(ppwr, 0x10a1c4, data0);
+ nv_wr32(ppwr, 0x10a1c4, data1);
+ nv_wr32(ppwr, 0x10a4a0, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+ /* wait for reply, if requested */
+ if (reply) {
+ wait_event(ppwr->recv.wait, (ppwr->recv.process == 0));
+ reply[0] = ppwr->recv.data[0];
+ reply[1] = ppwr->recv.data[1];
+ mutex_unlock(&subdev->mutex);
+ }
+
+ return 0;
+}
+
+static void
+nouveau_pwr_recv(struct work_struct *work)
+{
+ struct nouveau_pwr *ppwr =
+ container_of(work, struct nouveau_pwr, recv.work);
+ u32 process, message, data0, data1;
+
+ /* nothing to do if GET == PUT */
+ u32 addr = nv_rd32(ppwr, 0x10a4cc);
+ if (addr == nv_rd32(ppwr, 0x10a4c8))
+ return;
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(ppwr, 0x10a580, 0x00000002);
+ } while (nv_rd32(ppwr, 0x10a580) != 0x00000002);
+
+ /* read the packet */
+ nv_wr32(ppwr, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+ ppwr->recv.base));
+ process = nv_rd32(ppwr, 0x10a1c4);
+ message = nv_rd32(ppwr, 0x10a1c4);
+ data0 = nv_rd32(ppwr, 0x10a1c4);
+ data1 = nv_rd32(ppwr, 0x10a1c4);
+ nv_wr32(ppwr, 0x10a4cc, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+ /* wake process if it's waiting on a synchronous reply */
+ if (ppwr->recv.process) {
+ if (process == ppwr->recv.process &&
+ message == ppwr->recv.message) {
+ ppwr->recv.data[0] = data0;
+ ppwr->recv.data[1] = data1;
+ ppwr->recv.process = 0;
+ wake_up(&ppwr->recv.wait);
+ return;
+ }
+ }
+
+ /* right now there are no other expected responses from the engine,
+ * so assume that any unexpected message is an error.
+ */
+ nv_warn(ppwr, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (char)((process & 0x000000ff) >> 0),
+ (char)((process & 0x0000ff00) >> 8),
+ (char)((process & 0x00ff0000) >> 16),
+ (char)((process & 0xff000000) >> 24),
+ process, message, data0, data1);
+}
+
+static void
+nouveau_pwr_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_pwr *ppwr = (void *)subdev;
+ u32 disp = nv_rd32(ppwr, 0x10a01c);
+ u32 intr = nv_rd32(ppwr, 0x10a008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000020) {
+ u32 stat = nv_rd32(ppwr, 0x10a16c);
+ if (stat & 0x80000000) {
+ nv_error(ppwr, "UAS fault at 0x%06x addr 0x%08x\n",
+ stat & 0x00ffffff, nv_rd32(ppwr, 0x10a168));
+ nv_wr32(ppwr, 0x10a16c, 0x00000000);
+ intr &= ~0x00000020;
+ }
+ }
+
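+ /* USER0 - raised by the ucode's HOST process to signal a pending
+ * message in the PWR->host ring (see fuc/host.fuc, host_recv)
+ */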
+ if (intr & 0x00000040) {
+ schedule_work(&ppwr->recv.work);
+ nv_wr32(ppwr, 0x10a004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
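+ /* USER1 - MMIO trap (NVKM_FALCON_MMIO_TRAP); log the address/data the
+ * falcon was about to write (MMIO_ADDR/MMIO_DATA) before acking
+ */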
+ if (intr & 0x00000080) {
+ nv_info(ppwr, "wr32 0x%06x 0x%08x\n", nv_rd32(ppwr, 0x10a7a0),
+ nv_rd32(ppwr, 0x10a7a4));
+ nv_wr32(ppwr, 0x10a004, 0x00000080);
+ intr &= ~0x00000080;
+ }
+
+ if (intr) {
+ nv_error(ppwr, "intr 0x%08x\n", intr);
+ nv_wr32(ppwr, 0x10a004, intr);
+ }
+}
+
+int
+_nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_pwr *ppwr = (void *)object;
+
+ nv_wr32(ppwr, 0x10a014, 0x00000060);
+ flush_work(&ppwr->recv.work);
+
+ return nouveau_subdev_fini(&ppwr->base, suspend);
+}
+
+int
+_nouveau_pwr_init(struct nouveau_object *object)
+{
+ struct nouveau_pwr *ppwr = (void *)object;
+ int ret, i;
+
+ ret = nouveau_subdev_init(&ppwr->base);
+ if (ret)
+ return ret;
+
+ nv_subdev(ppwr)->intr = nouveau_pwr_intr;
+ ppwr->message = nouveau_pwr_send;
+
+ /* prevent previous ucode from running, wait for idle, reset */
+ nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+ nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
+ nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
+ nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);
+
+ /* upload data segment */
+ nv_wr32(ppwr, 0x10a1c0, 0x01000000);
+ for (i = 0; i < ppwr->data.size / 4; i++)
+ nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);
+
+ /* upload code segment */
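+ /* code is transferred in 256-byte blocks; 0x10a188 appears to select
+ * the destination page index for each block of 64 words
+ */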
+ nv_wr32(ppwr, 0x10a180, 0x01000000);
+ for (i = 0; i < ppwr->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(ppwr, 0x10a188, i >> 6);
+ nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
+ }
+
+ /* start it running */
+ nv_wr32(ppwr, 0x10a10c, 0x00000000);
+ nv_wr32(ppwr, 0x10a104, 0x00000000);
+ nv_wr32(ppwr, 0x10a100, 0x00000002);
+
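+ /* the ucode's host_init publishes each ring as (size << 16 | base) in
+ * the H2D/D2H scratch registers once it is running (see fuc/host.fuc)
+ */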
+ /* wait for valid host->pwr ring configuration */
+ if (!nv_wait_ne(ppwr, 0x10a4d0, 0xffffffff, 0x00000000))
+ return -EBUSY;
+ ppwr->send.base = nv_rd32(ppwr, 0x10a4d0) & 0x0000ffff;
+ ppwr->send.size = nv_rd32(ppwr, 0x10a4d0) >> 16;
+
+ /* wait for valid pwr->host ring configuration */
+ if (!nv_wait_ne(ppwr, 0x10a4dc, 0xffffffff, 0x00000000))
+ return -EBUSY;
+ ppwr->recv.base = nv_rd32(ppwr, 0x10a4dc) & 0x0000ffff;
+ ppwr->recv.size = nv_rd32(ppwr, 0x10a4dc) >> 16;
+
+ nv_wr32(ppwr, 0x10a010, 0x000000e0);
+ return 0;
+}
+
+int
+nouveau_pwr_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int length, void **pobject)
+{
+ struct nouveau_pwr *ppwr;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PPWR",
+ "pwr", length, pobject);
+ ppwr = *pobject;
+ if (ret)
+ return ret;
+
+ INIT_WORK(&ppwr->recv.work, nouveau_pwr_recv);
+ init_waitqueue_head(&ppwr->recv.wait);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
new file mode 100644
index 00000000000..2284ecb1c9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_HOST, #host_init, #host_recv)
+#endif
+
+/******************************************************************************
+ * HOST data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+// HOST (R)FIFO packet format
+.equ #fifo_process 0x00
+.equ #fifo_message 0x04
+.equ #fifo_data0 0x08
+.equ #fifo_data1 0x0c
+
+// HOST HOST->PWR queue description
+.equ #fifo_qlen 4 // log2(size of queue entry in bytes)
+.equ #fifo_qnum 3 // log2(max number of entries in queue)
+.equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue
+.equ #fifo_qmaskp (#fifo_qmaskb - 1)
+.equ #fifo_qmaskf ((#fifo_qmaskb << 1) - 1)
+.equ #fifo_qsize (1 << (#fifo_qlen + #fifo_qnum))
+fifo_queue: .skip 128 // #fifo_qsize
+
+// HOST PWR->HOST queue description
+.equ #rfifo_qlen 4 // log2(size of queue entry in bytes)
+.equ #rfifo_qnum 3 // log2(max number of entries in queue)
+.equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
+.equ #rfifo_qmaskp (#rfifo_qmaskb - 1)
+.equ #rfifo_qmaskf ((#rfifo_qmaskb << 1) - 1)
+.equ #rfifo_qsize (1 << (#rfifo_qlen + #rfifo_qnum))
+rfifo_queue: .skip 128 // #rfifo_qsize
+#endif
+
+/******************************************************************************
+ * HOST code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// HOST->PWR comms - dequeue message(s) for process(es) from FIFO
+//
+// $r15 - current (host)
+// $r0 - zero
+host_send:
+ nv_iord($r1, NV_PPWR_FIFO_GET(0))
+ nv_iord($r2, NV_PPWR_FIFO_PUT(0))
+ cmp b32 $r1 $r2
+ bra e #host_send_done
+ // calculate address of message
+ and $r14 $r1 #fifo_qmaskp
+ shl b32 $r14 $r14 #fifo_qlen
+ add b32 $r14 #fifo_queue
+
+ // read message data, and pass to appropriate process
+ ld b32 $r11 D[$r14 + #fifo_data1]
+ ld b32 $r12 D[$r14 + #fifo_data0]
+ ld b32 $r13 D[$r14 + #fifo_message]
+ ld b32 $r14 D[$r14 + #fifo_process]
+ call(send)
+
+ // increment GET
+ add b32 $r1 0x1
+ and $r14 $r1 #fifo_qmaskf
+ nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+ bra #host_send
+ host_send_done:
+ ret
+
+// PWR->HOST comms - enqueue message for HOST to RFIFO
+//
+// $r15 - current (host)
+// $r14 - process
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0 - zero
+host_recv:
+ // message from intr handler == HOST->PWR comms pending
+ mov $r1 (PROC_KERN & 0x0000ffff)
+ sethi $r1 (PROC_KERN & 0xffff0000)
+ cmp b32 $r14 $r1
+ bra e #host_send
+
+ // wait for space in RFIFO
+ host_recv_wait:
+ nv_iord($r1, NV_PPWR_RFIFO_GET)
+ nv_iord($r2, NV_PPWR_RFIFO_PUT)
+ xor $r1 #rfifo_qmaskb
+ cmp b32 $r1 $r2
+ bra e #host_recv_wait
+
+ and $r3 $r2 #rfifo_qmaskp
+ shl b32 $r3 #rfifo_qlen
+ add b32 $r3 #rfifo_queue
+
+ // enqueue message
+ st b32 D[$r3 + #fifo_data1] $r11
+ st b32 D[$r3 + #fifo_data0] $r12
+ st b32 D[$r3 + #fifo_message] $r13
+ st b32 D[$r3 + #fifo_process] $r14
+
+ add b32 $r2 0x1
+ and $r2 #rfifo_qmaskf
+ nv_iowr(NV_PPWR_RFIFO_PUT, $r2)
+
+ // notify host of pending message
+ mov $r2 NV_PPWR_INTR_TRIGGER_USER0
+ nv_iowr(NV_PPWR_INTR_TRIGGER, $r2)
+ ret
+
+// $r15 - current (host)
+// $r0 - zero
+host_init:
+ // store each fifo's base/size in H2D/D2H scratch regs
+ mov $r1 #fifo_qsize
+ shl b32 $r1 16
+ or $r1 #fifo_queue
+ nv_iowr(NV_PPWR_H2D, $r1);
+
+ mov $r1 #rfifo_qsize
+ shl b32 $r1 16
+ or $r1 #rfifo_queue
+ nv_iowr(NV_PPWR_D2H, $r1);
+
+ // enable fifo subintr for first fifo
+ mov $r1 1
+ nv_iowr(NV_PPWR_FIFO_INTR_EN, $r1)
+ ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
new file mode 100644
index 00000000000..98f1c3738b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_IDLE, #idle, #idle_recv)
+#endif
+
+/******************************************************************************
+ * IDLE data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * IDLE code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// message handler - no messages are handled yet, simply returns
+//
+// $r15 - current (idle)
+// $r14 - message
+// $r0 - zero
+idle_recv:
+ ret
+
+// idle loop - drain each process' message queue, then sleep until an interrupt
+//
+// $r15 - current (idle)
+// $r0 - zero
+idle:
+ // set our "no interrupt has occurred during our execution" flag
+ bset $flags $p0
+
+ // count IDLE invocations for debugging purposes
+ nv_iord($r1, NV_PPWR_DSCRATCH(1))
+ add b32 $r1 1
+ nv_iowr(NV_PPWR_DSCRATCH(1), $r1)
+
+ // keep looping while there's pending messages for any process
+ idle_loop:
+ mov $r1 #proc_list_head
+ bclr $flags $p2
+ idle_proc:
+ // process the process' messages until there's none left
+ idle_proc_exec:
+ push $r1
+ mov b32 $r14 $r1
+ call(recv)
+ pop $r1
+ bra not $p1 #idle_proc_next
+ bset $flags $p2
+ bra #idle_proc_exec
+ // next process!
+ idle_proc_next:
+ add b32 $r1 #proc_size
+ cmp b32 $r1 $r15
+ bra ne #idle_proc
+ bra $p2 #idle_loop
+
+ // sleep if no interrupts have occurred
+ sleep $p0
+ bra #idle
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
new file mode 100644
index 00000000000..0a7b05fa5c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/******************************************************************************
+ * kernel data segment
+ *****************************************************************************/
+#ifdef INCLUDE_PROC
+proc_kern:
+process(PROC_KERN, 0, 0)
+proc_list_head:
+#endif
+
+#ifdef INCLUDE_DATA
+proc_list_tail:
+time_prev: .b32 0
+time_next: .b32 0
+#endif
+
+/******************************************************************************
+ * kernel code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
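+// entry point - branch over the library routines below to #init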
+ bra #init
+
+// read nv register
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - data (return)
+// $r0 - zero
+rd32:
+ nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
+ mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
+ sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+ nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+ rd32_wait:
+ nv_iord($r14, NV_PPWR_MMIO_CTRL)
+ and $r14 NV_PPWR_MMIO_CTRL_STATUS
+ bra nz #rd32_wait
+ nv_iord($r13, NV_PPWR_MMIO_DATA)
+ ret
+
+// write nv register
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - data
+// $r0 - zero
+wr32:
+ nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
+ nv_iowr(NV_PPWR_MMIO_DATA, $r13)
+ mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
+ or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
+ sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+
+#ifdef NVKM_FALCON_MMIO_TRAP
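+ // raise USER1 to trap the write to the host, and wait for the host
+ // to acknowledge it before issuing the write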
+ mov $r8 NV_PPWR_INTR_TRIGGER_USER1
+ nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+ wr32_host:
+ nv_iord($r8, NV_PPWR_INTR)
+ and $r8 NV_PPWR_INTR_USER1
+ bra nz #wr32_host
+#endif
+
+ nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+ wr32_wait:
+ nv_iord($r14, NV_PPWR_MMIO_CTRL)
+ and $r14 NV_PPWR_MMIO_CTRL_STATUS
+ bra nz #wr32_wait
+ ret
+
+// busy-wait for a period of time
+//
+// $r15 - current
+// $r14 - ns
+// $r0 - zero
+nsec:
+ nv_iord($r8, NV_PPWR_TIMER_LOW)
+ nsec_loop:
+ nv_iord($r9, NV_PPWR_TIMER_LOW)
+ sub b32 $r9 $r8
+ cmp b32 $r9 $r14
+ bra l #nsec_loop
+ ret
+
+// poll a register until the masked value matches, or the timeout expires
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - mask
+// $r12 - data
+// $r11 - timeout (ns)
+// $r0 - zero
+wait:
+ nv_iord($r8, NV_PPWR_TIMER_LOW)
+ wait_loop:
+ nv_rd32($r10, $r14)
+ and $r10 $r13
+ cmp b32 $r10 $r12
+ bra e #wait_done
+ nv_iord($r9, NV_PPWR_TIMER_LOW)
+ sub b32 $r9 $r8
+ cmp b32 $r9 $r11
+ bra l #wait_loop
+ wait_done:
+ ret
+
+// $r15 - current (kern)
+// $r14 - process
+// $r8 - NV_PPWR_INTR
+intr_watchdog:
+ // read process' timer status, skip if not enabled
+ ld b32 $r9 D[$r14 + #proc_time]
+ cmp b32 $r9 0
+ bra z #intr_watchdog_next_proc
+
+ // subtract last timer's value from process' timer,
+ // if it's <= 0 then the timer has expired
+ ld b32 $r10 D[$r0 + #time_prev]
+ sub b32 $r9 $r10
+ bra g #intr_watchdog_next_time
+ mov $r13 KMSG_ALARM
+ call(send_proc)
+ clear b32 $r9
+ bra #intr_watchdog_next_proc
+
+ // otherwise, update the next timer's value if this
+ // process' timer is the soonest
+ intr_watchdog_next_time:
+ // ... or if there's no next timer yet
+ ld b32 $r10 D[$r0 + #time_next]
+ cmp b32 $r10 0
+ bra z #intr_watchdog_next_time_set
+
+ cmp b32 $r9 $r10
+ bra g #intr_watchdog_next_proc
+ intr_watchdog_next_time_set:
+ st b32 D[$r0 + #time_next] $r9
+
+ // update process' timer status, and advance
+ intr_watchdog_next_proc:
+ st b32 D[$r14 + #proc_time] $r9
+ add b32 $r14 #proc_size
+ cmp b32 $r14 #proc_list_tail
+ bra ne #intr_watchdog
+ ret
+
+intr:
+ push $r0
+ clear b32 $r0
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r12
+ push $r13
+ push $r14
+ push $r15
+ mov $r15 #proc_kern
+ mov $r8 $flags
+ push $r8
+
+ nv_iord($r8, NV_PPWR_DSCRATCH(0))
+ add b32 $r8 1
+ nv_iowr(NV_PPWR_DSCRATCH(0), $r8)
+
+ nv_iord($r8, NV_PPWR_INTR)
+ and $r9 $r8 NV_PPWR_INTR_WATCHDOG
+ bra z #intr_skip_watchdog
+ st b32 D[$r0 + #time_next] $r0
+ mov $r14 #proc_list_head
+ call(intr_watchdog)
+ ld b32 $r9 D[$r0 + #time_next]
+ cmp b32 $r9 0
+ bra z #intr_skip_watchdog
+ nv_iowr(NV_PPWR_WATCHDOG_TIME, $r9)
+ st b32 D[$r0 + #time_prev] $r9
+
+ intr_skip_watchdog:
+ and $r9 $r8 NV_PPWR_INTR_SUBINTR
+ bra z #intr_skip_subintr
+ nv_iord($r9, NV_PPWR_SUBINTR)
+ and $r10 $r9 NV_PPWR_SUBINTR_FIFO
+ bra z #intr_subintr_skip_fifo
+ nv_iord($r12, NV_PPWR_FIFO_INTR)
+ push $r12
+ mov $r14 (PROC_HOST & 0x0000ffff)
+ sethi $r14 (PROC_HOST & 0xffff0000)
+ mov $r13 KMSG_FIFO
+ call(send)
+ pop $r12
+ nv_iowr(NV_PPWR_FIFO_INTR, $r12)
+ intr_subintr_skip_fifo:
+ nv_iowr(NV_PPWR_SUBINTR, $r9)
+
+ intr_skip_subintr:
+ and $r9 $r8 NV_PPWR_INTR_PAUSE
+ bra z #intr_skip_pause
+ and $r10 0xffbf
+
+ intr_skip_pause:
+ and $r9 $r8 NV_PPWR_INTR_USER0
+ bra z #intr_skip_user0
+ and $r10 0xffbf
+
+ intr_skip_user0:
+ nv_iowr(NV_PPWR_INTR_ACK, $r8)
+ pop $r8
+ mov $flags $r8
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r12
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ pop $r0
+ bclr $flags $p0
+ iret
+
+// request the current process be sent a message after a timeout expires
+//
+// $r15 - current
+// $r14 - ticks
+// $r0 - zero
+timer:
+ // interrupts off to prevent racing with timer isr
+ bclr $flags ie0
+
+ // if current process already has a timer set, bail
+ ld b32 $r8 D[$r15 + #proc_time]
+ cmp b32 $r8 0
+ bra g #timer_done
+ st b32 D[$r15 + #proc_time] $r14
+
+ // halt watchdog timer temporarily and check for a pending
+ // interrupt. if there's one already pending, we can just
+ // bail since the timer isr will queue the next soonest
+ // right after it's done
+ nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
+ nv_iord($r8, NV_PPWR_INTR)
+ and $r8 NV_PPWR_INTR_WATCHDOG
+ bra nz #timer_enable
+
+ // update the watchdog if this timer should expire first,
+ // or if there's no timeout already set
+ nv_iord($r8, NV_PPWR_WATCHDOG_TIME)
+ cmp b32 $r14 $r0
+ bra e #timer_reset
+ cmp b32 $r14 $r8
+ bra l #timer_done
+ timer_reset:
+ nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14)
+ st b32 D[$r0 + #time_prev] $r14
+
+ // re-enable the watchdog timer
+ timer_enable:
+ mov $r8 1
+ nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
+
+ // interrupts back on
+ timer_done:
+ bset $flags ie0
+ ret
+
+// send message to another process
+//
+// $r15 - current
+// $r14 - process
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0 - zero
+send_proc:
+ push $r8
+ push $r9
+ // check for space in queue
+ ld b32 $r8 D[$r14 + #proc_qget]
+ ld b32 $r9 D[$r14 + #proc_qput]
+ xor $r8 #proc_qmaskb
+ cmp b32 $r8 $r9
+ bra e #send_done
+
+ // enqueue message
+ and $r8 $r9 #proc_qmaskp
+ shl b32 $r8 $r8 #proc_qlen
+ add b32 $r8 #proc_queue
+ add b32 $r8 $r14
+
+ ld b32 $r10 D[$r15 + #proc_id]
+ st b32 D[$r8 + #msg_process] $r10
+ st b32 D[$r8 + #msg_message] $r13
+ st b32 D[$r8 + #msg_data0] $r12
+ st b32 D[$r8 + #msg_data1] $r11
+
+ // increment PUT
+ add b32 $r9 1
+ and $r9 #proc_qmaskf
+ st b32 D[$r14 + #proc_qput] $r9
+ bset $flags $p2
+ send_done:
+ pop $r9
+ pop $r8
+ ret
+
+// lookup process structure by its name
+//
+// $r15 - current
+// $r14 - process name
+// $r0 - zero
+//
+// $r14 - process
+// $p1 - success
+find:
+ push $r8
+ mov $r8 #proc_list_head
+ bset $flags $p1
+ find_loop:
+ ld b32 $r10 D[$r8 + #proc_id]
+ cmp b32 $r10 $r14
+ bra e #find_done
+ add b32 $r8 #proc_size
+ cmp b32 $r8 #proc_list_tail
+ bra ne #find_loop
+ bclr $flags $p1
+ find_done:
+ mov b32 $r14 $r8
+ pop $r8
+ ret
+
+// send message to another process
+//
+// $r15 - current
+// $r14 - process id
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0 - zero
+send:
+ call(find)
+ bra $p1 #send_proc
+ ret
+
+// process single message for a given process
+//
+// $r15 - current
+// $r14 - process
+// $r0 - zero
+recv:
+ ld b32 $r8 D[$r14 + #proc_qget]
+ ld b32 $r9 D[$r14 + #proc_qput]
+ bclr $flags $p1
+ cmp b32 $r8 $r9
+ bra e #recv_done
+ // dequeue message
+ and $r9 $r8 #proc_qmaskp
+ add b32 $r8 1
+ and $r8 #proc_qmaskf
+ st b32 D[$r14 + #proc_qget] $r8
+ ld b32 $r10 D[$r14 + #proc_recv]
+
+ push $r15
+ mov $r15 $flags
+ push $r15
+ mov b32 $r15 $r14
+
+ shl b32 $r9 $r9 #proc_qlen
+ add b32 $r14 $r9
+ add b32 $r14 #proc_queue
+ ld b32 $r11 D[$r14 + #msg_data1]
+ ld b32 $r12 D[$r14 + #msg_data0]
+ ld b32 $r13 D[$r14 + #msg_message]
+ ld b32 $r14 D[$r14 + #msg_process]
+
+ // process it
+ call $r10
+ pop $r15
+ mov $flags $r15
+ bset $flags $p1
+ pop $r15
+ recv_done:
+ ret
+
+init:
+ // setup stack
+ nv_iord($r1, NV_PPWR_CAPS)
+ extr $r1 $r1 9:17
+ shl b32 $r1 8
+ mov $sp $r1
+
+#ifdef NVKM_FALCON_MMIO_UAS
+ // enable the UAS aperture, which lets the nv_rd32/nv_wr32 macros
+ // access mmio through D[] loads/stores
+ mov $r1 0x0010
+ sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
+ nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
+#endif
+
+ // route all interrupts except user0/1 and pause to fuc
+ mov $r1 0x00e0
+ sethi $r1 0x00000000
+ nv_iowr(NV_PPWR_INTR_ROUTE, $r1)
+
+ // enable watchdog and subintr intrs
+ mov $r1 NV_PPWR_INTR_EN_CLR_MASK
+ nv_iowr(NV_PPWR_INTR_EN_CLR, $r1)
+ mov $r1 NV_PPWR_INTR_EN_SET_WATCHDOG
+ or $r1 NV_PPWR_INTR_EN_SET_SUBINTR
+ nv_iowr(NV_PPWR_INTR_EN_SET, $r1)
+
+ // enable interrupts globally
+ mov $r1 #intr
+ sethi $r1 0x00000000
+ mov $iv0 $r1
+ bset $flags ie0
+
+ // enable watchdog timer
+ mov $r1 1
+ nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r1)
+
+ // bootstrap processes, idle process will be last, and not return
+ mov $r15 #proc_list_head
+ init_proc:
+ ld b32 $r1 D[$r15 + #proc_init]
+ cmp b32 $r1 0
+ bra z #init_proc
+ call $r1
+ add b32 $r15 #proc_size
+ bra #init_proc
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
new file mode 100644
index 00000000000..2a74ea90760
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define GT215 0xa3
+#define GF100 0xc0
+#define GF119 0xd9
+#define GK208 0x108
+
+#include "os.h"
+
+// IO addresses
+#define NV_PPWR_INTR_TRIGGER 0x0000
+#define NV_PPWR_INTR_TRIGGER_USER1 0x00000080
+#define NV_PPWR_INTR_TRIGGER_USER0 0x00000040
+#define NV_PPWR_INTR_ACK 0x0004
+#define NV_PPWR_INTR_ACK_SUBINTR 0x00000800
+#define NV_PPWR_INTR_ACK_WATCHDOG 0x00000002
+#define NV_PPWR_INTR 0x0008
+#define NV_PPWR_INTR_SUBINTR 0x00000800
+#define NV_PPWR_INTR_USER1 0x00000080
+#define NV_PPWR_INTR_USER0 0x00000040
+#define NV_PPWR_INTR_PAUSE 0x00000020
+#define NV_PPWR_INTR_WATCHDOG 0x00000002
+#define NV_PPWR_INTR_EN_SET 0x0010
+#define NV_PPWR_INTR_EN_SET_SUBINTR 0x00000800
+#define NV_PPWR_INTR_EN_SET_WATCHDOG 0x00000002
+#define NV_PPWR_INTR_EN_CLR 0x0014
+#define NV_PPWR_INTR_EN_CLR_MASK /* all bits; -1 keeps envyas happy */ -1
+#define NV_PPWR_INTR_ROUTE 0x001c
+#define NV_PPWR_TIMER_LOW 0x002c
+#define NV_PPWR_WATCHDOG_TIME 0x0034
+#define NV_PPWR_WATCHDOG_ENABLE 0x0038
+#define NV_PPWR_CAPS 0x0108
+#define NV_PPWR_UAS_CONFIG 0x0164
+#define NV_PPWR_UAS_CONFIG_ENABLE 0x00010000
+#if NVKM_PPWR_CHIPSET >= GK208
+#define NV_PPWR_DSCRATCH(i) (4 * (i) + 0x0450)
+#endif
+#define NV_PPWR_FIFO_PUT(i) (4 * (i) + 0x04a0)
+#define NV_PPWR_FIFO_GET(i) (4 * (i) + 0x04b0)
+#define NV_PPWR_FIFO_INTR 0x04c0
+#define NV_PPWR_FIFO_INTR_EN 0x04c4
+#define NV_PPWR_RFIFO_PUT 0x04c8
+#define NV_PPWR_RFIFO_GET 0x04cc
+#define NV_PPWR_H2D 0x04d0
+#define NV_PPWR_D2H 0x04dc
+#if NVKM_PPWR_CHIPSET < GK208
+#define NV_PPWR_DSCRATCH(i) (4 * (i) + 0x05d0)
+#endif
+#define NV_PPWR_SUBINTR 0x0688
+#define NV_PPWR_SUBINTR_FIFO 0x00000002
+#define NV_PPWR_MMIO_ADDR 0x07a0
+#define NV_PPWR_MMIO_DATA 0x07a4
+#define NV_PPWR_MMIO_CTRL 0x07ac
+#define NV_PPWR_MMIO_CTRL_TRIGGER 0x00010000
+#define NV_PPWR_MMIO_CTRL_STATUS 0x00007000
+#define NV_PPWR_MMIO_CTRL_STATUS_IDLE 0x00000000
+#define NV_PPWR_MMIO_CTRL_MASK 0x000000f0
+#define NV_PPWR_MMIO_CTRL_MASK_B32_0 0x000000f0
+#define NV_PPWR_MMIO_CTRL_OP 0x00000003
+#define NV_PPWR_MMIO_CTRL_OP_RD 0x00000001
+#define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002
+#define NV_PPWR_OUTPUT 0x07c0
+#define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004
+#define NV_PPWR_OUTPUT_SET 0x07e0
+#define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004
+#define NV_PPWR_OUTPUT_CLR 0x07e4
+#define NV_PPWR_OUTPUT_CLR_FB_PAUSE 0x00000004
+
+// Inter-process message format
+.equ #msg_process 0x00 /* send() target, recv() sender */
+.equ #msg_message 0x04
+.equ #msg_data0 0x08
+.equ #msg_data1 0x0c
+
+// Kernel message IDs
+#define KMSG_FIFO 0x00000000
+#define KMSG_ALARM 0x00000001
+
+// Process message queue description
+.equ #proc_qlen 4 // log2(size of queue entry in bytes)
+.equ #proc_qnum 2 // log2(max number of entries in queue)
+.equ #proc_qmaskb (1 << #proc_qnum) // max number of entries in queue
+.equ #proc_qmaskp (#proc_qmaskb - 1)
+.equ #proc_qmaskf ((#proc_qmaskb << 1) - 1)
+.equ #proc_qsize (1 << (#proc_qlen + #proc_qnum))
+
+// Process table entry
+.equ #proc_id 0x00
+.equ #proc_init 0x04
+.equ #proc_recv 0x08
+.equ #proc_time 0x0c
+.equ #proc_qput 0x10
+.equ #proc_qget 0x14
+.equ #proc_queue 0x18
+.equ #proc_size (0x18 + #proc_qsize)
+
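+// emits one process table entry: id/init/recv/time/qput/qget followed by
+// the message queue storage (matching the #proc_* offsets above)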
+#define process(id,init,recv) /*
+*/ .b32 id /*
+*/ .b32 init /*
+*/ .b32 recv /*
+*/ .b32 0 /*
+*/ .b32 0 /*
+*/ .b32 0 /*
+*/ .skip 64
+
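+// I/O port accessors: most falcons address the I/O space with the port
+// number shifted left 6 bits; GK208 (NVKM_FALCON_UNSHIFTED_IO) does not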
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iord(reg,ior) /*
+*/ mov reg ior /*
+*/ shl b32 reg 6 /*
+*/ iord reg I[reg + 0x000]
+#else
+#define nv_iord(reg,ior) /*
+*/ mov reg ior /*
+*/ iord reg I[reg + 0x000]
+#endif
+
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iowr(ior,reg) /*
+*/ mov $r0 ior /*
+*/ shl b32 $r0 6 /*
+*/ iowr I[$r0 + 0x000] reg /*
+*/ clear b32 $r0
+#else
+#define nv_iowr(ior,reg) /*
+*/ mov $r0 ior /*
+*/ iowr I[$r0 + 0x000] reg /*
+*/ clear b32 $r0
+#endif
+
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iowrs(ior,reg) /*
+*/ mov $r0 ior /*
+*/ shl b32 $r0 6 /*
+*/ iowrs I[$r0 + 0x000] reg /*
+*/ clear b32 $r0
+#else
+#define nv_iowrs(ior,reg) /*
+*/ mov $r0 ior /*
+*/ iowrs I[$r0 + 0x000] reg /*
+*/ clear b32 $r0
+#endif
+
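+// call(x) expands to "call #x" ("lcall #x" on NVKM_FALCON_PC24 chips); the
+// hash/fn indirection is needed to paste the '#' label prefix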
+#define hash #
+#define fn(a) a
+#ifndef NVKM_FALCON_PC24
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
+
+#ifndef NVKM_FALCON_MMIO_UAS
+#define nv_rd32(reg,addr) /*
+*/ mov b32 $r14 addr /*
+*/ call(rd32) /*
+*/ mov b32 reg $r13
+#else
+#define nv_rd32(reg,addr) /*
+*/ sethi $r0 0x14000000 /*
+*/ or $r0 addr /*
+*/ ld b32 reg D[$r0] /*
+*/ clear b32 $r0
+#endif
+
+#if !defined(NVKM_FALCON_MMIO_UAS) || defined(NVKM_FALCON_MMIO_TRAP)
+#define nv_wr32(addr,reg) /*
+*/ push addr /*
+*/ push reg /*
+*/ pop $r13 /*
+*/ pop $r14 /*
+*/ call(wr32)
+#else
+#define nv_wr32(addr,reg) /*
+*/ sethi $r0 0x14000000 /*
+*/ or $r0 addr /*
+*/ st b32 D[$r0] reg /*
+*/ clear b32 $r0
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
new file mode 100644
index 00000000000..d43741eccb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_MEMX, #memx_init, #memx_recv)
+#endif
+
+/******************************************************************************
+ * MEMX data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+.equ #memx_opcode 0
+.equ #memx_header 2
+.equ #memx_length 4
+.equ #memx_func 8
+
+#define handler(cmd,hdr,len,func) /*
+*/ .b16 MEMX_##cmd /*
+*/ .b16 hdr /*
+*/ .b16 len /*
+*/ .b16 0 /*
+*/ .b32 func
+
+memx_func_head:
+handler(ENTER , 0x0001, 0x0000, #memx_func_enter)
+memx_func_next:
+handler(LEAVE , 0x0000, 0x0000, #memx_func_leave)
+handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
+handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
+handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
+memx_func_tail:
+
+.equ #memx_func_size #memx_func_next - #memx_func_head
+.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size
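+// script packet headers carry the opcode in their low 16 bits, which
+// memx_exec uses to index this table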
+
+memx_data_head:
+.skip 0x0800
+memx_data_tail:
+#endif
+
+/******************************************************************************
+ * MEMX code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// ENTER opcode - pause framebuffer accesses before the script runs
+//
+// $r15 - current (memx)
+// $r4 - packet length
+// +00: bitmask of heads to wait for vblank on
+// $r3 - opcode description
+// $r0 - zero
+memx_func_enter:
+ mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE
+ nv_iowr(NV_PPWR_OUTPUT_SET, $r6)
+ memx_func_enter_wait:
+ nv_iord($r6, NV_PPWR_OUTPUT)
+ and $r6 NV_PPWR_OUTPUT_FB_PAUSE
+ bra z #memx_func_enter_wait
+ //XXX: TODO
+ ld b32 $r6 D[$r1 + 0x00]
+ add b32 $r1 0x04
+ ret
+
+// LEAVE opcode - resume framebuffer accesses once the script has finished
+//
+// $r15 - current (memx)
+// $r4 - packet length
+// $r3 - opcode description
+// $r0 - zero
+memx_func_leave:
+ mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE
+ nv_iowr(NV_PPWR_OUTPUT_CLR, $r6)
+ memx_func_leave_wait:
+ nv_iord($r6, NV_PPWR_OUTPUT)
+ and $r6 NV_PPWR_OUTPUT_FB_PAUSE
+ bra nz #memx_func_leave_wait
+ ret
+
+// WR32 opcode - write a series of (addr, data) register pairs
+//
+// $r15 - current (memx)
+// $r4 - packet length
+// +00*n: addr
+// +04*n: data
+// $r3 - opcode description
+// $r0 - zero
+memx_func_wr32:
+ ld b32 $r6 D[$r1 + 0x00]
+ ld b32 $r5 D[$r1 + 0x04]
+ add b32 $r1 0x08
+ nv_wr32($r6, $r5)
+ sub b32 $r4 0x02
+ bra nz #memx_func_wr32
+ ret
+
+// WAIT opcode - poll a register until (value & mask) == data, or timeout
+//
+// $r15 - current (memx)
+// $r4 - packet length
+// +00: addr
+// +04: mask
+// +08: data
+// +0c: timeout (ns)
+// $r3 - opcode description
+// $r0 - zero
+memx_func_wait:
+ nv_iord($r8, NV_PPWR_TIMER_LOW)
+ ld b32 $r14 D[$r1 + 0x00]
+ ld b32 $r13 D[$r1 + 0x04]
+ ld b32 $r12 D[$r1 + 0x08]
+ ld b32 $r11 D[$r1 + 0x0c]
+ add b32 $r1 0x10
+ call(wait)
+ ret
+
+// DELAY opcode - busy-wait for the requested number of nanoseconds
+//
+// $r15 - current (memx)
+// $r4 - packet length
+// +00: time (ns)
+// $r3 - opcode description
+// $r0 - zero
+memx_func_delay:
+ ld b32 $r14 D[$r1 + 0x00]
+ add b32 $r1 0x04
+ call(nsec)
+ ret
+
+// EXEC message handler - execute a script of memx opcodes, reply when done
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message (exec)
+// $r12 - head of script
+// $r11 - tail of script
+// $r0 - zero
+memx_exec:
+ push $r14
+ push $r13
+ mov b32 $r1 $r12
+ mov b32 $r2 $r11
+ memx_exec_next:
+ // fetch the packet header, and locate opcode info
+ ld b32 $r3 D[$r1]
+ add b32 $r1 4
+ shr b32 $r4 $r3 16
+ mulu $r3 #memx_func_size
+
+ // execute the opcode handler
+ ld b32 $r5 D[$r3 + #memx_func_head + #memx_func]
+ call $r5
+
+ // keep going, if we haven't reached the end
+ cmp b32 $r1 $r2
+ bra l #memx_exec_next
+
+ // send completion reply
+ pop $r13
+ pop $r14
+ call(send)
+ ret
+
+// INFO message handler - reply with the offset and size of the script data area
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0 - zero
+memx_info:
+ mov $r12 #memx_data_head
+ mov $r11 #memx_data_tail - #memx_data_head
+ call(send)
+ ret
+
+// message handler - dispatch EXEC/INFO requests
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0 - zero
+memx_recv:
+ cmp b32 $r13 MEMX_MSG_EXEC
+ bra e #memx_exec
+ cmp b32 $r13 MEMX_MSG_INFO
+ bra e #memx_info
+ ret
+
+// init - nothing to initialise yet
+//
+// $r15 - current (memx)
+// $r0 - zero
+memx_init:
+ ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
new file mode 100644
index 00000000000..947be536dae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GK208
+
+#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
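+// each .fuc source is included three times: once to emit its process table
+// entry, once for its data segment, and once for its code segment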
+.section #nv108_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nv108_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
new file mode 100644
index 00000000000..9342e2d7d3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -0,0 +1,1165 @@
+uint32_t nv108_pwr_data[] = {
+/* 0x0000: proc_kern */
+ 0x52544e49,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: proc_list_head */
+ 0x54534f48,
+ 0x00000379,
+ 0x0000032a,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x584d454d,
+ 0x0000046f,
+ 0x00000461,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x46524550,
+ 0x00000473,
+ 0x00000471,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54534554,
+ 0x00000494,
+ 0x00000475,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x454c4449,
+ 0x0000049f,
+ 0x0000049d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+ 0x00000000,
+/* 0x0214: time_next */
+ 0x00000000,
+/* 0x0218: fifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0298: rfifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0318: memx_func_head */
+ 0x00010000,
+ 0x00000000,
+ 0x000003a9,
+/* 0x0324: memx_func_next */
+ 0x00000001,
+ 0x00000000,
+ 0x000003c7,
+ 0x00000002,
+ 0x00000002,
+ 0x000003df,
+ 0x00040003,
+ 0x00000000,
+ 0x00000407,
+ 0x00010004,
+ 0x00000000,
+ 0x00000421,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0b54: memx_data_tail */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv108_pwr_code[] = {
+ 0x02910ef5,
+/* 0x0004: rd32 */
+ 0xf607a040,
+ 0x04bd000e,
+ 0xe3f0010e,
+ 0x07ac4001,
+ 0xbd000ef6,
+/* 0x0019: rd32_wait */
+ 0x07ac4e04,
+ 0xf100eecf,
+ 0xf47000e4,
+ 0xa44df61b,
+ 0x00ddcf07,
+/* 0x002e: wr32 */
+ 0xa04000f8,
+ 0x000ef607,
+ 0xa44004bd,
+ 0x000df607,
+ 0x020e04bd,
+ 0xf0f0e5f0,
+ 0xac4001e3,
+ 0x000ef607,
+/* 0x004e: wr32_wait */
+ 0xac4e04bd,
+ 0x00eecf07,
+ 0x7000e4f1,
+ 0xf8f61bf4,
+/* 0x005d: nsec */
+ 0xcf2c0800,
+/* 0x0062: nsec_loop */
+ 0x2c090088,
+ 0xbb0099cf,
+ 0x9ea60298,
+ 0xf8f61ef4,
+/* 0x0071: wait */
+ 0xcf2c0800,
+/* 0x0076: wait_loop */
+ 0xeeb20088,
+ 0x0000047e,
+ 0xadfddab2,
+ 0xf4aca604,
+ 0x2c09100b,
+ 0xbb0099cf,
+ 0x9ba60298,
+/* 0x0093: wait_done */
+ 0xf8e61ef4,
+/* 0x0095: intr_watchdog */
+ 0x03e99800,
+ 0xf40096b0,
+ 0x0a98280b,
+ 0x029abb84,
+ 0x0d0e1cf4,
+ 0x01de7e01,
+ 0xf494bd00,
+/* 0x00b2: intr_watchdog_next_time */
+ 0x0a98140e,
+ 0x00a6b085,
+ 0xa6080bf4,
+ 0x061cf49a,
+/* 0x00c0: intr_watchdog_next_time_set */
+/* 0x00c3: intr_watchdog_next_proc */
+ 0xb58509b5,
+ 0xe0b603e9,
+ 0x10e6b158,
+ 0xc81bf402,
+/* 0x00d2: intr */
+ 0x00f900f8,
+ 0x80f904bd,
+ 0xa0f990f9,
+ 0xc0f9b0f9,
+ 0xe0f9d0f9,
+ 0x000ff0f9,
+ 0xf90188fe,
+ 0x04504880,
+ 0xb60088cf,
+ 0x50400180,
+ 0x0008f604,
+ 0x080804bd,
+ 0xc40088cf,
+ 0x0bf40289,
+ 0x8500b51f,
+ 0x957e580e,
+ 0x09980000,
+ 0x0096b085,
+ 0x000d0bf4,
+ 0x0009f634,
+ 0x09b504bd,
+/* 0x0125: intr_skip_watchdog */
+ 0x0089e484,
+ 0x360bf408,
+ 0xcf068849,
+ 0x9ac40099,
+ 0x220bf402,
+ 0xcf04c04c,
+ 0xc0f900cc,
+ 0xf14f484e,
+ 0x0d5453e3,
+ 0x023f7e00,
+ 0x40c0fc00,
+ 0x0cf604c0,
+/* 0x0157: intr_subintr_skip_fifo */
+ 0x4004bd00,
+ 0x09f60688,
+/* 0x015f: intr_skip_subintr */
+ 0xc404bd00,
+ 0x0bf42089,
+ 0xbfa4f107,
+/* 0x0169: intr_skip_pause */
+ 0x4089c4ff,
+ 0xf1070bf4,
+/* 0x0173: intr_skip_user0 */
+ 0x00ffbfa4,
+ 0x0008f604,
+ 0x80fc04bd,
+ 0xfc0088fe,
+ 0xfce0fcf0,
+ 0xfcc0fcd0,
+ 0xfca0fcb0,
+ 0xfc80fc90,
+ 0x0032f400,
+/* 0x0196: timer */
+ 0x32f401f8,
+ 0x03f89810,
+ 0xf40086b0,
+ 0xfeb53a1c,
+ 0xf6380003,
+ 0x04bd0008,
+ 0x88cf0808,
+ 0x0284f000,
+ 0x081c1bf4,
+ 0x0088cf34,
+ 0x0bf4e0a6,
+ 0xf4e8a608,
+/* 0x01c6: timer_reset */
+ 0x3400161e,
+ 0xbd000ef6,
+ 0x840eb504,
+/* 0x01d0: timer_enable */
+ 0x38000108,
+ 0xbd0008f6,
+/* 0x01d9: timer_done */
+ 0x1031f404,
+/* 0x01de: send_proc */
+ 0x80f900f8,
+ 0xe89890f9,
+ 0x04e99805,
+ 0xa60486f0,
+ 0x2a0bf489,
+ 0x940398c4,
+ 0x80b60488,
+ 0x008ebb18,
+ 0xb500fa98,
+ 0x8db5008a,
+ 0x028cb501,
+ 0xb6038bb5,
+ 0x94f00190,
+ 0x04e9b507,
+/* 0x0217: send_done */
+ 0xfc0231f4,
+ 0xf880fc90,
+/* 0x021d: find */
+ 0x0880f900,
+ 0x0131f458,
+/* 0x0224: find_loop */
+ 0xa6008a98,
+ 0x100bf4ae,
+ 0xb15880b6,
+ 0xf4021086,
+ 0x32f4f11b,
+/* 0x0239: find_done */
+ 0xfc8eb201,
+/* 0x023f: send */
+ 0x7e00f880,
+ 0xf400021d,
+ 0x00f89b01,
+/* 0x0248: recv */
+ 0x9805e898,
+ 0x32f404e9,
+ 0xf489a601,
+ 0x89c43c0b,
+ 0x0180b603,
+ 0xb50784f0,
+ 0xea9805e8,
+ 0xfef0f902,
+ 0xf0f9018f,
+ 0x9994efb2,
+ 0x00e9bb04,
+ 0x9818e0b6,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0xf900ee98,
+ 0xfef0fca5,
+ 0x31f400f8,
+/* 0x028f: recv_done */
+ 0xf8f0fc01,
+/* 0x0291: init */
+ 0x01084100,
+ 0xe70011cf,
+ 0xb6010911,
+ 0x14fe0814,
+ 0x00e04100,
+ 0x000013f0,
+ 0x0001f61c,
+ 0xff0104bd,
+ 0x01f61400,
+ 0x0104bd00,
+ 0x0015f102,
+ 0xf6100008,
+ 0x04bd0001,
+ 0xf000d241,
+ 0x10fe0013,
+ 0x1031f400,
+ 0x38000101,
+ 0xbd0001f6,
+/* 0x02db: init_proc */
+ 0x98580f04,
+ 0x16b001f1,
+ 0xfa0bf400,
+ 0xf0b615f9,
+ 0xf20ef458,
+/* 0x02ec: host_send */
+ 0xcf04b041,
+ 0xa0420011,
+ 0x0022cf04,
+ 0x0bf412a6,
+ 0x071ec42e,
+ 0xb704ee94,
+ 0x980218e0,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0x7e00ee98,
+ 0xb600023f,
+ 0x1ec40110,
+ 0x04b0400f,
+ 0xbd0001f6,
+ 0xc70ef404,
+/* 0x0328: host_send_done */
+/* 0x032a: host_recv */
+ 0x494100f8,
+ 0x5413f14e,
+ 0xf4e1a652,
+/* 0x0336: host_recv_wait */
+ 0xcc41b90b,
+ 0x0011cf04,
+ 0xcf04c842,
+ 0x16f00022,
+ 0xf412a608,
+ 0x23c4ef0b,
+ 0x0434b607,
+ 0x029830b7,
+ 0xb5033bb5,
+ 0x3db5023c,
+ 0x003eb501,
+ 0xf00120b6,
+ 0xc8400f24,
+ 0x0002f604,
+ 0x400204bd,
+ 0x02f60000,
+ 0xf804bd00,
+/* 0x0379: host_init */
+ 0x00804100,
+ 0xf11014b6,
+ 0x40021815,
+ 0x01f604d0,
+ 0x4104bd00,
+ 0x14b60080,
+ 0x9815f110,
+ 0x04dc4002,
+ 0xbd0001f6,
+ 0x40010104,
+ 0x01f604c4,
+ 0xf804bd00,
+/* 0x03a9: memx_func_enter */
+ 0x40040600,
+ 0x06f607e0,
+/* 0x03b3: memx_func_enter_wait */
+ 0x4604bd00,
+ 0x66cf07c0,
+ 0x0464f000,
+ 0x98f70bf4,
+ 0x10b60016,
+/* 0x03c7: memx_func_leave */
+ 0x0600f804,
+ 0x07e44004,
+ 0xbd0006f6,
+/* 0x03d1: memx_func_leave_wait */
+ 0x07c04604,
+ 0xf00066cf,
+ 0x1bf40464,
+/* 0x03df: memx_func_wr32 */
+ 0x9800f8f7,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
+ 0xe0fcd0fc,
+ 0x00002e7e,
+ 0x140003f1,
+ 0xa00506fd,
+ 0xb604bd05,
+ 0x1bf40242,
+/* 0x0407: memx_func_wait */
+ 0x0800f8dd,
+ 0x0088cf2c,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0x7e1010b6,
+ 0xf8000071,
+/* 0x0421: memx_func_delay */
+ 0x001e9800,
+ 0x7e0410b6,
+ 0xf800005d,
+/* 0x042d: memx_exec */
+ 0xf9e0f900,
+ 0xb2c1b2d0,
+/* 0x0435: memx_exec_next */
+ 0x001398b2,
+ 0x950410b6,
+ 0x30f01034,
+ 0xc835980c,
+ 0x12a655f9,
+ 0xfced1ef4,
+ 0x7ee0fcd0,
+ 0xf800023f,
+/* 0x0455: memx_info */
+ 0x03544c00,
+ 0x7e08004b,
+ 0xf800023f,
+/* 0x0461: memx_recv */
+ 0x01d6b000,
+ 0xb0c90bf4,
+ 0x0bf400d6,
+/* 0x046f: memx_init */
+ 0xf800f8eb,
+/* 0x0471: perf_recv */
+/* 0x0473: perf_init */
+ 0xf800f800,
+/* 0x0475: test_recv */
+ 0x04584100,
+ 0xb60011cf,
+ 0x58400110,
+ 0x0001f604,
+ 0xe7f104bd,
+ 0xe3f1d900,
+ 0x967e134f,
+ 0x00f80001,
+/* 0x0494: test_init */
+ 0x7e08004e,
+ 0xf8000196,
+/* 0x049d: idle_recv */
+/* 0x049f: idle */
+ 0xf400f800,
+ 0x54410031,
+ 0x0011cf04,
+ 0x400110b6,
+ 0x01f60454,
+/* 0x04b3: idle_loop */
+ 0x0104bd00,
+ 0x0232f458,
+/* 0x04b8: idle_proc */
+/* 0x04b8: idle_proc_exec */
+ 0x1eb210f9,
+ 0x0002487e,
+ 0x11f410fc,
+ 0x0231f409,
+/* 0x04cb: idle_proc_next */
+ 0xb6f00ef4,
+ 0x1fa65810,
+ 0xf4e81bf4,
+ 0x28f4e002,
+ 0xc60ef400,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
new file mode 100644
index 00000000000..6fde0b89e5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GT215
+
+//#define NVKM_FALCON_PC24
+//#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nva3_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nva3_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
new file mode 100644
index 00000000000..0fa4d7dcd40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nva3_pwr_data[] = {
+/* 0x0000: proc_kern */
+ 0x52544e49,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: proc_list_head */
+ 0x54534f48,
+ 0x00000430,
+ 0x000003cd,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x584d454d,
+ 0x0000054e,
+ 0x00000540,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x46524550,
+ 0x00000552,
+ 0x00000550,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54534554,
+ 0x0000057b,
+ 0x00000554,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x454c4449,
+ 0x00000587,
+ 0x00000585,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+ 0x00000000,
+/* 0x0214: time_next */
+ 0x00000000,
+/* 0x0218: fifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0298: rfifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0318: memx_func_head */
+ 0x00010000,
+ 0x00000000,
+ 0x0000046f,
+/* 0x0324: memx_func_next */
+ 0x00000001,
+ 0x00000000,
+ 0x00000496,
+ 0x00000002,
+ 0x00000002,
+ 0x000004b7,
+ 0x00040003,
+ 0x00000000,
+ 0x000004df,
+ 0x00010004,
+ 0x00000000,
+ 0x000004fc,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0b54: memx_data_tail */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nva3_pwr_code[] = {
+ 0x030d0ef5,
+/* 0x0004: rd32 */
+ 0x07a007f1,
+ 0xd00604b6,
+ 0x04bd000e,
+ 0xf001e7f0,
+ 0x07f101e3,
+ 0x04b607ac,
+ 0x000ed006,
+/* 0x0022: rd32_wait */
+ 0xe7f104bd,
+ 0xe4b607ac,
+ 0x00eecf06,
+ 0x7000e4f1,
+ 0xf1f21bf4,
+ 0xb607a4d7,
+ 0xddcf06d4,
+/* 0x003f: wr32 */
+ 0xf100f800,
+ 0xb607a007,
+ 0x0ed00604,
+ 0xf104bd00,
+ 0xb607a407,
+ 0x0dd00604,
+ 0xf004bd00,
+ 0xe5f002e7,
+ 0x01e3f0f0,
+ 0x07ac07f1,
+ 0xd00604b6,
+ 0x04bd000e,
+/* 0x006c: wr32_wait */
+ 0x07ace7f1,
+ 0xcf06e4b6,
+ 0xe4f100ee,
+ 0x1bf47000,
+/* 0x007f: nsec */
+ 0xf000f8f2,
+ 0x84b62c87,
+ 0x0088cf06,
+/* 0x0088: nsec_loop */
+ 0xb62c97f0,
+ 0x99cf0694,
+ 0x0298bb00,
+ 0xf4069eb8,
+ 0x00f8f11e,
+/* 0x009c: wait */
+ 0xb62c87f0,
+ 0x88cf0684,
+/* 0x00a5: wait_loop */
+ 0x02eeb900,
+ 0xb90421f4,
+ 0xadfd02da,
+ 0x06acb804,
+ 0xf0150bf4,
+ 0x94b62c97,
+ 0x0099cf06,
+ 0xb80298bb,
+ 0x1ef4069b,
+/* 0x00c9: wait_done */
+/* 0x00cb: intr_watchdog */
+ 0x9800f8df,
+ 0x96b003e9,
+ 0x2a0bf400,
+ 0xbb840a98,
+ 0x1cf4029a,
+ 0x01d7f00f,
+ 0x025421f5,
+ 0x0ef494bd,
+/* 0x00e9: intr_watchdog_next_time */
+ 0x850a9815,
+ 0xf400a6b0,
+ 0x9ab8090b,
+ 0x061cf406,
+/* 0x00f8: intr_watchdog_next_time_set */
+/* 0x00fb: intr_watchdog_next_proc */
+ 0x80850980,
+ 0xe0b603e9,
+ 0x10e6b158,
+ 0xc61bf402,
+/* 0x010a: intr */
+ 0x00f900f8,
+ 0x80f904bd,
+ 0xa0f990f9,
+ 0xc0f9b0f9,
+ 0xe0f9d0f9,
+ 0xf7f0f0f9,
+ 0x0188fe00,
+ 0x87f180f9,
+ 0x84b605d0,
+ 0x0088cf06,
+ 0xf10180b6,
+ 0xb605d007,
+ 0x08d00604,
+ 0xf004bd00,
+ 0x84b60887,
+ 0x0088cf06,
+ 0xf40289c4,
+ 0x0080230b,
+ 0x58e7f085,
+ 0x98cb21f4,
+ 0x96b08509,
+ 0x110bf400,
+ 0xb63407f0,
+ 0x09d00604,
+ 0x8004bd00,
+/* 0x016e: intr_skip_watchdog */
+ 0x89e48409,
+ 0x0bf40800,
+ 0x8897f148,
+ 0x0694b606,
+ 0xc40099cf,
+ 0x0bf4029a,
+ 0xc0c7f12c,
+ 0x06c4b604,
+ 0xf900cccf,
+ 0x48e7f1c0,
+ 0x53e3f14f,
+ 0x00d7f054,
+ 0x02b921f5,
+ 0x07f1c0fc,
+ 0x04b604c0,
+ 0x000cd006,
+/* 0x01ae: intr_subintr_skip_fifo */
+ 0x07f104bd,
+ 0x04b60688,
+ 0x0009d006,
+/* 0x01ba: intr_skip_subintr */
+ 0x89c404bd,
+ 0x070bf420,
+ 0xffbfa4f1,
+/* 0x01c4: intr_skip_pause */
+ 0xf44089c4,
+ 0xa4f1070b,
+/* 0x01ce: intr_skip_user0 */
+ 0x07f0ffbf,
+ 0x0604b604,
+ 0xbd0008d0,
+ 0xfe80fc04,
+ 0xf0fc0088,
+ 0xd0fce0fc,
+ 0xb0fcc0fc,
+ 0x90fca0fc,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x01f5: timer */
+ 0x1032f401,
+ 0xb003f898,
+ 0x1cf40086,
+ 0x03fe8051,
+ 0xb63807f0,
+ 0x08d00604,
+ 0xf004bd00,
+ 0x84b60887,
+ 0x0088cf06,
+ 0xf40284f0,
+ 0x87f0261b,
+ 0x0684b634,
+ 0xb80088cf,
+ 0x0bf406e0,
+ 0x06e8b809,
+/* 0x0233: timer_reset */
+ 0xf01f1ef4,
+ 0x04b63407,
+ 0x000ed006,
+ 0x0e8004bd,
+/* 0x0241: timer_enable */
+ 0x0187f084,
+ 0xb63807f0,
+ 0x08d00604,
+/* 0x024f: timer_done */
+ 0xf404bd00,
+ 0x00f81031,
+/* 0x0254: send_proc */
+ 0x90f980f9,
+ 0x9805e898,
+ 0x86f004e9,
+ 0x0689b804,
+ 0xc42a0bf4,
+ 0x88940398,
+ 0x1880b604,
+ 0x98008ebb,
+ 0x8a8000fa,
+ 0x018d8000,
+ 0x80028c80,
+ 0x90b6038b,
+ 0x0794f001,
+ 0xf404e980,
+/* 0x028e: send_done */
+ 0x90fc0231,
+ 0x00f880fc,
+/* 0x0294: find */
+ 0x87f080f9,
+ 0x0131f458,
+/* 0x029c: find_loop */
+ 0xb8008a98,
+ 0x0bf406ae,
+ 0x5880b610,
+ 0x021086b1,
+ 0xf4f01bf4,
+/* 0x02b2: find_done */
+ 0x8eb90132,
+ 0xf880fc02,
+/* 0x02b9: send */
+ 0x9421f500,
+ 0x9701f402,
+/* 0x02c2: recv */
+ 0xe89800f8,
+ 0x04e99805,
+ 0xb80132f4,
+ 0x0bf40689,
+ 0x0389c43d,
+ 0xf00180b6,
+ 0xe8800784,
+ 0x02ea9805,
+ 0x8ffef0f9,
+ 0xb9f0f901,
+ 0x999402ef,
+ 0x00e9bb04,
+ 0x9818e0b6,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0xf900ee98,
+ 0xfef0fca5,
+ 0x31f400f8,
+/* 0x030b: recv_done */
+ 0xf8f0fc01,
+/* 0x030d: init */
+ 0x0817f100,
+ 0x0614b601,
+ 0xe70011cf,
+ 0xb6010911,
+ 0x14fe0814,
+ 0xe017f100,
+ 0x0013f000,
+ 0xb61c07f0,
+ 0x01d00604,
+ 0xf004bd00,
+ 0x07f0ff17,
+ 0x0604b614,
+ 0xbd0001d0,
+ 0x0217f004,
+ 0x080015f1,
+ 0xb61007f0,
+ 0x01d00604,
+ 0xf104bd00,
+ 0xf0010a17,
+ 0x10fe0013,
+ 0x1031f400,
+ 0xf00117f0,
+ 0x04b63807,
+ 0x0001d006,
+ 0xf7f004bd,
+/* 0x0371: init_proc */
+ 0x01f19858,
+ 0xf40016b0,
+ 0x15f9fa0b,
+ 0xf458f0b6,
+/* 0x0382: host_send */
+ 0x17f1f20e,
+ 0x14b604b0,
+ 0x0011cf06,
+ 0x04a027f1,
+ 0xcf0624b6,
+ 0x12b80022,
+ 0x320bf406,
+ 0x94071ec4,
+ 0xe0b704ee,
+ 0xeb980218,
+ 0x02ec9803,
+ 0x9801ed98,
+ 0x21f500ee,
+ 0x10b602b9,
+ 0x0f1ec401,
+ 0x04b007f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x03cb: host_send_done */
+ 0xf8ba0ef4,
+/* 0x03cd: host_recv */
+ 0x4917f100,
+ 0x5413f14e,
+ 0x06e1b852,
+/* 0x03db: host_recv_wait */
+ 0xf1aa0bf4,
+ 0xb604cc17,
+ 0x11cf0614,
+ 0xc827f100,
+ 0x0624b604,
+ 0xf00022cf,
+ 0x12b80816,
+ 0xe60bf406,
+ 0xb60723c4,
+ 0x30b70434,
+ 0x3b800298,
+ 0x023c8003,
+ 0x80013d80,
+ 0x20b6003e,
+ 0x0f24f001,
+ 0x04c807f1,
+ 0xd00604b6,
+ 0x04bd0002,
+ 0xf04027f0,
+ 0x04b60007,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0430: host_init */
+ 0x008017f1,
+ 0xf11014b6,
+ 0xf1021815,
+ 0xb604d007,
+ 0x01d00604,
+ 0xf104bd00,
+ 0xb6008017,
+ 0x15f11014,
+ 0x07f10298,
+ 0x04b604dc,
+ 0x0001d006,
+ 0x17f004bd,
+ 0xc407f101,
+ 0x0604b604,
+ 0xbd0001d0,
+/* 0x046f: memx_func_enter */
+ 0xf000f804,
+ 0x07f10467,
+ 0x04b607e0,
+ 0x0006d006,
+/* 0x047e: memx_func_enter_wait */
+ 0x67f104bd,
+ 0x64b607c0,
+ 0x0066cf06,
+ 0xf40464f0,
+ 0x1698f30b,
+ 0x0410b600,
+/* 0x0496: memx_func_leave */
+ 0x67f000f8,
+ 0xe407f104,
+ 0x0604b607,
+ 0xbd0006d0,
+/* 0x04a5: memx_func_leave_wait */
+ 0xc067f104,
+ 0x0664b607,
+ 0xf00066cf,
+ 0x1bf40464,
+/* 0x04b7: memx_func_wr32 */
+ 0x9800f8f3,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
+ 0xe0fcd0fc,
+ 0xf13f21f4,
+ 0xfd140003,
+ 0x05800506,
+ 0xb604bd00,
+ 0x1bf40242,
+/* 0x04df: memx_func_wait */
+ 0xf000f8dd,
+ 0x84b62c87,
+ 0x0088cf06,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0xf41010b6,
+ 0x00f89c21,
+/* 0x04fc: memx_func_delay */
+ 0xb6001e98,
+ 0x21f40410,
+/* 0x0507: memx_exec */
+ 0xf900f87f,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x0511: memx_exec_next */
+ 0x00139802,
+ 0x950410b6,
+ 0x30f01034,
+ 0xc835980c,
+ 0x12b855f9,
+ 0xec1ef406,
+ 0xe0fcd0fc,
+ 0x02b921f5,
+/* 0x0532: memx_info */
+ 0xc7f100f8,
+ 0xb7f10354,
+ 0x21f50800,
+ 0x00f802b9,
+/* 0x0540: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0c40b,
+ 0xe90bf400,
+/* 0x054e: memx_init */
+ 0x00f800f8,
+/* 0x0550: perf_recv */
+/* 0x0552: perf_init */
+ 0x00f800f8,
+/* 0x0554: test_recv */
+ 0x05d817f1,
+ 0xcf0614b6,
+ 0x10b60011,
+ 0xd807f101,
+ 0x0604b605,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0xf521f513,
+/* 0x057b: test_init */
+ 0xf100f801,
+ 0xf50800e7,
+ 0xf801f521,
+/* 0x0585: idle_recv */
+/* 0x0587: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x05a3: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x05a9: idle_proc */
+/* 0x05a9: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc02c2,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x05bd: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
new file mode 100644
index 00000000000..eaa64da68e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF100
+
+//#define NVKM_FALCON_PC24
+//#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nvc0_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nvc0_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
new file mode 100644
index 00000000000..82c8e8b8891
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nvc0_pwr_data[] = {
+/* 0x0000: proc_kern */
+ 0x52544e49,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: proc_list_head */
+ 0x54534f48,
+ 0x00000430,
+ 0x000003cd,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x584d454d,
+ 0x0000054e,
+ 0x00000540,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x46524550,
+ 0x00000552,
+ 0x00000550,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54534554,
+ 0x0000057b,
+ 0x00000554,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x454c4449,
+ 0x00000587,
+ 0x00000585,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+ 0x00000000,
+/* 0x0214: time_next */
+ 0x00000000,
+/* 0x0218: fifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0298: rfifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0318: memx_func_head */
+ 0x00010000,
+ 0x00000000,
+ 0x0000046f,
+/* 0x0324: memx_func_next */
+ 0x00000001,
+ 0x00000000,
+ 0x00000496,
+ 0x00000002,
+ 0x00000002,
+ 0x000004b7,
+ 0x00040003,
+ 0x00000000,
+ 0x000004df,
+ 0x00010004,
+ 0x00000000,
+ 0x000004fc,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0b54: memx_data_tail */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nvc0_pwr_code[] = {
+ 0x030d0ef5,
+/* 0x0004: rd32 */
+ 0x07a007f1,
+ 0xd00604b6,
+ 0x04bd000e,
+ 0xf001e7f0,
+ 0x07f101e3,
+ 0x04b607ac,
+ 0x000ed006,
+/* 0x0022: rd32_wait */
+ 0xe7f104bd,
+ 0xe4b607ac,
+ 0x00eecf06,
+ 0x7000e4f1,
+ 0xf1f21bf4,
+ 0xb607a4d7,
+ 0xddcf06d4,
+/* 0x003f: wr32 */
+ 0xf100f800,
+ 0xb607a007,
+ 0x0ed00604,
+ 0xf104bd00,
+ 0xb607a407,
+ 0x0dd00604,
+ 0xf004bd00,
+ 0xe5f002e7,
+ 0x01e3f0f0,
+ 0x07ac07f1,
+ 0xd00604b6,
+ 0x04bd000e,
+/* 0x006c: wr32_wait */
+ 0x07ace7f1,
+ 0xcf06e4b6,
+ 0xe4f100ee,
+ 0x1bf47000,
+/* 0x007f: nsec */
+ 0xf000f8f2,
+ 0x84b62c87,
+ 0x0088cf06,
+/* 0x0088: nsec_loop */
+ 0xb62c97f0,
+ 0x99cf0694,
+ 0x0298bb00,
+ 0xf4069eb8,
+ 0x00f8f11e,
+/* 0x009c: wait */
+ 0xb62c87f0,
+ 0x88cf0684,
+/* 0x00a5: wait_loop */
+ 0x02eeb900,
+ 0xb90421f4,
+ 0xadfd02da,
+ 0x06acb804,
+ 0xf0150bf4,
+ 0x94b62c97,
+ 0x0099cf06,
+ 0xb80298bb,
+ 0x1ef4069b,
+/* 0x00c9: wait_done */
+/* 0x00cb: intr_watchdog */
+ 0x9800f8df,
+ 0x96b003e9,
+ 0x2a0bf400,
+ 0xbb840a98,
+ 0x1cf4029a,
+ 0x01d7f00f,
+ 0x025421f5,
+ 0x0ef494bd,
+/* 0x00e9: intr_watchdog_next_time */
+ 0x850a9815,
+ 0xf400a6b0,
+ 0x9ab8090b,
+ 0x061cf406,
+/* 0x00f8: intr_watchdog_next_time_set */
+/* 0x00fb: intr_watchdog_next_proc */
+ 0x80850980,
+ 0xe0b603e9,
+ 0x10e6b158,
+ 0xc61bf402,
+/* 0x010a: intr */
+ 0x00f900f8,
+ 0x80f904bd,
+ 0xa0f990f9,
+ 0xc0f9b0f9,
+ 0xe0f9d0f9,
+ 0xf7f0f0f9,
+ 0x0188fe00,
+ 0x87f180f9,
+ 0x84b605d0,
+ 0x0088cf06,
+ 0xf10180b6,
+ 0xb605d007,
+ 0x08d00604,
+ 0xf004bd00,
+ 0x84b60887,
+ 0x0088cf06,
+ 0xf40289c4,
+ 0x0080230b,
+ 0x58e7f085,
+ 0x98cb21f4,
+ 0x96b08509,
+ 0x110bf400,
+ 0xb63407f0,
+ 0x09d00604,
+ 0x8004bd00,
+/* 0x016e: intr_skip_watchdog */
+ 0x89e48409,
+ 0x0bf40800,
+ 0x8897f148,
+ 0x0694b606,
+ 0xc40099cf,
+ 0x0bf4029a,
+ 0xc0c7f12c,
+ 0x06c4b604,
+ 0xf900cccf,
+ 0x48e7f1c0,
+ 0x53e3f14f,
+ 0x00d7f054,
+ 0x02b921f5,
+ 0x07f1c0fc,
+ 0x04b604c0,
+ 0x000cd006,
+/* 0x01ae: intr_subintr_skip_fifo */
+ 0x07f104bd,
+ 0x04b60688,
+ 0x0009d006,
+/* 0x01ba: intr_skip_subintr */
+ 0x89c404bd,
+ 0x070bf420,
+ 0xffbfa4f1,
+/* 0x01c4: intr_skip_pause */
+ 0xf44089c4,
+ 0xa4f1070b,
+/* 0x01ce: intr_skip_user0 */
+ 0x07f0ffbf,
+ 0x0604b604,
+ 0xbd0008d0,
+ 0xfe80fc04,
+ 0xf0fc0088,
+ 0xd0fce0fc,
+ 0xb0fcc0fc,
+ 0x90fca0fc,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x01f5: timer */
+ 0x1032f401,
+ 0xb003f898,
+ 0x1cf40086,
+ 0x03fe8051,
+ 0xb63807f0,
+ 0x08d00604,
+ 0xf004bd00,
+ 0x84b60887,
+ 0x0088cf06,
+ 0xf40284f0,
+ 0x87f0261b,
+ 0x0684b634,
+ 0xb80088cf,
+ 0x0bf406e0,
+ 0x06e8b809,
+/* 0x0233: timer_reset */
+ 0xf01f1ef4,
+ 0x04b63407,
+ 0x000ed006,
+ 0x0e8004bd,
+/* 0x0241: timer_enable */
+ 0x0187f084,
+ 0xb63807f0,
+ 0x08d00604,
+/* 0x024f: timer_done */
+ 0xf404bd00,
+ 0x00f81031,
+/* 0x0254: send_proc */
+ 0x90f980f9,
+ 0x9805e898,
+ 0x86f004e9,
+ 0x0689b804,
+ 0xc42a0bf4,
+ 0x88940398,
+ 0x1880b604,
+ 0x98008ebb,
+ 0x8a8000fa,
+ 0x018d8000,
+ 0x80028c80,
+ 0x90b6038b,
+ 0x0794f001,
+ 0xf404e980,
+/* 0x028e: send_done */
+ 0x90fc0231,
+ 0x00f880fc,
+/* 0x0294: find */
+ 0x87f080f9,
+ 0x0131f458,
+/* 0x029c: find_loop */
+ 0xb8008a98,
+ 0x0bf406ae,
+ 0x5880b610,
+ 0x021086b1,
+ 0xf4f01bf4,
+/* 0x02b2: find_done */
+ 0x8eb90132,
+ 0xf880fc02,
+/* 0x02b9: send */
+ 0x9421f500,
+ 0x9701f402,
+/* 0x02c2: recv */
+ 0xe89800f8,
+ 0x04e99805,
+ 0xb80132f4,
+ 0x0bf40689,
+ 0x0389c43d,
+ 0xf00180b6,
+ 0xe8800784,
+ 0x02ea9805,
+ 0x8ffef0f9,
+ 0xb9f0f901,
+ 0x999402ef,
+ 0x00e9bb04,
+ 0x9818e0b6,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0xf900ee98,
+ 0xfef0fca5,
+ 0x31f400f8,
+/* 0x030b: recv_done */
+ 0xf8f0fc01,
+/* 0x030d: init */
+ 0x0817f100,
+ 0x0614b601,
+ 0xe70011cf,
+ 0xb6010911,
+ 0x14fe0814,
+ 0xe017f100,
+ 0x0013f000,
+ 0xb61c07f0,
+ 0x01d00604,
+ 0xf004bd00,
+ 0x07f0ff17,
+ 0x0604b614,
+ 0xbd0001d0,
+ 0x0217f004,
+ 0x080015f1,
+ 0xb61007f0,
+ 0x01d00604,
+ 0xf104bd00,
+ 0xf0010a17,
+ 0x10fe0013,
+ 0x1031f400,
+ 0xf00117f0,
+ 0x04b63807,
+ 0x0001d006,
+ 0xf7f004bd,
+/* 0x0371: init_proc */
+ 0x01f19858,
+ 0xf40016b0,
+ 0x15f9fa0b,
+ 0xf458f0b6,
+/* 0x0382: host_send */
+ 0x17f1f20e,
+ 0x14b604b0,
+ 0x0011cf06,
+ 0x04a027f1,
+ 0xcf0624b6,
+ 0x12b80022,
+ 0x320bf406,
+ 0x94071ec4,
+ 0xe0b704ee,
+ 0xeb980218,
+ 0x02ec9803,
+ 0x9801ed98,
+ 0x21f500ee,
+ 0x10b602b9,
+ 0x0f1ec401,
+ 0x04b007f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x03cb: host_send_done */
+ 0xf8ba0ef4,
+/* 0x03cd: host_recv */
+ 0x4917f100,
+ 0x5413f14e,
+ 0x06e1b852,
+/* 0x03db: host_recv_wait */
+ 0xf1aa0bf4,
+ 0xb604cc17,
+ 0x11cf0614,
+ 0xc827f100,
+ 0x0624b604,
+ 0xf00022cf,
+ 0x12b80816,
+ 0xe60bf406,
+ 0xb60723c4,
+ 0x30b70434,
+ 0x3b800298,
+ 0x023c8003,
+ 0x80013d80,
+ 0x20b6003e,
+ 0x0f24f001,
+ 0x04c807f1,
+ 0xd00604b6,
+ 0x04bd0002,
+ 0xf04027f0,
+ 0x04b60007,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0430: host_init */
+ 0x008017f1,
+ 0xf11014b6,
+ 0xf1021815,
+ 0xb604d007,
+ 0x01d00604,
+ 0xf104bd00,
+ 0xb6008017,
+ 0x15f11014,
+ 0x07f10298,
+ 0x04b604dc,
+ 0x0001d006,
+ 0x17f004bd,
+ 0xc407f101,
+ 0x0604b604,
+ 0xbd0001d0,
+/* 0x046f: memx_func_enter */
+ 0xf000f804,
+ 0x07f10467,
+ 0x04b607e0,
+ 0x0006d006,
+/* 0x047e: memx_func_enter_wait */
+ 0x67f104bd,
+ 0x64b607c0,
+ 0x0066cf06,
+ 0xf40464f0,
+ 0x1698f30b,
+ 0x0410b600,
+/* 0x0496: memx_func_leave */
+ 0x67f000f8,
+ 0xe407f104,
+ 0x0604b607,
+ 0xbd0006d0,
+/* 0x04a5: memx_func_leave_wait */
+ 0xc067f104,
+ 0x0664b607,
+ 0xf00066cf,
+ 0x1bf40464,
+/* 0x04b7: memx_func_wr32 */
+ 0x9800f8f3,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
+ 0xe0fcd0fc,
+ 0xf13f21f4,
+ 0xfd140003,
+ 0x05800506,
+ 0xb604bd00,
+ 0x1bf40242,
+/* 0x04df: memx_func_wait */
+ 0xf000f8dd,
+ 0x84b62c87,
+ 0x0088cf06,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0xf41010b6,
+ 0x00f89c21,
+/* 0x04fc: memx_func_delay */
+ 0xb6001e98,
+ 0x21f40410,
+/* 0x0507: memx_exec */
+ 0xf900f87f,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x0511: memx_exec_next */
+ 0x00139802,
+ 0x950410b6,
+ 0x30f01034,
+ 0xc835980c,
+ 0x12b855f9,
+ 0xec1ef406,
+ 0xe0fcd0fc,
+ 0x02b921f5,
+/* 0x0532: memx_info */
+ 0xc7f100f8,
+ 0xb7f10354,
+ 0x21f50800,
+ 0x00f802b9,
+/* 0x0540: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0c40b,
+ 0xe90bf400,
+/* 0x054e: memx_init */
+ 0x00f800f8,
+/* 0x0550: perf_recv */
+/* 0x0552: perf_init */
+ 0x00f800f8,
+/* 0x0554: test_recv */
+ 0x05d817f1,
+ 0xcf0614b6,
+ 0x10b60011,
+ 0xd807f101,
+ 0x0604b605,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0xf521f513,
+/* 0x057b: test_init */
+ 0xf100f801,
+ 0xf50800e7,
+ 0xf801f521,
+/* 0x0585: idle_recv */
+/* 0x0587: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x05a3: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x05a9: idle_proc */
+/* 0x05a9: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc02c2,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x05bd: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
new file mode 100644
index 00000000000..32d65ea254d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF119
+
+//#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nvd0_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nvd0_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
new file mode 100644
index 00000000000..ce65e2a4b78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nvd0_pwr_data[] = {
+/* 0x0000: proc_kern */
+ 0x52544e49,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: proc_list_head */
+ 0x54534f48,
+ 0x000003be,
+ 0x00000367,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x584d454d,
+ 0x000004c4,
+ 0x000004b6,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x46524550,
+ 0x000004c8,
+ 0x000004c6,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54534554,
+ 0x000004eb,
+ 0x000004ca,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x454c4449,
+ 0x000004f7,
+ 0x000004f5,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+ 0x00000000,
+/* 0x0214: time_next */
+ 0x00000000,
+/* 0x0218: fifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0298: rfifo_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0318: memx_func_head */
+ 0x00010000,
+ 0x00000000,
+ 0x000003f4,
+/* 0x0324: memx_func_next */
+ 0x00000001,
+ 0x00000000,
+ 0x00000415,
+ 0x00000002,
+ 0x00000002,
+ 0x00000430,
+ 0x00040003,
+ 0x00000000,
+ 0x00000458,
+ 0x00010004,
+ 0x00000000,
+ 0x00000472,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0b54: memx_data_tail */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nvd0_pwr_code[] = {
+ 0x02bf0ef5,
+/* 0x0004: rd32 */
+ 0x07a007f1,
+ 0xbd000ed0,
+ 0x01e7f004,
+ 0xf101e3f0,
+ 0xd007ac07,
+ 0x04bd000e,
+/* 0x001c: rd32_wait */
+ 0x07ace7f1,
+ 0xf100eecf,
+ 0xf47000e4,
+ 0xd7f1f51b,
+ 0xddcf07a4,
+/* 0x0033: wr32 */
+ 0xf100f800,
+ 0xd007a007,
+ 0x04bd000e,
+ 0x07a407f1,
+ 0xbd000dd0,
+ 0x02e7f004,
+ 0xf0f0e5f0,
+ 0x07f101e3,
+ 0x0ed007ac,
+/* 0x0057: wr32_wait */
+ 0xf104bd00,
+ 0xcf07ace7,
+ 0xe4f100ee,
+ 0x1bf47000,
+/* 0x0067: nsec */
+ 0xf000f8f5,
+ 0x88cf2c87,
+/* 0x006d: nsec_loop */
+ 0x2c97f000,
+ 0xbb0099cf,
+ 0x9eb80298,
+ 0xf41ef406,
+/* 0x007e: wait */
+ 0x87f000f8,
+ 0x0088cf2c,
+/* 0x0084: wait_loop */
+ 0xf402eeb9,
+ 0xdab90421,
+ 0x04adfd02,
+ 0xf406acb8,
+ 0x97f0120b,
+ 0x0099cf2c,
+ 0xb80298bb,
+ 0x1ef4069b,
+/* 0x00a5: wait_done */
+/* 0x00a7: intr_watchdog */
+ 0x9800f8e2,
+ 0x96b003e9,
+ 0x2a0bf400,
+ 0xbb840a98,
+ 0x1cf4029a,
+ 0x01d7f00f,
+ 0x020621f5,
+ 0x0ef494bd,
+/* 0x00c5: intr_watchdog_next_time */
+ 0x850a9815,
+ 0xf400a6b0,
+ 0x9ab8090b,
+ 0x061cf406,
+/* 0x00d4: intr_watchdog_next_time_set */
+/* 0x00d7: intr_watchdog_next_proc */
+ 0x80850980,
+ 0xe0b603e9,
+ 0x10e6b158,
+ 0xc61bf402,
+/* 0x00e6: intr */
+ 0x00f900f8,
+ 0x80f904bd,
+ 0xa0f990f9,
+ 0xc0f9b0f9,
+ 0xe0f9d0f9,
+ 0xf7f0f0f9,
+ 0x0188fe00,
+ 0x87f180f9,
+ 0x88cf05d0,
+ 0x0180b600,
+ 0x05d007f1,
+ 0xbd0008d0,
+ 0x0887f004,
+ 0xc40088cf,
+ 0x0bf40289,
+ 0x85008020,
+ 0xf458e7f0,
+ 0x0998a721,
+ 0x0096b085,
+ 0xf00e0bf4,
+ 0x09d03407,
+ 0x8004bd00,
+/* 0x013e: intr_skip_watchdog */
+ 0x89e48409,
+ 0x0bf40800,
+ 0x8897f13c,
+ 0x0099cf06,
+ 0xf4029ac4,
+ 0xc7f1260b,
+ 0xcccf04c0,
+ 0xf1c0f900,
+ 0xf14f48e7,
+ 0xf05453e3,
+ 0x21f500d7,
+ 0xc0fc026b,
+ 0x04c007f1,
+ 0xbd000cd0,
+/* 0x0175: intr_subintr_skip_fifo */
+ 0x8807f104,
+ 0x0009d006,
+/* 0x017e: intr_skip_subintr */
+ 0x89c404bd,
+ 0x070bf420,
+ 0xffbfa4f1,
+/* 0x0188: intr_skip_pause */
+ 0xf44089c4,
+ 0xa4f1070b,
+/* 0x0192: intr_skip_user0 */
+ 0x07f0ffbf,
+ 0x0008d004,
+ 0x80fc04bd,
+ 0xfc0088fe,
+ 0xfce0fcf0,
+ 0xfcc0fcd0,
+ 0xfca0fcb0,
+ 0xfc80fc90,
+ 0x0032f400,
+/* 0x01b6: timer */
+ 0x32f401f8,
+ 0x03f89810,
+ 0xf40086b0,
+ 0xfe80421c,
+ 0x3807f003,
+ 0xbd0008d0,
+ 0x0887f004,
+ 0xf00088cf,
+ 0x1bf40284,
+ 0x3487f020,
+ 0xb80088cf,
+ 0x0bf406e0,
+ 0x06e8b809,
+/* 0x01eb: timer_reset */
+ 0xf0191ef4,
+ 0x0ed03407,
+ 0x8004bd00,
+/* 0x01f6: timer_enable */
+ 0x87f0840e,
+ 0x3807f001,
+ 0xbd0008d0,
+/* 0x0201: timer_done */
+ 0x1031f404,
+/* 0x0206: send_proc */
+ 0x80f900f8,
+ 0xe89890f9,
+ 0x04e99805,
+ 0xb80486f0,
+ 0x0bf40689,
+ 0x0398c42a,
+ 0xb6048894,
+ 0x8ebb1880,
+ 0x00fa9800,
+ 0x80008a80,
+ 0x8c80018d,
+ 0x038b8002,
+ 0xf00190b6,
+ 0xe9800794,
+ 0x0231f404,
+/* 0x0240: send_done */
+ 0x80fc90fc,
+/* 0x0246: find */
+ 0x80f900f8,
+ 0xf45887f0,
+/* 0x024e: find_loop */
+ 0x8a980131,
+ 0x06aeb800,
+ 0xb6100bf4,
+ 0x86b15880,
+ 0x1bf40210,
+ 0x0132f4f0,
+/* 0x0264: find_done */
+ 0xfc028eb9,
+/* 0x026b: send */
+ 0xf500f880,
+ 0xf4024621,
+ 0x00f89701,
+/* 0x0274: recv */
+ 0x9805e898,
+ 0x32f404e9,
+ 0x0689b801,
+ 0xc43d0bf4,
+ 0x80b60389,
+ 0x0784f001,
+ 0x9805e880,
+ 0xf0f902ea,
+ 0xf9018ffe,
+ 0x02efb9f0,
+ 0xbb049994,
+ 0xe0b600e9,
+ 0x03eb9818,
+ 0x9802ec98,
+ 0xee9801ed,
+ 0xfca5f900,
+ 0x00f8fef0,
+ 0xfc0131f4,
+/* 0x02bd: recv_done */
+/* 0x02bf: init */
+ 0xf100f8f0,
+ 0xcf010817,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0x00e017f1,
+ 0xf00013f0,
+ 0x01d01c07,
+ 0xf004bd00,
+ 0x07f0ff17,
+ 0x0001d014,
+ 0x17f004bd,
+ 0x0015f102,
+ 0x1007f008,
+ 0xbd0001d0,
+ 0xe617f104,
+ 0x0013f000,
+ 0xf40010fe,
+ 0x17f01031,
+ 0x3807f001,
+ 0xbd0001d0,
+ 0x58f7f004,
+/* 0x0314: init_proc */
+ 0xb001f198,
+ 0x0bf40016,
+ 0xb615f9fa,
+ 0x0ef458f0,
+/* 0x0325: host_send */
+ 0xb017f1f2,
+ 0x0011cf04,
+ 0x04a027f1,
+ 0xb80022cf,
+ 0x0bf40612,
+ 0x071ec42f,
+ 0xb704ee94,
+ 0x980218e0,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0xf500ee98,
+ 0xb6026b21,
+ 0x1ec40110,
+ 0xb007f10f,
+ 0x0001d004,
+ 0x0ef404bd,
+/* 0x0365: host_send_done */
+/* 0x0367: host_recv */
+ 0xf100f8c3,
+ 0xf14e4917,
+ 0xb8525413,
+ 0x0bf406e1,
+/* 0x0375: host_recv_wait */
+ 0xcc17f1b3,
+ 0x0011cf04,
+ 0x04c827f1,
+ 0xf00022cf,
+ 0x12b80816,
+ 0xec0bf406,
+ 0xb60723c4,
+ 0x30b70434,
+ 0x3b800298,
+ 0x023c8003,
+ 0x80013d80,
+ 0x20b6003e,
+ 0x0f24f001,
+ 0x04c807f1,
+ 0xbd0002d0,
+ 0x4027f004,
+ 0xd00007f0,
+ 0x04bd0002,
+/* 0x03be: host_init */
+ 0x17f100f8,
+ 0x14b60080,
+ 0x1815f110,
+ 0xd007f102,
+ 0x0001d004,
+ 0x17f104bd,
+ 0x14b60080,
+ 0x9815f110,
+ 0xdc07f102,
+ 0x0001d004,
+ 0x17f004bd,
+ 0xc407f101,
+ 0x0001d004,
+ 0x00f804bd,
+/* 0x03f4: memx_func_enter */
+ 0xf10467f0,
+ 0xd007e007,
+ 0x04bd0006,
+/* 0x0400: memx_func_enter_wait */
+ 0x07c067f1,
+ 0xf00066cf,
+ 0x0bf40464,
+ 0x001698f6,
+ 0xf80410b6,
+/* 0x0415: memx_func_leave */
+ 0x0467f000,
+ 0x07e407f1,
+ 0xbd0006d0,
+/* 0x0421: memx_func_leave_wait */
+ 0xc067f104,
+ 0x0066cf07,
+ 0xf40464f0,
+ 0x00f8f61b,
+/* 0x0430: memx_func_wr32 */
+ 0x98001698,
+ 0x10b60115,
+ 0xf960f908,
+ 0xfcd0fc50,
+ 0x3321f4e0,
+ 0x140003f1,
+ 0x800506fd,
+ 0x04bd0005,
+ 0xf40242b6,
+ 0x00f8dd1b,
+/* 0x0458: memx_func_wait */
+ 0xcf2c87f0,
+ 0x1e980088,
+ 0x011d9800,
+ 0x98021c98,
+ 0x10b6031b,
+ 0x7e21f410,
+/* 0x0472: memx_func_delay */
+ 0x1e9800f8,
+ 0x0410b600,
+ 0xf86721f4,
+/* 0x047d: memx_exec */
+ 0xf9e0f900,
+ 0x02c1b9d0,
+/* 0x0487: memx_exec_next */
+ 0x9802b2b9,
+ 0x10b60013,
+ 0x10349504,
+ 0x980c30f0,
+ 0x55f9c835,
+ 0xf40612b8,
+ 0xd0fcec1e,
+ 0x21f5e0fc,
+ 0x00f8026b,
+/* 0x04a8: memx_info */
+ 0x0354c7f1,
+ 0x0800b7f1,
+ 0x026b21f5,
+/* 0x04b6: memx_recv */
+ 0xd6b000f8,
+ 0xc40bf401,
+ 0xf400d6b0,
+ 0x00f8e90b,
+/* 0x04c4: memx_init */
+/* 0x04c6: perf_recv */
+ 0x00f800f8,
+/* 0x04c8: perf_init */
+/* 0x04ca: test_recv */
+ 0x17f100f8,
+ 0x11cf05d8,
+ 0x0110b600,
+ 0x05d807f1,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0xb621f513,
+/* 0x04eb: test_init */
+ 0xf100f801,
+ 0xf50800e7,
+ 0xf801b621,
+/* 0x04f5: idle_recv */
+/* 0x04f7: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x11cf05d4,
+ 0x0110b600,
+ 0x05d407f1,
+ 0xbd0001d0,
+/* 0x050d: idle_loop */
+ 0x5817f004,
+/* 0x0513: idle_proc */
+/* 0x0513: idle_proc_exec */
+ 0xf90232f4,
+ 0x021eb910,
+ 0x027421f5,
+ 0x11f410fc,
+ 0x0231f409,
+/* 0x0527: idle_proc_next */
+ 0xb6ef0ef4,
+ 0x1fb85810,
+ 0xe61bf406,
+ 0xf4dd02f4,
+ 0x0ef40028,
+ 0x000000c1,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
new file mode 100644
index 00000000000..5fb0cccc6c6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -0,0 +1,27 @@
+#ifndef __NVKM_PWR_OS_H__
+#define __NVKM_PWR_OS_H__
+
+/* Process names */
+#define PROC_KERN 0x52544e49
+#define PROC_IDLE 0x454c4449
+#define PROC_HOST 0x54534f48
+#define PROC_MEMX 0x584d454d
+#define PROC_PERF 0x46524550
+#define PROC_TEST 0x54534554
+
+/* KERN: message identifiers */
+#define KMSG_FIFO 0x00000000
+#define KMSG_ALARM 0x00000001
+
+/* MEMX: message identifiers */
+#define MEMX_MSG_INFO 0
+#define MEMX_MSG_EXEC 1
+
+/* MEMX: script opcode definitions */
+#define MEMX_ENTER 0
+#define MEMX_LEAVE 1
+#define MEMX_WR32 2
+#define MEMX_WAIT 3
+#define MEMX_DELAY 4
+
+#endif
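
The PROC_* tags above are simply the four ASCII characters of each process name packed into a u32, low byte first: 0x584d454d unpacks to "MEMX", 0x54534f48 to "HOST", 0x46524550 to "PERF" and 0x54534554 to "TEST" (PROC_KERN is the odd one out and unpacks to "INTR"). A small standalone check of that packing, using a hypothetical helper name:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: unpack a PROC_* tag into its four ASCII bytes,
 * low byte first. */
static void proc_name(uint32_t tag, char out[5])
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = (tag >> (8 * i)) & 0xff;
	out[4] = '\0';
}

int main(void)
{
	char buf[5];

	proc_name(0x584d454d, buf);	/* PROC_MEMX */
	printf("%s\n", buf);		/* prints MEMX */
	proc_name(0x54534554, buf);	/* PROC_TEST */
	printf("%s\n", buf);		/* prints TEST */
	return 0;
}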
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
new file mode 100644
index 00000000000..38eadf705cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_PERF, #perf_init, #perf_recv)
+#endif
+
+/******************************************************************************
+ * PERF data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * PERF code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// description
+//
+// $r15 - current (perf)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0 - zero
+perf_recv:
+ ret
+
+// description
+//
+// $r15 - current (perf)
+// $r0 - zero
+perf_init:
+ ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
new file mode 100644
index 00000000000..0c3a71bf545
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_TEST, #test_init, #test_recv)
+#endif
+
+/******************************************************************************
+ * TEST data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * TEST code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// description
+//
+// $r15 - current (test)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0 - zero
+test_recv:
+ nv_iord($r1, NV_PPWR_DSCRATCH(2))
+ add b32 $r1 1
+ nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
+ mov $r14 -0x2700 /* 0xd900, envyas grrr! */
+ sethi $r14 0x134f0000
+ call(timer)
+ ret
+
+// description
+//
+// $r15 - current (test)
+// $r0 - zero
+test_init:
+ mov $r14 0x800
+ call(timer)
+ ret
+#endif
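
A note on the immediate trick in test_recv above: the "envyas grrr!" comment suggests the assembler cannot take 0xd900 directly as a 16-bit immediate, so the code loads -0x2700, which sign-extends to 0xffffd900, and sethi is then assumed to overwrite the upper 16 bits, leaving 0x134fd900 in $r14 before the timer call. The arithmetic, checked in C:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* mov $r14 -0x2700: 16-bit immediate, sign-extended to 32 bits */
	uint32_t r14 = (uint32_t)(int32_t)(int16_t)-0x2700;	/* 0xffffd900 */

	/* sethi $r14 0x134f0000: assumed to replace the upper 16 bits */
	r14 = (r14 & 0x0000ffff) | 0x134f0000;
	printf("0x%08x\n", r14);				/* 0x134fd900 */
	return 0;
}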
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
new file mode 100644
index 00000000000..03de3107d29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -0,0 +1,121 @@
+#ifndef __NVKM_PWR_MEMX_H__
+#define __NVKM_PWR_MEMX_H__
+
+#include <subdev/pwr.h>
+#include <subdev/pwr/fuc/os.h>
+
+struct nouveau_memx {
+ struct nouveau_pwr *ppwr;
+ u32 base;
+ u32 size;
+ struct {
+ u32 mthd;
+ u32 size;
+ u32 data[64];
+ } c;
+};
+
+static void
+memx_out(struct nouveau_memx *memx)
+{
+ struct nouveau_pwr *ppwr = memx->ppwr;
+ int i;
+
+ if (memx->c.size) {
+ nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+ for (i = 0; i < memx->c.size; i++)
+ nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
+ memx->c.size = 0;
+ }
+}
+
+static void
+memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
+{
+ if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
+ (memx->c.size && memx->c.mthd != mthd))
+ memx_out(memx);
+ memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
+ memx->c.size += size;
+ memx->c.mthd = mthd;
+}
+
+int
+nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
+{
+ struct nouveau_memx *memx;
+ u32 reply[2];
+ int ret;
+
+ ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
+ if (ret)
+ return ret;
+
+ memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
+ if (!memx)
+ return -ENOMEM;
+ memx->ppwr = ppwr;
+ memx->base = reply[0];
+ memx->size = reply[1];
+
+ /* acquire data segment access */
+ do {
+ nv_wr32(ppwr, 0x10a580, 0x00000003);
+ } while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
+ nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
+ nv_wr32(ppwr, 0x10a1c4, 0x00010000 | MEMX_ENTER);
+ nv_wr32(ppwr, 0x10a1c4, 0x00000000);
+ return 0;
+}
+
+int
+nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
+{
+ struct nouveau_memx *memx = *pmemx;
+ struct nouveau_pwr *ppwr = memx->ppwr;
+ u32 finish, reply[2];
+
+ /* flush the cache... */
+ memx_out(memx);
+
+ /* release data segment access */
+ nv_wr32(ppwr, 0x10a1c4, 0x00000000 | MEMX_LEAVE);
+ finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
+ nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+ /* call MEMX process to execute the script, and wait for reply */
+ if (exec) {
+ ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_EXEC,
+ memx->base, finish);
+ }
+
+ kfree(memx);
+ return 0;
+}
+
+void
+nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
+{
+ nv_debug(memx->ppwr, "R[%06x] = 0x%08x\n", addr, data);
+ memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
+}
+
+void
+nouveau_memx_wait(struct nouveau_memx *memx,
+ u32 addr, u32 mask, u32 data, u32 nsec)
+{
+ nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
+ addr, mask, data, nsec);
+ memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
+{
+ nv_debug(memx->ppwr, " DELAY = %d ns\n", nsec);
+ memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
+ memx_out(memx); /* fuc can't handle multiple */
+}
+
+#endif
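
The four entry points at the bottom of memx.c form a small script-builder API: nouveau_memx_init() asks the MEMX process for its buffer (MEMX_MSG_INFO) and grabs data-segment access, the wr32/wait/nsec helpers queue MEMX_* opcodes through memx_cmd(), and nouveau_memx_fini() flushes the queue and optionally asks the process to execute the script (MEMX_MSG_EXEC). A hypothetical caller sketch; the register offset, mask and timeouts below are placeholders, not values from this patch:

/* Hypothetical memx usage sketch: queue a write, a poll and a delay,
 * then have the MEMX process run the script. */
static int example_memx_script(struct nouveau_pwr *ppwr)
{
	struct nouveau_memx *memx;
	int ret;

	ret = nouveau_memx_init(ppwr, &memx);
	if (ret)
		return ret;

	nouveau_memx_wr32(memx, 0x100200, 0x00000001);	/* MEMX_WR32 */
	nouveau_memx_wait(memx, 0x100200, 0x80000000,
			  0x80000000, 2000);		/* MEMX_WAIT until bit 31 set */
	nouveau_memx_nsec(memx, 1000);			/* MEMX_DELAY */

	return nouveau_memx_fini(&memx, true);		/* true: execute now */
}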
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
new file mode 100644
index 00000000000..52c85414866
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nv108.fuc.h"
+
+struct nv108_pwr_priv {
+ struct nouveau_pwr base;
+};
+
+static int
+nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv108_pwr_priv *priv;
+ int ret;
+
+ ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.code.data = nv108_pwr_code;
+ priv->base.code.size = sizeof(nv108_pwr_code);
+ priv->base.data.data = nv108_pwr_data;
+ priv->base.data.size = sizeof(nv108_pwr_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nv108_pwr_oclass = {
+ .handle = NV_SUBDEV(PWR, 0x00),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv108_pwr_ctor,
+ .dtor = _nouveau_pwr_dtor,
+ .init = _nouveau_pwr_init,
+ .fini = _nouveau_pwr_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
new file mode 100644
index 00000000000..c132b7ca974
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_pwr_priv {
+ struct nouveau_pwr base;
+};
+
+static int
+nva3_pwr_init(struct nouveau_object *object)
+{
+ struct nva3_pwr_priv *priv = (void *)object;
+ nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
+ return nouveau_pwr_init(&priv->base);
+}
+
+static int
+nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nva3_pwr_priv *priv;
+ int ret;
+
+ ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.code.data = nva3_pwr_code;
+ priv->base.code.size = sizeof(nva3_pwr_code);
+ priv->base.data.data = nva3_pwr_data;
+ priv->base.data.size = sizeof(nva3_pwr_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_pwr_oclass = {
+ .handle = NV_SUBDEV(PWR, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_pwr_ctor,
+ .dtor = _nouveau_pwr_dtor,
+ .init = nva3_pwr_init,
+ .fini = _nouveau_pwr_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
new file mode 100644
index 00000000000..495f6857428
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_pwr_priv {
+ struct nouveau_pwr base;
+};
+
+static int
+nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_pwr_priv *priv;
+ int ret;
+
+ ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.code.data = nvc0_pwr_code;
+ priv->base.code.size = sizeof(nvc0_pwr_code);
+ priv->base.data.data = nvc0_pwr_data;
+ priv->base.data.size = sizeof(nvc0_pwr_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_pwr_oclass = {
+ .handle = NV_SUBDEV(PWR, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_pwr_ctor,
+ .dtor = _nouveau_pwr_dtor,
+ .init = _nouveau_pwr_init,
+ .fini = _nouveau_pwr_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
new file mode 100644
index 00000000000..043aa142fe8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nvd0.fuc.h"
+
+struct nvd0_pwr_priv {
+ struct nouveau_pwr base;
+};
+
+static int
+nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_pwr_priv *priv;
+ int ret;
+
+ ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.code.data = nvd0_pwr_code;
+ priv->base.code.size = sizeof(nvd0_pwr_code);
+ priv->base.data.data = nvd0_pwr_data;
+ priv->base.data.size = sizeof(nvd0_pwr_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_pwr_oclass = {
+ .handle = NV_SUBDEV(PWR, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_pwr_ctor,
+ .dtor = _nouveau_pwr_dtor,
+ .init = _nouveau_pwr_init,
+ .fini = _nouveau_pwr_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f1de7a9c572..80e584a1bd1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -92,10 +92,11 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
struct nouveau_timer *ptimer = nouveau_timer(therm);
struct nouveau_therm_priv *priv = (void *)therm;
unsigned long flags;
- int duty;
+ bool immd = true;
+ bool poll = true;
+ int duty = -1;
spin_lock_irqsave(&priv->lock, flags);
- nv_debug(therm, "FAN speed check\n");
if (mode < 0)
mode = priv->mode;
priv->mode = mode;
@@ -106,28 +107,49 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
duty = nouveau_therm_fan_get(therm);
if (duty < 0)
duty = 100;
+ poll = false;
break;
case NOUVEAU_THERM_CTRL_AUTO:
- if (priv->fan->bios.nr_fan_trip)
+ if (priv->fan->bios.nr_fan_trip) {
duty = nouveau_therm_update_trip(therm);
- else
+ } else
+ if (priv->fan->bios.linear_min_temp ||
+ priv->fan->bios.linear_max_temp) {
duty = nouveau_therm_update_linear(therm);
+ } else {
+ if (priv->cstate)
+ duty = priv->cstate;
+ poll = false;
+ }
+ immd = false;
break;
case NOUVEAU_THERM_CTRL_NONE:
default:
ptimer->alarm_cancel(ptimer, &priv->alarm);
- goto done;
+ poll = false;
}
- nv_debug(therm, "FAN target request: %d%%\n", duty);
- nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
-
-done:
- if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+ if (list_empty(&priv->alarm.head) && poll)
ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
- else if (!list_empty(&priv->alarm.head))
- nv_debug(therm, "therm fan alarm list is not empty\n");
spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (duty >= 0) {
+ nv_debug(therm, "FAN target request: %d%%\n", duty);
+ nouveau_therm_fan_set(therm, immd, duty);
+ }
+}
+
+int
+nouveau_therm_cstate(struct nouveau_therm *ptherm, int fan, int dir)
+{
+ struct nouveau_therm_priv *priv = (void *)ptherm;
+ if (!dir || (dir < 0 && fan < priv->cstate) ||
+ (dir > 0 && fan > priv->cstate)) {
+ nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
+ priv->cstate = fan;
+ nouveau_therm_update(ptherm, -1);
+ }
+ return 0;
}
static void
@@ -149,14 +171,15 @@ nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
"automatic"
};
- /* The default PDAEMON ucode interferes with fan management */
+ /* The default PPWR ucode on fermi interferes with fan management */
if ((mode >= ARRAY_SIZE(name)) ||
- (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+ (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
+ !nouveau_subdev(device, NVDEV_SUBDEV_PWR)))
return -EINVAL;
/* do not allow automatic fan management if the thermal sensor is
* not available */
- if (priv->mode == 2 && therm->temp_get(therm) < 0)
+ if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
return -EINVAL;
if (priv->mode == mode)
@@ -335,7 +358,7 @@ nouveau_therm_preinit(struct nouveau_therm *therm)
nouveau_therm_ic_ctor(therm);
nouveau_therm_fan_ctor(therm);
- nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+ nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
nouveau_therm_sensor_preinit(therm);
return 0;
}
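
Stripped of the surrounding diff context, the reworked nouveau_therm_update() above is a small decision table: MANUAL keeps the currently programmed duty and stops polling, AUTO picks trip-point, linear or clock-state-default control (only the first two keep the one-second alarm armed), NONE just cancels the alarm; the fan is reprogrammed only when a duty was actually chosen, and immediately only outside AUTO mode. A condensed sketch of that control flow, with the helper names from the patch and the locking plus alarm handling left out:

/* Control-flow sketch of the reworked nouveau_therm_update(); locking and
 * the one-second alarm (re)arming are omitted for brevity. */
static void therm_update_sketch(struct nouveau_therm *therm, int mode)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	bool immd = true, poll = true;
	int duty = -1;

	switch (mode) {
	case NOUVEAU_THERM_CTRL_MANUAL:
		duty = nouveau_therm_fan_get(therm);
		if (duty < 0)
			duty = 100;
		poll = false;
		break;
	case NOUVEAU_THERM_CTRL_AUTO:
		if (priv->fan->bios.nr_fan_trip)
			duty = nouveau_therm_update_trip(therm);
		else if (priv->fan->bios.linear_min_temp ||
			 priv->fan->bios.linear_max_temp)
			duty = nouveau_therm_update_linear(therm);
		else {
			if (priv->cstate)
				duty = priv->cstate;	/* default from nouveau_therm_cstate() */
			poll = false;
		}
		immd = false;		/* let the fan ramp instead of jumping */
		break;
	case NOUVEAU_THERM_CTRL_NONE:
	default:
		poll = false;		/* the real code also cancels the alarm */
		break;
	}

	if (duty >= 0)
		nouveau_therm_fan_set(therm, immd, duty);
	(void)poll;			/* decides whether the alarm is re-armed */
}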
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 39f47b950ad..95f6129eeed 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -185,8 +185,11 @@ nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
priv->fan->bios.max_duty = 100;
priv->fan->bios.bump_period = 500;
priv->fan->bios.slow_down_period = 2000;
+/*XXX: talk to mupuf */
+#if 0
priv->fan->bios.linear_min_temp = 40;
priv->fan->bios.linear_max_temp = 85;
+#endif
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
index e601773ee47..f69dab11f72 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -97,6 +97,13 @@ nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
{
struct nouveau_therm_priv *tpriv = (void *)therm;
struct nouveau_fantog_priv *priv;
+ int ret;
+
+ if (therm->pwm_ctrl) {
+ ret = therm->pwm_ctrl(therm, func->line, false);
+ if (ret)
+ return ret;
+ }
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
tpriv->fan = &priv->base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 8b3adec5fbb..e44ed7b93c6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
if (!client)
return false;
- if (!client->driver || client->driver->detect(client, info)) {
+ if (!client->dev.driver ||
+ to_i2c_driver(client->dev.driver)->detect(client, info)) {
i2c_unregister_device(client);
return false;
}
@@ -55,28 +56,28 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
return true;
}
-static struct i2c_board_info
+static struct nouveau_i2c_board_info
nv_board_infos[] = {
- { I2C_BOARD_INFO("w83l785ts", 0x2d) },
- { I2C_BOARD_INFO("w83781d", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2e) },
- { I2C_BOARD_INFO("adt7473", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2c) },
- { I2C_BOARD_INFO("f75375", 0x2e) },
- { I2C_BOARD_INFO("lm99", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x18) },
- { I2C_BOARD_INFO("adm1021", 0x19) },
- { I2C_BOARD_INFO("adm1021", 0x1a) },
- { I2C_BOARD_INFO("adm1021", 0x29) },
- { I2C_BOARD_INFO("adm1021", 0x2a) },
- { I2C_BOARD_INFO("adm1021", 0x2b) },
- { I2C_BOARD_INFO("adm1021", 0x4c) },
- { I2C_BOARD_INFO("adm1021", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x4e) },
- { I2C_BOARD_INFO("lm63", 0x18) },
- { I2C_BOARD_INFO("lm63", 0x4e) },
+ { { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
+ { { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
+ { { I2C_BOARD_INFO("adt7473", 0x2e) }, 20 },
+ { { I2C_BOARD_INFO("adt7473", 0x2d) }, 20 },
+ { { I2C_BOARD_INFO("adt7473", 0x2c) }, 20 },
+ { { I2C_BOARD_INFO("f75375", 0x2e) }, 0 },
+ { { I2C_BOARD_INFO("lm99", 0x4c) }, 0 },
+ { { I2C_BOARD_INFO("lm90", 0x4c) }, 0 },
+ { { I2C_BOARD_INFO("lm90", 0x4d) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x18) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x19) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x1a) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x29) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x2a) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x2b) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x4c) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x4d) }, 0 },
+ { { I2C_BOARD_INFO("adm1021", 0x4e) }, 0 },
+ { { I2C_BOARD_INFO("lm63", 0x18) }, 0 },
+ { { I2C_BOARD_INFO("lm63", 0x4e) }, 0 },
{ }
};
@@ -89,9 +90,9 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
struct nvbios_extdev_func extdev_entry;
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
- struct i2c_board_info board[] = {
- { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
- { }
+ struct nouveau_i2c_board_info board[] = {
+ { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
+ { }
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
@@ -101,9 +102,9 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
}
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
- struct i2c_board_info board[] = {
- { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
- { }
+ struct nouveau_i2c_board_info board[] = {
+ { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
+ { }
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
index 42ba633ccff..1d15c52fad0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -126,7 +126,7 @@ nv84_therm_intr(struct nouveau_subdev *subdev)
spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
- intr = nv_rd32(therm, 0x20100);
+ intr = nv_rd32(therm, 0x20100) & 0x3ff;
/* THRS_4: downclock */
if (intr & 0x002) {
@@ -209,6 +209,19 @@ nv84_therm_ctor(struct nouveau_object *parent,
return nouveau_therm_preinit(&priv->base.base);
}
+int
+nv84_therm_fini(struct nouveau_object *object, bool suspend)
+{
+ /* Disable PTherm IRQs */
+ nv_wr32(object, 0x20000, 0x00000000);
+
+ /* ACK all PTherm IRQs */
+ nv_wr32(object, 0x20100, 0xffffffff);
+ nv_wr32(object, 0x1100, 0x10000); /* PBUS */
+
+ return _nouveau_therm_fini(object, suspend);
+}
+
struct nouveau_oclass
nv84_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0x84),
@@ -216,6 +229,6 @@ nv84_therm_oclass = {
.ctor = nv84_therm_ctor,
.dtor = _nouveau_therm_dtor,
.init = _nouveau_therm_init,
- .fini = _nouveau_therm_fini,
+ .fini = nv84_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index d11a7c40081..3b2c4580098 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -94,6 +94,6 @@ nva3_therm_oclass = {
.ctor = nva3_therm_ctor,
.dtor = _nouveau_therm_dtor,
.init = nva3_therm_init,
- .fini = _nouveau_therm_fini,
+ .fini = nv84_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 54c28bdc420..4dd4f81ae87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -148,6 +148,6 @@ nvd0_therm_oclass = {
.ctor = nvd0_therm_ctor,
.dtor = _nouveau_therm_dtor,
.init = nvd0_therm_init,
- .fini = _nouveau_therm_fini,
+ .fini = nv84_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index dd38529262f..96f8f95693c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -76,6 +76,7 @@ struct nouveau_therm_priv {
spinlock_t lock;
struct nouveau_therm_trip_point *last_trip;
int mode;
+ int cstate;
int suspend;
/* bios */
@@ -144,6 +145,7 @@ int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
int nv50_fan_pwm_clock(struct nouveau_therm *);
int nv84_temp_get(struct nouveau_therm *therm);
+int nv84_therm_fini(struct nouveau_object *object, bool suspend);
int nva3_therm_fan_sense(struct nouveau_therm *);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b80a33011b9..cfde9eb44ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,8 +180,6 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
- nv_debug(therm, "polling the internal temperature\n");
-
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
NOUVEAU_THERM_THRS_FANBOOST);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 57711ecb566..c0bdd10358d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -119,16 +119,8 @@ nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
{
struct nv04_timer_priv *priv = (void *)ptimer;
unsigned long flags;
-
- /* avoid deleting an entry while the alarm intr is running */
spin_lock_irqsave(&priv->lock, flags);
-
- /* delete the alarm from the list */
- list_del(&alarm->head);
-
- /* reset the head so as list_empty returns 1 */
- INIT_LIST_HEAD(&alarm->head);
-
+ list_del_init(&alarm->head);
spin_unlock_irqrestore(&priv->lock, flags);
}
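
The alarm-cancel change above is a behaviour-preserving cleanup: list_del_init() unlinks the entry and re-points it at itself, which is exactly what the removed list_del() plus INIT_LIST_HEAD() pair did, so list_empty(&alarm->head) checks elsewhere (for instance in nouveau_therm_update()) still report a cancelled alarm correctly. A minimal kernel-context illustration of the equivalence:

#include <linux/list.h>

/* Both helpers leave the entry off the list with list_empty() true on it;
 * the second is simply the one-call form the patch switches to. */
static void cancel_open_coded(struct list_head *entry)
{
	list_del(entry);
	INIT_LIST_HEAD(entry);
}

static void cancel_one_call(struct list_head *entry)
{
	list_del_init(entry);
}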
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
new file mode 100644
index 00000000000..32794a99910
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/vmap.h>
+#include <subdev/bios/volt.h>
+
+static int
+nouveau_volt_get(struct nouveau_volt *volt)
+{
+ if (volt->vid_get) {
+ int ret = volt->vid_get(volt), i;
+ if (ret >= 0) {
+ for (i = 0; i < volt->vid_nr; i++) {
+ if (volt->vid[i].vid == ret)
+ return volt->vid[i].uv;
+ }
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+ return -ENODEV;
+}
+
+static int
+nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
+{
+ if (volt->vid_set) {
+ int i, ret = -EINVAL;
+ for (i = 0; i < volt->vid_nr; i++) {
+ if (volt->vid[i].uv == uv) {
+ ret = volt->vid_set(volt, volt->vid[i].vid);
+ nv_debug(volt, "set %duv: %d\n", uv, ret);
+ break;
+ }
+ }
+ return ret;
+ }
+ return -ENODEV;
+}
+
+static int
+nouveau_volt_map(struct nouveau_volt *volt, u8 id)
+{
+ struct nouveau_bios *bios = nouveau_bios(volt);
+ struct nvbios_vmap_entry info;
+ u8 ver, len;
+ u16 vmap;
+
+ vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+ if (vmap) {
+ if (info.link != 0xff) {
+ int ret = nouveau_volt_map(volt, info.link);
+ if (ret < 0)
+ return ret;
+ info.min += ret;
+ }
+ return info.min;
+ }
+
+ return id ? id * 10000 : -ENODEV;
+}
+
+static int
+nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+{
+ int ret = nouveau_volt_map(volt, id);
+ if (ret >= 0) {
+ int prev = nouveau_volt_get(volt);
+ if (!condition || prev < 0 ||
+ (condition < 0 && ret < prev) ||
+ (condition > 0 && ret > prev)) {
+ ret = nouveau_volt_set(volt, ret);
+ } else {
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+int
+_nouveau_volt_init(struct nouveau_object *object)
+{
+ struct nouveau_volt *volt = (void *)object;
+ int ret;
+
+ ret = nouveau_subdev_init(&volt->base);
+ if (ret)
+ return ret;
+
+ ret = volt->get(volt);
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ nv_debug(volt, "current voltage unknown\n");
+ return 0;
+ }
+
+ nv_info(volt, "GPU voltage: %duv\n", ret);
+ return 0;
+}
+
+void
+_nouveau_volt_dtor(struct nouveau_object *object)
+{
+ struct nouveau_volt *volt = (void *)object;
+ nouveau_subdev_destroy(&volt->base);
+}
+
+int
+nouveau_volt_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int length, void **pobject)
+{
+ struct nouveau_bios *bios = nouveau_bios(parent);
+ struct nouveau_volt *volt;
+ struct nvbios_volt_entry ivid;
+ struct nvbios_volt info;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ int ret, i;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
+ "voltage", length, pobject);
+ volt = *pobject;
+ if (ret)
+ return ret;
+
+ volt->get = nouveau_volt_get;
+ volt->set = nouveau_volt_set;
+ volt->set_id = nouveau_volt_set_id;
+
+ data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
+ if (data && info.vidmask && info.base && info.step) {
+ for (i = 0; i < info.vidmask + 1; i++) {
+ if (info.base >= info.min &&
+ info.base <= info.max) {
+ volt->vid[volt->vid_nr].uv = info.base;
+ volt->vid[volt->vid_nr].vid = i;
+ volt->vid_nr++;
+ }
+ info.base += info.step;
+ }
+ volt->vid_mask = info.vidmask;
+ } else
+ if (data && info.vidmask) {
+ for (i = 0; i < cnt; i++) {
+ data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
+ &ivid);
+ if (data) {
+ volt->vid[volt->vid_nr].uv = ivid.voltage;
+ volt->vid[volt->vid_nr].vid = ivid.vid;
+ volt->vid_nr++;
+ }
+ }
+ volt->vid_mask = info.vidmask;
+ }
+
+ if (volt->vid_nr) {
+ for (i = 0; i < volt->vid_nr; i++) {
+ nv_debug(volt, "VID %02x: %duv\n",
+ volt->vid[i].vid, volt->vid[i].uv);
+ }
+
+ /*XXX: this is an assumption.. there probably exist boards
+ * out there with i2c-connected voltage controllers too..
+ */
+ ret = nouveau_voltgpio_init(volt);
+ if (ret == 0) {
+ volt->vid_get = nouveau_voltgpio_get;
+ volt->vid_set = nouveau_voltgpio_set;
+ }
+ }
+
+ return ret;
+}
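
nouveau_volt_create_() above fills the VID table from one of two VBIOS layouts: a range-style table (base, step, vidmask) that is expanded arithmetically, or an explicit per-entry table; nouveau_volt_map() then resolves voltage-map ids, falling back to id * 10000 microvolts (10 mV steps) when no vmap entry exists. A standalone sketch of the range expansion with made-up BIOS numbers:

#include <stdio.h>

/* Mirrors the range-style loop in nouveau_volt_create_(); base/step/min/max
 * and vidmask are invented values, voltages in microvolts. */
int main(void)
{
	unsigned int base = 825000, step = 12500;
	unsigned int min = 825000, max = 900000;
	unsigned int vidmask = 0x07;		/* three VID bits -> 8 candidates */
	unsigned int vid;

	for (vid = 0; vid < vidmask + 1; vid++, base += step) {
		if (base >= min && base <= max)
			printf("VID %02x: %uuv\n", vid, base);
	}
	return 0;	/* prints VIDs 00..06; 07 would exceed max */
}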
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
new file mode 100644
index 00000000000..755fa91bcd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+#include <subdev/gpio.h>
+#include <subdev/bios/gpio.h>
+
+static const u8 tags[] = {
+ DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
+ DCB_GPIO_VID4, DCB_GPIO_VID5, DCB_GPIO_VID6, DCB_GPIO_VID7,
+};
+
+int
+nouveau_voltgpio_get(struct nouveau_volt *volt)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(volt);
+ u8 vid = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tags); i++) {
+ if (volt->vid_mask & (1 << i)) {
+ int ret = gpio->get(gpio, 0, tags[i], 0xff);
+ if (ret < 0)
+ return ret;
+ vid |= ret << i;
+ }
+ }
+
+ return vid;
+}
+
+int
+nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(volt);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
+ if (volt->vid_mask & (1 << i)) {
+ int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_voltgpio_init(struct nouveau_volt *volt)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(volt);
+ struct dcb_gpio_func func;
+ int i;
+
+ /* check we have gpio function info for each vid bit. on some
+ * boards (ie. nvs295) the vid mask has more bits than there
+ * are valid gpio functions... from traces, nvidia appear to
+ * just touch the existing ones, so let's mask off the invalid
+ * bits and continue with life
+ */
+ for (i = 0; i < ARRAY_SIZE(tags); i++) {
+ if (volt->vid_mask & (1 << i)) {
+ int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+ if (ret) {
+ if (ret != -ENOENT)
+ return ret;
+ nv_debug(volt, "VID bit %d has no GPIO\n", i);
+ volt->vid_mask &= ~(1 << i);
+ }
+ }
+ }
+
+ return 0;
+}
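
The GPIO voltage backend above treats the VID as a plain binary number spread over up to eight DCB VID lines: reading ORs each enabled line back into bit i, writing shifts the requested VID down one bit per line, and nouveau_voltgpio_init() drops any bit of vid_mask that has no GPIO function behind it. For example, with vid_mask = 0x07 and lines 0 and 2 driven high, nouveau_voltgpio_get() returns 0x05. A toy model of that packing:

#include <stdio.h>

/* Toy stand-in for the read path: gpio_line[] plays the role of
 * gpio->get() and vid_mask the role of volt->vid_mask. */
int main(void)
{
	unsigned int vid_mask = 0x07;
	int gpio_line[8] = { 1, 0, 1, 0, 0, 0, 0, 0 };	/* lines 0 and 2 high */
	unsigned int vid = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (vid_mask & (1 << i))
			vid |= (unsigned int)gpio_line[i] << i;
	}
	printf("vid = 0x%02x\n", vid);	/* 0x05 */
	return 0;
}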
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
new file mode 100644
index 00000000000..87d5358376a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+
+struct nv40_volt_priv {
+ struct nouveau_volt base;
+};
+
+static int
+nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv40_volt_priv *priv;
+ int ret;
+
+ ret = nouveau_volt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv40_volt_oclass = {
+ .handle = NV_SUBDEV(VOLT, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_volt_ctor,
+ .dtor = _nouveau_volt_dtor,
+ .init = _nouveau_volt_init,
+ .fini = _nouveau_volt_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
index ea3f5b8a0f9..424a489d0f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Makefile
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -5,6 +5,7 @@ nouveau-y += dispnv04/dac.o
nouveau-y += dispnv04/dfp.o
nouveau-y += dispnv04/disp.o
nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/overlay.o
nouveau-y += dispnv04/tvmodesnv17.o
nouveau-y += dispnv04/tvnv04.o
nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462883e..2a15b98b4d2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.nvclk_khz = NVClk;
sim_data.bpp = bpp;
sim_data.two_heads = nv_two_heads(dev);
- if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
- (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
if (nv_device(drm->device)->card_type < NV_20)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
- else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
- (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11360f..0e3270c3ffd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->MiscOutReg = 0x23; /* +hsync +vsync */
}
- regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
/*
* Time Sequencer
*/
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff009..936a71c5908 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,10 +490,10 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
- dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+ if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
+ dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
- nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+ nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
} else {
nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
@@ -625,13 +625,15 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, 2);
- struct i2c_board_info info[] = {
+ struct nouveau_i2c_board_info info[] = {
{
- .type = "sil164",
- .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
- .platform_data = &(struct sil164_encoder_params) {
- SIL164_INPUT_EDGE_RISING
- }
+ {
+ .type = "sil164",
+ .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+ .platform_data = &(struct sil164_encoder_params) {
+ SIL164_INPUT_EDGE_RISING
+ }
+ }, 0
},
{ }
};
@@ -646,7 +648,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
return;
drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &port->adapter, &info[type]);
+ &port->adapter, &info[type].dev);
}
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4908d3fd048..b13ff0fc42d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -140,6 +140,8 @@ nv04_display_create(struct drm_device *dev)
func->save(encoder);
}
+ nouveau_overlay_init(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187f0a7..56a28db0400 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -123,11 +123,14 @@ int nv04_tv_create(struct drm_connector *, struct dcb_output *);
/* nv17_tv.c */
int nv17_tv_create(struct drm_connector *, struct dcb_output *);
+/* overlay.c */
+void nouveau_overlay_init(struct drm_device *dev);
+
static inline bool
nv_two_heads(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pci_device & 0x0ff0;
+ const int impl = dev->pdev->device & 0x0ff0;
if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +142,14 @@ nv_two_heads(struct drm_device *dev)
static inline bool
nv_gf4_disp_arch(struct drm_device *dev)
{
- return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+ return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
}
static inline bool
nv_two_reg_pll(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pci_device & 0x0ff0;
+ const int impl = dev->pdev->device & 0x0ff0;
if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b8620..aca76af115b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,6 +27,7 @@
#include "hw.h"
#include <subdev/bios/pll.h>
+#include <subdev/fb.h>
#include <subdev/clock.h>
#include <subdev/timer.h>
@@ -220,7 +221,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
int ret;
if (plltype == PLL_MEMORY &&
- (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+ (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +231,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
return 400000 / mpllP;
} else
if (plltype == PLL_MEMORY &&
- (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+ (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
@@ -664,6 +665,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_timer *ptimer = nouveau_timer(device);
+ struct nouveau_fb *pfb = nouveau_fb(device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
@@ -680,10 +682,10 @@ nv_load_state_ext(struct drm_device *dev, int head,
nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
- nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
- nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
- nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
- nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
+ nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
+ nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
+ nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
+ nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -740,7 +742,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
/* NV11 and NV20 stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
- if (nv_device(drm->device)->card_type == NV_10) {
+ if (nv_device(drm->device)->card_type < NV_20) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
new file mode 100644
index 00000000000..3618ac6b631
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Implementation based on the pre-KMS implementation in xf86-video-nouveau,
+ * written by Arthur Huillet.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+
+#include "nouveau_drm.h"
+
+#include "nouveau_bo.h"
+#include "nouveau_connector.h"
+#include "nouveau_display.h"
+#include "nvreg.h"
+
+
+struct nouveau_plane {
+ struct drm_plane base;
+ bool flip;
+ struct nouveau_bo *cur;
+
+ struct {
+ struct drm_property *colorkey;
+ struct drm_property *contrast;
+ struct drm_property *brightness;
+ struct drm_property *hue;
+ struct drm_property *saturation;
+ struct drm_property *iturbt_709;
+ } props;
+
+ int colorkey;
+ int contrast;
+ int brightness;
+ int hue;
+ int saturation;
+ int iturbt_709;
+};
+
+static uint32_t formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_UYVY,
+};
+
+/* Sine can be approximated with
+ * http://en.wikipedia.org/wiki/Bhaskara_I's_sine_approximation_formula
+ * sin(x degrees) ~= 4 x (180 - x) / (40500 - x (180 - x) )
+ * Note that this only works for the range [0, 180].
+ * Also note that sin(x) == -sin(x - 180)
+ */
+static inline int
+sin_mul(int degrees, int factor)
+{
+ if (degrees > 180) {
+ degrees -= 180;
+ factor *= -1;
+ }
+ return factor * 4 * degrees * (180 - degrees) /
+ (40500 - degrees * (180 - degrees));
+}
+
+/* cos(x) = sin(x + 90) */
+static inline int
+cos_mul(int degrees, int factor)
+{
+ return sin_mul((degrees + 90) % 360, factor);
+}
+
+static int
+nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_bo *cur = nv_plane->cur;
+ bool flip = nv_plane->flip;
+ int format = ALIGN(src_w * 4, 0x100);
+ int soff = NV_PCRTC0_SIZE * nv_crtc->index;
+ int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
+ int ret;
+
+ if (format > 0xffff)
+ return -EINVAL;
+
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ nv_plane->cur = nv_fb->nvbo;
+
+ /* Source parameters given in 16.16 fixed point, ignore fractional. */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
+ nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
+
+ nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
+ nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+ nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
+ nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
+ nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
+ nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
+ nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
+ nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
+
+ if (fb->pixel_format == DRM_FORMAT_NV12) {
+ format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+ format |= NV_PVIDEO_FORMAT_PLANAR;
+ }
+ if (nv_plane->iturbt_709)
+ format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+ if (nv_plane->colorkey & (1 << 24))
+ format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+
+ if (fb->pixel_format == DRM_FORMAT_NV12) {
+ nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
+ nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
+ nv_fb->nvbo->bo.offset + fb->offsets[1]);
+ }
+ nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+ nv_wr32(dev, NV_PVIDEO_STOP, 0);
+ /* TODO: wait for vblank? */
+ nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
+ nv_plane->flip = !flip;
+
+ if (cur)
+ nouveau_bo_unpin(cur);
+
+ return 0;
+}
+
+static int
+nv10_disable_plane(struct drm_plane *plane)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+ nv_wr32(dev, NV_PVIDEO_STOP, 1);
+ if (nv_plane->cur) {
+ nouveau_bo_unpin(nv_plane->cur);
+ nv_plane->cur = NULL;
+ }
+
+ return 0;
+}
+
+static void
+nv10_destroy_plane(struct drm_plane *plane)
+{
+ nv10_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
+
+static void
+nv10_set_params(struct nouveau_plane *plane)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->base.dev);
+ u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
+ u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
+ (cos_mul(plane->hue, plane->saturation) & 0xffff);
+ u32 format = 0;
+
+ nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
+ nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
+ nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
+ nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
+ nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
+
+ if (plane->cur) {
+ if (plane->iturbt_709)
+ format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+ if (plane->colorkey & (1 << 24))
+ format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+ nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
+ NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
+ NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
+ format);
+ }
+}
+
+static int
+nv10_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+ if (property == nv_plane->props.colorkey)
+ nv_plane->colorkey = value;
+ else if (property == nv_plane->props.contrast)
+ nv_plane->contrast = value;
+ else if (property == nv_plane->props.brightness)
+ nv_plane->brightness = value;
+ else if (property == nv_plane->props.hue)
+ nv_plane->hue = value;
+ else if (property == nv_plane->props.saturation)
+ nv_plane->saturation = value;
+ else if (property == nv_plane->props.iturbt_709)
+ nv_plane->iturbt_709 = value;
+ else
+ return -EINVAL;
+
+ nv10_set_params(nv_plane);
+ return 0;
+}
+
+static const struct drm_plane_funcs nv10_plane_funcs = {
+ .update_plane = nv10_update_plane,
+ .disable_plane = nv10_disable_plane,
+ .set_property = nv10_set_property,
+ .destroy = nv10_destroy_plane,
+};
+
+static void
+nv10_overlay_init(struct drm_device *device)
+{
+ struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+ int ret;
+
+ if (!plane)
+ return;
+
+ ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
+ &nv10_plane_funcs,
+ formats, ARRAY_SIZE(formats), false);
+ if (ret)
+ goto err;
+
+ /* Set up the plane properties */
+ plane->props.colorkey = drm_property_create_range(
+ device, 0, "colorkey", 0, 0x01ffffff);
+ plane->props.contrast = drm_property_create_range(
+ device, 0, "contrast", 0, 8192 - 1);
+ plane->props.brightness = drm_property_create_range(
+ device, 0, "brightness", 0, 1024);
+ plane->props.hue = drm_property_create_range(
+ device, 0, "hue", 0, 359);
+ plane->props.saturation = drm_property_create_range(
+ device, 0, "saturation", 0, 8192 - 1);
+ plane->props.iturbt_709 = drm_property_create_range(
+ device, 0, "iturbt_709", 0, 1);
+ if (!plane->props.colorkey ||
+ !plane->props.contrast ||
+ !plane->props.brightness ||
+ !plane->props.hue ||
+ !plane->props.saturation ||
+ !plane->props.iturbt_709)
+ goto cleanup;
+
+ plane->colorkey = 0;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.colorkey, plane->colorkey);
+
+ plane->contrast = 0x1000;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.contrast, plane->contrast);
+
+ plane->brightness = 512;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.brightness, plane->brightness);
+
+ plane->hue = 0;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.hue, plane->hue);
+
+ plane->saturation = 0x1000;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.saturation, plane->saturation);
+
+ plane->iturbt_709 = 0;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.iturbt_709, plane->iturbt_709);
+
+ nv10_set_params(plane);
+ nv_wr32(dev, NV_PVIDEO_STOP, 1);
+ return;
+cleanup:
+ drm_plane_cleanup(&plane->base);
+err:
+ kfree(plane);
+ nv_error(dev, "Failed to create plane\n");
+}
+
+void
+nouveau_overlay_init(struct drm_device *device)
+{
+ struct nouveau_device *dev = nouveau_dev(device);
+ if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+ nv10_overlay_init(device);
+}
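
For reference, the integer sine/cosine helpers added above (Bhaskara I's approximation) and the fixed-point conversions used by nv10_update_plane() can be exercised in isolation. The sketch below copies sin_mul()/cos_mul() from the new file and packs a chrominance word the same way nv10_set_params() does; the hue/saturation values and the 720-to-1024 scaling case are only illustrative.

#include <math.h>
#include <stdio.h>

/* Bhaskara I: sin(x deg) ~= 4x(180-x) / (40500 - x(180-x)) for x in [0,180],
 * extended to [0,360) via sin(x) == -sin(x - 180); mirrors sin_mul() above. */
static int sin_mul(int degrees, int factor)
{
	if (degrees > 180) {
		degrees -= 180;
		factor *= -1;
	}
	return factor * 4 * degrees * (180 - degrees) /
	       (40500 - degrees * (180 - degrees));
}

/* cos(x) = sin(x + 90), as in cos_mul() above. */
static int cos_mul(int degrees, int factor)
{
	return sin_mul((degrees + 90) % 360, factor);
}

int main(void)
{
	int hue = 30, saturation = 0x1000;	/* illustrative values */

	/* Chrominance word packed the same way as in nv10_set_params(). */
	unsigned int chroma = ((sin_mul(hue, saturation) & 0xffff) << 16) |
			      (cos_mul(hue, saturation) & 0xffff);
	printf("chroma = 0x%08x (exact sin*sat = %.0f)\n", chroma,
	       sin(hue * 3.14159265358979 / 180.0) * saturation);

	/* The source rectangle arrives in 16.16 fixed point; the scale
	 * factors written to NV_PVIDEO_DS_DX / DT_DY are 12.20 ratios. */
	unsigned int src_w = 720u << 16, crtc_w = 1024;
	printf("ds_dx  = 0x%08x\n", ((src_w >> 16) << 20) / crtc_w);
	return 0;
}
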
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index bf13db4e863..cc4b208ce54 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -37,15 +37,18 @@
#include <subdev/i2c.h>
-static struct i2c_board_info nv04_tv_encoder_info[] = {
+static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
{
- I2C_BOARD_INFO("ch7006", 0x75),
- .platform_data = &(struct ch7006_encoder_params) {
- CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
- 0, 0, 0,
- CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
- CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
- }
+ {
+ I2C_BOARD_INFO("ch7006", 0x75),
+ .platform_data = &(struct ch7006_encoder_params) {
+ CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+ 0, 0, 0,
+ CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+ CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+ }
+ },
+ 0
},
{ }
};
@@ -229,7 +232,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
/* Run the slave-specific initialization */
ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &port->adapter, &nv04_tv_encoder_info[type]);
+ &port->adapter,
+ &nv04_tv_encoder_info[type].dev);
if (ret < 0)
goto fail_cleanup;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7bfd1..6828d81ed7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,6 +87,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
case NV_04:
return 0x006e;
case NV_10:
+ case NV_11:
case NV_20:
case NV_30:
case NV_40:
@@ -130,7 +131,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+ drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
}
if (chan->heap.block_size)
@@ -178,10 +179,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- getparam->value = dev->pci_vendor;
+ getparam->value = dev->pdev->vendor;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- getparam->value = dev->pci_device;
+ getparam->value = dev->pdev->device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_pci_device_is_agp(dev))
@@ -297,7 +298,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- if (device->card_type < NV_C0) {
+ if (device->card_type < NV_10) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
init->subchan[1].handle = NvSw;
@@ -320,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
}
- ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+ ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
&init->notifier_handle);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dd7d2e18271..95c74045404 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, nvidia_handle;
- acpi_status status;
+ acpi_handle dhandle;
int retval = 0;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM"))
return false;
- }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -317,6 +314,16 @@ static bool nouveau_dsm_detect(void)
has_optimus = 1;
}
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
+ vga_count++;
+
+ retval = nouveau_dsm_pci_probe(pdev);
+ if (retval & NOUVEAU_DSM_HAS_MUX)
+ has_dsm |= 1;
+ if (retval & NOUVEAU_DSM_HAS_OPT)
+ has_optimus = 1;
+ }
+
/* find the optimus DSM or the old v1 DSM */
if (has_optimus == 1) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
@@ -407,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
return false;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -441,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 6e7a55f93a8..2953c4e91e1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -11,10 +11,28 @@ MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
static int nouveau_agpmode = -1;
module_param_named(agpmode, nouveau_agpmode, int, 0400);
+struct nouveau_agpmode_quirk {
+ u16 hostbridge_vendor;
+ u16 hostbridge_device;
+ u16 chip_vendor;
+ u16 chip_device;
+ int mode;
+};
+
+static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
+ /* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
+ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+
+ {},
+};
+
static unsigned long
-get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
+get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
{
struct nouveau_device *device = nv_device(drm->device);
+ struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
+ int agpmode = nouveau_agpmode;
+ unsigned long mode = info->mode;
/*
* FW seems to be broken on nv18, it makes the card lock up
@@ -24,11 +42,27 @@ get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
mode &= ~PCI_AGP_COMMAND_FW;
/*
+ * Go through the quirks list and adjust the agpmode accordingly.
+ */
+ while (agpmode == -1 && quirk->hostbridge_vendor) {
+ if (info->id_vendor == quirk->hostbridge_vendor &&
+ info->id_device == quirk->hostbridge_device &&
+ device->pdev->vendor == quirk->chip_vendor &&
+ device->pdev->device == quirk->chip_device) {
+ agpmode = quirk->mode;
+ nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
+ agpmode);
+ break;
+ }
+ ++quirk;
+ }
+
+ /*
* AGP mode set in the command line.
*/
- if (nouveau_agpmode > 0) {
+ if (agpmode > 0) {
bool agpv3 = mode & 0x8;
- int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+ int rate = agpv3 ? agpmode / 4 : agpmode;
mode = (mode & ~0x7) | (rate & 0x7);
}
@@ -90,7 +124,7 @@ nouveau_agp_reset(struct nouveau_drm *drm)
if (ret)
return;
- mode.mode = get_agp_mode(drm, info.mode);
+ mode.mode = get_agp_mode(drm, &info);
mode.mode &= ~PCI_AGP_COMMAND_FW;
ret = drm_agp_enable(dev, mode);
@@ -139,7 +173,7 @@ nouveau_agp_init(struct nouveau_drm *drm)
}
/* see agp.h for the AGPSTAT_* modes available */
- mode.mode = get_agp_mode(drm, info.mode);
+ mode.mode = get_agp_mode(drm, &info);
ret = drm_agp_enable(dev, mode);
if (ret) {
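
The quirk handling added to get_agp_mode() is a linear scan of a sentinel-terminated table, matching both the host-bridge IDs reported in drm_agp_info and the GPU's own PCI IDs. A self-contained sketch of the same lookup follows; the one table entry copies the IDs from the patch, while the helper name and the standalone structure are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct agpmode_quirk {
	uint16_t hostbridge_vendor;
	uint16_t hostbridge_device;
	uint16_t chip_vendor;
	uint16_t chip_device;
	int mode;
};

/* VIA Apollo PRO133x host bridge + NVIDIA 0x0311: cap at AGP 2x (fdo #20341). */
static const struct agpmode_quirk quirks[] = {
	{ 0x1106, 0x0691, 0x10de, 0x0311, 2 },
	{ 0 }					/* sentinel */
};

/* Returns the forced mode, or the caller's default when no quirk matches. */
static int quirk_agpmode(uint16_t hb_vendor, uint16_t hb_device,
			 uint16_t gpu_vendor, uint16_t gpu_device, int def)
{
	const struct agpmode_quirk *q;

	for (q = quirks; q->hostbridge_vendor; q++) {
		if (q->hostbridge_vendor == hb_vendor &&
		    q->hostbridge_device == hb_device &&
		    q->chip_vendor == gpu_vendor &&
		    q->chip_device == gpu_device)
			return q->mode;
	}
	return def;
}

int main(void)
{
	printf("forced mode: %dX\n",
	       quirk_agpmode(0x1106, 0x0691, 0x10de, 0x0311, -1));
	return 0;
}
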
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad2176b7..630f6e84fc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@ nv40_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 31;
- bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+ bd = backlight_device_register("nv_backlight", connector->kdev, drm,
&nv40_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -204,7 +204,7 @@ nv50_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 100;
- bd = backlight_device_register("nv_backlight", &connector->kdev,
+ bd = backlight_device_register("nv_backlight", connector->kdev,
nv_encoder, ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e7287675ec..4c3feaaa103 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
#ifdef __powerpc__
/* Powerbook specific quirks */
if (script == LVDS_RESET &&
- (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329))
+ (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
+ dev->pdev->device == 0x0329))
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d0627..c0fde6b9393 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,12 +98,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
if (tile) {
spin_lock(&drm->tile.lock);
- if (fence) {
- /* Mark it as pending. */
- tile->fence = fence;
- nouveau_fence_ref(fence);
- }
-
+ tile->fence = nouveau_fence_ref(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -146,7 +141,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem))
+ if (unlikely(nvbo->gem.filp))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -269,7 +264,8 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
struct nouveau_fb *pfb = nouveau_fb(drm->device);
u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
- if (nv_device(drm->device)->card_type == NV_10 &&
+ if ((nv_device(drm->device)->card_type == NV_10 ||
+ nv_device(drm->device)->card_type == NV_11) &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -982,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_channel *chan = chan = drm->ttm.chan;
+ struct nouveau_channel *chan = drm->ttm.chan;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
@@ -1267,7 +1263,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
+ return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}
static int
@@ -1461,14 +1457,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
+ struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
struct nouveau_fence *old_fence = NULL;
- if (likely(fence))
- nouveau_fence_ref(fence);
-
spin_lock(&nvbo->bo.bdev->fence_lock);
old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
+ nvbo->bo.sync_obj = new_fence;
spin_unlock(&nvbo->bo.bdev->fence_lock);
nouveau_fence_unref(&old_fence);
@@ -1551,7 +1545,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ nvbo->page_shift == vma->vm->vmm->spg_shift) {
if (node->sg)
nouveau_vm_map_sg_table(vma, 0, size, node);
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbbd4fa..ff17c1f432f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@ struct nouveau_bo {
u32 tile_flags;
struct nouveau_drm_tile *tile;
- struct drm_gem_object *gem;
+ /* Only valid if allocated via nouveau_gem_new() and iff you hold a
+ * gem reference to it! For debugging, use gem.filp != NULL to test
+ * whether it is valid. */
+ struct drm_gem_object gem;
/* protect by the ttm reservation lock */
int pin_refcnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e84f4c32331..cc5152be2cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -346,22 +346,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING(chan, 0x00000000);
- /* allocate software object class (used for fences on <= nv05, and
- * to signal flip completion), bind it to a subchannel.
- */
- if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+ /* allocate software object class (used for fences on <= nv05) */
+ if (device->card_type < NV_10) {
ret = nouveau_object_new(nv_object(client), chan->handle,
- NvSw, nouveau_abi16_swclass(chan->drm),
- NULL, 0, &object);
+ NvSw, 0x006e, NULL, 0, &object);
if (ret)
return ret;
swch = (void *)object->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = chan;
- }
- if (device->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9e9a1..1674882d60d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -100,6 +100,7 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ nouveau_event_ref(NULL, &nv_connector->hpd_func);
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -214,9 +215,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
} else {
connector->doublescan_allowed = true;
if (nv_device(drm->device)->card_type == NV_20 ||
- (nv_device(drm->device)->card_type == NV_10 &&
- (dev->pci_device & 0x0ff0) != 0x0100 &&
- (dev->pci_device & 0x0ff0) != 0x0150))
+ ((nv_device(drm->device)->card_type == NV_10 ||
+ nv_device(drm->device)->card_type == NV_11) &&
+ (dev->pdev->device & 0x0ff0) != 0x0100 &&
+ (dev->pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
@@ -932,10 +934,9 @@ nouveau_connector_hotplug_work(struct work_struct *work)
}
static int
-nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+nouveau_connector_hotplug(void *data, int index)
{
- struct nouveau_connector *nv_connector =
- container_of(event, struct nouveau_connector, hpd_func);
+ struct nouveau_connector *nv_connector = data;
schedule_work(&nv_connector->hpd_work);
return NVKM_EVENT_KEEP;
}
@@ -1007,10 +1008,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
DCB_GPIO_UNUSED, &nv_connector->hpd);
- nv_connector->hpd_func.func = nouveau_connector_hotplug;
if (ret)
nv_connector->hpd.func = DCB_GPIO_UNUSED;
+ if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_new(gpio->events, nv_connector->hpd.line,
+ nouveau_connector_hotplug,
+ nv_connector,
+ &nv_connector->hpd_func);
+ }
+
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
DRM_MODE_CONNECTOR_Unknown) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 6e399aad491..264a778f473 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -69,7 +69,7 @@ struct nouveau_connector {
struct dcb_gpio_func hpd;
struct work_struct hpd_work;
- struct nouveau_eventh hpd_func;
+ struct nouveau_eventh *hpd_func;
int dithering_mode;
int dithering_depth;
@@ -107,7 +107,4 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
-int
-nouveau_connector_bpp(struct drm_connector *);
-
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590f556..7809d92183c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,7 +26,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "nouveau_fbcon.h"
#include "dispnv04/hw.h"
@@ -38,19 +37,92 @@
#include "nouveau_fence.h"
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
#include <engine/disp.h>
#include <core/class.h>
+static int
+nouveau_display_vblank_handler(void *data, int head)
+{
+ struct nouveau_drm *drm = data;
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
+}
+
+int
+nouveau_display_vblank_enable(struct drm_device *dev, int head)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ if (disp) {
+ nouveau_event_get(disp->vblank[head]);
+ return 0;
+ }
+ return -EIO;
+}
+
+void
+nouveau_display_vblank_disable(struct drm_device *dev, int head)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ if (disp)
+ nouveau_event_put(disp->vblank[head]);
+}
+
+static void
+nouveau_display_vblank_fini(struct drm_device *dev)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ int i;
+
+ if (disp->vblank) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++)
+ nouveau_event_ref(NULL, &disp->vblank[i]);
+ kfree(disp->vblank);
+ disp->vblank = NULL;
+ }
+
+ drm_vblank_cleanup(dev);
+}
+
+static int
+nouveau_display_vblank_init(struct drm_device *dev)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ int ret, i;
+
+ disp->vblank = kzalloc(dev->mode_config.num_crtc *
+ sizeof(*disp->vblank), GFP_KERNEL);
+ if (!disp->vblank)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ ret = nouveau_event_new(pdisp->vblank, i,
+ nouveau_display_vblank_handler,
+ drm, &disp->vblank[i]);
+ if (ret) {
+ nouveau_display_vblank_fini(dev);
+ return ret;
+ }
+ }
+
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ nouveau_display_vblank_fini(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -63,7 +135,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+ return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -227,9 +299,7 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
int
nouveau_display_init(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct drm_connector *connector;
int ret;
@@ -243,10 +313,7 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
- nouveau_event_get(gpio->events, conn->hpd.line,
- &conn->hpd_func);
- }
+ if (conn->hpd_func) nouveau_event_get(conn->hpd_func);
}
return ret;
@@ -255,18 +322,13 @@ nouveau_display_init(struct drm_device *dev)
void
nouveau_display_fini(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct drm_connector *connector;
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
- nouveau_event_put(gpio->events, conn->hpd.line,
- &conn->hpd_func);
- }
+ if (conn->hpd_func) nouveau_event_put(conn->hpd_func);
}
drm_kms_helper_poll_disable(dev);
@@ -336,6 +398,11 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ if (nv_device(drm->device)->chipset < 0x11)
+ dev->mode_config.async_page_flip = false;
+ else
+ dev->mode_config.async_page_flip = true;
+
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
@@ -352,7 +419,7 @@ nouveau_display_create(struct drm_device *dev)
goto disp_create_err;
if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ ret = nouveau_display_vblank_init(dev);
if (ret)
goto vblank_err;
}
@@ -374,7 +441,7 @@ nouveau_display_destroy(struct drm_device *dev)
struct nouveau_display *disp = nouveau_display(dev);
nouveau_backlight_exit(dev);
- drm_vblank_cleanup(dev);
+ nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -394,7 +461,7 @@ nouveau_display_suspend(struct drm_device *dev)
nouveau_display_fini(dev);
- NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
+ NV_INFO(drm, "unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -492,19 +559,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
goto fail;
/* Emit the pageflip */
- ret = RING_SPACE(chan, 3);
+ ret = RING_SPACE(chan, 2);
if (ret)
goto fail;
- if (nv_device(drm->device)->card_type < NV_C0) {
+ if (nv_device(drm->device)->card_type < NV_C0)
BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING (chan, 0x00000000);
- OUT_RING (chan, 0x00000000);
- } else {
- BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, 0);
- BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
- }
+ else
+ BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
ret = nouveau_fence_new(chan, false, pfence);
@@ -521,22 +584,16 @@ fail:
int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+ struct drm_pending_vblank_event *event, u32 flags)
{
+ const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan = NULL;
+ struct nouveau_channel *chan = drm->channel;
struct nouveau_fence *fence;
- struct ttm_validate_buffer resv[2] = {
- { .bo = &old_bo->bo },
- { .bo = &new_bo->bo },
- };
- struct ww_acquire_ctx ticket;
- LIST_HEAD(res);
int ret;
if (!drm->channel)
@@ -546,26 +603,22 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (!s)
return -ENOMEM;
- /* Choose the channel the flip will be handled in */
- spin_lock(&old_bo->bo.bdev->fence_lock);
- fence = new_bo->bo.sync_obj;
- if (fence)
- chan = fence->channel;
- if (!chan)
- chan = drm->channel;
- spin_unlock(&old_bo->bo.bdev->fence_lock);
+ /* synchronise rendering channel with the kernel's channel */
+ spin_lock(&new_bo->bo.bdev->fence_lock);
+ fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+ spin_unlock(&new_bo->bo.bdev->fence_lock);
+ ret = nouveau_fence_sync(fence, chan);
+ if (ret)
+ return ret;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
if (ret)
goto fail_free;
-
- list_add(&resv[1].head, &res);
}
- list_add(&resv[0].head, &res);
mutex_lock(&chan->cli->mutex);
- ret = ttm_eu_reserve_buffers(&ticket, &res);
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
if (ret)
goto fail_unpin;
@@ -577,12 +630,29 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
- ret = nv50_display_flip_next(crtc, fb, chan, 0);
+ ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
if (ret)
goto fail_unreserve;
} else {
struct nv04_display *dispnv04 = nv04_display(dev);
- nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
+ int head = nouveau_crtc(crtc)->index;
+
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ goto fail_unreserve;
+
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
+ }
+
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -593,14 +663,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Update the crtc struct and cleanup */
crtc->fb = fb;
- ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
nouveau_fence_unref(&fence);
return 0;
fail_unreserve:
- ttm_eu_backoff_reservation(&ticket, &res);
+ ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&chan->cli->mutex);
if (old_bo != new_bo)
@@ -674,8 +745,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
- drm_gem_object_unreference_unlocked(bo->gem);
+ ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
}
@@ -688,7 +759,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
- struct nouveau_bo *bo = gem->driver_private;
+ struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
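
Two small policy decisions are introduced in nouveau_display.c above: async_page_flip is only advertised for chipsets 0x11 and newer, and nouveau_crtc_page_flip() maps the async flag to a swap interval of zero. A minimal sketch of that decision logic is below; the flag value is defined locally to mirror the DRM UAPI flag and the helper names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_FLIP_ASYNC	0x2	/* mirrors DRM_MODE_PAGE_FLIP_ASYNC */

/* Chipsets older than NV11 cannot flip outside the vblank period. */
static bool async_flip_supported(int chipset)
{
	return chipset >= 0x11;
}

/* Swap interval for the flip: 0 = immediate (tearing allowed), 1 = wait for
 * vblank, matching the swap_interval computation in the hunk above. */
static int flip_swap_interval(unsigned int flags)
{
	return (flags & PAGE_FLIP_ASYNC) ? 0 : 1;
}

int main(void)
{
	printf("nv04 async supported: %d\n", async_flip_supported(0x04));
	printf("nv17 async supported: %d, interval(async)=%d, interval(sync)=%d\n",
	       async_flip_supported(0x17),
	       flip_swap_interval(PAGE_FLIP_ASYNC),
	       flip_swap_interval(0));
	return 0;
}
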
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 025c66f8e0e..8bc8bab90e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,8 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
+ struct nouveau_eventh **vblank;
+
struct drm_property *dithering_mode;
struct drm_property *dithering_depth;
struct drm_property *underscan_property;
@@ -59,6 +61,8 @@ void nouveau_display_fini(struct drm_device *dev);
int nouveau_display_suspend(struct drm_device *dev);
void nouveau_display_repin(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
+int nouveau_display_vblank_enable(struct drm_device *, int);
+void nouveau_display_vblank_disable(struct drm_device *, int);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 690d5930ce3..984004d66a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -51,9 +51,11 @@ enum {
NvSubCtxSurf2D = 0,
NvSubSw = 1,
NvSubImageBlit = 2,
- NvSub2D = 3,
NvSubGdiRect = 3,
- NvSubCopy = 4,
+
+ NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+ NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+ FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
};
/* Object handles. */
@@ -194,7 +196,6 @@ WIND_RING(struct nouveau_channel *chan)
#define NV84_SUBCHAN_UEVENT 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c536240..7a3759f1c41 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,7 @@
#include <engine/device.h>
#include <engine/disp.h>
#include <engine/fifo.h>
+#include <engine/software.h>
#include <subdev/vm.h>
@@ -46,7 +47,8 @@
#include "nouveau_gem.h"
#include "nouveau_agp.h"
#include "nouveau_vga.h"
-#include "nouveau_pm.h"
+#include "nouveau_sysfs.h"
+#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
#include "nouveau_ioctl.h"
@@ -78,41 +80,6 @@ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
static struct drm_driver driver;
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
- struct nouveau_drm *drm =
- container_of(event, struct nouveau_drm, vblank[head]);
- drm_handle_vblank(drm->dev, head);
- return NVKM_EVENT_KEEP;
-}
-
-static int
-nouveau_drm_vblank_enable(struct drm_device *dev, int head)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-
- if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
- return -EIO;
- WARN_ON_ONCE(drm->vblank[head].func);
- drm->vblank[head].func = nouveau_drm_vblank_handler;
- nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
- return 0;
-}
-
-static void
-nouveau_drm_vblank_disable(struct drm_device *dev, int head)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- if (drm->vblank[head].func)
- nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
- else
- WARN_ON_ONCE(1);
- drm->vblank[head].func = NULL;
-}
-
static u64
nouveau_name(struct pci_dev *pdev)
{
@@ -177,7 +144,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->chipset < 0x17) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_11 ||
+ device->chipset < 0x17) ret = nv10_fence_create(drm);
else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
@@ -224,6 +192,32 @@ nouveau_accel_init(struct nouveau_drm *drm)
return;
}
+ ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
+ nouveau_abi16_swclass(drm), NULL, 0, &object);
+ if (ret == 0) {
+ struct nouveau_software_chan *swch = (void *)object->parent;
+ ret = RING_SPACE(drm->channel, 2);
+ if (ret == 0) {
+ if (device->card_type < NV_C0) {
+ BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+ OUT_RING (drm->channel, NVDRM_NVSW);
+ } else
+ if (device->card_type < NV_E0) {
+ BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
+ OUT_RING (drm->channel, 0x001f0000);
+ }
+ }
+ swch = (void *)object->parent;
+ swch->flip = nouveau_flip_complete;
+ swch->flip_data = drm->channel;
+ }
+
+ if (ret) {
+ NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
+ nouveau_accel_fini(drm);
+ return;
+ }
+
if (device->card_type < NV_C0) {
ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
&drm->notify);
@@ -418,8 +412,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
goto fail_dispinit;
}
- nouveau_pm_init(dev);
-
+ nouveau_sysfs_init(dev);
+ nouveau_hwmon_init(dev);
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
@@ -455,8 +449,8 @@ nouveau_drm_unload(struct drm_device *dev)
pm_runtime_get_sync(dev->dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
-
- nouveau_pm_fini(dev);
+ nouveau_hwmon_fini(dev);
+ nouveau_sysfs_fini(dev);
if (dev->mode_config.num_crtc)
nouveau_display_fini(dev);
@@ -496,16 +490,16 @@ nouveau_do_suspend(struct drm_device *dev)
int ret;
if (dev->mode_config.num_crtc) {
- NV_SUSPEND(drm, "suspending display...\n");
+ NV_INFO(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev);
if (ret)
return ret;
}
- NV_SUSPEND(drm, "evicting buffers...\n");
+ NV_INFO(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
- NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
+ NV_INFO(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
@@ -518,7 +512,7 @@ nouveau_do_suspend(struct drm_device *dev)
return ret;
}
- NV_SUSPEND(drm, "suspending client object trees...\n");
+ NV_INFO(drm, "suspending client object trees...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm))
return -ENOMEM;
@@ -530,7 +524,7 @@ nouveau_do_suspend(struct drm_device *dev)
goto fail_client;
}
- NV_SUSPEND(drm, "suspending kernel object tree...\n");
+ NV_INFO(drm, "suspending kernel object tree...\n");
ret = nouveau_client_fini(&drm->client.base, true);
if (ret)
goto fail_client;
@@ -544,7 +538,7 @@ fail_client:
}
if (dev->mode_config.num_crtc) {
- NV_SUSPEND(drm, "resuming display...\n");
+ NV_INFO(drm, "resuming display...\n");
nouveau_display_resume(dev);
}
return ret;
@@ -563,7 +557,6 @@ int nouveau_pmops_suspend(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 1);
- nv_suspend_set_printk_level(NV_DBG_INFO);
ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
@@ -571,8 +564,6 @@ int nouveau_pmops_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
- nv_suspend_set_printk_level(NV_DBG_DEBUG);
-
return 0;
}
@@ -582,15 +573,15 @@ nouveau_do_resume(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- NV_SUSPEND(drm, "re-enabling device...\n");
+ NV_INFO(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
- NV_SUSPEND(drm, "resuming kernel object tree...\n");
+ NV_INFO(drm, "resuming kernel object tree...\n");
nouveau_client_init(&drm->client.base);
nouveau_agp_init(drm);
- NV_SUSPEND(drm, "resuming client object trees...\n");
+ NV_INFO(drm, "resuming client object trees...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
@@ -599,10 +590,9 @@ nouveau_do_resume(struct drm_device *dev)
}
nouveau_run_vbios_init(dev);
- nouveau_pm_resume(dev);
if (dev->mode_config.num_crtc) {
- NV_SUSPEND(drm, "resuming display...\n");
+ NV_INFO(drm, "resuming display...\n");
nouveau_display_repin(dev);
}
@@ -626,19 +616,15 @@ int nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- nv_suspend_set_printk_level(NV_DBG_INFO);
ret = nouveau_do_resume(drm_dev);
- if (ret) {
- nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ if (ret)
return ret;
- }
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
if (drm_dev->mode_config.num_crtc)
nouveau_display_resume(drm_dev);
- nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -648,12 +634,10 @@ static int nouveau_pmops_freeze(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- nv_suspend_set_printk_level(NV_DBG_INFO);
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 1);
ret = nouveau_do_suspend(drm_dev);
- nv_suspend_set_printk_level(NV_DBG_DEBUG);
return ret;
}
@@ -663,18 +647,14 @@ static int nouveau_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- nv_suspend_set_printk_level(NV_DBG_INFO);
ret = nouveau_do_resume(drm_dev);
- if (ret) {
- nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ if (ret)
return ret;
- }
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
if (drm_dev->mode_config.num_crtc)
nouveau_display_resume(drm_dev);
- nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -816,8 +796,8 @@ driver = {
#endif
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = nouveau_drm_vblank_enable,
- .disable_vblank = nouveau_drm_vblank_disable,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -834,7 +814,6 @@ driver = {
.gem_prime_vmap = nouveau_gem_prime_vmap,
.gem_prime_vunmap = nouveau_gem_prime_vunmap,
- .gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
@@ -879,6 +858,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
if (nouveau_runtime_pm == 0)
return -EINVAL;
+ nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
nouveau_switcheroo_optimus_dsm();
@@ -915,6 +895,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
nv_mask(device, 0x88488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ nv_debug_level(NORMAL);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 994fd6ec373..4b0fb6c66be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -51,10 +51,12 @@ struct nouveau_drm_tile {
};
enum nouveau_drm_handle {
- NVDRM_CLIENT = 0xffffffff,
- NVDRM_DEVICE = 0xdddddddd,
- NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
- NVDRM_CHAN = 0xcccc0000, /* |= client chid */
+ NVDRM_CLIENT = 0xffffffff,
+ NVDRM_DEVICE = 0xdddddddd,
+ NVDRM_CONTROL = 0xdddddddc,
+ NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
+ NVDRM_CHAN = 0xcccc0000, /* |= client chid */
+ NVDRM_NVSW = 0x55550000,
};
struct nouveau_cli {
@@ -127,10 +129,10 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
- struct nouveau_eventh vblank[4];
/* power management */
- struct nouveau_pm *pm;
+ struct nouveau_hwmon *hwmon;
+ struct nouveau_sysfs *sysfs;
/* display power reference */
bool have_disp_power_ref;
@@ -154,7 +156,6 @@ nouveau_dev(struct drm_device *dev)
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
-#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf65c16..7903e0ed3c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
drm_fb_helper_fini(&fbcon->helper);
@@ -503,34 +503,45 @@ nouveau_fbcon_fini(struct drm_device *dev)
drm->fbcon = NULL;
}
-void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+void
+nouveau_fbcon_save_disable_accel(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
-
- drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
- drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ if (drm->fbcon) {
+ drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+ drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ }
}
-void nouveau_fbcon_restore_accel(struct drm_device *dev)
+void
+nouveau_fbcon_restore_accel(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+ if (drm->fbcon) {
+ drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+ }
}
-void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- console_lock();
- if (state == 0)
- nouveau_fbcon_save_disable_accel(dev);
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 1)
- nouveau_fbcon_restore_accel(dev);
- console_unlock();
+ if (drm->fbcon) {
+ console_lock();
+ if (state == 0)
+ nouveau_fbcon_save_disable_accel(dev);
+ fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ if (state == 1)
+ nouveau_fbcon_restore_accel(dev);
+ console_unlock();
+ }
}
-void nouveau_fbcon_zfill_all(struct drm_device *dev)
+void
+nouveau_fbcon_zfill_all(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- nouveau_fbcon_zfill(dev, drm->fbcon);
+ if (drm->fbcon) {
+ nouveau_fbcon_zfill(dev, drm->fbcon);
+ }
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index be3149932c2..40cf52e6d6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -165,17 +165,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
return !fence->channel;
}
-struct nouveau_fence_uevent {
- struct nouveau_eventh handler;
- struct nouveau_fence_priv *priv;
-};
-
static int
-nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+nouveau_fence_wait_uevent_handler(void *data, int index)
{
- struct nouveau_fence_uevent *uevent =
- container_of(event, struct nouveau_fence_uevent, handler);
- wake_up_all(&uevent->priv->waiting);
+ struct nouveau_fence_priv *priv = data;
+ wake_up_all(&priv->waiting);
return NVKM_EVENT_KEEP;
}
@@ -186,13 +180,16 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
struct nouveau_channel *chan = fence->channel;
struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
struct nouveau_fence_priv *priv = chan->drm->fence;
- struct nouveau_fence_uevent uevent = {
- .handler.func = nouveau_fence_wait_uevent_handler,
- .priv = priv,
- };
+ struct nouveau_eventh *handler;
int ret = 0;
- nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+ ret = nouveau_event_new(pfifo->uevent, 0,
+ nouveau_fence_wait_uevent_handler,
+ priv, &handler);
+ if (ret)
+ return ret;
+
+ nouveau_event_get(handler);
if (fence->timeout) {
unsigned long timeout = fence->timeout - jiffies;
@@ -224,7 +221,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
}
}
- nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+ nouveau_event_ref(NULL, &handler);
if (unlikely(ret < 0))
return ret;
@@ -309,7 +306,8 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
- kref_get(&fence->kref);
+ if (fence)
+ kref_get(&fence->kref);
return fence;
}
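
Making nouveau_fence_ref() tolerate a NULL fence is what lets callers such as nouveau_bo_fence(), validate_sync() and nv10_bo_put_tile_region() drop their own NULL checks elsewhere in this series. A minimal refcounting sketch of that pattern follows; the types and helper names are simplified stand-ins, not the driver's own.

#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
};

/* NULL-tolerant ref, as nouveau_fence_ref() becomes in this patch: callers
 * can ref-and-assign a possibly-NULL sync object in one line. */
static struct fence *fence_ref(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_unref(struct fence **pf)
{
	if (*pf && --(*pf)->refcount == 0)
		free(*pf);
	*pf = NULL;
}

int main(void)
{
	struct fence *sync_obj = NULL;			/* nothing pending */
	struct fence *fence = fence_ref(sync_obj);	/* no NULL check needed */

	fence_unref(&fence);				/* also safe on NULL */
	printf("fence = %p\n", (void *)fence);
	return 0;
}
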
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b71238c0..78a27f8ad7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
- return 0;
-}
-
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
- struct nouveau_bo *nvbo = gem->driver_private;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct ttm_buffer_object *bo = &nvbo->bo;
- if (!nvbo)
- return;
- nvbo->gem = NULL;
-
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
- ttm_bo_unref(&bo);
-
drm_gem_object_release(gem);
- kfree(gem);
+
+ /* reset filp so nouveau_bo_del_ttm() can test for it */
+ gem->filp = NULL;
+ ttm_bo_unref(&bo);
}
int
@@ -115,8 +106,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
if (mapped) {
spin_lock(&nvbo->bo.bdev->fence_lock);
- if (nvbo->bo.sync_obj)
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+ fence = nouveau_fence_ref(nvbo->bo.sync_obj);
spin_unlock(&nvbo->bo.bdev->fence_lock);
}
@@ -186,14 +176,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
if (nv_device(drm->device)->card_type >= NV_50)
nvbo->valid_domains &= domain;
- nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
- if (!nvbo->gem) {
+ /* Initialize the embedded gem-object. We return a single gem-reference
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ if (ret) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
}
- nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
- nvbo->gem->driver_private = nvbo;
+ nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
return 0;
}
@@ -250,15 +241,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+ ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
if (ret == 0) {
- ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+ ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(nvbo->gem);
+ drm_gem_object_unreference_unlocked(&nvbo->gem);
return ret;
}
@@ -266,7 +257,7 @@ static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
uint32_t write_domains, uint32_t valid_domains)
{
- struct nouveau_bo *nvbo = gem->driver_private;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
@@ -317,7 +308,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
- nouveau_bo_fence(nvbo, fence);
+ if (likely(fence))
+ nouveau_bo_fence(nvbo, fence);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
- drm_gem_object_unreference_unlocked(nvbo->gem);
+ drm_gem_object_unreference_unlocked(&nvbo->gem);
}
}
@@ -376,7 +368,7 @@ retry:
validate_fini(op, NULL);
return -ENOENT;
}
- nvbo = gem->driver_private;
+ nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
drm_gem_object_unreference_unlocked(gem);
@@ -446,8 +438,7 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
int ret = 0;
spin_lock(&nvbo->bo.bdev->fence_lock);
- if (nvbo->bo.sync_obj)
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+ fence = nouveau_fence_ref(nvbo->bo.sync_obj);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (fence) {
@@ -478,7 +469,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
return ret;
}
- ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+ ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e4290aa8..7caca057bc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
- return gem ? gem->driver_private : NULL;
+ return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
}
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, int size, int align,
uint32_t domain, uint32_t tile_mode,
uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
extern void nouveau_gem_object_close(struct drm_gem_object *,
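
The nouveau_gem hunks above replace the separately allocated GEM object (reached via gem->driver_private) with a drm_gem_object embedded directly in struct nouveau_bo, recovered through container_of(); drm_gem_object_init() and drm_gem_object_release() take over from drm_gem_object_alloc() and kfree(). Embedding removes one allocation and the driver_private back-pointer, so a GEM pointer and its buffer object can never get out of sync. A minimal standalone sketch of that embedded-object pattern, using simplified fake_* stand-in types and a user-space container_of definition rather than the driver's real structures:

#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_gem_object {
        unsigned int handle;
};

struct fake_bo {
        int pin_count;
        struct fake_gem_object gem;     /* embedded, no driver_private needed */
};

/* Mirrors the new nouveau_gem_object(): member pointer -> containing object. */
static struct fake_bo *
fake_bo_from_gem(struct fake_gem_object *gem)
{
        return gem ? container_of(gem, struct fake_bo, gem) : NULL;
}

int main(void)
{
        struct fake_bo bo = { .pin_count = 1 };
        struct fake_gem_object *gem = &bo.gem;

        printf("pin_count via embedded gem: %d\n",
               fake_bo_from_gem(gem)->pin_count);
        return 0;
}
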
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 936b442a6ab..38a4db5bfe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -32,369 +32,12 @@
#include <drm/drmP.h>
#include "nouveau_drm.h"
-#include "nouveau_pm.h"
+#include "nouveau_hwmon.h"
#include <subdev/gpio.h>
#include <subdev/timer.h>
#include <subdev/therm.h>
-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
-static char *nouveau_perflvl;
-module_param_named(perflvl, nouveau_perflvl, charp, 0400);
-
-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
-static int nouveau_perflvl_wr;
-module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
-
-static int
-nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- struct nouveau_pm_level *a, struct nouveau_pm_level *b)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
- int ret;
-
- /*XXX: not on all boards, we should control based on temperature
- * on recent boards.. or maybe on some other factor we don't
- * know about?
- */
- if (therm && therm->fan_set &&
- a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
- ret = therm->fan_set(therm, perflvl->fanspeed);
- if (ret && ret != -ENODEV) {
- NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
- }
- }
-
- if (pm->voltage.supported && pm->voltage_set) {
- if (perflvl->volt_min && b->volt_min > a->volt_min) {
- ret = pm->voltage_set(dev, perflvl->volt_min);
- if (ret) {
- NV_ERROR(drm, "voltage set failed: %d\n", ret);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static int
-nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_pm *pm = nouveau_pm(dev);
- void *state;
- int ret;
-
- if (perflvl == pm->cur)
- return 0;
-
- ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
- if (ret)
- return ret;
-
- state = pm->clocks_pre(dev, perflvl);
- if (IS_ERR(state)) {
- ret = PTR_ERR(state);
- goto error;
- }
- ret = pm->clocks_set(dev, state);
- if (ret)
- goto error;
-
- ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
- if (ret)
- return ret;
-
- pm->cur = perflvl;
- return 0;
-
-error:
- /* restore the fan speed and voltage before leaving */
- nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
- return ret;
-}
-
-void
-nouveau_pm_trigger(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_timer *ptimer = nouveau_timer(drm->device);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_profile *profile = NULL;
- struct nouveau_pm_level *perflvl = NULL;
- int ret;
-
- /* select power profile based on current power source */
- if (power_supply_is_system_supplied())
- profile = pm->profile_ac;
- else
- profile = pm->profile_dc;
-
- if (profile != pm->profile) {
- pm->profile->func->fini(pm->profile);
- pm->profile = profile;
- pm->profile->func->init(pm->profile);
- }
-
- /* select performance level based on profile */
- perflvl = profile->func->select(profile);
-
- /* change perflvl, if necessary */
- if (perflvl != pm->cur) {
- u64 time0 = ptimer->read(ptimer);
-
- NV_INFO(drm, "setting performance level: %d", perflvl->id);
- ret = nouveau_pm_perflvl_set(dev, perflvl);
- if (ret)
- NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
-
- NV_INFO(drm, "> reclocking took %lluns\n\n",
- ptimer->read(ptimer) - time0);
- }
-}
-
-static struct nouveau_pm_profile *
-profile_find(struct drm_device *dev, const char *string)
-{
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_profile *profile;
-
- list_for_each_entry(profile, &pm->profiles, head) {
- if (!strncmp(profile->name, string, sizeof(profile->name)))
- return profile;
- }
-
- return NULL;
-}
-
-static int
-nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
-{
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_profile *ac = NULL, *dc = NULL;
- char string[16], *cur = string, *ptr;
-
- /* safety precaution, for now */
- if (nouveau_perflvl_wr != 7777)
- return -EPERM;
-
- strncpy(string, profile, sizeof(string));
- string[sizeof(string) - 1] = 0;
- if ((ptr = strchr(string, '\n')))
- *ptr = '\0';
-
- ptr = strsep(&cur, ",");
- if (ptr)
- ac = profile_find(dev, ptr);
-
- ptr = strsep(&cur, ",");
- if (ptr)
- dc = profile_find(dev, ptr);
- else
- dc = ac;
-
- if (ac == NULL || dc == NULL)
- return -EINVAL;
-
- pm->profile_ac = ac;
- pm->profile_dc = dc;
- nouveau_pm_trigger(dev);
- return 0;
-}
-
-static void
-nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
-{
-}
-
-static struct nouveau_pm_level *
-nouveau_pm_static_select(struct nouveau_pm_profile *profile)
-{
- return container_of(profile, struct nouveau_pm_level, profile);
-}
-
-const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
- .destroy = nouveau_pm_static_dummy,
- .init = nouveau_pm_static_dummy,
- .fini = nouveau_pm_static_dummy,
- .select = nouveau_pm_static_select,
-};
-
-static int
-nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
- int ret;
-
- memset(perflvl, 0, sizeof(*perflvl));
-
- if (pm->clocks_get) {
- ret = pm->clocks_get(dev, perflvl);
- if (ret)
- return ret;
- }
-
- if (pm->voltage.supported && pm->voltage_get) {
- ret = pm->voltage_get(dev);
- if (ret > 0) {
- perflvl->volt_min = ret;
- perflvl->volt_max = ret;
- }
- }
-
- if (therm && therm->fan_get) {
- ret = therm->fan_get(therm);
- if (ret >= 0)
- perflvl->fanspeed = ret;
- }
-
- nouveau_mem_timing_read(dev, &perflvl->timing);
- return 0;
-}
-
-static void
-nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
-{
- char c[16], s[16], v[32], f[16], m[16];
-
- c[0] = '\0';
- if (perflvl->core)
- snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
-
- s[0] = '\0';
- if (perflvl->shader)
- snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
-
- m[0] = '\0';
- if (perflvl->memory)
- snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
-
- v[0] = '\0';
- if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
- snprintf(v, sizeof(v), " voltage %dmV-%dmV",
- perflvl->volt_min / 1000, perflvl->volt_max / 1000);
- } else
- if (perflvl->volt_min) {
- snprintf(v, sizeof(v), " voltage %dmV",
- perflvl->volt_min / 1000);
- }
-
- f[0] = '\0';
- if (perflvl->fanspeed)
- snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
-
- snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
-}
-
-static ssize_t
-nouveau_pm_get_perflvl_info(struct device *d,
- struct device_attribute *a, char *buf)
-{
- struct nouveau_pm_level *perflvl =
- container_of(a, struct nouveau_pm_level, dev_attr);
- char *ptr = buf;
- int len = PAGE_SIZE;
-
- snprintf(ptr, len, "%d:", perflvl->id);
- ptr += strlen(buf);
- len -= strlen(buf);
-
- nouveau_pm_perflvl_info(perflvl, ptr, len);
- return strlen(buf);
-}
-
-static ssize_t
-nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
-{
- struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_level cur;
- int len = PAGE_SIZE, ret;
- char *ptr = buf;
-
- snprintf(ptr, len, "profile: %s, %s\nc:",
- pm->profile_ac->name, pm->profile_dc->name);
- ptr += strlen(buf);
- len -= strlen(buf);
-
- ret = nouveau_pm_perflvl_get(dev, &cur);
- if (ret == 0)
- nouveau_pm_perflvl_info(&cur, ptr, len);
- return strlen(buf);
-}
-
-static ssize_t
-nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
- const char *buf, size_t count)
-{
- struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
- int ret;
-
- ret = nouveau_pm_profile_set(dev, buf);
- if (ret)
- return ret;
- return strlen(buf);
-}
-
-static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
- nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
-
-static int
-nouveau_sysfs_init(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct device *d = &dev->pdev->dev;
- int ret, i;
-
- ret = device_create_file(d, &dev_attr_performance_level);
- if (ret)
- return ret;
-
- for (i = 0; i < pm->nr_perflvl; i++) {
- struct nouveau_pm_level *perflvl = &pm->perflvl[i];
-
- perflvl->dev_attr.attr.name = perflvl->name;
- perflvl->dev_attr.attr.mode = S_IRUGO;
- perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
- perflvl->dev_attr.store = NULL;
- sysfs_attr_init(&perflvl->dev_attr.attr);
-
- ret = device_create_file(d, &perflvl->dev_attr);
- if (ret) {
- NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n",
- perflvl->id, i);
- perflvl->dev_attr.attr.name = NULL;
- nouveau_pm_fini(dev);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void
-nouveau_sysfs_fini(struct drm_device *dev)
-{
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct device *d = &dev->pdev->dev;
- int i;
-
- device_remove_file(d, &dev_attr_performance_level);
- for (i = 0; i < pm->nr_perflvl; i++) {
- struct nouveau_pm_level *pl = &pm->perflvl[i];
-
- if (!pl->dev_attr.attr.name)
- break;
-
- device_remove_file(d, &pl->dev_attr);
- }
-}
-
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
static ssize_t
nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
@@ -778,9 +421,6 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
int ret = -ENODEV;
long value;
- if (nouveau_perflvl_wr != 7777)
- return -EPERM;
-
if (kstrtol(buf, 10, &value) == -EINVAL)
return -EINVAL;
@@ -919,17 +559,21 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
};
#endif
-static int
+int
nouveau_hwmon_init(struct drm_device *dev)
{
- struct nouveau_pm *pm = nouveau_pm(dev);
-
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
+ hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+ hwmon->dev = dev;
+
if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
return -ENODEV;
@@ -976,199 +620,37 @@ nouveau_hwmon_init(struct drm_device *dev)
goto error;
}
- pm->hwmon = hwmon_dev;
+ hwmon->hwmon = hwmon_dev;
return 0;
error:
NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
hwmon_device_unregister(hwmon_dev);
- pm->hwmon = NULL;
+ hwmon->hwmon = NULL;
return ret;
#else
- pm->hwmon = NULL;
+ hwmon->hwmon = NULL;
return 0;
#endif
}
-static void
+void
nouveau_hwmon_fini(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
- struct nouveau_pm *pm = nouveau_pm(dev);
+ struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
- if (pm->hwmon) {
- sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
- sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
- sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
+ if (hwmon->hwmon) {
+ sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_default_attrgroup);
+ sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup);
+ sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+ sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
- hwmon_device_unregister(pm->hwmon);
+ hwmon_device_unregister(hwmon->hwmon);
}
-#endif
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-static int
-nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
-{
- struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
- struct nouveau_drm *drm = nouveau_drm(pm->dev);
- struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
-
- if (strcmp(entry->device_class, "ac_adapter") == 0) {
- bool ac = power_supply_is_system_supplied();
- NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
- nouveau_pm_trigger(pm->dev);
- }
-
- return NOTIFY_OK;
-}
+ nouveau_drm(dev)->hwmon = NULL;
+ kfree(hwmon);
#endif
-
-int
-nouveau_pm_init(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_pm *pm;
- char info[256];
- int ret, i;
-
- pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
- if (!pm)
- return -ENOMEM;
-
- pm->dev = dev;
-
- if (device->card_type < NV_40) {
- pm->clocks_get = nv04_pm_clocks_get;
- pm->clocks_pre = nv04_pm_clocks_pre;
- pm->clocks_set = nv04_pm_clocks_set;
- if (nouveau_gpio(drm->device)) {
- pm->voltage_get = nouveau_voltage_gpio_get;
- pm->voltage_set = nouveau_voltage_gpio_set;
- }
- } else
- if (device->card_type < NV_50) {
- pm->clocks_get = nv40_pm_clocks_get;
- pm->clocks_pre = nv40_pm_clocks_pre;
- pm->clocks_set = nv40_pm_clocks_set;
- pm->voltage_get = nouveau_voltage_gpio_get;
- pm->voltage_set = nouveau_voltage_gpio_set;
- } else
- if (device->card_type < NV_C0) {
- if (device->chipset < 0xa3 ||
- device->chipset == 0xaa ||
- device->chipset == 0xac) {
- pm->clocks_get = nv50_pm_clocks_get;
- pm->clocks_pre = nv50_pm_clocks_pre;
- pm->clocks_set = nv50_pm_clocks_set;
- } else {
- pm->clocks_get = nva3_pm_clocks_get;
- pm->clocks_pre = nva3_pm_clocks_pre;
- pm->clocks_set = nva3_pm_clocks_set;
- }
- pm->voltage_get = nouveau_voltage_gpio_get;
- pm->voltage_set = nouveau_voltage_gpio_set;
- } else
- if (device->card_type < NV_E0) {
- pm->clocks_get = nvc0_pm_clocks_get;
- pm->clocks_pre = nvc0_pm_clocks_pre;
- pm->clocks_set = nvc0_pm_clocks_set;
- pm->voltage_get = nouveau_voltage_gpio_get;
- pm->voltage_set = nouveau_voltage_gpio_set;
- }
-
-
- /* parse aux tables from vbios */
- nouveau_volt_init(dev);
-
- INIT_LIST_HEAD(&pm->profiles);
-
- /* determine current ("boot") performance level */
- ret = nouveau_pm_perflvl_get(dev, &pm->boot);
- if (ret) {
- NV_ERROR(drm, "failed to determine boot perflvl\n");
- return ret;
- }
-
- strncpy(pm->boot.name, "boot", 4);
- strncpy(pm->boot.profile.name, "boot", 4);
- pm->boot.profile.func = &nouveau_pm_static_profile_func;
-
- list_add(&pm->boot.profile.head, &pm->profiles);
-
- pm->profile_ac = &pm->boot.profile;
- pm->profile_dc = &pm->boot.profile;
- pm->profile = &pm->boot.profile;
- pm->cur = &pm->boot;
-
- /* add performance levels from vbios */
- nouveau_perf_init(dev);
-
- /* display available performance levels */
- NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
- for (i = 0; i < pm->nr_perflvl; i++) {
- nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
- NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
- }
-
- nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
- NV_INFO(drm, "c:%s", info);
-
- /* switch performance levels now if requested */
- if (nouveau_perflvl != NULL)
- nouveau_pm_profile_set(dev, nouveau_perflvl);
-
- nouveau_sysfs_init(dev);
- nouveau_hwmon_init(dev);
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
- pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
- register_acpi_notifier(&pm->acpi_nb);
-#endif
-
- return 0;
-}
-
-void
-nouveau_pm_fini(struct drm_device *dev)
-{
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_profile *profile, *tmp;
-
- list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
- list_del(&profile->head);
- profile->func->destroy(profile);
- }
-
- if (pm->cur != &pm->boot)
- nouveau_pm_perflvl_set(dev, &pm->boot);
-
- nouveau_perf_fini(dev);
- nouveau_volt_fini(dev);
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
- unregister_acpi_notifier(&pm->acpi_nb);
-#endif
- nouveau_hwmon_fini(dev);
- nouveau_sysfs_fini(dev);
-
- nouveau_drm(dev)->pm = NULL;
- kfree(pm);
-}
-
-void
-nouveau_pm_resume(struct drm_device *dev)
-{
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_level *perflvl;
-
- if (!pm->cur || pm->cur == &pm->boot)
- return;
-
- perflvl = pm->cur;
- pm->cur = &pm->boot;
- nouveau_pm_perflvl_set(dev, perflvl);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.h b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
new file mode 100644
index 00000000000..62ccbb39863
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWMON_H__
+#define __NOUVEAU_HWMON_H__
+
+struct nouveau_hwmon {
+ struct drm_device *dev;
+ struct device *hwmon;
+};
+
+static inline struct nouveau_hwmon *
+nouveau_hwmon(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->hwmon;
+}
+
+/* nouveau_hwmon.c */
+int nouveau_hwmon_init(struct drm_device *dev);
+void nouveau_hwmon_fini(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
deleted file mode 100644
index 697687593a8..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_HWSQ_H__
-#define __NOUVEAU_HWSQ_H__
-
-struct hwsq_ucode {
- u8 data[0x200];
- union {
- u8 *u08;
- u16 *u16;
- u32 *u32;
- } ptr;
- u16 len;
-
- u32 reg;
- u32 val;
-};
-
-static inline void
-hwsq_init(struct hwsq_ucode *hwsq)
-{
- hwsq->ptr.u08 = hwsq->data;
- hwsq->reg = 0xffffffff;
- hwsq->val = 0xffffffff;
-}
-
-static inline void
-hwsq_fini(struct hwsq_ucode *hwsq)
-{
- do {
- *hwsq->ptr.u08++ = 0x7f;
- hwsq->len = hwsq->ptr.u08 - hwsq->data;
- } while (hwsq->len & 3);
- hwsq->ptr.u08 = hwsq->data;
-}
-
-static inline void
-hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
-{
- u32 shift = 0;
- while (usec & ~3) {
- usec >>= 2;
- shift++;
- }
-
- *hwsq->ptr.u08++ = (shift << 2) | usec;
-}
-
-static inline void
-hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
-{
- flag += 0x80;
- if (val >= 0)
- flag += 0x20;
- if (val >= 1)
- flag += 0x20;
- *hwsq->ptr.u08++ = flag;
-}
-
-static inline void
-hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
-{
- *hwsq->ptr.u08++ = 0x5f;
- *hwsq->ptr.u08++ = v0;
- *hwsq->ptr.u08++ = v1;
-}
-
-static inline void
-hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
-{
- if (val != hwsq->val) {
- if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
- *hwsq->ptr.u08++ = 0x42;
- *hwsq->ptr.u16++ = (val & 0x0000ffff);
- } else {
- *hwsq->ptr.u08++ = 0xe2;
- *hwsq->ptr.u32++ = val;
- }
-
- hwsq->val = val;
- }
-
- if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
- *hwsq->ptr.u08++ = 0x40;
- *hwsq->ptr.u16++ = (reg & 0x0000ffff);
- } else {
- *hwsq->ptr.u08++ = 0xe0;
- *hwsq->ptr.u32++ = reg;
- }
- hwsq->reg = reg;
-}
-
-#endif
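
The nouveau_hwsq.h helpers removed above build a hardware-sequencer (HWSQ) microcode buffer byte by byte: hwsq_wr32() emits a short 16-bit opcode form when the upper half of the register or value matches the previously emitted one, and hwsq_fini() pads the script with 0x7f bytes until its length is a multiple of four. A hedged sketch of how those helpers were meant to be chained; the register address and value below are arbitrary examples, not taken from any real reclocking script:

/* Assumes the hwsq_ucode structure and inline helpers from the header above. */
static void
example_build_hwsq(struct hwsq_ucode *hwsq)
{
        hwsq_init(hwsq);                       /* reset write pointer and reg/val caches */
        hwsq_usec(hwsq, 16);                   /* 16us delay, encoded as one opcode byte */
        hwsq_wr32(hwsq, 0x00c040, 0x00000001); /* register write; full 32-bit forms here,
                                                * since the caches still hold 0xffffffff */
        hwsq_fini(hwsq);                       /* pad with 0x7f and record hwsq->len */
}
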
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
deleted file mode 100644
index 4f6a572f225..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
- * Copyright 2005 Stephane Marchesin
- *
- * The Weather Channel (TM) funded Tungsten Graphics to develop the
- * initial release of the Radeon 8500 driver under the XFree86 license.
- * This notice must be preserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Ben Skeggs <bskeggs@redhat.com>
- * Roy Spliet <r.spliet@student.tudelft.nl>
- */
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/fb.h>
-
-static int
-nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
- /* XXX: I don't trust the -1's and +1's... they must come
- * from somewhere! */
- t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
- 1 << 16 |
- (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
- (e->tCL + 2 - (t->tCWL - 1));
-
- t->reg[2] = 0x20200000 |
- ((t->tCWL - 1) << 24 |
- e->tRRD << 16 |
- e->tRCDWR << 8 |
- e->tRCDRD);
-
- NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
- t->reg[0], t->reg[1], t->reg[2]);
- return 0;
-}
-
-static int
-nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry P;
- uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
-
- if (bit_table(dev, 'P', &P))
- return -EINVAL;
-
- switch (min(len, (u8) 22)) {
- case 22:
- unk21 = e->tUNK_21;
- case 21:
- unk20 = e->tUNK_20;
- case 20:
- if (e->tCWL > 0)
- t->tCWL = e->tCWL;
- case 19:
- unk18 = e->tUNK_18;
- break;
- }
-
- t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
- t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
- max(unk18, (u8) 1) << 16 |
- (e->tWTR + 2 + (t->tCWL - 1)) << 8;
-
- t->reg[2] = ((t->tCWL - 1) << 24 |
- e->tRRD << 16 |
- e->tRCDWR << 8 |
- e->tRCDRD);
-
- t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;
-
- t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
-
- t->reg[8] = boot->reg[8] & 0xffffff00;
-
- if (P.version == 1) {
- t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
-
- t->reg[3] = (0x14 + e->tCL) << 24 |
- 0x16 << 16 |
- (e->tCL - 1) << 8 |
- (e->tCL - 1);
-
- t->reg[4] |= boot->reg[4] & 0xffff0000;
-
- t->reg[6] = (0x33 - t->tCWL) << 16 |
- t->tCWL << 8 |
- (0x2e + e->tCL - t->tCWL);
-
- t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
-
- /* XXX: P.version == 1 only has DDR2 and GDDR3? */
- if (pfb->ram->type == NV_MEM_TYPE_DDR2) {
- t->reg[5] |= (e->tCL + 3) << 8;
- t->reg[6] |= (t->tCWL - 2) << 8;
- t->reg[8] |= (e->tCL - 4);
- } else {
- t->reg[5] |= (e->tCL + 2) << 8;
- t->reg[6] |= t->tCWL << 8;
- t->reg[8] |= (e->tCL - 2);
- }
- } else {
- t->reg[1] |= (5 + e->tCL - (t->tCWL));
-
- /* XXX: 0xb? 0x30? */
- t->reg[3] = (0x30 + e->tCL) << 24 |
- (boot->reg[3] & 0x00ff0000)|
- (0xb + e->tCL) << 8 |
- (e->tCL - 1);
-
- t->reg[4] |= (unk20 << 24 | unk21 << 16);
-
- /* XXX: +6? */
- t->reg[5] |= (t->tCWL + 6) << 8;
-
- t->reg[6] = (0x5a + e->tCL) << 16 |
- (6 - e->tCL + t->tCWL) << 8 |
- (0x50 + e->tCL - t->tCWL);
-
- tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
- t->reg[7] = (tmp7_3 << 24) |
- ((tmp7_3 - 6 + e->tCL) << 16) |
- 0x202;
- }
-
- NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
- t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
- NV_DEBUG(drm, " 230: %08x %08x %08x %08x\n",
- t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
- NV_DEBUG(drm, " 240: %08x\n", t->reg[8]);
- return 0;
-}
-
-static int
-nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (e->tCWL > 0)
- t->tCWL = e->tCWL;
-
- t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
- e->tRFC << 8 | e->tRC);
-
- t->reg[1] = (boot->reg[1] & 0xff000000) |
- (e->tRCDWR & 0x0f) << 20 |
- (e->tRCDRD & 0x0f) << 14 |
- (t->tCWL << 7) |
- (e->tCL & 0x0f);
-
- t->reg[2] = (boot->reg[2] & 0xff0000ff) |
- e->tWR << 16 | e->tWTR << 8;
-
- t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
- (e->tUNK_21 & 0xf) << 5 |
- (e->tUNK_13 & 0x1f);
-
- t->reg[4] = (boot->reg[4] & 0xfff00fff) |
- (e->tRRD&0x1f) << 15;
-
- NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
- t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
- NV_DEBUG(drm, " 2a0: %08x\n", t->reg[4]);
- return 0;
-}
-
-/**
- * MR generation methods
- */
-
-static int
-nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- t->drive_strength = 0;
- if (len < 15) {
- t->odt = boot->odt;
- } else {
- t->odt = e->RAM_FT1 & 0x07;
- }
-
- if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
- NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
- }
-
- if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
- NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
-
- if (t->odt > 3) {
- NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
- t->id, t->odt);
- t->odt = 0;
- }
-
- t->mr[0] = (boot->mr[0] & 0x100f) |
- (e->tCL) << 4 |
- (e->tWR - 1) << 9;
- t->mr[1] = (boot->mr[1] & 0x101fbb) |
- (t->odt & 0x1) << 2 |
- (t->odt & 0x2) << 5;
-
- NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
- return 0;
-}
-
-static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
- 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
-
-static int
-nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- u8 cl = e->tCL - 4;
-
- t->drive_strength = 0;
- if (len < 15) {
- t->odt = boot->odt;
- } else {
- t->odt = e->RAM_FT1 & 0x07;
- }
-
- if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
- NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
- }
-
- if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
- NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
-
- if (e->tCWL < 5) {
- NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
- return -ERANGE;
- }
-
- t->mr[0] = (boot->mr[0] & 0x180b) |
- /* CAS */
- (cl & 0x7) << 4 |
- (cl & 0x8) >> 1 |
- (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
- t->mr[1] = (boot->mr[1] & 0x101dbb) |
- (t->odt & 0x1) << 2 |
- (t->odt & 0x2) << 5 |
- (t->odt & 0x4) << 7;
- t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
-
- NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
- return 0;
-}
-
-static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
- 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
-static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
- 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
-
-static int
-nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (len < 15) {
- t->drive_strength = boot->drive_strength;
- t->odt = boot->odt;
- } else {
- t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
- t->odt = e->RAM_FT1 & 0x07;
- }
-
- if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
- NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
- }
-
- if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
- NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
-
- if (t->odt > 3) {
- NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
- t->id, t->odt);
- t->odt = 0;
- }
-
- t->mr[0] = (boot->mr[0] & 0xe0b) |
- /* CAS */
- ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
- ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
- t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
- (t->odt << 2) |
- (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
- t->mr[2] = boot->mr[2];
-
- NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
- t->mr[0], t->mr[1], t->mr[2]);
- return 0;
-}
-
-static int
-nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (len < 15) {
- t->drive_strength = boot->drive_strength;
- t->odt = boot->odt;
- } else {
- t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
- t->odt = e->RAM_FT1 & 0x03;
- }
-
- if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
- NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
- }
-
- if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
- NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
-
- if (t->odt > 3) {
- NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
- t->id, t->odt);
- t->odt = 0;
- }
-
- t->mr[0] = (boot->mr[0] & 0x007) |
- ((e->tCL - 5) << 3) |
- ((e->tWR - 4) << 8);
- t->mr[1] = (boot->mr[1] & 0x1007f0) |
- t->drive_strength |
- (t->odt << 2);
-
- NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
- return 0;
-}
-
-int
-nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_memtiming *t)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_memtiming *boot = &pm->boot.timing;
- struct nouveau_pm_tbl_entry *e;
- u8 ver, len, *ptr, *ramcfg;
- int ret;
-
- ptr = nouveau_perf_timing(dev, freq, &ver, &len);
- if (!ptr || ptr[0] == 0x00) {
- *t = *boot;
- return 0;
- }
- e = (struct nouveau_pm_tbl_entry *)ptr;
-
- t->tCWL = boot->tCWL;
-
- switch (device->card_type) {
- case NV_40:
- ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
- break;
- case NV_50:
- ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
- break;
- case NV_C0:
- case NV_D0:
- ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
- break;
- default:
- ret = -ENODEV;
- break;
- }
-
- switch (pfb->ram->type * !ret) {
- case NV_MEM_TYPE_GDDR3:
- ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
- break;
- case NV_MEM_TYPE_GDDR5:
- ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
- break;
- case NV_MEM_TYPE_DDR2:
- ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
- break;
- case NV_MEM_TYPE_DDR3:
- ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
- if (ramcfg) {
- int dll_off;
-
- if (ver == 0x00)
- dll_off = !!(ramcfg[3] & 0x04);
- else
- dll_off = !!(ramcfg[2] & 0x40);
-
- switch (pfb->ram->type) {
- case NV_MEM_TYPE_GDDR3:
- t->mr[1] &= ~0x00000040;
- t->mr[1] |= 0x00000040 * dll_off;
- break;
- default:
- t->mr[1] &= ~0x00000001;
- t->mr[1] |= 0x00000001 * dll_off;
- break;
- }
- }
-
- return ret;
-}
-
-void
-nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- u32 timing_base, timing_regs, mr_base;
- int i;
-
- if (device->card_type >= 0xC0) {
- timing_base = 0x10f290;
- mr_base = 0x10f300;
- } else {
- timing_base = 0x100220;
- mr_base = 0x1002c0;
- }
-
- t->id = -1;
-
- switch (device->card_type) {
- case NV_50:
- timing_regs = 9;
- break;
- case NV_C0:
- case NV_D0:
- timing_regs = 5;
- break;
- case NV_30:
- case NV_40:
- timing_regs = 3;
- break;
- default:
- timing_regs = 0;
- return;
- }
- for(i = 0; i < timing_regs; i++)
- t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
-
- t->tCWL = 0;
- if (device->card_type < NV_C0) {
- t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
- } else if (device->card_type <= NV_D0) {
- t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
- }
-
- t->mr[0] = nv_rd32(device, mr_base);
- t->mr[1] = nv_rd32(device, mr_base + 0x04);
- t->mr[2] = nv_rd32(device, mr_base + 0x20);
- t->mr[3] = nv_rd32(device, mr_base + 0x24);
-
- t->odt = 0;
- t->drive_strength = 0;
-
- switch (pfb->ram->type) {
- case NV_MEM_TYPE_DDR3:
- t->odt |= (t->mr[1] & 0x200) >> 7;
- case NV_MEM_TYPE_DDR2:
- t->odt |= (t->mr[1] & 0x04) >> 2 |
- (t->mr[1] & 0x40) >> 5;
- break;
- case NV_MEM_TYPE_GDDR3:
- case NV_MEM_TYPE_GDDR5:
- t->drive_strength = t->mr[1] & 0x03;
- t->odt = (t->mr[1] & 0x0c) >> 2;
- break;
- default:
- break;
- }
-}
-
-int
-nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
- struct nouveau_pm_level *perflvl)
-{
- struct nouveau_drm *drm = nouveau_drm(exec->dev);
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nouveau_pm_memtiming *info = &perflvl->timing;
- u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
- u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
- u32 mr1_dlloff;
-
- switch (pfb->ram->type) {
- case NV_MEM_TYPE_DDR2:
- tDLLK = 2000;
- mr1_dlloff = 0x00000001;
- break;
- case NV_MEM_TYPE_DDR3:
- tDLLK = 12000;
- tCKSRE = 2000;
- tXS = 1000;
- mr1_dlloff = 0x00000001;
- break;
- case NV_MEM_TYPE_GDDR3:
- tDLLK = 40000;
- mr1_dlloff = 0x00000040;
- break;
- default:
- NV_ERROR(drm, "cannot reclock unsupported memtype\n");
- return -ENODEV;
- }
-
- /* fetch current MRs */
- switch (pfb->ram->type) {
- case NV_MEM_TYPE_GDDR3:
- case NV_MEM_TYPE_DDR3:
- mr[2] = exec->mrg(exec, 2);
- default:
- mr[1] = exec->mrg(exec, 1);
- mr[0] = exec->mrg(exec, 0);
- break;
- }
-
- /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
- if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
- exec->precharge(exec);
- exec->mrs (exec, 1, mr[1] | mr1_dlloff);
- exec->wait(exec, tMRD);
- }
-
- /* enter self-refresh mode */
- exec->precharge(exec);
- exec->refresh(exec);
- exec->refresh(exec);
- exec->refresh_auto(exec, false);
- exec->refresh_self(exec, true);
- exec->wait(exec, tCKSRE);
-
- /* modify input clock frequency */
- exec->clock_set(exec);
-
- /* exit self-refresh mode */
- exec->wait(exec, tCKSRX);
- exec->precharge(exec);
- exec->refresh_self(exec, false);
- exec->refresh_auto(exec, true);
- exec->wait(exec, tXS);
- exec->wait(exec, tXS);
-
- /* update MRs */
- if (mr[2] != info->mr[2]) {
- exec->mrs (exec, 2, info->mr[2]);
- exec->wait(exec, tMRD);
- }
-
- if (mr[1] != info->mr[1]) {
- /* need to keep DLL off until later, at least on GDDR3 */
- exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
- exec->wait(exec, tMRD);
- }
-
- if (mr[0] != info->mr[0]) {
- exec->mrs (exec, 0, info->mr[0]);
- exec->wait(exec, tMRD);
- }
-
- /* update PFB timing registers */
- exec->timing_set(exec);
-
- /* DLL (enable + ) reset */
- if (!(info->mr[1] & mr1_dlloff)) {
- if (mr[1] & mr1_dlloff) {
- exec->mrs (exec, 1, info->mr[1]);
- exec->wait(exec, tMRD);
- }
- exec->mrs (exec, 0, info->mr[0] | 0x00000100);
- exec->wait(exec, tMRD);
- exec->mrs (exec, 0, info->mr[0] | 0x00000000);
- exec->wait(exec, tMRD);
- exec->wait(exec, tDLLK);
- if (pfb->ram->type == NV_MEM_TYPE_GDDR3)
- exec->precharge(exec);
- }
-
- return 0;
-}
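
nouveau_mem_exec(), removed above, drives a generic DRAM reclock: it optionally disables the DLL, puts the memory into self-refresh, lets the caller switch the clock, leaves self-refresh, rewrites the mode registers and PFB timings, and finally resets the DLL. Callers supplied the hardware access through the nouveau_mem_exec_func callback table (declared in the nouveau_pm.h hunk further below). A hedged sketch of such a caller, with hypothetical ex_* placeholder callbacks standing in for real register programming:

/* Placeholder callbacks -- real implementations would touch PFB registers. */
static void ex_precharge(struct nouveau_mem_exec_func *exec) { /* PRECHARGE-ALL */ }
static void ex_refresh(struct nouveau_mem_exec_func *exec) { /* manual REFRESH */ }
static void ex_refresh_auto(struct nouveau_mem_exec_func *exec, bool on) { }
static void ex_refresh_self(struct nouveau_mem_exec_func *exec, bool on) { }
static void ex_wait(struct nouveau_mem_exec_func *exec, u32 nsec) { /* delay */ }
static u32  ex_mrg(struct nouveau_mem_exec_func *exec, int mr) { return 0; }
static void ex_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) { }
static void ex_clock_set(struct nouveau_mem_exec_func *exec) { /* switch memory PLL */ }
static void ex_timing_set(struct nouveau_mem_exec_func *exec) { /* write timing regs */ }

static int
example_reclock(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        struct nouveau_mem_exec_func exec = {
                .dev          = dev,
                .precharge    = ex_precharge,
                .refresh      = ex_refresh,
                .refresh_auto = ex_refresh_auto,
                .refresh_self = ex_refresh_self,
                .wait         = ex_wait,
                .mrg          = ex_mrg,
                .mrs          = ex_mrs,
                .clock_set    = ex_clock_set,
                .timing_set   = ex_timing_set,
        };

        return nouveau_mem_exec(&exec, perflvl);
}
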
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
deleted file mode 100644
index 4fe883c5491..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_reg.h"
-#include "nouveau_pm.h"
-
-static u8 *
-nouveau_perf_table(struct drm_device *dev, u8 *ver)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- struct bit_entry P;
-
- if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
- u8 *perf = ROMPTR(dev, P.data[0]);
- if (perf) {
- *ver = perf[0];
- return perf;
- }
- }
-
- if (bios->type == NVBIOS_BMP) {
- if (bios->data[bios->offset + 6] >= 0x25) {
- u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
- if (perf) {
- *ver = perf[1];
- return perf;
- }
- }
- }
-
- return NULL;
-}
-
-static u8 *
-nouveau_perf_entry(struct drm_device *dev, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
- u8 *perf = nouveau_perf_table(dev, ver);
- if (perf) {
- if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
- *hdr = perf[3];
- *cnt = 0;
- *len = 0;
- return perf + perf[0] + idx * perf[3];
- } else
- if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
- *hdr = perf[3];
- *cnt = perf[4];
- *len = perf[5];
- return perf + perf[1] + idx * (*hdr + (*cnt * *len));
- } else
- if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
- *hdr = perf[2];
- *cnt = perf[4];
- *len = perf[3];
- return perf + perf[1] + idx * (*hdr + (*cnt * *len));
- }
- }
- return NULL;
-}
-
-u8 *
-nouveau_perf_rammap(struct drm_device *dev, u32 freq,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry P;
- u8 *perf, i = 0;
-
- if (!bit_table(dev, 'P', &P) && P.version == 2) {
- u8 *rammap = ROMPTR(dev, P.data[4]);
- if (rammap) {
- u8 *ramcfg = rammap + rammap[1];
-
- *ver = rammap[0];
- *hdr = rammap[2];
- *cnt = rammap[4];
- *len = rammap[3];
-
- freq /= 1000;
- for (i = 0; i < rammap[5]; i++) {
- if (freq >= ROM16(ramcfg[0]) &&
- freq <= ROM16(ramcfg[2]))
- return ramcfg;
-
- ramcfg += *hdr + (*cnt * *len);
- }
- }
-
- return NULL;
- }
-
- if (nv_device(drm->device)->chipset == 0x49 ||
- nv_device(drm->device)->chipset == 0x4b)
- freq /= 2;
-
- while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
- if (*ver >= 0x20 && *ver < 0x25) {
- if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
- break;
- } else
- if (*ver >= 0x25 && *ver < 0x40) {
- if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
- break;
- }
- }
-
- if (perf) {
- u8 *ramcfg = perf + *hdr;
- *ver = 0x00;
- *hdr = 0;
- return ramcfg;
- }
-
- return NULL;
-}
-
-u8 *
-nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- u8 strap, hdr, cnt;
- u8 *rammap;
-
- strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
- if (bios->ram_restrict_tbl_ptr)
- strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
-
- rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
- if (rammap && strap < cnt)
- return rammap + hdr + (strap * *len);
-
- return NULL;
-}
-
-u8 *
-nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- struct bit_entry P;
- u8 *perf, *timing = NULL;
- u8 i = 0, hdr, cnt;
-
- if (bios->type == NVBIOS_BMP) {
- while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
- len)) && *ver == 0x15) {
- if (freq <= ROM32(perf[5]) * 20) {
- *ver = 0x00;
- *len = 14;
- return perf + 41;
- }
- }
- return NULL;
- }
-
- if (!bit_table(dev, 'P', &P)) {
- if (P.version == 1)
- timing = ROMPTR(dev, P.data[4]);
- else
- if (P.version == 2)
- timing = ROMPTR(dev, P.data[8]);
- }
-
- if (timing && timing[0] == 0x10) {
- u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
- if (ramcfg && ramcfg[1] < timing[2]) {
- *ver = timing[0];
- *len = timing[3];
- return timing + timing[1] + (ramcfg[1] * timing[3]);
- }
- }
-
- return NULL;
-}
-
-static void
-legacy_perf_init(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- struct nouveau_pm *pm = nouveau_pm(dev);
- char *perf, *entry, *bmp = &bios->data[bios->offset];
- int headerlen, use_straps;
-
- if (bmp[5] < 0x5 || bmp[6] < 0x14) {
- NV_DEBUG(drm, "BMP version too old for perf\n");
- return;
- }
-
- perf = ROMPTR(dev, bmp[0x73]);
- if (!perf) {
- NV_DEBUG(drm, "No memclock table pointer found.\n");
- return;
- }
-
- switch (perf[0]) {
- case 0x12:
- case 0x14:
- case 0x18:
- use_straps = 0;
- headerlen = 1;
- break;
- case 0x01:
- use_straps = perf[1] & 1;
- headerlen = (use_straps ? 8 : 2);
- break;
- default:
- NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
- return;
- }
-
- entry = perf + headerlen;
- if (use_straps)
- entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
-
- sprintf(pm->perflvl[0].name, "performance_level_0");
- pm->perflvl[0].memory = ROM16(entry[0]) * 20;
- pm->nr_perflvl = 1;
-}
-
-static void
-nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry P;
- u8 *vmap;
- int id;
-
- id = perflvl->volt_min;
- perflvl->volt_min = 0;
-
- /* boards using voltage table version <0x40 store the voltage
- * level directly in the perflvl entry as a multiple of 10mV
- */
- if (drm->pm->voltage.version < 0x40) {
- perflvl->volt_min = id * 10000;
- perflvl->volt_max = perflvl->volt_min;
- return;
- }
-
- /* on newer ones, the perflvl stores an index into yet another
- * vbios table containing a min/max voltage value for the perflvl
- */
- if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
- NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
- P.version, P.length);
- return;
- }
-
- vmap = ROMPTR(dev, P.data[32]);
- if (!vmap) {
- NV_DEBUG(drm, "volt map table pointer invalid\n");
- return;
- }
-
- if (id < vmap[3]) {
- vmap += vmap[1] + (vmap[2] * id);
- perflvl->volt_min = ROM32(vmap[0]);
- perflvl->volt_max = ROM32(vmap[4]);
- }
-}
-
-void
-nouveau_perf_init(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nvbios *bios = &drm->vbios;
- u8 *perf, ver, hdr, cnt, len;
- int ret, vid, i = -1;
-
- if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
- legacy_perf_init(dev);
- return;
- }
-
- perf = nouveau_perf_table(dev, &ver);
-
- while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
- struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
-
- if (perf[0] == 0xff)
- continue;
-
- switch (ver) {
- case 0x12:
- case 0x13:
- case 0x15:
- perflvl->fanspeed = perf[55];
- if (hdr > 56)
- perflvl->volt_min = perf[56];
- perflvl->core = ROM32(perf[1]) * 10;
- perflvl->memory = ROM32(perf[5]) * 20;
- break;
- case 0x21:
- case 0x23:
- case 0x24:
- perflvl->fanspeed = perf[4];
- perflvl->volt_min = perf[5];
- perflvl->shader = ROM16(perf[6]) * 1000;
- perflvl->core = perflvl->shader;
- perflvl->core += (signed char)perf[8] * 1000;
- if (nv_device(drm->device)->chipset == 0x49 ||
- nv_device(drm->device)->chipset == 0x4b)
- perflvl->memory = ROM16(perf[11]) * 1000;
- else
- perflvl->memory = ROM16(perf[11]) * 2000;
- break;
- case 0x25:
- perflvl->fanspeed = perf[4];
- perflvl->volt_min = perf[5];
- perflvl->core = ROM16(perf[6]) * 1000;
- perflvl->shader = ROM16(perf[10]) * 1000;
- perflvl->memory = ROM16(perf[12]) * 1000;
- break;
- case 0x30:
- perflvl->memscript = ROM16(perf[2]);
- case 0x35:
- perflvl->fanspeed = perf[6];
- perflvl->volt_min = perf[7];
- perflvl->core = ROM16(perf[8]) * 1000;
- perflvl->shader = ROM16(perf[10]) * 1000;
- perflvl->memory = ROM16(perf[12]) * 1000;
- perflvl->vdec = ROM16(perf[16]) * 1000;
- perflvl->dom6 = ROM16(perf[20]) * 1000;
- break;
- case 0x40:
-#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
- perflvl->fanspeed = 0; /*XXX*/
- perflvl->volt_min = perf[2];
- if (nv_device(drm->device)->card_type == NV_50) {
- perflvl->core = subent(0);
- perflvl->shader = subent(1);
- perflvl->memory = subent(2);
- perflvl->vdec = subent(3);
- perflvl->unka0 = subent(4);
- } else {
- perflvl->hub06 = subent(0);
- perflvl->hub01 = subent(1);
- perflvl->copy = subent(2);
- perflvl->shader = subent(3);
- perflvl->rop = subent(4);
- perflvl->memory = subent(5);
- perflvl->vdec = subent(6);
- perflvl->daemon = subent(10);
- perflvl->hub07 = subent(11);
- perflvl->core = perflvl->shader / 2;
- }
- break;
- }
-
- /* make sure vid is valid */
- nouveau_perf_voltage(dev, perflvl);
- if (pm->voltage.supported && perflvl->volt_min) {
- vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
- if (vid < 0) {
- NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
- continue;
- }
- }
-
- /* get the corresponding memory timings */
- ret = nouveau_mem_timing_calc(dev, perflvl->memory,
- &perflvl->timing);
- if (ret) {
- NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
- continue;
- }
-
- snprintf(perflvl->name, sizeof(perflvl->name),
- "performance_level_%d", i);
- perflvl->id = i;
-
- snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
- "%d", perflvl->id);
- perflvl->profile.func = &nouveau_pm_static_profile_func;
- list_add_tail(&perflvl->profile.head, &pm->profiles);
-
-
- pm->nr_perflvl++;
- }
-}
-
-void
-nouveau_perf_fini(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
deleted file mode 100644
index 73b789c230a..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_PM_H__
-#define __NOUVEAU_PM_H__
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-
-struct nouveau_pm_voltage_level {
- u32 voltage; /* microvolts */
- u8 vid;
-};
-
-struct nouveau_pm_voltage {
- bool supported;
- u8 version;
- u8 vid_mask;
-
- struct nouveau_pm_voltage_level *level;
- int nr_level;
-};
-
-/* Exclusive upper limits */
-#define NV_MEM_CL_DDR2_MAX 8
-#define NV_MEM_WR_DDR2_MAX 9
-#define NV_MEM_CL_DDR3_MAX 17
-#define NV_MEM_WR_DDR3_MAX 17
-#define NV_MEM_CL_GDDR3_MAX 16
-#define NV_MEM_WR_GDDR3_MAX 18
-#define NV_MEM_CL_GDDR5_MAX 21
-#define NV_MEM_WR_GDDR5_MAX 20
-
-struct nouveau_pm_memtiming {
- int id;
-
- u32 reg[9];
- u32 mr[4];
-
- u8 tCWL;
-
- u8 odt;
- u8 drive_strength;
-};
-
-struct nouveau_pm_tbl_header {
- u8 version;
- u8 header_len;
- u8 entry_cnt;
- u8 entry_len;
-};
-
-struct nouveau_pm_tbl_entry {
- u8 tWR;
- u8 tWTR;
- u8 tCL;
- u8 tRC;
- u8 empty_4;
- u8 tRFC; /* Byte 5 */
- u8 empty_6;
- u8 tRAS; /* Byte 7 */
- u8 empty_8;
- u8 tRP; /* Byte 9 */
- u8 tRCDRD;
- u8 tRCDWR;
- u8 tRRD;
- u8 tUNK_13;
- u8 RAM_FT1; /* 14, a bitmask of random RAM features */
- u8 empty_15;
- u8 tUNK_16;
- u8 empty_17;
- u8 tUNK_18;
- u8 tCWL;
- u8 tUNK_20, tUNK_21;
-};
-
-struct nouveau_pm_profile;
-struct nouveau_pm_profile_func {
- void (*destroy)(struct nouveau_pm_profile *);
- void (*init)(struct nouveau_pm_profile *);
- void (*fini)(struct nouveau_pm_profile *);
- struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
-};
-
-struct nouveau_pm_profile {
- const struct nouveau_pm_profile_func *func;
- struct list_head head;
- char name[8];
-};
-
-#define NOUVEAU_PM_MAX_LEVEL 8
-struct nouveau_pm_level {
- struct nouveau_pm_profile profile;
- struct device_attribute dev_attr;
- char name[32];
- int id;
-
- struct nouveau_pm_memtiming timing;
- u32 memory;
- u16 memscript;
-
- u32 core;
- u32 shader;
- u32 rop;
- u32 copy;
- u32 daemon;
- u32 vdec;
- u32 dom6;
- u32 unka0; /* nva3:nvc0 */
- u32 hub01; /* nvc0- */
- u32 hub06; /* nvc0- */
- u32 hub07; /* nvc0- */
-
- u32 volt_min; /* microvolts */
- u32 volt_max;
- u8 fanspeed;
-};
-
-struct nouveau_pm_temp_sensor_constants {
- u16 offset_constant;
- s16 offset_mult;
- s16 offset_div;
- s16 slope_mult;
- s16 slope_div;
-};
-
-struct nouveau_pm_threshold_temp {
- s16 critical;
- s16 down_clock;
-};
-
-struct nouveau_pm {
- struct drm_device *dev;
-
- struct nouveau_pm_voltage voltage;
- struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
- int nr_perflvl;
- struct nouveau_pm_temp_sensor_constants sensor_constants;
- struct nouveau_pm_threshold_temp threshold_temp;
-
- struct nouveau_pm_profile *profile_ac;
- struct nouveau_pm_profile *profile_dc;
- struct nouveau_pm_profile *profile;
- struct list_head profiles;
-
- struct nouveau_pm_level boot;
- struct nouveau_pm_level *cur;
-
- struct device *hwmon;
- struct notifier_block acpi_nb;
-
- int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
- void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
- int (*clocks_set)(struct drm_device *, void *);
-
- int (*voltage_get)(struct drm_device *);
- int (*voltage_set)(struct drm_device *, int voltage);
-};
-
-static inline struct nouveau_pm *
-nouveau_pm(struct drm_device *dev)
-{
- return nouveau_drm(dev)->pm;
-}
-
-struct nouveau_mem_exec_func {
- struct drm_device *dev;
- void (*precharge)(struct nouveau_mem_exec_func *);
- void (*refresh)(struct nouveau_mem_exec_func *);
- void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
- void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
- void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
- u32 (*mrg)(struct nouveau_mem_exec_func *, int mr);
- void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
- void (*clock_set)(struct nouveau_mem_exec_func *);
- void (*timing_set)(struct nouveau_mem_exec_func *);
- void *priv;
-};
-
-/* nouveau_mem.c */
-int nouveau_mem_exec(struct nouveau_mem_exec_func *,
- struct nouveau_pm_level *);
-
-/* nouveau_pm.c */
-int nouveau_pm_init(struct drm_device *dev);
-void nouveau_pm_fini(struct drm_device *dev);
-void nouveau_pm_resume(struct drm_device *dev);
-extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
-void nouveau_pm_trigger(struct drm_device *dev);
-
-/* nouveau_volt.c */
-void nouveau_volt_init(struct drm_device *);
-void nouveau_volt_fini(struct drm_device *);
-int nouveau_volt_vid_lookup(struct drm_device *, int voltage);
-int nouveau_volt_lvl_lookup(struct drm_device *, int vid);
-int nouveau_voltage_gpio_get(struct drm_device *);
-int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
-
-/* nouveau_perf.c */
-void nouveau_perf_init(struct drm_device *);
-void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
- u8 *hdr, u8 *cnt, u8 *len);
-u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-
-/* nouveau_mem.c */
-void nouveau_mem_timing_init(struct drm_device *);
-void nouveau_mem_timing_fini(struct drm_device *);
-
-/* nv04_pm.c */
-int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv04_pm_clocks_set(struct drm_device *, void *);
-
-/* nv40_pm.c */
-int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv40_pm_clocks_set(struct drm_device *, void *);
-int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
-
-/* nv50_pm.c */
-int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv50_pm_clocks_set(struct drm_device *, void *);
-int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
-
-/* nva3_pm.c */
-int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nva3_pm_clocks_set(struct drm_device *, void *);
-
-/* nvc0_pm.c */
-int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nvc0_pm_clocks_set(struct drm_device *, void *);
-
-/* nouveau_mem.c */
-int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
- struct nouveau_pm_memtiming *);
-void nouveau_mem_timing_read(struct drm_device *,
- struct nouveau_pm_memtiming *);
-
-static inline int
-nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
- int *N, int *fN, int *M, int *P)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_clock *clk = nouveau_clock(device);
- struct nouveau_pll_vals pv;
- int ret;
-
- ret = clk->pll_calc(clk, pll, freq, &pv);
- *N = pv.N1;
- *M = pv.M1;
- *P = pv.log2P;
- return ret;
-}
-
-#endif
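
For reference, a minimal sketch (not code from this patch) of how the clocks_get/clocks_pre/clocks_set triplet declared in the struct being removed above was intended to be driven: pre() validates the target level and returns an opaque state (or an ERR_PTR), and set() commits the change and frees that state. The demo_reclock() name is hypothetical.

#include <linux/err.h>

static int
demo_reclock(struct drm_device *dev, struct nouveau_pm *pm,
	     struct nouveau_pm_level *perflvl)
{
	void *state;

	/* stage 1: compute/validate everything that can fail up front */
	state = pm->clocks_pre(dev, perflvl);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* stage 2: commit; the per-chipset clocks_set() frees the state */
	return pm->clocks_set(dev, state);
}
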
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d5e5c..51a2cb102b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
return ERR_PTR(ret);
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
- nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
- if (!nvbo->gem) {
+
+ /* Initialize the embedded gem-object. We return a single gem-reference
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ERR_PTR(-ENOMEM);
}
- nvbo->gem->driver_private = nvbo;
- return nvbo->gem;
+ return &nvbo->gem;
}
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
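
The hunk above switches the PRIME import path from a separately allocated GEM object (drm_gem_object_alloc() plus a driver_private back-pointer) to a GEM object embedded in the buffer structure and initialized in place with drm_gem_object_init(). A hedged sketch of that embedding pattern in general, with hypothetical demo_* names:

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>

struct demo_bo {
	struct ttm_buffer_object bo;
	struct drm_gem_object gem;	/* embedded, not separately allocated */
};

/* with the object embedded, container_of() replaces driver_private */
static inline struct demo_bo *
demo_bo_from_gem(struct drm_gem_object *gem)
{
	return container_of(gem, struct demo_bo, gem);
}
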
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
new file mode 100644
index 00000000000..89201a17ce7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_sysfs.h"
+
+#include <core/object.h>
+#include <core/class.h>
+
+static inline struct drm_device *
+drm_device(struct device *d)
+{
+ return pci_get_drvdata(to_pci_dev(d));
+}
+
+#define snappendf(p,r,f,a...) do { \
+ snprintf(p, r, f, ##a); \
+ r -= strlen(p); \
+ p += strlen(p); \
+} while(0)
+
+static ssize_t
+nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
+{
+ struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
+ struct nv_control_pstate_info info;
+ size_t cnt = PAGE_SIZE;
+ char *buf = b;
+ int ret, i;
+
+ ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info.count + 1; i++) {
+ const s32 state = i < info.count ? i :
+ NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
+ struct nv_control_pstate_attr attr = {
+ .state = state,
+ .index = 0,
+ };
+
+ ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
+ &attr, sizeof(attr));
+ if (ret)
+ return ret;
+
+ if (i < info.count)
+ snappendf(buf, cnt, "%02x:", attr.state);
+ else
+ snappendf(buf, cnt, "--:");
+
+ attr.index = 0;
+ do {
+ attr.state = state;
+ ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
+ &attr, sizeof(attr));
+ if (ret)
+ return ret;
+
+ snappendf(buf, cnt, " %s %d", attr.name, attr.min);
+ if (attr.min != attr.max)
+ snappendf(buf, cnt, "-%d", attr.max);
+ snappendf(buf, cnt, " %s", attr.unit);
+ } while (attr.index);
+
+ if ((state >= 0 && info.pstate == state) ||
+ (state < 0 && info.ustate < 0))
+ snappendf(buf, cnt, " *");
+ snappendf(buf, cnt, "\n");
+ }
+
+ return strlen(b);
+}
+
+static ssize_t
+nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
+ struct nv_control_pstate_user args;
+ long value, ret;
+ char *tmp;
+
+ if ((tmp = strchr(buf, '\n')))
+ *tmp = '\0';
+
+ if (!strcasecmp(buf, "none"))
+ args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
+ else
+ if (!strcasecmp(buf, "auto"))
+ args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
+ else {
+ ret = kstrtol(buf, 16, &value);
+ if (ret)
+ return ret;
+ args.state = value;
+ }
+
+ ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
+ nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);
+
+void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+ struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (sysfs->ctrl) {
+ device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
+ nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
+ }
+
+ drm->sysfs = NULL;
+ kfree(sysfs);
+}
+
+int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_sysfs *sysfs;
+ int ret;
+
+ sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
+ if (!sysfs)
+ return -ENOMEM;
+
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
+ NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
+ if (ret == 0)
+ device_create_file(&dev->pdev->dev, &dev_attr_pstate);
+
+ return 0;
+}
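
The new file above is an instance of the standard DEVICE_ATTR pattern: a show/store pair is bound to a single sysfs file ("pstate"), created with device_create_file() at init time and removed again at fini time. A minimal, self-contained sketch of that pattern, with hypothetical demo_* names and a plain integer as backing state:

#include <linux/device.h>
#include <linux/kernel.h>

static unsigned long demo_value;	/* hypothetical backing state */

static ssize_t
demo_show(struct device *d, struct device_attribute *a, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n", demo_value);
}

static ssize_t
demo_store(struct device *d, struct device_attribute *a,
	   const char *buf, size_t count)
{
	int ret = kstrtoul(buf, 0, &demo_value);
	return ret ? ret : count;
}

static DEVICE_ATTR(demo, S_IRUGO | S_IWUSR, demo_show, demo_store);

/* probe:  device_create_file(dev, &dev_attr_demo);
 * remove: device_remove_file(dev, &dev_attr_demo);
 */
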
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
new file mode 100644
index 00000000000..74b47f1e01e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -0,0 +1,19 @@
+#ifndef __NOUVEAU_SYSFS_H__
+#define __NOUVEAU_SYSFS_H__
+
+#include "nouveau_drm.h"
+
+struct nouveau_sysfs {
+ struct nouveau_object *ctrl;
+};
+
+static inline struct nouveau_sysfs *
+nouveau_sysfs(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->sysfs;
+}
+
+int nouveau_sysfs_init(struct drm_device *);
+void nouveau_sysfs_fini(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
deleted file mode 100644
index 9976414cbe5..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
-
-static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
-static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
-
-int
-nouveau_voltage_gpio_get(struct drm_device *dev)
-{
- struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(device);
- u8 vid = 0;
- int i;
-
- for (i = 0; i < nr_vidtag; i++) {
- if (!(volt->vid_mask & (1 << i)))
- continue;
-
- vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
- }
-
- return nouveau_volt_lvl_lookup(dev, vid);
-}
-
-int
-nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(device);
- struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
- int vid, i;
-
- vid = nouveau_volt_vid_lookup(dev, voltage);
- if (vid < 0)
- return vid;
-
- for (i = 0; i < nr_vidtag; i++) {
- if (!(volt->vid_mask & (1 << i)))
- continue;
-
- gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
- }
-
- return 0;
-}
-
-int
-nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
-{
- struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
- int i;
-
- for (i = 0; i < volt->nr_level; i++) {
- if (volt->level[i].voltage == voltage)
- return volt->level[i].vid;
- }
-
- return -ENOENT;
-}
-
-int
-nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
-{
- struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
- int i;
-
- for (i = 0; i < volt->nr_level; i++) {
- if (volt->level[i].vid == vid)
- return volt->level[i].voltage;
- }
-
- return -ENOENT;
-}
-
-void
-nouveau_volt_init(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_pm_voltage *voltage = &pm->voltage;
- struct nvbios *bios = &drm->vbios;
- struct dcb_gpio_func func;
- struct bit_entry P;
- u8 *volt = NULL, *entry;
- int i, headerlen, recordlen, entries, vidmask, vidshift;
-
- if (bios->type == NVBIOS_BIT) {
- if (bit_table(dev, 'P', &P))
- return;
-
- if (P.version == 1)
- volt = ROMPTR(dev, P.data[16]);
- else
- if (P.version == 2)
- volt = ROMPTR(dev, P.data[12]);
- else {
- NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
- }
- } else {
- if (bios->data[bios->offset + 6] < 0x27) {
- NV_DEBUG(drm, "BMP version too old for voltage\n");
- return;
- }
-
- volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
- }
-
- if (!volt) {
- NV_DEBUG(drm, "voltage table pointer invalid\n");
- return;
- }
-
- switch (volt[0]) {
- case 0x10:
- case 0x11:
- case 0x12:
- headerlen = 5;
- recordlen = volt[1];
- entries = volt[2];
- vidshift = 0;
- vidmask = volt[4];
- break;
- case 0x20:
- headerlen = volt[1];
- recordlen = volt[3];
- entries = volt[2];
- vidshift = 0; /* could be vidshift like 0x30? */
- vidmask = volt[5];
- break;
- case 0x30:
- headerlen = volt[1];
- recordlen = volt[2];
- entries = volt[3];
- vidmask = volt[4];
- /* no longer certain what volt[5] is, if it's related to
- * the vid shift then it's definitely not a function of
- * how many bits are set.
- *
- * after looking at a number of nva3+ vbios images, they
- * all seem likely to have a static shift of 2.. lets
- * go with that for now until proven otherwise.
- */
- vidshift = 2;
- break;
- case 0x40:
- headerlen = volt[1];
- recordlen = volt[2];
- entries = volt[3]; /* not a clue what the entries are for.. */
- vidmask = volt[11]; /* guess.. */
- vidshift = 0;
- break;
- default:
- NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
- return;
- }
-
- /* validate vid mask */
- voltage->vid_mask = vidmask;
- if (!voltage->vid_mask)
- return;
-
- i = 0;
- while (vidmask) {
- if (i > nr_vidtag) {
- NV_DEBUG(drm, "vid bit %d unknown\n", i);
- return;
- }
-
- if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
- NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
- return;
- }
-
- vidmask >>= 1;
- i++;
- }
-
- /* parse vbios entries into common format */
- voltage->version = volt[0];
- if (voltage->version < 0x40) {
- voltage->nr_level = entries;
- voltage->level =
- kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
- if (!voltage->level)
- return;
-
- entry = volt + headerlen;
- for (i = 0; i < entries; i++, entry += recordlen) {
- voltage->level[i].voltage = entry[0] * 10000;
- voltage->level[i].vid = entry[1] >> vidshift;
- }
- } else {
- u32 volt_uv = ROM32(volt[4]);
- s16 step_uv = ROM16(volt[8]);
- u8 vid;
-
- voltage->nr_level = voltage->vid_mask + 1;
- voltage->level = kcalloc(voltage->nr_level,
- sizeof(*voltage->level), GFP_KERNEL);
- if (!voltage->level)
- return;
-
- for (vid = 0; vid <= voltage->vid_mask; vid++) {
- voltage->level[vid].voltage = volt_uv;
- voltage->level[vid].vid = vid;
- volt_uv += step_uv;
- }
- }
-
- voltage->supported = true;
-}
-
-void
-nouveau_volt_fini(struct drm_device *dev)
-{
- struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-
- kfree(volt->level);
-}
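
The deleted nouveau_voltage_gpio_get()/_set() pair above encode the voltage ID across the GPIO lines selected by vid_mask: when bit i of the mask is set, bit i of the VID is read from (or driven onto) the GPIO tagged vidtag[i]. A hedged sketch of that bit composition, with a hypothetical GPIO accessor standing in for gpio->get():

#include <linux/types.h>

static u8
demo_read_vid(u32 vid_mask, int (*read_gpio_bit)(int line))
{
	u8 vid = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (!(vid_mask & (1 << i)))
			continue;
		vid |= (read_gpio_bit(i) & 1) << i;	/* bit i of the VID */
	}
	return vid;
}
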
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 77dcc9c5077..8fe32bbed99 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -255,6 +255,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, NvCtxSurf2D);
BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
+ if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, 2);
+ }
BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
OUT_RING(chan, NvGdiRect);
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
deleted file mode 100644
index 27afc0ea28b..00000000000
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_reg.h"
-#include "dispnv04/hw.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-int
-nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- int ret;
-
- ret = nouveau_hw_get_clock(dev, PLL_CORE);
- if (ret < 0)
- return ret;
- perflvl->core = ret;
-
- ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
- if (ret < 0)
- return ret;
- perflvl->memory = ret;
-
- return 0;
-}
-
-struct nv04_pm_clock {
- struct nvbios_pll pll;
- struct nouveau_pll_vals calc;
-};
-
-struct nv04_pm_state {
- struct nv04_pm_clock core;
- struct nv04_pm_clock memory;
-};
-
-static int
-calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nouveau_clock *pclk = nouveau_clock(device);
- int ret;
-
- ret = nvbios_pll_parse(bios, id, &clk->pll);
- if (ret)
- return ret;
-
- ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
- if (!ret)
- return -EINVAL;
-
- return 0;
-}
-
-void *
-nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nv04_pm_state *info;
- int ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
- if (ret)
- goto error;
-
- if (perflvl->memory) {
- ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
- if (ret)
- goto error;
- }
-
- return info;
-error:
- kfree(info);
- return ERR_PTR(ret);
-}
-
-static void
-prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_clock *pclk = nouveau_clock(device);
- u32 reg = clk->pll.reg;
-
- /* thank the insane nouveau_hw_setpll() interface for this */
- if (device->card_type >= NV_40)
- reg += 4;
-
- pclk->pll_prog(pclk, reg, &clk->calc);
-}
-
-int
-nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_timer *ptimer = nouveau_timer(device);
- struct nv04_pm_state *state = pre_state;
-
- prog_pll(dev, &state->core);
-
- if (state->memory.pll.reg) {
- prog_pll(dev, &state->memory);
- if (device->card_type < NV_30) {
- if (device->card_type == NV_20)
- nv_mask(device, 0x1002c4, 0, 1 << 20);
-
- /* Reset the DLLs */
- nv_mask(device, 0x1002c0, 0, 1 << 8);
- }
- }
-
- nv_ofuncs(ptimer)->init(nv_object(ptimer));
-
- kfree(state);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
deleted file mode 100644
index 625f80d53dc..00000000000
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-#include "dispnv04/hw.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-#include <engine/fifo.h>
-
-#define min2(a,b) ((a) < (b) ? (a) : (b))
-
-static u32
-read_pll_1(struct drm_device *dev, u32 reg)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ctrl = nv_rd32(device, reg + 0x00);
- int P = (ctrl & 0x00070000) >> 16;
- int N = (ctrl & 0x0000ff00) >> 8;
- int M = (ctrl & 0x000000ff) >> 0;
- u32 ref = 27000, clk = 0;
-
- if (ctrl & 0x80000000)
- clk = ref * N / M;
-
- return clk >> P;
-}
-
-static u32
-read_pll_2(struct drm_device *dev, u32 reg)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ctrl = nv_rd32(device, reg + 0x00);
- u32 coef = nv_rd32(device, reg + 0x04);
- int N2 = (coef & 0xff000000) >> 24;
- int M2 = (coef & 0x00ff0000) >> 16;
- int N1 = (coef & 0x0000ff00) >> 8;
- int M1 = (coef & 0x000000ff) >> 0;
- int P = (ctrl & 0x00070000) >> 16;
- u32 ref = 27000, clk = 0;
-
- if ((ctrl & 0x80000000) && M1) {
- clk = ref * N1 / M1;
- if ((ctrl & 0x40000100) == 0x40000000) {
- if (M2)
- clk = clk * N2 / M2;
- else
- clk = 0;
- }
- }
-
- return clk >> P;
-}
-
-static u32
-read_clk(struct drm_device *dev, u32 src)
-{
- switch (src) {
- case 3:
- return read_pll_2(dev, 0x004000);
- case 2:
- return read_pll_1(dev, 0x004008);
- default:
- break;
- }
-
- return 0;
-}
-
-int
-nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ctrl = nv_rd32(device, 0x00c040);
-
- perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
- perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
- perflvl->memory = read_pll_2(dev, 0x4020);
- return 0;
-}
-
-struct nv40_pm_state {
- u32 ctrl;
- u32 npll_ctrl;
- u32 npll_coef;
- u32 spll;
- u32 mpll_ctrl;
- u32 mpll_coef;
-};
-
-static int
-nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
- u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nouveau_clock *pclk = nouveau_clock(device);
- struct nouveau_pll_vals coef;
- int ret;
-
- ret = nvbios_pll_parse(bios, reg, pll);
- if (ret)
- return ret;
-
- if (clk < pll->vco1.max_freq)
- pll->vco2.max_freq = 0;
-
- ret = pclk->pll_calc(pclk, pll, clk, &coef);
- if (ret == 0)
- return -ERANGE;
-
- *N1 = coef.N1;
- *M1 = coef.M1;
- if (N2 && M2) {
- if (pll->vco2.max_freq) {
- *N2 = coef.N2;
- *M2 = coef.M2;
- } else {
- *N2 = 1;
- *M2 = 1;
- }
- }
- *log2P = coef.log2P;
- return 0;
-}
-
-void *
-nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nv40_pm_state *info;
- struct nvbios_pll pll;
- int N1, N2, M1, M2, log2P;
- int ret;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- /* core/geometric clock */
- ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
- &N1, &M1, &N2, &M2, &log2P);
- if (ret < 0)
- goto out;
-
- if (N2 == M2) {
- info->npll_ctrl = 0x80000100 | (log2P << 16);
- info->npll_coef = (N1 << 8) | M1;
- } else {
- info->npll_ctrl = 0xc0000000 | (log2P << 16);
- info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
- }
-
- /* use the second PLL for shader/rop clock, if it differs from core */
- if (perflvl->shader && perflvl->shader != perflvl->core) {
- ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
- &N1, &M1, NULL, NULL, &log2P);
- if (ret < 0)
- goto out;
-
- info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
- info->ctrl = 0x00000223;
- } else {
- info->spll = 0x00000000;
- info->ctrl = 0x00000333;
- }
-
- /* memory clock */
- if (!perflvl->memory) {
- info->mpll_ctrl = 0x00000000;
- goto out;
- }
-
- ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
- &N1, &M1, &N2, &M2, &log2P);
- if (ret < 0)
- goto out;
-
- info->mpll_ctrl = 0x80000000 | (log2P << 16);
- info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
- if (N2 == M2) {
- info->mpll_ctrl |= 0x00000100;
- info->mpll_coef = (N1 << 8) | M1;
- } else {
- info->mpll_ctrl |= 0x40000000;
- info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
- }
-
-out:
- if (ret < 0) {
- kfree(info);
- info = ERR_PTR(ret);
- }
- return info;
-}
-
-static bool
-nv40_pm_gr_idle(void *data)
-{
- struct drm_device *dev = data;
- struct nouveau_device *device = nouveau_dev(dev);
-
- if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
- (nv_rd32(device, 0x400760) & 0x0000000f))
- return false;
-
- if (nv_rd32(device, 0x400700))
- return false;
-
- return true;
-}
-
-int
-nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_fifo *pfifo = nouveau_fifo(device);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv40_pm_state *info = pre_state;
- unsigned long flags;
- struct bit_entry M;
- u32 crtc_mask = 0;
- u8 sr1[2];
- int i, ret = -EAGAIN;
-
- /* determine which CRTCs are active, fetch VGA_SR1 for each */
- for (i = 0; i < 2; i++) {
- u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
- u32 cnt = 0;
- do {
- if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
- nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
- sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
- if (!(sr1[i] & 0x20))
- crtc_mask |= (1 << i);
- break;
- }
- udelay(1);
- } while (cnt++ < 32);
- }
-
- /* halt and idle engines */
- pfifo->pause(pfifo, &flags);
-
- if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
- goto resume;
-
- ret = 0;
-
- /* set engine clocks */
- nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
- nv_wr32(device, 0x004004, info->npll_coef);
- nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
- nv_mask(device, 0x004008, 0xc007ffff, info->spll);
- mdelay(5);
- nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
-
- if (!info->mpll_ctrl)
- goto resume;
-
- /* wait for vblank start on active crtcs, disable memory access */
- for (i = 0; i < 2; i++) {
- if (!(crtc_mask & (1 << i)))
- continue;
- nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
- nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
- nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
- nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
- }
-
- /* prepare ram for reclocking */
- nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
- nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
- nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
- nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
- nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
-
- /* change the PLL of each memory partition */
- nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
- switch (nv_device(drm->device)->chipset) {
- case 0x40:
- case 0x45:
- case 0x41:
- case 0x42:
- case 0x47:
- nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
- nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
- nv_wr32(device, 0x004048, info->mpll_coef);
- nv_wr32(device, 0x004030, info->mpll_coef);
- case 0x43:
- case 0x49:
- case 0x4b:
- nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
- nv_wr32(device, 0x00403c, info->mpll_coef);
- default:
- nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
- nv_wr32(device, 0x004024, info->mpll_coef);
- break;
- }
- udelay(100);
- nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
-
- /* re-enable normal operation of memory controller */
- nv_wr32(device, 0x1002dc, 0x00000000);
- nv_mask(device, 0x100210, 0x80000000, 0x80000000);
- udelay(100);
-
- /* execute memory reset script from vbios */
- if (!bit_table(dev, 'M', &M))
- nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
-
- /* make sure we're in vblank (hopefully the same one as before), and
- * then re-enable crtc memory access
- */
- for (i = 0; i < 2; i++) {
- if (!(crtc_mask & (1 << i)))
- continue;
- nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
- nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
- nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
- }
-
- /* resume engines */
-resume:
- pfifo->start(pfifo, &flags);
- kfree(info);
- return ret;
-}
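
For reference, the register decoding in read_pll_1()/read_pll_2() above reduces to the usual two-stage PLL formula: clk = ref * N1 / M1, optionally multiplied by N2 / M2 when the second stage is engaged, then shifted down by the log2 post-divider P. A hedged helper expressing just that arithmetic (the demo_* name is hypothetical):

#include <linux/types.h>

static u32
demo_pll_khz(u32 ref_khz, int N1, int M1, int N2, int M2, int P)
{
	u32 clk;

	if (!M1)
		return 0;
	clk = ref_khz * N1 / M1;		/* first stage */
	if (N2 && M2)
		clk = clk * N2 / M2;		/* optional second stage */
	return clk >> P;			/* power-of-two post-divider */
}
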
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
deleted file mode 100644
index 4efc33fa73f..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ /dev/null
@@ -1,855 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "dispnv04/hw.h"
-#include "nouveau_pm.h"
-#include "nouveau_hwsq.h"
-
-#include "nv50_display.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-enum clk_src {
- clk_src_crystal,
- clk_src_href,
- clk_src_hclk,
- clk_src_hclkm3,
- clk_src_hclkm3d2,
- clk_src_host,
- clk_src_nvclk,
- clk_src_sclk,
- clk_src_mclk,
- clk_src_vdec,
- clk_src_dom6
-};
-
-static u32 read_clk(struct drm_device *, enum clk_src);
-
-static u32
-read_div(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- switch (nv_device(drm->device)->chipset) {
- case 0x50: /* it exists, but only has bit 31, not the dividers.. */
- case 0x84:
- case 0x86:
- case 0x98:
- case 0xa0:
- return nv_rd32(device, 0x004700);
- case 0x92:
- case 0x94:
- case 0x96:
- return nv_rd32(device, 0x004800);
- default:
- return 0x00000000;
- }
-}
-
-static u32
-read_pll_src(struct drm_device *dev, u32 base)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 coef, ref = read_clk(dev, clk_src_crystal);
- u32 rsel = nv_rd32(device, 0x00e18c);
- int P, N, M, id;
-
- switch (nv_device(drm->device)->chipset) {
- case 0x50:
- case 0xa0:
- switch (base) {
- case 0x4020:
- case 0x4028: id = !!(rsel & 0x00000004); break;
- case 0x4008: id = !!(rsel & 0x00000008); break;
- case 0x4030: id = 0; break;
- default:
- NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
- return 0;
- }
-
- coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
- ref *= (coef & 0x01000000) ? 2 : 4;
- P = (coef & 0x00070000) >> 16;
- N = ((coef & 0x0000ff00) >> 8) + 1;
- M = ((coef & 0x000000ff) >> 0) + 1;
- break;
- case 0x84:
- case 0x86:
- case 0x92:
- coef = nv_rd32(device, 0x00e81c);
- P = (coef & 0x00070000) >> 16;
- N = (coef & 0x0000ff00) >> 8;
- M = (coef & 0x000000ff) >> 0;
- break;
- case 0x94:
- case 0x96:
- case 0x98:
- rsel = nv_rd32(device, 0x00c050);
- switch (base) {
- case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
- case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
- case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
- case 0x4030: rsel = 3; break;
- default:
- NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
- return 0;
- }
-
- switch (rsel) {
- case 0: id = 1; break;
- case 1: return read_clk(dev, clk_src_crystal);
- case 2: return read_clk(dev, clk_src_href);
- case 3: id = 0; break;
- }
-
- coef = nv_rd32(device, 0x00e81c + (id * 0x28));
- P = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
- P += (coef & 0x00070000) >> 16;
- N = (coef & 0x0000ff00) >> 8;
- M = (coef & 0x000000ff) >> 0;
- break;
- default:
- BUG_ON(1);
- }
-
- if (M)
- return (ref * N / M) >> P;
- return 0;
-}
-
-static u32
-read_pll_ref(struct drm_device *dev, u32 base)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 src, mast = nv_rd32(device, 0x00c040);
-
- switch (base) {
- case 0x004028:
- src = !!(mast & 0x00200000);
- break;
- case 0x004020:
- src = !!(mast & 0x00400000);
- break;
- case 0x004008:
- src = !!(mast & 0x00010000);
- break;
- case 0x004030:
- src = !!(mast & 0x02000000);
- break;
- case 0x00e810:
- return read_clk(dev, clk_src_crystal);
- default:
- NV_ERROR(drm, "bad pll 0x%06x\n", base);
- return 0;
- }
-
- if (src)
- return read_clk(dev, clk_src_href);
- return read_pll_src(dev, base);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 base)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 mast = nv_rd32(device, 0x00c040);
- u32 ctrl = nv_rd32(device, base + 0);
- u32 coef = nv_rd32(device, base + 4);
- u32 ref = read_pll_ref(dev, base);
- u32 clk = 0;
- int N1, N2, M1, M2;
-
- if (base == 0x004028 && (mast & 0x00100000)) {
- /* wtf, appears to only disable post-divider on nva0 */
- if (nv_device(drm->device)->chipset != 0xa0)
- return read_clk(dev, clk_src_dom6);
- }
-
- N2 = (coef & 0xff000000) >> 24;
- M2 = (coef & 0x00ff0000) >> 16;
- N1 = (coef & 0x0000ff00) >> 8;
- M1 = (coef & 0x000000ff);
- if ((ctrl & 0x80000000) && M1) {
- clk = ref * N1 / M1;
- if ((ctrl & 0x40000100) == 0x40000000) {
- if (M2)
- clk = clk * N2 / M2;
- else
- clk = 0;
- }
- }
-
- return clk;
-}
-
-static u32
-read_clk(struct drm_device *dev, enum clk_src src)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 mast = nv_rd32(device, 0x00c040);
- u32 P = 0;
-
- switch (src) {
- case clk_src_crystal:
- return device->crystal;
- case clk_src_href:
- return 100000; /* PCIE reference clock */
- case clk_src_hclk:
- return read_clk(dev, clk_src_href) * 27778 / 10000;
- case clk_src_hclkm3:
- return read_clk(dev, clk_src_hclk) * 3;
- case clk_src_hclkm3d2:
- return read_clk(dev, clk_src_hclk) * 3 / 2;
- case clk_src_host:
- switch (mast & 0x30000000) {
- case 0x00000000: return read_clk(dev, clk_src_href);
- case 0x10000000: break;
- case 0x20000000: /* !0x50 */
- case 0x30000000: return read_clk(dev, clk_src_hclk);
- }
- break;
- case clk_src_nvclk:
- if (!(mast & 0x00100000))
- P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
- switch (mast & 0x00000003) {
- case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
- case 0x00000001: return read_clk(dev, clk_src_dom6);
- case 0x00000002: return read_pll(dev, 0x004020) >> P;
- case 0x00000003: return read_pll(dev, 0x004028) >> P;
- }
- break;
- case clk_src_sclk:
- P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
- switch (mast & 0x00000030) {
- case 0x00000000:
- if (mast & 0x00000080)
- return read_clk(dev, clk_src_host) >> P;
- return read_clk(dev, clk_src_crystal) >> P;
- case 0x00000010: break;
- case 0x00000020: return read_pll(dev, 0x004028) >> P;
- case 0x00000030: return read_pll(dev, 0x004020) >> P;
- }
- break;
- case clk_src_mclk:
- P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
- if (nv_rd32(device, 0x004008) & 0x00000200) {
- switch (mast & 0x0000c000) {
- case 0x00000000:
- return read_clk(dev, clk_src_crystal) >> P;
- case 0x00008000:
- case 0x0000c000:
- return read_clk(dev, clk_src_href) >> P;
- }
- } else {
- return read_pll(dev, 0x004008) >> P;
- }
- break;
- case clk_src_vdec:
- P = (read_div(dev) & 0x00000700) >> 8;
- switch (nv_device(drm->device)->chipset) {
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0xa0:
- switch (mast & 0x00000c00) {
- case 0x00000000:
- if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
- return read_clk(dev, clk_src_nvclk) >> P;
- return read_clk(dev, clk_src_crystal) >> P;
- case 0x00000400:
- return 0;
- case 0x00000800:
- if (mast & 0x01000000)
- return read_pll(dev, 0x004028) >> P;
- return read_pll(dev, 0x004030) >> P;
- case 0x00000c00:
- return read_clk(dev, clk_src_nvclk) >> P;
- }
- break;
- case 0x98:
- switch (mast & 0x00000c00) {
- case 0x00000000:
- return read_clk(dev, clk_src_nvclk) >> P;
- case 0x00000400:
- return 0;
- case 0x00000800:
- return read_clk(dev, clk_src_hclkm3d2) >> P;
- case 0x00000c00:
- return read_clk(dev, clk_src_mclk) >> P;
- }
- break;
- }
- break;
- case clk_src_dom6:
- switch (nv_device(drm->device)->chipset) {
- case 0x50:
- case 0xa0:
- return read_pll(dev, 0x00e810) >> 2;
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- P = (read_div(dev) & 0x00000007) >> 0;
- switch (mast & 0x0c000000) {
- case 0x00000000: return read_clk(dev, clk_src_href);
- case 0x04000000: break;
- case 0x08000000: return read_clk(dev, clk_src_hclk);
- case 0x0c000000:
- return read_clk(dev, clk_src_hclkm3) >> P;
- }
- break;
- default:
- break;
- }
- default:
- break;
- }
-
- NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
- return 0;
-}
-
-int
-nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0xaa ||
- nv_device(drm->device)->chipset == 0xac)
- return 0;
-
- perflvl->core = read_clk(dev, clk_src_nvclk);
- perflvl->shader = read_clk(dev, clk_src_sclk);
- perflvl->memory = read_clk(dev, clk_src_mclk);
- if (nv_device(drm->device)->chipset != 0x50) {
- perflvl->vdec = read_clk(dev, clk_src_vdec);
- perflvl->dom6 = read_clk(dev, clk_src_dom6);
- }
-
- return 0;
-}
-
-struct nv50_pm_state {
- struct nouveau_pm_level *perflvl;
- struct hwsq_ucode eclk_hwsq;
- struct hwsq_ucode mclk_hwsq;
- u32 mscript;
- u32 mmast;
- u32 mctrl;
- u32 mcoef;
-};
-
-static u32
-calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
- u32 clk, int *N1, int *M1, int *log2P)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nouveau_clock *pclk = nouveau_clock(device);
- struct nouveau_pll_vals coef;
- int ret;
-
- ret = nvbios_pll_parse(bios, reg, pll);
- if (ret)
- return 0;
-
- pll->vco2.max_freq = 0;
- pll->refclk = read_pll_ref(dev, reg);
- if (!pll->refclk)
- return 0;
-
- ret = pclk->pll_calc(pclk, pll, clk, &coef);
- if (ret == 0)
- return 0;
-
- *N1 = coef.N1;
- *M1 = coef.M1;
- *log2P = coef.log2P;
- return ret;
-}
-
-static inline u32
-calc_div(u32 src, u32 target, int *div)
-{
- u32 clk0 = src, clk1 = src;
- for (*div = 0; *div <= 7; (*div)++) {
- if (clk0 <= target) {
- clk1 = clk0 << (*div ? 1 : 0);
- break;
- }
- clk0 >>= 1;
- }
-
- if (target - clk0 <= clk1 - target)
- return clk0;
- (*div)--;
- return clk1;
-}
-
-static inline u32
-clk_same(u32 a, u32 b)
-{
- return ((a / 1000) == (b / 1000));
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- if (nsec > 1000)
- hwsq_usec(hwsq, (nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- if (mr <= 1)
- return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
- if (mr <= 3)
- return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
- return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- if (mr <= 1) {
- if (pfb->ram->ranks > 1)
- hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
- hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
- } else
- if (mr <= 3) {
- if (pfb->ram->ranks > 1)
- hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
- hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
- }
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
- u32 ctrl = nv_rd32(device, 0x004008);
-
- info->mmast = nv_rd32(device, 0x00c040);
- info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
- info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
-
- hwsq_wr32(hwsq, 0xc040, info->mmast);
- hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
- if (info->mctrl & 0x80000000)
- hwsq_wr32(hwsq, 0x400c, info->mcoef);
- hwsq_wr32(hwsq, 0x4008, info->mctrl);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nv50_pm_state *info = exec->priv;
- struct nouveau_pm_level *perflvl = info->perflvl;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
- int i;
-
- for (i = 0; i < 9; i++) {
- u32 reg = 0x100220 + (i * 4);
- u32 val = nv_rd32(device, reg);
- if (val != perflvl->timing.reg[i])
- hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
- }
-}
-
-static int
-calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- struct nv50_pm_state *info)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
- struct nouveau_mem_exec_func exec = {
- .dev = dev,
- .precharge = mclk_precharge,
- .refresh = mclk_refresh,
- .refresh_auto = mclk_refresh_auto,
- .refresh_self = mclk_refresh_self,
- .wait = mclk_wait,
- .mrg = mclk_mrg,
- .mrs = mclk_mrs,
- .clock_set = mclk_clock_set,
- .timing_set = mclk_timing_set,
- .priv = info
- };
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
- struct nvbios_pll pll;
- int N, M, P;
- int ret;
-
- /* use pcie refclock if possible, otherwise use mpll */
- info->mctrl = nv_rd32(device, 0x004008);
- info->mctrl &= ~0x81ff0200;
- if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
- info->mctrl |= 0x00000200 | (pll.bias_p << 19);
- } else {
- ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
- if (ret == 0)
- return -EINVAL;
-
- info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
- info->mctrl |= pll.bias_p << 19;
- info->mcoef = (N << 8) | M;
- }
-
- /* build the ucode which will reclock the memory for us */
- hwsq_init(hwsq);
- if (crtc_mask) {
- hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
- hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
- }
- if (nv_device(drm->device)->chipset >= 0x92)
- hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
- hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
- hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
-
- ret = nouveau_mem_exec(&exec, perflvl);
- if (ret)
- return ret;
-
- hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
- hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
- if (nv_device(drm->device)->chipset >= 0x92)
- hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
- hwsq_fini(hwsq);
- return 0;
-}
-
-void *
-nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_pm_state *info;
- struct hwsq_ucode *hwsq;
- struct nvbios_pll pll;
- u32 out, mast, divs, ctrl;
- int clk, ret = -EINVAL;
- int N, M, P1, P2;
-
- if (nv_device(drm->device)->chipset == 0xaa ||
- nv_device(drm->device)->chipset == 0xac)
- return ERR_PTR(-ENODEV);
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
- info->perflvl = perflvl;
-
- /* memory: build hwsq ucode which we'll use to reclock memory.
- * use pcie refclock if possible, otherwise use mpll */
- info->mclk_hwsq.len = 0;
- if (perflvl->memory) {
- ret = calc_mclk(dev, perflvl, info);
- if (ret)
- goto error;
- info->mscript = perflvl->memscript;
- }
-
- divs = read_div(dev);
- mast = info->mmast;
-
- /* start building HWSQ script for engine reclocking */
- hwsq = &info->eclk_hwsq;
- hwsq_init(hwsq);
- hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
- hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
-
- /* vdec/dom6: switch to "safe" clocks temporarily */
- if (perflvl->vdec) {
- mast &= ~0x00000c00;
- divs &= ~0x00000700;
- }
-
- if (perflvl->dom6) {
- mast &= ~0x0c000000;
- divs &= ~0x00000007;
- }
-
- hwsq_wr32(hwsq, 0x00c040, mast);
-
- /* vdec: avoid modifying xpll until we know exactly how the other
- * clock domains work, i suspect at least some of them can also be
- * tied to xpll...
- */
- if (perflvl->vdec) {
- /* see how close we can get using nvclk as a source */
- clk = calc_div(perflvl->core, perflvl->vdec, &P1);
-
- /* see how close we can get using xpll/hclk as a source */
- if (nv_device(drm->device)->chipset != 0x98)
- out = read_pll(dev, 0x004030);
- else
- out = read_clk(dev, clk_src_hclkm3d2);
- out = calc_div(out, perflvl->vdec, &P2);
-
- /* select whichever gets us closest */
- if (abs((int)perflvl->vdec - clk) <=
- abs((int)perflvl->vdec - out)) {
- if (nv_device(drm->device)->chipset != 0x98)
- mast |= 0x00000c00;
- divs |= P1 << 8;
- } else {
- mast |= 0x00000800;
- divs |= P2 << 8;
- }
- }
-
- /* dom6: nfi what this is, but we're limited to various combinations
- * of the host clock frequency
- */
- if (perflvl->dom6) {
- if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
- mast |= 0x00000000;
- } else
- if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
- mast |= 0x08000000;
- } else {
- clk = read_clk(dev, clk_src_hclk) * 3;
- clk = calc_div(clk, perflvl->dom6, &P1);
-
- mast |= 0x0c000000;
- divs |= P1;
- }
- }
-
- /* vdec/dom6: complete switch to new clocks */
- switch (nv_device(drm->device)->chipset) {
- case 0x92:
- case 0x94:
- case 0x96:
- hwsq_wr32(hwsq, 0x004800, divs);
- break;
- default:
- hwsq_wr32(hwsq, 0x004700, divs);
- break;
- }
-
- hwsq_wr32(hwsq, 0x00c040, mast);
-
- /* core/shader: make sure sclk/nvclk are disconnected from their
- * PLLs (nvclk to dom6, sclk to hclk)
- */
- if (nv_device(drm->device)->chipset < 0x92)
- mast = (mast & ~0x001000b0) | 0x00100080;
- else
- mast = (mast & ~0x000000b3) | 0x00000081;
-
- hwsq_wr32(hwsq, 0x00c040, mast);
-
- /* core: for the moment at least, always use nvpll */
- clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
- if (clk == 0)
- goto error;
-
- ctrl = nv_rd32(device, 0x004028) & ~0xc03f0100;
- mast &= ~0x00100000;
- mast |= 3;
-
- hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
- hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
-
- /* shader: tie to nvclk if possible, otherwise use spll. have to be
- * very careful that the shader clock is at least twice the core, or
- * some chipsets will be very unhappy. i expect most or all of these
- * cases will be handled by tying to nvclk, but it's possible there's
- * corners
- */
- ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
-
- if (P1-- && perflvl->shader == (perflvl->core << 1)) {
- hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
- hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
- } else {
- clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
- if (clk == 0)
- goto error;
- ctrl |= 0x80000000;
-
- hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
- hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
- hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
- }
-
- hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
- hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
- hwsq_fini(hwsq);
-
- return info;
-error:
- kfree(info);
- return ERR_PTR(ret);
-}
-
-static int
-prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 hwsq_data, hwsq_kick;
- int i;
-
- if (nv_device(drm->device)->chipset < 0x94) {
- hwsq_data = 0x001400;
- hwsq_kick = 0x00000003;
- } else {
- hwsq_data = 0x080000;
- hwsq_kick = 0x00000001;
- }
- /* upload hwsq ucode */
- nv_mask(device, 0x001098, 0x00000008, 0x00000000);
- nv_wr32(device, 0x001304, 0x00000000);
- if (nv_device(drm->device)->chipset >= 0x92)
- nv_wr32(device, 0x001318, 0x00000000);
- for (i = 0; i < hwsq->len / 4; i++)
- nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
- nv_mask(device, 0x001098, 0x00000018, 0x00000018);
-
- /* launch, and wait for completion */
- nv_wr32(device, 0x00130c, hwsq_kick);
- if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
- NV_ERROR(drm, "hwsq ucode exec timed out\n");
- NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
- for (i = 0; i < hwsq->len / 4; i++) {
- NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
- nv_rd32(device, 0x001400 + (i * 4)));
- }
-
- return -EIO;
- }
-
- return 0;
-}
-
-int
-nv50_pm_clocks_set(struct drm_device *dev, void *data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nv50_pm_state *info = data;
- struct bit_entry M;
- int ret = -EBUSY;
-
- /* halt and idle execution engines */
- nv_mask(device, 0x002504, 0x00000001, 0x00000001);
- if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
- goto resume;
- if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
- goto resume;
-
- /* program memory clock, if necessary - must come before engine clock
- * reprogramming due to how we construct the hwsq scripts in pre()
- */
-#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
- if (info->mclk_hwsq.len) {
- /* execute some scripts that do ??? from the vbios.. */
- if (!bit_table(dev, 'M', &M) && M.version == 1) {
- if (M.length >= 6)
- nouveau_bios_init_exec(dev, ROM16(M.data[5]));
- if (M.length >= 8)
- nouveau_bios_init_exec(dev, ROM16(M.data[7]));
- if (M.length >= 10)
- nouveau_bios_init_exec(dev, ROM16(M.data[9]));
- nouveau_bios_init_exec(dev, info->mscript);
- }
-
- ret = prog_hwsq(dev, &info->mclk_hwsq);
- if (ret)
- goto resume;
- }
-
- /* program engine clocks */
- ret = prog_hwsq(dev, &info->eclk_hwsq);
-
-resume:
- nv_mask(device, 0x002504, 0x00000001, 0x00000000);
- kfree(info);
- return ret;
-}
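
The memory reclock in the file above is driven by a small HWSQ ucode built with the helpers from "nouveau_hwsq.h" and kicked off by prog_hwsq(). A hedged, deliberately trivial example of building such a sequence with the same helpers already used above (the register writes are illustration only, not a working reclock script):

static void
demo_build_hwsq(struct hwsq_ucode *hwsq)
{
	hwsq_init(hwsq);
	hwsq_setf(hwsq, 0x10, 0);		/* disable bus access */
	hwsq_wr32(hwsq, 0x100210, 0x00000000);	/* e.g. stop auto-refresh */
	hwsq_usec(hwsq, 10);			/* let things settle */
	hwsq_wr32(hwsq, 0x100210, 0x80000000);	/* restore auto-refresh */
	hwsq_setf(hwsq, 0x10, 1);		/* re-enable bus access */
	hwsq_fini(hwsq);
}
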
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
deleted file mode 100644
index 0d0ed597fea..00000000000
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/bios.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32 read_clk(struct drm_device *, int, bool);
-static u32 read_pll(struct drm_device *, int, u32);
-
-static u32
-read_vco(struct drm_device *dev, int clk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
- if ((sctl & 0x00000030) != 0x00000030)
- return read_pll(dev, 0x41, 0x00e820);
- return read_pll(dev, 0x42, 0x00e8a0);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk, bool ignore_en)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 sctl, sdiv, sclk;
-
- /* refclk for the 0xe8xx plls is a fixed frequency */
- if (clk >= 0x40) {
- if (nv_device(drm->device)->chipset == 0xaf) {
- /* no joke.. seriously.. sigh.. */
- return nv_rd32(device, 0x00471c) * 1000;
- }
-
- return device->crystal;
- }
-
- sctl = nv_rd32(device, 0x4120 + (clk * 4));
- if (!ignore_en && !(sctl & 0x00000100))
- return 0;
-
- switch (sctl & 0x00003000) {
- case 0x00000000:
- return device->crystal;
- case 0x00002000:
- if (sctl & 0x00000040)
- return 108000;
- return 100000;
- case 0x00003000:
- sclk = read_vco(dev, clk);
- sdiv = ((sctl & 0x003f0000) >> 16) + 2;
- return (sclk * 2) / sdiv;
- default:
- return 0;
- }
-}
-
-static u32
-read_pll(struct drm_device *dev, int clk, u32 pll)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ctrl = nv_rd32(device, pll + 0);
- u32 sclk = 0, P = 1, N = 1, M = 1;
-
- if (!(ctrl & 0x00000008)) {
- if (ctrl & 0x00000001) {
- u32 coef = nv_rd32(device, pll + 4);
- M = (coef & 0x000000ff) >> 0;
- N = (coef & 0x0000ff00) >> 8;
- P = (coef & 0x003f0000) >> 16;
-
- /* no post-divider on these.. */
- if ((pll & 0x00ff00) == 0x00e800)
- P = 1;
-
- sclk = read_clk(dev, 0x00 + clk, false);
- }
- } else {
- sclk = read_clk(dev, 0x10 + clk, false);
- }
-
- if (M * P)
- return sclk * N / (M * P);
- return 0;
-}
-
-struct creg {
- u32 clk;
- u32 pll;
-};
-
-static int
-calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nvbios_pll limits;
- u32 oclk, sclk, sdiv;
- int P, N, M, diff;
- int ret;
-
- reg->pll = 0;
- reg->clk = 0;
- if (!khz) {
- NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
- return 0;
- }
-
- switch (khz) {
- case 27000:
- reg->clk = 0x00000100;
- return khz;
- case 100000:
- reg->clk = 0x00002100;
- return khz;
- case 108000:
- reg->clk = 0x00002140;
- return khz;
- default:
- sclk = read_vco(dev, clk);
- sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
- /* if the clock has a PLL attached, and we can get a within
- * [-2, 3) MHz of a divider, we'll disable the PLL and use
- * the divider instead.
- *
- * divider can go as low as 2, limited here because NVIDIA
- * and the VBIOS on my NVA8 seem to prefer using the PLL
- * for 810MHz - is there a good reason?
- */
- if (sdiv > 4) {
- oclk = (sclk * 2) / sdiv;
- diff = khz - oclk;
- if (!pll || (diff >= -2000 && diff < 3000)) {
- reg->clk = (((sdiv - 2) << 16) | 0x00003100);
- return oclk;
- }
- }
-
- if (!pll) {
- NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
- return -ERANGE;
- }
-
- break;
- }
-
- ret = nvbios_pll_parse(bios, pll, &limits);
- if (ret)
- return ret;
-
- limits.refclk = read_clk(dev, clk - 0x10, true);
- if (!limits.refclk)
- return -EINVAL;
-
- ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
- if (ret >= 0) {
- reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
- reg->pll = (P << 16) | (N << 8) | M;
- }
-
- return ret;
-}
-
-static void
-prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 src0 = 0x004120 + (clk * 4);
- const u32 src1 = 0x004160 + (clk * 4);
- const u32 ctrl = pll + 0;
- const u32 coef = pll + 4;
-
- if (!reg->clk && !reg->pll) {
- NV_DEBUG(drm, "no clock for %02x\n", clk);
- return;
- }
-
- if (reg->pll) {
- nv_mask(device, src0, 0x00000101, 0x00000101);
- nv_wr32(device, coef, reg->pll);
- nv_mask(device, ctrl, 0x00000015, 0x00000015);
- nv_mask(device, ctrl, 0x00000010, 0x00000000);
- nv_wait(device, ctrl, 0x00020000, 0x00020000);
- nv_mask(device, ctrl, 0x00000010, 0x00000010);
- nv_mask(device, ctrl, 0x00000008, 0x00000000);
- nv_mask(device, src1, 0x00000100, 0x00000000);
- nv_mask(device, src1, 0x00000001, 0x00000000);
- } else {
- nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
- nv_mask(device, ctrl, 0x00000018, 0x00000018);
- udelay(20);
- nv_mask(device, ctrl, 0x00000001, 0x00000000);
- nv_mask(device, src0, 0x00000100, 0x00000000);
- nv_mask(device, src0, 0x00000001, 0x00000000);
- }
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct creg *reg)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (!reg->clk) {
- NV_DEBUG(drm, "no clock for %02x\n", clk);
- return;
- }
-
- nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
-}
-
-int
-nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- perflvl->core = read_pll(dev, 0x00, 0x4200);
- perflvl->shader = read_pll(dev, 0x01, 0x4220);
- perflvl->memory = read_pll(dev, 0x02, 0x4000);
- perflvl->unka0 = read_clk(dev, 0x20, false);
- perflvl->vdec = read_clk(dev, 0x21, false);
- perflvl->daemon = read_clk(dev, 0x25, false);
- perflvl->copy = perflvl->core;
- return 0;
-}
-
-struct nva3_pm_state {
- struct nouveau_pm_level *perflvl;
-
- struct creg nclk;
- struct creg sclk;
- struct creg vdec;
- struct creg unka0;
-
- struct creg mclk;
- u8 *rammap;
- u8 rammap_ver;
- u8 rammap_len;
- u8 *ramcfg;
- u8 ramcfg_len;
- u32 r004018;
- u32 r100760;
-};
-
-void *
-nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nva3_pm_state *info;
- u8 ramcfg_cnt;
- int ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
- if (ret < 0)
- goto out;
-
- ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
- if (ret < 0)
- goto out;
-
- ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
- if (ret < 0)
- goto out;
-
- ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
- if (ret < 0)
- goto out;
-
- ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
- if (ret < 0)
- goto out;
-
- info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
- &info->rammap_ver,
- &info->rammap_len,
- &ramcfg_cnt, &info->ramcfg_len);
- if (info->rammap_ver != 0x10 || info->rammap_len < 5)
- info->rammap = NULL;
-
- info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
- &info->rammap_ver,
- &info->ramcfg_len);
- if (info->rammap_ver != 0x10)
- info->ramcfg = NULL;
-
- info->perflvl = perflvl;
-out:
- if (ret < 0) {
- kfree(info);
- info = ERR_PTR(ret);
- }
- return info;
-}
-
-static bool
-nva3_pm_grcp_idle(void *data)
-{
- struct drm_device *dev = data;
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x400304) & 0x00000001))
- return true;
- if (nv_rd32(device, 0x400308) == 0x0050001c)
- return true;
- return false;
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- nv_wr32(device, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- nv_wr32(device, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- volatile u32 post = nv_rd32(device, 0); (void)post;
- udelay((nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- if (mr <= 1)
- return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
- if (mr <= 3)
- return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
- return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- if (mr <= 1) {
- if (pfb->ram->ranks > 1)
- nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
- nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
- } else
- if (mr <= 3) {
- if (pfb->ram->ranks > 1)
- nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
- nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
- }
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nva3_pm_state *info = exec->priv;
- u32 ctrl;
-
- ctrl = nv_rd32(device, 0x004000);
- if (!(ctrl & 0x00000008) && info->mclk.pll) {
- nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
- nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
- nv_wr32(device, 0x004018, 0x00001000);
- nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
- nv_wr32(device, 0x004004, info->mclk.pll);
- nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
- udelay(64);
- nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
- udelay(20);
- } else
- if (!info->mclk.pll) {
- nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
- nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
- nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
- nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
- }
-
- if (info->rammap) {
- if (info->ramcfg && (info->rammap[4] & 0x08)) {
- u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
- info->ramcfg[5];
- u32 unk5a4 = ROM16(info->ramcfg[7]);
- u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
- (info->ramcfg[3] & 0x0f) << 16 |
- (info->ramcfg[9] & 0x0f) |
- 0x80000000;
- nv_wr32(device, 0x1005a0, unk5a0);
- nv_wr32(device, 0x1005a4, unk5a4);
- nv_wr32(device, 0x10f804, unk804);
- nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
- } else {
- nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
- nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
- nv_mask(device, 0x100760, 0x22222222, info->r100760);
- nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
- nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
- }
- }
-
- if (info->mclk.pll) {
- nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
- nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
- }
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nva3_pm_state *info = exec->priv;
- struct nouveau_pm_level *perflvl = info->perflvl;
- int i;
-
- for (i = 0; i < 9; i++)
- nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
-
- if (info->ramcfg) {
- u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
- nv_mask(device, 0x100200, 0x00001000, data);
- }
-
- if (info->ramcfg) {
- u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
- u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
- u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
- if ( (info->ramcfg[2] & 0x20))
- unk714 |= 0xf0000000;
- if (!(info->ramcfg[2] & 0x04))
- unk714 |= 0x00000010;
- nv_wr32(device, 0x100714, unk714);
-
- if (info->ramcfg[2] & 0x01)
- unk71c |= 0x00000100;
- nv_wr32(device, 0x10071c, unk71c);
-
- if (info->ramcfg[2] & 0x02)
- unk718 |= 0x00000100;
- nv_wr32(device, 0x100718, unk718);
-
- if (info->ramcfg[2] & 0x10)
- nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
- }
-}
-
-static void
-prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_mem_exec_func exec = {
- .dev = dev,
- .precharge = mclk_precharge,
- .refresh = mclk_refresh,
- .refresh_auto = mclk_refresh_auto,
- .refresh_self = mclk_refresh_self,
- .wait = mclk_wait,
- .mrg = mclk_mrg,
- .mrs = mclk_mrs,
- .clock_set = mclk_clock_set,
- .timing_set = mclk_timing_set,
- .priv = info
- };
- u32 ctrl;
-
- /* XXX: where the fuck does 750MHz come from? */
- if (info->perflvl->memory <= 750000) {
- info->r004018 = 0x10000000;
- info->r100760 = 0x22222222;
- }
-
- ctrl = nv_rd32(device, 0x004000);
- if (ctrl & 0x00000008) {
- if (info->mclk.pll) {
- nv_mask(device, 0x004128, 0x00000101, 0x00000101);
- nv_wr32(device, 0x004004, info->mclk.pll);
- nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
- nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
- nv_wait(device, 0x004000, 0x00020000, 0x00020000);
- nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
- nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
- nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
- }
- } else {
- u32 ssel = 0x00000101;
- if (info->mclk.clk)
- ssel |= info->mclk.clk;
- else
- ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
- nv_mask(device, 0x004168, 0x003f3141, ctrl);
- }
-
- if (info->ramcfg) {
- if (info->ramcfg[2] & 0x10) {
- nv_mask(device, 0x111104, 0x00000600, 0x00000000);
- } else {
- nv_mask(device, 0x111100, 0x40000000, 0x40000000);
- nv_mask(device, 0x111104, 0x00000180, 0x00000000);
- }
- }
- if (info->rammap && !(info->rammap[4] & 0x02))
- nv_mask(device, 0x100200, 0x00000800, 0x00000000);
- nv_wr32(device, 0x611200, 0x00003300);
- if (!(info->ramcfg[2] & 0x10))
- nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
-
- nouveau_mem_exec(&exec, info->perflvl);
-
- nv_wr32(device, 0x611200, 0x00003330);
- if (info->rammap && (info->rammap[4] & 0x02))
- nv_mask(device, 0x100200, 0x00000800, 0x00000800);
- if (info->ramcfg) {
- if (info->ramcfg[2] & 0x10) {
- nv_mask(device, 0x111104, 0x00000180, 0x00000180);
- nv_mask(device, 0x111100, 0x40000000, 0x00000000);
- } else {
- nv_mask(device, 0x111104, 0x00000600, 0x00000600);
- }
- }
-
- if (info->mclk.pll) {
- nv_mask(device, 0x004168, 0x00000001, 0x00000000);
- nv_mask(device, 0x004168, 0x00000100, 0x00000000);
- } else {
- nv_mask(device, 0x004000, 0x00000001, 0x00000000);
- nv_mask(device, 0x004128, 0x00000001, 0x00000000);
- nv_mask(device, 0x004128, 0x00000100, 0x00000000);
- }
-}
-
-int
-nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nva3_pm_state *info = pre_state;
- int ret = -EAGAIN;
-
- /* prevent any new grctx switches from starting */
- nv_wr32(device, 0x400324, 0x00000000);
- nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
- /* wait for any pending grctx switches to complete */
- if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
- NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
- goto cleanup;
- }
- /* freeze PFIFO */
- nv_mask(device, 0x002504, 0x00000001, 0x00000001);
- if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
- NV_ERROR(drm, "pm: fifo didn't go idle\n");
- goto cleanup;
- }
-
- prog_pll(dev, 0x00, 0x004200, &info->nclk);
- prog_pll(dev, 0x01, 0x004220, &info->sclk);
- prog_clk(dev, 0x20, &info->unka0);
- prog_clk(dev, 0x21, &info->vdec);
-
- if (info->mclk.clk || info->mclk.pll)
- prog_mem(dev, info);
-
- ret = 0;
-
-cleanup:
- /* unfreeze PFIFO */
- nv_mask(device, 0x002504, 0x00000001, 0x00000000);
- /* restore ctxprog to normal */
- nv_wr32(device, 0x400324, 0x00000000);
- nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
- /* unblock it if necessary */
- if (nv_rd32(device, 0x400308) == 0x0050001c)
- nv_mask(device, 0x400824, 0x10000000, 0x10000000);
- kfree(info);
- return ret;
-}
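
The removed nva3 code above derives each engine clock either from a PLL (output = refclk * N / (M * P), with M, N and P packed into one coefficient register) or from a post-VCO divider, and calc_clk() prefers the divider whenever it can land within roughly -2/+3 MHz of the target. The following is a minimal standalone sketch of that arithmetic only; it is not the driver code, and the lower bound of 2 on the divider follows the comment in the deleted function rather than any separate documentation.

#include <stdint.h>
#include <stdio.h>

/* PLL output as computed by the deleted nva3 read_pll():
 * coefficient register packs M in bits 0:7, N in 8:15, P in 16:21 */
static uint32_t pll_khz(uint32_t refclk_khz, uint32_t coef)
{
	uint32_t M = (coef & 0x000000ff) >> 0;
	uint32_t N = (coef & 0x0000ff00) >> 8;
	uint32_t P = (coef & 0x003f0000) >> 16;

	if (M == 0 || P == 0)
		return 0;
	return refclk_khz * N / (M * P);
}

/* divider candidate as computed by the deleted calc_clk(); the removed code
 * then accepted it only if the result was within [-2, 3) MHz of the target */
static uint32_t div_khz(uint32_t vco_khz, uint32_t target_khz)
{
	uint32_t sdiv = (vco_khz * 2) / (target_khz - 2999);

	if (sdiv > 65)
		sdiv = 65;
	if (sdiv < 2)
		sdiv = 2;
	return (vco_khz * 2) / sdiv;
}

int main(void)
{
	/* 810 MHz from a 27 MHz reference: N=60, M=2, P=1 */
	printf("pll: %u kHz\n", pll_khz(27000, (1 << 16) | (60 << 8) | 2));
	printf("div: %u kHz\n", div_khz(1620000, 405000));
	return 0;
}
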
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
deleted file mode 100644
index 3b7041cb013..00000000000
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/bios.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32 read_div(struct drm_device *, int, u32, u32);
-static u32 read_pll(struct drm_device *, u32);
-
-static u32
-read_vco(struct drm_device *dev, u32 dsrc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ssrc = nv_rd32(device, dsrc);
- if (!(ssrc & 0x00000100))
- return read_pll(dev, 0x00e800);
- return read_pll(dev, 0x00e820);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 pll)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ctrl = nv_rd32(device, pll + 0);
- u32 coef = nv_rd32(device, pll + 4);
- u32 P = (coef & 0x003f0000) >> 16;
- u32 N = (coef & 0x0000ff00) >> 8;
- u32 M = (coef & 0x000000ff) >> 0;
- u32 sclk, doff;
-
- if (!(ctrl & 0x00000001))
- return 0;
-
- switch (pll & 0xfff000) {
- case 0x00e000:
- sclk = 27000;
- P = 1;
- break;
- case 0x137000:
- doff = (pll - 0x137000) / 0x20;
- sclk = read_div(dev, doff, 0x137120, 0x137140);
- break;
- case 0x132000:
- switch (pll) {
- case 0x132000:
- sclk = read_pll(dev, 0x132020);
- break;
- case 0x132020:
- sclk = read_div(dev, 0, 0x137320, 0x137330);
- break;
- default:
- return 0;
- }
- break;
- default:
- return 0;
- }
-
- return sclk * N / M / P;
-}
-
-static u32
-read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
- u32 sctl = nv_rd32(device, dctl + (doff * 4));
-
- switch (ssrc & 0x00000003) {
- case 0:
- if ((ssrc & 0x00030000) != 0x00030000)
- return 27000;
- return 108000;
- case 2:
- return 100000;
- case 3:
- if (sctl & 0x80000000) {
- u32 sclk = read_vco(dev, dsrc + (doff * 4));
- u32 sdiv = (sctl & 0x0000003f) + 2;
- return (sclk * 2) / sdiv;
- }
-
- return read_vco(dev, dsrc + (doff * 4));
- default:
- return 0;
- }
-}
-
-static u32
-read_mem(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 ssel = nv_rd32(device, 0x1373f0);
- if (ssel & 0x00000001)
- return read_div(dev, 0, 0x137300, 0x137310);
- return read_pll(dev, 0x132000);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
- u32 ssel = nv_rd32(device, 0x137100);
- u32 sclk, sdiv;
-
- if (ssel & (1 << clk)) {
- if (clk < 7)
- sclk = read_pll(dev, 0x137000 + (clk * 0x20));
- else
- sclk = read_pll(dev, 0x1370e0);
- sdiv = ((sctl & 0x00003f00) >> 8) + 2;
- } else {
- sclk = read_div(dev, clk, 0x137160, 0x1371d0);
- sdiv = ((sctl & 0x0000003f) >> 0) + 2;
- }
-
- if (sctl & 0x80000000)
- return (sclk * 2) / sdiv;
- return sclk;
-}
-
-int
-nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- perflvl->shader = read_clk(dev, 0x00);
- perflvl->core = perflvl->shader / 2;
- perflvl->memory = read_mem(dev);
- perflvl->rop = read_clk(dev, 0x01);
- perflvl->hub07 = read_clk(dev, 0x02);
- perflvl->hub06 = read_clk(dev, 0x07);
- perflvl->hub01 = read_clk(dev, 0x08);
- perflvl->copy = read_clk(dev, 0x09);
- perflvl->daemon = read_clk(dev, 0x0c);
- perflvl->vdec = read_clk(dev, 0x0e);
- return 0;
-}
-
-struct nvc0_pm_clock {
- u32 freq;
- u32 ssel;
- u32 mdiv;
- u32 dsrc;
- u32 ddiv;
- u32 coef;
-};
-
-struct nvc0_pm_state {
- struct nouveau_pm_level *perflvl;
- struct nvc0_pm_clock eng[16];
- struct nvc0_pm_clock mem;
-};
-
-static u32
-calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
-{
- u32 div = min((ref * 2) / freq, (u32)65);
- if (div < 2)
- div = 2;
-
- *ddiv = div - 2;
- return (ref * 2) / div;
-}
-
-static u32
-calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
-{
- u32 sclk;
-
- /* use one of the fixed frequencies if possible */
- *ddiv = 0x00000000;
- switch (freq) {
- case 27000:
- case 108000:
- *dsrc = 0x00000000;
- if (freq == 108000)
- *dsrc |= 0x00030000;
- return freq;
- case 100000:
- *dsrc = 0x00000002;
- return freq;
- default:
- *dsrc = 0x00000003;
- break;
- }
-
- /* otherwise, calculate the closest divider */
- sclk = read_vco(dev, clk);
- if (clk < 7)
- sclk = calc_div(dev, clk, sclk, freq, ddiv);
- return sclk;
-}
-
-static u32
-calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nvbios_pll limits;
- int N, M, P, ret;
-
- ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
- if (ret)
- return 0;
-
- limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
- if (!limits.refclk)
- return 0;
-
- ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
- if (ret <= 0)
- return 0;
-
- *coef = (P << 16) | (N << 8) | M;
- return ret;
-}
-
-/* A (likely rather simplified and incomplete) view of the clock tree
- *
- * Key:
- *
- * S: source select
- * D: divider
- * P: pll
- * F: switch
- *
- * Engine clocks:
- *
- * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
- * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
- *
- * Not all registers exist for all clocks. For example: clocks >= 8 don't
- * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
- * they have the divider at 1371d0, though the source selection at 137160
- * still exists. You must use the divider at 137250 for these instead.
- *
- * Memory clock:
- *
- * TBD, read_mem() above is likely very wrong...
- *
- */
-
-static int
-calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
-{
- u32 src0, div0, div1D, div1P = 0;
- u32 clk0, clk1 = 0;
-
- /* invalid clock domain */
- if (!freq)
- return 0;
-
- /* first possible path, using only dividers */
- clk0 = calc_src(dev, clk, freq, &src0, &div0);
- clk0 = calc_div(dev, clk, clk0, freq, &div1D);
-
- /* see if we can get any closer using PLLs */
- if (clk0 != freq && (0x00004387 & (1 << clk))) {
- if (clk < 7)
- clk1 = calc_pll(dev, clk, freq, &info->coef);
- else
- clk1 = read_pll(dev, 0x1370e0);
- clk1 = calc_div(dev, clk, clk1, freq, &div1P);
- }
-
- /* select the method which gets closest to target freq */
- if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
- info->dsrc = src0;
- if (div0) {
- info->ddiv |= 0x80000000;
- info->ddiv |= div0 << 8;
- info->ddiv |= div0;
- }
- if (div1D) {
- info->mdiv |= 0x80000000;
- info->mdiv |= div1D;
- }
- info->ssel = 0;
- info->freq = clk0;
- } else {
- if (div1P) {
- info->mdiv |= 0x80000000;
- info->mdiv |= div1P << 8;
- }
- info->ssel = (1 << clk);
- info->freq = clk1;
- }
-
- return 0;
-}
-
-static int
-calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
- struct nvbios_pll pll;
- int N, M, P, ret;
- u32 ctrl;
-
- /* mclk pll input freq comes from another pll, make sure it's on */
- ctrl = nv_rd32(device, 0x132020);
- if (!(ctrl & 0x00000001)) {
- /* if not, program it to 567MHz. nfi where this value comes
- * from - it looks like it's in the pll limits table for
- * 132000 but the binary driver ignores all my attempts to
- * change this value.
- */
- nv_wr32(device, 0x137320, 0x00000103);
- nv_wr32(device, 0x137330, 0x81200606);
- nv_wait(device, 0x132020, 0x00010000, 0x00010000);
- nv_wr32(device, 0x132024, 0x0001150f);
- nv_mask(device, 0x132020, 0x00000001, 0x00000001);
- nv_wait(device, 0x137390, 0x00020000, 0x00020000);
- nv_mask(device, 0x132020, 0x00000004, 0x00000004);
- }
-
- /* for the moment, until the clock tree is better understood, use
- * pll mode for all clock frequencies
- */
- ret = nvbios_pll_parse(bios, 0x132000, &pll);
- if (ret == 0) {
- pll.refclk = read_pll(dev, 0x132020);
- if (pll.refclk) {
- ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
- if (ret > 0) {
- info->coef = (P << 16) | (N << 8) | M;
- return 0;
- }
- }
- }
-
- return -EINVAL;
-}
-
-void *
-nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvc0_pm_state *info;
- int ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- /* NFI why this is still in the performance table, the ROPCs appear
- * to get their clock from clock 2 ("hub07", actually hub05 on this
- * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
- * are always the same freq with the binary driver even when the
- * performance table says they should differ.
- */
- if (device->chipset == 0xd9)
- perflvl->rop = 0;
-
- if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
- (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
- (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
- (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
- (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
- (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
- (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
- (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
- kfree(info);
- return ERR_PTR(ret);
- }
-
- if (perflvl->memory) {
- ret = calc_mem(dev, &info->mem, perflvl->memory);
- if (ret) {
- kfree(info);
- return ERR_PTR(ret);
- }
- }
-
- info->perflvl = perflvl;
- return info;
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- /* program dividers at 137160/1371d0 first */
- if (clk < 7 && !info->ssel) {
- nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
- nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
- }
-
- /* switch clock to non-pll mode */
- nv_mask(device, 0x137100, (1 << clk), 0x00000000);
- nv_wait(device, 0x137100, (1 << clk), 0x00000000);
-
- /* reprogram pll */
- if (clk < 7) {
- /* make sure it's disabled first... */
- u32 base = 0x137000 + (clk * 0x20);
- u32 ctrl = nv_rd32(device, base + 0x00);
- if (ctrl & 0x00000001) {
- nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
- nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
- }
- /* program it to new values, if necessary */
- if (info->ssel) {
- nv_wr32(device, base + 0x04, info->coef);
- nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
- nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
- nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
- }
- }
-
- /* select pll/non-pll mode, and program final clock divider */
- nv_mask(device, 0x137100, (1 << clk), info->ssel);
- nv_wait(device, 0x137100, (1 << clk), info->ssel);
- nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
- udelay((nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
- if (mr <= 1)
- return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
- return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
- } else {
- if (mr == 0)
- return nv_rd32(device, 0x10f300 + (mr * 4));
- else
- if (mr <= 7)
- return nv_rd32(device, 0x10f32c + (mr * 4));
- return nv_rd32(device, 0x10f34c);
- }
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nouveau_fb *pfb = nouveau_fb(device);
- if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
- if (mr <= 1) {
- nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
- if (pfb->ram->ranks > 1)
- nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
- } else
- if (mr <= 3) {
- nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
- if (pfb->ram->ranks > 1)
- nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
- }
- } else {
- if (mr == 0) nv_wr32(device, 0x10f300 + (mr * 4), data);
- else if (mr <= 7) nv_wr32(device, 0x10f32c + (mr * 4), data);
- else if (mr == 15) nv_wr32(device, 0x10f34c, data);
- }
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nvc0_pm_state *info = exec->priv;
- u32 ctrl = nv_rd32(device, 0x132000);
-
- nv_wr32(device, 0x137360, 0x00000001);
- nv_wr32(device, 0x137370, 0x00000000);
- nv_wr32(device, 0x137380, 0x00000000);
- if (ctrl & 0x00000001)
- nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
-
- nv_wr32(device, 0x132004, info->mem.coef);
- nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
- nv_wait(device, 0x137390, 0x00000002, 0x00000002);
- nv_wr32(device, 0x132018, 0x00005000);
-
- nv_wr32(device, 0x137370, 0x00000001);
- nv_wr32(device, 0x137380, 0x00000001);
- nv_wr32(device, 0x137360, 0x00000000);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
- struct nouveau_device *device = nouveau_dev(exec->dev);
- struct nvc0_pm_state *info = exec->priv;
- struct nouveau_pm_level *perflvl = info->perflvl;
- int i;
-
- for (i = 0; i < 5; i++)
- nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
-}
-
-static void
-prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_mem_exec_func exec = {
- .dev = dev,
- .precharge = mclk_precharge,
- .refresh = mclk_refresh,
- .refresh_auto = mclk_refresh_auto,
- .refresh_self = mclk_refresh_self,
- .wait = mclk_wait,
- .mrg = mclk_mrg,
- .mrs = mclk_mrs,
- .clock_set = mclk_clock_set,
- .timing_set = mclk_timing_set,
- .priv = info
- };
-
- if (device->chipset < 0xd0)
- nv_wr32(device, 0x611200, 0x00003300);
- else
- nv_wr32(device, 0x62c000, 0x03030000);
-
- nouveau_mem_exec(&exec, info->perflvl);
-
- if (device->chipset < 0xd0)
- nv_wr32(device, 0x611200, 0x00003330);
- else
- nv_wr32(device, 0x62c000, 0x03030300);
-}
-int
-nvc0_pm_clocks_set(struct drm_device *dev, void *data)
-{
- struct nvc0_pm_state *info = data;
- int i;
-
- if (info->mem.coef)
- prog_mem(dev, info);
-
- for (i = 0; i < 16; i++) {
- if (!info->eng[i].freq)
- continue;
- prog_clk(dev, i, &info->eng[i]);
- }
-
- kfree(info);
- return 0;
-}
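
The clock-tree comment in the removed nvc0 file describes two paths per engine clock: a divider fed from a fixed source or the VCO, and a PLL followed by a final divider, with calc_clk() keeping whichever result lands closer to the target. Below is a small standalone sketch of the divider step and the closest-wins selection, mirroring the deleted calc_div()/calc_clk(); the register encoding of div - 2 is taken from that code, and the second input frequency stands in for a hypothetical PLL output.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* divider step as in the deleted nvc0 calc_div():
 * output = (ref * 2) / div, div clamped to [2, 65], programmed as div - 2 */
static uint32_t nvc0_div(uint32_t ref_khz, uint32_t target_khz, uint32_t *ddiv)
{
	uint32_t div = (ref_khz * 2) / target_khz;

	if (div > 65)
		div = 65;
	if (div < 2)
		div = 2;
	*ddiv = div - 2;
	return (ref_khz * 2) / div;
}

int main(void)
{
	uint32_t ddiv0, ddiv1;
	uint32_t target = 700000;                          /* 700 MHz */
	uint32_t clk0 = nvc0_div(1620000, target, &ddiv0); /* divider-only path */
	uint32_t clk1 = nvc0_div(1404000, target, &ddiv1); /* hypothetical PLL + divider path */

	/* closest-wins selection, as in the deleted calc_clk() */
	if (abs((int)target - (int)clk0) <= abs((int)target - (int)clk1))
		printf("divider path: %u kHz (ddiv %u)\n", clk0, ddiv0);
	else
		printf("pll path: %u kHz (ddiv %u)\n", clk1, ddiv1);
	return 0;
}
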
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e73d44..6c220cd3497 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
depends on OMAP2_DSS
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf667859cb..701c4c10e08 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
}
/* set dma mask for device */
- /* NOTE: this is a workaround for the hwmod not initializing properly */
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d909f49..e7fa3cd9674 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_init_object = omap_gem_init_object,
.gem_free_object = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b73665..07847693cf4 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6ebec53..5aec3e81fe2 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@ unlock:
return ret;
}
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
- return -EINVAL; /* unused */
-}
-
/* don't call directly.. called from GEM core when it is time to actually
* free the object..
*/
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db117ff..cb858600185 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
- dev->irq_enabled = 1;
+ dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
/* Before installing handler */
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
dispc_free_irq(dev);
}
@@ -294,11 +294,12 @@ int omap_drm_irq_install(struct drm_device *dev)
int omap_drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
- int irq_enabled, i;
+ bool irq_enabled;
+ int i;
mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
/*
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] =
+ DRM_WAKEUP(&dev->vblank[i].queue);
+ dev->vblank[i].enabled = false;
+ dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c12796023..037d324bf58 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba026d..5e827c29d19 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -107,10 +107,17 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
}
- drm_helper_hpd_irq_event(qdev->ddev);
+
+ if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+ /* notify that the monitor configuration changed, so
+ the display can be adjusted to the arbitrary resolution */
+ drm_kms_helper_hotplug_event(qdev->ddev);
+ }
}
-static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+static int qxl_add_monitors_config_modes(struct drm_connector *connector,
+ unsigned *pwidth,
+ unsigned *pheight)
{
struct drm_device *dev = connector->dev;
struct qxl_device *qdev = dev->dev_private;
@@ -126,11 +133,15 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector)
mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
false);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+ *pwidth = head->width;
+ *pheight = head->height;
drm_mode_probed_add(connector, mode);
return 1;
}
-static int qxl_add_common_modes(struct drm_connector *connector)
+static int qxl_add_common_modes(struct drm_connector *connector,
+ unsigned pwidth,
+ unsigned pheight)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
@@ -159,12 +170,9 @@ static int qxl_add_common_modes(struct drm_connector *connector)
};
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
-
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
- if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+ if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
}
@@ -720,16 +728,18 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
{
int ret = 0;
struct qxl_device *qdev = connector->dev->dev_private;
+ unsigned pwidth = 1024;
+ unsigned pheight = 768;
DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
/* TODO: what should we do here? only show the configured modes for the
* device, or allow the full list, or both? */
if (qdev->monitors_config && qdev->monitors_config->count) {
- ret = qxl_add_monitors_config_modes(connector);
+ ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
if (ret < 0)
return ret;
}
- ret += qxl_add_common_modes(connector);
+ ret += qxl_add_common_modes(connector, pwidth, pheight);
return ret;
}
@@ -793,7 +803,10 @@ static enum drm_connector_status qxl_conn_detect(
qdev->client_monitors_config->count > output->index &&
qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
- DRM_DEBUG("\n");
+ DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+ if (!connected)
+ qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
+
return connected ? connector_status_connected
: connector_status_disconnected;
}
@@ -835,8 +848,21 @@ static const struct drm_encoder_funcs qxl_enc_funcs = {
.destroy = qxl_enc_destroy,
};
+static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
+{
+ if (qdev->hotplug_mode_update_property)
+ return 0;
+
+ qdev->hotplug_mode_update_property =
+ drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+ "hotplug_mode_update", 0, 1);
+
+ return 0;
+}
+
static int qdev_output_init(struct drm_device *dev, int num_output)
{
+ struct qxl_device *qdev = dev->dev_private;
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -863,6 +889,8 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+ drm_object_attach_property(&connector->base,
+ qdev->hotplug_mode_update_property, 0);
drm_sysfs_connector_add(connector);
return 0;
}
@@ -975,6 +1003,9 @@ int qxl_modeset_init(struct qxl_device *qdev)
qdev->ddev->mode_config.max_height = 8192;
qdev->ddev->mode_config.fb_base = qdev->vram_base;
+
+ qxl_mode_create_hotplug_mode_update_property(qdev);
+
for (i = 0 ; i < qxl_num_crtc; ++i) {
qdev_crtc_init(qdev->ddev, i);
qdev_output_init(qdev->ddev, i);
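
The qxl_display.c changes above thread the client-reported head size (pwidth/pheight, defaulting to 1024x768) into the common-mode list so the DRM_MODE_TYPE_PREFERRED flag follows the configured resolution instead of always landing on 1024x768. A toy illustration of that preference rule only; the mode list and names below are made up for illustration, not qxl structures.

#include <stdio.h>

struct toy_mode { int w, h, preferred; };

/* mark as preferred whichever mode matches the client-reported size */
static void mark_preferred(struct toy_mode *modes, int n, int pwidth, int pheight)
{
	for (int i = 0; i < n; i++)
		modes[i].preferred = (modes[i].w == pwidth && modes[i].h == pheight);
}

int main(void)
{
	struct toy_mode modes[] = { {1024, 768, 0}, {1280, 1024, 0}, {1920, 1080, 0} };

	mark_preferred(modes, 3, 1920, 1080);   /* client reported 1920x1080 */
	for (int i = 0; i < 3; i++)
		printf("%dx%d%s\n", modes[i].w, modes[i].h,
		       modes[i].preferred ? " (preferred)" : "");
	return 0;
}
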
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118ae72d..fee8748bdca 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
#endif
- .gem_init_object = qxl_gem_object_init,
.gem_free_object = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9adde46a..7bda32f68d3 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -323,6 +323,8 @@ struct qxl_device {
struct work_struct gc_work;
struct work_struct fb_work;
+
+ struct drm_property *hotplug_mode_update_property;
};
/* forward declaration for QXL_INFO_IO */
@@ -412,7 +414,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct qxl_surface *surf,
struct qxl_bo **qobj,
uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
void qxl_gem_object_free(struct drm_gem_object *gobj);
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 88722f23343..f437b30ce68 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -108,7 +108,7 @@ static void qxl_fb_dirty_flush(struct fb_info *info)
u32 x1, x2, y1, y2;
/* TODO: hard coding 32 bpp */
- int stride = qfbdev->qfb.base.pitches[0] * 4;
+ int stride = qfbdev->qfb.base.pitches[0];
x1 = qfbdev->dirty.x1;
x2 = qfbdev->dirty.x2;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e4125af..b96f0c9d89b 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-int qxl_gem_object_init(struct drm_gem_object *obj)
-{
- /* we do nothings here */
- return 0;
-}
-
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9e8da9ee973..e5ca498be92 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -120,7 +120,7 @@ int qxl_device_init(struct qxl_device *qdev,
struct pci_dev *pdev,
unsigned long flags)
{
- int r;
+ int r, sb;
qdev->dev = &pdev->dev;
qdev->ddev = ddev;
@@ -136,21 +136,39 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
qdev->vram_base = pci_resource_start(pdev, 0);
- qdev->surfaceram_base = pci_resource_start(pdev, 1);
- qdev->surfaceram_size = pci_resource_len(pdev, 1);
qdev->io_base = pci_resource_start(pdev, 3);
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
- qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
- DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+
+ if (pci_resource_len(pdev, 4) > 0) {
+ /* 64bit surface bar present */
+ sb = 4;
+ qdev->surfaceram_base = pci_resource_start(pdev, sb);
+ qdev->surfaceram_size = pci_resource_len(pdev, sb);
+ qdev->surface_mapping =
+ io_mapping_create_wc(qdev->surfaceram_base,
+ qdev->surfaceram_size);
+ }
+ if (qdev->surface_mapping == NULL) {
+ /* 64bit surface bar not present (or mapping failed) */
+ sb = 1;
+ qdev->surfaceram_base = pci_resource_start(pdev, sb);
+ qdev->surfaceram_size = pci_resource_len(pdev, sb);
+ qdev->surface_mapping =
+ io_mapping_create_wc(qdev->surfaceram_base,
+ qdev->surfaceram_size);
+ }
+
+ DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
(unsigned long long)qdev->vram_base,
(unsigned long long)pci_resource_end(pdev, 0),
(int)pci_resource_len(pdev, 0) / 1024 / 1024,
(int)pci_resource_len(pdev, 0) / 1024,
(unsigned long long)qdev->surfaceram_base,
- (unsigned long long)pci_resource_end(pdev, 1),
+ (unsigned long long)pci_resource_end(pdev, sb),
(int)qdev->surfaceram_size / 1024 / 1024,
- (int)qdev->surfaceram_size / 1024);
+ (int)qdev->surfaceram_size / 1024,
+ (sb == 4) ? "64bit" : "32bit");
qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
if (!qdev->rom) {
@@ -230,9 +248,13 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->surfaces_mem_slot = setup_slot(qdev, 1,
(unsigned long)qdev->surfaceram_base,
(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
- DRM_INFO("main mem slot %d [%lx,%x)\n",
- qdev->main_mem_slot,
- (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+ DRM_INFO("main mem slot %d [%lx,%x]\n",
+ qdev->main_mem_slot,
+ (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+ DRM_INFO("surface mem slot %d [%lx,%lx]\n",
+ qdev->surfaces_mem_slot,
+ (unsigned long)qdev->surfaceram_base,
+ (unsigned long)qdev->surfaceram_size);
qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
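
The qxl_kms.c hunk above switches surface memory to a 64-bit BAR when the device exposes one: BAR 4 is tried first, and BAR 1 is kept as the fallback when BAR 4 is absent or fails to map. A minimal sketch of that selection order follows, with the PCI resource and io_mapping calls replaced by plain stand-ins since this is not driver code.

#include <stdint.h>
#include <stdio.h>

struct bar { uint64_t base; uint64_t len; int mappable; };

/* returns the BAR index used for surface RAM: 4 when present and mappable,
 * otherwise 1 (mirrors the fallback order in the hunk above) */
static int pick_surface_bar(const struct bar bars[6])
{
	if (bars[4].len > 0 && bars[4].mappable)
		return 4;	/* 64-bit surface BAR */
	return 1;		/* 32-bit surface BAR */
}

int main(void)
{
	struct bar bars[6] = {
		[0] = { 0xd0000000, 64u << 20, 1 },	/* vram */
		[1] = { 0xf4000000, 64u << 20, 1 },	/* 32-bit surface BAR */
		[4] = { 0, 0, 0 },			/* no 64-bit surface BAR */
	};

	printf("surface BAR: %d\n", pick_surface_bar(bars));
	return 0;
}
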
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 037786d7c1d..c7e7e6590c2 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -516,6 +516,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
(unsigned)qdev->vram_size / (1024 * 1024));
DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+ DRM_INFO("qxl: %uM of Surface memory size\n",
+ (unsigned)qdev->surfaceram_size / (1024 * 1024));
if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
r = qxl_ttm_debugfs_init(qdev);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index af10f8571d8..92be50c39ff 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1711,7 +1711,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct definition for 30bpp should be 1 for 30bpp(5:4)
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
@@ -2223,7 +2225,7 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
USHORT usVoltageLevel; // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;
-
+// used by both SetVoltageTable v1.3 and v1.4
typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
{
UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
@@ -2290,15 +2292,36 @@ typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
#define ATOM_GET_VOLTAGE_VID 0x00
#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
-// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
-#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+#define ATOM_GET_VOLTAGE_SVID2 0x07 //Get SVI2 Regulator Info
+// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
-// undefined power state
+
#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
+// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+ UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
+ USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+ ULONG ulSCLKFreq; // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE 0x09
+
+// New Added from CI Hawaii for EVV feature
+typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+ USHORT usVoltageLevel; // real voltage level in unit of mv
+ USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
+ ULONG ulReseved;
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
+
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
@@ -3864,6 +3887,8 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
#define PP_AC_DC_SWITCH_GPIO_PINID 60
//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enabled
#define VDDC_VRHOT_GPIO_PINID 61
+//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
+#define VDDC_PCC_GPIO_PINID 62
typedef struct _ATOM_GPIO_PIN_LUT
{
@@ -4169,10 +4194,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
-
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_BRACKET_LAYOUT_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -4397,6 +4422,31 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
+{
+ USHORT usConnectorObjectId;
+ UCHAR ucConnectorType;
+ UCHAR ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D 1
+#define CONNECTOR_TYPE_DVI_I 2
+#define CONNECTOR_TYPE_VGA 3
+#define CONNECTOR_TYPE_HDMI 4
+#define CONNECTOR_TYPE_DISPLAY_PORT 5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT 6
+
+typedef struct _ATOM_BRACKET_LAYOUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucLength;
+ UCHAR ucWidth;
+ UCHAR ucConnNum;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
/****************************************************************************/
// ASIC voltage data table
/****************************************************************************/
@@ -4524,8 +4574,9 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequence through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV 8
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
typedef struct _VOLTAGE_LUT_ENTRY_V2
@@ -4552,6 +4603,10 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE 0
+#define VOLTAGE_DATA_TWO_BYTE 1
+
typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
@@ -4584,7 +4639,8 @@ typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
// 1:0 – offset trim,
USHORT usLoadLine_PSI;
// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
- UCHAR ucReserved[2];
+ UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31
+ UCHAR ucSVCGpioId; //0~31 indicate GPIO0~31
ULONG ulReserved;
}ATOM_SVID2_VOLTAGE_OBJECT_V3;
@@ -4637,6 +4693,49 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
}ATOM_ASIC_PROFILING_INFO_V2_1;
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ULONG ulEvvDerateTdp;
+ ULONG ulEvvDerateTdc;
+ ULONG ulBoardCoreTemp;
+ ULONG ulMaxVddc;
+ ULONG ulMinVddc;
+ ULONG ulLoadLineSlop;
+ ULONG ulLeakageTemp;
+ ULONG ulLeakageVoltage;
+ ULONG ulCACmEncodeRange;
+ ULONG ulCACmEncodeAverage;
+ ULONG ulCACbEncodeRange;
+ ULONG ulCACbEncodeAverage;
+ ULONG ulKt_bEncodeRange;
+ ULONG ulKt_bEncodeAverage;
+ ULONG ulKv_mEncodeRange;
+ ULONG ulKv_mEncodeAverage;
+ ULONG ulKv_bEncodeRange;
+ ULONG ulKv_bEncodeAverage;
+ ULONG ulLkgEncodeLn_MaxDivMin;
+ ULONG ulLkgEncodeMin;
+ ULONG ulEfuseLogisticAlpha;
+ USHORT usPowerDpm0;
+ USHORT usCurrentDpm0;
+ USHORT usPowerDpm1;
+ USHORT usCurrentDpm1;
+ USHORT usPowerDpm2;
+ USHORT usCurrentDpm2;
+ USHORT usPowerDpm3;
+ USHORT usCurrentDpm3;
+ USHORT usPowerDpm4;
+ USHORT usCurrentDpm4;
+ USHORT usPowerDpm5;
+ USHORT usCurrentDpm5;
+ USHORT usPowerDpm6;
+ USHORT usCurrentDpm6;
+ USHORT usPowerDpm7;
+ USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
UCHAR ucPwrSrcId; // Power source
@@ -5808,6 +5907,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
+#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02
+#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200
#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
@@ -6242,6 +6343,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define _128Mx32 0x53
#define _256Mx8 0x61
#define _256Mx16 0x62
+#define _512Mx8 0x71
#define SAMSUNG 0x1
#define INFINEON 0x2
@@ -6987,9 +7089,10 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
UCHAR ucMaxDispEngineNum;
UCHAR ucMaxActiveDispEngineNum;
UCHAR ucMaxPPLLNum;
- UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
- UCHAR ucReserved[3];
- ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
+ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
+ UCHAR ucDispCaps;
+ UCHAR ucReserved[2];
+ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
}ATOM_DISP_OUT_INFO_V3;
//ucDispCaps
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bf87f6d435f..80a20120e62 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1753,7 +1753,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else {
+ } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
@@ -1910,6 +1910,21 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ int r;
+ struct radeon_framebuffer *radeon_fb;
+ struct radeon_bo *rbo;
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ else {
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ }
/* disable the GRPH */
if (ASIC_IS_DCE4(rdev))
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
@@ -1940,7 +1955,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
break;
case ATOM_PPLL0:
/* disable the ppll */
- if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE))
+ if ((rdev->family == CHIP_ARUBA) ||
+ (rdev->family == CHIP_BONAIRE) ||
+ (rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 00885417fff..fb3ae07a146 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
- if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
- dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
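
The atombios_dp.c hunk replaces an open-coded DPCD test with the drm_dp_enhanced_frame_cap() helper; both check that the sink reports DPCD revision 1.1 or later and sets the enhanced-frame bit in MAX_LANE_COUNT. A standalone sketch of the equivalent test, with register offsets per the DisplayPort DPCD layout (the real helper lives in the DRM DP helper header):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_DPCD_REV		0x000
#define DP_MAX_LANE_COUNT	0x002
#define DP_ENHANCED_FRAME_CAP	(1 << 7)	/* bit 7 of MAX_LANE_COUNT */

static bool enhanced_frame_cap(const uint8_t dpcd[])
{
	return dpcd[DP_DPCD_REV] >= 0x11 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}

int main(void)
{
	uint8_t dpcd[16] = { 0 };

	dpcd[DP_DPCD_REV] = 0x11;		/* DPCD 1.1 */
	dpcd[DP_MAX_LANE_COUNT] = 0x84;		/* 4 lanes + enhanced framing */
	printf("enhanced framing: %s\n", enhanced_frame_cap(dpcd) ? "yes" : "no");
	return 0;
}
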
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 5e891b226ac..a42d61571f4 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
- bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -1662,19 +1662,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
} else {
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- /* some dce3.x boards have a bug in their transmitter control table.
- * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
- * does the same thing and more.
- */
- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
- (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1692,16 +1684,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- } else if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
@@ -2410,6 +2397,15 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
+ /* set up the FMT blocks */
+ if (ASIC_IS_DCE8(rdev))
+ dce8_program_fmt(encoder);
+ else if (ASIC_IS_DCE4(rdev))
+ dce4_program_fmt(encoder);
+ else if (ASIC_IS_DCE3(rdev))
+ dce3_program_fmt(encoder);
+ else if (ASIC_IS_AVIVO(rdev))
+ avivo_program_fmt(encoder);
}
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index deaf98cdca3..0652ee0a209 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
return -EINVAL;
}
args.ucRegIndex = buf[0];
- if (num > 1)
- memcpy(&out, &buf[1], num - 1);
+ if (num > 1) {
+ num--;
+ memcpy(&out, &buf[1], num);
+ }
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
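The write-path change above clamps the copy length so that at most num - 1 payload bytes follow the register-index byte; a standalone sketch of the same pattern (hypothetical local variables, with one extra guard that is not part of the patch):

/* Hedged sketch of the bounded copy added above. */
u16 out = 0;
if (num > 1) {
	num--;                      /* drop the register-index byte */
	if (num > sizeof(out))      /* extra guard, not in the patch */
		num = sizeof(out);
	memcpy(&out, &buf[1], num);
}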
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 51e947a97ed..1ed47997635 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -40,6 +40,20 @@
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
+static const struct ci_pt_defaults defaults_hawaii_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
+ { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
+ { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+};
+
static const struct ci_pt_defaults defaults_bonaire_xt =
{
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
@@ -187,22 +201,38 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
struct ci_power_info *pi = ci_get_pi(rdev);
switch (rdev->pdev->device) {
- case 0x6650:
- case 0x6658:
- case 0x665C:
- default:
+ case 0x6650:
+ case 0x6658:
+ case 0x665C:
+ default:
pi->powertune_defaults = &defaults_bonaire_xt;
break;
- case 0x6651:
- case 0x665D:
+ case 0x6651:
+ case 0x665D:
pi->powertune_defaults = &defaults_bonaire_pro;
break;
- case 0x6640:
+ case 0x6640:
pi->powertune_defaults = &defaults_saturn_xt;
break;
- case 0x6641:
+ case 0x6641:
pi->powertune_defaults = &defaults_saturn_pro;
break;
+ case 0x67B8:
+ case 0x67B0:
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
+ pi->powertune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x67BA:
+ case 0x67B1:
+ pi->powertune_defaults = &defaults_hawaii_pro;
+ break;
}
pi->dte_tj_offset = 0;
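The device-ID switch above only selects one of the defaults tables declared earlier in the file for each Hawaii/Bonaire/Saturn SKU; an equivalent table-driven sketch, purely illustrative (hypothetical struct and array names):

/* Illustrative alternative to the switch: map PCI device IDs to the
 * powertune defaults tables declared earlier in this file. */
struct ci_pt_map { u16 did; const struct ci_pt_defaults *defaults; };

static const struct ci_pt_map pt_map[] = {
	{ 0x67B8, &defaults_hawaii_xt },
	{ 0x67BA, &defaults_hawaii_pro },
	{ 0x6651, &defaults_bonaire_pro },
	/* remaining IDs omitted; unknown IDs fall back to defaults_bonaire_xt */
};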
@@ -5142,9 +5172,15 @@ int ci_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
- pi->thermal_temp_setting.temperature_low = 99500;
- pi->thermal_temp_setting.temperature_high = 100000;
- pi->thermal_temp_setting.temperature_shutdown = 104000;
+ if (rdev->family == CHIP_HAWAII) {
+ pi->thermal_temp_setting.temperature_low = 94500;
+ pi->thermal_temp_setting.temperature_high = 95000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+ } else {
+ pi->thermal_temp_setting.temperature_low = 99500;
+ pi->thermal_temp_setting.temperature_high = 100000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+ }
pi->uvd_enabled = false;
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 252e10a41cf..9c745dd2243 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -217,6 +217,10 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_start_address = BONAIRE_SMC_UCODE_START;
ucode_size = BONAIRE_SMC_UCODE_SIZE;
break;
+ case CHIP_HAWAII:
+ ucode_start_address = HAWAII_SMC_UCODE_START;
+ ucode_size = HAWAII_SMC_UCODE_SIZE;
+ break;
default:
DRM_ERROR("unknown asic in smc ucode loader\n");
BUG();
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9cd2bc989ac..b43a3a3c906 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -41,6 +41,14 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
+MODULE_FIRMWARE("radeon/HAWAII_me.bin");
+MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
+MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -67,11 +75,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
-extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
@@ -1302,6 +1305,171 @@ static const u32 kalindi_mgcg_cgcg_init[] =
0xd80c, 0xff000ff0, 0x00000100
};
+static const u32 hawaii_golden_spm_registers[] =
+{
+ 0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 hawaii_golden_common_registers[] =
+{
+ 0x30800, 0xffffffff, 0xe0000000,
+ 0x28350, 0xffffffff, 0x3a00161a,
+ 0x28354, 0xffffffff, 0x0000002e,
+ 0x9a10, 0xffffffff, 0x00018208,
+ 0x98f8, 0xffffffff, 0x12011003
+};
+
+static const u32 hawaii_golden_registers[] =
+{
+ 0x3354, 0x00000333, 0x00000333,
+ 0x9a10, 0x00010000, 0x00058208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0xf0311fff, 0x80300000,
+ 0x350c, 0x00810000, 0x408af000,
+ 0x7030, 0x31000111, 0x00000011,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2120, 0x0000007f, 0x0000001b,
+ 0x21dc, 0x00007fb6, 0x00002191,
+ 0x3628, 0x0000003f, 0x0000000a,
+ 0x362c, 0x0000003f, 0x0000000a,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8bf0, 0x00002001, 0x00000001,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x30a04, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x3e78, 0x00000001, 0x00000002,
+ 0xc768, 0x00000008, 0x00000008,
+ 0xc770, 0x00000f00, 0x00000800,
+ 0xc774, 0x00000f00, 0x00000800,
+ 0xc798, 0x00ffffff, 0x00ff7fbf,
+ 0xc79c, 0x00ffffff, 0x00ff7faf,
+ 0x8c00, 0x000000ff, 0x00000800,
+ 0xe40, 0x00001fff, 0x00001fff,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xae00, 0x00100000, 0x000ff07c,
+ 0xac14, 0x000003ff, 0x0000000f,
+ 0xac10, 0xffffffff, 0x7564fdec,
+ 0xac0c, 0xffffffff, 0x3120b9a8,
+ 0xac08, 0x20000000, 0x0f9c0000
+};
+
+static const u32 hawaii_mgcg_cgcg_init[] =
+{
+ 0xc420, 0xffffffff, 0xfffffffd,
+ 0x30800, 0xffffffff, 0xe0000000,
+ 0x3c2a0, 0xffffffff, 0x00000100,
+ 0x3c208, 0xffffffff, 0x00000100,
+ 0x3c2c0, 0xffffffff, 0x00000100,
+ 0x3c2c8, 0xffffffff, 0x00000100,
+ 0x3c2c4, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00200100,
+ 0x3c280, 0xffffffff, 0x00000100,
+ 0x3c214, 0xffffffff, 0x06000100,
+ 0x3c220, 0xffffffff, 0x00000100,
+ 0x3c218, 0xffffffff, 0x06000100,
+ 0x3c204, 0xffffffff, 0x00000100,
+ 0x3c2e0, 0xffffffff, 0x00000100,
+ 0x3c224, 0xffffffff, 0x00000100,
+ 0x3c200, 0xffffffff, 0x00000100,
+ 0x3c230, 0xffffffff, 0x00000100,
+ 0x3c234, 0xffffffff, 0x00000100,
+ 0x3c250, 0xffffffff, 0x00000100,
+ 0x3c254, 0xffffffff, 0x00000100,
+ 0x3c258, 0xffffffff, 0x00000100,
+ 0x3c25c, 0xffffffff, 0x00000100,
+ 0x3c260, 0xffffffff, 0x00000100,
+ 0x3c27c, 0xffffffff, 0x00000100,
+ 0x3c278, 0xffffffff, 0x00000100,
+ 0x3c210, 0xffffffff, 0x06000100,
+ 0x3c290, 0xffffffff, 0x00000100,
+ 0x3c274, 0xffffffff, 0x00000100,
+ 0x3c2b4, 0xffffffff, 0x00000100,
+ 0x3c2b0, 0xffffffff, 0x00000100,
+ 0x3c270, 0xffffffff, 0x00000100,
+ 0x30800, 0xffffffff, 0xe0000000,
+ 0x3c020, 0xffffffff, 0x00010000,
+ 0x3c024, 0xffffffff, 0x00030002,
+ 0x3c028, 0xffffffff, 0x00040007,
+ 0x3c02c, 0xffffffff, 0x00060005,
+ 0x3c030, 0xffffffff, 0x00090008,
+ 0x3c034, 0xffffffff, 0x00010000,
+ 0x3c038, 0xffffffff, 0x00030002,
+ 0x3c03c, 0xffffffff, 0x00040007,
+ 0x3c040, 0xffffffff, 0x00060005,
+ 0x3c044, 0xffffffff, 0x00090008,
+ 0x3c048, 0xffffffff, 0x00010000,
+ 0x3c04c, 0xffffffff, 0x00030002,
+ 0x3c050, 0xffffffff, 0x00040007,
+ 0x3c054, 0xffffffff, 0x00060005,
+ 0x3c058, 0xffffffff, 0x00090008,
+ 0x3c05c, 0xffffffff, 0x00010000,
+ 0x3c060, 0xffffffff, 0x00030002,
+ 0x3c064, 0xffffffff, 0x00040007,
+ 0x3c068, 0xffffffff, 0x00060005,
+ 0x3c06c, 0xffffffff, 0x00090008,
+ 0x3c070, 0xffffffff, 0x00010000,
+ 0x3c074, 0xffffffff, 0x00030002,
+ 0x3c078, 0xffffffff, 0x00040007,
+ 0x3c07c, 0xffffffff, 0x00060005,
+ 0x3c080, 0xffffffff, 0x00090008,
+ 0x3c084, 0xffffffff, 0x00010000,
+ 0x3c088, 0xffffffff, 0x00030002,
+ 0x3c08c, 0xffffffff, 0x00040007,
+ 0x3c090, 0xffffffff, 0x00060005,
+ 0x3c094, 0xffffffff, 0x00090008,
+ 0x3c098, 0xffffffff, 0x00010000,
+ 0x3c09c, 0xffffffff, 0x00030002,
+ 0x3c0a0, 0xffffffff, 0x00040007,
+ 0x3c0a4, 0xffffffff, 0x00060005,
+ 0x3c0a8, 0xffffffff, 0x00090008,
+ 0x3c0ac, 0xffffffff, 0x00010000,
+ 0x3c0b0, 0xffffffff, 0x00030002,
+ 0x3c0b4, 0xffffffff, 0x00040007,
+ 0x3c0b8, 0xffffffff, 0x00060005,
+ 0x3c0bc, 0xffffffff, 0x00090008,
+ 0x3c0c0, 0xffffffff, 0x00010000,
+ 0x3c0c4, 0xffffffff, 0x00030002,
+ 0x3c0c8, 0xffffffff, 0x00040007,
+ 0x3c0cc, 0xffffffff, 0x00060005,
+ 0x3c0d0, 0xffffffff, 0x00090008,
+ 0x3c0d4, 0xffffffff, 0x00010000,
+ 0x3c0d8, 0xffffffff, 0x00030002,
+ 0x3c0dc, 0xffffffff, 0x00040007,
+ 0x3c0e0, 0xffffffff, 0x00060005,
+ 0x3c0e4, 0xffffffff, 0x00090008,
+ 0x3c0e8, 0xffffffff, 0x00010000,
+ 0x3c0ec, 0xffffffff, 0x00030002,
+ 0x3c0f0, 0xffffffff, 0x00040007,
+ 0x3c0f4, 0xffffffff, 0x00060005,
+ 0x3c0f8, 0xffffffff, 0x00090008,
+ 0xc318, 0xffffffff, 0x00020200,
+ 0x3350, 0xffffffff, 0x00000200,
+ 0x15c0, 0xffffffff, 0x00000400,
+ 0x55e8, 0xffffffff, 0x00000000,
+ 0x2f50, 0xffffffff, 0x00000902,
+ 0x3c000, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc424, 0xffffffff, 0x0020003f,
+ 0x38, 0xffffffff, 0x0140001c,
+ 0x3c, 0x000f0000, 0x000f0000,
+ 0x220, 0xffffffff, 0xc060000c,
+ 0x224, 0xc0000fff, 0x00000100,
+ 0xf90, 0xffffffff, 0x00000100,
+ 0xf98, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd00c, 0xff000ff0, 0x00000100,
+ 0xd80c, 0xff000ff0, 0x00000100
+};
+
static void cik_init_golden_registers(struct radeon_device *rdev)
{
switch (rdev->family) {
@@ -1347,6 +1515,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
spectre_golden_spm_registers,
(const u32)ARRAY_SIZE(spectre_golden_spm_registers));
break;
+ case CHIP_HAWAII:
+ radeon_program_register_sequence(rdev,
+ hawaii_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ hawaii_golden_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_registers));
+ radeon_program_register_sequence(rdev,
+ hawaii_golden_common_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+ radeon_program_register_sequence(rdev,
+ hawaii_golden_spm_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+ break;
default:
break;
}
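Each golden-register table added above is consumed as (offset, and-mask, or-value) triples. A hedged sketch of how such a sequence is typically applied; the exact behaviour of radeon_program_register_sequence() is an assumption here, not quoted from the patch:

/* Assumed semantics: read-modify-write each register unless the mask
 * covers the whole word, in which case the value is written directly. */
static void program_register_sequence(struct radeon_device *rdev,
				      const u32 *regs, u32 array_size)
{
	u32 i;

	for (i = 0; i < array_size; i += 3) {
		u32 reg = regs[i + 0];
		u32 and_mask = regs[i + 1];
		u32 or_mask = regs[i + 2];
		u32 tmp;

		if (and_mask == 0xffffffff)
			tmp = or_mask;
		else
			tmp = (RREG32(reg) & ~and_mask) | or_mask;
		WREG32(reg, tmp);
	}
}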
@@ -1378,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
* cik_mm_rdoorbell - read a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
*
* Returns the value in the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
- if (offset < rdev->doorbell.size) {
- return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ return readl(rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
@@ -1397,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
* cik_mm_wdoorbell - write a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
- if (offset < rdev->doorbell.size) {
- writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ writel(v, rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
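The doorbell accessors above now take a dword index rather than a byte offset; since doorbell.ptr is treated as a u32 __iomem pointer, ptr + index already advances in 4-byte steps. A minimal sketch of the bounds-checked access pattern (the pointer type is assumed from the arithmetic above):

/* Sketch: index-based doorbell read with the same bounds check as above. */
static u32 doorbell_read(struct radeon_device *rdev, u32 index)
{
	if (index >= rdev->doorbell.num_doorbells) {
		DRM_ERROR("doorbell index out of range: 0x%08x\n", index);
		return 0;
	}
	return readl(rdev->doorbell.ptr + index); /* +index == +4*index bytes */
}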
@@ -1454,6 +1636,35 @@ static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{0x0000009f, 0x00b48000}
};
+#define HAWAII_IO_MC_REGS_SIZE 22
+
+static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
+{
+ {0x0000007d, 0x40000000},
+ {0x0000007e, 0x40180304},
+ {0x0000007f, 0x0000ff00},
+ {0x00000081, 0x00000000},
+ {0x00000083, 0x00000800},
+ {0x00000086, 0x00000000},
+ {0x00000087, 0x00000100},
+ {0x00000088, 0x00020100},
+ {0x00000089, 0x00000000},
+ {0x0000008b, 0x00040000},
+ {0x0000008c, 0x00000100},
+ {0x0000008e, 0xff010000},
+ {0x00000090, 0xffffefff},
+ {0x00000091, 0xfff3efff},
+ {0x00000092, 0xfff3efbf},
+ {0x00000093, 0xf7ffffff},
+ {0x00000094, 0xffffff7f},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x0000009f, 0x00c79000}
+};
+
+
/**
* cik_srbm_select - select specific register instances
*
@@ -1498,11 +1709,17 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BONAIRE:
- default:
io_mc_regs = (u32 *)&bonaire_io_mc_regs;
ucode_size = CIK_MC_UCODE_SIZE;
regs_size = BONAIRE_IO_MC_REGS_SIZE;
break;
+ case CHIP_HAWAII:
+ io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+ ucode_size = HAWAII_MC_UCODE_SIZE;
+ regs_size = HAWAII_IO_MC_REGS_SIZE;
+ break;
+ default:
+ return -EINVAL;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1564,8 +1781,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
- mec_req_size, rlc_req_size, mc_req_size,
- sdma_req_size, smc_req_size;
+ mec_req_size, rlc_req_size, mc_req_size = 0,
+ sdma_req_size, smc_req_size = 0;
char fw_name[30];
int err;
@@ -1583,6 +1800,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
break;
+ case CHIP_HAWAII:
+ chip_name = "HAWAII";
+ pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+ me_req_size = CIK_ME_UCODE_SIZE * 4;
+ ce_req_size = CIK_CE_UCODE_SIZE * 4;
+ mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+ rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
+ mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
+ sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+ break;
case CHIP_KAVERI:
chip_name = "KAVERI";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
@@ -1763,9 +1991,227 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
num_pipe_configs = rdev->config.cik.max_tile_pipes;
if (num_pipe_configs > 8)
- num_pipe_configs = 8; /* ??? */
+ num_pipe_configs = 16;
- if (num_pipe_configs == 8) {
+ if (num_pipe_configs == 16) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ break;
+ case 7:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 12:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 17:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 27:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ break;
+ case 28:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 29:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 30:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ }
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 4:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 5:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 9:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 10:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 14:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ }
+ } else if (num_pipe_configs == 8) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
@@ -1981,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 4) {
@@ -2327,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 2) {
@@ -2544,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -2650,7 +3099,10 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
- disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+ if (rdev->family == CHIP_HAWAII)
+ disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
+ else
+ disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
@@ -2667,6 +3119,12 @@ static void cik_setup_rb(struct radeon_device *rdev,
data = 0;
for (j = 0; j < sh_per_se; j++) {
switch (enabled_rbs & 3) {
+ case 0:
+ if (j == 0)
+ data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
+ else
+ data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
+ break;
case 1:
data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
break;
@@ -2719,6 +3177,23 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_HAWAII:
+ rdev->config.cik.max_shader_engines = 4;
+ rdev->config.cik.max_tile_pipes = 16;
+ rdev->config.cik.max_cu_per_sh = 11;
+ rdev->config.cik.max_sh_per_se = 1;
+ rdev->config.cik.max_backends_per_se = 4;
+ rdev->config.cik.max_texture_channel_caches = 16;
+ rdev->config.cik.max_gprs = 256;
+ rdev->config.cik.max_gs_threads = 32;
+ rdev->config.cik.max_hw_contexts = 8;
+
+ rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+ break;
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
@@ -3084,17 +3559,98 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
+/* TODO: figure out why semaphores cause lockups */
+#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
+ * cik_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the CP DMA engine (CIK+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.blit_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes, control;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ control = 0;
+ if (size_in_bytes == 0)
+ control |= PACKET3_DMA_DATA_CP_SYNC;
+ radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+ radeon_ring_write(ring, control);
+ radeon_ring_write(ring, lower_32_bits(src_offset));
+ radeon_ring_write(ring, upper_32_bits(src_offset));
+ radeon_ring_write(ring, lower_32_bits(dst_offset));
+ radeon_ring_write(ring, upper_32_bits(dst_offset));
+ radeon_ring_write(ring, cur_size_in_bytes);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
}
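The copy loop above splits a transfer into DMA_DATA packets of at most 0x1fffff bytes (the 21-bit BYTE_COUNT field) and raises CP_SYNC only on the final packet; a worked sketch of the chunking:

/* Worked sketch of the chunking used by cik_copy_cpdma() above. */
u32 bytes = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
int loops = DIV_ROUND_UP(bytes, 0x1fffff);   /* 21-bit byte count per packet */
int i;

for (i = 0; i < loops; i++) {
	u32 chunk = min_t(u32, bytes, 0x1fffff);
	bool last;

	bytes -= chunk;
	last = (bytes == 0);                 /* CP_SYNC only on the last packet */
	/* emit PACKET3_DMA_DATA for 'chunk' bytes; control |= CP_SYNC if last */
}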
/*
@@ -3403,7 +3959,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
int r;
WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ if (rdev->family != CHIP_HAWAII)
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -3500,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
- WDOORBELL32(ring->doorbell_offset, ring->wptr);
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
}
/**
@@ -3841,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
return r;
}
- /* doorbell offset */
- rdev->ring[idx].doorbell_offset =
- (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
-
/* init the mqd struct */
memset(buf, 0, sizeof(struct bonaire_mqd));
@@ -3956,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
mqd->queue_state.cp_hqd_pq_doorbell_control |=
- DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+ DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
mqd->queue_state.cp_hqd_pq_doorbell_control &=
~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -4740,12 +5293,17 @@ void cik_vm_fini(struct radeon_device *rdev)
static void cik_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr, u32 mc_client)
{
- u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ u32 mc_id;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+ if (rdev->family == CHIP_HAWAII)
+ mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ else
+ mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+
printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
@@ -4834,62 +5392,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
}
}
-/**
- * cik_vm_set_page - update the page tables using sDMA
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using CP or sDMA (CIK).
- */
-void cik_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- uint64_t value;
- unsigned ndw;
-
- if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
- /* CP */
- while (count) {
- ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
- ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1));
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- /* DMA */
- cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
- }
-}
-
/*
* RLC
* The RLC is a multi-purpose microengine that handles a
@@ -5058,6 +5560,7 @@ static int cik_rlc_resume(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BONAIRE:
+ case CHIP_HAWAII:
default:
size = BONAIRE_RLC_UCODE_SIZE;
break;
@@ -5556,7 +6059,7 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
}
for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
- dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+ dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
}
bo_offset += CP_ME_TABLE_SIZE;
}
@@ -5778,52 +6281,57 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
if (buffer == NULL)
return;
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
- buffer[count++] = 0x80000000;
- buffer[count++] = 0x80000000;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
- buffer[count++] = ext->reg_index - 0xa000;
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = ext->extent[i];
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
- buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (rdev->family) {
case CHIP_BONAIRE:
- buffer[count++] = 0x16000012;
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x16000012);
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_KAVERI:
- buffer[count++] = 0x00000000; /* XXX */
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_KABINI:
- buffer[count++] = 0x00000000; /* XXX */
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_HAWAII:
+ buffer[count++] = cpu_to_le32(0x3a00161a);
+ buffer[count++] = cpu_to_le32(0x0000002e);
break;
default:
- buffer[count++] = 0x00000000;
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000);
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
}
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
- buffer[count++] = 0;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
}
static void cik_init_pg(struct radeon_device *rdev)
@@ -7118,7 +7626,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- RADEON_CP_PACKET2);
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7332,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
@@ -7428,6 +7936,70 @@ void cik_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+void dce8_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS/eDP FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
+ break;
+ case 10:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
+ break;
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
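dce8_program_fmt() above maps the monitor bpc onto the FMT depth field (0 = 18 bpp/6 bpc, 1 = 24 bpp/8 bpc, 2 = 30 bpp/10 bpc, per the cikd.h comments later in this diff); a compact sketch of that mapping, as an illustrative helper that is not part of the patch:

/* Illustrative bpc -> FMT_*_DEPTH encoding used above (see cikd.h notes). */
static int fmt_depth_from_bpc(int bpc)
{
	switch (bpc) {
	case 6:  return 0;   /* 18 bpp */
	case 8:  return 1;   /* 24 bpp */
	case 10: return 2;   /* 30 bpp */
	default: return -1;  /* no truncation/dither programming needed */
	}
}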
+
/* display watermark setup */
/**
* dce8_line_buffer_adjust - Set up the line buffer
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b6286068e11..0300727a4f7 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_trace.h"
#include "cikd.h"
/* sdma */
@@ -101,14 +102,6 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (fence->ring == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
/* write the fence */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
@@ -118,12 +111,12 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
/* generate an interrupt */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
/* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+ /* We should be using the new POLL_REG_MEM special op packet here
+ * but it causes sDMA to hang sometimes
+ */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+ radeon_ring_write(ring, 0);
}
/**
@@ -137,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring wait on or signal
* other rings (CIK).
*/
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -148,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
radeon_ring_write(ring, addr & 0xfffffff8);
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+ return true;
}
/**
@@ -450,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -653,11 +643,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -669,16 +660,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
- value |= r600_flags;
+ value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
@@ -689,7 +674,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
if (ndw > 0x7FFFF)
ndw = 0x7FFFF;
- if (flags & RADEON_VM_PAGE_VALID)
+ if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
@@ -697,7 +682,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -724,18 +709,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
if (vm == NULL)
return;
- if (ridx == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm->id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
@@ -770,12 +747,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+ /* We should be using the new POLL_REG_MEM special op packet here
+ * but it causes sDMA to hang sometimes
+ */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+ radeon_ring_write(ring, 0);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a09a1f..5964af5e5b2 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -25,8 +25,10 @@
#define CIK_H
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
@@ -499,6 +501,7 @@
* bit 4: write
*/
#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define HAWAII_MEMORY_CLIENT_ID_MASK (0x1ff << 12)
#define MEMORY_CLIENT_ID_SHIFT 12
#define MEMORY_CLIENT_RW_MASK (1 << 24)
#define MEMORY_CLIENT_RW_SHIFT 24
@@ -906,6 +909,39 @@
#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
# define STUTTER_ENABLE (1 << 0)
+/* DCE8 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
+# define FMT_DYNAMIC_EXP_EN (1 << 0)
+# define FMT_DYNAMIC_EXP_MODE (1 << 4)
+ /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL 0x6fb8
+# define FMT_PIXEL_ENCODING (1 << 16)
+ /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6fc8
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_MODE (1 << 1)
+# define FMT_TRUNCATE_DEPTH(x) ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH(x) ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH(x) ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x6fe4
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1129,6 +1165,8 @@
# define ADDR_SURF_P8_32x32_16x16 12
# define ADDR_SURF_P8_32x32_16x32 13
# define ADDR_SURF_P8_32x64_32x32 14
+# define ADDR_SURF_P16_32x32_8x16 16
+# define ADDR_SURF_P16_32x32_16x16 17
# define TILE_SPLIT(x) ((x) << 11)
# define ADDR_SURF_TILE_SPLIT_64B 0
# define ADDR_SURF_TILE_SPLIT_128B 1
@@ -1422,6 +1460,7 @@
# define RASTER_CONFIG_RB_MAP_1 1
# define RASTER_CONFIG_RB_MAP_2 2
# define RASTER_CONFIG_RB_MAP_3 3
+#define PKR_MAP(x) ((x) << 8)
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
@@ -1714,6 +1753,68 @@
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
#define PACKET3_DMA_DATA 0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+ /* 0 - LRU
+ * 1 - Stream
+ * 2 - Bypass
+ */
+# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
+ /* 0 - DST_ADDR using DAS
+ * 1 - GDS
+ * 3 - DST_ADDR using L2
+ */
+# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+ /* 0 - LRU
+ * 1 - Stream
+ * 2 - Bypass
+ */
+# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR using SAS
+ * 1 - GDS
+ * 2 - DATA
+ * 3 - SRC_ADDR using L2
+ */
+# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
+# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
+# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
+# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
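Putting the fields above together, a plain linear memory-to-memory copy (as emitted by cik_copy_cpdma() earlier in this diff) keeps SRC_SEL/DST_SEL at their address-based settings; a hedged sketch of composing the CONTROL and COMMAND dwords (field choices are assumptions based on the comments above, not taken verbatim from the patch):

static void fill_dma_data_words(u32 *control, u32 *command,
				u32 byte_count, bool last_packet)
{
	*control = PACKET3_DMA_DATA_SRC_SEL(0) |  /* SRC_ADDR via SAS */
		   PACKET3_DMA_DATA_DST_SEL(0);   /* DST_ADDR via DAS */
	if (last_packet)
		*control |= PACKET3_DMA_DATA_CP_SYNC;
	*command = byte_count & 0x1fffff;         /* BYTE_COUNT [20:0] */
}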
#define PACKET3_AQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 91bb470de0a..920e1e4a52c 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
static int cypress_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
u32 tmp;
udelay(10);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 9fcd338c0fc..009f46e0ce7 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -102,6 +102,49 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp = 0, offset;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (connector->latency_present[1])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+ AUDIO_LIPSYNC(connector->audio_latency[1]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ } else {
+ if (connector->latency_present[0])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+ AUDIO_LIPSYNC(connector->audio_latency[0]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ }
+ WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -113,9 +156,6 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
if (!dig->afmt->pin)
return;
@@ -201,20 +241,30 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
- break;
+ stereo_freqs |= sad->freq;
+ else
+ break;
}
}
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b5c67a99dda..9702e55e924 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1174,23 +1174,72 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
- u16 ctl, v;
- int err;
-
- err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
- if (err)
- return;
-
- v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+ int readrq;
+ u16 v;
+ readrq = pcie_get_readrq(rdev->pdev);
+ v = ffs(readrq) - 8;
/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
* to avoid hangs or performance issues
*/
- if ((v == 0) || (v == 6) || (v == 7)) {
- ctl &= ~PCI_EXP_DEVCTL_READRQ;
- ctl |= (2 << 12);
- pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
+ if ((v == 0) || (v == 6) || (v == 7))
+ pcie_set_readrq(rdev->pdev, 512);
+}
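The conversion above relies on the PCIe MRRS encoding: the byte size returned by pcie_get_readrq() is 128 << v, so v = ffs(readrq) - 8 (ffs(128) == 8). A quick worked check of the arithmetic:

/* Worked example of the encoding used above; the 128 << v mapping is the
 * assumption being illustrated. */
int readrq = 512;           /* bytes, e.g. after pcie_set_readrq(pdev, 512) */
u16 v = ffs(readrq) - 8;    /* 512 == 1 << 9, ffs() == 10, so v == 2 */
/* v == 0, 6 or 7 would mean 128 bytes or a reserved encoding -> force 512 */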
+
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
}
+
+ /* LVDS/eDP FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN);
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
@@ -3963,7 +4012,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->family >= CHIP_TAHITI) {
/* SI */
for (i = 0; i < rdev->rlc.reg_list_size; i++)
- dst_ptr[i] = src_ptr[i];
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
} else {
/* ON/LN/TN */
/* format:
@@ -3977,10 +4026,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (i < dws)
data |= (src_ptr[i] >> 2) << 16;
j = (((i - 1) * 3) / 2);
- dst_ptr[j] = data;
+ dst_ptr[j] = cpu_to_le32(data);
}
j = ((i * 3) / 2);
- dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+ dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
}
radeon_bo_kunmap(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4042,40 +4091,40 @@ int sumo_rlc_init(struct radeon_device *rdev)
cik_get_csb_buffer(rdev, dst_ptr);
} else if (rdev->family >= CHIP_TAHITI) {
reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
- dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
- dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
- dst_ptr[2] = rdev->rlc.clear_state_size;
+ dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+ dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+ dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
} else {
reg_list_hdr_blk_index = 0;
reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
for (i = 0; cs_data[i].section != NULL; i++) {
for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
reg_num = cs_data[i].section[j].reg_count;
data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
data = 0x08000000 | (reg_num * 4);
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
for (k = 0; k < reg_num; k++) {
data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
}
reg_list_mc_addr += reg_num * 4;
reg_list_blk_index += reg_num;
}
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
}
radeon_bo_kunmap(rdev->rlc.clear_state_obj);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
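
The pattern behind all of the sumo_rlc_init() changes above: the RLC save/restore and clear-state buffers are parsed by the GPU as little-endian words, so every CPU-side store is now wrapped in cpu_to_le32(), a no-op on little-endian hosts and a byte swap on big-endian ones. A self-contained sketch of the idiom, illustrative only:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Copy a CPU-native u32 table into a GPU-visible buffer that must be
 * little-endian regardless of host byte order.
 */
static void fill_le32_table(__le32 *dst, const u32 *src, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dst[i] = cpu_to_le32(src[i]);
}
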
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 6a0656d00ed..a37b5443638 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
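
The same simplification repeats in every copy routine touched by this series (see r600_copy_cpdma() and r600_copy_dma() below): instead of checking radeon_fence_need_sync() and bookkeeping with radeon_fence_note_sync(), the caller records the dependency on the semaphore and lets the helpers emit whatever waits the target ring needs. Condensed, the new calling pattern is (error handling elided, names taken from the hunk above):

r = radeon_semaphore_create(rdev, &sem);
/* ... */
radeon_semaphore_sync_to(sem, *fence);                  /* remember the fence to wait for */
r = radeon_semaphore_sync_rings(rdev, sem, ring->idx);  /* emit waits on the waiting ring */
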
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 57fcc4b16a5..aa695c4feb3 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -35,6 +35,8 @@
extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
/*
* update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp = 0;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (connector->latency_present[1])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+ AUDIO_LIPSYNC(connector->audio_latency[1]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ } else {
+ if (connector->latency_present[0])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+ AUDIO_LIPSYNC(connector->audio_latency[0]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ }
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -67,12 +105,11 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -124,8 +161,10 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
};
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -142,20 +181,30 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
- break;
+ stereo_freqs |= sad->freq;
+ else
+ break;
}
}
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
WREG32(eld_reg_to_type[i][0], value);
}
@@ -324,8 +373,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
if (ASIC_IS_DCE6(rdev)) {
dce6_afmt_select_pin(encoder);
dce6_afmt_write_sad_regs(encoder);
+ dce6_afmt_write_latency_fields(encoder, mode);
} else {
evergreen_hdmi_write_sad_regs(encoder);
+ dce4_afmt_write_latency_fields(encoder, mode);
}
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
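
The SAD-register rewrite above changes two things: the descriptor with the largest channel count now wins instead of the first match, and the stereo sample-rate mask becomes the union of all PCM SADs rather than a copy of one. A hypothetical example, assuming a sink that advertises a 2-channel PCM SAD with rate bits 0x07 and a 6-channel PCM SAD with rate bits 0x7f:

/* byte2_6ch stands in for the 6-channel descriptor's byte 2; only the
 * macros are taken from the driver.
 */
value = MAX_CHANNELS(6) | DESCRIPTOR_BYTE_2(byte2_6ch) |
	SUPPORTED_FREQUENCIES(0x7f) |
	SUPPORTED_FREQUENCIES_STEREO(0x07 | 0x7f);	/* stereo mask = 0x7f */
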
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4f6d2962767..17f99079899 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -750,6 +750,44 @@
* bit6 = 192 kHz
*/
+#define AZ_CHANNEL_COUNT_CONTROL 0x5fe4
+# define HBR_CHANNEL_COUNT(x) (((x) & 0x7) << 0)
+# define COMPRESSED_CHANNEL_COUNT(x) (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0 = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC 0x5fe8
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR 0x5fec
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+# define DISPLAY0_TYPE(x) (((x) & 0x3) << 0)
+# define DISPLAY_TYPE_NONE 0
+# define DISPLAY_TYPE_HDMI 1
+# define DISPLAY_TYPE_DP 2
+# define DISPLAY0_ID(x) (((x) & 0x3f) << 2)
+# define DISPLAY1_TYPE(x) (((x) & 0x3) << 8)
+# define DISPLAY1_ID(x) (((x) & 0x3f) << 10)
+# define DISPLAY2_TYPE(x) (((x) & 0x3) << 16)
+# define DISPLAY2_ID(x) (((x) & 0x3f) << 18)
+# define DISPLAY3_TYPE(x) (((x) & 0x3) << 24)
+# define DISPLAY3_ID(x) (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+# define DISPLAY4_TYPE(x) (((x) & 0x3) << 0)
+# define DISPLAY4_ID(x) (((x) & 0x3f) << 2)
+# define DISPLAY5_TYPE(x) (((x) & 0x3) << 8)
+# define DISPLAY5_ID(x) (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER 0x5ffc
+# define NUMBER_OF_DISPLAY_ID(x) (((x) & 0x7) << 0)
+
#define AZ_HOT_PLUG_CONTROL 0x5e78
# define AZ_FORCE_CODEC_WAKE (1 << 0)
# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
@@ -1312,6 +1350,38 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
+# define FMT_DYNAMIC_EXP_EN (1 << 0)
+# define FMT_DYNAMIC_EXP_MODE (1 << 4)
+ /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL 0x6fb8
+# define FMT_PIXEL_ENCODING (1 << 16)
+ /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6fc8
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_DEPTH (1 << 4)
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x6fe4
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
/* ASYNC DMA */
#define DMA_RB_RPTR 0xd008
#define DMA_RB_WPTR 0xd00c
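
A worked example of how dce4_program_fmt(), earlier in this series, combines the new FMT_BIT_DEPTH_CONTROL bits for an 8 bpc sink with dithering enabled:

/* FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | FMT_RGB_RANDOM_ENABLE |
 * FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH
 *   = (1 << 13) | (1 << 15) | (1 << 14) | (1 << 8) | (1 << 12)
 *   = 0xf100
 * written to FMT_BIT_DEPTH_CONTROL (0x6fc8) + radeon_crtc->crtc_offset.
 */
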
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cac2866d79d..11aab2ab54c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2400,77 +2395,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
block, mc_id);
}
-#define R600_ENTRY_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
- uint32_t r600_flags = 0;
- r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
- r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
- r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- r600_flags |= R600_PTE_SYSTEM;
- r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
- }
- return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- uint64_t value;
- unsigned ndw;
-
- if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
- while (count) {
- ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
- }
-}
-
/**
* cayman_vm_flush - vm flush using the CP
*
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e9688fbe..bdeb65ed365 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_trace.h"
#include "nid.h"
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags
+ * @flags: hw access flags
*
* Update the page tables using the DMA (cayman/TN).
*/
@@ -256,11 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
- if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
+ } else if (flags & R600_PTE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
- value |= r600_flags;
+ value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
@@ -291,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
- if (flags & RADEON_VM_PAGE_VALID)
+ if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
@@ -299,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
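
With cayman_vm_page_flags() gone (removed in ni.c above) and cayman_dma_vm_set_page() taking hardware flags directly, the translation from RADEON_VM_PAGE_* to R600_PTE_* has to happen in the caller. A minimal sketch of that caller-side composition, assuming the generic VM code mirrors the removed mapping; the variable names are placeholders:

uint32_t hw_flags = R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE;

if (backed_by_system_ram)                      /* placeholder condition */
	hw_flags |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;

cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, hw_flags);
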
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f2633902815..cdc003085a7 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
static int ni_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-#if defined(CONFIG_ACPI)
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d71333033b2..10abc4d5a6c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
+ return false;
}
int r100_copy_blit(struct radeon_device *rdev,
@@ -1434,7 +1435,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f9be22062df..9ad06732a78 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -124,6 +124,59 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return 0;
}
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= FMT_SPATIAL_DITHER_EN;
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -2597,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -2611,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+ return true;
}
/**
@@ -2653,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec83f28..5dceea6f71a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -2328,13 +2328,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
kfree(parser->relocs);
- for (i = 0; i < parser->nchunks; i++) {
- kfree(parser->chunks[i].kdata);
- if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
- }
- }
+ for (i = 0; i < parser->nchunks; i++)
+ drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
}
@@ -2391,13 +2386,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
- r = r600_cs_parse(&parser);
- if (r) {
- DRM_ERROR("Invalid command stream !\n");
+ if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+ r = -EFAULT;
r600_cs_parser_fini(&parser, r);
return r;
}
- r = radeon_cs_finish_pages(&parser);
+ r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
r600_cs_parser_fini(&parser, r);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 3b317456512..7844d15c139 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
 * Add a DMA semaphore packet to the ring to wait on or signal
* other rings (r6xx-SI).
*/
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+ return true;
}
/**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 06022e3b9c3..4b89262f3f0 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -24,6 +24,7 @@
* Authors: Christian König
*/
#include <linux/hdmi.h>
+#include <linux/gcd.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
- { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
+ { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
{ 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
{ 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
{ 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
{ 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
{ 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
- { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
{ 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
- { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
{ 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
- { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
};
+
/*
- * calculate CTS value if it's not found in the table
+ * calculate CTS and N values if they are not found in the table
*/
-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
{
- u64 n;
- u32 d;
-
- if (*CTS == 0) {
- n = (u64)clock * (u64)N * 1000ULL;
- d = 128 * freq;
- do_div(n, d);
- *CTS = n;
- }
- DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
- N, *CTS, freq);
+ int n, cts;
+ unsigned long div, mul;
+
+ /* Safe, but overly large values */
+ n = 128 * freq;
+ cts = clock * 1000;
+
+ /* Smallest valid fraction */
+ div = gcd(n, cts);
+
+ n /= div;
+ cts /= div;
+
+ /*
+ * The optimal N is 128*freq/1000. Calculate the closest larger
+ * value that doesn't truncate any bits.
+ */
+ mul = ((128*freq/1000) + (n-1))/n;
+
+ n *= mul;
+ cts *= mul;
+
+ /* Check that we are in spec (not always possible) */
+ if (n < (128*freq/1500))
+ printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+ if (n > (128*freq/300))
+ printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+ *N = n;
+ *CTS = cts;
+
+ DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+ *N, *CTS, freq);
}
struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
struct radeon_hdmi_acr res;
u8 i;
- for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
- r600_hdmi_predefined_acr[i].clock != 0; i++)
- ;
- res = r600_hdmi_predefined_acr[i];
+ /* Precalculated values for common clocks */
+ for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
+ if (r600_hdmi_predefined_acr[i].clock == clock)
+ return r600_hdmi_predefined_acr[i];
+ }
- /* In case some CTS are missing */
- r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
- r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
- r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+ /* And odd clocks get manually calculated */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
return res;
}
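
A worked example of the gcd-based fallback for a clock that is not in the predefined table, say a 65 MHz pixel clock at 48 kHz:

/*   n   = 128 * 48000      = 6144000
 *   cts = 65000 * 1000     = 65000000
 *   gcd(6144000, 65000000) = 8000    ->  n = 768, cts = 8125
 *   optimal N = 128 * 48000 / 1000 = 6144
 *   mul = ((6144) + (768 - 1)) / 768 = 8
 *   =>  N = 6144, CTS = 65000  (inside the 4096..20480 window for 48 kHz)
 */
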
@@ -313,8 +337,10 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -366,8 +392,10 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
};
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -384,20 +412,30 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
- break;
+ stereo_freqs |= sad->freq;
+ else
+ break;
}
}
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
WREG32(eld_reg_to_type[i][0], value);
}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7b3c7b5932c..ebe38724a97 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1199,6 +1199,34 @@
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+/* DCE3 FMT blocks */
+#define FMT_CONTROL 0x6700
+# define FMT_PIXEL_ENCODING (1 << 16)
+ /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6710
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_DEPTH (1 << 4)
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x672c
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
/* Power management */
#define CG_SPLL_FUNC_CNTL 0x600
# define SPLL_RESET (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 24f4960f59e..ecf2a3960c0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -98,6 +98,7 @@ extern int radeon_lockup_timeout;
extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;
+extern int radeon_runtime_pm;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@ struct radeon_fence_driver {
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
- unsigned long last_activity;
bool initialized;
};
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_locked(struct radeon_fence *fence);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter);
+ int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* GPU doorbell structures, functions & helpers
*/
+#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
struct radeon_doorbell {
- u32 num_pages;
- bool free[1024];
/* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- void __iomem *ptr;
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
+ unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
};
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
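
The doorbell bookkeeping switches from a bool free[1024] array to a proper bitmap plus an index count. A sketch of how an allocator over that bitmap might look; this is an assumption about the radeon_device.c side, only the struct layout above is taken from the patch:

int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used,
						   rdev->doorbell.num_doorbells);

	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	}
	return -EINVAL;
}
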
@@ -765,7 +771,6 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@@ -799,8 +804,7 @@ struct radeon_ring {
u32 pipe;
u32 queue;
struct radeon_bo *mqd_obj;
- u32 doorbell_page_num;
- u32 doorbell_offset;
+ u32 doorbell_index;
unsigned wptr_offs;
};
@@ -832,6 +836,12 @@ struct radeon_mec {
#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
struct radeon_vm {
struct list_head list;
struct list_head va;
@@ -915,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -967,12 +976,8 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- int kpage_idx[2];
- uint32_t *kpage[2];
uint32_t *kdata;
void __user *user_ptr;
- int last_copied_page;
- int last_page_index;
};
struct radeon_cs_parser {
@@ -1007,8 +1012,15 @@ struct radeon_cs_parser {
struct ww_acquire_ctx ticket;
};
-extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+
+ if (ibc->kdata)
+ return ibc->kdata[idx];
+ return p->ib.ptr[idx];
+}
+
struct radeon_cs_packet {
unsigned idx;
@@ -1629,7 +1641,7 @@ struct radeon_asic_ring {
/* command emit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1675,8 +1687,6 @@ struct radeon_asic {
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
-
- u32 pt_ring_index;
void (*set_page)(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
@@ -1972,6 +1982,7 @@ struct cik_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
+ uint32_t macrotile_mode_array[16];
};
union radeon_asic_config {
@@ -2170,6 +2181,7 @@ struct radeon_device {
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
+ bool needs_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
@@ -2212,6 +2224,9 @@ struct radeon_device {
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
+
+ struct dev_pm_domain vga_pm_domain;
+ bool have_disp_power_ref;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2228,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
@@ -2292,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
-#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
-#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
/*
* Indirect registers accessor
@@ -2673,8 +2688,8 @@ extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev);
-extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
extern void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
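
The suspend/resume prototypes grow two booleans so the same helpers can serve both system sleep and the new runtime PM path. A hedged guess at how the two call sites differ; the exact argument values live in radeon_drv.c, which is not part of this section:

radeon_suspend_kms(dev, true, true);    /* system suspend path */
radeon_suspend_kms(dev, false, false);  /* runtime suspend path */
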
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 10f98c7742d..98a9074b306 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
count = radeon_atif_get_sbios_requests(handle, &req);
if (count <= 0)
@@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
struct radeon_atcs *atcs = &rdev->atcs;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
u32 retry = 3;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
int ret;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
/* No need to proceed if we're sure that ATIF is not supported */
if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8f7e04538fd..e354ce94cdd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cayman_vm_set_page,
+ .set_page = &cayman_dma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cayman_vm_set_page,
+ .set_page = &cayman_dma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &si_vm_set_page,
+ .set_page = &si_dma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@ static struct radeon_asic si_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &si_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cik_vm_set_page,
+ .set_page = &cik_sdma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2019,6 +2015,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
@@ -2100,8 +2098,7 @@ static struct radeon_asic kv_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cik_vm_set_page,
+ .set_page = &cik_sdma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2119,6 +2116,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
@@ -2442,27 +2441,48 @@ int radeon_asic_init(struct radeon_device *rdev)
}
break;
case CHIP_BONAIRE:
+ case CHIP_HAWAII:
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
rdev->has_uvd = true;
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CGTS_LS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_SDMA_LS |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
+ if (rdev->family == CHIP_BONAIRE) {
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ } else {
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ }
break;
case CHIP_KAVERI:
case CHIP_KABINI:
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70c29d5e080..c9fd97b5807 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@@ -581,17 +577,18 @@ int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +650,17 @@ int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -696,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -705,6 +702,10 @@ int cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+int cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -712,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -731,11 +732,11 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
@@ -802,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -814,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
/* uvd v3.1 */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index d96070bf838..9d302eaeea1 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -8,8 +8,7 @@
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <linux/pci.h>
#include "radeon_acpi.h"
@@ -59,6 +58,10 @@ struct atpx_mux {
u16 mux;
} __packed;
+bool radeon_is_px(void) {
+ return radeon_atpx_priv.atpx_detected;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -443,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
acpi_handle dhandle, atpx_handle;
acpi_status status;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -489,7 +492,7 @@ static int radeon_atpx_init(void)
*/
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227dae0..b3633d9a531 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 64565732cb9..20a768ac89a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -31,6 +31,8 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/pm_runtime.h>
+
extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
@@ -411,6 +413,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
}
}
+ if (property == rdev->mode_info.dither_property) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_connector->dither != val) {
+ radeon_connector->dither = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -626,6 +643,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -651,6 +673,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
@@ -750,6 +774,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
struct drm_encoder_helper_funcs *encoder_funcs;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -790,9 +819,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
- return connector->status;
- else
- return ret;
+ ret = connector->status;
+ goto out;
}
if (radeon_connector->dac_load_detect && encoder) {
@@ -817,6 +845,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
+
+out:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
return ret;
}
@@ -873,10 +906,15 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
struct drm_encoder_helper_funcs *encoder_funcs;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
+ int r;
if (!radeon_connector->dac_load_detect)
return ret;
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
@@ -887,6 +925,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
@@ -954,12 +994,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_mode_object *obj;
- int i;
+ int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- if (!force && radeon_check_hpd_status_unchanged(connector))
- return connector->status;
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+
+ if (!force && radeon_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+ goto exit;
+ }
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector, false);
@@ -1110,6 +1156,11 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
+
+exit:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
return ret;
}
@@ -1377,9 +1428,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ int r;
- if (!force && radeon_check_hpd_status_unchanged(connector))
- return connector->status;
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+
+ if (!force && radeon_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+ goto out;
+ }
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -1443,6 +1501,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
+out:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
return ret;
}
@@ -1658,12 +1720,16 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+
if (radeon_audio != 0)
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1760,9 +1826,12 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+ }
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -1807,9 +1876,12 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+ }
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1853,9 +1925,13 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+ }
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+
}
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
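Note: the detect callbacks in radeon_connectors.c above all follow the same runtime-PM discipline: take a reference with pm_runtime_get_sync() before touching the hardware and drop it with pm_runtime_put_autosuspend() on every exit path. The standalone sketch below models that discipline with hypothetical power_get()/power_put() helpers; it is an illustration of the pattern, not driver code.

#include <stdio.h>

/* Hypothetical stand-ins for pm_runtime_get_sync()/pm_runtime_put_autosuspend(). */
static int refcount;

static int power_get(void)  { return ++refcount > 0 ? 0 : -1; }
static void power_put(void) { --refcount; /* real code would also arm an autosuspend timer */ }

/* Model of a detect callback: acquire power, probe, release on every path. */
static int detect(int force)
{
    int status = 0;                 /* 0 == disconnected */

    if (power_get() < 0)
        return 0;                   /* cannot wake the device: report disconnected */

    if (!force) {                   /* e.g. HPD state unchanged */
        status = 1;
        goto out;                   /* still must drop the reference */
    }

    status = 2;                     /* pretend the probe found a monitor */
out:
    power_put();
    return status;
}

int main(void)
{
    printf("detect(0)=%d detect(1)=%d refs=%d\n", detect(0), detect(1), refcount);
    return 0;
}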
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e35bc6..f41594b2eea 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+ radeon_semaphore_sync_to(p->ib.semaphore,
+ p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -212,9 +213,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -237,25 +236,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
- if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
- (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
- size = p->chunks[i].length_dw * sizeof(uint32_t);
- p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
- p->chunks[i].user_ptr, size)) {
- return -EFAULT;
- }
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->cs_flags = p->chunks[i].kdata[0];
- if (p->chunks[i].length_dw > 1)
- ring = p->chunks[i].kdata[1];
- if (p->chunks[i].length_dw > 2)
- priority = (s32)p->chunks[i].kdata[2];
- }
+ size = p->chunks[i].length_dw;
+ cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+ p->chunks[i].user_ptr = cdata;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+ continue;
+
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
+ continue;
+ }
+
+ p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ size *= sizeof(uint32_t);
+ if (p->chunks[i].kdata == NULL) {
+ return -ENOMEM;
+ }
+ if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+ return -EFAULT;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
}
}
@@ -278,34 +283,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
}
}
- /* deal with non-vm */
- if ((p->chunk_ib_idx != -1) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
- (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
- if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
- DRM_ERROR("cs IB too big: %d\n",
- p->chunks[p->chunk_ib_idx].length_dw);
- return -EINVAL;
- }
- if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
- p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
- kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
- p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
- p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
- return -ENOMEM;
- }
- }
- p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
- p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
- p->chunks[p->chunk_ib_idx].last_copied_page = -1;
- p->chunks[p->chunk_ib_idx].last_page_index =
- ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
- }
-
return 0;
}
@@ -339,13 +316,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
kfree(parser->track);
kfree(parser->relocs);
kfree(parser->relocs_ptr);
- for (i = 0; i < parser->nchunks; i++) {
- kfree(parser->chunks[i].kdata);
- if ((parser->rdev->flags & RADEON_IS_AGP)) {
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
- }
- }
+ for (i = 0; i < parser->nchunks; i++)
+ drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
@@ -355,7 +327,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
- struct radeon_cs_chunk *ib_chunk;
int r;
if (parser->chunk_ib_idx == -1)
@@ -364,28 +335,11 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (parser->cs_flags & RADEON_CS_USE_VM)
return 0;
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
- /* Copy the packet into the IB, the parser will read from the
- * input memory (cached) and write to the IB (which can be
- * uncached).
- */
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- NULL, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
return r;
}
- r = radeon_cs_finish_pages(parser);
- if (r) {
- DRM_ERROR("Invalid command stream !\n");
- return r;
- }
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
@@ -423,7 +377,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
- struct radeon_cs_chunk *ib_chunk;
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
int r;
@@ -433,49 +386,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
- if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
- ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
- vm, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get const ib !\n");
- return r;
- }
- parser->const_ib.is_const_ib = true;
- parser->const_ib.length_dw = ib_chunk->length_dw;
- /* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
- ib_chunk->length_dw * 4)) {
- return -EFAULT;
- }
+ if (parser->const_ib.length_dw) {
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- vm, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib.length_dw = ib_chunk->length_dw;
- /* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
- ib_chunk->length_dw * 4)) {
- return -EFAULT;
- }
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
@@ -495,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_ib_sync_to(&parser->ib, vm->fence);
- radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
- rdev, vm, parser->ring));
+ radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+ radeon_semaphore_sync_to(parser->ib.semaphore,
+ radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -527,6 +444,62 @@ static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
return r;
}
+static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_vm *vm = NULL;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if (parser->cs_flags & RADEON_CS_USE_VM) {
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ vm = &fpriv->vm;
+
+ if ((rdev->family >= CHIP_TAHITI) &&
+ (parser->chunk_const_ib_idx != -1)) {
+ ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get const ib !\n");
+ return r;
+ }
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+ ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4))
+ return -EFAULT;
+ }
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ }
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib.length_dw = ib_chunk->length_dw;
+ if (ib_chunk->kdata)
+ memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
+ else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+ return -EFAULT;
+ return 0;
+}
+
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -552,10 +525,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
- r = radeon_cs_parser_relocs(&parser);
- if (r) {
- if (r != -ERESTARTSYS)
+
+ r = radeon_cs_ib_fill(rdev, &parser);
+ if (!r) {
+ r = radeon_cs_parser_relocs(&parser);
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
+ }
+
+ if (r) {
radeon_cs_parser_fini(&parser, r, false);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
@@ -579,97 +557,6 @@ out:
return r;
}
-int radeon_cs_finish_pages(struct radeon_cs_parser *p)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- int i;
- int size = PAGE_SIZE;
-
- for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
- if (i == ibc->last_page_index) {
- size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
- }
-
- if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
- ibc->user_ptr + (i * PAGE_SIZE),
- size))
- return -EFAULT;
- }
- return 0;
-}
-
-static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
-{
- int new_page;
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- int i;
- int size = PAGE_SIZE;
- bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
- false : true;
-
- for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
- if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
- ibc->user_ptr + (i * PAGE_SIZE),
- PAGE_SIZE)) {
- p->parser_error = -EFAULT;
- return 0;
- }
- }
-
- if (pg_idx == ibc->last_page_index) {
- size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
- }
-
- new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
- if (copy1)
- ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
-
- if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
- ibc->user_ptr + (pg_idx * PAGE_SIZE),
- size)) {
- p->parser_error = -EFAULT;
- return 0;
- }
-
- /* copy to IB for non single case */
- if (!copy1)
- memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
-
- ibc->last_copied_page = pg_idx;
- ibc->kpage_idx[new_page] = pg_idx;
-
- return new_page;
-}
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
-
/**
* radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
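Note: the new radeon_cs_ib_fill() path copies each chunk from user space once into kdata at parse time and then either memcpy()s that cached buffer into the IB or falls back to a direct user copy, replacing the per-page copy helpers removed above. A minimal user-space model of that choice (function name and error value are made up for the sketch, and copy_from_user() is simulated with memcpy):

#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* Fill a destination buffer either from a cached kernel copy (kdata) or,
 * when no copy was made, straight from the "user" pointer. */
static int ib_fill(uint32_t *ib, const uint32_t *kdata,
                   const uint32_t *user_ptr, size_t ndw)
{
    if (kdata) {
        memcpy(ib, kdata, ndw * sizeof(uint32_t));      /* cached path */
    } else {
        if (!user_ptr)
            return -14;                                 /* -EFAULT-like */
        memcpy(ib, user_ptr, ndw * sizeof(uint32_t));   /* "copy_from_user" */
    }
    return 0;
}

int main(void)
{
    uint32_t user[4] = { 1, 2, 3, 4 }, cached[4] = { 9, 9, 9, 9 }, ib[4];

    ib_fill(ib, cached, user, 4);   /* prefers the cached copy */
    printf("%u\n", ib[0]);          /* 9 */
    ib_fill(ib, NULL, user, 4);     /* falls back to the user pointer */
    printf("%u\n", ib[0]);          /* 1 */
    return 0;
}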
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 841d0e09be3..39b033b441d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,9 +98,16 @@ static const char radeon_family_name[][16] = {
"BONAIRE",
"KAVERI",
"KABINI",
+ "HAWAII",
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_program_register_sequence - program an array of registers.
*
@@ -244,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
int radeon_doorbell_init(struct radeon_device *rdev)
{
- int i;
-
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
- /* limit to 4 MB for now */
- if (rdev->doorbell.size > (4 * 1024 * 1024))
- rdev->doorbell.size = 4 * 1024 * 1024;
+ rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+ if (rdev->doorbell.num_doorbells == 0)
+ return -EINVAL;
- rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
+ rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
- rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
+ memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- rdev->doorbell.free[i] = true;
- }
return 0;
}
@@ -283,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
}
/**
- * radeon_doorbell_get - Allocate a doorbell page
+ * radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Allocate a doorbell page for use by the driver (all asics).
+ * Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
- int i;
-
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- if (rdev->doorbell.free[i]) {
- rdev->doorbell.free[i] = false;
- *doorbell = i;
- return 0;
- }
+ unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+ if (offset < rdev->doorbell.num_doorbells) {
+ __set_bit(offset, rdev->doorbell.used);
+ *doorbell = offset;
+ return 0;
+ } else {
+ return -EINVAL;
}
- return -EINVAL;
}
/**
- * radeon_doorbell_free - Free a doorbell page
+ * radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Free a doorbell page allocated for use by the driver (all asics)
+ * Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
- if (doorbell < rdev->doorbell.num_pages)
- rdev->doorbell.free[doorbell] = true;
+ if (doorbell < rdev->doorbell.num_doorbells)
+ __clear_bit(doorbell, rdev->doorbell.used);
}
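Note: the doorbell allocator now tracks entries with a used bitmap and find_first_zero_bit()/__set_bit()/__clear_bit() instead of a per-page boolean array. A tiny self-contained C model of the same bitmap allocation idea (toy slot_get()/slot_put() helpers, not the kernel API):

#include <stdio.h>

#define NUM_SLOTS 64
static unsigned long long used;     /* one bit per doorbell slot */

/* Allocate the lowest free slot, -1 if all are taken. */
static int slot_get(void)
{
    for (int i = 0; i < NUM_SLOTS; i++) {
        if (!(used & (1ULL << i))) {
            used |= 1ULL << i;
            return i;
        }
    }
    return -1;
}

static void slot_put(int i)
{
    if (i >= 0 && i < NUM_SLOTS)
        used &= ~(1ULL << i);
}

int main(void)
{
    int a = slot_get(), b = slot_get();
    slot_put(a);
    printf("a=%d b=%d next=%d\n", a, b, slot_get());  /* next reuses slot 0 */
    return 0;
}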
/*
@@ -1076,7 +1076,10 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
+ return;
+
if (state == VGA_SWITCHEROO_ON) {
unsigned d3_delay = dev->pdev->d3_delay;
@@ -1087,7 +1090,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
dev->pdev->d3_delay = 20;
- radeon_resume_kms(dev);
+ radeon_resume_kms(dev, true, true);
dev->pdev->d3_delay = d3_delay;
@@ -1097,7 +1100,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- radeon_suspend_kms(dev, pmm);
+ radeon_suspend_kms(dev, true, true);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1147,6 +1150,7 @@ int radeon_device_init(struct radeon_device *rdev,
{
int r, i;
int dma_bits;
+ bool runtime = false;
rdev->shutdown = false;
rdev->dev = &pdev->dev;
@@ -1293,7 +1297,14 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
+
+ if (radeon_runtime_pm == 1)
+ runtime = true;
+ if ((radeon_runtime_pm == -1) && radeon_is_px())
+ runtime = true;
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ if (runtime)
+ vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
r = radeon_init(rdev);
if (r)
@@ -1383,7 +1394,7 @@ void radeon_device_fini(struct radeon_device *rdev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
@@ -1394,9 +1405,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
- if (state.event == PM_EVENT_PRETHAW) {
- return 0;
- }
+
rdev = dev->dev_private;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1455,14 +1464,17 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (state.event == PM_EVENT_SUSPEND) {
+ if (suspend) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- console_lock();
- radeon_fbdev_set_suspend(rdev, 1);
- console_unlock();
+
+ if (fbcon) {
+ console_lock();
+ radeon_fbdev_set_suspend(rdev, 1);
+ console_unlock();
+ }
return 0;
}
@@ -1475,7 +1487,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int radeon_resume_kms(struct drm_device *dev)
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
@@ -1484,12 +1496,17 @@ int radeon_resume_kms(struct drm_device *dev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- console_lock();
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
- console_unlock();
- return -1;
+ if (fbcon) {
+ console_lock();
+ }
+ if (resume) {
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev)) {
+ if (fbcon)
+ console_unlock();
+ return -1;
+ }
}
/* resume AGP if in use */
radeon_agp_resume(rdev);
@@ -1502,9 +1519,11 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- radeon_fbdev_set_suspend(rdev, 0);
- console_unlock();
-
+ if (fbcon) {
+ radeon_fbdev_set_suspend(rdev, 0);
+ console_unlock();
+ }
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
@@ -1549,6 +1568,14 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int resched;
down_write(&rdev->exclusive_lock);
+
+ if (!rdev->needs_reset) {
+ up_write(&rdev->exclusive_lock);
+ return 0;
+ }
+
+ rdev->needs_reset = false;
+
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
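Note: the radeon_gpu_reset() hunk above adds a needs_reset flag that is consumed under the exclusive lock, so after a lockup only one of the concurrent callers performs the reset. A simplified user-space model of that consume-once guard (the real code keeps holding its exclusive lock for the reset itself):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool needs_reset;

/* Only the caller that observes the flag does the expensive work;
 * everyone else returns early. */
static int gpu_reset(void)
{
    pthread_mutex_lock(&lock);
    if (!needs_reset) {
        pthread_mutex_unlock(&lock);
        return 0;               /* nothing to do */
    }
    needs_reset = false;        /* consume the request */
    pthread_mutex_unlock(&lock);

    /* ... the actual reset would run here ... */
    return 1;
}

int main(void)
{
    needs_reset = true;
    printf("%d %d\n", gpu_reset(), gpu_reset());   /* 1 then 0 */
    return 0;
}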
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa050d41..7b253815a32 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -30,6 +30,7 @@
#include "atom.h"
#include <asm/div64.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -306,7 +307,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
*/
if (update_pending &&
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
- &vpos, &hpos)) &&
+ &vpos, &hpos, NULL, NULL)) &&
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
(vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
/* crtc didn't flip in this target vblank interval,
@@ -494,11 +495,55 @@ unlock_free:
return r;
}
+static int
+radeon_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct radeon_device *rdev;
+ struct drm_crtc *crtc;
+ bool active = false;
+ int ret;
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ dev = set->crtc->dev;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_crtc_helper_set_config(set);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->enabled)
+ active = true;
+
+ pm_runtime_mark_last_busy(dev->dev);
+
+ rdev = dev->dev_private;
+ /* if we have active crtcs and we don't have a power ref,
+ take the current one */
+ if (active && !rdev->have_disp_power_ref) {
+ rdev->have_disp_power_ref = true;
+ return ret;
+ }
+ /* if we have no active crtcs, then drop the power ref
+ we got before */
+ if (!active && rdev->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ rdev->have_disp_power_ref = false;
+ }
+
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
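Note: radeon_crtc_set_config() above keeps one long-lived runtime-PM reference while any CRTC is enabled and drops it when the last one goes away, on top of the short reference taken for the call itself. A standalone sketch of that bookkeeping, with a plain counter standing in for the runtime-PM refcount:

#include <stdio.h>
#include <stdbool.h>

static int  power_refs;             /* stands in for the runtime-PM refcount */
static bool have_disp_ref;

static void set_config(bool any_crtc_enabled)
{
    power_refs++;                               /* "pm_runtime_get_sync()" on entry */

    if (any_crtc_enabled && !have_disp_ref) {
        have_disp_ref = true;                   /* keep the reference we just took */
        return;
    }
    if (!any_crtc_enabled && have_disp_ref) {
        power_refs--;                           /* drop the long-term display ref */
        have_disp_ref = false;
    }
    power_refs--;                               /* drop the reference from entry */
}

int main(void)
{
    set_config(true);                           /* light up a display */
    printf("refs=%d\n", power_refs);            /* 1: display holds a reference */
    set_config(false);                          /* last CRTC turned off */
    printf("refs=%d\n", power_refs);            /* 0: device may runtime-suspend */
    return 0;
}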
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
.page_flip = radeon_crtc_page_flip,
};
@@ -1178,6 +1223,12 @@ static struct drm_prop_enum_list radeon_audio_enum_list[] =
{ RADEON_AUDIO_AUTO, "auto" },
};
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static struct drm_prop_enum_list radeon_dither_enum_list[] =
+{ { RADEON_FMT_DITHER_DISABLE, "off" },
+ { RADEON_FMT_DITHER_ENABLE, "on" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1234,6 +1285,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
"audio",
radeon_audio_enum_list, sz);
+ sz = ARRAY_SIZE(radeon_dither_enum_list);
+ rdev->mode_info.dither_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "dither",
+ radeon_dither_enum_list, sz);
+
return 0;
}
@@ -1539,12 +1596,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
/*
- * Retrieve current video scanout position of crtc on a given gpu.
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when query happened.
*
* \param dev Device to query.
* \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
@@ -1560,7 +1622,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1568,6 +1631,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
struct radeon_device *rdev = dev->dev_private;
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
@@ -1650,6 +1719,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
}
}
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
/* Decode into vertical and horizontal scanout position. */
*vpos = position & 0x1fff;
*hpos = (position >> 16) & 0x1fff;
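Note: radeon_get_crtc_scanoutpos() now takes optional stime/etime pointers and samples a timestamp immediately before and after the register query, so callers can bound when the position was read. The same bracketing pattern in a self-contained sketch, using clock_gettime() as a stand-in for ktime_get():

#include <stdio.h>
#include <time.h>

static int query_position(int *pos, struct timespec *stime, struct timespec *etime)
{
    if (stime)
        clock_gettime(CLOCK_MONOTONIC, stime);   /* just before the register read */

    *pos = 42;                                   /* pretend register read */

    if (etime)
        clock_gettime(CLOCK_MONOTONIC, etime);   /* just after the register read */
    return 0;
}

int main(void)
{
    struct timespec a, b;
    int pos;

    query_position(&pos, &a, &b);
    printf("pos=%d window=%ld ns\n", pos,
           (long)((b.tv_sec - a.tv_sec) * 1000000000L + (b.tv_nsec - a.tv_nsec)));
    return 0;
}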
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9c14a1ba1de..9f5ff28864f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,8 +36,9 @@
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
-
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drm_crtc_helper.h"
/*
* KMS wrapper.
* - 2.0.0 - initial interface
@@ -75,9 +76,10 @@
* 2.32.0 - new info request for rings working
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 34
+#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -87,8 +89,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-int radeon_resume_kms(struct drm_device *dev);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
@@ -100,14 +102,14 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
- int *vpos, int *hpos);
+ int *vpos, int *hpos, ktime_t *stime,
+ ktime_t *etime);
extern const struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -137,9 +139,11 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
void radeon_unregister_atpx_handler(void);
+bool radeon_is_px(void);
#else
static inline void radeon_register_atpx_handler(void) {}
static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_is_px(void) { return false; }
#endif
int radeon_no_wb;
@@ -162,6 +166,7 @@ int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
+int radeon_runtime_pm = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -223,6 +228,9 @@ module_param_named(dpm, radeon_dpm, int, 0444);
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, radeon_aspm, int, 0444);
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, radeon_runtime_pm, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -259,6 +267,7 @@ static int radeon_resume(struct drm_device *dev)
return 0;
}
+
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -353,25 +362,144 @@ radeon_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int radeon_pmops_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- return radeon_suspend_kms(dev, state);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_suspend_kms(drm_dev, true, true);
}
-static int
-radeon_pci_resume(struct pci_dev *pdev)
+static int radeon_pmops_resume(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- return radeon_resume_kms(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_resume_kms(drm_dev, true, true);
+}
+
+static int radeon_pmops_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_suspend_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_resume_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (radeon_runtime_pm == 0)
+ return -EINVAL;
+
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
+ ret = radeon_suspend_kms(drm_dev, false, false);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+ return 0;
+}
+
+static int radeon_pmops_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (radeon_runtime_pm == 0)
+ return -EINVAL;
+
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = radeon_resume_kms(drm_dev, false, false);
+ drm_kms_helper_poll_enable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ return 0;
}
+static int radeon_pmops_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_crtc *crtc;
+
+ if (radeon_runtime_pm == 0)
+ return -EBUSY;
+
+ /* are we PX enabled? */
+ if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+ DRM_DEBUG_DRIVER("failing to power off - not px\n");
+ return -EBUSY;
+ }
+
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+ if (crtc->enabled) {
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ return -EBUSY;
+ }
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ return 1;
+}
+
+long radeon_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ long ret;
+ dev = file_priv->minor->dev;
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static const struct dev_pm_ops radeon_pm_ops = {
+ .suspend = radeon_pmops_suspend,
+ .resume = radeon_pmops_resume,
+ .freeze = radeon_pmops_freeze,
+ .thaw = radeon_pmops_thaw,
+ .poweroff = radeon_pmops_freeze,
+ .restore = radeon_pmops_resume,
+ .runtime_suspend = radeon_pmops_runtime_suspend,
+ .runtime_resume = radeon_pmops_runtime_resume,
+ .runtime_idle = radeon_pmops_runtime_idle,
+};
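Note: the new dev_pm_ops funnel system suspend, hibernation freeze and runtime suspend through the same radeon_suspend_kms() helper, differing only in the (suspend, fbcon) flags. A stub sketch of that mapping, with printouts instead of real PCI and console handling:

#include <stdio.h>
#include <stdbool.h>

/* One worker, three callers: the flags decide whether the device is actually
 * powered down and whether the fbcon console is touched. */
static int suspend_kms(bool suspend, bool fbcon)
{
    printf("power down device: %s, suspend fbcon: %s\n",
           suspend ? "yes" : "no", fbcon ? "yes" : "no");
    return 0;
}

static int pmops_suspend(void)         { return suspend_kms(true,  true);  }  /* system sleep */
static int pmops_freeze(void)          { return suspend_kms(false, true);  }  /* hibernate image */
static int pmops_runtime_suspend(void) { return suspend_kms(false, false); }  /* runtime PM */

int main(void)
{
    pmops_suspend();
    pmops_freeze();
    pmops_runtime_suspend();
    return 0;
}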
+
static const struct file_operations radeon_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = radeon_drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
.read = drm_read,
@@ -380,6 +508,15 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
+
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ radeon_driver_unload_kms(dev);
+}
+
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
@@ -392,8 +529,6 @@ static struct drm_driver kms_driver = {
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
.unload = radeon_driver_unload_kms,
- .suspend = radeon_suspend_kms,
- .resume = radeon_resume_kms,
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
.disable_vblank = radeon_disable_vblank_kms,
@@ -408,7 +543,6 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.ioctls = radeon_ioctls_kms,
- .gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
@@ -451,8 +585,8 @@ static struct pci_driver radeon_kms_pci_driver = {
.id_table = pciidlist,
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
- .suspend = radeon_pci_suspend,
- .resume = radeon_pci_resume,
+ .driver.pm = &radeon_pm_ops,
+ .shutdown = radeon_pci_shutdown,
};
static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b369d42f7de..543dcfae7e6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
+long radeon_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
/* The rest of the file is DEPRECATED! */
#ifdef CONFIG_DRM_RADEON_UMS
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 3c8289083f9..614ad549297 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -96,6 +96,7 @@ enum radeon_family {
CHIP_BONAIRE,
CHIP_KAVERI,
CHIP_KABINI,
+ CHIP_HAWAII,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb..d3a86e43c01 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
- if (wake) {
- rdev->fence_drv[ring].last_activity = jiffies;
+ if (wake)
wake_up_all(&rdev->fence_queue);
- }
}
/**
@@ -212,13 +210,13 @@ static void radeon_fence_destroy(struct kref *kref)
}
/**
- * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
*
* @rdev: radeon device pointer
* @seq: sequence number
* @ring: ring index the fence is associated with
*
- * Check if the last singled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if the fence has signaled (current fence value
* is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
}
/**
- * radeon_fence_wait_seq - wait for a specific sequence number
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
*
* @rdev: radeon device pointer
- * @target_seq: sequence number we want to wait for
- * @ring: ring index the fence is associated with
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any of them has signaled (current value is >= requested
+ * value) or false if none has. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
* @intr: use interruptable sleep
* @lock_ring: whether the ring should be locked or not
*
- * Wait for the requested sequence number to be written (all asics).
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptable (true) or non-interruptable
* (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait(), et al.
+ * for radeon_fence_wait_*().
* Returns 0 if the sequence number has passed, error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected and the ring is
- * marked as not ready so no further jobs get scheduled until a successful
- * reset.
+ * -EDEADLK is returned when a GPU lockup has been detected.
*/
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
- unsigned ring, bool intr, bool lock_ring)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
+ bool intr, bool lock_ring)
{
- unsigned long timeout, last_activity;
- uint64_t seq;
- unsigned i;
+ uint64_t last_seq[RADEON_NUM_RINGS];
bool signaled;
- int r;
+ int i, r;
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
- while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
- if (!rdev->ring[ring].ready) {
- return -EBUSY;
+ /* Save current sequence values, used to check for GPU lockups */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+ radeon_irq_kms_sw_irq_get(rdev, i);
}
- timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
- if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
- /* the normal case, timeout is somewhere before last_activity */
- timeout = rdev->fence_drv[ring].last_activity - timeout;
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue, (
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+ || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
} else {
- /* either jiffies wrapped around, or no fence was signaled in the last 500ms
- * anyway we will just wait for the minimum amount and then check for a lockup
- */
- timeout = 1;
+ r = wait_event_timeout(rdev->fence_queue, (
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+ || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
}
- seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
- /* Save current last activity valuee, used to check for GPU lockups */
- last_activity = rdev->fence_drv[ring].last_activity;
- trace_radeon_fence_wait_begin(rdev->ddev, seq);
- radeon_irq_kms_sw_irq_get(rdev, ring);
- if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_queue,
- (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
- timeout);
- } else {
- r = wait_event_timeout(rdev->fence_queue,
- (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
- timeout);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
}
- radeon_irq_kms_sw_irq_put(rdev, ring);
- if (unlikely(r < 0)) {
+
+ if (unlikely(r < 0))
return r;
- }
- trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!signaled)) {
+ if (rdev->needs_reset)
+ return -EDEADLK;
+
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
- if (r) {
+ if (r)
continue;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
+ break;
}
- /* check if sequence value has changed since last_activity */
- if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (i != RADEON_NUM_RINGS)
continue;
- }
- if (lock_ring) {
+ if (lock_ring)
mutex_lock(&rdev->ring_lock);
- }
- /* test if somebody else has already decided that this is a lockup */
- if (last_activity != rdev->fence_drv[ring].last_activity) {
- if (lock_ring) {
- mutex_unlock(&rdev->ring_lock);
- }
- continue;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
+ break;
}
- if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ if (i < RADEON_NUM_RINGS) {
/* good news we believe it's a lockup */
- dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
- target_seq, seq);
-
- /* change last activity so nobody else think there is a lockup */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- rdev->fence_drv[i].last_activity = jiffies;
- }
-
- /* mark the ring as not ready any more */
- rdev->ring[ring].ready = false;
- if (lock_ring) {
+ dev_warn(rdev->dev, "GPU lockup (waiting for "
+ "0x%016llx last fence id 0x%016llx on"
+ " ring %d)\n",
+ target_seq[i], last_seq[i], i);
+
+ /* remember that we need a reset */
+ rdev->needs_reset = true;
+ if (lock_ring)
mutex_unlock(&rdev->ring_lock);
- }
+ wake_up_all(&rdev->fence_queue);
return -EDEADLK;
}
- if (lock_ring) {
+ if (lock_ring)
mutex_unlock(&rdev->ring_lock);
- }
}
}
return 0;
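Note: radeon_fence_wait_seq() now waits on a per-ring array of target sequence numbers: it snapshots last_seq for every waited ring, sleeps for one timeout interval, and only declares a lockup if no waited ring made progress. The loop below is a simplified single-threaded model of that logic (advance() fakes the hardware signaling, and the error values are illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_RINGS 3

static uint64_t last_seq[NUM_RINGS];    /* last value each ring has signaled */

static bool any_signaled(const uint64_t *target)
{
    for (int i = 0; i < NUM_RINGS; i++)
        if (target[i] && last_seq[i] >= target[i])
            return true;
    return false;
}

static void advance(void) { last_seq[1]++; }    /* stand-in for the hardware */

static int wait_any(const uint64_t *target, int max_intervals)
{
    uint64_t before[NUM_RINGS];

    while (!any_signaled(target)) {
        for (int i = 0; i < NUM_RINGS; i++)
            before[i] = last_seq[i];    /* snapshot, like the saved last_seq[] */

        if (max_intervals-- <= 0)
            return -1;
        advance();                      /* "sleep" for one timeout interval */

        bool progress = false;
        for (int i = 0; i < NUM_RINGS; i++)
            if (target[i] && before[i] != last_seq[i])
                progress = true;
        if (!progress)
            return -35;                 /* -EDEADLK-like: looks like a lockup */
    }
    return 0;
}

int main(void)
{
    uint64_t target[NUM_RINGS] = { 0, 2, 0 };   /* wait only on ring 1 */
    printf("wait_any -> %d\n", wait_any(target, 10));
    return 0;
}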
@@ -388,6 +404,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
*/
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
+ uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
if (fence == NULL) {
@@ -395,147 +412,15 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return -EINVAL;
}
- r = radeon_fence_wait_seq(fence->rdev, fence->seq,
- fence->ring, intr, true);
- if (r) {
- return r;
- }
- fence->seq = RADEON_FENCE_SIGNALED_SEQ;
- return 0;
-}
-
-static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
-{
- unsigned i;
+ seq[fence->ring] = fence->seq;
+ if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
- return true;
- }
- }
- return false;
-}
-
-/**
- * radeon_fence_wait_any_seq - wait for a sequence number on any ring
- *
- * @rdev: radeon device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics). Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait_any(), et al.
- * Returns 0 if the sequence number has passed, error for all other cases.
- */
-static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
- u64 *target_seq, bool intr)
-{
- unsigned long timeout, last_activity, tmp;
- unsigned i, ring = RADEON_NUM_RINGS;
- bool signaled;
- int r;
-
- for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i]) {
- continue;
- }
-
- /* use the most recent one as indicator */
- if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
- last_activity = rdev->fence_drv[i].last_activity;
- }
-
- /* For lockup detection just pick the lowest ring we are
- * actively waiting for
- */
- if (i < ring) {
- ring = i;
- }
- }
-
- /* nothing to wait for ? */
- if (ring == RADEON_NUM_RINGS) {
- return -ENOENT;
- }
-
- while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
- timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
- if (time_after(last_activity, timeout)) {
- /* the normal case, timeout is somewhere before last_activity */
- timeout = last_activity - timeout;
- } else {
- /* either jiffies wrapped around, or no fence was signaled in the last 500ms
- * anyway we will just wait for the minimum amount and then check for a lockup
- */
- timeout = 1;
- }
-
- trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (target_seq[i]) {
- radeon_irq_kms_sw_irq_get(rdev, i);
- }
- }
- if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_queue,
- (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
- timeout);
- } else {
- r = wait_event_timeout(rdev->fence_queue,
- (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
- timeout);
- }
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (target_seq[i]) {
- radeon_irq_kms_sw_irq_put(rdev, i);
- }
- }
- if (unlikely(r < 0)) {
- return r;
- }
- trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
-
- if (unlikely(!signaled)) {
- /* we were interrupted for some reason and fence
- * isn't signaled yet, resume waiting */
- if (r) {
- continue;
- }
-
- mutex_lock(&rdev->ring_lock);
- for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
- if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
- tmp = rdev->fence_drv[i].last_activity;
- }
- }
- /* test if somebody else has already decided that this is a lockup */
- if (last_activity != tmp) {
- last_activity = tmp;
- mutex_unlock(&rdev->ring_lock);
- continue;
- }
-
- if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
- /* good news we believe it's a lockup */
- dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
- target_seq[ring]);
-
- /* change last activity so nobody else think there is a lockup */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- rdev->fence_drv[i].last_activity = jiffies;
- }
+ r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+ if (r)
+ return r;
- /* mark the ring as not ready any more */
- rdev->ring[ring].ready = false;
- mutex_unlock(&rdev->ring_lock);
- return -EDEADLK;
- }
- mutex_unlock(&rdev->ring_lock);
- }
- }
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return 0;
}
@@ -557,7 +442,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
bool intr)
{
uint64_t seq[RADEON_NUM_RINGS];
- unsigned i;
+ unsigned i, num_rings = 0;
int r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
continue;
}
- if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
- /* something was allready signaled */
- return 0;
- }
-
seq[i] = fences[i]->seq;
+ ++num_rings;
+
+ /* test if something was already signaled */
+ if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
}
- r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ /* nothing to wait for ? */
+ if (num_rings == 0)
+ return -ENOENT;
+
+ r = radeon_fence_wait_seq(rdev, seq, intr, true);
if (r) {
return r;
}
@@ -583,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
}
/**
+ * radeon_fence_wait_locked - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ *
+ * Wait for the requested fence to signal (all asics).
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_locked(struct radeon_fence *fence)
+{
+ uint64_t seq[RADEON_NUM_RINGS] = {};
+ int r;
+
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return -EINVAL;
+ }
+
+ seq[fence->ring] = fence->seq;
+ if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
+ if (r)
+ return r;
+
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
@@ -594,15 +513,15 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
*/
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
- uint64_t seq;
+ uint64_t seq[RADEON_NUM_RINGS] = {};
- seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
- if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+ seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emited fence */
return -ENOENT;
}
- return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ return radeon_fence_wait_seq(rdev, seq, false, false);
}
/**
@@ -617,14 +536,18 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
*/
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
- uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
- r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+ if (!seq[ring])
+ return 0;
+
+ r = radeon_fence_wait_seq(rdev, seq, false, false);
if (r) {
- if (r == -EDEADLK) {
+ if (r == -EDEADLK)
return -EDEADLK;
- }
+
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
ring, r);
}
@@ -826,7 +749,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
- rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a2bd5..3044e504f4e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -607,8 +607,8 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
- unsigned pd_size, pts_size;
- u64 *pd_addr;
+ unsigned pd_size, pd_entries, pts_size;
+ struct radeon_ib ib;
int r;
if (vm == NULL) {
@@ -619,8 +619,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
return 0;
}
-retry:
pd_size = radeon_vm_directory_size(rdev);
+ pd_entries = radeon_vm_num_pdes(rdev);
+
+retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +639,31 @@ retry:
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
/* Initially clear the page directory */
- pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
- memset(pd_addr, 0, pd_size);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+ NULL, pd_entries * 2 + 64);
+ if (r) {
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return r;
+ }
+ ib.length_dw = 0;
+
+ radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
+ 0, pd_entries, 0, 0);
+
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return r;
+ }
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
+ radeon_fence_unref(&vm->last_flush);
+
+ /* allocate page table array */
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
@@ -914,6 +938,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
}
/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+ uint32_t hw_flags = 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ hw_flags |= R600_PTE_SYSTEM;
+ hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return hw_flags;
+}
+
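The helper added above is a pure bit-wise translation from the userspace RADEON_VM_PAGE_* flags to the hardware R600_PTE_* bits. As a minimal standalone sketch of the same masking pattern (the flag values and names below are placeholders for illustration only, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

/* Placeholder flag values, for illustration only; the real definitions
 * live in radeon_drm.h (ABI side) and radeon.h (hardware side). */
#define VM_PAGE_VALID     (1u << 0)
#define VM_PAGE_READABLE  (1u << 1)
#define VM_PAGE_WRITEABLE (1u << 2)
#define VM_PAGE_SYSTEM    (1u << 3)
#define VM_PAGE_SNOOPED   (1u << 4)

#define PTE_VALID     (1u << 0)
#define PTE_SYSTEM    (1u << 1)
#define PTE_SNOOPED   (1u << 2)
#define PTE_READABLE  (1u << 5)
#define PTE_WRITEABLE (1u << 6)

/* Same structure as radeon_vm_page_flags(): copy each ABI bit to its
 * hardware counterpart, and only pass SNOOPED through for SYSTEM pages. */
static uint32_t vm_page_flags(uint32_t flags)
{
	uint32_t hw = 0;

	hw |= (flags & VM_PAGE_VALID) ? PTE_VALID : 0;
	hw |= (flags & VM_PAGE_READABLE) ? PTE_READABLE : 0;
	hw |= (flags & VM_PAGE_WRITEABLE) ? PTE_WRITEABLE : 0;
	if (flags & VM_PAGE_SYSTEM) {
		hw |= PTE_SYSTEM;
		hw |= (flags & VM_PAGE_SNOOPED) ? PTE_SNOOPED : 0;
	}
	return hw;
}

int main(void)
{
	uint32_t in = VM_PAGE_VALID | VM_PAGE_SYSTEM | VM_PAGE_SNOOPED;

	printf("abi flags 0x%x -> hw flags 0x%x\n", in, vm_page_flags(in));
	return 0;
}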
+/**
* radeon_vm_update_pdes - make sure that page directory is valid
*
* @rdev: radeon_device pointer
@@ -974,7 +1018,11 @@ retry:
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
- RADEON_VM_PAGE_VALID);
+ R600_PTE_VALID);
+
+ count *= RADEON_VM_PTE_COUNT;
+ radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+ count, 0, 0);
}
count = 1;
@@ -987,8 +1035,11 @@ retry:
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
- incr, RADEON_VM_PAGE_VALID);
+ incr, R600_PTE_VALID);
+ count *= RADEON_VM_PTE_COUNT;
+ radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+ count, 0, 0);
}
return 0;
@@ -1082,7 +1133,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_bo *bo,
struct ttm_mem_reg *mem)
{
- unsigned ridx = rdev->asic->vm.pt_ring_index;
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
@@ -1151,11 +1201,16 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* reserve space for pde addresses */
ndw += npdes * 2;
+ /* reserve space for clearing new page tables */
+ ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
+
/* update too big for an IB */
if (ndw > 0xfffff)
return -ENOMEM;
- r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
ib.length_dw = 0;
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,9 +1220,9 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
}
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, bo_va->flags);
+ addr, radeon_vm_page_flags(bo_va->flags));
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8a583..805c5e566b9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index c180df8e84d..bdb0f93e73b 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -418,7 +418,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = drm_ioctl(filp, cmd, arg);
+ ret = radeon_drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e8482cf3..ec6240b0046 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/pm_runtime.h>
+
#define RADEON_WAIT_IDLE_TIMEOUT 200
/**
@@ -47,8 +49,12 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
+ irqreturn_t ret;
- return radeon_irq_process(rdev);
+ ret = radeon_irq_process(rdev);
+ if (ret == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(dev->dev);
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580ddc4eb..55d0b474bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-
+#include <linux/pm_runtime.h>
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -50,9 +50,14 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
+
if (rdev->rmmio == NULL)
goto done_free;
+
+ pm_runtime_get_sync(dev->dev);
+
radeon_acpi_fini(rdev);
+
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
@@ -125,9 +130,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
+ if (radeon_runtime_pm != 0) {
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_allow(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ }
+
out:
if (r)
radeon_driver_unload_kms(dev);
+
+
return r;
}
@@ -191,7 +207,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pci_device;
+ *value = dev->pdev->device;
break;
case RADEON_INFO_NUM_GB_PIPES:
*value = rdev->num_gb_pipes;
@@ -324,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_BONAIRE)
- return -EINVAL;
+ *value = rdev->config.cik.backend_map;
else if (rdev->family >= CHIP_TAHITI)
*value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
@@ -433,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+ if (rdev->family >= CHIP_BONAIRE) {
+ value = rdev->config.cik.macrotile_mode_array;
+ value_size = sizeof(uint32_t)*16;
+ } else {
+ DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+ return -EINVAL;
+ }
+ break;
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
@@ -475,9 +500,14 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
+ int r;
file_priv->driver_priv = NULL;
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
+
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
@@ -506,6 +536,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = fpriv;
}
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7cb178a34a0..0b158f98d28 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+retry:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
&base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
+
+ /* On old GPUs like RN50 with little VRAM, pinning can fail because
+ * the current fb is taking all the space needed. So instead of unpinning
+ * the old buffer after pinning the new one, first unpin the old one
+ * and then retry pinning the new one.
+ *
+ * As only the master can set a mode, only the master can pin, and it is
+ * unlikely the master client will race with itself, especially
+ * on those old GPUs with a single crtc.
+ *
+ * We don't shut down the display controller because the new buffer
+ * will end up in the same spot.
+ */
+ if (!atomic && fb && fb != crtc->fb) {
+ struct radeon_bo *old_rbo;
+ unsigned long nsize, osize;
+
+ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+ osize = radeon_bo_size(old_rbo);
+ nsize = radeon_bo_size(rbo);
+ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+ radeon_bo_unpin(old_rbo);
+ radeon_bo_unreserve(old_rbo);
+ fb = NULL;
+ goto retry;
+ }
+ }
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
@@ -1056,6 +1084,26 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
}
}
+static void radeon_crtc_disable(struct drm_crtc *crtc)
+{
+ radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ int r;
+ struct radeon_framebuffer *radeon_fb;
+ struct radeon_bo *rbo;
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ else {
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ }
+}
+
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.dpms = radeon_crtc_dpms,
.mode_fixup = radeon_crtc_mode_fixup,
@@ -1065,6 +1113,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
+ .disable = radeon_crtc_disable
};
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512f5c8..c89971d904c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
- bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ef63d3f00b2..3f0dd664af9 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -249,6 +249,8 @@ struct radeon_mode_info {
struct drm_property *underscan_vborder_property;
/* audio */
struct drm_property *audio_property;
+ /* FMT dithering */
+ struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -479,6 +481,11 @@ enum radeon_connector_audio {
RADEON_AUDIO_AUTO = 2
};
+enum radeon_connector_dither {
+ RADEON_FMT_DITHER_DISABLE = 0,
+ RADEON_FMT_DITHER_ENABLE = 1,
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -498,6 +505,7 @@ struct radeon_connector {
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
enum radeon_connector_audio audio;
+ enum radeon_connector_dither dither;
};
struct radeon_framebuffer {
@@ -758,7 +766,8 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
- int *vpos, int *hpos);
+ int *vpos, int *hpos, ktime_t *stime,
+ ktime_t *etime);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
@@ -850,6 +859,12 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+/* fmt blocks */
+void avivo_program_fmt(struct drm_encoder *encoder);
+void dce3_program_fmt(struct drm_encoder *encoder);
+void dce4_program_fmt(struct drm_encoder *encoder);
+void dce8_program_fmt(struct drm_encoder *encoder);
+
/* fbdev layer */
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4f6b7fc7ad3..d1385ccc672 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -508,17 +508,21 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_AUTO;
} else {
- mutex_unlock(&rdev->pm.mutex);
count = -EINVAL;
goto fail;
}
if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active) {
+ count = -EINVAL;
+ goto fail;
+ }
ret = radeon_dpm_force_performance_level(rdev, level);
if (ret)
count = -EINVAL;
}
- mutex_unlock(&rdev->pm.mutex);
fail:
+ mutex_unlock(&rdev->pm.mutex);
+
return count;
}
@@ -881,11 +885,12 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
}
}
- printk("switching from power state:\n");
- radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
-
+ if (radeon_dpm == 1) {
+ printk("switching from power state:\n");
+ radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
+ printk("switching to power state:\n");
+ radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
+ }
mutex_lock(&rdev->ddev->struct_mutex);
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
@@ -918,12 +923,16 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
if (rdev->asic->dpm.force_performance_level) {
- if (rdev->pm.dpm.thermal_active)
+ if (rdev->pm.dpm.thermal_active) {
+ enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
/* force low perf level for thermal */
radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
- else
- /* otherwise, enable auto */
- radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+ /* save the user's level */
+ rdev->pm.dpm.forced_level = level;
+ } else {
+ /* otherwise, user selected level */
+ radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
+ }
}
done:
@@ -1179,7 +1188,8 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
mutex_lock(&rdev->pm.mutex);
radeon_dpm_init(rdev);
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
- radeon_dpm_print_power_states(rdev);
+ if (radeon_dpm == 1)
+ radeon_dpm_print_power_states(rdev);
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
@@ -1241,6 +1251,23 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
+ case CHIP_CAYMAN:
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+ /* DPM requires the RLC, RV770+ dGPU requires SMC */
+ if (!rdev->rlc_fw)
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if ((rdev->family >= CHIP_RV770) &&
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!rdev->smc_fw))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (radeon_dpm == 1)
+ rdev->pm.pm_method = PM_METHOD_DPM;
+ else
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
@@ -1256,16 +1283,12 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
- case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- case CHIP_BONAIRE:
- case CHIP_KABINI:
- case CHIP_KAVERI:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1273,10 +1296,10 @@ int radeon_pm_init(struct radeon_device *rdev)
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if (radeon_dpm == 1)
- rdev->pm.pm_method = PM_METHOD_DPM;
- else
+ else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else
+ rdev->pm.pm_method = PM_METHOD_DPM;
break;
default:
/* default to profile method */
@@ -1468,7 +1491,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 18254e1c3e7..9214403ae17 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
- int i, r;
+ int r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- ib->sync_to[i] = NULL;
return 0;
}
@@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
- * radeon_ib_sync_to - sync to fence before executing the IB
- *
- * @ib: IB object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence before executing the IB
- */
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = ib->sync_to[fence->ring];
- ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- bool need_sync = false;
- int i, r = 0;
+ int r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothing in the IB we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_fence *fence = ib->sync_to[i];
- if (radeon_fence_need_sync(fence, ib->ring)) {
- need_sync = true;
- radeon_semaphore_sync_rings(rdev, ib->semaphore,
- fence->ring, ib->ring);
- radeon_fence_note_sync(fence, ib->ring);
- }
- }
- /* immediately free semaphore when we don't need to sync */
- if (!need_sync) {
- radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
}
+
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush for every IB */
if (ib->vm /*&& !ib->vm->last_flush*/) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 8dcc20f53d7..2b42aa1914f 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -29,12 +29,12 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
-
+#include "radeon_trace.h"
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- int r;
+ int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
@@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ (*semaphore)->sync_to[i] = NULL;
+
return 0;
}
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- --semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_signale(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+ --semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
}
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- ++semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_wait(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+ ++semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ *
+ * @semaphore: semaphore object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using this semaphore object
+ */
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = semaphore->sync_to[fence->ring];
+ semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
-/* caller must hold ring lock */
+/**
+ * radeon_semaphore_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @semaphore: semaphore object to use for sync
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter)
+ int ring)
{
- int r;
+ int i, r;
- /* no need to signal and wait on the same ring */
- if (signaler == waiter) {
- return 0;
- }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = semaphore->sync_to[i];
- /* prevent GPU deadlocks */
- if (!rdev->ring[signaler].ready) {
- dev_err(rdev->dev, "Trying to sync to a disabled ring!");
- return -EINVAL;
- }
+ /* check if we really need to sync */
+ if (!radeon_fence_need_sync(fence, ring))
+ continue;
- r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
- if (r) {
- return r;
- }
- radeon_semaphore_emit_signal(rdev, signaler, semaphore);
- radeon_ring_commit(rdev, &rdev->ring[signaler]);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Syncing to a disabled ring!");
+ return -EINVAL;
+ }
- /* we assume caller has already allocated space on waiters ring */
- radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* allocate enough space for sync command */
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+ if (r) {
+ return r;
+ }
- /* for debugging lockup only, used by sysfs debug files */
- rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
- rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+ /* emit the signal semaphore */
+ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+ /* signaling wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ /* we assume the caller has already allocated space on the waiter's ring */
+ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+ /* waiting wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ radeon_fence_note_sync(fence, ring);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f7e36781596..9f0e18172b6 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,30 @@ TRACE_EVENT(radeon_cs,
__entry->fences)
);
+TRACE_EVENT(radeon_vm_set_page,
+ TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags),
+ TP_ARGS(pe, addr, count, incr, flags),
+ TP_STRUCT__entry(
+ __field(u64, pe)
+ __field(u64, addr)
+ __field(u32, count)
+ __field(u32, incr)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->pe = pe;
+ __entry->addr = addr;
+ __entry->count = count;
+ __entry->incr = incr;
+ __entry->flags = flags;
+ ),
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+ __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -87,6 +111,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
TP_ARGS(dev, seqno)
);
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem),
+
+ TP_STRUCT__entry(
+ __field(int, ring)
+ __field(signed, waiters)
+ __field(uint64_t, gpu_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->waiters = sem->waiters;
+ __entry->gpu_addr = sem->gpu_addr;
+ ),
+
+ TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+ __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 33858364fe8..a77cd274dfc 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -59,6 +59,7 @@
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863
#define CIK_MC_UCODE_SIZE 7866
+#define HAWAII_MC_UCODE_SIZE 7933
/* SDMA */
#define CIK_SDMA_UCODE_SIZE 1050
@@ -143,4 +144,7 @@
#define BONAIRE_SMC_UCODE_START 0x20000
#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
+#define HAWAII_SMC_UCODE_START 0x20000
+#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 308eff5be1b..373d088bac6 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -97,6 +97,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
+ case CHIP_HAWAII:
fw_name = FIRMWARE_BONAIRE;
break;
@@ -240,6 +241,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
+ radeon_uvd_note_usage(rdev);
+
r = radeon_uvd_get_destroy_msg(rdev,
R600_RING_TYPE_UVD_INDEX, handle, &fence);
if (r) {
@@ -620,7 +623,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
if (r)
goto err;
- r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+ r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
if (r)
goto err;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba8017b9..76cc8d3aafe 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -153,6 +153,70 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+ else
+ tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+ AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+ AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1447d794c22..1c560629575 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -345,9 +345,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
max_bandwidth = rdev->pm.sideport_bandwidth;
- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
- read_delay_latency.full = dfixed_div(read_delay_latency,
- rdev->pm.igp_sideport_mclk);
+ read_delay_latency.full = dfixed_const(370 * 800);
+ a.full = dfixed_const(1000);
+ b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
+ read_delay_latency.full = dfixed_div(read_delay_latency, b);
+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
} else {
if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
rdev->pm.k8_bandwidth.full)
@@ -488,14 +490,10 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -526,8 +524,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -555,8 +551,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
}
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 873eb4b193b..5d1c316115e 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1155,14 +1155,10 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1193,8 +1189,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1222,8 +1216,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
}
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 5811d277a36..26633a02525 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -407,9 +407,9 @@ static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device
WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
}
-static u64 rv6xx_clocks_per_unit(u32 unit)
+static u32 rv6xx_clocks_per_unit(u32 unit)
{
- u64 tmp = 1 << (2 * unit);
+ u32 tmp = 1 << (2 * unit);
return tmp;
}
@@ -417,7 +417,7 @@ static u64 rv6xx_clocks_per_unit(u32 unit)
static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
u32 unscaled_count, u32 unit)
{
- u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
+ u32 count_per_unit = rv6xx_clocks_per_unit(unit);
return (unscaled_count + count_per_unit - 1) / count_per_unit;
}
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index f9b02e3d683..aca8cbe8a33 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d96f7cbca0a..6a64ccaa069 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-extern void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
static void si_fini_pg(struct radeon_device *rdev);
@@ -4673,61 +4668,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
block, mc_id);
}
-/**
- * si_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (SI).
- */
-void si_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- uint64_t value;
- unsigned ndw;
-
- if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
- while (count) {
- ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
- ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1));
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- /* DMA */
- si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
- }
-}
-
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
@@ -5372,52 +5312,53 @@ void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
if (buffer == NULL)
return;
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
- buffer[count++] = 0x80000000;
- buffer[count++] = 0x80000000;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
- buffer[count++] = ext->reg_index - 0xa000;
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = ext->extent[i];
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (rdev->family) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- buffer[count++] = 0x2a00126a;
+ buffer[count++] = cpu_to_le32(0x2a00126a);
break;
case CHIP_VERDE:
- buffer[count++] = 0x0000124a;
+ buffer[count++] = cpu_to_le32(0x0000124a);
break;
case CHIP_OLAND:
- buffer[count++] = 0x00000082;
+ buffer[count++] = cpu_to_le32(0x00000082);
break;
case CHIP_HAINAN:
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
default:
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
}
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
- buffer[count++] = 0;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
}
static void si_init_pg(struct radeon_device *rdev)
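The si_get_csb_buffer() hunk above wraps every dword stored in the clear-state buffer in cpu_to_le32(), since the GPU consumes the table in little-endian order regardless of host endianness. A tiny userspace analogue of the same pattern, assuming glibc's htole32() from <endian.h> (a sketch, not the kernel helper):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Fill a buffer that a little-endian consumer will read, converting each
 * 32-bit word from host order on the way in -- a no-op on LE hosts, a
 * byte swap on BE hosts. */
static void fill_le_table(uint32_t *buf, const uint32_t *src, size_t n)
{
	for (size_t i = 0; i < n; i++)
		buf[i] = htole32(src[i]);
}

int main(void)
{
	const uint32_t regs[] = { 0x80000000, 0x2a00126a, 0x00000082 };
	uint32_t table[3];

	fill_le_table(table, regs, 3);
	printf("first word as stored: 0x%08x\n", table[0]);
	return 0;
}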
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 49909d23dfc..59be2cfcbb4 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_trace.h"
#include "sid.h"
u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -75,11 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -90,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
- value |= r600_flags;
+ value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
@@ -110,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
- if (flags & RADEON_VM_PAGE_VALID)
+ if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
@@ -118,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -199,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2332aa1bf93..0b00c790fb7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3589,7 +3589,12 @@ static void si_program_display_gap(struct radeon_device *rdev)
WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
}
- si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
+ /* Setting this to false forces the performance state to low if the crtcs are disabled.
+ * This can be a problem on PowerXpress systems or if you want to use the card
+ * for offscreen rendering or compute if there are no crtcs enabled. Set it to
+ * true for now so that performance scales even if the displays are off.
+ */
+ si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
}
static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -4553,7 +4558,7 @@ static int si_init_smc_table(struct radeon_device *rdev)
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
- table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+ table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7e2e0ea66a0..b322acc4809 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -478,7 +478,7 @@
#define STATE3_MASK (0x1f << 15)
#define STATE3_SHIFT 15
-#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
+#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define TRAIN_DONE_D0 (1 << 30)
#define TRAIN_DONE_D1 (1 << 31)
@@ -683,6 +683,51 @@
* bit5 = 176.4 kHz
* bit6 = 192 kHz
*/
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
+# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
+# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
+# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
+# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
+# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
+# define DESCRIPTION0(x) (((x) & 0xff) << 0)
+# define DESCRIPTION1(x) (((x) & 0xff) << 8)
+# define DESCRIPTION2(x) (((x) & 0xff) << 16)
+# define DESCRIPTION3(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
+# define DESCRIPTION4(x) (((x) & 0xff) << 0)
+# define DESCRIPTION5(x) (((x) & 0xff) << 8)
+# define DESCRIPTION6(x) (((x) & 0xff) << 16)
+# define DESCRIPTION7(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
+# define DESCRIPTION8(x) (((x) & 0xff) << 0)
+# define DESCRIPTION9(x) (((x) & 0xff) << 8)
+# define DESCRIPTION10(x) (((x) & 0xff) << 16)
+# define DESCRIPTION11(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
+# define DESCRIPTION12(x) (((x) & 0xff) << 0)
+# define DESCRIPTION13(x) (((x) & 0xff) << 8)
+# define DESCRIPTION14(x) (((x) & 0xff) << 16)
+# define DESCRIPTION15(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
+# define DESCRIPTION16(x) (((x) & 0xff) << 0)
+# define DESCRIPTION17(x) (((x) & 0xff) << 8)
+
#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
# define AUDIO_ENABLED (1 << 31)
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 9364129ba29..d700698a1f2 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
pi->enable_gfx_clock_gating = true;
- pi->enable_mg_clock_gating = true;
- pi->enable_gfx_dynamic_mgpg = true; /* ??? */
- pi->override_dynamic_mgpg = true;
+ pi->enable_mg_clock_gating = false;
+ pi->enable_gfx_dynamic_mgpg = false;
+ pi->override_dynamic_mgpg = false;
pi->enable_auto_thermal_throttling = true;
pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
pi->uvd_dpm = true; /* ??? */
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d978..d4a68af1a27 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+ return true;
}
/**
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
index 5b6fa1f62d4..d722db2cf34 100644
--- a/drivers/gpu/drm/radeon/uvd_v3_1.c
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -37,7 +37,7 @@
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+ return true;
}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9dca0..d8e835ac2c5 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
+ select DRM_KMS_FB_HELPER
help
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d151a7..2ee44ca9d67 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,7 +1,9 @@
config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
depends on DRM && (ARM || SUPERH)
+ select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 54bad98e947..562f9a401cf 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -40,7 +40,7 @@
static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
{
if (sdev->clock)
- clk_enable(sdev->clock);
+ clk_prepare_enable(sdev->clock);
#if 0
if (sdev->meram_dev && sdev->meram_dev->pdev)
pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
@@ -54,7 +54,7 @@ static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
#endif
if (sdev->clock)
- clk_disable(sdev->clock);
+ clk_disable_unprepare(sdev->clock);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 69853a4de40..8961ba6a34b 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,7 +1,10 @@
config DRM_TEGRA
bool "NVIDIA Tegra DRM"
+ depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
depends on DRM
+ select TEGRA_HOST1X
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -13,6 +16,11 @@ config DRM_TEGRA
if DRM_TEGRA
+config DRM_TEGRA_DEBUG
+ bool "NVIDIA Tegra DRM debug support"
+ help
+ Say yes here to enable debugging support.
+
config DRM_TEGRA_STAGING
bool "Enable HOST1X interface"
depends on STAGING
@@ -21,9 +29,4 @@ config DRM_TEGRA_STAGING
If unsure, choose N.
-config DRM_TEGRA_DEBUG
- bool "NVIDIA Tegra DRM debug support"
- help
- Say yes here to enable debugging support.
-
endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 00000000000..edc76abd58b
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,15 @@
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := \
+ bus.o \
+ drm.o \
+ gem.o \
+ fb.o \
+ dc.o \
+ output.o \
+ rgb.o \
+ hdmi.o \
+ gr2d.o \
+ gr3d.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644
index 00000000000..565f8f7b9a4
--- /dev/null
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static int drm_host1x_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ const char *device = dev_name(dev->dev);
+ const char *driver = dev->driver->name;
+ const char *bus = dev->dev->bus->name;
+ int length;
+
+ master->unique_len = strlen(bus) + 1 + strlen(device);
+ master->unique_size = master->unique_len;
+
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+ if (!master->unique)
+ return -ENOMEM;
+
+ snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
+
+ length = strlen(driver) + 1 + master->unique_len;
+
+ dev->devname = kmalloc(length + 1, GFP_KERNEL);
+ if (!dev->devname)
+ return -ENOMEM;
+
+ snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
+
+ return 0;
+}
+
+static struct drm_bus drm_host1x_bus = {
+ .bus_type = DRIVER_BUS_HOST1X,
+ .set_busid = drm_host1x_set_busid,
+};
+
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
+{
+ struct drm_device *drm;
+ int ret;
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->bus = &drm_host1x_bus;
+
+ drm = drm_dev_alloc(driver, &device->dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_free;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+ driver->major, driver->minor, driver->patchlevel,
+ driver->date, drm->primary->index);
+
+ return 0;
+
+err_free:
+ drm_dev_free(drm);
+ return ret;
+}
+
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
+{
+ struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
+
+ drm_put_dev(tegra->drm);
+}
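drm_host1x_set_busid() in the new bus.c above is essentially two sized string-assembly steps: "bus:device" for the unique id and "driver@unique" for the device name. A minimal userspace sketch of the same length arithmetic, with illustrative names ("host1x", "54400000.host1x", "tegra" are assumptions, not values from this patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *bus = "host1x", *device = "54400000.host1x", *driver = "tegra";

	/* unique = "bus:device": length is both strings plus the ':' separator */
	size_t unique_len = strlen(bus) + 1 + strlen(device);
	char *unique = malloc(unique_len + 1);
	if (!unique)
		return 1;
	snprintf(unique, unique_len + 1, "%s:%s", bus, device);

	/* devname = "driver@unique": same pattern with '@' as separator */
	size_t name_len = strlen(driver) + 1 + unique_len;
	char *devname = malloc(name_len + 1);
	if (!devname)
		return 1;
	snprintf(devname, name_len + 1, "%s@%s", driver, unique);

	printf("%s\n%s\n", unique, devname);
	free(unique);
	free(devname);
	return 0;
}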
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/drm/tegra/dc.c
index b1a05ad901c..ae1cb31ead7 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,9 @@
*/
#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
-#include "host1x_client.h"
#include "dc.h"
#include "drm.h"
#include "gem.h"
@@ -51,6 +47,8 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.dst.h = crtc_h;
window.format = tegra_dc_format(fb->pixel_format);
window.bits_per_pixel = fb->bits_per_pixel;
+ window.bottom_up = tegra_fb_is_bottom_up(fb);
+ window.tiled = tegra_fb_is_tiled(fb);
for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@ static int tegra_plane_disable(struct drm_plane *plane)
static void tegra_plane_destroy(struct drm_plane *plane)
{
+ struct tegra_plane *p = to_tegra_plane(plane);
+
tegra_plane_disable(plane);
drm_plane_cleanup(plane);
+ kfree(p);
}
static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
for (i = 0; i < 2; i++) {
struct tegra_plane *plane;
- plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return -ENOMEM;
@@ -133,8 +134,10 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
&tegra_plane_funcs, plane_formats,
ARRAY_SIZE(plane_formats), false);
- if (err < 0)
+ if (err < 0) {
+ kfree(plane);
return err;
+ }
}
return 0;
@@ -145,6 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
{
unsigned int format = tegra_dc_format(fb->pixel_format);
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+ unsigned int h_offset = 0, v_offset = 0;
unsigned long value;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
+ if (tegra_fb_is_tiled(fb)) {
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ } else {
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+ /* make sure bottom-up buffers are properly displayed */
+ if (tegra_fb_is_bottom_up(fb)) {
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value |= INVERT_V;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ v_offset += fb->height - 1;
+ } else {
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~INVERT_V;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+ }
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
value = GENERAL_UPDATE | WIN_A_UPDATE;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
@@ -255,14 +285,26 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
}
+static void drm_crtc_clear(struct drm_crtc *crtc)
+{
+ memset(crtc, 0, sizeof(*crtc));
+}
+
+static void tegra_dc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ drm_crtc_clear(crtc);
+}
+
static const struct drm_crtc_funcs tegra_crtc_funcs = {
.page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
- .destroy = drm_crtc_cleanup,
+ .destroy = tegra_dc_destroy,
};
static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
@@ -277,6 +319,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
}
}
}
+
+ drm_vblank_off(drm, dc->pipe);
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
}
+ if (window->bottom_up)
+ v_offset += window->src.h - 1;
+
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+ if (window->tiled) {
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ } else {
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
value = WIN_ENABLE;
if (yuv) {
@@ -512,6 +569,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
value |= COLOR_EXPAND;
}
+ if (window->bottom_up)
+ value |= INVERT_V;
+
tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
/*
@@ -1041,30 +1101,30 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
return 0;
}
-static int tegra_dc_drm_init(struct host1x_client *client,
- struct drm_device *drm)
+static int tegra_dc_init(struct host1x_client *client)
{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
- dc->pipe = drm->mode_config.num_crtc;
+ dc->pipe = tegra->drm->mode_config.num_crtc;
- drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+ drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
- err = tegra_dc_rgb_init(drm, dc);
+ err = tegra_dc_rgb_init(tegra->drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
return err;
}
- err = tegra_dc_add_planes(drm, dc);
+ err = tegra_dc_add_planes(tegra->drm, dc);
if (err < 0)
return err;
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dc_debugfs_init(dc, drm->primary);
+ err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
if (err < 0)
dev_err(dc->dev, "debugfs setup failed: %d\n", err);
}
@@ -1080,7 +1140,7 @@ static int tegra_dc_drm_init(struct host1x_client *client,
return 0;
}
-static int tegra_dc_drm_exit(struct host1x_client *client)
+static int tegra_dc_exit(struct host1x_client *client)
{
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
@@ -1103,13 +1163,12 @@ static int tegra_dc_drm_exit(struct host1x_client *client)
}
static const struct host1x_client_ops dc_client_ops = {
- .drm_init = tegra_dc_drm_init,
- .drm_exit = tegra_dc_drm_exit,
+ .init = tegra_dc_init,
+ .exit = tegra_dc_exit,
};
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1153,7 +1212,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
- err = host1x_register_client(host1x, &dc->client);
+ err = host1x_client_register(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
@@ -1167,17 +1226,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
static int tegra_dc_remove(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_dc *dc = platform_get_drvdata(pdev);
int err;
- err = host1x_unregister_client(host1x, &dc->client);
+ err = host1x_client_unregister(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
err);
return err;
}
+ err = tegra_dc_rgb_remove(dc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
+ return err;
+ }
+
clk_disable_unprepare(dc->clk);
return 0;
diff --git a/drivers/gpu/host1x/drm/dc.h b/drivers/gpu/drm/tegra/dc.h
index 79eaec9aac7..91bbda29147 100644
--- a/drivers/gpu/host1x/drm/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -302,6 +302,7 @@
#define DC_WIN_CSC_KVB 0x618
#define DC_WIN_WIN_OPTIONS 0x700
+#define INVERT_V (1 << 2)
#define COLOR_EXPAND (1 << 6)
#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
@@ -365,6 +366,10 @@
#define DC_WIN_BUF_STRIDE 0x70b
#define DC_WIN_UV_BUF_STRIDE 0x70c
#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
#define DC_WIN_DV_CONTROL 0x70e
#define DC_WIN_BLEND_NOKEY 0x70f
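
The dc.c hunks above use these new dc.h bits in two places: tegra_dc_set_base() and tegra_dc_setup_window() program DC_WIN_BUFFER_ADDR_MODE with the TILE or LINEAR variants for both the luma and chroma planes, and set or clear INVERT_V in DC_WIN_WIN_OPTIONS (plus bias the V offset by height - 1) so bottom-up buffers scan out correctly. A small standalone sketch that only composes those register values, using the defines copied from above (the actual register writes are hardware-specific and not modeled here):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Values copied from the dc.h additions above. */
    #define INVERT_V                          (1 << 2)
    #define DC_WIN_BUFFER_ADDR_MODE_LINEAR    (0 << 0)
    #define DC_WIN_BUFFER_ADDR_MODE_TILE      (1 << 0)
    #define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
    #define DC_WIN_BUFFER_ADDR_MODE_TILE_UV   (1 << 16)

    /* Compose DC_WIN_BUFFER_ADDR_MODE the way tegra_dc_set_base() does. */
    static uint32_t addr_mode(bool tiled)
    {
    	if (tiled)
    		return DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
    		       DC_WIN_BUFFER_ADDR_MODE_TILE;

    	return DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
    	       DC_WIN_BUFFER_ADDR_MODE_LINEAR;
    }

    /* Set or clear INVERT_V in a cached copy of DC_WIN_WIN_OPTIONS. */
    static uint32_t win_options(uint32_t value, bool bottom_up)
    {
    	if (bottom_up)
    		value |= INVERT_V;
    	else
    		value &= ~INVERT_V;

    	return value;
    }

    int main(void)
    {
    	printf("tiled: 0x%08x, options: 0x%08x\n",
    	       addr_mode(true), win_options(0, true));
    	return 0;
    }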
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 00000000000..28e17813771
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/host1x.h>
+
+#include "drm.h"
+#include "gem.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct tegra_drm_file {
+ struct list_head contexts;
+};
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct host1x_device *device = to_host1x_device(drm->dev);
+ struct tegra_drm *tegra;
+ int err;
+
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ dev_set_drvdata(drm->dev, tegra);
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ err = host1x_device_init(device);
+ if (err < 0)
+ return err;
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ return err;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+ struct host1x_device *device = to_host1x_device(drm->dev);
+ int err;
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_vblank_cleanup(drm);
+ drm_mode_config_cleanup(drm);
+
+ err = host1x_device_exit(device);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ struct tegra_drm_file *fpriv;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (!fpriv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fpriv->contexts);
+ filp->driver_priv = fpriv;
+
+ return 0;
+}
+
+static void tegra_drm_context_free(struct tegra_drm_context *context)
+{
+ context->client->ops->close_channel(context);
+ kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra_fbdev_restore_mode(tegra->fbdev);
+}
+
+static struct host1x_bo *
+host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, handle);
+ if (!gem)
+ return NULL;
+
+ mutex_lock(&drm->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&drm->struct_mutex);
+
+ bo = to_tegra_bo(gem);
+ return &bo->base;
+}
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file)
+{
+ unsigned int num_cmdbufs = args->num_cmdbufs;
+ unsigned int num_relocs = args->num_relocs;
+ unsigned int num_waitchks = args->num_waitchks;
+ struct drm_tegra_cmdbuf __user *cmdbufs =
+ (void * __user)(uintptr_t)args->cmdbufs;
+ struct drm_tegra_reloc __user *relocs =
+ (void * __user)(uintptr_t)args->relocs;
+ struct drm_tegra_waitchk __user *waitchks =
+ (void * __user)(uintptr_t)args->waitchks;
+ struct drm_tegra_syncpt syncpt;
+ struct host1x_job *job;
+ int err;
+
+ /* We don't yet support other than one syncpt_incr struct per submit */
+ if (args->num_syncpts != 1)
+ return -EINVAL;
+
+ job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+ args->num_relocs, args->num_waitchks);
+ if (!job)
+ return -ENOMEM;
+
+ job->num_relocs = args->num_relocs;
+ job->num_waitchk = args->num_waitchks;
+ job->client = (u32)args->context;
+ job->class = context->client->base.class;
+ job->serialize = true;
+
+ while (num_cmdbufs) {
+ struct drm_tegra_cmdbuf cmdbuf;
+ struct host1x_bo *bo;
+
+ err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+ if (err)
+ goto fail;
+
+ bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+ if (!bo) {
+ err = -ENOENT;
+ goto fail;
+ }
+
+ host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+ num_cmdbufs--;
+ cmdbufs++;
+ }
+
+ err = copy_from_user(job->relocarray, relocs,
+ sizeof(*relocs) * num_relocs);
+ if (err)
+ goto fail;
+
+ while (num_relocs--) {
+ struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+ struct host1x_bo *cmdbuf, *target;
+
+ cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+ target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+ reloc->cmdbuf = cmdbuf;
+ reloc->target = target;
+
+ if (!reloc->target || !reloc->cmdbuf) {
+ err = -ENOENT;
+ goto fail;
+ }
+ }
+
+ err = copy_from_user(job->waitchk, waitchks,
+ sizeof(*waitchks) * num_waitchks);
+ if (err)
+ goto fail;
+
+ err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
+ sizeof(syncpt));
+ if (err)
+ goto fail;
+
+ job->is_addr_reg = context->client->ops->is_addr_reg;
+ job->syncpt_incrs = syncpt.incrs;
+ job->syncpt_id = syncpt.id;
+ job->timeout = 10000;
+
+ if (args->timeout && args->timeout < 10000)
+ job->timeout = args->timeout;
+
+ err = host1x_job_pin(job, context->client->base.dev);
+ if (err)
+ goto fail;
+
+ err = host1x_job_submit(job);
+ if (err)
+ goto fail_submit;
+
+ args->fence = job->syncpt_end;
+
+ host1x_job_put(job);
+ return 0;
+
+fail_submit:
+ host1x_job_unpin(job);
+fail:
+ host1x_job_put(job);
+ return err;
+}
+
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
+{
+ return (struct tegra_drm_context *)(uintptr_t)context;
+}
+
+static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
+ struct tegra_drm_context *context)
+{
+ struct tegra_drm_context *ctx;
+
+ list_for_each_entry(ctx, &file->contexts, list)
+ if (ctx == context)
+ return true;
+
+ return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_create *args = data;
+ struct tegra_bo *bo;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_mmap *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -EINVAL;
+
+ bo = to_tegra_bo(gem);
+
+ args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_read *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get(host, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ args->value = host1x_syncpt_read_min(sp);
+ return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_incr *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_incr(sp);
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_wait *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+ &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_tegra_open_channel *args = data;
+ struct tegra_drm_context *context;
+ struct tegra_drm_client *client;
+ int err = -ENODEV;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ list_for_each_entry(client, &tegra->clients, list)
+ if (client->base.class == args->client) {
+ err = client->ops->open_channel(client, context);
+ if (err)
+ break;
+
+ list_add(&context->list, &fpriv->contexts);
+ args->context = (uintptr_t)context;
+ context->client = client;
+ return 0;
+ }
+
+ kfree(context);
+ return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_close_channel *args = data;
+ struct tegra_drm_context *context;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -EINVAL;
+
+ list_del(&context->list);
+ tegra_drm_context_free(context);
+
+ return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_get_syncpt *args = data;
+ struct tegra_drm_context *context;
+ struct host1x_syncpt *syncpt;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ if (args->index >= context->client->base.num_syncpts)
+ return -EINVAL;
+
+ syncpt = context->client->base.syncpts[args->index];
+ args->id = host1x_syncpt_id(syncpt);
+
+ return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_submit *args = data;
+ struct tegra_drm_context *context;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ return context->client->ops->submit(context, args, drm, file);
+}
+
+static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_get_syncpt_base *args = data;
+ struct tegra_drm_context *context;
+ struct host1x_syncpt_base *base;
+ struct host1x_syncpt *syncpt;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ if (args->syncpt >= context->client->base.num_syncpts)
+ return -EINVAL;
+
+ syncpt = context->client->base.syncpts[args->syncpt];
+
+ base = host1x_syncpt_get_base(syncpt);
+ if (!base)
+ return -ENXIO;
+
+ args->id = host1x_syncpt_base_id(base);
+
+ return 0;
+}
+#endif
+
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = tegra_drm_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (dc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ /* TODO: implement real hardware counter using syncpoints */
+ return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!crtc)
+ return -ENODEV;
+
+ tegra_dc_enable_vblank(dc);
+
+ return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (crtc)
+ tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm_context *context, *tmp;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ tegra_dc_cancel_page_flip(crtc, file);
+
+ list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+ tegra_drm_context_free(context);
+
+ kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height, fb->depth,
+ fb->bits_per_pixel,
+ atomic_read(&fb->refcount.refcount));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .load = tegra_drm_load,
+ .unload = tegra_drm_unload,
+ .open = tegra_drm_open,
+ .preclose = tegra_drm_preclose,
+ .lastclose = tegra_drm_lastclose,
+
+ .get_vblank_counter = tegra_drm_get_vblank_counter,
+ .enable_vblank = tegra_drm_enable_vblank,
+ .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+ .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+ .gem_free_object = tegra_bo_free_object,
+ .gem_vm_ops = &tegra_bo_vm_ops,
+ .dumb_create = tegra_bo_dumb_create,
+ .dumb_map_offset = tegra_bo_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client)
+{
+ mutex_lock(&tegra->clients_lock);
+ list_add_tail(&client->list, &tegra->clients);
+ mutex_unlock(&tegra->clients_lock);
+
+ return 0;
+}
+
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client)
+{
+ mutex_lock(&tegra->clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&tegra->clients_lock);
+
+ return 0;
+}
+
+static int host1x_drm_probe(struct host1x_device *device)
+{
+ return drm_host1x_init(&tegra_drm_driver, device);
+}
+
+static int host1x_drm_remove(struct host1x_device *device)
+{
+ drm_host1x_exit(&tegra_drm_driver, device);
+
+ return 0;
+}
+
+static const struct of_device_id host1x_drm_subdevs[] = {
+ { .compatible = "nvidia,tegra20-dc", },
+ { .compatible = "nvidia,tegra20-hdmi", },
+ { .compatible = "nvidia,tegra20-gr2d", },
+ { .compatible = "nvidia,tegra20-gr3d", },
+ { .compatible = "nvidia,tegra30-dc", },
+ { .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra30-gr2d", },
+ { .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-hdmi", },
+ { .compatible = "nvidia,tegra114-gr3d", },
+ { /* sentinel */ }
+};
+
+static struct host1x_driver host1x_drm_driver = {
+ .name = "drm",
+ .probe = host1x_drm_probe,
+ .remove = host1x_drm_remove,
+ .subdevs = host1x_drm_subdevs,
+};
+
+static int __init host1x_drm_init(void)
+{
+ int err;
+
+ err = host1x_driver_register(&host1x_drm_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_driver_register(&tegra_dc_driver);
+ if (err < 0)
+ goto unregister_host1x;
+
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dc;
+
+ err = platform_driver_register(&tegra_gr2d_driver);
+ if (err < 0)
+ goto unregister_hdmi;
+
+ err = platform_driver_register(&tegra_gr3d_driver);
+ if (err < 0)
+ goto unregister_gr2d;
+
+ return 0;
+
+unregister_gr2d:
+ platform_driver_unregister(&tegra_gr2d_driver);
+unregister_hdmi:
+ platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+ platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+ host1x_driver_unregister(&host1x_drm_driver);
+ return err;
+}
+module_init(host1x_drm_init);
+
+static void __exit host1x_drm_exit(void)
+{
+ platform_driver_unregister(&tegra_gr3d_driver);
+ platform_driver_unregister(&tegra_gr2d_driver);
+ platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dc_driver);
+ host1x_driver_unregister(&host1x_drm_driver);
+}
+module_exit(host1x_drm_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL v2");
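
One detail of the staging IOCTLs added above: tegra_close_channel(), tegra_get_syncpt(), tegra_submit() and tegra_get_syncpt_base() all treat args->context as an opaque pointer and, before touching it, call tegra_drm_file_owns_context() to confirm the pointer really is linked into the calling file's context list. A userspace sketch of that validate-by-list-membership idea, using a plain singly linked list instead of the kernel's list_head (struct names here are stand-ins, not the kernel types):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for tegra_drm_context and the per-file context list. */
    struct context {
    	struct context *next;
    	int channel;
    };

    struct file_priv {
    	struct context *contexts;
    };

    /* Same idea as tegra_drm_file_owns_context(): only accept a pointer that
     * is actually present on this file's list. */
    static bool file_owns_context(struct file_priv *fpriv, struct context *context)
    {
    	struct context *ctx;

    	for (ctx = fpriv->contexts; ctx; ctx = ctx->next)
    		if (ctx == context)
    			return true;

    	return false;
    }

    int main(void)
    {
    	struct file_priv fpriv = { NULL };
    	struct context stray = { NULL, 0 };
    	struct context *ctx = calloc(1, sizeof(*ctx));

    	if (!ctx)
    		return 1;

    	ctx->next = fpriv.contexts;
    	fpriv.contexts = ctx;

    	printf("owned: %d, stray: %d\n",
    	       file_owns_context(&fpriv, ctx),
    	       file_owns_context(&fpriv, &stray));

    	free(ctx);
    	return 0;
    }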
diff --git a/drivers/gpu/host1x/drm/drm.h b/drivers/gpu/drm/tegra/drm.h
index 02ce020f257..fdfe259ed7f 100644
--- a/drivers/gpu/host1x/drm/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -10,14 +10,14 @@
#ifndef HOST1X_DRM_H
#define HOST1X_DRM_H 1
+#include <uapi/drm/tegra_drm.h>
+#include <linux/host1x.h>
+
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
-#include <uapi/drm/tegra_drm.h>
-
-#include "host1x.h"
struct tegra_fb {
struct drm_framebuffer base;
@@ -30,17 +30,8 @@ struct tegra_fbdev {
struct tegra_fb *fb;
};
-struct host1x_drm {
+struct tegra_drm {
struct drm_device *drm;
- struct device *dev;
- void __iomem *regs;
- struct clk *clk;
- int syncpt;
- int irq;
-
- struct mutex drm_clients_lock;
- struct list_head drm_clients;
- struct list_head drm_active;
struct mutex clients_lock;
struct list_head clients;
@@ -48,66 +39,60 @@ struct host1x_drm {
struct tegra_fbdev *fbdev;
};
-struct host1x_client;
+struct tegra_drm_client;
-struct host1x_drm_context {
- struct host1x_client *client;
+struct tegra_drm_context {
+ struct tegra_drm_client *client;
struct host1x_channel *channel;
struct list_head list;
};
-struct host1x_client_ops {
- int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
- int (*drm_exit)(struct host1x_client *client);
- int (*open_channel)(struct host1x_client *client,
- struct host1x_drm_context *context);
- void (*close_channel)(struct host1x_drm_context *context);
- int (*submit)(struct host1x_drm_context *context,
+struct tegra_drm_client_ops {
+ int (*open_channel)(struct tegra_drm_client *client,
+ struct tegra_drm_context *context);
+ void (*close_channel)(struct tegra_drm_context *context);
+ int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
+ int (*submit)(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file);
};
-struct host1x_drm_file {
- struct list_head contexts;
-};
-
-struct host1x_client {
- struct host1x_drm *host1x;
- struct device *dev;
-
- const struct host1x_client_ops *ops;
-
- enum host1x_class class;
- struct host1x_channel *channel;
-
- struct host1x_syncpt **syncpts;
- unsigned int num_syncpts;
+int tegra_drm_submit(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file);
+struct tegra_drm_client {
+ struct host1x_client base;
struct list_head list;
+
+ const struct tegra_drm_client_ops *ops;
};
-extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x_drm *host1x);
+static inline struct tegra_drm_client *
+host1x_to_drm_client(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_drm_client, base);
+}
+
+extern int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
+extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
-extern int host1x_register_client(struct host1x_drm *host1x,
- struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x_drm *host1x,
- struct host1x_client *client);
+extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+extern int tegra_drm_exit(struct tegra_drm *tegra);
struct tegra_output;
struct tegra_dc {
struct host1x_client client;
- spinlock_t lock;
-
- struct host1x_drm *host1x;
struct device *dev;
+ spinlock_t lock;
struct drm_crtc base;
int pipe;
struct clk *clk;
-
void __iomem *regs;
int irq;
@@ -123,7 +108,8 @@ struct tegra_dc {
struct drm_pending_vblank_event *event;
};
-static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
{
return container_of(client, struct tegra_dc, client);
}
@@ -162,6 +148,8 @@ struct tegra_dc_window {
unsigned int format;
unsigned int stride[2];
unsigned long base[3];
+ bool bottom_up;
+ bool tiled;
};
/* from dc.c */
@@ -249,23 +237,34 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
return output ? -ENOSYS : -EINVAL;
}
+/* from bus.c */
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
+
/* from rgb.c */
extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
/* from output.c */
-extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_probe(struct tegra_output *output);
+extern int tegra_output_remove(struct tegra_output *output);
extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
extern int tegra_output_exit(struct tegra_output *output);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);
extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
-extern struct drm_driver tegra_drm_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_gr3d_driver;
#endif /* HOST1X_DRM_H */
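
The drm.h rework above replaces the old standalone host1x_client with a tegra_drm_client that embeds a host1x_client as its base member; host1x_to_drm_client() then recovers the outer structure with container_of(), which is how gr2d and gr3d later map host1x callbacks back onto their own state. A self-contained sketch of the container_of idiom, with minimal stand-in structs (the macro here is the standard offsetof trick, not the kernel header):

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-ins for host1x_client and tegra_drm_client; only the
     * embedding relationship matters for this sketch. */
    struct host1x_client {
    	int class;
    };

    struct tegra_drm_client {
    	struct host1x_client base;
    	const char *name;
    };

    /* The container_of trick used by host1x_to_drm_client() above. */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    static struct tegra_drm_client *
    host1x_to_drm_client(struct host1x_client *client)
    {
    	return container_of(client, struct tegra_drm_client, base);
    }

    int main(void)
    {
    	struct tegra_drm_client drm = { { 1 }, "gr2d" };
    	struct host1x_client *base = &drm.base;

    	printf("%s\n", host1x_to_drm_client(base)->name);
    	return 0;
    }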
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/drm/tegra/fb.c
index 979a3e32b78..490f7719e31 100644
--- a/drivers/gpu/host1x/drm/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,8 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-
#include "drm.h"
#include "gem.h"
@@ -36,6 +34,26 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
return fb->planes[index];
}
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+ if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
+ return true;
+
+ return false;
+}
+
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+ if (fb->planes[0]->flags & TEGRA_BO_TILED)
+ return true;
+
+ return false;
+}
+
static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
size = cmd.pitches[0] * cmd.height;
- bo = tegra_bo_create(drm, size);
+ bo = tegra_bo_create(drm, size, 0);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -323,10 +341,10 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
static void tegra_fb_output_poll_changed(struct drm_device *drm)
{
- struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_drm *tegra = drm->dev_private;
- if (host1x->fbdev)
- drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+ if (tegra->fbdev)
+ drm_fb_helper_hotplug_event(&tegra->fbdev->base);
}
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
int tegra_drm_fb_init(struct drm_device *drm)
{
- struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_drm *tegra = drm->dev_private;
struct tegra_fbdev *fbdev;
drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@ int tegra_drm_fb_init(struct drm_device *drm)
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
- host1x->fbdev = fbdev;
+ tegra->fbdev = fbdev;
return 0;
}
void tegra_drm_fb_exit(struct drm_device *drm)
{
- struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_drm *tegra = drm->dev_private;
- tegra_fbdev_free(host1x->fbdev);
+ tegra_fbdev_free(tegra->fbdev);
}
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/drm/tegra/gem.c
index 59623de4ee1..28a9cbc07ab 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,25 +18,18 @@
* GNU General Public License for more details.
*/
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
+#include <drm/tegra_drm.h>
#include "gem.h"
-static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
return container_of(bo, struct tegra_bo, base);
}
static void tegra_bo_put(struct host1x_bo *bo)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct drm_device *drm = obj->gem.dev;
mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
return obj->paddr;
}
@@ -57,7 +50,7 @@ static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
return obj->vaddr;
}
@@ -68,7 +61,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
return obj->vaddr + page * PAGE_SIZE;
}
@@ -80,7 +73,7 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct drm_device *drm = obj->gem.dev;
mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+ unsigned long flags)
{
struct tegra_bo *bo;
int err;
@@ -135,6 +129,12 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
if (err)
goto err_mmap;
+ if (flags & DRM_TEGRA_GEM_CREATE_TILED)
+ bo->flags |= TEGRA_BO_TILED;
+
+ if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
return bo;
err_mmap:
@@ -149,14 +149,15 @@ err_dma:
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
- struct drm_device *drm,
- unsigned int size,
- unsigned int *handle)
+ struct drm_device *drm,
+ unsigned int size,
+ unsigned long flags,
+ unsigned int *handle)
{
struct tegra_bo *bo;
int ret;
- bo = tegra_bo_create(drm, size);
+ bo = tegra_bo_create(drm, size, flags);
if (IS_ERR(bo))
return bo;
@@ -178,7 +179,6 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
struct tegra_bo *bo = to_tegra_bo(gem);
drm_gem_free_mmap_offset(gem);
-
drm_gem_object_release(gem);
tegra_bo_destroy(gem->dev, bo);
@@ -197,8 +197,8 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- bo = tegra_bo_create_with_handle(file, drm, args->size,
- &args->handle);
+ bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+ &args->handle);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/drm/tegra/gem.h
index 492533a2dac..7674000bf47 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -19,14 +19,18 @@
#ifndef __HOST1X_GEM_H
#define __HOST1X_GEM_H
+#include <linux/host1x.h>
+
#include <drm/drm.h>
#include <drm/drmP.h>
-#include "host1x_bo.h"
+#define TEGRA_BO_TILED (1 << 0)
+#define TEGRA_BO_BOTTOM_UP (1 << 1)
struct tegra_bo {
struct drm_gem_object gem;
struct host1x_bo base;
+ unsigned long flags;
dma_addr_t paddr;
void *vaddr;
};
@@ -38,11 +42,13 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
extern const struct host1x_bo_ops tegra_bo_ops;
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+ unsigned long flags);
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
- struct drm_device *drm,
- unsigned int size,
- unsigned int *handle);
+ struct drm_device *drm,
+ unsigned int size,
+ unsigned long flags,
+ unsigned int *handle);
void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
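
With the gem.c/gem.h changes above, tegra_bo_create() now accepts creation flags and translates the UAPI DRM_TEGRA_GEM_CREATE_* bits into the internal TEGRA_BO_TILED / TEGRA_BO_BOTTOM_UP flags that fb.c later inspects via tegra_fb_is_tiled() and tegra_fb_is_bottom_up(). A short sketch of that flag translation; the DRM_TEGRA_GEM_CREATE_* values below are placeholders for the sketch, since the real ones live in the tegra_drm UAPI header and are not part of this diff:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder UAPI values for this sketch only. */
    #define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
    #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)

    /* Internal flags, as defined in gem.h above. */
    #define TEGRA_BO_TILED     (1 << 0)
    #define TEGRA_BO_BOTTOM_UP (1 << 1)

    /* Mirrors the flag handling added to tegra_bo_create(). */
    static unsigned long bo_flags(unsigned long create_flags)
    {
    	unsigned long flags = 0;

    	if (create_flags & DRM_TEGRA_GEM_CREATE_TILED)
    		flags |= TEGRA_BO_TILED;

    	if (create_flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
    		flags |= TEGRA_BO_BOTTOM_UP;

    	return flags;
    }

    int main(void)
    {
    	printf("0x%lx\n", bo_flags(DRM_TEGRA_GEM_CREATE_TILED |
    				   DRM_TEGRA_GEM_CREATE_BOTTOM_UP));
    	return 0;
    }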
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 00000000000..7ec4259ffde
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr2d.h"
+
+struct gr2d {
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct clk *clk;
+
+ DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
+};
+
+static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
+{
+ return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+ struct gr2d *gr2d = to_gr2d(drm);
+
+ gr2d->channel = host1x_channel_request(client->dev);
+ if (!gr2d->channel)
+ return -ENOMEM;
+
+ client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ if (!client->syncpts[0]) {
+ host1x_channel_free(gr2d->channel);
+ return -ENOMEM;
+ }
+
+ return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr2d_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct gr2d *gr2d = to_gr2d(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ host1x_syncpt_free(client->syncpts[0]);
+ host1x_channel_free(gr2d->channel);
+
+ return 0;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+ .init = gr2d_init,
+ .exit = gr2d_exit,
+};
+
+static int gr2d_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct gr2d *gr2d = to_gr2d(client);
+
+ context->channel = host1x_channel_get(gr2d->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr2d_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ if (offset == 0x2b)
+ return 1;
+
+ break;
+
+ case HOST1X_CLASS_GR2D:
+ case HOST1X_CLASS_GR2D_SB:
+ if (offset >= GR2D_NUM_REGS)
+ break;
+
+ if (test_bit(offset, gr2d->addr_regs))
+ return 1;
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops gr2d_ops = {
+ .open_channel = gr2d_open_channel,
+ .close_channel = gr2d_close_channel,
+ .is_addr_reg = gr2d_is_addr_reg,
+ .submit = tegra_drm_submit,
+};
+
+static const struct of_device_id gr2d_match[] = {
+ { .compatible = "nvidia,tegra30-gr2d" },
+ { .compatible = "nvidia,tegra20-gr2d" },
+ { },
+};
+
+static const u32 gr2d_addr_regs[] = {
+ GR2D_UA_BASE_ADDR,
+ GR2D_VA_BASE_ADDR,
+ GR2D_PAT_BASE_ADDR,
+ GR2D_DSTA_BASE_ADDR,
+ GR2D_DSTB_BASE_ADDR,
+ GR2D_DSTC_BASE_ADDR,
+ GR2D_SRCA_BASE_ADDR,
+ GR2D_SRCB_BASE_ADDR,
+ GR2D_SRC_BASE_ADDR_SB,
+ GR2D_DSTA_BASE_ADDR_SB,
+ GR2D_DSTB_BASE_ADDR_SB,
+ GR2D_UA_BASE_ADDR_SB,
+ GR2D_VA_BASE_ADDR_SB,
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct gr2d *gr2d;
+ unsigned int i;
+ int err;
+
+ gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+ if (!gr2d)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ gr2d->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(gr2d->clk)) {
+ dev_err(dev, "cannot get clock\n");
+ return PTR_ERR(gr2d->clk);
+ }
+
+ err = clk_prepare_enable(gr2d->clk);
+ if (err) {
+ dev_err(dev, "cannot turn on clock\n");
+ return err;
+ }
+
+ INIT_LIST_HEAD(&gr2d->client.base.list);
+ gr2d->client.base.ops = &gr2d_client_ops;
+ gr2d->client.base.dev = dev;
+ gr2d->client.base.class = HOST1X_CLASS_GR2D;
+ gr2d->client.base.syncpts = syncpts;
+ gr2d->client.base.num_syncpts = 1;
+
+ INIT_LIST_HEAD(&gr2d->client.list);
+ gr2d->client.ops = &gr2d_ops;
+
+ err = host1x_client_register(&gr2d->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ clk_disable_unprepare(gr2d->clk);
+ return err;
+ }
+
+ /* initialize address register map */
+ for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
+ set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
+
+ platform_set_drvdata(pdev, gr2d);
+
+ return 0;
+}
+
+static int gr2d_remove(struct platform_device *pdev)
+{
+ struct gr2d *gr2d = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&gr2d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_disable_unprepare(gr2d->clk);
+
+ return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+ .driver = {
+ .name = "tegra-gr2d",
+ .of_match_table = gr2d_match,
+ },
+ .probe = gr2d_probe,
+ .remove = gr2d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 00000000000..4d7304fb015
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR2D_H
+#define TEGRA_GR2D_H
+
+#define GR2D_UA_BASE_ADDR 0x1a
+#define GR2D_VA_BASE_ADDR 0x1b
+#define GR2D_PAT_BASE_ADDR 0x26
+#define GR2D_DSTA_BASE_ADDR 0x2b
+#define GR2D_DSTB_BASE_ADDR 0x2c
+#define GR2D_DSTC_BASE_ADDR 0x2d
+#define GR2D_SRCA_BASE_ADDR 0x31
+#define GR2D_SRCB_BASE_ADDR 0x32
+#define GR2D_SRC_BASE_ADDR_SB 0x48
+#define GR2D_DSTA_BASE_ADDR_SB 0x49
+#define GR2D_DSTB_BASE_ADDR_SB 0x4a
+#define GR2D_UA_BASE_ADDR_SB 0x4b
+#define GR2D_VA_BASE_ADDR_SB 0x4c
+
+#define GR2D_NUM_REGS 0x4d
+
+#endif
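
The offsets in gr2d.h above feed the address-register bitmap: gr2d_probe() sets one bit per GR2D_*_BASE_ADDR offset, and gr2d_is_addr_reg() tests that bitmap so the host1x job pinning code can tell which words of a user command stream carry buffer addresses that need relocation. A compact userspace sketch of the same bitmap lookup, using a uint64_t array instead of the kernel's DECLARE_BITMAP/set_bit/test_bit helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define GR2D_NUM_REGS 0x4d

    /* A few of the offsets listed in gr2d.h above. */
    static const uint32_t addr_regs[] = { 0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d };

    static uint64_t bitmap[(GR2D_NUM_REGS + 63) / 64];

    static void set_bit64(uint32_t bit)
    {
    	bitmap[bit / 64] |= 1ull << (bit % 64);
    }

    static bool test_bit64(uint32_t bit)
    {
    	return bitmap[bit / 64] & (1ull << (bit % 64));
    }

    /* Same shape as gr2d_is_addr_reg() for the GR2D classes: reject offsets
     * beyond the register file, otherwise consult the bitmap. */
    static int is_addr_reg(uint32_t offset)
    {
    	if (offset >= GR2D_NUM_REGS)
    		return 0;

    	return test_bit64(offset) ? 1 : 0;
    }

    int main(void)
    {
    	size_t i;

    	for (i = 0; i < sizeof(addr_regs) / sizeof(addr_regs[0]); i++)
    		set_bit64(addr_regs[i]);

    	printf("0x2b: %d, 0x2e: %d\n", is_addr_reg(0x2b), is_addr_reg(0x2e));
    	return 0;
    }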
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 00000000000..4cec8f526af
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2013 Avionic Design GmbH
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr3d.h"
+
+struct gr3d {
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct clk *clk_secondary;
+ struct clk *clk;
+
+ DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
+};
+
+static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
+{
+ return container_of(client, struct gr3d, client);
+}
+
+static int gr3d_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+ struct gr3d *gr3d = to_gr3d(drm);
+
+ gr3d->channel = host1x_channel_request(client->dev);
+ if (!gr3d->channel)
+ return -ENOMEM;
+
+ client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ if (!client->syncpts[0]) {
+ host1x_channel_free(gr3d->channel);
+ return -ENOMEM;
+ }
+
+ return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr3d_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct gr3d *gr3d = to_gr3d(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ host1x_syncpt_free(client->syncpts[0]);
+ host1x_channel_free(gr3d->channel);
+
+ return 0;
+}
+
+static const struct host1x_client_ops gr3d_client_ops = {
+ .init = gr3d_init,
+ .exit = gr3d_exit,
+};
+
+static int gr3d_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct gr3d *gr3d = to_gr3d(client);
+
+ context->channel = host1x_channel_get(gr3d->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr3d_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ if (offset == 0x2b)
+ return 1;
+
+ break;
+
+ case HOST1X_CLASS_GR3D:
+ if (offset >= GR3D_NUM_REGS)
+ break;
+
+ if (test_bit(offset, gr3d->addr_regs))
+ return 1;
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops gr3d_ops = {
+ .open_channel = gr3d_open_channel,
+ .close_channel = gr3d_close_channel,
+ .is_addr_reg = gr3d_is_addr_reg,
+ .submit = tegra_drm_submit,
+};
+
+static const struct of_device_id tegra_gr3d_match[] = {
+ { .compatible = "nvidia,tegra114-gr3d" },
+ { .compatible = "nvidia,tegra30-gr3d" },
+ { .compatible = "nvidia,tegra20-gr3d" },
+ { }
+};
+
+static const u32 gr3d_addr_regs[] = {
+ GR3D_IDX_ATTRIBUTE( 0),
+ GR3D_IDX_ATTRIBUTE( 1),
+ GR3D_IDX_ATTRIBUTE( 2),
+ GR3D_IDX_ATTRIBUTE( 3),
+ GR3D_IDX_ATTRIBUTE( 4),
+ GR3D_IDX_ATTRIBUTE( 5),
+ GR3D_IDX_ATTRIBUTE( 6),
+ GR3D_IDX_ATTRIBUTE( 7),
+ GR3D_IDX_ATTRIBUTE( 8),
+ GR3D_IDX_ATTRIBUTE( 9),
+ GR3D_IDX_ATTRIBUTE(10),
+ GR3D_IDX_ATTRIBUTE(11),
+ GR3D_IDX_ATTRIBUTE(12),
+ GR3D_IDX_ATTRIBUTE(13),
+ GR3D_IDX_ATTRIBUTE(14),
+ GR3D_IDX_ATTRIBUTE(15),
+ GR3D_IDX_INDEX_BASE,
+ GR3D_QR_ZTAG_ADDR,
+ GR3D_QR_CTAG_ADDR,
+ GR3D_QR_CZ_ADDR,
+ GR3D_TEX_TEX_ADDR( 0),
+ GR3D_TEX_TEX_ADDR( 1),
+ GR3D_TEX_TEX_ADDR( 2),
+ GR3D_TEX_TEX_ADDR( 3),
+ GR3D_TEX_TEX_ADDR( 4),
+ GR3D_TEX_TEX_ADDR( 5),
+ GR3D_TEX_TEX_ADDR( 6),
+ GR3D_TEX_TEX_ADDR( 7),
+ GR3D_TEX_TEX_ADDR( 8),
+ GR3D_TEX_TEX_ADDR( 9),
+ GR3D_TEX_TEX_ADDR(10),
+ GR3D_TEX_TEX_ADDR(11),
+ GR3D_TEX_TEX_ADDR(12),
+ GR3D_TEX_TEX_ADDR(13),
+ GR3D_TEX_TEX_ADDR(14),
+ GR3D_TEX_TEX_ADDR(15),
+ GR3D_DW_MEMORY_OUTPUT_ADDRESS,
+ GR3D_GLOBAL_SURFADDR( 0),
+ GR3D_GLOBAL_SURFADDR( 1),
+ GR3D_GLOBAL_SURFADDR( 2),
+ GR3D_GLOBAL_SURFADDR( 3),
+ GR3D_GLOBAL_SURFADDR( 4),
+ GR3D_GLOBAL_SURFADDR( 5),
+ GR3D_GLOBAL_SURFADDR( 6),
+ GR3D_GLOBAL_SURFADDR( 7),
+ GR3D_GLOBAL_SURFADDR( 8),
+ GR3D_GLOBAL_SURFADDR( 9),
+ GR3D_GLOBAL_SURFADDR(10),
+ GR3D_GLOBAL_SURFADDR(11),
+ GR3D_GLOBAL_SURFADDR(12),
+ GR3D_GLOBAL_SURFADDR(13),
+ GR3D_GLOBAL_SURFADDR(14),
+ GR3D_GLOBAL_SURFADDR(15),
+ GR3D_GLOBAL_SPILLSURFADDR,
+ GR3D_GLOBAL_SURFOVERADDR( 0),
+ GR3D_GLOBAL_SURFOVERADDR( 1),
+ GR3D_GLOBAL_SURFOVERADDR( 2),
+ GR3D_GLOBAL_SURFOVERADDR( 3),
+ GR3D_GLOBAL_SURFOVERADDR( 4),
+ GR3D_GLOBAL_SURFOVERADDR( 5),
+ GR3D_GLOBAL_SURFOVERADDR( 6),
+ GR3D_GLOBAL_SURFOVERADDR( 7),
+ GR3D_GLOBAL_SURFOVERADDR( 8),
+ GR3D_GLOBAL_SURFOVERADDR( 9),
+ GR3D_GLOBAL_SURFOVERADDR(10),
+ GR3D_GLOBAL_SURFOVERADDR(11),
+ GR3D_GLOBAL_SURFOVERADDR(12),
+ GR3D_GLOBAL_SURFOVERADDR(13),
+ GR3D_GLOBAL_SURFOVERADDR(14),
+ GR3D_GLOBAL_SURFOVERADDR(15),
+ GR3D_GLOBAL_SAMP01SURFADDR( 0),
+ GR3D_GLOBAL_SAMP01SURFADDR( 1),
+ GR3D_GLOBAL_SAMP01SURFADDR( 2),
+ GR3D_GLOBAL_SAMP01SURFADDR( 3),
+ GR3D_GLOBAL_SAMP01SURFADDR( 4),
+ GR3D_GLOBAL_SAMP01SURFADDR( 5),
+ GR3D_GLOBAL_SAMP01SURFADDR( 6),
+ GR3D_GLOBAL_SAMP01SURFADDR( 7),
+ GR3D_GLOBAL_SAMP01SURFADDR( 8),
+ GR3D_GLOBAL_SAMP01SURFADDR( 9),
+ GR3D_GLOBAL_SAMP01SURFADDR(10),
+ GR3D_GLOBAL_SAMP01SURFADDR(11),
+ GR3D_GLOBAL_SAMP01SURFADDR(12),
+ GR3D_GLOBAL_SAMP01SURFADDR(13),
+ GR3D_GLOBAL_SAMP01SURFADDR(14),
+ GR3D_GLOBAL_SAMP01SURFADDR(15),
+ GR3D_GLOBAL_SAMP23SURFADDR( 0),
+ GR3D_GLOBAL_SAMP23SURFADDR( 1),
+ GR3D_GLOBAL_SAMP23SURFADDR( 2),
+ GR3D_GLOBAL_SAMP23SURFADDR( 3),
+ GR3D_GLOBAL_SAMP23SURFADDR( 4),
+ GR3D_GLOBAL_SAMP23SURFADDR( 5),
+ GR3D_GLOBAL_SAMP23SURFADDR( 6),
+ GR3D_GLOBAL_SAMP23SURFADDR( 7),
+ GR3D_GLOBAL_SAMP23SURFADDR( 8),
+ GR3D_GLOBAL_SAMP23SURFADDR( 9),
+ GR3D_GLOBAL_SAMP23SURFADDR(10),
+ GR3D_GLOBAL_SAMP23SURFADDR(11),
+ GR3D_GLOBAL_SAMP23SURFADDR(12),
+ GR3D_GLOBAL_SAMP23SURFADDR(13),
+ GR3D_GLOBAL_SAMP23SURFADDR(14),
+ GR3D_GLOBAL_SAMP23SURFADDR(15),
+};
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct host1x_syncpt **syncpts;
+ struct gr3d *gr3d;
+ unsigned int i;
+ int err;
+
+ gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+ if (!gr3d)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ gr3d->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gr3d->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(gr3d->clk);
+ }
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
+ gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
+ if (IS_ERR(gr3d->clk)) {
+ dev_err(&pdev->dev, "cannot get secondary clock\n");
+ return PTR_ERR(gr3d->clk);
+ }
+ }
+
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to power up 3D unit\n");
+ return err;
+ }
+
+ if (gr3d->clk_secondary) {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
+ gr3d->clk_secondary);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to power up secondary 3D unit\n");
+ return err;
+ }
+ }
+
+ INIT_LIST_HEAD(&gr3d->client.base.list);
+ gr3d->client.base.ops = &gr3d_client_ops;
+ gr3d->client.base.dev = &pdev->dev;
+ gr3d->client.base.class = HOST1X_CLASS_GR3D;
+ gr3d->client.base.syncpts = syncpts;
+ gr3d->client.base.num_syncpts = 1;
+
+ INIT_LIST_HEAD(&gr3d->client.list);
+ gr3d->client.ops = &gr3d_ops;
+
+ err = host1x_client_register(&gr3d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ /* initialize address register map */
+ for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
+ set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
+
+ platform_set_drvdata(pdev, gr3d);
+
+ return 0;
+}
+
+static int gr3d_remove(struct platform_device *pdev)
+{
+ struct gr3d *gr3d = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&gr3d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ if (gr3d->clk_secondary) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
+ clk_disable_unprepare(gr3d->clk_secondary);
+ }
+
+ tegra_powergate_power_off(TEGRA_POWERGATE_3D);
+ clk_disable_unprepare(gr3d->clk);
+
+ return 0;
+}
+
+struct platform_driver tegra_gr3d_driver = {
+ .driver = {
+ .name = "tegra-gr3d",
+ .of_match_table = tegra_gr3d_match,
+ },
+ .probe = gr3d_probe,
+ .remove = gr3d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 00000000000..0c30a1351c8
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR3D_H
+#define TEGRA_GR3D_H
+
+#define GR3D_IDX_ATTRIBUTE(x) (0x100 + (x) * 2)
+#define GR3D_IDX_INDEX_BASE 0x121
+#define GR3D_QR_ZTAG_ADDR 0x415
+#define GR3D_QR_CTAG_ADDR 0x417
+#define GR3D_QR_CZ_ADDR 0x419
+#define GR3D_TEX_TEX_ADDR(x) (0x710 + (x))
+#define GR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
+#define GR3D_GLOBAL_SURFADDR(x) (0xe00 + (x))
+#define GR3D_GLOBAL_SPILLSURFADDR 0xe2a
+#define GR3D_GLOBAL_SURFOVERADDR(x) (0xe30 + (x))
+#define GR3D_GLOBAL_SAMP01SURFADDR(x) (0xe50 + (x))
+#define GR3D_GLOBAL_SAMP23SURFADDR(x) (0xe60 + (x))
+
+#define GR3D_NUM_REGS 0xe88
+
+#endif
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 644d95c7d48..0cd9bc2056e 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,21 +8,33 @@
*/
#include <linux/clk.h>
+#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
-#include <linux/gpio.h>
#include <linux/hdmi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/clk/tegra.h>
-
-#include <drm/drm_edid.h>
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
-#include "host1x_client.h"
+
+struct tmds_config {
+ unsigned int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current;
+ u32 drive_current;
+ u32 peak_current;
+};
+
+struct tegra_hdmi_config {
+ const struct tmds_config *tmds;
+ unsigned int num_tmds;
+
+ unsigned long fuse_override_offset;
+ unsigned long fuse_override_value;
+
+ bool has_sor_io_peak_current;
+};
struct tegra_hdmi {
struct host1x_client client;
@@ -38,6 +50,8 @@ struct tegra_hdmi {
struct clk *clk_parent;
struct clk *clk;
+ const struct tegra_hdmi_config *config;
+
unsigned int audio_source;
unsigned int audio_freq;
bool stereo;
@@ -143,15 +157,7 @@ static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
{ 0, 0, 0, 0 },
};
-struct tmds_config {
- unsigned int pclk;
- u32 pll0;
- u32 pll1;
- u32 pe_current;
- u32 drive_current;
-};
-
-static const struct tmds_config tegra2_tmds_config[] = {
+static const struct tmds_config tegra20_tmds_config[] = {
{ /* slow pixel clock modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@ static const struct tmds_config tegra2_tmds_config[] = {
},
};
-static const struct tmds_config tegra3_tmds_config[] = {
+static const struct tmds_config tegra30_tmds_config[] = {
{ /* 480p modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@ static const struct tmds_config tegra3_tmds_config[] = {
},
};
+static const struct tmds_config tegra114_tmds_config[] = {
+ { /* 480p/576p / 25.2MHz/27MHz modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 720p / 74.25MHz modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_15_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 1080p / 148.5MHz modes */
+ .pclk = 148500000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_10_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 225/297MHz modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+ | SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+ },
+};
+
static const struct tegra_hdmi_audio_config *
tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
{
@@ -511,7 +596,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
- dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+ dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
err);
return;
}
@@ -531,7 +616,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
* contain 7 bytes. Including the 3 byte header only the first 10
* bytes can be programmed.
*/
- tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
+ tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
- value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ tegra_hdmi_writel(hdmi, tmds->drive_current,
+ HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+ value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
+ value |= hdmi->config->fuse_override_value;
+ tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
+
+ if (hdmi->config->has_sor_io_peak_current)
+ tegra_hdmi_writel(hdmi, tmds->peak_current,
+ HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
+}
+
+static bool tegra_output_is_hdmi(struct tegra_output *output)
+{
+ struct edid *edid;
+
+ if (!output->connector.edid_blob_ptr)
+ return false;
+
+ edid = (struct edid *)output->connector.edid_blob_ptr->data;
+
+ return drm_detect_hdmi_monitor(edid);
}
static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
struct tegra_hdmi *hdmi = to_hdmi(output);
struct device_node *node = hdmi->dev->of_node;
unsigned int pulse_start, div82, pclk;
- const struct tmds_config *tmds;
- unsigned int num_tmds;
unsigned long value;
int retries = 1000;
int err;
+ hdmi->dvi = !tegra_output_is_hdmi(output);
+
pclk = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = regulator_enable(hdmi->vdd);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
- }
-
err = regulator_enable(hdmi->pll);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
tegra_hdmi_setup_stereo_infoframe(hdmi);
/* TMDS CONFIG */
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- num_tmds = ARRAY_SIZE(tegra3_tmds_config);
- tmds = tegra3_tmds_config;
- } else {
- num_tmds = ARRAY_SIZE(tegra2_tmds_config);
- tmds = tegra2_tmds_config;
- }
-
- for (i = 0; i < num_tmds; i++) {
- if (pclk <= tmds[i].pclk) {
- tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+ for (i = 0; i < hdmi->config->num_tmds; i++) {
+ if (pclk <= hdmi->config->tmds[i].pclk) {
+ tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
break;
}
}
@@ -824,7 +915,6 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
tegra_periph_reset_assert(hdmi->clk);
clk_disable(hdmi->clk);
regulator_disable(hdmi->pll);
- regulator_disable(hdmi->vdd);
return 0;
}
@@ -1055,6 +1145,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
#undef DUMP_REG
@@ -1122,24 +1213,31 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
return 0;
}
-static int tegra_hdmi_drm_init(struct host1x_client *client,
- struct drm_device *drm)
+static int tegra_hdmi_init(struct host1x_client *client)
{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(client->dev, "failed to enable VDD regulator: %d\n",
+ err);
+ return err;
+ }
+
hdmi->output.type = TEGRA_OUTPUT_HDMI;
hdmi->output.dev = client->dev;
hdmi->output.ops = &hdmi_ops;
- err = tegra_output_init(drm, &hdmi->output);
+ err = tegra_output_init(tegra->drm, &hdmi->output);
if (err < 0) {
dev_err(client->dev, "output setup failed: %d\n", err);
return err;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+ err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
if (err < 0)
dev_err(client->dev, "debugfs setup failed: %d\n", err);
}
@@ -1147,7 +1245,7 @@ static int tegra_hdmi_drm_init(struct host1x_client *client,
return 0;
}
-static int tegra_hdmi_drm_exit(struct host1x_client *client)
+static int tegra_hdmi_exit(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
@@ -1171,25 +1269,63 @@ static int tegra_hdmi_drm_exit(struct host1x_client *client)
return err;
}
+ regulator_disable(hdmi->vdd);
+
return 0;
}
static const struct host1x_client_ops hdmi_client_ops = {
- .drm_init = tegra_hdmi_drm_init,
- .drm_exit = tegra_hdmi_drm_exit,
+ .init = tegra_hdmi_init,
+ .exit = tegra_hdmi_exit,
+};
+
+static const struct tegra_hdmi_config tegra20_hdmi_config = {
+ .tmds = tegra20_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra20_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra30_hdmi_config = {
+ .tmds = tegra30_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra30_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra114_hdmi_config = {
+ .tmds = tegra114_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra114_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = true,
+};
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
+ { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
+ { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
+ { },
};
static int tegra_hdmi_probe(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+ const struct of_device_id *match;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
+ match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
+ hdmi->config = match->data;
hdmi->dev = &pdev->dev;
hdmi->audio_source = AUTO;
hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->output.dev = &pdev->dev;
- err = tegra_output_parse_dt(&hdmi->output);
+ err = tegra_output_probe(&hdmi->output);
if (err < 0)
return err;
@@ -1252,11 +1388,11 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->irq = err;
- hdmi->client.ops = &hdmi_client_ops;
INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.ops = &hdmi_client_ops;
hdmi->client.dev = &pdev->dev;
- err = host1x_register_client(host1x, &hdmi->client);
+ err = host1x_client_register(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
@@ -1270,29 +1406,28 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
static int tegra_hdmi_remove(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
- err = host1x_unregister_client(host1x, &hdmi->client);
+ err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
err);
return err;
}
+ err = tegra_output_remove(&hdmi->output);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+ return err;
+ }
+
clk_unprepare(hdmi->clk_parent);
clk_unprepare(hdmi->clk);
return 0;
}
-static struct of_device_id tegra_hdmi_of_match[] = {
- { .compatible = "nvidia,tegra30-hdmi", },
- { .compatible = "nvidia,tegra20-hdmi", },
- { },
-};
-
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
diff --git a/drivers/gpu/host1x/drm/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 52ac36e08cc..0aebc485f7f 100644
--- a/drivers/gpu/host1x/drm/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -233,7 +233,10 @@
#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
-#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0)
+#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8)
+#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
+#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
#define DRIVE_CURRENT_1_500_mA 0x00
#define DRIVE_CURRENT_1_875_mA 0x01
@@ -299,6 +302,79 @@
#define DRIVE_CURRENT_24_375_mA 0x3d
#define DRIVE_CURRENT_24_750_mA 0x3e
+#define DRIVE_CURRENT_0_000_mA_T114 0x00
+#define DRIVE_CURRENT_0_400_mA_T114 0x01
+#define DRIVE_CURRENT_0_800_mA_T114 0x02
+#define DRIVE_CURRENT_1_200_mA_T114 0x03
+#define DRIVE_CURRENT_1_600_mA_T114 0x04
+#define DRIVE_CURRENT_2_000_mA_T114 0x05
+#define DRIVE_CURRENT_2_400_mA_T114 0x06
+#define DRIVE_CURRENT_2_800_mA_T114 0x07
+#define DRIVE_CURRENT_3_200_mA_T114 0x08
+#define DRIVE_CURRENT_3_600_mA_T114 0x09
+#define DRIVE_CURRENT_4_000_mA_T114 0x0a
+#define DRIVE_CURRENT_4_400_mA_T114 0x0b
+#define DRIVE_CURRENT_4_800_mA_T114 0x0c
+#define DRIVE_CURRENT_5_200_mA_T114 0x0d
+#define DRIVE_CURRENT_5_600_mA_T114 0x0e
+#define DRIVE_CURRENT_6_000_mA_T114 0x0f
+#define DRIVE_CURRENT_6_400_mA_T114 0x10
+#define DRIVE_CURRENT_6_800_mA_T114 0x11
+#define DRIVE_CURRENT_7_200_mA_T114 0x12
+#define DRIVE_CURRENT_7_600_mA_T114 0x13
+#define DRIVE_CURRENT_8_000_mA_T114 0x14
+#define DRIVE_CURRENT_8_400_mA_T114 0x15
+#define DRIVE_CURRENT_8_800_mA_T114 0x16
+#define DRIVE_CURRENT_9_200_mA_T114 0x17
+#define DRIVE_CURRENT_9_600_mA_T114 0x18
+#define DRIVE_CURRENT_10_000_mA_T114 0x19
+#define DRIVE_CURRENT_10_400_mA_T114 0x1a
+#define DRIVE_CURRENT_10_800_mA_T114 0x1b
+#define DRIVE_CURRENT_11_200_mA_T114 0x1c
+#define DRIVE_CURRENT_11_600_mA_T114 0x1d
+#define DRIVE_CURRENT_12_000_mA_T114 0x1e
+#define DRIVE_CURRENT_12_400_mA_T114 0x1f
+#define DRIVE_CURRENT_12_800_mA_T114 0x20
+#define DRIVE_CURRENT_13_200_mA_T114 0x21
+#define DRIVE_CURRENT_13_600_mA_T114 0x22
+#define DRIVE_CURRENT_14_000_mA_T114 0x23
+#define DRIVE_CURRENT_14_400_mA_T114 0x24
+#define DRIVE_CURRENT_14_800_mA_T114 0x25
+#define DRIVE_CURRENT_15_200_mA_T114 0x26
+#define DRIVE_CURRENT_15_600_mA_T114 0x27
+#define DRIVE_CURRENT_16_000_mA_T114 0x28
+#define DRIVE_CURRENT_16_400_mA_T114 0x29
+#define DRIVE_CURRENT_16_800_mA_T114 0x2a
+#define DRIVE_CURRENT_17_200_mA_T114 0x2b
+#define DRIVE_CURRENT_17_600_mA_T114 0x2c
+#define DRIVE_CURRENT_18_000_mA_T114 0x2d
+#define DRIVE_CURRENT_18_400_mA_T114 0x2e
+#define DRIVE_CURRENT_18_800_mA_T114 0x2f
+#define DRIVE_CURRENT_19_200_mA_T114 0x30
+#define DRIVE_CURRENT_19_600_mA_T114 0x31
+#define DRIVE_CURRENT_20_000_mA_T114 0x32
+#define DRIVE_CURRENT_20_400_mA_T114 0x33
+#define DRIVE_CURRENT_20_800_mA_T114 0x34
+#define DRIVE_CURRENT_21_200_mA_T114 0x35
+#define DRIVE_CURRENT_21_600_mA_T114 0x36
+#define DRIVE_CURRENT_22_000_mA_T114 0x37
+#define DRIVE_CURRENT_22_400_mA_T114 0x38
+#define DRIVE_CURRENT_22_800_mA_T114 0x39
+#define DRIVE_CURRENT_23_200_mA_T114 0x3a
+#define DRIVE_CURRENT_23_600_mA_T114 0x3b
+#define DRIVE_CURRENT_24_000_mA_T114 0x3c
+#define DRIVE_CURRENT_24_400_mA_T114 0x3d
+#define DRIVE_CURRENT_24_800_mA_T114 0x3e
+#define DRIVE_CURRENT_25_200_mA_T114 0x3f
+#define DRIVE_CURRENT_25_400_mA_T114 0x40
+#define DRIVE_CURRENT_25_800_mA_T114 0x41
+#define DRIVE_CURRENT_26_200_mA_T114 0x42
+#define DRIVE_CURRENT_26_600_mA_T114 0x43
+#define DRIVE_CURRENT_27_000_mA_T114 0x44
+#define DRIVE_CURRENT_27_400_mA_T114 0x45
+#define DRIVE_CURRENT_27_800_mA_T114 0x46
+#define DRIVE_CURRENT_28_200_mA_T114 0x47
+
#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
@@ -358,6 +434,23 @@
#define PE_CURRENT_7_0_mA 0xe
#define PE_CURRENT_7_5_mA 0xf
+#define PE_CURRENT_0_mA_T114 0x0
+#define PE_CURRENT_1_mA_T114 0x1
+#define PE_CURRENT_2_mA_T114 0x2
+#define PE_CURRENT_3_mA_T114 0x3
+#define PE_CURRENT_4_mA_T114 0x4
+#define PE_CURRENT_5_mA_T114 0x5
+#define PE_CURRENT_6_mA_T114 0x6
+#define PE_CURRENT_7_mA_T114 0x7
+#define PE_CURRENT_8_mA_T114 0x8
+#define PE_CURRENT_9_mA_T114 0x9
+#define PE_CURRENT_10_mA_T114 0xa
+#define PE_CURRENT_11_mA_T114 0xb
+#define PE_CURRENT_12_mA_T114 0xc
+#define PE_CURRENT_13_mA_T114 0xd
+#define PE_CURRENT_14_mA_T114 0xe
+#define PE_CURRENT_15_mA_T114 0xf
+
#define HDMI_NV_PDISP_KEY_CTRL 0x9a
#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
@@ -383,4 +476,61 @@
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
+#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
+#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
+#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
+#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
+
+#define PEAK_CURRENT_0_000_mA 0x00
+#define PEAK_CURRENT_0_200_mA 0x01
+#define PEAK_CURRENT_0_400_mA 0x02
+#define PEAK_CURRENT_0_600_mA 0x03
+#define PEAK_CURRENT_0_800_mA 0x04
+#define PEAK_CURRENT_1_000_mA 0x05
+#define PEAK_CURRENT_1_200_mA 0x06
+#define PEAK_CURRENT_1_400_mA 0x07
+#define PEAK_CURRENT_1_600_mA 0x08
+#define PEAK_CURRENT_1_800_mA 0x09
+#define PEAK_CURRENT_2_000_mA 0x0a
+#define PEAK_CURRENT_2_200_mA 0x0b
+#define PEAK_CURRENT_2_400_mA 0x0c
+#define PEAK_CURRENT_2_600_mA 0x0d
+#define PEAK_CURRENT_2_800_mA 0x0e
+#define PEAK_CURRENT_3_000_mA 0x0f
+#define PEAK_CURRENT_3_200_mA 0x10
+#define PEAK_CURRENT_3_400_mA 0x11
+#define PEAK_CURRENT_3_600_mA 0x12
+#define PEAK_CURRENT_3_800_mA 0x13
+#define PEAK_CURRENT_4_000_mA 0x14
+#define PEAK_CURRENT_4_200_mA 0x15
+#define PEAK_CURRENT_4_400_mA 0x16
+#define PEAK_CURRENT_4_600_mA 0x17
+#define PEAK_CURRENT_4_800_mA 0x18
+#define PEAK_CURRENT_5_000_mA 0x19
+#define PEAK_CURRENT_5_200_mA 0x1a
+#define PEAK_CURRENT_5_400_mA 0x1b
+#define PEAK_CURRENT_5_600_mA 0x1c
+#define PEAK_CURRENT_5_800_mA 0x1d
+#define PEAK_CURRENT_6_000_mA 0x1e
+#define PEAK_CURRENT_6_200_mA 0x1f
+#define PEAK_CURRENT_6_400_mA 0x20
+#define PEAK_CURRENT_6_600_mA 0x21
+#define PEAK_CURRENT_6_800_mA 0x22
+#define PEAK_CURRENT_7_000_mA 0x23
+#define PEAK_CURRENT_7_200_mA 0x24
+#define PEAK_CURRENT_7_400_mA 0x25
+#define PEAK_CURRENT_7_600_mA 0x26
+#define PEAK_CURRENT_7_800_mA 0x27
+#define PEAK_CURRENT_8_000_mA 0x28
+#define PEAK_CURRENT_8_200_mA 0x29
+#define PEAK_CURRENT_8_400_mA 0x2a
+#define PEAK_CURRENT_8_600_mA 0x2b
+#define PEAK_CURRENT_8_800_mA 0x2c
+#define PEAK_CURRENT_9_000_mA 0x2d
+#define PEAK_CURRENT_9_200_mA 0x2e
+#define PEAK_CURRENT_9_400_mA 0x2f
+
+#define HDMI_NV_PDISP_SOR_PAD_CTLS0 0xd2
+
#endif /* TEGRA_HDMI_H */
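
The *_T114 lane macros above widen each per-lane drive-current field from 6 to 7 bits while keeping the 8-bit lane stride. As a quick illustration of how a full register word is assembled from them (the setting chosen here mirrors one of the tegra114 TMDS entries):

	/* Illustrative only: pack four per-lane settings into one register word. */
	u32 drive_current =
		DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
		DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
		DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
		DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114);
	/* == 0x1a1a1a1a: each lane value lands in bits [6:0] of its own byte. */
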
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/drm/tegra/output.c
index 137ae81ab80..2cb0065e057 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,9 +7,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/of_gpio.h>
-#include <linux/i2c.h>
#include "drm.h"
@@ -81,10 +79,16 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
return status;
}
+static void drm_connector_clear(struct drm_connector *connector)
+{
+ memset(connector, 0, sizeof(*connector));
+}
+
static void tegra_connector_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
+ drm_connector_clear(connector);
}
static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +98,15 @@ static const struct drm_connector_funcs connector_funcs = {
.destroy = tegra_connector_destroy,
};
+static void drm_encoder_clear(struct drm_encoder *encoder)
+{
+ memset(encoder, 0, sizeof(*encoder));
+}
+
static void tegra_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
+ drm_encoder_clear(encoder);
}
static const struct drm_encoder_funcs encoder_funcs = {
@@ -151,7 +161,7 @@ static irqreturn_t hpd_irq(int irq, void *data)
return IRQ_HANDLED;
}
-int tegra_output_parse_dt(struct tegra_output *output)
+int tegra_output_probe(struct tegra_output *output)
{
enum of_gpio_flags flags;
struct device_node *ddc;
@@ -181,14 +191,6 @@ int tegra_output_parse_dt(struct tegra_output *output)
output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
"nvidia,hpd-gpio", 0,
&flags);
-
- return 0;
-}
-
-int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
-{
- int connector, encoder, err;
-
if (gpio_is_valid(output->hpd_gpio)) {
unsigned long flags;
@@ -202,7 +204,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
err = gpio_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpio_to_irq(): %d\n", err);
- goto free_hpd;
+ gpio_free(output->hpd_gpio);
+ return err;
}
output->hpd_irq = err;
@@ -215,12 +218,33 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
- goto free_hpd;
+ gpio_free(output->hpd_gpio);
+ return err;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
}
+ return 0;
+}
+
+int tegra_output_remove(struct tegra_output *output)
+{
+ if (gpio_is_valid(output->hpd_gpio)) {
+ free_irq(output->hpd_irq, output);
+ gpio_free(output->hpd_gpio);
+ }
+
+ if (output->ddc)
+ put_device(&output->ddc->dev);
+
+ return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+ int connector, encoder;
+
switch (output->type) {
case TEGRA_OUTPUT_RGB:
connector = DRM_MODE_CONNECTOR_LVDS;
@@ -241,6 +265,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
drm_connector_init(drm, &output->connector, &connector_funcs,
connector);
drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+ output->connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +276,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
output->encoder.possible_crtcs = 0x3;
return 0;
-
-free_hpd:
- gpio_free(output->hpd_gpio);
-
- return err;
}
int tegra_output_exit(struct tegra_output *output)
{
- if (gpio_is_valid(output->hpd_gpio)) {
- free_irq(output->hpd_irq, output);
- gpio_free(output->hpd_gpio);
- }
-
- if (output->ddc)
- put_device(&output->ddc->dev);
-
return 0;
}
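
The split above rebalances the output lifecycle: DT parsing plus HPD GPIO/IRQ setup now live in tegra_output_probe()/tegra_output_remove(), while tegra_output_init()/tegra_output_exit() are left with only the DRM connector and encoder. A hedged sketch of the expected pairing from a caller's perspective (error handling trimmed; the function names are the ones introduced by this patch):

	/* platform probe: no DRM device needed yet */
	err = tegra_output_probe(&output);	/* parse DT, claim HPD GPIO + IRQ */

	/* host1x client ->init, once the DRM device exists */
	err = tegra_output_init(drm, &output);	/* create connector + encoder */

	/* teardown, in reverse order */
	tegra_output_exit(&output);		/* DRM-side cleanup */
	tegra_output_remove(&output);		/* free IRQ/GPIO, drop DDC reference */
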
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 5aa66ef7a94..ba47ca4fb88 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,9 +8,6 @@
*/
#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include "drm.h"
#include "dc.h"
@@ -150,7 +147,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->output.dev = dc->dev;
rgb->output.of_node = np;
- err = tegra_output_parse_dt(&rgb->output);
+ err = tegra_output_probe(&rgb->output);
if (err < 0)
return err;
@@ -177,6 +174,20 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
return 0;
}
+int tegra_dc_rgb_remove(struct tegra_dc *dc)
+{
+ int err;
+
+ if (!dc->rgb)
+ return 0;
+
+ err = tegra_output_remove(dc->rgb);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
{
struct tegra_rgb *rgb = to_rgb(dc->rgb);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d1010690..7c3ef79fcb3 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dde2af..b433b9f040c 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -5,10 +5,6 @@ ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
- ttm_bo_manager.o
-
-ifeq ($(CONFIG_SWIOTLB),y)
-ttm-y += ttm_page_alloc_dma.o
-endif
+ ttm_bo_manager.o ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f1a857ec102..07e02c4bf5a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
-
+ mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@@ -429,8 +429,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
- if (!ret)
+ if (!ret) {
+
+ /*
+ * Make NO_EVICT bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ */
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ ttm_bo_add_to_lru(bo);
+ }
+
ww_mutex_unlock(&bo->resv->lock);
+ }
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -986,24 +998,32 @@ out_unlock:
return ret;
}
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
if (mem->mm_node && placement->lpfn != 0 &&
(mem->start < placement->fpfn ||
mem->start + mem->num_pages > placement->lpfn))
- return -1;
+ return false;
for (i = 0; i < placement->num_placement; i++) {
- if ((placement->placement[i] & mem->placement &
- TTM_PL_MASK_CACHING) &&
- (placement->placement[i] & mem->placement &
- TTM_PL_MASK_MEM))
- return i;
+ *new_flags = placement->placement[i];
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
}
- return -1;
+
+ for (i = 0; i < placement->num_busy_placement; i++) {
+ *new_flags = placement->busy_placement[i];
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
+ }
+
+ return false;
}
int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
int ret;
+ uint32_t new_flags;
lockdep_assert_held(&bo->resv->lock.base);
/* Check that range is valid */
@@ -1022,8 +1043,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- ret = ttm_bo_mem_compat(placement, &bo->mem);
- if (ret < 0) {
+ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
ret = ttm_bo_move_buffer(bo, placement, interruptible,
no_wait_gpu);
if (ret)
@@ -1033,7 +1053,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ttm_flag_masked(&bo->mem.placement, new_flags,
~TTM_PL_MASK_MEMTYPE);
}
/*
@@ -1103,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
+ mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1684,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ /*
+ * In the absence of a wait_unlocked API,
+ * use the bo::wu_mutex to avoid triggering livelocks due to
+ * concurrent use of this function. Note that this use of
+ * bo::wu_mutex can go away if we change locking order to
+ * mmap_sem -> bo::reserve.
+ */
+ ret = mutex_lock_interruptible(&bo->wu_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+ if (!ww_mutex_is_locked(&bo->resv->lock))
+ goto out_unlock;
+ ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+ mutex_unlock(&bo->wu_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cc904d3a4d..15b86a94949 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -343,19 +343,28 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (ret)
goto out;
+ /*
+ * Single TTM move. NOP.
+ */
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
- if (old_iomap == NULL && ttm == NULL)
+
+ /*
+ * Don't move nonexistent data. Clear destination instead.
+ */
+ if (old_iomap == NULL &&
+ (ttm == NULL || ttm->state == tt_unpopulated)) {
+ memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
+ }
- if (ttm->state == tt_unpopulated) {
+ /*
+ * TTM might be null for moves within the same region.
+ */
+ if (ttm && ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret) {
- /* if we fail here don't nuke the mm node
- * as the bo still owns it */
- old_copy.mm_node = NULL;
+ if (ret)
goto out1;
- }
}
add = 0;
@@ -381,11 +390,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- if (ret) {
- /* failing here, means keep old copy as-is */
- old_copy.mm_node = NULL;
+ if (ret)
goto out1;
- }
}
mb();
out2:
@@ -403,7 +409,12 @@ out1:
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
- ttm_bo_mem_put(bo, &old_copy);
+
+ /*
+ * On error, keep the mm node!
+ */
+ if (!ret)
+ ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15445e..b249ab9b1eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
#define TTM_BO_VM_NUM_PREFAULT 16
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+
+ spin_lock(&bdev->fence_lock);
+ if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+ goto out_unlock;
+
+ /*
+ * Quick non-stalling check for idle.
+ */
+ ret = ttm_bo_wait(bo, false, false, true);
+ if (likely(ret == 0))
+ goto out_unlock;
+
+ /*
+ * If possible, avoid waiting for GPU with mmap_sem
+ * held.
+ */
+ if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ ret = VM_FAULT_RETRY;
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out_unlock;
+
+ up_read(&vma->vm_mm->mmap_sem);
+ (void) ttm_bo_wait(bo, false, true, false);
+ goto out_unlock;
+ }
+
+ /*
+ * Ordinary wait.
+ */
+ ret = ttm_bo_wait(bo, false, true, false);
+ if (unlikely(ret != 0))
+ ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ VM_FAULT_NOPAGE;
+
+out_unlock:
+ spin_unlock(&bdev->fence_lock);
+ return ret;
+}
+
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -57,17 +102,33 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int retval = VM_FAULT_NOPAGE;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
+ struct vm_area_struct cvma;
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after scheduling.
+ * for reserve, and if it fails, retry the fault after waiting
+ * for the buffer to become unreserved.
*/
-
- ret = ttm_bo_reserve(bo, true, true, false, 0);
+ ret = ttm_bo_reserve(bo, true, true, false, NULL);
if (unlikely(ret != 0)) {
- if (ret == -EBUSY)
- set_need_resched();
+ if (ret != -EBUSY)
+ return VM_FAULT_NOPAGE;
+
+ if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ up_read(&vma->vm_mm->mmap_sem);
+ (void) ttm_bo_wait_unreserved(bo);
+ }
+
+ return VM_FAULT_RETRY;
+ }
+
+ /*
+ * If we wanted to change the locking order to
+ * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+ * instead of retrying the fault...
+ */
return VM_FAULT_NOPAGE;
}
@@ -77,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
case 0:
break;
case -EBUSY:
- set_need_resched();
case -ERESTARTSYS:
retval = VM_FAULT_NOPAGE;
goto out_unlock;
@@ -91,18 +151,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Wait for buffer data in transit, due to a pipelined
* move.
*/
-
- spin_lock(&bdev->fence_lock);
- if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
- ret = ttm_bo_wait(bo, false, true, false);
- spin_unlock(&bdev->fence_lock);
- if (unlikely(ret != 0)) {
- retval = (ret != -ERESTARTSYS) ?
- VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
- goto out_unlock;
- }
- } else
- spin_unlock(&bdev->fence_lock);
+ ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+ if (unlikely(ret != 0)) {
+ retval = ret;
+ goto out_unlock;
+ }
ret = ttm_mem_io_lock(man, true);
if (unlikely(ret != 0)) {
@@ -126,26 +179,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/*
- * Strictly, we're not allowed to modify vma->vm_page_prot here,
- * since the mmap_sem is only held in read mode. However, we
- * modify only the caching bits of vma->vm_page_prot and
- * consider those bits protected by
- * the bo->mutex, as we should be the only writers.
- * There shouldn't really be any readers of these bits except
- * within vm_insert_mixed()? fork?
- *
- * TODO: Add a list of vmas to the bo, and change the
- * vma->vm_page_prot when the object changes caching policy, with
- * the correct locks held.
+ * Make a local vma copy to modify the page_prot member
+ * and vm_flags if necessary. The vma parameter is protected
+ * by mmap_sem in write mode.
*/
+ cvma = *vma;
+ cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
if (bo->mem.bus.is_iomem) {
- vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
- vma->vm_page_prot);
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ cvma.vm_page_prot);
} else {
ttm = bo->ttm;
- vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
- vm_get_page_prot(vma->vm_flags) :
- ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+ if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ cvma.vm_page_prot);
/* Allocate all page at once, most common usage */
if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -172,7 +220,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pfn = page_to_pfn(page);
}
- ret = vm_insert_mixed(vma, address, pfn);
+ ret = vm_insert_mixed(&cvma, address, pfn);
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
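
The fault handler above stops modifying vma->vm_page_prot in place and instead works on a stack copy. Condensed, the pattern is the following; note it relies on vm_insert_mixed() only consulting the fields copied here, which is a TTM-specific assumption rather than a general VM rule:

	struct vm_area_struct cvma = *vma;	/* local copy; never written back */

	/* Derive the caching-dependent protection for this fault only. */
	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
					vm_get_page_prot(cvma.vm_flags));

	ret = vm_insert_mixed(&cvma, address, pfn);
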
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5..479e9418e3d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
- struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- ww_acquire_fini(ticket);
+ ttm_eu_backoff_reservation_locked(list);
+ if (ticket)
+ ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- ww_acquire_init(ticket, &reservation_ww_class);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
if (entry->reserved)
continue;
-
- ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+ ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+ ticket);
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
+ BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
}
}
- ww_acquire_done(ticket);
+ if (ticket)
+ ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
err:
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
+ }
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
- ww_acquire_fini(ticket);
+ if (ticket)
+ ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
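
With these changes a NULL ticket means "no ww_acquire context": the helpers skip ww_acquire_init/done/fini and reserve without a context. A hedged sketch of reserving a single buffer this way (list setup abbreviated; val_buf is illustrative, while the ttm_eu_* calls and struct ttm_validate_buffer fields are the ones in this file):

	struct ttm_validate_buffer val_buf = { .bo = bo };
	LIST_HEAD(list);

	list_add_tail(&val_buf.head, &list);

	/* NULL ticket: no ww_acquire_ctx bookkeeping */
	ret = ttm_eu_reserve_buffers(NULL, &list);
	if (ret == 0) {
		/* ... validate, queue work, fence ... */
		ttm_eu_backoff_reservation(NULL, &list);
	}
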
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258..6fe7b92a82d 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
*/
/** @file ttm_ref_object.c
*
@@ -34,6 +40,7 @@
* and release on file close.
*/
+
/**
* struct ttm_object_file
*
@@ -84,6 +91,9 @@ struct ttm_object_device {
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
+ struct dma_buf_ops ops;
+ void (*dmabuf_release)(struct dma_buf *dma_buf);
+ size_t dma_buf_size;
};
/**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
struct ttm_object_file *tfile;
};
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
@@ -416,9 +428,10 @@ out_err:
}
EXPORT_SYMBOL(ttm_object_file_init);
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
- *mem_glob,
- unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
+ if (ret != 0)
+ goto out_no_object_hash;
- if (likely(ret == 0))
- return tdev;
+ tdev->ops = *ops;
+ tdev->dmabuf_release = tdev->ops.release;
+ tdev->ops.release = ttm_prime_dmabuf_release;
+ tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+ ttm_round_pot(sizeof(struct file));
+ return tdev;
+out_no_object_hash:
kfree(tdev);
return NULL;
}
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+ return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_prime_object *prime;
+
+ *p_base = NULL;
+ prime = container_of(base, struct ttm_prime_object, base);
+ BUG_ON(prime->dma_buf != NULL);
+ mutex_destroy(&prime->mutex);
+ if (prime->refcount_release)
+ prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf:
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct ttm_prime_object *prime =
+ (struct ttm_prime_object *) dma_buf->priv;
+ struct ttm_base_object *base = &prime->base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ if (tdev->dmabuf_release)
+ tdev->dmabuf_release(dma_buf);
+ mutex_lock(&prime->mutex);
+ if (prime->dma_buf == dma_buf)
+ prime->dma_buf = NULL;
+ mutex_unlock(&prime->mutex);
+ ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+ ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ struct ttm_base_object *base;
+ int ret;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &tdev->ops)
+ return -ENOSYS;
+
+ prime = (struct ttm_prime_object *) dma_buf->priv;
+ base = &prime->base;
+ *handle = base->hash.key;
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+ dma_buf_put(dma_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ int ret;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL ||
+ base->object_type != ttm_prime_type)) {
+ ret = -ENOENT;
+ goto out_unref;
+ }
+
+ prime = container_of(base, struct ttm_prime_object, base);
+ if (unlikely(!base->shareable)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = mutex_lock_interruptible(&prime->mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_unref;
+ }
+
+ dma_buf = prime->dma_buf;
+ if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+ /*
+ * Need to create a new dma_buf, with memory accounting.
+ */
+ ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ dma_buf = dma_buf_export(prime, &tdev->ops,
+ prime->size, flags);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ttm_mem_global_free(tdev->mem_glob,
+ tdev->dma_buf_size);
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ /*
+ * dma_buf has taken the base object reference
+ */
+ base = NULL;
+ prime->dma_buf = dma_buf;
+ }
+ mutex_unlock(&prime->mutex);
+
+ ret = dma_buf_fd(dma_buf, flags);
+ if (ret >= 0) {
+ *prime_fd = ret;
+ ret = 0;
+ } else
+ dma_buf_put(dma_buf);
+
+out_unref:
+ if (base)
+ ttm_base_object_unref(&base);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+ struct ttm_prime_object *prime, bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ mutex_init(&prime->mutex);
+ prime->size = PAGE_ALIGN(size);
+ prime->real_type = type;
+ prime->dma_buf = NULL;
+ prime->refcount_release = refcount_release;
+ return ttm_base_object_init(tfile, &prime->base, shareable,
+ ttm_prime_type,
+ ttm_prime_refcount_release,
+ ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
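
The new ttm_prime_* entry points are designed to back a driver's DRM PRIME callbacks directly (vmwgfx does this in the vmwgfx_prime.c file added to the Makefile below). A minimal sketch of the glue, where my_fpriv() stands in for however the driver reaches its struct ttm_object_file; the ttm_prime_* and DRM callback signatures are the real ones:

	static int my_prime_handle_to_fd(struct drm_device *dev,
					 struct drm_file *file_priv,
					 uint32_t handle, uint32_t flags,
					 int *prime_fd)
	{
		struct ttm_object_file *tfile = my_fpriv(file_priv)->tfile;

		return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
	}

	static int my_prime_fd_to_handle(struct drm_device *dev,
					 struct drm_file *file_priv,
					 int fd, u32 *handle)
	{
		struct ttm_object_file *tfile = my_fpriv(file_priv)->tfile;

		return ttm_prime_fd_to_handle(tfile, fd, handle);
	}
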
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7957beeeaf7..fb8259f6983 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,7 @@
* when freed).
*/
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
@@ -1142,3 +1143,5 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
+
+#endif
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af19f45..f02528686cd 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@ config DRM_UDL
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0d78c..3ddd6cd98ac 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
.unload = udl_driver_unload,
/* gem hooks */
- .gem_init_object = udl_gem_init_object,
.gem_free_object = udl_gem_free_object,
.gem_vm_ops = &udl_gem_vm_ops,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec9409fa..1fbf7b357f1 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf646183ba..24ffbe99073 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-int udl_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87c366..92788910548 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
/* Linux specific until context tracking code gets ported to BSD */
/* Last context, perform cleanup */
- if (dev->ctx_count == 1 && dev->dev_private) {
+ if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
DRM_DEBUG("Last Context\n");
drm_irq_uninstall(dev);
via_cleanup_futex(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac1..9f8b690bcf5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o
+ vmwgfx_surface.o vmwgfx_prime.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 96dc84dc34d..7776e6f0aef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -141,37 +141,374 @@ struct ttm_placement vmw_srf_placement = {
};
struct vmw_ttm_tt {
- struct ttm_tt ttm;
+ struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
+ struct sg_table sgt;
+ struct vmw_sg_table vsgt;
+ uint64_t sg_alloc_size;
+ bool mapped;
};
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+ return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+ return __sg_page_iter_next(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+ return viter->pages[viter->i];
+}
+
+static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+{
+ return sg_page_iter_page(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+ return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+ return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+ return sg_page_iter_dma_address(&viter->iter);
+}
+
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+ unsigned long p_offset)
+{
+ viter->i = p_offset - 1;
+ viter->num_pages = vsgt->num_pages;
+ switch (vsgt->mode) {
+ case vmw_dma_phys:
+ viter->next = &__vmw_piter_non_sg_next;
+ viter->dma_address = &__vmw_piter_phys_addr;
+ viter->page = &__vmw_piter_non_sg_page;
+ viter->pages = vsgt->pages;
+ break;
+ case vmw_dma_alloc_coherent:
+ viter->next = &__vmw_piter_non_sg_next;
+ viter->dma_address = &__vmw_piter_dma_addr;
+ viter->page = &__vmw_piter_non_sg_page;
+ viter->addrs = vsgt->addrs;
+ break;
+ case vmw_dma_map_populate:
+ case vmw_dma_map_bind:
+ viter->next = &__vmw_piter_sg_next;
+ viter->dma_address = &__vmw_piter_sg_addr;
+ viter->page = &__vmw_piter_sg_page;
+ __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+ vsgt->sgt->orig_nents, p_offset);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+ struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+ dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+ DMA_BIDIRECTIONAL);
+ vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+ struct device *dev = vmw_tt->dev_priv->dev->dev;
+ int ret;
+
+ ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(ret == 0))
+ return -ENOMEM;
+
+ vmw_tt->sgt.nents = ret;
+
+ return 0;
+}
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct mapping function and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by the storage
+ * pointer being non-NULL, the function returns success.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+ struct vmw_private *dev_priv = vmw_tt->dev_priv;
+ struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+ struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+ struct vmw_piter iter;
+ dma_addr_t old;
+ int ret = 0;
+ static size_t sgl_size;
+ static size_t sgt_size;
+
+ if (vmw_tt->mapped)
+ return 0;
+
+ vsgt->mode = dev_priv->map_mode;
+ vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+ vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+ vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+ vsgt->sgt = &vmw_tt->sgt;
+
+ switch (dev_priv->map_mode) {
+ case vmw_dma_map_bind:
+ case vmw_dma_map_populate:
+ if (unlikely(!sgl_size)) {
+ sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+ sgt_size = ttm_round_pot(sizeof(struct sg_table));
+ }
+ vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+ ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
+ true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+ vsgt->num_pages, 0,
+ (unsigned long)
+ vsgt->num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (unlikely(ret != 0))
+ goto out_sg_alloc_fail;
+
+ if (vsgt->num_pages > vmw_tt->sgt.nents) {
+ uint64_t over_alloc =
+ sgl_size * (vsgt->num_pages -
+ vmw_tt->sgt.nents);
+
+ ttm_mem_global_free(glob, over_alloc);
+ vmw_tt->sg_alloc_size -= over_alloc;
+ }
+
+ ret = vmw_ttm_map_for_dma(vmw_tt);
+ if (unlikely(ret != 0))
+ goto out_map_fail;
+
+ break;
+ default:
+ break;
+ }
+
+ old = ~((dma_addr_t) 0);
+ vmw_tt->vsgt.num_regions = 0;
+ for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+ dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+ if (cur != old + PAGE_SIZE)
+ vmw_tt->vsgt.num_regions++;
+ old = cur;
+ }
+
+ vmw_tt->mapped = true;
+ return 0;
+
+out_map_fail:
+ sg_free_table(vmw_tt->vsgt.sgt);
+ vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+ ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+ return ret;
+}
+
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+ struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+ if (!vmw_tt->vsgt.sgt)
+ return;
+
+ switch (dev_priv->map_mode) {
+ case vmw_dma_map_bind:
+ case vmw_dma_map_populate:
+ vmw_ttm_unmap_from_dma(vmw_tt);
+ sg_free_table(vmw_tt->vsgt.sgt);
+ vmw_tt->vsgt.sgt = NULL;
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_tt->sg_alloc_size);
+ break;
+ default:
+ break;
+ }
+ vmw_tt->mapped = false;
+}
+
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ struct vmw_ttm_tt *vmw_be =
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ int ret;
+
+ ret = vmw_ttm_map_dma(vmw_be);
+ if (unlikely(ret != 0))
+ return ret;
vmw_be->gmr_id = bo_mem->start;
- return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+ return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
ttm->num_pages, vmw_be->gmr_id);
}
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
- struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ struct vmw_ttm_tt *vmw_be =
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+ vmw_ttm_unmap_dma(vmw_be);
+
return 0;
}
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
- struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
-
- ttm_tt_fini(ttm);
+ struct vmw_ttm_tt *vmw_be =
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ vmw_ttm_unmap_dma(vmw_be);
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ ttm_dma_tt_fini(&vmw_be->dma_ttm);
+ else
+ ttm_tt_fini(ttm);
kfree(vmw_be);
}
+static int vmw_ttm_populate(struct ttm_tt *ttm)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ struct vmw_private *dev_priv = vmw_tt->dev_priv;
+ struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+ size_t size =
+ ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+ ret = ttm_mem_global_alloc(glob, size, false, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+ if (unlikely(ret != 0))
+ ttm_mem_global_free(glob, size);
+ } else
+ ret = ttm_pool_populate(ttm);
+
+ return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+ struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+ dma_ttm.ttm);
+ struct vmw_private *dev_priv = vmw_tt->dev_priv;
+ struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+ vmw_ttm_unmap_dma(vmw_tt);
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+ size_t size =
+ ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+ ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+ ttm_mem_global_free(glob, size);
+ } else
+ ttm_pool_unpopulate(ttm);
+}
+
static struct ttm_backend_func vmw_ttm_func = {
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
@@ -183,20 +520,28 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
struct page *dummy_read_page)
{
struct vmw_ttm_tt *vmw_be;
+ int ret;
- vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+ vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
- vmw_be->ttm.func = &vmw_ttm_func;
+ vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
- if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(vmw_be);
- return NULL;
- }
-
- return &vmw_be->ttm;
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
+ dummy_read_page);
+ else
+ ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
+ dummy_read_page);
+ if (unlikely(ret != 0))
+ goto out_no_init;
+
+ return &vmw_be->dma_ttm.ttm;
+out_no_init:
+ kfree(vmw_be);
+ return NULL;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -332,8 +677,8 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
- .ttm_tt_populate = &ttm_pool_populate,
- .ttm_tt_unpopulate = &ttm_pool_unpopulate,
+ .ttm_tt_populate = &vmw_ttm_populate,
+ .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0508f93b979..c7a549694e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
+#include <linux/dma_remapping.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -185,6 +186,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+static int vmw_force_iommu;
+static int vmw_restrict_iommu;
+static int vmw_force_coherent;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -193,6 +197,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
+module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+
static void vmw_print_capabilities(uint32_t capabilities)
{
@@ -427,12 +438,85 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
dev_priv->initial_height = height;
}
+/**
+ * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
+ * system.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * This function tries to determine the IOMMU setup and what actions
+ * need to be taken by the driver to make system pages visible to the
+ * device.
+ * If this function decides that DMA is not possible, it returns -EINVAL.
+ * The driver may then try to disable features of the device that require
+ * DMA.
+ */
+static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+{
+ static const char *names[vmw_dma_map_max] = {
+ [vmw_dma_phys] = "Using physical TTM page addresses.",
+ [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
+ [vmw_dma_map_populate] = "Keeping DMA mappings.",
+ [vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+ const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
+
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_enabled) {
+ dev_priv->map_mode = vmw_dma_map_populate;
+ goto out_fixup;
+ }
+#endif
+
+ if (!(vmw_force_iommu || vmw_force_coherent)) {
+ dev_priv->map_mode = vmw_dma_phys;
+ DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+ return 0;
+ }
+
+ dev_priv->map_mode = vmw_dma_map_populate;
+
+ if (dma_ops->sync_single_for_cpu)
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl() == 0)
+ dev_priv->map_mode = vmw_dma_map_populate;
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU
+out_fixup:
+#endif
+ if (dev_priv->map_mode == vmw_dma_map_populate &&
+ vmw_restrict_iommu)
+ dev_priv->map_mode = vmw_dma_map_bind;
+
+ if (vmw_force_coherent)
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+
+#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
+ /*
+ * No coherent page pool
+ */
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ return -EINVAL;
+#endif
+
+#else /* CONFIG_X86 */
+ dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
+ DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+
+ return 0;
+}
+
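Read as a priority ladder, the selection above boils down to a few overrides stacked on top of each other. The sketch below restates it in isolation so the precedence is easier to see; it is illustrative only, ignores the dma_ops and SWIOTLB checks, and assumes the enum from vmwgfx_drv.h:

static enum vmw_dma_map_mode vmw_dma_mode_sketch(bool iommu_enabled,
						 bool force_dma_api,
						 bool restrict_iommu,
						 bool force_coherent)
{
	enum vmw_dma_map_mode mode = vmw_dma_phys;

	/* any of these forces real DMA mappings instead of raw PFNs */
	if (iommu_enabled || force_dma_api || force_coherent)
		mode = vmw_dma_map_populate;

	/* restrict_iommu only demotes populate to the shorter-lived bind mode */
	if (mode == vmw_dma_map_populate && restrict_iommu)
		mode = vmw_dma_map_bind;

	/* force_coherent always wins when a coherent page pool exists */
	if (force_coherent)
		mode = vmw_dma_alloc_coherent;

	return mode;
}

In practice this means loading the module with force_coherent=1 always ends up in vmw_dma_alloc_coherent (when a coherent pool is available), while restrict_iommu=1 merely turns vmw_dma_map_populate into vmw_dma_map_bind.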
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
int ret;
uint32_t svga_id;
enum vmw_res_type i;
+ bool refuse_dma = false;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -481,6 +565,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+ ret = vmw_dma_select_mode(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+ refuse_dma = true;
+ }
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -558,8 +647,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->has_gmr = true;
- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- dev_priv->max_gmr_ids) != 0) {
+ if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+ refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+ dev_priv->max_gmr_ids) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
@@ -587,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->tdev = ttm_object_device_init
- (dev_priv->mem_global_ref.object, 12);
+ (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1120,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
@@ -1145,6 +1235,9 @@ static struct drm_driver driver = {
.dumb_map_offset = vmw_dumb_map_offset,
.dumb_destroy = vmw_dumb_destroy,
+ .prime_fd_to_handle = vmw_prime_fd_to_handle,
+ .prime_handle_to_fd = vmw_prime_handle_to_fd,
+
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 150ec64af61..db85985c708 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -177,6 +177,58 @@ struct vmw_res_cache_entry {
struct vmw_resource_val_node *node;
};
+/**
+ * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ */
+enum vmw_dma_map_mode {
+ vmw_dma_phys, /* Use physical page addresses */
+ vmw_dma_alloc_coherent, /* Use TTM coherent pages */
+ vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
+ vmw_dma_map_bind, /* Unmap from DMA just before unbind */
+ vmw_dma_map_max
+};
+
+/**
+ * struct vmw_sg_table - Scatter/gather table for binding, with additional
+ * device-specific information.
+ *
+ * @sgt: Pointer to a struct sg_table with binding information
+ * @num_regions: Number of regions with device-address contiguous pages
+ */
+struct vmw_sg_table {
+ enum vmw_dma_map_mode mode;
+ struct page **pages;
+ const dma_addr_t *addrs;
+ struct sg_table *sgt;
+ unsigned long num_regions;
+ unsigned long num_pages;
+};
+
+/**
+ * struct vmw_piter - Page iterator that iterates over a list of pages
+ * and DMA addresses that could be either a scatter-gather list or
+ * arrays
+ *
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
+ * @iter: Scatter-gather page iterator. Current position in SG list.
+ * @i: Current position in arrays.
+ * @num_pages: Number of pages total.
+ * @next: Function to advance the iterator. Returns false if past the list
+ * of pages, true otherwise.
+ * @dma_address: Function to return the DMA address of the current page.
+ */
+struct vmw_piter {
+ struct page **pages;
+ const dma_addr_t *addrs;
+ struct sg_page_iter iter;
+ unsigned long i;
+ unsigned long num_pages;
+ bool (*next)(struct vmw_piter *);
+ dma_addr_t (*dma_address)(struct vmw_piter *);
+ struct page *(*page)(struct vmw_piter *);
+};
+
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
@@ -358,6 +410,11 @@ struct vmw_private {
struct list_head res_lru[vmw_res_max];
uint32_t used_memory_size;
+
+ /*
+ * DMA mapping stuff.
+ */
+ enum vmw_dma_map_mode map_mode;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -405,7 +462,7 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
*/
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
- struct page *pages[],
+ const struct vmw_sg_table *vsgt,
unsigned long num_pages,
int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
@@ -568,6 +625,45 @@ extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
+extern void vmw_piter_start(struct vmw_piter *viter,
+ const struct vmw_sg_table *vsgt,
+ unsigned long p_offs);
+
+/**
+ * vmw_piter_next - Advance the iterator one page.
+ *
+ * @viter: Pointer to the iterator to advance.
+ *
+ * Returns false if past the list of pages, true otherwise.
+ */
+static inline bool vmw_piter_next(struct vmw_piter *viter)
+{
+ return viter->next(viter);
+}
+
+/**
+ * vmw_piter_dma_addr - Return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+ return viter->dma_address(viter);
+}
+
+/**
+ * vmw_piter_page - Return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns a pointer to the page pointed to by @viter.
+ */
+static inline struct page *vmw_piter_page(struct vmw_piter *viter)
+{
+ return viter->page(viter);
+}
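Consumers walk the table through these accessors without caring whether the backing store is a page array, a DMA-address array or a scatter-gather list. A minimal sketch of the usual pattern, mirroring the region counting done in vmw_ttm_map_dma() above (not new driver code, and it assumes the declarations from this header):

static unsigned long vmw_count_regions_sketch(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter iter;
	dma_addr_t old = ~((dma_addr_t) 0);
	unsigned long regions = 0;

	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		/* a gap in DMA space starts a new region */
		if (cur != old + PAGE_SIZE)
			regions++;
		old = cur;
	}

	return regions;
}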
/**
* Command submission - vmwgfx_execbuf.c
@@ -723,6 +819,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+
+/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 1a0bf07fe54..6ef0b035bec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -32,9 +32,11 @@
#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
- struct page *pages[],
+ struct vmw_piter *iter,
unsigned long num_pages,
int gmr_id)
{
@@ -81,11 +83,13 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
for (i = 0; i < nr; ++i) {
if (VMW_PPN_SIZE <= 4)
- *cmd = page_to_pfn(*pages++);
+ *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
else
- *((uint64_t *)cmd) = page_to_pfn(*pages++);
+ *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+ PAGE_SHIFT;
cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ vmw_piter_next(iter);
}
num_pages -= nr;
@@ -120,22 +124,56 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, define_size);
}
+
+static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
+ struct list_head *desc_pages)
+{
+ struct page *page, *next;
+ struct svga_guest_mem_descriptor *page_virtual;
+ unsigned int desc_per_page = PAGE_SIZE /
+ sizeof(struct svga_guest_mem_descriptor) - 1;
+
+ if (list_empty(desc_pages))
+ return;
+
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+
+ if (likely(desc_dma != DMA_ADDR_INVALID)) {
+ dma_unmap_page(dev, desc_dma, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ }
+
+ page_virtual = kmap_atomic(page);
+ desc_dma = (dma_addr_t)
+ le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+ PAGE_SHIFT;
+ kunmap_atomic(page_virtual);
+
+ __free_page(page);
+ }
+}
+
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
+ *
*/
-static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
- struct page *pages[],
- unsigned long num_pages)
+static int vmw_gmr_build_descriptors(struct device *dev,
+ struct list_head *desc_pages,
+ struct vmw_piter *iter,
+ unsigned long num_pages,
+ dma_addr_t *first_dma)
{
- struct page *page, *next;
+ struct page *page;
struct svga_guest_mem_descriptor *page_virtual = NULL;
struct svga_guest_mem_descriptor *desc_virtual = NULL;
unsigned int desc_per_page;
unsigned long prev_pfn;
unsigned long pfn;
int ret;
+ dma_addr_t desc_dma;
desc_per_page = PAGE_SIZE /
sizeof(struct svga_guest_mem_descriptor) - 1;
@@ -148,23 +186,12 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
}
list_add_tail(&page->lru, desc_pages);
-
- /*
- * Point previous page terminating descriptor to this
- * page before unmapping it.
- */
-
- if (likely(page_virtual != NULL)) {
- desc_virtual->ppn = page_to_pfn(page);
- kunmap_atomic(page_virtual);
- }
-
page_virtual = kmap_atomic(page);
desc_virtual = page_virtual - 1;
prev_pfn = ~(0UL);
while (likely(num_pages != 0)) {
- pfn = page_to_pfn(*pages);
+ pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
if (pfn != prev_pfn + 1) {
@@ -181,104 +208,82 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
}
prev_pfn = pfn;
--num_pages;
- ++pages;
+ vmw_piter_next(iter);
}
- (++desc_virtual)->ppn = cpu_to_le32(0);
+ (++desc_virtual)->ppn = DMA_PAGE_INVALID;
desc_virtual->num_pages = cpu_to_le32(0);
+ kunmap_atomic(page_virtual);
}
- if (likely(page_virtual != NULL))
+ desc_dma = 0;
+ list_for_each_entry_reverse(page, desc_pages, lru) {
+ page_virtual = kmap_atomic(page);
+ page_virtual[desc_per_page].ppn = cpu_to_le32
+ (desc_dma >> PAGE_SHIFT);
kunmap_atomic(page_virtual);
+ desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, desc_dma)))
+ goto out_err;
+ }
+ *first_dma = desc_dma;
return 0;
out_err:
- list_for_each_entry_safe(page, next, desc_pages, lru) {
- list_del_init(&page->lru);
- __free_page(page);
- }
+ vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
return ret;
}
-static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
-{
- struct page *page, *next;
-
- list_for_each_entry_safe(page, next, desc_pages, lru) {
- list_del_init(&page->lru);
- __free_page(page);
- }
-}
-
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
- int gmr_id, struct list_head *desc_pages)
+ int gmr_id, dma_addr_t desc_dma)
{
- struct page *page;
-
- if (unlikely(list_empty(desc_pages)))
- return;
-
- page = list_entry(desc_pages->next, struct page, lru);
-
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
mb();
mutex_unlock(&dev_priv->hw_mutex);
}
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- */
-
-static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
- unsigned long num_pages)
-{
- unsigned long prev_pfn = ~(0UL);
- unsigned long pfn;
- unsigned long descriptors = 0;
-
- while (num_pages--) {
- pfn = page_to_pfn(*pages++);
- if (prev_pfn + 1 != pfn)
- ++descriptors;
- prev_pfn = pfn;
- }
-
- return descriptors;
-}
-
int vmw_gmr_bind(struct vmw_private *dev_priv,
- struct page *pages[],
+ const struct vmw_sg_table *vsgt,
unsigned long num_pages,
int gmr_id)
{
struct list_head desc_pages;
+ dma_addr_t desc_dma = 0;
+ struct device *dev = dev_priv->dev->dev;
+ struct vmw_piter data_iter;
int ret;
+ vmw_piter_start(&data_iter, vsgt, 0);
+
+ if (unlikely(!vmw_piter_next(&data_iter)))
+ return 0;
+
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
- return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+ return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
return -EINVAL;
- if (vmw_gmr_count_descriptors(pages, num_pages) >
- dev_priv->max_gmr_descriptors)
+ if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
return -EINVAL;
INIT_LIST_HEAD(&desc_pages);
- ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
+ ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
+ num_pages, &desc_dma);
if (unlikely(ret != 0))
return ret;
- vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
- vmw_gmr_free_descriptors(&desc_pages);
+ vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
+ vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
return 0;
}
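The legacy (non-GMR2) path above chains its descriptor pages through their last slot: desc_per_page leaves one svga_guest_mem_descriptor free at the end of every page, and the reverse walk fills that slot with the DMA address, expressed as a PPN, of the page that follows it. As a rough worked example, assuming 4 KiB pages and the two-u32 descriptor layout (8 bytes per descriptor):

	desc_per_page = PAGE_SIZE / sizeof(struct svga_guest_mem_descriptor) - 1
	              = 4096 / 8 - 1 = 511

so each descriptor page describes up to 511 regions, slot 512 carries the link, and a table with vsgt->num_regions = 1200 needs three descriptor pages.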
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c509d40c489..a51f48e3e91 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -168,7 +168,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, arg->fb_id);
if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
- ret = -EINVAL;
+ ret = -ENOENT;
goto out_no_fb;
}
vfb = vmw_framebuffer_to_vfb(fb);
@@ -252,7 +252,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, arg->fb_id);
if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
- ret = -EINVAL;
+ ret = -ENOENT;
goto out_no_fb;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fc43c060123..ecb3d867b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1508,7 +1508,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 00000000000..31fe32d8d65
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+{
+ return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgb,
+ enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+ return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops = {
+ .attach = vmw_prime_map_attach,
+ .detach = vmw_prime_map_detach,
+ .map_dma_buf = vmw_prime_map_dma_buf,
+ .unmap_dma_buf = vmw_prime_unmap_dma_buf,
+ .release = NULL,
+ .kmap = vmw_prime_dmabuf_kmap,
+ .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+ .kunmap = vmw_prime_dmabuf_kunmap,
+ .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+ .mmap = vmw_prime_dmabuf_mmap,
+ .vmap = vmw_prime_dmabuf_vmap,
+ .vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 37fb4befec8..efe2b74c5eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,8 +32,10 @@
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
+#define VMW_RES_EVICT_ERR_COUNT 10
+
struct vmw_user_dma_buffer {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
@@ -295,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != converter->object_type))
+ if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
@@ -385,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- ttm_base_object_kfree(vmw_user_bo, base);
+ ttm_prime_object_kfree(vmw_user_bo, prime);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -399,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
if (unlikely(base == NULL))
return;
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
@@ -440,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
return ret;
tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_base_object_init(tfile,
- &user_bo->base,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_dma_buf = &user_bo->dma;
- *handle = user_bo->base.hash.key;
+ *handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
@@ -473,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
- return (vmw_user_bo->base.tfile == tfile ||
- vmw_user_bo->base.shareable) ? 0 : -EPERM;
+ return (vmw_user_bo->prime.base.tfile == tfile ||
+ vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -536,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return -ESRCH;
}
- if (unlikely(base->object_type != ttm_buffer_type)) {
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
@@ -560,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
- return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL);
}
/*
@@ -805,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
goto out_no_dmabuf;
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
+ args->size,
+ &vmw_user_bo->prime,
+ false,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0))
goto out_no_base_object;
- args->handle = vmw_user_bo->base.hash.key;
+ args->handle = vmw_user_bo->prime.base.hash.key;
out_no_base_object:
ttm_bo_unref(&tmp);
@@ -992,7 +999,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
*/
static int
vmw_resource_check_buffer(struct vmw_resource *res,
- struct ww_acquire_ctx *ticket,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -1009,7 +1015,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1027,7 +1033,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
@@ -1072,8 +1078,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1082,7 +1087,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1091,18 +1096,18 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
* to a backup buffer.
*
* @res: The resource to evict.
+ * @interruptible: Whether to wait interruptible.
*/
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
- struct ww_acquire_ctx ticket;
int ret;
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+ ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1117,7 +1122,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&ticket, &val_buf);
+ vmw_resource_backoff_reservation(&val_buf);
return ret;
}
@@ -1141,6 +1146,7 @@ int vmw_resource_validate(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
struct ttm_validate_buffer val_buf;
+ unsigned err_count = 0;
if (likely(!res->func->may_evict))
return 0;
@@ -1155,7 +1161,7 @@ int vmw_resource_validate(struct vmw_resource *res)
write_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
- DRM_ERROR("Out of device device id entries "
+ DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1174,19 @@ int vmw_resource_validate(struct vmw_resource *res)
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- vmw_resource_do_evict(evict_res);
+
+ ret = vmw_resource_do_evict(evict_res, true);
+ if (unlikely(ret != 0)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&evict_res->lru_head, lru_list);
+ write_unlock(&dev_priv->resource_lock);
+ if (ret == -ERESTARTSYS ||
+ ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+ vmw_resource_unreference(&evict_res);
+ goto out_no_validate;
+ }
+ }
+
vmw_resource_unreference(&evict_res);
} while (1);
@@ -1253,13 +1271,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
* @type: The resource type to evict
*
* To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
*/
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
enum vmw_res_type type)
{
struct list_head *lru_list = &dev_priv->res_lru[type];
struct vmw_resource *evict_res;
+ unsigned err_count = 0;
+ int ret;
do {
write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1292,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
lru_head));
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- vmw_resource_do_evict(evict_res);
+
+ ret = vmw_resource_do_evict(evict_res, false);
+ if (unlikely(ret != 0)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&evict_res->lru_head, lru_list);
+ write_unlock(&dev_priv->resource_lock);
+ if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+ vmw_resource_unreference(&evict_res);
+ return;
+ }
+ }
+
vmw_resource_unreference(&evict_res);
} while (1);
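Both loops above now follow the same bounded-retry shape: a resource that fails to evict goes back on the LRU list, and the loop only gives up after VMW_RES_EVICT_ERR_COUNT consecutive failures, or immediately on -ERESTARTSYS in the interruptible path. A reduced sketch of that shape, with locking and reference counting omitted and the two list helpers as hypothetical placeholders rather than driver functions:

static void vmw_evict_all_sketch(struct list_head *lru_list, bool interruptible)
{
	unsigned err_count = 0;
	int ret;

	while (!list_empty(lru_list)) {
		struct vmw_resource *res = pick_lru_head(lru_list);	/* hypothetical */

		ret = vmw_resource_do_evict(res, interruptible);
		if (unlikely(ret != 0)) {
			put_back_on_lru(lru_list, res);		/* hypothetical */
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT)
				break;
		}
	}
}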
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 58281433974..7de2ea8bd55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
* @size: TTM accounting size for the surface.
*/
struct vmw_user_surface {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
- return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+ return &(container_of(base, struct vmw_user_surface,
+ prime.base)->srf.res);
}
/**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
+ container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
/**
* From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->sid = user_srf->base.hash.key;
+ rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
out_no_offsets:
kfree(srf->sizes);
out_no_sizes:
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index ccfd42b2360..7d6bed22254 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -19,6 +19,4 @@ config TEGRA_HOST1X_FIREWALL
If unsure, choose Y.
-source "drivers/gpu/host1x/drm/Kconfig"
-
endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 3b037b6e029..afa1e9e4e51 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,6 +1,5 @@
-ccflags-y = -Idrivers/gpu/host1x
-
host1x-y = \
+ bus.o \
syncpt.o \
dev.o \
intr.o \
@@ -8,13 +7,7 @@ host1x-y = \
channel.o \
job.o \
debug.o \
- hw/host1x01.o
-
-ccflags-y += -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+ hw/host1x01.o \
+ hw/host1x02.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644
index 00000000000..509383f8be0
--- /dev/null
+++ b/drivers/gpu/host1x/bus.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/host1x.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+static DEFINE_MUTEX(clients_lock);
+static LIST_HEAD(clients);
+
+static DEFINE_MUTEX(drivers_lock);
+static LIST_HEAD(drivers);
+
+static DEFINE_MUTEX(devices_lock);
+static LIST_HEAD(devices);
+
+struct host1x_subdev {
+ struct host1x_client *client;
+ struct device_node *np;
+ struct list_head list;
+};
+
+/**
+ * host1x_subdev_add() - add a new subdevice with an associated device node
+ */
+static int host1x_subdev_add(struct host1x_device *device,
+ struct device_node *np)
+{
+ struct host1x_subdev *subdev;
+
+ subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
+ if (!subdev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&subdev->list);
+ subdev->np = of_node_get(np);
+
+ mutex_lock(&device->subdevs_lock);
+ list_add_tail(&subdev->list, &device->subdevs);
+ mutex_unlock(&device->subdevs_lock);
+
+ return 0;
+}
+
+/**
+ * host1x_subdev_del() - remove subdevice
+ */
+static void host1x_subdev_del(struct host1x_subdev *subdev)
+{
+ list_del(&subdev->list);
+ of_node_put(subdev->np);
+ kfree(subdev);
+}
+
+/**
+ * host1x_device_parse_dt() - scan device tree and add matching subdevices
+ */
+static int host1x_device_parse_dt(struct host1x_device *device)
+{
+ struct device_node *np;
+ int err;
+
+ for_each_child_of_node(device->dev.parent->of_node, np) {
+ if (of_match_node(device->driver->subdevs, np) &&
+ of_device_is_available(np)) {
+ err = host1x_subdev_add(device, np);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void host1x_subdev_register(struct host1x_device *device,
+ struct host1x_subdev *subdev,
+ struct host1x_client *client)
+{
+ int err;
+
+ /*
+ * Move the subdevice to the list of active (registered) subdevices
+ * and associate it with a client. At the same time, associate the
+ * client with its parent device.
+ */
+ mutex_lock(&device->subdevs_lock);
+ mutex_lock(&device->clients_lock);
+ list_move_tail(&client->list, &device->clients);
+ list_move_tail(&subdev->list, &device->active);
+ client->parent = &device->dev;
+ subdev->client = client;
+ mutex_unlock(&device->clients_lock);
+ mutex_unlock(&device->subdevs_lock);
+
+ /*
+ * When all subdevices have been registered, the composite device is
+ * ready to be probed.
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device->driver->probe(device);
+ if (err < 0)
+ dev_err(&device->dev, "probe failed: %d\n", err);
+ }
+}
+
+static void __host1x_subdev_unregister(struct host1x_device *device,
+ struct host1x_subdev *subdev)
+{
+ struct host1x_client *client = subdev->client;
+ int err;
+
+ /*
+ * If all subdevices have been activated, we're about to remove the
+ * first active subdevice, so unload the driver first.
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device->driver->remove(device);
+ if (err < 0)
+ dev_err(&device->dev, "remove failed: %d\n", err);
+ }
+
+ /*
+ * Move the subdevice back to the list of idle subdevices and remove
+ * it from list of clients.
+ */
+ mutex_lock(&device->clients_lock);
+ subdev->client = NULL;
+ client->parent = NULL;
+ list_move_tail(&subdev->list, &device->subdevs);
+ /*
+ * XXX: Perhaps don't do this here, but rather explicitly remove it
+ * when the device is about to be deleted.
+ *
+ * This is somewhat complicated by the fact that this function is
+ * used to remove the subdevice when a client is unregistered but
+ * also when the composite device is about to be removed.
+ */
+ list_del_init(&client->list);
+ mutex_unlock(&device->clients_lock);
+}
+
+static void host1x_subdev_unregister(struct host1x_device *device,
+ struct host1x_subdev *subdev)
+{
+ mutex_lock(&device->subdevs_lock);
+ __host1x_subdev_unregister(device, subdev);
+ mutex_unlock(&device->subdevs_lock);
+}
+
+int host1x_device_init(struct host1x_device *device)
+{
+ struct host1x_client *client;
+ int err;
+
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry(client, &device->clients, list) {
+ if (client->ops && client->ops->init) {
+ err = client->ops->init(client);
+ if (err < 0) {
+ dev_err(&device->dev,
+ "failed to initialize %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&device->clients_lock);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&device->clients_lock);
+
+ return 0;
+}
+
+int host1x_device_exit(struct host1x_device *device)
+{
+ struct host1x_client *client;
+ int err;
+
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry_reverse(client, &device->clients, list) {
+ if (client->ops && client->ops->exit) {
+ err = client->ops->exit(client);
+ if (err < 0) {
+ dev_err(&device->dev,
+ "failed to cleanup %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&device->clients_lock);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&device->clients_lock);
+
+ return 0;
+}
+
+static int host1x_register_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_device *device;
+ struct host1x_subdev *subdev;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry(device, &host1x->devices, list) {
+ list_for_each_entry(subdev, &device->subdevs, list) {
+ if (subdev->np == client->dev->of_node) {
+ host1x_subdev_register(device, subdev, client);
+ mutex_unlock(&host1x->devices_lock);
+ return 0;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->devices_lock);
+ return -ENODEV;
+}
+
+static int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_device *device, *dt;
+ struct host1x_subdev *subdev;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry_safe(device, dt, &host1x->devices, list) {
+ list_for_each_entry(subdev, &device->active, list) {
+ if (subdev->client == client) {
+ host1x_subdev_unregister(device, subdev);
+ mutex_unlock(&host1x->devices_lock);
+ return 0;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->devices_lock);
+ return -ENODEV;
+}
+
+struct bus_type host1x_bus_type = {
+ .name = "host1x",
+};
+
+int host1x_bus_init(void)
+{
+ return bus_register(&host1x_bus_type);
+}
+
+void host1x_bus_exit(void)
+{
+ bus_unregister(&host1x_bus_type);
+}
+
+static void host1x_device_release(struct device *dev)
+{
+ struct host1x_device *device = to_host1x_device(dev);
+
+ kfree(device);
+}
+
+static int host1x_device_add(struct host1x *host1x,
+ struct host1x_driver *driver)
+{
+ struct host1x_client *client, *tmp;
+ struct host1x_subdev *subdev;
+ struct host1x_device *device;
+ int err;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ mutex_init(&device->subdevs_lock);
+ INIT_LIST_HEAD(&device->subdevs);
+ INIT_LIST_HEAD(&device->active);
+ mutex_init(&device->clients_lock);
+ INIT_LIST_HEAD(&device->clients);
+ INIT_LIST_HEAD(&device->list);
+ device->driver = driver;
+
+ device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
+ device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ device->dev.release = host1x_device_release;
+ dev_set_name(&device->dev, driver->name);
+ device->dev.bus = &host1x_bus_type;
+ device->dev.parent = host1x->dev;
+
+ err = device_register(&device->dev);
+ if (err < 0)
+ return err;
+
+ err = host1x_device_parse_dt(device);
+ if (err < 0) {
+ device_unregister(&device->dev);
+ return err;
+ }
+
+ mutex_lock(&host1x->devices_lock);
+ list_add_tail(&device->list, &host1x->devices);
+ mutex_unlock(&host1x->devices_lock);
+
+ mutex_lock(&clients_lock);
+
+ list_for_each_entry_safe(client, tmp, &clients, list) {
+ list_for_each_entry(subdev, &device->subdevs, list) {
+ if (subdev->np == client->dev->of_node) {
+ host1x_subdev_register(device, subdev, client);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&clients_lock);
+
+ return 0;
+}
+
+/*
+ * Removes a device by first unregistering any subdevices and then removing
+ * it from the list of devices.
+ *
+ * This function must be called with the host1x->devices_lock held.
+ */
+static void host1x_device_del(struct host1x *host1x,
+ struct host1x_device *device)
+{
+ struct host1x_subdev *subdev, *sd;
+ struct host1x_client *client, *cl;
+
+ mutex_lock(&device->subdevs_lock);
+
+ /* unregister subdevices */
+ list_for_each_entry_safe(subdev, sd, &device->active, list) {
+ /*
+ * host1x_subdev_unregister() will remove the client from
+ * any lists, so we'll need to manually add it back to the
+ * list of idle clients.
+ *
+ * XXX: Alternatively, perhaps don't remove the client from
+ * any lists in host1x_subdev_unregister() and instead do
+ * that explicitly from host1x_unregister_client()?
+ */
+ client = subdev->client;
+
+ __host1x_subdev_unregister(device, subdev);
+
+ /* add the client to the list of idle clients */
+ mutex_lock(&clients_lock);
+ list_add_tail(&client->list, &clients);
+ mutex_unlock(&clients_lock);
+ }
+
+ /* remove subdevices */
+ list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
+ host1x_subdev_del(subdev);
+
+ mutex_unlock(&device->subdevs_lock);
+
+ /* move clients to idle list */
+ mutex_lock(&clients_lock);
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry_safe(client, cl, &device->clients, list)
+ list_move_tail(&client->list, &clients);
+
+ mutex_unlock(&device->clients_lock);
+ mutex_unlock(&clients_lock);
+
+ /* finally remove the device */
+ list_del_init(&device->list);
+ device_unregister(&device->dev);
+}
+
+static void host1x_attach_driver(struct host1x *host1x,
+ struct host1x_driver *driver)
+{
+ struct host1x_device *device;
+ int err;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry(device, &host1x->devices, list) {
+ if (device->driver == driver) {
+ mutex_unlock(&host1x->devices_lock);
+ return;
+ }
+ }
+
+ mutex_unlock(&host1x->devices_lock);
+
+ err = host1x_device_add(host1x, driver);
+ if (err < 0)
+ dev_err(host1x->dev, "failed to allocate device: %d\n", err);
+}
+
+static void host1x_detach_driver(struct host1x *host1x,
+ struct host1x_driver *driver)
+{
+ struct host1x_device *device, *tmp;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry_safe(device, tmp, &host1x->devices, list)
+ if (device->driver == driver)
+ host1x_device_del(host1x, device);
+
+ mutex_unlock(&host1x->devices_lock);
+}
+
+int host1x_register(struct host1x *host1x)
+{
+ struct host1x_driver *driver;
+
+ mutex_lock(&devices_lock);
+ list_add_tail(&host1x->list, &devices);
+ mutex_unlock(&devices_lock);
+
+ mutex_lock(&drivers_lock);
+
+ list_for_each_entry(driver, &drivers, list)
+ host1x_attach_driver(host1x, driver);
+
+ mutex_unlock(&drivers_lock);
+
+ return 0;
+}
+
+int host1x_unregister(struct host1x *host1x)
+{
+ struct host1x_driver *driver;
+
+ mutex_lock(&drivers_lock);
+
+ list_for_each_entry(driver, &drivers, list)
+ host1x_detach_driver(host1x, driver);
+
+ mutex_unlock(&drivers_lock);
+
+ mutex_lock(&devices_lock);
+ list_del_init(&host1x->list);
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+
+int host1x_driver_register(struct host1x_driver *driver)
+{
+ struct host1x *host1x;
+
+ INIT_LIST_HEAD(&driver->list);
+
+ mutex_lock(&drivers_lock);
+ list_add_tail(&driver->list, &drivers);
+ mutex_unlock(&drivers_lock);
+
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list)
+ host1x_attach_driver(host1x, driver);
+
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(host1x_driver_register);
+
+void host1x_driver_unregister(struct host1x_driver *driver)
+{
+ mutex_lock(&drivers_lock);
+ list_del_init(&driver->list);
+ mutex_unlock(&drivers_lock);
+}
+EXPORT_SYMBOL(host1x_driver_unregister);
+
+int host1x_client_register(struct host1x_client *client)
+{
+ struct host1x *host1x;
+ int err;
+
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list) {
+ err = host1x_register_client(host1x, client);
+ if (!err) {
+ mutex_unlock(&devices_lock);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+
+ mutex_lock(&clients_lock);
+ list_add_tail(&client->list, &clients);
+ mutex_unlock(&clients_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(host1x_client_register);
+
+int host1x_client_unregister(struct host1x_client *client)
+{
+ struct host1x_client *c;
+ struct host1x *host1x;
+ int err;
+
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list) {
+ err = host1x_unregister_client(host1x, client);
+ if (!err) {
+ mutex_unlock(&devices_lock);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+ mutex_lock(&clients_lock);
+
+ list_for_each_entry(c, &clients, list) {
+ if (c == client) {
+ list_del_init(&c->list);
+ break;
+ }
+ }
+
+ mutex_unlock(&clients_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(host1x_client_unregister);
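For context, a consumer of this new bus supplies a struct host1x_driver whose subdevs table lists the device-tree compatibles it aggregates, and the individual sub-device drivers register host1x clients that the bus matches against those nodes. A hypothetical sketch using only the fields and entry points that bus.c above actually touches (the full definitions live in include/linux/host1x.h; all names below are illustrative):

#include <linux/host1x.h>
#include <linux/of.h>

static const struct of_device_id example_subdevs[] = {
	{ .compatible = "vendor,example-display" },	/* hypothetical */
	{ },
};

/* called once every listed sub-device has registered its client */
static int example_probe(struct host1x_device *dev)
{
	return 0;
}

static int example_remove(struct host1x_device *dev)
{
	return 0;
}

static struct host1x_driver example_driver = {
	.name = "example",
	.subdevs = example_subdevs,
	.probe = example_probe,
	.remove = example_remove,
};

/*
 * From module init/exit:
 *	host1x_driver_register(&example_driver);
 *	host1x_driver_unregister(&example_driver);
 */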
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/bus.h
index 9b85f10f4a4..4099e99212c 100644
--- a/drivers/gpu/host1x/host1x_client.h
+++ b/drivers/gpu/host1x/bus.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -14,22 +15,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HOST1X_CLIENT_H
-#define HOST1X_CLIENT_H
+#ifndef HOST1X_BUS_H
+#define HOST1X_BUS_H
-struct device;
-struct platform_device;
+struct host1x;
-#ifdef CONFIG_DRM_TEGRA
-int host1x_drm_alloc(struct platform_device *pdev);
-#else
-static inline int host1x_drm_alloc(struct platform_device *pdev)
-{
- return 0;
-}
-#endif
+int host1x_bus_init(void);
+void host1x_bus_exit(void);
-void host1x_set_drm_data(struct device *dev, void *data);
-void *host1x_get_drm_data(struct device *dev);
+int host1x_register(struct host1x *host1x);
+int host1x_unregister(struct host1x *host1x);
#endif
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index de72172d3b5..3995255b16c 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -20,6 +20,7 @@
#include <asm/cacheflush.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/host1x.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
@@ -30,7 +31,6 @@
#include "channel.h"
#include "dev.h"
#include "debug.h"
-#include "host1x_bo.h"
#include "job.h"
/*
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 48723b8eea4..df767cf90d5 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -40,12 +40,6 @@ struct host1x_channel {
/* channel list operations */
int host1x_channel_list_init(struct host1x *host);
-struct host1x_channel *host1x_channel_request(struct device *dev);
-void host1x_channel_free(struct host1x_channel *channel);
-struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
-void host1x_channel_put(struct host1x_channel *channel);
-int host1x_job_submit(struct host1x_job *job);
-
#define host1x_for_each_channel(host, channel) \
list_for_each_entry(channel, &host->chlist.list, list)
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 47163029987..80da003d63d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -27,24 +27,13 @@
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
+#include "bus.h"
#include "dev.h"
#include "intr.h"
#include "channel.h"
#include "debug.h"
#include "hw/host1x01.h"
-#include "host1x_client.h"
-
-void host1x_set_drm_data(struct device *dev, void *data)
-{
- struct host1x *host1x = dev_get_drvdata(dev);
- host1x->drm_data = data;
-}
-
-void *host1x_get_drm_data(struct device *dev)
-{
- struct host1x *host1x = dev_get_drvdata(dev);
- return host1x ? host1x->drm_data : NULL;
-}
+#include "hw/host1x02.h"
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -79,7 +68,17 @@ static const struct host1x_info host1x01_info = {
.sync_offset = 0x3000,
};
+static const struct host1x_info host1x02_info = {
+ .nb_channels = 9,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 12,
+ .init = host1x02_init,
+ .sync_offset = 0x3000,
+};
+
static struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
{ },
@@ -114,6 +113,9 @@ static int host1x_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
+ mutex_init(&host->devices_lock);
+ INIT_LIST_HEAD(&host->devices);
+ INIT_LIST_HEAD(&host->list);
host->dev = &pdev->dev;
host->info = id->data;
@@ -152,7 +154,7 @@ static int host1x_probe(struct platform_device *pdev)
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- return err;
+ goto fail_unprepare_disable;
}
err = host1x_intr_init(host, syncpt_irq);
@@ -163,19 +165,26 @@ static int host1x_probe(struct platform_device *pdev)
host1x_debug_init(host);
- host1x_drm_alloc(pdev);
+ err = host1x_register(host);
+ if (err < 0)
+ goto fail_deinit_intr;
return 0;
+fail_deinit_intr:
+ host1x_intr_deinit(host);
fail_deinit_syncpt:
host1x_syncpt_deinit(host);
+fail_unprepare_disable:
+ clk_disable_unprepare(host->clk);
return err;
}
-static int __exit host1x_remove(struct platform_device *pdev)
+static int host1x_remove(struct platform_device *pdev)
{
struct host1x *host = platform_get_drvdata(pdev);
+ host1x_unregister(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
clk_disable_unprepare(host->clk);
@@ -184,59 +193,36 @@ static int __exit host1x_remove(struct platform_device *pdev)
}
static struct platform_driver tegra_host1x_driver = {
- .probe = host1x_probe,
- .remove = __exit_p(host1x_remove),
.driver = {
- .owner = THIS_MODULE,
.name = "tegra-host1x",
.of_match_table = host1x_of_match,
},
+ .probe = host1x_probe,
+ .remove = host1x_remove,
};
static int __init tegra_host1x_init(void)
{
int err;
- err = platform_driver_register(&tegra_host1x_driver);
+ err = host1x_bus_init();
if (err < 0)
return err;
-#ifdef CONFIG_DRM_TEGRA
- err = platform_driver_register(&tegra_dc_driver);
- if (err < 0)
- goto unregister_host1x;
-
- err = platform_driver_register(&tegra_hdmi_driver);
- if (err < 0)
- goto unregister_dc;
-
- err = platform_driver_register(&tegra_gr2d_driver);
- if (err < 0)
- goto unregister_hdmi;
-#endif
+ err = platform_driver_register(&tegra_host1x_driver);
+ if (err < 0) {
+ host1x_bus_exit();
+ return err;
+ }
return 0;
-
-#ifdef CONFIG_DRM_TEGRA
-unregister_hdmi:
- platform_driver_unregister(&tegra_hdmi_driver);
-unregister_dc:
- platform_driver_unregister(&tegra_dc_driver);
-unregister_host1x:
- platform_driver_unregister(&tegra_host1x_driver);
- return err;
-#endif
}
module_init(tegra_host1x_init);
static void __exit tegra_host1x_exit(void)
{
-#ifdef CONFIG_DRM_TEGRA
- platform_driver_unregister(&tegra_gr2d_driver);
- platform_driver_unregister(&tegra_hdmi_driver);
- platform_driver_unregister(&tegra_dc_driver);
-#endif
platform_driver_unregister(&tegra_host1x_driver);
+ host1x_bus_exit();
}
module_exit(tegra_host1x_exit);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index bed90a8131b..a61a976e7a4 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -27,6 +27,7 @@
#include "job.h"
struct host1x_syncpt;
+struct host1x_syncpt_base;
struct host1x_channel;
struct host1x_cdma;
struct host1x_job;
@@ -102,6 +103,7 @@ struct host1x {
void __iomem *regs;
struct host1x_syncpt *syncpt;
+ struct host1x_syncpt_base *bases;
struct device *dev;
struct clk *clk;
@@ -125,7 +127,10 @@ struct host1x {
struct dentry *debugfs;
- void *drm_data;
+ struct mutex devices_lock;
+ struct list_head devices;
+
+ struct list_head list;
};
void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +306,4 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
host->debug_op->show_mlocks(host, o);
}
-extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_gr2d_driver;
-
#endif
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
deleted file mode 100644
index 8c61ceeaa12..00000000000
--- a/drivers/gpu/host1x/drm/drm.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include <drm/drm.h>
-#include <drm/drmP.h>
-
-#include "host1x_client.h"
-#include "dev.h"
-#include "drm.h"
-#include "gem.h"
-#include "syncpt.h"
-
-#define DRIVER_NAME "tegra"
-#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-struct host1x_drm_client {
- struct host1x_client *client;
- struct device_node *np;
- struct list_head list;
-};
-
-static int host1x_add_drm_client(struct host1x_drm *host1x,
- struct device_node *np)
-{
- struct host1x_drm_client *client;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&client->list);
- client->np = of_node_get(np);
-
- list_add_tail(&client->list, &host1x->drm_clients);
-
- return 0;
-}
-
-static int host1x_activate_drm_client(struct host1x_drm *host1x,
- struct host1x_drm_client *drm,
- struct host1x_client *client)
-{
- mutex_lock(&host1x->drm_clients_lock);
- list_del_init(&drm->list);
- list_add_tail(&drm->list, &host1x->drm_active);
- drm->client = client;
- mutex_unlock(&host1x->drm_clients_lock);
-
- return 0;
-}
-
-static int host1x_remove_drm_client(struct host1x_drm *host1x,
- struct host1x_drm_client *client)
-{
- mutex_lock(&host1x->drm_clients_lock);
- list_del_init(&client->list);
- mutex_unlock(&host1x->drm_clients_lock);
-
- of_node_put(client->np);
- kfree(client);
-
- return 0;
-}
-
-static int host1x_parse_dt(struct host1x_drm *host1x)
-{
- static const char * const compat[] = {
- "nvidia,tegra20-dc",
- "nvidia,tegra20-hdmi",
- "nvidia,tegra20-gr2d",
- "nvidia,tegra30-dc",
- "nvidia,tegra30-hdmi",
- "nvidia,tegra30-gr2d",
- };
- unsigned int i;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(compat); i++) {
- struct device_node *np;
-
- for_each_child_of_node(host1x->dev->of_node, np) {
- if (of_device_is_compatible(np, compat[i]) &&
- of_device_is_available(np)) {
- err = host1x_add_drm_client(host1x, np);
- if (err < 0)
- return err;
- }
- }
- }
-
- return 0;
-}
-
-int host1x_drm_alloc(struct platform_device *pdev)
-{
- struct host1x_drm *host1x;
- int err;
-
- host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
- if (!host1x)
- return -ENOMEM;
-
- mutex_init(&host1x->drm_clients_lock);
- INIT_LIST_HEAD(&host1x->drm_clients);
- INIT_LIST_HEAD(&host1x->drm_active);
- mutex_init(&host1x->clients_lock);
- INIT_LIST_HEAD(&host1x->clients);
- host1x->dev = &pdev->dev;
-
- err = host1x_parse_dt(host1x);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
- return err;
- }
-
- host1x_set_drm_data(&pdev->dev, host1x);
-
- return 0;
-}
-
-int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
-{
- struct host1x_client *client;
-
- mutex_lock(&host1x->clients_lock);
-
- list_for_each_entry(client, &host1x->clients, list) {
- if (client->ops && client->ops->drm_init) {
- int err = client->ops->drm_init(client, drm);
- if (err < 0) {
- dev_err(host1x->dev,
- "DRM setup failed for %s: %d\n",
- dev_name(client->dev), err);
- mutex_unlock(&host1x->clients_lock);
- return err;
- }
- }
- }
-
- mutex_unlock(&host1x->clients_lock);
-
- return 0;
-}
-
-int host1x_drm_exit(struct host1x_drm *host1x)
-{
- struct platform_device *pdev = to_platform_device(host1x->dev);
- struct host1x_client *client;
-
- if (!host1x->drm)
- return 0;
-
- mutex_lock(&host1x->clients_lock);
-
- list_for_each_entry_reverse(client, &host1x->clients, list) {
- if (client->ops && client->ops->drm_exit) {
- int err = client->ops->drm_exit(client);
- if (err < 0) {
- dev_err(host1x->dev,
- "DRM cleanup failed for %s: %d\n",
- dev_name(client->dev), err);
- mutex_unlock(&host1x->clients_lock);
- return err;
- }
- }
- }
-
- mutex_unlock(&host1x->clients_lock);
-
- drm_platform_exit(&tegra_drm_driver, pdev);
- host1x->drm = NULL;
-
- return 0;
-}
-
-int host1x_register_client(struct host1x_drm *host1x,
- struct host1x_client *client)
-{
- struct host1x_drm_client *drm, *tmp;
- int err;
-
- mutex_lock(&host1x->clients_lock);
- list_add_tail(&client->list, &host1x->clients);
- mutex_unlock(&host1x->clients_lock);
-
- list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
- if (drm->np == client->dev->of_node)
- host1x_activate_drm_client(host1x, drm, client);
-
- if (list_empty(&host1x->drm_clients)) {
- struct platform_device *pdev = to_platform_device(host1x->dev);
-
- err = drm_platform_init(&tegra_drm_driver, pdev);
- if (err < 0) {
- dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
- return err;
- }
- }
-
- return 0;
-}
-
-int host1x_unregister_client(struct host1x_drm *host1x,
- struct host1x_client *client)
-{
- struct host1x_drm_client *drm, *tmp;
- int err;
-
- list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
- if (drm->client == client) {
- err = host1x_drm_exit(host1x);
- if (err < 0) {
- dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
- err);
- return err;
- }
-
- host1x_remove_drm_client(host1x, drm);
- break;
- }
- }
-
- mutex_lock(&host1x->clients_lock);
- list_del_init(&client->list);
- mutex_unlock(&host1x->clients_lock);
-
- return 0;
-}
-
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_drm *host1x;
- int err;
-
- host1x = host1x_get_drm_data(drm->dev);
- drm->dev_private = host1x;
- host1x->drm = drm;
-
- drm_mode_config_init(drm);
-
- err = host1x_drm_init(host1x, drm);
- if (err < 0)
- return err;
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = 1;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- return err;
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- return err;
-
- drm_kms_helper_poll_init(drm);
-
- return 0;
-}
-
-static int tegra_drm_unload(struct drm_device *drm)
-{
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
-
- drm_mode_config_cleanup(drm);
-
- return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
- struct host1x_drm_file *fpriv;
-
- fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (!fpriv)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&fpriv->contexts);
- filp->driver_priv = fpriv;
-
- return 0;
-}
-
-static void host1x_drm_context_free(struct host1x_drm_context *context)
-{
- context->client->ops->close_channel(context);
- kfree(context);
-}
-
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
- struct host1x_drm *host1x = drm->dev_private;
-
- tegra_fbdev_restore_mode(host1x->fbdev);
-}
-
-#ifdef CONFIG_DRM_TEGRA_STAGING
-static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
- struct host1x_drm_context *context)
-{
- struct host1x_drm_context *ctx;
-
- list_for_each_entry(ctx, &file->contexts, list)
- if (ctx == context)
- return true;
-
- return false;
-}
-
-static int tegra_gem_create(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_gem_create *args = data;
- struct tegra_bo *bo;
-
- bo = tegra_bo_create_with_handle(file, drm, args->size,
- &args->handle);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- return 0;
-}
-
-static int tegra_gem_mmap(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_gem_mmap *args = data;
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(drm, file, args->handle);
- if (!gem)
- return -EINVAL;
-
- bo = to_tegra_bo(gem);
-
- args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
-
- drm_gem_object_unreference(gem);
-
- return 0;
-}
-
-static int tegra_syncpt_read(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_syncpt_read *args = data;
- struct host1x *host = dev_get_drvdata(drm->dev);
- struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
- if (!sp)
- return -EINVAL;
-
- args->value = host1x_syncpt_read_min(sp);
- return 0;
-}
-
-static int tegra_syncpt_incr(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_syncpt_incr *args = data;
- struct host1x *host = dev_get_drvdata(drm->dev);
- struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
- if (!sp)
- return -EINVAL;
-
- return host1x_syncpt_incr(sp);
-}
-
-static int tegra_syncpt_wait(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_syncpt_wait *args = data;
- struct host1x *host = dev_get_drvdata(drm->dev);
- struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
- if (!sp)
- return -EINVAL;
-
- return host1x_syncpt_wait(sp, args->thresh, args->timeout,
- &args->value);
-}
-
-static int tegra_open_channel(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_open_channel *args = data;
- struct host1x_client *client;
- struct host1x_drm_context *context;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm *host1x = drm->dev_private;
- int err = -ENODEV;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- list_for_each_entry(client, &host1x->clients, list)
- if (client->class == args->client) {
- err = client->ops->open_channel(client, context);
- if (err)
- break;
-
- context->client = client;
- list_add(&context->list, &fpriv->contexts);
- args->context = (uintptr_t)context;
- return 0;
- }
-
- kfree(context);
- return err;
-}
-
-static int tegra_close_channel(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_close_channel *args = data;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context =
- (struct host1x_drm_context *)(uintptr_t)args->context;
-
- if (!host1x_drm_file_owns_context(fpriv, context))
- return -EINVAL;
-
- list_del(&context->list);
- host1x_drm_context_free(context);
-
- return 0;
-}
-
-static int tegra_get_syncpt(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_get_syncpt *args = data;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context =
- (struct host1x_drm_context *)(uintptr_t)args->context;
- struct host1x_syncpt *syncpt;
-
- if (!host1x_drm_file_owns_context(fpriv, context))
- return -ENODEV;
-
- if (args->index >= context->client->num_syncpts)
- return -EINVAL;
-
- syncpt = context->client->syncpts[args->index];
- args->id = host1x_syncpt_id(syncpt);
-
- return 0;
-}
-
-static int tegra_submit(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_submit *args = data;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context =
- (struct host1x_drm_context *)(uintptr_t)args->context;
-
- if (!host1x_drm_file_owns_context(fpriv, context))
- return -ENODEV;
-
- return context->client->ops->submit(context, args, drm, file);
-}
-#endif
-
-static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
-#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
-#endif
-};
-
-static const struct file_operations tegra_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = tegra_drm_mmap,
- .poll = drm_poll,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
- return crtc;
- }
-
- return NULL;
-}
-
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
-{
- /* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
-}
-
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (!crtc)
- return -ENODEV;
-
- tegra_dc_enable_vblank(dc);
-
- return 0;
-}
-
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (crtc)
- tegra_dc_disable_vblank(dc);
-}
-
-static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
-{
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context, *tmp;
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- tegra_dc_cancel_page_flip(crtc, file);
-
- list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
- host1x_drm_context_free(context);
-
- kfree(fpriv);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)s->private;
- struct drm_device *drm = node->minor->dev;
- struct drm_framebuffer *fb;
-
- mutex_lock(&drm->mode_config.fb_lock);
-
- list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
- seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
- fb->base.id, fb->width, fb->height, fb->depth,
- fb->bits_per_pixel,
- atomic_read(&fb->refcount.refcount));
- }
-
- mutex_unlock(&drm->mode_config.fb_lock);
-
- return 0;
-}
-
-static struct drm_info_list tegra_debugfs_list[] = {
- { "framebuffers", tegra_debugfs_framebuffers, 0 },
-};
-
-static int tegra_debugfs_init(struct drm_minor *minor)
-{
- return drm_debugfs_create_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list),
- minor->debugfs_root, minor);
-}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list), minor);
-}
-#endif
-
-struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
- .open = tegra_drm_open,
- .preclose = tegra_drm_preclose,
- .lastclose = tegra_drm_lastclose,
-
- .get_vblank_counter = tegra_drm_get_vblank_counter,
- .enable_vblank = tegra_drm_enable_vblank,
- .disable_vblank = tegra_drm_disable_vblank,
-
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = tegra_debugfs_init,
- .debugfs_cleanup = tegra_debugfs_cleanup,
-#endif
-
- .gem_free_object = tegra_bo_free_object,
- .gem_vm_ops = &tegra_bo_vm_ops,
- .dumb_create = tegra_bo_dumb_create,
- .dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
-
- .ioctls = tegra_drm_ioctls,
- .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
- .fops = &tegra_drm_fops,
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644
index 27ffcf15a4b..00000000000
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * drivers/video/tegra/host/gr2d/gr2d.c
- *
- * Tegra Graphics 2D
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-
-#include "channel.h"
-#include "drm.h"
-#include "gem.h"
-#include "job.h"
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "host1x_client.h"
-#include "syncpt.h"
-
-struct gr2d {
- struct host1x_client client;
- struct clk *clk;
- struct host1x_channel *channel;
- unsigned long *addr_regs;
-};
-
-static inline struct gr2d *to_gr2d(struct host1x_client *client)
-{
- return container_of(client, struct gr2d, client);
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
-
-static int gr2d_client_init(struct host1x_client *client,
- struct drm_device *drm)
-{
- return 0;
-}
-
-static int gr2d_client_exit(struct host1x_client *client)
-{
- return 0;
-}
-
-static int gr2d_open_channel(struct host1x_client *client,
- struct host1x_drm_context *context)
-{
- struct gr2d *gr2d = to_gr2d(client);
-
- context->channel = host1x_channel_get(gr2d->channel);
-
- if (!context->channel)
- return -ENOMEM;
-
- return 0;
-}
-
-static void gr2d_close_channel(struct host1x_drm_context *context)
-{
- host1x_channel_put(context->channel);
-}
-
-static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
- struct drm_file *file,
- u32 handle)
-{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(drm, file, handle);
- if (!gem)
- return NULL;
-
- mutex_lock(&drm->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&drm->struct_mutex);
-
- bo = to_tegra_bo(gem);
- return &bo->base;
-}
-
-static int gr2d_submit(struct host1x_drm_context *context,
- struct drm_tegra_submit *args, struct drm_device *drm,
- struct drm_file *file)
-{
- struct host1x_job *job;
- unsigned int num_cmdbufs = args->num_cmdbufs;
- unsigned int num_relocs = args->num_relocs;
- unsigned int num_waitchks = args->num_waitchks;
- struct drm_tegra_cmdbuf __user *cmdbufs =
- (void * __user)(uintptr_t)args->cmdbufs;
- struct drm_tegra_reloc __user *relocs =
- (void * __user)(uintptr_t)args->relocs;
- struct drm_tegra_waitchk __user *waitchks =
- (void * __user)(uintptr_t)args->waitchks;
- struct drm_tegra_syncpt syncpt;
- int err;
-
- /* We don't yet support other than one syncpt_incr struct per submit */
- if (args->num_syncpts != 1)
- return -EINVAL;
-
- job = host1x_job_alloc(context->channel, args->num_cmdbufs,
- args->num_relocs, args->num_waitchks);
- if (!job)
- return -ENOMEM;
-
- job->num_relocs = args->num_relocs;
- job->num_waitchk = args->num_waitchks;
- job->client = (u32)args->context;
- job->class = context->client->class;
- job->serialize = true;
-
- while (num_cmdbufs) {
- struct drm_tegra_cmdbuf cmdbuf;
- struct host1x_bo *bo;
-
- err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
- if (err)
- goto fail;
-
- bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
- if (!bo) {
- err = -ENOENT;
- goto fail;
- }
-
- host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
- num_cmdbufs--;
- cmdbufs++;
- }
-
- err = copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs);
- if (err)
- goto fail;
-
- while (num_relocs--) {
- struct host1x_reloc *reloc = &job->relocarray[num_relocs];
- struct host1x_bo *cmdbuf, *target;
-
- cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
- target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
- reloc->cmdbuf = cmdbuf;
- reloc->target = target;
-
- if (!reloc->target || !reloc->cmdbuf) {
- err = -ENOENT;
- goto fail;
- }
- }
-
- err = copy_from_user(job->waitchk, waitchks,
- sizeof(*waitchks) * num_waitchks);
- if (err)
- goto fail;
-
- err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
- sizeof(syncpt));
- if (err)
- goto fail;
-
- job->syncpt_id = syncpt.id;
- job->syncpt_incrs = syncpt.incrs;
- job->timeout = 10000;
- job->is_addr_reg = gr2d_is_addr_reg;
-
- if (args->timeout && args->timeout < 10000)
- job->timeout = args->timeout;
-
- err = host1x_job_pin(job, context->client->dev);
- if (err)
- goto fail;
-
- err = host1x_job_submit(job);
- if (err)
- goto fail_submit;
-
- args->fence = job->syncpt_end;
-
- host1x_job_put(job);
- return 0;
-
-fail_submit:
- host1x_job_unpin(job);
-fail:
- host1x_job_put(job);
- return err;
-}
-
-static struct host1x_client_ops gr2d_client_ops = {
- .drm_init = gr2d_client_init,
- .drm_exit = gr2d_client_exit,
- .open_channel = gr2d_open_channel,
- .close_channel = gr2d_close_channel,
- .submit = gr2d_submit,
-};
-
-static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
-{
- const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
- 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
- unsigned long *bitmap;
- int i;
-
- bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
- GFP_KERNEL);
-
- for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
- u32 reg = gr2d_addr_regs[i];
- bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
- }
-
- gr2d->addr_regs = bitmap;
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
-{
- struct gr2d *gr2d = dev_get_drvdata(dev);
-
- switch (class) {
- case HOST1X_CLASS_HOST1X:
- return reg == 0x2b;
- case HOST1X_CLASS_GR2D:
- case HOST1X_CLASS_GR2D_SB:
- reg &= 0xff;
- if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
- return 1;
- default:
- return 0;
- }
-}
-
-static const struct of_device_id gr2d_match[] = {
- { .compatible = "nvidia,tegra30-gr2d" },
- { .compatible = "nvidia,tegra20-gr2d" },
- { },
-};
-
-static int gr2d_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
- int err;
- struct gr2d *gr2d = NULL;
- struct host1x_syncpt **syncpts;
-
- gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
- if (!gr2d)
- return -ENOMEM;
-
- syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
- if (!syncpts)
- return -ENOMEM;
-
- gr2d->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(gr2d->clk)) {
- dev_err(dev, "cannot get clock\n");
- return PTR_ERR(gr2d->clk);
- }
-
- err = clk_prepare_enable(gr2d->clk);
- if (err) {
- dev_err(dev, "cannot turn on clock\n");
- return err;
- }
-
- gr2d->channel = host1x_channel_request(dev);
- if (!gr2d->channel)
- return -ENOMEM;
-
- *syncpts = host1x_syncpt_request(dev, false);
- if (!(*syncpts)) {
- host1x_channel_free(gr2d->channel);
- return -ENOMEM;
- }
-
- gr2d->client.ops = &gr2d_client_ops;
- gr2d->client.dev = dev;
- gr2d->client.class = HOST1X_CLASS_GR2D;
- gr2d->client.syncpts = syncpts;
- gr2d->client.num_syncpts = 1;
-
- err = host1x_register_client(host1x, &gr2d->client);
- if (err < 0) {
- dev_err(dev, "failed to register host1x client: %d\n", err);
- return err;
- }
-
- gr2d_init_addr_reg_map(dev, gr2d);
-
- platform_set_drvdata(pdev, gr2d);
-
- return 0;
-}
-
-static int __exit gr2d_remove(struct platform_device *pdev)
-{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
- struct gr2d *gr2d = platform_get_drvdata(pdev);
- unsigned int i;
- int err;
-
- err = host1x_unregister_client(host1x, &gr2d->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
- return err;
- }
-
- for (i = 0; i < gr2d->client.num_syncpts; i++)
- host1x_syncpt_free(gr2d->client.syncpts[i]);
-
- host1x_channel_free(gr2d->channel);
- clk_disable_unprepare(gr2d->clk);
-
- return 0;
-}
-
-struct platform_driver tegra_gr2d_driver = {
- .probe = gr2d_probe,
- .remove = __exit_p(gr2d_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "gr2d",
- .of_match_table = gr2d_match,
- }
-};
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644
index a2bc1e65e97..00000000000
--- a/drivers/gpu/host1x/host1x.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Tegra host1x driver
- *
- * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __LINUX_HOST1X_H
-#define __LINUX_HOST1X_H
-
-enum host1x_class {
- HOST1X_CLASS_HOST1X = 0x1,
- HOST1X_CLASS_GR2D = 0x51,
- HOST1X_CLASS_GR2D_SB = 0x52
-};
-
-#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644
index 4c1f10bd773..00000000000
--- a/drivers/gpu/host1x/host1x_bo.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Tegra host1x Memory Management Abstraction header
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HOST1X_BO_H
-#define _HOST1X_BO_H
-
-struct host1x_bo;
-
-struct host1x_bo_ops {
- struct host1x_bo *(*get)(struct host1x_bo *bo);
- void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
- void *(*mmap)(struct host1x_bo *bo);
- void (*munmap)(struct host1x_bo *bo, void *addr);
- void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
- void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
-};
-
-struct host1x_bo {
- const struct host1x_bo_ops *ops;
-};
-
-static inline void host1x_bo_init(struct host1x_bo *bo,
- const struct host1x_bo_ops *ops)
-{
- bo->ops = ops;
-}
-
-static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
-{
- return bo->ops->get(bo);
-}
-
-static inline void host1x_bo_put(struct host1x_bo *bo)
-{
- bo->ops->put(bo);
-}
-
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
-{
- return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
- bo->ops->unpin(bo, sgt);
-}
-
-static inline void *host1x_bo_mmap(struct host1x_bo *bo)
-{
- return bo->ops->mmap(bo);
-}
-
-static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
-{
- bo->ops->munmap(bo, addr);
-}
-
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
- return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
- unsigned int pagenum, void *addr)
-{
- bo->ops->kunmap(bo, pagenum, addr);
-}
-
-#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644
index 9b50863a223..00000000000
--- a/drivers/gpu/host1x/hw/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-y = -Idrivers/gpu/host1x
-
-host1x-hw-objs = \
- host1x01.o
-
-obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2ee4ad55c4d..37e2a63241a 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -20,10 +20,10 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include "cdma.h"
-#include "channel.h"
-#include "dev.h"
-#include "debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../debug.h"
/*
 * Put the restart at the end of pushbuffer memory
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index ee199623e36..4608257ab65 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -16,15 +16,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/host1x.h>
#include <linux/slab.h>
+
#include <trace/events/host1x.h>
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "channel.h"
-#include "dev.h"
-#include "intr.h"
-#include "job.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../intr.h"
+#include "../job.h"
#define HOST1X_CHANNEL_SIZE 16384
#define TRACE_MAX_LENGTH 128U
@@ -67,6 +67,22 @@ static void submit_gathers(struct host1x_job *job)
}
}
+static inline void synchronize_syncpt_base(struct host1x_job *job)
+{
+ struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+ struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+ u32 id, value;
+
+ value = host1x_syncpt_read_max(sp);
+ id = sp->base->id;
+
+ host1x_cdma_push(&job->channel->cdma,
+ host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+ HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
+ HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
+ HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
+}
+
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
@@ -118,6 +134,10 @@ static int channel_submit(struct host1x_job *job)
host1x_syncpt_read_max(sp)));
}
+ /* Synchronize base register to allow using it for relative waiting */
+ if (sp->base)
+ synchronize_syncpt_base(job);
+
syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
job->syncpt_end = syncval;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 334c038052f..640c75ca5a8 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -15,18 +15,10 @@
*
*/
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <linux/io.h>
-
-#include "dev.h"
-#include "debug.h"
-#include "cdma.h"
-#include "channel.h"
-#include "host1x_bo.h"
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index a14e91cd1e5..859b73beb4d 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -17,17 +17,17 @@
*/
/* include hw specification */
-#include "hw/host1x01.h"
-#include "hw/host1x01_hardware.h"
+#include "host1x01.h"
+#include "host1x01_hardware.h"
/* include code */
-#include "hw/cdma_hw.c"
-#include "hw/channel_hw.c"
-#include "hw/debug_hw.c"
-#include "hw/intr_hw.c"
-#include "hw/syncpt_hw.c"
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
-#include "dev.h"
+#include "../dev.h"
int host1x01_init(struct host1x *host)
{
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644
index 00000000000..e98caca0ca4
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x01.h"
+#include "host1x01_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x02_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x02.h b/drivers/gpu/host1x/hw/host1x02.h
new file mode 100644
index 00000000000..f7486609a90
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X02_H
+#define HOST1X_HOST1X02_H
+
+struct host1x;
+
+int host1x02_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
index 42f3ce19ca3..f7553599ee2 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
{
return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644
index 00000000000..e490bcde33f
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
+#define HOST1X_HW_HOST1X02_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+ host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+ return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+ host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+ return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+ host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+ return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+ host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+ return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+ host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+ return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+ host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+ return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+ host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+ return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+ host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+ host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+ return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+ host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+ return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+ host1x_channel_dmactrl_dmainitget()
+
+#endif
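The accessor names above follow the convention documented in the header's leading comment: _r() returns a register offset, the bare bit helpers and _F() macros return values already shifted into place, and _V() extracts a field from a full register value. A minimal sketch of how they compose, not taken from this patch, assuming only the standard MMIO accessors from <linux/io.h> and that @regs points at a channel register aperture:

/*
 * Illustrative only: stop command DMA on a channel and read the bit back.
 * The function name is hypothetical; only the macros defined above and
 * readl()/writel() are assumed.
 */
#include <linux/io.h>
#include <linux/types.h>

#include "hw_host1x02_channel.h"

static bool example_stop_channel_dma(void __iomem *regs)
{
	u32 dmactrl = readl(regs + HOST1X_CHANNEL_DMACTRL);

	/* bit helpers return the mask already shifted into position */
	dmactrl |= HOST1X_CHANNEL_DMACTRL_DMASTOP;
	writel(dmactrl, regs + HOST1X_CHANNEL_DMACTRL);

	/* _V() helpers pull a field back out of a full register value */
	dmactrl = readl(regs + HOST1X_CHANNEL_DMACTRL);
	return HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) == 1;
}

The same naming convention applies to the hw_host1x02_sync.h and hw_host1x02_uclass.h headers added below, which carry the identical explanatory comment.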
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644
index 00000000000..4495401525e
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X02_SYNC_H
+#define HOST1X_HW_HOST1X02_SYNC_H
+
+#define REGISTER_STRIDE 4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+ return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+ host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+ return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+ host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+ return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+ host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+ return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+ host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+ return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+ host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+ host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+ host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+ return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+ host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+ return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+ host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+ return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+ host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+ return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+ host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+ return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+ host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+ return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+ host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+ return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+ host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+ host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+ host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+ return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+ host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+ return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+ host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+ return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+ host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+ return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+ host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+ return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+ host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+ return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+ host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+ return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+ host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+ host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+ return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+ host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+ return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+ host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+ return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+ host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+ host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+ host1x_sync_cbstat_cbclass_v(r)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644
index 00000000000..a3b3c987441
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X02_UCLASS_H
+#define HOST1X_HW_HOST1X02_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+
+#endif
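The LOAD_SYNCPT_BASE helpers above are what the new synchronize_syncpt_base() in channel_hw.c relies on: the data word pushed after the setclass opcode carries the syncpt base index in bits 31:24 and the 24-bit base value in bits 23:0. A minimal sketch of just that payload word, assuming only the macros defined in this header (the helper name is for illustration only):

/*
 * Illustrative only: build the data word that accompanies a
 * LOAD_SYNCPT_BASE method, as pushed by synchronize_syncpt_base().
 */
#include <linux/types.h>

#include "hw_host1x02_uclass.h"

static u32 example_load_syncpt_base_payload(unsigned int base_id, u32 value)
{
	/* base index lands in bits 31:24, the 24-bit value in bits 23:0 */
	return HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(base_id) |
	       HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value);
}

In the channel_hw.c hunk earlier in this series, this word is paired with host1x_opcode_setclass(HOST1X_CLASS_HOST1X, HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1) so that relative syncpoint waits can use the base register.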
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef1efc..b26dcc83bc1 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -22,8 +22,8 @@
#include <linux/io.h>
#include <asm/mach/irq.h>
-#include "intr.h"
-#include "dev.h"
+#include "../intr.h"
+#include "../dev.h"
/*
* Sync point threshold interrupt service function
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 0cf6095d336..56e85395ac2 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -18,8 +18,8 @@
#include <linux/io.h>
-#include "dev.h"
-#include "syncpt.h"
+#include "../dev.h"
+#include "../syncpt.h"
/*
* Write the current syncpoint value back to hw.
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index c4e1050f225..de5ec333ce1 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -18,6 +18,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
#include "channel.h"
#include "dev.h"
-#include "host1x_bo.h"
#include "job.h"
#include "syncpt.h"
@@ -264,7 +264,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
}
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
- unsigned int offset)
+ unsigned int offset)
{
offset *= sizeof(u32);
@@ -281,7 +281,7 @@ struct host1x_firewall {
unsigned int num_relocs;
struct host1x_reloc *reloc;
- struct host1x_bo *cmdbuf_id;
+ struct host1x_bo *cmdbuf;
unsigned int offset;
u32 words;
@@ -291,25 +291,37 @@ struct host1x_firewall {
u32 count;
};
+static int check_register(struct host1x_firewall *fw, unsigned long offset)
+{
+ if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
+ if (!fw->num_relocs)
+ return -EINVAL;
+
+ if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
+ return -EINVAL;
+
+ fw->num_relocs--;
+ fw->reloc++;
+ }
+
+ return 0;
+}
+
static int check_mask(struct host1x_firewall *fw)
{
u32 mask = fw->mask;
u32 reg = fw->reg;
+ int ret;
while (mask) {
if (fw->words == 0)
return -EINVAL;
if (mask & 1) {
- if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
- if (!fw->num_relocs)
- return -EINVAL;
- if (!check_reloc(fw->reloc, fw->cmdbuf_id,
- fw->offset))
- return -EINVAL;
- fw->reloc++;
- fw->num_relocs--;
- }
+ ret = check_register(fw, reg);
+ if (ret < 0)
+ return ret;
+
fw->words--;
fw->offset++;
}
@@ -324,19 +336,16 @@ static int check_incr(struct host1x_firewall *fw)
{
u32 count = fw->count;
u32 reg = fw->reg;
+ int ret;
while (count) {
if (fw->words == 0)
return -EINVAL;
- if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
- if (!fw->num_relocs)
- return -EINVAL;
- if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
- return -EINVAL;
- fw->reloc++;
- fw->num_relocs--;
- }
+ ret = check_register(fw, reg);
+ if (ret < 0)
+ return ret;
+
reg++;
fw->words--;
fw->offset++;
@@ -348,21 +357,17 @@ static int check_incr(struct host1x_firewall *fw)
static int check_nonincr(struct host1x_firewall *fw)
{
- int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
u32 count = fw->count;
+ int ret;
while (count) {
if (fw->words == 0)
return -EINVAL;
- if (is_addr_reg) {
- if (!fw->num_relocs)
- return -EINVAL;
- if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
- return -EINVAL;
- fw->reloc++;
- fw->num_relocs--;
- }
+ ret = check_register(fw, fw->reg);
+ if (ret < 0)
+ return ret;
+
fw->words--;
fw->offset++;
count--;
@@ -381,7 +386,7 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
return 0;
fw->words = g->words;
- fw->cmdbuf_id = g->bo;
+ fw->cmdbuf = g->bo;
fw->offset = 0;
while (fw->words && !err) {
@@ -436,10 +441,6 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
}
}
- /* No relocs should remain at this point */
- if (fw->num_relocs)
- err = -EINVAL;
-
out:
return err;
}
@@ -493,6 +494,10 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
offset += g->words * sizeof(u32);
}
+ /* No relocs should remain at this point */
+ if (fw.num_relocs)
+ return -EINVAL;
+
return 0;
}
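The refactoring above folds the reloc bookkeeping that was duplicated in check_mask(), check_incr() and check_nonincr() into a single check_register() helper, and moves the leftover-reloc check from validate(), which runs once per gather, to copy_gathers(), which runs once per job, so unused relocations are only rejected after every gather has been inspected.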
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index fba45f20458..33a697d6dce 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -34,15 +34,6 @@ struct host1x_cmdbuf {
u32 pad;
};
-struct host1x_reloc {
- struct host1x_bo *cmdbuf;
- u32 cmdbuf_offset;
- struct host1x_bo *target;
- u32 target_offset;
- u32 shift;
- u32 pad;
-};
-
struct host1x_waitchk {
struct host1x_bo *bo;
u32 offset;
@@ -56,105 +47,6 @@ struct host1x_job_unpin_data {
};
/*
- * Each submit is tracked as a host1x_job.
- */
-struct host1x_job {
- /* When refcount goes to zero, job can be freed */
- struct kref ref;
-
- /* List entry */
- struct list_head list;
-
- /* Channel where job is submitted to */
- struct host1x_channel *channel;
-
- u32 client;
-
- /* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
-
- /* Wait checks to be processed at submit time */
- struct host1x_waitchk *waitchk;
- unsigned int num_waitchk;
- u32 waitchk_mask;
-
- /* Array of handles to be pinned & unpinned */
- struct host1x_reloc *relocarray;
- unsigned int num_relocs;
- struct host1x_job_unpin_data *unpins;
- unsigned int num_unpins;
-
- dma_addr_t *addr_phys;
- dma_addr_t *gather_addr_phys;
- dma_addr_t *reloc_addr_phys;
-
- /* Sync point id, number of increments and end related to the submit */
- u32 syncpt_id;
- u32 syncpt_incrs;
- u32 syncpt_end;
-
- /* Maximum time to wait for this job */
- unsigned int timeout;
-
- /* Index and number of slots used in the push buffer */
- unsigned int first_get;
- unsigned int num_slots;
-
- /* Copy of gathers */
- size_t gather_copy_size;
- dma_addr_t gather_copy;
- u8 *gather_copy_mapped;
-
- /* Check if register is marked as an address reg */
- int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
-
- /* Request a SETCLASS to this class */
- u32 class;
-
- /* Add a channel wait for previous ops to complete */
- bool serialize;
-};
-/*
- * Allocate memory for a job. Just enough memory will be allocated to
- * accomodate the submit.
- */
-struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs,
- u32 num_waitchks);
-
-/*
- * Add a gather to a job.
- */
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
- u32 words, u32 offset);
-
-/*
- * Increment reference going to host1x_job.
- */
-struct host1x_job *host1x_job_get(struct host1x_job *job);
-
-/*
- * Decrement reference job, free if goes to zero.
- */
-void host1x_job_put(struct host1x_job *job);
-
-/*
- * Pin memory related to job. This handles relocation of addresses to the
- * host1x address space. Handles both the gather memory and any other memory
- * referred to from the gather buffers.
- *
- * Handles also patching out host waits that would wait for an expired sync
- * point value.
- */
-int host1x_job_pin(struct host1x_job *job, struct device *dev);
-
-/*
- * Unpin memory related to job.
- */
-void host1x_job_unpin(struct host1x_job *job);
-
-/*
* Dump contents of job to debug output.
*/
void host1x_job_dump(struct device *dev, struct host1x_job *job);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 409745b949d..159c479829c 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -30,9 +30,32 @@
#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15
-static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
- struct device *dev,
- bool client_managed)
+static struct host1x_syncpt_base *
+host1x_syncpt_base_request(struct host1x *host)
+{
+ struct host1x_syncpt_base *bases = host->bases;
+ unsigned int i;
+
+ for (i = 0; i < host->info->nb_bases; i++)
+ if (!bases[i].requested)
+ break;
+
+ if (i >= host->info->nb_bases)
+ return NULL;
+
+ bases[i].requested = true;
+ return &bases[i];
+}
+
+static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
+{
+ if (base)
+ base->requested = false;
+}
+
+static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ struct device *dev,
+ unsigned long flags)
{
int i;
struct host1x_syncpt *sp = host->syncpt;
@@ -44,6 +67,12 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
if (i >= host->info->nb_pts)
return NULL;
+ if (flags & HOST1X_SYNCPT_HAS_BASE) {
+ sp->base = host1x_syncpt_base_request(host);
+ if (!sp->base)
+ return NULL;
+ }
+
name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
@@ -51,7 +80,11 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
sp->dev = dev;
sp->name = name;
- sp->client_managed = client_managed;
+
+ if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
+ sp->client_managed = true;
+ else
+ sp->client_managed = false;
return sp;
}
@@ -303,25 +336,35 @@ int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
int host1x_syncpt_init(struct host1x *host)
{
+ struct host1x_syncpt_base *bases;
struct host1x_syncpt *syncpt;
int i;
syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!syncpt)
return -ENOMEM;
- for (i = 0; i < host->info->nb_pts; ++i) {
+ bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+ GFP_KERNEL);
+ if (!bases)
+ return -ENOMEM;
+
+ for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
}
+ for (i = 0; i < host->info->nb_bases; i++)
+ bases[i].id = i;
+
host->syncpt = syncpt;
+ host->bases = bases;
host1x_syncpt_restore(host);
/* Allocate sync point to use for clearing waits for expired fences */
- host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
+ host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
if (!host->nop_sp)
return -ENOMEM;
@@ -329,10 +372,10 @@ int host1x_syncpt_init(struct host1x *host)
}
struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
- bool client_managed)
+ unsigned long flags)
{
struct host1x *host = dev_get_drvdata(dev->parent);
- return _host1x_syncpt_alloc(host, dev, client_managed);
+ return host1x_syncpt_alloc(host, dev, flags);
}
void host1x_syncpt_free(struct host1x_syncpt *sp)
@@ -340,7 +383,9 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
if (!sp)
return;
+ host1x_syncpt_base_free(sp->base);
kfree(sp->name);
+ sp->base = NULL;
sp->dev = NULL;
sp->name = NULL;
sp->client_managed = false;
@@ -354,6 +399,25 @@ void host1x_syncpt_deinit(struct host1x *host)
kfree(sp->name);
}
+/*
+ * Read max. It indicates how many operations there are in queue, either in
+ * channel or in a software thread.
+ */

+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->min_val);
+}
+
int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
@@ -375,3 +439,13 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
return NULL;
return host->syncpt + id;
}
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
+{
+ return sp ? sp->base : NULL;
+}
+
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
+{
+ return base->id;
+}
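host1x_syncpt_request() now takes a flags word instead of a bool: HOST1X_SYNCPT_CLIENT_MANAGED replaces the old client_managed argument, and HOST1X_SYNCPT_HAS_BASE additionally reserves one of the wait bases allocated in host1x_syncpt_init(). A minimal caller-side sketch, assuming a hypothetical client device pointer and error path (not taken from this patch):

	struct host1x_syncpt_base *base;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_request(client->dev, HOST1X_SYNCPT_HAS_BASE |
						HOST1X_SYNCPT_CLIENT_MANAGED);
	if (!sp)
		return -ENOMEM;

	/* non-NULL only because HOST1X_SYNCPT_HAS_BASE was requested */
	base = host1x_syncpt_get_base(sp);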
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 267c0b9d364..9056465ecd3 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -20,6 +20,7 @@
#define __HOST1X_SYNCPT_H
#include <linux/atomic.h>
+#include <linux/host1x.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -30,6 +31,11 @@ struct host1x;
/* Reserved for replacing an expired wait with a NOP */
#define HOST1X_SYNCPT_RESERVED 0
+struct host1x_syncpt_base {
+ unsigned int id;
+ bool requested;
+};
+
struct host1x_syncpt {
int id;
atomic_t min_val;
@@ -39,6 +45,7 @@ struct host1x_syncpt {
bool client_managed;
struct host1x *host;
struct device *dev;
+ struct host1x_syncpt_base *base;
/* interrupt data */
struct host1x_syncpt_intr intr;
@@ -50,25 +57,6 @@ int host1x_syncpt_init(struct host1x *host);
/* Free sync point array */
void host1x_syncpt_deinit(struct host1x *host);
-/*
- * Read max. It indicates how many operations there are in queue, either in
- * channel or in a software thread.
- * */
-static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
-{
- smp_rmb();
- return (u32)atomic_read(&sp->max_val);
-}
-
-/*
- * Read min, which is a shadow of the current sync point value in hardware.
- */
-static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
-{
- smp_rmb();
- return (u32)atomic_read(&sp->min_val);
-}
-
/* Return number of sync point supported. */
int host1x_syncpt_nb_pts(struct host1x *host);
@@ -112,9 +100,6 @@ static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
return (min == max);
}
-/* Return pointer to struct denoting sync point id. */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
-
/* Load current value from hardware to the shadow register. */
u32 host1x_syncpt_load(struct host1x_syncpt *sp);
@@ -130,16 +115,9 @@ void host1x_syncpt_restore(struct host1x *host);
/* Read current wait base value into shadow register and return it. */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
-/* Request incrementing a sync point. */
-int host1x_syncpt_incr(struct host1x_syncpt *sp);
-
/* Indicate future operations by incrementing the sync point max. */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
-/* Wait until sync point reaches a threshold value, or a timeout. */
-int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
- long timeout, u32 *value);
-
/* Check if sync point id is valid. */
static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
{
@@ -149,14 +127,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
-/* Return id of the sync point */
-u32 host1x_syncpt_id(struct host1x_syncpt *sp);
-
-/* Allocate a sync point for a device. */
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
- bool client_managed);
-
-/* Free a sync point. */
-void host1x_syncpt_free(struct host1x_syncpt *sp);
-
#endif
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index c91d547191d..329fbb9b597 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -242,6 +242,7 @@ config HID_HOLTEK
- Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
Zalman ZM-GM1
- SHARKOON DarkGlider Gaming mouse
+ - LEETGION Hellion Gaming Mouse
config HOLTEK_FF
bool "Holtek On Line Grip force feedback support"
@@ -323,7 +324,7 @@ config HID_LCPOWER
config HID_LENOVO_TPKBD
tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
- depends on USB_HID
+ depends on HID
select NEW_LEDS
select LEDS_CLASS
---help---
@@ -362,19 +363,20 @@ config LOGITECH_FF
- Logitech WingMan Force 3D
- Logitech Formula Force EX
- Logitech WingMan Formula Force GP
- - Logitech MOMO Force wheel
and if you want to enable force feedback for them.
Note: if you say N here, this device will still be supported, but without
force feedback.
config LOGIRUMBLEPAD2_FF
- bool "Logitech RumblePad/Rumblepad 2 force feedback support"
+ bool "Logitech force feedback support (variant 2)"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
help
- Say Y here if you want to enable force feedback support for Logitech
- RumblePad and Rumblepad 2 devices.
+ Say Y here if you want to enable force feedback support for:
+ - Logitech RumblePad
+ - Logitech Rumblepad 2
+ - Logitech Formula Vibration Feedback Wheel
config LOGIG940_FF
bool "Logitech Flight System G940 force feedback support"
@@ -437,6 +439,7 @@ config HID_MULTITOUCH
- Chunghwa panels
- CVTouch panels
- Cypress TrueTouch panels
+ - Elan Microelectronics touch panels
- Elo TouchSystems IntelliTouch Plus panels
- GeneralTouch 'Sensing Win7-TwoFinger' panels
- GoodTouch panels
@@ -453,6 +456,7 @@ config HID_MULTITOUCH
- Pixcir dual touch panels
- Quanta panels
- eGalax dual-touch panels, including the Joojoo and Wetab tablets
+ - SiS multitouch panels
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
@@ -614,6 +618,14 @@ config HID_SONY
* Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
* Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
+config SONY_FF
+ bool "Sony PS2/3 accessories force feedback support"
+ depends on HID_SONY
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have a Sony PS2/3 accessory and want to enable force
+ feedback support for it.
+
config HID_SPEEDLINK
tristate "Speedlink VAD Cezanne mouse support"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a959f4aecaf..30e44318f87 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -95,7 +95,7 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \
- hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-savu.o
+ hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 881cf7b4f9a..497558127bb 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -46,6 +46,12 @@ module_param(iso_layout, uint, 0644);
MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
"(0 = disabled, [1] = enabled)");
+static unsigned int swap_opt_cmd;
+module_param(swap_opt_cmd, uint, 0644);
+MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. "
+ "(For people who want to keep Windows PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -150,6 +156,14 @@ static const struct apple_key_translation apple_iso_keyboard[] = {
{ }
};
+static const struct apple_key_translation swapped_option_cmd_keys[] = {
+ { KEY_LEFTALT, KEY_LEFTMETA },
+ { KEY_LEFTMETA, KEY_LEFTALT },
+ { KEY_RIGHTALT, KEY_RIGHTMETA },
+ { KEY_RIGHTMETA, KEY_RIGHTALT },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -242,6 +256,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (swap_opt_cmd) {
+ trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
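Because swap_opt_cmd is declared with mode 0644, the swap can also be toggled at run time, for example through /sys/module/hid_apple/parameters/swap_opt_cmd (assuming the driver is built as the hid_apple module) or by booting with hid_apple.swap_opt_cmd=1.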
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 64ab94a55aa..a594e478a1e 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -95,7 +95,7 @@ static int axff_init(struct hid_device *hid)
}
}
- if (field_count < 4) {
+ if (field_count < 4 && hid->product != 0xf705) {
hid_err(hid, "not enough fields in the report: %d\n",
field_count);
return -ENODEV;
@@ -180,6 +180,7 @@ static void ax_remove(struct hid_device *hdev)
static const struct hid_device_id ax_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705), },
{ }
};
MODULE_DEVICE_TABLE(hid, ax_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e80da62363b..8c10f274223 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1418,10 +1418,8 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
- if (ret < 0) {
- ret = ret < 0 ? ret : 0;
+ if (ret < 0)
goto unlock;
- }
}
ret = hid_report_raw_event(hid, type, data, size, interrupt);
@@ -1605,6 +1603,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
@@ -1716,6 +1715,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
@@ -1754,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
@@ -1801,21 +1802,28 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
#if IS_ENABLED(CONFIG_HID_ROCCAT)
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2376,15 +2384,6 @@ bool hid_ignore(struct hid_device *hdev)
hdev->type == HID_TYPE_USBNONE)
return true;
break;
- case USB_VENDOR_ID_DWAV:
- /* These are handled by usbtouchscreen. hdev->type is probably
- * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match
- * usbtouchscreen. */
- if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER ||
- hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) &&
- hdev->type != HID_TYPE_USBMOUSE)
- return true;
- break;
case USB_VENDOR_ID_VELLEMAN:
/* These are not HID devices. They are handled by comedi. */
if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index f042a6cf8b1..4e49462870a 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -181,7 +181,40 @@ fail:
*/
static bool elo_broken_firmware(struct usb_device *dev)
{
- return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d;
+ struct usb_device *hub = dev->parent;
+ struct usb_device *child = NULL;
+ u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice);
+ u16 child_vid, child_pid;
+ int i;
+
+ if (!use_fw_quirk)
+ return false;
+ if (fw_lvl != 0x10d)
+ return false;
+
+ /* iterate sibling devices of the touch controller */
+ usb_hub_for_each_child(hub, i, child) {
+ child_vid = le16_to_cpu(child->descriptor.idVendor);
+ child_pid = le16_to_cpu(child->descriptor.idProduct);
+
+ /*
+ * If one of the devices below is attached as a sibling of the
+ * touch controller, then this is a newer IBM 4820 monitor that
+ * does not need the IBM-requested workaround even when the
+ * firmware level is 0x010d (aka 'M').
+ * No other HW can have this combination.
+ */
+ if (child_vid == 0x04b3) {
+ switch (child_pid) {
+ case 0x4676: /* 4820 21x Video */
+ case 0x4677: /* 4820 51x Video */
+ case 0x4678: /* 4820 2Lx Video */
+ case 0x4679: /* 4820 5Lx Video */
+ return false;
+ }
+ }
+ }
+ return true;
}
static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index e696566cde4..0caa676de62 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -28,6 +28,7 @@
* - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
* and Zalman ZM-GM1
* - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
+ * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
*/
static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -40,6 +41,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* 0x2fff, so they don't exceed HID_MAX_USAGES */
switch (hdev->product) {
case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
+ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
&& rdesc[120] == 0xff && rdesc[121] == 0x7f) {
hid_info(hdev, "Fixing up report descriptor\n");
@@ -66,6 +68,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ }
};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f0296a50be5..76559629568 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -332,6 +332,11 @@
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
@@ -448,8 +453,9 @@
#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
-#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
#define USB_VENDOR_ID_IMATION 0x0718
@@ -571,6 +577,7 @@
#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
+#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
#define USB_VENDOR_ID_LUMIO 0x202e
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
@@ -726,6 +733,9 @@
#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK 0x3138
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW 0x31ce
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232
#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
#define USB_VENDOR_ID_SAITEK 0x06a3
@@ -745,6 +755,10 @@
#define USB_VENDOR_ID_SIGMATEL 0x066F
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+#define USB_VENDOR_ID_SIS2_TOUCH 0x0457
+#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
+#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
+
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 31cf29a6ba1..2d25b6cbbc0 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -14,11 +14,9 @@
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/device.h>
-#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
-#include "usbhid/usbhid.h"
#include "hid-ids.h"
@@ -41,10 +39,9 @@ static int tpkbd_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
{
- struct usbhid_device *uhdev;
-
- uhdev = (struct usbhid_device *) hdev->driver_data;
- if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ /* mark the device as pointer */
+ hid_set_drvdata(hdev, (void *)1);
map_key_clear(KEY_MICMUTE);
return 1;
}
@@ -339,7 +336,7 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
struct tpkbd_data_pointer *data_pointer;
size_t name_sz = strlen(dev_name(dev)) + 16;
char *name_mute, *name_micmute;
- int i, ret;
+ int i;
/* Validate required reports. */
for (i = 0; i < 4; i++) {
@@ -354,7 +351,9 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
hid_warn(hdev, "Could not create sysfs group\n");
}
- data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+ data_pointer = devm_kzalloc(&hdev->dev,
+ sizeof(struct tpkbd_data_pointer),
+ GFP_KERNEL);
if (data_pointer == NULL) {
hid_err(hdev, "Could not allocate memory for driver data\n");
return -ENOMEM;
@@ -364,20 +363,13 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
data_pointer->sensitivity = 0xa0;
data_pointer->press_speed = 0x38;
- name_mute = kzalloc(name_sz, GFP_KERNEL);
- if (name_mute == NULL) {
+ name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ if (name_mute == NULL || name_micmute == NULL) {
hid_err(hdev, "Could not allocate memory for led data\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
-
- name_micmute = kzalloc(name_sz, GFP_KERNEL);
- if (name_micmute == NULL) {
- hid_err(hdev, "Could not allocate memory for led data\n");
- ret = -ENOMEM;
- goto err2;
- }
snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
hid_set_drvdata(hdev, data_pointer);
@@ -397,19 +389,12 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
tpkbd_features_set(hdev);
return 0;
-
-err2:
- kfree(name_mute);
-err:
- kfree(data_pointer);
- return ret;
}
static int tpkbd_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int ret;
- struct usbhid_device *uhdev;
ret = hid_parse(hdev);
if (ret) {
@@ -423,9 +408,8 @@ static int tpkbd_probe(struct hid_device *hdev,
goto err;
}
- uhdev = (struct usbhid_device *) hdev->driver_data;
-
- if (uhdev->ifnum == 1) {
+ if (hid_get_drvdata(hdev)) {
+ hid_set_drvdata(hdev, NULL);
ret = tpkbd_probe_tp(hdev);
if (ret)
goto err_hid;
@@ -449,17 +433,11 @@ static void tpkbd_remove_tp(struct hid_device *hdev)
led_classdev_unregister(&data_pointer->led_mute);
hid_set_drvdata(hdev, NULL);
- kfree(data_pointer->led_micmute.name);
- kfree(data_pointer->led_mute.name);
- kfree(data_pointer);
}
static void tpkbd_remove(struct hid_device *hdev)
{
- struct usbhid_device *uhdev;
-
- uhdev = (struct usbhid_device *) hdev->driver_data;
- if (uhdev->ifnum == 1)
+ if (hid_get_drvdata(hdev))
tpkbd_remove_tp(hdev);
hid_hw_stop(hdev);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 6f12ecd36c8..06eb45fa633 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -45,7 +45,9 @@
/* Size of the original descriptors of the Driving Force (and Pro) wheels */
#define DF_RDESC_ORIG_SIZE 130
#define DFP_RDESC_ORIG_SIZE 97
+#define FV_RDESC_ORIG_SIZE 130
#define MOMO_RDESC_ORIG_SIZE 87
+#define MOMO2_RDESC_ORIG_SIZE 87
/* Fixed report descriptors for Logitech Driving Force (and Pro)
* wheel controllers
@@ -170,6 +172,73 @@ static __u8 dfp_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 fv_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0C, /* Report Count (12), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x0C, /* Usage Maximum (0Ch), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x02, /* Report Count (2), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x02, /* Usage (02h), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x75, 0x04, /* Report Size (4), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Null State), */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x04, /* Report Count (4), */
+0x65, 0x00, /* Unit, */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x07, /* Report Count (7), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x03, /* Usage (03h), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
static __u8 momo_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystik), */
@@ -216,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 momo2_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0A, /* Report Count (10), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x0A, /* Usage Maximum (0Ah), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x00, /* Usage (00h), */
+0x95, 0x04, /* Report Count (4), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x00, /* Usage (00h), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
/*
* Certain Logitech keyboards send in report #3 keys which are far
* above the logical maximum described in descriptor. This extends
@@ -275,6 +392,24 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
+ rdesc = momo2_rdesc_fixed;
+ *rsize = sizeof(momo2_rdesc_fixed);
+ }
+ break;
+
+ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+ if (*rsize == FV_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Formula Vibration report descriptor\n");
+ rdesc = fv_rdesc_fixed;
+ *rsize = sizeof(fv_rdesc_fixed);
+ }
+ break;
+
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
@@ -492,6 +627,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
field->application = HID_GD_MULTIAXIS;
break;
default:
@@ -639,6 +775,8 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
.driver_data = LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
+ .driver_data = LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 1a42eaa6ca0..0e3fb1a7e42 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -95,7 +95,7 @@ int lg2ff_init(struct hid_device *hid)
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
- hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
return 0;
}
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 8782fe1aaa0..befe0e33647 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -196,6 +196,21 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e
case FF_CONSTANT:
x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
CLAMP(x);
+
+ if (x == 0x80) {
+ /* De-activate force in slot-1*/
+ value[0] = 0x13;
+ value[1] = 0x00;
+ value[2] = 0x00;
+ value[3] = 0x00;
+ value[4] = 0x00;
+ value[5] = 0x00;
+ value[6] = 0x00;
+
+ hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ return 0;
+ }
+
value[0] = 0x11; /* Slot 1 */
value[1] = 0x08;
value[2] = x;
@@ -218,12 +233,70 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__s32 *value = report->field[0]->value;
+ __u32 expand_a, expand_b;
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return;
+ }
+
+ /* De-activate Auto-Center */
+ if (magnitude == 0) {
+ value[0] = 0xf5;
+ value[1] = 0x00;
+ value[2] = 0x00;
+ value[3] = 0x00;
+ value[4] = 0x00;
+ value[5] = 0x00;
+ value[6] = 0x00;
+
+ hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ return;
+ }
+
+ if (magnitude <= 0xaaaa) {
+ expand_a = 0x0c * magnitude;
+ expand_b = 0x80 * magnitude;
+ } else {
+ expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa);
+ expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa);
+ }
+
+ /* Adjust for non-MOMO wheels */
+ switch (entry->product_id) {
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ break;
+ default:
+ expand_a = expand_a >> 1;
+ break;
+ }
value[0] = 0xfe;
value[1] = 0x0d;
- value[2] = magnitude >> 13;
- value[3] = magnitude >> 13;
- value[4] = magnitude >> 8;
+ value[2] = expand_a / 0xaaaa;
+ value[3] = expand_a / 0xaaaa;
+ value[4] = expand_b / 0xaaaa;
+ value[5] = 0x00;
+ value[6] = 0x00;
+
+ hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+
+ /* Activate Auto-Center */
+ value[0] = 0x14;
+ value[1] = 0x00;
+ value[2] = 0x00;
+ value[3] = 0x00;
+ value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
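A worked example of the new autocenter scaling (not from the patch): for the maximum magnitude 0xffff on a MOMO wheel, expand_a = 0x0c * 0xaaaa + 0x06 * 0x5555 = 655350 and expand_b = 0x80 * 0xaaaa + 0xff * 0x5555 = 11162795, so value[2] = value[3] = 655350 / 0xaaaa = 15 (0x0f) and value[4] = 11162795 / 0xaaaa = 255 (0xff); for non-MOMO wheels expand_a is halved first, giving value[2] = value[3] = 7.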
@@ -540,17 +613,6 @@ int lg4ff_init(struct hid_device *hid)
if (error)
return error;
- /* Check if autocentering is available and
- * set the centering force to zero by default */
- if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
- if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
- else
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
-
- dev->ff->set_autocenter(dev, 0);
- }
-
/* Get private driver data */
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
@@ -571,6 +633,17 @@ int lg4ff_init(struct hid_device *hid)
entry->max_range = lg4ff_devices[i].max_range;
entry->set_range = lg4ff_devices[i].set_range;
+ /* Check if autocentering is available and
+ * set the centering force to zero by default */
+ if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+ if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+ else
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+ dev->ff->set_autocenter(dev, 0);
+ }
+
/* Create sysfs interface */
error = device_create_file(&hid->dev, &dev_attr_range);
if (error)
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 2e5302462ef..a7947d8251a 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -542,9 +542,9 @@ static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
return 0;
}
-static void rdcat(char **rdesc, unsigned int *rsize, const char *data, unsigned int size)
+static void rdcat(char *rdesc, unsigned int *rsize, const char *data, unsigned int size)
{
- memcpy(*rdesc + *rsize, data, size);
+ memcpy(rdesc + *rsize, data, size);
*rsize += size;
}
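Passing the descriptor buffer by value is sufficient here because rdcat() only appends into it and never reallocates it; the callers below are updated to pass rdesc rather than &rdesc accordingly.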
@@ -567,31 +567,31 @@ static int logi_dj_ll_parse(struct hid_device *hid)
if (djdev->reports_supported & STD_KEYBOARD) {
dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
+ rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
}
if (djdev->reports_supported & STD_MOUSE) {
dbg_hid("%s: sending a mouse descriptor, reports_supported: "
"%x\n", __func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
+ rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
}
if (djdev->reports_supported & MULTIMEDIA) {
dbg_hid("%s: sending a multimedia report descriptor: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
+ rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
}
if (djdev->reports_supported & POWER_KEYS) {
dbg_hid("%s: sending a power keys report descriptor: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
+ rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
}
if (djdev->reports_supported & MEDIA_CENTER) {
dbg_hid("%s: sending a media center report descriptor: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
+ rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
}
if (djdev->reports_supported & KBD_LEDS) {
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5e5fe1b8eeb..a2cedb8ae1c 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -250,12 +250,12 @@ static struct mt_class mt_classes[] = {
{ .name = MT_CLS_GENERALTOUCH_TWOFINGERS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
MT_QUIRK_VALID_IS_INRANGE |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+ MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2
},
{ .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER
+ MT_QUIRK_SLOT_IS_CONTACTID
},
{ .name = MT_CLS_FLATFROG,
@@ -1173,6 +1173,21 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
+ { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
/* Gametel game controller */
{ .driver_data = MT_CLS_NSMU,
@@ -1284,6 +1299,14 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+ /* SiS panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+ USB_DEVICE_ID_SIS9200_TOUCH) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+ USB_DEVICE_ID_SIS817_TOUCH) },
+
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index 74f70403262..02e28e9f4ea 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -65,10 +65,11 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
EXPORT_SYMBOL_GPL(roccat_common2_send);
enum roccat_common2_control_states {
- ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
+ ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0,
ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
- ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
+ ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3,
+ ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4,
};
static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
@@ -88,13 +89,12 @@ static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
switch (control.value) {
case ROCCAT_COMMON_CONTROL_STATUS_OK:
return 0;
- case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
+ case ROCCAT_COMMON_CONTROL_STATUS_BUSY:
msleep(500);
continue;
case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
-
- case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
- /* seems to be critical - replug necessary */
+ case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL:
+ case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW:
return -EINVAL;
default:
dev_err(&usb_dev->dev,
@@ -122,6 +122,59 @@ int roccat_common2_send_with_status(struct usb_device *usb_dev,
}
EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
+int roccat_common2_device_init_struct(struct usb_device *usb_dev,
+ struct roccat_common2_device *dev)
+{
+ mutex_init(&dev->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_device_init_struct);
+
+ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&roccat_dev->lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&roccat_dev->lock);
+
+ return retval ? retval : real_size;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_sysfs_read);
+
+ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&roccat_dev->lock);
+ retval = roccat_common2_send_with_status(usb_dev, command, buf, real_size);
+ mutex_unlock(&roccat_dev->lock);
+
+ return retval ? retval : real_size;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_sysfs_write);
+
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat common driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index a97746a63b7..eaa56eb7d5d 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -32,4 +32,66 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
int roccat_common2_send_with_status(struct usb_device *usb_dev,
uint command, void const *buf, uint size);
+struct roccat_common2_device {
+ int roccat_claimed;
+ int chrdev_minor;
+ struct mutex lock;
+};
+
+int roccat_common2_device_init_struct(struct usb_device *usb_dev,
+ struct roccat_common2_device *dev);
+ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command);
+ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command);
+
+#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
+static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \
+ SIZE, COMMAND); \
+}
+
+#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \
+static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \
+ SIZE, COMMAND); \
+}
+
+#define ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE)
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = SIZE, \
+ .read = roccat_common2_sysfs_read_ ## thingy, \
+ .write = roccat_common2_sysfs_write_ ## thingy \
+}
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = SIZE, \
+ .read = roccat_common2_sysfs_read_ ## thingy, \
+}
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = SIZE, \
+ .write = roccat_common2_sysfs_write_ ## thingy \
+}
+
#endif
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index 99a605ebb66..07de2f9014c 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -15,6 +15,7 @@
* Roccat KonePure is a smaller version of KoneXTD with less buttons and lights.
*/
+#include <linux/types.h>
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
@@ -23,128 +24,50 @@
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
-#include "hid-roccat-konepure.h"
-static struct class *konepure_class;
-
-static ssize_t konepure_sysfs_read(struct file *fp, struct kobject *kobj,
- char *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off >= real_size)
- return 0;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&konepure->konepure_lock);
- retval = roccat_common2_receive(usb_dev, command, buf, real_size);
- mutex_unlock(&konepure->konepure_lock);
-
- return retval ? retval : real_size;
-}
-
-static ssize_t konepure_sysfs_write(struct file *fp, struct kobject *kobj,
- void const *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&konepure->konepure_lock);
- retval = roccat_common2_send_with_status(usb_dev, command,
- (void *)buf, real_size);
- mutex_unlock(&konepure->konepure_lock);
-
- return retval ? retval : real_size;
-}
-
-#define KONEPURE_SYSFS_W(thingy, THINGY) \
-static ssize_t konepure_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return konepure_sysfs_write(fp, kobj, buf, off, count, \
- KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \
-}
-
-#define KONEPURE_SYSFS_R(thingy, THINGY) \
-static ssize_t konepure_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return konepure_sysfs_read(fp, kobj, buf, off, count, \
- KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \
-}
+enum {
+ KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3,
+};
-#define KONEPURE_SYSFS_RW(thingy, THINGY) \
-KONEPURE_SYSFS_W(thingy, THINGY) \
-KONEPURE_SYSFS_R(thingy, THINGY)
-
-#define KONEPURE_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-KONEPURE_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0660 }, \
- .size = KONEPURE_SIZE_ ## THINGY, \
- .read = konepure_sysfs_read_ ## thingy, \
- .write = konepure_sysfs_write_ ## thingy \
-}
+struct konepure_mouse_report_button {
+ uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */
+ uint8_t zero;
+ uint8_t type;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t zero2;
+ uint8_t unknown[2];
+} __packed;
-#define KONEPURE_BIN_ATTRIBUTE_R(thingy, THINGY) \
-KONEPURE_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0440 }, \
- .size = KONEPURE_SIZE_ ## THINGY, \
- .read = konepure_sysfs_read_ ## thingy, \
-}
-
-#define KONEPURE_BIN_ATTRIBUTE_W(thingy, THINGY) \
-KONEPURE_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0220 }, \
- .size = KONEPURE_SIZE_ ## THINGY, \
- .write = konepure_sysfs_write_ ## thingy \
-}
+static struct class *konepure_class;
-KONEPURE_BIN_ATTRIBUTE_RW(actual_profile, ACTUAL_PROFILE);
-KONEPURE_BIN_ATTRIBUTE_RW(info, INFO);
-KONEPURE_BIN_ATTRIBUTE_RW(sensor, SENSOR);
-KONEPURE_BIN_ATTRIBUTE_RW(tcu, TCU);
-KONEPURE_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
-KONEPURE_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
-KONEPURE_BIN_ATTRIBUTE_W(control, CONTROL);
-KONEPURE_BIN_ATTRIBUTE_W(talk, TALK);
-KONEPURE_BIN_ATTRIBUTE_W(macro, MACRO);
-KONEPURE_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE);
-
-static struct bin_attribute *konepure_bin_attributes[] = {
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(actual_profile, 0x05, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_settings, 0x06, 0x1f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_buttons, 0x07, 0x3b);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(macro, 0x08, 0x0822);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x09, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(tcu, 0x0c, 0x04);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10);
+
+static struct bin_attribute *konepure_bin_attrs[] = {
&bin_attr_actual_profile,
+ &bin_attr_control,
&bin_attr_info,
+ &bin_attr_talk,
+ &bin_attr_macro,
&bin_attr_sensor,
&bin_attr_tcu,
+ &bin_attr_tcu_image,
&bin_attr_profile_settings,
&bin_attr_profile_buttons,
- &bin_attr_control,
- &bin_attr_talk,
- &bin_attr_macro,
- &bin_attr_tcu_image,
NULL,
};
static const struct attribute_group konepure_group = {
- .bin_attrs = konepure_bin_attributes,
+ .bin_attrs = konepure_bin_attrs,
};
static const struct attribute_group *konepure_groups[] = {
@@ -152,20 +75,11 @@ static const struct attribute_group *konepure_groups[] = {
NULL,
};
-
-static int konepure_init_konepure_device_struct(struct usb_device *usb_dev,
- struct konepure_device *konepure)
-{
- mutex_init(&konepure->konepure_lock);
-
- return 0;
-}
-
static int konepure_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- struct konepure_device *konepure;
+ struct roccat_common2_device *konepure;
int retval;
if (intf->cur_altsetting->desc.bInterfaceProtocol
@@ -181,9 +95,9 @@ static int konepure_init_specials(struct hid_device *hdev)
}
hid_set_drvdata(hdev, konepure);
- retval = konepure_init_konepure_device_struct(usb_dev, konepure);
+ retval = roccat_common2_device_init_struct(usb_dev, konepure);
if (retval) {
- hid_err(hdev, "couldn't init struct konepure_device\n");
+ hid_err(hdev, "couldn't init KonePure device\n");
goto exit_free;
}
@@ -205,7 +119,7 @@ exit_free:
static void konepure_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct konepure_device *konepure;
+ struct roccat_common2_device *konepure;
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
@@ -258,7 +172,7 @@ static int konepure_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct konepure_device *konepure = hid_get_drvdata(hdev);
+ struct roccat_common2_device *konepure = hid_get_drvdata(hdev);
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
diff --git a/drivers/hid/hid-roccat-konepure.h b/drivers/hid/hid-roccat-konepure.h
deleted file mode 100644
index 2cd24e93dfd..00000000000
--- a/drivers/hid/hid-roccat-konepure.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef __HID_ROCCAT_KONEPURE_H
-#define __HID_ROCCAT_KONEPURE_H
-
-/*
- * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/types.h>
-
-enum {
- KONEPURE_SIZE_ACTUAL_PROFILE = 0x03,
- KONEPURE_SIZE_CONTROL = 0x03,
- KONEPURE_SIZE_FIRMWARE_WRITE = 0x0402,
- KONEPURE_SIZE_INFO = 0x06,
- KONEPURE_SIZE_MACRO = 0x0822,
- KONEPURE_SIZE_PROFILE_SETTINGS = 0x1f,
- KONEPURE_SIZE_PROFILE_BUTTONS = 0x3b,
- KONEPURE_SIZE_SENSOR = 0x06,
- KONEPURE_SIZE_TALK = 0x10,
- KONEPURE_SIZE_TCU = 0x04,
- KONEPURE_SIZE_TCU_IMAGE = 0x0404,
-};
-
-enum konepure_control_requests {
- KONEPURE_CONTROL_REQUEST_GENERAL = 0x80,
- KONEPURE_CONTROL_REQUEST_BUTTONS = 0x90,
-};
-
-enum konepure_commands {
- KONEPURE_COMMAND_CONTROL = 0x04,
- KONEPURE_COMMAND_ACTUAL_PROFILE = 0x05,
- KONEPURE_COMMAND_PROFILE_SETTINGS = 0x06,
- KONEPURE_COMMAND_PROFILE_BUTTONS = 0x07,
- KONEPURE_COMMAND_MACRO = 0x08,
- KONEPURE_COMMAND_INFO = 0x09,
- KONEPURE_COMMAND_TCU = 0x0c,
- KONEPURE_COMMAND_TCU_IMAGE = 0x0c,
- KONEPURE_COMMAND_E = 0x0e,
- KONEPURE_COMMAND_SENSOR = 0x0f,
- KONEPURE_COMMAND_TALK = 0x10,
- KONEPURE_COMMAND_FIRMWARE_WRITE = 0x1b,
- KONEPURE_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
-};
-
-enum {
- KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3,
-};
-
-struct konepure_mouse_report_button {
- uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */
- uint8_t zero;
- uint8_t type;
- uint8_t data1;
- uint8_t data2;
- uint8_t zero2;
- uint8_t unknown[2];
-} __packed;
-
-struct konepure_device {
- int roccat_claimed;
- int chrdev_minor;
- struct mutex konepure_lock;
-};
-
-#endif
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 0c8e1ef0b67..966047711fb 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -554,9 +554,13 @@ static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus,
break;
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
+ break;
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
kovaplus->actual_x_sensitivity = button_report->data1;
kovaplus->actual_y_sensitivity = button_report->data2;
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
new file mode 100644
index 00000000000..47cc8f30ff6
--- /dev/null
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -0,0 +1,241 @@
+/*
+ * Roccat Ryos driver for Linux
+ *
+ * Copyright (c) 2013 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+
+enum {
+ RYOS_REPORT_NUMBER_SPECIAL = 3,
+ RYOS_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct ryos_report_special {
+ uint8_t number; /* RYOS_REPORT_NUMBER_SPECIAL */
+ uint8_t data[4];
+} __packed;
+
+static struct class *ryos_class;
+
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x05, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_primary, 0x06, 0x7d);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_function, 0x07, 0x5f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_macro, 0x08, 0x23);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_thumbster, 0x09, 0x17);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_extra, 0x0a, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_easyzone, 0x0b, 0x126);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(key_mask, 0x0c, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light, 0x0d, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x0e, 0x7d2);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_R(info, 0x0f, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(reset, 0x11, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(light_control, 0x13, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x16, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2);
+
+static struct bin_attribute *ryos_bin_attrs[] = {
+ &bin_attr_control,
+ &bin_attr_profile,
+ &bin_attr_keys_primary,
+ &bin_attr_keys_function,
+ &bin_attr_keys_macro,
+ &bin_attr_keys_thumbster,
+ &bin_attr_keys_extra,
+ &bin_attr_keys_easyzone,
+ &bin_attr_key_mask,
+ &bin_attr_light,
+ &bin_attr_macro,
+ &bin_attr_info,
+ &bin_attr_reset,
+ &bin_attr_light_control,
+ &bin_attr_talk,
+ &bin_attr_stored_lights,
+ &bin_attr_custom_lights,
+ &bin_attr_light_macro,
+ NULL,
+};
+
+static const struct attribute_group ryos_group = {
+ .bin_attrs = ryos_bin_attrs,
+};
+
+static const struct attribute_group *ryos_groups[] = {
+ &ryos_group,
+ NULL,
+};
+
+static int ryos_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct roccat_common2_device *ryos;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != RYOS_USB_INTERFACE_PROTOCOL) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ ryos = kzalloc(sizeof(*ryos), GFP_KERNEL);
+ if (!ryos) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, ryos);
+
+ retval = roccat_common2_device_init_struct(usb_dev, ryos);
+ if (retval) {
+ hid_err(hdev, "couldn't init Ryos device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(ryos_class, hdev,
+ sizeof(struct ryos_report_special));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ ryos->chrdev_minor = retval;
+ ryos->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(ryos);
+ return retval;
+}
+
+static void ryos_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct roccat_common2_device *ryos;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != RYOS_USB_INTERFACE_PROTOCOL)
+ return;
+
+ ryos = hid_get_drvdata(hdev);
+ if (ryos->roccat_claimed)
+ roccat_disconnect(ryos->chrdev_minor);
+ kfree(ryos);
+}
+
+static int ryos_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = ryos_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void ryos_remove(struct hid_device *hdev)
+{
+ ryos_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static int ryos_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct roccat_common2_device *ryos = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != RYOS_USB_INTERFACE_PROTOCOL)
+ return 0;
+
+ if (data[0] != RYOS_REPORT_NUMBER_SPECIAL)
+ return 0;
+
+ if (ryos != NULL && ryos->roccat_claimed)
+ roccat_report_event(ryos->chrdev_minor, data);
+
+ return 0;
+}
+
+static const struct hid_device_id ryos_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, ryos_devices);
+
+static struct hid_driver ryos_driver = {
+ .name = "ryos",
+ .id_table = ryos_devices,
+ .probe = ryos_probe,
+ .remove = ryos_remove,
+ .raw_event = ryos_raw_event
+};
+
+static int __init ryos_init(void)
+{
+ int retval;
+
+ ryos_class = class_create(THIS_MODULE, "ryos");
+ if (IS_ERR(ryos_class))
+ return PTR_ERR(ryos_class);
+ ryos_class->dev_groups = ryos_groups;
+
+ retval = hid_register_driver(&ryos_driver);
+ if (retval)
+ class_destroy(ryos_class);
+ return retval;
+}
+
+static void __exit ryos_exit(void)
+{
+ hid_unregister_driver(&ryos_driver);
+ class_destroy(ryos_class);
+}
+
+module_init(ryos_init);
+module_exit(ryos_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Ryos MK/Glow/Pro driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 0332267199d..6dbf6e04dce 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -27,98 +27,15 @@
static struct class *savu_class;
-static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj,
- char *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off >= real_size)
- return 0;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&savu->savu_lock);
- retval = roccat_common2_receive(usb_dev, command, buf, real_size);
- mutex_unlock(&savu->savu_lock);
-
- return retval ? retval : real_size;
-}
-
-static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj,
- void const *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&savu->savu_lock);
- retval = roccat_common2_send_with_status(usb_dev, command,
- (void *)buf, real_size);
- mutex_unlock(&savu->savu_lock);
-
- return retval ? retval : real_size;
-}
-
-#define SAVU_SYSFS_W(thingy, THINGY) \
-static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return savu_sysfs_write(fp, kobj, buf, off, count, \
- SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
-}
-
-#define SAVU_SYSFS_R(thingy, THINGY) \
-static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return savu_sysfs_read(fp, kobj, buf, off, count, \
- SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
-}
-
-#define SAVU_SYSFS_RW(thingy, THINGY) \
-SAVU_SYSFS_W(thingy, THINGY) \
-SAVU_SYSFS_R(thingy, THINGY)
-
-#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-SAVU_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0660 }, \
- .size = SAVU_SIZE_ ## THINGY, \
- .read = savu_sysfs_read_ ## thingy, \
- .write = savu_sysfs_write_ ## thingy \
-}
-
-#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
-SAVU_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0220 }, \
- .size = SAVU_SIZE_ ## THINGY, \
- .write = savu_sysfs_write_ ## thingy \
-}
-
-SAVU_BIN_ATTRIBUTE_W(control, CONTROL);
-SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE);
-SAVU_BIN_ATTRIBUTE_RW(general, GENERAL);
-SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS);
-SAVU_BIN_ATTRIBUTE_RW(macro, MACRO);
-SAVU_BIN_ATTRIBUTE_RW(info, INFO);
-SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR);
-
-static struct bin_attribute *savu_bin_attributes[] = {
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x4, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x5, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(general, 0x6, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(buttons, 0x7, 0x2f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04);
+
+static struct bin_attribute *savu_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_general,
@@ -130,7 +47,7 @@ static struct bin_attribute *savu_bin_attributes[] = {
};
static const struct attribute_group savu_group = {
- .bin_attrs = savu_bin_attributes,
+ .bin_attrs = savu_bin_attrs,
};
static const struct attribute_group *savu_groups[] = {
@@ -138,19 +55,11 @@ static const struct attribute_group *savu_groups[] = {
NULL,
};
-static int savu_init_savu_device_struct(struct usb_device *usb_dev,
- struct savu_device *savu)
-{
- mutex_init(&savu->savu_lock);
-
- return 0;
-}
-
static int savu_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- struct savu_device *savu;
+ struct roccat_common2_device *savu;
int retval;
if (intf->cur_altsetting->desc.bInterfaceProtocol
@@ -166,9 +75,9 @@ static int savu_init_specials(struct hid_device *hdev)
}
hid_set_drvdata(hdev, savu);
- retval = savu_init_savu_device_struct(usb_dev, savu);
+ retval = roccat_common2_device_init_struct(usb_dev, savu);
if (retval) {
- hid_err(hdev, "couldn't init struct savu_device\n");
+ hid_err(hdev, "couldn't init Savu device\n");
goto exit_free;
}
@@ -190,7 +99,7 @@ exit_free:
static void savu_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct savu_device *savu;
+ struct roccat_common2_device *savu;
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
@@ -239,7 +148,7 @@ static void savu_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static void savu_report_to_chrdev(struct savu_device const *savu,
+static void savu_report_to_chrdev(struct roccat_common2_device const *savu,
u8 const *data)
{
struct savu_roccat_report roccat_report;
@@ -261,7 +170,7 @@ static int savu_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct savu_device *savu = hid_get_drvdata(hdev);
+ struct roccat_common2_device *savu = hid_get_drvdata(hdev);
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h
index 9120ba72087..d23217bd2b8 100644
--- a/drivers/hid/hid-roccat-savu.h
+++ b/drivers/hid/hid-roccat-savu.h
@@ -14,31 +14,6 @@
#include <linux/types.h>
-enum {
- SAVU_SIZE_CONTROL = 0x03,
- SAVU_SIZE_PROFILE = 0x03,
- SAVU_SIZE_GENERAL = 0x10,
- SAVU_SIZE_BUTTONS = 0x2f,
- SAVU_SIZE_MACRO = 0x0823,
- SAVU_SIZE_INFO = 0x08,
- SAVU_SIZE_SENSOR = 0x04,
-};
-
-enum savu_control_requests {
- SAVU_CONTROL_REQUEST_GENERAL = 0x80,
- SAVU_CONTROL_REQUEST_BUTTONS = 0x90,
-};
-
-enum savu_commands {
- SAVU_COMMAND_CONTROL = 0x4,
- SAVU_COMMAND_PROFILE = 0x5,
- SAVU_COMMAND_GENERAL = 0x6,
- SAVU_COMMAND_BUTTONS = 0x7,
- SAVU_COMMAND_MACRO = 0x8,
- SAVU_COMMAND_INFO = 0x9,
- SAVU_COMMAND_SENSOR = 0xc,
-};
-
struct savu_mouse_report_special {
uint8_t report_number; /* always 3 */
uint8_t zero;
@@ -77,11 +52,4 @@ struct savu_roccat_report {
uint8_t data[2];
} __packed;
-struct savu_device {
- int roccat_claimed;
- int chrdev_minor;
-
- struct mutex savu_lock;
-};
-
#endif
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 10e1581022c..a184e1921c1 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -326,7 +326,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
field->logical == attr_usage_id) {
sensor_hub_fill_attr_info(info, i, report->id,
field->unit, field->unit_exponent,
- field->report_size);
+ field->report_size *
+ field->report_count);
ret = 0;
} else {
for (j = 0; j < field->maxusage; ++j) {
@@ -338,7 +339,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
i, report->id,
field->unit,
field->unit_exponent,
- field->report_size);
+ field->report_size *
+ field->report_count);
ret = 0;
break;
}
@@ -425,9 +427,10 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
hid_dbg(hdev, "%d collection_index:%x hid:%x sz:%x\n",
i, report->field[i]->usage->collection_index,
report->field[i]->usage->hid,
- report->field[i]->report_size/8);
-
- sz = report->field[i]->report_size/8;
+ (report->field[i]->report_size *
+ report->field[i]->report_count)/8);
+ sz = (report->field[i]->report_size *
+ report->field[i]->report_count)/8;
if (pdata->pending.status && pdata->pending.attr_usage_id ==
report->field[i]->usage->hid) {
hid_dbg(hdev, "data was pending ...\n");
@@ -465,6 +468,39 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
return 1;
}
+int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev)
+{
+ int ret = 0;
+ struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
+
+ mutex_lock(&data->mutex);
+ if (!hsdev->ref_cnt) {
+ ret = hid_hw_open(hsdev->hdev);
+ if (ret) {
+ hid_err(hsdev->hdev, "failed to open hid device\n");
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ }
+ hsdev->ref_cnt++;
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sensor_hub_device_open);
+
+void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
+{
+ struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
+
+ mutex_lock(&data->mutex);
+ hsdev->ref_cnt--;
+ if (!hsdev->ref_cnt)
+ hid_hw_close(hsdev->hdev);
+ mutex_unlock(&data->mutex);
+}
+EXPORT_SYMBOL_GPL(sensor_hub_device_close);
+
static int sensor_hub_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -506,12 +542,6 @@ static int sensor_hub_probe(struct hid_device *hdev,
hid_err(hdev, "hw start failed\n");
return ret;
}
- ret = hid_hw_open(hdev);
- if (ret) {
- hid_err(hdev, "failed to open input interrupt pipe\n");
- goto err_stop_hw;
- }
-
INIT_LIST_HEAD(&sd->dyn_callback_list);
sd->hid_sensor_client_cnt = 0;
report_enum = &hdev->report_enum[HID_INPUT_REPORT];
@@ -520,7 +550,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
if (dev_cnt > HID_MAX_PHY_DEVICES) {
hid_err(hdev, "Invalid Physical device count\n");
ret = -EINVAL;
- goto err_close;
+ goto err_stop_hw;
}
sd->hid_sensor_hub_client_devs = kzalloc(dev_cnt *
sizeof(struct mfd_cell),
@@ -528,7 +558,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
if (sd->hid_sensor_hub_client_devs == NULL) {
hid_err(hdev, "Failed to allocate memory for mfd cells\n");
ret = -ENOMEM;
- goto err_close;
+ goto err_stop_hw;
}
list_for_each_entry(report, &report_enum->report_list, list) {
hid_dbg(hdev, "Report id:%x\n", report->id);
@@ -565,8 +595,6 @@ err_free_names:
for (i = 0; i < sd->hid_sensor_client_cnt ; ++i)
kfree(sd->hid_sensor_hub_client_devs[i].name);
kfree(sd->hid_sensor_hub_client_devs);
-err_close:
- hid_hw_close(hdev);
err_stop_hw:
hid_hw_stop(hdev);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b18320db5f7..da551d11376 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -419,21 +419,14 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
*/
static int sixaxis_set_operational_usb(struct hid_device *hdev)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
- __u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
int ret;
char *buf = kmalloc(18, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- HID_REQ_GET_REPORT,
- USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE,
- (3 << 8) | 0xf2, ifnum, buf, 17,
- USB_CTRL_GET_TIMEOUT);
+ ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
+
if (ret < 0)
hid_err(hdev, "can't set operational mode\n");
@@ -621,6 +614,54 @@ static void buzz_remove(struct hid_device *hdev)
drv_data->extra = NULL;
}
+#ifdef CONFIG_SONY_FF
+static int sony_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ unsigned char buf[] = {
+ 0x01,
+ 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03,
+ 0xff, 0x27, 0x10, 0x00, 0x32,
+ 0xff, 0x27, 0x10, 0x00, 0x32,
+ 0xff, 0x27, 0x10, 0x00, 0x32,
+ 0xff, 0x27, 0x10, 0x00, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ __u8 left;
+ __u8 right;
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ left = effect->u.rumble.strong_magnitude / 256;
+ right = effect->u.rumble.weak_magnitude ? 1 : 0;
+
+ buf[3] = right;
+ buf[5] = left;
+
+ return hid->hid_output_raw_report(hid, buf, sizeof(buf),
+ HID_OUTPUT_REPORT);
+}
+
+static int sony_init_ff(struct hid_device *hdev)
+{
+ struct hid_input *hidinput = list_entry(hdev->inputs.next,
+ struct hid_input, list);
+ struct input_dev *input_dev = hidinput->input;
+
+ input_set_capability(input_dev, EV_FF, FF_RUMBLE);
+ return input_ff_create_memless(input_dev, NULL, sony_play_effect);
+}
+
+#else
+static int sony_init_ff(struct hid_device *hdev)
+{
+ return 0;
+}
+#endif
+
static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -670,6 +711,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0)
goto err_stop;
+ ret = sony_init_ff(hdev);
+ if (ret < 0)
+ goto err_stop;
+
return 0;
err_stop:
hid_hw_stop(hdev);
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 71adf9e60b1..6b61f01e01e 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -1655,10 +1655,39 @@ static void wiimod_pro_in_ext(struct wiimote_data *wdata, const __u8 *ext)
ly = (ext[4] & 0xff) | ((ext[5] & 0x0f) << 8);
ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8);
- input_report_abs(wdata->extension.input, ABS_X, lx - 0x800);
- input_report_abs(wdata->extension.input, ABS_Y, ly - 0x800);
- input_report_abs(wdata->extension.input, ABS_RX, rx - 0x800);
- input_report_abs(wdata->extension.input, ABS_RY, ry - 0x800);
+ /* zero-point offsets */
+ lx -= 0x800;
+ ly = 0x800 - ly;
+ rx -= 0x800;
+ ry = 0x800 - ry;
+
+ /* Trivial automatic calibration. We don't know any calibration data
+ * in the EEPROM so we must use the first report to calibrate the
+ * null-position of the analog sticks. Users can retrigger calibration
+ * via sysfs, or set it explicitly. If data is off more than abs(500),
+ * we skip calibration as the sticks are likely to be moved already. */
+ if (!(wdata->state.flags & WIIPROTO_FLAG_PRO_CALIB_DONE)) {
+ wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
+ if (abs(lx) < 500)
+ wdata->state.calib_pro_sticks[0] = -lx;
+ if (abs(ly) < 500)
+ wdata->state.calib_pro_sticks[1] = -ly;
+ if (abs(rx) < 500)
+ wdata->state.calib_pro_sticks[2] = -rx;
+ if (abs(ry) < 500)
+ wdata->state.calib_pro_sticks[3] = -ry;
+ }
+
+ /* apply calibration data */
+ lx += wdata->state.calib_pro_sticks[0];
+ ly += wdata->state.calib_pro_sticks[1];
+ rx += wdata->state.calib_pro_sticks[2];
+ ry += wdata->state.calib_pro_sticks[3];
+
+ input_report_abs(wdata->extension.input, ABS_X, lx);
+ input_report_abs(wdata->extension.input, ABS_Y, ly);
+ input_report_abs(wdata->extension.input, ABS_RX, rx);
+ input_report_abs(wdata->extension.input, ABS_RY, ry);
input_report_key(wdata->extension.input,
wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT],
@@ -1766,12 +1795,70 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
return 0;
}
+static ssize_t wiimod_pro_calib_show(struct device *dev,
+ struct device_attribute *attr,
+ char *out)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ int r;
+
+ r = 0;
+ r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[0]);
+ r += sprintf(&out[r], "%+06hd ", wdata->state.calib_pro_sticks[1]);
+ r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[2]);
+ r += sprintf(&out[r], "%+06hd\n", wdata->state.calib_pro_sticks[3]);
+
+ return r;
+}
+
+static ssize_t wiimod_pro_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ int r;
+ s16 x1, y1, x2, y2;
+
+ if (!strncmp(buf, "scan\n", 5)) {
+ spin_lock_irq(&wdata->state.lock);
+ wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
+ spin_unlock_irq(&wdata->state.lock);
+ } else {
+ r = sscanf(buf, "%hd:%hd %hd:%hd", &x1, &y1, &x2, &y2);
+ if (r != 4)
+ return -EINVAL;
+
+ spin_lock_irq(&wdata->state.lock);
+ wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
+ spin_unlock_irq(&wdata->state.lock);
+
+ wdata->state.calib_pro_sticks[0] = x1;
+ wdata->state.calib_pro_sticks[1] = y1;
+ wdata->state.calib_pro_sticks[2] = x2;
+ wdata->state.calib_pro_sticks[3] = y2;
+ }
+
+ return strnlen(buf, PAGE_SIZE);
+}
+
+static DEVICE_ATTR(pro_calib, S_IRUGO|S_IWUSR|S_IWGRP, wiimod_pro_calib_show,
+ wiimod_pro_calib_store);
+
static int wiimod_pro_probe(const struct wiimod_ops *ops,
struct wiimote_data *wdata)
{
int ret, i;
+ unsigned long flags;
INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+ wdata->state.calib_pro_sticks[0] = 0;
+ wdata->state.calib_pro_sticks[1] = 0;
+ wdata->state.calib_pro_sticks[2] = 0;
+ wdata->state.calib_pro_sticks[3] = 0;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
wdata->extension.input = input_allocate_device();
if (!wdata->extension.input)
@@ -1786,6 +1873,13 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
goto err_free;
}
+ ret = device_create_file(&wdata->hdev->dev,
+ &dev_attr_pro_calib);
+ if (ret) {
+ hid_err(wdata->hdev, "cannot create sysfs attribute\n");
+ goto err_free;
+ }
+
wdata->extension.input->open = wiimod_pro_open;
wdata->extension.input->close = wiimod_pro_close;
wdata->extension.input->dev.parent = &wdata->hdev->dev;
@@ -1806,20 +1900,23 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
set_bit(ABS_RX, wdata->extension.input->absbit);
set_bit(ABS_RY, wdata->extension.input->absbit);
input_set_abs_params(wdata->extension.input,
- ABS_X, -0x800, 0x800, 2, 4);
+ ABS_X, -0x400, 0x400, 4, 100);
input_set_abs_params(wdata->extension.input,
- ABS_Y, -0x800, 0x800, 2, 4);
+ ABS_Y, -0x400, 0x400, 4, 100);
input_set_abs_params(wdata->extension.input,
- ABS_RX, -0x800, 0x800, 2, 4);
+ ABS_RX, -0x400, 0x400, 4, 100);
input_set_abs_params(wdata->extension.input,
- ABS_RY, -0x800, 0x800, 2, 4);
+ ABS_RY, -0x400, 0x400, 4, 100);
ret = input_register_device(wdata->extension.input);
if (ret)
- goto err_free;
+ goto err_file;
return 0;
+err_file:
+ device_remove_file(&wdata->hdev->dev,
+ &dev_attr_pro_calib);
err_free:
input_free_device(wdata->extension.input);
wdata->extension.input = NULL;
@@ -1837,6 +1934,8 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
input_unregister_device(wdata->extension.input);
wdata->extension.input = NULL;
cancel_work_sync(&wdata->rumble_worker);
+ device_remove_file(&wdata->hdev->dev,
+ &dev_attr_pro_calib);
spin_lock_irqsave(&wdata->state.lock, flags);
wiiproto_req_rumble(wdata, 0);
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index 75db0c40003..10934aa129f 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -46,6 +46,7 @@
#define WIIPROTO_FLAG_DRM_LOCKED 0x8000
#define WIIPROTO_FLAG_BUILTIN_MP 0x010000
#define WIIPROTO_FLAG_NO_MP 0x020000
+#define WIIPROTO_FLAG_PRO_CALIB_DONE 0x040000
#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
@@ -135,6 +136,7 @@ struct wiimote_state {
/* calibration/cache data */
__u16 calib_bboard[4][3];
+ __s16 calib_pro_sticks[4];
__u8 cache_rumble;
};
@@ -327,7 +329,7 @@ static inline void wiimote_cmd_acquire_noint(struct wiimote_data *wdata)
static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
__u32 opt)
{
- INIT_COMPLETION(wdata->state.ready);
+ reinit_completion(&wdata->state.ready);
wdata->state.cmd = cmd;
wdata->state.opt = opt;
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1336193b04..5f7e55f4b7f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -455,10 +455,6 @@ static void i2c_hid_init_reports(struct hid_device *hid)
}
list_for_each_entry(report,
- &hid->report_enum[HID_INPUT_REPORT].report_list, list)
- i2c_hid_init_report(report, inbuf, ihid->bufsize);
-
- list_for_each_entry(report,
&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
i2c_hid_init_report(report, inbuf, ihid->bufsize);
@@ -854,10 +850,10 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
};
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4], *obj;
+ union acpi_object params[4];
struct acpi_object_list input;
struct acpi_device *adev;
+ unsigned long long value;
acpi_handle handle;
handle = ACPI_HANDLE(&client->dev);
@@ -878,22 +874,14 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
params[3].package.count = 0;
params[3].package.elements = NULL;
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+ if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
+ &value))) {
dev_err(&client->dev, "device _DSM execution failed\n");
return -ENODEV;
}
- obj = (union acpi_object *)buf.pointer;
- if (obj->type != ACPI_TYPE_INTEGER) {
- dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
- obj->type);
- kfree(buf.pointer);
- return -EINVAL;
- }
-
- pdata->hid_descriptor_address = obj->integer.value;
+ pdata->hid_descriptor_address = value;
- kfree(buf.pointer);
return 0;
}
@@ -1020,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client,
hid->hid_get_raw_report = i2c_hid_get_raw_report;
hid->hid_output_raw_report = i2c_hid_output_raw_report;
hid->dev.parent = &client->dev;
- ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev));
+ ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev));
hid->bus = BUS_I2C;
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 3fca3be0833..0db9a67278b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -84,6 +84,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 66d44581e1b..749f7b5c817 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -33,11 +33,13 @@ static ssize_t modalias_show(struct device *dev,
{
return sprintf(buf, "hsi:%s\n", dev_name(dev));
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute hsi_bus_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *hsi_bus_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(hsi_bus_dev);
static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -53,7 +55,7 @@ static int hsi_bus_match(struct device *dev, struct device_driver *driver)
static struct bus_type hsi_bus_type = {
.name = "hsi",
- .dev_attrs = hsi_bus_dev_attrs,
+ .dev_groups = hsi_bus_dev_groups,
.match = hsi_bus_match,
.uevent = hsi_bus_uevent,
};
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 6de6c98ce6e..cea623c36ae 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -47,8 +47,8 @@ static void vmbus_setevent(struct vmbus_channel *channel)
(unsigned long *) vmbus_connection.send_int_page +
(channel->offermsg.child_relid >> 5));
- monitorpage = vmbus_connection.monitor_pages;
- monitorpage++; /* Get the child to parent monitor page */
+ /* Get the child to parent monitor page */
+ monitorpage = vmbus_connection.monitor_pages[1];
sync_set_bit(channel->monitor_bit,
(unsigned long *)&monitorpage->trigger_group
@@ -60,50 +60,6 @@ static void vmbus_setevent(struct vmbus_channel *channel)
}
/*
- * vmbus_get_debug_info -Retrieve various channel debug info
- */
-void vmbus_get_debug_info(struct vmbus_channel *channel,
- struct vmbus_channel_debug_info *debuginfo)
-{
- struct hv_monitor_page *monitorpage;
- u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
- u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
-
- debuginfo->relid = channel->offermsg.child_relid;
- debuginfo->state = channel->state;
- memcpy(&debuginfo->interfacetype,
- &channel->offermsg.offer.if_type, sizeof(uuid_le));
- memcpy(&debuginfo->interface_instance,
- &channel->offermsg.offer.if_instance,
- sizeof(uuid_le));
-
- monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;
-
- debuginfo->monitorid = channel->offermsg.monitorid;
-
- debuginfo->servermonitor_pending =
- monitorpage->trigger_group[monitor_group].pending;
- debuginfo->servermonitor_latency =
- monitorpage->latency[monitor_group][monitor_offset];
- debuginfo->servermonitor_connectionid =
- monitorpage->parameter[monitor_group]
- [monitor_offset].connectionid.u.id;
-
- monitorpage++;
-
- debuginfo->clientmonitor_pending =
- monitorpage->trigger_group[monitor_group].pending;
- debuginfo->clientmonitor_latency =
- monitorpage->latency[monitor_group][monitor_offset];
- debuginfo->clientmonitor_connectionid =
- monitorpage->parameter[monitor_group]
- [monitor_offset].connectionid.u.id;
-
- hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
- hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
-}
-
-/*
* vmbus_open - Open the specified channel.
*/
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
@@ -855,6 +811,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
if (signal)
vmbus_setevent(channel);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index bbff5f200be..fa920469bf1 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -203,7 +203,8 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
struct vmbus_channel *primary_channel;
struct vmbus_channel_relid_released msg;
- vmbus_device_unregister(channel->device_obj);
+ if (channel->device_obj)
+ vmbus_device_unregister(channel->device_obj);
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = channel->offermsg.child_relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
@@ -216,7 +217,7 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
} else {
primary_channel = channel->primary_channel;
spin_lock_irqsave(&primary_channel->sc_lock, flags);
- list_del(&channel->listentry);
+ list_del(&channel->sc_list);
spin_unlock_irqrestore(&primary_channel->sc_lock, flags);
}
free_channel(channel);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 936093e0271..af6edf9b193 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -76,10 +76,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
msg->vmbus_version_requested = version;
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
- msg->monitor_page2 = virt_to_phys(
- (void *)((unsigned long)vmbus_connection.monitor_pages +
- PAGE_SIZE));
+ msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
+ msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
/*
* Add to list before we send the request since we may
@@ -169,9 +167,10 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages =
- (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 1);
- if (vmbus_connection.monitor_pages == NULL) {
+ vmbus_connection.monitor_pages[0] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
+ vmbus_connection.monitor_pages[1] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
+ if ((vmbus_connection.monitor_pages[0] == NULL) ||
+ (vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
goto cleanup;
}
@@ -229,10 +228,10 @@ cleanup:
vmbus_connection.int_page = NULL;
}
- if (vmbus_connection.monitor_pages) {
- free_pages((unsigned long)vmbus_connection.monitor_pages, 1);
- vmbus_connection.monitor_pages = NULL;
- }
+ free_pages((unsigned long)vmbus_connection.monitor_pages[0], 1);
+ free_pages((unsigned long)vmbus_connection.monitor_pages[1], 1);
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages[1] = NULL;
kfree(msginfo);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 88f4096fa07..f0c5e07c25e 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -304,7 +304,7 @@ err:
void hv_synic_free_cpu(int cpu)
{
kfree(hv_context.event_dpc[cpu]);
- if (hv_context.synic_message_page[cpu])
+ if (hv_context.synic_event_page[cpu])
free_page((unsigned long)hv_context.synic_event_page[cpu]);
if (hv_context.synic_message_page[cpu])
free_page((unsigned long)hv_context.synic_message_page[cpu]);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 273e3ddb3a2..62dfd246b94 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -97,7 +97,7 @@ static void shutdown_onchannelcallback(void *context)
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
- u8 execute_shutdown = false;
+ bool execute_shutdown = false;
u8 *shut_txf_buf = util_shutdown.recv_buffer;
struct shutdown_msg_data *shutdown_msg;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d84918fe19a..e05517616a0 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -514,6 +514,13 @@ struct hv_context {
extern struct hv_context hv_context;
+struct hv_ring_buffer_debug_info {
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
/* Hv Interface */
@@ -612,7 +619,7 @@ struct vmbus_connection {
* 2 pages - 1st page for parent->child notification and 2nd
* is child->parent notification
*/
- void *monitor_pages;
+ struct hv_monitor_page *monitor_pages[2];
struct list_head chn_msg_list;
spinlock_t channelmsg_lock;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index f9fe46f52cf..48aad4faea0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -46,24 +46,6 @@ static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
-struct hv_device_info {
- u32 chn_id;
- u32 chn_state;
- uuid_le chn_type;
- uuid_le chn_instance;
-
- u32 monitor_id;
- u32 server_monitor_pending;
- u32 server_monitor_latency;
- u32 server_monitor_conn_id;
- u32 client_monitor_pending;
- u32 client_monitor_latency;
- u32 client_monitor_conn_id;
-
- struct hv_dev_port_info inbound;
- struct hv_dev_port_info outbound;
-};
-
static int vmbus_exists(void)
{
if (hv_acpi_dev == NULL)
@@ -72,169 +54,361 @@ static int vmbus_exists(void)
return 0;
}
+#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
+static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
+{
+ int i;
+ for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
+ sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
+}
-static void get_channel_info(struct hv_device *device,
- struct hv_device_info *info)
+static u8 channel_monitor_group(struct vmbus_channel *channel)
{
- struct vmbus_channel_debug_info debug_info;
+ return (u8)channel->offermsg.monitorid / 32;
+}
- if (!device->channel)
- return;
+static u8 channel_monitor_offset(struct vmbus_channel *channel)
+{
+ return (u8)channel->offermsg.monitorid % 32;
+}
- vmbus_get_debug_info(device->channel, &debug_info);
+static u32 channel_pending(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ return monitor_page->trigger_group[monitor_group].pending;
+}
- info->chn_id = debug_info.relid;
- info->chn_state = debug_info.state;
- memcpy(&info->chn_type, &debug_info.interfacetype,
- sizeof(uuid_le));
- memcpy(&info->chn_instance, &debug_info.interface_instance,
- sizeof(uuid_le));
+static u32 channel_latency(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ u8 monitor_offset = channel_monitor_offset(channel);
+ return monitor_page->latency[monitor_group][monitor_offset];
+}
- info->monitor_id = debug_info.monitorid;
+static u32 channel_conn_id(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ u8 monitor_offset = channel_monitor_offset(channel);
+ return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
+}
- info->server_monitor_pending = debug_info.servermonitor_pending;
- info->server_monitor_latency = debug_info.servermonitor_latency;
- info->server_monitor_conn_id = debug_info.servermonitor_connectionid;
+static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
- info->client_monitor_pending = debug_info.clientmonitor_pending;
- info->client_monitor_latency = debug_info.clientmonitor_latency;
- info->client_monitor_conn_id = debug_info.clientmonitor_connectionid;
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+}
+static DEVICE_ATTR_RO(id);
- info->inbound.int_mask = debug_info.inbound.current_interrupt_mask;
- info->inbound.read_idx = debug_info.inbound.current_read_index;
- info->inbound.write_idx = debug_info.inbound.current_write_index;
- info->inbound.bytes_avail_toread =
- debug_info.inbound.bytes_avail_toread;
- info->inbound.bytes_avail_towrite =
- debug_info.inbound.bytes_avail_towrite;
+static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
- info->outbound.int_mask =
- debug_info.outbound.current_interrupt_mask;
- info->outbound.read_idx = debug_info.outbound.current_read_index;
- info->outbound.write_idx = debug_info.outbound.current_write_index;
- info->outbound.bytes_avail_toread =
- debug_info.outbound.bytes_avail_toread;
- info->outbound.bytes_avail_towrite =
- debug_info.outbound.bytes_avail_towrite;
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->state);
}
+static DEVICE_ATTR_RO(state);
-#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
-static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
+static ssize_t monitor_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
{
- int i;
- for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
- sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
+static DEVICE_ATTR_RO(monitor_id);
-/*
- * vmbus_show_device_attr - Show the device attribute in sysfs.
- *
- * This is invoked when user does a
- * "cat /sys/bus/vmbus/devices/<busdevice>/<attr name>"
- */
-static ssize_t vmbus_show_device_attr(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static ssize_t class_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "{%pUl}\n",
+ hv_dev->channel->offermsg.offer.if_type.b);
+}
+static DEVICE_ATTR_RO(class_id);
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "{%pUl}\n",
+ hv_dev->channel->offermsg.offer.if_instance.b);
+}
+static DEVICE_ATTR_RO(device_id);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- struct hv_device_info *device_info;
char alias_name[VMBUS_ALIAS_LEN + 1];
- int ret = 0;
- device_info = kzalloc(sizeof(struct hv_device_info), GFP_KERNEL);
- if (!device_info)
- return ret;
+ print_alias_name(hv_dev, alias_name);
+ return sprintf(buf, "vmbus:%s\n", alias_name);
+}
+static DEVICE_ATTR_RO(modalias);
- get_channel_info(hv_dev, device_info);
-
- if (!strcmp(dev_attr->attr.name, "class_id")) {
- ret = sprintf(buf, "{%pUl}\n", device_info->chn_type.b);
- } else if (!strcmp(dev_attr->attr.name, "device_id")) {
- ret = sprintf(buf, "{%pUl}\n", device_info->chn_instance.b);
- } else if (!strcmp(dev_attr->attr.name, "modalias")) {
- print_alias_name(hv_dev, alias_name);
- ret = sprintf(buf, "vmbus:%s\n", alias_name);
- } else if (!strcmp(dev_attr->attr.name, "state")) {
- ret = sprintf(buf, "%d\n", device_info->chn_state);
- } else if (!strcmp(dev_attr->attr.name, "id")) {
- ret = sprintf(buf, "%d\n", device_info->chn_id);
- } else if (!strcmp(dev_attr->attr.name, "out_intr_mask")) {
- ret = sprintf(buf, "%d\n", device_info->outbound.int_mask);
- } else if (!strcmp(dev_attr->attr.name, "out_read_index")) {
- ret = sprintf(buf, "%d\n", device_info->outbound.read_idx);
- } else if (!strcmp(dev_attr->attr.name, "out_write_index")) {
- ret = sprintf(buf, "%d\n", device_info->outbound.write_idx);
- } else if (!strcmp(dev_attr->attr.name, "out_read_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->outbound.bytes_avail_toread);
- } else if (!strcmp(dev_attr->attr.name, "out_write_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->outbound.bytes_avail_towrite);
- } else if (!strcmp(dev_attr->attr.name, "in_intr_mask")) {
- ret = sprintf(buf, "%d\n", device_info->inbound.int_mask);
- } else if (!strcmp(dev_attr->attr.name, "in_read_index")) {
- ret = sprintf(buf, "%d\n", device_info->inbound.read_idx);
- } else if (!strcmp(dev_attr->attr.name, "in_write_index")) {
- ret = sprintf(buf, "%d\n", device_info->inbound.write_idx);
- } else if (!strcmp(dev_attr->attr.name, "in_read_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->inbound.bytes_avail_toread);
- } else if (!strcmp(dev_attr->attr.name, "in_write_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->inbound.bytes_avail_towrite);
- } else if (!strcmp(dev_attr->attr.name, "monitor_id")) {
- ret = sprintf(buf, "%d\n", device_info->monitor_id);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_pending")) {
- ret = sprintf(buf, "%d\n", device_info->server_monitor_pending);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_latency")) {
- ret = sprintf(buf, "%d\n", device_info->server_monitor_latency);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_conn_id")) {
- ret = sprintf(buf, "%d\n",
- device_info->server_monitor_conn_id);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_pending")) {
- ret = sprintf(buf, "%d\n", device_info->client_monitor_pending);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_latency")) {
- ret = sprintf(buf, "%d\n", device_info->client_monitor_latency);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_conn_id")) {
- ret = sprintf(buf, "%d\n",
- device_info->client_monitor_conn_id);
- }
+static ssize_t server_monitor_pending_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
- kfree(device_info);
- return ret;
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(server_monitor_pending);
+
+static ssize_t client_monitor_pending_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_pending);
+
+static ssize_t server_monitor_latency_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_latency);
+
+static ssize_t client_monitor_latency_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_latency);
+
+static ssize_t server_monitor_conn_id_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_conn_id);
+
+static ssize_t client_monitor_conn_id_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_conn_id);
+
+static ssize_t out_intr_mask_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+}
+static DEVICE_ATTR_RO(out_intr_mask);
+
+static ssize_t out_read_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_read_index);
+}
+static DEVICE_ATTR_RO(out_read_index);
+
+static ssize_t out_write_index_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_write_index);
+}
+static DEVICE_ATTR_RO(out_write_index);
+
+static ssize_t out_read_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
+static DEVICE_ATTR_RO(out_read_bytes_avail);
+
+static ssize_t out_write_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+}
+static DEVICE_ATTR_RO(out_write_bytes_avail);
+
+static ssize_t in_intr_mask_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+}
+static DEVICE_ATTR_RO(in_intr_mask);
+
+static ssize_t in_read_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_read_index);
+}
+static DEVICE_ATTR_RO(in_read_index);
+
+static ssize_t in_write_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_write_index);
+}
+static DEVICE_ATTR_RO(in_write_index);
+
+static ssize_t in_read_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+}
+static DEVICE_ATTR_RO(in_read_bytes_avail);
+
+static ssize_t in_write_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+}
+static DEVICE_ATTR_RO(in_write_bytes_avail);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct device_attribute vmbus_device_attrs[] = {
- __ATTR(id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(state, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(class_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(device_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(monitor_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(modalias, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(server_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(server_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(server_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(client_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(client_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(client_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(out_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(in_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR_NULL
+static struct attribute *vmbus_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_state.attr,
+ &dev_attr_monitor_id.attr,
+ &dev_attr_class_id.attr,
+ &dev_attr_device_id.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_server_monitor_pending.attr,
+ &dev_attr_client_monitor_pending.attr,
+ &dev_attr_server_monitor_latency.attr,
+ &dev_attr_client_monitor_latency.attr,
+ &dev_attr_server_monitor_conn_id.attr,
+ &dev_attr_client_monitor_conn_id.attr,
+ &dev_attr_out_intr_mask.attr,
+ &dev_attr_out_read_index.attr,
+ &dev_attr_out_write_index.attr,
+ &dev_attr_out_read_bytes_avail.attr,
+ &dev_attr_out_write_bytes_avail.attr,
+ &dev_attr_in_intr_mask.attr,
+ &dev_attr_in_read_index.attr,
+ &dev_attr_in_write_index.attr,
+ &dev_attr_in_read_bytes_avail.attr,
+ &dev_attr_in_write_bytes_avail.attr,
+ NULL,
};
-
+ATTRIBUTE_GROUPS(vmbus);
/*
* vmbus_uevent - add uevent for our device
@@ -383,7 +557,7 @@ static struct bus_type hv_bus = {
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
- .dev_attrs = vmbus_device_attrs,
+ .dev_groups = vmbus_groups,
};
static const char *driver_name = "hyperv";
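The vmbus conversion above follows the standard pattern for retiring a bus_type .dev_attrs table: each attribute gets a <name>_show() helper plus DEVICE_ATTR_RO(), the attributes are collected in a NULL-terminated array, and ATTRIBUTE_GROUPS() generates the <name>_groups pointer that is handed to .dev_groups. A minimal sketch of that pattern, using an illustrative attribute name not taken from the patch:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* DEVICE_ATTR_RO(example) wires this up as the read callback */
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* defines example_group and example_groups */

/* the bus or class then points .dev_groups at example_groups */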
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b3ab9d43bb3..52d548f1dc1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -656,6 +656,7 @@ config SENSORS_LM75
- Analog Devices ADT75
- Dallas Semiconductor DS75, DS1775 and DS7505
+ - Global Mixed-mode Technology (GMT) G751
- Maxim MAX6625 and MAX6626
- Microchip MCP980x
- National Semiconductor LM75, LM75A
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 2ebd6ce4610..9c8a6bab822 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -164,7 +164,7 @@ static const u8 abituguru_bank2_max_threshold = 50;
static const int abituguru_pwm_settings_multiplier[5] = { 0, 1, 1, 1000, 1000 };
/*
* Min / Max allowed values for pwm_settings. Note: pwm1 (CPU fan) is a
- * special case the minium allowed pwm% setting for this is 30% (77) on
+ * special case the minimum allowed pwm% setting for this is 30% (77) on
* some MB's this special case is handled in the code!
*/
static const u8 abituguru_pwm_min[5] = { 0, 170, 170, 25, 25 };
@@ -517,7 +517,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
ABIT_UGURU_DEBUG(2, "testing bank1 sensor %d\n", (int)sensor_addr);
/*
- * Volt sensor test, enable volt low alarm, set min value ridicously
+ * Volt sensor test, enable volt low alarm, set min value ridiculously
* high, or vica versa if the reading is very high. If its a volt
* sensor this should always give us an alarm.
*/
@@ -564,7 +564,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/*
* Temp sensor test, enable sensor as a temp sensor, set beep value
- * ridicously low (but not too low, otherwise uguru ignores it).
+ * ridiculously low (but not too low, otherwise uguru ignores it).
* If its a temp sensor this should always give us an alarm.
*/
buf[0] = ABIT_UGURU_TEMP_HIGH_ALARM_ENABLE;
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 0cac8c0b001..4ae74aa8cdc 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -176,7 +176,7 @@ struct abituguru3_data {
/*
* The abituguru3 supports up to 48 sensors, and thus has registers
- * sets for 48 sensors, for convienence reasons / simplicity of the
+ * sets for 48 sensors, for convenience reasons / simplicity of the
* code we always read and store all registers for all 48 sensors
*/
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index a9e3d0152c0..6a34f7f48eb 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -381,8 +381,10 @@ static ssize_t show_str(struct device *dev,
val = resource->oem_info;
break;
default:
- BUG();
+ WARN(1, "Implementation error: unexpected attribute index %d\n",
+ attr->index);
val = "";
+ break;
}
return sprintf(buf, "%s\n", val);
@@ -436,7 +438,9 @@ static ssize_t show_val(struct device *dev,
val = resource->trip[attr->index - 7] * 1000;
break;
default:
- BUG();
+ WARN(1, "Implementation error: unexpected attribute index %d\n",
+ attr->index);
+ break;
}
return sprintf(buf, "%llu\n", val);
@@ -598,9 +602,8 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
/* Create a symlink to domain objects */
resource->domain_devices[i] = NULL;
- status = acpi_bus_get_device(element->reference.handle,
- &resource->domain_devices[i]);
- if (ACPI_FAILURE(status))
+ if (acpi_bus_get_device(element->reference.handle,
+ &resource->domain_devices[i]))
continue;
obj = resource->domain_devices[i];
@@ -855,7 +858,8 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
dev_info(&device->dev, "Capping in progress.\n");
break;
default:
- BUG();
+ WARN(1, "Unexpected event %d\n", event);
+ break;
}
mutex_unlock(&resource->lock);
@@ -991,7 +995,7 @@ static int __init acpi_power_meter_init(void)
result = acpi_bus_register_driver(&acpi_power_meter_driver);
if (result < 0)
- return -ENODEV;
+ return result;
return 0;
}
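For the acpi_power_meter change above: the unexpected-index branches in the sysfs show handlers now warn and fall back to a safe default instead of calling BUG(), and the init path propagates the real acpi_bus_register_driver() return value rather than flattening it to -ENODEV. A hedged sketch of the same defensive show-handler pattern (the attribute layout is illustrative, not the driver's):

static ssize_t example_val_show(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	unsigned long long val = 0;

	switch (attr->index) {
	case 0:
		val = 1000;	/* a real driver reads hardware state here */
		break;
	default:
		/* report the driver bug, but keep the machine running */
		WARN(1, "unexpected attribute index %d\n", attr->index);
		break;
	}
	return sprintf(buf, "%llu\n", val);
}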
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index 751b1f0264a..04c08c2f79b 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -203,7 +203,6 @@ out_err:
for (i--; i >= 0; i--)
device_remove_file(&spi->dev, &ad_input[i].dev_attr);
- spi_set_drvdata(spi, NULL);
mutex_unlock(&adc->lock);
return status;
}
@@ -218,7 +217,6 @@ static int adcxx_remove(struct spi_device *spi)
for (i = 0; i < 3 + adc->channels; i++)
device_remove_file(&spi->dev, &ad_input[i].dev_attr);
- spi_set_drvdata(spi, NULL);
mutex_unlock(&adc->lock);
return 0;
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 3a6d9ef1c16..b3498acb9ab 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -616,7 +616,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
data->gpio = gpio;
data->last_reading = jiffies;
- }; /* last_reading */
+ } /* last_reading */
if (!data->valid ||
time_after(jiffies, data->last_config + ADM1026_CONFIG_INTERVAL)) {
@@ -700,7 +700,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
}
data->last_config = jiffies;
- }; /* last_config */
+ } /* last_config */
data->valid = 1;
mutex_unlock(&data->update_lock);
@@ -1791,7 +1791,7 @@ static int adm1026_detect(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
/* We need to be able to do byte I/O */
return -ENODEV;
- };
+ }
/* Now, we do the remaining detection. */
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index da5f0789fb9..5994cf68e0a 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -42,13 +42,8 @@ static const u8 adt7310_reg_table[] = {
static int adt7310_spi_read_word(struct device *dev, u8 reg)
{
struct spi_device *spi = to_spi_device(dev);
- int ret;
- ret = spi_w8r16(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
- if (ret < 0)
- return ret;
-
- return be16_to_cpu((__force __be16)ret);
+ return spi_w8r16be(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}
static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index addb5a4d506..562cc3881d3 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -700,7 +700,7 @@ static int find_trange_value(int trange)
if (trange_values[i] == trange)
return i;
- return -ENODEV;
+ return -EINVAL;
}
static struct adt7462_data *adt7462_update_device(struct device *dev)
@@ -1294,9 +1294,8 @@ static ssize_t set_pwm_tmax(struct device *dev,
/* trange = tmax - tmin */
tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
trange_value = find_trange_value(trange - tmin);
-
if (trange_value < 0)
- return -EINVAL;
+ return trange_value;
temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 3ad9d849add..8d9f2a0e8ef 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -138,7 +138,7 @@ static inline u8 read_byte(struct i2c_client *client, u8 reg)
dev_err(&client->dev,
"Unable to read from register 0x%02x.\n", reg);
return 0;
- };
+ }
return res & 0xff;
}
@@ -149,7 +149,7 @@ static inline int write_byte(struct i2c_client *client, u8 reg, u8 data)
dev_err(&client->dev,
"Unable to write value 0x%02x to register 0x%02x.\n",
data, reg);
- };
+ }
return res;
}
@@ -1030,7 +1030,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
}
}
data->last_high_reading = jiffies;
- }; /* last_reading */
+ } /* last_reading */
/* Read all the low priority registers. */
@@ -1044,7 +1044,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
}
}
data->last_low_reading = jiffies;
- }; /* last_reading */
+ } /* last_reading */
data->valid = 1;
@@ -1084,11 +1084,11 @@ static void asc7621_init_client(struct i2c_client *client)
dev_err(&client->dev,
"Client (%d,0x%02x) config is locked.\n",
i2c_adapter_id(client->adapter), client->addr);
- };
+ }
if (!(value & 0x04)) {
dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
i2c_adapter_id(client->adapter), client->addr);
- };
+ }
/*
* Start monitoring
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b25c64302cb..1d7ff46812c 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -119,7 +119,7 @@ struct atk_data {
acpi_handle rtmp_handle;
acpi_handle rvlt_handle;
acpi_handle rfan_handle;
- /* new inteface */
+ /* new interface */
acpi_handle enumerate_handle;
acpi_handle read_handle;
acpi_handle write_handle;
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index aecb9ea7beb..ddff02e3e66 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -147,10 +147,9 @@ static ssize_t atxp1_storevcore(struct device *dev,
/* Calculate VID */
vid = vid_to_reg(vcore, data->vrm);
-
if (vid < 0) {
dev_err(dev, "VID calculation failed.\n");
- return -1;
+ return vid;
}
/*
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index a26ba7a17c2..872d76744e3 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -120,7 +120,7 @@ static const u8 DS1621_REG_TEMP[3] = {
/* Each client has this additional data */
struct ds1621_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -151,10 +151,10 @@ static inline u16 DS1621_TEMP_TO_REG(long temp, u8 zbits)
return temp;
}
-static void ds1621_init_client(struct i2c_client *client)
+static void ds1621_init_client(struct ds1621_data *data,
+ struct i2c_client *client)
{
u8 conf, new_conf, sreg, resol;
- struct ds1621_data *data = i2c_get_clientdata(client);
new_conf = conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF);
/* switch to continuous conversion mode */
@@ -197,8 +197,8 @@ static void ds1621_init_client(struct i2c_client *client)
static struct ds1621_data *ds1621_update_client(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u8 new_conf;
mutex_lock(&data->update_lock);
@@ -247,8 +247,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
long val;
int err;
@@ -258,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
mutex_lock(&data->update_lock);
data->temp[attr->index] = DS1621_TEMP_TO_REG(val, data->zbits);
- i2c_smbus_write_word_swapped(client, DS1621_REG_TEMP[attr->index],
+ i2c_smbus_write_word_swapped(data->client, DS1621_REG_TEMP[attr->index],
data->temp[attr->index]);
mutex_unlock(&data->update_lock);
return count;
@@ -282,16 +281,15 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%hu\n", data->update_interval);
}
static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long convrate;
s32 err;
int resol = 0;
@@ -343,8 +341,7 @@ static umode_t ds1621_attribute_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
if (attr == &dev_attr_update_interval.attr)
if (data->kind == ds1621 || data->kind == ds1625)
@@ -357,52 +354,31 @@ static const struct attribute_group ds1621_group = {
.attrs = ds1621_attributes,
.is_visible = ds1621_attribute_visible
};
+__ATTRIBUTE_GROUPS(ds1621);
static int ds1621_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ds1621_data *data;
- int err;
+ struct device *hwmon_dev;
data = devm_kzalloc(&client->dev, sizeof(struct ds1621_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->kind = id->driver_data;
+ data->client = client;
/* Initialize the DS1621 chip */
- ds1621_init_client(client);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &ds1621_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
- exit_remove_files:
- sysfs_remove_group(&client->dev.kobj, &ds1621_group);
- return err;
-}
-
-static int ds1621_remove(struct i2c_client *client)
-{
- struct ds1621_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ds1621_group);
+ ds1621_init_client(data, client);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ ds1621_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ds1621_id[] = {
@@ -422,7 +398,6 @@ static struct i2c_driver ds1621_driver = {
.name = "ds1621",
},
.probe = ds1621_probe,
- .remove = ds1621_remove,
.id_table = ds1621_id,
};
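The ds1621 conversion shows the variant of the pattern used when a driver defines its attribute_group by hand, here because it needs .is_visible: __ATTRIBUTE_GROUPS() only generates the NULL-terminated <name>_groups[] wrapper around the existing group, which is then passed straight to devm_hwmon_device_register_with_groups(). A minimal sketch with illustrative names:

static struct attribute *example_attrs[] = {
	/* per-chip sysfs attributes would be listed here */
	NULL
};

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	/* a real driver hides attributes depending on the chip kind */
	return attr->mode;
}

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_is_visible,
};
__ATTRIBUTE_GROUPS(example);	/* only defines example_groups[] */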
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 142e1cb8dea..90ec1173b8a 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -21,7 +21,6 @@
*
* TODO
* - cache alarm and critical limit registers
- * - add emc1404 support
*/
#include <linux/module.h>
@@ -40,7 +39,8 @@
#define THERMAL_REVISION_REG 0xff
struct thermal_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
struct mutex mutex;
/*
* Cache the hyst value so we don't keep re-reading it. In theory
@@ -53,10 +53,11 @@ struct thermal_data {
static ssize_t show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- int retval = i2c_smbus_read_byte_data(client, sda->index);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ int retval;
+ retval = i2c_smbus_read_byte_data(data->client, sda->index);
if (retval < 0)
return retval;
return sprintf(buf, "%d000\n", retval);
@@ -65,27 +66,27 @@ static ssize_t show_temp(struct device *dev,
static ssize_t show_bit(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
- int retval = i2c_smbus_read_byte_data(client, sda->nr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ int retval;
+ retval = i2c_smbus_read_byte_data(data->client, sda->nr);
if (retval < 0)
return retval;
- retval &= sda->index;
- return sprintf(buf, "%d\n", retval ? 1 : 0);
+ return sprintf(buf, "%d\n", !!(retval & sda->index));
}
static ssize_t store_temp(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = dev_get_drvdata(dev);
unsigned long val;
int retval;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
- retval = i2c_smbus_write_byte_data(client, sda->index,
+ retval = i2c_smbus_write_byte_data(data->client, sda->index,
DIV_ROUND_CLOSEST(val, 1000));
if (retval < 0)
return retval;
@@ -95,9 +96,9 @@ static ssize_t store_temp(struct device *dev,
static ssize_t store_bit(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct thermal_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int retval;
@@ -124,9 +125,9 @@ fail:
static ssize_t show_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct thermal_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int retval;
int hyst;
@@ -147,9 +148,9 @@ static ssize_t show_hyst(struct device *dev,
static ssize_t store_hyst(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct thermal_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int retval;
int hyst;
unsigned long val;
@@ -232,10 +233,26 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
show_hyst, store_hyst, 0x1A);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x2D);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x2C);
+static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x30);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 0x2A);
+static SENSOR_DEVICE_ATTR_2(temp4_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x08);
+static SENSOR_DEVICE_ATTR(temp4_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x30);
+
static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR,
show_bit, store_bit, 0x03, 0x40);
-static struct attribute *mid_att_thermal[] = {
+static struct attribute *emc1403_attrs[] = {
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
@@ -264,8 +281,24 @@ static struct attribute *mid_att_thermal[] = {
NULL
};
-static const struct attribute_group m_thermal_gr = {
- .attrs = mid_att_thermal
+static const struct attribute_group emc1403_group = {
+ .attrs = emc1403_attrs,
+};
+
+static struct attribute *emc1404_attrs[] = {
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group emc1404_group = {
+ .attrs = emc1404_attrs,
};
static int emc1403_detect(struct i2c_client *client,
@@ -286,10 +319,12 @@ static int emc1403_detect(struct i2c_client *client,
case 0x23:
strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
break;
- /*
- * Note: 0x25 is the 1404 which is very similar and this
- * driver could be extended
- */
+ case 0x25:
+ strlcpy(info->type, "emc1404", I2C_NAME_SIZE);
+ break;
+ case 0x27:
+ strlcpy(info->type, "emc1424", I2C_NAME_SIZE);
+ break;
default:
return -ENODEV;
}
@@ -304,43 +339,29 @@ static int emc1403_detect(struct i2c_client *client,
static int emc1403_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int res;
struct thermal_data *data;
+ struct device *hwmon_dev;
data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->mutex);
data->hyst_valid = jiffies - 1; /* Expired */
- res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
- if (res) {
- dev_warn(&client->dev, "create group failed\n");
- return res;
- }
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- res = PTR_ERR(data->hwmon_dev);
- dev_warn(&client->dev, "register hwmon dev failed\n");
- goto thermal_error;
- }
- dev_info(&client->dev, "EMC1403 Thermal chip found\n");
- return 0;
-
-thermal_error:
- sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
- return res;
-}
+ data->groups[0] = &emc1403_group;
+ if (id->driver_data)
+ data->groups[1] = &emc1404_group;
-static int emc1403_remove(struct i2c_client *client)
-{
- struct thermal_data *data = i2c_get_clientdata(client);
+ hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+ dev_info(&client->dev, "%s Thermal chip found\n", id->name);
return 0;
}
@@ -350,7 +371,9 @@ static const unsigned short emc1403_address_list[] = {
static const struct i2c_device_id emc1403_idtable[] = {
{ "emc1403", 0 },
+ { "emc1404", 1 },
{ "emc1423", 0 },
+ { "emc1424", 1 },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
@@ -362,7 +385,6 @@ static struct i2c_driver sensor_emc1403 = {
},
.detect = emc1403_detect,
.probe = emc1403_probe,
- .remove = emc1403_remove,
.id_table = emc1403_idtable,
.address_list = emc1403_address_list,
};
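The emc1403/emc1404 support above relies on a small trick: the driver data carries a NULL-terminated array of group pointers, the base group is always installed, and the temp4 group is appended only when id->driver_data marks a four-channel chip. A sketch of that selection logic, with hypothetical names standing in for the driver's own:

struct example_data {
	struct i2c_client *client;
	/* slot 0: base group, slot 1: optional group, slot 2: terminator */
	const struct attribute_group *groups[3];
};

/* inside probe(), after devm_kzalloc() has zeroed the array: */
data->groups[0] = &base_group;
if (id->driver_data)		/* non-zero driver_data => extra channel */
	data->groups[1] = &extra_group;
/* groups[2] stays NULL and terminates the list passed to registration */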
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 31b221eeee6..03d8592810b 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -2420,7 +2420,6 @@ static int f71882fg_probe(struct platform_device *pdev)
exit_unregister_sysfs:
f71882fg_remove(pdev); /* Will unregister the sysfs files for us */
return err; /* f71882fg_remove() also frees our data */
- return err;
}
static int f71882fg_remove(struct platform_device *pdev)
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index a837b94977f..80c42bea90e 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -275,7 +275,7 @@ static bool duty_mode_enabled(u8 pwm_enable)
case 3: /* Manual, speed mode */
return false;
default:
- BUG();
+ WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
return true;
}
}
@@ -291,7 +291,7 @@ static bool auto_mode_enabled(u8 pwm_enable)
case 4: /* Auto, duty mode */
return true;
default:
- BUG();
+ WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
return false;
}
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index b7d6a5704eb..73181be5b30 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -31,6 +31,7 @@
#include <linux/hwmon.h>
#include <linux/gpio.h>
#include <linux/gpio-fan.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
@@ -169,7 +170,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
dev_warn(&fan_data->pdev->dev,
"missing speed array entry for GPIO value 0x%x\n", ctrl_val);
- return -EINVAL;
+ return -ENODEV;
}
static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
@@ -309,12 +310,6 @@ exit_unlock:
return ret;
}
-static ssize_t show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "gpio-fan\n");
-}
-
static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable);
@@ -324,26 +319,23 @@ static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
- if (index == 1 && !data->alarm)
+ if (index == 0 && !data->alarm)
return 0;
- if (index > 1 && !data->ctrl)
+ if (index > 0 && !data->ctrl)
return 0;
return attr->mode;
}
static struct attribute *gpio_fan_attributes[] = {
- &dev_attr_name.attr,
- &dev_attr_fan1_alarm.attr, /* 1 */
- &dev_attr_pwm1.attr, /* 2 */
+ &dev_attr_fan1_alarm.attr, /* 0 */
+ &dev_attr_pwm1.attr, /* 1 */
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm1_mode.attr,
&dev_attr_fan1_input.attr,
@@ -358,6 +350,11 @@ static const struct attribute_group gpio_fan_group = {
.is_visible = gpio_fan_is_visible,
};
+static const struct attribute_group *gpio_fan_groups[] = {
+ &gpio_fan_group,
+ NULL
+};
+
static int fan_ctrl_init(struct gpio_fan_data *fan_data,
struct gpio_fan_platform_data *pdata)
{
@@ -384,7 +381,7 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
if (fan_data->speed_index < 0)
- return -ENODEV;
+ return fan_data->speed_index;
return 0;
}
@@ -539,24 +536,16 @@ static int gpio_fan_probe(struct platform_device *pdev)
return err;
}
- err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_group);
- if (err)
- return err;
-
/* Make this driver part of hwmon class. */
- fan_data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(fan_data->hwmon_dev)) {
- err = PTR_ERR(fan_data->hwmon_dev);
- goto err_remove;
- }
+ fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ "gpio-fan", fan_data,
+ gpio_fan_groups);
+ if (IS_ERR(fan_data->hwmon_dev))
+ return PTR_ERR(fan_data->hwmon_dev);
dev_info(&pdev->dev, "GPIO fan initialized\n");
return 0;
-
-err_remove:
- sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
- return err;
}
static int gpio_fan_remove(struct platform_device *pdev)
@@ -564,7 +553,6 @@ static int gpio_fan_remove(struct platform_device *pdev)
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
hwmon_device_unregister(fan_data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
return 0;
}
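One subtlety in the gpio-fan hunk: the index argument of an .is_visible callback is the position of the attribute inside the .attrs array, so dropping the driver-local name attribute (the hwmon core now creates it from the registration name) shifts every position down by one, which is why the comparisons change from 1 to 0. An annotated restatement of the new layout, for clarity only:

static struct attribute *gpio_fan_attrs_sketch[] = {
	&dev_attr_fan1_alarm.attr,	/* index 0: shown only if data->alarm */
	&dev_attr_pwm1.attr,		/* index 1 and up: need data->ctrl */
	&dev_attr_pwm1_enable.attr,
	/* ... remaining control and fan attributes ... */
	NULL
};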
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 646314f7c83..e176a43af63 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/hwmon.h>
@@ -25,35 +26,122 @@
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
-static struct class *hwmon_class;
+struct hwmon_device {
+ const char *name;
+ struct device dev;
+};
+#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+
+static ssize_t
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_hwmon_device(dev)->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static struct attribute *hwmon_dev_attrs[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+
+static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+
+ if (to_hwmon_device(dev)->name == NULL)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group hwmon_dev_attr_group = {
+ .attrs = hwmon_dev_attrs,
+ .is_visible = hwmon_dev_name_is_visible,
+};
+
+static const struct attribute_group *hwmon_dev_attr_groups[] = {
+ &hwmon_dev_attr_group,
+ NULL
+};
+
+static void hwmon_dev_release(struct device *dev)
+{
+ kfree(to_hwmon_device(dev));
+}
+
+static struct class hwmon_class = {
+ .name = "hwmon",
+ .owner = THIS_MODULE,
+ .dev_groups = hwmon_dev_attr_groups,
+ .dev_release = hwmon_dev_release,
+};
static DEFINE_IDA(hwmon_ida);
/**
- * hwmon_device_register - register w/ hwmon
- * @dev: the device to register
+ * hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
*
* Returns the pointer to the new device.
*/
-struct device *hwmon_device_register(struct device *dev)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups)
{
- struct device *hwdev;
- int id;
+ struct hwmon_device *hwdev;
+ int err, id;
id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
- hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
- HWMON_ID_FORMAT, id);
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (hwdev == NULL) {
+ err = -ENOMEM;
+ goto ida_remove;
+ }
- if (IS_ERR(hwdev))
- ida_simple_remove(&hwmon_ida, id);
+ hwdev->name = name;
+ hwdev->dev.class = &hwmon_class;
+ hwdev->dev.parent = dev;
+ hwdev->dev.groups = groups;
+ hwdev->dev.of_node = dev ? dev->of_node : NULL;
+ dev_set_drvdata(&hwdev->dev, drvdata);
+ dev_set_name(&hwdev->dev, HWMON_ID_FORMAT, id);
+ err = device_register(&hwdev->dev);
+ if (err)
+ goto free;
- return hwdev;
+ return &hwdev->dev;
+
+free:
+ kfree(hwdev);
+ida_remove:
+ ida_simple_remove(&hwmon_ida, id);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
+
+/**
+ * hwmon_device_register - register w/ hwmon
+ * @dev: the device to register
+ *
+ * hwmon_device_unregister() must be called when the device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new device.
+ */
+struct device *hwmon_device_register(struct device *dev)
+{
+ return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(hwmon_device_register);
@@ -75,6 +163,69 @@ void hwmon_device_unregister(struct device *dev)
}
EXPORT_SYMBOL_GPL(hwmon_device_unregister);
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+ struct device *hwdev = *(struct device **)res;
+
+ hwmon_device_unregister(hwdev);
+}
+
+/**
+ * devm_hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
+ *
+ * Returns the pointer to the new device. The new device is automatically
+ * unregistered with the parent device.
+ */
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups)
+{
+ struct device **ptr, *hwdev;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+ if (IS_ERR(hwdev))
+ goto error;
+
+ *ptr = hwdev;
+ devres_add(dev, ptr);
+ return hwdev;
+
+error:
+ devres_free(ptr);
+ return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+
+static int devm_hwmon_match(struct device *dev, void *res, void *data)
+{
+ struct device **hwdev = res;
+
+ return *hwdev == data;
+}
+
+/**
+ * devm_hwmon_device_unregister - removes a previously registered hwmon device
+ *
+ * @dev: the parent device of the device to unregister
+ */
+void devm_hwmon_device_unregister(struct device *dev)
+{
+ WARN_ON(devres_release(dev, devm_hwmon_release, devm_hwmon_match, dev));
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_unregister);
+
static void __init hwmon_pci_quirks(void)
{
#if defined CONFIG_X86 && defined CONFIG_PCI
@@ -105,19 +256,21 @@ static void __init hwmon_pci_quirks(void)
static int __init hwmon_init(void)
{
+ int err;
+
hwmon_pci_quirks();
- hwmon_class = class_create(THIS_MODULE, "hwmon");
- if (IS_ERR(hwmon_class)) {
- pr_err("couldn't create sysfs class\n");
- return PTR_ERR(hwmon_class);
+ err = class_register(&hwmon_class);
+ if (err) {
+ pr_err("couldn't register hwmon sysfs class\n");
+ return err;
}
return 0;
}
static void __exit hwmon_exit(void)
{
- class_destroy(hwmon_class);
+ class_unregister(&hwmon_class);
}
subsys_initcall(hwmon_init);
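With hwmon_device_register_with_groups() and its devm_ variant in place, a typical hwmon driver probe shrinks to allocation plus one registration call; the core creates the name attribute and tears everything down automatically. A minimal, hypothetical I2C probe using the new API (struct and group names are illustrative):

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_data *data;
	struct device *hwmon_dev;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->client = client;

	/* example_groups comes from ATTRIBUTE_GROUPS(example) as above */
	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
							   client->name,
							   data,
							   example_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}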
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index c6fdd5bd395..5378fdefc1f 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -63,7 +63,7 @@
#define INA209_SHUNT_DEFAULT 10000 /* uOhm */
struct ina209_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -78,8 +78,8 @@ struct ina209_data {
static struct ina209_data *ina209_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina209_data *data = i2c_get_clientdata(client);
+ struct ina209_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ina209_data *ret = data;
s32 val;
int i;
@@ -234,7 +234,6 @@ static ssize_t ina209_set_interval(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
struct ina209_data *data = ina209_update_device(dev);
long val;
u16 regval;
@@ -250,7 +249,8 @@ static ssize_t ina209_set_interval(struct device *dev,
mutex_lock(&data->update_lock);
regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
val);
- i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+ i2c_smbus_write_word_swapped(data->client, INA209_CONFIGURATION,
+ regval);
data->regs[INA209_CONFIGURATION] = regval;
data->update_interval = ina209_interval_from_reg(regval);
mutex_unlock(&data->update_lock);
@@ -260,8 +260,7 @@ static ssize_t ina209_set_interval(struct device *dev,
static ssize_t ina209_show_interval(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina209_data *data = i2c_get_clientdata(client);
+ struct ina209_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
}
@@ -285,9 +284,9 @@ static ssize_t ina209_reset_history(struct device *dev,
const char *buf,
size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina209_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina209_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u32 mask = attr->index;
long val;
int i, ret;
@@ -312,7 +311,6 @@ static ssize_t ina209_set_value(struct device *dev,
const char *buf,
size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
struct ina209_data *data = ina209_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int reg = attr->index;
@@ -332,7 +330,7 @@ static ssize_t ina209_set_value(struct device *dev,
count = ret;
goto abort;
}
- i2c_smbus_write_word_swapped(client, reg, ret);
+ i2c_smbus_write_word_swapped(data->client, reg, ret);
data->regs[reg] = ret;
abort:
mutex_unlock(&data->update_lock);
@@ -457,7 +455,7 @@ static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
-static struct attribute *ina209_attributes[] = {
+static struct attribute *ina209_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_input_highest.dev_attr.attr,
&sensor_dev_attr_in0_input_lowest.dev_attr.attr,
@@ -498,10 +496,7 @@ static struct attribute *ina209_attributes[] = {
NULL,
};
-
-static const struct attribute_group ina209_group = {
- .attrs = ina209_attributes,
-};
+ATTRIBUTE_GROUPS(ina209);
static void ina209_restore_conf(struct i2c_client *client,
struct ina209_data *data)
@@ -565,6 +560,7 @@ static int ina209_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = client->adapter;
struct ina209_data *data;
+ struct device *hwmon_dev;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -575,27 +571,23 @@ static int ina209_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
ret = ina209_init_client(client, data);
if (ret)
return ret;
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
- if (ret)
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name,
+ data, ina209_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
goto out_restore_conf;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
}
return 0;
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ina209_group);
out_restore_conf:
ina209_restore_conf(client, data);
return ret;
@@ -605,8 +597,6 @@ static int ina209_remove(struct i2c_client *client)
{
struct ina209_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ina209_group);
ina209_restore_conf(client, data);
return 0;
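Note that ina209 keeps its .remove callback (and the i2c_set_clientdata() call) even after switching to the devm registration helper, because the chip configuration still has to be restored on unbind; the devm release action unregisters the hwmon device only after .remove has returned. A hedged sketch of what such a remove path still needs to do, where example_restore_conf() stands in for the driver's chip cleanup:

static int example_remove(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);

	/*
	 * The hwmon device registered with the devm_ helper is torn
	 * down automatically after this callback returns, so only
	 * chip-specific cleanup is left to do here.
	 */
	example_restore_conf(client, data);
	return 0;
}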
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 70a39a8ac01..93d26e8af3e 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -78,7 +78,7 @@ struct ina2xx_config {
};
struct ina2xx_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
const struct ina2xx_config *config;
struct mutex update_lock;
@@ -112,8 +112,8 @@ static const struct ina2xx_config ina2xx_config[] = {
static struct ina2xx_data *ina2xx_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina2xx_data *data = i2c_get_clientdata(client);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ina2xx_data *ret = data;
mutex_lock(&data->update_lock);
@@ -203,41 +203,39 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
INA2XX_POWER);
/* pointers to created device attributes */
-static struct attribute *ina2xx_attributes[] = {
+static struct attribute *ina2xx_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
NULL,
};
-
-static const struct attribute_group ina2xx_group = {
- .attrs = ina2xx_attributes,
-};
+ATTRIBUTE_GROUPS(ina2xx);
static int ina2xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
- struct ina2xx_data *data;
struct ina2xx_platform_data *pdata;
- int ret;
- u32 val;
+ struct device *dev = &client->dev;
+ struct ina2xx_data *data;
+ struct device *hwmon_dev;
long shunt = 10000; /* default shunt value 10mOhms */
+ u32 val;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- if (dev_get_platdata(&client->dev)) {
- pdata = dev_get_platdata(&client->dev);
+ if (dev_get_platdata(dev)) {
+ pdata = dev_get_platdata(dev);
shunt = pdata->shunt_uohms;
- } else if (!of_property_read_u32(client->dev.of_node,
- "shunt-resistor", &val)) {
- shunt = val;
+ } else if (!of_property_read_u32(dev->of_node,
+ "shunt-resistor", &val)) {
+ shunt = val;
}
if (shunt <= 0)
@@ -255,37 +253,18 @@ static int ina2xx_probe(struct i2c_client *client,
i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
data->config->calibration_factor / shunt);
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
- ret = sysfs_create_group(&client->dev.kobj, &ina2xx_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_err_hwmon;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, ina2xx_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- dev_info(&client->dev, "power monitor %s (Rshunt = %li uOhm)\n",
+ dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
id->name, shunt);
return 0;
-
-out_err_hwmon:
- sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
- return ret;
-}
-
-static int ina2xx_remove(struct i2c_client *client)
-{
- struct ina2xx_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
-
- return 0;
}
static const struct i2c_device_id ina2xx_id[] = {
@@ -302,7 +281,6 @@ static struct i2c_driver ina2xx_driver = {
.name = "ina2xx",
},
.probe = ina2xx_probe,
- .remove = ina2xx_remove,
.id_table = ina2xx_id,
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4a58f130fd4..6013611e4f2 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -163,7 +163,7 @@ static struct jc42_chips jc42_chips[] = {
/* Each client has this additional data */
struct jc42_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock; /* protect register access */
bool extended; /* true if extended range supported */
bool valid;
@@ -193,21 +193,21 @@ MODULE_DEVICE_TABLE(i2c, jc42_id);
static int jc42_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
data->config |= JC42_CFG_SHUTDOWN;
- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+ i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+ data->config);
return 0;
}
static int jc42_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
data->config &= ~JC42_CFG_SHUTDOWN;
- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+ i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+ data->config);
return 0;
}
@@ -317,15 +317,14 @@ static ssize_t set_##value(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct jc42_data *data = i2c_get_clientdata(client); \
+ struct jc42_data *data = dev_get_drvdata(dev); \
int err, ret = count; \
long val; \
- if (kstrtol(buf, 10, &val) < 0) \
+ if (kstrtol(buf, 10, &val) < 0) \
return -EINVAL; \
mutex_lock(&data->update_lock); \
data->value = jc42_temp_to_reg(val, data->extended); \
- err = i2c_smbus_write_word_swapped(client, reg, data->value); \
+ err = i2c_smbus_write_word_swapped(data->client, reg, data->value); \
if (err < 0) \
ret = err; \
mutex_unlock(&data->update_lock); \
@@ -344,8 +343,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
unsigned long val;
int diff, hyst;
int err;
@@ -368,7 +366,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
mutex_lock(&data->update_lock);
data->config = (data->config & ~JC42_CFG_HYST_MASK)
| (hyst << JC42_CFG_HYST_SHIFT);
- err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
+ err = i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
data->config);
if (err < 0)
ret = err;
@@ -430,8 +428,7 @@ static umode_t jc42_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
unsigned int config = data->config;
bool readonly;
@@ -452,6 +449,7 @@ static const struct attribute_group jc42_group = {
.attrs = jc42_attributes,
.is_visible = jc42_attribute_mode,
};
+__ATTRIBUTE_GROUPS(jc42);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
@@ -487,14 +485,16 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct jc42_data *data;
- int config, cap, err;
struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct jc42_data *data;
+ int config, cap;
data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -515,29 +515,15 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &jc42_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- sysfs_remove_group(&dev->kobj, &jc42_group);
- return err;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ jc42_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static int jc42_remove(struct i2c_client *client)
{
struct jc42_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &jc42_group);
/* Restore original configuration except hysteresis */
if ((data->config & ~JC42_CFG_HYST_MASK) !=
@@ -553,8 +539,8 @@ static int jc42_remove(struct i2c_client *client)
static struct jc42_data *jc42_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct jc42_data *ret = data;
int val;
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index e0d66b9590a..a183e488db7 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -66,7 +66,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
mutex_lock(&hwmon->lock);
- INIT_COMPLETION(*completion);
+ reinit_completion(completion);
enable_irq(hwmon->irq);
hwmon->cell->enable(to_platform_device(dev));
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 016efa26ba7..505a59e100b 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -174,7 +174,6 @@ out_dev_reg_failed:
out_dev_create_file_failed:
device_remove_file(&spi->dev, &dev_attr_temp1_input);
out_dev_create_temp_file_failed:
- spi_set_drvdata(spi, NULL);
return status;
}
@@ -185,7 +184,6 @@ static int lm70_remove(struct spi_device *spi)
hwmon_device_unregister(p_lm70->hwmon_dev);
device_remove_file(&spi->dev, &dev_attr_temp1_input);
device_remove_file(&spi->dev, &dev_attr_name);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9bde9644b10..9653bb870a4 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -55,7 +55,7 @@ static const unsigned short lm73_convrates[] = {
};
struct lm73_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex lock;
u8 ctrl; /* control register value */
};
@@ -66,7 +66,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = dev_get_drvdata(dev);
long temp;
short value;
s32 err;
@@ -77,7 +77,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
/* Write value */
value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
- err = i2c_smbus_write_word_swapped(client, attr->index, value);
+ err = i2c_smbus_write_word_swapped(data->client, attr->index, value);
return (err < 0) ? err : count;
}
@@ -85,10 +85,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = dev_get_drvdata(dev);
int temp;
- s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+ s32 err = i2c_smbus_read_word_swapped(data->client, attr->index);
if (err < 0)
return err;
@@ -101,8 +101,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm73_data *data = i2c_get_clientdata(client);
+ struct lm73_data *data = dev_get_drvdata(dev);
unsigned long convrate;
s32 err;
int res = 0;
@@ -124,7 +123,8 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
mutex_lock(&data->lock);
data->ctrl &= LM73_CTRL_TO_MASK;
data->ctrl |= res << LM73_CTRL_RES_SHIFT;
- err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+ err = i2c_smbus_write_byte_data(data->client, LM73_REG_CTRL,
+ data->ctrl);
mutex_unlock(&data->lock);
if (err < 0)
@@ -136,8 +136,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm73_data *data = i2c_get_clientdata(client);
+ struct lm73_data *data = dev_get_drvdata(dev);
int res;
res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
@@ -147,13 +146,12 @@ static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
static ssize_t show_maxmin_alarm(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct lm73_data *data = i2c_get_clientdata(client);
+ struct lm73_data *data = dev_get_drvdata(dev);
s32 ctrl;
mutex_lock(&data->lock);
- ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+ ctrl = i2c_smbus_read_byte_data(data->client, LM73_REG_CTRL);
if (ctrl < 0)
goto abort;
data->ctrl = ctrl;
@@ -183,7 +181,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
-static struct attribute *lm73_attributes[] = {
+static struct attribute *lm73_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
@@ -192,10 +190,7 @@ static struct attribute *lm73_attributes[] = {
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
NULL
};
-
-static const struct attribute_group lm73_group = {
- .attrs = lm73_attributes,
-};
+ATTRIBUTE_GROUPS(lm73);
/*-----------------------------------------------------------------------*/
@@ -204,16 +199,16 @@ static const struct attribute_group lm73_group = {
static int
lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- int status;
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct lm73_data *data;
int ctrl;
- data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm73_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->lock);
ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
@@ -221,33 +216,13 @@ lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ctrl;
data->ctrl = ctrl;
- /* Register sysfs hooks */
- status = sysfs_create_group(&client->dev.kobj, &lm73_group);
- if (status)
- return status;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- status = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm73_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- dev_info(&client->dev, "%s: sensor '%s'\n",
- dev_name(data->hwmon_dev), client->name);
-
- return 0;
-
-exit_remove:
- sysfs_remove_group(&client->dev.kobj, &lm73_group);
- return status;
-}
-
-static int lm73_remove(struct i2c_client *client)
-{
- struct lm73_data *data = i2c_get_clientdata(client);
+ dev_info(dev, "sensor '%s'\n", client->name);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm73_group);
return 0;
}
@@ -300,7 +275,6 @@ static struct i2c_driver lm73_driver = {
.name = "lm73",
},
.probe = lm73_probe,
- .remove = lm73_remove,
.id_table = lm73_ids,
.detect = lm73_detect,
.address_list = normal_i2c,
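
The lm73 conversion above shows the pattern applied to most drivers in this section: the driver data keeps a pointer to the i2c_client, the sysfs attributes are declared with ATTRIBUTE_GROUPS(), and devm_hwmon_device_register_with_groups() creates the hwmon device and removes it together with its groups on unbind, which is why the .remove callbacks can be dropped. A minimal sketch of that pattern, using a hypothetical "foo" driver rather than code from this patch:

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>

struct foo_data {
	struct i2c_client *client;	/* kept for SMBus accesses from sysfs */
	struct mutex lock;
};

static ssize_t show_temp(struct device *dev, struct device_attribute *da,
			 char *buf)
{
	struct foo_data *data = dev_get_drvdata(dev);
	int val = i2c_smbus_read_byte_data(data->client, 0x00); /* register 0x00 is illustrative */

	if (val < 0)
		return val;
	return sprintf(buf, "%d\n", val * 1000);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);

static struct attribute *foo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo);			/* generates foo_group and foo_groups */

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct device *hwmon_dev;
	struct foo_data *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->client = client;
	mutex_init(&data->lock);

	/* hwmon device and attribute groups are torn down on driver unbind */
	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							   data, foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}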
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index c03b490bba8..7e3ef134f1d 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
ds1775,
ds75,
ds7505,
+ g751,
lm75,
lm75a,
max6625,
@@ -208,6 +209,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->resolution = 12;
data->sample_time = HZ / 4;
break;
+ case g751:
case lm75:
case lm75a:
data->resolution = 9;
@@ -296,6 +298,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "ds1775", ds1775, },
{ "ds75", ds75, },
{ "ds7505", ds7505, },
+ { "g751", g751, },
{ "lm75", lm75, },
{ "lm75a", lm75a, },
{ "max6625", max6625, },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index cdff7428295..4c4c1421bf2 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -60,6 +60,11 @@
* This driver also supports the G781 from GMT. This device is compatible
* with the ADM1032.
*
+ * This driver also supports TMP451 from Texas Instruments. This device is
+ * supported in both compatibility and extended mode. It's mostly compatible
+ * with ADT7461 except for local temperature low byte register and max
+ * conversion rate.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
@@ -89,6 +94,8 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
/*
* Addresses to scan
@@ -110,7 +117,7 @@ static const unsigned short normal_i2c[] = {
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781 };
+ max6646, w83l771, max6696, sa56004, g781, tmp451 };
/*
* The LM90 registers
@@ -167,6 +174,9 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
+/* TMP451 registers */
+#define TMP451_REG_R_LOCAL_TEMPL 0x15
+
/*
* Device flags
*/
@@ -179,6 +189,23 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
+/* LM90 status */
+#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
+#define LM90_STATUS_RTHRM (1 << 1) /* remote THERM limit tripped */
+#define LM90_STATUS_ROPEN (1 << 2) /* remote is an open circuit */
+#define LM90_STATUS_RLOW (1 << 3) /* remote low temp limit tripped */
+#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
+#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
+#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
+
+#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
+#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
+#define MAX6696_STATUS2_R2LOW (1 << 3) /* remote2 low temp limit tripped */
+#define MAX6696_STATUS2_R2HIGH (1 << 4) /* remote2 high temp limit tripped */
+#define MAX6696_STATUS2_ROT2 (1 << 5) /* remote emergency limit tripped */
+#define MAX6696_STATUS2_R2OT2 (1 << 6) /* remote2 emergency limit tripped */
+#define MAX6696_STATUS2_LOT2 (1 << 7) /* local emergency limit tripped */
+
/*
* Driver data (common to all clients)
*/
@@ -205,6 +232,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "nct1008", adt7461 },
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
+ { "tmp451", tmp451 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -278,7 +306,7 @@ static const struct lm90_params lm90_params[] = {
[max6696] = {
.flags = LM90_HAVE_EMERGENCY
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
- .alert_alarms = 0x187c,
+ .alert_alarms = 0x1c7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
@@ -293,6 +321,43 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
.reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
},
+ [tmp451] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 9,
+ .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+ }
+};
+
+/*
+ * TEMP8 register index
+ */
+enum lm90_temp8_reg_index {
+ LOCAL_LOW = 0,
+ LOCAL_HIGH,
+ LOCAL_CRIT,
+ REMOTE_CRIT,
+ LOCAL_EMERG, /* max6659 and max6695/96 */
+ REMOTE_EMERG, /* max6659 and max6695/96 */
+ REMOTE2_CRIT, /* max6695/96 only */
+ REMOTE2_EMERG, /* max6695/96 only */
+ TEMP8_REG_NUM
+};
+
+/*
+ * TEMP11 register index
+ */
+enum lm90_temp11_reg_index {
+ REMOTE_TEMP = 0,
+ REMOTE_LOW,
+ REMOTE_HIGH,
+ REMOTE_OFFSET, /* except max6646, max6657/58/59, and max6695/96 */
+ LOCAL_TEMP,
+ REMOTE2_TEMP, /* max6695/96 only */
+ REMOTE2_LOW, /* max6695/96 only */
+ REMOTE2_HIGH, /* max6695/96 only */
+ TEMP11_REG_NUM
};
/*
@@ -302,6 +367,7 @@ static const struct lm90_params lm90_params[] = {
struct lm90_data {
struct device *hwmon_dev;
struct mutex update_lock;
+ struct regulator *regulator;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
int kind;
@@ -317,25 +383,8 @@ struct lm90_data {
u8 reg_local_ext; /* local extension register offset */
/* registers values */
- s8 temp8[8]; /* 0: local low limit
- * 1: local high limit
- * 2: local critical limit
- * 3: remote critical limit
- * 4: local emergency limit (max6659 and max6695/96)
- * 5: remote emergency limit (max6659 and max6695/96)
- * 6: remote 2 critical limit (max6695/96 only)
- * 7: remote 2 emergency limit (max6695/96 only)
- */
- s16 temp11[8]; /* 0: remote input
- * 1: remote low limit
- * 2: remote high limit
- * 3: remote offset (except max6646, max6657/58/59,
- * and max6695/96)
- * 4: local input
- * 5: remote 2 input (max6695/96 only)
- * 6: remote 2 low limit (max6695/96 only)
- * 7: remote 2 high limit (max6695/96 only)
- */
+ s8 temp8[TEMP8_REG_NUM];
+ s16 temp11[TEMP11_REG_NUM];
u8 temp_hyst;
u16 alarms; /* bitvector (upper 8 bits for max6695/96) */
};
@@ -477,37 +526,42 @@ static struct lm90_data *lm90_update_device(struct device *dev)
u8 alarms;
dev_dbg(&client->dev, "Updating lm90 data.\n");
- lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, &data->temp8[0]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, &data->temp8[1]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, &data->temp8[2]);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_LOW,
+ &data->temp8[LOCAL_LOW]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH,
+ &data->temp8[LOCAL_HIGH]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT,
+ &data->temp8[LOCAL_CRIT]);
+ lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
+ &data->temp8[REMOTE_CRIT]);
lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
if (data->reg_local_ext) {
lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
data->reg_local_ext,
- &data->temp11[4]);
+ &data->temp11[LOCAL_TEMP]);
} else {
if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
&h) == 0)
- data->temp11[4] = h << 8;
+ data->temp11[LOCAL_TEMP] = h << 8;
}
lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL, &data->temp11[0]);
+ LM90_REG_R_REMOTE_TEMPL,
+ &data->temp11[REMOTE_TEMP]);
if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
- data->temp11[1] = h << 8;
+ data->temp11[REMOTE_LOW] = h << 8;
if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
&& lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
&l) == 0)
- data->temp11[1] |= l;
+ data->temp11[REMOTE_LOW] |= l;
}
if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
- data->temp11[2] = h << 8;
+ data->temp11[REMOTE_HIGH] = h << 8;
if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
&& lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
&l) == 0)
- data->temp11[2] |= l;
+ data->temp11[REMOTE_HIGH] |= l;
}
if (data->flags & LM90_HAVE_OFFSET) {
@@ -515,13 +569,13 @@ static struct lm90_data *lm90_update_device(struct device *dev)
&h) == 0
&& lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
&l) == 0)
- data->temp11[3] = (h << 8) | l;
+ data->temp11[REMOTE_OFFSET] = (h << 8) | l;
}
if (data->flags & LM90_HAVE_EMERGENCY) {
lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
- &data->temp8[4]);
+ &data->temp8[LOCAL_EMERG]);
lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[5]);
+ &data->temp8[REMOTE_EMERG]);
}
lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
data->alarms = alarms; /* save as 16 bit value */
@@ -529,15 +583,16 @@ static struct lm90_data *lm90_update_device(struct device *dev)
if (data->kind == max6696) {
lm90_select_remote_channel(client, data, 1);
lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
- &data->temp8[6]);
+ &data->temp8[REMOTE2_CRIT]);
lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[7]);
+ &data->temp8[REMOTE2_EMERG]);
lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL, &data->temp11[5]);
+ LM90_REG_R_REMOTE_TEMPL,
+ &data->temp11[REMOTE2_TEMP]);
if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
- data->temp11[6] = h << 8;
+ data->temp11[REMOTE2_LOW] = h << 8;
if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
- data->temp11[7] = h << 8;
+ data->temp11[REMOTE2_HIGH] = h << 8;
lm90_select_remote_channel(client, data, 0);
if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
@@ -709,7 +764,7 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
struct lm90_data *data = lm90_update_device(dev);
int temp;
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[attr->index]);
@@ -726,7 +781,7 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- static const u8 reg[8] = {
+ static const u8 reg[TEMP8_REG_NUM] = {
LM90_REG_W_LOCAL_LOW,
LM90_REG_W_LOCAL_HIGH,
LM90_REG_W_LOCAL_CRIT,
@@ -753,7 +808,7 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
val -= 16000;
mutex_lock(&data->update_lock);
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
data->temp8[nr] = temp_to_u8_adt7461(data, val);
else if (data->kind == max6646)
data->temp8[nr] = temp_to_u8(val);
@@ -775,7 +830,7 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
struct lm90_data *data = lm90_update_device(dev);
int temp;
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
else if (data->kind == max6646)
temp = temp_from_u16(data->temp11[attr->index]);
@@ -821,7 +876,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
val -= 16000;
mutex_lock(&data->update_lock);
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
data->temp11[index] = temp_to_u16_adt7461(data, val);
else if (data->kind == max6646)
data->temp11[index] = temp_to_u8(val) << 8;
@@ -850,7 +905,7 @@ static ssize_t show_temphyst(struct device *dev,
struct lm90_data *data = lm90_update_device(dev);
int temp;
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[attr->index]);
@@ -878,12 +933,12 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
return err;
mutex_lock(&data->update_lock);
- if (data->kind == adt7461)
- temp = temp_from_u8_adt7461(data, data->temp8[2]);
+ if (data->kind == adt7461 || data->kind == tmp451)
+ temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[2]);
+ temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
else
- temp = temp_from_s8(data->temp8[2]);
+ temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
data->temp_hyst = hyst_to_reg(temp - val);
i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -937,25 +992,28 @@ static ssize_t set_update_interval(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL, 0, 4);
-static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL,
+ 0, LOCAL_TEMP);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL,
+ 0, REMOTE_TEMP);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 0);
+ set_temp8, LOCAL_LOW);
static SENSOR_DEVICE_ATTR_2(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 0, 1);
+ set_temp11, 0, REMOTE_LOW);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 1);
+ set_temp8, LOCAL_HIGH);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 1, 2);
+ set_temp11, 1, REMOTE_HIGH);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 2);
+ set_temp8, LOCAL_CRIT);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 3);
+ set_temp8, REMOTE_CRIT);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst,
- set_temphyst, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 3);
+ set_temphyst, LOCAL_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL,
+ REMOTE_CRIT);
static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 2, 3);
+ set_temp11, 2, REMOTE_OFFSET);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
@@ -1003,13 +1061,13 @@ static const struct attribute_group lm90_group = {
* Additional attributes for devices with emergency sensors
*/
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 4);
+ set_temp8, LOCAL_EMERG);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 5);
+ set_temp8, REMOTE_EMERG);
static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, 4);
+ NULL, LOCAL_EMERG);
static SENSOR_DEVICE_ATTR(temp2_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, 5);
+ NULL, REMOTE_EMERG);
static struct attribute *lm90_emergency_attributes[] = {
&sensor_dev_attr_temp1_emergency.dev_attr.attr,
@@ -1039,18 +1097,20 @@ static const struct attribute_group lm90_emergency_alarm_group = {
/*
* Additional attributes for devices with 3 temperature sensors
*/
-static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL, 0, 5);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL,
+ 0, REMOTE2_TEMP);
static SENSOR_DEVICE_ATTR_2(temp3_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 3, 6);
+ set_temp11, 3, REMOTE2_LOW);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 4, 7);
+ set_temp11, 4, REMOTE2_HIGH);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 6);
-static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL, 6);
+ set_temp8, REMOTE2_CRIT);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL,
+ REMOTE2_CRIT);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 7);
+ set_temp8, REMOTE2_EMERG);
static SENSOR_DEVICE_ATTR(temp3_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, 7);
+ NULL, REMOTE2_EMERG);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 10);
@@ -1306,6 +1366,19 @@ static int lm90_detect(struct i2c_client *client,
&& (config1 & 0x3F) == 0x00
&& convrate <= 0x08)
name = "g781";
+ } else
+ if (address == 0x4C
+ && man_id == 0x55) { /* Texas Instruments */
+ int local_ext;
+
+ local_ext = i2c_smbus_read_byte_data(client,
+ TMP451_REG_R_LOCAL_TEMPL);
+
+ if (chip_id == 0x00 /* TMP451 */
+ && (config1 & 0x1B) == 0x00
+ && convrate <= 0x09
+ && (local_ext & 0x0F) == 0x00)
+ name = "tmp451";
}
if (!name) { /* identification failed */
@@ -1367,7 +1440,7 @@ static void lm90_init_client(struct i2c_client *client)
data->config_orig = config;
/* Check Temperature Range Select */
- if (data->kind == adt7461) {
+ if (data->kind == adt7461 || data->kind == tmp451) {
if (config & 0x04)
data->flags |= LM90_FLAG_ADT7461_EXT;
}
@@ -1391,14 +1464,74 @@ static void lm90_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
}
+static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
+{
+ struct lm90_data *data = i2c_get_clientdata(client);
+ u8 st, st2 = 0;
+
+ lm90_read_reg(client, LM90_REG_R_STATUS, &st);
+
+ if (data->kind == max6696)
+ lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2);
+
+ *status = st | (st2 << 8);
+
+ if ((st & 0x7f) == 0 && (st2 & 0xfe) == 0)
+ return false;
+
+ if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
+ (st2 & MAX6696_STATUS2_LOT2))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 1);
+ if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
+ (st2 & MAX6696_STATUS2_ROT2))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 2);
+ if (st & LM90_STATUS_ROPEN)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 2);
+ if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
+ MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 3);
+ if (st2 & MAX6696_STATUS2_R2OPEN)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 3);
+
+ return true;
+}
+
+static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
+{
+ struct i2c_client *client = dev_id;
+ u16 status;
+
+ if (lm90_is_tripped(client, &status))
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
+ struct regulator *regulator;
int err;
+ regulator = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(regulator))
+ return PTR_ERR(regulator);
+
+ err = regulator_enable(regulator);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", err);
+ return err;
+ }
+
data = devm_kzalloc(&client->dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1406,6 +1539,8 @@ static int lm90_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
+ data->regulator = regulator;
+
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == adm1032) {
@@ -1467,12 +1602,26 @@ static int lm90_probe(struct i2c_client *client,
goto exit_remove_files;
}
+ if (client->irq) {
+ dev_dbg(dev, "IRQ: %d\n", client->irq);
+ err = devm_request_threaded_irq(dev, client->irq,
+ NULL, lm90_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "lm90", client);
+ if (err < 0) {
+ dev_err(dev, "cannot request IRQ %d\n", client->irq);
+ goto exit_remove_files;
+ }
+ }
+
return 0;
exit_remove_files:
lm90_remove_files(client, data);
exit_restore:
lm90_restore_conf(client, data);
+ regulator_disable(data->regulator);
+
return err;
}
@@ -1483,49 +1632,33 @@ static int lm90_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
lm90_remove_files(client, data);
lm90_restore_conf(client, data);
+ regulator_disable(data->regulator);
return 0;
}
static void lm90_alert(struct i2c_client *client, unsigned int flag)
{
- struct lm90_data *data = i2c_get_clientdata(client);
- u8 config, alarms, alarms2 = 0;
-
- lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
-
- if (data->kind == max6696)
- lm90_read_reg(client, MAX6696_REG_R_STATUS2, &alarms2);
-
- if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
- dev_info(&client->dev, "Everything OK\n");
- } else {
- if (alarms & 0x61)
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 1);
- if (alarms & 0x1a)
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 2);
- if (alarms & 0x04)
- dev_warn(&client->dev,
- "temp%d diode open, please check!\n", 2);
-
- if (alarms2 & 0x18)
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 3);
+ u16 alarms;
+ if (lm90_is_tripped(client, &alarms)) {
/*
* Disable ALERT# output, because these chips don't implement
* SMBus alert correctly; they should only hold the alert line
* low briefly.
*/
+ struct lm90_data *data = i2c_get_clientdata(client);
+
if ((data->flags & LM90_HAVE_BROKEN_ALERT)
&& (alarms & data->alert_alarms)) {
+ u8 config;
dev_dbg(&client->dev, "Disabling ALERT#\n");
lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
config | 0x80);
}
+ } else {
+ dev_info(&client->dev, "Everything OK\n");
}
}
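
Beyond the TMP451 support, the lm90 changes also wire up an optional "vcc" supply (devm_regulator_get() plus an explicit regulator_enable()/regulator_disable() pair) and an optional ALERT#/THERM interrupt requested with devm_request_threaded_irq(). A reduced sketch of the interrupt part; the driver name, register address and helper names are illustrative, not taken from this patch:

#include <linux/i2c.h>
#include <linux/interrupt.h>

#define FOO_REG_STATUS	0x02	/* illustrative status register */

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct i2c_client *client = dev_id;
	int status;

	/* Threaded handler: blocking SMBus I/O is allowed here. */
	status = i2c_smbus_read_byte_data(client, FOO_REG_STATUS);
	if (status > 0)
		return IRQ_HANDLED;	/* a limit tripped, event consumed */
	return IRQ_NONE;
}

static int foo_setup_irq(struct i2c_client *client)
{
	if (!client->irq)
		return 0;		/* interrupt line is optional */

	/*
	 * IRQF_ONESHOT keeps the level-triggered line masked until the
	 * threaded handler has run and cleared the cause.
	 */
	return devm_request_threaded_irq(&client->dev, client->irq,
					 NULL, foo_irq_thread,
					 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					 "foo", client);
}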
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 307c9eaeeb9..411202bdaf6 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -57,7 +57,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4d, 0x4e, I2C_CLIENT_END };
/* Client data (each client gets its own) */
struct lm95234_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated, interval; /* in jiffies */
bool valid; /* false until following fields are valid */
@@ -114,9 +114,9 @@ static u16 update_intervals[] = { 143, 364, 1000, 2500 };
/* Fill value cache. Must be called with update lock held. */
-static int lm95234_fill_cache(struct i2c_client *client)
+static int lm95234_fill_cache(struct lm95234_data *data,
+ struct i2c_client *client)
{
- struct lm95234_data *data = i2c_get_clientdata(client);
int i, ret;
ret = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE);
@@ -157,9 +157,9 @@ static int lm95234_fill_cache(struct i2c_client *client)
return 0;
}
-static int lm95234_update_device(struct i2c_client *client,
- struct lm95234_data *data)
+static int lm95234_update_device(struct lm95234_data *data)
{
+ struct i2c_client *client = data->client;
int ret;
mutex_lock(&data->update_lock);
@@ -169,7 +169,7 @@ static int lm95234_update_device(struct i2c_client *client,
int i;
if (!data->valid) {
- ret = lm95234_fill_cache(client);
+ ret = lm95234_fill_cache(data, client);
if (ret < 0)
goto abort;
}
@@ -209,10 +209,9 @@ abort:
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -224,10 +223,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
static ssize_t show_alarm(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
u32 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -238,10 +236,9 @@ static ssize_t show_alarm(struct device *dev,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
u8 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -252,11 +249,10 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
unsigned long val;
u8 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -274,7 +270,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
else
data->sensor_type &= ~mask;
data->valid = false;
- i2c_smbus_write_byte_data(client, LM95234_REG_REM_MODEL,
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_REM_MODEL,
data->sensor_type);
mutex_unlock(&data->update_lock);
@@ -284,10 +280,9 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -298,11 +293,10 @@ static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
long val;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -315,7 +309,7 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->tcrit2[index] = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT2(index), val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT2(index), val);
mutex_unlock(&data->update_lock);
return count;
@@ -324,10 +318,9 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
static ssize_t show_tcrit2_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -340,8 +333,7 @@ static ssize_t show_tcrit2_hyst(struct device *dev,
static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u", data->tcrit1[index] * 1000);
@@ -350,11 +342,10 @@ static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ int ret = lm95234_update_device(data);
long val;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -367,7 +358,7 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->tcrit1[index] = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT1(index), val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT1(index), val);
mutex_unlock(&data->update_lock);
return count;
@@ -376,10 +367,9 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
static ssize_t show_tcrit1_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -393,11 +383,10 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ int ret = lm95234_update_device(data);
long val;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -411,7 +400,7 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
mutex_lock(&data->update_lock);
data->thyst = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT_HYST, val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT_HYST, val);
mutex_unlock(&data->update_lock);
return count;
@@ -420,10 +409,9 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -434,11 +422,10 @@ static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ int ret = lm95234_update_device(data);
long val;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -452,7 +439,7 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->toffset[index] = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_OFFSET(index), val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_OFFSET(index), val);
mutex_unlock(&data->update_lock);
return count;
@@ -461,9 +448,8 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
- int ret = lm95234_update_device(client, data);
+ struct lm95234_data *data = dev_get_drvdata(dev);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -475,11 +461,10 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
+ int ret = lm95234_update_device(data);
unsigned long val;
u8 regval;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -495,7 +480,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->interval = msecs_to_jiffies(update_intervals[regval]);
- i2c_smbus_write_byte_data(client, LM95234_REG_CONVRATE, regval);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_CONVRATE, regval);
mutex_unlock(&data->update_lock);
return count;
@@ -579,7 +564,7 @@ static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
-static struct attribute *lm95234_attributes[] = {
+static struct attribute *lm95234_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -621,10 +606,7 @@ static struct attribute *lm95234_attributes[] = {
&dev_attr_update_interval.attr,
NULL
};
-
-static const struct attribute_group lm95234_group = {
- .attrs = lm95234_attributes,
-};
+ATTRIBUTE_GROUPS(lm95234);
static int lm95234_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -701,13 +683,14 @@ static int lm95234_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct lm95234_data *data;
+ struct device *hwmon_dev;
int err;
data = devm_kzalloc(dev, sizeof(struct lm95234_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM95234 chip */
@@ -715,32 +698,10 @@ static int lm95234_probe(struct i2c_client *client,
if (err < 0)
return err;
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &lm95234_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&dev->kobj, &lm95234_group);
- return err;
-}
-
-static int lm95234_remove(struct i2c_client *client)
-{
- struct lm95234_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm95234_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ lm95234_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/* Driver data (common to all clients) */
@@ -756,7 +717,6 @@ static struct i2c_driver lm95234_driver = {
.name = DRVNAME,
},
.probe = lm95234_probe,
- .remove = lm95234_remove,
.id_table = lm95234_id,
.detect = lm95234_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index cdc1ecc6734..d4172933ce4 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -51,7 +51,9 @@ enum ltc4245_cmd {
};
struct ltc4245_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+
+ const struct attribute_group *groups[3];
struct mutex update_lock;
bool valid;
@@ -77,8 +79,8 @@ struct ltc4245_data {
*/
static void ltc4245_update_gpios(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct ltc4245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u8 gpio_curr, gpio_next, gpio_reg;
int i;
@@ -130,8 +132,8 @@ static void ltc4245_update_gpios(struct device *dev)
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct ltc4245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
s32 val;
int i;
@@ -455,41 +457,14 @@ static const struct attribute_group ltc4245_gpio_group = {
.attrs = ltc4245_gpio_attributes,
};
-static int ltc4245_sysfs_create_groups(struct i2c_client *client)
+static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
{
- struct ltc4245_data *data = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
- int ret;
-
- /* register the standard sysfs attributes */
- ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
- if (ret) {
- dev_err(dev, "unable to register standard attributes\n");
- return ret;
- }
+ /* standard sysfs attributes */
+ data->groups[0] = &ltc4245_std_group;
	/* if we're using the extra gpio support, register its attributes */
- if (data->use_extra_gpios) {
- ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
- if (ret) {
- dev_err(dev, "unable to register gpio attributes\n");
- sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
-{
- struct ltc4245_data *data = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
-
if (data->use_extra_gpios)
- sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);
-
- sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
+ data->groups[1] = &ltc4245_gpio_group;
}
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
@@ -517,7 +492,7 @@ static int ltc4245_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = client->adapter;
struct ltc4245_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -526,7 +501,7 @@ static int ltc4245_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
data->use_extra_gpios = ltc4245_use_extra_gpios(client);
@@ -534,30 +509,25 @@ static int ltc4245_probe(struct i2c_client *client,
i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
- /* Register sysfs hooks */
- ret = ltc4245_sysfs_create_groups(client);
- if (ret)
- return ret;
+ /* Add sysfs hooks */
+ ltc4245_sysfs_add_groups(data);
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
+ hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- return 0;
+ i2c_set_clientdata(client, hwmon_dev);
-out_hwmon_device_register:
- ltc4245_sysfs_remove_groups(client);
- return ret;
+ return 0;
}
static int ltc4245_remove(struct i2c_client *client)
{
- struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct device *hwmon_dev = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- ltc4245_sysfs_remove_groups(client);
+ hwmon_device_unregister(hwmon_dev);
return 0;
}
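
Where the set of attributes depends on configuration detected at probe time (ltc4245's extra GPIOs above, max16065's secondary limits and current sensing below), the converted drivers keep a small NULL-terminated array of group pointers in the driver data and fill it before registration, then pass it as the last argument to the hwmon registration call. A short sketch, again with illustrative "foo" names:

struct foo_data {
	struct i2c_client *client;
	const struct attribute_group *groups[3];	/* 2 groups max + NULL */
	bool has_extra_gpios;
};

static void foo_fill_groups(struct foo_data *data)
{
	int i = 0;

	data->groups[i++] = &foo_std_group;		/* always present */
	if (data->has_extra_gpios)
		data->groups[i++] = &foo_gpio_group;	/* optional */
	/* remaining slots stay NULL, terminating the list */
}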
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 487da58ec86..0becd69842b 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -55,7 +55,7 @@
#define FAULT_OC (1<<2)
struct ltc4261_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -67,8 +67,8 @@ struct ltc4261_data {
static struct ltc4261_data *ltc4261_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4261_data *data = i2c_get_clientdata(client);
+ struct ltc4261_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ltc4261_data *ret = data;
mutex_lock(&data->update_lock);
@@ -150,7 +150,6 @@ static ssize_t ltc4261_show_bool(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
struct ltc4261_data *data = ltc4261_update_device(dev);
u8 fault;
@@ -159,7 +158,7 @@ static ssize_t ltc4261_show_bool(struct device *dev,
fault = data->regs[LTC4261_FAULT] & attr->index;
if (fault) /* Clear reported faults in chip register */
- i2c_smbus_write_byte_data(client, LTC4261_FAULT, ~fault);
+ i2c_smbus_write_byte_data(data->client, LTC4261_FAULT, ~fault);
return snprintf(buf, PAGE_SIZE, "%d\n", fault ? 1 : 0);
}
@@ -197,7 +196,7 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4261_show_value, NULL,
static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
FAULT_OC);
-static struct attribute *ltc4261_attributes[] = {
+static struct attribute *ltc4261_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min_alarm.dev_attr.attr,
&sensor_dev_attr_in1_max_alarm.dev_attr.attr,
@@ -210,62 +209,38 @@ static struct attribute *ltc4261_attributes[] = {
NULL,
};
-
-static const struct attribute_group ltc4261_group = {
- .attrs = ltc4261_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4261);
static int ltc4261_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
struct ltc4261_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
- dev_err(&client->dev, "Failed to read status register\n");
+ dev_err(dev, "Failed to read status register\n");
return -ENODEV;
}
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Clear faults */
i2c_smbus_write_byte_data(client, LTC4261_FAULT, 0x00);
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ltc4261_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
-
- return 0;
-
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
- return ret;
-}
-
-static int ltc4261_remove(struct i2c_client *client)
-{
- struct ltc4261_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ ltc4261_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4261_id[] = {
@@ -281,7 +256,6 @@ static struct i2c_driver ltc4261_driver = {
.name = "ltc4261",
},
.probe = ltc4261_probe,
- .remove = ltc4261_remove,
.id_table = ltc4261_id,
};
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 2fa2c02f556..d4efc79d7b9 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -83,7 +83,8 @@ static const bool max16065_have_current[] = {
struct max16065_data {
enum chips type;
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[4];
struct mutex update_lock;
bool valid;
unsigned long last_updated; /* in jiffies */
@@ -144,8 +145,8 @@ static int max16065_read_adc(struct i2c_client *client, int reg)
static struct max16065_data *max16065_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max16065_data *data = i2c_get_clientdata(client);
+ struct max16065_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
@@ -186,7 +187,7 @@ static ssize_t max16065_show_alarm(struct device *dev,
val &= (1 << attr2->index);
if (val)
- i2c_smbus_write_byte_data(to_i2c_client(dev),
+ i2c_smbus_write_byte_data(data->client,
MAX16065_FAULT(attr2->nr), val);
return snprintf(buf, PAGE_SIZE, "%d\n", !!val);
@@ -223,8 +224,7 @@ static ssize_t max16065_set_limit(struct device *dev,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
- struct i2c_client *client = to_i2c_client(dev);
- struct max16065_data *data = i2c_get_clientdata(client);
+ struct max16065_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
int limit;
@@ -238,7 +238,7 @@ static ssize_t max16065_set_limit(struct device *dev,
mutex_lock(&data->update_lock);
data->limit[attr2->nr][attr2->index]
= LIMIT_TO_MV(limit, data->range[attr2->index]);
- i2c_smbus_write_byte_data(client,
+ i2c_smbus_write_byte_data(data->client,
MAX16065_LIMIT(attr2->nr, attr2->index),
limit);
mutex_unlock(&data->update_lock);
@@ -250,8 +250,7 @@ static ssize_t max16065_show_limit(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
- struct i2c_client *client = to_i2c_client(dev);
- struct max16065_data *data = i2c_get_clientdata(client);
+ struct max16065_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\n",
data->limit[attr2->nr][attr2->index]);
@@ -516,8 +515,32 @@ static struct attribute *max16065_max_attributes[] = {
NULL
};
+static umode_t max16065_basic_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct max16065_data *data = dev_get_drvdata(dev);
+ int index = n / 4;
+
+ if (index >= data->num_adc || !data->range[index])
+ return 0;
+ return a->mode;
+}
+
+static umode_t max16065_secondary_is_visible(struct kobject *kobj,
+ struct attribute *a, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct max16065_data *data = dev_get_drvdata(dev);
+
+ if (index >= data->num_adc)
+ return 0;
+ return a->mode;
+}
+
static const struct attribute_group max16065_basic_group = {
.attrs = max16065_basic_attributes,
+ .is_visible = max16065_basic_is_visible,
};
static const struct attribute_group max16065_current_group = {
@@ -526,38 +549,35 @@ static const struct attribute_group max16065_current_group = {
static const struct attribute_group max16065_min_group = {
.attrs = max16065_min_attributes,
+ .is_visible = max16065_secondary_is_visible,
};
static const struct attribute_group max16065_max_group = {
.attrs = max16065_max_attributes,
+ .is_visible = max16065_secondary_is_visible,
};
-static void max16065_cleanup(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &max16065_max_group);
- sysfs_remove_group(&client->dev.kobj, &max16065_min_group);
- sysfs_remove_group(&client->dev.kobj, &max16065_current_group);
- sysfs_remove_group(&client->dev.kobj, &max16065_basic_group);
-}
-
static int max16065_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
struct max16065_data *data;
- int i, j, val, ret;
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ int i, j, val;
bool have_secondary; /* true if chip has secondary limits */
bool secondary_is_max = false; /* secondary limits reflect max */
+ int groups = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_READ_WORD_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
data->num_adc = max16065_num_adc[id->driver_data];
@@ -596,38 +616,16 @@ static int max16065_probe(struct i2c_client *client,
}
}
- /* Register sysfs hooks */
- for (i = 0; i < data->num_adc * 4; i++) {
- /* Do not create sysfs entry if channel is disabled */
- if (!data->range[i / 4])
- continue;
-
- ret = sysfs_create_file(&client->dev.kobj,
- max16065_basic_attributes[i]);
- if (unlikely(ret))
- goto out;
- }
-
- if (have_secondary) {
- struct attribute **attr = secondary_is_max ?
- max16065_max_attributes : max16065_min_attributes;
-
- for (i = 0; i < data->num_adc; i++) {
- if (!data->range[i])
- continue;
-
- ret = sysfs_create_file(&client->dev.kobj, attr[i]);
- if (unlikely(ret))
- goto out;
- }
- }
+ /* sysfs hooks */
+ data->groups[groups++] = &max16065_basic_group;
+ if (have_secondary)
+ data->groups[groups++] = secondary_is_max ?
+ &max16065_max_group : &max16065_min_group;
if (data->have_current) {
val = i2c_smbus_read_byte_data(client, MAX16065_CURR_CONTROL);
- if (unlikely(val < 0)) {
- ret = val;
- goto out;
- }
+ if (unlikely(val < 0))
+ return val;
if (val & MAX16065_CURR_ENABLE) {
/*
* Current gain is 6, 12, 24, 48 based on values in
@@ -636,33 +634,16 @@ static int max16065_probe(struct i2c_client *client,
data->curr_gain = 6 << ((val >> 2) & 0x03);
data->range[MAX16065_NUM_ADC]
= max16065_csp_adc_range[(val >> 1) & 0x01];
- ret = sysfs_create_group(&client->dev.kobj,
- &max16065_current_group);
- if (unlikely(ret))
- goto out;
+ data->groups[groups++] = &max16065_current_group;
} else {
data->have_current = false;
}
}
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (unlikely(IS_ERR(data->hwmon_dev))) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out;
- }
- return 0;
-
-out:
- max16065_cleanup(client);
- return ret;
-}
-
-static int max16065_remove(struct i2c_client *client)
-{
- struct max16065_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- max16065_cleanup(client);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ if (unlikely(IS_ERR(hwmon_dev)))
+ return PTR_ERR(hwmon_dev);
return 0;
}
@@ -685,7 +666,6 @@ static struct i2c_driver max16065_driver = {
.name = "max16065",
},
.probe = max16065_probe,
- .remove = max16065_remove,
.id_table = max16065_id,
};
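
The max16065 conversion replaces the per-attribute sysfs_create_file() loop with static attribute groups whose .is_visible callbacks hide the channels that are not populated on a given chip, so the same group list can be registered for every variant. A minimal sketch of the mechanism, continuing the hypothetical "foo" driver from the earlier sketch and assuming its foo_data gained a num_channels field:

static umode_t foo_is_visible(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct foo_data *data = dev_get_drvdata(dev);

	/* Returning 0 hides the attribute; attr->mode keeps it as declared. */
	if (index >= data->num_channels)
		return 0;
	return attr->mode;
}

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
	.is_visible = foo_is_visible,
};
__ATTRIBUTE_GROUPS(foo);	/* builds foo_groups[] from the group above */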
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 57d58cd3220..8326fbd6015 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -87,7 +87,7 @@ static int temp_to_reg(int val)
*/
struct max6642_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -102,10 +102,10 @@ struct max6642_data {
* Real code
*/
-static void max6642_init_client(struct i2c_client *client)
+static void max6642_init_client(struct max6642_data *data,
+ struct i2c_client *client)
{
u8 config;
- struct max6642_data *data = i2c_get_clientdata(client);
/*
* Start the conversions.
@@ -168,14 +168,14 @@ static int max6642_detect(struct i2c_client *client,
static struct max6642_data *max6642_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6642_data *data = i2c_get_clientdata(client);
+ struct max6642_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u16 val, tmp;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- dev_dbg(&client->dev, "Updating max6642 data.\n");
+ dev_dbg(dev, "Updating max6642 data.\n");
val = i2c_smbus_read_byte_data(client,
MAX6642_REG_R_LOCAL_TEMPL);
tmp = (val >> 6) & 3;
@@ -209,8 +209,8 @@ static struct max6642_data *max6642_update_device(struct device *dev)
static ssize_t show_temp_max10(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct max6642_data *data = max6642_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6642_data *data = max6642_update_device(dev);
return sprintf(buf, "%d\n",
temp_from_reg10(data->temp_input[attr->index]));
@@ -219,8 +219,8 @@ static ssize_t show_temp_max10(struct device *dev,
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct max6642_data *data = max6642_update_device(dev);
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+ struct max6642_data *data = max6642_update_device(dev);
return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr]));
}
@@ -228,11 +228,10 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+ struct max6642_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
- struct i2c_client *client = to_i2c_client(dev);
- struct max6642_data *data = i2c_get_clientdata(client);
- struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
err = kstrtoul(buf, 10, &val);
if (err < 0)
@@ -240,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
- i2c_smbus_write_byte_data(client, attr2->index,
+ i2c_smbus_write_byte_data(data->client, attr2->index,
data->temp_high[attr2->nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -264,7 +263,7 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
-static struct attribute *max6642_attributes[] = {
+static struct attribute *max6642_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -275,54 +274,29 @@ static struct attribute *max6642_attributes[] = {
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(max6642);
-static const struct attribute_group max6642_group = {
- .attrs = max6642_attributes,
-};
-
-static int max6642_probe(struct i2c_client *new_client,
+static int max6642_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct max6642_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&new_client->dev, sizeof(struct max6642_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct max6642_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the MAX6642 chip */
- max6642_init_client(new_client);
+ max6642_init_client(data, client);
- /* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &max6642_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &max6642_group);
- return err;
-}
-
-static int max6642_remove(struct i2c_client *client)
-{
- struct max6642_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &max6642_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ max6642_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/*
@@ -341,7 +315,6 @@ static struct i2c_driver max6642_driver = {
.name = "max6642",
},
.probe = max6642_probe,
- .remove = max6642_remove,
.id_table = max6642_id,
.detect = max6642_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 3c16cbd4c00..0cafc390db4 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -660,7 +660,7 @@ static int max6650_init_client(struct i2c_client *client)
/*
* If mode is set to "full off", we change it to "open loop" and
* set DAC to 255, which has the same effect. We do this because
- * there's no "full off" mode defined in hwmon specifcations.
+ * there's no "full off" mode defined in hwmon specifications.
*/
if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index a41b5f3fc50..7fd3eaf817f 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -77,7 +77,7 @@ struct max6697_chip_data {
};
struct max6697_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
enum chips type;
const struct max6697_chip_data *chip;
@@ -181,8 +181,8 @@ static const struct max6697_chip_data max6697_chip_data[] = {
static struct max6697_data *max6697_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct max6697_data *ret = data;
int val;
int i;
@@ -303,8 +303,7 @@ static ssize_t set_temp(struct device *dev,
{
int nr = to_sensor_dev_attr_2(devattr)->nr;
int index = to_sensor_dev_attr_2(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *data = dev_get_drvdata(dev);
long temp;
int ret;
@@ -316,7 +315,7 @@ static ssize_t set_temp(struct device *dev,
temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
data->temp[nr][index] = temp;
- ret = i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_byte_data(data->client,
index == 2 ? MAX6697_REG_MAX[nr]
: MAX6697_REG_CRIT[nr],
temp);
@@ -405,8 +404,7 @@ static umode_t max6697_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *data = dev_get_drvdata(dev);
const struct max6697_chip_data *chip = data->chip;
int channel = index / 6; /* channel number */
int nr = index % 6; /* attribute index within channel */
@@ -489,6 +487,7 @@ static struct attribute *max6697_attributes[] = {
static const struct attribute_group max6697_group = {
.attrs = max6697_attributes, .is_visible = max6697_is_visible,
};
+__ATTRIBUTE_GROUPS(max6697);
static void max6697_get_config_of(struct device_node *node,
struct max6697_platform_data *pdata)
@@ -525,9 +524,9 @@ static void max6697_get_config_of(struct device_node *node,
}
}
-static int max6697_init_chip(struct i2c_client *client)
+static int max6697_init_chip(struct max6697_data *data,
+ struct i2c_client *client)
{
- struct max6697_data *data = i2c_get_clientdata(client);
struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
struct max6697_platform_data p;
const struct max6697_chip_data *chip = data->chip;
@@ -625,6 +624,7 @@ static int max6697_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
struct max6697_data *data;
+ struct device *hwmon_dev;
int err;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -636,39 +636,17 @@ static int max6697_probe(struct i2c_client *client,
data->type = id->driver_data;
data->chip = &max6697_chip_data[data->type];
-
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
- err = max6697_init_chip(client);
- if (err)
- return err;
-
- err = sysfs_create_group(&client->dev.kobj, &max6697_group);
+ err = max6697_init_chip(data, client);
if (err)
return err;
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error;
- }
-
- return 0;
-
-error:
- sysfs_remove_group(&client->dev.kobj, &max6697_group);
- return err;
-}
-
-static int max6697_remove(struct i2c_client *client)
-{
- struct max6697_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &max6697_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ max6697_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id max6697_id[] = {
@@ -692,7 +670,6 @@ static struct i2c_driver max6697_driver = {
.name = "max6697",
},
.probe = max6697_probe,
- .remove = max6697_remove,
.id_table = max6697_id,
};
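
The __ATTRIBUTE_GROUPS(max6697) line added in this hunk comes from <linux/sysfs.h>; it builds the max6697_groups[] array that the new probe passes to the hwmon core. Its expansion is roughly:

  static const struct attribute_group *max6697_groups[] = {
  	&max6697_group,
  	NULL,
  };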
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 982d8622c09..ae00e60d856 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -37,7 +37,7 @@
struct mc13783_adc_priv {
struct mc13xxx *mc13xxx;
struct device *hwmon_dev;
- char name[10];
+ char name[PLATFORM_NAME_SIZE];
};
static ssize_t mc13783_adc_show_name(struct device *dev, struct device_attribute
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 6eb03ce2cff..cf811c1a147 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -274,6 +274,8 @@ static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
static const u16 NCT6775_REG_TEMP[] = {
0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
+static const u16 NCT6775_REG_TEMP_MON[] = { 0x73, 0x75, 0x77 };
+
static const u16 NCT6775_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
0, 0x152, 0x252, 0x628, 0x629, 0x62A };
static const u16 NCT6775_REG_TEMP_HYST[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
@@ -454,6 +456,7 @@ static const u16 NCT6779_REG_CRITICAL_PWM[] = {
0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 };
static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 };
+static const u16 NCT6779_REG_TEMP_MON[] = { 0x73, 0x75, 0x77, 0x79, 0x7b };
static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
0x18, 0x152 };
static const u16 NCT6779_REG_TEMP_HYST[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
@@ -507,6 +510,13 @@ static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1]
#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28
+static const u16 NCT6791_REG_WEIGHT_TEMP_SEL[6] = { 0, 0x239 };
+static const u16 NCT6791_REG_WEIGHT_TEMP_STEP[6] = { 0, 0x23a };
+static const u16 NCT6791_REG_WEIGHT_TEMP_STEP_TOL[6] = { 0, 0x23b };
+static const u16 NCT6791_REG_WEIGHT_DUTY_STEP[6] = { 0, 0x23c };
+static const u16 NCT6791_REG_WEIGHT_TEMP_BASE[6] = { 0, 0x23d };
+static const u16 NCT6791_REG_WEIGHT_DUTY_BASE[6] = { 0, 0x23e };
+
static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = {
0x459, 0x45A, 0x45B, 0x568, 0x45D };
@@ -534,6 +544,7 @@ static const u16 NCT6106_REG_IN[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 };
static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 };
+static const u16 NCT6106_REG_TEMP_MON[] = { 0x18, 0x19, 0x1a };
static const u16 NCT6106_REG_TEMP_HYST[] = {
0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 };
static const u16 NCT6106_REG_TEMP_OVER[] = {
@@ -724,11 +735,8 @@ struct nct6775_data {
enum kinds kind;
const char *name;
- struct device *hwmon_dev;
- struct attribute_group *group_in;
- struct attribute_group *group_fan;
- struct attribute_group *group_temp;
- struct attribute_group *group_pwm;
+ int num_attr_groups;
+ const struct attribute_group *groups[6];
u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
* 3=temp_crit, 4=temp_lcrit
@@ -942,7 +950,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
struct sensor_device_attribute_2 *a2;
struct attribute **attrs;
struct sensor_device_template **t;
- int err, i, j, count;
+ int i, count;
if (repeat <= 0)
return ERR_PTR(-EINVAL);
@@ -973,7 +981,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
for (i = 0; i < repeat; i++) {
t = tg->templates;
- for (j = 0; *t != NULL; j++) {
+ while (*t != NULL) {
snprintf(su->name, sizeof(su->name),
(*t)->dev_attr.attr.name, tg->base + i);
if ((*t)->s2) {
@@ -1002,10 +1010,6 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
}
}
- err = sysfs_create_group(&dev->kobj, group);
- if (err)
- return ERR_PTR(-ENOMEM);
-
return group;
}
@@ -1314,6 +1318,9 @@ static void nct6775_update_pwm(struct device *dev)
if (reg & 0x80)
data->pwm[2][i] = 0;
+ if (!data->REG_WEIGHT_TEMP_SEL[i])
+ continue;
+
reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
@@ -1457,7 +1464,8 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
= nct6775_read_temp(data,
data->reg_temp[j][i]);
}
- if (!(data->have_temp_fixed & (1 << i)))
+ if (i >= NUM_TEMP_FIXED ||
+ !(data->have_temp_fixed & (1 << i)))
continue;
data->temp_offset[i]
= nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
@@ -1545,7 +1553,7 @@ static int find_temp_source(struct nct6775_data *data, int index, int count)
if (src == source)
return nr;
}
- return -1;
+ return -ENODEV;
}
static ssize_t
@@ -1644,7 +1652,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
if (nr < 0)
- return -ENODEV;
+ return nr;
bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
regindex = bit >> 3;
@@ -2726,16 +2734,6 @@ store_fan_time(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct nct6775_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
-static ssize_t
show_auto_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nct6775_data *data = nct6775_update_device(dev);
@@ -2868,6 +2866,9 @@ static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
if (!(data->has_pwm & (1 << pwm)))
return 0;
+ if ((nr >= 14 && nr <= 18) || nr == 21) /* weight */
+ if (!data->REG_WEIGHT_TEMP_SEL[pwm])
+ return 0;
if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */
return 0;
if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */
@@ -2961,11 +2962,11 @@ static struct sensor_device_template *nct6775_attributes_pwm_template[] = {
&sensor_dev_template_pwm_step_down_time,
&sensor_dev_template_pwm_start,
&sensor_dev_template_pwm_floor,
- &sensor_dev_template_pwm_weight_temp_sel,
+ &sensor_dev_template_pwm_weight_temp_sel, /* 14 */
&sensor_dev_template_pwm_weight_temp_step,
&sensor_dev_template_pwm_weight_temp_step_tol,
&sensor_dev_template_pwm_weight_temp_step_base,
- &sensor_dev_template_pwm_weight_duty_step,
+ &sensor_dev_template_pwm_weight_duty_step, /* 18 */
&sensor_dev_template_pwm_max, /* 19 */
&sensor_dev_template_pwm_step, /* 20 */
&sensor_dev_template_pwm_weight_duty_base, /* 21 */
@@ -3061,16 +3062,16 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
- if (index == 1 && !data->have_vid)
+ if (index == 0 && !data->have_vid)
return 0;
- if (index == 2 || index == 3) {
- if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 2] < 0)
+ if (index == 1 || index == 2) {
+ if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 1] < 0)
return 0;
}
- if (index == 4 || index == 5) {
- if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 4] < 0)
+ if (index == 3 || index == 4) {
+ if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 3] < 0)
return 0;
}
@@ -3083,13 +3084,12 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
* Any change in order or content must be matched.
*/
static struct attribute *nct6775_attributes_other[] = {
- &dev_attr_name.attr,
- &dev_attr_cpu0_vid.attr, /* 1 */
- &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, /* 2 */
- &sensor_dev_attr_intrusion1_alarm.dev_attr.attr, /* 3 */
- &sensor_dev_attr_intrusion0_beep.dev_attr.attr, /* 4 */
- &sensor_dev_attr_intrusion1_beep.dev_attr.attr, /* 5 */
- &sensor_dev_attr_beep_enable.dev_attr.attr, /* 6 */
+ &dev_attr_cpu0_vid.attr, /* 0 */
+ &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, /* 1 */
+ &sensor_dev_attr_intrusion1_alarm.dev_attr.attr, /* 2 */
+ &sensor_dev_attr_intrusion0_beep.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_intrusion1_beep.dev_attr.attr, /* 4 */
+ &sensor_dev_attr_beep_enable.dev_attr.attr, /* 5 */
NULL
};
@@ -3099,27 +3099,6 @@ static const struct attribute_group nct6775_group_other = {
.is_visible = nct6775_other_is_visible,
};
-/*
- * Driver and device management
- */
-
-static void nct6775_device_remove_files(struct device *dev)
-{
- struct nct6775_data *data = dev_get_drvdata(dev);
-
- if (data->group_pwm)
- sysfs_remove_group(&dev->kobj, data->group_pwm);
- if (data->group_in)
- sysfs_remove_group(&dev->kobj, data->group_in);
- if (data->group_fan)
- sysfs_remove_group(&dev->kobj, data->group_fan);
- if (data->group_temp)
- sysfs_remove_group(&dev->kobj, data->group_temp);
-
- sysfs_remove_group(&dev->kobj, &nct6775_group_other);
-}
-
-/* Get the monitoring functions started */
static inline void nct6775_init_device(struct nct6775_data *data)
{
int i;
@@ -3291,11 +3270,12 @@ static int nct6775_probe(struct platform_device *pdev)
int i, s, err = 0;
int src, mask, available;
const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
- const u16 *reg_temp_alternate, *reg_temp_crit;
+ const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
- int num_reg_temp;
+ int num_reg_temp, num_reg_temp_mon;
u8 cr2a;
struct attribute_group *group;
+ struct device *hwmon_dev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
@@ -3375,7 +3355,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->BEEP_BITS = NCT6106_BEEP_BITS;
reg_temp = NCT6106_REG_TEMP;
+ reg_temp_mon = NCT6106_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -3447,7 +3429,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6775_REG_BEEP;
reg_temp = NCT6775_REG_TEMP;
+ reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
@@ -3517,7 +3501,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6775_REG_TEMP;
+ reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
@@ -3591,7 +3577,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6779_REG_TEMP;
+ reg_temp_mon = NCT6779_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3640,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP;
- data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM[5] = NCT6791_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6791_REG_WEIGHT_DUTY_BASE;
data->REG_PWM_READ = NCT6775_REG_PWM_READ;
data->REG_PWM_MODE = NCT6776_REG_PWM_MODE;
data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK;
@@ -3657,15 +3645,17 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
- data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL;
- data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP;
- data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
- data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
+ data->REG_WEIGHT_TEMP_SEL = NCT6791_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6791_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6791_REG_ALARM;
data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6779_REG_TEMP;
+ reg_temp_mon = NCT6779_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3766,6 +3756,50 @@ static int nct6775_probe(struct platform_device *pdev)
s++;
}
+ /*
+ * Repeat with temperatures used for fan control.
+ * This set of registers does not support limits.
+ */
+ for (i = 0; i < num_reg_temp_mon; i++) {
+ if (reg_temp_mon[i] == 0)
+ continue;
+
+ src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
+ if (!src || (mask & (1 << src)))
+ continue;
+
+ if (src >= data->temp_label_num ||
+ !strlen(data->temp_label[src])) {
+ dev_info(dev,
+ "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n",
+ src, i, data->REG_TEMP_SEL[i],
+ reg_temp_mon[i]);
+ continue;
+ }
+
+ mask |= 1 << src;
+
+ /* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */
+ if (src <= data->temp_fixed_num) {
+ if (data->have_temp & (1 << (src - 1)))
+ continue;
+ data->have_temp |= 1 << (src - 1);
+ data->have_temp_fixed |= 1 << (src - 1);
+ data->reg_temp[0][src - 1] = reg_temp_mon[i];
+ data->temp_src[src - 1] = src;
+ continue;
+ }
+
+ if (s >= NUM_TEMP)
+ continue;
+
+ /* Use dynamic index for other sources */
+ data->have_temp |= 1 << s;
+ data->reg_temp[0][s] = reg_temp_mon[i];
+ data->temp_src[s] = src;
+ s++;
+ }
+
#ifdef USE_ALTERNATE
/*
* Go through the list of alternate temp registers and enable
@@ -3870,61 +3904,36 @@ static int nct6775_probe(struct platform_device *pdev)
/* Register sysfs hooks */
group = nct6775_create_attr_group(dev, &nct6775_pwm_template_group,
data->pwm_num);
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_pwm = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[data->num_attr_groups++] = group;
group = nct6775_create_attr_group(dev, &nct6775_in_template_group,
fls(data->have_in));
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_in = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[data->num_attr_groups++] = group;
group = nct6775_create_attr_group(dev, &nct6775_fan_template_group,
fls(data->has_fan));
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_fan = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[data->num_attr_groups++] = group;
group = nct6775_create_attr_group(dev, &nct6775_temp_template_group,
fls(data->have_temp));
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_temp = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- err = sysfs_create_group(&dev->kobj, &nct6775_group_other);
- if (err)
- goto exit_remove;
+ data->groups[data->num_attr_groups++] = group;
+ data->groups[data->num_attr_groups++] = &nct6775_group_other;
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- nct6775_device_remove_files(dev);
- return err;
-}
-
-static int nct6775_remove(struct platform_device *pdev)
-{
- struct nct6775_data *data = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(data->hwmon_dev);
- nct6775_device_remove_files(&pdev->dev);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, data->name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
#ifdef CONFIG_PM
@@ -4013,7 +4022,6 @@ static struct platform_driver nct6775_driver = {
.pm = NCT6775_DEV_PM_OPS,
},
.probe = nct6775_probe,
- .remove = nct6775_remove,
};
static const char * const nct6775_sio_names[] __initconst = {
@@ -4101,7 +4109,7 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
/*
* when Super-I/O functions move to a separate file, the Super-I/O
* bus will manage the lifetime of the device and this module will only keep
- * track of the nct6775 driver. But since we platform_device_alloc(), we
+ * track of the nct6775 driver. But since we use platform_device_alloc(), we
* must keep track of the device
*/
static struct platform_device *pdev[2];
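
One detail worth noting in the nct6775 conversion: data->groups[6] is never explicitly NULL-terminated. That works because the structure comes from a zeroing allocator (devm_kzalloc() in the probe, outside this hunk), so unused slots are already NULL and the sysfs core stops at the first NULL entry. Rough illustration, with hypothetical group pointers standing in for the dynamically created groups:

  /* data allocated with devm_kzalloc(), so groups[] starts out all-NULL */
  data->groups[data->num_attr_groups++] = group_pwm;		/* [0] */
  data->groups[data->num_attr_groups++] = group_in;		/* [1] */
  data->groups[data->num_attr_groups++] = group_fan;		/* [2] */
  data->groups[data->num_attr_groups++] = group_temp;		/* [3] */
  data->groups[data->num_attr_groups++] = &nct6775_group_other;	/* [4] */
  /* groups[5] stays NULL and terminates the list passed to hwmon */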
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 6a9d6edaacb..a26b1d1d951 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25066, lm5064, lm5066 };
+enum chips { lm25056, lm25063, lm25066, lm5064, lm5066 };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -52,6 +52,11 @@ enum chips { lm25056, lm25066, lm5064, lm5066 };
#define LM25056_MFR_STS_VAUX_OV_WARN (1 << 1)
#define LM25056_MFR_STS_VAUX_UV_WARN (1 << 0)
+/* LM25063 only */
+
+#define LM25063_READ_VOUT_MAX 0xe5
+#define LM25063_READ_VOUT_MIN 0xe6
+
struct __coeff {
short m, b, R;
};
@@ -59,7 +64,7 @@ struct __coeff {
#define PSC_CURRENT_IN_L (PSC_NUM_CLASSES)
#define PSC_POWER_L (PSC_NUM_CLASSES + 1)
-static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
+static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = {
[lm25056] = {
[PSC_VOLTAGE_IN] = {
.m = 16296,
@@ -116,6 +121,36 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
+ [lm25063] = {
+ [PSC_VOLTAGE_IN] = {
+ .m = 16000,
+ .R = -2,
+ },
+ [PSC_VOLTAGE_OUT] = {
+ .m = 16000,
+ .R = -2,
+ },
+ [PSC_CURRENT_IN] = {
+ .m = 10000,
+ .R = -2,
+ },
+ [PSC_CURRENT_IN_L] = {
+ .m = 10000,
+ .R = -2,
+ },
+ [PSC_POWER] = {
+ .m = 5000,
+ .R = -3,
+ },
+ [PSC_POWER_L] = {
+ .m = 5000,
+ .R = -3,
+ },
+ [PSC_TEMPERATURE] = {
+ .m = 15596,
+ .R = -3,
+ },
+ },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -178,6 +213,7 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
struct lm25066_data {
int id;
+ u16 rlimit; /* Maximum register value */
struct pmbus_driver_info info;
};
@@ -200,6 +236,10 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
+ case lm25063:
+ /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 20, 625);
+ break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -253,6 +293,24 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
+ break;
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
+ break;
+ default:
+ ret = lm25066_read_word_data(client, page, reg);
+ break;
+ }
+ return ret;
+}
+
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -308,27 +366,34 @@ static int lm25056_read_byte_data(struct i2c_client *client, int page, int reg)
static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
u16 word)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct lm25066_data *data = to_lm25066_data(info);
int ret;
switch (reg) {
+ case PMBUS_POUT_OP_FAULT_LIMIT:
+ case PMBUS_POUT_OP_WARN_LIMIT:
case PMBUS_VOUT_UV_WARN_LIMIT:
case PMBUS_OT_FAULT_LIMIT:
case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_IIN_OC_FAULT_LIMIT:
case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_FAULT_LIMIT:
case PMBUS_VIN_OV_WARN_LIMIT:
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0, reg, word);
pmbus_clear_cache(client);
break;
case PMBUS_IIN_OC_WARN_LIMIT:
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_IIN_OC_WARN_LIMIT,
word);
pmbus_clear_cache(client);
break;
case PMBUS_PIN_OP_WARN_LIMIT:
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_PIN_OP_WARN_LIMIT,
word);
@@ -337,7 +402,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_UV_WARN_LIMIT, word);
pmbus_clear_cache(client);
@@ -345,7 +410,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_OV_WARN_LIMIT, word);
pmbus_clear_cache(client);
@@ -399,9 +464,16 @@ static int lm25066_probe(struct i2c_client *client,
info->func[0] |= PMBUS_HAVE_STATUS_VMON;
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
+ data->rlimit = 0x0fff;
+ } else if (data->id == lm25063) {
+ info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_POUT;
+ info->read_word_data = lm25063_read_word_data;
+ data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
+ data->rlimit = 0x0fff;
}
info->write_word_data = lm25066_write_word_data;
@@ -432,6 +504,7 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
+ {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
@@ -453,5 +526,5 @@ static struct i2c_driver lm25066_driver = {
module_i2c_driver(lm25066_driver);
MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LM25056/LM25066/LM5064/LM5066");
+MODULE_DESCRIPTION("PMBus driver for LM25066 and compatible chips");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 586a89ef9e0..de3c152a1d9 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -1,5 +1,6 @@
/*
- * Hardware monitoring driver for LTC2974, LTC2978, LTC3880, and LTC3883
+ * Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
+ * and LTC3883
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -27,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
+enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
@@ -35,7 +36,7 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
#define LTC2978_MFR_TEMPERATURE_PEAK 0xdf
#define LTC2978_MFR_SPECIAL_ID 0xe7
-/* LTC2974 and LTC2978 */
+/* LTC2974, LTC2977, and LTC2978 */
#define LTC2978_MFR_VOUT_MIN 0xfb
#define LTC2978_MFR_VIN_MIN 0xfc
#define LTC2978_MFR_TEMPERATURE_MIN 0xfd
@@ -53,8 +54,10 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
#define LTC3883_MFR_IIN_PEAK 0xe1
#define LTC2974_ID 0x0212
+#define LTC2977_ID 0x0130
#define LTC2978_ID_REV1 0x0121
#define LTC2978_ID_REV2 0x0122
+#define LTC2978A_ID 0x0124
#define LTC3880_ID 0x4000
#define LTC3880_ID_MASK 0xff00
#define LTC3883_ID 0x4300
@@ -363,6 +366,7 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
static const struct i2c_device_id ltc2978_id[] = {
{"ltc2974", ltc2974},
+ {"ltc2977", ltc2977},
{"ltc2978", ltc2978},
{"ltc3880", ltc3880},
{"ltc3883", ltc3883},
@@ -392,7 +396,10 @@ static int ltc2978_probe(struct i2c_client *client,
if (chip_id == LTC2974_ID) {
data->id = ltc2974;
- } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2) {
+ } else if (chip_id == LTC2977_ID) {
+ data->id = ltc2977;
+ } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2 ||
+ chip_id == LTC2978A_ID) {
data->id = ltc2978;
} else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
data->id = ltc3880;
@@ -438,6 +445,7 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
}
break;
+ case ltc2977:
case ltc2978:
info->read_word_data = ltc2978_read_word_data;
info->pages = LTC2978_NUM_PAGES;
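
For context (the read itself is outside this hunk), the chip_id compared against the new LTC2977_ID and LTC2978A_ID constants is obtained early in ltc2978_probe(), roughly like this:

  /* approximate shape of the ID probe; error handling abbreviated */
  int chip_id = i2c_smbus_read_word_data(client, LTC2978_MFR_SPECIAL_ID);
  if (chip_id < 0)
  	return chip_id;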
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 9319fcf142d..3cbf66e9d86 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -97,6 +97,7 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
struct attribute_group group;
+ const struct attribute_group *groups[2];
struct pmbus_sensor *sensors;
@@ -156,7 +157,7 @@ EXPORT_SYMBOL_GPL(pmbus_write_byte);
/*
* _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
- * a device specific mapping funcion exists and calls it if necessary.
+ * a device specific mapping function exists and calls it if necessary.
*/
static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
@@ -348,7 +349,7 @@ static struct _pmbus_status {
static struct pmbus_data *pmbus_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
struct pmbus_sensor *sensor;
@@ -686,7 +687,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
if (!s1 && !s2) {
ret = !!regval;
} else if (!s1 || !s2) {
- BUG();
+ WARN(1, "Bad boolean descriptor %p: s1=%p, s2=%p\n", b, s1, s2);
return 0;
} else {
long v1, v2;
@@ -733,7 +734,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_data *data = i2c_get_clientdata(client);
struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
ssize_t rv = count;
@@ -1768,22 +1769,16 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
goto out_kfree;
}
- /* Register sysfs hooks */
- ret = sysfs_create_group(&dev->kobj, &data->group);
- if (ret) {
- dev_err(dev, "Failed to create sysfs entries\n");
- goto out_kfree;
- }
- data->hwmon_dev = hwmon_device_register(dev);
+ data->groups[0] = &data->group;
+ data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
dev_err(dev, "Failed to register hwmon device\n");
- goto out_hwmon_device_register;
+ goto out_kfree;
}
return 0;
-out_hwmon_device_register:
- sysfs_remove_group(&dev->kobj, &data->group);
out_kfree:
kfree(data->group.attrs);
return ret;
@@ -1794,7 +1789,6 @@ int pmbus_do_remove(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &data->group);
kfree(data->group.attrs);
return 0;
}
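
The to_i2c_client(dev->parent) changes above are a direct consequence of switching to hwmon_device_register_with_groups(): the attribute callbacks now run against the hwmon child device, and the i2c client sits one level up. A sketch of the lookup, wrapped in a hypothetical helper for clarity:

  /* hypothetical helper: reach the pmbus data from a hwmon child device */
  static struct pmbus_data *pmbus_data_of(struct device *hwmon_dev)
  {
  	struct i2c_client *client = to_i2c_client(hwmon_dev->parent);

  	return i2c_get_clientdata(client);
  }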
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index dfe6d9527ef..7fa6e7d0b9b 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -155,7 +155,8 @@ MODULE_DEVICE_TABLE(i2c, tmp401_id);
*/
struct tmp401_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -231,8 +232,8 @@ static int tmp401_update_device_reg16(struct i2c_client *client,
static struct tmp401_data *tmp401_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct tmp401_data *ret = data;
int i, val;
unsigned long next_update;
@@ -350,15 +351,12 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *devattr,
{
int nr = to_sensor_dev_attr_2(devattr)->nr;
int index = to_sensor_dev_attr_2(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = tmp401_update_device(dev);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
u16 reg;
u8 regaddr;
- if (IS_ERR(data))
- return PTR_ERR(data);
-
if (kstrtol(buf, 10, &val))
return -EINVAL;
@@ -405,7 +403,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
val = clamp_val(val, temp - 255000, temp);
reg = ((temp - val) + 500) / 1000;
- i2c_smbus_write_byte_data(to_i2c_client(dev), TMP401_TEMP_CRIT_HYST,
+ i2c_smbus_write_byte_data(data->client, TMP401_TEMP_CRIT_HYST,
reg);
data->temp_crit_hyst = reg;
@@ -423,8 +421,8 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
static ssize_t reset_temp_history(struct device *dev,
struct device_attribute *devattr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
if (kstrtol(buf, 10, &val))
@@ -447,8 +445,7 @@ static ssize_t reset_temp_history(struct device *dev,
static ssize_t show_update_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->update_interval);
}
@@ -457,8 +454,8 @@ static ssize_t set_update_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int err, rate;
@@ -616,10 +613,10 @@ static const struct attribute_group tmp432_group = {
* Begin non sysfs callback code (aka Real code)
*/
-static void tmp401_init_client(struct i2c_client *client)
+static void tmp401_init_client(struct tmp401_data *data,
+ struct i2c_client *client)
{
int config, config_orig;
- struct tmp401_data *data = i2c_get_clientdata(client);
/* Set the conversion rate to 2 Hz */
i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
@@ -705,77 +702,45 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
-static int tmp401_remove(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct tmp401_data *data = i2c_get_clientdata(client);
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- sysfs_remove_group(&dev->kobj, &tmp401_group);
-
- if (data->kind == tmp411)
- sysfs_remove_group(&dev->kobj, &tmp411_group);
-
- if (data->kind == tmp432)
- sysfs_remove_group(&dev->kobj, &tmp432_group);
-
- return 0;
-}
-
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
struct device *dev = &client->dev;
- int err;
+ struct device *hwmon_dev;
struct tmp401_data *data;
- const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
+ int groups = 0;
data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
data->kind = id->driver_data;
/* Initialize the TMP401 chip */
- tmp401_init_client(client);
+ tmp401_init_client(data, client);
/* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &tmp401_group);
- if (err)
- return err;
+ data->groups[groups++] = &tmp401_group;
/* Register additional tmp411 sysfs hooks */
- if (data->kind == tmp411) {
- err = sysfs_create_group(&dev->kobj, &tmp411_group);
- if (err)
- goto exit_remove;
- }
+ if (data->kind == tmp411)
+ data->groups[groups++] = &tmp411_group;
/* Register additional tmp432 sysfs hooks */
- if (data->kind == tmp432) {
- err = sysfs_create_group(&dev->kobj, &tmp432_group);
- if (err)
- goto exit_remove;
- }
+ if (data->kind == tmp432)
+ data->groups[groups++] = &tmp432_group;
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- data->hwmon_dev = NULL;
- goto exit_remove;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
dev_info(dev, "Detected TI %s chip\n", names[data->kind]);
return 0;
-
-exit_remove:
- tmp401_remove(client);
- return err;
}
static struct i2c_driver tmp401_driver = {
@@ -784,7 +749,6 @@ static struct i2c_driver tmp401_driver = {
.name = "tmp401",
},
.probe = tmp401_probe,
- .remove = tmp401_remove,
.id_table = tmp401_id,
.detect = tmp401_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index a3feee332e2..bdcf2dce5ec 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1043,7 +1043,7 @@ static struct sensor_device_attribute sda_temp_alarm[] = {
SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
};
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
static ssize_t show_alarms_reg(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 5febb43cb4c..df585808adb 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -579,7 +579,7 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
return count;
}
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
static ssize_t
show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index b0c30a546ff..9d63d71214c 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -808,7 +808,7 @@ show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
if (nr == TEMP_FAN_MAP) {
val = data->temp_fan_map[index];
} else if (nr == TEMP_PWM_ENABLE) {
- /* +2 to transfrom into 2 and 3 to conform with sysfs intf */
+ /* +2 to transform into 2 and 3 to conform with sysfs intf */
val = ((data->pwm_enable >> index) & 0x01) + 2;
} else if (nr == TEMP_CRUISE) {
val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
@@ -1199,7 +1199,8 @@ static void w83793_init_client(struct i2c_client *client)
static int watchdog_set_timeout(struct w83793_data *data, int timeout)
{
- int ret, mtimeout;
+ unsigned int mtimeout;
+ int ret;
mtimeout = DIV_ROUND_UP(timeout, 60);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cdcbd8368ed..3b26129f605 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -109,6 +109,7 @@ config I2C_I801
Avoton (SOC)
Wellsburg (PCH)
Coleto Creek (PCH)
+ Wildcat Point-LP (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -345,6 +346,16 @@ config I2C_BCM2835
This support is also available as a module. If so, the module
will be called i2c-bcm2835.
+config I2C_BCM_KONA
+ tristate "BCM Kona I2C adapter"
+ depends on ARCH_BCM_MOBILE
+ default y
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface on the Broadcom Kona family of processors.
+
+ If you do not need the Kona I2C interface, say N.
+
config I2C_BLACKFIN_TWI
tristate "Blackfin TWI I2C support"
depends on BLACKFIN
@@ -436,6 +447,13 @@ config I2C_EG20T
ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+config I2C_EXYNOS5
+ tristate "Exynos5 high-speed I2C driver"
+ depends on ARCH_EXYNOS5 && OF
+ help
+ Say Y here to include support for the high-speed I2C controller in
+ Exynos5-based Samsung SoCs.
+
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
depends on GPIOLIB
@@ -665,7 +683,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -695,6 +713,16 @@ config I2C_SIRF
This driver can also be built as a module. If so, the module
will be called i2c-sirf.
+config I2C_ST
+ tristate "STMicroelectronics SSC I2C support"
+ depends on ARCH_STI
+ help
+ Enable this option to add support for the STMicroelectronics SSC
+ (Synchronous Serial Controller) hardware used as an I2C controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-st.
+
config I2C_STU300
tristate "ST Microelectronics DDC I2C interface"
depends on MACH_U300
@@ -768,7 +796,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
- depends on ARCH_SHMOBILE && I2C
+ depends on ARM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index d00997f3eb3..c73eb0ea788 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -42,6 +42,7 @@ i2c-designware-platform-objs := i2c-designware-platdrv.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-objs := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
+obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
@@ -68,6 +69,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o
+obj-$(CONFIG_I2C_ST) += i2c-st.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
@@ -87,6 +89,7 @@ obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o
# Other I2C/SMBus bus drivers
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
+obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index fd059308aff..8edba9de76d 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -371,7 +371,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
- INIT_COMPLETION(dev->cmd_complete);
+ reinit_completion(&dev->cmd_complete);
dev->transfer_status = 0;
if (!dev->buf_len) {
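
The at91 change above is part of a tree-wide rename: the old INIT_COMPLETION() macro became reinit_completion(). The usage pattern is unchanged: initialize once, re-arm before each transfer, then wait with a timeout. A minimal sketch (start_transfer() is a hypothetical stand-in for kicking off the hardware):

  init_completion(&dev->cmd_complete);		/* once, at probe time */

  /* per transfer */
  reinit_completion(&dev->cmd_complete);		/* re-arm */
  start_transfer(dev);				/* hypothetical helper */
  if (!wait_for_completion_timeout(&dev->cmd_complete,
  				 msecs_to_jiffies(100)))
  	return -ETIMEDOUT;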
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
new file mode 100644
index 00000000000..036cf03aeb6
--- /dev/null
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -0,0 +1,909 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/* Hardware register offsets and field definitions */
+#define CS_OFFSET 0x00000020
+#define CS_ACK_SHIFT 3
+#define CS_ACK_MASK 0x00000008
+#define CS_ACK_CMD_GEN_START 0x00000000
+#define CS_ACK_CMD_GEN_RESTART 0x00000001
+#define CS_CMD_SHIFT 1
+#define CS_CMD_CMD_NO_ACTION 0x00000000
+#define CS_CMD_CMD_START_RESTART 0x00000001
+#define CS_CMD_CMD_STOP 0x00000002
+#define CS_EN_SHIFT 0
+#define CS_EN_CMD_ENABLE_BSC 0x00000001
+
+#define TIM_OFFSET 0x00000024
+#define TIM_PRESCALE_SHIFT 6
+#define TIM_P_SHIFT 3
+#define TIM_NO_DIV_SHIFT 2
+#define TIM_DIV_SHIFT 0
+
+#define DAT_OFFSET 0x00000028
+
+#define TOUT_OFFSET 0x0000002c
+
+#define TXFCR_OFFSET 0x0000003c
+#define TXFCR_FIFO_FLUSH_MASK 0x00000080
+#define TXFCR_FIFO_EN_MASK 0x00000040
+
+#define IER_OFFSET 0x00000044
+#define IER_READ_COMPLETE_INT_MASK 0x00000010
+#define IER_I2C_INT_EN_MASK 0x00000008
+#define IER_FIFO_INT_EN_MASK 0x00000002
+#define IER_NOACK_EN_MASK 0x00000001
+
+#define ISR_OFFSET 0x00000048
+#define ISR_RESERVED_MASK 0xffffff60
+#define ISR_CMDBUSY_MASK 0x00000080
+#define ISR_READ_COMPLETE_MASK 0x00000010
+#define ISR_SES_DONE_MASK 0x00000008
+#define ISR_ERR_MASK 0x00000004
+#define ISR_TXFIFOEMPTY_MASK 0x00000002
+#define ISR_NOACK_MASK 0x00000001
+
+#define CLKEN_OFFSET 0x0000004C
+#define CLKEN_AUTOSENSE_OFF_MASK 0x00000080
+#define CLKEN_M_SHIFT 4
+#define CLKEN_N_SHIFT 1
+#define CLKEN_CLKEN_MASK 0x00000001
+
+#define FIFO_STATUS_OFFSET 0x00000054
+#define FIFO_STATUS_RXFIFO_EMPTY_MASK 0x00000004
+#define FIFO_STATUS_TXFIFO_EMPTY_MASK 0x00000010
+
+#define HSTIM_OFFSET 0x00000058
+#define HSTIM_HS_MODE_MASK 0x00008000
+#define HSTIM_HS_HOLD_SHIFT 10
+#define HSTIM_HS_HIGH_PHASE_SHIFT 5
+#define HSTIM_HS_SETUP_SHIFT 0
+
+#define PADCTL_OFFSET 0x0000005c
+#define PADCTL_PAD_OUT_EN_MASK 0x00000004
+
+#define RXFCR_OFFSET 0x00000068
+#define RXFCR_NACK_EN_SHIFT 7
+#define RXFCR_READ_COUNT_SHIFT 0
+#define RXFIFORDOUT_OFFSET 0x0000006c
+
+/* Locally used constants */
+#define MAX_RX_FIFO_SIZE 64U /* bytes */
+#define MAX_TX_FIFO_SIZE 64U /* bytes */
+
+#define STD_EXT_CLK_FREQ 13000000UL
+#define HS_EXT_CLK_FREQ 104000000UL
+
+#define MASTERCODE 0x08 /* Mastercodes are 0000_1xxxb */
+
+#define I2C_TIMEOUT 100 /* msecs */
+
+/* Operations that can be commanded to the controller */
+enum bcm_kona_cmd_t {
+ BCM_CMD_NOACTION = 0,
+ BCM_CMD_START,
+ BCM_CMD_RESTART,
+ BCM_CMD_STOP,
+};
+
+enum bus_speed_index {
+ BCM_SPD_100K = 0,
+ BCM_SPD_400K,
+ BCM_SPD_1MHZ,
+};
+
+enum hs_bus_speed_index {
+ BCM_SPD_3P4MHZ = 0,
+};
+
+/* Internal divider settings for standard mode, fast mode and fast mode plus */
+struct bus_speed_cfg {
+ uint8_t time_m; /* Number of cycles for setup time */
+ uint8_t time_n; /* Number of cycles for hold time */
+ uint8_t prescale; /* Prescale divider */
+ uint8_t time_p; /* Timing coefficient */
+ uint8_t no_div; /* Disable clock divider */
+ uint8_t time_div; /* Post-prescale divider */
+};
+
+/* Internal divider settings for high-speed mode */
+struct hs_bus_speed_cfg {
+ uint8_t hs_hold; /* Number of clock cycles SCL stays low until
+ the end of bit period */
+ uint8_t hs_high_phase; /* Number of clock cycles SCL stays high
+ before it falls */
+ uint8_t hs_setup; /* Number of clock cycles SCL stays low
+ before it rises */
+ uint8_t prescale; /* Prescale divider */
+ uint8_t time_p; /* Timing coefficient */
+ uint8_t no_div; /* Disable clock divider */
+ uint8_t time_div; /* Post-prescale divider */
+};
+
+static const struct bus_speed_cfg std_cfg_table[] = {
+ [BCM_SPD_100K] = {0x01, 0x01, 0x03, 0x06, 0x00, 0x02},
+ [BCM_SPD_400K] = {0x05, 0x01, 0x03, 0x05, 0x01, 0x02},
+ [BCM_SPD_1MHZ] = {0x01, 0x01, 0x03, 0x01, 0x01, 0x03},
+};
+
+static const struct hs_bus_speed_cfg hs_cfg_table[] = {
+ [BCM_SPD_3P4MHZ] = {0x01, 0x08, 0x14, 0x00, 0x06, 0x01, 0x00},
+};
+
+struct bcm_kona_i2c_dev {
+ struct device *device;
+
+ void __iomem *base;
+ int irq;
+ struct clk *external_clk;
+
+ struct i2c_adapter adapter;
+
+ struct completion done;
+
+ const struct bus_speed_cfg *std_cfg;
+ const struct hs_bus_speed_cfg *hs_cfg;
+};
+
+static void bcm_kona_i2c_send_cmd_to_ctrl(struct bcm_kona_i2c_dev *dev,
+ enum bcm_kona_cmd_t cmd)
+{
+ dev_dbg(dev->device, "%s, %d\n", __func__, cmd);
+
+ switch (cmd) {
+ case BCM_CMD_NOACTION:
+ writel((CS_CMD_CMD_NO_ACTION << CS_CMD_SHIFT) |
+ (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT),
+ dev->base + CS_OFFSET);
+ break;
+
+ case BCM_CMD_START:
+ writel((CS_ACK_CMD_GEN_START << CS_ACK_SHIFT) |
+ (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) |
+ (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT),
+ dev->base + CS_OFFSET);
+ break;
+
+ case BCM_CMD_RESTART:
+ writel((CS_ACK_CMD_GEN_RESTART << CS_ACK_SHIFT) |
+ (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) |
+ (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT),
+ dev->base + CS_OFFSET);
+ break;
+
+ case BCM_CMD_STOP:
+ writel((CS_CMD_CMD_STOP << CS_CMD_SHIFT) |
+ (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT),
+ dev->base + CS_OFFSET);
+ break;
+
+ default:
+ dev_err(dev->device, "Unknown command %d\n", cmd);
+ }
+}
+
+static void bcm_kona_i2c_enable_clock(struct bcm_kona_i2c_dev *dev)
+{
+ writel(readl(dev->base + CLKEN_OFFSET) | CLKEN_CLKEN_MASK,
+ dev->base + CLKEN_OFFSET);
+}
+
+static void bcm_kona_i2c_disable_clock(struct bcm_kona_i2c_dev *dev)
+{
+ writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_CLKEN_MASK,
+ dev->base + CLKEN_OFFSET);
+}
+
+static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid)
+{
+ struct bcm_kona_i2c_dev *dev = devid;
+ uint32_t status = readl(dev->base + ISR_OFFSET);
+
+ if ((status & ~ISR_RESERVED_MASK) == 0)
+ return IRQ_NONE;
+
+ /* Must flush the TX FIFO when NAK detected */
+ if (status & ISR_NOACK_MASK)
+ writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK,
+ dev->base + TXFCR_OFFSET);
+
+ writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
+ complete_all(&dev->done);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for ISR_CMDBUSY_MASK to go low before writing to CS, DAT, or RCD */
+static int bcm_kona_i2c_wait_if_busy(struct bcm_kona_i2c_dev *dev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT);
+
+ while (readl(dev->base + ISR_OFFSET) & ISR_CMDBUSY_MASK)
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev->device, "CMDBUSY timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* Send command to I2C bus */
+static int bcm_kona_send_i2c_cmd(struct bcm_kona_i2c_dev *dev,
+ enum bcm_kona_cmd_t cmd)
+{
+ int rc;
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT);
+
+ /* Make sure the hardware is ready */
+ rc = bcm_kona_i2c_wait_if_busy(dev);
+ if (rc < 0)
+ return rc;
+
+ /* Unmask the session done interrupt */
+ writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET);
+
+ /* Mark as incomplete before sending the command */
+ reinit_completion(&dev->done);
+
+ /* Send the command */
+ bcm_kona_i2c_send_cmd_to_ctrl(dev, cmd);
+
+ /* Wait for transaction to finish or timeout */
+ time_left = wait_for_completion_timeout(&dev->done, time_left);
+
+ /* Mask all interrupts */
+ writel(0, dev->base + IER_OFFSET);
+
+ if (!time_left) {
+ dev_err(dev->device, "controller timed out\n");
+ rc = -ETIMEDOUT;
+ }
+
+ /* Clear command */
+ bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION);
+
+ return rc;
+}
+
+/* Read a single RX FIFO worth of data from the i2c bus */
+static int bcm_kona_i2c_read_fifo_single(struct bcm_kona_i2c_dev *dev,
+ uint8_t *buf, unsigned int len,
+ unsigned int last_byte_nak)
+{
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT);
+
+ /* Mark as incomplete before starting the RX FIFO */
+ reinit_completion(&dev->done);
+
+ /* Unmask the read complete interrupt */
+ writel(IER_READ_COMPLETE_INT_MASK, dev->base + IER_OFFSET);
+
+ /* Start the RX FIFO */
+ writel((last_byte_nak << RXFCR_NACK_EN_SHIFT) |
+ (len << RXFCR_READ_COUNT_SHIFT),
+ dev->base + RXFCR_OFFSET);
+
+ /* Wait for FIFO read to complete */
+ time_left = wait_for_completion_timeout(&dev->done, time_left);
+
+ /* Mask all interrupts */
+ writel(0, dev->base + IER_OFFSET);
+
+ if (!time_left) {
+ dev_err(dev->device, "RX FIFO time out\n");
+ return -EREMOTEIO;
+ }
+
+ /* Read data from FIFO */
+ for (; len > 0; len--, buf++)
+ *buf = readl(dev->base + RXFIFORDOUT_OFFSET);
+
+ return 0;
+}
+
+/* Read any amount of data using the RX FIFO from the i2c bus */
+static int bcm_kona_i2c_read_fifo(struct bcm_kona_i2c_dev *dev,
+ struct i2c_msg *msg)
+{
+ unsigned int bytes_to_read = MAX_RX_FIFO_SIZE;
+ unsigned int last_byte_nak = 0;
+ unsigned int bytes_read = 0;
+ int rc;
+
+ uint8_t *tmp_buf = msg->buf;
+
+ while (bytes_read < msg->len) {
+ if (msg->len - bytes_read <= MAX_RX_FIFO_SIZE) {
+ last_byte_nak = 1; /* NAK last byte of transfer */
+ bytes_to_read = msg->len - bytes_read;
+ }
+
+ rc = bcm_kona_i2c_read_fifo_single(dev, tmp_buf, bytes_to_read,
+ last_byte_nak);
+ if (rc < 0)
+ return -EREMOTEIO;
+
+ bytes_read += bytes_to_read;
+ tmp_buf += bytes_to_read;
+ }
+
+ return 0;
+}
+
+/* Write a single byte of data to the i2c bus */
+static int bcm_kona_i2c_write_byte(struct bcm_kona_i2c_dev *dev, uint8_t data,
+ unsigned int nak_expected)
+{
+ int rc;
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT);
+ unsigned int nak_received;
+
+ /* Make sure the hardware is ready */
+ rc = bcm_kona_i2c_wait_if_busy(dev);
+ if (rc < 0)
+ return rc;
+
+ /* Clear pending session done interrupt */
+ writel(ISR_SES_DONE_MASK, dev->base + ISR_OFFSET);
+
+ /* Unmask the session done interrupt */
+ writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET);
+
+ /* Mark as incomplete before sending the data */
+ reinit_completion(&dev->done);
+
+ /* Send one byte of data */
+ writel(data, dev->base + DAT_OFFSET);
+
+ /* Wait for byte to be written */
+ time_left = wait_for_completion_timeout(&dev->done, time_left);
+
+ /* Mask all interrupts */
+ writel(0, dev->base + IER_OFFSET);
+
+ if (!time_left) {
+ dev_dbg(dev->device, "controller timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ nak_received = readl(dev->base + CS_OFFSET) & CS_ACK_MASK ? 1 : 0;
+
+ if (nak_received ^ nak_expected) {
+ dev_dbg(dev->device, "unexpected NAK/ACK\n");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+/* Write a single TX FIFO worth of data to the i2c bus */
+static int bcm_kona_i2c_write_fifo_single(struct bcm_kona_i2c_dev *dev,
+ uint8_t *buf, unsigned int len)
+{
+ int k;
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT);
+ unsigned int fifo_status;
+
+ /* Mark as incomplete before sending data to the TX FIFO */
+ reinit_completion(&dev->done);
+
+ /* Unmask the fifo empty and nak interrupt */
+ writel(IER_FIFO_INT_EN_MASK | IER_NOACK_EN_MASK,
+ dev->base + IER_OFFSET);
+
+ /* Disable IRQ to load a FIFO worth of data without interruption */
+ disable_irq(dev->irq);
+
+ /* Write data into FIFO */
+ for (k = 0; k < len; k++)
+ writel(buf[k], (dev->base + DAT_OFFSET));
+
+ /* Enable IRQ now that data has been loaded */
+ enable_irq(dev->irq);
+
+ /* Wait for FIFO to empty */
+ do {
+ time_left = wait_for_completion_timeout(&dev->done, time_left);
+ fifo_status = readl(dev->base + FIFO_STATUS_OFFSET);
+ } while (time_left && !(fifo_status & FIFO_STATUS_TXFIFO_EMPTY_MASK));
+
+ /* Mask all interrupts */
+ writel(0, dev->base + IER_OFFSET);
+
+ /* Check if there was a NAK */
+ if (readl(dev->base + CS_OFFSET) & CS_ACK_MASK) {
+ dev_err(dev->device, "unexpected NAK\n");
+ return -EREMOTEIO;
+ }
+
+ /* Check if a timeout occurred */
+ if (!time_left) {
+ dev_err(dev->device, "completion timed out\n");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+
+/* Write any amount of data using TX FIFO to the i2c bus */
+static int bcm_kona_i2c_write_fifo(struct bcm_kona_i2c_dev *dev,
+ struct i2c_msg *msg)
+{
+ unsigned int bytes_to_write = MAX_TX_FIFO_SIZE;
+ unsigned int bytes_written = 0;
+ int rc;
+
+ uint8_t *tmp_buf = msg->buf;
+
+ while (bytes_written < msg->len) {
+ if (msg->len - bytes_written <= MAX_TX_FIFO_SIZE)
+ bytes_to_write = msg->len - bytes_written;
+
+ rc = bcm_kona_i2c_write_fifo_single(dev, tmp_buf,
+ bytes_to_write);
+ if (rc < 0)
+ return -EREMOTEIO;
+
+ bytes_written += bytes_to_write;
+ tmp_buf += bytes_to_write;
+ }
+
+ return 0;
+}
+
+/* Send i2c address */
+static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev,
+ struct i2c_msg *msg)
+{
+ unsigned char addr;
+
+ if (msg->flags & I2C_M_TEN) {
+ /* First byte is 11110XX0 where XX is upper 2 bits */
+ addr = 0xF0 | ((msg->addr & 0x300) >> 7);
+ if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
+ return -EREMOTEIO;
+
+ /* Second byte is the remaining 8 bits */
+ addr = msg->addr & 0xFF;
+ if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
+ return -EREMOTEIO;
+
+ if (msg->flags & I2C_M_RD) {
+ /* For read, send restart command */
+ if (bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART) < 0)
+ return -EREMOTEIO;
+
+ /* Then re-send the first byte with the read bit set */
+ addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01;
+ if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
+ return -EREMOTEIO;
+ }
+ } else {
+ addr = msg->addr << 1;
+
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+
+ if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
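
A worked example of the 10-bit addressing path above, with an illustrative address value:

  /*
   * Worked example for msg->addr = 0x2A5 (10-bit), illustrative only:
   *
   *   first byte  = 0xF0 | ((0x2A5 & 0x300) >> 7) = 0xF0 | 0x04 = 0xF4
   *                 (pattern 11110XX0 with XX = upper two address bits)
   *   second byte = 0x2A5 & 0xFF                  = 0xA5
   *
   * For a read, a RESTART is issued and the first byte is re-sent with
   * the read bit set: 0xF4 | 0x01 = 0xF5.
   */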
+
+static void bcm_kona_i2c_enable_autosense(struct bcm_kona_i2c_dev *dev)
+{
+ writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_AUTOSENSE_OFF_MASK,
+ dev->base + CLKEN_OFFSET);
+}
+
+static void bcm_kona_i2c_config_timing(struct bcm_kona_i2c_dev *dev)
+{
+ writel(readl(dev->base + HSTIM_OFFSET) & ~HSTIM_HS_MODE_MASK,
+ dev->base + HSTIM_OFFSET);
+
+ writel((dev->std_cfg->prescale << TIM_PRESCALE_SHIFT) |
+ (dev->std_cfg->time_p << TIM_P_SHIFT) |
+ (dev->std_cfg->no_div << TIM_NO_DIV_SHIFT) |
+ (dev->std_cfg->time_div << TIM_DIV_SHIFT),
+ dev->base + TIM_OFFSET);
+
+ writel((dev->std_cfg->time_m << CLKEN_M_SHIFT) |
+ (dev->std_cfg->time_n << CLKEN_N_SHIFT) |
+ CLKEN_CLKEN_MASK,
+ dev->base + CLKEN_OFFSET);
+}
+
+static void bcm_kona_i2c_config_timing_hs(struct bcm_kona_i2c_dev *dev)
+{
+ writel((dev->hs_cfg->prescale << TIM_PRESCALE_SHIFT) |
+ (dev->hs_cfg->time_p << TIM_P_SHIFT) |
+ (dev->hs_cfg->no_div << TIM_NO_DIV_SHIFT) |
+ (dev->hs_cfg->time_div << TIM_DIV_SHIFT),
+ dev->base + TIM_OFFSET);
+
+ writel((dev->hs_cfg->hs_hold << HSTIM_HS_HOLD_SHIFT) |
+ (dev->hs_cfg->hs_high_phase << HSTIM_HS_HIGH_PHASE_SHIFT) |
+ (dev->hs_cfg->hs_setup << HSTIM_HS_SETUP_SHIFT),
+ dev->base + HSTIM_OFFSET);
+
+ writel(readl(dev->base + HSTIM_OFFSET) | HSTIM_HS_MODE_MASK,
+ dev->base + HSTIM_OFFSET);
+}
+
+static int bcm_kona_i2c_switch_to_hs(struct bcm_kona_i2c_dev *dev)
+{
+ int rc;
+
+ /* Send mastercode at standard speed */
+ rc = bcm_kona_i2c_write_byte(dev, MASTERCODE, 1);
+ if (rc < 0) {
+ pr_err("High speed handshake failed\n");
+ return rc;
+ }
+
+ /* Configure external clock to higher frequency */
+ rc = clk_set_rate(dev->external_clk, HS_EXT_CLK_FREQ);
+ if (rc) {
+ dev_err(dev->device, "%s: clk_set_rate returned %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* Reconfigure internal dividers */
+ bcm_kona_i2c_config_timing_hs(dev);
+
+ /* Send a restart command */
+ rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART);
+ if (rc < 0)
+ dev_err(dev->device, "High speed restart command failed\n");
+
+ return rc;
+}
+
+static int bcm_kona_i2c_switch_to_std(struct bcm_kona_i2c_dev *dev)
+{
+ int rc;
+
+ /* Reconfigure internal dividers */
+ bcm_kona_i2c_config_timing(dev);
+
+ /* Configure external clock to lower frequency */
+ rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ);
+ if (rc) {
+ dev_err(dev->device, "%s: clk_set_rate returned %d\n",
+ __func__, rc);
+ }
+
+ return rc;
+}
+
+/* Master transfer function */
+static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msgs[], int num)
+{
+ struct bcm_kona_i2c_dev *dev = i2c_get_adapdata(adapter);
+ struct i2c_msg *pmsg;
+ int rc = 0;
+ int i;
+
+ rc = clk_prepare_enable(dev->external_clk);
+ if (rc) {
+ dev_err(dev->device, "%s: peri clock enable failed. err %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* Enable pad output */
+ writel(0, dev->base + PADCTL_OFFSET);
+
+ /* Enable internal clocks */
+ bcm_kona_i2c_enable_clock(dev);
+
+ /* Send start command */
+ rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_START);
+ if (rc < 0) {
+ dev_err(dev->device, "Start command failed rc = %d\n", rc);
+ goto xfer_disable_pad;
+ }
+
+ /* Switch to high speed if applicable */
+ if (dev->hs_cfg) {
+ rc = bcm_kona_i2c_switch_to_hs(dev);
+ if (rc < 0)
+ goto xfer_send_stop;
+ }
+
+ /* Loop through all messages */
+ for (i = 0; i < num; i++) {
+ pmsg = &msgs[i];
+
+ /* Send restart for subsequent messages */
+ if ((i != 0) && ((pmsg->flags & I2C_M_NOSTART) == 0)) {
+ rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART);
+ if (rc < 0) {
+ dev_err(dev->device,
+ "restart cmd failed rc = %d\n", rc);
+ goto xfer_send_stop;
+ }
+ }
+
+ /* Send slave address */
+ if (!(pmsg->flags & I2C_M_NOSTART)) {
+ rc = bcm_kona_i2c_do_addr(dev, pmsg);
+ if (rc < 0) {
+ dev_err(dev->device,
+ "NAK from addr %2.2x msg#%d rc = %d\n",
+ pmsg->addr, i, rc);
+ goto xfer_send_stop;
+ }
+ }
+
+ /* Perform data transfer */
+ if (pmsg->flags & I2C_M_RD) {
+ rc = bcm_kona_i2c_read_fifo(dev, pmsg);
+ if (rc < 0) {
+ dev_err(dev->device, "read failure\n");
+ goto xfer_send_stop;
+ }
+ } else {
+ rc = bcm_kona_i2c_write_fifo(dev, pmsg);
+ if (rc < 0) {
+ dev_err(dev->device, "write failure");
+ goto xfer_send_stop;
+ }
+ }
+ }
+
+ rc = num;
+
+xfer_send_stop:
+ /* Send a STOP command */
+ bcm_kona_send_i2c_cmd(dev, BCM_CMD_STOP);
+
+ /* Return from high speed if applicable */
+ if (dev->hs_cfg) {
+ int hs_rc = bcm_kona_i2c_switch_to_std(dev);
+
+ if (hs_rc)
+ rc = hs_rc;
+ }
+
+xfer_disable_pad:
+ /* Disable pad output */
+ writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET);
+
+ /* Stop internal clock */
+ bcm_kona_i2c_disable_clock(dev);
+
+ clk_disable_unprepare(dev->external_clk);
+
+ return rc;
+}
+
+static uint32_t bcm_kona_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
+ I2C_FUNC_NOSTART;
+}
+
+static const struct i2c_algorithm bcm_algo = {
+ .master_xfer = bcm_kona_i2c_xfer,
+ .functionality = bcm_kona_i2c_functionality,
+};
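+
+/*
+ * A minimal sketch of how a client reaches bcm_kona_i2c_xfer() through the
+ * i2c core; the 0x50 address and the write-then-read message shape are
+ * illustrative assumptions, not part of this driver.
+ */
+#if 0
+static int example_read_reg(struct i2c_adapter *adap, u8 reg, u8 *val)
+{
+	struct i2c_msg msgs[] = {
+		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
+	};
+
+	/* i2c_transfer() ends up in adap->algo->master_xfer() */
+	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
+}
+#endif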
+
+static int bcm_kona_i2c_assign_bus_speed(struct bcm_kona_i2c_dev *dev)
+{
+ unsigned int bus_speed;
+ int ret = of_property_read_u32(dev->device->of_node, "clock-frequency",
+ &bus_speed);
+ if (ret < 0) {
+ dev_err(dev->device, "missing clock-frequency property\n");
+ return -ENODEV;
+ }
+
+ switch (bus_speed) {
+ case 100000:
+ dev->std_cfg = &std_cfg_table[BCM_SPD_100K];
+ break;
+ case 400000:
+ dev->std_cfg = &std_cfg_table[BCM_SPD_400K];
+ break;
+ case 1000000:
+ dev->std_cfg = &std_cfg_table[BCM_SPD_1MHZ];
+ break;
+ case 3400000:
+ /* Send mastercode at 100k */
+ dev->std_cfg = &std_cfg_table[BCM_SPD_100K];
+ dev->hs_cfg = &hs_cfg_table[BCM_SPD_3P4MHZ];
+ break;
+ default:
+ pr_err("%d hz bus speed not supported\n", bus_speed);
+ pr_err("Valid speeds are 100khz, 400khz, 1mhz, and 3.4mhz\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
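+
+/*
+ * A hypothetical device tree node matching the "clock-frequency" parsing
+ * above (the unit address and register window are illustrative assumptions,
+ * not taken from a real board file):
+ *
+ *	i2c@3e016000 {
+ *		compatible = "brcm,kona-i2c";
+ *		reg = <0x3e016000 0x80>;
+ *		clock-frequency = <400000>;
+ *	};
+ */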
+
+static int bcm_kona_i2c_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct bcm_kona_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *iomem;
+
+ /* Allocate memory for private data structure */
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+ dev->device = &pdev->dev;
+ init_completion(&dev->done);
+
+ /* Map hardware registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(dev->device, iomem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ /* Get and enable external clock */
+ dev->external_clk = devm_clk_get(dev->device, NULL);
+ if (IS_ERR(dev->external_clk)) {
+ dev_err(dev->device, "couldn't get clock\n");
+ return -ENODEV;
+ }
+
+ rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ);
+ if (rc) {
+ dev_err(dev->device, "%s: clk_set_rate returned %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = clk_prepare_enable(dev->external_clk);
+ if (rc) {
+ dev_err(dev->device, "couldn't enable clock\n");
+ return rc;
+ }
+
+ /* Parse bus speed */
+ rc = bcm_kona_i2c_assign_bus_speed(dev);
+ if (rc)
+ goto probe_disable_clk;
+
+ /* Enable internal clocks */
+ bcm_kona_i2c_enable_clock(dev);
+
+ /* Configure internal dividers */
+ bcm_kona_i2c_config_timing(dev);
+
+ /* Disable timeout */
+ writel(0, dev->base + TOUT_OFFSET);
+
+ /* Enable autosense */
+ bcm_kona_i2c_enable_autosense(dev);
+
+ /* Enable TX FIFO */
+ writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK,
+ dev->base + TXFCR_OFFSET);
+
+ /* Mask all interrupts */
+ writel(0, dev->base + IER_OFFSET);
+
+ /* Clear all pending interrupts */
+ writel(ISR_CMDBUSY_MASK |
+ ISR_READ_COMPLETE_MASK |
+ ISR_SES_DONE_MASK |
+ ISR_ERR_MASK |
+ ISR_TXFIFOEMPTY_MASK |
+ ISR_NOACK_MASK,
+ dev->base + ISR_OFFSET);
+
+ /* Get the interrupt number */
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ dev_err(dev->device, "no irq resource\n");
+ rc = -ENODEV;
+ goto probe_disable_clk;
+ }
+
+ /* register the ISR handler */
+ rc = devm_request_irq(&pdev->dev, dev->irq, bcm_kona_i2c_isr,
+ IRQF_SHARED, pdev->name, dev);
+ if (rc) {
+ dev_err(dev->device, "failed to request irq %i\n", dev->irq);
+ goto probe_disable_clk;
+ }
+
+ /* Enable the controller but leave it idle */
+ bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION);
+
+ /* Disable pad output */
+ writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET);
+
+ /* Disable internal clock */
+ bcm_kona_i2c_disable_clock(dev);
+
+ /* Disable external clock */
+ clk_disable_unprepare(dev->external_clk);
+
+ /* Add the i2c adapter */
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
+ adap->algo = &bcm_algo;
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+
+ rc = i2c_add_adapter(adap);
+ if (rc) {
+ dev_err(dev->device, "failed to add adapter\n");
+ return rc;
+ }
+
+ dev_info(dev->device, "device registered successfully\n");
+
+ return 0;
+
+probe_disable_clk:
+ bcm_kona_i2c_disable_clock(dev);
+ clk_disable_unprepare(dev->external_clk);
+
+ return rc;
+}
+
+static int bcm_kona_i2c_remove(struct platform_device *pdev)
+{
+ struct bcm_kona_i2c_dev *dev = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&dev->adapter);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_kona_i2c_of_match[] = {
+ {.compatible = "brcm,kona-i2c",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match);
+
+static struct platform_driver bcm_kona_i2c_driver = {
+ .driver = {
+ .name = "bcm-kona-i2c",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_kona_i2c_of_match,
+ },
+ .probe = bcm_kona_i2c_probe,
+ .remove = bcm_kona_i2c_remove,
+};
+module_platform_driver(bcm_kona_i2c_driver);
+
+MODULE_AUTHOR("Tim Kryger <tkryger@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Kona I2C Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ea4b08fc335..d7e8600f31f 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -151,7 +151,7 @@ static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
i2c_dev->msg_buf = msg->buf;
i2c_dev->msg_buf_remaining = msg->len;
- INIT_COMPLETION(i2c_dev->completion);
+ reinit_completion(&i2c_dev->completion);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 35a473ba3d8..3b9bd9a3f2b 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -675,7 +675,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->retries = 3;
rc = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev),
+ dev_get_platdata(&pdev->dev),
"i2c-bfin-twi");
if (rc) {
dev_err(&pdev->dev, "Can't setup pin mux!\n");
@@ -723,7 +723,7 @@ out_error_add_adapter:
free_irq(iface->irq, iface);
out_error_req_irq:
out_error_no_irq:
- peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
out_error_pin_mux:
iounmap(iface->regs_base);
out_error_ioremap:
@@ -739,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
i2c_del_adapter(&(iface->adap));
free_irq(iface->irq, iface);
- peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
iounmap(iface->regs_base);
kfree(iface);
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index 2d46f13adfd..ce7ffba2b02 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -246,6 +246,7 @@ static int cbus_i2c_probe(struct platform_device *pdev)
adapter->owner = THIS_MODULE;
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = pdev->dev.of_node;
adapter->nr = pdev->id;
adapter->timeout = HZ;
adapter->algo = &cbus_i2c_algo;
@@ -289,6 +290,7 @@ static struct platform_driver cbus_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "i2c-cbus-gpio",
+ .of_match_table = of_match_ptr(i2c_cbus_dt_ids),
},
};
module_platform_driver(cbus_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b2b8aa9adc0..3e5ea2c87a6 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -447,7 +447,7 @@ static int cpm_i2c_setup(struct cpm_i2c *cpm)
init_waitqueue_head(&cpm->i2c_wait);
- cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ cpm->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (!cpm->irq)
return -EINVAL;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 132369fad4e..ff05d9fef4a 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -323,7 +323,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len);
- INIT_COMPLETION(dev->cmd_complete);
+ reinit_completion(&dev->cmd_complete);
dev->cmd_err = 0;
/* Take I2C out of reset and configure it as master */
@@ -795,7 +795,7 @@ static struct platform_driver davinci_i2c_driver = {
.name = "i2c_davinci",
.owner = THIS_MODULE,
.pm = davinci_i2c_pm_ops,
- .of_match_table = of_match_ptr(davinci_i2c_of_match),
+ .of_match_table = davinci_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 5888feef1ac..e89e3e2145e 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -613,7 +613,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
mutex_lock(&dev->lock);
pm_runtime_get_sync(dev->dev);
- INIT_COMPLETION(dev->cmd_complete);
+ reinit_completion(&dev->cmd_complete);
dev->msgs = msgs;
dev->msgs_num = num;
dev->cmd_err = 0;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0aa01136f8d..d0bdac0498c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -103,6 +103,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "INT33C2", 0 },
{ "INT33C3", 0 },
+ { "INT3432", 0 },
+ { "INT3433", 0 },
{ "80860F41", 0 },
{ }
};
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 0f3752967c4..ff15ae90aaf 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -312,24 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
}
/**
- * pch_i2c_getack() - to confirm ACK/NACK
- * @adap: Pointer to struct i2c_algo_pch_data.
- */
-static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap)
-{
- u32 reg_val;
- void __iomem *p = adap->pch_base_address;
- reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK;
-
- if (reg_val != 0) {
- pch_err(adap, "return%d\n", -EPROTO);
- return -EPROTO;
- }
-
- return 0;
-}
-
-/**
* pch_i2c_stop() - generate stop condition in normal mode.
* @adap: Pointer to struct i2c_algo_pch_data.
*/
@@ -344,6 +326,7 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
{
long ret;
+ void __iomem *p = adap->pch_base_address;
ret = wait_event_timeout(pch_event,
(adap->pch_event_flag != 0), msecs_to_jiffies(1000));
@@ -366,10 +349,9 @@ static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
adap->pch_event_flag = 0;
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
+ if (ioread32(p + PCH_I2CSR) & PCH_GETACK) {
+ pch_dbg(adap, "Receive NACK for slave address setting\n");
+ return -ENXIO;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
new file mode 100644
index 00000000000..c1ef228095b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -0,0 +1,769 @@
+/**
+ * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * HSI2C controller from Samsung supports 2 modes of operation
+ * 1. Auto mode: wherein the master automatically controls the whole transaction
+ * 2. Manual mode: Software controls the transaction by issuing commands
+ * START, READ, WRITE, STOP, RESTART in I2C_MANUAL_CMD register.
+ *
+ * Operation mode can be selected by setting AUTO_MODE bit in I2C_CONF register
+ *
+ * Special bits are available for both modes of operation to set commands
+ * and for checking transfer status
+ */
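+
+/*
+ * Note: the driver below uses auto mode only; exynos5_i2c_init() sets
+ * HSI2C_AUTO_MODE and exynos5_i2c_message_start() kicks each transfer
+ * through the HSI2C_AUTO_CONF register.
+ */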
+
+/* Register Map */
+#define HSI2C_CTL 0x00
+#define HSI2C_FIFO_CTL 0x04
+#define HSI2C_TRAILIG_CTL 0x08
+#define HSI2C_CLK_CTL 0x0C
+#define HSI2C_CLK_SLOT 0x10
+#define HSI2C_INT_ENABLE 0x20
+#define HSI2C_INT_STATUS 0x24
+#define HSI2C_ERR_STATUS 0x2C
+#define HSI2C_FIFO_STATUS 0x30
+#define HSI2C_TX_DATA 0x34
+#define HSI2C_RX_DATA 0x38
+#define HSI2C_CONF 0x40
+#define HSI2C_AUTO_CONF 0x44
+#define HSI2C_TIMEOUT 0x48
+#define HSI2C_MANUAL_CMD 0x4C
+#define HSI2C_TRANS_STATUS 0x50
+#define HSI2C_TIMING_HS1 0x54
+#define HSI2C_TIMING_HS2 0x58
+#define HSI2C_TIMING_HS3 0x5C
+#define HSI2C_TIMING_FS1 0x60
+#define HSI2C_TIMING_FS2 0x64
+#define HSI2C_TIMING_FS3 0x68
+#define HSI2C_TIMING_SLA 0x6C
+#define HSI2C_ADDR 0x70
+
+/* I2C_CTL Register bits */
+#define HSI2C_FUNC_MODE_I2C (1u << 0)
+#define HSI2C_MASTER (1u << 3)
+#define HSI2C_RXCHON (1u << 6)
+#define HSI2C_TXCHON (1u << 7)
+#define HSI2C_SW_RST (1u << 31)
+
+/* I2C_FIFO_CTL Register bits */
+#define HSI2C_RXFIFO_EN (1u << 0)
+#define HSI2C_TXFIFO_EN (1u << 1)
+#define HSI2C_RXFIFO_TRIGGER_LEVEL(x) ((x) << 4)
+#define HSI2C_TXFIFO_TRIGGER_LEVEL(x) ((x) << 16)
+
+/* As per the user manual, the FIFO max depth is 64 bytes */
+#define HSI2C_FIFO_MAX 0x40
+/* default trigger levels for Tx and Rx FIFOs */
+#define HSI2C_DEF_TXFIFO_LVL (HSI2C_FIFO_MAX - 0x30)
+#define HSI2C_DEF_RXFIFO_LVL (HSI2C_FIFO_MAX - 0x10)
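+/* with HSI2C_FIFO_MAX = 0x40 these evaluate to 0x10 (16) and 0x30 (48) bytes */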
+
+/* I2C_TRAILING_CTL Register bits */
+#define HSI2C_TRAILING_COUNT (0xf)
+
+/* I2C_INT_EN Register bits */
+#define HSI2C_INT_TX_ALMOSTEMPTY_EN (1u << 0)
+#define HSI2C_INT_RX_ALMOSTFULL_EN (1u << 1)
+#define HSI2C_INT_TRAILING_EN (1u << 6)
+#define HSI2C_INT_I2C_EN (1u << 9)
+
+/* I2C_INT_STAT Register bits */
+#define HSI2C_INT_TX_ALMOSTEMPTY (1u << 0)
+#define HSI2C_INT_RX_ALMOSTFULL (1u << 1)
+#define HSI2C_INT_TX_UNDERRUN (1u << 2)
+#define HSI2C_INT_TX_OVERRUN (1u << 3)
+#define HSI2C_INT_RX_UNDERRUN (1u << 4)
+#define HSI2C_INT_RX_OVERRUN (1u << 5)
+#define HSI2C_INT_TRAILING (1u << 6)
+#define HSI2C_INT_I2C (1u << 9)
+
+/* I2C_FIFO_STAT Register bits */
+#define HSI2C_RX_FIFO_EMPTY (1u << 24)
+#define HSI2C_RX_FIFO_FULL (1u << 23)
+#define HSI2C_RX_FIFO_LVL(x) ((x >> 16) & 0x7f)
+#define HSI2C_TX_FIFO_EMPTY (1u << 8)
+#define HSI2C_TX_FIFO_FULL (1u << 7)
+#define HSI2C_TX_FIFO_LVL(x) ((x >> 0) & 0x7f)
+
+/* I2C_CONF Register bits */
+#define HSI2C_AUTO_MODE (1u << 31)
+#define HSI2C_10BIT_ADDR_MODE (1u << 30)
+#define HSI2C_HS_MODE (1u << 29)
+
+/* I2C_AUTO_CONF Register bits */
+#define HSI2C_READ_WRITE (1u << 16)
+#define HSI2C_STOP_AFTER_TRANS (1u << 17)
+#define HSI2C_MASTER_RUN (1u << 31)
+
+/* I2C_TIMEOUT Register bits */
+#define HSI2C_TIMEOUT_EN (1u << 31)
+#define HSI2C_TIMEOUT_MASK 0xff
+
+/* I2C_TRANS_STATUS register bits */
+#define HSI2C_MASTER_BUSY (1u << 17)
+#define HSI2C_SLAVE_BUSY (1u << 16)
+#define HSI2C_TIMEOUT_AUTO (1u << 4)
+#define HSI2C_NO_DEV (1u << 3)
+#define HSI2C_NO_DEV_ACK (1u << 2)
+#define HSI2C_TRANS_ABORT (1u << 1)
+#define HSI2C_TRANS_DONE (1u << 0)
+
+/* I2C_ADDR register bits */
+#define HSI2C_SLV_ADDR_SLV(x) ((x & 0x3ff) << 0)
+#define HSI2C_SLV_ADDR_MAS(x) ((x & 0x3ff) << 10)
+#define HSI2C_MASTER_ID(x) ((x & 0xff) << 24)
+#define MASTER_ID(x) ((x & 0x7) + 0x08)
+
+/*
+ * Controller operating frequency, timing values for operation
+ * are calculated against this frequency
+ */
+#define HSI2C_HS_TX_CLOCK 1000000
+#define HSI2C_FS_TX_CLOCK 100000
+#define HSI2C_HIGH_SPD 1
+#define HSI2C_FAST_SPD 0
+
+#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(1000))
+
+struct exynos5_i2c {
+ struct i2c_adapter adap;
+ unsigned int suspended:1;
+
+ struct i2c_msg *msg;
+ struct completion msg_complete;
+ unsigned int msg_ptr;
+
+ unsigned int irq;
+
+ void __iomem *regs;
+ struct clk *clk;
+ struct device *dev;
+ int state;
+
+ spinlock_t lock; /* IRQ synchronization */
+
+ /*
+ * Since the TRANS_DONE bit is cleared on read, and we may read it
+ * either during an IRQ or after a transaction, keep track of its
+ * state here.
+ */
+ int trans_done;
+
+ /* Controller operating frequency */
+ unsigned int fs_clock;
+ unsigned int hs_clock;
+
+ /*
+ * HSI2C Controller can operate in
+ * 1. High speed up to 3.4 Mbps
+ * 2. Fast speed up to 1 Mbps
+ */
+ int speed_mode;
+};
+
+static const struct of_device_id exynos5_i2c_match[] = {
+ { .compatible = "samsung,exynos5-hsi2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
+
+static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c)
+{
+ writel(readl(i2c->regs + HSI2C_INT_STATUS),
+ i2c->regs + HSI2C_INT_STATUS);
+}
+
+/*
+ * exynos5_i2c_set_timing: updates the registers with the calculated
+ * timing values
+ *
+ * Returns 0 on success, -EINVAL if the cycle length cannot
+ * be calculated.
+ */
+static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, int mode)
+{
+ u32 i2c_timing_s1;
+ u32 i2c_timing_s2;
+ u32 i2c_timing_s3;
+ u32 i2c_timing_sla;
+ unsigned int t_start_su, t_start_hd;
+ unsigned int t_stop_su;
+ unsigned int t_data_su, t_data_hd;
+ unsigned int t_scl_l, t_scl_h;
+ unsigned int t_sr_release;
+ unsigned int t_ftl_cycle;
+ unsigned int clkin = clk_get_rate(i2c->clk);
+ unsigned int div, utemp0 = 0, utemp1 = 0, clk_cycle;
+ unsigned int op_clk = (mode == HSI2C_HIGH_SPD) ?
+ i2c->hs_clock : i2c->fs_clock;
+
+ /*
+ * FPCLK / FI2C =
+ * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE
+ * utemp0 = (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2)
+ * utemp1 = (TSCLK_L + TSCLK_H + 2)
+ */
+ t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
+ utemp0 = (clkin / op_clk) - 8 - 2 * t_ftl_cycle;
+
+ /* CLK_DIV max is 256 */
+ for (div = 0; div < 256; div++) {
+ utemp1 = utemp0 / (div + 1);
+
+ /*
+ * SCL_L and SCL_H each have a max value of 255.
+ * Hence, for clk_cycle to have the right value,
+ * utemp1 has to be less than 512 and more than 4.
+ */
+ if ((utemp1 < 512) && (utemp1 > 4)) {
+ clk_cycle = utemp1 - 2;
+ break;
+ } else if (div == 255) {
+ dev_warn(i2c->dev, "Failed to calculate divisor\n");
+ return -EINVAL;
+ }
+ }
+
+ t_scl_l = clk_cycle / 2;
+ t_scl_h = clk_cycle / 2;
+ t_start_su = t_scl_l;
+ t_start_hd = t_scl_l;
+ t_stop_su = t_scl_l;
+ t_data_su = t_scl_l / 2;
+ t_data_hd = t_scl_l / 2;
+ t_sr_release = clk_cycle;
+
+ i2c_timing_s1 = t_start_su << 24 | t_start_hd << 16 | t_stop_su << 8;
+ i2c_timing_s2 = t_data_su << 24 | t_scl_l << 8 | t_scl_h << 0;
+ i2c_timing_s3 = div << 16 | t_sr_release << 0;
+ i2c_timing_sla = t_data_hd << 0;
+
+ dev_dbg(i2c->dev, "tSTART_SU: %X, tSTART_HD: %X, tSTOP_SU: %X\n",
+ t_start_su, t_start_hd, t_stop_su);
+ dev_dbg(i2c->dev, "tDATA_SU: %X, tSCL_L: %X, tSCL_H: %X\n",
+ t_data_su, t_scl_l, t_scl_h);
+ dev_dbg(i2c->dev, "nClkDiv: %X, tSR_RELEASE: %X\n",
+ div, t_sr_release);
+ dev_dbg(i2c->dev, "tDATA_HD: %X\n", t_data_hd);
+
+ if (mode == HSI2C_HIGH_SPD) {
+ writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_HS1);
+ writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_HS2);
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3);
+ } else {
+ writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_FS1);
+ writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_FS2);
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3);
+ }
+ writel(i2c_timing_sla, i2c->regs + HSI2C_TIMING_SLA);
+
+ return 0;
+}
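+
+/*
+ * A worked example of the divisor search above, assuming a hypothetical
+ * clkin of 66 MHz, op_clk of 100 kHz and t_ftl_cycle of 0:
+ *
+ *   utemp0 = 66000000 / 100000 - 8 - 0 = 652
+ *   div = 0: utemp1 = 652        (not < 512, keep searching)
+ *   div = 1: utemp1 = 326        (4 < 326 < 512) -> clk_cycle = 324
+ *   t_scl_l = t_scl_h = 162, t_data_su = t_data_hd = 81, t_sr_release = 324
+ */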
+
+static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c)
+{
+ /*
+ * Configure the Fast speed timing values
+ * Even the High Speed mode initially starts with Fast mode
+ */
+ if (exynos5_i2c_set_timing(i2c, HSI2C_FAST_SPD)) {
+ dev_err(i2c->dev, "HSI2C FS Clock set up failed\n");
+ return -EINVAL;
+ }
+
+ /* configure the High speed timing values */
+ if (i2c->speed_mode == HSI2C_HIGH_SPD) {
+ if (exynos5_i2c_set_timing(i2c, HSI2C_HIGH_SPD)) {
+ dev_err(i2c->dev, "HSI2C HS Clock set up failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * exynos5_i2c_init: configures the controller for I2C functionality
+ * Programs I2C controller for Master mode operation
+ */
+static void exynos5_i2c_init(struct exynos5_i2c *i2c)
+{
+ u32 i2c_conf = readl(i2c->regs + HSI2C_CONF);
+ u32 i2c_timeout = readl(i2c->regs + HSI2C_TIMEOUT);
+
+ /* Clear to disable Timeout */
+ i2c_timeout &= ~HSI2C_TIMEOUT_EN;
+ writel(i2c_timeout, i2c->regs + HSI2C_TIMEOUT);
+
+ writel((HSI2C_FUNC_MODE_I2C | HSI2C_MASTER),
+ i2c->regs + HSI2C_CTL);
+ writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL);
+
+ if (i2c->speed_mode == HSI2C_HIGH_SPD) {
+ writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)),
+ i2c->regs + HSI2C_ADDR);
+ i2c_conf |= HSI2C_HS_MODE;
+ }
+
+ writel(i2c_conf | HSI2C_AUTO_MODE, i2c->regs + HSI2C_CONF);
+}
+
+static void exynos5_i2c_reset(struct exynos5_i2c *i2c)
+{
+ u32 i2c_ctl;
+
+ /* Set and clear the bit for reset */
+ i2c_ctl = readl(i2c->regs + HSI2C_CTL);
+ i2c_ctl |= HSI2C_SW_RST;
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+
+ i2c_ctl = readl(i2c->regs + HSI2C_CTL);
+ i2c_ctl &= ~HSI2C_SW_RST;
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+
+ /* We don't expect calculations to fail during the run */
+ exynos5_hsi2c_clock_setup(i2c);
+ /* Initialize the configure registers */
+ exynos5_i2c_init(i2c);
+}
+
+/*
+ * exynos5_i2c_irq: top level IRQ servicing routine
+ *
+ * The INT_STATUS register gives the interrupt details. Further, the
+ * FIFO_STATUS or TRANS_STATUS registers are to be checked for the
+ * detailed state of the bus.
+ */
+static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
+{
+ struct exynos5_i2c *i2c = dev_id;
+ u32 fifo_level, int_status, fifo_status, trans_status;
+ unsigned char byte;
+ int len = 0;
+
+ i2c->state = -EINVAL;
+
+ spin_lock(&i2c->lock);
+
+ int_status = readl(i2c->regs + HSI2C_INT_STATUS);
+ writel(int_status, i2c->regs + HSI2C_INT_STATUS);
+ fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
+
+ /* handle interrupt related to the transfer status */
+ if (int_status & HSI2C_INT_I2C) {
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
+ if (trans_status & HSI2C_NO_DEV_ACK) {
+ dev_dbg(i2c->dev, "No ACK from device\n");
+ i2c->state = -ENXIO;
+ goto stop;
+ } else if (trans_status & HSI2C_NO_DEV) {
+ dev_dbg(i2c->dev, "No device\n");
+ i2c->state = -ENXIO;
+ goto stop;
+ } else if (trans_status & HSI2C_TRANS_ABORT) {
+ dev_dbg(i2c->dev, "Deal with arbitration lose\n");
+ i2c->state = -EAGAIN;
+ goto stop;
+ } else if (trans_status & HSI2C_TIMEOUT_AUTO) {
+ dev_dbg(i2c->dev, "Accessing device timed out\n");
+ i2c->state = -EAGAIN;
+ goto stop;
+ } else if (trans_status & HSI2C_TRANS_DONE) {
+ i2c->trans_done = 1;
+ i2c->state = 0;
+ }
+ }
+
+ if ((i2c->msg->flags & I2C_M_RD) && (int_status &
+ (HSI2C_INT_TRAILING | HSI2C_INT_RX_ALMOSTFULL))) {
+ fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
+ fifo_level = HSI2C_RX_FIFO_LVL(fifo_status);
+ len = min(fifo_level, i2c->msg->len - i2c->msg_ptr);
+
+ while (len > 0) {
+ byte = (unsigned char)
+ readl(i2c->regs + HSI2C_RX_DATA);
+ i2c->msg->buf[i2c->msg_ptr++] = byte;
+ len--;
+ }
+ i2c->state = 0;
+ } else if (int_status & HSI2C_INT_TX_ALMOSTEMPTY) {
+ fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
+ fifo_level = HSI2C_TX_FIFO_LVL(fifo_status);
+
+ len = HSI2C_FIFO_MAX - fifo_level;
+ if (len > (i2c->msg->len - i2c->msg_ptr))
+ len = i2c->msg->len - i2c->msg_ptr;
+
+ while (len > 0) {
+ byte = i2c->msg->buf[i2c->msg_ptr++];
+ writel(byte, i2c->regs + HSI2C_TX_DATA);
+ len--;
+ }
+ i2c->state = 0;
+ }
+
+ stop:
+ if ((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) ||
+ (i2c->state < 0)) {
+ writel(0, i2c->regs + HSI2C_INT_ENABLE);
+ exynos5_i2c_clr_pend_irq(i2c);
+ complete(&i2c->msg_complete);
+ }
+
+ spin_unlock(&i2c->lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * exynos5_i2c_wait_bus_idle
+ *
+ * Wait for the bus to go idle, indicated by the MASTER_BUSY bit being
+ * cleared.
+ *
+ * Returns -EBUSY if the bus cannot be brought to idle
+ */
+static int exynos5_i2c_wait_bus_idle(struct exynos5_i2c *i2c)
+{
+ unsigned long stop_time;
+ u32 trans_status;
+
+ /* wait for 100 milliseconds for the bus to become idle */
+ stop_time = jiffies + msecs_to_jiffies(100) + 1;
+ do {
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
+ if (!(trans_status & HSI2C_MASTER_BUSY))
+ return 0;
+
+ usleep_range(50, 200);
+ } while (time_before(jiffies, stop_time));
+
+ return -EBUSY;
+}
+
+/*
+ * exynos5_i2c_message_start: Configures the bus and starts the xfer
+ * i2c: struct exynos5_i2c pointer for the current bus
+ * stop: Enables stop after transfer if set. Set for the last transfer
+ * in the list of messages.
+ *
+ * Configures the bus for read/write function
+ * Sets chip address to talk to, message length to be sent.
+ * Enables appropriate interrupts and sends start xfer command.
+ */
+static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
+{
+ u32 i2c_ctl;
+ u32 int_en = HSI2C_INT_I2C_EN;
+ u32 i2c_auto_conf = 0;
+ u32 fifo_ctl;
+ unsigned long flags;
+
+ i2c_ctl = readl(i2c->regs + HSI2C_CTL);
+ i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON);
+ fifo_ctl = HSI2C_RXFIFO_EN | HSI2C_TXFIFO_EN;
+
+ if (i2c->msg->flags & I2C_M_RD) {
+ i2c_ctl |= HSI2C_RXCHON;
+
+ i2c_auto_conf = HSI2C_READ_WRITE;
+
+ fifo_ctl |= HSI2C_RXFIFO_TRIGGER_LEVEL(HSI2C_DEF_TXFIFO_LVL);
+ int_en |= (HSI2C_INT_RX_ALMOSTFULL_EN |
+ HSI2C_INT_TRAILING_EN);
+ } else {
+ i2c_ctl |= HSI2C_TXCHON;
+
+ fifo_ctl |= HSI2C_TXFIFO_TRIGGER_LEVEL(HSI2C_DEF_RXFIFO_LVL);
+ int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN;
+ }
+
+ writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR);
+
+ writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+
+ /*
+ * Enable interrupts before starting the transfer so that we don't
+ * miss any INT_I2C interrupts.
+ */
+ spin_lock_irqsave(&i2c->lock, flags);
+ writel(int_en, i2c->regs + HSI2C_INT_ENABLE);
+
+ if (stop == 1)
+ i2c_auto_conf |= HSI2C_STOP_AFTER_TRANS;
+ i2c_auto_conf |= i2c->msg->len;
+ i2c_auto_conf |= HSI2C_MASTER_RUN;
+ writel(i2c_auto_conf, i2c->regs + HSI2C_AUTO_CONF);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+}
+
+static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
+ struct i2c_msg *msgs, int stop)
+{
+ unsigned long timeout;
+ int ret;
+
+ i2c->msg = msgs;
+ i2c->msg_ptr = 0;
+ i2c->trans_done = 0;
+
+ reinit_completion(&i2c->msg_complete);
+
+ exynos5_i2c_message_start(i2c, stop);
+
+ timeout = wait_for_completion_timeout(&i2c->msg_complete,
+ EXYNOS5_I2C_TIMEOUT);
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = i2c->state;
+
+ /*
+ * If this is the last message to be transferred (stop == 1),
+ * then check if the bus can be brought back to idle.
+ */
+ if (ret == 0 && stop)
+ ret = exynos5_i2c_wait_bus_idle(i2c);
+
+ if (ret < 0) {
+ exynos5_i2c_reset(i2c);
+ if (ret == -ETIMEDOUT)
+ dev_warn(i2c->dev, "%s timeout\n",
+ (msgs->flags & I2C_M_RD) ? "rx" : "tx");
+ }
+
+ /* Return the state as in interrupt routine */
+ return ret;
+}
+
+static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct exynos5_i2c *i2c = (struct exynos5_i2c *)adap->algo_data;
+ int i = 0, ret = 0, stop = 0;
+
+ if (i2c->suspended) {
+ dev_err(i2c->dev, "HS-I2C is not initialzed.\n");
+ return -EIO;
+ }
+
+ clk_prepare_enable(i2c->clk);
+
+ for (i = 0; i < num; i++, msgs++) {
+ stop = (i == num - 1);
+
+ ret = exynos5_i2c_xfer_msg(i2c, msgs, stop);
+
+ if (ret < 0)
+ goto out;
+ }
+
+ if (i == num) {
+ ret = num;
+ } else {
+ /* Only one message, cannot access the device */
+ if (i == 1)
+ ret = -EREMOTEIO;
+ else
+ ret = i;
+
+ dev_warn(i2c->dev, "xfer message failed\n");
+ }
+
+ out:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+}
+
+static u32 exynos5_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm exynos5_i2c_algorithm = {
+ .master_xfer = exynos5_i2c_xfer,
+ .functionality = exynos5_i2c_func,
+};
+
+static int exynos5_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct exynos5_i2c *i2c;
+ struct resource *mem;
+ unsigned int op_clock;
+ int ret;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct exynos5_i2c), GFP_KERNEL);
+ if (!i2c) {
+ dev_err(&pdev->dev, "no memory for state\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", &op_clock)) {
+ i2c->speed_mode = HSI2C_FAST_SPD;
+ i2c->fs_clock = HSI2C_FS_TX_CLOCK;
+ } else {
+ if (op_clock >= HSI2C_HS_TX_CLOCK) {
+ i2c->speed_mode = HSI2C_HIGH_SPD;
+ i2c->fs_clock = HSI2C_FS_TX_CLOCK;
+ i2c->hs_clock = op_clock;
+ } else {
+ i2c->speed_mode = HSI2C_FAST_SPD;
+ i2c->fs_clock = op_clock;
+ }
+ }
+
+ strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &exynos5_i2c_algorithm;
+ i2c->adap.retries = 3;
+
+ i2c->dev = &pdev->dev;
+ i2c->clk = devm_clk_get(&pdev->dev, "hsi2c");
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return -ENOENT;
+ }
+
+ clk_prepare_enable(i2c->clk);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(i2c->regs)) {
+ ret = PTR_ERR(i2c->regs);
+ goto err_clk;
+ }
+
+ i2c->adap.dev.of_node = np;
+ i2c->adap.algo_data = i2c;
+ i2c->adap.dev.parent = &pdev->dev;
+
+ /* Clear pending interrupts from u-boot or misc causes */
+ exynos5_i2c_clr_pend_irq(i2c);
+
+ spin_lock_init(&i2c->lock);
+ init_completion(&i2c->msg_complete);
+
+ i2c->irq = ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "cannot find HS-I2C IRQ\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev), i2c);
+
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq);
+ goto err_clk;
+ }
+
+ ret = exynos5_hsi2c_clock_setup(i2c);
+ if (ret)
+ goto err_clk;
+
+ exynos5_i2c_init(i2c);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, i2c);
+
+ err_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+}
+
+static int exynos5_i2c_remove(struct platform_device *pdev)
+{
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+
+ return 0;
+}
+
+static int exynos5_i2c_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c->suspended = 1;
+
+ return 0;
+}
+
+static int exynos5_i2c_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ clk_prepare_enable(i2c->clk);
+
+ ret = exynos5_hsi2c_clock_setup(i2c);
+ if (ret) {
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+ }
+
+ exynos5_i2c_init(i2c);
+ clk_disable_unprepare(i2c->clk);
+ i2c->suspended = 0;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq,
+ exynos5_i2c_resume_noirq);
+
+static struct platform_driver exynos5_i2c_driver = {
+ .probe = exynos5_i2c_probe,
+ .remove = exynos5_i2c_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos5-hsi2c",
+ .pm = &exynos5_i2c_dev_pm_ops,
+ .of_match_table = exynos5_i2c_match,
+ },
+};
+
+module_platform_driver(exynos5_i2c_driver);
+
+MODULE_DESCRIPTION("Exynos5 HS-I2C Bus driver");
+MODULE_AUTHOR("Naveen Krishna Chatradhi, <ch.naveen@samsung.com>");
+MODULE_AUTHOR("Taekgyun Ko, <taeggyun.ko@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index bfa02c6c2dd..d9f7e186a4c 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
struct i2c_gpio_private_data {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4296d172127..737e2986688 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -59,6 +59,7 @@
Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
+ Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -177,6 +178,7 @@
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
struct i801_mux_config {
char *gpio_chip;
@@ -819,6 +821,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index ff3caa0c28c..f7444100f39 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -41,6 +41,8 @@
#include <asm/irq.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include "i2c-ibm_iic.h"
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1672effbceb..0043ede234c 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -541,7 +541,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
desc->dptr_high = upper_32_bits(dma_addr);
}
- INIT_COMPLETION(priv->cmp);
+ reinit_completion(&priv->cmp);
/* Add the descriptor */
ismt_submit_desc(priv);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index b80c76888ca..b6a741caf4f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -17,6 +17,8 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index d3e9cc3153a..8be7e42aa4d 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -911,7 +911,7 @@ static struct platform_driver mv64xxx_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
- .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table),
+ .of_match_table = mv64xxx_i2c_of_match_table,
},
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index b7c85777470..0cde4e6ab2b 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -1,6 +1,7 @@
/*
* Freescale MXS I2C bus driver
*
+ * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de>
* Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K.
*
* based on a (non-working) driver which was:
@@ -34,10 +35,12 @@
#define MXS_I2C_CTRL0 (0x00)
#define MXS_I2C_CTRL0_SET (0x04)
+#define MXS_I2C_CTRL0_CLR (0x08)
#define MXS_I2C_CTRL0_SFTRST 0x80000000
#define MXS_I2C_CTRL0_RUN 0x20000000
#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
+#define MXS_I2C_CTRL0_PIO_MODE 0x01000000
#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000
@@ -64,13 +67,13 @@
#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
#define MXS_I2C_STAT (0x50)
+#define MXS_I2C_STAT_GOT_A_NAK 0x10000000
#define MXS_I2C_STAT_BUS_BUSY 0x00000800
#define MXS_I2C_STAT_CLK_GEN_BUSY 0x00000400
-#define MXS_I2C_DATA (0xa0)
+#define MXS_I2C_DATA(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0)
-#define MXS_I2C_DEBUG0 (0xb0)
-#define MXS_I2C_DEBUG0_CLR (0xb8)
+#define MXS_I2C_DEBUG0_CLR(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x78 : 0xb8)
#define MXS_I2C_DEBUG0_DMAREQ 0x80000000
@@ -95,10 +98,17 @@
#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
MXS_I2C_CTRL0_MASTER_MODE)
+enum mxs_i2c_devtype {
+ MXS_I2C_UNKNOWN = 0,
+ MXS_I2C_V1,
+ MXS_I2C_V2,
+};
+
/**
* struct mxs_i2c_dev - per device, private MXS-I2C data
*
* @dev: driver model device node
+ * @dev_type: distinguish i.MX23/i.MX28 features
* @regs: IO registers pointer
* @cmd_complete: completion object for transaction wait
* @cmd_err: error code for last transaction
@@ -106,6 +116,7 @@
*/
struct mxs_i2c_dev {
struct device *dev;
+ enum mxs_i2c_devtype dev_type;
void __iomem *regs;
struct completion cmd_complete;
int cmd_err;
@@ -291,48 +302,11 @@ write_init_pio_fail:
return -EINVAL;
}
-static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- while (!(readl(i2c->regs + MXS_I2C_DEBUG0) &
- MXS_I2C_DEBUG0_DMAREQ)) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- cond_resched();
- }
-
- return 0;
-}
-
-static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- /*
- * We do not use interrupts in the PIO mode. Due to the
- * maximum transfer length being 8 bytes in PIO mode, the
- * overhead of interrupt would be too large and this would
- * neglect the gain from using the PIO mode.
- */
-
- while (!(readl(i2c->regs + MXS_I2C_CTRL1) &
- MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- cond_resched();
- }
-
- writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
- i2c->regs + MXS_I2C_CTRL1_CLR);
-
- /*
- * When ending a transfer with a stop, we have to wait for the bus to
- * go idle before we report the transfer as completed. Otherwise the
- * start of the next transfer may race with the end of the current one.
- */
- while (last && (readl(i2c->regs + MXS_I2C_STAT) &
- (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) {
+ while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
cond_resched();
@@ -370,106 +344,215 @@ static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd)
writel(reg, i2c->regs + MXS_I2C_CTRL0);
}
+/*
+ * Start a WRITE transaction on the I2C bus. The i.MX23 datasheet's
+ * CTRL0::PIO_MODE bit description clarifies the order in which the registers
+ * must be written during PIO mode operation. First, the CTRL0 register has
+ * to be programmed with all the necessary bits but the RUN bit. Then the
+ * payload has to be written into the DATA register. Finally, the transmission
+ * is executed by setting the RUN bit in CTRL0.
+ */
+static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd,
+ u32 data)
+{
+ writel(cmd, i2c->regs + MXS_I2C_CTRL0);
+
+ if (i2c->dev_type == MXS_I2C_V1)
+ writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET);
+
+ writel(data, i2c->regs + MXS_I2C_DATA(i2c));
+ writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET);
+}
+
static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
struct i2c_msg *msg, uint32_t flags)
{
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
uint32_t addr_data = msg->addr << 1;
uint32_t data = 0;
- int i, shifts_left, ret;
+ int i, ret, xlen = 0, xmit = 0;
+ uint32_t start;
/* Mute IRQs coming from this block. */
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR);
+ /*
+ * MX23 idea:
+ * - Enable CTRL0::PIO_MODE (1 << 24)
+ * - Enable CTRL1::ACK_MODE (1 << 27)
+ *
+ * WARNING! The MX23 is broken in some way: even though it claims
+ * to support PIO, when we try to transfer any amount of data
+ * that is not aligned to 4 bytes, the DMA engine will have
+ * bits in DEBUG1::DMA_BYTES_ENABLES still set even after the
+ * transfer. This in turn will mess up the next transfer, as
+ * the block will emit a one-byte write onto the bus, terminated
+ * with a NAK+STOP. A possible workaround is to reset the IP
+ * block after every PIO transmission, which might just work.
+ *
+ * NOTE: The CTRL0::PIO_MODE description is important, since
+ * it outlines how the PIO mode is really supposed to work.
+ */
if (msg->flags & I2C_M_RD) {
+ /*
+ * PIO READ transfer:
+ *
+ * This transfer MUST be limited to 4 bytes maximum. It is not
+ * possible to transfer more than four bytes via PIO, since we
+ * can not in any way make sure we can read the data from the
+ * DATA register fast enough. Besides, the RX FIFO is only four
+ * bytes deep, thus we can only really read up to four bytes at
+ * time. Finally, there is no bit indicating us that new data
+ * arrived at the FIFO and can thus be fetched from the DATA
+ * register.
+ */
+ BUG_ON(msg->len > 4);
+
addr_data |= I2C_SMBUS_READ;
/* SELECT command. */
- mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT);
-
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
-
- writel(addr_data, i2c->regs + MXS_I2C_DATA);
- writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
+ mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT,
+ addr_data);
- ret = mxs_i2c_pio_wait_cplt(i2c, 0);
- if (ret)
- return ret;
-
- if (mxs_i2c_pio_check_error_state(i2c))
+ ret = mxs_i2c_pio_wait_xfer_end(i2c);
+ if (ret) {
+ dev_err(i2c->dev,
+ "PIO: Failed to send SELECT command!\n");
goto cleanup;
+ }
/* READ command. */
mxs_i2c_pio_trigger_cmd(i2c,
MXS_CMD_I2C_READ | flags |
MXS_I2C_CTRL0_XFER_COUNT(msg->len));
+ ret = mxs_i2c_pio_wait_xfer_end(i2c);
+ if (ret) {
+ dev_err(i2c->dev,
+ "PIO: Failed to send SELECT command!\n");
+ goto cleanup;
+ }
+
+ data = readl(i2c->regs + MXS_I2C_DATA(i2c));
for (i = 0; i < msg->len; i++) {
- if ((i & 3) == 0) {
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
- data = readl(i2c->regs + MXS_I2C_DATA);
- writel(MXS_I2C_DEBUG0_DMAREQ,
- i2c->regs + MXS_I2C_DEBUG0_CLR);
- }
msg->buf[i] = data & 0xff;
data >>= 8;
}
} else {
+ /*
+ * PIO WRITE transfer:
+ *
+ * The code below implements clock stretching to circumvent
+ * the possibility of the kernel not being able to supply data
+ * fast enough. It is possible to transfer an arbitrary amount
+ * of data using PIO write.
+ */
addr_data |= I2C_SMBUS_WRITE;
- /* WRITE command. */
- mxs_i2c_pio_trigger_cmd(i2c,
- MXS_CMD_I2C_WRITE | flags |
- MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1));
-
/*
* The LSB of data buffer is the first byte blasted across
* the bus. Higher order bytes follow. Thus the following
* filling schematic.
*/
+
data = addr_data << 24;
+
+ /* Start the transfer with START condition. */
+ start = MXS_I2C_CTRL0_PRE_SEND_START;
+
+ /* If the transfer is long, use clock stretching. */
+ if (msg->len > 3)
+ start |= MXS_I2C_CTRL0_RETAIN_CLOCK;
+
for (i = 0; i < msg->len; i++) {
data >>= 8;
data |= (msg->buf[i] << 24);
- if ((i & 3) == 2) {
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
- writel(data, i2c->regs + MXS_I2C_DATA);
- writel(MXS_I2C_DEBUG0_DMAREQ,
- i2c->regs + MXS_I2C_DEBUG0_CLR);
+
+ xmit = 0;
+
+ /* This is the last transfer of the message. */
+ if (i + 1 == msg->len) {
+ /* Add optional STOP flag. */
+ start |= flags;
+ /* Remove RETAIN_CLOCK bit. */
+ start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK;
+ xmit = 1;
}
- }
- shifts_left = 24 - (i & 3) * 8;
- if (shifts_left) {
- data >>= shifts_left;
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
- writel(data, i2c->regs + MXS_I2C_DATA);
+ /* Four bytes are ready in the "data" variable. */
+ if ((i & 3) == 2)
+ xmit = 1;
+
+ /* Nothing interesting happened, continue stuffing. */
+ if (!xmit)
+ continue;
+
+ /*
+ * Compute the size of the transfer and shift the
+ * data accordingly.
+ *
+ * i = (4k + 0) .... xlen = 2
+ * i = (4k + 1) .... xlen = 3
+ * i = (4k + 2) .... xlen = 4
+ * i = (4k + 3) .... xlen = 1
+ */
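+ /*
+ * For instance, i = 5 (4k + 1) gives xlen = (5 % 4) + 2 = 3 and a
+ * data shift of (4 - 3) * 8 = 8 bits before the write below.
+ */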
+
+ if ((i % 4) == 3)
+ xlen = 1;
+ else
+ xlen = (i % 4) + 2;
+
+ data >>= (4 - xlen) * 8;
+
+ dev_dbg(i2c->dev,
+ "PIO: len=%i pos=%i total=%i [W%s%s%s]\n",
+ xlen, i, msg->len,
+ start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "",
+ start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "",
+ start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : "");
+
writel(MXS_I2C_DEBUG0_DMAREQ,
- i2c->regs + MXS_I2C_DEBUG0_CLR);
+ i2c->regs + MXS_I2C_DEBUG0_CLR(i2c));
+
+ mxs_i2c_pio_trigger_write_cmd(i2c,
+ start | MXS_I2C_CTRL0_MASTER_MODE |
+ MXS_I2C_CTRL0_DIRECTION |
+ MXS_I2C_CTRL0_XFER_COUNT(xlen), data);
+
+ /* The START condition is sent only once. */
+ start &= ~MXS_I2C_CTRL0_PRE_SEND_START;
+
+ /* Wait for the end of the transfer. */
+ ret = mxs_i2c_pio_wait_xfer_end(i2c);
+ if (ret) {
+ dev_err(i2c->dev,
+ "PIO: Failed to finish WRITE cmd!\n");
+ break;
+ }
+
+ /* Check NAK here. */
+ ret = readl(i2c->regs + MXS_I2C_STAT) &
+ MXS_I2C_STAT_GOT_A_NAK;
+ if (ret) {
+ ret = -ENXIO;
+ goto cleanup;
+ }
}
}
- ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP);
- if (ret)
- return ret;
-
/* make sure we capture any occurred error into cmd_err */
- mxs_i2c_pio_check_error_state(i2c);
+ ret = mxs_i2c_pio_check_error_state(i2c);
cleanup:
/* Clear any dangling IRQs and re-enable interrupts. */
writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
- return 0;
+ /* Clear the PIO_MODE on i.MX23 */
+ if (i2c->dev_type == MXS_I2C_V1)
+ writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR);
+
+ return ret;
}
/*
@@ -479,8 +562,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
int stop)
{
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
- int ret, err;
+ int ret;
int flags;
+ int use_pio = 0;
flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -491,21 +575,23 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
return -EINVAL;
/*
- * The current boundary to select between PIO/DMA transfer method
- * is set to 8 bytes, transfers shorter than 8 bytes are transfered
- * using PIO mode while longer transfers use DMA. The 8 byte border is
- * based on this empirical measurement and a lot of previous frobbing.
+ * The MX28 I2C IP block can only do PIO READ for transfers of up to
+ * 4 bytes in length. The write transfer is not limited, as it can use
+ * clock stretching to avoid FIFO underruns.
*/
+ if ((msg->flags & I2C_M_RD) && (msg->len <= 4))
+ use_pio = 1;
+ if (!(msg->flags & I2C_M_RD) && (msg->len < 7))
+ use_pio = 1;
+
i2c->cmd_err = 0;
- if (0) { /* disable PIO mode until a proper fix is made */
+ if (use_pio) {
ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
- if (ret) {
- err = mxs_i2c_reset(i2c);
- if (err)
- return err;
- }
+ /* No need to reset the block if NAK was received. */
+ if (ret && (ret != -ENXIO))
+ mxs_i2c_reset(i2c);
} else {
- INIT_COMPLETION(i2c->cmd_complete);
+ reinit_completion(&i2c->cmd_complete);
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
if (ret)
return ret;
@@ -514,9 +600,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
msecs_to_jiffies(1000));
if (ret == 0)
goto timeout;
+
+ ret = i2c->cmd_err;
}
- if (i2c->cmd_err == -ENXIO) {
+ if (ret == -ENXIO) {
/*
* If the transfer fails with a NAK from the slave the
* controller halts until it gets told to return to idle state.
@@ -525,7 +613,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
i2c->regs + MXS_I2C_CTRL1_SET);
}
- ret = i2c->cmd_err;
+ /*
+ * WARNING!
+ * The i.MX23 is strange. After each and every operation, its I2C IP
+ * block must be reset, otherwise the IP block will misbehave. This can
+ * be observed on the bus by the block sending out one single byte onto
+ * the bus. In case such an error happens, bit 27 will be set in the
+ * DEBUG0 register. This bit is not documented in the i.MX23 datasheet
+ * and is marked as "TBD" instead. To reset this bit to a correct state,
+ * reset the whole block. Since the block reset does not take long, do
+ * reset the block after every transfer to play safe.
+ */
+ if (i2c->dev_type == MXS_I2C_V1)
+ mxs_i2c_reset(i2c);
dev_dbg(i2c->dev, "Done with err=%d\n", ret);
@@ -680,8 +780,28 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
return 0;
}
+static struct platform_device_id mxs_i2c_devtype[] = {
+ {
+ .name = "imx23-i2c",
+ .driver_data = MXS_I2C_V1,
+ }, {
+ .name = "imx28-i2c",
+ .driver_data = MXS_I2C_V2,
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_i2c_devtype);
+
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+ { .compatible = "fsl,imx23-i2c", .data = &mxs_i2c_devtype[0], },
+ { .compatible = "fsl,imx28-i2c", .data = &mxs_i2c_devtype[1], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
static int mxs_i2c_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_i2c_dt_ids, &pdev->dev);
struct device *dev = &pdev->dev;
struct mxs_i2c_dev *i2c;
struct i2c_adapter *adap;
@@ -693,6 +813,11 @@ static int mxs_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
+ if (of_id) {
+ const struct platform_device_id *device_id = of_id->data;
+ i2c->dev_type = device_id->driver_data;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -768,12 +893,6 @@ static int mxs_i2c_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mxs_i2c_dt_ids[] = {
- { .compatible = "fsl,imx28-i2c", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
-
static struct platform_driver mxs_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
@@ -796,6 +915,7 @@ static void __exit mxs_i2c_exit(void)
}
module_exit(mxs_i2c_exit);
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
MODULE_DESCRIPTION("MXS I2C Bus Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 9967a6f9c2f..d76228d81d5 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -543,7 +543,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);
- INIT_COMPLETION(dev->cmd_complete);
+ reinit_completion(&dev->cmd_complete);
dev->cmd_err = 0;
w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
@@ -1037,6 +1037,20 @@ static const struct i2c_algorithm omap_i2c_algo = {
};
#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap2420_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_NO_FIFO |
+ OMAP_I2C_FLAG_SIMPLE_CLOCK |
+ OMAP_I2C_FLAG_16BIT_DATA_REG |
+ OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap2430_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 |
+ OMAP_I2C_FLAG_FORCE_19200_INT_CLK,
+};
+
static struct omap_i2c_bus_platform_data omap3_pdata = {
.rev = OMAP_I2C_IP_VERSION_1,
.flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
@@ -1055,6 +1069,14 @@ static const struct of_device_id omap_i2c_of_match[] = {
.compatible = "ti,omap3-i2c",
.data = &omap3_pdata,
},
+ {
+ .compatible = "ti,omap2430-i2c",
+ .data = &omap2430_pdata,
+ },
+ {
+ .compatible = "ti,omap2420-i2c",
+ .data = &omap2420_pdata,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 1a9ea25f231..c9a352f0a9a 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */
#define I2C_PNX_SPEED_KHZ_DEFAULT 100
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 37e8cfad625..8c87f4a9793 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/of_irq.h>
#include <asm/prom.h>
#include <asm/pmac_low_i2c.h>
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d2fe11da5e8..2c2fd7c2b11 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -33,6 +33,7 @@
#include <linux/i2c/i2c-rcar.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -102,8 +103,8 @@ enum {
#define ID_NACK (1 << 4)
enum rcar_i2c_type {
- I2C_RCAR_H1,
- I2C_RCAR_H2,
+ I2C_RCAR_GEN1,
+ I2C_RCAR_GEN2,
};
struct rcar_i2c_priv {
@@ -226,22 +227,23 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
u32 bus_speed,
struct device *dev)
{
- struct clk *clkp = clk_get(NULL, "peripheral_clk");
+ struct clk *clkp = clk_get(dev, NULL);
u32 scgd, cdf;
u32 round, ick;
u32 scl;
u32 cdf_width;
+ unsigned long rate;
- if (!clkp) {
- dev_err(dev, "there is no peripheral_clk\n");
- return -EIO;
+ if (IS_ERR(clkp)) {
+ dev_err(dev, "couldn't get clock\n");
+ return PTR_ERR(clkp);
}
switch (priv->devtype) {
- case I2C_RCAR_H1:
+ case I2C_RCAR_GEN1:
cdf_width = 2;
break;
- case I2C_RCAR_H2:
+ case I2C_RCAR_GEN2:
cdf_width = 3;
break;
default:
@@ -264,15 +266,14 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
* clkp : peripheral_clk
* F[] : integer up-valuation
*/
- for (cdf = 0; cdf < (1 << cdf_width); cdf++) {
- ick = clk_get_rate(clkp) / (1 + cdf);
- if (ick < 20000000)
- goto ick_find;
+ rate = clk_get_rate(clkp);
+ cdf = rate / 20000000;
+ if (cdf >= 1 << cdf_width) {
+ dev_err(dev, "Input clock %lu too high\n", rate);
+ return -EIO;
}
- dev_err(dev, "there is no best CDF\n");
- return -EIO;
+ ick = rate / (cdf + 1);
-ick_find:
/*
* it is impossible to calculate large scale
* number on u32. separate it
@@ -290,6 +291,12 @@ ick_find:
*
* Calculation result (= SCL) should be less than
* bus_speed for hardware safety
+ *
+ * We could use something along the lines of
+ * div = ick / (bus_speed + 1) + 1;
+ * scgd = (div - 20 - round + 7) / 8;
+ * scl = ick / (20 + (scgd * 8) + round);
+ * (not fully verified) but that would get pretty involved
*/
for (scgd = 0; scgd < 0x40; scgd++) {
scl = ick / (20 + (scgd * 8) + round);
@@ -306,7 +313,7 @@ scgd_find:
/*
* keep icccr value
*/
- priv->icccr = (scgd << (cdf_width) | cdf);
+ priv->icccr = scgd << cdf_width | cdf;
return 0;
}
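For a concrete feel of the divider search above, here is a small standalone sketch of the same arithmetic; the 50 MHz clock, 100 kHz bus speed and Gen1 cdf_width are example inputs, and the "round" correction term is omitted for brevity.

#include <stdio.h>

/* Sketch of rcar_i2c_clock_calculate(): pick CDF so the internal clock stays
 * below 20 MHz, then search SCGD for the first SCL at or below the bus speed. */
static unsigned int rcar_icccr_sketch(unsigned long rate, unsigned long bus_speed,
				      unsigned int cdf_width)
{
	unsigned int cdf = rate / 20000000;
	unsigned long ick = rate / (cdf + 1);
	unsigned int scgd;

	for (scgd = 0; scgd < 0x40; scgd++)
		if (ick / (20 + scgd * 8) <= bus_speed)
			break;

	return scgd << cdf_width | cdf;
}

int main(void)
{
	/* 50 MHz peripheral clock, 100 kHz bus, Gen1 (cdf_width = 2) -> 0x4e */
	printf("ICCCR = 0x%x\n", rcar_icccr_sketch(50000000, 100000, 2));
	return 0;
}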
@@ -632,6 +639,15 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.functionality = rcar_i2c_func,
};
+static const struct of_device_id rcar_i2c_dt_ids[] = {
+ { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
+
static int rcar_i2c_probe(struct platform_device *pdev)
{
struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -649,10 +665,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
}
bus_speed = 100000; /* default 100 kHz */
- if (pdata && pdata->bus_speed)
+ ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
+ if (ret < 0 && pdata && pdata->bus_speed)
bus_speed = pdata->bus_speed;
- priv->devtype = platform_get_device_id(pdev)->driver_data;
+ if (pdev->dev.of_node)
+ priv->devtype = (long)of_match_device(rcar_i2c_dt_ids,
+ dev)->data;
+ else
+ priv->devtype = platform_get_device_id(pdev)->driver_data;
ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
if (ret < 0)
@@ -673,6 +694,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->retries = 3;
adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
@@ -709,9 +731,9 @@ static int rcar_i2c_remove(struct platform_device *pdev)
}
static struct platform_device_id rcar_i2c_id_table[] = {
- { "i2c-rcar", I2C_RCAR_H1 },
- { "i2c-rcar_h1", I2C_RCAR_H1 },
- { "i2c-rcar_h2", I2C_RCAR_H2 },
+ { "i2c-rcar", I2C_RCAR_GEN1 },
+ { "i2c-rcar_gen1", I2C_RCAR_GEN1 },
+ { "i2c-rcar_gen2", I2C_RCAR_GEN2 },
{},
};
MODULE_DEVICE_TABLE(platform, rcar_i2c_id_table);
@@ -720,6 +742,7 @@ static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.owner = THIS_MODULE,
+ .of_match_table = rcar_i2c_dt_ids,
},
.probe = rcar_i2c_probe,
.remove = rcar_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3747b9bf67d..bf8fb94ebc5 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -36,6 +36,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index c447e8d40b7..59923551413 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -223,7 +223,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
goto out;
obj = pkg->package.elements + 1;
- if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj->type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO, "Invalid argument type"));
result = -EIO;
goto out;
@@ -235,7 +235,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
case I2C_SMBUS_BYTE:
case I2C_SMBUS_BYTE_DATA:
case I2C_SMBUS_WORD_DATA:
- if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj->type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO, "Invalid argument type"));
result = -EIO;
goto out;
@@ -246,7 +246,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
data->byte = obj->integer.value;
break;
case I2C_SMBUS_BLOCK_DATA:
- if (obj == NULL || obj->type != ACPI_TYPE_BUFFER) {
+ if (obj->type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO, "Invalid argument type"));
result = -EIO;
goto out;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 55110ddbed1..1d79585ba4b 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -235,7 +235,7 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
int offset;
/* Get clock rate after clock is enabled */
- clk_enable(pd->clk);
+ clk_prepare_enable(pd->clk);
i2c_clk_khz = clk_get_rate(pd->clk) / 1000;
i2c_clk_khz /= pd->clks_per_count;
@@ -270,14 +270,14 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
pd->icic &= ~ICIC_ICCHB8;
out:
- clk_disable(pd->clk);
+ clk_disable_unprepare(pd->clk);
}
static void activate_ch(struct sh_mobile_i2c_data *pd)
{
/* Wake up device and enable clock */
pm_runtime_get_sync(pd->dev);
- clk_enable(pd->clk);
+ clk_prepare_enable(pd->clk);
/* Enable channel and configure rx ack */
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
@@ -300,7 +300,7 @@ static void deactivate_ch(struct sh_mobile_i2c_data *pd)
iic_set_clr(pd, ICCR, 0, ICCR_ICE);
/* Disable clock and mark device as idle */
- clk_disable(pd->clk);
+ clk_disable_unprepare(pd->clk);
pm_runtime_put_sync(pd->dev);
}
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
new file mode 100644
index 00000000000..9cf715d6955
--- /dev/null
+++ b/drivers/i2c/busses/i2c-st.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics
+ *
+ * I2C master mode controller driver, used in STMicroelectronics devices.
+ *
+ * Author: Maxime Coquelin <maxime.coquelin@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+/* SSC registers */
+#define SSC_BRG 0x000
+#define SSC_TBUF 0x004
+#define SSC_RBUF 0x008
+#define SSC_CTL 0x00C
+#define SSC_IEN 0x010
+#define SSC_STA 0x014
+#define SSC_I2C 0x018
+#define SSC_SLAD 0x01C
+#define SSC_REP_START_HOLD 0x020
+#define SSC_START_HOLD 0x024
+#define SSC_REP_START_SETUP 0x028
+#define SSC_DATA_SETUP 0x02C
+#define SSC_STOP_SETUP 0x030
+#define SSC_BUS_FREE 0x034
+#define SSC_TX_FSTAT 0x038
+#define SSC_RX_FSTAT 0x03C
+#define SSC_PRE_SCALER_BRG 0x040
+#define SSC_CLR 0x080
+#define SSC_NOISE_SUPP_WIDTH 0x100
+#define SSC_PRSCALER 0x104
+#define SSC_NOISE_SUPP_WIDTH_DATAOUT 0x108
+#define SSC_PRSCALER_DATAOUT 0x10c
+
+/* SSC Control */
+#define SSC_CTL_DATA_WIDTH_9 0x8
+#define SSC_CTL_DATA_WIDTH_MSK 0xf
+#define SSC_CTL_BM 0xf
+#define SSC_CTL_HB BIT(4)
+#define SSC_CTL_PH BIT(5)
+#define SSC_CTL_PO BIT(6)
+#define SSC_CTL_SR BIT(7)
+#define SSC_CTL_MS BIT(8)
+#define SSC_CTL_EN BIT(9)
+#define SSC_CTL_LPB BIT(10)
+#define SSC_CTL_EN_TX_FIFO BIT(11)
+#define SSC_CTL_EN_RX_FIFO BIT(12)
+#define SSC_CTL_EN_CLST_RX BIT(13)
+
+/* SSC Interrupt Enable */
+#define SSC_IEN_RIEN BIT(0)
+#define SSC_IEN_TIEN BIT(1)
+#define SSC_IEN_TEEN BIT(2)
+#define SSC_IEN_REEN BIT(3)
+#define SSC_IEN_PEEN BIT(4)
+#define SSC_IEN_AASEN BIT(6)
+#define SSC_IEN_STOPEN BIT(7)
+#define SSC_IEN_ARBLEN BIT(8)
+#define SSC_IEN_NACKEN BIT(10)
+#define SSC_IEN_REPSTRTEN BIT(11)
+#define SSC_IEN_TX_FIFO_HALF BIT(12)
+#define SSC_IEN_RX_FIFO_HALF_FULL BIT(14)
+
+/* SSC Status */
+#define SSC_STA_RIR BIT(0)
+#define SSC_STA_TIR BIT(1)
+#define SSC_STA_TE BIT(2)
+#define SSC_STA_RE BIT(3)
+#define SSC_STA_PE BIT(4)
+#define SSC_STA_CLST BIT(5)
+#define SSC_STA_AAS BIT(6)
+#define SSC_STA_STOP BIT(7)
+#define SSC_STA_ARBL BIT(8)
+#define SSC_STA_BUSY BIT(9)
+#define SSC_STA_NACK BIT(10)
+#define SSC_STA_REPSTRT BIT(11)
+#define SSC_STA_TX_FIFO_HALF BIT(12)
+#define SSC_STA_TX_FIFO_FULL BIT(13)
+#define SSC_STA_RX_FIFO_HALF BIT(14)
+
+/* SSC I2C Control */
+#define SSC_I2C_I2CM BIT(0)
+#define SSC_I2C_STRTG BIT(1)
+#define SSC_I2C_STOPG BIT(2)
+#define SSC_I2C_ACKG BIT(3)
+#define SSC_I2C_AD10 BIT(4)
+#define SSC_I2C_TXENB BIT(5)
+#define SSC_I2C_REPSTRTG BIT(11)
+#define SSC_I2C_SLAVE_DISABLE BIT(12)
+
+/* SSC Tx FIFO Status */
+#define SSC_TX_FSTAT_STATUS 0x07
+
+/* SSC Rx FIFO Status */
+#define SSC_RX_FSTAT_STATUS 0x07
+
+/* SSC Clear bit operation */
+#define SSC_CLR_SSCAAS BIT(6)
+#define SSC_CLR_SSCSTOP BIT(7)
+#define SSC_CLR_SSCARBL BIT(8)
+#define SSC_CLR_NACK BIT(10)
+#define SSC_CLR_REPSTRT BIT(11)
+
+/* SSC Clock Prescaler */
+#define SSC_PRSC_VALUE 0x0f
+
+
+#define SSC_TXFIFO_SIZE 0x8
+#define SSC_RXFIFO_SIZE 0x8
+
+enum st_i2c_mode {
+ I2C_MODE_STANDARD,
+ I2C_MODE_FAST,
+ I2C_MODE_END,
+};
+
+/**
+ * struct st_i2c_timings - per-mode tuning parameters
+ * @rate: I2C bus rate
+ * @rep_start_hold: I2C repeated start hold time requirement
+ * @rep_start_setup: I2C repeated start set up time requirement
+ * @start_hold: I2C start hold time requirement
+ * @data_setup_time: I2C data set up time requirement
+ * @stop_setup_time: I2C stop set up time requirement
+ * @bus_free_time: I2C bus free time requirement
+ * @sda_pulse_min_limit: I2C SDA pulse minimum width limit
+ */
+struct st_i2c_timings {
+ u32 rate;
+ u32 rep_start_hold;
+ u32 rep_start_setup;
+ u32 start_hold;
+ u32 data_setup_time;
+ u32 stop_setup_time;
+ u32 bus_free_time;
+ u32 sda_pulse_min_limit;
+};
+
+/**
+ * struct st_i2c_client - client specific data
+ * @addr: 8-bit slave addr, including r/w bit
+ * @count: number of bytes to be transferred
+ * @xfered: number of bytes already transferred
+ * @buf: data buffer
+ * @result: result of the transfer
+ * @stop: last I2C msg to be sent, i.e. STOP to be generated
+ */
+struct st_i2c_client {
+ u8 addr;
+ u32 count;
+ u32 xfered;
+ u8 *buf;
+ int result;
+ bool stop;
+};
+
+/**
+ * struct st_i2c_dev - private data of the controller
+ * @adap: I2C adapter for this controller
+ * @dev: device for this controller
+ * @base: virtual memory area
+ * @complete: completion of I2C message
+ * @irq: interrupt line for the controller
+ * @clk: hw ssc block clock
+ * @mode: I2C mode of the controller. Only standard and fast modes supported
+ * @scl_min_width_us: SCL line minimum pulse width in us
+ * @sda_min_width_us: SDA line minimum pulse width in us
+ * @client: I2C transfer information
+ * @busy: I2C transfer ongoing
+ */
+struct st_i2c_dev {
+ struct i2c_adapter adap;
+ struct device *dev;
+ void __iomem *base;
+ struct completion complete;
+ int irq;
+ struct clk *clk;
+ int mode;
+ u32 scl_min_width_us;
+ u32 sda_min_width_us;
+ struct st_i2c_client client;
+ bool busy;
+};
+
+static inline void st_i2c_set_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) | mask, reg);
+}
+
+static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) & ~mask, reg);
+}
+
+/* From I2C Specifications v0.5 */
+static struct st_i2c_timings i2c_timings[] = {
+ [I2C_MODE_STANDARD] = {
+ .rate = 100000,
+ .rep_start_hold = 4000,
+ .rep_start_setup = 4700,
+ .start_hold = 4000,
+ .data_setup_time = 250,
+ .stop_setup_time = 4000,
+ .bus_free_time = 4700,
+ },
+ [I2C_MODE_FAST] = {
+ .rate = 400000,
+ .rep_start_hold = 600,
+ .rep_start_setup = 600,
+ .start_hold = 600,
+ .data_setup_time = 100,
+ .stop_setup_time = 600,
+ .bus_free_time = 1300,
+ },
+};
+
+static void st_i2c_flush_rx_fifo(struct st_i2c_dev *i2c_dev)
+{
+ int count, i;
+
+ /*
+ * The counter only counts up to 7 but the FIFO size is 8...
+ * When the FIFO is full, the counter is 0 and the RIR bit of the status
+ * register is set.
+ */
+ if (readl_relaxed(i2c_dev->base + SSC_STA) & SSC_STA_RIR)
+ count = SSC_RXFIFO_SIZE;
+ else
+ count = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT) &
+ SSC_RX_FSTAT_STATUS;
+
+ for (i = 0; i < count; i++)
+ readl_relaxed(i2c_dev->base + SSC_RBUF);
+}
+
+static void st_i2c_soft_reset(struct st_i2c_dev *i2c_dev)
+{
+ /*
+ * The FIFO needs to be emptied before resetting the IP,
+ * else the controller raises a BUSY error.
+ */
+ st_i2c_flush_rx_fifo(i2c_dev);
+
+ st_i2c_set_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR);
+ st_i2c_clr_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR);
+}
+
+/**
+ * st_i2c_hw_config() - Prepare SSC block, calculate and apply tuning timings
+ * @i2c_dev: Controller's private data
+ */
+static void st_i2c_hw_config(struct st_i2c_dev *i2c_dev)
+{
+ unsigned long rate;
+ u32 val, ns_per_clk;
+ struct st_i2c_timings *t = &i2c_timings[i2c_dev->mode];
+
+ st_i2c_soft_reset(i2c_dev);
+
+ val = SSC_CLR_REPSTRT | SSC_CLR_NACK | SSC_CLR_SSCARBL |
+ SSC_CLR_SSCAAS | SSC_CLR_SSCSTOP;
+ writel_relaxed(val, i2c_dev->base + SSC_CLR);
+
+ /* SSC Control register setup */
+ val = SSC_CTL_PO | SSC_CTL_PH | SSC_CTL_HB | SSC_CTL_DATA_WIDTH_9;
+ writel_relaxed(val, i2c_dev->base + SSC_CTL);
+
+ rate = clk_get_rate(i2c_dev->clk);
+ ns_per_clk = 1000000000 / rate;
+
+ /* Baudrate */
+ val = rate / (2 * t->rate);
+ writel_relaxed(val, i2c_dev->base + SSC_BRG);
+
+ /* Pre-scaler baudrate */
+ writel_relaxed(1, i2c_dev->base + SSC_PRE_SCALER_BRG);
+
+ /* Enable I2C mode */
+ writel_relaxed(SSC_I2C_I2CM, i2c_dev->base + SSC_I2C);
+
+ /* Repeated start hold time */
+ val = t->rep_start_hold / ns_per_clk;
+ writel_relaxed(val, i2c_dev->base + SSC_REP_START_HOLD);
+
+ /* Repeated start set up time */
+ val = t->rep_start_setup / ns_per_clk;
+ writel_relaxed(val, i2c_dev->base + SSC_REP_START_SETUP);
+
+ /* Start hold time */
+ val = t->start_hold / ns_per_clk;
+ writel_relaxed(val, i2c_dev->base + SSC_START_HOLD);
+
+ /* Data set up time */
+ val = t->data_setup_time / ns_per_clk;
+ writel_relaxed(val, i2c_dev->base + SSC_DATA_SETUP);
+
+ /* Stop set up time */
+ val = t->stop_setup_time / ns_per_clk;
+ writel_relaxed(val, i2c_dev->base + SSC_STOP_SETUP);
+
+ /* Bus free time */
+ val = t->bus_free_time / ns_per_clk;
+ writel_relaxed(val, i2c_dev->base + SSC_BUS_FREE);
+
+ /* Prescalers set up */
+ val = rate / 10000000;
+ writel_relaxed(val, i2c_dev->base + SSC_PRSCALER);
+ writel_relaxed(val, i2c_dev->base + SSC_PRSCALER_DATAOUT);
+
+ /* Noise suppression width */
+ val = i2c_dev->scl_min_width_us * rate / 100000000;
+ writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH);
+
+ /* Noise suppression max output data delay width */
+ val = i2c_dev->sda_min_width_us * rate / 100000000;
+ writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT);
+}
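As a worked example of the register programming above (the 100 MHz "ssc" clock rate is an assumption, not taken from the patch), the ns-based timing requirements simply divide down by the clock period:

#include <stdio.h>

/* Worked example of the conversions in st_i2c_hw_config(),
 * assuming a 100 MHz SSC clock and standard mode (100 kHz). */
int main(void)
{
	unsigned long rate = 100000000;
	unsigned int ns_per_clk = 1000000000 / rate;		/* 10 ns per cycle */

	printf("SSC_BRG        = %lu\n", rate / (2 * 100000));	/* 500 */
	printf("SSC_START_HOLD = %u\n", 4000 / ns_per_clk);	/* 400 cycles */
	printf("SSC_BUS_FREE   = %u\n", 4700 / ns_per_clk);	/* 470 cycles */
	return 0;
}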
+
+static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev)
+{
+ u32 sta;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ sta = readl_relaxed(i2c_dev->base + SSC_STA);
+ if (!(sta & SSC_STA_BUSY))
+ return 0;
+
+ usleep_range(2000, 4000);
+ }
+
+ dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta);
+
+ return -EBUSY;
+}
+
+/**
+ * st_i2c_write_tx_fifo() - Write a byte in the Tx FIFO
+ * @i2c_dev: Controller's private data
+ * @byte: Data to write in the Tx FIFO
+ */
+static inline void st_i2c_write_tx_fifo(struct st_i2c_dev *i2c_dev, u8 byte)
+{
+ u16 tbuf = byte << 1;
+
+ writel_relaxed(tbuf | 1, i2c_dev->base + SSC_TBUF);
+}
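A quick sanity check on the framing above (0xA5 is an arbitrary example byte): the controller runs with 9-bit data words, so each byte goes out as (byte << 1) | 1, leaving the trailing acknowledge-slot bit high.

#include <stdio.h>

/* 9-bit SSC frame used by st_i2c_write_tx_fifo(): data in bits [8:1], ACK slot in bit 0. */
int main(void)
{
	unsigned char byte = 0xA5;			/* arbitrary example byte */
	unsigned short tbuf = (byte << 1) | 1;

	printf("SSC_TBUF word = 0x%03x\n", tbuf);	/* prints 0x14b */
	return 0;
}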
+
+/**
+ * st_i2c_wr_fill_tx_fifo() - Fill the Tx FIFO in write mode
+ * @i2c_dev: Controller's private data
+ *
+ * This function fills the Tx FIFO with the I2C transfer buffer when
+ * in write mode.
+ */
+static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+ u32 tx_fstat, sta;
+ int i;
+
+ sta = readl_relaxed(i2c_dev->base + SSC_STA);
+ if (sta & SSC_STA_TX_FIFO_FULL)
+ return;
+
+ tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT);
+ tx_fstat &= SSC_TX_FSTAT_STATUS;
+
+ if (c->count < (SSC_TXFIFO_SIZE - tx_fstat))
+ i = c->count;
+ else
+ i = SSC_TXFIFO_SIZE - tx_fstat;
+
+ for (; i > 0; i--, c->count--, c->buf++)
+ st_i2c_write_tx_fifo(i2c_dev, *c->buf);
+}
+
+/**
+ * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
+ * @i2c_dev: Controller's private data
+ *
+ * This function fills the Tx FIFO with a fixed pattern when
+ * in read mode to trigger the clock.
+ */
+static void st_i2c_rd_fill_tx_fifo(struct st_i2c_dev *i2c_dev, int max)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+ u32 tx_fstat, sta;
+ int i;
+
+ sta = readl_relaxed(i2c_dev->base + SSC_STA);
+ if (sta & SSC_STA_TX_FIFO_FULL)
+ return;
+
+ tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT);
+ tx_fstat &= SSC_TX_FSTAT_STATUS;
+
+ if (max < (SSC_TXFIFO_SIZE - tx_fstat))
+ i = max;
+ else
+ i = SSC_TXFIFO_SIZE - tx_fstat;
+
+ for (; i > 0; i--, c->xfered++)
+ st_i2c_write_tx_fifo(i2c_dev, 0xff);
+}
+
+static void st_i2c_read_rx_fifo(struct st_i2c_dev *i2c_dev)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+ u32 i, sta;
+ u16 rbuf;
+
+ sta = readl_relaxed(i2c_dev->base + SSC_STA);
+ if (sta & SSC_STA_RIR) {
+ i = SSC_RXFIFO_SIZE;
+ } else {
+ i = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT);
+ i &= SSC_RX_FSTAT_STATUS;
+ }
+
+ for (; (i > 0) && (c->count > 0); i--, c->count--) {
+ rbuf = readl_relaxed(i2c_dev->base + SSC_RBUF) >> 1;
+ *c->buf++ = (u8)rbuf & 0xff;
+ }
+
+ if (i) {
+ dev_err(i2c_dev->dev, "Unexpected %d bytes in rx fifo\n", i);
+ st_i2c_flush_rx_fifo(i2c_dev);
+ }
+}
+
+/**
+ * st_i2c_terminate_xfer() - Send either STOP or REPSTART condition
+ * @i2c_dev: Controller's private data
+ */
+static void st_i2c_terminate_xfer(struct st_i2c_dev *i2c_dev)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+
+ st_i2c_clr_bits(i2c_dev->base + SSC_IEN, SSC_IEN_TEEN);
+ st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG);
+
+ if (c->stop) {
+ st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_STOPEN);
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
+ } else {
+ st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_REPSTRTEN);
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_REPSTRTG);
+ }
+}
+
+/**
+ * st_i2c_handle_write() - Handle FIFO empty interrupt in case of write
+ * @i2c_dev: Controller's private data
+ */
+static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+
+ st_i2c_flush_rx_fifo(i2c_dev);
+
+ if (!c->count)
+ /* End of xfer, send stop or repstart */
+ st_i2c_terminate_xfer(i2c_dev);
+ else
+ st_i2c_wr_fill_tx_fifo(i2c_dev);
+}
+
+/**
+ * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read
+ * @i2c_dev: Controller's private data
+ */
+static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+ u32 ien;
+
+ /* Trash the address read back */
+ if (!c->xfered) {
+ readl_relaxed(i2c_dev->base + SSC_RBUF);
+ st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_TXENB);
+ } else {
+ st_i2c_read_rx_fifo(i2c_dev);
+ }
+
+ if (!c->count) {
+ /* End of xfer, send stop or repstart */
+ st_i2c_terminate_xfer(i2c_dev);
+ } else if (c->count == 1) {
+ /* Penultimate byte to xfer, disable ACK gen. */
+ st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_ACKG);
+
+ /* Last received byte is to be handled by NACK interrupt */
+ ien = SSC_IEN_NACKEN | SSC_IEN_ARBLEN;
+ writel_relaxed(ien, i2c_dev->base + SSC_IEN);
+
+ st_i2c_rd_fill_tx_fifo(i2c_dev, c->count);
+ } else {
+ st_i2c_rd_fill_tx_fifo(i2c_dev, c->count - 1);
+ }
+}
+
+/**
+ * st_i2c_isr_thread() - Interrupt routine
+ * @irq: interrupt number
+ * @data: Controller's private data
+ */
+static irqreturn_t st_i2c_isr_thread(int irq, void *data)
+{
+ struct st_i2c_dev *i2c_dev = data;
+ struct st_i2c_client *c = &i2c_dev->client;
+ u32 sta, ien;
+ int it;
+
+ ien = readl_relaxed(i2c_dev->base + SSC_IEN);
+ sta = readl_relaxed(i2c_dev->base + SSC_STA);
+
+ /* Use __fls() to check error bits first */
+ it = __fls(sta & ien);
+ if (it < 0) {
+ dev_dbg(i2c_dev->dev, "spurious it (sta=0x%04x, ien=0x%04x)\n",
+ sta, ien);
+ return IRQ_NONE;
+ }
+
+ switch (1 << it) {
+ case SSC_STA_TE:
+ if (c->addr & I2C_M_RD)
+ st_i2c_handle_read(i2c_dev);
+ else
+ st_i2c_handle_write(i2c_dev);
+ break;
+
+ case SSC_STA_STOP:
+ case SSC_STA_REPSTRT:
+ writel_relaxed(0, i2c_dev->base + SSC_IEN);
+ complete(&i2c_dev->complete);
+ break;
+
+ case SSC_STA_NACK:
+ writel_relaxed(SSC_CLR_NACK, i2c_dev->base + SSC_CLR);
+
+ /* Last received byte handled by NACK interrupt */
+ if ((c->addr & I2C_M_RD) && (c->count == 1) && (c->xfered)) {
+ st_i2c_handle_read(i2c_dev);
+ break;
+ }
+
+ it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN;
+ writel_relaxed(it, i2c_dev->base + SSC_IEN);
+
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
+ c->result = -EIO;
+ break;
+
+ case SSC_STA_ARBL:
+ writel_relaxed(SSC_CLR_SSCARBL, i2c_dev->base + SSC_CLR);
+
+ it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN;
+ writel_relaxed(it, i2c_dev->base + SSC_IEN);
+
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
+ c->result = -EIO;
+ break;
+
+ default:
+ dev_err(i2c_dev->dev,
+ "it %d unhandled (sta=0x%04x)\n", it, sta);
+ }
+
+ /*
+ * Read IEN register to ensure interrupt mask write is effective
+ * before re-enabling interrupt at GIC level, and thus avoid spurious
+ * interrupts.
+ */
+ readl(i2c_dev->base + SSC_IEN);
+
+ return IRQ_HANDLED;
+}
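The __fls() pick above means the highest pending status bit is serviced first; with the bit layout defined earlier (SSC_STA_TE is bit 2, SSC_STA_NACK bit 10) a simultaneous NACK and Tx-empty therefore resolves to the NACK path. A user-space stand-in to illustrate, with a made-up status value and __builtin_clz replacing __fls():

#include <stdio.h>

/* Highest set bit wins, as in st_i2c_isr_thread(). */
int main(void)
{
	unsigned int sta = (1u << 10) | (1u << 2);	/* NACK and TE both pending */
	unsigned int ien = sta;				/* both sources enabled */
	int it = 31 - __builtin_clz(sta & ien);		/* stand-in for __fls() */

	printf("serviced bit = %d (NACK)\n", it);	/* prints 10 */
	return 0;
}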
+
+/**
+ * st_i2c_xfer_msg() - Transfer a single I2C message
+ * @i2c_dev: Controller's private data
+ * @msg: I2C message to transfer
+ * @is_first: first message of the sequence
+ * @is_last: last message of the sequence
+ */
+static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg,
+ bool is_first, bool is_last)
+{
+ struct st_i2c_client *c = &i2c_dev->client;
+ u32 ctl, i2c, it;
+ unsigned long timeout;
+ int ret;
+
+ c->addr = (u8)(msg->addr << 1);
+ c->addr |= (msg->flags & I2C_M_RD);
+ c->buf = msg->buf;
+ c->count = msg->len;
+ c->xfered = 0;
+ c->result = 0;
+ c->stop = is_last;
+
+ reinit_completion(&i2c_dev->complete);
+
+ ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO;
+ st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl);
+
+ i2c = SSC_I2C_TXENB;
+ if (c->addr & I2C_M_RD)
+ i2c |= SSC_I2C_ACKG;
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, i2c);
+
+ /* Write slave address */
+ st_i2c_write_tx_fifo(i2c_dev, c->addr);
+
+ /* Pre-fill Tx fifo with data in case of write */
+ if (!(c->addr & I2C_M_RD))
+ st_i2c_wr_fill_tx_fifo(i2c_dev);
+
+ it = SSC_IEN_NACKEN | SSC_IEN_TEEN | SSC_IEN_ARBLEN;
+ writel_relaxed(it, i2c_dev->base + SSC_IEN);
+
+ if (is_first) {
+ ret = st_i2c_wait_free_bus(i2c_dev);
+ if (ret)
+ return ret;
+
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG);
+ }
+
+ timeout = wait_for_completion_timeout(&i2c_dev->complete,
+ i2c_dev->adap.timeout);
+ ret = c->result;
+
+ if (!timeout) {
+ dev_err(i2c_dev->dev, "Write to slave 0x%x timed out\n",
+ c->addr);
+ ret = -ETIMEDOUT;
+ }
+
+ i2c = SSC_I2C_STOPG | SSC_I2C_REPSTRTG;
+ st_i2c_clr_bits(i2c_dev->base + SSC_I2C, i2c);
+
+ writel_relaxed(SSC_CLR_SSCSTOP | SSC_CLR_REPSTRT,
+ i2c_dev->base + SSC_CLR);
+
+ return ret;
+}
+
+/**
+ * st_i2c_xfer() - Transfer I2C messages
+ * @i2c_adap: Adapter pointer to the controller
+ * @msgs: Pointer to the messages to transfer
+ * @num: Number of messages to be executed
+ */
+static int st_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+ int ret, i;
+
+ i2c_dev->busy = true;
+
+ ret = clk_prepare_enable(i2c_dev->clk);
+ if (ret) {
+ dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ pinctrl_pm_select_default_state(i2c_dev->dev);
+
+ st_i2c_hw_config(i2c_dev);
+
+ for (i = 0; (i < num) && !ret; i++)
+ ret = st_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, i == num - 1);
+
+ pinctrl_pm_select_idle_state(i2c_dev->dev);
+
+ clk_disable_unprepare(i2c_dev->clk);
+
+ i2c_dev->busy = false;
+
+ return (ret < 0) ? ret : i;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int st_i2c_suspend(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ if (i2c_dev->busy)
+ return -EBUSY;
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int st_i2c_resume(struct device *dev)
+{
+ pinctrl_pm_select_default_state(dev);
+ /* Go in idle state if available */
+ pinctrl_pm_select_idle_state(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume);
+#define ST_I2C_PM (&st_i2c_pm)
+#else
+#define ST_I2C_PM NULL
+#endif
+
+static u32 st_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm st_i2c_algo = {
+ .master_xfer = st_i2c_xfer,
+ .functionality = st_i2c_func,
+};
+
+static int st_i2c_of_get_deglitch(struct device_node *np,
+ struct st_i2c_dev *i2c_dev)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "st,i2c-min-scl-pulse-width-us",
+ &i2c_dev->scl_min_width_us);
+ if ((ret == -ENODATA) || (ret == -EOVERFLOW)) {
+ dev_err(i2c_dev->dev, "st,i2c-min-scl-pulse-width-us invalid\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "st,i2c-min-sda-pulse-width-us",
+ &i2c_dev->sda_min_width_us);
+ if ((ret == -ENODATA) || (ret == -EOVERFLOW)) {
+ dev_err(i2c_dev->dev, "st,i2c-min-sda-pulse-width-us invalid\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int st_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct st_i2c_dev *i2c_dev;
+ struct resource *res;
+ u32 clk_rate;
+ struct i2c_adapter *adap;
+ int ret;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c_dev->base))
+ return PTR_ERR(i2c_dev->base);
+
+ i2c_dev->irq = irq_of_parse_and_map(np, 0);
+ if (!i2c_dev->irq) {
+ dev_err(&pdev->dev, "IRQ missing or invalid\n");
+ return -EINVAL;
+ }
+
+ i2c_dev->clk = of_clk_get_by_name(np, "ssc");
+ if (IS_ERR(i2c_dev->clk)) {
+ dev_err(&pdev->dev, "Unable to request clock\n");
+ return PTR_ERR(i2c_dev->clk);
+ }
+
+ i2c_dev->mode = I2C_MODE_STANDARD;
+ ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
+ if ((!ret) && (clk_rate == 400000))
+ i2c_dev->mode = I2C_MODE_FAST;
+
+ i2c_dev->dev = &pdev->dev;
+
+ ret = devm_request_threaded_irq(&pdev->dev, i2c_dev->irq,
+ NULL, st_i2c_isr_thread,
+ IRQF_ONESHOT, pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ return ret;
+ }
+
+ pinctrl_pm_select_default_state(i2c_dev->dev);
+ /* If an idle state is available, select it */
+ pinctrl_pm_select_idle_state(i2c_dev->dev);
+
+ ret = st_i2c_of_get_deglitch(np, i2c_dev);
+ if (ret)
+ return ret;
+
+ adap = &i2c_dev->adap;
+ i2c_set_adapdata(adap, i2c_dev);
+ snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%x)", res->start);
+ adap->owner = THIS_MODULE;
+ adap->timeout = 2 * HZ;
+ adap->retries = 0;
+ adap->algo = &st_i2c_algo;
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+
+ init_completion(&i2c_dev->complete);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add adapter\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ dev_info(i2c_dev->dev, "%s initialized\n", adap->name);
+
+ return 0;
+}
+
+static int st_i2c_remove(struct platform_device *pdev)
+{
+ struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c_dev->adap);
+
+ return 0;
+}
+
+static struct of_device_id st_i2c_match[] = {
+ { .compatible = "st,comms-ssc-i2c", },
+ { .compatible = "st,comms-ssc4-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_i2c_match);
+
+static struct platform_driver st_i2c_driver = {
+ .driver = {
+ .name = "st-i2c",
+ .owner = THIS_MODULE,
+ .of_match_table = st_i2c_match,
+ .pm = ST_I2C_PM,
+ },
+ .probe = st_i2c_probe,
+ .remove = st_i2c_remove,
+};
+
+module_platform_driver(st_i2c_driver);
+
+MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index c457cb447c6..e661edee4d0 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -544,7 +544,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
i2c_dev->msg_buf_remaining = msg->len;
i2c_dev->msg_err = I2C_ERR_NONE;
i2c_dev->msg_read = (msg->flags & I2C_M_RD);
- INIT_COMPLETION(i2c_dev->msg_complete);
+ reinit_completion(&i2c_dev->msg_complete);
packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
PACKET_HEADER0_PROTOCOL_I2C |
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index c65da3d913a..2c8a3e4f900 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -158,7 +158,7 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
writew(val, i2c_dev->base + REG_CR);
}
- INIT_COMPLETION(i2c_dev->complete);
+ reinit_completion(&i2c_dev->complete);
if (i2c_dev->mode == I2C_MODE_STANDARD)
tcr_val = TCR_STANDARD_MODE;
@@ -247,7 +247,7 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
writew(val, i2c_dev->base + REG_CR);
}
- INIT_COMPLETION(i2c_dev->complete);
+ reinit_completion(&i2c_dev->complete);
if (i2c_dev->mode == I2C_MODE_STANDARD)
tcr_val = TCR_STANDARD_MODE;
@@ -349,6 +349,7 @@ static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev)
err = clk_set_rate(i2c_dev->clk, 20000000);
if (err) {
dev_err(i2c_dev->dev, "failed to set clock = 20Mhz\n");
+ clk_disable_unprepare(i2c_dev->clk);
return err;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4c8b368d463..fc2716afdfd 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
#include <linux/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define DRIVER_NAME "xiic-i2c"
@@ -702,7 +703,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (irq < 0)
goto resource_missing;
- pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(&pdev->dev);
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 3be58f89ac7..d74c0b34248 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -248,16 +248,17 @@ static int i2c_device_probe(struct device *dev)
driver = to_i2c_driver(dev->driver);
if (!driver->probe || !driver->id_table)
return -ENODEV;
- client->driver = driver;
+
if (!device_can_wakeup(&client->dev))
device_init_wakeup(&client->dev,
client->flags & I2C_CLIENT_WAKE);
dev_dbg(dev, "probe\n");
+ acpi_dev_pm_attach(&client->dev, true);
status = driver->probe(client, i2c_match_id(driver->id_table, client));
if (status) {
- client->driver = NULL;
i2c_set_clientdata(client, NULL);
+ acpi_dev_pm_detach(&client->dev, true);
}
return status;
}
@@ -279,10 +280,9 @@ static int i2c_device_remove(struct device *dev)
dev->driver = NULL;
status = 0;
}
- if (status == 0) {
- client->driver = NULL;
+ if (status == 0)
i2c_set_clientdata(client, NULL);
- }
+ acpi_dev_pm_detach(&client->dev, true);
return status;
}
@@ -615,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter)
}
EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
+static void i2c_dev_set_name(struct i2c_adapter *adap,
+ struct i2c_client *client)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+
+ if (adev) {
+ dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ /* For 10-bit clients, add an arbitrary offset to avoid collisions */
+ dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
+ client->addr | ((client->flags & I2C_CLIENT_TEN)
+ ? 0xa000 : 0));
+}
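For reference, the fallback branch keeps the long-standing sysfs naming; a quick illustration of the two address forms (adapter number and client address are made-up examples):

#include <stdio.h>

/* Naming scheme of i2c_dev_set_name(): adapter 1, client at 0x50,
 * with and without the 10-bit address offset. */
int main(void)
{
	printf("%d-%04x\n", 1, 0x50);			/* "1-0050" (7-bit client)  */
	printf("%d-%04x\n", 1, 0x50 | 0xa000);		/* "1-a050" (10-bit client) */
	return 0;
}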
+
/**
* i2c_new_device - instantiate an i2c device
* @adap: the adapter managing the device
@@ -671,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
- ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle);
+ ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion);
- /* For 10-bit clients, add an arbitrary offset to avoid collisions */
- dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr | ((client->flags & I2C_CLIENT_TEN)
- ? 0xa000 : 0));
+ i2c_dev_set_name(adap, client);
status = device_register(&client->dev);
if (status)
goto out_err;
@@ -1100,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
return AE_OK;
memset(&info, 0, sizeof(info));
- info.acpi_node.handle = handle;
+ info.acpi_node.companion = adev;
info.irq = -1;
INIT_LIST_HEAD(&resource_list);
@@ -1111,8 +1124,10 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
if (ret < 0 || !info.addr)
return AE_OK;
+ adev->power.flags.ignore_parent = true;
strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
if (!i2c_new_device(adapter, &info)) {
+ adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
dev_name(&adev->dev));
@@ -1609,9 +1624,14 @@ static int i2c_cmd(struct device *dev, void *_arg)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_cmd_arg *arg = _arg;
+ struct i2c_driver *driver;
+
+ if (!client || !client->dev.driver)
+ return 0;
- if (client && client->driver && client->driver->command)
- client->driver->command(client, arg->cmd, arg->arg);
+ driver = to_i2c_driver(client->dev.driver);
+ if (driver->command)
+ driver->command(client, arg->cmd, arg->arg);
return 0;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c3ccdea3d18..80b47e8ce03 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -102,8 +102,8 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev)
kfree(i2c_dev);
}
-static ssize_t show_adapter_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));
@@ -111,7 +111,13 @@ static ssize_t show_adapter_name(struct device *dev,
return -ENODEV;
return sprintf(buf, "%s\n", i2c_dev->adap->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *i2c_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i2c);
/* ------------------------------------------------------------------------- */
@@ -562,15 +568,10 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
res = PTR_ERR(i2c_dev->dev);
goto error;
}
- res = device_create_file(i2c_dev->dev, &dev_attr_name);
- if (res)
- goto error_destroy;
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
-error_destroy:
- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
error:
return_i2c_dev(i2c_dev);
return res;
@@ -589,7 +590,6 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- device_remove_file(i2c_dev->dev, &dev_attr_name);
return_i2c_dev(i2c_dev);
device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
@@ -637,6 +637,7 @@ static int __init i2c_dev_init(void)
res = PTR_ERR(i2c_dev_class);
goto out_unreg_chrdev;
}
+ i2c_dev_class->dev_groups = i2c_groups;
/* Keep track of adapters which will be added or removed later */
res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 44d4c6071c1..c99b2298736 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -46,6 +46,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
struct alert_data *data = addrp;
+ struct i2c_driver *driver;
if (!client || client->addr != data->addr)
return 0;
@@ -54,12 +55,13 @@ static int smbus_do_alert(struct device *dev, void *addrp)
/*
* Drivers should either disable alerts, or provide at least
- * a minimal handler. Lock so client->driver won't change.
+ * a minimal handler. Lock so the driver won't change.
*/
device_lock(dev);
- if (client->driver) {
- if (client->driver->alert)
- client->driver->alert(client, data->flag);
+ if (client->dev.driver) {
+ driver = to_i2c_driver(client->dev.driver);
+ if (driver->alert)
+ driver->alert(client, data->flag);
else
dev_warn(&client->dev, "no driver alert()!\n");
} else
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 928656e241d..c58e093b603 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -238,7 +238,7 @@ static struct platform_driver i2c_arbitrator_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "i2c-arb-gpio-challenge",
- .of_match_table = of_match_ptr(i2c_arbitrator_of_match),
+ .of_match_table = i2c_arbitrator_of_match,
},
};
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index a764da777f0..8a8c56f4b02 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -30,15 +30,15 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
int i;
for (i = 0; i < mux->data.n_gpios; i++)
- gpio_set_value(mux->gpio_base + mux->data.gpios[i],
- val & (1 << i));
+ gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i],
+ val & (1 << i));
}
static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- i2c_mux_gpio_set(mux, mux->data.values[chan]);
+ i2c_mux_gpio_set(mux, chan);
return 0;
}
@@ -228,7 +228,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
- i, class,
+ mux->data.values[i], class,
i2c_mux_gpio_select, deselect);
if (!mux->adap[i]) {
ret = -ENODEV;
@@ -283,7 +283,7 @@ static struct platform_driver i2c_mux_gpio_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "i2c-mux-gpio",
- .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
+ .of_match_table = i2c_mux_gpio_of_match,
},
};
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 68a37157377..d7978dc4ad0 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -24,6 +24,7 @@
#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
struct i2c_mux_pinctrl {
struct device *dev;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 02906ca99b4..8fb46aab2d8 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -197,8 +197,8 @@ comment "IDE chipset support/bugfixes"
config IDE_GENERIC
tristate "generic/default IDE chipset support"
- depends on ALPHA || X86 || IA64 || M32R || MIPS || ARCH_RPC || ARCH_SHARK
- default ARM && (ARCH_RPC || ARCH_SHARK)
+ depends on ALPHA || X86 || IA64 || M32R || MIPS || ARCH_RPC
+ default ARM && ARCH_RPC
help
This is the generic IDE driver. This driver attaches to the
fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
@@ -722,13 +722,6 @@ config BLK_DEV_IDE_RAPIDE
Say Y here if you want to support the Yellowstone RapIDE controller
manufactured for use with Acorn computers.
-config IDE_H8300
- tristate "H8300 IDE support"
- depends on H8300
- default y
- help
- Enables the H8300 IDE driver.
-
config BLK_DEV_GAYLE
tristate "Amiga Gayle IDE interface support"
depends on AMIGA
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index af8d016c37e..a04ee82f1c8 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -78,8 +78,6 @@ obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
-obj-$(CONFIG_IDE_H8300) += ide-h8300.o
-
obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
index 24214ab60ac..de9185db41d 100644
--- a/drivers/ide/cs5536.c
+++ b/drivers/ide/cs5536.c
@@ -295,15 +295,7 @@ static struct pci_driver cs5536_pci_driver = {
.resume = ide_pci_resume,
};
-static int __init cs5536_init(void)
-{
- return pci_register_driver(&cs5536_pci_driver);
-}
-
-static void __exit cs5536_exit(void)
-{
- pci_unregister_driver(&cs5536_pci_driver);
-}
+module_pci_driver(cs5536_pci_driver);
MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
@@ -312,6 +304,3 @@ MODULE_DEVICE_TABLE(pci, cs5536_pci_tbl);
module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
-
-module_init(cs5536_init);
-module_exit(cs5536_exit);
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 140c8ef5052..d9e1f7ccfe6 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -7,6 +7,7 @@
* Copyright (C) 2006 Hannes Reinecke
*/
+#include <linux/acpi.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -19,8 +20,6 @@
#include <linux/dmi.h>
#include <linux/module.h>
-#include <acpi/acpi_bus.h>
-
#define REGS_PER_GTF 7
struct GTM_buffer {
@@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
- dev_handle = DEVICE_ACPI_HANDLE(dev);
+ dev_handle = ACPI_HANDLE(dev);
if (!dev_handle) {
DEBPRINT("no acpi handle for device\n");
goto err;
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
deleted file mode 100644
index 520f42c5445..00000000000
--- a/drivers/ide/ide-h8300.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * H8/300 generic IDE interface
- */
-
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define DRV_NAME "ide-h8300"
-
-#define bswap(d) \
-({ \
- u16 r; \
- __asm__("mov.b %w1,r1h\n\t" \
- "mov.b %x1,r1l\n\t" \
- "mov.w r1,%0" \
- :"=r"(r) \
- :"r"(d) \
- :"er1"); \
- (r); \
-})
-
-static void mm_outsw(unsigned long addr, void *buf, u32 len)
-{
- unsigned short *bp = (unsigned short *)buf;
- for (; len > 0; len--, bp++)
- *(volatile u16 *)addr = bswap(*bp);
-}
-
-static void mm_insw(unsigned long addr, void *buf, u32 len)
-{
- unsigned short *bp = (unsigned short *)buf;
- for (; len > 0; len--, bp++)
- *bp = bswap(*(volatile u16 *)addr);
-}
-
-static void h8300_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static void h8300_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static const struct ide_tp_ops h8300_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = h8300_input_data,
- .output_data = h8300_output_data,
-};
-
-#define H8300_IDE_GAP (2)
-
-static inline void hw_setup(struct ide_hw *hw)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
- hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
- hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
-}
-
-static const struct ide_port_info h8300_port_info = {
- .tp_ops = &h8300_tp_ops,
- .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static int __init h8300_ide_init(void)
-{
- struct ide_hw hw, *hws[] = { &hw };
-
- printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
-
- if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
- goto out_busy;
- if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) {
- release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8);
- goto out_busy;
- }
-
- hw_setup(&hw);
-
- return ide_host_add(&h8300_port_info, hws, 1, NULL);
-
-out_busy:
- printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
-
- return -EBUSY;
-}
-
-module_init(h8300_ide_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
index 883ffacaf45..84a6a9e08d6 100644
--- a/drivers/ide/ide-sysfs.c
+++ b/drivers/ide/ide-sysfs.c
@@ -25,6 +25,7 @@ static ssize_t media_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", ide_media_string(drive));
}
+static DEVICE_ATTR_RO(media);
static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -32,6 +33,7 @@ static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", drive->name);
}
+static DEVICE_ATTR_RO(drivename);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -39,6 +41,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
}
+static DEVICE_ATTR_RO(modalias);
static ssize_t model_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -46,6 +49,7 @@ static ssize_t model_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
}
+static DEVICE_ATTR_RO(model);
static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -53,6 +57,7 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
}
+static DEVICE_ATTR_RO(firmware);
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -60,16 +65,28 @@ static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
}
+static DEVICE_ATTR(serial, 0400, serial_show, NULL);
+
+static DEVICE_ATTR(unload_heads, 0644, ide_park_show, ide_park_store);
+
+static struct attribute *ide_attrs[] = {
+ &dev_attr_media.attr,
+ &dev_attr_drivename.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_model.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_unload_heads.attr,
+ NULL,
+};
+
+static const struct attribute_group ide_attr_group = {
+ .attrs = ide_attrs,
+};
-struct device_attribute ide_dev_attrs[] = {
- __ATTR_RO(media),
- __ATTR_RO(drivename),
- __ATTR_RO(modalias),
- __ATTR_RO(model),
- __ATTR_RO(firmware),
- __ATTR(serial, 0400, serial_show, NULL),
- __ATTR(unload_heads, 0644, ide_park_show, ide_park_store),
- __ATTR_NULL
+const struct attribute_group *ide_dev_groups[] = {
+ &ide_attr_group,
+ NULL,
};
static ssize_t store_delete_devices(struct device *portdev,
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index fa896210ed7..2ce6268a273 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -158,7 +158,7 @@ struct bus_type ide_bus_type = {
.probe = generic_ide_probe,
.remove = generic_ide_remove,
.shutdown = generic_ide_shutdown,
- .dev_attrs = ide_dev_attrs,
+ .dev_groups = ide_dev_groups,
.suspend = generic_ide_suspend,
.resume = generic_ide_resume,
};
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index bf83d7bb6bc..2db803cd095 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -416,8 +416,7 @@ static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
static void pmac_ide_apply_timings(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
if (drive->dn & 1)
writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
@@ -434,8 +433,7 @@ static void pmac_ide_apply_timings(ide_drive_t *drive)
static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
if (drive->dn & 1) {
writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
@@ -454,8 +452,7 @@ static void
pmac_ide_do_update_timings(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
if (pmif->kind == controller_sh_ata6 ||
pmif->kind == controller_un_ata6 ||
@@ -500,8 +497,7 @@ static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
*/
static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
u32 *timings, t;
@@ -781,8 +777,7 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
int ret = 0;
u32 *timings, *timings2, tl[2];
u8 unit = drive->dn & 1;
@@ -919,8 +914,7 @@ static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
struct device_node *np = pmif->node;
const char *cable = of_get_property(np, "cable-type", NULL);
struct device_node *root = of_find_node_by_path("/");
@@ -951,8 +945,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
static void pmac_ide_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
if (on_media_bay(pmif)) {
if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
@@ -1228,8 +1221,7 @@ out_free_pmif:
static int
pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
int rc = 0;
if (mesg.event != mdev->ofdev.dev.power.power_state.event
@@ -1245,8 +1237,7 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
static int
pmac_ide_macio_resume(struct macio_dev *mdev)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
int rc = 0;
if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
@@ -1318,7 +1309,6 @@ static int pmac_ide_pci_attach(struct pci_dev *pdev,
rc = pmac_ide_setup_device(pmif, &hw);
if (rc != 0) {
/* The inteface is released to the common IDE layer */
- pci_set_drvdata(pdev, NULL);
iounmap(base);
pci_release_regions(pdev);
kfree(pmif);
@@ -1365,8 +1355,7 @@ pmac_ide_pci_resume(struct pci_dev *pdev)
#ifdef CONFIG_PMAC_MEDIABAY
static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
switch(mb_state) {
case MB_CD:
@@ -1468,8 +1457,7 @@ out:
static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
struct dbdma_cmd *table;
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
struct scatterlist *sg;
@@ -1546,8 +1534,7 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
@@ -1572,8 +1559,7 @@ static void
pmac_ide_dma_start(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma;
dma = pmif->dma_regs;
@@ -1590,8 +1576,7 @@ static int
pmac_ide_dma_end (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
u32 dstat;
@@ -1615,8 +1600,7 @@ static int
pmac_ide_dma_test_irq (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
unsigned long status, timeout;
@@ -1670,8 +1654,7 @@ static void
pmac_ide_dma_lost_irq (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
unsigned long status = readl(&dma->status);
@@ -1693,8 +1676,7 @@ static const struct ide_dma_ops pmac_dma_ops = {
*/
static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
- pmac_ide_hwif_t *pmif =
- (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
struct pci_dev *dev = to_pci_dev(hwif->dev);
/* We won't need pci_dev if we switch to generic consistent
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index fa6964d8681..cbd4e9abc47 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1,7 +1,7 @@
/*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2013, Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
{
.name = "C1-NHM",
.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
{
.name = "C1-SNB",
.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
{
.name = "C1-IVB",
.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
{
.name = "C1-HSW",
.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E-ATM",
.desc = "MWAIT 0x00",
@@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
{
.enter = NULL }
};
+static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .name = "C1-AVN",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle },
+ {
+ .name = "C6-AVN",
+ .desc = "MWAIT 0x51",
+ .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 15,
+ .target_residency = 45,
+ .enter = &intel_idle },
+};
/**
* intel_idle
@@ -359,7 +375,7 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- if (!need_resched()) {
+ if (!current_set_polling_and_test()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
@@ -390,7 +406,7 @@ static int cpu_hotplug_notify(struct notifier_block *n,
int hotcpu = (unsigned long)hcpu;
struct cpuidle_device *dev;
- switch (action & 0xf) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
@@ -462,6 +478,11 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_avn = {
+ .state_table = avn_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -483,6 +504,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
ICPU(0x46, idle_cpu_hsw),
+ ICPU(0x4D, idle_cpu_avn),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -490,7 +512,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/*
* intel_idle_probe()
*/
-static int intel_idle_probe(void)
+static int __init intel_idle_probe(void)
{
unsigned int eax, ebx, ecx;
const struct x86_cpu_id *id;
@@ -558,7 +580,7 @@ static void intel_idle_cpuidle_devices_uninit(void)
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
-static int intel_idle_cpuidle_driver_init(void)
+static int __init intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
@@ -628,7 +650,7 @@ static int intel_idle_cpu_init(int cpu)
int num_substates, mwait_hint, mwait_cstate, mwait_substate;
if (cpuidle_state_table[cstate].enter == NULL)
- continue;
+ break;
if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n", max_cstate);
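A standalone sketch (not part of the patch) of how intel_idle packs a MWAIT hint into cpuidle_state.flags: the macros below mirror the driver's MWAIT2flg()/flg2MWAIT() packing into the top byte of .flags, and the usual hint layout (upper nibble = C-state - 1, lower nibble = sub-state) is assumed here. The hotplug hunk is related bookkeeping: masking with ~CPU_TASKS_FROZEN strips the frozen bit so frozen and normal notifications hit the same case labels.

/* Userspace check of the MWAIT-hint packing used by the new AVN entries. */
#include <stdio.h>

#define MWAIT2FLG(eax)   (((eax) & 0xFF) << 24)
#define FLG2MWAIT(flags) (((flags) >> 24) & 0xFF)

int main(void)
{
	unsigned int hints[] = { 0x00, 0x51 };   /* C1-AVN, C6-AVN */

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int flags = MWAIT2FLG(hints[i]);
		unsigned int hint = FLG2MWAIT(flags);
		unsigned int cstate = ((hint >> 4) & 0xF) + 1;
		unsigned int substate = hint & 0xF;

		printf("hint 0x%02x -> flags 0x%08x -> C%u substate %u\n",
		       hints[i], flags, cstate, substate);
	}
	return 0;
}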
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 81e3dc26099..28b39283bcc 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -471,13 +471,10 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bma180_data *data = iio_priv(indio_dev);
+ int64_t time_ns = iio_get_time_ns();
int bit, ret, i = 0;
mutex_lock(&data->mutex);
- if (indio_dev->scan_timestamp) {
- ret = indio_dev->scan_bytes / sizeof(s64) - 1;
- ((s64 *)data->buff)[ret] = iio_get_time_ns();
- }
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
indio_dev->masklength) {
@@ -490,7 +487,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
}
mutex_unlock(&data->mutex);
- iio_push_to_buffers(indio_dev, (u8 *)data->buff);
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
err:
iio_trigger_notify_done(indio_dev->trig);
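A small userspace model (not part of the patch) of the buffer layout assumed by iio_push_to_buffers_with_timestamp(): the s64 timestamp lands in the last 8-byte-aligned slot of the scan, which is exactly what the removed open-coded "scan_bytes / sizeof(s64) - 1" lines did by hand in bma180 and the other drivers converted below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of the helper: drop the timestamp into the last aligned slot. */
static void push_with_timestamp(uint8_t *scan, size_t scan_bytes,
				int64_t timestamp)
{
	size_t ts_index = scan_bytes / sizeof(int64_t) - 1;

	memcpy(scan + ts_index * sizeof(int64_t), &timestamp,
	       sizeof(timestamp));
	/* a real driver would now hand "scan" to iio_push_to_buffers() */
}

int main(void)
{
	/* e.g. three 16-bit channels padded to 8 bytes + 8-byte timestamp */
	uint8_t scan[16] = { 0 };

	push_with_timestamp(scan, sizeof(scan), 1234567890LL);
	printf("timestamp stored at offset %zu\n",
	       sizeof(scan) - sizeof(int64_t));
	return 0;
}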
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 46d22f3fb1a..dcda17395c4 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -182,10 +182,11 @@ static const struct iio_info accel_3d_info = {
};
/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
{
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
+ iio_push_to_buffers(indio_dev, data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -200,7 +201,7 @@ static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
accel_state->common_attributes.data_ready);
if (accel_state->common_attributes.data_ready)
hid_sensor_push_data(indio_dev,
- (u8 *)accel_state->accel_val,
+ accel_state->accel_val,
sizeof(accel_state->accel_val));
return 0;
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 709c13259f1..d72118d1189 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -222,7 +222,6 @@ static int kxsd9_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct kxsd9_state *st;
- int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
@@ -244,11 +243,7 @@ static int kxsd9_probe(struct spi_device *spi)
spi_setup(spi);
kxsd9_power_up(st);
- ret = iio_device_register(indio_dev);
- if (ret)
- return ret;
-
- return 0;
+ return iio_device_register(indio_dev);
}
static int kxsd9_remove(struct spi_device *spi)
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index d9b350756f9..a1e642ee13d 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -32,16 +32,7 @@ int st_accel_trig_set_state(struct iio_trigger *trig, bool state)
static int st_accel_buffer_preenable(struct iio_dev *indio_dev)
{
- int err;
-
- err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
- goto st_accel_set_enable_error;
-
- err = iio_sw_buffer_preenable(indio_dev);
-
-st_accel_set_enable_error:
- return err;
+ return st_sensors_set_enable(indio_dev, true);
}
static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 1458343f6f3..38caedc76b9 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -452,8 +452,9 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
int st_accel_common_probe(struct iio_dev *indio_dev,
struct st_sensors_platform_data *plat_data)
{
- int err;
struct st_sensor_data *adata = iio_priv(indio_dev);
+ int irq = adata->get_irq_data_ready(indio_dev);
+ int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &accel_info;
@@ -461,7 +462,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev,
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_accel_sensors), st_accel_sensors);
if (err < 0)
- goto st_accel_common_probe_error;
+ return err;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
adata->multiread_bit = adata->sensor->multi_read_bit;
@@ -478,13 +479,13 @@ int st_accel_common_probe(struct iio_dev *indio_dev,
err = st_sensors_init_sensor(indio_dev, plat_data);
if (err < 0)
- goto st_accel_common_probe_error;
+ return err;
- if (adata->get_irq_data_ready(indio_dev) > 0) {
- err = st_accel_allocate_ring(indio_dev);
- if (err < 0)
- goto st_accel_common_probe_error;
+ err = st_accel_allocate_ring(indio_dev);
+ if (err < 0)
+ return err;
+ if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
ST_ACCEL_TRIGGER_OPS);
if (err < 0)
@@ -495,15 +496,14 @@ int st_accel_common_probe(struct iio_dev *indio_dev,
if (err)
goto st_accel_device_register_error;
- return err;
+ return 0;
st_accel_device_register_error:
- if (adata->get_irq_data_ready(indio_dev) > 0)
+ if (irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_accel_probe_trigger_error:
- if (adata->get_irq_data_ready(indio_dev) > 0)
- st_accel_deallocate_ring(indio_dev);
-st_accel_common_probe_error:
+ st_accel_deallocate_ring(indio_dev);
+
return err;
}
EXPORT_SYMBOL(st_accel_common_probe);
@@ -513,10 +513,10 @@ void st_accel_common_remove(struct iio_dev *indio_dev)
struct st_sensor_data *adata = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (adata->get_irq_data_ready(indio_dev) > 0) {
+ if (adata->get_irq_data_ready(indio_dev) > 0)
st_sensors_deallocate_trigger(indio_dev);
- st_accel_deallocate_ring(indio_dev);
- }
+
+ st_accel_deallocate_ring(indio_dev);
}
EXPORT_SYMBOL(st_accel_common_remove);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 09371cbc9dc..2209f28441e 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -145,6 +145,16 @@ config MCP320X
This driver can also be built as a module. If so, the module will be
called mcp320x.
+config MCP3422
+ tristate "Microchip Technology MCP3422/3/4 driver"
+ depends on I2C
+ help
+ Say yes here to build support for Microchip Technology's MCP3422,
+ MCP3423 or MCP3424 analog to digital converters.
+
+ This driver can also be built as a module. If so, the module will be
+ called mcp3422.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
@@ -167,6 +177,8 @@ config TI_ADC081C
config TI_AM335X_ADC
tristate "TI's AM335X ADC driver"
depends on MFD_TI_AM335X_TSCADC
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
help
Say yes here to build support for Texas Instruments ADC
driver which is also a MFD client.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 33656ef7d1f..ba9a10a24cd 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
+obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 371731df163..58e945594c7 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -27,7 +27,7 @@
struct ad7266_state {
struct spi_device *spi;
struct regulator *reg;
- unsigned long vref_uv;
+ unsigned long vref_mv;
struct spi_transfer single_xfer[3];
struct spi_message single_msg;
@@ -61,17 +61,7 @@ static int ad7266_powerdown(struct ad7266_state *st)
static int ad7266_preenable(struct iio_dev *indio_dev)
{
struct ad7266_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = ad7266_wakeup(st);
- if (ret)
- return ret;
-
- ret = iio_sw_buffer_preenable(indio_dev);
- if (ret)
- ad7266_powerdown(st);
-
- return ret;
+ return ad7266_wakeup(st);
}
static int ad7266_postdisable(struct iio_dev *indio_dev)
@@ -96,9 +86,8 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
ret = spi_read(st->spi, st->data, 4);
if (ret == 0) {
- if (indio_dev->scan_timestamp)
- ((s64 *)st->data)[1] = pf->timestamp;
- iio_push_to_buffers(indio_dev, (u8 *)st->data);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ pf->timestamp);
}
iio_trigger_notify_done(indio_dev->trig);
@@ -157,7 +146,7 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long m)
{
struct ad7266_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
+ unsigned long scale_mv;
int ret;
switch (m) {
@@ -175,16 +164,15 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->vref_uv * 100);
+ scale_mv = st->vref_mv;
if (st->mode == AD7266_MODE_DIFF)
- scale_uv *= 2;
+ scale_mv *= 2;
if (st->range == AD7266_RANGE_2VREF)
- scale_uv *= 2;
+ scale_mv *= 2;
- scale_uv >>= chan->scan_type.realbits;
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = scale_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
if (st->range == AD7266_RANGE_2VREF &&
st->mode != AD7266_MODE_DIFF)
@@ -293,7 +281,7 @@ static const struct iio_info ad7266_info = {
.driver_module = THIS_MODULE,
};
-static unsigned long ad7266_available_scan_masks[] = {
+static const unsigned long ad7266_available_scan_masks[] = {
0x003,
0x00c,
0x030,
@@ -303,14 +291,14 @@ static unsigned long ad7266_available_scan_masks[] = {
0x000,
};
-static unsigned long ad7266_available_scan_masks_diff[] = {
+static const unsigned long ad7266_available_scan_masks_diff[] = {
0x003,
0x00c,
0x030,
0x000,
};
-static unsigned long ad7266_available_scan_masks_fixed[] = {
+static const unsigned long ad7266_available_scan_masks_fixed[] = {
0x003,
0x000,
};
@@ -318,7 +306,7 @@ static unsigned long ad7266_available_scan_masks_fixed[] = {
struct ad7266_chan_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
- unsigned long *scan_masks;
+ const unsigned long *scan_masks;
};
#define AD7266_CHAN_INFO_INDEX(_differential, _signed, _fixed) \
@@ -415,10 +403,10 @@ static int ad7266_probe(struct spi_device *spi)
if (ret < 0)
goto error_disable_reg;
- st->vref_uv = ret;
+ st->vref_mv = ret / 1000;
} else {
/* Use internal reference */
- st->vref_uv = 2500000;
+ st->vref_mv = 2500;
}
if (pdata) {
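A quick arithmetic check (not part of the patch) of the IIO_VAL_FRACTIONAL_LOG2 convention the scale paths switch to: the IIO core reports val / 2^val2, so returning the reference in mV plus realbits reproduces the removed "shift the reference by realbits" arithmetic without the manual micro-volt splitting. The 2500 mV / 12-bit numbers follow the ad7266 internal-reference case above.

#include <stdio.h>

int main(void)
{
	int val = 2500;   /* internal reference, mV */
	int val2 = 12;    /* chan->scan_type.realbits */

	printf("scale = %d / 2^%d = %.9f mV per LSB\n",
	       val, val2, (double)val / (double)(1 << val2));
	return 0;
}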
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 85d1481c312..2a3b65c74af 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -159,20 +159,14 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7298_state *st = iio_priv(indio_dev);
- s64 time_ns = 0;
int b_sent;
b_sent = spi_sync(st->spi, &st->ring_msg);
if (b_sent)
goto done;
- if (indio_dev->scan_timestamp) {
- time_ns = iio_get_time_ns();
- memcpy((u8 *)st->rx_buf + indio_dev->scan_bytes - sizeof(s64),
- &time_ns, sizeof(time_ns));
- }
-
- iio_push_to_buffers(indio_dev, (u8 *)st->rx_buf);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
+ iio_get_time_ns());
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 6d2b1d8d1a1..d141d452c3d 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -64,19 +64,14 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7476_state *st = iio_priv(indio_dev);
- s64 time_ns;
int b_sent;
b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
goto done;
- time_ns = iio_get_time_ns();
-
- if (indio_dev->scan_timestamp)
- ((s64 *)st->data)[1] = time_ns;
-
- iio_push_to_buffers(indio_dev, st->data);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns());
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -132,10 +127,9 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
} else {
scale_uv = st->chip_info->int_vref_uv;
}
- scale_uv >>= chan->scan_type.realbits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index c20203577d2..c19f8fd1b4b 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -202,7 +202,6 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
{
struct ad7791_state *st = iio_priv(indio_dev);
bool unipolar = !!(st->mode & AD7791_MODE_UNIPOLAR);
- unsigned long long scale_pv;
switch (info) {
case IIO_CHAN_INFO_RAW:
@@ -220,23 +219,26 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
/* The monitor channel uses an internal reference. */
if (chan->address == AD7791_CH_AVDD_MONITOR) {
- scale_pv = 5850000000000ULL;
+ /*
+ * The signal is attenuated by a factor of 5 and
+ * compared against a 1.17V internal reference.
+ */
+ *val = 1170 * 5;
} else {
int voltage_uv;
voltage_uv = regulator_get_voltage(st->reg);
if (voltage_uv < 0)
return voltage_uv;
- scale_pv = (unsigned long long)voltage_uv * 1000000;
+
+ *val = voltage_uv / 1000;
}
if (unipolar)
- scale_pv >>= chan->scan_type.realbits;
+ *val2 = chan->scan_type.realbits;
else
- scale_pv >>= chan->scan_type.realbits - 1;
- *val2 = do_div(scale_pv, 1000000000);
- *val = scale_pv;
+ *val2 = chan->scan_type.realbits - 1;
- return IIO_VAL_INT_PLUS_NANO;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
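A worked check (not part of the patch) that the new ad7791 monitor-channel scale matches the removed picovolt arithmetic: per the new comment, the 1.17 V internal reference times the attenuation factor of 5 gives 5.85 V full scale either way. The 24-bit resolution below is only an example value for the arithmetic.

#include <stdio.h>

int main(void)
{
	unsigned long long old_pv = 5850000000000ULL;  /* removed constant, pV */
	int new_mv = 1170 * 5;                         /* new *val, mV */
	int realbits = 24;                             /* example resolution */

	printf("old: %.6f mV/LSB\n",
	       (double)old_pv / 1e9 / (double)(1ULL << realbits));
	printf("new: %.6f mV/LSB\n",
	       (double)new_mv / (double)(1ULL << realbits));
	return 0;
}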
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 9dd077b7875..acb7f90359a 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -78,11 +78,6 @@ enum ad7887_supported_device_ids {
static int ad7887_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7887_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = iio_sw_buffer_preenable(indio_dev);
- if (ret < 0)
- return ret;
/* We know this is a single long so can 'cheat' */
switch (*indio_dev->active_scan_mask) {
@@ -121,20 +116,14 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7887_state *st = iio_priv(indio_dev);
- s64 time_ns;
int b_sent;
b_sent = spi_sync(st->spi, st->ring_msg);
if (b_sent)
goto done;
- time_ns = iio_get_time_ns();
-
- if (indio_dev->scan_timestamp)
- memcpy(st->data + indio_dev->scan_bytes - sizeof(s64),
- &time_ns, sizeof(time_ns));
-
- iio_push_to_buffers(indio_dev, st->data);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns());
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 4108dbb28c3..28732c28e81 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -174,20 +174,14 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7923_state *st = iio_priv(indio_dev);
- s64 time_ns = 0;
int b_sent;
b_sent = spi_sync(st->spi, &st->ring_msg);
if (b_sent)
goto done;
- if (indio_dev->scan_timestamp) {
- time_ns = iio_get_time_ns();
- memcpy((u8 *)st->rx_buf + indio_dev->scan_bytes - sizeof(s64),
- &time_ns, sizeof(time_ns));
- }
-
- iio_push_to_buffers(indio_dev, (u8 *)st->rx_buf);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
+ iio_get_time_ns());
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index f0d6335ae08..9a4e0e32a77 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -188,7 +188,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
- INIT_COMPLETION(sigma_delta->completion);
+ reinit_completion(&sigma_delta->completion);
ret = ad_sigma_delta_set_mode(sigma_delta, mode);
if (ret < 0)
@@ -259,7 +259,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
- INIT_COMPLETION(sigma_delta->completion);
+ reinit_completion(&sigma_delta->completion);
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
@@ -343,7 +343,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
- INIT_COMPLETION(sigma_delta->completion);
+ reinit_completion(&sigma_delta->completion);
wait_for_completion_timeout(&sigma_delta->completion, HZ);
if (!sigma_delta->irq_dis) {
@@ -368,10 +368,6 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
memset(data, 0x00, 16);
- /* Guaranteed to be aligned with 8 byte boundary */
- if (indio_dev->scan_timestamp)
- ((s64 *)data)[1] = pf->timestamp;
-
reg_size = indio_dev->channels[0].scan_type.realbits +
indio_dev->channels[0].scan_type.shift;
reg_size = DIV_ROUND_UP(reg_size, 8);
@@ -391,7 +387,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
break;
}
- iio_push_to_buffers(indio_dev, (uint8_t *)data);
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
sigma_delta->irq_dis = false;
@@ -401,7 +397,6 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
}
static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
- .preenable = &iio_sw_buffer_preenable,
.postenable = &ad_sd_buffer_postenable,
.predisable = &iio_triggered_buffer_predisable,
.postdisable = &ad_sd_buffer_postdisable,
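A minimal out-of-tree module sketch (not part of the patch) of the completion re-arm pattern these hunks migrate to: reinit_completion() replaces the old INIT_COMPLETION() macro before each new wait cycle. The module name and timeout are arbitrary illustration values.

#include <linux/module.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(demo_done);

static int __init completion_demo_init(void)
{
	/* Re-arm before every conversion, as ad_sd_calibrate() and
	 * ad_sigma_delta_single_conversion() now do. */
	reinit_completion(&demo_done);

	/* In the real driver an interrupt handler calls complete(); with no
	 * completer this simply times out after one second. */
	if (!wait_for_completion_timeout(&demo_done, HZ))
		pr_info("completion_demo: timed out (expected)\n");

	return 0;
}

static void __exit completion_demo_exit(void)
{
}

module_init(completion_demo_init);
module_exit(completion_demo_exit);
MODULE_LICENSE("GPL");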
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 0f16b553e06..17df74908db 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -39,10 +40,36 @@
#define at91_adc_writel(st, reg, val) \
(writel_relaxed(val, st->reg_base + reg))
+#define DRIVER_NAME "at91_adc"
+#define MAX_POS_BITS 12
+
+#define TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */
+#define TOUCH_PEN_DETECT_DEBOUNCE_US 200
+
struct at91_adc_caps {
+ bool has_ts; /* Support touch screen */
+ bool has_tsmr; /* only at91sam9x5, sama5d3 have TSMR reg */
+ /*
+ * Number of samples to be averaged; can be 0~3. The hardware
+ * averages (2 ^ ts_filter_average) samples.
+ */
+ u8 ts_filter_average;
+ /* Pen Detection input pull-up resistor, can be 0~3 */
+ u8 ts_pen_detect_sensitivity;
+
+ /* startup time calculate function */
+ u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
+
+ u8 num_channels;
struct at91_adc_reg_desc registers;
};
+enum atmel_adc_ts_type {
+ ATMEL_ADC_TOUCHSCREEN_NONE = 0,
+ ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
+ ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
+};
+
struct at91_adc_state {
struct clk *adc_clk;
u16 *buffer;
@@ -67,6 +94,26 @@ struct at91_adc_state {
bool low_res; /* the resolution corresponds to the lowest one */
wait_queue_head_t wq_data_avail;
struct at91_adc_caps *caps;
+
+ /*
+ * The following ADC channels are shared with the touchscreen:
+ *
+ * CH0 -- Touch screen XP/UL
+ * CH1 -- Touch screen XM/UR
+ * CH2 -- Touch screen YP/LL
+ * CH3 -- Touch screen YM/Sense
+ * CH4 -- Touch screen LR(5-wire only)
+ *
+ * The bitmasks below represent the channels reserved for
+ * touchscreen mode.
+ */
+#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 0)
+#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 0)
+ enum atmel_adc_ts_type touchscreen_type;
+ struct input_dev *ts_input;
+
+ u16 ts_sample_period_val;
+ u32 ts_pressure_threshold;
};
static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
@@ -83,13 +130,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
j++;
}
- if (idev->scan_timestamp) {
- s64 *timestamp = (s64 *)((u8 *)st->buffer +
- ALIGN(j, sizeof(s64)));
- *timestamp = pf->timestamp;
- }
-
- iio_push_to_buffers(idev, (u8 *)st->buffer);
+ iio_push_to_buffers_with_timestamp(idev, st->buffer, pf->timestamp);
iio_trigger_notify_done(idev->trig);
@@ -101,14 +142,10 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
-static irqreturn_t at91_adc_eoc_trigger(int irq, void *private)
+/* Handler for classic adc channel eoc trigger */
+void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
{
- struct iio_dev *idev = private;
struct at91_adc_state *st = iio_priv(idev);
- u32 status = at91_adc_readl(st, st->registers->status_register);
-
- if (!(status & st->registers->drdy_mask))
- return IRQ_HANDLED;
if (iio_buffer_enabled(idev)) {
disable_irq_nosync(irq);
@@ -118,6 +155,115 @@ static irqreturn_t at91_adc_eoc_trigger(int irq, void *private)
st->done = true;
wake_up_interruptible(&st->wq_data_avail);
}
+}
+
+static int at91_ts_sample(struct at91_adc_state *st)
+{
+ unsigned int xscale, yscale, reg, z1, z2;
+ unsigned int x, y, pres, xpos, ypos;
+ unsigned int rxp = 1;
+ unsigned int factor = 1000;
+ struct iio_dev *idev = iio_priv_to_dev(st);
+
+ unsigned int xyz_mask_bits = st->res;
+ unsigned int xyz_mask = (1 << xyz_mask_bits) - 1;
+
+ /* calculate position */
+ /* x position = (x / xscale) * max, max = 2^MAX_POS_BITS - 1 */
+ reg = at91_adc_readl(st, AT91_ADC_TSXPOSR);
+ xpos = reg & xyz_mask;
+ x = (xpos << MAX_POS_BITS) - xpos;
+ xscale = (reg >> 16) & xyz_mask;
+ if (xscale == 0) {
+ dev_err(&idev->dev, "Error: xscale == 0!\n");
+ return -1;
+ }
+ x /= xscale;
+
+ /* y position = (y / yscale) * max, max = 2^MAX_POS_BITS - 1 */
+ reg = at91_adc_readl(st, AT91_ADC_TSYPOSR);
+ ypos = reg & xyz_mask;
+ y = (ypos << MAX_POS_BITS) - ypos;
+ yscale = (reg >> 16) & xyz_mask;
+ if (yscale == 0) {
+ dev_err(&idev->dev, "Error: yscale == 0!\n");
+ return -1;
+ }
+ y /= yscale;
+
+ /* calculate the pressure */
+ reg = at91_adc_readl(st, AT91_ADC_TSPRESSR);
+ z1 = reg & xyz_mask;
+ z2 = (reg >> 16) & xyz_mask;
+
+ if (z1 != 0)
+ pres = rxp * (x * factor / 1024) * (z2 * factor / z1 - factor)
+ / factor;
+ else
+ pres = st->ts_pressure_threshold; /* no pen contacted */
+
+ dev_dbg(&idev->dev, "xpos = %d, xscale = %d, ypos = %d, yscale = %d, z1 = %d, z2 = %d, press = %d\n",
+ xpos, xscale, ypos, yscale, z1, z2, pres);
+
+ if (pres < st->ts_pressure_threshold) {
+ dev_dbg(&idev->dev, "x = %d, y = %d, pressure = %d\n",
+ x, y, pres / factor);
+ input_report_abs(st->ts_input, ABS_X, x);
+ input_report_abs(st->ts_input, ABS_Y, y);
+ input_report_abs(st->ts_input, ABS_PRESSURE, pres);
+ input_report_key(st->ts_input, BTN_TOUCH, 1);
+ input_sync(st->ts_input);
+ } else {
+ dev_dbg(&idev->dev, "pressure too low: not reporting\n");
+ }
+
+ return 0;
+}
+
+static irqreturn_t at91_adc_interrupt(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct at91_adc_state *st = iio_priv(idev);
+ u32 status = at91_adc_readl(st, st->registers->status_register);
+ const uint32_t ts_data_irq_mask =
+ AT91_ADC_IER_XRDY |
+ AT91_ADC_IER_YRDY |
+ AT91_ADC_IER_PRDY;
+
+ if (status & st->registers->drdy_mask)
+ handle_adc_eoc_trigger(irq, idev);
+
+ if (status & AT91_ADC_IER_PEN) {
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN);
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_NOPEN |
+ ts_data_irq_mask);
+ /* Set up period trigger for sampling */
+ at91_adc_writel(st, st->registers->trigger_register,
+ AT91_ADC_TRGR_MOD_PERIOD_TRIG |
+ AT91_ADC_TRGR_TRGPER_(st->ts_sample_period_val));
+ } else if (status & AT91_ADC_IER_NOPEN) {
+ at91_adc_writel(st, st->registers->trigger_register, 0);
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_NOPEN |
+ ts_data_irq_mask);
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN);
+
+ input_report_key(st->ts_input, BTN_TOUCH, 0);
+ input_sync(st->ts_input);
+ } else if ((status & ts_data_irq_mask) == ts_data_irq_mask) {
+ /* Now all touchscreen data is ready */
+
+ if (status & AT91_ADC_ISR_PENS) {
+ /* validate data by pen contact */
+ at91_ts_sample(st);
+ } else {
+ /* Triggered by an event without pen contact; just read the
+ * registers to clear the interrupt and discard the data.
+ */
+ at91_adc_readl(st, AT91_ADC_TSXPOSR);
+ at91_adc_readl(st, AT91_ADC_TSYPOSR);
+ at91_adc_readl(st, AT91_ADC_TSPRESSR);
+ }
+ }
return IRQ_HANDLED;
}
@@ -127,6 +273,16 @@ static int at91_adc_channel_init(struct iio_dev *idev)
struct at91_adc_state *st = iio_priv(idev);
struct iio_chan_spec *chan_array, *timestamp;
int bit, idx = 0;
+ unsigned long rsvd_mask = 0;
+
+ /* If the touchscreen is enabled, reserve its adc channels */
+ if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
+ rsvd_mask = CHAN_MASK_TOUCHSCREEN_4WIRE;
+ else if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_5WIRE)
+ rsvd_mask = CHAN_MASK_TOUCHSCREEN_5WIRE;
+
+ /* set up the channel mask to reserve touchscreen channels */
+ st->channels_mask &= ~rsvd_mask;
idev->num_channels = bitmap_weight(&st->channels_mask,
st->num_channels) + 1;
@@ -279,7 +435,7 @@ static int at91_adc_trigger_init(struct iio_dev *idev)
int i, ret;
st->trig = devm_kzalloc(&idev->dev,
- st->trigger_number * sizeof(st->trig),
+ st->trigger_number * sizeof(*st->trig),
GFP_KERNEL);
if (st->trig == NULL) {
@@ -372,9 +528,9 @@ static int at91_adc_read_raw(struct iio_dev *idev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = (st->vref_mv * 1000) >> chan->scan_type.realbits;
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
break;
}
@@ -434,8 +590,80 @@ ret:
return ret;
}
+static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
+{
+ /*
+ * Number of ticks needed to cover the startup time of the ADC
+ * as defined in the electrical characteristics of the board,
+ * divided by 8. The formula thus is :
+ * Startup Time = (ticks + 1) * 8 / ADC Clock
+ */
+ return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
+}
+
+static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
+{
+ /*
+ * For sama5d3x and at91sam9x5, the formula changes to:
+ * Startup Time = <lookup_table_value> / ADC Clock
+ */
+ const int startup_lookup[] = {
+ 0 , 8 , 16 , 24 ,
+ 64 , 80 , 96 , 112,
+ 512, 576, 640, 704,
+ 768, 832, 896, 960
+ };
+ int i, size = ARRAY_SIZE(startup_lookup);
+ unsigned int ticks;
+
+ ticks = startup_time * adc_clk_khz / 1000;
+ for (i = 0; i < size; i++)
+ if (ticks < startup_lookup[i])
+ break;
+
+ ticks = i;
+ if (ticks == size)
+ /* Reached the end of the lookup table */
+ ticks = size - 1;
+
+ return ticks;
+}
+
static const struct of_device_id at91_adc_dt_ids[];
+static int at91_adc_probe_dt_ts(struct device_node *node,
+ struct at91_adc_state *st, struct device *dev)
+{
+ int ret;
+ u32 prop;
+
+ ret = of_property_read_u32(node, "atmel,adc-ts-wires", &prop);
+ if (ret) {
+ dev_info(dev, "ADC Touch screen is disabled.\n");
+ return 0;
+ }
+
+ switch (prop) {
+ case 4:
+ case 5:
+ st->touchscreen_type = prop;
+ break;
+ default:
+ dev_err(dev, "Unsupported number of touchscreen wires (%d). Should be 4 or 5.\n", prop);
+ return -EINVAL;
+ }
+
+ prop = 0;
+ of_property_read_u32(node, "atmel,adc-ts-pressure-threshold", &prop);
+ st->ts_pressure_threshold = prop;
+ if (st->ts_pressure_threshold) {
+ return 0;
+ } else {
+ dev_err(dev, "Invalid pressure threshold for the touchscreen\n");
+ return -EINVAL;
+ }
+}
+
static int at91_adc_probe_dt(struct at91_adc_state *st,
struct platform_device *pdev)
{
@@ -460,13 +688,6 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
}
st->channels_mask = prop;
- if (of_property_read_u32(node, "atmel,adc-num-channels", &prop)) {
- dev_err(&idev->dev, "Missing adc-num-channels property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- st->num_channels = prop;
-
st->sleep_mode = of_property_read_bool(node, "atmel,adc-sleep-mode");
if (of_property_read_u32(node, "atmel,adc-startup-time", &prop)) {
@@ -492,6 +713,7 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
goto error_ret;
st->registers = &st->caps->registers;
+ st->num_channels = st->caps->num_channels;
st->trigger_number = of_get_child_count(node);
st->trigger_list = devm_kzalloc(&idev->dev, st->trigger_number *
sizeof(struct at91_adc_trigger),
@@ -523,6 +745,12 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
i++;
}
+ /* Check if touchscreen is supported. */
+ if (st->caps->has_ts)
+ return at91_adc_probe_dt_ts(node, st, &idev->dev);
+ else
+ dev_info(&idev->dev, "not support touchscreen in the adc compatible string.\n");
+
return 0;
error_ret:
@@ -554,6 +782,114 @@ static const struct iio_info at91_adc_info = {
.read_raw = &at91_adc_read_raw,
};
+/* Touchscreen related functions */
+static int atmel_ts_open(struct input_dev *dev)
+{
+ struct at91_adc_state *st = input_get_drvdata(dev);
+
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN);
+ return 0;
+}
+
+static void atmel_ts_close(struct input_dev *dev)
+{
+ struct at91_adc_state *st = input_get_drvdata(dev);
+
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN);
+}
+
+static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz)
+{
+ u32 reg = 0, pendbc;
+ int i = 0;
+
+ if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
+ reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS;
+ else
+ reg = AT91_ADC_TSMR_TSMODE_5WIRE;
+
+ /* A pen-detect debounce time is necessary for the ADC touchscreen
+ * to avoid pen-detect noise.
+ * The formula is: Pen Detect Debounce Time = (2 ^ pendbc) / ADCClock
+ */
+ pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz / 1000, 1);
+
+ while (pendbc >> ++i)
+ ; /* Empty! Find the shift offset */
+ if (abs(pendbc - (1 << i)) < abs(pendbc - (1 << (i - 1))))
+ pendbc = i;
+ else
+ pendbc = i - 1;
+
+ if (st->caps->has_tsmr) {
+ reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average)
+ & AT91_ADC_TSMR_TSAV;
+ reg |= AT91_ADC_TSMR_PENDBC_(pendbc) & AT91_ADC_TSMR_PENDBC;
+ reg |= AT91_ADC_TSMR_NOTSDMA;
+ reg |= AT91_ADC_TSMR_PENDET_ENA;
+ reg |= 0x03 << 8; /* TSFREQ, needs to be bigger than TSAV */
+
+ at91_adc_writel(st, AT91_ADC_TSMR, reg);
+ } else {
+ /* TODO: for 9g45 which has no TSMR */
+ }
+
+ /* Change adc internal resistor value for better pen detection,
+ * default value is 100 kOhm.
+ * 0 = 200 kOhm, 1 = 150 kOhm, 2 = 100 kOhm, 3 = 50 kOhm
+ * option only available on ES2 and higher
+ */
+ at91_adc_writel(st, AT91_ADC_ACR, st->caps->ts_pen_detect_sensitivity
+ & AT91_ADC_ACR_PENDETSENS);
+
+ /* Sample Period Time = (TRGPER + 1) / ADCClock */
+ st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US *
+ adc_clk_khz / 1000) - 1, 1);
+
+ return 0;
+}
+
+static int at91_ts_register(struct at91_adc_state *st,
+ struct platform_device *pdev)
+{
+ struct input_dev *input;
+ struct iio_dev *idev = iio_priv_to_dev(st);
+ int ret;
+
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&idev->dev, "Failed to allocate TS device!\n");
+ return -ENOMEM;
+ }
+
+ input->name = DRIVER_NAME;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &pdev->dev;
+ input->open = atmel_ts_open;
+ input->close = atmel_ts_close;
+
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0);
+
+ st->ts_input = input;
+ input_set_drvdata(input, st);
+
+ ret = input_register_device(input);
+ if (ret)
+ input_free_device(st->ts_input);
+
+ return ret;
+}
+
+static void at91_ts_unregister(struct at91_adc_state *st)
+{
+ input_unregister_device(st->ts_input);
+}
+
static int at91_adc_probe(struct platform_device *pdev)
{
unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
@@ -605,7 +941,7 @@ static int at91_adc_probe(struct platform_device *pdev)
at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST);
at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF);
ret = request_irq(st->irq,
- at91_adc_eoc_trigger,
+ at91_adc_interrupt,
0,
pdev->dev.driver->name,
idev);
@@ -650,6 +986,10 @@ static int at91_adc_probe(struct platform_device *pdev)
mstrclk = clk_get_rate(st->clk);
adc_clk = clk_get_rate(st->adc_clk);
adc_clk_khz = adc_clk / 1000;
+
+ dev_dbg(&pdev->dev, "Master clock is set as: %d Hz, adc_clk should set as: %d Hz\n",
+ mstrclk, adc_clk);
+
prsc = (mstrclk / (2 * adc_clk)) - 1;
if (!st->startup_time) {
@@ -657,15 +997,9 @@ static int at91_adc_probe(struct platform_device *pdev)
ret = -EINVAL;
goto error_disable_adc_clk;
}
+ ticks = (*st->caps->calc_startup_ticks)(st->startup_time, adc_clk_khz);
/*
- * Number of ticks needed to cover the startup time of the ADC as
- * defined in the electrical characteristics of the board, divided by 8.
- * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock
- */
- ticks = round_up((st->startup_time * adc_clk_khz /
- 1000) - 1, 8) / 8;
- /*
* a minimal Sample and Hold Time is necessary for the ADC to guarantee
* the best converted final value between two channels selection
* The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
@@ -692,30 +1026,52 @@ static int at91_adc_probe(struct platform_device *pdev)
init_waitqueue_head(&st->wq_data_avail);
mutex_init(&st->lock);
- ret = at91_adc_buffer_init(idev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't initialize the buffer.\n");
- goto error_disable_adc_clk;
- }
+ /*
+ * The touchscreen sets the trigger register to periodic trigger
+ * mode, so when the touchscreen is enabled we have to disable the
+ * hardware trigger for the classic adc.
+ */
+ if (!st->touchscreen_type) {
+ ret = at91_adc_buffer_init(idev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't initialize the buffer.\n");
+ goto error_disable_adc_clk;
+ }
- ret = at91_adc_trigger_init(idev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't setup the triggers.\n");
- goto error_unregister_buffer;
+ ret = at91_adc_trigger_init(idev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't setup the triggers.\n");
+ at91_adc_buffer_remove(idev);
+ goto error_disable_adc_clk;
+ }
+ } else {
+ if (!st->caps->has_tsmr) {
+ dev_err(&pdev->dev, "We don't support non-TSMR adc\n");
+ goto error_disable_adc_clk;
+ }
+
+ ret = at91_ts_register(st, pdev);
+ if (ret)
+ goto error_disable_adc_clk;
+
+ at91_ts_hw_init(st, adc_clk_khz);
}
ret = iio_device_register(idev);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't register the device.\n");
- goto error_remove_triggers;
+ goto error_iio_device_register;
}
return 0;
-error_remove_triggers:
- at91_adc_trigger_remove(idev);
-error_unregister_buffer:
- at91_adc_buffer_remove(idev);
+error_iio_device_register:
+ if (!st->touchscreen_type) {
+ at91_adc_trigger_remove(idev);
+ at91_adc_buffer_remove(idev);
+ } else {
+ at91_ts_unregister(st);
+ }
error_disable_adc_clk:
clk_disable_unprepare(st->adc_clk);
error_disable_clk:
@@ -731,8 +1087,12 @@ static int at91_adc_remove(struct platform_device *pdev)
struct at91_adc_state *st = iio_priv(idev);
iio_device_unregister(idev);
- at91_adc_trigger_remove(idev);
- at91_adc_buffer_remove(idev);
+ if (!st->touchscreen_type) {
+ at91_adc_trigger_remove(idev);
+ at91_adc_buffer_remove(idev);
+ } else {
+ at91_ts_unregister(st);
+ }
clk_disable_unprepare(st->adc_clk);
clk_disable_unprepare(st->clk);
free_irq(st->irq, idev);
@@ -742,6 +1102,8 @@ static int at91_adc_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static struct at91_adc_caps at91sam9260_caps = {
+ .calc_startup_ticks = calc_startup_ticks_9260,
+ .num_channels = 4,
.registers = {
.channel_base = AT91_ADC_CHR(0),
.drdy_mask = AT91_ADC_DRDY,
@@ -753,6 +1115,9 @@ static struct at91_adc_caps at91sam9260_caps = {
};
static struct at91_adc_caps at91sam9g45_caps = {
+ .has_ts = true,
+ .calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */
+ .num_channels = 8,
.registers = {
.channel_base = AT91_ADC_CHR(0),
.drdy_mask = AT91_ADC_DRDY,
@@ -764,6 +1129,12 @@ static struct at91_adc_caps at91sam9g45_caps = {
};
static struct at91_adc_caps at91sam9x5_caps = {
+ .has_ts = true,
+ .has_tsmr = true,
+ .ts_filter_average = 3,
+ .ts_pen_detect_sensitivity = 2,
+ .calc_startup_ticks = calc_startup_ticks_9x5,
+ .num_channels = 12,
.registers = {
.channel_base = AT91_ADC_CDR0_9X5,
.drdy_mask = AT91_ADC_SR_DRDY_9X5,
@@ -788,7 +1159,7 @@ static struct platform_driver at91_adc_driver = {
.probe = at91_adc_probe,
.remove = at91_adc_remove,
.driver = {
- .name = "at91_adc",
+ .name = DRIVER_NAME,
.of_match_table = of_match_ptr(at91_adc_dt_ids),
},
};
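A standalone arithmetic check (not part of the patch) for two formulas the at91_adc changes introduce: the 9260-style startup-tick count, Startup Time = (ticks + 1) * 8 / ADC Clock, and the touchscreen position scaling, where (xpos << MAX_POS_BITS) - xpos equals xpos * (2^12 - 1) before the divide by xscale. The 40 us / 5 MHz / position numbers are made-up inputs for illustration.

#include <stdio.h>

#define MAX_POS_BITS 12

/* Generic round-up helper standing in for the kernel's round_up(). */
static unsigned int round_up_to(unsigned int x, unsigned int step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	unsigned int startup_us = 40, adc_clk_khz = 5000;
	unsigned int ticks =
		round_up_to(startup_us * adc_clk_khz / 1000 - 1, 8) / 8;

	printf("startup ticks = %u -> (%u + 1) * 8 / %u kHz = ~%u us\n",
	       ticks, ticks, adc_clk_khz,
	       (ticks + 1) * 8 * 1000 / adc_clk_khz);

	unsigned int xpos = 700, xscale = 900;
	unsigned int x = ((xpos << MAX_POS_BITS) - xpos) / xscale;

	printf("xpos %u / xscale %u scaled to %u of %u\n",
	       xpos, xscale, x, (1 << MAX_POS_BITS) - 1);
	return 0;
}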
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 4fb35d1d749..6118dced02b 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -165,6 +165,8 @@ struct max1363_chip_info {
* @thresh_low: low threshold values
* @vref: Reference voltage regulator
* @vref_uv: Actual (external or internal) reference voltage
+ * @send: function used to send data to the chip
+ * @recv: function used to receive data from the chip
*/
struct max1363_state {
struct i2c_client *client;
@@ -186,6 +188,10 @@ struct max1363_state {
s16 thresh_low[8];
struct regulator *vref;
u32 vref_uv;
+ int (*send)(const struct i2c_client *client,
+ const char *buf, int count);
+ int (*recv)(const struct i2c_client *client,
+ char *buf, int count);
};
#define MAX1363_MODE_SINGLE(_num, _mask) { \
@@ -311,13 +317,37 @@ static const struct max1363_mode
return NULL;
}
-static int max1363_write_basic_config(struct i2c_client *client,
- unsigned char d1,
- unsigned char d2)
+static int max1363_smbus_send(const struct i2c_client *client, const char *buf,
+ int count)
{
- u8 tx_buf[2] = {d1, d2};
+ int i, err;
- return i2c_master_send(client, tx_buf, 2);
+ for (i = err = 0; err == 0 && i < count; ++i)
+ err = i2c_smbus_write_byte(client, buf[i]);
+
+ return err ? err : count;
+}
+
+static int max1363_smbus_recv(const struct i2c_client *client, char *buf,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; ++i) {
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0)
+ return ret;
+ buf[i] = ret;
+ }
+
+ return count;
+}
+
+static int max1363_write_basic_config(struct max1363_state *st)
+{
+ u8 tx_buf[2] = { st->setupbyte, st->configbyte };
+
+ return st->send(st->client, tx_buf, 2);
}
static int max1363_set_scan_mode(struct max1363_state *st)
@@ -327,9 +357,7 @@ static int max1363_set_scan_mode(struct max1363_state *st)
| MAX1363_SE_DE_MASK);
st->configbyte |= st->current_mode->conf;
- return max1363_write_basic_config(st->client,
- st->setupbyte,
- st->configbyte);
+ return max1363_write_basic_config(st);
}
static int max1363_read_single_chan(struct iio_dev *indio_dev,
@@ -366,7 +394,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
}
if (st->chip_info->bits != 8) {
/* Get reading */
- data = i2c_master_recv(client, rxbuf, 2);
+ data = st->recv(client, rxbuf, 2);
if (data < 0) {
ret = data;
goto error_ret;
@@ -375,7 +403,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
((1 << st->chip_info->bits) - 1);
} else {
/* Get reading */
- data = i2c_master_recv(client, rxbuf, 1);
+ data = st->recv(client, rxbuf, 1);
if (data < 0) {
ret = data;
goto error_ret;
@@ -397,7 +425,6 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
{
struct max1363_state *st = iio_priv(indio_dev);
int ret;
- unsigned long scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -406,10 +433,9 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = st->vref_uv >> st->chip_info->bits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->vref_uv / 1000;
+ *val2 = st->chip_info->bits;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
@@ -424,11 +450,21 @@ static const enum max1363_modes max1363_mode_list[] = {
d0m1to2m3, d1m0to3m2,
};
-#define MAX1363_EV_M \
- (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) \
- | IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
+static const struct iio_event_spec max1363_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
-#define MAX1363_CHAN_U(num, addr, si, bits, evmask) \
+#define MAX1363_CHAN_U(num, addr, si, bits, ev_spec, num_ev_spec) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -444,11 +480,12 @@ static const enum max1363_modes max1363_mode_list[] = {
.endianness = IIO_BE, \
}, \
.scan_index = si, \
- .event_mask = evmask, \
+ .event_spec = ev_spec, \
+ .num_event_specs = num_ev_spec, \
}
/* bipolar channel */
-#define MAX1363_CHAN_B(num, num2, addr, si, bits, evmask) \
+#define MAX1363_CHAN_B(num, num2, addr, si, bits, ev_spec, num_ev_spec) \
{ \
.type = IIO_VOLTAGE, \
.differential = 1, \
@@ -466,28 +503,32 @@ static const enum max1363_modes max1363_mode_list[] = {
.endianness = IIO_BE, \
}, \
.scan_index = si, \
- .event_mask = evmask, \
+ .event_spec = ev_spec, \
+ .num_event_specs = num_ev_spec, \
}
-#define MAX1363_4X_CHANS(bits, em) { \
- MAX1363_CHAN_U(0, _s0, 0, bits, em), \
- MAX1363_CHAN_U(1, _s1, 1, bits, em), \
- MAX1363_CHAN_U(2, _s2, 2, bits, em), \
- MAX1363_CHAN_U(3, _s3, 3, bits, em), \
- MAX1363_CHAN_B(0, 1, d0m1, 4, bits, em), \
- MAX1363_CHAN_B(2, 3, d2m3, 5, bits, em), \
- MAX1363_CHAN_B(1, 0, d1m0, 6, bits, em), \
- MAX1363_CHAN_B(3, 2, d3m2, 7, bits, em), \
- IIO_CHAN_SOFT_TIMESTAMP(8) \
+#define MAX1363_4X_CHANS(bits, ev_spec, num_ev_spec) { \
+ MAX1363_CHAN_U(0, _s0, 0, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec), \
+ IIO_CHAN_SOFT_TIMESTAMP(8) \
}
-static const struct iio_chan_spec max1036_channels[] = MAX1363_4X_CHANS(8, 0);
-static const struct iio_chan_spec max1136_channels[] = MAX1363_4X_CHANS(10, 0);
-static const struct iio_chan_spec max1236_channels[] = MAX1363_4X_CHANS(12, 0);
+static const struct iio_chan_spec max1036_channels[] =
+ MAX1363_4X_CHANS(8, NULL, 0);
+static const struct iio_chan_spec max1136_channels[] =
+ MAX1363_4X_CHANS(10, NULL, 0);
+static const struct iio_chan_spec max1236_channels[] =
+ MAX1363_4X_CHANS(12, NULL, 0);
static const struct iio_chan_spec max1361_channels[] =
- MAX1363_4X_CHANS(10, MAX1363_EV_M);
+ MAX1363_4X_CHANS(10, max1363_events, ARRAY_SIZE(max1363_events));
static const struct iio_chan_spec max1363_channels[] =
- MAX1363_4X_CHANS(12, MAX1363_EV_M);
+ MAX1363_4X_CHANS(12, max1363_events, ARRAY_SIZE(max1363_events));
/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
@@ -511,32 +552,32 @@ static const enum max1363_modes max1238_mode_list[] = {
d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
};
-#define MAX1363_12X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits, 0), \
- MAX1363_CHAN_U(1, _s1, 1, bits, 0), \
- MAX1363_CHAN_U(2, _s2, 2, bits, 0), \
- MAX1363_CHAN_U(3, _s3, 3, bits, 0), \
- MAX1363_CHAN_U(4, _s4, 4, bits, 0), \
- MAX1363_CHAN_U(5, _s5, 5, bits, 0), \
- MAX1363_CHAN_U(6, _s6, 6, bits, 0), \
- MAX1363_CHAN_U(7, _s7, 7, bits, 0), \
- MAX1363_CHAN_U(8, _s8, 8, bits, 0), \
- MAX1363_CHAN_U(9, _s9, 9, bits, 0), \
- MAX1363_CHAN_U(10, _s10, 10, bits, 0), \
- MAX1363_CHAN_U(11, _s11, 11, bits, 0), \
- MAX1363_CHAN_B(0, 1, d0m1, 12, bits, 0), \
- MAX1363_CHAN_B(2, 3, d2m3, 13, bits, 0), \
- MAX1363_CHAN_B(4, 5, d4m5, 14, bits, 0), \
- MAX1363_CHAN_B(6, 7, d6m7, 15, bits, 0), \
- MAX1363_CHAN_B(8, 9, d8m9, 16, bits, 0), \
- MAX1363_CHAN_B(10, 11, d10m11, 17, bits, 0), \
- MAX1363_CHAN_B(1, 0, d1m0, 18, bits, 0), \
- MAX1363_CHAN_B(3, 2, d3m2, 19, bits, 0), \
- MAX1363_CHAN_B(5, 4, d5m4, 20, bits, 0), \
- MAX1363_CHAN_B(7, 6, d7m6, 21, bits, 0), \
- MAX1363_CHAN_B(9, 8, d9m8, 22, bits, 0), \
- MAX1363_CHAN_B(11, 10, d11m10, 23, bits, 0), \
- IIO_CHAN_SOFT_TIMESTAMP(24) \
+#define MAX1363_12X_CHANS(bits) { \
+ MAX1363_CHAN_U(0, _s0, 0, bits, NULL, 0), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, NULL, 0), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, NULL, 0), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, NULL, 0), \
+ MAX1363_CHAN_U(4, _s4, 4, bits, NULL, 0), \
+ MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \
+ MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \
+ MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \
+ MAX1363_CHAN_U(8, _s8, 8, bits, NULL, 0), \
+ MAX1363_CHAN_U(9, _s9, 9, bits, NULL, 0), \
+ MAX1363_CHAN_U(10, _s10, 10, bits, NULL, 0), \
+ MAX1363_CHAN_U(11, _s11, 11, bits, NULL, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0), \
+ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0), \
+ MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0), \
+ MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0), \
+ MAX1363_CHAN_B(8, 9, d8m9, 16, bits, NULL, 0), \
+ MAX1363_CHAN_B(10, 11, d10m11, 17, bits, NULL, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0), \
+ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0), \
+ MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0), \
+ MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0), \
+ MAX1363_CHAN_B(9, 8, d9m8, 22, bits, NULL, 0), \
+ MAX1363_CHAN_B(11, 10, d11m10, 23, bits, NULL, 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(24) \
}
static const struct iio_chan_spec max1038_channels[] = MAX1363_12X_CHANS(8);
static const struct iio_chan_spec max1138_channels[] = MAX1363_12X_CHANS(10);
@@ -561,22 +602,22 @@ static const enum max1363_modes max11608_mode_list[] = {
};
#define MAX1363_8X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits, 0), \
- MAX1363_CHAN_U(1, _s1, 1, bits, 0), \
- MAX1363_CHAN_U(2, _s2, 2, bits, 0), \
- MAX1363_CHAN_U(3, _s3, 3, bits, 0), \
- MAX1363_CHAN_U(4, _s4, 4, bits, 0), \
- MAX1363_CHAN_U(5, _s5, 5, bits, 0), \
- MAX1363_CHAN_U(6, _s6, 6, bits, 0), \
- MAX1363_CHAN_U(7, _s7, 7, bits, 0), \
- MAX1363_CHAN_B(0, 1, d0m1, 8, bits, 0), \
- MAX1363_CHAN_B(2, 3, d2m3, 9, bits, 0), \
- MAX1363_CHAN_B(4, 5, d4m5, 10, bits, 0), \
- MAX1363_CHAN_B(6, 7, d6m7, 11, bits, 0), \
- MAX1363_CHAN_B(1, 0, d1m0, 12, bits, 0), \
- MAX1363_CHAN_B(3, 2, d3m2, 13, bits, 0), \
- MAX1363_CHAN_B(5, 4, d5m4, 14, bits, 0), \
- MAX1363_CHAN_B(7, 6, d7m6, 15, bits, 0), \
+ MAX1363_CHAN_U(0, _s0, 0, bits, NULL, 0), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, NULL, 0), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, NULL, 0), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, NULL, 0), \
+ MAX1363_CHAN_U(4, _s4, 4, bits, NULL, 0), \
+ MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \
+ MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \
+ MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0), \
+ MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0), \
+ MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0), \
+ MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0), \
+ MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0), \
+ MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0), \
+ MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0), \
IIO_CHAN_SOFT_TIMESTAMP(16) \
}
static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
@@ -588,10 +629,10 @@ static const enum max1363_modes max11644_mode_list[] = {
};
#define MAX1363_2X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits, 0), \
- MAX1363_CHAN_U(1, _s1, 1, bits, 0), \
- MAX1363_CHAN_B(0, 1, d0m1, 2, bits, 0), \
- MAX1363_CHAN_B(1, 0, d1m0, 3, bits, 0), \
+ MAX1363_CHAN_U(0, _s0, 0, bits, NULL, 0), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, NULL, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 2, bits, NULL, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 3, bits, NULL, 0), \
IIO_CHAN_SOFT_TIMESTAMP(4) \
}
@@ -686,20 +727,22 @@ static IIO_CONST_ATTR(sampling_frequency_available,
"133000 665000 33300 16600 8300 4200 2000 1000");
static int max1363_read_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
{
struct max1363_state *st = iio_priv(indio_dev);
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
- *val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
+ if (dir == IIO_EV_DIR_FALLING)
+ *val = st->thresh_low[chan->channel];
else
- *val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
- return 0;
+ *val = st->thresh_high[chan->channel];
+ return IIO_VAL_INT;
}
static int max1363_write_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
{
struct max1363_state *st = iio_priv(indio_dev);
/* make it handle signed correctly as well */
@@ -714,13 +757,15 @@ static int max1363_write_thresh(struct iio_dev *indio_dev,
break;
}
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ switch (dir) {
case IIO_EV_DIR_FALLING:
- st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
+ st->thresh_low[chan->channel] = val;
break;
case IIO_EV_DIR_RISING:
- st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
+ st->thresh_high[chan->channel] = val;
break;
+ default:
+ return -EINVAL;
}
return 0;
@@ -755,24 +800,25 @@ static irqreturn_t max1363_event_handler(int irq, void *private)
u8 tx[2] = { st->setupbyte,
MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0 };
- i2c_master_recv(st->client, &rx, 1);
+ st->recv(st->client, &rx, 1);
mask = rx;
for_each_set_bit(loc, &mask, 8)
iio_push_event(indio_dev, max1363_event_codes[loc], timestamp);
- i2c_master_send(st->client, tx, 2);
+ st->send(st->client, tx, 2);
return IRQ_HANDLED;
}
static int max1363_read_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct max1363_state *st = iio_priv(indio_dev);
int val;
- int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
+ int number = chan->channel;
mutex_lock(&indio_dev->mlock);
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
+ if (dir == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
else
val = (1 << number) & st->mask_high;
@@ -794,9 +840,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
st->setupbyte &= ~MAX1363_SETUP_MONITOR_SETUP;
st->configbyte &= ~MAX1363_SCAN_MASK;
st->monitor_on = false;
- return max1363_write_basic_config(st->client,
- st->setupbyte,
- st->configbyte);
+ return max1363_write_basic_config(st);
}
/* Ensure we are in the relevant mode */
@@ -858,7 +902,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
}
- ret = i2c_master_send(st->client, tx_buf, len);
+ ret = st->send(st->client, tx_buf, len);
if (ret < 0)
goto error_ret;
if (ret != len) {
@@ -875,7 +919,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
*/
tx_buf[0] = st->setupbyte;
tx_buf[1] = MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0;
- ret = i2c_master_send(st->client, tx_buf, 2);
+ ret = st->send(st->client, tx_buf, 2);
if (ret < 0)
goto error_ret;
if (ret != 2) {
@@ -917,17 +961,17 @@ error_ret:
}
static int max1363_write_event_config(struct iio_dev *indio_dev,
- u64 event_code,
- int state)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
{
int ret = 0;
struct max1363_state *st = iio_priv(indio_dev);
u16 unifiedmask;
- int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
+ int number = chan->channel;
mutex_lock(&indio_dev->mlock);
unifiedmask = st->mask_low | st->mask_high;
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING) {
+ if (dir == IIO_EV_DIR_FALLING) {
if (state == 0)
st->mask_low &= ~(1 << number);
@@ -995,10 +1039,10 @@ static const struct iio_info max1238_info = {
};
static const struct iio_info max1363_info = {
- .read_event_value = &max1363_read_thresh,
- .write_event_value = &max1363_write_thresh,
- .read_event_config = &max1363_read_event_config,
- .write_event_config = &max1363_write_event_config,
+ .read_event_value_new = &max1363_read_thresh,
+ .write_event_value_new = &max1363_write_thresh,
+ .read_event_config_new = &max1363_read_event_config,
+ .write_event_config_new = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
.update_scan_mode = &max1363_update_scan_mode,
.driver_module = THIS_MODULE,
@@ -1436,7 +1480,6 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1363_state *st = iio_priv(indio_dev);
- s64 time_ns;
__u8 *rxbuf;
int b_sent;
size_t d_size;
@@ -1464,17 +1507,13 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
if (rxbuf == NULL)
goto done;
if (st->chip_info->bits != 8)
- b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
+ b_sent = st->recv(st->client, rxbuf, numvals * 2);
else
- b_sent = i2c_master_recv(st->client, rxbuf, numvals);
+ b_sent = st->recv(st->client, rxbuf, numvals);
if (b_sent < 0)
goto done_free;
- time_ns = iio_get_time_ns();
-
- if (indio_dev->scan_timestamp)
- memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
- iio_push_to_buffers(indio_dev, rxbuf);
+ iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, iio_get_time_ns());
done_free:
kfree(rxbuf);
@@ -1484,12 +1523,6 @@ done:
return IRQ_HANDLED;
}
-static const struct iio_buffer_setup_ops max1363_buffered_setup_ops = {
- .postenable = &iio_triggered_buffer_postenable,
- .preenable = &iio_sw_buffer_preenable,
- .predisable = &iio_triggered_buffer_predisable,
-};
-
static int max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1543,6 +1576,18 @@ static int max1363_probe(struct i2c_client *client,
st->vref_uv = vref_uv;
}
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ st->send = i2c_master_send;
+ st->recv = i2c_master_recv;
+ } else if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)
+ && st->chip_info->bits == 8) {
+ st->send = max1363_smbus_send;
+ st->recv = max1363_smbus_recv;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error_disable_reg;
+ }
+
ret = max1363_alloc_scan_masks(indio_dev);
if (ret)
goto error_disable_reg;
@@ -1559,7 +1604,7 @@ static int max1363_probe(struct i2c_client *client,
goto error_disable_reg;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
- &max1363_trigger_handler, &max1363_buffered_setup_ops);
+ &max1363_trigger_handler, NULL);
if (ret)
goto error_disable_reg;
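
The probe hunk above selects between raw I2C and byte-wise SMBus transfers through the new st->send/st->recv hooks; the SMBus wrappers themselves sit earlier in the patch and are not visible in this hunk. A minimal sketch of what such fallbacks could look like — the function names are taken from the hunk, the bodies are assumed:

#include <linux/i2c.h>

/* Illustrative only: byte-wise SMBus emulation of i2c_master_send()/_recv(),
 * matching the function-pointer signatures assigned in probe above. */
static int max1363_smbus_send(const struct i2c_client *client, const char *buf,
			      int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = i2c_smbus_write_byte(client, buf[i]);
		if (ret < 0)
			return ret;
	}
	return count;	/* i2c_master_send() returns the byte count on success */
}

static int max1363_smbus_recv(const struct i2c_client *client, char *buf,
			      int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = i2c_smbus_read_byte(client);
		if (ret < 0)
			return ret;
		buf[i] = ret;
	}
	return count;
}
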
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
new file mode 100644
index 00000000000..12948325431
--- /dev/null
+++ b/drivers/iio/adc/mcp3422.c
@@ -0,0 +1,410 @@
+/*
+ * mcp3422.c - driver for the Microchip mcp3422/3/4 chip family
+ *
+ * Copyright (C) 2013, Angelo Compagnucci
+ * Author: Angelo Compagnucci <angelo.compagnucci@gmail.com>
+ *
+ * Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf
+ *
+ * This driver exports the value of analog input voltage to sysfs; the
+ * voltage unit is nV.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Masks */
+#define MCP3422_CHANNEL_MASK 0x60
+#define MCP3422_PGA_MASK 0x03
+#define MCP3422_SRATE_MASK 0x0C
+#define MCP3422_SRATE_240 0x0
+#define MCP3422_SRATE_60 0x1
+#define MCP3422_SRATE_15 0x2
+#define MCP3422_SRATE_3 0x3
+#define MCP3422_PGA_1 0
+#define MCP3422_PGA_2 1
+#define MCP3422_PGA_4 2
+#define MCP3422_PGA_8 3
+#define MCP3422_CONT_SAMPLING 0x10
+
+#define MCP3422_CHANNEL(config) (((config) & MCP3422_CHANNEL_MASK) >> 5)
+#define MCP3422_PGA(config) ((config) & MCP3422_PGA_MASK)
+#define MCP3422_SAMPLE_RATE(config) (((config) & MCP3422_SRATE_MASK) >> 2)
+
+#define MCP3422_CHANNEL_VALUE(value) (((value) << 5) & MCP3422_CHANNEL_MASK)
+#define MCP3422_PGA_VALUE(value) ((value) & MCP3422_PGA_MASK)
+#define MCP3422_SAMPLE_RATE_VALUE(value) ((value << 2) & MCP3422_SRATE_MASK)
+
+#define MCP3422_CHAN(_index) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ }
+
+/* LSB is in nV to eliminate floating point */
+static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
+
+/*
+ * Scales are calculated as:
+ * rates_to_lsb[sample_rate] / (1 << pga);
+ * the PGA gain is 2^pga, i.e. 1, 2, 4 or 8 for pga codes 0..3.
+ */
+
+static const int mcp3422_scales[4][4] = {
+ { 1000000, 250000, 62500, 15625 },
+ { 500000, 125000, 31250, 7812 },
+ { 250000, 62500, 15625, 3906 },
+ { 125000, 31250, 7812, 1953 } };
+
+/* Constant msleep times for data acquisitions */
+static const int mcp3422_read_times[4] = {
+ [MCP3422_SRATE_240] = 1000 / 240,
+ [MCP3422_SRATE_60] = 1000 / 60,
+ [MCP3422_SRATE_15] = 1000 / 15,
+ [MCP3422_SRATE_3] = 1000 / 3 };
+
+/* sample rates to integer conversion table */
+static const int mcp3422_sample_rates[4] = {
+ [MCP3422_SRATE_240] = 240,
+ [MCP3422_SRATE_60] = 60,
+ [MCP3422_SRATE_15] = 15,
+ [MCP3422_SRATE_3] = 3 };
+
+/* sample rates to sign extension table */
+static const int mcp3422_sign_extend[4] = {
+ [MCP3422_SRATE_240] = 12,
+ [MCP3422_SRATE_60] = 14,
+ [MCP3422_SRATE_15] = 16,
+ [MCP3422_SRATE_3] = 18 };
+
+/* Client data (each client gets its own) */
+struct mcp3422 {
+ struct i2c_client *i2c;
+ u8 config;
+ u8 pga[4];
+ struct mutex lock;
+};
+
+static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
+{
+ int ret;
+
+ mutex_lock(&adc->lock);
+
+ ret = i2c_master_send(adc->i2c, &newconfig, 1);
+ if (ret > 0) {
+ adc->config = newconfig;
+ ret = 0;
+ }
+
+ mutex_unlock(&adc->lock);
+
+ return ret;
+}
+
+static int mcp3422_read(struct mcp3422 *adc, int *value, u8 *config)
+{
+ int ret = 0;
+ u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
+ u8 buf[4] = {0, 0, 0, 0};
+ u32 temp;
+
+ if (sample_rate == MCP3422_SRATE_3) {
+ ret = i2c_master_recv(adc->i2c, buf, 4);
+ temp = buf[0] << 16 | buf[1] << 8 | buf[2];
+ *config = buf[3];
+ } else {
+ ret = i2c_master_recv(adc->i2c, buf, 3);
+ temp = buf[0] << 8 | buf[1];
+ *config = buf[2];
+ }
+
+ *value = sign_extend32(temp, mcp3422_sign_extend[sample_rate]);
+
+ return ret;
+}
+
+static int mcp3422_read_channel(struct mcp3422 *adc,
+ struct iio_chan_spec const *channel, int *value)
+{
+ int ret;
+ u8 config;
+ u8 req_channel = channel->channel;
+
+ if (req_channel != MCP3422_CHANNEL(adc->config)) {
+ config = adc->config;
+ config &= ~MCP3422_CHANNEL_MASK;
+ config |= MCP3422_CHANNEL_VALUE(req_channel);
+ config &= ~MCP3422_PGA_MASK;
+ config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
+ ret = mcp3422_update_config(adc, config);
+ if (ret < 0)
+ return ret;
+ msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
+ }
+
+ return mcp3422_read(adc, value, &config);
+}
+
+static int mcp3422_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *val1,
+ int *val2, long mask)
+{
+ struct mcp3422 *adc = iio_priv(iio);
+ int err;
+
+ u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
+ u8 pga = MCP3422_PGA(adc->config);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = mcp3422_read_channel(adc, channel, val1);
+ if (err < 0)
+ return -EINVAL;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+
+ *val1 = 0;
+ *val2 = mcp3422_scales[sample_rate][pga];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val1 = mcp3422_sample_rates[MCP3422_SAMPLE_RATE(adc->config)];
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int mcp3422_write_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int val1,
+ int val2, long mask)
+{
+ struct mcp3422 *adc = iio_priv(iio);
+ u8 temp;
+ u8 config = adc->config;
+ u8 req_channel = channel->channel;
+ u8 sample_rate = MCP3422_SAMPLE_RATE(config);
+ u8 i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val1 != 0)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mcp3422_scales[0]); i++) {
+ if (val2 == mcp3422_scales[sample_rate][i]) {
+ adc->pga[req_channel] = i;
+
+ config &= ~MCP3422_CHANNEL_MASK;
+ config |= MCP3422_CHANNEL_VALUE(req_channel);
+ config &= ~MCP3422_PGA_MASK;
+ config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
+
+ return mcp3422_update_config(adc, config);
+ }
+ }
+ return -EINVAL;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (val1) {
+ case 240:
+ temp = MCP3422_SRATE_240;
+ break;
+ case 60:
+ temp = MCP3422_SRATE_60;
+ break;
+ case 15:
+ temp = MCP3422_SRATE_15;
+ break;
+ case 3:
+ temp = MCP3422_SRATE_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config &= ~MCP3422_CHANNEL_MASK;
+ config |= MCP3422_CHANNEL_VALUE(req_channel);
+ config &= ~MCP3422_SRATE_MASK;
+ config |= MCP3422_SAMPLE_RATE_VALUE(temp);
+
+ return mcp3422_update_config(adc, config);
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int mcp3422_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t mcp3422_show_scales(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcp3422 *adc = iio_priv(dev_to_iio_dev(dev));
+ u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
+
+ return sprintf(buf, "0.%09u 0.%09u 0.%09u 0.%09u\n",
+ mcp3422_scales[sample_rate][0],
+ mcp3422_scales[sample_rate][1],
+ mcp3422_scales[sample_rate][2],
+ mcp3422_scales[sample_rate][3]);
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("240 60 15 3");
+static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO,
+ mcp3422_show_scales, NULL, 0);
+
+static struct attribute *mcp3422_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mcp3422_attribute_group = {
+ .attrs = mcp3422_attributes,
+};
+
+static const struct iio_chan_spec mcp3422_channels[] = {
+ MCP3422_CHAN(0),
+ MCP3422_CHAN(1),
+};
+
+static const struct iio_chan_spec mcp3424_channels[] = {
+ MCP3422_CHAN(0),
+ MCP3422_CHAN(1),
+ MCP3422_CHAN(2),
+ MCP3422_CHAN(3),
+};
+
+static const struct iio_info mcp3422_info = {
+ .read_raw = mcp3422_read_raw,
+ .write_raw = mcp3422_write_raw,
+ .write_raw_get_fmt = mcp3422_write_raw_get_fmt,
+ .attrs = &mcp3422_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int mcp3422_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct mcp3422 *adc;
+ int err;
+ u8 config;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->i2c = client;
+
+ mutex_init(&adc->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mcp3422_info;
+
+ switch ((unsigned int)(id->driver_data)) {
+ case 2:
+ case 3:
+ indio_dev->channels = mcp3422_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mcp3422_channels);
+ break;
+ case 4:
+ indio_dev->channels = mcp3424_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mcp3424_channels);
+ break;
+ }
+
+ /* meaningful default configuration */
+ config = (MCP3422_CONT_SAMPLING
+ | MCP3422_CHANNEL_VALUE(1)
+ | MCP3422_PGA_VALUE(MCP3422_PGA_1)
+ | MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240));
+ mcp3422_update_config(adc, config);
+
+ err = iio_device_register(indio_dev);
+ if (err < 0)
+ return err;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ return 0;
+}
+
+static int mcp3422_remove(struct i2c_client *client)
+{
+ iio_device_unregister(i2c_get_clientdata(client));
+ return 0;
+}
+
+static const struct i2c_device_id mcp3422_id[] = {
+ { "mcp3422", 2 },
+ { "mcp3423", 3 },
+ { "mcp3424", 4 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcp3422_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mcp3422_of_match[] = {
+ { .compatible = "mcp3422" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp3422_of_match);
+#endif
+
+static struct i2c_driver mcp3422_driver = {
+ .driver = {
+ .name = "mcp3422",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mcp3422_of_match),
+ },
+ .probe = mcp3422_probe,
+ .remove = mcp3422_remove,
+ .id_table = mcp3422_id,
+};
+module_i2c_driver(mcp3422_driver);
+
+MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
+MODULE_DESCRIPTION("Microchip mcp3422/3/4 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index bdf03468f3b..e525aa6475c 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/log2.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -189,7 +190,7 @@ static int nau7802_read_irq(struct iio_dev *indio_dev,
struct nau7802_state *st = iio_priv(indio_dev);
int ret;
- INIT_COMPLETION(st->value_ok);
+ reinit_completion(&st->value_ok);
enable_irq(st->client->irq);
nau7802_sync(st);
@@ -569,7 +570,7 @@ static struct i2c_driver nau7802_driver = {
.id_table = nau7802_i2c_id,
.driver = {
.name = "nau7802",
- .of_match_table = of_match_ptr(nau7802_dt_ids),
+ .of_match_table = nau7802_dt_ids,
},
};
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index ee5f72bffe5..b3a82b4d1a7 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index a952538a1a8..728411ec764 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -28,12 +28,16 @@
#include <linux/iio/driver.h>
#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
struct tiadc_device {
struct ti_tscadc_dev *mfd_tscadc;
int channels;
u8 channel_line[8];
u8 channel_step[8];
+ int buffer_en_ch_steps;
+ u16 data[8];
};
static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
@@ -56,8 +60,14 @@ static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
return step_en;
}
-static void tiadc_step_config(struct tiadc_device *adc_dev)
+static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
+ return 1 << adc_dev->channel_step[chan];
+}
+
+static void tiadc_step_config(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
unsigned int stepconfig;
int i, steps;
@@ -72,7 +82,11 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
*/
steps = TOTAL_STEPS - adc_dev->channels;
- stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
+ if (iio_buffer_enabled(indio_dev))
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1
+ | STEPCONFIG_MODE_SWCNT;
+ else
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
for (i = 0; i < adc_dev->channels; i++) {
int chan;
@@ -85,9 +99,175 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
adc_dev->channel_step[i] = steps;
steps++;
}
+}
+
+static irqreturn_t tiadc_irq_h(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int status, config;
+ status = tiadc_readl(adc_dev, REG_IRQSTATUS);
+
+ /*
+ * ADC and touchscreen share the IRQ line.
+ * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only
+ */
+ if (status & IRQENB_FIFO1OVRRUN) {
+ /* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
+ config = tiadc_readl(adc_dev, REG_CTRL);
+ config &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, config);
+ tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
+ | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+ tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
+ return IRQ_HANDLED;
+ } else if (status & IRQENB_FIFO1THRES) {
+ /* Disable irq and wake worker thread */
+ tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t tiadc_worker_h(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i, k, fifo1count, read;
+ u16 *data = adc_dev->data;
+
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (k = 0; k < fifo1count; k = k + i) {
+ for (i = 0; i < (indio_dev->scan_bytes)/2; i++) {
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+ data[i] = read & FIFOREAD_DATA_MASK;
+ }
+ iio_push_to_buffers(indio_dev, (u8 *) data);
+ }
+
+ tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
+ tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);
+ return IRQ_HANDLED;
}
+static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i, fifo1count, read;
+
+ tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
+ IRQENB_FIFO1OVRRUN |
+ IRQENB_FIFO1UNDRFLW));
+
+ /* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+
+ return 0;
+}
+
+static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+ unsigned int enb = 0;
+ u8 bit;
+
+ tiadc_step_config(indio_dev);
+ for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
+ enb |= (get_adc_step_bit(adc_dev, bit) << 1);
+ adc_dev->buffer_en_ch_steps = enb;
+
+ am335x_tsc_se_set(adc_dev->mfd_tscadc, enb);
+
+ tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
+ | IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
+ tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES
+ | IRQENB_FIFO1OVRRUN);
+
+ return 0;
+}
+
+static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int fifo1count, i, read;
+
+ tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
+ IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
+ am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
+
+ /* Flush FIFO of leftover data in the time it takes to disable adc */
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+
+ return 0;
+}
+
+static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ tiadc_step_config(indio_dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
+ .preenable = &tiadc_buffer_preenable,
+ .postenable = &tiadc_buffer_postenable,
+ .predisable = &tiadc_buffer_predisable,
+ .postdisable = &tiadc_buffer_postdisable,
+};
+
+static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
+ irqreturn_t (*pollfunc_bh)(int irq, void *p),
+ irqreturn_t (*pollfunc_th)(int irq, void *p),
+ int irq,
+ unsigned long flags,
+ const struct iio_buffer_setup_ops *setup_ops)
+{
+ int ret;
+
+ indio_dev->buffer = iio_kfifo_allocate(indio_dev);
+ if (!indio_dev->buffer)
+ return -ENOMEM;
+
+ ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh,
+ flags, indio_dev->name, indio_dev);
+ if (ret)
+ goto error_kfifo_free;
+
+ indio_dev->setup_ops = setup_ops;
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
+ if (ret)
+ goto error_free_irq;
+
+ return 0;
+
+error_free_irq:
+ free_irq(irq, indio_dev);
+error_kfifo_free:
+ iio_kfifo_free(indio_dev->buffer);
+ return ret;
+}
+
+static void tiadc_iio_buffered_hardware_remove(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+
+ free_irq(adc_dev->mfd_tscadc->irq, indio_dev);
+ iio_kfifo_free(indio_dev->buffer);
+ iio_buffer_unregister(indio_dev);
+}
+
+
static const char * const chan_name_ain[] = {
"AIN0",
"AIN1",
@@ -120,9 +300,10 @@ static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
chan->channel = adc_dev->channel_line[i];
chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
chan->datasheet_name = chan_name_ain[chan->channel];
+ chan->scan_index = i;
chan->scan_type.sign = 'u';
chan->scan_type.realbits = 12;
- chan->scan_type.storagebits = 32;
+ chan->scan_type.storagebits = 16;
}
indio_dev->channels = chan_array;
@@ -142,11 +323,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
struct tiadc_device *adc_dev = iio_priv(indio_dev);
int i, map_val;
unsigned int fifo1count, read, stepid;
- u32 step = UINT_MAX;
bool found = false;
u32 step_en;
unsigned long timeout = jiffies + usecs_to_jiffies
(IDLE_TIMEOUT * adc_dev->channels);
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
step_en = get_adc_step_mask(adc_dev);
am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
@@ -168,15 +352,6 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
* Hence we need to flush out this data.
*/
- for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
- if (chan->channel == adc_dev->channel_line[i]) {
- step = adc_dev->channel_step[i];
- break;
- }
- }
- if (WARN_ON_ONCE(step == UINT_MAX))
- return -EINVAL;
-
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
for (i = 0; i < fifo1count; i++) {
read = tiadc_readl(adc_dev, REG_FIFO1);
@@ -186,7 +361,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
if (stepid == map_val) {
read = read & FIFOREAD_DATA_MASK;
found = true;
- *val = read;
+ *val = (u16) read;
}
}
@@ -237,20 +412,33 @@ static int tiadc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &tiadc_info;
- tiadc_step_config(adc_dev);
+ tiadc_step_config(indio_dev);
+ tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
err = tiadc_channel_init(indio_dev, adc_dev->channels);
if (err < 0)
return err;
- err = iio_device_register(indio_dev);
+ err = tiadc_iio_buffered_hardware_setup(indio_dev,
+ &tiadc_worker_h,
+ &tiadc_irq_h,
+ adc_dev->mfd_tscadc->irq,
+ IRQF_SHARED,
+ &tiadc_buffer_setup_ops);
+
if (err)
goto err_free_channels;
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_buffer_unregister;
+
platform_set_drvdata(pdev, indio_dev);
return 0;
+err_buffer_unregister:
+ tiadc_iio_buffered_hardware_remove(indio_dev);
err_free_channels:
tiadc_channels_remove(indio_dev);
return err;
@@ -263,6 +451,7 @@ static int tiadc_remove(struct platform_device *pdev)
u32 step_en;
iio_device_unregister(indio_dev);
+ tiadc_iio_buffered_hardware_remove(indio_dev);
tiadc_channels_remove(indio_dev);
step_en = get_adc_step_mask(adc_dev);
@@ -301,7 +490,7 @@ static int tiadc_resume(struct device *dev)
restore &= ~(CNTRLREG_POWERDOWN);
tiadc_writel(adc_dev, REG_CTRL, restore);
- tiadc_step_config(adc_dev);
+ tiadc_step_config(indio_dev);
return 0;
}
@@ -326,7 +515,7 @@ static struct platform_driver tiadc_driver = {
.name = "TI-am335x-adc",
.owner = THIS_MODULE,
.pm = TIADC_PM_OPS,
- .of_match_table = of_match_ptr(ti_adc_dt_ids),
+ .of_match_table = ti_adc_dt_ids,
},
.probe = tiadc_probe,
.remove = tiadc_remove,
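
With the kfifo buffer and FIFO1 threshold IRQ in place, samples can be drained from the IIO character device; a minimal userspace sketch (the device node path and the set of enabled scan elements are assumptions):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint16_t buf[64];
	ssize_t n, i;
	int fd;

	/* Assumed node; enable in_voltageN_en and buffer/enable via sysfs first */
	fd = open("/dev/iio:device0", O_RDONLY);
	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		for (i = 0; i < n / 2; i++)
			printf("%u\n", buf[i] & 0xfff); /* 12 valid bits, u16 storage */

	close(fd);
	return 0;
}
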
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 0ea96c058c0..53e1c645cee 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -887,7 +887,7 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
int irq;
int ret;
- match = of_match_device(of_match_ptr(of_twl6030_match_tbl), dev);
+ match = of_match_device(of_twl6030_match_tbl, dev);
if (!match)
return -EINVAL;
@@ -948,9 +948,7 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
indio_dev->channels = pdata->iio_channels;
indio_dev->num_channels = pdata->nchannels;
- ret = iio_device_register(indio_dev);
-
- return ret;
+ return iio_device_register(indio_dev);
}
static int twl6030_gpadc_remove(struct platform_device *pdev)
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 415f3c6efd7..2d9c6f8c06d 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -7,26 +7,36 @@
struct iio_cb_buffer {
struct iio_buffer buffer;
- int (*cb)(u8 *data, void *private);
+ int (*cb)(const void *data, void *private);
void *private;
struct iio_channel *channels;
};
-static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
+static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
- struct iio_cb_buffer *cb_buff = container_of(buffer,
- struct iio_cb_buffer,
- buffer);
+ return container_of(buffer, struct iio_cb_buffer, buffer);
+}
+static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
+{
+ struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
return cb_buff->cb(data, cb_buff->private);
}
-static struct iio_buffer_access_funcs iio_cb_access = {
+static void iio_buffer_cb_release(struct iio_buffer *buffer)
+{
+ struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
+ kfree(cb_buff->buffer.scan_mask);
+ kfree(cb_buff);
+}
+
+static const struct iio_buffer_access_funcs iio_cb_access = {
.store_to = &iio_buffer_cb_store_to,
+ .release = &iio_buffer_cb_release,
};
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
- int (*cb)(u8 *data,
+ int (*cb)(const void *data,
void *private),
void *private)
{
@@ -104,9 +114,8 @@ EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
- kfree(cb_buff->buffer.scan_mask);
iio_channel_release_all(cb_buff->channels);
- kfree(cb_buff);
+ iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
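
For completeness, the consumer side of the changed callback signature; a minimal sketch of an in-kernel user (the consumer device, channel map and private pointer are assumptions):

#include <linux/iio/consumer.h>

/* The callback now takes const void *: one scan per invocation */
static int my_scan_cb(const void *data, void *private)
{
	/* interpret data according to the enabled channels' scan types */
	return 0;
}

/* In the consumer's probe (sketch):
 *	struct iio_cb_buffer *cb;
 *
 *	cb = iio_channel_get_all_cb(dev, my_scan_cb, priv);
 *	if (IS_ERR(cb))
 *		return PTR_ERR(cb);
 *	iio_channel_start_all_cb(cb);
 * and on removal:
 *	iio_channel_stop_all_cb(cb);
 *	iio_channel_release_all_cb(cb);
 */
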
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 87419c41b99..b6e77e0fc42 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -34,6 +34,12 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
struct hid_sensor_common *st = iio_trigger_get_drvdata(trig);
int state_val;
+ if (state) {
+ if (sensor_hub_device_open(st->hsdev))
+ return -EIO;
+ } else
+ sensor_hub_device_close(st->hsdev);
+
state_val = state ? 1 : 0;
if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS))
++state_val;
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index 71a2c5f63b9..1665c8e4b62 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -113,11 +113,8 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
if (len < 0)
goto st_sensors_get_buffer_element_error;
- if (indio_dev->scan_timestamp)
- *(s64 *)((u8 *)sdata->buffer_data +
- ALIGN(len, sizeof(s64))) = pf->timestamp;
-
- iio_push_to_buffers(indio_dev, sdata->buffer_data);
+ iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
+ pf->timestamp);
st_sensors_get_buffer_element_error:
iio_trigger_notify_done(indio_dev->trig);
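
Several hunks in this series (max1363, st_sensors) replace the open-coded timestamp copy with iio_push_to_buffers_with_timestamp(); roughly what that helper does, sketched here — see include/linux/iio/buffer.h for the authoritative definition:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

/* Sketch: place the timestamp in the last 64-bit slot of the scan, then push */
static inline int push_with_ts_sketch(struct iio_dev *indio_dev,
				      void *data, s64 timestamp)
{
	if (indio_dev->scan_timestamp) {
		size_t ts_offset = indio_dev->scan_bytes / sizeof(s64) - 1;

		((s64 *)data)[ts_offset] = timestamp;
	}
	return iio_push_to_buffers(indio_dev, data);
}
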
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 965ee22d3ac..7ba1ef27021 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -198,21 +198,17 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
-int st_sensors_init_sensor(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
{
- int err;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_init(&sdata->tb.buf_lock);
-
switch (pdata->drdy_int_pin) {
case 1:
if (sdata->sensor->drdy_irq.mask_int1 == 0) {
dev_err(&indio_dev->dev,
"DRDY on INT1 not available.\n");
- err = -EINVAL;
- goto init_error;
+ return -EINVAL;
}
sdata->drdy_int_pin = 1;
break;
@@ -220,39 +216,53 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
if (sdata->sensor->drdy_irq.mask_int2 == 0) {
dev_err(&indio_dev->dev,
"DRDY on INT2 not available.\n");
- err = -EINVAL;
- goto init_error;
+ return -EINVAL;
}
sdata->drdy_int_pin = 2;
break;
default:
dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n");
- err = -EINVAL;
- goto init_error;
+ return -EINVAL;
}
+ return 0;
+}
+
+int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int err = 0;
+
+ mutex_init(&sdata->tb.buf_lock);
+
+ if (pdata)
+ err = st_sensors_set_drdy_int_pin(indio_dev, pdata);
+
err = st_sensors_set_enable(indio_dev, false);
if (err < 0)
- goto init_error;
+ return err;
- err = st_sensors_set_fullscale(indio_dev,
- sdata->current_fullscale->num);
- if (err < 0)
- goto init_error;
+ if (sdata->current_fullscale) {
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->current_fullscale->num);
+ if (err < 0)
+ return err;
+ } else
+ dev_info(&indio_dev->dev, "Full-scale not possible\n");
err = st_sensors_set_odr(indio_dev, sdata->odr);
if (err < 0)
- goto init_error;
+ return err;
/* set BDU */
err = st_sensors_write_data_with_mask(indio_dev,
sdata->sensor->bdu.addr, sdata->sensor->bdu.mask, true);
if (err < 0)
- goto init_error;
+ return err;
err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
-init_error:
return err;
}
EXPORT_SYMBOL(st_sensors_init_sensor);
@@ -263,6 +273,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
u8 drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ if (!sdata->sensor->drdy_irq.addr)
+ return 0;
+
/* Enable/Disable the interrupt generator 1. */
if (sdata->sensor->drdy_irq.ig1.en_addr > 0) {
err = st_sensors_write_data_with_mask(indio_dev,
@@ -318,10 +331,8 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
unsigned int byte_for_channel = ch->scan_type.storagebits >> 3;
outdata = kmalloc(byte_for_channel, GFP_KERNEL);
- if (!outdata) {
- err = -EINVAL;
- goto st_sensors_read_axis_data_error;
- }
+ if (!outdata)
+ return -ENOMEM;
err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
ch->address, byte_for_channel,
@@ -336,7 +347,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
st_sensors_free_memory:
kfree(outdata);
-st_sensors_read_axis_data_error:
+
return err;
}
@@ -349,28 +360,25 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
err = -EBUSY;
- goto read_error;
+ goto out;
} else {
err = st_sensors_set_enable(indio_dev, true);
if (err < 0)
- goto read_error;
+ goto out;
msleep((sdata->sensor->bootime * 1000) / sdata->odr);
err = st_sensors_read_axis_data(indio_dev, ch, val);
if (err < 0)
- goto read_error;
+ goto out;
*val = *val >> ch->scan_type.shift;
err = st_sensors_set_enable(indio_dev, false);
}
+out:
mutex_unlock(&indio_dev->mlock);
return err;
-
-read_error:
- mutex_unlock(&indio_dev->mlock);
- return err;
}
EXPORT_SYMBOL(st_sensors_read_info_raw);
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 3c6a78a75b7..f378ca8033d 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -57,7 +57,7 @@ config AD5446
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
- AD5620, AD5621, AD5622, AD5640, AD5660, AD5662 DACs.
+ AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs.
To compile this driver as a module, choose M here: the
module will be called ad5446.
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index a3a52be4852..cb9c6366032 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -239,10 +239,9 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
if (scale_uv < 0)
return scale_uv;
- scale_uv = (scale_uv * 100) >> chan->scan_type.realbits;
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
break;
}
@@ -285,8 +284,9 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.name = "powerdown",
.read = ad5064_read_dac_powerdown,
.write = ad5064_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", false, &ad5064_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5064_powerdown_mode_enum),
{ },
};
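
The DAC conversions in this series switch the scale attribute from pre-divided INT_PLUS_MICRO values to IIO_VAL_FRACTIONAL_LOG2, where the core reports val / 2^val2. A quick userspace sketch of the resulting number for the ad5064 case above (vref in mV, realbits as the shift; the concrete values are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int val = 2500;		/* assumed 2.5 V reference, in mV */
	int val2 = 12;		/* assumed 12-bit resolution */
	uint64_t nano = (uint64_t)val * 1000000000ull >> val2;

	/* Mirrors how the IIO core formats IIO_VAL_FRACTIONAL_LOG2 */
	printf("%llu.%09llu mV per LSB\n",
	       (unsigned long long)(nano / 1000000000ull),
	       (unsigned long long)(nano % 1000000000ull));
	return 0;
}
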
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index d2da71ece74..b968af50db0 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -379,15 +379,14 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
*val = ret >> chan->scan_type.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- /* vout = 4 * vref * dac_code */
- scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100;
+ scale_uv = ad5360_get_channel_vref(st, chan->channel);
if (scale_uv < 0)
return scale_uv;
- scale_uv >>= (chan->scan_type.realbits);
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
+ /* vout = 4 * vref * dac_code */
+ *val = scale_uv * 4 / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_CALIBBIAS:
ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
chan->address);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 1c44ae3920e..a59ff0e7b88 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -204,7 +204,6 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct ad5380_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
int ret;
switch (info) {
@@ -225,10 +224,9 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
val -= (1 << chan->scan_type.realbits) / 2;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = ((2 * st->vref) >> chan->scan_type.realbits) * 100;
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 2 * st->vref;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
break;
}
@@ -247,8 +245,10 @@ static struct iio_chan_spec_ext_info ad5380_ext_info[] = {
.name = "powerdown",
.read = ad5380_read_dac_powerdown,
.write = ad5380_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", true, &ad5380_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5380_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5380_powerdown_mode_enum),
{ },
};
@@ -269,72 +269,72 @@ static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
[ID_AD5380_3] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 40,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5380_5] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 40,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
[ID_AD5381_3] = {
.channel_template = AD5380_CHANNEL(12),
.num_channels = 16,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5381_5] = {
.channel_template = AD5380_CHANNEL(12),
.num_channels = 16,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
[ID_AD5382_3] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 32,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5382_5] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 32,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
[ID_AD5383_3] = {
.channel_template = AD5380_CHANNEL(12),
.num_channels = 32,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5383_5] = {
.channel_template = AD5380_CHANNEL(12),
.num_channels = 32,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
[ID_AD5390_3] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 16,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5390_5] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 16,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
[ID_AD5391_3] = {
.channel_template = AD5380_CHANNEL(12),
.num_channels = 16,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5391_5] = {
.channel_template = AD5380_CHANNEL(12),
.num_channels = 16,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
[ID_AD5392_3] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 8,
- .int_vref = 1250000,
+ .int_vref = 1250,
},
[ID_AD5392_5] = {
.channel_template = AD5380_CHANNEL(14),
.num_channels = 8,
- .int_vref = 2500000,
+ .int_vref = 2500,
},
};
@@ -393,7 +393,7 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- if (st->chip_info->int_vref == 2500000)
+ if (st->chip_info->int_vref == 2500)
ctrl |= AD5380_CTRL_INT_VREF_2V5;
st->vref_reg = devm_regulator_get(dev, "vref");
@@ -409,7 +409,7 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
if (ret < 0)
goto error_disable_reg;
- st->vref = ret;
+ st->vref = ret / 1000;
} else {
st->vref = st->chip_info->int_vref;
ctrl |= AD5380_CTRL_INT_VREF_EN;
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 1f78b14abb7..3eeaa82075f 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -80,6 +80,29 @@ struct ad5421_state {
} data[2] ____cacheline_aligned;
};
+static const struct iio_event_spec ad5421_current_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_event_spec ad5421_temp_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_chan_spec ad5421_channels[] = {
{
.type = IIO_CURRENT,
@@ -92,13 +115,14 @@ static const struct iio_chan_spec ad5421_channels[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
.scan_type = IIO_ST('u', 16, 16, 0),
- .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ .event_spec = ad5421_current_event,
+ .num_event_specs = ARRAY_SIZE(ad5421_current_event),
},
{
.type = IIO_TEMP,
.channel = -1,
- .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ .event_spec = ad5421_temp_event,
+ .num_event_specs = ARRAY_SIZE(ad5421_temp_event),
},
};
@@ -281,18 +305,11 @@ static inline unsigned int ad5421_get_offset(struct ad5421_state *st)
return (min * (1 << 16)) / (max - min);
}
-static inline unsigned int ad5421_get_scale(struct ad5421_state *st)
-{
- unsigned int min, max;
-
- ad5421_get_current_min_max(st, &min, &max);
- return ((max - min) * 1000) / (1 << 16);
-}
-
static int ad5421_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long m)
{
struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int min, max;
int ret;
if (chan->type != IIO_CURRENT)
@@ -306,9 +323,10 @@ static int ad5421_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = ad5421_get_scale(st);
- return IIO_VAL_INT_PLUS_MICRO;
+ ad5421_get_current_min_max(st, &min, &max);
+ *val = max - min;
+ *val2 = (1 << 16) * 1000;
+ return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_OFFSET:
*val = ad5421_get_offset(st);
return IIO_VAL_INT;
@@ -359,15 +377,15 @@ static int ad5421_write_raw(struct iio_dev *indio_dev,
}
static int ad5421_write_event_config(struct iio_dev *indio_dev,
- u64 event_code, int state)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
{
struct ad5421_state *st = iio_priv(indio_dev);
unsigned int mask;
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ switch (chan->type) {
case IIO_CURRENT:
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)
+ if (dir == IIO_EV_DIR_RISING)
mask = AD5421_FAULT_OVER_CURRENT;
else
mask = AD5421_FAULT_UNDER_CURRENT;
@@ -390,15 +408,15 @@ static int ad5421_write_event_config(struct iio_dev *indio_dev,
}
static int ad5421_read_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct ad5421_state *st = iio_priv(indio_dev);
unsigned int mask;
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ switch (chan->type) {
case IIO_CURRENT:
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)
+ if (dir == IIO_EV_DIR_RISING)
mask = AD5421_FAULT_OVER_CURRENT;
else
mask = AD5421_FAULT_UNDER_CURRENT;
@@ -413,12 +431,14 @@ static int ad5421_read_event_config(struct iio_dev *indio_dev,
return (bool)(st->fault_mask & mask);
}
-static int ad5421_read_event_value(struct iio_dev *indio_dev, u64 event_code,
- int *val)
+static int ad5421_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
{
int ret;
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ switch (chan->type) {
case IIO_CURRENT:
ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
if (ret < 0)
@@ -432,15 +452,15 @@ static int ad5421_read_event_value(struct iio_dev *indio_dev, u64 event_code,
return -EINVAL;
}
- return 0;
+ return IIO_VAL_INT;
}
static const struct iio_info ad5421_info = {
.read_raw = ad5421_read_raw,
.write_raw = ad5421_write_raw,
- .read_event_config = ad5421_read_event_config,
- .write_event_config = ad5421_write_event_config,
- .read_event_value = ad5421_read_event_value,
+ .read_event_config_new = ad5421_read_event_config,
+ .write_event_config_new = ad5421_write_event_config,
+ .read_event_value_new = ad5421_read_event_value,
.driver_module = THIS_MODULE,
};
@@ -494,13 +514,7 @@ static int ad5421_probe(struct spi_device *spi)
return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return iio_device_register(indio_dev);
}
static int ad5421_remove(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 96e9ed4c2d0..1263b0e5ad8 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -132,8 +132,9 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
.name = "powerdown",
.read = ad5446_read_dac_powerdown,
.write = ad5446_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", false, &ad5446_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5446_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5446_powerdown_mode_enum),
{ },
};
@@ -162,18 +163,15 @@ static int ad5446_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5446_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
*val = st->cached_val;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
-
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -329,6 +327,7 @@ enum ad5446_supported_spi_device_ids {
ID_AD5601,
ID_AD5611,
ID_AD5621,
+ ID_AD5641,
ID_AD5620_2500,
ID_AD5620_1250,
ID_AD5640_2500,
@@ -391,6 +390,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
.channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
.write = ad5446_write,
},
+ [ID_AD5641] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .write = ad5446_write,
+ },
[ID_AD5620_2500] = {
.channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
.int_vref_mv = 2500,
@@ -445,6 +448,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5601", ID_AD5601},
{"ad5611", ID_AD5611},
{"ad5621", ID_AD5621},
+ {"ad5641", ID_AD5641},
{"ad5620-2500", ID_AD5620_2500}, /* AD5620/40/60: */
{"ad5620-1250", ID_AD5620_1250}, /* part numbers may look differently */
{"ad5640-2500", ID_AD5640_2500},
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index fff7d0762c0..82e208f6cde 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -101,7 +101,6 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
{
struct ad5449 *st = iio_priv(indio_dev);
int ret;
- struct spi_message msg;
struct spi_transfer t[] = {
{
.tx_buf = &st->data[0],
@@ -114,15 +113,11 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
},
};
- spi_message_init(&msg);
- spi_message_add_tail(&t[0], &msg);
- spi_message_add_tail(&t[1], &msg);
-
mutex_lock(&indio_dev->mlock);
st->data[0] = cpu_to_be16(addr << 12);
st->data[1] = cpu_to_be16(AD5449_CMD_NOOP);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
goto out_unlock;
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index caffb16bc05..c0957a918e1 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -100,7 +100,6 @@ static int ad5504_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5504_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
int ret;
switch (m) {
@@ -113,11 +112,9 @@ static int ad5504_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
-
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -248,8 +245,10 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
.name = "powerdown",
.read = ad5504_read_dac_powerdown,
.write = ad5504_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", true, &ad5504_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5504_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5504_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 714af757cd5..774dd968145 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -50,15 +50,12 @@ static int ad5624r_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5624r_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
switch (m) {
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
-
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -163,8 +160,10 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
.name = "powerdown",
.read = ad5624r_read_dac_powerdown,
.write = ad5624r_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", true, &ad5624r_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5624r_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5624r_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 57825ead7db..30e506e37dd 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -201,7 +201,6 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5686_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
int ret;
switch (m) {
@@ -213,14 +212,10 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret;
return IIO_VAL_INT;
- break;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->vref_mv * 100000)
- >> (chan->scan_type.realbits);
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
-
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -265,8 +260,9 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
.name = "powerdown",
.read = ad5686_read_dac_powerdown,
.write = ad5686_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", false, &ad5686_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5686_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5686_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 36a4361aece..9a78d5abb2f 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -253,15 +253,6 @@ static inline int ad5755_get_offset(struct ad5755_state *st,
return (min * (1 << chan->scan_type.realbits)) / (max - min);
}
-static inline int ad5755_get_scale(struct ad5755_state *st,
- struct iio_chan_spec const *chan)
-{
- int min, max;
-
- ad5755_get_min_max(st, chan, &min, &max);
- return ((max - min) * 1000000000ULL) >> chan->scan_type.realbits;
-}
-
static int ad5755_chan_reg_info(struct ad5755_state *st,
struct iio_chan_spec const *chan, long info, bool write,
unsigned int *reg, unsigned int *shift, unsigned int *offset)
@@ -303,13 +294,15 @@ static int ad5755_read_raw(struct iio_dev *indio_dev,
{
struct ad5755_state *st = iio_priv(indio_dev);
unsigned int reg, shift, offset;
+ int min, max;
int ret;
switch (info) {
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = ad5755_get_scale(st, chan);
- return IIO_VAL_INT_PLUS_NANO;
+ ad5755_get_min_max(st, chan, &min, &max);
+ *val = max - min;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
*val = ad5755_get_offset(st, chan);
return IIO_VAL_INT;
@@ -386,6 +379,7 @@ static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
.name = "powerdown",
.read = ad5755_read_powerdown,
.write = ad5755_write_powerdown,
+ .shared = IIO_SEPARATE,
},
{ },
};
@@ -595,13 +589,7 @@ static int ad5755_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return iio_device_register(indio_dev);
}
static int ad5755_remove(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index df7e028d9db..a8ff5b2ed13 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -217,7 +217,6 @@ static int ad5764_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct ad5764_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int reg;
int vref;
int ret;
@@ -245,15 +244,14 @@ static int ad5764_read_raw(struct iio_dev *indio_dev,
*val = sign_extend32(*val, 5);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- /* vout = 4 * vref + ((dac_code / 65535) - 0.5) */
+ /* vout = 4 * vref * ((dac_code / 65536) - 0.5) */
vref = ad5764_get_channel_vref(st, chan->channel);
if (vref < 0)
return vref;
- scale_uv = (vref * 4 * 100) >> chan->scan_type.realbits;
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = vref * 4 / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
*val = -(1 << chan->scan_type.realbits) / 2;
return IIO_VAL_INT;
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index ce745896330..d64acbd8948 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -270,9 +270,9 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
*val >>= chan->scan_type.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = (((u64)st->vref_mv) * 1000000ULL) >> chan->scan_type.realbits;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->vref_mv;
+ *val2 = (1 << chan->scan_type.realbits) - 1;
+ return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_OFFSET:
val64 = (((u64)st->vref_neg_mv) << chan->scan_type.realbits);
do_div(val64, st->vref_mv);
@@ -287,11 +287,12 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
{
.name = "powerdown",
- .shared = true,
+ .shared = IIO_SHARED_BY_TYPE,
.read = ad5791_read_dac_powerdown,
.write = ad5791_write_dac_powerdown,
},
- IIO_ENUM("powerdown_mode", true, &ad5791_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5791_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5791_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index ed2d276477b..d0505fd22ef 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -169,6 +169,7 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
.name = "powerdown",
.read = ad7303_read_dac_powerdown,
.write = ad7303_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
},
{ },
};
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index 83adcbf1a20..6e190353795 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -82,15 +82,13 @@ static int max517_read_raw(struct iio_dev *indio_dev,
long m)
{
struct max517_data *data = iio_priv(indio_dev);
- unsigned int scale_uv;
switch (m) {
case IIO_CHAN_INFO_SCALE:
/* Corresponds to Vref / 2^(bits) */
- scale_uv = (data->vref_mv[chan->channel] * 1000) >> 8;
- *val = scale_uv / 1000000;
- *val2 = scale_uv % 1000000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = data->vref_mv[chan->channel];
+ *val2 = 8;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
break;
}
@@ -162,7 +160,6 @@ static int max517_probe(struct i2c_client *client,
struct max517_data *data;
struct iio_dev *indio_dev;
struct max517_platform_data *platform_data = client->dev.platform_data;
- int err;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -194,13 +191,7 @@ static int max517_probe(struct i2c_client *client,
data->vref_mv[1] = platform_data->vref_mv[1];
}
- err = iio_device_register(indio_dev);
- if (err)
- return err;
-
- dev_info(&client->dev, "DAC registered\n");
-
- return 0;
+ return iio_device_register(indio_dev);
}
static int max517_remove(struct i2c_client *client)
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 1397b6e0e41..9f57ae84ab8 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -195,8 +195,9 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
.name = "powerdown",
.read = mcp4725_read_powerdown,
.write = mcp4725_write_powerdown,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", false, &mcp4725_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4725_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum),
{ },
};
@@ -238,17 +239,15 @@ static int mcp4725_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct mcp4725_data *data = iio_priv(indio_dev);
- unsigned long scale_uv;
switch (mask) {
case IIO_CHAN_INFO_RAW:
*val = data->dac_value;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (data->vref_mv * 1000) >> 12;
- *val = scale_uv / 1000000;
- *val2 = scale_uv % 1000000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = data->vref_mv;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -321,13 +320,7 @@ static int mcp4725_probe(struct i2c_client *client,
data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
- err = iio_device_register(indio_dev);
- if (err)
- return err;
-
- dev_info(&client->dev, "MCP4725 DAC registered\n");
-
- return 0;
+ return iio_device_register(indio_dev);
}
static int mcp4725_remove(struct i2c_client *client)
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 52605c0ea3a..63a25d9e120 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -351,6 +351,7 @@ static ssize_t adf4350_read(struct iio_dev *indio_dev,
.read = adf4350_read, \
.write = adf4350_write, \
.private = _ident, \
+ .shared = IIO_SEPARATE, \
}
static const struct iio_chan_spec_ext_info adf4350_ext_info[] = {
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index e9ec022ae22..add50983726 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -51,7 +51,6 @@ static int adis16080_read_sample(struct iio_dev *indio_dev,
u16 addr, int *val)
{
struct adis16080_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -66,11 +65,7 @@ static int adis16080_read_sample(struct iio_dev *indio_dev,
st->buf = cpu_to_be16(addr | ADIS16080_DIN_WRITE);
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- ret = spi_sync(st->us, &m);
+ ret = spi_sync_transfer(st->us, t, ARRAY_SIZE(t));
if (ret == 0)
*val = sign_extend32(be16_to_cpu(st->buf), 11);
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index ac66fc18404..445c2aecfad 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -47,7 +47,6 @@ static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
{
int ret;
struct adis16130_state *st = iio_priv(indio_dev);
- struct spi_message msg;
struct spi_transfer xfer = {
.tx_buf = st->buf,
.rx_buf = st->buf,
@@ -59,10 +58,7 @@ static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
st->buf[0] = ADIS16130_CON_RD | reg_addr;
st->buf[1] = st->buf[2] = st->buf[3] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->us, &msg);
-
+ ret = spi_sync_transfer(st->us, &xfer, 1);
if (ret == 0)
*val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
mutex_unlock(&st->buf_lock);
@@ -103,7 +99,6 @@ static int adis16130_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- break;
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_ANGL_VEL:
@@ -115,7 +110,6 @@ static int adis16130_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- break;
}
return -EINVAL;
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 06541162fc0..22b6fb80fa1 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -239,7 +239,6 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- break;
case IIO_CHAN_INFO_OFFSET:
*val = 250000 / 1453; /* 25 C = 0x00 */
return IIO_VAL_INT;
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index 6dab2995f0f..1e546ba7ba4 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -90,7 +90,6 @@ static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
u8 reg_address,
u16 *val)
{
- struct spi_message msg;
struct adxrs450_state *st = iio_priv(indio_dev);
u32 tx;
int ret;
@@ -114,10 +113,7 @@ static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
tx |= ADXRS450_P;
st->tx = cpu_to_be32(tx);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
reg_address);
@@ -169,7 +165,6 @@ static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
**/
static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
{
- struct spi_message msg;
struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
@@ -188,10 +183,7 @@ static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
mutex_lock(&st->buf_lock);
st->tx = cpu_to_be32(ADXRS450_SENSOR_DATA);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "Problem while reading sensor data\n");
goto error_ret;
@@ -354,7 +346,6 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- break;
case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW:
ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
if (ret)
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index c688d974d3e..ea01c6bcfb5 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -182,10 +182,11 @@ static const struct iio_info gyro_3d_info = {
};
/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
{
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
+ iio_push_to_buffers(indio_dev, data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -200,7 +201,7 @@ static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev,
gyro_state->common_attributes.data_ready);
if (gyro_state->common_attributes.data_ready)
hid_sensor_push_data(indio_dev,
- (u8 *)gyro_state->gyro_val,
+ gyro_state->gyro_val,
sizeof(gyro_state->gyro_val));
return 0;
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index 6c43af9bb0a..e3b3c508407 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -55,11 +55,8 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
if (ret < 0)
goto error_ret;
- if (indio_dev->scan_timestamp)
- memcpy(buf + indio_dev->scan_bytes - sizeof(s64),
- &pf->timestamp, sizeof(pf->timestamp));
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
- iio_push_to_buffers(indio_dev, (u8 *)buf);
iio_trigger_notify_done(indio_dev->trig);
error_ret:
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index 69017c7ec30..d67b17b6a7a 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -32,16 +32,7 @@ int st_gyro_trig_set_state(struct iio_trigger *trig, bool state)
static int st_gyro_buffer_preenable(struct iio_dev *indio_dev)
{
- int err;
-
- err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
- goto st_gyro_set_enable_error;
-
- err = iio_sw_buffer_preenable(indio_dev);
-
-st_gyro_set_enable_error:
- return err;
+ return st_sensors_set_enable(indio_dev, true);
}
static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index e13c2b0bf3d..d53d91adfb5 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -305,8 +305,9 @@ static const struct iio_trigger_ops st_gyro_trigger_ops = {
int st_gyro_common_probe(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
- int err;
struct st_sensor_data *gdata = iio_priv(indio_dev);
+ int irq = gdata->get_irq_data_ready(indio_dev);
+ int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &gyro_info;
@@ -314,7 +315,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev,
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_gyro_sensors), st_gyro_sensors);
if (err < 0)
- goto st_gyro_common_probe_error;
+ return err;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
gdata->multiread_bit = gdata->sensor->multi_read_bit;
@@ -327,13 +328,13 @@ int st_gyro_common_probe(struct iio_dev *indio_dev,
err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
- goto st_gyro_common_probe_error;
+ return err;
- if (gdata->get_irq_data_ready(indio_dev) > 0) {
- err = st_gyro_allocate_ring(indio_dev);
- if (err < 0)
- goto st_gyro_common_probe_error;
+ err = st_gyro_allocate_ring(indio_dev);
+ if (err < 0)
+ return err;
+ if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
ST_GYRO_TRIGGER_OPS);
if (err < 0)
@@ -344,15 +345,14 @@ int st_gyro_common_probe(struct iio_dev *indio_dev,
if (err)
goto st_gyro_device_register_error;
- return err;
+ return 0;
st_gyro_device_register_error:
- if (gdata->get_irq_data_ready(indio_dev) > 0)
+ if (irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_gyro_probe_trigger_error:
- if (gdata->get_irq_data_ready(indio_dev) > 0)
- st_gyro_deallocate_ring(indio_dev);
-st_gyro_common_probe_error:
+ st_gyro_deallocate_ring(indio_dev);
+
return err;
}
EXPORT_SYMBOL(st_gyro_common_probe);
@@ -362,10 +362,10 @@ void st_gyro_common_remove(struct iio_dev *indio_dev)
struct st_sensor_data *gdata = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (gdata->get_irq_data_ready(indio_dev) > 0) {
+ if (gdata->get_irq_data_ready(indio_dev) > 0)
st_sensors_deallocate_trigger(indio_dev);
- st_gyro_deallocate_ring(indio_dev);
- }
+
+ st_gyro_deallocate_ring(indio_dev);
}
EXPORT_SYMBOL(st_gyro_common_remove);
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 9b32253b824..f6db6af36ba 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -30,9 +30,12 @@ int __iio_add_chan_devattr(const char *postfix,
const char *buf,
size_t len),
u64 mask,
- bool generic,
+ enum iio_shared_by shared_by,
struct device *dev,
struct list_head *attr_list);
+void iio_free_chan_devattr_list(struct list_head *attr_list);
+
+ssize_t iio_format_value(char *buf, unsigned int type, int val, int val2);
/* Event interface flags */
#define IIO_BUSY_BIT_POS 1
@@ -50,6 +53,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
void iio_disable_all_buffers(struct iio_dev *indio_dev);
+void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
#else
@@ -57,11 +61,13 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev);
#define iio_buffer_read_first_n_outer_addr NULL
static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
+static inline void iio_buffer_wakeup_poll(struct iio_dev *indio_dev) {}
#endif
int iio_device_register_eventset(struct iio_dev *indio_dev);
void iio_device_unregister_eventset(struct iio_dev *indio_dev);
+void iio_device_wakeup_eventset(struct iio_dev *indio_dev);
int iio_event_getfd(struct iio_dev *indio_dev);
#endif
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
index 054c01d6e73..f2cf829e5df 100644
--- a/drivers/iio/imu/adis16400_buffer.c
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -82,13 +82,8 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
spi_setup(st->adis.spi);
}
- /* Guaranteed to be aligned with 8 byte boundary */
- if (indio_dev->scan_timestamp) {
- void *b = adis->buffer + indio_dev->scan_bytes - sizeof(s64);
- *(s64 *)b = pf->timestamp;
- }
-
- iio_push_to_buffers(indio_dev, adis->buffer);
+ iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+ pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 99d8e0b0dd3..cb32b593f1c 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -102,13 +102,8 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
mutex_unlock(&adis->txrx_lock);
}
- /* Guaranteed to be aligned with 8 byte boundary */
- if (indio_dev->scan_timestamp) {
- void *b = adis->buffer + indio_dev->scan_bytes - sizeof(s64);
- *(s64 *)b = pf->timestamp;
- }
-
- iio_push_to_buffers(indio_dev, adis->buffer);
+ iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+ pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 7da0832f187..429517117ef 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -124,7 +124,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
u16 fifo_count;
s64 timestamp;
- u64 *tmp;
mutex_lock(&indio_dev->mlock);
if (!(st->chip_config.accl_fifo_enable |
@@ -170,9 +169,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (0 == result)
timestamp = 0;
- tmp = (u64 *)data;
- tmp[DIV_ROUND_UP(bytes_per_datum, 8)] = timestamp;
- result = iio_push_to_buffers(indio_dev, data);
+ result = iio_push_to_buffers_with_timestamp(indio_dev, data,
+ timestamp);
if (result)
goto flush_fifo;
fifo_count -= bytes_per_datum;
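
The itg3200, adis and inv_mpu6050 hunks above all adopt the same helper. A hedged skeleton of a trigger handler using it (foo_* names and the 16-byte scan size are invented for the sketch):

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

struct foo_state {
	/* 8-byte aligned so the s64 timestamp fits naturally at the end */
	u8 buf[16] __aligned(8);
};

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct foo_state *st = iio_priv(indio_dev);

	/* ... read the enabled channels into st->buf here ... */

	/* Appends pf->timestamp when scan_timestamp is set, then pushes */
	iio_push_to_buffers_with_timestamp(indio_dev, st->buf, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}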
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2db7dcd826b..7f9152c3c4d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
+#include <linux/sched.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
@@ -31,16 +32,9 @@ static const char * const iio_endian_prefix[] = {
[IIO_LE] = "le",
};
-static bool iio_buffer_is_active(struct iio_dev *indio_dev,
- struct iio_buffer *buf)
+static bool iio_buffer_is_active(struct iio_buffer *buf)
{
- struct list_head *p;
-
- list_for_each(p, &indio_dev->buffer_list)
- if (p == &buf->buffer_list)
- return true;
-
- return false;
+ return !list_empty(&buf->buffer_list);
}
/**
@@ -55,6 +49,9 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ if (!indio_dev->info)
+ return -ENODEV;
+
if (!rb || !rb->access->read_first_n)
return -EINVAL;
return rb->access->read_first_n(rb, n, buf);
@@ -69,6 +66,9 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ if (!indio_dev->info)
+ return -ENODEV;
+
poll_wait(filp, &rb->pollq, wait);
if (rb->stufftoread)
return POLLIN | POLLRDNORM;
@@ -76,10 +76,27 @@ unsigned int iio_buffer_poll(struct file *filp,
return 0;
}
+/**
+ * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
+ * @indio_dev: The IIO device
+ *
+ * Wakes up the event waitqueue used for poll(). Should usually
+ * be called when the device is unregistered.
+ */
+void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
+{
+ if (!indio_dev->buffer)
+ return;
+
+ wake_up(&indio_dev->buffer->pollq);
+}
+
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
+ INIT_LIST_HEAD(&buffer->buffer_list);
init_waitqueue_head(&buffer->pollq);
+ kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
@@ -146,7 +163,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+ if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -192,7 +209,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+ if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -214,7 +231,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&iio_show_scan_index,
NULL,
0,
- 0,
+ IIO_SEPARATE,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
@@ -249,29 +266,14 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
0,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
attrcount++;
ret = attrcount;
error_ret:
return ret;
}
-static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
- struct iio_dev_attr *p)
-{
- kfree(p->dev_attr.attr.name);
- kfree(p);
-}
-
-static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p, *n;
- struct iio_buffer *buffer = indio_dev->buffer;
-
- list_for_each_entry_safe(p, n,
- &buffer->scan_el_dev_attr_list, l)
- iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
-}
-
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
@@ -348,7 +350,7 @@ int iio_buffer_register(struct iio_dev *indio_dev,
error_free_scan_mask:
kfree(buffer->scan_mask);
error_cleanup_dynamic:
- __iio_buffer_attr_cleanup(indio_dev);
+ iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
return ret;
}
@@ -358,7 +360,7 @@ void iio_buffer_unregister(struct iio_dev *indio_dev)
{
kfree(indio_dev->buffer->scan_mask);
kfree(indio_dev->buffer->scan_el_group.attrs);
- __iio_buffer_attr_cleanup(indio_dev);
+ iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);
@@ -396,7 +398,7 @@ ssize_t iio_buffer_write_length(struct device *dev,
return len;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+ if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
} else {
if (buffer->access->set_length)
@@ -414,13 +416,11 @@ ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n",
- iio_buffer_is_active(indio_dev,
- indio_dev->buffer));
+ return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
-/* note NULL used as error indicator as it doesn't make sense. */
+/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
unsigned int masklength,
const unsigned long *mask)
@@ -435,8 +435,8 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
return NULL;
}
-static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
- bool timestamp)
+static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
+ const unsigned long *mask, bool timestamp)
{
const struct iio_chan_spec *ch;
unsigned bytes = 0;
@@ -460,6 +460,19 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
return bytes;
}
+static void iio_buffer_activate(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ iio_buffer_get(buffer);
+ list_add(&buffer->buffer_list, &indio_dev->buffer_list);
+}
+
+static void iio_buffer_deactivate(struct iio_buffer *buffer)
+{
+ list_del_init(&buffer->buffer_list);
+ iio_buffer_put(buffer);
+}
+
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer, *_buffer;
@@ -472,7 +485,7 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
list_for_each_entry_safe(buffer, _buffer,
&indio_dev->buffer_list, buffer_list)
- list_del_init(&buffer->buffer_list);
+ iio_buffer_deactivate(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
@@ -482,7 +495,21 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
kfree(indio_dev->active_scan_mask);
}
-int iio_update_buffers(struct iio_dev *indio_dev,
+static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ unsigned int bytes;
+
+ if (!buffer->access->set_bytes_per_datum)
+ return;
+
+ bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
+ buffer->scan_timestamp);
+
+ buffer->access->set_bytes_per_datum(buffer, bytes);
+}
+
+static int __iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
@@ -512,9 +539,9 @@ int iio_update_buffers(struct iio_dev *indio_dev,
indio_dev->active_scan_mask = NULL;
if (remove_buffer)
- list_del(&remove_buffer->buffer_list);
+ iio_buffer_deactivate(remove_buffer);
if (insert_buffer)
- list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);
+ iio_buffer_activate(indio_dev, insert_buffer);
/* If no buffers in list, we are done */
if (list_empty(&indio_dev->buffer_list)) {
@@ -524,7 +551,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
return 0;
}
- /* What scan mask do we actually have ?*/
+ /* What scan mask do we actually have? */
compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
sizeof(long), GFP_KERNEL);
if (compound_mask == NULL) {
@@ -549,7 +576,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
* Roll back.
* Note can only occur when adding a buffer.
*/
- list_del(&insert_buffer->buffer_list);
+ iio_buffer_deactivate(insert_buffer);
if (old_mask) {
indio_dev->active_scan_mask = old_mask;
success = -EINVAL;
@@ -579,7 +606,8 @@ int iio_update_buffers(struct iio_dev *indio_dev,
iio_compute_scan_bytes(indio_dev,
indio_dev->active_scan_mask,
indio_dev->scan_timestamp);
- list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+ iio_buffer_update_bytes_per_datum(indio_dev, buffer);
if (buffer->access->request_update) {
ret = buffer->access->request_update(buffer);
if (ret) {
@@ -588,6 +616,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
goto error_run_postdisable;
}
}
+ }
if (indio_dev->info->update_scan_mode) {
ret = indio_dev->info
->update_scan_mode(indio_dev,
@@ -597,7 +626,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
goto error_run_postdisable;
}
}
- /* Definitely possible for devices to support both of these.*/
+ /* Definitely possible for devices to support both of these. */
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO "Buffer not started: no trigger\n");
@@ -608,7 +637,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
- } else { /* should never be reached */
+ } else { /* Should never be reached */
ret = -EINVAL;
goto error_run_postdisable;
}
@@ -640,13 +669,50 @@ error_run_postdisable:
error_remove_inserted:
if (insert_buffer)
- list_del(&insert_buffer->buffer_list);
+ iio_buffer_deactivate(insert_buffer);
indio_dev->active_scan_mask = old_mask;
kfree(compound_mask);
error_ret:
return ret;
}
+
+int iio_update_buffers(struct iio_dev *indio_dev,
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer)
+{
+ int ret;
+
+ if (insert_buffer == remove_buffer)
+ return 0;
+
+ mutex_lock(&indio_dev->info_exist_lock);
+ mutex_lock(&indio_dev->mlock);
+
+ if (insert_buffer && iio_buffer_is_active(insert_buffer))
+ insert_buffer = NULL;
+
+ if (remove_buffer && !iio_buffer_is_active(remove_buffer))
+ remove_buffer = NULL;
+
+ if (!insert_buffer && !remove_buffer) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ if (indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
+
+out_unlock:
+ mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&indio_dev->info_exist_lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(iio_update_buffers);
ssize_t iio_buffer_store_enable(struct device *dev,
@@ -657,7 +723,6 @@ ssize_t iio_buffer_store_enable(struct device *dev,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *pbuf = indio_dev->buffer;
bool inlist;
ret = strtobool(buf, &requested_state);
@@ -667,16 +732,16 @@ ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
/* Find out if it is in the list */
- inlist = iio_buffer_is_active(indio_dev, pbuf);
+ inlist = iio_buffer_is_active(indio_dev->buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
- ret = iio_update_buffers(indio_dev,
+ ret = __iio_update_buffers(indio_dev,
indio_dev->buffer, NULL);
else
- ret = iio_update_buffers(indio_dev,
+ ret = __iio_update_buffers(indio_dev,
NULL, indio_dev->buffer);
if (ret < 0)
@@ -687,24 +752,6 @@ done:
}
EXPORT_SYMBOL(iio_buffer_store_enable);
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer;
- unsigned bytes;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
-
- list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
- if (buffer->access->set_bytes_per_datum) {
- bytes = iio_compute_scan_bytes(indio_dev,
- buffer->scan_mask,
- buffer->scan_timestamp);
-
- buffer->access->set_bytes_per_datum(buffer, bytes);
- }
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_buffer_preenable);
-
/**
* iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
* @indio_dev: the iio device
@@ -732,6 +779,7 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
/**
* iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
*
@@ -752,7 +800,7 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
- WARN_ON("trying to set scanmask prior to registering buffer\n");
+ WARN_ON("Trying to set scanmask prior to registering buffer\n");
goto err_invalid_mask;
}
bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
@@ -807,8 +855,8 @@ struct iio_demux_table {
struct list_head l;
};
-static unsigned char *iio_demux(struct iio_buffer *buffer,
- unsigned char *datain)
+static const void *iio_demux(struct iio_buffer *buffer,
+ const void *datain)
{
struct iio_demux_table *t;
@@ -821,9 +869,9 @@ static unsigned char *iio_demux(struct iio_buffer *buffer,
return buffer->demux_bounce;
}
-static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
+static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
- unsigned char *dataout = iio_demux(buffer, data);
+ const void *dataout = iio_demux(buffer, data);
return buffer->access->store_to(buffer, dataout);
}
@@ -838,7 +886,7 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
}
-int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
int ret;
struct iio_buffer *buf;
@@ -961,3 +1009,45 @@ error_clear_mux_table:
return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
+
+/**
+ * iio_buffer_release() - Free a buffer's resources
+ * @ref: Pointer to the kref embedded in the iio_buffer struct
+ *
+ * This function is called when the last reference to the buffer has been
+ * dropped. It will typically free all resources allocated by the buffer. Do not
+ * call this function manually, always use iio_buffer_put() when done using a
+ * buffer.
+ */
+static void iio_buffer_release(struct kref *ref)
+{
+ struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
+
+ buffer->access->release(buffer);
+}
+
+/**
+ * iio_buffer_get() - Grab a reference to the buffer
+ * @buffer: The buffer to grab a reference for, may be NULL
+ *
+ * Returns the pointer to the buffer that was passed into the function.
+ */
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
+{
+ if (buffer)
+ kref_get(&buffer->ref);
+
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(iio_buffer_get);
+
+/**
+ * iio_buffer_put() - Release the reference to the buffer
+ * @buffer: The buffer to release the reference for, may be NULL
+ */
+void iio_buffer_put(struct iio_buffer *buffer)
+{
+ if (buffer)
+ kref_put(&buffer->ref, iio_buffer_release);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_put);
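
A hedged sketch of how the new reference counting is meant to be used from a buffer implementation's side (foo_* names invented; the kfifo_buf.c hunks further down follow this pattern):

#include <linux/slab.h>
#include <linux/iio/buffer.h>

struct foo_buffer {
	struct iio_buffer buffer;
	/* implementation-specific state ... */
};

/* Called via iio_buffer_put() once the last reference is dropped */
static void foo_buffer_release(struct iio_buffer *buffer)
{
	kfree(container_of(buffer, struct foo_buffer, buffer));
}

static const struct iio_buffer_access_funcs foo_access_funcs = {
	/* .store_to, .read_first_n, ... elided in this sketch */
	.release = &foo_buffer_release,
};

static void foo_buffer_free(struct iio_buffer *buffer)
{
	/* No direct kfree(): the core may still hold its own reference */
	iio_buffer_put(buffer);
}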
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index f95c6979efd..18f72e3d0ed 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -9,6 +9,8 @@
* Based on elements of hwmon and input subsystems.
*/
+#define pr_fmt(fmt) "iio-core: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
@@ -28,6 +30,7 @@
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);
@@ -101,6 +104,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_PHASE] = "phase",
[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
+ [IIO_CHAN_INFO_INT_TIME] = "integration_time",
};
const struct iio_chan_spec
@@ -130,16 +134,13 @@ static int __init iio_init(void)
/* Register sysfs bus */
ret = bus_register(&iio_bus_type);
if (ret < 0) {
- printk(KERN_ERR
- "%s could not register bus type\n",
- __FILE__);
+ pr_err("could not register bus type\n");
goto error_nothing;
}
ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
if (ret < 0) {
- printk(KERN_ERR "%s: failed to allocate char dev region\n",
- __FILE__);
+ pr_err("failed to allocate char dev region\n");
goto error_unregister_bus_type;
}
@@ -361,22 +362,20 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(iio_enum_write);
-static ssize_t iio_read_channel_info(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * iio_format_value() - Formats a IIO value into its string representation
+ * @buf: The buffer to which the formated value gets written
+ * @type: One of the IIO_VAL_... constants. This decides how the val and val2
+ * parameters are formatted.
+ * @val: First part of the value, exact meaning depends on the type parameter.
+ * @val2: Second part of the value, exact meaning depends on the type parameter.
+ */
+ssize_t iio_format_value(char *buf, unsigned int type, int val, int val2)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long long tmp;
- int val, val2;
bool scale_db = false;
- int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
- &val, &val2, this_attr->address);
-
- if (ret < 0)
- return ret;
- switch (ret) {
+ switch (type) {
case IIO_VAL_INT:
return sprintf(buf, "%d\n", val);
case IIO_VAL_INT_PLUS_MICRO_DB:
@@ -408,6 +407,22 @@ static ssize_t iio_read_channel_info(struct device *dev,
}
}
+static ssize_t iio_read_channel_info(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val, val2;
+ int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
+ &val, &val2, this_attr->address);
+
+ if (ret < 0)
+ return ret;
+
+ return iio_format_value(buf, ret, val, val2);
+}
+
/**
* iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
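
For reference, a hedged illustration of the strings iio_format_value() produces for the usual type codes (example values; the full switch is not visible in this hunk):

/*
 *   iio_format_value(buf, IIO_VAL_INT, 42, 0)                  -> "42\n"
 *   iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 1, 500000)   -> "1.500000\n"
 *   iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 1, 500000000) -> "1.500000000\n"
 *   iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, 4)            -> "0.250000000\n"
 *   iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2048, 12)   -> "0.500000000\n"
 */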
@@ -516,14 +531,15 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
struct device_attribute *attr,
const char *buf,
size_t len),
- bool generic)
+ enum iio_shared_by shared_by)
{
- int ret;
- char *name_format, *full_postfix;
+ int ret = 0;
+ char *name_format = NULL;
+ char *full_postfix;
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
- if (chan->modified && !generic) {
+ if (chan->modified && (shared_by == IIO_SEPARATE)) {
if (chan->extend_name)
full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_modifier_names[chan
@@ -544,53 +560,78 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
chan->extend_name,
postfix);
}
- if (full_postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (full_postfix == NULL)
+ return -ENOMEM;
if (chan->differential) { /* Differential can not have modifier */
- if (generic)
+ switch (shared_by) {
+ case IIO_SHARED_BY_ALL:
+ name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ break;
+ case IIO_SHARED_BY_DIR:
+ name_format = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_direction[chan->output],
+ full_postfix);
+ break;
+ case IIO_SHARED_BY_TYPE:
name_format
= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
iio_chan_type_name_spec[chan->type],
full_postfix);
- else if (chan->indexed)
+ break;
+ case IIO_SEPARATE:
+ if (!chan->indexed) {
+ WARN_ON("Differential channels must be indexed\n");
+ ret = -EINVAL;
+ goto error_free_full_postfix;
+ }
name_format
- = kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
+ = kasprintf(GFP_KERNEL,
+ "%s_%s%d-%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
chan->channel,
iio_chan_type_name_spec[chan->type],
chan->channel2,
full_postfix);
- else {
- WARN_ON("Differential channels must be indexed\n");
- ret = -EINVAL;
- goto error_free_full_postfix;
+ break;
}
} else { /* Single ended */
- if (generic)
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s_%s",
- iio_direction[chan->output],
- iio_chan_type_name_spec[chan->type],
- full_postfix);
- else if (chan->indexed)
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
- iio_direction[chan->output],
- iio_chan_type_name_spec[chan->type],
- chan->channel,
- full_postfix);
- else
+ switch (shared_by) {
+ case IIO_SHARED_BY_ALL:
+ name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ break;
+ case IIO_SHARED_BY_DIR:
+ name_format = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_direction[chan->output],
+ full_postfix);
+ break;
+ case IIO_SHARED_BY_TYPE:
name_format
= kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
+ break;
+
+ case IIO_SEPARATE:
+ if (chan->indexed)
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ chan->channel,
+ full_postfix);
+ else
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ break;
+ }
}
if (name_format == NULL) {
ret = -ENOMEM;
@@ -614,16 +655,11 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
dev_attr->attr.mode |= S_IWUSR;
dev_attr->store = writefunc;
}
- kfree(name_format);
- kfree(full_postfix);
-
- return 0;
-
error_free_name_format:
kfree(name_format);
error_free_full_postfix:
kfree(full_postfix);
-error_ret:
+
return ret;
}
@@ -642,21 +678,21 @@ int __iio_add_chan_devattr(const char *postfix,
const char *buf,
size_t len),
u64 mask,
- bool generic,
+ enum iio_shared_by shared_by,
struct device *dev,
struct list_head *attr_list)
{
int ret;
struct iio_dev_attr *iio_attr, *t;
- iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
+ iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
if (iio_attr == NULL) {
ret = -ENOMEM;
goto error_ret;
}
ret = __iio_device_attr_init(&iio_attr->dev_attr,
postfix, chan,
- readfunc, writefunc, generic);
+ readfunc, writefunc, shared_by);
if (ret)
goto error_iio_dev_attr_free;
iio_attr->c = chan;
@@ -664,7 +700,7 @@ int __iio_add_chan_devattr(const char *postfix,
list_for_each_entry(t, attr_list, l)
if (strcmp(t->dev_attr.attr.name,
iio_attr->dev_attr.attr.name) == 0) {
- if (!generic)
+ if (shared_by == IIO_SEPARATE)
dev_err(dev, "tried to double register : %s\n",
t->dev_attr.attr.name);
ret = -EBUSY;
@@ -682,46 +718,68 @@ error_ret:
return ret;
}
-static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
+static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_shared_by shared_by,
+ const long *infomask)
{
- int ret, attrcount = 0;
- int i;
- const struct iio_chan_spec_ext_info *ext_info;
+ int i, ret, attrcount = 0;
- if (chan->channel < 0)
- return 0;
- for_each_set_bit(i, &chan->info_mask_separate, sizeof(long)*8) {
+ for_each_set_bit(i, infomask, sizeof(infomask)*8) {
ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
chan,
&iio_read_channel_info,
&iio_write_channel_info,
i,
- 0,
+ shared_by,
&indio_dev->dev,
&indio_dev->channel_attr_list);
- if (ret < 0)
- goto error_ret;
- attrcount++;
- }
- for_each_set_bit(i, &chan->info_mask_shared_by_type, sizeof(long)*8) {
- ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
- chan,
- &iio_read_channel_info,
- &iio_write_channel_info,
- i,
- 1,
- &indio_dev->dev,
- &indio_dev->channel_attr_list);
- if (ret == -EBUSY) {
- ret = 0;
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
- } else if (ret < 0) {
- goto error_ret;
- }
+ else if (ret < 0)
+ return ret;
attrcount++;
}
+ return attrcount;
+}
+
+static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret, attrcount = 0;
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ if (chan->channel < 0)
+ return 0;
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SEPARATE,
+ &chan->info_mask_separate);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SHARED_BY_TYPE,
+ &chan->info_mask_shared_by_type);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SHARED_BY_DIR,
+ &chan->info_mask_shared_by_dir);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SHARED_BY_ALL,
+ &chan->info_mask_shared_by_all);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
if (chan->ext_info) {
unsigned int i = 0;
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
@@ -740,22 +798,31 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
continue;
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
}
}
- ret = attrcount;
-error_ret:
- return ret;
+ return attrcount;
}
-static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
- struct iio_dev_attr *p)
+/**
+ * iio_free_chan_devattr_list() - Free a list of IIO device attributes
+ * @attr_list: List of IIO device attributes
+ *
+ * This function frees the memory allocated for each of the IIO device
+ * attributes in the list. Note: if you want to reuse the list after calling
+ * this function you have to reinitialize it using INIT_LIST_HEAD().
+ */
+void iio_free_chan_devattr_list(struct list_head *attr_list)
{
- kfree(p->dev_attr.attr.name);
- kfree(p);
+ struct iio_dev_attr *p, *n;
+
+ list_for_each_entry_safe(p, n, attr_list, l) {
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+ }
}
static ssize_t iio_show_dev_name(struct device *dev,
@@ -771,7 +838,7 @@ static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
- struct iio_dev_attr *p, *n;
+ struct iio_dev_attr *p;
struct attribute **attr;
/* First count elements in any existing group */
@@ -824,11 +891,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
return 0;
error_clear_attrs:
- list_for_each_entry_safe(p, n,
- &indio_dev->channel_attr_list, l) {
- list_del(&p->l);
- iio_device_remove_and_free_read_attr(indio_dev, p);
- }
+ iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
return ret;
}
@@ -836,12 +899,7 @@ error_clear_attrs:
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
- struct iio_dev_attr *p, *n;
-
- list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) {
- list_del(&p->l);
- iio_device_remove_and_free_read_attr(indio_dev, p);
- }
+ iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
kfree(indio_dev->chan_attr_group.attrs);
}
@@ -853,6 +911,8 @@ static void iio_dev_release(struct device *device)
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
+ iio_buffer_put(indio_dev->buffer);
+
ida_simple_remove(&iio_ida, indio_dev->id);
kfree(indio_dev);
}
@@ -890,7 +950,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
if (dev->id < 0) {
/* cannot use a dev_err as the name isn't available */
- printk(KERN_ERR "Failed to get id\n");
+ pr_err("failed to get device id\n");
kfree(dev);
return NULL;
}
@@ -995,6 +1055,9 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int __user *ip = (int __user *)arg;
int fd;
+ if (!indio_dev->info)
+ return -ENODEV;
+
if (cmd == IIO_GET_EVENT_FD_IOCTL) {
fd = iio_event_getfd(indio_dev);
if (copy_to_user(ip, &fd, sizeof(fd)))
@@ -1091,6 +1154,10 @@ void iio_device_unregister(struct iio_dev *indio_dev)
iio_disable_all_buffers(indio_dev);
indio_dev->info = NULL;
+
+ iio_device_wakeup_eventset(indio_dev);
+ iio_buffer_wakeup_poll(indio_dev);
+
mutex_unlock(&indio_dev->info_exist_lock);
}
EXPORT_SYMBOL(iio_device_unregister);
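
To make the new sharing levels concrete, a hedged example (foo_channels invented) of a channel using the per-type and per-device masks, together with the sysfs names the naming code above generates for a "scale" element on an indexed input voltage channel:

#include <linux/bitops.h>
#include <linux/iio/iio.h>

/*
 * Attribute names produced for a "scale" info element on in_voltage0
 * (illustrative):
 *
 *   IIO_SEPARATE        -> in_voltage0_scale
 *   IIO_SHARED_BY_TYPE  -> in_voltage_scale
 *   IIO_SHARED_BY_DIR   -> in_scale
 *   IIO_SHARED_BY_ALL   -> scale
 */
static const struct iio_chan_spec foo_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
};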
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 6be65ef5faa..c10eab64bc0 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -56,7 +56,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
ev.id = ev_code;
ev.timestamp = timestamp;
- copied = kfifo_put(&ev_int->det_events, &ev);
+ copied = kfifo_put(&ev_int->det_events, ev);
if (copied != 0)
wake_up_locked_poll(&ev_int->wait, POLLIN);
}
@@ -76,6 +76,9 @@ static unsigned int iio_event_poll(struct file *filep,
struct iio_event_interface *ev_int = indio_dev->event_interface;
unsigned int events = 0;
+ if (!indio_dev->info)
+ return -ENODEV;
+
poll_wait(filep, &ev_int->wait, wait);
spin_lock_irq(&ev_int->wait.lock);
@@ -96,6 +99,9 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
unsigned int copied;
int ret;
+ if (!indio_dev->info)
+ return -ENODEV;
+
if (count < sizeof(struct iio_event_data))
return -EINVAL;
@@ -107,9 +113,14 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
}
/* Blocking on device; waiting for something to be there */
ret = wait_event_interruptible_locked_irq(ev_int->wait,
- !kfifo_is_empty(&ev_int->det_events));
+ !kfifo_is_empty(&ev_int->det_events) ||
+ indio_dev->info == NULL);
if (ret)
goto error_unlock;
+ if (indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto error_unlock;
+ }
/* Single access device so no one else can get the data */
}
@@ -166,7 +177,7 @@ int iio_event_getfd(struct iio_dev *indio_dev)
iio_device_get(indio_dev);
fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
- indio_dev, O_RDONLY);
+ indio_dev, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
spin_lock_irq(&ev_int->wait.lock);
__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -190,6 +201,27 @@ static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_FALLING] = "falling"
};
+static const char * const iio_ev_info_text[] = {
+ [IIO_EV_INFO_ENABLE] = "en",
+ [IIO_EV_INFO_VALUE] = "value",
+ [IIO_EV_INFO_HYSTERESIS] = "hysteresis",
+};
+
+static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
+{
+ return attr->c->event_spec[attr->address & 0xffff].dir;
+}
+
+static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
+{
+ return attr->c->event_spec[attr->address & 0xffff].type;
+}
+
+static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
+{
+ return (attr->address >> 16) & 0xffff;
+}
+
static ssize_t iio_ev_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -204,9 +236,14 @@ static ssize_t iio_ev_state_store(struct device *dev,
if (ret < 0)
return ret;
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address,
- val);
+ if (indio_dev->info->write_event_config)
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->address, val);
+ else
+ ret = indio_dev->info->write_event_config_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), val);
+
return (ret < 0) ? ret : len;
}
@@ -216,9 +253,15 @@ static ssize_t iio_ev_state_show(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
+ int val;
+ if (indio_dev->info->read_event_config)
+ val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->address);
+ else
+ val = indio_dev->info->read_event_config_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr));
if (val < 0)
return val;
else
@@ -231,14 +274,24 @@ static ssize_t iio_ev_value_show(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val, ret;
-
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
+ int val, val2;
+ int ret;
- return sprintf(buf, "%d\n", val);
+ if (indio_dev->info->read_event_value) {
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->address, &val);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", val);
+ } else {
+ ret = indio_dev->info->read_event_value_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ &val, &val2);
+ if (ret < 0)
+ return ret;
+ return iio_format_value(buf, ret, val, val2);
+ }
}
static ssize_t iio_ev_value_store(struct device *dev,
@@ -248,25 +301,120 @@ static ssize_t iio_ev_value_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val;
+ int val, val2;
int ret;
- if (!indio_dev->info->write_event_value)
+ if (!indio_dev->info->write_event_value &&
+ !indio_dev->info->write_event_value_new)
return -EINVAL;
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
- val);
+ if (indio_dev->info->write_event_value) {
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value(indio_dev,
+ this_attr->address, val);
+ } else {
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ val, val2);
+ }
if (ret < 0)
return ret;
return len;
}
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+static int iio_device_add_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int spec_index,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_shared_by shared_by, const unsigned long *mask)
+{
+ ssize_t (*show)(struct device *, struct device_attribute *, char *);
+ ssize_t (*store)(struct device *, struct device_attribute *,
+ const char *, size_t);
+ unsigned int attrcount = 0;
+ unsigned int i;
+ char *postfix;
+ int ret;
+
+ for_each_set_bit(i, mask, sizeof(*mask)) {
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_ev_type_text[type], iio_ev_dir_text[dir],
+ iio_ev_info_text[i]);
+ if (postfix == NULL)
+ return -ENOMEM;
+
+ if (i == IIO_EV_INFO_ENABLE) {
+ show = iio_ev_state_show;
+ store = iio_ev_state_store;
+ } else {
+ show = iio_ev_value_show;
+ store = iio_ev_value_store;
+ }
+
+ ret = __iio_add_chan_devattr(postfix, chan, show, store,
+ (i << 16) | spec_index, shared_by, &indio_dev->dev,
+ &indio_dev->event_interface->dev_attr_list);
+ kfree(postfix);
+
+ if (ret)
+ return ret;
+
+ attrcount++;
+ }
+
+ return attrcount;
+}
+
+static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret = 0, i, attrcount = 0;
+ enum iio_event_direction dir;
+ enum iio_event_type type;
+
+ for (i = 0; i < chan->num_event_specs; i++) {
+ type = chan->event_spec[i].type;
+ dir = chan->event_spec[i].dir;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SEPARATE, &chan->event_spec[i].mask_separate);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_TYPE,
+ &chan->event_spec[i].mask_shared_by_type);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_DIR,
+ &chan->event_spec[i].mask_shared_by_dir);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_ALL,
+ &chan->event_spec[i].mask_shared_by_all);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+ }
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
@@ -339,15 +487,14 @@ error_ret:
return ret;
}
-static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
+
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
{
- struct iio_dev_attr *p, *n;
- list_for_each_entry_safe(p, n,
- &indio_dev->event_interface->
- dev_attr_list, l) {
- kfree(p->dev_attr.attr.name);
- kfree(p);
- }
+ if (chan->event_mask)
+ return iio_device_add_event_sysfs_old(indio_dev, chan);
+ else
+ return iio_device_add_event_sysfs_new(indio_dev, chan);
}
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
@@ -369,9 +516,12 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
int j;
- for (j = 0; j < indio_dev->num_channels; j++)
+ for (j = 0; j < indio_dev->num_channels; j++) {
if (indio_dev->channels[j].event_mask != 0)
return true;
+ if (indio_dev->channels[j].num_event_specs != 0)
+ return true;
+ }
return false;
}
@@ -441,18 +591,32 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
return 0;
error_free_setup_event_lines:
- __iio_remove_event_config_attrs(indio_dev);
+ iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
error_ret:
return ret;
}
+/**
+ * iio_device_wakeup_eventset - Wakes up the event waitqueue
+ * @indio_dev: The IIO device
+ *
+ * Wakes up the event waitqueue used for poll() and blocking read().
+ * Should usually be called when the device is unregistered.
+ */
+void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ wake_up(&indio_dev->event_interface->wait);
+}
+
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
if (indio_dev->event_interface == NULL)
return;
- __iio_remove_event_config_attrs(indio_dev);
+ iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface->group.attrs);
kfree(indio_dev->event_interface);
}
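
A hedged sketch of the driver-facing side of the new event interface (foo_* names invented): channels describe events with iio_event_spec entries instead of a packed event_mask, and the *_new callbacks receive explicit type/dir/info arguments:

#include <linux/bitops.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>

static const struct iio_event_spec foo_thresh_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
				 BIT(IIO_EV_INFO_ENABLE),
	},
};
/* in the channel definition:
 *	.event_spec = foo_thresh_events,
 *	.num_event_specs = ARRAY_SIZE(foo_thresh_events),	*/

static int foo_read_event_value(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, enum iio_event_type type,
	enum iio_event_direction dir, enum iio_event_info info,
	int *val, int *val2)
{
	*val = 128;		/* placeholder threshold */
	return IIO_VAL_INT;	/* formatted via iio_format_value() */
}
/* wired up as .read_event_value_new = foo_read_event_value in iio_info */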
diff --git a/drivers/iio/industrialio-triggered-buffer.c b/drivers/iio/industrialio-triggered-buffer.c
index 46c619b0d8c..d6f54930b34 100644
--- a/drivers/iio/industrialio-triggered-buffer.c
+++ b/drivers/iio/industrialio-triggered-buffer.c
@@ -17,7 +17,6 @@
#include <linux/iio/trigger_consumer.h>
static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
- .preenable = &iio_sw_buffer_preenable,
.postenable = &iio_triggered_buffer_postenable,
.predisable = &iio_triggered_buffer_predisable,
};
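
With bytes_per_datum now computed centrally in __iio_update_buffers(), drivers that still need a preenable hook keep only device-specific work in it. A hedged sketch (foo_* names invented):

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>

static int foo_buffer_preenable(struct iio_dev *indio_dev)
{
	/* Only device-specific work remains, e.g. powering up the sensor */
	return 0;
}

static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
	.preenable = &foo_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};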
@@ -47,14 +46,17 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
irqreturn_t (*pollfunc_th)(int irq, void *p),
const struct iio_buffer_setup_ops *setup_ops)
{
+ struct iio_buffer *buffer;
int ret;
- indio_dev->buffer = iio_kfifo_allocate(indio_dev);
- if (!indio_dev->buffer) {
+ buffer = iio_kfifo_allocate(indio_dev);
+ if (!buffer) {
ret = -ENOMEM;
goto error_ret;
}
+ iio_device_attach_buffer(indio_dev, buffer);
+
indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
pollfunc_th,
IRQF_ONESHOT,
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index a923c78d5cb..95c6fc81c2c 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -7,10 +7,12 @@
#include <linux/mutex.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/sched.h>
+#include <linux/poll.h>
struct iio_kfifo {
struct iio_buffer buffer;
struct kfifo kf;
+ struct mutex user_lock;
int update_needed;
};
@@ -31,13 +33,18 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
- if (!buf->update_needed)
- goto error_ret;
- kfifo_free(&buf->kf);
- ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
+ mutex_lock(&buf->user_lock);
+ if (buf->update_needed) {
+ kfifo_free(&buf->kf);
+ ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
+ buf->update_needed = false;
+ } else {
+ kfifo_reset_out(&buf->kf);
+ }
r->stufftoread = false;
-error_ret:
+ mutex_unlock(&buf->user_lock);
+
return ret;
}
@@ -94,7 +101,7 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
}
static int iio_store_to_kfifo(struct iio_buffer *r,
- u8 *data)
+ const void *data)
{
int ret;
struct iio_kfifo *kf = iio_to_kfifo(r);
@@ -102,7 +109,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
if (ret != 1)
return -EBUSY;
r->stufftoread = true;
- wake_up_interruptible(&r->pollq);
+ wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
return 0;
}
@@ -113,12 +120,13 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
int ret, copied;
struct iio_kfifo *kf = iio_to_kfifo(r);
- if (n < r->bytes_per_datum || r->bytes_per_datum == 0)
- return -EINVAL;
+ if (mutex_lock_interruptible(&kf->user_lock))
+ return -ERESTARTSYS;
- ret = kfifo_to_user(&kf->kf, buf, n, &copied);
- if (ret < 0)
- return ret;
+ if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
+ ret = -EINVAL;
+ else
+ ret = kfifo_to_user(&kf->kf, buf, n, &copied);
if (kfifo_is_empty(&kf->kf))
r->stufftoread = false;
@@ -126,9 +134,22 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
if (!kfifo_is_empty(&kf->kf))
r->stufftoread = true;
+ mutex_unlock(&kf->user_lock);
+ if (ret < 0)
+ return ret;
+
return copied;
}
+static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(buffer);
+
+ mutex_destroy(&kf->user_lock);
+ kfifo_free(&kf->kf);
+ kfree(kf);
+}
+
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
@@ -137,6 +158,7 @@ static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
.get_length = &iio_get_length_kfifo,
.set_length = &iio_set_length_kfifo,
+ .release = &iio_kfifo_buffer_release,
};
struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
@@ -151,13 +173,14 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
kf->buffer.attrs = &iio_kfifo_attribute_group;
kf->buffer.access = &kfifo_access_funcs;
kf->buffer.length = 2;
+ mutex_init(&kf->user_lock);
return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);
void iio_kfifo_free(struct iio_buffer *r)
{
- kfree(iio_to_kfifo(r));
+ iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
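
A hedged sketch of the allocation/teardown pattern for drivers that manage their own kfifo after this series (foo_* names invented): the buffer is handed to the core with iio_device_attach_buffer(), which takes a reference, and iio_kfifo_free() now merely drops the allocator's reference:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

static int foo_setup_buffer(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate(indio_dev);
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);
	return 0;
}

static void foo_teardown_buffer(struct iio_dev *indio_dev)
{
	iio_kfifo_free(indio_dev->buffer);
}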
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index bf9fa0d7aff..f98c2b50925 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -27,6 +27,29 @@ config APDS9300
To compile this driver as a module, choose M here: the
module will be called apds9300.
+config CM36651
+ depends on I2C
+ tristate "CM36651 driver"
+ help
+ Say Y here if you use cm36651.
+ This option enables proximity & RGB sensor using
+ Capella cm36651 device driver.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cm36651.
+
+config GP2AP020A00F
+ tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip
+ hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gp2ap020a00f.
+
config HID_SENSOR_ALS
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -55,6 +78,16 @@ config SENSORS_LM3533
changes. The ALS-control output values can be set per zone for the
three current output channels.
+config TCS3472
+ tristate "TAOS TCS3472 color light-to-digital converter"
+ depends on I2C
+ help
+ If you say yes here you get support for the TAOS TCS3472
+ family of color light-to-digital converters with IR filter.
+
+ This driver can also be built as a module. If so, the module
+ will be called tcs3472.
+
config SENSORS_TSL2563
tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
depends on I2C
@@ -65,6 +98,16 @@ config SENSORS_TSL2563
This driver can also be built as a module. If so, the module
will be called tsl2563.
+config TSL4531
+ tristate "TAOS TSL4531 ambient light sensors"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the TAOS TSL4531 family
+ of ambient light sensors with direct lux output.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsl4531.
+
config VCNL4000
tristate "VCNL4000 combined ALS and proximity sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 354ee9ab237..daa327f39e0 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -5,7 +5,11 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_APDS9300) += apds9300.o
+obj-$(CONFIG_CM36651) += cm36651.o
+obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
+obj-$(CONFIG_TCS3472) += tcs3472.o
+obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 23cff798598..83d15c5baf6 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -114,43 +114,6 @@ static int adjd_s311_read_data(struct iio_dev *indio_dev, u8 reg, int *val)
return 0;
}
-static ssize_t adjd_s311_read_int_time(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, char *buf)
-{
- struct adjd_s311_data *data = iio_priv(indio_dev);
- s32 ret;
-
- ret = i2c_smbus_read_word_data(data->client,
- ADJD_S311_INT_REG(chan->address));
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", ret & ADJD_S311_INT_MASK);
-}
-
-static ssize_t adjd_s311_write_int_time(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
- size_t len)
-{
- struct adjd_s311_data *data = iio_priv(indio_dev);
- unsigned long int_time;
- int ret;
-
- ret = kstrtoul(buf, 10, &int_time);
- if (ret)
- return ret;
-
- if (int_time > ADJD_S311_INT_MASK)
- return -EINVAL;
-
- ret = i2c_smbus_write_word_data(data->client,
- ADJD_S311_INT_REG(chan->address), int_time);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -175,10 +138,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
len += 2;
}
- if (indio_dev->scan_timestamp)
- *(s64 *)((u8 *)data->buffer + ALIGN(len, sizeof(s64)))
- = time_ns;
- iio_push_to_buffers(indio_dev, (u8 *)data->buffer);
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -186,25 +146,16 @@ done:
return IRQ_HANDLED;
}
-static const struct iio_chan_spec_ext_info adjd_s311_ext_info[] = {
- {
- .name = "integration_time",
- .read = adjd_s311_read_int_time,
- .write = adjd_s311_write_int_time,
- },
- { }
-};
-
#define ADJD_S311_CHANNEL(_color, _scan_idx) { \
.type = IIO_INTENSITY, \
.modified = 1, \
.address = (IDX_##_color), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
.channel2 = (IIO_MOD_LIGHT_##_color), \
.scan_index = (_scan_idx), \
.scan_type = IIO_ST('u', 10, 16, 0), \
- .ext_info = adjd_s311_ext_info, \
}
static const struct iio_chan_spec adjd_s311_channels[] = {
@@ -236,6 +187,18 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret & ADJD_S311_CAP_MASK;
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = i2c_smbus_read_word_data(data->client,
+ ADJD_S311_INT_REG(chan->address));
+ if (ret < 0)
+ return ret;
+ *val = 0;
+		/*
+		 * Not documented; based on measurement, 4095 LSBs
+		 * correspond to roughly 4 ms, so the raw register value
+		 * is reported directly as microseconds.
+		 */
+ *val2 = ret & ADJD_S311_INT_MASK;
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
@@ -245,16 +208,20 @@ static int adjd_s311_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct adjd_s311_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_HARDWAREGAIN:
if (val < 0 || val > ADJD_S311_CAP_MASK)
return -EINVAL;
- ret = i2c_smbus_write_byte_data(data->client,
+ return i2c_smbus_write_byte_data(data->client,
ADJD_S311_CAP_REG(chan->address), val);
- return ret;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0 || val2 < 0 || val2 > ADJD_S311_INT_MASK)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_data(data->client,
+ ADJD_S311_INT_REG(chan->address), val2);
}
return -EINVAL;
}
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 66a58bda6dc..51097bbd59c 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -273,12 +273,14 @@ static int apds9300_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int apds9300_read_thresh(struct iio_dev *indio_dev, u64 event_code,
- int *val)
+static int apds9300_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
{
struct apds9300_data *data = iio_priv(indio_dev);
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ switch (dir) {
case IIO_EV_DIR_RISING:
*val = data->thresh_hi;
break;
@@ -289,17 +291,19 @@ static int apds9300_read_thresh(struct iio_dev *indio_dev, u64 event_code,
return -EINVAL;
}
- return 0;
+ return IIO_VAL_INT;
}
-static int apds9300_write_thresh(struct iio_dev *indio_dev, u64 event_code,
- int val)
+static int apds9300_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
{
struct apds9300_data *data = iio_priv(indio_dev);
int ret;
mutex_lock(&data->mutex);
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
+ if (dir == IIO_EV_DIR_RISING)
ret = apds9300_set_thresh_hi(data, val);
else
ret = apds9300_set_thresh_low(data, val);
@@ -309,7 +313,9 @@ static int apds9300_write_thresh(struct iio_dev *indio_dev, u64 event_code,
}
static int apds9300_read_interrupt_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct apds9300_data *data = iio_priv(indio_dev);
@@ -317,7 +323,8 @@ static int apds9300_read_interrupt_config(struct iio_dev *indio_dev,
}
static int apds9300_write_interrupt_config(struct iio_dev *indio_dev,
- u64 event_code, int state)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
{
struct apds9300_data *data = iio_priv(indio_dev);
int ret;
@@ -337,10 +344,24 @@ static const struct iio_info apds9300_info_no_irq = {
static const struct iio_info apds9300_info = {
.driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
- .read_event_value = apds9300_read_thresh,
- .write_event_value = apds9300_write_thresh,
- .read_event_config = apds9300_read_interrupt_config,
- .write_event_config = apds9300_write_interrupt_config,
+ .read_event_value_new = apds9300_read_thresh,
+ .write_event_value_new = apds9300_write_thresh,
+ .read_event_config_new = apds9300_read_interrupt_config,
+ .write_event_config_new = apds9300_write_interrupt_config,
+};
+
+static const struct iio_event_spec apds9300_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
};
static const struct iio_chan_spec apds9300_channels[] = {
@@ -355,10 +376,8 @@ static const struct iio_chan_spec apds9300_channels[] = {
.channel2 = IIO_MOD_LIGHT_BOTH,
.indexed = true,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING)),
+ .event_spec = apds9300_event_spec,
+ .num_event_specs = ARRAY_SIZE(apds9300_event_spec),
}, {
.type = IIO_INTENSITY,
.channel = 1,
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
new file mode 100644
index 00000000000..21df5713001
--- /dev/null
+++ b/drivers/iio/light/cm36651.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+/* 7-bit I2C slave address 0x19 of the proximity sensor (PS) */
+#define CM36651_I2C_ADDR_PS 0x19
+/* Alert Response Address */
+#define CM36651_ARA 0x0C
+
+/* Ambient light sensor */
+#define CM36651_CS_CONF1 0x00
+#define CM36651_CS_CONF2 0x01
+#define CM36651_ALS_WH_M 0x02
+#define CM36651_ALS_WH_L 0x03
+#define CM36651_ALS_WL_M 0x04
+#define CM36651_ALS_WL_L 0x05
+#define CM36651_CS_CONF3 0x06
+#define CM36651_CS_CONF_REG_NUM 0x02
+
+/* Proximity sensor */
+#define CM36651_PS_CONF1 0x00
+#define CM36651_PS_THD 0x01
+#define CM36651_PS_CANC 0x02
+#define CM36651_PS_CONF2 0x03
+#define CM36651_PS_REG_NUM 0x04
+
+/* CS_CONF1 command code */
+#define CM36651_ALS_ENABLE 0x00
+#define CM36651_ALS_DISABLE 0x01
+#define CM36651_ALS_INT_EN 0x02
+#define CM36651_ALS_THRES 0x04
+
+/* CS_CONF2 command code */
+#define CM36651_CS_CONF2_DEFAULT_BIT 0x08
+
+/* CS_CONF3 channel integration time */
+#define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */
+#define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */
+#define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */
+#define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */
+
+/* PS_CONF1 command code */
+#define CM36651_PS_ENABLE 0x00
+#define CM36651_PS_DISABLE 0x01
+#define CM36651_PS_INT_EN 0x02
+#define CM36651_PS_PERS2 0x04
+#define CM36651_PS_PERS3 0x08
+#define CM36651_PS_PERS4 0x0C
+
+/* PS_CONF1 command code: integration time */
+#define CM36651_PS_IT1 0x00 /* Integration time 320 usec */
+#define CM36651_PS_IT2 0x10 /* Integration time 420 usec */
+#define CM36651_PS_IT3 0x20 /* Integration time 520 usec */
+#define CM36651_PS_IT4 0x30 /* Integration time 640 usec */
+
+/* PS_CONF1 command code: duty ratio */
+#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */
+#define CM36651_PS_DR2 0x40 /* Duty ratio 1/160 */
+#define CM36651_PS_DR3 0x80 /* Duty ratio 1/320 */
+#define CM36651_PS_DR4 0xC0 /* Duty ratio 1/640 */
+
+/* PS_THD command code */
+#define CM36651_PS_INITIAL_THD 0x05
+
+/* PS_CANC command code */
+#define CM36651_PS_CANC_DEFAULT 0x00
+
+/* PS_CONF2 command code */
+#define CM36651_PS_HYS1 0x00
+#define CM36651_PS_HYS2 0x01
+#define CM36651_PS_SMART_PERS_EN 0x02
+#define CM36651_PS_DIR_INT 0x04
+#define CM36651_PS_MS 0x10
+
+#define CM36651_CS_COLOR_NUM 4
+
+#define CM36651_CLOSE_PROXIMITY 0x32
+#define CM36651_FAR_PROXIMITY 0x33
+
+#define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000"
+#define CM36651_PS_INT_TIME_AVAIL "320 420 520 640"
+
+enum cm36651_operation_mode {
+ CM36651_LIGHT_EN,
+ CM36651_PROXIMITY_EN,
+ CM36651_PROXIMITY_EV_EN,
+};
+
+enum cm36651_light_channel_idx {
+ CM36651_LIGHT_CHANNEL_IDX_RED,
+ CM36651_LIGHT_CHANNEL_IDX_GREEN,
+ CM36651_LIGHT_CHANNEL_IDX_BLUE,
+ CM36651_LIGHT_CHANNEL_IDX_CLEAR,
+};
+
+enum cm36651_command {
+ CM36651_CMD_READ_RAW_LIGHT,
+ CM36651_CMD_READ_RAW_PROXIMITY,
+ CM36651_CMD_PROX_EV_EN,
+ CM36651_CMD_PROX_EV_DIS,
+};
+
+static const u8 cm36651_cs_reg[CM36651_CS_CONF_REG_NUM] = {
+ CM36651_CS_CONF1,
+ CM36651_CS_CONF2,
+};
+
+static const u8 cm36651_ps_reg[CM36651_PS_REG_NUM] = {
+ CM36651_PS_CONF1,
+ CM36651_PS_THD,
+ CM36651_PS_CANC,
+ CM36651_PS_CONF2,
+};
+
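+/*
+ * The chip responds on three I2C addresses: the main (ALS) address from
+ * the board description, the fixed proximity sensor address (0x19) and
+ * the SMBus Alert Response Address (0x0c), which is read in the IRQ
+ * handler to clear the PS interrupt; a dummy client is kept for each of
+ * the latter two.
+ */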
+struct cm36651_data {
+ const struct cm36651_platform_data *pdata;
+ struct i2c_client *client;
+ struct i2c_client *ps_client;
+ struct i2c_client *ara_client;
+ struct mutex lock;
+ struct regulator *vled_reg;
+ unsigned long flags;
+ int cs_int_time[CM36651_CS_COLOR_NUM];
+ int ps_int_time;
+ u8 cs_ctrl_regs[CM36651_CS_CONF_REG_NUM];
+ u8 ps_ctrl_regs[CM36651_PS_REG_NUM];
+ u16 color[CM36651_CS_COLOR_NUM];
+};
+
+static int cm36651_setup_reg(struct cm36651_data *cm36651)
+{
+ struct i2c_client *client = cm36651->client;
+ struct i2c_client *ps_client = cm36651->ps_client;
+ int i, ret;
+
+ /* CS initialization */
+ cm36651->cs_ctrl_regs[CM36651_CS_CONF1] = CM36651_ALS_ENABLE |
+ CM36651_ALS_THRES;
+ cm36651->cs_ctrl_regs[CM36651_CS_CONF2] = CM36651_CS_CONF2_DEFAULT_BIT;
+
+ for (i = 0; i < CM36651_CS_CONF_REG_NUM; i++) {
+ ret = i2c_smbus_write_byte_data(client, cm36651_cs_reg[i],
+ cm36651->cs_ctrl_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* PS initialization */
+ cm36651->ps_ctrl_regs[CM36651_PS_CONF1] = CM36651_PS_ENABLE |
+ CM36651_PS_IT2;
+ cm36651->ps_ctrl_regs[CM36651_PS_THD] = CM36651_PS_INITIAL_THD;
+ cm36651->ps_ctrl_regs[CM36651_PS_CANC] = CM36651_PS_CANC_DEFAULT;
+ cm36651->ps_ctrl_regs[CM36651_PS_CONF2] = CM36651_PS_HYS2 |
+ CM36651_PS_DIR_INT | CM36651_PS_SMART_PERS_EN;
+
+ for (i = 0; i < CM36651_PS_REG_NUM; i++) {
+ ret = i2c_smbus_write_byte_data(ps_client, cm36651_ps_reg[i],
+ cm36651->ps_ctrl_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Set shutdown mode */
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF1,
+ CM36651_ALS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(cm36651->ps_client,
+ CM36651_PS_CONF1, CM36651_PS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cm36651_read_output(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct i2c_client *client = cm36651->client;
+ int ret = -EINVAL;
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val = i2c_smbus_read_word_data(client, chan->address);
+ if (*val < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF1,
+ CM36651_ALS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_PROXIMITY:
+ *val = i2c_smbus_read_byte(cm36651->ps_client);
+ if (*val < 0)
+ return ret;
+
+ if (!test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags)) {
+ ret = i2c_smbus_write_byte_data(cm36651->ps_client,
+ CM36651_PS_CONF1, CM36651_PS_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static irqreturn_t cm36651_irq_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ struct i2c_client *client = cm36651->client;
+ int ev_dir, ret;
+ u64 ev_code;
+
+ /*
+	 * The PS INT pin is an active-low signal: it is driven low when an
+	 * object is detected. Once the host MCU has received the low PS INT
+	 * signal, it needs to read the data at the Alert Response Address
+	 * (ARA) to clear the interrupt. After the clearing, the PS INT
+	 * signal toggles from low back to high.
+ */
+ ret = i2c_smbus_read_byte(cm36651->ara_client);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: Data read failed: %d\n", __func__, ret);
+ return IRQ_HANDLED;
+ }
+ switch (ret) {
+ case CM36651_CLOSE_PROXIMITY:
+ ev_dir = IIO_EV_DIR_RISING;
+ break;
+ case CM36651_FAR_PROXIMITY:
+ ev_dir = IIO_EV_DIR_FALLING;
+ break;
+ default:
+ dev_err(&client->dev,
+ "%s: Data read wrong: %d\n", __func__, ret);
+ return IRQ_HANDLED;
+ }
+
+ ev_code = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
+ CM36651_CMD_READ_RAW_PROXIMITY,
+ IIO_EV_TYPE_THRESH, ev_dir);
+
+ iio_push_event(indio_dev, ev_code, iio_get_time_ns());
+
+ return IRQ_HANDLED;
+}
+
+static int cm36651_set_operation_mode(struct cm36651_data *cm36651, int cmd)
+{
+ struct i2c_client *client = cm36651->client;
+ struct i2c_client *ps_client = cm36651->ps_client;
+ int ret = -EINVAL;
+
+ switch (cmd) {
+ case CM36651_CMD_READ_RAW_LIGHT:
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF1,
+ cm36651->cs_ctrl_regs[CM36651_CS_CONF1]);
+ break;
+ case CM36651_CMD_READ_RAW_PROXIMITY:
+ if (test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags))
+ return CM36651_PROXIMITY_EV_EN;
+
+ ret = i2c_smbus_write_byte_data(ps_client, CM36651_PS_CONF1,
+ cm36651->ps_ctrl_regs[CM36651_PS_CONF1]);
+ break;
+ case CM36651_CMD_PROX_EV_EN:
+ if (test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags)) {
+ dev_err(&client->dev,
+				"Proximity event already enabled\n");
+ return ret;
+ }
+ set_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags);
+
+ ret = i2c_smbus_write_byte_data(ps_client,
+ cm36651_ps_reg[CM36651_PS_CONF1],
+ CM36651_PS_INT_EN | CM36651_PS_PERS2 | CM36651_PS_IT2);
+
+ if (ret < 0) {
+			dev_err(&client->dev, "Enabling proximity event failed\n");
+ return ret;
+ }
+ break;
+ case CM36651_CMD_PROX_EV_DIS:
+ if (!test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags)) {
+ dev_err(&client->dev,
+				"Proximity event already disabled\n");
+ return ret;
+ }
+ clear_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags);
+ ret = i2c_smbus_write_byte_data(ps_client,
+ CM36651_PS_CONF1, CM36651_PS_DISABLE);
+ break;
+ }
+
+ if (ret < 0)
+ dev_err(&client->dev, "Write register failed\n");
+
+ return ret;
+}
+
+static int cm36651_read_channel(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct i2c_client *client = cm36651->client;
+ int cmd, ret;
+
+ if (chan->type == IIO_LIGHT)
+ cmd = CM36651_CMD_READ_RAW_LIGHT;
+ else if (chan->type == IIO_PROXIMITY)
+ cmd = CM36651_CMD_READ_RAW_PROXIMITY;
+ else
+ return -EINVAL;
+
+ ret = cm36651_set_operation_mode(cm36651, cmd);
+ if (ret < 0) {
+ dev_err(&client->dev, "CM36651 set operation mode failed\n");
+ return ret;
+ }
+	/* Give the sensor time to produce a measurement after enabling */
+ msleep(50);
+ ret = cm36651_read_output(cm36651, chan, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "CM36651 read output failed\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int cm36651_read_int_time(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int *val)
+{
+ switch (chan->type) {
+ case IIO_LIGHT:
+ if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
+ *val = 80000;
+ else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
+ *val = 160000;
+ else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
+ *val = 320000;
+ else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
+ *val = 640000;
+ else
+ return -EINVAL;
+ break;
+ case IIO_PROXIMITY:
+ if (cm36651->ps_int_time == CM36651_PS_IT1)
+ *val = 320;
+ else if (cm36651->ps_int_time == CM36651_PS_IT2)
+ *val = 420;
+ else if (cm36651->ps_int_time == CM36651_PS_IT3)
+ *val = 520;
+ else if (cm36651->ps_int_time == CM36651_PS_IT4)
+ *val = 640;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int cm36651_write_int_time(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int val)
+{
+ struct i2c_client *client = cm36651->client;
+ struct i2c_client *ps_client = cm36651->ps_client;
+ int int_time, ret;
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ if (val == 80000)
+ int_time = CM36651_CS_IT1;
+ else if (val == 160000)
+ int_time = CM36651_CS_IT2;
+ else if (val == 320000)
+ int_time = CM36651_CS_IT3;
+ else if (val == 640000)
+ int_time = CM36651_CS_IT4;
+ else
+ return -EINVAL;
+
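+		/*
+		 * CS_CONF3 appears to hold a 2-bit integration time field per
+		 * colour channel (the IT codes above occupy bits 7:6); shift
+		 * the code into the field selected by the channel address.
+		 */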
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF3,
+ int_time >> 2 * (chan->address));
+ if (ret < 0) {
+ dev_err(&client->dev, "CS integration time write failed\n");
+ return ret;
+ }
+ cm36651->cs_int_time[chan->address] = int_time;
+ break;
+ case IIO_PROXIMITY:
+ if (val == 320)
+ int_time = CM36651_PS_IT1;
+ else if (val == 420)
+ int_time = CM36651_PS_IT2;
+ else if (val == 520)
+ int_time = CM36651_PS_IT3;
+ else if (val == 640)
+ int_time = CM36651_PS_IT4;
+ else
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(ps_client,
+ CM36651_PS_CONF1, int_time);
+ if (ret < 0) {
+ dev_err(&client->dev, "PS integration time write failed\n");
+ return ret;
+ }
+ cm36651->ps_int_time = int_time;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cm36651_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&cm36651->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = cm36651_read_channel(cm36651, chan, val);
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = cm36651_read_int_time(cm36651, chan, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&cm36651->lock);
+
+ return ret;
+}
+
+static int cm36651_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ struct i2c_client *client = cm36651->client;
+ int ret = -EINVAL;
+
+ if (mask == IIO_CHAN_INFO_INT_TIME) {
+ ret = cm36651_write_int_time(cm36651, chan, val);
+ if (ret < 0)
+ dev_err(&client->dev, "Integration time write failed\n");
+ }
+
+ return ret;
+}
+
+static int cm36651_read_prox_thresh(struct iio_dev *indio_dev,
+ u64 event_code, int *val)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+
+ *val = cm36651->ps_ctrl_regs[CM36651_PS_THD];
+
+ return 0;
+}
+
+static int cm36651_write_prox_thresh(struct iio_dev *indio_dev,
+ u64 event_code, int val)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ struct i2c_client *client = cm36651->client;
+ int ret;
+
+ if (val < 3 || val > 255)
+ return -EINVAL;
+
+ cm36651->ps_ctrl_regs[CM36651_PS_THD] = val;
+ ret = i2c_smbus_write_byte_data(cm36651->ps_client, CM36651_PS_THD,
+ cm36651->ps_ctrl_regs[CM36651_PS_THD]);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "PS threshold write failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
+ u64 event_code, int state)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ int cmd, ret = -EINVAL;
+
+ mutex_lock(&cm36651->lock);
+
+ cmd = state ? CM36651_CMD_PROX_EV_EN : CM36651_CMD_PROX_EV_DIS;
+ ret = cm36651_set_operation_mode(cm36651, cmd);
+
+ mutex_unlock(&cm36651->lock);
+
+ return ret;
+}
+
+static int cm36651_read_prox_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ int event_en;
+
+ mutex_lock(&cm36651->lock);
+
+ event_en = test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags);
+
+ mutex_unlock(&cm36651->lock);
+
+ return event_en;
+}
+
+#define CM36651_LIGHT_CHANNEL(_color, _idx) { \
+ .type = IIO_LIGHT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .address = _idx, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+}
+
+static const struct iio_chan_spec cm36651_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER)
+ },
+ CM36651_LIGHT_CHANNEL(RED, CM36651_LIGHT_CHANNEL_IDX_RED),
+ CM36651_LIGHT_CHANNEL(GREEN, CM36651_LIGHT_CHANNEL_IDX_GREEN),
+ CM36651_LIGHT_CHANNEL(BLUE, CM36651_LIGHT_CHANNEL_IDX_BLUE),
+ CM36651_LIGHT_CHANNEL(CLEAR, CM36651_LIGHT_CHANNEL_IDX_CLEAR),
+};
+
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ CM36651_CS_INT_TIME_AVAIL);
+static IIO_CONST_ATTR(in_proximity_integration_time_available,
+ CM36651_PS_INT_TIME_AVAIL);
+
+static struct attribute *cm36651_attributes[] = {
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_proximity_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm36651_attribute_group = {
+ .attrs = cm36651_attributes
+};
+
+static const struct iio_info cm36651_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cm36651_read_raw,
+ .write_raw = &cm36651_write_raw,
+ .read_event_value = &cm36651_read_prox_thresh,
+ .write_event_value = &cm36651_write_prox_thresh,
+ .read_event_config = &cm36651_read_prox_event_config,
+ .write_event_config = &cm36651_write_prox_event_config,
+ .attrs = &cm36651_attribute_group,
+};
+
+static int cm36651_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm36651_data *cm36651;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm36651));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ cm36651 = iio_priv(indio_dev);
+
+ cm36651->vled_reg = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(cm36651->vled_reg)) {
+ dev_err(&client->dev, "get regulator vled failed\n");
+ return PTR_ERR(cm36651->vled_reg);
+ }
+
+ ret = regulator_enable(cm36651->vled_reg);
+ if (ret) {
+ dev_err(&client->dev, "enable regulator vled failed\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ cm36651->client = client;
+ cm36651->ps_client = i2c_new_dummy(client->adapter,
+ CM36651_I2C_ADDR_PS);
+ cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
+ mutex_init(&cm36651->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = cm36651_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cm36651_channels);
+ indio_dev->info = &cm36651_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = cm36651_setup_reg(cm36651);
+ if (ret) {
+ dev_err(&client->dev, "%s: register setup failed\n", __func__);
+ goto error_disable_reg;
+ }
+
+ ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cm36651", indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "%s: request irq failed\n", __func__);
+ goto error_disable_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+		dev_err(&client->dev, "%s: register device failed\n", __func__);
+ goto error_free_irq;
+ }
+
+ return 0;
+
+error_free_irq:
+ free_irq(client->irq, indio_dev);
+error_disable_reg:
+ regulator_disable(cm36651->vled_reg);
+ return ret;
+}
+
+static int cm36651_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(cm36651->vled_reg);
+ free_irq(client->irq, indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id cm36651_id[] = {
+ { "cm36651", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, cm36651_id);
+
+static const struct of_device_id cm36651_of_match[] = {
+ { .compatible = "capella,cm36651" },
+ { }
+};
+
+static struct i2c_driver cm36651_driver = {
+ .driver = {
+ .name = "cm36651",
+ .of_match_table = of_match_ptr(cm36651_of_match),
+ .owner = THIS_MODULE,
+ },
+ .probe = cm36651_probe,
+ .remove = cm36651_remove,
+ .id_table = cm36651_id,
+};
+
+module_i2c_driver(cm36651_driver);
+
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_DESCRIPTION("CM36651 proximity/ambient light sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
new file mode 100644
index 00000000000..dc79835be30
--- /dev/null
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -0,0 +1,1654 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * IIO features supported by the driver:
+ *
+ * Read-only raw channels:
+ * - illuminance_clear [lux]
+ * - illuminance_ir
+ * - proximity
+ *
+ * Triggered buffer:
+ * - illuminance_clear
+ * - illuminance_ir
+ * - proximity
+ *
+ * Events:
+ * - illuminance_clear (rising and falling)
+ * - proximity (rising and falling)
+ * - both the falling and the rising threshold for the proximity events
+ *   must be set to values greater than 0.
+ *
+ * The driver supports triggered buffers for all three channels as well
+ * as high and low threshold events for the illuminance_clear and
+ * proximity channels. Triggers can be enabled simultaneously with both
+ * illuminance_clear events. Proximity events cannot be enabled
+ * simultaneously with any triggers or illuminance events.
+ * Enabling/disabling one of the proximity events automatically
+ * enables/disables the other one.
+ *
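+ * All of the above is exposed through the standard IIO user space
+ * interfaces: sysfs attributes for raw channel access and event
+ * configuration, and the IIO character device for buffered data and
+ * event notifications.
+ *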
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define GP2A_I2C_NAME "gp2ap020a00f"
+
+/* Registers */
+#define GP2AP020A00F_OP_REG 0x00 /* Basic operations */
+#define GP2AP020A00F_ALS_REG 0x01 /* ALS related settings */
+#define GP2AP020A00F_PS_REG 0x02 /* PS related settings */
+#define GP2AP020A00F_LED_REG 0x03 /* LED reg */
+#define GP2AP020A00F_TL_L_REG 0x04 /* ALS: Threshold low LSB */
+#define GP2AP020A00F_TL_H_REG 0x05 /* ALS: Threshold low MSB */
+#define GP2AP020A00F_TH_L_REG 0x06 /* ALS: Threshold high LSB */
+#define GP2AP020A00F_TH_H_REG 0x07 /* ALS: Threshold high MSB */
+#define GP2AP020A00F_PL_L_REG 0x08 /* PS: Threshold low LSB */
+#define GP2AP020A00F_PL_H_REG 0x09 /* PS: Threshold low MSB */
+#define GP2AP020A00F_PH_L_REG 0x0a /* PS: Threshold high LSB */
+#define GP2AP020A00F_PH_H_REG 0x0b /* PS: Threshold high MSB */
+#define GP2AP020A00F_D0_L_REG 0x0c /* ALS result: Clear/Illuminance LSB */
+#define GP2AP020A00F_D0_H_REG 0x0d /* ALS result: Clear/Illuminance MSB */
+#define GP2AP020A00F_D1_L_REG 0x0e /* ALS result: IR LSB */
+#define GP2AP020A00F_D1_H_REG	0x0f /* ALS result: IR MSB */
+#define GP2AP020A00F_D2_L_REG 0x10 /* PS result LSB */
+#define GP2AP020A00F_D2_H_REG 0x11 /* PS result MSB */
+#define GP2AP020A00F_NUM_REGS 0x12 /* Number of registers */
+
+/* OP_REG bits */
+#define GP2AP020A00F_OP3_MASK 0x80 /* Software shutdown */
+#define GP2AP020A00F_OP3_SHUTDOWN 0x00
+#define GP2AP020A00F_OP3_OPERATION 0x80
+#define GP2AP020A00F_OP2_MASK 0x40 /* Auto shutdown/Continuous mode */
+#define GP2AP020A00F_OP2_AUTO_SHUTDOWN 0x00
+#define GP2AP020A00F_OP2_CONT_OPERATION 0x40
+#define GP2AP020A00F_OP_MASK 0x30 /* Operating mode selection */
+#define GP2AP020A00F_OP_ALS_AND_PS 0x00
+#define GP2AP020A00F_OP_ALS 0x10
+#define GP2AP020A00F_OP_PS 0x20
+#define GP2AP020A00F_OP_DEBUG 0x30
+#define GP2AP020A00F_PROX_MASK 0x08 /* PS: detection/non-detection */
+#define GP2AP020A00F_PROX_NON_DETECT 0x00
+#define GP2AP020A00F_PROX_DETECT 0x08
+#define GP2AP020A00F_FLAG_P 0x04 /* PS: interrupt result */
+#define GP2AP020A00F_FLAG_A 0x02 /* ALS: interrupt result */
+#define GP2AP020A00F_TYPE_MASK 0x01 /* Output data type selection */
+#define GP2AP020A00F_TYPE_MANUAL_CALC 0x00
+#define GP2AP020A00F_TYPE_AUTO_CALC 0x01
+
+/* ALS_REG bits */
+#define GP2AP020A00F_PRST_MASK 0xc0 /* Number of measurement cycles */
+#define GP2AP020A00F_PRST_ONCE 0x00
+#define GP2AP020A00F_PRST_4_CYCLES 0x40
+#define GP2AP020A00F_PRST_8_CYCLES 0x80
+#define GP2AP020A00F_PRST_16_CYCLES 0xc0
+#define GP2AP020A00F_RES_A_MASK 0x38 /* ALS: Resolution */
+#define GP2AP020A00F_RES_A_800ms 0x00
+#define GP2AP020A00F_RES_A_400ms 0x08
+#define GP2AP020A00F_RES_A_200ms 0x10
+#define GP2AP020A00F_RES_A_100ms 0x18
+#define GP2AP020A00F_RES_A_25ms 0x20
+#define GP2AP020A00F_RES_A_6_25ms 0x28
+#define GP2AP020A00F_RES_A_1_56ms 0x30
+#define GP2AP020A00F_RES_A_0_39ms 0x38
+#define GP2AP020A00F_RANGE_A_MASK 0x07 /* ALS: Max measurable range */
+#define GP2AP020A00F_RANGE_A_x1 0x00
+#define GP2AP020A00F_RANGE_A_x2 0x01
+#define GP2AP020A00F_RANGE_A_x4 0x02
+#define GP2AP020A00F_RANGE_A_x8 0x03
+#define GP2AP020A00F_RANGE_A_x16 0x04
+#define GP2AP020A00F_RANGE_A_x32 0x05
+#define GP2AP020A00F_RANGE_A_x64 0x06
+#define GP2AP020A00F_RANGE_A_x128 0x07
+
+/* PS_REG bits */
+#define GP2AP020A00F_ALC_MASK 0x80 /* Auto light cancel */
+#define GP2AP020A00F_ALC_ON 0x80
+#define GP2AP020A00F_ALC_OFF 0x00
+#define GP2AP020A00F_INTTYPE_MASK 0x40 /* Interrupt type setting */
+#define GP2AP020A00F_INTTYPE_LEVEL 0x00
+#define GP2AP020A00F_INTTYPE_PULSE 0x40
+#define GP2AP020A00F_RES_P_MASK 0x38 /* PS: Resolution */
+#define GP2AP020A00F_RES_P_800ms_x2 0x00
+#define GP2AP020A00F_RES_P_400ms_x2 0x08
+#define GP2AP020A00F_RES_P_200ms_x2 0x10
+#define GP2AP020A00F_RES_P_100ms_x2 0x18
+#define GP2AP020A00F_RES_P_25ms_x2 0x20
+#define GP2AP020A00F_RES_P_6_25ms_x2 0x28
+#define GP2AP020A00F_RES_P_1_56ms_x2 0x30
+#define GP2AP020A00F_RES_P_0_39ms_x2 0x38
+#define GP2AP020A00F_RANGE_P_MASK 0x07 /* PS: Max measurable range */
+#define GP2AP020A00F_RANGE_P_x1 0x00
+#define GP2AP020A00F_RANGE_P_x2 0x01
+#define GP2AP020A00F_RANGE_P_x4 0x02
+#define GP2AP020A00F_RANGE_P_x8 0x03
+#define GP2AP020A00F_RANGE_P_x16 0x04
+#define GP2AP020A00F_RANGE_P_x32 0x05
+#define GP2AP020A00F_RANGE_P_x64 0x06
+#define GP2AP020A00F_RANGE_P_x128 0x07
+
+/* LED reg bits */
+#define GP2AP020A00F_INTVAL_MASK 0xc0 /* Intermittent operating */
+#define GP2AP020A00F_INTVAL_0 0x00
+#define GP2AP020A00F_INTVAL_4 0x40
+#define GP2AP020A00F_INTVAL_8 0x80
+#define GP2AP020A00F_INTVAL_16 0xc0
+#define GP2AP020A00F_IS_MASK 0x30 /* ILED drive peak current */
+#define GP2AP020A00F_IS_13_8mA 0x00
+#define GP2AP020A00F_IS_27_5mA 0x10
+#define GP2AP020A00F_IS_55mA 0x20
+#define GP2AP020A00F_IS_110mA 0x30
+#define GP2AP020A00F_PIN_MASK 0x0c /* INT terminal setting */
+#define GP2AP020A00F_PIN_ALS_OR_PS 0x00
+#define GP2AP020A00F_PIN_ALS 0x04
+#define GP2AP020A00F_PIN_PS 0x08
+#define GP2AP020A00F_PIN_PS_DETECT 0x0c
+#define GP2AP020A00F_FREQ_MASK 0x02 /* LED modulation frequency */
+#define GP2AP020A00F_FREQ_327_5kHz 0x00
+#define GP2AP020A00F_FREQ_81_8kHz 0x02
+#define GP2AP020A00F_RST 0x01 /* Software reset */
+
+#define GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR 0
+#define GP2AP020A00F_SCAN_MODE_LIGHT_IR 1
+#define GP2AP020A00F_SCAN_MODE_PROXIMITY 2
+#define GP2AP020A00F_CHAN_TIMESTAMP 3
+
+#define GP2AP020A00F_DATA_READY_TIMEOUT msecs_to_jiffies(1000)
+#define GP2AP020A00F_DATA_REG(chan) (GP2AP020A00F_D0_L_REG + \
+ (chan) * 2)
+#define GP2AP020A00F_THRESH_REG(th_val_id) (GP2AP020A00F_TL_L_REG + \
+ (th_val_id) * 2)
+#define GP2AP020A00F_THRESH_VAL_ID(reg_addr) ((reg_addr - 4) / 2)
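+/*
+ * The data registers come in LSB/MSB pairs starting at D0_L (0x0c), so a
+ * scan index of 0/1/2 maps to D0/D1/D2 above.  The threshold registers
+ * likewise start at TL_L (0x04), which gives the value id mapping
+ * TL=0, TH=1, PL=2, PH=3 used by GP2AP020A00F_THRESH_VAL_ID().
+ */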
+
+#define GP2AP020A00F_SUBTRACT_MODE 0
+#define GP2AP020A00F_ADD_MODE 1
+
+#define GP2AP020A00F_MAX_CHANNELS 3
+
+enum gp2ap020a00f_opmode {
+ GP2AP020A00F_OPMODE_READ_RAW_CLEAR,
+ GP2AP020A00F_OPMODE_READ_RAW_IR,
+ GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_OPMODE_PS,
+ GP2AP020A00F_OPMODE_ALS_AND_PS,
+ GP2AP020A00F_OPMODE_PROX_DETECT,
+ GP2AP020A00F_OPMODE_SHUTDOWN,
+ GP2AP020A00F_NUM_OPMODES,
+};
+
+enum gp2ap020a00f_cmd {
+ GP2AP020A00F_CMD_READ_RAW_CLEAR,
+ GP2AP020A00F_CMD_READ_RAW_IR,
+ GP2AP020A00F_CMD_READ_RAW_PROXIMITY,
+ GP2AP020A00F_CMD_TRIGGER_CLEAR_EN,
+ GP2AP020A00F_CMD_TRIGGER_CLEAR_DIS,
+ GP2AP020A00F_CMD_TRIGGER_IR_EN,
+ GP2AP020A00F_CMD_TRIGGER_IR_DIS,
+ GP2AP020A00F_CMD_TRIGGER_PROX_EN,
+ GP2AP020A00F_CMD_TRIGGER_PROX_DIS,
+ GP2AP020A00F_CMD_ALS_HIGH_EV_EN,
+ GP2AP020A00F_CMD_ALS_HIGH_EV_DIS,
+ GP2AP020A00F_CMD_ALS_LOW_EV_EN,
+ GP2AP020A00F_CMD_ALS_LOW_EV_DIS,
+ GP2AP020A00F_CMD_PROX_HIGH_EV_EN,
+ GP2AP020A00F_CMD_PROX_HIGH_EV_DIS,
+ GP2AP020A00F_CMD_PROX_LOW_EV_EN,
+ GP2AP020A00F_CMD_PROX_LOW_EV_DIS,
+};
+
+enum gp2ap020a00f_flags {
+ GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER,
+ GP2AP020A00F_FLAG_ALS_IR_TRIGGER,
+ GP2AP020A00F_FLAG_PROX_TRIGGER,
+ GP2AP020A00F_FLAG_PROX_RISING_EV,
+ GP2AP020A00F_FLAG_PROX_FALLING_EV,
+ GP2AP020A00F_FLAG_ALS_RISING_EV,
+ GP2AP020A00F_FLAG_ALS_FALLING_EV,
+ GP2AP020A00F_FLAG_LUX_MODE_HI,
+ GP2AP020A00F_FLAG_DATA_READY,
+};
+
+enum gp2ap020a00f_thresh_val_id {
+ GP2AP020A00F_THRESH_TL,
+ GP2AP020A00F_THRESH_TH,
+ GP2AP020A00F_THRESH_PL,
+ GP2AP020A00F_THRESH_PH,
+};
+
+struct gp2ap020a00f_data {
+ const struct gp2ap020a00f_platform_data *pdata;
+ struct i2c_client *client;
+ struct mutex lock;
+ char *buffer;
+ struct regulator *vled_reg;
+ unsigned long flags;
+ enum gp2ap020a00f_opmode cur_opmode;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ unsigned int thresh_val[4];
+ u8 debug_reg_addr;
+ struct irq_work work;
+ wait_queue_head_t data_ready_queue;
+};
+
+static const u8 gp2ap020a00f_reg_init_tab[] = {
+ [GP2AP020A00F_OP_REG] = GP2AP020A00F_OP3_SHUTDOWN,
+ [GP2AP020A00F_ALS_REG] = GP2AP020A00F_RES_A_25ms |
+ GP2AP020A00F_RANGE_A_x8,
+ [GP2AP020A00F_PS_REG] = GP2AP020A00F_ALC_ON |
+ GP2AP020A00F_RES_P_1_56ms_x2 |
+ GP2AP020A00F_RANGE_P_x4,
+ [GP2AP020A00F_LED_REG] = GP2AP020A00F_INTVAL_0 |
+ GP2AP020A00F_IS_110mA |
+ GP2AP020A00F_FREQ_327_5kHz,
+ [GP2AP020A00F_TL_L_REG] = 0,
+ [GP2AP020A00F_TL_H_REG] = 0,
+ [GP2AP020A00F_TH_L_REG] = 0,
+ [GP2AP020A00F_TH_H_REG] = 0,
+ [GP2AP020A00F_PL_L_REG] = 0,
+ [GP2AP020A00F_PL_H_REG] = 0,
+ [GP2AP020A00F_PH_L_REG] = 0,
+ [GP2AP020A00F_PH_H_REG] = 0,
+};
+
+static bool gp2ap020a00f_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case GP2AP020A00F_OP_REG:
+ case GP2AP020A00F_D0_L_REG:
+ case GP2AP020A00F_D0_H_REG:
+ case GP2AP020A00F_D1_L_REG:
+ case GP2AP020A00F_D1_H_REG:
+ case GP2AP020A00F_D2_L_REG:
+ case GP2AP020A00F_D2_H_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config gp2ap020a00f_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = GP2AP020A00F_D2_H_REG,
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = gp2ap020a00f_is_volatile_reg,
+};
+
+static const struct gp2ap020a00f_mutable_config_regs {
+ u8 op_reg;
+ u8 als_reg;
+ u8 ps_reg;
+ u8 led_reg;
+} opmode_regs_settings[GP2AP020A00F_NUM_OPMODES] = {
+ [GP2AP020A00F_OPMODE_READ_RAW_CLEAR] = {
+ GP2AP020A00F_OP_ALS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_AUTO_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS
+ },
+ [GP2AP020A00F_OPMODE_READ_RAW_IR] = {
+ GP2AP020A00F_OP_ALS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS
+ },
+ [GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY] = {
+ GP2AP020A00F_OP_PS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_PS
+ },
+ [GP2AP020A00F_OPMODE_PROX_DETECT] = {
+ GP2AP020A00F_OP_PS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_4_CYCLES,
+ GP2AP020A00F_INTTYPE_PULSE,
+ GP2AP020A00F_PIN_PS_DETECT
+ },
+ [GP2AP020A00F_OPMODE_ALS] = {
+ GP2AP020A00F_OP_ALS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_AUTO_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS
+ },
+ [GP2AP020A00F_OPMODE_PS] = {
+ GP2AP020A00F_OP_PS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_4_CYCLES,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_PS
+ },
+ [GP2AP020A00F_OPMODE_ALS_AND_PS] = {
+ GP2AP020A00F_OP_ALS_AND_PS
+ | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_AUTO_CALC,
+ GP2AP020A00F_PRST_4_CYCLES,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS_OR_PS
+ },
+ [GP2AP020A00F_OPMODE_SHUTDOWN] = { GP2AP020A00F_OP3_SHUTDOWN, },
+};
+
+static int gp2ap020a00f_set_operation_mode(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_opmode op)
+{
+ unsigned int op_reg_val;
+ int err;
+
+ if (op != GP2AP020A00F_OPMODE_SHUTDOWN) {
+ err = regmap_read(data->regmap, GP2AP020A00F_OP_REG,
+ &op_reg_val);
+ if (err < 0)
+ return err;
+ /*
+		 * Shut down the device if the requested operation entails a
+		 * mode transition.
+ */
+ if ((opmode_regs_settings[op].op_reg & GP2AP020A00F_OP_MASK) !=
+ (op_reg_val & GP2AP020A00F_OP_MASK)) {
+ /* set shutdown mode */
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG, GP2AP020A00F_OP3_MASK,
+ GP2AP020A00F_OP3_SHUTDOWN);
+ if (err < 0)
+ return err;
+ }
+
+ err = regmap_update_bits(data->regmap, GP2AP020A00F_ALS_REG,
+ GP2AP020A00F_PRST_MASK, opmode_regs_settings[op]
+ .als_reg);
+ if (err < 0)
+ return err;
+
+ err = regmap_update_bits(data->regmap, GP2AP020A00F_PS_REG,
+ GP2AP020A00F_INTTYPE_MASK, opmode_regs_settings[op]
+ .ps_reg);
+ if (err < 0)
+ return err;
+
+ err = regmap_update_bits(data->regmap, GP2AP020A00F_LED_REG,
+ GP2AP020A00F_PIN_MASK, opmode_regs_settings[op]
+ .led_reg);
+ if (err < 0)
+ return err;
+ }
+
+ /* Set OP_REG and apply operation mode (power on / off) */
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG,
+ GP2AP020A00F_OP_MASK | GP2AP020A00F_OP2_MASK |
+ GP2AP020A00F_OP3_MASK | GP2AP020A00F_TYPE_MASK,
+ opmode_regs_settings[op].op_reg);
+ if (err < 0)
+ return err;
+
+ data->cur_opmode = op;
+
+ return 0;
+}
+
+static bool gp2ap020a00f_als_enabled(struct gp2ap020a00f_data *data)
+{
+ return test_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags);
+}
+
+static bool gp2ap020a00f_prox_detect_enabled(struct gp2ap020a00f_data *data)
+{
+ return test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags);
+}
+
+static int gp2ap020a00f_write_event_threshold(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_thresh_val_id th_val_id,
+ bool enable)
+{
+ __le16 thresh_buf = 0;
+ unsigned int thresh_reg_val;
+
+ if (!enable)
+ thresh_reg_val = 0;
+ else if (test_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags) &&
+ th_val_id != GP2AP020A00F_THRESH_PL &&
+ th_val_id != GP2AP020A00F_THRESH_PH)
+ /*
+ * For the high lux mode ALS threshold has to be scaled down
+ * to allow for proper comparison with the output value.
+ */
+ thresh_reg_val = data->thresh_val[th_val_id] / 16;
+ else
+ thresh_reg_val = data->thresh_val[th_val_id] > 16000 ?
+ 16000 :
+ data->thresh_val[th_val_id];
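+	/*
+	 * Example of the scaling above: with the high lux range active, a
+	 * stored ALS threshold of 32000 is written to the register as 2000,
+	 * since the raw output is later multiplied by 16.
+	 */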
+
+ thresh_buf = cpu_to_le16(thresh_reg_val);
+
+ return regmap_bulk_write(data->regmap,
+ GP2AP020A00F_THRESH_REG(th_val_id),
+ (u8 *)&thresh_buf, 2);
+}
+
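+/*
+ * Add a single function (ALS or PS) to, or remove it from, the current
+ * operation mode: adding to SHUTDOWN enables just that function, adding
+ * to an already active one results in ALS_AND_PS; removing one function
+ * from ALS_AND_PS leaves the other running, otherwise the device is
+ * shut down.
+ */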
+static int gp2ap020a00f_alter_opmode(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_opmode diff_mode, int add_sub)
+{
+ enum gp2ap020a00f_opmode new_mode;
+
+ if (diff_mode != GP2AP020A00F_OPMODE_ALS &&
+ diff_mode != GP2AP020A00F_OPMODE_PS)
+ return -EINVAL;
+
+ if (add_sub == GP2AP020A00F_ADD_MODE) {
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_SHUTDOWN)
+ new_mode = diff_mode;
+ else
+ new_mode = GP2AP020A00F_OPMODE_ALS_AND_PS;
+ } else {
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_ALS_AND_PS)
+ new_mode = (diff_mode == GP2AP020A00F_OPMODE_ALS) ?
+ GP2AP020A00F_OPMODE_PS :
+ GP2AP020A00F_OPMODE_ALS;
+ else
+ new_mode = GP2AP020A00F_OPMODE_SHUTDOWN;
+ }
+
+ return gp2ap020a00f_set_operation_mode(data, new_mode);
+}
+
+static int gp2ap020a00f_exec_cmd(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_cmd cmd)
+{
+ int err = 0;
+
+ switch (cmd) {
+ case GP2AP020A00F_CMD_READ_RAW_CLEAR:
+ if (data->cur_opmode != GP2AP020A00F_OPMODE_SHUTDOWN)
+ return -EBUSY;
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_READ_RAW_CLEAR);
+ break;
+ case GP2AP020A00F_CMD_READ_RAW_IR:
+ if (data->cur_opmode != GP2AP020A00F_OPMODE_SHUTDOWN)
+ return -EBUSY;
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_READ_RAW_IR);
+ break;
+ case GP2AP020A00F_CMD_READ_RAW_PROXIMITY:
+ if (data->cur_opmode != GP2AP020A00F_OPMODE_SHUTDOWN)
+ return -EBUSY;
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_CLEAR_EN:
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data))
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ set_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &data->flags);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_CLEAR_DIS:
+ clear_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &data->flags);
+ if (gp2ap020a00f_als_enabled(data))
+ break;
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_IR_EN:
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data))
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ set_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &data->flags);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_IR_DIS:
+ clear_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &data->flags);
+ if (gp2ap020a00f_als_enabled(data))
+ break;
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_PROX_EN:
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_PS,
+ GP2AP020A00F_ADD_MODE);
+ set_bit(GP2AP020A00F_FLAG_PROX_TRIGGER, &data->flags);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_PROX_DIS:
+ clear_bit(GP2AP020A00F_FLAG_PROX_TRIGGER, &data->flags);
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_PS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ break;
+ case GP2AP020A00F_CMD_ALS_HIGH_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags))
+ return 0;
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, true);
+ break;
+ case GP2AP020A00F_CMD_ALS_HIGH_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags);
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ if (err < 0)
+ return err;
+ }
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, false);
+ break;
+ case GP2AP020A00F_CMD_ALS_LOW_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags))
+ return 0;
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, true);
+ break;
+ case GP2AP020A00F_CMD_ALS_LOW_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags);
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ if (err < 0)
+ return err;
+ }
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, false);
+ break;
+ case GP2AP020A00F_CMD_PROX_HIGH_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags))
+ return 0;
+ if (gp2ap020a00f_als_enabled(data) ||
+ data->cur_opmode == GP2AP020A00F_OPMODE_PS)
+ return -EBUSY;
+ if (!gp2ap020a00f_prox_detect_enabled(data)) {
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_PROX_DETECT);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PH, true);
+ break;
+ case GP2AP020A00F_CMD_PROX_HIGH_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags);
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_SHUTDOWN);
+ if (err < 0)
+ return err;
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PH, false);
+ break;
+ case GP2AP020A00F_CMD_PROX_LOW_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags))
+ return 0;
+ if (gp2ap020a00f_als_enabled(data) ||
+ data->cur_opmode == GP2AP020A00F_OPMODE_PS)
+ return -EBUSY;
+ if (!gp2ap020a00f_prox_detect_enabled(data)) {
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_PROX_DETECT);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PL, true);
+ break;
+ case GP2AP020A00F_CMD_PROX_LOW_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags);
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_SHUTDOWN);
+ if (err < 0)
+ return err;
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PL, false);
+ break;
+ }
+
+ return err;
+}
+
+static int wait_conversion_complete_irq(struct gp2ap020a00f_data *data)
+{
+ int ret;
+
+ ret = wait_event_timeout(data->data_ready_queue,
+ test_bit(GP2AP020A00F_FLAG_DATA_READY,
+ &data->flags),
+ GP2AP020A00F_DATA_READY_TIMEOUT);
+ clear_bit(GP2AP020A00F_FLAG_DATA_READY, &data->flags);
+
+ return ret > 0 ? 0 : -ETIME;
+}
+
+static int gp2ap020a00f_read_output(struct gp2ap020a00f_data *data,
+ unsigned int output_reg, int *val)
+{
+ u8 reg_buf[2];
+ int err;
+
+ err = wait_conversion_complete_irq(data);
+ if (err < 0)
+ dev_dbg(&data->client->dev, "data ready timeout\n");
+
+ err = regmap_bulk_read(data->regmap, output_reg, reg_buf, 2);
+ if (err < 0)
+ return err;
+
+ *val = le16_to_cpup((__le16 *)reg_buf);
+
+ return err;
+}
+
+static bool gp2ap020a00f_adjust_lux_mode(struct gp2ap020a00f_data *data,
+ int output_val)
+{
+ u8 new_range = 0xff;
+ int err;
+
+ if (!test_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags)) {
+ if (output_val > 16000) {
+ set_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags);
+ new_range = GP2AP020A00F_RANGE_A_x128;
+ }
+ } else {
+ if (output_val < 1000) {
+ clear_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags);
+ new_range = GP2AP020A00F_RANGE_A_x8;
+ }
+ }
+
+ if (new_range != 0xff) {
+		/*
+		 * Clear the ALS threshold registers to avoid spurious
+		 * events caused by the lux mode transition.
+		 */
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, false);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Clearing als threshold register failed.\n");
+ return false;
+ }
+
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, false);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Clearing als threshold register failed.\n");
+ return false;
+ }
+
+ /* Change lux mode */
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG,
+ GP2AP020A00F_OP3_MASK,
+ GP2AP020A00F_OP3_SHUTDOWN);
+
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Shutting down the device failed.\n");
+ return false;
+ }
+
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_ALS_REG,
+ GP2AP020A00F_RANGE_A_MASK,
+ new_range);
+
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Adjusting device lux mode failed.\n");
+ return false;
+ }
+
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG,
+ GP2AP020A00F_OP3_MASK,
+ GP2AP020A00F_OP3_OPERATION);
+
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Powering up the device failed.\n");
+ return false;
+ }
+
+ /* Adjust als threshold register values to the new lux mode */
+ if (test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags)) {
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, true);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Adjusting als threshold value failed.\n");
+ return false;
+ }
+ }
+
+ if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags)) {
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, true);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Adjusting als threshold value failed.\n");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
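+/*
+ * In the high lux mode the raw ALS output is scaled up by 16, which
+ * presumably reflects the x128 vs. x8 measurement range used for the
+ * two modes, so that the reported value keeps the same units.
+ */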
+static void gp2ap020a00f_output_to_lux(struct gp2ap020a00f_data *data,
+ int *output_val)
+{
+ if (test_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags))
+ *output_val *= 16;
+}
+
+static void gp2ap020a00f_iio_trigger_work(struct irq_work *work)
+{
+ struct gp2ap020a00f_data *data =
+ container_of(work, struct gp2ap020a00f_data, work);
+
+ iio_trigger_poll(data->trig, 0);
+}
+
+static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct gp2ap020a00f_data *priv = iio_priv(indio_dev);
+ unsigned int op_reg_val;
+ int ret;
+
+ /* Read interrupt flags */
+ ret = regmap_read(priv->regmap, GP2AP020A00F_OP_REG, &op_reg_val);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (gp2ap020a00f_prox_detect_enabled(priv)) {
+ if (op_reg_val & GP2AP020A00F_PROX_DETECT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ GP2AP020A00F_SCAN_MODE_PROXIMITY,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ } else {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ GP2AP020A00F_SCAN_MODE_PROXIMITY,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct gp2ap020a00f_data *priv = iio_priv(indio_dev);
+ u8 op_reg_flags, d0_reg_buf[2];
+ unsigned int output_val, op_reg_val;
+ int thresh_val_id, ret;
+
+ /* Read interrupt flags */
+ ret = regmap_read(priv->regmap, GP2AP020A00F_OP_REG,
+ &op_reg_val);
+ if (ret < 0)
+ goto done;
+
+ op_reg_flags = op_reg_val & (GP2AP020A00F_FLAG_A | GP2AP020A00F_FLAG_P
+ | GP2AP020A00F_PROX_DETECT);
+
+ op_reg_val &= (~GP2AP020A00F_FLAG_A & ~GP2AP020A00F_FLAG_P
+ & ~GP2AP020A00F_PROX_DETECT);
+
+ /* Clear interrupt flags (if not in INTTYPE_PULSE mode) */
+ if (priv->cur_opmode != GP2AP020A00F_OPMODE_PROX_DETECT) {
+ ret = regmap_write(priv->regmap, GP2AP020A00F_OP_REG,
+ op_reg_val);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (op_reg_flags & GP2AP020A00F_FLAG_A) {
+		/*
+		 * Check the D0 register to assess whether a lux mode
+		 * transition is required.
+		 */
+ ret = regmap_bulk_read(priv->regmap, GP2AP020A00F_D0_L_REG,
+ d0_reg_buf, 2);
+ if (ret < 0)
+ goto done;
+
+ output_val = le16_to_cpup((__le16 *)d0_reg_buf);
+
+ if (gp2ap020a00f_adjust_lux_mode(priv, output_val))
+ goto done;
+
+ gp2ap020a00f_output_to_lux(priv, &output_val);
+
+ /*
+ * We need to check output value to distinguish
+ * between high and low ambient light threshold event.
+ */
+ if (test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &priv->flags)) {
+ thresh_val_id =
+ GP2AP020A00F_THRESH_VAL_ID(GP2AP020A00F_TH_L_REG);
+ if (output_val > priv->thresh_val[thresh_val_id])
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(
+ IIO_LIGHT,
+ GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR,
+ IIO_MOD_LIGHT_CLEAR,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &priv->flags)) {
+ thresh_val_id =
+ GP2AP020A00F_THRESH_VAL_ID(GP2AP020A00F_TL_L_REG);
+ if (output_val < priv->thresh_val[thresh_val_id])
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(
+ IIO_LIGHT,
+ GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR,
+ IIO_MOD_LIGHT_CLEAR,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+ }
+
+ if (priv->cur_opmode == GP2AP020A00F_OPMODE_READ_RAW_CLEAR ||
+ priv->cur_opmode == GP2AP020A00F_OPMODE_READ_RAW_IR ||
+ priv->cur_opmode == GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY) {
+ set_bit(GP2AP020A00F_FLAG_DATA_READY, &priv->flags);
+ wake_up(&priv->data_ready_queue);
+ goto done;
+ }
+
+ if (test_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &priv->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &priv->flags) ||
+ test_bit(GP2AP020A00F_FLAG_PROX_TRIGGER, &priv->flags))
+ /* This fires off the trigger. */
+ irq_work_queue(&priv->work);
+
+done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gp2ap020a00f_trigger_handler(int irq, void *data)
+{
+ struct iio_poll_func *pf = data;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct gp2ap020a00f_data *priv = iio_priv(indio_dev);
+ size_t d_size = 0;
+ __le32 light_lux;
+ int i, out_val, ret;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = regmap_bulk_read(priv->regmap,
+ GP2AP020A00F_DATA_REG(i),
+ &priv->buffer[d_size], 2);
+ if (ret < 0)
+ goto done;
+
+ if (i == GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR ||
+ i == GP2AP020A00F_SCAN_MODE_LIGHT_IR) {
+ out_val = le16_to_cpup((__le16 *)&priv->buffer[d_size]);
+ gp2ap020a00f_output_to_lux(priv, &out_val);
+ light_lux = cpu_to_le32(out_val);
+ memcpy(&priv->buffer[d_size], (u8 *)&light_lux, 4);
+ d_size += 4;
+ } else {
+ d_size += 2;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, priv->buffer,
+ pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static u8 gp2ap020a00f_get_thresh_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction event_dir)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (event_dir == IIO_EV_DIR_RISING)
+ return GP2AP020A00F_PH_L_REG;
+ else
+ return GP2AP020A00F_PL_L_REG;
+ case IIO_LIGHT:
+ if (event_dir == IIO_EV_DIR_RISING)
+ return GP2AP020A00F_TH_L_REG;
+ else
+ return GP2AP020A00F_TL_L_REG;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int gp2ap020a00f_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ bool event_en = false;
+ u8 thresh_val_id;
+ u8 thresh_reg_l;
+ int err = 0;
+
+ mutex_lock(&data->lock);
+
+ thresh_reg_l = gp2ap020a00f_get_thresh_reg(chan, dir);
+ thresh_val_id = GP2AP020A00F_THRESH_VAL_ID(thresh_reg_l);
+
+ if (thresh_val_id > GP2AP020A00F_THRESH_PH) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+
+ switch (thresh_reg_l) {
+ case GP2AP020A00F_TH_L_REG:
+ event_en = test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV,
+ &data->flags);
+ break;
+ case GP2AP020A00F_TL_L_REG:
+ event_en = test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV,
+ &data->flags);
+ break;
+ case GP2AP020A00F_PH_L_REG:
+ if (val == 0) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+ event_en = test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV,
+ &data->flags);
+ break;
+ case GP2AP020A00F_PL_L_REG:
+ if (val == 0) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+ event_en = test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV,
+ &data->flags);
+ break;
+ }
+
+ data->thresh_val[thresh_val_id] = val;
+ err = gp2ap020a00f_write_event_threshold(data, thresh_val_id,
+ event_en);
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int gp2ap020a00f_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ u8 thresh_reg_l;
+ int err = IIO_VAL_INT;
+
+ mutex_lock(&data->lock);
+
+ thresh_reg_l = gp2ap020a00f_get_thresh_reg(chan, dir);
+
+ if (thresh_reg_l > GP2AP020A00F_PH_L_REG) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+
+ *val = data->thresh_val[GP2AP020A00F_THRESH_VAL_ID(thresh_reg_l)];
+
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int gp2ap020a00f_write_prox_event_config(struct iio_dev *indio_dev,
+ int state)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ enum gp2ap020a00f_cmd cmd_high_ev, cmd_low_ev;
+ int err;
+
+ cmd_high_ev = state ? GP2AP020A00F_CMD_PROX_HIGH_EV_EN :
+ GP2AP020A00F_CMD_PROX_HIGH_EV_DIS;
+ cmd_low_ev = state ? GP2AP020A00F_CMD_PROX_LOW_EV_EN :
+ GP2AP020A00F_CMD_PROX_LOW_EV_DIS;
+
+ /*
+	 * In order to enable the proximity detection feature, both the high
+	 * and the low threshold registers have to be written with different,
+	 * non-zero values.
+ */
+ if (state) {
+ if (data->thresh_val[GP2AP020A00F_THRESH_PL] == 0)
+ return -EINVAL;
+
+ if (data->thresh_val[GP2AP020A00F_THRESH_PH] == 0)
+ return -EINVAL;
+ }
+
+ err = gp2ap020a00f_exec_cmd(data, cmd_high_ev);
+ if (err < 0)
+ return err;
+
+ err = gp2ap020a00f_exec_cmd(data, cmd_low_ev);
+ if (err < 0)
+ return err;
+
+ free_irq(data->client->irq, indio_dev);
+
+ if (state)
+ err = request_threaded_irq(data->client->irq, NULL,
+ &gp2ap020a00f_prox_sensing_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "gp2ap020a00f_prox_sensing",
+ indio_dev);
+ else {
+ err = request_threaded_irq(data->client->irq, NULL,
+ &gp2ap020a00f_thresh_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "gp2ap020a00f_thresh_event",
+ indio_dev);
+ }
+
+ return err;
+}
+
+static int gp2ap020a00f_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ enum gp2ap020a00f_cmd cmd;
+ int err;
+
+ mutex_lock(&data->lock);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ err = gp2ap020a00f_write_prox_event_config(indio_dev, state);
+ break;
+ case IIO_LIGHT:
+ if (dir == IIO_EV_DIR_RISING) {
+ cmd = state ? GP2AP020A00F_CMD_ALS_HIGH_EV_EN :
+ GP2AP020A00F_CMD_ALS_HIGH_EV_DIS;
+ err = gp2ap020a00f_exec_cmd(data, cmd);
+ } else {
+ cmd = state ? GP2AP020A00F_CMD_ALS_LOW_EV_EN :
+ GP2AP020A00F_CMD_ALS_LOW_EV_DIS;
+ err = gp2ap020a00f_exec_cmd(data, cmd);
+ }
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int gp2ap020a00f_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ int event_en = 0;
+
+ mutex_lock(&data->lock);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ event_en = test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV,
+ &data->flags);
+ else
+ event_en = test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV,
+ &data->flags);
+ break;
+ case IIO_LIGHT:
+ if (dir == IIO_EV_DIR_RISING)
+ event_en = test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV,
+ &data->flags);
+ else
+ event_en = test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV,
+ &data->flags);
+ break;
+ default:
+ event_en = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return event_en;
+}
+
+static int gp2ap020a00f_read_channel(struct gp2ap020a00f_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ enum gp2ap020a00f_cmd cmd;
+ int err;
+
+ switch (chan->scan_index) {
+ case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
+ cmd = GP2AP020A00F_CMD_READ_RAW_CLEAR;
+ break;
+ case GP2AP020A00F_SCAN_MODE_LIGHT_IR:
+ cmd = GP2AP020A00F_CMD_READ_RAW_IR;
+ break;
+ case GP2AP020A00F_SCAN_MODE_PROXIMITY:
+ cmd = GP2AP020A00F_CMD_READ_RAW_PROXIMITY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = gp2ap020a00f_exec_cmd(data, cmd);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "gp2ap020a00f_exec_cmd failed\n");
+ goto error_ret;
+ }
+
+ err = gp2ap020a00f_read_output(data, chan->address, val);
+ if (err < 0)
+ dev_err(&data->client->dev,
+ "gp2ap020a00f_read_output failed\n");
+
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_SHUTDOWN);
+ if (err < 0)
+ dev_err(&data->client->dev,
+ "Failed to shut down the device.\n");
+
+ if (cmd == GP2AP020A00F_CMD_READ_RAW_CLEAR ||
+ cmd == GP2AP020A00F_CMD_READ_RAW_IR)
+ gp2ap020a00f_output_to_lux(data, val);
+
+error_ret:
+ return err;
+}
+
+static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ int err = -EINVAL;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev)) {
+ err = -EBUSY;
+ goto error_unlock;
+ }
+
+ err = gp2ap020a00f_read_channel(data, chan, val);
+ break;
+ }
+
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return err < 0 ? err : IIO_VAL_INT;
+}
+
+static const struct iio_event_spec gp2ap020a00f_event_spec_light[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_event_spec gp2ap020a00f_event_spec_prox[] = {
+ {
+ .type = IIO_EV_TYPE_ROC,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_ROC,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec gp2ap020a00f_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .shift = 0,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ .scan_index = GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR,
+ .address = GP2AP020A00F_D0_L_REG,
+ .event_spec = gp2ap020a00f_event_spec_light,
+ .num_event_specs = ARRAY_SIZE(gp2ap020a00f_event_spec_light),
+ },
+ {
+ .type = IIO_LIGHT,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .shift = 0,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ .scan_index = GP2AP020A00F_SCAN_MODE_LIGHT_IR,
+ .address = GP2AP020A00F_D1_L_REG,
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .modified = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .shift = 0,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .scan_index = GP2AP020A00F_SCAN_MODE_PROXIMITY,
+ .address = GP2AP020A00F_D2_L_REG,
+ .event_spec = gp2ap020a00f_event_spec_prox,
+ .num_event_specs = ARRAY_SIZE(gp2ap020a00f_event_spec_prox),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(GP2AP020A00F_CHAN_TIMESTAMP),
+};
+
+static const struct iio_info gp2ap020a00f_info = {
+ .read_raw = &gp2ap020a00f_read_raw,
+ .read_event_value_new = &gp2ap020a00f_read_event_val,
+ .read_event_config_new = &gp2ap020a00f_read_event_config,
+ .write_event_value_new = &gp2ap020a00f_write_event_val,
+ .write_event_config_new = &gp2ap020a00f_write_event_config,
+ .driver_module = THIS_MODULE,
+};
+
+static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ int i, err = 0;
+
+ mutex_lock(&data->lock);
+
+ /*
+	 * Enable triggers according to the scan mask. Enabling either the
+	 * LIGHT_CLEAR or the LIGHT_IR scan mode enables the ALS module in
+	 * the device, which generates samples in both the D0 (clear) and
+	 * D1 (IR) registers. As the two registers are bound to two separate
+	 * IIO channels, the driver logic treats them as if they were
+	 * controlled independently.
+ */
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ switch (i) {
+ case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
+ err = gp2ap020a00f_exec_cmd(data,
+ GP2AP020A00F_CMD_TRIGGER_CLEAR_EN);
+ break;
+ case GP2AP020A00F_SCAN_MODE_LIGHT_IR:
+ err = gp2ap020a00f_exec_cmd(data,
+ GP2AP020A00F_CMD_TRIGGER_IR_EN);
+ break;
+ case GP2AP020A00F_SCAN_MODE_PROXIMITY:
+ err = gp2ap020a00f_exec_cmd(data,
+ GP2AP020A00F_CMD_TRIGGER_PROX_EN);
+ break;
+ }
+ }
+
+ if (err < 0)
+ goto error_unlock;
+
+ data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (!data->buffer) {
+ err = -ENOMEM;
+ goto error_unlock;
+ }
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ int i, err;
+
+ mutex_lock(&data->lock);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto error_unlock;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ switch (i) {
+ case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
+ err = gp2ap020a00f_exec_cmd(data,
+ GP2AP020A00F_CMD_TRIGGER_CLEAR_DIS);
+ break;
+ case GP2AP020A00F_SCAN_MODE_LIGHT_IR:
+ err = gp2ap020a00f_exec_cmd(data,
+ GP2AP020A00F_CMD_TRIGGER_IR_DIS);
+ break;
+ case GP2AP020A00F_SCAN_MODE_PROXIMITY:
+ err = gp2ap020a00f_exec_cmd(data,
+ GP2AP020A00F_CMD_TRIGGER_PROX_DIS);
+ break;
+ }
+ }
+
+ if (err == 0)
+ kfree(data->buffer);
+
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = {
+ .postenable = &gp2ap020a00f_buffer_postenable,
+ .predisable = &gp2ap020a00f_buffer_predisable,
+};
+
+static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static int gp2ap020a00f_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct gp2ap020a00f_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+
+ data->vled_reg = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(data->vled_reg))
+ return PTR_ERR(data->vled_reg);
+
+ err = regulator_enable(data->vled_reg);
+ if (err)
+ return err;
+
+ regmap = devm_regmap_init_i2c(client, &gp2ap020a00f_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Regmap initialization failed.\n");
+ err = PTR_ERR(regmap);
+ goto error_regulator_disable;
+ }
+
+ /* Initialize device registers */
+ err = regmap_bulk_write(regmap, GP2AP020A00F_OP_REG,
+ gp2ap020a00f_reg_init_tab,
+ ARRAY_SIZE(gp2ap020a00f_reg_init_tab));
+
+ if (err < 0) {
+ dev_err(&client->dev, "Device initialization failed.\n");
+ goto error_regulator_disable;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ data->client = client;
+ data->cur_opmode = GP2AP020A00F_OPMODE_SHUTDOWN;
+ data->regmap = regmap;
+ init_waitqueue_head(&data->data_ready_queue);
+
+ mutex_init(&data->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = gp2ap020a00f_channels;
+ indio_dev->num_channels = ARRAY_SIZE(gp2ap020a00f_channels);
+ indio_dev->info = &gp2ap020a00f_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Allocate buffer */
+ err = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &gp2ap020a00f_trigger_handler, &gp2ap020a00f_buffer_setup_ops);
+ if (err < 0)
+ goto error_regulator_disable;
+
+ /* Allocate trigger */
+ data->trig = devm_iio_trigger_alloc(&client->dev, "%s-trigger",
+ indio_dev->name);
+ if (data->trig == NULL) {
+ err = -ENOMEM;
+ dev_err(&indio_dev->dev, "Failed to allocate iio trigger.\n");
+ goto error_uninit_buffer;
+ }
+
+ /* This needs to be requested here for read_raw calls to work. */
+ err = request_threaded_irq(client->irq, NULL,
+ &gp2ap020a00f_thresh_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "gp2ap020a00f_als_event",
+ indio_dev);
+ if (err < 0) {
+ dev_err(&client->dev, "Irq request failed.\n");
+ goto error_uninit_buffer;
+ }
+
+ data->trig->ops = &gp2ap020a00f_trigger_ops;
+ data->trig->dev.parent = &data->client->dev;
+
+ init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work);
+
+ err = iio_trigger_register(data->trig);
+ if (err < 0) {
+ dev_err(&client->dev, "Failed to register iio trigger.\n");
+ goto error_free_irq;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err < 0)
+ goto error_trigger_unregister;
+
+ return 0;
+
+error_trigger_unregister:
+ iio_trigger_unregister(data->trig);
+error_free_irq:
+ free_irq(client->irq, indio_dev);
+error_uninit_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_regulator_disable:
+ regulator_disable(data->vled_reg);
+
+ return err;
+}
+
+static int gp2ap020a00f_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ int err;
+
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_SHUTDOWN);
+ if (err < 0)
+ dev_err(&indio_dev->dev, "Failed to power off the device.\n");
+
+ iio_device_unregister(indio_dev);
+ iio_trigger_unregister(data->trig);
+ free_irq(client->irq, indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ regulator_disable(data->vled_reg);
+
+ return 0;
+}
+
+static const struct i2c_device_id gp2ap020a00f_id[] = {
+ { GP2A_I2C_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, gp2ap020a00f_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id gp2ap020a00f_of_match[] = {
+ { .compatible = "sharp,gp2ap020a00f" },
+ { }
+};
+#endif
+
+static struct i2c_driver gp2ap020a00f_driver = {
+ .driver = {
+ .name = GP2A_I2C_NAME,
+ .of_match_table = of_match_ptr(gp2ap020a00f_of_match),
+ .owner = THIS_MODULE,
+ },
+ .probe = gp2ap020a00f_probe,
+ .remove = gp2ap020a00f_remove,
+ .id_table = gp2ap020a00f_id,
+};
+
+module_i2c_driver(gp2ap020a00f_driver);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("Sharp GP2AP020A00F Proximity/ALS sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index e59d00c3139..fa6ae8cf89e 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -161,10 +161,11 @@ static const struct iio_info als_info = {
};
/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
{
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
+ iio_push_to_buffers(indio_dev, data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -179,7 +180,7 @@ static int als_proc_event(struct hid_sensor_hub_device *hsdev,
als_state->common_attributes.data_ready);
if (als_state->common_attributes.data_ready)
hid_sensor_push_data(indio_dev,
- (u8 *)&als_state->illum,
+ &als_state->illum,
sizeof(als_state->illum));
return 0;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
new file mode 100644
index 00000000000..45df2204614
--- /dev/null
+++ b/drivers/iio/light/tcs3472.c
@@ -0,0 +1,367 @@
+/*
+ * tcs3472.c - Support for TAOS TCS3472 color light-to-digital converter
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Color light sensor with 16-bit channels for red, green, blue and clear;
+ * 7-bit I2C slave address 0x39 (TCS34721, TCS34723) or 0x29 (TCS34725,
+ * TCS34727)
+ *
+ * TODO: interrupt support, thresholds, wait time
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define TCS3472_DRV_NAME "tcs3472"
+
+#define TCS3472_COMMAND BIT(7)
+#define TCS3472_AUTO_INCR BIT(5)
+
+#define TCS3472_ENABLE (TCS3472_COMMAND | 0x00)
+#define TCS3472_ATIME (TCS3472_COMMAND | 0x01)
+#define TCS3472_WTIME (TCS3472_COMMAND | 0x03)
+#define TCS3472_AILT (TCS3472_COMMAND | 0x04)
+#define TCS3472_AIHT (TCS3472_COMMAND | 0x06)
+#define TCS3472_PERS (TCS3472_COMMAND | 0x0c)
+#define TCS3472_CONFIG (TCS3472_COMMAND | 0x0d)
+#define TCS3472_CONTROL (TCS3472_COMMAND | 0x0f)
+#define TCS3472_ID (TCS3472_COMMAND | 0x12)
+#define TCS3472_STATUS (TCS3472_COMMAND | 0x13)
+#define TCS3472_CDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x14)
+#define TCS3472_RDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x16)
+#define TCS3472_GDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x18)
+#define TCS3472_BDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x1a)
+
+#define TCS3472_STATUS_AVALID BIT(0)
+#define TCS3472_ENABLE_AEN BIT(1)
+#define TCS3472_ENABLE_PON BIT(0)
+#define TCS3472_CONTROL_AGAIN_MASK (BIT(0) | BIT(1))
+
+struct tcs3472_data {
+ struct i2c_client *client;
+ u8 enable;
+ u8 control;
+ u8 atime;
+ u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
+};
+
+#define TCS3472_CHANNEL(_color, _si, _addr) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+ .address = _addr, \
+ .scan_index = _si, \
+ .scan_type = IIO_ST('u', 16, 16, 0), \
+}
+
+static const int tcs3472_agains[] = { 1, 4, 16, 60 };
+
+static const struct iio_chan_spec tcs3472_channels[] = {
+ TCS3472_CHANNEL(CLEAR, 0, TCS3472_CDATA),
+ TCS3472_CHANNEL(RED, 1, TCS3472_RDATA),
+ TCS3472_CHANNEL(GREEN, 2, TCS3472_GDATA),
+ TCS3472_CHANNEL(BLUE, 3, TCS3472_BDATA),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static int tcs3472_req_data(struct tcs3472_data *data)
+{
+ int tries = 50;
+ int ret;
+
+ while (tries--) {
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_STATUS);
+ if (ret < 0)
+ return ret;
+ if (ret & TCS3472_STATUS_AVALID)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tcs3472_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = tcs3472_req_data(data);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = tcs3472_agains[data->control &
+ TCS3472_CONTROL_AGAIN_MASK];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = (256 - data->atime) * 2400;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static int tcs3472_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val2 != 0)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(tcs3472_agains); i++) {
+ if (val == tcs3472_agains[i]) {
+ data->control &= ~TCS3472_CONTROL_AGAIN_MASK;
+ data->control |= i;
+ return i2c_smbus_write_byte_data(
+ data->client, TCS3472_CONTROL,
+ data->control);
+ }
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ for (i = 0; i < 256; i++) {
+ if (val2 == (256 - i) * 2400) {
+ data->atime = i;
+ return i2c_smbus_write_word_data(
+ data->client, TCS3472_ATIME,
+ data->atime);
+ }
+
+ }
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int len = 0;
+ int i, j = 0;
+
+ int ret = tcs3472_req_data(data);
+ if (ret < 0)
+ goto done;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = i2c_smbus_read_word_data(data->client,
+ TCS3472_CDATA + 2*i);
+ if (ret < 0)
+ goto done;
+
+ data->buffer[j++] = ret;
+ len += 2;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t tcs3472_show_int_time_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 1; i <= 256; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06d ",
+ 2400 * i);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_CONST_ATTR(calibscale_available, "1 4 16 60");
+static IIO_DEV_ATTR_INT_TIME_AVAIL(tcs3472_show_int_time_available);
+
+static struct attribute *tcs3472_attributes[] = {
+ &iio_const_attr_calibscale_available.dev_attr.attr,
+ &iio_dev_attr_integration_time_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tcs3472_attribute_group = {
+ .attrs = tcs3472_attributes,
+};
+
+static const struct iio_info tcs3472_info = {
+ .read_raw = tcs3472_read_raw,
+ .write_raw = tcs3472_write_raw,
+ .attrs = &tcs3472_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int tcs3472_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tcs3472_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &tcs3472_info;
+ indio_dev->name = TCS3472_DRV_NAME;
+ indio_dev->channels = tcs3472_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tcs3472_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_ID);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0x44)
+ dev_info(&client->dev, "TCS34721/34725 found\n");
+ else if (ret == 0x4d)
+ dev_info(&client->dev, "TCS34723/34727 found\n");
+ else
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_CONTROL);
+ if (ret < 0)
+ return ret;
+ data->control = ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_ATIME);
+ if (ret < 0)
+ return ret;
+ data->atime = ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ /* enable device */
+ data->enable = ret | TCS3472_ENABLE_PON | TCS3472_ENABLE_AEN;
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ tcs3472_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int tcs3472_powerdown(struct tcs3472_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable & ~(TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+}
+
+static int tcs3472_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ tcs3472_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tcs3472_suspend(struct device *dev)
+{
+ struct tcs3472_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return tcs3472_powerdown(data);
+}
+
+static int tcs3472_resume(struct device *dev)
+{
+ struct tcs3472_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable | (TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tcs3472_pm_ops, tcs3472_suspend, tcs3472_resume);
+
+static const struct i2c_device_id tcs3472_id[] = {
+ { "tcs3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tcs3472_id);
+
+static struct i2c_driver tcs3472_driver = {
+ .driver = {
+ .name = TCS3472_DRV_NAME,
+ .pm = &tcs3472_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = tcs3472_probe,
+ .remove = tcs3472_remove,
+ .id_table = tcs3472_id,
+};
+module_i2c_driver(tcs3472_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("TCS3472 color light sensors driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index ebb962c5c32..5e5d9dea22c 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -526,6 +526,20 @@ error_ret:
return ret;
}
+static const struct iio_event_spec tsl2563_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_chan_spec tsl2563_channels[] = {
{
.type = IIO_LIGHT,
@@ -538,10 +552,8 @@ static const struct iio_chan_spec tsl2563_channels[] = {
.channel2 = IIO_MOD_LIGHT_BOTH,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING)),
+ .event_spec = tsl2563_events,
+ .num_event_specs = ARRAY_SIZE(tsl2563_events),
}, {
.type = IIO_INTENSITY,
.modified = 1,
@@ -552,12 +564,13 @@ static const struct iio_chan_spec tsl2563_channels[] = {
};
static int tsl2563_read_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ switch (dir) {
case IIO_EV_DIR_RISING:
*val = chip->high_thres;
break;
@@ -568,18 +581,19 @@ static int tsl2563_read_thresh(struct iio_dev *indio_dev,
return -EINVAL;
}
- return 0;
+ return IIO_VAL_INT;
}
static int tsl2563_write_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
u8 address;
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
+ if (dir == IIO_EV_DIR_RISING)
address = TSL2563_REG_HIGHLOW;
else
address = TSL2563_REG_LOWLOW;
@@ -591,7 +605,7 @@ static int tsl2563_write_thresh(struct iio_dev *indio_dev,
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | (address + 1),
(val >> 8) & 0xFF);
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
+ if (dir == IIO_EV_DIR_RISING)
chip->high_thres = val;
else
chip->low_thres = val;
@@ -620,8 +634,8 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
}
static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
- u64 event_code,
- int state)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret = 0;
@@ -662,7 +676,8 @@ out:
}
static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
@@ -687,10 +702,10 @@ static const struct iio_info tsl2563_info = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
- .read_event_value = &tsl2563_read_thresh,
- .write_event_value = &tsl2563_write_thresh,
- .read_event_config = &tsl2563_read_interrupt_config,
- .write_event_config = &tsl2563_write_interrupt_config,
+ .read_event_value_new = &tsl2563_read_thresh,
+ .write_event_value_new = &tsl2563_write_thresh,
+ .read_event_config_new = &tsl2563_read_interrupt_config,
+ .write_event_config_new = &tsl2563_write_interrupt_config,
};
static int tsl2563_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
new file mode 100644
index 00000000000..a15006efa13
--- /dev/null
+++ b/drivers/iio/light/tsl4531.c
@@ -0,0 +1,258 @@
+/*
+ * tsl4531.c - Support for TAOS TSL4531 ambient light sensor
+ *
+ * Copyright 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for the TSL4531x family
+ * TSL45311/TSL45313: 7-bit I2C slave address 0x39
+ * TSL45315/TSL45317: 7-bit I2C slave address 0x29
+ *
+ * TODO: single cycle measurement
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define TSL4531_DRV_NAME "tsl4531"
+
+#define TCS3472_COMMAND BIT(7)
+
+#define TSL4531_CONTROL (TCS3472_COMMAND | 0x00)
+#define TSL4531_CONFIG (TCS3472_COMMAND | 0x01)
+#define TSL4531_DATA (TCS3472_COMMAND | 0x04)
+#define TSL4531_ID (TCS3472_COMMAND | 0x0a)
+
+/* operating modes in control register */
+#define TSL4531_MODE_POWERDOWN 0x00
+#define TSL4531_MODE_SINGLE_ADC 0x02
+#define TSL4531_MODE_NORMAL 0x03
+
+/* integration time control in config register */
+#define TSL4531_TCNTRL_400MS 0x00
+#define TSL4531_TCNTRL_200MS 0x01
+#define TSL4531_TCNTRL_100MS 0x02
+
+/* part number in id register */
+#define TSL45311_ID 0x8
+#define TSL45313_ID 0x9
+#define TSL45315_ID 0xa
+#define TSL45317_ID 0xb
+#define TSL4531_ID_SHIFT 4
+
+struct tsl4531_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ int int_time;
+};
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.1 0.2 0.4");
+
+static struct attribute *tsl4531_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tsl4531_attribute_group = {
+ .attrs = tsl4531_attributes,
+};
+
+static const struct iio_chan_spec tsl4531_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME)
+ }
+};
+
+static int tsl4531_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tsl4531_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client,
+ TSL4531_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+		/* int_time 0 -> 1x, 1 -> 2x, 2 -> 4x */
+ *val = 1 << data->int_time;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (data->int_time == 0)
+ *val2 = 400000;
+ else if (data->int_time == 1)
+ *val2 = 200000;
+ else if (data->int_time == 2)
+ *val2 = 100000;
+ else
+ return -EINVAL;
+ *val = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl4531_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tsl4531_data *data = iio_priv(indio_dev);
+ int int_time, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ if (val2 == 400000)
+ int_time = 0;
+ else if (val2 == 200000)
+ int_time = 1;
+ else if (val2 == 100000)
+ int_time = 2;
+ else
+ return -EINVAL;
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client,
+ TSL4531_CONFIG, int_time);
+ if (ret >= 0)
+ data->int_time = int_time;
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info tsl4531_info = {
+ .read_raw = tsl4531_read_raw,
+ .write_raw = tsl4531_write_raw,
+ .attrs = &tsl4531_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int tsl4531_check_id(struct i2c_client *client)
+{
+ int ret = i2c_smbus_read_byte_data(client, TSL4531_ID);
+ if (ret < 0)
+ return ret;
+
+ switch (ret >> TSL4531_ID_SHIFT) {
+ case TSL45311_ID:
+ case TSL45313_ID:
+ case TSL45315_ID:
+ case TSL45317_ID:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int tsl4531_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tsl4531_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ if (!tsl4531_check_id(client)) {
+ dev_err(&client->dev, "no TSL4531 sensor\n");
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, TSL4531_CONTROL,
+ TSL4531_MODE_NORMAL);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, TSL4531_CONFIG,
+ TSL4531_TCNTRL_400MS);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &tsl4531_info;
+ indio_dev->channels = tsl4531_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl4531_channels);
+ indio_dev->name = TSL4531_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return iio_device_register(indio_dev);
+}
+
+static int tsl4531_powerdown(struct i2c_client *client)
+{
+ return i2c_smbus_write_byte_data(client, TSL4531_CONTROL,
+ TSL4531_MODE_POWERDOWN);
+}
+
+static int tsl4531_remove(struct i2c_client *client)
+{
+ iio_device_unregister(i2c_get_clientdata(client));
+ tsl4531_powerdown(client);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tsl4531_suspend(struct device *dev)
+{
+ return tsl4531_powerdown(to_i2c_client(dev));
+}
+
+static int tsl4531_resume(struct device *dev)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), TSL4531_CONTROL,
+ TSL4531_MODE_NORMAL);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend, tsl4531_resume);
+
+static const struct i2c_device_id tsl4531_id[] = {
+ { "tsl4531", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tsl4531_id);
+
+static struct i2c_driver tsl4531_driver = {
+ .driver = {
+ .name = TSL4531_DRV_NAME,
+ .pm = &tsl4531_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = tsl4531_probe,
+ .remove = tsl4531_remove,
+ .id_table = tsl4531_id,
+};
+
+module_i2c_driver(tsl4531_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("TAOS TSL4531 ambient light sensors driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 2bb304215b1..ecb3341ef9c 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -179,11 +179,7 @@ static int vcnl4000_probe(struct i2c_client *client,
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- return ret;
-
- return 0;
+ return iio_device_register(indio_dev);
}
static int vcnl4000_remove(struct i2c_client *client)
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 4fa923f37b9..0cf09637b35 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -16,6 +16,16 @@ config AK8975
To compile this driver as a module, choose M here: the module
will be called ak8975.
+config MAG3110
+ tristate "Freescale MAG3110 3-Axis Magnetometer"
+ depends on I2C
+ help
+ Say yes here to build support for the Freescale MAG3110 3-Axis
+ magnetometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mag3110.
+
config HID_SENSOR_MAGNETOMETER_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index f91b1b68d39..0f5d3c98579 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -4,6 +4,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AK8975) += ak8975.o
+obj-$(CONFIG_MAG3110) += mag3110.o
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
obj-$(CONFIG_IIO_ST_MAGN_3AXIS) += st_magn.o
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 7105f22d6cd..ff284e5afd9 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -263,7 +263,7 @@ static int ak8975_setup(struct i2c_client *client)
*
* HuT = H * 1229/4096, or roughly, 3/10.
*
- * Since 1uT = 100 gauss, our final scale factor becomes:
+ * Since 1uT = 0.01 gauss, our final scale factor becomes:
*
* Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
* Hadj = H * ((ASA + 128) * 30 / 256
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index a98460b15e4..2634920562f 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -183,10 +183,11 @@ static const struct iio_info magn_3d_info = {
};
/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
{
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
+ iio_push_to_buffers(indio_dev, data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -201,7 +202,7 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
magn_state->common_attributes.data_ready);
if (magn_state->common_attributes.data_ready)
hid_sensor_push_data(indio_dev,
- (u8 *)magn_state->magn_val,
+ magn_state->magn_val,
sizeof(magn_state->magn_val));
return 0;
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
new file mode 100644
index 00000000000..783c5b41735
--- /dev/null
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -0,0 +1,401 @@
+/*
+ * mag3110.c - Support for Freescale MAG3110 magnetometer sensor
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x0e)
+ *
+ * TODO: irq, user offset, oversampling, continuous mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+#define MAG3110_STATUS 0x00
+#define MAG3110_OUT_X 0x01 /* MSB first */
+#define MAG3110_OUT_Y 0x03
+#define MAG3110_OUT_Z 0x05
+#define MAG3110_WHO_AM_I 0x07
+#define MAG3110_OFF_X 0x09 /* MSB first */
+#define MAG3110_OFF_Y 0x0b
+#define MAG3110_OFF_Z 0x0d
+#define MAG3110_DIE_TEMP 0x0f
+#define MAG3110_CTRL_REG1 0x10
+#define MAG3110_CTRL_REG2 0x11
+
+#define MAG3110_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
+
+#define MAG3110_CTRL_DR_MASK (BIT(7) | BIT(6) | BIT(5))
+#define MAG3110_CTRL_DR_SHIFT 5
+#define MAG3110_CTRL_DR_DEFAULT 0
+
+#define MAG3110_CTRL_TM BIT(1) /* trigger single measurement */
+#define MAG3110_CTRL_AC BIT(0) /* continuous measurements */
+
+#define MAG3110_CTRL_AUTO_MRST_EN BIT(7) /* magnetic auto-reset */
+#define MAG3110_CTRL_RAW BIT(5) /* measurements not user-offset corrected */
+
+#define MAG3110_DEVICE_ID 0xc4
+
+/* Each client has this additional data */
+struct mag3110_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 ctrl_reg1;
+};
+
+static int mag3110_request(struct mag3110_data *data)
+{
+ int ret, tries = 150;
+
+ /* trigger measurement */
+ ret = i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1 | MAG3110_CTRL_TM);
+ if (ret < 0)
+ return ret;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, MAG3110_STATUS);
+ if (ret < 0)
+ return ret;
+ /* wait for data ready */
+ if ((ret & MAG3110_STATUS_DRDY) == MAG3110_STATUS_DRDY)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mag3110_read(struct mag3110_data *data, __be16 buf[3])
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = mag3110_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MAG3110_OUT_X, 3 * sizeof(__be16), (u8 *) buf);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static ssize_t mag3110_show_int_plus_micros(char *buf,
+ const int (*vals)[2], int n)
+{
+ size_t len = 0;
+
+ while (n-- > 0)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d.%d ", vals[n][0], vals[n][1]);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static int mag3110_get_int_plus_micros_index(const int (*vals)[2], int n,
+ int val, int val2)
+{
+ while (n-- > 0)
+ if (val == vals[n][0] && val2 == vals[n][1])
+ return n;
+
+ return -EINVAL;
+}
+
+static const int mag3110_samp_freq[8][2] = {
+ {80, 0}, {40, 0}, {20, 0}, {10, 0}, {5, 0}, {2, 500000},
+ {1, 250000}, {0, 625000}
+};
+
+static ssize_t mag3110_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return mag3110_show_int_plus_micros(buf, mag3110_samp_freq, 8);
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mag3110_show_samp_freq_avail);
+
+static int mag3110_get_samp_freq_index(struct mag3110_data *data,
+ int val, int val2)
+{
+ return mag3110_get_int_plus_micros_index(mag3110_samp_freq, 8, val,
+ val2);
+}
+
+static int mag3110_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mag3110_data *data = iio_priv(indio_dev);
+ __be16 buffer[3];
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_MAGN: /* in 0.1 uT / LSB */
+ ret = mag3110_read(data, buffer);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(
+ be16_to_cpu(buffer[chan->scan_index]), 15);
+ return IIO_VAL_INT;
+ case IIO_TEMP: /* in 1 C / LSB */
+ mutex_lock(&data->lock);
+ ret = mag3110_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_byte_data(data->client,
+ MAG3110_DIE_TEMP);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 7);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ i = data->ctrl_reg1 >> MAG3110_CTRL_DR_SHIFT;
+ *val = mag3110_samp_freq[i][0];
+ *val2 = mag3110_samp_freq[i][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static int mag3110_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct mag3110_data *data = iio_priv(indio_dev);
+ int rate;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ rate = mag3110_get_samp_freq_index(data, val, val2);
+ if (rate < 0)
+ return -EINVAL;
+
+ data->ctrl_reg1 &= ~MAG3110_CTRL_DR_MASK;
+ data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT;
+ return i2c_smbus_write_byte_data(data->client,
+ MAG3110_CTRL_REG1, data->ctrl_reg1);
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t mag3110_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mag3110_data *data = iio_priv(indio_dev);
+ u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */
+ int ret;
+
+ ret = mag3110_read(data, (__be16 *) buffer);
+ if (ret < 0)
+ goto done;
+
+ if (test_bit(3, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_byte_data(data->client,
+ MAG3110_DIE_TEMP);
+ if (ret < 0)
+ goto done;
+ buffer[6] = ret;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+#define MAG3110_CHANNEL(axis, idx) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = idx, \
+ .scan_type = IIO_ST('s', 16, 16, IIO_BE), \
+}
+
+static const struct iio_chan_spec mag3110_channels[] = {
+ MAG3110_CHANNEL(X, 0),
+ MAG3110_CHANNEL(Y, 1),
+ MAG3110_CHANNEL(Z, 2),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_index = 3,
+ .scan_type = IIO_ST('s', 8, 8, 0),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static struct attribute *mag3110_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group mag3110_group = {
+ .attrs = mag3110_attributes,
+};
+
+static const struct iio_info mag3110_info = {
+ .attrs = &mag3110_group,
+ .read_raw = &mag3110_read_raw,
+ .write_raw = &mag3110_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0};
+
+static int mag3110_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mag3110_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, MAG3110_WHO_AM_I);
+ if (ret < 0)
+ return ret;
+ if (ret != MAG3110_DEVICE_ID)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &mag3110_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mag3110_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mag3110_channels);
+ indio_dev->available_scan_masks = mag3110_scan_masks;
+
+ data->ctrl_reg1 = MAG3110_CTRL_DR_DEFAULT;
+ ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG2,
+ MAG3110_CTRL_AUTO_MRST_EN | MAG3110_CTRL_RAW);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ mag3110_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int mag3110_standby(struct mag3110_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1 & ~MAG3110_CTRL_AC);
+}
+
+static int mag3110_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ mag3110_standby(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mag3110_suspend(struct device *dev)
+{
+ return mag3110_standby(iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev))));
+}
+
+static int mag3110_resume(struct device *dev)
+{
+ struct mag3110_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
+}
+
+static SIMPLE_DEV_PM_OPS(mag3110_pm_ops, mag3110_suspend, mag3110_resume);
+#define MAG3110_PM_OPS (&mag3110_pm_ops)
+#else
+#define MAG3110_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mag3110_id[] = {
+ { "mag3110", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mag3110_id);
+
+static struct i2c_driver mag3110_driver = {
+ .driver = {
+ .name = "mag3110",
+ .pm = MAG3110_PM_OPS,
+ },
+ .probe = mag3110_probe,
+ .remove = mag3110_remove,
+ .id_table = mag3110_id,
+};
+module_i2c_driver(mag3110_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MAG3110 magnetometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index 708857bdb47..bf427dc0d22 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -25,16 +25,7 @@
static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
{
- int err;
-
- err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
- goto st_magn_set_enable_error;
-
- err = iio_sw_buffer_preenable(indio_dev);
-
-st_magn_set_enable_error:
- return err;
+ return st_sensors_set_enable(indio_dev, true);
}
static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index cab3bc7494a..52bbcfa1e07 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -348,8 +348,9 @@ static const struct iio_info magn_info = {
int st_magn_common_probe(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
- int err;
struct st_sensor_data *mdata = iio_priv(indio_dev);
+ int irq = mdata->get_irq_data_ready(indio_dev);
+ int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &magn_info;
@@ -357,7 +358,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev,
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_magn_sensors), st_magn_sensors);
if (err < 0)
- goto st_magn_common_probe_error;
+ return err;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
mdata->multiread_bit = mdata->sensor->multi_read_bit;
@@ -370,12 +371,13 @@ int st_magn_common_probe(struct iio_dev *indio_dev,
err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
- goto st_magn_common_probe_error;
+ return err;
- if (mdata->get_irq_data_ready(indio_dev) > 0) {
- err = st_magn_allocate_ring(indio_dev);
- if (err < 0)
- goto st_magn_common_probe_error;
+ err = st_magn_allocate_ring(indio_dev);
+ if (err < 0)
+ return err;
+
+ if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev, NULL);
if (err < 0)
goto st_magn_probe_trigger_error;
@@ -385,15 +387,14 @@ int st_magn_common_probe(struct iio_dev *indio_dev,
if (err)
goto st_magn_device_register_error;
- return err;
+ return 0;
st_magn_device_register_error:
- if (mdata->get_irq_data_ready(indio_dev) > 0)
+ if (irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_magn_probe_trigger_error:
- if (mdata->get_irq_data_ready(indio_dev) > 0)
- st_magn_deallocate_ring(indio_dev);
-st_magn_common_probe_error:
+ st_magn_deallocate_ring(indio_dev);
+
return err;
}
EXPORT_SYMBOL(st_magn_common_probe);
@@ -403,10 +404,10 @@ void st_magn_common_remove(struct iio_dev *indio_dev)
struct st_sensor_data *mdata = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (mdata->get_irq_data_ready(indio_dev) > 0) {
+ if (mdata->get_irq_data_ready(indio_dev) > 0)
st_sensors_deallocate_trigger(indio_dev);
- st_magn_deallocate_ring(indio_dev);
- }
+
+ st_magn_deallocate_ring(indio_dev);
}
EXPORT_SYMBOL(st_magn_common_remove);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 26fdc0bdb99..4f2e0f9bad8 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -14,7 +14,7 @@ config IIO_ST_PRESS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics pressure
- sensors: LPS331AP.
+ sensors: LPS001WP, LPS331AP.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index b0b630688da..049c21acf1f 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/iio/common/st_sensors.h>
+#define LPS001WP_PRESS_DEV_NAME "lps001wp"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
/**
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index f877ef8af52..b37b1c9ac93 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -32,16 +32,7 @@ int st_press_trig_set_state(struct iio_trigger *trig, bool state)
static int st_press_buffer_preenable(struct iio_dev *indio_dev)
{
- int err;
-
- err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
- goto st_press_set_enable_error;
-
- err = iio_sw_buffer_preenable(indio_dev);
-
-st_press_set_enable_error:
- return err;
+ return st_sensors_set_enable(indio_dev, true);
}
static int st_press_buffer_postenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index ceebd3c2789..58083f9d51c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -23,6 +23,7 @@
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/buffer.h>
+#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
@@ -36,94 +37,200 @@
ST_PRESS_LSB_PER_CELSIUS)
#define ST_PRESS_NUMBER_DATA_CHANNELS 1
-/* DEFAULT VALUE FOR SENSORS */
-#define ST_PRESS_DEFAULT_OUT_XL_ADDR 0x28
-#define ST_TEMP_DEFAULT_OUT_L_ADDR 0x2b
-
/* FULLSCALE */
#define ST_PRESS_FS_AVL_1260MB 1260
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_PRESS_1_WAI_EXP 0xbb
-#define ST_PRESS_1_ODR_ADDR 0x20
-#define ST_PRESS_1_ODR_MASK 0x70
-#define ST_PRESS_1_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_1_ODR_AVL_7HZ_VAL 0x05
-#define ST_PRESS_1_ODR_AVL_13HZ_VAL 0x06
-#define ST_PRESS_1_ODR_AVL_25HZ_VAL 0x07
-#define ST_PRESS_1_PW_ADDR 0x20
-#define ST_PRESS_1_PW_MASK 0x80
-#define ST_PRESS_1_FS_ADDR 0x23
-#define ST_PRESS_1_FS_MASK 0x30
-#define ST_PRESS_1_FS_AVL_1260_VAL 0x00
-#define ST_PRESS_1_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_1_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
-#define ST_PRESS_1_BDU_ADDR 0x20
-#define ST_PRESS_1_BDU_MASK 0x04
-#define ST_PRESS_1_DRDY_IRQ_ADDR 0x22
-#define ST_PRESS_1_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_1_DRDY_IRQ_INT2_MASK 0x20
-#define ST_PRESS_1_MULTIREAD_BIT true
-#define ST_PRESS_1_TEMP_OFFSET 42500
-
-static const struct iio_chan_spec st_press_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_PRESSURE,
+/* CUSTOM VALUES FOR LPS331AP SENSOR */
+#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
+#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
+#define ST_PRESS_LPS331AP_ODR_MASK 0x70
+#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL 0x05
+#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL 0x06
+#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL 0x07
+#define ST_PRESS_LPS331AP_PW_ADDR 0x20
+#define ST_PRESS_LPS331AP_PW_MASK 0x80
+#define ST_PRESS_LPS331AP_FS_ADDR 0x23
+#define ST_PRESS_LPS331AP_FS_MASK 0x30
+#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
+#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
+#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
+#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
+#define ST_PRESS_LPS331AP_BDU_MASK 0x04
+#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
+#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
+#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
+#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
+#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
+#define ST_PRESS_LPS331AP_OUT_XL_ADDR 0x28
+#define ST_TEMP_LPS331AP_OUT_L_ADDR 0x2b
+
+/* CUSTOM VALUES FOR LPS001WP SENSOR */
+#define ST_PRESS_LPS001WP_WAI_EXP 0xba
+#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
+#define ST_PRESS_LPS001WP_ODR_MASK 0x30
+#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL 0x02
+#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
+#define ST_PRESS_LPS001WP_PW_ADDR 0x20
+#define ST_PRESS_LPS001WP_PW_MASK 0x40
+#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
+#define ST_PRESS_LPS001WP_BDU_MASK 0x04
+#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
+#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
+#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
+
+static const struct iio_chan_spec st_press_lps331ap_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_PRESS_LPS331AP_OUT_XL_ADDR,
+ .scan_index = ST_SENSORS_SCAN_X,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 24,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
- ST_SENSORS_SCAN_X, 0, IIO_NO_MOD, 'u', IIO_LE, 24, 24,
- ST_PRESS_DEFAULT_OUT_XL_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_TEMP,
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_OFFSET),
- -1, 0, IIO_NO_MOD, 's', IIO_LE, 16, 16,
- ST_TEMP_DEFAULT_OUT_L_ADDR),
+ .modified = 0,
+ },
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_TEMP_LPS331AP_OUT_L_ADDR,
+ .scan_index = -1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .modified = 0,
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1)
+};
+
+static const struct iio_chan_spec st_press_lps001wp_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_PRESS_LPS001WP_OUT_L_ADDR,
+ .scan_index = ST_SENSORS_SCAN_X,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .modified = 0,
+ },
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_TEMP_LPS001WP_OUT_L_ADDR,
+ .scan_index = -1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .modified = 0,
+ },
IIO_CHAN_SOFT_TIMESTAMP(1)
};
static const struct st_sensors st_press_sensors[] = {
{
- .wai = ST_PRESS_1_WAI_EXP,
+ .wai = ST_PRESS_LPS331AP_WAI_EXP,
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
},
- .ch = (struct iio_chan_spec *)st_press_channels,
+ .ch = (struct iio_chan_spec *)st_press_lps331ap_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps331ap_channels),
.odr = {
- .addr = ST_PRESS_1_ODR_ADDR,
- .mask = ST_PRESS_1_ODR_MASK,
+ .addr = ST_PRESS_LPS331AP_ODR_ADDR,
+ .mask = ST_PRESS_LPS331AP_ODR_MASK,
.odr_avl = {
- { 1, ST_PRESS_1_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_1_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_1_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_1_ODR_AVL_25HZ_VAL, },
+ { 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, },
+ { 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, },
},
},
.pw = {
- .addr = ST_PRESS_1_PW_ADDR,
- .mask = ST_PRESS_1_PW_MASK,
+ .addr = ST_PRESS_LPS331AP_PW_ADDR,
+ .mask = ST_PRESS_LPS331AP_PW_MASK,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = ST_PRESS_1_FS_ADDR,
- .mask = ST_PRESS_1_FS_MASK,
+ .addr = ST_PRESS_LPS331AP_FS_ADDR,
+ .mask = ST_PRESS_LPS331AP_FS_MASK,
.fs_avl = {
[0] = {
.num = ST_PRESS_FS_AVL_1260MB,
- .value = ST_PRESS_1_FS_AVL_1260_VAL,
- .gain = ST_PRESS_1_FS_AVL_1260_GAIN,
- .gain2 = ST_PRESS_1_FS_AVL_TEMP_GAIN,
+ .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
+ .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
+ .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
},
},
},
.bdu = {
- .addr = ST_PRESS_1_BDU_ADDR,
- .mask = ST_PRESS_1_BDU_MASK,
+ .addr = ST_PRESS_LPS331AP_BDU_ADDR,
+ .mask = ST_PRESS_LPS331AP_BDU_MASK,
},
.drdy_irq = {
- .addr = ST_PRESS_1_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_1_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_1_DRDY_IRQ_INT2_MASK,
+ .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
},
- .multi_read_bit = ST_PRESS_1_MULTIREAD_BIT,
+ .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_PRESS_LPS001WP_WAI_EXP,
+ .sensors_supported = {
+ [0] = LPS001WP_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS001WP_ODR_ADDR,
+ .mask = ST_PRESS_LPS001WP_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS001WP_PW_ADDR,
+ .mask = ST_PRESS_LPS001WP_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = 0,
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS001WP_BDU_ADDR,
+ .mask = ST_PRESS_LPS001WP_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = 0,
+ },
+ .multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
.bootime = 2,
},
};
@@ -207,44 +314,85 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
#define ST_PRESS_TRIGGER_OPS NULL
#endif
+static void st_press_power_enable(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *pdata = iio_priv(indio_dev);
+ int err;
+
+ /* Regulators not mandatory, but if requested we should enable them. */
+ pdata->vdd = devm_regulator_get_optional(&indio_dev->dev, "vdd");
+ if (!IS_ERR(pdata->vdd)) {
+ err = regulator_enable(pdata->vdd);
+ if (err != 0)
+ dev_warn(&indio_dev->dev,
+ "Failed to enable specified Vdd supply\n");
+ }
+
+ pdata->vdd_io = devm_regulator_get_optional(&indio_dev->dev, "vddio");
+ if (!IS_ERR(pdata->vdd_io)) {
+ err = regulator_enable(pdata->vdd_io);
+ if (err != 0)
+ dev_warn(&indio_dev->dev,
+ "Failed to enable specified Vdd_IO supply\n");
+ }
+}
+
+static void st_press_power_disable(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *pdata = iio_priv(indio_dev);
+
+ if (!IS_ERR(pdata->vdd))
+ regulator_disable(pdata->vdd);
+
+ if (!IS_ERR(pdata->vdd_io))
+ regulator_disable(pdata->vdd_io);
+}
+
int st_press_common_probe(struct iio_dev *indio_dev,
struct st_sensors_platform_data *plat_data)
{
- int err;
struct st_sensor_data *pdata = iio_priv(indio_dev);
+ int irq = pdata->get_irq_data_ready(indio_dev);
+ int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &press_info;
+ st_press_power_enable(indio_dev);
+
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_press_sensors), st_press_sensors);
+ ARRAY_SIZE(st_press_sensors),
+ st_press_sensors);
if (err < 0)
- goto st_press_common_probe_error;
+ return err;
pdata->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
- pdata->multiread_bit = pdata->sensor->multi_read_bit;
- indio_dev->channels = pdata->sensor->ch;
- indio_dev->num_channels = ARRAY_SIZE(st_press_channels);
+ pdata->multiread_bit = pdata->sensor->multi_read_bit;
+ indio_dev->channels = pdata->sensor->ch;
+ indio_dev->num_channels = pdata->sensor->num_ch;
+
+ if (pdata->sensor->fs.addr != 0)
+ pdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &pdata->sensor->fs.fs_avl[0];
- pdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &pdata->sensor->fs.fs_avl[0];
pdata->odr = pdata->sensor->odr.odr_avl[0].hz;
- if (!plat_data)
+ /* Some devices don't support a data ready pin. */
+ if (!plat_data && pdata->sensor->drdy_irq.addr)
plat_data =
(struct st_sensors_platform_data *)&default_press_pdata;
err = st_sensors_init_sensor(indio_dev, plat_data);
if (err < 0)
- goto st_press_common_probe_error;
+ return err;
- if (pdata->get_irq_data_ready(indio_dev) > 0) {
- err = st_press_allocate_ring(indio_dev);
- if (err < 0)
- goto st_press_common_probe_error;
+ err = st_press_allocate_ring(indio_dev);
+ if (err < 0)
+ return err;
+ if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
- ST_PRESS_TRIGGER_OPS);
+ ST_PRESS_TRIGGER_OPS);
if (err < 0)
goto st_press_probe_trigger_error;
}
@@ -256,12 +404,11 @@ int st_press_common_probe(struct iio_dev *indio_dev,
return err;
st_press_device_register_error:
- if (pdata->get_irq_data_ready(indio_dev) > 0)
+ if (irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_press_probe_trigger_error:
- if (pdata->get_irq_data_ready(indio_dev) > 0)
- st_press_deallocate_ring(indio_dev);
-st_press_common_probe_error:
+ st_press_deallocate_ring(indio_dev);
+
return err;
}
EXPORT_SYMBOL(st_press_common_probe);
@@ -270,11 +417,13 @@ void st_press_common_remove(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
+ st_press_power_disable(indio_dev);
+
iio_device_unregister(indio_dev);
- if (pdata->get_irq_data_ready(indio_dev) > 0) {
+ if (pdata->get_irq_data_ready(indio_dev) > 0)
st_sensors_deallocate_trigger(indio_dev);
- st_press_deallocate_ring(indio_dev);
- }
+
+ st_press_deallocate_ring(indio_dev);
}
EXPORT_SYMBOL(st_press_common_remove);
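For reference, the regulator handling added above follows the standard optional-supply pattern: ask for the supply, treat an error pointer as "not wired up on this board", and only warn if an existing supply fails to enable. A minimal stand-alone sketch of that pattern (generic struct device and supply name, not the st_sensors code itself):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Enable an optional supply; its absence is not treated as an error. */
static struct regulator *enable_optional_supply(struct device *dev,
						const char *name)
{
	struct regulator *reg;
	int err;

	reg = devm_regulator_get_optional(dev, name);
	if (IS_ERR(reg))
		return reg;	/* supply not provided on this platform */

	err = regulator_enable(reg);
	if (err)
		dev_warn(dev, "failed to enable %s supply: %d\n", name, err);

	return reg;
}

/* On remove, mirror the driver above: if (!IS_ERR(reg)) regulator_disable(reg); */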
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 08aac5e6251..51eab7fcb19 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -49,6 +49,7 @@ static int st_press_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id st_press_id_table[] = {
+ { LPS001WP_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{},
};
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 399a29b6017..27322af6d66 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -48,6 +48,7 @@ static int st_press_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id st_press_id_table[] = {
+ { LPS001WP_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{},
};
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 6d63883da1a..84a0789c3d9 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -70,12 +70,16 @@ static int tmp006_read_measurement(struct tmp006_data *data, u8 reg)
return i2c_smbus_read_word_swapped(data->client, reg);
}
+static const int tmp006_freqs[5][2] = { {4, 0}, {2, 0}, {1, 0},
+ {0, 500000}, {0, 250000} };
+
static int tmp006_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long mask)
{
struct tmp006_data *data = iio_priv(indio_dev);
s32 ret;
+ int cr;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -106,6 +110,12 @@ static int tmp006_read_raw(struct iio_dev *indio_dev,
break;
}
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ cr = (data->config & TMP006_CONFIG_CR_MASK)
+ >> TMP006_CONFIG_CR_SHIFT;
+ *val = tmp006_freqs[cr][0];
+ *val2 = tmp006_freqs[cr][1];
+ return IIO_VAL_INT_PLUS_MICRO;
default:
break;
}
@@ -113,48 +123,32 @@ static int tmp006_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static const char * const tmp006_freqs[] = { "4", "2", "1", "0.5", "0.25" };
-
-static ssize_t tmp006_show_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev));
- int cr = (data->config & TMP006_CONFIG_CR_MASK)
- >> TMP006_CONFIG_CR_SHIFT;
- return sprintf(buf, "%s\n", tmp006_freqs[cr]);
-}
-
-static ssize_t tmp006_store_freq(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int tmp006_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tmp006_data *data = iio_priv(indio_dev);
int i;
- bool found = false;
for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
- if (sysfs_streq(buf, tmp006_freqs[i])) {
- found = true;
- break;
- }
- if (!found)
- return -EINVAL;
+ if ((val == tmp006_freqs[i][0]) &&
+ (val2 == tmp006_freqs[i][1])) {
+ data->config &= ~TMP006_CONFIG_CR_MASK;
+ data->config |= i << TMP006_CONFIG_CR_SHIFT;
- data->config &= ~TMP006_CONFIG_CR_MASK;
- data->config |= i << TMP006_CONFIG_CR_SHIFT;
+ return i2c_smbus_write_word_swapped(data->client,
+ TMP006_CONFIG,
+ data->config);
- return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
- data->config);
+ }
+ return -EINVAL;
}
-static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
- tmp006_show_freq, tmp006_store_freq);
-
static IIO_CONST_ATTR(sampling_frequency_available, "4 2 1 0.5 0.25");
static struct attribute *tmp006_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@@ -168,16 +162,19 @@ static const struct iio_chan_spec tmp006_channels[] = {
.type = IIO_VOLTAGE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
},
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}
};
static const struct iio_info tmp006_info = {
.read_raw = tmp006_read_raw,
+ .write_raw = tmp006_write_raw,
.attrs = &tmp006_attribute_group,
.driver_module = THIS_MODULE,
};
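The sampling-frequency rework above replaces free-form sysfs strings with IIO_VAL_INT_PLUS_MICRO pairs, so "0.5 Hz" becomes val = 0, val2 = 500000. The write path is then just a table lookup; a minimal sketch of that lookup (plain C, with the returned index standing in for the TMP006_CONFIG conversion-rate field):

/* Rates as integer + micro pairs: 4, 2, 1, 0.5 and 0.25 Hz. */
static const int freqs[][2] = {
	{ 4, 0 }, { 2, 0 }, { 1, 0 }, { 0, 500000 }, { 0, 250000 }
};

/* Map a requested val/val2 pair to its conversion-rate index, or -1. */
static int freq_to_cr(int val, int val2)
{
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		if (val == freqs[i][0] && val2 == freqs[i][1])
			return (int)i;
	return -1;
}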
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index effcd0ac98d..15e3b850f51 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -23,7 +23,7 @@ struct iio_sysfs_trig {
};
static LIST_HEAD(iio_sysfs_trig_list);
-static DEFINE_MUTEX(iio_syfs_trig_list_mut);
+static DEFINE_MUTEX(iio_sysfs_trig_list_mut);
static int iio_sysfs_trigger_probe(int id);
static ssize_t iio_sysfs_trig_add(struct device *dev,
@@ -135,7 +135,7 @@ static int iio_sysfs_trigger_probe(int id)
struct iio_sysfs_trig *t;
int ret;
bool foundit = false;
- mutex_lock(&iio_syfs_trig_list_mut);
+ mutex_lock(&iio_sysfs_trig_list_mut);
list_for_each_entry(t, &iio_sysfs_trig_list, l)
if (id == t->id) {
foundit = true;
@@ -169,7 +169,7 @@ static int iio_sysfs_trigger_probe(int id)
goto out2;
list_add(&t->l, &iio_sysfs_trig_list);
__module_get(THIS_MODULE);
- mutex_unlock(&iio_syfs_trig_list_mut);
+ mutex_unlock(&iio_sysfs_trig_list_mut);
return 0;
out2:
@@ -177,7 +177,7 @@ out2:
free_t:
kfree(t);
out1:
- mutex_unlock(&iio_syfs_trig_list_mut);
+ mutex_unlock(&iio_sysfs_trig_list_mut);
return ret;
}
@@ -185,14 +185,14 @@ static int iio_sysfs_trigger_remove(int id)
{
bool foundit = false;
struct iio_sysfs_trig *t;
- mutex_lock(&iio_syfs_trig_list_mut);
+ mutex_lock(&iio_sysfs_trig_list_mut);
list_for_each_entry(t, &iio_sysfs_trig_list, l)
if (id == t->id) {
foundit = true;
break;
}
if (!foundit) {
- mutex_unlock(&iio_syfs_trig_list_mut);
+ mutex_unlock(&iio_sysfs_trig_list_mut);
return -EINVAL;
}
@@ -202,7 +202,7 @@ static int iio_sysfs_trigger_remove(int id)
list_del(&t->l);
kfree(t);
module_put(THIS_MODULE);
- mutex_unlock(&iio_syfs_trig_list_mut);
+ mutex_unlock(&iio_sysfs_trig_list_mut);
return 0;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b84791f03a2..5ceda710f51 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -31,17 +31,6 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
<http://www.openfabrics.org/git/>.
-config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
- bool "Experimental and unstable ABI for userspace access to flow steering verbs"
- depends on INFINIBAND_USER_ACCESS
- depends on STAGING
- ---help---
- The final ABI for userspace access to flow steering verbs
- has not been defined. To use the current ABI, *WHICH WILL
- CHANGE IN THE FUTURE*, say Y here.
-
- If unsure, say N.
-
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 784b97cb05b..f2ef7ef0f36 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -383,14 +383,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
int id;
- static int next_id;
idr_preload(GFP_KERNEL);
spin_lock_irqsave(&cm.lock, flags);
- id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
- if (id >= 0)
- next_id = max(id + 1, 0);
+ id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
spin_unlock_irqrestore(&cm.lock, flags);
idr_preload_end();
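idr_alloc_cyclic() above replaces the open-coded next_id bookkeeping: it hands out increasing ids and wraps when the range is exhausted, so recently freed ids are not immediately reused. A minimal sketch of the allocation pattern (hypothetical table and lock, not the CM code itself):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_id_table);
static DEFINE_SPINLOCK(my_id_lock);

/* Allocate a cyclically increasing id for @ptr; returns the id or a negative errno. */
static int my_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate memory outside the lock */
	spin_lock(&my_id_lock);
	id = idr_alloc_cyclic(&my_id_table, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_id_lock);
	idr_preload_end();

	return id;
}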
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index dab4b41f171..8e49db690f3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -328,28 +328,6 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
return ret;
}
-static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
-{
- int i;
- int err;
- struct ib_port_attr props;
- union ib_gid tmp;
-
- err = ib_query_port(device, port_num, &props);
- if (err)
- return err;
-
- for (i = 0; i < props.gid_tbl_len; ++i) {
- err = ib_query_gid(device, port_num, i, &tmp);
- if (err)
- return err;
- if (!memcmp(&tmp, gid, sizeof tmp))
- return 0;
- }
-
- return -EADDRNOTAVAIL;
-}
-
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -371,13 +349,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv)
+static int cma_acquire_dev(struct rdma_id_private *id_priv,
+ struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
union ib_gid gid, iboe_gid;
int ret = -ENODEV;
- u8 port;
+ u8 port, found_port;
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
@@ -389,17 +368,39 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
iboe_addr_get_sgid(dev_addr, &iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
+ if (listen_id_priv &&
+ rdma_port_get_link_layer(listen_id_priv->id.device,
+ listen_id_priv->id.port_num) == dev_ll) {
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+ ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
+ &found_port, NULL);
+ else
+ ret = ib_find_cached_gid(cma_dev->device, &gid,
+ &found_port, NULL);
+
+ if (!ret && (port == found_port)) {
+ id_priv->id.port_num = found_port;
+ goto out;
+ }
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ if (listen_id_priv &&
+ listen_id_priv->cma_dev == cma_dev &&
+ listen_id_priv->id.port_num == port)
+ continue;
if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
- ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+ ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
else
- ret = find_gid_port(cma_dev->device, &gid, port);
+ ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
- if (!ret) {
- id_priv->id.port_num = port;
+ if (!ret && (port == found_port)) {
+ id_priv->id.port_num = found_port;
goto out;
}
}
@@ -1292,7 +1293,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- ret = cma_acquire_dev(conn_id);
+ ret = cma_acquire_dev(conn_id, listen_id);
if (ret)
goto err2;
@@ -1451,7 +1452,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
{
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
- struct net_device *dev = NULL;
struct rdma_cm_event event;
int ret;
struct ib_device_attr attr;
@@ -1481,7 +1481,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
goto out;
}
- ret = cma_acquire_dev(conn_id);
+ ret = cma_acquire_dev(conn_id, listen_id);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -1529,8 +1529,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
cma_deref_id(conn_id);
out:
- if (dev)
- dev_put(dev);
mutex_unlock(&listen_id->handler_mutex);
return ret;
}
@@ -1848,6 +1846,26 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
return 0;
}
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ int prio;
+ struct net_device *dev;
+
+ prio = rt_tos2priority(tos);
+ dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
+ vlan_dev_real_dev(ndev) : ndev;
+
+ if (dev->num_tc)
+ return netdev_get_prio_tc_map(dev, prio);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ return (vlan_dev_get_egress_qos_mask(ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#endif
+ return 0;
+}
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -1888,11 +1906,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->reversible = 1;
route->path_rec->pkey = cpu_to_be16(0xffff);
route->path_rec->mtu_selector = IB_SA_EQ;
- route->path_rec->sl = netdev_get_prio_tc_map(
- ndev->priv_flags & IFF_802_1Q_VLAN ?
- vlan_dev_real_dev(ndev) : ndev,
- rt_tos2priority(id_priv->tos));
-
+ route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
route->path_rec->rate_selector = IB_SA_EQ;
route->path_rec->rate = iboe_get_rate(ndev);
@@ -2050,7 +2064,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
goto out;
if (!status && !id_priv->cma_dev)
- status = cma_acquire_dev(id_priv);
+ status = cma_acquire_dev(id_priv, NULL);
if (status) {
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
@@ -2294,7 +2308,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
int low, high, remaining;
unsigned int rover;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(&init_net, &low, &high);
remaining = (high - low) + 1;
rover = net_random() % remaining + low;
retry:
@@ -2547,7 +2561,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- ret = cma_acquire_dev(id_priv);
+ ret = cma_acquire_dev(id_priv, NULL);
if (ret)
goto err1;
}
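The open-coded find_gid_port() walk is replaced above by ib_find_cached_gid(), which searches the cached GID tables of all ports and reports the matching port number. A minimal sketch of the "does this GID live on this port" check with that helper (signature as in <rdma/ib_cache.h> of this era):

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* True if @gid is present in the cached GID table of @port on @device. */
static bool gid_on_port(struct ib_device *device, union ib_gid *gid, u8 port)
{
	u8 found_port;

	if (ib_find_cached_gid(device, gid, &found_port, NULL))
		return false;

	return found_port == port;
}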
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index da06abde9e0..a1e9cba8494 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
list_for_each_entry(client, &client_list, list) {
if (client->index == index) {
if (op < 0 || op >= client->nops ||
- !client->cb_table[RDMA_NL_GET_OP(op)].dump)
+ !client->cb_table[op].dump)
return -EINVAL;
{
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index cde1e7b5b85..faad2caf22b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -612,6 +612,7 @@ static ssize_t show_node_type(struct device *device,
switch (dev->node_type) {
case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
+ case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b0f189be543..ab8b1c30b36 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -57,7 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL");
static unsigned int max_backlog = 1024;
static struct ctl_table_header *ucma_ctl_table_hdr;
-static ctl_table ucma_ctl_table[] = {
+static struct ctl_table ucma_ctl_table[] = {
{
.procname = "max_backlog",
.data = &max_backlog,
@@ -271,7 +271,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
goto out;
}
ctx->backlog--;
- } else if (!ctx->uid) {
+ } else if (!ctx->uid || ctx->cm_id != cm_id) {
/*
* We ignore events for new connections until userspace has set
* their context. This can only happen if an error occurs on a
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index d8f9c6c272d..bdc842e9fae 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -47,6 +47,14 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
+#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
+ do { \
+ (udata)->inbuf = (void __user *) (ibuf); \
+ (udata)->outbuf = (void __user *) (obuf); \
+ (udata)->inlen = (ilen); \
+ (udata)->outlen = (olen); \
+ } while (0)
+
/*
* Our lifetime rules for these structs are the following:
*
@@ -178,6 +186,22 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
+struct ib_uverbs_flow_spec {
+ union {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_spec_eth eth;
+ struct ib_uverbs_flow_spec_ipv4 ipv4;
+ struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
+ };
+};
+
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
const char __user *buf, int in_len, \
@@ -217,9 +241,13 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
IB_UVERBS_DECLARE_CMD(create_xsrq);
IB_UVERBS_DECLARE_CMD(open_xrcd);
IB_UVERBS_DECLARE_CMD(close_xrcd);
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-IB_UVERBS_DECLARE_CMD(create_flow);
-IB_UVERBS_DECLARE_CMD(destroy_flow);
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+
+#define IB_UVERBS_DECLARE_EX_CMD(name) \
+ int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \
+ struct ib_udata *ucore, \
+ struct ib_udata *uhw)
+
+IB_UVERBS_DECLARE_EX_CMD(create_flow);
+IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
#endif /* UVERBS_H */
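The INIT_UDATA() macro moved into this header simply fills the four fields of an ib_udata so the generic ib_copy_from_udata()/ib_copy_to_udata() helpers can replace raw copy_from_user()/copy_to_user() in command handlers. Expanded by hand (illustrative buffers and lengths only), it amounts to:

#include <rdma/ib_verbs.h>	/* struct ib_udata */

static void fill_udata(struct ib_udata *udata,
		       const void __user *inbuf, void __user *outbuf,
		       size_t inlen, size_t outlen)
{
	udata->inbuf  = (void __user *)inbuf;
	udata->outbuf = outbuf;
	udata->inlen  = inlen;
	udata->outlen = outlen;
}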
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 2f0f01b70e3..65f6e7dc380 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,17 +54,7 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
-
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (void __user *) (ibuf); \
- (udata)->outbuf = (void __user *) (obuf); \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
/*
* The ib_uobject locking scheme is as follows:
@@ -939,13 +929,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
return -EINVAL;
- /*
- * Local write permission is required if remote write or
- * remote atomic permission is also requested.
- */
- if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
- !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
- return -EINVAL;
+ ret = ib_check_mr_access(cmd.access_flags);
+ if (ret)
+ return ret;
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
@@ -2128,6 +2114,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
}
next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
+ if (next->opcode == IB_WR_SEND_WITH_IMM)
+ next->ex.imm_data =
+ (__be32 __force) user_wr->ex.imm_data;
} else {
switch (next->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -2601,8 +2590,7 @@ out_put:
return ret ? ret : in_len;
}
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
+static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
ib_spec->type = kern_spec->type;
@@ -2642,28 +2630,31 @@ static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
return 0;
}
-ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
- const char __user *buf, int in_len,
- int out_len)
+int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
{
struct ib_uverbs_create_flow cmd;
struct ib_uverbs_create_flow_resp resp;
struct ib_uobject *uobj;
struct ib_flow *flow_id;
- struct ib_kern_flow_attr *kern_flow_attr;
+ struct ib_uverbs_flow_attr *kern_flow_attr;
struct ib_flow_attr *flow_attr;
struct ib_qp *qp;
int err = 0;
void *kern_spec;
void *ib_spec;
int i;
- int kern_attr_size;
- if (out_len < sizeof(resp))
+ if (ucore->outlen < sizeof(resp))
return -ENOSPC;
- if (copy_from_user(&cmd, buf, sizeof(cmd)))
- return -EFAULT;
+ err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ if (err)
+ return err;
+
+ ucore->inbuf += sizeof(cmd);
+ ucore->inlen -= sizeof(cmd);
if (cmd.comp_mask)
return -EINVAL;
@@ -2672,32 +2663,27 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
!capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
return -EPERM;
- if (cmd.flow_attr.num_of_specs < 0 ||
- cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
+ if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
return -EINVAL;
- kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
- sizeof(struct ib_uverbs_cmd_hdr_ex);
-
- if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
- kern_attr_size < 0 || kern_attr_size >
- (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
+ if (cmd.flow_attr.size > ucore->inlen ||
+ cmd.flow_attr.size >
+ (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
return -EINVAL;
if (cmd.flow_attr.num_of_specs) {
- kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+ kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
+ GFP_KERNEL);
if (!kern_flow_attr)
return -ENOMEM;
memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
- if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
- kern_attr_size)) {
- err = -EFAULT;
+ err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+ cmd.flow_attr.size);
+ if (err)
goto err_free_attr;
- }
} else {
kern_flow_attr = &cmd.flow_attr;
- kern_attr_size = sizeof(cmd.flow_attr);
}
uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
@@ -2714,7 +2700,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
goto err_uobj;
}
- flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+ flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
if (!flow_attr) {
err = -ENOMEM;
goto err_put;
@@ -2729,19 +2715,22 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
kern_spec = kern_flow_attr + 1;
ib_spec = flow_attr + 1;
- for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
+ for (i = 0; i < flow_attr->num_of_specs &&
+ cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
+ cmd.flow_attr.size >=
+ ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
err = kern_spec_to_ib_spec(kern_spec, ib_spec);
if (err)
goto err_free;
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
- kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
- kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
+ cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
+ kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
ib_spec += ((union ib_flow_spec *) ib_spec)->size;
}
- if (kern_attr_size) {
- pr_warn("create flow failed, %d bytes left from uverb cmd\n",
- kern_attr_size);
+ if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
+ pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
+ i, cmd.flow_attr.size);
goto err_free;
}
flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2760,11 +2749,10 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof(resp));
resp.flow_handle = uobj->id;
- if (copy_to_user((void __user *)(unsigned long) cmd.response,
- &resp, sizeof(resp))) {
- err = -EFAULT;
+ err = ib_copy_to_udata(ucore,
+ &resp, sizeof(resp));
+ if (err)
goto err_copy;
- }
put_qp_read(qp);
mutex_lock(&file->mutex);
@@ -2777,7 +2765,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
kfree(flow_attr);
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
- return in_len;
+ return 0;
err_copy:
idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
@@ -2794,16 +2782,18 @@ err_free_attr:
return err;
}
-ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
- const char __user *buf, int in_len,
- int out_len) {
+int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
struct ib_uverbs_destroy_flow cmd;
struct ib_flow *flow_id;
struct ib_uobject *uobj;
int ret;
- if (copy_from_user(&cmd, buf, sizeof(cmd)))
- return -EFAULT;
+ ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ if (ret)
+ return ret;
uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
file->ucontext);
@@ -2825,9 +2815,8 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
put_uobj(uobj);
- return ret ? ret : in_len;
+ return ret;
}
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
struct ib_uverbs_create_xsrq *cmd,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2df31f68ea0..34386943ebc 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -115,10 +115,13 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
[IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
[IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
- [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
- [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+};
+
+static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw) = {
+ [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
+ [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -589,6 +592,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_cmd_hdr hdr;
+ __u32 flags;
if (count < sizeof hdr)
return -EINVAL;
@@ -596,45 +600,105 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (copy_from_user(&hdr, buf, sizeof hdr))
return -EFAULT;
- if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[hdr.command])
- return -EINVAL;
+ flags = (hdr.command &
+ IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
- if (!file->ucontext &&
- hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
- return -EINVAL;
+ if (!flags) {
+ __u32 command;
- if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
- return -ENOSYS;
+ if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+ IB_USER_VERBS_CMD_COMMAND_MASK))
+ return -EINVAL;
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
- if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
- struct ib_uverbs_cmd_hdr_ex hdr_ex;
+ command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
- if (copy_from_user(&hdr_ex, buf, sizeof(hdr_ex)))
- return -EFAULT;
+ if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
+ !uverbs_cmd_table[command])
+ return -EINVAL;
- if (((hdr_ex.in_words + hdr_ex.provider_in_words) * 4) != count)
+ if (!file->ucontext &&
+ command != IB_USER_VERBS_CMD_GET_CONTEXT)
return -EINVAL;
- return uverbs_cmd_table[hdr.command](file,
- buf + sizeof(hdr_ex),
- (hdr_ex.in_words +
- hdr_ex.provider_in_words) * 4,
- (hdr_ex.out_words +
- hdr_ex.provider_out_words) * 4);
- } else {
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+ if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << command)))
+ return -ENOSYS;
+
if (hdr.in_words * 4 != count)
return -EINVAL;
- return uverbs_cmd_table[hdr.command](file,
- buf + sizeof(hdr),
- hdr.in_words * 4,
- hdr.out_words * 4);
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+ return uverbs_cmd_table[command](file,
+ buf + sizeof(hdr),
+ hdr.in_words * 4,
+ hdr.out_words * 4);
+
+ } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
+ __u32 command;
+
+ struct ib_uverbs_ex_cmd_hdr ex_hdr;
+ struct ib_udata ucore;
+ struct ib_udata uhw;
+ int err;
+ size_t written_count = count;
+
+ if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+ IB_USER_VERBS_CMD_COMMAND_MASK))
+ return -EINVAL;
+
+ command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+
+ if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
+ !uverbs_ex_cmd_table[command])
+ return -ENOSYS;
+
+ if (!file->ucontext)
+ return -EINVAL;
+
+ if (!(file->device->ib_dev->uverbs_ex_cmd_mask & (1ull << command)))
+ return -ENOSYS;
+
+ if (count < (sizeof(hdr) + sizeof(ex_hdr)))
+ return -EINVAL;
+
+ if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
+ return -EFAULT;
+
+ count -= sizeof(hdr) + sizeof(ex_hdr);
+ buf += sizeof(hdr) + sizeof(ex_hdr);
+
+ if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
+ return -EINVAL;
+
+ if (ex_hdr.response) {
+ if (!hdr.out_words && !ex_hdr.provider_out_words)
+ return -EINVAL;
+ } else {
+ if (hdr.out_words || ex_hdr.provider_out_words)
+ return -EINVAL;
+ }
+
+ INIT_UDATA(&ucore,
+ (hdr.in_words) ? buf : 0,
+ (unsigned long)ex_hdr.response,
+ hdr.in_words * 8,
+ hdr.out_words * 8);
+
+ INIT_UDATA(&uhw,
+ (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
+ (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
+
+ err = uverbs_ex_cmd_table[command](file,
+ &ucore,
+ &uhw);
+
+ if (err)
+ return err;
+
+ return written_count;
}
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+
+ return -ENOSYS;
}
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
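In the rewritten write() dispatcher above, legacy and extended commands share one entry point and are told apart purely by flag bits packed into hdr.command; note also that extended commands size their payload in 8-byte units where legacy commands use 4-byte words. A minimal sketch of the decode step (mask and flag names are the ib_user_verbs.h uapi additions that accompany this series):

#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>

/* Returns 0 for a legacy command, 1 for an extended one, or a negative errno;
 * *command receives the index into the corresponding dispatch table. */
static int classify_uverbs_cmd(__u32 hdr_command, __u32 *command)
{
	__u32 flags = (hdr_command & IB_USER_VERBS_CMD_FLAGS_MASK) >>
		      IB_USER_VERBS_CMD_FLAGS_SHIFT;

	if (hdr_command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
				   IB_USER_VERBS_CMD_COMMAND_MASK))
		return -EINVAL;

	*command = hdr_command & IB_USER_VERBS_CMD_COMMAND_MASK;

	if (!flags)
		return 0;
	if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED)
		return 1;
	return -ENOSYS;
}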
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a321df28bab..d4f6ddf72ff 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -114,6 +114,8 @@ rdma_node_get_transport(enum rdma_node_type node_type)
return RDMA_TRANSPORT_IB;
case RDMA_NODE_RNIC:
return RDMA_TRANSPORT_IWARP;
+ case RDMA_NODE_USNIC:
+ return RDMA_TRANSPORT_USNIC;
default:
BUG();
return 0;
@@ -130,6 +132,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_
case RDMA_TRANSPORT_IB:
return IB_LINK_LAYER_INFINIBAND;
case RDMA_TRANSPORT_IWARP:
+ case RDMA_TRANSPORT_USNIC:
return IB_LINK_LAYER_ETHERNET;
default:
return IB_LINK_LAYER_UNSPECIFIED;
@@ -958,6 +961,11 @@ EXPORT_SYMBOL(ib_resize_cq);
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
struct ib_mr *mr;
+ int err;
+
+ err = ib_check_mr_access(mr_access_flags);
+ if (err)
+ return ERR_PTR(err);
mr = pd->device->get_dma_mr(pd, mr_access_flags);
@@ -980,6 +988,11 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
u64 *iova_start)
{
struct ib_mr *mr;
+ int err;
+
+ err = ib_check_mr_access(mr_access_flags);
+ if (err)
+ return ERR_PTR(err);
if (!pd->device->reg_phys_mr)
return ERR_PTR(-ENOSYS);
@@ -1010,6 +1023,10 @@ int ib_rereg_phys_mr(struct ib_mr *mr,
struct ib_pd *old_pd;
int ret;
+ ret = ib_check_mr_access(mr_access_flags);
+ if (ret)
+ return ret;
+
if (!mr->device->rereg_phys_mr)
return -ENOSYS;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 33d2cc6ab56..4a033853312 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -602,10 +602,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
+ PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
"qpmask 0x%x cqshift %lu cqmask 0x%x\n",
(unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
+ (u64)pci_resource_start(rdev->lldi.pdev, 2),
rdev->lldi.db_reg,
rdev->lldi.gts_reg,
rdev->qpshift, rdev->qpmask,
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index f5cb13b2144..cc04b7ba348 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
int j;
int ret;
- ret = get_user_pages(current, current->mm, addr,
- npages, 0, 1, pages, NULL);
-
+ ret = get_user_pages_fast(addr, npages, 0, pages);
if (ret != npages) {
int i;
@@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
while (dim) {
const int mxp = 8;
- down_write(&current->mm->mmap_sem);
ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
- up_write(&current->mm->mmap_sem);
-
if (ret <= 0)
goto done_unlock;
else {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d5e60f44ba5..66dbf806237 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -324,7 +324,7 @@ static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
u32 i;
i = cq->mcq.cons_index;
- while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+ while (get_sw_cqe(cq, i))
++i;
return i - cq->mcq.cons_index;
@@ -365,7 +365,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
mutex_lock(&cq->resize_mutex);
- if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+ if (entries < 1) {
err = -EINVAL;
goto out;
}
@@ -376,6 +376,11 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
goto out;
}
+ if (entries > dev->dev->caps.max_cqes) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (ibcq->uobject) {
err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
if (err)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f0612645de9..1958c5ca792 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,18 +177,18 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap;
- props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
+ props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
- props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
+ props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
- props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
+ props->max_mr = dev->dev->quotas.mpt;
props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
- props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
+ props->max_srq = dev->dev->quotas.srq;
props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
props->max_srq_sge = dev->dev->caps.max_srq_sge;
props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
@@ -526,7 +526,6 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (IS_ERR(mailbox))
return 0;
- memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -547,8 +546,6 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, 256);
-
if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
@@ -879,8 +876,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_ib_dev *mdev = to_mdev(qp->device);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
- size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
- (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
static const u16 __mlx4_domain[] = {
[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -905,7 +900,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, rule_size);
ctrl = mailbox->buf;
ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
@@ -1691,11 +1685,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
- ibdev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+ ibdev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
}
mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 344ab03948a..b7262742974 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -556,7 +556,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
goto err_db;
}
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
- (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
+ (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
*index = to_mucontext(context)->uuari.uars[0].index;
@@ -620,7 +620,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
}
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
- (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT;
+ (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
*index = dev->mdev.priv.uuari.uars[0].index;
return 0;
@@ -653,8 +653,11 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int eqn;
int err;
+ if (entries < 0)
+ return ERR_PTR(-EINVAL);
+
entries = roundup_pow_of_two(entries + 1);
- if (entries < 1 || entries > dev->mdev.caps.max_cqes)
+ if (entries > dev->mdev.caps.max_cqes)
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -747,17 +750,9 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
return 0;
}
-static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq,
- u32 rsn)
+static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
- u32 lrsn;
-
- if (srq)
- lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff;
- else
- lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
-
- return rsn == lrsn;
+ return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
@@ -787,8 +782,8 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
- if (is_equal_rsn(cqe64, srq, rsn)) {
- if (srq)
+ if (is_equal_rsn(cqe64, rsn)) {
+ if (srq && (ntohl(cqe64->srqn) & 0xffffff))
mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
++nfreed;
} else if (nfreed) {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b1a6cb3a280..30653410962 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -745,7 +745,8 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in));
+ err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+ NULL, NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
goto err_in;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 836be915724..4c134d93d4f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -262,6 +262,9 @@ struct mlx5_ib_mr {
int npages;
struct completion done;
enum ib_wc_status status;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_create_mkey_mbox_out out;
+ unsigned long start;
};
struct mlx5_ib_fast_reg_page_list {
@@ -323,6 +326,7 @@ struct mlx5_cache_ent {
struct mlx5_ib_dev *dev;
struct work_struct work;
struct delayed_work dwork;
+ int pending;
};
struct mlx5_mr_cache {
@@ -358,6 +362,8 @@ struct mlx5_ib_dev {
spinlock_t mr_lock;
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
+ struct timer_list delay_timer;
+ int fill_delay;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3453580b1eb..039c3e40fcb 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -35,11 +35,12 @@
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
+#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
enum {
- DEF_CACHE_SIZE = 10,
+ MAX_PENDING_REG_MR = 8,
};
enum {
@@ -63,6 +64,51 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
return order - cache->ent[0].order;
}
+static void reg_mr_callback(int status, void *context)
+{
+ struct mlx5_ib_mr *mr = context;
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int c = order2idx(dev, mr->order);
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ u8 key;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ent->lock, flags);
+ ent->pending--;
+ spin_unlock_irqrestore(&ent->lock, flags);
+ if (status) {
+ mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+ kfree(mr);
+ dev->fill_delay = 1;
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ if (mr->out.hdr.status) {
+		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
+ mr->out.hdr.status,
+ be32_to_cpu(mr->out.hdr.syndrome));
+ kfree(mr);
+ dev->fill_delay = 1;
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
+ key = dev->mdev.priv.mkey_key++;
+ spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+ mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+
+ cache->last_add = jiffies;
+
+ spin_lock_irqsave(&ent->lock, flags);
+ list_add_tail(&mr->list, &ent->head);
+ ent->cur++;
+ ent->size++;
+ spin_unlock_irqrestore(&ent->lock, flags);
+}
+
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
struct mlx5_mr_cache *cache = &dev->cache;
@@ -78,36 +124,39 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
return -ENOMEM;
for (i = 0; i < num; i++) {
+ if (ent->pending >= MAX_PENDING_REG_MR) {
+ err = -EAGAIN;
+ break;
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
err = -ENOMEM;
- goto out;
+ break;
}
mr->order = ent->order;
mr->umred = 1;
+ mr->dev = dev;
in->seg.status = 1 << 6;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
in->seg.log2_page_size = 12;
+ spin_lock_irq(&ent->lock);
+ ent->pending++;
+ spin_unlock_irq(&ent->lock);
+ mr->start = jiffies;
err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
- sizeof(*in));
+ sizeof(*in), reg_mr_callback,
+ mr, &mr->out);
if (err) {
mlx5_ib_warn(dev, "create mkey failed %d\n", err);
kfree(mr);
- goto out;
+ break;
}
- cache->last_add = jiffies;
-
- spin_lock(&ent->lock);
- list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- ent->size++;
- spin_unlock(&ent->lock);
}
-out:
kfree(in);
return err;
}
@@ -121,16 +170,16 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
int i;
for (i = 0; i < num; i++) {
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
return;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_del(&mr->list);
ent->cur--;
ent->size--;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
@@ -162,9 +211,13 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
return -EINVAL;
if (var > ent->size) {
- err = add_keys(dev, c, var - ent->size);
- if (err)
- return err;
+ do {
+ err = add_keys(dev, c, var - ent->size);
+ if (err && err != -EAGAIN)
+ return err;
+
+ usleep_range(3000, 5000);
+ } while (err);
} else if (var < ent->size) {
remove_keys(dev, c, ent->size - var);
}
@@ -280,23 +333,37 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
struct mlx5_ib_dev *dev = ent->dev;
struct mlx5_mr_cache *cache = &dev->cache;
int i = order2idx(dev, ent->order);
+ int err;
if (cache->stopped)
return;
ent = &dev->cache.ent[i];
- if (ent->cur < 2 * ent->limit) {
- add_keys(dev, i, 1);
- if (ent->cur < 2 * ent->limit)
- queue_work(cache->wq, &ent->work);
+ if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
+ err = add_keys(dev, i, 1);
+ if (ent->cur < 2 * ent->limit) {
+ if (err == -EAGAIN) {
+ mlx5_ib_dbg(dev, "returned eagain, order %d\n",
+ i + 2);
+ queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(3));
+ } else if (err) {
+ mlx5_ib_warn(dev, "command failed order %d, err %d\n",
+ i + 2, err);
+ queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(1000));
+ } else {
+ queue_work(cache->wq, &ent->work);
+ }
+ }
} else if (ent->cur > 2 * ent->limit) {
if (!someone_adding(cache) &&
- time_after(jiffies, cache->last_add + 60 * HZ)) {
+ time_after(jiffies, cache->last_add + 300 * HZ)) {
remove_keys(dev, i, 1);
if (ent->cur > ent->limit)
queue_work(cache->wq, &ent->work);
} else {
- queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ);
+ queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
}
}
}
@@ -336,18 +403,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
list);
list_del(&mr->list);
ent->cur--;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
if (ent->cur < ent->limit)
queue_work(cache->wq, &ent->work);
break;
}
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
queue_work(cache->wq, &ent->work);
@@ -374,12 +441,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
}
ent = &cache->ent[c];
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->cur++;
if (ent->cur > 2 * ent->limit)
shrink = 1;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
if (shrink)
queue_work(cache->wq, &ent->work);
@@ -394,16 +461,16 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
cancel_delayed_work(&ent->dwork);
while (1) {
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
return;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_del(&mr->list);
ent->cur--;
ent->size--;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
@@ -464,12 +531,18 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
debugfs_remove_recursive(dev->cache.root);
}
+static void delay_time_func(unsigned long ctx)
+{
+ struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+
+ dev->fill_delay = 0;
+}
+
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int limit;
- int size;
int err;
int i;
@@ -479,6 +552,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
+ setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
INIT_LIST_HEAD(&cache->ent[i].head);
spin_lock_init(&cache->ent[i].lock);
@@ -489,13 +563,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->order = i + 2;
ent->dev = dev;
- if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) {
- size = dev->mdev.profile->mr_cache[i].size;
+ if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
limit = dev->mdev.profile->mr_cache[i].limit;
- } else {
- size = DEF_CACHE_SIZE;
+ else
limit = 0;
- }
+
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
ent->limit = limit;
@@ -522,6 +594,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
+ del_timer_sync(&dev->delay_timer);
return 0;
}
@@ -551,7 +624,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in));
+ err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+ NULL);
if (err)
goto err_in;
@@ -660,14 +734,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
int err;
int i;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 1; i++) {
mr = alloc_cached_mr(dev, order);
if (mr)
break;
err = add_keys(dev, order2idx(dev, order), 1);
- if (err) {
- mlx5_ib_warn(dev, "add_keys failed\n");
+ if (err && err != -EAGAIN) {
+ mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
break;
}
}
@@ -759,8 +833,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
in->seg.log2_page_size = page_shift;
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen);
+ in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
+ 1 << page_shift));
+ err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+ NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@@ -944,7 +1020,8 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
* TBD not needed - issue 197292 */
in->seg.log2_page_size = PAGE_SHIFT;
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in));
+ err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+ NULL, NULL);
kfree(in);
if (err)
goto err_free;
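
The mr.c hunks above combine IRQ-safe locking of the cache lists, a delayed-work retry when the firmware returns -EAGAIN, and a one-shot timer that clears a fill-delay flag. Below is a minimal, hedged sketch of the timer-plus-delayed-work part only; the names (mr_cache_demo, demo_*) are illustrative and the pre-4.15 setup_timer() API is assumed to match this kernel's era.

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct mr_cache_demo {
	struct timer_list	delay_timer;
	struct delayed_work	fill_work;
	int			fill_delay;	/* nonzero while we must back off */
};

static struct mr_cache_demo demo;

/* Pre-4.15 timer callback signature, as used by the driver above (assumed). */
static void demo_delay_expired(unsigned long ctx)
{
	struct mr_cache_demo *c = (struct mr_cache_demo *)ctx;

	c->fill_delay = 0;			/* allow filling again */
}

/* Stand-in for a firmware command that may report "try again later". */
static int demo_add_key(void)
{
	return -EAGAIN;
}

static void demo_fill_work(struct work_struct *work)
{
	struct mr_cache_demo *c = container_of(to_delayed_work(work),
					       struct mr_cache_demo, fill_work);

	if (c->fill_delay) {
		/* Still backing off: look again after the timer has fired. */
		schedule_delayed_work(&c->fill_work, msecs_to_jiffies(100));
		return;
	}
	if (demo_add_key() == -EAGAIN) {
		/* Firmware is busy: set the flag, arm the timer, retry soon. */
		c->fill_delay = 1;
		mod_timer(&c->delay_timer, jiffies + msecs_to_jiffies(1000));
		schedule_delayed_work(&c->fill_work, msecs_to_jiffies(3));
	}
}

static int __init demo_init(void)
{
	setup_timer(&demo.delay_timer, demo_delay_expired, (unsigned long)&demo);
	INIT_DELAYED_WORK(&demo.fill_work, demo_fill_work);
	schedule_delayed_work(&demo.fill_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo.fill_work);
	del_timer_sync(&demo.delay_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
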
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5659ea88074..7c6b4ba49be 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -551,7 +551,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
(*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((page_shift - PAGE_SHIFT) << 24);
+ cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
(*in)->ctx.params2 = cpu_to_be32(offset << 6);
(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
@@ -648,7 +648,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
goto err_buf;
}
(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
- (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24);
+ (*in)->ctx.log_pg_sz_remote_qpn =
+ cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
/* Set "fast registration enabled" for all kernel QPs */
(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
@@ -1317,9 +1318,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RNR_TIMEOUT |
- MLX5_QP_OPTPAR_PM_STATE,
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PM_STATE,
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_SRQN |
MLX5_QP_OPTPAR_CQN_RCV,
@@ -1550,7 +1553,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
mlx5_st = to_mlx5_st(ibqp->qp_type);
- if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0)
+ if (mlx5_st < 0)
goto out;
optpar = ib_mask_to_mlx5_opt(attr_mask);
@@ -1744,6 +1747,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
MLX5_MKEY_MASK_PD |
MLX5_MKEY_MASK_LR |
MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
MLX5_MKEY_MASK_A |
@@ -1800,7 +1804,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
seg->len = cpu_to_be64(wr->wr.fast_reg.length);
seg->log2_page_size = wr->wr.fast_reg.page_shift;
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
+ mlx5_mkey_variant(wr->wr.fast_reg.rkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
@@ -1913,6 +1918,10 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);
if (!li) {
+ if (unlikely(wr->wr.fast_reg.page_list_len >
+ wr->wr.fast_reg.page_list->max_page_list_len))
+ return -ENOMEM;
+
set_frwr_pages(*seg, wr, mdev, pd, writ);
*seg += sizeof(struct mlx5_wqe_data_seg);
*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
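
The page-size hunks in qp.c (and srq.c below) express the same idea: the device context carries the log of the buffer page size relative to the adapter's own base page, not relative to the host PAGE_SHIFT. A tiny hedged sketch, assuming the adapter base page is 4 KB as MLX5_ADAPTER_PAGE_SHIFT suggests; the names here are illustrative.

#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_ADAPTER_PAGE_SHIFT	12	/* assumed 4 KB device base page */

static inline u8 demo_log_pg_sz(unsigned int page_shift)
{
	/*
	 * With 64 KB buffer pages (page_shift = 16) this yields 4 regardless
	 * of the host PAGE_SHIFT; the old "- PAGE_SHIFT" form broke on
	 * architectures whose PAGE_SHIFT is not 12.
	 */
	return page_shift - DEMO_ADAPTER_PAGE_SHIFT;
}
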
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0aa478bc291..210b3eaf188 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -123,7 +123,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_in;
}
- (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
+ (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
return 0;
@@ -192,7 +192,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
srq->wq_sig = !!srq_signature;
- (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
+ (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
return 0;
@@ -390,9 +390,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem);
} else {
- kfree(msrq->wrid);
- mlx5_buf_free(&dev->mdev, &msrq->buf);
- mlx5_db_free(&dev->mdev, &msrq->db);
+ destroy_srq_kernel(dev, msrq);
}
kfree(srq);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 5b53ca5a228..8308e363476 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2834,7 +2834,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->qp_context = nesqp->ibqp.qp_context;
init_attr->send_cq = nesqp->ibqp.send_cq;
init_attr->recv_cq = nesqp->ibqp.recv_cq;
- init_attr->srq = nesqp->ibqp.srq = nesqp->ibqp.srq;
+ init_attr->srq = nesqp->ibqp.srq;
init_attr->cap = attr->cap;
return 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index adc11d14f87..294dd27b601 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -122,6 +122,32 @@ struct mqe_ctx {
bool cmd_done;
};
+struct ocrdma_hw_mr {
+ u32 lkey;
+ u8 fr_mr;
+ u8 remote_atomic;
+ u8 remote_rd;
+ u8 remote_wr;
+ u8 local_rd;
+ u8 local_wr;
+ u8 mw_bind;
+ u8 rsvd;
+ u64 len;
+ struct ocrdma_pbl *pbl_table;
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ u64 fbo;
+ u64 va;
+};
+
+struct ocrdma_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct ocrdma_hw_mr hwmr;
+};
+
struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;
@@ -169,7 +195,7 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
- u64 stag_arr[OCRDMA_MAX_STAG];
+ struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
u16 pvid;
};
@@ -294,31 +320,6 @@ struct ocrdma_qp {
u16 db_cache;
};
-struct ocrdma_hw_mr {
- u32 lkey;
- u8 fr_mr;
- u8 remote_atomic;
- u8 remote_rd;
- u8 remote_wr;
- u8 local_rd;
- u8 local_wr;
- u8 mw_bind;
- u8 rsvd;
- u64 len;
- struct ocrdma_pbl *pbl_table;
- u32 num_pbls;
- u32 num_pbes;
- u32 pbl_size;
- u32 pbe_size;
- u64 fbo;
- u64 va;
-};
-
-struct ocrdma_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct ocrdma_hw_mr hwmr;
-};
struct ocrdma_ucontext {
struct ib_ucontext ibucontext;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 50219ab2279..56bf32fcb62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1783,7 +1783,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
u32 max_sges = attrs->cap.max_send_sge;
/* QP1 may exceed 127 */
- max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1,
+ max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
dev->attr.max_wqe);
status = ocrdma_build_q_conf(&max_wqe_allocated,
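
The one-line ocrdma_hw.c change is a signedness fix: max_send_wr is a u32, and clamping it through min_t(int, ...) lets values above INT_MAX turn negative and defeat the clamp. A small illustrative sketch (names and values are not the driver's own):

#include <linux/kernel.h>
#include <linux/types.h>

static u32 demo_clamp_wqe(u32 requested, u32 hw_max)
{
	/*
	 * Wrong: with requested = 0x90000000, (int)(requested + 1) is
	 * negative, min_t(int, ...) picks it, and storing the result back
	 * into a u32 yields a value far above hw_max.
	 */
	u32 wrong = min_t(int, requested + 1, hw_max);

	/* Right: keep the comparison in the unsigned domain. */
	u32 right = min_t(u32, requested + 1, hw_max);

	pr_info("wrong=%u right=%u\n", wrong, right);
	return right;
}
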
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 0ce7674621e..91443bcb9e0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -452,9 +452,6 @@ static void ocrdma_remove_free(struct rcu_head *rcu)
{
struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);
- ocrdma_free_resources(dev);
- ocrdma_cleanup_hw(dev);
-
idr_remove(&ocrdma_dev_id, dev->id);
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
@@ -470,6 +467,10 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
spin_lock(&ocrdma_devlist_lock);
list_del_rcu(&dev->entry);
spin_unlock(&ocrdma_devlist_lock);
+
+ ocrdma_free_resources(dev);
+ ocrdma_cleanup_hw(dev);
+
call_rcu(&dev->rcu, ocrdma_remove_free);
}
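
The ocrdma_main.c reordering follows a general rule: call_rcu() callbacks run in softirq context and must not sleep, so teardown that can block has to happen synchronously before call_rcu(), leaving only non-blocking frees to the callback. A hedged sketch of that shape (all names illustrative):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_dev {
	struct list_head entry;
	struct rcu_head	 rcu;
	void		*mbx_cmd;
};

static LIST_HEAD(demo_devlist);
static DEFINE_SPINLOCK(demo_devlist_lock);

static void demo_cleanup_hw(struct demo_dev *dev)
{
	/* stand-in for teardown that may sleep (mailbox commands, vfree, ...) */
}

static void demo_remove_free(struct rcu_head *rcu)
{
	struct demo_dev *dev = container_of(rcu, struct demo_dev, rcu);

	kfree(dev->mbx_cmd);		/* non-blocking frees only */
	kfree(dev);
}

static void demo_remove(struct demo_dev *dev)
{
	spin_lock(&demo_devlist_lock);
	list_del_rcu(&dev->entry);	/* no new readers can find it */
	spin_unlock(&demo_devlist_lock);

	demo_cleanup_hw(dev);		/* sleeping teardown, outside the callback */

	call_rcu(&dev->rcu, demo_remove_free);
}
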
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 69f1d1221a6..7686dceadd2 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1981,9 +1981,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
- if ((wr->wr.fast_reg.page_list_len >
- qp->dev->attr.max_pages_per_frmr) ||
- (wr->wr.fast_reg.length > 0xffffffffULL))
+ if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
return -EINVAL;
hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2839,7 +2837,7 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
goto mbx_err;
mr->ibmr.rkey = mr->hwmr.lkey;
mr->ibmr.lkey = mr->hwmr.lkey;
- dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (unsigned long) mr;
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr;
return &mr->ibmr;
mbx_err:
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index f247fc6e618..c61e2a92b3c 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -456,13 +456,13 @@ static int remove_file(struct dentry *parent, char *name)
spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
- dget_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
simple_unlink(parent->d_inode, tmp);
} else {
spin_unlock(&tmp->d_lock);
}
+ dput(tmp);
ret = 0;
bail:
@@ -491,6 +491,7 @@ static int remove_device_files(struct super_block *sb,
goto bail;
}
+ mutex_lock(&dir->d_inode->i_mutex);
remove_file(dir, "counters");
remove_file(dir, "counter_names");
remove_file(dir, "portcounter_names");
@@ -505,8 +506,10 @@ static int remove_device_files(struct super_block *sb,
}
}
remove_file(dir, "flash");
- d_delete(dir);
+ mutex_unlock(&dir->d_inode->i_mutex);
ret = simple_rmdir(root->d_inode, dir);
+ d_delete(dir);
+ dput(dir);
bail:
mutex_unlock(&root->d_inode->i_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 016e7429adf..5bfc02f450e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6190,21 +6190,20 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
{
struct qib_devdata *dd;
unsigned long val;
- int ret;
-
+ char *n;
if (strlen(str) >= MAX_ATTEN_LEN) {
pr_info("txselect_values string too long\n");
return -ENOSPC;
}
- ret = kstrtoul(str, 0, &val);
- if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ val = simple_strtoul(str, &n, 0);
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
TXDDS_MFG_SZ)) {
pr_info("txselect_values must start with a number < %d\n",
TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
- return ret ? ret : -EINVAL;
+ return -EINVAL;
}
-
strcpy(txselect_list, str);
+
list_for_each_entry(dd, &qib_dev_list, list)
if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
set_no_qsfp_atten(dd, 1);
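
The setup_txselect() change reverts from kstrtoul() to simple_strtoul(): the module option is a comma-separated list, and kstrtoul() rejects any trailing characters, so only the leading number can be parsed, with an end-pointer check. A hedged sketch of that parse (names and the bound are illustrative):

#include <linux/kernel.h>
#include <linux/errno.h>

static int demo_parse_leading_int(const char *str, unsigned long max)
{
	unsigned long val;
	char *end;

	val = simple_strtoul(str, &end, 0);
	if (end == str || val >= max)	/* no digits at all, or out of range */
		return -EINVAL;

	/* *end may legitimately point at ',' followed by per-board settings */
	return (int)val;
}
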
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 28874f8606f..941d4d50d8e 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -54,7 +54,7 @@ struct ib_node_info {
__be32 revision;
u8 local_port_num;
u8 vendor_id[3];
-} __attribute__ ((packed));
+} __packed;
struct ib_mad_notice_attr {
u8 generic_type;
@@ -73,7 +73,7 @@ struct ib_mad_notice_attr {
__be16 reserved;
__be16 lid; /* where violation happened */
u8 port_num; /* where violation happened */
- } __attribute__ ((packed)) ntc_129_131;
+ } __packed ntc_129_131;
struct {
__be16 reserved;
@@ -83,14 +83,14 @@ struct ib_mad_notice_attr {
__be32 new_cap_mask; /* new capability mask */
u8 reserved3;
u8 change_flags; /* low 3 bits only */
- } __attribute__ ((packed)) ntc_144;
+ } __packed ntc_144;
struct {
__be16 reserved;
__be16 lid; /* lid where sys guid changed */
__be16 reserved2;
__be64 new_sys_guid;
- } __attribute__ ((packed)) ntc_145;
+ } __packed ntc_145;
struct {
__be16 reserved;
@@ -104,7 +104,7 @@ struct ib_mad_notice_attr {
u8 reserved3;
u8 dr_trunc_hop;
u8 dr_rtn_path[30];
- } __attribute__ ((packed)) ntc_256;
+ } __packed ntc_256;
struct {
__be16 reserved;
@@ -115,7 +115,7 @@ struct ib_mad_notice_attr {
__be32 qp2; /* high 8 bits reserved */
union ib_gid gid1;
union ib_gid gid2;
- } __attribute__ ((packed)) ntc_257_258;
+ } __packed ntc_257_258;
} details;
};
@@ -209,7 +209,7 @@ struct ib_pma_portcounters_cong {
__be64 port_rcv_packets;
__be64 port_xmit_wait;
__be64 port_adr_events;
-} __attribute__ ((packed));
+} __packed;
#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 3f14009fb66..c8d9c4ab142 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -51,8 +51,8 @@
* file calls, even though this violates some
* expectations of harmlessness.
*/
-static int qib_tune_pcie_caps(struct qib_devdata *);
-static int qib_tune_pcie_coalesce(struct qib_devdata *);
+static void qib_tune_pcie_caps(struct qib_devdata *);
+static void qib_tune_pcie_coalesce(struct qib_devdata *);
/*
* Do all the common PCIe setup and initialization.
@@ -476,30 +476,6 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
"pci_enable_device failed after reset: %d\n", r);
}
-/* code to adjust PCIe capabilities. */
-
-static int fld2val(int wd, int mask)
-{
- int lsbmask;
-
- if (!mask)
- return 0;
- wd &= mask;
- lsbmask = mask ^ (mask & (mask - 1));
- wd /= lsbmask;
- return wd;
-}
-
-static int val2fld(int wd, int mask)
-{
- int lsbmask;
-
- if (!mask)
- return 0;
- lsbmask = mask ^ (mask & (mask - 1));
- wd *= lsbmask;
- return wd;
-}
static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
@@ -511,7 +487,7 @@ MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
* of these chipsets, with some BIOS settings, and enabling it on those
* systems may result in the system crashing, and/or data corruption.
*/
-static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
int r;
struct pci_dev *parent;
@@ -519,18 +495,18 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
u32 mask, bits, val;
if (!qib_pcie_coalesce)
- return 0;
+ return;
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
if (parent->bus->parent) {
qib_devinfo(dd->pcidev, "Parent not root\n");
- return 1;
+ return;
}
if (!pci_is_pcie(parent))
- return 1;
+ return;
if (parent->vendor != 0x8086)
- return 1;
+ return;
/*
* - bit 12: Max_rdcmp_Imt_EN: need to set to 1
@@ -563,13 +539,12 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
mask = (3U << 24) | (7U << 10);
} else {
/* not one of the chipsets that we know about */
- return 1;
+ return;
}
pci_read_config_dword(parent, 0x48, &val);
val &= ~mask;
val |= bits;
r = pci_write_config_dword(parent, 0x48, val);
- return 0;
}
/*
@@ -580,55 +555,44 @@ static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
-static int qib_tune_pcie_caps(struct qib_devdata *dd)
+static void qib_tune_pcie_caps(struct qib_devdata *dd)
{
- int ret = 1; /* Assume the worst */
struct pci_dev *parent;
- u16 pcaps, pctl, ecaps, ectl;
- int rc_sup, ep_sup;
- int rc_cur, ep_cur;
+ u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
+ u16 rc_mrrs, ep_mrrs, max_mrrs;
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
- if (parent->bus->parent) {
+ if (!pci_is_root_bus(parent->bus)) {
qib_devinfo(dd->pcidev, "Parent not root\n");
- goto bail;
+ return;
}
if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- goto bail;
- pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps);
- pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl);
+ return;
+
+ rc_mpss = parent->pcie_mpss;
+ rc_mps = ffs(pcie_get_mps(parent)) - 8;
/* Find out supported and configured values for endpoint (us) */
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ ep_mpss = dd->pcidev->pcie_mpss;
+ ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
- ret = 0;
/* Find max payload supported by root, endpoint */
- rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
- ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
- if (rc_sup > ep_sup)
- rc_sup = ep_sup;
-
- rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
- ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+ if (rc_mpss > ep_mpss)
+ rc_mpss = ep_mpss;
/* If Supported greater than limit in module param, limit it */
- if (rc_sup > (qib_pcie_caps & 7))
- rc_sup = qib_pcie_caps & 7;
+ if (rc_mpss > (qib_pcie_caps & 7))
+ rc_mpss = qib_pcie_caps & 7;
/* If less than (allowed, supported), bump root payload */
- if (rc_sup > rc_cur) {
- rc_cur = rc_sup;
- pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
- val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
- pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
+ if (rc_mpss > rc_mps) {
+ rc_mps = rc_mpss;
+ pcie_set_mps(parent, 128 << rc_mps);
}
/* If less than (allowed, supported), bump endpoint payload */
- if (rc_sup > ep_cur) {
- ep_cur = rc_sup;
- ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
- val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+ if (rc_mpss > ep_mps) {
+ ep_mps = rc_mpss;
+ pcie_set_mps(dd->pcidev, 128 << ep_mps);
}
/*
@@ -636,26 +600,22 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
* No field for max supported, but PCIe spec limits it to 4096,
* which is code '5' (log2(4096) - 7)
*/
- rc_sup = 5;
- if (rc_sup > ((qib_pcie_caps >> 4) & 7))
- rc_sup = (qib_pcie_caps >> 4) & 7;
- rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
- ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
-
- if (rc_sup > rc_cur) {
- rc_cur = rc_sup;
- pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
- val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
- pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
+ max_mrrs = 5;
+ if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
+ max_mrrs = (qib_pcie_caps >> 4) & 7;
+
+ max_mrrs = 128 << max_mrrs;
+ rc_mrrs = pcie_get_readrq(parent);
+ ep_mrrs = pcie_get_readrq(dd->pcidev);
+
+ if (max_mrrs > rc_mrrs) {
+ rc_mrrs = max_mrrs;
+ pcie_set_readrq(parent, rc_mrrs);
}
- if (rc_sup > ep_cur) {
- ep_cur = rc_sup;
- ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
- val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+ if (max_mrrs > ep_mrrs) {
+ ep_mrrs = max_mrrs;
+ pcie_set_readrq(dd->pcidev, ep_mrrs);
}
-bail:
- return ret;
}
/* End of PCIe capability tuning */
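
The qib_pcie.c rewrite drops the hand-rolled DEVCAP/DEVCTL field math in favour of the PCI core helpers: pcie_get_mps()/pcie_set_mps() and pcie_get_readrq()/pcie_set_readrq() work in bytes, while pcie_mpss holds the encoded capability (a payload of 128 << pcie_mpss bytes). A hedged sketch of those helpers; the clamp values are illustrative and error handling is omitted.

#include <linux/pci.h>
#include <linux/kernel.h>

static void demo_tune_payload(struct pci_dev *parent, struct pci_dev *ep)
{
	int mpss = min(parent->pcie_mpss, ep->pcie_mpss);	/* common capability */
	int mps = 128 << mpss;					/* bytes */

	if (pcie_get_mps(parent) < mps)
		pcie_set_mps(parent, mps);
	if (pcie_get_mps(ep) < mps)
		pcie_set_mps(ep, mps);

	/* Max read request size: the PCIe spec caps it at 4096 bytes. */
	if (pcie_get_readrq(ep) < 4096)
		pcie_set_readrq(ep, 4096);
}
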
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index d0a0ea0c14d..165aee2ca8a 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -594,8 +594,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
else
j = npages;
- ret = get_user_pages(current, current->mm, addr,
- j, 0, 1, pages, NULL);
+ ret = get_user_pages_fast(addr, j, 0, pages);
if (ret != j) {
i = 0;
j = ret;
@@ -1294,11 +1293,8 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
int mxp = 8;
int ndesc = 0;
- down_write(&current->mm->mmap_sem);
ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
iov, dim, &list, &mxp, &ndesc);
- up_write(&current->mm->mmap_sem);
-
if (ret < 0)
goto done_unlock;
else {
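
The qib_user_sdma.c hunks switch to get_user_pages_fast(), which pins pages in the calling task's own address space without the caller taking mmap_sem, so the explicit down_write()/up_write() around the old get_user_pages() call disappears. A hedged sketch of the replacement call (names illustrative, cleanup simplified):

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int demo_pin_user_buffer(unsigned long addr, int npages,
				struct page **pages)
{
	/* third argument: 0 = pages will only be read into by the caller */
	int got = get_user_pages_fast(addr, npages, 0, pages);

	if (got < 0)
		return got;
	if (got != npages) {		/* partial pin: release and bail out */
		while (got-- > 0)
			put_page(pages[got]);
		return -EFAULT;
	}
	return 0;
}
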
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 012e2c7575a..a01c7d2cf54 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -150,14 +150,14 @@ struct ib_reth {
__be64 vaddr;
__be32 rkey;
__be32 length;
-} __attribute__ ((packed));
+} __packed;
struct ib_atomic_eth {
__be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
__be32 rkey;
__be64 swap_data;
__be64 compare_data;
-} __attribute__ ((packed));
+} __packed;
struct qib_other_headers {
__be32 bth[3];
@@ -178,7 +178,7 @@ struct qib_other_headers {
__be32 aeth;
struct ib_atomic_eth atomic_eth;
} u;
-} __attribute__ ((packed));
+} __packed;
/*
* Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
@@ -195,12 +195,12 @@ struct qib_ib_header {
} l;
struct qib_other_headers oth;
} u;
-} __attribute__ ((packed));
+} __packed;
struct qib_pio_header {
__le32 pbc[2];
struct qib_ib_header hdr;
-} __attribute__ ((packed));
+} __packed;
/*
* There is one struct qib_mcast for each multicast GID.
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index eb71aaa26a9..c639f90cfda 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -101,6 +101,7 @@ enum {
IPOIB_MCAST_FLAG_SENDONLY = 1,
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
+ IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -151,6 +152,7 @@ struct ipoib_mcast {
struct sk_buff_head pkt_queue;
struct net_device *dev;
+ struct completion done;
};
struct ipoib_rx_buf {
@@ -299,7 +301,7 @@ struct ipoib_dev_priv {
unsigned long flags;
- struct mutex vlan_mutex;
+ struct rw_semaphore vlan_rwsem;
struct rb_root path_tree;
struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7a3175400b2..1377f85911c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -140,7 +140,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
int id, int frags,
- u64 mapping[IPOIB_CM_RX_SG])
+ u64 mapping[IPOIB_CM_RX_SG],
+ gfp_t gfp)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
@@ -164,7 +165,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
}
for (i = 0; i < frags; i++) {
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_page(gfp);
if (!page)
goto partial_error;
@@ -382,7 +383,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
- rx->rx_ring[i].mapping)) {
+ rx->rx_ring[i].mapping,
+ GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
@@ -639,7 +641,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
- newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
+ newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
+ mapping, GFP_ATOMIC);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
@@ -1556,7 +1559,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
- priv->cm.srq_ring[i].mapping)) {
+ priv->cm.srq_ring[i].mapping,
+ GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 196b1d13cbc..6a7003ddb0b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,15 +685,13 @@ int ipoib_ib_dev_open(struct net_device *dev)
ret = ipoib_ib_post_receives(dev);
if (ret) {
ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
- ipoib_ib_dev_stop(dev, 1);
- return -1;
+ goto dev_stop;
}
ret = ipoib_cm_dev_open(dev);
if (ret) {
ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
- ipoib_ib_dev_stop(dev, 1);
- return -1;
+ goto dev_stop;
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
@@ -704,6 +702,11 @@ int ipoib_ib_dev_open(struct net_device *dev)
napi_enable(&priv->napi);
return 0;
+dev_stop:
+ if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ napi_enable(&priv->napi);
+ ipoib_ib_dev_stop(dev, 1);
+ return -1;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
@@ -746,10 +749,8 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
- cancel_delayed_work(&priv->pkey_poll_task);
+ cancel_delayed_work_sync(&priv->pkey_poll_task);
mutex_unlock(&pkey_mutex);
- if (flush)
- flush_workqueue(ipoib_workqueue);
}
ipoib_mcast_stop_thread(dev, flush);
@@ -974,7 +975,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
u16 new_index;
int result;
- mutex_lock(&priv->vlan_mutex);
+ down_read(&priv->vlan_rwsem);
/*
* Flush any child interfaces too -- they might be up even if
@@ -983,7 +984,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
list_for_each_entry(cpriv, &priv->child_intfs, list)
__ipoib_ib_dev_flush(cpriv, level);
- mutex_unlock(&priv->vlan_mutex);
+ up_read(&priv->vlan_rwsem);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
/* for non-child devices must check/update the pkey value here */
@@ -1081,6 +1082,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
ipoib_dbg(priv, "cleaning up ib_dev\n");
+ /*
+ * We must make sure there are no more (path) completions
+ * that may wish to touch priv fields that are no longer valid
+ */
+ ipoib_flush_paths(dev);
ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 82cec1af902..d64ed05fb08 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -119,7 +119,7 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- mutex_lock(&priv->vlan_mutex);
+ down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -129,7 +129,7 @@ int ipoib_open(struct net_device *dev)
dev_change_flags(cpriv->dev, flags | IFF_UP);
}
- mutex_unlock(&priv->vlan_mutex);
+ up_read(&priv->vlan_rwsem);
}
netif_start_queue(dev);
@@ -162,7 +162,7 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- mutex_lock(&priv->vlan_mutex);
+ down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -172,7 +172,7 @@ static int ipoib_stop(struct net_device *dev)
dev_change_flags(cpriv->dev, flags & ~IFF_UP);
}
- mutex_unlock(&priv->vlan_mutex);
+ up_read(&priv->vlan_rwsem);
}
return 0;
@@ -1350,7 +1350,7 @@ void ipoib_setup(struct net_device *dev)
ipoib_set_ethtool_ops(dev);
- netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
+ netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
dev->watchdog_timeo = HZ;
@@ -1372,7 +1372,7 @@ void ipoib_setup(struct net_device *dev)
spin_lock_init(&priv->lock);
- mutex_init(&priv->vlan_mutex);
+ init_rwsem(&priv->vlan_rwsem);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index cecb98a4c66..d4e005720d0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -386,8 +386,10 @@ static int ipoib_mcast_join_complete(int status,
mcast->mcmember.mgid.raw, status);
/* We trap for port events ourselves. */
- if (status == -ENETRESET)
- return 0;
+ if (status == -ENETRESET) {
+ status = 0;
+ goto out;
+ }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
@@ -407,7 +409,8 @@ static int ipoib_mcast_join_complete(int status,
if (mcast == priv->broadcast)
queue_work(ipoib_workqueue, &priv->carrier_on_task);
- return 0;
+ status = 0;
+ goto out;
}
if (mcast->logcount++ < 20) {
@@ -434,7 +437,8 @@ static int ipoib_mcast_join_complete(int status,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-
+out:
+ complete(&mcast->done);
return status;
}
@@ -484,11 +488,15 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
}
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
if (IS_ERR(mcast->mc)) {
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ complete(&mcast->done);
ret = PTR_ERR(mcast->mc);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
@@ -510,10 +518,18 @@ void ipoib_mcast_join_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
+ struct ib_port_attr port_attr;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
+ if (ib_query_port(priv->ca, priv->port, &port_attr) ||
+ port_attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
+ port_attr.state);
+ return;
+ }
+
if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
ipoib_warn(priv, "ib_query_gid() failed\n");
else
@@ -751,6 +767,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
+	/* separate the wait from the leave */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(dev, mcast);
ipoib_mcast_free(mcast);
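
The multicast hunks add a completion handshake so the flush path can wait for any asynchronous join callback that is still running before tearing the mcast object down: init_completion() before the join is issued, complete() on every callback exit path (including the error and -ENETRESET paths), wait_for_completion() in the flush. A hedged sketch of that shape (names illustrative):

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_mcast {
	unsigned long	  flags;
#define DEMO_JOIN_STARTED 0
	struct completion done;
};

static void demo_join_callback(struct demo_mcast *m, int status)
{
	/* ... handle join status ... */
	complete(&m->done);		/* signalled on every exit path */
}

static void demo_start_join(struct demo_mcast *m)
{
	init_completion(&m->done);
	set_bit(DEMO_JOIN_STARTED, &m->flags);
	/* issue the asynchronous join here; demo_join_callback() runs later */
}

static void demo_flush(struct demo_mcast *m)
{
	if (test_bit(DEMO_JOIN_STARTED, &m->flags))
		wait_for_completion(&m->done);	/* callback has finished */
	kfree(m);
}
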
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index f81abe16cf0..c29b5c83883 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -142,10 +142,10 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head
priv = netdev_priv(dev);
ppriv = netdev_priv(priv->parent);
- mutex_lock(&ppriv->vlan_mutex);
+ down_write(&ppriv->vlan_rwsem);
unregister_netdevice_queue(dev, head);
list_del(&priv->list);
- mutex_unlock(&ppriv->vlan_mutex);
+ up_write(&ppriv->vlan_rwsem);
}
static size_t ipoib_get_size(const struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 8292554bccb..9fad7b5ac8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -140,7 +140,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();
- mutex_lock(&ppriv->vlan_mutex);
+ down_write(&ppriv->vlan_rwsem);
/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -163,7 +163,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
out:
- mutex_unlock(&ppriv->vlan_mutex);
+ up_write(&ppriv->vlan_rwsem);
if (result)
free_netdev(priv->dev);
@@ -185,7 +185,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();
- mutex_lock(&ppriv->vlan_mutex);
+
+ down_write(&ppriv->vlan_rwsem);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -195,7 +196,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
break;
}
}
- mutex_unlock(&ppriv->vlan_mutex);
+ up_write(&ppriv->vlan_rwsem);
+
rtnl_unlock();
if (dev) {
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
index ce3fd32167d..02f9759ebb1 100644
--- a/drivers/infiniband/ulp/isert/Kconfig
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -1,5 +1,5 @@
config INFINIBAND_ISERT
- tristate "iSCSI Extentions for RDMA (iSER) target support"
+ tristate "iSCSI Extensions for RDMA (iSER) target support"
depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
---help---
- Support for iSCSI Extentions for RDMA (iSER) Target on Infiniband fabrics.
+ Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics.
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6df23502059..6be57c38638 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,6 +22,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
@@ -489,6 +490,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
+ mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -843,14 +845,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
}
static void
-isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
+isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+ struct ib_send_wr *send_wr, bool coalesce)
{
+ struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
+
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
send_wr->opcode = IB_WR_SEND;
- send_wr->send_flags = IB_SEND_SIGNALED;
- send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
+ send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
+ /*
+	 * Coalesce send completion interrupts by setting the IB_SEND_SIGNALED
+	 * bit only once every ISERT_COMP_BATCH_COUNT ib_post_send() calls.
+ */
+ mutex_lock(&isert_conn->conn_comp_mutex);
+ if (coalesce &&
+ ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+ llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
+ mutex_unlock(&isert_conn->conn_comp_mutex);
+ return;
+ }
+ isert_conn->conn_comp_batch = 0;
+ tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
+ mutex_unlock(&isert_conn->conn_comp_mutex);
+
+ send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
@@ -1582,8 +1602,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+__isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
@@ -1624,6 +1644,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
}
static void
+isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
+{
+ struct llist_node *llnode = tx_desc->comp_llnode_batch;
+ struct iser_tx_desc *t;
+ /*
+ * Drain coalesced completion llist starting from comp_llnode_batch
+ * setup in isert_init_send_wr(), and then complete trailing tx_desc.
+ */
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ __isert_send_completion(t, isert_conn);
+ }
+ __isert_send_completion(tx_desc, isert_conn);
+}
+
+static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1793,7 +1831,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1813,7 +1851,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1831,7 +1869,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1849,7 +1887,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1881,7 +1919,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1921,7 +1959,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1991,8 +2029,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- cmd->stat_sn = conn->stat_sn++;
} else {
sg_off = cmd->write_data_done / PAGE_SIZE;
data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2204,8 +2240,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- cmd->stat_sn = conn->stat_sn++;
} else {
sg_off = cmd->write_data_done / PAGE_SIZE;
data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2259,18 +2293,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
data_len = min(data_left, rdma_write_max);
wr->cur_rdma_length = data_len;
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
- wr->fr_desc = fr_desc;
+ /* if there is a single dma entry, dma mr is sufficient */
+ if (count == 1) {
+ ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
+ ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
+ ib_sge->lkey = isert_conn->conn_mr->lkey;
+ wr->fr_desc = NULL;
+ } else {
+ spin_lock_irqsave(&isert_conn->conn_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+ struct fast_reg_descriptor, list);
+ list_del(&fr_desc->list);
+ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ wr->fr_desc = fr_desc;
- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
- ib_sge, offset, data_len);
- if (ret) {
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
- goto unmap_sg;
+ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+ ib_sge, offset, data_len);
+ if (ret) {
+ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+ goto unmap_sg;
+ }
}
return 0;
@@ -2306,10 +2348,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+ iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd,
+ &isert_cmd->tx_desc.send_wr, true);
atomic_inc(&isert_conn->post_send_buf_count);
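
The isert changes above coalesce send-completion interrupts: only every ISERT_COMP_BATCH_COUNT-th post carries IB_SEND_SIGNALED, the unsignalled descriptors are parked on a lock-less list, and when the signalled one completes the batch is drained. A hedged sketch of that batching; the names, the batch size, and the initialisation are assumptions (comp_mutex and comp_list are taken to be set up elsewhere with mutex_init() and init_llist_head()).

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/mutex.h>

#define DEMO_BATCH 8

struct demo_desc {
	struct llist_node  node;
	struct llist_node *batch;	/* filled in on the signalled descriptor */
};

struct demo_conn {
	struct mutex	  comp_mutex;
	int		  comp_count;
	struct llist_head comp_list;
};

/* Returns true if the caller should set IB_SEND_SIGNALED on this post. */
static bool demo_queue_for_batch(struct demo_conn *c, struct demo_desc *d)
{
	bool signal;

	mutex_lock(&c->comp_mutex);
	if (++c->comp_count < DEMO_BATCH) {
		llist_add(&d->node, &c->comp_list);
		signal = false;
	} else {
		c->comp_count = 0;
		d->batch = llist_del_all(&c->comp_list);
		signal = true;
	}
	mutex_unlock(&c->comp_mutex);

	return signal;
}

static void demo_complete_one(struct demo_desc *d)
{
	/* per-descriptor completion work; may free d */
}

static void demo_handle_signalled_completion(struct demo_desc *d)
{
	struct llist_node *n = d->batch;

	while (n) {			/* drain the coalesced batch first */
		struct demo_desc *t = llist_entry(n, struct demo_desc, node);

		n = llist_next(n);	/* advance before t is completed/freed */
		demo_complete_one(t);
	}
	demo_complete_one(d);		/* finally the signalled descriptor */
}
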
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 631f2090f0b..691f90ff2d8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -43,6 +43,8 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
+ struct llist_node *comp_llnode_batch;
+ struct llist_node comp_llnode;
struct ib_send_wr send_wr;
} __packed;
@@ -121,6 +123,10 @@ struct isert_conn {
int conn_frwr_pool_size;
/* lock to protect frwr_pool */
spinlock_t conn_lock;
+#define ISERT_COMP_BATCH_COUNT 8
+ int conn_comp_batch;
+ struct llist_head conn_comp_llist;
+ struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index f93baf8254c..a88631918e8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -46,6 +46,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>
@@ -86,6 +87,32 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
+static struct kernel_param_ops srp_tmo_ops;
+
+static int srp_reconnect_delay = 10;
+module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
+
+static int srp_fast_io_fail_tmo = 15;
+module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_io_fail_tmo,
+ "Number of seconds between the observation of a transport"
+ " layer error and failing all I/O. \"off\" means that this"
+ " functionality is disabled.");
+
+static int srp_dev_loss_tmo = 600;
+module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo,
+ "Maximum number of seconds that the SRP transport should"
+ " insulate transport layer errors. After this time has been"
+ " exceeded the SCSI host is removed. Should be"
+ " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ " if fast_io_fail_tmo has not been set. \"off\" means that"
+ " this functionality is disabled.");
+
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
@@ -102,6 +129,48 @@ static struct ib_client srp_client = {
static struct ib_sa_client srp_sa_client;
+static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
+{
+ int tmo = *(int *)kp->arg;
+
+ if (tmo >= 0)
+ return sprintf(buffer, "%d", tmo);
+ else
+ return sprintf(buffer, "off");
+}
+
+static int srp_tmo_set(const char *val, const struct kernel_param *kp)
+{
+ int tmo, res;
+
+ if (strncmp(val, "off", 3) != 0) {
+ res = kstrtoint(val, 0, &tmo);
+ if (res)
+ goto out;
+ } else {
+ tmo = -1;
+ }
+ if (kp->arg == &srp_reconnect_delay)
+ res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
+ srp_dev_loss_tmo);
+ else if (kp->arg == &srp_fast_io_fail_tmo)
+ res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
+ else
+ res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
+ tmo);
+ if (res)
+ goto out;
+ *(int *)kp->arg = tmo;
+
+out:
+ return res;
+}
+
+static struct kernel_param_ops srp_tmo_ops = {
+ .get = srp_tmo_get,
+ .set = srp_tmo_set,
+};
+
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
return (struct srp_target_port *) host->hostdata;
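
The hunk above wires three tunables through module_param_cb() with a shared kernel_param_ops, so each parameter accepts "off" as well as an integer and is cross-checked against the others (via srp_tmo_valid()) before the new value is stored. A hedged sketch of a single such parameter; the name, range check, and default are illustrative only.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static int demo_tmo = 15;

static int demo_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	return tmo >= 0 ? sprintf(buffer, "%d", tmo) : sprintf(buffer, "off");
}

static int demo_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) == 0) {
		tmo = -1;			/* "off" disables the timeout */
	} else {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			return res;
		if (tmo < 0 || tmo > 600)	/* illustrative validity check */
			return -EINVAL;
	}
	*(int *)kp->arg = tmo;

	return 0;
}

static struct kernel_param_ops demo_tmo_ops = {
	.get = demo_tmo_get,
	.set = demo_tmo_set,
};

module_param_cb(demo_tmo, &demo_tmo_ops, &demo_tmo, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(demo_tmo, "Demo timeout in seconds, or \"off\"");
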
@@ -231,16 +300,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
return -ENOMEM;
recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target, SRP_RQ_SIZE,
- target->comp_vector);
+ srp_recv_completion, NULL, target,
+ target->queue_size, target->comp_vector);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}
send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target, SRP_SQ_SIZE,
- target->comp_vector);
+ srp_send_completion, NULL, target,
+ target->queue_size, target->comp_vector);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
@@ -249,8 +318,8 @@ static int srp_create_target_ib(struct srp_target_port *target)
ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
- init_attr->cap.max_send_wr = SRP_SQ_SIZE;
- init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
+ init_attr->cap.max_send_wr = target->queue_size;
+ init_attr->cap.max_recv_wr = target->queue_size;
init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -296,6 +365,10 @@ err:
return ret;
}
+/*
+ * Note: this function may be called without srp_alloc_iu_bufs() having been
+ * invoked. Hence the target->[rt]x_ring checks.
+ */
static void srp_free_target_ib(struct srp_target_port *target)
{
int i;
@@ -307,10 +380,18 @@ static void srp_free_target_ib(struct srp_target_port *target)
target->qp = NULL;
target->send_cq = target->recv_cq = NULL;
- for (i = 0; i < SRP_RQ_SIZE; ++i)
- srp_free_iu(target->srp_host, target->rx_ring[i]);
- for (i = 0; i < SRP_SQ_SIZE; ++i)
- srp_free_iu(target->srp_host, target->tx_ring[i]);
+ if (target->rx_ring) {
+ for (i = 0; i < target->queue_size; ++i)
+ srp_free_iu(target->srp_host, target->rx_ring[i]);
+ kfree(target->rx_ring);
+ target->rx_ring = NULL;
+ }
+ if (target->tx_ring) {
+ for (i = 0; i < target->queue_size; ++i)
+ srp_free_iu(target->srp_host, target->tx_ring[i]);
+ kfree(target->tx_ring);
+ target->tx_ring = NULL;
+ }
}
static void srp_path_rec_completion(int status,
@@ -390,7 +471,7 @@ static int srp_send_req(struct srp_target_port *target)
req->param.responder_resources = 4;
req->param.remote_cm_response_timeout = 20;
req->param.local_cm_response_timeout = 20;
- req->param.retry_count = 7;
+ req->param.retry_count = target->tl_retry_count;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
@@ -496,7 +577,11 @@ static void srp_free_req_data(struct srp_target_port *target)
struct srp_request *req;
int i;
- for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
+ if (!target->req_ring)
+ return;
+
+ for (i = 0; i < target->req_ring_size; ++i) {
+ req = &target->req_ring[i];
kfree(req->fmr_list);
kfree(req->map_page);
if (req->indirect_dma_addr) {
@@ -506,6 +591,50 @@ static void srp_free_req_data(struct srp_target_port *target)
}
kfree(req->indirect_desc);
}
+
+ kfree(target->req_ring);
+ target->req_ring = NULL;
+}
+
+static int srp_alloc_req_data(struct srp_target_port *target)
+{
+ struct srp_device *srp_dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+ struct srp_request *req;
+ dma_addr_t dma_addr;
+ int i, ret = -ENOMEM;
+
+ INIT_LIST_HEAD(&target->free_reqs);
+
+ target->req_ring = kzalloc(target->req_ring_size *
+ sizeof(*target->req_ring), GFP_KERNEL);
+ if (!target->req_ring)
+ goto out;
+
+ for (i = 0; i < target->req_ring_size; ++i) {
+ req = &target->req_ring[i];
+ req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
+ GFP_KERNEL);
+ req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
+ GFP_KERNEL);
+ req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+ if (!req->fmr_list || !req->map_page || !req->indirect_desc)
+ goto out;
+
+ dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+ target->indirect_size,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ibdev, dma_addr))
+ goto out;
+
+ req->indirect_dma_addr = dma_addr;
+ req->index = i;
+ list_add_tail(&req->list, &target->free_reqs);
+ }
+ ret = 0;
+
+out:
+ return ret;
}
/**
@@ -528,12 +657,20 @@ static void srp_remove_target(struct srp_target_port *target)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
+ srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
+ cancel_work_sync(&target->tl_err_work);
+ srp_rport_put(target->rport);
srp_free_req_data(target);
+
+ spin_lock(&target->srp_host->target_lock);
+ list_del(&target->list);
+ spin_unlock(&target->srp_host->target_lock);
+
scsi_host_put(target->scsi_host);
}
@@ -545,10 +682,6 @@ static void srp_remove_work(struct work_struct *work)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_remove_target(target);
-
- spin_lock(&target->srp_host->target_lock);
- list_del(&target->list);
- spin_unlock(&target->srp_host->target_lock);
}
static void srp_rport_delete(struct srp_rport *rport)
@@ -686,23 +819,42 @@ static void srp_free_req(struct srp_target_port *target,
spin_unlock_irqrestore(&target->lock, flags);
}
-static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_finish_req(struct srp_target_port *target,
+ struct srp_request *req, int result)
{
struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
if (scmnd) {
srp_free_req(target, req, scmnd, 0);
- scmnd->result = DID_RESET << 16;
+ scmnd->result = result;
scmnd->scsi_done(scmnd);
}
}
-static int srp_reconnect_target(struct srp_target_port *target)
+static void srp_terminate_io(struct srp_rport *rport)
{
- struct Scsi_Host *shost = target->scsi_host;
- int i, ret;
+ struct srp_target_port *target = rport->lld_data;
+ int i;
- scsi_target_block(&shost->shost_gendev);
+ for (i = 0; i < target->req_ring_size; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+ srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
+ }
+}
+
+/*
+ * It is up to the caller to ensure that srp_rport_reconnect() calls are
+ * serialized and that no concurrent srp_queuecommand(), srp_abort(),
+ * srp_reset_device() or srp_reset_host() calls will occur while this function
+ * is in progress. One way to realize that is not to call this function
+ * directly but to call srp_reconnect_rport() instead since that last function
+ * serializes calls of this function via rport->mutex and also blocks
+ * srp_queuecommand() calls before invoking this function.
+ */
+static int srp_rport_reconnect(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+ int i, ret;
srp_disconnect_target(target);
/*
@@ -721,41 +873,21 @@ static int srp_reconnect_target(struct srp_target_port *target)
else
srp_create_target_ib(target);
- for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
- if (req->scmnd)
- srp_reset_req(target, req);
+ srp_finish_req(target, req, DID_RESET << 16);
}
INIT_LIST_HEAD(&target->free_tx);
- for (i = 0; i < SRP_SQ_SIZE; ++i)
+ for (i = 0; i < target->queue_size; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
if (ret == 0)
ret = srp_connect_target(target);
- scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
- SDEV_TRANSPORT_OFFLINE);
- target->transport_offline = !!ret;
-
- if (ret)
- goto err;
-
- shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
-
- return ret;
-
-err:
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "reconnect failed (%d), removing target port.\n", ret);
-
- /*
- * We couldn't reconnect, so kill our target port off.
- * However, we have to defer the real removal because we
- * are in the context of the SCSI error handler now, which
- * will deadlock if we call scsi_remove_host().
- */
- srp_queue_remove_work(target);
+ if (ret == 0)
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "reconnect succeeded\n");
return ret;
}
@@ -1302,15 +1434,30 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}
-static void srp_handle_qp_err(enum ib_wc_status wc_status,
- enum ib_wc_opcode wc_opcode,
+/**
+ * srp_tl_err_work() - handle a transport layer error
+ *
+ * Note: This function may get invoked before the rport has been created,
+ * hence the target->rport test.
+ */
+static void srp_tl_err_work(struct work_struct *work)
+{
+ struct srp_target_port *target;
+
+ target = container_of(work, struct srp_target_port, tl_err_work);
+ if (target->rport)
+ srp_start_tl_fail_timers(target->rport);
+}
+
+static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
struct srp_target_port *target)
{
if (target->connected && !target->qp_in_error) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "failed %s status %d\n",
- wc_opcode & IB_WC_RECV ? "receive" : "send",
+ send_err ? "send" : "receive",
wc_status);
+ queue_work(system_long_wq, &target->tl_err_work);
}
target->qp_in_error = true;
}
@@ -1325,7 +1472,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
if (likely(wc.status == IB_WC_SUCCESS)) {
srp_handle_recv(target, &wc);
} else {
- srp_handle_qp_err(wc.status, wc.opcode, target);
+ srp_handle_qp_err(wc.status, false, target);
}
}
}
@@ -1341,7 +1488,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
list_add(&iu->list, &target->free_tx);
} else {
- srp_handle_qp_err(wc.status, wc.opcode, target);
+ srp_handle_qp_err(wc.status, true, target);
}
}
}
@@ -1349,17 +1496,29 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(shost);
+ struct srp_rport *rport = target->rport;
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
- int len;
+ int len, result;
+ const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
+
+ /*
+ * The SCSI EH thread is the only context from which srp_queuecommand()
+ * can get invoked for blocked devices (SDEV_BLOCK /
+ * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
+ * locking the rport mutex if invoked from inside the SCSI EH.
+ */
+ if (in_scsi_eh)
+ mutex_lock(&rport->mutex);
- if (unlikely(target->transport_offline)) {
- scmnd->result = DID_NO_CONNECT << 16;
+ result = srp_chkready(target->rport);
+ if (unlikely(result)) {
+ scmnd->result = result;
scmnd->scsi_done(scmnd);
- return 0;
+ goto unlock_rport;
}
spin_lock_irqsave(&target->lock, flags);
@@ -1404,6 +1563,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
goto err_unmap;
}
+unlock_rport:
+ if (in_scsi_eh)
+ mutex_unlock(&rport->mutex);
+
return 0;
err_unmap:
@@ -1418,14 +1581,30 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
+ if (in_scsi_eh)
+ mutex_unlock(&rport->mutex);
+
return SCSI_MLQUEUE_HOST_BUSY;
}
+/*
+ * Note: the resources allocated in this function are freed in
+ * srp_free_target_ib().
+ */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;
- for (i = 0; i < SRP_RQ_SIZE; ++i) {
+ target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
+ GFP_KERNEL);
+ if (!target->rx_ring)
+ goto err_no_ring;
+ target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
+ GFP_KERNEL);
+ if (!target->tx_ring)
+ goto err_no_ring;
+
+ for (i = 0; i < target->queue_size; ++i) {
target->rx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_ti_iu_len,
GFP_KERNEL, DMA_FROM_DEVICE);
@@ -1433,7 +1612,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
goto err;
}
- for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ for (i = 0; i < target->queue_size; ++i) {
target->tx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
@@ -1446,16 +1625,18 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
return 0;
err:
- for (i = 0; i < SRP_RQ_SIZE; ++i) {
+ for (i = 0; i < target->queue_size; ++i) {
srp_free_iu(target->srp_host, target->rx_ring[i]);
- target->rx_ring[i] = NULL;
- }
-
- for (i = 0; i < SRP_SQ_SIZE; ++i) {
srp_free_iu(target->srp_host, target->tx_ring[i]);
- target->tx_ring[i] = NULL;
}
+
+err_no_ring:
+ kfree(target->tx_ring);
+ target->tx_ring = NULL;
+ kfree(target->rx_ring);
+ target->rx_ring = NULL;
+
return -ENOMEM;
}
@@ -1506,6 +1687,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
target->scsi_host->can_queue
= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
target->scsi_host->can_queue);
+ target->scsi_host->cmd_per_lun
+ = min_t(int, target->scsi_host->can_queue,
+ target->scsi_host->cmd_per_lun);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
@@ -1513,7 +1697,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- if (!target->rx_ring[0]) {
+ if (!target->rx_ring) {
ret = srp_alloc_iu_bufs(target);
if (ret)
goto error;
@@ -1533,7 +1717,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
- for (i = 0; i < SRP_RQ_SIZE; i++) {
+ for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = target->rx_ring[i];
ret = srp_post_recv(target, iu);
if (ret)
@@ -1672,6 +1856,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
+ queue_work(system_long_wq, &target->tl_err_work);
break;
case IB_CM_TIMEWAIT_EXIT:
@@ -1698,9 +1883,61 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
+/**
+ * srp_change_queue_type - change the queue tag type of a SCSI device
+ * @sdev: scsi device struct
+ * @tag_type: requested tag type
+ *
+ * Returns queue tag type.
+ */
+static int
+srp_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+/**
+ * srp_change_queue_depth - set the queue depth of a SCSI device
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
+ * (see include/scsi/scsi_host.h for definition)
+ *
+ * Returns queue depth.
+ */
+static int
+srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth;
+
+ if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
+ max_depth = shost->can_queue;
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ } else if (reason == SCSI_QDEPTH_QFULL)
+ scsi_track_queue_full(sdev, qdepth);
+ else
+ return -EOPNOTSUPP;
+
+ return sdev->queue_depth;
+}
+
static int srp_send_tsk_mgmt(struct srp_target_port *target,
u64 req_tag, unsigned int lun, u8 func)
{
+ struct srp_rport *rport = target->rport;
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
@@ -1710,12 +1947,20 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
init_completion(&target->tsk_mgmt_done);
+ /*
+ * Lock the rport mutex to prevent srp_create_target_ib() from being
+ * invoked while a task management function is being sent.
+ */
+ mutex_lock(&rport->mutex);
spin_lock_irq(&target->lock);
iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
spin_unlock_irq(&target->lock);
- if (!iu)
+ if (!iu) {
+ mutex_unlock(&rport->mutex);
+
return -1;
+ }
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
@@ -1732,8 +1977,11 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
DMA_TO_DEVICE);
if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+ mutex_unlock(&rport->mutex);
+
return -1;
}
+ mutex_unlock(&rport->mutex);
if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
@@ -1751,11 +1999,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
if (!req || !srp_claim_req(target, req, scmnd))
- return FAILED;
+ return SUCCESS;
if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK) == 0)
ret = SUCCESS;
- else if (target->transport_offline)
+ else if (target->rport->state == SRP_RPORT_LOST)
ret = FAST_IO_FAIL;
else
ret = FAILED;
@@ -1779,10 +2027,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (target->tsk_mgmt_status)
return FAILED;
- for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
if (req->scmnd && req->scmnd->device == scmnd->device)
- srp_reset_req(target, req);
+ srp_finish_req(target, req, DID_RESET << 16);
}
return SUCCESS;
@@ -1791,14 +2039,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- int ret = FAILED;
shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
- if (!srp_reconnect_target(target))
- ret = SUCCESS;
-
- return ret;
+ return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
static int srp_slave_configure(struct scsi_device *sdev)
@@ -1851,6 +2095,14 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}
+static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sprintf(buf, "%pI6\n", target->path.sgid.raw);
+}
+
static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1907,6 +2159,14 @@ static ssize_t show_comp_vector(struct device *dev,
return sprintf(buf, "%d\n", target->comp_vector);
}
+static ssize_t show_tl_retry_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sprintf(buf, "%d\n", target->tl_retry_count);
+}
+
static ssize_t show_cmd_sg_entries(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1927,6 +2187,7 @@ static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
@@ -1934,6 +2195,7 @@ static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
+static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
@@ -1942,6 +2204,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_ioc_guid,
&dev_attr_service_id,
&dev_attr_pkey,
+ &dev_attr_sgid,
&dev_attr_dgid,
&dev_attr_orig_dgid,
&dev_attr_req_lim,
@@ -1949,6 +2212,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
&dev_attr_comp_vector,
+ &dev_attr_tl_retry_count,
&dev_attr_cmd_sg_entries,
&dev_attr_allow_ext_sg,
NULL
@@ -1961,14 +2225,16 @@ static struct scsi_host_template srp_template = {
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
+ .change_queue_depth = srp_change_queue_depth,
+ .change_queue_type = srp_change_queue_type,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
.skip_settle_delay = true,
.sg_tablesize = SRP_DEF_SG_TABLESIZE,
- .can_queue = SRP_CMD_SQ_SIZE,
+ .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
- .cmd_per_lun = SRP_CMD_SQ_SIZE,
+ .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs
};
@@ -1994,6 +2260,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
}
rport->lld_data = target;
+ target->rport = rport;
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
@@ -2073,6 +2340,8 @@ enum {
SRP_OPT_ALLOW_EXT_SG = 1 << 10,
SRP_OPT_SG_TABLESIZE = 1 << 11,
SRP_OPT_COMP_VECTOR = 1 << 12,
+ SRP_OPT_TL_RETRY_COUNT = 1 << 13,
+ SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_ALL = (SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_DGID |
@@ -2094,6 +2363,8 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
{ SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
+ { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
+ { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -2188,13 +2459,25 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->max_sectors = token;
break;
+ case SRP_OPT_QUEUE_SIZE:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad queue_size parameter '%s'\n", p);
+ goto out;
+ }
+ target->scsi_host->can_queue = token;
+ target->queue_size = token + SRP_RSP_SQ_SIZE +
+ SRP_TSK_MGMT_SQ_SIZE;
+ if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+ target->scsi_host->cmd_per_lun = token;
+ break;
+
case SRP_OPT_MAX_CMD_PER_LUN:
- if (match_int(args, &token)) {
+ if (match_int(args, &token) || token < 1) {
pr_warn("bad max cmd_per_lun parameter '%s'\n",
p);
goto out;
}
- target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
+ target->scsi_host->cmd_per_lun = token;
break;
case SRP_OPT_IO_CLASS:
@@ -2257,6 +2540,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->comp_vector = token;
break;
+ case SRP_OPT_TL_RETRY_COUNT:
+ if (match_int(args, &token) || token < 2 || token > 7) {
+ pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
+ p);
+ goto out;
+ }
+ target->tl_retry_count = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -2273,6 +2565,12 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("target creation request is missing parameter '%s'\n",
srp_opt_tokens[i].pattern);
+ if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
+ && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+ pr_warn("cmd_per_lun = %d > queue_size = %d\n",
+ target->scsi_host->cmd_per_lun,
+ target->scsi_host->can_queue);
+
out:
kfree(options);
return ret;
@@ -2287,8 +2585,7 @@ static ssize_t srp_create_target(struct device *dev,
struct Scsi_Host *target_host;
struct srp_target_port *target;
struct ib_device *ibdev = host->srp_dev->dev;
- dma_addr_t dma_addr;
- int i, ret;
+ int ret;
target_host = scsi_host_alloc(&srp_template,
sizeof (struct srp_target_port));
@@ -2311,11 +2608,15 @@ static ssize_t srp_create_target(struct device *dev,
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
+ target->tl_retry_count = 7;
+ target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
ret = srp_parse_options(buf, target);
if (ret)
goto err;
+ target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
+
if (!srp_conn_unique(target->srp_host, target)) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
@@ -2339,31 +2640,13 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+ INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
- INIT_LIST_HEAD(&target->free_reqs);
- for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
- struct srp_request *req = &target->req_ring[i];
-
- req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
- GFP_KERNEL);
- req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
- GFP_KERNEL);
- req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
- if (!req->fmr_list || !req->map_page || !req->indirect_desc)
- goto err_free_mem;
-
- dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
- target->indirect_size,
- DMA_TO_DEVICE);
- if (ib_dma_mapping_error(ibdev, dma_addr))
- goto err_free_mem;
-
- req->indirect_dma_addr = dma_addr;
- req->index = i;
- list_add_tail(&req->list, &target->free_reqs);
- }
+ ret = srp_alloc_req_data(target);
+ if (ret)
+ goto err_free_mem;
ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
@@ -2612,7 +2895,14 @@ static void srp_remove_one(struct ib_device *device)
}
static struct srp_function_template ib_srp_transport_functions = {
+ .has_rport_state = true,
+ .reset_timer_if_blocked = true,
+ .reconnect_delay = &srp_reconnect_delay,
+ .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
+ .dev_loss_tmo = &srp_dev_loss_tmo,
+ .reconnect = srp_rport_reconnect,
.rport_delete = srp_rport_delete,
+ .terminate_rport_io = srp_terminate_io,
};
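The expanded srp_function_template above hooks ib_srp into the SRP transport class's reconnect, fast-I/O-fail and dev-loss machinery. A hedged sketch of how such a template is registered with the transport class, assuming the 3.13-era scsi_transport_srp API; everything except srp_attach_transport()/srp_release_transport() and the template fields is a placeholder:

#include <linux/errno.h>
#include <linux/module.h>
#include <scsi/scsi_transport_srp.h>

static struct scsi_transport_template *example_srp_template;

static struct srp_function_template example_srp_functions = {
	.has_rport_state	= true,
	.reconnect		= NULL,	/* driver-specific callback */
	.terminate_rport_io	= NULL,	/* driver-specific callback */
};

static int __init example_init(void)
{
	/* The returned template is later passed to scsi_host_alloc()
	 * users via shost->transportt. */
	example_srp_template = srp_attach_transport(&example_srp_functions);
	return example_srp_template ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	srp_release_transport(example_srp_template);
}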
static int __init srp_init_module(void)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e641088c14d..575681063f3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -57,14 +57,11 @@ enum {
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
- SRP_RQ_SHIFT = 6,
- SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
-
- SRP_SQ_SIZE = SRP_RQ_SIZE,
+ SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
SRP_RSP_SQ_SIZE = 1,
- SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
SRP_TSK_MGMT_SQ_SIZE = 1,
- SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
+ SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
+ SRP_TSK_MGMT_SQ_SIZE,
SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = 1U << 31,
@@ -140,7 +137,6 @@ struct srp_target_port {
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
- bool transport_offline;
/* Everything above this point is used in the hot path of
* command processing. Try to keep them packed into cachelines.
@@ -153,10 +149,14 @@ struct srp_target_port {
u16 io_class;
struct srp_host *srp_host;
struct Scsi_Host *scsi_host;
+ struct srp_rport *rport;
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ int queue_size;
+ int req_ring_size;
int comp_vector;
+ int tl_retry_count;
struct ib_sa_path_rec path;
__be16 orig_dgid[8];
@@ -172,10 +172,11 @@ struct srp_target_port {
int zero_req_lim;
- struct srp_iu *tx_ring[SRP_SQ_SIZE];
- struct srp_iu *rx_ring[SRP_RQ_SIZE];
- struct srp_request req_ring[SRP_CMD_SQ_SIZE];
+ struct srp_iu **tx_ring;
+ struct srp_iu **rx_ring;
+ struct srp_request *req_ring;
+ struct work_struct tl_err_work;
struct work_struct remove_work;
struct list_head list;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6c923c7039a..520a7e5a490 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
/* XXX(hch): this is a horrible layering violation.. */
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-
- complete(&ioctx->cmd.transport_lun_stop_comp);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* not been received in time.
*/
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
- spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
@@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
{
struct se_cmd *cmd;
enum srpt_command_state state;
- unsigned long flags;
cmd = &ioctx->cmd;
state = srpt_get_cmd_state(ioctx);
@@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
__func__, __LINE__, state);
break;
case SRPT_RDMA_WRITE_LAST:
- spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
- spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
break;
default:
printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 38b523a1ece..a11ff74a512 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -80,7 +80,7 @@ config INPUT_MATRIXKMAP
comment "Userland interfaces"
config INPUT_MOUSEDEV
- tristate "Mouse interface" if EXPERT
+ tristate "Mouse interface"
default y
help
Say Y here if you want your mouse to be accessible as char devices
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index b6ded17b3be..a06e1255288 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -18,6 +18,8 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
@@ -369,7 +371,11 @@ static int evdev_release(struct inode *inode, struct file *file)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
- kfree(client);
+
+ if (is_vmalloc_addr(client))
+ vfree(client);
+ else
+ kfree(client);
evdev_close_device(evdev);
@@ -389,12 +395,14 @@ static int evdev_open(struct inode *inode, struct file *file)
{
struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
+ unsigned int size = sizeof(struct evdev_client) +
+ bufsize * sizeof(struct input_event);
struct evdev_client *client;
int error;
- client = kzalloc(sizeof(struct evdev_client) +
- bufsize * sizeof(struct input_event),
- GFP_KERNEL);
+ client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!client)
+ client = vzalloc(size);
if (!client)
return -ENOMEM;
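The evdev change above tries kzalloc() first and falls back to vzalloc() for large client buffers, then frees with the helper that matches the actual allocator. A self-contained sketch of that allocation pattern; the helper names are illustrative:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Try the slab allocator first without warning on failure, then fall
 * back to vmalloc for large or fragmentation-prone allocations. */
static void *big_buffer_zalloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/* Free with the allocator that actually provided the memory. */
static void big_buffer_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}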
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 922a7fea2ce..24c41ba7d4e 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -422,14 +422,15 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
* Gameport port operations
*/
-static ssize_t gameport_show_description(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t gameport_description_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gameport *gameport = to_gameport_port(dev);
return sprintf(buf, "%s\n", gameport->name);
}
+static DEVICE_ATTR(description, S_IRUGO, gameport_description_show, NULL);
-static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct gameport *gameport = to_gameport_port(dev);
struct device_driver *drv;
@@ -457,12 +458,14 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
return error ? error : count;
}
+static DEVICE_ATTR_WO(drvctl);
-static struct device_attribute gameport_device_attrs[] = {
- __ATTR(description, S_IRUGO, gameport_show_description, NULL),
- __ATTR(drvctl, S_IWUSR, NULL, gameport_rebind_driver),
- __ATTR_NULL
+static struct attribute *gameport_device_attrs[] = {
+ &dev_attr_description.attr,
+ &dev_attr_drvctl.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(gameport_device);
static void gameport_release_port(struct device *dev)
{
@@ -750,7 +753,7 @@ static int gameport_bus_match(struct device *dev, struct device_driver *drv)
static struct bus_type gameport_bus = {
.name = "gameport",
- .dev_attrs = gameport_device_attrs,
+ .dev_groups = gameport_device_groups,
.drv_groups = gameport_driver_groups,
.match = gameport_bus_match,
.probe = gameport_driver_probe,
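The gameport conversion above (and the serio one further down) replaces the removed bus_type.dev_attrs array with attribute groups. A minimal sketch of the new-style wiring, with placeholder names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t description_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(description);	/* generates dev_attr_description */

static struct attribute *example_device_attrs[] = {
	&dev_attr_description.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_device);	/* generates example_device_groups */

static struct bus_type example_bus = {
	.name		= "example",
	.dev_groups	= example_device_groups,	/* replaces .dev_attrs */
};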
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e75d015024a..846ccdd905b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2052,7 +2052,7 @@ int input_register_device(struct input_dev *dev)
if (dev->hint_events_per_packet < packet_size)
dev->hint_events_per_packet = packet_size;
- dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2;
+ dev->max_vals = dev->hint_events_per_packet + 2;
dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
if (!dev->vals) {
error = -ENOMEM;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index c1edd39bc5b..bb174c1a988 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
menuconfig INPUT_KEYBOARD
- bool "Keyboards" if EXPERT || !X86
+ bool "Keyboards"
default y
help
Say Y here, and a list of supported keyboards will be displayed.
@@ -67,7 +67,7 @@ config KEYBOARD_ATARI
module will be called atakbd.
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EXPERT || !X86
+ tristate "AT keyboard"
default y
select SERIO
select SERIO_LIBPS2
@@ -525,7 +525,7 @@ config KEYBOARD_SUNKBD
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARM || COMPILE_TEST
help
Say Y here if you want to use a keypad attached to the KEYSC block
on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 440ce32462b..2db13246eb8 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,6 +26,7 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/spinlock.h>
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index cd5ed9e2216..4e428199e58 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 42181435fe6..8b1b01361ec 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -383,7 +383,7 @@ static struct platform_driver lpc32xx_kscan_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.pm = &lpc32xx_kscan_pm_ops,
- .of_match_table = of_match_ptr(lpc32xx_kscan_match),
+ .of_match_table = lpc32xx_kscan_match,
}
};
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index b3e3edab6d9..b31064981e9 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -143,8 +143,10 @@ static int nspire_keypad_open(struct input_dev *input)
return error;
error = nspire_keypad_chip_init(keypad);
- if (error)
+ if (error) {
+ clk_disable_unprepare(keypad->clk);
return error;
+ }
return 0;
}
@@ -267,7 +269,7 @@ static struct platform_driver nspire_keypad_driver = {
.driver = {
.name = "nspire-keypad",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(nspire_keypad_dt_match),
+ .of_match_table = nspire_keypad_dt_match,
},
.probe = nspire_keypad_probe,
};
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a2e758d2758..186138c720c 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 9cd20e6905a..8508879f6fa 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -614,7 +614,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
unsigned int keymap_rows;
const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_kbc_of_match), &pdev->dev);
+ match = of_match_device(tegra_kbc_of_match, &pdev->dev);
kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
if (!kbc) {
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 5f7b427dd7e..8bd24d52bf1 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -60,8 +60,8 @@ struct keypad_data {
struct clk *clk;
struct device *dev;
spinlock_t lock;
- u32 irq_press;
- u32 irq_release;
+ int irq_press;
+ int irq_release;
int rows, cols, row_shift;
int debounce_ms, active_low;
u32 prev_keys[3];
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index aa51baaa9b1..5f4967d01bc 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -156,7 +156,7 @@ config INPUT_MAX8925_ONKEY
config INPUT_MAX8997_HAPTIC
tristate "MAXIM MAX8997 haptic controller support"
- depends on HAVE_PWM && MFD_MAX8997
+ depends on PWM && HAVE_PWM && MFD_MAX8997
select INPUT_FF_MEMLESS
help
This option enables device driver support for the haptic controller
@@ -461,7 +461,7 @@ config INPUT_PCF8574
config INPUT_PWM_BEEPER
tristate "PWM beeper support"
- depends on HAVE_PWM || PWM
+ depends on PWM && HAVE_PWM
help
Say Y here to get support for PWM based beeper devices.
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 61891486067..3a90b710e30 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -108,7 +108,6 @@ static int ad714x_spi_remove(struct spi_device *spi)
struct ad714x_chip *chip = spi_get_drvdata(spi);
ad714x_remove(chip);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 4f77f87847e..b5d71d24585 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -131,7 +131,6 @@ static int cobalt_buttons_probe(struct platform_device *pdev)
err_free_mem:
input_free_polled_device(poll_dev);
kfree(bdev);
- dev_set_drvdata(&pdev->dev, NULL);
return error;
}
@@ -144,7 +143,6 @@ static int cobalt_buttons_remove(struct platform_device *pdev)
input_free_polled_device(bdev->poll_dev);
iounmap(bdev->reg);
kfree(bdev);
- dev_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index f34beb228d3..17ccba88d63 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
@@ -35,15 +36,12 @@ static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
spin_lock_irqsave(&beep_lock, flags);
- if (count) {
- gpio_line_config(pin, IXP4XX_GPIO_OUT);
- gpio_line_set(pin, IXP4XX_GPIO_LOW);
-
+ if (count) {
+ gpio_direction_output(pin, 0);
*IXP4XX_OSRT2 = (count & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE;
} else {
- gpio_line_config(pin, IXP4XX_GPIO_IN);
- gpio_line_set(pin, IXP4XX_GPIO_HIGH);
-
+ gpio_direction_output(pin, 1);
+ gpio_direction_input(pin);
*IXP4XX_OSRT2 = 0;
}
@@ -78,11 +76,13 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
{
+ unsigned int pin = (unsigned int) dev_id;
+
/* clear interrupt */
*IXP4XX_OSST = IXP4XX_OSST_TIMER_2_PEND;
/* flip the beeper output */
- *IXP4XX_GPIO_GPOUTR ^= (1 << (unsigned int) dev_id);
+ gpio_set_value(pin, !gpio_get_value(pin));
return IRQ_HANDLED;
}
@@ -110,11 +110,15 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = ixp4xx_spkr_event;
+ err = gpio_request(dev->id, "ixp4-beeper");
+ if (err)
+ goto err_free_device;
+
err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
IRQF_NO_SUSPEND, "ixp4xx-beeper",
(void *) dev->id);
if (err)
- goto err_free_device;
+ goto err_free_gpio;
err = input_register_device(input_dev);
if (err)
@@ -126,6 +130,8 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
err_free_irq:
free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ err_free_gpio:
+ gpio_free(dev->id);
err_free_device:
input_free_device(input_dev);
@@ -144,6 +150,7 @@ static int ixp4xx_spkr_remove(struct platform_device *dev)
ixp4xx_spkr_control(pin, 0);
free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ gpio_free(dev->id);
return 0;
}
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index f3309696d05..59d4dcddf6d 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -168,7 +168,7 @@ static void mma8450_close(struct input_polled_dev *dev)
* I2C init/probing/exit functions
*/
static int mma8450_probe(struct i2c_client *c,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct input_polled_dev *idev;
struct mma8450 *m;
@@ -204,6 +204,8 @@ static int mma8450_probe(struct i2c_client *c,
goto err_free_mem;
}
+ i2c_set_clientdata(c, m);
+
return 0;
err_free_mem:
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index dce0d95943c..6983ffbbfb9 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -383,6 +383,7 @@ static int mpu3050_probe(struct i2c_client *client,
pm_runtime_enable(&client->dev);
pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
+ i2c_set_clientdata(client, sensor);
return 0;
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 2ff4d1c78ab..940566e7be1 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -16,6 +16,7 @@
#include <linux/input.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index fb4f8ac3343..83fff38b86b 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -87,7 +87,6 @@ static int rb532_button_remove(struct platform_device *pdev)
input_unregister_polled_device(poll_dev);
input_free_polled_device(poll_dev);
- dev_set_drvdata(&pdev->dev, NULL);
return 0;
}
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 5b1aff82513..f920ba7ab51 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -24,6 +24,7 @@
#include <linux/gpio.h>
#include <linux/rotary_encoder.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 0621c367049..7b8b03e0d0b 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -153,7 +153,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
.name = "sirfsoc-pwrc",
.owner = THIS_MODULE,
.pm = &sirfsoc_pwrc_pm_ops,
- .of_match_table = of_match_ptr(sirfsoc_pwrc_of_match),
+ .of_match_table = sirfsoc_pwrc_of_match,
}
};
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index a0a4bbaef02..772835938a5 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -430,20 +430,30 @@ static int uinput_setup_device(struct uinput_device *udev,
return retval;
}
-static ssize_t uinput_inject_event(struct uinput_device *udev,
- const char __user *buffer, size_t count)
+static ssize_t uinput_inject_events(struct uinput_device *udev,
+ const char __user *buffer, size_t count)
{
struct input_event ev;
+ size_t bytes = 0;
- if (count < input_event_size())
+ if (count != 0 && count < input_event_size())
return -EINVAL;
- if (input_event_from_user(buffer, &ev))
- return -EFAULT;
+ while (bytes + input_event_size() <= count) {
+ /*
+ * Note that even if some events were fetched successfully
+ * we are still going to return -EFAULT instead of a partial
+ * count to let userspace know that it got its buffers
+ * all wrong.
+ */
+ if (input_event_from_user(buffer + bytes, &ev))
+ return -EFAULT;
- input_event(udev->dev, ev.type, ev.code, ev.value);
+ input_event(udev->dev, ev.type, ev.code, ev.value);
+ bytes += input_event_size();
+ }
- return input_event_size();
+ return bytes;
}
static ssize_t uinput_write(struct file *file, const char __user *buffer,
@@ -460,7 +470,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer,
return retval;
retval = udev->state == UIST_CREATED ?
- uinput_inject_event(udev, buffer, count) :
+ uinput_inject_events(udev, buffer, count) :
uinput_setup_device(udev, buffer, count);
mutex_unlock(&udev->mutex);
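With uinput_inject_events() above, a single write() on a created uinput device may carry several input_event records. A hypothetical userspace sketch; the file descriptor setup is omitted and the key code is illustrative:

#include <linux/uinput.h>
#include <string.h>
#include <unistd.h>

/* Inject a key press and release in one write(); the kernel loops over
 * the buffer and calls input_event() for each record. */
static int emit_key_tap(int uinput_fd, unsigned short keycode)
{
	struct input_event ev[4];

	memset(ev, 0, sizeof(ev));
	ev[0].type = EV_KEY; ev[0].code = keycode;    ev[0].value = 1;
	ev[1].type = EV_SYN; ev[1].code = SYN_REPORT; ev[1].value = 0;
	ev[2].type = EV_KEY; ev[2].code = keycode;    ev[2].value = 0;
	ev[3].type = EV_SYN; ev[3].code = SYN_REPORT; ev[3].value = 0;

	return write(uinput_fd, ev, sizeof(ev)) == sizeof(ev) ? 0 : -1;
}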
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 83658472ad2..ca7a26f1dce 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,7 +103,6 @@ static const struct alps_model_info alps_model_data[] = {
/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
{ { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
- { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
@@ -1793,7 +1792,7 @@ int alps_init(struct psmouse *psmouse)
snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
dev2->name = (priv->flags & ALPS_DUALPOINT) ?
- "DualPoint Stick" : "PS/2 Mouse";
+ "DualPoint Stick" : "ALPS PS/2 Device";
dev2->id.bustype = BUS_I8042;
dev2->id.vendor = 0x0002;
dev2->id.product = PSMOUSE_ALPS;
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index f51765fff05..a5869a856ea 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -439,7 +439,7 @@ static int cypress_get_finger_count(unsigned char header_byte)
case 2: return 5;
default:
/* Invalid contact (e.g. palm). Ignore it. */
- return -1;
+ return 0;
}
}
@@ -452,17 +452,10 @@ static int cypress_parse_packet(struct psmouse *psmouse,
{
unsigned char *packet = psmouse->packet;
unsigned char header_byte = packet[0];
- int contact_cnt;
memset(report_data, 0, sizeof(struct cytp_report_data));
- contact_cnt = cypress_get_finger_count(header_byte);
-
- if (contact_cnt < 0) /* e.g. palm detect */
- return -EINVAL;
-
- report_data->contact_cnt = contact_cnt;
-
+ report_data->contact_cnt = cypress_get_finger_count(header_byte);
report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
if (report_data->contact_cnt == 1) {
@@ -535,11 +528,9 @@ static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
int slots[CYTP_MAX_MT_SLOTS];
int n;
- if (cypress_parse_packet(psmouse, cytp, &report_data))
- return;
+ cypress_parse_packet(psmouse, cytp, &report_data);
n = report_data.contact_cnt;
-
if (n > CYTP_MAX_MT_SLOTS)
n = CYTP_MAX_MT_SLOTS;
@@ -605,10 +596,6 @@ static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
contact_cnt = cypress_get_finger_count(packet[0]);
-
- if (contact_cnt < 0)
- return PSMOUSE_BAD_DATA;
-
if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
else
@@ -679,15 +666,15 @@ int cypress_init(struct psmouse *psmouse)
{
struct cytp_data *cytp;
- cytp = (struct cytp_data *)kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
- psmouse->private = (void *)cytp;
- if (cytp == NULL)
+ cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
+ if (!cytp)
return -ENOMEM;
- cypress_reset(psmouse);
-
+ psmouse->private = cytp;
psmouse->pktsize = 8;
+ cypress_reset(psmouse);
+
if (cypress_query_hardware(psmouse)) {
psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
goto err_exit;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 33b3e88fe4a..8541f949778 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
config SERIO
- tristate "Serial I/O support" if EXPERT || !X86
+ tristate "Serial I/O support"
default y
help
Say Yes here if you have any input device that uses serial I/O to
@@ -19,9 +19,9 @@ config SERIO
if SERIO
config SERIO_I8042
- tristate "i8042 PC Keyboard controller" if EXPERT || !X86
+ tristate "i8042 PC Keyboard controller"
default y
- depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
+ depends on !PARISC && (!ARM || FOOTBRIDGE_HOST) && \
(!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
!ARC
help
@@ -170,7 +170,7 @@ config SERIO_MACEPS2
module will be called maceps2.
config SERIO_LIBPS2
- tristate "PS/2 driver library" if EXPERT
+ tristate "PS/2 driver library"
depends on SERIO_I8042 || SERIO_I8042=n
help
Say Y here if you are using a driver for device connected
@@ -266,4 +266,14 @@ config SERIO_OLPC_APSP
To compile this driver as a module, choose M here: the module will
be called olpc_apsp.
+config HYPERV_KEYBOARD
+ tristate "Microsoft Synthetic Keyboard driver"
+ depends on HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V Keyboard driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hyperv_keyboard.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 12298b1c0e7..815d874fe72 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o
obj-$(CONFIG_SERIO_APBPS2) += apbps2.o
obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o
+obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
new file mode 100644
index 00000000000..3a83c3c14b2
--- /dev/null
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2013, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+#include <linux/hyperv.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+/*
+ * Current version 1.0
+ */
+#define SYNTH_KBD_VERSION_MAJOR 1
+#define SYNTH_KBD_VERSION_MINOR 0
+#define SYNTH_KBD_VERSION (SYNTH_KBD_VERSION_MINOR | \
+ (SYNTH_KBD_VERSION_MAJOR << 16))
+
+
+/*
+ * Message types in the synthetic input protocol
+ */
+enum synth_kbd_msg_type {
+ SYNTH_KBD_PROTOCOL_REQUEST = 1,
+ SYNTH_KBD_PROTOCOL_RESPONSE = 2,
+ SYNTH_KBD_EVENT = 3,
+ SYNTH_KBD_LED_INDICATORS = 4,
+};
+
+/*
+ * Basic message structures.
+ */
+struct synth_kbd_msg_hdr {
+ __le32 type;
+};
+
+struct synth_kbd_msg {
+ struct synth_kbd_msg_hdr header;
+ char data[]; /* Enclosed message */
+};
+
+union synth_kbd_version {
+ __le32 version;
+};
+
+/*
+ * Protocol messages
+ */
+struct synth_kbd_protocol_request {
+ struct synth_kbd_msg_hdr header;
+ union synth_kbd_version version_requested;
+};
+
+#define PROTOCOL_ACCEPTED BIT(0)
+struct synth_kbd_protocol_response {
+ struct synth_kbd_msg_hdr header;
+ __le32 proto_status;
+};
+
+#define IS_UNICODE BIT(0)
+#define IS_BREAK BIT(1)
+#define IS_E0 BIT(2)
+#define IS_E1 BIT(3)
+struct synth_kbd_keystroke {
+ struct synth_kbd_msg_hdr header;
+ __le16 make_code;
+ __le16 reserved0;
+ __le32 info; /* Additional information */
+};
+
+
+#define HK_MAXIMUM_MESSAGE_SIZE 256
+
+#define KBD_VSC_SEND_RING_BUFFER_SIZE (10 * PAGE_SIZE)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE (10 * PAGE_SIZE)
+
+#define XTKBD_EMUL0 0xe0
+#define XTKBD_EMUL1 0xe1
+#define XTKBD_RELEASE 0x80
+
+
+/*
+ * Represents a keyboard device
+ */
+struct hv_kbd_dev {
+ struct hv_device *hv_dev;
+ struct serio *hv_serio;
+ struct synth_kbd_protocol_request protocol_req;
+ struct synth_kbd_protocol_response protocol_resp;
+ /* Synchronize the request/response if needed */
+ struct completion wait_event;
+ spinlock_t lock; /* protects 'started' field */
+ bool started;
+};
+
+static void hv_kbd_on_receive(struct hv_device *hv_dev,
+ struct synth_kbd_msg *msg, u32 msg_length)
+{
+ struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+ struct synth_kbd_keystroke *ks_msg;
+ unsigned long flags;
+ u32 msg_type = __le32_to_cpu(msg->header.type);
+ u32 info;
+ u16 scan_code;
+
+ switch (msg_type) {
+ case SYNTH_KBD_PROTOCOL_RESPONSE:
+ /*
+ * Validate the information provided by the host.
+ * If the host is giving us a bogus packet,
+ * drop the packet (hoping the problem
+ * goes away).
+ */
+ if (msg_length < sizeof(struct synth_kbd_protocol_response)) {
+ dev_err(&hv_dev->device,
+ "Illegal protocol response packet (len: %d)\n",
+ msg_length);
+ break;
+ }
+
+ memcpy(&kbd_dev->protocol_resp, msg,
+ sizeof(struct synth_kbd_protocol_response));
+ complete(&kbd_dev->wait_event);
+ break;
+
+ case SYNTH_KBD_EVENT:
+ /*
+ * Validate the information provided by the host.
+ * If the host is giving us a bogus packet,
+ * drop the packet (hoping the problem
+ * goes away).
+ */
+ if (msg_length < sizeof(struct synth_kbd_keystroke)) {
+ dev_err(&hv_dev->device,
+ "Illegal keyboard event packet (len: %d)\n",
+ msg_length);
+ break;
+ }
+
+ ks_msg = (struct synth_kbd_keystroke *)msg;
+ info = __le32_to_cpu(ks_msg->info);
+
+ /*
+ * Inject the information through the serio interrupt.
+ */
+ spin_lock_irqsave(&kbd_dev->lock, flags);
+ if (kbd_dev->started) {
+ if (info & IS_E0)
+ serio_interrupt(kbd_dev->hv_serio,
+ XTKBD_EMUL0, 0);
+
+ scan_code = __le16_to_cpu(ks_msg->make_code);
+ if (info & IS_BREAK)
+ scan_code |= XTKBD_RELEASE;
+
+ serio_interrupt(kbd_dev->hv_serio, scan_code, 0);
+ }
+ spin_unlock_irqrestore(&kbd_dev->lock, flags);
+ break;
+
+ default:
+ dev_err(&hv_dev->device,
+ "unhandled message type %d\n", msg_type);
+ }
+}
+
+static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
+ struct vmpacket_descriptor *desc,
+ u32 bytes_recvd,
+ u64 req_id)
+{
+ struct synth_kbd_msg *msg;
+ u32 msg_sz;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ /*
+ * We have a packet that has "inband" data. The API used
+ * for retrieving the packet guarantees that the complete
+ * packet is read. So, minimally, we should be able to
+ * parse the payload header safely (assuming that the host
+ * can be trusted; trusting the host seems reasonable because
+ * in a virtualized environment there is not a whole lot you
+ * can do if you don't trust the host).
+ *
+ * Nonetheless, let us validate the host in a trivial way.
+ * The interesting question is how to recover if the host
+ * turns out not to be trustworthy. Simply dropping the
+ * packet hardly seems an appropriate recovery; in the
+ * interest of failing fast it may be better to crash the
+ * guest. For now, just drop the packet.
+ */
+
+ msg_sz = bytes_recvd - (desc->offset8 << 3);
+ if (msg_sz <= sizeof(struct synth_kbd_msg_hdr)) {
+ /*
+ * Drop the packet and hope
+ * the problem magically goes away.
+ */
+ dev_err(&hv_dev->device,
+ "Illegal packet (type: %d, tid: %llx, size: %d)\n",
+ desc->type, req_id, msg_sz);
+ break;
+ }
+
+ msg = (void *)desc + (desc->offset8 << 3);
+ hv_kbd_on_receive(hv_dev, msg, msg_sz);
+ break;
+
+ default:
+ dev_err(&hv_dev->device,
+ "unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
+ break;
+ }
+}
+
+static void hv_kbd_on_channel_callback(void *context)
+{
+ struct hv_device *hv_dev = context;
+ void *buffer;
+ int bufferlen = 0x100; /* Start with sensible size */
+ u32 bytes_recvd;
+ u64 req_id;
+ int error;
+
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
+
+ while (1) {
+ error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
+ &bytes_recvd, &req_id);
+ switch (error) {
+ case 0:
+ if (bytes_recvd == 0) {
+ kfree(buffer);
+ return;
+ }
+
+ hv_kbd_handle_received_packet(hv_dev, buffer,
+ bytes_recvd, req_id);
+ break;
+
+ case -ENOBUFS:
+ kfree(buffer);
+ /* Handle large packet */
+ bufferlen = bytes_recvd;
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+ if (!buffer)
+ return;
+ break;
+ }
+ }
+}
+
+static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev)
+{
+ struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+ struct synth_kbd_protocol_request *request;
+ struct synth_kbd_protocol_response *response;
+ u32 proto_status;
+ int error;
+
+ request = &kbd_dev->protocol_req;
+ memset(request, 0, sizeof(struct synth_kbd_protocol_request));
+ request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST);
+ request->version_requested.version = __cpu_to_le32(SYNTH_KBD_VERSION);
+
+ error = vmbus_sendpacket(hv_dev->channel, request,
+ sizeof(struct synth_kbd_protocol_request),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (error)
+ return error;
+
+ if (!wait_for_completion_timeout(&kbd_dev->wait_event, 10 * HZ))
+ return -ETIMEDOUT;
+
+ response = &kbd_dev->protocol_resp;
+ proto_status = __le32_to_cpu(response->proto_status);
+ if (!(proto_status & PROTOCOL_ACCEPTED)) {
+ dev_err(&hv_dev->device,
+ "synth_kbd protocol request failed (version %d)\n",
+ SYNTH_KBD_VERSION);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hv_kbd_start(struct serio *serio)
+{
+ struct hv_kbd_dev *kbd_dev = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbd_dev->lock, flags);
+ kbd_dev->started = true;
+ spin_unlock_irqrestore(&kbd_dev->lock, flags);
+
+ return 0;
+}
+
+static void hv_kbd_stop(struct serio *serio)
+{
+ struct hv_kbd_dev *kbd_dev = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbd_dev->lock, flags);
+ kbd_dev->started = false;
+ spin_unlock_irqrestore(&kbd_dev->lock, flags);
+}
+
+static int hv_kbd_probe(struct hv_device *hv_dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_kbd_dev *kbd_dev;
+ struct serio *hv_serio;
+ int error;
+
+ kbd_dev = kzalloc(sizeof(struct hv_kbd_dev), GFP_KERNEL);
+ hv_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!kbd_dev || !hv_serio) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ kbd_dev->hv_dev = hv_dev;
+ kbd_dev->hv_serio = hv_serio;
+ spin_lock_init(&kbd_dev->lock);
+ init_completion(&kbd_dev->wait_event);
+ hv_set_drvdata(hv_dev, kbd_dev);
+
+ hv_serio->dev.parent = &hv_dev->device;
+ hv_serio->id.type = SERIO_8042_XL;
+ hv_serio->port_data = kbd_dev;
+ strlcpy(hv_serio->name, dev_name(&hv_dev->device),
+ sizeof(hv_serio->name));
+ strlcpy(hv_serio->phys, dev_name(&hv_dev->device),
+ sizeof(hv_serio->phys));
+
+ hv_serio->start = hv_kbd_start;
+ hv_serio->stop = hv_kbd_stop;
+
+ error = vmbus_open(hv_dev->channel,
+ KBD_VSC_SEND_RING_BUFFER_SIZE,
+ KBD_VSC_RECV_RING_BUFFER_SIZE,
+ NULL, 0,
+ hv_kbd_on_channel_callback,
+ hv_dev);
+ if (error)
+ goto err_free_mem;
+
+ error = hv_kbd_connect_to_vsp(hv_dev);
+ if (error)
+ goto err_close_vmbus;
+
+ serio_register_port(kbd_dev->hv_serio);
+ return 0;
+
+err_close_vmbus:
+ vmbus_close(hv_dev->channel);
+err_free_mem:
+ kfree(hv_serio);
+ kfree(kbd_dev);
+ return error;
+}
+
+static int hv_kbd_remove(struct hv_device *hv_dev)
+{
+ struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+
+ serio_unregister_port(kbd_dev->hv_serio);
+ vmbus_close(hv_dev->channel);
+ kfree(kbd_dev);
+
+ hv_set_drvdata(hv_dev, NULL);
+
+ return 0;
+}
+
+/*
+ * Keyboard GUID
+ * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
+ */
+#define HV_KBD_GUID \
+ .guid = { \
+ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \
+ 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \
+ }
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Keyboard guid */
+ { HV_KBD_GUID, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct hv_driver hv_kbd_drv = {
+ .name = KBUILD_MODNAME,
+ .id_table = id_table,
+ .probe = hv_kbd_probe,
+ .remove = hv_kbd_remove,
+};
+
+static int __init hv_kbd_init(void)
+{
+ return vmbus_driver_register(&hv_kbd_drv);
+}
+
+static void __exit hv_kbd_exit(void)
+{
+ vmbus_driver_unregister(&hv_kbd_drv);
+}
+
+MODULE_LICENSE("GPL");
+module_init(hv_kbd_init);
+module_exit(hv_kbd_exit);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5f306f79da0..0ec9abbe31f 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -765,6 +765,7 @@ static struct pnp_device_id pnp_kbd_devids[] = {
{ .id = "CPQA0D7", .driver_data = 0 },
{ .id = "", },
};
+MODULE_DEVICE_TABLE(pnp, pnp_kbd_devids);
static struct pnp_driver i8042_pnp_kbd_driver = {
.name = "i8042 kbd",
@@ -786,6 +787,7 @@ static struct pnp_device_id pnp_aux_devids[] = {
{ .id = "SYN0801", .driver_data = 0 },
{ .id = "", },
};
+MODULE_DEVICE_TABLE(pnp, pnp_aux_devids);
static struct pnp_driver i8042_pnp_aux_driver = {
.name = "i8042 aux",
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 52c9ebf9472..020053fa5aa 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1036,7 +1036,7 @@ static void i8042_controller_reset(bool force_reset)
/*
* i8042_panic_blink() will turn the keyboard LEDs on or off and is called
* when kernel panics. Flashing LEDs is useful for users running X who may
- * not see the console and will help distingushing panics from "real"
+ * not see the console and will help distinguish panics from "real"
* lockups.
*
* Note that DELAY has a limit of 10ms so we will not get stuck here
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 2b56855c2c7..98707fb2cb5 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -365,7 +365,7 @@ static ssize_t serio_show_description(struct device *dev, struct device_attribut
return sprintf(buf, "%s\n", serio->name);
}
-static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
@@ -373,54 +373,31 @@ static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *
serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}
-static ssize_t serio_show_id_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.type);
}
-static ssize_t serio_show_id_proto(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.proto);
}
-static ssize_t serio_show_id_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.id);
}
-static ssize_t serio_show_id_extra(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.extra);
}
-static DEVICE_ATTR(type, S_IRUGO, serio_show_id_type, NULL);
-static DEVICE_ATTR(proto, S_IRUGO, serio_show_id_proto, NULL);
-static DEVICE_ATTR(id, S_IRUGO, serio_show_id_id, NULL);
-static DEVICE_ATTR(extra, S_IRUGO, serio_show_id_extra, NULL);
-
-static struct attribute *serio_device_id_attrs[] = {
- &dev_attr_type.attr,
- &dev_attr_proto.attr,
- &dev_attr_id.attr,
- &dev_attr_extra.attr,
- NULL
-};
-
-static struct attribute_group serio_id_attr_group = {
- .name = "id",
- .attrs = serio_device_id_attrs,
-};
-
-static const struct attribute_group *serio_device_attr_groups[] = {
- &serio_id_attr_group,
- NULL
-};
-
-static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct serio *serio = to_serio_port(dev);
struct device_driver *drv;
@@ -474,14 +451,36 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
return retval;
}
-static struct device_attribute serio_device_attrs[] = {
- __ATTR(description, S_IRUGO, serio_show_description, NULL),
- __ATTR(modalias, S_IRUGO, serio_show_modalias, NULL),
- __ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver),
- __ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode),
- __ATTR_NULL
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(proto);
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(extra);
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_WO(drvctl);
+static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
+static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+
+static struct attribute *serio_device_id_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_proto.attr,
+ &dev_attr_id.attr,
+ &dev_attr_extra.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_description.attr,
+ &dev_attr_drvctl.attr,
+ &dev_attr_bind_mode.attr,
+ NULL
};
+static struct attribute_group serio_id_attr_group = {
+ .name = "id",
+ .attrs = serio_device_id_attrs,
+};
+
+static const struct attribute_group *serio_device_attr_groups[] = {
+ &serio_id_attr_group,
+ NULL
+};
static void serio_release_port(struct device *dev)
{
@@ -996,7 +995,6 @@ EXPORT_SYMBOL(serio_interrupt);
static struct bus_type serio_bus = {
.name = "serio",
- .dev_attrs = serio_device_attrs,
.drv_groups = serio_driver_groups,
.match = serio_bus_match,
.uevent = serio_uevent,
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 4b7662a17ae..dfbcd872f95 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#define DRIVER_NAME "xilinx_ps2"
@@ -235,12 +236,12 @@ static void sxps2_close(struct serio *pserio)
*/
static int xps2_of_probe(struct platform_device *ofdev)
{
- struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
struct xps2data *drvdata;
struct serio *serio;
struct device *dev = &ofdev->dev;
resource_size_t remap_size, phys_addr;
+ unsigned int irq;
int error;
dev_info(dev, "Device Tree Probing \'%s\'\n",
@@ -254,7 +255,8 @@ static int xps2_of_probe(struct platform_device *ofdev)
}
/* Get IRQ for the device */
- if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) {
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!irq) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -267,7 +269,7 @@ static int xps2_of_probe(struct platform_device *ofdev)
}
spin_lock_init(&drvdata->lock);
- drvdata->irq = r_irq.start;
+ drvdata->irq = irq;
drvdata->serio = serio;
drvdata->dev = dev;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index e53416a4d7f..867e7c33ac5 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -524,9 +524,6 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
report_id, rep_data, length, 1);
- if (error >= 0)
- error = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, length, 1);
} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -548,7 +545,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
/* MT Tablet PC touch */
return wacom_set_device_mode(intf, 3, 4, 4);
}
- else if (features->type == WACOM_24HDT) {
+ else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) {
return wacom_set_device_mode(intf, 18, 3, 2);
}
} else if (features->device_type == BTN_TOOL_PEN) {
@@ -719,7 +716,7 @@ static int wacom_led_control(struct wacom *wacom)
return -ENOMEM;
if (wacom->wacom_wac.features.type >= INTUOS5S &&
- wacom->wacom_wac.features.type <= INTUOS5L) {
+ wacom->wacom_wac.features.type <= INTUOSPL) {
/*
* Touch Ring and crop mark LED luminance may take on
* one of four values:
@@ -981,14 +978,20 @@ static int wacom_initialize_leds(struct wacom *wacom)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
- wacom->led.select[0] = 0;
- wacom->led.select[1] = 0;
- wacom->led.llv = 32;
- wacom->led.hlv = 0;
- wacom->led.img_lum = 0;
-
- error = sysfs_create_group(&wacom->intf->dev.kobj,
- &intuos5_led_attr_group);
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
+ if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) {
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 32;
+ wacom->led.hlv = 0;
+ wacom->led.img_lum = 0;
+
+ error = sysfs_create_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ } else
+ return 0;
break;
default:
@@ -1024,8 +1027,12 @@ static void wacom_destroy_leds(struct wacom *wacom)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
- sysfs_remove_group(&wacom->intf->dev.kobj,
- &intuos5_led_attr_group);
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
+ if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN)
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
break;
}
}
@@ -1185,34 +1192,47 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac1->features =
*((struct wacom_features *)id->driver_info);
wacom_wac1->features.device_type = BTN_TOOL_PEN;
+ snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
+ wacom_wac1->features.name);
error = wacom_register_input(wacom1);
if (error)
- goto fail1;
+ goto fail;
/* Touch interface */
- wacom_wac2->features =
- *((struct wacom_features *)id->driver_info);
- wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
- wacom_wac2->features.device_type = BTN_TOOL_FINGER;
- wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
- error = wacom_register_input(wacom2);
- if (error)
- goto fail2;
+ if (wacom_wac1->features.touch_max) {
+ wacom_wac2->features =
+ *((struct wacom_features *)id->driver_info);
+ wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_wac2->features.device_type = BTN_TOOL_FINGER;
+ wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+ if (wacom_wac2->features.touch_max)
+ snprintf(wacom_wac2->name, WACOM_NAME_MAX,
+ "%s (WL) Finger", wacom_wac2->features.name);
+ else
+ snprintf(wacom_wac2->name, WACOM_NAME_MAX,
+ "%s (WL) Pad", wacom_wac2->features.name);
+ error = wacom_register_input(wacom2);
+ if (error)
+ goto fail;
+ }
error = wacom_initialize_battery(wacom);
if (error)
- goto fail3;
+ goto fail;
}
return;
-fail3:
- input_unregister_device(wacom_wac2->input);
- wacom_wac2->input = NULL;
-fail2:
- input_unregister_device(wacom_wac1->input);
- wacom_wac1->input = NULL;
-fail1:
+fail:
+ if (wacom_wac2->input) {
+ input_unregister_device(wacom_wac2->input);
+ wacom_wac2->input = NULL;
+ }
+
+ if (wacom_wac1->input) {
+ input_unregister_device(wacom_wac1->input);
+ wacom_wac1->input = NULL;
+ }
return;
}
@@ -1302,7 +1322,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
* HID descriptor. If this is the touch interface (wMaxPacketSize
* of WACOM_PKGLEN_BBTOUCH3), override the table values.
*/
- if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
features->device_type = BTN_TOOL_FINGER;
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1329,10 +1349,12 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
struct usb_device *other_dev;
/* Append the device type to the name */
- strlcat(wacom_wac->name,
- features->device_type == BTN_TOOL_PEN ?
- " Pen" : " Finger",
- sizeof(wacom_wac->name));
+ if (features->device_type != BTN_TOOL_FINGER)
+ strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX);
+ else if (features->touch_max)
+ strlcat(wacom_wac->name, " Finger", WACOM_NAME_MAX);
+ else
+ strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX);
other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c59b797eeaf..782c2535f1d 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -427,6 +427,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(features->type == WACOM_21UX2))
return 1;
+ /* Range Report */
+ if ((data[1] & 0xfe) == 0x20) {
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+ }
+
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
@@ -477,7 +484,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
/* general pen packet */
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- if (features->type >= INTUOS4S && features->type <= WACOM_24HD) {
+ if (features->type >= INTUOS4S && features->type <= CINTIQ_HYBRID) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -621,14 +628,30 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else {
input_report_abs(input, ABS_MISC, 0);
}
- } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ } else if (features->type == CINTIQ_HYBRID) {
+ /*
+ * Do not send hardware buttons under Android. They
+ * are already sent to the system through GPIO (and
+ * have different meaning).
+ */
+ input_report_key(input, BTN_1, (data[4] & 0x01));
+ input_report_key(input, BTN_2, (data[4] & 0x02));
+ input_report_key(input, BTN_3, (data[4] & 0x04));
+ input_report_key(input, BTN_4, (data[4] & 0x08));
+
+ input_report_key(input, BTN_5, (data[4] & 0x10)); /* Right */
+ input_report_key(input, BTN_6, (data[4] & 0x20)); /* Up */
+ input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */
+ input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */
+ input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */
+ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
int i;
/* Touch ring mode switch has no capacitive sensor */
input_report_key(input, BTN_0, (data[3] & 0x01));
/*
- * ExpressKeys on Intuos5 have a capacitive sensor in
+ * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
* addition to the mechanical switch. Switch data is
* stored in data[4], capacitive data in data[5].
*/
@@ -716,7 +739,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
features->type == INTUOS4 ||
features->type == INTUOS4S ||
features->type == INTUOS5 ||
- features->type == INTUOS5S)) {
+ features->type == INTUOS5S ||
+ features->type == INTUOSPM ||
+ features->type == INTUOSPS)) {
return 0;
}
@@ -769,8 +794,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
+ if (features->type >= INTUOS4S && features->type <= INTUOSPL) {
input_report_key(input, BTN_LEFT, data[6] & 0x01);
input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
input_report_key(input, BTN_RIGHT, data[6] & 0x04);
@@ -797,7 +821,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
- features->type == INTUOS4L || features->type == INTUOS5L) &&
+ features->type == INTUOS4L || features->type == INTUOS5L ||
+ features->type == INTUOSPL) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
input_report_key(input, BTN_LEFT, data[8] & 0x01);
@@ -1107,6 +1132,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
+ struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->input;
bool touch = data[1] & 0x80;
int slot = input_mt_get_slot_by_key(input, data[0]);
@@ -1122,14 +1148,23 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
if (touch) {
int x = (data[2] << 4) | (data[4] >> 4);
int y = (data[3] << 4) | (data[4] & 0x0f);
- int a = data[5];
+ int width, height;
- // "a" is a scaled-down area which we assume is roughly
- // circular and which can be described as: a=(pi*r^2)/C.
- int x_res = input_abs_get_res(input, ABS_X);
- int y_res = input_abs_get_res(input, ABS_Y);
- int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
- int height = width * y_res / x_res;
+ if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
+ width = data[5];
+ height = data[6];
+ } else {
+ /*
+ * "a" is a scaled-down area which we assume is
+ * roughly circular and which can be described as:
+ * a=(pi*r^2)/C.
+ */
+ int a = data[5];
+ int x_res = input_abs_get_res(input, ABS_X);
+ int y_res = input_abs_get_res(input, ABS_Y);
+ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+ height = width * y_res / x_res;
+ }
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
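
/*
 * Editor's note: illustrative sketch, not part of the patch. In the legacy
 * branch above, the reported byte "a" is treated as a scaled contact area,
 * a = (pi * r^2) / C, so the contact diameter is 2 * sqrt(a * C / pi); the
 * driver's WACOM_CONTACT_AREA_SCALE plays the role of C / pi. The constant
 * and helper below are stand-ins for illustration only (userspace C).
 */
#include <math.h>

#define DEMO_CONTACT_AREA_SCALE 2607	/* assumed stand-in for C / pi */

static void demo_area_to_size(int a, int x_res, int y_res,
			      int *width, int *height)
{
	/* diameter in X-axis units: 2 * sqrt(a * (C / pi)) */
	*width = 2 * (int)sqrt((double)a * DEMO_CONTACT_AREA_SCALE);
	/* the Y axis may have a different resolution, so rescale the height */
	*height = *width * y_res / x_res;
}
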
@@ -1327,6 +1362,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case WACOM_22HD:
case WACOM_24HD:
case DTK:
+ case CINTIQ_HYBRID:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1337,6 +1373,9 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
if (len == WACOM_PKGLEN_BBTOUCH3)
sync = wacom_bpt3_touch(wacom_wac);
else
@@ -1420,7 +1459,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type >= WIRELESS ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
(features->oVid && features->oPid))
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
@@ -1627,6 +1666,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS5:
case INTUOS5L:
+ case INTUOSPM:
+ case INTUOSPL:
if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_7, input_dev->keybit);
__set_bit(BTN_8, input_dev->keybit);
@@ -1634,6 +1675,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case INTUOS5S:
+ case INTUOSPS:
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->device_type == BTN_TOOL_PEN) {
@@ -1765,6 +1807,24 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
0, 0);
}
break;
+
+ case CINTIQ_HYBRID:
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ __set_bit(BTN_0, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
+ wacom_setup_cintiq(wacom_wac);
+ break;
}
return 0;
}
@@ -1952,6 +2012,18 @@ static const struct wacom_features wacom_features_0x29 =
static const struct wacom_features wacom_features_0x2A =
{ "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x314 =
+ { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x315 =
+ { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x317 =
+ { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
+ 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2131,6 +2203,13 @@ static const struct wacom_features wacom_features_0x301 =
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x0307 =
+ { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59552, 33848, 2047,
+ 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
+static const struct wacom_features wacom_features_0x0309 =
+ { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10 };
#define USB_DEVICE_WACOM(prod) \
USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
@@ -2259,12 +2338,17 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
{ USB_DEVICE_WACOM(0x304) },
+ { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
{ USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xFA) },
+ { USB_DEVICE_WACOM(0x0307) },
+ { USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
};
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index dfc9e08e7f7..fd23a379060 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -14,6 +14,8 @@
/* maximum packet length for USB devices */
#define WACOM_PKGLEN_MAX 64
+#define WACOM_NAME_MAX 64
+
/* packet length for individual models */
#define WACOM_PKGLEN_PENPRTN 7
#define WACOM_PKGLEN_GRAPHIRE 8
@@ -76,10 +78,14 @@ enum {
INTUOS5S,
INTUOS5,
INTUOS5L,
+ INTUOSPS,
+ INTUOSPM,
+ INTUOSPL,
WACOM_21UX2,
WACOM_22HD,
DTK,
WACOM_24HD,
+ CINTIQ_HYBRID,
CINTIQ,
WACOM_BEE,
WACOM_13HD,
@@ -126,7 +132,7 @@ struct wacom_shared {
};
struct wacom_wac {
- char name[64];
+ char name[WACOM_NAME_MAX];
unsigned char *data;
int tool[2];
int id[2];
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e09ec67957a..00d1e547b21 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -919,4 +919,17 @@ config TOUCHSCREEN_TPS6507X
To compile this driver as a module, choose M here: the
module will be called tps6507x_ts.
+config TOUCHSCREEN_ZFORCE
+ tristate "Neonode zForce infrared touchscreens"
+ depends on I2C
+ depends on GPIOLIB
+ help
+ Say Y here if you have a touchscreen using the zforce
+ infrared technology from Neonode.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zforce_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f5216c1bf53..7587883b8d3 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -75,3 +75,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index f3a174a83c8..69834dd3c31 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -806,7 +806,6 @@ err_free_irq:
err_free_mem:
input_free_device(input_dev);
kfree(ts);
- spi_set_drvdata(spi, NULL);
return err;
}
@@ -823,7 +822,6 @@ static int ad7877_remove(struct spi_device *spi)
kfree(ts);
dev_dbg(&spi->dev, "unregistered touchscreen\n");
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 606da5bd611..1a7b1143536 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -142,7 +142,6 @@ static int ad7879_spi_remove(struct spi_device *spi)
struct ad7879 *ts = spi_get_drvdata(spi);
ad7879_remove(ts);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index d038575f49d..42d830efa31 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -2113,7 +2113,6 @@ error_startup:
error_request_irq:
if (cd->cpdata->init)
cd->cpdata->init(cd->cpdata, 0, dev);
- dev_set_drvdata(dev, NULL);
error_free_xfer:
kfree(cd->xfer_buf);
error_free_cd:
@@ -2151,7 +2150,6 @@ int cyttsp4_remove(struct cyttsp4 *cd)
free_irq(cd->irq, cd);
if (cd->cpdata->init)
cd->cpdata->init(cd->cpdata, 0, dev);
- dev_set_drvdata(dev, NULL);
cyttsp4_free_si_ptrs(cd);
kfree(cd);
return 0;
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
index a71e1141d63..b19434cebbf 100644
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ b/drivers/input/touchscreen/cyttsp4_spi.c
@@ -171,10 +171,7 @@ static int cyttsp4_spi_probe(struct spi_device *spi)
ts = cyttsp4_probe(&cyttsp_spi_bus_ops, &spi->dev, spi->irq,
CY_SPI_DATA_BUF_SIZE);
- if (IS_ERR(ts))
- return PTR_ERR(ts);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ts);
}
static int cyttsp4_spi_remove(struct spi_device *spi)
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index d53e0b72a40..4204841cdc4 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -242,7 +242,7 @@ static int cyttsp_soft_reset(struct cyttsp *ts)
int retval;
/* wait for interrupt to set ready completion */
- INIT_COMPLETION(ts->bl_ready);
+ reinit_completion(&ts->bl_ready);
ts->state = CY_BL_STATE;
enable_irq(ts->irq);
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index ef5fcb0945e..054d2258324 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -273,7 +273,7 @@ static struct i2c_driver egalax_ts_driver = {
.name = "egalax_ts",
.owner = THIS_MODULE,
.pm = &egalax_ts_pm_ops,
- .of_match_table = of_match_ptr(egalax_ts_dt_ids),
+ .of_match_table = egalax_ts_dt_ids,
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 66500852341..92e2243fb77 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -186,8 +186,6 @@ static int htcpen_isa_remove(struct device *dev, unsigned int id)
release_region(HTCPEN_PORT_INIT, 1);
release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
- dev_set_drvdata(dev, NULL);
-
return 0;
}
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 1740a249637..2f03b2f289d 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -24,6 +24,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index e1c5300cacf..68beadaabce 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -52,6 +52,7 @@ struct titsc {
u32 config_inp[4];
u32 bit_xp, bit_xn, bit_yp, bit_yn;
u32 inp_xp, inp_xn, inp_yp, inp_yn;
+ u32 step_mask;
};
static unsigned int titsc_readl(struct titsc *ts, unsigned int reg)
@@ -196,7 +197,8 @@ static void titsc_step_config(struct titsc *ts_dev)
/* The steps1 … end and bit 0 for TS_Charge */
stepenable = (1 << (end_step + 2)) - 1;
- am335x_tsc_se_set(ts_dev->mfd_tscadc, stepenable);
+ ts_dev->step_mask = stepenable;
+ am335x_tsc_se_set(ts_dev->mfd_tscadc, ts_dev->step_mask);
}
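
/*
 * Editor's note: illustrative sketch, not part of the patch. The cached
 * step_mask is simply "bits 0 through end_step + 1 set", i.e. the charge
 * step (bit 0, per the driver comment) plus the touch steps, and it is what
 * gets rewritten after each FIFO0 interrupt. The example value below is
 * arbitrary.
 */
#include <stdio.h>

int main(void)
{
	int end_step = 5;	/* assumed example value */
	unsigned int stepenable = (1u << (end_step + 2)) - 1;

	/* prints 0x7f: bits 0..6 set */
	printf("step mask = 0x%x\n", stepenable);
	return 0;
}
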
static void titsc_read_coordinates(struct titsc *ts_dev,
@@ -260,6 +262,10 @@ static irqreturn_t titsc_irq(int irq, void *dev)
unsigned int fsm;
status = titsc_readl(ts_dev, REG_IRQSTATUS);
+ /*
+ * ADC and touchscreen share the IRQ line.
+ * FIFO1 interrupts are used by the ADC. Handle only FIFO0 IRQs here.
+ */
if (status & IRQENB_FIFO0THRES) {
titsc_read_coordinates(ts_dev, &x, &y, &z1, &z2);
@@ -316,7 +322,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
if (irqclr) {
titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
- am335x_tsc_se_update(ts_dev->mfd_tscadc);
+ am335x_tsc_se_set(ts_dev->mfd_tscadc, ts_dev->step_mask);
return IRQ_HANDLED;
}
return IRQ_NONE;
@@ -348,9 +354,16 @@ static int titsc_parse_dt(struct platform_device *pdev,
if (err < 0)
return err;
- err = of_property_read_u32(node, "ti,coordiante-readouts",
+ /*
+ * Try with the new binding first. If it fails, try again with
+ * the bogus, misspelled version.
+ */
+ err = of_property_read_u32(node, "ti,coordinate-readouts",
&ts_dev->coordinate_readouts);
if (err < 0)
+ err = of_property_read_u32(node, "ti,coordiante-readouts",
+ &ts_dev->coordinate_readouts);
+ if (err < 0)
return err;
return of_property_read_u32_array(node, "ti,wire-config",
@@ -389,7 +402,7 @@ static int titsc_probe(struct platform_device *pdev)
}
err = request_irq(ts_dev->irq, titsc_irq,
- 0, pdev->dev.driver->name, ts_dev);
+ IRQF_SHARED, pdev->dev.driver->name, ts_dev);
if (err) {
dev_err(&pdev->dev, "failed to allocate irq.\n");
goto err_free_mem;
@@ -505,7 +518,7 @@ static struct platform_driver ti_tsc_driver = {
.name = "TI-am335x-tsc",
.owner = THIS_MODULE,
.pm = TITSC_PM_OPS,
- .of_match_table = of_match_ptr(ti_tsc_dt_ids),
+ .of_match_table = ti_tsc_dt_ids,
},
};
module_platform_driver(ti_tsc_driver);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 7213e8b07e7..81135335391 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -678,7 +678,6 @@ static int tsc2005_probe(struct spi_device *spi)
err_remove_sysfs:
sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
err_clear_drvdata:
- spi_set_drvdata(spi, NULL);
free_irq(spi->irq, ts);
err_free_mem:
input_free_device(input_dev);
@@ -696,7 +695,6 @@ static int tsc2005_remove(struct spi_device *spi)
input_unregister_device(ts->idev);
kfree(ts);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 721fdb3597c..ae4b6b90362 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -146,12 +146,10 @@ enum {
#define USB_DEVICE_HID_CLASS(vend, prod) \
.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
- | USB_DEVICE_ID_MATCH_INT_PROTOCOL \
| USB_DEVICE_ID_MATCH_DEVICE, \
.idVendor = (vend), \
.idProduct = (prod), \
- .bInterfaceClass = USB_INTERFACE_CLASS_HID, \
- .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE
+ .bInterfaceClass = USB_INTERFACE_CLASS_HID
static const struct usb_device_id usbtouch_devices[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
new file mode 100644
index 00000000000..75762d6ff3b
--- /dev/null
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -0,0 +1,836 @@
+/*
+ * Copyright (C) 2012-2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based in parts on Nook zforce driver
+ *
+ * Copyright (C) 2010 Barnes & Noble, Inc.
+ * Author: Pieter Truter<ptruter@intrinsyc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/input/mt.h>
+#include <linux/platform_data/zforce_ts.h>
+
+#define WAIT_TIMEOUT msecs_to_jiffies(1000)
+
+#define FRAME_START 0xee
+
+/* Offsets of the different parts of the payload the controller sends */
+#define PAYLOAD_HEADER 0
+#define PAYLOAD_LENGTH 1
+#define PAYLOAD_BODY 2
+
+/* Response offsets */
+#define RESPONSE_ID 0
+#define RESPONSE_DATA 1
+
+/* Commands */
+#define COMMAND_DEACTIVATE 0x00
+#define COMMAND_INITIALIZE 0x01
+#define COMMAND_RESOLUTION 0x02
+#define COMMAND_SETCONFIG 0x03
+#define COMMAND_DATAREQUEST 0x04
+#define COMMAND_SCANFREQ 0x08
+#define COMMAND_STATUS 0X1e
+
+/*
+ * Responses the controller sends as a result of
+ * command requests
+ */
+#define RESPONSE_DEACTIVATE 0x00
+#define RESPONSE_INITIALIZE 0x01
+#define RESPONSE_RESOLUTION 0x02
+#define RESPONSE_SETCONFIG 0x03
+#define RESPONSE_SCANFREQ 0x08
+#define RESPONSE_STATUS 0X1e
+
+/*
+ * Notifications are sent by the touch controller without
+ * being requested by the driver and include, for example,
+ * touch indications.
+ */
+#define NOTIFICATION_TOUCH 0x04
+#define NOTIFICATION_BOOTCOMPLETE 0x07
+#define NOTIFICATION_OVERRUN 0x25
+#define NOTIFICATION_PROXIMITY 0x26
+#define NOTIFICATION_INVALID_COMMAND 0xfe
+
+#define ZFORCE_REPORT_POINTS 2
+#define ZFORCE_MAX_AREA 0xff
+
+#define STATE_DOWN 0
+#define STATE_MOVE 1
+#define STATE_UP 2
+
+#define SETCONFIG_DUALTOUCH (1 << 0)
+
+struct zforce_point {
+ int coord_x;
+ int coord_y;
+ int state;
+ int id;
+ int area_major;
+ int area_minor;
+ int orientation;
+ int pressure;
+ int prblty;
+};
+
+/*
+ * @client the i2c_client
+ * @input the input device
+ * @suspending in the process of going to suspend (don't emit wakeup
+ * events for commands executed to suspend the device)
+ * @suspended device suspended
+ * @access_mutex serialize i2c-access, to keep multipart reads together
+ * @command_done completion to wait for the command result
+ * @command_mutex serialize commands sent to the IC
+ * @command_waiting the id of the command that is currently waiting
+ * for a result
+ * @command_result returned result of the command
+ */
+struct zforce_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ const struct zforce_ts_platdata *pdata;
+ char phys[32];
+
+ bool suspending;
+ bool suspended;
+ bool boot_complete;
+
+ /* Firmware version information */
+ u16 version_major;
+ u16 version_minor;
+ u16 version_build;
+ u16 version_rev;
+
+ struct mutex access_mutex;
+
+ struct completion command_done;
+ struct mutex command_mutex;
+ int command_waiting;
+ int command_result;
+};
+
+static int zforce_command(struct zforce_ts *ts, u8 cmd)
+{
+ struct i2c_client *client = ts->client;
+ char buf[3];
+ int ret;
+
+ dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
+
+ buf[0] = FRAME_START;
+ buf[1] = 1; /* data size, command only */
+ buf[2] = cmd;
+
+ mutex_lock(&ts->access_mutex);
+ ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf));
+ mutex_unlock(&ts->access_mutex);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
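
/*
 * Editor's note: illustrative sketch, not part of the patch. Every frame
 * exchanged with the controller has the shape built above: byte 0 is
 * FRAME_START (0xee), byte 1 is the payload length, and the payload begins
 * with the command/response id. 16-bit parameters (resolution, scan
 * frequencies) are packed least-significant byte first, which is what the
 * open-coded "(x & 0xff), ((x >> 8) & 0xff)" initializers further down do.
 * The helper below is a stand-alone sketch; the driver builds its buffers
 * directly.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_FRAME_START 0xee

static size_t demo_build_u16_cmd(uint8_t *buf, uint8_t cmd,
				 uint16_t a, uint16_t b)
{
	buf[0] = DEMO_FRAME_START;
	buf[1] = 5;			/* payload: cmd plus two 16-bit values */
	buf[2] = cmd;
	buf[3] = a & 0xff;		/* low byte first (little-endian) */
	buf[4] = (a >> 8) & 0xff;
	buf[5] = b & 0xff;
	buf[6] = (b >> 8) & 0xff;
	return 7;			/* bytes to hand to i2c_master_send() */
}
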
+
+static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = mutex_trylock(&ts->command_mutex);
+ if (!ret) {
+ dev_err(&client->dev, "already waiting for a command\n");
+ return -EBUSY;
+ }
+
+ dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n",
+ buf[1], buf[2]);
+
+ ts->command_waiting = buf[2];
+
+ mutex_lock(&ts->access_mutex);
+ ret = i2c_master_send(client, buf, len);
+ mutex_unlock(&ts->access_mutex);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+ goto unlock;
+ }
+
+ dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]);
+
+ if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) {
+ ret = -ETIME;
+ goto unlock;
+ }
+
+ ret = ts->command_result;
+
+unlock:
+ mutex_unlock(&ts->command_mutex);
+ return ret;
+}
+
+static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
+{
+ struct i2c_client *client = ts->client;
+ char buf[3];
+ int ret;
+
+ dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
+
+ buf[0] = FRAME_START;
+ buf[1] = 1; /* data size, command only */
+ buf[2] = cmd;
+
+ ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zforce_resolution(struct zforce_ts *ts, u16 x, u16 y)
+{
+ struct i2c_client *client = ts->client;
+ char buf[7] = { FRAME_START, 5, COMMAND_RESOLUTION,
+ (x & 0xff), ((x >> 8) & 0xff),
+ (y & 0xff), ((y >> 8) & 0xff) };
+
+ dev_dbg(&client->dev, "set resolution to (%d,%d)\n", x, y);
+
+ return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_scan_frequency(struct zforce_ts *ts, u16 idle, u16 finger,
+ u16 stylus)
+{
+ struct i2c_client *client = ts->client;
+ char buf[9] = { FRAME_START, 7, COMMAND_SCANFREQ,
+ (idle & 0xff), ((idle >> 8) & 0xff),
+ (finger & 0xff), ((finger >> 8) & 0xff),
+ (stylus & 0xff), ((stylus >> 8) & 0xff) };
+
+ dev_dbg(&client->dev, "set scan frequency to (idle: %d, finger: %d, stylus: %d)\n",
+ idle, finger, stylus);
+
+ return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_setconfig(struct zforce_ts *ts, char b1)
+{
+ struct i2c_client *client = ts->client;
+ char buf[7] = { FRAME_START, 5, COMMAND_SETCONFIG,
+ b1, 0, 0, 0 };
+
+ dev_dbg(&client->dev, "set config to (%d)\n", b1);
+
+ return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_start(struct zforce_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ int ret;
+
+ dev_dbg(&client->dev, "starting device\n");
+
+ ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (ret) {
+ dev_err(&client->dev, "Unable to initialize, %d\n", ret);
+ return ret;
+ }
+
+ ret = zforce_resolution(ts, pdata->x_max, pdata->y_max);
+ if (ret) {
+ dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
+ goto error;
+ }
+
+ ret = zforce_scan_frequency(ts, 10, 50, 50);
+ if (ret) {
+ dev_err(&client->dev, "Unable to set scan frequency, %d\n",
+ ret);
+ goto error;
+ }
+
+ if (zforce_setconfig(ts, SETCONFIG_DUALTOUCH)) {
+ dev_err(&client->dev, "Unable to set config\n");
+ goto error;
+ }
+
+ /* start sending touch events */
+ ret = zforce_command(ts, COMMAND_DATAREQUEST);
+ if (ret) {
+ dev_err(&client->dev, "Unable to request data\n");
+ goto error;
+ }
+
+ /*
+ * Per NN, the initial calibration takes at most 200 msec.
+ * Allow time for this calibration to complete.
+ */
+ msleep(200);
+
+ return 0;
+
+error:
+ zforce_command_wait(ts, COMMAND_DEACTIVATE);
+ return ret;
+}
+
+static int zforce_stop(struct zforce_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ dev_dbg(&client->dev, "stopping device\n");
+
+ /* Deactivates touch sensing and puts the device into sleep. */
+ ret = zforce_command_wait(ts, COMMAND_DEACTIVATE);
+ if (ret != 0) {
+ dev_err(&client->dev, "could not deactivate device, %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+{
+ struct i2c_client *client = ts->client;
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ struct zforce_point point;
+ int count, i, num = 0;
+
+ count = payload[0];
+ if (count > ZFORCE_REPORT_POINTS) {
+ dev_warn(&client->dev, "too many coordinates %d, expected max %d\n",
+ count, ZFORCE_REPORT_POINTS);
+ count = ZFORCE_REPORT_POINTS;
+ }
+
+ for (i = 0; i < count; i++) {
+ point.coord_x =
+ payload[9 * i + 2] << 8 | payload[9 * i + 1];
+ point.coord_y =
+ payload[9 * i + 4] << 8 | payload[9 * i + 3];
+
+ if (point.coord_x > pdata->x_max ||
+ point.coord_y > pdata->y_max) {
+ dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+ point.coord_x, point.coord_y);
+ point.coord_x = point.coord_y = 0;
+ }
+
+ point.state = payload[9 * i + 5] & 0x03;
+ point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+
+ /* determine touch major, minor and orientation */
+ point.area_major = max(payload[9 * i + 6],
+ payload[9 * i + 7]);
+ point.area_minor = min(payload[9 * i + 6],
+ payload[9 * i + 7]);
+ point.orientation = payload[9 * i + 6] > payload[9 * i + 7];
+
+ point.pressure = payload[9 * i + 8];
+ point.prblty = payload[9 * i + 9];
+
+ dev_dbg(&client->dev,
+ "point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n",
+ i, count, point.state, point.id,
+ point.pressure, point.prblty,
+ point.coord_x, point.coord_y,
+ point.area_major, point.area_minor,
+ point.orientation);
+
+ /* the zforce id starts with "1", so needs to be decreased */
+ input_mt_slot(ts->input, point.id - 1);
+
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
+ point.state != STATE_UP);
+
+ if (point.state != STATE_UP) {
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ point.coord_x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ point.coord_y);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ point.area_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+ point.area_minor);
+ input_report_abs(ts->input, ABS_MT_ORIENTATION,
+ point.orientation);
+ num++;
+ }
+ }
+
+ input_mt_sync_frame(ts->input);
+
+ input_mt_report_finger_count(ts->input, num);
+
+ input_sync(ts->input);
+
+ return 0;
+}
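
/*
 * Editor's note: illustrative sketch, not part of the patch. Each touch
 * point in the payload parsed above takes nine bytes after the leading
 * count byte: x (le16), y (le16), a combined state/id byte (bits 0-1 state,
 * bits 2-7 a 1-based id), two axis sizes, pressure and probability. The
 * stand-alone decoder below mirrors that layout for a single point.
 */
#include <stdint.h>

struct demo_point {
	int x, y, state, id;
	int major, minor, orientation;
	int pressure, prblty;
};

static void demo_decode_point(const uint8_t *p, struct demo_point *pt)
{
	/* p points at the first of the nine bytes describing one point */
	pt->x = (p[1] << 8) | p[0];
	pt->y = (p[3] << 8) | p[2];
	pt->state = p[4] & 0x03;
	pt->id = (p[4] & 0xfc) >> 2;
	pt->major = p[5] > p[6] ? p[5] : p[6];	/* larger of the two sizes */
	pt->minor = p[5] > p[6] ? p[6] : p[5];
	pt->orientation = p[5] > p[6];
	pt->pressure = p[7];
	pt->prblty = p[8];
}
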
+
+static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ mutex_lock(&ts->access_mutex);
+
+ /* read 2 byte message header */
+ ret = i2c_master_recv(client, buf, 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading header: %d\n", ret);
+ goto unlock;
+ }
+
+ if (buf[PAYLOAD_HEADER] != FRAME_START) {
+ dev_err(&client->dev, "invalid frame start: %d\n", buf[0]);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (buf[PAYLOAD_LENGTH] <= 0 || buf[PAYLOAD_LENGTH] > 255) {
+ dev_err(&client->dev, "invalid payload length: %d\n",
+ buf[PAYLOAD_LENGTH]);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* read the message */
+ ret = i2c_master_recv(client, &buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading payload: %d\n", ret);
+ goto unlock;
+ }
+
+ dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n",
+ buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]);
+
+unlock:
+ mutex_unlock(&ts->access_mutex);
+ return ret;
+}
+
+static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
+{
+ struct i2c_client *client = ts->client;
+
+ if (ts->command_waiting == cmd) {
+ dev_dbg(&client->dev, "completing command 0x%x\n", cmd);
+ ts->command_result = result;
+ complete(&ts->command_done);
+ } else {
+ dev_dbg(&client->dev, "command %d not for us\n", cmd);
+ }
+}
+
+static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+{
+ struct zforce_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ int ret;
+ u8 payload_buffer[512];
+ u8 *payload;
+
+ /*
+ * When suspended, emit a wakeup signal if necessary and return.
+ * Due to the level-interrupt we will get re-triggered later.
+ */
+ if (ts->suspended) {
+ if (device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+ msleep(20);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(&client->dev, "handling interrupt\n");
+
+ /* Don't emit wakeup events from commands run by zforce_suspend */
+ if (!ts->suspending && device_may_wakeup(&client->dev))
+ pm_stay_awake(&client->dev);
+
+ while (!gpio_get_value(pdata->gpio_int)) {
+ ret = zforce_read_packet(ts, payload_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not read packet, ret: %d\n",
+ ret);
+ break;
+ }
+
+ payload = &payload_buffer[PAYLOAD_BODY];
+
+ switch (payload[RESPONSE_ID]) {
+ case NOTIFICATION_TOUCH:
+ /*
+ * Always report touch events received while
+ * suspending when the device is a wakeup source
+ */
+ if (ts->suspending && device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+ zforce_touch_event(ts, &payload[RESPONSE_DATA]);
+ break;
+
+ case NOTIFICATION_BOOTCOMPLETE:
+ ts->boot_complete = payload[RESPONSE_DATA];
+ zforce_complete(ts, payload[RESPONSE_ID], 0);
+ break;
+
+ case RESPONSE_INITIALIZE:
+ case RESPONSE_DEACTIVATE:
+ case RESPONSE_SETCONFIG:
+ case RESPONSE_RESOLUTION:
+ case RESPONSE_SCANFREQ:
+ zforce_complete(ts, payload[RESPONSE_ID],
+ payload[RESPONSE_DATA]);
+ break;
+
+ case RESPONSE_STATUS:
+ /*
+ * Version Payload Results
+ * [2:major] [2:minor] [2:build] [2:rev]
+ */
+ ts->version_major = (payload[RESPONSE_DATA + 1] << 8) |
+ payload[RESPONSE_DATA];
+ ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) |
+ payload[RESPONSE_DATA + 2];
+ ts->version_build = (payload[RESPONSE_DATA + 5] << 8) |
+ payload[RESPONSE_DATA + 4];
+ ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) |
+ payload[RESPONSE_DATA + 6];
+ dev_dbg(&ts->client->dev, "Firmware Version %04x:%04x %04x:%04x\n",
+ ts->version_major, ts->version_minor,
+ ts->version_build, ts->version_rev);
+
+ zforce_complete(ts, payload[RESPONSE_ID], 0);
+ break;
+
+ case NOTIFICATION_INVALID_COMMAND:
+ dev_err(&ts->client->dev, "invalid command: 0x%x\n",
+ payload[RESPONSE_DATA]);
+ break;
+
+ default:
+ dev_err(&ts->client->dev, "unrecognized response id: 0x%x\n",
+ payload[RESPONSE_ID]);
+ break;
+ }
+ }
+
+ if (!ts->suspending && device_may_wakeup(&client->dev))
+ pm_relax(&client->dev);
+
+ dev_dbg(&client->dev, "finished interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static int zforce_input_open(struct input_dev *dev)
+{
+ struct zforce_ts *ts = input_get_drvdata(dev);
+ int ret;
+
+ ret = zforce_start(ts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void zforce_input_close(struct input_dev *dev)
+{
+ struct zforce_ts *ts = input_get_drvdata(dev);
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = zforce_stop(ts);
+ if (ret)
+ dev_warn(&client->dev, "stopping zforce failed\n");
+
+ return;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zforce_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct zforce_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+ ts->suspending = true;
+
+ /*
+ * When configured as a wakeup source, the device should always
+ * wake the system; therefore start the device if necessary.
+ */
+ if (device_may_wakeup(&client->dev)) {
+ dev_dbg(&client->dev, "suspend while being a wakeup source\n");
+
+ /* Need to start device, if not open, to be a wakeup source. */
+ if (!input->users) {
+ ret = zforce_start(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ enable_irq_wake(client->irq);
+ } else if (input->users) {
+ dev_dbg(&client->dev, "suspend without being a wakeup source\n");
+
+ ret = zforce_stop(ts);
+ if (ret)
+ goto unlock;
+
+ disable_irq(client->irq);
+ }
+
+ ts->suspended = true;
+
+unlock:
+ ts->suspending = false;
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+
+static int zforce_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct zforce_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ ts->suspended = false;
+
+ if (device_may_wakeup(&client->dev)) {
+ dev_dbg(&client->dev, "resume from being a wakeup source\n");
+
+ disable_irq_wake(client->irq);
+
+ /* need to stop device if it was not open on suspend */
+ if (!input->users) {
+ ret = zforce_stop(ts);
+ if (ret)
+ goto unlock;
+ }
+ } else if (input->users) {
+ dev_dbg(&client->dev, "resume without being a wakeup source\n");
+
+ enable_irq(client->irq);
+
+ ret = zforce_start(ts);
+ if (ret < 0)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
+
+static void zforce_reset(void *data)
+{
+ struct zforce_ts *ts = data;
+
+ gpio_set_value(ts->pdata->gpio_rst, 0);
+}
+
+static int zforce_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ struct zforce_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+
+ if (!pdata)
+ return -EINVAL;
+
+ ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
+ "zforce_ts_int");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_int, ret);
+ return ret;
+ }
+
+ ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
+ GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_rst, ret);
+ return ret;
+ }
+
+ ret = devm_add_action(&client->dev, zforce_reset, ts);
+ if (ret) {
+ dev_err(&client->dev, "failed to register reset action, %d\n",
+ ret);
+ return ret;
+ }
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "could not allocate input device\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ts->access_mutex);
+ mutex_init(&ts->command_mutex);
+
+ ts->pdata = pdata;
+ ts->client = client;
+ ts->input = input_dev;
+
+ input_dev->name = "Neonode zForce touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->id.bustype = BUS_I2C;
+
+ input_dev->open = zforce_input_open;
+ input_dev->close = zforce_input_close;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_SYN, input_dev->evbit);
+ __set_bit(EV_ABS, input_dev->evbit);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ pdata->y_max, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ ZFORCE_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+ ZFORCE_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT);
+
+ input_set_drvdata(ts->input, ts);
+
+ init_completion(&ts->command_done);
+
+ /*
+ * The zforce pulls the interrupt low when it has data ready.
+ * After it is triggered the isr thread runs until all the available
+ * packets have been read and the interrupt is high again.
+ * Therefore we can trigger the interrupt anytime it is low and do
+ * not need to limit it to the interrupt edge.
+ */
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ zforce_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ input_dev->name, ts);
+ if (ret) {
+ dev_err(&client->dev, "irq %d request failed\n", client->irq);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, ts);
+
+ /* let the controller boot */
+ gpio_set_value(pdata->gpio_rst, 1);
+
+ ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
+ if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
+ dev_warn(&client->dev, "bootcomplete timed out\n");
+
+ /* need to start device to get version information */
+ ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (ret) {
+ dev_err(&client->dev, "unable to initialize, %d\n", ret);
+ return ret;
+ }
+
+ /* this gets the firmware version among other information */
+ ret = zforce_command_wait(ts, COMMAND_STATUS);
+ if (ret < 0) {
+ dev_err(&client->dev, "couldn't get status, %d\n", ret);
+ zforce_stop(ts);
+ return ret;
+ }
+
+ /* stop device and put it into sleep until it is opened */
+ ret = zforce_stop(ts);
+ if (ret < 0)
+ return ret;
+
+ device_set_wakeup_capable(&client->dev, true);
+
+ ret = input_register_device(input_dev);
+ if (ret) {
+ dev_err(&client->dev, "could not register input device, %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct i2c_device_id zforce_idtable[] = {
+ { "zforce-ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, zforce_idtable);
+
+static struct i2c_driver zforce_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "zforce-ts",
+ .pm = &zforce_pm_ops,
+ },
+ .probe = zforce_probe,
+ .id_table = zforce_idtable,
+};
+
+module_i2c_driver(zforce_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("zForce TouchScreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c880ebaf155..3e7fdbb4916 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB
config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
- depends on (ARM && ARCH_SHMOBILE)
+ depends on ARM
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14c1f474cf1..5d58bf16e9e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 181c9ba929c..1abfb5684ab 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -590,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
ret = IRQ_HANDLED;
resume = RESUME_RETRY;
} else {
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+ iova, fsynr, root_cfg->cbndx);
ret = IRQ_NONE;
resume = RESUME_TERMINATE;
}
@@ -778,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
#ifdef __BIG_ENDIAN
reg |= SCTLR_E;
#endif
- writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -1212,7 +1215,10 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
arm_smmu_flush_pgtable(smmu, page_address(table),
ARM_SMMU_PTE_HWTABLE_SIZE);
- pgtable_page_ctor(table);
+ if (!pgtable_page_ctor(table)) {
+ __free_page(table);
+ return -ENOMEM;
+ }
pmd_populate(NULL, pmd, table);
arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
}
@@ -1559,9 +1565,13 @@ static struct iommu_ops arm_smmu_ops = {
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
- void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
+ void __iomem *cb_base;
int i = 0;
- u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+ u32 reg;
+
+ /* Clear Global FSR */
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
/* Mark all SMRn as invalid and all S2CRn as bypass */
for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1569,33 +1579,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
}
- /* Make sure all context banks are disabled */
- for (i = 0; i < smmu->num_context_banks; ++i)
- writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+ /* Make sure all context banks are disabled and clear CB_FSR */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+ writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+ writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+ }
/* Invalidate the TLB, just in case */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
/* Enable fault reporting */
- scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+ reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
/* Disable TLB broadcasting. */
- scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+ reg |= (sCR0_VMIDPNE | sCR0_PTM);
/* Enable client access, but bypass when no mapping is found */
- scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+ reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
/* Disable forced broadcasting */
- scr0 &= ~sCR0_FB;
+ reg &= ~sCR0_FB;
/* Don't upgrade barriers */
- scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+ reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
/* Push the button */
arm_smmu_tlb_sync(smmu);
- writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
@@ -1700,13 +1715,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
- /* Check that we ioremapped enough */
+ /* Check for size mismatch of SMMU address space from mapped region */
size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
size *= (smmu->pagesize << 1);
- if (smmu->size < size)
- dev_warn(smmu->dev,
- "device is 0x%lx bytes but only mapped 0x%lx!\n",
- size, smmu->size);
+ if (smmu->size != size)
+ dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
+ "from mapped region size (0x%lx)!\n", size, smmu->size);
smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
ID1_NUMS2CB_MASK;
@@ -1781,15 +1795,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing base address/size\n");
- return -ENODEV;
- }
-
+ smmu->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(smmu->base))
+ return PTR_ERR(smmu->base);
smmu->size = resource_size(res);
- smmu->base = devm_request_and_ioremap(dev, res);
- if (!smmu->base)
- return -EADDRNOTAVAIL;
if (of_property_read_u32(dev->of_node, "#global-interrupts",
&smmu->num_global_irqs)) {
@@ -1804,12 +1813,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->num_context_irqs++;
}
- if (num_irqs < smmu->num_global_irqs) {
- dev_warn(dev, "found %d interrupts but expected at least %d\n",
- num_irqs, smmu->num_global_irqs);
- smmu->num_global_irqs = num_irqs;
+ if (!smmu->num_context_irqs) {
+ dev_err(dev, "found %d interrupts but expected at least %d\n",
+ num_irqs, smmu->num_global_irqs + 1);
+ return -ENODEV;
}
- smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
GFP_KERNEL);
@@ -1933,7 +1941,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
free_irq(smmu->irqs[i], smmu);
/* Turn the thing off */
- writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+ writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
return 0;
}
@@ -1981,7 +1989,7 @@ static void __exit arm_smmu_exit(void)
return platform_driver_unregister(&arm_smmu_driver);
}
-module_init(arm_smmu_init);
+subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 785675a56a1..8b452c9676d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -88,7 +88,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
pr_warn("Device scope bus [%d] not found\n", scope->bus);
break;
}
- pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+ pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
if (!pdev) {
/* warning will be printed below */
break;
@@ -99,7 +99,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
}
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
+ segment, scope->bus, path->device, path->function);
*dev = NULL;
return 0;
}
@@ -403,7 +403,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
dev = pci_physfn(dev);
- list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+ for_each_drhd_unit(dmaru) {
drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit,
header);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 15e9b57e9cf..43b9bfea48f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
int offset;
BUG_ON(!domain->pgd);
- BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+ if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+ /* Address beyond IOMMU's addressing capabilities. */
+ return NULL;
+
parent = domain->pgd;
while (level > 0) {
@@ -3777,11 +3781,10 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev)
{
- struct device_domain_info *info;
+ struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
int found = 0;
- struct list_head *entry, *tmp;
iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
pdev->devfn);
@@ -3789,8 +3792,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_safe(entry, tmp, &domain->devices) {
- info = list_entry(entry, struct device_domain_info, link);
+ list_for_each_entry_safe(info, tmp, &domain->devices, link) {
if (info->segment == pci_domain_nr(pdev->bus) &&
info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f71673dbb23..bab10b1002f 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
if (disable_irq_remap)
return 0;
if (irq_remap_broken) {
- WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
- "This system BIOS has enabled interrupt remapping\n"
- "on a chipset that contains an erratum making that\n"
- "feature unstable. To maintain system stability\n"
- "interrupt remapping is being disabled. Please\n"
- "contact your BIOS vendor for an update\n");
+ printk(KERN_WARNING
+ "This system BIOS has enabled interrupt remapping\n"
+ "on a chipset that contains an erratum making that\n"
+ "feature unstable. To maintain system stability\n"
+ "interrupt remapping is being disabled. Please\n"
+ "contact your BIOS vendor for an update\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
disable_irq_remap = 1;
return 0;
}
@@ -686,12 +687,12 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
* Access PCI directly due to the PCI
* subsystem isn't initialized yet.
*/
- bus = read_pci_config_byte(bus, path->dev, path->fn,
+ bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
ir_hpet[ir_hpet_num].iommu = iommu;
ir_hpet[ir_hpet_num].id = scope->enumeration_id;
ir_hpet_num++;
@@ -714,13 +715,13 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
* Access PCI directly due to the PCI
* subsystem isn't initialized yet.
*/
- bus = read_pci_config_byte(bus, path->dev, path->fn,
+ bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[ir_ioapic_num].iommu = iommu;
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
ir_ioapic_num++;
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
new file mode 100644
index 00000000000..bf3b317ff0c
--- /dev/null
+++ b/drivers/iommu/iommu-traces.c
@@ -0,0 +1,27 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iommu.h>
+
+/* iommu_group_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
+
+/* iommu_device_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
+EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
+
+/* iommu_map_unmap */
+EXPORT_TRACEPOINT_SYMBOL_GPL(map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
+
+/* iommu_error */
+EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca734f8..e5555fcfe70 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
+#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
@@ -363,6 +364,8 @@ rename:
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+ trace_add_device_to_group(group->id, dev);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev)
sysfs_remove_link(group->devices_kobj, device->name);
sysfs_remove_link(&dev->kobj, "iommu_group");
+ trace_remove_device_from_group(group->id, dev);
+
kfree(device->name);
kfree(device);
dev->iommu_group = NULL;
@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free);
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
+ int ret;
if (unlikely(domain->ops->attach_dev == NULL))
return -ENODEV;
- return domain->ops->attach_dev(domain, dev);
+ ret = domain->ops->attach_dev(domain, dev);
+ if (!ret)
+ trace_attach_device_to_domain(dev);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
return;
domain->ops->detach_dev(domain, dev);
+ trace_detach_device_from_domain(dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
* size of the smallest page supported by the hardware
*/
if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
- pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
iova, &paddr, size, min_pagesz);
return -EINVAL;
}
- pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
+ pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
while (size) {
size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
- pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+ pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
/* unroll mapping in case something went wrong */
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
+ else
+ trace_map(iova, paddr, size);
return ret;
}
@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}
+ trace_unmap(iova, 0, size);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index f4003d568a9..b6f9a51746c 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
#define to_iommu(dev) \
- ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+ (platform_get_drvdata(to_platform_device(dev)))
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 108c0e9c24d..dba1a9fd507 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -252,7 +252,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
spin_lock_irqsave(&gart->pte_lock, flags);
pfn = __phys_to_pfn(pa);
if (!pfn_valid(pfn)) {
- dev_err(gart->dev, "Invalid page: %08x\n", pa);
+ dev_err(gart->dev, "Invalid page: %pa\n", &pa);
spin_unlock_irqrestore(&gart->pte_lock, flags);
return -EINVAL;
}
@@ -295,8 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
pa = (pte & GART_PAGE_MASK);
if (!pfn_valid(__phys_to_pfn(pa))) {
- dev_err(gart->dev, "No entry for %08llx:%08x\n",
- (unsigned long long)iova, pa);
+ dev_err(gart->dev, "No entry for %08llx:%pa\n",
+ (unsigned long long)iova, &pa);
gart_dump_table(gart);
return -EINVAL;
}
@@ -351,7 +351,6 @@ static int tegra_gart_probe(struct platform_device *pdev)
struct gart_device *gart;
struct resource *res, *res_remap;
void __iomem *gart_regs;
- int err;
struct device *dev = &pdev->dev;
if (gart_handle)
@@ -376,8 +375,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!gart_regs) {
dev_err(dev, "failed to remap GART registers\n");
- err = -ENXIO;
- goto fail;
+ return -ENXIO;
}
gart->dev = &pdev->dev;
@@ -391,8 +389,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
if (!gart->savedata) {
dev_err(dev, "failed to allocate context save area\n");
- err = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, gart);
@@ -401,32 +398,20 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart_handle = gart;
bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return 0;
-
-fail:
- if (gart_regs)
- devm_iounmap(dev, gart_regs);
- if (gart && gart->savedata)
- vfree(gart->savedata);
- devm_kfree(dev, gart);
- return err;
}
static int tegra_gart_remove(struct platform_device *pdev)
{
struct gart_device *gart = platform_get_drvdata(pdev);
- struct device *dev = gart->dev;
writel(0, gart->regs + GART_CONFIG);
if (gart->savedata)
vfree(gart->savedata);
- if (gart->regs)
- devm_iounmap(dev, gart->regs);
- devm_kfree(dev, gart);
gart_handle = NULL;
return 0;
}
-const struct dev_pm_ops tegra_gart_pm_ops = {
+static const struct dev_pm_ops tegra_gart_pm_ops = {
.suspend = tegra_gart_suspend,
.resume = tegra_gart_resume,
};
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e0665603afd..605b5b46a90 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -731,7 +731,7 @@ static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
unsigned long pfn = __phys_to_pfn(pa);
unsigned long flags;
- dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);
+ dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
if (!pfn_valid(pfn))
return -ENOMEM;
@@ -1254,7 +1254,7 @@ static int tegra_smmu_remove(struct platform_device *pdev)
return 0;
}
-const struct dev_pm_ops tegra_smmu_pm_ops = {
+static const struct dev_pm_ops tegra_smmu_pm_ops = {
.suspend = tegra_smmu_suspend,
.resume = tegra_smmu_resume,
};
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 6e066c53acc..d0016ba469e 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -180,20 +180,28 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
ipack_device_attr(id_format, "0x%hhu\n");
-static struct device_attribute ipack_dev_attrs[] = {
- __ATTR_RO(id),
- __ATTR_RO(id_device),
- __ATTR_RO(id_format),
- __ATTR_RO(id_vendor),
- __ATTR_RO(modalias),
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(id_device);
+static DEVICE_ATTR_RO(id_format);
+static DEVICE_ATTR_RO(id_vendor);
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ipack_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_id_device.attr,
+ &dev_attr_id_format.attr,
+ &dev_attr_id_vendor.attr,
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(ipack);
static struct bus_type ipack_bus_type = {
.name = "ipack",
.probe = ipack_bus_probe,
.match = ipack_bus_match,
.remove = ipack_bus_remove,
- .dev_attrs = ipack_dev_attrs,
+ .dev_groups = ipack_groups,
.uevent = ipack_uevent,
};
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index bb328a36612..433cc8568de 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -21,7 +21,10 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
@@ -51,12 +54,22 @@
#define IPI_DOORBELL_START (0)
#define IPI_DOORBELL_END (8)
#define IPI_DOORBELL_MASK 0xFF
+#define PCI_MSI_DOORBELL_START (16)
+#define PCI_MSI_DOORBELL_NR (16)
+#define PCI_MSI_DOORBELL_END (32)
+#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
+#ifdef CONFIG_PCI_MSI
+static struct irq_domain *armada_370_xp_msi_domain;
+static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DEFINE_MUTEX(msi_used_lock);
+static phys_addr_t msi_doorbell_addr;
+#endif
/*
* In SMP mode:
@@ -87,6 +100,144 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
+#ifdef CONFIG_PCI_MSI
+
+static int armada_370_xp_alloc_msi(void)
+{
+ int hwirq;
+
+ mutex_lock(&msi_used_lock);
+ hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
+ if (hwirq >= PCI_MSI_DOORBELL_NR)
+ hwirq = -ENOSPC;
+ else
+ set_bit(hwirq, msi_used);
+ mutex_unlock(&msi_used_lock);
+
+ return hwirq;
+}
+
+static void armada_370_xp_free_msi(int hwirq)
+{
+ mutex_lock(&msi_used_lock);
+ if (!test_bit(hwirq, msi_used))
+ pr_err("trying to free unused MSI#%d\n", hwirq);
+ else
+ clear_bit(hwirq, msi_used);
+ mutex_unlock(&msi_used_lock);
+}
+
+static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct msi_msg msg;
+ irq_hw_number_t hwirq;
+ int virq;
+
+ hwirq = armada_370_xp_alloc_msi();
+ if (hwirq < 0)
+ return hwirq;
+
+ virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
+ if (!virq) {
+ armada_370_xp_free_msi(hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(virq, desc);
+
+ msg.address_lo = msi_doorbell_addr;
+ msg.address_hi = 0;
+ msg.data = 0xf00 | (hwirq + 16);
+
+ write_msi_msg(virq, &msg);
+ return 0;
+}
+
+static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
+ unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ irq_dispose_mapping(irq);
+ armada_370_xp_free_msi(d->hwirq);
+}
+
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+ .name = "armada_370_xp_msi_irq",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
+ .map = armada_370_xp_msi_map,
+};
+
+static int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
+ struct msi_chip *msi_chip;
+ u32 reg;
+ int ret;
+
+ msi_doorbell_addr = main_int_phys_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS;
+
+ msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
+ if (!msi_chip)
+ return -ENOMEM;
+
+ msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
+ msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+ msi_chip->of_node = node;
+
+ armada_370_xp_msi_domain =
+ irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+ &armada_370_xp_msi_irq_ops,
+ NULL);
+ if (!armada_370_xp_msi_domain) {
+ kfree(msi_chip);
+ return -ENOMEM;
+ }
+
+ ret = of_pci_msi_chip_add(msi_chip);
+ if (ret < 0) {
+ irq_domain_remove(armada_370_xp_msi_domain);
+ kfree(msi_chip);
+ return ret;
+ }
+
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+ | PCI_MSI_DOORBELL_MASK;
+
+ writel(reg, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ /* Unmask IPI interrupt */
+ writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+ return 0;
+}
+#else
+static inline int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SMP
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
@@ -214,12 +365,39 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
if (irqnr > 1022)
break;
- if (irqnr > 0) {
+ if (irqnr > 1) {
irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
irqnr);
handle_IRQ(irqnr, regs);
continue;
}
+
+#ifdef CONFIG_PCI_MSI
+ /* MSI handling */
+ if (irqnr == 1) {
+ u32 msimask, msinr;
+
+ msimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & PCI_MSI_DOORBELL_MASK;
+
+ writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ for (msinr = PCI_MSI_DOORBELL_START;
+ msinr < PCI_MSI_DOORBELL_END; msinr++) {
+ int irq;
+
+ if (!(msimask & BIT(msinr)))
+ continue;
+
+ irq = irq_find_mapping(armada_370_xp_msi_domain,
+ msinr - 16);
+ handle_IRQ(irq, regs);
+ }
+ }
+#endif
+
#ifdef CONFIG_SMP
/* IPI Handling */
if (irqnr == 0) {
@@ -248,12 +426,25 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
+ struct resource main_int_res, per_cpu_int_res;
u32 control;
- main_int_base = of_iomap(node, 0);
- per_cpu_int_base = of_iomap(node, 1);
+ BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+ BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+
+ BUG_ON(!request_mem_region(main_int_res.start,
+ resource_size(&main_int_res),
+ node->full_name));
+ BUG_ON(!request_mem_region(per_cpu_int_res.start,
+ resource_size(&per_cpu_int_res),
+ node->full_name));
+ main_int_base = ioremap(main_int_res.start,
+ resource_size(&main_int_res));
BUG_ON(!main_int_base);
+
+ per_cpu_int_base = ioremap(per_cpu_int_res.start,
+ resource_size(&per_cpu_int_res));
BUG_ON(!per_cpu_int_base);
control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
@@ -262,8 +453,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
irq_domain_add_linear(node, (control >> 2) & 0x3ff,
&armada_370_xp_mpic_irq_ops, NULL);
- if (!armada_370_xp_mpic_domain)
- panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
+ BUG_ON(!armada_370_xp_mpic_domain);
irq_set_default_host(armada_370_xp_mpic_domain);
@@ -280,6 +470,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#endif
+ armada_370_xp_msi_init(node, main_int_res.start);
+
set_handle_irq(armada_370_xp_handle_irq);
return 0;
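The new armada_370_xp_alloc_msi()/armada_370_xp_free_msi() pair above is a small bitmap allocator serialized by a mutex: find the first clear bit, set it, and hand the index out as the MSI hwirq. A standalone sketch of the same idea, assuming a plain uint32_t and pthreads in place of the kernel's DECLARE_BITMAP()/find_first_zero_bit() and mutex primitives:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_MSI 16

static uint32_t msi_used;	/* bit i set => hwirq i is in use */
static pthread_mutex_t msi_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_msi(void)
{
	int hwirq = -1;		/* -1 means no free slot */

	pthread_mutex_lock(&msi_lock);
	for (int i = 0; i < NR_MSI; i++) {
		if (!(msi_used & (1u << i))) {	/* first clear bit */
			msi_used |= 1u << i;
			hwirq = i;
			break;
		}
	}
	pthread_mutex_unlock(&msi_lock);
	return hwirq;
}

static void free_msi(int hwirq)
{
	if (hwirq < 0 || hwirq >= NR_MSI)
		return;
	pthread_mutex_lock(&msi_lock);
	if (!(msi_used & (1u << hwirq)))
		fprintf(stderr, "trying to free unused MSI#%d\n", hwirq);
	else
		msi_used &= ~(1u << hwirq);
	pthread_mutex_unlock(&msi_lock);
}

int main(void)
{
	int a = alloc_msi(), b = alloc_msi();

	printf("allocated hwirqs %d and %d\n", a, b);
	free_msi(a);
	free_msi(b);
	return 0;
}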
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 16c78f1c5ef..1693b8e7f26 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -49,9 +49,11 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
-#include <linux/irqchip/bcm2835.h>
#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
@@ -93,6 +95,8 @@ struct armctrl_ic {
};
static struct armctrl_ic intc __read_mostly;
+static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+ struct pt_regs *regs);
static void armctrl_mask_irq(struct irq_data *d)
{
@@ -164,17 +168,9 @@ static int __init armctrl_of_init(struct device_node *node,
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
- return 0;
-}
-
-static struct of_device_id irq_of_match[] __initconst = {
- { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
- { }
-};
-void __init bcm2835_init_irq(void)
-{
- of_irq_init(irq_of_match);
+ set_handle_irq(bcm2835_handle_irq);
+ return 0;
}
/*
@@ -200,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
}
-asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs)
{
u32 stat, irq;
@@ -222,3 +218,5 @@ asmlinkage void __exception_irq_entry bcm2835_handle_irq(
}
}
}
+
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d0e948084ea..9031171c141 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
+ raw_spin_lock(&irq_controller_lock);
mask = 0xff << shift;
bit = gic_cpu_map[cpu] << shift;
-
- raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
- unsigned long map = 0;
+ unsigned long flags, map = 0;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
@@ -666,7 +667,149 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send a SGI directly to given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send a SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+ BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+ cpu_id = 1 << cpu_id;
+ /* this always happens on GIC0 */
+ writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+ unsigned int cpu_bit;
+
+ if (cpu >= NR_GIC_CPU_IF)
+ return -1;
+ cpu_bit = gic_cpu_map[cpu];
+ if (cpu_bit & (cpu_bit - 1))
+ return -1;
+ return __ffs(cpu_bit);
}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id. The CPU interface mapping
+ * is also updated. Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+ unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+ void __iomem *dist_base;
+ int i, ror_val, cpu = smp_processor_id();
+ u32 val, cur_target_mask, active_mask;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ if (!dist_base)
+ return;
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+
+ cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+ cur_target_mask = 0x01010101 << cur_cpu_id;
+ ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ /* Update the target interface for this logical CPU */
+ gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+ /*
+	 * Find all the peripheral interrupts targeting the current
+ * CPU interface and migrate them to the new CPU interface.
+ * We skip DIST_TARGET 0 to 7 as they are read-only.
+ */
+ for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+ val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+ active_mask = val & cur_target_mask;
+ if (active_mask) {
+ val &= ~active_mask;
+ val |= ror32(active_mask, ror_val);
+ writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+ }
+ }
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ /*
+ * Now let's migrate and clear any potential SGIs that might be
+ * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
+ * is a banked register, we can only forward the SGI using
+ * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
+ * doesn't use that information anyway.
+ *
+ * For the same reason we do not adjust SGI source information
+ * for previously sent SGIs by us to other CPUs either.
+ */
+ for (i = 0; i < 16; i += 4) {
+ int j;
+ val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+ if (!val)
+ continue;
+ writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+ for (j = i; j < i + 4; j++) {
+ if (val & 0xff)
+ writel_relaxed((1 << (new_cpu_id + 16)) | j,
+ dist_base + GIC_DIST_SOFTINT);
+ val >>= 8;
+ }
+ }
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+ if (!gic_dist_physaddr)
+ return 0;
+ return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+ struct resource res;
+ if (of_address_to_resource(node, 0, &res) == 0) {
+ gic_dist_physaddr = res.start;
+ pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+ }
+}
+
+#else
+#define gic_init_physaddr(node) do { } while (0)
#endif
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -850,6 +993,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
percpu_offset = 0;
gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+ if (!gic_cnt)
+ gic_init_physaddr(node);
if (parent) {
irq = irq_of_parse_and_map(node, 0);
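gic_migrate_target() above moves interrupt targets four at a time: each 32-bit GIC_DIST_TARGET word packs four 8-bit CPU-interface masks, so the bits selecting the old interface are masked out and added back rotated by (cur_cpu_id - new_cpu_id) positions. A standalone sketch of that rotate trick, with ror32() reimplemented locally and the register value chosen purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's ror32() helper. */
static uint32_t ror32(uint32_t w, unsigned int s)
{
	return s ? (w >> s) | (w << (32 - s)) : w;
}

int main(void)
{
	unsigned int cur_cpu_id = 0, new_cpu_id = 2;
	uint32_t cur_target_mask = 0x01010101u << cur_cpu_id;
	unsigned int ror_val = (cur_cpu_id - new_cpu_id) & 31;

	/* Four interrupts per word: byte lanes 0..3 target CPU0, CPU2,
	 * CPU1 and CPU0 respectively in this made-up example. */
	uint32_t val = 0x01020401u;
	uint32_t active = val & cur_target_mask;

	if (active) {
		val &= ~active;			/* drop the CPU0 target bits */
		val |= ror32(active, ror_val);	/* re-add them shifted to CPU2 */
	}
	/* Prints 0x04020404: the two CPU0 targets now point at CPU2. */
	printf("retargeted register: 0x%08" PRIx32 "\n", val);
	return 0;
}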
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 2bbb00404cf..8e21ae0bab4 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -469,6 +469,8 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
int __init vic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *regs;
+ u32 interrupt_mask = ~0;
+ u32 wakeup_mask = ~0;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
@@ -477,10 +479,13 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
if (WARN_ON(!regs))
return -EIO;
+ of_property_read_u32(node, "valid-mask", &interrupt_mask);
+ of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
- __vic_init(regs, 0, ~0, ~0, node);
+ __vic_init(regs, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 52377b4bf03..a2e0ed6c9a4 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -481,7 +481,7 @@ void __inline__ outpp(void __iomem *addr, word p)
int diva_os_register_irq(void *context, byte irq, const char *name)
{
int result = request_irq(irq, diva_os_irq_wrapper,
- IRQF_DISABLED | IRQF_SHARED, name, context);
+ IRQF_SHARED, name, context);
return (result);
}
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
index 7cab5c3276c..e1519718ce6 100644
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ b/drivers/isdn/hardware/eicon/um_idi.c
@@ -288,9 +288,9 @@ int divas_um_idi_delete_entity(int adapter_nr, void *entity)
cleanup_entity(e);
diva_os_free(0, e->os_context);
memset(e, 0x00, sizeof(*e));
- diva_os_free(0, e);
DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
+ diva_os_free(0, e);
return (0);
}
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index e74df7c4658..53d487f0c79 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1580,8 +1580,7 @@ icn_addcard(int port, char *id1, char *id2)
}
if (!(card2 = icn_initcard(port, id2))) {
printk(KERN_INFO
- "icn: (%s) half ICN-4B, port 0x%x added\n",
- card2->interface.id, port);
+ "icn: (%s) half ICN-4B, port 0x%x added\n", id2, port);
return 0;
}
card->doubleS0 = 1;
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index baf2686aa8e..02125e6a910 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return -ENOMEM;
}
- for (i = 0; i < 3; i++)
- strcpy(card->s0num[i], sdef.num[i]);
+ for (i = 0; i < 3; i++) {
+ strlcpy(card->s0num[i], sdef.num[i],
+ sizeof(card->s0num[0]));
+ }
break;
case ISDN_PTYPE_1TR6:
if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return -ENOMEM;
}
- strcpy(card->s0num[0], sdef.num[0]);
+ strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
card->s0num[1][0] = '\0';
card->s0num[2][0] = '\0';
break;
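The isdnloop change above bounds the copies into the fixed-size s0num[] buffers instead of trusting the length of user-supplied numbers. A quick userspace illustration of a bounded, always-NUL-terminated copy; strlcpy() is a kernel/BSD extension rather than ISO C, so this sketch uses snprintf() to the same effect:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char s0num[8];
	const char *user_supplied = "0123456789012345";	/* longer than the buffer */

	/* strcpy(s0num, user_supplied) would overflow s0num here;
	 * snprintf() truncates and always terminates instead. */
	snprintf(s0num, sizeof(s0num), "%s", user_supplied);

	printf("stored \"%s\" (strlen %zu, buffer %zu)\n",
	       s0num, strlen(s0num), sizeof(s0num));
	return 0;
}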
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index e47dcb9d1e9..5cefb479c70 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
- struct sockaddr_mISDN *maddr;
int copied, err;
@@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
return err;
- if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
- msg->msg_namelen = sizeof(struct sockaddr_mISDN);
- maddr = (struct sockaddr_mISDN *)msg->msg_name;
+ if (msg->msg_name) {
+ struct sockaddr_mISDN *maddr = msg->msg_name;
+
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
@@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
maddr->sapi = _pms(sk)->ch.addr & 0xFF;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
}
- } else {
- if (msg->msg_namelen)
- printk(KERN_WARNING "%s: too small namelen %d\n",
- __func__, msg->msg_namelen);
- msg->msg_namelen = 0;
+ msg->msg_namelen = sizeof(*maddr);
}
copied = skb->len + MISDN_HEADER_LEN;
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index ca997bd4e81..92acc81f844 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -336,7 +336,7 @@ static int __init sc_init(void)
*/
sc_adapter[cinst]->interrupt = irq[b];
if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
- IRQF_DISABLED, interface->id,
+ 0, interface->id,
(void *)(unsigned long) cinst))
{
kfree(sc_adapter[cinst]->channel);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 875bbe4c962..72156c12303 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -300,6 +300,16 @@ config LEDS_PCA963X
LED driver chip accessed via the I2C bus. Supported
devices include PCA9633 and PCA9634
+config LEDS_PCA9685
+ tristate "LED support for PCA9685 I2C chip"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for LEDs connected to the PCA9685
+ LED driver chip accessed via the I2C bus.
+ The PCA9685 offers 12-bit PWM (4095 levels of brightness) on
+ 16 individual channels.
+
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8979b0b2c85..3cd76dbd9be 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
+obj-$(CONFIG_LEDS_PCA9685) += leds-pca9685.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index a502678cc7f..66d0a57db22 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -161,13 +161,10 @@ static ssize_t show_color_common(struct device *dev, char *buf, int color)
switch (color) {
case RED:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->red);
- break;
case GREEN:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->green);
- break;
case BLUE:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->blue);
- break;
default:
return -EINVAL;
}
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 1f9d8e62d37..db3ba8b4251 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -101,7 +101,6 @@ eledcr:
while (i--)
led_classdev_unregister(&dac->leds[i].ldev);
- spi_set_drvdata(spi, NULL);
return ret;
}
@@ -115,8 +114,6 @@ static int dac124s085_remove(struct spi_device *spi)
cancel_work_sync(&dac->leds[i].work);
}
- spi_set_drvdata(spi, NULL);
-
return 0;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index e8b01e57348..78b0e273a90 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/leds.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
@@ -170,11 +171,11 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
int count, ret;
/* count LEDs in this device, so we know how much to allocate */
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
if (!count)
return ERR_PTR(-ENODEV);
- for_each_child_of_node(np, child)
+ for_each_available_child_of_node(np, child)
if (of_get_gpio(child, 0) == -EPROBE_DEFER)
return ERR_PTR(-EPROBE_DEFER);
@@ -183,7 +184,7 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
if (!priv)
return ERR_PTR(-ENOMEM);
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct gpio_led led = {};
enum of_gpio_flags flags;
const char *state;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fe3bcbb5747..6b553d9f426 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -29,6 +29,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 2585cfd5771..bf006f4e44a 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -17,6 +17,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 351825b96f1..9acc6bb7dee 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -20,6 +20,8 @@
#include <linux/module.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "leds-lp55xx-common.h"
@@ -165,6 +167,7 @@ static int lp55xx_init_led(struct lp55xx_led *led,
led->led_current = pdata->led_config[chan].led_current;
led->max_current = pdata->led_config[chan].max_current;
led->chan_nr = pdata->led_config[chan].chan_nr;
+ led->cdev.default_trigger = pdata->led_config[chan].default_trigger;
if (led->chan_nr >= max_channel) {
dev_err(dev, "Use channel numbers between 0 and %d\n",
@@ -406,18 +409,18 @@ int lp55xx_init_device(struct lp55xx_chip *chip)
if (!pdata || !cfg)
return -EINVAL;
- if (pdata->setup_resources) {
- ret = pdata->setup_resources();
+ if (gpio_is_valid(pdata->enable_gpio)) {
+ ret = devm_gpio_request_one(dev, pdata->enable_gpio,
+ GPIOF_DIR_OUT, "lp5523_enable");
if (ret < 0) {
- dev_err(dev, "setup resoure err: %d\n", ret);
+ dev_err(dev, "could not acquire enable gpio (err=%d)\n",
+ ret);
goto err;
}
- }
- if (pdata->enable) {
- pdata->enable(0);
+ gpio_set_value(pdata->enable_gpio, 0);
usleep_range(1000, 2000); /* Keep enable down at least 1ms */
- pdata->enable(1);
+ gpio_set_value(pdata->enable_gpio, 1);
usleep_range(1000, 2000); /* 500us abs min. */
}
@@ -458,11 +461,8 @@ void lp55xx_deinit_device(struct lp55xx_chip *chip)
if (chip->clk)
clk_disable_unprepare(chip->clk);
- if (pdata->enable)
- pdata->enable(0);
-
- if (pdata->release_resources)
- pdata->release_resources();
+ if (gpio_is_valid(pdata->enable_gpio))
+ gpio_set_value(pdata->enable_gpio, 0);
}
EXPORT_SYMBOL_GPL(lp55xx_deinit_device);
@@ -586,6 +586,8 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_string(child, "chan-name", &cfg[i].name);
of_property_read_u8(child, "led-cur", &cfg[i].led_current);
of_property_read_u8(child, "max-cur", &cfg[i].max_current);
+ cfg[i].default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
i++;
}
@@ -593,6 +595,8 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_string(np, "label", &pdata->label);
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
+ pdata->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+
/* LP8501 specific */
of_property_read_u8(np, "pwr-sel", (u8 *)&pdata->pwr_sel);
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index 8d55a780ca4..f1c704f2243 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -18,6 +18,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 141f13438e8..c7a4230233e 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -30,6 +30,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_data/leds-kirkwood-ns2.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
/*
diff --git a/drivers/leds/leds-pca9685.c b/drivers/leds/leds-pca9685.c
new file mode 100644
index 00000000000..6e1ef3a9d6e
--- /dev/null
+++ b/drivers/leds/leds-pca9685.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Based on leds-pca963x.c driver by
+ * Peter Meerwald <p.meerwald@bct-electronic.com>
+ *
+ * Driver for the NXP PCA9685 12-Bit PWM LED driver chip.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <linux/platform_data/leds-pca9685.h>
+
+/* Register Addresses */
+#define PCA9685_MODE1 0x00
+#define PCA9685_MODE2 0x01
+#define PCA9685_LED0_ON_L 0x06
+#define PCA9685_ALL_LED_ON_L 0xFA
+
+/* MODE1 Register */
+#define PCA9685_ALLCALL 0x00
+#define PCA9685_SLEEP 0x04
+#define PCA9685_AI 0x05
+
+/* MODE2 Register */
+#define PCA9685_INVRT 0x04
+#define PCA9685_OUTDRV 0x02
+
+static const struct i2c_device_id pca9685_id[] = {
+ { "pca9685", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pca9685_id);
+
+struct pca9685_led {
+ struct i2c_client *client;
+ struct work_struct work;
+ u16 brightness;
+ struct led_classdev led_cdev;
+ int led_num; /* 0-15 */
+ char name[32];
+};
+
+static void pca9685_write_msg(struct i2c_client *client, u8 *buf, u8 len)
+{
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0x00,
+ .len = len,
+ .buf = buf
+ };
+ i2c_transfer(client->adapter, &msg, 1);
+}
+
+static void pca9685_all_off(struct i2c_client *client)
+{
+ u8 i2c_buffer[5] = {PCA9685_ALL_LED_ON_L, 0x00, 0x00, 0x00, 0x10};
+ pca9685_write_msg(client, i2c_buffer, 5);
+}
+
+static void pca9685_led_work(struct work_struct *work)
+{
+ struct pca9685_led *pca9685;
+ u8 i2c_buffer[5];
+
+ pca9685 = container_of(work, struct pca9685_led, work);
+ i2c_buffer[0] = PCA9685_LED0_ON_L + 4 * pca9685->led_num;
+ /*
+ * 4095 is the maximum brightness, so we set the ON time to 0x1000
+ * which disables the PWM generator for that LED
+ */
+ if (pca9685->brightness == 4095)
+ *((__le16 *)(i2c_buffer+1)) = cpu_to_le16(0x1000);
+ else
+ *((__le16 *)(i2c_buffer+1)) = 0x0000;
+
+ if (pca9685->brightness == 0)
+ *((__le16 *)(i2c_buffer+3)) = cpu_to_le16(0x1000);
+ else if (pca9685->brightness == 4095)
+ *((__le16 *)(i2c_buffer+3)) = 0x0000;
+ else
+ *((__le16 *)(i2c_buffer+3)) = cpu_to_le16(pca9685->brightness);
+
+ pca9685_write_msg(pca9685->client, i2c_buffer, 5);
+}
+
+static void pca9685_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct pca9685_led *pca9685;
+ pca9685 = container_of(led_cdev, struct pca9685_led, led_cdev);
+ pca9685->brightness = value;
+
+ schedule_work(&pca9685->work);
+}
+
+static int pca9685_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pca9685_led *pca9685;
+ struct pca9685_platform_data *pdata;
+ int err;
+ u8 i;
+
+ pdata = dev_get_platdata(&client->dev);
+ if (pdata) {
+ if (pdata->leds.num_leds < 1 || pdata->leds.num_leds > 15) {
+ dev_err(&client->dev, "board info must claim 1-16 LEDs");
+ return -EINVAL;
+ }
+ }
+
+ pca9685 = devm_kzalloc(&client->dev, 16 * sizeof(*pca9685), GFP_KERNEL);
+ if (!pca9685)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pca9685);
+ pca9685_all_off(client);
+
+ for (i = 0; i < 16; i++) {
+ pca9685[i].client = client;
+ pca9685[i].led_num = i;
+ pca9685[i].name[0] = '\0';
+ if (pdata && i < pdata->leds.num_leds) {
+ if (pdata->leds.leds[i].name)
+ strncpy(pca9685[i].name,
+ pdata->leds.leds[i].name,
+ sizeof(pca9685[i].name)-1);
+ if (pdata->leds.leds[i].default_trigger)
+ pca9685[i].led_cdev.default_trigger =
+ pdata->leds.leds[i].default_trigger;
+ }
+ if (strlen(pca9685[i].name) == 0) {
+ /*
+ * Write adapter and address to the name as well.
+ * Otherwise multiple chips attached to one host would
+ * not work.
+ */
+ snprintf(pca9685[i].name, sizeof(pca9685[i].name),
+ "pca9685:%d:x%.2x:%d",
+ client->adapter->nr, client->addr, i);
+ }
+ pca9685[i].led_cdev.name = pca9685[i].name;
+ pca9685[i].led_cdev.max_brightness = 0xfff;
+ pca9685[i].led_cdev.brightness_set = pca9685_led_set;
+
+ INIT_WORK(&pca9685[i].work, pca9685_led_work);
+ err = led_classdev_register(&client->dev, &pca9685[i].led_cdev);
+ if (err < 0)
+ goto exit;
+ }
+
+ if (pdata)
+ i2c_smbus_write_byte_data(client, PCA9685_MODE2,
+ pdata->outdrv << PCA9685_OUTDRV |
+ pdata->inverted << PCA9685_INVRT);
+ else
+ i2c_smbus_write_byte_data(client, PCA9685_MODE2,
+ PCA9685_TOTEM_POLE << PCA9685_OUTDRV);
+ /* Enable Auto-Increment, enable oscillator, ALLCALL/SUBADDR disabled */
+ i2c_smbus_write_byte_data(client, PCA9685_MODE1, BIT(PCA9685_AI));
+
+ return 0;
+
+exit:
+ while (i--) {
+ led_classdev_unregister(&pca9685[i].led_cdev);
+ cancel_work_sync(&pca9685[i].work);
+ }
+ return err;
+}
+
+static int pca9685_remove(struct i2c_client *client)
+{
+ struct pca9685_led *pca9685 = i2c_get_clientdata(client);
+ u8 i;
+
+ for (i = 0; i < 16; i++) {
+ led_classdev_unregister(&pca9685[i].led_cdev);
+ cancel_work_sync(&pca9685[i].work);
+ }
+ pca9685_all_off(client);
+ return 0;
+}
+
+static struct i2c_driver pca9685_driver = {
+ .driver = {
+ .name = "leds-pca9685",
+ .owner = THIS_MODULE,
+ },
+ .probe = pca9685_probe,
+ .remove = pca9685_remove,
+ .id_table = pca9685_id,
+};
+
+module_i2c_driver(pca9685_driver);
+
+MODULE_AUTHOR("Maximilian Güntner <maximilian.guentner@gmail.com>");
+MODULE_DESCRIPTION("PCA9685 LED Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index bb6f9489854..2848171b857 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -232,7 +232,7 @@ static struct platform_driver led_pwm_driver = {
.driver = {
.name = "leds_pwm",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_pwm_leds_match),
+ .of_match_table = of_pwm_leds_match,
},
};
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index b3256ff0d42..d0a1d8a45c8 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -229,7 +229,7 @@ struct lguest_vq_info {
* make a hypercall. We hand the physical address of the virtqueue so the Host
* knows which virtqueue we're talking about.
*/
-static void lg_notify(struct virtqueue *vq)
+static bool lg_notify(struct virtqueue *vq)
{
/*
* We store our virtqueue information in the "priv" pointer of the
@@ -238,6 +238,7 @@ static void lg_notify(struct virtqueue *vq)
struct lguest_vq_info *lvq = vq->priv;
hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
+ return true;
}
/* An extern declaration inside a C file is bad form. Don't do it. */
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 51692392633..922a1acbf65 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -157,7 +157,7 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
* stack, then the address of this call. This stack layout happens to
* exactly match the stack layout created by an interrupt...
*/
- asm volatile("pushf; lcall *lguest_entry"
+ asm volatile("pushf; lcall *%4"
/*
* This is how we tell GCC that %eax ("a") and %ebx ("b")
* are changed by this routine. The "=" means output.
@@ -169,7 +169,9 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
* physical address of the Guest's top-level page
* directory.
*/
- : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
+ : "0"(pages),
+ "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)),
+ "m"(lguest_entry)
/*
* We tell gcc that all these registers could change,
* which means we don't have to save and restore them in
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 696238b9f0f..d26a312f117 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -103,6 +103,7 @@ config ADB_PMU_LED_IDE
bool "Use front LED as IDE LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
+ depends on IDE_GD_ATA
select LEDS_TRIGGERS
select LEDS_TRIGGER_IDE_DISK
help
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index ac5c8793986..4f12c6f01fe 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -24,6 +24,8 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/machdep.h>
#include <asm/macio.h>
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index cad0e19b47a..4192901cab4 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -25,6 +25,8 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel_stat.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index b3b2d36c009..23b4a3b28db 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -35,6 +35,7 @@
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 283e1b53c6b..dee88e59f0d 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -46,6 +46,8 @@
#include <linux/suspend.h>
#include <linux/cpu.h>
#include <linux/compat.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30b426ed744..f2ccbc3b9fe 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -297,6 +297,17 @@ config DM_MIRROR
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
+config DM_LOG_USERSPACE
+ tristate "Mirror userspace logging"
+ depends on DM_MIRROR && NET
+ select CONNECTOR
+ ---help---
+ The userspace logging module provides a mechanism for
+ relaying the dm-dirty-log API to userspace. Log designs
+ which are more suited to userspace implementation (e.g.
+ shared storage logs) or experimental logs can be implemented
+ by leveraging this framework.
+
config DM_RAID
tristate "RAID 1/4/5/6/10 target"
depends on BLK_DEV_DM
@@ -323,17 +334,6 @@ config DM_RAID
RAID-5, RAID-6 distributes the syndromes across the drives
in one of the available parity distribution methods.
-config DM_LOG_USERSPACE
- tristate "Mirror userspace logging"
- depends on DM_MIRROR && NET
- select CONNECTOR
- ---help---
- The userspace logging module provides a mechanism for
- relaying the dm-dirty-log API to userspace. Log designs
- which are more suited to userspace implementation (e.g.
- shared storage logs) or experimental logs can be implemented
- by leveraging this framework.
-
config DM_ZERO
tristate "Zero target"
depends on BLK_DEV_DM
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index f950c9d29f3..2638417b19a 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -13,15 +13,8 @@ config BCACHE_DEBUG
---help---
Don't select this option unless you're a developer
- Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
- bool "Extended runtime checks"
- depends on BCACHE
- ---help---
- Don't select this option unless you're a developer
-
- Enables extra runtime checks which significantly affect performance
+ Enables extra debugging tools, allows expensive runtime checks to be
+ turned on.
config BCACHE_CLOSURES_DEBUG
bool "Debug closures"
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index e45f5575fd4..2b46bf1d7e4 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,13 +63,12 @@
#include "bcache.h"
#include "btree.h"
+#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
-#define MAX_IN_FLIGHT_DISCARDS 8U
-
/* Bucket heap / gen */
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
@@ -121,75 +120,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
mutex_unlock(&c->bucket_lock);
}
-/* Discard/TRIM */
-
-struct discard {
- struct list_head list;
- struct work_struct work;
- struct cache *ca;
- long bucket;
-
- struct bio bio;
- struct bio_vec bv;
-};
-
-static void discard_finish(struct work_struct *w)
-{
- struct discard *d = container_of(w, struct discard, work);
- struct cache *ca = d->ca;
- char buf[BDEVNAME_SIZE];
-
- if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
- pr_notice("discard error on %s, disabling",
- bdevname(ca->bdev, buf));
- d->ca->discard = 0;
- }
-
- mutex_lock(&ca->set->bucket_lock);
-
- fifo_push(&ca->free, d->bucket);
- list_add(&d->list, &ca->discards);
- atomic_dec(&ca->discards_in_flight);
-
- mutex_unlock(&ca->set->bucket_lock);
-
- closure_wake_up(&ca->set->bucket_wait);
- wake_up_process(ca->alloc_thread);
-
- closure_put(&ca->set->cl);
-}
-
-static void discard_endio(struct bio *bio, int error)
-{
- struct discard *d = container_of(bio, struct discard, bio);
- schedule_work(&d->work);
-}
-
-static void do_discard(struct cache *ca, long bucket)
-{
- struct discard *d = list_first_entry(&ca->discards,
- struct discard, list);
-
- list_del(&d->list);
- d->bucket = bucket;
-
- atomic_inc(&ca->discards_in_flight);
- closure_get(&ca->set->cl);
-
- bio_init(&d->bio);
-
- d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket);
- d->bio.bi_bdev = ca->bdev;
- d->bio.bi_rw = REQ_WRITE|REQ_DISCARD;
- d->bio.bi_max_vecs = 1;
- d->bio.bi_io_vec = d->bio.bi_inline_vecs;
- d->bio.bi_size = bucket_bytes(ca);
- d->bio.bi_end_io = discard_endio;
- bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
- submit_bio(0, &d->bio);
-}
-
/* Allocation */
static inline bool can_inc_bucket_gen(struct bucket *b)
@@ -280,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca)
* multiple times when it can't do anything
*/
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
@@ -305,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca)
if (++checked >= ca->sb.nbuckets) {
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
}
@@ -330,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca)
if (++checked >= ca->sb.nbuckets / 2) {
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
}
@@ -398,16 +328,18 @@ static int bch_allocator_thread(void *arg)
else
break;
- allocator_wait(ca, (int) fifo_free(&ca->free) >
- atomic_read(&ca->discards_in_flight));
-
if (ca->discard) {
- allocator_wait(ca, !list_empty(&ca->discards));
- do_discard(ca, bucket);
- } else {
- fifo_push(&ca->free, bucket);
- closure_wake_up(&ca->set->bucket_wait);
+ mutex_unlock(&ca->set->bucket_lock);
+ blkdev_issue_discard(ca->bdev,
+ bucket_to_sector(ca->set, bucket),
+ ca->sb.block_size, GFP_KERNEL, 0);
+ mutex_lock(&ca->set->bucket_lock);
}
+
+ allocator_wait(ca, !fifo_full(&ca->free));
+
+ fifo_push(&ca->free, bucket);
+ wake_up(&ca->set->bucket_wait);
}
/*
@@ -433,16 +365,40 @@ static int bch_allocator_thread(void *arg)
}
}
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
{
- long r = -1;
-again:
+ DEFINE_WAIT(w);
+ struct bucket *b;
+ long r;
+
+ /* fastpath */
+ if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+ fifo_pop(&ca->free, r);
+ goto out;
+ }
+
+ if (!wait)
+ return -1;
+
+ while (1) {
+ if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+ fifo_pop(&ca->free, r);
+ break;
+ }
+
+ prepare_to_wait(&ca->set->bucket_wait, &w,
+ TASK_UNINTERRUPTIBLE);
+
+ mutex_unlock(&ca->set->bucket_lock);
+ schedule();
+ mutex_lock(&ca->set->bucket_lock);
+ }
+
+ finish_wait(&ca->set->bucket_wait, &w);
+out:
wake_up_process(ca->alloc_thread);
- if (fifo_used(&ca->free) > ca->watermark[watermark] &&
- fifo_pop(&ca->free, r)) {
- struct bucket *b = ca->buckets + r;
-#ifdef CONFIG_BCACHE_EDEBUG
+ if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
@@ -455,36 +411,23 @@ again:
BUG_ON(i == r);
fifo_for_each(i, &ca->unused, iter)
BUG_ON(i == r);
-#endif
- BUG_ON(atomic_read(&b->pin) != 1);
-
- SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
- if (watermark <= WATERMARK_METADATA) {
- SET_GC_MARK(b, GC_MARK_METADATA);
- b->prio = BTREE_PRIO;
- } else {
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
- b->prio = INITIAL_PRIO;
- }
-
- return r;
}
- trace_bcache_alloc_fail(ca);
+ b = ca->buckets + r;
- if (cl) {
- closure_wait(&ca->set->bucket_wait, cl);
+ BUG_ON(atomic_read(&b->pin) != 1);
- if (closure_blocking(cl)) {
- mutex_unlock(&ca->set->bucket_lock);
- closure_sync(cl);
- mutex_lock(&ca->set->bucket_lock);
- goto again;
- }
+ SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+ if (watermark <= WATERMARK_METADATA) {
+ SET_GC_MARK(b, GC_MARK_METADATA);
+ b->prio = BTREE_PRIO;
+ } else {
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ b->prio = INITIAL_PRIO;
}
- return -1;
+ return r;
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -501,7 +444,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
- struct bkey *k, int n, struct closure *cl)
+ struct bkey *k, int n, bool wait)
{
int i;
@@ -514,7 +457,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
for (i = 0; i < n; i++) {
struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, watermark, cl);
+ long b = bch_bucket_alloc(ca, watermark, wait);
if (b == -1)
goto err;
@@ -529,22 +472,202 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
return 0;
err:
bch_bucket_free(c, k);
- __bkey_put(c, k);
+ bkey_put(c, k);
return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
- struct bkey *k, int n, struct closure *cl)
+ struct bkey *k, int n, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+ ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
+/* Sector allocator */
+
+struct open_bucket {
+ struct list_head list;
+ unsigned last_write_point;
+ unsigned sectors_free;
+ BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache at the
+ * same time, you'll get better cache utilization if you try to segregate their
+ * data and preserve locality.
+ *
+ * For example, say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ const struct bkey *search,
+ unsigned write_point,
+ struct bkey *alloc)
+{
+ struct open_bucket *ret, *ret_task = NULL;
+
+ list_for_each_entry_reverse(ret, &c->data_buckets, list)
+ if (!bkey_cmp(&ret->key, search))
+ goto found;
+ else if (ret->last_write_point == write_point)
+ ret_task = ret;
+
+ ret = ret_task ?: list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+ ret->sectors_free = c->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+
+ if (!ret->sectors_free)
+ ret = NULL;
+
+ return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, sets k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If s->writeback is true, will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+ unsigned write_point, unsigned write_prio, bool wait)
+{
+ struct open_bucket *b;
+ BKEY_PADDED(key) alloc;
+ unsigned i;
+
+ /*
+ * We might have to allocate a new bucket, which we can't do with a
+ * spinlock held. So if we have to allocate, we drop the lock, allocate
+ * and then retry. KEY_PTRS() indicates whether alloc points to
+ * allocated bucket(s).
+ */
+
+ bkey_init(&alloc.key);
+ spin_lock(&c->data_bucket_lock);
+
+ while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+ unsigned watermark = write_prio
+ ? WATERMARK_MOVINGGC
+ : WATERMARK_NONE;
+
+ spin_unlock(&c->data_bucket_lock);
+
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+ }
+
+ /*
+ * If we had to allocate, we might race and not need to allocate the
+ * second time we call pick_data_bucket(). If we allocated a bucket but
+ * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+ */
+ if (KEY_PTRS(&alloc.key))
+ bkey_put(c, &alloc.key);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ EBUG_ON(ptr_stale(c, &b->key, i));
+
+ /* Set up the pointer to the space we're allocating: */
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ k->ptr[i] = b->key.ptr[i];
+
+ sectors = min(sectors, b->sectors_free);
+
+ SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+ SET_KEY_SIZE(k, sectors);
+ SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+ /*
+ * Move b to the end of the lru, and keep track of what this bucket was
+ * last used for:
+ */
+ list_move_tail(&b->list, &c->data_buckets);
+ bkey_copy_key(&b->key, k);
+ b->last_write_point = write_point;
+
+ b->sectors_free -= sectors;
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+ atomic_long_add(sectors,
+ &PTR_CACHE(c, &b->key, i)->sectors_written);
+ }
+
+ if (b->sectors_free < c->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+ * k takes refcounts on the buckets it points to until it's inserted
+ * into the btree, but if we're done with this bucket we just transfer
+ * pick_data_bucket()'s refcount.
+ */
+ if (b->sectors_free)
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+ spin_unlock(&c->data_bucket_lock);
+ return true;
+}
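
Because the grant can be smaller than the request, a caller consumes KEY_SIZE(k) per call and re-issues for the remainder. A worked example, using hypothetical numbers (assuming the first open bucket has its full 1024 sectors free):

/*
 * Hypothetical numbers: bucket_size == 1024 sectors, write of 1536 sectors
 * starting at sector 4096, so the caller passes in KEY_OFFSET(k) == 4096.
 *
 *   1st call: 1024 sectors granted
 *             KEY_OFFSET(k) == 5120, KEY_SIZE(k) == 1024 (bucket now full)
 *   2nd call: caller builds a new key with KEY_OFFSET == 5120, asks for 512;
 *             a fresh bucket is picked, KEY_OFFSET == 5632, KEY_SIZE == 512
 *
 * The write is fully placed once the KEY_SIZE() grants sum to 1536.
 */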
+
/* Init */
+void bch_open_buckets_free(struct cache_set *c)
+{
+ struct open_bucket *b;
+
+ while (!list_empty(&c->data_buckets)) {
+ b = list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+ int i;
+
+ spin_lock_init(&c->data_bucket_lock);
+
+ for (i = 0; i < 6; i++) {
+ struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ list_add(&b->list, &c->data_buckets);
+ }
+
+ return 0;
+}
+
int bch_cache_allocator_start(struct cache *ca)
{
struct task_struct *k = kthread_run(bch_allocator_thread,
@@ -556,22 +679,8 @@ int bch_cache_allocator_start(struct cache *ca)
return 0;
}
-void bch_cache_allocator_exit(struct cache *ca)
-{
- struct discard *d;
-
- while (!list_empty(&ca->discards)) {
- d = list_first_entry(&ca->discards, struct discard, list);
- cancel_work_sync(&d->work);
- list_del(&d->list);
- kfree(d);
- }
-}
-
int bch_cache_allocator_init(struct cache *ca)
{
- unsigned i;
-
/*
* Reserve:
* Prio/gen writes first
@@ -589,15 +698,5 @@ int bch_cache_allocator_init(struct cache *ca)
ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
ca->watermark[WATERMARK_MOVINGGC];
- for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
- struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- d->ca = ca;
- INIT_WORK(&d->work, discard_finish);
- list_add(&d->list, &ca->discards);
- }
-
return 0;
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0f12382aa35..4beb55a0ff3 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -177,6 +177,7 @@
#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -210,168 +211,6 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_METADATA 2
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
-struct bkey {
- uint64_t high;
- uint64_t low;
- uint64_t ptr[];
-};
-
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key) \
- union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_MAX_VERSION 4
-
-#define SB_SECTOR 8
-#define SB_SIZE 4096
-#define SB_LABEL_SIZE 32
-#define SB_JOURNAL_BUCKETS 256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET 8
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
-
-struct cache_sb {
- uint64_t csum;
- uint64_t offset; /* sector where this sb was written */
- uint64_t version;
-
- uint8_t magic[16];
-
- uint8_t uuid[16];
- union {
- uint8_t set_uuid[16];
- uint64_t set_magic;
- };
- uint8_t label[SB_LABEL_SIZE];
-
- uint64_t flags;
- uint64_t seq;
- uint64_t pad[8];
-
- union {
- struct {
- /* Cache devices */
- uint64_t nbuckets; /* device size */
-
- uint16_t block_size; /* sectors */
- uint16_t bucket_size; /* sectors */
-
- uint16_t nr_in_set;
- uint16_t nr_this_dev;
- };
- struct {
- /* Backing devices */
- uint64_t data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- uint32_t last_mount; /* time_t */
-
- uint16_t first_bucket;
- union {
- uint16_t njournal_buckets;
- uint16_t keys;
- };
- uint64_t d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH 0U
-#define CACHE_MODE_WRITEBACK 1U
-#define CACHE_MODE_WRITEAROUND 2U
-#define CACHE_MODE_NONE 3U
-BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE 0U
-#define BDEV_STATE_CLEAN 1U
-#define BDEV_STATE_DIRTY 2U
-#define BDEV_STATE_STALE 3U
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_VERSION 1
-
-/*
- * This is the on disk format for btree nodes - a btree node on disk is a list
- * of these; within each set the keys are sorted
- */
-struct bset {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t keys;
-
- union {
- struct bkey start[0];
- uint64_t d[0];
- };
-};
-
-/*
- * On disk format for priorities and gens - see super.c near prio_write() for
- * more.
- */
-struct prio_set {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t pad;
-
- uint64_t next_bucket;
-
- struct bucket_disk {
- uint16_t prio;
- uint8_t gen;
- } __attribute((packed)) data[];
-};
-
-struct uuid_entry {
- union {
- struct {
- uint8_t uuid[16];
- uint8_t label[32];
- uint32_t first_reg;
- uint32_t last_reg;
- uint32_t invalidated;
-
- uint32_t flags;
- /* Size of flash only volumes */
- uint64_t sectors;
- };
-
- uint8_t pad[128];
- };
-};
-
-BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
-
#include "journal.h"
#include "stats.h"
struct search;
@@ -384,8 +223,6 @@ struct keybuf_key {
void *private;
};
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
struct keybuf {
struct bkey last_scanned;
spinlock_t lock;
@@ -400,7 +237,7 @@ struct keybuf {
struct rb_root keys;
-#define KEYBUF_NR 100
+#define KEYBUF_NR 500
DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
@@ -429,16 +266,15 @@ struct bcache_device {
struct gendisk *disk;
- /* If nonzero, we're closing */
- atomic_t closing;
-
- /* If nonzero, we're detaching/unregistering from cache set */
- atomic_t detaching;
- int flush_done;
+ unsigned long flags;
+#define BCACHE_DEV_CLOSING 0
+#define BCACHE_DEV_DETACHING 1
+#define BCACHE_DEV_UNLINK_DONE 2
- uint64_t nr_stripes;
- unsigned stripe_size_bits;
+ unsigned nr_stripes;
+ unsigned stripe_size;
atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
@@ -509,7 +345,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
- struct closure_with_timer writeback;
+ struct task_struct *writeback_thread;
struct keybuf writeback_keys;
@@ -527,8 +363,8 @@ struct cached_dev {
unsigned sequential_cutoff;
unsigned readahead;
- unsigned sequential_merge:1;
unsigned verify:1;
+ unsigned bypass_torture_test:1;
unsigned partial_stripes_expensive:1;
unsigned writeback_metadata:1;
@@ -620,15 +456,6 @@ struct cache {
bool discard; /* Get rid of? */
- /*
- * We preallocate structs for issuing discards to buckets, and keep them
- * on this list when they're not in use; do_discard() issues discards
- * whenever there's work to do and is called by free_some_buckets() and
- * when a discard finishes.
- */
- atomic_t discards_in_flight;
- struct list_head discards;
-
struct journal_device journal;
/* The rest of this all shows up in sysfs */
@@ -649,7 +476,6 @@ struct gc_stat {
size_t nkeys;
uint64_t data; /* sectors */
- uint64_t dirty; /* sectors */
unsigned in_use; /* percent */
};
@@ -744,8 +570,8 @@ struct cache_set {
* basically a lock for this that we can wait on asynchronously. The
* btree_root() macro releases the lock when it returns.
*/
- struct closure *try_harder;
- struct closure_waitlist try_wait;
+ struct task_struct *try_harder;
+ wait_queue_head_t try_wait;
uint64_t try_harder_start;
/*
@@ -759,7 +585,7 @@ struct cache_set {
* written.
*/
atomic_t prio_blocked;
- struct closure_waitlist bucket_wait;
+ wait_queue_head_t bucket_wait;
/*
* For any bio we don't skip we subtract the number of sectors from
@@ -782,7 +608,7 @@ struct cache_set {
struct gc_stat gc_stats;
size_t nbuckets;
- struct closure_with_waitlist gc;
+ struct task_struct *gc_thread;
/* Where in the btree gc currently is */
struct bkey gc_done;
@@ -795,11 +621,10 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
- struct closure moving_gc;
- struct closure_waitlist moving_gc_wait;
+ wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
- atomic_t in_flight;
+ struct semaphore moving_in_flight;
struct btree *root;
@@ -841,22 +666,27 @@ struct cache_set {
unsigned congested_read_threshold_us;
unsigned congested_write_threshold_us;
- spinlock_t sort_time_lock;
struct time_stats sort_time;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
- spinlock_t btree_read_time_lock;
struct time_stats btree_read_time;
struct time_stats try_harder_time;
atomic_long_t cache_read_races;
atomic_long_t writeback_keys_done;
atomic_long_t writeback_keys_failed;
+
+ enum {
+ ON_ERROR_UNREGISTER,
+ ON_ERROR_PANIC,
+ } on_error;
unsigned error_limit;
unsigned error_decay;
+
unsigned short journal_delay_ms;
unsigned verify:1;
unsigned key_merging_disabled:1;
+ unsigned expensive_debug_checks:1;
unsigned gc_always_rewrite:1;
unsigned shrinker_disabled:1;
unsigned copy_gc_enabled:1;
@@ -865,21 +695,6 @@ struct cache_set {
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
};
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
- return c->key_merging_disabled;
-#else
- return 0;
-#endif
-}
-
-static inline bool SB_IS_BDEV(const struct cache_sb *sb)
-{
- return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-}
-
struct bbio {
unsigned submit_time_us;
union {
@@ -933,59 +748,6 @@ static inline unsigned local_clock_us(void)
#define prio_buckets(c) \
DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
-
-#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC)
-#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC)
-#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC)
-
-/* Bkey fields: all units are in sectors */
-
-#define KEY_FIELD(name, field, offset, size) \
- BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size) \
- static inline uint64_t name(const struct bkey *k, unsigned i) \
- { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \
- \
- static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
- { \
- k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \
- k->ptr[i] |= v << offset; \
- }
-
-KEY_FIELD(KEY_PTRS, high, 60, 3)
-KEY_FIELD(HEADER_SIZE, high, 58, 2)
-KEY_FIELD(KEY_CSUM, high, 56, 2)
-KEY_FIELD(KEY_PINNED, high, 55, 1)
-KEY_FIELD(KEY_DIRTY, high, 36, 1)
-
-KEY_FIELD(KEY_SIZE, high, 20, 16)
-KEY_FIELD(KEY_INODE, high, 0, 20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline uint64_t KEY_OFFSET(const struct bkey *k)
-{
- return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
-{
- k->low = v;
-}
-
-PTR_FIELD(PTR_DEV, 51, 12)
-PTR_FIELD(PTR_OFFSET, 8, 43)
-PTR_FIELD(PTR_GEN, 0, 8)
-
-#define PTR_CHECK_DEV ((1 << 12) - 1)
-
-#define PTR(gen, offset, dev) \
- ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
-
static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
return s >> c->bucket_bits;
@@ -1024,27 +786,11 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
/* Btree key macros */
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(dev, sector, len) \
-((struct bkey) { \
- .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
- .low = (sector) \
-})
-
static inline void bkey_init(struct bkey *k)
{
- *k = KEY(0, 0, 0);
+ *k = ZERO_KEY;
}
-#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
-#define ZERO_KEY KEY(0, 0, 0)
-
/*
* This is used for various on disk data structures - cache_sb, prio_set, bset,
* jset: The checksum is _always_ the first 8 bytes of these structs
@@ -1094,14 +840,6 @@ do { \
for (b = (ca)->buckets + (ca)->sb.first_bucket; \
b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-static inline void __bkey_put(struct cache_set *c, struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-}
-
static inline void cached_dev_put(struct cached_dev *dc)
{
if (atomic_dec_and_test(&dc->count))
@@ -1173,13 +911,15 @@ uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);
-long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
void bch_bucket_free(struct cache_set *, struct bkey *);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, struct closure *);
+ struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, struct closure *);
+ struct bkey *, int, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+ unsigned, unsigned, bool);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);
@@ -1187,7 +927,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);
void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);
-extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
@@ -1220,15 +960,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+void bch_open_buckets_free(struct cache_set *);
int bch_cache_allocator_start(struct cache *ca);
-void bch_cache_allocator_exit(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);
void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 22d1ae72c28..7d388b8bb50 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -14,22 +14,12 @@
/* Keylists */
-void bch_keylist_copy(struct keylist *dest, struct keylist *src)
-{
- *dest = *src;
-
- if (src->list == src->d) {
- size_t n = (uint64_t *) src->top - src->d;
- dest->top = (struct bkey *) &dest->d[n];
- dest->list = dest->d;
- }
-}
-
int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
- unsigned oldsize = (uint64_t *) l->top - l->list;
- unsigned newsize = oldsize + 2 + nptrs;
- uint64_t *new;
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + 2 + nptrs;
+ uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+ uint64_t *new_keys;
/* The journalling code doesn't handle the case where the keys to insert
 * are bigger than an empty write: If we just return -ENOMEM here,
@@ -45,24 +35,23 @@ int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
roundup_pow_of_two(oldsize) == newsize)
return 0;
- new = krealloc(l->list == l->d ? NULL : l->list,
- sizeof(uint64_t) * newsize, GFP_NOIO);
+ new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
- if (!new)
+ if (!new_keys)
return -ENOMEM;
- if (l->list == l->d)
- memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+ if (!old_keys)
+ memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
- l->list = new;
- l->top = (struct bkey *) (&l->list[oldsize]);
+ l->keys_p = new_keys;
+ l->top_p = new_keys + oldsize;
return 0;
}
struct bkey *bch_keylist_pop(struct keylist *l)
{
- struct bkey *k = l->bottom;
+ struct bkey *k = l->keys;
if (k == l->top)
return NULL;
@@ -73,21 +62,20 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k;
}
-/* Pointer validation */
-
-bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
+void bch_keylist_pop_front(struct keylist *l)
{
- unsigned i;
- char buf[80];
+ l->top_p -= bkey_u64s(l->keys);
- if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
- goto bad;
+ memmove(l->keys,
+ bkey_next(l->keys),
+ bch_keylist_bytes(l));
+}
- if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
- goto bad;
+/* Pointer validation */
- if (!KEY_SIZE(k))
- return true;
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -98,13 +86,83 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
if (KEY_SIZE(k) + r > c->sb.bucket_size ||
bucket < ca->sb.first_bucket ||
bucket >= ca->sb.nbuckets)
- goto bad;
+ return true;
}
return false;
+}
+
+bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ if (KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
bad:
bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
+ cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+ unsigned ptr)
+{
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ if (b->level) {
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto err;
+
+ } else {
+ if (g->prio == BTREE_PRIO)
+ goto err;
+
+ if (KEY_DIRTY(k) &&
+ b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_DIRTY)
+ goto err;
+ }
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
}
@@ -118,64 +176,29 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
bch_ptr_invalid(b, k))
return true;
- if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
- return true;
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (!ptr_available(b->c, k, i))
+ return true;
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(b->c, k, i)) {
- g = PTR_BUCKET(b->c, k, i);
- stale = ptr_stale(b->c, k, i);
+ g = PTR_BUCKET(b->c, k, i);
+ stale = ptr_stale(b->c, k, i);
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
- stale, b->c->need_gc);
+ btree_bug_on(stale > 96, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
- b, "stale dirty pointer");
+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+ b, "stale dirty pointer");
- if (stale)
- return true;
+ if (stale)
+ return true;
-#ifdef CONFIG_BCACHE_EDEBUG
- if (!mutex_trylock(&b->c->bucket_lock))
- continue;
-
- if (b->level) {
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
- (b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_METADATA))
- goto bug;
-
- } else {
- if (g->prio == BTREE_PRIO)
- goto bug;
-
- if (KEY_DIRTY(k) &&
- b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_DIRTY)
- goto bug;
- }
- mutex_unlock(&b->c->bucket_lock);
-#endif
- }
+ if (expensive_debug_checks(b->c) &&
+ ptr_bad_expensive_checks(b, k, i))
+ return true;
+ }
return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
- mutex_unlock(&b->c->bucket_lock);
-
- {
- char buf[80];
-
- bch_bkey_to_text(buf, sizeof(buf), k);
- btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
- }
- return true;
-#endif
}
/* Key/pointer manipulation */
@@ -458,16 +481,8 @@ static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
-#ifdef CONFIG_X86_64
- asm("shrd %[shift],%[high],%[low]"
- : [low] "+Rm" (low)
- : [high] "R" (high),
- [shift] "ci" (shift)
- : "cc");
-#else
low >>= shift;
low |= (high << 1) << (63U - shift);
-#endif
return low;
}
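
The remaining portable body computes the low 64 bits of the 128-bit value high:low shifted right by shift; splitting the left shift as (high << 1) << (63U - shift) keeps every individual shift amount below 64 bits. A quick worked check with hypothetical inputs:

/*
 * Worked check (hypothetical inputs): high = 0x1, low = 0x0, shift = 4.
 * The 128-bit value 0x1_0000_0000_0000_0000 >> 4 has low word
 * 0x1000_0000_0000_0000.
 *
 *   low >>= 4                -> 0x0
 *   (high << 1) << (63 - 4)  -> 0x2 << 59 == 0x1000000000000000
 *   low |= ...               -> 0x1000000000000000, as expected
 */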
@@ -686,7 +701,7 @@ void bch_bset_init_next(struct btree *b)
} else
get_random_bytes(&i->seq, sizeof(uint64_t));
- i->magic = bset_magic(b->c);
+ i->magic = bset_magic(&b->c->sb);
i->version = 0;
i->keys = 0;
@@ -824,16 +839,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
} else
i = bset_search_write_set(b, t, search);
-#ifdef CONFIG_BCACHE_EDEBUG
- BUG_ON(bset_written(b, t) &&
- i.l != t->data->start &&
- bkey_cmp(tree_to_prev_bkey(t,
- inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
- search) > 0);
+ if (expensive_debug_checks(b->c)) {
+ BUG_ON(bset_written(b, t) &&
+ i.l != t->data->start &&
+ bkey_cmp(tree_to_prev_bkey(t,
+ inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+ search) > 0);
- BUG_ON(i.r != end(t->data) &&
- bkey_cmp(i.r, search) <= 0);
-#endif
+ BUG_ON(i.r != end(t->data) &&
+ bkey_cmp(i.r, search) <= 0);
+ }
while (likely(i.l != i.r) &&
bkey_cmp(i.l, search) <= 0)
@@ -844,6 +859,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
/* Btree iterator */
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
@@ -867,12 +889,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
}
struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
- struct bkey *search, struct bset_tree *start)
+ struct bkey *search, struct bset_tree *start)
{
struct bkey *ret = NULL;
iter->size = ARRAY_SIZE(iter->data);
iter->used = 0;
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
for (; start <= &b->sets[b->nsets]; start++) {
ret = bch_bset_search(b, start, search);
bch_btree_iter_push(iter, ret, end(start->data));
@@ -887,6 +913,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
struct bkey *ret = NULL;
if (!btree_iter_end(iter)) {
+ bch_btree_iter_next_check(iter);
+
ret = iter->data->k;
iter->data->k = bkey_next(iter->data->k);
@@ -916,14 +944,6 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
return ret;
}
-struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
-{
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, search);
- return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-}
-
/* Mergesort */
static void sort_key_next(struct btree_iter *iter,
@@ -998,7 +1018,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
pr_debug("sorted %i keys", out->keys);
- bch_check_key_order(b, out);
}
static void __btree_sort(struct btree *b, struct btree_iter *iter,
@@ -1029,7 +1048,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
* memcpy()
*/
- out->magic = bset_magic(b->c);
+ out->magic = bset_magic(&b->c->sb);
out->seq = b->sets[0].data->seq;
out->version = b->sets[0].data->version;
swap(out, b->sets[0].data);
@@ -1050,24 +1069,21 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
if (b->written)
bset_build_written_tree(b);
- if (!start) {
- spin_lock(&b->c->sort_time_lock);
+ if (!start)
bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
- }
}
void bch_btree_sort_partial(struct btree *b, unsigned start)
{
- size_t oldsize = 0, order = b->page_order, keys = 0;
+ size_t order = b->page_order, keys = 0;
struct btree_iter iter;
+ int oldsize = bch_count_data(b);
+
__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
BUG_ON(b->sets[b->nsets].data == write_block(b) &&
(b->sets[b->nsets].size || b->nsets));
- if (b->written)
- oldsize = bch_count_data(b);
if (start) {
unsigned i;
@@ -1083,7 +1099,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
__btree_sort(b, &iter, start, order, false);
- EBUG_ON(b->written && bch_count_data(b) != oldsize);
+ EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
}
void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
@@ -1101,9 +1117,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
btree_mergesort(b, new->sets->data, &iter, false, true);
- spin_lock(&b->c->sort_time_lock);
bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
bkey_copy_key(&new->key, &b->key);
new->sets->size = 0;
@@ -1148,16 +1162,16 @@ out:
/* Sysfs stuff */
struct bset_stats {
+ struct btree_op op;
size_t nodes;
size_t sets_written, sets_unwritten;
size_t bytes_written, bytes_unwritten;
size_t floats, failed;
};
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
- struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
{
- struct bkey *k;
+ struct bset_stats *stats = container_of(op, struct bset_stats, op);
unsigned i;
stats->nodes++;
@@ -1182,30 +1196,19 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
}
}
- if (b->level) {
- struct btree_iter iter;
-
- for_each_key_filter(b, k, &iter, bch_ptr_bad) {
- int ret = btree(bset_stats, k, b, op, stats);
- if (ret)
- return ret;
- }
- }
-
- return 0;
+ return MAP_CONTINUE;
}
int bch_bset_print_stats(struct cache_set *c, char *buf)
{
- struct btree_op op;
struct bset_stats t;
int ret;
- bch_btree_op_init_stack(&op);
memset(&t, 0, sizeof(struct bset_stats));
+ bch_btree_op_init(&t.op, -1);
- ret = btree_root(bset_stats, c, &op, &t);
- if (ret)
+ ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+ if (ret < 0)
return ret;
return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae115a253d7..1d3c24f9fa0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -148,6 +148,9 @@
struct btree_iter {
size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree *b;
+#endif
struct btree_iter_set {
struct bkey *k, *end;
} data[MAX_BSETS];
@@ -193,54 +196,26 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
-static inline size_t bkey_u64s(const struct bkey *k)
-{
- BUG_ON(KEY_CSUM(k) > 1);
- return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
-}
-
-static inline size_t bkey_bytes(const struct bkey *k)
-{
- return bkey_u64s(k) * sizeof(uint64_t);
-}
-
-static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
-{
- memcpy(dest, src, bkey_bytes(src));
-}
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
- if (!src)
- src = &KEY(0, 0, 0);
-
- SET_KEY_INODE(dest, KEY_INODE(src));
- SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
- uint64_t *d = (void *) k;
- return (struct bkey *) (d + bkey_u64s(k));
-}
-
/* Keylists */
struct keylist {
- struct bkey *top;
union {
- uint64_t *list;
- struct bkey *bottom;
+ struct bkey *keys;
+ uint64_t *keys_p;
+ };
+ union {
+ struct bkey *top;
+ uint64_t *top_p;
};
/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE 16
- uint64_t d[KEYLIST_INLINE];
+ uint64_t inline_keys[KEYLIST_INLINE];
};
static inline void bch_keylist_init(struct keylist *l)
{
- l->top = (void *) (l->list = l->d);
+ l->top_p = l->keys_p = l->inline_keys;
}
static inline void bch_keylist_push(struct keylist *l)
@@ -256,17 +231,32 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
static inline bool bch_keylist_empty(struct keylist *l)
{
- return l->top == (void *) l->list;
+ return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+ l->top = l->keys;
}
static inline void bch_keylist_free(struct keylist *l)
{
- if (l->list != l->d)
- kfree(l->list);
+ if (l->keys_p != l->inline_keys)
+ kfree(l->keys_p);
+}
+
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+ return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+ return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
-void bch_keylist_copy(struct keylist *, struct keylist *);
struct bkey *bch_keylist_pop(struct keylist *);
+void bch_keylist_pop_front(struct keylist *);
int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
@@ -287,7 +277,9 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
}
const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
+
bool bch_ptr_bad(struct btree *, const struct bkey *);
static inline uint8_t gen_after(uint8_t a, uint8_t b)
@@ -311,7 +303,6 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
struct btree *, ptr_filter_fn);
@@ -361,12 +352,30 @@ void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
const struct bkey *);
+/*
+ * Returns the first key that is strictly greater than search
+ */
static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
const struct bkey *search)
{
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
+#define PRECEDING_KEY(_k) \
+({ \
+ struct bkey *_ret = NULL; \
+ \
+ if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
+ _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
+ \
+ if (!_ret->low) \
+ _ret->high--; \
+ _ret->low--; \
+ } \
+ \
+ _ret; \
+})
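
Since bch_bset_search() returns the first key strictly greater than its argument, PRECEDING_KEY() turns a "find this key or its successor" lookup into that form: it yields the key one step earlier in (inode, offset) order, borrowing into the inode field when the offset is zero. A worked example with hypothetical values:

/*
 * Hypothetical values: _k has KEY_INODE == 5, KEY_OFFSET == 0.
 *
 *   KEY(5, 0, 0)        -> high = (1ULL << 63) | 5, low = 0
 *   low == 0, so high-- -> inode becomes 4
 *   low--               -> offset becomes 0xffffffffffffffff
 *
 * PRECEDING_KEY(_k) therefore sorts as (inode 4, offset ~0), immediately
 * before (inode 5, offset 0), and searching for it returns the first key
 * that is >= _k.
 */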
+
bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
void bch_btree_sort_lazy(struct btree *);
void bch_btree_sort_into(struct btree *, struct btree *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f42fc7ed9cd..5e2765aadce 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,12 +23,13 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include "writeback.h"
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/freezer.h>
#include <linux/hash.h>
+#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
@@ -88,15 +89,13 @@
* Test module load/unload
*/
-static const char * const op_types[] = {
- "insert", "replace"
+enum {
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
};
-static const char *op_type(struct btree_op *op)
-{
- return op_types[op->type];
-}
-
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
@@ -105,23 +104,89 @@ static const char *op_type(struct btree_op *op)
#define PTR_HASH(c, k) \
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
-struct workqueue_struct *bch_gc_wq;
static struct workqueue_struct *btree_io_wq;
-void bch_btree_op_init_stack(struct btree_op *op)
+static inline bool should_split(struct btree *b)
{
- memset(op, 0, sizeof(struct btree_op));
- closure_init_stack(&op->cl);
- op->lock = -1;
- bch_keylist_init(&op->keys);
+ struct bset *i = write_block(b);
+ return b->written >= btree_blocks(b) ||
+ (b->written + __set_blocks(i, i->keys + 15, b->c)
+ > btree_blocks(b));
}
+#define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...) \
+({ \
+ int _r, l = (b)->level - 1; \
+ bool _w = l <= (op)->lock; \
+ struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
+ if (!IS_ERR(_child)) { \
+ _child->parent = (b); \
+ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _child); \
+ } else \
+ _r = PTR_ERR(_child); \
+ _r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...) \
+({ \
+ int _r = -EINTR; \
+ do { \
+ struct btree *_b = (c)->root; \
+ bool _w = insert_lock(op, _b); \
+ rw_lock(_w, _b, _b->level); \
+ if (_b == (c)->root && \
+ _w == insert_lock(op, _b)) { \
+ _b->parent = NULL; \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ } \
+ rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c); \
+ if (_r == -ENOSPC) { \
+ wait_event((c)->try_wait, \
+ !(c)->try_harder); \
+ _r = -EINTR; \
+ } \
+ } while (_r == -EINTR); \
+ \
+ _r; \
+})
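
The op->lock / -EINTR convention described above can be hard to see in the macros alone. The following is a hypothetical callback shaped to fit them; the function and the leaf_needs_rewrite() predicate are invented for illustration and are not part of this patch:

/* Invented sketch of the read-to-write lock upgrade convention. */
static int bch_btree_example_op(struct btree *b, struct btree_op *op)
{
	if (leaf_needs_rewrite(b) &&		/* invented predicate */
	    op->lock < b->level) {
		/*
		 * We only hold a read lock at this level; raise op->lock and
		 * return -EINTR so btree_root() retries the traversal and
		 * hands us a write lock.
		 */
		op->lock = b->level;
		return -EINTR;
	}

	/* insert_lock(op, b) is now true whenever a rewrite is needed,
	 * so modifying the node here is safe. */
	return 0;
}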
+
/* Btree key manipulation */
-static void bkey_put(struct cache_set *c, struct bkey *k, int level)
+void bkey_put(struct cache_set *c, struct bkey *k)
{
- if ((level && KEY_OFFSET(k)) || !level)
- __bkey_put(c, k);
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i))
+ atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
/* Btree IO */
@@ -145,6 +210,10 @@ static void bch_btree_node_read_done(struct btree *b)
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter->used = 0;
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
if (!i->seq)
goto err;
@@ -160,7 +229,7 @@ static void bch_btree_node_read_done(struct btree *b)
goto err;
err = "bad magic";
- if (i->magic != bset_magic(b->c))
+ if (i->magic != bset_magic(&b->c->sb))
goto err;
err = "bad checksum";
@@ -248,10 +317,7 @@ void bch_btree_node_read(struct btree *b)
goto err;
bch_btree_node_read_done(b);
-
- spin_lock(&b->c->btree_read_time_lock);
bch_time_stats_update(&b->c->btree_read_time, start_time);
- spin_unlock(&b->c->btree_read_time_lock);
return;
err:
@@ -327,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
b->bio = bch_bbio_alloc(b->c);
b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = &b->io.cl;
+ b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
@@ -383,7 +449,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
BUG_ON(b->written >= btree_blocks(b));
BUG_ON(b->written && !i->keys);
BUG_ON(b->sets->data->seq != i->seq);
- bch_check_key_order(b, i);
+ bch_check_keys(b, "writing");
cancel_delayed_work(&b->work);
@@ -405,6 +471,15 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
bch_bset_init_next(b);
}
+static void bch_btree_node_write_sync(struct btree *b)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch_btree_node_write(b, &cl);
+ closure_sync(&cl);
+}
+
static void btree_node_write_work(struct work_struct *w)
{
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
@@ -416,7 +491,7 @@ static void btree_node_write_work(struct work_struct *w)
rw_unlock(true, b);
}
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
struct bset *i = b->sets[b->nsets].data;
struct btree_write *w = btree_current_write(b);
@@ -429,15 +504,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
set_btree_node_dirty(b);
- if (op && op->journal) {
+ if (journal_ref) {
if (w->journal &&
- journal_pin_cmp(b->c, w, op)) {
+ journal_pin_cmp(b->c, w->journal, journal_ref)) {
atomic_dec_bug(w->journal);
w->journal = NULL;
}
if (!w->journal) {
- w->journal = op->journal;
+ w->journal = journal_ref;
atomic_inc(w->journal);
}
}
@@ -566,33 +641,32 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b;
}
-static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
+ struct closure cl;
+
+ closure_init_stack(&cl);
lockdep_assert_held(&b->c->bucket_lock);
if (!down_write_trylock(&b->lock))
return -ENOMEM;
- if (b->page_order < min_order) {
+ BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+ if (b->page_order < min_order ||
+ (!flush &&
+ (btree_node_dirty(b) ||
+ atomic_read(&b->io.cl.remaining) != -1))) {
rw_unlock(true, b);
return -ENOMEM;
}
- BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
-
- if (cl && btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
-
- if (cl)
- closure_wait_event_async(&b->io.wait, cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ if (btree_node_dirty(b))
+ bch_btree_node_write_sync(b);
- if (btree_node_dirty(b) ||
- !closure_is_unlocked(&b->io.cl) ||
- work_pending(&b->work.work)) {
- rw_unlock(true, b);
- return -EAGAIN;
- }
+ /* wait for any in flight btree write */
+ closure_wait_event(&b->io.wait, &cl,
+ atomic_read(&b->io.cl.remaining) == -1);
return 0;
}
@@ -633,7 +707,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
break;
if (++i > 3 &&
- !mca_reap(b, NULL, 0)) {
+ !mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
@@ -652,7 +726,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
list_rotate_left(&c->btree_cache);
if (!b->accessed &&
- !mca_reap(b, NULL, 0)) {
+ !mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
@@ -723,12 +797,9 @@ int bch_btree_cache_alloc(struct cache_set *c)
{
unsigned i;
- /* XXX: doesn't check for errors */
-
- closure_init_unlocked(&c->gc);
-
for (i = 0; i < mca_reserve(c); i++)
- mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+ if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+ return -ENOMEM;
list_splice_init(&c->btree_cache,
&c->btree_cache_freeable);
@@ -775,52 +846,27 @@ out:
return b;
}
-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
- int level, struct closure *cl)
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
- int ret = -ENOMEM;
- struct btree *i;
+ struct btree *b;
trace_bcache_btree_cache_cannibalize(c);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- /*
- * Trying to free up some memory - i.e. reuse some btree nodes - may
- * require initiating IO to flush the dirty part of the node. If we're
- * running under generic_make_request(), that IO will never finish and
- * we would deadlock. Returning -EAGAIN causes the cache lookup code to
- * punt to workqueue and retry.
- */
- if (current->bio_list)
- return ERR_PTR(-EAGAIN);
-
- if (c->try_harder && c->try_harder != cl) {
- closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
- return ERR_PTR(-EAGAIN);
- }
+ if (!c->try_harder) {
+ c->try_harder = current;
+ c->try_harder_start = local_clock();
+ } else if (c->try_harder != current)
+ return ERR_PTR(-ENOSPC);
- c->try_harder = cl;
- c->try_harder_start = local_clock();
-retry:
- list_for_each_entry_reverse(i, &c->btree_cache, list) {
- int r = mca_reap(i, cl, btree_order(k));
- if (!r)
- return i;
- if (r != -ENOMEM)
- ret = r;
- }
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), false))
+ return b;
- if (ret == -EAGAIN &&
- closure_blocking(cl)) {
- mutex_unlock(&c->bucket_lock);
- closure_sync(cl);
- mutex_lock(&c->bucket_lock);
- goto retry;
- }
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), true))
+ return b;
- return ERR_PTR(ret);
+ return ERR_PTR(-ENOMEM);
}
/*
@@ -829,20 +875,21 @@ retry:
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+static void bch_cannibalize_unlock(struct cache_set *c)
{
- if (c->try_harder == cl) {
+ if (c->try_harder == current) {
bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
c->try_harder = NULL;
- __closure_wake_up(&c->try_wait);
+ wake_up(&c->try_wait);
}
}
-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
- int level, struct closure *cl)
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
struct btree *b;
+ BUG_ON(current->bio_list);
+
lockdep_assert_held(&c->bucket_lock);
if (mca_find(c, k))
@@ -852,14 +899,14 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
* the list. Check if there's any freed nodes there:
*/
list_for_each_entry(b, &c->btree_cache_freeable, list)
- if (!mca_reap(b, NULL, btree_order(k)))
+ if (!mca_reap(b, btree_order(k), false))
goto out;
/* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, &c->btree_cache_freed, list)
- if (!mca_reap(b, NULL, 0)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
if (!b->sets[0].data)
goto err;
@@ -884,6 +931,7 @@ out:
lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
b->level = level;
+ b->parent = (void *) ~0UL;
mca_reinit(b);
@@ -892,7 +940,7 @@ err:
if (b)
rw_unlock(true, b);
- b = mca_cannibalize(c, k, level, cl);
+ b = mca_cannibalize(c, k);
if (!IS_ERR(b))
goto out;
@@ -903,17 +951,15 @@ err:
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
 * level and the write parameter.
*/
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
- int level, struct btree_op *op)
+ int level, bool write)
{
int i = 0;
- bool write = level <= op->lock;
struct btree *b;
BUG_ON(level < 0);
@@ -925,7 +971,7 @@ retry:
return ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level, &op->cl);
+ b = mca_alloc(c, k, level);
mutex_unlock(&c->bucket_lock);
if (!b)
@@ -971,7 +1017,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
struct btree *b;
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level, NULL);
+ b = mca_alloc(c, k, level);
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(b)) {
@@ -982,17 +1028,12 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
/* Btree alloc */
-static void btree_node_free(struct btree *b, struct btree_op *op)
+static void btree_node_free(struct btree *b)
{
unsigned i;
trace_bcache_btree_node_free(b);
- /*
- * The BUG_ON() in btree_node_get() implies that we must have a write
- * lock on parent to free or even invalidate a node
- */
- BUG_ON(op->lock <= b->level);
BUG_ON(b == b->c->root);
if (btree_node_dirty(b))
@@ -1015,27 +1056,26 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
mutex_unlock(&b->c->bucket_lock);
}
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
- struct closure *cl)
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
{
BKEY_PADDED(key) k;
struct btree *b = ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+ if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
goto err;
+ bkey_put(c, &k.key);
SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
- b = mca_alloc(c, &k.key, level, cl);
+ b = mca_alloc(c, &k.key, level);
if (IS_ERR(b))
goto err_free;
if (!b) {
cache_bug(c,
"Tried to allocate bucket that was in btree cache");
- __bkey_put(c, &k.key);
goto retry;
}
@@ -1048,7 +1088,6 @@ retry:
return b;
err_free:
bch_bucket_free(c, &k.key);
- __bkey_put(c, &k.key);
err:
mutex_unlock(&c->bucket_lock);
@@ -1056,16 +1095,31 @@ err:
return b;
}
-static struct btree *btree_node_alloc_replacement(struct btree *b,
- struct closure *cl)
+static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
{
- struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+ struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
if (!IS_ERR_OR_NULL(n))
bch_btree_sort_into(b, n);
return n;
}
+static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+{
+ unsigned i;
+
+ bkey_copy(k, &b->key);
+ bkey_copy_key(k, &ZERO_KEY);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
+
+ SET_PTR_GEN(k, i, g);
+ }
+
+ atomic_inc(&b->c->prio_blocked);
+}
+
/* Garbage collection */
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1119,12 +1173,10 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
-static int btree_gc_mark_node(struct btree *b, unsigned *keys,
- struct gc_stat *gc)
+static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
- unsigned last_dev = -1;
- struct bcache_device *d = NULL;
+ unsigned keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter iter;
struct bset_tree *t;
@@ -1132,27 +1184,17 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
gc->nodes++;
for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
- if (last_dev != KEY_INODE(k)) {
- last_dev = KEY_INODE(k);
-
- d = KEY_INODE(k) < b->c->nr_uuids
- ? b->c->devices[last_dev]
- : NULL;
- }
-
stale = max(stale, btree_mark_key(b, k));
+ keys++;
if (bch_ptr_bad(b, k))
continue;
- *keys += bkey_u64s(k);
-
gc->key_bytes += bkey_u64s(k);
gc->nkeys++;
+ good_keys++;
gc->data += KEY_SIZE(k);
- if (KEY_DIRTY(k))
- gc->dirty += KEY_SIZE(k);
}
for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1161,78 +1203,74 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
bkey_cmp(&b->key, &t->end) < 0,
b, "found short btree key in gc");
- return stale;
-}
-
-static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
- struct btree_op *op)
-{
- /*
- * We block priorities from being written for the duration of garbage
- * collection, so we can't sleep in btree_alloc() ->
- * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
- * our closure.
- */
- struct btree *n = btree_node_alloc_replacement(b, NULL);
-
- if (!IS_ERR_OR_NULL(n)) {
- swap(b, n);
- __bkey_put(b->c, &b->key);
+ if (b->c->gc_always_rewrite)
+ return true;
- memcpy(k->ptr, b->key.ptr,
- sizeof(uint64_t) * KEY_PTRS(&b->key));
+ if (stale > 10)
+ return true;
- btree_node_free(n, op);
- up_write(&n->lock);
- }
+ if ((keys - good_keys) * 2 > keys)
+ return true;
- return b;
+ return false;
}
-/*
- * Leaving this at 2 until we've got incremental garbage collection done; it
- * could be higher (and has been tested with 4) except that garbage collection
- * could take much longer, adversely affecting latency.
- */
-#define GC_MERGE_NODES 2U
+#define GC_MERGE_NODES 4U
struct gc_merge_info {
struct btree *b;
- struct bkey *k;
unsigned keys;
};
-static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
- struct gc_stat *gc, struct gc_merge_info *r)
+static int bch_btree_insert_node(struct btree *, struct btree_op *,
+ struct keylist *, atomic_t *, struct bkey *);
+
+static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ struct keylist *keylist, struct gc_stat *gc,
+ struct gc_merge_info *r)
{
- unsigned nodes = 0, keys = 0, blocks;
- int i;
+ unsigned i, nodes = 0, keys = 0, blocks;
+ struct btree *new_nodes[GC_MERGE_NODES];
+ struct closure cl;
+ struct bkey *k;
+
+ memset(new_nodes, 0, sizeof(new_nodes));
+ closure_init_stack(&cl);
- while (nodes < GC_MERGE_NODES && r[nodes].b)
+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
if (nodes < 2 ||
__set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
- return;
-
- for (i = nodes - 1; i >= 0; --i) {
- if (r[i].b->written)
- r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
+ return 0;
- if (r[i].b->written)
- return;
+ for (i = 0; i < nodes; i++) {
+ new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
+ if (IS_ERR_OR_NULL(new_nodes[i]))
+ goto out_nocoalesce;
}
for (i = nodes - 1; i > 0; --i) {
- struct bset *n1 = r[i].b->sets->data;
- struct bset *n2 = r[i - 1].b->sets->data;
+ struct bset *n1 = new_nodes[i]->sets->data;
+ struct bset *n2 = new_nodes[i - 1]->sets->data;
struct bkey *k, *last = NULL;
keys = 0;
- if (i == 1) {
+ if (i > 1) {
+ for (k = n2->start;
+ k < end(n2);
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k), b->c) > blocks)
+ break;
+
+ last = k;
+ keys += bkey_u64s(k);
+ }
+ } else {
/*
* Last node we're not getting rid of - we're getting
* rid of the node at r[0]. Have to try and fit all of
@@ -1241,37 +1279,27 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
* length keys (shouldn't be possible in practice,
* though)
*/
- if (__set_blocks(n1, n1->keys + r->keys,
- b->c) > btree_blocks(r[i].b))
- return;
+ if (__set_blocks(n1, n1->keys + n2->keys,
+ b->c) > btree_blocks(new_nodes[i]))
+ goto out_nocoalesce;
keys = n2->keys;
+ /* Take the key of the node we're getting rid of */
last = &r->b->key;
- } else
- for (k = n2->start;
- k < end(n2);
- k = bkey_next(k)) {
- if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k), b->c) > blocks)
- break;
-
- last = k;
- keys += bkey_u64s(k);
- }
+ }
BUG_ON(__set_blocks(n1, n1->keys + keys,
- b->c) > btree_blocks(r[i].b));
+ b->c) > btree_blocks(new_nodes[i]));
- if (last) {
- bkey_copy_key(&r[i].b->key, last);
- bkey_copy_key(r[i].k, last);
- }
+ if (last)
+ bkey_copy_key(&new_nodes[i]->key, last);
memcpy(end(n1),
n2->start,
(void *) node(n2, keys) - (void *) n2->start);
n1->keys += keys;
+ r[i].keys = n1->keys;
memmove(n2->start,
node(n2, keys),
@@ -1279,95 +1307,176 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
n2->keys -= keys;
- r[i].keys = n1->keys;
- r[i - 1].keys = n2->keys;
+ if (bch_keylist_realloc(keylist,
+ KEY_PTRS(&new_nodes[i]->key), b->c))
+ goto out_nocoalesce;
+
+ bch_btree_node_write(new_nodes[i], &cl);
+ bch_keylist_add(keylist, &new_nodes[i]->key);
}
- btree_node_free(r->b, op);
- up_write(&r->b->lock);
+ for (i = 0; i < nodes; i++) {
+ if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+ goto out_nocoalesce;
- trace_bcache_btree_gc_coalesce(nodes);
+ make_btree_freeing_key(r[i].b, keylist->top);
+ bch_keylist_push(keylist);
+ }
+
+ /* We emptied out this node */
+ BUG_ON(new_nodes[0]->sets->data->keys);
+ btree_node_free(new_nodes[0]);
+ rw_unlock(true, new_nodes[0]);
+
+ closure_sync(&cl);
+
+ for (i = 0; i < nodes; i++) {
+ btree_node_free(r[i].b);
+ rw_unlock(true, r[i].b);
+
+ r[i].b = new_nodes[i];
+ }
+
+ bch_btree_insert_node(b, op, keylist, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(keylist));
+
+ memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
+ r[nodes - 1].b = ERR_PTR(-EINTR);
+ trace_bcache_btree_gc_coalesce(nodes);
gc->nodes--;
- nodes--;
- memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
- memset(&r[nodes], 0, sizeof(struct gc_merge_info));
+ /* Invalidated our iterator */
+ return -EINTR;
+
+out_nocoalesce:
+ closure_sync(&cl);
+
+ while ((k = bch_keylist_pop(keylist)))
+ if (!bkey_cmp(k, &ZERO_KEY))
+ atomic_dec(&b->c->prio_blocked);
+
+ for (i = 0; i < nodes; i++)
+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
+ return 0;
}
-static int btree_gc_recurse(struct btree *b, struct btree_op *op,
- struct closure *writes, struct gc_stat *gc)
+static unsigned btree_gc_count_keys(struct btree *b)
{
- void write(struct btree *r)
- {
- if (!r->written)
- bch_btree_node_write(r, &op->cl);
- else if (btree_node_dirty(r))
- bch_btree_node_write(r, writes);
+ struct bkey *k;
+ struct btree_iter iter;
+ unsigned ret = 0;
- up_write(&r->lock);
- }
+ for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ ret += bkey_u64s(k);
+
+ return ret;
+}
- int ret = 0, stale;
+static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
unsigned i;
+ int ret = 0;
+ bool should_rewrite;
+ struct btree *n;
+ struct bkey *k;
+ struct keylist keys;
+ struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
+ struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
- memset(r, 0, sizeof(r));
+ bch_keylist_init(&keys);
+ bch_btree_iter_init(b, &iter, &b->c->gc_done);
- while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
- r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+ for (i = 0; i < GC_MERGE_NODES; i++)
+ r[i].b = ERR_PTR(-EINTR);
- if (IS_ERR(r->b)) {
- ret = PTR_ERR(r->b);
- break;
+ while (1) {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (k) {
+ r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
+ if (IS_ERR(r->b)) {
+ ret = PTR_ERR(r->b);
+ break;
+ }
+
+ r->keys = btree_gc_count_keys(r->b);
+
+ ret = btree_gc_coalesce(b, op, &keys, gc, r);
+ if (ret)
+ break;
}
- r->keys = 0;
- stale = btree_gc_mark_node(r->b, &r->keys, gc);
+ if (!last->b)
+ break;
- if (!b->written &&
- (r->b->level || stale > 10 ||
- b->c->gc_always_rewrite))
- r->b = btree_gc_alloc(r->b, r->k, op);
+ if (!IS_ERR(last->b)) {
+ should_rewrite = btree_gc_mark_node(last->b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(last->b,
+ false);
- if (r->b->level)
- ret = btree_gc_recurse(r->b, op, writes, gc);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_node_write_sync(n);
+ bch_keylist_add(&keys, &n->key);
- if (ret) {
- write(r->b);
- break;
- }
+ make_btree_freeing_key(last->b,
+ keys.top);
+ bch_keylist_push(&keys);
+
+ btree_node_free(last->b);
+
+ bch_btree_insert_node(b, op, &keys,
+ NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keys));
- bkey_copy_key(&b->c->gc_done, r->k);
+ rw_unlock(true, last->b);
+ last->b = n;
- if (!b->written)
- btree_gc_coalesce(b, op, gc, r);
+ /* Invalidated our iterator */
+ ret = -EINTR;
+ break;
+ }
+ }
- if (r[GC_MERGE_NODES - 1].b)
- write(r[GC_MERGE_NODES - 1].b);
+ if (last->b->level) {
+ ret = btree_gc_recurse(last->b, op, writes, gc);
+ if (ret)
+ break;
+ }
- memmove(&r[1], &r[0],
- sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));
+ bkey_copy_key(&b->c->gc_done, &last->b->key);
+
+ /*
+ * Must flush leaf nodes before gc ends, since replace
+ * operations aren't journalled
+ */
+ if (btree_node_dirty(last->b))
+ bch_btree_node_write(last->b, writes);
+ rw_unlock(true, last->b);
+ }
+
+ memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
+ r->b = NULL;
- /* When we've got incremental GC working, we'll want to do
- * if (should_resched())
- * return -EAGAIN;
- */
- cond_resched();
-#if 0
if (need_resched()) {
ret = -EAGAIN;
break;
}
-#endif
}
- for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
- write(r[i].b);
+ for (i = 0; i < GC_MERGE_NODES; i++)
+ if (!IS_ERR_OR_NULL(r[i].b)) {
+ if (btree_node_dirty(r[i].b))
+ bch_btree_node_write(r[i].b, writes);
+ rw_unlock(true, r[i].b);
+ }
- /* Might have freed some children, must remove their keys */
- if (!b->written)
- bch_btree_sort(b);
+ bch_keylist_free(&keys);
return ret;
}
@@ -1376,29 +1485,31 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
struct btree *n = NULL;
- unsigned keys = 0;
- int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
-
- if (b->level || stale > 10)
- n = btree_node_alloc_replacement(b, NULL);
+ int ret = 0;
+ bool should_rewrite;
- if (!IS_ERR_OR_NULL(n))
- swap(b, n);
+ should_rewrite = btree_gc_mark_node(b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(b, false);
- if (b->level)
- ret = btree_gc_recurse(b, op, writes, gc);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_node_write_sync(n);
+ bch_btree_set_root(n);
+ btree_node_free(b);
+ rw_unlock(true, n);
- if (!b->written || btree_node_dirty(b)) {
- bch_btree_node_write(b, n ? &op->cl : NULL);
+ return -EINTR;
+ }
}
- if (!IS_ERR_OR_NULL(n)) {
- closure_sync(&op->cl);
- bch_btree_set_root(b);
- btree_node_free(n, op);
- rw_unlock(true, b);
+ if (b->level) {
+ ret = btree_gc_recurse(b, op, writes, gc);
+ if (ret)
+ return ret;
}
+ bkey_copy_key(&b->c->gc_done, &b->key);
+
return ret;
}
@@ -1479,9 +1590,8 @@ size_t bch_btree_gc_finish(struct cache_set *c)
return available;
}
-static void bch_btree_gc(struct closure *cl)
+static void bch_btree_gc(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
int ret;
unsigned long available;
struct gc_stat stats;
@@ -1493,47 +1603,73 @@ static void bch_btree_gc(struct closure *cl)
memset(&stats, 0, sizeof(struct gc_stat));
closure_init_stack(&writes);
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ bch_btree_op_init(&op, SHRT_MAX);
btree_gc_start(c);
- atomic_inc(&c->prio_blocked);
-
- ret = btree_root(gc_root, c, &op, &writes, &stats);
- closure_sync(&op.cl);
- closure_sync(&writes);
-
- if (ret) {
- pr_warn("gc failed!");
- continue_at(cl, bch_btree_gc, bch_gc_wq);
- }
+ do {
+ ret = btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&writes);
- /* Possibly wait for new UUIDs or whatever to hit disk */
- bch_journal_meta(c, &op.cl);
- closure_sync(&op.cl);
+ if (ret && ret != -EAGAIN)
+ pr_warn("gc failed!");
+ } while (ret);
available = bch_btree_gc_finish(c);
-
- atomic_dec(&c->prio_blocked);
wake_up_allocators(c);
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
- stats.dirty <<= 9;
stats.data <<= 9;
stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
trace_bcache_gc_end(c);
- continue_at(cl, bch_moving_gc, bch_gc_wq);
+ bch_moving_gc(c);
+}
+
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
+ struct cache *ca;
+ unsigned i;
+
+ while (1) {
+again:
+ bch_btree_gc(c);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc) {
+ mutex_unlock(&c->bucket_lock);
+ set_current_state(TASK_RUNNING);
+ goto again;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+
+ try_to_freeze();
+ schedule();
+ }
+
+ return 0;
}
-void bch_queue_gc(struct cache_set *c)
+int bch_gc_thread_start(struct cache_set *c)
{
- closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+ c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ if (IS_ERR(c->gc_thread))
+ return PTR_ERR(c->gc_thread);
+
+ set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
+ return 0;
}
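Aside (not part of the patch): garbage collection now runs in a dedicated kthread instead of being re-queued on a workqueue through closures. Below is a minimal sketch of the intended lifecycle, assuming the bcache-internal headers plus <linux/kthread.h>; example_gc_lifecycle() is a hypothetical caller, while bch_gc_thread_start(), wake_up_gc() (added to btree.h further down in this patch) and kthread_stop() are real.

/* Illustrative sketch only -- create the thread once, poke it when buckets
 * need invalidating, stop it at cache-set teardown. */
static int example_gc_lifecycle(struct cache_set *c)
{
	int ret = bch_gc_thread_start(c);	/* kthread_create("bcache_gc") */
	if (ret)
		return ret;

	wake_up_gc(c);			/* wakes bch_gc_thread() out of schedule() */

	/* ... later, at teardown ... */
	kthread_stop(c->gc_thread);	/* loop exits via kthread_should_stop() */
	return 0;
}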
/* Initial partial gc */
@@ -1541,9 +1677,9 @@ void bch_queue_gc(struct cache_set *c)
static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
unsigned long **seen)
{
- int ret;
+ int ret = 0;
unsigned i;
- struct bkey *k;
+ struct bkey *k, *p = NULL;
struct bucket *g;
struct btree_iter iter;
@@ -1570,31 +1706,32 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
}
if (b->level) {
- k = bch_next_recurse_key(b, &ZERO_KEY);
+ bch_btree_iter_init(b, &iter, NULL);
- while (k) {
- struct bkey *p = bch_next_recurse_key(b, k);
- if (p)
- btree_node_prefetch(b->c, p, b->level - 1);
+ do {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (k)
+ btree_node_prefetch(b->c, k, b->level - 1);
- ret = btree(check_recurse, k, b, op, seen);
- if (ret)
- return ret;
+ if (p)
+ ret = btree(check_recurse, p, b, op, seen);
- k = p;
- }
+ p = k;
+ } while (p && !ret);
}
return 0;
}
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
{
int ret = -ENOMEM;
unsigned i;
unsigned long *seen[MAX_CACHES_PER_SET];
+ struct btree_op op;
memset(seen, 0, sizeof(seen));
+ bch_btree_op_init(&op, SHRT_MAX);
for (i = 0; c->cache[i]; i++) {
size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1606,7 +1743,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
memset(seen[i], 0xFF, n);
}
- ret = btree_root(check_recurse, c, op, seen);
+ ret = btree_root(check_recurse, c, &op, seen);
err:
for (i = 0; i < MAX_CACHES_PER_SET; i++)
kfree(seen[i]);
@@ -1628,10 +1765,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
bch_bset_fix_lookup_table(b, where);
}
-static bool fix_overlapping_extents(struct btree *b,
- struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
struct btree_iter *iter,
- struct btree_op *op)
+ struct bkey *replace_key)
{
void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
{
@@ -1659,39 +1795,38 @@ static bool fix_overlapping_extents(struct btree *b,
* We might overlap with 0 size extents; we can't skip these
* because if they're in the set we're inserting to we have to
* adjust them so they don't overlap with the key we're
- * inserting. But we don't want to check them for BTREE_REPLACE
+ * inserting. But we don't want to check them for replace
* operations.
*/
- if (op->type == BTREE_REPLACE &&
- KEY_SIZE(k)) {
+ if (replace_key && KEY_SIZE(k)) {
/*
* k might have been split since we inserted/found the
* key we're replacing
*/
unsigned i;
uint64_t offset = KEY_START(k) -
- KEY_START(&op->replace);
+ KEY_START(replace_key);
/* But it must be a subset of the replace key */
- if (KEY_START(k) < KEY_START(&op->replace) ||
- KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+ if (KEY_START(k) < KEY_START(replace_key) ||
+ KEY_OFFSET(k) > KEY_OFFSET(replace_key))
goto check_failed;
/* We didn't find a key that we were supposed to */
if (KEY_START(k) > KEY_START(insert) + sectors_found)
goto check_failed;
- if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+ if (KEY_PTRS(replace_key) != KEY_PTRS(k))
goto check_failed;
/* skip past gen */
offset <<= 8;
- BUG_ON(!KEY_PTRS(&op->replace));
+ BUG_ON(!KEY_PTRS(replace_key));
- for (i = 0; i < KEY_PTRS(&op->replace); i++)
- if (k->ptr[i] != op->replace.ptr[i] + offset)
+ for (i = 0; i < KEY_PTRS(replace_key); i++)
+ if (k->ptr[i] != replace_key->ptr[i] + offset)
goto check_failed;
sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1742,6 +1877,9 @@ static bool fix_overlapping_extents(struct btree *b,
if (bkey_cmp(insert, k) < 0) {
bch_cut_front(insert, k);
} else {
+ if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+ old_offset = KEY_START(insert);
+
if (bkey_written(b, k) &&
bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
/*
@@ -1759,9 +1897,8 @@ static bool fix_overlapping_extents(struct btree *b,
}
check_failed:
- if (op->type == BTREE_REPLACE) {
+ if (replace_key) {
if (!sectors_found) {
- op->insert_collision = true;
return true;
} else if (sectors_found < KEY_SIZE(insert)) {
SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1774,7 +1911,7 @@ check_failed:
}
static bool btree_insert_key(struct btree *b, struct btree_op *op,
- struct bkey *k)
+ struct bkey *k, struct bkey *replace_key)
{
struct bset *i = b->sets[b->nsets].data;
struct bkey *m, *prev;
@@ -1786,22 +1923,23 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
if (!b->level) {
struct btree_iter iter;
- struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
/*
* bset_search() returns the first key that is strictly greater
* than the search key - but for back merging, we want to find
- * the first key that is greater than or equal to KEY_START(k) -
- * unless KEY_START(k) is 0.
+ * the previous key.
*/
- if (KEY_OFFSET(&search))
- SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
-
prev = NULL;
- m = bch_btree_iter_init(b, &iter, &search);
+ m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
- if (fix_overlapping_extents(b, k, &iter, op))
+ if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+ op->insert_collision = true;
return false;
+ }
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
while (m != end(i) &&
bkey_cmp(k, &START_KEY(m)) > 0)
@@ -1825,84 +1963,80 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
if (m != end(i) &&
bch_bkey_try_merge(b, k, m))
goto copy;
- } else
+ } else {
+ BUG_ON(replace_key);
m = bch_bset_search(b, &b->sets[b->nsets], k);
+ }
insert: shift_keys(b, m, k);
copy: bkey_copy(m, k);
merged:
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
- KEY_START(k), KEY_SIZE(k));
-
- bch_check_keys(b, "%u for %s", status, op_type(op));
+ bch_check_keys(b, "%u for %s", status,
+ replace_key ? "replace" : "insert");
if (b->level && !KEY_OFFSET(k))
btree_current_write(b)->prio_blocked++;
- trace_bcache_btree_insert_key(b, k, op->type, status);
+ trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
return true;
}
-static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
{
bool ret = false;
- struct bkey *k;
- unsigned oldsize = bch_count_data(b);
-
- while ((k = bch_keylist_pop(&op->keys))) {
- bkey_put(b->c, k, b->level);
- ret |= btree_insert_key(b, op, k);
- }
-
- BUG_ON(bch_count_data(b) < oldsize);
- return ret;
-}
+ int oldsize = bch_count_data(b);
-bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- struct bio *bio)
-{
- bool ret = false;
- uint64_t btree_ptr = b->key.ptr[0];
- unsigned long seq = b->seq;
- BKEY_PADDED(k) tmp;
+ while (!bch_keylist_empty(insert_keys)) {
+ struct bset *i = write_block(b);
+ struct bkey *k = insert_keys->keys;
- rw_unlock(false, b);
- rw_lock(true, b, b->level);
+ if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
+ > btree_blocks(b))
+ break;
- if (b->key.ptr[0] != btree_ptr ||
- b->seq != seq + 1 ||
- should_split(b))
- goto out;
+ if (bkey_cmp(k, &b->key) <= 0) {
+ if (!b->level)
+ bkey_put(b->c, k);
- op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
+ ret |= btree_insert_key(b, op, k, replace_key);
+ bch_keylist_pop_front(insert_keys);
+ } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, insert_keys->keys);
- SET_KEY_PTRS(&op->replace, 1);
- get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
+ bch_cut_back(&b->key, &temp.key);
+ bch_cut_front(&b->key, insert_keys->keys);
- SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
+ ret |= btree_insert_key(b, op, &temp.key, replace_key);
+ break;
+ } else {
+ break;
+ }
+ }
- bkey_copy(&tmp.k, &op->replace);
+ BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
- BUG_ON(op->type != BTREE_INSERT);
- BUG_ON(!btree_insert_key(b, op, &tmp.k));
- ret = true;
-out:
- downgrade_write(&b->lock);
+ BUG_ON(bch_count_data(b) < oldsize);
return ret;
}
-static int btree_split(struct btree *b, struct btree_op *op)
+static int btree_split(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
{
- bool split, root = b == b->c->root;
+ bool split;
struct btree *n1, *n2 = NULL, *n3 = NULL;
uint64_t start_time = local_clock();
+ struct closure cl;
+ struct keylist parent_keys;
- if (b->level)
- set_closure_blocking(&op->cl);
+ closure_init_stack(&cl);
+ bch_keylist_init(&parent_keys);
- n1 = btree_node_alloc_replacement(b, &op->cl);
+ n1 = btree_node_alloc_replacement(b, true);
if (IS_ERR(n1))
goto err;
@@ -1913,19 +2047,20 @@ static int btree_split(struct btree *b, struct btree_op *op)
trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
- n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+ n2 = bch_btree_node_alloc(b->c, b->level, true);
if (IS_ERR(n2))
goto err_free1;
- if (root) {
- n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+ if (!b->parent) {
+ n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
if (IS_ERR(n3))
goto err_free2;
}
- bch_btree_insert_keys(n1, op);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
- /* Has to be a linear search because we don't have an auxiliary
+ /*
+ * Has to be a linear search because we don't have an auxiliary
* search tree yet
*/
@@ -1944,60 +2079,57 @@ static int btree_split(struct btree *b, struct btree_op *op)
bkey_copy_key(&n2->key, &b->key);
- bch_keylist_add(&op->keys, &n2->key);
- bch_btree_node_write(n2, &op->cl);
+ bch_keylist_add(&parent_keys, &n2->key);
+ bch_btree_node_write(n2, &cl);
rw_unlock(true, n2);
} else {
trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
- bch_btree_insert_keys(n1, op);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
}
- bch_keylist_add(&op->keys, &n1->key);
- bch_btree_node_write(n1, &op->cl);
+ bch_keylist_add(&parent_keys, &n1->key);
+ bch_btree_node_write(n1, &cl);
if (n3) {
+ /* Depth increases, make a new root */
bkey_copy_key(&n3->key, &MAX_KEY);
- bch_btree_insert_keys(n3, op);
- bch_btree_node_write(n3, &op->cl);
+ bch_btree_insert_keys(n3, op, &parent_keys, NULL);
+ bch_btree_node_write(n3, &cl);
- closure_sync(&op->cl);
+ closure_sync(&cl);
bch_btree_set_root(n3);
rw_unlock(true, n3);
- } else if (root) {
- op->keys.top = op->keys.bottom;
- closure_sync(&op->cl);
- bch_btree_set_root(n1);
- } else {
- unsigned i;
- bkey_copy(op->keys.top, &b->key);
- bkey_copy_key(op->keys.top, &ZERO_KEY);
+ btree_node_free(b);
+ } else if (!b->parent) {
+ /* Root filled up but didn't need to be split */
+ closure_sync(&cl);
+ bch_btree_set_root(n1);
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
+ btree_node_free(b);
+ } else {
+ /* Split a non root node */
+ closure_sync(&cl);
+ make_btree_freeing_key(b, parent_keys.top);
+ bch_keylist_push(&parent_keys);
- SET_PTR_GEN(op->keys.top, i, g);
- }
+ btree_node_free(b);
- bch_keylist_push(&op->keys);
- closure_sync(&op->cl);
- atomic_inc(&b->c->prio_blocked);
+ bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&parent_keys));
}
rw_unlock(true, n1);
- btree_node_free(b, op);
bch_time_stats_update(&b->c->btree_split_time, start_time);
return 0;
err_free2:
- __bkey_put(n2->c, &n2->key);
- btree_node_free(n2, op);
+ btree_node_free(n2);
rw_unlock(true, n2);
err_free1:
- __bkey_put(n1->c, &n1->key);
- btree_node_free(n1, op);
+ btree_node_free(n1);
rw_unlock(true, n1);
err:
if (n3 == ERR_PTR(-EAGAIN) ||
@@ -2009,116 +2141,126 @@ err:
return -ENOMEM;
}
-static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
- struct keylist *stack_keys)
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ atomic_t *journal_ref,
+ struct bkey *replace_key)
{
- if (b->level) {
- int ret;
- struct bkey *insert = op->keys.bottom;
- struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
-
- if (!k) {
- btree_bug(b, "no key to recurse on at level %i/%i",
- b->level, b->c->root->level);
+ BUG_ON(b->level && replace_key);
- op->keys.top = op->keys.bottom;
- return -EIO;
+ if (should_split(b)) {
+ if (current->bio_list) {
+ op->lock = b->c->root->level + 1;
+ return -EAGAIN;
+ } else if (op->lock <= b->c->root->level) {
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ } else {
+ /* Invalidated all iterators */
+ return btree_split(b, op, insert_keys, replace_key) ?:
+ -EINTR;
}
+ } else {
+ BUG_ON(write_block(b) != b->sets[b->nsets].data);
- if (bkey_cmp(insert, k) > 0) {
- unsigned i;
-
- if (op->type == BTREE_REPLACE) {
- __bkey_put(b->c, insert);
- op->keys.top = op->keys.bottom;
- op->insert_collision = true;
- return 0;
- }
+ if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, journal_ref);
+ else
+ bch_btree_node_write_sync(b);
+ }
- for (i = 0; i < KEY_PTRS(insert); i++)
- atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin);
+ return 0;
+ }
+}
- bkey_copy(stack_keys->top, insert);
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bkey *check_key)
+{
+ int ret = -EINTR;
+ uint64_t btree_ptr = b->key.ptr[0];
+ unsigned long seq = b->seq;
+ struct keylist insert;
+ bool upgrade = op->lock == -1;
- bch_cut_back(k, insert);
- bch_cut_front(k, stack_keys->top);
+ bch_keylist_init(&insert);
- bch_keylist_push(stack_keys);
- }
+ if (upgrade) {
+ rw_unlock(false, b);
+ rw_lock(true, b, b->level);
- ret = btree(insert_recurse, k, b, op, stack_keys);
- if (ret)
- return ret;
+ if (b->key.ptr[0] != btree_ptr ||
+ b->seq != seq + 1)
+ goto out;
}
- if (!bch_keylist_empty(&op->keys)) {
- if (should_split(b)) {
- if (op->lock <= b->c->root->level) {
- BUG_ON(b->level);
- op->lock = b->c->root->level + 1;
- return -EINTR;
- }
- return btree_split(b, op);
- }
+ SET_KEY_PTRS(check_key, 1);
+ get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
- BUG_ON(write_block(b) != b->sets[b->nsets].data);
+ SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
- if (bch_btree_insert_keys(b, op)) {
- if (!b->level)
- bch_btree_leaf_dirty(b, op);
- else
- bch_btree_node_write(b, &op->cl);
- }
- }
+ bch_keylist_add(&insert, check_key);
- return 0;
+ ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
+
+ BUG_ON(!ret && !bch_keylist_empty(&insert));
+out:
+ if (upgrade)
+ downgrade_write(&b->lock);
+ return ret;
}
-int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+struct btree_insert_op {
+ struct btree_op op;
+ struct keylist *keys;
+ atomic_t *journal_ref;
+ struct bkey *replace_key;
+};
+
+int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
- int ret = 0;
- struct keylist stack_keys;
+ struct btree_insert_op *op = container_of(b_op,
+ struct btree_insert_op, op);
- /*
- * Don't want to block with the btree locked unless we have to,
- * otherwise we get deadlocks with try_harder and between split/gc
- */
- clear_closure_blocking(&op->cl);
-
- BUG_ON(bch_keylist_empty(&op->keys));
- bch_keylist_copy(&stack_keys, &op->keys);
- bch_keylist_init(&op->keys);
-
- while (!bch_keylist_empty(&stack_keys) ||
- !bch_keylist_empty(&op->keys)) {
- if (bch_keylist_empty(&op->keys)) {
- bch_keylist_add(&op->keys,
- bch_keylist_pop(&stack_keys));
- op->lock = 0;
- }
+ int ret = bch_btree_insert_node(b, &op->op, op->keys,
+ op->journal_ref, op->replace_key);
+ if (ret && !bch_keylist_empty(op->keys))
+ return ret;
+ else
+ return MAP_DONE;
+}
- ret = btree_root(insert_recurse, c, op, &stack_keys);
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+ atomic_t *journal_ref, struct bkey *replace_key)
+{
+ struct btree_insert_op op;
+ int ret = 0;
- if (ret == -EAGAIN) {
- ret = 0;
- closure_sync(&op->cl);
- } else if (ret) {
- struct bkey *k;
+ BUG_ON(current->bio_list);
+ BUG_ON(bch_keylist_empty(keys));
+
+ bch_btree_op_init(&op.op, 0);
+ op.keys = keys;
+ op.journal_ref = journal_ref;
+ op.replace_key = replace_key;
+
+ while (!ret && !bch_keylist_empty(keys)) {
+ op.op.lock = 0;
+ ret = bch_btree_map_leaf_nodes(&op.op, c,
+ &START_KEY(keys->keys),
+ btree_insert_fn);
+ }
- pr_err("error %i trying to insert key for %s",
- ret, op_type(op));
+ if (ret) {
+ struct bkey *k;
- while ((k = bch_keylist_pop(&stack_keys) ?:
- bch_keylist_pop(&op->keys)))
- bkey_put(c, k, 0);
- }
- }
+ pr_err("error %i", ret);
- bch_keylist_free(&stack_keys);
+ while ((k = bch_keylist_pop(keys)))
+ bkey_put(c, k);
+ } else if (op.op.insert_collision)
+ ret = -ESRCH;
- if (op->journal)
- atomic_dec_bug(op->journal);
- op->journal = NULL;
return ret;
}
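For orientation (not in the patch itself): bch_btree_insert() now takes a keylist plus an optional journal ref and replace key instead of pulling everything out of btree_op. A minimal caller sketch using only interfaces visible in this diff; example_insert_one() and its arguments are illustrative.

/* Illustrative only: insert a single key, optionally as a "replace" that
 * fails with -ESRCH if the key being replaced is no longer fully present. */
static int example_insert_one(struct cache_set *c, struct bkey *k,
			      struct bkey *replace_key)
{
	struct keylist keys;

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, k);

	/* journal_ref == NULL: no journal reservation held by the caller */
	return bch_btree_insert(c, &keys, NULL, replace_key);
}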
@@ -2141,132 +2283,81 @@ void bch_btree_set_root(struct btree *b)
mutex_unlock(&b->c->bucket_lock);
b->c->root = b;
- __bkey_put(b->c, &b->key);
bch_journal_meta(b->c, &cl);
closure_sync(&cl);
}
-/* Cache lookup */
+/* Map across nodes or keys */
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
- struct bkey *k)
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from,
+ btree_map_nodes_fn *fn, int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
- int ret = 0;
+ int ret = MAP_CONTINUE;
+
+ if (b->level) {
+ struct bkey *k;
+ struct btree_iter iter;
- while (!ret &&
- !op->lookup_done) {
- unsigned sectors = INT_MAX;
+ bch_btree_iter_init(b, &iter, from);
- if (KEY_INODE(k) == op->inode) {
- if (KEY_START(k) <= bio->bi_sector)
- break;
+ while ((k = bch_btree_iter_next_filter(&iter, b,
+ bch_ptr_bad))) {
+ ret = btree(map_nodes_recurse, k, b,
+ op, from, fn, flags);
+ from = NULL;
- sectors = min_t(uint64_t, sectors,
- KEY_START(k) - bio->bi_sector);
+ if (ret != MAP_CONTINUE)
+ return ret;
}
-
- ret = s->d->cache_miss(b, s, bio, sectors);
}
+ if (!b->level || flags == MAP_ALL_NODES)
+ ret = fn(op, b);
+
return ret;
}
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
- struct bkey *k)
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
- unsigned ptr;
- struct bio *n;
-
- int ret = submit_partial_cache_miss(b, op, k);
- if (ret || op->lookup_done)
- return ret;
-
- /* XXX: figure out best pointer - for multiple cache devices */
- ptr = 0;
-
- PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
- while (!op->lookup_done &&
- KEY_INODE(k) == op->inode &&
- bio->bi_sector < KEY_OFFSET(k)) {
- struct bkey *bio_key;
- sector_t sector = PTR_OFFSET(k, ptr) +
- (bio->bi_sector - KEY_START(k));
- unsigned sectors = min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector);
-
- n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (n == bio)
- op->lookup_done = true;
-
- bio_key = &container_of(n, struct bbio, bio)->key;
-
- /*
- * The bucket we're reading from might be reused while our bio
- * is in flight, and we could then end up reading the wrong
- * data.
- *
- * We guard against this by checking (in cache_read_endio()) if
- * the pointer is stale again; if so, we treat it as an error
- * and reread from the backing device (but we don't pass that
- * error up anywhere).
- */
-
- bch_bkey_copy_single_ptr(bio_key, k, ptr);
- SET_PTR_OFFSET(bio_key, 0, sector);
-
- n->bi_end_io = bch_cache_read_endio;
- n->bi_private = &s->cl;
-
- __bch_submit_bbio(n, b->c);
- }
-
- return 0;
+ return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
-int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from, btree_map_keys_fn *fn,
+ int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
-
- int ret = 0;
+ int ret = MAP_CONTINUE;
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
- do {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
- if (!k) {
- /*
- * b->key would be exactly what we want, except that
- * pointers to btree nodes have nonzero size - we
- * wouldn't go far enough
- */
+ bch_btree_iter_init(b, &iter, from);
- ret = submit_partial_cache_miss(b, op,
- &KEY(KEY_INODE(&b->key),
- KEY_OFFSET(&b->key), 0));
- break;
- }
+ while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+ ret = !b->level
+ ? fn(op, b, k)
+ : btree(map_keys_recurse, k, b, op, from, fn, flags);
+ from = NULL;
+
+ if (ret != MAP_CONTINUE)
+ return ret;
+ }
- ret = b->level
- ? btree(search_recurse, k, b, op)
- : submit_partial_cache_hit(b, op, k);
- } while (!ret &&
- !op->lookup_done);
+ if (!b->level && (flags & MAP_END_KEY))
+ ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+ KEY_OFFSET(&b->key), 0));
return ret;
}
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+ return btree_root(map_keys_recurse, c, op, from, fn, flags);
+}
+
/* Keybuf code */
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2285,80 +2376,79 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
- struct keybuf *buf, struct bkey *end,
- keybuf_pred_fn *pred)
-{
- struct btree_iter iter;
- bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
- while (!array_freelist_empty(&buf->freelist)) {
- struct bkey *k = bch_btree_iter_next_filter(&iter, b,
- bch_ptr_bad);
-
- if (!b->level) {
- if (!k) {
- buf->last_scanned = b->key;
- break;
- }
+struct refill {
+ struct btree_op op;
+ unsigned nr_found;
+ struct keybuf *buf;
+ struct bkey *end;
+ keybuf_pred_fn *pred;
+};
- buf->last_scanned = *k;
- if (bkey_cmp(&buf->last_scanned, end) >= 0)
- break;
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+ struct bkey *k)
+{
+ struct refill *refill = container_of(op, struct refill, op);
+ struct keybuf *buf = refill->buf;
+ int ret = MAP_CONTINUE;
- if (pred(buf, k)) {
- struct keybuf_key *w;
+ if (bkey_cmp(k, refill->end) >= 0) {
+ ret = MAP_DONE;
+ goto out;
+ }
- spin_lock(&buf->lock);
+ if (!KEY_SIZE(k)) /* end key */
+ goto out;
- w = array_alloc(&buf->freelist);
+ if (refill->pred(buf, k)) {
+ struct keybuf_key *w;
- w->private = NULL;
- bkey_copy(&w->key, k);
+ spin_lock(&buf->lock);
- if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
- array_free(&buf->freelist, w);
+ w = array_alloc(&buf->freelist);
+ if (!w) {
+ spin_unlock(&buf->lock);
+ return MAP_DONE;
+ }
- spin_unlock(&buf->lock);
- }
- } else {
- if (!k)
- break;
+ w->private = NULL;
+ bkey_copy(&w->key, k);
- btree(refill_keybuf, k, b, op, buf, end, pred);
- /*
- * Might get an error here, but can't really do anything
- * and it'll get logged elsewhere. Just read what we
- * can.
- */
+ if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+ array_free(&buf->freelist, w);
+ else
+ refill->nr_found++;
- if (bkey_cmp(&buf->last_scanned, end) >= 0)
- break;
+ if (array_freelist_empty(&buf->freelist))
+ ret = MAP_DONE;
- cond_resched();
- }
+ spin_unlock(&buf->lock);
}
-
- return 0;
+out:
+ buf->last_scanned = *k;
+ return ret;
}
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
struct bkey *end, keybuf_pred_fn *pred)
{
struct bkey start = buf->last_scanned;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
+ struct refill refill;
cond_resched();
- btree_root(refill_keybuf, c, &op, buf, end, pred);
- closure_sync(&op.cl);
+ bch_btree_op_init(&refill.op, -1);
+ refill.nr_found = 0;
+ refill.buf = buf;
+ refill.end = end;
+ refill.pred = pred;
+
+ bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+ refill_keybuf_fn, MAP_END_KEY);
- pr_debug("found %s keys from %llu:%llu to %llu:%llu",
- RB_EMPTY_ROOT(&buf->keys) ? "no" :
- array_freelist_empty(&buf->freelist) ? "some" : "a few",
- KEY_INODE(&start), KEY_OFFSET(&start),
- KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
+ trace_bcache_keyscan(refill.nr_found,
+ KEY_INODE(&start), KEY_OFFSET(&start),
+ KEY_INODE(&buf->last_scanned),
+ KEY_OFFSET(&buf->last_scanned));
spin_lock(&buf->lock);
@@ -2436,9 +2526,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
}
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
- struct keybuf *buf,
- struct bkey *end,
- keybuf_pred_fn *pred)
+ struct keybuf *buf,
+ struct bkey *end,
+ keybuf_pred_fn *pred)
{
struct keybuf_key *ret;
@@ -2471,14 +2561,12 @@ void bch_btree_exit(void)
{
if (btree_io_wq)
destroy_workqueue(btree_io_wq);
- if (bch_gc_wq)
- destroy_workqueue(bch_gc_wq);
}
int __init bch_btree_init(void)
{
- if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
- !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+ btree_io_wq = create_singlethread_workqueue("bch_btree_io");
+ if (!btree_io_wq)
return -ENOMEM;
return 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3333d372363..767e7557089 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@ struct btree {
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
+ struct btree *parent;
unsigned long flags;
uint16_t written; /* would be nice to kill */
@@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
static inline void set_gc_sectors(struct cache_set *c)
{
- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
- return __bch_ptr_invalid(b->c, b->level, k);
+ atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
static inline struct bkey *bch_btree_iter_init(struct btree *b,
@@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
return __bch_btree_iter_init(b, iter, search, b->sets);
}
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+ if (b->level)
+ return bch_btree_ptr_invalid(b->c, k);
+ else
+ return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
/* Looping macros */
#define for_each_cached_btree(b, c, iter) \
@@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
/* Recursing down the btree */
struct btree_op {
- struct closure cl;
- struct cache_set *c;
-
- /* Journal entry we have a refcount on */
- atomic_t *journal;
-
- /* Bio to be inserted into the cache */
- struct bio *cache_bio;
-
- unsigned inode;
-
- uint16_t write_prio;
-
/* Btree level at which we start taking write locks */
short lock;
- /* Btree insertion type */
- enum {
- BTREE_INSERT,
- BTREE_REPLACE
- } type:8;
-
- unsigned csum:1;
- unsigned skip:1;
- unsigned flush_journal:1;
-
- unsigned insert_data_done:1;
- unsigned lookup_done:1;
unsigned insert_collision:1;
-
- /* Anything after this point won't get zeroed in do_bio_hook() */
-
- /* Keys to be inserted */
- struct keylist keys;
- BKEY_PADDED(replace);
};
-enum {
- BTREE_INSERT_STATUS_INSERT,
- BTREE_INSERT_STATUS_BACK_MERGE,
- BTREE_INSERT_STATUS_OVERWROTE,
- BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+ memset(op, 0, sizeof(struct btree_op));
+ op->lock = write_lock_level;
+}
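A quick, hypothetical illustration of the slimmed-down btree_op: it now carries only the write-lock level and the collision flag, so every caller allocates it on the stack and picks a lock level, as the converted call sites in this patch do.

/* Sketch only -- the three lock levels used by callers in this patch. */
static void example_op_init(void)
{
	struct btree_op op;

	bch_btree_op_init(&op, SHRT_MAX); /* write-lock every level (gc, check) */
	bch_btree_op_init(&op, 0);        /* write-lock leaves only (inserts)   */
	bch_btree_op_init(&op, -1);       /* read locks everywhere (keybuf refill) */
}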
static inline void rw_lock(bool w, struct btree *b, int level)
{
@@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
static inline void rw_unlock(bool w, struct btree *b)
{
-#ifdef CONFIG_BCACHE_EDEBUG
- unsigned i;
-
- if (w && b->key.ptr[0])
- for (i = 0; i <= b->nsets; i++)
- bch_check_key_order(b, b->sets[i].data);
-#endif
-
if (w)
b->seq++;
(w ? up_write : up_read)(&b->lock);
}
-#define insert_lock(s, b) ((b)->level <= (s)->lock)
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...) \
-({ \
- int _r, l = (b)->level - 1; \
- bool _w = l <= (op)->lock; \
- struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \
- if (!IS_ERR(_b)) { \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- rw_unlock(_w, _b); \
- } else \
- _r = PTR_ERR(_b); \
- _r; \
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...) \
-({ \
- int _r = -EINTR; \
- do { \
- struct btree *_b = (c)->root; \
- bool _w = insert_lock(op, _b); \
- rw_lock(_w, _b, _b->level); \
- if (_b == (c)->root && \
- _w == insert_lock(op, _b)) \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- rw_unlock(_w, _b); \
- bch_cannibalize_unlock(c, &(op)->cl); \
- } while (_r == -EINTR); \
- \
- _r; \
-})
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+ struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+ atomic_t *, struct bkey *);
+
+int bch_gc_thread_start(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
-static inline bool should_split(struct btree *b)
+static inline void wake_up_gc(struct cache_set *c)
{
- struct bset *i = write_block(b);
- return b->written >= btree_blocks(b) ||
- (i->seq == b->sets[0].data->seq &&
- b->written + __set_blocks(i, i->keys + 15, b->c)
- > btree_blocks(b));
+ if (c->gc_thread)
+ wake_up_process(c->gc_thread);
}
-void bch_btree_node_read(struct btree *);
-void bch_btree_node_write(struct btree *, struct closure *);
+#define MAP_DONE 0
+#define MAP_CONTINUE 1
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
-void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
- int, struct btree_op *);
+#define MAP_ALL_NODES 0
+#define MAP_LEAF_NODES 1
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
- struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+#define MAP_END_KEY 1
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+ struct bkey *, btree_map_nodes_fn *, int);
-void bch_queue_gc(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+ struct cache_set *c,
+ struct bkey *from,
+ btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+ struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+ struct bkey *, btree_map_keys_fn *, int);
+
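As a worked example of the new map interface (not part of the patch): a callback embeds a struct btree_op so it can recover its private state with container_of(), exactly as struct refill/refill_keybuf_fn do in btree.c above. The names below (count_op, count_keys_fn, example_count_keys) are hypothetical.

/* Sketch only: count every good key in the extent btree. */
struct count_op {
	struct btree_op	op;
	uint64_t	count;
};

static int count_keys_fn(struct btree_op *b_op, struct btree *b, struct bkey *k)
{
	struct count_op *op = container_of(b_op, struct count_op, op);

	op->count++;
	return MAP_CONTINUE;	/* keep walking; MAP_DONE would stop the scan */
}

static uint64_t example_count_keys(struct cache_set *c)
{
	struct count_op op = { .count = 0 };

	bch_btree_op_init(&op.op, -1);	/* read locks only, like bch_refill_keybuf() */
	bch_btree_map_keys(&op.op, c, &ZERO_KEY, count_keys_fn, 0);
	return op.count;
}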
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
- keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+ struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9aba2017f0d..dfff2410322 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,17 +11,6 @@
#include "closure.h"
-void closure_queue(struct closure *cl)
-{
- struct workqueue_struct *wq = cl->wq;
- if (wq) {
- INIT_WORK(&cl->work, cl->work.func);
- BUG_ON(!queue_work(wq, &cl->work));
- } else
- cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
#define CL_FIELD(type, field) \
case TYPE_ ## type: \
return &container_of(cl, struct type, cl)->field
@@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
switch (cl->type) {
CL_FIELD(closure_with_waitlist, wait);
- CL_FIELD(closure_with_waitlist_and_timer, wait);
- default:
- return NULL;
- }
-}
-
-static struct timer_list *closure_timer(struct closure *cl)
-{
- switch (cl->type) {
- CL_FIELD(closure_with_timer, timer);
- CL_FIELD(closure_with_waitlist_and_timer, timer);
default:
return NULL;
}
@@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
int r = flags & CLOSURE_REMAINING_MASK;
BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
/* Must deliver precisely one wakeup */
if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
- /* CLOSURE_BLOCKING might be set - clear it */
atomic_set(&cl->remaining,
CLOSURE_REMAINING_INITIALIZER);
closure_queue(cl);
@@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_sub);
+EXPORT_SYMBOL(closure_sub);
void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_put);
+EXPORT_SYMBOL(closure_put);
static void set_waiting(struct closure *cl, unsigned long f)
{
@@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL_GPL(__closure_wake_up);
+EXPORT_SYMBOL(__closure_wake_up);
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
@@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl)
return true;
}
-EXPORT_SYMBOL_GPL(closure_wait);
+EXPORT_SYMBOL(closure_wait);
/**
* closure_sync() - sleep until a closure has nothing left to wait on
@@ -169,7 +146,7 @@ void closure_sync(struct closure *cl)
__closure_end_sleep(cl);
}
-EXPORT_SYMBOL_GPL(closure_sync);
+EXPORT_SYMBOL(closure_sync);
/**
* closure_trylock() - try to acquire the closure, without waiting
@@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
CLOSURE_REMAINING_INITIALIZER) != -1)
return false;
- closure_set_ret_ip(cl);
-
smp_mb();
+
cl->parent = parent;
if (parent)
closure_get(parent);
+ closure_set_ret_ip(cl);
closure_debug_create(cl);
return true;
}
-EXPORT_SYMBOL_GPL(closure_trylock);
+EXPORT_SYMBOL(closure_trylock);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list)
@@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent,
if (closure_trylock(cl, parent))
return;
- closure_wait_event_sync(wait_list, &wait,
- atomic_read(&cl->remaining) == -1);
+ closure_wait_event(wait_list, &wait,
+ atomic_read(&cl->remaining) == -1);
}
}
-EXPORT_SYMBOL_GPL(__closure_lock);
-
-static void closure_delay_timer_fn(unsigned long data)
-{
- struct closure *cl = (struct closure *) data;
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-
-void do_closure_timer_init(struct closure *cl)
-{
- struct timer_list *timer = closure_timer(cl);
-
- init_timer(timer);
- timer->data = (unsigned long) cl;
- timer->function = closure_delay_timer_fn;
-}
-EXPORT_SYMBOL_GPL(do_closure_timer_init);
-
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer)
-{
- if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
- return false;
-
- BUG_ON(timer_pending(timer));
-
- timer->expires = jiffies + delay;
-
- atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
- add_timer(timer);
- return true;
-}
-EXPORT_SYMBOL_GPL(__closure_delay);
-
-void __closure_flush(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush);
-
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer_sync(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush_sync);
+EXPORT_SYMBOL(__closure_lock);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_create);
+EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_destroy);
+EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *debug;
@@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s%s%s\n",
+ seq_printf(f, "%s%s%s%s\n",
test_bit(WORK_STRUCT_PENDING,
work_data_bits(&cl->work)) ? "Q" : "",
r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_BLOCKING ? "B" : "",
r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "",
- r & CLOSURE_TIMER ? "T" : "");
+ r & CLOSURE_SLEEPING ? "Sl" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 00039924ea9..9762f1be330 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -155,21 +155,6 @@
* delayed_work embeds a work item and a timer_list. The important thing is, use
* it exactly like you would a regular closure and closure_put() will magically
* handle everything for you.
- *
- * We've got closures that embed timers, too. They're called, appropriately
- * enough:
- * struct closure_with_timer;
- *
- * This gives you access to closure_delay(). It takes a refcount for a specified
- * number of jiffies - you could then call closure_sync() (for a slightly
- * convoluted version of msleep()) or continue_at() - which gives you the same
- * effect as using a delayed work item, except you can reuse the work_struct
- * already embedded in struct closure.
- *
- * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
- * probably expect, if you happen to need the features of both. (You don't
- * really want to know how all this is implemented, but if I've done my job
- * right you shouldn't have to care).
*/
struct closure;
@@ -182,16 +167,11 @@ struct closure_waitlist {
enum closure_type {
TYPE_closure = 0,
TYPE_closure_with_waitlist = 1,
- TYPE_closure_with_timer = 2,
- TYPE_closure_with_waitlist_and_timer = 3,
- MAX_CLOSURE_TYPE = 3,
+ MAX_CLOSURE_TYPE = 1,
};
enum closure_state {
/*
- * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
- * waiting asynchronously
- *
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
@@ -200,10 +180,6 @@ enum closure_state {
* - indicates that cl->task is valid and closure_put() may wake it up.
* Only set or cleared by the thread that owns the closure.
*
- * CLOSURE_TIMER: Analagous to CLOSURE_WAITING, indicates that a closure
- * has an outstanding timer. Must be set by the thread that owns the
- * closure, and cleared by the timer function when the timer goes off.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -218,19 +194,17 @@ enum closure_state {
* closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 19),
- CLOSURE_DESTRUCTOR = (1 << 19),
- CLOSURE_BLOCKING = (1 << 21),
- CLOSURE_WAITING = (1 << 23),
- CLOSURE_SLEEPING = (1 << 25),
- CLOSURE_TIMER = (1 << 27),
+ CLOSURE_BITS_START = (1 << 23),
+ CLOSURE_DESTRUCTOR = (1 << 23),
+ CLOSURE_WAITING = (1 << 25),
+ CLOSURE_SLEEPING = (1 << 27),
CLOSURE_RUNNING = (1 << 29),
CLOSURE_STACK = (1 << 31),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \
- CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
+ CLOSURE_RUNNING|CLOSURE_STACK) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
@@ -268,17 +242,6 @@ struct closure_with_waitlist {
struct closure_waitlist wait;
};
-struct closure_with_timer {
- struct closure cl;
- struct timer_list timer;
-};
-
-struct closure_with_waitlist_and_timer {
- struct closure cl;
- struct closure_waitlist wait;
- struct timer_list timer;
-};
-
extern unsigned invalid_closure_type(void);
#define __CLOSURE_TYPE(cl, _t) \
@@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void);
( \
__CLOSURE_TYPE(cl, closure) \
__CLOSURE_TYPE(cl, closure_with_waitlist) \
- __CLOSURE_TYPE(cl, closure_with_timer) \
- __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \
invalid_closure_type() \
)
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
-void closure_queue(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
@@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list);
-void do_closure_timer_init(struct closure *cl);
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer);
-void __closure_flush(struct closure *cl, struct timer_list *timer);
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);
@@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl)
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}
-static inline bool closure_is_stopped(struct closure *cl)
-{
- return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
-}
-
static inline bool closure_is_unlocked(struct closure *cl)
{
return atomic_read(&cl->remaining) == -1;
@@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl)
static inline void do_closure_init(struct closure *cl, struct closure *parent,
bool running)
{
- switch (cl->type) {
- case TYPE_closure_with_timer:
- case TYPE_closure_with_waitlist_and_timer:
- do_closure_timer_init(cl);
- default:
- break;
- }
-
cl->parent = parent;
if (parent)
closure_get(parent);
@@ -429,8 +370,7 @@ do { \
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
- CLOSURE_BLOCKING|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**
@@ -461,24 +401,6 @@ do { \
#define closure_lock(cl, parent) \
__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
-/**
- * closure_delay() - delay some number of jiffies
- * @cl: the closure that will sleep
- * @delay: the delay in jiffies
- *
- * Takes a refcount on @cl which will be released after @delay jiffies; this may
- * be used to have a function run after a delay with continue_at(), or
- * closure_sync() may be used for a convoluted version of msleep().
- */
-#define closure_delay(cl, delay) \
- __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
-
-#define closure_flush(cl) \
- __closure_flush(__to_internal_closure(cl), &(cl)->timer)
-
-#define closure_flush_sync(cl) \
- __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
-
static inline void __closure_end_sleep(struct closure *cl)
{
__set_current_state(TASK_RUNNING);
@@ -498,40 +420,6 @@ static inline void __closure_start_sleep(struct closure *cl)
}
/**
- * closure_blocking() - returns true if the closure is in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- */
-static inline bool closure_blocking(struct closure *cl)
-{
- return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
-}
-
-/**
- * set_closure_blocking() - put a closure in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- *
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void set_closure_blocking(struct closure *cl)
-{
- if (!closure_blocking(cl))
- atomic_add(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/*
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void clear_closure_blocking(struct closure *cl)
-{
- if (closure_blocking(cl))
- atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/**
* closure_wake_up() - wake up all closures on a wait list.
*/
static inline void closure_wake_up(struct closure_waitlist *list)
@@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* refcount on our closure. If this was a stack allocated closure, that would be
* bad.
*/
-#define __closure_wait_event(list, cl, condition, _block) \
+#define closure_wait_event(list, cl, condition) \
({ \
- bool block = _block; \
typeof(condition) ret; \
\
while (1) { \
ret = (condition); \
if (ret) { \
__closure_wake_up(list); \
- if (block) \
- closure_sync(cl); \
- \
+ closure_sync(cl); \
break; \
} \
\
- if (block) \
- __closure_start_sleep(cl); \
- \
- if (!closure_wait(list, cl)) { \
- if (!block) \
- break; \
+ __closure_start_sleep(cl); \
\
+ if (!closure_wait(list, cl)) \
schedule(); \
- } \
} \
\
ret; \
})
-/**
- * closure_wait_event() - wait on a condition, synchronously or asynchronously.
- * @list: the wait list to wait on
- * @cl: the closure that is doing the waiting
- * @condition: a C expression for the event to wait for
- *
- * If the closure is in blocking mode, sleeps until the @condition evaluates to
- * true - exactly like wait_event().
- *
- * If the closure is not in blocking mode, waits asynchronously; if the
- * condition is currently false the @cl is put onto @list and returns. @list
- * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
- * wait for another thread to wake up @list, which drops the refcount on @cl.
- *
- * Returns the value of @condition; @cl will be on @list iff @condition was
- * false.
- *
- * closure_wake_up(@list) must be called after changing any variable that could
- * cause @condition to become true.
- */
-#define closure_wait_event(list, cl, condition) \
- __closure_wait_event(list, cl, condition, closure_blocking(cl))
-
-#define closure_wait_event_async(list, cl, condition) \
- __closure_wait_event(list, cl, condition, false)
-
-#define closure_wait_event_sync(list, cl, condition) \
- __closure_wait_event(list, cl, condition, true)
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
@@ -642,7 +503,7 @@ do { \
#define continue_at_nobarrier(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
- closure_queue(cl); \
+ closure_queue(_cl); \
return; \
} while (0)
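A side note on the one-character change above (closure_queue(cl) -> closure_queue(_cl)): the old expansion referenced a literal cl, so continue_at_nobarrier() only compiled when the caller's closure variable happened to be named cl. A hypothetical sketch of a caller that now works with any variable name; example_step(), example_next() and example_wq are made up for illustration.

/* Illustrative only: the macro now queues whatever closure it was given. */
static void example_next(struct closure *cl)
{
	/* ... next stage of a (hypothetical) closure state machine ... */
}

static void example_step(struct closure *my_cl, struct workqueue_struct *example_wq)
{
	/* expands to closure_queue(my_cl) -- no longer needs a variable named 'cl' */
	continue_at_nobarrier(my_cl, example_next, example_wq);
}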
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 88e6411eab4..264fcfbd629 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,7 +8,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -77,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
return out - buf;
}
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
- return scnprintf(buf, size, "%zu level %i/%i",
- PTR_BUCKET_NR(b->c, &b->key, 0),
- b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
- return bkey_cmp(k, (!b->level)
- ? &START_KEY(bkey_next(k))
- : bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
static void dump_bset(struct btree *b, struct bset *i)
{
- struct bkey *k;
+ struct bkey *k, *next;
unsigned j;
char buf[80];
- for (k = i->start; k < end(i); k = bkey_next(k)) {
+ for (k = i->start; k < end(i); k = next) {
+ next = bkey_next(k);
+
bch_bkey_to_text(buf, sizeof(buf), k);
printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
(uint64_t *) k - i->d, i->keys, buf);
@@ -115,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
printk(" %s\n", bch_ptr_status(b->c, k));
- if (bkey_next(k) < end(i) &&
- skipped_backwards(b, k))
+ if (next < end(i) &&
+ bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
printk(KERN_ERR "Key skipped backwards\n");
}
}
-#endif
+static void bch_dump_bucket(struct btree *b)
+{
+ unsigned i;
-#ifdef CONFIG_BCACHE_DEBUG
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ dump_bset(b, b->sets[i].data);
+ console_unlock();
+}
void bch_btree_verify(struct btree *b, struct bset *new)
{
@@ -176,66 +169,44 @@ void bch_btree_verify(struct btree *b, struct bset *new)
mutex_unlock(&b->c->verify_lock);
}
-static void data_verify_endio(struct bio *bio, int error)
-{
- struct closure *cl = bio->bi_private;
- closure_put(cl);
-}
-
-void bch_data_verify(struct search *s)
+void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- struct closure *cl = &s->cl;
struct bio *check;
struct bio_vec *bv;
int i;
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-
- check = bio_clone(s->orig_bio, GFP_NOIO);
+ check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
- check->bi_rw = READ_SYNC;
- check->bi_private = cl;
- check->bi_end_io = data_verify_endio;
-
- closure_bio_submit(check, cl, &dc->disk);
- closure_sync(cl);
+ submit_bio_wait(READ_SYNC, check);
- bio_for_each_segment(bv, s->orig_bio, i) {
- void *p1 = kmap(bv->bv_page);
- void *p2 = kmap(check->bi_io_vec[i].bv_page);
+ bio_for_each_segment(bv, bio, i) {
+ void *p1 = kmap_atomic(bv->bv_page);
+ void *p2 = page_address(check->bi_io_vec[i].bv_page);
- if (memcmp(p1 + bv->bv_offset,
- p2 + bv->bv_offset,
- bv->bv_len))
- printk(KERN_ERR
- "bcache (%s): verify failed at sector %llu\n",
- bdevname(dc->bdev, name),
- (uint64_t) s->orig_bio->bi_sector);
+ cache_set_err_on(memcmp(p1 + bv->bv_offset,
+ p2 + bv->bv_offset,
+ bv->bv_len),
+ dc->disk.c,
+ "verify failed at dev %s sector %llu",
+ bdevname(dc->bdev, name),
+ (uint64_t) bio->bi_sector);
- kunmap(bv->bv_page);
- kunmap(check->bi_io_vec[i].bv_page);
+ kunmap_atomic(p1);
}
- __bio_for_each_segment(bv, check, i, 0)
+ bio_for_each_segment_all(bv, check, i)
__free_page(bv->bv_page);
out_put:
bio_put(check);
}
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
{
unsigned ret = 0;
struct btree_iter iter;
@@ -247,72 +218,60 @@ unsigned bch_count_data(struct btree *b)
return ret;
}
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
- va_list args)
-{
- unsigned i;
- char buf[80];
-
- console_lock();
-
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
-
- vprintk(fmt, args);
-
- console_unlock();
-
- bch_btree_to_text(buf, sizeof(buf), b);
- panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
- const char *fmt, ...)
-{
- struct bkey *k;
-
- if (!i->keys)
- return;
-
- for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
- if (skipped_backwards(b, k)) {
- va_list args;
- va_start(args, fmt);
-
- vdump_bucket_and_panic(b, fmt, args);
- va_end(args);
- }
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
struct btree_iter iter;
-
- if (b->level)
- return;
+ const char *err;
for_each_key(b, k, &iter) {
- if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
- printk(KERN_ERR "Keys out of order:\n");
- goto bug;
- }
-
- if (bch_ptr_invalid(b, k))
- continue;
-
- if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
- printk(KERN_ERR "Overlapping keys:\n");
- goto bug;
+ if (!b->level) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
}
p = k;
}
+
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+
return;
bug:
+ bch_dump_bucket(b);
+
va_start(args, fmt);
- vdump_bucket_and_panic(b, fmt, args);
+ vprintk(fmt, args);
va_end(args);
+
+ panic("bcache error: %s:\n", err);
+}
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
}
#endif
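
The rewritten __bch_check_keys() above reports which invariant a leaf node broke ("Keys out of order", "Overlapping keys", "Duplicate keys") and dumps the bucket before panicking, instead of printing a generic message. As a rough, self-contained model of what the leaf-node invariants mean, assuming extent keys indexed by their end offset (a simplification of the real bkey layout; all names here are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct toy_key {
		uint64_t start;		/* KEY_START() in bcache terms */
		uint64_t end;		/* KEY_OFFSET(): extent keys sort by end */
	};

	/* Returns NULL if the keys are valid, else a description of the bug. */
	static const char *check_keys(const struct toy_key *k, int n)
	{
		for (int i = 1; i < n; i++) {
			if (k[i].start < k[i - 1].start)
				return "Keys out of order";
			if (k[i].start < k[i - 1].end)
				return "Overlapping keys";
		}
		return NULL;
	}

	int main(void)
	{
		struct toy_key good[] = { {0, 8}, {8, 16}, {32, 40} };
		struct toy_key bad[]  = { {0, 8}, {4, 16} };	/* 4 < 8: overlap */
		const char *err;

		err = check_keys(good, 3);
		printf("good: %s\n", err ? err : "ok");

		err = check_keys(bad, 2);
		printf("bad:  %s\n", err ? err : "ok");
		return 0;
	}
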
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 1c39b5a2489..2ede60e3187 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -4,40 +4,44 @@
/* Btree/bkey debug printing */
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *);
-void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
-void bch_check_keys(struct btree *, const char *, ...);
-
-#define bch_check_key_order(b, i) \
- bch_check_key_order_msg(b, i, "keys out of order")
-#define EBUG_ON(cond) BUG_ON(cond)
-
-#else /* EDEBUG */
-
-#define bch_count_data(b) 0
-#define bch_check_key_order(b, i) do {} while (0)
-#define bch_check_key_order_msg(b, i, ...) do {} while (0)
-#define bch_check_keys(b, ...) do {} while (0)
-#define EBUG_ON(cond) do {} while (0)
-
-#endif
#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_verify(struct btree *, struct bset *);
-void bch_data_verify(struct search *);
+void bch_data_verify(struct cached_dev *, struct bio *);
+int __bch_count_data(struct btree *);
+void __bch_check_keys(struct btree *, const char *, ...);
+void bch_btree_iter_next_check(struct btree_iter *);
+
+#define EBUG_ON(cond) BUG_ON(cond)
+#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
+#define key_merging_disabled(c) ((c)->key_merging_disabled)
+#define bypass_torture_test(d) ((d)->bypass_torture_test)
#else /* DEBUG */
static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
-static inline void bch_data_verify(struct search *s) {};
+static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+static inline int __bch_count_data(struct btree *b) { return -1; }
+static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#define EBUG_ON(cond) do { if (cond); } while (0)
+#define expensive_debug_checks(c) 0
+#define key_merging_disabled(c) 0
+#define bypass_torture_test(d) 0
#endif
+#define bch_count_data(b) \
+ (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
+
+#define bch_check_keys(b, ...) \
+do { \
+ if (expensive_debug_checks((b)->c)) \
+ __bch_check_keys(b, __VA_ARGS__); \
+} while (0)
+
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
#else
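
debug.h now gates the expensive consistency checks at runtime rather than at build time: bch_count_data() and bch_check_keys() only call the double-underscore implementations when expensive_debug_checks(c) is set on the cache set, so a CONFIG_BCACHE_DEBUG kernel keeps the checks compiled in but switched off by default. A standalone sketch of that gating pattern (illustrative names only, not the kernel interfaces):

	#include <stdbool.h>
	#include <stdio.h>

	struct cache_set { bool expensive_debug_checks; };

	#define expensive_debug_checks(c)	((c)->expensive_debug_checks)

	static void __check_keys(struct cache_set *c, const char *who)
	{
		(void)c;	/* the real code walks the btree node here */
		printf("running expensive key checks for %s\n", who);
	}

	/* Compiled unconditionally, but a cheap flag test decides if it runs. */
	#define check_keys(c, who)				\
	do {							\
		if (expensive_debug_checks(c))			\
			__check_keys(c, who);			\
	} while (0)

	int main(void)
	{
		struct cache_set quiet = { .expensive_debug_checks = false };
		struct cache_set noisy = { .expensive_debug_checks = true };

		check_keys(&quiet, "quiet");	/* skipped at runtime */
		check_keys(&noisy, "noisy");	/* actually runs */
		return 0;
	}
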
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8435f81e5d8..ecdaa671bd5 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include <trace/events/bcache.h>
@@ -31,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
- struct btree_op *op, unsigned bucket_index)
+ unsigned bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
+ struct closure cl;
unsigned len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+ closure_init_stack(&cl);
+
pr_debug("reading %llu", (uint64_t) bucket);
while (offset < ca->sb.bucket_size) {
@@ -55,11 +57,11 @@ reread: left = ca->sb.bucket_size - offset;
bio->bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
- bio->bi_private = &op->cl;
+ bio->bi_private = &cl;
bch_bio_map(bio, data);
- closure_bio_submit(bio, &op->cl, ca);
- closure_sync(&op->cl);
+ closure_bio_submit(bio, &cl, ca);
+ closure_sync(&cl);
/* This function could be simpler now since we no longer write
* journal entries that overlap bucket boundaries; this means
@@ -72,7 +74,7 @@ reread: left = ca->sb.bucket_size - offset;
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
- if (j->magic != jset_magic(ca->set))
+ if (j->magic != jset_magic(&ca->sb))
return ret;
if (bytes > left << 9)
@@ -129,12 +131,11 @@ next_set:
return ret;
}
-int bch_journal_read(struct cache_set *c, struct list_head *list,
- struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b) \
({ \
- int ret = journal_read_bucket(ca, list, op, b); \
+ int ret = journal_read_bucket(ca, list, b); \
__set_bit(b, bitmap); \
if (ret < 0) \
return ret; \
@@ -292,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
- struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
struct bkey *k;
@@ -301,31 +301,30 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
list_entry(list->prev, struct journal_replay, list);
uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+ struct keylist keylist;
+
+ bch_keylist_init(&keylist);
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
- if (n != i->j.seq)
- pr_err(
- "journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
- n, i->j.seq - 1, start, end);
+ cache_set_err_on(n != i->j.seq, s,
+"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
for (k = i->j.start;
k < end(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
- bkey_copy(op->keys.top, k);
- bch_keylist_push(&op->keys);
-
- op->journal = i->pin;
- atomic_inc(op->journal);
+ bkey_copy(keylist.top, k);
+ bch_keylist_push(&keylist);
- ret = bch_btree_insert(op, s);
+ ret = bch_btree_insert(s, &keylist, i->pin, NULL);
if (ret)
goto err;
- BUG_ON(!bch_keylist_empty(&op->keys));
+ BUG_ON(!bch_keylist_empty(&keylist));
keys++;
cond_resched();
@@ -339,14 +338,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
pr_info("journal replay done, %i keys in %i entries, seq %llu",
keys, entries, end);
-
+err:
while (!list_empty(list)) {
i = list_first_entry(list, struct journal_replay, list);
list_del(&i->list);
kfree(i);
}
-err:
- closure_sync(&op->cl);
+
return ret;
}
@@ -358,48 +356,35 @@ static void btree_flush_write(struct cache_set *c)
 * Try to find the btree node that references the oldest journal
* entry, best is our current candidate and is locked if non NULL:
*/
- struct btree *b, *best = NULL;
- unsigned iter;
+ struct btree *b, *best;
+ unsigned i;
+retry:
+ best = NULL;
+
+ for_each_cached_btree(b, c, i)
+ if (btree_current_write(b)->journal) {
+ if (!best)
+ best = b;
+ else if (journal_pin_cmp(c,
+ btree_current_write(best)->journal,
+ btree_current_write(b)->journal)) {
+ best = b;
+ }
+ }
- for_each_cached_btree(b, c, iter) {
- if (!down_write_trylock(&b->lock))
- continue;
+ b = best;
+ if (b) {
+ rw_lock(true, b, b->level);
- if (!btree_node_dirty(b) ||
- !btree_current_write(b)->journal) {
+ if (!btree_current_write(b)->journal) {
rw_unlock(true, b);
- continue;
+ /* We raced */
+ goto retry;
}
- if (!best)
- best = b;
- else if (journal_pin_cmp(c,
- btree_current_write(best),
- btree_current_write(b))) {
- rw_unlock(true, best);
- best = b;
- } else
- rw_unlock(true, b);
+ bch_btree_node_write(b, NULL);
+ rw_unlock(true, b);
}
-
- if (best)
- goto out;
-
- /* We can't find the best btree node, just pick the first */
- list_for_each_entry(b, &c->btree_cache, list)
- if (!b->level && btree_node_dirty(b)) {
- best = b;
- rw_lock(true, best, best->level);
- goto found;
- }
-
-out:
- if (!best)
- return;
-found:
- if (btree_node_dirty(best))
- bch_btree_node_write(best, NULL);
- rw_unlock(true, best);
}
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
@@ -495,7 +480,7 @@ static void journal_reclaim(struct cache_set *c)
do_journal_discard(ca);
if (c->journal.blocks_free)
- return;
+ goto out;
/*
* Allocate:
@@ -521,7 +506,7 @@ static void journal_reclaim(struct cache_set *c)
if (n)
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
+out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
}
@@ -554,32 +539,26 @@ static void journal_write_endio(struct bio *bio, int error)
struct journal_write *w = bio->bi_private;
cache_set_err_on(error, w->c, "journal io error");
- closure_put(&w->c->journal.io.cl);
+ closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
- struct journal *j = container_of(cl, struct journal, io.cl);
- struct cache_set *c = container_of(j, struct cache_set, journal);
-
+ struct journal *j = container_of(cl, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];
__closure_wake_up(&w->wait);
-
- if (c->journal_delay_ms)
- closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
-
- continue_at(cl, journal_write, system_wq);
+ continue_at_nobarrier(cl, journal_write, system_wq);
}
static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
@@ -617,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
for_each_cache(ca, c, i)
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(c);
+ w->data->magic = jset_magic(&c->sb);
w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data);
@@ -660,121 +639,134 @@ static void journal_write_unlocked(struct closure *cl)
static void journal_write(struct closure *cl)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
journal_write_unlocked(cl);
}
-static void __journal_try_write(struct cache_set *c, bool noflush)
+static void journal_try_write(struct cache_set *c)
__releases(c->journal.lock)
{
- struct closure *cl = &c->journal.io.cl;
+ struct closure *cl = &c->journal.io;
+ struct journal_write *w = c->journal.cur;
- if (!closure_trylock(cl, &c->cl))
- spin_unlock(&c->journal.lock);
- else if (noflush && journal_full(&c->journal)) {
- spin_unlock(&c->journal.lock);
- continue_at(cl, journal_write, system_wq);
- } else
+ w->need_write = true;
+
+ if (closure_trylock(cl, &c->cl))
journal_write_unlocked(cl);
+ else
+ spin_unlock(&c->journal.lock);
}
-#define journal_try_write(c) __journal_try_write(c, false)
-
-void bch_journal_meta(struct cache_set *c, struct closure *cl)
+static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ unsigned nkeys)
{
- struct journal_write *w;
+ size_t sectors;
+ struct closure cl;
- if (CACHE_SYNC(&c->sb)) {
- spin_lock(&c->journal.lock);
+ closure_init_stack(&cl);
+
+ spin_lock(&c->journal.lock);
- w = c->journal.cur;
- w->need_write = true;
+ while (1) {
+ struct journal_write *w = c->journal.cur;
- if (cl)
- BUG_ON(!closure_wait(&w->wait, cl));
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+ c) * c->sb.block_size;
- closure_flush(&c->journal.io);
- __journal_try_write(c, true);
+ if (sectors <= min_t(size_t,
+ c->journal.blocks_free * c->sb.block_size,
+ PAGE_SECTORS << JSET_BITS))
+ return w;
+
+ /* XXX: tracepoint */
+ if (!journal_full(&c->journal)) {
+ trace_bcache_journal_entry_full(c);
+
+ /*
+ * XXX: If we were inserting so many keys that they
+ * won't fit in an _empty_ journal write, we'll
+ * deadlock. For now, handle this in
+ * bch_keylist_realloc() - but something to think about.
+ */
+ BUG_ON(!w->data->keys);
+
+ closure_wait(&w->wait, &cl);
+ journal_try_write(c); /* unlocks */
+ } else {
+ trace_bcache_journal_full(c);
+
+ closure_wait(&c->journal.wait, &cl);
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ }
+
+ closure_sync(&cl);
+ spin_lock(&c->journal.lock);
}
}
+static void journal_write_work(struct work_struct *work)
+{
+ struct cache_set *c = container_of(to_delayed_work(work),
+ struct cache_set,
+ journal.work);
+ spin_lock(&c->journal.lock);
+ journal_try_write(c);
+}
+
/*
* Entry point to the journalling code - bio_insert() and btree_invalidate()
* pass bch_journal() a list of keys to be journalled, and then
* bch_journal() hands those same keys off to btree_insert_async()
*/
-void bch_journal(struct closure *cl)
+atomic_t *bch_journal(struct cache_set *c,
+ struct keylist *keys,
+ struct closure *parent)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct cache_set *c = op->c;
struct journal_write *w;
- size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
-
- if (op->type != BTREE_INSERT ||
- !CACHE_SYNC(&c->sb))
- goto out;
+ atomic_t *ret;
- /*
- * If we're looping because we errored, might already be waiting on
- * another journal write:
- */
- while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
- closure_sync(cl->parent);
+ if (!CACHE_SYNC(&c->sb))
+ return NULL;
- spin_lock(&c->journal.lock);
+ w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
- if (journal_full(&c->journal)) {
- trace_bcache_journal_full(c);
+ memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+ w->data->keys += bch_keylist_nkeys(keys);
- closure_wait(&c->journal.wait, cl);
+ ret = &fifo_back(&c->journal.pin);
+ atomic_inc(ret);
- journal_reclaim(c);
+ if (parent) {
+ closure_wait(&w->wait, parent);
+ journal_try_write(c);
+ } else if (!w->need_write) {
+ schedule_delayed_work(&c->journal.work,
+ msecs_to_jiffies(c->journal_delay_ms));
+ spin_unlock(&c->journal.lock);
+ } else {
spin_unlock(&c->journal.lock);
-
- btree_flush_write(c);
- continue_at(cl, bch_journal, bcache_wq);
}
- w = c->journal.cur;
- w->need_write = true;
- b = __set_blocks(w->data, w->data->keys + n, c);
-
- if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
- b > c->journal.blocks_free) {
- trace_bcache_journal_entry_full(c);
-
- /*
- * XXX: If we were inserting so many keys that they won't fit in
- * an _empty_ journal write, we'll deadlock. For now, handle
- * this in bch_keylist_realloc() - but something to think about.
- */
- BUG_ON(!w->data->keys);
-
- BUG_ON(!closure_wait(&w->wait, cl));
-
- closure_flush(&c->journal.io);
- journal_try_write(c);
- continue_at(cl, bch_journal, bcache_wq);
- }
-
- memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
- w->data->keys += n;
+ return ret;
+}
- op->journal = &fifo_back(&c->journal.pin);
- atomic_inc(op->journal);
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+ struct keylist keys;
+ atomic_t *ref;
- if (op->flush_journal) {
- closure_flush(&c->journal.io);
- closure_wait(&w->wait, cl->parent);
- }
+ bch_keylist_init(&keys);
- journal_try_write(c);
-out:
- bch_btree_insert_async(cl);
+ ref = bch_journal(c, &keys, cl);
+ if (ref)
+ atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
@@ -790,6 +782,7 @@ int bch_journal_alloc(struct cache_set *c)
closure_init_unlocked(&j->io);
spin_lock_init(&j->lock);
+ INIT_DELAYED_WORK(&j->work, journal_write_work);
c->journal_delay_ms = 100;
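
The journal.c rework makes bch_journal() return an atomic_t * pin on the journal entry that now holds the caller's keys; the caller hands that pin to bch_btree_insert() and drops it once the insert completes (bch_journal_meta() drops it immediately since it journals no keys), and journal reclaim may only discard entries whose pin count has fallen to zero. A much-simplified userspace model of the refcount-pin idea, with assumed names, a fixed-size array instead of the real FIFO, and no locking:

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_ENTRIES 4

	static atomic_int pin[NR_ENTRIES];
	static int cur;			/* journal entry currently being filled */

	/* Append keys to the open entry and hand back a pin on it. */
	static atomic_int *journal_add(const char *keys)
	{
		printf("journalled: %s\n", keys);
		atomic_fetch_add(&pin[cur], 1);
		return &pin[cur];
	}

	/* Reclaim may only discard entries nobody is still pinning. */
	static void journal_reclaim(void)
	{
		for (int i = 0; i < NR_ENTRIES; i++)
			if (atomic_load(&pin[i]) == 0)
				printf("entry %d reclaimable\n", i);
	}

	int main(void)
	{
		atomic_int *ref = journal_add("inode 1, extent 0..8");

		journal_reclaim();		/* entry 0 still pinned, not listed */
		atomic_fetch_sub(ref, 1);	/* the btree insert completed */
		journal_reclaim();		/* entry 0 now reclaimable */
		return 0;
	}
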
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 3d7851274b0..a6472fda94b 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -75,43 +75,6 @@
* nodes that are pinning the oldest journal entries first.
*/
-#define BCACHE_JSET_VERSION_UUIDv1 1
-/* Always latest UUID format */
-#define BCACHE_JSET_VERSION_UUID 1
-#define BCACHE_JSET_VERSION 1
-
-/*
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-struct jset {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t keys;
-
- uint64_t last_seq;
-
- BKEY_PADDED(uuid_bucket);
- BKEY_PADDED(btree_root);
- uint16_t btree_level;
- uint16_t pad[3];
-
- uint64_t prio_bucket[MAX_CACHES_PER_SET];
-
- union {
- struct bkey start[0];
- uint64_t d[0];
- };
-};
-
/*
* Only used for holding the journal entries we read in btree_journal_read()
* during cache_registration
@@ -140,7 +103,8 @@ struct journal {
spinlock_t lock;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
- struct closure_with_timer io;
+ struct closure io;
+ struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
unsigned blocks_free;
@@ -188,8 +152,7 @@ struct journal_device {
};
#define journal_pin_cmp(c, l, r) \
- (fifo_idx(&(c)->journal.pin, (l)->journal) > \
- fifo_idx(&(c)->journal.pin, (r)->journal))
+ (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
#define JOURNAL_PIN 20000
@@ -199,15 +162,14 @@ struct journal_device {
struct closure;
struct cache_set;
struct btree_op;
+struct keylist;
-void bch_journal(struct closure *);
+atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
void bch_journal_next(struct journal *);
void bch_journal_mark(struct cache_set *, struct list_head *);
void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
- struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
- struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
void bch_journal_free(struct cache_set *);
int bch_journal_alloc(struct cache_set *);
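
With btree write state now storing the journal pin (an atomic_t *) directly, journal_pin_cmp() above reduces to comparing the two pins' positions inside the journal FIFO; the node pinning the oldest entry is the one btree_flush_write() wants to flush first. A hypothetical standalone equivalent, where the FIFO layout and all names are assumptions rather than the kernel's:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define FIFO_SIZE 8

	struct journal_fifo {
		atomic_int pin[FIFO_SIZE];
		unsigned front;		/* index of the oldest live entry */
	};

	/* Position of a pin relative to the oldest entry, with wraparound. */
	static unsigned fifo_idx(struct journal_fifo *j, atomic_int *p)
	{
		return (unsigned)(p - j->pin - j->front) % FIFO_SIZE;
	}

	/* true if l pins a newer journal entry than r */
	static bool journal_pin_cmp(struct journal_fifo *j,
				    atomic_int *l, atomic_int *r)
	{
		return fifo_idx(j, l) > fifo_idx(j, r);
	}

	int main(void)
	{
		struct journal_fifo j = { .front = 2 };
		atomic_int *older = &j.pin[3], *newer = &j.pin[6];

		printf("newer vs older: %d\n", journal_pin_cmp(&j, newer, older));
		printf("older vs newer: %d\n", journal_pin_cmp(&j, older, newer));
		return 0;
	}
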
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 1a3b4f4786c..7c1275e6602 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -12,8 +12,9 @@
#include <trace/events/bcache.h>
struct moving_io {
+ struct closure cl;
struct keybuf_key *w;
- struct search s;
+ struct data_insert_op op;
struct bbio bio;
};
@@ -38,13 +39,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl)
{
- struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io);
}
static void write_moving_finish(struct closure *cl)
{
- struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
struct bio_vec *bv;
int i;
@@ -52,13 +53,12 @@ static void write_moving_finish(struct closure *cl)
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
- if (io->s.op.insert_collision)
+ if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
- bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+ bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
- atomic_dec_bug(&io->s.op.c->in_flight);
- closure_wake_up(&io->s.op.c->moving_gc_wait);
+ up(&io->op.c->moving_in_flight);
closure_return_with_destructor(cl, moving_io_destructor);
}
@@ -66,12 +66,12 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
struct moving_io *io = container_of(bio->bi_private,
- struct moving_io, s.cl);
+ struct moving_io, cl);
if (error)
- io->s.error = error;
+ io->op.error = error;
- bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+ bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
static void moving_init(struct moving_io *io)
@@ -85,54 +85,53 @@ static void moving_init(struct moving_io *io)
bio->bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
- bio->bi_private = &io->s.cl;
+ bio->bi_private = &io->cl;
bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
static void write_moving(struct closure *cl)
{
- struct search *s = container_of(cl, struct search, cl);
- struct moving_io *io = container_of(s, struct moving_io, s);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct data_insert_op *op = &io->op;
- if (!s->error) {
+ if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
- s->op.lock = -1;
- s->op.write_prio = 1;
- s->op.cache_bio = &io->bio.bio;
+ io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ op->write_prio = 1;
+ op->bio = &io->bio.bio;
- s->writeback = KEY_DIRTY(&io->w->key);
- s->op.csum = KEY_CSUM(&io->w->key);
+ op->writeback = KEY_DIRTY(&io->w->key);
+ op->csum = KEY_CSUM(&io->w->key);
- s->op.type = BTREE_REPLACE;
- bkey_copy(&s->op.replace, &io->w->key);
+ bkey_copy(&op->replace_key, &io->w->key);
+ op->replace = true;
- closure_init(&s->op.cl, cl);
- bch_insert_data(&s->op.cl);
+ closure_call(&op->cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, write_moving_finish, NULL);
+ continue_at(cl, write_moving_finish, system_wq);
}
static void read_moving_submit(struct closure *cl)
{
- struct search *s = container_of(cl, struct search, cl);
- struct moving_io *io = container_of(s, struct moving_io, s);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
- bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+ bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
- continue_at(cl, write_moving, bch_gc_wq);
+ continue_at(cl, write_moving, system_wq);
}
-static void read_moving(struct closure *cl)
+static void read_moving(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
struct keybuf_key *w;
struct moving_io *io;
struct bio *bio;
+ struct closure cl;
+
+ closure_init_stack(&cl);
/* XXX: if we error, background writeback could stall indefinitely */
@@ -150,8 +149,8 @@ static void read_moving(struct closure *cl)
w->private = io;
io->w = w;
- io->s.op.inode = KEY_INODE(&w->key);
- io->s.op.c = c;
+ io->op.inode = KEY_INODE(&w->key);
+ io->op.c = c;
moving_init(io);
bio = &io->bio.bio;
@@ -164,13 +163,8 @@ static void read_moving(struct closure *cl)
trace_bcache_gc_copy(&w->key);
- closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
-
- if (atomic_inc_return(&c->in_flight) >= 64) {
- closure_wait_event(&c->moving_gc_wait, cl,
- atomic_read(&c->in_flight) < 64);
- continue_at(cl, read_moving, bch_gc_wq);
- }
+ down(&c->moving_in_flight);
+ closure_call(&io->cl, read_moving_submit, NULL, &cl);
}
if (0) {
@@ -180,7 +174,7 @@ err: if (!IS_ERR_OR_NULL(w->private))
bch_keybuf_del(&c->moving_gc_keys, w);
}
- closure_return(cl);
+ closure_sync(&cl);
}
static bool bucket_cmp(struct bucket *l, struct bucket *r)
@@ -193,15 +187,14 @@ static unsigned bucket_heap_top(struct cache *ca)
return GC_SECTORS_USED(heap_peek(&ca->heap));
}
-void bch_moving_gc(struct closure *cl)
+void bch_moving_gc(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
struct cache *ca;
struct bucket *b;
unsigned i;
if (!c->copy_gc_enabled)
- closure_return(cl);
+ return;
mutex_lock(&c->bucket_lock);
@@ -242,13 +235,11 @@ void bch_moving_gc(struct closure *cl)
c->moving_gc_keys.last_scanned = ZERO_KEY;
- closure_init(&c->moving_gc, cl);
- read_moving(&c->moving_gc);
-
- closure_return(cl);
+ read_moving(c);
}
void bch_moving_init_cache_set(struct cache_set *c)
{
bch_keybuf_init(&c->moving_gc_keys);
+ sema_init(&c->moving_in_flight, 64);
}
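
movinggc.c replaces the hand-rolled in_flight counter and closure wait list with a counting semaphore: read_moving() down()s moving_in_flight (initialised to 64 in bch_moving_init_cache_set()) before issuing each copy, and write_moving_finish() up()s it when the copy completes. The same throttling pattern in plain POSIX userspace, shown with threads and a smaller limit purely for illustration, not as kernel code:

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>
	#include <unistd.h>

	static sem_t moving_in_flight;

	static void *copy_bucket(void *arg)
	{
		long nr = (long)arg;

		printf("copy %ld started\n", nr);
		usleep(100 * 1000);		/* pretend to move the data */
		printf("copy %ld done\n", nr);
		sem_post(&moving_in_flight);	/* like up() in write_moving_finish() */
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[16];

		sem_init(&moving_in_flight, 0, 4);	/* at most 4 copies in flight */

		for (long i = 0; i < 16; i++) {
			sem_wait(&moving_in_flight);	/* like down() in read_moving() */
			pthread_create(&tid[i], NULL, copy_bucket, (void *)i);
		}

		for (int i = 0; i < 16; i++)
			pthread_join(tid[i], NULL);
		return 0;
	}

Build with -pthread; the output shows no more than four copies overlapping at any moment.
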
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2a7f0dd6aba..fbcc851ed5a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
-static void check_should_skip(struct cached_dev *, struct search *);
+static void bch_data_insert_start(struct closure *);
/* Cgroup interface */
@@ -213,221 +213,79 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
-static void bio_invalidate(struct closure *cl)
+static void bch_data_insert_keys(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct bio *bio = op->cache_bio;
-
- pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
-
- while (bio_sectors(bio)) {
- unsigned len = min(bio_sectors(bio), 1U << 14);
-
- if (bch_keylist_realloc(&op->keys, 0, op->c))
- goto out;
-
- bio->bi_sector += len;
- bio->bi_size -= len << 9;
-
- bch_keylist_add(&op->keys,
- &KEY(op->inode, bio->bi_sector, len));
- }
-
- op->insert_data_done = true;
- bio_put(bio);
-out:
- continue_at(cl, bch_journal, bcache_wq);
-}
-
-struct open_bucket {
- struct list_head list;
- struct task_struct *last;
- unsigned sectors_free;
- BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
-{
- struct open_bucket *b;
-
- while (!list_empty(&c->data_buckets)) {
- b = list_first_entry(&c->data_buckets,
- struct open_bucket, list);
- list_del(&b->list);
- kfree(b);
- }
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
- int i;
-
- spin_lock_init(&c->data_bucket_lock);
-
- for (i = 0; i < 6; i++) {
- struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
- if (!b)
- return -ENOMEM;
-
- list_add(&b->list, &c->data_buckets);
- }
-
- return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The idea is that if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you've starting Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache awhile, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
- const struct bkey *search,
- struct task_struct *task,
- struct bkey *alloc)
-{
- struct open_bucket *ret, *ret_task = NULL;
-
- list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
- goto found;
- else if (ret->last == task)
- ret_task = ret;
-
- ret = ret_task ?: list_first_entry(&c->data_buckets,
- struct open_bucket, list);
-found:
- if (!ret->sectors_free && KEY_PTRS(alloc)) {
- ret->sectors_free = c->sb.bucket_size;
- bkey_copy(&ret->key, alloc);
- bkey_init(alloc);
- }
-
- if (!ret->sectors_free)
- ret = NULL;
-
- return ret;
-}
-
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
- struct search *s)
-{
- struct cache_set *c = s->op.c;
- struct open_bucket *b;
- BKEY_PADDED(key) alloc;
- struct closure cl, *w = NULL;
- unsigned i;
-
- if (s->writeback) {
- closure_init_stack(&cl);
- w = &cl;
- }
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ atomic_t *journal_ref = NULL;
+ struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+ int ret;
/*
- * We might have to allocate a new bucket, which we can't do with a
- * spinlock held. So if we have to allocate, we drop the lock, allocate
- * and then retry. KEY_PTRS() indicates whether alloc points to
- * allocated bucket(s).
+ * If we're looping, might already be waiting on
+ * another journal write - can't wait on more than one journal write at
+ * a time
+ *
+ * XXX: this looks wrong
*/
+#if 0
+ while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
+ closure_sync(&s->cl);
+#endif
- bkey_init(&alloc.key);
- spin_lock(&c->data_bucket_lock);
-
- while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
- unsigned watermark = s->op.write_prio
- ? WATERMARK_MOVINGGC
- : WATERMARK_NONE;
-
- spin_unlock(&c->data_bucket_lock);
-
- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
- return false;
+ if (!op->replace)
+ journal_ref = bch_journal(op->c, &op->insert_keys,
+ op->flush_journal ? cl : NULL);
- spin_lock(&c->data_bucket_lock);
+ ret = bch_btree_insert(op->c, &op->insert_keys,
+ journal_ref, replace_key);
+ if (ret == -ESRCH) {
+ op->replace_collision = true;
+ } else if (ret) {
+ op->error = -ENOMEM;
+ op->insert_data_done = true;
}
- /*
- * If we had to allocate, we might race and not need to allocate the
- * second time we call find_data_bucket(). If we allocated a bucket but
- * didn't use it, drop the refcount bch_bucket_alloc_set() took:
- */
- if (KEY_PTRS(&alloc.key))
- __bkey_put(c, &alloc.key);
-
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- EBUG_ON(ptr_stale(c, &b->key, i));
+ if (journal_ref)
+ atomic_dec_bug(journal_ref);
- /* Set up the pointer to the space we're allocating: */
+ if (!op->insert_data_done)
+ continue_at(cl, bch_data_insert_start, bcache_wq);
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- k->ptr[i] = b->key.ptr[i];
+ bch_keylist_free(&op->insert_keys);
+ closure_return(cl);
+}
- sectors = min(sectors, b->sectors_free);
+static void bch_data_invalidate(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio;
- SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
- SET_KEY_SIZE(k, sectors);
- SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+ pr_debug("invalidating %i sectors from %llu",
+ bio_sectors(bio), (uint64_t) bio->bi_sector);
- /*
- * Move b to the end of the lru, and keep track of what this bucket was
- * last used for:
- */
- list_move_tail(&b->list, &c->data_buckets);
- bkey_copy_key(&b->key, k);
- b->last = s->task;
+ while (bio_sectors(bio)) {
+ unsigned sectors = min(bio_sectors(bio),
+ 1U << (KEY_SIZE_BITS - 1));
- b->sectors_free -= sectors;
+ if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+ goto out;
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+ bio->bi_sector += sectors;
+ bio->bi_size -= sectors << 9;
- atomic_long_add(sectors,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
+ bch_keylist_add(&op->insert_keys,
+ &KEY(op->inode, bio->bi_sector, sectors));
}
- if (b->sectors_free < c->sb.block_size)
- b->sectors_free = 0;
-
- /*
- * k takes refcounts on the buckets it points to until it's inserted
- * into the btree, but if we're done with this bucket we just transfer
- * get_data_bucket()'s refcount.
- */
- if (b->sectors_free)
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
- spin_unlock(&c->data_bucket_lock);
- return true;
+ op->insert_data_done = true;
+ bio_put(bio);
+out:
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
}
-static void bch_insert_data_error(struct closure *cl)
+static void bch_data_insert_error(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to
@@ -438,35 +296,34 @@ static void bch_insert_data_error(struct closure *cl)
* from the keys we'll accomplish just that.
*/
- struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+ struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
- while (src != op->keys.top) {
+ while (src != op->insert_keys.top) {
struct bkey *n = bkey_next(src);
SET_KEY_PTRS(src, 0);
- bkey_copy(dst, src);
+ memmove(dst, src, bkey_bytes(src));
dst = bkey_next(dst);
src = n;
}
- op->keys.top = dst;
+ op->insert_keys.top = dst;
- bch_journal(cl);
+ bch_data_insert_keys(cl);
}
-static void bch_insert_data_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
if (error) {
/* TODO: We could try to recover from this. */
- if (s->writeback)
- s->error = error;
- else if (s->write)
- set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+ if (op->writeback)
+ op->error = error;
+ else if (!op->replace)
+ set_closure_fn(cl, bch_data_insert_error, bcache_wq);
else
set_closure_fn(cl, NULL, NULL);
}
@@ -474,18 +331,17 @@ static void bch_insert_data_endio(struct bio *bio, int error)
bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
-static void bch_insert_data_loop(struct closure *cl)
+static void bch_data_insert_start(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = op->cache_bio, *n;
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio, *n;
- if (op->skip)
- return bio_invalidate(cl);
+ if (op->bypass)
+ return bch_data_invalidate(cl);
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
- bch_queue_gc(op->c);
+ wake_up_gc(op->c);
}
/*
@@ -497,29 +353,30 @@ static void bch_insert_data_loop(struct closure *cl)
do {
unsigned i;
struct bkey *k;
- struct bio_set *split = s->d
- ? s->d->bio_split : op->c->bio_split;
+ struct bio_set *split = op->c->bio_split;
/* 1 for the device pointer and 1 for the chksum */
- if (bch_keylist_realloc(&op->keys,
+ if (bch_keylist_realloc(&op->insert_keys,
1 + (op->csum ? 1 : 0),
op->c))
- continue_at(cl, bch_journal, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
- k = op->keys.top;
+ k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
SET_KEY_OFFSET(k, bio->bi_sector);
- if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+ if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+ op->write_point, op->write_prio,
+ op->writeback))
goto err;
n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
- n->bi_end_io = bch_insert_data_endio;
+ n->bi_end_io = bch_data_insert_endio;
n->bi_private = cl;
- if (s->writeback) {
+ if (op->writeback) {
SET_KEY_DIRTY(k, true);
for (i = 0; i < KEY_PTRS(k); i++)
@@ -532,17 +389,17 @@ static void bch_insert_data_loop(struct closure *cl)
bio_csum(n, k);
trace_bcache_cache_insert(k);
- bch_keylist_push(&op->keys);
+ bch_keylist_push(&op->insert_keys);
n->bi_rw |= REQ_WRITE;
bch_submit_bbio(n, op->c, k, 0);
} while (n != bio);
op->insert_data_done = true;
- continue_at(cl, bch_journal, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
- BUG_ON(s->writeback);
+ BUG_ON(op->writeback);
/*
* But if it's not a writeback write we'd rather just bail out if
@@ -550,15 +407,15 @@ err:
* we might be starving btree writes for gc or something.
*/
- if (s->write) {
+ if (!op->replace) {
/*
* Writethrough write: We can't complete the write until we've
* updated the index. But we don't want to delay the write while
* we wait for buckets to be freed up, so just invalidate the
* rest of the write.
*/
- op->skip = true;
- return bio_invalidate(cl);
+ op->bypass = true;
+ return bch_data_invalidate(cl);
} else {
/*
* From a cache miss, we can just insert the keys for the data
@@ -567,15 +424,15 @@ err:
op->insert_data_done = true;
bio_put(bio);
- if (!bch_keylist_empty(&op->keys))
- continue_at(cl, bch_journal, bcache_wq);
+ if (!bch_keylist_empty(&op->insert_keys))
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
else
closure_return(cl);
}
}
/**
- * bch_insert_data - stick some data in the cache
+ * bch_data_insert - stick some data in the cache
*
* This is the starting point for any data to end up in a cache device; it could
* be from a normal write, or a writeback write, or a write to a flash only
@@ -587,56 +444,179 @@ err:
* data is written it calls bch_journal, and after the keys have been added to
* the next journal write they're inserted into the btree.
*
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
* and op->inode is used for the key inode.
*
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If s->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by s->cache_bio and op->inode.
*/
-void bch_insert_data(struct closure *cl)
+void bch_data_insert(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+ trace_bcache_write(op->bio, op->writeback, op->bypass);
- bch_keylist_init(&op->keys);
- bio_get(op->cache_bio);
- bch_insert_data_loop(cl);
+ bch_keylist_init(&op->insert_keys);
+ bio_get(op->bio);
+ bch_data_insert_start(cl);
}
-void bch_btree_insert_async(struct closure *cl)
+/* Congested? */
+
+unsigned bch_get_congested(struct cache_set *c)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ int i;
+ long rand;
- if (bch_btree_insert(op, op->c)) {
- s->error = -ENOMEM;
- op->insert_data_done = true;
- }
+ if (!c->congested_read_threshold_us &&
+ !c->congested_write_threshold_us)
+ return 0;
+
+ i = (local_clock_us() - c->congested_last_us) / 1024;
+ if (i < 0)
+ return 0;
+
+ i += atomic_read(&c->congested);
+ if (i >= 0)
+ return 0;
- if (op->insert_data_done) {
- bch_keylist_free(&op->keys);
- closure_return(cl);
- } else
- continue_at(cl, bch_insert_data_loop, bcache_wq);
+ i += CONGESTED_MAX;
+
+ if (i > 0)
+ i = fract_exp_two(i, 6);
+
+ rand = get_random_int();
+ i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+ return i > 0 ? i : 1;
}
-/* Common code for the make_request functions */
+static void add_sequential(struct task_struct *t)
+{
+ ewma_add(t->sequential_io_avg,
+ t->sequential_io, 8, 0);
-static void request_endio(struct bio *bio, int error)
+ t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
- struct closure *cl = bio->bi_private;
+ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
- if (error) {
- struct search *s = container_of(cl, struct search, cl);
- s->error = error;
- /* Only cache read errors are recoverable */
- s->recoverable = false;
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+{
+ struct cache_set *c = dc->disk.c;
+ unsigned mode = cache_mode(dc, bio);
+ unsigned sectors, congested = bch_get_congested(c);
+ struct task_struct *task = current;
+ struct io *i;
+
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+ (bio->bi_rw & REQ_DISCARD))
+ goto skip;
+
+ if (mode == CACHE_MODE_NONE ||
+ (mode == CACHE_MODE_WRITEAROUND &&
+ (bio->bi_rw & REQ_WRITE)))
+ goto skip;
+
+ if (bio->bi_sector & (c->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io");
+ goto skip;
}
- bio_put(bio);
- closure_put(cl);
+ if (bypass_torture_test(dc)) {
+ if ((get_random_int() & 3) == 3)
+ goto skip;
+ else
+ goto rescale;
+ }
+
+ if (!congested && !dc->sequential_cutoff)
+ goto rescale;
+
+ if (!congested &&
+ mode == CACHE_MODE_WRITEBACK &&
+ (bio->bi_rw & REQ_WRITE) &&
+ (bio->bi_rw & REQ_SYNC))
+ goto rescale;
+
+ spin_lock(&dc->io_lock);
+
+ hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+ if (i->last == bio->bi_sector &&
+ time_before(jiffies, i->jiffies))
+ goto found;
+
+ i = list_first_entry(&dc->io_lru, struct io, lru);
+
+ add_sequential(task);
+ i->sequential = 0;
+found:
+ if (i->sequential + bio->bi_size > i->sequential)
+ i->sequential += bio->bi_size;
+
+ i->last = bio_end_sector(bio);
+ i->jiffies = jiffies + msecs_to_jiffies(5000);
+ task->sequential_io = i->sequential;
+
+ hlist_del(&i->hash);
+ hlist_add_head(&i->hash, iohash(dc, i->last));
+ list_move_tail(&i->lru, &dc->io_lru);
+
+ spin_unlock(&dc->io_lock);
+
+ sectors = max(task->sequential_io,
+ task->sequential_io_avg) >> 9;
+
+ if (dc->sequential_cutoff &&
+ sectors >= dc->sequential_cutoff >> 9) {
+ trace_bcache_bypass_sequential(bio);
+ goto skip;
+ }
+
+ if (congested && sectors >= congested) {
+ trace_bcache_bypass_congested(bio);
+ goto skip;
+ }
+
+rescale:
+ bch_rescale_priorities(c, bio_sectors(bio));
+ return false;
+skip:
+ bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+ return true;
}
-void bch_cache_read_endio(struct bio *bio, int error)
+/* Cache lookup */
+
+struct search {
+ /* Stack frame for bio_complete */
+ struct closure cl;
+
+ struct bcache_device *d;
+
+ struct bbio bio;
+ struct bio *orig_bio;
+ struct bio *cache_miss;
+
+ unsigned insert_bio_sectors;
+
+ unsigned recoverable:1;
+ unsigned unaligned_bvec:1;
+ unsigned write:1;
+ unsigned read_dirty_data:1;
+
+ unsigned long start_time;
+
+ struct btree_op op;
+ struct data_insert_op iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio, int error)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct closure *cl = bio->bi_private;
@@ -650,13 +630,113 @@ void bch_cache_read_endio(struct bio *bio, int error)
*/
if (error)
- s->error = error;
- else if (ptr_stale(s->op.c, &b->key, 0)) {
- atomic_long_inc(&s->op.c->cache_read_races);
- s->error = -EINTR;
+ s->iop.error = error;
+ else if (ptr_stale(s->iop.c, &b->key, 0)) {
+ atomic_long_inc(&s->iop.c->cache_read_races);
+ s->iop.error = -EINTR;
}
- bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+ bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *n, *bio = &s->bio.bio;
+ struct bkey *bio_key;
+ unsigned ptr;
+
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+ return MAP_CONTINUE;
+
+ if (KEY_INODE(k) != s->iop.inode ||
+ KEY_START(k) > bio->bi_sector) {
+ unsigned bio_sectors = bio_sectors(bio);
+ unsigned sectors = KEY_INODE(k) == s->iop.inode
+ ? min_t(uint64_t, INT_MAX,
+ KEY_START(k) - bio->bi_sector)
+ : INT_MAX;
+
+ int ret = s->d->cache_miss(b, s, bio, sectors);
+ if (ret != MAP_CONTINUE)
+ return ret;
+
+ /* if this was a complete miss we shouldn't get here */
+ BUG_ON(bio_sectors <= sectors);
+ }
+
+ if (!KEY_SIZE(k))
+ return MAP_CONTINUE;
+
+ /* XXX: figure out best pointer - for multiple cache devices */
+ ptr = 0;
+
+ PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+ if (KEY_DIRTY(k))
+ s->read_dirty_data = true;
+
+ n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_sector),
+ GFP_NOIO, s->d->bio_split);
+
+ bio_key = &container_of(n, struct bbio, bio)->key;
+ bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+ bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+ bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+ n->bi_end_io = bch_cache_read_endio;
+ n->bi_private = &s->cl;
+
+ /*
+ * The bucket we're reading from might be reused while our bio
+ * is in flight, and we could then end up reading the wrong
+ * data.
+ *
+ * We guard against this by checking (in cache_read_endio()) if
+ * the pointer is stale again; if so, we treat it as an error
+ * and reread from the backing device (but we don't pass that
+ * error up anywhere).
+ */
+
+ __bch_submit_bbio(n, b->c);
+ return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, iop.cl);
+ struct bio *bio = &s->bio.bio;
+
+ int ret = bch_btree_map_keys(&s->op, s->iop.c,
+ &KEY(s->iop.inode, bio->bi_sector, 0),
+ cache_lookup_fn, MAP_END_KEY);
+ if (ret == -EAGAIN)
+ continue_at(cl, cache_lookup, bcache_wq);
+
+ closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+
+ if (error) {
+ struct search *s = container_of(cl, struct search, cl);
+ s->iop.error = error;
+ /* Only cache read errors are recoverable */
+ s->recoverable = false;
+ }
+
+ bio_put(bio);
+ closure_put(cl);
}
static void bio_complete(struct search *s)
@@ -670,8 +750,8 @@ static void bio_complete(struct search *s)
part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
part_stat_unlock();
- trace_bcache_request_end(s, s->orig_bio);
- bio_endio(s->orig_bio, s->error);
+ trace_bcache_request_end(s->d, s->orig_bio);
+ bio_endio(s->orig_bio, s->iop.error);
s->orig_bio = NULL;
}
}
@@ -691,8 +771,8 @@ static void search_free(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
bio_complete(s);
- if (s->op.cache_bio)
- bio_put(s->op.cache_bio);
+ if (s->iop.bio)
+ bio_put(s->iop.bio);
if (s->unaligned_bvec)
mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
@@ -703,21 +783,22 @@ static void search_free(struct closure *cl)
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
+ struct search *s;
struct bio_vec *bv;
- struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
- memset(s, 0, offsetof(struct search, op.keys));
+
+ s = mempool_alloc(d->c->search, GFP_NOIO);
+ memset(s, 0, offsetof(struct search, iop.insert_keys));
__closure_init(&s->cl, NULL);
- s->op.inode = d->id;
- s->op.c = d->c;
+ s->iop.inode = d->id;
+ s->iop.c = d->c;
s->d = d;
s->op.lock = -1;
- s->task = current;
+ s->iop.write_point = hash_long((unsigned long) current, 16);
s->orig_bio = bio;
s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
- s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
s->recoverable = 1;
s->start_time = jiffies;
do_bio_hook(s);
@@ -734,18 +815,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
return s;
}
-static void btree_read_async(struct closure *cl)
-{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
-
- int ret = btree_root(search_recurse, op->c, op);
-
- if (ret == -EAGAIN)
- continue_at(cl, btree_read_async, bcache_wq);
-
- closure_return(cl);
-}
-
/* Cached devices */
static void cached_dev_bio_complete(struct closure *cl)
@@ -759,27 +828,28 @@ static void cached_dev_bio_complete(struct closure *cl)
/* Process reads */
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
- if (s->op.insert_collision)
- bch_mark_cache_miss_collision(s);
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
- if (s->op.cache_bio) {
+ if (s->iop.bio) {
int i;
struct bio_vec *bv;
- __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+ bio_for_each_segment_all(bv, s->iop.bio, i)
__free_page(bv->bv_page);
}
cached_dev_bio_complete(cl);
}
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
+ struct bio *bio = &s->bio.bio;
struct bio_vec *bv;
int i;
@@ -787,7 +857,7 @@ static void request_read_error(struct closure *cl)
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
- s->error = 0;
+ s->iop.error = 0;
bv = s->bio.bio.bi_io_vec;
do_bio_hook(s);
s->bio.bio.bi_io_vec = bv;
@@ -803,146 +873,148 @@ static void request_read_error(struct closure *cl)
/* XXX: invalidate cache */
- closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+ closure_bio_submit(bio, cl, s->d);
}
- continue_at(cl, cached_dev_read_complete, NULL);
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
- * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
- * contains data ready to be inserted into the cache.
+ * We had a cache miss; cache_bio now contains data ready to be inserted
+ * into the cache.
*
* First, we copy the data we just read from cache_bio's bounce buffers
* to the buffers the original bio pointed to:
*/
- if (s->op.cache_bio) {
- bio_reset(s->op.cache_bio);
- s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
- s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
- s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
- bch_bio_map(s->op.cache_bio, NULL);
+ if (s->iop.bio) {
+ bio_reset(s->iop.bio);
+ s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+ s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+ s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+ bch_bio_map(s->iop.bio, NULL);
- bio_copy_data(s->cache_miss, s->op.cache_bio);
+ bio_copy_data(s->cache_miss, s->iop.bio);
bio_put(s->cache_miss);
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable)
- bch_data_verify(s);
+ if (verify(dc, &s->bio.bio) && s->recoverable &&
+ !s->unaligned_bvec && !s->read_dirty_data)
+ bch_data_verify(dc, s->orig_bio);
bio_complete(s);
- if (s->op.cache_bio &&
- !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
- s->op.type = BTREE_REPLACE;
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ if (s->iop.bio &&
+ !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+ BUG_ON(!s->iop.replace);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, cached_dev_read_complete, NULL);
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
- trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
+ bch_mark_cache_accounting(s->iop.c, s->d,
+ !s->cache_miss, s->iop.bypass);
+ trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
- if (s->error)
- continue_at_nobarrier(cl, request_read_error, bcache_wq);
- else if (s->op.cache_bio || verify(dc, &s->bio.bio))
- continue_at_nobarrier(cl, request_read_done, bcache_wq);
+ if (s->iop.error)
+ continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+ else if (s->iop.bio || verify(dc, &s->bio.bio))
+ continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
else
- continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+ continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- int ret = 0;
- unsigned reada;
+ int ret = MAP_CONTINUE;
+ unsigned reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- struct bio *miss;
-
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (miss == bio)
- s->op.lookup_done = true;
+ struct bio *miss, *cache_bio;
- miss->bi_end_io = request_endio;
- miss->bi_private = &s->cl;
-
- if (s->cache_miss || s->op.skip)
+ if (s->cache_miss || s->iop.bypass) {
+ miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
-
- if (miss != bio ||
- (bio->bi_rw & REQ_RAHEAD) ||
- (bio->bi_rw & REQ_META) ||
- s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
- reada = 0;
- else {
- reada = min(dc->readahead >> 9,
- sectors - bio_sectors(miss));
-
- if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
- reada = bdev_sectors(miss->bi_bdev) -
- bio_end_sector(miss);
}
- s->cache_bio_sectors = bio_sectors(miss) + reada;
- s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
- DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
- dc->disk.bio_split);
+ if (!(bio->bi_rw & REQ_RAHEAD) &&
+ !(bio->bi_rw & REQ_META) &&
+ s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+ reada = min_t(sector_t, dc->readahead >> 9,
+ bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
- if (!s->op.cache_bio)
- goto out_submit;
+ s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
- s->op.cache_bio->bi_sector = miss->bi_sector;
- s->op.cache_bio->bi_bdev = miss->bi_bdev;
- s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
+ s->iop.replace_key = KEY(s->iop.inode,
+ bio->bi_sector + s->insert_bio_sectors,
+ s->insert_bio_sectors);
- s->op.cache_bio->bi_end_io = request_endio;
- s->op.cache_bio->bi_private = &s->cl;
+ ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+ if (ret)
+ return ret;
+
+ s->iop.replace = true;
+
+ miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
- ret = -EINTR;
- if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
- goto out_put;
+ ret = miss == bio ? MAP_DONE : -EINTR;
+
+ cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+ dc->disk.bio_split);
+ if (!cache_bio)
+ goto out_submit;
+
+ cache_bio->bi_sector = miss->bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_size = s->insert_bio_sectors << 9;
+
+ cache_bio->bi_end_io = request_endio;
+ cache_bio->bi_private = &s->cl;
- bch_bio_map(s->op.cache_bio, NULL);
- if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+ bch_bio_map(cache_bio, NULL);
+ if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- s->cache_miss = miss;
- bio_get(s->op.cache_bio);
+ if (reada)
+ bch_mark_cache_readahead(s->iop.c, s->d);
- closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+ s->cache_miss = miss;
+ s->iop.bio = cache_bio;
+ bio_get(cache_bio);
+ closure_bio_submit(cache_bio, &s->cl, s->d);
return ret;
out_put:
- bio_put(s->op.cache_bio);
- s->op.cache_bio = NULL;
+ bio_put(cache_bio);
out_submit:
+ miss->bi_end_io = request_endio;
+ miss->bi_private = &s->cl;
closure_bio_submit(miss, &s->cl, s->d);
return ret;
}
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
- check_should_skip(dc, s);
- closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
- continue_at(cl, request_read_done_bh, NULL);
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+ continue_at(cl, cached_dev_read_done_bh, NULL);
}
/* Process writes */
@@ -956,47 +1028,52 @@ static void cached_dev_write_complete(struct closure *cl)
cached_dev_bio_complete(cl);
}
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start, end;
- start = KEY(dc->disk.id, bio->bi_sector, 0);
- end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+ struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
- bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
- check_should_skip(dc, s);
down_read_non_owner(&dc->writeback_lock);
-
if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
- s->op.skip = false;
- s->writeback = true;
+ /*
+ * We overlap with some dirty data undergoing background
+ * writeback, force this write to writeback
+ */
+ s->iop.bypass = false;
+ s->iop.writeback = true;
}
+ /*
+ * Discards aren't _required_ to do anything, so skipping if
+ * check_overlapping returned true is ok
+ *
+ * But check_overlapping drops dirty keys for which io hasn't started,
+ * so we still want to call it.
+ */
if (bio->bi_rw & REQ_DISCARD)
- goto skip;
+ s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
cache_mode(dc, bio),
- s->op.skip)) {
- s->op.skip = false;
- s->writeback = true;
+ s->iop.bypass)) {
+ s->iop.bypass = false;
+ s->iop.writeback = true;
}
- if (s->op.skip)
- goto skip;
-
- trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
+ if (s->iop.bypass) {
+ s->iop.bio = s->orig_bio;
+ bio_get(s->iop.bio);
- if (!s->writeback) {
- s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
-
- closure_bio_submit(bio, cl, s->d);
- } else {
+ if (!(bio->bi_rw & REQ_DISCARD) ||
+ blk_queue_discard(bdev_get_queue(dc->bdev)))
+ closure_bio_submit(bio, cl, s->d);
+ } else if (s->iop.writeback) {
bch_writeback_add(dc);
- s->op.cache_bio = bio;
+ s->iop.bio = bio;
if (bio->bi_rw & REQ_FLUSH) {
/* Also need to send a flush to the backing device */
@@ -1010,36 +1087,26 @@ static void request_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(flush, cl, s->d);
}
- }
-out:
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
- continue_at(cl, cached_dev_write_complete, NULL);
-skip:
- s->op.skip = true;
- s->op.cache_bio = s->orig_bio;
- bio_get(s->op.cache_bio);
+ } else {
+ s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
+ dc->disk.bio_split);
- if ((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
- goto out;
+ closure_bio_submit(bio, cl, s->d);
+ }
- closure_bio_submit(bio, cl, s->d);
- goto out;
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+ continue_at(cl, cached_dev_write_complete, NULL);
}
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct closure *cl)
{
- struct closure *cl = &s->cl;
+ struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- if (bio->bi_rw & REQ_DISCARD) {
- request_write(dc, s);
- return;
- }
-
- if (s->op.flush_journal)
- bch_journal_meta(s->op.c, cl);
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+ /* If it's a flush, we send the flush to the backing device too */
closure_bio_submit(bio, cl, s->d);
continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1047,134 +1114,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
/* Cached devices - read & write stuff */
-unsigned bch_get_congested(struct cache_set *c)
-{
- int i;
- long rand;
-
- if (!c->congested_read_threshold_us &&
- !c->congested_write_threshold_us)
- return 0;
-
- i = (local_clock_us() - c->congested_last_us) / 1024;
- if (i < 0)
- return 0;
-
- i += atomic_read(&c->congested);
- if (i >= 0)
- return 0;
-
- i += CONGESTED_MAX;
-
- if (i > 0)
- i = fract_exp_two(i, 6);
-
- rand = get_random_int();
- i -= bitmap_weight(&rand, BITS_PER_LONG);
-
- return i > 0 ? i : 1;
-}
-
-static void add_sequential(struct task_struct *t)
-{
- ewma_add(t->sequential_io_avg,
- t->sequential_io, 8, 0);
-
- t->sequential_io = 0;
-}
-
-static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
-{
- return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
-}
-
-static void check_should_skip(struct cached_dev *dc, struct search *s)
-{
- struct cache_set *c = s->op.c;
- struct bio *bio = &s->bio.bio;
- unsigned mode = cache_mode(dc, bio);
- unsigned sectors, congested = bch_get_congested(c);
-
- if (atomic_read(&dc->disk.detaching) ||
- c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
- (bio->bi_rw & REQ_DISCARD))
- goto skip;
-
- if (mode == CACHE_MODE_NONE ||
- (mode == CACHE_MODE_WRITEAROUND &&
- (bio->bi_rw & REQ_WRITE)))
- goto skip;
-
- if (bio->bi_sector & (c->sb.block_size - 1) ||
- bio_sectors(bio) & (c->sb.block_size - 1)) {
- pr_debug("skipping unaligned io");
- goto skip;
- }
-
- if (!congested && !dc->sequential_cutoff)
- goto rescale;
-
- if (!congested &&
- mode == CACHE_MODE_WRITEBACK &&
- (bio->bi_rw & REQ_WRITE) &&
- (bio->bi_rw & REQ_SYNC))
- goto rescale;
-
- if (dc->sequential_merge) {
- struct io *i;
-
- spin_lock(&dc->io_lock);
-
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
- time_before(jiffies, i->jiffies))
- goto found;
-
- i = list_first_entry(&dc->io_lru, struct io, lru);
-
- add_sequential(s->task);
- i->sequential = 0;
-found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
-
- i->last = bio_end_sector(bio);
- i->jiffies = jiffies + msecs_to_jiffies(5000);
- s->task->sequential_io = i->sequential;
-
- hlist_del(&i->hash);
- hlist_add_head(&i->hash, iohash(dc, i->last));
- list_move_tail(&i->lru, &dc->io_lru);
-
- spin_unlock(&dc->io_lock);
- } else {
- s->task->sequential_io = bio->bi_size;
-
- add_sequential(s->task);
- }
-
- sectors = max(s->task->sequential_io,
- s->task->sequential_io_avg) >> 9;
-
- if (dc->sequential_cutoff &&
- sectors >= dc->sequential_cutoff >> 9) {
- trace_bcache_bypass_sequential(s->orig_bio);
- goto skip;
- }
-
- if (congested && sectors >= congested) {
- trace_bcache_bypass_congested(s->orig_bio);
- goto skip;
- }
-
-rescale:
- bch_rescale_priorities(c, bio_sectors(bio));
- return;
-skip:
- bch_mark_sectors_bypassed(s, bio_sectors(bio));
- s->op.skip = true;
-}
-
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
struct search *s;
@@ -1192,14 +1131,24 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
- trace_bcache_request_start(s, bio);
-
- if (!bio_has_data(bio))
- request_nodata(dc, s);
- else if (rw)
- request_write(dc, s);
- else
- request_read(dc, s);
+ trace_bcache_request_start(s->d, bio);
+
+ if (!bio->bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * generic_make_request
+ */
+ continue_at_nobarrier(&s->cl,
+ cached_dev_nodata,
+ bcache_wq);
+ } else {
+ s->iop.bypass = check_should_bypass(dc, bio);
+
+ if (rw)
+ cached_dev_write(dc, s);
+ else
+ cached_dev_read(dc, s);
+ }
} else {
if ((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1274,9 +1223,19 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
bio_advance(bio, min(sectors << 9, bio->bi_size));
if (!bio->bi_size)
- s->op.lookup_done = true;
+ return MAP_DONE;
- return 0;
+ return MAP_CONTINUE;
+}
+
+static void flash_dev_nodata(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+
+ continue_at(cl, search_free, NULL);
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
@@ -1295,23 +1254,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
cl = &s->cl;
bio = &s->bio.bio;
- trace_bcache_request_start(s, bio);
+ trace_bcache_request_start(s->d, bio);
- if (bio_has_data(bio) && !rw) {
- closure_call(&s->op.cl, btree_read_async, NULL, cl);
- } else if (bio_has_data(bio) || s->op.skip) {
- bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+ if (!bio->bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * generic_make_request
+ */
+ continue_at_nobarrier(&s->cl,
+ flash_dev_nodata,
+ bcache_wq);
+ } else if (rw) {
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
- s->writeback = true;
- s->op.cache_bio = bio;
+ s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.writeback = true;
+ s->iop.bio = bio;
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
} else {
- /* No data - probably a cache flush */
- if (s->op.flush_journal)
- bch_journal_meta(s->op.c, cl);
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
}
continue_at(cl, search_free, NULL);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 57dc4784f4f..2cd65bf073c 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -3,40 +3,33 @@
#include <linux/cgroup.h>
-struct search {
- /* Stack frame for bio_complete */
+struct data_insert_op {
struct closure cl;
+ struct cache_set *c;
+ struct bio *bio;
- struct bcache_device *d;
- struct task_struct *task;
-
- struct bbio bio;
- struct bio *orig_bio;
- struct bio *cache_miss;
- unsigned cache_bio_sectors;
-
- unsigned recoverable:1;
- unsigned unaligned_bvec:1;
+ unsigned inode;
+ uint16_t write_point;
+ uint16_t write_prio;
+ short error;
- unsigned write:1;
+ unsigned bypass:1;
unsigned writeback:1;
+ unsigned flush_journal:1;
+ unsigned csum:1;
- /* IO error returned to s->bio */
- short error;
- unsigned long start_time;
+ unsigned replace:1;
+ unsigned replace_collision:1;
+
+ unsigned insert_data_done:1;
- /* Anything past op->keys won't get zeroed in do_bio_hook */
- struct btree_op op;
+ /* Anything past this point won't get zeroed in search_alloc() */
+ struct keylist insert_keys;
+ BKEY_PADDED(replace_key);
};
-void bch_cache_read_endio(struct bio *, int);
unsigned bch_get_congested(struct cache_set *);
-void bch_insert_data(struct closure *cl);
-void bch_btree_insert_async(struct closure *);
-void bch_cache_read_endio(struct bio *, int);
-
-void bch_open_buckets_free(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
+void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index b8730e714d6..84d0782f702 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "stats.h"
#include "btree.h"
-#include "request.h"
#include "sysfs.h"
/*
@@ -196,35 +195,36 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
atomic_inc(&stats->cache_bypass_misses);
}
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
- mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+ mark_cache_stats(&c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
}
-void bch_mark_cache_readahead(struct search *s)
+void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads);
- atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+ atomic_inc(&c->accounting.collector.cache_readaheads);
}
-void bch_mark_cache_miss_collision(struct search *s)
+void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions);
- atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+ atomic_inc(&c->accounting.collector.cache_miss_collisions);
}
-void bch_mark_sectors_bypassed(struct search *s, int sectors)
+void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+ int sectors)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
- atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+ atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}
void bch_cache_accounting_init(struct cache_accounting *acc,
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index c7c7a8fd29f..adbff141c88 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -38,7 +38,9 @@ struct cache_accounting {
struct cache_stats day;
};
-struct search;
+struct cache_set;
+struct cached_dev;
+struct bcache_device;
void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent);
@@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc);
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
-void bch_mark_cache_readahead(struct search *s);
-void bch_mark_cache_miss_collision(struct search *s);
-void bch_mark_sectors_bypassed(struct search *s, int sectors);
+void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
+ bool, bool);
+void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
+void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
+void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 547c4c57b05..dec15cd2d79 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
@@ -45,21 +46,13 @@ const char * const bch_cache_modes[] = {
NULL
};
-struct uuid_entry_v0 {
- uint8_t uuid[16];
- uint8_t label[32];
- uint32_t first_reg;
- uint32_t last_reg;
- uint32_t invalidated;
- uint32_t pad;
-};
-
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
-static int bcache_major, bcache_minor;
+static int bcache_major;
+static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
@@ -382,7 +375,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
struct bkey *k = &j->uuid_bucket;
- if (__bch_ptr_invalid(c, 1, k))
+ if (bch_btree_ptr_invalid(c, k))
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
@@ -427,7 +420,7 @@ static int __uuid_write(struct cache_set *c)
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+ if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -435,7 +428,7 @@ static int __uuid_write(struct cache_set *c)
closure_sync(&cl);
bkey_copy(&c->uuid_bucket, &k.key);
- __bkey_put(c, &k.key);
+ bkey_put(c, &k.key);
return 0;
}
@@ -562,10 +555,10 @@ void bch_prio_write(struct cache *ca)
}
p->next_bucket = ca->prio_buckets[i + 1];
- p->magic = pset_magic(ca);
+ p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+ bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -613,7 +606,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
- if (p->magic != pset_magic(ca))
+ if (p->magic != pset_magic(&ca->sb))
pr_warn("bad magic reading priorities");
bucket = p->next_bucket;
@@ -630,7 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
static int open_dev(struct block_device *b, fmode_t mode)
{
struct bcache_device *d = b->bd_disk->private_data;
- if (atomic_read(&d->closing))
+ if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO;
closure_get(&d->cl);
@@ -659,20 +652,24 @@ static const struct block_device_operations bcache_ops = {
void bcache_device_stop(struct bcache_device *d)
{
- if (!atomic_xchg(&d->closing, 1))
+ if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
closure_queue(&d->cl);
}
static void bcache_device_unlink(struct bcache_device *d)
{
- unsigned i;
- struct cache *ca;
+ lockdep_assert_held(&bch_register_lock);
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+ unsigned i;
+ struct cache *ca;
- for_each_cache(ca, d->c, i)
- bd_unlink_disk_holder(ca->bdev, d->disk);
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+ for_each_cache(ca, d->c, i)
+ bd_unlink_disk_holder(ca->bdev, d->disk);
+ }
}
static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
@@ -696,19 +693,16 @@ static void bcache_device_detach(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
- if (atomic_read(&d->detaching)) {
+ if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id;
SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16);
u->invalidated = cpu_to_le32(get_seconds());
bch_uuid_write(d->c);
-
- atomic_set(&d->detaching, 0);
}
- if (!d->flush_done)
- bcache_device_unlink(d);
+ bcache_device_unlink(d);
d->c->devices[d->id] = NULL;
closure_put(&d->c->caching);
@@ -739,14 +733,20 @@ static void bcache_device_free(struct bcache_device *d)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
- if (d->disk)
+ if (d->disk) {
+ ida_simple_remove(&bcache_minor, d->disk->first_minor);
put_disk(d->disk);
+ }
bio_split_pool_free(&d->bio_split_hook);
if (d->unaligned_bvec)
mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
+ if (is_vmalloc_addr(d->full_dirty_stripes))
+ vfree(d->full_dirty_stripes);
+ else
+ kfree(d->full_dirty_stripes);
if (is_vmalloc_addr(d->stripe_sectors_dirty))
vfree(d->stripe_sectors_dirty);
else
@@ -760,15 +760,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{
struct request_queue *q;
size_t n;
+ int minor;
- if (!d->stripe_size_bits)
- d->stripe_size_bits = 31;
+ if (!d->stripe_size)
+ d->stripe_size = 1 << 31;
- d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
- d->stripe_size_bits;
+ d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
- if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+ if (!d->nr_stripes ||
+ d->nr_stripes > INT_MAX ||
+ d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
+ pr_err("nr_stripes too large");
return -ENOMEM;
+ }
n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = n < PAGE_SIZE << 6
@@ -777,22 +781,38 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->stripe_sectors_dirty)
return -ENOMEM;
+ n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
+ d->full_dirty_stripes = n < PAGE_SIZE << 6
+ ? kzalloc(n, GFP_KERNEL)
+ : vzalloc(n);
+ if (!d->full_dirty_stripes)
+ return -ENOMEM;
+
+ minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) ||
- !(d->disk = alloc_disk(1)) ||
- !(q = blk_alloc_queue(GFP_KERNEL)))
+ !(d->disk = alloc_disk(1))) {
+ ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
+ }
set_capacity(d->disk, sectors);
- snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
d->disk->major = bcache_major;
- d->disk->first_minor = bcache_minor++;
+ d->disk->first_minor = minor;
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
@@ -874,7 +894,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
struct closure cl;
closure_init_stack(&cl);
- BUG_ON(!atomic_read(&dc->disk.detaching));
+ BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(atomic_read(&dc->count));
mutex_lock(&bch_register_lock);
@@ -888,6 +908,8 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
+ clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
+
mutex_unlock(&bch_register_lock);
pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
@@ -900,10 +922,10 @@ void bch_cached_dev_detach(struct cached_dev *dc)
{
lockdep_assert_held(&bch_register_lock);
- if (atomic_read(&dc->disk.closing))
+ if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return;
- if (atomic_xchg(&dc->disk.detaching, 1))
+ if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
return;
/*
@@ -1030,6 +1052,7 @@ static void cached_dev_free(struct closure *cl)
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
cancel_delayed_work_sync(&dc->writeback_rate_update);
+ kthread_stop(dc->writeback_thread);
mutex_lock(&bch_register_lock);
@@ -1058,11 +1081,7 @@ static void cached_dev_flush(struct closure *cl)
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);
- d->flush_done = 1;
-
- if (d->c)
- bcache_device_unlink(d);
-
+ bcache_device_unlink(d);
mutex_unlock(&bch_register_lock);
bch_cache_accounting_destroy(&dc->accounting);
@@ -1088,7 +1107,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
- dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20;
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
@@ -1260,7 +1278,8 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
va_list args;
- if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ if (c->on_error != ON_ERROR_PANIC &&
+ test_bit(CACHE_SET_STOPPING, &c->flags))
return false;
/* XXX: we can be called from atomic context
@@ -1275,6 +1294,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
printk(", disabling caching\n");
+ if (c->on_error == ON_ERROR_PANIC)
+ panic("panic forced after error\n");
+
bch_cache_set_unregister(c);
return true;
}
@@ -1339,6 +1361,9 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);
+ if (c->gc_thread)
+ kthread_stop(c->gc_thread);
+
if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache);
@@ -1433,12 +1458,19 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->sort_crit_factor = int_sqrt(c->btree_pages);
- mutex_init(&c->bucket_lock);
- mutex_init(&c->sort_lock);
- spin_lock_init(&c->sort_time_lock);
closure_init_unlocked(&c->sb_write);
+ mutex_init(&c->bucket_lock);
+ init_waitqueue_head(&c->try_wait);
+ init_waitqueue_head(&c->bucket_wait);
closure_init_unlocked(&c->uuid_write);
- spin_lock_init(&c->btree_read_time_lock);
+ mutex_init(&c->sort_lock);
+
+ spin_lock_init(&c->sort_time.lock);
+ spin_lock_init(&c->btree_gc_time.lock);
+ spin_lock_init(&c->btree_split_time.lock);
+ spin_lock_init(&c->btree_read_time.lock);
+ spin_lock_init(&c->try_harder_time.lock);
+
bch_moving_init_cache_set(c);
INIT_LIST_HEAD(&c->list);
@@ -1483,11 +1515,10 @@ static void run_cache_set(struct cache_set *c)
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
struct cache *ca;
+ struct closure cl;
unsigned i;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ closure_init_stack(&cl);
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
@@ -1498,7 +1529,7 @@ static void run_cache_set(struct cache_set *c)
struct jset *j;
err = "cannot allocate memory for journal";
- if (bch_journal_read(c, &journal, &op))
+ if (bch_journal_read(c, &journal))
goto err;
pr_debug("btree_journal_read() done");
@@ -1522,23 +1553,23 @@ static void run_cache_set(struct cache_set *c)
k = &j->btree_root;
err = "bad btree root";
- if (__bch_ptr_invalid(c, j->btree_level + 1, k))
+ if (bch_btree_ptr_invalid(c, k))
goto err;
err = "error reading btree root";
- c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+ c->root = bch_btree_node_get(c, k, j->btree_level, true);
if (IS_ERR_OR_NULL(c->root))
goto err;
list_del_init(&c->root->list);
rw_unlock(true, c->root);
- err = uuid_read(c, j, &op.cl);
+ err = uuid_read(c, j, &cl);
if (err)
goto err;
err = "error in recovery";
- if (bch_btree_check(c, &op))
+ if (bch_btree_check(c))
goto err;
bch_journal_mark(c, &journal);
@@ -1570,11 +1601,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c);
- bch_journal_replay(c, &journal, &op);
+ bch_journal_replay(c, &journal);
} else {
pr_notice("invalidating existing data");
- /* Don't want invalidate_buckets() to queue a gc yet */
- closure_lock(&c->gc, NULL);
for_each_cache(ca, c, i) {
unsigned j;
@@ -1600,15 +1629,15 @@ static void run_cache_set(struct cache_set *c)
err = "cannot allocate new UUID bucket";
if (__uuid_write(c))
- goto err_unlock_gc;
+ goto err;
err = "cannot allocate new btree root";
- c->root = bch_btree_node_alloc(c, 0, &op.cl);
+ c->root = bch_btree_node_alloc(c, 0, true);
if (IS_ERR_OR_NULL(c->root))
- goto err_unlock_gc;
+ goto err;
bkey_copy_key(&c->root->key, &MAX_KEY);
- bch_btree_node_write(c->root, &op.cl);
+ bch_btree_node_write(c->root, &cl);
bch_btree_set_root(c->root);
rw_unlock(true, c->root);
@@ -1621,14 +1650,14 @@ static void run_cache_set(struct cache_set *c)
SET_CACHE_SYNC(&c->sb, true);
bch_journal_next(&c->journal);
- bch_journal_meta(c, &op.cl);
-
- /* Unlock */
- closure_set_stopped(&c->gc.cl);
- closure_put(&c->gc.cl);
+ bch_journal_meta(c, &cl);
}
- closure_sync(&op.cl);
+ err = "error starting gc thread";
+ if (bch_gc_thread_start(c))
+ goto err;
+
+ closure_sync(&cl);
c->sb.last_mount = get_seconds();
bcache_write_super(c);
@@ -1638,13 +1667,10 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c);
return;
-err_unlock_gc:
- closure_set_stopped(&c->gc.cl);
- closure_put(&c->gc.cl);
err:
- closure_sync(&op.cl);
+ closure_sync(&cl);
/* XXX: test this, it's broken */
- bch_cache_set_error(c, err);
+ bch_cache_set_error(c, "%s", err);
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -1725,8 +1751,6 @@ void bch_cache_release(struct kobject *kobj)
if (ca->set)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
- bch_cache_allocator_exit(ca);
-
bio_split_pool_free(&ca->bio_split_hook);
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
@@ -1758,8 +1782,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- INIT_LIST_HEAD(&ca->discards);
-
bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -2006,7 +2028,6 @@ static struct notifier_block reboot = {
static void bcache_exit(void)
{
bch_debug_exit();
- bch_writeback_exit();
bch_request_exit();
bch_btree_exit();
if (bcache_kobj)
@@ -2039,7 +2060,6 @@ static int __init bcache_init(void)
sysfs_create_files(bcache_kobj, files) ||
bch_btree_init() ||
bch_request_init() ||
- bch_writeback_init() ||
bch_debug_init(bcache_kobj))
goto err;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 924dcfdae11..80d4c2bee18 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -21,6 +21,12 @@ static const char * const cache_replacement_policies[] = {
NULL
};
+static const char * const error_actions[] = {
+ "unregister",
+ "panic",
+ NULL
+};
+
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
@@ -66,7 +72,6 @@ rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
-rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
@@ -90,11 +95,14 @@ rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
+rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
+rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
+rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
@@ -116,6 +124,7 @@ SHOW(__bch_cached_dev)
sysfs_printf(data_csum, "%i", dc->disk.data_csum);
var_printf(verify, "%i");
+ var_printf(bypass_torture_test, "%i");
var_printf(writeback_metadata, "%i");
var_printf(writeback_running, "%i");
var_print(writeback_delay);
@@ -150,10 +159,9 @@ SHOW(__bch_cached_dev)
sysfs_hprint(dirty_data,
bcache_dev_sectors_dirty(&dc->disk) << 9);
- sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9);
+ sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
var_printf(partial_stripes_expensive, "%u");
- var_printf(sequential_merge, "%i");
var_hprint(sequential_cutoff);
var_hprint(readahead);
@@ -185,6 +193,7 @@ STORE(__cached_dev)
sysfs_strtoul(data_csum, dc->disk.data_csum);
d_strtoul(verify);
+ d_strtoul(bypass_torture_test);
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
@@ -199,7 +208,6 @@ STORE(__cached_dev)
dc->writeback_rate_p_term_inverse, 1, INT_MAX);
d_strtoul(writeback_rate_d_smooth);
- d_strtoul(sequential_merge);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -311,7 +319,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_stripe_size,
&sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff,
- &sysfs_sequential_merge,
&sysfs_clear_stats,
&sysfs_running,
&sysfs_state,
@@ -319,6 +326,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
+ &sysfs_bypass_torture_test,
#endif
NULL
};
@@ -366,7 +374,7 @@ STORE(__bch_flash_dev)
}
if (attr == &sysfs_unregister) {
- atomic_set(&d->detaching, 1);
+ set_bit(BCACHE_DEV_DETACHING, &d->flags);
bcache_device_stop(d);
}
@@ -481,7 +489,6 @@ lock_root:
sysfs_print(btree_used_percent, btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(dirty_data, c->gc_stats.dirty);
sysfs_hprint(average_key_size, average_key_size(c));
sysfs_print(cache_read_races,
@@ -492,6 +499,10 @@ lock_root:
sysfs_print(writeback_keys_failed,
atomic_long_read(&c->writeback_keys_failed));
+ if (attr == &sysfs_errors)
+ return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
+ c->on_error);
+
/* See count_io_errors for why 88 */
sysfs_print(io_error_halflife, c->error_decay * 88);
sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);
@@ -506,6 +517,8 @@ lock_root:
sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
sysfs_printf(verify, "%i", c->verify);
sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
+ sysfs_printf(expensive_debug_checks,
+ "%i", c->expensive_debug_checks);
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
@@ -555,7 +568,7 @@ STORE(__bch_cache_set)
}
if (attr == &sysfs_trigger_gc)
- bch_queue_gc(c);
+ wake_up_gc(c);
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
@@ -569,6 +582,15 @@ STORE(__bch_cache_set)
sysfs_strtoul(congested_write_threshold_us,
c->congested_write_threshold_us);
+ if (attr == &sysfs_errors) {
+ ssize_t v = bch_read_string_list(buf, error_actions);
+
+ if (v < 0)
+ return v;
+
+ c->on_error = v;
+ }
+
if (attr == &sysfs_io_error_limit)
c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
@@ -579,6 +601,7 @@ STORE(__bch_cache_set)
sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
sysfs_strtoul(verify, c->verify);
sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
+ sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
@@ -618,8 +641,8 @@ static struct attribute *bch_cache_set_files[] = {
&sysfs_cache_available_percent,
&sysfs_average_key_size,
- &sysfs_dirty_data,
+ &sysfs_errors,
&sysfs_io_error_limit,
&sysfs_io_error_halflife,
&sysfs_congested,
@@ -653,6 +676,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
&sysfs_key_merging_disabled,
+ &sysfs_expensive_debug_checks,
#endif
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index f7b6c197f90..adbc3df17a8 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -1,6 +1,5 @@
#include "bcache.h"
#include "btree.h"
-#include "request.h"
#include <linux/blktrace_api.h>
#include <linux/module.h>
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 420dad545c7..462214eeacb 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -168,10 +168,14 @@ int bch_parse_uuid(const char *s, char *uuid)
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
- uint64_t now = local_clock();
- uint64_t duration = time_after64(now, start_time)
+ uint64_t now, duration, last;
+
+ spin_lock(&stats->lock);
+
+ now = local_clock();
+ duration = time_after64(now, start_time)
? now - start_time : 0;
- uint64_t last = time_after64(now, stats->last)
+ last = time_after64(now, stats->last)
? now - stats->last : 0;
stats->max_duration = max(stats->max_duration, duration);
@@ -188,6 +192,8 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
}
stats->last = now ?: 1;
+
+ spin_unlock(&stats->lock);
}
/**
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ea345c6896f..362c4b3f8b4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,28 +15,18 @@
struct closure;
-#ifdef CONFIG_BCACHE_EDEBUG
+#ifdef CONFIG_BCACHE_DEBUG
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
-#else /* EDEBUG */
+#else /* DEBUG */
#define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v)
#endif
-#define BITMASK(name, type, field, offset, size) \
-static inline uint64_t name(const type *k) \
-{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); } \
- \
-static inline void SET_##name(type *k, uint64_t v) \
-{ \
- k->field &= ~(~((uint64_t) ~0 << size) << offset); \
- k->field |= v << offset; \
-}
-
#define DECLARE_HEAP(type, name) \
struct { \
size_t size, used; \
@@ -388,6 +378,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[
ssize_t bch_read_string_list(const char *buf, const char * const list[]);
struct time_stats {
+ spinlock_t lock;
/*
* all fields are in nanoseconds, averages are ewmas stored left shifted
* by 8
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ba3ee48320f..99053b1251b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
#include "debug.h"
#include "writeback.h"
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <trace/events/bcache.h>
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
- struct closure cl;
- struct cached_dev *dc;
- struct bio bio;
-};
-
/* Rate limiting */
static void __update_writeback_rate(struct cached_dev *dc)
@@ -72,9 +65,6 @@ out:
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
-
- schedule_delayed_work(&dc->writeback_rate_update,
- dc->writeback_rate_update_seconds * HZ);
}
static void update_writeback_rate(struct work_struct *work)
@@ -90,13 +80,16 @@ static void update_writeback_rate(struct work_struct *work)
__update_writeback_rate(dc);
up_read(&dc->writeback_lock);
+
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
uint64_t ret;
- if (atomic_read(&dc->disk.detaching) ||
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
@@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
return min_t(uint64_t, ret, HZ);
}
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
- return KEY_DIRTY(k);
-}
-
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
- uint64_t stripe;
- unsigned nr_sectors = KEY_SIZE(k);
- struct cached_dev *dc = container_of(buf, struct cached_dev,
- writeback_keys);
- unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
-
- if (!KEY_DIRTY(k))
- return false;
-
- stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
- while (1) {
- if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
- stripe_size)
- return false;
-
- if (nr_sectors <= stripe_size)
- return true;
-
- nr_sectors -= stripe_size;
- stripe++;
- }
-}
+struct dirty_io {
+ struct closure cl;
+ struct cached_dev *dc;
+ struct bio bio;
+};
static void dirty_init(struct keybuf_key *w)
{
@@ -153,131 +120,6 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL);
}
-static void refill_dirty(struct closure *cl)
-{
- struct cached_dev *dc = container_of(cl, struct cached_dev,
- writeback.cl);
- struct keybuf *buf = &dc->writeback_keys;
- bool searched_from_start = false;
- struct bkey end = MAX_KEY;
- SET_KEY_INODE(&end, dc->disk.id);
-
- if (!atomic_read(&dc->disk.detaching) &&
- !dc->writeback_running)
- closure_return(cl);
-
- down_write(&dc->writeback_lock);
-
- if (!atomic_read(&dc->has_dirty)) {
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
- bch_write_bdev_super(dc, NULL);
-
- up_write(&dc->writeback_lock);
- closure_return(cl);
- }
-
- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
- buf->last_scanned = KEY(dc->disk.id, 0, 0);
- searched_from_start = true;
- }
-
- if (dc->partial_stripes_expensive) {
- uint64_t i;
-
- for (i = 0; i < dc->disk.nr_stripes; i++)
- if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
- 1 << dc->disk.stripe_size_bits)
- goto full_stripes;
-
- goto normal_refill;
-full_stripes:
- bch_refill_keybuf(dc->disk.c, buf, &end,
- dirty_full_stripe_pred);
- } else {
-normal_refill:
- bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
- }
-
- if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
- /* Searched the entire btree - delay awhile */
-
- if (RB_EMPTY_ROOT(&buf->keys)) {
- atomic_set(&dc->has_dirty, 0);
- cached_dev_put(dc);
- }
-
- if (!atomic_read(&dc->disk.detaching))
- closure_delay(&dc->writeback, dc->writeback_delay * HZ);
- }
-
- up_write(&dc->writeback_lock);
-
- bch_ratelimit_reset(&dc->writeback_rate);
-
- /* Punt to workqueue only so we don't recurse and blow the stack */
- continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
- if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
- if (!atomic_read(&dc->disk.detaching))
- closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
- continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
- }
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
- if (!atomic_read(&dc->has_dirty) &&
- !atomic_xchg(&dc->has_dirty, 1)) {
- atomic_inc(&dc->count);
-
- if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
- /* XXX: should do this synchronously */
- bch_write_bdev_super(dc, NULL);
- }
-
- bch_writeback_queue(dc);
-
- if (dc->writeback_percent)
- schedule_delayed_work(&dc->writeback_rate_update,
- dc->writeback_rate_update_seconds * HZ);
- }
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
- uint64_t offset, int nr_sectors)
-{
- struct bcache_device *d = c->devices[inode];
- unsigned stripe_size, stripe_offset;
- uint64_t stripe;
-
- if (!d)
- return;
-
- stripe_size = 1 << d->stripe_size_bits;
- stripe = offset >> d->stripe_size_bits;
- stripe_offset = offset & (stripe_size - 1);
-
- while (nr_sectors) {
- int s = min_t(unsigned, abs(nr_sectors),
- stripe_size - stripe_offset);
-
- if (nr_sectors < 0)
- s = -s;
-
- atomic_add(s, d->stripe_sectors_dirty + stripe);
- nr_sectors -= s;
- stripe_offset = 0;
- stripe++;
- }
-}
-
-/* Background writeback - IO loop */
-
static void dirty_io_destructor(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -297,26 +139,25 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
+ int ret;
unsigned i;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
+ struct keylist keys;
- op.type = BTREE_REPLACE;
- bkey_copy(&op.replace, &w->key);
+ bch_keylist_init(&keys);
- SET_KEY_DIRTY(&w->key, false);
- bch_keylist_add(&op.keys, &w->key);
+ bkey_copy(keys.top, &w->key);
+ SET_KEY_DIRTY(keys.top, false);
+ bch_keylist_push(&keys);
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
- bch_btree_insert(&op, dc->disk.c);
- closure_sync(&op.cl);
+ ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
- if (op.insert_collision)
+ if (ret)
trace_bcache_writeback_collision(&w->key);
- atomic_long_inc(op.insert_collision
+ atomic_long_inc(ret
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
}
@@ -374,30 +215,33 @@ static void read_dirty_submit(struct closure *cl)
continue_at(cl, write_dirty, system_wq);
}
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev,
- writeback.cl);
- unsigned delay = writeback_delay(dc, 0);
+ unsigned delay = 0;
struct keybuf_key *w;
struct dirty_io *io;
+ struct closure cl;
+
+ closure_init_stack(&cl);
/*
* XXX: if we error, background writeback just spins. Should use some
* mempools.
*/
- while (1) {
+ while (!kthread_should_stop()) {
+ try_to_freeze();
+
w = bch_keybuf_next(&dc->writeback_keys);
if (!w)
break;
BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
- if (delay > 0 &&
- (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50))
- delay = schedule_timeout_uninterruptible(delay);
+ if (KEY_START(&w->key) != dc->last_read ||
+ jiffies_to_msecs(delay) > 50)
+ while (!kthread_should_stop() && delay)
+ delay = schedule_timeout_interruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -423,7 +267,7 @@ static void read_dirty(struct closure *cl)
trace_bcache_writeback(&w->key);
down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, cl);
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
delay = writeback_delay(dc, KEY_SIZE(&w->key));
}
@@ -439,52 +283,205 @@ err:
* Wait for outstanding writeback IOs to finish (and keybuf slots to be
* freed) before refilling again
*/
- continue_at(cl, refill_dirty, dirty_wq);
+ closure_sync(&cl);
}
-/* Init */
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ uint64_t offset, int nr_sectors)
+{
+ struct bcache_device *d = c->devices[inode];
+ unsigned stripe_offset, stripe, sectors_dirty;
+
+ if (!d)
+ return;
+
+ stripe = offset_to_stripe(d, offset);
+ stripe_offset = offset & (d->stripe_size - 1);
+
+ while (nr_sectors) {
+ int s = min_t(unsigned, abs(nr_sectors),
+ d->stripe_size - stripe_offset);
+
+ if (nr_sectors < 0)
+ s = -s;
+
+ if (stripe >= d->nr_stripes)
+ return;
+
+ sectors_dirty = atomic_add_return(s,
+ d->stripe_sectors_dirty + stripe);
+ if (sectors_dirty == d->stripe_size)
+ set_bit(stripe, d->full_dirty_stripes);
+ else
+ clear_bit(stripe, d->full_dirty_stripes);
+
+ nr_sectors -= s;
+ stripe_offset = 0;
+ stripe++;
+ }
+}
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
- struct cached_dev *dc)
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
- struct bkey *k;
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
- while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
- if (!b->level) {
- if (KEY_INODE(k) > dc->disk.id)
- break;
-
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
- KEY_START(k),
- KEY_SIZE(k));
- } else {
- btree(sectors_dirty_init, k, b, op, dc);
- if (KEY_INODE(k) > dc->disk.id)
- break;
-
- cond_resched();
+ return KEY_DIRTY(k);
+}
+
+static void refill_full_stripes(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ unsigned start_stripe, stripe, next_stripe;
+ bool wrapped = false;
+
+ stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+
+ if (stripe >= dc->disk.nr_stripes)
+ stripe = 0;
+
+ start_stripe = stripe;
+
+ while (1) {
+ stripe = find_next_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ if (stripe == dc->disk.nr_stripes)
+ goto next;
+
+ next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ buf->last_scanned = KEY(dc->disk.id,
+ stripe * dc->disk.stripe_size, 0);
+
+ bch_refill_keybuf(dc->disk.c, buf,
+ &KEY(dc->disk.id,
+ next_stripe * dc->disk.stripe_size, 0),
+ dirty_pred);
+
+ if (array_freelist_empty(&buf->freelist))
+ return;
+
+ stripe = next_stripe;
+next:
+ if (wrapped && stripe > start_stripe)
+ return;
+
+ if (stripe == dc->disk.nr_stripes) {
+ stripe = 0;
+ wrapped = true;
}
+ }
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+ bool searched_from_start = false;
+
+ if (dc->partial_stripes_expensive) {
+ refill_full_stripes(dc);
+ if (array_freelist_empty(&buf->freelist))
+ return false;
+ }
+
+ if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+ buf->last_scanned = KEY(dc->disk.id, 0, 0);
+ searched_from_start = true;
+ }
+
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+ return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+ struct cached_dev *dc = arg;
+ bool searched_full_index;
+
+ while (!kthread_should_stop()) {
+ down_write(&dc->writeback_lock);
+ if (!atomic_read(&dc->has_dirty) ||
+ (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+ !dc->writeback_running)) {
+ up_write(&dc->writeback_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ return 0;
+
+ try_to_freeze();
+ schedule();
+ continue;
+ }
+
+ searched_full_index = refill_dirty(dc);
+
+ if (searched_full_index &&
+ RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+ atomic_set(&dc->has_dirty, 0);
+ cached_dev_put(dc);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ up_write(&dc->writeback_lock);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
+ read_dirty(dc);
+
+ if (searched_full_index) {
+ unsigned delay = dc->writeback_delay * HZ;
+
+ while (delay &&
+ !kthread_should_stop() &&
+ !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ delay = schedule_timeout_interruptible(delay);
+ }
+ }
return 0;
}
+/* Init */
+
+struct sectors_dirty_init {
+ struct btree_op op;
+ unsigned inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ struct bkey *k)
+{
+ struct sectors_dirty_init *op = container_of(_op,
+ struct sectors_dirty_init, op);
+ if (KEY_INODE(k) > op->inode)
+ return MAP_DONE;
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
+
+ return MAP_CONTINUE;
+}
+
void bch_sectors_dirty_init(struct cached_dev *dc)
{
- struct btree_op op;
+ struct sectors_dirty_init op;
+
+ bch_btree_op_init(&op.op, -1);
+ op.inode = dc->disk.id;
- bch_btree_op_init_stack(&op);
- btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+ bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn, 0);
}
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
sema_init(&dc->in_flight, 64);
- closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
-
bch_keybuf_init(&dc->writeback_keys);
dc->writeback_metadata = true;
@@ -498,22 +495,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_rate_p_term_inverse = 64;
dc->writeback_rate_d_smooth = 8;
+ dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ "bcache_writeback");
+ if (IS_ERR(dc->writeback_thread))
+ return PTR_ERR(dc->writeback_thread);
+
+ set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
- if (dirty_wq)
- destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
- dirty_wq = create_workqueue("bcache_writeback");
- if (!dirty_wq)
- return -ENOMEM;
return 0;
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c91f61bb95b..c9ddcf4614b 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,20 +14,27 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+static inline unsigned offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+{
+ do_div(offset, d->stripe_size);
+ return offset;
+}
+
+static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
unsigned nr_sectors)
{
- uint64_t stripe = offset >> d->stripe_size_bits;
+ unsigned stripe = offset_to_stripe(&dc->disk, offset);
while (1) {
- if (atomic_read(d->stripe_sectors_dirty + stripe))
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
return true;
- if (nr_sectors <= 1 << d->stripe_size_bits)
+ if (nr_sectors <= dc->disk.stripe_size)
return false;
- nr_sectors -= 1 << d->stripe_size_bits;
+ nr_sectors -= dc->disk.stripe_size;
stripe++;
}
}
@@ -38,12 +45,12 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK ||
- atomic_read(&dc->disk.detaching) ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
in_use > CUTOFF_WRITEBACK_SYNC)
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_sector,
bio_sectors(bio)))
return true;
@@ -54,11 +61,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
in_use <= CUTOFF_WRITEBACK;
}
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+ wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+ if (!atomic_read(&dc->has_dirty) &&
+ !atomic_xchg(&dc->has_dirty, 1)) {
+ atomic_inc(&dc->count);
+
+ if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+ /* XXX: should do this synchronously */
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ bch_writeback_queue(dc);
+ }
+}
+
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
#endif
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a7fd82133b1..12dc29ba739 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1654,9 +1654,9 @@ int bitmap_create(struct mddev *mddev)
bitmap->mddev = mddev;
if (mddev->kobj.sd)
- bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
+ bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
if (bm) {
- bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
sysfs_put(bm);
} else
bitmap->sysfs_can_clear = NULL;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 1af7255bbff..9ef0752e8a0 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -20,7 +20,13 @@
#define CACHE_SUPERBLOCK_MAGIC 06142003
#define CACHE_SUPERBLOCK_LOCATION 0
-#define CACHE_VERSION 1
+
+/*
+ * defines a range of metadata versions that this module can handle.
+ */
+#define MIN_CACHE_VERSION 1
+#define MAX_CACHE_VERSION 1
+
#define CACHE_METADATA_CACHE_SIZE 64
/*
@@ -134,6 +140,18 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
SUPERBLOCK_CSUM_XOR));
}
+static int check_metadata_version(struct cache_disk_superblock *disk_super)
+{
+ uint32_t metadata_version = le32_to_cpu(disk_super->version);
+ if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
+ DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
+ metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int sb_check(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
@@ -164,7 +182,7 @@ static int sb_check(struct dm_block_validator *v,
return -EILSEQ;
}
- return 0;
+ return check_metadata_version(disk_super);
}
static struct dm_block_validator sb_validator = {
@@ -198,7 +216,7 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
/*----------------------------------------------------------------*/
-static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned i;
@@ -214,10 +232,10 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
return r;
data_le = dm_block_data(b);
- *result = 1;
+ *result = true;
for (i = 0; i < sb_block_size; i++) {
if (data_le[i] != zero) {
- *result = 0;
+ *result = false;
break;
}
}
@@ -270,7 +288,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->flags = 0;
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
- disk_super->version = cpu_to_le32(CACHE_VERSION);
+ disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
@@ -411,7 +429,8 @@ bad:
static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
bool format_device)
{
- int r, unformatted;
+ int r;
+ bool unformatted = false;
r = __superblock_all_zeroes(cmd->bm, &unformatted);
if (r)
@@ -666,19 +685,85 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
kfree(cmd);
}
+/*
+ * Checks that the given cache block is either unmapped or clean.
+ */
+static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
+ bool *result)
+{
+ int r;
+ __le64 value;
+ dm_oblock_t ob;
+ unsigned flags;
+
+ r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
+ if (r) {
+ DMERR("block_unmapped_or_clean failed");
+ return r;
+ }
+
+ unpack_value(value, &ob, &flags);
+ *result = !((flags & M_VALID) && (flags & M_DIRTY));
+
+ return 0;
+}
+
+static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ dm_cblock_t begin, dm_cblock_t end,
+ bool *result)
+{
+ int r;
+ *result = true;
+
+ while (begin != end) {
+ r = block_unmapped_or_clean(cmd, begin, result);
+ if (r)
+ return r;
+
+ if (!*result) {
+ DMERR("cache block %llu is dirty",
+ (unsigned long long) from_cblock(begin));
+ return 0;
+ }
+
+ begin = to_cblock(from_cblock(begin) + 1);
+ }
+
+ return 0;
+}
+
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
+ bool clean;
__le64 null_mapping = pack_value(0, 0);
down_write(&cmd->root_lock);
__dm_bless_for_disk(&null_mapping);
+
+ if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
+ r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
+ if (r) {
+ __dm_unbless_for_disk(&null_mapping);
+ goto out;
+ }
+
+ if (!clean) {
+ DMERR("unable to shrink cache due to dirty blocks");
+ r = -EINVAL;
+ __dm_unbless_for_disk(&null_mapping);
+ goto out;
+ }
+ }
+
r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
from_cblock(new_cache_size),
&null_mapping, &cmd->root);
if (!r)
cmd->cache_blocks = new_cache_size;
cmd->changed = true;
+
+out:
up_write(&cmd->root_lock);
return r;
@@ -1182,3 +1267,8 @@ int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
return r;
}
+
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
+{
+ return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+}
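
The metadata side of shrinking is now guarded: dm_cache_resize() walks the cblocks that would be dropped and refuses the resize while any of them is both mapped and dirty. A minimal user-space sketch of that guard, with hypothetical names (block_state, cache_shrink) rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

enum block_state { B_UNMAPPED, B_CLEAN, B_DIRTY };

/* Refuse to shrink while any block being dropped is still dirty. */
static int cache_shrink(const enum block_state *blocks,
			uint64_t old_size, uint64_t new_size)
{
	uint64_t b;

	for (b = new_size; b < old_size; b++) {
		if (blocks[b] == B_DIRTY) {
			fprintf(stderr, "cache block %llu is dirty\n",
				(unsigned long long) b);
			return -1;	/* mirrors the -EINVAL path above */
		}
	}

	/* safe to drop blocks[new_size .. old_size) */
	return 0;
}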
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index f45cef21f3d..cd906f14f98 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -137,6 +137,11 @@ int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_save_hint(struct dm_cache_metadata *cmd,
dm_cblock_t cblock, uint32_t hint);
+/*
+ * Query method. Are all the blocks in the cache clean?
+ */
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
+
/*----------------------------------------------------------------*/
#endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 0928abdc49f..2256a1f24f7 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -61,7 +61,12 @@ static inline int policy_writeback_work(struct dm_cache_policy *p,
static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- return p->remove_mapping(p, oblock);
+ p->remove_mapping(p, oblock);
+}
+
+static inline int policy_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+ return p->remove_cblock(p, cblock);
}
static inline void policy_force_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 4296155090b..416b7b752a6 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -26,19 +26,6 @@ static unsigned next_power(unsigned n, unsigned min)
/*----------------------------------------------------------------*/
-static unsigned long *alloc_bitset(unsigned nr_entries)
-{
- size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
- return vzalloc(s);
-}
-
-static void free_bitset(unsigned long *bits)
-{
- vfree(bits);
-}
-
-/*----------------------------------------------------------------*/
-
/*
* Large, sequential ios are probably better left on the origin device since
* spindles tend to have good bandwidth.
@@ -151,6 +138,21 @@ static void queue_init(struct queue *q)
}
/*
+ * Checks to see if the queue is empty.
+ * FIXME: reduce cpu usage.
+ */
+static bool queue_empty(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ if (!list_empty(q->qs + i))
+ return false;
+
+ return true;
+}
+
+/*
* Insert an entry to the back of the given level.
*/
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
@@ -218,17 +220,116 @@ struct entry {
struct hlist_node hlist;
struct list_head list;
dm_oblock_t oblock;
- dm_cblock_t cblock; /* valid iff in_cache */
/*
* FIXME: pack these better
*/
- bool in_cache:1;
+ bool dirty:1;
unsigned hit_count;
unsigned generation;
unsigned tick;
};
+/*
+ * Rather than storing the cblock in an entry, we allocate all entries in
+ * an array, and infer the cblock from the entry position.
+ *
+ * Free entries are linked together into a list.
+ */
+struct entry_pool {
+ struct entry *entries, *entries_end;
+ struct list_head free;
+ unsigned nr_allocated;
+};
+
+static int epool_init(struct entry_pool *ep, unsigned nr_entries)
+{
+ unsigned i;
+
+ ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
+ if (!ep->entries)
+ return -ENOMEM;
+
+ ep->entries_end = ep->entries + nr_entries;
+
+ INIT_LIST_HEAD(&ep->free);
+ for (i = 0; i < nr_entries; i++)
+ list_add(&ep->entries[i].list, &ep->free);
+
+ ep->nr_allocated = 0;
+
+ return 0;
+}
+
+static void epool_exit(struct entry_pool *ep)
+{
+ vfree(ep->entries);
+}
+
+static struct entry *alloc_entry(struct entry_pool *ep)
+{
+ struct entry *e;
+
+ if (list_empty(&ep->free))
+ return NULL;
+
+ e = list_entry(list_pop(&ep->free), struct entry, list);
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+ ep->nr_allocated++;
+
+ return e;
+}
+
+/*
+ * This assumes the cblock hasn't already been allocated.
+ */
+static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
+{
+ struct entry *e = ep->entries + from_cblock(cblock);
+ list_del(&e->list);
+
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+ ep->nr_allocated++;
+
+ return e;
+}
+
+static void free_entry(struct entry_pool *ep, struct entry *e)
+{
+ BUG_ON(!ep->nr_allocated);
+ ep->nr_allocated--;
+ INIT_HLIST_NODE(&e->hlist);
+ list_add(&e->list, &ep->free);
+}
+
+/*
+ * Returns NULL if the entry is free.
+ */
+static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
+{
+ struct entry *e = ep->entries + from_cblock(cblock);
+ return !hlist_unhashed(&e->hlist) ? e : NULL;
+}
+
+static bool epool_empty(struct entry_pool *ep)
+{
+ return list_empty(&ep->free);
+}
+
+static bool in_pool(struct entry_pool *ep, struct entry *e)
+{
+ return e >= ep->entries && e < ep->entries_end;
+}
+
+static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
+{
+ return to_cblock(e - ep->entries);
+}
+
+/*----------------------------------------------------------------*/
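
A small user-space model of the entry_pool idea above, using hypothetical names (struct pool, pool_index): every entry lives in one big array, free entries are chained on a list, and the handle (the cblock in the policy code) is recovered from the entry's array index instead of being stored in the entry itself.

#include <stdlib.h>

struct item {
	struct item *next_free;
};

struct pool {
	struct item *items;
	struct item *free_list;
	size_t nr;
};

static int pool_init(struct pool *p, size_t nr)
{
	size_t i;

	p->items = calloc(nr, sizeof(*p->items));
	if (!p->items)
		return -1;

	p->nr = nr;
	p->free_list = NULL;
	for (i = 0; i < nr; i++) {
		p->items[i].next_free = p->free_list;
		p->free_list = &p->items[i];
	}

	return 0;
}

static struct item *pool_alloc(struct pool *p)
{
	struct item *it = p->free_list;

	if (it)
		p->free_list = it->next_free;
	return it;
}

/* The infer_cblock() trick: the handle is just the array index. */
static size_t pool_index(const struct pool *p, const struct item *it)
{
	return (size_t)(it - p->items);
}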
+
struct mq_policy {
struct dm_cache_policy policy;
@@ -238,13 +339,22 @@ struct mq_policy {
struct io_tracker tracker;
/*
- * We maintain two queues of entries. The cache proper contains
- * the currently active mappings. Whereas the pre_cache tracks
- * blocks that are being hit frequently and potential candidates
- * for promotion to the cache.
+ * Entries come from two pools, one of pre-cache entries, and one
+ * for the cache proper.
+ */
+ struct entry_pool pre_cache_pool;
+ struct entry_pool cache_pool;
+
+ /*
+ * We maintain three queues of entries. The cache proper,
+ * consisting of a clean and dirty queue, contains the currently
+ * active mappings, whereas the pre_cache tracks blocks that
+ * are being hit frequently and are potential candidates for promotion
+ * to the cache.
*/
struct queue pre_cache;
- struct queue cache;
+ struct queue cache_clean;
+ struct queue cache_dirty;
/*
* Keeps track of time, incremented by the core. We use this to
@@ -282,25 +392,6 @@ struct mq_policy {
unsigned promote_threshold;
/*
- * We need cache_size entries for the cache, and choose to have
- * cache_size entries for the pre_cache too. One motivation for
- * using the same size is to make the hit counts directly
- * comparable between pre_cache and cache.
- */
- unsigned nr_entries;
- unsigned nr_entries_allocated;
- struct list_head free;
-
- /*
- * Cache blocks may be unallocated. We store this info in a
- * bitset.
- */
- unsigned long *allocation_bitset;
- unsigned nr_cblocks_allocated;
- unsigned find_free_nr_words;
- unsigned find_free_last_word;
-
- /*
* The hash table allows us to quickly find an entry by origin
* block. Both pre_cache and cache entries are in here.
*/
@@ -310,49 +401,6 @@ struct mq_policy {
};
/*----------------------------------------------------------------*/
-/* Free/alloc mq cache entry structures. */
-static void takeout_queue(struct list_head *lh, struct queue *q)
-{
- unsigned level;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_splice(q->qs + level, lh);
-}
-
-static void free_entries(struct mq_policy *mq)
-{
- struct entry *e, *tmp;
-
- takeout_queue(&mq->free, &mq->pre_cache);
- takeout_queue(&mq->free, &mq->cache);
-
- list_for_each_entry_safe(e, tmp, &mq->free, list)
- kmem_cache_free(mq_entry_cache, e);
-}
-
-static int alloc_entries(struct mq_policy *mq, unsigned elts)
-{
- unsigned u = mq->nr_entries;
-
- INIT_LIST_HEAD(&mq->free);
- mq->nr_entries_allocated = 0;
-
- while (u--) {
- struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
-
- if (!e) {
- free_entries(mq);
- return -ENOMEM;
- }
-
-
- list_add(&e->list, &mq->free);
- }
-
- return 0;
-}
-
-/*----------------------------------------------------------------*/
/*
* Simple hash table implementation. Should replace with the standard hash
@@ -388,96 +436,14 @@ static void hash_remove(struct entry *e)
/*----------------------------------------------------------------*/
-/*
- * Allocates a new entry structure. The memory is allocated in one lump,
- * so we just handing it out here. Returns NULL if all entries have
- * already been allocated. Cannot fail otherwise.
- */
-static struct entry *alloc_entry(struct mq_policy *mq)
-{
- struct entry *e;
-
- if (mq->nr_entries_allocated >= mq->nr_entries) {
- BUG_ON(!list_empty(&mq->free));
- return NULL;
- }
-
- e = list_entry(list_pop(&mq->free), struct entry, list);
- INIT_LIST_HEAD(&e->list);
- INIT_HLIST_NODE(&e->hlist);
-
- mq->nr_entries_allocated++;
- return e;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Mark cache blocks allocated or not in the bitset.
- */
-static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
- BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
- BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
-
- set_bit(from_cblock(cblock), mq->allocation_bitset);
- mq->nr_cblocks_allocated++;
-}
-
-static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
- BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
- BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
-
- clear_bit(from_cblock(cblock), mq->allocation_bitset);
- mq->nr_cblocks_allocated--;
-}
-
static bool any_free_cblocks(struct mq_policy *mq)
{
- return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
+ return !epool_empty(&mq->cache_pool);
}
-/*
- * Fills result out with a cache block that isn't in use, or return
- * -ENOSPC. This does _not_ mark the cblock as allocated, the caller is
- * reponsible for that.
- */
-static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
- dm_cblock_t *result, unsigned *last_word)
+static bool any_clean_cblocks(struct mq_policy *mq)
{
- int r = -ENOSPC;
- unsigned w;
-
- for (w = begin; w < end; w++) {
- /*
- * ffz is undefined if no zero exists
- */
- if (mq->allocation_bitset[w] != ~0UL) {
- *last_word = w;
- *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
- if (from_cblock(*result) < from_cblock(mq->cache_size))
- r = 0;
-
- break;
- }
- }
-
- return r;
-}
-
-static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
-{
- int r;
-
- if (!any_free_cblocks(mq))
- return -ENOSPC;
-
- r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
- if (r == -ENOSPC && mq->find_free_last_word)
- r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
-
- return r;
+ return !queue_empty(&mq->cache_clean);
}
/*----------------------------------------------------------------*/
@@ -496,33 +462,35 @@ static unsigned queue_level(struct entry *e)
return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
+static bool in_cache(struct mq_policy *mq, struct entry *e)
+{
+ return in_pool(&mq->cache_pool, e);
+}
+
/*
* Inserts the entry into the pre_cache or the cache. Ensures the cache
- * block is marked as allocated if necc. Inserts into the hash table. Sets the
- * tick which records when the entry was last moved about.
+ * block is marked as allocated if necessary. Inserts into the hash table.
+ * Sets the tick which records when the entry was last moved about.
*/
static void push(struct mq_policy *mq, struct entry *e)
{
e->tick = mq->tick;
hash_insert(mq, e);
- if (e->in_cache) {
- alloc_cblock(mq, e->cblock);
- queue_push(&mq->cache, queue_level(e), &e->list);
- } else
+ if (in_cache(mq, e))
+ queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
+ queue_level(e), &e->list);
+ else
queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
/*
* Removes an entry from pre_cache or cache. Removes from the hash table.
- * Frees off the cache block if necc.
*/
static void del(struct mq_policy *mq, struct entry *e)
{
queue_remove(&e->list);
hash_remove(e);
- if (e->in_cache)
- free_cblock(mq, e->cblock);
}
/*
@@ -531,14 +499,14 @@ static void del(struct mq_policy *mq, struct entry *e)
*/
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
- struct entry *e = container_of(queue_pop(q), struct entry, list);
+ struct entry *e;
+ struct list_head *h = queue_pop(q);
- if (e) {
- hash_remove(e);
+ if (!h)
+ return NULL;
- if (e->in_cache)
- free_cblock(mq, e->cblock);
- }
+ e = container_of(h, struct entry, list);
+ hash_remove(e);
return e;
}
@@ -556,7 +524,8 @@ static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
* of the entries.
*
* At the moment the threshold is taken by averaging the hit counts of some
- * of the entries in the cache (the first 20 entries of the first level).
+ * of the entries in the cache (the first 20 entries across all levels in
+ * ascending order, giving preference to the clean entries at each level).
*
* We can be much cleverer than this though. For example, each promotion
* could bump up the threshold helping to prevent churn. Much more to do
@@ -571,14 +540,21 @@ static void check_generation(struct mq_policy *mq)
struct list_head *head;
struct entry *e;
- if ((mq->hit_count >= mq->generation_period) &&
- (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
-
+ if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
mq->hit_count = 0;
mq->generation++;
for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
- head = mq->cache.qs + level;
+ head = mq->cache_clean.qs + level;
+ list_for_each_entry(e, head, list) {
+ nr++;
+ total += e->hit_count;
+
+ if (++count >= MAX_TO_AVERAGE)
+ break;
+ }
+
+ head = mq->cache_dirty.qs + level;
list_for_each_entry(e, head, list) {
nr++;
total += e->hit_count;
@@ -631,19 +607,30 @@ static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
* - set the hit count to a hard coded value other than 1, eg, is it better
* if it goes in at level 2?
*/
-static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
- dm_cblock_t result;
- struct entry *demoted = pop(mq, &mq->cache);
+ struct entry *demoted = pop(mq, &mq->cache_clean);
+
+ if (!demoted)
+ /*
+ * We could get a block from mq->cache_dirty, but that
+ * would add extra latency to the triggering bio as it
+ * waits for the writeback. Better to not promote this
+ * time and hope there's a clean block next time this block
+ * is hit.
+ */
+ return -ENOSPC;
- BUG_ON(!demoted);
- result = demoted->cblock;
*oblock = demoted->oblock;
- demoted->in_cache = false;
- demoted->hit_count = 1;
- push(mq, demoted);
+ free_entry(&mq->cache_pool, demoted);
+
+ /*
+ * We used to put the demoted block into the pre-cache, but I think
+ * it's simpler to just let it work its way up from zero again.
+ * Stops blocks flickering in and out of the cache.
+ */
- return result;
+ return 0;
}
/*
@@ -662,17 +649,18 @@ static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
bool discarded_oblock, int data_dir)
{
- if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+ if (data_dir == READ)
+ return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
+
+ if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
/*
* We don't need to do any copying at all, so give this a
- * very low threshold. In practice this only triggers
- * during initial population after a format.
+ * very low threshold.
*/
return DISCARDED_PROMOTE_THRESHOLD;
+ }
- return data_dir == READ ?
- (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
- (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+ return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -688,34 +676,49 @@ static int cache_entry_found(struct mq_policy *mq,
{
requeue_and_update_tick(mq, e);
- if (e->in_cache) {
+ if (in_cache(mq, e)) {
result->op = POLICY_HIT;
- result->cblock = e->cblock;
+ result->cblock = infer_cblock(&mq->cache_pool, e);
}
return 0;
}
/*
- * Moves and entry from the pre_cache to the cache. The main work is
+ * Moves an entry from the pre_cache to the cache. The main work is
* finding which cache block to use.
*/
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
struct policy_result *result)
{
- dm_cblock_t cblock;
+ int r;
+ struct entry *new_e;
- if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ /* Ensure there's a free cblock in the cache */
+ if (epool_empty(&mq->cache_pool)) {
result->op = POLICY_REPLACE;
- cblock = demote_cblock(mq, &result->old_oblock);
+ r = demote_cblock(mq, &result->old_oblock);
+ if (r) {
+ result->op = POLICY_MISS;
+ return 0;
+ }
} else
result->op = POLICY_NEW;
- result->cblock = e->cblock = cblock;
+ new_e = alloc_entry(&mq->cache_pool);
+ BUG_ON(!new_e);
+
+ new_e->oblock = e->oblock;
+ new_e->dirty = false;
+ new_e->hit_count = e->hit_count;
+ new_e->generation = e->generation;
+ new_e->tick = e->tick;
del(mq, e);
- e->in_cache = true;
- push(mq, e);
+ free_entry(&mq->pre_cache_pool, e);
+ push(mq, new_e);
+
+ result->cblock = infer_cblock(&mq->cache_pool, new_e);
return 0;
}
@@ -743,7 +746,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
static void insert_in_pre_cache(struct mq_policy *mq,
dm_oblock_t oblock)
{
- struct entry *e = alloc_entry(mq);
+ struct entry *e = alloc_entry(&mq->pre_cache_pool);
if (!e)
/*
@@ -757,7 +760,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,
return;
}
- e->in_cache = false;
+ e->dirty = false;
e->oblock = oblock;
e->hit_count = 1;
e->generation = mq->generation;
@@ -767,30 +770,36 @@ static void insert_in_pre_cache(struct mq_policy *mq,
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
struct policy_result *result)
{
+ int r;
struct entry *e;
- dm_cblock_t cblock;
- if (find_free_cblock(mq, &cblock) == -ENOSPC) {
- result->op = POLICY_MISS;
- insert_in_pre_cache(mq, oblock);
- return;
- }
+ if (epool_empty(&mq->cache_pool)) {
+ result->op = POLICY_REPLACE;
+ r = demote_cblock(mq, &result->old_oblock);
+ if (unlikely(r)) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+ return;
+ }
- e = alloc_entry(mq);
- if (unlikely(!e)) {
- result->op = POLICY_MISS;
- return;
+ /*
+ * This will always succeed, since we've just demoted.
+ */
+ e = alloc_entry(&mq->cache_pool);
+ BUG_ON(!e);
+
+ } else {
+ e = alloc_entry(&mq->cache_pool);
+ result->op = POLICY_NEW;
}
e->oblock = oblock;
- e->cblock = cblock;
- e->in_cache = true;
+ e->dirty = false;
e->hit_count = 1;
e->generation = mq->generation;
push(mq, e);
- result->op = POLICY_NEW;
- result->cblock = e->cblock;
+ result->cblock = infer_cblock(&mq->cache_pool, e);
}
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
@@ -821,13 +830,16 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
int r = 0;
struct entry *e = hash_lookup(mq, oblock);
- if (e && e->in_cache)
+ if (e && in_cache(mq, e))
r = cache_entry_found(mq, e, result);
+
else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
result->op = POLICY_MISS;
+
else if (e)
r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
data_dir, result);
+
else
r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
data_dir, result);
@@ -854,9 +866,9 @@ static void mq_destroy(struct dm_cache_policy *p)
{
struct mq_policy *mq = to_mq_policy(p);
- free_bitset(mq->allocation_bitset);
kfree(mq->table);
- free_entries(mq);
+ epool_exit(&mq->cache_pool);
+ epool_exit(&mq->pre_cache_pool);
kfree(mq);
}
@@ -904,8 +916,8 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t
return -EWOULDBLOCK;
e = hash_lookup(mq, oblock);
- if (e && e->in_cache) {
- *cblock = e->cblock;
+ if (e && in_cache(mq, e)) {
+ *cblock = infer_cblock(&mq->cache_pool, e);
r = 0;
} else
r = -ENOENT;
@@ -915,6 +927,36 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t
return r;
}
+static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
+{
+ struct entry *e;
+
+ e = hash_lookup(mq, oblock);
+ BUG_ON(!e || !in_cache(mq, e));
+
+ del(mq, e);
+ e->dirty = set;
+ push(mq, e);
+}
+
+static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ __mq_set_clear_dirty(mq, oblock, true);
+ mutex_unlock(&mq->lock);
+}
+
+static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ __mq_set_clear_dirty(mq, oblock, false);
+ mutex_unlock(&mq->lock);
+}
+
static int mq_load_mapping(struct dm_cache_policy *p,
dm_oblock_t oblock, dm_cblock_t cblock,
uint32_t hint, bool hint_valid)
@@ -922,13 +964,9 @@ static int mq_load_mapping(struct dm_cache_policy *p,
struct mq_policy *mq = to_mq_policy(p);
struct entry *e;
- e = alloc_entry(mq);
- if (!e)
- return -ENOMEM;
-
- e->cblock = cblock;
+ e = alloc_particular_entry(&mq->cache_pool, cblock);
e->oblock = oblock;
- e->in_cache = true;
+ e->dirty = false; /* this gets corrected in a minute */
e->hit_count = hint_valid ? hint : 1;
e->generation = mq->generation;
push(mq, e);
@@ -936,57 +974,126 @@ static int mq_load_mapping(struct dm_cache_policy *p,
return 0;
}
+static int mq_save_hints(struct mq_policy *mq, struct queue *q,
+ policy_walk_fn fn, void *context)
+{
+ int r;
+ unsigned level;
+ struct entry *e;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each_entry(e, q->qs + level, list) {
+ r = fn(context, infer_cblock(&mq->cache_pool, e),
+ e->oblock, e->hit_count);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
void *context)
{
struct mq_policy *mq = to_mq_policy(p);
int r = 0;
- struct entry *e;
- unsigned level;
mutex_lock(&mq->lock);
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each_entry(e, &mq->cache.qs[level], list) {
- r = fn(context, e->cblock, e->oblock, e->hit_count);
- if (r)
- goto out;
- }
+ r = mq_save_hints(mq, &mq->cache_clean, fn, context);
+ if (!r)
+ r = mq_save_hints(mq, &mq->cache_dirty, fn, context);
-out:
mutex_unlock(&mq->lock);
return r;
}
+static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ struct entry *e;
+
+ e = hash_lookup(mq, oblock);
+ BUG_ON(!e || !in_cache(mq, e));
+
+ del(mq, e);
+ free_entry(&mq->cache_pool, e);
+}
+
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
mutex_lock(&mq->lock);
+ __remove_mapping(mq, oblock);
+ mutex_unlock(&mq->lock);
+}
- e = hash_lookup(mq, oblock);
+static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ struct entry *e = epool_find(&mq->cache_pool, cblock);
- BUG_ON(!e || !e->in_cache);
+ if (!e)
+ return -ENODATA;
del(mq, e);
- e->in_cache = false;
- push(mq, e);
+ free_entry(&mq->cache_pool, e);
+ return 0;
+}
+
+static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ r = __remove_cblock(mq, cblock);
mutex_unlock(&mq->lock);
+
+ return r;
}
-static void force_mapping(struct mq_policy *mq,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
{
- struct entry *e = hash_lookup(mq, current_oblock);
+ struct entry *e = pop(mq, &mq->cache_dirty);
- BUG_ON(!e || !e->in_cache);
+ if (!e)
+ return -ENODATA;
- del(mq, e);
- e->oblock = new_oblock;
+ *oblock = e->oblock;
+ *cblock = infer_cblock(&mq->cache_pool, e);
+ e->dirty = false;
push(mq, e);
+
+ return 0;
+}
+
+static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ r = __mq_writeback_work(mq, oblock, cblock);
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static void __force_mapping(struct mq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct entry *e = hash_lookup(mq, current_oblock);
+
+ if (e && in_cache(mq, e)) {
+ del(mq, e);
+ e->oblock = new_oblock;
+ e->dirty = true;
+ push(mq, e);
+ }
}
static void mq_force_mapping(struct dm_cache_policy *p,
@@ -995,16 +1102,20 @@ static void mq_force_mapping(struct dm_cache_policy *p,
struct mq_policy *mq = to_mq_policy(p);
mutex_lock(&mq->lock);
- force_mapping(mq, current_oblock, new_oblock);
+ __force_mapping(mq, current_oblock, new_oblock);
mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
+ dm_cblock_t r;
struct mq_policy *mq = to_mq_policy(p);
- /* FIXME: lock mutex, not sure we can block here */
- return to_cblock(mq->nr_cblocks_allocated);
+ mutex_lock(&mq->lock);
+ r = to_cblock(mq->cache_pool.nr_allocated);
+ mutex_unlock(&mq->lock);
+
+ return r;
}
static void mq_tick(struct dm_cache_policy *p)
@@ -1057,10 +1168,13 @@ static void init_policy_functions(struct mq_policy *mq)
mq->policy.destroy = mq_destroy;
mq->policy.map = mq_map;
mq->policy.lookup = mq_lookup;
+ mq->policy.set_dirty = mq_set_dirty;
+ mq->policy.clear_dirty = mq_clear_dirty;
mq->policy.load_mapping = mq_load_mapping;
mq->policy.walk_mappings = mq_walk_mappings;
mq->policy.remove_mapping = mq_remove_mapping;
- mq->policy.writeback_work = NULL;
+ mq->policy.remove_cblock = mq_remove_cblock;
+ mq->policy.writeback_work = mq_writeback_work;
mq->policy.force_mapping = mq_force_mapping;
mq->policy.residency = mq_residency;
mq->policy.tick = mq_tick;
@@ -1072,7 +1186,6 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- int r;
struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
@@ -1080,8 +1193,18 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
init_policy_functions(mq);
iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
-
mq->cache_size = cache_size;
+
+ if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
+ DMERR("couldn't initialize pool of pre-cache entries");
+ goto bad_pre_cache_init;
+ }
+
+ if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
+ DMERR("couldn't initialize pool of cache entries");
+ goto bad_cache_init;
+ }
+
mq->tick_protected = 0;
mq->tick = 0;
mq->hit_count = 0;
@@ -1089,20 +1212,12 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->promote_threshold = 0;
mutex_init(&mq->lock);
spin_lock_init(&mq->tick_lock);
- mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
- mq->find_free_last_word = 0;
queue_init(&mq->pre_cache);
- queue_init(&mq->cache);
- mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
+ queue_init(&mq->cache_clean);
+ queue_init(&mq->cache_dirty);
- mq->nr_entries = 2 * from_cblock(cache_size);
- r = alloc_entries(mq, mq->nr_entries);
- if (r)
- goto bad_cache_alloc;
-
- mq->nr_entries_allocated = 0;
- mq->nr_cblocks_allocated = 0;
+ mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
mq->hash_bits = ffs(mq->nr_buckets) - 1;
@@ -1110,17 +1225,13 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
if (!mq->table)
goto bad_alloc_table;
- mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
- if (!mq->allocation_bitset)
- goto bad_alloc_bitset;
-
return &mq->policy;
-bad_alloc_bitset:
- kfree(mq->table);
bad_alloc_table:
- free_entries(mq);
-bad_cache_alloc:
+ epool_exit(&mq->cache_pool);
+bad_cache_init:
+ epool_exit(&mq->pre_cache_pool);
+bad_pre_cache_init:
kfree(mq);
return NULL;
@@ -1130,7 +1241,7 @@ bad_cache_alloc:
static struct dm_cache_policy_type mq_policy_type = {
.name = "mq",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
@@ -1138,7 +1249,7 @@ static struct dm_cache_policy_type mq_policy_type = {
static struct dm_cache_policy_type default_policy_type = {
.name = "default",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index 21c03c570c0..d8005796840 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -119,13 +119,13 @@ struct dm_cache_policy *dm_cache_policy_create(const char *name,
type = get_policy(name);
if (!type) {
DMWARN("unknown policy type");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
p = type->create(cache_size, origin_size, cache_block_size);
if (!p) {
put_policy(type);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
p->private = type;
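
With dm_cache_policy_create() now returning ERR_PTR-encoded errors, a bare NULL test in callers is no longer enough; the IS_ERR()/PTR_ERR() idiom is expected instead, as the updated create_cache_policy() in dm-cache-target.c (later in this patch) shows. A hedged caller sketch, assuming name and the three size arguments are already in scope:

	struct dm_cache_policy *p;

	p = dm_cache_policy_create(name, cache_size, origin_size, cache_block_size);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* -EINVAL for an unknown policy, -ENOMEM otherwise */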
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 33369ca9614..052c00a84a5 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -135,9 +135,6 @@ struct dm_cache_policy {
*/
int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
- /*
- * oblock must be a mapped block. Must not block.
- */
void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
@@ -159,8 +156,24 @@ struct dm_cache_policy {
void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
dm_oblock_t new_oblock);
- int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+ /*
+ * This is called via the invalidate_cblocks message. It is
+ * possible the particular cblock has already been removed due to a
+ * write io in passthrough mode, in which case this should return
+ * -ENODATA.
+ */
+ int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);
+ /*
+ * Provide a dirty block to be written back by the core target.
+ *
+ * Returns:
+ *
+ * 0 and @cblock,@oblock: block to write back provided
+ *
+ * -ENODATA: no dirty blocks available
+ */
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
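
A hedged sketch of how the core target drives this method; the real caller is writeback_some_dirty_blocks() in dm-cache-target.c, which this patch reorders in do_worker() but does not show in full:

	dm_oblock_t oblock;
	dm_cblock_t cblock;

	/* Drain dirty blocks until the policy reports -ENODATA. */
	while (!policy_writeback_work(cache->policy, &oblock, &cblock)) {
		/* copy cblock back to oblock on the origin device,
		 * then clear the block's dirty state */
	}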
/*
* How full is the cache?
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 29569768ffb..9efcf1059b9 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -61,6 +61,34 @@ static void free_bitset(unsigned long *bits)
/*----------------------------------------------------------------*/
+/*
+ * There are a couple of places where we let a bio run, but want to do some
+ * work before calling its endio function. We do this by temporarily
+ * changing the endio fn.
+ */
+struct dm_hook_info {
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+};
+
+static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
+ bio_end_io_t *bi_end_io, void *bi_private)
+{
+ h->bi_end_io = bio->bi_end_io;
+ h->bi_private = bio->bi_private;
+
+ bio->bi_end_io = bi_end_io;
+ bio->bi_private = bi_private;
+}
+
+static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
+{
+ bio->bi_end_io = h->bi_end_io;
+ bio->bi_private = h->bi_private;
+}
+
+/*----------------------------------------------------------------*/
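
A hedged usage sketch of the two helpers above; my_endio and my_private are hypothetical, the real users added later in this patch being writethrough_endio() and overwrite_endio():

	/* Before submitting the bio: park its completion and substitute ours. */
	dm_hook_bio(&pb->hook_info, bio, my_endio, my_private);
	generic_make_request(bio);

	/* ... and inside my_endio(), once the extra work is done ... */
	dm_unhook_bio(&pb->hook_info, bio);	/* restore the original completion */
	bio_endio(bio, err);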
+
#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
@@ -76,14 +104,37 @@ static void free_bitset(unsigned long *bits)
/*
* FIXME: the cache is read/write for the time being.
*/
-enum cache_mode {
+enum cache_metadata_mode {
CM_WRITE, /* metadata may be changed */
CM_READ_ONLY, /* metadata may not be changed */
};
+enum cache_io_mode {
+ /*
+ * Data is written to cached blocks only. These blocks are marked
+ * dirty. If you lose the cache device you will lose data.
+ * Potential performance increase for both reads and writes.
+ */
+ CM_IO_WRITEBACK,
+
+ /*
+ * Data is written to both cache and origin. Blocks are never
+ * dirty. Potential performance benefit for reads only.
+ */
+ CM_IO_WRITETHROUGH,
+
+ /*
+ * A degraded mode useful for various cache coherency situations
+ * (eg, rolling back snapshots). Reads and writes always go to the
+ * origin. If a write goes to a cached oblock, then the cache
+ * block is invalidated.
+ */
+ CM_IO_PASSTHROUGH
+};
+
struct cache_features {
- enum cache_mode mode;
- bool write_through:1;
+ enum cache_metadata_mode mode;
+ enum cache_io_mode io_mode;
};
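
In short: writeback caches both reads and writes and lets cache blocks go dirty; writethrough keeps the origin in sync on every write, so blocks never become dirty; passthrough bypasses the cache for all io and invalidates any cached block that is written to, which is why cache_create() later in this patch refuses to start in passthrough mode unless the metadata reports every block clean.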
struct cache_stats {
@@ -99,6 +150,25 @@ struct cache_stats {
atomic_t discard_count;
};
+/*
+ * Defines a range of cblocks: begin to (end - 1) are in the range; end is
+ * the one-past-the-end value.
+ */
+struct cblock_range {
+ dm_cblock_t begin;
+ dm_cblock_t end;
+};
+
+struct invalidation_request {
+ struct list_head list;
+ struct cblock_range *cblocks;
+
+ atomic_t complete;
+ int err;
+
+ wait_queue_head_t result_wait;
+};
+
struct cache {
struct dm_target *ti;
struct dm_target_callbacks callbacks;
@@ -148,6 +218,10 @@ struct cache {
wait_queue_head_t migration_wait;
atomic_t nr_migrations;
+ wait_queue_head_t quiescing_wait;
+ atomic_t quiescing;
+ atomic_t quiescing_ack;
+
/*
* cache_size entries, dirty if set
*/
@@ -186,7 +260,7 @@ struct cache {
bool need_tick_bio:1;
bool sized:1;
- bool quiescing:1;
+ bool invalidate:1;
bool commit_requested:1;
bool loaded_mappings:1;
bool loaded_discards:1;
@@ -197,6 +271,12 @@ struct cache {
struct cache_features features;
struct cache_stats stats;
+
+ /*
+ * Invalidation fields.
+ */
+ spinlock_t invalidation_lock;
+ struct list_head invalidation_requests;
};
struct per_bio_data {
@@ -211,7 +291,7 @@ struct per_bio_data {
*/
struct cache *cache;
dm_cblock_t cblock;
- bio_end_io_t *saved_bi_end_io;
+ struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -228,6 +308,8 @@ struct dm_cache_migration {
bool writeback:1;
bool demote:1;
bool promote:1;
+ bool requeue_holder:1;
+ bool invalidate:1;
struct dm_bio_prison_cell *old_ocell;
struct dm_bio_prison_cell *new_ocell;
@@ -533,9 +615,24 @@ static void save_stats(struct cache *cache)
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+static bool writethrough_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_WRITETHROUGH;
+}
+
+static bool writeback_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_WRITEBACK;
+}
+
+static bool passthrough_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_PASSTHROUGH;
+}
+
static size_t get_per_bio_data_size(struct cache *cache)
{
- return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+ return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -605,6 +702,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
+ check_if_tick_bio_needed(cache, bio);
remap_to_cache(cache, bio, cblock);
if (bio_data_dir(bio) == WRITE) {
set_dirty(cache, oblock, cblock);
@@ -662,7 +760,8 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
static void writethrough_endio(struct bio *bio, int err)
{
struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
- bio->bi_end_io = pb->saved_bi_end_io;
+
+ dm_unhook_bio(&pb->hook_info, bio);
if (err) {
bio_endio(bio, err);
@@ -693,9 +792,8 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
pb->cache = cache;
pb->cblock = cblock;
- pb->saved_bi_end_io = bio->bi_end_io;
+ dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
dm_bio_record(&pb->bio_details, bio);
- bio->bi_end_io = writethrough_endio;
remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
@@ -748,8 +846,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
static void cleanup_migration(struct dm_cache_migration *mg)
{
- dec_nr_migrations(mg->cache);
+ struct cache *cache = mg->cache;
free_migration(mg);
+ dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -765,13 +864,13 @@ static void migration_failure(struct dm_cache_migration *mg)
DMWARN_LIMIT("demotion failed; couldn't copy block");
policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
- cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote)
- cell_defer(cache, mg->new_ocell, 1);
+ cell_defer(cache, mg->new_ocell, true);
} else {
DMWARN_LIMIT("promotion failed; couldn't copy block");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cell_defer(cache, mg->new_ocell, 1);
+ cell_defer(cache, mg->new_ocell, true);
}
cleanup_migration(mg);
@@ -823,7 +922,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
return;
} else if (mg->demote) {
- cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote) {
mg->demote = false;
@@ -832,11 +931,19 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
list_add_tail(&mg->list, &cache->quiesced_migrations);
spin_unlock_irqrestore(&cache->lock, flags);
- } else
+ } else {
+ if (mg->invalidate)
+ policy_remove_mapping(cache->policy, mg->old_oblock);
cleanup_migration(mg);
+ }
} else {
- cell_defer(cache, mg->new_ocell, true);
+ if (mg->requeue_holder)
+ cell_defer(cache, mg->new_ocell, true);
+ else {
+ bio_endio(mg->new_ocell->holder, 0);
+ cell_defer(cache, mg->new_ocell, false);
+ }
clear_dirty(cache, mg->new_oblock, mg->cblock);
cleanup_migration(mg);
}
@@ -881,8 +988,46 @@ static void issue_copy_real(struct dm_cache_migration *mg)
r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
}
- if (r < 0)
+ if (r < 0) {
+ DMERR_LIMIT("issuing migration failed");
migration_failure(mg);
+ }
+}
+
+static void overwrite_endio(struct bio *bio, int err)
+{
+ struct dm_cache_migration *mg = bio->bi_private;
+ struct cache *cache = mg->cache;
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ unsigned long flags;
+
+ if (err)
+ mg->err = true;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->completed_migrations);
+ dm_unhook_bio(&pb->hook_info, bio);
+ mg->requeue_holder = false;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
+{
+ size_t pb_data_size = get_per_bio_data_size(mg->cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+ dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
+ remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+ generic_make_request(bio);
+}
+
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static void avoid_copy(struct dm_cache_migration *mg)
@@ -899,9 +1044,17 @@ static void issue_copy(struct dm_cache_migration *mg)
if (mg->writeback || mg->demote)
avoid = !is_dirty(cache, mg->cblock) ||
is_discarded_oblock(cache, mg->old_oblock);
- else
+ else {
+ struct bio *bio = mg->new_ocell->holder;
+
avoid = is_discarded_oblock(cache, mg->new_oblock);
+ if (!avoid && bio_writes_complete_block(cache, bio)) {
+ issue_overwrite(mg, bio);
+ return;
+ }
+ }
+
avoid ? avoid_copy(mg) : issue_copy_real(mg);
}
@@ -991,6 +1144,8 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->writeback = false;
mg->demote = false;
mg->promote = true;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->new_oblock = oblock;
mg->cblock = cblock;
@@ -1012,6 +1167,8 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->writeback = true;
mg->demote = false;
mg->promote = false;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->old_oblock = oblock;
mg->cblock = cblock;
@@ -1035,6 +1192,8 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->writeback = false;
mg->demote = true;
mg->promote = true;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->old_oblock = old_oblock;
mg->new_oblock = new_oblock;
@@ -1047,6 +1206,33 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
quiesce_migration(mg);
}
+/*
+ * Invalidate a cache entry. No writeback occurs; any changes in the cache
+ * block are thrown away.
+ */
+static void invalidate(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = true;
+ mg->promote = false;
+ mg->requeue_holder = true;
+ mg->invalidate = true;
+ mg->cache = cache;
+ mg->old_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = cell;
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
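
For reference, the migration flag combinations now in play: promote() sets only promote, writeback() sets only writeback, demote_then_promote() sets demote and promote, and the new invalidate() sets demote plus the invalidate flag, i.e. a demotion whose mapping is also removed from the policy once the commit succeeds (see migration_success_post_commit() above).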
+
/*----------------------------------------------------------------
* bio processing
*--------------------------------------------------------------*/
@@ -1109,13 +1295,6 @@ static bool spare_migration_bandwidth(struct cache *cache)
return current_volume < cache->migration_threshold;
}
-static bool is_writethrough_io(struct cache *cache, struct bio *bio,
- dm_cblock_t cblock)
-{
- return bio_data_dir(bio) == WRITE &&
- cache->features.write_through && !is_dirty(cache, cblock);
-}
-
static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
atomic_inc(bio_data_dir(bio) == READ ?
@@ -1128,6 +1307,15 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
&cache->stats.read_miss : &cache->stats.write_miss);
}
+static void issue_cache_bio(struct cache *cache, struct bio *bio,
+ struct per_bio_data *pb,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_cache_dirty(cache, bio, oblock, cblock);
+ issue(cache, bio);
+}
+
static void process_bio(struct cache *cache, struct prealloc *structs,
struct bio *bio)
{
@@ -1139,7 +1327,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
bool discarded_block = is_discarded_oblock(cache, block);
- bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+ bool passthrough = passthrough_mode(&cache->features);
+ bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
/*
* Check to see if that block is currently migrating.
@@ -1160,15 +1349,39 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
switch (lookup_result.op) {
case POLICY_HIT:
- inc_hit_counter(cache, bio);
- pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ if (passthrough) {
+ inc_miss_counter(cache, bio);
- if (is_writethrough_io(cache, bio, lookup_result.cblock))
- remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ /*
+ * Passthrough always maps to the origin,
+ * invalidating any cache blocks that are written
+ * to.
+ */
+
+ if (bio_data_dir(bio) == WRITE) {
+ atomic_inc(&cache->stats.demotion);
+ invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
+ release_cell = false;
+
+ } else {
+ /* FIXME: factor out issue_origin() */
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
+ }
+ } else {
+ inc_hit_counter(cache, bio);
+
+ if (bio_data_dir(bio) == WRITE &&
+ writethrough_mode(&cache->features) &&
+ !is_dirty(cache, lookup_result.cblock)) {
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ issue(cache, bio);
+ } else
+ issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
+ }
- issue(cache, bio);
break;
case POLICY_MISS:
@@ -1227,15 +1440,17 @@ static int need_commit_due_to_time(struct cache *cache)
static int commit_if_needed(struct cache *cache)
{
- if (dm_cache_changed_this_transaction(cache->cmd) &&
- (cache->commit_requested || need_commit_due_to_time(cache))) {
+ int r = 0;
+
+ if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
+ dm_cache_changed_this_transaction(cache->cmd)) {
atomic_inc(&cache->stats.commit_count);
- cache->last_commit_jiffies = jiffies;
cache->commit_requested = false;
- return dm_cache_commit(cache->cmd, false);
+ r = dm_cache_commit(cache->cmd, false);
+ cache->last_commit_jiffies = jiffies;
}
- return 0;
+ return r;
}
static void process_deferred_bios(struct cache *cache)
@@ -1344,36 +1559,88 @@ static void writeback_some_dirty_blocks(struct cache *cache)
}
/*----------------------------------------------------------------
- * Main worker loop
+ * Invalidations.
+ * Dropping something from the cache *without* writing back.
*--------------------------------------------------------------*/
-static void start_quiescing(struct cache *cache)
+
+static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
{
- unsigned long flags;
+ int r = 0;
+ uint64_t begin = from_cblock(req->cblocks->begin);
+ uint64_t end = from_cblock(req->cblocks->end);
- spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 1;
- spin_unlock_irqrestore(&cache->lock, flags);
+ while (begin != end) {
+ r = policy_remove_cblock(cache->policy, to_cblock(begin));
+ if (!r) {
+ r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
+ if (r)
+ break;
+
+ } else if (r == -ENODATA) {
+ /* harmless, already unmapped */
+ r = 0;
+
+ } else {
+ DMERR("policy_remove_cblock failed");
+ break;
+ }
+
+ begin++;
+ }
+
+ cache->commit_requested = true;
+
+ req->err = r;
+ atomic_set(&req->complete, 1);
+
+ wake_up(&req->result_wait);
}
-static void stop_quiescing(struct cache *cache)
+static void process_invalidation_requests(struct cache *cache)
{
- unsigned long flags;
+ struct list_head list;
+ struct invalidation_request *req, *tmp;
- spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 0;
- spin_unlock_irqrestore(&cache->lock, flags);
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cache->invalidation_lock);
+ list_splice_init(&cache->invalidation_requests, &list);
+ spin_unlock(&cache->invalidation_lock);
+
+ list_for_each_entry_safe (req, tmp, &list, list)
+ process_invalidation_request(cache, req);
}
+/*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache)
{
- int r;
- unsigned long flags;
+ return atomic_read(&cache->quiescing);
+}
- spin_lock_irqsave(&cache->lock, flags);
- r = cache->quiescing;
- spin_unlock_irqrestore(&cache->lock, flags);
+static void ack_quiescing(struct cache *cache)
+{
+ if (is_quiescing(cache)) {
+ atomic_inc(&cache->quiescing_ack);
+ wake_up(&cache->quiescing_wait);
+ }
+}
- return r;
+static void wait_for_quiescing_ack(struct cache *cache)
+{
+ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
+
+static void start_quiescing(struct cache *cache)
+{
+ atomic_inc(&cache->quiescing);
+ wait_for_quiescing_ack(cache);
+}
+
+static void stop_quiescing(struct cache *cache)
+{
+ atomic_set(&cache->quiescing, 0);
+ atomic_set(&cache->quiescing_ack, 0);
}
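
The quiescing handshake above replaces the old spinlock-protected flag with a flag/acknowledge pair: suspend raises quiescing and sleeps until the worker, at a safe point in its loop, bumps quiescing_ack. A user-space model of the same handshake with hypothetical names (it busy-waits where the kernel sleeps on quiescing_wait):

#include <stdatomic.h>

static atomic_int want_quiesce, quiesce_acked;

static void worker_tick(void)
{
	/* ... take on no new work while want_quiesce is set ... */
	if (atomic_load(&want_quiesce))
		atomic_store(&quiesce_acked, 1);
}

static void quiesce_begin(void)
{
	atomic_store(&want_quiesce, 1);
	while (!atomic_load(&quiesce_acked))
		;	/* the kernel sleeps on a waitqueue instead of spinning */
}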
static void wait_for_migrations(struct cache *cache)
@@ -1412,7 +1679,8 @@ static int more_work(struct cache *cache)
!bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
!list_empty(&cache->completed_migrations) ||
- !list_empty(&cache->need_commit_migrations);
+ !list_empty(&cache->need_commit_migrations) ||
+ cache->invalidate;
}
static void do_worker(struct work_struct *ws)
@@ -1420,16 +1688,16 @@ static void do_worker(struct work_struct *ws)
struct cache *cache = container_of(ws, struct cache, worker);
do {
- if (!is_quiescing(cache))
+ if (!is_quiescing(cache)) {
+ writeback_some_dirty_blocks(cache);
+ process_deferred_writethrough_bios(cache);
process_deferred_bios(cache);
+ process_invalidation_requests(cache);
+ }
process_migrations(cache, &cache->quiesced_migrations, issue_copy);
process_migrations(cache, &cache->completed_migrations, complete_migration);
- writeback_some_dirty_blocks(cache);
-
- process_deferred_writethrough_bios(cache);
-
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
@@ -1442,6 +1710,9 @@ static void do_worker(struct work_struct *ws)
process_migrations(cache, &cache->need_commit_migrations,
migration_success_post_commit);
}
+
+ ack_quiescing(cache);
+
} while (more_work(cache));
}
@@ -1715,7 +1986,7 @@ static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
static void init_features(struct cache_features *cf)
{
cf->mode = CM_WRITE;
- cf->write_through = false;
+ cf->io_mode = CM_IO_WRITEBACK;
}
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
@@ -1740,10 +2011,13 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
arg = dm_shift_arg(as);
if (!strcasecmp(arg, "writeback"))
- cf->write_through = false;
+ cf->io_mode = CM_IO_WRITEBACK;
else if (!strcasecmp(arg, "writethrough"))
- cf->write_through = true;
+ cf->io_mode = CM_IO_WRITETHROUGH;
+
+ else if (!strcasecmp(arg, "passthrough"))
+ cf->io_mode = CM_IO_PASSTHROUGH;
else {
*error = "Unrecognised cache feature requested";
@@ -1872,14 +2146,15 @@ static int set_config_values(struct cache *cache, int argc, const char **argv)
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{
- cache->policy = dm_cache_policy_create(ca->policy_name,
- cache->cache_size,
- cache->origin_sectors,
- cache->sectors_per_block);
- if (!cache->policy) {
+ struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
+ cache->cache_size,
+ cache->origin_sectors,
+ cache->sectors_per_block);
+ if (IS_ERR(p)) {
*error = "Error creating cache's policy";
- return -ENOMEM;
+ return PTR_ERR(p);
}
+ cache->policy = p;
return 0;
}
@@ -1995,6 +2270,22 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
cache->cmd = cmd;
+ if (passthrough_mode(&cache->features)) {
+ bool all_clean;
+
+ r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
+ if (r) {
+ *error = "dm_cache_metadata_all_clean() failed";
+ goto bad;
+ }
+
+ if (!all_clean) {
+ *error = "Cannot enter passthrough mode unless all blocks are clean";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
spin_lock_init(&cache->lock);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
@@ -2005,6 +2296,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
+ init_waitqueue_head(&cache->quiescing_wait);
+ atomic_set(&cache->quiescing, 0);
+ atomic_set(&cache->quiescing_ack, 0);
+
r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
@@ -2064,7 +2359,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->need_tick_bio = true;
cache->sized = false;
- cache->quiescing = false;
+ cache->invalidate = false;
cache->commit_requested = false;
cache->loaded_mappings = false;
cache->loaded_discards = false;
@@ -2078,6 +2373,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->stats.commit_count, 0);
atomic_set(&cache->stats.discard_count, 0);
+ spin_lock_init(&cache->invalidation_lock);
+ INIT_LIST_HEAD(&cache->invalidation_requests);
+
*result = cache;
return 0;
@@ -2207,17 +2505,37 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ r = DM_MAPIO_REMAPPED;
switch (lookup_result.op) {
case POLICY_HIT:
- inc_hit_counter(cache, bio);
- pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ if (passthrough_mode(&cache->features)) {
+ if (bio_data_dir(bio) == WRITE) {
+ /*
+ * We need to invalidate this block, so
+ * defer for the worker thread.
+ */
+ cell_defer(cache, cell, true);
+ r = DM_MAPIO_SUBMITTED;
+
+ } else {
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ inc_miss_counter(cache, bio);
+ remap_to_origin_clear_discard(cache, bio, block);
+
+ cell_defer(cache, cell, false);
+ }
- if (is_writethrough_io(cache, bio, lookup_result.cblock))
- remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ } else {
+ inc_hit_counter(cache, bio);
- cell_defer(cache, cell, false);
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ !is_dirty(cache, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ else
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+ cell_defer(cache, cell, false);
+ }
break;
case POLICY_MISS:
@@ -2242,10 +2560,10 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
(unsigned) lookup_result.op);
bio_io_error(bio);
- return DM_MAPIO_SUBMITTED;
+ r = DM_MAPIO_SUBMITTED;
}
- return DM_MAPIO_REMAPPED;
+ return r;
}
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -2406,26 +2724,71 @@ static int load_discard(void *context, sector_t discard_block_size,
return 0;
}
+static dm_cblock_t get_cache_dev_size(struct cache *cache)
+{
+ sector_t size = get_dev_size(cache->cache_dev);
+ (void) sector_div(size, cache->sectors_per_block);
+ return to_cblock(size);
+}
+
+static bool can_resize(struct cache *cache, dm_cblock_t new_size)
+{
+ if (from_cblock(new_size) > from_cblock(cache->cache_size))
+ return true;
+
+ /*
+ * We can't drop a dirty block when shrinking the cache.
+ */
+ while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
+ new_size = to_cblock(from_cblock(new_size) + 1);
+ if (is_dirty(cache, new_size)) {
+ DMERR("unable to shrink cache; cache block %llu is dirty",
+ (unsigned long long) from_cblock(new_size));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
+{
+ int r;
+
+ r = dm_cache_resize(cache->cmd, cache->cache_size);
+ if (r) {
+ DMERR("could not resize cache metadata");
+ return r;
+ }
+
+ cache->cache_size = new_size;
+
+ return 0;
+}
+
static int cache_preresume(struct dm_target *ti)
{
int r = 0;
struct cache *cache = ti->private;
- sector_t actual_cache_size = get_dev_size(cache->cache_dev);
- (void) sector_div(actual_cache_size, cache->sectors_per_block);
+ dm_cblock_t csize = get_cache_dev_size(cache);
/*
* Check to see if the cache has resized.
*/
- if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
- cache->cache_size = to_cblock(actual_cache_size);
-
- r = dm_cache_resize(cache->cmd, cache->cache_size);
- if (r) {
- DMERR("could not resize cache metadata");
+ if (!cache->sized) {
+ r = resize_cache_dev(cache, csize);
+ if (r)
return r;
- }
cache->sized = true;
+
+ } else if (csize != cache->cache_size) {
+ if (!can_resize(cache, csize))
+ return -EINVAL;
+
+ r = resize_cache_dev(cache, csize);
+ if (r)
+ return r;
}
if (!cache->loaded_mappings) {
@@ -2518,10 +2881,19 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned long long) from_cblock(residency),
cache->nr_dirty);
- if (cache->features.write_through)
+ if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough ");
- else
- DMEMIT("0 ");
+
+ else if (passthrough_mode(&cache->features))
+ DMEMIT("1 passthrough ");
+
+ else if (writeback_mode(&cache->features))
+ DMEMIT("1 writeback ");
+
+ else {
+ DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
+ goto err;
+ }
DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
if (sz < maxlen) {
@@ -2553,7 +2925,128 @@ err:
}
/*
- * Supports <key> <value>.
+ * A cache block range can take two forms:
+ *
+ * i) A single cblock, eg. '3456'
+ * ii) A begin and end cblock with a dash between, eg. 123-234
+ */
+static int parse_cblock_range(struct cache *cache, const char *str,
+ struct cblock_range *result)
+{
+ char dummy;
+ uint64_t b, e;
+ int r;
+
+ /*
+ * Try and parse form (ii) first.
+ */
+ r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
+ if (r < 0)
+ return r;
+
+ if (r == 2) {
+ result->begin = to_cblock(b);
+ result->end = to_cblock(e);
+ return 0;
+ }
+
+ /*
+ * That didn't work, try form (i).
+ */
+ r = sscanf(str, "%llu%c", &b, &dummy);
+ if (r < 0)
+ return r;
+
+ if (r == 1) {
+ result->begin = to_cblock(b);
+ result->end = to_cblock(from_cblock(result->begin) + 1u);
+ return 0;
+ }
+
+ DMERR("invalid cblock range '%s'", str);
+ return -EINVAL;
+}
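
The same two input forms can be exercised outside the kernel with a tiny harness. This is only a sketch mirroring the sscanf() calls above, and demo_parse_range() is a hypothetical name; in practice such ranges would typically reach the target through a device-mapper message (for example via dmsetup message <cache-dev> 0 invalidate_cblocks 3456 123-234):

#include <stdio.h>

/* Hypothetical userspace analogue of parse_cblock_range(): returns 0 and
 * fills the half-open range [*begin, *end) on success, -1 on bad input. */
static int demo_parse_range(const char *str,
			    unsigned long long *begin, unsigned long long *end)
{
	unsigned long long b, e;
	char dummy;

	/* Form (ii): "<begin>-<end>" -- exactly two numbers, nothing after */
	if (sscanf(str, "%llu-%llu%c", &b, &e, &dummy) == 2) {
		*begin = b;
		*end = e;
		return 0;
	}

	/* Form (i): a single cblock maps to [b, b + 1) */
	if (sscanf(str, "%llu%c", &b, &dummy) == 1) {
		*begin = b;
		*end = b + 1;
		return 0;
	}

	return -1;
}

int main(void)
{
	unsigned long long b, e;

	if (!demo_parse_range("3456", &b, &e))
		printf("single: [%llu, %llu)\n", b, e);	/* [3456, 3457) */
	if (!demo_parse_range("123-234", &b, &e))
		printf("dash:   [%llu, %llu)\n", b, e);	/* [123, 234)   */
	return 0;
}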
+
+static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
+{
+ uint64_t b = from_cblock(range->begin);
+ uint64_t e = from_cblock(range->end);
+ uint64_t n = from_cblock(cache->cache_size);
+
+ if (b >= n) {
+ DMERR("begin cblock out of range: %llu >= %llu", b, n);
+ return -EINVAL;
+ }
+
+ if (e > n) {
+ DMERR("end cblock out of range: %llu > %llu", e, n);
+ return -EINVAL;
+ }
+
+ if (b >= e) {
+ DMERR("invalid cblock range: %llu >= %llu", b, e);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int request_invalidation(struct cache *cache, struct cblock_range *range)
+{
+ struct invalidation_request req;
+
+ INIT_LIST_HEAD(&req.list);
+ req.cblocks = range;
+ atomic_set(&req.complete, 0);
+ req.err = 0;
+ init_waitqueue_head(&req.result_wait);
+
+ spin_lock(&cache->invalidation_lock);
+ list_add(&req.list, &cache->invalidation_requests);
+ spin_unlock(&cache->invalidation_lock);
+ wake_worker(cache);
+
+ wait_event(req.result_wait, atomic_read(&req.complete));
+ return req.err;
+}
+
+static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
+ const char **cblock_ranges)
+{
+ int r = 0;
+ unsigned i;
+ struct cblock_range range;
+
+ if (!passthrough_mode(&cache->features)) {
+ DMERR("cache has to be in passthrough mode for invalidation");
+ return -EPERM;
+ }
+
+ for (i = 0; i < count; i++) {
+ r = parse_cblock_range(cache, cblock_ranges[i], &range);
+ if (r)
+ break;
+
+ r = validate_cblock_range(cache, &range);
+ if (r)
+ break;
+
+ /*
+ * Pass the begin and end cache blocks to the worker and wake it.
+ */
+ r = request_invalidation(cache, &range);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+/*
+ * Supports
+ * "<key> <value>"
+ * and
+ * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
*
* The key migration_threshold is supported by the cache target core.
*/
@@ -2561,6 +3054,12 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct cache *cache = ti->private;
+ if (!argc)
+ return -EINVAL;
+
+ if (!strcasecmp(argv[0], "invalidate_cblocks"))
+ return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
+
if (argc != 2)
return -EINVAL;
@@ -2630,7 +3129,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0fce0bc1a95..81b0fa66045 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2,6 +2,7 @@
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
* Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
*
* This file is released under the GPL.
*/
@@ -98,6 +99,13 @@ struct iv_lmk_private {
u8 *seed;
};
+#define TCW_WHITENING_SIZE 16
+struct iv_tcw_private {
+ struct crypto_shash *crc32_tfm;
+ u8 *iv_seed;
+ u8 *whitening;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -139,6 +147,7 @@ struct crypt_config {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
+ struct iv_tcw_private tcw;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
@@ -171,7 +180,8 @@ struct crypt_config {
unsigned long flags;
unsigned int key_size;
- unsigned int key_parts;
+ unsigned int key_parts; /* independent parts in key buffer */
+ unsigned int key_extra_size; /* additional keys length */
u8 key[0];
};
@@ -230,6 +240,16 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
* version 3: the same as version 2 with additional IV seed
* (it uses 65 keys, last key is used as IV seed)
*
+ * tcw: Compatible implementation of the block chaining mode used
+ * by the TrueCrypt device encryption system (prior to version 4.1).
+ * For more info see: http://www.truecrypt.org
+ * It operates on full 512 byte sectors and uses CBC
+ * with an IV derived from initial key and the sector number.
+ * In addition, a whitening value is applied to every sector; the whitening
+ * is derived from the initial key and the sector number, mixed using CRC32.
+ * Note that this encryption scheme is vulnerable to watermarking attacks
+ * and should only be used to access old, compatible containers.
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
@@ -530,7 +550,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
char ctx[crypto_shash_descsize(lmk->hash_tfm)];
} sdesc;
struct md5_state md5state;
- u32 buf[4];
+ __le32 buf[4];
int i, r;
sdesc.desc.tfm = lmk->hash_tfm;
@@ -608,6 +628,153 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
return r;
}
+static void crypt_iv_tcw_dtr(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ kzfree(tcw->iv_seed);
+ tcw->iv_seed = NULL;
+ kzfree(tcw->whitening);
+ tcw->whitening = NULL;
+
+ if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
+ crypto_free_shash(tcw->crc32_tfm);
+ tcw->crc32_tfm = NULL;
+}
+
+static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
+ ti->error = "Wrong key size for TCW";
+ return -EINVAL;
+ }
+
+ tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(tcw->crc32_tfm)) {
+ ti->error = "Error initializing CRC32 in TCW";
+ return PTR_ERR(tcw->crc32_tfm);
+ }
+
+ tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
+ tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
+ if (!tcw->iv_seed || !tcw->whitening) {
+ crypt_iv_tcw_dtr(cc);
+ ti->error = "Error allocating seed storage in TCW";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int crypt_iv_tcw_init(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
+
+ memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
+ memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
+ TCW_WHITENING_SIZE);
+
+ return 0;
+}
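
To make the key layout that crypt_iv_tcw_ctr() checks and crypt_iv_tcw_init() consumes concrete, here is a small sketch; the 64-byte aes-cbc figures are an illustrative assumption, not something the patch mandates:

#include <stdio.h>

#define TCW_WHITENING_SIZE 16

int main(void)
{
	/* Assumed example: aes-cbc with a 64-byte key buffer and a 16-byte IV. */
	unsigned key_size = 64;
	unsigned iv_size = 16;

	unsigned key_offset = key_size - iv_size - TCW_WHITENING_SIZE;

	/* Layout of cc->key[]:
	 *   [0 .. key_offset)                     cipher key(s)   (32 bytes here)
	 *   [key_offset .. key_offset + iv_size)  tcw->iv_seed    (16 bytes)
	 *   [key_offset + iv_size .. key_size)    tcw->whitening  (16 bytes)
	 */
	printf("cipher key: [0..%u), iv_seed: [%u..%u), whitening: [%u..%u)\n",
	       key_offset, key_offset, key_offset + iv_size,
	       key_offset + iv_size, key_size);
	return 0;
}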
+
+static int crypt_iv_tcw_wipe(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ memset(tcw->iv_seed, 0, cc->iv_size);
+ memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
+
+ return 0;
+}
+
+static int crypt_iv_tcw_whitening(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq,
+ u8 *data)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ u8 buf[TCW_WHITENING_SIZE];
+ struct {
+ struct shash_desc desc;
+ char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
+ } sdesc;
+ int i, r;
+
+ /* xor whitening with sector number */
+ memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
+ crypto_xor(buf, (u8 *)&sector, 8);
+ crypto_xor(&buf[8], (u8 *)&sector, 8);
+
+ /* calculate crc32 for every 32bit part and xor it */
+ sdesc.desc.tfm = tcw->crc32_tfm;
+ sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ for (i = 0; i < 4; i++) {
+ r = crypto_shash_init(&sdesc.desc);
+ if (r)
+ goto out;
+ r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
+ if (r)
+ goto out;
+ r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
+ if (r)
+ goto out;
+ }
+ crypto_xor(&buf[0], &buf[12], 4);
+ crypto_xor(&buf[4], &buf[8], 4);
+
+ /* apply whitening (8 bytes) to whole sector */
+ for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
+ crypto_xor(data + i * 8, buf, 8);
+out:
+ memset(buf, 0, sizeof(buf));
+ return r;
+}
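
The whitening computation above is easier to follow in a stand-alone sketch. The CRC below is a plain bitwise CRC-32 acting only as a stand-in; the seed, finalisation and digest byte order of the kernel's "crc32" shash are not reproduced, all demo_* names are invented, and a little-endian host is assumed. Only the xor/fold/apply data flow mirrors crypt_iv_tcw_whitening():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WHITENING_SIZE	16
#define SECTOR_SIZE	512

/* Illustrative stand-in for a CRC-32 transform (reflected, poly 0xEDB88320). */
static uint32_t demo_crc32(const uint8_t *data, size_t len)
{
	uint32_t crc = 0;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

/* 'sector_le' is assumed to already be in little-endian byte order (true on
 * a little-endian host for a plain uint64_t). */
static void demo_tcw_whiten_sector(const uint8_t *whitening_seed,
				   uint64_t sector_le, uint8_t *data)
{
	uint8_t buf[WHITENING_SIZE];
	uint32_t crc;
	int i, j;

	/* 1. xor the 16-byte whitening seed with the sector number, twice */
	memcpy(buf, whitening_seed, WHITENING_SIZE);
	for (i = 0; i < 8; i++) {
		buf[i]     ^= ((const uint8_t *)&sector_le)[i];
		buf[8 + i] ^= ((const uint8_t *)&sector_le)[i];
	}

	/* 2. replace each 32-bit word with its CRC32 (host byte order here) */
	for (i = 0; i < 4; i++) {
		crc = demo_crc32(&buf[i * 4], 4);
		memcpy(&buf[i * 4], &crc, 4);
	}

	/* 3. fold 16 bytes down to 8: buf[0..3] ^= buf[12..15],
	 *    buf[4..7] ^= buf[8..11] */
	for (i = 0; i < 4; i++) {
		buf[i]     ^= buf[12 + i];
		buf[4 + i] ^= buf[8 + i];
	}

	/* 4. xor the resulting 8 bytes over the whole 512-byte sector */
	for (i = 0; i < SECTOR_SIZE / 8; i++)
		for (j = 0; j < 8; j++)
			data[i * 8 + j] ^= buf[j];
}

int main(void)
{
	uint8_t seed[WHITENING_SIZE] = { 0 };
	uint8_t sector_data[SECTOR_SIZE] = { 0 };

	demo_tcw_whiten_sector(seed, 5, sector_data);
	printf("first whitened byte: 0x%02x\n", sector_data[0]);
	return 0;
}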
+
+static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ u8 *src;
+ int r = 0;
+
+ /* Remove whitening from ciphertext */
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+ src = kmap_atomic(sg_page(&dmreq->sg_in));
+ r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
+ kunmap_atomic(src);
+ }
+
+ /* Calculate IV */
+ memcpy(iv, tcw->iv_seed, cc->iv_size);
+ crypto_xor(iv, (u8 *)&sector, 8);
+ if (cc->iv_size > 8)
+ crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
+
+ return r;
+}
+
+static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ u8 *dst;
+ int r;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+ return 0;
+
+ /* Apply whitening on ciphertext */
+ dst = kmap_atomic(sg_page(&dmreq->sg_out));
+ r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
+ kunmap_atomic(dst);
+
+ return r;
+}
+
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -643,6 +810,15 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
+static struct crypt_iv_operations crypt_iv_tcw_ops = {
+ .ctr = crypt_iv_tcw_ctr,
+ .dtr = crypt_iv_tcw_dtr,
+ .init = crypt_iv_tcw_init,
+ .wipe = crypt_iv_tcw_wipe,
+ .generator = crypt_iv_tcw_gen,
+ .post = crypt_iv_tcw_post
+};
+
static void crypt_convert_init(struct crypt_config *cc,
struct convert_context *ctx,
struct bio *bio_out, struct bio *bio_in,
@@ -774,7 +950,7 @@ static int crypt_convert(struct crypt_config *cc,
/* async */
case -EBUSY:
wait_for_completion(&ctx->restart);
- INIT_COMPLETION(ctx->restart);
+ reinit_completion(&ctx->restart);
/* fall through*/
case -EINPROGRESS:
this_cc->req = NULL;
@@ -1274,9 +1450,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
- unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+ unsigned subkey_size;
int err = 0, i, r;
+ /* Ignore extra keys (which are used for IV etc) */
+ subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
+
for (i = 0; i < cc->tfms_count; i++) {
r = crypto_ablkcipher_setkey(cc->tfms[i],
cc->key + (i * subkey_size),
@@ -1409,6 +1588,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
return -EINVAL;
}
cc->key_parts = cc->tfms_count;
+ cc->key_extra_size = 0;
cc->cipher = kstrdup(cipher, GFP_KERNEL);
if (!cc->cipher)
@@ -1460,13 +1640,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
goto bad;
}
- /* Initialize and set key */
- ret = crypt_set_key(cc, key);
- if (ret < 0) {
- ti->error = "Error decoding and setting key";
- goto bad;
- }
-
/* Initialize IV */
cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
@@ -1493,18 +1666,33 @@ static int crypt_ctr_cipher(struct dm_target *ti,
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
- /* Version 2 and 3 is recognised according
+ /*
+ * Versions 2 and 3 are recognised according
* to length of provided multi-key string.
* If present (version 3), last key is used as IV seed.
+ * All keys (including IV seed) are always the same size.
*/
- if (cc->key_size % cc->key_parts)
+ if (cc->key_size % cc->key_parts) {
cc->key_parts++;
+ cc->key_extra_size = cc->key_size / cc->key_parts;
+ }
+ } else if (strcmp(ivmode, "tcw") == 0) {
+ cc->iv_gen_ops = &crypt_iv_tcw_ops;
+ cc->key_parts += 2; /* IV + whitening */
+ cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
} else {
ret = -EINVAL;
ti->error = "Invalid IV mode";
goto bad;
}
+ /* Initialize and set key */
+ ret = crypt_set_key(cc, key);
+ if (ret < 0) {
+ ti->error = "Error decoding and setting key";
+ goto bad;
+ }
+
/* Allocate IV */
if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
@@ -1817,7 +2005,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afe08146f73..51521429fb5 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -57,7 +57,7 @@ struct vers_iter {
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];
-static void dm_hash_remove_all(int keep_open_devices);
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
/*
* Guards access to both hash tables.
@@ -86,7 +86,7 @@ static int dm_hash_init(void)
static void dm_hash_exit(void)
{
- dm_hash_remove_all(0);
+ dm_hash_remove_all(false, false, false);
}
/*-----------------------------------------------------------------
@@ -276,7 +276,7 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
return table;
}
-static void dm_hash_remove_all(int keep_open_devices)
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
int i, dev_skipped;
struct hash_cell *hc;
@@ -293,7 +293,8 @@ retry:
md = hc->md;
dm_get(md);
- if (keep_open_devices && dm_lock_for_deletion(md)) {
+ if (keep_open_devices &&
+ dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
dm_put(md);
dev_skipped++;
continue;
@@ -450,6 +451,11 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
return md;
}
+void dm_deferred_remove(void)
+{
+ dm_hash_remove_all(true, false, true);
+}
+
/*-----------------------------------------------------------------
* Implementation of the ioctl commands
*---------------------------------------------------------------*/
@@ -461,7 +467,7 @@ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
static int remove_all(struct dm_ioctl *param, size_t param_size)
{
- dm_hash_remove_all(1);
+ dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
param->data_size = 0;
return 0;
}
@@ -683,6 +689,9 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended_md(md))
param->flags |= DM_SUSPEND_FLAG;
+ if (dm_test_deferred_remove_flag(md))
+ param->flags |= DM_DEFERRED_REMOVE;
+
param->dev = huge_encode_dev(disk_devt(disk));
/*
@@ -832,8 +841,13 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
/*
* Ensure the device is not open and nothing further can open it.
*/
- r = dm_lock_for_deletion(md);
+ r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
if (r) {
+ if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
+ up_write(&_hash_lock);
+ dm_put(md);
+ return 0;
+ }
DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
up_write(&_hash_lock);
dm_put(md);
@@ -848,6 +862,8 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
dm_table_destroy(t);
}
+ param->flags &= ~DM_DEFERRED_REMOVE;
+
if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
param->flags |= DM_UEVENT_GENERATED_FLAG;
@@ -1469,6 +1485,14 @@ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
if (**argv != '@')
return 2; /* no '@' prefix, deliver to target */
+ if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
+ if (argc != 1) {
+ DMERR("Invalid arguments for @cancel_deferred_remove");
+ return -EINVAL;
+ }
+ return dm_cancel_deferred_remove(md);
+ }
+
r = dm_stats_message(md, argc, argv, result, maxlen);
if (r < 2)
return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de570a55876..6eb9dc9ef8f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -87,6 +87,7 @@ struct multipath {
unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
+ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
@@ -390,13 +391,16 @@ static int map_io(struct multipath *m, struct request *clone,
if (was_queued)
m->queue_size--;
- if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path)) {
+ if (m->pg_init_required) {
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
+ r = DM_MAPIO_REQUEUE;
+ } else if ((pgpath && m->queue_io) ||
+ (!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
- if ((m->pg_init_required && !m->pg_init_in_progress) ||
- !m->queue_io)
+ if (!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
r = DM_MAPIO_SUBMITTED;
@@ -497,7 +501,8 @@ static void process_queued_ios(struct work_struct *work)
(!pgpath && !m->queue_if_no_path))
must_queue = 0;
- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
+ !m->pg_init_disabled)
__pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
@@ -942,10 +947,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ m->pg_init_disabled = 1;
+ spin_unlock_irqrestore(&m->lock, flags);
+
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
+
+ spin_lock_irqsave(&m->lock, flags);
+ m->pg_init_disabled = 0;
+ spin_unlock_irqrestore(&m->lock, flags);
}
static void multipath_dtr(struct dm_target *ti)
@@ -1164,7 +1179,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (m->pg_init_count <= m->pg_init_retries)
+ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
m->pg_init_required = 1;
else
limit_reached = 1;
@@ -1665,6 +1680,11 @@ static int multipath_busy(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
+ /* pg_init in progress, requeue until done */
+ if (m->pg_init_in_progress) {
+ busy = 1;
+ goto out;
+ }
/* Guess which priority_group will be used at next mapping time */
if (unlikely(!m->current_pgpath && m->next_pg))
pg = m->next_pg;
@@ -1714,7 +1734,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 5, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8f8783533ac..465f08ca62b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -545,14 +545,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
/*
* Used to dynamically allocate the arg array.
+ *
+ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
+ * process messages even if some device is suspended. These messages have a
+ * small fixed number of arguments.
+ *
+ * On the other hand, dm-switch needs to process bulk data using messages and
+ * excessive use of GFP_NOIO could cause trouble.
*/
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
char **argv;
unsigned new_size;
+ gfp_t gfp;
- new_size = *array_size ? *array_size * 2 : 64;
- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+ if (*array_size) {
+ new_size = *array_size * 2;
+ gfp = GFP_KERNEL;
+ } else {
+ new_size = 8;
+ gfp = GFP_NOIO;
+ }
+ argv = kmalloc(new_size * sizeof(*argv), gfp);
if (argv) {
memcpy(argv, old_argv, *array_size * sizeof(*argv));
*array_size = new_size;
@@ -1548,8 +1562,11 @@ int dm_table_resume_targets(struct dm_table *t)
continue;
r = ti->type->preresume(ti);
- if (r)
+ if (r) {
+ DMERR("%s: %s: preresume failed, error = %d",
+ dm_device_name(t->md), ti->type->name, r);
return r;
+ }
}
for (i = 0; i < t->num_targets; i++) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3e26c7d141..0704c523a76 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -49,6 +49,11 @@ static unsigned int _major = 0;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
+
+static void do_deferred_remove(struct work_struct *w);
+
+static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
+
/*
* For bio-based dm.
* One of these is allocated per bio.
@@ -116,6 +121,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
+#define DMF_DEFERRED_REMOVE 7
/*
* A dummy definition to make RCU happy.
@@ -299,6 +305,8 @@ out_free_io_cache:
static void local_exit(void)
{
+ flush_scheduled_work();
+
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -404,7 +412,10 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
spin_lock(&_minor_lock);
- atomic_dec(&md->open_count);
+ if (atomic_dec_and_test(&md->open_count) &&
+ (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ schedule_work(&deferred_remove_work);
+
dm_put(md);
spin_unlock(&_minor_lock);
@@ -418,14 +429,18 @@ int dm_open_count(struct mapped_device *md)
/*
* Guarantees nothing is using the device before it's deleted.
*/
-int dm_lock_for_deletion(struct mapped_device *md)
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
int r = 0;
spin_lock(&_minor_lock);
- if (dm_open_count(md))
+ if (dm_open_count(md)) {
r = -EBUSY;
+ if (mark_deferred)
+ set_bit(DMF_DEFERRED_REMOVE, &md->flags);
+ } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
+ r = -EEXIST;
else
set_bit(DMF_DELETING, &md->flags);
@@ -434,6 +449,27 @@ int dm_lock_for_deletion(struct mapped_device *md)
return r;
}
+int dm_cancel_deferred_remove(struct mapped_device *md)
+{
+ int r = 0;
+
+ spin_lock(&_minor_lock);
+
+ if (test_bit(DMF_DELETING, &md->flags))
+ r = -EBUSY;
+ else
+ clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
+
+ spin_unlock(&_minor_lock);
+
+ return r;
+}
+
+static void do_deferred_remove(struct work_struct *w)
+{
+ dm_deferred_remove();
+}
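
The deferred-remove plumbing added here boils down to "the last closer of a device that was marked for deferred removal kicks a work item". A minimal userspace analogue of that last-closer pattern, assuming C11 atomics (none of these names exist in dm):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int open_count = 2;	/* two pretend openers */
static atomic_bool deferred_remove_requested = false;

static void run_deferred_remove(void)
{
	/* stands in for schedule_work(&deferred_remove_work), which ends up
	 * in dm_deferred_remove() */
	puts("deferred remove executed");
}

/* Analogue of dm_blk_close(): only the closer that drops the count to zero,
 * and only if removal was deferred while the device was busy, triggers it. */
static void demo_close(void)
{
	if (atomic_fetch_sub(&open_count, 1) == 1 &&
	    atomic_load(&deferred_remove_requested))
		run_deferred_remove();
}

int main(void)
{
	/* Removal attempted while open: mark it deferred instead of failing. */
	atomic_store(&deferred_remove_requested, true);

	demo_close();	/* count 2 -> 1: nothing happens        */
	demo_close();	/* count 1 -> 0: deferred remove runs   */
	return 0;
}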
+
sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
@@ -2894,6 +2930,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
+int dm_test_deferred_remove_flag(struct mapped_device *md)
+{
+ return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
+}
+
int dm_suspended(struct dm_target *ti)
{
return dm_suspended_md(dm_table_get_md(ti->table));
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1d1ad7b7e52..c57ba550f69 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -129,6 +129,16 @@ int dm_deleting_md(struct mapped_device *md);
int dm_suspended_md(struct mapped_device *md);
/*
+ * Test if the device is scheduled for deferred remove.
+ */
+int dm_test_deferred_remove_flag(struct mapped_device *md);
+
+/*
+ * Try to remove devices marked for deferred removal.
+ */
+void dm_deferred_remove(void);
+
+/*
* The device-mapper can be driven through one of two interfaces;
* ioctl or filesystem, depending which patch you have applied.
*/
@@ -158,7 +168,8 @@ void dm_stripe_exit(void);
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
-int dm_lock_for_deletion(struct mapped_device *md);
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
+int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 561a65f82e2..b6b7a2866c9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)
static struct ctl_table_header *raid_table_header;
-static ctl_table raid_table[] = {
+static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
@@ -130,7 +130,7 @@ static ctl_table raid_table[] = {
{ }
};
-static ctl_table raid_dir_table[] = {
+static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
@@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {
{ }
};
-static ctl_table raid_root_table[] = {
+static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
@@ -183,46 +183,6 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
-void md_trim_bio(struct bio *bio, int offset, int size)
-{
- /* 'bio' is a cloned bio which we need to trim to match
- * the given offset and size.
- * This requires adjusting bi_sector, bi_size, and bi_io_vec
- */
- int i;
- struct bio_vec *bvec;
- int sofar = 0;
-
- size <<= 9;
- if (offset == 0 && size == bio->bi_size)
- return;
-
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
- bio_advance(bio, offset << 9);
-
- bio->bi_size = size;
-
- /* avoid any complications with bi_idx being non-zero*/
- if (bio->bi_idx) {
- memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
- (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
- bio->bi_vcnt -= bio->bi_idx;
- bio->bi_idx = 0;
- }
- /* Make sure vcnt and last bv are not too big */
- bio_for_each_segment(bvec, bio, i) {
- if (sofar + bvec->bv_len > size)
- bvec->bv_len = size - sofar;
- if (bvec->bv_len == 0) {
- bio->bi_vcnt = i;
- break;
- }
- sofar += bvec->bv_len;
- }
-}
-EXPORT_SYMBOL_GPL(md_trim_bio);
-
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -602,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit)
goto retry;
}
-static inline int mddev_lock(struct mddev * mddev)
+static inline int __must_check mddev_lock(struct mddev * mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
+/* Sometimes we need to take the lock in a situation where
+ * failure due to interrupts is not acceptable.
+ */
+static inline void mddev_lock_nointr(struct mddev * mddev)
+{
+ mutex_lock(&mddev->reconfig_mutex);
+}
+
static inline int mddev_is_locked(struct mddev *mddev)
{
return mutex_is_locked(&mddev->reconfig_mutex);
@@ -3018,7 +2986,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
@@ -3034,7 +3002,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
break;
}
}
- mddev_lock(my_mddev);
+ mddev_lock_nointr(my_mddev);
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
@@ -3555,7 +3523,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
@@ -3620,6 +3588,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
+ blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
@@ -5298,7 +5267,7 @@ static void __md_stop_writes(struct mddev *mddev)
void md_stop_writes(struct mddev *mddev)
{
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -5331,20 +5300,35 @@ EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ /* Thread might be blocked waiting for metadata update
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+ }
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock_nointr(mddev);
+
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > !!bdev) {
+ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
err = -EBUSY;
goto out;
}
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5355,7 +5339,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_state);
- err = 0;
+ err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
@@ -5371,20 +5355,34 @@ static int do_md_stop(struct mddev * mddev, int mode,
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ /* Thread might be blocked waiting for metadata update
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+ }
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > !!bdev ||
- mddev->sysfs_active) {
+ mddev->sysfs_active ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
return -EBUSY;
}
if (mddev->pers) {
@@ -6591,7 +6589,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
}
} else {
err = -EROFS;
@@ -7401,9 +7399,6 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 2;
try_again:
- if (kthread_should_stop())
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
@@ -7428,7 +7423,7 @@ void md_do_sync(struct md_thread *thread)
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
- if (!kthread_should_stop() &&
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
@@ -7504,7 +7499,7 @@ void md_do_sync(struct md_thread *thread)
last_check = 0;
if (j>2) {
- printk(KERN_INFO
+ printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
@@ -7541,7 +7536,8 @@ void md_do_sync(struct md_thread *thread)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- while (j >= mddev->resync_max && !kthread_should_stop()) {
+ while (j >= mddev->resync_max &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
@@ -7549,17 +7545,18 @@ void md_do_sync(struct md_thread *thread)
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
- || kthread_should_stop());
+ || test_bit(MD_RECOVERY_INTR,
+ &mddev->recovery));
}
- if (kthread_should_stop())
- goto interrupted;
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
+ break;
}
if (!skipped) { /* actual IO requested */
@@ -7596,10 +7593,8 @@ void md_do_sync(struct md_thread *thread)
last_mark = next;
}
-
- if (kthread_should_stop())
- goto interrupted;
-
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
/*
* this loop exits only if either when we are slower than
@@ -7622,11 +7617,12 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
+ printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
- out:
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -7680,16 +7676,6 @@ void md_do_sync(struct md_thread *thread)
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
-
- interrupted:
- /*
- * got a signal, exit.
- */
- printk(KERN_INFO
- "md: md_do_sync() got signal ... exiting\n");
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
-
}
EXPORT_SYMBOL_GPL(md_do_sync);
@@ -7934,6 +7920,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
+ wake_up(&resync_wait);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 608050c43f1..2f5cc8a7ef3 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -501,7 +501,7 @@ extern struct attribute_group md_bitmap_group;
static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
if (sd)
- return sysfs_get_dirent(sd, NULL, name);
+ return sysfs_get_dirent(sd, name);
return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
@@ -617,7 +617,6 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern void md_trim_bio(struct bio *bio, int offset, int size);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
static inline int mddev_check_plugged(struct mddev *mddev)
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 172147eb1d4..af96e24ec32 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -509,15 +509,18 @@ static int grow_add_tail_block(struct resize *resize)
static int grow_needs_more_blocks(struct resize *resize)
{
int r;
+ unsigned old_nr_blocks = resize->old_nr_full_blocks;
if (resize->old_nr_entries_in_last_block > 0) {
+ old_nr_blocks++;
+
r = grow_extend_tail_block(resize, resize->max_entries);
if (r)
return r;
}
r = insert_full_ablocks(resize->info, resize->size_of_block,
- resize->old_nr_full_blocks,
+ old_nr_blocks,
resize->new_nr_full_blocks,
resize->max_entries, resize->value,
&resize->root);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index e735a6d5a79..cfbf9617e46 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -140,26 +140,10 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
- int r;
- uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- r = sm_ll_dec(&smd->ll, b, &ev);
- if (!r && (ev == SM_FREE)) {
- /*
- * It's only free if it's also free in the last
- * transaction.
- */
- r = sm_ll_lookup(&smd->old_ll, b, &old_count);
- if (r)
- return r;
-
- if (!old_count)
- smd->nr_allocated_this_transaction--;
- }
-
- return r;
+ return sm_ll_dec(&smd->ll, b, &ev);
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index aacf6bf352d..1e5a540995e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -66,7 +66,8 @@
*/
static int max_queued_requests = 1024;
-static void allow_barrier(struct r1conf *conf);
+static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
+ sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data)
}
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
+#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
+#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
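
For a sense of scale, the constants above work out as follows (plain arithmetic, not an addition to the patch):

/* With RESYNC_BLOCK_SIZE = 64 KiB and RESYNC_DEPTH = 32:
 *   RESYNC_WINDOW          = 64 KiB * 32        = 2 MiB
 *   RESYNC_WINDOW_SECTORS  = 2 MiB >> 9         = 4096 sectors
 *   NEXT_NORMALIO_DISTANCE = 3 * 4096           = 12288 sectors (6 MiB)
 */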
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio)
struct bio *bio = r1_bio->master_bio;
int done;
struct r1conf *conf = r1_bio->mddev->private;
+ sector_t start_next_window = r1_bio->start_next_window;
+ sector_t bi_sector = bio->bi_sector;
if (bio->bi_phys_segments) {
unsigned long flags;
@@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio)
bio->bi_phys_segments--;
done = (bio->bi_phys_segments == 0);
spin_unlock_irqrestore(&conf->device_lock, flags);
+ /*
+ * make_request() might be waiting for
+ * bi_phys_segments to decrease
+ */
+ wake_up(&conf->wait_barrier);
} else
done = 1;
@@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
* Wake up any possible resync thread that waits for the device
* to go idle.
*/
- allow_barrier(conf);
+ allow_barrier(conf, start_next_window, bi_sector);
}
}
@@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
-
static void raise_barrier(struct r1conf *conf)
{
spin_lock_irq(&conf->resync_lock);
@@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf)
/* block any new IO from starting */
conf->barrier++;
- /* Now wait for all pending IO to complete */
+ /* For these conditions we must wait:
+ * A: while the array is in frozen state
+ * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
+ * the maximum count allowed.
+ * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
+ * the next resync would reach into the window which normal bios
+ * are handling.
+ */
wait_event_lock_irq(conf->wait_barrier,
- !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ !conf->array_frozen &&
+ conf->barrier < RESYNC_DEPTH &&
+ (conf->start_next_window >=
+ conf->next_resync + RESYNC_SECTORS),
conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
@@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static void wait_barrier(struct r1conf *conf)
+static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
+ bool wait = false;
+
+ if (conf->array_frozen || !bio)
+ wait = true;
+ else if (conf->barrier && bio_data_dir(bio) == WRITE) {
+ if (conf->next_resync < RESYNC_WINDOW_SECTORS)
+ wait = true;
+ else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
+ >= bio_end_sector(bio)) ||
+ (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_sector))
+ wait = false;
+ else
+ wait = true;
+ }
+
+ return wait;
+}
+
+static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
+{
+ sector_t sector = 0;
+
spin_lock_irq(&conf->resync_lock);
- if (conf->barrier) {
+ if (need_to_wait_for_sync(conf, bio)) {
conf->nr_waiting++;
/* Wait for the barrier to drop.
* However if there are already pending
@@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf)
* count down.
*/
wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (conf->nr_pending &&
+ !conf->array_frozen &&
+ (!conf->barrier ||
+ ((conf->start_next_window <
+ conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ !bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+ if (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_sector) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
+ NEXT_NORMALIO_DISTANCE;
+
+ if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
+ <= bio->bi_sector)
+ conf->next_window_requests++;
+ else
+ conf->current_window_requests++;
+ }
+ if (bio->bi_sector >= conf->start_next_window)
+ sector = conf->start_next_window;
+ }
+
conf->nr_pending++;
spin_unlock_irq(&conf->resync_lock);
+ return sector;
}
-static void allow_barrier(struct r1conf *conf)
+static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
+ sector_t bi_sector)
{
unsigned long flags;
+
spin_lock_irqsave(&conf->resync_lock, flags);
conf->nr_pending--;
+ if (start_next_window) {
+ if (start_next_window == conf->start_next_window) {
+ if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
+ <= bi_sector)
+ conf->next_window_requests--;
+ else
+ conf->current_window_requests--;
+ } else
+ conf->current_window_requests--;
+
+ if (!conf->current_window_requests) {
+ if (conf->next_window_requests) {
+ conf->current_window_requests =
+ conf->next_window_requests;
+ conf->next_window_requests = 0;
+ conf->start_next_window +=
+ NEXT_NORMALIO_DISTANCE;
+ } else
+ conf->start_next_window = MaxSector;
+ }
+ }
spin_unlock_irqrestore(&conf->resync_lock, flags);
wake_up(&conf->wait_barrier);
}
@@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
- * We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+extra
+ * We wait until nr_pending match nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
@@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra)
* we continue.
*/
spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- conf->nr_waiting++;
+ conf->array_frozen = 1;
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf)
{
/* reverse the effect of the freeze */
spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- conf->nr_waiting--;
+ conf->array_frozen = 0;
wake_up(&conf->wait_barrier);
spin_unlock_irq(&conf->resync_lock);
}
@@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int first_clone;
int sectors_handled;
int max_sectors;
+ sector_t start_next_window;
/*
* Register the new request and wait if the reconstruction
@@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
finish_wait(&conf->wait_barrier, &w);
}
- wait_barrier(conf);
+ start_next_window = wait_barrier(conf, bio);
bitmap = mddev->bitmap;
@@ -1097,8 +1181,8 @@ read_again:
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+ max_sectors);
r1_bio->bios[rdisk] = read_bio;
@@ -1163,6 +1247,7 @@ read_again:
disks = conf->raid_disks * 2;
retry_write:
+ r1_bio->start_next_window = start_next_window;
blocked_rdev = NULL;
rcu_read_lock();
max_sectors = r1_bio->sectors;
@@ -1231,14 +1316,24 @@ read_again:
if (unlikely(blocked_rdev)) {
/* Wait for this device to become unblocked */
int j;
+ sector_t old = start_next_window;
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
- allow_barrier(conf);
+ allow_barrier(conf, start_next_window, bio->bi_sector);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
+ start_next_window = wait_barrier(conf, bio);
+ /*
+ * We must make sure the multiple r1bios of this bio
+ * have the same value of bi_phys_segments
+ */
+ if (bio->bi_phys_segments && old &&
+ old != start_next_window)
+ /* Wait for the former r1bio(s) to complete */
+ wait_event(conf->wait_barrier,
+ bio->bi_phys_segments == 1);
goto retry_write;
}
@@ -1266,7 +1361,7 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf)
{
- wait_barrier(conf);
- allow_barrier(conf);
+ wait_barrier(conf, NULL);
+ allow_barrier(conf, 0, 0);
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
+
+ conf->next_resync = 0;
+ conf->start_next_window = MaxSector;
}
static int raid1_spare_active(struct mddev *mddev)
@@ -2126,7 +2224,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio->bi_sector = r1_bio->sector;
wbio->bi_size = r1_bio->sectors << 9;
- md_trim_bio(wbio, sector - r1_bio->sector, sectors);
+ bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
@@ -2241,7 +2339,7 @@ read_more:
}
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR
@@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
+ conf->start_next_window = MaxSector;
+ conf->current_window_requests = conf->next_window_requests = 0;
+
err = -EIO;
for (i = 0; i < conf->raid_disks * 2; i++) {
@@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev)
atomic_read(&bitmap->behind_writes) == 0);
}
- raise_barrier(conf);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
@@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state)
wake_up(&conf->wait_barrier);
break;
case 1:
- raise_barrier(conf);
+ freeze_array(conf, 0);
break;
case 0:
- lower_barrier(conf);
+ unfreeze_array(conf);
break;
}
}
@@ -3051,7 +3152,8 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
if (!IS_ERR(conf))
- conf->barrier = 1;
+ /* Array must appear to be quiesced */
+ conf->array_frozen = 1;
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 0ff3715fb7e..9bebca7bff2 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -41,6 +41,19 @@ struct r1conf {
*/
sector_t next_resync;
+ /* When raid1 starts resync, we divide the array into four partitions:
+ * |---------|--------------|---------------------|-------------|
+ *        next_resync   start_next_window       end_window
+ * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
+ * end_window = start_next_window + NEXT_NORMALIO_DISTANCE
+ * current_window_requests is the count of normal IO between
+ * start_next_window and end_window.
+ * next_window_requests is the count of normal IO after end_window.
+ */
+ sector_t start_next_window;
+ int current_window_requests;
+ int next_window_requests;
+
spinlock_t device_lock;
/* list of 'struct r1bio' that need to be processed by raid1d,
@@ -65,6 +78,7 @@ struct r1conf {
int nr_waiting;
int nr_queued;
int barrier;
+ int array_frozen;
/* Set to 1 if a full sync is needed, (fresh device added).
* Cleared when a sync completes.
@@ -111,6 +125,7 @@ struct r1bio {
* in this BehindIO request
*/
sector_t sector;
+ sector_t start_next_window;
int sectors;
unsigned long state;
struct mddev *mddev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 73dc8a37752..c504e8389e6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1302,8 +1302,8 @@ read_again:
slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
@@ -1510,8 +1510,8 @@ retry_write:
if (r10_bio->devs[i].bio) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[i].bio = mbio;
mbio->bi_sector = (r10_bio->devs[i].addr+
@@ -1553,8 +1553,8 @@ retry_write:
rdev = conf->mirrors[d].rdev;
}
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[i].repl_bio = mbio;
mbio->bi_sector = (r10_bio->devs[i].addr +
@@ -2614,7 +2614,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+ bio_trim(wbio, sector - bio->bi_sector, sectors);
wbio->bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
@@ -2687,9 +2687,7 @@ read_more:
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
- md_trim_bio(bio,
- r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
@@ -4386,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
- kthread_should_stop());
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ allow_barrier(conf);
+ return sectors_done;
+ }
conf->reshape_safe = mddev->reshape_position;
allow_barrier(conf);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f8b90684392..47da0af6322 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
return &conf->stripe_hashtbl[hash];
}
+static inline int stripe_hash_locks_hash(sector_t sect)
+{
+ return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
+}
+
+static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_lock_irq(conf->hash_locks + hash);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
+}
+
+static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ local_irq_disable();
+ spin_lock(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ spin_unlock(&conf->device_lock);
+ for (i = NR_STRIPE_HASH_LOCKS; i; i--)
+ spin_unlock(conf->hash_locks + i - 1);
+ local_irq_enable();
+}
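
lock_all_device_hash_locks_irq() and unlock_all_device_hash_locks_irq() rely on a fixed acquisition order: every hash lock in ascending index order, then device_lock, released in exactly the reverse order, which is what keeps them deadlock-free against lock_device_hash_lock(). A userspace pthread analogue of that ordering (irq disabling has no equivalent here and all names are invented):

#include <pthread.h>
#include <stdio.h>

#define NR_HASH_LOCKS 8			/* stand-in for NR_STRIPE_HASH_LOCKS */

static pthread_mutex_t hash_locks[NR_HASH_LOCKS];
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take the hash locks in ascending index order, then the device lock. */
static void demo_lock_all(void)
{
	int i;

	for (i = 0; i < NR_HASH_LOCKS; i++)
		pthread_mutex_lock(&hash_locks[i]);
	pthread_mutex_lock(&device_lock);
}

/* Release in the exact reverse order: device lock first, then the hash
 * locks from the highest index down to 0. */
static void demo_unlock_all(void)
{
	int i;

	pthread_mutex_unlock(&device_lock);
	for (i = NR_HASH_LOCKS - 1; i >= 0; i--)
		pthread_mutex_unlock(&hash_locks[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_HASH_LOCKS; i++)
		pthread_mutex_init(&hash_locks[i], NULL);

	demo_lock_all();
	puts("all hash locks + device lock held");
	demo_unlock_all();
	return 0;
}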
+
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
* a bio could span several devices.
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
}
}
-static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
@@ -278,37 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
- list_add_tail(&sh->lru, &conf->inactive_list);
- wake_up(&conf->wait_for_stripe);
- if (conf->retry_read_aligned)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (!test_bit(STRIPE_EXPANDING, &sh->state))
+ list_add_tail(&sh->lru, temp_inactive_list);
}
}
-static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
if (atomic_dec_and_test(&sh->count))
- do_release_stripe(conf, sh);
+ do_release_stripe(conf, sh, temp_inactive_list);
}
-static struct llist_node *llist_reverse_order(struct llist_node *head)
+/*
+ * @hash may be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list
+ * holds one list per hash value.
+ *
+ * Be careful: only one task can add/delete stripes from temp_inactive_list
+ * at a given time. Adding stripes only takes the device lock, while deleting
+ * stripes only takes the hash lock.
+ */
+static void release_inactive_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list,
+ int hash)
{
- struct llist_node *new_head = NULL;
+ int size;
+ bool do_wakeup = false;
+ unsigned long flags;
- while (head) {
- struct llist_node *tmp = head;
- head = head->next;
- tmp->next = new_head;
- new_head = tmp;
+ if (hash == NR_STRIPE_HASH_LOCKS) {
+ size = NR_STRIPE_HASH_LOCKS;
+ hash = NR_STRIPE_HASH_LOCKS - 1;
+ } else
+ size = 1;
+ while (size) {
+ struct list_head *list = &temp_inactive_list[size - 1];
+
+ /*
+ * We don't hold any lock here yet, get_active_stripe() might
+ * remove stripes from the list
+ */
+ if (!list_empty_careful(list)) {
+ spin_lock_irqsave(conf->hash_locks + hash, flags);
+ if (list_empty(conf->inactive_list + hash) &&
+ !list_empty(list))
+ atomic_dec(&conf->empty_inactive_list_nr);
+ list_splice_tail_init(list, conf->inactive_list + hash);
+ do_wakeup = true;
+ spin_unlock_irqrestore(conf->hash_locks + hash, flags);
+ }
+ size--;
+ hash--;
}
- return new_head;
+ if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
+ if (conf->retry_read_aligned)
+ md_wakeup_thread(conf->mddev->thread);
+ }
}
/* should hold conf->device_lock already */
-static int release_stripe_list(struct r5conf *conf)
+static int release_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *sh;
int count = 0;
@@ -317,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf)
head = llist_del_all(&conf->released_stripes);
head = llist_reverse_order(head);
while (head) {
+ int hash;
+
sh = llist_entry(head, struct stripe_head, release_list);
head = llist_next(head);
/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
@@ -327,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf)
* again, the count is always > 1. This is true for
* STRIPE_ON_UNPLUG_LIST bit too.
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
count++;
}
@@ -338,9 +409,12 @@ static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ struct list_head list;
+ int hash;
bool wakeup;
- if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ if (unlikely(!conf->mddev->thread) ||
+ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
goto slow_path;
wakeup = llist_add(&sh->release_list, &conf->released_stripes);
if (wakeup)
@@ -350,8 +424,11 @@ slow_path:
local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
- do_release_stripe(conf, sh);
+ INIT_LIST_HEAD(&list);
+ hash = sh->hash_lock_index;
+ do_release_stripe(conf, sh, &list);
spin_unlock(&conf->device_lock);
+ release_inactive_stripe_list(conf, &list, hash);
}
local_irq_restore(flags);
}
@@ -376,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
/* find an idle stripe, make sure it is unhashed, and return it. */
-static struct stripe_head *get_free_stripe(struct r5conf *conf)
+static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh = NULL;
struct list_head *first;
- if (list_empty(&conf->inactive_list))
+ if (list_empty(conf->inactive_list + hash))
goto out;
- first = conf->inactive_list.next;
+ first = (conf->inactive_list + hash)->next;
sh = list_entry(first, struct stripe_head, lru);
list_del_init(first);
remove_hash(sh);
atomic_inc(&conf->active_stripes);
+ BUG_ON(hash != sh->hash_lock_index);
+ if (list_empty(conf->inactive_list + hash))
+ atomic_inc(&conf->empty_inactive_list_nr);
out:
return sh;
}
@@ -430,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
struct r5conf *conf = sh->raid_conf;
- int i;
+ int i, seq;
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -440,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
(unsigned long long)sh->sector);
remove_hash(sh);
-
+retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
sh->sector = sector;
@@ -462,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
dev->flags = 0;
raid5_build_block(sh, i, previous);
}
+ if (read_seqcount_retry(&conf->gen_lock, seq))
+ goto retry;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
}
@@ -566,29 +649,31 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
+ int hash = stripe_hash_locks_hash(sector);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock);
+ *(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
- sh = get_free_stripe(conf);
+ sh = get_free_stripe(conf, hash);
if (noblock && sh == NULL)
break;
if (!sh) {
conf->inactive_blocked = 1;
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes *3/4)
- || !conf->inactive_blocked),
- conf->device_lock);
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash) &&
+ (atomic_read(&conf->active_stripes)
+ < (conf->max_nr_stripes * 3 / 4)
+ || !conf->inactive_blocked),
+ *(conf->hash_locks + hash));
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -599,9 +684,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
&& !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
&& !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
} else {
+ spin_lock(&conf->device_lock);
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
if (list_empty(&sh->lru) &&
+ !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state) &&
!test_bit(STRIPE_EXPANDING, &sh->state))
BUG();
list_del_init(&sh->lru);
@@ -609,6 +696,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
sh->group->stripes_cnt--;
sh->group = NULL;
}
+ spin_unlock(&conf->device_lock);
}
}
} while (sh == NULL);
@@ -616,7 +704,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
if (sh)
atomic_inc(&sh->count);
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -772,7 +860,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
- bi->bi_rw |= REQ_FLUSH;
+ bi->bi_rw |= REQ_NOMERGE;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -1596,7 +1684,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf)
+static int grow_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
@@ -1612,6 +1700,7 @@ static int grow_one_stripe(struct r5conf *conf)
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
+ sh->hash_lock_index = hash;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
@@ -1624,6 +1713,7 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
+ int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1641,9 +1731,13 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- while (num--)
- if (!grow_one_stripe(conf))
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
+ while (num--) {
+ if (!grow_one_stripe(conf, hash))
return 1;
+ conf->max_nr_stripes++;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
+ }
return 0;
}
@@ -1701,6 +1795,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
int err;
struct kmem_cache *sc;
int i;
+ int hash, cnt;
if (newsize <= conf->pool_size)
return 0; /* never bother to shrink */
@@ -1740,19 +1835,29 @@ static int resize_stripes(struct r5conf *conf, int newsize)
* OK, we have enough stripes, start collecting inactive
* stripes and copying them over
*/
+ hash = 0;
+ cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list),
- conf->device_lock);
- osh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ lock_device_hash_lock(conf, hash);
+ wait_event_cmd(conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash),
+ unlock_device_hash_lock(conf, hash),
+ lock_device_hash_lock(conf, hash));
+ osh = get_free_stripe(conf, hash);
+ unlock_device_hash_lock(conf, hash);
atomic_set(&nsh->count, 1);
for(i=0; i<conf->pool_size; i++)
nsh->dev[i].page = osh->dev[i].page;
for( ; i<newsize; i++)
nsh->dev[i].page = NULL;
+ nsh->hash_lock_index = hash;
kmem_cache_free(conf->slab_cache, osh);
+ cnt++;
+ if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
+ !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
+ hash++;
+ cnt = 0;
+ }
}
kmem_cache_destroy(conf->slab_cache);
@@ -1811,13 +1916,13 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return err;
}
-static int drop_one_stripe(struct r5conf *conf)
+static int drop_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
+ sh = get_free_stripe(conf, hash);
+ spin_unlock_irq(conf->hash_locks + hash);
if (!sh)
return 0;
BUG_ON(atomic_read(&sh->count));
@@ -1829,8 +1934,10 @@ static int drop_one_stripe(struct r5conf *conf)
static void shrink_stripes(struct r5conf *conf)
{
- while (drop_one_stripe(conf))
- ;
+ int hash;
+ for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
+ while (drop_one_stripe(conf, hash))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -1935,6 +2042,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev), bdn);
else
retry = 1;
+ if (set_bad && test_bit(In_sync, &rdev->flags)
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
if (retry)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
set_bit(R5_ReadError, &sh->dev[i].flags);
@@ -3914,7 +4024,8 @@ static void raid5_activate_delayed(struct r5conf *conf)
}
}
-static void activate_bit_delay(struct r5conf *conf)
+static void activate_bit_delay(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
/* device_lock is held */
struct list_head head;
@@ -3922,9 +4033,11 @@ static void activate_bit_delay(struct r5conf *conf)
list_del_init(&conf->bitmap_list);
while (!list_empty(&head)) {
struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
+ int hash;
list_del_init(&sh->lru);
atomic_inc(&sh->count);
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
}
}
@@ -3940,7 +4053,7 @@ int md_raid5_congested(struct mddev *mddev, int bits)
return 1;
if (conf->quiesce)
return 1;
- if (list_empty_careful(&conf->inactive_list))
+ if (atomic_read(&conf->empty_inactive_list_nr))
return 1;
return 0;
@@ -4270,6 +4383,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
struct raid5_plug_cb {
struct blk_plug_cb cb;
struct list_head list;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
};
static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
@@ -4280,6 +4394,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
int cnt = 0;
+ int hash;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4297,11 +4412,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
* STRIPE_ON_RELEASE_LIST could be set here. In that
* case, the count is always > 1 here
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ release_inactive_stripe_list(conf, cb->temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
if (mddev->queue)
trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
@@ -4322,8 +4440,12 @@ static void release_stripe_plug(struct mddev *mddev,
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
- if (cb->list.next == NULL)
+ if (cb->list.next == NULL) {
+ int i;
INIT_LIST_HEAD(&cb->list);
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(cb->temp_inactive_list + i);
+ }
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
list_add_tail(&sh->lru, &cb->list);
@@ -4706,14 +4828,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes)==0);
+ atomic_read(&conf->reshape_stripes)==0
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (atomic_read(&conf->reshape_stripes) != 0)
+ return 0;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
- kthread_should_stop());
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ return 0;
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
@@ -4796,7 +4923,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes) == 0);
+ atomic_read(&conf->reshape_stripes) == 0
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (atomic_read(&conf->reshape_stripes) != 0)
+ goto ret;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
@@ -4804,13 +4934,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags)
- || kthread_should_stop());
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto ret;
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
+ret:
return reshape_sectors;
}
@@ -4968,27 +5101,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
}
static int handle_active_stripes(struct r5conf *conf, int group,
- struct r5worker *worker)
+ struct r5worker *worker,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
- int i, batch_size = 0;
+ int i, batch_size = 0, hash;
+ bool release_inactive = false;
while (batch_size < MAX_STRIPE_BATCH &&
(sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
- if (batch_size == 0)
- return batch_size;
+ if (batch_size == 0) {
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ if (!list_empty(temp_inactive_list + i))
+ break;
+ if (i == NR_STRIPE_HASH_LOCKS)
+ return batch_size;
+ release_inactive = true;
+ }
spin_unlock_irq(&conf->device_lock);
+ release_inactive_stripe_list(conf, temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
+
+ if (release_inactive) {
+ spin_lock_irq(&conf->device_lock);
+ return 0;
+ }
+
for (i = 0; i < batch_size; i++)
handle_stripe(batch[i]);
cond_resched();
spin_lock_irq(&conf->device_lock);
- for (i = 0; i < batch_size; i++)
- __release_stripe(conf, batch[i]);
+ for (i = 0; i < batch_size; i++) {
+ hash = batch[i]->hash_lock_index;
+ __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
+ }
return batch_size;
}
@@ -5009,9 +5160,10 @@ static void raid5_do_work(struct work_struct *work)
while (1) {
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, worker->temp_inactive_list);
- batch_size = handle_active_stripes(conf, group_id, worker);
+ batch_size = handle_active_stripes(conf, group_id, worker,
+ worker->temp_inactive_list);
worker->working = false;
if (!batch_size && !released)
break;
@@ -5050,7 +5202,7 @@ static void raid5d(struct md_thread *thread)
struct bio *bio;
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, conf->temp_inactive_list);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5060,7 +5212,7 @@ static void raid5d(struct md_thread *thread)
bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
- activate_bit_delay(conf);
+ activate_bit_delay(conf, conf->temp_inactive_list);
}
raid5_activate_delayed(conf);
@@ -5074,7 +5226,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
+ conf->temp_inactive_list);
if (!batch_size && !released)
break;
handled += batch_size;
@@ -5110,22 +5263,29 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
+ int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
+ hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf))
+ if (drop_one_stripe(conf, hash))
conf->max_nr_stripes--;
else
break;
+ hash--;
+ if (hash < 0)
+ hash = NR_STRIPE_HASH_LOCKS - 1;
}
err = md_allow_write(mddev);
if (err)
return err;
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf))
+ if (grow_one_stripe(conf, hash))
conf->max_nr_stripes++;
else break;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
}
return 0;
}
@@ -5213,15 +5373,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
return 0;
}
-static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static int alloc_thread_groups(struct r5conf *conf, int cnt,
+ int *group_cnt,
+ int *worker_cnt_per_group,
+ struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
struct r5conf *conf = mddev->private;
unsigned long new;
int err;
- struct r5worker_group *old_groups;
- int old_group_cnt;
+ struct r5worker_group *new_groups, *old_groups;
+ int group_cnt, worker_cnt_per_group;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -5237,14 +5400,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
old_groups = conf->worker_groups;
- old_group_cnt = conf->worker_cnt_per_group;
+ if (old_groups)
+ flush_workqueue(raid5_wq);
+
+ err = alloc_thread_groups(conf, new,
+ &group_cnt, &worker_cnt_per_group,
+ &new_groups);
+ if (!err) {
+ spin_lock_irq(&conf->device_lock);
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_groups;
+ spin_unlock_irq(&conf->device_lock);
- conf->worker_groups = NULL;
- err = alloc_thread_groups(conf, new);
- if (err) {
- conf->worker_groups = old_groups;
- conf->worker_cnt_per_group = old_group_cnt;
- } else {
if (old_groups)
kfree(old_groups[0].workers);
kfree(old_groups);
@@ -5274,40 +5442,47 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt)
+static int alloc_thread_groups(struct r5conf *conf, int cnt,
+ int *group_cnt,
+ int *worker_cnt_per_group,
+ struct r5worker_group **worker_groups)
{
- int i, j;
+ int i, j, k;
ssize_t size;
struct r5worker *workers;
- conf->worker_cnt_per_group = cnt;
+ *worker_cnt_per_group = cnt;
if (cnt == 0) {
- conf->worker_groups = NULL;
+ *group_cnt = 0;
+ *worker_groups = NULL;
return 0;
}
- conf->group_cnt = num_possible_nodes();
+ *group_cnt = num_possible_nodes();
size = sizeof(struct r5worker) * cnt;
- workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
- conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
- conf->group_cnt, GFP_NOIO);
- if (!conf->worker_groups || !workers) {
+ workers = kzalloc(size * *group_cnt, GFP_NOIO);
+ *worker_groups = kzalloc(sizeof(struct r5worker_group) *
+ *group_cnt, GFP_NOIO);
+ if (!*worker_groups || !workers) {
kfree(workers);
- kfree(conf->worker_groups);
- conf->worker_groups = NULL;
+ kfree(*worker_groups);
return -ENOMEM;
}
- for (i = 0; i < conf->group_cnt; i++) {
+ for (i = 0; i < *group_cnt; i++) {
struct r5worker_group *group;
- group = &conf->worker_groups[i];
+ group = &(*worker_groups)[i];
INIT_LIST_HEAD(&group->handle_list);
group->conf = conf;
group->workers = workers + i * cnt;
for (j = 0; j < cnt; j++) {
- group->workers[j].group = group;
- INIT_WORK(&group->workers[j].work, raid5_do_work);
+ struct r5worker *worker = group->workers + j;
+ worker->group = group;
+ INIT_WORK(&worker->work, raid5_do_work);
+
+ for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
+ INIT_LIST_HEAD(worker->temp_inactive_list + k);
}
}
@@ -5458,6 +5633,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct md_rdev *rdev;
struct disk_info *disk;
char pers_name[6];
+ int i;
+ int group_cnt, worker_cnt_per_group;
+ struct r5worker_group *new_group;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -5492,7 +5670,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (conf == NULL)
goto abort;
/* Don't enable multi-threading by default */
- if (alloc_thread_groups(conf, 0))
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
+ &new_group)) {
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_group;
+ } else
goto abort;
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
@@ -5502,7 +5685,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
- INIT_LIST_HEAD(&conf->inactive_list);
init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
@@ -5528,6 +5710,21 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
+ /* We init hash_locks[0] separately so that it can be used
+ * as the reference lock in the spin_lock_nest_lock() call
+ * in lock_all_device_hash_locks_irq in order to convince
+ * lockdep that we know what we are doing.
+ */
+ spin_lock_init(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_init(conf->hash_locks + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->inactive_list + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->temp_inactive_list + i);
+
conf->level = mddev->new_level;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
@@ -5568,7 +5765,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->max_degraded = 1;
conf->algorithm = mddev->new_layout;
- conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
conf->prev_chunk_sectors = mddev->chunk_sectors;
@@ -5577,7 +5773,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
- if (grow_stripes(conf, conf->max_nr_stripes)) {
+ atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
+ if (grow_stripes(conf, NR_STRIPES)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -6383,12 +6580,18 @@ static int raid5_start_reshape(struct mddev *mddev)
if (!mddev->sync_thread) {
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
+ mddev->new_chunk_sectors =
+ conf->chunk_sectors = conf->prev_chunk_sectors;
+ mddev->new_layout = conf->algorithm = conf->prev_algo;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
+ conf->generation --;
conf->reshape_progress = MaxSector;
mddev->reshape_position = MaxSector;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
return -EAGAIN;
}
@@ -6476,27 +6679,28 @@ static void raid5_quiesce(struct mddev *mddev, int state)
break;
case 1: /* stop all writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
conf->quiesce = 2;
- wait_event_lock_irq(conf->wait_for_stripe,
+ wait_event_cmd(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf),
+ lock_all_device_hash_locks_irq(conf));
conf->quiesce = 1;
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
wake_up(&conf->wait_for_overlap);
break;
case 0: /* re-enable writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_stripe);
wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
break;
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2113ffa82c7..01ad8ae8f57 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -49,7 +49,7 @@
* can't distinguish between a clean block that has been generated
* from parity calculations, and a clean block that has been
* successfully written to the spare ( or to parity when resyncing).
- * To distingush these states we have a stripe bit STRIPE_INSYNC that
+ * To distinguish these states we have a stripe bit STRIPE_INSYNC that
* is set whenever a write is scheduled to the spare, or to the parity
* disc if there is no spare. A sync request clears this bit, and
* when we find it set with no buffers locked, we know the sync is
@@ -205,6 +205,7 @@ struct stripe_head {
short pd_idx; /* parity disk index */
short qd_idx; /* 'Q' disk index for raid6 */
short ddf_layout;/* use DDF ordering to calculate Q */
+ short hash_lock_index;
unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
+ * This is because we sometimes take all the spinlocks
+ * and creating that much locking depth can cause
+ * problems.
+ */
+#define NR_STRIPE_HASH_LOCKS 8
+#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
+
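A small worked example of the bucket math (assuming 4 KiB pages, so STRIPE_SHIFT == 3; not part of the patch):

	hash = (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
	/* sect   0..7  -> hash 0
	 * sect   8..15 -> hash 1
	 * ...
	 * sect  56..63 -> hash 7
	 * sect  64..71 -> hash 0 again
	 */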
struct r5worker {
struct work_struct work;
struct r5worker_group *group;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
bool working;
};
@@ -382,6 +392,8 @@ struct r5worker_group {
struct r5conf {
struct hlist_head *stripe_hashtbl;
+ /* each lock protects only the corresponding hash list and inactive_list */
+ spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
int level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
* Free stripes pool
*/
atomic_t active_stripes;
- struct list_head inactive_list;
+ struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+ atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
struct r5worker_group *worker_groups;
int group_cnt;
int worker_cnt_per_group;
diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c
index f2199e43e80..185c285f70f 100644
--- a/drivers/media/common/b2c2/flexcop-sram.c
+++ b/drivers/media/common/b2c2/flexcop-sram.c
@@ -85,7 +85,7 @@ static void flexcop_sram_write(struct adapter *adapter, u32 bank, u32 addr, u8 *
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
- };
+ }
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
@@ -110,7 +110,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf,
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
- };
+ }
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
@@ -122,7 +122,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf,
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
- };
+ }
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index bb6ee5191eb..34b0d0ddeef 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -411,7 +411,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
saa7146_write(dev, MC2, 0xf8000000);
/* request an interrupt for the saa7146 */
- err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED,
+ err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED,
dev->name, dev);
if (err < 0) {
ERR("request_irq() failed\n");
@@ -524,8 +524,6 @@ static void saa7146_remove_one(struct pci_dev *pdev)
DEB_EE("dev:%p\n", dev);
dev->ext->detach(dev);
- /* Zero the PCI drvdata after use. */
- pci_set_drvdata(pdev, NULL);
/* shut down all video dma transfers */
saa7146_write(dev, MC1, 0x00ff0000);
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index a142f7942a0..050984c5b1e 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -922,8 +922,8 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
u32 i, *ptr;
u8 *payload = firmware->payload;
int rc = 0;
- firmware->start_address = le32_to_cpu(firmware->start_address);
- firmware->length = le32_to_cpu(firmware->length);
+ firmware->start_address = le32_to_cpup((__le32 *)&firmware->start_address);
+ firmware->length = le32_to_cpup((__le32 *)&firmware->length);
mem_address = firmware->start_address;
@@ -982,7 +982,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
if (rc < 0)
goto exit_fw_download;
- sms_err("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x",
+ sms_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x",
calc_checksum);
SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ,
sizeof(msg->x_msg_header) +
@@ -1562,7 +1562,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
{
struct sms_msg_data *validity = (struct sms_msg_data *) phdr;
- sms_err("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x",
+ sms_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x",
validity->msg_data[0]);
complete(&coredev->data_validity_done);
break;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 63676a8b024..85151efdd94 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -44,14 +44,14 @@ module_param_named(debug, sms_dbg, int, 0644);
MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
-u32 sms_to_guard_interval_table[] = {
+static u32 sms_to_guard_interval_table[] = {
[0] = GUARD_INTERVAL_1_32,
[1] = GUARD_INTERVAL_1_16,
[2] = GUARD_INTERVAL_1_8,
[3] = GUARD_INTERVAL_1_4,
};
-u32 sms_to_code_rate_table[] = {
+static u32 sms_to_code_rate_table[] = {
[0] = FEC_1_2,
[1] = FEC_2_3,
[2] = FEC_3_4,
@@ -60,14 +60,14 @@ u32 sms_to_code_rate_table[] = {
};
-u32 sms_to_hierarchy_table[] = {
+static u32 sms_to_hierarchy_table[] = {
[0] = HIERARCHY_NONE,
[1] = HIERARCHY_1,
[2] = HIERARCHY_2,
[3] = HIERARCHY_4,
};
-u32 sms_to_modulation_table[] = {
+static u32 sms_to_modulation_table[] = {
[0] = QPSK,
[1] = QAM_16,
[2] = QAM_64,
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 0b4616b8719..c0363f1b6c9 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -206,8 +206,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
/* TODO */
dvbdev->users--;
if (dvbdev->users == 1 && dmxdev->exit == 1) {
- fops_put(file->f_op);
- file->f_op = NULL;
mutex_unlock(&dmxdev->mutex);
wake_up(&dvbdev->wait_queue);
} else
@@ -1120,8 +1118,6 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
mutex_lock(&dmxdev->mutex);
dmxdev->dvbdev->users--;
if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
- fops_put(file->f_op);
- file->f_op = NULL;
mutex_unlock(&dmxdev->mutex);
wake_up(&dmxdev->dvbdev->wait_queue);
} else
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 3485655fa08..58de4410c52 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -476,7 +476,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
- spin_lock(&demux->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&demux->lock, flags);
while (count--) {
if (buf[0] == 0x47)
@@ -484,7 +486,7 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
buf += 188;
}
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
@@ -519,8 +521,9 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
{
int p = 0, i, j;
const u8 *q;
+ unsigned long flags;
- spin_lock(&demux->lock);
+ spin_lock_irqsave(&demux->lock, flags);
if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */
i = demux->tsbufp;
@@ -564,7 +567,7 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
}
bailout:
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
@@ -581,11 +584,13 @@ EXPORT_SYMBOL(dvb_dmx_swfilter_204);
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- spin_lock(&demux->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&demux->lock, flags);
demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 401ef64f92c..983db75de35 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -74,22 +74,15 @@ static int dvb_device_open(struct inode *inode, struct file *file)
if (dvbdev && dvbdev->fops) {
int err = 0;
- const struct file_operations *old_fops;
+ const struct file_operations *new_fops;
- file->private_data = dvbdev;
- old_fops = file->f_op;
- file->f_op = fops_get(dvbdev->fops);
- if (file->f_op == NULL) {
- file->f_op = old_fops;
+ new_fops = fops_get(dvbdev->fops);
+ if (!new_fops)
goto fail;
- }
- if(file->f_op->open)
+ file->private_data = dvbdev;
+ replace_fops(file, new_fops);
+ if (file->f_op->open)
err = file->f_op->open(inode,file);
- if (err) {
- fops_put(file->f_op);
- file->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
up_read(&minor_rwsem);
mutex_unlock(&dvbdev_mutex);
return err;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0e2ec6f73b0..bddbab43a2d 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -200,6 +200,13 @@ config DVB_CX24116
help
A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+config DVB_CX24117
+ tristate "Conexant CX24117 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+
config DVB_SI21XX
tristate "Silicon Labs SI21XX based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index cebc0faffab..f9cb43d9aed 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
obj-$(CONFIG_DVB_AF9013) += af9013.o
obj-$(CONFIG_DVB_CX24116) += cx24116.o
+obj-$(CONFIG_DVB_CX24117) += cx24117.o
obj-$(CONFIG_DVB_SI21XX) += si21xx.o
obj-$(CONFIG_DVB_STV0288) += stv0288.o
obj-$(CONFIG_DVB_STB6000) += stb6000.o
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index a204f282882..fb504f1e912 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -24,6 +24,9 @@
#include "af9013_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct af9013_state {
struct i2c_adapter *i2c;
struct dvb_frontend fe;
@@ -50,16 +53,23 @@ static int af9013_wr_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg,
const u8 *val, int len)
{
int ret;
- u8 buf[3+len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->config.i2c_addr,
.flags = 0,
- .len = sizeof(buf),
+ .len = 3 + len,
.buf = buf,
}
};
+ if (3 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = (reg >> 8) & 0xff;
buf[1] = (reg >> 0) & 0xff;
buf[2] = mbox;
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index a777b4b944e..30ee5905215 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -21,6 +21,9 @@
#include "af9033_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct af9033_state {
struct i2c_adapter *i2c;
struct dvb_frontend fe;
@@ -40,16 +43,23 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
int len)
{
int ret;
- u8 buf[3 + len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = state->cfg.i2c_addr,
.flags = 0,
- .len = sizeof(buf),
+ .len = 3 + len,
.buf = buf,
}
};
+ if (3 + len > sizeof(buf)) {
+ dev_warn(&state->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = (reg >> 16) & 0xff;
buf[1] = (reg >> 8) & 0xff;
buf[2] = (reg >> 0) & 0xff;
@@ -161,7 +171,14 @@ static int af9033_wr_reg_val_tab(struct af9033_state *state,
const struct reg_val *tab, int tab_len)
{
int ret, i, j;
- u8 buf[tab_len];
+ u8 buf[MAX_XFER_SIZE];
+
+ if (tab_len > sizeof(buf)) {
+ dev_warn(&state->i2c->dev,
+ "%s: i2c wr len=%d is too big!\n",
+ KBUILD_MODNAME, tab_len);
+ return -EINVAL;
+ }
dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 1b77909c0c7..39a29dd2951 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -44,6 +44,9 @@
#include "bcm3510.h"
#include "bcm3510_priv.h"
+/* Max transfer size done by bcm3510_do_hab_cmd() function */
+#define MAX_XFER_SIZE 128
+
struct bcm3510_state {
struct i2c_adapter* i2c;
@@ -201,9 +204,19 @@ static int bcm3510_hab_send_request(struct bcm3510_state *st, u8 *buf, int len)
static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *obuf, u8 olen, u8 *ibuf, u8 ilen)
{
- u8 ob[olen+2],ib[ilen+2];
+ u8 ob[MAX_XFER_SIZE], ib[MAX_XFER_SIZE];
int ret = 0;
+ if (ilen + 2 > sizeof(ib)) {
+ deb_hab("do_hab_cmd: ilen=%d is too big!\n", ilen);
+ return -EINVAL;
+ }
+
+ if (olen + 2 > sizeof(ob)) {
+ deb_hab("do_hab_cmd: olen=%d is too big!\n", olen);
+ return -EINVAL;
+ }
+
ob[0] = cmd;
ob[1] = msgid;
memcpy(&ob[2],obuf,olen);
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 0cd6927e654..95b981cd711 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -378,7 +378,7 @@ static int cx24110_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltag
return cx24110_writereg(state,0x76,(cx24110_readreg(state,0x76)&0x3b)|0x40);
default:
return -EINVAL;
- };
+ }
}
static int cx24110_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst)
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
new file mode 100644
index 00000000000..476b422ccf1
--- /dev/null
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -0,0 +1,1650 @@
+/*
+ Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver
+
+ Copyright (C) 2013 Luis Alves <ljalvs@gmail.com>
+ July, 6th 2013
+ First release based on cx24116 driver by:
+ Steven Toth and Georg Acher, Darron Broad, Igor Liplianin
+ Cards currently supported:
+ TBS6980 - Dual DVBS/S2 PCIe card
+ TBS6981 - Dual DVBS/S2 PCIe card
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+
+#include "tuner-i2c.h"
+#include "dvb_frontend.h"
+#include "cx24117.h"
+
+
+#define CX24117_DEFAULT_FIRMWARE "dvb-fe-cx24117.fw"
+#define CX24117_SEARCH_RANGE_KHZ 5000
+
+/* known registers */
+#define CX24117_REG_COMMAND (0x00) /* command buffer */
+#define CX24117_REG_EXECUTE (0x1f) /* execute command */
+
+#define CX24117_REG_FREQ3_0 (0x34) /* frequency */
+#define CX24117_REG_FREQ2_0 (0x35)
+#define CX24117_REG_FREQ1_0 (0x36)
+#define CX24117_REG_STATE0 (0x39)
+#define CX24117_REG_SSTATUS0 (0x3a) /* demod0 signal high / status */
+#define CX24117_REG_SIGNAL0 (0x3b)
+#define CX24117_REG_FREQ5_0 (0x3c) /* +-freq */
+#define CX24117_REG_FREQ6_0 (0x3d)
+#define CX24117_REG_SRATE2_0 (0x3e) /* +- 1000 * srate */
+#define CX24117_REG_SRATE1_0 (0x3f)
+#define CX24117_REG_QUALITY2_0 (0x40)
+#define CX24117_REG_QUALITY1_0 (0x41)
+
+#define CX24117_REG_BER4_0 (0x47)
+#define CX24117_REG_BER3_0 (0x48)
+#define CX24117_REG_BER2_0 (0x49)
+#define CX24117_REG_BER1_0 (0x4a)
+#define CX24117_REG_DVBS_UCB2_0 (0x4b)
+#define CX24117_REG_DVBS_UCB1_0 (0x4c)
+#define CX24117_REG_DVBS2_UCB2_0 (0x50)
+#define CX24117_REG_DVBS2_UCB1_0 (0x51)
+#define CX24117_REG_QSTATUS0 (0x93)
+#define CX24117_REG_CLKDIV0 (0xe6)
+#define CX24117_REG_RATEDIV0 (0xf0)
+
+
+#define CX24117_REG_FREQ3_1 (0x55) /* frequency */
+#define CX24117_REG_FREQ2_1 (0x56)
+#define CX24117_REG_FREQ1_1 (0x57)
+#define CX24117_REG_STATE1 (0x5a)
+#define CX24117_REG_SSTATUS1 (0x5b) /* demod1 signal high / status */
+#define CX24117_REG_SIGNAL1 (0x5c)
+#define CX24117_REG_FREQ5_1 (0x5d) /* +- freq */
+#define CX24117_REG_FREQ4_1 (0x5e)
+#define CX24117_REG_SRATE2_1 (0x5f)
+#define CX24117_REG_SRATE1_1 (0x60)
+#define CX24117_REG_QUALITY2_1 (0x61)
+#define CX24117_REG_QUALITY1_1 (0x62)
+#define CX24117_REG_BER4_1 (0x68)
+#define CX24117_REG_BER3_1 (0x69)
+#define CX24117_REG_BER2_1 (0x6a)
+#define CX24117_REG_BER1_1 (0x6b)
+#define CX24117_REG_DVBS_UCB2_1 (0x6c)
+#define CX24117_REG_DVBS_UCB1_1 (0x6d)
+#define CX24117_REG_DVBS2_UCB2_1 (0x71)
+#define CX24117_REG_DVBS2_UCB1_1 (0x72)
+#define CX24117_REG_QSTATUS1 (0x9f)
+#define CX24117_REG_CLKDIV1 (0xe7)
+#define CX24117_REG_RATEDIV1 (0xf1)
+
+
+/* arg buffer size */
+#define CX24117_ARGLEN (0x1e)
+
+/* rolloff */
+#define CX24117_ROLLOFF_020 (0x00)
+#define CX24117_ROLLOFF_025 (0x01)
+#define CX24117_ROLLOFF_035 (0x02)
+
+/* pilot bit */
+#define CX24117_PILOT_OFF (0x00)
+#define CX24117_PILOT_ON (0x40)
+#define CX24117_PILOT_AUTO (0x80)
+
+/* signal status */
+#define CX24117_HAS_SIGNAL (0x01)
+#define CX24117_HAS_CARRIER (0x02)
+#define CX24117_HAS_VITERBI (0x04)
+#define CX24117_HAS_SYNCLOCK (0x08)
+#define CX24117_STATUS_MASK (0x0f)
+#define CX24117_SIGNAL_MASK (0xc0)
+
+
+/* arg offset for DiSEqC */
+#define CX24117_DISEQC_DEMOD (1)
+#define CX24117_DISEQC_BURST (2)
+#define CX24117_DISEQC_ARG3_2 (3) /* unknown value=2 */
+#define CX24117_DISEQC_ARG4_0 (4) /* unknown value=0 */
+#define CX24117_DISEQC_ARG5_0 (5) /* unknown value=0 */
+#define CX24117_DISEQC_MSGLEN (6)
+#define CX24117_DISEQC_MSGOFS (7)
+
+/* DiSEqC burst */
+#define CX24117_DISEQC_MINI_A (0)
+#define CX24117_DISEQC_MINI_B (1)
+
+
+#define CX24117_PNE (0) /* 0 disabled / 2 enabled */
+#define CX24117_OCC (1) /* 0 disabled / 1 enabled */
+
+
+enum cmds {
+ CMD_SET_VCO = 0x10,
+ CMD_TUNEREQUEST = 0x11,
+ CMD_MPEGCONFIG = 0x13,
+ CMD_TUNERINIT = 0x14,
+ CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */
+ CMD_LNBDCLEVEL = 0x22,
+ CMD_SET_TONE = 0x23,
+ CMD_UPDFWVERS = 0x35,
+ CMD_TUNERSLEEP = 0x36,
+};
+
+static LIST_HEAD(hybrid_tuner_instance_list);
+static DEFINE_MUTEX(cx24117_list_mutex);
+
+/* The Demod/Tuner can't easily provide these, we cache them */
+struct cx24117_tuning {
+ u32 frequency;
+ u32 symbol_rate;
+ fe_spectral_inversion_t inversion;
+ fe_code_rate_t fec;
+
+ fe_delivery_system_t delsys;
+ fe_modulation_t modulation;
+ fe_pilot_t pilot;
+ fe_rolloff_t rolloff;
+
+ /* Demod values */
+ u8 fec_val;
+ u8 fec_mask;
+ u8 inversion_val;
+ u8 pilot_val;
+ u8 rolloff_val;
+};
+
+/* Basic commands that are sent to the firmware */
+struct cx24117_cmd {
+ u8 len;
+ u8 args[CX24117_ARGLEN];
+};
+
+/* common to both fe's */
+struct cx24117_priv {
+ u8 demod_address;
+ struct i2c_adapter *i2c;
+ u8 skip_fw_load;
+ struct mutex fe_lock;
+
+ /* Used for sharing this struct between demods */
+ struct tuner_i2c_props i2c_props;
+ struct list_head hybrid_tuner_instance_list;
+};
+
+/* one per each fe */
+struct cx24117_state {
+ struct cx24117_priv *priv;
+ struct dvb_frontend frontend;
+
+ struct cx24117_tuning dcur;
+ struct cx24117_tuning dnxt;
+ struct cx24117_cmd dsec_cmd;
+
+ int demod;
+};
+
+/* modfec (modulation and FEC) lookup table */
+/* Check cx24116.c for a detailed description of each field */
+static struct cx24117_modfec {
+ fe_delivery_system_t delivery_system;
+ fe_modulation_t modulation;
+ fe_code_rate_t fec;
+ u8 mask; /* In DVBS mode this is used to autodetect */
+ u8 val; /* Passed to the firmware to indicate mode selection */
+} cx24117_modfec_modes[] = {
+ /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */
+
+ /*mod fec mask val */
+ { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 },
+ { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */
+ { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */
+ { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */
+ { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */
+ { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */
+ { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */
+ { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */
+ { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */
+ { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 },
+ /* NBC-QPSK */
+ { SYS_DVBS2, QPSK, FEC_NONE, 0x00, 0x00 },
+ { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 },
+ { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 },
+ { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 },
+ { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 },
+ { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 },
+ { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 },
+ { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a },
+ { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b },
+ { SYS_DVBS2, QPSK, FEC_AUTO, 0x00, 0x00 },
+ /* 8PSK */
+ { SYS_DVBS2, PSK_8, FEC_NONE, 0x00, 0x00 },
+ { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c },
+ { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d },
+ { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e },
+ { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f },
+ { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 },
+ { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 },
+ { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00, 0x00 },
+ /*
+ * 'val' can be found in the FECSTATUS register when tuning.
+ * FECSTATUS will give the actual FEC in use if tuning was successful.
+ */
+};
+
+
+static int cx24117_writereg(struct cx24117_state *state, u8 reg, u8 data)
+{
+ u8 buf[] = { reg, data };
+ struct i2c_msg msg = { .addr = state->priv->demod_address,
+ .flags = 0, .buf = buf, .len = 2 };
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d i2c wr @0x%02x=0x%02x\n",
+ __func__, state->demod, reg, data);
+
+ ret = i2c_transfer(state->priv->i2c, &msg, 1);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c wr err(%i) @0x%02x=0x%02x\n",
+ KBUILD_MODNAME, state->demod, ret, reg, data);
+ return ret;
+ }
+ return 0;
+}
+
+static int cx24117_writecmd(struct cx24117_state *state,
+ struct cx24117_cmd *cmd)
+{
+ struct i2c_msg msg;
+ u8 buf[CX24117_ARGLEN+1];
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d i2c wr cmd len=%d\n",
+ __func__, state->demod, cmd->len);
+
+ buf[0] = CX24117_REG_COMMAND;
+ memcpy(&buf[1], cmd->args, cmd->len);
+
+ msg.addr = state->priv->demod_address;
+ msg.flags = 0;
+ msg.len = cmd->len+1;
+ msg.buf = buf;
+ ret = i2c_transfer(state->priv->i2c, &msg, 1);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c wr cmd err(%i) len=%d\n",
+ KBUILD_MODNAME, state->demod, ret, cmd->len);
+ return ret;
+ }
+ return 0;
+}
+
+static int cx24117_readreg(struct cx24117_state *state, u8 reg)
+{
+ int ret;
+ u8 recv = 0;
+ struct i2c_msg msg[] = {
+ { .addr = state->priv->demod_address, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = state->priv->demod_address, .flags = I2C_M_RD,
+ .buf = &recv, .len = 1 }
+ };
+
+ ret = i2c_transfer(state->priv->i2c, msg, 2);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c rd err(%d) @0x%x\n",
+ KBUILD_MODNAME, state->demod, ret, reg);
+ return ret;
+ }
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d i2c rd @0x%02x=0x%02x\n",
+ __func__, state->demod, reg, recv);
+
+ return recv;
+}
+
+static int cx24117_readregN(struct cx24117_state *state,
+ u8 reg, u8 *buf, int len)
+{
+ int ret;
+ struct i2c_msg msg[] = {
+ { .addr = state->priv->demod_address, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = state->priv->demod_address, .flags = I2C_M_RD,
+ .buf = buf, .len = len }
+ };
+
+ ret = i2c_transfer(state->priv->i2c, msg, 2);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c rd err(%d) @0x%x\n",
+ KBUILD_MODNAME, state->demod, ret, reg);
+ return ret;
+ }
+ return 0;
+}
+
+static int cx24117_set_inversion(struct cx24117_state *state,
+ fe_spectral_inversion_t inversion)
+{
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n",
+ __func__, inversion, state->demod);
+
+ switch (inversion) {
+ case INVERSION_OFF:
+ state->dnxt.inversion_val = 0x00;
+ break;
+ case INVERSION_ON:
+ state->dnxt.inversion_val = 0x04;
+ break;
+ case INVERSION_AUTO:
+ state->dnxt.inversion_val = 0x0C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ state->dnxt.inversion = inversion;
+
+ return 0;
+}
+
+static int cx24117_lookup_fecmod(struct cx24117_state *state,
+ fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f)
+{
+ int i, ret = -EINVAL;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s(demod(0x%02x,0x%02x) demod%d\n",
+ __func__, m, f, state->demod);
+
+ for (i = 0; i < ARRAY_SIZE(cx24117_modfec_modes); i++) {
+ if ((d == cx24117_modfec_modes[i].delivery_system) &&
+ (m == cx24117_modfec_modes[i].modulation) &&
+ (f == cx24117_modfec_modes[i].fec)) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int cx24117_set_fec(struct cx24117_state *state,
+ fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec)
+{
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s(0x%02x,0x%02x) demod%d\n",
+ __func__, mod, fec, state->demod);
+
+ ret = cx24117_lookup_fecmod(state, delsys, mod, fec);
+ if (ret < 0)
+ return ret;
+
+ state->dnxt.fec = fec;
+ state->dnxt.fec_val = cx24117_modfec_modes[ret].val;
+ state->dnxt.fec_mask = cx24117_modfec_modes[ret].mask;
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d mask/val = 0x%02x/0x%02x\n", __func__,
+ state->demod, state->dnxt.fec_mask, state->dnxt.fec_val);
+
+ return 0;
+}
+
+static int cx24117_set_symbolrate(struct cx24117_state *state, u32 rate)
+{
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n",
+ __func__, rate, state->demod);
+
+ state->dnxt.symbol_rate = rate;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d symbol_rate = %d\n",
+ __func__, state->demod, rate);
+
+ return 0;
+}
+
+static int cx24117_load_firmware(struct dvb_frontend *fe,
+ const struct firmware *fw);
+
+static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ const struct firmware *fw;
+ int ret = 0;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d skip_fw_load=%d\n",
+ __func__, state->demod, state->priv->skip_fw_load);
+
+ if (state->priv->skip_fw_load)
+ return 0;
+
+ /* check if firmware is already running */
+ if (cx24117_readreg(state, 0xeb) != 0xa) {
+ /* Load firmware */
+ /* request the firmware, this will block until loaded */
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: Waiting for firmware upload (%s)...\n",
+ __func__, CX24117_DEFAULT_FIRMWARE);
+ ret = request_firmware(&fw, CX24117_DEFAULT_FIRMWARE,
+ state->priv->i2c->dev.parent);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: Waiting for firmware upload(2)...\n", __func__);
+ if (ret) {
+ dev_err(&state->priv->i2c->dev,
+ "%s: No firmware uploaded "
+ "(timeout or file not found?)\n", __func__);
+ return ret;
+ }
+
+ /* Make sure we don't recurse back through here
+ * during loading */
+ state->priv->skip_fw_load = 1;
+
+ ret = cx24117_load_firmware(fe, fw);
+ if (ret)
+ dev_err(&state->priv->i2c->dev,
+ "%s: Writing firmware failed\n", __func__);
+ release_firmware(fw);
+
+ dev_info(&state->priv->i2c->dev,
+ "%s: Firmware upload %s\n", __func__,
+ ret == 0 ? "complete" : "failed");
+
+ /* Ensure firmware is always loaded if required */
+ state->priv->skip_fw_load = 0;
+ }
+
+ return ret;
+}
+
+/* Take a basic firmware command structure, format it
+ * and forward it for processing
+ */
+static int cx24117_cmd_execute_nolock(struct dvb_frontend *fe,
+ struct cx24117_cmd *cmd)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int i, ret;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ /* Load the firmware if required */
+ ret = cx24117_firmware_ondemand(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Write the command */
+ cx24117_writecmd(state, cmd);
+
+ /* Start execution and wait for cmd to terminate */
+ cx24117_writereg(state, CX24117_REG_EXECUTE, 0x01);
+ i = 0;
+ while (cx24117_readreg(state, CX24117_REG_EXECUTE)) {
+ msleep(20);
+ if (i++ > 40) {
+ /* Avoid looping forever if the firmware does
+ not respond */
+ dev_warn(&state->priv->i2c->dev,
+ "%s() Firmware not responding\n", __func__);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int cx24117_cmd_execute(struct dvb_frontend *fe, struct cx24117_cmd *cmd)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int ret;
+
+ mutex_lock(&state->priv->fe_lock);
+ ret = cx24117_cmd_execute_nolock(fe, cmd);
+ mutex_unlock(&state->priv->fe_lock);
+
+ return ret;
+}
+
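A minimal sketch (not part of the driver) of how these helpers are used: a command is a length-prefixed byte buffer; fill args[], set len, and hand it to cx24117_cmd_execute(), which writes it to CX24117_REG_COMMAND and polls CX24117_REG_EXECUTE until the firmware consumes it. The wrapper name below is hypothetical and mirrors the CMD_UPDFWVERS sequence in cx24117_load_firmware().

static int example_read_fw_version_byte(struct dvb_frontend *fe, u8 idx)
{
	struct cx24117_state *state = fe->demodulator_priv;
	struct cx24117_cmd cmd;
	int ret;

	cmd.args[0] = CMD_UPDFWVERS;	/* firmware command 0x35 */
	cmd.args[1] = idx;		/* which of the four version bytes */
	cmd.len = 2;
	ret = cx24117_cmd_execute(fe, &cmd);
	if (ret)
		return ret;
	return cx24117_readreg(state, 0x33);	/* version byte is reported here */
}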
+static int cx24117_load_firmware(struct dvb_frontend *fe,
+ const struct firmware *fw)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int i, ret;
+ unsigned char vers[4];
+
+ struct i2c_msg msg;
+ u8 *buf;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d FW is %zu bytes (%02x %02x .. %02x %02x)\n",
+ __func__, state->demod, fw->size, fw->data[0], fw->data[1],
+ fw->data[fw->size - 2], fw->data[fw->size - 1]);
+
+ cx24117_writereg(state, 0xea, 0x00);
+ cx24117_writereg(state, 0xea, 0x01);
+ cx24117_writereg(state, 0xea, 0x00);
+
+ cx24117_writereg(state, 0xce, 0x92);
+
+ cx24117_writereg(state, 0xfb, 0x00);
+ cx24117_writereg(state, 0xfc, 0x00);
+
+ cx24117_writereg(state, 0xc3, 0x04);
+ cx24117_writereg(state, 0xc4, 0x04);
+
+ cx24117_writereg(state, 0xce, 0x00);
+ cx24117_writereg(state, 0xcf, 0x00);
+
+ cx24117_writereg(state, 0xea, 0x00);
+ cx24117_writereg(state, 0xeb, 0x0c);
+ cx24117_writereg(state, 0xec, 0x06);
+ cx24117_writereg(state, 0xed, 0x05);
+ cx24117_writereg(state, 0xee, 0x03);
+ cx24117_writereg(state, 0xef, 0x05);
+
+ cx24117_writereg(state, 0xf3, 0x03);
+ cx24117_writereg(state, 0xf4, 0x44);
+
+ cx24117_writereg(state, CX24117_REG_RATEDIV0, 0x04);
+ cx24117_writereg(state, CX24117_REG_CLKDIV0, 0x02);
+
+ cx24117_writereg(state, CX24117_REG_RATEDIV1, 0x04);
+ cx24117_writereg(state, CX24117_REG_CLKDIV1, 0x02);
+
+ cx24117_writereg(state, 0xf2, 0x04);
+ cx24117_writereg(state, 0xe8, 0x02);
+ cx24117_writereg(state, 0xea, 0x01);
+ cx24117_writereg(state, 0xc8, 0x00);
+ cx24117_writereg(state, 0xc9, 0x00);
+ cx24117_writereg(state, 0xca, 0x00);
+ cx24117_writereg(state, 0xcb, 0x00);
+ cx24117_writereg(state, 0xcc, 0x00);
+ cx24117_writereg(state, 0xcd, 0x00);
+ cx24117_writereg(state, 0xe4, 0x03);
+ cx24117_writereg(state, 0xeb, 0x0a);
+
+ cx24117_writereg(state, 0xfb, 0x00);
+ cx24117_writereg(state, 0xe0, 0x76);
+ cx24117_writereg(state, 0xf7, 0x81);
+ cx24117_writereg(state, 0xf8, 0x00);
+ cx24117_writereg(state, 0xf9, 0x00);
+
+ buf = kmalloc(fw->size + 1, GFP_KERNEL);
+ if (buf == NULL) {
+ state->priv->skip_fw_load = 0;
+ return -ENOMEM;
+ }
+
+ /* fw upload reg */
+ buf[0] = 0xfa;
+ memcpy(&buf[1], fw->data, fw->size);
+
+ /* prepare i2c message to send */
+ msg.addr = state->priv->demod_address;
+ msg.flags = 0;
+ msg.len = fw->size + 1;
+ msg.buf = buf;
+
+ /* send fw */
+ ret = i2c_transfer(state->priv->i2c, &msg, 1);
+ kfree(buf);
+ if (ret < 0)
+ return ret;
+
+ cx24117_writereg(state, 0xf7, 0x0c);
+ cx24117_writereg(state, 0xe0, 0x00);
+
+ /* CMD 1B */
+ cmd.args[0] = 0x1b;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x01;
+ cmd.args[3] = 0x00;
+ cmd.len = 4;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 10 */
+ cmd.args[0] = CMD_SET_VCO;
+ cmd.args[1] = 0x06;
+ cmd.args[2] = 0x2b;
+ cmd.args[3] = 0xd8;
+ cmd.args[4] = 0xa5;
+ cmd.args[5] = 0xee;
+ cmd.args[6] = 0x03;
+ cmd.args[7] = 0x9d;
+ cmd.args[8] = 0xfc;
+ cmd.args[9] = 0x06;
+ cmd.args[10] = 0x02;
+ cmd.args[11] = 0x9d;
+ cmd.args[12] = 0xfc;
+ cmd.len = 13;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 15 */
+ cmd.args[0] = 0x15;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x01;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = 0x00;
+ cmd.args[5] = 0x01;
+ cmd.args[6] = 0x01;
+ cmd.args[7] = 0x01;
+ cmd.args[8] = 0x00;
+ cmd.args[9] = 0x05;
+ cmd.args[10] = 0x02;
+ cmd.args[11] = 0x02;
+ cmd.args[12] = 0x00;
+ cmd.len = 13;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 13 */
+ cmd.args[0] = CMD_MPEGCONFIG;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = 0x01;
+ cmd.args[5] = 0x00;
+ cmd.len = 6;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 14 */
+ for (i = 0; i < 2; i++) {
+ cmd.args[0] = CMD_TUNERINIT;
+ cmd.args[1] = (u8) i;
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x05;
+ cmd.args[4] = 0x00;
+ cmd.args[5] = 0x00;
+ cmd.args[6] = 0x55;
+ cmd.args[7] = 0x00;
+ cmd.len = 8;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+ }
+
+ cx24117_writereg(state, 0xce, 0xc0);
+ cx24117_writereg(state, 0xcf, 0x00);
+ cx24117_writereg(state, 0xe5, 0x04);
+
+ /* Firmware CMD 35: Get firmware version */
+ cmd.args[0] = CMD_UPDFWVERS;
+ cmd.len = 2;
+ for (i = 0; i < 4; i++) {
+ cmd.args[1] = i;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+ vers[i] = cx24117_readreg(state, 0x33);
+ }
+ dev_info(&state->priv->i2c->dev,
+ "%s: FW version %i.%i.%i.%i\n", __func__,
+ vers[0], vers[1], vers[2], vers[3]);
+ return 0;
+error:
+ state->priv->skip_fw_load = 0;
+ dev_err(&state->priv->i2c->dev, "%s() Error running FW.\n", __func__);
+ return ret;
+}
+
+static int cx24117_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int lock;
+
+ lock = cx24117_readreg(state,
+ (state->demod == 0) ? CX24117_REG_SSTATUS0 :
+ CX24117_REG_SSTATUS1) &
+ CX24117_STATUS_MASK;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d status = 0x%02x\n",
+ __func__, state->demod, lock);
+
+ *status = 0;
+
+ if (lock & CX24117_HAS_SIGNAL)
+ *status |= FE_HAS_SIGNAL;
+ if (lock & CX24117_HAS_CARRIER)
+ *status |= FE_HAS_CARRIER;
+ if (lock & CX24117_HAS_VITERBI)
+ *status |= FE_HAS_VITERBI;
+ if (lock & CX24117_HAS_SYNCLOCK)
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
+
+ return 0;
+}
+
+static int cx24117_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int ret;
+ u8 buf[4];
+ u8 base_reg = (state->demod == 0) ?
+ CX24117_REG_BER4_0 :
+ CX24117_REG_BER4_1;
+
+ ret = cx24117_readregN(state, base_reg, buf, 4);
+ if (ret != 0)
+ return ret;
+
+ *ber = (buf[0] << 24) | (buf[1] << 16) |
+ (buf[2] << 8) | buf[3];
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d ber=0x%04x\n",
+ __func__, state->demod, *ber);
+
+ return 0;
+}
+
+static int cx24117_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+ u16 sig_reading;
+ u8 buf[2];
+ u8 reg = (state->demod == 0) ?
+ CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1;
+
+ /* Firmware CMD 1A */
+ cmd.args[0] = 0x1a;
+ cmd.args[1] = (u8) state->demod;
+ cmd.len = 2;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_readregN(state, reg, buf, 2);
+ if (ret != 0)
+ return ret;
+ sig_reading = ((buf[0] & CX24117_SIGNAL_MASK) << 2) | buf[1];
+
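+ /* map the raw reading onto the 16-bit scale; a higher raw
+ * reading corresponds to a weaker signal */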
+ *signal_strength = -100 * sig_reading + 94324;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d raw / cooked = 0x%04x / 0x%04x\n",
+ __func__, state->demod, sig_reading, *signal_strength);
+
+ return 0;
+}
+
+static int cx24117_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+ u8 reg = (state->demod == 0) ?
+ CX24117_REG_QUALITY2_0 : CX24117_REG_QUALITY2_1;
+
+ ret = cx24117_readregN(state, reg, buf, 2);
+ if (ret != 0)
+ return ret;
+
+ *snr = (buf[0] << 8) | buf[1];
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d snr = 0x%04x\n",
+ __func__, state->demod, *snr);
+
+ return ret;
+}
+
+static int cx24117_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ fe_delivery_system_t delsys = fe->dtv_property_cache.delivery_system;
+ int ret;
+ u8 buf[2];
+ u8 reg = (state->demod == 0) ?
+ CX24117_REG_DVBS_UCB2_0 :
+ CX24117_REG_DVBS_UCB2_1;
+
+ switch (delsys) {
+ case SYS_DVBS:
+ break;
+ case SYS_DVBS2:
+ reg += (CX24117_REG_DVBS2_UCB2_0 - CX24117_REG_DVBS_UCB2_0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = cx24117_readregN(state, reg, buf, 2);
+ if (ret != 0)
+ return ret;
+ *ucblocks = (buf[0] << 8) | buf[1];
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d ucb=0x%04x\n",
+ __func__, state->demod, *ucblocks);
+
+ return 0;
+}
+
+/* Overwrite the current tuning params, we are about to tune */
+static void cx24117_clone_params(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ state->dcur = state->dnxt;
+}
+
+/* Wait for LNB */
+static int cx24117_wait_for_lnb(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int i;
+ u8 val, reg = (state->demod == 0) ? CX24117_REG_QSTATUS0 :
+ CX24117_REG_QSTATUS1;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d qstatus = 0x%02x\n",
+ __func__, state->demod, cx24117_readreg(state, reg));
+
+ /* Wait for up to 300 ms */
+ for (i = 0; i < 10; i++) {
+ val = cx24117_readreg(state, reg) & 0x01;
+ if (val != 0)
+ return 0;
+ msleep(30);
+ }
+
+ dev_warn(&state->priv->i2c->dev, "%s: demod%d LNB not ready\n",
+ KBUILD_MODNAME, state->demod);
+
+ return -ETIMEDOUT; /* -EBUSY ? */
+}
+
+static int cx24117_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+ u8 reg = (state->demod == 0) ? 0x10 : 0x20;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d %s\n",
+ __func__, state->demod,
+ voltage == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
+ voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" :
+ "SEC_VOLTAGE_OFF");
+
+ /* CMD 32 */
+ cmd.args[0] = 0x32;
+ cmd.args[1] = reg;
+ cmd.args[2] = reg;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret)
+ return ret;
+
+ if ((voltage == SEC_VOLTAGE_13) ||
+ (voltage == SEC_VOLTAGE_18)) {
+ /* CMD 33 */
+ cmd.args[0] = 0x33;
+ cmd.args[1] = reg;
+ cmd.args[2] = reg;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_wait_for_lnb(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Wait for voltage/min repeat delay */
+ msleep(100);
+
+ /* CMD 22 - CMD_LNBDCLEVEL */
+ cmd.args[0] = CMD_LNBDCLEVEL;
+ cmd.args[1] = state->demod ? 0 : 1;
+ cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00);
+ cmd.len = 3;
+
+ /* Min delay time before DiSEqC send */
+ msleep(20);
+ } else {
+ cmd.args[0] = 0x33;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = reg;
+ cmd.len = 3;
+ }
+
+ return cx24117_cmd_execute(fe, &cmd);
+}
+
+static int cx24117_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t tone)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n",
+ __func__, tone, state->demod);
+ if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
+ dev_warn(&state->priv->i2c->dev, "%s: demod%d invalid tone=%d\n",
+ KBUILD_MODNAME, state->demod, tone);
+ return -EINVAL;
+ }
+
+ /* Wait for LNB ready */
+ ret = cx24117_wait_for_lnb(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Min delay time after DiSEqC send */
+ msleep(20);
+
+ /* Set the tone */
+ /* CMD 23 - CMD_SET_TONE */
+ cmd.args[0] = CMD_SET_TONE;
+ cmd.args[1] = (state->demod ? 0 : 1);
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x00;
+ cmd.len = 5;
+ switch (tone) {
+ case SEC_TONE_ON:
+ cmd.args[4] = 0x01;
+ break;
+ case SEC_TONE_OFF:
+ cmd.args[4] = 0x00;
+ break;
+ }
+
+ msleep(20);
+
+ return cx24117_cmd_execute(fe, &cmd);
+}
+
+/* Initialise DiSEqC */
+static int cx24117_diseqc_init(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+
+ /* Prepare a DiSEqC command */
+ state->dsec_cmd.args[0] = CMD_LNBSEND;
+
+ /* demod */
+ state->dsec_cmd.args[CX24117_DISEQC_DEMOD] = state->demod ? 0 : 1;
+
+ /* DiSEqC burst */
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] = CX24117_DISEQC_MINI_A;
+
+ /* Unknown */
+ state->dsec_cmd.args[CX24117_DISEQC_ARG3_2] = 0x02;
+ state->dsec_cmd.args[CX24117_DISEQC_ARG4_0] = 0x00;
+
+ /* Continuation flag? */
+ state->dsec_cmd.args[CX24117_DISEQC_ARG5_0] = 0x00;
+
+ /* DiSEqC message length */
+ state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = 0x00;
+
+ /* Command length */
+ state->dsec_cmd.len = 7;
+
+ return 0;
+}
+
+/* Send DiSEqC message */
+static int cx24117_send_diseqc_msg(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *d)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int i, ret;
+
+ /* Dump DiSEqC message */
+ dev_dbg(&state->priv->i2c->dev, "%s: demod %d (",
+ __func__, state->demod);
+ for (i = 0; i < d->msg_len; i++)
+ dev_dbg(&state->priv->i2c->dev, "0x%02x ", d->msg[i]);
+ dev_dbg(&state->priv->i2c->dev, ")\n");
+
+ /* Validate length */
+ if (d->msg_len > 15)
+ return -EINVAL;
+
+ /* DiSEqC message */
+ for (i = 0; i < d->msg_len; i++)
+ state->dsec_cmd.args[CX24117_DISEQC_MSGOFS + i] = d->msg[i];
+
+ /* DiSEqC message length */
+ state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = d->msg_len;
+
+ /* Command length */
+ state->dsec_cmd.len = CX24117_DISEQC_MSGOFS +
+ state->dsec_cmd.args[CX24117_DISEQC_MSGLEN];
+
+ /*
+ * The toneburst is either derived from the message itself or,
+ * failing that, the value cached by diseqc_send_burst() is used.
+ *
+ * WRITE PORT GROUP COMMAND 38
+ *
+ * 0/A/A: E0 10 38 F0..F3
+ * 1/B/B: E0 10 38 F4..F7
+ * 2/C/A: E0 10 38 F8..FB
+ * 3/D/B: E0 10 38 FC..FF
+ *
+ * databyte[3]= 8421:8421
+ * ABCD:WXYZ
+ * CLR :SET
+ *
+ * WX= PORT SELECT 0..3 (X=TONEBURST)
+ * Y = VOLTAGE (0=13V, 1=18V)
+ * Z = BAND (0=LOW, 1=HIGH(22K))
+ */
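+ /*
+ * Worked example (following the table above): for "E0 10 38 F5"
+ * the set nibble is 0x5 = WXYZ 0101, i.e. port select 01 (1/B),
+ * toneburst bit X set, 13V, high band; bit 2 (X) is what the
+ * check below caches as the burst value.
+ */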
+ if (d->msg_len >= 4 && d->msg[2] == 0x38)
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] =
+ ((d->msg[3] & 4) >> 2);
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d burst=%d\n",
+ __func__, state->demod,
+ state->dsec_cmd.args[CX24117_DISEQC_BURST]);
+
+ /* Wait for LNB ready */
+ ret = cx24117_wait_for_lnb(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Wait for voltage/min repeat delay */
+ msleep(100);
+
+ /* Command */
+ ret = cx24117_cmd_execute(fe, &state->dsec_cmd);
+ if (ret != 0)
+ return ret;
+ /*
+ * Wait for send
+ *
+ * Eutelsat spec:
+ * >15ms delay + (XXX determine if FW does this, see set_tone)
+ * 13.5ms per byte +
+ * >15ms delay +
+ * 12.5ms burst +
+ * >15ms delay (XXX determine if FW does this, see set_tone)
+ */
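+ /* i.e. roughly 16 ms per message byte (msg_len << 4) plus a fixed
+ * 60 ms to cover the surrounding delays and the burst itself */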
+ msleep((state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] << 4) + 60);
+
+ return 0;
+}
+
+/* Send DiSEqC burst */
+static int cx24117_diseqc_send_burst(struct dvb_frontend *fe,
+ fe_sec_mini_cmd_t burst)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod=%d\n",
+ __func__, burst, state->demod);
+
+ /* DiSEqC burst */
+ if (burst == SEC_MINI_A)
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] =
+ CX24117_DISEQC_MINI_A;
+ else if (burst == SEC_MINI_B)
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] =
+ CX24117_DISEQC_MINI_B;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cx24117_get_priv(struct cx24117_priv **priv,
+ struct i2c_adapter *i2c, u8 client_address)
+{
+ int ret;
+
+ mutex_lock(&cx24117_list_mutex);
+ ret = hybrid_tuner_request_state(struct cx24117_priv, (*priv),
+ hybrid_tuner_instance_list, i2c, client_address, "cx24117");
+ mutex_unlock(&cx24117_list_mutex);
+
+ return ret;
+}
+
+static void cx24117_release_priv(struct cx24117_priv *priv)
+{
+ mutex_lock(&cx24117_list_mutex);
+ if (priv != NULL)
+ hybrid_tuner_release_state(priv);
+ mutex_unlock(&cx24117_list_mutex);
+}
+
+static void cx24117_release(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ dev_dbg(&state->priv->i2c->dev, "%s demod%d\n",
+ __func__, state->demod);
+ cx24117_release_priv(state->priv);
+ kfree(state);
+}
+
+static struct dvb_frontend_ops cx24117_ops;
+
+struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct cx24117_state *state = NULL;
+ struct cx24117_priv *priv = NULL;
+ int demod = 0;
+
+ /* get the common data struct for both demods */
+ demod = cx24117_get_priv(&priv, i2c, config->demod_address);
+
+ switch (demod) {
+ case 0:
+ dev_err(&i2c->dev,
+ "%s: Error attaching frontend %d\n",
+ KBUILD_MODNAME, demod);
+ goto error1;
+ case 1:
+ /* new priv instance */
+ priv->i2c = i2c;
+ priv->demod_address = config->demod_address;
+ mutex_init(&priv->fe_lock);
+ break;
+ default:
+ /* existing priv instance */
+ break;
+ }
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct cx24117_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error2;
+
+ state->demod = demod - 1;
+ state->priv = priv;
+
+ /* test i2c bus for ack (only on the first attach) */
+ if (demod == 1) {
+ if (cx24117_readreg(state, 0x00) < 0)
+ goto error3;
+ }
+
+ dev_info(&state->priv->i2c->dev,
+ "%s: Attaching frontend %d\n",
+ KBUILD_MODNAME, state->demod);
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &cx24117_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+ return &state->frontend;
+
+error3:
+ kfree(state);
+error2:
+ cx24117_release_priv(priv);
+error1:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cx24117_attach);
+
+/*
+ * Initialise or wake up device
+ *
+ * Power config will reset and load initial firmware if required
+ */
+static int cx24117_initfe(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ mutex_lock(&state->priv->fe_lock);
+
+ /* Firmware CMD 36: Power config */
+ cmd.args[0] = CMD_TUNERSLEEP;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = 0;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto exit;
+
+ ret = cx24117_diseqc_init(fe);
+ if (ret != 0)
+ goto exit;
+
+ /* CMD 3C */
+ cmd.args[0] = 0x3c;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = 0x10;
+ cmd.args[3] = 0x10;
+ cmd.len = 4;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto exit;
+
+ /* CMD 34 */
+ cmd.args[0] = 0x34;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = CX24117_OCC;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+
+exit:
+ mutex_unlock(&state->priv->fe_lock);
+
+ return ret;
+}
+
+/*
+ * Put device to sleep
+ */
+static int cx24117_sleep(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ /* Firmware CMD 36: Power config */
+ cmd.args[0] = CMD_TUNERSLEEP;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = 1;
+ cmd.len = 3;
+ return cx24117_cmd_execute(fe, &cmd);
+}
+
+/* dvb-core told us to tune, the tv property cache will be complete,
+ * so it's safe for us to pull values and use them for tuning purposes.
+ */
+static int cx24117_set_frontend(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct cx24117_cmd cmd;
+ fe_status_t tunerstat;
+ int i, status, ret, retune = 1;
+ u8 reg_clkdiv, reg_ratediv;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S\n",
+ __func__, state->demod);
+
+ /* Only QPSK is supported for DVB-S */
+ if (c->modulation != QPSK) {
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d unsupported modulation (%d)\n",
+ __func__, state->demod, c->modulation);
+ return -EINVAL;
+ }
+
+ /* Pilot doesn't exist in DVB-S, turn bit off */
+ state->dnxt.pilot_val = CX24117_PILOT_OFF;
+
+ /* DVB-S only supports 0.35 */
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_035;
+ break;
+
+ case SYS_DVBS2:
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S2\n",
+ __func__, state->demod);
+
+ /*
+ * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2,
+ * but hardware auto detection is not
+ */
+ if (c->modulation != PSK_8 && c->modulation != QPSK) {
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d unsupported modulation (%d)\n",
+ __func__, state->demod, c->modulation);
+ return -EOPNOTSUPP;
+ }
+
+ switch (c->pilot) {
+ case PILOT_AUTO:
+ state->dnxt.pilot_val = CX24117_PILOT_AUTO;
+ break;
+ case PILOT_OFF:
+ state->dnxt.pilot_val = CX24117_PILOT_OFF;
+ break;
+ case PILOT_ON:
+ state->dnxt.pilot_val = CX24117_PILOT_ON;
+ break;
+ default:
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d unsupported pilot mode (%d)\n",
+ __func__, state->demod, c->pilot);
+ return -EOPNOTSUPP;
+ }
+
+ switch (c->rolloff) {
+ case ROLLOFF_20:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_020;
+ break;
+ case ROLLOFF_25:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_025;
+ break;
+ case ROLLOFF_35:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_035;
+ break;
+ case ROLLOFF_AUTO:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_035;
+ /* soft-auto rolloff */
+ retune = 3;
+ break;
+ default:
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d unsupported rolloff (%d)\n",
+ KBUILD_MODNAME, state->demod, c->rolloff);
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod %d unsupported delivery system (%d)\n",
+ KBUILD_MODNAME, state->demod, c->delivery_system);
+ return -EINVAL;
+ }
+
+ state->dnxt.delsys = c->delivery_system;
+ state->dnxt.modulation = c->modulation;
+ state->dnxt.frequency = c->frequency;
+ state->dnxt.pilot = c->pilot;
+ state->dnxt.rolloff = c->rolloff;
+
+ ret = cx24117_set_inversion(state, c->inversion);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_set_fec(state,
+ c->delivery_system, c->modulation, c->fec_inner);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_set_symbolrate(state, c->symbol_rate);
+ if (ret != 0)
+ return ret;
+
+ /* discard the 'current' tuning parameters and prepare to tune */
+ cx24117_clone_params(fe);
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: delsys = %d\n", __func__, state->dcur.delsys);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: modulation = %d\n", __func__, state->dcur.modulation);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: frequency = %d\n", __func__, state->dcur.frequency);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: pilot = %d (val = 0x%02x)\n", __func__,
+ state->dcur.pilot, state->dcur.pilot_val);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: retune = %d\n", __func__, retune);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: rolloff = %d (val = 0x%02x)\n", __func__,
+ state->dcur.rolloff, state->dcur.rolloff_val);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: symbol_rate = %d\n", __func__, state->dcur.symbol_rate);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: FEC = %d (mask/val = 0x%02x/0x%02x)\n", __func__,
+ state->dcur.fec, state->dcur.fec_mask, state->dcur.fec_val);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: Inversion = %d (val = 0x%02x)\n", __func__,
+ state->dcur.inversion, state->dcur.inversion_val);
+
+ /* Prepare a tune request */
+ cmd.args[0] = CMD_TUNEREQUEST;
+
+ /* demod */
+ cmd.args[1] = state->demod;
+
+ /* Frequency */
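+ /* 24-bit value, MSB first (DVB-S frontend frequency is in kHz) */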
+ cmd.args[2] = (state->dcur.frequency & 0xff0000) >> 16;
+ cmd.args[3] = (state->dcur.frequency & 0x00ff00) >> 8;
+ cmd.args[4] = (state->dcur.frequency & 0x0000ff);
+
+ /* Symbol Rate */
+ cmd.args[5] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8;
+ cmd.args[6] = ((state->dcur.symbol_rate / 1000) & 0x00ff);
+
+ /* Automatic Inversion */
+ cmd.args[7] = state->dcur.inversion_val;
+
+ /* Modulation / FEC / Pilot */
+ cmd.args[8] = state->dcur.fec_val | state->dcur.pilot_val;
+
+ cmd.args[9] = CX24117_SEARCH_RANGE_KHZ >> 8;
+ cmd.args[10] = CX24117_SEARCH_RANGE_KHZ & 0xff;
+
+ cmd.args[11] = state->dcur.rolloff_val;
+ cmd.args[12] = state->dcur.fec_mask;
+
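+ /* choose rate/clock divider register values from the symbol rate;
+ * lower symbol rates get larger dividers */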
+ if (state->dcur.symbol_rate > 30000000) {
+ reg_ratediv = 0x04;
+ reg_clkdiv = 0x02;
+ } else if (state->dcur.symbol_rate > 10000000) {
+ reg_ratediv = 0x06;
+ reg_clkdiv = 0x03;
+ } else {
+ reg_ratediv = 0x0a;
+ reg_clkdiv = 0x05;
+ }
+
+ cmd.args[13] = reg_ratediv;
+ cmd.args[14] = reg_clkdiv;
+
+ cx24117_writereg(state, (state->demod == 0) ?
+ CX24117_REG_CLKDIV0 : CX24117_REG_CLKDIV1, reg_clkdiv);
+ cx24117_writereg(state, (state->demod == 0) ?
+ CX24117_REG_RATEDIV0 : CX24117_REG_RATEDIV1, reg_ratediv);
+
+ cmd.args[15] = CX24117_PNE;
+ cmd.len = 16;
+
+ do {
+ /* Reset status register */
+ status = cx24117_readreg(state, (state->demod == 0) ?
+ CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1) &
+ CX24117_SIGNAL_MASK;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d status_setfe = %02x\n",
+ __func__, state->demod, status);
+
+ cx24117_writereg(state, (state->demod == 0) ?
+ CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1, status);
+
+ /* Tune */
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ break;
+
+ /*
+ * Wait for up to 1000 ms before retrying
+ *
+ * If we are able to tune then generally it occurs within 100ms.
+ * If it takes longer, try a different rolloff setting.
+ */
+ for (i = 0; i < 50; i++) {
+ cx24117_read_status(fe, &tunerstat);
+ status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC);
+ if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) {
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d tuned\n",
+ __func__, state->demod);
+ return 0;
+ }
+ msleep(20);
+ }
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d not tuned\n",
+ __func__, state->demod);
+
+ /* try next rolloff value */
+ if (state->dcur.rolloff == ROLLOFF_AUTO)
+ cmd.args[11]--;
+
+ } while (--retune);
+ return -EINVAL;
+}
+
+static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ *delay = HZ / 5;
+ if (re_tune) {
+ int ret = cx24117_set_frontend(fe);
+ if (ret)
+ return ret;
+ }
+ return cx24117_read_status(fe, status);
+}
+
+static int cx24117_get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int cx24117_get_frontend(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct cx24117_cmd cmd;
+ u8 reg, st, inv;
+ int ret, idx;
+ unsigned int freq;
+ short srate_os, freq_os;
+
+ u8 buf[0x1f-4];
+
+ cmd.args[0] = 0x1c;
+ cmd.args[1] = (u8) state->demod;
+ cmd.len = 2;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ return ret;
+
+ /* read all required regs at once */
+ reg = (state->demod == 0) ? CX24117_REG_FREQ3_0 : CX24117_REG_FREQ3_1;
+ ret = cx24117_readregN(state, reg, buf, 0x1f-4);
+ if (ret != 0)
+ return ret;
+
+ st = buf[5];
+
+ /* get spectral inversion */
+ inv = (((state->demod == 0) ? ~st : st) >> 6) & 1;
+ if (inv == 0)
+ c->inversion = INVERSION_OFF;
+ else
+ c->inversion = INVERSION_ON;
+
+ /* modulation and fec */
+ idx = st & 0x3f;
+ if (c->delivery_system == SYS_DVBS2) {
+ if (idx > 11)
+ idx += 9;
+ else
+ idx += 7;
+ }
+
+ c->modulation = cx24117_modfec_modes[idx].modulation;
+ c->fec_inner = cx24117_modfec_modes[idx].fec;
+
+ /* frequency */
+ freq = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ freq_os = (buf[8] << 8) | buf[9];
+ c->frequency = freq + freq_os;
+
+ /* symbol rate */
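+ /* srate_os looks like a signed offset in kSym/s relative to the
+ * requested symbol rate */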
+ srate_os = (buf[10] << 8) | buf[11];
+ c->symbol_rate = -1000 * srate_os + state->dcur.symbol_rate;
+ return 0;
+}
+
+static struct dvb_frontend_ops cx24117_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Conexant CX24117/CX24132",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 1011, /* kHz for QPSK frontends */
+ .frequency_tolerance = 5000,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_QPSK | FE_CAN_RECOVER
+ },
+
+ .release = cx24117_release,
+
+ .init = cx24117_initfe,
+ .sleep = cx24117_sleep,
+ .read_status = cx24117_read_status,
+ .read_ber = cx24117_read_ber,
+ .read_signal_strength = cx24117_read_signal_strength,
+ .read_snr = cx24117_read_snr,
+ .read_ucblocks = cx24117_read_ucblocks,
+ .set_tone = cx24117_set_tone,
+ .set_voltage = cx24117_set_voltage,
+ .diseqc_send_master_cmd = cx24117_send_diseqc_msg,
+ .diseqc_send_burst = cx24117_diseqc_send_burst,
+ .get_frontend_algo = cx24117_get_algo,
+ .tune = cx24117_tune,
+
+ .set_frontend = cx24117_set_frontend,
+ .get_frontend = cx24117_get_frontend,
+};
+
+
+MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24117/cx24132 hardware");
+MODULE_AUTHOR("Luis Alves (ljalvs@gmail.com)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1");
+MODULE_FIRMWARE(CX24117_DEFAULT_FIRMWARE);
+
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h
new file mode 100644
index 00000000000..4e59e9574fa
--- /dev/null
+++ b/drivers/media/dvb-frontends/cx24117.h
@@ -0,0 +1,47 @@
+/*
+ Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver
+
+ Copyright (C) 2013 Luis Alves <ljalvs@gmail.com>
+ (based on cx24116.h by Steven Toth)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef CX24117_H
+#define CX24117_H
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+
+struct cx24117_config {
+ /* the demodulator's i2c address */
+ u8 demod_address;
+};
+
+#if IS_ENABLED(CONFIG_DVB_CX24117)
+extern struct dvb_frontend *cx24117_attach(
+ const struct cx24117_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *cx24117_attach(
+ const struct cx24117_config *config,
+ struct i2c_adapter *i2c)
+{
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* CX24117_H */
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index a771da3e9f9..72fb5838cae 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -739,7 +739,7 @@ static int cx24123_set_voltage(struct dvb_frontend *fe,
return 0;
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 7ca5c69dd20..03930d5e9fe 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -21,21 +21,31 @@
#include "cxd2820r_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
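+/* (a fixed-size buffer bounded by this limit replaces the previous
+ * variable-length array on the stack; oversized requests are now
+ * rejected with -EINVAL) */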
+
/* write multiple registers */
static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
u8 *val, int len)
{
int ret;
- u8 buf[len+1];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = i2c,
.flags = 0,
- .len = sizeof(buf),
+ .len = len + 1,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
@@ -55,7 +65,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
u8 *val, int len)
{
int ret;
- u8 buf[len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[2] = {
{
.addr = i2c,
@@ -65,11 +75,18 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
}, {
.addr = i2c,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = len,
.buf = buf,
}
};
+ if (len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, buf, len);
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 6201c59a78d..e540cfb13ba 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -649,9 +649,9 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si
b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */
if (*b == '~') {
b++;
- dprintk(b);
+ dprintk("%s", b);
} else
- dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<emtpy>");
+ dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>");
return 1;
}
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 9a2134792cf..959ae36403b 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -46,10 +46,6 @@
#define DRX_I2C_MODEFLAGS 0xC0
#define DRX_I2C_FLAGS 0xF0
-#ifndef SIZEOF_ARRAY
-#define SIZEOF_ARRAY(array) (sizeof((array))/sizeof((array)[0]))
-#endif
-
#define DEFAULT_LOCK_TIMEOUT 1100
#define DRX_CHANNEL_AUTO 0
@@ -1018,7 +1014,7 @@ static int HI_CfgCommand(struct drxd_state *state)
status = Write16(state, HI_RA_RAM_SRV_CMD__A,
HI_RA_RAM_SRV_CMD_CONFIG, 0);
else
- status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, 0);
+ status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, NULL);
mutex_unlock(&state->mutex);
return status;
}
@@ -1039,7 +1035,7 @@ static int HI_ResetCommand(struct drxd_state *state)
status = Write16(state, HI_RA_RAM_SRV_RST_KEY__A,
HI_RA_RAM_SRV_RST_KEY_ACT, 0);
if (status == 0)
- status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, 0);
+ status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, NULL);
mutex_unlock(&state->mutex);
msleep(1);
return status;
@@ -2837,7 +2833,7 @@ static int drxd_init(struct dvb_frontend *fe)
int err = 0;
/* if (request_firmware(&state->fw, "drxd.fw", state->dev)<0) */
- return DRXD_init(state, 0, 0);
+ return DRXD_init(state, NULL, 0);
err = DRXD_init(state, state->fw->data, state->fw->size);
release_firmware(state->fw);
@@ -2973,7 +2969,7 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
mutex_init(&state->mutex);
- if (Read16(state, 0, 0, 0) < 0)
+ if (Read16(state, 0, NULL, 0) < 0)
goto error;
state->frontend.ops = drxd_ops;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 082014de687..d416c15691d 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1083,7 +1083,7 @@ static int hi_cfg_command(struct drxk_state *state)
SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
if (status < 0)
goto error;
- status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0);
+ status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, NULL);
if (status < 0)
goto error;
@@ -2781,7 +2781,7 @@ static int ConfigureI2CBridge(struct drxk_state *state, bool b_enable_bridge)
goto error;
}
- status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0);
+ status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, NULL);
error:
if (status < 0)
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index c1c3400b217..cadcae4cff8 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -31,6 +31,9 @@
#include "itd1000.h"
#include "itd1000_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
@@ -52,10 +55,18 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
/* don't write more than one byte with flexcop behind */
static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len)
{
- u8 buf[1+len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1
};
+
+ if (1 + len > sizeof(buf)) {
+ printk(KERN_WARNING
+ "itd1000: i2c wr reg=%04x: len=%d is too big!\n",
+ reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], v, len);
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index ec388c1d691..a74ac0ddb83 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -36,6 +36,8 @@
#include "mt312_priv.h"
#include "mt312.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
struct mt312_state {
struct i2c_adapter *i2c;
@@ -96,9 +98,15 @@ static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg,
const u8 *src, const size_t count)
{
int ret;
- u8 buf[count + 1];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg;
+ if (1 + count > sizeof(buf)) {
+ printk(KERN_WARNING
+ "mt312: write: len=%zd is too big!\n", count);
+ return -EINVAL;
+ }
+
if (debug) {
int i;
dprintk("W(%d):", reg & 0x7f);
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 8e288940a61..fbca9856313 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -39,6 +39,9 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
#define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
#define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
#define CRC_CCIT_MASK 0x1021
@@ -95,10 +98,16 @@ static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len)
static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg,
const u8 *buf, u8 len)
{
- u8 buf2 [len+1];
+ u8 buf2[MAX_XFER_SIZE];
int err;
struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 };
+ if (1 + len > sizeof(buf2)) {
+ pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n",
+ __func__, reg, len);
+ return -EINVAL;
+ }
+
buf2[0] = reg;
memcpy(&buf2[1], buf, len);
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 362d26d11e8..7efb796c472 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -27,20 +27,30 @@
#include "rtl2830_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
/* write multiple hardware registers */
static int rtl2830_wr(struct rtl2830_priv *priv, u8 reg, const u8 *val, int len)
{
int ret;
- u8 buf[1+len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg.i2c_addr,
.flags = 0,
- .len = 1+len,
+ .len = 1 + len,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index facb8484151..ff73da9365e 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -22,6 +22,9 @@
#include "dvb_math.h"
#include <linux/bitops.h>
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
int rtl2832_debug;
module_param_named(debug, rtl2832_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
@@ -162,16 +165,23 @@ static const struct rtl2832_reg_entry registers[] = {
static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
- u8 buf[1+len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg.i2c_addr,
.flags = 0,
- .len = 1+len,
+ .len = 1 + len,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
@@ -489,6 +499,7 @@ static int rtl2832_init(struct dvb_frontend *fe)
init = rtl2832_tuner_init_e4000;
break;
case RTL2832_TUNER_R820T:
+ case RTL2832_TUNER_R828D:
len = ARRAY_SIZE(rtl2832_tuner_init_r820t);
init = rtl2832_tuner_init_r820t;
break;
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 91b2dcf5a6e..2cfbb6a9706 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -53,6 +53,7 @@ struct rtl2832_config {
#define RTL2832_TUNER_E4000 0x27
#define RTL2832_TUNER_FC0013 0x29
#define RTL2832_TUNER_R820T 0x2a
+#define RTL2832_TUNER_R828D 0x2b
u8 tuner;
};
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index e2fec9ebf94..93eeaf7118f 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -836,9 +836,16 @@ static u32 s5h1420_tuner_i2c_func(struct i2c_adapter *adapter)
static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct s5h1420_state *state = i2c_get_adapdata(i2c_adap);
- struct i2c_msg m[1 + num];
+ struct i2c_msg m[3];
u8 tx_open[2] = { CON_1, state->CON_1_val | 1 }; /* repeater stops once there was a stop condition */
+ if (1 + num > ARRAY_SIZE(m)) {
+ printk(KERN_WARNING
+ "%s: i2c xfer: num=%d is too big!\n",
+ KBUILD_MODNAME, num);
+ return -EOPNOTSUPP;
+ }
+
memset(m, 0, sizeof(struct i2c_msg) * (1 + num));
m[0].addr = state->config->demod_address;
@@ -847,7 +854,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c
memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
- return i2c_transfer(state->i2c, m, 1+num) == 1 + num ? num : -EIO;
+ return i2c_transfer(state->i2c, m, 1 + num) == 1 + num ? num : -EIO;
}
static struct i2c_algorithm s5h1420_tuner_i2c_algo = {
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 3dd5714eadb..07cd5ea7a03 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -32,6 +32,9 @@
#include "stb0899_priv.h"
#include "stb0899_reg.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static unsigned int verbose = 0;//1;
module_param(verbose, int, 0644);
@@ -499,7 +502,7 @@ err:
int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, u32 count)
{
int ret;
- u8 buf[2 + count];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg i2c_msg = {
.addr = state->config->demod_address,
.flags = 0,
@@ -507,6 +510,13 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
.len = 2 + count
};
+ if (2 + count > sizeof(buf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, count);
+ return -EINVAL;
+ }
+
buf[0] = reg >> 8;
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 45f9523f968..cea175d1989 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -31,6 +31,8 @@
static unsigned int verbose;
module_param(verbose, int, 0644);
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
#define FE_ERROR 0
#define FE_NOTICE 1
@@ -183,7 +185,7 @@ static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len)
{
int rc;
- u8 cmdbuf[len + 1];
+ u8 cmdbuf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = state->config->tuner_address,
.flags = 0,
@@ -191,6 +193,13 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
.len = len + 1
};
+ if (1 + len > sizeof(cmdbuf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr: len=%d is too big!\n",
+ KBUILD_MODNAME, len);
+ return -EINVAL;
+ }
+
if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) {
dprintk(verbose, FE_ERROR, 1, "Invalid register range %d:%d",
start, len);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 7b6dba3ce55..45877273942 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -33,6 +33,9 @@
#include "stv0367_regs.h"
#include "stv0367_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static int stvdebug;
module_param_named(debug, stvdebug, int, 0644);
@@ -767,7 +770,7 @@ static struct st_register def0367cab[STV0367CAB_NBREGS] = {
static
int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
{
- u8 buf[len + 2];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
@@ -776,6 +779,14 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
};
int ret;
+ if (2 + len > sizeof(buf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = MSB(reg);
buf[1] = LSB(reg);
memcpy(buf + 2, data, len);
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 56d470ad5a8..23e872f8474 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -35,6 +35,9 @@
#include "stv090x.h"
#include "stv090x_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static unsigned int verbose;
module_param(verbose, int, 0644);
@@ -722,9 +725,16 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
{
const struct stv090x_config *config = state->config;
int ret;
- u8 buf[2 + count];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count };
+ if (2 + count > sizeof(buf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, count);
+ return -EINVAL;
+ }
+
buf[0] = reg >> 8;
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 20b5fa92c53..b1425830a24 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -30,6 +30,9 @@
#include "stv6110.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static int debug;
struct stv6110_priv {
@@ -68,7 +71,7 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
{
struct stv6110_priv *priv = fe->tuner_priv;
int rc;
- u8 cmdbuf[len + 1];
+ u8 cmdbuf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = priv->i2c_address,
.flags = 0,
@@ -78,6 +81,13 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
dprintk("%s\n", __func__);
+ if (1 + len > sizeof(cmdbuf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr: len=%d is too big!\n",
+ KBUILD_MODNAME, len);
+ return -EINVAL;
+ }
+
if (start + len > 8)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index f36cab12bdc..e66154e5c1d 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -32,6 +32,9 @@
#include "stv6110x.h"
#include "stv6110x_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static unsigned int verbose;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
@@ -61,7 +64,8 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
{
int ret;
const struct stv6110x_config *config = stv6110x->config;
- u8 buf[len + 1];
+ u8 buf[MAX_XFER_SIZE];
+
struct i2c_msg msg = {
.addr = config->addr,
.flags = 0,
@@ -69,6 +73,13 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
.len = len + 1
};
+ if (1 + len > sizeof(buf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr: len=%d is too big!\n",
+ KBUILD_MODNAME, len);
+ return -EINVAL;
+ }
+
if (start + len > 8)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index e79749cfec8..8ad3a57cf64 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -20,6 +20,9 @@
#include "tda10071_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static struct dvb_frontend_ops tda10071_ops;
/* write multiple registers */
@@ -27,16 +30,23 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
int len)
{
int ret;
- u8 buf[len+1];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg.demod_i2c_addr,
.flags = 0,
- .len = sizeof(buf),
+ .len = 1 + len,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
@@ -56,7 +66,7 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
int len)
{
int ret;
- u8 buf[len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[2] = {
{
.addr = priv->cfg.demod_i2c_addr,
@@ -66,11 +76,18 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
}, {
.addr = priv->cfg.demod_i2c_addr,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = len,
.buf = buf,
}
};
+ if (len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, buf, len);
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index d281f77d5c2..2c54586ac07 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -34,6 +34,9 @@
#include "dvb_frontend.h"
#include "tda18271c2dd.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct SStandardParam {
s32 m_IFFrequency;
u32 m_BandWidth;
@@ -139,11 +142,18 @@ static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
static int WriteRegs(struct tda_state *state,
u8 SubAddr, u8 *Regs, u16 nRegs)
{
- u8 data[nRegs+1];
+ u8 data[MAX_XFER_SIZE];
+
+ if (1 + nRegs > sizeof(data)) {
+ printk(KERN_WARNING
+ "%s: i2c wr: len=%d is too big!\n",
+ KBUILD_MODNAME, nRegs);
+ return -EINVAL;
+ }
data[0] = SubAddr;
memcpy(data + 1, Regs, nRegs);
- return i2c_write(state->i2c, state->adr, data, nRegs+1);
+ return i2c_write(state->i2c, state->adr, data, nRegs + 1);
}
static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg)
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 9d08350fe4b..69e62f42e2e 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -189,7 +189,7 @@ static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t ton
return tda8083_writereg (state, 0x29, 0x80);
default:
return -EINVAL;
- };
+ }
}
static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t voltage)
@@ -201,7 +201,7 @@ static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t vo
return tda8083_writereg (state, 0x20, 0x11);
default:
return -EINVAL;
- };
+ }
}
static int tda8083_send_diseqc_burst (struct tda8083_state* state, fe_sec_mini_cmd_t burst)
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index ad7ad857ab2..9aba044dabe 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -31,6 +31,7 @@ struct ts2020_priv {
struct i2c_adapter *i2c;
u8 clk_out_div;
u32 frequency;
+ u32 frequency_div;
};
static int ts2020_release(struct dvb_frontend *fe)
@@ -193,7 +194,7 @@ static int ts2020_set_params(struct dvb_frontend *fe)
u8 lo = 0x01, div4 = 0x0;
/* Calculate frequency divider */
- if (frequency < 1060000) {
+ if (frequency < priv->frequency_div) {
lo |= 0x10;
div4 = 0x1;
ndiv = (frequency * 14 * 4) / TS2020_XTAL_FREQ;
@@ -340,8 +341,12 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
priv->i2c_address = config->tuner_address;
priv->i2c = i2c;
priv->clk_out_div = config->clk_out_div;
+ priv->frequency_div = config->frequency_div;
fe->tuner_priv = priv;
+ if (!priv->frequency_div)
+ priv->frequency_div = 1060000;
+
/* Wake Up the tuner */
if ((0x03 & ts2020_readreg(fe, 0x00)) == 0x00) {
ts2020_writereg(fe, 0x00, 0x01);
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
index 5bcb9a71ca8..b2fe6bb3a38 100644
--- a/drivers/media/dvb-frontends/ts2020.h
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -28,6 +28,7 @@
struct ts2020_config {
u8 tuner_address;
u8 clk_out_div;
+ u32 frequency_div;
};
#if IS_ENABLED(CONFIG_DVB_TS2020)
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index eff9c5fde50..91b6b2e9b79 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -30,6 +30,9 @@
static int debug;
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
#define dprintk(args...) \
do { \
if (debug) \
@@ -98,7 +101,7 @@ static int zl10039_write(struct zl10039_state *state,
const enum zl10039_reg_addr reg, const u8 *src,
const size_t count)
{
- u8 buf[count + 1];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = state->i2c_addr,
.flags = 0,
@@ -106,6 +109,13 @@ static int zl10039_write(struct zl10039_state *state,
.len = count + 1,
};
+ if (1 + count > sizeof(buf)) {
+ printk(KERN_WARNING
+ "%s: i2c wr reg=%04x: len=%zd is too big!\n",
+ KBUILD_MODNAME, reg, count);
+ return -EINVAL;
+ }
+
dprintk("%s\n", __func__);
/* Write register address and data in one go */
buf[0] = reg;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index d18be19c96c..842654d3331 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -621,6 +621,15 @@ config VIDEO_AS3645A
This is a driver for the AS3645A and LM3555 flash controllers. It has
build in control for flash, torch and indicator LEDs.
+config VIDEO_LM3560
+ tristate "LM3560 dual flash driver support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
+ select REGMAP_I2C
+ ---help---
+ This is a driver for the lm3560 dual flash controllers. It controls
+ the flash and torch LEDs.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -646,7 +655,7 @@ config VIDEO_UPD64083
To compile this driver as a module, choose M here: the
module will be called upd64083.
-comment "Miscelaneous helper chips"
+comment "Miscellaneous helper chips"
config VIDEO_THS7303
tristate "THS7303/53 Video Amplifier"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 9f462df77b4..e03f1776f4f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
+obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 6f738d8e3a8..d45e0e3a781 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -178,7 +178,7 @@ static int adv7183_log_status(struct v4l2_subdev *sd)
adv7183_read(sd, ADV7183_VS_FIELD_CTRL_1),
adv7183_read(sd, ADV7183_VS_FIELD_CTRL_2),
adv7183_read(sd, ADV7183_VS_FIELD_CTRL_3));
- v4l2_info(sd, "adv7183: Hsync positon control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
+ v4l2_info(sd, "adv7183: Hsync position control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
adv7183_read(sd, ADV7183_HS_POS_CTRL_1),
adv7183_read(sd, ADV7183_HS_POS_CTRL_2),
adv7183_read(sd, ADV7183_HS_POS_CTRL_3));
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index aeb56c53e39..d4e15a617c3 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <media/adv7343.h>
#include <media/v4l2-async.h>
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
new file mode 100644
index 00000000000..3317a9ae396
--- /dev/null
+++ b/drivers/media/i2c/lm3560.c
@@ -0,0 +1,488 @@
+/*
+ * drivers/media/i2c/lm3560.c
+ * General device driver for TI lm3560, FLASH LED Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Contact: Daniel Jeong <gshark.jeong@gmail.com>
+ * Ldd-Mlp <ldd-mlp@list.ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+#include <media/lm3560.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+/* registers definitions */
+#define REG_ENABLE 0x10
+#define REG_TORCH_BR 0xa0
+#define REG_FLASH_BR 0xb0
+#define REG_FLASH_TOUT 0xc0
+#define REG_FLAG 0xd0
+#define REG_CONFIG1 0xe0
+
+/* Fault Mask */
+#define FAULT_TIMEOUT (1<<0)
+#define FAULT_OVERTEMP (1<<1)
+#define FAULT_SHORT_CIRCUIT (1<<2)
+
+enum led_enable {
+ MODE_SHDN = 0x0,
+ MODE_TORCH = 0x2,
+ MODE_FLASH = 0x3,
+};
+
+/* struct lm3560_flash
+ *
+ * @dev: device
+ * @pdata: platform data
+ * @regmap: reg. map for i2c
+ * @lock: mutex for serial access.
+ * @led_mode: V4L2 LED mode
+ * @ctrls_led: V4L2 controls
+ * @subdev_led: V4L2 subdev
+ */
+struct lm3560_flash {
+ struct device *dev;
+ struct lm3560_platform_data *pdata;
+ struct regmap *regmap;
+ struct mutex lock;
+
+ enum v4l2_flash_led_mode led_mode;
+ struct v4l2_ctrl_handler ctrls_led[LM3560_LED_MAX];
+ struct v4l2_subdev subdev_led[LM3560_LED_MAX];
+};
+
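+/* resolve the lm3560_flash that embeds the given ctrl handler
+ * (one handler per LED, indexed by _no) */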
+#define to_lm3560_flash(_ctrl, _no) \
+ container_of(_ctrl->handler, struct lm3560_flash, ctrls_led[_no])
+
+/* enable mode control */
+static int lm3560_mode_ctrl(struct lm3560_flash *flash)
+{
+ int rval = -EINVAL;
+
+ switch (flash->led_mode) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x03, MODE_SHDN);
+ break;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x03, MODE_TORCH);
+ break;
+ case V4L2_FLASH_LED_MODE_FLASH:
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x03, MODE_FLASH);
+ break;
+ }
+ return rval;
+}
+
+/* led1/2 enable/disable */
+static int lm3560_enable_ctrl(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, bool on)
+{
+ int rval;
+
+ if (led_no == LM3560_LED0) {
+ if (on == true)
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x08, 0x08);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x08, 0x00);
+ } else {
+ if (on == true)
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x10, 0x10);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x10, 0x00);
+ }
+ return rval;
+}
+
+/* torch1/2 brightness control */
+static int lm3560_torch_brt_ctrl(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, unsigned int brt)
+{
+ int rval;
+ u8 br_bits;
+
+ if (brt < LM3560_TORCH_BRT_MIN)
+ return lm3560_enable_ctrl(flash, led_no, false);
+ else
+ rval = lm3560_enable_ctrl(flash, led_no, true);
+
+ br_bits = LM3560_TORCH_BRT_uA_TO_REG(brt);
+ if (led_no == LM3560_LED0)
+ rval = regmap_update_bits(flash->regmap,
+ REG_TORCH_BR, 0x07, br_bits);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_TORCH_BR, 0x38, br_bits << 3);
+
+ return rval;
+}
+
+/* flash1/2 brightness control */
+static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, unsigned int brt)
+{
+ int rval;
+ u8 br_bits;
+
+ if (brt < LM3560_FLASH_BRT_MIN)
+ return lm3560_enable_ctrl(flash, led_no, false);
+ else
+ rval = lm3560_enable_ctrl(flash, led_no, true);
+
+ br_bits = LM3560_FLASH_BRT_uA_TO_REG(brt);
+ if (led_no == LM3560_LED0)
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_BR, 0x0f, br_bits);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_BR, 0xf0, br_bits << 4);
+
+ return rval;
+}
+
+/* V4L2 controls */
+static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
+{
+ struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+
+ mutex_lock(&flash->lock);
+
+ if (ctrl->id == V4L2_CID_FLASH_FAULT) {
+ int rval;
+ s32 fault = 0;
+ unsigned int reg_val;
+ rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
+ if (rval < 0)
+ return rval;
+ if (reg_val & FAULT_SHORT_CIRCUIT)
+ fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+ if (reg_val & FAULT_OVERTEMP)
+ fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+ if (reg_val & FAULT_TIMEOUT)
+ fault |= V4L2_FLASH_FAULT_TIMEOUT;
+ ctrl->cur.val = fault;
+ return 0;
+ }
+
+ mutex_unlock(&flash->lock);
+ return -EINVAL;
+}
+
+static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
+{
+ struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+ u8 tout_bits;
+ int rval = -EINVAL;
+
+ mutex_lock(&flash->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_LED_MODE:
+ flash->led_mode = ctrl->val;
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ rval = lm3560_mode_ctrl(flash);
+ break;
+
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ rval = regmap_update_bits(flash->regmap,
+ REG_CONFIG1, 0x04, (ctrl->val) << 2);
+ if (rval < 0)
+ goto err_out;
+ break;
+
+ case V4L2_CID_FLASH_STROBE:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ return -EBUSY;
+ flash->led_mode = V4L2_FLASH_LED_MODE_FLASH;
+ rval = lm3560_mode_ctrl(flash);
+ break;
+
+ case V4L2_CID_FLASH_STROBE_STOP:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ return -EBUSY;
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+ rval = lm3560_mode_ctrl(flash);
+ break;
+
+ case V4L2_CID_FLASH_TIMEOUT:
+ tout_bits = LM3560_FLASH_TOUT_ms_TO_REG(ctrl->val);
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_TOUT, 0x1f, tout_bits);
+ break;
+
+ case V4L2_CID_FLASH_INTENSITY:
+ rval = lm3560_flash_brt_ctrl(flash, led_no, ctrl->val);
+ break;
+
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ rval = lm3560_torch_brt_ctrl(flash, led_no, ctrl->val);
+ break;
+ }
+
+err_out:
+ mutex_unlock(&flash->lock);
+ return rval;
+}
+
+static int lm3560_led1_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_get_ctrl(ctrl, LM3560_LED1);
+}
+
+static int lm3560_led1_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_set_ctrl(ctrl, LM3560_LED1);
+}
+
+static int lm3560_led0_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_get_ctrl(ctrl, LM3560_LED0);
+}
+
+static int lm3560_led0_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_set_ctrl(ctrl, LM3560_LED0);
+}
+
+static const struct v4l2_ctrl_ops lm3560_led_ctrl_ops[LM3560_LED_MAX] = {
+ [LM3560_LED0] = {
+ .g_volatile_ctrl = lm3560_led0_get_ctrl,
+ .s_ctrl = lm3560_led0_set_ctrl,
+ },
+ [LM3560_LED1] = {
+ .g_volatile_ctrl = lm3560_led1_get_ctrl,
+ .s_ctrl = lm3560_led1_set_ctrl,
+ }
+};
+
+static int lm3560_init_controls(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no)
+{
+ struct v4l2_ctrl *fault;
+ u32 max_flash_brt = flash->pdata->max_flash_brt[led_no];
+ u32 max_torch_brt = flash->pdata->max_torch_brt[led_no];
+ struct v4l2_ctrl_handler *hdl = &flash->ctrls_led[led_no];
+ const struct v4l2_ctrl_ops *ops = &lm3560_led_ctrl_ops[led_no];
+
+ v4l2_ctrl_handler_init(hdl, 8);
+ /* flash mode */
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_TORCH, ~0x7,
+ V4L2_FLASH_LED_MODE_NONE);
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+
+ /* flash source */
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_STROBE_SOURCE,
+ 0x1, ~0x3, V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+
+ /* flash strobe */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+ /* flash strobe stop */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+
+ /* flash strobe timeout */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TIMEOUT,
+ LM3560_FLASH_TOUT_MIN,
+ flash->pdata->max_flash_timeout,
+ LM3560_FLASH_TOUT_STEP,
+ flash->pdata->max_flash_timeout);
+
+ /* flash brt */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_INTENSITY,
+ LM3560_FLASH_BRT_MIN, max_flash_brt,
+ LM3560_FLASH_BRT_STEP, max_flash_brt);
+
+ /* torch brt */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY,
+ LM3560_TORCH_BRT_MIN, max_torch_brt,
+ LM3560_TORCH_BRT_STEP, max_torch_brt);
+
+ /* fault */
+ fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0,
+ V4L2_FLASH_FAULT_OVER_VOLTAGE
+ | V4L2_FLASH_FAULT_OVER_TEMPERATURE
+ | V4L2_FLASH_FAULT_SHORT_CIRCUIT
+ | V4L2_FLASH_FAULT_TIMEOUT, 0, 0);
+ if (fault != NULL)
+ fault->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ if (hdl->error)
+ return hdl->error;
+
+ flash->subdev_led[led_no].ctrl_handler = hdl;
+ return 0;
+}
+
+/* initialize device */
+static const struct v4l2_subdev_ops lm3560_ops = {
+ .core = NULL,
+};
+
+static const struct regmap_config lm3560_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xFF,
+};
+
+static int lm3560_subdev_init(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, char *led_name)
+{
+ struct i2c_client *client = to_i2c_client(flash->dev);
+ int rval;
+
+ v4l2_i2c_subdev_init(&flash->subdev_led[led_no], client, &lm3560_ops);
+ flash->subdev_led[led_no].flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(flash->subdev_led[led_no].name, led_name);
+ rval = lm3560_init_controls(flash, led_no);
+ if (rval)
+ goto err_out;
+ rval = media_entity_init(&flash->subdev_led[led_no].entity, 0, NULL, 0);
+ if (rval < 0)
+ goto err_out;
+ flash->subdev_led[led_no].entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+
+ return rval;
+
+err_out:
+ v4l2_ctrl_handler_free(&flash->ctrls_led[led_no]);
+ return rval;
+}
+
+static int lm3560_init_device(struct lm3560_flash *flash)
+{
+ int rval;
+ unsigned int reg_val;
+
+ /* set peak current */
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_TOUT, 0x60, flash->pdata->peak);
+ if (rval < 0)
+ return rval;
+ /* output disable */
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+ rval = lm3560_mode_ctrl(flash);
+ if (rval < 0)
+ return rval;
+ /* Reset faults */
+ rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
+ return rval;
+}
+
+static int lm3560_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct lm3560_flash *flash;
+ struct lm3560_platform_data *pdata = dev_get_platdata(&client->dev);
+ int rval;
+
+ flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->regmap = devm_regmap_init_i2c(client, &lm3560_regmap);
+ if (IS_ERR(flash->regmap)) {
+ rval = PTR_ERR(flash->regmap);
+ return rval;
+ }
+
+ /* if there is no platform data, use chip default value */
+ if (pdata == NULL) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct lm3560_platform_data), GFP_KERNEL);
+ if (pdata == NULL)
+ return -ENOMEM;
+ pdata->peak = LM3560_PEAK_3600mA;
+ pdata->max_flash_timeout = LM3560_FLASH_TOUT_MAX;
+ /* led 1 */
+ pdata->max_flash_brt[LM3560_LED0] = LM3560_FLASH_BRT_MAX;
+ pdata->max_torch_brt[LM3560_LED0] = LM3560_TORCH_BRT_MAX;
+ /* led 2 */
+ pdata->max_flash_brt[LM3560_LED1] = LM3560_FLASH_BRT_MAX;
+ pdata->max_torch_brt[LM3560_LED1] = LM3560_TORCH_BRT_MAX;
+ }
+ flash->pdata = pdata;
+ flash->dev = &client->dev;
+ mutex_init(&flash->lock);
+
+ rval = lm3560_subdev_init(flash, LM3560_LED0, "lm3560-led0");
+ if (rval < 0)
+ return rval;
+
+ rval = lm3560_subdev_init(flash, LM3560_LED1, "lm3560-led1");
+ if (rval < 0)
+ return rval;
+
+ rval = lm3560_init_device(flash);
+ if (rval < 0)
+ return rval;
+
+ return 0;
+}
+
+static int lm3560_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash,
+ subdev_led[LM3560_LED_MAX]);
+ unsigned int i;
+
+ for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) {
+ v4l2_device_unregister_subdev(&flash->subdev_led[i]);
+ v4l2_ctrl_handler_free(&flash->ctrls_led[i]);
+ media_entity_cleanup(&flash->subdev_led[i].entity);
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3560_id_table[] = {
+ {LM3560_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3560_id_table);
+
+static struct i2c_driver lm3560_i2c_driver = {
+ .driver = {
+ .name = LM3560_NAME,
+ .pm = NULL,
+ },
+ .probe = lm3560_probe,
+ .remove = lm3560_remove,
+ .id_table = lm3560_id_table,
+};
+
+module_i2c_driver(lm3560_i2c_driver);
+
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>");
+MODULE_DESCRIPTION("Texas Instruments LM3560 LED flash driver");
+MODULE_LICENSE("GPL");
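
For readers wiring this driver up by hand, a minimal board-code sketch follows. It is not part of the patch: the I2C bus number, the 0x53 slave address and the exact layout of struct lm3560_platform_data are assumptions for illustration; only the field names and LM3560_* constants come from the driver above.

/*
 * Minimal sketch (not part of the patch): board code supplying lm3560
 * platform data instead of relying on the chip defaults used in probe.
 * The bus number and the 0x53 slave address are assumptions.
 */
#include <linux/i2c.h>
#include <linux/init.h>
#include <media/lm3560.h>

static struct lm3560_platform_data board_lm3560_pdata = {
	.peak			= LM3560_PEAK_3600mA,
	.max_flash_timeout	= LM3560_FLASH_TOUT_MAX,
	.max_flash_brt		= {
		[LM3560_LED0] = LM3560_FLASH_BRT_MAX,
		[LM3560_LED1] = LM3560_FLASH_BRT_MAX,
	},
	.max_torch_brt		= {
		[LM3560_LED0] = LM3560_TORCH_BRT_MAX,
		[LM3560_LED1] = LM3560_TORCH_BRT_MAX,
	},
};

static struct i2c_board_info board_i2c2_devices[] __initdata = {
	{
		I2C_BOARD_INFO(LM3560_NAME, 0x53),
		.platform_data = &board_lm3560_pdata,
	},
};

/* registered from the board's init code, e.g.: */
/* i2c_register_board_info(2, board_i2c2_devices, ARRAY_SIZE(board_i2c2_devices)); */
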
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index b76ec0e7e68..6fec9384d86 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1581,7 +1581,7 @@ static int s5c73m3_probe(struct i2c_client *client,
oif_sd = &state->oif_sd;
v4l2_subdev_init(sd, &s5c73m3_subdev_ops);
- sd->owner = client->driver->driver.owner;
+ sd->owner = client->dev.driver->owner;
v4l2_set_subdevdata(sd, state);
strlcpy(sd->name, "S5C73M3", sizeof(sd->name));
@@ -1651,7 +1651,7 @@ static int s5c73m3_probe(struct i2c_client *client,
if (ret < 0)
goto out_err;
- v4l2_info(sd, "%s: completed succesfully\n", __func__);
+ v4l2_info(sd, "%s: completed successfully\n", __func__);
return 0;
out_err:
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index 1d384a371b4..5b915936c3f 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -451,7 +451,9 @@ static int imx074_probe(struct i2c_client *client,
if (ret < 0)
goto eprobe;
- return v4l2_async_register_subdev(&priv->subdev);
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (!ret)
+ return 0;
epwrinit:
eprobe:
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index e968c3fdbd9..bc74224503e 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -371,7 +371,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
alt->com13 = OV9640_COM13_RGB_AVG;
alt->com15 = OV9640_COM15_RGB_565;
break;
- };
+ }
}
/* Setup registers according to resolution and color encoding */
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index d9f65d7e3e5..04139eec8c4 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 91f3dd4cda1..83d85df4853 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -35,6 +35,7 @@
#include <linux/videodev2.h>
#include <linux/module.h>
#include <linux/v4l2-mediabus.h>
+#include <linux/of.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 24a08fa7e32..912e1cccdd1 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
#include <media/v4l2-async.h>
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 447afbd904a..8b5e0b3a92a 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -319,7 +319,6 @@ static int flexcop_pci_init(struct flexcop_pci *fc_pci)
err_pci_iounmap:
pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
- pci_set_drvdata(fc_pci->pdev, NULL);
err_pci_release_regions:
pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
@@ -332,7 +331,6 @@ static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
if (fc_pci->init_state & FC_PCI_INIT) {
free_irq(fc_pci->pdev->irq, fc_pci);
pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
- pci_set_drvdata(fc_pci->pdev, NULL);
pci_release_regions(fc_pci->pdev);
pci_disable_device(fc_pci->pdev);
}
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 66eb0baab0e..d0c281f41a0 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -488,8 +488,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
btwrite(0, BT848_INT_MASK);
result = request_irq(bt->irq, bt878_irq,
- IRQF_SHARED | IRQF_DISABLED, "bt878",
- (void *) bt);
+ IRQF_SHARED, "bt878", (void *) bt);
if (result == -EINVAL) {
printk(KERN_ERR "bt878(%d): Bad irq number or handler\n",
bt878_num);
@@ -563,7 +562,6 @@ static void bt878_remove(struct pci_dev *pci_dev)
bt->shutdown = 1;
bt878_mem_free(bt);
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
return;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index c6532de0eac..a3b1ee9c00d 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4086,7 +4086,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
/* disable irqs, register irq handler */
btwrite(0, BT848_INT_MASK);
result = request_irq(btv->c.pci->irq, bttv_irq,
- IRQF_SHARED | IRQF_DISABLED, btv->c.v4l2_dev.name, (void *)btv);
+ IRQF_SHARED, btv->c.v4l2_dev.name, (void *)btv);
if (result < 0) {
pr_err("%d: can't get IRQ %d\n",
bttv_num, btv->c.pci->irq);
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 004d8ace501..c1f8cc6f14b 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -324,23 +324,24 @@ static void cx18_eeprom_dump(struct cx18 *cx, unsigned char *eedata, int len)
/* Hauppauge card? get values from tveeprom */
void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
{
- struct i2c_client c;
+ struct i2c_client *c;
u8 eedata[256];
- memset(&c, 0, sizeof(c));
- strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
- c.adapter = &cx->i2c_adap[0];
- c.addr = 0xA0 >> 1;
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+ strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
+ c->adapter = &cx->i2c_adap[0];
+ c->addr = 0xa0 >> 1;
memset(tv, 0, sizeof(*tv));
- if (tveeprom_read(&c, eedata, sizeof(eedata)))
- return;
+ if (tveeprom_read(c, eedata, sizeof(eedata)))
+ goto ret;
switch (cx->card->type) {
case CX18_CARD_HVR_1600_ESMT:
case CX18_CARD_HVR_1600_SAMSUNG:
case CX18_CARD_HVR_1600_S5H1411:
- tveeprom_hauppauge_analog(&c, tv, eedata);
+ tveeprom_hauppauge_analog(c, tv, eedata);
break;
case CX18_CARD_YUAN_MPC718:
case CX18_CARD_GOTVIEW_PCI_DVD3:
@@ -354,6 +355,9 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
cx18_eeprom_dump(cx, eedata, sizeof(eedata));
break;
}
+
+ret:
+ kfree(c);
}
static void cx18_process_eeprom(struct cx18 *cx)
@@ -1031,8 +1035,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* Register IRQ */
retval = request_irq(cx->pci_dev->irq, cx18_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
- cx->v4l2_dev.name, (void *)cx);
+ IRQF_SHARED, cx->v4l2_dev.name, (void *)cx);
if (retval) {
CX18_ERR("Failed to register irq %d\n", retval);
goto free_i2c;
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index 5104c802f72..d1dcb1d2e08 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -23,6 +23,7 @@ config VIDEO_CX23885
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CX24117 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 7344849183a..16fa7ea4d4a 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -26,6 +26,10 @@
#include "cx23885.h"
#include "cimax2.h"
#include "dvb_ca_en50221.h"
+
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
+-----------+
@@ -125,7 +129,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
- u8 buffer[len + 1];
+ u8 buffer[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = addr,
@@ -134,6 +138,13 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
.len = len + 1
};
+ if (1 + len > sizeof(buffer)) {
+ printk(KERN_WARNING
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buffer[0] = reg;
memcpy(&buffer[1], buf, len);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 6a71a965e75..79f20c8c842 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -223,6 +223,39 @@ struct cx23885_board cx23885_boards[] = {
.name = "Leadtek Winfast PxDVR3200 H",
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200] = {
+ .name = "Leadtek Winfast PxPVR2200",
+ .porta = CX23885_ANALOG_VIDEO,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61,
+ .tuner_bus = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN2_CH1 |
+ CX25840_VIN5_CH2,
+ .amux = CX25840_AUDIO8,
+ .gpio0 = 0x704040,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_COMPOSITE1,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0x704040,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_SVIDEO_LUMA3 |
+ CX25840_SVIDEO_CHROMA4,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0x704040,
+ }, {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_VIN7_CH1 |
+ CX25840_VIN6_CH2 |
+ CX25840_VIN8_CH3 |
+ CX25840_COMPONENT_ON,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0x704040,
+ } },
+ },
[CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
.name = "Leadtek Winfast PxDVR3200 H XC4000",
.porta = CX23885_ANALOG_VIDEO,
@@ -259,6 +292,16 @@ struct cx23885_board cx23885_boards[] = {
.name = "TurboSight TBS 6920",
.portb = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_TBS_6980] = {
+ .name = "TurboSight TBS 6980",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_TBS_6981] = {
+ .name = "TurboSight TBS 6981",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
[CX23885_BOARD_TEVII_S470] = {
.name = "TeVii S470",
.portb = CX23885_MPEG_DVB,
@@ -688,6 +731,10 @@ struct cx23885_subid cx23885_subids[] = {
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
}, {
.subvendor = 0x107d,
+ .subdevice = 0x6f21,
+ .card = CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200,
+ }, {
+ .subvendor = 0x107d,
.subdevice = 0x6f39,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
}, {
@@ -699,6 +746,14 @@ struct cx23885_subid cx23885_subids[] = {
.subdevice = 0x8888,
.card = CX23885_BOARD_TBS_6920,
}, {
+ .subvendor = 0x6980,
+ .subdevice = 0x8888,
+ .card = CX23885_BOARD_TBS_6980,
+ }, {
+ .subvendor = 0x6981,
+ .subdevice = 0x8888,
+ .card = CX23885_BOARD_TBS_6981,
+ }, {
.subvendor = 0xd470,
.subdevice = 0x9022,
.card = CX23885_BOARD_TEVII_S470,
@@ -1023,6 +1078,35 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
dev->name, tv.model);
}
+/* Some TBS cards require initializing a chip over a bit-banged SPI attached
+ to the cx23885 GPIOs. If this chip doesn't get initialized, the demod
+ doesn't respond to any command. */
+static void tbs_card_init(struct cx23885_dev *dev)
+{
+ int i;
+ const u8 buf[] = {
+ 0xe0, 0x06, 0x66, 0x33, 0x65,
+ 0x01, 0x17, 0x06, 0xde};
+
+ switch (dev->board) {
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ cx_set(GP0_IO, 0x00070007);
+ usleep_range(1000, 10000);
+ cx_clear(GP0_IO, 2);
+ usleep_range(1000, 10000);
+ for (i = 0; i < 9 * 8; i++) {
+ cx_clear(GP0_IO, 7);
+ usleep_range(1000, 10000);
+ cx_set(GP0_IO,
+ ((buf[i >> 3] >> (7 - (i & 7))) & 1) | 4);
+ usleep_range(1000, 10000);
+ }
+ cx_set(GP0_IO, 7);
+ break;
+ }
+}
+
int cx23885_tuner_callback(void *priv, int component, int command, int arg)
{
struct cx23885_tsport *port = priv;
@@ -1043,6 +1127,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
@@ -1208,6 +1293,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
@@ -1225,6 +1311,8 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00040004);
break;
case CX23885_BOARD_TBS_6920:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
case CX23885_BOARD_PROF_8000:
cx_write(MC417_CTL, 0x00000036);
cx_write(MC417_OEN, 0x00001000);
@@ -1473,6 +1561,8 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
@@ -1516,6 +1606,8 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
/* sd_ir is a duplicate pointer to the AV Core, just clear it */
dev->sd_ir = NULL;
@@ -1561,6 +1653,8 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
@@ -1676,6 +1770,16 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ tbs_card_init(dev);
+ break;
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
case CX23885_BOARD_MYGICA_X8507:
@@ -1704,6 +1808,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
@@ -1733,6 +1838,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
@@ -1752,6 +1858,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_AVERMEDIA_HC81R:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 9f63d93239e..edcd79db1e4 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2129,7 +2129,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
}
err = request_irq(pci_dev->irq, cx23885_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 971e4ff1b87..05492053b47 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -51,6 +51,7 @@
#include "stv6110.h"
#include "lnbh24.h"
#include "cx24116.h"
+#include "cx24117.h"
#include "cimax2.h"
#include "lgs8gxx.h"
#include "netup-eeprom.h"
@@ -461,6 +462,10 @@ static struct cx24116_config tbs_cx24116_config = {
.demod_address = 0x55,
};
+static struct cx24117_config tbs_cx24117_config = {
+ .demod_address = 0x55,
+};
+
static struct ds3000_config tevii_ds3000_config = {
.demod_address = 0x68,
};
@@ -1044,6 +1049,25 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend->ops.set_voltage = f300_set_voltage;
break;
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ i2c_bus = &dev->i2c_bus[1];
+
+ switch (port->nr) {
+ /* PORT B */
+ case 1:
+ fe0->dvb.frontend = dvb_attach(cx24117_attach,
+ &tbs_cx24117_config,
+ &i2c_bus->i2c_adap);
+ break;
+ /* PORT C */
+ case 2:
+ fe0->dvb.frontend = dvb_attach(cx24117_attach,
+ &tbs_cx24117_config,
+ &i2c_bus->i2c_adap);
+ break;
+ }
+ break;
case CX23885_BOARD_TEVII_S470:
i2c_bus = &dev->i2c_bus[1];
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 7875dfbe09f..8a49e7c9edd 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -90,6 +90,8 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
/*
* The only boards we handle right now. However other boards
* using the CX2388x integrated IR controller should be similar
@@ -168,6 +170,8 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
@@ -298,6 +302,14 @@ int cx23885_input_init(struct cx23885_dev *dev)
/* A guess at the remote */
rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
break;
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_BIT_ALL;
+ /* A guess at the remote */
+ rc_map = RC_MAP_TBS_NEC;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 161686832b2..7891f34157d 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -1865,7 +1865,8 @@ int cx23885_video_register(struct cx23885_dev *dev)
v4l2_subdev_call(sd, tuner, s_type_addr, &tun_setup);
- if (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) {
+ if ((dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) ||
+ (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200)) {
struct xc2028_ctrl ctrl = {
.fname = XC2028_DEFAULT_FIRMWARE,
.max_len = 64
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 038caf53908..0fa4048ab87 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -93,6 +93,9 @@
#define CX23885_BOARD_PROF_8000 37
#define CX23885_BOARD_HAUPPAUGE_HVR4400 38
#define CX23885_BOARD_AVERMEDIA_HC81R 39
+#define CX23885_BOARD_TBS_6981 40
+#define CX23885_BOARD_TBS_6980 41
+#define CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200 42
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx25821/cx25821-cards.c b/drivers/media/pci/cx25821/cx25821-cards.c
index 3b409feb03d..f2ebc989b30 100644
--- a/drivers/media/pci/cx25821/cx25821-cards.c
+++ b/drivers/media/pci/cx25821/cx25821-cards.c
@@ -45,5 +45,3 @@ struct cx25821_board cx25821_boards[] = {
},
};
-
-const unsigned int cx25821_bcount = ARRAY_SIZE(cx25821_boards);
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.c b/drivers/media/pci/cx25821/cx25821-medusa-video.c
index 22fa04415cc..43bdfa4dfba 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/pci/cx25821/cx25821-medusa-video.c
@@ -438,7 +438,7 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
decoder_count = decoder_select + 1;
} else {
decoder = 0;
- decoder_count = _num_decoders;
+ decoder_count = dev->_max_num_decoders;
}
switch (width) {
@@ -506,8 +506,6 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
break;
}
- _display_field_cnt[decoder] = duration;
-
/* update hardware */
fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp);
@@ -667,8 +665,6 @@ int medusa_video_init(struct cx25821_dev *dev)
int ret_val = 0;
int i = 0;
- _num_decoders = dev->_max_num_decoders;
-
/* disable Auto source selection on all video decoders */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFF0FF;
@@ -685,8 +681,14 @@ int medusa_video_init(struct cx25821_dev *dev)
if (ret_val < 0)
goto error;
- for (i = 0; i < _num_decoders; i++)
- medusa_set_decoderduration(dev, i, _display_field_cnt[i]);
+ /*
+ * FIXME: due to a coding bug the duration was always 0. It's
+ * likely that it really should be something else, but due to the
+ * lack of documentation I have no idea what it should be. For
+ * now just fill in 0 as the duration.
+ */
+ for (i = 0; i < dev->_max_num_decoders; i++)
+ medusa_set_decoderduration(dev, i, 0);
/* Select monitor as DENC A input, power up the DAC */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp);
@@ -717,7 +719,7 @@ int medusa_video_init(struct cx25821_dev *dev)
/* Turn on all of the data out and control output pins. */
value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp);
value &= 0xFEF0FE00;
- if (_num_decoders == MAX_DECODERS) {
+ if (dev->_max_num_decoders == MAX_DECODERS) {
/*
* Note: The octal board does not support control pins(bit16-19)
* These bits are ignored in the octal board.
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.h b/drivers/media/pci/cx25821/cx25821-medusa-video.h
index 6175e096185..8bf602ff27b 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-video.h
+++ b/drivers/media/pci/cx25821/cx25821-medusa-video.h
@@ -40,10 +40,4 @@
#define CONTRAST_DEFAULT 5000
#define HUE_DEFAULT 5000
-unsigned short _num_decoders;
-unsigned short _num_cameras;
-
-unsigned int _video_standard;
-int _display_field_cnt[MAX_DECODERS];
-
#endif
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 88ffef410c5..1f43be0b04c 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -159,10 +159,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32
* For the upstream video channel, the risc engine will enable
* the FIFO. */
if (fifo_enable && line == 3) {
- *(rp++) = RISC_WRITECR;
- *(rp++) = sram_ch->dma_ctl;
- *(rp++) = FLD_VID_FIFO_EN;
- *(rp++) = 0x00000001;
+ *(rp++) = cpu_to_le32(RISC_WRITECR);
+ *(rp++) = cpu_to_le32(sram_ch->dma_ctl);
+ *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN);
+ *(rp++) = cpu_to_le32(0x00000001);
}
}
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index aba5b1c649e..400eb1c42d3 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -834,7 +834,7 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
/* get irq */
err = request_irq(chip->pci->irq, cx8801_irq,
- IRQF_SHARED | IRQF_DISABLED, chip->core->name, chip);
+ IRQF_SHARED, chip->core->name, chip);
if (err < 0) {
dprintk(0, "%s: can't get IRQ %d\n",
chip->core->name, chip->pci->irq);
@@ -935,8 +935,6 @@ static void cx88_audio_finidev(struct pci_dev *pci)
snd_card_free((void *)card);
- pci_set_drvdata(pci, NULL);
-
devno--;
}
@@ -951,27 +949,4 @@ static struct pci_driver cx88_audio_pci_driver = {
.remove = cx88_audio_finidev,
};
-/****************************************************************************
- LINUX MODULE INIT
- ****************************************************************************/
-
-/*
- * module init
- */
-static int __init cx88_audio_init(void)
-{
- printk(KERN_INFO "cx2388x alsa driver version %s loaded\n",
- CX88_VERSION);
- return pci_register_driver(&cx88_audio_pci_driver);
-}
-
-/*
- * module remove
- */
-static void __exit cx88_audio_fini(void)
-{
- pci_unregister_driver(&cx88_audio_pci_driver);
-}
-
-module_init(cx88_audio_init);
-module_exit(cx88_audio_fini);
+module_pci_driver(cx88_audio_pci_driver);
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 2d3507eb489..74b7b8614c2 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -499,7 +499,7 @@ static int cx8802_init_common(struct cx8802_dev *dev)
/* get irq */
err = request_irq(dev->pci->irq, cx8802_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->core->name, dev);
+ IRQF_SHARED, dev->core->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->core->name, dev->pci->irq);
@@ -520,7 +520,6 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
/* unregister stuff */
free_irq(dev->pci->irq, dev);
- pci_set_drvdata(dev->pci, NULL);
/* free memory */
btcx_riscmem_free(dev->pci,&dev->mpegq.stopper);
@@ -903,20 +902,8 @@ static struct pci_driver cx8802_pci_driver = {
.remove = cx8802_remove,
};
-static int __init cx8802_init(void)
-{
- printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n",
- CX88_VERSION);
- return pci_register_driver(&cx8802_pci_driver);
-}
-
-static void __exit cx8802_fini(void)
-{
- pci_unregister_driver(&cx8802_pci_driver);
-}
+module_pci_driver(cx8802_pci_driver);
-module_init(cx8802_init);
-module_exit(cx8802_fini);
EXPORT_SYMBOL(cx8802_buf_prepare);
EXPORT_SYMBOL(cx8802_buf_queue);
EXPORT_SYMBOL(cx8802_cancel_buffers);
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index ecf21d9f1f3..ed8cb9037b6 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1738,7 +1738,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* get irq */
err = request_irq(pci_dev->irq, cx8800_irq,
- IRQF_SHARED | IRQF_DISABLED, core->name, dev);
+ IRQF_SHARED, core->name, dev);
if (err < 0) {
printk(KERN_ERR "%s/0: can't get IRQ %d\n",
core->name,pci_dev->irq);
@@ -1922,7 +1922,6 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
free_irq(pci_dev->irq, dev);
cx8800_unregister_video(dev);
- pci_set_drvdata(pci_dev, NULL);
/* free memory */
btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
@@ -2039,17 +2038,4 @@ static struct pci_driver cx8800_pci_driver = {
#endif
};
-static int __init cx8800_init(void)
-{
- printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n",
- CX88_VERSION);
- return pci_register_driver(&cx8800_pci_driver);
-}
-
-static void __exit cx8800_fini(void)
-{
- pci_unregister_driver(&cx8800_pci_driver);
-}
-
-module_init(cx8800_init);
-module_exit(cx8800_fini);
+module_pci_driver(cx8800_pci_driver);
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 36e34522b9a..9375f30d9a8 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1544,7 +1544,7 @@ static void ddb_unmap(struct ddb *dev)
static void ddb_remove(struct pci_dev *pdev)
{
- struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
+ struct ddb *dev = pci_get_drvdata(pdev);
ddb_ports_detach(dev);
ddb_i2c_release(dev);
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index ab797fe466d..e60ac35fc10 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -1178,7 +1178,6 @@ err_pci_release_regions:
err_pci_disable_device:
pci_disable_device(pdev);
err_kfree:
- pci_set_drvdata(pdev, NULL);
kfree(dev);
return ret;
}
@@ -1202,8 +1201,7 @@ static void dm1105_remove(struct pci_dev *pdev)
dvb_dmxdev_release(&dev->dmxdev);
dvb_dmx_release(dvbdemux);
dvb_unregister_adapter(dvb_adapter);
- if (&dev->i2c_adap)
- i2c_del_adapter(&dev->i2c_adap);
+ i2c_del_adapter(&dev->i2c_adap);
dm1105_hw_exit(dev);
synchronize_irq(pdev->irq);
@@ -1211,7 +1209,6 @@ static void dm1105_remove(struct pci_dev *pdev)
pci_iounmap(pdev, dev->io_mem);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
dm1105_devcount--;
kfree(dev);
}
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index c08ae3eb955..802642d2664 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1261,7 +1261,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* Register IRQ */
retval = request_irq(itv->pdev->irq, ivtv_irq_handler,
- IRQF_SHARED | IRQF_DISABLED, itv->v4l2_dev.name, (void *)itv);
+ IRQF_SHARED, itv->v4l2_dev.name, (void *)itv);
if (retval) {
IVTV_ERR("Failed to register irq %d\n", retval);
goto free_i2c;
diff --git a/drivers/media/pci/mantis/mantis_pci.c b/drivers/media/pci/mantis/mantis_pci.c
index a846036ea02..9e89e045213 100644
--- a/drivers/media/pci/mantis/mantis_pci.c
+++ b/drivers/media/pci/mantis/mantis_pci.c
@@ -143,7 +143,6 @@ fail1:
fail0:
dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret);
- pci_set_drvdata(pdev, NULL);
return ret;
}
EXPORT_SYMBOL_GPL(mantis_pci_init);
@@ -161,7 +160,6 @@ void mantis_pci_exit(struct mantis_pci *mantis)
}
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
EXPORT_SYMBOL_GPL(mantis_pci_exit);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 2381b05432e..54d5c821007 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1698,7 +1698,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
meye.mchip_irq = pcidev->irq;
if (request_irq(meye.mchip_irq, meye_irq,
- IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) {
+ IRQF_SHARED, "meye", meye_irq)) {
v4l2_err(v4l2_dev, "request_irq failed\n");
goto outreqirq;
}
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 37ebc42392a..970e8330852 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -1622,7 +1622,7 @@ static void ngene_unlink(struct ngene *dev)
void ngene_shutdown(struct pci_dev *pdev)
{
- struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev);
+ struct ngene *dev = pci_get_drvdata(pdev);
if (!dev || !shutdown_workaround)
return;
@@ -1648,7 +1648,6 @@ void ngene_remove(struct pci_dev *pdev)
cxd_detach(dev);
ngene_stop(dev);
ngene_release_buffers(dev);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
@@ -1702,6 +1701,5 @@ fail1:
ngene_release_buffers(dev);
fail0:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
return stat;
}
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 49382850005..8164d74b46a 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -736,7 +736,6 @@ err_pci_release_regions:
err_pci_disable_device:
pci_disable_device(pdev);
err_kfree:
- pci_set_drvdata(pdev, NULL);
kfree(pluto);
goto out;
}
@@ -765,7 +764,6 @@ static void pluto2_remove(struct pci_dev *pdev)
pci_iounmap(pdev, pluto->io_mem);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(pluto);
}
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index 75ce14229e0..db887b0c37b 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1076,7 +1076,6 @@ static void pt1_remove(struct pci_dev *pdev)
pt1_update_power(pt1);
pt1_cleanup_adapters(pt1);
i2c_del_adapter(&pt1->i2c_adap);
- pci_set_drvdata(pdev, NULL);
kfree(pt1);
pci_iounmap(pdev, regs);
pci_release_regions(pdev);
@@ -1198,7 +1197,6 @@ err_i2c_del_adapter:
err_pt1_cleanup_adapters:
pt1_cleanup_adapters(pt1);
err_kfree:
- pci_set_drvdata(pdev, NULL);
kfree(pt1);
err_pci_iounmap:
pci_iounmap(pdev, regs);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dbcdfbf8aed..dd67c8a400c 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1096,7 +1096,7 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
err = request_irq(dev->pci->irq, saa7134_alsa_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name,
+ IRQF_SHARED, dev->name,
(void*) &dev->dmasound);
if (err < 0) {
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 45f0aca597a..27d7ee709c5 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -992,7 +992,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index d37ee37aaef..57ef5456f1e 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1232,7 +1232,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
}
err = request_irq(pci_dev->irq, saa7164_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name,
pci_dev->irq);
@@ -1439,7 +1439,6 @@ static void saa7164_finidev(struct pci_dev *pci_dev)
/* unregister stuff */
free_irq(pci_dev->irq, dev);
- pci_set_drvdata(pci_dev, NULL);
mutex_lock(&devlist);
list_del(&dev->devlist);
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index f1cbfe52698..6299d5dadb8 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -22,7 +22,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*
- * the project's page is at http://www.linuxtv.org/
+ * the project's page is at http://www.linuxtv.org/
*/
/* for debugging ARM communication: */
@@ -40,6 +40,14 @@
#define _NOHANDSHAKE
+/*
+ * Max transfer size done by av7110_fw_cmd()
+ *
+ * The maximum size passed to this function is 6 bytes. The buffer also
+ * uses two additional ones for type and size. So, 8 bytes is enough.
+ */
+#define MAX_XFER_SIZE 8
+
/****************************************************************************
* DEBI functions
****************************************************************************/
@@ -488,11 +496,18 @@ static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...)
{
va_list args;
- u16 buf[num + 2];
+ u16 buf[MAX_XFER_SIZE];
int i, ret;
// dprintk(4, "%p\n", av7110);
+ if (2 + num > ARRAY_SIZE(buf)) {
+ printk(KERN_WARNING
+ "%s: %s len=%d is too big!\n",
+ KBUILD_MODNAME, __func__, num);
+ return -EINVAL;
+ }
+
buf[0] = ((type << 8) | com);
buf[1] = num;
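
Both this hunk and the cimax2.c change earlier replace a variable-length array on the stack with a fixed buffer plus an explicit bound check. One detail worth noting: because buf here holds u16 words, the bound has to be an element count rather than a byte count. A sketch of the pattern with illustrative names (not part of the patch):

#include <linux/errno.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

#define EXAMPLE_MAX_XFER 8

/*
 * Illustrative only: the limit is expressed in array elements via
 * ARRAY_SIZE(), since sizeof() on a u16 buffer would return bytes
 * (twice the number of slots) and let oversized requests through.
 */
static int example_fill(const u16 *args, unsigned int num)
{
	u16 buf[EXAMPLE_MAX_XFER];
	unsigned int i;

	if (2 + num > ARRAY_SIZE(buf))	/* two header words + payload */
		return -EINVAL;

	buf[0] = 0;	/* type/command word, placeholder */
	buf[1] = num;
	for (i = 0; i < num; i++)
		buf[2 + i] = args[i];

	/* ... pass buf (2 + num words) on to the transfer routine ... */
	return 0;
}
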
diff --git a/drivers/media/pci/zoran/Kconfig b/drivers/media/pci/zoran/Kconfig
index 26ca8702e33..39ec35bd21a 100644
--- a/drivers/media/pci/zoran/Kconfig
+++ b/drivers/media/pci/zoran/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_ZORAN
tristate "Zoran ZR36057/36067 Video For Linux"
depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS
+ depends on !ALPHA
help
Say Y for support for MJPEG capture cards based on the Zoran
36057/36067 PCI controller chipset. This includes the Iomega
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 923d59a321f..cec5b7553f2 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -1293,7 +1293,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
result = request_irq(zr->pci_dev->irq, zoran_irq,
- IRQF_SHARED | IRQF_DISABLED, ZR_DEVNAME(zr), zr);
+ IRQF_SHARED, ZR_DEVNAME(zr), zr);
if (result < 0) {
if (result == -EINVAL) {
dprintk(1,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index c7caf94621b..d7f0249e405 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_OMAP3_DEBUG
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on (PLAT_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ depends on (ARCH_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
select VIDEOBUF2_DMA_CONTIG
---help---
This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
@@ -143,6 +143,7 @@ if V4L_MEM2MEM_DRIVERS
config VIDEO_CODA
tristate "Chips&Media Coda multi-standard codec IP"
depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC
+ select SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -212,7 +213,7 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
@@ -220,6 +221,22 @@ config VIDEO_RENESAS_VSP1
To compile this driver as a module, choose M here: the module
will be called vsp1.
+config VIDEO_TI_VPE
+ tristate "TI VPE (Video Processing Engine) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ Support for the TI VPE (Video Processing Engine) block
+ found on the DRA7XX SoC.
+
+config VIDEO_TI_VPE_DEBUG
+ bool "VPE debug messages"
+ depends on VIDEO_TI_VPE
+ ---help---
+ Enable debug messages in the VPE driver.
+
endif # V4L_MEM2MEM_DRIVERS
menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 4e4da482c52..1348ba1faf9 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o
obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
+
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 4c110597709..28191659143 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -422,7 +422,7 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
return ret;
}
- INIT_COMPLETION(bcap_dev->comp);
+ reinit_completion(&bcap_dev->comp);
bcap_dev->stop = false;
return 0;
}
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 449d2fec9e8..bd72fb97fea 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -39,7 +39,7 @@
#define CODA_NAME "coda"
-#define CODA_MAX_INSTANCES 4
+#define CODADX6_MAX_INSTANCES 4
#define CODA_FMO_BUF_SIZE 32
#define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
@@ -54,8 +54,6 @@
#define CODA_MAX_FRAMEBUFFERS 8
-#define MAX_W 8192
-#define MAX_H 8192
#define CODA_MAX_FRAME_SIZE 0x100000
#define FMO_SLICE_SAVE_BUF_SIZE (32)
#define CODA_DEFAULT_GAMMA 4096
@@ -394,14 +392,57 @@ static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc,
return &codecs[k];
}
+static void coda_get_max_dimensions(struct coda_dev *dev,
+ struct coda_codec *codec,
+ int *max_w, int *max_h)
+{
+ struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ unsigned int w, h;
+ int k;
+
+ if (codec) {
+ w = codec->max_w;
+ h = codec->max_h;
+ } else {
+ for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
+ w = max(w, codecs[k].max_w);
+ h = max(h, codecs[k].max_h);
+ }
+ }
+
+ if (max_w)
+ *max_w = w;
+ if (max_h)
+ *max_h = h;
+}
+
+static char *coda_product_name(int product)
+{
+ static char buf[9];
+
+ switch (product) {
+ case CODA_DX6:
+ return "CodaDx6";
+ case CODA_7541:
+ return "CODA7541";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", product);
+ return buf;
+ }
+}
+
/*
* V4L2 ioctl() operations.
*/
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+static int coda_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
- strlcpy(cap->card, CODA_NAME, sizeof(cap->card));
+ strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
+ sizeof(cap->card));
strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
/*
* This is only a mem-to-mem video device. The capture and output
@@ -457,6 +498,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
fmt = &formats[i];
strlcpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
+ if (!coda_format_is_yuv(fmt->fourcc))
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
@@ -464,8 +507,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int coda_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct vb2_queue *src_vq;
@@ -483,13 +526,14 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
}
-static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int coda_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
}
-static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+static int coda_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct vb2_queue *vq;
struct coda_q_data *q_data;
@@ -516,8 +560,11 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f)
+static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec,
+ struct v4l2_format *f)
{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data;
unsigned int max_w, max_h;
enum v4l2_field field;
@@ -531,32 +578,48 @@ static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f)
* if any of the dimensions is unsupported */
f->fmt.pix.field = field;
- if (codec) {
- max_w = codec->max_w;
- max_h = codec->max_h;
- } else {
- max_w = MAX_W;
- max_h = MAX_H;
+ coda_get_max_dimensions(dev, codec, &max_w, &max_h);
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
+ &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
+ S_ALIGN);
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
+ break;
+ default:
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix.pixelformat = q_data->fourcc;
}
- v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w,
- W_ALIGN, &f->fmt.pix.height,
- MIN_H, max_h, H_ALIGN, S_ALIGN);
- if (coda_format_is_yuv(f->fmt.pix.pixelformat)) {
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
/* Frame stride must be multiple of 8 */
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
- } else { /*encoded formats h.264/mpeg4 */
+ break;
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
+ break;
+ default:
+ BUG();
}
+ f->fmt.pix.priv = 0;
+
return 0;
}
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_codec *codec;
@@ -584,7 +647,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.colorspace = ctx->colorspace;
- ret = vidioc_try_fmt(codec, f);
+ ret = coda_try_fmt(ctx, codec, f);
if (ret < 0)
return ret;
@@ -600,8 +663,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_codec *codec;
@@ -613,10 +676,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
if (!f->fmt.pix.colorspace)
f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
- return vidioc_try_fmt(codec, f);
+ return coda_try_fmt(ctx, codec, f);
}
-static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
+static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
{
struct coda_q_data *q_data;
struct vb2_queue *vq;
@@ -646,61 +709,62 @@ static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
- ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ ret = coda_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- return vidioc_s_fmt(ctx, f);
+ return coda_s_fmt(ctx, f);
}
-static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
- ret = vidioc_try_fmt_vid_out(file, priv, f);
+ ret = coda_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
- ret = vidioc_s_fmt(ctx, f);
+ ret = coda_s_fmt(ctx, f);
if (ret)
ctx->colorspace = f->fmt.pix.colorspace;
return ret;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
+static int coda_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
+static int coda_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+static int coda_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
-static int vidioc_expbuf(struct file *file, void *priv,
- struct v4l2_exportbuffer *eb)
+static int coda_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
@@ -718,7 +782,8 @@ static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
(buf->sequence == (ctx->qsequence - 1)));
}
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+static int coda_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
@@ -738,24 +803,24 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return ret;
}
-static int vidioc_create_bufs(struct file *file, void *priv,
- struct v4l2_create_buffers *create)
+static int coda_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
+static int coda_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
+static int coda_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
@@ -772,23 +837,34 @@ static int vidioc_streamoff(struct file *file, void *priv,
return ret;
}
-static int vidioc_decoder_cmd(struct file *file, void *fh,
- struct v4l2_decoder_cmd *dc)
+static int coda_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
-
if (dc->cmd != V4L2_DEC_CMD_STOP)
return -EINVAL;
- if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) ||
- (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY))
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
return -EINVAL;
- if (dc->stop.pts != 0)
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
return -EINVAL;
+ return 0;
+}
+
+static int coda_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ ret = coda_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore decoder stop command silently in encoder context */
if (ctx->inst_type != CODA_INST_DECODER)
- return -EINVAL;
+ return 0;
/* Set the stream-end flag on this context */
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
@@ -796,8 +872,8 @@ static int vidioc_decoder_cmd(struct file *file, void *fh,
return 0;
}
-static int vidioc_subscribe_event(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
+static int coda_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
@@ -808,32 +884,33 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
}
static const struct v4l2_ioctl_ops coda_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
+ .vidioc_querycap = coda_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = coda_g_fmt,
+ .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_out = vidioc_g_fmt,
- .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
- .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = coda_g_fmt,
+ .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_reqbufs = coda_reqbufs,
+ .vidioc_querybuf = coda_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_expbuf = vidioc_expbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_create_bufs = vidioc_create_bufs,
+ .vidioc_qbuf = coda_qbuf,
+ .vidioc_expbuf = coda_expbuf,
+ .vidioc_dqbuf = coda_dqbuf,
+ .vidioc_create_bufs = coda_create_bufs,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = coda_streamon,
+ .vidioc_streamoff = coda_streamoff,
- .vidioc_decoder_cmd = vidioc_decoder_cmd,
+ .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
+ .vidioc_decoder_cmd = coda_decoder_cmd,
- .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_subscribe_event = coda_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -1928,8 +2005,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (!(ctx->streamon_out & ctx->streamon_cap))
return 0;
- /* Allow device_run with no buffers queued and after streamoff */
- v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
+ /* Allow decoder device_run with no new buffers queued */
+ if (ctx->inst_type == CODA_INST_DECODER)
+ v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
ctx->gopcounter = ctx->params.gop_size - 1;
buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
@@ -2071,10 +2149,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
coda_setup_iram(ctx);
if (dst_fourcc == V4L2_PIX_FMT_H264) {
- value = (FMO_SLICE_SAVE_BUF_SIZE << 7);
- value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET;
- value |= 0 & CODA_FMOPARAM_SLICENUM_MASK;
if (dev->devtype->product == CODA_DX6) {
+ value = FMO_SLICE_SAVE_BUF_SIZE << 7;
coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
} else {
coda_write(dev, ctx->iram_info.search_ram_paddr,
@@ -2371,7 +2447,13 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
static int coda_next_free_instance(struct coda_dev *dev)
{
- return ffz(dev->instance_mask);
+ int idx = ffz(dev->instance_mask);
+
+ if ((idx < 0) ||
+ (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES))
+ return -EBUSY;
+
+ return idx;
}
static int coda_open(struct file *file)
@@ -2386,8 +2468,8 @@ static int coda_open(struct file *file)
return -ENOMEM;
idx = coda_next_free_instance(dev);
- if (idx >= CODA_MAX_INSTANCES) {
- ret = -EBUSY;
+ if (idx < 0) {
+ ret = idx;
goto err_coda_max;
}
set_bit(idx, &dev->instance_mask);
@@ -2719,7 +2801,6 @@ static void coda_finish_encode(struct coda_ctx *ctx)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
/* Get results from the coda */
- coda_read(dev, CODA_RET_ENC_PIC_TYPE);
start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
@@ -2739,7 +2820,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
coda_read(dev, CODA_RET_ENC_PIC_FLAG);
- if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+ if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
} else {
@@ -2861,21 +2942,6 @@ static bool coda_firmware_supported(u32 vernum)
return false;
}
-static char *coda_product_name(int product)
-{
- static char buf[9];
-
- switch (product) {
- case CODA_DX6:
- return "CodaDx6";
- case CODA_7541:
- return "CODA7541";
- default:
- snprintf(buf, sizeof(buf), "(0x%04x)", product);
- return buf;
- }
-}
-
static int coda_hw_init(struct coda_dev *dev)
{
u16 product, major, minor, release;
@@ -3232,13 +3298,12 @@ static int coda_probe(struct platform_device *pdev)
dev->iram_size = CODA7_IRAM_SIZE;
break;
}
- dev->iram_vaddr = gen_pool_alloc(dev->iram_pool, dev->iram_size);
+ dev->iram_vaddr = (unsigned long)gen_pool_dma_alloc(dev->iram_pool,
+ dev->iram_size, (dma_addr_t *)&dev->iram_paddr);
if (!dev->iram_vaddr) {
dev_err(&pdev->dev, "unable to alloc iram\n");
return -ENOMEM;
}
- dev->iram_paddr = gen_pool_virt_to_phys(dev->iram_pool,
- dev->iram_vaddr);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 04609cc6eba..eac472b5ae8 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -1785,7 +1785,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
}
irq = res->start;
- err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED,
+ err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
VPBE_DISPLAY_DRIVER, disp_dev);
if (err) {
v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 93609091cb2..d762246eabf 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -688,7 +688,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
frame_format = ccdc_dev->hw_ops.get_frame_format();
if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
- IRQF_DISABLED, "vpfe_capture1",
+ 0, "vpfe_capture1",
vpfe_dev);
}
return 0;
@@ -1863,7 +1863,7 @@ static int vpfe_probe(struct platform_device *pdev)
}
vpfe_dev->ccdc_irq1 = res1->start;
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED,
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
"vpfe_capture0", vpfe_dev);
if (0 != ret) {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 1089834a4ef..52ac5e6c862 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -2154,7 +2154,7 @@ static __init int vpif_probe(struct platform_device *pdev)
if (!vpif_obj.sd[i]) {
vpif_err("Error registering v4l2 subdevice\n");
- err = -ENOMEM;
+ err = -ENODEV;
goto probe_subdev_out;
}
v4l2_info(&vpif_obj.v4l2_dev,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 76435d3bf62..ef0a6564cef 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -45,6 +45,7 @@
#define GSC_DST_FMT (1 << 2)
#define GSC_CTX_M2M (1 << 3)
#define GSC_CTX_STOP_REQ (1 << 6)
+#define GSC_CTX_ABORT (1 << 7)
enum gsc_dev_flags {
/* for global */
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index e576ff2de3d..810c3e13970 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -46,6 +46,17 @@ static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
return ret == 0 ? -ETIMEDOUT : ret;
}
+static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
+{
+ int ret;
+
+ ret = gsc_m2m_ctx_stop_req(ctx);
+ if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
+ gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct gsc_ctx *ctx = q->drv_priv;
@@ -58,11 +69,8 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
static int gsc_m2m_stop_streaming(struct vb2_queue *q)
{
struct gsc_ctx *ctx = q->drv_priv;
- int ret;
- ret = gsc_m2m_ctx_stop_req(ctx);
- if (ret == -ETIMEDOUT)
- gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ __gsc_m2m_job_abort(ctx);
pm_runtime_put(&ctx->gsc_dev->pdev->dev);
@@ -91,15 +99,9 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
}
}
-
static void gsc_m2m_job_abort(void *priv)
{
- struct gsc_ctx *ctx = priv;
- int ret;
-
- ret = gsc_m2m_ctx_stop_req(ctx);
- if (ret == -ETIMEDOUT)
- gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ __gsc_m2m_job_abort((struct gsc_ctx *)priv);
}
static int gsc_get_bufs(struct gsc_ctx *ctx)
@@ -150,9 +152,10 @@ static void gsc_m2m_device_run(void *priv)
gsc->m2m.ctx = ctx;
}
- is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
- ctx->state &= ~GSC_CTX_STOP_REQ;
+ is_set = ctx->state & GSC_CTX_STOP_REQ;
if (is_set) {
+ ctx->state &= ~GSC_CTX_STOP_REQ;
+ ctx->state |= GSC_CTX_ABORT;
wake_up(&gsc->irq_queue);
goto put_device;
}
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 53ad0f08017..d2d3b4b6143 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -29,7 +29,7 @@ config VIDEO_S5P_FIMC
config VIDEO_S5P_MIPI_CSIS
tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
depends on REGULATOR
- select S5P_SETUP_MIPIPHY
+ select GENERIC_PHY
help
This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
receiver (MIPI-CSIS) devices.
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index d2e6cba3566..f3c6136aa5b 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -511,7 +511,7 @@ static int __ctrl_set_metering(struct fimc_is *is, unsigned int value)
break;
default:
return -EINVAL;
- };
+ }
__is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val);
return 0;
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a8351127831..7a4ee4c0449 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -411,8 +411,8 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
device_lock(&client->dev);
- if (!client->driver ||
- !try_module_get(client->driver->driver.owner)) {
+ if (!client->dev.driver ||
+ !try_module_get(client->dev.driver->owner)) {
ret = -EPROBE_DEFER;
v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n",
node->full_name);
@@ -442,7 +442,7 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
fmd->num_sensors++;
mod_put:
- module_put(client->driver->driver.owner);
+ module_put(client->dev.driver->owner);
dev_put:
device_unlock(&client->dev);
put_device(&client->dev);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 0914230b42d..9fc2af6a044 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -20,6 +20,7 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/phy/phy.h>
#include <linux/platform_data/mipi-csis.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -180,6 +181,7 @@ struct csis_drvdata {
* @sd: v4l2_subdev associated with CSIS device instance
* @index: the hardware instance index
* @pdev: CSIS platform device
+ * @phy: pointer to the CSIS generic PHY
* @regs: mmaped I/O registers memory
* @supplies: CSIS regulator supplies
* @clock: CSIS clocks
@@ -203,6 +205,7 @@ struct csis_state {
struct v4l2_subdev sd;
u8 index;
struct platform_device *pdev;
+ struct phy *phy;
void __iomem *regs;
struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
struct clk *clock[NUM_CSIS_CLOCKS];
@@ -779,8 +782,8 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
"samsung,csis-wclk");
state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
-
of_node_put(node);
+
return 0;
}
#else
@@ -829,6 +832,10 @@ static int s5pcsis_probe(struct platform_device *pdev)
return -EINVAL;
}
+ state->phy = devm_phy_get(dev, "csis");
+ if (IS_ERR(state->phy))
+ return PTR_ERR(state->phy);
+
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
state->regs = devm_ioremap_resource(dev, mem_res);
if (IS_ERR(state->regs))
@@ -922,7 +929,7 @@ static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
mutex_lock(&state->lock);
if (state->flags & ST_POWERED) {
s5pcsis_stop_stream(state);
- ret = s5p_csis_phy_enable(state->index, false);
+ ret = phy_power_off(state->phy);
if (ret)
goto unlock;
ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
@@ -958,7 +965,7 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime)
state->supplies);
if (ret)
goto unlock;
- ret = s5p_csis_phy_enable(state->index, true);
+ ret = phy_power_on(state->phy);
if (!ret) {
state->flags |= ST_POWERED;
} else {
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index fe9898ca3c8..6a232239ee8 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 540516ca872..65cab70fefc 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
ctx->xt->dir = DMA_MEM_TO_MEM;
ctx->xt->src_sgl = false;
ctx->xt->dst_sgl = true;
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
if (tx == NULL) {
@@ -1084,8 +1083,7 @@ free_dev:
static int deinterlace_remove(struct platform_device *pdev)
{
- struct deinterlace_dev *pcdev =
- (struct deinterlace_dev *)platform_get_drvdata(pdev);
+ struct deinterlace_dev *pcdev = platform_get_drvdata(pdev);
v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
v4l2_m2m_release(pcdev->m2m_dev);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 5184887b155..32fab30a910 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
{
struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
struct mcam_dma_desc *desc = mvb->dma_desc;
struct scatterlist *sg;
int i;
- mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
- DMA_FROM_DEVICE);
+ mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
+ sg_table->nents, DMA_FROM_DEVICE);
if (mvb->dma_desc_nent <= 0)
return -EIO; /* Not sure what's right here */
- for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+ for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
desc->dma_addr = sg_dma_address(sg);
desc->segment_len = sg_dma_len(sg);
desc++;
@@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
{
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
- dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+ if (sg_table)
+ dma_unmap_sg(cam->dev, sg_table->sgl,
+ sg_table->nents, DMA_FROM_DEVICE);
return 0;
}
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index b5a19af5c58..3458fa0e2fd 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -481,7 +481,6 @@ static int mmpcam_remove(struct mmp_camera *cam)
struct mmp_camera_platform_data *pdata;
mmpcam_remove_device(cam);
- free_irq(cam->irq, mcam);
mccic_shutdown(mcam);
mmpcam_power_down(mcam);
pdata = cam->pdev->dev.platform_data;
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 6a17676f9d7..8df5975b700 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -1090,8 +1090,7 @@ unreg_dev:
static int m2mtest_remove(struct platform_device *pdev)
{
- struct m2mtest_dev *dev =
- (struct m2mtest_dev *)platform_get_drvdata(pdev);
+ struct m2mtest_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index df3a0ec7fd2..1c360803966 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2182,9 +2182,9 @@ static int isp_probe(struct platform_device *pdev)
isp->pdata = pdata;
isp->ref_count = 0;
- isp->raw_dmamask = DMA_BIT_MASK(32);
- isp->dev->dma_mask = &isp->raw_dmamask;
- isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index cd3eff45ae7..ce65d3ae1aa 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -152,7 +152,6 @@ struct isp_xclk {
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions.
* @mmio_size: Array with ISP register regions size in bytes.
- * @raw_dmamask: Raw DMA mask
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
* @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@ struct isp_device {
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
- u64 raw_dmamask;
-
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index fd6289d60cd..0b2948376ae 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -840,7 +840,7 @@ put_clk:
static int g2d_remove(struct platform_device *pdev)
{
- struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev);
+ struct g2d_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME);
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 084263dd126..5f2c4ad6c2c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -404,7 +404,11 @@ leave_handle_frame:
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
s5p_mfc_clock_off();
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ /* if suspending, wake up device and do not try_run again */
+ if (test_bit(0, &dev->enter_suspend))
+ wake_up_dev(dev, reason, err);
+ else
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
/* Error handling for interrupt */
@@ -1101,7 +1105,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
}
dev->irq = res->start;
ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
- IRQF_DISABLED, pdev->name, dev);
+ 0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
goto err_res;
@@ -1286,9 +1290,7 @@ static int s5p_mfc_suspend(struct device *dev)
/* Try and lock the HW */
/* Wait on the interrupt waitqueue */
ret = wait_event_interruptible_timeout(m_dev->queue,
- m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
- msecs_to_jiffies(MFC_INT_TIMEOUT));
-
+ m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
if (ret == 0) {
mfc_err("Waiting for hardware to finish timed out\n");
return -EIO;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
index ad4f1df0a18..9a6efd6c132 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -111,7 +111,7 @@ static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
break;
default:
h2r_args.arg[0] = S5P_FIMV_CODEC_NONE;
- };
+ }
h2r_args.arg[1] = 0; /* no crc & no pixelcache */
h2r_args.arg[2] = ctx->ctx.ofs;
h2r_args.arg[3] = ctx->ctx.size;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index db796c8e787..ec1a5947ed7 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -113,7 +113,7 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
break;
default:
codec_type = S5P_FIMV_CODEC_NONE_V6;
- };
+ }
mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6);
mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 41f5a3c10db..4ff3b6cd684 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -113,7 +113,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = (1 << 16) - 1,
.step = 1,
- .default_value = 0,
+ .default_value = 12,
},
{
.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
@@ -356,7 +356,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = 51,
.step = 1,
- .default_value = 1,
+ .default_value = 51,
},
{
.id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
@@ -399,7 +399,7 @@ static struct mfc_control controls[] = {
.minimum = 1,
.maximum = 31,
.step = 1,
- .default_value = 1,
+ .default_value = 31,
},
{
.id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
@@ -444,7 +444,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = 51,
.step = 1,
- .default_value = 1,
+ .default_value = 51,
},
{
.id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 368582b091b..58ec7bb26eb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1582,7 +1582,7 @@ static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev)
break;
default:
reason = S5P_MFC_R2H_CMD_EMPTY;
- };
+ }
return reason;
}
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index b93a21f5aa1..74344c764da 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -226,7 +226,7 @@ static void mxr_graph_fix_geometry(struct mxr_layer *layer,
src->width + src->x_offset, 32767);
src->full_height = clamp_val(src->full_height,
src->height + src->y_offset, 2047);
- };
+ }
}
/* PUBLIC API */
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index 3d13a636877..c9388c45ad7 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -197,7 +197,7 @@ static void mxr_vp_fix_geometry(struct mxr_layer *layer,
ALIGN(src->width + src->x_offset, 8), 8192U);
src->full_height = clamp(src->full_height,
src->height + src->y_offset, 8192U);
- };
+ }
}
/* PUBLIC API */
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index d02a7e0b773..6866bb4fbeb 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/camera-rcar.h>
@@ -105,6 +106,7 @@
#define VIN_MAX_HEIGHT 2048
enum chip_id {
+ RCAR_H2,
RCAR_H1,
RCAR_M1,
RCAR_E1,
@@ -300,7 +302,8 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
dmr = 0;
break;
case V4L2_PIX_FMT_RGB32:
- if (priv->chip == RCAR_H1 || priv->chip == RCAR_E1) {
+ if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 ||
+ priv->chip == RCAR_E1) {
dmr = VNDMR_EXRGB;
break;
}
@@ -1381,6 +1384,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
};
static struct platform_device_id rcar_vin_id_table[] = {
+ { "r8a7790-vin", RCAR_H2 },
{ "r8a7779-vin", RCAR_H1 },
{ "r8a7778-vin", RCAR_M1 },
{ "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8df22f77917..150bd4df413 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1800,7 +1800,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
/* request irq */
err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
- IRQF_DISABLED, dev_name(&pdev->dev), pcdev);
+ 0, dev_name(&pdev->dev), pcdev);
if (err) {
dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
goto exit_release_mem;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 387a232d95a..4b8c024fc48 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -71,13 +71,23 @@ static int video_dev_create(struct soc_camera_device *icd);
int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
struct v4l2_clk *clk)
{
- int ret = clk ? v4l2_clk_enable(clk) : 0;
- if (ret < 0) {
- dev_err(dev, "Cannot enable clock: %d\n", ret);
- return ret;
+ int ret;
+ bool clock_toggle;
+
+ if (clk && (!ssdd->unbalanced_power ||
+ !test_and_set_bit(0, &ssdd->clock_state))) {
+ ret = v4l2_clk_enable(clk);
+ if (ret < 0) {
+ dev_err(dev, "Cannot enable clock: %d\n", ret);
+ return ret;
+ }
+ clock_toggle = true;
+ } else {
+ clock_toggle = false;
}
- ret = regulator_bulk_enable(ssdd->num_regulators,
- ssdd->regulators);
+
+ ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
if (ret < 0) {
dev_err(dev, "Cannot enable regulators\n");
goto eregenable;
@@ -95,10 +105,10 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
return 0;
epwron:
- regulator_bulk_disable(ssdd->num_regulators,
- ssdd->regulators);
+ regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
eregenable:
- if (clk)
+ if (clock_toggle)
v4l2_clk_disable(clk);
return ret;
@@ -120,14 +130,14 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd
}
}
- err = regulator_bulk_disable(ssdd->num_regulators,
- ssdd->regulators);
+ err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
if (err < 0) {
dev_err(dev, "Cannot disable regulators\n");
ret = ret ? : err;
}
- if (clk)
+ if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state)))
v4l2_clk_disable(clk);
return ret;
@@ -137,8 +147,8 @@ EXPORT_SYMBOL(soc_camera_power_off);
int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
/* Should not have any effect in synchronous case */
- return devm_regulator_bulk_get(dev, ssdd->num_regulators,
- ssdd->regulators);
+ return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
}
EXPORT_SYMBOL(soc_camera_power_init);
@@ -1346,8 +1356,8 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
* soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try
* to allocate them again.
*/
- ssdd->num_regulators = 0;
- ssdd->regulators = NULL;
+ ssdd->sd_pdata.num_regulators = 0;
+ ssdd->sd_pdata.regulators = NULL;
shd->board_info->platform_data = ssdd;
snprintf(clk_name, sizeof(clk_name), "%d-%04x",
@@ -2020,8 +2030,8 @@ static int soc_camera_pdrv_probe(struct platform_device *pdev)
* that case regulators are attached to the I2C device and not to the
* camera platform device.
*/
- ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators,
- ssdd->regulators);
+ ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
new file mode 100644
index 00000000000..cbf0a806ba1
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
+
+ti-vpe-y := vpe.o vpdma.o
+
+ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
new file mode 100644
index 00000000000..af0a5ffcaa9
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -0,0 +1,846 @@
+/*
+ * VPDMA helper library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "vpdma.h"
+#include "vpdma_priv.h"
+
+#define VPDMA_FIRMWARE "vpdma-1b8.bin"
+
+const struct vpdma_data_format vpdma_yuv_fmts[] = {
+ [VPDMA_DATA_FMT_Y444] = {
+ .data_type = DATA_TYPE_Y444,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_Y422] = {
+ .data_type = DATA_TYPE_Y422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_Y420] = {
+ .data_type = DATA_TYPE_Y420,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C444] = {
+ .data_type = DATA_TYPE_C444,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C422] = {
+ .data_type = DATA_TYPE_C422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C420] = {
+ .data_type = DATA_TYPE_C420,
+ .depth = 4,
+ },
+ [VPDMA_DATA_FMT_YC422] = {
+ .data_type = DATA_TYPE_YC422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_YC444] = {
+ .data_type = DATA_TYPE_YC444,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_CY422] = {
+ .data_type = DATA_TYPE_CY422,
+ .depth = 16,
+ },
+};
+
+const struct vpdma_data_format vpdma_rgb_fmts[] = {
+ [VPDMA_DATA_FMT_RGB565] = {
+ .data_type = DATA_TYPE_RGB16_565,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB16_1555] = {
+ .data_type = DATA_TYPE_ARGB_1555,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB16] = {
+ .data_type = DATA_TYPE_ARGB_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_RGBA16_5551] = {
+ .data_type = DATA_TYPE_RGBA_5551,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_RGBA16] = {
+ .data_type = DATA_TYPE_RGBA_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB24] = {
+ .data_type = DATA_TYPE_ARGB24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_RGB24] = {
+ .data_type = DATA_TYPE_RGB24_888,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_ARGB32] = {
+ .data_type = DATA_TYPE_ARGB32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_RGBA24] = {
+ .data_type = DATA_TYPE_RGBA24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_RGBA32] = {
+ .data_type = DATA_TYPE_RGBA32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_BGR565] = {
+ .data_type = DATA_TYPE_BGR16_565,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR16_1555] = {
+ .data_type = DATA_TYPE_ABGR_1555,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR16] = {
+ .data_type = DATA_TYPE_ABGR_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_BGRA16_5551] = {
+ .data_type = DATA_TYPE_BGRA_5551,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_BGRA16] = {
+ .data_type = DATA_TYPE_BGRA_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR24] = {
+ .data_type = DATA_TYPE_ABGR24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_BGR24] = {
+ .data_type = DATA_TYPE_BGR24_888,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_ABGR32] = {
+ .data_type = DATA_TYPE_ABGR32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_BGRA24] = {
+ .data_type = DATA_TYPE_BGRA24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_BGRA32] = {
+ .data_type = DATA_TYPE_BGRA32_8888,
+ .depth = 32,
+ },
+};
+
+const struct vpdma_data_format vpdma_misc_fmts[] = {
+ [VPDMA_DATA_FMT_MV] = {
+ .data_type = DATA_TYPE_MV,
+ .depth = 4,
+ },
+};
+
+struct vpdma_channel_info {
+ int num; /* VPDMA channel number */
+ int cstat_offset; /* client CSTAT register offset */
+};
+
+static const struct vpdma_channel_info chan_info[] = {
+ [VPE_CHAN_LUMA1_IN] = {
+ .num = VPE_CHAN_NUM_LUMA1_IN,
+ .cstat_offset = VPDMA_DEI_LUMA1_CSTAT,
+ },
+ [VPE_CHAN_CHROMA1_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA1_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT,
+ },
+ [VPE_CHAN_LUMA2_IN] = {
+ .num = VPE_CHAN_NUM_LUMA2_IN,
+ .cstat_offset = VPDMA_DEI_LUMA2_CSTAT,
+ },
+ [VPE_CHAN_CHROMA2_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA2_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT,
+ },
+ [VPE_CHAN_LUMA3_IN] = {
+ .num = VPE_CHAN_NUM_LUMA3_IN,
+ .cstat_offset = VPDMA_DEI_LUMA3_CSTAT,
+ },
+ [VPE_CHAN_CHROMA3_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA3_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT,
+ },
+ [VPE_CHAN_MV_IN] = {
+ .num = VPE_CHAN_NUM_MV_IN,
+ .cstat_offset = VPDMA_DEI_MV_IN_CSTAT,
+ },
+ [VPE_CHAN_MV_OUT] = {
+ .num = VPE_CHAN_NUM_MV_OUT,
+ .cstat_offset = VPDMA_DEI_MV_OUT_CSTAT,
+ },
+ [VPE_CHAN_LUMA_OUT] = {
+ .num = VPE_CHAN_NUM_LUMA_OUT,
+ .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
+ },
+ [VPE_CHAN_CHROMA_OUT] = {
+ .num = VPE_CHAN_NUM_CHROMA_OUT,
+ .cstat_offset = VPDMA_VIP_UP_UV_CSTAT,
+ },
+ [VPE_CHAN_RGB_OUT] = {
+ .num = VPE_CHAN_NUM_RGB_OUT,
+ .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
+ },
+};
+
+static u32 read_reg(struct vpdma_data *vpdma, int offset)
+{
+ return ioread32(vpdma->base + offset);
+}
+
+static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
+{
+ iowrite32(value, vpdma->base + offset);
+}
+
+static int read_field_reg(struct vpdma_data *vpdma, int offset,
+ u32 mask, int shift)
+{
+ return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
+}
+
+static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
+ u32 mask, int shift)
+{
+ u32 val = read_reg(vpdma, offset);
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+
+ write_reg(vpdma, offset, val);
+}
+
+void vpdma_dump_regs(struct vpdma_data *vpdma)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))
+
+ dev_dbg(dev, "VPDMA Registers:\n");
+
+ DUMPREG(PID);
+ DUMPREG(LIST_ADDR);
+ DUMPREG(LIST_ATTR);
+ DUMPREG(LIST_STAT_SYNC);
+ DUMPREG(BG_RGB);
+ DUMPREG(BG_YUV);
+ DUMPREG(SETUP);
+ DUMPREG(MAX_SIZE1);
+ DUMPREG(MAX_SIZE2);
+ DUMPREG(MAX_SIZE3);
+
+ /*
+ * dumping registers of only group0 and group3, because VPE channels
+ * lie within group0 and group3 registers
+ */
+ DUMPREG(INT_CHAN_STAT(0));
+ DUMPREG(INT_CHAN_MASK(0));
+ DUMPREG(INT_CHAN_STAT(3));
+ DUMPREG(INT_CHAN_MASK(3));
+ DUMPREG(INT_CLIENT0_STAT);
+ DUMPREG(INT_CLIENT0_MASK);
+ DUMPREG(INT_CLIENT1_STAT);
+ DUMPREG(INT_CLIENT1_MASK);
+ DUMPREG(INT_LIST0_STAT);
+ DUMPREG(INT_LIST0_MASK);
+
+ /*
+ * these are registers specific to VPE clients, we can make this
+ * function dump client registers specific to VPE or VIP based on
+ * who is using it
+ */
+ DUMPREG(DEI_CHROMA1_CSTAT);
+ DUMPREG(DEI_LUMA1_CSTAT);
+ DUMPREG(DEI_CHROMA2_CSTAT);
+ DUMPREG(DEI_LUMA2_CSTAT);
+ DUMPREG(DEI_CHROMA3_CSTAT);
+ DUMPREG(DEI_LUMA3_CSTAT);
+ DUMPREG(DEI_MV_IN_CSTAT);
+ DUMPREG(DEI_MV_OUT_CSTAT);
+ DUMPREG(VIP_UP_Y_CSTAT);
+ DUMPREG(VIP_UP_UV_CSTAT);
+ DUMPREG(VPI_CTL_CSTAT);
+}
+
+/*
+ * Allocate a DMA buffer
+ */
+int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
+{
+ buf->size = size;
+ buf->mapped = false;
+ buf->addr = kzalloc(size, GFP_KERNEL);
+ if (!buf->addr)
+ return -ENOMEM;
+
+ WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN);
+
+ return 0;
+}
+
+void vpdma_free_desc_buf(struct vpdma_buf *buf)
+{
+ WARN_ON(buf->mapped);
+ kfree(buf->addr);
+ buf->addr = NULL;
+ buf->size = 0;
+}
+
+/*
+ * map descriptor/payload DMA buffer, enabling DMA access
+ */
+int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+ WARN_ON(buf->mapped);
+ buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buf->dma_addr)) {
+ dev_err(dev, "failed to map buffer\n");
+ return -EINVAL;
+ }
+
+ buf->mapped = true;
+
+ return 0;
+}
+
+/*
+ * unmap descriptor/payload DMA buffer, disabling DMA access and
+ * allowing the main processor to access the data
+ */
+void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+ if (buf->mapped)
+ dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
+
+ buf->mapped = false;
+}
+
+/*
+ * create a descriptor list, the user of this list will append configuration,
+ * control and data descriptors to this list, this list will be submitted to
+ * VPDMA. VPDMA's list parser will go through each descriptor and perform the
+ * required DMA operations
+ */
+int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
+{
+ int r;
+
+ r = vpdma_alloc_desc_buf(&list->buf, size);
+ if (r)
+ return r;
+
+ list->next = list->buf.addr;
+
+ list->type = type;
+
+ return 0;
+}
+
+/*
+ * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
+ * to allow new descriptors to be added to the list.
+ */
+void vpdma_reset_desc_list(struct vpdma_desc_list *list)
+{
+ list->next = list->buf.addr;
+}
+
+/*
+ * free the buffer allocated for the VPDMA descriptor list, this should be
+ * called when the user doesn't want to use VPDMA any more.
+ */
+void vpdma_free_desc_list(struct vpdma_desc_list *list)
+{
+ vpdma_free_desc_buf(&list->buf);
+
+ list->next = NULL;
+}
+
+static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
+{
+ return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
+}
+
+/*
+ * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
+ */
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
+{
+ /* we always use the first list */
+ int list_num = 0;
+ int list_size;
+
+ if (vpdma_list_busy(vpdma, list_num))
+ return -EBUSY;
+
+ /* 16-byte granularity */
+ list_size = (list->next - list->buf.addr) >> 4;
+
+ write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
+
+ write_reg(vpdma, VPDMA_LIST_ATTR,
+ (list_num << VPDMA_LIST_NUM_SHFT) |
+ (list->type << VPDMA_LIST_TYPE_SHFT) |
+ list_size);
+
+ return 0;
+}
+
+static void dump_cfd(struct vpdma_cfd *cfd)
+{
+ int class;
+
+ class = cfd_get_class(cfd);
+
+ pr_debug("config descriptor of payload class: %s\n",
+ class == CFD_CLS_BLOCK ? "simple block" :
+ "address data block");
+
+ if (class == CFD_CLS_BLOCK)
+ pr_debug("word0: dst_addr_offset = 0x%08x\n",
+ cfd->dest_addr_offset);
+
+ if (class == CFD_CLS_BLOCK)
+ pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
+
+ pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
+
+ pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
+ "payload_len = %d\n", cfd_get_pkt_type(cfd),
+ cfd_get_direct(cfd), class, cfd_get_dest(cfd),
+ cfd_get_payload_len(cfd));
+}
+
+/*
+ * append a configuration descriptor to the given descriptor list, where the
+ * payload is in the form of a simple data block specified in the descriptor
+ * header, this is used to upload scaler coefficients to the scaler module
+ */
+void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *blk, u32 dest_offset)
+{
+ struct vpdma_cfd *cfd;
+ int len = blk->size;
+
+ WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
+
+ cfd = list->next;
+ WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
+
+ cfd->dest_addr_offset = dest_offset;
+ cfd->block_len = len;
+ cfd->payload_addr = (u32) blk->dma_addr;
+ cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
+ client, len >> 4);
+
+ list->next = cfd + 1;
+
+ dump_cfd(cfd);
+}
+
+/*
+ * append a configuration descriptor to the given descriptor list, where the
+ * payload is in the address data block format, this is used to configure a
+ * discontiguous set of MMRs
+ */
+void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *adb)
+{
+ struct vpdma_cfd *cfd;
+ unsigned int len = adb->size;
+
+ WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
+ WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
+
+ cfd = list->next;
+ BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
+
+ cfd->w0 = 0;
+ cfd->w1 = 0;
+ cfd->payload_addr = (u32) adb->dma_addr;
+ cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
+ client, len >> 4);
+
+ list->next = cfd + 1;
+
+ dump_cfd(cfd);
+};
+
+/*
+ * control descriptor format change based on what type of control descriptor it
+ * is, we only use 'sync on channel' control descriptors for now, so assume it's
+ * that
+ */
+static void dump_ctd(struct vpdma_ctd *ctd)
+{
+ pr_debug("control descriptor\n");
+
+ pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
+ ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
+}
+
+/*
+ * append a 'sync on channel' type control descriptor to the given descriptor
+ * list; this descriptor stalls the VPDMA list until DMA is completed
+ * on the specified channel
+ */
+void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
+ enum vpdma_channel chan)
+{
+ struct vpdma_ctd *ctd;
+
+ ctd = list->next;
+ WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+ ctd->w0 = 0;
+ ctd->w1 = 0;
+ ctd->w2 = 0;
+ ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
+ CTD_TYPE_SYNC_ON_CHANNEL);
+
+ list->next = ctd + 1;
+
+ dump_ctd(ctd);
+}
+
+static void dump_dtd(struct vpdma_dtd *dtd)
+{
+ int dir, chan;
+
+ dir = dtd_get_dir(dtd);
+ chan = dtd_get_chan(dtd);
+
+ pr_debug("%s data transfer descriptor for channel %d\n",
+ dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
+
+ pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
+ "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
+ dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
+ dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
+ dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word1: line_length = %d, xfer_height = %d\n",
+ dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
+
+ pr_debug("word2: start_addr = 0x%08x\n", dtd->start_addr);
+
+ pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
+ "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
+ dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
+ dtd_get_next_chan(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word4: frame_width = %d, frame_height = %d\n",
+ dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
+ else
+ pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
+ "drp_data = %d, use_desc_reg = %d\n",
+ dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
+ dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word5: hor_start = %d, ver_start = %d\n",
+ dtd_get_h_start(dtd), dtd_get_v_start(dtd));
+ else
+ pr_debug("word5: max_width %d, max_height %d\n",
+ dtd_get_max_width(dtd), dtd_get_max_height(dtd));
+
+ pr_debug("word6: client specfic attr0 = 0x%08x\n", dtd->client_attr0);
+ pr_debug("word7: client specfic attr1 = 0x%08x\n", dtd->client_attr1);
+}
+
+/*
+ * append an outbound data transfer descriptor to the given descriptor list,
+ * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
+ */
+void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, u32 flags)
+{
+ int priority = 0;
+ int field = 0;
+ int notify = 1;
+ int channel, next_chan;
+ int depth = fmt->depth;
+ int stride;
+ struct vpdma_dtd *dtd;
+
+ channel = next_chan = chan_info[chan].num;
+
+ if (fmt->data_type == DATA_TYPE_C420)
+ depth = 8;
+
+ stride = (depth * c_rect->width) >> 3;
+ dma_addr += (c_rect->left * depth) >> 3;
+
+ dtd = list->next;
+ WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
+
+ dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
+ notify,
+ field,
+ !!(flags & VPDMA_DATA_FRAME_1D),
+ !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
+ !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
+ stride);
+ dtd->w1 = 0;
+ dtd->start_addr = (u32) dma_addr;
+ dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
+ DTD_DIR_OUT, channel, priority, next_chan);
+ dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
+ dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
+ MAX_OUT_HEIGHT_1080);
+ dtd->client_attr0 = 0;
+ dtd->client_attr1 = 0;
+
+ list->next = dtd + 1;
+
+ dump_dtd(dtd);
+}
+
+/*
+ * append an inbound data transfer descriptor to the given descriptor list,
+ * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
+ */
+void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
+ int frame_height, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, int field, u32 flags)
+{
+ int priority = 0;
+ int notify = 1;
+ int depth = fmt->depth;
+ int channel, next_chan;
+ int stride;
+ int height = c_rect->height;
+ struct vpdma_dtd *dtd;
+
+ channel = next_chan = chan_info[chan].num;
+
+ if (fmt->data_type == DATA_TYPE_C420) {
+ height >>= 1;
+ frame_height >>= 1;
+ depth = 8;
+ }
+
+ stride = (depth * c_rect->width) >> 3;
+ dma_addr += (c_rect->left * depth) >> 3;
+
+ dtd = list->next;
+ WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
+
+ dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
+ notify,
+ field,
+ !!(flags & VPDMA_DATA_FRAME_1D),
+ !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
+ !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
+ stride);
+
+ dtd->xfer_length_height = dtd_xfer_length_height(c_rect->width, height);
+ dtd->start_addr = (u32) dma_addr;
+ dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
+ DTD_DIR_IN, channel, priority, next_chan);
+ dtd->frame_width_height = dtd_frame_width_height(frame_width,
+ frame_height);
+ dtd->start_h_v = dtd_start_h_v(c_rect->left, c_rect->top);
+ dtd->client_attr0 = 0;
+ dtd->client_attr1 = 0;
+
+ list->next = dtd + 1;
+
+ dump_dtd(dtd);
+}
+
+/* set or clear the mask for list complete interrupt */
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
+ bool enable)
+{
+ u32 val;
+
+ val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
+ if (enable)
+ val |= (1 << (list_num * 2));
+ else
+ val &= ~(1 << (list_num * 2));
+ write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
+}
+
+/* clear previously occurred list interrupts in the LIST_STAT register */
+void vpdma_clear_list_stat(struct vpdma_data *vpdma)
+{
+ write_reg(vpdma, VPDMA_INT_LIST0_STAT,
+ read_reg(vpdma, VPDMA_INT_LIST0_STAT));
+}
+
+/*
+ * configures the output mode of the line buffer for the given client, the
+ * line buffer content can either be mirrored (each line repeated twice) or
+ * passed to the client as is
+ */
+void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
+ enum vpdma_channel chan)
+{
+ int client_cstat = chan_info[chan].cstat_offset;
+
+ write_field_reg(vpdma, client_cstat, line_mode,
+ VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
+}
+
+/*
+ * configures the event which should trigger VPDMA transfer for the given
+ * client
+ */
+void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
+ enum vpdma_frame_start_event fs_event,
+ enum vpdma_channel chan)
+{
+ int client_cstat = chan_info[chan].cstat_offset;
+
+ write_field_reg(vpdma, client_cstat, fs_event,
+ VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
+}
+
+static void vpdma_firmware_cb(const struct firmware *f, void *context)
+{
+ struct vpdma_data *vpdma = context;
+ struct vpdma_buf fw_dma_buf;
+ int i, r;
+
+ dev_dbg(&vpdma->pdev->dev, "firmware callback\n");
+
+ if (!f || !f->data) {
+ dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
+ return;
+ }
+
+ /* already initialized */
+ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
+ VPDMA_LIST_RDY_SHFT)) {
+ vpdma->ready = true;
+ return;
+ }
+
+ r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
+ if (r) {
+ dev_err(&vpdma->pdev->dev,
+ "failed to allocate dma buffer for firmware\n");
+ goto rel_fw;
+ }
+
+ memcpy(fw_dma_buf.addr, f->data, f->size);
+
+ vpdma_map_desc_buf(vpdma, &fw_dma_buf);
+
+ write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);
+
+ for (i = 0; i < 100; i++) { /* max 1 second */
+ msleep_interruptible(10);
+
+ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
+ VPDMA_LIST_RDY_SHFT))
+ break;
+ }
+
+ if (i == 100) {
+ dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
+ goto free_buf;
+ }
+
+ vpdma->ready = true;
+
+free_buf:
+ vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);
+
+ vpdma_free_desc_buf(&fw_dma_buf);
+rel_fw:
+ release_firmware(f);
+}
+
+static int vpdma_load_firmware(struct vpdma_data *vpdma)
+{
+ int r;
+ struct device *dev = &vpdma->pdev->dev;
+
+ r = request_firmware_nowait(THIS_MODULE, 1,
+ (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
+ vpdma_firmware_cb);
+ if (r) {
+ dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
+ return r;
+ } else {
+ dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
+ }
+
+ return 0;
+}
+
+struct vpdma_data *vpdma_create(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct vpdma_data *vpdma;
+ int r;
+
+ dev_dbg(&pdev->dev, "vpdma_create\n");
+
+ vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
+ if (!vpdma) {
+ dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vpdma->pdev = pdev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
+ if (res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!vpdma->base) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ r = vpdma_load_firmware(vpdma);
+ if (r) {
+ pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
+ return ERR_PTR(r);
+ }
+
+ return vpdma;
+}
+MODULE_FIRMWARE(VPDMA_FIRMWARE);
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
new file mode 100644
index 00000000000..eaa2a71a5db
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_VPDMA_H_
+#define __TI_VPDMA_H_
+
+/*
+ * A vpdma_buf tracks the size, DMA address and mapping status of each
+ * driver DMA area.
+ */
+struct vpdma_buf {
+ void *addr;
+ dma_addr_t dma_addr;
+ size_t size;
+ bool mapped;
+};
+
+struct vpdma_desc_list {
+ struct vpdma_buf buf;
+ void *next;
+ int type;
+};
+
+struct vpdma_data {
+ void __iomem *base;
+
+ struct platform_device *pdev;
+
+ /* tells whether vpdma firmware is loaded or not */
+ bool ready;
+};
+
+struct vpdma_data_format {
+ int data_type;
+ u8 depth;
+};
+
+#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */
+
+#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
+#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
+
+#define VPDMA_LIST_TYPE_NORMAL 0
+#define VPDMA_LIST_TYPE_SELF_MODIFYING 1
+#define VPDMA_LIST_TYPE_DOORBELL 2
+
+enum vpdma_yuv_formats {
+ VPDMA_DATA_FMT_Y444 = 0,
+ VPDMA_DATA_FMT_Y422,
+ VPDMA_DATA_FMT_Y420,
+ VPDMA_DATA_FMT_C444,
+ VPDMA_DATA_FMT_C422,
+ VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_YC422,
+ VPDMA_DATA_FMT_YC444,
+ VPDMA_DATA_FMT_CY422,
+};
+
+enum vpdma_rgb_formats {
+ VPDMA_DATA_FMT_RGB565 = 0,
+ VPDMA_DATA_FMT_ARGB16_1555,
+ VPDMA_DATA_FMT_ARGB16,
+ VPDMA_DATA_FMT_RGBA16_5551,
+ VPDMA_DATA_FMT_RGBA16,
+ VPDMA_DATA_FMT_ARGB24,
+ VPDMA_DATA_FMT_RGB24,
+ VPDMA_DATA_FMT_ARGB32,
+ VPDMA_DATA_FMT_RGBA24,
+ VPDMA_DATA_FMT_RGBA32,
+ VPDMA_DATA_FMT_BGR565,
+ VPDMA_DATA_FMT_ABGR16_1555,
+ VPDMA_DATA_FMT_ABGR16,
+ VPDMA_DATA_FMT_BGRA16_5551,
+ VPDMA_DATA_FMT_BGRA16,
+ VPDMA_DATA_FMT_ABGR24,
+ VPDMA_DATA_FMT_BGR24,
+ VPDMA_DATA_FMT_ABGR32,
+ VPDMA_DATA_FMT_BGRA24,
+ VPDMA_DATA_FMT_BGRA32,
+};
+
+enum vpdma_misc_formats {
+ VPDMA_DATA_FMT_MV = 0,
+};
+
+extern const struct vpdma_data_format vpdma_yuv_fmts[];
+extern const struct vpdma_data_format vpdma_rgb_fmts[];
+extern const struct vpdma_data_format vpdma_misc_fmts[];
+
+enum vpdma_frame_start_event {
+ VPDMA_FSEVENT_HDMI_FID = 0,
+ VPDMA_FSEVENT_DVO2_FID,
+ VPDMA_FSEVENT_HDCOMP_FID,
+ VPDMA_FSEVENT_SD_FID,
+ VPDMA_FSEVENT_LM_FID0,
+ VPDMA_FSEVENT_LM_FID1,
+ VPDMA_FSEVENT_LM_FID2,
+ VPDMA_FSEVENT_CHANNEL_ACTIVE,
+};
+
+/*
+ * VPDMA channel numbers
+ */
+enum vpdma_channel {
+ VPE_CHAN_LUMA1_IN,
+ VPE_CHAN_CHROMA1_IN,
+ VPE_CHAN_LUMA2_IN,
+ VPE_CHAN_CHROMA2_IN,
+ VPE_CHAN_LUMA3_IN,
+ VPE_CHAN_CHROMA3_IN,
+ VPE_CHAN_MV_IN,
+ VPE_CHAN_MV_OUT,
+ VPE_CHAN_LUMA_OUT,
+ VPE_CHAN_CHROMA_OUT,
+ VPE_CHAN_RGB_OUT,
+};
+
+/* flags for VPDMA data descriptors */
+#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
+#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
+#define VPDMA_DATA_FRAME_1D (1 << 2)
+#define VPDMA_DATA_MODE_TILED (1 << 3)
+
+/*
+ * client identifiers used for configuration descriptors
+ */
+#define CFD_MMR_CLIENT 0
+#define CFD_SC_CLIENT 4
+
+/* Address data block header format */
+struct vpdma_adb_hdr {
+ u32 offset;
+ u32 nwords;
+ u32 reserved0;
+ u32 reserved1;
+};
+
+/* helpers for creating ADB headers for config descriptors with MMRs as the client */
+#define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld))
+#define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld)
+
+#define VPDMA_SET_MMR_ADB_HDR(buf, str, hdr, regs, offset_a) \
+ do { \
+ struct vpdma_adb_hdr *h; \
+ struct str *adb = NULL; \
+ h = MMR_ADB_ADDR(buf, str, hdr); \
+ h->offset = (offset_a); \
+ h->nwords = sizeof(adb->regs) >> 2; \
+ } while (0)
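An illustrative sketch of how the macro above is intended to be used, not part
of the patch (the struct layout and the 0x100 register offset are hypothetical);
init_adb_hdrs() in vpe.c later in this patch follows the same pattern with the
real shadow-register layout.

struct example_mmr_adb {
	struct vpdma_adb_hdr	us1_hdr;
	u32			us1_regs[8];
};

static void example_init_adb_hdr(struct vpdma_buf *mmr_adb)
{
	/* fills in the header located at offsetof(struct example_mmr_adb,
	 * us1_hdr) inside the mapped buffer: offset = 0x100, nwords = 8 */
	VPDMA_SET_MMR_ADB_HDR(*mmr_adb, example_mmr_adb, us1_hdr, us1_regs,
			      0x100);
}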
+
+/* vpdma descriptor buffer allocation and management */
+int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size);
+void vpdma_free_desc_buf(struct vpdma_buf *buf);
+int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
+void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
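A minimal lifecycle sketch for these helpers, not part of the patch (the
function name and the "fill descriptors" step are placeholders); the
unmap-then-free ordering mirrors the firmware-load path in vpdma.c above.

static int example_use_desc_buf(struct vpdma_data *vpdma, size_t size)
{
	struct vpdma_buf buf;
	int ret;

	ret = vpdma_alloc_desc_buf(&buf, size);	/* CPU-side allocation */
	if (ret)
		return ret;

	ret = vpdma_map_desc_buf(vpdma, &buf);	/* make it visible to VPDMA */
	if (ret)
		goto free_buf;

	/* ... fill descriptors or a config payload via buf.addr here ... */

	vpdma_unmap_desc_buf(vpdma, &buf);
free_buf:
	vpdma_free_desc_buf(&buf);
	return ret;
}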
+
+/* vpdma descriptor list funcs */
+int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
+void vpdma_reset_desc_list(struct vpdma_desc_list *list);
+void vpdma_free_desc_list(struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
+
+/* helpers for creating vpdma descriptors */
+void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *blk, u32 dest_offset);
+void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *adb);
+void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
+ enum vpdma_channel chan);
+void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, u32 flags);
+void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
+ int frame_height, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, int field, u32 flags);
+
+/* vpdma list interrupt management */
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
+ bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma);
+
+/* vpdma client configuration */
+void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
+ enum vpdma_channel chan);
+void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
+ enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
+
+void vpdma_dump_regs(struct vpdma_data *vpdma);
+
+/* initialize vpdma, passed with VPE's platform device pointer */
+struct vpdma_data *vpdma_create(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
new file mode 100644
index 00000000000..f0e9a8038c1
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TI_VPDMA_PRIV_H_
+#define _TI_VPDMA_PRIV_H_
+
+/*
+ * VPDMA Register offsets
+ */
+
+/* Top level */
+#define VPDMA_PID 0x00
+#define VPDMA_LIST_ADDR 0x04
+#define VPDMA_LIST_ATTR 0x08
+#define VPDMA_LIST_STAT_SYNC 0x0c
+#define VPDMA_BG_RGB 0x18
+#define VPDMA_BG_YUV 0x1c
+#define VPDMA_SETUP 0x30
+#define VPDMA_MAX_SIZE1 0x34
+#define VPDMA_MAX_SIZE2 0x38
+#define VPDMA_MAX_SIZE3 0x3c
+
+/* Interrupts */
+#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8)
+#define VPDMA_INT_CHAN_MASK(grp) (VPDMA_INT_CHAN_STAT(grp) + 4)
+#define VPDMA_INT_CLIENT0_STAT 0x78
+#define VPDMA_INT_CLIENT0_MASK 0x7c
+#define VPDMA_INT_CLIENT1_STAT 0x80
+#define VPDMA_INT_CLIENT1_MASK 0x84
+#define VPDMA_INT_LIST0_STAT 0x88
+#define VPDMA_INT_LIST0_MASK 0x8c
+
+#define VPDMA_PERFMON(i) (0x200 + i * 4)
+
+/* VPE specific client registers */
+#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
+#define VPDMA_DEI_LUMA1_CSTAT 0x0304
+#define VPDMA_DEI_LUMA2_CSTAT 0x0308
+#define VPDMA_DEI_CHROMA2_CSTAT 0x030c
+#define VPDMA_DEI_LUMA3_CSTAT 0x0310
+#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
+#define VPDMA_DEI_MV_IN_CSTAT 0x0330
+#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
+#define VPDMA_VIP_UP_Y_CSTAT 0x0390
+#define VPDMA_VIP_UP_UV_CSTAT 0x0394
+#define VPDMA_VPI_CTL_CSTAT 0x03d0
+
+/* Reg field info for VPDMA_CLIENT_CSTAT registers */
+#define VPDMA_CSTAT_LINE_MODE_MASK 0x03
+#define VPDMA_CSTAT_LINE_MODE_SHIFT 8
+#define VPDMA_CSTAT_FRAME_START_MASK 0xf
+#define VPDMA_CSTAT_FRAME_START_SHIFT 10
+
+#define VPDMA_LIST_NUM_MASK 0x07
+#define VPDMA_LIST_NUM_SHFT 24
+#define VPDMA_LIST_STOP_SHFT 20
+#define VPDMA_LIST_RDY_MASK 0x01
+#define VPDMA_LIST_RDY_SHFT 19
+#define VPDMA_LIST_TYPE_MASK 0x03
+#define VPDMA_LIST_TYPE_SHFT 16
+#define VPDMA_LIST_SIZE_MASK 0xffff
+
+/* VPDMA data type values for data formats */
+#define DATA_TYPE_Y444 0x0
+#define DATA_TYPE_Y422 0x1
+#define DATA_TYPE_Y420 0x2
+#define DATA_TYPE_C444 0x4
+#define DATA_TYPE_C422 0x5
+#define DATA_TYPE_C420 0x6
+#define DATA_TYPE_YC422 0x7
+#define DATA_TYPE_YC444 0x8
+#define DATA_TYPE_CY422 0x23
+
+#define DATA_TYPE_RGB16_565 0x0
+#define DATA_TYPE_ARGB_1555 0x1
+#define DATA_TYPE_ARGB_4444 0x2
+#define DATA_TYPE_RGBA_5551 0x3
+#define DATA_TYPE_RGBA_4444 0x4
+#define DATA_TYPE_ARGB24_6666 0x5
+#define DATA_TYPE_RGB24_888 0x6
+#define DATA_TYPE_ARGB32_8888 0x7
+#define DATA_TYPE_RGBA24_6666 0x8
+#define DATA_TYPE_RGBA32_8888 0x9
+#define DATA_TYPE_BGR16_565 0x10
+#define DATA_TYPE_ABGR_1555 0x11
+#define DATA_TYPE_ABGR_4444 0x12
+#define DATA_TYPE_BGRA_5551 0x13
+#define DATA_TYPE_BGRA_4444 0x14
+#define DATA_TYPE_ABGR24_6666 0x15
+#define DATA_TYPE_BGR24_888 0x16
+#define DATA_TYPE_ABGR32_8888 0x17
+#define DATA_TYPE_BGRA24_6666 0x18
+#define DATA_TYPE_BGRA32_8888 0x19
+
+#define DATA_TYPE_MV 0x3
+
+/* VPDMA channel numbers(only VPE channels for now) */
+#define VPE_CHAN_NUM_LUMA1_IN 0
+#define VPE_CHAN_NUM_CHROMA1_IN 1
+#define VPE_CHAN_NUM_LUMA2_IN 2
+#define VPE_CHAN_NUM_CHROMA2_IN 3
+#define VPE_CHAN_NUM_LUMA3_IN 4
+#define VPE_CHAN_NUM_CHROMA3_IN 5
+#define VPE_CHAN_NUM_MV_IN 12
+#define VPE_CHAN_NUM_MV_OUT 15
+#define VPE_CHAN_NUM_LUMA_OUT 102
+#define VPE_CHAN_NUM_CHROMA_OUT 103
+#define VPE_CHAN_NUM_RGB_OUT 106
+
+/*
+ * A VPDMA address data block payload for a configuration descriptor needs to
+ * have each sub block length as a multiple of 16 bytes. Therefore, the overall
+ * size of the payload also needs to be a multiple of 16 bytes. It is the VPDMA
+ * user's responsibility to ensure that the sub block lengths are aligned.
+ */
+#define VPDMA_ADB_SIZE_ALIGN 0x0f
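Illustrative only, not part of the patch (the helper name is hypothetical): one
way a VPDMA user could round a sub block length up to the required 16-byte
multiple using the mask above.

static inline size_t example_adb_align(size_t len)
{
	/* 0x0f mask: add 15, then clear the low four bits */
	return (len + VPDMA_ADB_SIZE_ALIGN) & ~(size_t)VPDMA_ADB_SIZE_ALIGN;
}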
+
+/*
+ * data transfer descriptor
+ */
+struct vpdma_dtd {
+ u32 type_ctl_stride;
+ union {
+ u32 xfer_length_height;
+ u32 w1;
+ };
+ dma_addr_t start_addr;
+ u32 pkt_ctl;
+ union {
+ u32 frame_width_height; /* inbound */
+ dma_addr_t desc_write_addr; /* outbound */
+ };
+ union {
+ u32 start_h_v; /* inbound */
+ u32 max_width_height; /* outbound */
+ };
+ u32 client_attr0;
+ u32 client_attr1;
+};
+
+/* Data Transfer Descriptor specifics */
+#define DTD_NO_NOTIFY 0
+#define DTD_NOTIFY 1
+
+#define DTD_PKT_TYPE 0xa
+#define DTD_DIR_IN 0
+#define DTD_DIR_OUT 1
+
+/* type_ctl_stride */
+#define DTD_DATA_TYPE_MASK 0x3f
+#define DTD_DATA_TYPE_SHFT 26
+#define DTD_NOTIFY_MASK 0x01
+#define DTD_NOTIFY_SHFT 25
+#define DTD_FIELD_MASK 0x01
+#define DTD_FIELD_SHFT 24
+#define DTD_1D_MASK 0x01
+#define DTD_1D_SHFT 23
+#define DTD_EVEN_LINE_SKIP_MASK 0x01
+#define DTD_EVEN_LINE_SKIP_SHFT 20
+#define DTD_ODD_LINE_SKIP_MASK 0x01
+#define DTD_ODD_LINE_SKIP_SHFT 16
+#define DTD_LINE_STRIDE_MASK 0xffff
+#define DTD_LINE_STRIDE_SHFT 0
+
+/* xfer_length_height */
+#define DTD_LINE_LENGTH_MASK 0xffff
+#define DTD_LINE_LENGTH_SHFT 16
+#define DTD_XFER_HEIGHT_MASK 0xffff
+#define DTD_XFER_HEIGHT_SHFT 0
+
+/* pkt_ctl */
+#define DTD_PKT_TYPE_MASK 0x1f
+#define DTD_PKT_TYPE_SHFT 27
+#define DTD_MODE_MASK 0x01
+#define DTD_MODE_SHFT 26
+#define DTD_DIR_MASK 0x01
+#define DTD_DIR_SHFT 25
+#define DTD_CHAN_MASK 0x01ff
+#define DTD_CHAN_SHFT 16
+#define DTD_PRI_MASK 0x0f
+#define DTD_PRI_SHFT 9
+#define DTD_NEXT_CHAN_MASK 0x01ff
+#define DTD_NEXT_CHAN_SHFT 0
+
+/* frame_width_height */
+#define DTD_FRAME_WIDTH_MASK 0xffff
+#define DTD_FRAME_WIDTH_SHFT 16
+#define DTD_FRAME_HEIGHT_MASK 0xffff
+#define DTD_FRAME_HEIGHT_SHFT 0
+
+/* start_h_v */
+#define DTD_H_START_MASK 0xffff
+#define DTD_H_START_SHFT 16
+#define DTD_V_START_MASK 0xffff
+#define DTD_V_START_SHFT 0
+
+#define DTD_DESC_START_SHIFT 5
+#define DTD_WRITE_DESC_MASK 0x01
+#define DTD_WRITE_DESC_SHIFT 2
+#define DTD_DROP_DATA_MASK 0x01
+#define DTD_DROP_DATA_SHIFT 1
+#define DTD_USE_DESC_MASK 0x01
+#define DTD_USE_DESC_SHIFT 0
+
+/* max_width_height */
+#define DTD_MAX_WIDTH_MASK 0x07
+#define DTD_MAX_WIDTH_SHFT 4
+#define DTD_MAX_HEIGHT_MASK 0x07
+#define DTD_MAX_HEIGHT_SHFT 0
+
+/* max width configurations */
+/* unlimited width */
+#define MAX_OUT_WIDTH_UNLIMITED 0
+/* as specified in max_size1 reg */
+#define MAX_OUT_WIDTH_REG1 1
+/* as specified in max_size2 reg */
+#define MAX_OUT_WIDTH_REG2 2
+/* as specified in max_size3 reg */
+#define MAX_OUT_WIDTH_REG3 3
+/* maximum of 352 pixels as width */
+#define MAX_OUT_WIDTH_352 4
+/* maximum of 768 pixels as width */
+#define MAX_OUT_WIDTH_768 5
+/* maximum of 1280 pixels as width */
+#define MAX_OUT_WIDTH_1280 6
+/* maximum of 1920 pixels as width */
+#define MAX_OUT_WIDTH_1920 7
+
+/* max height configurations */
+/* unlimited height */
+#define MAX_OUT_HEIGHT_UNLIMITED 0
+/* as specified in max_size1 reg */
+#define MAX_OUT_HEIGHT_REG1 1
+/* as specified in max_size2 reg */
+#define MAX_OUT_HEIGHT_REG2 2
+/* as specified in max_size3 reg */
+#define MAX_OUT_HEIGHT_REG3 3
+/* maximum of 288 lines as height */
+#define MAX_OUT_HEIGHT_288 4
+/* maximum of 576 lines as height */
+#define MAX_OUT_HEIGHT_576 5
+/* maximum of 720 lines as height */
+#define MAX_OUT_HEIGHT_720 6
+/* maximum of 1080 lines as height */
+#define MAX_OUT_HEIGHT_1080 7
+
+static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
+ bool one_d, bool even_line_skip, bool odd_line_skip,
+ int line_stride)
+{
+ return (type << DTD_DATA_TYPE_SHFT) | (notify << DTD_NOTIFY_SHFT) |
+ (field << DTD_FIELD_SHFT) | (one_d << DTD_1D_SHFT) |
+ (even_line_skip << DTD_EVEN_LINE_SKIP_SHFT) |
+ (odd_line_skip << DTD_ODD_LINE_SKIP_SHFT) |
+ line_stride;
+}
+
+static inline u32 dtd_xfer_length_height(int line_length, int xfer_height)
+{
+ return (line_length << DTD_LINE_LENGTH_SHFT) | xfer_height;
+}
+
+static inline u32 dtd_pkt_ctl(bool mode, bool dir, int chan, int pri,
+ int next_chan)
+{
+ return (DTD_PKT_TYPE << DTD_PKT_TYPE_SHFT) | (mode << DTD_MODE_SHFT) |
+ (dir << DTD_DIR_SHFT) | (chan << DTD_CHAN_SHFT) |
+ (pri << DTD_PRI_SHFT) | next_chan;
+}
+
+static inline u32 dtd_frame_width_height(int width, int height)
+{
+ return (width << DTD_FRAME_WIDTH_SHFT) | height;
+}
+
+static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
+ bool drop_data, bool use_desc)
+{
+ return (addr << DTD_DESC_START_SHIFT) |
+ (write_desc << DTD_WRITE_DESC_SHIFT) |
+ (drop_data << DTD_DROP_DATA_SHIFT) |
+ use_desc;
+}
+
+static inline u32 dtd_start_h_v(int h_start, int v_start)
+{
+ return (h_start << DTD_H_START_SHFT) | v_start;
+}
+
+static inline u32 dtd_max_width_height(int max_width, int max_height)
+{
+ return (max_width << DTD_MAX_WIDTH_SHFT) | max_height;
+}
+
+static inline int dtd_get_data_type(struct vpdma_dtd *dtd)
+{
+ return dtd->type_ctl_stride >> DTD_DATA_TYPE_SHFT;
+}
+
+static inline bool dtd_get_notify(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_NOTIFY_SHFT) & DTD_NOTIFY_MASK;
+}
+
+static inline int dtd_get_field(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_FIELD_SHFT) & DTD_FIELD_MASK;
+}
+
+static inline bool dtd_get_1d(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_1D_SHFT) & DTD_1D_MASK;
+}
+
+static inline bool dtd_get_even_line_skip(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_EVEN_LINE_SKIP_SHFT)
+ & DTD_EVEN_LINE_SKIP_MASK;
+}
+
+static inline bool dtd_get_odd_line_skip(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_ODD_LINE_SKIP_SHFT)
+ & DTD_ODD_LINE_SKIP_MASK;
+}
+
+static inline int dtd_get_line_stride(struct vpdma_dtd *dtd)
+{
+ return dtd->type_ctl_stride & DTD_LINE_STRIDE_MASK;
+}
+
+static inline int dtd_get_line_length(struct vpdma_dtd *dtd)
+{
+ return dtd->xfer_length_height >> DTD_LINE_LENGTH_SHFT;
+}
+
+static inline int dtd_get_xfer_height(struct vpdma_dtd *dtd)
+{
+ return dtd->xfer_length_height & DTD_XFER_HEIGHT_MASK;
+}
+
+static inline int dtd_get_pkt_type(struct vpdma_dtd *dtd)
+{
+ return dtd->pkt_ctl >> DTD_PKT_TYPE_SHFT;
+}
+
+static inline bool dtd_get_mode(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_MODE_SHFT) & DTD_MODE_MASK;
+}
+
+static inline bool dtd_get_dir(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_DIR_SHFT) & DTD_DIR_MASK;
+}
+
+static inline int dtd_get_chan(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_CHAN_SHFT) & DTD_CHAN_MASK;
+}
+
+static inline int dtd_get_priority(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_PRI_SHFT) & DTD_PRI_MASK;
+}
+
+static inline int dtd_get_next_chan(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_NEXT_CHAN_SHFT) & DTD_NEXT_CHAN_MASK;
+}
+
+static inline int dtd_get_frame_width(struct vpdma_dtd *dtd)
+{
+ return dtd->frame_width_height >> DTD_FRAME_WIDTH_SHFT;
+}
+
+static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
+{
+ return dtd->frame_width_height & DTD_FRAME_HEIGHT_MASK;
+}
+
+static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
+{
+ return dtd->desc_write_addr >> DTD_DESC_START_SHIFT;
+}
+
+static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
+{
+ return (dtd->desc_write_addr >> DTD_WRITE_DESC_SHIFT) &
+ DTD_WRITE_DESC_MASK;
+}
+
+static inline bool dtd_get_drop_data(struct vpdma_dtd *dtd)
+{
+ return (dtd->desc_write_addr >> DTD_DROP_DATA_SHIFT) &
+ DTD_DROP_DATA_MASK;
+}
+
+static inline bool dtd_get_use_desc(struct vpdma_dtd *dtd)
+{
+ return dtd->desc_write_addr & DTD_USE_DESC_MASK;
+}
+
+static inline int dtd_get_h_start(struct vpdma_dtd *dtd)
+{
+ return dtd->start_h_v >> DTD_H_START_SHFT;
+}
+
+static inline int dtd_get_v_start(struct vpdma_dtd *dtd)
+{
+ return dtd->start_h_v & DTD_V_START_MASK;
+}
+
+static inline int dtd_get_max_width(struct vpdma_dtd *dtd)
+{
+ return (dtd->max_width_height >> DTD_MAX_WIDTH_SHFT) &
+ DTD_MAX_WIDTH_MASK;
+}
+
+static inline int dtd_get_max_height(struct vpdma_dtd *dtd)
+{
+ return (dtd->max_width_height >> DTD_MAX_HEIGHT_SHFT) &
+ DTD_MAX_HEIGHT_MASK;
+}
+
+/*
+ * configuration descriptor
+ */
+struct vpdma_cfd {
+ union {
+ u32 dest_addr_offset;
+ u32 w0;
+ };
+ union {
+ u32 block_len; /* in words */
+ u32 w1;
+ };
+ u32 payload_addr;
+ u32 ctl_payload_len; /* in words */
+};
+
+/* Configuration descriptor specifics */
+
+#define CFD_PKT_TYPE 0xb
+
+#define CFD_DIRECT 1
+#define CFD_INDIRECT 0
+#define CFD_CLS_ADB 0
+#define CFD_CLS_BLOCK 1
+
+/* block_len */
+#define CFD__BLOCK_LEN_MASK 0xffff
+#define CFD__BLOCK_LEN_SHFT 0
+
+/* ctl_payload_len */
+#define CFD_PKT_TYPE_MASK 0x1f
+#define CFD_PKT_TYPE_SHFT 27
+#define CFD_DIRECT_MASK 0x01
+#define CFD_DIRECT_SHFT 26
+#define CFD_CLASS_MASK 0x03
+#define CFD_CLASS_SHFT 24
+#define CFD_DEST_MASK 0xff
+#define CFD_DEST_SHFT 16
+#define CFD_PAYLOAD_LEN_MASK 0xffff
+#define CFD_PAYLOAD_LEN_SHFT 0
+
+static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest,
+ int payload_len)
+{
+ return (CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) |
+ (direct << CFD_DIRECT_SHFT) |
+ (cls << CFD_CLASS_SHFT) |
+ (dest << CFD_DEST_SHFT) |
+ payload_len;
+}
+
+static inline int cfd_get_pkt_type(struct vpdma_cfd *cfd)
+{
+ return cfd->ctl_payload_len >> CFD_PKT_TYPE_SHFT;
+}
+
+static inline bool cfd_get_direct(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_DIRECT_SHFT) & CFD_DIRECT_MASK;
+}
+
+static inline bool cfd_get_class(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_CLASS_SHFT) & CFD_CLASS_MASK;
+}
+
+static inline int cfd_get_dest(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_DEST_SHFT) & CFD_DEST_MASK;
+}
+
+static inline int cfd_get_payload_len(struct vpdma_cfd *cfd)
+{
+ return cfd->ctl_payload_len & CFD_PAYLOAD_LEN_MASK;
+}
+
+/*
+ * control descriptor
+ */
+struct vpdma_ctd {
+ union {
+ u32 timer_value;
+ u32 list_addr;
+ u32 w0;
+ };
+ union {
+ u32 pixel_line_count;
+ u32 list_size;
+ u32 w1;
+ };
+ union {
+ u32 event;
+ u32 fid_ctl;
+ u32 w2;
+ };
+ u32 type_source_ctl;
+};
+
+/* control descriptor types */
+#define CTD_TYPE_SYNC_ON_CLIENT 0
+#define CTD_TYPE_SYNC_ON_LIST 1
+#define CTD_TYPE_SYNC_ON_EXT 2
+#define CTD_TYPE_SYNC_ON_LM_TIMER 3
+#define CTD_TYPE_SYNC_ON_CHANNEL 4
+#define CTD_TYPE_CHNG_CLIENT_IRQ 5
+#define CTD_TYPE_SEND_IRQ 6
+#define CTD_TYPE_RELOAD_LIST 7
+#define CTD_TYPE_ABORT_CHANNEL 8
+
+#define CTD_PKT_TYPE 0xc
+
+/* timer_value */
+#define CTD_TIMER_VALUE_MASK 0xffff
+#define CTD_TIMER_VALUE_SHFT 0
+
+/* pixel_line_count */
+#define CTD_PIXEL_COUNT_MASK 0xffff
+#define CTD_PIXEL_COUNT_SHFT 16
+#define CTD_LINE_COUNT_MASK 0xffff
+#define CTD_LINE_COUNT_SHFT 0
+
+/* list_size */
+#define CTD_LIST_SIZE_MASK 0xffff
+#define CTD_LIST_SIZE_SHFT 0
+
+/* event */
+#define CTD_EVENT_MASK 0x0f
+#define CTD_EVENT_SHFT 0
+
+/* fid_ctl */
+#define CTD_FID2_MASK 0x03
+#define CTD_FID2_SHFT 4
+#define CTD_FID1_MASK 0x03
+#define CTD_FID1_SHFT 2
+#define CTD_FID0_MASK 0x03
+#define CTD_FID0_SHFT 0
+
+/* type_source_ctl */
+#define CTD_PKT_TYPE_MASK 0x1f
+#define CTD_PKT_TYPE_SHFT 27
+#define CTD_SOURCE_MASK 0xff
+#define CTD_SOURCE_SHFT 16
+#define CTD_CONTROL_MASK 0x0f
+#define CTD_CONTROL_SHFT 0
+
+static inline u32 ctd_pixel_line_count(int pixel_count, int line_count)
+{
+ return (pixel_count << CTD_PIXEL_COUNT_SHFT) | line_count;
+}
+
+static inline u32 ctd_set_fid_ctl(int fid0, int fid1, int fid2)
+{
+ return (fid2 << CTD_FID2_SHFT) | (fid1 << CTD_FID1_SHFT) | fid0;
+}
+
+static inline u32 ctd_type_source_ctl(int source, int control)
+{
+ return (CTD_PKT_TYPE << CTD_PKT_TYPE_SHFT) |
+ (source << CTD_SOURCE_SHFT) | control;
+}
+
+static inline u32 ctd_get_pixel_count(struct vpdma_ctd *ctd)
+{
+ return ctd->pixel_line_count >> CTD_PIXEL_COUNT_SHFT;
+}
+
+static inline int ctd_get_line_count(struct vpdma_ctd *ctd)
+{
+ return ctd->pixel_line_count & CTD_LINE_COUNT_MASK;
+}
+
+static inline int ctd_get_event(struct vpdma_ctd *ctd)
+{
+ return ctd->event & CTD_EVENT_MASK;
+}
+
+static inline int ctd_get_fid2_ctl(struct vpdma_ctd *ctd)
+{
+ return (ctd->fid_ctl >> CTD_FID2_SHFT) & CTD_FID2_MASK;
+}
+
+static inline int ctd_get_fid1_ctl(struct vpdma_ctd *ctd)
+{
+ return (ctd->fid_ctl >> CTD_FID1_SHFT) & CTD_FID1_MASK;
+}
+
+static inline int ctd_get_fid0_ctl(struct vpdma_ctd *ctd)
+{
+	return ctd->fid_ctl & CTD_FID0_MASK;
+}
+
+static inline int ctd_get_pkt_type(struct vpdma_ctd *ctd)
+{
+ return ctd->type_source_ctl >> CTD_PKT_TYPE_SHFT;
+}
+
+static inline int ctd_get_source(struct vpdma_ctd *ctd)
+{
+ return (ctd->type_source_ctl >> CTD_SOURCE_SHFT) & CTD_SOURCE_MASK;
+}
+
+static inline int ctd_get_ctl(struct vpdma_ctd *ctd)
+{
+ return ctd->type_source_ctl & CTD_CONTROL_MASK;
+}
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
new file mode 100644
index 00000000000..4e58069e24f
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -0,0 +1,2099 @@
+/*
+ * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vpdma.h"
+#include "vpe_regs.h"
+
+#define VPE_MODULE_NAME "vpe"
+
+/* minimum and maximum frame sizes */
+#define MIN_W 128
+#define MIN_H 128
+#define MAX_W 1920
+#define MAX_H 1080
+
+/* required alignments */
+#define S_ALIGN 0 /* multiple of 1 */
+#define H_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN 1 /* multiple of 2 */
+
+/* multiple of 128 bits, line stride, 16 bytes */
+#define L_ALIGN 4
+
+/* flags that indicate a format can be used for capture/output */
+#define VPE_FMT_TYPE_CAPTURE (1 << 0)
+#define VPE_FMT_TYPE_OUTPUT (1 << 1)
+
+/* used as plane indices */
+#define VPE_MAX_PLANES 2
+#define VPE_LUMA 0
+#define VPE_CHROMA 1
+
+/* per m2m context info */
+#define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */
+
+#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
+
+/*
+ * each VPE context can need up to 3 config descriptors, 7 input descriptors,
+ * 3 output descriptors, and 10 control descriptors
+ */
+#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
+ 13 * VPDMA_CFD_CTD_DESC_SIZE)
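Spelling out the arithmetic behind the macro (a standalone check, not part of
the patch): 7 input plus 3 output data transfer descriptors at 32 bytes each,
plus 3 config and 10 control descriptors at 16 bytes each.

_Static_assert((7 + 3) * VPDMA_DTD_DESC_SIZE +
	       (3 + 10) * VPDMA_CFD_CTD_DESC_SIZE == 528,
	       "per-context descriptor list is 320 + 208 = 528 bytes");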
+
+#define vpe_dbg(vpedev, fmt, arg...) \
+ dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
+#define vpe_err(vpedev, fmt, arg...) \
+ dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
+
+struct vpe_us_coeffs {
+ unsigned short anchor_fid0_c0;
+ unsigned short anchor_fid0_c1;
+ unsigned short anchor_fid0_c2;
+ unsigned short anchor_fid0_c3;
+ unsigned short interp_fid0_c0;
+ unsigned short interp_fid0_c1;
+ unsigned short interp_fid0_c2;
+ unsigned short interp_fid0_c3;
+ unsigned short anchor_fid1_c0;
+ unsigned short anchor_fid1_c1;
+ unsigned short anchor_fid1_c2;
+ unsigned short anchor_fid1_c3;
+ unsigned short interp_fid1_c0;
+ unsigned short interp_fid1_c1;
+ unsigned short interp_fid1_c2;
+ unsigned short interp_fid1_c3;
+};
+
+/*
+ * Default upsampler coefficients
+ */
+static const struct vpe_us_coeffs us_coeffs[] = {
+ {
+ /* Coefficients for progressive input */
+ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
+ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
+ },
+ {
+ /* Coefficients for Top Field Interlaced input */
+ 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
+ /* Coefficients for Bottom Field Interlaced input */
+ 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
+ },
+};
+
+/*
+ * The following registers configure some of the parameters of the motion and
+ * edge detection blocks inside the DEI. They generally remain the same, but
+ * they could be exposed to userspace later if someone needs to tweak them.
+ */
+struct vpe_dei_regs {
+ unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */
+ unsigned long edi_config_reg; /* VPE_DEI_REG3 */
+ unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */
+ unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */
+ unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */
+ unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */
+};
+
+/*
+ * default expert DEI register values, unlikely to be modified.
+ */
+static const struct vpe_dei_regs dei_regs = {
+ 0x020C0804u,
+ 0x0118100Fu,
+ 0x08040200u,
+ 0x1010100Cu,
+ 0x10101010u,
+ 0x10101010u,
+};
+
+/*
+ * The port_data structure contains per-port data.
+ */
+struct vpe_port_data {
+ enum vpdma_channel channel; /* VPDMA channel */
+ u8 vb_index; /* input frame f, f-1, f-2 index */
+	u8 vb_part;		/* plane index for co-planar formats */
+};
+
+/*
+ * Define indices into the port_data tables
+ */
+#define VPE_PORT_LUMA1_IN 0
+#define VPE_PORT_CHROMA1_IN 1
+#define VPE_PORT_LUMA2_IN 2
+#define VPE_PORT_CHROMA2_IN 3
+#define VPE_PORT_LUMA3_IN 4
+#define VPE_PORT_CHROMA3_IN 5
+#define VPE_PORT_MV_IN 6
+#define VPE_PORT_MV_OUT 7
+#define VPE_PORT_LUMA_OUT 8
+#define VPE_PORT_CHROMA_OUT 9
+#define VPE_PORT_RGB_OUT 10
+
+static const struct vpe_port_data port_data[11] = {
+ [VPE_PORT_LUMA1_IN] = {
+ .channel = VPE_CHAN_LUMA1_IN,
+ .vb_index = 0,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA1_IN] = {
+ .channel = VPE_CHAN_CHROMA1_IN,
+ .vb_index = 0,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_LUMA2_IN] = {
+ .channel = VPE_CHAN_LUMA2_IN,
+ .vb_index = 1,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA2_IN] = {
+ .channel = VPE_CHAN_CHROMA2_IN,
+ .vb_index = 1,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_LUMA3_IN] = {
+ .channel = VPE_CHAN_LUMA3_IN,
+ .vb_index = 2,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA3_IN] = {
+ .channel = VPE_CHAN_CHROMA3_IN,
+ .vb_index = 2,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_MV_IN] = {
+ .channel = VPE_CHAN_MV_IN,
+ },
+ [VPE_PORT_MV_OUT] = {
+ .channel = VPE_CHAN_MV_OUT,
+ },
+ [VPE_PORT_LUMA_OUT] = {
+ .channel = VPE_CHAN_LUMA_OUT,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA_OUT] = {
+ .channel = VPE_CHAN_CHROMA_OUT,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_RGB_OUT] = {
+ .channel = VPE_CHAN_RGB_OUT,
+ .vb_part = VPE_LUMA,
+ },
+};
+
+
+/* driver info for each of the supported video formats */
+struct vpe_fmt {
+ char *name; /* human-readable name */
+ u32 fourcc; /* standard format identifier */
+ u8 types; /* CAPTURE and/or OUTPUT */
+ u8 coplanar; /* set for unpacked Luma and Chroma */
+ /* vpdma format info for each plane */
+ struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
+};
+
+static struct vpe_fmt vpe_formats[] = {
+ {
+ .name = "YUV 422 co-planar",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
+ },
+ },
+ {
+ .name = "YUV 420 co-planar",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
+ },
+ },
+ {
+ .name = "YUYV 422 packed",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
+ },
+ },
+ {
+ .name = "UYVY 422 packed",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
+ },
+ },
+};
+
+/*
+ * per-queue, driver-specific private data.
+ * there is one source queue and one destination queue for each m2m context.
+ */
+struct vpe_q_data {
+ unsigned int width; /* frame width */
+ unsigned int height; /* frame height */
+ unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
+ enum v4l2_colorspace colorspace;
+ enum v4l2_field field; /* supported field value */
+ unsigned int flags;
+ unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
+ struct v4l2_rect c_rect; /* crop/compose rectangle */
+ struct vpe_fmt *fmt; /* format info */
+};
+
+/* vpe_q_data flag bits */
+#define Q_DATA_FRAME_1D (1 << 0)
+#define Q_DATA_MODE_TILED (1 << 1)
+#define Q_DATA_INTERLACED (1 << 2)
+
+enum {
+ Q_DATA_SRC = 0,
+ Q_DATA_DST = 1,
+};
+
+/* find our format description corresponding to the passed v4l2_format */
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ struct vpe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
+ fmt = &vpe_formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/*
+ * There is one vpe_dev structure in the driver; it is shared by
+ * all instances.
+ */
+struct vpe_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ atomic_t num_instances; /* count of driver instances */
+ dma_addr_t loaded_mmrs; /* shadow mmrs in device */
+ struct mutex dev_mutex;
+ spinlock_t lock;
+
+ int irq;
+ void __iomem *base;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct vpdma_data *vpdma; /* vpdma data handle */
+};
+
+/*
+ * There is one vpe_ctx structure for each m2m context.
+ */
+struct vpe_ctx {
+ struct v4l2_fh fh;
+ struct vpe_dev *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler hdl;
+
+ unsigned int field; /* current field */
+ unsigned int sequence; /* current frame/field seq */
+ unsigned int aborting; /* abort after next irq */
+
+ unsigned int bufs_per_job; /* input buffers per batch */
+ unsigned int bufs_completed; /* bufs done in this batch */
+
+ struct vpe_q_data q_data[2]; /* src & dst queue data */
+ struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
+ struct vb2_buffer *dst_vb;
+
+ dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
+ void *mv_buf[2]; /* virtual addrs of motion vector bufs */
+ size_t mv_buf_size; /* current motion vector buffer size */
+ struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
+ struct vpdma_desc_list desc_list; /* DMA descriptor list */
+
+ bool deinterlacing; /* using de-interlacer */
+ bool load_mmrs; /* have new shadow reg values */
+
+ unsigned int src_mv_buf_selector;
+};
+
+
+/*
+ * M2M devices get 2 queues.
+ * Return the queue given the type.
+ */
+static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->q_data[Q_DATA_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->q_data[Q_DATA_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+static u32 read_reg(struct vpe_dev *dev, int offset)
+{
+ return ioread32(dev->base + offset);
+}
+
+static void write_reg(struct vpe_dev *dev, int offset, u32 value)
+{
+ iowrite32(value, dev->base + offset);
+}
+
+/* register field read/write helpers */
+static int get_field(u32 value, u32 mask, int shift)
+{
+ return (value & (mask << shift)) >> shift;
+}
+
+static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
+{
+ return get_field(read_reg(dev, offset), mask, shift);
+}
+
+static void write_field(u32 *valp, u32 field, u32 mask, int shift)
+{
+ u32 val = *valp;
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+ *valp = val;
+}
+
+static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
+ u32 mask, int shift)
+{
+ u32 val = read_reg(dev, offset);
+
+ write_field(&val, field, mask, shift);
+
+ write_reg(dev, offset, val);
+}
+
+/*
+ * DMA address/data block for the shadow registers
+ */
+struct vpe_mmr_adb {
+ struct vpdma_adb_hdr out_fmt_hdr;
+ u32 out_fmt_reg[1];
+ u32 out_fmt_pad[3];
+ struct vpdma_adb_hdr us1_hdr;
+ u32 us1_regs[8];
+ struct vpdma_adb_hdr us2_hdr;
+ u32 us2_regs[8];
+ struct vpdma_adb_hdr us3_hdr;
+ u32 us3_regs[8];
+ struct vpdma_adb_hdr dei_hdr;
+ u32 dei_regs[8];
+ struct vpdma_adb_hdr sc_hdr;
+ u32 sc_regs[1];
+ u32 sc_pad[3];
+ struct vpdma_adb_hdr csc_hdr;
+ u32 csc_regs[6];
+ u32 csc_pad[2];
+};
+
+#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
+ VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
+/*
+ * Set the headers for all of the address/data block structures.
+ */
+static void init_adb_hdrs(struct vpe_ctx *ctx)
+{
+ VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
+ VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
+ VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
+}
+
+/*
+ * Allocate or re-allocate the motion vector DMA buffers
+ * There are two buffers, one for input and one for output.
+ * However, the roles are reversed after each field is processed.
+ * In other words, after each field is processed, the previous
+ * output (dst) MV buffer becomes the new input (src) MV buffer.
+ */
+static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
+{
+ struct device *dev = ctx->dev->v4l2_dev.dev;
+
+ if (ctx->mv_buf_size == size)
+ return 0;
+
+ if (ctx->mv_buf[0])
+ dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
+ ctx->mv_buf_dma[0]);
+
+ if (ctx->mv_buf[1])
+ dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
+ ctx->mv_buf_dma[1]);
+
+ if (size == 0)
+ return 0;
+
+ ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
+ GFP_KERNEL);
+ if (!ctx->mv_buf[0]) {
+ vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
+ return -ENOMEM;
+ }
+
+ ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
+ GFP_KERNEL);
+ if (!ctx->mv_buf[1]) {
+ vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
+ dma_free_coherent(dev, size, ctx->mv_buf[0],
+ ctx->mv_buf_dma[0]);
+
+ return -ENOMEM;
+ }
+
+ ctx->mv_buf_size = size;
+ ctx->src_mv_buf_selector = 0;
+
+ return 0;
+}
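An illustrative sketch of the role reversal described above, not part of the
patch (the function is hypothetical): add_in_dtd()/add_out_dtd() below select
the buffers this way, and vpe_irq() flips the selector once a field has been
processed.

static void example_mv_ping_pong(struct vpe_ctx *ctx)
{
	/* the current src MV buffer feeds the de-interlacer ... */
	dma_addr_t mv_in = ctx->mv_buf_dma[ctx->src_mv_buf_selector];
	/* ... while the other buffer receives the new motion vectors */
	dma_addr_t mv_out = ctx->mv_buf_dma[!ctx->src_mv_buf_selector];

	/* program the MV_IN port with mv_in and the MV_OUT port with mv_out */
	(void)mv_in;
	(void)mv_out;

	/* the previous dst MV buffer becomes the next src MV buffer */
	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
}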
+
+static void free_mv_buffers(struct vpe_ctx *ctx)
+{
+ realloc_mv_buffers(ctx, 0);
+}
+
+/*
+ * While de-interlacing, we keep the two most recent input buffers
+ * around. This function releases those two buffers when we have
+ * finished processing the current stream.
+ */
+static void free_vbs(struct vpe_ctx *ctx)
+{
+ struct vpe_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ if (ctx->src_vbs[2] == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ctx->src_vbs[2]) {
+ v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Enable or disable the VPE clocks
+ */
+static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
+ write_reg(dev, VPE_CLK_ENABLE, val);
+}
+
+static void vpe_top_reset(struct vpe_dev *dev)
+{
+
+ write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
+ VPE_DATA_PATH_CLK_RESET_SHIFT);
+
+ usleep_range(100, 150);
+
+ write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
+ VPE_DATA_PATH_CLK_RESET_SHIFT);
+}
+
+static void vpe_top_vpdma_reset(struct vpe_dev *dev)
+{
+ write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
+ VPE_VPDMA_CLK_RESET_SHIFT);
+
+ usleep_range(100, 150);
+
+ write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
+ VPE_VPDMA_CLK_RESET_SHIFT);
+}
+
+/*
+ * Load the correct set of upsampler coefficients into the shadow MMRs
+ */
+static void set_us_coefficients(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ u32 *us1_reg = &mmr_adb->us1_regs[0];
+ u32 *us2_reg = &mmr_adb->us2_regs[0];
+ u32 *us3_reg = &mmr_adb->us3_regs[0];
+ const unsigned short *cp, *end_cp;
+
+ cp = &us_coeffs[0].anchor_fid0_c0;
+
+ if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */
+ cp += sizeof(us_coeffs[0]) / sizeof(*cp);
+
+ end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
+
+ while (cp < end_cp) {
+ write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
+ write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
+ *us2_reg++ = *us1_reg;
+ *us3_reg++ = *us1_reg++;
+ }
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
+ */
+static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
+{
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *us1_reg0 = &mmr_adb->us1_regs[0];
+ u32 *us2_reg0 = &mmr_adb->us2_regs[0];
+ u32 *us3_reg0 = &mmr_adb->us3_regs[0];
+ int line_mode = 1;
+ int cfg_mode = 1;
+
+ /*
+ * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
+ * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
+ */
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ cfg_mode = 0;
+ line_mode = 0; /* double lines to line buffer */
+ }
+
+ write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+
+	/* set the VPDMA line mode for the chroma input channels */
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
+
+ /* frame start for input luma */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA1_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA2_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA3_IN);
+
+ /* frame start for input chroma */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA1_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA2_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA3_IN);
+
+ /* frame start for MV in client */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_MV_IN);
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the shadow registers that are modified when the source
+ * format changes.
+ */
+static void set_src_registers(struct vpe_ctx *ctx)
+{
+ set_us_coefficients(ctx);
+}
+
+/*
+ * Set the shadow registers that are modified when the destination
+ * format changes.
+ */
+static void set_dst_registers(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ u32 val = 0;
+
+ /* select RGB path when color space conversion is supported in future */
+ if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
+ val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
+ else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
+ val |= VPE_COLOR_SEPARATE_422;
+
+ /* The source of CHR_DS is always the scaler, whether it's used or not */
+ val |= VPE_DS_SRC_DEI_SCALER;
+
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ val |= VPE_DS_BYPASS;
+
+ mmr_adb->out_fmt_reg[0] = val;
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the de-interlacer shadow register values
+ */
+static void set_dei_regs(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int src_w = s_q_data->c_rect.width;
+ u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
+ bool deinterlace = true;
+ u32 val = 0;
+
+ /*
+	 * According to the TRM, we should set the DEI in progressive bypass
+	 * mode when the input content is progressive. However, interlace
+	 * bypass mode bypasses the DEI correctly for both progressive and
+	 * interlaced content, and progressive bypass mode is not recommended.
+ */
+ if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
+ !(s_q_data->flags & Q_DATA_INTERLACED)) {
+ deinterlace = false;
+ val = VPE_DEI_INTERLACE_BYPASS;
+ }
+
+ src_h = deinterlace ? src_h * 2 : src_h;
+
+ val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
+ (src_w << VPE_DEI_WIDTH_SHIFT) |
+ VPE_DEI_FIELD_FLUSH;
+
+ *dei_mmr0 = val;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_dei_shadow_registers(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *dei_mmr = &mmr_adb->dei_regs[0];
+ const struct vpe_dei_regs *cur = &dei_regs;
+
+ dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
+ dei_mmr[3] = cur->edi_config_reg;
+ dei_mmr[4] = cur->edi_lut_reg0;
+ dei_mmr[5] = cur->edi_lut_reg1;
+ dei_mmr[6] = cur->edi_lut_reg2;
+ dei_mmr[7] = cur->edi_lut_reg3;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
+
+ *shadow_csc_reg5 |= VPE_CSC_BYPASS;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_sc_regs_bypass(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *sc_reg0 = &mmr_adb->sc_regs[0];
+ u32 val = 0;
+
+ val |= VPE_SC_BYPASS;
+ *sc_reg0 = val;
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the shadow registers whose values are modified when either the
+ * source or destination format is changed.
+ */
+static int set_srcdst_params(struct vpe_ctx *ctx)
+{
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ size_t mv_buf_size;
+ int ret;
+
+ ctx->sequence = 0;
+ ctx->field = V4L2_FIELD_TOP;
+
+ if ((s_q_data->flags & Q_DATA_INTERLACED) &&
+ !(d_q_data->flags & Q_DATA_INTERLACED)) {
+ const struct vpdma_data_format *mv =
+ &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+
+ ctx->deinterlacing = 1;
+ mv_buf_size =
+ (s_q_data->width * s_q_data->height * mv->depth) >> 3;
+ } else {
+ ctx->deinterlacing = 0;
+ mv_buf_size = 0;
+ }
+
+ free_vbs(ctx);
+
+ ret = realloc_mv_buffers(ctx, mv_buf_size);
+ if (ret)
+ return ret;
+
+ set_cfg_and_line_modes(ctx);
+ set_dei_regs(ctx);
+ set_csc_coeff_bypass(ctx);
+ set_sc_regs_bypass(ctx);
+
+ return 0;
+}
+
+/*
+ * Return the vpe_ctx structure for a given struct file
+ */
+static struct vpe_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vpe_ctx, fh);
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ int needed = ctx->bufs_per_job;
+
+ if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
+		needed += 2;	/* need two additional (most recent) fields */
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
+ return 0;
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/*
+ * Lock access to the device
+ */
+static void vpe_lock(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
+ mutex_lock(&dev->dev_mutex);
+}
+
+static void vpe_unlock(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static void vpe_dump_regs(struct vpe_dev *dev)
+{
+#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
+
+ vpe_dbg(dev, "VPE Registers:\n");
+
+ DUMPREG(PID);
+ DUMPREG(SYSCONFIG);
+ DUMPREG(INT0_STATUS0_RAW);
+ DUMPREG(INT0_STATUS0);
+ DUMPREG(INT0_ENABLE0);
+ DUMPREG(INT0_STATUS1_RAW);
+ DUMPREG(INT0_STATUS1);
+ DUMPREG(INT0_ENABLE1);
+ DUMPREG(CLK_ENABLE);
+ DUMPREG(CLK_RESET);
+ DUMPREG(CLK_FORMAT_SELECT);
+ DUMPREG(CLK_RANGE_MAP);
+ DUMPREG(US1_R0);
+ DUMPREG(US1_R1);
+ DUMPREG(US1_R2);
+ DUMPREG(US1_R3);
+ DUMPREG(US1_R4);
+ DUMPREG(US1_R5);
+ DUMPREG(US1_R6);
+ DUMPREG(US1_R7);
+ DUMPREG(US2_R0);
+ DUMPREG(US2_R1);
+ DUMPREG(US2_R2);
+ DUMPREG(US2_R3);
+ DUMPREG(US2_R4);
+ DUMPREG(US2_R5);
+ DUMPREG(US2_R6);
+ DUMPREG(US2_R7);
+ DUMPREG(US3_R0);
+ DUMPREG(US3_R1);
+ DUMPREG(US3_R2);
+ DUMPREG(US3_R3);
+ DUMPREG(US3_R4);
+ DUMPREG(US3_R5);
+ DUMPREG(US3_R6);
+ DUMPREG(US3_R7);
+ DUMPREG(DEI_FRAME_SIZE);
+ DUMPREG(MDT_BYPASS);
+ DUMPREG(MDT_SF_THRESHOLD);
+ DUMPREG(EDI_CONFIG);
+ DUMPREG(DEI_EDI_LUT_R0);
+ DUMPREG(DEI_EDI_LUT_R1);
+ DUMPREG(DEI_EDI_LUT_R2);
+ DUMPREG(DEI_EDI_LUT_R3);
+ DUMPREG(DEI_FMD_WINDOW_R0);
+ DUMPREG(DEI_FMD_WINDOW_R1);
+ DUMPREG(DEI_FMD_CONTROL_R0);
+ DUMPREG(DEI_FMD_CONTROL_R1);
+ DUMPREG(DEI_FMD_STATUS_R0);
+ DUMPREG(DEI_FMD_STATUS_R1);
+ DUMPREG(DEI_FMD_STATUS_R2);
+ DUMPREG(SC_MP_SC0);
+ DUMPREG(SC_MP_SC1);
+ DUMPREG(SC_MP_SC2);
+ DUMPREG(SC_MP_SC3);
+ DUMPREG(SC_MP_SC4);
+ DUMPREG(SC_MP_SC5);
+ DUMPREG(SC_MP_SC6);
+ DUMPREG(SC_MP_SC8);
+ DUMPREG(SC_MP_SC9);
+ DUMPREG(SC_MP_SC10);
+ DUMPREG(SC_MP_SC11);
+ DUMPREG(SC_MP_SC12);
+ DUMPREG(SC_MP_SC13);
+ DUMPREG(SC_MP_SC17);
+ DUMPREG(SC_MP_SC18);
+ DUMPREG(SC_MP_SC19);
+ DUMPREG(SC_MP_SC20);
+ DUMPREG(SC_MP_SC21);
+ DUMPREG(SC_MP_SC22);
+ DUMPREG(SC_MP_SC23);
+ DUMPREG(SC_MP_SC24);
+ DUMPREG(SC_MP_SC25);
+ DUMPREG(CSC_CSC00);
+ DUMPREG(CSC_CSC01);
+ DUMPREG(CSC_CSC02);
+ DUMPREG(CSC_CSC03);
+ DUMPREG(CSC_CSC04);
+ DUMPREG(CSC_CSC05);
+#undef DUMPREG
+}
+
+static void add_out_dtd(struct vpe_ctx *ctx, int port)
+{
+ struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
+ const struct vpe_port_data *p_data = &port_data[port];
+ struct vb2_buffer *vb = ctx->dst_vb;
+ struct v4l2_rect *c_rect = &q_data->c_rect;
+ struct vpe_fmt *fmt = q_data->fmt;
+ const struct vpdma_data_format *vpdma_fmt;
+ int mv_buf_selector = !ctx->src_mv_buf_selector;
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+
+ if (port == VPE_PORT_MV_OUT) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ } else {
+		/* co-planar formats pick the plane; packed formats use plane 0 */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+
+ vpdma_fmt = fmt->vpdma_fmt[plane];
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ if (!dma_addr) {
+ vpe_err(ctx->dev,
+ "acquiring output buffer(%d) dma_addr failed\n",
+ port);
+ return;
+ }
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+ flags |= VPDMA_DATA_FRAME_1D;
+ if (q_data->flags & Q_DATA_MODE_TILED)
+ flags |= VPDMA_DATA_MODE_TILED;
+
+ vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, flags);
+}
+
+static void add_in_dtd(struct vpe_ctx *ctx, int port)
+{
+ struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
+ const struct vpe_port_data *p_data = &port_data[port];
+ struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
+ struct v4l2_rect *c_rect = &q_data->c_rect;
+ struct vpe_fmt *fmt = q_data->fmt;
+ const struct vpdma_data_format *vpdma_fmt;
+ int mv_buf_selector = ctx->src_mv_buf_selector;
+ int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+
+ if (port == VPE_PORT_MV_IN) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ } else {
+		/* co-planar formats pick the plane; packed formats use plane 0 */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+
+ vpdma_fmt = fmt->vpdma_fmt[plane];
+
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ if (!dma_addr) {
+ vpe_err(ctx->dev,
+ "acquiring input buffer(%d) dma_addr failed\n",
+ port);
+ return;
+ }
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+ flags |= VPDMA_DATA_FRAME_1D;
+ if (q_data->flags & Q_DATA_MODE_TILED)
+ flags |= VPDMA_DATA_MODE_TILED;
+
+ vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
+ c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct vpe_ctx *ctx)
+{
+ write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
+ write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
+ VPE_DS1_UV_ERROR_INT);
+
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
+}
+
+static void disable_irqs(struct vpe_ctx *ctx)
+{
+ write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
+ write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
+
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This function is only called when both the source and destination
+ * buffers are in place.
+ */
+static void device_run(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+
+ if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
+ ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->src_vbs[2] == NULL);
+ ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->src_vbs[1] == NULL);
+ }
+
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
+ ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->dst_vb == NULL);
+
+ /* config descriptors */
+ if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
+ vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
+ ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
+ ctx->load_mmrs = false;
+ }
+
+ /* output data descriptors */
+ if (ctx->deinterlacing)
+ add_out_dtd(ctx, VPE_PORT_MV_OUT);
+
+ add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+
+ /* input data descriptors */
+ if (ctx->deinterlacing) {
+ add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
+
+ add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
+ }
+
+ add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
+
+ if (ctx->deinterlacing)
+ add_in_dtd(ctx, VPE_PORT_MV_IN);
+
+ /* sync on channel control descriptors for input ports */
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
+
+ if (ctx->deinterlacing) {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA2_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA2_IN);
+
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA3_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA3_IN);
+
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
+ }
+
+ /* sync on channel control descriptors for output ports */
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);
+
+ if (ctx->deinterlacing)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
+
+ enable_irqs(ctx);
+
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
+ vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
+}
+
+static void dei_error(struct vpe_ctx *ctx)
+{
+ dev_warn(ctx->dev->v4l2_dev.dev,
+ "received DEI error interrupt\n");
+}
+
+static void ds1_uv_error(struct vpe_ctx *ctx)
+{
+ dev_warn(ctx->dev->v4l2_dev.dev,
+ "received downsampler error interrupt\n");
+}
+
+static irqreturn_t vpe_irq(int irq_vpe, void *data)
+{
+ struct vpe_dev *dev = (struct vpe_dev *)data;
+ struct vpe_ctx *ctx;
+ struct vpe_q_data *d_q_data;
+ struct vb2_buffer *s_vb, *d_vb;
+ struct v4l2_buffer *s_buf, *d_buf;
+ unsigned long flags;
+ u32 irqst0, irqst1;
+
+ irqst0 = read_reg(dev, VPE_INT0_STATUS0);
+ if (irqst0) {
+ write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
+ vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
+ }
+
+ irqst1 = read_reg(dev, VPE_INT0_STATUS1);
+ if (irqst1) {
+ write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
+ vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
+ }
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ vpe_err(dev, "instance released before end of transaction\n");
+ goto handled;
+ }
+
+ if (irqst1) {
+ if (irqst1 & VPE_DEI_ERROR_INT) {
+ irqst1 &= ~VPE_DEI_ERROR_INT;
+ dei_error(ctx);
+ }
+ if (irqst1 & VPE_DS1_UV_ERROR_INT) {
+ irqst1 &= ~VPE_DS1_UV_ERROR_INT;
+ ds1_uv_error(ctx);
+ }
+ }
+
+ if (irqst0) {
+ if (irqst0 & VPE_INT0_LIST0_COMPLETE)
+ vpdma_clear_list_stat(ctx->dev->vpdma);
+
+ irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
+ }
+
+ if (irqst0 | irqst1) {
+ dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
+ "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
+ irqst0, irqst1);
+ }
+
+ disable_irqs(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+
+ vpdma_reset_desc_list(&ctx->desc_list);
+
+ /* the previous dst mv buffer becomes the next src mv buffer */
+ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
+
+ if (ctx->aborting)
+ goto finished;
+
+ s_vb = ctx->src_vbs[0];
+ d_vb = ctx->dst_vb;
+ s_buf = &s_vb->v4l2_buf;
+ d_buf = &d_vb->v4l2_buf;
+
+ d_buf->timestamp = s_buf->timestamp;
+ if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
+ d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
+ d_buf->timecode = s_buf->timecode;
+ }
+ d_buf->sequence = ctx->sequence;
+ d_buf->field = ctx->field;
+
+ d_q_data = &ctx->q_data[Q_DATA_DST];
+ if (d_q_data->flags & Q_DATA_INTERLACED) {
+ if (ctx->field == V4L2_FIELD_BOTTOM) {
+ ctx->sequence++;
+ ctx->field = V4L2_FIELD_TOP;
+ } else {
+ WARN_ON(ctx->field != V4L2_FIELD_TOP);
+ ctx->field = V4L2_FIELD_BOTTOM;
+ }
+ } else {
+ ctx->sequence++;
+ }
+
+ if (ctx->deinterlacing)
+ s_vb = ctx->src_vbs[2];
+
+ spin_lock_irqsave(&dev->lock, flags);
+ v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (ctx->deinterlacing) {
+ ctx->src_vbs[2] = ctx->src_vbs[1];
+ ctx->src_vbs[1] = ctx->src_vbs[0];
+ }
+
+ ctx->bufs_completed++;
+ if (ctx->bufs_completed < ctx->bufs_per_job) {
+ device_run(ctx);
+ goto handled;
+ }
+
+finished:
+ vpe_dbg(ctx->dev, "finishing transaction\n");
+ ctx->bufs_completed = 0;
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+handled:
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vpe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
+ strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, index;
+ struct vpe_fmt *fmt = NULL;
+
+ index = 0;
+ for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
+ if (vpe_formats[i].types & type) {
+ if (index == f->index) {
+ fmt = &vpe_formats[i];
+ break;
+ }
+ index++;
+ }
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vpe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
+
+ return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
+}
+
+static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vb2_queue *vq;
+ struct vpe_q_data *q_data;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->field = q_data->field;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix->colorspace = q_data->colorspace;
+ } else {
+ struct vpe_q_data *s_q_data;
+
+ /* get colorspace from the source queue */
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = s_q_data->colorspace;
+ }
+
+ pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
+ struct vpe_fmt *fmt, int type)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ int i;
+
+ if (!fmt || !(fmt->types & type)) {
+ vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
+ pix->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
+ &pix->height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ pix->pixelformat = fmt->fourcc;
+
+ if (type == VPE_FMT_TYPE_CAPTURE) {
+ struct vpe_q_data *s_q_data;
+
+ /* get colorspace from the source queue */
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = s_q_data->colorspace;
+ } else {
+ if (!pix->colorspace)
+ pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ }
+
+ for (i = 0; i < pix->num_planes; i++) {
+ int depth;
+
+ plane_fmt = &pix->plane_fmt[i];
+ depth = fmt->vpdma_fmt[i]->depth;
+
+ if (i == VPE_LUMA)
+ plane_fmt->bytesperline =
+ round_up((pix->width * depth) >> 3,
+ 1 << L_ALIGN);
+ else
+ plane_fmt->bytesperline = pix->width;
+
+ plane_fmt->sizeimage =
+ (pix->height * pix->width * depth) >> 3;
+ }
+
+ return 0;
+}
+
+static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_fmt *fmt = find_format(f);
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
+ else
+ return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
+}
+
+static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ struct vpe_q_data *q_data;
+ struct vb2_queue *vq;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ vpe_err(ctx->dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ q_data->fmt = find_format(f);
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->colorspace = pix->colorspace;
+ q_data->field = pix->field;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ plane_fmt = &pix->plane_fmt[i];
+
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ q_data->c_rect.left = 0;
+ q_data->c_rect.top = 0;
+ q_data->c_rect.width = q_data->width;
+ q_data->c_rect.height = q_data->height;
+
+ if (q_data->field == V4L2_FIELD_ALTERNATE)
+ q_data->flags |= Q_DATA_INTERLACED;
+ else
+ q_data->flags &= ~Q_DATA_INTERLACED;
+
+ vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
+ q_data->bytesperline[VPE_LUMA]);
+ if (q_data->fmt->coplanar)
+ vpe_dbg(ctx->dev, " bpl_uv %d\n",
+ q_data->bytesperline[VPE_CHROMA]);
+
+ return 0;
+}
+
+static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ int ret;
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ ret = vpe_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = __vpe_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ set_src_registers(ctx);
+ else
+ set_dst_registers(ctx);
+
+ return set_srcdst_params(ctx);
+}
+
+static int vpe_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ vpe_dump_regs(ctx->dev);
+ vpdma_dump_regs(ctx->dev->vpdma);
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+/*
+ * Defines the number of buffers/frames a context can process with the VPE
+ * before switching to a different context. The default is one buffer per
+ * context.
+ */
+#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
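+
+/*
+ * Illustrative userspace sketch (an assumption, not part of this driver):
+ * since this is a plain integer control built on V4L2_CID_USER_TI_VPE_BASE,
+ * it can presumably be set through the standard control interface, with fd
+ * being an open handle on the VPE video node:
+ *
+ *     struct v4l2_control ctrl = {
+ *             .id = V4L2_CID_VPE_BUFS_PER_JOB,
+ *             .value = 4,
+ *     };
+ *     ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ */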
+
+static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpe_ctx *ctx =
+ container_of(ctrl->handler, struct vpe_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VPE_BUFS_PER_JOB:
+ ctx->bufs_per_job = ctrl->val;
+ break;
+
+ default:
+ vpe_err(ctx->dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
+ .s_ctrl = vpe_s_ctrl,
+};
+
+static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
+ .vidioc_querycap = vpe_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
+
+ .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
+
+ .vidioc_reqbufs = vpe_reqbufs,
+ .vidioc_querybuf = vpe_querybuf,
+
+ .vidioc_qbuf = vpe_qbuf,
+ .vidioc_dqbuf = vpe_dqbuf,
+
+ .vidioc_streamon = vpe_streamon,
+ .vidioc_streamoff = vpe_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+static int vpe_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ int i;
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vpe_q_data *q_data;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ *nplanes = q_data->fmt->coplanar ? 2 : 1;
+
+ for (i = 0; i < *nplanes; i++) {
+ sizes[i] = q_data->sizeimage[i];
+ alloc_ctxs[i] = ctx->dev->alloc_ctx;
+ }
+
+ vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
+ sizes[VPE_LUMA]);
+ if (q_data->fmt->coplanar)
+ vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
+
+ return 0;
+}
+
+static int vpe_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpe_q_data *q_data;
+ int i, num_planes;
+
+ vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ num_planes = q_data->fmt->coplanar ? 2 : 1;
+
+ for (i = 0; i < num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ vpe_err(ctx->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long) q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_planes; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static void vpe_buf_queue(struct vb2_buffer *vb)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void vpe_wait_prepare(struct vb2_queue *q)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+ vpe_unlock(ctx);
+}
+
+static void vpe_wait_finish(struct vb2_queue *q)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+ vpe_lock(ctx);
+}
+
+static struct vb2_ops vpe_qops = {
+ .queue_setup = vpe_queue_setup,
+ .buf_prepare = vpe_buf_prepare,
+ .buf_queue = vpe_buf_queue,
+ .wait_prepare = vpe_wait_prepare,
+ .wait_finish = vpe_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vpe_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vpe_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vpe_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ctrl_config vpe_bufs_per_job = {
+ .ops = &vpe_ctrl_ops,
+ .id = V4L2_CID_VPE_BUFS_PER_JOB,
+ .name = "Buffers Per Transaction",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = VPE_DEF_BUFS_PER_JOB,
+ .min = 1,
+ .max = VIDEO_MAX_FRAME,
+ .step = 1,
+};
+
+/*
+ * File operations
+ */
+static int vpe_open(struct file *file)
+{
+ struct vpe_dev *dev = video_drvdata(file);
+ struct vpe_ctx *ctx = NULL;
+ struct vpe_q_data *s_q_data;
+ struct v4l2_ctrl_handler *hdl;
+ int ret;
+
+ vpe_dbg(dev, "vpe_open\n");
+
+ if (!dev->vpdma->ready) {
+ vpe_err(dev, "vpdma firmware not loaded\n");
+ return -ENODEV;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex)) {
+ ret = -ERESTARTSYS;
+ goto free_ctx;
+ }
+
+ ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
+ VPDMA_LIST_TYPE_NORMAL);
+ if (ret != 0)
+ goto unlock;
+
+ ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
+ if (ret != 0)
+ goto free_desc_list;
+
+ init_adb_hdrs(ctx);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 1);
+ v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
+ if (hdl->error) {
+ ret = hdl->error;
+ goto exit_fh;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+ s_q_data->fmt = &vpe_formats[2];
+ s_q_data->width = 1920;
+ s_q_data->height = 1080;
+ s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
+ s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
+ s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ s_q_data->field = V4L2_FIELD_NONE;
+ s_q_data->c_rect.left = 0;
+ s_q_data->c_rect.top = 0;
+ s_q_data->c_rect.width = s_q_data->width;
+ s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->flags = 0;
+
+ ctx->q_data[Q_DATA_DST] = *s_q_data;
+
+ set_dei_shadow_registers(ctx);
+ set_src_registers(ctx);
+ set_dst_registers(ctx);
+ ret = set_srcdst_params(ctx);
+ if (ret)
+ goto exit_fh;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto exit_fh;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ /*
+ * For now, just report the creation of the first instance; we can later
+ * optimize the driver to enable or disable clocks when the first
+ * instance is created or the last instance is released.
+ */
+ if (atomic_inc_return(&dev->num_instances) == 1)
+ vpe_dbg(dev, "first instance created\n");
+
+ ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
+
+ ctx->load_mmrs = true;
+
+ vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
+ ctx, ctx->m2m_ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+exit_fh:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+free_desc_list:
+ vpdma_free_desc_list(&ctx->desc_list);
+unlock:
+ mutex_unlock(&dev->dev_mutex);
+free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int vpe_release(struct file *file)
+{
+ struct vpe_dev *dev = video_drvdata(file);
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ vpe_dbg(dev, "releasing instance %p\n", ctx);
+
+ mutex_lock(&dev->dev_mutex);
+ free_vbs(ctx);
+ free_mv_buffers(ctx);
+ vpdma_free_desc_list(&ctx->desc_list);
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+
+ kfree(ctx);
+
+ /*
+ * For now, just report the release of the last instance; we can later
+ * optimize the driver to enable or disable clocks when the first
+ * instance is created or the last instance is released.
+ */
+ if (atomic_dec_return(&dev->num_instances) == 0)
+ vpe_dbg(dev, "last instance released\n");
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static unsigned int vpe_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&dev->dev_mutex);
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_dev *dev = ctx->dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations vpe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpe_open,
+ .release = vpe_release,
+ .poll = vpe_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpe_mmap,
+};
+
+static struct video_device vpe_videodev = {
+ .name = VPE_MODULE_NAME,
+ .fops = &vpe_fops,
+ .ioctl_ops = &vpe_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+ .lock = vpe_lock,
+ .unlock = vpe_unlock,
+};
+
+static int vpe_runtime_get(struct platform_device *pdev)
+{
+ int r;
+
+ dev_dbg(&pdev->dev, "vpe_runtime_get\n");
+
+ r = pm_runtime_get_sync(&pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void vpe_runtime_put(struct platform_device *pdev)
+{
+ int r;
+
+ dev_dbg(&pdev->dev, "vpe_runtime_put\n");
+
+ r = pm_runtime_put_sync(&pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
+}
+
+static int vpe_probe(struct platform_device *pdev)
+{
+ struct vpe_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret, irq, func;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ atomic_set(&dev->num_instances, 0);
+ mutex_init(&dev->dev_mutex);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
+ /*
+ * HACK: we get resource info from the device tree in the form of a list of
+ * VPE sub-blocks. The driver currently uses only the base of vpe_top for
+ * register access; it should later be changed to access registers based on
+ * the sub-block base addresses.
+ */
+ dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
+ if (!dev->base) {
+ ret = -ENOMEM;
+ goto v4l2_dev_unreg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
+ dev);
+ if (ret)
+ goto v4l2_dev_unreg;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ vpe_err(dev, "Failed to alloc vb2 context\n");
+ ret = PTR_ERR(dev->alloc_ctx);
+ goto v4l2_dev_unreg;
+ }
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ vpe_err(dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto rel_ctx;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = vpe_runtime_get(pdev);
+ if (ret)
+ goto rel_m2m;
+
+ /* Perform clk enable followed by reset */
+ vpe_set_clock_enable(dev, 1);
+
+ vpe_top_reset(dev);
+
+ func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
+ VPE_PID_FUNC_SHIFT);
+ vpe_dbg(dev, "VPE PID function %x\n", func);
+
+ vpe_top_vpdma_reset(dev);
+
+ dev->vpdma = vpdma_create(pdev);
+ if (IS_ERR(dev->vpdma)) {
+ ret = PTR_ERR(dev->vpdma);
+ goto runtime_put;
+ }
+
+ vfd = &dev->vfd;
+ *vfd = vpe_videodev;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ vpe_err(dev, "Failed to register video device\n");
+ goto runtime_put;
+ }
+
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
+ dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
+ vfd->num);
+
+ return 0;
+
+runtime_put:
+ vpe_runtime_put(pdev);
+rel_m2m:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(dev->m2m_dev);
+rel_ctx:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+v4l2_dev_unreg:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vpe_remove(struct platform_device *pdev)
+{
+ struct vpe_dev *dev =
+ (struct vpe_dev *) platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME "\n");
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+
+ vpe_set_clock_enable(dev, 0);
+ vpe_runtime_put(pdev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id vpe_of_match[] = {
+ {
+ .compatible = "ti,vpe",
+ },
+ {},
+};
+#else
+#define vpe_of_match NULL
+#endif
+
+static struct platform_driver vpe_pdrv = {
+ .probe = vpe_probe,
+ .remove = vpe_remove,
+ .driver = {
+ .name = VPE_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vpe_of_match,
+ },
+};
+
+static void __exit vpe_exit(void)
+{
+ platform_driver_unregister(&vpe_pdrv);
+}
+
+static int __init vpe_init(void)
+{
+ return platform_driver_register(&vpe_pdrv);
+}
+
+module_init(vpe_init);
+module_exit(vpe_exit);
+
+MODULE_DESCRIPTION("TI VPE driver");
+MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
new file mode 100644
index 00000000000..ed214e82839
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_VPE_REGS_H
+#define __TI_VPE_REGS_H
+
+/* VPE register offsets and field selectors */
+
+/* VPE top level regs */
+#define VPE_PID 0x0000
+#define VPE_PID_MINOR_MASK 0x3f
+#define VPE_PID_MINOR_SHIFT 0
+#define VPE_PID_CUSTOM_MASK 0x03
+#define VPE_PID_CUSTOM_SHIFT 6
+#define VPE_PID_MAJOR_MASK 0x07
+#define VPE_PID_MAJOR_SHIFT 8
+#define VPE_PID_RTL_MASK 0x1f
+#define VPE_PID_RTL_SHIFT 11
+#define VPE_PID_FUNC_MASK 0xfff
+#define VPE_PID_FUNC_SHIFT 16
+#define VPE_PID_SCHEME_MASK 0x03
+#define VPE_PID_SCHEME_SHIFT 30
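+
+/*
+ * Note (illustrative, not from the original header): each field below is
+ * described by a right-justified MASK plus a SHIFT, so a field value is
+ * presumably extracted as (readl(base + reg) >> SHIFT) & MASK. That is the
+ * pattern vpe.c follows via read_field_reg(), e.g.:
+ *
+ *     func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
+ *                     VPE_PID_FUNC_SHIFT);
+ */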
+
+#define VPE_SYSCONFIG 0x0010
+#define VPE_SYSCONFIG_IDLE_MASK 0x03
+#define VPE_SYSCONFIG_IDLE_SHIFT 2
+#define VPE_SYSCONFIG_STANDBY_MASK 0x03
+#define VPE_SYSCONFIG_STANDBY_SHIFT 4
+#define VPE_FORCE_IDLE_MODE 0
+#define VPE_NO_IDLE_MODE 1
+#define VPE_SMART_IDLE_MODE 2
+#define VPE_SMART_IDLE_WAKEUP_MODE 3
+#define VPE_FORCE_STANDBY_MODE 0
+#define VPE_NO_STANDBY_MODE 1
+#define VPE_SMART_STANDBY_MODE 2
+#define VPE_SMART_STANDBY_WAKEUP_MODE 3
+
+#define VPE_INT0_STATUS0_RAW_SET 0x0020
+#define VPE_INT0_STATUS0_RAW VPE_INT0_STATUS0_RAW_SET
+#define VPE_INT0_STATUS0_CLR 0x0028
+#define VPE_INT0_STATUS0 VPE_INT0_STATUS0_CLR
+#define VPE_INT0_ENABLE0_SET 0x0030
+#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET
+#define VPE_INT0_ENABLE0_CLR 0x0038
+#define VPE_INT0_LIST0_COMPLETE (1 << 0)
+#define VPE_INT0_LIST0_NOTIFY (1 << 1)
+#define VPE_INT0_LIST1_COMPLETE (1 << 2)
+#define VPE_INT0_LIST1_NOTIFY (1 << 3)
+#define VPE_INT0_LIST2_COMPLETE (1 << 4)
+#define VPE_INT0_LIST2_NOTIFY (1 << 5)
+#define VPE_INT0_LIST3_COMPLETE (1 << 6)
+#define VPE_INT0_LIST3_NOTIFY (1 << 7)
+#define VPE_INT0_LIST4_COMPLETE (1 << 8)
+#define VPE_INT0_LIST4_NOTIFY (1 << 9)
+#define VPE_INT0_LIST5_COMPLETE (1 << 10)
+#define VPE_INT0_LIST5_NOTIFY (1 << 11)
+#define VPE_INT0_LIST6_COMPLETE (1 << 12)
+#define VPE_INT0_LIST6_NOTIFY (1 << 13)
+#define VPE_INT0_LIST7_COMPLETE (1 << 14)
+#define VPE_INT0_LIST7_NOTIFY (1 << 15)
+#define VPE_INT0_DESCRIPTOR (1 << 16)
+#define VPE_DEI_FMD_INT (1 << 18)
+
+#define VPE_INT0_STATUS1_RAW_SET 0x0024
+#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET
+#define VPE_INT0_STATUS1_CLR 0x002c
+#define VPE_INT0_STATUS1 VPE_INT0_STATUS1_CLR
+#define VPE_INT0_ENABLE1_SET 0x0034
+#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET
+#define VPE_INT0_ENABLE1_CLR 0x003c
+#define VPE_INT0_CHANNEL_GROUP0 (1 << 0)
+#define VPE_INT0_CHANNEL_GROUP1 (1 << 1)
+#define VPE_INT0_CHANNEL_GROUP2 (1 << 2)
+#define VPE_INT0_CHANNEL_GROUP3 (1 << 3)
+#define VPE_INT0_CHANNEL_GROUP4 (1 << 4)
+#define VPE_INT0_CHANNEL_GROUP5 (1 << 5)
+#define VPE_INT0_CLIENT (1 << 7)
+#define VPE_DEI_ERROR_INT (1 << 16)
+#define VPE_DS1_UV_ERROR_INT (1 << 22)
+
+#define VPE_INTC_EOI 0x00a0
+
+#define VPE_CLK_ENABLE 0x0100
+#define VPE_VPEDMA_CLK_ENABLE (1 << 0)
+#define VPE_DATA_PATH_CLK_ENABLE (1 << 1)
+
+#define VPE_CLK_RESET 0x0104
+#define VPE_VPDMA_CLK_RESET_MASK 0x1
+#define VPE_VPDMA_CLK_RESET_SHIFT 0
+#define VPE_DATA_PATH_CLK_RESET_MASK 0x1
+#define VPE_DATA_PATH_CLK_RESET_SHIFT 1
+#define VPE_MAIN_RESET_MASK 0x1
+#define VPE_MAIN_RESET_SHIFT 31
+
+#define VPE_CLK_FORMAT_SELECT 0x010c
+#define VPE_CSC_SRC_SELECT_MASK 0x03
+#define VPE_CSC_SRC_SELECT_SHIFT 0
+#define VPE_RGB_OUT_SELECT (1 << 8)
+#define VPE_DS_SRC_SELECT_MASK 0x07
+#define VPE_DS_SRC_SELECT_SHIFT 9
+#define VPE_DS_BYPASS (1 << 16)
+#define VPE_COLOR_SEPARATE_422 (1 << 18)
+
+#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT)
+#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT)
+
+#define VPE_CLK_RANGE_MAP 0x011c
+#define VPE_RANGE_RANGE_MAP_Y_MASK 0x07
+#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0
+#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07
+#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3
+#define VPE_RANGE_MAP_ON (1 << 6)
+#define VPE_RANGE_REDUCTION_ON (1 << 28)
+
+/* VPE chrominance upsampler regs */
+#define VPE_US1_R0 0x0304
+#define VPE_US2_R0 0x0404
+#define VPE_US3_R0 0x0504
+#define VPE_US_C1_MASK 0x3fff
+#define VPE_US_C1_SHIFT 2
+#define VPE_US_C0_MASK 0x3fff
+#define VPE_US_C0_SHIFT 18
+#define VPE_US_MODE_MASK 0x03
+#define VPE_US_MODE_SHIFT 16
+#define VPE_ANCHOR_FID0_C1_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C1_SHIFT 2
+#define VPE_ANCHOR_FID0_C0_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C0_SHIFT 18
+
+#define VPE_US1_R1 0x0308
+#define VPE_US2_R1 0x0408
+#define VPE_US3_R1 0x0508
+#define VPE_ANCHOR_FID0_C3_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C3_SHIFT 2
+#define VPE_ANCHOR_FID0_C2_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C2_SHIFT 18
+
+#define VPE_US1_R2 0x030c
+#define VPE_US2_R2 0x040c
+#define VPE_US3_R2 0x050c
+#define VPE_INTERP_FID0_C1_MASK 0x3fff
+#define VPE_INTERP_FID0_C1_SHIFT 2
+#define VPE_INTERP_FID0_C0_MASK 0x3fff
+#define VPE_INTERP_FID0_C0_SHIFT 18
+
+#define VPE_US1_R3 0x0310
+#define VPE_US2_R3 0x0410
+#define VPE_US3_R3 0x0510
+#define VPE_INTERP_FID0_C3_MASK 0x3fff
+#define VPE_INTERP_FID0_C3_SHIFT 2
+#define VPE_INTERP_FID0_C2_MASK 0x3fff
+#define VPE_INTERP_FID0_C2_SHIFT 18
+
+#define VPE_US1_R4 0x0314
+#define VPE_US2_R4 0x0414
+#define VPE_US3_R4 0x0514
+#define VPE_ANCHOR_FID1_C1_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C1_SHIFT 2
+#define VPE_ANCHOR_FID1_C0_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C0_SHIFT 18
+
+#define VPE_US1_R5 0x0318
+#define VPE_US2_R5 0x0418
+#define VPE_US3_R5 0x0518
+#define VPE_ANCHOR_FID1_C3_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C3_SHIFT 2
+#define VPE_ANCHOR_FID1_C2_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C2_SHIFT 18
+
+#define VPE_US1_R6 0x031c
+#define VPE_US2_R6 0x041c
+#define VPE_US3_R6 0x051c
+#define VPE_INTERP_FID1_C1_MASK 0x3fff
+#define VPE_INTERP_FID1_C1_SHIFT 2
+#define VPE_INTERP_FID1_C0_MASK 0x3fff
+#define VPE_INTERP_FID1_C0_SHIFT 18
+
+#define VPE_US1_R7 0x0320
+#define VPE_US2_R7 0x0420
+#define VPE_US3_R7 0x0520
+#define VPE_INTERP_FID1_C3_MASK 0x3fff
+#define VPE_INTERP_FID1_C3_SHIFT 2
+#define VPE_INTERP_FID1_C2_MASK 0x3fff
+#define VPE_INTERP_FID1_C2_SHIFT 18
+
+/* VPE de-interlacer regs */
+#define VPE_DEI_FRAME_SIZE 0x0600
+#define VPE_DEI_WIDTH_MASK 0x07ff
+#define VPE_DEI_WIDTH_SHIFT 0
+#define VPE_DEI_HEIGHT_MASK 0x07ff
+#define VPE_DEI_HEIGHT_SHIFT 16
+#define VPE_DEI_INTERLACE_BYPASS (1 << 29)
+#define VPE_DEI_FIELD_FLUSH (1 << 30)
+#define VPE_DEI_PROGRESSIVE (1 << 31)
+
+#define VPE_MDT_BYPASS 0x0604
+#define VPE_MDT_TEMPMAX_BYPASS (1 << 0)
+#define VPE_MDT_SPATMAX_BYPASS (1 << 1)
+
+#define VPE_MDT_SF_THRESHOLD 0x0608
+#define VPE_MDT_SF_SC_THR1_MASK 0xff
+#define VPE_MDT_SF_SC_THR1_SHIFT 0
+#define VPE_MDT_SF_SC_THR2_MASK 0xff
+#define VPE_MDT_SF_SC_THR2_SHIFT 0
+#define VPE_MDT_SF_SC_THR3_MASK 0xff
+#define VPE_MDT_SF_SC_THR3_SHIFT 0
+
+#define VPE_EDI_CONFIG 0x060c
+#define VPE_EDI_INP_MODE_MASK 0x03
+#define VPE_EDI_INP_MODE_SHIFT 0
+#define VPE_EDI_ENABLE_3D (1 << 2)
+#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3)
+#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff
+#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8
+#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff
+#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT 16
+#define VPE_EDI_COR_SCALE_FACTOR_MASK 0xff
+#define VPE_EDI_COR_SCALE_FACTOR_SHIFT 23
+
+#define VPE_DEI_EDI_LUT_R0 0x0610
+#define VPE_EDI_LUT0_MASK 0x1f
+#define VPE_EDI_LUT0_SHIFT 0
+#define VPE_EDI_LUT1_MASK 0x1f
+#define VPE_EDI_LUT1_SHIFT 8
+#define VPE_EDI_LUT2_MASK 0x1f
+#define VPE_EDI_LUT2_SHIFT 16
+#define VPE_EDI_LUT3_MASK 0x1f
+#define VPE_EDI_LUT3_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R1 0x0614
+#define VPE_EDI_LUT0_MASK 0x1f
+#define VPE_EDI_LUT0_SHIFT 0
+#define VPE_EDI_LUT1_MASK 0x1f
+#define VPE_EDI_LUT1_SHIFT 8
+#define VPE_EDI_LUT2_MASK 0x1f
+#define VPE_EDI_LUT2_SHIFT 16
+#define VPE_EDI_LUT3_MASK 0x1f
+#define VPE_EDI_LUT3_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R2 0x0618
+#define VPE_EDI_LUT4_MASK 0x1f
+#define VPE_EDI_LUT4_SHIFT 0
+#define VPE_EDI_LUT5_MASK 0x1f
+#define VPE_EDI_LUT5_SHIFT 8
+#define VPE_EDI_LUT6_MASK 0x1f
+#define VPE_EDI_LUT6_SHIFT 16
+#define VPE_EDI_LUT7_MASK 0x1f
+#define VPE_EDI_LUT7_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R3 0x061c
+#define VPE_EDI_LUT8_MASK 0x1f
+#define VPE_EDI_LUT8_SHIFT 0
+#define VPE_EDI_LUT9_MASK 0x1f
+#define VPE_EDI_LUT9_SHIFT 8
+#define VPE_EDI_LUT10_MASK 0x1f
+#define VPE_EDI_LUT10_SHIFT 16
+#define VPE_EDI_LUT11_MASK 0x1f
+#define VPE_EDI_LUT11_SHIFT 24
+
+#define VPE_DEI_FMD_WINDOW_R0 0x0620
+#define VPE_FMD_WINDOW_MINX_MASK 0x07ff
+#define VPE_FMD_WINDOW_MINX_SHIFT 0
+#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff
+#define VPE_FMD_WINDOW_MAXX_SHIFT 16
+#define VPE_FMD_WINDOW_ENABLE (1 << 31)
+
+#define VPE_DEI_FMD_WINDOW_R1 0x0624
+#define VPE_FMD_WINDOW_MINY_MASK 0x07ff
+#define VPE_FMD_WINDOW_MINY_SHIFT 0
+#define VPE_FMD_WINDOW_MAXY_MASK 0x07ff
+#define VPE_FMD_WINDOW_MAXY_SHIFT 16
+
+#define VPE_DEI_FMD_CONTROL_R0 0x0628
+#define VPE_FMD_ENABLE (1 << 0)
+#define VPE_FMD_LOCK (1 << 1)
+#define VPE_FMD_JAM_DIR (1 << 2)
+#define VPE_FMD_BED_ENABLE (1 << 3)
+#define VPE_FMD_CAF_FIELD_THR_MASK 0xff
+#define VPE_FMD_CAF_FIELD_THR_SHIFT 16
+#define VPE_FMD_CAF_LINE_THR_MASK 0xff
+#define VPE_FMD_CAF_LINE_THR_SHIFT 24
+
+#define VPE_DEI_FMD_CONTROL_R1 0x062c
+#define VPE_FMD_CAF_THR_MASK 0x000fffff
+#define VPE_FMD_CAF_THR_SHIFT 0
+
+#define VPE_DEI_FMD_STATUS_R0 0x0630
+#define VPE_FMD_CAF_MASK 0x000fffff
+#define VPE_FMD_CAF_SHIFT 0
+#define VPE_FMD_RESET (1 << 24)
+
+#define VPE_DEI_FMD_STATUS_R1 0x0634
+#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff
+#define VPE_FMD_FIELD_DIFF_SHIFT 0
+
+#define VPE_DEI_FMD_STATUS_R2 0x0638
+#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff
+#define VPE_FMD_FRAME_DIFF_SHIFT 0
+
+/* VPE scaler regs */
+#define VPE_SC_MP_SC0 0x0700
+#define VPE_INTERLACE_O (1 << 0)
+#define VPE_LINEAR (1 << 1)
+#define VPE_SC_BYPASS (1 << 2)
+#define VPE_INVT_FID (1 << 3)
+#define VPE_USE_RAV (1 << 4)
+#define VPE_ENABLE_EV (1 << 5)
+#define VPE_AUTO_HS (1 << 6)
+#define VPE_DCM_2X (1 << 7)
+#define VPE_DCM_4X (1 << 8)
+#define VPE_HP_BYPASS (1 << 9)
+#define VPE_INTERLACE_I (1 << 10)
+#define VPE_ENABLE_SIN2_VER_INTP (1 << 11)
+#define VPE_Y_PK_EN (1 << 14)
+#define VPE_TRIM (1 << 15)
+#define VPE_SELFGEN_FID (1 << 16)
+
+#define VPE_SC_MP_SC1 0x0704
+#define VPE_ROW_ACC_INC_MASK 0x07ffffff
+#define VPE_ROW_ACC_INC_SHIFT 0
+
+#define VPE_SC_MP_SC2 0x0708
+#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff
+#define VPE_ROW_ACC_OFFSET_SHIFT 0
+
+#define VPE_SC_MP_SC3 0x070c
+#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff
+#define VPE_ROW_ACC_OFFSET_B_SHIFT 0
+
+#define VPE_SC_MP_SC4 0x0710
+#define VPE_TAR_H_MASK 0x07ff
+#define VPE_TAR_H_SHIFT 0
+#define VPE_TAR_W_MASK 0x07ff
+#define VPE_TAR_W_SHIFT 12
+#define VPE_LIN_ACC_INC_U_MASK 0x07
+#define VPE_LIN_ACC_INC_U_SHIFT 24
+#define VPE_NLIN_ACC_INIT_U_MASK 0x07
+#define VPE_NLIN_ACC_INIT_U_SHIFT 28
+
+#define VPE_SC_MP_SC5 0x0714
+#define VPE_SRC_H_MASK 0x07ff
+#define VPE_SRC_H_SHIFT 0
+#define VPE_SRC_W_MASK 0x07ff
+#define VPE_SRC_W_SHIFT 12
+#define VPE_NLIN_ACC_INC_U_MASK 0x07
+#define VPE_NLIN_ACC_INC_U_SHIFT 24
+
+#define VPE_SC_MP_SC6 0x0718
+#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff
+#define VPE_ROW_ACC_INIT_RAV_SHIFT 0
+#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff
+#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10
+
+#define VPE_SC_MP_SC8 0x0720
+#define VPE_NLIN_LEFT_MASK 0x07ff
+#define VPE_NLIN_LEFT_SHIFT 0
+#define VPE_NLIN_RIGHT_MASK 0x07ff
+#define VPE_NLIN_RIGHT_SHIFT 12
+
+#define VPE_SC_MP_SC9 0x0724
+#define VPE_LIN_ACC_INC VPE_SC_MP_SC9
+
+#define VPE_SC_MP_SC10 0x0728
+#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10
+
+#define VPE_SC_MP_SC11 0x072c
+#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11
+
+#define VPE_SC_MP_SC12 0x0730
+#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff
+#define VPE_COL_ACC_OFFSET_SHIFT 0
+
+#define VPE_SC_MP_SC13 0x0734
+#define VPE_SC_FACTOR_RAV_MASK 0x03ff
+#define VPE_SC_FACTOR_RAV_SHIFT 0
+#define VPE_CHROMA_INTP_THR_MASK 0x03ff
+#define VPE_CHROMA_INTP_THR_SHIFT 12
+#define VPE_DELTA_CHROMA_THR_MASK 0x0f
+#define VPE_DELTA_CHROMA_THR_SHIFT 24
+
+#define VPE_SC_MP_SC17 0x0744
+#define VPE_EV_THR_MASK 0x03ff
+#define VPE_EV_THR_SHIFT 12
+#define VPE_DELTA_LUMA_THR_MASK 0x0f
+#define VPE_DELTA_LUMA_THR_SHIFT 24
+#define VPE_DELTA_EV_THR_MASK 0x0f
+#define VPE_DELTA_EV_THR_SHIFT 28
+
+#define VPE_SC_MP_SC18 0x0748
+#define VPE_HS_FACTOR_MASK 0x03ff
+#define VPE_HS_FACTOR_SHIFT 0
+#define VPE_CONF_DEFAULT_MASK 0x01ff
+#define VPE_CONF_DEFAULT_SHIFT 16
+
+#define VPE_SC_MP_SC19 0x074c
+#define VPE_HPF_COEFF0_MASK 0xff
+#define VPE_HPF_COEFF0_SHIFT 0
+#define VPE_HPF_COEFF1_MASK 0xff
+#define VPE_HPF_COEFF1_SHIFT 8
+#define VPE_HPF_COEFF2_MASK 0xff
+#define VPE_HPF_COEFF2_SHIFT 16
+#define VPE_HPF_COEFF3_MASK 0xff
+#define VPE_HPF_COEFF3_SHIFT 23
+
+#define VPE_SC_MP_SC20 0x0750
+#define VPE_HPF_COEFF4_MASK 0xff
+#define VPE_HPF_COEFF4_SHIFT 0
+#define VPE_HPF_COEFF5_MASK 0xff
+#define VPE_HPF_COEFF5_SHIFT 8
+#define VPE_HPF_NORM_SHIFT_MASK 0x07
+#define VPE_HPF_NORM_SHIFT_SHIFT 16
+#define VPE_NL_LIMIT_MASK 0x1ff
+#define VPE_NL_LIMIT_SHIFT 20
+
+#define VPE_SC_MP_SC21 0x0754
+#define VPE_NL_LO_THR_MASK 0x01ff
+#define VPE_NL_LO_THR_SHIFT 0
+#define VPE_NL_LO_SLOPE_MASK 0xff
+#define VPE_NL_LO_SLOPE_SHIFT 16
+
+#define VPE_SC_MP_SC22 0x0758
+#define VPE_NL_HI_THR_MASK 0x01ff
+#define VPE_NL_HI_THR_SHIFT 0
+#define VPE_NL_HI_SLOPE_SH_MASK 0x07
+#define VPE_NL_HI_SLOPE_SH_SHIFT 16
+
+#define VPE_SC_MP_SC23 0x075c
+#define VPE_GRADIENT_THR_MASK 0x07ff
+#define VPE_GRADIENT_THR_SHIFT 0
+#define VPE_GRADIENT_THR_RANGE_MASK 0x0f
+#define VPE_GRADIENT_THR_RANGE_SHIFT 12
+#define VPE_MIN_GY_THR_MASK 0xff
+#define VPE_MIN_GY_THR_SHIFT 16
+#define VPE_MIN_GY_THR_RANGE_MASK 0x0f
+#define VPE_MIN_GY_THR_RANGE_SHIFT 28
+
+#define VPE_SC_MP_SC24 0x0760
+#define VPE_ORG_H_MASK 0x07ff
+#define VPE_ORG_H_SHIFT 0
+#define VPE_ORG_W_MASK 0x07ff
+#define VPE_ORG_W_SHIFT 16
+
+#define VPE_SC_MP_SC25 0x0764
+#define VPE_OFF_H_MASK 0x07ff
+#define VPE_OFF_H_SHIFT 0
+#define VPE_OFF_W_MASK 0x07ff
+#define VPE_OFF_W_SHIFT 16
+
+/* VPE color space converter regs */
+#define VPE_CSC_CSC00 0x5700
+#define VPE_CSC_A0_MASK 0x1fff
+#define VPE_CSC_A0_SHIFT 0
+#define VPE_CSC_B0_MASK 0x1fff
+#define VPE_CSC_B0_SHIFT 16
+
+#define VPE_CSC_CSC01 0x5704
+#define VPE_CSC_C0_MASK 0x1fff
+#define VPE_CSC_C0_SHIFT 0
+#define VPE_CSC_A1_MASK 0x1fff
+#define VPE_CSC_A1_SHIFT 16
+
+#define VPE_CSC_CSC02 0x5708
+#define VPE_CSC_B1_MASK 0x1fff
+#define VPE_CSC_B1_SHIFT 0
+#define VPE_CSC_C1_MASK 0x1fff
+#define VPE_CSC_C1_SHIFT 16
+
+#define VPE_CSC_CSC03 0x570c
+#define VPE_CSC_A2_MASK 0x1fff
+#define VPE_CSC_A2_SHIFT 0
+#define VPE_CSC_B2_MASK 0x1fff
+#define VPE_CSC_B2_SHIFT 16
+
+#define VPE_CSC_CSC04 0x5710
+#define VPE_CSC_C2_MASK 0x1fff
+#define VPE_CSC_C2_SHIFT 0
+#define VPE_CSC_D0_MASK 0x0fff
+#define VPE_CSC_D0_SHIFT 16
+
+#define VPE_CSC_CSC05 0x5714
+#define VPE_CSC_D1_MASK 0x0fff
+#define VPE_CSC_D1_SHIFT 0
+#define VPE_CSC_D2_MASK 0x0fff
+#define VPE_CSC_D2_SHIFT 16
+#define VPE_CSC_BYPASS (1 << 28)
+
+#endif
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index b557caf5b1a..ccdadd623a3 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -403,7 +403,7 @@ static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
return 0;
}
-static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
+static int timblogiw_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
@@ -420,7 +420,7 @@ static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
}
static int timblogiw_streamoff(struct file *file, void *priv,
- unsigned int type)
+ enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
desc = dmaengine_prep_slave_sg(fh->chan,
buf->sg, sg_elems, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ DMA_PREP_INTERRUPT);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
list_del_init(&vb->queue);
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index 21db23b196b..fa3964022b9 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -123,7 +123,7 @@ static int keene_cmd_set(struct keene_device *radio)
/* If bit 0 is set, then transmit mono, otherwise stereo.
If bit 2 is set, then enable 75 us preemphasis, otherwise
it is 50 us. */
- radio->buffer[3] = (!radio->stereo) | (radio->preemph_75_us ? 4 : 0);
+ radio->buffer[3] = (radio->stereo ? 0 : 1) | (radio->preemph_75_us ? 4 : 0);
radio->buffer[4] = 0x00;
radio->buffer[5] = 0x00;
radio->buffer[6] = 0x00;
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index f1e3714b5f1..93d864eb830 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -74,8 +74,8 @@ static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea)
struct fmr2 *fmr2 = tea->private_data;
u8 bits = inb(fmr2->io);
- return (bits & STR_DATA) ? TEA575X_DATA : 0 |
- (bits & STR_MOST) ? TEA575X_MOST : 0;
+ return ((bits & STR_DATA) ? TEA575X_DATA : 0) |
+ ((bits & STR_MOST) ? TEA575X_MOST : 0);
}
static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output)
@@ -295,7 +295,6 @@ static void fmr2_remove(struct fmr2 *fmr2)
static int fmr2_isa_remove(struct device *pdev, unsigned int ndev)
{
fmr2_remove(dev_get_drvdata(pdev));
- dev_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index b9147721241..3db8a8cfe1a 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -271,6 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
+#ifdef CONFIG_PM
static void shark_resume_leds(struct shark_device *shark)
{
if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
@@ -280,6 +281,7 @@ static void shark_resume_leds(struct shark_device *shark)
set_bit(RED_LED, &shark->brightness_new);
schedule_work(&shark->led_work);
}
+#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index 9fb669721e6..d86d90dab8b 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -237,6 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
+#ifdef CONFIG_PM
static void shark_resume_leds(struct shark_device *shark)
{
int i;
@@ -246,6 +247,7 @@ static void shark_resume_leds(struct shark_device *shark)
schedule_work(&shark->led_work);
}
+#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 97c2c18803e..9cf6731fb81 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -375,7 +375,7 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
if (r)
return r;
- INIT_COMPLETION(radio->busy);
+ reinit_completion(&radio->busy);
/* wait for the FR IRQ */
r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
@@ -389,7 +389,7 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
if (r)
return r;
- INIT_COMPLETION(radio->busy);
+ reinit_completion(&radio->busy);
/* wait for the POWER_ENB IRQ */
r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
@@ -444,7 +444,7 @@ static int wl1273_fm_set_rx_freq(struct wl1273_device *radio, unsigned int freq)
goto err;
}
- INIT_COMPLETION(radio->busy);
+ reinit_completion(&radio->busy);
r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
if (!r) {
@@ -805,7 +805,7 @@ static int wl1273_fm_set_seek(struct wl1273_device *radio,
if (level < SCHAR_MIN || level > SCHAR_MAX)
return -EINVAL;
- INIT_COMPLETION(radio->busy);
+ reinit_completion(&radio->busy);
dev_dbg(radio->dev, "%s: BUSY\n", __func__);
r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
@@ -847,7 +847,7 @@ static int wl1273_fm_set_seek(struct wl1273_device *radio,
if (r)
goto out;
- INIT_COMPLETION(radio->busy);
+ reinit_completion(&radio->busy);
dev_dbg(radio->dev, "%s: BUSY\n", __func__);
r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 5c57e5b0f94..0e750aef656 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -218,7 +218,7 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
goto done;
/* wait till tune operation has completed */
- INIT_COMPLETION(radio->completion);
+ reinit_completion(&radio->completion);
retval = wait_for_completion_timeout(&radio->completion,
msecs_to_jiffies(tune_timeout));
if (!retval)
@@ -254,7 +254,7 @@ static unsigned int si470x_get_step(struct si470x_device *radio)
/* 2: 50 kHz */
default:
return 50 * 16;
- };
+ }
}
@@ -341,7 +341,7 @@ static int si470x_set_seek(struct si470x_device *radio,
return retval;
/* wait till tune operation has completed */
- INIT_COMPLETION(radio->completion);
+ reinit_completion(&radio->completion);
retval = wait_for_completion_timeout(&radio->completion,
msecs_to_jiffies(seek_timeout));
if (!retval)
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index e5fc9acd0c4..2a497c80c77 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -463,7 +463,7 @@ static int si470x_i2c_remove(struct i2c_client *client)
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* si470x_i2c_suspend - suspend the device
*/
@@ -509,7 +509,7 @@ static struct i2c_driver si470x_i2c_driver = {
.driver = {
.name = "si470x",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &si470x_i2c_pm,
#endif
},
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index fe160882ee1..9ec48ccbcf0 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1456,7 +1456,7 @@ static int si4713_probe(struct i2c_client *client,
if (client->irq) {
rval = request_irq(client->irq,
- si4713_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ si4713_handler, IRQF_TRIGGER_FALLING,
client->name, sdev);
if (rval < 0) {
v4l2_err(&sdev->sd, "Could not request IRQ\n");
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 06ac69245ca..69e3245a58a 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -48,15 +48,15 @@
#define WM_SUB_TEST 0xF
/* Different modes of the MSA register */
-#define MODE_BUFFER 0x0
-#define MODE_PRESET 0x1
-#define MODE_SEARCH 0x2
-#define MODE_AF_UPDATE 0x3
-#define MODE_JUMP 0x4
-#define MODE_CHECK 0x5
-#define MODE_LOAD 0x6
-#define MODE_END 0x7
-#define MODE_SHIFT 5
+#define MSA_MODE_BUFFER 0x0
+#define MSA_MODE_PRESET 0x1
+#define MSA_MODE_SEARCH 0x2
+#define MSA_MODE_AF_UPDATE 0x3
+#define MSA_MODE_JUMP 0x4
+#define MSA_MODE_CHECK 0x5
+#define MSA_MODE_LOAD 0x6
+#define MSA_MODE_END 0x7
+#define MSA_MODE_SHIFT 5
struct tef6862_state {
struct v4l2_subdev sd;
@@ -114,7 +114,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
- i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM;
+ i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM;
i2cmsg[1] = (pll >> 8) & 0xff;
i2cmsg[2] = pll & 0xff;
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 253f307f0b3..4b2e9e8298e 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -175,7 +175,7 @@ static int_handler_prototype int_handler_table[] = {
fm_irq_handle_intmsk_cmd_resp
};
-long (*g_st_write) (struct sk_buff *skb);
+static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;
static inline void fm_irq_call(struct fmdev *fmdev)
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 11e84bcc23a..904f11367c2 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -322,4 +322,14 @@ config IR_GPIO_CIR
To compile this driver as a module, choose M here: the module will
be called gpio-ir-recv.
+config RC_ST
+ tristate "ST remote control receiver"
+ depends on ARCH_STI && RC_CORE
+ help
+ Say Y here if you want support for the ST remote control driver,
+ which allows both IR and UHF RX.
+ The driver passes raw pulse and space information to the LIRC decoder.
+
+ If you're not sure, select N here.
+
endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 56bacf07b36..f4eb32c0a45 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
obj-$(CONFIG_IR_IGUANA) += iguanair.o
obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
+obj-$(CONFIG_RC_ST) += st_rc.o
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
index 82516a1d39b..b698f3d2ced 100644
--- a/drivers/media/rc/fintek-cir.h
+++ b/drivers/media/rc/fintek-cir.h
@@ -76,8 +76,8 @@ struct fintek_dev {
} tx;
/* Config register index/data port pair */
- u8 cr_ip;
- u8 cr_dp;
+ u32 cr_ip;
+ u32 cr_dp;
/* hardware I/O settings */
unsigned long cir_addr;
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 07aacfa5903..80c611c2e8c 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 19632b1c219..fdae05c4f37 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -207,7 +207,7 @@ static int iguanair_send(struct iguanair *ir, unsigned size)
{
int rc;
- INIT_COMPLETION(ir->completion);
+ reinit_completion(&ir->completion);
ir->urb_out->transfer_buffer_length = size;
rc = usb_submit_urb(ir->urb_out, GFP_KERNEL);
@@ -308,22 +308,12 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) -
ir->cycle_overhead;
- /* make up the the remainer of 4-cycle blocks */
- switch (cycles & 3) {
- case 0:
- sevens = 0;
- break;
- case 1:
- sevens = 3;
- break;
- case 2:
- sevens = 2;
- break;
- case 3:
- sevens = 1;
- break;
- }
-
+ /*
+ * Calculate minimum number of 7 cycles needed so
+ * we are left with a multiple of 4; so we want to have
+ * (sevens * 7) & 3 == cycles & 3
+ * (sevens * 7 + fours * 4 == cycles).
+ */
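+ /*
+ * Worked example (illustrative numbers only): cycles = 314 has
+ * cycles & 3 == 2, so sevens = (4 - 314) & 3 == 2 and
+ * fours = (314 - 2 * 7) / 4 == 75; 2 * 7 + 75 * 4 == 314.
+ */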
+ sevens = (4 - cycles) & 3;
fours = (cycles - sevens * 7) / 4;
/* magic happens here */
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 31b955bf766..b1e19a26208 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -201,8 +201,7 @@ static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51)
lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer);
retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler,
- IRQF_DISABLED | IRQF_SHARED,
- "lirc_pulse_timer", lirc_rx51);
+ IRQF_SHARED, "lirc_pulse_timer", lirc_rx51);
if (retval) {
dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n");
goto err2;
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index 4d13a7f2e5c..492a05ade7e 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -5,7 +5,7 @@
* TODO: This table is a real mess, as it merges RC codes from several
* devices into a big table. It also has both RC-5 and NEC codes inside.
* It should be broken into small tables, and the protocols should properly
- * be indentificated.
+ * be identified.
*
* The table were imported from dib0700_devices.c.
*
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index ba81d9697cf..454ea596a7e 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -5,7 +5,7 @@
* TODO: This table is a real mess, as it merges RC codes from several
* devices into a big table. It also has both RC-5 and NEC codes inside.
* It should be broken into small tables, and the protocols should properly
- * be indentificated.
+ * be identified.
*
* The table were imported from dib0700_devices.c.
*
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 7c3674ff5ea..07e83108df0 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -84,8 +84,8 @@ struct nvt_dev {
} tx;
/* EFER Config register index/data pair */
- u8 cr_efir;
- u8 cr_efdr;
+ u32 cr_efir;
+ u32 cr_efdr;
/* hardware I/O settings */
unsigned long cir_addr;
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
new file mode 100644
index 00000000000..65120c2d47a
--- /dev/null
+++ b/drivers/media/rc/st_rc.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <media/rc-core.h>
+#include <linux/pinctrl/consumer.h>
+
+struct st_rc_device {
+ struct device *dev;
+ int irq;
+ int irq_wake;
+ struct clk *sys_clock;
+ void __iomem *base; /* Register base address */
+ void __iomem *rx_base; /* RX Register base address */
+ struct rc_dev *rdev;
+ bool overclocking;
+ int sample_mult;
+ int sample_div;
+ bool rxuhfmode;
+};
+
+/* Registers */
+#define IRB_SAMPLE_RATE_COMM 0x64 /* sample freq divisor*/
+#define IRB_CLOCK_SEL 0x70 /* clock select */
+#define IRB_CLOCK_SEL_STATUS 0x74 /* clock status */
+/* IRB IR/UHF receiver registers */
+#define IRB_RX_ON 0x40 /* pulse time capture */
+#define IRB_RX_SYS 0x44 /* sym period capture */
+#define IRB_RX_INT_EN 0x48 /* IRQ enable (R/W) */
+#define IRB_RX_INT_STATUS 0x4c /* IRQ status (R/W) */
+#define IRB_RX_EN 0x50 /* Receive enable */
+#define IRB_MAX_SYM_PERIOD 0x54 /* max sym value */
+#define IRB_RX_INT_CLEAR 0x58 /* overrun status */
+#define IRB_RX_STATUS 0x6c /* receive status */
+#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */
+#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */
+
+/*
+ * IRQ set: Enable full FIFO 1 -> bit 3;
+ * Enable overrun IRQ 1 -> bit 2;
+ * Enable last symbol IRQ 1 -> bit 1;
+ * Enable RX interrupt 1 -> bit 0;
+ */
+#define IRB_RX_INTS 0x0f
+#define IRB_RX_OVERRUN_INT 0x04
+/* maximum symbol period (microsecs), timeout to detect end of symbol train */
+#define MAX_SYMB_TIME 0x5000
+#define IRB_SAMPLE_FREQ 10000000
+#define IRB_FIFO_NOT_EMPTY 0xff00
+#define IRB_OVERFLOW 0x4
+#define IRB_TIMEOUT 0xffff
+#define IR_ST_NAME "st-rc"
+
+static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
+{
+ DEFINE_IR_RAW_EVENT(ev);
+ ev.timeout = true;
+ ir_raw_event_store(rdev, &ev);
+}
+
+/**
+ * RX graphical example to better understand the difference between ST IR block
+ * output and standard definition used by LIRC (and most of the world!)
+ *
+ * mark mark
+ * |-IRB_RX_ON-| |-IRB_RX_ON-|
+ * ___ ___ ___ ___ ___ ___ _
+ * | | | | | | | | | | | | |
+ * | | | | | | space 0 | | | | | | space 1 |
+ * _____| |__| |__| |____________________________| |__| |__| |_____________|
+ *
+ * |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------|
+ *
+ * |------------- encoding bit 0 -----------|---- encoding bit 1 -----|
+ *
+ * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so
+ * to convert to the standard mark/space representation we have to calculate
+ * space = (IRB_RX_SYS - mark). The mark time represents the amount of time
+ * the carrier (usually 36-40 kHz) is detected. The above example shows Pulse
+ * Width Modulation encoding, where bit 0 is represented by space > mark.
+ */
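+
+/*
+ * Worked example (illustrative numbers, close to an NEC "1" bit): if the
+ * hardware reports IRB_RX_ON = 560 and IRB_RX_SYS = 2250 (treated by this
+ * driver as microseconds), the decoder is fed a 560 us pulse followed by a
+ * 2250 - 560 = 1690 us space.
+ */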
+
+static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
+{
+ unsigned int symbol, mark = 0;
+ struct st_rc_device *dev = data;
+ int last_symbol = 0;
+ u32 status;
+ DEFINE_IR_RAW_EVENT(ev);
+
+ if (dev->irq_wake)
+ pm_wakeup_event(dev->dev, 0);
+
+ status = readl(dev->rx_base + IRB_RX_STATUS);
+
+ while (status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)) {
+ u32 int_status = readl(dev->rx_base + IRB_RX_INT_STATUS);
+ if (unlikely(int_status & IRB_RX_OVERRUN_INT)) {
+ /* discard the entire collection in case of errors! */
+ ir_raw_event_reset(dev->rdev);
+ dev_info(dev->dev, "IR RX overrun\n");
+ writel(IRB_RX_OVERRUN_INT,
+ dev->rx_base + IRB_RX_INT_CLEAR);
+ continue;
+ }
+
+ symbol = readl(dev->rx_base + IRB_RX_SYS);
+ mark = readl(dev->rx_base + IRB_RX_ON);
+
+ if (symbol == IRB_TIMEOUT)
+ last_symbol = 1;
+
+ /* Ignore any noise */
+ if ((mark > 2) && (symbol > 1)) {
+ symbol -= mark;
+ if (dev->overclocking) { /* adjustments to timings */
+ symbol *= dev->sample_mult;
+ symbol /= dev->sample_div;
+ mark *= dev->sample_mult;
+ mark /= dev->sample_div;
+ }
+
+ ev.duration = US_TO_NS(mark);
+ ev.pulse = true;
+ ir_raw_event_store(dev->rdev, &ev);
+
+ if (!last_symbol) {
+ ev.duration = US_TO_NS(symbol);
+ ev.pulse = false;
+ ir_raw_event_store(dev->rdev, &ev);
+ } else {
+ st_rc_send_lirc_timeout(dev->rdev);
+ }
+
+ }
+ last_symbol = 0;
+ status = readl(dev->rx_base + IRB_RX_STATUS);
+ }
+
+ writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR);
+
+ /* Empty software fifo */
+ ir_raw_event_handle(dev->rdev);
+ return IRQ_HANDLED;
+}
+
+static void st_rc_hardware_init(struct st_rc_device *dev)
+{
+ int baseclock, freqdiff;
+ unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
+ unsigned int rx_sampling_freq_div;
+
+ clk_prepare_enable(dev->sys_clock);
+ baseclock = clk_get_rate(dev->sys_clock);
+
+ /* IRB input pins are inverted internally from high to low. */
+ writel(1, dev->rx_base + IRB_RX_POLARITY_INV);
+
+ rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ;
+ writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM);
+
+ freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ);
+ if (freqdiff) { /* overclocking, work out the adjustment factors */
+ dev->overclocking = true;
+ dev->sample_mult = 1000;
+ dev->sample_div = baseclock / (10000 * rx_sampling_freq_div);
+ rx_max_symbol_per = (rx_max_symbol_per * 1000)/dev->sample_div;
+ }
+
+ writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD);
+}
+
+static int st_rc_remove(struct platform_device *pdev)
+{
+ struct st_rc_device *rc_dev = platform_get_drvdata(pdev);
+ clk_disable_unprepare(rc_dev->sys_clock);
+ rc_unregister_device(rc_dev->rdev);
+ return 0;
+}
+
+static int st_rc_open(struct rc_dev *rdev)
+{
+ struct st_rc_device *dev = rdev->priv;
+ unsigned long flags;
+ local_irq_save(flags);
+ /* enable interrupts and receiver */
+ writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN);
+ writel(0x01, dev->rx_base + IRB_RX_EN);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static void st_rc_close(struct rc_dev *rdev)
+{
+ struct st_rc_device *dev = rdev->priv;
+ /* disable interrupts and receiver */
+ writel(0x00, dev->rx_base + IRB_RX_EN);
+ writel(0x00, dev->rx_base + IRB_RX_INT_EN);
+}
+
+static int st_rc_probe(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct rc_dev *rdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct st_rc_device *rc_dev;
+ struct device_node *np = pdev->dev.of_node;
+ const char *rx_mode;
+
+ rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL);
+
+ if (!rc_dev)
+ return -ENOMEM;
+
+ rdev = rc_allocate_device();
+
+ if (!rdev)
+ return -ENOMEM;
+
+ if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) {
+
+ if (!strcmp(rx_mode, "uhf")) {
+ rc_dev->rxuhfmode = true;
+ } else if (!strcmp(rx_mode, "infrared")) {
+ rc_dev->rxuhfmode = false;
+ } else {
+ dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode);
+ goto err;
+ }
+
+ } else {
+ goto err;
+ }
+
+ rc_dev->sys_clock = devm_clk_get(dev, NULL);
+ if (IS_ERR(rc_dev->sys_clock)) {
+ dev_err(dev, "System clock not found\n");
+ ret = PTR_ERR(rc_dev->sys_clock);
+ goto err;
+ }
+
+ rc_dev->irq = platform_get_irq(pdev, 0);
+ if (rc_dev->irq < 0) {
+ ret = rc_dev->irq;
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ rc_dev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rc_dev->base)) {
+ ret = PTR_ERR(rc_dev->base);
+ goto err;
+ }
+
+ if (rc_dev->rxuhfmode)
+ rc_dev->rx_base = rc_dev->base + 0x40;
+ else
+ rc_dev->rx_base = rc_dev->base;
+
+ rc_dev->dev = dev;
+ platform_set_drvdata(pdev, rc_dev);
+ st_rc_hardware_init(rc_dev);
+
+ rdev->driver_type = RC_DRIVER_IR_RAW;
+ rdev->allowed_protos = RC_BIT_ALL;
+ /* rx sampling rate is 10 MHz */
+ rdev->rx_resolution = 100;
+ rdev->timeout = US_TO_NS(MAX_SYMB_TIME);
+ rdev->priv = rc_dev;
+ rdev->open = st_rc_open;
+ rdev->close = st_rc_close;
+ rdev->driver_name = IR_ST_NAME;
+ rdev->map_name = RC_MAP_LIRC;
+ rdev->input_name = "ST Remote Control Receiver";
+
+ /* enable wake via this device */
+ device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev, true);
+
+ ret = rc_register_device(rdev);
+ if (ret < 0)
+ goto clkerr;
+
+ rc_dev->rdev = rdev;
+ if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt,
+ IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) {
+ dev_err(dev, "IRQ %d register failed\n", rc_dev->irq);
+ ret = -EINVAL;
+ goto rcerr;
+ }
+
+ /*
+ * For LIRC_MODE_MODE2, LIRC_MODE_PULSE or LIRC_MODE_RAW,
+ * lircd expects a long space before the signal train in order to sync.
+ */
+ st_rc_send_lirc_timeout(rdev);
+
+ dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR");
+
+ return ret;
+rcerr:
+ rc_unregister_device(rdev);
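+ /*
+ * rc_unregister_device() has released rdev; clear it so the
+ * rc_free_device() call in the err path below becomes a no-op.
+ */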
+ rdev = NULL;
+clkerr:
+ clk_disable_unprepare(rc_dev->sys_clock);
+err:
+ rc_free_device(rdev);
+ dev_err(dev, "Unable to register device (%d)\n", ret);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int st_rc_suspend(struct device *dev)
+{
+ struct st_rc_device *rc_dev = dev_get_drvdata(dev);
+
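+ /*
+ * If the device may wake the system, keep the IRQ armed as a
+ * wakeup source; otherwise disable the receiver and gate its clock.
+ */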
+ if (device_may_wakeup(dev)) {
+ if (!enable_irq_wake(rc_dev->irq))
+ rc_dev->irq_wake = 1;
+ else
+ return -EINVAL;
+ } else {
+ pinctrl_pm_select_sleep_state(dev);
+ writel(0x00, rc_dev->rx_base + IRB_RX_EN);
+ writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
+ clk_disable_unprepare(rc_dev->sys_clock);
+ }
+
+ return 0;
+}
+
+static int st_rc_resume(struct device *dev)
+{
+ struct st_rc_device *rc_dev = dev_get_drvdata(dev);
+ struct rc_dev *rdev = rc_dev->rdev;
+
+ if (rc_dev->irq_wake) {
+ disable_irq_wake(rc_dev->irq);
+ rc_dev->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(dev);
+ st_rc_hardware_init(rc_dev);
+ if (rdev->users) {
+ writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN);
+ writel(0x01, rc_dev->rx_base + IRB_RX_EN);
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume);
+#endif
+
+#ifdef CONFIG_OF
+static struct of_device_id st_rc_match[] = {
+ { .compatible = "st,comms-irb", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, st_rc_match);
+#endif
+
+static struct platform_driver st_rc_driver = {
+ .driver = {
+ .name = IR_ST_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st_rc_match),
+#ifdef CONFIG_PM
+ .pm = &st_rc_pm_ops,
+#endif
+ },
+ .probe = st_rc_probe,
+ .remove = st_rc_remove,
+};
+
+module_platform_driver(st_rc_driver);
+
+MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms");
+MODULE_AUTHOR("STMicroelectronics (R&D) Ltd");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 98bd4960c75..904baf4eec2 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1110,7 +1110,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
}
err = request_irq(data->irq, wbcir_irq_handler,
- IRQF_DISABLED, DRVNAME, device);
+ 0, DRVNAME, device);
if (err) {
dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
err = -EBUSY;
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 6c96e489877..72971a8d3c3 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -21,20 +21,30 @@
#include "e4000_priv.h"
#include <linux/math64.h>
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
/* write multiple registers */
static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
- u8 buf[1 + len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg->i2c_addr,
.flags = 0,
- .len = sizeof(buf),
+ .len = 1 + len,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
@@ -54,7 +64,7 @@ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
- u8 buf[len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[2] = {
{
.addr = priv->cfg->i2c_addr,
@@ -64,11 +74,18 @@ static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
}, {
.addr = priv->cfg->i2c_addr,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = len,
.buf = buf,
}
};
+ if (len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, buf, len);
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index f4d0e797a6c..d74e9205681 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -139,7 +139,7 @@ static int fc0012_set_params(struct dvb_frontend *fe)
unsigned char reg[7], am, pm, multi, tmp;
unsigned long f_vco;
unsigned short xtal_freq_khz_2, xin, xdiv;
- int vco_select = false;
+ bool vco_select = false;
if (fe->callback) {
ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index bd8f0f1e8f3..b4162315773 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -233,7 +233,7 @@ static int fc0013_set_params(struct dvb_frontend *fe)
unsigned char reg[7], am, pm, multi, tmp;
unsigned long f_vco;
unsigned short xtal_freq_khz_2, xin, xdiv;
- int vco_select = false;
+ bool vco_select = false;
if (fe->callback) {
ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 81f38aae9c6..3aecaf46509 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -20,6 +20,9 @@
#include "fc2580_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
/*
* TODO:
* I2C write and read works only for one single register. Multiple registers
@@ -41,16 +44,23 @@
static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
- u8 buf[1 + len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg->i2c_addr,
.flags = 0,
- .len = sizeof(buf),
+ .len = 1 + len,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
@@ -69,7 +79,7 @@ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
- u8 buf[len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[2] = {
{
.addr = priv->cfg->i2c_addr,
@@ -79,11 +89,18 @@ static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len)
}, {
.addr = priv->cfg->i2c_addr,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = len,
.buf = buf,
}
};
+ if (len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, buf, len);
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 1c23666468c..d9ee43fae62 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -612,10 +612,19 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type,
vco_fine_tune = (data[4] & 0x30) >> 4;
- if (vco_fine_tune > VCO_POWER_REF)
- div_num = div_num - 1;
- else if (vco_fine_tune < VCO_POWER_REF)
- div_num = div_num + 1;
+ tuner_dbg("mix_div=%d div_num=%d vco_fine_tune=%d\n",
+ mix_div, div_num, vco_fine_tune);
+
+ /*
+ * XXX: the R828D with a 16 MHz xtal always seems to report
+ * vco_fine_tune=1, which makes this calculation go wrong.
+ */
+ if (priv->cfg->rafael_chip != CHIP_R828D) {
+ if (vco_fine_tune > VCO_POWER_REF)
+ div_num = div_num - 1;
+ else if (vco_fine_tune < VCO_POWER_REF)
+ div_num = div_num + 1;
+ }
rc = r820t_write_reg_mask(priv, 0x10, div_num << 5, 0xe0);
if (rc < 0)
@@ -637,11 +646,6 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type,
vco_fra = pll_ref * 129 / 128;
}
- if (nint > 63) {
- tuner_info("No valid PLL values for %u kHz!\n", freq);
- return -EINVAL;
- }
-
ni = (nint - 13) / 4;
si = nint - 4 * ni - 13;
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index e4a84ee231c..abe256e1f84 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -20,6 +20,9 @@
#include "tda18212.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct tda18212_priv {
struct tda18212_config *cfg;
struct i2c_adapter *i2c;
@@ -32,16 +35,23 @@ static int tda18212_wr_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
int len)
{
int ret;
- u8 buf[len+1];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg->i2c_address,
.flags = 0,
- .len = sizeof(buf),
+ .len = 1 + len,
.buf = buf,
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
buf[0] = reg;
memcpy(&buf[1], val, len);
@@ -61,7 +71,7 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
int len)
{
int ret;
- u8 buf[len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[2] = {
{
.addr = priv->cfg->i2c_address,
@@ -71,11 +81,18 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
}, {
.addr = priv->cfg->i2c_address,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = len,
.buf = buf,
}
};
+ if (len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, buf, len);
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 2d31aeb6b08..9300e9361e3 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -20,11 +20,14 @@
#include "tda18218_priv.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
/* write multiple registers */
static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
{
int ret = 0, len2, remaining;
- u8 buf[1 + len];
+ u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg->i2c_address,
@@ -33,6 +36,13 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
}
};
+ if (1 + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
for (remaining = len; remaining > 0;
remaining -= (priv->cfg->i2c_wr_max - 1)) {
len2 = remaining;
@@ -63,7 +73,7 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
{
int ret;
- u8 buf[reg+len]; /* we must start read always from reg 0x00 */
+ u8 buf[MAX_XFER_SIZE]; /* reads must always start from reg 0x00 */
struct i2c_msg msg[2] = {
{
.addr = priv->cfg->i2c_address,
@@ -73,11 +83,18 @@ static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
}, {
.addr = priv->cfg->i2c_address,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = reg + len,
.buf = buf,
}
};
+ if (reg + len > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ KBUILD_MODNAME, reg, len);
+ return -EINVAL;
+ }
+
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, &buf[reg], len);
diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c
index 300005c535b..9823248d743 100644
--- a/drivers/media/tuners/tda9887.c
+++ b/drivers/media/tuners/tda9887.c
@@ -536,8 +536,8 @@ static int tda9887_status(struct dvb_frontend *fe)
unsigned char buf[1];
int rc;
- memset(buf,0,sizeof(buf));
- if (1 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props,buf,1)))
+ rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, 1);
+ if (rc != 1)
tuner_info("i2c i/o error: rc == %d (should be 1)\n", rc);
dump_read_message(fe, buf);
return 0;
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 878d2c4d9e8..4be5cf808a4 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -24,6 +24,9 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 80
+
/* Registers (Write-only) */
#define XREG_INIT 0x00
#define XREG_RF_FREQ 0x02
@@ -547,7 +550,10 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
{
struct xc2028_data *priv = fe->tuner_priv;
int pos, rc;
- unsigned char *p, *endp, buf[priv->ctrl.max_len];
+ unsigned char *p, *endp, buf[MAX_XFER_SIZE];
+
+ if (priv->ctrl.max_len > sizeof(buf))
+ priv->ctrl.max_len = sizeof(buf);
tuner_dbg("%s called\n", __func__);
@@ -572,7 +578,7 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
return -EINVAL;
}
- size = le16_to_cpu(*(__u16 *) p);
+ size = le16_to_cpu(*(__le16 *) p);
p += sizeof(size);
if (size == 0xffff)
@@ -683,7 +689,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
/* 16 SCODE entries per file; each SCODE entry is 12 bytes and
* has a 2-byte size header in the firmware format. */
if (priv->firm[pos].size != 14 * 16 || scode >= 16 ||
- le16_to_cpu(*(__u16 *)(p + 14 * scode)) != 12)
+ le16_to_cpu(*(__le16 *)(p + 14 * scode)) != 12)
return -EINVAL;
p += 14 * scode + 2;
}
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 8b6275f8590..0bd96906339 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -390,7 +390,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
}
if (fc_usb->iso_buffer != NULL)
- pci_free_consistent(NULL,
+ usb_free_coherent(fc_usb->udev,
fc_usb->buffer_size, fc_usb->iso_buffer,
fc_usb->dma_addr);
}
@@ -407,8 +407,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
"each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
- fc_usb->iso_buffer = pci_alloc_consistent(NULL,
- bufsize, &fc_usb->dma_addr);
+ fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
+ bufsize, GFP_KERNEL, &fc_usb->dma_addr);
if (fc_usb->iso_buffer == NULL)
return -ENOMEM;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index be171928360..351a78a84c3 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -209,7 +209,7 @@ static void cpia2_usb_complete(struct urb *urb)
{
int i;
unsigned char *cdata;
- static int frame_ready = false;
+ static bool frame_ready = false;
struct camera_data *cam = (struct camera_data *) urb->context;
if (urb->status!=0) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index a384f80f595..e9d017bea37 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -978,7 +978,6 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
int minor)
{
int retval = -ENOMEM;
- int errCode;
unsigned int maxh, maxw;
dev->udev = udev;
@@ -1014,8 +1013,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
/* Cx231xx pre card setup */
cx231xx_pre_card_setup(dev);
- errCode = cx231xx_config(dev);
- if (errCode) {
+ retval = cx231xx_config(dev);
+ if (retval) {
cx231xx_errdev("error configuring device\n");
return -ENOMEM;
}
@@ -1024,12 +1023,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
dev->norm = dev->board.norm;
/* register i2c bus */
- errCode = cx231xx_dev_init(dev);
- if (errCode < 0) {
- cx231xx_dev_uninit(dev);
+ retval = cx231xx_dev_init(dev);
+ if (retval) {
cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ __func__, retval);
+ goto err_dev_init;
}
/* Do board specific init */
@@ -1047,11 +1045,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
dev->interlaced = 0;
dev->video_input = 0;
- errCode = cx231xx_config(dev);
- if (errCode < 0) {
+ retval = cx231xx_config(dev);
+ if (retval) {
cx231xx_errdev("%s: cx231xx_config - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ __func__, retval);
+ goto err_dev_init;
}
/* init video dma queues */
@@ -1075,9 +1073,9 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
}
retval = cx231xx_register_analog_devices(dev);
- if (retval < 0) {
- cx231xx_release_resources(dev);
- return retval;
+ if (retval) {
+ cx231xx_release_analog_resources(dev);
+ goto err_analog;
}
cx231xx_ir_init(dev);
@@ -1085,6 +1083,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
cx231xx_init_extension(dev);
return 0;
+err_analog:
+ cx231xx_remove_from_devlist(dev);
+err_dev_init:
+ cx231xx_dev_uninit(dev);
+ return retval;
}
#if defined(CONFIG_MODULES) && defined(MODULE)
@@ -1132,7 +1135,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
char *speed;
struct usb_interface_assoc_descriptor *assoc_desc;
- udev = usb_get_dev(interface_to_usbdev(interface));
ifnum = interface->altsetting[0].desc.bInterfaceNumber;
/*
@@ -1161,6 +1163,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
+ udev = usb_get_dev(interface_to_usbdev(interface));
+
snprintf(dev->name, 29, "cx231xx #%d", nr);
dev->devno = nr;
dev->model = id->driver_info;
@@ -1223,10 +1227,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (assoc_desc->bFirstInterface != ifnum) {
cx231xx_err(DRIVER_NAME ": Not found "
"matching IAD interface\n");
- clear_bit(dev->devno, &cx231xx_devused);
- kfree(dev);
- dev = NULL;
- return -ENODEV;
+ retval = -ENODEV;
+ goto err_if;
}
cx231xx_info("registering interface %d\n", ifnum);
@@ -1242,22 +1244,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
cx231xx_errdev("v4l2_device_register failed\n");
- clear_bit(dev->devno, &cx231xx_devused);
- kfree(dev);
- dev = NULL;
- return -EIO;
+ retval = -EIO;
+ goto err_v4l2;
}
/* allocate device struct */
retval = cx231xx_init_dev(dev, udev, nr);
- if (retval) {
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- usb_set_intfdata(interface, NULL);
-
- return retval;
- }
+ if (retval)
+ goto err_init;
/* compute alternate max packet sizes for video */
uif = udev->actconfig->interface[dev->current_pcb_config.
@@ -1275,11 +1268,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->video_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_video_alt;
}
for (i = 0; i < dev->video_mode.num_alt; i++) {
@@ -1309,11 +1299,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->vbi_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_vbi_alt;
}
for (i = 0; i < dev->vbi_mode.num_alt; i++) {
@@ -1344,11 +1331,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_sliced_cc_alt;
}
for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
@@ -1380,11 +1364,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->ts1_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_ts1_alt;
}
for (i = 0; i < dev->ts1_mode.num_alt; i++) {
@@ -1411,6 +1392,29 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
request_modules(dev);
return 0;
+err_ts1_alt:
+ kfree(dev->sliced_cc_mode.alt_max_pkt_size);
+err_sliced_cc_alt:
+ kfree(dev->vbi_mode.alt_max_pkt_size);
+err_vbi_alt:
+ kfree(dev->video_mode.alt_max_pkt_size);
+err_video_alt:
+ /* cx231xx_uninit_dev: */
+ cx231xx_close_extension(dev);
+ cx231xx_ir_exit(dev);
+ cx231xx_release_analog_resources(dev);
+ cx231xx_417_unregister(dev);
+ cx231xx_remove_from_devlist(dev);
+ cx231xx_dev_uninit(dev);
+err_init:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2:
+ usb_set_intfdata(interface, NULL);
+err_if:
+ usb_put_dev(udev);
+ kfree(dev);
+ clear_bit(dev->devno, &cx231xx_devused);
+ return retval;
}
/*
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
index d7308ab7a90..2a34ceee480 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
@@ -28,7 +28,7 @@ MODULE_PARM_DESC(pcb_debug, "enable pcb config debug messages [video]");
/******************************************************************************/
-struct pcb_config cx231xx_Scenario[] = {
+static struct pcb_config cx231xx_Scenario[] = {
{
INDEX_SELFPOWER_DIGITAL_ONLY, /* index */
USB_SELF_POWER, /* power_type */
@@ -672,7 +672,7 @@ u32 initialize_cx231xx(struct cx231xx *dev)
pcb config it is related to */
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, data, 4);
- config_info = le32_to_cpu(*((u32 *) data));
+ config_info = le32_to_cpu(*((__le32 *)data));
usb_speed = (u8) (config_info & 0x1);
/* Verify this device belongs to Bus power or Self power device */
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index d556042cf31..da47d2392f2 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -397,12 +397,13 @@ error:
return ret;
}
+#define AF9015_EEPROM_SIZE 256
+
/* hash (and dump) eeprom */
static int af9015_eeprom_hash(struct dvb_usb_device *d)
{
struct af9015_state *state = d_to_priv(d);
int ret, i;
- static const unsigned int AF9015_EEPROM_SIZE = 256;
u8 buf[AF9015_EEPROM_SIZE];
struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, NULL};
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 1ea17dc2a76..c8fcd78425b 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -21,6 +21,9 @@
#include "af9035.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static u16 af9035_checksum(const u8 *buf, size_t len)
@@ -126,10 +129,16 @@ exit:
/* write multiple registers */
static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{
- u8 wbuf[6 + len];
+ u8 wbuf[MAX_XFER_SIZE];
u8 mbox = (reg >> 16) & 0xff;
struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
+ if (6 + len > sizeof(wbuf)) {
+ dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
+ KBUILD_MODNAME, len);
+ return -EOPNOTSUPP;
+ }
+
wbuf[0] = len;
wbuf[1] = 2;
wbuf[2] = 0;
@@ -228,9 +237,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
msg[1].len);
} else {
/* I2C */
- u8 buf[5 + msg[0].len];
+ u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
buf, msg[1].len, msg[1].buf };
+
+ if (5 + msg[0].len > sizeof(buf)) {
+ dev_warn(&d->udev->dev,
+ "%s: i2c xfer: len=%d is too big!\n",
+ KBUILD_MODNAME, msg[0].len);
+ return -EOPNOTSUPP;
+ }
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len;
buf[1] = msg[0].addr << 1;
@@ -257,9 +273,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
msg[0].len - 3);
} else {
/* I2C */
- u8 buf[5 + msg[0].len];
+ u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
0, NULL };
+
+ if (5 + msg[0].len > sizeof(buf)) {
+ dev_warn(&d->udev->dev,
+ "%s: i2c xfer: len=%d is too big!\n",
+ KBUILD_MODNAME, msg[0].len);
+ return -EOPNOTSUPP;
+ }
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
buf[1] = msg[0].addr << 1;
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index e97964ef7f5..2627553f7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -23,6 +23,9 @@
#include "lgdt3305.h"
#include "lg2160.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level "
@@ -57,7 +60,12 @@ int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
{
int wo = (rbuf == NULL || rlen == 0); /* write-only */
int ret;
- u8 sndbuf[1+wlen];
+ u8 sndbuf[MAX_XFER_SIZE];
+
+ if (1 + wlen > sizeof(sndbuf)) {
+ pr_warn("%s: len=%d is too big!\n", __func__, wlen);
+ return -EOPNOTSUPP;
+ }
pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c0cd0848631..ecca03667f9 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -377,6 +377,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
+ struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf};
dev_dbg(&d->udev->dev, "%s:\n", __func__);
@@ -489,6 +490,15 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
goto found;
}
+ /* check R828D ID register; reg=00 val=69 */
+ ret = rtl28xxu_ctrl_msg(d, &req_r828d);
+ if (ret == 0 && buf[0] == 0x69) {
+ priv->tuner = TUNER_RTL2832_R828D;
+ priv->tuner_name = "R828D";
+ goto found;
+ }
+
found:
dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name);
@@ -745,6 +755,7 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
rtl2832_config = &rtl28xxu_rtl2832_e4000_config;
break;
case TUNER_RTL2832_R820T:
+ case TUNER_RTL2832_R828D:
rtl2832_config = &rtl28xxu_rtl2832_r820t_config;
break;
default:
@@ -866,6 +877,13 @@ static const struct r820t_config rtl2832u_r820t_config = {
.rafael_chip = CHIP_R820T,
};
+static const struct r820t_config rtl2832u_r828d_config = {
+ .i2c_addr = 0x3a,
+ .xtal = 16000000,
+ .max_i2c_msg_len = 2,
+ .rafael_chip = CHIP_R828D,
+};
+
static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
@@ -923,6 +941,27 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
break;
+ case TUNER_RTL2832_R828D:
+ /* power off mn88472 demod on GPIO0 */
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap,
+ &rtl2832u_r828d_config);
+
+ /* Use tuner to get the signal strength */
+ adap->fe[0]->ops.read_signal_strength =
+ adap->fe[0]->ops.tuner_ops.get_rf_strength;
+ break;
default:
fe = NULL;
dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
@@ -1388,6 +1427,9 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A,
&rtl2832u_props, "Crypto ReDi PC 50 A", NULL) },
+
+ { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
+ &rtl2832u_props, "Astrometa DVB-T2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 729b3540c2f..2142bcb41b4 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -83,6 +83,7 @@ enum rtl28xxu_tuner {
TUNER_RTL2832_TDA18272,
TUNER_RTL2832_FC0013,
TUNER_RTL2832_R820T,
+ TUNER_RTL2832_R828D,
};
struct rtl28xxu_req {
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index ea2d5ee8657..c11138ebf6f 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -254,7 +254,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
-struct stb0899_config az6027_stb0899_config = {
+static struct stb0899_config az6027_stb0899_config = {
.init_dev = az6027_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = az6027_stb0899_s1_init_3,
@@ -291,7 +291,7 @@ struct stb0899_config az6027_stb0899_config = {
.tuner_set_rfsiggain = NULL,
};
-struct stb6100_config az6027_stb6100_config = {
+static struct stb6100_config az6027_stb6100_config = {
.tuner_address = 0xc0,
.refclock = 27000000,
};
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 3940bb0f9ef..20e345d9fe8 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -43,6 +43,9 @@
#include "lgs8gxx.h"
#include "atbm8830.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
/* debug */
static int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -57,7 +60,14 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[1+wlen];
+ u8 sndbuf[MAX_XFER_SIZE];
+
+ if (1 + wlen > sizeof(sndbuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ wlen);
+ return -EOPNOTSUPP;
+ }
+
memset(sndbuf, 0, 1+wlen);
sndbuf[0] = cmd;
@@ -158,7 +168,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (msg[i].flags & I2C_M_RD) {
/* read only */
- u8 obuf[3], ibuf[1+msg[i].len];
+ u8 obuf[3], ibuf[MAX_XFER_SIZE];
+
+ if (1 + msg[i].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[i].len);
+ return -EOPNOTSUPP;
+ }
obuf[0] = 0;
obuf[1] = msg[i].len;
obuf[2] = msg[i].addr;
@@ -172,7 +188,18 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
} else if (i+1 < num && (msg[i+1].flags & I2C_M_RD) &&
msg[i].addr == msg[i+1].addr) {
/* write to then read from same address */
- u8 obuf[3+msg[i].len], ibuf[1+msg[i+1].len];
+ u8 obuf[MAX_XFER_SIZE], ibuf[MAX_XFER_SIZE];
+
+ if (3 + msg[i].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[i].len);
+ return -EOPNOTSUPP;
+ }
+ if (1 + msg[i + 1].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[i + 1].len);
+ return -EOPNOTSUPP;
+ }
obuf[0] = msg[i].len;
obuf[1] = msg[i+1].len;
obuf[2] = msg[i].addr;
@@ -191,7 +218,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
i++;
} else {
/* write only */
- u8 obuf[2+msg[i].len], ibuf;
+ u8 obuf[MAX_XFER_SIZE], ibuf;
+
+ if (2 + msg[i].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[i].len);
+ return -EOPNOTSUPP;
+ }
obuf[0] = msg[i].addr;
obuf[1] = msg[i].len;
memcpy(&obuf[2], msg[i].buf, msg[i].len);
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index c2dded92f1d..6d68af0c49c 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -12,6 +12,9 @@
#include <linux/kconfig.h>
#include "dibusb.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS);
@@ -105,11 +108,16 @@ EXPORT_SYMBOL(dibusb2_0_power_ctrl);
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
- u8 sndbuf[wlen+4]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
+ u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
/* write only ? */
int wo = (rbuf == NULL || rlen == 0),
len = 2 + wlen + (wo ? 0 : 2);
+ if (4 + wlen > sizeof(sndbuf)) {
+ warn("i2c wr: len=%d is too big!\n", wlen);
+ return -EOPNOTSUPP;
+ }
+
sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6e237b6dd0a..c1a63b2a6ba 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -30,6 +30,9 @@
#include "stb6100_proc.h"
#include "m88rs2000.h"
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
#ifndef USB_PID_DW2102
#define USB_PID_DW2102 0x2102
#endif
@@ -308,7 +311,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
case 2: {
/* read */
/* first write first register number */
- u8 ibuf[msg[1].len + 2], obuf[3];
+ u8 ibuf[MAX_XFER_SIZE], obuf[3];
+
+ if (2 + msg[1].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[1].len);
+ return -EOPNOTSUPP;
+ }
+
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
@@ -325,7 +335,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
switch (msg[0].addr) {
case 0x68: {
/* write to register */
- u8 obuf[msg[0].len + 2];
+ u8 obuf[MAX_XFER_SIZE];
+
+ if (2 + msg[0].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[1].len);
+ return -EOPNOTSUPP;
+ }
+
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
@@ -335,7 +352,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
}
case 0x61: {
/* write to tuner */
- u8 obuf[msg[0].len + 2];
+ u8 obuf[MAX_XFER_SIZE];
+
+ if (2 + msg[0].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[1].len);
+ return -EOPNOTSUPP;
+ }
+
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
@@ -401,7 +425,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
default: {
if (msg[j].flags == I2C_M_RD) {
/* read registers */
- u8 ibuf[msg[j].len + 2];
+ u8 ibuf[MAX_XFER_SIZE];
+
+ if (2 + msg[j].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[j].len);
+ return -EOPNOTSUPP;
+ }
+
dw210x_op_rw(d->udev, 0xc3,
(msg[j].addr << 1) + 1, 0,
ibuf, msg[j].len + 2,
@@ -430,7 +461,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
} while (len > 0);
} else {
/* write registers */
- u8 obuf[msg[j].len + 2];
+ u8 obuf[MAX_XFER_SIZE];
+
+ if (2 + msg[j].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[j].len);
+ return -EOPNOTSUPP;
+ }
+
obuf[0] = msg[j].addr << 1;
obuf[1] = msg[j].len;
memcpy(obuf + 2, msg[j].buf, msg[j].len);
@@ -463,7 +501,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
case 2: {
/* read */
/* first write first register number */
- u8 ibuf[msg[1].len + 2], obuf[3];
+ u8 ibuf[MAX_XFER_SIZE], obuf[3];
+
+ if (2 + msg[1].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[1].len);
+ return -EOPNOTSUPP;
+ }
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
@@ -481,7 +525,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
case 0x60:
case 0x0c: {
/* write to register */
- u8 obuf[msg[0].len + 2];
+ u8 obuf[MAX_XFER_SIZE];
+
+ if (2 + msg[0].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[0].len);
+ return -EOPNOTSUPP;
+ }
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
@@ -563,7 +613,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
default: {
if (msg[j].flags == I2C_M_RD) {
/* read registers */
- u8 ibuf[msg[j].len];
+ u8 ibuf[MAX_XFER_SIZE];
+
+ if (msg[j].len > sizeof(ibuf)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[j].len);
+ return -EOPNOTSUPP;
+ }
+
dw210x_op_rw(d->udev, 0x91, 0, 0,
ibuf, msg[j].len,
DW210X_READ_MSG);
@@ -590,7 +647,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
} while (len > 0);
} else if (j < (num - 1)) {
/* write register addr before read */
- u8 obuf[msg[j].len + 2];
+ u8 obuf[MAX_XFER_SIZE];
+
+ if (2 + msg[j].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[j].len);
+ return -EOPNOTSUPP;
+ }
+
obuf[0] = msg[j + 1].len;
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
@@ -602,7 +666,13 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
break;
} else {
/* write registers */
- u8 obuf[msg[j].len + 2];
+ u8 obuf[MAX_XFER_SIZE];
+
+ if (2 + msg[j].len > sizeof(obuf)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[j].len);
+ return -EOPNOTSUPP;
+ }
obuf[0] = msg[j].len + 1;
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
@@ -955,9 +1025,10 @@ static struct ds3000_config dw2104_ds3000_config = {
.demod_address = 0x68,
};
-static struct ts2020_config dw2104_ts2020_config = {
+static struct ts2020_config dw2104_ts2020_config = {
.tuner_address = 0x60,
.clk_out_div = 1,
+ .frequency_div = 1060000,
};
static struct ds3000_config s660_ds3000_config = {
@@ -966,6 +1037,12 @@ static struct ds3000_config s660_ds3000_config = {
.set_lock_led = dw210x_led_ctrl,
};
+static struct ts2020_config s660_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+ .frequency_div = 1146000,
+};
+
static struct stv0900_config dw2104a_stv0900_config = {
.demod_address = 0x6a,
.demod_mode = 0,
@@ -1205,7 +1282,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
if (d->fe_adap[0].fe == NULL)
return -EIO;
- dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config,
+ dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config,
&d->dev->i2c_adap);
st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
@@ -1213,7 +1290,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
- info("Attached ds3000+ds2020!\n");
+ info("Attached ds3000+ts2020!\n");
return 0;
}
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 73cc50afa5e..d666741797d 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <media/soc_camera.h>
#include <media/mt9v011.h>
+#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -47,6 +48,7 @@ static struct soc_camera_link camlink = {
.bus_id = 0,
.flags = 0,
.module_name = "em28xx",
+ .unbalanced_power = true,
};
@@ -325,13 +327,24 @@ int em28xx_detect_sensor(struct em28xx *dev)
int em28xx_init_camera(struct em28xx *dev)
{
+ char clk_name[V4L2_SUBDEV_NAME_SIZE];
+ struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus];
+ struct i2c_adapter *adap = &dev->i2c_adap[dev->def_i2c_bus];
+ int ret = 0;
+
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ i2c_adapter_id(adap), client->addr);
+ dev->clk = v4l2_clk_register_fixed(clk_name, "mclk", -EINVAL);
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+
switch (dev->em28xx_sensor) {
case EM28XX_MT9V011:
{
struct mt9v011_platform_data pdata;
struct i2c_board_info mt9v011_info = {
.type = "mt9v011",
- .addr = dev->i2c_client[dev->def_i2c_bus].addr,
+ .addr = client->addr,
.platform_data = &pdata,
};
@@ -352,10 +365,11 @@ int em28xx_init_camera(struct em28xx *dev)
dev->sensor_xtal = 4300000;
pdata.xtal = dev->sensor_xtal;
if (NULL ==
- v4l2_i2c_new_subdev_board(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus],
- &mt9v011_info, NULL))
- return -ENODEV;
+ v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap,
+ &mt9v011_info, NULL)) {
+ ret = -ENODEV;
+ break;
+ }
/* probably means GRGB 16 bit bayer */
dev->vinmode = 0x0d;
dev->vinctl = 0x00;
@@ -391,7 +405,7 @@ int em28xx_init_camera(struct em28xx *dev)
struct i2c_board_info ov2640_info = {
.type = "ov2640",
.flags = I2C_CLIENT_SCCB,
- .addr = dev->i2c_client[dev->def_i2c_bus].addr,
+ .addr = client->addr,
.platform_data = &camlink,
};
struct v4l2_mbus_framefmt fmt;
@@ -408,9 +422,12 @@ int em28xx_init_camera(struct em28xx *dev)
dev->sensor_yres = 480;
subdev =
- v4l2_i2c_new_subdev_board(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus],
+ v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap,
&ov2640_info, NULL);
+ if (NULL == subdev) {
+ ret = -ENODEV;
+ break;
+ }
fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
fmt.width = 640;
@@ -427,8 +444,13 @@ int em28xx_init_camera(struct em28xx *dev)
}
case EM28XX_NOSENSOR:
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ if (ret < 0) {
+ v4l2_clk_unregister_fixed(dev->clk);
+ dev->clk = NULL;
+ }
+
+ return ret;
}
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index dc65742c4bb..a5196697627 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -36,6 +36,7 @@
#include <media/tvaudio.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
+#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -95,8 +96,8 @@ static struct em28xx_reg_seq default_digital[] = {
/* Board Hauppauge WinTV HVR 900 analog */
static struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = {
{EM2820_R08_GPIO_CTRL, 0x2d, ~EM_GPIO_4, 10},
- {0x05, 0xff, 0x10, 10},
- { -1, -1, -1, -1},
+ { 0x05, 0xff, 0x10, 10},
+ { -1, -1, -1, -1},
};
/* Board Hauppauge WinTV HVR 900 digital */
@@ -104,20 +105,20 @@ static struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x04, 0x0f, 10},
{EM2880_R04_GPO, 0x0c, 0x0f, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Board Hauppauge WinTV HVR 900 (R2) digital */
static struct em28xx_reg_seq hauppauge_wintv_hvr_900R2_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x0c, 0x0f, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
- {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10},
+ { -1, -1, -1, -1},
};
/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
@@ -132,7 +133,7 @@ static struct em28xx_reg_seq em2882_kworld_315u_digital[] = {
{EM2880_R04_GPO, 0x04, 0xff, 10},
{EM2880_R04_GPO, 0x0c, 0xff, 10},
{EM2820_R08_GPIO_CTRL, 0x7e, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
@@ -140,19 +141,19 @@ static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
{EM2880_R04_GPO, 0x0c, 0xff, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
{EM2880_R04_GPO, 0x0c, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq kworld_330u_analog[] = {
{EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x00, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq kworld_330u_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Evga inDtube
@@ -170,11 +171,11 @@ static struct em28xx_reg_seq evga_indtube_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 1},
{EM2880_R04_GPO, 0x04, 0xff, 10},
{EM2880_R04_GPO, 0x0c, 0xff, 1},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/*
- * KWorld PlusTV 340U and UB435-Q (ATSC) GPIOs map:
+ * KWorld PlusTV 340U, UB435-Q and UB435-Q V2 (ATSC) GPIOs map:
* EM_GPIO_0 - currently unknown
* EM_GPIO_1 - LED disable/enable (1 = off, 0 = on)
* EM_GPIO_2 - currently unknown
@@ -185,8 +186,8 @@ static struct em28xx_reg_seq evga_indtube_digital[] = {
* EM_GPIO_7 - currently unknown
*/
static struct em28xx_reg_seq kworld_a340_digital[] = {
- {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
+ { -1, -1, -1, -1},
};
/* Pinnacle Hybrid Pro eb1a:2881 */
@@ -205,13 +206,13 @@ static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = {
static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = {
{EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x00, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* eb1a:2868 Reddo DVB-C USB TV Box
@@ -225,7 +226,7 @@ static struct em28xx_reg_seq reddo_dvb_c_usb_box[] = {
{EM2820_R08_GPIO_CTRL, 0x7f, 0xff, 10},
{EM2820_R08_GPIO_CTRL, 0x6f, 0xff, 10},
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
- {-1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Callback for the most boards */
@@ -233,23 +234,23 @@ static struct em28xx_reg_seq default_tuner_gpio[] = {
{EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10},
{EM2820_R08_GPIO_CTRL, 0, EM_GPIO_4, 10},
{EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Mute/unmute */
static struct em28xx_reg_seq compro_unmute_tv_gpio[] = {
- {EM2820_R08_GPIO_CTRL, 5, 7, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 5, 7, 10},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq compro_unmute_svid_gpio[] = {
- {EM2820_R08_GPIO_CTRL, 4, 7, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 4, 7, 10},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq compro_mute_gpio[] = {
- {EM2820_R08_GPIO_CTRL, 6, 7, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 6, 7, 10},
+ { -1, -1, -1, -1},
};
/* Terratec AV350 */
@@ -279,21 +280,21 @@ static struct em28xx_reg_seq vc211a_enable[] = {
static struct em28xx_reg_seq dikom_dk300_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Reset for the most [digital] boards */
static struct em28xx_reg_seq leadership_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq leadership_reset[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* 2013:024f PCTV nanoStick T2 290e
@@ -304,7 +305,7 @@ static struct em28xx_reg_seq pctv_290e[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 80},
{EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 80}, /* GPIO_6 = 1 */
{EM2874_R80_GPIO_P0_CTRL, 0xc0, 0xff, 80}, /* GPIO_7 = 1 */
- {-1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#if 0
@@ -313,14 +314,14 @@ static struct em28xx_reg_seq terratec_h5_gpio[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq terratec_h5_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#endif
@@ -335,12 +336,12 @@ static struct em28xx_reg_seq terratec_h5_digital[] = {
* GPIO_7 - LED (green LED)
*/
static struct em28xx_reg_seq pctv_460e[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50},
- {0x0d, 0xff, 0xff, 50},
- {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */
- {0x0d, 0x42, 0xff, 50},
- {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */
- { -1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50},
+ { 0x0d, 0xff, 0xff, 50},
+ {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */
+ { 0x0d, 0x42, 0xff, 50},
+ {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq c3tech_digital_duo_digital[] = {
@@ -352,7 +353,7 @@ static struct em28xx_reg_seq c3tech_digital_duo_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 20},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#if 0
@@ -361,14 +362,14 @@ static struct em28xx_reg_seq hauppauge_930c_gpio[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, /* xc5000 reset */
{EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq hauppauge_930c_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#endif
@@ -378,10 +379,10 @@ static struct em28xx_reg_seq hauppauge_930c_digital[] = {
* GPIO_7 - LED, 0=active
*/
static struct em28xx_reg_seq maxmedia_ub425_tc[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */
- {-1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */
+ { -1, -1, -1, -1},
};
/* 2304:0242 PCTV QuatroStick (510e)
@@ -391,10 +392,10 @@ static struct em28xx_reg_seq maxmedia_ub425_tc[] = {
* GPIO_7: LED, 1=active
*/
static struct em28xx_reg_seq pctv_510e[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
- { -1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
+ { -1, -1, -1, -1},
};
/* 2013:0251 PCTV QuatroStick nano (520e)
@@ -404,11 +405,11 @@ static struct em28xx_reg_seq pctv_510e[] = {
* GPIO_7: LED, 1=active
*/
static struct em28xx_reg_seq pctv_520e[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */
- { -1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */
+ { -1, -1, -1, -1},
};
/*
@@ -2030,6 +2031,18 @@ struct em28xx_board em28xx_boards[] = {
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ /*
+ * 1b80:e346 KWorld USB ATSC TV Stick UB435-Q V2
+ * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2
+ */
+ [EM2874_BOARD_KWORLD_UB435Q_V2] = {
+ .name = "KWorld USB ATSC TV Stick UB435-Q V2",
+ .tuner_type = TUNER_ABSENT,
+ .has_dvb = 1,
+ .dvb_gpio = kworld_a340_digital,
+ .tuner_gpio = default_tuner_gpio,
+ .def_i2c_bus = 1,
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -2173,6 +2186,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_GADMEI_UTV330 },
{ USB_DEVICE(0x1b80, 0xa340),
.driver_info = EM2870_BOARD_KWORLD_A340 },
+ { USB_DEVICE(0x1b80, 0xe346),
+ .driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 },
{ USB_DEVICE(0x2013, 0x024f),
.driver_info = EM28174_BOARD_PCTV_290E },
{ USB_DEVICE(0x2013, 0x024c),
@@ -2857,6 +2872,8 @@ void em28xx_release_resources(struct em28xx *dev)
if (dev->def_i2c_bus)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
+ if (dev->clk)
+ v4l2_clk_unregister_fixed(dev->clk);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index bb1e8dca80c..344042bb845 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -298,6 +298,18 @@ static struct lgdt3305_config em2870_lgdt3304_dev = {
.qam_if_khz = 4000,
};
+static struct lgdt3305_config em2874_lgdt3305_dev = {
+ .i2c_addr = 0x0e,
+ .demod_chip = LGDT3305,
+ .spectral_inversion = 1,
+ .deny_i2c_rptr = 0,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .vsb_if_khz = 3250,
+ .qam_if_khz = 4000,
+};
+
static struct s921_config sharp_isdbt = {
.demod_address = 0x30 >> 1
};
@@ -329,6 +341,11 @@ static struct tda18271_config kworld_a340_config = {
.std_map = &kworld_a340_std_map,
};
+static struct tda18271_config kworld_ub435q_v2_config = {
+ .std_map = &kworld_a340_std_map,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
@@ -384,7 +401,10 @@ static struct drxk_config maxmedia_ub425_tc_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
+ .microcode_name = "dvb-demod-drxk-01.fw",
+ .chunk_size = 62,
.load_firmware_sync = true,
+ .qam_demod_parameter_count = 2,
};
static struct drxk_config pctv_520e_drxk = {
@@ -424,7 +444,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
@@ -439,7 +459,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x0b},
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x65},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct {
@@ -491,13 +511,13 @@ static void terratec_h5_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq terratec_h5_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct {
unsigned char r[4];
@@ -547,12 +567,12 @@ static void terratec_htc_stick_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq terratec_htc_stick_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/*
@@ -594,13 +614,13 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/*
@@ -1227,18 +1247,14 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->fe[0]->ops.i2c_gate_ctrl = NULL;
/* attach tuner */
- if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0],
- &dev->i2c_adap[dev->def_i2c_bus], 0x60)) {
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &em28xx_cxd2820r_tda18271_config)) {
dvb_frontend_detach(dvb->fe[0]);
result = -EINVAL;
goto out_free;
}
}
-
- /* TODO: we need drx-3913k firmware in order to support DVB-T */
- em28xx_info("MaxMedia UB425-TC/Delock 61959: only DVB-C " \
- "supported by that driver version\n");
-
break;
case EM2884_BOARD_PCTV_510E:
case EM2884_BOARD_PCTV_520E:
@@ -1297,6 +1313,23 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2874_BOARD_KWORLD_UB435Q_V2:
+ dvb->fe[0] = dvb_attach(lgdt3305_attach,
+ &em2874_lgdt3305_dev,
+ &dev->i2c_adap[dev->def_i2c_bus]);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* Attach the tuner. */
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &kworld_ub435q_v2_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 9d103344f34..fc5d60efd4a 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -638,7 +638,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
if (rc)
return rc;
- if (dev->streaming_users++ == 0) {
+ if (dev->streaming_users == 0) {
/* First active streaming user, so allocate all the URBs */
/* Allocate the USB bandwidth */
@@ -657,7 +657,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
dev->packet_multiplier,
em28xx_urb_data_copy);
if (rc < 0)
- goto fail;
+ return rc;
/*
* djh: it's not clear whether this code is still needed. I'm
@@ -675,7 +675,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
}
-fail:
+ dev->streaming_users++;
+
return rc;
}
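The em28xx-video hunk above defers the streaming_users increment until the URBs are actually allocated, so a failed start no longer leaves a stale user count. A minimal stand-alone sketch of that count-on-success pattern (the helper names here are hypothetical, not em28xx API):

#include <stdio.h>

static int streaming_users;	/* stands in for dev->streaming_users */

/* hypothetical allocator standing in for the URB/bandwidth setup */
static int alloc_stream_resources(int should_fail)
{
	return should_fail ? -1 : 0;
}

static int start_streaming(int should_fail)
{
	if (streaming_users == 0) {
		/* first user: do the one-time setup */
		if (alloc_stream_resources(should_fail) < 0)
			return -1;	/* count is left untouched on error */
	}
	streaming_users++;		/* bump only after setup succeeded */
	return 0;
}

int main(void)
{
	start_streaming(1);	/* simulated failure: count stays at 0 */
	start_streaming(0);	/* success: count becomes 1 */
	printf("streaming_users = %d\n", streaming_users);
	return 0;
}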
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 205e9038b1c..f8726ad5d0a 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -131,6 +131,7 @@
#define EM2884_BOARD_TERRATEC_HTC_USB_XS 87
#define EM2884_BOARD_C3TECH_DIGITAL_DUO 88
#define EM2874_BOARD_DELOCK_61959 89
+#define EM2874_BOARD_KWORLD_UB435Q_V2 90
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -492,6 +493,7 @@ struct em28xx {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_clk *clk;
struct em28xx_board board;
/* Webcam specific fields */
diff --git a/drivers/media/usb/gspca/conex.c b/drivers/media/usb/gspca/conex.c
index 38714df31ac..2e15c80d6e3 100644
--- a/drivers/media/usb/gspca/conex.c
+++ b/drivers/media/usb/gspca/conex.c
@@ -783,7 +783,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index 064b53043b1..f23df4a9d8c 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -1553,9 +1553,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->params.format.videoSize = VIDEOSIZE_CIF;
sd->params.roi.colEnd = sd->params.roi.colStart +
- (gspca_dev->width >> 3);
+ (gspca_dev->pixfmt.width >> 3);
sd->params.roi.rowEnd = sd->params.roi.rowStart +
- (gspca_dev->height >> 2);
+ (gspca_dev->pixfmt.height >> 2);
/* And now set the camera to a known state */
ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode,
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 048507b27bb..f3a7ace0fac 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -504,8 +504,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
unsigned int frsz;
int i;
- i = gspca_dev->curr_mode;
- frsz = gspca_dev->cam.cam_mode[i].sizeimage;
+ frsz = gspca_dev->pixfmt.sizeimage;
PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
frsz = PAGE_ALIGN(frsz);
if (count >= GSPCA_MAX_FRAMES)
@@ -627,16 +626,14 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
static u32 which_bandwidth(struct gspca_dev *gspca_dev)
{
u32 bandwidth;
- int i;
/* get the (max) image size */
- i = gspca_dev->curr_mode;
- bandwidth = gspca_dev->cam.cam_mode[i].sizeimage;
+ bandwidth = gspca_dev->pixfmt.sizeimage;
/* if the image is compressed, estimate its mean size */
if (!gspca_dev->cam.needs_full_bandwidth &&
- bandwidth < gspca_dev->cam.cam_mode[i].width *
- gspca_dev->cam.cam_mode[i].height)
+ bandwidth < gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height)
bandwidth = bandwidth * 3 / 8; /* 0.375 */
/* estimate the frame rate */
@@ -650,7 +647,7 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev)
/* don't hope more than 15 fps with USB 1.1 and
* image resolution >= 640x480 */
- if (gspca_dev->width >= 640
+ if (gspca_dev->pixfmt.width >= 640
&& gspca_dev->dev->speed == USB_SPEED_FULL)
bandwidth *= 15; /* 15 fps */
else
@@ -982,9 +979,7 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
i = gspca_dev->cam.nmodes - 1; /* take the highest mode */
gspca_dev->curr_mode = i;
- gspca_dev->width = gspca_dev->cam.cam_mode[i].width;
- gspca_dev->height = gspca_dev->cam.cam_mode[i].height;
- gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat;
+ gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i];
/* does nothing if ctrl_handler == NULL */
v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
@@ -1105,10 +1100,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- int mode;
- mode = gspca_dev->curr_mode;
- fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+ fmt->fmt.pix = gspca_dev->pixfmt;
/* some drivers use priv internally, zero it before giving it to
userspace */
fmt->fmt.pix.priv = 0;
@@ -1140,6 +1133,12 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
mode = mode2;
}
fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+ if (gspca_dev->sd_desc->try_fmt) {
+ /* pass original resolution to subdriver try_fmt */
+ fmt->fmt.pix.width = w;
+ fmt->fmt.pix.height = h;
+ gspca_dev->sd_desc->try_fmt(gspca_dev, fmt);
+ }
/* some drivers use priv internally, zero it before giving it to
userspace */
fmt->fmt.pix.priv = 0;
@@ -1178,19 +1177,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
goto out;
}
- if (ret == gspca_dev->curr_mode) {
- ret = 0;
- goto out; /* same mode */
- }
-
if (gspca_dev->streaming) {
ret = -EBUSY;
goto out;
}
- gspca_dev->width = fmt->fmt.pix.width;
- gspca_dev->height = fmt->fmt.pix.height;
- gspca_dev->pixfmt = fmt->fmt.pix.pixelformat;
gspca_dev->curr_mode = ret;
+ if (gspca_dev->sd_desc->try_fmt)
+ /* subdriver try_fmt can modify format parameters */
+ gspca_dev->pixfmt = fmt->fmt.pix;
+ else
+ gspca_dev->pixfmt = gspca_dev->cam.cam_mode[ret];
ret = 0;
out:
@@ -1205,6 +1201,9 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
int i;
__u32 index = 0;
+ if (gspca_dev->sd_desc->enum_framesizes)
+ return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize);
+
for (i = 0; i < gspca_dev->cam.nmodes; i++) {
if (fsize->pixel_format !=
gspca_dev->cam.cam_mode[i].pixelformat)
@@ -1471,8 +1470,9 @@ static int vidioc_streamon(struct file *file, void *priv,
if (ret < 0)
goto out;
}
- PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", gspca_dev->pixfmt,
- gspca_dev->width, gspca_dev->height);
+ PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK",
+ gspca_dev->pixfmt.pixelformat,
+ gspca_dev->pixfmt.width, gspca_dev->pixfmt.height);
ret = 0;
out:
mutex_unlock(&gspca_dev->queue_lock);
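The which_bandwidth() hunk earlier in this file's diff now takes the frame geometry from gspca_dev->pixfmt. The sketch below mirrors the visible estimate as a stand-alone function; the 3/8 compression factor and the 15 fps USB 1.1 cap come from the hunk, while the 30 fps figure used otherwise is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Rough bytes-per-second estimate in the spirit of which_bandwidth().
 * sizeimage, width and height correspond to gspca_dev->pixfmt fields;
 * the 30 fps default is an assumption, the rest follows the hunk.
 */
static uint32_t estimate_bandwidth(uint32_t sizeimage, uint32_t width,
				   uint32_t height, int needs_full_bandwidth,
				   int usb_full_speed)
{
	uint32_t bw = sizeimage;

	/* compressed image: estimate its mean size as 0.375 * sizeimage */
	if (!needs_full_bandwidth && bw < width * height)
		bw = bw * 3 / 8;

	/* wide frames on USB 1.1 cannot do more than ~15 fps */
	if (width >= 640 && usb_full_speed)
		bw *= 15;
	else
		bw *= 30;	/* assumed default frame rate */

	return bw;
}

int main(void)
{
	/* 640x480 YUYV (2 bytes/pixel, uncompressed) on a USB 1.1 link */
	printf("%u bytes/s\n",
	       estimate_bandwidth(640 * 480 * 2, 640, 480, 0, 1));
	return 0;
}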
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index ac0b11f46f5..300642dc1a1 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -88,6 +88,10 @@ typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev,
typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev,
u8 *data,
int len);
+typedef void (*cam_format_op) (struct gspca_dev *gspca_dev,
+ struct v4l2_format *fmt);
+typedef int (*cam_frmsize_op) (struct gspca_dev *gspca_dev,
+ struct v4l2_frmsizeenum *fsize);
/* subdriver description */
struct sd_desc {
@@ -109,6 +113,8 @@ struct sd_desc {
cam_set_jpg_op set_jcomp;
cam_streamparm_op get_streamparm;
cam_streamparm_op set_streamparm;
+ cam_format_op try_fmt;
+ cam_frmsize_op enum_framesizes;
#ifdef CONFIG_VIDEO_ADV_DEBUG
cam_set_reg_op set_register;
cam_get_reg_op get_register;
@@ -183,9 +189,7 @@ struct gspca_dev {
__u8 streaming; /* protected by both mutexes (*) */
__u8 curr_mode; /* current camera mode */
- __u32 pixfmt; /* current mode parameters */
- __u16 width;
- __u16 height;
+ struct v4l2_pix_format pixfmt; /* current mode parameters */
__u32 sequence; /* frame sequence number */
wait_queue_head_t wq; /* wait queue */
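The gspca.h hunk above folds the separate width/height/pixfmt fields into one struct v4l2_pix_format, which is why the sub-driver hunks below all switch to gspca_dev->pixfmt.width and friends. A small stand-alone illustration of that struct, built against the videodev2.h UAPI header (the mode values are arbitrary):

#include <stdio.h>
#include <linux/videodev2.h>

int main(void)
{
	/* what gspca_dev->pixfmt now carries for the current mode */
	struct v4l2_pix_format pixfmt = {
		.width        = 640,
		.height       = 480,
		.pixelformat  = V4L2_PIX_FMT_SBGGR8,
		.field        = V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage    = 640 * 480,
		.colorspace   = V4L2_COLORSPACE_SRGB,
	};

	/* the access idiom used throughout the converted drivers */
	printf("%ux%u, %u bytes per frame\n",
	       pixfmt.width, pixfmt.height, pixfmt.sizeimage);
	return 0;
}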
diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c
index 8da3dde3838..19736e237b3 100644
--- a/drivers/media/usb/gspca/jeilinj.c
+++ b/drivers/media/usb/gspca/jeilinj.c
@@ -378,11 +378,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *dev = (struct sd *) gspca_dev;
/* create the JPEG header */
- jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
jpeg_set_qual(dev->jpeg_hdr, dev->quality);
PDEBUG(D_STREAM, "Start streaming at %dx%d",
- gspca_dev->height, gspca_dev->width);
+ gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
jlj_start(gspca_dev);
return gspca_dev->usb_err;
}
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index fdaeeb14453..5b481fa4309 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -455,7 +455,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
sd->cap_mode = gspca_dev->cam.cam_mode;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 640:
PDEBUG(D_STREAM, "Start streaming at vga resolution");
jl2005c_stream_start_vga_lg(gspca_dev);
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index cfa4663f893..27fcef11aef 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -266,7 +266,7 @@ static int mt9m111_set_hvflip(struct gspca_dev *gspca_dev)
return err;
data[0] = MT9M111_RMB_OVER_SIZED;
- if (gspca_dev->width == 640) {
+ if (gspca_dev->pixfmt.width == 640) {
data[1] = MT9M111_RMB_ROW_SKIP_2X |
MT9M111_RMB_COLUMN_SKIP_2X |
(hflip << 1) | vflip;
diff --git a/drivers/media/usb/gspca/mars.c b/drivers/media/usb/gspca/mars.c
index ff2c5abf115..779a8785f42 100644
--- a/drivers/media/usb/gspca/mars.c
+++ b/drivers/media/usb/gspca/mars.c
@@ -254,7 +254,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
int i;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
@@ -270,8 +271,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[0] = 0x00; /* address */
data[1] = 0x0c | 0x01; /* reg 0 */
data[2] = 0x01; /* reg 1 */
- data[3] = gspca_dev->width / 8; /* h_size , reg 2 */
- data[4] = gspca_dev->height / 8; /* v_size , reg 3 */
+ data[3] = gspca_dev->pixfmt.width / 8; /* h_size , reg 2 */
+ data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */
data[5] = 0x30; /* reg 4, MI, PAS5101 :
* 0x30 for 24mhz , 0x28 for 12mhz */
data[6] = 0x02; /* reg 5, H start - was 0x04 */
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index 68bb2f35966..f006e29ca01 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -521,7 +521,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
if (sd->sensor_type)
data[5] = 0xbb;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x04; /* reg 8, 2:1 scale down from 320 */
/* fall thru */
@@ -618,7 +618,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
data[10] = 0x18;
}
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x0c; /* reg 8, 4:1 scale down */
/* fall thru */
@@ -847,7 +847,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv)
u8 clockdiv = (60 * expo + 7999) / 8000;
/* Limit framerate to not exceed usb bandwidth */
- if (clockdiv < min_clockdiv && gspca_dev->width >= 320)
+ if (clockdiv < min_clockdiv && gspca_dev->pixfmt.width >= 320)
clockdiv = min_clockdiv;
else if (clockdiv < 2)
clockdiv = 2;
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index 44c9964b1b3..599f755e75b 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -1708,7 +1708,7 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
reg_r(gspca_dev, 0x1004, 1);
if (gspca_dev->usb_buf[0] & 0x04) { /* if AE_FULL_FRM */
- sd->ae_res = gspca_dev->width * gspca_dev->height;
+ sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
} else { /* get the AE window size */
reg_r(gspca_dev, 0x1011, 8);
w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]
@@ -1717,7 +1717,8 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
- (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6];
sd->ae_res = h * w;
if (sd->ae_res == 0)
- sd->ae_res = gspca_dev->width * gspca_dev->height;
+ sd->ae_res = gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height;
}
}
@@ -1856,21 +1857,21 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w_buf(gspca_dev, cmd);
switch (sd->webcam) {
case P35u:
- if (gspca_dev->width == 320)
+ if (gspca_dev->pixfmt.width == 320)
reg_w_buf(gspca_dev, nw801_start_qvga);
else
reg_w_buf(gspca_dev, nw801_start_vga);
reg_w_buf(gspca_dev, nw801_start_2);
break;
case Kr651us:
- if (gspca_dev->width == 320)
+ if (gspca_dev->pixfmt.width == 320)
reg_w_buf(gspca_dev, kr651_start_qvga);
else
reg_w_buf(gspca_dev, kr651_start_vga);
reg_w_buf(gspca_dev, kr651_start_2);
break;
case Proscope:
- if (gspca_dev->width == 320)
+ if (gspca_dev->pixfmt.width == 320)
reg_w_buf(gspca_dev, proscope_start_qvga);
else
reg_w_buf(gspca_dev, proscope_start_vga);
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 8937d79fd17..c95f32a0c02 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -3468,7 +3468,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
switch (sd->bridge) {
case BRIDGE_OVFX2:
- if (gspca_dev->width != 800)
+ if (gspca_dev->pixfmt.width != 800)
gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
else
gspca_dev->cam.bulk_size = 7 * 4096;
@@ -3507,8 +3507,8 @@ static void ov511_mode_init_regs(struct sd *sd)
/* Here I'm assuming that snapshot size == image size.
* I hope that's always true. --claudio
*/
- hsegs = (sd->gspca_dev.width >> 3) - 1;
- vsegs = (sd->gspca_dev.height >> 3) - 1;
+ hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1;
+ vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1;
reg_w(sd, R511_CAM_PXCNT, hsegs);
reg_w(sd, R511_CAM_LNCNT, vsegs);
@@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd)
case SEN_OV7640:
case SEN_OV7648:
case SEN_OV76BE:
- if (sd->gspca_dev.width == 320)
+ if (sd->gspca_dev.pixfmt.width == 320)
interlaced = 1;
/* Fall through */
case SEN_OV6630:
@@ -3551,7 +3551,7 @@ static void ov511_mode_init_regs(struct sd *sd)
case 30:
case 25:
/* Not enough bandwidth to do 640x480 @ 30 fps */
- if (sd->gspca_dev.width != 640) {
+ if (sd->gspca_dev.pixfmt.width != 640) {
sd->clockdiv = 0;
break;
}
@@ -3584,7 +3584,8 @@ static void ov511_mode_init_regs(struct sd *sd)
/* Check if we have enough bandwidth to disable compression */
fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
- needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+ needed = fps * sd->gspca_dev.pixfmt.width *
+ sd->gspca_dev.pixfmt.height * 3 / 2;
/* 1000 isoc packets/sec */
if (needed > 1000 * packet_size) {
/* Enable Y and UV quantization and compression */
@@ -3646,8 +3647,8 @@ static void ov518_mode_init_regs(struct sd *sd)
reg_w(sd, 0x38, 0x80);
}
- hsegs = sd->gspca_dev.width / 16;
- vsegs = sd->gspca_dev.height / 4;
+ hsegs = sd->gspca_dev.pixfmt.width / 16;
+ vsegs = sd->gspca_dev.pixfmt.height / 4;
reg_w(sd, 0x29, hsegs);
reg_w(sd, 0x2a, vsegs);
@@ -3686,7 +3687,8 @@ static void ov518_mode_init_regs(struct sd *sd)
* happened to be with revision < 2 cams using an
* OV7620 and revision 2 cams using an OV7620AE.
*/
- if (sd->revision > 0 && sd->gspca_dev.width == 640) {
+ if (sd->revision > 0 &&
+ sd->gspca_dev.pixfmt.width == 640) {
reg_w(sd, 0x20, 0x60);
reg_w(sd, 0x21, 0x1f);
} else {
@@ -3812,8 +3814,8 @@ static void ov519_mode_init_regs(struct sd *sd)
break;
}
- reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
- reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
+ reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.pixfmt.width >> 4);
+ reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.pixfmt.height >> 3);
if (sd->sensor == SEN_OV7670 &&
sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
@@ -3947,14 +3949,16 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
}
case SEN_OV3610:
if (qvga) {
- xstart = (1040 - gspca_dev->width) / 2 + (0x1f << 4);
- ystart = (776 - gspca_dev->height) / 2;
+ xstart = (1040 - gspca_dev->pixfmt.width) / 2 +
+ (0x1f << 4);
+ ystart = (776 - gspca_dev->pixfmt.height) / 2;
} else {
- xstart = (2076 - gspca_dev->width) / 2 + (0x10 << 4);
- ystart = (1544 - gspca_dev->height) / 2;
+ xstart = (2076 - gspca_dev->pixfmt.width) / 2 +
+ (0x10 << 4);
+ ystart = (1544 - gspca_dev->pixfmt.height) / 2;
}
- xend = xstart + gspca_dev->width;
- yend = ystart + gspca_dev->height;
+ xend = xstart + gspca_dev->pixfmt.width;
+ yend = ystart + gspca_dev->pixfmt.height;
/* Writing to the COMH register resets the other windowing regs
to their default values, so we must do this first. */
i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0);
@@ -4229,8 +4233,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* Default for most bridges, allow bridge_mode_init_regs to override */
- sd->sensor_width = sd->gspca_dev.width;
- sd->sensor_height = sd->gspca_dev.height;
+ sd->sensor_width = sd->gspca_dev.pixfmt.width;
+ sd->sensor_height = sd->gspca_dev.pixfmt.height;
switch (sd->bridge) {
case BRIDGE_OV511:
@@ -4345,12 +4349,13 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1);
if (in[8] & 0x80) {
/* Frame end */
- if ((in[9] + 1) * 8 != gspca_dev->width ||
- (in[10] + 1) * 8 != gspca_dev->height) {
+ if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
+ (in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
PERR("Invalid frame size, got: %dx%d,"
" requested: %dx%d\n",
(in[9] + 1) * 8, (in[10] + 1) * 8,
- gspca_dev->width, gspca_dev->height);
+ gspca_dev->pixfmt.width,
+ gspca_dev->pixfmt.height);
gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
@@ -4470,7 +4475,8 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
if (sd->first_frame) {
sd->first_frame--;
if (gspca_dev->image_len <
- sd->gspca_dev.width * sd->gspca_dev.height)
+ sd->gspca_dev.pixfmt.width *
+ sd->gspca_dev.pixfmt.height)
gspca_dev->last_packet_type = DISCARD_PACKET;
}
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 03a33c46ca2..90f0d637cd9 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -1440,9 +1440,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* If this packet is marked as EOF, end the frame */
} else if (data[1] & UVC_STREAM_EOF) {
sd->last_pts = 0;
- if (gspca_dev->pixfmt == V4L2_PIX_FMT_YUYV
+ if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV
&& gspca_dev->image_len + len - 12 !=
- gspca_dev->width * gspca_dev->height * 2) {
+ gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height * 2) {
PDEBUG(D_PACK, "wrong sized frame");
goto discard;
}
diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
index c4cd028fe0b..47085cf2d72 100644
--- a/drivers/media/usb/gspca/ov534_9.c
+++ b/drivers/media/usb/gspca/ov534_9.c
@@ -59,6 +59,7 @@ enum sensors {
SENSOR_OV965x, /* ov9657 */
SENSOR_OV971x, /* ov9712 */
SENSOR_OV562x, /* ov5621 */
+ SENSOR_OV361x, /* ov3610 */
NSENSORS
};
@@ -106,6 +107,274 @@ static const struct v4l2_pix_format ov562x_mode[] = {
}
};
+enum ov361x {
+ ov361x_2048 = 0,
+ ov361x_1600,
+ ov361x_1024,
+ ov361x_640,
+ ov361x_320,
+ ov361x_160,
+ ov361x_last
+};
+
+static const struct v4l2_pix_format ov361x_mode[] = {
+ {0x800, 0x600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 0x800,
+ .sizeimage = 0x800 * 0x600,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1600,
+ .sizeimage = 1600 * 1200,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1024,
+ .sizeimage = 1024 * 768,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120,
+ .colorspace = V4L2_COLORSPACE_SRGB}
+};
+
+static const u8 ov361x_start_2048[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0c},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x00},
+ {0x17, 0x10},
+ {0x18, 0x90},
+ {0x19, 0x00},
+ {0x1a, 0xc0},
+};
+static const u8 ov361x_bridge_start_2048[][2] = {
+ {0xf1, 0x60},
+ {0x88, 0x00},
+ {0x89, 0x08},
+ {0x8a, 0x00},
+ {0x8b, 0x06},
+ {0x8c, 0x01},
+ {0x8d, 0x10},
+ {0x1c, 0x00},
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a},
+ {0x1d, 0x2e},
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_1600[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x00},
+ {0x17, 0x10},
+ {0x18, 0x90},
+ {0x19, 0x00},
+ {0x1a, 0xc0},
+};
+static const u8 ov361x_bridge_start_1600[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x08}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x06}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_1024[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+static const u8 ov361x_bridge_start_1024[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_640[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+
+static const u8 ov361x_bridge_start_640[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0]*/
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_320[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+
+static const u8 ov361x_bridge_start_320[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer; */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_160[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+
+static const u8 ov361x_bridge_start_160[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
static const u8 bridge_init[][2] = {
{0x88, 0xf8},
{0x89, 0xff},
@@ -898,7 +1167,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
- msleep(10);
+ msleep(20);
data = reg_r(gspca_dev, OV534_REG_STATUS);
switch (data) {
@@ -1221,6 +1490,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
sccb_w_array(gspca_dev, ov562x_init_2,
ARRAY_SIZE(ov562x_init_2));
reg_w(gspca_dev, 0xe0, 0x00);
+ } else if ((sensor_id & 0xfff0) == 0x3610) {
+ sd->sensor = SENSOR_OV361x;
+ gspca_dev->cam.cam_mode = ov361x_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(ov361x_mode);
+ reg_w(gspca_dev, 0xe7, 0x3a);
+ reg_w(gspca_dev, 0xf1, 0x60);
+ sccb_write(gspca_dev, 0x12, 0x80);
} else {
pr_err("Unknown sensor %04x", sensor_id);
return -EINVAL;
@@ -1229,6 +1505,53 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
+static int sd_start_ov361x(struct gspca_dev *gspca_dev)
+{
+ sccb_write(gspca_dev, 0x12, 0x80);
+ msleep(20);
+ switch (gspca_dev->curr_mode % (ov361x_last)) {
+ case ov361x_2048:
+ reg_w_array(gspca_dev, ov361x_bridge_start_2048,
+ ARRAY_SIZE(ov361x_bridge_start_2048));
+ sccb_w_array(gspca_dev, ov361x_start_2048,
+ ARRAY_SIZE(ov361x_start_2048));
+ break;
+ case ov361x_1600:
+ reg_w_array(gspca_dev, ov361x_bridge_start_1600,
+ ARRAY_SIZE(ov361x_bridge_start_1600));
+ sccb_w_array(gspca_dev, ov361x_start_1600,
+ ARRAY_SIZE(ov361x_start_1600));
+ break;
+ case ov361x_1024:
+ reg_w_array(gspca_dev, ov361x_bridge_start_1024,
+ ARRAY_SIZE(ov361x_bridge_start_1024));
+ sccb_w_array(gspca_dev, ov361x_start_1024,
+ ARRAY_SIZE(ov361x_start_1024));
+ break;
+ case ov361x_640:
+ reg_w_array(gspca_dev, ov361x_bridge_start_640,
+ ARRAY_SIZE(ov361x_bridge_start_640));
+ sccb_w_array(gspca_dev, ov361x_start_640,
+ ARRAY_SIZE(ov361x_start_640));
+ break;
+ case ov361x_320:
+ reg_w_array(gspca_dev, ov361x_bridge_start_320,
+ ARRAY_SIZE(ov361x_bridge_start_320));
+ sccb_w_array(gspca_dev, ov361x_start_320,
+ ARRAY_SIZE(ov361x_start_320));
+ break;
+ case ov361x_160:
+ reg_w_array(gspca_dev, ov361x_bridge_start_160,
+ ARRAY_SIZE(ov361x_bridge_start_160));
+ sccb_w_array(gspca_dev, ov361x_start_160,
+ ARRAY_SIZE(ov361x_start_160));
+ break;
+ }
+ reg_w(gspca_dev, 0xe0, 0x00); /* start transfer */
+
+ return gspca_dev->usb_err;
+}
+
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1237,6 +1560,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
if (sd->sensor == SENSOR_OV562x)
return gspca_dev->usb_err;
+ if (sd->sensor == SENSOR_OV361x)
+ return sd_start_ov361x(gspca_dev);
switch (gspca_dev->curr_mode) {
case QVGA_MODE: /* 320x240 */
@@ -1290,6 +1615,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ if (((struct sd *)gspca_dev)->sensor == SENSOR_OV361x) {
+ reg_w(gspca_dev, 0xe0, 0x01); /* stop transfer */
+ /* reg_w(gspca_dev, 0x31, 0x09); */
+ return;
+ }
reg_w(gspca_dev, 0xe0, 0x01);
set_led(gspca_dev, 0);
reg_w(gspca_dev, 0xe0, 0x00);
@@ -1425,6 +1755,8 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
if (sd->sensor == SENSOR_OV971x)
return 0;
+ if (sd->sensor == SENSOR_OV361x)
+ return 0;
gspca_dev->vdev.ctrl_handler = hdl;
v4l2_ctrl_handler_init(hdl, 7);
if (sd->sensor == SENSOR_OV562x) {
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index 83519be94e5..cd79c180f67 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -299,7 +299,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
pac207_write_regs(gspca_dev, 0x0042, pac207_sensor_init[3], 8);
/* Compression Balance */
- if (gspca_dev->width == 176)
+ if (gspca_dev->pixfmt.width == 176)
pac207_write_reg(gspca_dev, 0x4a, 0xff);
else
pac207_write_reg(gspca_dev, 0x4a, 0x30);
@@ -317,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
mode = 0x00;
else
mode = 0x02;
- if (gspca_dev->width == 176) { /* 176x144 */
+ if (gspca_dev->pixfmt.width == 176) { /* 176x144 */
mode |= 0x01;
PDEBUG(D_STREAM, "pac207_start mode 176x144");
} else { /* 352x288 */
diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c
index 1a5bdc853a8..25f86b1e74a 100644
--- a/drivers/media/usb/gspca/pac7311.c
+++ b/drivers/media/usb/gspca/pac7311.c
@@ -326,7 +326,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
* 640x480 mode and page 4 reg 2 <= 3 then it must be 9
*/
reg_w(gspca_dev, 0xff, 0x01);
- if (gspca_dev->width != 640 && val <= 3)
+ if (gspca_dev->pixfmt.width != 640 && val <= 3)
reg_w(gspca_dev, 0x08, 0x09);
else
reg_w(gspca_dev, 0x08, 0x08);
@@ -337,7 +337,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
* camera to use higher compression or we may run out of
* bandwidth.
*/
- if (gspca_dev->width == 640 && val == 2)
+ if (gspca_dev->pixfmt.width == 640 && val == 2)
reg_w(gspca_dev, 0x80, 0x01);
else
reg_w(gspca_dev, 0x80, 0x1c);
@@ -615,7 +615,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* Start the new frame with the jpeg header */
pac_start_frame(gspca_dev,
- gspca_dev->height, gspca_dev->width);
+ gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
}
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index 5f729b8aa2b..5102cea5047 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -354,9 +354,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* set size + mode */
se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
- gspca_dev->width * mult, 0);
+ gspca_dev->pixfmt.width * mult, 0);
se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
- gspca_dev->height * mult, 0);
+ gspca_dev->pixfmt.height * mult, 0);
/*
* HDG: disabled this as it does not seem to do anything
* se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
@@ -480,7 +480,7 @@ static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
{
struct sd *sd = (struct sd *)gspca_dev;
- int imagesize = gspca_dev->width * gspca_dev->height;
+ int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
int i, plen, bits, pixels, info, count;
if (sd->restart_stream)
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index f4453d52801..2a38621cf71 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -1955,7 +1955,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
return 0;
}
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
gspca_dev->alt = 2;
break;
@@ -1985,8 +1985,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
- int width = gspca_dev->width;
- int height = gspca_dev->height;
+ int width = gspca_dev->pixfmt.width;
+ int height = gspca_dev->pixfmt.height;
u8 fmt, scale = 0;
jpeg_define(sd->jpeg_hdr, height, width,
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index d7ff3b9687c..7277dbd2afc 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -513,10 +513,7 @@ static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
if (gspca_dev->usb_buf[0] & 0x04) {
if (gspca_dev->usb_buf[0] & 0x08) {
dev_err(gspca_dev->v4l2_dev.dev,
- "i2c error writing %02x %02x %02x %02x"
- " %02x %02x %02x %02x\n",
- buf[0], buf[1], buf[2], buf[3],
- buf[4], buf[5], buf[6], buf[7]);
+ "i2c error writing %8ph\n", buf);
gspca_dev->usb_err = -EIO;
}
return;
@@ -753,7 +750,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* In 640x480, if the reg11 has less than 4, the image is
unstable (the bridge goes into a higher compression mode
which we have not reverse engineered yet). */
- if (gspca_dev->width == 640 && reg11 < 4)
+ if (gspca_dev->pixfmt.width == 640 && reg11 < 4)
reg11 = 4;
/* frame exposure time in ms = 1000 * reg11 / 30 ->
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 3b5ccb1c4cd..c69b45d7cfb 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -2204,7 +2204,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
{ 0x14, 0xe7, 0x1e, 0xdd };
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
/* initialize the bridge */
diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
index 688592b289e..f38fd894960 100644
--- a/drivers/media/usb/gspca/spca1528.c
+++ b/drivers/media/usb/gspca/spca1528.c
@@ -255,7 +255,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* initialize the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
/* the JPEG quality shall be 85% */
diff --git a/drivers/media/usb/gspca/spca500.c b/drivers/media/usb/gspca/spca500.c
index 9f8bf51fd64..f011a309dd6 100644
--- a/drivers/media/usb/gspca/spca500.c
+++ b/drivers/media/usb/gspca/spca500.c
@@ -608,7 +608,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
__u8 xmult, ymult;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index acb19fb9a3d..aa21edc9502 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -272,7 +272,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
dev->cap_mode = gspca_dev->cam.cam_mode;
/* "Open the shutter" and set size, to start capture */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 640:
PDEBUG(D_STREAM, "Start streaming at high resolution");
dev->cap_mode++;
diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
index b10d0821111..e274cf19a3e 100644
--- a/drivers/media/usb/gspca/sq930x.c
+++ b/drivers/media/usb/gspca/sq930x.c
@@ -906,7 +906,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */
sd->do_ctrl = 0;
- gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8;
+ gspca_dev->cam.bulk_size = gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height + 8;
return 0;
}
diff --git a/drivers/media/usb/gspca/stk014.c b/drivers/media/usb/gspca/stk014.c
index 8c0982607f2..b0c70fea760 100644
--- a/drivers/media/usb/gspca/stk014.c
+++ b/drivers/media/usb/gspca/stk014.c
@@ -250,7 +250,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
int ret, value;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
@@ -261,7 +262,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_par(gspca_dev, 0x00000000);
set_par(gspca_dev, 0x8002e001);
set_par(gspca_dev, 0x14000000);
- if (gspca_dev->width > 320)
+ if (gspca_dev->pixfmt.width > 320)
value = 0x8002e001; /* 640x480 */
else
value = 0x4001f000; /* 320x240 */
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 585868835ac..1fc80af2a18 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -48,42 +48,11 @@ struct sd {
};
static const struct v4l2_pix_format stk1135_modes[] = {
- {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 160,
- .sizeimage = 160 * 120,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 176,
- .sizeimage = 176 * 144,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 320,
- .sizeimage = 320 * 240,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 352,
- .sizeimage = 352 * 288,
- .colorspace = V4L2_COLORSPACE_SRGB},
+ /* default mode (this driver supports variable resolution) */
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB},
- {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 720,
- .sizeimage = 720 * 576,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 800,
- .sizeimage = 800 * 600,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 1024,
- .sizeimage = 1024 * 768,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 1280,
- .sizeimage = 1280 * 1024,
- .colorspace = V4L2_COLORSPACE_SRGB},
};
/* -- read a register -- */
@@ -347,16 +316,16 @@ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)
sensor_write(gspca_dev, cfg[i].reg, cfg[i].val);
/* set output size */
- width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
- height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
- if (width <= 640) { /* use context A (half readout speed by default) */
+ width = gspca_dev->pixfmt.width;
+ height = gspca_dev->pixfmt.height;
+ if (width <= 640 && height <= 512) { /* context A (half readout speed)*/
sensor_write(gspca_dev, 0x1a7, width);
sensor_write(gspca_dev, 0x1aa, height);
/* set read mode context A */
sensor_write(gspca_dev, 0x0c8, 0x0000);
/* set resize, read mode, vblank, hblank context A */
sensor_write(gspca_dev, 0x2c8, 0x0000);
- } else { /* use context B (full readout speed by default) */
+ } else { /* context B (full readout speed) */
sensor_write(gspca_dev, 0x1a1, width);
sensor_write(gspca_dev, 0x1a4, height);
/* set read mode context B */
@@ -484,8 +453,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00);
/* set capture end position */
- width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
- height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
+ width = gspca_dev->pixfmt.width;
+ height = gspca_dev->pixfmt.height;
reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff);
reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8);
reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff);
@@ -643,6 +612,35 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
return 0;
}
+static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt)
+{
+ fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U);
+ fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U);
+ /* round up to even numbers */
+ fmt->fmt.pix.width += (fmt->fmt.pix.width & 1);
+ fmt->fmt.pix.height += (fmt->fmt.pix.height & 1);
+
+ fmt->fmt.pix.bytesperline = fmt->fmt.pix.width;
+ fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height;
+}
+
+static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = 32;
+ fsize->stepwise.min_height = 32;
+ fsize->stepwise.max_width = 1280;
+ fsize->stepwise.max_height = 1024;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -653,6 +651,8 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = stk1135_dq_callback,
+ .try_fmt = stk1135_try_fmt,
+ .enum_framesizes = stk1135_enum_framesizes,
};
/* -- module initialisation -- */
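With stk1135 now exposing a single default mode plus try_fmt and enum_framesizes hooks, the resolution is negotiated rather than picked from a fixed table. A hedged user-space sketch of how an application could probe the stepwise range and request an arbitrary size (the /dev/video0 path is an assumption):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* stepwise range reported by the new enum_framesizes hook */
	struct v4l2_frmsizeenum fsize = {
		.index = 0,
		.pixel_format = V4L2_PIX_FMT_SBGGR8,
	};
	if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0 &&
	    fsize.type == V4L2_FRMSIZE_TYPE_STEPWISE)
		printf("width %u..%u step %u, height %u..%u step %u\n",
		       fsize.stepwise.min_width, fsize.stepwise.max_width,
		       fsize.stepwise.step_width,
		       fsize.stepwise.min_height, fsize.stepwise.max_height,
		       fsize.stepwise.step_height);

	/* ask for an odd size; the driver's try_fmt clamps and rounds it */
	struct v4l2_format fmt;
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 500;
	fmt.fmt.pix.height = 375;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	if (ioctl(fd, VIDIOC_TRY_FMT, &fmt) == 0)
		printf("granted %ux%u\n",
		       fmt.fmt.pix.width, fmt.fmt.pix.height);

	close(fd);
	return 0;
}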
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 55ee7a61c67..49d209bbf9e 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -452,7 +452,7 @@ frame_data:
NULL, 0);
if (sd->bridge == BRIDGE_ST6422)
- sd->to_skip = gspca_dev->width * 4;
+ sd->to_skip = gspca_dev->pixfmt.width * 4;
if (chunk_len)
PERR("Chunk length is "
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
index 8206b774330..8d785edcccf 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
@@ -421,7 +421,7 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
/* Number of pixels counted by the sensor when subsampling the pixels.
* Slightly larger than the real value to avoid oscillation */
- totalpixels = gspca_dev->width * gspca_dev->height;
+ totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
totalpixels = totalpixels/(8*8) + totalpixels/(64*64);
brightpixels = (totalpixels * val) >> 8;
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index af8767a9bd4..a517d185feb 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -715,7 +715,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
int enable;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 4cb511ccc5f..640c2fe760b 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -3856,7 +3856,7 @@ static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
if (sd->bridge == BRIDGE_TP6800) {
val |= 0x08; /* grid compensation enable */
- if (gspca_dev->width == 640)
+ if (gspca_dev->pixfmt.width == 640)
reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
else
val |= 0x04; /* scaling down enable */
@@ -3880,7 +3880,7 @@ static void set_resolution(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
- if (gspca_dev->width == 320) {
+ if (gspca_dev->pixfmt.width == 320) {
reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06);
msleep(100);
i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
@@ -3924,7 +3924,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
/* 640x480 * 30 fps does not work */
if (i == 6 /* if 30 fps */
- && gspca_dev->width == 640)
+ && gspca_dev->pixfmt.width == 640)
i = 0x05; /* 15 fps */
} else {
for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) {
@@ -3935,7 +3935,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
/* 640x480 * 30 fps does not work */
if (i == 7 /* if 30 fps */
- && gspca_dev->width == 640)
+ && gspca_dev->pixfmt.width == 640)
i = 6; /* 15 fps */
i |= 0x80; /* clock * 1 */
}
@@ -4554,7 +4554,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width);
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width);
set_dqt(gspca_dev, sd->quality);
if (sd->bridge == BRIDGE_TP6800) {
if (sd->sensor == SENSOR_CX0342)
@@ -4737,7 +4738,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
(gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] +
(gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28])
/ 8;
- if (gspca_dev->width == 640)
+ if (gspca_dev->pixfmt.width == 640)
luma /= 4;
reg_w(gspca_dev, 0x7d, 0x00);
diff --git a/drivers/media/usb/gspca/tv8532.c b/drivers/media/usb/gspca/tv8532.c
index 8591324a53e..d497ba38af0 100644
--- a/drivers/media/usb/gspca/tv8532.c
+++ b/drivers/media/usb/gspca/tv8532.c
@@ -268,7 +268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
packet_type0 = packet_type1 = INTER_PACKET;
if (gspca_dev->empty_packet) {
gspca_dev->empty_packet = 0;
- sd->packet = gspca_dev->height / 2;
+ sd->packet = gspca_dev->pixfmt.height / 2;
packet_type0 = FIRST_PACKET;
} else if (sd->packet == 0)
return; /* 2 more lines in 352x288 ! */
@@ -284,9 +284,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
* - 4 bytes
*/
gspca_frame_add(gspca_dev, packet_type0,
- data + 2, gspca_dev->width);
+ data + 2, gspca_dev->pixfmt.width);
gspca_frame_add(gspca_dev, packet_type1,
- data + gspca_dev->width + 5, gspca_dev->width);
+ data + gspca_dev->pixfmt.width + 5,
+ gspca_dev->pixfmt.width);
}
static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index a2275cfe0b8..103f6c4236b 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -121,13 +121,13 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
memset(req_data, 0, 16);
req_data[0] = gain;
- if (gspca_dev->width == 256)
+ if (gspca_dev->pixfmt.width == 256)
req_data[1] |= 0x01; /* low nibble x-scale */
- if (gspca_dev->height <= 122) {
+ if (gspca_dev->pixfmt.height <= 122) {
req_data[1] |= 0x10; /* high nibble y-scale */
- unscaled_height = gspca_dev->height * 2;
+ unscaled_height = gspca_dev->pixfmt.height * 2;
} else
- unscaled_height = gspca_dev->height;
+ unscaled_height = gspca_dev->pixfmt.height;
req_data[2] = 0x90; /* unknown, does not seem to do anything */
if (unscaled_height <= 200)
req_data[3] = 0x06; /* vend? */
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index 2165da0c7ce..fb9fe2ef3a6 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -430,11 +430,11 @@ static void w9968cf_set_crop_window(struct sd *sd)
#define SC(x) ((x) << 10)
/* Scaling factors */
- fw = SC(sd->gspca_dev.width) / max_width;
- fh = SC(sd->gspca_dev.height) / max_height;
+ fw = SC(sd->gspca_dev.pixfmt.width) / max_width;
+ fh = SC(sd->gspca_dev.pixfmt.height) / max_height;
- cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh;
- ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height;
+ cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.pixfmt.width) / fh;
+ ch = (fw >= fh) ? SC(sd->gspca_dev.pixfmt.height) / fw : max_height;
sd->sensor_width = max_width;
sd->sensor_height = max_height;
@@ -454,34 +454,34 @@ static void w9968cf_mode_init_regs(struct sd *sd)
w9968cf_set_crop_window(sd);
- reg_w(sd, 0x14, sd->gspca_dev.width);
- reg_w(sd, 0x15, sd->gspca_dev.height);
+ reg_w(sd, 0x14, sd->gspca_dev.pixfmt.width);
+ reg_w(sd, 0x15, sd->gspca_dev.pixfmt.height);
/* JPEG width & height */
- reg_w(sd, 0x30, sd->gspca_dev.width);
- reg_w(sd, 0x31, sd->gspca_dev.height);
+ reg_w(sd, 0x30, sd->gspca_dev.pixfmt.width);
+ reg_w(sd, 0x31, sd->gspca_dev.pixfmt.height);
/* Y & UV frame buffer strides (in WORD) */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
- reg_w(sd, 0x2c, sd->gspca_dev.width / 2);
- reg_w(sd, 0x2d, sd->gspca_dev.width / 4);
+ reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width / 2);
+ reg_w(sd, 0x2d, sd->gspca_dev.pixfmt.width / 4);
} else
- reg_w(sd, 0x2c, sd->gspca_dev.width);
+ reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width);
reg_w(sd, 0x00, 0xbf17); /* reset everything */
reg_w(sd, 0x00, 0xbf10); /* normal operation */
/* Transfer size in WORDS (for UYVY format only) */
- val = sd->gspca_dev.width * sd->gspca_dev.height;
+ val = sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height;
reg_w(sd, 0x3d, val & 0xffff); /* low bits */
reg_w(sd, 0x3e, val >> 16); /* high bits */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
/* We may get called multiple times (usb isoc bw negotiat.) */
- jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height,
- sd->gspca_dev.width, 0x22); /* JPEG 420 */
+ jpeg_define(sd->jpeg_hdr, sd->gspca_dev.pixfmt.height,
+ sd->gspca_dev.pixfmt.width, 0x22); /* JPEG 420 */
jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
w9968cf_upload_quantizationtables(sd);
v4l2_ctrl_grab(sd->jpegqual, true);
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index 7eaf64eb867..a41aa7817c5 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -1471,14 +1471,14 @@ static int cit_get_clock_div(struct gspca_dev *gspca_dev)
while (clock_div > 3 &&
1000 * packet_size >
- gspca_dev->width * gspca_dev->height *
+ gspca_dev->pixfmt.width * gspca_dev->pixfmt.height *
fps[clock_div - 1] * 3 / 2)
clock_div--;
PDEBUG(D_PROBE,
"PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
- packet_size, gspca_dev->width, gspca_dev->height, clock_div,
- fps[clock_div]);
+ packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height,
+ clock_div, fps[clock_div]);
return clock_div;
}
@@ -1502,7 +1502,7 @@ static int cit_start_model0(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x0002, 0x0426);
cit_write_reg(gspca_dev, 0x0014, 0x0427);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
cit_write_reg(gspca_dev, 0x0004, 0x010b);
cit_write_reg(gspca_dev, 0x0001, 0x010a);
@@ -1643,7 +1643,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x00, 0x0101);
cit_write_reg(gspca_dev, 0x00, 0x010a);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 128: /* 128x96 */
cit_write_reg(gspca_dev, 0x80, 0x0103);
cit_write_reg(gspca_dev, 0x60, 0x0105);
@@ -1700,7 +1700,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
}
/* Assorted init */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 128: /* 128x96 */
cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
@@ -1753,7 +1753,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x0000, 0x0108);
cit_write_reg(gspca_dev, 0x0001, 0x0133);
cit_write_reg(gspca_dev, 0x0001, 0x0102);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
@@ -1792,7 +1792,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_write_reg(gspca_dev, 0x0050, 0x0111);
cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -1840,7 +1840,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
* Magic control of CMOS sensor. Only lower values like
* 0-3 work, and picture shifts left or right. Don't change.
*/
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
@@ -1899,7 +1899,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
* does not allow arbitrary values and apparently is a bit mask, to
* be activated only at appropriate time. Don't change it randomly!
*/
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
break;
@@ -2023,7 +2023,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
cit_model3_Packet1(gspca_dev, 0x009e, 0x0096);
cit_model3_Packet1(gspca_dev, 0x009f, 0x000a);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
@@ -2134,7 +2134,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
like with the IBM netcam pro). */
cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
@@ -2211,7 +2211,7 @@ static int cit_start_model4(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0xfffa, 0x0124);
cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 128: /* 128x96 */
cit_write_reg(gspca_dev, 0x0070, 0x0119);
cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -2531,7 +2531,7 @@ static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
cit_write_reg(gspca_dev, 0x0024, 0x010b);
cit_write_reg(gspca_dev, 0x0089, 0x0119);
@@ -2635,7 +2635,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
struct usb_host_interface *alt;
int max_packet_size;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
max_packet_size = 450;
break;
@@ -2659,7 +2659,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
int ret, packet_size, min_packet_size;
struct usb_host_interface *alt;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
min_packet_size = 200;
break;
@@ -2780,7 +2780,7 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
case CIT_MODEL1:
case CIT_MODEL3:
case CIT_IBM_NETCAM_PRO:
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
byte3 = 0x02;
byte4 = 0x0a;
@@ -2864,20 +2864,16 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
if (data[i] == 0xff) {
if (i >= 4)
PDEBUG(D_FRAM,
- "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n",
+ "header found at offset: %d: %02x %02x 00 %3ph\n",
i - 1,
data[i - 4],
data[i - 3],
- data[i],
- data[i + 1],
- data[i + 2]);
+ &data[i]);
else
PDEBUG(D_FRAM,
- "header found at offset: %d: 00 %02x %02x %02x\n",
+ "header found at offset: %d: 00 %3ph\n",
i - 1,
- data[i],
- data[i + 1],
- data[i + 2]);
+ &data[i]);
return data + i + (sd->sof_len - 1);
}
break;
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index cbfc2f92142..7b95d8e88a2 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6700,7 +6700,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
};
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 6e5070774dc..2f0c89cbac7 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -78,7 +78,8 @@ void hdpvr_delete(struct hdpvr_device *dev)
static void challenge(u8 *bytes)
{
- u64 *i64P, tmp64;
+ __le64 *i64P;
+ u64 tmp64;
uint i, idx;
for (idx = 0; idx < 32; ++idx) {
@@ -106,10 +107,10 @@ static void challenge(u8 *bytes)
for (i = 0; i < 3; i++)
bytes[1] *= bytes[6] + 1;
for (i = 0; i < 3; i++) {
- i64P = (u64 *)bytes;
+ i64P = (__le64 *)bytes;
tmp64 = le64_to_cpup(i64P);
- tmp64 <<= bytes[7] & 0x0f;
- *i64P += cpu_to_le64(tmp64);
+ tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f));
+ *i64P = cpu_to_le64(tmp64);
}
break;
}
@@ -301,8 +302,6 @@ static int hdpvr_probe(struct usb_interface *interface,
goto error;
}
- dev->workqueue = 0;
-
/* init video transfer queues first of all */
/* to prevent oops in hdpvr_delete() on error paths */
INIT_LIST_HEAD(&dev->free_buff_list);
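The challenge() hunk above stops adding a CPU-endian value into a little-endian buffer: the 64-bit word is converted to CPU order, combined, and stored back exactly once. A portable stand-alone sketch of that round-trip; the le64 helpers below are illustrative replacements for the kernel's le64_to_cpup()/cpu_to_le64():

#include <stdint.h>
#include <stdio.h>

/* portable stand-ins for le64_to_cpup()/cpu_to_le64() + store */
static uint64_t le64_load(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

static void le64_store(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
	uint8_t bytes[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* same shape as the fixed loop body: work in CPU order, store once */
	uint64_t tmp64 = le64_load(bytes);
	tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f));
	le64_store(bytes, tmp64);

	for (int i = 0; i < 8; i++)
		printf("%02x ", bytes[i]);
	printf("\n");
	return 0;
}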
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index c4d51d78f83..ea05f678b55 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2868,7 +2868,7 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id,
pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \
}
-v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw)
+static v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw)
{
v4l2_std_id std;
std = (v4l2_std_id)hdw->std_mask_avail;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 03761c6f472..05bd91a60c0 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -209,8 +209,10 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
int dummy;
- if (dev->state != SMSUSB_ACTIVE)
+ if (dev->state != SMSUSB_ACTIVE) {
+ sms_debug("Device not active yet");
return -ENOENT;
+ }
sms_debug("sending %s(%d) size: %d",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
@@ -243,6 +245,9 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
int rc, dummy;
char *fw_filename;
+ if (id < 0)
+ id = sms_get_board(board_id)->default_mode;
+
if (id < DEVICE_MODE_DVBT || id > DEVICE_MODE_DVBT_BDA) {
sms_err("invalid firmware id specified %d", id);
return -EINVAL;
@@ -445,14 +450,15 @@ static int smsusb_probe(struct usb_interface *intf,
char devpath[32];
int i, rc;
- sms_info("interface number %d",
+ sms_info("board id=%lu, interface number %d",
+ id->driver_info,
intf->cur_altsetting->desc.bInterfaceNumber);
if (sms_get_board(id->driver_info)->intf_num !=
intf->cur_altsetting->desc.bInterfaceNumber) {
- sms_err("interface number is %d expecting %d",
- sms_get_board(id->driver_info)->intf_num,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ sms_debug("interface %d won't be used. Expecting interface %d to popup",
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ sms_get_board(id->driver_info)->intf_num);
return -ENODEV;
}
@@ -483,22 +489,32 @@ static int smsusb_probe(struct usb_interface *intf,
}
if ((udev->actconfig->desc.bNumInterfaces == 2) &&
(intf->cur_altsetting->desc.bInterfaceNumber == 0)) {
- sms_err("rom interface 0 is not used");
+ sms_debug("rom interface 0 is not used");
return -ENODEV;
}
if (id->driver_info == SMS1XXX_BOARD_SIANO_STELLAR_ROM) {
- sms_info("stellar device was found.");
+ /* Detected a Siano Stellar uninitialized */
+
snprintf(devpath, sizeof(devpath), "usb\\%d-%s",
udev->bus->busnum, udev->devpath);
- sms_info("stellar device was found.");
- return smsusb1_load_firmware(
+ sms_info("stellar device in cold state was found at %s.", devpath);
+ rc = smsusb1_load_firmware(
udev, smscore_registry_getmode(devpath),
id->driver_info);
+
+ /* This device will reset and gain another USB ID */
+ if (!rc)
+ sms_info("stellar device now in warm state");
+ else
+ sms_err("Failed to put stellar in warm state. Error: %d", rc);
+
+ return rc;
+ } else {
+ rc = smsusb_init_device(intf, id->driver_info);
}
- rc = smsusb_init_device(intf, id->driver_info);
- sms_info("rc %d", rc);
+ sms_info("Device initialized with return code %d", rc);
sms_board_load_modules(id->driver_info);
return rc;
}
@@ -550,10 +566,13 @@ static int smsusb_resume(struct usb_interface *intf)
}
static const struct usb_device_id smsusb_id_table[] = {
+ /* This device is only present before firmware load */
{ USB_DEVICE(0x187f, 0x0010),
- .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+ .driver_info = SMS1XXX_BOARD_SIANO_STELLAR_ROM },
+ /* This device pops up after firmware load */
{ USB_DEVICE(0x187f, 0x0100),
.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+
{ USB_DEVICE(0x187f, 0x0200),
.driver_info = SMS1XXX_BOARD_SIANO_NOVA_A },
{ USB_DEVICE(0x187f, 0x0201),
diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/media/usb/tlg2300/pd-main.c
index 95f94e5aa66..3316caa4733 100644
--- a/drivers/media/usb/tlg2300/pd-main.c
+++ b/drivers/media/usb/tlg2300/pd-main.c
@@ -232,7 +232,7 @@ static int firmware_download(struct usb_device *udev)
goto out;
}
- max_packet_size = udev->ep_out[0x1]->desc.wMaxPacketSize;
+ max_packet_size = le16_to_cpu(udev->ep_out[0x1]->desc.wMaxPacketSize);
log("\t\t download size : %d", (int)max_packet_size);
for (offset = 0; offset < fwlength; offset += max_packet_size) {
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index e52c3b97f30..29724af9b9a 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -366,7 +366,7 @@ static int ttusb_dec_get_stb_state (struct ttusb_dec *dec, unsigned int *mode,
}
return 0;
} else {
- return -1;
+ return -ENOENT;
}
}
@@ -1241,6 +1241,8 @@ static void ttusb_dec_init_v_pes(struct ttusb_dec *dec)
static int ttusb_dec_init_usb(struct ttusb_dec *dec)
{
+ int result;
+
dprintk("%s\n", __func__);
mutex_init(&dec->usb_mutex);
@@ -1258,7 +1260,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
return -ENOMEM;
}
dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE,
- GFP_ATOMIC, &dec->irq_dma_handle);
+ GFP_KERNEL, &dec->irq_dma_handle);
if(!dec->irq_buffer) {
usb_free_urb(dec->irq_urb);
return -ENOMEM;
@@ -1270,7 +1272,13 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
dec->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
}
- return ttusb_dec_alloc_iso_urbs(dec);
+ result = ttusb_dec_alloc_iso_urbs(dec);
+ if (result) {
+ usb_free_urb(dec->irq_urb);
+ usb_free_coherent(dec->udev, IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
+ }
+ return result;
}
static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
@@ -1293,10 +1301,11 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
- if (request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev)) {
+ result = request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev);
+ if (result) {
printk(KERN_ERR "%s: Firmware (%s) unavailable.\n",
__func__, dec->firmware_name);
- return 1;
+ return result;
}
firmware = fw_entry->data;
@@ -1306,7 +1315,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
printk("%s: firmware size too small for DSP code (%zu < 60).\n",
__func__, firmware_size);
release_firmware(fw_entry);
- return -1;
+ return -ENOENT;
}
/* a 32 bit checksum over the first 56 bytes of the DSP Code is stored
@@ -1320,7 +1329,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
"0x%08x != 0x%08x in file), file invalid.\n",
__func__, crc32_csum, crc32_check);
release_firmware(fw_entry);
- return -1;
+ return -ENOENT;
}
memcpy(idstring, &firmware[36], 20);
idstring[20] = '\0';
@@ -1389,55 +1398,48 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
result = ttusb_dec_get_stb_state(dec, &mode, &model, &version);
+ if (result)
+ return result;
- if (!result) {
- if (!mode) {
- if (version == 0xABCDEFAB)
- printk(KERN_INFO "ttusb_dec: no version "
- "info in Firmware\n");
- else
- printk(KERN_INFO "ttusb_dec: Firmware "
- "%x.%02x%c%c\n",
- version >> 24, (version >> 16) & 0xff,
- (version >> 8) & 0xff, version & 0xff);
-
- result = ttusb_dec_boot_dsp(dec);
- if (result)
- return result;
- else
- return 1;
- } else {
- /* We can't trust the USB IDs that some firmwares
- give the box */
- switch (model) {
- case 0x00070001:
- case 0x00070008:
- case 0x0007000c:
- ttusb_dec_set_model(dec, TTUSB_DEC3000S);
- break;
- case 0x00070009:
- case 0x00070013:
- ttusb_dec_set_model(dec, TTUSB_DEC2000T);
- break;
- case 0x00070011:
- ttusb_dec_set_model(dec, TTUSB_DEC2540T);
- break;
- default:
- printk(KERN_ERR "%s: unknown model returned "
- "by firmware (%08x) - please report\n",
- __func__, model);
- return -1;
- break;
- }
+ if (!mode) {
+ if (version == 0xABCDEFAB)
+ printk(KERN_INFO "ttusb_dec: no version "
+ "info in Firmware\n");
+ else
+ printk(KERN_INFO "ttusb_dec: Firmware "
+ "%x.%02x%c%c\n",
+ version >> 24, (version >> 16) & 0xff,
+ (version >> 8) & 0xff, version & 0xff);
+ result = ttusb_dec_boot_dsp(dec);
+ if (result)
+ return result;
+ } else {
+ /* We can't trust the USB IDs that some firmwares
+ give the box */
+ switch (model) {
+ case 0x00070001:
+ case 0x00070008:
+ case 0x0007000c:
+ ttusb_dec_set_model(dec, TTUSB_DEC3000S);
+ break;
+ case 0x00070009:
+ case 0x00070013:
+ ttusb_dec_set_model(dec, TTUSB_DEC2000T);
+ break;
+ case 0x00070011:
+ ttusb_dec_set_model(dec, TTUSB_DEC2540T);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown model returned "
+ "by firmware (%08x) - please report\n",
+ __func__, model);
+ return -ENOENT;
+ }
if (version >= 0x01770000)
dec->can_playback = 1;
-
- return 0;
- }
}
- else
- return result;
+ return 0;
}
static int ttusb_dec_init_dvb(struct ttusb_dec *dec)
@@ -1539,19 +1541,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
static void ttusb_dec_exit_rc(struct ttusb_dec *dec)
{
-
dprintk("%s\n", __func__);
- /* we have to check whether the irq URB is already submitted.
- * As the irq is submitted after the interface is changed,
- * this is the best method i figured out.
- * Any others?*/
- if (dec->interface == TTUSB_DEC_INTERFACE_IN)
- usb_kill_urb(dec->irq_urb);
-
- usb_free_urb(dec->irq_urb);
-
- usb_free_coherent(dec->udev,IRQ_PACKET_SIZE,
- dec->irq_buffer, dec->irq_dma_handle);
if (dec->rc_input_dev) {
input_unregister_device(dec->rc_input_dev);
@@ -1566,6 +1556,20 @@ static void ttusb_dec_exit_usb(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
+ if (enable_rc) {
+ /* we have to check whether the irq URB is already submitted.
+ * As the irq is submitted after the interface is changed,
+ * this is the best method i figured out.
+ * Any others?*/
+ if (dec->interface == TTUSB_DEC_INTERFACE_IN)
+ usb_kill_urb(dec->irq_urb);
+
+ usb_free_urb(dec->irq_urb);
+
+ usb_free_coherent(dec->udev, IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
+ }
+
dec->iso_stream_count = 0;
for (i = 0; i < ISO_BUF_COUNT; i++)
@@ -1623,6 +1627,7 @@ static int ttusb_dec_probe(struct usb_interface *intf,
{
struct usb_device *udev;
struct ttusb_dec *dec;
+ int result;
dprintk("%s\n", __func__);
@@ -1651,13 +1656,15 @@ static int ttusb_dec_probe(struct usb_interface *intf,
dec->udev = udev;
- if (ttusb_dec_init_usb(dec))
- return 0;
- if (ttusb_dec_init_stb(dec)) {
- ttusb_dec_exit_usb(dec);
- return 0;
- }
- ttusb_dec_init_dvb(dec);
+ result = ttusb_dec_init_usb(dec);
+ if (result)
+ goto err_usb;
+ result = ttusb_dec_init_stb(dec);
+ if (result)
+ goto err_stb;
+ result = ttusb_dec_init_dvb(dec);
+ if (result)
+ goto err_stb;
dec->adapter.priv = dec;
switch (id->idProduct) {
@@ -1696,6 +1703,11 @@ static int ttusb_dec_probe(struct usb_interface *intf,
ttusb_init_rc(dec);
return 0;
+err_stb:
+ ttusb_dec_exit_usb(dec);
+err_usb:
+ kfree(dec);
+ return result;
}
static void ttusb_dec_disconnect(struct usb_interface *intf)
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index a2f4501c23c..0eb82106d2f 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -664,7 +664,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.size = 32,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
- .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_TILT_ABSOLUTE,
@@ -674,7 +674,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.size = 32,
.offset = 32,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
- .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_PRIVACY,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 3394c343201..899cb6d1c4a 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -680,7 +680,8 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
stream->dev->name,
sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
- v4l2_buf->timestamp.tv_sec, v4l2_buf->timestamp.tv_usec,
+ v4l2_buf->timestamp.tv_sec,
+ (unsigned long)v4l2_buf->timestamp.tv_usec,
x1, first->host_sof, first->dev_sof,
x2, last->host_sof, last->dev_sof, y1, y2);
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index ddc9379eb27..20c09229a08 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -43,7 +43,7 @@
#define UNSET (-1U)
-#define PREFIX (t->i2c->driver->driver.name)
+#define PREFIX (t->i2c->dev.driver->name)
/*
* Driver modprobe parameters
@@ -247,7 +247,7 @@ static const struct analog_demod_ops tuner_analog_ops = {
/**
* set_type - Sets the tuner type for a given device
*
- * @c: i2c_client descriptoy
+ * @c: i2c_client descriptor
* @type: type of the tuner (e. g. tuner number)
* @new_mode_mask: Indicates if tuner supports TV and/or Radio
* @new_config: an optional parameter used by a few tuners to adjust
@@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
}
tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
- c->adapter->name, c->driver->driver.name, c->addr << 1, type,
+ c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
@@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap,
int mode_mask;
if (pos->i2c->adapter != adap ||
- strcmp(pos->i2c->driver->driver.name, "tuner"))
+ strcmp(pos->i2c->dev.driver->name, "tuner"))
continue;
mode_mask = pos->mode_mask;
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index c85d69da35b..85a6a34128a 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -189,30 +189,53 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
struct v4l2_subdev *sd, *tmp;
unsigned int notif_n_subdev = notifier->num_subdevs;
unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
- struct device *dev[n_subdev];
+ struct device **dev;
int i = 0;
if (!notifier->v4l2_dev)
return;
+ dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(notifier->v4l2_dev->dev,
+ "Failed to allocate device cache!\n");
+ }
+
mutex_lock(&list_lock);
list_del(&notifier->list);
list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
- dev[i] = get_device(sd->dev);
+ struct device *d;
+
+ d = get_device(sd->dev);
v4l2_async_cleanup(sd);
/* If we handled USB devices, we'd have to lock the parent too */
- device_release_driver(dev[i++]);
+ device_release_driver(d);
if (notifier->unbind)
notifier->unbind(notifier, sd, sd->asd);
+
+ /*
+ * Store device at the device cache, in order to call
+ * put_device() on the final step
+ */
+ if (dev)
+ dev[i++] = d;
+ else
+ put_device(d);
}
mutex_unlock(&list_lock);
+ /*
+ * Call device_attach() to reprobe devices
+ *
+ * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
+ * executed.
+ */
while (i--) {
struct device *d = dev[i];
@@ -228,6 +251,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
}
put_device(d);
}
+ kfree(dev);
notifier->v4l2_dev = NULL;
diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c
index b67de8642b5..e18cc0469cf 100644
--- a/drivers/media/v4l2-core/v4l2-clk.c
+++ b/drivers/media/v4l2-core/v4l2-clk.c
@@ -240,3 +240,42 @@ void v4l2_clk_unregister(struct v4l2_clk *clk)
kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);
+
+struct v4l2_clk_fixed {
+ unsigned long rate;
+ struct v4l2_clk_ops ops;
+};
+
+static unsigned long fixed_get_rate(struct v4l2_clk *clk)
+{
+ struct v4l2_clk_fixed *priv = clk->priv;
+ return priv->rate;
+}
+
+struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
+ const char *id, unsigned long rate, struct module *owner)
+{
+ struct v4l2_clk *clk;
+ struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->rate = rate;
+ priv->ops.get_rate = fixed_get_rate;
+ priv->ops.owner = owner;
+
+ clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
+ if (IS_ERR(clk))
+ kfree(priv);
+
+ return clk;
+}
+EXPORT_SYMBOL(__v4l2_clk_register_fixed);
+
+void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
+{
+ kfree(clk->priv);
+ v4l2_clk_unregister(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
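A hedged usage sketch of the fixed-clock helpers added above, matching the signatures in the hunk; the "1-0010"/"mclk" identifiers and the 24 MHz rate are made up for illustration.

	#include <linux/err.h>
	#include <linux/module.h>
	#include <media/v4l2-clk.h>

	static struct v4l2_clk *sensor_clk;

	static int example_register_sensor_clk(void)
	{
		/* Fixed 24 MHz master clock for a hypothetical sensor */
		sensor_clk = __v4l2_clk_register_fixed("1-0010", "mclk",
						       24000000, THIS_MODULE);
		if (IS_ERR(sensor_clk))
			return PTR_ERR(sensor_clk);
		return 0;
	}

	static void example_unregister_sensor_clk(void)
	{
		v4l2_clk_unregister_fixed(sensor_clk);
	}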
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 037d7a55aa8..433d6d77942 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
v4l2_subdev_init(sd, ops);
sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
/* the owner is the same as the i2c_client's driver owner */
- sd->owner = client->driver->driver.owner;
+ sd->owner = client->dev.driver->owner;
sd->dev = &client->dev;
/* i2c_client and v4l2_subdev point to one another */
v4l2_set_subdevdata(sd, client);
i2c_set_clientdata(client, sd);
/* initialize name */
snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
- client->driver->driver.name, i2c_adapter_id(client->adapter),
+ client->dev.driver->name, i2c_adapter_id(client->adapter),
client->addr);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
@@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
loaded. This delay-load mechanism doesn't work if other drivers
want to use the i2c device, so explicitly loading the module
is the best alternative. */
- if (client == NULL || client->driver == NULL)
+ if (client == NULL || client->dev.driver == NULL)
goto error;
/* Lock the module so we can safely get the v4l2_subdev pointer */
- if (!try_module_get(client->driver->driver.owner))
+ if (!try_module_get(client->dev.driver->owner))
goto error;
sd = i2c_get_clientdata(client);
@@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
if (v4l2_device_register_subdev(v4l2_dev, sd))
sd = NULL;
/* Decrease the module use count to match the first try_module_get. */
- module_put(client->driver->driver.owner);
+ module_put(client->dev.driver->owner);
error:
/* If we have a client but no subdev, then something went wrong and
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index c3f08038868..60dcc0f3b32 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -565,13 +565,13 @@ EXPORT_SYMBOL(v4l2_ctrl_get_menu);
* Returns NULL or an s64 type array containing the menu for given
* control ID. The total number of the menu items is returned in @len.
*/
-const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
{
- static const s64 const qmenu_int_vpx_num_partitions[] = {
+ static const s64 qmenu_int_vpx_num_partitions[] = {
1, 2, 4, 8,
};
- static const s64 const qmenu_int_vpx_num_ref_frames[] = {
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
1, 2, 3,
};
@@ -583,7 +583,7 @@ const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
default:
*len = 0;
return NULL;
- };
+ }
}
EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 7c437128821..73035ee0f4d 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -41,6 +41,8 @@ module_param(debug, bool, 0644);
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT (1 << 2)
/* Offset base for buffers on the destination queue - used to distinguish
@@ -221,6 +223,14 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
}
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+ /* If the context is aborted then don't schedule it */
+ if (m2m_ctx->job_flags & TRANS_ABORT) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Aborted context\n");
+ return;
+ }
+
if (m2m_ctx->job_flags & TRANS_QUEUED) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("On job queue already\n");
@@ -280,6 +290,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+ m2m_ctx->job_flags |= TRANS_ABORT;
if (m2m_ctx->job_flags & TRANS_RUNNING) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
@@ -480,13 +492,15 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
/* We should not be scheduled anymore, since we're dropping a queue. */
- INIT_LIST_HEAD(&m2m_ctx->queue);
+ if (m2m_ctx->job_flags & TRANS_QUEUED)
+ list_del(&m2m_ctx->queue);
m2m_ctx->job_flags = 0;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
/* Drop queue, since streamoff returns device to the same state as after
* calling reqbufs. */
INIT_LIST_HEAD(&q_ctx->rdy_queue);
+ q_ctx->num_rdy = 0;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
if (m2m_dev->curr_ctx == m2m_ctx) {
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index de0e87f0b2c..b19b306c8f7 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -241,7 +241,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
q->bufs[q->num_buffers + buffer] = vb;
}
- __setup_offsets(q, buffer);
+ if (memory == V4L2_MEMORY_MMAP)
+ __setup_offsets(q, buffer);
dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
buffer, num_planes);
@@ -1015,6 +1016,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Check if the provided plane buffer is large enough */
if (planes[plane].length < q->plane_sizes[plane]) {
+ dprintk(1, "qbuf: provided buffer size %u is less than "
+ "setup size %u for plane %d\n",
+ planes[plane].length,
+ q->plane_sizes[plane], plane);
ret = -EINVAL;
goto err;
}
@@ -1205,8 +1210,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
int ret;
ret = __verify_length(vb, b);
- if (ret < 0)
+ if (ret < 0) {
+ dprintk(1, "%s(): plane parameters verification failed: %d\n",
+ __func__, ret);
return ret;
+ }
switch (q->memory) {
case V4L2_MEMORY_MMAP:
@@ -2469,10 +2477,11 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
}
EXPORT_SYMBOL_GPL(vb2_read);
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblocking)
{
- return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+ return __vb2_perform_fileio(q, (char __user *) data, count,
+ ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
@@ -2633,7 +2642,7 @@ int vb2_fop_release(struct file *file)
}
EXPORT_SYMBOL_GPL(vb2_fop_release);
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 16ae3dcc7e2..2f860543912 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -35,17 +35,61 @@ struct vb2_dma_sg_buf {
struct page **pages;
int write;
int offset;
- struct vb2_dma_sg_desc sg_desc;
+ struct sg_table sg_table;
+ size_t size;
+ unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
};
static void vb2_dma_sg_put(void *buf_priv);
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+ gfp_t gfp_flags)
+{
+ unsigned int last_page = 0;
+ int size = buf->size;
+
+ while (size > 0) {
+ struct page *pages;
+ int order;
+ int i;
+
+ order = get_order(size);
+ /* Dont over allocate*/
+ if ((PAGE_SIZE << order) > size)
+ order--;
+
+ pages = NULL;
+ while (!pages) {
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+ __GFP_NOWARN | gfp_flags, order);
+ if (pages)
+ break;
+
+ if (order == 0) {
+ while (last_page--)
+ __free_page(buf->pages[last_page]);
+ return -ENOMEM;
+ }
+ order--;
+ }
+
+ split_page(pages, order);
+ for (i = 0; i < (1 << order); i++)
+ buf->pages[last_page++] = &pages[i];
+
+ size -= PAGE_SIZE << order;
+ }
+
+ return 0;
+}
+
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
struct vb2_dma_sg_buf *buf;
- int i;
+ int ret;
+ int num_pages;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -54,29 +98,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
buf->vaddr = NULL;
buf->write = 0;
buf->offset = 0;
- buf->sg_desc.size = size;
+ buf->size = size;
/* size is already page aligned */
- buf->sg_desc.num_pages = size >> PAGE_SHIFT;
+ buf->num_pages = size >> PAGE_SHIFT;
- buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
- sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto fail_sglist_alloc;
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
goto fail_pages_array_alloc;
- for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
- __GFP_NOWARN | gfp_flags);
- if (NULL == buf->pages[i])
- goto fail_pages_alloc;
- sg_set_page(&buf->sg_desc.sglist[i],
- buf->pages[i], PAGE_SIZE, 0);
- }
+ ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ if (ret)
+ goto fail_pages_alloc;
+
+ ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ buf->num_pages, 0, size, gfp_flags);
+ if (ret)
+ goto fail_table_alloc;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
@@ -85,18 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
atomic_inc(&buf->refcount);
dprintk(1, "%s: Allocated buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
+ __func__, buf->num_pages);
return buf;
+fail_table_alloc:
+ num_pages = buf->num_pages;
+ while (num_pages--)
+ __free_page(buf->pages[num_pages]);
fail_pages_alloc:
- while (--i >= 0)
- __free_page(buf->pages[i]);
kfree(buf->pages);
-
fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-fail_sglist_alloc:
kfree(buf);
return NULL;
}
@@ -104,14 +140,14 @@ fail_sglist_alloc:
static void vb2_dma_sg_put(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
+ int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
- buf->sg_desc.num_pages);
+ buf->num_pages);
if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
- vfree(buf->sg_desc.sglist);
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(&buf->sg_table);
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
@@ -124,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
{
struct vb2_dma_sg_buf *buf;
unsigned long first, last;
- int num_pages_from_user, i;
+ int num_pages_from_user;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -133,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->vaddr = NULL;
buf->write = write;
buf->offset = vaddr & ~PAGE_MASK;
- buf->sg_desc.size = size;
+ buf->size = size;
first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
- buf->sg_desc.num_pages = last - first + 1;
-
- buf->sg_desc.sglist = vzalloc(
- buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto userptr_fail_sglist_alloc;
-
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+ buf->num_pages = last - first + 1;
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
- goto userptr_fail_pages_array_alloc;
+ return NULL;
num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
- buf->sg_desc.num_pages,
+ buf->num_pages,
write,
1, /* force */
buf->pages,
NULL);
- if (num_pages_from_user != buf->sg_desc.num_pages)
+ if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
- sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
- PAGE_SIZE - buf->offset, buf->offset);
- size -= PAGE_SIZE - buf->offset;
- for (i = 1; i < buf->sg_desc.num_pages; ++i) {
- sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
- min_t(size_t, PAGE_SIZE, size), 0);
- size -= min_t(size_t, PAGE_SIZE, size);
- }
+ if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ buf->num_pages, buf->offset, size, 0))
+ goto userptr_fail_alloc_table_from_pages;
+
return buf;
+userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
dprintk(1, "get_user_pages requested/got: %d/%d]\n",
- num_pages_from_user, buf->sg_desc.num_pages);
+ num_pages_from_user, buf->num_pages);
while (--num_pages_from_user >= 0)
put_page(buf->pages[num_pages_from_user]);
kfree(buf->pages);
-
-userptr_fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-userptr_fail_sglist_alloc:
kfree(buf);
return NULL;
}
@@ -194,18 +215,18 @@ userptr_fail_sglist_alloc:
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
+ int i = buf->num_pages;
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
+ __func__, buf->num_pages);
if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(&buf->sg_table);
while (--i >= 0) {
if (buf->write)
set_page_dirty_lock(buf->pages[i]);
put_page(buf->pages[i]);
}
- vfree(buf->sg_desc.sglist);
kfree(buf->pages);
kfree(buf);
}
@@ -218,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
if (!buf->vaddr)
buf->vaddr = vm_map_ram(buf->pages,
- buf->sg_desc.num_pages,
+ buf->num_pages,
-1,
PAGE_KERNEL);
@@ -274,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- return &buf->sg_desc;
+ return &buf->sg_table;
}
const struct vb2_mem_ops vb2_dma_sg_memops = {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index ffcb10ac434..a0547dbf980 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -153,24 +153,24 @@ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
struct memstick_dev *card = container_of(dev, struct memstick_dev, \
dev); \
return sprintf(buf, format, card->id.name); \
-}
+} \
+static DEVICE_ATTR_RO(name);
MEMSTICK_ATTR(type, "%02X");
MEMSTICK_ATTR(category, "%02X");
MEMSTICK_ATTR(class, "%02X");
-#define MEMSTICK_ATTR_RO(name) __ATTR(name, S_IRUGO, name##_show, NULL)
-
-static struct device_attribute memstick_dev_attrs[] = {
- MEMSTICK_ATTR_RO(type),
- MEMSTICK_ATTR_RO(category),
- MEMSTICK_ATTR_RO(class),
- __ATTR_NULL
+static struct attribute *memstick_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_category.attr,
+ &dev_attr_class.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(memstick_dev);
static struct bus_type memstick_bus_type = {
.name = "memstick",
- .dev_attrs = memstick_dev_attrs,
+ .dev_groups = memstick_dev_groups,
.match = memstick_bus_match,
.uevent = memstick_uevent,
.probe = memstick_device_probe,
@@ -253,7 +253,7 @@ void memstick_new_req(struct memstick_host *host)
{
if (host->card) {
host->retries = cmd_retries;
- INIT_COMPLETION(host->card->mrq_complete);
+ reinit_completion(&host->card->mrq_complete);
host->request(host);
}
}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 08e70232062..24f2f8473de 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -401,10 +401,10 @@ again:
sizeof(struct ms_status_register)))
return 0;
- msb->state = MSB_RP_RECEIVE_OOB_READ;
+ msb->state = MSB_RP_RECEIVE_STATUS_REG;
return 0;
- case MSB_RP_RECIVE_STATUS_REG:
+ case MSB_RP_RECEIVE_STATUS_REG:
msb->regs.status = *(struct ms_status_register *)mrq->data;
msb->state = MSB_RP_SEND_OOB_READ;
/* fallthrough */
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index 96e63755098..c75198dbf13 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -223,7 +223,7 @@ enum msb_readpage_states {
MSB_RP_RECEIVE_INT_REQ_RESULT,
MSB_RP_SEND_READ_STATUS_REG,
- MSB_RP_RECIVE_STATUS_REG,
+ MSB_RP_RECEIVE_STATUS_REG,
MSB_RP_SEND_OOB_READ,
MSB_RP_RECEIVE_OOB_READ,
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index f4176ca3a79..fc145d202c4 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1023,8 +1023,8 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
} else
attr_count = attr->count;
- msb->attr_group.attrs = kzalloc((attr_count + 1)
- * sizeof(struct attribute),
+ msb->attr_group.attrs = kcalloc(attr_count + 1,
+ sizeof(*msb->attr_group.attrs),
GFP_KERNEL);
if (!msb->attr_group.attrs) {
rc = -ENOMEM;
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 1b6e9134522..31727bf285d 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -290,7 +290,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
dbg_verbose("doing dma transfer");
dev->dma_error = 0;
- INIT_COMPLETION(dev->dma_done);
+ reinit_completion(&dev->dma_done);
/* TODO: hidden assumption about nenth beeing always 1 */
sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index cbe384fb848..91614f11f89 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -33,7 +33,7 @@ extern int __init i2o_pci_init(void);
extern void __exit i2o_pci_exit(void);
/* device */
-extern struct device_attribute i2o_device_attrs[];
+extern const struct attribute_group *i2o_device_groups[];
extern void i2o_device_remove(struct i2o_device *);
extern int i2o_device_parse_lct(struct i2o_controller *);
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 4547db99f7d..98348f420b5 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -138,45 +138,55 @@ static void i2o_device_release(struct device *dev)
}
/**
- * i2o_device_show_class_id - Displays class id of I2O device
+ * class_id_show - Displays class id of I2O device
* @dev: device of which the class id should be displayed
* @attr: pointer to device attribute
* @buf: buffer into which the class id should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
-static ssize_t i2o_device_show_class_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t class_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
sprintf(buf, "0x%03x\n", i2o_dev->lct_data.class_id);
return strlen(buf) + 1;
}
+static DEVICE_ATTR_RO(class_id);
/**
- * i2o_device_show_tid - Displays TID of I2O device
+ * tid_show - Displays TID of I2O device
* @dev: device of which the TID should be displayed
* @attr: pointer to device attribute
* @buf: buffer into which the TID should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
-static ssize_t i2o_device_show_tid(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t tid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
sprintf(buf, "0x%03x\n", i2o_dev->lct_data.tid);
return strlen(buf) + 1;
}
+static DEVICE_ATTR_RO(tid);
/* I2O device attributes */
-struct device_attribute i2o_device_attrs[] = {
- __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
- __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
- __ATTR_NULL
+static struct attribute *i2o_device_attrs[] = {
+ &dev_attr_class_id.attr,
+ &dev_attr_tid.attr,
+ NULL,
+};
+
+static const struct attribute_group i2o_device_group = {
+ .attrs = i2o_device_attrs,
+};
+
+const struct attribute_group *i2o_device_groups[] = {
+ &i2o_device_group,
+ NULL,
};
/**
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 813eaa33fa1..1b18a0d1d05 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -62,7 +62,7 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
struct bus_type i2o_bus_type = {
.name = "i2o",
.match = i2o_bus_match,
- .dev_attrs = i2o_device_attrs
+ .dev_groups = i2o_device_groups,
};
/**
@@ -105,7 +105,8 @@ int i2o_driver_register(struct i2o_driver *drv)
osm_err("too many drivers registered, increase "
"max_drivers\n");
spin_unlock_irqrestore(&i2o_drivers_lock, flags);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
drv->context = i;
@@ -124,11 +125,14 @@ int i2o_driver_register(struct i2o_driver *drv)
}
rc = driver_register(&drv->driver);
- if (rc) {
- if (drv->event) {
- destroy_workqueue(drv->event_queue);
- drv->event_queue = NULL;
- }
+ if (rc)
+ goto out;
+
+ return 0;
+out:
+ if (drv->event_queue) {
+ destroy_workqueue(drv->event_queue);
+ drv->event_queue = NULL;
}
return rc;
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 7ebe9ef1eba..c9b1f642294 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1247,7 +1247,7 @@ static struct i2c_driver pm860x_driver = {
.name = "88PM860x",
.owner = THIS_MODULE,
.pm = &pm860x_pm_ops,
- .of_match_table = of_match_ptr(pm860x_dt_ids),
+ .of_match_table = pm860x_dt_ids,
},
.probe = pm860x_probe,
.remove = pm860x_remove,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 914c3d142f7..62a60caa5d1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -27,6 +27,18 @@ config MFD_AS3711
help
Support for the AS3711 PMIC from AMS
+config MFD_AS3722
+ bool "ams AS3722 Power Management IC"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C && OF
+ help
+ The ams AS3722 is a compact system PMU suitable for mobile phones,
+ tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down
+ controllers, 11 LDOs, RTC, automatic battery, temperature and
+ over current monitoring, GPIOs, ADC and a watchdog.
+
config PMIC_ADP5520
bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
depends on I2C=y
@@ -664,14 +676,14 @@ menu "STMicroelectronics STMPE Interface Drivers"
depends on MFD_STMPE
config STMPE_I2C
- bool "STMicroelectronics STMPE I2C Inteface"
+ bool "STMicroelectronics STMPE I2C Interface"
depends on I2C=y
default y
help
This is used to enable I2C interface of STMPE
config STMPE_SPI
- bool "STMicroelectronics STMPE SPI Inteface"
+ bool "STMicroelectronics STMPE SPI Interface"
depends on SPI_MASTER
help
This is used to enable SPI interface of STMPE
@@ -1151,6 +1163,16 @@ config MFD_WM8994
core support for the WM8994, in order to use the actual
functionaltiy of the device other drivers must be enabled.
+config MFD_STW481X
+ bool "Support for ST Microelectronics STw481x"
+ depends on I2C && ARCH_NOMADIK
+ select REGMAP_I2C
+ select MFD_CORE
+ help
+ Select this option to enable the STw481x chip driver used
+ in various ST Microelectronics and ST-Ericsson embedded
+ Nomadik series.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 15b905c6553..8a28dc90fe7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -162,3 +162,5 @@ obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o
obj-$(CONFIG_MFD_RETU) += retu-mfd.o
obj-$(CONFIG_MFD_AS3711) += as3711.o
+obj-$(CONFIG_MFD_AS3722) += as3722.o
+obj-$(CONFIG_MFD_STW481X) += stw481x.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 6f68472e0ca..14d9542a4ee 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -293,7 +293,7 @@ static ssize_t aat2870_reg_write_file(struct file *file,
unsigned long addr, val;
int ret;
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min(count, (size_t)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size)) {
dev_err(aat2870->dev, "Failed to copy from user\n");
return -EFAULT;
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5ac3aa48473..75e180ceecf 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -540,7 +540,7 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
if (arizona->pdata.gpio_defaults[i] > 0xffff)
arizona->pdata.gpio_defaults[i] = 0;
- if (arizona->pdata.gpio_defaults[i] == 0)
+ else if (arizona->pdata.gpio_defaults[i] == 0)
arizona->pdata.gpio_defaults[i] = 0x10000;
}
} else {
@@ -569,13 +569,25 @@ static struct mfd_cell early_devs[] = {
{ .name = "arizona-ldo1" },
};
+static const char *wm5102_supplies[] = {
+ "DBVDD2",
+ "DBVDD3",
+ "CPVDD",
+ "SPKVDDL",
+ "SPKVDDR",
+};
+
static struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
- { .name = "wm5102-codec" },
+ {
+ .name = "wm5102-codec",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+ },
};
static struct mfd_cell wm5110_devs[] = {
@@ -584,7 +596,17 @@ static struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
- { .name = "wm5110-codec" },
+ {
+ .name = "wm5110-codec",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+ },
+};
+
+static const char *wm8997_supplies[] = {
+ "DBVDD2",
+ "CPVDD",
+ "SPKVDD",
};
static struct mfd_cell wm8997_devs[] = {
@@ -593,7 +615,11 @@ static struct mfd_cell wm8997_devs[] = {
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
- { .name = "wm8997-codec" },
+ {
+ .name = "wm8997-codec",
+ .parent_supplies = wm8997_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm8997_supplies),
+ },
};
int arizona_dev_init(struct arizona *arizona)
@@ -607,11 +633,11 @@ int arizona_dev_init(struct arizona *arizona)
dev_set_drvdata(arizona->dev, arizona);
mutex_init(&arizona->clk_lock);
- arizona_of_get_core_pdata(arizona);
-
if (dev_get_platdata(arizona->dev))
memcpy(&arizona->pdata, dev_get_platdata(arizona->dev),
sizeof(arizona->pdata));
+ else
+ arizona_of_get_core_pdata(arizona);
regcache_cache_only(arizona->regmap, true);
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 51dbabf7c02..beccb790c9b 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/mfd/arizona/core.h>
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 47be7b35b5c..1ca554b18be 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/of.h>
#include <linux/mfd/arizona/core.h>
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index abd3ab7c090..ec684fcedb4 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -17,6 +17,7 @@
#include <linux/mfd/as3711.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
new file mode 100644
index 00000000000..f161f2e00df
--- /dev/null
+++ b/drivers/mfd/as3722.c
@@ -0,0 +1,449 @@
+/*
+ * Core driver for ams AS3722 PMICs
+ *
+ * Copyright (C) 2013 AMS AG
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/as3722.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AS3722_DEVICE_ID 0x0C
+
+static const struct resource as3722_rtc_resource[] = {
+ {
+ .name = "as3722-rtc-alarm",
+ .start = AS3722_IRQ_RTC_ALARM,
+ .end = AS3722_IRQ_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const struct resource as3722_adc_resource[] = {
+ {
+ .name = "as3722-adc",
+ .start = AS3722_IRQ_ADC,
+ .end = AS3722_IRQ_ADC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell as3722_devs[] = {
+ {
+ .name = "as3722-pinctrl",
+ },
+ {
+ .name = "as3722-regulator",
+ },
+ {
+ .name = "as3722-rtc",
+ .num_resources = ARRAY_SIZE(as3722_rtc_resource),
+ .resources = as3722_rtc_resource,
+ },
+ {
+ .name = "as3722-adc",
+ .num_resources = ARRAY_SIZE(as3722_adc_resource),
+ .resources = as3722_adc_resource,
+ },
+ {
+ .name = "as3722-power-off",
+ },
+};
+
+static const struct regmap_irq as3722_irqs[] = {
+ /* INT1 IRQs */
+ [AS3722_IRQ_LID] = {
+ .mask = AS3722_INTERRUPT_MASK1_LID,
+ },
+ [AS3722_IRQ_ACOK] = {
+ .mask = AS3722_INTERRUPT_MASK1_ACOK,
+ },
+ [AS3722_IRQ_ENABLE1] = {
+ .mask = AS3722_INTERRUPT_MASK1_ENABLE1,
+ },
+ [AS3722_IRQ_OCCUR_ALARM_SD0] = {
+ .mask = AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0,
+ },
+ [AS3722_IRQ_ONKEY_LONG_PRESS] = {
+ .mask = AS3722_INTERRUPT_MASK1_ONKEY_LONG,
+ },
+ [AS3722_IRQ_ONKEY] = {
+ .mask = AS3722_INTERRUPT_MASK1_ONKEY,
+ },
+ [AS3722_IRQ_OVTMP] = {
+ .mask = AS3722_INTERRUPT_MASK1_OVTMP,
+ },
+ [AS3722_IRQ_LOWBAT] = {
+ .mask = AS3722_INTERRUPT_MASK1_LOWBAT,
+ },
+
+ /* INT2 IRQs */
+ [AS3722_IRQ_SD0_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD0_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_SD1_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD1_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_SD2_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD2345_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_PWM1_OV_PROT] = {
+ .mask = AS3722_INTERRUPT_MASK2_PWM1_OV_PROT,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_PWM2_OV_PROT] = {
+ .mask = AS3722_INTERRUPT_MASK2_PWM2_OV_PROT,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_ENABLE2] = {
+ .mask = AS3722_INTERRUPT_MASK2_ENABLE2,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_SD6_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD6_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_RTC_REP] = {
+ .mask = AS3722_INTERRUPT_MASK2_RTC_REP,
+ .reg_offset = 1,
+ },
+
+ /* INT3 IRQs */
+ [AS3722_IRQ_RTC_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK3_RTC_ALARM,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO1] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO1,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO2] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO2,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO3] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO3,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO4] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO4,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO5] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO5,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_WATCHDOG] = {
+ .mask = AS3722_INTERRUPT_MASK3_WATCHDOG,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_ENABLE3] = {
+ .mask = AS3722_INTERRUPT_MASK3_ENABLE3,
+ .reg_offset = 2,
+ },
+
+ /* INT4 IRQs */
+ [AS3722_IRQ_TEMP_SD0_SHUTDOWN] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD1_SHUTDOWN] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD2_SHUTDOWN] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD0_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD1_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD6_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_OCCUR_ALARM_SD6] = {
+ .mask = AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_ADC] = {
+ .mask = AS3722_INTERRUPT_MASK4_ADC,
+ .reg_offset = 3,
+ },
+};
+
+static const struct regmap_irq_chip as3722_irq_chip = {
+ .name = "as3722",
+ .irqs = as3722_irqs,
+ .num_irqs = ARRAY_SIZE(as3722_irqs),
+ .num_regs = 4,
+ .status_base = AS3722_INTERRUPT_STATUS1_REG,
+ .mask_base = AS3722_INTERRUPT_MASK1_REG,
+};
+
+static int as3722_check_device_id(struct as3722 *as3722)
+{
+ u32 val;
+ int ret;
+
+ /* Check that this is actually a AS3722 */
+ ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &val);
+ if (ret < 0) {
+ dev_err(as3722->dev, "ASIC_ID1 read failed: %d\n", ret);
+ return ret;
+ }
+
+ if (val != AS3722_DEVICE_ID) {
+ dev_err(as3722->dev, "Device is not AS3722, ID is 0x%x\n", val);
+ return -ENODEV;
+ }
+
+ ret = as3722_read(as3722, AS3722_ASIC_ID2_REG, &val);
+ if (ret < 0) {
+ dev_err(as3722->dev, "ASIC_ID2 read failed: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(as3722->dev, "AS3722 with revision 0x%x found\n", val);
+ return 0;
+}
+
+static int as3722_configure_pullups(struct as3722 *as3722)
+{
+ int ret;
+ u32 val = 0;
+
+ if (as3722->en_intern_int_pullup)
+ val |= AS3722_INT_PULL_UP;
+ if (as3722->en_intern_i2c_pullup)
+ val |= AS3722_I2C_PULL_UP;
+
+ ret = as3722_update_bits(as3722, AS3722_IOVOLTAGE_REG,
+ AS3722_INT_PULL_UP | AS3722_I2C_PULL_UP, val);
+ if (ret < 0)
+ dev_err(as3722->dev, "IOVOLTAGE_REG update failed: %d\n", ret);
+ return ret;
+}
+
+static const struct regmap_range as3722_readable_ranges[] = {
+ regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG),
+ regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG),
+ regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_REG_SEQU_MOD3_REG),
+ regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG),
+ regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG),
+ regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG,
+ AS3722_BATTERY_VOLTAGE_MONITOR2_REG),
+ regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG),
+ regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG),
+ regmap_reg_range(AS3722_RTC_ACCESS_REG, AS3722_RTC_ACCESS_REG),
+ regmap_reg_range(AS3722_RTC_STATUS_REG, AS3722_TEMP_STATUS_REG),
+ regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC_CONFIGURATION_REG),
+ regmap_reg_range(AS3722_ASIC_ID1_REG, AS3722_ASIC_ID2_REG),
+ regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+};
+
+static const struct regmap_access_table as3722_readable_table = {
+ .yes_ranges = as3722_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(as3722_readable_ranges),
+};
+
+static const struct regmap_range as3722_writable_ranges[] = {
+ regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG),
+ regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG),
+ regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_GPIO_SIGNAL_OUT_REG),
+ regmap_reg_range(AS3722_REG_SEQU_MOD1_REG, AS3722_REG_SEQU_MOD3_REG),
+ regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG),
+ regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG),
+ regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG,
+ AS3722_BATTERY_VOLTAGE_MONITOR2_REG),
+ regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG),
+ regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG),
+ regmap_reg_range(AS3722_INTERRUPT_MASK1_REG, AS3722_TEMP_STATUS_REG),
+ regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC1_CONTROL_REG),
+ regmap_reg_range(AS3722_ADC1_THRESHOLD_HI_MSB_REG,
+ AS3722_ADC_CONFIGURATION_REG),
+ regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+};
+
+static const struct regmap_access_table as3722_writable_table = {
+ .yes_ranges = as3722_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(as3722_writable_ranges),
+};
+
+static const struct regmap_range as3722_cacheable_ranges[] = {
+ regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_LDO11_VOLTAGE_REG),
+ regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_LDOCONTROL1_REG),
+};
+
+static const struct regmap_access_table as3722_volatile_table = {
+ .no_ranges = as3722_cacheable_ranges,
+ .n_no_ranges = ARRAY_SIZE(as3722_cacheable_ranges),
+};
+
+static const struct regmap_config as3722_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AS3722_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &as3722_readable_table,
+ .wr_table = &as3722_writable_table,
+ .volatile_table = &as3722_volatile_table,
+};
+
+static int as3722_i2c_of_probe(struct i2c_client *i2c,
+ struct as3722 *as3722)
+{
+ struct device_node *np = i2c->dev.of_node;
+ struct irq_data *irq_data;
+
+ if (!np) {
+ dev_err(&i2c->dev, "Device Tree not found\n");
+ return -EINVAL;
+ }
+
+ irq_data = irq_get_irq_data(i2c->irq);
+ if (!irq_data) {
+ dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq);
+ return -EINVAL;
+ }
+
+ as3722->en_intern_int_pullup = of_property_read_bool(np,
+ "ams,enable-internal-int-pullup");
+ as3722->en_intern_i2c_pullup = of_property_read_bool(np,
+ "ams,enable-internal-i2c-pullup");
+ as3722->irq_flags = irqd_get_trigger_type(irq_data);
+ dev_dbg(&i2c->dev, "IRQ flags are 0x%08lx\n", as3722->irq_flags);
+ return 0;
+}
+
+static int as3722_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct as3722 *as3722;
+ unsigned long irq_flags;
+ int ret;
+
+ as3722 = devm_kzalloc(&i2c->dev, sizeof(struct as3722), GFP_KERNEL);
+ if (!as3722)
+ return -ENOMEM;
+
+ as3722->dev = &i2c->dev;
+ as3722->chip_irq = i2c->irq;
+ i2c_set_clientdata(i2c, as3722);
+
+ ret = as3722_i2c_of_probe(i2c, as3722);
+ if (ret < 0)
+ return ret;
+
+ as3722->regmap = devm_regmap_init_i2c(i2c, &as3722_regmap_config);
+ if (IS_ERR(as3722->regmap)) {
+ ret = PTR_ERR(as3722->regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = as3722_check_device_id(as3722);
+ if (ret < 0)
+ return ret;
+
+ irq_flags = as3722->irq_flags | IRQF_ONESHOT;
+ ret = regmap_add_irq_chip(as3722->regmap, as3722->chip_irq,
+ irq_flags, -1, &as3722_irq_chip,
+ &as3722->irq_data);
+ if (ret < 0) {
+ dev_err(as3722->dev, "Failed to add regmap irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = as3722_configure_pullups(as3722);
+ if (ret < 0)
+ goto scrub;
+
+ ret = mfd_add_devices(&i2c->dev, -1, as3722_devs,
+ ARRAY_SIZE(as3722_devs), NULL, 0,
+ regmap_irq_get_domain(as3722->irq_data));
+ if (ret) {
+ dev_err(as3722->dev, "Failed to add MFD devices: %d\n", ret);
+ goto scrub;
+ }
+
+ dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
+ return 0;
+
+scrub:
+ regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
+ return ret;
+}
+
+static int as3722_i2c_remove(struct i2c_client *i2c)
+{
+ struct as3722 *as3722 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(as3722->dev);
+ regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
+ return 0;
+}
+
+static const struct of_device_id as3722_of_match[] = {
+ { .compatible = "ams,as3722", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, as3722_of_match);
+
+static const struct i2c_device_id as3722_i2c_id[] = {
+ { "as3722", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, as3722_i2c_id);
+
+static struct i2c_driver as3722_i2c_driver = {
+ .driver = {
+ .name = "as3722",
+ .owner = THIS_MODULE,
+ .of_match_table = as3722_of_match,
+ },
+ .probe = as3722_i2c_probe,
+ .remove = as3722_i2c_remove,
+ .id_table = as3722_i2c_id,
+};
+
+module_i2c_driver(as3722_i2c_driver);
+
+MODULE_DESCRIPTION("I2C support for AS3722 PMICs");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
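A sketch of how a child of the as3722_devs cells above would typically reach the shared chip state and its mapped interrupt, assuming the usual MFD arrangement (the I2C client is the platform device's parent and holds the as3722 as clientdata, and IRQ resources resolve through the regmap IRQ domain passed to mfd_add_devices()). The probe function name is hypothetical.

	#include <linux/platform_device.h>
	#include <linux/mfd/as3722.h>

	static int as3722_child_probe(struct platform_device *pdev)
	{
		/* core driver stored the as3722 as I2C clientdata, and the
		 * MFD core made the I2C device this platform device's parent */
		struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
		int irq = platform_get_irq(pdev, 0);	/* e.g. as3722-rtc-alarm */

		if (irq < 0)
			return irq;

		dev_dbg(&pdev->dev, "chip irq %d, cell irq %d\n",
			as3722->chip_irq, irq);
		return 0;
	}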
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 6a9fec40d01..c319c4ef5d4 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -86,7 +86,11 @@ static int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
return 0;
}
-static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+/*
+ * According to errata item 24, multiwrite mode should be avoided
+ * in order to prevent register data corruption after power-down.
+ */
+static int da9052_i2c_disable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -94,8 +98,8 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
if (ret < 0)
return ret;
- if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
- reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+ if (!(reg_val & DA9052_CONTROL_B_WRITEMODE)) {
+ reg_val |= DA9052_CONTROL_B_WRITEMODE;
ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
reg_val);
if (ret < 0)
@@ -154,7 +158,7 @@ static int da9052_i2c_probe(struct i2c_client *client,
return ret;
}
- ret = da9052_i2c_enable_multiwrite(da9052);
+ ret = da9052_i2c_disable_multiwrite(da9052);
if (ret < 0)
return ret;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 53f371dcbb6..b9ce60c301d 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -480,7 +480,6 @@ static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
- CLK_MGT_ENTRY(BML8580CLK, PLL_DIV, true),
CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index 4f6f0fa5d3b..7cc32a8ff01 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -32,7 +32,6 @@
#define PRCM_PER7CLK_MGT (0x040)
#define PRCM_LCDCLK_MGT (0x044)
#define PRCM_BMLCLK_MGT (0x04C)
-#define PRCM_BML8580CLK_MGT (0x108)
#define PRCM_HSITXCLK_MGT (0x050)
#define PRCM_HSIRXCLK_MGT (0x054)
#define PRCM_HDMICLK_MGT (0x058)
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 7245b0c5b79..2ed774e7d34 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -394,16 +394,12 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
static int ezx_pcap_remove(struct spi_device *spi)
{
struct pcap_chip *pcap = spi_get_drvdata(spi);
- struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
- int i, adc_irq;
+ int i;
/* remove all registered subdevs */
device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
/* cleanup ADC */
- adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
- PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
- devm_free_irq(&spi->dev, adc_irq, pcap);
mutex_lock(&pcap->adc_mutex);
for (i = 0; i < PCAP_ADC_MAXQ; i++)
kfree(pcap->adc_queue[i]);
@@ -509,8 +505,6 @@ static int ezx_pcap_probe(struct spi_device *spi)
remove_subdevs:
device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
-/* free_adc: */
- devm_free_irq(&spi->dev, adc_irq, pcap);
free_irqchip:
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
irq_set_chip_and_handler(i, NULL, NULL);
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 9483bc8472a..da1c6566d93 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -53,6 +53,7 @@
* document number TBD : Wellsburg
* document number TBD : Avoton SoC
* document number TBD : Coleto Creek
+ * document number TBD : Wildcat Point-LP
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -211,6 +212,7 @@ enum lpc_chipsets {
LPC_WBG, /* Wellsburg */
LPC_AVN, /* Avoton SoC */
LPC_COLETO, /* Coleto Creek */
+ LPC_WPT_LP, /* Wildcat Point-LP */
};
static struct lpc_ich_info lpc_chipset_info[] = {
@@ -503,6 +505,10 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Coleto Creek",
.iTCO_version = 2,
},
+ [LPC_WPT_LP] = {
+ .name = "Lynx Point_LP",
+ .iTCO_version = 2,
+ },
};
/*
@@ -721,6 +727,13 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
{ PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
+ { PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc5), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
@@ -969,7 +982,6 @@ static int lpc_ich_probe(struct pci_dev *dev,
if (!cell_added) {
dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
- pci_set_drvdata(dev, NULL);
return -ENODEV;
}
@@ -980,7 +992,6 @@ static void lpc_ich_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
lpc_ich_restore_config_space(dev);
- pci_set_drvdata(dev, NULL);
}
static struct pci_driver lpc_ich_driver = {
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 8cc6aac27cb..fbfbf0b7f97 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -59,18 +59,21 @@ static struct mfd_cell isch_smbus_cell = {
.name = "isch_smbus",
.num_resources = 1,
.resources = &smbus_sch_resource,
+ .ignore_resource_conflicts = true,
};
static struct mfd_cell sch_gpio_cell = {
.name = "sch_gpio",
.num_resources = 1,
.resources = &gpio_sch_resource,
+ .ignore_resource_conflicts = true,
};
static struct mfd_cell wdt_sch_cell = {
.name = "ie6xx_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
+ .ignore_resource_conflicts = true,
};
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 522be67b2e6..34520cbe8af 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -31,6 +31,7 @@
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#include <linux/err.h>
+#include <linux/of.h>
#define I2C_ADDR_RTC (0x0C >> 1)
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
index 1029d018c73..66b58fe7709 100644
--- a/drivers/mfd/max77693-irq.c
+++ b/drivers/mfd/max77693-irq.c
@@ -128,7 +128,8 @@ static void max77693_irq_sync_unlock(struct irq_data *data)
static const inline struct max77693_irq_data *
irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
{
- return &max77693_irqs[irq];
+ struct irq_data *data = irq_get_irq_data(irq);
+ return &max77693_irqs[data->hwirq];
}
static void max77693_irq_mask(struct irq_data *data)
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index c04723efc70..9f92463f4f7 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
@@ -110,15 +111,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max77693_dev *max77693;
- struct max77693_platform_data *pdata = dev_get_platdata(&i2c->dev);
u8 reg_data;
int ret = 0;
- if (!pdata) {
- dev_err(&i2c->dev, "No platform data found.\n");
- return -EINVAL;
- }
-
max77693 = devm_kzalloc(&i2c->dev,
sizeof(struct max77693_dev), GFP_KERNEL);
if (max77693 == NULL)
@@ -138,8 +133,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
return ret;
}
- max77693->wakeup = pdata->wakeup;
-
ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
&reg_data);
if (ret < 0) {
@@ -179,8 +172,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err_mfd;
- device_init_wakeup(max77693->dev, pdata->wakeup);
-
return ret;
err_mfd:
@@ -235,11 +226,19 @@ static const struct dev_pm_ops max77693_pm = {
.resume = max77693_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id max77693_dt_match[] = {
+ { .compatible = "maxim,max77693" },
+ {},
+};
+#endif
+
static struct i2c_driver max77693_i2c_driver = {
.driver = {
.name = "max77693",
.owner = THIS_MODULE,
.pm = &max77693_pm,
+ .of_match_table = of_match_ptr(max77693_dt_match),
},
.probe = max77693_i2c_probe,
.remove = max77693_i2c_remove,
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index e9b1c93a3ad..3bbfedc07f4 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -17,6 +17,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max8907.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index de7fb80a605..176aa26fc78 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -238,7 +238,7 @@ static struct i2c_driver max8925_driver = {
.name = "max8925",
.owner = THIS_MODULE,
.pm = &max8925_pm_ops,
- .of_match_table = of_match_ptr(max8925_dt_ids),
+ .of_match_table = max8925_dt_ids,
},
.probe = max8925_probe,
.remove = max8925_remove,
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index cee098c0dae..791aea3e96c 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 2a9b100c482..dbbf8ee3f59 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -158,8 +158,6 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
{
int ret;
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
-
if (offset > MC13XXX_NUMREGS)
return -EINVAL;
@@ -172,8 +170,6 @@ EXPORT_SYMBOL(mc13xxx_reg_read);
int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
{
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
-
dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
if (offset > MC13XXX_NUMREGS || val > 0xffffff)
@@ -186,7 +182,6 @@ EXPORT_SYMBOL(mc13xxx_reg_write);
int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
u32 mask, u32 val)
{
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
BUG_ON(val & ~mask);
dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
offset, val, mask);
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index f745e27ee87..898bd335cd8 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -78,7 +78,6 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
ret);
- dev_set_drvdata(&client->dev, NULL);
return ret;
}
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 77189daadf1..5f14ef6693c 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -94,10 +94,15 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
+ const char *reg = data;
if (count != 4)
return -ENOTSUPP;
+ /* errata fix: audio codec/DAC registers must be written twice over SPI */
+ if (*reg == MC13783_AUDIO_CODEC || *reg == MC13783_AUDIO_DAC)
+ spi_write(spi, data, count);
+
return spi_write(spi, data, count);
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f421586f29f..26764924473 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
+#include <linux/regulator/consumer.h>
static struct device_type mfd_dev_type = {
.name = "mfd_device",
@@ -63,7 +64,8 @@ int mfd_cell_disable(struct platform_device *pdev)
EXPORT_SYMBOL(mfd_cell_disable);
static int mfd_platform_add_cell(struct platform_device *pdev,
- const struct mfd_cell *cell)
+ const struct mfd_cell *cell,
+ atomic_t *usage_count)
{
if (!cell)
return 0;
@@ -72,11 +74,12 @@ static int mfd_platform_add_cell(struct platform_device *pdev,
if (!pdev->mfd_cell)
return -ENOMEM;
+ pdev->mfd_cell->usage_count = usage_count;
return 0;
}
static int mfd_add_device(struct device *parent, int id,
- const struct mfd_cell *cell,
+ const struct mfd_cell *cell, atomic_t *usage_count,
struct resource *mem_base,
int irq_base, struct irq_domain *domain)
{
@@ -99,6 +102,13 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.dma_mask = parent->dma_mask;
pdev->dev.dma_parms = parent->dma_parms;
+ ret = devm_regulator_bulk_register_supply_alias(
+ &pdev->dev, cell->parent_supplies,
+ parent, cell->parent_supplies,
+ cell->num_parent_supplies);
+ if (ret < 0)
+ goto fail_res;
+
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
@@ -112,12 +122,12 @@ static int mfd_add_device(struct device *parent, int id,
ret = platform_device_add_data(pdev,
cell->platform_data, cell->pdata_size);
if (ret)
- goto fail_res;
+ goto fail_alias;
}
- ret = mfd_platform_add_cell(pdev, cell);
+ ret = mfd_platform_add_cell(pdev, cell, usage_count);
if (ret)
- goto fail_res;
+ goto fail_alias;
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
@@ -152,17 +162,17 @@ static int mfd_add_device(struct device *parent, int id,
if (!cell->ignore_resource_conflicts) {
ret = acpi_check_resource_conflict(&res[r]);
if (ret)
- goto fail_res;
+ goto fail_alias;
}
}
ret = platform_device_add_resources(pdev, res, cell->num_resources);
if (ret)
- goto fail_res;
+ goto fail_alias;
ret = platform_device_add(pdev);
if (ret)
- goto fail_res;
+ goto fail_alias;
if (cell->pm_runtime_no_callbacks)
pm_runtime_no_callbacks(&pdev->dev);
@@ -171,6 +181,10 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
+fail_alias:
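+ /* roll back the parent supply aliases registered above */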
+ devm_regulator_bulk_unregister_supply_alias(&pdev->dev,
+ cell->parent_supplies,
+ cell->num_parent_supplies);
fail_res:
kfree(res);
fail_device:
@@ -180,12 +194,12 @@ fail_alloc:
}
int mfd_add_devices(struct device *parent, int id,
- struct mfd_cell *cells, int n_devs,
+ const struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base, struct irq_domain *domain)
{
int i;
- int ret = 0;
+ int ret;
atomic_t *cnts;
/* initialize reference counting for all cells */
@@ -195,16 +209,19 @@ int mfd_add_devices(struct device *parent, int id,
for (i = 0; i < n_devs; i++) {
atomic_set(&cnts[i], 0);
- cells[i].usage_count = &cnts[i];
- ret = mfd_add_device(parent, id, cells + i, mem_base,
+ ret = mfd_add_device(parent, id, cells + i, cnts + i, mem_base,
irq_base, domain);
if (ret)
- break;
+ goto fail;
}
- if (ret)
- mfd_remove_devices(parent);
+ return 0;
+fail:
+ if (i)
+ mfd_remove_devices(parent);
+ else
+ kfree(cnts);
return ret;
}
EXPORT_SYMBOL(mfd_add_devices);
@@ -259,8 +276,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
for (i = 0; i < n_clones; i++) {
cell_entry.name = clones[i];
/* don't give up if a single call fails; just report error */
- if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0,
- NULL))
+ if (mfd_add_device(pdev->dev.parent, -1, &cell_entry,
+ cell_entry.usage_count, NULL, 0, NULL))
dev_err(dev, "failed to create platform device '%s'\n",
clones[i]);
}
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29ee54d6851..142650fdc05 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -328,13 +328,13 @@ static int usbhs_runtime_resume(struct device *dev)
omap_tll_enable(pdata);
if (!IS_ERR(omap->ehci_logic_fck))
- clk_enable(omap->ehci_logic_fck);
+ clk_prepare_enable(omap->ehci_logic_fck);
for (i = 0; i < omap->nports; i++) {
switch (pdata->port_mode[i]) {
case OMAP_EHCI_PORT_MODE_HSIC:
if (!IS_ERR(omap->hsic60m_clk[i])) {
- r = clk_enable(omap->hsic60m_clk[i]);
+ r = clk_prepare_enable(omap->hsic60m_clk[i]);
if (r) {
dev_err(dev,
"Can't enable port %d hsic60m clk:%d\n",
@@ -343,7 +343,7 @@ static int usbhs_runtime_resume(struct device *dev)
}
if (!IS_ERR(omap->hsic480m_clk[i])) {
- r = clk_enable(omap->hsic480m_clk[i]);
+ r = clk_prepare_enable(omap->hsic480m_clk[i]);
if (r) {
dev_err(dev,
"Can't enable port %d hsic480m clk:%d\n",
@@ -354,7 +354,7 @@ static int usbhs_runtime_resume(struct device *dev)
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i])) {
- r = clk_enable(omap->utmi_clk[i]);
+ r = clk_prepare_enable(omap->utmi_clk[i]);
if (r) {
dev_err(dev,
"Can't enable port %d clk : %d\n",
@@ -382,15 +382,15 @@ static int usbhs_runtime_suspend(struct device *dev)
switch (pdata->port_mode[i]) {
case OMAP_EHCI_PORT_MODE_HSIC:
if (!IS_ERR(omap->hsic60m_clk[i]))
- clk_disable(omap->hsic60m_clk[i]);
+ clk_disable_unprepare(omap->hsic60m_clk[i]);
if (!IS_ERR(omap->hsic480m_clk[i]))
- clk_disable(omap->hsic480m_clk[i]);
+ clk_disable_unprepare(omap->hsic480m_clk[i]);
/* Fall through as utmi_clks were used in HSIC mode */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i]))
- clk_disable(omap->utmi_clk[i]);
+ clk_disable_unprepare(omap->utmi_clk[i]);
break;
default:
break;
@@ -398,7 +398,7 @@ static int usbhs_runtime_suspend(struct device *dev)
}
if (!IS_ERR(omap->ehci_logic_fck))
- clk_disable(omap->ehci_logic_fck);
+ clk_disable_unprepare(omap->ehci_logic_fck);
omap_tll_disable(pdata);
@@ -893,7 +893,7 @@ static struct platform_driver usbhs_omap_driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
.pm = &usbhsomap_dev_pm_ops,
- .of_match_table = of_match_ptr(usbhs_omap_dt_ids),
+ .of_match_table = usbhs_omap_dt_ids,
},
.remove = usbhs_omap_remove,
};
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index e59ac4cbac9..0d946ae1445 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -320,7 +320,7 @@ static struct platform_driver usbtll_omap_driver = {
.driver = {
.name = (char *)usbtll_driver_name,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(usbtll_omap_dt_ids),
+ .of_match_table = usbtll_omap_dt_ids,
},
.probe = usbtll_omap_probe,
.remove = usbtll_omap_remove,
@@ -429,7 +429,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
if (IS_ERR(tll->ch_clk[i]))
continue;
- r = clk_enable(tll->ch_clk[i]);
+ r = clk_prepare_enable(tll->ch_clk[i]);
if (r) {
dev_err(tll_dev,
"Error enabling ch %d clock: %d\n", i, r);
@@ -460,7 +460,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
for (i = 0; i < tll->nch; i++) {
if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
if (!IS_ERR(tll->ch_clk[i]))
- clk_disable(tll->ch_clk[i]);
+ clk_disable_unprepare(tll->ch_clk[i]);
}
}
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 135afabe4ae..d280d789e55 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -368,6 +368,7 @@ static const struct of_device_id of_palmas_match_tbl[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, of_palmas_match_tbl);
static int palmas_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -402,7 +403,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
palmas->dev = &i2c->dev;
palmas->irq = i2c->irq;
- match = of_match_device(of_match_ptr(of_palmas_match_tbl), &i2c->dev);
+ match = of_match_device(of_palmas_match_tbl, &i2c->dev);
if (!match)
return -ENODATA;
@@ -421,7 +422,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
dev_err(palmas->dev,
"can't attach client %d\n", i);
ret = -ENOMEM;
- goto err;
+ goto err_i2c;
}
palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
}
@@ -432,7 +433,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
dev_err(palmas->dev,
"Failed to allocate regmap %d, err: %d\n",
i, ret);
- goto err;
+ goto err_i2c;
}
}
@@ -451,7 +452,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
reg);
if (ret < 0) {
dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret);
- goto err;
+ goto err_i2c;
}
/* Change IRQ into clear on read mode for efficiency */
@@ -465,7 +466,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip,
&palmas->irq_data);
if (ret < 0)
- goto err;
+ goto err_i2c;
no_irq:
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
@@ -551,7 +552,6 @@ no_irq:
} else if (pdata->pm_off && !pm_power_off) {
palmas_dev = palmas;
pm_power_off = palmas_power_off;
- return ret;
}
}
@@ -559,17 +559,31 @@ no_irq:
err_irq:
regmap_del_irq_chip(palmas->irq, palmas->irq_data);
-err:
+err_i2c:
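+ /* unregister the additional I2C clients attached during probe */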
+ for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
+ if (palmas->i2c_clients[i])
+ i2c_unregister_device(palmas->i2c_clients[i]);
+ }
return ret;
}
static int palmas_i2c_remove(struct i2c_client *i2c)
{
struct palmas *palmas = i2c_get_clientdata(i2c);
+ int i;
- mfd_remove_devices(palmas->dev);
regmap_del_irq_chip(palmas->irq, palmas->irq_data);
+ for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
+ if (palmas->i2c_clients[i])
+ i2c_unregister_device(palmas->i2c_clients[i]);
+ }
+
+ if (palmas == palmas_dev) {
+ pm_power_off = NULL;
+ palmas_dev = NULL;
+ }
+
return 0;
}
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index a6841f77aa5..484fe66e6c8 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -171,11 +171,12 @@ static int pm8921_remove(struct platform_device *pdev)
drvdata = platform_get_drvdata(pdev);
if (drvdata)
pmic = drvdata->pm_chip_data;
- if (pmic)
+ if (pmic) {
mfd_remove_devices(pmic->dev);
- if (pmic->irq_chip) {
- pm8xxx_irq_exit(pmic->irq_chip);
- pmic->irq_chip = NULL;
+ if (pmic->irq_chip) {
+ pm8xxx_irq_exit(pmic->irq_chip);
+ pmic->irq_chip = NULL;
+ }
}
return 0;
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 3b835f593e3..573de7bfcce 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -130,13 +130,57 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
- err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 0xFE46);
+ err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV,
+ PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED |
+ PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN |
+ PHY_REG_REV_RX_PWST | PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 |
+ PHY_REG_REV_STOP_CLKRD | PHY_REG_REV_STOP_CLKWR);
if (err < 0)
return err;
msleep(1);
- return rtsx_pci_write_phy_register(pcr, PHY_BPCR, 0x05C0);
+ err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
+ PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
+ PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
+ PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
+ PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
+ PHY_PCR_RSSI_EN);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
+ PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
+ PHY_RCR2_CDR_CP_10 | PHY_RCR2_CDR_SR_2 |
+ PHY_RCR2_FREQSEL_12 | PHY_RCR2_CPADJEN |
+ PHY_RCR2_CDR_SC_8 | PHY_RCR2_CALIB_LATE);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
+ PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
+ PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
+ PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
+ PHY_FLD4_BER_CHK_EN);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_RDR, PHY_RDR_RXDSEL_1_9);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
+ PHY_RCR1_ADP_TIME | PHY_RCR1_VCO_COARSE);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
+ PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
+ PHY_FLD3_RXDELINK);
+ if (err < 0)
+ return err;
+ return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
+ PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
+ PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
+ PHY_TUNE_TUNED12);
}
static int rts5249_turn_on_led(struct rtsx_pcr *pcr)
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index e6ae7720f9e..11e20afbdca 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1149,7 +1149,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->remap_addr = ioremap_nocache(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
- goto free_host;
+ goto free_handle;
}
pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
@@ -1209,8 +1209,6 @@ disable_msi:
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
iounmap(pcr->remap_addr);
-free_host:
- dev_set_drvdata(&pcidev->dev, NULL);
free_handle:
kfree(handle);
free_pcr:
@@ -1242,7 +1240,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pci_disable_msi(pcr->pci);
iounmap(pcr->remap_addr);
- dev_set_drvdata(&pcidev->dev, NULL);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index f530e4b73f1..34c18fb8c08 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 33f040c558d..c2c8c91c6c7 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1232,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
}
-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
+static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
/* sm501_init_reg
*
@@ -1660,7 +1660,6 @@ static int sm501_pci_probe(struct pci_dev *dev,
err3:
pci_disable_device(dev);
err2:
- pci_set_drvdata(dev, NULL);
kfree(sm);
err1:
return err;
@@ -1695,7 +1694,6 @@ static void sm501_pci_remove(struct pci_dev *dev)
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
- pci_set_drvdata(dev, NULL);
pci_disable_device(dev);
}
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
new file mode 100644
index 00000000000..1243d5c6a44
--- /dev/null
+++ b/drivers/mfd/stw481x.c
@@ -0,0 +1,250 @@
+/*
+ * Core driver for STw4810/STw4811
+ *
+ * Copyright (C) 2013 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stw481x.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/*
+ * This driver can only access the non-USB portions of STw4811, the register
+ * range 0x00-0x10 dealing with USB is bound to the two special I2C pins used
+ * for USB control.
+ */
+
+/* Registers inside the power control address space */
+#define STW_PC_VCORE_SEL 0x05U
+#define STW_PC_VAUX_SEL 0x06U
+#define STW_PC_VPLL_SEL 0x07U
+
+/**
+ * stw481x_get_pctl_reg() - get a power control register
+ * @stw481x: handle to the stw481x chip
+ * @reg: power control register to fetch
+ *
+ * The power control registers are a set of one-time-programmable registers
+ * in their own register space, accessed by writing address bits to these
+ * two registers: bits 7,6,5 of PCTL_REG_LO correspond to the 3 LSBs of
+ * the address and bits 8,9 of PCTL_REG_HI correspond to the 2 MSBs of
+ * the address, forming an address space of 5 bits, i.e. the 32 registers
+ * 0x00 ... 0x1f can be obtained.
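+ * For example, register 0x1f is selected by writing 0x03 to PCTL_REG_HI
+ * and 0xe0 to PCTL_REG_LO.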
+ */
+static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg)
+{
+ u8 msb = (reg >> 3) & 0x03;
+ u8 lsb = (reg << 5) & 0xe0;
+ unsigned int val;
+ u8 vrfy;
+ int ret;
+
+ ret = regmap_write(stw481x->map, STW_PCTL_REG_HI, msb);
+ if (ret)
+ return ret;
+ ret = regmap_write(stw481x->map, STW_PCTL_REG_LO, lsb);
+ if (ret)
+ return ret;
+ ret = regmap_read(stw481x->map, STW_PCTL_REG_HI, &val);
+ if (ret)
+ return ret;
+ vrfy = (val & 0x03) << 3;
+ ret = regmap_read(stw481x->map, STW_PCTL_REG_LO, &val);
+ if (ret)
+ return ret;
+ vrfy |= ((val >> 5) & 0x07);
+ if (vrfy != reg)
+ return -EIO;
+ return (val >> 1) & 0x0f;
+}
+
+static int stw481x_startup(struct stw481x *stw481x)
+{
+ /* VCORE/VPLL voltages multiplied by 100, VAUX voltages by 10 */
+ u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 138, 140, 145 };
+ u8 vpll_val[] = { 105, 120, 130, 180 };
+ u8 vaux_val[] = { 15, 18, 25, 28 };
+ u8 vcore;
+ u8 vcore_slp;
+ u8 vpll;
+ u8 vaux;
+ bool vaux_en;
+ bool it_warn;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(stw481x->map, STW_CONF1, &val);
+ if (ret)
+ return ret;
+ vaux_en = !!(val & STW_CONF1_PDN_VAUX);
+ it_warn = !!(val & STW_CONF1_IT_WARN);
+
+ dev_info(&stw481x->client->dev, "voltages %s\n",
+ (val & STW_CONF1_V_MONITORING) ? "OK" : "LOW");
+ dev_info(&stw481x->client->dev, "MMC level shifter %s\n",
+ (val & STW_CONF1_MMC_LS_STATUS) ? "high impedance" : "ON");
+ dev_info(&stw481x->client->dev, "VMMC: %s\n",
+ (val & STW_CONF1_PDN_VMMC) ? "ON" : "disabled");
+
+ dev_info(&stw481x->client->dev, "STw481x power control registers:\n");
+
+ ret = stw481x_get_pctl_reg(stw481x, STW_PC_VCORE_SEL);
+ if (ret < 0)
+ return ret;
+ vcore = ret & 0x0f;
+
+ ret = stw481x_get_pctl_reg(stw481x, STW_PC_VAUX_SEL);
+ if (ret < 0)
+ return ret;
+ vaux = (ret >> 2) & 3;
+ vpll = (ret >> 4) & 1; /* Save bit 4 */
+
+ ret = stw481x_get_pctl_reg(stw481x, STW_PC_VPLL_SEL);
+ if (ret < 0)
+ return ret;
+ vpll |= (ret >> 1) & 2;
+
+ dev_info(&stw481x->client->dev, "VCORE: %u.%uV %s\n",
+ vcore_val[vcore] / 100, vcore_val[vcore] % 100,
+ (ret & 4) ? "ON" : "OFF");
+
+ dev_info(&stw481x->client->dev, "VPLL: %u.%uV %s\n",
+ vpll_val[vpll] / 100, vpll_val[vpll] % 100,
+ (ret & 0x10) ? "ON" : "OFF");
+
+ dev_info(&stw481x->client->dev, "VAUX: %u.%uV %s\n",
+ vaux_val[vaux] / 10, vaux_val[vaux] % 10,
+ vaux_en ? "ON" : "OFF");
+
+ ret = regmap_read(stw481x->map, STW_CONF2, &val);
+ if (ret)
+ return ret;
+
+ dev_info(&stw481x->client->dev, "TWARN: %s threshold, %s\n",
+ it_warn ? "below" : "above",
+ (val & STW_CONF2_MASK_TWARN) ?
+ "enabled" : "mask through VDDOK");
+ dev_info(&stw481x->client->dev, "VMMC: %s\n",
+ (val & STW_CONF2_VMMC_EXT) ? "internal" : "external");
+ dev_info(&stw481x->client->dev, "IT WAKE UP: %s\n",
+ (val & STW_CONF2_MASK_IT_WAKE_UP) ? "enabled" : "masked");
+ dev_info(&stw481x->client->dev, "GPO1: %s\n",
+ (val & STW_CONF2_GPO1) ? "low" : "high impedance");
+ dev_info(&stw481x->client->dev, "GPO2: %s\n",
+ (val & STW_CONF2_GPO2) ? "low" : "high impedance");
+
+ ret = regmap_read(stw481x->map, STW_VCORE_SLEEP, &val);
+ if (ret)
+ return ret;
+ vcore_slp = val & 0x0f;
+ dev_info(&stw481x->client->dev, "VCORE SLEEP: %u.%uV\n",
+ vcore_val[vcore_slp] / 100, vcore_val[vcore_slp] % 100);
+
+ return 0;
+}
+
+/*
+ * MFD cells - we have one cell for the selected operation
+ * mode, and we always have a GPIO cell.
+ */
+static struct mfd_cell stw481x_cells[] = {
+ {
+ .of_compatible = "st,stw481x-vmmc",
+ .name = "stw481x-vmmc-regulator",
+ .id = -1,
+ },
+};
+
+static const struct regmap_config stw481x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int stw481x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct stw481x *stw481x;
+ int ret;
+ int i;
+
+ stw481x = devm_kzalloc(&client->dev, sizeof(*stw481x), GFP_KERNEL);
+ if (!stw481x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, stw481x);
+ stw481x->client = client;
+ stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config);
+ if (IS_ERR(stw481x->map))
+ return PTR_ERR(stw481x->map);
+
+ ret = stw481x_startup(stw481x);
+ if (ret) {
+ dev_err(&client->dev, "chip initialization failed\n");
+ return ret;
+ }
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < ARRAY_SIZE(stw481x_cells); i++) {
+ /* One state holder shared by all cell drivers; this keeps it simple */
+ stw481x_cells[i].platform_data = stw481x;
+ stw481x_cells[i].pdata_size = sizeof(*stw481x);
+ }
+
+ ret = mfd_add_devices(&client->dev, 0, stw481x_cells,
+ ARRAY_SIZE(stw481x_cells), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ dev_info(&client->dev, "initialized STw481x device\n");
+
+ return ret;
+}
+
+static int stw481x_remove(struct i2c_client *client)
+{
+ mfd_remove_devices(&client->dev);
+ return 0;
+}
+
+/*
+ * This ID table is completely unused, as this is a pure
+ * device-tree probed driver, but it has to be here due to
+ * the structure of the I2C core.
+ */
+static const struct i2c_device_id stw481x_id[] = {
+ { "stw481x", 0 },
+ { },
+};
+
+static const struct of_device_id stw481x_match[] = {
+ { .compatible = "st,stw4810", },
+ { .compatible = "st,stw4811", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, stw481x_match);
+
+static struct i2c_driver stw481x_driver = {
+ .driver = {
+ .name = "stw481x",
+ .of_match_table = stw481x_match,
+ },
+ .probe = stw481x_probe,
+ .remove = stw481x_remove,
+ .id_table = stw481x_id,
+};
+
+module_i2c_driver(stw481x_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("STw481x PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 70f4909fee1..87ea51dc623 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -16,6 +16,19 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tc3589x.h>
+/**
+ * enum tc3589x_version - indicates the TC3589x version
+ */
+enum tc3589x_version {
+ TC3589X_TC35890,
+ TC3589X_TC35892,
+ TC3589X_TC35893,
+ TC3589X_TC35894,
+ TC3589X_TC35895,
+ TC3589X_TC35896,
+ TC3589X_UNKNOWN,
+};
+
#define TC3589x_CLKMODE_MODCTL_SLEEP 0x0
#define TC3589x_CLKMODE_MODCTL_OPERATION (1 << 0)
@@ -361,7 +374,21 @@ static int tc3589x_probe(struct i2c_client *i2c,
tc3589x->i2c = i2c;
tc3589x->pdata = pdata;
tc3589x->irq_base = pdata->irq_base;
- tc3589x->num_gpio = id->driver_data;
+
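+ /* TC35893/5/6 provide 20 GPIOs, the other variants 24 */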
+ switch (id->driver_data) {
+ case TC3589X_TC35893:
+ case TC3589X_TC35895:
+ case TC3589X_TC35896:
+ tc3589x->num_gpio = 20;
+ break;
+ case TC3589X_TC35890:
+ case TC3589X_TC35892:
+ case TC3589X_TC35894:
+ case TC3589X_UNKNOWN:
+ default:
+ tc3589x->num_gpio = 24;
+ break;
+ }
i2c_set_clientdata(i2c, tc3589x);
@@ -432,7 +459,13 @@ static int tc3589x_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume);
static const struct i2c_device_id tc3589x_id[] = {
- { "tc3589x", 24 },
+ { "tc35890", TC3589X_TC35890 },
+ { "tc35892", TC3589X_TC35892 },
+ { "tc35893", TC3589X_TC35893 },
+ { "tc35894", TC3589X_TC35894 },
+ { "tc35895", TC3589X_TC35895 },
+ { "tc35896", TC3589X_TC35896 },
+ { "tc3589x", TC3589X_UNKNOWN },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc3589x_id);
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 1c2b994e1f6..71e3e0c5bf7 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -445,7 +445,6 @@ static int ti_ssp_remove(struct platform_device *pdev)
iounmap(ssp->regs);
release_mem_region(ssp->res->start, resource_size(ssp->res));
kfree(ssp);
- dev_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index baaf5a8123b..88718abfb9b 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -56,21 +56,25 @@ EXPORT_SYMBOL_GPL(am335x_tsc_se_update);
void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val)
{
- spin_lock(&tsadc->reg_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache |= val;
am335x_tsc_se_update(tsadc);
- spin_unlock(&tsadc->reg_lock);
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
{
- spin_lock(&tsadc->reg_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache &= ~val;
am335x_tsc_se_update(tsadc);
- spin_unlock(&tsadc->reg_lock);
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
@@ -95,7 +99,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
const __be32 *cur;
u32 val;
int err, ctrl;
- int clk_value, clock_rate;
+ int clock_rate;
int tsc_wires = 0, adc_channels = 0, total_channels;
int readouts = 0;
@@ -196,11 +200,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
}
clock_rate = clk_get_rate(clk);
clk_put(clk);
- clk_value = clock_rate / ADC_CLK;
+ tscadc->clk_div = clock_rate / ADC_CLK;
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
- clk_value = clk_value - 1;
- tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+ tscadc->clk_div--;
+ tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div);
/* Set the control register bits */
ctrl = CNTRLREG_STEPCONFIGWRT |
@@ -303,6 +307,8 @@ static int tscadc_resume(struct device *dev)
tscadc_writel(tscadc_dev, REG_CTRL,
(restore | CNTRLREG_TSCSSENB));
+ tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div);
+
return 0;
}
@@ -326,7 +332,7 @@ static struct platform_driver ti_tscadc_driver = {
.name = "ti_am3359-tscadc",
.owner = THIS_MODULE,
.pm = TSCADC_PM_OPS,
- .of_match_table = of_match_ptr(ti_tscadc_dt_ids),
+ .of_match_table = ti_tscadc_dt_ids,
},
.probe = ti_tscadc_probe,
.remove = ti_tscadc_remove,
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index a6755ec7bd6..dbb34f94e5e 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -678,7 +678,7 @@ static int timb_probe(struct pci_dev *dev,
priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
dev_err(&dev->dev, "Failed to request ctl mem\n");
- goto err_request;
+ goto err_start;
}
priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
@@ -828,13 +828,10 @@ err_config:
iounmap(priv->ctl_membase);
err_ioremap:
release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
-err_request:
- pci_set_drvdata(dev, NULL);
err_start:
pci_disable_device(dev);
err_enable:
kfree(priv);
- pci_set_drvdata(dev, NULL);
return -ENODEV;
}
@@ -851,7 +848,6 @@ static void timb_remove(struct pci_dev *dev)
pci_disable_msix(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(priv);
}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 5ad4b772b09..a081b925d10 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index b8f48647661..b7be0b29557 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -245,7 +245,7 @@ static struct i2c_driver tps65217_driver = {
.driver = {
.name = "tps65217",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(tps65217_of_match),
+ .of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index f54fe4d4f77..ee61fd7c198 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/of.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6586x.h>
@@ -124,6 +125,7 @@ struct tps6586x {
struct i2c_client *client;
struct regmap *regmap;
+ int irq;
struct irq_chip irq_chip;
struct mutex irq_lock;
int irq_base;
@@ -261,12 +263,23 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&tps6586x->irq_lock);
}
+#ifdef CONFIG_PM_SLEEP
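+/* Forward wake configuration to the chip's main interrupt line */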
+static int tps6586x_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+ struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
+ return irq_set_irq_wake(tps6586x->irq, on);
+}
+#else
+#define tps6586x_irq_set_wake NULL
+#endif
+
static struct irq_chip tps6586x_irq_chip = {
.name = "tps6586x",
.irq_bus_lock = tps6586x_irq_lock,
.irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
.irq_disable = tps6586x_irq_disable,
.irq_enable = tps6586x_irq_enable,
+ .irq_set_wake = tps6586x_irq_set_wake,
};
static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
@@ -331,6 +344,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
int new_irq_base;
int irq_num = ARRAY_SIZE(tps6586x_irqs);
+ tps6586x->irq = irq;
+
mutex_init(&tps6586x->irq_lock);
for (i = 0; i < 5; i++) {
tps6586x->mask_reg[i] = 0xff;
@@ -360,10 +375,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
"tps6586x", tps6586x);
- if (!ret) {
+ if (!ret)
device_init_wakeup(tps6586x->dev, 1);
- enable_irq_wake(irq);
- }
return ret;
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index d7927720483..c0f608e3ca9 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -25,6 +25,7 @@
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of.h>
#include <linux/of_device.h>
static struct resource rtc_resources[] = {
@@ -410,14 +411,10 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
if (!ret)
board_info->vmbch_threshold = prop;
- else if (*chip_id == TPS65911)
- dev_warn(&client->dev, "VMBCH-Threshold not specified");
ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
if (!ret)
board_info->vmbch2_threshold = prop;
- else if (*chip_id == TPS65911)
- dev_warn(&client->dev, "VMBCH2-Threshold not specified");
prop = of_property_read_bool(np, "ti,en-ck32k-xtal");
board_info->en_ck32k_xtal = prop;
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index daf66942071..0779d5ab9ab 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -565,13 +565,13 @@ static int twl6040_probe(struct i2c_client *client,
twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
- goto regulator_get_err;
+ return ret;
}
ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
- goto regulator_get_err;
+ return ret;
}
twl6040->dev = &client->dev;
@@ -619,7 +619,7 @@ static int twl6040_probe(struct i2c_client *client,
"twl6040_irq_th", twl6040);
if (ret) {
dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
- goto thirq_err;
+ goto readyirq_err;
}
/* dual-access registers controlled by I2C only */
@@ -659,21 +659,14 @@ static int twl6040_probe(struct i2c_client *client,
ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
NULL, 0, NULL);
if (ret)
- goto mfd_err;
+ goto readyirq_err;
return 0;
-mfd_err:
- devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
-thirq_err:
- devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
readyirq_err:
regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
gpio_err:
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
-regulator_get_err:
- i2c_set_clientdata(client, NULL);
-
return ret;
}
@@ -684,12 +677,9 @@ static int twl6040_remove(struct i2c_client *client)
if (twl6040->power_count)
twl6040_power(twl6040, 0);
- devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
- devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
mfd_remove_devices(&client->dev);
- i2c_set_clientdata(client, NULL);
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index d5966e6b5a7..0313f839e8f 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -553,6 +553,7 @@ static int ucb1x00_probe(struct mcp *mcp)
if (ucb->irq_base < 0) {
dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
ucb->irq_base);
+ ret = ucb->irq_base;
goto err_irq_alloc;
}
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 802dd3cb18c..1e9a4b2102f 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -903,7 +903,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
- { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 3113e39b318..bf8b3b5ad1f 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -243,6 +243,12 @@ int wm5110_patch(struct arizona *arizona)
EXPORT_SYMBOL_GPL(wm5110_patch);
static const struct regmap_irq wm5110_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+ .mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+ },
+ [ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+ .mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+ },
[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -505,6 +511,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
{ 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
+ { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
@@ -592,7 +599,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */
{ 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
- { 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
+ { 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
{ 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */
{ 0x00000481, 0x0040 }, /* R1153 - Class W ANC Threshold 2 */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
@@ -1204,7 +1211,6 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
- { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
@@ -1440,6 +1446,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_MICD_CLAMP_CONTROL:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
@@ -2291,21 +2298,37 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_DSP2_CONTROL_1:
case ARIZONA_DSP2_CLOCKING_1:
case ARIZONA_DSP2_STATUS_1:
case ARIZONA_DSP2_STATUS_2:
case ARIZONA_DSP2_STATUS_3:
+ case ARIZONA_DSP2_SCRATCH_0:
+ case ARIZONA_DSP2_SCRATCH_1:
+ case ARIZONA_DSP2_SCRATCH_2:
+ case ARIZONA_DSP2_SCRATCH_3:
case ARIZONA_DSP3_CONTROL_1:
case ARIZONA_DSP3_CLOCKING_1:
case ARIZONA_DSP3_STATUS_1:
case ARIZONA_DSP3_STATUS_2:
case ARIZONA_DSP3_STATUS_3:
+ case ARIZONA_DSP3_SCRATCH_0:
+ case ARIZONA_DSP3_SCRATCH_1:
+ case ARIZONA_DSP3_SCRATCH_2:
+ case ARIZONA_DSP3_SCRATCH_3:
case ARIZONA_DSP4_CONTROL_1:
case ARIZONA_DSP4_CLOCKING_1:
case ARIZONA_DSP4_STATUS_1:
case ARIZONA_DSP4_STATUS_2:
case ARIZONA_DSP4_STATUS_3:
+ case ARIZONA_DSP4_SCRATCH_0:
+ case ARIZONA_DSP4_SCRATCH_1:
+ case ARIZONA_DSP4_SCRATCH_2:
+ case ARIZONA_DSP4_SCRATCH_3:
return true;
default:
return false;
@@ -2347,25 +2370,41 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_INTERRUPT_RAW_STATUS_7:
case ARIZONA_INTERRUPT_RAW_STATUS_8:
case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
case ARIZONA_AOD_IRQ1:
case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_FX_CTRL2:
case ARIZONA_ASRC_STATUS:
case ARIZONA_DSP_STATUS:
- case ARIZONA_DSP1_CONTROL_1:
- case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_DSP2_STATUS_1:
case ARIZONA_DSP2_STATUS_2:
case ARIZONA_DSP2_STATUS_3:
+ case ARIZONA_DSP2_SCRATCH_0:
+ case ARIZONA_DSP2_SCRATCH_1:
+ case ARIZONA_DSP2_SCRATCH_2:
+ case ARIZONA_DSP2_SCRATCH_3:
case ARIZONA_DSP3_STATUS_1:
case ARIZONA_DSP3_STATUS_2:
case ARIZONA_DSP3_STATUS_3:
+ case ARIZONA_DSP3_SCRATCH_0:
+ case ARIZONA_DSP3_SCRATCH_1:
+ case ARIZONA_DSP3_SCRATCH_2:
+ case ARIZONA_DSP3_SCRATCH_3:
case ARIZONA_DSP4_STATUS_1:
case ARIZONA_DSP4_STATUS_2:
case ARIZONA_DSP4_STATUS_3:
+ case ARIZONA_DSP4_SCRATCH_0:
+ case ARIZONA_DSP4_SCRATCH_1:
+ case ARIZONA_DSP4_SCRATCH_2:
+ case ARIZONA_DSP4_SCRATCH_3:
return true;
default:
return false;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e1c283e6d4e..03082751166 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -33,84 +33,6 @@
#include "wm8994.h"
-/**
- * wm8994_reg_read: Read a single WM8994 register.
- *
- * @wm8994: Device to read from.
- * @reg: Register to read.
- */
-int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(wm8994->regmap, reg, &val);
-
- if (ret < 0)
- return ret;
- else
- return val;
-}
-EXPORT_SYMBOL_GPL(wm8994_reg_read);
-
-/**
- * wm8994_bulk_read: Read multiple WM8994 registers
- *
- * @wm8994: Device to read from
- * @reg: First register
- * @count: Number of registers
- * @buf: Buffer to fill. The data will be returned big endian.
- */
-int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
- int count, u16 *buf)
-{
- return regmap_bulk_read(wm8994->regmap, reg, buf, count);
-}
-
-/**
- * wm8994_reg_write: Write a single WM8994 register.
- *
- * @wm8994: Device to write to.
- * @reg: Register to write to.
- * @val: Value to write.
- */
-int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
- unsigned short val)
-{
- return regmap_write(wm8994->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(wm8994_reg_write);
-
-/**
- * wm8994_bulk_write: Write multiple WM8994 registers
- *
- * @wm8994: Device to write to
- * @reg: First register
- * @count: Number of registers
- * @buf: Buffer to write from. Data must be big-endian formatted.
- */
-int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
- int count, const u16 *buf)
-{
- return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
-}
-EXPORT_SYMBOL_GPL(wm8994_bulk_write);
-
-/**
- * wm8994_set_bits: Set the value of a bitfield in a WM8994 register
- *
- * @wm8994: Device to write to.
- * @reg: Register to write to.
- * @mask: Mask of bits to set.
- * @val: Value to set (unshifted)
- */
-int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
- unsigned short mask, unsigned short val)
-{
- return regmap_update_bits(wm8994->regmap, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(wm8994_set_bits);
-
static struct mfd_cell wm8994_regulator_devs[] = {
{
.name = "wm8994-ldo",
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8dacd4c9ee8..a3e291d0df9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -381,19 +381,6 @@ config HMC6352
This driver provides support for the Honeywell HMC6352 compass,
providing configuration and heading data via sysfs.
-config EP93XX_PWM
- tristate "EP93xx PWM support"
- depends on ARCH_EP93XX
- help
- This option enables device driver support for the PWM channels
- on the Cirrus EP93xx processors. The EP9307 chip only has one
- PWM channel all the others have two, the second channel is an
- alternate function of the EGPIO14 pin. A sysfs interface is
- provided to control the PWM channels.
-
- To compile this driver as a module, choose M here: the module will
- be called ep93xx_pwm.
-
config DS1682
tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
depends on I2C
@@ -537,4 +524,5 @@ source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/mic/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c235d5b6831..f45473e68bf 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_APDS9802ALS) += apds9802als.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
-obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
@@ -53,3 +52,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
+obj-y += mic/
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index 1256a4bf1c0..b7ebf8021d9 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -297,7 +297,7 @@ static int __init charlcd_probe(struct platform_device *pdev)
lcd->irq = platform_get_irq(pdev, 0);
/* If no IRQ is supplied, we'll survive without it */
if (lcd->irq >= 0) {
- if (request_irq(lcd->irq, charlcd_interrupt, IRQF_DISABLED,
+ if (request_irq(lcd->irq, charlcd_interrupt, 0,
DRIVERNAME, lcd)) {
ret = -EIO;
goto out_no_irq;
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 494d0500bda..a6dc56e1bc5 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -90,8 +90,10 @@ int pwm_channel_alloc(int index, struct pwm_channel *ch)
unsigned long flags;
int status = 0;
- /* insist on PWM init, with this signal pinned out */
- if (!pwm || !(pwm->mask & 1 << index))
+ if (!pwm)
+ return -EPROBE_DEFER;
+
+ if (!(pwm->mask & 1 << index))
return -ENODEV;
if (index < 0 || index >= PWM_NCHAN || !ch)
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 057580e026c..48ea33d15a7 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
#define BH1780_REG_CONTROL 0x80
#define BH1780_REG_PARTID 0x8A
@@ -244,6 +245,15 @@ static const struct i2c_device_id bh1780_id[] = {
{ },
};
+#ifdef CONFIG_OF
+static const struct of_device_id of_bh1780_match[] = {
+ { .compatible = "rohm,bh1780gli", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_bh1780_match);
+#endif
+
static struct i2c_driver bh1780_driver = {
.probe = bh1780_probe,
.remove = bh1780_remove,
@@ -251,6 +261,7 @@ static struct i2c_driver bh1780_driver = {
.driver = {
.name = "bh1780",
.pm = &bh1780_pm,
+ .of_match_table = of_match_ptr(of_bh1780_match),
},
};
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 849e2fed4da..2704d885a9b 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -374,7 +374,7 @@ int bmp085_detect(struct device *dev)
}
EXPORT_SYMBOL_GPL(bmp085_detect);
-static void __init bmp085_get_of_properties(struct bmp085_data *data)
+static void bmp085_get_of_properties(struct bmp085_data *data)
{
#ifdef CONFIG_OF
struct device_node *np = data->dev->of_node;
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index c6bd7e84de2..7be89832db1 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -10,6 +10,8 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/completion.h>
#include <linux/miscdevice.h>
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 7b56563f8b7..9e2b985293f 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -88,6 +88,8 @@
* interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
*/
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
@@ -631,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dst, src;
- unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
- DMA_COMPL_SKIP_SRC_UNMAP;
+ unsigned long dma_flags = 0;
dst_sg = buf->vb.sglist;
dst_nents = buf->vb.sglen;
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index 2e50f811ff5..fb397e7d1cc 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -176,7 +176,7 @@ static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct cb710_chip *chip = pci_get_drvdata(pdev);
- free_irq(pdev->irq, chip);
+ devm_free_irq(&pdev->dev, pdev->irq, chip);
pci_save_state(pdev);
pci_disable_device(pdev);
if (state.event & PM_EVENT_SLEEP)
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 04f2e1fa9dd..9536852fd4c 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -96,4 +96,17 @@ config EEPROM_DIGSY_MTC_CFG
If unsure, say N.
+config EEPROM_SUNXI_SID
+ tristate "Allwinner sunxi security ID support"
+ depends on ARCH_SUNXI && SYSFS
+ help
+ This is a driver for the 'security ID' available on various Allwinner
+ devices.
+
+ Due to the potential risks involved with changing e-fuses,
+ this driver is read-only.
+
+ This driver can also be built as a module. If so, the module
+ will be called sunxi_sid.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index fc1e81d2926..9507aec95e9 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
+obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 5d4fd69d04c..d87f77f790d 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -22,7 +22,7 @@
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
/*
* I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
@@ -428,6 +428,9 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
{
struct at24_data *at24;
+ if (unlikely(off >= attr->size))
+ return -EFBIG;
+
at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
return at24_write(at24, buf, off, count);
}
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 840b3594a5a..4f3bca1003a 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -462,10 +462,17 @@ static int at25_remove(struct spi_device *spi)
/*-------------------------------------------------------------------------*/
+static const struct of_device_id at25_of_match[] = {
+ { .compatible = "atmel,at25", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, at25_of_match);
+
static struct spi_driver at25_driver = {
.driver = {
.name = "at25",
.owner = THIS_MODULE,
+ .of_match_table = at25_of_match,
},
.probe = at25_probe,
.remove = at25_remove,
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 94cfc121257..3a015abb444 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -202,7 +202,7 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
edev = dev_get_drvdata(dev);
if (unlikely(off >= edev->bin.size))
- return 0;
+ return -EFBIG;
if ((off + count) > edev->bin.size)
count = edev->bin.size - off;
if (unlikely(!count))
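
The at24 and eeprom_93xx46 hunks converge on the same convention for sysfs binary attributes: a write that starts at or beyond the attribute size fails with -EFBIG instead of silently returning 0, while a write that merely runs past the end is clamped. A minimal sketch of that bounds handling in a bin_attribute write handler (hypothetical device, the actual copy is omitted):

static ssize_t foo_bin_write(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *attr, char *buf,
			     loff_t off, size_t count)
{
	if (off >= attr->size)
		return -EFBIG;			/* starts past the end: report it */

	if (off + count > attr->size)
		count = attr->size - off;	/* clamp a partial write */

	if (!count)
		return 0;

	/* ... copy 'count' bytes from buf into the device at offset 'off' ... */
	return count;
}
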
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
new file mode 100644
index 00000000000..9c34e570430
--- /dev/null
+++ b/drivers/misc/eeprom/sunxi_sid.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * http://www.linux-sunxi.org
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver exposes the Allwinner security ID, efuses exported in byte-
+ * sized chunks.
+ */
+
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DRV_NAME "sunxi-sid"
+
+struct sunxi_sid_data {
+ void __iomem *reg_base;
+ unsigned int keysize;
+};
+
+/* We read the entire key, due to a 32 bit read alignment requirement. Since we
+ * want to return the requested byte, this results in somewhat slower code and
+ * uses 4 times more reads than needed but keeps the code simpler. Since the SID is
+ * only very rarely probed, this is not really an issue.
+ */
+static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data,
+ const unsigned int offset)
+{
+ u32 sid_key;
+
+ if (offset >= sid_data->keysize)
+ return 0;
+
+ sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4));
+ sid_key >>= (offset % 4) * 8;
+
+ return sid_key; /* Only return the last byte */
+}
+
+static ssize_t sid_read(struct file *fd, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t size)
+{
+ struct platform_device *pdev;
+ struct sunxi_sid_data *sid_data;
+ int i;
+
+ pdev = to_platform_device(kobj_to_dev(kobj));
+ sid_data = platform_get_drvdata(pdev);
+
+ if (pos < 0 || pos >= sid_data->keysize)
+ return 0;
+ if (size > sid_data->keysize - pos)
+ size = sid_data->keysize - pos;
+
+ for (i = 0; i < size; i++)
+ buf[i] = sunxi_sid_read_byte(sid_data, pos + i);
+
+ return i;
+}
+
+static struct bin_attribute sid_bin_attr = {
+ .attr = { .name = "eeprom", .mode = S_IRUGO, },
+ .read = sid_read,
+};
+
+static int sunxi_sid_remove(struct platform_device *pdev)
+{
+ device_remove_bin_file(&pdev->dev, &sid_bin_attr);
+ dev_dbg(&pdev->dev, "driver unloaded\n");
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_sid_of_match[] = {
+ { .compatible = "allwinner,sun4i-sid", .data = (void *)16},
+ { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
+
+static int sunxi_sid_probe(struct platform_device *pdev)
+{
+ struct sunxi_sid_data *sid_data;
+ struct resource *res;
+ const struct of_device_id *of_dev_id;
+ u8 *entropy;
+ unsigned int i;
+
+ sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data),
+ GFP_KERNEL);
+ if (!sid_data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sid_data->reg_base))
+ return PTR_ERR(sid_data->reg_base);
+
+ of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev);
+ if (!of_dev_id)
+ return -ENODEV;
+ sid_data->keysize = (int)of_dev_id->data;
+
+ platform_set_drvdata(pdev, sid_data);
+
+ sid_bin_attr.size = sid_data->keysize;
+ if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
+ return -ENODEV;
+
+ entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL);
+ for (i = 0; i < sid_data->keysize; i++)
+ entropy[i] = sunxi_sid_read_byte(sid_data, i);
+ add_device_randomness(entropy, sid_data->keysize);
+ kfree(entropy);
+
+ dev_dbg(&pdev->dev, "loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver sunxi_sid_driver = {
+ .probe = sunxi_sid_probe,
+ .remove = sunxi_sid_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sunxi_sid_of_match,
+ },
+};
+module_platform_driver(sunxi_sid_driver);
+
+MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
+MODULE_DESCRIPTION("Allwinner sunxi security id driver");
+MODULE_LICENSE("GPL");
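
sunxi_sid_read_byte() above always performs an aligned 32-bit big-endian read and then shifts the wanted byte into the low bits, accepting up to four reads per byte in exchange for simpler code. The same arithmetic as a standalone illustration; the fake_sid array is invented and only stands in for the memory behind reg_base:

#include <stdint.h>
#include <stdio.h>

/* Invented 16-byte SID, laid out as it would appear in memory. */
static const uint8_t fake_sid[16] = {
	0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
	0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe,
};

/* Mirror of the driver logic: aligned big-endian 32-bit read, then shift. */
static uint8_t sid_read_byte(unsigned int offset)
{
	unsigned int aligned = offset & ~3u;		/* round_down(offset, 4) */
	uint32_t word = (uint32_t)fake_sid[aligned] << 24 |
			(uint32_t)fake_sid[aligned + 1] << 16 |
			(uint32_t)fake_sid[aligned + 2] << 8 |
			(uint32_t)fake_sid[aligned + 3];	/* like ioread32be() */

	return word >> ((offset % 4) * 8);	/* u8 return keeps only the low byte */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(fake_sid); i++)
		printf("%02x", sid_read_byte(i));
	printf("\n");
	return 0;
}
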
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
deleted file mode 100644
index cdb67a9c195..00000000000
--- a/drivers/misc/ep93xx_pwm.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Simple PWM driver for EP93XX
- *
- * (c) Copyright 2009 Matthieu Crapet <mcrapet@gmail.com>
- * (c) Copyright 2009 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * EP9307 has only one channel:
- * - PWMOUT
- *
- * EP9301/02/12/15 have two channels:
- * - PWMOUT
- * - PWMOUT1 (alternate function for EGPIO14)
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/platform.h>
-
-#define EP93XX_PWMx_TERM_COUNT 0x00
-#define EP93XX_PWMx_DUTY_CYCLE 0x04
-#define EP93XX_PWMx_ENABLE 0x08
-#define EP93XX_PWMx_INVERT 0x0C
-
-#define EP93XX_PWM_MAX_COUNT 0xFFFF
-
-struct ep93xx_pwm {
- void __iomem *mmio_base;
- struct clk *clk;
- u32 duty_percent;
-};
-
-/*
- * /sys/devices/platform/ep93xx-pwm.N
- * /min_freq read-only minimum pwm output frequency
- * /max_req read-only maximum pwm output frequency
- * /freq read-write pwm output frequency (0 = disable output)
- * /duty_percent read-write pwm duty cycle percent (1..99)
- * /invert read-write invert pwm output
- */
-
-static ssize_t ep93xx_pwm_get_min_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
- unsigned long rate = clk_get_rate(pwm->clk);
-
- return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1));
-}
-
-static ssize_t ep93xx_pwm_get_max_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
- unsigned long rate = clk_get_rate(pwm->clk);
-
- return sprintf(buf, "%ld\n", rate / 2);
-}
-
-static ssize_t ep93xx_pwm_get_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-
- if (readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1) {
- unsigned long rate = clk_get_rate(pwm->clk);
- u16 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-
- return sprintf(buf, "%ld\n", rate / (term + 1));
- } else {
- return sprintf(buf, "disabled\n");
- }
-}
-
-static ssize_t ep93xx_pwm_set_freq(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return -EINVAL;
-
- if (val == 0) {
- writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE);
- } else if (val <= (clk_get_rate(pwm->clk) / 2)) {
- u32 term, duty;
-
- val = (clk_get_rate(pwm->clk) / val) - 1;
- if (val > EP93XX_PWM_MAX_COUNT)
- val = EP93XX_PWM_MAX_COUNT;
- if (val < 1)
- val = 1;
-
- term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
- duty = ((val + 1) * pwm->duty_percent / 100) - 1;
-
- /* If pwm is running, order is important */
- if (val > term) {
- writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
- writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
- } else {
- writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
- writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
- }
-
- if (!readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1)
- writel(0x1, pwm->mmio_base + EP93XX_PWMx_ENABLE);
- } else {
- return -EINVAL;
- }
-
- return count;
-}
-
-static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-
- return sprintf(buf, "%d\n", pwm->duty_percent);
-}
-
-static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return -EINVAL;
-
- if (val > 0 && val < 100) {
- u32 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
- u32 duty = ((term + 1) * val / 100) - 1;
-
- writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
- pwm->duty_percent = val;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t ep93xx_pwm_get_invert(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
- int inverted = readl(pwm->mmio_base + EP93XX_PWMx_INVERT) & 0x1;
-
- return sprintf(buf, "%d\n", inverted);
-}
-
-static ssize_t ep93xx_pwm_set_invert(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return -EINVAL;
-
- if (val == 0)
- writel(0x0, pwm->mmio_base + EP93XX_PWMx_INVERT);
- else if (val == 1)
- writel(0x1, pwm->mmio_base + EP93XX_PWMx_INVERT);
- else
- return -EINVAL;
-
- return count;
-}
-
-static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
-static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
-static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
- ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
-static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
- ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
-static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
- ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
-
-static struct attribute *ep93xx_pwm_attrs[] = {
- &dev_attr_min_freq.attr,
- &dev_attr_max_freq.attr,
- &dev_attr_freq.attr,
- &dev_attr_duty_percent.attr,
- &dev_attr_invert.attr,
- NULL
-};
-
-static const struct attribute_group ep93xx_pwm_sysfs_files = {
- .attrs = ep93xx_pwm_attrs,
-};
-
-static int ep93xx_pwm_probe(struct platform_device *pdev)
-{
- struct ep93xx_pwm *pwm;
- struct resource *res;
- int ret;
-
- pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (!pwm)
- return -ENOMEM;
-
- pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk");
- if (IS_ERR(pwm->clk))
- return PTR_ERR(pwm->clk);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->mmio_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pwm->mmio_base))
- return PTR_ERR(pwm->mmio_base);
-
- ret = ep93xx_pwm_acquire_gpio(pdev);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
- if (ret) {
- ep93xx_pwm_release_gpio(pdev);
- return ret;
- }
-
- pwm->duty_percent = 50;
-
- /* disable pwm at startup. Avoids zero value. */
- writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE);
- writel(EP93XX_PWM_MAX_COUNT, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
- writel(EP93XX_PWM_MAX_COUNT/2, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
-
- clk_enable(pwm->clk);
-
- platform_set_drvdata(pdev, pwm);
- return 0;
-}
-
-static int ep93xx_pwm_remove(struct platform_device *pdev)
-{
- struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-
- writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE);
- clk_disable(pwm->clk);
- sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
- ep93xx_pwm_release_gpio(pdev);
-
- return 0;
-}
-
-static struct platform_driver ep93xx_pwm_driver = {
- .driver = {
- .name = "ep93xx-pwm",
- .owner = THIS_MODULE,
- },
- .probe = ep93xx_pwm_probe,
- .remove = ep93xx_pwm_remove,
-};
-module_platform_driver(ep93xx_pwm_driver);
-
-MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
- "H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_DESCRIPTION("EP93xx PWM driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ep93xx-pwm");
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 0346d87c5fe..6b3bf9ab051 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -153,7 +153,6 @@ error_ioremap:
error_heartbeat:
ibmasm_event_buffer_exit(sp);
error_eventbuffer:
- pci_set_drvdata(pdev, NULL);
kfree(sp);
error_kmalloc:
pci_release_regions(pdev);
@@ -165,7 +164,7 @@ error_resources:
static void ibmasm_remove_one(struct pci_dev *pdev)
{
- struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev);
+ struct service_processor *sp = pci_get_drvdata(pdev);
dbg("Unregistering UART\n");
ibmasm_unregister_uart(sp);
@@ -182,7 +181,6 @@ static void ibmasm_remove_one(struct pci_dev *pdev)
ibmasm_free_remote_input_dev(sp);
iounmap(sp->base_address);
ibmasm_event_buffer_exit(sp);
- pci_set_drvdata(pdev, NULL);
kfree(sp);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 2fc0586ce3b..a2edb2ee092 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -44,13 +44,25 @@
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
+#include <linux/mman.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
+
#define DEFAULT_COUNT 10
-#define REC_NUM_DEFAULT 10
#define EXEC_SIZE 64
enum cname {
@@ -86,6 +98,9 @@ enum ctype {
CT_EXEC_STACK,
CT_EXEC_KMALLOC,
CT_EXEC_VMALLOC,
+ CT_EXEC_USERSPACE,
+ CT_ACCESS_USERSPACE,
+ CT_WRITE_RO,
};
static char* cp_name[] = {
@@ -119,6 +134,9 @@ static char* cp_type[] = {
"EXEC_STACK",
"EXEC_KMALLOC",
"EXEC_VMALLOC",
+ "EXEC_USERSPACE",
+ "ACCESS_USERSPACE",
+ "WRITE_RO",
};
static struct jprobe lkdtm;
@@ -139,9 +157,10 @@ static DEFINE_SPINLOCK(lock_me_up);
static u8 data_area[EXEC_SIZE];
+static const unsigned long rodata = 0xAA55AA55;
+
module_param(recur_count, int, 0644);
-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
- "default is 10");
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
@@ -280,16 +299,16 @@ static int lkdtm_parse_commandline(void)
return -EINVAL;
}
-static int recursive_loop(int a)
+static int recursive_loop(int remaining)
{
- char buf[1024];
+ char buf[REC_STACK_SIZE];
- memset(buf,0xFF,1024);
- recur_count--;
- if (!recur_count)
+ /* Make sure compiler does not optimize this away. */
+ memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ if (!remaining)
return 0;
else
- return recursive_loop(a);
+ return recursive_loop(remaining - 1);
}
static void do_nothing(void)
@@ -297,6 +316,14 @@ static void do_nothing(void)
return;
}
+static noinline void corrupt_stack(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8];
+
+ memset((void *)data, 0, 64);
+}
+
static void execute_location(void *dst)
{
void (*func)(void) = dst;
@@ -305,6 +332,15 @@ static void execute_location(void *dst)
func();
}
+static void execute_user_location(void *dst)
+{
+ void (*func)(void) = dst;
+
+ if (copy_to_user(dst, do_nothing, EXEC_SIZE))
+ return;
+ func();
+}
+
static void lkdtm_do_action(enum ctype which)
{
switch (which) {
@@ -325,15 +361,11 @@ static void lkdtm_do_action(enum ctype which)
;
break;
case CT_OVERFLOW:
- (void) recursive_loop(0);
+ (void) recursive_loop(recur_count);
break;
- case CT_CORRUPT_STACK: {
- /* Make sure the compiler creates and uses an 8 char array. */
- volatile char data[8];
-
- memset((void *)data, 0, 64);
+ case CT_CORRUPT_STACK:
+ corrupt_stack();
break;
- }
case CT_UNALIGNED_LOAD_STORE_WRITE: {
static u8 data[5] __attribute__((aligned(4))) = {1, 2,
3, 4, 5};
@@ -401,6 +433,49 @@ static void lkdtm_do_action(enum ctype which)
vfree(vmalloc_area);
break;
}
+ case CT_EXEC_USERSPACE: {
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+ execute_user_location((void *)user_addr);
+ vm_munmap(user_addr, PAGE_SIZE);
+ break;
+ }
+ case CT_ACCESS_USERSPACE: {
+ unsigned long user_addr, tmp;
+ unsigned long *ptr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ ptr = (unsigned long *)user_addr;
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+ *ptr = tmp;
+
+ vm_munmap(user_addr, PAGE_SIZE);
+
+ break;
+ }
+ case CT_WRITE_RO: {
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)&rodata;
+ *ptr ^= 0xabcd1234;
+
+ break;
+ }
case CT_NONE:
default:
break;
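
The new REC_STACK_SIZE/REC_NUM_DEFAULT macros size the recursion so it reliably runs off the end of the thread stack without tripping the frame-size warning that CONFIG_FRAME_WARN enables. Purely as an illustration, assuming CONFIG_FRAME_WARN=1024 and an 8 KiB THREAD_SIZE (example values, not what every configuration uses), each frame carries a 512-byte buffer and the default depth becomes (8192 / 512) * 2 = 32 frames, roughly twice what fits in the stack. A standalone check of that arithmetic:

#include <stdio.h>

/* Example values only; the real ones depend on the kernel configuration. */
#define CONFIG_FRAME_WARN	1024
#define THREAD_SIZE		8192

#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE	(CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE	(THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT	((THREAD_SIZE / REC_STACK_SIZE) * 2)

int main(void)
{
	printf("per-frame buffer: %d bytes\n", REC_STACK_SIZE);
	printf("default depth:    %d frames\n", REC_NUM_DEFAULT);
	printf("stack used:       %d bytes (thread stack is %d)\n",
	       REC_STACK_SIZE * REC_NUM_DEFAULT, THREAD_SIZE);
	return 0;
}
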
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index f6ff711aa5b..d22c6864508 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -58,6 +58,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
dev->iamthif_stall_timer = 0;
+ dev->iamthif_open_count = 0;
}
/**
@@ -78,8 +79,10 @@ int mei_amthif_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
- dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
- return -ENOENT;
+ ret = i;
+ dev_info(&dev->pdev->dev,
+ "amthif: failed to find the client %d\n", ret);
+ return ret;
}
cl->me_client_id = dev->me_clients[i].client_id;
@@ -106,8 +109,9 @@ int mei_amthif_host_init(struct mei_device *dev)
ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
if (ret < 0) {
- dev_err(&dev->pdev->dev, "amthif: failed link client\n");
- return -ENOENT;
+ dev_err(&dev->pdev->dev,
+ "amthif: failed link client %d\n", ret);
+ return ret;
}
cl->state = MEI_FILE_CONNECTING;
@@ -313,13 +317,13 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
- if (mei_write_message(dev, &mei_hdr,
- (unsigned char *)dev->iamthif_msg_buf))
- return -ENODEV;
+ ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
+ if (ret)
+ return ret;
if (mei_hdr.msg_complete) {
if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
- return -ENODEV;
+ return -EIO;
dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
@@ -459,6 +463,16 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_msg_hdr mei_hdr;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
u32 msg_slots = mei_data2slots(len);
+ int rets;
+
+ rets = mei_cl_flow_ctrl_creds(cl);
+ if (rets < 0)
+ return rets;
+
+ if (rets == 0) {
+ cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+ return 0;
+ }
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
@@ -481,16 +495,17 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, &mei_hdr,
- dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- cl->status = -ENODEV;
- list_del(&cb->list);
- return -ENODEV;
+ rets = mei_write_message(dev, &mei_hdr,
+ dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
+ if (rets) {
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ cl->status = rets;
+ list_del(&cb->list);
+ return rets;
}
if (mei_cl_flow_ctrl_reduce(cl))
- return -ENODEV;
+ return -EIO;
dev->iamthif_msg_buf_index += mei_hdr.length;
cl->status = 0;
@@ -720,8 +735,8 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file)
*/
int mei_amthif_release(struct mei_device *dev, struct file *file)
{
- if (dev->open_handle_count > 0)
- dev->open_handle_count--;
+ if (dev->iamthif_open_count > 0)
+ dev->iamthif_open_count--;
if (dev->iamthif_file_object == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index cd2033cd712..4bc7d620d69 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -245,7 +245,7 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
/* Check if we have an ME client device */
id = mei_me_cl_by_id(dev, cl->me_client_id);
if (id < 0)
- return -ENODEV;
+ return id;
if (length > dev->me_clients[id].props.max_msg_length)
return -EINVAL;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index e0684b4d9a0..87c96e4669e 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -187,10 +187,14 @@ int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
*/
int mei_cl_flush_queues(struct mei_cl *cl)
{
+ struct mei_device *dev;
+
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
- dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+ dev = cl->dev;
+
+ cl_dbg(dev, cl, "remove list entry belonging to cl\n");
mei_io_list_flush(&cl->dev->read_list, cl);
mei_io_list_flush(&cl->dev->write_list, cl);
mei_io_list_flush(&cl->dev->write_waiting_list, cl);
@@ -271,6 +275,7 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
int mei_cl_link(struct mei_cl *cl, int id)
{
struct mei_device *dev;
+ long open_handle_count;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -284,7 +289,14 @@ int mei_cl_link(struct mei_cl *cl, int id)
if (id >= MEI_CLIENTS_MAX) {
dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
- return -ENOENT;
+ return -EMFILE;
+ }
+
+ open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
+ if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
+		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
+ MEI_MAX_OPEN_HANDLE_COUNT);
+ return -EMFILE;
}
dev->open_handle_count++;
@@ -296,7 +308,7 @@ int mei_cl_link(struct mei_cl *cl, int id)
cl->state = MEI_FILE_INITIALIZING;
- dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
+ cl_dbg(dev, cl, "link cl\n");
return 0;
}
@@ -308,7 +320,6 @@ int mei_cl_link(struct mei_cl *cl, int id)
int mei_cl_unlink(struct mei_cl *cl)
{
struct mei_device *dev;
- struct mei_cl *pos, *next;
/* don't shout on error exit path */
if (!cl)
@@ -320,14 +331,21 @@ int mei_cl_unlink(struct mei_cl *cl)
dev = cl->dev;
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (cl->host_client_id == pos->host_client_id) {
- dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
- pos->host_client_id, pos->me_client_id);
- list_del_init(&pos->link);
- break;
- }
- }
+ cl_dbg(dev, cl, "unlink client");
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+
+ /* never clear the 0 bit */
+ if (cl->host_client_id)
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+
+ list_del_init(&cl->link);
+
+ cl->state = MEI_FILE_INITIALIZING;
+
+ list_del_init(&cl->link);
+
return 0;
}
@@ -341,17 +359,6 @@ void mei_host_client_init(struct work_struct *work)
mutex_lock(&dev->device_lock);
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
-
- /*
- * Reserving the first three client IDs
- * 0: Reserved for MEI Bus Message communications
- * 1: Reserved for Watchdog
- * 2: Reserved for AMTHI
- */
- bitmap_set(dev->host_clients_map, 0, 3);
-
for (i = 0; i < dev->me_clients_num; i++) {
client_props = &dev->me_clients[i].props;
@@ -390,6 +397,8 @@ int mei_cl_disconnect(struct mei_cl *cl)
dev = cl->dev;
+ cl_dbg(dev, cl, "disconnecting");
+
if (cl->state != MEI_FILE_DISCONNECTING)
return 0;
@@ -402,13 +411,13 @@ int mei_cl_disconnect(struct mei_cl *cl)
dev->hbuf_is_ready = false;
if (mei_hbm_cl_disconnect_req(dev, cl)) {
rets = -ENODEV;
- dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+ cl_err(dev, cl, "failed to disconnect.\n");
goto free;
}
mdelay(10); /* Wait for hardware disconnection ready */
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
- dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
+ cl_dbg(dev, cl, "add disconnect cb to control write list\n");
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
@@ -421,18 +430,17 @@ int mei_cl_disconnect(struct mei_cl *cl)
mutex_lock(&dev->device_lock);
if (MEI_FILE_DISCONNECTED == cl->state) {
rets = 0;
- dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
+ cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
} else {
rets = -ENODEV;
if (MEI_FILE_DISCONNECTED != cl->state)
- dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
+ cl_err(dev, cl, "wrong status client disconnect.\n");
if (err)
- dev_dbg(&dev->pdev->dev,
- "wait failed disconnect err=%08x\n",
+ cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
err);
- dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
+ cl_err(dev, cl, "failed to disconnect from FW client.\n");
}
mei_io_list_flush(&dev->ctrl_rd_list, cl);
@@ -639,13 +647,12 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
return -ENODEV;
if (cl->read_cb) {
- dev_dbg(&dev->pdev->dev, "read is pending.\n");
+ cl_dbg(dev, cl, "read is pending.\n");
return -EBUSY;
}
i = mei_me_cl_by_id(dev, cl->me_client_id);
if (i < 0) {
- dev_err(&dev->pdev->dev, "no such me client %d\n",
- cl->me_client_id);
+ cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
return -ENODEV;
}
@@ -664,6 +671,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
if (dev->hbuf_is_ready) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_flow_control_req(dev, cl)) {
+ cl_err(dev, cl, "flow control send failed\n");
rets = -ENODEV;
goto err;
}
@@ -691,10 +699,32 @@ err:
int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
s32 *slots, struct mei_cl_cb *cmpl_list)
{
- struct mei_device *dev = cl->dev;
+ struct mei_device *dev;
+ struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
- size_t len = cb->request_buffer.size - cb->buf_idx;
- u32 msg_slots = mei_data2slots(len);
+ size_t len;
+ u32 msg_slots;
+ int rets;
+
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ buf = &cb->request_buffer;
+
+ rets = mei_cl_flow_ctrl_creds(cl);
+ if (rets < 0)
+ return rets;
+
+ if (rets == 0) {
+ cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+ return 0;
+ }
+
+ len = buf->size - cb->buf_idx;
+ msg_slots = mei_data2slots(len);
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
@@ -714,16 +744,15 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
- dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
+ cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
- dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, &mei_hdr,
- cb->request_buffer.data + cb->buf_idx)) {
- cl->status = -ENODEV;
+ rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
+ if (rets) {
+ cl->status = rets;
list_move_tail(&cb->list, &cmpl_list->list);
- return -ENODEV;
+ return rets;
}
cl->status = 0;
@@ -732,7 +761,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
if (mei_hdr.msg_complete) {
if (mei_cl_flow_ctrl_reduce(cl))
- return -ENODEV;
+ return -EIO;
list_move_tail(&cb->list, &dev->write_waiting_list.list);
}
@@ -767,7 +796,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
buf = &cb->request_buffer;
- dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size);
+ cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);
cb->fop_type = MEI_FOP_WRITE;
@@ -800,14 +829,10 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
- dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
- MEI_HDR_PRM(&mei_hdr));
-
- if (mei_write_message(dev, &mei_hdr, buf->data)) {
- rets = -EIO;
+ rets = mei_write_message(dev, &mei_hdr, buf->data);
+ if (rets)
goto err;
- }
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
@@ -898,11 +923,11 @@ void mei_cl_all_wakeup(struct mei_device *dev)
struct mei_cl *cl, *next;
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
if (waitqueue_active(&cl->rx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
+ cl_dbg(dev, cl, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
}
if (waitqueue_active(&cl->tx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
+ cl_dbg(dev, cl, "Waking up writing client!\n");
wake_up_interruptible(&cl->tx_wait);
}
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 892cc4207fa..c8396e582f1 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -115,4 +115,13 @@ void mei_cl_all_disconnect(struct mei_device *dev);
void mei_cl_all_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
+#define MEI_CL_FMT "cl:host=%02d me=%02d "
+#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id
+
+#define cl_dbg(dev, cl, format, arg...) \
+ dev_dbg(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
+#define cl_err(dev, cl, format, arg...) \
+ dev_err(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
#endif /* _MEI_CLIENT_H_ */
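
The cl_dbg()/cl_err() helpers prepend the host and ME client IDs to every message, which is what lets the call sites in client.c, interrupt.c and amthif.c drop the hand-written "cl = %d" formatting. A userspace-sized reduction of the same macro technique, with printf standing in for dev_dbg and the GNU ##__VA_ARGS__ extension used exactly as the kernel macros use ##arg:

#include <stdio.h>

#define CL_FMT "cl:host=%02d me=%02d "

/* prefix every message with the client identifiers, like cl_dbg()/cl_err() */
#define cl_dbg(host, me, fmt, ...) \
	printf(CL_FMT fmt, (host), (me), ##__VA_ARGS__)

int main(void)
{
	cl_dbg(2, 17, "link cl\n");
	cl_dbg(2, 17, "completed read length = %lu\n", 128UL);
	return 0;
}
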
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 0a0448326e9..9b3a0fb7f26 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -49,7 +49,7 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
kfree(dev->me_clients);
dev->me_clients = NULL;
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
+ dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n",
dev->me_clients_num * sizeof(struct mei_me_client));
/* allocate storage for ME clients representation */
clients = kcalloc(dev->me_clients_num,
@@ -174,7 +174,7 @@ int mei_hbm_start_req(struct mei_device *dev)
dev_err(&dev->pdev->dev, "version message write failed\n");
dev->dev_state = MEI_DEV_RESETTING;
mei_reset(dev, 1);
- return -ENODEV;
+ return -EIO;
}
dev->hbm_state = MEI_HBM_START;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
@@ -677,7 +677,10 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
case HOST_ENUM_RES_CMD:
enum_res = (struct hbm_host_enum_response *) mei_msg;
- memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
+ BUILD_BUG_ON(sizeof(dev->me_clients_map)
+ < sizeof(enum_res->valid_addresses));
+ memcpy(dev->me_clients_map, enum_res->valid_addresses,
+ sizeof(enum_res->valid_addresses));
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
dev->init_clients_timer = 0;
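
The HOST_ENUM_RES_CMD hunk replaces a hard-coded memcpy length of 32 with sizeof(enum_res->valid_addresses) and adds a BUILD_BUG_ON() so the build breaks if the destination bitmap ever becomes smaller than the source. The same compile-time guard in standalone form; the struct layouts below are invented, and _Static_assert plays the role BUILD_BUG_ON plays in the kernel:

#include <string.h>

/* Invented layouts, only to show the size relationship being asserted. */
struct enum_response {
	unsigned char valid_addresses[32];
};

struct device_state {
	unsigned long clients_map[32 / sizeof(unsigned long)];
};

static void copy_map(struct device_state *dev, const struct enum_response *res)
{
	/* refuse to build if the destination could ever be too small */
	_Static_assert(sizeof(dev->clients_map) >= sizeof(res->valid_addresses),
		       "clients_map smaller than valid_addresses");
	memcpy(dev->clients_map, res->valid_addresses,
	       sizeof(res->valid_addresses));
}

int main(void)
{
	struct device_state dev = { { 0 } };
	struct enum_response res = { { 0 } };

	copy_map(&dev, &res);
	return 0;
}
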
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 6a203b6e834..6c0fde55270 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -110,6 +110,7 @@
#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
+#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */
#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
/*
* MEI HW Section
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6197018e2f1..f7f3abbe12b 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -68,6 +68,14 @@ void mei_device_init(struct mei_device *dev)
mei_io_list_init(&dev->amthif_cmd_list);
mei_io_list_init(&dev->amthif_rd_complete_list);
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first client ID
+ * 0: Reserved for MEI Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
}
EXPORT_SYMBOL_GPL(mei_device_init);
@@ -139,6 +147,10 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->dev_state != MEI_DEV_POWER_DOWN &&
dev->dev_state != MEI_DEV_POWER_UP);
+ if (unexpected)
+ dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
+ mei_dev_state_str(dev->dev_state));
+
ret = mei_hw_reset(dev, interrupts_enabled);
if (ret) {
dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n");
@@ -165,12 +177,7 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
mei_cl_unlink(&dev->wd_cl);
- if (dev->open_handle_count > 0)
- dev->open_handle_count--;
mei_cl_unlink(&dev->iamthif_cl);
- if (dev->open_handle_count > 0)
- dev->open_handle_count--;
-
mei_amthif_reset_params(dev);
memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
}
@@ -182,10 +189,6 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
- if (unexpected)
- dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
- mei_dev_state_str(dev->dev_state));
-
if (!interrupts_enabled) {
dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n");
return;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 4b59cb742de..7a95c07e59a 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -113,13 +113,13 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
if (cb->response_buffer.size == 0 ||
cb->response_buffer.data == NULL) {
- dev_err(&dev->pdev->dev, "response buffer is not allocated.\n");
+ cl_err(dev, cl, "response buffer is not allocated.\n");
list_del(&cb->list);
return -ENOMEM;
}
if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
- dev_dbg(&dev->pdev->dev, "message overflow. size %d len %d idx %ld\n",
+ cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
cb->response_buffer.size,
mei_hdr->length, cb->buf_idx);
buffer = krealloc(cb->response_buffer.data,
@@ -127,7 +127,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
GFP_KERNEL);
if (!buffer) {
- dev_err(&dev->pdev->dev, "allocation failed.\n");
+ cl_err(dev, cl, "allocation failed.\n");
list_del(&cb->list);
return -ENOMEM;
}
@@ -143,9 +143,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
if (mei_hdr->msg_complete) {
cl->status = 0;
list_del(&cb->list);
- dev_dbg(&dev->pdev->dev, "completed read H cl = %d, ME cl = %d, length = %lu\n",
- cl->host_client_id,
- cl->me_client_id,
+ cl_dbg(dev, cl, "completed read length = %lu\n",
cb->buf_idx);
list_add_tail(&cb->list, &complete_list->list);
}
@@ -218,9 +216,11 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
s32 *slots, struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
-
u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
+ int ret;
+
+
if (*slots < msg_slots) {
/* return the cancel routine */
list_del(&cb->list);
@@ -229,12 +229,14 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
*slots -= msg_slots;
- if (mei_hbm_cl_flow_control_req(dev, cl)) {
- cl->status = -ENODEV;
+ ret = mei_hbm_cl_flow_control_req(dev, cl);
+ if (ret) {
+ cl->status = ret;
cb->buf_idx = 0;
list_move_tail(&cb->list, &cmpl_list->list);
- return -ENODEV;
+ return ret;
}
+
list_move_tail(&cb->list, &dev->read_list.list);
return 0;
@@ -256,6 +258,7 @@ static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb,
s32 *slots, struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
+ int ret;
u32 msg_slots =
mei_data2slots(sizeof(struct hbm_client_connect_request));
@@ -270,11 +273,12 @@ static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb,
cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- cl->status = -ENODEV;
+ ret = mei_hbm_cl_connect_req(dev, cl);
+ if (ret) {
+ cl->status = ret;
cb->buf_idx = 0;
list_del(&cb->list);
- return -ENODEV;
+ return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
@@ -345,14 +349,14 @@ int mei_irq_read_handler(struct mei_device *dev,
/* decide where to read the message too */
if (!mei_hdr->host_addr) {
- dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
+ dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n");
mei_hbm_dispatch(dev, mei_hdr);
- dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
+ dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n");
} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
(dev->iamthif_state == MEI_IAMTHIF_READING)) {
- dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
+ dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n");
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
@@ -423,12 +427,12 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
if (MEI_WRITING == cl->writing_state &&
cb->fop_type == MEI_FOP_WRITE &&
cl != &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
+ cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
list_add_tail(&cb->list, &cmpl_list->list);
}
if (cl == &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
+ cl_dbg(dev, cl, "check iamthif flow control.\n");
if (dev->iamthif_flow_control_pending) {
ret = mei_amthif_irq_read(dev, &slots);
if (ret)
@@ -509,13 +513,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
cl = cb->cl;
if (cl == NULL)
continue;
- if (mei_cl_flow_ctrl_creds(cl) <= 0) {
- dev_dbg(&dev->pdev->dev,
- "No flow control credentials for client %d, not sending.\n",
- cl->host_client_id);
- continue;
- }
-
if (cl == &dev->iamthif_cl)
ret = mei_amthif_irq_write_complete(cl, cb,
&slots, cmpl_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index cabeddd66c1..9661a812f55 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -60,48 +60,45 @@ static int mei_open(struct inode *inode, struct file *file)
int err;
- err = -ENODEV;
if (!misc->parent)
- goto out;
+ return -ENODEV;
pdev = container_of(misc->parent, struct pci_dev, dev);
dev = pci_get_drvdata(pdev);
if (!dev)
- goto out;
+ return -ENODEV;
mutex_lock(&dev->device_lock);
- err = -ENOMEM;
- cl = mei_cl_allocate(dev);
- if (!cl)
- goto out_unlock;
+
+ cl = NULL;
err = -ENODEV;
if (dev->dev_state != MEI_DEV_ENABLED) {
dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
- goto out_unlock;
- }
- err = -EMFILE;
- if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
- dev_err(&dev->pdev->dev, "open_handle_count exceded %d",
- MEI_MAX_OPEN_HANDLE_COUNT);
- goto out_unlock;
+ goto err_unlock;
}
+ err = -ENOMEM;
+ cl = mei_cl_allocate(dev);
+ if (!cl)
+ goto err_unlock;
+
+ /* open_handle_count check is handled in the mei_cl_link */
err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
if (err)
- goto out_unlock;
+ goto err_unlock;
file->private_data = cl;
+
mutex_unlock(&dev->device_lock);
return nonseekable_open(inode, file);
-out_unlock:
+err_unlock:
mutex_unlock(&dev->device_lock);
kfree(cl);
-out:
return err;
}
@@ -144,10 +141,6 @@ static int mei_release(struct inode *inode, struct file *file)
cl->host_client_id,
cl->me_client_id);
- if (dev->open_handle_count > 0) {
- clear_bit(cl->host_client_id, dev->host_clients_map);
- dev->open_handle_count--;
- }
mei_cl_unlink(cl);
@@ -165,10 +158,7 @@ static int mei_release(struct inode *inode, struct file *file)
file->private_data = NULL;
- if (cb) {
- mei_io_cb_free(cb);
- cb = NULL;
- }
+ mei_io_cb_free(cb);
kfree(cl);
out:
@@ -203,12 +193,18 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
dev = cl->dev;
+
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
goto out;
}
+ if (length == 0) {
+ rets = 0;
+ goto out;
+ }
+
if (cl == &dev->iamthif_cl) {
rets = mei_amthif_read(dev, file, ubuf, length, offset);
goto out;
@@ -347,8 +343,14 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = -ENODEV;
goto out;
}
- if (length > dev->me_clients[id].props.max_msg_length || length <= 0) {
- rets = -EMSGSIZE;
+
+ if (length == 0) {
+ rets = 0;
+ goto out;
+ }
+
+ if (length > dev->me_clients[id].props.max_msg_length) {
+ rets = -EFBIG;
goto out;
}
@@ -401,8 +403,11 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
- if (rets)
+ if (rets) {
+ dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
+ rets = -EFAULT;
goto out;
+ }
if (cl == &dev->iamthif_cl) {
rets = mei_amthif_write(dev, write_cb);
@@ -489,11 +494,11 @@ static int mei_ioctl_connect_client(struct file *file,
rets = -ENODEV;
goto end;
}
- clear_bit(cl->host_client_id, dev->host_clients_map);
mei_cl_unlink(cl);
kfree(cl);
cl = NULL;
+ dev->iamthif_open_count++;
file->private_data = &dev->iamthif_cl;
client = &data->out_client_properties;
@@ -564,7 +569,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
if (copy_from_user(connect_data, (char __user *)data,
sizeof(struct mei_connect_client_data))) {
- dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
+ dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
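
The mei_open() rework above follows a common kernel error-handling shape: do the cheap state checks before allocating, funnel every failure through a single err_unlock label, and rely on kfree(NULL) being a no-op so the label can free the client unconditionally. A compact sketch of that shape; struct foo_device, struct foo_client and foo_link() are hypothetical:

static int foo_open(struct foo_device *dev)
{
	struct foo_client *cl = NULL;
	int err;

	mutex_lock(&dev->lock);

	err = -ENODEV;
	if (!dev->enabled)
		goto err_unlock;		/* nothing allocated yet */

	err = -ENOMEM;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		goto err_unlock;

	err = foo_link(dev, cl);
	if (err)
		goto err_unlock;		/* kfree(cl) below cleans up */

	mutex_unlock(&dev->lock);
	return 0;

err_unlock:
	mutex_unlock(&dev->lock);
	kfree(cl);				/* kfree(NULL) is a no-op */
	return err;
}
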
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 456b322013e..406f68e05b4 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -414,6 +414,7 @@ struct mei_device {
struct file *iamthif_file_object;
struct mei_cl iamthif_cl;
struct mei_cl_cb *iamthif_current_cb;
+ long iamthif_open_count;
int iamthif_mtu;
unsigned long iamthif_timer;
u32 iamthif_stall_timer;
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index d0c6907dfd9..994ca4aff1a 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -485,8 +485,11 @@ int mei_nfc_host_init(struct mei_device *dev)
if (ndev->cl_info)
return 0;
- cl_info = mei_cl_allocate(dev);
- cl = mei_cl_allocate(dev);
+ ndev->cl_info = mei_cl_allocate(dev);
+ ndev->cl = mei_cl_allocate(dev);
+
+ cl = ndev->cl;
+ cl_info = ndev->cl_info;
if (!cl || !cl_info) {
ret = -ENOMEM;
@@ -527,10 +530,9 @@ int mei_nfc_host_init(struct mei_device *dev)
cl->device_uuid = mei_nfc_guid;
+
list_add_tail(&cl->device_link, &dev->device_list);
- ndev->cl_info = cl_info;
- ndev->cl = cl;
ndev->req_id = 1;
INIT_WORK(&ndev->init_work, mei_nfc_init);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 1b3844e8237..b96205aece0 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -77,6 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
/* required last entry */
@@ -189,7 +190,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
schedule_delayed_work(&dev->timer_work, HZ);
- pr_debug("initialization successful.\n");
+ dev_dbg(&pdev->dev, "initialization successful.\n");
return 0;
@@ -231,7 +232,7 @@ static void mei_me_remove(struct pci_dev *pdev)
hw = to_me_hw(dev);
- dev_err(&pdev->dev, "stop\n");
+ dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
/* disable interrupts */
@@ -239,7 +240,6 @@ static void mei_me_remove(struct pci_dev *pdev)
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
if (hw->mem_addr)
pci_iounmap(pdev, hw->mem_addr);
@@ -262,7 +262,7 @@ static int mei_me_pci_suspend(struct device *device)
if (!dev)
return -ENODEV;
- dev_err(&pdev->dev, "suspend\n");
+ dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index b8921432e89..9e354216c16 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -60,7 +60,7 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
int mei_wd_host_init(struct mei_device *dev)
{
struct mei_cl *cl = &dev->wd_cl;
- int i;
+ int id;
int ret;
mei_cl_init(cl, dev);
@@ -70,19 +70,19 @@ int mei_wd_host_init(struct mei_device *dev)
/* check for valid client id */
- i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
- if (i < 0) {
+ id = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+ if (id < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
- return -ENOENT;
+ return id;
}
- cl->me_client_id = dev->me_clients[i].client_id;
+ cl->me_client_id = dev->me_clients[id].client_id;
ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
if (ret < 0) {
dev_info(&dev->pdev->dev, "wd: failed link client\n");
- return -ENOENT;
+ return ret;
}
cl->state = MEI_FILE_CONNECTING;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
new file mode 100644
index 00000000000..e42b331edbc
--- /dev/null
+++ b/drivers/misc/mic/Kconfig
@@ -0,0 +1,39 @@
+comment "Intel MIC Host Driver"
+
+config INTEL_MIC_HOST
+ tristate "Intel MIC Host Driver"
+ depends on 64BIT && PCI && X86
+ select VHOST_RING
+ default N
+ help
+ This enables Host Driver support for the Intel Many Integrated
+ Core (MIC) family of PCIe form factor coprocessor devices that
+ run a 64 bit Linux OS. The driver manages card OS state and
+ enables communication between host and card. Intel MIC X100
+ devices are currently supported.
+
+ If you are building a host kernel with an Intel MIC device then
+ say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
+
+comment "Intel MIC Card Driver"
+
+config INTEL_MIC_CARD
+ tristate "Intel MIC Card Driver"
+ depends on 64BIT && X86
+ select VIRTIO
+ default N
+ help
+ This enables card driver support for the Intel Many Integrated
+ Core (MIC) device family. The card driver communicates shutdown/
+ crash events to the host and allows registration/configuration of
+ virtio devices. Intel MIC X100 devices are currently supported.
+
+ If you are building a card kernel for an Intel MIC device then
+ say M (recommended) or Y, else say N. If unsure say N.
+
+ For more information see
+ <http://software.intel.com/en-us/mic-developer>.
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
new file mode 100644
index 00000000000..05b34d683a5
--- /dev/null
+++ b/drivers/misc/mic/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2013, Intel Corporation.
+#
+obj-$(CONFIG_INTEL_MIC_HOST) += host/
+obj-$(CONFIG_INTEL_MIC_CARD) += card/
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
new file mode 100644
index 00000000000..69d58bef92c
--- /dev/null
+++ b/drivers/misc/mic/card/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2013, Intel Corporation.
+#
+ccflags-y += -DINTEL_MIC_CARD
+
+obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o
+mic_card-y += mic_x100.o
+mic_card-y += mic_device.o
+mic_card-y += mic_debugfs.o
+mic_card-y += mic_virtio.o
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
new file mode 100644
index 00000000000..421b3d7911d
--- /dev/null
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -0,0 +1,130 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+
+/* Debugfs parent dir */
+static struct dentry *mic_dbg;
+
+/**
+ * mic_intr_test - Send interrupts to host.
+ */
+static int mic_intr_test(struct seq_file *s, void *unused)
+{
+ struct mic_driver *mdrv = s->private;
+ struct mic_device *mdev = &mdrv->mdev;
+
+ mic_send_intr(mdev, 0);
+ msleep(1000);
+ mic_send_intr(mdev, 1);
+ msleep(1000);
+ mic_send_intr(mdev, 2);
+ msleep(1000);
+ mic_send_intr(mdev, 3);
+ msleep(1000);
+
+ return 0;
+}
+
+static int mic_intr_test_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_intr_test, inode->i_private);
+}
+
+static int mic_intr_test_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations intr_test_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_intr_test_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_intr_test_release
+};
+
+/**
+ * mic_create_card_debug_dir - Initialize MIC debugfs entries.
+ */
+void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
+{
+ struct dentry *d;
+
+ if (!mic_dbg)
+ return;
+
+ mdrv->dbg_dir = debugfs_create_dir(mdrv->name, mic_dbg);
+ if (!mdrv->dbg_dir) {
+		dev_err(mdrv->dev, "Can't create dbg_dir %s\n", mdrv->name);
+ return;
+ }
+
+ d = debugfs_create_file("intr_test", 0444, mdrv->dbg_dir,
+ mdrv, &intr_test_ops);
+
+ if (!d) {
+ dev_err(mdrv->dev,
+			"Can't create dbg intr_test %s\n", mdrv->name);
+ return;
+ }
+}
+
+/**
+ * mic_delete_card_debug_dir - Uninitialize MIC debugfs entries.
+ */
+void mic_delete_card_debug_dir(struct mic_driver *mdrv)
+{
+ if (!mdrv->dbg_dir)
+ return;
+
+ debugfs_remove_recursive(mdrv->dbg_dir);
+}
+
+/**
+ * mic_init_card_debugfs - Initialize global debugfs entry.
+ */
+void __init mic_init_card_debugfs(void)
+{
+ mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!mic_dbg)
+ pr_err("can't create debugfs dir\n");
+}
+
+/**
+ * mic_exit_card_debugfs - Uninitialize global debugfs entry
+ */
+void mic_exit_card_debugfs(void)
+{
+ debugfs_remove(mic_dbg);
+}
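
mic_debugfs.c above is a small but complete instance of the seq_file/single_open() debugfs idiom: one show callback, a thin open wrapper that hands the inode's private data to single_open(), and seq_read/seq_lseek/single_release for everything else. A pared-down sketch of the same wiring; struct foo_state and the file name are invented:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct foo_state {
	int value;
};

static int foo_show(struct seq_file *s, void *unused)
{
	struct foo_state *st = s->private;	/* handed over by single_open() */

	seq_printf(s, "value: %d\n", st->value);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* inode->i_private was set by debugfs_create_file() below */
	return single_open(file, foo_show, inode->i_private);
}

static const struct file_operations foo_ops = {
	.owner		= THIS_MODULE,
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* usage: debugfs_create_file("foo", 0444, parent_dentry, &state, &foo_ops); */
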
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
new file mode 100644
index 00000000000..d0980ff9683
--- /dev/null
+++ b/drivers/misc/mic/card/mic_device.c
@@ -0,0 +1,305 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_virtio.h"
+
+static struct mic_driver *g_drv;
+static struct mic_irq *shutdown_cookie;
+
+static void mic_notify_host(u8 state)
+{
+ struct mic_driver *mdrv = g_drv;
+ struct mic_bootparam __iomem *bootparam = mdrv->dp;
+
+ iowrite8(state, &bootparam->shutdown_status);
+ dev_dbg(mdrv->dev, "%s %d system_state %d\n",
+ __func__, __LINE__, state);
+ mic_send_intr(&mdrv->mdev, ioread8(&bootparam->c2h_shutdown_db));
+}
+
+static int mic_panic_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct mic_driver *mdrv = g_drv;
+ struct mic_bootparam __iomem *bootparam = mdrv->dp;
+
+ iowrite8(-1, &bootparam->h2c_config_db);
+ iowrite8(-1, &bootparam->h2c_shutdown_db);
+ mic_notify_host(MIC_CRASHED);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mic_panic = {
+ .notifier_call = mic_panic_event,
+};
+
+static irqreturn_t mic_shutdown_isr(int irq, void *data)
+{
+ struct mic_driver *mdrv = g_drv;
+ struct mic_bootparam __iomem *bootparam = mdrv->dp;
+
+ mic_ack_interrupt(&g_drv->mdev);
+ if (ioread8(&bootparam->shutdown_card))
+ orderly_poweroff(true);
+ return IRQ_HANDLED;
+}
+
+static int mic_shutdown_init(void)
+{
+ int rc = 0;
+ struct mic_driver *mdrv = g_drv;
+ struct mic_bootparam __iomem *bootparam = mdrv->dp;
+ int shutdown_db;
+
+ shutdown_db = mic_next_card_db();
+ shutdown_cookie = mic_request_card_irq(mic_shutdown_isr,
+ "Shutdown", mdrv, shutdown_db);
+ if (IS_ERR(shutdown_cookie))
+ rc = PTR_ERR(shutdown_cookie);
+ else
+ iowrite8(shutdown_db, &bootparam->h2c_shutdown_db);
+ return rc;
+}
+
+static void mic_shutdown_uninit(void)
+{
+ struct mic_driver *mdrv = g_drv;
+ struct mic_bootparam __iomem *bootparam = mdrv->dp;
+
+ iowrite8(-1, &bootparam->h2c_shutdown_db);
+ mic_free_card_irq(shutdown_cookie, mdrv);
+}
+
+static int __init mic_dp_init(void)
+{
+ struct mic_driver *mdrv = g_drv;
+ struct mic_device *mdev = &mdrv->mdev;
+ struct mic_bootparam __iomem *bootparam;
+ u64 lo, hi, dp_dma_addr;
+ u32 magic;
+
+ lo = mic_read_spad(&mdrv->mdev, MIC_DPLO_SPAD);
+ hi = mic_read_spad(&mdrv->mdev, MIC_DPHI_SPAD);
+
+ dp_dma_addr = lo | (hi << 32);
+ mdrv->dp = mic_card_map(mdev, dp_dma_addr, MIC_DP_SIZE);
+ if (!mdrv->dp) {
+ dev_err(mdrv->dev, "Cannot remap Aperture BAR\n");
+ return -ENOMEM;
+ }
+ bootparam = mdrv->dp;
+ magic = ioread32(&bootparam->magic);
+ if (MIC_MAGIC != magic) {
+ dev_err(mdrv->dev, "bootparam magic mismatch 0x%x\n", magic);
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Uninitialize the device page */
+static void mic_dp_uninit(void)
+{
+ mic_card_unmap(&g_drv->mdev, g_drv->dp);
+}
+
+/**
+ * mic_request_card_irq - request an irq.
+ *
+ * @func: The callback function that handles the interrupt.
+ * @name: The ASCII name of the caller requesting the irq.
+ * @data: private data that is returned back when calling the
+ * function handler.
+ * @index: The doorbell index of the requester.
+ *
+ * returns: A cookie that is transparent to the caller. It is passed
+ * back when calling mic_free_card_irq. An appropriate error code
+ * is returned on failure. The caller needs to use IS_ERR(return_val)
+ * to check for failure and PTR_ERR(return_val) to obtain the
+ * error code.
+ *
+ */
+struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int index)
+{
+ int rc = 0;
+ unsigned long cookie;
+ struct mic_driver *mdrv = g_drv;
+
+ rc = request_irq(mic_db_to_irq(mdrv, index), func,
+ 0, name, data);
+ if (rc) {
+ dev_err(mdrv->dev, "request_irq failed rc = %d\n", rc);
+ goto err;
+ }
+ mdrv->irq_info.irq_usage_count[index]++;
+ cookie = index;
+ return (struct mic_irq *)cookie;
+err:
+ return ERR_PTR(rc);
+}
+
+/**
+ * mic_free_card_irq - free irq.
+ *
+ * @cookie: cookie obtained during a successful call to mic_request_card_irq
+ * @data: private data specified by the calling function during the
+ * mic_request_card_irq call
+ *
+ * returns: none.
+ */
+void mic_free_card_irq(struct mic_irq *cookie, void *data)
+{
+ int index;
+ struct mic_driver *mdrv = g_drv;
+
+ index = (unsigned long)cookie & 0xFFFFU;
+ free_irq(mic_db_to_irq(mdrv, index), data);
+ mdrv->irq_info.irq_usage_count[index]--;
+}
+
+/**
+ * mic_next_card_db - Get the doorbell with minimum usage count.
+ *
+ * Returns the doorbell index.
+ */
+int mic_next_card_db(void)
+{
+ int i;
+ int index = 0;
+ struct mic_driver *mdrv = g_drv;
+
+ for (i = 0; i < mdrv->intr_info.num_intr; i++) {
+ if (mdrv->irq_info.irq_usage_count[i] <
+ mdrv->irq_info.irq_usage_count[index])
+ index = i;
+ }
+
+ return index;
+}
+
+/**
+ * mic_init_irq - Initialize irq information.
+ *
+ * Returns 0 on success. Appropriate error code on failure.
+ */
+static int mic_init_irq(void)
+{
+ struct mic_driver *mdrv = g_drv;
+
+ mdrv->irq_info.irq_usage_count = kzalloc((sizeof(u32) *
+ mdrv->intr_info.num_intr),
+ GFP_KERNEL);
+ if (!mdrv->irq_info.irq_usage_count)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * mic_uninit_irq - Uninitialize irq information.
+ *
+ * Returns: none.
+ */
+static void mic_uninit_irq(void)
+{
+ struct mic_driver *mdrv = g_drv;
+
+ kfree(mdrv->irq_info.irq_usage_count);
+}
+
+/*
+ * mic_driver_init - MIC driver initialization tasks.
+ *
+ * Returns 0 on success. Appropriate error code on failure.
+ */
+int __init mic_driver_init(struct mic_driver *mdrv)
+{
+ int rc;
+
+ g_drv = mdrv;
+ /*
+ * Unloading the card module is not supported. The MIC card module
+ * handles fundamental operations like host/card initiated shutdowns
+ * and informing the host about card crashes and cannot be unloaded.
+ */
+ if (!try_module_get(mdrv->dev->driver->owner)) {
+ rc = -ENODEV;
+ goto done;
+ }
+ rc = mic_dp_init();
+ if (rc)
+ goto put;
+ rc = mic_init_irq();
+ if (rc)
+ goto dp_uninit;
+ rc = mic_shutdown_init();
+ if (rc)
+ goto irq_uninit;
+ rc = mic_devices_init(mdrv);
+ if (rc)
+ goto shutdown_uninit;
+ mic_create_card_debug_dir(mdrv);
+ atomic_notifier_chain_register(&panic_notifier_list, &mic_panic);
+done:
+ return rc;
+shutdown_uninit:
+ mic_shutdown_uninit();
+irq_uninit:
+ mic_uninit_irq();
+dp_uninit:
+ mic_dp_uninit();
+put:
+ module_put(mdrv->dev->driver->owner);
+ return rc;
+}
+
+/*
+ * mic_driver_uninit - MIC driver uninitialization tasks.
+ *
+ * Returns: none.
+ */
+void mic_driver_uninit(struct mic_driver *mdrv)
+{
+ mic_delete_card_debug_dir(mdrv);
+ mic_devices_uninit(mdrv);
+ /*
+ * Inform the host about the shutdown status, i.e. poweroff/restart etc.
+ * The module cannot be unloaded so the only code path to call
+ * mic_devices_uninit(..) is the shutdown callback.
+ */
+ mic_notify_host(system_state);
+ mic_shutdown_uninit();
+ mic_uninit_irq();
+ mic_dp_uninit();
+ module_put(mdrv->dev->driver->owner);
+}
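A minimal usage sketch for the doorbell/irq API above (mic_next_card_db, mic_request_card_irq, mic_free_card_irq), assuming only the declarations in mic_device.h below; the handler and function names are hypothetical and only illustrate the cookie/IS_ERR pattern that mic_shutdown_init() follows:

    static irqreturn_t my_db_isr(int irq, void *data)
    {
        struct mic_driver *mdrv = data;

        mic_ack_interrupt(&mdrv->mdev);        /* device specific ack */
        return IRQ_HANDLED;
    }

    static int my_db_setup(struct mic_driver *mdrv, struct mic_irq **cookie)
    {
        int db = mic_next_card_db();           /* least used doorbell */

        *cookie = mic_request_card_irq(my_db_isr, "my-db", mdrv, db);
        if (IS_ERR(*cookie))
            return PTR_ERR(*cookie);           /* cookie doubles as the errno */
        return db;                             /* caller advertises db to the host */
    }

The doorbell index returned here is what gets written into the device page fields (h2c_shutdown_db, h2c_config_db, h2c_vdev_db) so that the host knows which doorbell to ring.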
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
new file mode 100644
index 00000000000..347b9b3b791
--- /dev/null
+++ b/drivers/misc/mic/card/mic_device.h
@@ -0,0 +1,133 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#ifndef _MIC_CARD_DEVICE_H_
+#define _MIC_CARD_DEVICE_H_
+
+#include <linux/workqueue.h>
+#include <linux/io.h>
+
+/**
+ * struct mic_intr_info - Contains h/w specific interrupt sources info
+ *
+ * @num_intr: The number of irqs available
+ */
+struct mic_intr_info {
+ u32 num_intr;
+};
+
+/**
+ * struct mic_irq_info - OS specific irq information
+ *
+ * @irq_usage_count: usage count array tracking the number of sources
+ * assigned for each irq.
+ */
+struct mic_irq_info {
+ int *irq_usage_count;
+};
+
+/**
+ * struct mic_device - MIC device information.
+ *
+ * @mmio: MMIO bar information.
+ */
+struct mic_device {
+ struct mic_mw mmio;
+};
+
+/**
+ * struct mic_driver - MIC card driver information.
+ *
+ * @name: Name for MIC driver.
+ * @dbg_dir: debugfs directory of this MIC device.
+ * @dev: The device backing this MIC.
+ * @dp: The pointer to the virtio device page.
+ * @mdev: MIC device information for the host.
+ * @hotplug_work: Hot plug work for adding/removing virtio devices.
+ * @irq_info: The OS specific irq information
+ * @intr_info: H/W specific interrupt information.
+ */
+struct mic_driver {
+ char name[20];
+ struct dentry *dbg_dir;
+ struct device *dev;
+ void __iomem *dp;
+ struct mic_device mdev;
+ struct work_struct hotplug_work;
+ struct mic_irq_info irq_info;
+ struct mic_intr_info intr_info;
+};
+
+/**
+ * struct mic_irq - opaque pointer used as cookie
+ */
+struct mic_irq;
+
+/**
+ * mic_mmio_read - read from an MMIO register.
+ * @mw: MMIO register base virtual address.
+ * @offset: register offset.
+ *
+ * RETURNS: register value.
+ */
+static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset)
+{
+ return ioread32(mw->va + offset);
+}
+
+/**
+ * mic_mmio_write - write to an MMIO register.
+ * @mw: MMIO register base virtual address.
+ * @val: the data value to put into the register
+ * @offset: register offset.
+ *
+ * RETURNS: none.
+ */
+static inline void
+mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
+{
+ iowrite32(val, mw->va + offset);
+}
+
+int mic_driver_init(struct mic_driver *mdrv);
+void mic_driver_uninit(struct mic_driver *mdrv);
+int mic_next_card_db(void);
+struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int intr_src);
+void mic_free_card_irq(struct mic_irq *cookie, void *data);
+u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
+void mic_send_intr(struct mic_device *mdev, int doorbell);
+int mic_db_to_irq(struct mic_driver *mdrv, int db);
+u32 mic_ack_interrupt(struct mic_device *mdev);
+void mic_hw_intr_init(struct mic_driver *mdrv);
+void __iomem *
+mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size);
+void mic_card_unmap(struct mic_device *mdev, void __iomem *addr);
+void __init mic_create_card_debug_dir(struct mic_driver *mdrv);
+void mic_delete_card_debug_dir(struct mic_driver *mdrv);
+void __init mic_init_card_debugfs(void);
+void mic_exit_card_debugfs(void);
+#endif
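mic_mmio_read() and mic_mmio_write() above are thin ioread32()/iowrite32() wrappers around struct mic_mw (defined in common/mic_dev.h later in this patch), taking byte offsets into the ioremap'd window. A small sketch of how mic_x100.c uses them against the SBOX MMIO window; the register offset below is made up purely for illustration:

    #define MY_SBOX_REG 0x1234             /* hypothetical register offset */

    static u32 my_read_sbox(struct mic_device *mdev)
    {
        /* mdev->mmio.va is set up by ioremap() in mic_probe() */
        return mic_mmio_read(&mdev->mmio,
                             MIC_X100_SBOX_BASE_ADDRESS + MY_SBOX_REG);
    }

    static void my_write_sbox(struct mic_device *mdev, u32 val)
    {
        mic_mmio_write(&mdev->mmio, val,
                       MIC_X100_SBOX_BASE_ADDRESS + MY_SBOX_REG);
    }

This mirrors mic_read_spad() and mic_send_intr() in mic_x100.c below.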
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
new file mode 100644
index 00000000000..8aa42e738ac
--- /dev/null
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -0,0 +1,630 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Adapted from:
+ *
+ * virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/virtio_config.h>
+
+#include "../common/mic_dev.h"
+#include "mic_virtio.h"
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+#define MIC_MAX_VRINGS 4
+struct mic_vdev {
+ struct virtio_device vdev;
+ struct mic_device_desc __iomem *desc;
+ struct mic_device_ctrl __iomem *dc;
+ struct mic_device *mdev;
+ void __iomem *vr[MIC_MAX_VRINGS];
+ int used_size[MIC_MAX_VRINGS];
+ struct completion reset_done;
+ struct mic_irq *virtio_cookie;
+ int c2h_vdev_db;
+};
+
+static struct mic_irq *virtio_config_cookie;
+#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev)
+
+/* Helper API to obtain the parent of the virtio device */
+static inline struct device *mic_dev(struct mic_vdev *mvdev)
+{
+ return mvdev->vdev.dev.parent;
+}
+
+/* This gets the device's feature bits. */
+static u32 mic_get_features(struct virtio_device *vdev)
+{
+ unsigned int i, bits;
+ u32 features = 0;
+ struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+ u8 __iomem *in_features = mic_vq_features(desc);
+ int feature_len = ioread8(&desc->feature_len);
+
+ bits = min_t(unsigned, feature_len,
+ sizeof(vdev->features)) * 8;
+ for (i = 0; i < bits; i++)
+ if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
+ features |= BIT(i);
+
+ return features;
+}
+
+static void mic_finalize_features(struct virtio_device *vdev)
+{
+ unsigned int i, bits;
+ struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+ u8 feature_len = ioread8(&desc->feature_len);
+ /* Second half of bitmap is features we accept. */
+ u8 __iomem *out_features =
+ mic_vq_features(desc) + feature_len;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ memset_io(out_features, 0, feature_len);
+ bits = min_t(unsigned, feature_len,
+ sizeof(vdev->features)) * 8;
+ for (i = 0; i < bits; i++) {
+ if (test_bit(i, vdev->features))
+ iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
+ &out_features[i / 8]);
+ }
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void mic_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned len)
+{
+ struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+
+ if (offset + len > ioread8(&desc->config_len))
+ return;
+ memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len);
+}
+
+static void mic_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned len)
+{
+ struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+
+ if (offset + len > ioread8(&desc->config_len))
+ return;
+ memcpy_toio(mic_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access the status
+ * field of the device descriptor. set_status also interrupts the host
+ * to tell about status changes.
+ */
+static u8 mic_get_status(struct virtio_device *vdev)
+{
+ return ioread8(&to_micvdev(vdev)->desc->status);
+}
+
+static void mic_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+ if (!status)
+ return;
+ iowrite8(status, &mvdev->desc->status);
+ mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+}
+
+/* Inform host on a virtio device reset and wait for ack from host */
+static void mic_reset_inform_host(struct virtio_device *vdev)
+{
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+ struct mic_device_ctrl __iomem *dc = mvdev->dc;
+ int retry = 100, i;
+
+ iowrite8(0, &dc->host_ack);
+ iowrite8(1, &dc->vdev_reset);
+ mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+
+ /* Wait till host completes all card accesses and acks the reset */
+ for (i = retry; i--;) {
+ if (ioread8(&dc->host_ack))
+ break;
+ msleep(100);
+ }
+
+ dev_dbg(mic_dev(mvdev), "%s: retries left: %d\n", __func__, i);
+
+ /* Reset status to 0 in case we timed out */
+ iowrite8(0, &mvdev->desc->status);
+}
+
+static void mic_reset(struct virtio_device *vdev)
+{
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+
+ dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n",
+ __func__, vdev->id.device);
+
+ mic_reset_inform_host(vdev);
+ complete_all(&mvdev->reset_done);
+}
+
+/*
+ * The virtio_ring code calls this API when it wants to notify the Host.
+ */
+static void mic_notify(struct virtqueue *vq)
+{
+ struct mic_vdev *mvdev = vq->priv;
+
+ mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+}
+
+static void mic_del_vq(struct virtqueue *vq, int n)
+{
+ struct mic_vdev *mvdev = to_micvdev(vq->vdev);
+ struct vring *vr = (struct vring *)(vq + 1);
+
+ free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n]));
+ vring_del_virtqueue(vq);
+ mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
+ mvdev->vr[n] = NULL;
+}
+
+static void mic_del_vqs(struct virtio_device *vdev)
+{
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+ struct virtqueue *vq, *n;
+ int idx = 0;
+
+ dev_dbg(mic_dev(mvdev), "%s\n", __func__);
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ mic_del_vq(vq, idx++);
+}
+
+/*
+ * This routine assigns vrings allocated in host/IO memory. Note that the code
+ * in virtio_ring.c continues to access this IO memory as if it were local
+ * memory, without IO accessors.
+ */
+static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+ struct mic_vqconfig __iomem *vqconfig;
+ struct mic_vqconfig config;
+ struct virtqueue *vq;
+ void __iomem *va;
+ struct _mic_vring_info __iomem *info;
+ void *used;
+ int vr_size, _vr_size, err, magic;
+ struct vring *vr;
+ u8 type = ioread8(&mvdev->desc->type);
+
+ if (index >= ioread8(&mvdev->desc->num_vq))
+ return ERR_PTR(-ENOENT);
+
+ if (!name)
+ return ERR_PTR(-ENOENT);
+
+ /* First assign the vrings allocated in host memory */
+ vqconfig = mic_vq_config(mvdev->desc) + index;
+ memcpy_fromio(&config, vqconfig, sizeof(config));
+ _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN);
+ vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+ va = mic_card_map(mvdev->mdev, config.address, vr_size);
+ if (!va)
+ return ERR_PTR(-ENOMEM);
+ mvdev->vr[index] = va;
+ memset_io(va, 0x0, _vr_size);
+ vq = vring_new_virtqueue(index,
+ config.num, MIC_VIRTIO_RING_ALIGN, vdev,
+ false,
+ va, mic_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto unmap;
+ }
+ info = va + _vr_size;
+ magic = ioread32(&info->magic);
+
+ if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
+ err = -EIO;
+ goto unmap;
+ }
+
+ /* Allocate and reassign used ring now */
+ mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
+ sizeof(struct vring_used_elem) * config.num);
+ used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(mvdev->used_size[index]));
+ if (!used) {
+ err = -ENOMEM;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto del_vq;
+ }
+ iowrite64(virt_to_phys(used), &vqconfig->used_address);
+
+ /*
+ * To reassign the used ring here we are directly accessing
+ * struct vring_virtqueue which is a private data structure
+ * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
+ * vring_new_virtqueue() would ensure that
+ * (&vq->vring == (struct vring *) (&vq->vq + 1));
+ */
+ vr = (struct vring *)(vq + 1);
+ vr->used = used;
+
+ vq->priv = mvdev;
+ return vq;
+del_vq:
+ vring_del_virtqueue(vq);
+unmap:
+ mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
+ return ERR_PTR(err);
+}
+
+static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+ struct mic_device_ctrl __iomem *dc = mvdev->dc;
+ int i, err, retry = 100;
+
+ /* We must have this many virtqueues. */
+ if (nvqs > ioread8(&mvdev->desc->num_vq))
+ return -ENOENT;
+
+ for (i = 0; i < nvqs; ++i) {
+ dev_dbg(mic_dev(mvdev), "%s: %d: %s\n",
+ __func__, i, names[i]);
+ vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ iowrite8(1, &dc->used_address_updated);
+ /*
+ * Send an interrupt to the host to inform it that used
+ * rings have been re-assigned.
+ */
+ mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+ for (i = retry; i--;) {
+ if (!ioread8(&dc->used_address_updated))
+ break;
+ msleep(100);
+ }
+
+ dev_dbg(mic_dev(mvdev), "%s: retries left: %d\n", __func__, i);
+ if (i < 0) {
+ err = -ENODEV;
+ goto error;
+ }
+
+ return 0;
+error:
+ mic_del_vqs(vdev);
+ return err;
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops mic_vq_config_ops = {
+ .get_features = mic_get_features,
+ .finalize_features = mic_finalize_features,
+ .get = mic_get,
+ .set = mic_set,
+ .get_status = mic_get_status,
+ .set_status = mic_set_status,
+ .reset = mic_reset,
+ .find_vqs = mic_find_vqs,
+ .del_vqs = mic_del_vqs,
+};
+
+static irqreturn_t
+mic_virtio_intr_handler(int irq, void *data)
+{
+ struct mic_vdev *mvdev = data;
+ struct virtqueue *vq;
+
+ mic_ack_interrupt(mvdev->mdev);
+ list_for_each_entry(vq, &mvdev->vdev.vqs, list)
+ vring_interrupt(0, vq);
+
+ return IRQ_HANDLED;
+}
+
+static void mic_virtio_release_dev(struct device *_d)
+{
+ /*
+ * No need for a release method similar to virtio PCI.
+ * Provide an empty one to avoid getting a warning from core.
+ */
+}
+
+/*
+ * Adds a new device and registers it with virtio.
+ * Appropriate drivers are loaded by the device model.
+ */
+static int mic_add_device(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct mic_driver *mdrv)
+{
+ struct mic_vdev *mvdev;
+ int ret;
+ int virtio_db;
+ u8 type = ioread8(&d->type);
+
+ mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
+ if (!mvdev) {
+ dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n",
+ offset, type);
+ return -ENOMEM;
+ }
+
+ mvdev->mdev = &mdrv->mdev;
+ mvdev->vdev.dev.parent = mdrv->dev;
+ mvdev->vdev.dev.release = mic_virtio_release_dev;
+ mvdev->vdev.id.device = type;
+ mvdev->vdev.config = &mic_vq_config_ops;
+ mvdev->desc = d;
+ mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d);
+ init_completion(&mvdev->reset_done);
+
+ virtio_db = mic_next_card_db();
+ mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
+ "virtio intr", mvdev, virtio_db);
+ if (IS_ERR(mvdev->virtio_cookie)) {
+ ret = PTR_ERR(mvdev->virtio_cookie);
+ goto kfree;
+ }
+ iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db);
+ mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db);
+
+ ret = register_virtio_device(&mvdev->vdev);
+ if (ret) {
+ dev_err(mic_dev(mvdev),
+ "Failed to register mic device %u type %u\n",
+ offset, type);
+ goto free_irq;
+ }
+ iowrite64((u64)mvdev, &mvdev->dc->vdev);
+ dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n",
+ __func__, offset, type, mvdev);
+
+ return 0;
+
+free_irq:
+ mic_free_card_irq(mvdev->virtio_cookie, mvdev);
+kfree:
+ kfree(mvdev);
+ return ret;
+}
+
+/*
+ * match for a mic device with a specific desc pointer
+ */
+static int mic_match_desc(struct device *dev, void *data)
+{
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ struct mic_vdev *mvdev = to_micvdev(vdev);
+
+ return mvdev->desc == (void __iomem *)data;
+}
+
+static void mic_handle_config_change(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct mic_driver *mdrv)
+{
+ struct mic_device_ctrl __iomem *dc
+ = (void __iomem *)d + mic_aligned_desc_size(d);
+ struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
+ struct virtio_driver *drv;
+
+ if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
+ return;
+
+ dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
+ drv = container_of(mvdev->vdev.dev.driver,
+ struct virtio_driver, driver);
+ if (drv->config_changed)
+ drv->config_changed(&mvdev->vdev);
+ iowrite8(1, &dc->guest_ack);
+}
+
+/*
+ * removes a virtio device if a hot remove event has been
+ * requested by the host.
+ */
+static int mic_remove_device(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct mic_driver *mdrv)
+{
+ struct mic_device_ctrl __iomem *dc
+ = (void __iomem *)d + mic_aligned_desc_size(d);
+ struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
+ u8 status;
+ int ret = -1;
+
+ if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+ dev_dbg(mdrv->dev,
+ "%s %d config_change %d type %d mvdev %p\n",
+ __func__, __LINE__,
+ ioread8(&dc->config_change), ioread8(&d->type), mvdev);
+
+ status = ioread8(&d->status);
+ reinit_completion(&mvdev->reset_done);
+ unregister_virtio_device(&mvdev->vdev);
+ mic_free_card_irq(mvdev->virtio_cookie, mvdev);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ wait_for_completion(&mvdev->reset_done);
+ kfree(mvdev);
+ iowrite8(1, &dc->guest_ack);
+ dev_dbg(mdrv->dev, "%s %d guest_ack %d\n",
+ __func__, __LINE__, ioread8(&dc->guest_ack));
+ ret = 0;
+ }
+
+ return ret;
+}
+
+#define REMOVE_DEVICES true
+
+static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
+{
+ s8 type;
+ unsigned int i;
+ struct mic_device_desc __iomem *d;
+ struct mic_device_ctrl __iomem *dc;
+ struct device *dev;
+ int ret;
+
+ for (i = mic_aligned_size(struct mic_bootparam);
+ i < MIC_DP_SIZE; i += mic_total_desc_size(d)) {
+ d = mdrv->dp + i;
+ dc = (void __iomem *)d + mic_aligned_desc_size(d);
+ /*
+ * This read barrier is paired with the corresponding write
+ * barrier on the host which is inserted before adding or
+ * removing a virtio device descriptor, by updating the type.
+ */
+ rmb();
+ type = ioread8(&d->type);
+
+ /* end of list */
+ if (type == 0)
+ break;
+
+ if (type == -1)
+ continue;
+
+ /* device already exists */
+ dev = device_find_child(mdrv->dev, d, mic_match_desc);
+ if (dev) {
+ if (remove)
+ iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
+ &dc->config_change);
+ put_device(dev);
+ mic_handle_config_change(d, i, mdrv);
+ ret = mic_remove_device(d, i, mdrv);
+ if (!ret && !remove)
+ iowrite8(-1, &d->type);
+ if (remove) {
+ iowrite8(0, &dc->config_change);
+ iowrite8(0, &dc->guest_ack);
+ }
+ continue;
+ }
+
+ /* new device */
+ dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n",
+ __func__, __LINE__, d);
+ if (!remove)
+ mic_add_device(d, i, mdrv);
+ }
+}
+
+/*
+ * mic_hotplug_devices tries to find changes in the device page.
+ */
+static void mic_hotplug_devices(struct work_struct *work)
+{
+ struct mic_driver *mdrv = container_of(work,
+ struct mic_driver, hotplug_work);
+
+ mic_scan_devices(mdrv, !REMOVE_DEVICES);
+}
+
+/*
+ * Interrupt handler for hot plug/config changes etc.
+ */
+static irqreturn_t
+mic_extint_handler(int irq, void *data)
+{
+ struct mic_driver *mdrv = (struct mic_driver *)data;
+
+ dev_dbg(mdrv->dev, "%s %d hotplug work\n",
+ __func__, __LINE__);
+ mic_ack_interrupt(&mdrv->mdev);
+ schedule_work(&mdrv->hotplug_work);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Init function for virtio
+ */
+int mic_devices_init(struct mic_driver *mdrv)
+{
+ int rc;
+ struct mic_bootparam __iomem *bootparam;
+ int config_db;
+
+ INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices);
+ mic_scan_devices(mdrv, !REMOVE_DEVICES);
+
+ config_db = mic_next_card_db();
+ virtio_config_cookie = mic_request_card_irq(mic_extint_handler,
+ "virtio_config_intr", mdrv, config_db);
+ if (IS_ERR(virtio_config_cookie)) {
+ rc = PTR_ERR(virtio_config_cookie);
+ goto exit;
+ }
+
+ bootparam = mdrv->dp;
+ iowrite8(config_db, &bootparam->h2c_config_db);
+ return 0;
+exit:
+ return rc;
+}
+
+/*
+ * Uninit function for virtio
+ */
+void mic_devices_uninit(struct mic_driver *mdrv)
+{
+ struct mic_bootparam __iomem *bootparam = mdrv->dp;
+ iowrite8(-1, &bootparam->h2c_config_db);
+ mic_free_card_irq(virtio_config_cookie, mdrv);
+ flush_work(&mdrv->hotplug_work);
+ mic_scan_devices(mdrv, REMOVE_DEVICES);
+}
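The used-ring relocation in mic_find_vq() relies on a layout detail of struct vring_virtqueue that is private to drivers/virtio/virtio_ring.c: the embedded struct vring must sit immediately after the embedded struct virtqueue, which is what makes the (struct vring *)(vq + 1) cast in mic_find_vq() and mic_del_vq() valid. The comment in mic_find_vq() suggests pinning this down in vring_new_virtqueue(); a sketch of such a guard, placed where the private structure is visible, could look like:

    /* inside drivers/virtio/virtio_ring.c, e.g. at the top of vring_new_virtqueue() */
    BUILD_BUG_ON(offsetof(struct vring_virtqueue, vring) !=
                 offsetof(struct vring_virtqueue, vq) + sizeof(struct virtqueue));

This only illustrates the assumption being made; the patch itself does not add such a check.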
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
new file mode 100644
index 00000000000..2c5c22c93ba
--- /dev/null
+++ b/drivers/misc/mic/card/mic_virtio.h
@@ -0,0 +1,77 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#ifndef __MIC_CARD_VIRTIO_H
+#define __MIC_CARD_VIRTIO_H
+
+#include <linux/mic_common.h>
+#include "mic_device.h"
+
+/*
+ * 64 bit I/O access
+ */
+#ifndef ioread64
+#define ioread64 readq
+#endif
+#ifndef iowrite64
+#define iowrite64 writeq
+#endif
+
+static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
+{
+ return mic_aligned_size(*desc)
+ + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig)
+ + ioread8(&desc->feature_len) * 2
+ + ioread8(&desc->config_len);
+}
+
+static inline struct mic_vqconfig __iomem *
+mic_vq_config(struct mic_device_desc __iomem *desc)
+{
+ return (struct mic_vqconfig __iomem *)(desc + 1);
+}
+
+static inline __u8 __iomem *
+mic_vq_features(struct mic_device_desc __iomem *desc)
+{
+ return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq));
+}
+
+static inline __u8 __iomem *
+mic_vq_configspace(struct mic_device_desc __iomem *desc)
+{
+ return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2;
+}
+static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
+{
+ return mic_aligned_desc_size(desc) +
+ mic_aligned_size(struct mic_device_ctrl);
+}
+
+int mic_devices_init(struct mic_driver *mdrv);
+void mic_devices_uninit(struct mic_driver *mdrv);
+
+#endif
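Taken together, the helpers above fix the layout of one entry in the shared device page: the struct mic_device_desc header, num_vq vring configurations, two copies of the feature bitmap (host offered, then guest accepted), the virtio config space, and finally the struct mic_device_ctrl block that mic_total_desc_size() accounts for. A sketch of how a consumer addresses one descriptor, with hypothetical field values (num_vq = 2, feature_len = 4, config_len = 8):

    struct mic_vqconfig __iomem *vqs = mic_vq_config(desc);   /* right after the header     */
    __u8 __iomem *host_features = mic_vq_features(desc);      /* after 2 vqconfig entries   */
    __u8 __iomem *guest_features = host_features + 4;         /* second feature copy        */
    __u8 __iomem *config = mic_vq_configspace(desc);          /* after 2 * 4 feature bytes  */
    unsigned next_off = mic_total_desc_size(desc);            /* adds the device_ctrl block */

mic_scan_devices() in card/mic_virtio.c and mic_dp_show() in the host debugfs code both walk the device page in steps of mic_total_desc_size().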
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
new file mode 100644
index 00000000000..2868945c9a4
--- /dev/null
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -0,0 +1,256 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_x100.h"
+
+static const char mic_driver_name[] = "mic";
+
+static struct mic_driver g_drv;
+
+/**
+ * mic_read_spad - read from the scratchpad register
+ * @mdev: pointer to mic_device instance
+ * @idx: index to scratchpad register, 0 based
+ *
+ * This function allows reading of the 32bit scratchpad register.
+ *
+ * RETURNS: The value read from the scratchpad register.
+ */
+u32 mic_read_spad(struct mic_device *mdev, unsigned int idx)
+{
+ return mic_mmio_read(&mdev->mmio,
+ MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_SPAD0 + idx * 4);
+}
+
+/**
+ * mic_send_intr - Send interrupt to Host.
+ * @mdev: pointer to mic_device instance
+ * @doorbell: Doorbell number.
+ */
+void mic_send_intr(struct mic_device *mdev, int doorbell)
+{
+ struct mic_mw *mw = &mdev->mmio;
+
+ if (doorbell > MIC_X100_MAX_DOORBELL_IDX)
+ return;
+ /* Ensure that the interrupt is ordered w.r.t previous stores. */
+ wmb();
+ mic_mmio_write(mw, MIC_X100_SBOX_SDBIC0_DBREQ_BIT,
+ MIC_X100_SBOX_BASE_ADDRESS +
+ (MIC_X100_SBOX_SDBIC0 + (4 * doorbell)));
+}
+
+/**
+ * mic_ack_interrupt - Device specific interrupt handling.
+ * @mdev: pointer to mic_device instance
+ *
+ * Returns: bitmask of doorbell events triggered.
+ */
+u32 mic_ack_interrupt(struct mic_device *mdev)
+{
+ return 0;
+}
+
+static inline int mic_get_sbox_irq(int db)
+{
+ return MIC_X100_IRQ_BASE + db;
+}
+
+static inline int mic_get_rdmasr_irq(int index)
+{
+ return MIC_X100_RDMASR_IRQ_BASE + index;
+}
+
+/**
+ * mic_hw_intr_init - Initialize h/w specific interrupt
+ * information.
+ * @mdrv: pointer to mic_driver
+ */
+void mic_hw_intr_init(struct mic_driver *mdrv)
+{
+ mdrv->intr_info.num_intr = MIC_X100_NUM_SBOX_IRQ +
+ MIC_X100_NUM_RDMASR_IRQ;
+}
+
+/**
+ * mic_db_to_irq - Retrieve irq number corresponding to a doorbell.
+ * @mdrv: pointer to mic_driver
+ * @db: The doorbell for which the irq is needed. The doorbell
+ * may correspond to an sbox doorbell or an rdmasr index.
+ *
+ * Returns the irq corresponding to the doorbell.
+ */
+int mic_db_to_irq(struct mic_driver *mdrv, int db)
+{
+ int rdmasr_index;
+ if (db < MIC_X100_NUM_SBOX_IRQ) {
+ return mic_get_sbox_irq(db);
+ } else {
+ rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ +
+ MIC_X100_RDMASR_IRQ_BASE;
+ return mic_get_rdmasr_irq(rdmasr_index);
+ }
+}
+
+/*
+ * mic_card_map - Allocate virtual address for a remote memory region.
+ * @mdev: pointer to mic_device instance.
+ * @addr: Remote DMA address.
+ * @size: Size of the region.
+ *
+ * Returns: Virtual address backing the remote memory region.
+ */
+void __iomem *
+mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size)
+{
+ return ioremap(addr, size);
+}
+
+/*
+ * mic_card_unmap - Unmap the virtual address for a remote memory region.
+ * @mdev: pointer to mic_device instance.
+ * @addr: Virtual address for remote memory region.
+ *
+ * Returns: None.
+ */
+void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+static int __init mic_probe(struct platform_device *pdev)
+{
+ struct mic_driver *mdrv = &g_drv;
+ struct mic_device *mdev = &mdrv->mdev;
+ int rc = 0;
+
+ mdrv->dev = &pdev->dev;
+ snprintf(mdrv->name, sizeof(mdrv->name), "%s", mic_driver_name);
+
+ mdev->mmio.pa = MIC_X100_MMIO_BASE;
+ mdev->mmio.len = MIC_X100_MMIO_LEN;
+ mdev->mmio.va = ioremap(MIC_X100_MMIO_BASE, MIC_X100_MMIO_LEN);
+ if (!mdev->mmio.va) {
+ dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
+ rc = -EIO;
+ goto done;
+ }
+ mic_hw_intr_init(mdrv);
+ rc = mic_driver_init(mdrv);
+ if (rc) {
+ dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc);
+ goto iounmap;
+ }
+done:
+ return rc;
+iounmap:
+ iounmap(mdev->mmio.va);
+ return rc;
+}
+
+static int mic_remove(struct platform_device *pdev)
+{
+ struct mic_driver *mdrv = &g_drv;
+ struct mic_device *mdev = &mdrv->mdev;
+
+ mic_driver_uninit(mdrv);
+ iounmap(mdev->mmio.va);
+ return 0;
+}
+
+static void mic_platform_shutdown(struct platform_device *pdev)
+{
+ mic_remove(pdev);
+}
+
+static struct platform_device mic_platform_dev = {
+ .name = mic_driver_name,
+ .id = 0,
+ .num_resources = 0,
+};
+
+static struct platform_driver __refdata mic_platform_driver = {
+ .probe = mic_probe,
+ .remove = mic_remove,
+ .shutdown = mic_platform_shutdown,
+ .driver = {
+ .name = mic_driver_name,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mic_init(void)
+{
+ int ret;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(c->x86 == 11 && c->x86_model == 1)) {
+ ret = -ENODEV;
+ pr_err("%s not running on X100 ret %d\n", __func__, ret);
+ goto done;
+ }
+
+ mic_init_card_debugfs();
+ ret = platform_device_register(&mic_platform_dev);
+ if (ret) {
+ pr_err("platform_device_register ret %d\n", ret);
+ goto cleanup_debugfs;
+ }
+ ret = platform_driver_register(&mic_platform_driver);
+ if (ret) {
+ pr_err("platform_driver_register ret %d\n", ret);
+ goto device_unregister;
+ }
+ return ret;
+
+device_unregister:
+ platform_device_unregister(&mic_platform_dev);
+cleanup_debugfs:
+ mic_exit_card_debugfs();
+done:
+ return ret;
+}
+
+static void __exit mic_exit(void)
+{
+ platform_driver_unregister(&mic_platform_driver);
+ platform_device_unregister(&mic_platform_dev);
+ mic_exit_card_debugfs();
+}
+
+module_init(mic_init);
+module_exit(mic_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) MIC X100 Card driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
new file mode 100644
index 00000000000..d66ea55639c
--- /dev/null
+++ b/drivers/misc/mic/card/mic_x100.h
@@ -0,0 +1,48 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#ifndef _MIC_X100_CARD_H_
+#define _MIC_X100_CARD_H_
+
+#define MIC_X100_MMIO_BASE 0x08007C0000ULL
+#define MIC_X100_MMIO_LEN 0x00020000ULL
+#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000ULL
+
+#define MIC_X100_SBOX_SPAD0 0x0000AB20
+#define MIC_X100_SBOX_SDBIC0 0x0000CC90
+#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000
+#define MIC_X100_SBOX_RDMASR0 0x0000B180
+
+#define MIC_X100_MAX_DOORBELL_IDX 8
+
+#define MIC_X100_NUM_SBOX_IRQ 8
+#define MIC_X100_NUM_RDMASR_IRQ 8
+#define MIC_X100_SBOX_IRQ_BASE 0
+#define MIC_X100_RDMASR_IRQ_BASE 17
+
+#define MIC_X100_IRQ_BASE 26
+
+#endif
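With these constants, the doorbell to irq mapping in mic_db_to_irq() (mic_x100.c above) works out as follows:

    db 0..7  (sbox)  : irq = MIC_X100_IRQ_BASE + db
                           = 26 + db                          -> irqs 26..33
    db 8..15 (rdmasr): rdmasr_index = db - 8 + 17
                       irq = MIC_X100_RDMASR_IRQ_BASE + rdmasr_index
                           = 17 + (db + 9)                    -> irqs 34..41

so every doorbell handed out by mic_next_card_db() lands in one contiguous irq range starting at MIC_X100_IRQ_BASE.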
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
new file mode 100644
index 00000000000..92999c2bbf8
--- /dev/null
+++ b/drivers/misc/mic/common/mic_dev.h
@@ -0,0 +1,51 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC driver.
+ *
+ */
+#ifndef __MIC_DEV_H__
+#define __MIC_DEV_H__
+
+/**
+ * struct mic_mw - MIC memory window
+ *
+ * @pa: Base physical address.
+ * @va: Base ioremap'd virtual address.
+ * @len: Size of the memory window.
+ */
+struct mic_mw {
+ phys_addr_t pa;
+ void __iomem *va;
+ resource_size_t len;
+};
+
+/*
+ * Scratch pad register offsets used by the host to communicate
+ * device page DMA address to the card.
+ */
+#define MIC_DPLO_SPAD 14
+#define MIC_DPHI_SPAD 15
+
+/*
+ * These values are supposed to be in the config_change field of the
+ * device page when the host sends a config change interrupt to the card.
+ */
+#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1
+#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2
+
+#endif
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
new file mode 100644
index 00000000000..c2197f99939
--- /dev/null
+++ b/drivers/misc/mic/host/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2013, Intel Corporation.
+#
+obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o
+mic_host-objs := mic_main.o
+mic_host-objs += mic_x100.o
+mic_host-objs += mic_sysfs.o
+mic_host-objs += mic_smpt.o
+mic_host-objs += mic_intr.o
+mic_host-objs += mic_boot.o
+mic_host-objs += mic_debugfs.o
+mic_host-objs += mic_fops.o
+mic_host-objs += mic_virtio.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
new file mode 100644
index 00000000000..7558d918643
--- /dev/null
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -0,0 +1,300 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_smpt.h"
+#include "mic_virtio.h"
+
+/**
+ * mic_reset - Reset the MIC device.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_reset(struct mic_device *mdev)
+{
+ int i;
+
+#define MIC_RESET_TO (45)
+
+ reinit_completion(&mdev->reset_wait);
+ mdev->ops->reset_fw_ready(mdev);
+ mdev->ops->reset(mdev);
+
+ for (i = 0; i < MIC_RESET_TO; i++) {
+ if (mdev->ops->is_fw_ready(mdev))
+ goto done;
+ /*
+ * Resets typically take 10s of seconds to complete.
+ * Since an MMIO read is required to check if the
+ * firmware is ready or not, a 1 second delay works nicely.
+ */
+ msleep(1000);
+ }
+ mic_set_state(mdev, MIC_RESET_FAILED);
+done:
+ complete_all(&mdev->reset_wait);
+}
+
+/* Initialize the MIC bootparams */
+void mic_bootparam_init(struct mic_device *mdev)
+{
+ struct mic_bootparam *bootparam = mdev->dp;
+
+ bootparam->magic = MIC_MAGIC;
+ bootparam->c2h_shutdown_db = mdev->shutdown_db;
+ bootparam->h2c_shutdown_db = -1;
+ bootparam->h2c_config_db = -1;
+ bootparam->shutdown_status = 0;
+ bootparam->shutdown_card = 0;
+}
+
+/**
+ * mic_start - Start the MIC.
+ * @mdev: pointer to mic_device instance
+ * @buf: buffer containing boot string including firmware/ramdisk path.
+ *
+ * This function prepares an MIC for boot and initiates boot.
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int mic_start(struct mic_device *mdev, const char *buf)
+{
+ int rc;
+ mutex_lock(&mdev->mic_mutex);
+retry:
+ if (MIC_OFFLINE != mdev->state) {
+ rc = -EINVAL;
+ goto unlock_ret;
+ }
+ if (!mdev->ops->is_fw_ready(mdev)) {
+ mic_reset(mdev);
+ /*
+ * The state will either be MIC_OFFLINE if the reset succeeded
+ * or MIC_RESET_FAILED if the firmware reset failed.
+ */
+ goto retry;
+ }
+ rc = mdev->ops->load_mic_fw(mdev, buf);
+ if (rc)
+ goto unlock_ret;
+ mic_smpt_restore(mdev);
+ mic_intr_restore(mdev);
+ mdev->intr_ops->enable_interrupts(mdev);
+ mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
+ mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
+ mdev->ops->send_firmware_intr(mdev);
+ mic_set_state(mdev, MIC_ONLINE);
+unlock_ret:
+ mutex_unlock(&mdev->mic_mutex);
+ return rc;
+}
+
+/**
+ * mic_stop - Prepare the MIC for reset and trigger reset.
+ * @mdev: pointer to mic_device instance
+ * @force: force a MIC to reset even if it is already offline.
+ *
+ * RETURNS: None.
+ */
+void mic_stop(struct mic_device *mdev, bool force)
+{
+ mutex_lock(&mdev->mic_mutex);
+ if (MIC_OFFLINE != mdev->state || force) {
+ mic_virtio_reset_devices(mdev);
+ mic_bootparam_init(mdev);
+ mic_reset(mdev);
+ if (MIC_RESET_FAILED == mdev->state)
+ goto unlock;
+ mic_set_shutdown_status(mdev, MIC_NOP);
+ if (MIC_SUSPENDED != mdev->state)
+ mic_set_state(mdev, MIC_OFFLINE);
+ }
+unlock:
+ mutex_unlock(&mdev->mic_mutex);
+}
+
+/**
+ * mic_shutdown - Initiate MIC shutdown.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: None.
+ */
+void mic_shutdown(struct mic_device *mdev)
+{
+ struct mic_bootparam *bootparam = mdev->dp;
+ s8 db = bootparam->h2c_shutdown_db;
+
+ mutex_lock(&mdev->mic_mutex);
+ if (MIC_ONLINE == mdev->state && db != -1) {
+ bootparam->shutdown_card = 1;
+ mdev->ops->send_intr(mdev, db);
+ mic_set_state(mdev, MIC_SHUTTING_DOWN);
+ }
+ mutex_unlock(&mdev->mic_mutex);
+}
+
+/**
+ * mic_shutdown_work - Handle shutdown interrupt from MIC.
+ * @work: The work structure.
+ *
+ * This work is scheduled whenever the host has received a shutdown
+ * interrupt from the MIC.
+ */
+void mic_shutdown_work(struct work_struct *work)
+{
+ struct mic_device *mdev = container_of(work, struct mic_device,
+ shutdown_work);
+ struct mic_bootparam *bootparam = mdev->dp;
+
+ mutex_lock(&mdev->mic_mutex);
+ mic_set_shutdown_status(mdev, bootparam->shutdown_status);
+ bootparam->shutdown_status = 0;
+
+ /*
+ * If the state is MIC_SUSPENDED, an OSPM suspend is in progress. We do not
+ * change the state here so as to prevent users from booting the card
+ * during and after the suspend operation.
+ */
+ if (MIC_SHUTTING_DOWN != mdev->state &&
+ MIC_SUSPENDED != mdev->state)
+ mic_set_state(mdev, MIC_SHUTTING_DOWN);
+ mutex_unlock(&mdev->mic_mutex);
+}
+
+/**
+ * mic_reset_trigger_work - Trigger MIC reset.
+ * @work: The work structure.
+ *
+ * This work is scheduled whenever the host wants to reset the MIC.
+ */
+void mic_reset_trigger_work(struct work_struct *work)
+{
+ struct mic_device *mdev = container_of(work, struct mic_device,
+ reset_trigger_work);
+
+ mic_stop(mdev, false);
+}
+
+/**
+ * mic_complete_resume - Complete MIC Resume after an OSPM suspend/hibernate
+ * event.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: None.
+ */
+void mic_complete_resume(struct mic_device *mdev)
+{
+ if (mdev->state != MIC_SUSPENDED) {
+ dev_warn(mdev->sdev->parent, "state %d should be %d\n",
+ mdev->state, MIC_SUSPENDED);
+ return;
+ }
+
+ /* Make sure firmware is ready */
+ if (!mdev->ops->is_fw_ready(mdev))
+ mic_stop(mdev, true);
+
+ mutex_lock(&mdev->mic_mutex);
+ mic_set_state(mdev, MIC_OFFLINE);
+ mutex_unlock(&mdev->mic_mutex);
+}
+
+/**
+ * mic_prepare_suspend - Handle suspend notification for the MIC device.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: None.
+ */
+void mic_prepare_suspend(struct mic_device *mdev)
+{
+ int rc;
+
+#define MIC_SUSPEND_TIMEOUT (60 * HZ)
+
+ mutex_lock(&mdev->mic_mutex);
+ switch (mdev->state) {
+ case MIC_OFFLINE:
+ /*
+ * Card is already offline. Set state to MIC_SUSPENDED
+ * to prevent users from booting the card.
+ */
+ mic_set_state(mdev, MIC_SUSPENDED);
+ mutex_unlock(&mdev->mic_mutex);
+ break;
+ case MIC_ONLINE:
+ /*
+ * Card is online. Set state to MIC_SUSPENDING and notify
+ * MIC user space daemon which will issue card
+ * shutdown and reset.
+ */
+ mic_set_state(mdev, MIC_SUSPENDING);
+ mutex_unlock(&mdev->mic_mutex);
+ rc = wait_for_completion_timeout(&mdev->reset_wait,
+ MIC_SUSPEND_TIMEOUT);
+ /* Force reset the card if the shutdown completion timed out */
+ if (!rc) {
+ mutex_lock(&mdev->mic_mutex);
+ mic_set_state(mdev, MIC_SUSPENDED);
+ mutex_unlock(&mdev->mic_mutex);
+ mic_stop(mdev, true);
+ }
+ break;
+ case MIC_SHUTTING_DOWN:
+ /*
+ * Card is shutting down. Set state to MIC_SUSPENDED
+ * to prevent further boot of the card.
+ */
+ mic_set_state(mdev, MIC_SUSPENDED);
+ mutex_unlock(&mdev->mic_mutex);
+ rc = wait_for_completion_timeout(&mdev->reset_wait,
+ MIC_SUSPEND_TIMEOUT);
+ /* Force reset the card if the shutdown completion timed out */
+ if (!rc)
+ mic_stop(mdev, true);
+ break;
+ default:
+ mutex_unlock(&mdev->mic_mutex);
+ break;
+ }
+}
+
+/**
+ * mic_suspend - Initiate MIC suspend. Suspend merely issues card shutdown.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: None.
+ */
+void mic_suspend(struct mic_device *mdev)
+{
+ struct mic_bootparam *bootparam = mdev->dp;
+ s8 db = bootparam->h2c_shutdown_db;
+
+ mutex_lock(&mdev->mic_mutex);
+ if (MIC_SUSPENDING == mdev->state && db != -1) {
+ bootparam->shutdown_card = 1;
+ mdev->ops->send_intr(mdev, db);
+ mic_set_state(mdev, MIC_SUSPENDED);
+ }
+ mutex_unlock(&mdev->mic_mutex);
+}
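mic_shutdown() and mic_suspend() above form the host half of a small doorbell handshake over struct mic_bootparam; the card half was added earlier in card/mic_device.c. Condensing the code from both sides into one trace (no new logic, just the existing calls side by side):

    /* host (mic_shutdown() / mic_suspend()) */
    bootparam->shutdown_card = 1;
    mdev->ops->send_intr(mdev, bootparam->h2c_shutdown_db);

    /* card (card/mic_device.c): mic_shutdown_isr() sees shutdown_card and calls
     * orderly_poweroff(); on its way down, mic_driver_uninit() runs
     * mic_notify_host(system_state), which does: */
    iowrite8(state, &bootparam->shutdown_status);
    mic_send_intr(&mdrv->mdev, ioread8(&bootparam->c2h_shutdown_db));

    /* host: the resulting shutdown interrupt schedules mic_shutdown_work(),
     * which latches shutdown_status and moves the state to MIC_SHUTTING_DOWN
     * (unless an OSPM suspend is in progress). */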
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
new file mode 100644
index 00000000000..028ba5d6fd1
--- /dev/null
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -0,0 +1,491 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_smpt.h"
+#include "mic_virtio.h"
+
+/* Debugfs parent dir */
+static struct dentry *mic_dbg;
+
+/**
+ * mic_log_buf_show - Display MIC kernel log buffer.
+ *
+ * log_buf addr/len is read from System.map by user space
+ * and populated in sysfs entries.
+ */
+static int mic_log_buf_show(struct seq_file *s, void *unused)
+{
+ void __iomem *log_buf_va;
+ int __iomem *log_buf_len_va;
+ struct mic_device *mdev = s->private;
+ void *kva;
+ int size;
+ unsigned long aper_offset;
+
+ if (!mdev || !mdev->log_buf_addr || !mdev->log_buf_len)
+ goto done;
+ /*
+ * The card kernel is never relocated, so any kernel text/data address
+ * can be translated to a physical address by subtracting __START_KERNEL_map.
+ */
+ aper_offset = (unsigned long)mdev->log_buf_len - __START_KERNEL_map;
+ log_buf_len_va = mdev->aper.va + aper_offset;
+ aper_offset = (unsigned long)mdev->log_buf_addr - __START_KERNEL_map;
+ log_buf_va = mdev->aper.va + aper_offset;
+ size = ioread32(log_buf_len_va);
+
+ kva = kmalloc(size, GFP_KERNEL);
+ if (!kva)
+ goto done;
+ mutex_lock(&mdev->mic_mutex);
+ memcpy_fromio(kva, log_buf_va, size);
+ switch (mdev->state) {
+ case MIC_ONLINE:
+ /* Fall through */
+ case MIC_SHUTTING_DOWN:
+ seq_write(s, kva, size);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&mdev->mic_mutex);
+ kfree(kva);
+done:
+ return 0;
+}
+
+static int mic_log_buf_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_log_buf_show, inode->i_private);
+}
+
+static int mic_log_buf_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations log_buf_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_log_buf_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_log_buf_release
+};
+
+static int mic_smpt_show(struct seq_file *s, void *pos)
+{
+ int i;
+ struct mic_device *mdev = s->private;
+ unsigned long flags;
+
+ seq_printf(s, "MIC %-2d |%-10s| %-14s %-10s\n",
+ mdev->id, "SMPT entry", "SW DMA addr", "RefCount");
+ seq_puts(s, "====================================================\n");
+
+ if (mdev->smpt) {
+ struct mic_smpt_info *smpt_info = mdev->smpt;
+ spin_lock_irqsave(&smpt_info->smpt_lock, flags);
+ for (i = 0; i < smpt_info->info.num_reg; i++) {
+ seq_printf(s, "%9s|%-10d| %-#14llx %-10lld\n",
+ " ", i, smpt_info->entry[i].dma_addr,
+ smpt_info->entry[i].ref_count);
+ }
+ spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
+ }
+ seq_puts(s, "====================================================\n");
+ return 0;
+}
+
+static int mic_smpt_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_smpt_show, inode->i_private);
+}
+
+static int mic_smpt_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations smpt_file_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_smpt_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_smpt_debug_release
+};
+
+static int mic_soft_reset_show(struct seq_file *s, void *pos)
+{
+ struct mic_device *mdev = s->private;
+
+ mic_stop(mdev, true);
+ return 0;
+}
+
+static int mic_soft_reset_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_soft_reset_show, inode->i_private);
+}
+
+static int mic_soft_reset_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations soft_reset_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_soft_reset_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_soft_reset_debug_release
+};
+
+static int mic_post_code_show(struct seq_file *s, void *pos)
+{
+ struct mic_device *mdev = s->private;
+ u32 reg = mdev->ops->get_postcode(mdev);
+
+ seq_printf(s, "%c%c", reg & 0xff, (reg >> 8) & 0xff);
+ return 0;
+}
+
+static int mic_post_code_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_post_code_show, inode->i_private);
+}
+
+static int mic_post_code_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations post_code_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_post_code_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_post_code_debug_release
+};
+
+static int mic_dp_show(struct seq_file *s, void *pos)
+{
+ struct mic_device *mdev = s->private;
+ struct mic_device_desc *d;
+ struct mic_device_ctrl *dc;
+ struct mic_vqconfig *vqconfig;
+ __u32 *features;
+ __u8 *config;
+ struct mic_bootparam *bootparam = mdev->dp;
+ int i, j;
+
+ seq_printf(s, "Bootparam: magic 0x%x\n",
+ bootparam->magic);
+ seq_printf(s, "Bootparam: h2c_shutdown_db %d\n",
+ bootparam->h2c_shutdown_db);
+ seq_printf(s, "Bootparam: h2c_config_db %d\n",
+ bootparam->h2c_config_db);
+ seq_printf(s, "Bootparam: c2h_shutdown_db %d\n",
+ bootparam->c2h_shutdown_db);
+ seq_printf(s, "Bootparam: shutdown_status %d\n",
+ bootparam->shutdown_status);
+ seq_printf(s, "Bootparam: shutdown_card %d\n",
+ bootparam->shutdown_card);
+
+ for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
+ i += mic_total_desc_size(d)) {
+ d = mdev->dp + i;
+ dc = (void *)d + mic_aligned_desc_size(d);
+
+ /* end of list */
+ if (d->type == 0)
+ break;
+
+ if (d->type == -1)
+ continue;
+
+ seq_printf(s, "Type %d ", d->type);
+ seq_printf(s, "Num VQ %d ", d->num_vq);
+ seq_printf(s, "Feature Len %d\n", d->feature_len);
+ seq_printf(s, "Config Len %d ", d->config_len);
+ seq_printf(s, "Shutdown Status %d\n", d->status);
+
+ for (j = 0; j < d->num_vq; j++) {
+ vqconfig = mic_vq_config(d) + j;
+ seq_printf(s, "vqconfig[%d]: ", j);
+ seq_printf(s, "address 0x%llx ", vqconfig->address);
+ seq_printf(s, "num %d ", vqconfig->num);
+ seq_printf(s, "used address 0x%llx\n",
+ vqconfig->used_address);
+ }
+
+ features = (__u32 *)mic_vq_features(d);
+ seq_printf(s, "Features: Host 0x%x ", features[0]);
+ seq_printf(s, "Guest 0x%x\n", features[1]);
+
+ config = mic_vq_configspace(d);
+ for (j = 0; j < d->config_len; j++)
+ seq_printf(s, "config[%d]=%d\n", j, config[j]);
+
+ seq_puts(s, "Device control:\n");
+ seq_printf(s, "Config Change %d ", dc->config_change);
+ seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
+ seq_printf(s, "Guest Ack %d ", dc->guest_ack);
+ seq_printf(s, "Host ack %d\n", dc->host_ack);
+ seq_printf(s, "Used address updated %d ",
+ dc->used_address_updated);
+ seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
+ seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
+ seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
+ }
+
+ return 0;
+}
+
+static int mic_dp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_dp_show, inode->i_private);
+}
+
+static int mic_dp_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations dp_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_dp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_dp_debug_release
+};
+
+static int mic_vdev_info_show(struct seq_file *s, void *unused)
+{
+ struct mic_device *mdev = s->private;
+ struct list_head *pos, *tmp;
+ struct mic_vdev *mvdev;
+ int i, j;
+
+ mutex_lock(&mdev->mic_mutex);
+ list_for_each_safe(pos, tmp, &mdev->vdev_list) {
+ mvdev = list_entry(pos, struct mic_vdev, list);
+ seq_printf(s, "VDEV type %d state %s in %ld out %ld\n",
+ mvdev->virtio_id,
+ mic_vdevup(mvdev) ? "UP" : "DOWN",
+ mvdev->in_bytes,
+ mvdev->out_bytes);
+ for (i = 0; i < MIC_MAX_VRINGS; i++) {
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ struct mic_vringh *mvr = &mvdev->mvr[i];
+ struct vringh *vrh = &mvr->vrh;
+ int num = vrh->vring.num;
+ if (!num)
+ continue;
+ desc = vrh->vring.desc;
+ seq_printf(s, "vring i %d avail_idx %d",
+ i, mvr->vring.info->avail_idx & (num - 1));
+ seq_printf(s, " vring i %d avail_idx %d\n",
+ i, mvr->vring.info->avail_idx);
+ seq_printf(s, "vrh i %d weak_barriers %d",
+ i, vrh->weak_barriers);
+ seq_printf(s, " last_avail_idx %d last_used_idx %d",
+ vrh->last_avail_idx, vrh->last_used_idx);
+ seq_printf(s, " completed %d\n", vrh->completed);
+ for (j = 0; j < num; j++) {
+ seq_printf(s, "desc[%d] addr 0x%llx len %d",
+ j, desc->addr, desc->len);
+ seq_printf(s, " flags 0x%x next %d\n",
+ desc->flags, desc->next);
+ desc++;
+ }
+ avail = vrh->vring.avail;
+ seq_printf(s, "avail flags 0x%x idx %d\n",
+ avail->flags, avail->idx & (num - 1));
+ seq_printf(s, "avail flags 0x%x idx %d\n",
+ avail->flags, avail->idx);
+ for (j = 0; j < num; j++)
+ seq_printf(s, "avail ring[%d] %d\n",
+ j, avail->ring[j]);
+ used = vrh->vring.used;
+ seq_printf(s, "used flags 0x%x idx %d\n",
+ used->flags, used->idx & (num - 1));
+ seq_printf(s, "used flags 0x%x idx %d\n",
+ used->flags, used->idx);
+ for (j = 0; j < num; j++)
+ seq_printf(s, "used ring[%d] id %d len %d\n",
+ j, used->ring[j].id,
+ used->ring[j].len);
+ }
+ }
+ mutex_unlock(&mdev->mic_mutex);
+
+ return 0;
+}
+
+static int mic_vdev_info_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_vdev_info_show, inode->i_private);
+}
+
+static int mic_vdev_info_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations vdev_info_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_vdev_info_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_vdev_info_debug_release
+};
+
+static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
+{
+ struct mic_device *mdev = s->private;
+ int reg;
+ int i, j;
+ u16 entry;
+ u16 vector;
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+
+ if (pci_dev_msi_enabled(pdev)) {
+ for (i = 0; i < mdev->irq_info.num_vectors; i++) {
+ if (pdev->msix_enabled) {
+ entry = mdev->irq_info.msix_entries[i].entry;
+ vector = mdev->irq_info.msix_entries[i].vector;
+ } else {
+ entry = 0;
+ vector = pdev->irq;
+ }
+
+ reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry);
+
+ seq_printf(s, "%s %-10d %s %-10d MXAR[%d]: %08X\n",
+ "IRQ:", vector, "Entry:", entry, i, reg);
+
+ seq_printf(s, "%-10s", "offset:");
+ for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
+ seq_printf(s, "%4d ", j);
+ seq_puts(s, "\n");
+
+
+ seq_printf(s, "%-10s", "count:");
+ for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
+ seq_printf(s, "%4d ",
+ (mdev->irq_info.mic_msi_map[i] &
+ BIT(j)) ? 1 : 0);
+ seq_puts(s, "\n\n");
+ }
+ } else {
+ seq_puts(s, "MSI/MSIx interrupts not enabled\n");
+ }
+
+ return 0;
+}
+
+static int mic_msi_irq_info_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mic_msi_irq_info_show, inode->i_private);
+}
+
+static int
+mic_msi_irq_info_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations msi_irq_info_ops = {
+ .owner = THIS_MODULE,
+ .open = mic_msi_irq_info_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mic_msi_irq_info_debug_release
+};
+
+/**
+ * mic_create_debug_dir - Initialize MIC debugfs entries.
+ */
+void mic_create_debug_dir(struct mic_device *mdev)
+{
+ if (!mic_dbg)
+ return;
+
+ mdev->dbg_dir = debugfs_create_dir(dev_name(mdev->sdev), mic_dbg);
+ if (!mdev->dbg_dir)
+ return;
+
+ debugfs_create_file("log_buf", 0444, mdev->dbg_dir, mdev, &log_buf_ops);
+
+ debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops);
+
+ debugfs_create_file("soft_reset", 0444, mdev->dbg_dir, mdev,
+ &soft_reset_ops);
+
+ debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
+ &post_code_ops);
+
+ debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops);
+
+ debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev,
+ &vdev_info_ops);
+
+ debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
+ &msi_irq_info_ops);
+}
+
+/**
+ * mic_delete_debug_dir - Uninitialize MIC debugfs entries.
+ */
+void mic_delete_debug_dir(struct mic_device *mdev)
+{
+ if (!mdev->dbg_dir)
+ return;
+
+ debugfs_remove_recursive(mdev->dbg_dir);
+}
+
+/**
+ * mic_init_debugfs - Initialize global debugfs entry.
+ */
+void __init mic_init_debugfs(void)
+{
+ mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!mic_dbg)
+ pr_err("can't create debugfs dir\n");
+}
+
+/**
+ * mic_exit_debugfs - Uninitialize global debugfs entry
+ */
+void mic_exit_debugfs(void)
+{
+ debugfs_remove(mic_dbg);
+}
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
new file mode 100644
index 00000000000..3574cc375bb
--- /dev/null
+++ b/drivers/misc/mic/host/mic_device.h
@@ -0,0 +1,203 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#ifndef _MIC_DEVICE_H_
+#define _MIC_DEVICE_H_
+
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/notifier.h>
+
+#include "mic_intr.h"
+
+/* The maximum number of MIC devices supported in a single host system. */
+#define MIC_MAX_NUM_DEVS 256
+
+/**
+ * enum mic_hw_family - The hardware family to which a device belongs.
+ */
+enum mic_hw_family {
+ MIC_FAMILY_X100 = 0,
+ MIC_FAMILY_UNKNOWN
+};
+
+/**
+ * enum mic_stepping - MIC stepping ids.
+ */
+enum mic_stepping {
+ MIC_A0_STEP = 0x0,
+ MIC_B0_STEP = 0x10,
+ MIC_B1_STEP = 0x11,
+ MIC_C0_STEP = 0x20,
+};
+
+/**
+ * struct mic_device - MIC device information for each card.
+ *
+ * @mmio: MMIO bar information.
+ * @aper: Aperture bar information.
+ * @family: The MIC family to which this device belongs.
+ * @ops: MIC HW specific operations.
+ * @id: The unique device id for this MIC device.
+ * @stepping: Stepping ID.
+ * @attr_group: Pointer to list of sysfs attribute groups.
+ * @sdev: Device for sysfs entries.
+ * @mic_mutex: Mutex for synchronizing access to mic_device.
+ * @intr_ops: HW specific interrupt operations.
+ * @smpt_ops: Hardware specific SMPT operations.
+ * @smpt: MIC SMPT information.
+ * @intr_info: H/W specific interrupt information.
+ * @irq_info: The OS specific irq information
+ * @dbg_dir: debugfs directory of this MIC device.
+ * @cmdline: Kernel command line.
+ * @firmware: Firmware file name.
+ * @ramdisk: Ramdisk file name.
+ * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates.
+ * @bootaddr: MIC boot address.
+ * @reset_trigger_work: Work for triggering reset requests.
+ * @shutdown_work: Work for handling shutdown interrupts.
+ * @state: MIC state.
+ * @shutdown_status: MIC status reported by card for shutdown/crashes.
+ * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes.
+ * @reset_wait: Waitqueue for sleeping while reset completes.
+ * @log_buf_addr: Log buffer address for MIC.
+ * @log_buf_len: Log buffer length address for MIC.
+ * @dp: virtio device page
+ * @dp_dma_addr: virtio device page DMA address.
+ * @shutdown_db: shutdown doorbell.
+ * @shutdown_cookie: shutdown cookie.
+ * @cdev: Character device for MIC.
+ * @vdev_list: list of virtio devices.
+ * @pm_notifier: Handles PM notifications from the OS.
+ */
+struct mic_device {
+ struct mic_mw mmio;
+ struct mic_mw aper;
+ enum mic_hw_family family;
+ struct mic_hw_ops *ops;
+ int id;
+ enum mic_stepping stepping;
+ const struct attribute_group **attr_group;
+ struct device *sdev;
+ struct mutex mic_mutex;
+ struct mic_hw_intr_ops *intr_ops;
+ struct mic_smpt_ops *smpt_ops;
+ struct mic_smpt_info *smpt;
+ struct mic_intr_info *intr_info;
+ struct mic_irq_info irq_info;
+ struct dentry *dbg_dir;
+ char *cmdline;
+ char *firmware;
+ char *ramdisk;
+ char *bootmode;
+ u32 bootaddr;
+ struct work_struct reset_trigger_work;
+ struct work_struct shutdown_work;
+ u8 state;
+ u8 shutdown_status;
+ struct sysfs_dirent *state_sysfs;
+ struct completion reset_wait;
+ void *log_buf_addr;
+ int *log_buf_len;
+ void *dp;
+ dma_addr_t dp_dma_addr;
+ int shutdown_db;
+ struct mic_irq *shutdown_cookie;
+ struct cdev cdev;
+ struct list_head vdev_list;
+ struct notifier_block pm_notifier;
+};
+
+/**
+ * struct mic_hw_ops - MIC HW specific operations.
+ * @aper_bar: Aperture bar resource number.
+ * @mmio_bar: MMIO bar resource number.
+ * @read_spad: Read from scratch pad register.
+ * @write_spad: Write to scratch pad register.
+ * @send_intr: Send an interrupt for a particular doorbell on the card.
+ * @ack_interrupt: Hardware specific operations to ack the h/w on
+ * receipt of an interrupt.
+ * @reset: Reset the remote processor.
+ * @reset_fw_ready: Reset firmware ready field.
+ * @is_fw_ready: Check if firmware is ready for OS download.
+ * @send_firmware_intr: Send an interrupt to the card firmware.
+ * @load_mic_fw: Load firmware segments required to boot the card
+ * into card memory. This includes the kernel, command line, ramdisk etc.
+ * @get_postcode: Get post code status from firmware.
+ */
+struct mic_hw_ops {
+ u8 aper_bar;
+ u8 mmio_bar;
+ u32 (*read_spad)(struct mic_device *mdev, unsigned int idx);
+ void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
+ void (*send_intr)(struct mic_device *mdev, int doorbell);
+ u32 (*ack_interrupt)(struct mic_device *mdev);
+ void (*reset)(struct mic_device *mdev);
+ void (*reset_fw_ready)(struct mic_device *mdev);
+ bool (*is_fw_ready)(struct mic_device *mdev);
+ void (*send_firmware_intr)(struct mic_device *mdev);
+ int (*load_mic_fw)(struct mic_device *mdev, const char *buf);
+ u32 (*get_postcode)(struct mic_device *mdev);
+};
+
+/**
+ * mic_mmio_read - read from an MMIO register.
+ * @mw: MMIO register base virtual address.
+ * @offset: register offset.
+ *
+ * RETURNS: register value.
+ */
+static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset)
+{
+ return ioread32(mw->va + offset);
+}
+
+/**
+ * mic_mmio_write - write to an MMIO register.
+ * @mw: MMIO register base virtual address.
+ * @val: the data value to put into the register
+ * @offset: register offset.
+ *
+ * RETURNS: none.
+ */
+static inline void
+mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
+{
+ iowrite32(val, mw->va + offset);
+}
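+
+/*
+ * Illustrative usage (a sketch; SOME_REG_OFFSET is a hypothetical offset and
+ * not a register defined by this driver):
+ *
+ *	u32 val = mic_mmio_read(&mdev->mmio, SOME_REG_OFFSET);
+ *	mic_mmio_write(&mdev->mmio, val | 0x1, SOME_REG_OFFSET);
+ */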
+
+void mic_sysfs_init(struct mic_device *mdev);
+int mic_start(struct mic_device *mdev, const char *buf);
+void mic_stop(struct mic_device *mdev, bool force);
+void mic_shutdown(struct mic_device *mdev);
+void mic_reset_delayed_work(struct work_struct *work);
+void mic_reset_trigger_work(struct work_struct *work);
+void mic_shutdown_work(struct work_struct *work);
+void mic_bootparam_init(struct mic_device *mdev);
+void mic_set_state(struct mic_device *mdev, u8 state);
+void mic_set_shutdown_status(struct mic_device *mdev, u8 status);
+void mic_create_debug_dir(struct mic_device *dev);
+void mic_delete_debug_dir(struct mic_device *dev);
+void __init mic_init_debugfs(void);
+void mic_exit_debugfs(void);
+void mic_prepare_suspend(struct mic_device *mdev);
+void mic_complete_resume(struct mic_device *mdev);
+void mic_suspend(struct mic_device *mdev);
+#endif
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c
new file mode 100644
index 00000000000..85776d7327f
--- /dev/null
+++ b/drivers/misc/mic/host/mic_fops.c
@@ -0,0 +1,222 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/poll.h>
+#include <linux/pci.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_fops.h"
+#include "mic_virtio.h"
+
+int mic_open(struct inode *inode, struct file *f)
+{
+ struct mic_vdev *mvdev;
+ struct mic_device *mdev = container_of(inode->i_cdev,
+ struct mic_device, cdev);
+
+ mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
+ if (!mvdev)
+ return -ENOMEM;
+
+ init_waitqueue_head(&mvdev->waitq);
+ INIT_LIST_HEAD(&mvdev->list);
+ mvdev->mdev = mdev;
+ mvdev->virtio_id = -1;
+
+ f->private_data = mvdev;
+ return 0;
+}
+
+int mic_release(struct inode *inode, struct file *f)
+{
+ struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+
+ if (mvdev->virtio_id != -1)
+ mic_virtio_del_device(mvdev);
+ f->private_data = NULL;
+ kfree(mvdev);
+ return 0;
+}
+
+long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ switch (cmd) {
+ case MIC_VIRTIO_ADD_DEVICE:
+ {
+ ret = mic_virtio_add_device(mvdev, argp);
+ if (ret < 0) {
+ dev_err(mic_dev(mvdev),
+ "%s %d errno ret %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ break;
+ }
+ case MIC_VIRTIO_COPY_DESC:
+ {
+ struct mic_copy_desc copy;
+
+ ret = mic_vdev_inited(mvdev);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(&copy, argp, sizeof(copy)))
+ return -EFAULT;
+
+ dev_dbg(mic_dev(mvdev),
+ "%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
+ __func__, __LINE__, copy.iovcnt, copy.vr_idx,
+ copy.update_used);
+
+ ret = mic_virtio_copy_desc(mvdev, &copy);
+ if (ret < 0) {
+ dev_err(mic_dev(mvdev),
+ "%s %d errno ret %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
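+ /* Only the updated out_len field needs to be copied back to user space. */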
+ if (copy_to_user(
+ &((struct mic_copy_desc __user *)argp)->out_len,
+ &copy.out_len, sizeof(copy.out_len))) {
+ dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
+ __func__, __LINE__, -EFAULT);
+ return -EFAULT;
+ }
+ break;
+ }
+ case MIC_VIRTIO_CONFIG_CHANGE:
+ {
+ ret = mic_vdev_inited(mvdev);
+ if (ret)
+ return ret;
+
+ ret = mic_virtio_config_change(mvdev, argp);
+ if (ret < 0) {
+ dev_err(mic_dev(mvdev),
+ "%s %d errno ret %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ break;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/*
+ * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * not when previously enqueued buffers may be available. This means that
+ * in the card->host (TX) path, when userspace is unblocked by poll it
+ * must drain all available descriptors or it can stall.
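+ *
+ * Illustrative user space pattern (a sketch, not part of the kernel API):
+ * once poll() reports POLLIN | POLLOUT, keep issuing MIC_VIRTIO_COPY_DESC
+ * ioctls until no descriptors remain before going back to poll(), so that
+ * a notification is never missed.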
+ */
+unsigned int mic_poll(struct file *f, poll_table *wait)
+{
+ struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+ int mask = 0;
+
+ poll_wait(f, &mvdev->waitq, wait);
+
+ if (mic_vdev_inited(mvdev)) {
+ mask = POLLERR;
+ } else if (mvdev->poll_wake) {
+ mvdev->poll_wake = 0;
+ mask = POLLIN | POLLOUT;
+ }
+
+ return mask;
+}
+
+static inline int
+mic_query_offset(struct mic_vdev *mvdev, unsigned long offset,
+ unsigned long *size, unsigned long *pa)
+{
+ struct mic_device *mdev = mvdev->mdev;
+ unsigned long start = MIC_DP_SIZE;
+ int i;
+
+ /*
+ * MMAP interface is as follows:
+ * offset region
+ * 0x0 virtio device_page
+ * 0x1000 first vring
+ * 0x1000 + size of 1st vring second vring
+ * ....
+ */
+ if (!offset) {
+ *pa = virt_to_phys(mdev->dp);
+ *size = MIC_DP_SIZE;
+ return 0;
+ }
+
+ for (i = 0; i < mvdev->dd->num_vq; i++) {
+ struct mic_vringh *mvr = &mvdev->mvr[i];
+ if (offset == start) {
+ *pa = virt_to_phys(mvr->vring.va);
+ *size = mvr->vring.len;
+ return 0;
+ }
+ start += mvr->vring.len;
+ }
+ return -1;
+}
+
+/*
+ * Maps the device page and virtio rings into user space for read-only access.
+ */
+int
+mic_mmap(struct file *f, struct vm_area_struct *vma)
+{
+ struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
+ int i, err;
+
+ err = mic_vdev_inited(mvdev);
+ if (err)
+ return err;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EACCES;
+
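+ /* Remap the requested range region by region (device page, then vrings). */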
+ while (size_rem) {
+ i = mic_query_offset(mvdev, offset, &size, &pa);
+ if (i < 0)
+ return -EINVAL;
+ err = remap_pfn_range(vma, vma->vm_start + offset,
+ pa >> PAGE_SHIFT, size, vma->vm_page_prot);
+ if (err)
+ return err;
+ dev_dbg(mic_dev(mvdev),
+ "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n",
+ __func__, __LINE__, mvdev->virtio_id, size, offset,
+ pa, vma->vm_start + offset);
+ size_rem -= size;
+ offset += size;
+ }
+ return 0;
+}
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h
new file mode 100644
index 00000000000..dc3893dff66
--- /dev/null
+++ b/drivers/misc/mic/host/mic_fops.h
@@ -0,0 +1,32 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#ifndef _MIC_FOPS_H_
+#define _MIC_FOPS_H_
+
+int mic_open(struct inode *inode, struct file *filp);
+int mic_release(struct inode *inode, struct file *filp);
+ssize_t mic_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos);
+long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+int mic_mmap(struct file *f, struct vm_area_struct *vma);
+unsigned int mic_poll(struct file *f, poll_table *wait);
+
+#endif
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
new file mode 100644
index 00000000000..f9c29bc918b
--- /dev/null
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -0,0 +1,630 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+
+/*
+ * mic_invoke_callback - Invoke callback functions registered for
+ * the corresponding source id.
+ *
+ * @mdev: pointer to the mic_device instance
+ * @idx: The interrupt source id.
+ *
+ * Returns none.
+ */
+static inline void mic_invoke_callback(struct mic_device *mdev, int idx)
+{
+ struct mic_intr_cb *intr_cb;
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+
+ spin_lock(&mdev->irq_info.mic_intr_lock);
+ list_for_each_entry(intr_cb, &mdev->irq_info.cb_list[idx], list)
+ if (intr_cb->func)
+ intr_cb->func(pdev->irq, intr_cb->data);
+ spin_unlock(&mdev->irq_info.mic_intr_lock);
+}
+
+/**
+ * mic_interrupt - Generic interrupt handler for
+ * MSI and INTx based interrupts.
+ */
+static irqreturn_t mic_interrupt(int irq, void *dev)
+{
+ struct mic_device *mdev = dev;
+ struct mic_intr_info *info = mdev->intr_info;
+ u32 mask;
+ int i;
+
+ mask = mdev->ops->ack_interrupt(mdev);
+ if (!mask)
+ return IRQ_NONE;
+
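+ /* Invoke registered callbacks for each doorbell source set in the mask. */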
+ for (i = info->intr_start_idx[MIC_INTR_DB];
+ i < info->intr_len[MIC_INTR_DB]; i++)
+ if (mask & BIT(i))
+ mic_invoke_callback(mdev, i);
+
+ return IRQ_HANDLED;
+}
+
+/* Return the interrupt offset from the index. Index is 0 based. */
+static u16 mic_map_src_to_offset(struct mic_device *mdev,
+ int intr_src, enum mic_intr_type type)
+{
+ if (type >= MIC_NUM_INTR_TYPES)
+ return MIC_NUM_OFFSETS;
+ if (intr_src >= mdev->intr_info->intr_len[type])
+ return MIC_NUM_OFFSETS;
+
+ return mdev->intr_info->intr_start_idx[type] + intr_src;
+}
+
+/* Return next available msix_entry. */
+static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
+{
+ int i;
+ struct mic_irq_info *info = &mdev->irq_info;
+
+ for (i = 0; i < info->num_vectors; i++)
+ if (!info->mic_msi_map[i])
+ return &info->msix_entries[i];
+ return NULL;
+}
+
+/**
+ * mic_register_intr_callback - Register a callback handler for the
+ * given source id.
+ *
+ * @mdev: pointer to the mic_device instance
+ * @idx: The source id to be registered.
+ * @func: The function to be called when the source id receives
+ * the interrupt.
+ * @data: Private data of the requester.
+ * Return the callback structure that was registered or an
+ * appropriate error on failure.
+ */
+static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
+ u8 idx, irqreturn_t (*func) (int irq, void *dev),
+ void *data)
+{
+ struct mic_intr_cb *intr_cb;
+ unsigned long flags;
+ int rc;
+ intr_cb = kmalloc(sizeof(*intr_cb), GFP_KERNEL);
+
+ if (!intr_cb)
+ return ERR_PTR(-ENOMEM);
+
+ intr_cb->func = func;
+ intr_cb->data = data;
+ intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
+ 0, 0, GFP_KERNEL);
+ if (intr_cb->cb_id < 0) {
+ rc = intr_cb->cb_id;
+ goto ida_fail;
+ }
+
+ spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
+ list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
+ spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
+
+ return intr_cb;
+ida_fail:
+ kfree(intr_cb);
+ return ERR_PTR(rc);
+}
+
+/**
+ * mic_unregister_intr_callback - Unregister the callback handler
+ * identified by its callback id.
+ *
+ * @mdev: pointer to the mic_device instance
+ * @idx: The callback structure id to be unregistered.
+ * Return the source id that was unregistered or MIC_NUM_OFFSETS if no
+ * such callback handler was found.
+ */
+static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
+{
+ struct list_head *pos, *tmp;
+ struct mic_intr_cb *intr_cb;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < MIC_NUM_OFFSETS; i++) {
+ spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
+ list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
+ intr_cb = list_entry(pos, struct mic_intr_cb, list);
+ if (intr_cb->cb_id == idx) {
+ list_del(pos);
+ ida_simple_remove(&mdev->irq_info.cb_ida,
+ intr_cb->cb_id);
+ kfree(intr_cb);
+ spin_unlock_irqrestore(
+ &mdev->irq_info.mic_intr_lock, flags);
+ return i;
+ }
+ }
+ spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
+ }
+ return MIC_NUM_OFFSETS;
+}
+
+/**
+ * mic_setup_msix - Initializes MSIx interrupts.
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
+{
+ int rc, i;
+ int entry_size = sizeof(*mdev->irq_info.msix_entries);
+
+ mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX,
+ entry_size, GFP_KERNEL);
+ if (!mdev->irq_info.msix_entries) {
+ rc = -ENOMEM;
+ goto err_nomem1;
+ }
+
+ for (i = 0; i < MIC_MIN_MSIX; i++)
+ mdev->irq_info.msix_entries[i].entry = i;
+
+ rc = pci_enable_msix(pdev, mdev->irq_info.msix_entries,
+ MIC_MIN_MSIX);
+ if (rc) {
+ dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
+ goto err_enable_msix;
+ }
+
+ mdev->irq_info.num_vectors = MIC_MIN_MSIX;
+ mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
+ mdev->irq_info.num_vectors), GFP_KERNEL);
+
+ if (!mdev->irq_info.mic_msi_map) {
+ rc = -ENOMEM;
+ goto err_nomem2;
+ }
+
+ dev_dbg(mdev->sdev->parent,
+ "%d MSIx irqs setup\n", mdev->irq_info.num_vectors);
+ return 0;
+err_nomem2:
+ pci_disable_msix(pdev);
+err_enable_msix:
+ kfree(mdev->irq_info.msix_entries);
+err_nomem1:
+ mdev->irq_info.num_vectors = 0;
+ return rc;
+}
+
+/**
+ * mic_setup_callbacks - Initialize data structures needed
+ * to handle callbacks.
+ *
+ * @mdev: pointer to mic_device instance
+ */
+static int mic_setup_callbacks(struct mic_device *mdev)
+{
+ int i;
+
+ mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS,
+ sizeof(*mdev->irq_info.cb_list),
+ GFP_KERNEL);
+ if (!mdev->irq_info.cb_list)
+ return -ENOMEM;
+
+ for (i = 0; i < MIC_NUM_OFFSETS; i++)
+ INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
+ ida_init(&mdev->irq_info.cb_ida);
+ spin_lock_init(&mdev->irq_info.mic_intr_lock);
+ return 0;
+}
+
+/**
+ * mic_release_callbacks - Uninitialize data structures needed
+ * to handle callbacks.
+ *
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_release_callbacks(struct mic_device *mdev)
+{
+ unsigned long flags;
+ struct list_head *pos, *tmp;
+ struct mic_intr_cb *intr_cb;
+ int i;
+
+ for (i = 0; i < MIC_NUM_OFFSETS; i++) {
+ spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
+
+ if (list_empty(&mdev->irq_info.cb_list[i])) {
+ spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock,
+ flags);
+ break;
+ }
+
+ list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
+ intr_cb = list_entry(pos, struct mic_intr_cb, list);
+ list_del(pos);
+ ida_simple_remove(&mdev->irq_info.cb_ida,
+ intr_cb->cb_id);
+ kfree(intr_cb);
+ }
+ spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
+ }
+ ida_destroy(&mdev->irq_info.cb_ida);
+ kfree(mdev->irq_info.cb_list);
+}
+
+/**
+ * mic_setup_msi - Initializes MSI interrupts.
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_msi(pdev);
+ if (rc) {
+ dev_dbg(&pdev->dev, "Error enabling MSI. rc = %d\n", rc);
+ return rc;
+ }
+
+ mdev->irq_info.num_vectors = 1;
+ mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
+ mdev->irq_info.num_vectors), GFP_KERNEL);
+
+ if (!mdev->irq_info.mic_msi_map) {
+ rc = -ENOMEM;
+ goto err_nomem1;
+ }
+
+ rc = mic_setup_callbacks(mdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up callbacks\n");
+ goto err_nomem2;
+ }
+
+ rc = request_irq(pdev->irq, mic_interrupt, 0, "mic-msi", mdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
+ goto err_irq_req_fail;
+ }
+
+ dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors);
+ return 0;
+err_irq_req_fail:
+ mic_release_callbacks(mdev);
+err_nomem2:
+ kfree(mdev->irq_info.mic_msi_map);
+err_nomem1:
+ pci_disable_msi(pdev);
+ mdev->irq_info.num_vectors = 0;
+ return rc;
+}
+
+/**
+ * mic_setup_intx - Initializes legacy interrupts.
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
+{
+ int rc;
+
+ pci_msi_off(pdev);
+
+ /* Enable intx */
+ pci_intx(pdev, 1);
+ rc = mic_setup_callbacks(mdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up callbacks\n");
+ goto err_nomem;
+ }
+
+ rc = request_irq(pdev->irq, mic_interrupt,
+ IRQF_SHARED, "mic-intx", mdev);
+ if (rc)
+ goto err;
+
+ dev_dbg(&pdev->dev, "intx irq setup\n");
+ return 0;
+err:
+ mic_release_callbacks(mdev);
+err_nomem:
+ return rc;
+}
+
+/**
+ * mic_next_db - Retrieve the next doorbell interrupt source id.
+ * The id is picked sequentially from the available pool of
+ * doorbell ids.
+ *
+ * @mdev: pointer to the mic_device instance.
+ *
+ * Returns the next doorbell interrupt source.
+ */
+int mic_next_db(struct mic_device *mdev)
+{
+ int next_db;
+
+ next_db = mdev->irq_info.next_avail_src %
+ mdev->intr_info->intr_len[MIC_INTR_DB];
+ mdev->irq_info.next_avail_src++;
+ return next_db;
+}
+
+#define COOKIE_ID_SHIFT 16
+#define GET_ENTRY(cookie) ((cookie) & 0xFFFF)
+#define GET_OFFSET(cookie) ((cookie) >> COOKIE_ID_SHIFT)
+#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
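+
+/*
+ * For example, MK_COOKIE(1, 5) yields 0x00050001: the MSI-x entry (or zero
+ * for MSI/INTx) sits in the low 16 bits and the offset or callback id in the
+ * high 16 bits, so GET_ENTRY() recovers 1 and GET_OFFSET() recovers 5.
+ */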
+
+/**
+ * mic_request_irq - request an irq. mic_mutex needs
+ * to be held before calling this function.
+ *
+ * @mdev: pointer to mic_device instance
+ * @func: The callback function that handles the interrupt.
+ * The function needs to call ack_interrupt
+ * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts.
+ * @name: The ASCII name of the callee requesting the irq.
+ * @data: private data that is returned back when calling the
+ * function handler.
+ * @intr_src: The source id of the requester. It is the doorbell id
+ * for Doorbell interrupts and DMA channel id for DMA interrupts.
+ * @type: The type of interrupt. Values defined in mic_intr_type
+ *
+ * returns: The cookie that is transparent to the caller. Passed
+ * back when calling mic_free_irq. An appropriate error code
+ * is returned on failure. Caller needs to use IS_ERR(return_val)
+ * to check for failure and PTR_ERR(return_val) to obtained the
+ * error code.
+ *
+ */
+struct mic_irq *mic_request_irq(struct mic_device *mdev,
+ irqreturn_t (*func)(int irq, void *dev),
+ const char *name, void *data, int intr_src,
+ enum mic_intr_type type)
+{
+ u16 offset;
+ int rc = 0;
+ struct msix_entry *msix = NULL;
+ unsigned long cookie = 0;
+ u16 entry;
+ struct mic_intr_cb *intr_cb;
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+
+ offset = mic_map_src_to_offset(mdev, intr_src, type);
+ if (offset >= MIC_NUM_OFFSETS) {
+ dev_err(mdev->sdev->parent,
+ "Error mapping index %d to a valid source id.\n",
+ intr_src);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (mdev->irq_info.num_vectors > 1) {
+ msix = mic_get_available_vector(mdev);
+ if (!msix) {
+ dev_err(mdev->sdev->parent,
+ "No MSIx vectors available for use.\n");
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ rc = request_irq(msix->vector, func, 0, name, data);
+ if (rc) {
+ dev_dbg(mdev->sdev->parent,
+ "request irq failed rc = %d\n", rc);
+ goto err;
+ }
+ entry = msix->entry;
+ mdev->irq_info.mic_msi_map[entry] |= BIT(offset);
+ mdev->intr_ops->program_msi_to_src_map(mdev,
+ entry, offset, true);
+ cookie = MK_COOKIE(entry, offset);
+ dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n",
+ msix->vector, intr_src);
+ } else {
+ intr_cb = mic_register_intr_callback(mdev,
+ offset, func, data);
+ if (IS_ERR(intr_cb)) {
+ dev_err(mdev->sdev->parent,
+ "No available callback entries for use\n");
+ rc = PTR_ERR(intr_cb);
+ goto err;
+ }
+
+ entry = 0;
+ if (pci_dev_msi_enabled(pdev)) {
+ mdev->irq_info.mic_msi_map[entry] |= (1 << offset);
+ mdev->intr_ops->program_msi_to_src_map(mdev,
+ entry, offset, true);
+ }
+ cookie = MK_COOKIE(entry, intr_cb->cb_id);
+ dev_dbg(mdev->sdev->parent, "callback %d registered for src: %d\n",
+ intr_cb->cb_id, intr_src);
+ }
+ return (struct mic_irq *)cookie;
+err:
+ return ERR_PTR(rc);
+}
+
+/**
+ * mic_free_irq - free irq. mic_mutex
+ * needs to be held before calling this function.
+ *
+ * @mdev: pointer to mic_device instance
+ * @cookie: cookie obtained during a successful call to mic_request_irq
+ * @data: private data specified by the calling function during the
+ * mic_request_irq
+ *
+ * returns: none.
+ */
+void mic_free_irq(struct mic_device *mdev,
+ struct mic_irq *cookie, void *data)
+{
+ u32 offset;
+ u32 entry;
+ u8 src_id;
+ unsigned int irq;
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+
+ entry = GET_ENTRY((unsigned long)cookie);
+ offset = GET_OFFSET((unsigned long)cookie);
+ if (mdev->irq_info.num_vectors > 1) {
+ if (entry >= mdev->irq_info.num_vectors) {
+ dev_warn(mdev->sdev->parent,
+ "entry %d should be < num_irq %d\n",
+ entry, mdev->irq_info.num_vectors);
+ return;
+ }
+ irq = mdev->irq_info.msix_entries[entry].vector;
+ free_irq(irq, data);
+ mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset));
+ mdev->intr_ops->program_msi_to_src_map(mdev,
+ entry, offset, false);
+
+ dev_dbg(mdev->sdev->parent, "irq: %d freed\n", irq);
+ } else {
+ irq = pdev->irq;
+ src_id = mic_unregister_intr_callback(mdev, offset);
+ if (src_id >= MIC_NUM_OFFSETS) {
+ dev_warn(mdev->sdev->parent, "Error unregistering callback\n");
+ return;
+ }
+ if (pci_dev_msi_enabled(pdev)) {
+ mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id));
+ mdev->intr_ops->program_msi_to_src_map(mdev,
+ entry, src_id, false);
+ }
+ dev_dbg(mdev->sdev->parent, "callback %d unregistered for src: %d\n",
+ offset, src_id);
+ }
+}
+
+/**
+ * mic_setup_interrupts - Initializes interrupts.
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
+{
+ int rc;
+
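+ /* Try MSI-x first, then fall back to MSI and finally to legacy INTx. */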
+ rc = mic_setup_msix(mdev, pdev);
+ if (!rc)
+ goto done;
+
+ rc = mic_setup_msi(mdev, pdev);
+ if (!rc)
+ goto done;
+
+ rc = mic_setup_intx(mdev, pdev);
+ if (rc) {
+ dev_err(mdev->sdev->parent, "no usable interrupts\n");
+ return rc;
+ }
+done:
+ mdev->intr_ops->enable_interrupts(mdev);
+ return 0;
+}
+
+/**
+ * mic_free_interrupts - Frees interrupts setup by mic_setup_interrupts
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * returns none.
+ */
+void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
+{
+ int i;
+
+ mdev->intr_ops->disable_interrupts(mdev);
+ if (mdev->irq_info.num_vectors > 1) {
+ for (i = 0; i < mdev->irq_info.num_vectors; i++) {
+ if (mdev->irq_info.mic_msi_map[i])
+ dev_warn(&pdev->dev, "irq %d may still be in use.\n",
+ mdev->irq_info.msix_entries[i].vector);
+ }
+ kfree(mdev->irq_info.mic_msi_map);
+ kfree(mdev->irq_info.msix_entries);
+ pci_disable_msix(pdev);
+ } else {
+ if (pci_dev_msi_enabled(pdev)) {
+ free_irq(pdev->irq, mdev);
+ kfree(mdev->irq_info.mic_msi_map);
+ pci_disable_msi(pdev);
+ } else {
+ free_irq(pdev->irq, mdev);
+ }
+ mic_release_callbacks(mdev);
+ }
+}
+
+/**
+ * mic_intr_restore - Restore MIC interrupt registers.
+ *
+ * @mdev: pointer to mic_device instance.
+ *
+ * Restore the interrupt registers to values previously
+ * stored in the SW data structures. mic_mutex needs to
+ * be held before calling this function.
+ *
+ * returns None.
+ */
+void mic_intr_restore(struct mic_device *mdev)
+{
+ int entry, offset;
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+
+ if (!pci_dev_msi_enabled(pdev))
+ return;
+
+ for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) {
+ for (offset = 0; offset < MIC_NUM_OFFSETS; offset++) {
+ if (mdev->irq_info.mic_msi_map[entry] & BIT(offset))
+ mdev->intr_ops->program_msi_to_src_map(mdev,
+ entry, offset, true);
+ }
+ }
+}
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
new file mode 100644
index 00000000000..6091aa97e11
--- /dev/null
+++ b/drivers/misc/mic/host/mic_intr.h
@@ -0,0 +1,137 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#ifndef _MIC_INTR_H_
+#define _MIC_INTR_H_
+
+/*
+ * The minimum number of msix vectors required for normal operation.
+ * 3 for virtio network, console and block devices.
+ * 1 for card shutdown notifications.
+ */
+#define MIC_MIN_MSIX 4
+#define MIC_NUM_OFFSETS 32
+
+/**
+ * enum mic_intr_type - The type of source that will generate
+ * the interrupt. The number of types needs to be in sync with
+ * MIC_NUM_INTR_TYPES
+ *
+ * MIC_INTR_DB: The source is a doorbell
+ * MIC_INTR_DMA: The source is a DMA channel
+ * MIC_INTR_ERR: The source is an error interrupt e.g. SBOX ERR
+ * MIC_NUM_INTR_TYPES: Total number of interrupt sources.
+ */
+enum mic_intr_type {
+ MIC_INTR_DB = 0,
+ MIC_INTR_DMA,
+ MIC_INTR_ERR,
+ MIC_NUM_INTR_TYPES
+};
+
+/**
+ * struct mic_intr_info - Contains h/w specific interrupt sources
+ * information.
+ *
+ * @intr_start_idx: Contains the starting indexes of the
+ * interrupt types.
+ * @intr_len: Contains the length of the interrupt types.
+ */
+struct mic_intr_info {
+ u16 intr_start_idx[MIC_NUM_INTR_TYPES];
+ u16 intr_len[MIC_NUM_INTR_TYPES];
+};
+
+/**
+ * struct mic_irq_info - OS specific irq information
+ *
+ * @next_avail_src: next available doorbell that can be assigned.
+ * @msix_entries: msix entries allocated while setting up MSI-x
+ * @mic_msi_map: The MSI/MSI-x mapping information.
+ * @num_vectors: The number of MSI/MSI-x vectors that have been allocated.
+ * @cb_ida: callback ID allocator to track the callbacks registered.
+ * @mic_intr_lock: spinlock to protect the interrupt callback list.
+ * @cb_list: Array of callback lists one for each source.
+ */
+struct mic_irq_info {
+ int next_avail_src;
+ struct msix_entry *msix_entries;
+ u32 *mic_msi_map;
+ u16 num_vectors;
+ struct ida cb_ida;
+ spinlock_t mic_intr_lock;
+ struct list_head *cb_list;
+};
+
+/**
+ * struct mic_intr_cb - Interrupt callback structure.
+ *
+ * @func: The callback function
+ * @data: Private data of the requester.
+ * @cb_id: The callback id. Identifies this callback.
+ * @list: list head pointing to the next callback structure.
+ */
+struct mic_intr_cb {
+ irqreturn_t (*func) (int irq, void *data);
+ void *data;
+ int cb_id;
+ struct list_head list;
+};
+
+/**
+ * struct mic_irq - opaque pointer used as cookie
+ */
+struct mic_irq;
+
+/* Forward declaration */
+struct mic_device;
+
+/**
+ * struct mic_hw_intr_ops: MIC HW specific interrupt operations
+ * @intr_init: Initialize H/W specific interrupt information.
+ * @enable_interrupts: Enable interrupts from the hardware.
+ * @disable_interrupts: Disable interrupts from the hardware.
+ * @program_msi_to_src_map: Update MSI mapping registers with
+ * irq information.
+ * @read_msi_to_src_map: Read MSI mapping registers containing
+ * irq information.
+ */
+struct mic_hw_intr_ops {
+ void (*intr_init)(struct mic_device *mdev);
+ void (*enable_interrupts)(struct mic_device *mdev);
+ void (*disable_interrupts)(struct mic_device *mdev);
+ void (*program_msi_to_src_map) (struct mic_device *mdev,
+ int idx, int intr_src, bool set);
+ u32 (*read_msi_to_src_map) (struct mic_device *mdev,
+ int idx);
+};
+
+int mic_next_db(struct mic_device *mdev);
+struct mic_irq *mic_request_irq(struct mic_device *mdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int intr_src,
+ enum mic_intr_type type);
+
+void mic_free_irq(struct mic_device *mdev,
+ struct mic_irq *cookie, void *data);
+int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
+void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
+void mic_intr_restore(struct mic_device *mdev);
+#endif
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
new file mode 100644
index 00000000000..ad838c7651c
--- /dev/null
+++ b/drivers/misc/mic/host/mic_main.c
@@ -0,0 +1,536 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ * Global TODO's across the driver to be added after initial base
+ * patches are accepted upstream:
+ * 1) Enable DMA support.
+ * 2) Enable per vring interrupt support.
+ */
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/suspend.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_x100.h"
+#include "mic_smpt.h"
+#include "mic_fops.h"
+#include "mic_virtio.h"
+
+static const char mic_driver_name[] = "mic";
+
+static DEFINE_PCI_DEVICE_TABLE(mic_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2253)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2254)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2255)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2256)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2257)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2258)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2259)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225a)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225b)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225c)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225d)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225e)},
+
+ /* required last entry */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
+
+/* ID allocator for MIC devices */
+static struct ida g_mic_ida;
+/* Class of MIC devices for sysfs accessibility. */
+static struct class *g_mic_class;
+/* Base device node number for MIC devices */
+static dev_t g_mic_devno;
+
+static const struct file_operations mic_fops = {
+ .open = mic_open,
+ .release = mic_release,
+ .unlocked_ioctl = mic_ioctl,
+ .poll = mic_poll,
+ .mmap = mic_mmap,
+ .owner = THIS_MODULE,
+};
+
+/* Initialize the device page */
+static int mic_dp_init(struct mic_device *mdev)
+{
+ mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL);
+ if (!mdev->dp) {
+ dev_err(mdev->sdev->parent, "%s %d err %d\n",
+ __func__, __LINE__, -ENOMEM);
+ return -ENOMEM;
+ }
+
+ mdev->dp_dma_addr = mic_map_single(mdev,
+ mdev->dp, MIC_DP_SIZE);
+ if (mic_map_error(mdev->dp_dma_addr)) {
+ kfree(mdev->dp);
+ dev_err(mdev->sdev->parent, "%s %d err %d\n",
+ __func__, __LINE__, -ENOMEM);
+ return -ENOMEM;
+ }
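+ /* Publish the device page DMA address to the card via scratchpad registers. */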
+ mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
+ mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
+ return 0;
+}
+
+/* Uninitialize the device page */
+static void mic_dp_uninit(struct mic_device *mdev)
+{
+ mic_unmap_single(mdev, mdev->dp_dma_addr, MIC_DP_SIZE);
+ kfree(mdev->dp);
+}
+
+/**
+ * mic_shutdown_db - Shutdown doorbell interrupt handler.
+ */
+static irqreturn_t mic_shutdown_db(int irq, void *data)
+{
+ struct mic_device *mdev = data;
+ struct mic_bootparam *bootparam = mdev->dp;
+
+ mdev->ops->ack_interrupt(mdev);
+
+ switch (bootparam->shutdown_status) {
+ case MIC_HALTED:
+ case MIC_POWER_OFF:
+ case MIC_RESTART:
+ /* Fall through */
+ case MIC_CRASHED:
+ schedule_work(&mdev->shutdown_work);
+ break;
+ default:
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * mic_ops_init: Initialize HW specific operation tables.
+ *
+ * @mdev: pointer to mic_device instance
+ *
+ * returns none.
+ */
+static void mic_ops_init(struct mic_device *mdev)
+{
+ switch (mdev->family) {
+ case MIC_FAMILY_X100:
+ mdev->ops = &mic_x100_ops;
+ mdev->intr_ops = &mic_x100_intr_ops;
+ mdev->smpt_ops = &mic_x100_smpt_ops;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * mic_get_family - Determine hardware family to which this MIC belongs.
+ *
+ * @pdev: The pci device structure
+ *
+ * returns family.
+ */
+static enum mic_hw_family mic_get_family(struct pci_dev *pdev)
+{
+ enum mic_hw_family family;
+
+ switch (pdev->device) {
+ case MIC_X100_PCI_DEVICE_2250:
+ case MIC_X100_PCI_DEVICE_2251:
+ case MIC_X100_PCI_DEVICE_2252:
+ case MIC_X100_PCI_DEVICE_2253:
+ case MIC_X100_PCI_DEVICE_2254:
+ case MIC_X100_PCI_DEVICE_2255:
+ case MIC_X100_PCI_DEVICE_2256:
+ case MIC_X100_PCI_DEVICE_2257:
+ case MIC_X100_PCI_DEVICE_2258:
+ case MIC_X100_PCI_DEVICE_2259:
+ case MIC_X100_PCI_DEVICE_225a:
+ case MIC_X100_PCI_DEVICE_225b:
+ case MIC_X100_PCI_DEVICE_225c:
+ case MIC_X100_PCI_DEVICE_225d:
+ case MIC_X100_PCI_DEVICE_225e:
+ family = MIC_FAMILY_X100;
+ break;
+ default:
+ family = MIC_FAMILY_UNKNOWN;
+ break;
+ }
+ return family;
+}
+
+/**
+ * mic_pm_notifier - Notifier callback function that handles
+ * PM notifications.
+ *
+ * @notifier: The notifier structure.
+ * @pm_event: The event for which the driver was notified.
+ * @unused: Meaningless. Always NULL.
+ *
+ * returns NOTIFY_DONE
+ */
+static int mic_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct mic_device *mdev = container_of(notifier,
+ struct mic_device, pm_notifier);
+
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ /* Fall through */
+ case PM_SUSPEND_PREPARE:
+ mic_prepare_suspend(mdev);
+ break;
+ case PM_POST_HIBERNATION:
+ /* Fall through */
+ case PM_POST_SUSPEND:
+ /* Fall through */
+ case PM_POST_RESTORE:
+ mic_complete_resume(mdev);
+ break;
+ case PM_RESTORE_PREPARE:
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * mic_device_init - Allocates and initializes the MIC device structure
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: The pci device structure
+ *
+ * returns none.
+ */
+static int
+mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
+{
+ int rc;
+
+ mdev->family = mic_get_family(pdev);
+ mdev->stepping = pdev->revision;
+ mic_ops_init(mdev);
+ mic_sysfs_init(mdev);
+ mutex_init(&mdev->mic_mutex);
+ mdev->irq_info.next_avail_src = 0;
+ INIT_WORK(&mdev->reset_trigger_work, mic_reset_trigger_work);
+ INIT_WORK(&mdev->shutdown_work, mic_shutdown_work);
+ init_completion(&mdev->reset_wait);
+ INIT_LIST_HEAD(&mdev->vdev_list);
+ mdev->pm_notifier.notifier_call = mic_pm_notifier;
+ rc = register_pm_notifier(&mdev->pm_notifier);
+ if (rc) {
+ dev_err(&pdev->dev, "register_pm_notifier failed rc %d\n",
+ rc);
+ goto register_pm_notifier_fail;
+ }
+ return 0;
+register_pm_notifier_fail:
+ flush_work(&mdev->shutdown_work);
+ flush_work(&mdev->reset_trigger_work);
+ return rc;
+}
+
+/**
+ * mic_device_uninit - Frees resources allocated during mic_device_init(..)
+ *
+ * @mdev: pointer to mic_device instance
+ *
+ * returns none
+ */
+static void mic_device_uninit(struct mic_device *mdev)
+{
+ /* The cmdline sysfs entry might have allocated cmdline */
+ kfree(mdev->cmdline);
+ kfree(mdev->firmware);
+ kfree(mdev->ramdisk);
+ kfree(mdev->bootmode);
+ flush_work(&mdev->reset_trigger_work);
+ flush_work(&mdev->shutdown_work);
+ unregister_pm_notifier(&mdev->pm_notifier);
+}
+
+/**
+ * mic_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in mic_pci_tbl
+ *
+ * returns 0 on success, < 0 on failure.
+ */
+static int mic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+ struct mic_device *mdev;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "mdev kmalloc failed rc %d\n", rc);
+ goto mdev_alloc_fail;
+ }
+ mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
+ if (mdev->id < 0) {
+ rc = mdev->id;
+ dev_err(&pdev->dev, "ida_simple_get failed rc %d\n", rc);
+ goto ida_fail;
+ }
+
+ rc = mic_device_init(mdev, pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "mic_device_init failed rc %d\n", rc);
+ goto device_init_fail;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to enable pci device.\n");
+ goto uninit_device;
+ }
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, mic_driver_name);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to get pci regions.\n");
+ goto disable_device;
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set DMA mask\n");
+ goto release_regions;
+ }
+
+ mdev->mmio.pa = pci_resource_start(pdev, mdev->ops->mmio_bar);
+ mdev->mmio.len = pci_resource_len(pdev, mdev->ops->mmio_bar);
+ mdev->mmio.va = pci_ioremap_bar(pdev, mdev->ops->mmio_bar);
+ if (!mdev->mmio.va) {
+ dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
+ rc = -EIO;
+ goto release_regions;
+ }
+
+ mdev->aper.pa = pci_resource_start(pdev, mdev->ops->aper_bar);
+ mdev->aper.len = pci_resource_len(pdev, mdev->ops->aper_bar);
+ mdev->aper.va = ioremap_wc(mdev->aper.pa, mdev->aper.len);
+ if (!mdev->aper.va) {
+ dev_err(&pdev->dev, "Cannot remap Aperture BAR\n");
+ rc = -EIO;
+ goto unmap_mmio;
+ }
+
+ mdev->intr_ops->intr_init(mdev);
+ rc = mic_setup_interrupts(mdev, pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "mic_setup_interrupts failed %d\n", rc);
+ goto unmap_aper;
+ }
+ rc = mic_smpt_init(mdev);
+ if (rc) {
+ dev_err(&pdev->dev, "smpt_init failed %d\n", rc);
+ goto free_interrupts;
+ }
+
+ pci_set_drvdata(pdev, mdev);
+
+ mdev->sdev = device_create_with_groups(g_mic_class, &pdev->dev,
+ MKDEV(MAJOR(g_mic_devno), mdev->id), NULL,
+ mdev->attr_group, "mic%d", mdev->id);
+ if (IS_ERR(mdev->sdev)) {
+ rc = PTR_ERR(mdev->sdev);
+ dev_err(&pdev->dev,
+ "device_create_with_groups failed rc %d\n", rc);
+ goto smpt_uninit;
+ }
+ mdev->state_sysfs = sysfs_get_dirent(mdev->sdev->kobj.sd, "state");
+ if (!mdev->state_sysfs) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "sysfs_get_dirent failed rc %d\n", rc);
+ goto destroy_device;
+ }
+
+ rc = mic_dp_init(mdev);
+ if (rc) {
+ dev_err(&pdev->dev, "mic_dp_init failed rc %d\n", rc);
+ goto sysfs_put;
+ }
+ mutex_lock(&mdev->mic_mutex);
+
+ mdev->shutdown_db = mic_next_db(mdev);
+ mdev->shutdown_cookie = mic_request_irq(mdev, mic_shutdown_db,
+ "shutdown-interrupt", mdev, mdev->shutdown_db, MIC_INTR_DB);
+ if (IS_ERR(mdev->shutdown_cookie)) {
+ rc = PTR_ERR(mdev->shutdown_cookie);
+ mutex_unlock(&mdev->mic_mutex);
+ goto dp_uninit;
+ }
+ mutex_unlock(&mdev->mic_mutex);
+ mic_bootparam_init(mdev);
+
+ mic_create_debug_dir(mdev);
+ cdev_init(&mdev->cdev, &mic_fops);
+ mdev->cdev.owner = THIS_MODULE;
+ rc = cdev_add(&mdev->cdev, MKDEV(MAJOR(g_mic_devno), mdev->id), 1);
+ if (rc) {
+ dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc);
+ goto cleanup_debug_dir;
+ }
+ return 0;
+cleanup_debug_dir:
+ mic_delete_debug_dir(mdev);
+ mutex_lock(&mdev->mic_mutex);
+ mic_free_irq(mdev, mdev->shutdown_cookie, mdev);
+ mutex_unlock(&mdev->mic_mutex);
+dp_uninit:
+ mic_dp_uninit(mdev);
+sysfs_put:
+ sysfs_put(mdev->state_sysfs);
+destroy_device:
+ device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id));
+smpt_uninit:
+ mic_smpt_uninit(mdev);
+free_interrupts:
+ mic_free_interrupts(mdev, pdev);
+unmap_aper:
+ iounmap(mdev->aper.va);
+unmap_mmio:
+ iounmap(mdev->mmio.va);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+uninit_device:
+ mic_device_uninit(mdev);
+device_init_fail:
+ ida_simple_remove(&g_mic_ida, mdev->id);
+ida_fail:
+ kfree(mdev);
+mdev_alloc_fail:
+ dev_err(&pdev->dev, "Probe failed rc %d\n", rc);
+ return rc;
+}
+
+/**
+ * mic_remove - Device Removal Routine
+ * mic_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ *
+ * @pdev: PCI device structure
+ */
+static void mic_remove(struct pci_dev *pdev)
+{
+ struct mic_device *mdev;
+
+ mdev = pci_get_drvdata(pdev);
+ if (!mdev)
+ return;
+
+ mic_stop(mdev, false);
+ cdev_del(&mdev->cdev);
+ mic_delete_debug_dir(mdev);
+ mutex_lock(&mdev->mic_mutex);
+ mic_free_irq(mdev, mdev->shutdown_cookie, mdev);
+ mutex_unlock(&mdev->mic_mutex);
+ flush_work(&mdev->shutdown_work);
+ mic_dp_uninit(mdev);
+ sysfs_put(mdev->state_sysfs);
+ device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id));
+ mic_smpt_uninit(mdev);
+ mic_free_interrupts(mdev, pdev);
+ iounmap(mdev->mmio.va);
+ iounmap(mdev->aper.va);
+ mic_device_uninit(mdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ ida_simple_remove(&g_mic_ida, mdev->id);
+ kfree(mdev);
+}
+static struct pci_driver mic_driver = {
+ .name = mic_driver_name,
+ .id_table = mic_pci_tbl,
+ .probe = mic_probe,
+ .remove = mic_remove
+};
+
+static int __init mic_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&g_mic_devno, 0,
+ MIC_MAX_NUM_DEVS, mic_driver_name);
+ if (ret) {
+ pr_err("alloc_chrdev_region failed ret %d\n", ret);
+ goto error;
+ }
+
+ g_mic_class = class_create(THIS_MODULE, mic_driver_name);
+ if (IS_ERR(g_mic_class)) {
+ ret = PTR_ERR(g_mic_class);
+ pr_err("class_create failed ret %d\n", ret);
+ goto cleanup_chrdev;
+ }
+
+ mic_init_debugfs();
+ ida_init(&g_mic_ida);
+ ret = pci_register_driver(&mic_driver);
+ if (ret) {
+ pr_err("pci_register_driver failed ret %d\n", ret);
+ goto cleanup_debugfs;
+ }
+ return ret;
+cleanup_debugfs:
+ mic_exit_debugfs();
+ class_destroy(g_mic_class);
+cleanup_chrdev:
+ unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
+error:
+ return ret;
+}
+
+static void __exit mic_exit(void)
+{
+ pci_unregister_driver(&mic_driver);
+ ida_destroy(&g_mic_ida);
+ mic_exit_debugfs();
+ class_destroy(g_mic_class);
+ unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
+}
+
+module_init(mic_init);
+module_exit(mic_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) MIC X100 Host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
new file mode 100644
index 00000000000..fae474c4899
--- /dev/null
+++ b/drivers/misc/mic/host/mic_smpt.c
@@ -0,0 +1,442 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/pci.h>
+
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_smpt.h"
+
+static inline u64 mic_system_page_mask(struct mic_device *mdev)
+{
+ return (1ULL << mdev->smpt->info.page_shift) - 1ULL;
+}
+
+static inline u8 mic_sys_addr_to_smpt(struct mic_device *mdev, dma_addr_t pa)
+{
+ return (pa - mdev->smpt->info.base) >> mdev->smpt->info.page_shift;
+}
+
+static inline u64 mic_smpt_to_pa(struct mic_device *mdev, u8 index)
+{
+ return mdev->smpt->info.base + (index * mdev->smpt->info.page_size);
+}
+
+static inline u64 mic_smpt_offset(struct mic_device *mdev, dma_addr_t pa)
+{
+ return pa & mic_system_page_mask(mdev);
+}
+
+static inline u64 mic_smpt_align_low(struct mic_device *mdev, dma_addr_t pa)
+{
+ return ALIGN(pa - mic_system_page_mask(mdev),
+ mdev->smpt->info.page_size);
+}
+
+static inline u64 mic_smpt_align_high(struct mic_device *mdev, dma_addr_t pa)
+{
+ return ALIGN(pa, mdev->smpt->info.page_size);
+}
+
+/* Total system memory accessible by MIC across all SMPT entries */
+static inline u64 mic_max_system_memory(struct mic_device *mdev)
+{
+ return mdev->smpt->info.num_reg * mdev->smpt->info.page_size;
+}
+
+/* Maximum system memory address accessible by MIC */
+static inline u64 mic_max_system_addr(struct mic_device *mdev)
+{
+ return mdev->smpt->info.base + mic_max_system_memory(mdev) - 1ULL;
+}
+
+/* Check if the DMA address is a MIC system memory address */
+static inline bool
+mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa)
+{
+ return pa >= mdev->smpt->info.base && pa <= mic_max_system_addr(mdev);
+}
+
+/* Populate the requested SMPT entries and update their reference counts. */
+static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr,
+ int entries, struct mic_device *mdev)
+{
+ struct mic_smpt_info *smpt_info = mdev->smpt;
+ int i;
+
+ for (i = spt; i < spt + entries; i++,
+ addr += smpt_info->info.page_size) {
+ if (!smpt_info->entry[i].ref_count &&
+ (smpt_info->entry[i].dma_addr != addr)) {
+ mdev->smpt_ops->set(mdev, addr, i);
+ smpt_info->entry[i].dma_addr = addr;
+ }
+ smpt_info->entry[i].ref_count += ref[i - spt];
+ }
+}
+
+/*
+ * Find an available MIC address in MIC SMPT address space
+ * for a given DMA address and size.
+ */
+static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr,
+ int entries, s64 *ref, size_t size)
+{
+ int spt;
+ int ae = 0;
+ int i;
+ unsigned long flags;
+ dma_addr_t mic_addr = 0;
+ dma_addr_t addr = dma_addr;
+ struct mic_smpt_info *smpt_info = mdev->smpt;
+
+ spin_lock_irqsave(&smpt_info->smpt_lock, flags);
+
+ /* find existing entries */
+ for (i = 0; i < smpt_info->info.num_reg; i++) {
+ if (smpt_info->entry[i].dma_addr == addr) {
+ ae++;
+ addr += smpt_info->info.page_size;
+ } else if (ae) /* cannot find contiguous entries */
+ goto not_found;
+
+ if (ae == entries)
+ goto found;
+ }
+
+ /* find free entry */
+ for (ae = 0, i = 0; i < smpt_info->info.num_reg; i++) {
+ ae = (smpt_info->entry[i].ref_count == 0) ? ae + 1 : 0;
+ if (ae == entries)
+ goto found;
+ }
+
+not_found:
+ spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
+ return mic_addr;
+
+found:
+ spt = i - entries + 1;
+ mic_addr = mic_smpt_to_pa(mdev, spt);
+ mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev);
+ smpt_info->map_count++;
+ smpt_info->ref_count += (s64)size;
+ spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
+ return mic_addr;
+}
+
+/*
+ * Returns the number of SMPT entries needed to cover dma_addr through
+ * dma_addr + size. Also fills in the reference count array for each of
+ * those entries and, optionally, the starting SMPT-aligned address.
+ */
+static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
+ size_t size, s64 *ref, u64 *smpt_start)
+{
+ u64 start = dma_addr;
+ u64 end = dma_addr + size;
+ int i = 0;
+
+ while (start < end) {
+ ref[i++] = min(mic_smpt_align_high(mdev, start + 1),
+ end) - start;
+ start = mic_smpt_align_high(mdev, start + 1);
+ }
+
+ if (smpt_start)
+ *smpt_start = mic_smpt_align_low(mdev, dma_addr);
+
+ return i;
+}
+
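+/*
+ * Worked example (editorial illustration, not part of the original patch),
+ * assuming the 16G SMPT page size implied by the "16G aligned" comment in
+ * mic_map() below: for dma_addr = 0x7E0000000 (31.5G) and size = 0x40000000
+ * (1G) the range crosses the 32G page boundary, so this returns 2 entries
+ * with ref[0] = ref[1] = 0x20000000 (0.5G each) and sets *smpt_start to
+ * 0x400000000 (16G), the start of the SMPT page containing dma_addr.
+ */
+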
+/*
+ * mic_to_dma_addr - Converts a MIC address to a DMA address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @mic_addr: MIC address.
+ *
+ * returns a DMA address.
+ */
+static dma_addr_t
+mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
+{
+ struct mic_smpt_info *smpt_info = mdev->smpt;
+ int spt;
+ dma_addr_t dma_addr;
+
+ if (!mic_is_system_addr(mdev, mic_addr)) {
+ dev_err(mdev->sdev->parent,
+ "mic_addr is invalid. mic_addr = 0x%llx\n", mic_addr);
+ return -EINVAL;
+ }
+ spt = mic_sys_addr_to_smpt(mdev, mic_addr);
+ dma_addr = smpt_info->entry[spt].dma_addr +
+ mic_smpt_offset(mdev, mic_addr);
+ return dma_addr;
+}
+
+/**
+ * mic_map - Maps a DMA address to a MIC physical address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @dma_addr: DMA address.
+ * @size: Size of the region to be mapped.
+ *
+ * This API converts the DMA address provided to a DMA address understood
+ * by MIC. Caller should check for errors by calling mic_map_error(..).
+ *
+ * returns DMA address as required by MIC.
+ */
+dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
+{
+ dma_addr_t mic_addr = 0;
+ int num_entries;
+ s64 *ref;
+ u64 smpt_start;
+
+ if (!size || size > mic_max_system_memory(mdev))
+ return mic_addr;
+
+ ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+ if (!ref)
+ return mic_addr;
+
+ num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size,
+ ref, &smpt_start);
+
+ /* Set the smpt table appropriately and get 16G aligned mic address */
+ mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size);
+
+ kfree(ref);
+
+ /*
+ * If mic_addr is zero then the mapping failed, since a valid mic_addr
+ * can never be zero. Otherwise generate the final MIC address by adding
+ * back the offset of dma_addr within its 16G page.
+ */
+ if (!mic_addr && MIC_FAMILY_X100 == mdev->family) {
+ dev_err(mdev->sdev->parent,
+ "mic_map failed dma_addr 0x%llx size 0x%lx\n",
+ dma_addr, size);
+ return mic_addr;
+ } else {
+ return mic_addr + mic_smpt_offset(mdev, dma_addr);
+ }
+}
+
+/**
+ * mic_unmap - Unmaps a MIC physical address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @mic_addr: MIC physical address.
+ * @size: Size of the region to be unmapped.
+ *
+ * This API unmaps the mappings created by mic_map(..).
+ *
+ * returns None.
+ */
+void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
+{
+ struct mic_smpt_info *smpt_info = mdev->smpt;
+ s64 *ref;
+ int num_smpt;
+ int spt;
+ int i;
+ unsigned long flags;
+
+ if (!size)
+ return;
+
+ if (!mic_is_system_addr(mdev, mic_addr)) {
+ dev_err(mdev->sdev->parent,
+ "invalid address: 0x%llx\n", mic_addr);
+ return;
+ }
+
+ spt = mic_sys_addr_to_smpt(mdev, mic_addr);
+ ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+ if (!ref)
+ return;
+
+ /* Get number of smpt entries to be mapped, ref count array */
+ num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL);
+
+ spin_lock_irqsave(&smpt_info->smpt_lock, flags);
+ smpt_info->unmap_count++;
+ smpt_info->ref_count -= (s64)size;
+
+ for (i = spt; i < spt + num_smpt; i++) {
+ smpt_info->entry[i].ref_count -= ref[i - spt];
+ if (smpt_info->entry[i].ref_count < 0)
+ dev_warn(mdev->sdev->parent,
+ "ref count for entry %d is negative\n", i);
+ }
+ spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
+ kfree(ref);
+}
+
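+/*
+ * Editorial usage sketch (not part of the original patch): how a caller
+ * holding a host DMA address might use mic_map()/mic_unmap(). The function
+ * name and error code below are illustrative assumptions only.
+ */
+#if 0
+static int example_map_dma_range(struct mic_device *mdev,
+ dma_addr_t dma_addr, size_t size)
+{
+ /* Translate the host DMA address into a MIC visible address. */
+ dma_addr_t mic_addr = mic_map(mdev, dma_addr, size);
+
+ if (mic_map_error(mic_addr))
+ return -ENOMEM;
+ /* ... publish mic_addr to the card ... */
+ mic_unmap(mdev, mic_addr, size);
+ return 0;
+}
+#endif
+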
+/**
+ * mic_map_single - Maps a virtual address to a MIC physical address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @va: Kernel direct mapped virtual address.
+ * @size: Size of the region to be mapped.
+ *
+ * This API calls pci_map_single(..) for the direct mapped virtual address
+ * and then converts the resulting DMA address to a DMA address understood
+ * by MIC. Caller should check for errors by calling mic_map_error(..).
+ *
+ * returns DMA address as required by MIC.
+ */
+dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
+{
+ dma_addr_t mic_addr = 0;
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+ dma_addr_t dma_addr =
+ pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL);
+
+ if (!pci_dma_mapping_error(pdev, dma_addr)) {
+ mic_addr = mic_map(mdev, dma_addr, size);
+ if (!mic_addr) {
+ dev_err(mdev->sdev->parent,
+ "mic_map failed dma_addr 0x%llx size 0x%lx\n",
+ dma_addr, size);
+ pci_unmap_single(pdev, dma_addr,
+ size, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+ return mic_addr;
+}
+
+/**
+ * mic_unmap_single - Unmaps a MIC physical address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @mic_addr: MIC physical address.
+ * @size: Size of the region to be unmapped.
+ *
+ * This API unmaps the mappings created by mic_map_single(..).
+ *
+ * returns None.
+ */
+void
+mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
+{
+ struct pci_dev *pdev = container_of(mdev->sdev->parent,
+ struct pci_dev, dev);
+ dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr);
+ mic_unmap(mdev, mic_addr, size);
+ pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+}
+
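+/*
+ * Editorial usage sketch (not part of the original patch): sharing a
+ * kmalloc'ed scratch buffer with the card via mic_map_single(). The buffer
+ * handling and function name are illustrative assumptions only.
+ */
+#if 0
+static int example_share_scratch(struct mic_device *mdev, size_t len)
+{
+ void *va = kmalloc(len, GFP_KERNEL);
+ dma_addr_t mic_addr;
+ int ret = 0;
+
+ if (!va)
+ return -ENOMEM;
+ mic_addr = mic_map_single(mdev, va, len);
+ if (mic_map_error(mic_addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ /* ... let the card access the buffer through mic_addr ... */
+ mic_unmap_single(mdev, mic_addr, len);
+free:
+ kfree(va);
+ return ret;
+}
+#endif
+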
+/**
+ * mic_smpt_init - Initialize MIC System Memory Page Tables.
+ *
+ * @mdev: pointer to mic_device instance.
+ *
+ * returns 0 for success and -errno for error.
+ */
+int mic_smpt_init(struct mic_device *mdev)
+{
+ int i, err = 0;
+ dma_addr_t dma_addr;
+ struct mic_smpt_info *smpt_info;
+
+ mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL);
+ if (!mdev->smpt)
+ return -ENOMEM;
+
+ smpt_info = mdev->smpt;
+ mdev->smpt_ops->init(mdev);
+ smpt_info->entry = kmalloc_array(smpt_info->info.num_reg,
+ sizeof(*smpt_info->entry), GFP_KERNEL);
+ if (!smpt_info->entry) {
+ err = -ENOMEM;
+ goto free_smpt;
+ }
+ spin_lock_init(&smpt_info->smpt_lock);
+ for (i = 0; i < smpt_info->info.num_reg; i++) {
+ dma_addr = i * smpt_info->info.page_size;
+ smpt_info->entry[i].dma_addr = dma_addr;
+ smpt_info->entry[i].ref_count = 0;
+ mdev->smpt_ops->set(mdev, dma_addr, i);
+ }
+ smpt_info->ref_count = 0;
+ smpt_info->map_count = 0;
+ smpt_info->unmap_count = 0;
+ return 0;
+free_smpt:
+ kfree(smpt_info);
+ return err;
+}
+
+/**
+ * mic_smpt_uninit - Uninitialize MIC System Memory Page Tables.
+ *
+ * @mdev: pointer to mic_device instance.
+ *
+ * returns None.
+ */
+void mic_smpt_uninit(struct mic_device *mdev)
+{
+ struct mic_smpt_info *smpt_info = mdev->smpt;
+ int i;
+
+ dev_dbg(mdev->sdev->parent,
+ "nodeid %d SMPT ref count %lld map %lld unmap %lld\n",
+ mdev->id, smpt_info->ref_count,
+ smpt_info->map_count, smpt_info->unmap_count);
+
+ for (i = 0; i < smpt_info->info.num_reg; i++) {
+ dev_dbg(mdev->sdev->parent,
+ "SMPT entry[%d] dma_addr = 0x%llx ref_count = %lld\n",
+ i, smpt_info->entry[i].dma_addr,
+ smpt_info->entry[i].ref_count);
+ if (smpt_info->entry[i].ref_count)
+ dev_warn(mdev->sdev->parent,
+ "ref count for entry %d is not zero\n", i);
+ }
+ kfree(smpt_info->entry);
+ kfree(smpt_info);
+}
+
+/**
+ * mic_smpt_restore - Restore MIC System Memory Page Tables.
+ *
+ * @mdev: pointer to mic_device instance.
+ *
+ * Restore the SMPT registers to the values previously stored in the
+ * SW data structures. Some MIC steppings lose register state
+ * across resets, and this API should be called to restore the
+ * registers when required.
+ *
+ * returns None.
+ */
+void mic_smpt_restore(struct mic_device *mdev)
+{
+ int i;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < mdev->smpt->info.num_reg; i++) {
+ dma_addr = mdev->smpt->entry[i].dma_addr;
+ mdev->smpt_ops->set(mdev, dma_addr, i);
+ }
+}
diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h
new file mode 100644
index 00000000000..51970abfe7d
--- /dev/null
+++ b/drivers/misc/mic/host/mic_smpt.h
@@ -0,0 +1,98 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#ifndef MIC_SMPT_H
+#define MIC_SMPT_H
+/**
+ * struct mic_smpt_ops - MIC HW specific SMPT operations.
+ * @init: Initialize hardware specific SMPT information in mic_smpt_hw_info.
+ * @set: Set the value for a particular SMPT entry.
+ */
+struct mic_smpt_ops {
+ void (*init)(struct mic_device *mdev);
+ void (*set)(struct mic_device *mdev, dma_addr_t dma_addr, u8 index);
+};
+
+/**
+ * struct mic_smpt - MIC SMPT entry information.
+ * @dma_addr: Base DMA address for this SMPT entry.
+ * @ref_count: Number of bytes actively mapped through this SMPT entry.
+ */
+struct mic_smpt {
+ dma_addr_t dma_addr;
+ s64 ref_count;
+};
+
+/**
+ * struct mic_smpt_hw_info - MIC SMPT hardware specific information.
+ * @num_reg: Number of SMPT registers.
+ * @page_shift: System memory page shift.
+ * @page_size: System memory page size.
+ * @base: System address base.
+ */
+struct mic_smpt_hw_info {
+ u8 num_reg;
+ u8 page_shift;
+ u64 page_size;
+ u64 base;
+};
+
+/**
+ * struct mic_smpt_info - MIC SMPT information.
+ * @entry: Array of SMPT entries.
+ * @smpt_lock: Spin lock protecting access to SMPT data structures.
+ * @info: Hardware specific SMPT information.
+ * @ref_count: Total bytes across all active SMPT mappings (for debug).
+ * @map_count: Number of SMPT mappings created (for debug).
+ * @unmap_count: Number of SMPT mappings destroyed (for debug).
+ */
+struct mic_smpt_info {
+ struct mic_smpt *entry;
+ spinlock_t smpt_lock;
+ struct mic_smpt_hw_info info;
+ s64 ref_count;
+ s64 map_count;
+ s64 unmap_count;
+};
+
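+/*
+ * Editorial sketch (not part of the original patch): roughly how a hardware
+ * backend plugs into this abstraction. The geometry values and names below
+ * are hypothetical; the real X100 values live in the hardware specific
+ * files of this driver.
+ */
+#if 0
+static void example_smpt_init(struct mic_device *mdev)
+{
+ /* Hypothetical geometry: 32 registers covering 16G pages from 32G. */
+ mdev->smpt->info.num_reg = 32;
+ mdev->smpt->info.page_shift = 34;
+ mdev->smpt->info.page_size = 1ULL << 34;
+ mdev->smpt->info.base = 1ULL << 35;
+}
+
+static void example_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr,
+ u8 index)
+{
+ /* A real backend would program SMPT register "index" over MMIO here. */
+}
+
+static struct mic_smpt_ops example_smpt_ops = {
+ .init = example_smpt_init,
+ .set = example_smpt_set,
+};
+#endif
+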
+dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size);
+void mic_unmap_single(struct mic_device *mdev,
+ dma_addr_t mic_addr, size_t size);
+dma_addr_t mic_map(struct mic_device *mdev,
+ dma_addr_t dma_addr, size_t size);
+void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
+
+/**
+ * mic_map_error - Check a MIC address for errors.
+ *
+ * @mic_addr: MIC address returned by the mic_map..(..) APIs.
+ *
+ * returns true if the address indicates a mapping error, false otherwise.
+ */
+static inline bool mic_map_error(dma_addr_t mic_addr)
+{
+ return !mic_addr;
+}
+
+int mic_smpt_init(struct mic_device *mdev);
+void mic_smpt_uninit(struct mic_device *mdev);
+void mic_smpt_restore(struct mic_device *mdev);
+
+#endif
diff --git a/drivers/misc/mic/host/mic_sysfs.c b/drivers/misc/mic/host/mic_sysfs.c
new file mode 100644
index 00000000000..6dd864e4a61
--- /dev/null
+++ b/drivers/misc/mic/host/mic_sysfs.c
@@ -0,0 +1,459 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/pci.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+
+/*
+ * A state-to-string lookup table, for exposing a human readable state
+ * via sysfs. Always keep in sync with enum mic_states
+ */
+static const char * const mic_state_string[] = {
+ [MIC_OFFLINE] = "offline",
+ [MIC_ONLINE] = "online",
+ [MIC_SHUTTING_DOWN] = "shutting_down",
+ [MIC_RESET_FAILED] = "reset_failed",
+ [MIC_SUSPENDING] = "suspending",
+ [MIC_SUSPENDED] = "suspended",
+};
+
+/*
+ * A shutdown-status-to-string lookup table, for exposing a human
+ * readable state via sysfs. Always keep in sync with enum mic_shutdown_status
+ */
+static const char * const mic_shutdown_status_string[] = {
+ [MIC_NOP] = "nop",
+ [MIC_CRASHED] = "crashed",
+ [MIC_HALTED] = "halted",
+ [MIC_POWER_OFF] = "poweroff",
+ [MIC_RESTART] = "restart",
+};
+
+void mic_set_shutdown_status(struct mic_device *mdev, u8 shutdown_status)
+{
+ dev_dbg(mdev->sdev->parent, "Shutdown Status %s -> %s\n",
+ mic_shutdown_status_string[mdev->shutdown_status],
+ mic_shutdown_status_string[shutdown_status]);
+ mdev->shutdown_status = shutdown_status;
+}
+
+void mic_set_state(struct mic_device *mdev, u8 state)
+{
+ dev_dbg(mdev->sdev->parent, "State %s -> %s\n",
+ mic_state_string[mdev->state],
+ mic_state_string[state]);
+ mdev->state = state;
+ sysfs_notify_dirent(mdev->state_sysfs);
+}
+
+static ssize_t
+family_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ static const char x100[] = "x100";
+ static const char unknown[] = "Unknown";
+ const char *card = NULL;
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ switch (mdev->family) {
+ case MIC_FAMILY_X100:
+ card = x100;
+ break;
+ default:
+ card = unknown;
+ break;
+ }
+ return scnprintf(buf, PAGE_SIZE, "%s\n", card);
+}
+static DEVICE_ATTR_RO(family);
+
+static ssize_t
+stepping_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ char *string = "??";
+
+ if (!mdev)
+ return -EINVAL;
+
+ switch (mdev->stepping) {
+ case MIC_A0_STEP:
+ string = "A0";
+ break;
+ case MIC_B0_STEP:
+ string = "B0";
+ break;
+ case MIC_B1_STEP:
+ string = "B1";
+ break;
+ case MIC_C0_STEP:
+ string = "C0";
+ break;
+ default:
+ break;
+ }
+ return scnprintf(buf, PAGE_SIZE, "%s\n", string);
+}
+static DEVICE_ATTR_RO(stepping);
+
+static ssize_t
+state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev || mdev->state >= MIC_LAST)
+ return -EINVAL;
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ mic_state_string[mdev->state]);
+}
+
+static ssize_t
+state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = 0;
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ if (!mdev)
+ return -EINVAL;
+ if (sysfs_streq(buf, "boot")) {
+ rc = mic_start(mdev, buf);
+ if (rc) {
+ dev_err(mdev->sdev->parent,
+ "mic_boot failed rc %d\n", rc);
+ count = rc;
+ }
+ goto done;
+ }
+
+ if (sysfs_streq(buf, "reset")) {
+ schedule_work(&mdev->reset_trigger_work);
+ goto done;
+ }
+
+ if (sysfs_streq(buf, "shutdown")) {
+ mic_shutdown(mdev);
+ goto done;
+ }
+
+ if (sysfs_streq(buf, "suspend")) {
+ mic_suspend(mdev);
+ goto done;
+ }
+
+ count = -EINVAL;
+done:
+ return count;
+}
+static DEVICE_ATTR_RW(state);
+
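+/*
+ * Editorial note (not part of the original patch): the state attribute is
+ * driven with plain text writes from user space. Assuming the class device
+ * shows up as /sys/class/mic/mic0, typical usage would be:
+ *
+ * echo boot > /sys/class/mic/mic0/state
+ * echo shutdown > /sys/class/mic/mic0/state
+ * cat /sys/class/mic/mic0/state
+ */
+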
+static ssize_t shutdown_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev || mdev->shutdown_status >= MIC_STATUS_LAST)
+ return -EINVAL;
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ mic_shutdown_status_string[mdev->shutdown_status]);
+}
+static DEVICE_ATTR_RO(shutdown_status);
+
+static ssize_t
+cmdline_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ char *cmdline;
+
+ if (!mdev)
+ return -EINVAL;
+
+ cmdline = mdev->cmdline;
+
+ if (cmdline)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline);
+ return 0;
+}
+
+static ssize_t
+cmdline_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ mutex_lock(&mdev->mic_mutex);
+ kfree(mdev->cmdline);
+
+ mdev->cmdline = kmalloc(count + 1, GFP_KERNEL);
+ if (!mdev->cmdline) {
+ count = -ENOMEM;
+ goto unlock;
+ }
+
+ strncpy(mdev->cmdline, buf, count);
+
+ if (mdev->cmdline[count - 1] == '\n')
+ mdev->cmdline[count - 1] = '\0';
+ else
+ mdev->cmdline[count] = '\0';
+unlock:
+ mutex_unlock(&mdev->mic_mutex);
+ return count;
+}
+static DEVICE_ATTR_RW(cmdline);
+
+static ssize_t
+firmware_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ char *firmware;
+
+ if (!mdev)
+ return -EINVAL;
+
+ firmware = mdev->firmware;
+
+ if (firmware)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", firmware);
+ return 0;
+}
+
+static ssize_t
+firmware_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ mutex_lock(&mdev->mic_mutex);
+ kfree(mdev->firmware);
+
+ mdev->firmware = kmalloc(count + 1, GFP_KERNEL);
+ if (!mdev->firmware) {
+ count = -ENOMEM;
+ goto unlock;
+ }
+ strncpy(mdev->firmware, buf, count);
+
+ if (mdev->firmware[count - 1] == '\n')
+ mdev->firmware[count - 1] = '\0';
+ else
+ mdev->firmware[count] = '\0';
+unlock:
+ mutex_unlock(&mdev->mic_mutex);
+ return count;
+}
+static DEVICE_ATTR_RW(firmware);
+
+static ssize_t
+ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ char *ramdisk;
+
+ if (!mdev)
+ return -EINVAL;
+
+ ramdisk = mdev->ramdisk;
+
+ if (ramdisk)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk);
+ return 0;
+}
+
+static ssize_t
+ramdisk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ mutex_lock(&mdev->mic_mutex);
+ kfree(mdev->ramdisk);
+
+ mdev->ramdisk = kmalloc(count + 1, GFP_KERNEL);
+ if (!mdev->ramdisk) {
+ count = -ENOMEM;
+ goto unlock;
+ }
+
+ strncpy(mdev->ramdisk, buf, count);
+
+ if (mdev->ramdisk[count - 1] == '\n')
+ mdev->ramdisk[count - 1] = '\0';
+ else
+ mdev->ramdisk[count] = '\0';
+unlock:
+ mutex_unlock(&mdev->mic_mutex);
+ return count;
+}
+static DEVICE_ATTR_RW(ramdisk);
+
+static ssize_t
+bootmode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ char *bootmode;
+
+ if (!mdev)
+ return -EINVAL;
+
+ bootmode = mdev->bootmode;
+
+ if (bootmode)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode);
+ return 0;
+}
+
+static ssize_t
+bootmode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "elf"))
+ return -EINVAL;
+
+ mutex_lock(&mdev->mic_mutex);
+ kfree(mdev->bootmode);
+
+ mdev->bootmode = kmalloc(count + 1, GFP_KERNEL);
+ if (!mdev->bootmode) {
+ count = -ENOMEM;
+ goto unlock;
+ }
+
+ strncpy(mdev->bootmode, buf, count);
+
+ if (mdev->bootmode[count - 1] == '\n')
+ mdev->bootmode[count - 1] = '\0';
+ else
+ mdev->bootmode[count] = '\0';
+unlock:
+ mutex_unlock(&mdev->mic_mutex);
+ return count;
+}
+static DEVICE_ATTR_RW(bootmode);
+
+static ssize_t
+log_buf_addr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_addr);
+}
+
+static ssize_t
+log_buf_addr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ int ret;
+ unsigned long addr;
+
+ if (!mdev)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 16, &addr);
+ if (ret)
+ goto exit;
+
+ mdev->log_buf_addr = (void *)addr;
+ ret = count;
+exit:
+ return ret;
+}
+static DEVICE_ATTR_RW(log_buf_addr);
+
+static ssize_t
+log_buf_len_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+ if (!mdev)
+ return -EINVAL;
+
+ return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_len);
+}
+
+static ssize_t
+log_buf_len_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mic_device *mdev = dev_get_drvdata(dev->parent);
+ int ret;
+ unsigned long addr;
+
+ if (!mdev)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 16, &addr);
+ if (ret)
+ goto exit;
+
+ mdev->log_buf_len = (int *)addr;
+ ret = count;
+exit:
+ return ret;
+}
+static DEVICE_ATTR_RW(log_buf_len);
+
+static struct attribute *mic_default_attrs[] = {
+ &dev_attr_family.attr,
+ &dev_attr_stepping.attr,
+ &dev_attr_state.attr,
+ &dev_attr_shutdown_status.attr,
+ &dev_attr_cmdline.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_ramdisk.attr,
+ &dev_attr_bootmode.attr,
+ &dev_attr_log_buf_addr.attr,
+ &dev_attr_log_buf_len.attr,
+
+ NULL
+};
+
+ATTRIBUTE_GROUPS(mic_default);
+
+void mic_sysfs_init(struct mic_device *mdev)
+{
+ mdev->attr_group = mic_default_groups;
+}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
new file mode 100644
index 00000000000..5b8494bd1e0
--- /dev/null
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -0,0 +1,700 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_smpt.h"
+#include "mic_virtio.h"
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to
+ * a user space buffer.
+ */
+static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
+ void __user *ubuf, size_t len, u64 addr)
+{
+ int err;
+ void __iomem *dbuf = mvdev->mdev->aper.va + addr;
+ /*
+ * We are copying from IO below and should ideally use something
+ * like copy_to_user_fromio(..) if it existed.
+ */
+ if (copy_to_user(ubuf, dbuf, len)) {
+ err = -EFAULT;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ mvdev->in_bytes += len;
+ err = 0;
+err:
+ return err;
+}
+
+/*
+ * Initiates copies across the PCIe bus from a user space
+ * buffer to card memory.
+ */
+static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
+ void __user *ubuf, size_t len, u64 addr)
+{
+ int err;
+ void __iomem *dbuf = mvdev->mdev->aper.va + addr;
+ /*
+ * We are copying to IO below and should ideally use something
+ * like copy_from_user_toio(..) if it existed.
+ */
+ if (copy_from_user(dbuf, ubuf, len)) {
+ err = -EFAULT;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ mvdev->out_bytes += len;
+ err = 0;
+err:
+ return err;
+}
+
+#define MIC_VRINGH_READ true
+
+/* The function to call to notify the card about added buffers */
+static void mic_notify(struct vringh *vrh)
+{
+ struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
+ struct mic_vdev *mvdev = mvrh->mvdev;
+ s8 db = mvdev->dc->h2c_vdev_db;
+
+ if (db != -1)
+ mvdev->mdev->ops->send_intr(mvdev->mdev, db);
+}
+
+/* Determine the total number of bytes consumed in a VRINGH KIOV */
+static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
+{
+ int i;
+ u32 total = iov->consumed;
+
+ for (i = 0; i < iov->i; i++)
+ total += iov->iov[i].iov_len;
+ return total;
+}
+
+/*
+ * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
+ * This API is heavily based on the vringh_iov_xfer(..) implementation
+ * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
+ * and vringh_iov_push_kern(..) directly is that there is no way
+ * to override the VRINGH xfer(..) routines as of v3.10.
+ */
+static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
+ void __user *ubuf, size_t len, bool read, size_t *out_len)
+{
+ int ret = 0;
+ size_t partlen, tot_len = 0;
+
+ while (len && iov->i < iov->used) {
+ partlen = min(iov->iov[iov->i].iov_len, len);
+ if (read)
+ ret = mic_virtio_copy_to_user(mvdev,
+ ubuf, partlen,
+ (u64)iov->iov[iov->i].iov_base);
+ else
+ ret = mic_virtio_copy_from_user(mvdev,
+ ubuf, partlen,
+ (u64)iov->iov[iov->i].iov_base);
+ if (ret) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= partlen;
+ ubuf += partlen;
+ tot_len += partlen;
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+ }
+ *out_len = tot_len;
+ return ret;
+}
+
+/*
+ * Use the standard VRINGH infrastructure in the kernel to fetch new
+ * descriptors, initiate the copies and update the used ring.
+ */
+static int _mic_virtio_copy(struct mic_vdev *mvdev,
+ struct mic_copy_desc *copy)
+{
+ int ret = 0, iovcnt = copy->iovcnt;
+ struct iovec iov;
+ struct iovec __user *u_iov = copy->iov;
+ void __user *ubuf = NULL;
+ struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
+ struct vringh_kiov *riov = &mvr->riov;
+ struct vringh_kiov *wiov = &mvr->wiov;
+ struct vringh *vrh = &mvr->vrh;
+ u16 *head = &mvr->head;
+ struct mic_vring *vr = &mvr->vring;
+ size_t len = 0, out_len;
+
+ copy->out_len = 0;
+ /* Fetch a new IOVEC if all previous elements have been processed */
+ if (riov->i == riov->used && wiov->i == wiov->used) {
+ ret = vringh_getdesc_kern(vrh, riov, wiov,
+ head, GFP_KERNEL);
+ /* Check if there are available descriptors */
+ if (ret <= 0)
+ return ret;
+ }
+ while (iovcnt) {
+ if (!len) {
+ /* Copy over a new iovec from user space. */
+ ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len = iov.iov_len;
+ ubuf = iov.iov_base;
+ }
+ /* Issue all the read descriptors first */
+ ret = mic_vringh_copy(mvdev, riov, ubuf, len,
+ MIC_VRINGH_READ, &out_len);
+ if (ret) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= out_len;
+ ubuf += out_len;
+ copy->out_len += out_len;
+ /* Issue the write descriptors next */
+ ret = mic_vringh_copy(mvdev, wiov, ubuf, len,
+ !MIC_VRINGH_READ, &out_len);
+ if (ret) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= out_len;
+ ubuf += out_len;
+ copy->out_len += out_len;
+ if (!len) {
+ /* One user space iovec is now completed */
+ iovcnt--;
+ u_iov++;
+ }
+ /* Exit loop if all elements in KIOVs have been processed. */
+ if (riov->i == riov->used && wiov->i == wiov->used)
+ break;
+ }
+ /*
+ * Update the used ring if a descriptor was available and some data was
+ * copied in/out and the user asked for a used ring update.
+ */
+ if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
+ u32 total = 0;
+
+ /* Determine the total data consumed */
+ total += mic_vringh_iov_consumed(riov);
+ total += mic_vringh_iov_consumed(wiov);
+ vringh_complete_kern(vrh, *head, total);
+ *head = USHRT_MAX;
+ if (vringh_need_notify_kern(vrh) > 0)
+ vringh_notify(vrh);
+ vringh_kiov_cleanup(riov);
+ vringh_kiov_cleanup(wiov);
+ /* Update avail idx for user space */
+ vr->info->avail_idx = vrh->last_avail_idx;
+ }
+ return ret;
+}
+
+static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
+ struct mic_copy_desc *copy)
+{
+ if (copy->vr_idx >= mvdev->dd->num_vq) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Copy a specified number of virtio descriptors in a chain */
+int mic_virtio_copy_desc(struct mic_vdev *mvdev,
+ struct mic_copy_desc *copy)
+{
+ int err;
+ struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
+
+ err = mic_verify_copy_args(mvdev, copy);
+ if (err)
+ return err;
+
+ mutex_lock(&mvr->vr_mutex);
+ if (!mic_vdevup(mvdev)) {
+ err = -ENODEV;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ err = _mic_virtio_copy(mvdev, copy);
+ if (err) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ }
+err:
+ mutex_unlock(&mvr->vr_mutex);
+ return err;
+}
+
+static void mic_virtio_init_post(struct mic_vdev *mvdev)
+{
+ struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
+ int i;
+
+ for (i = 0; i < mvdev->dd->num_vq; i++) {
+ if (!le64_to_cpu(vqconfig[i].used_address)) {
+ dev_warn(mic_dev(mvdev), "used_address zero??\n");
+ continue;
+ }
+ mvdev->mvr[i].vrh.vring.used =
+ mvdev->mdev->aper.va +
+ le64_to_cpu(vqconfig[i].used_address);
+ }
+
+ mvdev->dc->used_address_updated = 0;
+
+ dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
+ __func__, mvdev->virtio_id);
+}
+
+static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
+{
+ int i;
+
+ dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
+ __func__, mvdev->dd->status, mvdev->virtio_id);
+
+ for (i = 0; i < mvdev->dd->num_vq; i++)
+ /*
+ * Avoid lockdep false positive. The + 1 is for the mic
+ * mutex which is held in the reset devices code path.
+ */
+ mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
+
+ /* 0 status means "reset" */
+ mvdev->dd->status = 0;
+ mvdev->dc->vdev_reset = 0;
+ mvdev->dc->host_ack = 1;
+
+ for (i = 0; i < mvdev->dd->num_vq; i++) {
+ struct vringh *vrh = &mvdev->mvr[i].vrh;
+ mvdev->mvr[i].vring.info->avail_idx = 0;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ }
+
+ for (i = 0; i < mvdev->dd->num_vq; i++)
+ mutex_unlock(&mvdev->mvr[i].vr_mutex);
+}
+
+void mic_virtio_reset_devices(struct mic_device *mdev)
+{
+ struct list_head *pos, *tmp;
+ struct mic_vdev *mvdev;
+
+ dev_dbg(mdev->sdev->parent, "%s\n", __func__);
+
+ list_for_each_safe(pos, tmp, &mdev->vdev_list) {
+ mvdev = list_entry(pos, struct mic_vdev, list);
+ mic_virtio_device_reset(mvdev);
+ mvdev->poll_wake = 1;
+ wake_up(&mvdev->waitq);
+ }
+}
+
+void mic_bh_handler(struct work_struct *work)
+{
+ struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
+ virtio_bh_work);
+
+ if (mvdev->dc->used_address_updated)
+ mic_virtio_init_post(mvdev);
+
+ if (mvdev->dc->vdev_reset)
+ mic_virtio_device_reset(mvdev);
+
+ mvdev->poll_wake = 1;
+ wake_up(&mvdev->waitq);
+}
+
+static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
+{
+ struct mic_vdev *mvdev = data;
+ struct mic_device *mdev = mvdev->mdev;
+
+ mdev->ops->ack_interrupt(mdev);
+ schedule_work(&mvdev->virtio_bh_work);
+ return IRQ_HANDLED;
+}
+
+int mic_virtio_config_change(struct mic_vdev *mvdev,
+ void __user *argp)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+ int ret = 0, retry = 100, i;
+ struct mic_bootparam *bootparam = mvdev->mdev->dp;
+ s8 db = bootparam->h2c_config_db;
+
+ mutex_lock(&mvdev->mdev->mic_mutex);
+ for (i = 0; i < mvdev->dd->num_vq; i++)
+ mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
+
+ if (db == -1 || mvdev->dd->type == -1) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (copy_from_user(mic_vq_configspace(mvdev->dd),
+ argp, mvdev->dd->config_len)) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -EFAULT);
+ ret = -EFAULT;
+ goto exit;
+ }
+ mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
+ mvdev->mdev->ops->send_intr(mvdev->mdev, db);
+
+ for (i = retry; i--;) {
+ ret = wait_event_timeout(wake,
+ mvdev->dc->guest_ack, msecs_to_jiffies(100));
+ if (ret)
+ break;
+ }
+
+ dev_dbg(mic_dev(mvdev),
+ "%s %d retry: %d\n", __func__, __LINE__, retry);
+ mvdev->dc->config_change = 0;
+ mvdev->dc->guest_ack = 0;
+exit:
+ for (i = 0; i < mvdev->dd->num_vq; i++)
+ mutex_unlock(&mvdev->mvr[i].vr_mutex);
+ mutex_unlock(&mvdev->mdev->mic_mutex);
+ return ret;
+}
+
+static int mic_copy_dp_entry(struct mic_vdev *mvdev,
+ void __user *argp,
+ __u8 *type,
+ struct mic_device_desc **devpage)
+{
+ struct mic_device *mdev = mvdev->mdev;
+ struct mic_device_desc dd, *dd_config, *devp;
+ struct mic_vqconfig *vqconfig;
+ int ret = 0, i;
+ bool slot_found = false;
+
+ if (copy_from_user(&dd, argp, sizeof(dd))) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -EFAULT);
+ return -EFAULT;
+ }
+
+ if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
+ dd.num_vq > MIC_MAX_VRINGS) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -EINVAL);
+ return -EINVAL;
+ }
+
+ dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
+ if (dd_config == NULL) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -ENOMEM);
+ return -ENOMEM;
+ }
+ if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
+ ret = -EFAULT;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+
+ vqconfig = mic_vq_config(dd_config);
+ for (i = 0; i < dd.num_vq; i++) {
+ if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
+ ret = -EINVAL;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+ }
+
+ /* Find the first free device page entry */
+ for (i = mic_aligned_size(struct mic_bootparam);
+ i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
+ i += mic_total_desc_size(devp)) {
+ devp = mdev->dp + i;
+ if (devp->type == 0 || devp->type == -1) {
+ slot_found = true;
+ break;
+ }
+ }
+ if (!slot_found) {
+ ret = -EINVAL;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+ /*
+ * Save off the type before doing the memcpy. Type will be set in the
+ * end after completing all initialization for the new device.
+ */
+ *type = dd_config->type;
+ dd_config->type = 0;
+ memcpy(devp, dd_config, mic_desc_size(dd_config));
+
+ *devpage = devp;
+exit:
+ kfree(dd_config);
+ return ret;
+}
+
+static void mic_init_device_ctrl(struct mic_vdev *mvdev,
+ struct mic_device_desc *devpage)
+{
+ struct mic_device_ctrl *dc;
+
+ dc = (void *)devpage + mic_aligned_desc_size(devpage);
+
+ dc->config_change = 0;
+ dc->guest_ack = 0;
+ dc->vdev_reset = 0;
+ dc->host_ack = 0;
+ dc->used_address_updated = 0;
+ dc->c2h_vdev_db = -1;
+ dc->h2c_vdev_db = -1;
+ mvdev->dc = dc;
+}
+
+int mic_virtio_add_device(struct mic_vdev *mvdev,
+ void __user *argp)
+{
+ struct mic_device *mdev = mvdev->mdev;
+ struct mic_device_desc *dd = NULL;
+ struct mic_vqconfig *vqconfig;
+ int vr_size, i, j, ret;
+ u8 type = 0;
+ s8 db;
+ char irqname[10];
+ struct mic_bootparam *bootparam = mdev->dp;
+ u16 num;
+
+ mutex_lock(&mdev->mic_mutex);
+
+ ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
+ if (ret) {
+ mutex_unlock(&mdev->mic_mutex);
+ return ret;
+ }
+
+ mic_init_device_ctrl(mvdev, dd);
+
+ mvdev->dd = dd;
+ mvdev->virtio_id = type;
+ vqconfig = mic_vq_config(dd);
+ INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
+
+ for (i = 0; i < dd->num_vq; i++) {
+ struct mic_vringh *mvr = &mvdev->mvr[i];
+ struct mic_vring *vr = &mvdev->mvr[i].vring;
+ num = le16_to_cpu(vqconfig[i].num);
+ mutex_init(&mvr->vr_mutex);
+ vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
+ sizeof(struct _mic_vring_info));
+ vr->va = (void *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(vr_size));
+ if (!vr->va) {
+ ret = -ENOMEM;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vr->len = vr_size;
+ vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
+ vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i;
+ vqconfig[i].address = mic_map_single(mdev,
+ vr->va, vr_size);
+ if (mic_map_error(vqconfig[i].address)) {
+ free_pages((unsigned long)vr->va, get_order(vr_size));
+ ret = -ENOMEM;
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vqconfig[i].address = cpu_to_le64(vqconfig[i].address);
+
+ vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
+ ret = vringh_init_kern(&mvr->vrh,
+ *(u32 *)mic_vq_features(mvdev->dd), num, false,
+ vr->vr.desc, vr->vr.avail, vr->vr.used);
+ if (ret) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vringh_kiov_init(&mvr->riov, NULL, 0);
+ vringh_kiov_init(&mvr->wiov, NULL, 0);
+ mvr->head = USHRT_MAX;
+ mvr->mvdev = mvdev;
+ mvr->vrh.notify = mic_notify;
+ dev_dbg(mdev->sdev->parent,
+ "%s %d index %d va %p info %p vr_size 0x%x\n",
+ __func__, __LINE__, i, vr->va, vr->info, vr_size);
+ }
+
+ snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
+ mvdev->virtio_id);
+ mvdev->virtio_db = mic_next_db(mdev);
+ mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler,
+ irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB);
+ if (IS_ERR(mvdev->virtio_cookie)) {
+ ret = PTR_ERR(mvdev->virtio_cookie);
+ dev_dbg(mdev->sdev->parent, "request irq failed\n");
+ goto err;
+ }
+
+ mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
+
+ list_add_tail(&mvdev->list, &mdev->vdev_list);
+ /*
+ * Order the type update with previous stores. This write barrier
+ * is paired with the corresponding read barrier before the uncached
+ * system memory read of the type, on the card while scanning the
+ * device page.
+ */
+ smp_wmb();
+ dd->type = type;
+
+ dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);
+
+ db = bootparam->h2c_config_db;
+ if (db != -1)
+ mdev->ops->send_intr(mdev, db);
+ mutex_unlock(&mdev->mic_mutex);
+ return 0;
+err:
+ vqconfig = mic_vq_config(dd);
+ for (j = 0; j < i; j++) {
+ struct mic_vringh *mvr = &mvdev->mvr[j];
+ mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
+ mvr->vring.len);
+ free_pages((unsigned long)mvr->vring.va,
+ get_order(mvr->vring.len));
+ }
+ mutex_unlock(&mdev->mic_mutex);
+ return ret;
+}
+
+void mic_virtio_del_device(struct mic_vdev *mvdev)
+{
+ struct list_head *pos, *tmp;
+ struct mic_vdev *tmp_mvdev;
+ struct mic_device *mdev = mvdev->mdev;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+ int i, ret, retry = 100;
+ struct mic_vqconfig *vqconfig;
+ struct mic_bootparam *bootparam = mdev->dp;
+ s8 db;
+
+ mutex_lock(&mdev->mic_mutex);
+ db = bootparam->h2c_config_db;
+ if (db == -1)
+ goto skip_hot_remove;
+ dev_dbg(mdev->sdev->parent,
+ "Requesting hot remove id %d\n", mvdev->virtio_id);
+ mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
+ mdev->ops->send_intr(mdev, db);
+ for (i = retry; i--;) {
+ ret = wait_event_timeout(wake,
+ mvdev->dc->guest_ack, msecs_to_jiffies(100));
+ if (ret)
+ break;
+ }
+ dev_dbg(mdev->sdev->parent,
+ "Device id %d config_change %d guest_ack %d\n",
+ mvdev->virtio_id, mvdev->dc->config_change,
+ mvdev->dc->guest_ack);
+ mvdev->dc->config_change = 0;
+ mvdev->dc->guest_ack = 0;
+skip_hot_remove:
+ mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
+ flush_work(&mvdev->virtio_bh_work);
+ vqconfig = mic_vq_config(mvdev->dd);
+ for (i = 0; i < mvdev->dd->num_vq; i++) {
+ struct mic_vringh *mvr = &mvdev->mvr[i];
+ vringh_kiov_cleanup(&mvr->riov);
+ vringh_kiov_cleanup(&mvr->wiov);
+ mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
+ mvr->vring.len);
+ free_pages((unsigned long)mvr->vring.va,
+ get_order(mvr->vring.len));
+ }
+
+ list_for_each_safe(pos, tmp, &mdev->vdev_list) {
+ tmp_mvdev = list_entry(pos, struct mic_vdev, list);
+ if (tmp_mvdev == mvdev) {
+ list_del(pos);
+ dev_dbg(mdev->sdev->parent,
+ "Removing virtio device id %d\n",
+ mvdev->virtio_id);
+ break;
+ }
+ }
+ /*
+ * Order the type update with previous stores. This write barrier
+ * is paired with the corresponding read barrier before the uncached
+ * system memory read of the type, on the card while scanning the
+ * device page.
+ */
+ smp_wmb();
+ mvdev->dd->type = -1;
+ mutex_unlock(&mdev->mic_mutex);
+}
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h
new file mode 100644
index 00000000000..184f3c84805
--- /dev/null
+++ b/drivers/misc/mic/host/mic_virtio.h
@@ -0,0 +1,138 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#ifndef MIC_VIRTIO_H
+#define MIC_VIRTIO_H
+
+#include <linux/virtio_config.h>
+#include <linux/mic_ioctl.h>
+
+/*
+ * Note on endianness.
+ * 1. Host can be both BE or LE
+ * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail
+ * rings and ioreadXX/iowriteXX to access used ring.
+ * 3. Device page exposed by host to guest contains LE values. Guest
+ * accesses these using ioreadXX/iowriteXX etc. This way in general we
+ * obey the virtio spec according to which guest works with native
+ * endianness and host is aware of guest endianness and does all
+ * required endianness conversion.
+ * 4. Data provided from user space to guest (in ADD_DEVICE and
+ * CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be
+ * in guest endianness.
+ */
+
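+/*
+ * Editorial sketch (not part of the original patch) of the conventions
+ * above: host side code reads little-endian descriptor/config fields via
+ * le*_to_cpu and touches the used ring, which lives in card memory mapped
+ * through the aperture, via ioread/iowrite accessors.
+ */
+#if 0
+static u16 example_ring_entries(struct mic_vqconfig *vqconfig, int i)
+{
+ return le16_to_cpu(vqconfig[i].num);
+}
+
+static u16 example_used_idx(struct vring_used __iomem *used)
+{
+ return ioread16(&used->idx);
+}
+#endif
+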
+/**
+ * struct mic_vringh - Virtio ring host information.
+ *
+ * @vring: The MIC vring used for setting up user space mappings.
+ * @vrh: The host VRINGH used for accessing the card vrings.
+ * @riov: The VRINGH read kernel IOV.
+ * @wiov: The VRINGH write kernel IOV.
+ * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
+ * @vr_mutex: Mutex for synchronizing access to the VRING.
+ * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
+ */
+struct mic_vringh {
+ struct mic_vring vring;
+ struct vringh vrh;
+ struct vringh_kiov riov;
+ struct vringh_kiov wiov;
+ u16 head;
+ struct mutex vr_mutex;
+ struct mic_vdev *mvdev;
+};
+
+/**
+ * struct mic_vdev - Host information for a card Virtio device.
+ *
+ * @virtio_id: Virtio device id.
+ * @waitq: Waitqueue to allow ring3 apps to poll.
+ * @mdev: Back pointer to host MIC device.
+ * @poll_wake: Used for waking up threads blocked in poll.
+ * @out_bytes: Debug stats for number of bytes copied from host to card.
+ * @in_bytes: Debug stats for number of bytes copied from card to host.
+ * @mvr: Store per VRING data structures.
+ * @virtio_bh_work: Work struct used to schedule virtio bottom half handling.
+ * @dd: Virtio device descriptor.
+ * @dc: Virtio device control fields.
+ * @list: List of Virtio devices.
+ * @virtio_db: The doorbell used by the card to interrupt the host.
+ * @virtio_cookie: The cookie returned while requesting interrupts.
+ */
+struct mic_vdev {
+ int virtio_id;
+ wait_queue_head_t waitq;
+ struct mic_device *mdev;
+ int poll_wake;
+ unsigned long out_bytes;
+ unsigned long in_bytes;
+ struct mic_vringh mvr[MIC_MAX_VRINGS];
+ struct work_struct virtio_bh_work;
+ struct mic_device_desc *dd;
+ struct mic_device_ctrl *dc;
+ struct list_head list;
+ int virtio_db;
+ struct mic_irq *virtio_cookie;
+};
+
+void mic_virtio_uninit(struct mic_device *mdev);
+int mic_virtio_add_device(struct mic_vdev *mvdev,
+ void __user *argp);
+void mic_virtio_del_device(struct mic_vdev *mvdev);
+int mic_virtio_config_change(struct mic_vdev *mvdev,
+ void __user *argp);
+int mic_virtio_copy_desc(struct mic_vdev *mvdev,
+ struct mic_copy_desc *request);
+void mic_virtio_reset_devices(struct mic_device *mdev);
+void mic_bh_handler(struct work_struct *work);
+
+/* Helper API to obtain the MIC PCIe device */
+static inline struct device *mic_dev(struct mic_vdev *mvdev)
+{
+ return mvdev->mdev->sdev->parent;
+}
+
+/* Helper API to check if a virtio device is initialized */
+static inline int mic_vdev_inited(struct mic_vdev *mvdev)
+{
+ /* Device has not been created yet */
+ if (!mvdev->dd || !mvdev->dd->type) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -EINVAL);
+ return -EINVAL;
+ }
+
+ /* Device has been removed/deleted */
+ if (mvdev->dd->type == -1) {
+ dev_err(mic_dev(mvdev), "%s %d err %d\n",
+ __func__, __LINE__, -ENODEV);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Helper API to check if a virtio device is running */
+static inline bool mic_vdevup(struct mic_vdev *mvdev)
+{
+ return !!mvdev->dd->status;
+}
+#endif
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
new file mode 100644
index 00000000000..81e9541b784
--- /dev/null
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -0,0 +1,570 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_x100.h"
+#include "mic_smpt.h"
+
+/**
+ * mic_x100_write_spad - write to the scratchpad register
+ * @mdev: pointer to mic_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register.
+ *
+ * RETURNS: none.
+ */
+static void
+mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val)
+{
+ dev_dbg(mdev->sdev->parent, "Writing 0x%x to scratch pad index %d\n",
+ val, idx);
+ mic_mmio_write(&mdev->mmio, val,
+ MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_SPAD0 + idx * 4);
+}
+
+/**
+ * mic_x100_read_spad - read from the scratchpad register
+ * @mdev: pointer to mic_device instance
+ * @idx: index to scratchpad register, 0 based
+ *
+ * This function allows reading of the 32bit scratchpad register.
+ *
+ * RETURNS: The value read from the scratchpad register.
+ */
+static u32
+mic_x100_read_spad(struct mic_device *mdev, unsigned int idx)
+{
+ u32 val = mic_mmio_read(&mdev->mmio,
+ MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_SPAD0 + idx * 4);
+
+ dev_dbg(mdev->sdev->parent,
+ "Reading 0x%x from scratch pad index %d\n", val, idx);
+ return val;
+}
+
+/**
+ * mic_x100_enable_interrupts - Enable interrupts.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_x100_enable_interrupts(struct mic_device *mdev)
+{
+ u32 reg;
+ struct mic_mw *mw = &mdev->mmio;
+ u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0;
+ u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0;
+
+ reg = mic_mmio_read(mw, sice0);
+ reg |= MIC_X100_SBOX_DBR_BITS(0xf) | MIC_X100_SBOX_DMA_BITS(0xff);
+ mic_mmio_write(mw, reg, sice0);
+
+ /*
+ * Enable auto-clear when enabling interrupts. Applicable only for
+ * MSI-x. Legacy and MSI mode cannot have auto-clear enabled.
+ */
+ if (mdev->irq_info.num_vectors > 1) {
+ reg = mic_mmio_read(mw, siac0);
+ reg |= MIC_X100_SBOX_DBR_BITS(0xf) |
+ MIC_X100_SBOX_DMA_BITS(0xff);
+ mic_mmio_write(mw, reg, siac0);
+ }
+}
+
+/**
+ * mic_x100_disable_interrupts - Disable interrupts.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_x100_disable_interrupts(struct mic_device *mdev)
+{
+ u32 reg;
+ struct mic_mw *mw = &mdev->mmio;
+ u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0;
+ u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0;
+ u32 sicc0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICC0;
+
+ reg = mic_mmio_read(mw, sice0);
+ mic_mmio_write(mw, reg, sicc0);
+
+ if (mdev->irq_info.num_vectors > 1) {
+ reg = mic_mmio_read(mw, siac0);
+ reg &= ~(MIC_X100_SBOX_DBR_BITS(0xf) |
+ MIC_X100_SBOX_DMA_BITS(0xff));
+ mic_mmio_write(mw, reg, siac0);
+ }
+}
+
+/**
+ * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
+ * @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
+ */
+static void mic_x100_send_sbox_intr(struct mic_device *mdev,
+ int doorbell)
+{
+ struct mic_mw *mw = &mdev->mmio;
+ u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
+ u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
+ apic_icr_offset);
+
+ /* for MIC we need to make sure we "hit" the send_icr bit (13) */
+ apicicr_low = (apicicr_low | (1 << 13));
+
+ /* Ensure that the interrupt is ordered w.r.t. previous stores. */
+ wmb();
+ mic_mmio_write(mw, apicicr_low,
+ MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
+}
+
+/**
+ * mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC.
+ * @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
+ */
+static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
+ int doorbell)
+{
+ int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
+ /* Ensure that the interrupt is ordered w.r.t. previous stores. */
+ wmb();
+ mic_mmio_write(&mdev->mmio, 0,
+ MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
+}
+
+/**
+ * mic_x100_send_intr - Send interrupt to MIC.
+ * @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number.
+ */
+static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
+{
+ int rdmasr_db;
+ if (doorbell < MIC_X100_NUM_SBOX_IRQ) {
+ mic_x100_send_sbox_intr(mdev, doorbell);
+ } else {
+ rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ +
+ MIC_X100_RDMASR_IRQ_BASE;
+ mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
+ }
+}
+
+/**
+ * mic_x100_ack_interrupt - Device specific interrupt handling.
+ * @mdev: pointer to mic_device instance
+ *
+ * Returns: bitmask of doorbell events triggered.
+ */
+static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
+{
+ u32 reg = 0;
+ struct mic_mw *mw = &mdev->mmio;
+ u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0;
+
+ /* Clear pending bit array. */
+ if (MIC_A0_STEP == mdev->stepping)
+ mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_MSIXPBACR);
+
+ if (mdev->irq_info.num_vectors <= 1) {
+ reg = mic_mmio_read(mw, sicr0);
+
+ if (unlikely(!reg))
+ goto done;
+
+ mic_mmio_write(mw, reg, sicr0);
+ }
+
+ if (mdev->stepping >= MIC_B0_STEP)
+ mdev->intr_ops->enable_interrupts(mdev);
+done:
+ return reg;
+}
+
+/**
+ * mic_x100_hw_intr_init - Initialize h/w specific interrupt
+ * information.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_x100_hw_intr_init(struct mic_device *mdev)
+{
+ mdev->intr_info = (struct mic_intr_info *)mic_x100_intr_init;
+}
+
+/**
+ * mic_x100_read_msi_to_src_map - read from the MSI mapping registers
+ * @mdev: pointer to mic_device instance
+ * @idx: index to the mapping register, 0 based
+ *
+ * This function allows reading of the 32bit MSI mapping register.
+ *
+ * RETURNS: The value in the register.
+ */
+static u32
+mic_x100_read_msi_to_src_map(struct mic_device *mdev, int idx)
+{
+ return mic_mmio_read(&mdev->mmio,
+ MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_MXAR0 + idx * 4);
+}
+
+/**
+ * mic_x100_program_msi_to_src_map - program the MSI mapping registers
+ * @mdev: pointer to mic_device instance
+ * @idx: index to the mapping register, 0 based
+ * @offset: The bit offset in the register that needs to be updated.
+ * @set: boolean specifying if the bit in the specified offset needs
+ * to be set or cleared.
+ *
+ * RETURNS: None.
+ */
+static void
+mic_x100_program_msi_to_src_map(struct mic_device *mdev,
+ int idx, int offset, bool set)
+{
+ unsigned long reg;
+ struct mic_mw *mw = &mdev->mmio;
+ u32 mxar = MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_MXAR0 + idx * 4;
+
+ reg = mic_mmio_read(mw, mxar);
+ if (set)
+ __set_bit(offset, &reg);
+ else
+ __clear_bit(offset, &reg);
+ mic_mmio_write(mw, reg, mxar);
+}
+
+/*
+ * mic_x100_reset_fw_ready - Reset Firmware ready status field.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_x100_reset_fw_ready(struct mic_device *mdev)
+{
+ mdev->ops->write_spad(mdev, MIC_X100_DOWNLOAD_INFO, 0);
+}
+
+/*
+ * mic_x100_is_fw_ready - Check if firmware is ready.
+ * @mdev: pointer to mic_device instance
+ */
+static bool mic_x100_is_fw_ready(struct mic_device *mdev)
+{
+ u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
+ return MIC_X100_SPAD2_DOWNLOAD_STATUS(scratch2) ? true : false;
+}
+
+/**
+ * mic_x100_get_apic_id - Get bootstrap APIC ID.
+ * @mdev: pointer to mic_device instance
+ */
+static u32 mic_x100_get_apic_id(struct mic_device *mdev)
+{
+ u32 scratch2 = 0;
+
+ scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
+ return MIC_X100_SPAD2_APIC_ID(scratch2);
+}
+
+/**
+ * mic_x100_send_firmware_intr - Send an interrupt to the firmware on MIC.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_x100_send_firmware_intr(struct mic_device *mdev)
+{
+ u32 apicicr_low;
+ u64 apic_icr_offset = MIC_X100_SBOX_APICICR7;
+ int vector = MIC_X100_BSP_INTERRUPT_VECTOR;
+ struct mic_mw *mw = &mdev->mmio;
+
+ /*
+ * Set the send_icr bit (bit 13) along with the vector so that the
+ * write to APICICR below actually triggers the interrupt.
+ */
+ apicicr_low = (vector | (1 << 13));
+
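+ /* Program the bootstrap APIC ID into the upper word of APICICR7. */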
+ mic_mmio_write(mw, mic_x100_get_apic_id(mdev),
+ MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset + 4);
+
+ /* Ensure that the interrupt is ordered w.r.t. previous stores. */
+ wmb();
+ mic_mmio_write(mw, apicicr_low,
+ MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
+}
+
+/**
+ * mic_x100_hw_reset - Reset the MIC device.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_x100_hw_reset(struct mic_device *mdev)
+{
+ u32 reset_reg;
+ u32 rgcr = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_RGCR;
+ struct mic_mw *mw = &mdev->mmio;
+
+ /* Ensure that the reset is ordered w.r.t. previous loads and stores */
+ mb();
+ /* Trigger reset */
+ reset_reg = mic_mmio_read(mw, rgcr);
+ reset_reg |= 0x1;
+ mic_mmio_write(mw, reset_reg, rgcr);
+ /*
+ * Delay for at least 1 second after triggering the reset; shorter
+ * delays have been found to cause problems.
+ */
+ msleep(1000);
+}
+
+/**
+ * mic_x100_load_command_line - Load command line to MIC.
+ * @mdev: pointer to mic_device instance
+ * @fw: the firmware image
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int
+mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
+{
+ u32 len = 0;
+ u32 boot_mem;
+ char *buf;
+ void __iomem *cmd_line_va = mdev->aper.va + mdev->bootaddr + fw->size;
+#define CMDLINE_SIZE 2048
+
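+ /*
+ * Convert the aperture length to megabytes for the kernel "mem="
+ * parameter, e.g. an 8GB aperture yields " mem=8192M".
+ */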
+ boot_mem = mdev->aper.len >> 20;
+ buf = kzalloc(CMDLINE_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(mdev->sdev->parent,
+ "%s %d allocation failed\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ len += snprintf(buf, CMDLINE_SIZE - len,
+ " mem=%dM", boot_mem);
+ if (mdev->cmdline)
+ snprintf(buf + len, CMDLINE_SIZE - len, " %s", mdev->cmdline);
+ memcpy_toio(cmd_line_va, buf, strlen(buf) + 1);
+ kfree(buf);
+ return 0;
+}
+
+/**
+ * mic_x100_load_ramdisk - Load ramdisk to MIC.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int
+mic_x100_load_ramdisk(struct mic_device *mdev)
+{
+ const struct firmware *fw;
+ int rc;
+ struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr;
+
+ rc = request_firmware(&fw,
+ mdev->ramdisk, mdev->sdev->parent);
+ if (rc < 0) {
+ dev_err(mdev->sdev->parent,
+ "ramdisk request_firmware failed: %d %s\n",
+ rc, mdev->ramdisk);
+ goto error;
+ }
+ /*
+ * The bootaddr for the card OS is typically 64M, so copy the
+ * ramdisk to bootaddr << 1, i.e. 128M.
+ */
+ memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
+ iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image);
+ iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size);
+ release_firmware(fw);
+error:
+ return rc;
+}
+
+/**
+ * mic_x100_get_boot_addr - Get MIC boot address.
+ * @mdev: pointer to mic_device instance
+ *
+ * This function is called during firmware load to determine
+ * the address at which the OS should be downloaded in card
+ * memory, i.e. GDDR.
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int
+mic_x100_get_boot_addr(struct mic_device *mdev)
+{
+ u32 scratch2, boot_addr;
+ int rc = 0;
+
+ scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
+ boot_addr = MIC_X100_SPAD2_DOWNLOAD_ADDR(scratch2);
+ dev_dbg(mdev->sdev->parent, "%s %d boot_addr 0x%x\n",
+ __func__, __LINE__, boot_addr);
+ if (boot_addr > (1 << 31)) {
+ dev_err(mdev->sdev->parent,
+ "incorrect bootaddr 0x%x\n",
+ boot_addr);
+ rc = -EINVAL;
+ goto error;
+ }
+ mdev->bootaddr = boot_addr;
+error:
+ return rc;
+}
+
+/**
+ * mic_x100_load_firmware - Load firmware to MIC.
+ * @mdev: pointer to mic_device instance
+ * @buf: buffer containing boot string including firmware/ramdisk path.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int
+mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
+{
+ int rc;
+ const struct firmware *fw;
+
+ rc = mic_x100_get_boot_addr(mdev);
+ if (rc)
+ goto error;
+ /* load OS */
+ rc = request_firmware(&fw, mdev->firmware, mdev->sdev->parent);
+ if (rc < 0) {
+ dev_err(mdev->sdev->parent,
+ "firmware request_firmware failed: %d %s\n",
+ rc, mdev->firmware);
+ goto error;
+ }
+ if (mdev->bootaddr > mdev->aper.len - fw->size) {
+ rc = -EINVAL;
+ dev_err(mdev->sdev->parent, "%s %d rc %d bootaddr 0x%x\n",
+ __func__, __LINE__, rc, mdev->bootaddr);
+ release_firmware(fw);
+ goto error;
+ }
+ memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
+ mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
+ if (!strcmp(mdev->bootmode, "elf"))
+ goto done;
+ /* load command line */
+ rc = mic_x100_load_command_line(mdev, fw);
+ if (rc) {
+ dev_err(mdev->sdev->parent, "%s %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
+ release_firmware(fw);
+ /* load ramdisk */
+ if (mdev->ramdisk)
+ rc = mic_x100_load_ramdisk(mdev);
+error:
+ dev_dbg(mdev->sdev->parent, "%s %d rc %d\n", __func__, __LINE__, rc);
+done:
+ return rc;
+}
+
+/**
+ * mic_x100_get_postcode - Get postcode status from firmware.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: postcode.
+ */
+static u32 mic_x100_get_postcode(struct mic_device *mdev)
+{
+ return mic_mmio_read(&mdev->mmio, MIC_X100_POSTCODE);
+}
+
+/**
+ * mic_x100_smpt_set - Update an SMPT entry with a DMA address.
+ * @mdev: pointer to mic_device instance
+ * @dma_addr: DMA address to program into the SMPT entry
+ * @index: index of the SMPT register to program, 0 based
+ *
+ * RETURNS: none.
+ */
+static void
+mic_x100_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr, u8 index)
+{
+#define SNOOP_ON (0 << 0)
+#define SNOOP_OFF (1 << 0)
+/*
+ * Sbox Smpt Reg Bits:
+ * Bits 31:2 Host address
+ * Bit 1 RSVD
+ * Bit 0 No snoop
+ */
+#define BUILD_SMPT(NO_SNOOP, HOST_ADDR) \
+ (u32)(((HOST_ADDR) << 2) | ((NO_SNOOP) & 0x01))
+
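+ /*
+ * Example: with the X100 page_shift of 34 (16GB pages), a host DMA
+ * address of 0x400000000 falls in page 1, so BUILD_SMPT(SNOOP_ON, 1)
+ * programs the value 0x4, i.e. snooping enabled since bit 0 is clear.
+ */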
+ uint32_t smpt_reg_val = BUILD_SMPT(SNOOP_ON,
+ dma_addr >> mdev->smpt->info.page_shift);
+ mic_mmio_write(&mdev->mmio, smpt_reg_val,
+ MIC_X100_SBOX_BASE_ADDRESS +
+ MIC_X100_SBOX_SMPT00 + (4 * index));
+}
+
+/**
+ * mic_x100_smpt_hw_init - Initialize SMPT X100 specific fields.
+ * @mdev: pointer to mic_device instance
+ *
+ * RETURNS: none.
+ */
+static void mic_x100_smpt_hw_init(struct mic_device *mdev)
+{
+ struct mic_smpt_hw_info *info = &mdev->smpt->info;
+
+ info->num_reg = 32;
+ info->page_shift = 34;
+ info->page_size = (1ULL << info->page_shift);
+ info->base = 0x8000000000ULL;
+}
+
+struct mic_smpt_ops mic_x100_smpt_ops = {
+ .init = mic_x100_smpt_hw_init,
+ .set = mic_x100_smpt_set,
+};
+
+struct mic_hw_ops mic_x100_ops = {
+ .aper_bar = MIC_X100_APER_BAR,
+ .mmio_bar = MIC_X100_MMIO_BAR,
+ .read_spad = mic_x100_read_spad,
+ .write_spad = mic_x100_write_spad,
+ .send_intr = mic_x100_send_intr,
+ .ack_interrupt = mic_x100_ack_interrupt,
+ .reset = mic_x100_hw_reset,
+ .reset_fw_ready = mic_x100_reset_fw_ready,
+ .is_fw_ready = mic_x100_is_fw_ready,
+ .send_firmware_intr = mic_x100_send_firmware_intr,
+ .load_mic_fw = mic_x100_load_firmware,
+ .get_postcode = mic_x100_get_postcode,
+};
+
+struct mic_hw_intr_ops mic_x100_intr_ops = {
+ .intr_init = mic_x100_hw_intr_init,
+ .enable_interrupts = mic_x100_enable_interrupts,
+ .disable_interrupts = mic_x100_disable_interrupts,
+ .program_msi_to_src_map = mic_x100_program_msi_to_src_map,
+ .read_msi_to_src_map = mic_x100_read_msi_to_src_map,
+};
diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h
new file mode 100644
index 00000000000..8b7daa182e5
--- /dev/null
+++ b/drivers/misc/mic/host/mic_x100.h
@@ -0,0 +1,98 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#ifndef _MIC_X100_HW_H_
+#define _MIC_X100_HW_H_
+
+#define MIC_X100_PCI_DEVICE_2250 0x2250
+#define MIC_X100_PCI_DEVICE_2251 0x2251
+#define MIC_X100_PCI_DEVICE_2252 0x2252
+#define MIC_X100_PCI_DEVICE_2253 0x2253
+#define MIC_X100_PCI_DEVICE_2254 0x2254
+#define MIC_X100_PCI_DEVICE_2255 0x2255
+#define MIC_X100_PCI_DEVICE_2256 0x2256
+#define MIC_X100_PCI_DEVICE_2257 0x2257
+#define MIC_X100_PCI_DEVICE_2258 0x2258
+#define MIC_X100_PCI_DEVICE_2259 0x2259
+#define MIC_X100_PCI_DEVICE_225a 0x225a
+#define MIC_X100_PCI_DEVICE_225b 0x225b
+#define MIC_X100_PCI_DEVICE_225c 0x225c
+#define MIC_X100_PCI_DEVICE_225d 0x225d
+#define MIC_X100_PCI_DEVICE_225e 0x225e
+
+#define MIC_X100_APER_BAR 0
+#define MIC_X100_MMIO_BAR 4
+
+#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000
+#define MIC_X100_SBOX_SPAD0 0x0000AB20
+#define MIC_X100_SBOX_SICR0_DBR(x) ((x) & 0xf)
+#define MIC_X100_SBOX_SICR0_DMA(x) (((x) >> 8) & 0xff)
+#define MIC_X100_SBOX_SICE0_DBR(x) ((x) & 0xf)
+#define MIC_X100_SBOX_DBR_BITS(x) ((x) & 0xf)
+#define MIC_X100_SBOX_SICE0_DMA(x) (((x) >> 8) & 0xff)
+#define MIC_X100_SBOX_DMA_BITS(x) (((x) & 0xff) << 8)
+
+#define MIC_X100_SBOX_APICICR0 0x0000A9D0
+#define MIC_X100_SBOX_SICR0 0x00009004
+#define MIC_X100_SBOX_SICE0 0x0000900C
+#define MIC_X100_SBOX_SICC0 0x00009010
+#define MIC_X100_SBOX_SIAC0 0x00009014
+#define MIC_X100_SBOX_MSIXPBACR 0x00009084
+#define MIC_X100_SBOX_MXAR0 0x00009044
+#define MIC_X100_SBOX_SMPT00 0x00003100
+#define MIC_X100_SBOX_RDMASR0 0x0000B180
+
+#define MIC_X100_DOORBELL_IDX_START 0
+#define MIC_X100_NUM_DOORBELL 4
+#define MIC_X100_DMA_IDX_START 8
+#define MIC_X100_NUM_DMA 8
+#define MIC_X100_ERR_IDX_START 30
+#define MIC_X100_NUM_ERR 1
+
+#define MIC_X100_NUM_SBOX_IRQ 8
+#define MIC_X100_NUM_RDMASR_IRQ 8
+#define MIC_X100_RDMASR_IRQ_BASE 17
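+
+/*
+ * Layout of the SPAD2 download info scratchpad register, as decoded by
+ * the macros below: bit 0 holds the download status, bits 9:1 the
+ * bootstrap APIC ID and bits 31:12 the 4K-aligned download address.
+ */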
+#define MIC_X100_SPAD2_DOWNLOAD_STATUS(x) ((x) & 0x1)
+#define MIC_X100_SPAD2_APIC_ID(x) (((x) >> 1) & 0x1ff)
+#define MIC_X100_SPAD2_DOWNLOAD_ADDR(x) ((x) & 0xfffff000)
+#define MIC_X100_SBOX_APICICR7 0x0000AA08
+#define MIC_X100_SBOX_RGCR 0x00004010
+#define MIC_X100_SBOX_SDBIC0 0x0000CC90
+#define MIC_X100_DOWNLOAD_INFO 2
+#define MIC_X100_FW_SIZE 5
+#define MIC_X100_POSTCODE 0x242c
+
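+/*
+ * Start indices and counts of the doorbell, DMA and error interrupt
+ * sources; mic_x100_hw_intr_init() hands this table to the driver as a
+ * struct mic_intr_info.
+ */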
+static const u16 mic_x100_intr_init[] = {
+ MIC_X100_DOORBELL_IDX_START,
+ MIC_X100_DMA_IDX_START,
+ MIC_X100_ERR_IDX_START,
+ MIC_X100_NUM_DOORBELL,
+ MIC_X100_NUM_DMA,
+ MIC_X100_NUM_ERR,
+};
+
+/* Host->Card(bootstrap) Interrupt Vector */
+#define MIC_X100_BSP_INTERRUPT_VECTOR 229
+
+extern struct mic_hw_ops mic_x100_ops;
+extern struct mic_smpt_ops mic_x100_smpt_ops;
+extern struct mic_hw_intr_ops mic_x100_intr_ops;
+
+#endif
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 68b7c773d2c..30754927fd8 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -395,7 +395,7 @@ static int phantom_probe(struct pci_dev *pdev,
iowrite32(0, pht->caddr + PHN_IRQCTL);
ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
retval = request_irq(pdev->irq, phantom_isr,
- IRQF_SHARED | IRQF_DISABLED, "phantom", pht);
+ IRQF_SHARED, "phantom", pht);
if (retval) {
dev_err(&pdev->dev, "can't establish ISR\n");
goto err_unmo;
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index f84ff0c0603..eda38cbe853 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -892,7 +892,6 @@ static void pti_pci_remove(struct pci_dev *pdev)
}
iounmap(drv_data->pti_ioaddr);
- pci_set_drvdata(pdev, NULL);
kfree(drv_data);
pci_release_region(pdev, 1);
pci_disable_device(pdev);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 83907c72059..96853a09788 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -218,7 +218,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
pr_debug("%s", __func__);
- INIT_COMPLETION(kim_gdata->kim_rcvd);
+ reinit_completion(&kim_gdata->kim_rcvd);
if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
pr_err("kim: couldn't write 4 bytes");
return -EIO;
@@ -229,7 +229,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
pr_err(" waiting for ver info- timed out ");
return -ETIMEDOUT;
}
- INIT_COMPLETION(kim_gdata->kim_rcvd);
+ reinit_completion(&kim_gdata->kim_rcvd);
/* the positions 12 & 13 in the response buffer provide with the
* chip, major & minor numbers
*/
@@ -362,7 +362,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
/* reinit completion before sending for the
* relevant wait
*/
- INIT_COMPLETION(kim_gdata->kim_rcvd);
+ reinit_completion(&kim_gdata->kim_rcvd);
/*
* Free space found in uart buffer, call st_int_write
@@ -398,7 +398,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
release_firmware(kim_gdata->fw_entry);
return -ETIMEDOUT;
}
- INIT_COMPLETION(kim_gdata->kim_rcvd);
+ reinit_completion(&kim_gdata->kim_rcvd);
break;
case ACTION_DELAY: /* sleep */
pr_info("sleep command in scr");
@@ -474,7 +474,7 @@ long st_kim_start(void *kim_data)
gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
mdelay(100);
/* re-initialize the completion */
- INIT_COMPLETION(kim_gdata->ldisc_installed);
+ reinit_completion(&kim_gdata->ldisc_installed);
/* send notification to UIM */
kim_gdata->ldisc_install = 1;
pr_info("ldisc_install = 1");
@@ -525,7 +525,7 @@ long st_kim_stop(void *kim_data)
kim_gdata->kim_pdev->dev.platform_data;
struct tty_struct *tty = kim_gdata->core_data->tty;
- INIT_COMPLETION(kim_gdata->ldisc_installed);
+ reinit_completion(&kim_gdata->ldisc_installed);
if (tty) { /* can be called before ldisc is installed */
/* Flush any pending characters in the driver and discipline. */
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
index 9b237221bc4..83da711ce9f 100644
--- a/drivers/misc/ti_dac7512.c
+++ b/drivers/misc/ti_dac7512.c
@@ -22,9 +22,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
-
-#define DAC7512_DRV_NAME "dac7512"
-#define DRIVER_VERSION "1.0"
+#include <linux/of.h>
static ssize_t dac7512_store_val(struct device *dev,
struct device_attribute *attr,
@@ -75,13 +73,29 @@ static int dac7512_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id dac7512_id_table[] = {
+ { "dac7512", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, dac7512_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id dac7512_of_match[] = {
+ { .compatible = "ti,dac7512", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dac7512_of_match);
+#endif
+
static struct spi_driver dac7512_driver = {
.driver = {
- .name = DAC7512_DRV_NAME,
+ .name = "dac7512",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dac7512_of_match),
},
.probe = dac7512_probe,
.remove = dac7512_remove,
+ .id_table = dac7512_id_table,
};
module_spi_driver(dac7512_driver);
@@ -89,4 +103,3 @@ module_spi_driver(dac7512_driver);
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("DAC7512 16-bit DAC");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index f8d6654391e..a606c8901e1 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -356,8 +356,10 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
pci_set_drvdata(dev, fm);
fm->addr = pci_ioremap_bar(dev, 0);
- if (!fm->addr)
+ if (!fm->addr) {
+ rc = -ENODEV;
goto err_out_free;
+ }
rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm);
if (rc)
@@ -378,7 +380,6 @@ err_out_irq:
err_out_unmap:
iounmap(fm->addr);
err_out_free:
- pci_set_drvdata(dev, NULL);
tifm_free_adapter(fm);
err_out_int:
pci_intx(dev, 0);
@@ -405,8 +406,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
for (cnt = 0; cnt < fm->num_sockets; cnt++)
tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));
- pci_set_drvdata(dev, NULL);
-
iounmap(fm->addr);
pci_intx(dev, 0);
pci_release_regions(dev);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 0ab7c922212..a511b2a713b 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -145,15 +145,17 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
return sprintf(buf, "%x", sock->type);
}
+static DEVICE_ATTR_RO(type);
-static struct device_attribute tifm_dev_attrs[] = {
- __ATTR(type, S_IRUGO, type_show, NULL),
- __ATTR_NULL
+static struct attribute *tifm_dev_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(tifm_dev);
static struct bus_type tifm_bus_type = {
.name = "tifm",
- .dev_attrs = tifm_dev_attrs,
+ .dev_groups = tifm_dev_groups,
.match = tifm_bus_match,
.uevent = tifm_uevent,
.probe = tifm_device_probe,
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index b3a2b763ecf..c98b03b9935 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -649,7 +649,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
return 0;
err_free_irq:
- free_irq(vmci_dev->irq, &vmci_dev);
+ free_irq(vmci_dev->irq, vmci_dev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index d4722b3dc8e..1723a6e4f2e 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -243,11 +243,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
/*
* Lock physical page backing a given user VA.
*/
- down_read(&current->mm->mmap_sem);
- retval = get_user_pages(current, current->mm,
- PAGE_ALIGN(uva),
- 1, 1, 0, &page, NULL);
- up_read(&current->mm->mmap_sem);
+ retval = get_user_pages_fast(PAGE_ALIGN(uva), 1, 1, &page);
if (retval != 1)
return VMCI_ERROR_GENERIC;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index a0515a6d6eb..1b7b303085d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -732,13 +732,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
int retval;
int err = VMCI_SUCCESS;
- down_write(&current->mm->mmap_sem);
- retval = get_user_pages(current,
- current->mm,
- (uintptr_t) produce_uva,
- produce_q->kernel_if->num_pages,
- 1, 0,
- produce_q->kernel_if->u.h.header_page, NULL);
+ retval = get_user_pages_fast((uintptr_t) produce_uva,
+ produce_q->kernel_if->num_pages, 1,
+ produce_q->kernel_if->u.h.header_page);
if (retval < produce_q->kernel_if->num_pages) {
pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -747,12 +743,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
goto out;
}
- retval = get_user_pages(current,
- current->mm,
- (uintptr_t) consume_uva,
- consume_q->kernel_if->num_pages,
- 1, 0,
- consume_q->kernel_if->u.h.header_page, NULL);
+ retval = get_user_pages_fast((uintptr_t) consume_uva,
+ consume_q->kernel_if->num_pages, 1,
+ consume_q->kernel_if->u.h.header_page);
if (retval < consume_q->kernel_if->num_pages) {
pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
@@ -763,8 +756,6 @@ static int qp_host_get_user_memory(u64 produce_uva,
}
out:
- up_write(&current->mm->mmap_sem);
-
return err;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1a3163f1407..29d5d988a51 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2448,7 +2448,6 @@ static int _mmc_blk_suspend(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
- pm_runtime_get_sync(&card->dev);
mmc_queue_suspend(&md->queue);
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_suspend(&part_md->queue);
@@ -2483,7 +2482,6 @@ static int mmc_blk_resume(struct mmc_card *card)
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_resume(&part_md->queue);
}
- pm_runtime_put(&card->dev);
}
return 0;
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fa9632eb63f..357bbc54fe4 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = *mmc_dev(host)->dma_mask;
+ limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 704bf66f587..64145a32b91 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -27,7 +27,7 @@
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
-static ssize_t mmc_type_show(struct device *dev,
+static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mmc_card *card = mmc_dev_to_card(dev);
@@ -45,11 +45,13 @@ static ssize_t mmc_type_show(struct device *dev,
return -EFAULT;
}
}
+static DEVICE_ATTR_RO(type);
-static struct device_attribute mmc_dev_attrs[] = {
- __ATTR(type, S_IRUGO, mmc_type_show, NULL),
- __ATTR_NULL,
+static struct attribute *mmc_dev_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mmc_dev);
/*
* This currently matches any MMC driver to any MMC card - drivers
@@ -218,7 +220,7 @@ static const struct dev_pm_ops mmc_bus_pm_ops = {
static struct bus_type mmc_bus_type = {
.name = "mmc",
- .dev_attrs = mmc_dev_attrs,
+ .dev_groups = mmc_dev_groups,
.match = mmc_bus_match,
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
@@ -340,7 +342,7 @@ int mmc_add_card(struct mmc_card *card)
break;
}
- if (mmc_sd_card_uhs(card) &&
+ if (mmc_card_uhs(card) &&
(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index bf18b6bfce4..57a2b403bf8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,7 @@
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
@@ -301,7 +302,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
}
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
+ EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true);
if (err) {
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@@ -918,31 +919,6 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
EXPORT_SYMBOL(__mmc_claim_host);
/**
- * mmc_try_claim_host - try exclusively to claim a host
- * @host: mmc host to claim
- *
- * Returns %1 if the host is claimed, %0 otherwise.
- */
-int mmc_try_claim_host(struct mmc_host *host)
-{
- int claimed_host = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- if (!host->claimed || host->claimer == current) {
- host->claimed = 1;
- host->claimer = current;
- host->claim_cnt += 1;
- claimed_host = 1;
- }
- spin_unlock_irqrestore(&host->lock, flags);
- if (host->ops->enable && claimed_host && host->claim_cnt == 1)
- host->ops->enable(host);
- return claimed_host;
-}
-EXPORT_SYMBOL(mmc_try_claim_host);
-
-/**
* mmc_release_host - release a host
* @host: mmc host to release
*
@@ -1382,22 +1358,31 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
int bit;
- ocr &= host->ocr_avail;
+ /*
+ * Sanity check the voltages that the card claims to
+ * support.
+ */
+ if (ocr & 0x7F) {
+ dev_warn(mmc_dev(host),
+ "card claims to support voltages below defined range\n");
+ ocr &= ~0x7F;
+ }
- bit = ffs(ocr);
- if (bit) {
- bit -= 1;
+ ocr &= host->ocr_avail;
+ if (!ocr) {
+ dev_warn(mmc_dev(host), "no support for card's volts\n");
+ return 0;
+ }
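+
+ /*
+ * If the host supports a full power cycle, restart at the lowest
+ * voltage both sides support; otherwise keep the highest common
+ * voltage and warn if it differs from the currently selected vdd.
+ */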
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
+ bit = ffs(ocr) - 1;
ocr &= 3 << bit;
-
- mmc_host_clk_hold(host);
- host->ios.vdd = bit;
- mmc_set_ios(host);
- mmc_host_clk_release(host);
+ mmc_power_cycle(host, ocr);
} else {
- pr_warning("%s: host doesn't support card's voltages\n",
- mmc_hostname(host));
- ocr = 0;
+ bit = fls(ocr) - 1;
+ ocr &= 3 << bit;
+ if (bit != host->ios.vdd)
+ dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
return ocr;
@@ -1422,7 +1407,7 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
struct mmc_command cmd = {0};
int err = 0;
@@ -1504,7 +1489,7 @@ power_cycle:
if (err) {
pr_debug("%s: Signal voltage switch failed, "
"power cycling card\n", mmc_hostname(host));
- mmc_power_cycle(host);
+ mmc_power_cycle(host, ocr);
}
mmc_host_clk_release(host);
@@ -1545,22 +1530,14 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
* If a host does all the power sequencing itself, ignore the
* initial MMC_POWER_UP stage.
*/
-void mmc_power_up(struct mmc_host *host)
+void mmc_power_up(struct mmc_host *host, u32 ocr)
{
- int bit;
-
if (host->ios.power_mode == MMC_POWER_ON)
return;
mmc_host_clk_hold(host);
- /* If ocr is set, we use it */
- if (host->ocr)
- bit = ffs(host->ocr) - 1;
- else
- bit = fls(host->ocr_avail) - 1;
-
- host->ios.vdd = bit;
+ host->ios.vdd = fls(ocr) - 1;
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
else
@@ -1604,13 +1581,6 @@ void mmc_power_off(struct mmc_host *host)
host->ios.clock = 0;
host->ios.vdd = 0;
-
- /*
- * Reset ocr mask to be the highest possible voltage supported for
- * this mmc host. This value will be used at next power up.
- */
- host->ocr = 1 << (fls(host->ocr_avail) - 1);
-
if (!mmc_host_is_spi(host)) {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1630,12 +1600,12 @@ void mmc_power_off(struct mmc_host *host)
mmc_host_clk_release(host);
}
-void mmc_power_cycle(struct mmc_host *host)
+void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
mmc_power_off(host);
/* Wait at least 1 ms according to SD spec */
mmc_delay(1);
- mmc_power_up(host);
+ mmc_power_up(host, ocr);
}
/*
@@ -1723,6 +1693,28 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
+static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq)
+{
+#ifdef CONFIG_MMC_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&host->lock, flags);
+ WARN_ON(host->removed);
+ spin_unlock_irqrestore(&host->lock, flags);
+#endif
+
+ /*
+ * If the device is configured for wakeup, prevent a new sleep for
+ * 5 s to give user space time to consume the event.
+ */
+ if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
+ device_can_wakeup(mmc_dev(host)))
+ pm_wakeup_event(mmc_dev(host), 5000);
+
+ host->detect_change = 1;
+ mmc_schedule_delayed_work(&host->detect, delay);
+}
+
/**
* mmc_detect_change - process change of state on a MMC socket
* @host: host which changed state.
@@ -1735,16 +1727,8 @@ void mmc_detach_bus(struct mmc_host *host)
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
-#ifdef CONFIG_MMC_DEBUG
- unsigned long flags;
- spin_lock_irqsave(&host->lock, flags);
- WARN_ON(host->removed);
- spin_unlock_irqrestore(&host->lock, flags);
-#endif
- host->detect_change = 1;
- mmc_schedule_delayed_work(&host->detect, delay);
+ _mmc_detect_change(host, delay, true);
}
-
EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
@@ -2334,7 +2318,7 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
pr_info("%s: %s: trying to init card at %u Hz\n",
mmc_hostname(host), __func__, host->f_init);
#endif
- mmc_power_up(host);
+ mmc_power_up(host, host->ocr_avail);
/*
* Some eMMCs (with VCCQ always on) may not be reset after power up, so
@@ -2423,7 +2407,7 @@ int mmc_detect_card_removed(struct mmc_host *host)
* rescan handle the card removal.
*/
cancel_delayed_work(&host->detect);
- mmc_detect_change(host, 0);
+ _mmc_detect_change(host, 0, false);
}
}
@@ -2504,8 +2488,8 @@ void mmc_start_host(struct mmc_host *host)
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
- mmc_power_up(host);
- mmc_detect_change(host, 0);
+ mmc_power_up(host, host->ocr_avail);
+ _mmc_detect_change(host, 0, false);
}
void mmc_stop_host(struct mmc_host *host)
@@ -2583,7 +2567,7 @@ int mmc_power_restore_host(struct mmc_host *host)
return -EINVAL;
}
- mmc_power_up(host);
+ mmc_power_up(host, host->card->ocr);
ret = host->bus_ops->power_restore(host);
mmc_bus_put(host);
@@ -2657,28 +2641,6 @@ EXPORT_SYMBOL(mmc_cache_ctrl);
#ifdef CONFIG_PM
-/**
- * mmc_suspend_host - suspend a host
- * @host: mmc host
- */
-int mmc_suspend_host(struct mmc_host *host)
-{
- /* This function is deprecated */
- return 0;
-}
-EXPORT_SYMBOL(mmc_suspend_host);
-
-/**
- * mmc_resume_host - resume a previously suspended host
- * @host: mmc host
- */
-int mmc_resume_host(struct mmc_host *host)
-{
- /* This function is deprecated */
- return 0;
-}
-EXPORT_SYMBOL(mmc_resume_host);
-
/* Do the card removal on suspend if card is assumed removeable
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
to sync the card.
@@ -2724,7 +2686,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
- mmc_detect_change(host, 0);
+ _mmc_detect_change(host, 0, false);
}
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 5345d156493..443a584660f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -42,13 +42,13 @@ void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr);
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
-void mmc_power_up(struct mmc_host *host);
+void mmc_power_up(struct mmc_host *host, u32 ocr);
void mmc_power_off(struct mmc_host *host);
-void mmc_power_cycle(struct mmc_host *host);
+void mmc_power_cycle(struct mmc_host *host, u32 ocr);
static inline void mmc_delay(unsigned int ms)
{
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6d02012a1d0..f631f5a9bf7 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -934,6 +935,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto err;
}
+ card->ocr = ocr;
card->type = MMC_TYPE_MMC;
card->rca = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
@@ -1404,9 +1406,9 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
if (notify_type == EXT_CSD_POWER_OFF_LONG)
timeout = card->ext_csd.power_off_longtime;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout, true, false);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
@@ -1477,6 +1479,9 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
mmc_claim_host(host);
+ if (mmc_card_suspended(host->card))
+ goto out;
+
if (mmc_card_doing_bkops(host->card)) {
err = mmc_stop_bkops(host->card);
if (err)
@@ -1496,51 +1501,93 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
- if (!err)
+ if (!err) {
mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
out:
mmc_release_host(host);
return err;
}
/*
- * Suspend callback from host.
+ * Suspend callback
*/
static int mmc_suspend(struct mmc_host *host)
{
- return _mmc_suspend(host, true);
-}
+ int err;
-/*
- * Shutdown callback
- */
-static int mmc_shutdown(struct mmc_host *host)
-{
- return _mmc_suspend(host, false);
+ err = _mmc_suspend(host, true);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
}
/*
- * Resume callback from host.
- *
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
-static int mmc_resume(struct mmc_host *host)
+static int _mmc_resume(struct mmc_host *host)
{
- int err;
+ int err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
- err = mmc_init_card(host, host->ocr, host->card);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Shutdown callback
+ */
+static int mmc_shutdown(struct mmc_host *host)
+{
+ int err = 0;
+
+ /*
+ * In a specific case for poweroff notify, we need to resume the card
+ * before we can shut it down properly.
+ */
+ if (mmc_can_poweroff_notify(host->card) &&
+ !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
+ err = _mmc_resume(host);
+
+ if (!err)
+ err = _mmc_suspend(host, false);
return err;
}
+/*
+ * Callback for resume.
+ */
+static int mmc_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
+ err = _mmc_resume(host);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_mark_last_busy(&host->card->dev);
+ }
+ pm_runtime_enable(&host->card->dev);
+
+ return err;
+}
/*
* Callback for runtime_suspend.
@@ -1552,18 +1599,11 @@ static int mmc_runtime_suspend(struct mmc_host *host)
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
- mmc_claim_host(host);
-
- err = mmc_suspend(host);
- if (err) {
+ err = _mmc_suspend(host, true);
+ if (err)
pr_err("%s: error %d doing aggessive suspend\n",
mmc_hostname(host), err);
- goto out;
- }
- mmc_power_off(host);
-out:
- mmc_release_host(host);
return err;
}
@@ -1574,18 +1614,14 @@ static int mmc_runtime_resume(struct mmc_host *host)
{
int err;
- if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
- mmc_claim_host(host);
-
- mmc_power_up(host);
- err = mmc_resume(host);
+ err = _mmc_resume(host);
if (err)
pr_err("%s: error %d doing aggessive resume\n",
mmc_hostname(host), err);
- mmc_release_host(host);
return 0;
}
@@ -1595,7 +1631,7 @@ static int mmc_power_restore(struct mmc_host *host)
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_claim_host(host);
- ret = mmc_init_card(host, host->ocr, host->card);
+ ret = mmc_init_card(host, host->card->ocr, host->card);
mmc_release_host(host);
return ret;
@@ -1640,7 +1676,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
int mmc_attach_mmc(struct mmc_host *host)
{
int err;
- u32 ocr;
+ u32 ocr, rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1666,23 +1702,12 @@ int mmc_attach_mmc(struct mmc_host *host)
goto err;
}
- /*
- * Sanity check the voltages that the card claims to
- * support.
- */
- if (ocr & 0x7F) {
- pr_warning("%s: card claims to support voltages "
- "below the defined range. These will be ignored.\n",
- mmc_hostname(host));
- ocr &= ~0x7F;
- }
-
- host->ocr = mmc_select_voltage(host, ocr);
+ rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage of the card?
*/
- if (!host->ocr) {
+ if (!rocr) {
err = -EINVAL;
goto err;
}
@@ -1690,7 +1715,7 @@ int mmc_attach_mmc(struct mmc_host *host)
/*
* Detect and init the card.
*/
- err = mmc_init_card(host, host->ocr, NULL);
+ err = mmc_init_card(host, rocr, NULL);
if (err)
goto err;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index ef183483d5b..e5b5eeb548d 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -23,6 +23,40 @@
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
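+
+/*
+ * Send CMD13 (SEND_STATUS) to the card. When ignore_crc is set the
+ * response CRC check is skipped, which __mmc_switch() relies on when
+ * polling busy after an HS timing change on hosts without
+ * MMC_CAP_WAIT_WHILE_BUSY.
+ */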
+static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
+ bool ignore_crc)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ if (ignore_crc)
+ cmd.flags &= ~MMC_RSP_CRC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ /* NOTE: callers are required to understand the difference
+ * between "native" and SPI format status words!
+ */
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
+
+int mmc_send_status(struct mmc_card *card, u32 *status)
+{
+ return __mmc_send_status(card, status, false);
+}
+
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
int err;
@@ -370,16 +404,18 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
* @use_busy_signal: use the busy signal as response type
+ * @send_status: send status cmd to poll for busy
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal)
+ unsigned int timeout_ms, bool use_busy_signal, bool send_status)
{
int err;
struct mmc_command cmd = {0};
unsigned long timeout;
- u32 status;
+ u32 status = 0;
+ bool ignore_crc = false;
BUG_ON(!card);
BUG_ON(!card->host);
@@ -408,17 +444,37 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
return 0;
- /* Must check status to be sure of no errors */
+ /*
+ * Must check status to be sure of no errors. If CMD13 is used to
+ * poll for busy completion of the timing change, disable the CRC
+ * error check.
+ */
+ if (index == EXT_CSD_HS_TIMING &&
+ !(card->host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ ignore_crc = true;
+
timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS);
do {
- err = mmc_send_status(card, &status);
- if (err)
- return err;
+ if (send_status) {
+ err = __mmc_send_status(card, &status, ignore_crc);
+ if (err)
+ return err;
+ }
if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
break;
if (mmc_host_is_spi(card->host))
break;
+ /*
+ * If we are not allowed to issue a status command and the host
+ * doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only rely on
+ * waiting for the stated timeout to be sufficient.
+ */
+ if (!send_status) {
+ mmc_delay(timeout_ms);
+ return 0;
+ }
+
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
@@ -445,36 +501,10 @@ EXPORT_SYMBOL_GPL(__mmc_switch);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
- return __mmc_switch(card, set, index, value, timeout_ms, true);
+ return __mmc_switch(card, set, index, value, timeout_ms, true, true);
}
EXPORT_SYMBOL_GPL(mmc_switch);
-int mmc_send_status(struct mmc_card *card, u32 *status)
-{
- int err;
- struct mmc_command cmd = {0};
-
- BUG_ON(!card);
- BUG_ON(!card->host);
-
- cmd.opcode = MMC_SEND_STATUS;
- if (!mmc_host_is_spi(card->host))
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- /* NOTE: callers are required to understand the difference
- * between "native" and SPI format status words!
- */
- if (status)
- *status = cmd.resp[0];
-
- return 0;
-}
-
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 5e8823dc3ef..6f42050b7cc 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -721,6 +722,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
int err;
u32 max_current;
int retries = 10;
+ u32 pocr = ocr;
try_again:
if (!retries) {
@@ -773,7 +775,8 @@ try_again:
*/
if (!mmc_host_is_spi(host) && rocr &&
((*rocr & 0x41000000) == 0x41000000)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ pocr);
if (err == -EAGAIN) {
retries--;
goto try_again;
@@ -935,6 +938,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (IS_ERR(card))
return PTR_ERR(card);
+ card->ocr = ocr;
card->type = MMC_TYPE_SD;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
}
@@ -1064,10 +1068,7 @@ static void mmc_sd_detect(struct mmc_host *host)
}
}
-/*
- * Suspend callback from host.
- */
-static int mmc_sd_suspend(struct mmc_host *host)
+static int _mmc_sd_suspend(struct mmc_host *host)
{
int err = 0;
@@ -1075,34 +1076,77 @@ static int mmc_sd_suspend(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ if (mmc_card_suspended(host->card))
+ goto out;
+
if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
- if (!err)
+ if (!err) {
mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
+
+out:
mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for suspend
+ */
+static int mmc_sd_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_sd_suspend(host);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
return err;
}
/*
- * Resume callback from host.
- *
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
-static int mmc_sd_resume(struct mmc_host *host)
+static int _mmc_sd_resume(struct mmc_host *host)
{
- int err;
+ int err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
- err = mmc_sd_init_card(host, host->ocr, host->card);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_sd_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for resume
+ */
+static int mmc_sd_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
+ err = _mmc_sd_resume(host);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_mark_last_busy(&host->card->dev);
+ }
+ pm_runtime_enable(&host->card->dev);
return err;
}
@@ -1117,18 +1161,11 @@ static int mmc_sd_runtime_suspend(struct mmc_host *host)
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
- mmc_claim_host(host);
-
- err = mmc_sd_suspend(host);
- if (err) {
+ err = _mmc_sd_suspend(host);
+ if (err)
pr_err("%s: error %d doing aggessive suspend\n",
mmc_hostname(host), err);
- goto out;
- }
- mmc_power_off(host);
-out:
- mmc_release_host(host);
return err;
}
@@ -1139,18 +1176,14 @@ static int mmc_sd_runtime_resume(struct mmc_host *host)
{
int err;
- if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
- mmc_claim_host(host);
-
- mmc_power_up(host);
- err = mmc_sd_resume(host);
+ err = _mmc_sd_resume(host);
if (err)
pr_err("%s: error %d doing aggessive resume\n",
mmc_hostname(host), err);
- mmc_release_host(host);
return 0;
}
@@ -1160,7 +1193,7 @@ static int mmc_sd_power_restore(struct mmc_host *host)
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
- ret = mmc_sd_init_card(host, host->ocr, host->card);
+ ret = mmc_sd_init_card(host, host->card->ocr, host->card);
mmc_release_host(host);
return ret;
@@ -1205,7 +1238,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
int mmc_attach_sd(struct mmc_host *host)
{
int err;
- u32 ocr;
+ u32 ocr, rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1229,31 +1262,12 @@ int mmc_attach_sd(struct mmc_host *host)
goto err;
}
- /*
- * Sanity check the voltages that the card claims to
- * support.
- */
- if (ocr & 0x7F) {
- pr_warning("%s: card claims to support voltages "
- "below the defined range. These will be ignored.\n",
- mmc_hostname(host));
- ocr &= ~0x7F;
- }
-
- if ((ocr & MMC_VDD_165_195) &&
- !(host->ocr_avail_sd & MMC_VDD_165_195)) {
- pr_warning("%s: SD card claims to support the "
- "incompletely defined 'low voltage range'. This "
- "will be ignored.\n", mmc_hostname(host));
- ocr &= ~MMC_VDD_165_195;
- }
-
- host->ocr = mmc_select_voltage(host, ocr);
+ rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
- if (!host->ocr) {
+ if (!rocr) {
err = -EINVAL;
goto err;
}
@@ -1261,7 +1275,7 @@ int mmc_attach_sd(struct mmc_host *host)
/*
* Detect and init the card.
*/
- err = mmc_sd_init_card(host, host->ocr, NULL);
+ err = mmc_sd_init_card(host, rocr, NULL);
if (err)
goto err;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 80d89cff730..4d721c6e2af 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -593,23 +593,28 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *card;
int err;
int retries = 10;
+ u32 rocr = 0;
+ u32 ocr_card = ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ /* Query the card to find out whether 1.8V signalling is supported */
+ if (mmc_host_uhs(host))
+ ocr |= R4_18V_PRESENT;
+
try_again:
if (!retries) {
pr_warning("%s: Skipping voltage switch\n",
mmc_hostname(host));
ocr &= ~R4_18V_PRESENT;
- host->ocr &= ~R4_18V_PRESENT;
}
/*
* Inform the card of the voltage
*/
if (!powered_resume) {
- err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ err = mmc_send_io_op_cond(host, ocr, &rocr);
if (err)
goto err;
}
@@ -632,8 +637,8 @@ try_again:
goto err;
}
- if ((ocr & R4_MEMORY_PRESENT) &&
- mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
+ if ((rocr & R4_MEMORY_PRESENT) &&
+ mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
@@ -663,8 +668,9 @@ try_again:
* systems that claim 1.8v signalling in fact do not support
* it.
*/
- if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ ocr);
if (err == -EAGAIN) {
sdio_reset(host);
mmc_go_idle(host);
@@ -674,12 +680,10 @@ try_again:
goto try_again;
} else if (err) {
ocr &= ~R4_18V_PRESENT;
- host->ocr &= ~R4_18V_PRESENT;
}
err = 0;
} else {
ocr &= ~R4_18V_PRESENT;
- host->ocr &= ~R4_18V_PRESENT;
}
/*
@@ -759,6 +763,7 @@ try_again:
card = oldcard;
}
+ card->ocr = ocr_card;
mmc_fixup_device(card, NULL);
if (card->type == MMC_TYPE_SD_COMBO) {
@@ -981,8 +986,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
/* Restore power if needed */
if (!mmc_card_keep_power(host)) {
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
+ mmc_power_up(host, host->card->ocr);
/*
* Tell runtime PM core we just powered up the card,
* since it still believes the card is powered off.
@@ -1000,7 +1004,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
sdio_reset(host);
mmc_go_idle(host);
- err = mmc_sdio_init_card(host, host->ocr, host->card,
+ err = mmc_sdio_init_card(host, host->card->ocr, host->card,
mmc_card_keep_power(host));
} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
@@ -1040,7 +1044,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
- u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
@@ -1062,32 +1065,17 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
* for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
* harmless in other situations.
*
- * With these steps taken, mmc_select_voltage() is also required to
- * restore the correct voltage setting of the card.
*/
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
- ret = mmc_send_io_op_cond(host, 0, &ocr);
+ ret = mmc_send_io_op_cond(host, 0, NULL);
if (ret)
goto out;
- if (host->ocr_avail_sdio)
- host->ocr_avail = host->ocr_avail_sdio;
-
- host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
- if (!host->ocr) {
- ret = -EINVAL;
- goto out;
- }
-
- if (mmc_host_uhs(host))
- /* to query card if 1.8V signalling is supported */
- host->ocr |= R4_18V_PRESENT;
-
- ret = mmc_sdio_init_card(host, host->ocr, host->card,
+ ret = mmc_sdio_init_card(host, host->card->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
@@ -1108,7 +1096,7 @@ static int mmc_sdio_runtime_suspend(struct mmc_host *host)
static int mmc_sdio_runtime_resume(struct mmc_host *host)
{
/* Restore power and re-initialize. */
- mmc_power_up(host);
+ mmc_power_up(host, host->card->ocr);
return mmc_sdio_power_restore(host);
}
@@ -1131,7 +1119,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
int mmc_attach_sdio(struct mmc_host *host)
{
int err, i, funcs;
- u32 ocr;
+ u32 ocr, rocr;
struct mmc_card *card;
BUG_ON(!host);
@@ -1145,23 +1133,13 @@ int mmc_attach_sdio(struct mmc_host *host)
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
- /*
- * Sanity check the voltages that the card claims to
- * support.
- */
- if (ocr & 0x7F) {
- pr_warning("%s: card claims to support voltages "
- "below the defined range. These will be ignored.\n",
- mmc_hostname(host));
- ocr &= ~0x7F;
- }
- host->ocr = mmc_select_voltage(host, ocr);
+ rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
- if (!host->ocr) {
+ if (!rocr) {
err = -EINVAL;
goto err;
}
@@ -1169,22 +1147,10 @@ int mmc_attach_sdio(struct mmc_host *host)
/*
* Detect and init the card.
*/
- if (mmc_host_uhs(host))
- /* to query card if 1.8V signalling is supported */
- host->ocr |= R4_18V_PRESENT;
+ err = mmc_sdio_init_card(host, rocr, NULL, 0);
+ if (err)
+ goto err;
- err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- if (err) {
- if (err == -EAGAIN) {
- /*
- * Retry initialization with S18R set to 0.
- */
- host->ocr &= ~R4_18V_PRESENT;
- err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- }
- if (err)
- goto err;
- }
card = host->card;
/*
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 6d67492a924..157b570ba34 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -34,7 +34,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
\
func = dev_to_sdio_func (dev); \
return sprintf (buf, format_string, func->field); \
-}
+} \
+static DEVICE_ATTR_RO(field)
sdio_config_attr(class, "0x%02x\n");
sdio_config_attr(vendor, "0x%04x\n");
@@ -47,14 +48,16 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n",
func->class, func->vendor, func->device);
}
-
-static struct device_attribute sdio_dev_attrs[] = {
- __ATTR_RO(class),
- __ATTR_RO(vendor),
- __ATTR_RO(device),
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *sdio_dev_attrs[] = {
+ &dev_attr_class.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(sdio_dev);
static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
const struct sdio_device_id *id)
@@ -225,7 +228,7 @@ static const struct dev_pm_ops sdio_bus_pm_ops = {
static struct bus_type sdio_bus_type = {
.name = "sdio",
- .dev_attrs = sdio_dev_attrs,
+ .dev_groups = sdio_dev_groups,
.match = sdio_bus_match,
.uevent = sdio_bus_uevent,
.probe = sdio_bus_probe,
@@ -305,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func)
struct mmc_host *host = func->card->host;
u64 addr = (host->slotno << 16) | func->num;
- ACPI_HANDLE_SET(&func->dev,
- acpi_get_child(ACPI_HANDLE(host->parent), addr));
+ acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr);
}
#else
static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 69e438ee043..2cbb4516d35 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -255,7 +255,6 @@ struct atmel_mci_slot {
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
-#define ATMCI_SUSPENDED 3
int detect_pin;
int wp_pin;
@@ -589,6 +588,13 @@ static void atmci_timeout_timer(unsigned long data)
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
host->data = NULL;
+ /*
+ * With some SDIO modules the DMA transfer sometimes hangs. If
+ * stop_transfer() is not called, the DMA request is not removed
+ * and subsequent requests are queued but never processed.
+ */
+ if (host->state == STATE_DATA_XFER)
+ host->stop_transfer(host);
} else {
host->mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
@@ -1803,12 +1809,14 @@ static void atmci_tasklet_func(unsigned long priv)
if (unlikely(status)) {
host->stop_transfer(host);
host->data = NULL;
- if (status & ATMCI_DTOE) {
- data->error = -ETIMEDOUT;
- } else if (status & ATMCI_DCRCE) {
- data->error = -EILSEQ;
- } else {
- data->error = -EIO;
+ if (data) {
+ if (status & ATMCI_DTOE) {
+ data->error = -ETIMEDOUT;
+ } else if (status & ATMCI_DCRCE) {
+ data->error = -EILSEQ;
+ } else {
+ data->error = -EIO;
+ }
}
}
@@ -2520,70 +2528,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int atmci_suspend(struct device *dev)
-{
- struct atmel_mci *host = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
- struct atmel_mci_slot *slot = host->slot[i];
- int ret;
-
- if (!slot)
- continue;
- ret = mmc_suspend_host(slot->mmc);
- if (ret < 0) {
- while (--i >= 0) {
- slot = host->slot[i];
- if (slot
- && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
- mmc_resume_host(host->slot[i]->mmc);
- clear_bit(ATMCI_SUSPENDED, &slot->flags);
- }
- }
- return ret;
- } else {
- set_bit(ATMCI_SUSPENDED, &slot->flags);
- }
- }
-
- return 0;
-}
-
-static int atmci_resume(struct device *dev)
-{
- struct atmel_mci *host = dev_get_drvdata(dev);
- int i;
- int ret = 0;
-
- for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
- struct atmel_mci_slot *slot = host->slot[i];
- int err;
-
- slot = host->slot[i];
- if (!slot)
- continue;
- if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
- continue;
- err = mmc_resume_host(slot->mmc);
- if (err < 0)
- ret = err;
- else
- clear_bit(ATMCI_SUSPENDED, &slot->flags);
- }
-
- return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
-
static struct platform_driver atmci_driver = {
.remove = __exit_p(atmci_remove),
.driver = {
.name = "atmel_mci",
- .pm = &atmci_pm,
.of_match_table = of_match_ptr(atmci_dt_ids),
},
};
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index df9becdd2e9..f5443a6c491 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1157,11 +1157,6 @@ static int au1xmmc_remove(struct platform_device *pdev)
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
- int ret;
-
- ret = mmc_suspend_host(host->mmc);
- if (ret)
- return ret;
au_writel(0, HOST_CONFIG2(host));
au_writel(0, HOST_CONFIG(host));
@@ -1178,7 +1173,7 @@ static int au1xmmc_resume(struct platform_device *pdev)
au1xmmc_reset_controller(host);
- return mmc_resume_host(host->mmc);
+ return 0;
}
#else
#define au1xmmc_suspend NULL
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 94fae2f1baa..2b7f37e82ca 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -391,6 +391,7 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Disable 4 bit SDIO */
cfg &= ~SD4E;
}
+ bfin_write_SDH_CFG(cfg);
host->power_mode = ios->power_mode;
#ifndef RSI_BLKSZ
@@ -415,7 +416,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
cfg &= ~SD_CMD_OD;
# endif
-
if (ios->power_mode != MMC_POWER_OFF)
cfg |= PWR_ON;
else
@@ -433,7 +433,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_ctl |= CLK_E;
host->clk_div = clk_div;
bfin_write_SDH_CLK_CTL(clk_ctl);
-
} else
sdh_stop_clock(host);
@@ -640,21 +639,15 @@ static int sdh_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sdh_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mmc_host *mmc = platform_get_drvdata(dev);
struct bfin_sd_host *drv_data = get_sdh_data(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
peripheral_free_list(drv_data->pin_req);
- return ret;
+ return 0;
}
static int sdh_resume(struct platform_device *dev)
{
- struct mmc_host *mmc = platform_get_drvdata(dev);
struct bfin_sd_host *drv_data = get_sdh_data(dev);
int ret = 0;
@@ -665,10 +658,6 @@ static int sdh_resume(struct platform_device *dev)
}
sdh_reset();
-
- if (mmc)
- ret = mmc_resume_host(mmc);
-
return ret;
}
#else
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 9d6e2b84440..1087b4c79cd 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -667,12 +667,6 @@ static const struct mmc_host_ops cb710_mmc_host = {
static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
- struct mmc_host *mmc = cb710_slot_to_mmc(slot);
- int err;
-
- err = mmc_suspend_host(mmc);
- if (err)
- return err;
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
@@ -681,11 +675,9 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
static int cb710_mmc_resume(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
- struct mmc_host *mmc = cb710_slot_to_mmc(slot);
cb710_mmc_enable_irq(slot, 0, ~0);
-
- return mmc_resume_host(mmc);
+ return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e9fa87df909..d6153740b77 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -193,7 +193,6 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_READ 1
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
- unsigned char suspended;
/* buffer is used during PIO of one scatterlist segment, and
* is updated along with buffer_bytes_left. bytes_left applies
@@ -1435,38 +1434,23 @@ static int davinci_mmcsd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
- int ret;
- ret = mmc_suspend_host(host->mmc);
- if (!ret) {
- writel(0, host->base + DAVINCI_MMCIM);
- mmc_davinci_reset_ctrl(host, 1);
- clk_disable(host->clk);
- host->suspended = 1;
- } else {
- host->suspended = 0;
- }
+ writel(0, host->base + DAVINCI_MMCIM);
+ mmc_davinci_reset_ctrl(host, 1);
+ clk_disable(host->clk);
- return ret;
+ return 0;
}
static int davinci_mmcsd_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
- int ret;
-
- if (!host->suspended)
- return 0;
clk_enable(host->clk);
-
mmc_davinci_reset_ctrl(host, 0);
- ret = mmc_resume_host(host->mmc);
- if (!ret)
- host->suspended = 0;
- return ret;
+ return 0;
}
static const struct dev_pm_ops davinci_mmcsd_pm = {
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 6a1fa2110a0..3423c5ed50c 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -14,8 +14,10 @@
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/dw_mmc.h>
+#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/slab.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
@@ -30,16 +32,39 @@
#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
SDMMC_CLKSEL_CCLK_DRIVE(y) | \
SDMMC_CLKSEL_CCLK_DIVIDER(z))
+#define SDMMC_CLKSEL_WAKEUP_INT BIT(11)
#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
+/* Block number in eMMC */
+#define DWMCI_BLOCK_NUM 0xFFFFFFFF
+
+#define SDMMC_EMMCP_BASE 0x1000
+#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010)
+#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200)
+#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204)
+#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C)
+
+/* SMU control bits */
+#define DWMCI_MPSCTRL_SECURE_READ_BIT BIT(7)
+#define DWMCI_MPSCTRL_SECURE_WRITE_BIT BIT(6)
+#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT BIT(5)
+#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4)
+#define DWMCI_MPSCTRL_USE_FUSE_KEY BIT(3)
+#define DWMCI_MPSCTRL_ECB_MODE BIT(2)
+#define DWMCI_MPSCTRL_ENCRYPTION BIT(1)
+#define DWMCI_MPSCTRL_VALID BIT(0)
+
+#define EXYNOS_CCLKIN_MIN 50000000 /* unit: HZ */
+
/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS4210,
DW_MCI_TYPE_EXYNOS4412,
DW_MCI_TYPE_EXYNOS5250,
DW_MCI_TYPE_EXYNOS5420,
+ DW_MCI_TYPE_EXYNOS5420_SMU,
};
/* Exynos implementation specific driver private data */
@@ -48,6 +73,7 @@ struct dw_mci_exynos_priv_data {
u8 ciu_div;
u32 sdr_timing;
u32 ddr_timing;
+ u32 cur_speed;
};
static struct dw_mci_exynos_compatible {
@@ -66,44 +92,80 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos5420-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5420,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU,
},
};
static int dw_mci_exynos_priv_init(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv;
- int idx;
-
- priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(host->dev, "mem alloc failed for private data\n");
- return -ENOMEM;
- }
+ struct dw_mci_exynos_priv_data *priv = host->priv;
- for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
- if (of_device_is_compatible(host->dev->of_node,
- exynos_compat[idx].compatible))
- priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) {
+ mci_writel(host, MPSBEGIN0, 0);
+ mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM);
+ mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT |
+ DWMCI_MPSCTRL_NON_SECURE_READ_BIT |
+ DWMCI_MPSCTRL_VALID |
+ DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT);
}
- host->priv = priv;
return 0;
}
static int dw_mci_exynos_setup_clock(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
+ unsigned long rate = clk_get_rate(host->ciu_clk);
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420)
- host->bus_hz /= (priv->ciu_div + 1);
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
- host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
- host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV;
+ host->bus_hz = rate / (priv->ciu_div + 1);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_exynos_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_exynos_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ dw_mci_exynos_priv_init(host);
+ return dw_mci_resume(host);
+}
+
+/**
+ * dw_mci_exynos_resume_noirq - Exynos-specific resume code
+ *
+ * On exynos5420 there is a silicon erratum that can leave the WAKEUP_INT
+ * bit in the CLKSEL register asserted. The bit reads as 1 when the wakeup
+ * interrupt has fired, and it is cleared by writing a 1 back; clear it here
+ * to prevent the interrupt from firing constantly.
+ *
+ * We run this code on all exynos variants because it doesn't hurt.
+ */
+
+static int dw_mci_exynos_resume_noirq(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ u32 clksel;
+
+ clksel = mci_readl(host, CLKSEL);
+ if (clksel & SDMMC_CLKSEL_WAKEUP_INT)
+ mci_writel(host, CLKSEL, clksel);
return 0;
}
+#else
+#define dw_mci_exynos_suspend NULL
+#define dw_mci_exynos_resume NULL
+#define dw_mci_exynos_resume_noirq NULL
+#endif /* CONFIG_PM_SLEEP */
static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
{
@@ -121,23 +183,68 @@ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
+ unsigned int wanted = ios->clock;
+ unsigned long actual;
+ u8 div = priv->ciu_div + 1;
- if (ios->timing == MMC_TIMING_UHS_DDR50)
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
mci_writel(host, CLKSEL, priv->ddr_timing);
- else
+ /* Should be double rate for DDR mode */
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ wanted <<= 1;
+ } else {
mci_writel(host, CLKSEL, priv->sdr_timing);
+ }
+
+ /* Don't care if wanted clock is zero */
+ if (!wanted)
+ return;
+
+ /* Guaranteed minimum frequency for cclkin */
+ if (wanted < EXYNOS_CCLKIN_MIN)
+ wanted = EXYNOS_CCLKIN_MIN;
+
+ if (wanted != priv->cur_speed) {
+ int ret = clk_set_rate(host->ciu_clk, wanted * div);
+ if (ret)
+ dev_warn(host->dev,
+ "failed to set clk-rate %u error: %d\n",
+ wanted * div, ret);
+ actual = clk_get_rate(host->ciu_clk);
+ host->bus_hz = actual / div;
+ priv->cur_speed = wanted;
+ host->current_speed = 0;
+ }
}
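/*
 * Illustrative sketch (not from the patch): the CIU clock arithmetic that
 * dw_mci_exynos_set_ios() above performs, reduced to a standalone helper.
 * It assumes the same rules as the hunk: "ciu_div" is the extra divider in
 * CLKSEL, the controller input clock must not drop below EXYNOS_CCLKIN_MIN,
 * and 8-bit DDR mode needs twice the requested card clock.
 */
#include <stdio.h>

#define EXYNOS_CCLKIN_MIN	50000000UL		/* Hz */

static unsigned long exynos_ciu_rate(unsigned long wanted_hz,
				     unsigned int ciu_div, int ddr_8bit)
{
	unsigned long div = ciu_div + 1;

	if (ddr_8bit)				/* DDR needs the doubled rate */
		wanted_hz <<= 1;
	if (wanted_hz < EXYNOS_CCLKIN_MIN)	/* guaranteed minimum cclkin */
		wanted_hz = EXYNOS_CCLKIN_MIN;

	return wanted_hz * div;		/* rate to request via clk_set_rate() */
}

int main(void)
{
	/* 52 MHz, 8-bit DDR, ciu_div = 3: ask the CIU clock for 416 MHz */
	printf("%lu Hz\n", exynos_ciu_rate(52000000UL, 3, 1));
	return 0;
}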
static int dw_mci_exynos_parse_dt(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
+ struct dw_mci_exynos_priv_data *priv;
struct device_node *np = host->dev->of_node;
u32 timing[2];
u32 div = 0;
+ int idx;
int ret;
- of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
- priv->ciu_div = div;
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(host->dev, "mem alloc failed for private data\n");
+ return -ENOMEM;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
+ if (of_device_is_compatible(np, exynos_compat[idx].compatible))
+ priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ }
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
+ priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
+ priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1;
+ else {
+ of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
+ priv->ciu_div = div;
+ }
ret = of_property_read_u32_array(np,
"samsung,dw-mshc-sdr-timing", timing, 2);
@@ -152,9 +259,131 @@ static int dw_mci_exynos_parse_dt(struct dw_mci *host)
return ret;
priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+ host->priv = priv;
return 0;
}
+static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
+{
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
+}
+
+static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
+{
+ u32 clksel;
+ clksel = mci_readl(host, CLKSEL);
+ clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample);
+ mci_writel(host, CLKSEL, clksel);
+}
+
+static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
+{
+ u32 clksel;
+ u8 sample;
+
+ clksel = mci_readl(host, CLKSEL);
+ sample = (clksel + 1) & 0x7;
+ clksel = (clksel & ~0x7) | sample;
+ mci_writel(host, CLKSEL, clksel);
+ return sample;
+}
+
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+{
+ const u8 iter = 8;
+ u8 __c;
+ s8 i, loc = -1;
+
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candiates, i);
+ if ((__c & 0xc7) == 0xc7) {
+ loc = i;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candiates, i);
+ if ((__c & 0x83) == 0x83) {
+ loc = i;
+ goto out;
+ }
+ }
+
+out:
+ return loc;
+}
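/*
 * Illustrative sketch (not from the patch): the sample-window selection done
 * by dw_mci_exynos_get_best_clksmpl() above.  Bit N of "candidates" is set
 * when CLKSEL sample point N passed tuning; the mask 0xc7 matches a 5-wide
 * run of passing samples centred on the rotation index, 0x83 a 3-wide run.
 * ror8() below is a local stand-in for the kernel helper of the same name.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t ror8(uint8_t v, unsigned int n)
{
	n &= 7;
	return n ? (uint8_t)((v >> n) | (v << (8 - n))) : v;
}

static int best_clksmpl(uint8_t candidates)
{
	int i;

	for (i = 0; i < 8; i++)		/* prefer 5 consecutive good samples */
		if ((ror8(candidates, i) & 0xc7) == 0xc7)
			return i;
	for (i = 0; i < 8; i++)		/* fall back to 3 consecutive */
		if ((ror8(candidates, i) & 0x83) == 0x83)
			return i;
	return -1;			/* tuning failed */
}

int main(void)
{
	/* samples 1..5 passed (0x3e): the centre of the run is sample 3 */
	printf("%d\n", best_clksmpl(0x3e));
	return 0;
}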
+
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
+ struct dw_mci_tuning_data *tuning_data)
+{
+ struct dw_mci *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ const u8 *blk_pattern = tuning_data->blk_pattern;
+ u8 *blk_test;
+ unsigned int blksz = tuning_data->blksz;
+ u8 start_smpl, smpl, candiates = 0;
+ s8 found = -1;
+ int ret = 0;
+
+ blk_test = kmalloc(blksz, GFP_KERNEL);
+ if (!blk_test)
+ return -ENOMEM;
+
+ start_smpl = dw_mci_exynos_get_clksmpl(host);
+
+ do {
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ data.blksz = blksz;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, blk_test, blksz);
+ mrq.cmd = &cmd;
+ mrq.stop = &stop;
+ mrq.data = &data;
+ host->mrq = &mrq;
+
+ mci_writel(host, TMOUT, ~0);
+ smpl = dw_mci_exynos_move_next_clksmpl(host);
+
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (!cmd.error && !data.error) {
+ if (!memcmp(blk_pattern, blk_test, blksz))
+ candiates |= (1 << smpl);
+ } else {
+ dev_dbg(host->dev,
+ "Tuning error: cmd.error:%d, data.error:%d\n",
+ cmd.error, data.error);
+ }
+ } while (start_smpl != smpl);
+
+ found = dw_mci_exynos_get_best_clksmpl(candiates);
+ if (found >= 0)
+ dw_mci_exynos_set_clksmpl(host, found);
+ else
+ ret = -EIO;
+
+ kfree(blk_test);
+ return ret;
+}
+
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
@@ -171,6 +400,7 @@ static const struct dw_mci_drv_data exynos_drv_data = {
.prepare_command = dw_mci_exynos_prepare_command,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
};
static const struct of_device_id dw_mci_exynos_match[] = {
@@ -180,6 +410,8 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5420-dw-mshc",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .data = &exynos_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
@@ -194,13 +426,20 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
+const struct dev_pm_ops dw_mci_exynos_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
+ .resume_noirq = dw_mci_exynos_resume_noirq,
+ .thaw_noirq = dw_mci_exynos_resume_noirq,
+ .restore_noirq = dw_mci_exynos_resume_noirq,
+};
+
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
.remove = __exit_p(dw_mci_pltfm_remove),
.driver = {
.name = "dwmmc_exynos",
.of_match_table = dw_mci_exynos_match,
- .pm = &dw_mci_pltfm_pmops,
+ .pm = &dw_mci_exynos_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 20897529ea5..5c496565529 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -39,7 +39,6 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
{
struct dw_mci *host;
struct resource *regs;
- int ret;
host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
if (!host)
@@ -59,12 +58,6 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
- if (drv_data && drv_data->init) {
- ret = drv_data->init(host);
- if (ret)
- return ret;
- }
-
platform_set_drvdata(pdev, host);
return dw_mci_probe(host);
}
diff --git a/drivers/mmc/host/dw_mmc-socfpga.c b/drivers/mmc/host/dw_mmc-socfpga.c
index 14b5961a851..3e8e53ae330 100644
--- a/drivers/mmc/host/dw_mmc-socfpga.c
+++ b/drivers/mmc/host/dw_mmc-socfpga.c
@@ -38,21 +38,6 @@ struct dw_mci_socfpga_priv_data {
static int dw_mci_socfpga_priv_init(struct dw_mci *host)
{
- struct dw_mci_socfpga_priv_data *priv;
-
- priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(host->dev, "mem alloc failed for private data\n");
- return -ENOMEM;
- }
-
- priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
- if (IS_ERR(priv->sysreg)) {
- dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n");
- return PTR_ERR(priv->sysreg);
- }
- host->priv = priv;
-
return 0;
}
@@ -79,12 +64,24 @@ static void dw_mci_socfpga_prepare_command(struct dw_mci *host, u32 *cmdr)
static int dw_mci_socfpga_parse_dt(struct dw_mci *host)
{
- struct dw_mci_socfpga_priv_data *priv = host->priv;
+ struct dw_mci_socfpga_priv_data *priv;
struct device_node *np = host->dev->of_node;
u32 timing[2];
u32 div = 0;
int ret;
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(host->dev, "mem alloc failed for private data\n");
+ return -ENOMEM;
+ }
+
+ priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(priv->sysreg)) {
+ dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n");
+ return PTR_ERR(priv->sysreg);
+ }
+
ret = of_property_read_u32(np, "altr,dw-mshc-ciu-div", &div);
if (ret)
dev_info(host->dev, "No dw-mshc-ciu-div specified, assuming 1");
@@ -96,6 +93,7 @@ static int dw_mci_socfpga_parse_dt(struct dw_mci *host)
return ret;
priv->hs_timing = SYSMGR_SDMMC_CTRL_SET(timing[0], timing[1]);
+ host->priv = priv;
return 0;
}
@@ -113,7 +111,7 @@ static const struct of_device_id dw_mci_socfpga_match[] = {
};
MODULE_DEVICE_TABLE(of, dw_mci_socfpga_match);
-int dw_mci_socfpga_probe(struct platform_device *pdev)
+static int dw_mci_socfpga_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
@@ -128,7 +126,7 @@ static struct platform_driver dw_mci_socfpga_pltfm_driver = {
.remove = __exit_p(dw_mci_pltfm_remove),
.driver = {
.name = "dwmmc_socfpga",
- .of_match_table = of_match_ptr(dw_mci_socfpga_match),
+ .of_match_table = dw_mci_socfpga_match,
.pm = &dw_mci_pltfm_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 018f365e5ae..4bce0deec36 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -29,6 +29,7 @@
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
@@ -50,6 +51,9 @@
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
+#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
+#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
+
#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
@@ -76,42 +80,39 @@ struct idmac_desc {
};
#endif /* CONFIG_MMC_DW_IDMAC */
-/**
- * struct dw_mci_slot - MMC slot state
- * @mmc: The mmc_host representing this slot.
- * @host: The MMC controller this slot is using.
- * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
- * @wp_gpio: If gpio_is_valid() we'll use this to read write protect.
- * @ctype: Card type for this slot.
- * @mrq: mmc_request currently being processed or waiting to be
- * processed, or NULL when the slot is idle.
- * @queue_node: List node for placing this node in the @queue list of
- * &struct dw_mci.
- * @clock: Clock rate configured by set_ios(). Protected by host->lock.
- * @flags: Random state bits associated with the slot.
- * @id: Number of this slot.
- * @last_detect_state: Most recently observed card detect state.
- */
-struct dw_mci_slot {
- struct mmc_host *mmc;
- struct dw_mci *host;
-
- int quirks;
- int wp_gpio;
-
- u32 ctype;
-
- struct mmc_request *mrq;
- struct list_head queue_node;
+static const u8 tuning_blk_pattern_4bit[] = {
+ 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
+};
- unsigned int clock;
- unsigned long flags;
-#define DW_MMC_CARD_PRESENT 0
-#define DW_MMC_CARD_NEED_INIT 1
- int id;
- int last_detect_state;
+static const u8 tuning_blk_pattern_8bit[] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
+static inline bool dw_mci_fifo_reset(struct dw_mci *host);
+static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
+
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -249,10 +250,15 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
cmdr = cmd->opcode;
- if (cmdr == MMC_STOP_TRANSMISSION)
+ if (cmd->opcode == MMC_STOP_TRANSMISSION ||
+ cmd->opcode == MMC_GO_IDLE_STATE ||
+ cmd->opcode == MMC_GO_INACTIVE_STATE ||
+ (cmd->opcode == SD_IO_RW_DIRECT &&
+ ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
cmdr |= SDMMC_CMD_STOP;
else
- cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+ if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
@@ -279,6 +285,40 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
return cmdr;
}
+static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
+{
+ struct mmc_command *stop;
+ u32 cmdr;
+
+ if (!cmd->data)
+ return 0;
+
+ stop = &host->stop_abort;
+ cmdr = cmd->opcode;
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ if (cmdr == MMC_READ_SINGLE_BLOCK ||
+ cmdr == MMC_READ_MULTIPLE_BLOCK ||
+ cmdr == MMC_WRITE_BLOCK ||
+ cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
+ stop->opcode = MMC_STOP_TRANSMISSION;
+ stop->arg = 0;
+ stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ } else if (cmdr == SD_IO_RW_EXTENDED) {
+ stop->opcode = SD_IO_RW_DIRECT;
+ stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
+ ((cmd->arg >> 28) & 0x7);
+ stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
+ } else {
+ return 0;
+ }
+
+ cmdr = stop->opcode | SDMMC_CMD_STOP |
+ SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
+
+ return cmdr;
+}
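/*
 * Illustrative sketch (not from the patch): how the SDIO abort built by
 * dw_mci_prep_stop_abort() above is encoded.  An interrupted CMD53
 * (SD_IO_RW_EXTENDED) is aborted with a CMD52 (SD_IO_RW_DIRECT) that writes
 * the function number into the CCCR I/O Abort register; SDIO_CCCR_ABORT is
 * assumed here to be 0x06, matching include/linux/mmc/sdio.h.
 */
#include <stdint.h>
#include <stdio.h>

#define SDIO_CCCR_ABORT	0x06

static uint32_t sdio_abort_arg(uint32_t cmd53_arg)
{
	uint32_t func = (cmd53_arg >> 28) & 0x7;	/* function to abort */

	return (1u << 31)		/* R/W flag: write */
	     | (0u << 28)		/* target function 0 (the CCCR) */
	     | (SDIO_CCCR_ABORT << 9)	/* register address */
	     | func;			/* data byte: abort bits ASx */
}

int main(void)
{
	/* aborting a CMD53 that was issued to function 2 -> 0x80000c02 */
	printf("0x%08x\n", (unsigned int)sdio_abort_arg(2u << 28));
	return 0;
}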
+
static void dw_mci_start_command(struct dw_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
@@ -293,9 +333,10 @@ static void dw_mci_start_command(struct dw_mci *host,
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
-static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
- dw_mci_start_command(host, data->stop, host->stop_cmdr);
+ struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+ dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
@@ -304,10 +345,10 @@ static void dw_mci_stop_dma(struct dw_mci *host)
if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
- } else {
- /* Data transfer was stopped by the interrupt handler */
- set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
+
+ /* Data transfer was stopped by the interrupt handler */
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static int dw_mci_get_dma_dir(struct mmc_data *data)
@@ -331,6 +372,14 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
dw_mci_get_dma_dir(data));
}
+static void dw_mci_idmac_reset(struct dw_mci *host)
+{
+ u32 bmod = mci_readl(host, BMOD);
+ /* Software reset of DMA */
+ bmod |= SDMMC_IDMAC_SWRESET;
+ mci_writel(host, BMOD, bmod);
+}
+
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
u32 temp;
@@ -344,6 +393,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
+ temp |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, temp);
}
@@ -435,7 +485,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
- mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+ dw_mci_idmac_reset(host);
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS, IDMAC_INT_CLR);
@@ -532,6 +582,78 @@ static void dw_mci_post_req(struct mmc_host *mmc,
data->host_cookie = 0;
}
+static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
+{
+#ifdef CONFIG_MMC_DW_IDMAC
+ unsigned int blksz = data->blksz;
+ const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ u32 fifo_width = 1 << host->data_shift;
+ u32 blksz_depth = blksz / fifo_width, fifoth_val;
+ u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
+ int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
+
+ tx_wmark = (host->fifo_depth) / 2;
+ tx_wmark_invers = host->fifo_depth - tx_wmark;
+
+ /*
+ * MSIZE is '1',
+ * if blksz is not a multiple of the FIFO width
+ */
+ if (blksz % fifo_width) {
+ msize = 0;
+ rx_wmark = 1;
+ goto done;
+ }
+
+ do {
+ if (!((blksz_depth % mszs[idx]) ||
+ (tx_wmark_invers % mszs[idx]))) {
+ msize = idx;
+ rx_wmark = mszs[idx] - 1;
+ break;
+ }
+ } while (--idx > 0);
+ /*
+ * If idx is '0', it won't be tried.
+ * Thus, the initial values are used.
+ */
+done:
+ fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
+ mci_writel(host, FIFOTH, fifoth_val);
+#endif
+}
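/*
 * Illustrative sketch (not from the patch): the MSIZE / watermark selection
 * performed by dw_mci_adjust_fifoth() above, as a standalone calculation.
 * The parameters in main() are only an example; the driver feeds the result
 * into the FIFOTH register through SDMMC_SET_FIFOTH().
 */
#include <stdio.h>

static void pick_fifoth(unsigned int blksz, unsigned int fifo_width,
			unsigned int fifo_depth)
{
	static const unsigned int mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	unsigned int blksz_depth = blksz / fifo_width;
	unsigned int tx_wmark = fifo_depth / 2;
	unsigned int tx_wmark_invers = fifo_depth - tx_wmark;
	unsigned int msize = 0, rx_wmark = 1;	/* defaults: MSIZE of 1 */
	int idx = 7;

	if (!(blksz % fifo_width)) {
		do {	/* largest burst size dividing both depths */
			if (!(blksz_depth % mszs[idx]) &&
			    !(tx_wmark_invers % mszs[idx])) {
				msize = idx;
				rx_wmark = mszs[idx] - 1;
				break;
			}
		} while (--idx > 0);
	}

	printf("msize=%u rx_wmark=%u tx_wmark=%u\n", msize, rx_wmark, tx_wmark);
}

int main(void)
{
	/* 512-byte blocks, 32-bit FIFO entries, 128-entry FIFO:
	 * burst of 64 (msize index 6), RX watermark 63, TX watermark 64 */
	pick_fifoth(512, 4, 128);
	return 0;
}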
+
+static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
+{
+ unsigned int blksz = data->blksz;
+ u32 blksz_depth, fifo_depth;
+ u16 thld_size;
+
+ WARN_ON(!(data->flags & MMC_DATA_READ));
+
+ if (host->timing != MMC_TIMING_MMC_HS200 &&
+ host->timing != MMC_TIMING_UHS_SDR104)
+ goto disable;
+
+ blksz_depth = blksz / (1 << host->data_shift);
+ fifo_depth = host->fifo_depth;
+
+ if (blksz_depth > fifo_depth)
+ goto disable;
+
+ /*
+ * If (blksz_depth) >= (fifo_depth >> 1), thld_size should be <= blksz.
+ * If (blksz_depth) < (fifo_depth >> 1), thld_size should equal blksz.
+ * Currently just choose blksz.
+ */
+ thld_size = blksz;
+ mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
+ return;
+
+disable:
+ mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
+}
+
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
int sg_len;
@@ -556,6 +678,14 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
sg_len);
+ /*
+ * Decide the MSIZE and RX/TX watermark.
+ * If the current block size is the same as the previous one,
+ * there is no need to update FIFOTH.
+ */
+ if (host->prev_blksz != data->blksz)
+ dw_mci_adjust_fifoth(host, data);
+
/* Enable the DMA interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_DMA_ENABLE;
@@ -581,10 +711,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->sg = NULL;
host->data = data;
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
host->dir_status = DW_MCI_RECV_STATUS;
- else
+ dw_mci_ctrl_rd_thld(host, data);
+ } else {
host->dir_status = DW_MCI_SEND_STATUS;
+ }
if (dw_mci_submit_data_dma(host, data)) {
int flags = SG_MITER_ATOMIC;
@@ -606,6 +738,21 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
+
+ /*
+ * Use the initial fifoth_val for PIO mode.
+ * If the next transfer may be done in DMA mode,
+ * prev_blksz should be invalidated.
+ */
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
+ } else {
+ /*
+ * Keep the current block size.
+ * It will be used to decide whether to update
+ * fifoth register next time.
+ */
+ host->prev_blksz = data->blksz;
}
}
@@ -632,24 +779,31 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
struct dw_mci *host = slot->host;
+ unsigned int clock = slot->clock;
u32 div;
u32 clk_en_a;
- if (slot->clock != host->current_speed || force_clkinit) {
- div = host->bus_hz / slot->clock;
- if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
+ if (!clock) {
+ mci_writel(host, CLKENA, 0);
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ } else if (clock != host->current_speed || force_clkinit) {
+ div = host->bus_hz / clock;
+ if (host->bus_hz % clock && host->bus_hz > clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
div += 1;
- div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
+ div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- dev_info(&slot->mmc->class_dev,
- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
- " div = %d)\n", slot->id, host->bus_hz, slot->clock,
- div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+ if ((clock << div) != slot->__clk_old || force_clkinit)
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
@@ -676,9 +830,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
- host->current_speed = slot->clock;
+ /* remember the clock value, reflecting the clock divider */
+ slot->__clk_old = clock << div;
}
+ host->current_speed = clock;
+
/* Set the current slot bus width */
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
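/*
 * Illustrative sketch (not from the patch): the divider computation used by
 * dw_mci_setup_bus() above.  CLKDIV holds half the real divider, so the
 * achieved card clock is bus_hz / (2 * div) unless div is 0 (bypass).
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int dw_mci_card_clock(unsigned int bus_hz, unsigned int clock)
{
	unsigned int div = bus_hz / clock;

	if (bus_hz % clock && bus_hz > clock)
		div += 1;		/* never over-clock the card */
	div = (bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

	return div ? (bus_hz / div) >> 1 : bus_hz;
}

int main(void)
{
	/* 100 MHz controller clock, 400 kHz requested: CLKDIV 125, exactly 400 kHz */
	printf("%u Hz\n", dw_mci_card_clock(100000000, 400000));
	return 0;
}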
@@ -700,7 +857,9 @@ static void __dw_mci_start_request(struct dw_mci *host,
host->pending_events = 0;
host->completed_events = 0;
+ host->cmd_status = 0;
host->data_status = 0;
+ host->dir_status = 0;
data = cmd->data;
if (data) {
@@ -724,6 +883,8 @@ static void __dw_mci_start_request(struct dw_mci *host,
if (mrq->stop)
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+ else
+ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
@@ -806,14 +967,13 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
regs &= ~((0x1 << slot->id) << 16);
mci_writel(slot->host, UHS_REG, regs);
+ slot->host->timing = ios->timing;
- if (ios->clock) {
- /*
- * Use mirror of ios->clock to prevent race with mmc
- * core ios update when finding the minimum.
- */
- slot->clock = ios->clock;
- }
+ /*
+ * Use mirror of ios->clock to prevent race with mmc
+ * core ios update when finding the minimum.
+ */
+ slot->clock = ios->clock;
if (drv_data && drv_data->set_ios)
drv_data->set_ios(slot->host, ios);
@@ -939,6 +1099,38 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
}
}
+static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ struct dw_mci_tuning_data tuning_data;
+ int err = -ENOSYS;
+
+ if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
+ tuning_data.blk_pattern = tuning_blk_pattern_8bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
+ } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ tuning_data.blk_pattern = tuning_blk_pattern_4bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
+ } else {
+ return -EINVAL;
+ }
+ } else if (opcode == MMC_SEND_TUNING_BLOCK) {
+ tuning_data.blk_pattern = tuning_blk_pattern_4bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
+ } else {
+ dev_err(host->dev,
+ "Undefined command(%d) for tuning\n", opcode);
+ return -EINVAL;
+ }
+
+ if (drv_data && drv_data->execute_tuning)
+ err = drv_data->execute_tuning(slot, opcode, &tuning_data);
+ return err;
+}
+
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.pre_req = dw_mci_pre_req,
@@ -947,6 +1139,7 @@ static const struct mmc_host_ops dw_mci_ops = {
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
+ .execute_tuning = dw_mci_execute_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -978,7 +1171,7 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
spin_lock(&host->lock);
}
-static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
u32 status = host->cmd_status;
@@ -1012,12 +1205,52 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
mdelay(20);
+ }
- if (cmd->data) {
- dw_mci_stop_dma(host);
- host->data = NULL;
+ return cmd->error;
+}
+
+static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
+{
+ u32 status = host->data_status;
+
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ if (status & SDMMC_INT_DRTO) {
+ data->error = -ETIMEDOUT;
+ } else if (status & SDMMC_INT_DCRC) {
+ data->error = -EILSEQ;
+ } else if (status & SDMMC_INT_EBE) {
+ if (host->dir_status ==
+ DW_MCI_SEND_STATUS) {
+ /*
+ * No data CRC status was returned.
+ * The number of bytes transferred
+ * will be exaggerated in PIO mode.
+ */
+ data->bytes_xfered = 0;
+ data->error = -ETIMEDOUT;
+ } else if (host->dir_status ==
+ DW_MCI_RECV_STATUS) {
+ data->error = -EIO;
+ }
+ } else {
+ /* SDMMC_INT_SBE is included */
+ data->error = -EIO;
}
+
+ dev_err(host->dev, "data error, status 0x%08x\n", status);
+
+ /*
+ * After an error, there may be data lingering
+ * in the FIFO
+ */
+ dw_mci_fifo_reset(host);
+ } else {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
}
+
+ return data->error;
}
static void dw_mci_tasklet_func(unsigned long priv)
@@ -1025,14 +1258,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
struct dw_mci *host = (struct dw_mci *)priv;
struct mmc_data *data;
struct mmc_command *cmd;
+ struct mmc_request *mrq;
enum dw_mci_state state;
enum dw_mci_state prev_state;
- u32 status, ctrl;
+ unsigned int err;
spin_lock(&host->lock);
state = host->state;
data = host->data;
+ mrq = host->mrq;
do {
prev_state = state;
@@ -1049,16 +1284,23 @@ static void dw_mci_tasklet_func(unsigned long priv)
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
- dw_mci_command_complete(host, cmd);
- if (cmd == host->mrq->sbc && !cmd->error) {
+ err = dw_mci_command_complete(host, cmd);
+ if (cmd == mrq->sbc && !err) {
prev_state = state = STATE_SENDING_CMD;
__dw_mci_start_request(host, host->cur_slot,
- host->mrq->cmd);
+ mrq->cmd);
goto unlock;
}
- if (!host->mrq->data || cmd->error) {
- dw_mci_request_end(host, host->mrq);
+ if (cmd->data && err) {
+ dw_mci_stop_dma(host);
+ send_stop_abort(host, data);
+ state = STATE_SENDING_STOP;
+ break;
+ }
+
+ if (!cmd->data || err) {
+ dw_mci_request_end(host, mrq);
goto unlock;
}
@@ -1069,8 +1311,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
- if (data->stop)
- send_stop_cmd(host, data);
+ send_stop_abort(host, data);
state = STATE_DATA_ERROR;
break;
}
@@ -1090,60 +1331,27 @@ static void dw_mci_tasklet_func(unsigned long priv)
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
- status = host->data_status;
-
- if (status & DW_MCI_DATA_ERROR_FLAGS) {
- if (status & SDMMC_INT_DRTO) {
- data->error = -ETIMEDOUT;
- } else if (status & SDMMC_INT_DCRC) {
- data->error = -EILSEQ;
- } else if (status & SDMMC_INT_EBE &&
- host->dir_status ==
- DW_MCI_SEND_STATUS) {
- /*
- * No data CRC status was returned.
- * The number of bytes transferred will
- * be exaggerated in PIO mode.
- */
- data->bytes_xfered = 0;
- data->error = -ETIMEDOUT;
- } else {
- dev_err(host->dev,
- "data FIFO error "
- "(status=%08x)\n",
- status);
- data->error = -EIO;
- }
- /*
- * After an error, there may be data lingering
- * in the FIFO, so reset it - doing so
- * generates a block interrupt, hence setting
- * the scatter-gather pointer to NULL.
- */
- sg_miter_stop(&host->sg_miter);
- host->sg = NULL;
- ctrl = mci_readl(host, CTRL);
- ctrl |= SDMMC_CTRL_FIFO_RESET;
- mci_writel(host, CTRL, ctrl);
- } else {
- data->bytes_xfered = data->blocks * data->blksz;
- data->error = 0;
- }
+ err = dw_mci_data_complete(host, data);
- if (!data->stop) {
- dw_mci_request_end(host, host->mrq);
- goto unlock;
- }
+ if (!err) {
+ if (!data->stop || mrq->sbc) {
+ if (mrq->sbc)
+ data->stop->error = 0;
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+ }
- if (host->mrq->sbc && !data->error) {
- data->stop->error = 0;
- dw_mci_request_end(host, host->mrq);
- goto unlock;
+ /* stop command for open-ended transfer */
+ if (data->stop)
+ send_stop_abort(host, data);
}
+ /*
+ * If err is non-zero, the stop-abort command
+ * has already been issued.
+ */
prev_state = state = STATE_SENDING_STOP;
- if (!data->error)
- send_stop_cmd(host, data);
+
/* fall through */
case STATE_SENDING_STOP:
@@ -1151,9 +1359,19 @@ static void dw_mci_tasklet_func(unsigned long priv)
&host->pending_events))
break;
+ /* CMD error in data command */
+ if (mrq->cmd->error && mrq->data)
+ dw_mci_fifo_reset(host);
+
host->cmd = NULL;
- dw_mci_command_complete(host, host->mrq->stop);
- dw_mci_request_end(host, host->mrq);
+ host->data = NULL;
+
+ if (mrq->stop)
+ dw_mci_command_complete(host, mrq->stop);
+ else
+ host->cmd_status = 0;
+
+ dw_mci_request_end(host, mrq);
goto unlock;
case STATE_DATA_ERROR:
@@ -1697,7 +1915,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
struct mmc_host *mmc = slot->mmc;
struct mmc_request *mrq;
int present;
- u32 ctrl;
present = dw_mci_get_cd(mmc);
while (present != slot->last_detect_state) {
@@ -1736,11 +1953,10 @@ static void dw_mci_work_routine_card(struct work_struct *work)
case STATE_DATA_ERROR:
if (mrq->data->error == -EINPROGRESS)
mrq->data->error = -ENOMEDIUM;
- if (!mrq->stop)
- break;
/* fall through */
case STATE_SENDING_STOP:
- mrq->stop->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
break;
}
@@ -1763,23 +1979,10 @@ static void dw_mci_work_routine_card(struct work_struct *work)
if (present == 0) {
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- /*
- * Clear down the FIFO - doing so generates a
- * block interrupt, hence setting the
- * scatter-gather pointer to NULL.
- */
- sg_miter_stop(&host->sg_miter);
- host->sg = NULL;
-
- ctrl = mci_readl(host, CTRL);
- ctrl |= SDMMC_CTRL_FIFO_RESET;
- mci_writel(host, CTRL, ctrl);
-
+ /* Clear down the FIFO */
+ dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
- ctrl = mci_readl(host, BMOD);
- /* Software reset of DMA */
- ctrl |= SDMMC_IDMAC_SWRESET;
- mci_writel(host, BMOD, ctrl);
+ dw_mci_idmac_reset(host);
#endif
}
@@ -1901,6 +2104,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
struct dw_mci_slot *slot;
const struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
+ u32 freq[2];
u8 bus_width;
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -1916,8 +2120,14 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
mmc->ops = &dw_mci_ops;
- mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
- mmc->f_max = host->bus_hz;
+ if (of_property_read_u32_array(host->dev->of_node,
+ "clock-freq-min-max", freq, 2)) {
+ mmc->f_min = DW_MCI_FREQ_MIN;
+ mmc->f_max = DW_MCI_FREQ_MAX;
+ } else {
+ mmc->f_min = freq[0];
+ mmc->f_max = freq[1];
+ }
if (host->pdata->get_ocr)
mmc->ocr_avail = host->pdata->get_ocr(id);
@@ -1964,9 +2174,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->caps |= MMC_CAP_4_BIT_DATA;
}
- if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
-
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -2008,12 +2215,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Card initially undetected */
slot->last_detect_state = 0;
- /*
- * Card may have been plugged in prior to boot so we
- * need to run the detect tasklet
- */
- queue_work(host->card_workqueue, &host->card_work);
-
return 0;
err_setup_bus:
@@ -2074,36 +2275,57 @@ no_dma:
return;
}
-static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
unsigned long timeout = jiffies + msecs_to_jiffies(500);
- unsigned int ctrl;
+ u32 ctrl;
- mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
- SDMMC_CTRL_DMA_RESET));
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= reset;
+ mci_writel(host, CTRL, ctrl);
/* wait till resets clear */
do {
ctrl = mci_readl(host, CTRL);
- if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
- SDMMC_CTRL_DMA_RESET)))
+ if (!(ctrl & reset))
return true;
} while (time_before(jiffies, timeout));
- dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+ dev_err(host->dev,
+ "Timeout resetting block (ctrl reset %#x)\n",
+ ctrl & reset);
return false;
}
+static inline bool dw_mci_fifo_reset(struct dw_mci *host)
+{
+ /*
+ * Resetting generates a block interrupt, hence the
+ * scatter-gather pointer is set to NULL first.
+ */
+ if (host->sg) {
+ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+ }
+
+ return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+}
+
+static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
+{
+ return dw_mci_ctrl_reset(host,
+ SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_RESET |
+ SDMMC_CTRL_DMA_RESET);
+}
+
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
char *quirk;
int id;
} of_quirks[] = {
{
- .quirk = "supports-highspeed",
- .id = DW_MCI_QUIRK_HIGHSPEED,
- }, {
.quirk = "broken-cd",
.id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
},
@@ -2158,6 +2380,15 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
if (of_find_property(np, "enable-sdio-wakeup", NULL))
pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (of_find_property(np, "supports-highspeed", NULL))
+ pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+
+ if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
+ pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+
+ if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
+ pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+
return pdata;
}
@@ -2221,6 +2452,15 @@ int dw_mci_probe(struct dw_mci *host)
host->bus_hz = clk_get_rate(host->ciu_clk);
}
+ if (drv_data && drv_data->init) {
+ ret = drv_data->init(host);
+ if (ret) {
+ dev_err(host->dev,
+ "implementation specific init failed\n");
+ goto err_clk_ciu;
+ }
+ }
+
if (drv_data && drv_data->setup_clock) {
ret = drv_data->setup_clock(host);
if (ret) {
@@ -2287,7 +2527,7 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!mci_wait_reset(host->dev, host))
+ if (!dw_mci_ctrl_all_reset(host))
return -ENODEV;
host->dma_ops = host->pdata->dma_ops;
@@ -2317,8 +2557,8 @@ int dw_mci_probe(struct dw_mci *host)
fifo_size = host->pdata->fifo_depth;
}
host->fifo_depth = fifo_size;
- host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
- ((fifo_size/2) << 0));
+ host->fifoth_val =
+ SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
mci_writel(host, FIFOTH, host->fifoth_val);
/* disable clock to CIU */
@@ -2456,23 +2696,6 @@ EXPORT_SYMBOL(dw_mci_remove);
*/
int dw_mci_suspend(struct dw_mci *host)
{
- int i, ret = 0;
-
- for (i = 0; i < host->num_slots; i++) {
- struct dw_mci_slot *slot = host->slot[i];
- if (!slot)
- continue;
- ret = mmc_suspend_host(slot->mmc);
- if (ret < 0) {
- while (--i >= 0) {
- slot = host->slot[i];
- if (slot)
- mmc_resume_host(host->slot[i]->mmc);
- }
- return ret;
- }
- }
-
if (host->vmmc)
regulator_disable(host->vmmc);
@@ -2493,7 +2716,7 @@ int dw_mci_resume(struct dw_mci *host)
}
}
- if (!mci_wait_reset(host->dev, host)) {
+ if (!dw_mci_ctrl_all_reset(host)) {
ret = -ENODEV;
return ret;
}
@@ -2501,8 +2724,15 @@ int dw_mci_resume(struct dw_mci *host)
if (host->use_dma && host->dma_ops->init)
host->dma_ops->init(host);
- /* Restore the old value at FIFOTH register */
+ /*
+ * Restore the initial value of the FIFOTH register
+ * and invalidate prev_blksz with zero.
+ */
mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
@@ -2518,10 +2748,6 @@ int dw_mci_resume(struct dw_mci *host)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
dw_mci_setup_bus(slot, true);
}
-
- ret = mmc_resume_host(host->slot[i]->mmc);
- if (ret < 0)
- return ret;
}
return 0;
}
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 81b29941c5b..6bf24ab917e 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -53,6 +53,7 @@
#define SDMMC_IDINTEN 0x090
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
+#define SDMMC_CDTHRCTL 0x100
#define SDMMC_DATA(x) (x)
/*
@@ -128,6 +129,10 @@
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
+/* FIFOTH register defines */
+#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
+ ((r) & 0xFFF) << 16 | \
+ ((t) & 0xFFF))
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -142,6 +147,8 @@
#define SDMMC_IDMAC_SWRESET BIT(0)
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
+/* Card read threshold */
+#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x))
/* Register access macros */
#define mci_readl(dev, reg) \
@@ -184,6 +191,52 @@ extern int dw_mci_resume(struct dw_mci *host);
#endif
/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
+ * @wp_gpio: If gpio_is_valid() we'll use this to read write protect.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last programmed clock value, reflecting the clock divider.
+ * Keeping track of this helps us avoid spamming the console
+ * when CONFIG_MMC_CLKGATE is enabled.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+ struct mmc_host *mmc;
+ struct dw_mci *host;
+
+ int quirks;
+ int wp_gpio;
+
+ u32 ctype;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned int __clk_old;
+
+ unsigned long flags;
+#define DW_MMC_CARD_PRESENT 0
+#define DW_MMC_CARD_NEED_INIT 1
+ int id;
+ int last_detect_state;
+};
+
+struct dw_mci_tuning_data {
+ const u8 *blk_pattern;
+ unsigned int blksz;
+};
+
+/**
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @init: early implementation specific initialization.
@@ -203,5 +256,7 @@ struct dw_mci_drv_data {
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
+ int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
+ struct dw_mci_tuning_data *tuning_data);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 66516339e3a..de2139cf344 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -880,8 +880,6 @@ static int jz4740_mmc_suspend(struct device *dev)
{
struct jz4740_mmc_host *host = dev_get_drvdata(dev);
- mmc_suspend_host(host->mmc);
-
jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
return 0;
@@ -893,8 +891,6 @@ static int jz4740_mmc_resume(struct device *dev)
jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
- mmc_resume_host(host->mmc);
-
return 0;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c3785edc0e9..f32057972dd 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -62,6 +62,7 @@ static unsigned int fmax = 515633;
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
* @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
*/
struct variant_data {
unsigned int clkreg;
@@ -76,6 +77,7 @@ struct variant_data {
bool signal_direction;
bool pwrreg_clkgate;
bool busy_detect;
+ bool pwrreg_nopower;
};
static struct variant_data variant_arm = {
@@ -109,6 +111,7 @@ static struct variant_data variant_u300 = {
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
.pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_nomadik = {
@@ -121,6 +124,7 @@ static struct variant_data variant_nomadik = {
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
.pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_ux500 = {
@@ -135,6 +139,7 @@ static struct variant_data variant_ux500 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_ux500v2 = {
@@ -150,6 +155,7 @@ static struct variant_data variant_ux500v2 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .pwrreg_nopower = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
@@ -189,6 +195,21 @@ static int mmci_validate_data(struct mmci_host *host,
return 0;
}
+static void mmci_reg_delay(struct mmci_host *host)
+{
+ /*
+ * According to the spec, at least three feedback clock cycles
+ * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
+ * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
+ * Worst delay time during card init is at 100 kHz => 30 us.
+ * Worst delay time when up and running is at 25 MHz => 120 ns.
+ */
+ if (host->cclk < 25000000)
+ udelay(30);
+ else
+ ndelay(120);
+}
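/*
 * Illustrative check (not from the patch) of the bounds quoted in
 * mmci_reg_delay() above: three feedback clock cycles at the card-init
 * clock (100 kHz) and at full speed (25 MHz) give the 30 us and 120 ns
 * figures used for udelay()/ndelay().
 */
#include <stdio.h>

int main(void)
{
	printf("3 cycles @ 100 kHz = %.0f us\n", 3.0 / 100e3 * 1e6);	/* 30 */
	printf("3 cycles @ 25 MHz  = %.0f ns\n", 3.0 / 25e6 * 1e9);	/* 120 */
	return 0;
}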
+
/*
* This must be called with host->lock held
*/
@@ -1264,6 +1285,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmci_set_clkreg(host, ios->clock);
mmci_write_pwrreg(host, pwr);
+ mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1510,23 +1532,6 @@ static int mmci_probe(struct amba_device *dev,
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
- host->pinctrl = devm_pinctrl_get(&dev->dev);
- if (IS_ERR(host->pinctrl)) {
- ret = PTR_ERR(host->pinctrl);
- goto clk_disable;
- }
-
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
-
- /* enable pins to be muxed in and configured */
- if (!IS_ERR(host->pins_default)) {
- ret = pinctrl_select_state(host->pinctrl, host->pins_default);
- if (ret)
- dev_warn(&dev->dev, "could not set default pins\n");
- } else
- dev_warn(&dev->dev, "could not get default pinstate\n");
-
/* Get regulators and the supported OCR mask */
mmc_regulator_get_supply(mmc);
if (!mmc->ocr_avail)
@@ -1725,41 +1730,67 @@ static int mmci_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct mmc_host *mmc = amba_get_drvdata(adev);
- int ret = 0;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
-
- ret = mmc_suspend_host(mmc);
- if (ret == 0) {
- pm_runtime_get_sync(dev);
- writel(0, host->base + MMCIMASK0);
- }
+ pm_runtime_get_sync(dev);
+ writel(0, host->base + MMCIMASK0);
}
- return ret;
+ return 0;
}
static int mmci_resume(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct mmc_host *mmc = amba_get_drvdata(adev);
- int ret = 0;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
-
writel(MCI_IRQENABLE, host->base + MMCIMASK0);
pm_runtime_put(dev);
-
- ret = mmc_resume_host(mmc);
}
- return ret;
+ return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
+static void mmci_save(struct mmci_host *host)
+{
+ unsigned long flags;
+
+ if (host->variant->pwrreg_nopower) {
+ spin_lock_irqsave(&host->lock, flags);
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIDATACTRL);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+}
+
+static void mmci_restore(struct mmci_host *host)
+{
+ unsigned long flags;
+
+ if (host->variant->pwrreg_nopower) {
+ spin_lock_irqsave(&host->lock, flags);
+
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+}
+
static int mmci_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
@@ -1767,6 +1798,8 @@ static int mmci_runtime_suspend(struct device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ pinctrl_pm_select_sleep_state(dev);
+ mmci_save(host);
clk_disable_unprepare(host->clk);
}
@@ -1781,6 +1814,8 @@ static int mmci_runtime_resume(struct device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
+ mmci_restore(host);
+ pinctrl_pm_select_default_state(dev);
}
return 0;
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 69080fab637..168bc72f7a9 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -200,10 +200,6 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
- /* pinctrl handles */
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
-
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
struct dma_chan *dma_current;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index b900de4e7e9..9405ecdaf6c 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1416,28 +1416,10 @@ ioremap_free:
}
#ifdef CONFIG_PM
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-static void
-do_resume_work(struct work_struct *work)
-{
- struct msmsdcc_host *host =
- container_of(work, struct msmsdcc_host, resume_task);
- struct mmc_host *mmc = host->mmc;
-
- if (mmc) {
- mmc_resume_host(mmc);
- if (host->stat_irq)
- enable_irq(host->stat_irq);
- }
-}
-#endif
-
-
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
- int rc = 0;
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
@@ -1445,14 +1427,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
if (host->stat_irq)
disable_irq(host->stat_irq);
- if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
- rc = mmc_suspend_host(mmc);
- if (!rc)
- msmsdcc_writel(host, 0, MMCIMASK0);
+ msmsdcc_writel(host, 0, MMCIMASK0);
if (host->clks_on)
msmsdcc_disable_clocks(host, 0);
}
- return rc;
+ return 0;
}
static int
@@ -1467,8 +1446,6 @@ msmsdcc_resume(struct platform_device *dev)
msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
- if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
- mmc_resume_host(mmc);
if (host->stat_irq)
enable_irq(host->stat_irq);
#if BUSCLK_PWRSAVE
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 06c5b0b28eb..45aa2206741 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -655,7 +655,7 @@ static const struct mmc_host_ops mvsd_ops = {
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
-static void __init
+static void
mv_conf_mbus_windows(struct mvsd_host *host,
const struct mbus_dram_target_info *dram)
{
@@ -677,7 +677,7 @@ mv_conf_mbus_windows(struct mvsd_host *host,
}
}
-static int __init mvsd_probe(struct platform_device *pdev)
+static int mvsd_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mmc_host *mmc = NULL;
@@ -775,9 +775,9 @@ static int __init mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = devm_request_and_ioremap(&pdev->dev, r);
- if (!host->base) {
- ret = -ENOMEM;
+ host->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto out;
}
@@ -819,7 +819,7 @@ out:
return ret;
}
-static int __exit mvsd_remove(struct platform_device *pdev)
+static int mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -838,33 +838,6 @@ static int __exit mvsd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int mvsd_resume(struct platform_device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_resume_host(mmc);
-
- return ret;
-}
-#else
-#define mvsd_suspend NULL
-#define mvsd_resume NULL
-#endif
-
static const struct of_device_id mvsdio_dt_ids[] = {
{ .compatible = "marvell,orion-sdio" },
{ /* sentinel */ }
@@ -872,16 +845,15 @@ static const struct of_device_id mvsdio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
static struct platform_driver mvsd_driver = {
- .remove = __exit_p(mvsd_remove),
- .suspend = mvsd_suspend,
- .resume = mvsd_resume,
+ .probe = mvsd_probe,
+ .remove = mvsd_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mvsdio_dt_ids,
},
};
-module_platform_driver_probe(mvsd_driver, mvsd_probe);
+module_platform_driver(mvsd_driver);
/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);
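
Two independent cleanups show up in the mvsdio hunks above: devm_request_and_ioremap(), which returned NULL on failure, is replaced by devm_ioremap_resource(), which returns an ERR_PTR with the real error code; and module_platform_driver_probe() becomes module_platform_driver(), which is also why the __init/__exit annotations had to go (probe can now run at any time). A generic sketch of the resulting boilerplate, with hypothetical foo_* names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, r);    /* also checks r for NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);                   /* propagate -EBUSY/-ENOMEM/... */

	/* ...allocate and register an mmc_host, request the IRQ, etc... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* undo what probe did */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe  = foo_probe,            /* no __init: probing may happen later */
	.remove = foo_remove,
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);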
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index c174c6a0d22..f7199c83f5c 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1250,28 +1250,20 @@ static int mxcmci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxcmci_host *host = mmc_priv(mmc);
- int ret = 0;
- if (mmc)
- ret = mmc_suspend_host(mmc);
clk_disable_unprepare(host->clk_per);
clk_disable_unprepare(host->clk_ipg);
-
- return ret;
+ return 0;
}
static int mxcmci_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxcmci_host *host = mmc_priv(mmc);
- int ret = 0;
clk_prepare_enable(host->clk_per);
clk_prepare_enable(host->clk_ipg);
- if (mmc)
- ret = mmc_resume_host(mmc);
-
- return ret;
+ return 0;
}
static const struct dev_pm_ops mxcmci_pm_ops = {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index e1fa3ef735e..50fc9df791b 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -724,13 +724,9 @@ static int mxs_mmc_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
- int ret = 0;
-
- ret = mmc_suspend_host(mmc);
clk_disable_unprepare(ssp->clk);
-
- return ret;
+ return 0;
}
static int mxs_mmc_resume(struct device *dev)
@@ -738,13 +734,9 @@ static int mxs_mmc_resume(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
- int ret = 0;
clk_prepare_enable(ssp->clk);
-
- ret = mmc_resume_host(mmc);
-
- return ret;
+ return 0;
}
static const struct dev_pm_ops mxs_mmc_pm_ops = {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b94f38ec2a8..0b10a9030f4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -128,7 +128,6 @@ struct mmc_omap_slot {
struct mmc_omap_host {
int initialized;
- int suspended;
struct mmc_request * mrq;
struct mmc_command * cmd;
struct mmc_data * data;
@@ -1513,61 +1512,9 @@ static int mmc_omap_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- int i, ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
-
- if (host == NULL || host->suspended)
- return 0;
-
- for (i = 0; i < host->nr_slots; i++) {
- struct mmc_omap_slot *slot;
-
- slot = host->slots[i];
- ret = mmc_suspend_host(slot->mmc);
- if (ret < 0) {
- while (--i >= 0) {
- slot = host->slots[i];
- mmc_resume_host(slot->mmc);
- }
- return ret;
- }
- }
- host->suspended = 1;
- return 0;
-}
-
-static int mmc_omap_resume(struct platform_device *pdev)
-{
- int i, ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
-
- if (host == NULL || !host->suspended)
- return 0;
-
- for (i = 0; i < host->nr_slots; i++) {
- struct mmc_omap_slot *slot;
- slot = host->slots[i];
- ret = mmc_resume_host(slot->mmc);
- if (ret < 0)
- return ret;
-
- host->suspended = 0;
- }
- return 0;
-}
-#else
-#define mmc_omap_suspend NULL
-#define mmc_omap_resume NULL
-#endif
-
static struct platform_driver mmc_omap_driver = {
.probe = mmc_omap_probe,
.remove = mmc_omap_remove,
- .suspend = mmc_omap_suspend,
- .resume = mmc_omap_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 6ac63df645c..dbd32ad3b74 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -75,6 +75,7 @@
#define ICE 0x1
#define ICS 0x2
#define CEN (1 << 2)
+#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */
#define CLKD_MASK 0x0000FFC0
#define CLKD_SHIFT 6
#define DTO_MASK 0x000F0000
@@ -119,7 +120,8 @@
BRR_EN | BWR_EN | TC_EN | CC_EN)
#define MMC_AUTOSUSPEND_DELAY 100
-#define MMC_TIMEOUT_MS 20
+#define MMC_TIMEOUT_MS 20 /* 20 ms */
+#define MMC_TIMEOUT_US 20000 /* 20 ms, in microseconds */
#define OMAP_MMC_MIN_CLOCK 400000
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
@@ -171,6 +173,10 @@ struct omap_hsmmc_host {
unsigned char bus_mode;
unsigned char power_mode;
int suspended;
+ u32 con;
+ u32 hctl;
+ u32 sysctl;
+ u32 capa;
int irq;
int use_dma, dma_ch;
struct dma_chan *tx_chan;
@@ -183,7 +189,6 @@ struct omap_hsmmc_host {
int use_reg;
int req_in_progress;
struct omap_hsmmc_next next_data;
-
struct omap_mmc_platform_data *pdata;
};
@@ -493,8 +498,8 @@ static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
if (ios->clock) {
dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
- if (dsor > 250)
- dsor = 250;
+ if (dsor > CLKD_MAX)
+ dsor = CLKD_MAX;
}
return dsor;
@@ -597,25 +602,20 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
- struct omap_mmc_platform_data *pdata = host->pdata;
- int context_loss = 0;
u32 hctl, capa;
unsigned long timeout;
- if (pdata->get_context_loss_count) {
- context_loss = pdata->get_context_loss_count(host->dev);
- if (context_loss < 0)
- return 1;
- }
-
- dev_dbg(mmc_dev(host->mmc), "context was %slost\n",
- context_loss == host->context_loss ? "not " : "");
- if (host->context_loss == context_loss)
- return 1;
-
if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE)
return 1;
+ if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
+ host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
+ host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
+ host->capa == OMAP_HSMMC_READ(host->base, CAPA))
+ return 0;
+
+ host->context_loss++;
+
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
if (host->power_mode != MMC_POWER_OFF &&
(1 << ios->vdd) <= MMC_VDD_23_24)
@@ -655,9 +655,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
omap_hsmmc_set_bus_mode(host);
out:
- host->context_loss = context_loss;
-
- dev_dbg(mmc_dev(host->mmc), "context is restored\n");
+ dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
+ host->context_loss);
return 0;
}
@@ -666,15 +665,10 @@ out:
*/
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
- struct omap_mmc_platform_data *pdata = host->pdata;
- int context_loss;
-
- if (pdata->get_context_loss_count) {
- context_loss = pdata->get_context_loss_count(host->dev);
- if (context_loss < 0)
- return;
- host->context_loss = context_loss;
- }
+ host->con = OMAP_HSMMC_READ(host->base, CON);
+ host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
+ host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
+ host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}
#else
@@ -975,8 +969,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
unsigned long bit)
{
unsigned long i = 0;
- unsigned long limit = (loops_per_jiffy *
- msecs_to_jiffies(MMC_TIMEOUT_MS));
+ unsigned long limit = MMC_TIMEOUT_US;
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
@@ -988,13 +981,13 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
&& (i++ < limit))
- cpu_relax();
+ udelay(1);
}
i = 0;
while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
(i++ < limit))
- cpu_relax();
+ udelay(1);
if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
dev_err(mmc_dev(host->mmc),
@@ -1178,9 +1171,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
struct omap_mmc_slot_data *slot = &mmc_slot(host);
int carddetect;
- if (host->suspended)
- return IRQ_HANDLED;
-
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
if (slot->card_detect)
@@ -1635,18 +1625,9 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
struct mmc_host *mmc = s->private;
struct omap_hsmmc_host *host = mmc_priv(mmc);
- int context_loss = 0;
-
- if (host->pdata->get_context_loss_count)
- context_loss = host->pdata->get_context_loss_count(host->dev);
- seq_printf(s, "mmc%d:\n ctx_loss:\t%d:%d\n\nregs:\n",
- mmc->index, host->context_loss, context_loss);
-
- if (host->suspended) {
- seq_printf(s, "host suspended, can't read registers\n");
- return 0;
- }
+ seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n",
+ mmc->index, host->context_loss);
pm_runtime_get_sync(host->dev);
@@ -1838,13 +1819,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->ops = &omap_hsmmc_ops;
- /*
- * If regulator_disable can only put vcc_aux to sleep then there is
- * no off state.
- */
- if (mmc_slot(host).vcc_aux_disable_is_sleep)
- mmc_slot(host).no_off = 1;
-
mmc->f_min = OMAP_MMC_MIN_CLOCK;
if (pdata->max_freq > 0)
@@ -1874,7 +1848,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
/* This can be removed once we support PBIAS with DT */
- if (host->dev->of_node && host->mapbase == 0x4809c000)
+ if (host->dev->of_node && res->start == 0x4809c000)
host->pbias_disable = 1;
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
@@ -2119,23 +2093,12 @@ static void omap_hsmmc_complete(struct device *dev)
static int omap_hsmmc_suspend(struct device *dev)
{
- int ret = 0;
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
if (!host)
return 0;
- if (host && host->suspended)
- return 0;
-
pm_runtime_get_sync(host->dev);
- host->suspended = 1;
- ret = mmc_suspend_host(host->mmc);
-
- if (ret) {
- host->suspended = 0;
- goto err;
- }
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
omap_hsmmc_disable_irq(host);
@@ -2145,23 +2108,19 @@ static int omap_hsmmc_suspend(struct device *dev)
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
-err:
+
pm_runtime_put_sync(host->dev);
- return ret;
+ return 0;
}
/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
- int ret = 0;
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
if (!host)
return 0;
- if (host && !host->suspended)
- return 0;
-
pm_runtime_get_sync(host->dev);
if (host->dbclk)
@@ -2172,16 +2131,9 @@ static int omap_hsmmc_resume(struct device *dev)
omap_hsmmc_protect_card(host);
- /* Notify the core to resume the host */
- ret = mmc_resume_host(host->mmc);
- if (ret == 0)
- host->suspended = 0;
-
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
-
- return ret;
-
+ return 0;
}
#else
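
The omap_hsmmc hunks drop the platform-data get_context_loss_count() callback and instead snapshot a few key registers (CON, HCTL, SYSCTL, CAPA) in context_save(); on restore, matching values mean the context survived and the reprogramming path is skipped, otherwise a loss counter is bumped. A stripped-down illustration of that idea; the foo_* names and register offsets below are invented:

#include <linux/io.h>

#define FOO_CON         0x02c   /* hypothetical register offsets */
#define FOO_HCTL        0x128
#define FOO_SYSCTL      0x12c
#define FOO_CAPA        0x140

struct foo_ctx {
	void __iomem *base;
	u32 con, hctl, sysctl, capa;
	int context_loss;               /* how many restores were actually needed */
};

static void foo_context_save(struct foo_ctx *c)
{
	c->con    = readl(c->base + FOO_CON);
	c->hctl   = readl(c->base + FOO_HCTL);
	c->sysctl = readl(c->base + FOO_SYSCTL);
	c->capa   = readl(c->base + FOO_CAPA);
}

static int foo_context_restore(struct foo_ctx *c)
{
	/* If the registers still hold what we saved, nothing was lost. */
	if (c->con    == readl(c->base + FOO_CON) &&
	    c->hctl   == readl(c->base + FOO_HCTL) &&
	    c->sysctl == readl(c->base + FOO_SYSCTL) &&
	    c->capa   == readl(c->base + FOO_CAPA))
		return 0;

	c->context_loss++;
	/* ...reprogram the controller from scratch here... */
	return 0;
}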
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1956a3df7cf..32fe11323f3 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -880,35 +880,6 @@ static int pxamci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int pxamci_suspend(struct device *dev)
-{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int pxamci_resume(struct device *dev)
-{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_resume_host(mmc);
-
- return ret;
-}
-
-static const struct dev_pm_ops pxamci_pm_ops = {
- .suspend = pxamci_suspend,
- .resume = pxamci_resume,
-};
-#endif
-
static struct platform_driver pxamci_driver = {
.probe = pxamci_probe,
.remove = pxamci_remove,
@@ -916,9 +887,6 @@ static struct platform_driver pxamci_driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
-#ifdef CONFIG_PM
- .pm = &pxamci_pm_ops,
-#endif
},
};
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 375a880e0c5..c46feda07d5 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -364,7 +364,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
struct mmc_host *mmc = host->mmc;
struct mmc_card *card = mmc->card;
struct mmc_data *data = mrq->data;
- int uhs = mmc_sd_card_uhs(card);
+ int uhs = mmc_card_uhs(card);
int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
u8 cfg2, trans_mode;
int err;
@@ -1197,37 +1197,6 @@ static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
.execute_tuning = sdmmc_execute_tuning,
};
-#ifdef CONFIG_PM
-static int rtsx_pci_sdmmc_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
- struct mmc_host *mmc = host->mmc;
- int err;
-
- dev_dbg(sdmmc_dev(host), "--> %s\n", __func__);
-
- err = mmc_suspend_host(mmc);
- if (err)
- return err;
-
- return 0;
-}
-
-static int rtsx_pci_sdmmc_resume(struct platform_device *pdev)
-{
- struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
- struct mmc_host *mmc = host->mmc;
-
- dev_dbg(sdmmc_dev(host), "--> %s\n", __func__);
-
- return mmc_resume_host(mmc);
-}
-#else /* CONFIG_PM */
-#define rtsx_pci_sdmmc_suspend NULL
-#define rtsx_pci_sdmmc_resume NULL
-#endif /* CONFIG_PM */
-
static void init_extra_caps(struct realtek_pci_sdmmc *host)
{
struct mmc_host *mmc = host->mmc;
@@ -1367,8 +1336,6 @@ static struct platform_driver rtsx_pci_sdmmc_driver = {
.probe = rtsx_pci_sdmmc_drv_probe,
.remove = rtsx_pci_sdmmc_drv_remove,
.id_table = rtsx_pci_sdmmc_ids,
- .suspend = rtsx_pci_sdmmc_suspend,
- .resume = rtsx_pci_sdmmc_resume,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME_RTSX_PCI_SDMMC,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 8d6794cdf89..2fce5ea5eb3 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1949,39 +1949,10 @@ static struct platform_device_id s3cmci_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
-
-#ifdef CONFIG_PM
-
-static int s3cmci_suspend(struct device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
-
- return mmc_suspend_host(mmc);
-}
-
-static int s3cmci_resume(struct device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
-
- return mmc_resume_host(mmc);
-}
-
-static const struct dev_pm_ops s3cmci_pm = {
- .suspend = s3cmci_suspend,
- .resume = s3cmci_resume,
-};
-
-#define s3cmci_pm_ops &s3cmci_pm
-#else /* CONFIG_PM */
-#define s3cmci_pm_ops NULL
-#endif /* CONFIG_PM */
-
-
static struct platform_driver s3cmci_driver = {
.driver = {
.name = "s3c-sdi",
.owner = THIS_MODULE,
- .pm = s3cmci_pm_ops,
},
.id_table = s3cmci_driver_ids,
.probe = s3cmci_probe,
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index cdd4ce0d7c9..ef19874fcd1 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -310,8 +310,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
dma_mask = DMA_BIT_MASK(32);
}
- dev->dma_mask = &dev->coherent_dma_mask;
- dev->coherent_dma_mask = dma_mask;
+ err = dma_coerce_mask_and_coherent(dev, dma_mask);
+ if (err)
+ goto err_free;
}
if (c->slot) {
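
The sdhci-acpi hunk replaces the open-coded DMA mask assignment with dma_coerce_mask_and_coherent(), which points dev->dma_mask at the coherent mask, sets both to the given value and, unlike the old two lines, can report failure. Roughly (foo_setup_dma is a hypothetical helper, not the sdhci-acpi code):

#include <linux/dma-mapping.h>

static int foo_setup_dma(struct device *dev)
{
	int err;

	/* Set the streaming and coherent DMA masks in one call. */
	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err)
		return err;     /* the requested mask is unusable on this platform */

	/* ...DMA allocations and mappings may follow... */
	return 0;
}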
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 85472d3fd37..7a190fe4dff 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -316,19 +316,7 @@ err_pltfm_free:
static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev)
{
- struct sdhci_host *host = platform_get_drvdata(pdev);
- int dead;
- u32 scratch;
-
- dead = 0;
- scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (scratch == (u32)-1)
- dead = 1;
- sdhci_remove_host(host, dead);
-
- sdhci_free_host(host);
-
- return 0;
+ return sdhci_pltfm_unregister(pdev);
}
static struct platform_driver sdhci_bcm_kona_driver = {
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 36fa2df0466..f6d8d67c545 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -178,13 +178,7 @@ err:
static int bcm2835_sdhci_remove(struct platform_device *pdev)
{
- struct sdhci_host *host = platform_get_drvdata(pdev);
- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
-
- sdhci_remove_host(host, dead);
- sdhci_pltfm_free(pdev);
-
- return 0;
+ return sdhci_pltfm_unregister(pdev);
}
static const struct of_device_id bcm2835_sdhci_of_match[] = {
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index abc8cf01e6e..461a4c3f4ef 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -34,12 +34,40 @@
/* VENDOR SPEC register */
#define ESDHC_VENDOR_SPEC 0xc0
#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
+#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1)
+#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8)
#define ESDHC_WTMK_LVL 0x44
#define ESDHC_MIX_CTRL 0x48
+#define ESDHC_MIX_CTRL_DDREN (1 << 3)
#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
+#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
+#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
+#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+/* dll control register */
+#define ESDHC_DLL_CTRL 0x60
+#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9
+#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8
+
+/* tune control register */
+#define ESDHC_TUNE_CTRL_STATUS 0x68
+#define ESDHC_TUNE_CTRL_STEP 1
+#define ESDHC_TUNE_CTRL_MIN 0
+#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
+
+#define ESDHC_TUNING_CTRL 0xcc
+#define ESDHC_STD_TUNING_EN (1 << 24)
+/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
+#define ESDHC_TUNING_START_TAP 0x1
+
+#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64
+
+/* pinctrl state */
+#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
+#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz"
+
/*
* Our interpretation of the SDHCI_HOST_CONTROL register
*/
@@ -66,21 +94,60 @@
 * As a result, the TC flag is not asserted and SW receives a timeout
 * exception. Bit 1 of the Vendor Spec register is used to fix it.
*/
-#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
-
-enum imx_esdhc_type {
- IMX25_ESDHC,
- IMX35_ESDHC,
- IMX51_ESDHC,
- IMX53_ESDHC,
- IMX6Q_USDHC,
+#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1)
+/*
+ * This flag enables the workaround for ESDHC erratum ENGcm07207, which
+ * affects i.MX25 and i.MX35.
+ */
+#define ESDHC_FLAG_ENGCM07207 BIT(2)
+/*
+ * This flag indicates that the ESDHC controller is a USDHC block
+ * integrated on the i.MX6 series.
+ */
+#define ESDHC_FLAG_USDHC BIT(3)
+/* The IP supports manual tuning process */
+#define ESDHC_FLAG_MAN_TUNING BIT(4)
+/* The IP supports standard tuning process */
+#define ESDHC_FLAG_STD_TUNING BIT(5)
+/* The IP has SDHCI_CAPABILITIES_1 register */
+#define ESDHC_FLAG_HAVE_CAP1 BIT(6)
+
+struct esdhc_soc_data {
+ u32 flags;
+};
+
+static struct esdhc_soc_data esdhc_imx25_data = {
+ .flags = ESDHC_FLAG_ENGCM07207,
+};
+
+static struct esdhc_soc_data esdhc_imx35_data = {
+ .flags = ESDHC_FLAG_ENGCM07207,
+};
+
+static struct esdhc_soc_data esdhc_imx51_data = {
+ .flags = 0,
+};
+
+static struct esdhc_soc_data esdhc_imx53_data = {
+ .flags = ESDHC_FLAG_MULTIBLK_NO_INT,
+};
+
+static struct esdhc_soc_data usdhc_imx6q_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
+};
+
+static struct esdhc_soc_data usdhc_imx6sl_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1,
};
struct pltfm_imx_data {
- int flags;
u32 scratchpad;
- enum imx_esdhc_type devtype;
struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_100mhz;
+ struct pinctrl_state *pins_200mhz;
+ const struct esdhc_soc_data *socdata;
struct esdhc_platform_data boarddata;
struct clk *clk_ipg;
struct clk *clk_ahb;
@@ -90,25 +157,20 @@ struct pltfm_imx_data {
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
} multiblock_status;
-
+ u32 uhs_mode;
+ u32 is_ddr;
};
static struct platform_device_id imx_esdhc_devtype[] = {
{
.name = "sdhci-esdhc-imx25",
- .driver_data = IMX25_ESDHC,
+ .driver_data = (kernel_ulong_t) &esdhc_imx25_data,
}, {
.name = "sdhci-esdhc-imx35",
- .driver_data = IMX35_ESDHC,
+ .driver_data = (kernel_ulong_t) &esdhc_imx35_data,
}, {
.name = "sdhci-esdhc-imx51",
- .driver_data = IMX51_ESDHC,
- }, {
- .name = "sdhci-esdhc-imx53",
- .driver_data = IMX53_ESDHC,
- }, {
- .name = "sdhci-usdhc-imx6q",
- .driver_data = IMX6Q_USDHC,
+ .driver_data = (kernel_ulong_t) &esdhc_imx51_data,
}, {
/* sentinel */
}
@@ -116,38 +178,34 @@ static struct platform_device_id imx_esdhc_devtype[] = {
MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
static const struct of_device_id imx_esdhc_dt_ids[] = {
- { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], },
- { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
- { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
- { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
- { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], },
+ { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, },
+ { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
+ { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, },
+ { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
+ { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
+ { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX25_ESDHC;
-}
-
-static inline int is_imx35_esdhc(struct pltfm_imx_data *data)
-{
- return data->devtype == IMX35_ESDHC;
+ return data->socdata == &esdhc_imx25_data;
}
-static inline int is_imx51_esdhc(struct pltfm_imx_data *data)
+static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX51_ESDHC;
+ return data->socdata == &esdhc_imx53_data;
}
-static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
+static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX53_ESDHC;
+ return data->socdata == &usdhc_imx6q_data;
}
-static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
+static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX6Q_USDHC;
+ return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
}
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
@@ -164,7 +222,21 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 val = readl(host->ioaddr + reg);
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ u32 fsl_prss = val;
+ /* save the least 20 bits */
+ val = fsl_prss & 0x000FFFFF;
+ /* move dat[0-3] bits */
+ val |= (fsl_prss & 0x0F000000) >> 4;
+ /* move cmd line bit */
+ val |= (fsl_prss & 0x00800000) << 1;
+ }
+
if (unlikely(reg == SDHCI_CAPABILITIES)) {
+ /* ignore bits [0-15] as they hold the cap_1 register value on mx6sl */
+ if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
+ val &= 0xffff0000;
+
/* In FSL esdhc IC module, only bit20 is used to indicate the
* ADMA2 capability of esdhc, but this bit is messed up on
* some SOCs (e.g. on MX25, MX35 this bit is set, but they
@@ -178,6 +250,25 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
}
}
+ if (unlikely(reg == SDHCI_CAPABILITIES_1)) {
+ if (esdhc_is_usdhc(imx_data)) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
+ val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF;
+ else
+ /* imx6q/dl does not have a cap_1 register, fake one */
+ val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
+ | SDHCI_SUPPORT_SDR50
+ | SDHCI_USE_SDR50_TUNING;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) {
+ val = 0;
+ val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT;
+ val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT;
+ val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT;
+ }
+
if (unlikely(reg == SDHCI_INT_STATUS)) {
if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
@@ -224,7 +315,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
}
}
- if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (reg == SDHCI_INT_STATUS)
&& (val & SDHCI_INT_DATA_END))) {
u32 v;
@@ -256,10 +347,12 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u16 ret = 0;
+ u32 val;
if (unlikely(reg == SDHCI_HOST_VERSION)) {
reg ^= 2;
- if (is_imx6q_usdhc(imx_data)) {
+ if (esdhc_is_usdhc(imx_data)) {
/*
* The usdhc register returns a wrong host version.
* Correct it here.
@@ -268,6 +361,30 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
}
}
+ if (unlikely(reg == SDHCI_HOST_CONTROL2)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & ESDHC_VENDOR_SPEC_VSELECT)
+ ret |= SDHCI_CTRL_VDD_180;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ /* the std tuning bits are in ACMD12_ERR on imx6sl */
+ val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ }
+
+ if (val & ESDHC_MIX_CTRL_EXE_TUNE)
+ ret |= SDHCI_CTRL_EXEC_TUNING;
+ if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
+ ret |= SDHCI_CTRL_TUNED_CLK;
+
+ ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
+ ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
+ return ret;
+ }
+
return readw(host->ioaddr + reg);
}
@@ -275,10 +392,59 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u32 new_val = 0;
switch (reg) {
+ case SDHCI_CLOCK_CONTROL:
+ new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & SDHCI_CLOCK_CARD_EN)
+ new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ return;
+ case SDHCI_HOST_CONTROL2:
+ new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & SDHCI_CTRL_VDD_180)
+ new_val |= ESDHC_VENDOR_SPEC_VSELECT;
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK)
+ new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ else
+ new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ new_val = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ if (val & SDHCI_CTRL_EXEC_TUNING) {
+ new_val |= ESDHC_STD_TUNING_EN |
+ ESDHC_TUNING_START_TAP;
+ v |= ESDHC_MIX_CTRL_EXE_TUNE;
+ m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ } else {
+ new_val &= ~ESDHC_STD_TUNING_EN;
+ v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ }
+
+ if (val & SDHCI_CTRL_TUNED_CLK)
+ v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ else
+ v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+
+ writel(new_val, host->ioaddr + ESDHC_TUNING_CTRL);
+ writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ }
+ return;
case SDHCI_TRANSFER_MODE:
- if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
&& (host->cmd->data->blocks > 1)
&& (host->cmd->data->flags & MMC_DATA_READ)) {
@@ -288,7 +454,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
}
- if (is_imx6q_usdhc(imx_data)) {
+ if (esdhc_is_usdhc(imx_data)) {
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
/* Swap AC23 bit */
if (val & SDHCI_TRNS_AUTO_CMD23) {
@@ -310,10 +476,10 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
val |= SDHCI_CMD_ABORTCMD;
if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
- (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
- if (is_imx6q_usdhc(imx_data))
+ if (esdhc_is_usdhc(imx_data))
writel(val << 16,
host->ioaddr + SDHCI_TRANSFER_MODE);
else
@@ -379,8 +545,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
* The reset on usdhc fails to clear MIX_CTRL register.
* Do it manually here.
*/
- if (is_imx6q_usdhc(imx_data))
+ if (esdhc_is_usdhc(imx_data)) {
writel(0, host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 0;
+ }
}
}
@@ -409,8 +577,60 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ unsigned int host_clock = clk_get_rate(pltfm_host->clk);
+ int pre_div = 2;
+ int div = 1;
+ u32 temp, val;
+
+ if (clock == 0) {
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+ goto out;
+ }
+
+ if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
+ pre_div = 1;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host_clock / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host_clock / pre_div / div > clock && div < 16)
+ div++;
- esdhc_set_clock(host, clock, clk_get_rate(pltfm_host->clk));
+ host->mmc->actual_clock = host_clock / pre_div / div;
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->mmc->actual_clock);
+
+ if (imx_data->is_ddr)
+ pre_div >>= 2;
+ else
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
+ mdelay(1);
+out:
+ host->clock = clock;
}
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
@@ -454,7 +674,192 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
return 0;
}
-static const struct sdhci_ops sdhci_esdhc_ops = {
+static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
+{
+ u32 reg;
+
+ /* FIXME: delay a bit so the card is ready for the next tuning step (avoids errors) */
+ mdelay(1);
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
+ ESDHC_MIX_CTRL_FBCLK_SEL;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ dev_dbg(mmc_dev(host->mmc),
+ "tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
+ val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+}
+
+static void esdhc_request_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+ char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
+
+ mrq.cmd = &cmd;
+ mrq.cmd->mrq = &mrq;
+ mrq.data = &data;
+ mrq.data->mrq = &mrq;
+ mrq.cmd->data = mrq.data;
+
+ mrq.done = esdhc_request_done;
+ init_completion(&(mrq.completion));
+
+ disable_irq(host->irq);
+ spin_lock(&host->lock);
+ host->mrq = &mrq;
+
+ sdhci_send_command(host, mrq.cmd);
+
+ spin_unlock(&host->lock);
+ enable_irq(host->irq);
+
+ wait_for_completion(&mrq.completion);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+static void esdhc_post_tuning(struct sdhci_host *host)
+{
+ u32 reg;
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
+static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int min, max, avg, ret;
+
+ /* first find the minimum delay that passes tuning */
+ min = ESDHC_TUNE_CTRL_MIN;
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+ if (!esdhc_send_tuning_cmd(host, opcode))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ /* then find the maximum delay that still passes tuning */
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+ if (esdhc_send_tuning_cmd(host, opcode)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+ max += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ /* use average delay to get the best timing */
+ avg = (min + max) / 2;
+ esdhc_prepare_tuning(host, avg);
+ ret = esdhc_send_tuning_cmd(host, opcode);
+ esdhc_post_tuning(host);
+
+ dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+ return ret;
+}
+
+static int esdhc_change_pinstate(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pinctrl_state *pinctrl;
+
+ dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
+
+ if (IS_ERR(imx_data->pinctrl) ||
+ IS_ERR(imx_data->pins_default) ||
+ IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz))
+ return -EINVAL;
+
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR50:
+ pinctrl = imx_data->pins_100mhz;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ pinctrl = imx_data->pins_200mhz;
+ break;
+ default:
+ /* back to default state for other legacy timing */
+ pinctrl = imx_data->pins_default;
+ }
+
+ return pinctrl_select_state(imx_data->pinctrl, pinctrl);
+}
+
+static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
+ writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
+ ESDHC_MIX_CTRL_DDREN,
+ host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 1;
+ if (boarddata->delay_line) {
+ u32 v;
+ v = boarddata->delay_line <<
+ ESDHC_DLL_OVERRIDE_VAL_SHIFT |
+ (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT);
+ if (is_imx53_esdhc(imx_data))
+ v <<= 1;
+ writel(v, host->ioaddr + ESDHC_DLL_CTRL);
+ }
+ break;
+ }
+
+ return esdhc_change_pinstate(host, uhs);
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
.write_l = esdhc_writel_le,
@@ -465,6 +870,7 @@ static const struct sdhci_ops sdhci_esdhc_ops = {
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_ro = esdhc_pltfm_get_ro,
.platform_bus_width = esdhc_pltfm_bus_width,
+ .set_uhs_signaling = esdhc_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -506,6 +912,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
of_property_read_u32(np, "max-frequency", &boarddata->f_max);
+ if (of_find_property(np, "no-1-8-v", NULL))
+ boarddata->support_vsel = false;
+ else
+ boarddata->support_vsel = true;
+
+ if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
+ boarddata->delay_line = 0;
+
return 0;
}
#else
@@ -539,9 +953,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto free_sdhci;
}
- if (of_id)
- pdev->id_entry = of_id->data;
- imx_data->devtype = pdev->id_entry->driver_data;
+ imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
+ pdev->id_entry->driver_data;
pltfm_host->priv = imx_data;
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -568,29 +981,39 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
clk_prepare_enable(imx_data->clk_ipg);
clk_prepare_enable(imx_data->clk_ahb);
- imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(imx_data->pinctrl)) {
err = PTR_ERR(imx_data->pinctrl);
goto disable_clk;
}
+ imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(imx_data->pins_default)) {
+ err = PTR_ERR(imx_data->pins_default);
+ dev_err(mmc_dev(host->mmc), "could not get default state\n");
+ goto disable_clk;
+ }
+
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
+ if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
| SDHCI_QUIRK_BROKEN_ADMA;
- if (is_imx53_esdhc(imx_data))
- imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
-
/*
* The imx6q ROM code will change the default watermark level setting
* to something insane. Change it back here.
*/
- if (is_imx6q_usdhc(imx_data))
+ if (esdhc_is_usdhc(imx_data)) {
writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ }
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ sdhci_esdhc_ops.platform_execute_tuning =
+ esdhc_executing_tuning;
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
if (!host->mmc->parent->platform_data) {
@@ -650,6 +1073,23 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
break;
}
+ /* SDR50 and SDR104 need 1.8 V signaling to work */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+ "could not get ultra high speed state, work on normal mode\n");
+ /* fall back to no UHS support by setting the no-1.8V quirk */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
err = sdhci_add_host(host);
if (err)
goto disable_clk;
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index a2a06420e46..a7d9f95a7b0 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -49,41 +49,4 @@
#define ESDHC_HOST_CONTROL_RES 0x05
-static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock,
- unsigned int host_clock)
-{
- int pre_div = 2;
- int div = 1;
- u32 temp;
-
- if (clock == 0)
- goto out;
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | ESDHC_CLOCK_MASK);
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
-
- while (host_clock / pre_div / 16 > clock && pre_div < 256)
- pre_div *= 2;
-
- while (host_clock / pre_div / div > clock && div < 16)
- div++;
-
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host_clock / pre_div / div);
-
- pre_div >>= 1;
- div--;
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | (div << ESDHC_DIVIDER_SHIFT)
- | (pre_div << ESDHC_PREDIV_SHIFT));
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- mdelay(1);
-out:
- host->clock = clock;
-}
-
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index e328252ebf2..0b249970b11 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -199,6 +199,14 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
+
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
+ if (clock == 0)
+ goto out;
+
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
if (clock > 20000000)
@@ -207,8 +215,31 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
clock -= 5000000;
}
- /* Set the clock */
- esdhc_set_clock(host, clock, host->max_clk);
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(1);
+out:
+ host->clock = clock;
}
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d7d6bc8968d..8f753811fc7 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -37,6 +37,12 @@
#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
/*
* PCI registers
@@ -356,6 +362,28 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
};
+/* Define Host controllers for Intel Merrifield platform */
+#define INTEL_MRFL_EMMC_0 0
+#define INTEL_MRFL_EMMC_1 1
+
+static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) &&
+ (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1))
+ /* SD support is not ready yet */
+ return -ENODEV;
+
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_1_8V_DDR;
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = intel_mrfl_mmc_probe_slot,
+};
+
/* O2Micro extra registers */
#define O2_SD_LOCK_WP 0xD3
#define O2_SD_MULTI_VCC3V 0xEE
@@ -939,6 +967,54 @@ static const struct pci_device_id pci_ids[] = {
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
},
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRFL_MMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
+ },
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7a7fb4f0d5a..bd8a0982aec 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -49,7 +49,6 @@ static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
-static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
@@ -981,7 +980,7 @@ static void sdhci_finish_data(struct sdhci_host *host)
tasklet_schedule(&host->finish_tasklet);
}
-static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
int flags;
u32 mask;
@@ -1053,6 +1052,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
+EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
@@ -1435,7 +1435,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
if (host->version >= SDHCI_SPEC_300 &&
- (ios->power_mode == MMC_POWER_UP))
+ (ios->power_mode == MMC_POWER_UP) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
sdhci_set_clock(host, ios->clock);
@@ -1875,6 +1876,14 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
return 0;
}
+ if (host->ops->platform_execute_tuning) {
+ spin_unlock(&host->lock);
+ enable_irq(host->irq);
+ err = host->ops->platform_execute_tuning(host, opcode);
+ sdhci_runtime_pm_put(host);
+ return err;
+ }
+
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/*
@@ -1981,6 +1990,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (!tuning_loop_counter || !timeout) {
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ err = -EIO;
} else {
if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
pr_info(DRIVER_NAME ": Tuning procedure"
@@ -2491,6 +2501,14 @@ again:
result = IRQ_HANDLED;
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+
+ /*
+ * If we know we'll call the driver to signal SDIO IRQ, disregard
+ * further indications of Card Interrupt in the status to avoid a
+ * needless loop.
+ */
+ if (cardint)
+ intmask &= ~SDHCI_INT_CARD_INT;
if (intmask && --max_loops)
goto again;
out:
@@ -2546,8 +2564,6 @@ EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
- int ret;
-
if (host->ops->platform_suspend)
host->ops->platform_suspend(host);
@@ -2559,19 +2575,6 @@ int sdhci_suspend_host(struct sdhci_host *host)
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
- ret = mmc_suspend_host(host->mmc);
- if (ret) {
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags |= SDHCI_NEEDS_RETUNING;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
- }
-
- sdhci_enable_card_detection(host);
-
- return ret;
- }
-
if (!device_may_wakeup(mmc_dev(host->mmc))) {
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
@@ -2579,14 +2582,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
sdhci_enable_irq_wakeups(host);
enable_irq_wake(host->irq);
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
- int ret;
+ int ret = 0;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
@@ -2615,7 +2618,6 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- ret = mmc_resume_host(host->mmc);
sdhci_enable_card_detection(host);
if (host->ops->platform_resume)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index b037f188fe4..0a3ed01887d 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -288,6 +288,7 @@ struct sdhci_ops {
unsigned int (*get_ro)(struct sdhci_host *host);
void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*platform_suspend)(struct sdhci_host *host);
@@ -393,6 +394,8 @@ static inline void *sdhci_priv(struct sdhci_host *host)
extern void sdhci_card_detect(struct sdhci_host *host);
extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
+extern void sdhci_send_command(struct sdhci_host *host,
+ struct mmc_command *cmd);
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
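
The sdhci core changes above add a platform_execute_tuning() hook to struct sdhci_ops and export sdhci_send_command(), so a glue driver can run its own tuning sequence; the call site in sdhci_execute_tuning() drops host->lock and re-enables the IRQ before invoking it. A bare-bones sketch of how a host driver might wire this up (my_execute_tuning and my_host_ops are hypothetical):

#include "sdhci.h"      /* local header in drivers/mmc/host */

static int my_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	/*
	 * Invoked with host->lock released and the interrupt line enabled,
	 * so the driver may sleep, issue its own tuning commands (for
	 * example via sdhci_send_command()) and step its sample delay until
	 * the tuning block reads back correctly.
	 */
	return 0;               /* or a negative errno if tuning failed */
}

static struct sdhci_ops my_host_ops = {
	.platform_execute_tuning = my_execute_tuning,
	/* .read_l / .write_l / ... as the platform requires */
};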
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index 50adbd155f3..b7e30577531 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -516,9 +516,7 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
#ifdef CONFIG_PM
static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
{
- struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "suspend\n");
- mmc_suspend_host(mmc);
return 0;
}
@@ -527,7 +525,6 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "resume\n");
sdricoh_reset(mmc_priv(mmc));
- mmc_resume_host(mmc);
return 0;
}
#else
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 36629a024aa..d032b080ac4 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -964,7 +964,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
- int ret = clk_enable(host->hclk);
+ int ret = clk_prepare_enable(host->hclk);
if (!ret) {
host->clk = clk_get_rate(host->hclk);
@@ -1018,7 +1018,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (host->power) {
pm_runtime_put_sync(&host->pd->dev);
- clk_disable(host->hclk);
+ clk_disable_unprepare(host->hclk);
host->power = false;
if (ios->power_mode == MMC_POWER_OFF)
sh_mmcif_set_power(host, ios);
@@ -1466,7 +1466,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mutex_init(&host->thread_lock);
- clk_disable(host->hclk);
+ clk_disable_unprepare(host->hclk);
ret = mmc_add_host(mmc);
if (ret < 0)
goto emmcaddh;
@@ -1487,7 +1487,7 @@ ereqirq1:
ereqirq0:
pm_runtime_suspend(&pdev->dev);
eresume:
- clk_disable(host->hclk);
+ clk_disable_unprepare(host->hclk);
eclkupdate:
clk_put(host->hclk);
eclkget:
@@ -1505,7 +1505,7 @@ static int sh_mmcif_remove(struct platform_device *pdev)
int irq[2];
host->dying = true;
- clk_enable(host->hclk);
+ clk_prepare_enable(host->hclk);
pm_runtime_get_sync(&pdev->dev);
dev_pm_qos_hide_latency_limit(&pdev->dev);
@@ -1530,7 +1530,7 @@ static int sh_mmcif_remove(struct platform_device *pdev)
if (irq[1] >= 0)
free_irq(irq[1], host);
- clk_disable(host->hclk);
+ clk_disable_unprepare(host->hclk);
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1538,28 +1538,21 @@ static int sh_mmcif_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
struct sh_mmcif_host *host = dev_get_drvdata(dev);
- int ret = mmc_suspend_host(host->mmc);
- if (!ret)
- sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- return ret;
+ return 0;
}
static int sh_mmcif_resume(struct device *dev)
{
- struct sh_mmcif_host *host = dev_get_drvdata(dev);
-
- return mmc_resume_host(host->mmc);
+ return 0;
}
-#else
-#define sh_mmcif_suspend NULL
-#define sh_mmcif_resume NULL
-#endif /* CONFIG_PM */
+#endif
static const struct of_device_id mmcif_of_match[] = {
{ .compatible = "renesas,sh-mmcif" },
@@ -1568,8 +1561,7 @@ static const struct of_device_id mmcif_of_match[] = {
MODULE_DEVICE_TABLE(of, mmcif_of_match);
static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
- .suspend = sh_mmcif_suspend,
- .resume = sh_mmcif_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};
static struct platform_driver sh_mmcif_driver = {
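
The sh_mmcif hunks switch the guard to CONFIG_PM_SLEEP and let SET_SYSTEM_SLEEP_PM_OPS() fill in the system sleep callbacks, which removes the need for the NULL #define fallbacks: the macro expands to nothing when CONFIG_PM_SLEEP is disabled. The same pattern in a hypothetical driver:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* mask controller interrupts, etc. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
	/* Populates the sleep callbacks only when CONFIG_PM_SLEEP is set. */
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};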
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 43d962829f8..d1760ebcac0 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1030,7 +1030,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
- return mmc_suspend_host(tifm_get_drvdata(sock));
+ return 0;
}
static int tifm_sd_resume(struct tifm_dev *sock)
@@ -1044,8 +1044,6 @@ static int tifm_sd_resume(struct tifm_dev *sock)
if (rc)
host->eject = 1;
- else
- rc = mmc_resume_host(mmc);
return rc;
}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index b3802256f95..f3b2d8ca1ec 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1145,12 +1145,9 @@ int tmio_mmc_host_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- int ret = mmc_suspend_host(mmc);
- if (!ret)
- tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
-
- return ret;
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
+ return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);
@@ -1163,7 +1160,7 @@ int tmio_mmc_host_resume(struct device *dev)
/* The MMC core will perform the complete set up */
host->resuming = true;
- return mmc_resume_host(mmc);
+ return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_resume);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 4f84586c6e9..63fac78b3d4 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1269,21 +1269,18 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
{
struct via_crdr_mmc_host *host;
- int ret = 0;
host = pci_get_drvdata(pcidev);
via_save_pcictrlreg(host);
via_save_sdcreg(host);
- ret = mmc_suspend_host(host->mmc);
-
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
pci_disable_device(pcidev);
pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
- return ret;
+ return 0;
}
static int via_sd_resume(struct pci_dev *pcidev)
@@ -1316,8 +1313,6 @@ static int via_sd_resume(struct pci_dev *pcidev)
via_restore_pcictrlreg(sdhost);
via_init_sdc_pm(sdhost);
- ret = mmc_resume_host(sdhost->mmc);
-
return ret;
}
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e9028ad05ff..4262296c12f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2392,26 +2392,12 @@ static void vub300_disconnect(struct usb_interface *interface)
#ifdef CONFIG_PM
static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
- if (!vub300 || !vub300->mmc) {
- return 0;
- } else {
- struct mmc_host *mmc = vub300->mmc;
- mmc_suspend_host(mmc);
- return 0;
- }
+ return 0;
}
static int vub300_resume(struct usb_interface *intf)
{
- struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
- if (!vub300 || !vub300->mmc) {
- return 0;
- } else {
- struct mmc_host *mmc = vub300->mmc;
- mmc_resume_host(mmc);
- return 0;
- }
+ return 0;
}
#else
#define vub300_suspend NULL
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index e954b775887..1defd5ed323 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1814,28 +1814,11 @@ static void wbsd_pnp_remove(struct pnp_dev *dev)
#ifdef CONFIG_PM
-static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
-{
- BUG_ON(host == NULL);
-
- return mmc_suspend_host(host->mmc);
-}
-
-static int wbsd_resume(struct wbsd_host *host)
-{
- BUG_ON(host == NULL);
-
- wbsd_init_device(host);
-
- return mmc_resume_host(host->mmc);
-}
-
static int wbsd_platform_suspend(struct platform_device *dev,
pm_message_t state)
{
struct mmc_host *mmc = platform_get_drvdata(dev);
struct wbsd_host *host;
- int ret;
if (mmc == NULL)
return 0;
@@ -1844,12 +1827,7 @@ static int wbsd_platform_suspend(struct platform_device *dev,
host = mmc_priv(mmc);
- ret = wbsd_suspend(host, state);
- if (ret)
- return ret;
-
wbsd_chip_poweroff(host);
-
return 0;
}
@@ -1872,7 +1850,8 @@ static int wbsd_platform_resume(struct platform_device *dev)
*/
mdelay(5);
- return wbsd_resume(host);
+ wbsd_init_device(host);
+ return 0;
}
#ifdef CONFIG_PNP
@@ -1880,16 +1859,12 @@ static int wbsd_platform_resume(struct platform_device *dev)
static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
- struct wbsd_host *host;
if (mmc == NULL)
return 0;
DBGF("Suspending...\n");
-
- host = mmc_priv(mmc);
-
- return wbsd_suspend(host, state);
+ return 0;
}
static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
@@ -1922,7 +1897,8 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
*/
mdelay(5);
- return wbsd_resume(host);
+ wbsd_init_device(host);
+ return 0;
}
#endif /* CONFIG_PNP */
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 34231d5168f..e902ed7846b 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -212,28 +212,14 @@ struct wmt_mci_priv {
static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
- u32 reg_tmp;
- if (enable) {
- if (priv->power_inverted) {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp | BM_SD_OFF,
- priv->sdmmc_base + SDMMC_BUSMODE);
- } else {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp & (~BM_SD_OFF),
- priv->sdmmc_base + SDMMC_BUSMODE);
- }
- } else {
- if (priv->power_inverted) {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp & (~BM_SD_OFF),
- priv->sdmmc_base + SDMMC_BUSMODE);
- } else {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp | BM_SD_OFF,
- priv->sdmmc_base + SDMMC_BUSMODE);
- }
- }
+ u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+
+ if (enable ^ priv->power_inverted)
+ reg_tmp &= ~BM_SD_OFF;
+ else
+ reg_tmp |= BM_SD_OFF;
+
+ writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}
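The eight-branch if/else collapses because the bus is powered on exactly when enable and power_inverted differ; the truth table below, worked out from the removed branches, shows the equivalence:

enable  power_inverted  enable ^ power_inverted  BM_SD_OFF
  1           0                    1              cleared (power on)
  1           1                    0              set     (power off)
  0           0                    0              set     (power off)
  0           1                    1              cleared (power on)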
static void wmt_mci_read_response(struct mmc_host *mmc)
@@ -939,28 +925,23 @@ static int wmt_mci_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct wmt_mci_priv *priv;
- int ret;
if (!mmc)
return 0;
priv = mmc_priv(mmc);
- ret = mmc_suspend_host(mmc);
-
- if (!ret) {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
- SDMMC_BUSMODE);
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
+ SDMMC_BUSMODE);
- reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
- writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);
+ reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
+ writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);
- writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
- writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
- clk_disable(priv->clk_sdmmc);
- }
- return ret;
+ clk_disable(priv->clk_sdmmc);
+ return 0;
}
static int wmt_mci_resume(struct device *dev)
@@ -969,7 +950,6 @@ static int wmt_mci_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct wmt_mci_priv *priv;
- int ret = 0;
if (mmc) {
priv = mmc_priv(mmc);
@@ -987,10 +967,9 @@ static int wmt_mci_resume(struct device *dev)
writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
SDMMC_INTMASK0);
- ret = mmc_resume_host(mmc);
}
- return ret;
+ return 0;
}
static const struct dev_pm_ops wmt_mci_pm = {
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 9279a9174f8..7a6384b0962 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -27,11 +27,13 @@
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define FACTORY_MAGIC 0x59544346 /* FCTY */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
#define ML_MAGIC1 0x39685a42
#define ML_MAGIC2 0x26594131
#define TRX_MAGIC 0x30524448
+#define SQSH_MAGIC 0x71736873 /* shsq */
struct trx_header {
uint32_t magic;
@@ -71,7 +73,14 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Alloc */
parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ if (!buf) {
+ kfree(parts);
+ return -ENOMEM;
+ }
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
@@ -110,6 +119,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,
continue;
}
+ /* Found on Huawei E970 */
+ if (buf[0x000 / 4] == FACTORY_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "factory",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
/* POT(TOP) */
if (buf[0x000 / 4] == POT_MAGIC1 &&
(buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
@@ -167,6 +183,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset = rounddown(offset + trx->length, blocksize);
continue;
}
+
+ /* Squashfs on devices not using TRX */
+ if (buf[0x000 / 4] == SQSH_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset, 0);
+ continue;
+ }
}
/* Look for NVRAM at the end of the last block. */
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 74ab4b7e523..01281382180 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -95,13 +95,6 @@ config MTD_M25P80
if you want to specify device partitioning or to use a device which
doesn't support the JEDEC ID instruction.
-config M25PXX_USE_FAST_READ
- bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz"
- depends on MTD_M25P80
- default y
- help
- This option enables FAST_READ access supported by ST M25Pxx.
-
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 5cb4c04726b..d9fd87a4c8d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
+#include <linux/major.h>
/* Info for the block device */
struct block2mtd_dev {
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 3e1b0a0ef4d..4f091c1a998 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2097,7 +2097,7 @@ notfound:
ret = -ENODEV;
dev_info(dev, "No supported DiskOnChip found\n");
err_probe:
- kfree(cascade->bch);
+ free_bch(cascade->bch);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6bc9618af09..7eda71dbc18 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -78,7 +78,7 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define MAX_CMD_SIZE 5
+#define MAX_CMD_SIZE 6
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
@@ -367,10 +367,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_message_init(&m);
memset(t, 0, (sizeof t));
- /* NOTE:
- * OPCODE_FAST_READ (if available) is faster.
- * Should add 1 byte DUMMY_BYTE.
- */
t[0].tx_buf = flash->command;
t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
spi_message_add_tail(&t[0], &m);
@@ -388,11 +384,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
return 1;
}
- /* FIXME switch to OPCODE_FAST_READ. It's required for higher
- * clocks; and at this writing, every chip this driver handles
- * supports that opcode.
- */
-
/* Set up the write data buffer. */
opcode = flash->read_opcode;
flash->command[0] = opcode;
@@ -749,16 +740,19 @@ static const struct spi_device_id m25p_ids[] = {
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
/* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
/* Everspin */
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
/* GigaDevice */
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
@@ -775,6 +769,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
@@ -783,15 +778,16 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
/* Micron */
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
/* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -940,12 +936,7 @@ static int m25p_probe(struct spi_device *spi)
struct flash_info *info;
unsigned i;
struct mtd_part_parser_data ppdata;
- struct device_node __maybe_unused *np = spi->dev.of_node;
-
-#ifdef CONFIG_MTD_OF_PARTS
- if (!of_device_is_available(np))
- return -ENODEV;
-#endif
+ struct device_node *np = spi->dev.of_node;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -992,15 +983,13 @@ static int m25p_probe(struct spi_device *spi)
}
}
- flash = kzalloc(sizeof *flash, GFP_KERNEL);
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
- GFP_KERNEL);
- if (!flash->command) {
- kfree(flash);
+
+ flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
+ if (!flash->command)
return -ENOMEM;
- }
flash->spi = spi;
mutex_init(&flash->lock);
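The kzalloc()/kfree() pair here, as in the sst25l, bcm47xxnflash and gpmi-nand hunks further down, is replaced by a device-managed allocation, so the probe error paths and remove() no longer free anything by hand. A minimal sketch of the pattern, with hypothetical names:

static int foo_probe(struct spi_device *spi)
{
	struct foo *priv;

	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;	/* nothing to unwind */

	/*
	 * On any later failure just return the error; the memory is
	 * released automatically when probe fails or the device is unbound.
	 */
	return 0;
}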
@@ -1062,13 +1051,14 @@ static int m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
- flash->fast_read = false;
- if (np && of_property_read_bool(np, "m25p,fast-read"))
+ if (np)
+ /* If we were instantiated by DT, use it */
+ flash->fast_read = of_property_read_bool(np, "m25p,fast-read");
+ else
+ /* If we weren't instantiated by DT, default to fast-read */
flash->fast_read = true;
-#ifdef CONFIG_M25PXX_USE_FAST_READ
- flash->fast_read = true;
-#endif
+ /* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & M25P_NO_FR)
flash->fast_read = false;
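With the Kconfig option gone, the fast-read choice reduces to: honour the "m25p,fast-read" device-tree property when a DT node is present, default to fast read otherwise, and let M25P_NO_FR in the flash table veto it in either case. A condensed restatement of the logic above, for reference only:

bool fast_read = np ? of_property_read_bool(np, "m25p,fast-read") : true;

if (info->flags & M25P_NO_FR)
	fast_read = false;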
@@ -1133,15 +1123,9 @@ static int m25p_probe(struct spi_device *spi)
static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
- int status;
/* Clean up MTD stuff. */
- status = mtd_device_unregister(&flash->mtd);
- if (status == 0) {
- kfree(flash->command);
- kfree(flash);
- }
- return 0;
+ return mtd_device_unregister(&flash->mtd);
}
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0e8cbfeba11..4a47b0266d4 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -88,8 +88,6 @@ struct dataflash {
uint8_t command[4];
char name[24];
- unsigned partitioned:1;
-
unsigned short page_offset; /* offset in flash address */
unsigned int page_size; /* of bytes per page */
@@ -881,7 +879,7 @@ static int dataflash_probe(struct spi_device *spi)
break;
/* obsolete AT45DB1282 not (yet?) supported */
default:
- pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev),
+ dev_info(&spi->dev, "unsupported device (%x)\n",
status & 0x3c);
status = -ENODEV;
}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 67823de68db..e1f2aebaa48 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -94,7 +94,7 @@ static void unregister_devices(void)
}
}
-static int register_device(char *name, unsigned long start, unsigned long len)
+static int register_device(char *name, phys_addr_t start, size_t len)
{
struct phram_mtd_list *new;
int ret = -ENOMEM;
@@ -141,35 +141,35 @@ out0:
return ret;
}
-static int ustrtoul(const char *cp, char **endp, unsigned int base)
+static int parse_num64(uint64_t *num64, char *token)
{
- unsigned long result = simple_strtoul(cp, endp, base);
-
- switch (**endp) {
- case 'G':
- result *= 1024;
- case 'M':
- result *= 1024;
- case 'k':
- result *= 1024;
+ size_t len;
+ int shift = 0;
+ int ret;
+
+ len = strlen(token);
/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
- if ((*endp)[1] == 'i')
- (*endp) += 2;
+ if (len > 2) {
+ if (token[len - 1] == 'i') {
+ switch (token[len - 2]) {
+ case 'G':
+ shift += 10;
+ case 'M':
+ shift += 10;
+ case 'k':
+ shift += 10;
+ token[len - 2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
}
- return result;
-}
-static int parse_num32(uint32_t *num32, const char *token)
-{
- char *endp;
- unsigned long n;
+ ret = kstrtou64(token, 0, num64);
+ *num64 <<= shift;
- n = ustrtoul(token, &endp, 0);
- if (*endp)
- return -EINVAL;
-
- *num32 = n;
- return 0;
+ return ret;
}
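The suffix handling falls through on purpose, so 'G' accumulates a shift of 30, 'M' 20 and 'k' 10. A worked example, assuming the kstrtou64() call succeeds:

token = "512Mi"   ->  trailing "Mi" stripped, shift = 20
kstrtou64("512")  ->  *num64 = 512
*num64 <<= 20     ->  512 * 1024 * 1024 = 0x20000000 (512 MiB)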
static int parse_name(char **pname, const char *token)
@@ -209,19 +209,19 @@ static inline void kill_final_newline(char *str)
* This shall contain the module parameter if any. It is of the form:
* - phram=<device>,<address>,<size> for module case
* - phram.phram=<device>,<address>,<size> for built-in case
- * We leave 64 bytes for the device name, 12 for the address and 12 for the
+ * We leave 64 bytes for the device name, 20 for the address and 20 for the
* size.
* Example: phram.phram=rootfs,0xa0000000,512Mi
*/
-static __initdata char phram_paramline[64+12+12];
+static __initdata char phram_paramline[64 + 20 + 20];
static int __init phram_setup(const char *val)
{
- char buf[64+12+12], *str = buf;
+ char buf[64 + 20 + 20], *str = buf;
char *token[3];
char *name;
- uint32_t start;
- uint32_t len;
+ uint64_t start;
+ uint64_t len;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -243,13 +243,13 @@ static int __init phram_setup(const char *val)
if (ret)
return ret;
- ret = parse_num32(&start, token[1]);
+ ret = parse_num64(&start, token[1]);
if (ret) {
kfree(name);
parse_err("illegal start address\n");
}
- ret = parse_num32(&len, token[2]);
+ ret = parse_num64(&len, token[2]);
if (ret) {
kfree(name);
parse_err("illegal device length\n");
@@ -257,7 +257,7 @@ static int __init phram_setup(const char *val)
ret = register_device(name, start, len);
if (!ret)
- pr_info("%s device: %#x at %#x\n", name, len, start);
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
else
kfree(name);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index a42f1f0e728..687bf27ec85 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -364,7 +364,7 @@ static int sst25l_probe(struct spi_device *spi)
if (!flash_info)
return -ENODEV;
- flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
@@ -402,11 +402,8 @@ static int sst25l_probe(struct spi_device *spi)
ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
data ? data->parts : NULL,
data ? data->nr_parts : 0);
- if (ret) {
- kfree(flash);
- spi_set_drvdata(spi, NULL);
+ if (ret)
return -ENODEV;
- }
return 0;
}
@@ -414,12 +411,8 @@ static int sst25l_probe(struct spi_device *spi)
static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
- int ret;
- ret = mtd_device_unregister(&flash->mtd);
- if (ret == 0)
- kfree(flash);
- return ret;
+ return mtd_device_unregister(&flash->mtd);
}
static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 3af35148409..b66b541877f 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct INFTLrecord *inftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index d3cfe26beea..2ef19aa0086 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -703,7 +703,7 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
#define DO_XXLOCK_LOCK 1
#define DO_XXLOCK_UNLOCK 2
-int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
int ret = 0;
struct map_info *map = mtd->priv;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index f581ac1cf02..46d195fca94 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -180,7 +180,6 @@ static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index c2604f8b2a5..36da518915b 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -316,7 +316,6 @@ static void mtd_pci_remove(struct pci_dev *dev)
map->exit(dev, map);
kfree(map);
- pci_set_drvdata(dev, NULL);
pci_release_regions(dev);
}
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 676271659b3..10196f5a897 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -55,7 +55,7 @@ struct platram_info {
static inline struct platram_info *to_platram_info(struct platform_device *dev)
{
- return (struct platram_info *)platform_get_drvdata(dev);
+ return platform_get_drvdata(dev);
}
/* platram_setrw
@@ -257,21 +257,7 @@ static struct platform_driver platram_driver = {
},
};
-/* module init/exit */
-
-static int __init platram_init(void)
-{
- printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
- return platform_driver_register(&platram_driver);
-}
-
-static void __exit platram_exit(void)
-{
- platform_driver_unregister(&platram_driver);
-}
-
-module_init(platram_init);
-module_exit(platram_exit);
+module_platform_driver(platram_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index c77b68c9412..3051c4c3624 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -212,7 +212,6 @@ static void scb2_flash_remove(struct pci_dev *dev)
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
- pci_set_drvdata(dev, NULL);
}
static struct pci_device_id scb2_flash_pci_ids[] = {
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 2aef5dda522..485ea751c7f 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -32,6 +32,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
+#include <linux/major.h>
struct mtdblk_dev {
@@ -373,7 +374,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
- .major = 31,
+ .major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 92759a9d298..fb5dc89369d 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -24,6 +24,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
+#include <linux/major.h>
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
@@ -70,7 +71,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
- .major = 31,
+ .major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.readsect = mtdblock_readsect,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 684bfa39e4e..2147e733533 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -32,6 +32,7 @@
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
+#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
@@ -1099,7 +1100,7 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
return (unsigned long) -EINVAL;
ret = mtd_get_unmapped_area(mtd, len, offset, flags);
- return ret == -EOPNOTSUPP ? -ENOSYS : ret;
+ return ret == -EOPNOTSUPP ? -ENODEV : ret;
}
#endif
@@ -1124,9 +1125,9 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
#endif
return vm_iomap_memory(vma, map->phys, map->size);
}
- return -ENOSYS;
+ return -ENODEV;
#else
- return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
+ return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5e14d540ba2..92311a56939 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -157,6 +157,9 @@ static ssize_t mtd_type_show(struct device *dev,
case MTD_UBIVOLUME:
type = "ubi";
break;
+ case MTD_MLCNANDFLASH:
+ type = "mlc-nand";
+ break;
default:
type = "unknown";
}
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 334da5f583c..20c02a3b741 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/major.h>
/*
* compare superblocks to see if they're equivalent
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index d88529841d3..93ae6a6d94f 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,43 +96,15 @@ config MTD_NAND_OMAP2
config MTD_NAND_OMAP_BCH
depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
- tristate "Enable support for hardware BCH error correction"
+ tristate "Support hardware based BCH error correction"
default n
select BCH
- select BCH_CONST_PARAMS
help
- Support for hardware BCH error correction.
-
-choice
- prompt "BCH error correction capability"
- depends on MTD_NAND_OMAP_BCH
-
-config MTD_NAND_OMAP_BCH8
- bool "8 bits / 512 bytes (recommended)"
- help
- Support correcting up to 8 bitflips per 512-byte block.
- This will use 13 bytes of spare area per 512 bytes of page data.
- This is the recommended mode, as 4-bit mode does not work
- on some OMAP3 revisions, due to a hardware bug.
-
-config MTD_NAND_OMAP_BCH4
- bool "4 bits / 512 bytes"
- help
- Support correcting up to 4 bitflips per 512-byte block.
- This will use 7 bytes of spare area per 512 bytes of page data.
- Note that this mode does not work on some OMAP3 revisions, due to a
- hardware bug. Please check your OMAP datasheet before selecting this
- mode.
-
-endchoice
-
-if MTD_NAND_OMAP_BCH
-config BCH_CONST_M
- default 13
-config BCH_CONST_T
- default 4 if MTD_NAND_OMAP_BCH4
- default 8 if MTD_NAND_OMAP_BCH8
-endif
+ This config enables the ELM hardware engine, which can be used to
+ locate and correct errors when using the BCH ECC scheme. This offloads
+ the CPU from doing the ECC error search and correction in software.
+ However, some legacy OMAP families, such as OMAP2xxx and OMAP3xxx, do
+ not have an ELM engine and should not enable this config symbol.
config MTD_NAND_IDS
tristate
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 060feeaf6b3..59f08c44abd 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
dma_dev = host->dma_chan->device;
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
if (dma_mapping_error(dma_dev->dev, phys_addr)) {
@@ -1062,56 +1061,28 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
}
/*
- * Get ECC requirement in ONFI parameters, returns -1 if ONFI
- * parameters is not supported.
- * return 0 if success to get the ECC requirement.
- */
-static int get_onfi_ecc_param(struct nand_chip *chip,
- int *ecc_bits, int *sector_size)
-{
- *ecc_bits = *sector_size = 0;
-
- if (chip->onfi_params.ecc_bits == 0xff)
- /* TODO: the sector_size and ecc_bits need to be find in
- * extended ecc parameter, currently we don't support it.
- */
- return -1;
-
- *ecc_bits = chip->onfi_params.ecc_bits;
-
- /* The default sector size (ecc codeword size) is 512 */
- *sector_size = 512;
-
- return 0;
-}
-
-/*
- * Get ecc requirement from ONFI parameters ecc requirement.
+ * Get minimum ecc requirements from NAND.
* If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
- * will set them according to ONFI ecc requirement. Otherwise, use the
+ * will set them according to minimum ecc requirement. Otherwise, use the
* value in DTS file.
* return 0 if success. otherwise return error code.
*/
static int pmecc_choose_ecc(struct atmel_nand_host *host,
int *cap, int *sector_size)
{
- /* Get ECC requirement from ONFI parameters */
- *cap = *sector_size = 0;
- if (host->nand_chip.onfi_version) {
- if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
- dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+ /* Get minimum ECC requirements */
+ if (host->nand_chip.ecc_strength_ds) {
+ *cap = host->nand_chip.ecc_strength_ds;
+ *sector_size = host->nand_chip.ecc_step_ds;
+ dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
*cap, *sector_size);
- else
- dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
} else {
- dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
- }
- if (*cap == 0 && *sector_size == 0) {
*cap = 2;
*sector_size = 512;
+ dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
}
- /* If dts file doesn't specify then use the one in ONFI parameters */
+ /* If device tree doesn't specify, use NAND's minimum ECC parameters */
if (host->pmecc_corr_cap == 0) {
/* use the most fitable ecc bits (the near bigger one ) */
if (*cap <= 2)
@@ -1139,7 +1110,7 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
return 0;
}
-static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
+static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
struct mtd_info *mtd = &host->mtd;
@@ -1449,7 +1420,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
}
-#if defined(CONFIG_OF)
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
@@ -1457,7 +1427,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
u32 offset[2];
int ecc_mode;
struct atmel_nand_data *board = &host->board;
- enum of_gpio_flags flags;
+ enum of_gpio_flags flags = 0;
if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
if (val >= 32) {
@@ -1540,15 +1510,8 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
return 0;
}
-#else
-static int atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
-{
- return -EINVAL;
-}
-#endif
-static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
+static int atmel_hw_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
struct mtd_info *mtd = &host->mtd;
@@ -1987,7 +1950,7 @@ static struct platform_driver atmel_nand_nfc_driver;
/*
* Probe for the NAND device.
*/
-static int __init atmel_nand_probe(struct platform_device *pdev)
+static int atmel_nand_probe(struct platform_device *pdev)
{
struct atmel_nand_host *host;
struct mtd_info *mtd;
@@ -2019,7 +1982,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
mtd = &host->mtd;
nand_chip = &host->nand_chip;
host->dev = &pdev->dev;
- if (pdev->dev.of_node) {
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ /* Only when CONFIG_OF is enabled of_node can be parsed */
res = atmel_of_init_port(host, pdev->dev.of_node);
if (res)
goto err_nand_ioremap;
@@ -2177,14 +2141,13 @@ err_no_card:
if (host->dma_chan)
dma_release_channel(host->dma_chan);
err_nand_ioremap:
- platform_driver_unregister(&atmel_nand_nfc_driver);
return res;
}
/*
* Remove a NAND device.
*/
-static int __exit atmel_nand_remove(struct platform_device *pdev)
+static int atmel_nand_remove(struct platform_device *pdev)
{
struct atmel_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
@@ -2207,14 +2170,12 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_nand_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-nand" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
-#endif
static int atmel_nand_nfc_probe(struct platform_device *pdev)
{
@@ -2253,12 +2214,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_OF)
-static struct of_device_id atmel_nand_nfc_match[] = {
+static const struct of_device_id atmel_nand_nfc_match[] = {
{ .compatible = "atmel,sama5d3-nfc" },
{ /* sentinel */ }
};
-#endif
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
static struct platform_driver atmel_nand_nfc_driver = {
.driver = {
@@ -2270,7 +2230,8 @@ static struct platform_driver atmel_nand_nfc_driver = {
};
static struct platform_driver atmel_nand_driver = {
- .remove = __exit_p(atmel_nand_remove),
+ .probe = atmel_nand_probe,
+ .remove = atmel_nand_remove,
.driver = {
.name = "atmel_nand",
.owner = THIS_MODULE,
@@ -2278,7 +2239,7 @@ static struct platform_driver atmel_nand_driver = {
},
};
-module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe);
+module_platform_driver(atmel_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 7bae569fdc7..10744591131 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -29,11 +29,9 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
struct bcm47xxnflash *b47n;
int err = 0;
- b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
- if (!b47n) {
- err = -ENOMEM;
- goto out;
- }
+ b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+ if (!b47n)
+ return -ENOMEM;
b47n->nand_chip.priv = b47n;
b47n->mtd.owner = THIS_MODULE;
@@ -48,22 +46,16 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
}
if (err) {
pr_err("Initialization failed: %d\n", err);
- goto err_init;
+ return err;
}
err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
- goto err_dev_reg;
+ return err;
}
return 0;
-
-err_dev_reg:
-err_init:
- kfree(b47n);
-out:
- return err;
}
static int bcm47xxnflash_remove(struct platform_device *pdev)
@@ -85,22 +77,4 @@ static struct platform_driver bcm47xxnflash_driver = {
},
};
-static int __init bcm47xxnflash_init(void)
-{
- int err;
-
- err = platform_driver_register(&bcm47xxnflash_driver);
- if (err)
- pr_err("Failed to register bcm47xx nand flash driver: %d\n",
- err);
-
- return err;
-}
-
-static void __exit bcm47xxnflash_exit(void)
-{
- platform_driver_unregister(&bcm47xxnflash_driver);
-}
-
-module_init(bcm47xxnflash_init);
-module_exit(bcm47xxnflash_exit);
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 2ed2bb33a6e..370b9dd7a27 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1394,7 +1394,7 @@ static struct nand_bbt_descr bbt_mirror_descr = {
};
/* initialize driver data structures */
-void denali_drv_init(struct denali_nand_info *denali)
+static void denali_drv_init(struct denali_nand_info *denali)
{
denali->idx = 0;
@@ -1520,7 +1520,7 @@ int denali_init(struct denali_nand_info *denali)
* so just let controller do 15bit ECC for MLC and 8bit ECC for
* SLC if possible.
* */
- if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK &&
+ if (!nand_is_slc(&denali->nand) &&
(denali->mtd.oobsize > (denali->bbtskipbytes +
ECC_15BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE)))) {
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index e3e46623b2b..033f177a636 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -119,7 +119,6 @@ static void denali_pci_remove(struct pci_dev *dev)
iounmap(denali->flash_mem);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(denali);
}
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index eaa3c29ad86..b68a4959f70 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -38,7 +38,7 @@
#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
#endif
-static unsigned long __initdata doc_locations[] = {
+static unsigned long doc_locations[] __initdata = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 548db2389fa..1b0265e85a0 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -44,6 +44,7 @@
#include <linux/mtd/nand.h>
#include <linux/bch.h>
#include <linux/bitrev.h>
+#include <linux/jiffies.h>
/*
* In "reliable mode" consecutive 2k pages are used in parallel (in some
@@ -269,7 +270,7 @@ static int poll_status(struct docg4_priv *doc)
*/
uint16_t flash_status;
- unsigned int timeo;
+ unsigned long timeo;
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev, "%s...\n", __func__);
@@ -277,22 +278,18 @@ static int poll_status(struct docg4_priv *doc)
/* hardware quirk requires reading twice initially */
flash_status = readw(docptr + DOC_FLASHCONTROL);
- timeo = 1000;
+ timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
do {
cpu_relax();
flash_status = readb(docptr + DOC_FLASHCONTROL);
- } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+ } while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+ time_before(jiffies, timeo));
-
- if (!timeo) {
+ if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
dev_err(doc->dev, "%s: timed out!\n", __func__);
return NAND_STATUS_FAIL;
}
- if (unlikely(timeo < 50))
- dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
- __func__, timeo);
-
return 0;
}
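The busy-wait changes from a fixed iteration count to a wall-clock deadline. The generic kernel idiom, shown as a minimal sketch with placeholder register and bit names:

unsigned long timeo = jiffies + msecs_to_jiffies(200);
u8 status;

do {
	cpu_relax();
	status = readb(regs + STATUS_REG);
} while (!(status & READY_BIT) && time_before(jiffies, timeo));

if (!(status & READY_BIT))	/* re-test the condition, not the clock */
	return -ETIMEDOUT;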
@@ -494,7 +491,7 @@ static uint8_t docg4_read_byte(struct mtd_info *mtd)
return status;
}
- dev_warn(doc->dev, "unexpectd call to read_byte()\n");
+ dev_warn(doc->dev, "unexpected call to read_byte()\n");
return 0;
}
@@ -1239,7 +1236,6 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->block_markbad = docg4_block_markbad;
nand->read_buf = docg4_read_buf;
nand->write_buf = docg4_write_buf16;
- nand->scan_bbt = nand_default_bbt;
nand->erase_cmd = docg4_erase_block;
nand->ecc.read_page = docg4_read_page;
nand->ecc.write_page = docg4_write_page;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 20657209a47..c966fc7474c 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -650,8 +651,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->page_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
chip->phys_erase_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
- chip->ecclayout);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
chip->ecc.mode);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 317a771f158..43355779cff 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -135,6 +136,69 @@ static struct nand_ecclayout oob_4096_ecc8 = {
.oobfree = { {2, 6}, {136, 82} },
};
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+ .eccbytes = 256,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263,
+ },
+ .oobfree = { {2, 6}, {264, 80} },
+};
/*
* Generic flash bbt descriptors
@@ -441,20 +505,29 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (mtd->writesize > 512) {
nand_fcr0 =
(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
iowrite32be(
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
- &ifc->ifc_nand.nand_fir0);
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_SEQIN <<
- IFC_NAND_FCR0_CMD2_SHIFT));
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
iowrite32be(
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -463,8 +536,13 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
- iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
- &ifc->ifc_nand.nand_fir1);
+ iowrite32be(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -718,8 +796,6 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
chip->page_shift);
dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
chip->phys_erase_shift);
- dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
- chip->ecclayout);
dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
chip->ecc.mode);
dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
@@ -872,11 +948,25 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
} else {
layout = &oob_4096_ecc8;
chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
}
priv->bufnum_mask = 1;
break;
+ case CSOR_NAND_PGS_8K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_8192_ecc4;
+ } else {
+ layout = &oob_8192_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 0;
+ break;
+
default:
dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
return -ENODEV;
@@ -907,7 +997,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
iounmap(priv->vbase);
ifc_nand_ctrl->chips[priv->bank] = NULL;
- dev_set_drvdata(priv->dev, NULL);
return 0;
}
@@ -1082,25 +1171,7 @@ static struct platform_driver fsl_ifc_nand_driver = {
.remove = fsl_ifc_nand_remove,
};
-static int __init fsl_ifc_nand_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&fsl_ifc_nand_driver);
- if (ret)
- printk(KERN_ERR "fsl-ifc: Failed to register platform"
- "driver\n");
-
- return ret;
-}
-
-static void __exit fsl_ifc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_ifc_nand_driver);
-}
-
-module_init(fsl_ifc_nand_init);
-module_exit(fsl_ifc_nand_exit);
+module_platform_driver(fsl_ifc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 04e07252d74..4d203e84e8c 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -18,6 +18,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 3dc1a7564d8..8b2752263db 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
- flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
-
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
dma_dst = host->data_pa;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 4f8857fa48a..aaced29727f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -187,6 +187,12 @@ int gpmi_init(struct gpmi_nand_data *this)
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+ /*
+ * Decouple the chip select from dma channel. We use dma0 for all
+ * the chips.
+ */
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
gpmi_disable_clk(this);
return 0;
err_out:
@@ -1073,6 +1079,13 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+ /*
+ * In the imx6, all the ready/busy pins are bound
+ * together. So we only need to check chip 0.
+ */
+ if (GPMI_IS_MX6Q(this))
+ chip = 0;
+
/* MX28 shares the same R/B register as MX6Q. */
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index a9830ff8e3f..dabbc14db56 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -45,7 +45,10 @@ static struct nand_bbt_descr gpmi_bbt_descr = {
.pattern = scan_ff_pattern
};
-/* We will use all the (page + OOB). */
+/*
+ * We may change the layout if we can get the ECC info from the datasheet,
+ * else we will use all the (page + OOB).
+ */
static struct nand_ecclayout gpmi_hw_ecclayout = {
.eccbytes = 0,
.eccpos = { 0, },
@@ -354,9 +357,8 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
- int chipnr = this->current_chip;
-
- return this->dma_chans[chipnr];
+ /* We use the DMA channel 0 to access all the nand chips. */
+ return this->dma_chans[0];
}
/* Can we use the upper's buffer directly for DMA? */
@@ -392,8 +394,6 @@ static void dma_irq_callback(void *param)
struct gpmi_nand_data *this = param;
struct completion *dma_c = &this->dma_done;
- complete(dma_c);
-
switch (this->dma_type) {
case DMA_FOR_COMMAND:
dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
@@ -418,6 +418,8 @@ static void dma_irq_callback(void *param)
default:
pr_err("in wrong DMA operation.\n");
}
+
+ complete(dma_c);
}
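Moving complete() to the end of the callback matters because the thread blocked in wait_for_completion() may touch the DMA buffers as soon as it wakes; signalling before dma_unmap_sg() would let it race against buffers still mapped for DMA. The wait side looks roughly like this, sketched with generic dmaengine calls rather than the driver's own helpers:

init_completion(&this->dma_done);
/* ... prepare a descriptor with dma_irq_callback as its completion handler ... */
dmaengine_submit(desc);
dma_async_issue_pending(chan);
wait_for_completion(&this->dma_done);
/* only here are the buffers guaranteed to be unmapped again */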
int start_dma_without_bch_irq(struct gpmi_nand_data *this,
@@ -1263,14 +1265,22 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- /*
- * The BCH will use all the (page + oob).
- * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob.
- * But it can not stop some ioctls such MEMWRITEOOB which uses
- * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
- * these ioctls too.
- */
- return -EPERM;
+ struct nand_oobfree *of = mtd->ecclayout->oobfree;
+ int status = 0;
+
+ /* Do we have available oob area? */
+ if (!of->length)
+ return -EPERM;
+
+ if (!nand_is_slc(chip))
+ return -EPERM;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
+ chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -1568,8 +1578,6 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
{
- int ret;
-
/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
if (GPMI_IS_MX23(this))
this->swap_block_mark = false;
@@ -1577,12 +1585,8 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
this->swap_block_mark = true;
/* Set up the medium geometry */
- ret = gpmi_set_geometry(this);
- if (ret)
- return ret;
+ return gpmi_set_geometry(this);
- /* NAND boot init, depends on the gpmi_set_geometry(). */
- return nand_boot_init(this);
}
static void gpmi_nfc_exit(struct gpmi_nand_data *this)
@@ -1664,7 +1668,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan_ident(mtd, 1, NULL);
+ ret = nand_scan_ident(mtd, GPMI_IS_MX6Q(this) ? 2 : 1, NULL);
if (ret)
goto err_out;
@@ -1672,10 +1676,16 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
+ chip->options |= NAND_SKIP_BBTSCAN;
ret = nand_scan_tail(mtd);
if (ret)
goto err_out;
+ ret = nand_boot_init(this);
+ if (ret)
+ goto err_out;
+ chip->scan_bbt(mtd);
+
ppdata.of_node = this->pdev->dev.of_node;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (ret)
@@ -1691,19 +1701,19 @@ static const struct platform_device_id gpmi_ids[] = {
{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
- {},
+ {}
};
static const struct of_device_id gpmi_nand_id_table[] = {
{
.compatible = "fsl,imx23-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX23]
+ .data = (void *)&gpmi_ids[IS_MX23],
}, {
.compatible = "fsl,imx28-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX28]
+ .data = (void *)&gpmi_ids[IS_MX28],
}, {
.compatible = "fsl,imx6q-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX6Q]
+ .data = (void *)&gpmi_ids[IS_MX6Q],
}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -1722,7 +1732,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
- this = kzalloc(sizeof(*this), GFP_KERNEL);
+ this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
if (!this) {
pr_err("Failed to allocate per-device memory\n");
return -ENOMEM;
@@ -1752,7 +1762,6 @@ exit_nfc_init:
release_resources(this);
exit_acquire_resources:
dev_err(this->dev, "driver registration failed: %d\n", ret);
- kfree(this);
return ret;
}
@@ -1763,7 +1772,6 @@ static int gpmi_nand_remove(struct platform_device *pdev)
gpmi_nfc_exit(this);
release_resources(this);
- kfree(this);
return 0;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
index 53397cc290f..82114cdc833 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -108,6 +108,9 @@
#define HW_GPMI_CTRL1_CLR 0x00000068
#define HW_GPMI_CTRL1_TOG 0x0000006c
+#define BP_GPMI_CTRL1_DECOUPLE_CS 24
+#define BM_GPMI_CTRL1_DECOUPLE_CS (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index f4dd2a887ea..327d96c0350 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -905,7 +905,7 @@ static struct platform_driver lpc32xx_nand_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(lpc32xx_nand_match),
+ .of_match_table = lpc32xx_nand_match,
},
};
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index add75709d41..23e6974ccd2 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -893,7 +893,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Avoid extra scan if using BBT, setup BBT support */
if (host->ncfg->use_bbt) {
- chip->options |= NAND_SKIP_BBTSCAN;
chip->bbt_options |= NAND_BBT_USE_FLASH;
/*
@@ -915,13 +914,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto err_exit3;
}
- /* Standard layout in FLASH for bad block tables */
- if (host->ncfg->use_bbt) {
- if (nand_default_bbt(mtd) < 0)
- dev_err(&pdev->dev,
- "Error initializing default bad block tables\n");
- }
-
mtd->name = "nxp_lpc3220_slc";
ppdata.of_node = pdev->dev.of_node;
res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
@@ -1023,7 +1015,7 @@ static struct platform_driver lpc32xx_nand_driver = {
.driver = {
.name = LPC32XX_MODNAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(lpc32xx_nand_match),
+ .of_match_table = lpc32xx_nand_match,
},
};
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 3c60a000b42..439bc389641 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -36,7 +36,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mpc5121.h>
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index ce8242b6c3e..9dfdb06c508 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/completion.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
@@ -395,7 +396,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
if (useirq) {
if (!host->devtype_data->check_int(host)) {
- INIT_COMPLETION(host->op_completion);
+ reinit_completion(&host->op_completion);
irq_control(host, 1);
wait_for_completion(&host->op_completion);
}
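INIT_COMPLETION(x) was being replaced tree-wide by reinit_completion(&x), which only resets the done counter of an already-initialised completion. The usual lifecycle, shown as a sketch:

init_completion(&host->op_completion);		/* once, at setup time */

/* per operation */
reinit_completion(&host->op_completion);
/* start the hardware and enable its interrupt ... */
wait_for_completion(&host->op_completion);	/* complete() comes from the IRQ handler */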
@@ -1507,7 +1508,7 @@ static int mxcnd_probe(struct platform_device *pdev)
host->devtype_data->irq_control(host, 0);
err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
- IRQF_DISABLED, DRIVER_NAME, host);
+ 0, DRIVER_NAME, host);
if (err)
return err;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d340b2f198c..bd39f7b6790 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2912,12 +2912,13 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
/* get the info we want. */
ecc = (struct onfi_ext_ecc_info *)cursor;
- if (ecc->codeword_size) {
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
}
- pr_info("ONFI extended param page detected.\n");
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
ret = 0;
ext_out:
@@ -2935,29 +2936,34 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
- /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
- if (chip->options & NAND_BUSWIDTH_16) {
- pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
- return 0;
- }
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
return 0;
+ /*
+ * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
+ * with NAND_BUSWIDTH_16
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
+ return 0;
+ }
+
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
- pr_info("ONFI param page %d valid\n", i);
break;
}
}
- if (i == 3)
+ if (i == 3) {
+ pr_err("Could not find valid ONFI parameter page; aborting\n");
return 0;
+ }
/* Check version */
val = le16_to_cpu(p->revision);
@@ -2981,11 +2987,23 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
sanitize_string(p->model, sizeof(p->model));
if (!mtd->name)
mtd->name = p->model;
+
mtd->writesize = le32_to_cpu(p->byte_per_page);
- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+
+ /* See erasesize comment */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
*busw = NAND_BUSWIDTH_16;
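The fls()-based truncation above rounds a possibly non-power-of-2 ONFI count down to the nearest power of two before it feeds into erasesize and chipsize. A minimal sketch of the idiom (round_down_pow2 is an illustrative name, not something this patch adds):

        /* Round a non-zero 32-bit count down to the nearest power of two. */
        static inline u32 round_down_pow2(u32 n)
        {
                return 1U << (fls(n) - 1);  /* e.g. 96 pages/block -> 64 */
        }

A device reporting 96 pages per block of 4KiB pages therefore ends up with erasesize = 64 * 4096 = 256KiB, and the trailing 32 pages of each block are simply left unused.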
@@ -3009,10 +3027,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
/* The Extended Parameter Page is supported since ONFI 2.1. */
if (nand_flash_detect_ext_param_page(mtd, chip, p))
- pr_info("Failed to detect the extended param page.\n");
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
}
- pr_info("ONFI flash detected\n");
return 1;
}
@@ -3075,6 +3094,16 @@ static int nand_id_len(u8 *id_data, int arrlen)
return arrlen;
}
+/* Extract the bits of per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
+
/*
* Many new NAND share similar device ID codes, which represent the size of the
* chip. The rest of the parameters must be decoded according to generic or
@@ -3085,7 +3114,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
{
int extid, id_len;
/* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = id_data[2];
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* The 4th id byte is the important one */
extid = id_data[3];
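From here on the decode helpers test nand_is_slc(chip) rather than masking chip->cellinfo directly, with bits_per_cell filled in either by nand_get_bits_per_cell() above or from the ONFI parameter page. The helper itself is not shown in this diff; it is presumably the obvious one-liner:

        static inline bool nand_is_slc(struct nand_chip *chip)
        {
                return chip->bits_per_cell == 1;    /* one bit per cell = SLC */
        }

As a worked example, a cellinfo byte of 0x04 decodes to ((0x04 & 0x0C) >> 2) + 1 = 2 bits per cell (MLC) with the usual NAND_CI_CELLTYPE mask/shift of 0x0C/2, so nand_is_slc() returns false.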
@@ -3101,8 +3130,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
* ID to decide what to do.
*/
if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- id_data[5] != 0x00) {
+ !nand_is_slc(chip) && id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
extid >>= 2;
@@ -3134,7 +3162,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
(((extid >> 1) & 0x04) | (extid & 0x03));
*busw = 0;
} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+ !nand_is_slc(chip)) {
unsigned int tmp;
/* Calc pagesize */
@@ -3197,7 +3225,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
* - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
*/
if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
- !(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ nand_is_slc(chip) &&
(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
!(id_data[4] & 0x80) /* !BENAND */) {
mtd->oobsize = 32 * mtd->writesize >> 9;
@@ -3222,6 +3250,9 @@ static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize = mtd->writesize / 32;
*busw = type->options & NAND_BUSWIDTH_16;
+ /* All legacy ID NAND are small-page, SLC */
+ chip->bits_per_cell = 1;
+
/*
* Check for Spansion/AMD ID + repeating 5th, 6th byte since
* some Spansion chips have erasesize that conflicts with size
@@ -3258,11 +3289,11 @@ static void nand_decode_bbm_options(struct mtd_info *mtd,
* Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
* AMD/Spansion, and Macronix. All others scan only the first page.
*/
- if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ if (!nand_is_slc(chip) &&
(maf_id == NAND_MFR_SAMSUNG ||
maf_id == NAND_MFR_HYNIX))
chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
- else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ else if ((nand_is_slc(chip) &&
(maf_id == NAND_MFR_SAMSUNG ||
maf_id == NAND_MFR_HYNIX ||
maf_id == NAND_MFR_TOSHIBA ||
@@ -3286,7 +3317,7 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
mtd->erasesize = type->erasesize;
mtd->oobsize = type->oobsize;
- chip->cellinfo = id_data[2];
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
chip->chipsize = (uint64_t)type->chipsize << 20;
chip->options |= type->options;
chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
@@ -3441,11 +3472,13 @@ ident_done:
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " %dMiB, page size: %d, OOB size: %d\n",
+ pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name,
- (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
+ chip->onfi_version ? chip->onfi_params.model : type->name);
+
+ pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+ (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->writesize, mtd->oobsize);
return type;
}
@@ -3525,6 +3558,7 @@ int nand_scan_tail(struct mtd_info *mtd)
{
int i;
struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
@@ -3541,19 +3575,19 @@ int nand_scan_tail(struct mtd_info *mtd)
/*
* If no default placement scheme is given, select an appropriate one.
*/
- if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
+ if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
switch (mtd->oobsize) {
case 8:
- chip->ecc.layout = &nand_oob_8;
+ ecc->layout = &nand_oob_8;
break;
case 16:
- chip->ecc.layout = &nand_oob_16;
+ ecc->layout = &nand_oob_16;
break;
case 64:
- chip->ecc.layout = &nand_oob_64;
+ ecc->layout = &nand_oob_64;
break;
case 128:
- chip->ecc.layout = &nand_oob_128;
+ ecc->layout = &nand_oob_128;
break;
default:
pr_warn("No oob scheme defined for oobsize %d\n",
@@ -3570,64 +3604,62 @@ int nand_scan_tail(struct mtd_info *mtd)
* selected and we have 256 byte pagesize fallback to software ECC
*/
- switch (chip->ecc.mode) {
+ switch (ecc->mode) {
case NAND_ECC_HW_OOB_FIRST:
/* Similar to NAND_ECC_HW, but a separate read_page handle */
- if (!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) {
+ if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
pr_warn("No ECC functions supplied; "
"hardware ECC not possible\n");
BUG();
}
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc_oob_first;
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_hwecc;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_std;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.read_subpage)
- chip->ecc.read_subpage = nand_read_subpage;
- if (!chip->ecc.write_subpage)
- chip->ecc.write_subpage = nand_write_subpage_hwecc;
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage)
+ ecc->write_subpage = nand_write_subpage_hwecc;
case NAND_ECC_HW_SYNDROME:
- if ((!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) &&
- (!chip->ecc.read_page ||
- chip->ecc.read_page == nand_read_page_hwecc ||
- !chip->ecc.write_page ||
- chip->ecc.write_page == nand_write_page_hwecc)) {
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
pr_warn("No ECC functions supplied; "
"hardware ECC not possible\n");
BUG();
}
/* Use standard syndrome read/write page function? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_syndrome;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_syndrome;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_syndrome;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_syndrome;
-
- if (mtd->writesize >= chip->ecc.size) {
- if (!chip->ecc.strength) {
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
pr_warn("Driver must set ecc.strength when using hardware ECC\n");
BUG();
}
@@ -3635,23 +3667,23 @@ int nand_scan_tail(struct mtd_info *mtd)
}
pr_warn("%d byte HW ECC not possible on "
"%d byte page size, fallback to SW ECC\n",
- chip->ecc.size, mtd->writesize);
- chip->ecc.mode = NAND_ECC_SOFT;
+ ecc->size, mtd->writesize);
+ ecc->mode = NAND_ECC_SOFT;
case NAND_ECC_SOFT:
- chip->ecc.calculate = nand_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_subpage = nand_read_subpage;
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.size)
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
break;
case NAND_ECC_SOFT_BCH:
@@ -3659,88 +3691,83 @@ int nand_scan_tail(struct mtd_info *mtd)
pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
BUG();
}
- chip->ecc.calculate = nand_bch_calculate_ecc;
- chip->ecc.correct = nand_bch_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_subpage = nand_read_subpage;
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.write_oob = nand_write_oob_std;
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
/*
* Board driver should supply ecc.size and ecc.bytes values to
* select how many bits are correctable; see nand_bch_init()
* for details. Otherwise, default to 4 bits for large page
* devices.
*/
- if (!chip->ecc.size && (mtd->oobsize >= 64)) {
- chip->ecc.size = 512;
- chip->ecc.bytes = 7;
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->bytes = 7;
}
- chip->ecc.priv = nand_bch_init(mtd,
- chip->ecc.size,
- chip->ecc.bytes,
- &chip->ecc.layout);
- if (!chip->ecc.priv) {
+ ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
+ &ecc->layout);
+ if (!ecc->priv) {
pr_warn("BCH ECC initialization failed!\n");
BUG();
}
- chip->ecc.strength =
- chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
+ ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
break;
case NAND_ECC_NONE:
pr_warn("NAND_ECC_NONE selected by board driver. "
"This is not recommended!\n");
- chip->ecc.read_page = nand_read_page_raw;
- chip->ecc.write_page = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.write_oob = nand_write_oob_std;
- chip->ecc.size = mtd->writesize;
- chip->ecc.bytes = 0;
- chip->ecc.strength = 0;
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
break;
default:
- pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
+ pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
BUG();
}
/* For many systems, the standard OOB write also works for raw */
- if (!chip->ecc.read_oob_raw)
- chip->ecc.read_oob_raw = chip->ecc.read_oob;
- if (!chip->ecc.write_oob_raw)
- chip->ecc.write_oob_raw = chip->ecc.write_oob;
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
/*
* The number of bytes available for a client to place data into
* the out of band area.
*/
- chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length
- && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
- chip->ecc.layout->oobavail +=
- chip->ecc.layout->oobfree[i].length;
- mtd->oobavail = chip->ecc.layout->oobavail;
+ ecc->layout->oobavail = 0;
+ for (i = 0; ecc->layout->oobfree[i].length
+ && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
+ ecc->layout->oobavail += ecc->layout->oobfree[i].length;
+ mtd->oobavail = ecc->layout->oobavail;
/*
* Set the number of read / write steps for one page depending on ECC
* mode.
*/
- chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
pr_warn("Invalid ECC parameters\n");
BUG();
}
- chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+ ecc->total = ecc->steps * ecc->bytes;
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
- if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
- !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
- switch (chip->ecc.steps) {
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+ switch (ecc->steps) {
case 2:
mtd->subpage_sft = 1;
break;
@@ -3760,11 +3787,11 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->pagebuf = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
- if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
+ if ((ecc->mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
chip->options |= NAND_SUBPAGE_READ;
/* Fill in remaining MTD driver data */
- mtd->type = MTD_NANDFLASH;
+ mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
MTD_CAP_NANDFLASH;
mtd->_erase = nand_erase;
@@ -3785,9 +3812,9 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->writebufsize = mtd->writesize;
/* propagate ecc info to mtd_info */
- mtd->ecclayout = chip->ecc.layout;
- mtd->ecc_strength = chip->ecc.strength;
- mtd->ecc_step_size = chip->ecc.size;
+ mtd->ecclayout = ecc->layout;
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
/*
* Initialize bitflip_threshold to its default prior scan_bbt() call.
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
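A quick sanity check of the ecc->strength formula introduced for NAND_ECC_SOFT_BCH above: with the large-page defaults from this hunk (ecc->size = 512, ecc->bytes = 7), strength = 7 * 8 / fls(8 * 512) = 56 / 13 = 4 in integer arithmetic, i.e. four correctable bits per 512-byte ECC step, matching the 4-bit default promised in the comment.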
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index bc06196d573..c0615d1526f 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -412,25 +412,6 @@ static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
}
}
-/* Scan a given block full */
-static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, size_t readlen,
- int scanlen, int numpages)
-{
- int ret, j;
-
- ret = scan_read_oob(mtd, buf, offs, readlen);
- /* Ignore ECC errors when checking for BBM */
- if (ret && !mtd_is_bitflip_or_eccerr(ret))
- return ret;
-
- for (j = 0; j < numpages; j++, buf += scanlen) {
- if (check_pattern(buf, scanlen, mtd->writesize, bd))
- return 1;
- }
- return 0;
-}
-
/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int numpages)
@@ -477,24 +458,17 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
struct nand_chip *this = mtd->priv;
- int i, numblocks, numpages, scanlen;
+ int i, numblocks, numpages;
int startblock;
loff_t from;
- size_t readlen;
pr_info("Scanning device for bad blocks\n");
- if (bd->options & NAND_BBT_SCANALLPAGES)
- numpages = 1 << (this->bbt_erase_shift - this->page_shift);
- else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
numpages = 2;
else
numpages = 1;
- /* We need only read few bytes from the OOB area */
- scanlen = 0;
- readlen = bd->len;
-
if (chip == -1) {
numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
@@ -519,12 +493,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- if (bd->options & NAND_BBT_SCANALLPAGES)
- ret = scan_block_full(mtd, bd, from, buf, readlen,
- scanlen, numpages);
- else
- ret = scan_block_fast(mtd, bd, from, buf, numpages);
-
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
if (ret < 0)
return ret;
@@ -1392,4 +1361,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
}
EXPORT_SYMBOL(nand_scan_bbt);
-EXPORT_SYMBOL(nand_default_bbt);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index bdc1d15369f..42e8a770e63 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -575,7 +575,7 @@ static int alloc_device(struct nandsim *ns)
cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
if (IS_ERR(cfile))
return PTR_ERR(cfile);
- if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+ if (!cfile->f_op->read && !cfile->f_op->aio_read) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
goto err_close;
@@ -2372,7 +2372,7 @@ static int __init ns_init_module(void)
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
- if ((retval = nand_default_bbt(nsmtd)) != 0)
+ if ((retval = chip->scan_bbt(nsmtd)) != 0)
goto err_exit;
if ((retval = parse_badblocks(nand, nsmtd)) != 0)
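With nand_default_bbt() no longer exported (see the nand_bbt.c hunk above), out-of-core users such as nandsim go through the per-chip hook instead, which nand_scan_tail() sets up and which presumably still defaults to nand_default_bbt() internally. The calling pattern, as a sketch:

        struct nand_chip *chip = nsmtd->priv;

        retval = chip->scan_bbt(nsmtd);     /* replaces nand_default_bbt(nsmtd) */
        if (retval)
                goto err_exit;

The lpc32xx_slc hunk near the top of this section similarly stops calling nand_default_bbt() directly and lets nand_scan_tail() perform the BBT scan.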
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 8e148f1478f..69eaba690a9 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -30,6 +30,7 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 4ecf0e5fd48..f7772500990 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -25,10 +25,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-#include <linux/bch.h>
+#include <linux/mtd/nand_bch.h>
#include <linux/platform_data/elm.h>
-#endif
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -141,6 +139,8 @@
#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+#define BADBLOCK_MARKER_LENGTH 2
+
#ifdef CONFIG_MTD_NAND_OMAP_BCH
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
@@ -149,17 +149,6 @@ static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks
- */
-static uint8_t scan_ff_pattern[] = { 0xff };
-static struct nand_bbt_descr bb_descrip_flashbased = {
- .options = NAND_BBT_SCANALLPAGES,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
struct omap_nand_info {
struct nand_hw_control controller;
@@ -182,14 +171,10 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
struct gpmc_nand_regs reg;
-
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
- struct bch_control *bch;
- struct nand_ecclayout ecclayout;
+ /* fields specific for BCHx_HW ECC scheme */
bool is_elm_used;
struct device *elm_dev;
struct device_node *of_node;
-#endif
};
/**
@@ -1058,8 +1043,7 @@ static int omap_dev_ready(struct mtd_info *mtd)
}
}
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-
+#if defined(CONFIG_MTD_NAND_ECC_BCH) || defined(CONFIG_MTD_NAND_OMAP_BCH)
/**
* omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
* @mtd: MTD device structure
@@ -1140,7 +1124,9 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
/* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
+#endif
+#ifdef CONFIG_MTD_NAND_ECC_BCH
/**
* omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
* @mtd: MTD device structure
@@ -1225,7 +1211,9 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
return 0;
}
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
/**
* omap3_calculate_ecc_bch - Generate bytes of ECC bytes
* @mtd: MTD device structure
@@ -1519,38 +1507,6 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
}
/**
- * omap3_correct_data_bch - Decode received data and correct errors
- * @mtd: MTD device structure
- * @data: page data
- * @read_ecc: ecc read from nand flash
- * @calc_ecc: ecc read from HW ECC registers
- */
-static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
- u_char *read_ecc, u_char *calc_ecc)
-{
- int i, count;
- /* cannot correct more than 8 errors */
- unsigned int errloc[8];
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
-
- count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
- errloc);
- if (count > 0) {
- /* correct errors */
- for (i = 0; i < count; i++) {
- /* correct data only, not ecc bytes */
- if (errloc[i] < 8*512)
- data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
- pr_debug("corrected bitflip %u\n", errloc[i]);
- }
- } else if (count < 0) {
- pr_err("ecc unrecoverable error\n");
- }
- return count;
-}
-
-/**
* omap_write_page_bch - BCH ecc based write page function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1637,197 +1593,46 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * omap3_free_bch - Release BCH ecc resources
- * @mtd: MTD device structure
- */
-static void omap3_free_bch(struct mtd_info *mtd)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- if (info->bch) {
- free_bch(info->bch);
- info->bch = NULL;
- }
-}
-
-/**
- * omap3_init_bch - Initialize BCH ECC
- * @mtd: MTD device structure
- * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
- */
-static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
-{
- int max_errors;
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
-#ifdef CONFIG_MTD_NAND_OMAP_BCH8
- const int hw_errors = BCH8_MAX_ERROR;
-#else
- const int hw_errors = BCH4_MAX_ERROR;
-#endif
- enum bch_ecc bch_type;
- const __be32 *parp;
- int lenp;
- struct device_node *elm_node;
-
- info->bch = NULL;
-
- max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
- BCH8_MAX_ERROR : BCH4_MAX_ERROR;
- if (max_errors != hw_errors) {
- pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
- max_errors, hw_errors);
- goto fail;
- }
-
- info->nand.ecc.size = 512;
- info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
- info->nand.ecc.mode = NAND_ECC_HW;
- info->nand.ecc.strength = max_errors;
-
- if (hw_errors == BCH8_MAX_ERROR)
- bch_type = BCH8_ECC;
- else
- bch_type = BCH4_ECC;
-
- /* Detect availability of ELM module */
- parp = of_get_property(info->of_node, "elm_id", &lenp);
- if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
- pr_err("Missing elm_id property, fall back to Software BCH\n");
- info->is_elm_used = false;
- } else {
- struct platform_device *pdev;
-
- elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
- pdev = of_find_device_by_node(elm_node);
- info->elm_dev = &pdev->dev;
-
- if (elm_config(info->elm_dev, bch_type) == 0)
- info->is_elm_used = true;
- }
-
- if (info->is_elm_used && (mtd->writesize <= 4096)) {
-
- if (hw_errors == BCH8_MAX_ERROR)
- info->nand.ecc.bytes = BCH8_SIZE;
- else
- info->nand.ecc.bytes = BCH4_SIZE;
-
- info->nand.ecc.correct = omap_elm_correct_data;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch;
- info->nand.ecc.read_page = omap_read_page_bch;
- info->nand.ecc.write_page = omap_write_page_bch;
- } else {
- /*
- * software bch library is only used to detect and
- * locate errors
- */
- info->bch = init_bch(13, max_errors,
- 0x201b /* hw polynomial */);
- if (!info->bch)
- goto fail;
-
- info->nand.ecc.correct = omap3_correct_data_bch;
-
- /*
- * The number of corrected errors in an ecc block that will
- * trigger block scrubbing defaults to the ecc strength (4 or 8)
- * Set mtd->bitflip_threshold here to define a custom threshold.
- */
-
- if (max_errors == 8) {
- info->nand.ecc.bytes = 13;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
- } else {
- info->nand.ecc.bytes = 7;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
- }
- }
-
- pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
- return 0;
-fail:
- omap3_free_bch(mtd);
- return -1;
-}
-
-/**
- * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
- * @mtd: MTD device structure
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @omap_nand_info: NAND device structure containing platform data
+ * @bch_type: 0x0=BCH4, 0x1=BCH8, 0x2=BCH16
*/
-static int omap3_init_bch_tail(struct mtd_info *mtd)
+static int is_elm_present(struct omap_nand_info *info,
+ struct device_node *elm_node, enum bch_ecc bch_type)
{
- int i, steps, offset;
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- struct nand_ecclayout *layout = &info->ecclayout;
-
- /* build oob layout */
- steps = mtd->writesize/info->nand.ecc.size;
- layout->eccbytes = steps*info->nand.ecc.bytes;
-
- /* do not bother creating special oob layouts for small page devices */
- if (mtd->oobsize < 64) {
- pr_err("BCH ecc is not supported on small page devices\n");
- goto fail;
+ struct platform_device *pdev;
+ info->is_elm_used = false;
+ /* check whether elm-id is passed via DT */
+ if (!elm_node) {
+ pr_err("nand: error: ELM DT node not found\n");
+ return -ENODEV;
}
-
- /* reserve 2 bytes for bad block marker */
- if (layout->eccbytes+2 > mtd->oobsize) {
- pr_err("no oob layout available for oobsize %d eccbytes %u\n",
- mtd->oobsize, layout->eccbytes);
- goto fail;
+ pdev = of_find_device_by_node(elm_node);
+ /* check whether ELM device is registered */
+ if (!pdev) {
+ pr_err("nand: error: ELM device not found\n");
+ return -ENODEV;
}
-
- /* ECC layout compatible with RBL for BCH8 */
- if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
- offset = 2;
- else
- offset = mtd->oobsize - layout->eccbytes;
-
- /* put ecc bytes at oob tail */
- for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = offset + i;
-
- if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
- layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
- else
- layout->oobfree[0].offset = 2;
-
- layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
- info->nand.ecc.layout = layout;
-
- if (!(info->nand.options & NAND_BUSWIDTH_16))
- info->nand.badblock_pattern = &bb_descrip_flashbased;
+ /* ELM module available, now configure it */
+ info->elm_dev = &pdev->dev;
+ if (elm_config(info->elm_dev, bch_type))
+ return -ENODEV;
+ info->is_elm_used = true;
return 0;
-fail:
- omap3_free_bch(mtd);
- return -1;
-}
-
-#else
-static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
-{
- pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
- return -1;
-}
-static int omap3_init_bch_tail(struct mtd_info *mtd)
-{
- return -1;
-}
-static void omap3_free_bch(struct mtd_info *mtd)
-{
}
-#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct nand_ecclayout *ecclayout;
int err;
- int i, offset;
- dma_cap_mask_t mask;
- unsigned sig;
+ int i;
+ dma_cap_mask_t mask;
+ unsigned sig;
struct resource *res;
struct mtd_part_parser_data ppdata = {};
@@ -1837,7 +1642,8 @@ static int omap_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
- info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -1846,47 +1652,45 @@ static int omap_nand_probe(struct platform_device *pdev)
spin_lock_init(&info->controller.lock);
init_waitqueue_head(&info->controller.wq);
- info->pdev = pdev;
-
+ info->pdev = pdev;
info->gpmc_cs = pdata->cs;
info->reg = pdata->reg;
-
- info->mtd.priv = &info->nand;
- info->mtd.name = dev_name(&pdev->dev);
- info->mtd.owner = THIS_MODULE;
-
- info->nand.options = pdata->devsize;
- info->nand.options |= NAND_SKIP_BBTSCAN;
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
info->of_node = pdata->of_node;
-#endif
+ mtd = &info->mtd;
+ mtd->priv = &info->nand;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->owner = THIS_MODULE;
+ nand_chip = &info->nand;
+ nand_chip->ecc.priv = NULL;
+ nand_chip->options |= NAND_SKIP_BBTSCAN;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
err = -EINVAL;
dev_err(&pdev->dev, "error getting memory resource\n");
- goto out_free_info;
+ goto return_error;
}
info->phys_base = res->start;
info->mem_size = resource_size(res);
- if (!request_mem_region(info->phys_base, info->mem_size,
- pdev->dev.driver->name)) {
+ if (!devm_request_mem_region(&pdev->dev, info->phys_base,
+ info->mem_size, pdev->dev.driver->name)) {
err = -EBUSY;
- goto out_free_info;
+ goto return_error;
}
- info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
- if (!info->nand.IO_ADDR_R) {
+ nand_chip->IO_ADDR_R = devm_ioremap(&pdev->dev, info->phys_base,
+ info->mem_size);
+ if (!nand_chip->IO_ADDR_R) {
err = -ENOMEM;
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.controller = &info->controller;
+ nand_chip->controller = &info->controller;
- info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
- info->nand.cmd_ctrl = omap_hwcontrol;
+ nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+ nand_chip->cmd_ctrl = omap_hwcontrol;
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
@@ -1896,26 +1700,42 @@ static int omap_nand_probe(struct platform_device *pdev)
* device and read status register until you get a failure or success
*/
if (pdata->dev_ready) {
- info->nand.dev_ready = omap_dev_ready;
- info->nand.chip_delay = 0;
+ nand_chip->dev_ready = omap_dev_ready;
+ nand_chip->chip_delay = 0;
} else {
- info->nand.waitfunc = omap_wait;
- info->nand.chip_delay = 50;
+ nand_chip->waitfunc = omap_wait;
+ nand_chip->chip_delay = 50;
}
+ /* scan NAND device connected to chip controller */
+ nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ pr_err("nand device scan failed, may be bus-width mismatch\n");
+ err = -ENXIO;
+ goto return_error;
+ }
+
+ /* check for small page devices */
+ if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
+ pr_err("small page devices are not supported\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* re-populate low-level callbacks based on xfer modes */
switch (pdata->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- info->nand.read_buf = omap_read_buf_pref;
- info->nand.write_buf = omap_write_buf_pref;
+ nand_chip->read_buf = omap_read_buf_pref;
+ nand_chip->write_buf = omap_write_buf_pref;
break;
case NAND_OMAP_POLLED:
- if (info->nand.options & NAND_BUSWIDTH_16) {
- info->nand.read_buf = omap_read_buf16;
- info->nand.write_buf = omap_write_buf16;
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ nand_chip->read_buf = omap_read_buf16;
+ nand_chip->write_buf = omap_write_buf16;
} else {
- info->nand.read_buf = omap_read_buf8;
- info->nand.write_buf = omap_write_buf8;
+ nand_chip->read_buf = omap_read_buf8;
+ nand_chip->write_buf = omap_write_buf8;
}
break;
@@ -1927,7 +1747,7 @@ static int omap_nand_probe(struct platform_device *pdev)
if (!info->dma) {
dev_err(&pdev->dev, "DMA engine request failed\n");
err = -ENXIO;
- goto out_release_mem_region;
+ goto return_error;
} else {
struct dma_slave_config cfg;
@@ -1942,10 +1762,10 @@ static int omap_nand_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
err);
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
+ nand_chip->read_buf = omap_read_buf_dma_pref;
+ nand_chip->write_buf = omap_write_buf_dma_pref;
}
break;
@@ -1954,34 +1774,36 @@ static int omap_nand_probe(struct platform_device *pdev)
if (info->gpmc_irq_fifo <= 0) {
dev_err(&pdev->dev, "error getting fifo irq\n");
err = -ENODEV;
- goto out_release_mem_region;
+ goto return_error;
}
- err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
- IRQF_SHARED, "gpmc-nand-fifo", info);
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
info->gpmc_irq_fifo, err);
info->gpmc_irq_fifo = 0;
- goto out_release_mem_region;
+ goto return_error;
}
info->gpmc_irq_count = platform_get_irq(pdev, 1);
if (info->gpmc_irq_count <= 0) {
dev_err(&pdev->dev, "error getting count irq\n");
err = -ENODEV;
- goto out_release_mem_region;
+ goto return_error;
}
- err = request_irq(info->gpmc_irq_count, omap_nand_irq,
- IRQF_SHARED, "gpmc-nand-count", info);
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
info->gpmc_irq_count, err);
info->gpmc_irq_count = 0;
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.read_buf = omap_read_buf_irq_pref;
- info->nand.write_buf = omap_write_buf_irq_pref;
+ nand_chip->read_buf = omap_read_buf_irq_pref;
+ nand_chip->write_buf = omap_write_buf_irq_pref;
break;
@@ -1989,117 +1811,223 @@ static int omap_nand_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"xfer_type(%d) not supported!\n", pdata->xfer_type);
err = -EINVAL;
- goto out_release_mem_region;
+ goto return_error;
}
- /* select the ecc type */
- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
- info->nand.ecc.mode = NAND_ECC_SOFT;
- else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.strength = 1;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
- } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
- err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
- if (err) {
+ /* populate MTD interface based on ECC scheme */
+ nand_chip->ecc.layout = &omap_oobinfo;
+ ecclayout = &omap_oobinfo;
+ switch (pdata->ecc_opt) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.bytes = 3;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.calculate = omap_calculate_ecc;
+ nand_chip->ecc.hwctl = omap_enable_hwecc;
+ nand_chip->ecc.correct = omap_correct_data;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ if (nand_chip->options & NAND_BUSWIDTH_16)
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ else
+ ecclayout->eccpos[0] = 1;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 7;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch4;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &nand_chip->ecc.layout);
+ if (!nand_chip->ecc.priv) {
+ pr_err("nand: error: unable to use s/w BCH library\n");
err = -EINVAL;
- goto out_release_mem_region;
}
- }
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
- /* DIP switches on some boards change between 8 and 16 bit
- * bus widths for flash. Try the other width if the first try fails.
- */
- if (nand_scan_ident(&info->mtd, 1, NULL)) {
- info->nand.options ^= NAND_BUSWIDTH_16;
- if (nand_scan_ident(&info->mtd, 1, NULL)) {
- err = -ENXIO;
- goto out_release_mem_region;
+ case OMAP_ECC_BCH4_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ /* 14th bit is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 7 + 1;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ /* This ECC scheme requires ELM H/W block */
+ if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
+ pr_err("nand: error: could not initialize ELM\n");
+ err = -ENODEV;
+ goto return_error;
}
- }
-
- /* rom code layout */
- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
- if (info->nand.options & NAND_BUSWIDTH_16)
- offset = 2;
- else {
- offset = 1;
- info->nand.badblock_pattern = &bb_descrip_flashbased;
- }
- omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
- for (i = 0; i < omap_oobinfo.eccbytes; i++)
- omap_oobinfo.eccpos[i] = i+offset;
-
- omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
- omap_oobinfo.oobfree->length = info->mtd.oobsize -
- (offset + omap_oobinfo.eccbytes);
-
- info->nand.ecc.layout = &omap_oobinfo;
- } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
- /* build OOB layout for BCH ECC correction */
- err = omap3_init_bch_tail(&info->mtd);
- if (err) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 13;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch8;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &nand_chip->ecc.layout);
+ if (!nand_chip->ecc.priv) {
+ pr_err("nand: error: unable to use s/w BCH library\n");
err = -EINVAL;
- goto out_release_mem_region;
+ goto return_error;
+ }
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ case OMAP_ECC_BCH8_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ /* 14th bit is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 13 + 1;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* This ECC scheme requires ELM H/W block */
+ err = is_elm_present(info, pdata->elm_of_node, BCH8_ECC);
+ if (err < 0) {
+ pr_err("nand: error: could not initialize ELM\n");
+ goto return_error;
}
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ default:
+ pr_err("nand: error: invalid or unsupported ECC scheme\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* populate remaining ECC layout data */
+ ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
+ ecclayout->eccbytes);
+ for (i = 1; i < ecclayout->eccbytes; i++)
+ ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+ /* check if NAND device's OOB is enough to store ECC signatures */
+ if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+ pr_err("not enough OOB bytes required = %d, available=%d\n",
+ ecclayout->eccbytes, mtd->oobsize);
+ err = -EINVAL;
+ goto return_error;
}
/* second phase scan */
- if (nand_scan_tail(&info->mtd)) {
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
- goto out_release_mem_region;
+ goto return_error;
}
ppdata.of_node = pdata->of_node;
- mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
+ mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
pdata->nr_parts);
- platform_set_drvdata(pdev, &info->mtd);
+ platform_set_drvdata(pdev, mtd);
return 0;
-out_release_mem_region:
+return_error:
if (info->dma)
dma_release_channel(info->dma);
- if (info->gpmc_irq_count > 0)
- free_irq(info->gpmc_irq_count, info);
- if (info->gpmc_irq_fifo > 0)
- free_irq(info->gpmc_irq_fifo, info);
- release_mem_region(info->phys_base, info->mem_size);
-out_free_info:
- kfree(info);
-
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
return err;
}
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- omap3_free_bch(&info->mtd);
-
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
if (info->dma)
dma_release_channel(info->dma);
-
- if (info->gpmc_irq_count > 0)
- free_irq(info->gpmc_irq_count, info);
- if (info->gpmc_irq_fifo > 0)
- free_irq(info->gpmc_irq_fifo, info);
-
- /* Release NAND device, its internal structures and partitions */
- nand_release(&info->mtd);
- iounmap(info->nand.IO_ADDR_R);
- release_mem_region(info->phys_base, info->mem_size);
- kfree(info);
+ nand_release(mtd);
return 0;
}
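To make the shared layout code in the probe rework concrete: for OMAP_ECC_BCH8_CODE_HW on a typical 2KiB-page, 64-byte-OOB device (an assumed example, not taken from the patch), ecc.size = 512 and ecc.bytes = 13 + 1 = 14, so eccbytes = 14 * (2048 / 512) = 56, eccpos runs from BADBLOCK_MARKER_LENGTH (2) through 57, oobfree->offset = 58 and oobfree->length = 64 - (2 + 56) = 6. The final OOB-size check passes because 56 + 2 <= 64; the same scheme on a 32-byte-OOB part would instead fail with -EINVAL before nand_scan_tail() runs.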
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 5a67082c07e..4d174366a0f 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -28,6 +28,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c28d4e29af1..4cabdc9fda9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -39,6 +39,13 @@
#define NAND_STOP_DELAY (2 * HZ/50)
#define PAGE_CHUNK_SIZE (2048)
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE 256
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -164,6 +171,7 @@ struct pxa3xx_nand_info {
unsigned int buf_start;
unsigned int buf_count;
+ unsigned int buf_size;
/* DMA information */
int drcmr_dat;
@@ -540,7 +548,6 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
info->oob_size = 0;
info->use_ecc = 0;
info->use_spare = 1;
- info->use_dma = (use_dma) ? 1 : 0;
info->is_ready = 0;
info->retcode = ERR_NONE;
if (info->cs != 0)
@@ -912,26 +919,20 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
return 0;
}
-/* the maximum possible buffer size for large page with OOB data
- * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
- * data buffer and the DMA descriptor
- */
-#define MAX_BUFF_SIZE PAGE_SIZE
-
#ifdef ARCH_HAS_DMA
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
- int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+ int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
if (use_dma == 0) {
- info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
if (info->data_buff == NULL)
return -ENOMEM;
return 0;
}
- info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
&info->data_buff_phys, GFP_KERNEL);
if (info->data_buff == NULL) {
dev_err(&pdev->dev, "failed to allocate dma buffer\n");
@@ -945,11 +946,16 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
pxa3xx_nand_data_dma_irq, info);
if (info->data_dma_ch < 0) {
dev_err(&pdev->dev, "failed to request data dma\n");
- dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
return info->data_dma_ch;
}
+ /*
+ * Now that DMA buffers are allocated we turn on
+ * DMA proper for I/O operations.
+ */
+ info->use_dma = 1;
return 0;
}
@@ -958,7 +964,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
struct platform_device *pdev = info->pdev;
if (use_dma) {
pxa_free_dma(info->data_dma_ch);
- dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
} else {
kfree(info->data_buff);
@@ -967,7 +973,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
#else
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
- info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
if (info->data_buff == NULL)
return -ENOMEM;
return 0;
@@ -1081,7 +1087,16 @@ KEEP_CONFIG:
else
host->col_addr_cycles = 1;
+ /* release the initial buffer */
+ kfree(info->data_buff);
+
+ /* allocate the real data + oob buffer */
+ info->buf_size = mtd->writesize + mtd->oobsize;
+ ret = pxa3xx_nand_init_buff(info);
+ if (ret)
+ return ret;
info->oob_buff = info->data_buff + mtd->writesize;
+
if ((mtd->size >> chip->page_shift) > 65536)
host->row_addr_cycles = 3;
else
@@ -1187,15 +1202,18 @@ static int alloc_nand_resource(struct platform_device *pdev)
}
info->mmio_phys = r->start;
- ret = pxa3xx_nand_init_buff(info);
- if (ret)
+ /* Allocate a buffer to allow flash detection */
+ info->buf_size = INIT_BUFFER_SIZE;
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL) {
+ ret = -ENOMEM;
goto fail_disable_clk;
+ }
/* initialize all interrupts to be disabled */
disable_int(info, NDSR_MASK);
- ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
- pdev->name, info);
+ ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto fail_free_buf;
@@ -1207,7 +1225,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
fail_free_buf:
free_irq(irq, info);
- pxa3xx_nand_free_buff(info);
+ kfree(info->data_buff);
fail_disable_clk:
clk_disable_unprepare(info->clk);
return ret;
@@ -1412,7 +1430,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
- .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
+ .of_match_table = pxa3xx_nand_dt_ids,
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
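The pxa3xx changes split buffer handling into two stages: alloc_nand_resource() now allocates only a small INIT_BUFFER_SIZE (256-byte) buffer, enough for the STATUS/READID/PARAM commands used during detection, and the real data + OOB buffer (plus the DMA descriptor when DMA is in use) is allocated once the flash geometry is known on the KEEP_CONFIG path. A rough sketch of the resulting flow, using the names from the hunks above:

        /* probe time: detection-only buffer */
        info->buf_size = INIT_BUFFER_SIZE;
        info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);

        /* after detection, once mtd->writesize/oobsize are valid */
        kfree(info->data_buff);
        info->buf_size = mtd->writesize + mtd->oobsize;
        ret = pxa3xx_nand_init_buff(info);  /* kmalloc or dma_alloc_coherent */

Deferring info->use_dma = 1 until the coherent buffer exists keeps DMA transfers away from the small detection buffer.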
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 9dcf02d22aa..325930db3f0 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -181,7 +181,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
/* Set dma direction */
dev->dma_dir = do_read;
dev->dma_stage = 1;
- INIT_COMPLETION(dev->dma_done);
+ reinit_completion(&dev->dma_done);
dbg_verbose("doing dma %s ", do_read ? "read" : "write");
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 09dde7d2717..fe8058a4505 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -15,6 +15,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
@@ -149,17 +150,13 @@ static int socrates_nand_probe(struct platform_device *ofdev)
struct mtd_part_parser_data ppdata;
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR
- "socrates_nand: failed to allocate device structure.\n");
+ host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
return -ENOMEM;
- }
host->io_base = of_iomap(ofdev->dev.of_node, 0);
if (host->io_base == NULL) {
- printk(KERN_ERR "socrates_nand: ioremap failed\n");
- kfree(host);
+ dev_err(&ofdev->dev, "ioremap failed\n");
return -EIO;
}
@@ -211,9 +208,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
nand_release(mtd);
out:
- dev_set_drvdata(&ofdev->dev, NULL);
iounmap(host->io_base);
- kfree(host);
return res;
}
@@ -227,9 +222,7 @@ static int socrates_nand_remove(struct platform_device *ofdev)
nand_release(mtd);
- dev_set_drvdata(&ofdev->dev, NULL);
iounmap(host->io_base);
- kfree(host);
return 0;
}
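The socrates conversion leans on devm lifetime rules: memory obtained with devm_kzalloc() is released automatically when probe fails or the device is unbound, so the explicit kfree(host) calls and the dev_set_drvdata(..., NULL) resets can simply go away. The much larger omap2.c rework above uses the same pattern (devm_kzalloc, devm_request_mem_region, devm_ioremap, devm_request_irq), which is what allows its error handling to collapse into the single return_error label.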
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 396530d87ec..a3747c914d5 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -428,8 +428,7 @@ static int tmio_probe(struct platform_device *dev)
/* 15 us command delay time */
nand_chip->chip_delay = 15;
- retval = request_irq(irq, &tmio_irq,
- IRQF_DISABLED, dev_name(&dev->dev), tmio);
+ retval = request_irq(irq, &tmio_irq, 0, dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
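Dropping IRQF_DISABLED here, as in the mxc_nand and pxa3xx hunks above, is intended as a no-functional-change cleanup: the flag has long been a no-op because the genirq core always runs handlers with local interrupts disabled, so passing 0 requests exactly the same behaviour.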
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index c5f4ebf4b38..46f27de018c 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -50,7 +50,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct NFTLrecord *nftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
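mtd_type_is_nand() lets NFTL, SSFDC and the MTD tests below accept the MTD_MLCNANDFLASH type that nand_scan_tail() now reports for MLC parts, instead of matching MTD_NANDFLASH alone. The helper is not part of this diff; it is presumably the straightforward check:

        static inline int mtd_type_is_nand(const struct mtd_info *mtd)
        {
                return mtd->type == MTD_NANDFLASH ||
                       mtd->type == MTD_MLCNANDFLASH;
        }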
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 558071bf92d..6547c84afc3 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -159,7 +159,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
- INIT_COMPLETION(c->irq_done);
+ reinit_completion(&c->irq_done);
if (c->gpio_irq) {
result = gpio_get_value(c->gpio_irq);
if (result == -1) {
@@ -349,7 +349,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
timeout = jiffies + msecs_to_jiffies(20);
@@ -420,7 +420,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
timeout = jiffies + msecs_to_jiffies(20);
@@ -499,7 +499,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
@@ -544,7 +544,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
- INIT_COMPLETION(c->dma_done);
+ reinit_completion(&c->dma_done);
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
@@ -573,28 +573,6 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
static struct platform_driver omap2_onenand_driver;
-static int __adjust_timing(struct device *dev, void *data)
-{
- int ret = 0;
- struct omap2_onenand *c;
-
- c = dev_get_drvdata(dev);
-
- BUG_ON(c->setup == NULL);
-
- /* DMA is not in use so this is all that is needed */
- /* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, &c->freq);
-
- return ret;
-}
-
-int omap2_onenand_rephase(void)
-{
- return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
- NULL, __adjust_timing);
-}
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3f41f20062..1de33b5d390 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2556,10 +2556,6 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
int ret;
- /* Check for invalid offset */
- if (ofs > mtd->size)
- return -EINVAL;
-
onenand_get_device(mtd, FL_READING);
ret = onenand_block_isbad_nolock(mtd, ofs, 0);
onenand_release_device(mtd);
@@ -3529,7 +3525,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned die, bdry;
- int ret, syscfg, locked;
+ int syscfg, locked;
/* Disable ECC */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
@@ -3540,7 +3536,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->wait(mtd, FL_SYNCING);
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
- ret = this->wait(mtd, FL_READING);
+ this->wait(mtd, FL_READING);
bdry = this->read_word(this->base + ONENAND_DATARAM);
if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
@@ -3550,7 +3546,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- ret = this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3734,7 +3730,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
/* Check if boundary is locked */
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
- ret = this->wait(mtd, FL_READING);
+ this->wait(mtd, FL_READING);
thisboundary = this->read_word(this->base + ONENAND_DATARAM);
if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
@@ -3835,7 +3831,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
static int onenand_probe(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
- int maf_id, dev_id, ver_id;
+ int dev_id, ver_id;
int density;
int ret;
@@ -3843,8 +3839,7 @@ static int onenand_probe(struct mtd_info *mtd)
if (ret)
return ret;
- /* Read manufacturer and device IDs from Register */
- maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ /* Device and version IDs from Register */
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
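
The removed offset check in onenand_block_isbad() is redundant, presumably because the generic mtd layer already validates the offset before calling into the driver; the dropped maf_id read and the ignored wait() return values were simply unused. For reference, the core wrapper of this era looks roughly like this (paraphrased from drivers/mtd/mtdcore.c; treat the exact body as an assumption):

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
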
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index ab2a52a039c..daf82ba7aba 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -290,7 +290,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
int cis_sector;
/* Check for small page NAND flash */
- if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+ if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
mtd->size > UINT_MAX)
return;
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 3cd3aabbe1c..6f976159611 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -349,7 +349,7 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_mtddev;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
err = -ENODEV;
goto exit_nand;
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index ff35c465bfe..2e9e2d11f20 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -289,7 +289,7 @@ static int __init mtd_oobtest_init(void)
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 44b96e999ad..ed2d3f656fd 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -353,7 +353,7 @@ static int __init mtd_pagetest_init(void)
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index e2c0adf24cf..a876371ad41 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -299,7 +299,7 @@ static int __init mtd_subpagetest_init(void)
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c071d410488..33bb1f2b63e 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -900,10 +900,9 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
* number.
*/
image_seq = be32_to_cpu(ech->image_seq);
- if (!ubi->image_seq && image_seq)
+ if (!ubi->image_seq)
ubi->image_seq = image_seq;
- if (ubi->image_seq && image_seq &&
- ubi->image_seq != image_seq) {
+ if (image_seq && ubi->image_seq != image_seq) {
ubi_err("bad image sequence number %d in PEB %d, expected %d",
image_seq, pnum, ubi->image_seq);
ubi_dump_ec_hdr(ech);
@@ -1417,9 +1416,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
ai = alloc_ai("ubi_aeb_slab_cache2");
if (!ai)
return -ENOMEM;
- }
- err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ err = scan_all(ubi, ai, 0);
+ } else {
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
}
}
#else
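
Both attach.c hunks above implement one rule: adopt the first image sequence number seen, treat zero as "written by an older UBI with no sequence number", and only complain about a non-zero mismatch; the ubi_attach() hunk additionally adjusts where the fallback full scan starts depending on whether fresh attach info had to be allocated. A small stand-alone model of the acceptance rule (plain userspace C, invented names):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct ubi_state { uint32_t image_seq; };

static bool peb_seq_ok(struct ubi_state *ubi, uint32_t image_seq)
{
	if (!ubi->image_seq)
		ubi->image_seq = image_seq;	/* adopt the first seq seen */
	if (image_seq && ubi->image_seq != image_seq)
		return false;			/* bad image sequence number */
	return true;
}

int main(void)
{
	struct ubi_state ubi = { 0 };

	printf("%d\n", peb_seq_ok(&ubi, 7));	/* 1: adopted          */
	printf("%d\n", peb_seq_ok(&ubi, 0));	/* 1: legacy PEB, ok   */
	printf("%d\n", peb_seq_ok(&ubi, 9));	/* 0: mismatch, reject */
	return 0;
}
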
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 315dcc6ec1f..e05dc6298c1 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -41,6 +41,7 @@
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/major.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index f5aa4b02cfa..ead861307b3 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
for (i = 0; i < pool_size; i++) {
int scrub = 0;
+ int image_seq;
pnum = be32_to_cpu(pebs[i]);
@@ -425,10 +426,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
} else if (ret == UBI_IO_BITFLIPS)
scrub = 1;
- if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+
+ if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err("bad image seq: 0x%x, expected: 0x%x",
be32_to_cpu(ech->image_seq), ubi->image_seq);
- err = UBI_BAD_FASTMAP;
+ ret = UBI_BAD_FASTMAP;
goto out;
}
@@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->free);
+ ubi_assert(list_empty(&used));
+ ubi_assert(list_empty(&eba_orphans));
+ ubi_assert(list_empty(&free));
+
/*
* If fastmap is leaking PEBs (must not happen), raise a
* fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+
return ret;
}
@@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
for (i = 0; i < used_blocks; i++) {
+ int image_seq;
+
pnum = be32_to_cpu(fmsb->block_loc[i]);
if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
} else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[i] = 1;
+ image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq)
- ubi->image_seq = be32_to_cpu(ech->image_seq);
+ ubi->image_seq = image_seq;
- if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err("wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
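
The new fail-path loops in ubi_attach_fastmap() free whatever is still queued on the temporary used/eba_orphans/free lists, so an aborted fastmap attach no longer leaks attach-info entries. Since the three loops are identical, they could be factored into a helper along these lines (a sketch only; the ubi_ainf_peb type name and the u.list member are taken from the surrounding code, the helper itself is invented):

static void destroy_aeb_list(struct ubi_attach_info *ai, struct list_head *head)
{
	struct ubi_ainf_peb *aeb, *tmp;

	list_for_each_entry_safe(aeb, tmp, head, u.list) {
		list_del(&aeb->u.list);		/* unlink before freeing */
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
}
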
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index c95bfb183c6..02317c1c023 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -599,10 +599,6 @@ static void refill_wl_user_pool(struct ubi_device *ubi)
return_unused_pool_pebs(ubi, pool);
for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 1))
- break;
-
pool->pebs[pool->size] = __wl_get_peb(ubi);
if (pool->pebs[pool->size] < 0)
break;
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3a8c7532ee0..a7271e09384 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -102,8 +102,7 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_WD80x3
{wd_probe, 0},
#endif
-#if defined(CONFIG_NE2000) || \
- defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */
+#if defined(CONFIG_NE2000) /* ISA (use ne2k-pci for PCI cards) */
{ne_probe, 0},
#endif
#ifdef CONFIG_LANCE /* ISA/VLB (use pcnet32 for PCI cards) */
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4c21bf6b8b2..5a5d720da92 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0d8f427ade9..187b1b7772e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -136,41 +136,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
}
/**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
- struct slave *first_slave = bond_first_slave(bond);
-
- return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
- struct bonding *bond = __get_bond_by_port(port);
- struct slave *slave = port->slave, *slave_next;
-
- // If there's no bond for this port, or this is the last slave
- if (bond == NULL)
- return NULL;
- slave_next = bond_next_slave(bond, slave);
- if (!slave_next || bond_is_first_slave(bond, slave_next))
- return NULL;
-
- return &(SLAVE_AD_INFO(slave_next).port);
-}
-
-/**
* __get_first_agg - get the first aggregator in the bond
* @bond: the bond we're looking at
*
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
- struct slave *slave = aggregator->slave, *slave_next;
- struct bonding *bond = bond_get_bond_by_slave(slave);
-
- // If there's no bond for this aggregator, or this is the last slave
- if (bond == NULL)
- return NULL;
- slave_next = bond_next_slave(bond, slave);
- if (!slave_next || bond_is_first_slave(bond, slave_next))
- return NULL;
-
- return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
/*
* __agg_has_partner
*
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
*/
static struct aggregator *__get_active_agg(struct aggregator *aggregator)
{
- struct aggregator *retval = NULL;
+ struct bonding *bond = aggregator->slave->bond;
+ struct list_head *iter;
+ struct slave *slave;
- for (; aggregator; aggregator = __get_next_agg(aggregator)) {
- if (aggregator->is_active) {
- retval = aggregator;
- break;
- }
- }
+ bond_for_each_slave(bond, slave, iter)
+ if (SLAVE_AD_INFO(slave).aggregator.is_active)
+ return &(SLAVE_AD_INFO(slave).aggregator);
- return retval;
+ return NULL;
}
/**
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
{
struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
struct port *last_port = NULL, *curr_port;
+ struct list_head *iter;
+ struct bonding *bond;
+ struct slave *slave;
int found = 0;
// if the port is already Selected, do nothing
if (port->sm_vars & AD_PORT_SELECTED)
return;
+ bond = __get_bond_by_port(port);
+
// if the port is connected to other aggregator, detach it
if (port->aggregator) {
// detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
}
}
// search on all aggregators for a suitable aggregator for this port
- for (aggregator = __get_first_agg(port); aggregator;
- aggregator = __get_next_agg(aggregator)) {
+ bond_for_each_slave(bond, slave, iter) {
+ aggregator = &(SLAVE_AD_INFO(slave).aggregator);
// keep a free aggregator for later use(if needed)
if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
static void ad_agg_selection_logic(struct aggregator *agg)
{
struct aggregator *best, *active, *origin;
+ struct bonding *bond = agg->slave->bond;
+ struct list_head *iter;
+ struct slave *slave;
struct port *port;
origin = agg;
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- do {
+ bond_for_each_slave(bond, slave, iter) {
+ agg = &(SLAVE_AD_INFO(slave).aggregator);
+
agg->is_active = 0;
if (agg->num_of_ports && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
-
- } while ((agg = __get_next_agg(agg)));
+ }
if (best &&
__get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- for (agg = __get_first_agg(best->lag_ports); agg;
- agg = __get_next_agg(agg)) {
+ bond_for_each_slave(bond, slave, iter) {
+ agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
- if (origin->slave) {
- struct bonding *bond;
-
- bond = bond_get_bond_by_slave(origin->slave);
- if (bond)
- bond_3ad_set_carrier(bond);
- }
+ bond_3ad_set_carrier(bond);
}
/**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct port *port, *prev_port, *temp_port;
struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
int select_new_active_agg = 0;
+ struct bonding *bond = slave->bond;
+ struct slave *slave_iter;
+ struct list_head *iter;
// find the aggregator related to this slave
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
// reason to search for new aggregator, and that we will find one
if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
// find new aggregator for the related port(s)
- new_aggregator = __get_first_agg(port);
- for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+ bond_for_each_slave(bond, slave_iter, iter) {
+ new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
// if the new aggregator is empty, or it is connected to our port only
if (!new_aggregator->lag_ports
|| ((new_aggregator->lag_ports == port)
&& !new_aggregator->lag_ports->next_port_in_aggregator))
break;
}
+ if (!slave_iter)
+ new_aggregator = NULL;
// if new aggregator found, copy the aggregator's parameters
// and connect the related lag_ports to the new aggregator
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
// select new active aggregator
- ad_agg_selection_logic(__get_first_agg(port));
+ temp_aggregator = __get_first_agg(port);
+ if (temp_aggregator)
+ ad_agg_selection_logic(temp_aggregator);
}
}
}
pr_debug("Unbinding port %d\n", port->actor_port_number);
// find the aggregator that this port is connected to
- temp_aggregator = __get_first_agg(port);
- for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+ bond_for_each_slave(bond, slave_iter, iter) {
+ temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
prev_port = NULL;
// search the port in the aggregator's related ports
for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
ad_work.work);
- struct port *port;
struct aggregator *aggregator;
+ struct list_head *iter;
+ struct slave *slave;
+ struct port *port;
read_lock(&bond->lock);
//check if there are any slaves
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
// check if agg_select_timer timer after initialize is timed out
if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ slave = bond_first_slave(bond);
+ port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
// select the active aggregator for the bond
- if ((port = __get_first_port(bond))) {
+ if (port) {
if (!port->slave) {
pr_warning("%s: Warning: bond's first port is uninitialized\n",
bond->dev->name);
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
// for each port run the state machines
- for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
pr_warning("%s: Warning: Found an uninitialized port\n",
bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct ad_info *ad_info)
{
struct aggregator *aggregator = NULL;
+ struct list_head *iter;
+ struct slave *slave;
struct port *port;
- for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave).port);
if (port->aggregator && port->aggregator->is_active) {
aggregator = port->aggregator;
break;
@@ -2408,25 +2369,25 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
{
int ret;
- read_lock(&bond->lock);
+ rcu_read_lock();
ret = __bond_3ad_get_active_agg_info(bond, ad_info);
- read_unlock(&bond->lock);
+ rcu_read_unlock();
return ret;
}
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
{
- struct slave *slave, *start_at;
struct bonding *bond = netdev_priv(dev);
- int slave_agg_no;
- int slaves_in_agg;
- int agg_id;
- int i;
+ struct slave *slave, *first_ok_slave;
+ struct aggregator *agg;
struct ad_info ad_info;
+ struct list_head *iter;
+ int slaves_in_agg;
+ int slave_agg_no;
int res = 1;
+ int agg_id;
- read_lock(&bond->lock);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2437,20 +2398,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
agg_id = ad_info.aggregator_id;
if (slaves_in_agg == 0) {
- /*the aggregator is empty*/
pr_debug("%s: Error: active aggregator is empty\n", dev->name);
goto out;
}
- slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ first_ok_slave = NULL;
- bond_for_each_slave(bond, slave) {
- struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
- if (agg && (agg->aggregator_identifier == agg_id)) {
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && SLAVE_IS_OK(slave))
+ first_ok_slave = slave;
slave_agg_no--;
- if (slave_agg_no < 0)
- break;
+ continue;
+ }
+
+ if (SLAVE_IS_OK(slave)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ goto out;
}
}
@@ -2460,23 +2429,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- start_at = slave;
-
- bond_for_each_slave_from(bond, slave, i, start_at) {
- int slave_agg_id = 0;
- struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
- if (agg)
- slave_agg_id = agg->aggregator_identifier;
-
- if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
+ /* we couldn't find any suitable slave after the agg_no, so use the
+ * first suitable found, if found. */
+ if (first_ok_slave)
+ res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
out:
- read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -2515,11 +2473,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
void bond_3ad_update_lacp_rate(struct bonding *bond)
{
struct port *port = NULL;
+ struct list_head *iter;
struct slave *slave;
int lacp_fast;
lacp_fast = bond->params.lacp_fast;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
__get_state_machine_lock(port);
if (lacp_fast)
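
Throughout bond_3ad.c the private __get_first_port()/__get_next_port()/__get_next_agg() walkers are replaced by the list-based bond_for_each_slave() iterators, and bond_3ad_xmit_xor() now remembers the first usable slave so it can fall back to it when the hash-selected slave, or everything after it, is unusable. A stand-alone model of that selection loop (plain userspace C, invented names):

#include <stdio.h>
#include <stdbool.h>

struct port { int agg_id; bool up; const char *name; };

/* pick the n-th port of aggregate agg_id; if it (or everything after it)
 * is unusable, fall back to the first usable port seen in the aggregate */
static const char *pick_tx_port(const struct port *ports, int nports,
				int agg_id, int n)
{
	const char *first_ok = NULL;

	for (int i = 0; i < nports; i++) {
		const struct port *p = &ports[i];

		if (p->agg_id != agg_id)
			continue;
		if (n >= 0) {
			if (!first_ok && p->up)
				first_ok = p->name;
			n--;
			continue;
		}
		if (p->up)
			return p->name;
	}
	return first_ok;	/* may be NULL: nothing usable in this aggregate */
}

int main(void)
{
	const struct port ports[] = {
		{ 1, true,  "eth0" },
		{ 1, false, "eth1" },
		{ 2, true,  "eth2" },
		{ 1, true,  "eth3" },
	};

	/* index 1 of aggregate 1 is eth1, which is down; the first usable
	 * port after it is eth3 */
	printf("%s\n", pick_tx_port(ports, 4, 1, 1));
	return 0;
}
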
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f428ef57437..02872405d35 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
+ struct list_head *iter;
long long max_gap;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave);
@@ -382,30 +383,64 @@ out:
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct slave *rx_slave, *slave, *start_at;
- int i = 0;
+ struct slave *before = NULL, *rx_slave = NULL, *slave;
+ struct list_head *iter;
+ bool found = false;
- if (bond_info->next_rx_slave)
- start_at = bond_info->next_rx_slave;
- else
- start_at = bond_first_slave(bond);
+ bond_for_each_slave(bond, slave, iter) {
+ if (!SLAVE_IS_OK(slave))
+ continue;
+ if (!found) {
+ if (!before || before->speed < slave->speed)
+ before = slave;
+ } else {
+ if (!rx_slave || rx_slave->speed < slave->speed)
+ rx_slave = slave;
+ }
+ if (slave == bond_info->rx_slave)
+ found = true;
+ }
+ /* we didn't find anything after the current or we have something
+ * better before and up to the current slave
+ */
+ if (!rx_slave || (before && rx_slave->speed < before->speed))
+ rx_slave = before;
- rx_slave = NULL;
+ if (rx_slave)
+ bond_info->rx_slave = rx_slave;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (SLAVE_IS_OK(slave)) {
- if (!rx_slave) {
- rx_slave = slave;
- } else if (slave->speed > rx_slave->speed) {
+ return rx_slave;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static struct slave *__rlb_next_rx_slave(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *before = NULL, *rx_slave = NULL, *slave;
+ struct list_head *iter;
+ bool found = false;
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (!SLAVE_IS_OK(slave))
+ continue;
+ if (!found) {
+ if (!before || before->speed < slave->speed)
+ before = slave;
+ } else {
+ if (!rx_slave || rx_slave->speed < slave->speed)
rx_slave = slave;
- }
}
+ if (slave == bond_info->rx_slave)
+ found = true;
}
+ /* we didn't find anything after the current or we have something
+ * better before and up to the current slave
+ */
+ if (!rx_slave || (before && rx_slave->speed < before->speed))
+ rx_slave = before;
- if (rx_slave) {
- slave = bond_next_slave(bond, rx_slave);
- bond_info->next_rx_slave = slave;
- }
+ if (rx_slave)
+ bond_info->rx_slave = rx_slave;
return rx_slave;
}
@@ -626,12 +661,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct arp_pkt *arp = arp_pkt(skb);
- struct slave *assigned_slave;
+ struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
_lock_rx_hashtbl(bond);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -656,14 +693,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* that the new client can be assigned to this entry.
*/
if (bond->curr_active_slave &&
- client_info->slave != bond->curr_active_slave) {
- client_info->slave = bond->curr_active_slave;
+ client_info->slave != curr_active_slave) {
+ client_info->slave = curr_active_slave;
rlb_update_client(client_info);
}
}
}
/* assign a new slave */
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave) {
if (!(client_info->assigned &&
@@ -726,7 +763,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
*/
- if (!bond_slave_has_mac(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
/* loop through vlans and send one packet for each */
rcu_read_lock();
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper->priv_flags & IFF_802_1Q_VLAN)
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*/
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
- struct slave *tmp_slave1, *free_mac_slave = NULL;
struct slave *has_bond_addr = bond->curr_active_slave;
+ struct slave *tmp_slave1, *free_mac_slave = NULL;
+ struct list_head *iter;
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
/* this is the first slave */
return 0;
}
@@ -1196,7 +1234,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
- bond_for_each_slave(bond, tmp_slave1) {
+ bond_for_each_slave(bond, tmp_slave1, iter) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
@@ -1246,15 +1284,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
- char tmp_addr[ETH_ALEN];
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
struct sockaddr sa;
+ char tmp_addr[ETH_ALEN];
int res;
if (bond->alb_info.rlb_enabled)
return 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
@@ -1274,10 +1313,12 @@ unwind:
sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
- dev_set_mac_address(slave->dev, &sa);
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ if (rollback_slave == slave)
+ break;
+ memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ dev_set_mac_address(rollback_slave->dev, &sa);
+ memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
}
return res;
@@ -1337,11 +1378,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
- /* make sure that the curr_active_slave do not change during tx
- */
- read_lock(&bond->lock);
- read_lock(&bond->curr_slave_lock);
-
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (!tx_slave) {
/* unbalanced or unassigned, send through primary */
- tx_slave = bond->curr_active_slave;
+ tx_slave = rcu_dereference(bond->curr_active_slave);
bond_info->unbalanced_load += skb->len;
}
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != bond->curr_active_slave) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
memcpy(eth_data->h_source,
tx_slave->dev->dev_addr,
ETH_ALEN);
@@ -1443,8 +1479,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -1458,11 +1492,12 @@ void bond_alb_monitor(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct list_head *iter;
struct slave *slave;
read_lock(&bond->lock);
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
@@ -1480,7 +1515,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
- if (!list_empty(&bond->slave_list))
+ if (bond_has_slaves(bond))
alb_change_hw_addr_on_detach(bond, slave);
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled) {
- bond->alb_info.next_rx_slave = NULL;
+ bond->alb_info.rx_slave = NULL;
rlb_clear_slave(bond, slave);
}
}
@@ -1669,7 +1704,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
swap_slave = bond->curr_active_slave;
rcu_assign_pointer(bond->curr_active_slave, new_slave);
- if (!new_slave || list_empty(&bond->slave_list))
+ if (!new_slave || !bond_has_slaves(bond))
return;
/* set the new curr_active_slave to the bonds mac address
@@ -1692,6 +1727,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
ASSERT_RTNL();
+ /* in TLB mode, the slave might flip down/up with the old dev_addr,
+ * and thus filter bond->dev_addr's packets, so force bond's mac
+ */
+ if (bond->params.mode == BOND_MODE_TLB) {
+ struct sockaddr sa;
+ u8 tmp_addr[ETH_ALEN];
+
+ memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+
+ memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+ sa.sa_family = bond->dev->type;
+ /* we don't care if it can't change its mac, best effort */
+ dev_set_mac_address(new_slave->dev, &sa);
+
+ memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ }
+
/* curr_active_slave must be set before calling alb_swap_mac_addr */
if (swap_slave) {
/* swap mac address */
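
rlb_next_rx_slave() no longer chases a stored next_rx_slave pointer: it walks the slave list once and picks the fastest usable slave after the current rx slave, falling back to the fastest one at or before it (the __rlb variant is the same loop under rcu_read_lock()). A stand-alone model of that pick (plain userspace C, invented names):

#include <stdio.h>
#include <stdbool.h>

struct slave { const char *name; int speed; bool ok; };

static const struct slave *next_rx_slave(const struct slave *slaves, int n,
					 const struct slave *cur)
{
	const struct slave *before = NULL, *after = NULL;
	bool found = false;

	for (int i = 0; i < n; i++) {
		const struct slave *s = &slaves[i];

		if (!s->ok)
			continue;
		if (!found) {
			if (!before || before->speed < s->speed)
				before = s;
		} else {
			if (!after || after->speed < s->speed)
				after = s;
		}
		if (s == cur)
			found = true;
	}
	/* nothing usable after the current slave, or the best candidate
	 * before it is strictly faster: use the "before" candidate */
	if (!after || (before && after->speed < before->speed))
		after = before;
	return after;
}

int main(void)
{
	const struct slave slaves[] = {
		{ "eth0", 1000, true },
		{ "eth1",  100, true },
		{ "eth2", 1000, true },
	};
	const struct slave *s = next_rx_slave(slaves, 3, &slaves[0]);

	printf("next rx slave: %s\n", s ? s->name : "none");	/* eth2 */
	return 0;
}
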
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index c5eff5dafdf..4226044efd0 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -154,9 +154,7 @@ struct alb_bond_info {
u8 rx_ntt; /* flag - need to transmit
* to all rx clients
*/
- struct slave *next_rx_slave;/* next slave to be assigned
- * to a new rx client for
- */
+ struct slave *rx_slave;/* last slave to xmit from */
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e883bfe2e72..4dd5ee2a34c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -78,6 +78,8 @@
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
+#include <net/flow_keys.h>
+#include <linux/reciprocal_div.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -110,6 +112,7 @@ static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
+static int packets_per_slave = 1;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -159,7 +162,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
- "2 for layer 2+3");
+ "2 for layer 2+3, 3 for encap layer 2+3, "
+ "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
@@ -181,6 +185,10 @@ MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
"link failure");
+module_param(packets_per_slave, int, 0);
+MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
+ "mode; 0 for a random slave, 1 packet per "
+ "slave (default), >1 packets per slave.");
/*----------------------------- Global variables ----------------------------*/
@@ -217,6 +225,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
{ "layer2", BOND_XMIT_POLICY_LAYER2},
{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
+{ "encap2+3", BOND_XMIT_POLICY_ENCAP23},
+{ "encap3+4", BOND_XMIT_POLICY_ENCAP34},
{ NULL, -1},
};
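
The new encap2+3 and encap3+4 policies (and the bond_xmit_hash() call used later in bond_3ad_xmit_xor()) hash flow fields dissected from the packet, looking inside tunnel encapsulation via flow_keys, and reduce the result to a slave index. A toy, purely illustrative layer-3+4 hash in the same spirit (not the bonding driver's actual algorithm):

#include <stdint.h>
#include <stdio.h>

struct flow { uint32_t saddr, daddr; uint16_t sport, dport; uint8_t proto; };

static uint32_t l34_hash(const struct flow *f)
{
	uint32_t h = f->saddr ^ f->daddr ^ f->proto;

	h ^= ((uint32_t)f->sport << 16) | f->dport;
	/* cheap integer mix so similar flows don't collide trivially */
	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

int main(void)
{
	const struct flow f = { 0x0a000001, 0x0a000002, 40000, 443, 6 };

	printf("slave index: %u\n", l34_hash(&f) % 3);
	return 0;
}
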
@@ -332,10 +342,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
int res;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
res = vlan_vid_add(slave->dev, proto, vid);
if (res)
goto unwind;
@@ -344,9 +355,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
return 0;
unwind:
- /* unwind from the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave)
- vlan_vid_del(slave->dev, proto, vid);
+ /* unwind to the slave that failed */
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ if (rollback_slave == slave)
+ break;
+
+ vlan_vid_del(rollback_slave->dev, proto, vid);
+ }
return res;
}
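
bond_vlan_rx_add_vid() (like alb_set_mac_address() earlier) now unwinds forwards, walking the slave list from the head and stopping at the slave that failed, instead of the old bond_for_each_slave_continue_reverse() walk backwards from the failure point; both visit the same set of already-configured slaves. A stand-alone model of the forward-unwind idiom (plain userspace C, invented names):

#include <stdio.h>

#define NITEMS 4

static int setup(int i)	{ return i == 2 ? -1 : 0; }	/* third item fails */
static void undo(int i)	{ printf("undo %d\n", i); }

int main(void)
{
	int failed = -1;

	for (int i = 0; i < NITEMS; i++) {
		if (setup(i) < 0) {
			failed = i;
			break;
		}
	}
	if (failed < 0)
		return 0;

	/* forward unwind: roll back everything configured before the
	 * item that failed, then report the error */
	for (int i = 0; i < NITEMS; i++) {
		if (i == failed)
			break;
		undo(i);
	}
	return 1;
}
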
@@ -360,9 +375,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
vlan_vid_del(slave->dev, proto, vid);
if (bond_is_lb(bond))
@@ -382,15 +398,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
*/
static int bond_set_carrier(struct bonding *bond)
{
+ struct list_head *iter;
struct slave *slave;
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto down;
if (bond->params.mode == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
@@ -522,7 +539,9 @@ static int bond_check_dev_link(struct bonding *bond,
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
+ struct list_head *iter;
int err = 0;
+
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
@@ -532,7 +551,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
} else {
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = dev_set_promiscuity(slave->dev, inc);
if (err)
return err;
@@ -546,7 +565,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
*/
static int bond_set_allmulti(struct bonding *bond, int inc)
{
+ struct list_head *iter;
int err = 0;
+
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
@@ -556,7 +577,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
} else {
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = dev_set_allmulti(slave->dev, inc);
if (err)
return err;
@@ -774,43 +795,24 @@ static bool bond_should_change_active(struct bonding *bond)
/**
* find_best_interface - select the best available slave to be the active one
* @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
- struct slave *new_active, *old_active;
- struct slave *bestslave = NULL;
+ struct slave *slave, *bestslave = NULL;
+ struct list_head *iter;
int mintime = bond->params.updelay;
- int i;
- new_active = bond->curr_active_slave;
-
- if (!new_active) { /* there were no active slaves left */
- new_active = bond_first_slave(bond);
- if (!new_active)
- return NULL; /* still no slave, return NULL */
- }
-
- if ((bond->primary_slave) &&
- bond->primary_slave->link == BOND_LINK_UP &&
- bond_should_change_active(bond)) {
- new_active = bond->primary_slave;
- }
-
- /* remember where to stop iterating over the slaves */
- old_active = new_active;
-
- bond_for_each_slave_from(bond, new_active, i, old_active) {
- if (new_active->link == BOND_LINK_UP) {
- return new_active;
- } else if (new_active->link == BOND_LINK_BACK &&
- IS_UP(new_active->dev)) {
- /* link up, but waiting for stabilization */
- if (new_active->delay < mintime) {
- mintime = new_active->delay;
- bestslave = new_active;
- }
+ if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+ bond_should_change_active(bond))
+ return bond->primary_slave;
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP)
+ return slave;
+ if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+ slave->delay < mintime) {
+ mintime = slave->delay;
+ bestslave = slave;
}
}
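
bond_find_best_slave() loses its start-at-current circular walk: the primary slave wins outright if its link is up (the bond_should_change_active() policy check is elided below), otherwise the first slave with link up is taken, otherwise the "coming back" slave with the shortest remaining updelay. A stand-alone model (plain userspace C, invented names):

#include <stdio.h>
#include <stdbool.h>

enum link_state { LINK_UP, LINK_DOWN, LINK_BACK };

struct slave { const char *name; enum link_state link; int delay; bool dev_up; };

static const struct slave *find_best(const struct slave *slaves, int n,
				     const struct slave *primary, int updelay)
{
	const struct slave *best = NULL;
	int mintime = updelay;

	if (primary && primary->link == LINK_UP)
		return primary;

	for (int i = 0; i < n; i++) {
		const struct slave *s = &slaves[i];

		if (s->link == LINK_UP)
			return s;
		if (s->link == LINK_BACK && s->dev_up && s->delay < mintime) {
			mintime = s->delay;
			best = s;
		}
	}
	return best;
}

int main(void)
{
	const struct slave slaves[] = {
		{ "eth0", LINK_DOWN, 0, true },
		{ "eth1", LINK_BACK, 3, true },
		{ "eth2", LINK_BACK, 1, true },
	};
	const struct slave *s = find_best(slaves, 3, NULL, 100);

	printf("best: %s\n", s ? s->name : "none");	/* eth2 */
	return 0;
}
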
@@ -971,35 +973,6 @@ void bond_select_active_slave(struct bonding *bond)
}
}
-/*--------------------------- slave list handling ---------------------------*/
-
-/*
- * This function attaches the slave to the end of list.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
-{
- list_add_tail_rcu(&new_slave->list, &bond->slave_list);
- bond->slave_cnt++;
-}
-
-/*
- * This function detaches the slave from the list.
- * WARNING: no check is made to verify if the slave effectively
- * belongs to <bond>.
- * Nothing is freed on return, structures are just unchained.
- * If any slave pointer in bond was pointing to <slave>,
- * it should be changed by the calling function.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_detach_slave(struct bonding *bond, struct slave *slave)
-{
- list_del_rcu(&slave->list);
- bond->slave_cnt--;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
@@ -1046,9 +1019,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
if (IS_UP(slave->dev))
slave_disable_netpoll(slave);
}
@@ -1056,10 +1030,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
{
struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
struct slave *slave;
int err = 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = slave_enable_netpoll(slave);
if (err) {
bond_netpoll_cleanup(dev);
@@ -1087,10 +1062,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
netdev_features_t mask;
struct slave *slave;
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
/* Disable adding VLANs to empty bond. But why? --mq */
features |= NETIF_F_VLAN_CHALLENGED;
return features;
@@ -1100,7 +1076,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
slave->dev->features,
mask);
@@ -1118,16 +1094,17 @@ static void bond_compute_features(struct bonding *bond)
{
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+ struct net_device *bond_dev = bond->dev;
+ struct list_head *iter;
+ struct slave *slave;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int gso_max_size = GSO_MAX_SIZE;
- struct net_device *bond_dev = bond->dev;
u16 gso_max_segs = GSO_MAX_SEGS;
- struct slave *slave;
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto done;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
@@ -1233,15 +1210,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
static int bond_master_upper_dev_link(struct net_device *bond_dev,
- struct net_device *slave_dev)
+ struct net_device *slave_dev,
+ struct slave *slave)
{
int err;
- err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+ err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
if (err)
return err;
slave_dev->flags |= IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
return 0;
}
@@ -1250,7 +1228,7 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
{
netdev_upper_dev_unlink(slave_dev, bond_dev);
slave_dev->flags &= ~IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
}
/* enslave device <slave> to bond device <master> */
@@ -1258,7 +1236,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- struct slave *new_slave = NULL;
+ struct slave *new_slave = NULL, *prev_slave;
struct sockaddr addr;
int link_reporting;
int res = 0, i;
@@ -1313,7 +1291,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* bond ether type mutual exclusion - don't allow slaves of dissimilar
* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
*/
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
bond_dev->name,
@@ -1352,7 +1330,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (slave_ops->ndo_set_mac_address == NULL) {
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1346,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (list_empty(&bond->slave_list) &&
+ if (!bond_has_slaves(bond) &&
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
@@ -1377,7 +1355,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = -ENOMEM;
goto err_undo_flags;
}
- INIT_LIST_HEAD(&new_slave->list);
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1413,17 +1390,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = bond_master_upper_dev_link(bond_dev, slave_dev);
- if (res) {
- pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
- goto err_restore_mac;
- }
-
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
- goto err_unset_master;
+ goto err_restore_mac;
}
new_slave->bond = bond;
@@ -1479,21 +1450,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- write_lock_bh(&bond->lock);
-
- bond_attach_slave(bond, new_slave);
+ prev_slave = bond_last_slave(bond);
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- write_unlock_bh(&bond->lock);
-
- bond_compute_features(bond);
-
bond_update_speed_duplex(new_slave);
- read_lock(&bond->lock);
-
new_slave->last_arp_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1517,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- write_lock_bh(&bond->curr_slave_lock);
-
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave);
- bond_select_active_slave(bond);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1528,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
bond_set_slave_inactive_flags(new_slave);
/* if this is the first slave */
- if (bond_first_slave(bond) == new_slave) {
+ if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
- struct slave *prev_slave;
-
- prev_slave = bond_prev_slave(bond, new_slave);
SLAVE_AD_INFO(new_slave).id =
SLAVE_AD_INFO(prev_slave).id + 1;
}
@@ -1588,7 +1545,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave);
- bond_select_active_slave(bond);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1562,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
break;
} /* switch(bond_mode) */
- write_unlock_bh(&bond->curr_slave_lock);
-
- bond_set_carrier(bond);
-
#ifdef CONFIG_NET_POLL_CONTROLLER
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
@@ -1624,17 +1576,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
#endif
- read_unlock(&bond->lock);
-
- res = bond_create_slave_symlinks(bond_dev, slave_dev);
- if (res)
- goto err_detach;
-
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
pr_debug("Error %d calling netdev_rx_handler_register\n", res);
- goto err_dest_symlinks;
+ goto err_detach;
+ }
+
+ res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+ if (res) {
+ pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+ goto err_unregister;
+ }
+
+ bond->slave_cnt++;
+ bond_compute_features(bond);
+ bond_set_carrier(bond);
+
+ if (USES_PRIMARY(bond->params.mode)) {
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1610,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
-err_dest_symlinks:
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+ netdev_rx_handler_unregister(slave_dev);
err_detach:
if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1619,6 @@ err_detach:
vlan_vids_del_by_dev(slave_dev, bond_dev);
write_lock_bh(&bond->lock);
- bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1638,6 @@ err_close:
slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
-err_unset_master:
- bond_upper_dev_unlink(bond_dev, slave_dev);
-
err_restore_mac:
if (!bond->params.fail_over_mac) {
/* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1656,8 @@ err_free:
kfree(new_slave);
err_undo_flags:
- bond_compute_features(bond);
/* Enslave of first slave has failed and we need to fix master's mac */
- if (list_empty(&bond->slave_list) &&
+ if (!bond_has_slaves(bond) &&
ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
@@ -1749,6 +1708,11 @@ static int __bond_release_one(struct net_device *bond_dev,
}
write_unlock_bh(&bond->lock);
+
+ /* release the slave from its bond */
+ bond->slave_cnt--;
+
+ bond_upper_dev_unlink(bond_dev, slave_dev);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
@@ -1772,12 +1736,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- /* release the slave from its bond */
- bond_detach_slave(bond, slave);
-
if (!all && !bond->params.fail_over_mac) {
if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
- !list_empty(&bond->slave_list))
+ bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
bond_dev->name, slave_dev->name,
slave->perm_hwaddr,
@@ -1820,7 +1781,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
}
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
@@ -1836,7 +1797,7 @@ static int __bond_release_one(struct net_device *bond_dev,
unblock_netpoll_tx();
synchronize_rcu();
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
@@ -1848,8 +1809,6 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
/* If the mode USES_PRIMARY, then this cases was handled above by
@@ -1873,8 +1832,6 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_flush(bond_dev, slave_dev);
}
- bond_upper_dev_unlink(bond_dev, slave_dev);
-
slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
@@ -1913,7 +1870,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = bond_release(bond_dev, slave_dev);
- if (ret == 0 && list_empty(&bond->slave_list)) {
+ if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
@@ -1922,61 +1879,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
return ret;
}
-/*
- * This function changes the active slave to slave <slave_dev>.
- * It returns -EINVAL in the following cases.
- * - <slave_dev> is not found in the list.
- * - There is not active slave now.
- * - <slave_dev> is already active.
- * - The link state of <slave_dev> is not BOND_LINK_UP.
- * - <slave_dev> is not running.
- * In these cases, this function does nothing.
- * In the other cases, current_slave pointer is changed and 0 is returned.
- */
-static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *old_active = NULL;
- struct slave *new_active = NULL;
- int res = 0;
-
- if (!USES_PRIMARY(bond->params.mode))
- return -EINVAL;
-
- /* Verify that bond_dev is indeed the master of slave_dev */
- if (!(slave_dev->flags & IFF_SLAVE) ||
- !netdev_has_upper_dev(slave_dev, bond_dev))
- return -EINVAL;
-
- read_lock(&bond->lock);
-
- old_active = bond->curr_active_slave;
- new_active = bond_get_slave_by_dev(bond, slave_dev);
- /*
- * Changing to the current active: do nothing; return success.
- */
- if (new_active && new_active == old_active) {
- read_unlock(&bond->lock);
- return 0;
- }
-
- if (new_active &&
- old_active &&
- new_active->link == BOND_LINK_UP &&
- IS_UP(new_active->dev)) {
- block_netpoll_tx();
- write_lock_bh(&bond->curr_slave_lock);
- bond_change_active_slave(bond, new_active);
- write_unlock_bh(&bond->curr_slave_lock);
- unblock_netpoll_tx();
- } else
- res = -EINVAL;
-
- read_unlock(&bond->lock);
-
- return res;
-}
-
static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1994,11 +1896,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
int i = 0, res = -ENODEV;
struct slave *slave;
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
strcpy(info->slave_name, slave->dev->name);
@@ -2019,12 +1922,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
static int bond_miimon_inspect(struct bonding *bond)
{
int link_state, commit = 0;
+ struct list_head *iter;
struct slave *slave;
bool ignore_updelay;
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2118,9 +2022,10 @@ static int bond_miimon_inspect(struct bonding *bond)
static void bond_miimon_commit(struct bonding *bond)
{
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2225,7 +2130,7 @@ void bond_mii_monitor(struct work_struct *work)
delay = msecs_to_jiffies(bond->params.miimon);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -2274,7 +2179,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return true;
rcu_read_lock();
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (ip == bond_confirm_addr(upper, 0, ip)) {
ret = true;
break;
@@ -2349,10 +2254,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
*
* TODO: QinQ?
*/
- netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+ vlan_iter) {
if (!is_vlan_dev(vlan_upper))
continue;
- netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+ iter) {
if (upper == rt->dst.dev) {
vlan_id = vlan_dev_vlan_id(vlan_upper);
rcu_read_unlock();
@@ -2365,7 +2272,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* our upper vlans, then just search for any dev that
* matches, and in case it's a vlan - save the id
*/
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper == rt->dst.dev) {
/* if it's a vlan - get its VID */
if (is_vlan_dev(upper))
@@ -2512,11 +2419,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
struct slave *slave, *oldcurrent;
+ struct list_head *iter;
int do_failover = 0;
read_lock(&bond->lock);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
oldcurrent = bond->curr_active_slave;
@@ -2528,7 +2436,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2619,10 +2527,11 @@ re_arm:
static int bond_ab_arp_inspect(struct bonding *bond)
{
unsigned long trans_start, last_rx;
+ struct list_head *iter;
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2689,9 +2598,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
static void bond_ab_arp_commit(struct bonding *bond)
{
unsigned long trans_start;
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2762,8 +2672,9 @@ do_failover:
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *next_slave;
- int i;
+ struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct list_head *iter;
+ bool found = false;
read_lock(&bond->curr_slave_lock);
@@ -2793,18 +2704,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(bond->current_arp_slave);
- /* search for next candidate */
- next_slave = bond_next_slave(bond, bond->current_arp_slave);
- bond_for_each_slave_from(bond, slave, i, next_slave) {
- if (IS_UP(slave->dev)) {
- slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(slave);
- bond_arp_send_all(bond, slave);
- slave->jiffies = jiffies;
- bond->current_arp_slave = slave;
- break;
- }
+ bond_for_each_slave(bond, slave, iter) {
+ if (!found && !before && IS_UP(slave->dev))
+ before = slave;
+ if (found && !new_slave && IS_UP(slave->dev))
+ new_slave = slave;
/* if the link state is up at this point, we
* mark it down - this can happen if we have
* simultaneous link failures and
@@ -2812,7 +2717,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
* one the current slave so it is still marked
* up when it is actually down
*/
- if (slave->link == BOND_LINK_UP) {
+ if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
slave->link = BOND_LINK_DOWN;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2822,7 +2727,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
+ if (slave == bond->current_arp_slave)
+ found = true;
}
+
+ if (!new_slave && before)
+ new_slave = before;
+
+ if (!new_slave)
+ return;
+
+ new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_active_flags(new_slave);
+ bond_arp_send_all(bond, new_slave);
+ new_slave->jiffies = jiffies;
+ bond->current_arp_slave = new_slave;
+
}
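
The rewritten bond_ab_arp_probe() above selects the next candidate in a single pass: "before" remembers the first usable slave seen before current_arp_slave, "new_slave" the first usable one after it, and "before" serves as the wrap-around fallback. A minimal userspace sketch of that selection over a plain array; the struct and names below are invented for illustration and are not kernel code.

    /* Editorial sketch: wrap-around search for the next usable slave, mirroring
     * the single-pass logic of bond_ab_arp_probe().  Types and data are invented
     * for illustration; this is not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_slave {
        const char *name;
        bool up;
    };

    const struct fake_slave *pick_next_candidate(const struct fake_slave *slaves,
                                                 int count, int current)
    {
        const struct fake_slave *before = NULL, *new_slave = NULL;
        bool found = false;
        int i;

        for (i = 0; i < count; i++) {
            if (!found && !before && slaves[i].up)
                before = &slaves[i];        /* first usable slave before current */
            if (found && !new_slave && slaves[i].up)
                new_slave = &slaves[i];     /* first usable slave after current */
            if (i == current)
                found = true;
        }
        /* nothing usable after the current slave: wrap around to the earlier hit */
        return new_slave ? new_slave : before;
    }

    int main(void)
    {
        const struct fake_slave slaves[] = {
            { "eth0", true }, { "eth1", false }, { "eth2", true },
        };
        const struct fake_slave *next = pick_next_candidate(slaves, 3, 2);

        printf("next candidate: %s\n", next ? next->name : "(none)");
        return 0;
    }
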
void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2836,7 +2756,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -3033,99 +2953,85 @@ static struct notifier_block bond_netdev_notifier = {
/*---------------------------- Hashing Policies -----------------------------*/
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
{
struct ethhdr *data = (struct ethhdr *)skb->data;
if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
- return (data->h_dest[5] ^ data->h_source[5]) % count;
+ return data->h_dest[5] ^ data->h_source[5];
return 0;
}
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+ struct flow_keys *fk)
{
- const struct ethhdr *data;
+ const struct ipv6hdr *iph6;
const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- u32 v6hash;
- const __be32 *s, *d;
+ int noff, proto = -1;
+
+ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+ return skb_flow_dissect(skb, fk);
- if (skb->protocol == htons(ETH_P_IP) &&
- pskb_network_may_pull(skb, sizeof(*iph))) {
+ fk->ports = 0;
+ noff = skb_network_offset(skb);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+ return false;
iph = ip_hdr(skb);
- data = (struct ethhdr *)skb->data;
- return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
- (data->h_dest[5] ^ data->h_source[5])) % count;
- } else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_network_may_pull(skb, sizeof(*ipv6h))) {
- ipv6h = ipv6_hdr(skb);
- data = (struct ethhdr *)skb->data;
- s = &ipv6h->saddr.s6_addr32[0];
- d = &ipv6h->daddr.s6_addr32[0];
- v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
- v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
- return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
- }
-
- return bond_xmit_hash_policy_l2(skb, count);
+ fk->src = iph->saddr;
+ fk->dst = iph->daddr;
+ noff += iph->ihl << 2;
+ if (!ip_is_fragment(iph))
+ proto = iph->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+ return false;
+ iph6 = ipv6_hdr(skb);
+ fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+ fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+ noff += sizeof(*iph6);
+ proto = iph6->nexthdr;
+ } else {
+ return false;
+ }
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+ fk->ports = skb_flow_get_ports(skb, noff, proto);
+
+ return true;
}
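
bond_flow_dissect() above feeds the hash with the L3 addresses and, for the layer3+4 policy, the L4 port words, while the encapsulated policies are delegated to skb_flow_dissect(). A rough, self-contained illustration of the IPv4 branch, parsing by hand from a raw frame rather than through the kernel's skb helpers; all names below are made up for the sketch.

    /* Editorial sketch: pull the IPv4 addresses and L4 port words out of a raw
     * Ethernet frame, roughly the input bond_flow_dissect() prepares for the
     * layer2+3/layer3+4 policies.  Offsets are computed by hand instead of using
     * the kernel's skb helpers; names are invented for the sketch. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct fake_flow_keys {
        uint32_t src;       /* IPv4 source address, network byte order */
        uint32_t dst;       /* IPv4 destination address, network byte order */
        uint32_t ports;     /* both 16-bit L4 ports, packed as read off the wire */
    };

    bool dissect_ipv4(const uint8_t *frame, size_t len, struct fake_flow_keys *fk)
    {
        size_t noff = 14;                   /* network header follows the Ethernet header */
        uint8_t ihl, proto;

        fk->ports = 0;
        if (len < noff + 20)
            return false;
        if (frame[12] != 0x08 || frame[13] != 0x00)     /* EtherType is not IPv4 */
            return false;

        ihl = frame[noff] & 0x0f;           /* IP header length in 32-bit words */
        proto = frame[noff + 9];
        memcpy(&fk->src, frame + noff + 12, 4);
        memcpy(&fk->dst, frame + noff + 16, 4);

        noff += (size_t)ihl * 4;            /* skip IP options, if any */
        if ((proto == 6 || proto == 17) && len >= noff + 4)   /* TCP or UDP */
            memcpy(&fk->ports, frame + noff, 4);              /* source + dest ports */
        return true;
    }

The kernel version additionally refuses to read ports from IPv4 fragments (ip_is_fragment()) and handles IPv6; both are left out of the sketch for brevity.
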
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device
+ * which will be reduced modulo count before returning.
*/
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
{
- u32 layer4_xor = 0;
- const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- const __be32 *s, *d;
- const __be16 *l4 = NULL;
- __be16 _l4[2];
- int noff = skb_network_offset(skb);
- int poff;
-
- if (skb->protocol == htons(ETH_P_IP) &&
- pskb_may_pull(skb, noff + sizeof(*iph))) {
- iph = ip_hdr(skb);
- poff = proto_ports_offset(iph->protocol);
+ struct flow_keys flow;
+ u32 hash;
- if (!ip_is_fragment(iph) && poff >= 0) {
- l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
- }
- return (layer4_xor ^
- ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
- } else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
- ipv6h = ipv6_hdr(skb);
- poff = proto_ports_offset(ipv6h->nexthdr);
- if (poff >= 0) {
- l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
- }
- s = &ipv6h->saddr.s6_addr32[0];
- d = &ipv6h->daddr.s6_addr32[0];
- layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
- layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
- (layer4_xor >> 8);
- return layer4_xor % count;
- }
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect(bond, skb, &flow))
+ return bond_eth_hash(skb) % count;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+ hash = bond_eth_hash(skb);
+ else
+ hash = (__force u32)flow.ports;
+ hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
- return bond_xmit_hash_policy_l2(skb, count);
+ return hash % count;
}
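
The consolidated bond_xmit_hash() above reduces whatever was dissected to a single value with an XOR-and-fold before taking the modulo. The folding step in isolation, as a small userspace sketch; this is illustrative only, not the bonding API.

    /* Editorial sketch: the XOR-and-fold at the end of bond_xmit_hash(), once the
     * relevant headers have been extracted.  Illustrative userspace code only. */
    #include <stdint.h>
    #include <stdio.h>

    uint32_t fold_hash(uint32_t l2_or_ports, uint32_t src, uint32_t dst, int count)
    {
        uint32_t hash = l2_or_ports;    /* eth byte pair (L2/L2+3) or L4 ports (L3+4) */

        hash ^= src ^ dst;              /* mix in the network-layer addresses */
        hash ^= hash >> 16;             /* fold the upper bits down ... */
        hash ^= hash >> 8;              /* ... so the low bits see the whole value */
        return hash % count;            /* pick one of 'count' slaves */
    }

    int main(void)
    {
        /* two flows that differ only in their ports may pick different slaves */
        printf("%u\n", fold_hash(0x1f900050, 0x0a000001, 0x0a000002, 4));
        printf("%u\n", fold_hash(0x1f910050, 0x0a000001, 0x0a000002, 4));
        return 0;
    }

Under layer3+4 the port words take part in the hash, so flows differing only in ports can spread across slaves; layer2 and layer2+3 keep such flows pinned by addresses alone.
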
/*-------------------------- Device entry points ----------------------------*/
@@ -3155,13 +3061,14 @@ static void bond_work_cancel_all(struct bonding *bond)
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
/* reset slave->backup and slave->inactive */
read_lock(&bond->lock);
- if (!list_empty(&bond->slave_list)) {
+ if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave);
@@ -3221,12 +3128,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
+ struct list_head *iter;
struct slave *slave;
memset(stats, 0, sizeof(*stats));
read_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
const struct rtnl_link_stats64 *sstats =
dev_get_stats(slave->dev, &temp);
@@ -3263,6 +3171,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = NULL;
struct ifbond k_binfo;
struct ifbond __user *u_binfo = NULL;
@@ -3293,7 +3202,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (mii->reg_num == 1) {
- struct bonding *bond = netdev_priv(bond_dev);
mii->val_out = 0;
read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
@@ -3365,7 +3273,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
- res = bond_ioctl_change_active(bond_dev, slave_dev);
+ res = bond_option_active_slave_set(bond, slave_dev);
break;
default:
res = -EOPNOTSUPP;
@@ -3393,22 +3301,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
static void bond_set_rx_mode(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- ASSERT_RTNL();
+ rcu_read_lock();
if (USES_PRIMARY(bond->params.mode)) {
- slave = rtnl_dereference(bond->curr_active_slave);
+ slave = rcu_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
dev_mc_sync(slave->dev, bond_dev);
}
} else {
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
dev_uc_sync_multiple(slave->dev, bond_dev);
dev_mc_sync_multiple(slave->dev, bond_dev);
}
}
+ rcu_read_unlock();
}
static int bond_neigh_init(struct neighbour *n)
@@ -3471,7 +3381,8 @@ static int bond_neigh_setup(struct net_device *dev,
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
int res = 0;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3492,10 +3403,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave) {
- pr_debug("s %p s->p %p c_m %p\n",
+ bond_for_each_slave(bond, slave, iter) {
+ pr_debug("s %p c_m %p\n",
slave,
- bond_prev_slave(bond, slave),
slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3520,13 +3430,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
unwind:
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
+ bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
- tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+ if (rollback_slave == slave)
+ break;
+
+ tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
if (tmp_res) {
pr_debug("unwind err %d dev %s\n",
- tmp_res, slave->dev->name);
+ tmp_res, rollback_slave->dev->name);
}
}
@@ -3543,8 +3456,9 @@ unwind:
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave, *rollback_slave;
struct sockaddr *sa = addr, tmp_sa;
- struct slave *slave;
+ struct list_head *iter;
int res = 0;
if (bond->params.mode == BOND_MODE_ALB)
@@ -3578,7 +3492,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
@@ -3610,13 +3524,16 @@ unwind:
tmp_sa.sa_family = bond_dev->type;
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
+ bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
- tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+ if (rollback_slave == slave)
+ break;
+
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
if (tmp_res) {
pr_debug("unwind err %d dev %s\n",
- tmp_res, slave->dev->name);
+ tmp_res, rollback_slave->dev->name);
}
}
@@ -3635,11 +3552,12 @@ unwind:
*/
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
+ struct list_head *iter;
struct slave *slave;
int i = slave_id;
/* Here we start from the slave with slave_id */
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
if (slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3650,7 +3568,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
/* Here we start from the first slave up to slave_id */
i = slave_id;
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
if (slave_can_tx(slave)) {
@@ -3662,14 +3580,44 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
kfree_skb(skb);
}
+/**
+ * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
+ * @bond: bonding device to use
+ *
+ * Based on the value of the bonding device's packets_per_slave parameter
+ * this function generates a slave id, which is usually used as the next
+ * slave to transmit through.
+ */
+static u32 bond_rr_gen_slave_id(struct bonding *bond)
+{
+ int packets_per_slave = bond->params.packets_per_slave;
+ u32 slave_id;
+
+ switch (packets_per_slave) {
+ case 0:
+ slave_id = prandom_u32();
+ break;
+ case 1:
+ slave_id = bond->rr_tx_counter;
+ break;
+ default:
+ slave_id = reciprocal_divide(bond->rr_tx_counter,
+ packets_per_slave);
+ break;
+ }
+ bond->rr_tx_counter++;
+
+ return slave_id;
+}
+
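
bond_rr_gen_slave_id() maps packets_per_slave onto a slave index: 0 picks at random, 1 rotates every packet, and larger values keep the same slave for N consecutive packets, with the reciprocal precomputed so the hot path avoids a division. A sketch of the resulting sequence, using a plain division in place of reciprocal_divide(); the behaviour is the same, only the per-packet cost differs.

    /* Editorial sketch: how packets_per_slave shapes round-robin slave ids.
     * Plain division stands in for the kernel's reciprocal_divide(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    uint32_t gen_slave_id(uint32_t *tx_counter, int packets_per_slave)
    {
        uint32_t slave_id;

        switch (packets_per_slave) {
        case 0:
            slave_id = (uint32_t)rand();            /* spread packets at random */
            break;
        case 1:
            slave_id = *tx_counter;                 /* strict per-packet rotation */
            break;
        default:
            slave_id = *tx_counter / packets_per_slave;  /* keep a slave for N packets */
            break;
        }
        (*tx_counter)++;
        return slave_id;
    }

    int main(void)
    {
        uint32_t counter = 0;
        int i, nslaves = 3;

        for (i = 0; i < 9; i++)
            printf("%u ", gen_slave_id(&counter, 3) % nslaves);
        printf("\n");                               /* prints: 0 0 0 1 1 1 2 2 2 */
        return 0;
    }

Later in this series, bond_check_params() and the packets_per_slave sysfs handlers store reciprocal_value(N) for N > 1 so this division can be replaced by a multiply-and-shift at transmit time.
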
static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ u32 slave_id;
- /*
- * Start with the curr_active_slave that joined the bond as the
+ /* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
* needs to maintain some consistency for the interface that will
* send the join/membership reports. The curr_active_slave found
@@ -3682,8 +3630,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- bond_xmit_slave_id(bond, skb,
- bond->rr_tx_counter++ % bond->slave_cnt);
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
}
return NETDEV_TX_OK;
@@ -3707,8 +3655,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
return NETDEV_TX_OK;
}
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor() , we determine the output device by using a pre-
* determined xmit_hash_policy(), If the selected device is not enabled,
* find the next active slave.
*/
@@ -3716,8 +3663,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb,
- bond->xmit_hash_policy(skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
return NETDEV_TX_OK;
}
@@ -3727,8 +3673,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
+ struct list_head *iter;
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (bond_is_last_slave(bond, slave))
break;
if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3753,22 +3700,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
/*------------------------- Device initialization ---------------------------*/
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
- switch (bond->params.xmit_policy) {
- case BOND_XMIT_POLICY_LAYER23:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
- break;
- case BOND_XMIT_POLICY_LAYER34:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
- break;
- case BOND_XMIT_POLICY_LAYER2:
- default:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
- break;
- }
-}
-
/*
* Lookup the slave that corresponds to a qid
*/
@@ -3777,13 +3708,14 @@ static inline int bond_slave_override(struct bonding *bond,
{
struct slave *slave = NULL;
struct slave *check_slave;
+ struct list_head *iter;
int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, check_slave) {
+ bond_for_each_slave_rcu(bond, check_slave, iter) {
if (check_slave->queue_id == skb->queue_mapping) {
slave = check_slave;
break;
@@ -3869,7 +3801,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
rcu_read_lock();
- if (!list_empty(&bond->slave_list))
+ if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
kfree_skb(skb);
@@ -3878,43 +3810,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
- struct net_device *bond_dev = bond->dev;
-
- switch (mode) {
- case BOND_MODE_ROUNDROBIN:
- break;
- case BOND_MODE_ACTIVEBACKUP:
- break;
- case BOND_MODE_XOR:
- bond_set_xmit_hash_policy(bond);
- break;
- case BOND_MODE_BROADCAST:
- break;
- case BOND_MODE_8023AD:
- bond_set_xmit_hash_policy(bond);
- break;
- case BOND_MODE_ALB:
- /* FALLTHRU */
- case BOND_MODE_TLB:
- break;
- default:
- /* Should never happen, mode already checked */
- pr_err("%s: Error: Unknown bonding mode %d\n",
- bond_dev->name, mode);
- break;
- }
-}
-
static int bond_ethtool_get_settings(struct net_device *bond_dev,
struct ethtool_cmd *ecmd)
{
struct bonding *bond = netdev_priv(bond_dev);
unsigned long speed = 0;
+ struct list_head *iter;
struct slave *slave;
ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3926,7 +3827,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
* this is an accurate maximum.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (SLAVE_IS_OK(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
@@ -3994,14 +3895,13 @@ static void bond_destructor(struct net_device *bond_dev)
free_netdev(bond_dev);
}
-static void bond_setup(struct net_device *bond_dev)
+void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
/* initialize rwlocks */
rwlock_init(&bond->lock);
rwlock_init(&bond->curr_slave_lock);
- INIT_LIST_HEAD(&bond->slave_list);
bond->params = bonding_defaults;
/* Initialize pointers */
@@ -4011,7 +3911,6 @@ static void bond_setup(struct net_device *bond_dev)
ether_setup(bond_dev);
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;
- bond_set_mode_ops(bond, bond->params.mode);
bond_dev->destructor = bond_destructor;
@@ -4057,12 +3956,13 @@ static void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *tmp_slave;
+ struct list_head *iter;
+ struct slave *slave;
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+ bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
pr_info("%s: released all slaves\n", bond_dev->name);
@@ -4235,6 +4135,12 @@ static int bond_check_params(struct bond_params *params)
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
+ if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+ pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
+ packets_per_slave, USHRT_MAX);
+ packets_per_slave = 1;
+ }
+
/* reset values for TLB/ALB */
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
@@ -4424,7 +4330,10 @@ static int bond_check_params(struct bond_params *params)
params->resend_igmp = resend_igmp;
params->min_links = min_links;
params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
-
+ if (packets_per_slave > 1)
+ params->packets_per_slave = reciprocal_value(packets_per_slave);
+ else
+ params->packets_per_slave = packets_per_slave;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
params->primary[IFNAMSIZ - 1] = 0;
@@ -4495,32 +4404,11 @@ static int bond_init(struct net_device *bond_dev)
return 0;
}
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
-{
- if (tb[IFLA_ADDRESS]) {
- if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
- return -EINVAL;
- if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
- return -EADDRNOTAVAIL;
- }
- return 0;
-}
-
-static unsigned int bond_get_num_tx_queues(void)
+unsigned int bond_get_num_tx_queues(void)
{
return tx_queues;
}
-static struct rtnl_link_ops bond_link_ops __read_mostly = {
- .kind = "bond",
- .priv_size = sizeof(struct bonding),
- .setup = bond_setup,
- .validate = bond_validate,
- .get_num_tx_queues = bond_get_num_tx_queues,
- .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
- as for TX queues */
-};
-
/* Create a new bond based on the specified name and bonding parameters.
* If name is NULL, obtain a suitable "bond%d" name for us.
* Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4607,7 +4495,7 @@ static int __init bonding_init(void)
if (res)
goto out;
- res = rtnl_link_register(&bond_link_ops);
+ res = bond_netlink_init();
if (res)
goto err_link;
@@ -4623,7 +4511,7 @@ static int __init bonding_init(void)
out:
return res;
err:
- rtnl_link_unregister(&bond_link_ops);
+ bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
goto out;
@@ -4636,7 +4524,7 @@ static void __exit bonding_exit(void)
bond_destroy_debugfs();
- rtnl_link_unregister(&bond_link_ops);
+ bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4653,4 +4541,3 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644
index 00000000000..40e7b1cb4ae
--- /dev/null
+++ b/drivers/net/bonding/bond_netlink.c
@@ -0,0 +1,131 @@
+/*
+ * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "bonding.h"
+
+static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ [IFLA_BOND_MODE] = { .type = NLA_U8 },
+ [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+};
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static int bond_changelink(struct net_device *bond_dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int err;
+
+ if (data && data[IFLA_BOND_MODE]) {
+ int mode = nla_get_u8(data[IFLA_BOND_MODE]);
+
+ err = bond_option_mode_set(bond, mode);
+ if (err)
+ return err;
+ }
+ if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
+ struct net_device *slave_dev;
+
+ if (ifindex == 0) {
+ slave_dev = NULL;
+ } else {
+ slave_dev = __dev_get_by_index(dev_net(bond_dev),
+ ifindex);
+ if (!slave_dev)
+ return -ENODEV;
+ }
+ err = bond_option_active_slave_set(bond, slave_dev);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ err = bond_changelink(bond_dev, tb, data);
+ if (err < 0)
+ return err;
+
+ return register_netdevice(bond_dev);
+}
+
+static size_t bond_get_size(const struct net_device *bond_dev)
+{
+ return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
+ nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+}
+
+static int bond_fill_info(struct sk_buff *skb,
+ const struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct net_device *slave_dev = bond_option_active_slave_get(bond);
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
+ (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+struct rtnl_link_ops bond_link_ops __read_mostly = {
+ .kind = "bond",
+ .priv_size = sizeof(struct bonding),
+ .setup = bond_setup,
+ .maxtype = IFLA_BOND_MAX,
+ .policy = bond_policy,
+ .validate = bond_validate,
+ .newlink = bond_newlink,
+ .changelink = bond_changelink,
+ .get_size = bond_get_size,
+ .fill_info = bond_fill_info,
+ .get_num_tx_queues = bond_get_num_tx_queues,
+ .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
+ as for TX queues */
+};
+
+int __init bond_netlink_init(void)
+{
+ return rtnl_link_register(&bond_link_ops);
+}
+
+void bond_netlink_fini(void)
+{
+ rtnl_link_unregister(&bond_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644
index 00000000000..9a5223c7b4d
--- /dev/null
+++ b/drivers/net/bonding/bond_options.c
@@ -0,0 +1,142 @@
+/*
+ * drivers/net/bond/bond_options.c - bonding options
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/rwlock.h>
+#include <linux/rcupdate.h>
+#include "bonding.h"
+
+static bool bond_mode_is_valid(int mode)
+{
+ int i;
+
+ for (i = 0; bond_mode_tbl[i].modename; i++);
+
+ return mode >= 0 && mode < i;
+}
+
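
The empty-bodied for loop in bond_mode_is_valid() above is deliberate: it only advances i past the named entries of bond_mode_tbl[], so a mode is valid when it indexes one of them. The same sentinel-counting idiom on an invented table, for clarity.

    /* Editorial sketch: validating an index against a sentinel-terminated
     * parameter table, as bond_mode_is_valid() does.  The table is made up. */
    #include <stdbool.h>
    #include <stddef.h>

    struct parm_entry {
        const char *modename;
        int mode;
    };

    static const struct parm_entry demo_tbl[] = {
        { "balance-rr",    0 },
        { "active-backup", 1 },
        { "balance-xor",   2 },
        { NULL,           -1 },     /* sentinel: a NULL modename ends the table */
    };

    bool demo_mode_is_valid(int mode)
    {
        int i;

        for (i = 0; demo_tbl[i].modename; i++)
            ;                       /* empty body on purpose: just count entries */

        return mode >= 0 && mode < i;
    }
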
+int bond_option_mode_set(struct bonding *bond, int mode)
+{
+ if (!bond_mode_is_valid(mode)) {
+ pr_err("invalid mode value %d.\n", mode);
+ return -EINVAL;
+ }
+
+ if (bond->dev->flags & IFF_UP) {
+ pr_err("%s: unable to update mode because interface is up.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (bond_has_slaves(bond)) {
+ pr_err("%s: unable to update mode because bond has slaves.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
+ pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+ bond->dev->name, bond_mode_tbl[mode].modename);
+ return -EINVAL;
+ }
+
+ /* don't cache arp_validate between modes */
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ bond->params.mode = mode;
+ return 0;
+}
+
+static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
+ struct slave *slave)
+{
+ return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+}
+
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+ struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+ return __bond_option_active_slave_get(bond, slave);
+}
+
+struct net_device *bond_option_active_slave_get(struct bonding *bond)
+{
+ return __bond_option_active_slave_get(bond, bond->curr_active_slave);
+}
+
+int bond_option_active_slave_set(struct bonding *bond,
+ struct net_device *slave_dev)
+{
+ int ret = 0;
+
+ if (slave_dev) {
+ if (!netif_is_bond_slave(slave_dev)) {
+ pr_err("Device %s is not bonding slave.\n",
+ slave_dev->name);
+ return -EINVAL;
+ }
+
+ if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
+ pr_err("%s: Device %s is not our slave.\n",
+ bond->dev->name, slave_dev->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!USES_PRIMARY(bond->params.mode)) {
+ pr_err("%s: Unable to change active slave; %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
+ return -EINVAL;
+ }
+
+ block_netpoll_tx();
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+
+ /* check to see if we are clearing active */
+ if (!slave_dev) {
+ pr_info("%s: Clearing current active slave.\n",
+ bond->dev->name);
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
+ bond_select_active_slave(bond);
+ } else {
+ struct slave *old_active = bond->curr_active_slave;
+ struct slave *new_active = bond_slave_get_rtnl(slave_dev);
+
+ BUG_ON(!new_active);
+
+ if (new_active == old_active) {
+ /* do nothing */
+ pr_info("%s: %s is already the current active slave.\n",
+ bond->dev->name, new_active->dev->name);
+ } else {
+ if (old_active && (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ pr_info("%s: Setting %s as active slave.\n",
+ bond->dev->name, new_active->dev->name);
+ bond_change_active_slave(bond, new_active);
+ } else {
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ bond->dev->name, new_active->dev->name,
+ new_active->dev->name);
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ unblock_netpoll_tx();
+ return ret;
+}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 20a6ee25bb6..fb868d6c22d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
- loff_t off = 0;
+ struct list_head *iter;
struct slave *slave;
+ loff_t off = 0;
/* make sure the bond won't be taken away */
rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
if (++off == *pos)
return slave;
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = seq->private;
- struct slave *slave = v;
+ struct list_head *iter;
+ struct slave *slave;
+ bool found = false;
++*pos;
if (v == SEQ_START_TOKEN)
return bond_first_slave(bond);
- if (bond_is_last_slave(bond, slave))
+ if (bond_is_last_slave(bond, v))
return NULL;
- slave = bond_next_slave(bond, slave);
- return slave;
+ bond_for_each_slave(bond, slave, iter) {
+ if (found)
+ return slave;
+ if (slave == v)
+ found = true;
+ }
+
+ return NULL;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c29b836749b..0ec2a7e8c8a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -40,6 +40,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
+#include <linux/reciprocal_div.h>
#include "bonding.h"
@@ -149,14 +150,6 @@ err_no_cmd:
return -EPERM;
}
-static const void *bonding_namespace(struct class *cls,
- const struct class_attribute *attr)
-{
- const struct bond_net *bn =
- container_of(attr, struct bond_net, class_attr_bonding_masters);
- return bn->net;
-}
-
/* class attribute for bond_masters file. This ends up in /sys/class/net */
static const struct class_attribute class_attr_bonding_masters = {
.attr = {
@@ -165,44 +158,8 @@ static const struct class_attribute class_attr_bonding_masters = {
},
.show = bonding_show_bonds,
.store = bonding_store_bonds,
- .namespace = bonding_namespace,
};
-int bond_create_slave_symlinks(struct net_device *master,
- struct net_device *slave)
-{
- char linkname[IFNAMSIZ+7];
- int ret = 0;
-
- /* first, create a link from the slave back to the master */
- ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
- "master");
- if (ret)
- return ret;
- /* next, create a link from the master to the slave */
- sprintf(linkname, "slave_%s", slave->name);
- ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
- linkname);
-
- /* free the master link created earlier in case of error */
- if (ret)
- sysfs_remove_link(&(slave->dev.kobj), "master");
-
- return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
- struct net_device *slave)
-{
- char linkname[IFNAMSIZ+7];
-
- sysfs_remove_link(&(slave->dev.kobj), "master");
- sprintf(linkname, "slave_%s", slave->name);
- sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
/*
* Show the slaves in the current bond.
*/
@@ -210,11 +167,14 @@ static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
int res = 0;
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -224,7 +184,9 @@ static ssize_t bonding_show_slaves(struct device *d,
}
res += sprintf(buf + res, "%s ", slave->dev->name);
}
- read_unlock(&bond->lock);
+
+ rtnl_unlock();
+
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -313,50 +275,26 @@ static ssize_t bonding_store_mode(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond->dev->flags & IFF_UP) {
- pr_err("unable to update mode of %s because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (!list_empty(&bond->slave_list)) {
- pr_err("unable to update mode of %s because it has slaves.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid mode value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if ((new_value == BOND_MODE_ALB ||
- new_value == BOND_MODE_TLB) &&
- bond->params.arp_interval) {
- pr_err("%s: %s mode is incompatible with arp monitoring.\n",
- bond->dev->name, bond_mode_tbl[new_value].modename);
- ret = -EINVAL;
- goto out;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_mode_set(bond, new_value);
+ if (!ret) {
+ pr_info("%s: setting mode to %s (%d).\n",
+ bond->dev->name, bond_mode_tbl[new_value].modename,
+ new_value);
+ ret = count;
}
- /* don't cache arp_validate between modes */
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- bond->params.mode = new_value;
- bond_set_mode_ops(bond, bond->params.mode);
- pr_info("%s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
- new_value);
-out:
rtnl_unlock();
return ret;
}
@@ -392,7 +330,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
ret = -EINVAL;
} else {
bond->params.xmit_policy = new_value;
- bond_set_mode_ops(bond, bond->params.mode);
pr_info("%s: setting xmit hash policy to %s (%d).\n",
bond->dev->name,
xmit_hashtype_tbl[new_value].modename, new_value);
@@ -522,7 +459,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
if (!rtnl_trylock())
return restart_syscall();
- if (!list_empty(&bond->slave_list)) {
+ if (bond_has_slaves(bond)) {
pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
ret = -EPERM;
@@ -587,8 +524,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
goto out;
}
if (bond->params.mode == BOND_MODE_ALB ||
- bond->params.mode == BOND_MODE_TLB) {
- pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
+ bond->params.mode == BOND_MODE_TLB ||
+ bond->params.mode == BOND_MODE_8023AD) {
+ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
bond->dev->name, bond->dev->name);
ret = -EINVAL;
goto out;
@@ -656,21 +594,24 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
__be32 newtarget, *targets;
unsigned long *targets_rx;
int ind, i, j, ret = -EINVAL;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
targets = bond->params.arp_targets;
- newtarget = in_aton(buf + 1);
+ if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
+ IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
+ bond->dev->name, &newtarget);
+ goto out;
+ }
/* look for adds */
if (buf[0] == '+') {
- if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- pr_err("%s: invalid ARP target %pI4 specified for addition\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
pr_err("%s: ARP target %pI4 is already present\n",
bond->dev->name, &newtarget);
@@ -688,17 +629,11 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
slave->target_last_arp_rx[ind] = jiffies;
targets[ind] = newtarget;
write_unlock_bh(&bond->lock);
} else if (buf[0] == '-') {
- if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- pr_err("%s: invalid ARP target %pI4 specified for removal\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
ind = bond_get_targets_ip(targets, newtarget);
if (ind == -1) {
pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
@@ -714,7 +649,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
j = ind;
for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -734,6 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
ret = count;
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -759,6 +695,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ if (!rtnl_trylock())
+ return restart_syscall();
if (!(bond->params.miimon)) {
pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
bond->dev->name);
@@ -792,6 +730,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
}
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
@@ -814,6 +753,8 @@ static ssize_t bonding_store_updelay(struct device *d,
int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ if (!rtnl_trylock())
+ return restart_syscall();
if (!(bond->params.miimon)) {
pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
bond->dev->name);
@@ -847,6 +788,7 @@ static ssize_t bonding_store_updelay(struct device *d,
}
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
@@ -1111,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
char ifname[IFNAMSIZ];
struct slave *slave;
@@ -1138,7 +1081,7 @@ static ssize_t bonding_store_primary(struct device *d,
goto out;
}
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
pr_info("%s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
@@ -1268,13 +1211,13 @@ static ssize_t bonding_show_active_slave(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct slave *curr;
+ struct net_device *slave_dev;
int count = 0;
rcu_read_lock();
- curr = rcu_dereference(bond->curr_active_slave);
- if (USES_PRIMARY(bond->params.mode) && curr)
- count = sprintf(buf, "%s\n", curr->dev->name);
+ slave_dev = bond_option_active_slave_get_rcu(bond);
+ if (slave_dev)
+ count = sprintf(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -1284,80 +1227,33 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct slave *slave, *old_active, *new_active;
+ int ret;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
+ struct net_device *dev;
if (!rtnl_trylock())
return restart_syscall();
- old_active = new_active = NULL;
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: Unable to change active slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- goto out;
- }
-
sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing active */
if (!strlen(ifname) || buf[0] == '\n') {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
- rcu_assign_pointer(bond->curr_active_slave, NULL);
- bond_select_active_slave(bond);
- goto out;
- }
-
- bond_for_each_slave(bond, slave) {
- if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- old_active = bond->curr_active_slave;
- new_active = slave;
- if (new_active == old_active) {
- /* do nothing */
- pr_info("%s: %s is already the current"
- " active slave.\n",
- bond->dev->name,
- slave->dev->name);
- goto out;
- } else {
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active"
- " slave.\n",
- bond->dev->name,
- slave->dev->name);
- bond_change_active_slave(bond,
- new_active);
- } else {
- pr_info("%s: Could not set %s as"
- " active slave; either %s is"
- " down or the link is down.\n",
- bond->dev->name,
- slave->dev->name,
- slave->dev->name);
- }
- goto out;
- }
+ dev = NULL;
+ } else {
+ dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
}
}
- pr_info("%s: Unable to set %.*s as active slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- out:
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
+ ret = bond_option_active_slave_set(bond, dev);
+ if (!ret)
+ ret = count;
+ out:
rtnl_unlock();
- return count;
+ return ret;
}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1493,14 +1389,14 @@ static ssize_t bonding_show_queue_id(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
int res = 0;
if (!rtnl_trylock())
return restart_syscall();
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -1511,9 +1407,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
res += sprintf(buf + res, "%s:%d ",
slave->dev->name, slave->queue_id);
}
- read_unlock(&bond->lock);
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
rtnl_unlock();
return res;
@@ -1529,6 +1425,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
{
struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
u16 qid;
int ret = count;
char *delim;
@@ -1561,11 +1458,9 @@ static ssize_t bonding_store_queue_id(struct device *d,
if (!sdev)
goto err_no_cmd;
- read_lock(&bond->lock);
-
/* Search for thes slave and check for duplicate qids */
update_slave = NULL;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (sdev == slave->dev)
/*
* We don't need to check the matching
@@ -1573,23 +1468,20 @@ static ssize_t bonding_store_queue_id(struct device *d,
*/
update_slave = slave;
else if (qid && qid == slave->queue_id) {
- goto err_no_cmd_unlock;
+ goto err_no_cmd;
}
}
if (!update_slave)
- goto err_no_cmd_unlock;
+ goto err_no_cmd;
/* Actually set the qids for the slave */
update_slave->queue_id = qid;
- read_unlock(&bond->lock);
out:
rtnl_unlock();
return ret;
-err_no_cmd_unlock:
- read_unlock(&bond->lock);
err_no_cmd:
pr_info("invalid input for queue_id set for %s.\n",
bond->dev->name);
@@ -1619,8 +1511,12 @@ static ssize_t bonding_store_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
int new_value, ret = count;
+ struct list_head *iter;
struct slave *slave;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no all_slaves_active value specified.\n",
bond->dev->name);
@@ -1640,8 +1536,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
goto out;
}
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->inactive = 0;
@@ -1649,8 +1544,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
slave->inactive = 1;
}
}
- read_unlock(&bond->lock);
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1737,6 +1632,53 @@ out:
static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
bonding_show_lp_interval, bonding_store_lp_interval);
+static ssize_t bonding_show_packets_per_slave(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+ int packets_per_slave = bond->params.packets_per_slave;
+
+ if (packets_per_slave > 1)
+ packets_per_slave = reciprocal_value(packets_per_slave);
+
+ return sprintf(buf, "%d\n", packets_per_slave);
+}
+
+static ssize_t bonding_store_packets_per_slave(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no packets_per_slave value specified.\n",
+ bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (new_value < 0 || new_value > USHRT_MAX) {
+ pr_err("%s: packets_per_slave must be between 0 and %u\n",
+ bond->dev->name, USHRT_MAX);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (bond->params.mode != BOND_MODE_ROUNDROBIN)
+ pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
+ bond->dev->name);
+ if (new_value > 1)
+ bond->params.packets_per_slave = reciprocal_value(new_value);
+ else
+ bond->params.packets_per_slave = new_value;
+out:
+ return ret;
+}
+
+static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
+ bonding_show_packets_per_slave,
+ bonding_store_packets_per_slave);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -1768,6 +1710,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_resend_igmp.attr,
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
+ &dev_attr_packets_per_slave.attr,
NULL,
};
@@ -1787,7 +1730,8 @@ int bond_create_sysfs(struct bond_net *bn)
bn->class_attr_bonding_masters = class_attr_bonding_masters;
sysfs_attr_init(&bn->class_attr_bonding_masters.attr);
- ret = netdev_class_create_file(&bn->class_attr_bonding_masters);
+ ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
+ bn->net);
/*
* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
@@ -1817,7 +1761,7 @@ int bond_create_sysfs(struct bond_net *bn)
*/
void bond_destroy_sysfs(struct bond_net *bn)
{
- netdev_class_remove_file(&bn->class_attr_bonding_masters);
+ netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
/*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 03cf3fd1449..ca31286aa02 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -58,6 +58,14 @@
#define TX_QUEUE_OVERRIDE(mode) \
(((mode) == BOND_MODE_ACTIVEBACKUP) || \
((mode) == BOND_MODE_ROUNDROBIN))
+
+#define BOND_MODE_IS_LB(mode) \
+ (((mode) == BOND_MODE_TLB) || \
+ ((mode) == BOND_MODE_ALB))
+
+#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_zeronet(a))
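
IS_IP_TARGET_UNUSABLE_ADDRESS() backs the reworked arp_ip_target store path: after in4_pton() the address is rejected if it is the limited broadcast or sits in the 0.0.0.0/8 zero network. A userspace equivalent, with inet_pton() standing in for in4_pton() and the zeronet test written out instead of calling ipv4_is_zeronet().

    /* Editorial sketch: the same "unusable ARP target" test in userspace, with
     * inet_pton() standing in for the kernel's in4_pton(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    bool ip_target_unusable(uint32_t addr_be)
    {
        return addr_be == htonl(INADDR_BROADCAST) ||    /* 255.255.255.255 */
               (addr_be & htonl(0xff000000)) == 0;      /* 0.0.0.0/8 "zero" network */
    }

    int main(void)
    {
        const char *candidates[] = { "192.0.2.1", "255.255.255.255", "0.0.0.42" };
        struct in_addr in;
        int i;

        for (i = 0; i < 3; i++) {
            if (inet_pton(AF_INET, candidates[i], &in) != 1 ||
                ip_target_unusable(in.s_addr))
                printf("%s: rejected\n", candidates[i]);
            else
                printf("%s: accepted\n", candidates[i]);
        }
        return 0;
    }
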
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
@@ -72,63 +80,37 @@
res; })
/* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
#define bond_first_slave(bond) \
- list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+ NULL)
#define bond_last_slave(bond) \
- (list_empty(&(bond)->slave_list) ? NULL : \
- bond_to_slave((bond)->slave_list.prev))
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+ NULL)
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
- (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
- bond_to_slave((pos)->list.next))
-
-#define bond_prev_slave(bond, pos) \
- (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
- bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for max number of moves
- * @start: starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
- for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
- cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
/**
* bond_for_each_slave - iterate over all slaves
* @bond: the bond holding this list
* @pos: current slave
+ * @iter: list_head * iterator
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave(bond, pos) \
- list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+ netdev_for_each_lower_private((bond)->dev, pos, iter)
/* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
- list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond: the bond holding this list
- * @pos: slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
- list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
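
The rebuilt iteration macros above keep their cursor in a caller-supplied struct list_head pointer instead of an embedded slave->list, which is why callers throughout this series gain a local "iter". The shape of such an external-iterator macro, reduced to self-contained userspace list code; every name below is invented for the sketch.

    /* Editorial sketch: an iteration macro that keeps its cursor in a
     * caller-supplied list_head pointer, the pattern followed by the new
     * bond_for_each_slave()/netdev_for_each_lower_private() pairing.  The list
     * plumbing is re-implemented minimally for a userspace demo. */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_slave {
        const char *name;
        struct list_head node;
    };

    /* 'pos' is the payload, 'iter' is the cursor the caller must supply */
    #define demo_for_each_slave(head, pos, iter)                         \
        for (iter = (head)->next;                                        \
             iter != (head) &&                                           \
             (pos = container_of(iter, struct demo_slave, node));        \
             iter = iter->next)

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct demo_slave a = { "eth0", { NULL, NULL } };
        struct demo_slave b = { "eth1", { NULL, NULL } };
        struct demo_slave *pos;
        struct list_head *iter;

        /* hand-link: head -> a -> b -> head */
        head.next = &a.node;   a.node.prev = &head;
        a.node.next = &b.node; b.node.prev = &a.node;
        b.node.next = &head;   head.prev = &b.node;

        demo_for_each_slave(&head, pos, iter)
            printf("%s\n", pos->name);
        return 0;
    }

Callers that previously declared only struct slave *slave now also declare the iterator, which is exactly the mechanical change visible throughout bond_main.c, bond_sysfs.c and bond_procfs.c above.
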
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -177,6 +159,7 @@ struct bond_params {
int all_slaves_active;
int resend_igmp;
int lp_interval;
+ int packets_per_slave;
};
struct bond_parm_tbl {
@@ -188,7 +171,6 @@ struct bond_parm_tbl {
struct slave {
struct net_device *dev; /* first - useful for panic debug */
- struct list_head list;
struct bonding *bond; /* our master */
int delay;
unsigned long jiffies;
@@ -228,7 +210,6 @@ struct slave {
*/
struct bonding {
struct net_device *dev; /* first - useful for panic debug */
- struct list_head slave_list;
struct slave *curr_active_slave;
struct slave *current_arp_slave;
struct slave *primary_slave;
@@ -245,8 +226,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- int (*xmit_hash_policy)(struct sk_buff *, int);
- u16 rr_tx_counter;
+ u32 rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
@@ -276,13 +256,7 @@ struct bonding {
static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
struct net_device *slave_dev)
{
- struct slave *slave = NULL;
-
- bond_for_each_slave(bond, slave)
- if (slave->dev == slave_dev)
- return slave;
-
- return NULL;
+ return netdev_lower_dev_get_private(bond->dev, slave_dev);
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +268,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
static inline bool bond_is_lb(const struct bonding *bond)
{
- return (bond->params.mode == BOND_MODE_TLB ||
- bond->params.mode == BOND_MODE_ALB);
+ return BOND_MODE_IS_LB(bond->params.mode);
}
static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +405,18 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
void bond_mii_monitor(struct work_struct *);
void bond_loadbalance_arp_mon(struct work_struct *);
void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +426,14 @@ void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+int bond_option_mode_set(struct bonding *bond, int mode);
+int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+struct net_device *bond_option_active_slave_get(struct bonding *bond);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -492,9 +470,24 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
static inline struct slave *bond_slave_has_mac(struct bonding *bond,
const u8 *mac)
{
+ struct list_head *iter;
struct slave *tmp;
- bond_for_each_slave(bond, tmp)
+ bond_for_each_slave(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+ const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return tmp;
@@ -528,4 +521,7 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
extern const struct bond_parm_tbl pri_reselect_tbl[];
extern struct bond_parm_tbl ad_select_tbl[];
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
#endif /* _LINUX_BONDING_H */
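The slave list now lives in the netdev lower-device lists, so the iteration macros take an explicit struct list_head * scratch iterator supplied by the caller. A minimal usage sketch (the helper name and body are illustrative, not part of this patch):

/* Illustrative only: count slaves whose link is up. Assumes the caller
 * already holds the locking documented for bond_for_each_slave(). */
static int bond_count_up_slaves(struct bonding *bond)
{
        struct list_head *iter;         /* scratch storage for the walk */
        struct slave *slave;
        int up = 0;

        bond_for_each_slave(bond, slave, iter)
                if (slave->link == BOND_LINK_UP)
                        up++;

        return up;
}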
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index b9ed1288ce2..985608634f8 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -686,18 +686,19 @@ static int cfv_probe(struct virtio_device *vdev)
goto err;
/* Get the CAIF configuration from virtio config space, if available */
-#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
- ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
- &_var, \
- FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
-
if (vdev->config->get) {
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
+ virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+ &cfv->tx_hr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+ &cfv->rx_hr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+ &cfv->tx_tr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+ &cfv->rx_tr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+ &cfv->mtu);
+ virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+ &cfv->mru);
} else {
cfv->tx_hr = CFV_DEF_HEADROOM;
cfv->rx_hr = CFV_DEF_HEADROOM;
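For comparison, the same config read before and after the conversion; both lines are taken from the hunk above, and the comments describe what virtio_cread() does (paraphrased, not quoted from the header):

/* Before: open-coded accessor macro built on vdev->config->get(). */
GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);

/* After: typed helper. virtio_cread() reads the field through
 * vdev->config->get() using the member's offset and size, and
 * type-checks &cfv->tx_hr against the member at compile time. */
virtio_cread(vdev, struct virtio_caif_transf_config, headroom, &cfv->tx_hr);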
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 693d8ffe465..cf0f63e14e5 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
priv->clk = clk;
- priv->pdata = pdev->dev.platform_data;
+ priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a2700d25ff0..8a0b515b33e 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
unsigned short *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data provided!\n");
err = -EINVAL;
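dev_get_platdata() is simply the accessor for dev->platform_data; the same mechanical conversion appears in at91_can above and in cc770, flexcan, janz-ican3, mcp251x, sja1000_platform, softing and ti_hecc below. An illustrative probe fragment modelled on the hunk above (the function name and return path are a sketch, not the driver's code):

/* Illustrative: fetch platform data through the accessor and bail out
 * cleanly when the board code provided none. */
static int example_probe(struct platform_device *pdev)
{
        unsigned short *pdata = dev_get_platdata(&pdev->dev);

        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                return -EINVAL;
        }

        /* ... use *pdata exactly as before via pdev->dev.platform_data ... */
        return 0;
}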
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a668cd491cb..e3fc07cf2f6 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -814,9 +814,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
msg_ctrl_save = priv->read_reg(priv,
C_CAN_IFACE(MSGCTRL_REG, 0));
- if (msg_ctrl_save & IF_MCONT_EOB)
- return num_rx_pkts;
-
if (msg_ctrl_save & IF_MCONT_MSGLST) {
c_can_handle_lost_msg_obj(dev, 0, msg_obj);
num_rx_pkts++;
@@ -824,6 +821,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
continue;
}
+ if (msg_ctrl_save & IF_MCONT_EOB)
+ return num_rx_pkts;
+
if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
continue;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index b374be7891a..bce0be54c2f 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
return 0;
out_free_c_can:
- pci_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
out_iounmap:
pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
unregister_c_can_dev(dev);
- pci_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
pci_iounmap(pdev, priv->base);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 294ced3cc22..d66ac265269 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(c_can_of_table),
+ .of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
.remove = c_can_plat_remove,
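of_match_ptr() is defined in <linux/of.h> roughly as below (paraphrased); it only pays off when the match table itself is compiled out under !CONFIG_OF, which is no longer the case for c_can_of_table, so the wrapper is dropped:

/* Paraphrase of the <linux/of.h> helper: the wrapper degrades to NULL
 * only when CONFIG_OF is disabled. */
#ifdef CONFIG_OF
#define of_match_ptr(_ptr)      (_ptr)
#else
#define of_match_ptr(_ptr)      NULL
#endif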
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 034bdd816a6..ad76734b3ec 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
- struct cc770_platform_data *pdata = pdev->dev.platform_data;
+ struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
priv->can.clock.freq = pdata->osc_freq;
if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
err = cc770_get_of_node_data(pdev, priv);
- else if (pdev->dev.platform_data)
+ else if (dev_get_platdata(&pdev->dev))
err = cc770_get_platform_data(pdev, priv);
else
err = -ENODEV;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 1870c4731a5..bda1888cae9 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -645,19 +645,6 @@ static int can_changelink(struct net_device *dev,
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- if (cm->flags & ~priv->ctrlmode_supported)
- return -EOPNOTSUPP;
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= cm->flags;
- }
-
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -680,6 +667,19 @@ static int can_changelink(struct net_device *dev,
}
}
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ if (cm->flags & ~priv->ctrlmode_supported)
+ return -EOPNOTSUPP;
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= cm->flags;
+ }
+
if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
@@ -702,17 +702,17 @@ static int can_changelink(struct net_device *dev,
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- size_t size;
-
- size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size_t size = 0;
+
+ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
return size;
}
@@ -726,23 +726,20 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
- nla_put(skb, IFLA_CAN_BITTIMING,
+ if (nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming) ||
+ (priv->bittiming_const &&
+ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
(priv->do_get_berr_counter &&
!priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)))
- goto nla_put_failure;
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)))
+ return -EMSGSIZE;
return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
}
static size_t can_get_xstats_size(const struct net_device *dev)
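The reordering keeps can_get_size() and can_fill_info() listing the netlink attributes in the same order, and every attribute the fill side may emit has to be counted on the size side. A reduced illustration of that contract using two of the attributes above (the function names are illustrative):

/* Illustrative pair: whatever may be nla_put() in .fill_info must be
 * accounted for in .get_size, or the message overflows with -EMSGSIZE. */
static size_t example_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(struct can_bittiming)) +   /* IFLA_CAN_BITTIMING */
               nla_total_size(sizeof(u32));                     /* IFLA_CAN_RESTART_MS */
}

static int example_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct can_priv *priv = netdev_priv(dev);

        if (nla_put(skb, IFLA_CAN_BITTIMING,
                    sizeof(priv->bittiming), &priv->bittiming) ||
            nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms))
                return -EMSGSIZE;
        return 0;
}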
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8f5ce747feb..ae08cf129eb 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1068,7 +1068,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
- priv->pdata = pdev->dev.platform_data;
+ priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 6aa737a2439..ab506d6cab3 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -34,10 +34,7 @@
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
-
#include <linux/of_platform.h>
-#include <asm/prom.h>
-
#include <linux/of_irq.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 36bd6fa1c7f..ab5909a7bae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
struct device *dev;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index fe7dd696957..08ac401e021 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
{
struct net_device *net;
struct mcp251x_priv *priv;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret = -ENODEV;
if (!pdata)
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 9c24d60a23b..e98abb97a05 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -297,8 +297,8 @@ struct mscan_priv {
struct napi_struct napi;
};
-extern struct net_device *alloc_mscandev(void);
-extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
-extern void unregister_mscandev(struct net_device *dev);
+struct net_device *alloc_mscandev(void);
+int register_mscandev(struct net_device *dev, int mscan_clksrc);
+void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
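The extern keyword is redundant on function prototypes, so the header declarations are trimmed to the bare form; the same cleanup recurs below in softing.h, 8390.h, bfin_mac.h, 7990.h and the atl1c/atl1e headers. The two declarations below are equivalent to the compiler, and the shorter one is the preferred style:

/* Both declare the same external function. */
extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
int register_mscandev(struct net_device *dev, int mscan_clksrc);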
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5c314a96197..5f0e9b3bfa7 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
pci_disable_msi(priv->dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
pch_can_reset(priv);
pci_iounmap(pdev, priv->regs);
free_candev(priv->ndev);
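The pci_set_drvdata(pdev, NULL) deletions here, in the sja1000 PCI drivers and in typhoon, ne2k-pci, starfire, amd8111e, pcnet32 and alx below rely on the driver core clearing drvdata once a driver is unbound, so the explicit reset is dead code. An illustrative teardown shape (the body is a sketch, not any one of these drivers):

/* Illustrative remove(): no pci_set_drvdata(pdev, NULL) needed, the
 * device core clears drvdata when the driver is unbound. */
static void example_pci_remove(struct pci_dev *pdev)
{
        struct net_device *ndev = pci_get_drvdata(pdev);

        unregister_netdev(ndev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        free_netdev(ndev);
}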
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 3752342a678..835921388e7 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
kfree(card);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void ems_pci_card_reset(struct ems_pci_card *card)
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 217585b97cd..087b13bd300 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver kvaser_pci_driver = {
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6b6f0ad7509..065ca49eb45 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cfg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver peak_pci_driver = {
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index c52c1e96bf9..f9b4f81cd86 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
kfree(card);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/*
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 31ad3391116..047accd4ede 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -44,7 +44,6 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <asm/prom.h>
#include "sja1000.h"
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 8e259c54103..29f9b632118 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq;
struct sja1000_platform_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data provided!\n");
err = -ENODEV;
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index afd7d85b691..35f062282db 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -71,34 +71,34 @@ struct softing {
} id;
};
-extern int softing_default_output(struct net_device *netdev);
+int softing_default_output(struct net_device *netdev);
-extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+ktime_t softing_raw2ktime(struct softing *card, u32 raw);
-extern int softing_chip_poweron(struct softing *card);
+int softing_chip_poweron(struct softing *card);
-extern int softing_bootloader_command(struct softing *card, int16_t cmd,
- const char *msg);
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg);
/* Load firmware after reset */
-extern int softing_load_fw(const char *file, struct softing *card,
- __iomem uint8_t *virt, unsigned int size, int offset);
+int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *virt, unsigned int size, int offset);
/* Load final application firmware after bootloader */
-extern int softing_load_app_fw(const char *file, struct softing *card);
+int softing_load_app_fw(const char *file, struct softing *card);
/*
* enable or disable irq
* only called with fw.lock locked
*/
-extern int softing_enable_irq(struct softing *card, int enable);
+int softing_enable_irq(struct softing *card, int enable);
/* start/stop 1 bus on card */
-extern int softing_startstop(struct net_device *netdev, int up);
+int softing_startstop(struct net_device *netdev, int up);
/* netif_rx() */
-extern int softing_netdev_rx(struct net_device *netdev,
- const struct can_frame *msg, ktime_t ktime);
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+ ktime_t ktime);
/* SOFTING DPRAM mappings */
#define DPRAM_RX 0x0000
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 65eef1eea2e..6cd5c01b624 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
static int softing_pdev_probe(struct platform_device *pdev)
{
- const struct softing_platform_data *pdat = pdev->dev.platform_data;
+ const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
struct softing *card;
struct net_device *netdev;
struct softing_priv *priv;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 3a349a22d5b..60d95b44d0f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -286,15 +286,6 @@ static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
}
-static int ti_hecc_get_state(const struct net_device *ndev,
- enum can_state *state)
-{
- struct ti_hecc_priv *priv = netdev_priv(ndev);
-
- *state = priv->can.state;
- return 0;
-}
-
static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
{
struct can_bittiming *bit_timing = &priv->can.bittiming;
@@ -894,7 +885,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
void __iomem *addr;
int err = -ENODEV;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
goto probe_exit;
@@ -940,7 +931,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
- priv->can.do_get_state = ti_hecc_get_state;
priv->can.do_get_berr_counter = ti_hecc_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 3b954658824..4b2d5ed62b1 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1544,9 +1544,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
return 0;
}
-static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
- struct usb_endpoint_descriptor **in,
- struct usb_endpoint_descriptor **out)
+static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
+ struct usb_endpoint_descriptor **in,
+ struct usb_endpoint_descriptor **out)
{
const struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
@@ -1557,12 +1557,18 @@ static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
- if (usb_endpoint_is_bulk_in(endpoint))
+ if (!*in && usb_endpoint_is_bulk_in(endpoint))
*in = endpoint;
- if (usb_endpoint_is_bulk_out(endpoint))
+ if (!*out && usb_endpoint_is_bulk_out(endpoint))
*out = endpoint;
+
+ /* use first bulk endpoint for in and out */
+ if (*in && *out)
+ return 0;
}
+
+ return -ENODEV;
}
static int kvaser_usb_probe(struct usb_interface *intf,
@@ -1576,8 +1582,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
if (!dev)
return -ENOMEM;
- kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
- if (!dev->bulk_in || !dev->bulk_out) {
+ err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
+ if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
return err;
}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index b710c6b2d65..bd8f84b0b89 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -88,10 +88,16 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
+ int i;
dev->dstats = alloc_percpu(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_dstats *dstats;
+ dstats = per_cpu_ptr(dev->dstats, i);
+ u64_stats_init(&dstats->syncp);
+ }
return 0;
}
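u64_stats_init() has to run once for every per-CPU syncp before the counters are used; on 32-bit builds it initialises the seqcount that guards the 64-bit counters (on 64-bit it is a no-op). A condensed sketch of the allocate-then-init pairing (the function name is illustrative, the struct is the driver's):

/* Illustrative init: allocate per-CPU stats and initialise each syncp. */
static int example_dev_init(struct net_device *dev)
{
        int cpu;

        dev->dstats = alloc_percpu(struct pcpu_dstats);
        if (!dev->dstats)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                u64_stats_init(&per_cpu_ptr(dev->dstats, cpu)->syncp);

        return 0;
}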
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index f00c76377b4..65b735d4a6a 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -35,7 +35,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on (ISA || EISA) && ISA_DMA_API
+ depends on ISA && ISA_DMA_API
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
select MII
---help---
This option enables driver support for a large number of 10Mbps and
- 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+ 10/100Mbps EISA, PCI and Cardbus 3Com network cards:
"Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
"Boomerang" (EtherLink XL 3c900 or 3c905) PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 144942f6372..465cc7108d8 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index ef325ffa1b5..2923c51bb35 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -28,42 +28,42 @@ extern int ei_debug;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
#endif
/* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
extern const struct net_device_ops ei_netdev_ops;
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
static inline struct net_device *alloc_ei_netdev(void)
{
return __alloc_ei_netdev(0);
}
/* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
extern const struct net_device_ops eip_netdev_ops;
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
static inline struct net_device *alloc_eip_netdev(void)
{
return __alloc_eip_netdev(0);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index becef25fa19..0988811f4e4 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -146,13 +146,6 @@ config PCMCIA_PCNET
To compile this driver as a module, choose M here: the module will be
called pcnet_cs. If unsure, say N.
-config NE_H8300
- tristate "NE2000 compatible support for H8/300"
- depends on H8300H_AKI3068NET || H8300H_H8MAX
- ---help---
- Say Y here if you want to use the NE2000 compatible
- controller on the Renesas H8/300 processor.
-
config STNIC
tristate "National DP83902AV support"
depends on SUPERH
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 588954a79b2..ff3b3189418 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index f92f001551d..36fa577970b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, 6);
+ memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
}
#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
deleted file mode 100644
index 7fc28f2d28a..00000000000
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ /dev/null
@@ -1,684 +0,0 @@
-/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
-/*
- original ne.c
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
- H8/300 modified
- Yoshinori Sato <ysato@users.sourceforge.jp>
-*/
-
-static const char version1[] =
-"ne-h8300.c:v1.00 2004/04/11 ysato\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-
-#include "8390.h"
-
-#define DRV_NAME "ne-h8300"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE 0x40 */
-
-/* A zero-terminated list of I/O addresses to be probed at boot. */
-
-/* ---- No user-serviceable parts below ---- */
-
-static const char version[] =
- "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include "lib8390.c"
-
-#define NE_BASE (dev->base_addr)
-#define NE_CMD 0x00
-#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */
-#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT (ei_status.word16?0x40:0x20)
-
-#define NESM_START_PG 0x40 /* First page of TX buffer */
-#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-static int ne_probe1(struct net_device *dev, int ioaddr);
-
-static int ne_open(struct net_device *dev);
-static int ne_close(struct net_device *dev);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
-
-
-static u32 reg_offset[16];
-
-static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- int i;
- unsigned char bus_width;
-
- bus_width = *(volatile unsigned char *)ABWCR;
- bus_width &= 1 << ((base_addr >> 21) & 7);
-
- for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
- if (bus_width == 0)
- reg_offset[i] = i * 2 + 1;
- else
- reg_offset[i] = i;
-
- ei_local->reg_offset = reg_offset;
- return 0;
-}
-
-static int __initdata h8300_ne_count = 0;
-#ifdef CONFIG_H8300H_H8MAX
-static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
-static int h8300_ne_irq[] = {EXT_IRQ4};
-#endif
-#ifdef CONFIG_H8300H_AKI3068NET
-static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
-static int h8300_ne_irq[] = {EXT_IRQ5};
-#endif
-
-static inline int init_dev(struct net_device *dev)
-{
- if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
- dev->base_addr = h8300_ne_base[h8300_ne_count];
- dev->irq = h8300_ne_irq[h8300_ne_count];
- h8300_ne_count++;
- return 0;
- } else
- return -ENODEV;
-}
-
-/* Probe for various non-shared-memory ethercards.
-
- NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
- buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
- the SAPROM, while other supposed NE2000 clones must be detected by their
- SA prefix.
-
- Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
- mode results in doubled values, which can be detected and compensated for.
-
- The probe is also responsible for initializing the card and filling
- in the 'dev' and 'ei_status' structures.
-
- We use the minimum memory size for some ethercard product lines, iff we can't
- distinguish models. You can increase the packet buffer size by setting
- PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
- E1010 starts at 0x100 and ends at 0x2000.
- E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
- E2010 starts at 0x100 and ends at 0x4000.
- E2010-x starts at 0x100 and ends at 0xffff. */
-
-static int __init do_ne_probe(struct net_device *dev)
-{
- unsigned int base_addr = dev->base_addr;
-
- /* First check any supplied i/o locations. User knows best. <cough> */
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return ne_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init ne_probe(int unit)
-{
- struct net_device *dev = ____alloc_ei_netdev(0);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (init_dev(dev))
- return ERR_PTR(-ENODEV);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = init_reg_offset(dev, dev->base_addr);
- if (err)
- goto out;
-
- err = do_ne_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ne_netdev_ops = {
- .ndo_open = ne_open,
- .ndo_stop = ne_close,
-
- .ndo_start_xmit = __ei_start_xmit,
- .ndo_tx_timeout = __ei_tx_timeout,
- .ndo_get_stats = __ei_get_stats,
- .ndo_set_rx_mode = __ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = __ei_poll,
-#endif
-};
-
-static int __init ne_probe1(struct net_device *dev, int ioaddr)
-{
- int i;
- unsigned char SA_prom[16];
- int wordlength = 2;
- const char *name = NULL;
- int start_page, stop_page;
- int reg0, ret;
- static unsigned version_printed;
- struct ei_device *ei_local = netdev_priv(dev);
- unsigned char bus_width;
-
- if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- reg0 = inb_p(ioaddr);
- if (reg0 == 0xFF) {
- ret = -ENODEV;
- goto err_out;
- }
-
- /* Do a preliminary verification that we have a 8390. */
- {
- int regd;
- outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
- regd = inb_p(ioaddr + EI_SHIFT(0x0d));
- outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
- outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
- inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
- if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
- outb_p(reg0, ioaddr + EI_SHIFT(0));
- outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */
- ret = -ENODEV;
- goto err_out;
- }
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version1);
-
- printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
-
- /* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
- We can't reliably read the SAPROM address without this.
- (I learned the hard way!). */
- {
- struct {unsigned char value, offset; } program_seq[] =
- {
- {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
- {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
- {0x00, EN0_RCNTLO}, /* Clear the count regs. */
- {0x00, EN0_RCNTHI},
- {0x00, EN0_IMR}, /* Mask completion irq. */
- {0xFF, EN0_ISR},
- {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
- {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
- {32, EN0_RCNTLO},
- {0x00, EN0_RCNTHI},
- {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
- {0x00, EN0_RSARHI},
- {E8390_RREAD+E8390_START, E8390_CMD},
- };
-
- for (i = 0; i < ARRAY_SIZE(program_seq); i++)
- outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
-
- }
- bus_width = *(volatile unsigned char *)ABWCR;
- bus_width &= 1 << ((ioaddr >> 21) & 7);
- ei_status.word16 = (bus_width == 0); /* temporary setting */
- for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
- SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
- inb_p(ioaddr + NE_DATAPORT); /* dummy read */
- }
-
- start_page = NESM_START_PG;
- stop_page = NESM_STOP_PG;
-
- if (bus_width)
- wordlength = 1;
- else
- outb_p(0x49, ioaddr + EN0_DCFG);
-
- /* Set up the rest of the parameters. */
- name = (wordlength == 2) ? "NE2000" : "NE1000";
-
- if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
- ret = -EAGAIN;
- goto err_out;
- }
-
- /* Snarf the interrupt now. There's no point in waiting since we cannot
- share and the board will usually be enabled. */
- ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
- if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
- goto err_out;
- }
-
- dev->base_addr = ioaddr;
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
- printk(" %pM\n", dev->dev_addr);
-
- printk("%s: %s found at %#x, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
-
- ei_status.name = name;
- ei_status.tx_start_page = start_page;
- ei_status.stop_page = stop_page;
- ei_status.word16 = (wordlength == 2);
-
- ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
- /* Allow the packet buffer size to be overridden by know-it-alls. */
- ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
- ei_status.reset_8390 = &ne_reset_8390;
- ei_status.block_input = &ne_block_input;
- ei_status.block_output = &ne_block_output;
- ei_status.get_8390_hdr = &ne_get_8390_hdr;
- ei_status.priv = 0;
-
- dev->netdev_ops = &ne_netdev_ops;
-
- __NS8390_init(dev, 0);
-
- ret = register_netdev(dev);
- if (ret)
- goto out_irq;
- return 0;
-out_irq:
- free_irq(dev->irq, dev);
-err_out:
- release_region(ioaddr, NE_IO_EXTENT);
- return ret;
-}
-
-static int ne_open(struct net_device *dev)
-{
- __ei_open(dev);
- return 0;
-}
-
-static int ne_close(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- __ei_close(dev);
- return 0;
-}
-
-/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
-
-static void ne_reset_8390(struct net_device *dev)
-{
- unsigned long reset_start_time = jiffies;
- struct ei_device *ei_local = netdev_priv(dev);
-
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
-
- /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
- outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
- ei_status.txing = 0;
- ei_status.dmaing = 0;
-
- /* This check _should_not_ be necessary, omit eventually. */
- while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
- if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
- }
- outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
-
- ei_status.dmaing |= 0x01;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
- outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
- outb_p(0, NE_BASE + EN0_RCNTHI);
- outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */
- outb_p(ring_page, NE_BASE + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-
- if (ei_status.word16) {
- int len;
- unsigned short *p = (unsigned short *)hdr;
- for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
- *p++ = inw(NE_BASE + NE_DATAPORT);
- } else
- insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-
- le16_to_cpus(&hdr->count);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- struct ei_device *ei_local = netdev_priv(dev);
-#ifdef NE_SANITY_CHECK
- int xfer_count = count;
-#endif
- char *buf = skb->data;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
- outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
- outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
- outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
- outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
- if (ei_status.word16)
- {
- int len;
- unsigned short *p = (unsigned short *)buf;
- for (len = count>>1; len > 0; len--)
- *p++ = inw(NE_BASE + NE_DATAPORT);
- if (count & 0x01)
- {
- buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
- xfer_count++;
-#endif
- }
- } else {
- insb(NE_BASE + NE_DATAPORT, buf, count);
- }
-
-#ifdef NE_SANITY_CHECK
- /* This was for the ALPHA version only, but enough people have
- been encountering problems so it is still here. If you see
- this message you either 1) have a slightly incompatible clone
- or 2) have noise/speed problems with your bus. */
-
- if (ei_debug > 1)
- {
- /* DMA termination address check... */
- int addr, tries = 20;
- do {
- /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
- -- it's broken for Rx on some cards! */
- int high = inb_p(NE_BASE + EN0_RSARHI);
- int low = inb_p(NE_BASE + EN0_RSARLO);
- addr = (high << 8) + low;
- if (((ring_offset + xfer_count) & 0xff) == low)
- break;
- } while (--tries > 0);
- if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
- }
-#endif
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
- const unsigned char *buf, const int start_page)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
- int retries = 0;
-#endif
-
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
-
- if (ei_status.word16 && (count & 0x01))
- count++;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- /* We should already be in page 0, but to be safe... */
- outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
- /* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work.
- Actually this doesn't always work either, but if you have
- problems with your NEx000 this is better than nothing! */
-
- outb_p(0x42, NE_BASE + EN0_RCNTLO);
- outb_p(0x00, NE_BASE + EN0_RCNTHI);
- outb_p(0x42, NE_BASE + EN0_RSARLO);
- outb_p(0x00, NE_BASE + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
- /* Make certain that the dummy read has occurred. */
- udelay(6);
-#endif
-
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
-
- /* Now the normal output. */
- outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
- outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
- outb_p(0x00, NE_BASE + EN0_RSARLO);
- outb_p(start_page, NE_BASE + EN0_RSARHI);
-
- outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
- if (ei_status.word16) {
- int len;
- unsigned short *p = (unsigned short *)buf;
- for (len = count>>1; len > 0; len--)
- outw(*p++, NE_BASE + NE_DATAPORT);
- } else {
- outsb(NE_BASE + NE_DATAPORT, buf, count);
- }
-
- dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
- /* This was for the ALPHA version only, but enough people have
- been encountering problems so it is still here. */
-
- if (ei_debug > 1)
- {
- /* DMA termination address check... */
- int addr, tries = 20;
- do {
- int high = inb_p(NE_BASE + EN0_RSARHI);
- int low = inb_p(NE_BASE + EN0_RSARLO);
- addr = (high << 8) + low;
- if ((start_page << 8) + count == addr)
- break;
- } while (--tries > 0);
-
- if (tries <= 0)
- {
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
- if (retries++ == 0)
- goto retry;
- }
- }
-#endif
-
- while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
- if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
- ne_reset_8390(dev);
- __NS8390_init(dev,1);
- break;
- }
-
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-}
-
-
-#ifdef MODULE
-#define MAX_NE_CARDS 1 /* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that no ISA autoprobe takes place. We can't guarantee
-that the ne2k probe is the last 8390 based probe to take place (as it
-is at boot) and so the probe will get confused by any other 8390 cards.
-ISA device autoprobes on a running machine are not recommended anyway. */
-
-int init_module(void)
-{
- int this_dev, found = 0;
- int err;
-
- for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = ____alloc_ei_netdev(0);
- if (!dev)
- break;
- if (io[this_dev]) {
- dev->irq = irq[this_dev];
- dev->mem_end = bad[this_dev];
- dev->base_addr = io[this_dev];
- } else {
- dev->base_addr = h8300_ne_base[this_dev];
- dev->irq = h8300_ne_irq[this_dev];
- }
- err = init_reg_offset(dev, dev->base_addr);
- if (!err) {
- if (do_ne_probe(dev) == 0) {
- dev_ne[found++] = dev;
- continue;
- }
- }
- free_netdev(dev);
- if (found)
- break;
- if (io[this_dev] != 0)
- printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr);
- else
- printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
- return -ENXIO;
- }
- if (found)
- return 0;
- return -ENODEV;
-}
-
-void cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = dev_ne[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 92201080e07..fc14a85e4d5 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -389,9 +389,7 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
- pci_set_drvdata (pdev, NULL);
return -ENODEV;
-
}
/*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8b04bfc20cf..171d73c1d3c 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
return 0;
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
iounmap(base);
err_out_free_res:
pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
iounmap(np->base);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev); /* Will also free np!! */
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 7a07ee07906..6dec86ac97c 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -104,6 +104,6 @@ struct bfin_mac_local {
#endif
};
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b9642..ae33a99bf47 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -242,13 +242,13 @@ struct lance_private
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
#endif
#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 1b1429d5d5c..d042511bdc1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:
err_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 10ceca523fc..e07ce5ff2d4 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+ memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
break;
case NEW_RIEBL:
- lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+ lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
break;
case PAM_CARD:
i = IO->eeprom;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 91d52b49584..427c148bb64 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, 6);
+ memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
} else {
/* Set a random MAC since no valid provided by platform_data. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 94edc9c6fbb..57397295887 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
}
clen = len & 1;
- rtp = tp;
- rfp = fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
* do the rest, if any.
*/
clen = len & 15;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
clen = len & 1;
- rtp = tp;
- rfp = fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
* do the rest, if any.
*/
clen = len & 15;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -725,7 +725,6 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
- clear_ioasic_dma_irq(irq);
printk(KERN_ERR "%s: DMA error\n", dev->name);
return IRQ_HANDLED;
}
@@ -812,7 +811,7 @@ static int lance_open(struct net_device *dev)
if (lp->dma_irq >= 0) {
unsigned long flags;
- if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
"lance error", dev)) {
free_irq(dev->irq, dev);
printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 5c728436b85..256f590f6bb 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
int i;
if (dev->irq == 0 ||
- request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+ request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
return -EAGAIN;
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 2d8e2881977..38492e0b704 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, 6);
+ memcpy(dev->dev_addr, promaddr, ETH_ALEN);
}
}
@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
lp->init_block, lp->init_dma_addr);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a597b766f08..daae0e01625 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
if (skb != NULL) {
data = skb_put(skb, ETHERMINPACKET);
memset(data, 0, ETHERMINPACKET);
- memcpy(data, dev->dev_addr, 6);
- memcpy(data+6, dev->dev_addr, 6);
+ memcpy(data, dev->dev_addr, ETH_ALEN);
+ memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
bmac_transmit_packet(skb, dev);
}
spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 4ce8ceb6220..58a200df4c3 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -211,6 +211,7 @@ static int mace_probe(struct platform_device *pdev)
mp = netdev_priv(dev);
mp->device = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = (u32)MACE_BASE;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 9e160148726..b2ffad1304d 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -628,12 +628,12 @@ static const struct net_device_ops arc_emac_netdev_ops = {
static int arc_emac_probe(struct platform_device *pdev)
{
- struct resource res_regs, res_irq;
+ struct resource res_regs;
struct device_node *phy_node;
struct arc_emac_priv *priv;
struct net_device *ndev;
const char *mac_addr;
- unsigned int id, clock_frequency;
+ unsigned int id, clock_frequency, irq;
int err;
if (!pdev->dev.of_node)
@@ -661,8 +661,8 @@ static int arc_emac_probe(struct platform_device *pdev)
}
/* Get IRQ from device tree */
- err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq);
- if (!err) {
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq) {
dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
return -ENODEV;
}
@@ -671,6 +671,7 @@ static int arc_emac_probe(struct platform_device *pdev)
if (!ndev)
return -ENOMEM;
+ platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -711,7 +712,7 @@ static int arc_emac_probe(struct platform_device *pdev)
goto out;
}
- ndev->irq = res_irq.start;
+ ndev->irq = irq;
dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
/* Register interrupt handler for device */
@@ -725,10 +726,10 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Get MAC address from device tree */
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!mac_addr || !is_valid_ether_addr(mac_addr))
- eth_hw_addr_random(ndev);
- else
+ if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ else
+ eth_hw_addr_random(ndev);
dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
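
The arc_emac hunks above swap of_irq_to_resource() for irq_of_parse_and_map(), which hands back a ready-to-use virtual IRQ number (0 on failure), and prefer a device-tree MAC address before falling back to a random one. The two lookups in isolation, as a hedged sketch built around a hypothetical helper:

	#include <linux/platform_device.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/of_irq.h>
	#include <linux/of_net.h>

	/* Hypothetical helper, not part of the driver. */
	static int foo_get_irq_and_mac(struct platform_device *pdev,
				       struct net_device *ndev)
	{
		const char *mac_addr;
		unsigned int irq;

		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
		if (!irq)
			return -ENODEV;
		ndev->irq = irq;

		mac_addr = of_get_mac_address(pdev->dev.of_node);
		if (mac_addr)
			memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
		else
			eth_hw_addr_random(ndev);

		return 0;
	}
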
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index fc95b235e21..c3c4c266b84 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(alx->dev);
}
@@ -1389,6 +1388,9 @@ static int alx_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+
+ alx_reset_phy(hw);
if (!netif_running(alx->dev))
return 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 0f0556526ba..7f9369a3b37 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -600,7 +600,7 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 3ef7092e3f1..1cda49a28f7 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
{
int i;
- int ret = false;
+ bool ret = false;
u32 otp_ctrl_data;
u32 control;
u32 data;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934585e..1b0fe2d04a0 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -499,10 +499,10 @@ struct atl1e_adapter {
extern char atl1e_driver_name[];
extern char atl1e_driver_version[];
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1966444590f..7a73f3a9fcb 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+ if (features & NETIF_F_RXALL) {
+ /* enable RX of ALL frames */
+ *mac_ctrl_data |= MAC_CTRL_DBG;
+ } else {
+ /* disable RX of ALL frames */
+ *mac_ctrl_data &= ~MAC_CTRL_DBG;
+ }
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u32 mac_ctrl_data = 0;
+
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+ atl1e_irq_disable(adapter);
+ mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+ __atl1e_rx_mode(features, &mac_ctrl_data);
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1e_irq_enable(adapter);
+}
+
+
static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1e_vlan_mode(netdev, features);
+ if (changed & NETIF_F_RXALL)
+ atl1e_rx_mode(netdev, features);
+
+
return 0;
}
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
value |= MAC_CTRL_PROMIS_EN;
if (netdev->flags & IFF_ALLMULTI)
value |= MAC_CTRL_MC_ALL_EN;
-
+ if (netdev->features & NETIF_F_RXALL)
+ value |= MAC_CTRL_DBG;
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page_desc[que].rx_nxseq++;
/* error packet */
- if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+ if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+ !(netdev->features & NETIF_F_RXALL)) {
if (prrs->err_flag & (RRS_ERR_BAD_CRC |
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
}
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
- RRS_PKT_SIZE_MASK) - 4; /* CRC */
+ RRS_PKT_SIZE_MASK);
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ packet_size -= 4; /* CRC */
+
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL)
goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_LLTX |
NETIF_F_HW_VLAN_CTAG_TX;
-
+ /* not enabled by default */
+ netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
return 0;
}
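
The atl1e changes above expose NETIF_F_RXALL and NETIF_F_RXFCS as optional hw_features and honour them in the receive path: error frames are only dropped when RXALL is off, and the trailing CRC is only trimmed when RXFCS is off (as in the packet_size hunk). The general shape of reacting to just the changed feature bits, with hypothetical foo_* names standing in for the driver's helpers:

	#include <linux/netdevice.h>

	static void foo_rx_mode(struct net_device *netdev, netdev_features_t features)
	{
		/* e.g. toggle the MAC's "pass all frames" / debug bit here */
	}

	static int foo_set_features(struct net_device *netdev,
				    netdev_features_t features)
	{
		netdev_features_t changed = netdev->features ^ features;

		if (changed & NETIF_F_RXALL)
			foo_rx_mode(netdev, features);

		return 0;
	}

The FCS side mirrors the hunk above: the computed packet size is only reduced by the 4-byte CRC when NETIF_F_RXFCS is clear.
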
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 3ebe19f7242..2f27d4c4c3a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -42,7 +42,7 @@
#include "atlx.h"
#ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
#endif
#define PCI_COMMAND_REGISTER PCI_COMMAND
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9b017d9c58e..90e54d5488d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
static void b44_tx(struct b44 *bp)
{
u32 cur, cons;
+ unsigned bytes_compl = 0, pkts_compl = 0;
cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
+
+ bytes_compl += skb->len;
+ pkts_compl++;
+
dev_kfree_skb_irq(skb);
}
+ netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
bp->tx_cons = cons;
if (netif_queue_stopped(bp->dev) &&
TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bp->flags & B44_FLAG_REORDER_BUG)
br32(bp, B44_DMATX_PTR);
+ netdev_sent_queue(dev, skb->len);
+
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
val = br32(bp, B44_ENET_CTRL);
bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+ netdev_reset_queue(bp->dev);
}
static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, 6);
+ memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
@@ -2183,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
- dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
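
The b44 hunks above add Byte Queue Limits accounting: bytes are reported with netdev_sent_queue() when a frame is queued to hardware, credited back with netdev_completed_queue() at TX-completion time, and the state is resynchronised with netdev_reset_queue() when the ring is reinitialised. (The same section also folds the dma_set_mask()/dma_set_coherent_mask() pair into a single dma_set_mask_and_coherent() call.) A minimal sketch of the BQL pattern, with hypothetical foo_* functions:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... post the descriptor to hardware ... */
		netdev_sent_queue(dev, skb->len);	/* bytes handed to the NIC */
		return NETDEV_TX_OK;
	}

	static void foo_tx_complete(struct net_device *dev)
	{
		unsigned int pkts = 0, bytes = 0;

		/* for each reclaimed skb: bytes += skb->len; pkts++; free the skb */
		netdev_completed_queue(dev, pkts, bytes);
	}

	static void foo_ring_reset(struct net_device *dev)
	{
		/* after the TX ring is torn down and rebuilt */
		netdev_reset_queue(dev);
	}
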
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 249468f9536..e2aa09ce6af 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
dma_desc->ctl0 = cpu_to_le32(ctl0);
dma_desc->ctl1 = cpu_to_le32(ctl1);
+ netdev_sent_queue(net_dev, skb->len);
+
wmb();
/* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
struct device *dma_dev = bgmac->core->dma_dev;
int empty_slot;
bool freed = false;
+ unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->skb->len, DMA_TO_DEVICE);
slot->dma_addr = 0;
+ bytes_compl += slot->skb->len;
+ pkts_compl++;
+
/* Free memory! :) */
dev_kfree_skb(slot->skb);
slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
freed = true;
}
+ netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
if (freed && netif_queue_stopped(bgmac->net_dev))
netif_wake_queue(bgmac->net_dev);
}
@@ -244,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
struct bgmac_slot_info *slot)
{
struct device *dma_dev = bgmac->core->dma_dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
struct bgmac_rx_header *rx;
/* Alloc skb */
- slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb)
+ skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+ if (!skb)
return -ENOMEM;
/* Poison - if everything goes fine, hardware will overwrite it */
- rx = (struct bgmac_rx_header *)slot->skb->data;
+ rx = (struct bgmac_rx_header *)skb->data;
rx->len = cpu_to_le16(0xdead);
rx->flags = cpu_to_le16(0xbeef);
/* Map skb for the DMA */
- slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
bgmac_err(bgmac, "DMA mapping error\n");
+ dev_kfree_skb(skb);
return -ENOMEM;
}
+
+ /* Update the slot */
+ slot->skb = skb;
+ slot->dma_addr = dma_addr;
+
if (slot->dma_addr & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
return 0;
}
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring, int desc_idx)
+{
+ struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+ u32 ctl0 = 0, ctl1 = 0;
+
+ if (desc_idx == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+ /* Is there any BGMAC device that requires extension? */
+ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+ * B43_DMA64_DCTL1_ADDREXT_MASK;
+ */
+
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
int weight)
{
@@ -287,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
struct sk_buff *skb = slot->skb;
- struct sk_buff *new_skb;
struct bgmac_rx_header *rx;
u16 len, flags;
@@ -300,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
len = le16_to_cpu(rx->len);
flags = le16_to_cpu(rx->flags);
- /* Check for poison and drop or pass the packet */
- if (len == 0xdead && flags == 0xbeef) {
- bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
- ring->start);
- } else {
+ do {
+ dma_addr_t old_dma_addr = slot->dma_addr;
+ int err;
+
+ /* Check for poison and drop or pass the packet */
+ if (len == 0xdead && flags == 0xbeef) {
+ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
+ dma_sync_single_for_device(dma_dev,
+ slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ break;
+ }
+
/* Omit CRC. */
len -= ETH_FCS_LEN;
- new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
- if (new_skb) {
- skb_put(new_skb, len);
- skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
- new_skb->data,
- len);
- skb_checksum_none_assert(skb);
- new_skb->protocol =
- eth_type_trans(new_skb, bgmac->net_dev);
- netif_receive_skb(new_skb);
- handled++;
- } else {
- bgmac->net_dev->stats.rx_dropped++;
- bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+ /* Prepare new skb as replacement */
+ err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+ if (err) {
+ /* Poison the old skb */
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ dma_sync_single_for_device(dma_dev,
+ slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ break;
}
+ bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
- /* Poison the old skb */
- rx->len = cpu_to_le16(0xdead);
- rx->flags = cpu_to_le16(0xbeef);
- }
+ /* Unmap old skb, we'll pass it to the netfif */
+ dma_unmap_single(dma_dev, old_dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+ skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
- /* Make it back accessible to the hardware */
- dma_sync_single_for_device(dma_dev, slot->dma_addr,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ netif_receive_skb(skb);
+ handled++;
+ } while (0);
if (++ring->start >= BGMAC_RX_RING_SLOTS)
ring->start = 0;
@@ -495,8 +543,6 @@ err_dma_free:
static void bgmac_dma_init(struct bgmac *bgmac)
{
struct bgmac_dma_ring *ring;
- struct bgmac_dma_desc *dma_desc;
- u32 ctl0, ctl1;
int i;
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -529,23 +575,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
if (ring->unaligned)
bgmac_dma_rx_enable(bgmac, ring);
- for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
- j++, dma_desc++) {
- ctl0 = ctl1 = 0;
-
- if (j == ring->num_slots - 1)
- ctl0 |= BGMAC_DESC_CTL0_EOT;
- ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
- /* Is there any BGMAC device that requires extension? */
- /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
- * B43_DMA64_DCTL1_ADDREXT_MASK;
- */
-
- dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
- dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
- dma_desc->ctl0 = cpu_to_le32(ctl0);
- dma_desc->ctl1 = cpu_to_le32(ctl1);
- }
+ for (j = 0; j < ring->num_slots; j++)
+ bgmac_dma_rx_setup_desc(bgmac, ring, j);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
ring->index_base +
@@ -988,6 +1019,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
bgmac_miiconfig(bgmac);
bgmac_phy_init(bgmac);
+ netdev_reset_queue(bgmac->net_dev);
+
bgmac->int_status = 0;
}
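
The reworked bgmac receive path above stops copying every frame into a freshly allocated skb: bgmac_dma_rx_skb_for_slot() now only commits the new skb and mapping to the slot once both succeed, and bgmac_dma_rx_read() installs a replacement buffer, unmaps the old one, and passes the original skb straight to the stack, re-poisoning and recycling the old buffer if the refill fails. A condensed, generic sketch of that replace-and-pass-up idea (toy foo_* types and names, error handling trimmed):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	struct foo_slot {
		struct sk_buff *skb;
		dma_addr_t dma_addr;
	};

	static int foo_refill_slot(struct device *dma_dev, struct net_device *ndev,
				   struct foo_slot *slot, unsigned int buf_size)
	{
		struct sk_buff *skb = netdev_alloc_skb(ndev, buf_size);
		dma_addr_t dma_addr;

		if (!skb)
			return -ENOMEM;

		dma_addr = dma_map_single(dma_dev, skb->data, buf_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		/* Touch the slot only after both allocation and mapping succeeded */
		slot->skb = skb;
		slot->dma_addr = dma_addr;
		return 0;
	}

	static void foo_rx_one(struct device *dma_dev, struct net_device *ndev,
			       struct foo_slot *slot, unsigned int buf_size,
			       unsigned int len)
	{
		struct sk_buff *skb = slot->skb;
		dma_addr_t old_dma = slot->dma_addr;

		if (foo_refill_slot(dma_dev, ndev, slot, buf_size)) {
			/* No replacement buffer: hand the old one back to the NIC */
			dma_sync_single_for_device(dma_dev, old_dma, buf_size,
						   DMA_FROM_DEVICE);
			return;
		}

		dma_unmap_single(dma_dev, old_dma, buf_size, DMA_FROM_DEVICE);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
	}
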
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f74b6..d9980ad00b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
- memcpy(packet, bp->dev->dev_addr, 6);
- memset(packet + 6, 0x0, 8);
+ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+ memset(packet + ETH_ALEN, 0x0, 8);
for (i = 14; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
@@ -8413,7 +8413,6 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- memcpy(dev->dev_addr, bp->mac_addr, 6);
+ memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_free:
free_netdev(dev);
return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c5e375ddd6c..a1f66e2c9a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1376,7 +1376,6 @@ enum {
BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
- BNX2X_SP_RTNL_TX_RESUME,
};
struct bnx2x_prev_path_list {
@@ -1546,6 +1545,7 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1876,6 +1876,8 @@ struct bnx2x {
u32 dump_preset_idx;
bool stats_started;
struct semaphore stats_sema;
+
+ u8 phys_port_id[ETH_ALEN];
};
/* Tx queues may be less or equal to Rx queues */
@@ -2232,7 +2234,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define BNX2X_NUM_TESTS_SF 7
#define BNX2X_NUM_TESTS_MF 3
#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
- BNX2X_NUM_TESTS_SF)
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
@@ -2492,12 +2494,6 @@ enum {
#define NUM_MACS 8
-enum bnx2x_pci_bus_speed {
- BNX2X_PCI_LINK_SPEED_2500 = 2500,
- BNX2X_PCI_LINK_SPEED_5000 = 5000,
- BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
void bnx2x_set_local_cmng(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4ab4c89c60c..ec96130533c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2545,10 +2545,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- LOAD_ERROR_EXIT(bp, load_error0);
-
/* need to be done after alloc mem, since it's self adjusting to amount
* of memory available for RSS queues
*/
@@ -2558,6 +2554,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error0);
}
+ /* Allocated memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
/* request pf to initialize status blocks */
if (IS_VF(bp)) {
rc = bnx2x_vfpf_init(bp);
@@ -2812,8 +2812,8 @@ load_error1:
if (IS_PF(bp))
bnx2x_clear_pf_load(bp);
load_error0:
- bnx2x_free_fp_mem(bp);
bnx2x_free_fw_stats_mem(bp);
+ bnx2x_free_fp_mem(bp);
bnx2x_free_mem(bp);
return rc;
@@ -2959,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->port.pmf = 0;
+ /* clear pending work in rtnl task */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
if (CNIC_LOADED(bp))
@@ -3256,14 +3260,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
if (prot == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb)) {
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V6;
- } else if (skb_is_gso(skb)) {
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V4;
+ if (skb_is_gso(skb)) {
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
}
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fcf2761d882..fdace204b05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
/* ets may affect cmng configuration: reinit it in hw */
bnx2x_set_local_cmng(bp);
-
- set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e8efa1c93ff..32d0f1435fb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
+ if (IS_VF(bp))
+ return 0;
+
regdump_len = __bnx2x_get_regs_len(bp);
regdump_len *= 4;
regdump_len += sizeof(struct dump_header);
@@ -2864,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev,
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+ if (bnx2x_test_nvram(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL,
- "Can't perform self-test when interface is down\n");
+ DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
return;
}
@@ -2928,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev,
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
- if (bnx2x_test_nvram(bp) != 0) {
- if (!IS_MF(bp))
- buf[4] = 1;
- else
- buf[0] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
if (bnx2x_test_intr(bp) != 0) {
if (!IS_MF(bp))
buf[5] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 32767f6aa33..cf1df8b62e2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 51468227bf3..20dcc02431c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3122,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
}
static int bnx2x_bsc_read(struct link_params *params,
- struct bnx2x_phy *phy,
+ struct bnx2x *bp,
u8 sl_devid,
u16 sl_addr,
u8 lc_addr,
@@ -3131,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
{
u32 val, i;
int rc = 0;
- struct bnx2x *bp = params->bp;
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -6371,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
* intended override.
*/
break;
- } else
+ } else {
+ u32 nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ nig_led_mode);
+ }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -7917,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -10653,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
0x40);
} else {
+ /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
- 0x80);
+ val);
/* Tell LED3 to blink on source */
bnx2x_cl45_read(bp, phy,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b42f89ce02e..814d0eca9b3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -577,7 +577,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
+#endif
}
}
@@ -614,7 +616,9 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
+#endif
}
}
@@ -5231,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp)
case EVENT_RING_OPCODE_STOP_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_STOP))
break;
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_START))
break;
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
@@ -9352,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
bnx2x_process_kill_chip_reset(bp, global);
barrier();
+ /* clear errors in PGB */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
+
/* Recover after reset: */
/* MCP */
if (global && bnx2x_reset_mcp_comp(bp, val))
@@ -9706,11 +9714,10 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_pf_set_vfs_vlan(bp);
- if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
bnx2x_dcbx_stop_hw_tx(bp);
-
- if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
bnx2x_dcbx_resume_hw_tx(bp);
+ }
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
@@ -9916,7 +9923,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
- int rc = false;
+ bool rc = false;
if (down_trylock(&bnx2x_prev_sem))
return false;
@@ -11186,6 +11193,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_mac_hwinfo(bp);
}
+ if (!BP_NOMCP(bp)) {
+ /* Read physical port identifier from shmem */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11784,7 +11799,7 @@ static int bnx2x_open(struct net_device *dev)
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
- return bnx2x_open_epilog(bp);
+ return 0;
}
/* called with rtnl_lock */
@@ -12082,6 +12097,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
return 0;
}
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bp->phys_port_id);
+ memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12111,18 +12140,15 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
struct device *dev = &bp->pdev->dev;
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "System does not support DMA, aborting\n");
return -EIO;
}
@@ -12274,10 +12300,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!CHIP_IS_E1x(bp)) {
- dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -12310,34 +12339,11 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
}
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
- enum bnx2x_pci_bus_speed *speed)
-{
- u32 link_speed, val = 0;
-
- pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
- *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
- link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
- switch (link_speed) {
- case 3:
- *speed = BNX2X_PCI_LINK_SPEED_8000;
- break;
- case 2:
- *speed = BNX2X_PCI_LINK_SPEED_5000;
- break;
- default:
- *speed = BNX2X_PCI_LINK_SPEED_2500;
- }
-}
-
static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
@@ -12694,8 +12700,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- int pcie_width;
- enum bnx2x_pci_bus_speed pcie_speed;
+ enum pcie_link_width pcie_width;
+ enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -12844,18 +12850,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-
- bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
- pcie_width, pcie_speed);
-
- BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+ pcie_speed == PCI_SPEED_UNKNOWN ||
+ pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+ BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+ else
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
pcie_width,
- pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+ pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+ pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+ pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
"Unknown",
dev->base_addr, bp->pdev->irq, dev->dev_addr);
@@ -12874,7 +12881,6 @@ init_one_exit:
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return rc;
}
@@ -12957,7 +12963,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void bnx2x_remove_one(struct pci_dev *pdev)
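
Among the bnx2x_main.c changes above, bnx2x_get_phys_port_id() wires the driver into the generic .ndo_get_phys_port_id hook so userspace can query a stable per-port identifier (filled from shmem on the PF, or over the VF-PF channel as seen later in this diff), and the hand-rolled PCIe config-space parsing gives way to pcie_get_minimum_link(). The generic shape of such a hook, assuming a hypothetical private struct that cached the identifier at probe time:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct foo_priv {
		bool has_port_id;
		u8 phys_port_id[ETH_ALEN];
	};

	static int foo_get_phys_port_id(struct net_device *netdev,
					struct netdev_phys_port_id *ppid)
	{
		struct foo_priv *priv = netdev_priv(netdev);

		if (!priv->has_port_id)
			return -EOPNOTSUPP;

		ppid->id_len = sizeof(priv->phys_port_id);
		memcpy(ppid->id, priv->phys_port_id, ppid->id_len);
		return 0;
	}
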
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5ecf267dc4c..3efbb35267c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2864,6 +2864,17 @@
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [W 7] Writing 1 to each bit in this register clears a corresponding error
+ * details register and enables logging new error details. Bit 0 - clears
+ * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears
+ * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS
+ * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32
+ * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 -
+ * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears
+ * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6
+ * - clears TCPL_IN_TWO_RCBS_DETAILS. */
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c
+
/* [R 9] Interrupt register #0 read */
#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
/* [RC 9] Interrupt register #0 read clear */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 9fbeee522d2..32c92abf509 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1217,9 +1217,6 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
ETH_VLAN_FILTER_CLASSIFY, config);
}
-#define list_next_entry(pos, member) \
- list_entry((pos)->member.next, typeof(*(pos)), member)
-
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index bf08ad68b40..0216d592d0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2018,6 +2018,8 @@ failed:
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
+ int vf_idx;
+
/* if SRIOV is not enabled there's nothing to do */
if (!IS_SRIOV(bp))
return;
@@ -2026,6 +2028,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
DP(BNX2X_MSG_IOV, "sriov disabled\n");
+ /* disable access to all VFs */
+ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+ bnx2x_pretend_func(bp,
+ HW_VF_HANDLE(bp,
+ bp->vfdb->sriov.first_vf_in_pf +
+ vf_idx));
+ DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+ bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+ bnx2x_vf_enable_internal(bp, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+
/* free vf database */
__bnx2x_iov_free_vfdb(bp);
}
@@ -2802,7 +2816,7 @@ struct set_vf_state_cookie {
u8 state;
};
-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
{
struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
@@ -3197,7 +3211,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
* the "acquire" messages to appear on the VF PF channel.
*/
DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
- pci_disable_sriov(bp->pdev);
+ bnx2x_disable_sriov(bp);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3225,8 +3239,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
- struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3642,29 +3657,6 @@ alloc_mem_err:
return -ENOMEM;
}
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm fails) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held). We only want to do this if the number of VFs
- * was set before PF driver was loaded.
- */
- if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
-
- return 0;
-}
-
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
int vf_idx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 059f0d460af..1ff6a936662 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 28757dfacf0..efa8a151d78 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
mutex_unlock(&bp->vf2pf_mutex);
}
+/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+ enum channel_tlvs req_tlv)
+{
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ do {
+ if (tlv->type == req_tlv)
+ return tlv;
+
+ if (!tlv->length) {
+ BNX2X_ERR("Found TLV with length 0\n");
+ return NULL;
+ }
+
+ tlvs_list += tlv->length;
+ tlv = (struct channel_tlv *)tlvs_list;
+ } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+ DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+ return NULL;
+}
+
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -128,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
*done = PFVF_STATUS_SUCCESS;
- return 0;
+ return -EINVAL;
}
/* Write message address */
@@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
int rc = 0, attempts = 0;
struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
u32 vf_id;
bool resources_acquired = false;
@@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+ /* Request physical port identifier */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
/* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, req,
+ req->first_tlv.tl.length + sizeof(struct channel_tlv),
+ CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* output tlvs list */
@@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
}
}
+ /* Retrieve physical port id (if possible) */
+ phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+ bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_PHYS_PORT_ID);
+ if (phys_port_resp) {
+ memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
/* get HW info */
bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
bp->link_params.chip_id = bp->common.chip_id;
@@ -983,53 +1023,59 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
- u64 vf_addr;
- dma_addr_t pf_addr;
u16 length, type;
- int rc;
- struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
/* prepare response */
type = mbx->first_tlv.tl.type;
length = type == CHANNEL_TLV_ACQUIRE ?
sizeof(struct pfvf_acquire_resp_tlv) :
sizeof(struct pfvf_general_resp_tlv);
- bnx2x_add_tlv(bp, resp, 0, type, length);
- resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
- bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+ dma_addr_t pf_addr;
+ u64 vf_addr;
+ int rc;
+
bnx2x_dp_tlv_list(bp, resp);
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
mbx->first_tlv.resp_msg_offset;
pf_addr = mbx->msg_mapping +
offsetof(struct bnx2x_vf_mbx_msg, resp);
- /* copy the response body, if there is one, before the header, as the vf
- * is sensitive to the header being written
+ /* Copy the response buffer. The first u64 is written afterwards, as
+ * the vf is sensitive to the header being written
*/
- if (resp->hdr.tl.length > sizeof(u64)) {
- length = resp->hdr.tl.length - sizeof(u64);
- vf_addr += sizeof(u64);
- pf_addr += sizeof(u64);
- rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
- U64_HI(vf_addr),
- U64_LO(vf_addr),
- length/4);
- if (rc) {
- BNX2X_ERR("Failed to copy response body to VF %d\n",
- vf->abs_vfid);
- goto mbx_error;
- }
- vf_addr -= sizeof(u64);
- pf_addr -= sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
}
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
@@ -1060,6 +1106,36 @@ mbx_error:
bnx2x_vf_release(bp, vf, false); /* non blocking */
}
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_port_phys_id_resp_tlv *port_id;
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+ sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+ port_id = (struct vfpf_port_phys_id_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx, int vfop_status)
{
@@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
struct pf_vf_resc *resc = &resp->resc;
u8 status = bnx2x_pfvf_status_codes(vfop_status);
+ u16 length;
memset(resp, 0, sizeof(*resp));
@@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resc->hw_sbs[i].sb_qid);
DP_CONT(BNX2X_MSG_IOV, "]\n");
+ /* prepare response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+ /* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where next TLV should be inserted)
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
/* send the response */
vf->op_rc = vfop_status;
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1874,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
/* process the VF message header */
mbx->first_tlv = mbx->msg->req.first_tlv;
+ /* Clean response buffer to refrain from falsely seeing chains */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
/* dispatch the request (will prepare the response) */
bnx2x_vf_mbx_request(bp, vf, mbx);
goto mbx_done;
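
The new bnx2x_search_tlv_list() above walks the VF-PF mailbox TLV chain until the list-end marker, refusing to loop on a zero-length entry, and the acquire path uses it to pick out an optional CHANNEL_TLV_PHYS_PORT_ID response. The walking pattern in isolation, with stand-in types rather than the bnx2x definitions:

	#include <linux/types.h>

	struct tlv_hdr {		/* stand-in, not struct channel_tlv */
		u16 type;
		u16 length;		/* header plus payload, in bytes */
	};

	#define TLV_LIST_END	0	/* stand-in for the real end marker */

	static void *find_tlv(void *list, u16 wanted)
	{
		struct tlv_hdr *tlv = list;

		do {
			if (tlv->type == wanted)
				return tlv;
			if (!tlv->length)	/* malformed chain, stop here */
				return NULL;
			list = (u8 *)list + tlv->length;
			tlv = list;
		} while (tlv->type != TLV_LIST_END);

		return NULL;
	}
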
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 1179fe06d0c..208568bc7a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+struct vfpf_port_phys_id_resp_tlv {
+ struct channel_tlv tl;
+ u8 id[ETH_ALEN];
+ u8 padding[2];
+};
+
#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
* stats will be coalesced on
* the leading RSS queue
@@ -398,6 +404,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
+ CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 99394bd49a1..f58a8b80302 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
csk->vlan_id = path_resp->vlan_id;
- memcpy(csk->ha, path_resp->mac_addr, 6);
+ memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
- memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0658b43e148..ebbfe25acaa 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -353,8 +353,8 @@ struct cnic_ulp_ops {
atomic_t ref_count;
};
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);
#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 12d961c4ebc..a9e068423ba 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 133
+#define TG3_MIN_NUM 134
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jul 29, 2013"
+#define DRV_MODULE_RELDATE "Sep 16, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
return err;
}
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+ return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+ reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
static int tg3_bmcr_reset(struct tg3 *tp)
{
u32 phy_control;
@@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
spin_lock_bh(&tp->lock);
- if (tg3_readphy(tp, reg, &val))
+ if (__tg3_readphy(tp, mii_id, reg, &val))
val = -EIO;
spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
spin_lock_bh(&tp->lock);
- if (tg3_writephy(tp, reg, val))
+ if (__tg3_writephy(tp, mii_id, reg, val))
ret = -EIO;
spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
u32 val;
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
+ } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+ int addr;
+
+ addr = ssb_gige_get_phyaddr(tp->pdev);
+ if (addr < 0)
+ return addr;
+ tp->phy_addr = addr;
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
@@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
tp->mdio_bus->reset = &tg3_mdio_reset;
- tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+ tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
tp->mdio_bus->irq = &tp->mdio_irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
return i;
}
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (!phydev || !phydev->drv) {
dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
u32 old_tx_mode = tp->tx_mode;
if (tg3_flag(tp, USE_PHYLIB))
- autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+ autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
else
autoneg = tp->link_config.autoneg;
@@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
u8 oldflowctrl, linkmesg = 0;
u32 mac_mode, lcl_adv, rmt_adv;
struct tg3 *tp = netdev_priv(dev);
- struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
spin_lock_bh(&tp->lock);
@@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
/* Bring the PHY back to a known state. */
tg3_bmcr_reset(tp);
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
/* Attach the MAC to the PHY. */
phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
SUPPORTED_Asym_Pause);
break;
default:
- phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
return -EINVAL;
}
@@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return;
- phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
- phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
}
}
@@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
return;
}
- reg = MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_SCR5_SEL |
- MII_TG3_MISC_SHDW_SCR5_LPED |
+ reg = MII_TG3_MISC_SHDW_SCR5_LPED |
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
- tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
- reg = MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_APD_SEL |
- MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
if (enable)
reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
- tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
@@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
struct phy_device *phydev;
u32 phyid, advertising;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
@@ -6848,12 +6862,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- skb = build_skb(data, frag_size);
- if (!skb) {
- tg3_frag_free(frag_size != 0, data);
- goto drop_it_no_recycle;
- }
- skb_reserve(skb, TG3_RX_OFFSET(tp));
/* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
@@ -6861,6 +6869,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
ri->data = NULL;
+ skb = build_skb(data, frag_size);
+ if (!skb) {
+ tg3_frag_free(frag_size != 0, data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
} else {
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
@@ -9196,10 +9210,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
}
- if (err)
- return err;
-
- return 0;
+ return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
name = tp->dev->name;
else {
name = &tnapi->irq_lbl[0];
- snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+ if (tnapi->tx_buffers && tnapi->rx_rcb)
+ snprintf(name, IFNAMSIZ,
+ "%s-txrx-%d", tp->dev->name, irq_num);
+ else if (tnapi->tx_buffers)
+ snprintf(name, IFNAMSIZ,
+ "%s-tx-%d", tp->dev->name, irq_num);
+ else if (tnapi->rx_rcb)
+ snprintf(name, IFNAMSIZ,
+ "%s-rx-%d", tp->dev->name, irq_num);
+ else
+ snprintf(name, IFNAMSIZ,
+ "%s-%d", tp->dev->name, irq_num);
name[IFNAMSIZ-1] = 0;
}
@@ -11907,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_ethtool_gset(phydev, cmd);
}
@@ -11974,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_ethtool_sset(phydev, cmd);
}
@@ -12093,12 +12115,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
- spin_lock_bh(&tp->lock);
if (device_may_wakeup(dp))
tg3_flag_set(tp, WOL_ENABLE);
else
tg3_flag_clear(tp, WOL_ENABLE);
- spin_unlock_bh(&tp->lock);
return 0;
}
@@ -12131,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
if (tg3_flag(tp, USE_PHYLIB)) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
} else {
u32 bmcr;
@@ -12247,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
u32 newadv;
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (!(phydev->supported & SUPPORTED_Pause) ||
(!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
return -ENOMEM;
tx_data = skb_put(skb, tx_len);
- memcpy(tx_data, tp->dev->dev_addr, 6);
- memset(tx_data + 6, 0x0, 8);
+ memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+ memset(tx_data + ETH_ALEN, 0x0, 8);
tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
@@ -13598,16 +13618,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
if (stmpconf.flags)
return -EINVAL;
- switch (stmpconf.tx_type) {
- case HWTSTAMP_TX_ON:
- tg3_flag_set(tp, TX_TSTAMP_EN);
- break;
- case HWTSTAMP_TX_OFF:
- tg3_flag_clear(tp, TX_TSTAMP_EN);
- break;
- default:
+ if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
+ stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
- }
switch (stmpconf.rx_filter) {
case HWTSTAMP_FILTER_NONE:
@@ -13669,6 +13682,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
tw32(TG3_RX_PTP_CTL,
tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
+ if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ tg3_flag_set(tp, TX_TSTAMP_EN);
+ else
+ tg3_flag_clear(tp, TX_TSTAMP_EN);
+
return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
-EFAULT : 0;
}
@@ -13683,7 +13701,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_mii_ioctl(phydev, ifr, cmd);
}
@@ -14921,6 +14939,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
+
+ if (tg3_flag(tp, 5717_PLUS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+ LED_CTRL_BLINK_RATE_MASK;
+
break;
case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15783,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16659,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
int len;
addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == 6) {
- memcpy(dev->dev_addr, addr, 6);
+ if (addr && len == ETH_ALEN) {
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
return 0;
}
return -ENODEV;
@@ -16643,7 +16670,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
return 0;
}
#endif
@@ -17052,10 +17079,6 @@ static int tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
-#if 0
- /* Unneeded, already done by tg3_get_invariants. */
- tg3_switch_clocks(tp);
-#endif
if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17106,6 @@ static int tg3_test_dma(struct tg3 *tp)
break;
}
-#if 0
- /* validate data reached card RAM correctly. */
- for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
- u32 val;
- tg3_read_mem(tp, 0x2100 + (i*4), &val);
- if (le32_to_cpu(val) != p[i]) {
- dev_err(&tp->pdev->dev,
- "%s: Buffer corrupted on device! "
- "(%d != %d)\n", __func__, val, i);
- /* ret = -ENODEV here? */
- }
- p[i] = 0;
- }
-#endif
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
if (ret) {
@@ -17362,8 +17371,10 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_flag_set(tp, FLUSH_POSTED_WRITES);
if (ssb_gige_one_dma_at_once(pdev))
tg3_flag_set(tp, ONE_DMA_AT_ONCE);
- if (ssb_gige_have_roboswitch(pdev))
+ if (ssb_gige_have_roboswitch(pdev)) {
+ tg3_flag_set(tp, USE_PHYLIB);
tg3_flag_set(tp, ROBOSWITCH);
+ }
if (ssb_gige_is_rgmii(pdev))
tg3_flag_set(tp, RGMII_MODE);
}
@@ -17409,9 +17420,12 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
tg3_flag_set(tp, ENABLE_APE);
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
@@ -17628,7 +17642,7 @@ static int tg3_init_one(struct pci_dev *pdev,
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
netdev_info(dev,
"attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17699,6 @@ err_out_free_res:
err_out_disable_pdev:
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -17717,7 +17730,6 @@ static void tg3_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 70257808aa3..5c3835aa1e1 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -68,6 +68,9 @@
#define TG3PCI_DEVICE_TIGON3_5762 0x1687
#define TG3PCI_DEVICE_TIGON3_5725 0x1643
#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
+#define TG3PCI_DEVICE_TIGON3_57764 0x1642
+#define TG3PCI_DEVICE_TIGON3_57767 0x1683
+#define TG3PCI_DEVICE_TIGON3_57787 0x1641
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b78e69e0e52..248bc37cb41 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3212,7 +3212,6 @@ bnad_init(struct bnad *bnad,
bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
if (!bnad->bar0) {
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
- pci_set_drvdata(pdev, NULL);
return -ENOMEM;
}
pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
@@ -3300,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = true;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
*using_dac = false;
}
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index aefee77523f..f7e033f8a00 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -372,38 +372,37 @@ extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
*/
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
-extern void bnad_set_rx_mode(struct net_device *netdev);
-extern struct net_device_stats *bnad_get_netdev_stats(
- struct net_device *netdev);
-extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
-extern int bnad_enable_default_bcast(struct bnad *bnad);
-extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-extern void bnad_cb_completion(void *arg, enum bfa_status status);
+void bnad_set_rx_mode(struct net_device *netdev);
+struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
+int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_enable_default_bcast(struct bnad *bnad);
+void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
-extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
/* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
+void bnad_dim_timer_start(struct bnad *bnad);
/* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
+void bnad_netdev_qstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+void bnad_netdev_hwstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
/* Debugfs */
-void bnad_debugfs_init(struct bnad *bnad);
-void bnad_debugfs_uninit(struct bnad *bnad);
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
/* MACROS */
/* To set & get the stats counters */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 48f52882a22..4fc5c8ef512 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1060,13 +1060,13 @@ static int xgmac_stop(struct net_device *dev)
{
struct xgmac_priv *priv = netdev_priv(dev);
- netif_stop_queue(dev);
-
if (readl(priv->base + XGMAC_DMA_INTR_ENA))
napi_disable(&priv->napi);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_disable(dev);
+
/* Disable the MAC core */
xgmac_mac_disable(priv->base);
@@ -1370,11 +1370,8 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
}
old_mtu = dev->mtu;
- dev->mtu = new_mtu;
/* return early if the buffer sizes will not change */
- if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
- return 0;
if (old_mtu == new_mtu)
return 0;
@@ -1382,8 +1379,9 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev))
return 0;
- /* Bring the interface down and then back up */
+ /* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
+ dev->mtu = new_mtu;
return xgmac_open(dev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 5ccbed1784d..8abb46b3903 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
return board_info(adap)->clock_core / 1000000;
}
-extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
-extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
-
-extern void t1_interrupts_enable(adapter_t *adapter);
-extern void t1_interrupts_disable(adapter_t *adapter);
-extern void t1_interrupts_clear(adapter_t *adapter);
-extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-extern void t1_elmer0_ext_intr(adapter_t *adapter);
-extern int t1_slow_intr_handler(adapter_t *adapter);
-
-extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-extern const struct board_info *t1_get_board_info(unsigned int board_id);
-extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
+
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
unsigned short ssid);
-extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
-extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
-extern int t1_init_hw_modules(adapter_t *adapter);
-extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-extern void t1_free_sw_modules(adapter_t *adapter);
-extern void t1_fatal_err(adapter_t *adapter);
-extern void t1_link_changed(adapter_t *adapter, int port_id);
-extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d7048db9863..1d021059f09 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1168,7 +1168,6 @@ out_free_dev:
pci_release_regions(pdev);
out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
t1_sw_reset(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 40c7b93abab..eb33a31b08a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
- memcpy(mac_addr, cmac->instance->mac_addr, 6);
+ memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
return 0;
}
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
*/
/* Store local copy */
- memcpy(cmac->instance->mac_addr, ma, 6);
+ memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b650951791d..45d77334d7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3374,7 +3374,6 @@ out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out:
return err;
}
@@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev)
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 6990f6c6522..81029b872bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -685,10 +685,6 @@
#define V_BUSY(x) ((x) << S_BUSY)
#define F_BUSY V_BUSY(1U)
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
#define A_MC7_EXT_MODE1 0x108
#define A_MC7_EXT_MODE2 0x10c
@@ -749,14 +745,6 @@
#define A_MC7_CAL 0x128
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
#define S_CAL_FAULT 30
#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
#define F_CAL_FAULT V_CAL_FAULT(1U)
@@ -815,9 +803,6 @@
#define V_OP(x) ((x) << S_OP)
#define F_OP V_OP(1U)
-#define F_OP V_OP(1U)
-#define A_SF_OP 0x6dc
-
#define A_MC7_BIST_ADDR_BEG 0x168
#define A_MC7_BIST_ADDR_END 0x16c
@@ -830,8 +815,6 @@
#define V_CONT(x) ((x) << S_CONT)
#define F_CONT V_CONT(1U)
-#define F_CONT V_CONT(1U)
-
#define A_MC7_INT_ENABLE 0x178
#define S_AE 17
@@ -1017,8 +1000,6 @@
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)
-#define F_NICMODE V_NICMODE(1U)
-
#define S_IPV6ENABLE 15
#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
#define F_IPV6ENABLE V_IPV6ENABLE(1U)
@@ -1562,27 +1543,15 @@
#define A_ULPRX_STAG_ULIMIT 0x530
#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
#define A_ULPRX_PBL_LLIMIT 0x53c
#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
#define A_ULPRX_TDDP_TAGMASK 0x524
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
-
#define A_ULPTX_CONFIG 0x580
#define S_CFG_CQE_SOP_MASK 1
@@ -2053,8 +2022,6 @@
#define V_TMMODE(x) ((x) << S_TMMODE)
#define F_TMMODE V_TMMODE(1U)
-#define F_TMMODE V_TMMODE(1U)
-
#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
#define A_MC5_DB_FILTER_TABLE 0x710
@@ -2454,8 +2421,6 @@
#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
#define F_TXACTENABLE V_TXACTENABLE(1U)
-#define A_XGM_SERDES_CTRL0 0x8e0
-
#define S_RESET3 23
#define V_RESET3(x) ((x) << S_RESET3)
#define F_RESET3 V_RESET3(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 9c89dc8fe10..632b318eb38 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1599,7 +1599,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
- skb->tail - skb->transport_header,
+ skb_tail_pointer(skb) -
+ skb_transport_header(skb),
adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index dfd1e36f575..ecd2fb3ef69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -48,7 +48,6 @@
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
-#include "t4_hw.h"
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c73cabdbd4c..8b929eeecd2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
struct net_device *event_dev;
int ret = NOTIFY_DONE;
struct bonding *bond = netdev_priv(ifa->idev->dev);
+ struct list_head *iter;
struct slave *slave;
struct pci_dev *first_pdev = NULL;
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
* in all of them only once.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (!first_pdev) {
ret = clip_add(slave->dev, ifa, event);
/* If clip_add is success then only initialize
@@ -6074,7 +6075,6 @@ sriov:
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -6122,7 +6122,6 @@ static void remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
} else
pci_release_regions(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 40c22e7de15..5f90ec5f751 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2782,11 +2782,9 @@ err_unmap_bar:
err_free_adapter:
kfree(adapter);
- pci_set_drvdata(pdev, NULL);
err_release_regions:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
pci_clear_master(pdev);
err_disable_device:
@@ -2851,7 +2849,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
}
iounmap(adapter->regs);
kfree(adapter);
- pci_set_drvdata(pdev, NULL);
}
/*
@@ -2908,7 +2905,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
CH_DEVICE(0xb000, 0), /* PE10K FPGA */
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index df296af20bd..8475c4cda9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
* Builds an sk_buff from the given packet gather list. Returns the
* sk_buff or %NULL if sk_buff allocation failed.
*/
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len,
+ unsigned int pull_len)
{
struct sk_buff *skb;
@@ -1443,7 +1444,7 @@ out:
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
int frag;
@@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
* on this queue. If the system is under memory shortage use a fairly
* long delay to help recovery.
*/
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
{
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
int budget_left = budget;
@@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* The MSI interrupt handler handles data events from SGE response queues as
* well as error and other async events as they all use the same MSI vector.
*/
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
{
struct adapter *adapter = cookie;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 7b756cf9474..ff78dfaec50 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2309,7 +2309,6 @@ err_out_release_regions:
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
@@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev)
enic_iounmap(enic);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a7a941b1a65..7080ad6c401 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1623,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+ memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index eaab73cf27c..38148b0e3a9 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev)
iounmap(de->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 263b92c00cb..c05b66dfcc3 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
pci_disable_device (pdev);
}
-static struct pci_device_id de4x5_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83139307861..5ad9e3e3c0b 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -523,7 +523,6 @@ err_out_res:
err_out_disable:
pci_disable_device(pdev);
err_out_free:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
@@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev)
db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
-
- pci_set_drvdata(pdev, NULL);
}
DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4e8cfa2ac80..add05f14b38 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
- pci_set_drvdata (pdev, NULL);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 93845afe1ce..a5397b13072 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -429,7 +429,6 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
err_out_free:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
@@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev)
db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index c7b04ecf5b4..62fe512bb21 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev)
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 9b84cb04fe5..ab7ebac6fbe 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -289,7 +289,6 @@ out:
err_unmap:
pci_iounmap(pdev, private->ioaddr);
reg_fail:
- pci_set_drvdata(pdev, NULL);
dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
tx_buf_fail:
dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(pdev, card->ioaddr);
- pci_set_drvdata(pdev, NULL);
dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
free_netdev(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index afa8e3af2c4..4fb756d219f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev)
pci_release_regions (pdev);
pci_disable_device (pdev);
}
- pci_set_drvdata (pdev, NULL);
}
static struct pci_driver rio_driver = {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index bf3bf6f22c9..113cd799a13 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -703,7 +703,6 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_res:
pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev)
pci_iounmap(pdev, np->base);
pci_release_regions(pdev);
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index db020230bd0..f4825db5d17 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.9.134.0u"
+#define DRV_VER "4.9.224.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -89,7 +89,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_NUM_VLANS_SUPPORTED 64
#define BE_UMC_NUM_VLANS_SUPPORTED 15
-#define BE_MAX_EQD 96u
+#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
#define EVNT_Q_LEN 1024
@@ -199,8 +199,37 @@ struct be_eq_obj {
u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define BE_EQ_IDLE 0
+#define BE_EQ_NAPI 1 /* napi owns this EQ */
+#define BE_EQ_POLL 2 /* poll owns this EQ */
+#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
+#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
+#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
+#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
+#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
+ unsigned int state;
+ spinlock_t lock; /* lock to serialize napi and busy-poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
+struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ bool enable;
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 prev_eqd; /* in usecs */
+ u32 et_eqd; /* configured val when aic is off */
+ ulong jiffies;
+ u64 rx_pkts_prev; /* Used to calculate RX pps */
+ u64 tx_reqs_prev; /* Used to calculate TX pps */
+};
+
+enum {
+ NAPI_POLLING,
+ BUSY_POLLING
+};
+
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -215,6 +244,7 @@ struct be_tx_stats {
u64 tx_compl;
ulong tx_jiffies;
u32 tx_stops;
+ u32 tx_drv_drops; /* pkts dropped by driver */
struct u64_stats_sync sync;
struct u64_stats_sync sync_compl;
};
@@ -239,15 +269,12 @@ struct be_rx_page_info {
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
- u64 rx_pkts_prev;
- ulong rx_jiffies;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
u32 rx_compl;
u32 rx_mcast_pkts;
u32 rx_compl_err; /* completions with err set */
- u32 rx_pps; /* pkts per second */
struct u64_stats_sync sync;
};
@@ -316,6 +343,11 @@ struct be_drv_stats {
u32 rx_input_fifo_overflow_drop;
u32 pmem_fifo_overflow_drop;
u32 jabber_events;
+ u32 rx_roce_bytes_lsd;
+ u32 rx_roce_bytes_msd;
+ u32 rx_roce_frames;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
};
struct be_vf_cfg {
@@ -405,6 +437,7 @@ struct be_adapter {
u32 big_page_size; /* Compounded page size shared by rx wrbs */
struct be_drv_stats drv_stats;
+ struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
u8 vlan_tag[VLAN_N_VID];
u8 vlan_prio_bmap; /* Available Priority BitMap */
@@ -437,7 +470,6 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool stats_cmd_sent;
- u32 if_type;
struct {
u32 size;
u32 total_size;
@@ -472,8 +504,8 @@ struct be_adapter {
#define be_physfn(adapter) (!adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
-#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
- be_physfn(adapter))
+#define sriov_want(adapter) (be_physfn(adapter) && \
+ (num_vfs || pci_num_vf(adapter->pdev)))
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
i++, vf_cfg++)
@@ -546,6 +578,10 @@ extern const struct ethtool_ops be_ethtool_ops;
for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
i++, eqo++)
+#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \
+ for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
+ i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
+
#define is_mcc_eqo(eqo) (eqo->idx == 0)
#define mcc_eqo(adapter) (&adapter->eq_obj[0])
@@ -696,27 +732,137 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
- u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-extern bool be_is_wol_supported(struct be_adapter *adapter);
-extern bool be_pause_supported(struct be_adapter *adapter);
-extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock(&eqo->lock); /* BH is already disabled */
+ if (eqo->state & BE_EQ_LOCKED) {
+ WARN_ON(eqo->state & BE_EQ_NAPI);
+ eqo->state |= BE_EQ_NAPI_YIELD;
+ status = false;
+ } else {
+ eqo->state = BE_EQ_NAPI;
+ }
+ spin_unlock(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+ spin_lock(&eqo->lock); /* BH is already disabled */
+
+ WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock(&eqo->lock);
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock_bh(&eqo->lock);
+ if (eqo->state & BE_EQ_LOCKED) {
+ eqo->state |= BE_EQ_POLL_YIELD;
+ status = false;
+ } else {
+ eqo->state |= BE_EQ_POLL;
+ }
+ spin_unlock_bh(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_bh(&eqo->lock);
+
+ WARN_ON(eqo->state & (BE_EQ_NAPI));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock_bh(&eqo->lock);
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_init(&eqo->lock);
+ eqo->state = BE_EQ_IDLE;
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+ local_bh_disable();
+
+ /* It's enough to just acquire napi lock on the eqo to stop
+ * be_busy_poll() from processing any queues.
+ */
+ while (!be_lock_napi(eqo))
+ mdelay(1);
+
+ local_bh_enable();
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ return true;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ return false;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+ u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
+
+static inline int fw_major_num(const char *fw_ver)
+{
+ int fw_major = 0;
+
+ sscanf(fw_ver, "%d.", &fw_major);
+
+ return fw_major;
+}
+
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
*/
-extern void be_roce_dev_add(struct be_adapter *);
-extern void be_roce_dev_remove(struct be_adapter *);
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
/*
* internal function to open-close roce device during ifup-ifdown.
*/
-extern void be_roce_dev_open(struct be_adapter *);
-extern void be_roce_dev_close(struct be_adapter *);
+void be_roce_dev_open(struct be_adapter *);
+void be_roce_dev_close(struct be_adapter *);
#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c08fd32bb8e..dbcd5262c01 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -522,7 +522,7 @@ static u16 be_POST_stage_get(struct be_adapter *adapter)
return sem & POST_STAGE_MASK;
}
-int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
@@ -1436,8 +1436,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
/* version 1 of the cmd is not supported only by BE2 */
- if (!BE2_chip(adapter))
+ if (BE2_chip(adapter))
+ hdr->version = 0;
+ if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
+ else
+ hdr->version = 2;
be_mcc_notify(adapter);
adapter->stats_cmd_sent = true;
@@ -1719,11 +1723,12 @@ err:
/* set the EQ delay interval of an EQ to specified value
* Uses async mcc
*/
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+ int num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_modify_eq_delay *req;
- int status = 0;
+ int status = 0, i;
spin_lock_bh(&adapter->mcc_lock);
@@ -1737,13 +1742,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
- req->num_eq = cpu_to_le32(1);
- req->delay[0].eq_id = cpu_to_le32(eq_id);
- req->delay[0].phase = 0;
- req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->set_eqd[i].phase = 0;
+ req->set_eqd[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
be_mcc_notify(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1751,7 +1758,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool untagged, bool promiscuous)
+ u32 num, bool promiscuous)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1771,7 +1778,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
req->interface_id = if_id;
req->promiscuous = promiscuous;
- req->untagged = untagged;
+ req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req->num_vlan = num;
if (!promiscuous) {
memcpy(req->normal_vlan, vtag_array,
@@ -1840,7 +1847,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
+ if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
+ req->if_flags_mask) {
+ dev_warn(&adapter->pdev->dev,
+ "Cannot set rx filter flags 0x%x\n",
+ req->if_flags_mask);
+ dev_warn(&adapter->pdev->dev,
+ "Interface is capable of 0x%x flags only\n",
+ be_if_cap_flags(adapter));
+ }
+ req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
+
status = be_mcc_notify_wait(adapter);
+
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -3520,7 +3539,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
struct be_cmd_enable_disable_vf *req;
int status;
- if (!lancer_chip(adapter))
+ if (BEx_chip(adapter))
return 0;
spin_lock_bh(&adapter->mcc_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 108ca8abf0a..0075686276a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1057,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
} __packed;
/******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+};
+
struct be_cmd_req_modify_eq_delay {
struct be_cmd_req_hdr hdr;
u32 num_eq;
- struct {
- u32 eq_id;
- u32 phase;
- u32 delay_multiplier;
- } delay[8];
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
} __packed;
struct be_cmd_resp_modify_eq_delay {
@@ -1660,6 +1662,67 @@ struct be_erx_stats_v1 {
u32 rsvd[4];
};
+struct be_port_rxf_stats_v2 {
+ u32 rsvd0[10];
+ u32 roce_bytes_received_lsd;
+ u32 roce_bytes_received_msd;
+ u32 rsvd1[5];
+ u32 roce_frames_received;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd2[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd3[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd4[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd5[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd6[3];
+ u32 rx_drops_payload_size;
+ u32 rx_drops_clipped_header;
+ u32 rx_drops_crc;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
+ u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+ struct be_port_rxf_stats_v2 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[35];
+};
+
struct be_hw_stats_v1 {
struct be_rxf_stats_v1 rxf;
u32 rsvd0[BE_TXP_SW_SZ];
@@ -1678,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 {
struct be_hw_stats_v1 hw_stats;
};
+struct be_erx_stats_v2 {
+ u32 rx_drops_no_fragments[136]; /* dwords 0 to 135 */
+ u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+ struct be_rxf_stats_v2 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v2 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v2 hw_stats;
+};
+
/************** get fat capabilites *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
@@ -1865,137 +1951,119 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_fw_wait_ready(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- bool permanent, u32 if_handle, u32 pmac_id);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- int pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u32 *if_handle, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
- u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
- struct be_queue_info *cq, struct be_queue_info *eq,
- bool no_delay, int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_tx_obj *txo);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id,
- u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
- struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
- u8 *link_status, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
- u16 *vtag_array, u32 num, bool untagged,
- bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
- u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
- u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+ u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+ u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+ struct be_queue_info *eq, bool no_delay,
+ int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+ u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, bool promiscuous);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
- u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
- u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
- struct be_dma_mem *cmd, u32 flash_oper,
- u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *change_status,
- u8 *addn_status);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u32 rss_hash_opts, u16 table_size);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+ u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u32 *state);
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 flash_oper, u32 flash_opcode, u32 buf_size);
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status);
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size,
- u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
- u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_error(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
- u32 *privilege, u32 domain);
-extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
- u32 privileges, u32 vf_num);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id,
- u8 domain);
-extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
- u8 *mac);
-extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
-extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
- u8 mac_count, u32 domain);
-extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
- u32 dom);
-extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id, u16 hsw_mode);
-extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id, u8 *mode);
-extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
-extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
- struct be_dma_mem *cmd);
-extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- struct be_fat_conf_params *cfgs);
-extern int lancer_wait_ready(struct be_adapter *adapter);
-extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
-extern int lancer_initiate_dump(struct be_adapter *adapter);
-extern bool dump_present(struct be_adapter *adapter);
-extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+ int offset);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+ struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+ u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+ u16 intf_id, u16 hsw_mode);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+ u16 intf_id, u8 *mode);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ struct be_fat_conf_params *cfgs);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
-extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
- u8 domain);
-extern int be_cmd_get_if_id(struct be_adapter *adapter,
- struct be_vf_cfg *vf_cfg, int vf_num);
-extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
-extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
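
The be_cmds.h hunk above only drops the redundant `extern` storage-class specifier from function prototypes and re-wraps the argument lists; functions have external linkage by default, so both spellings declare the same symbol. A standalone illustration with hypothetical names (not driver code):

/* Standalone illustration: the two prototype styles below are
 * equivalent -- 'extern' on a function declaration adds nothing.
 */
#include <stdio.h>

extern int be_style_old(int x);   /* old style: explicit extern */
int be_style_new(int x);          /* new style: extern is implied */

int be_style_old(int x) { return x + 1; }
int be_style_new(int x) { return x + 1; }

int main(void)
{
	printf("%d %d\n", be_style_old(1), be_style_new(1));
	return 0;
}
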
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b440a1fac77..08330034d9e 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(rx_drops_mtu)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)}
+ {DRVSTAT_INFO(be_on_die_temperature)},
+ {DRVSTAT_INFO(rx_roce_bytes_lsd)},
+ {DRVSTAT_INFO(rx_roce_bytes_msd)},
+ {DRVSTAT_INFO(rx_roce_frames)},
+ {DRVSTAT_INFO(roce_drops_payload_len)},
+ {DRVSTAT_INFO(roce_drops_crc)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
@@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
*/
- {DRVSTAT_TX_INFO(tx_stops)}
+ {DRVSTAT_TX_INFO(tx_stops)},
+ /* Pkts dropped in the driver's transmit path */
+ {DRVSTAT_TX_INFO(tx_drv_drops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
@@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo = &adapter->eq_obj[0];
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
- et->rx_coalesce_usecs = eqo->cur_eqd;
- et->rx_coalesce_usecs_high = eqo->max_eqd;
- et->rx_coalesce_usecs_low = eqo->min_eqd;
+ et->rx_coalesce_usecs = aic->prev_eqd;
+ et->rx_coalesce_usecs_high = aic->max_eqd;
+ et->rx_coalesce_usecs_low = aic->min_eqd;
- et->tx_coalesce_usecs = eqo->cur_eqd;
- et->tx_coalesce_usecs_high = eqo->max_eqd;
- et->tx_coalesce_usecs_low = eqo->min_eqd;
+ et->tx_coalesce_usecs = aic->prev_eqd;
+ et->tx_coalesce_usecs_high = aic->max_eqd;
+ et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = eqo->enable_aic;
- et->use_adaptive_tx_coalesce = eqo->enable_aic;
+ et->use_adaptive_rx_coalesce = aic->enable;
+ et->use_adaptive_tx_coalesce = aic->enable;
return 0;
}
@@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
struct be_eq_obj *eqo;
int i;
for_all_evt_queues(adapter, eqo, i) {
- eqo->enable_aic = et->use_adaptive_rx_coalesce;
- eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
- eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
- eqo->eqd = et->rx_coalesce_usecs;
+ aic->enable = et->use_adaptive_rx_coalesce;
+ aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+ aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+ aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+ aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+ aic++;
}
return 0;
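
In be_set_coalesce() above the requested rx_coalesce_usecs is first capped at aic->max_eqd with min() and then raised to at least aic->min_eqd with max(), i.e. it is clamped into the [min_eqd, max_eqd] window before being stored as et_eqd. A generic, standalone sketch of that clamp order (hypothetical names, plain C rather than the kernel min()/max() macros):

/* Generic illustration of the clamp applied to et_eqd above:
 * cap at the upper bound first, then enforce the lower bound.
 */
#include <stdio.h>

static unsigned int clamp_eqd(unsigned int req, unsigned int lo, unsigned int hi)
{
	unsigned int v = req < hi ? req : hi;  /* min(req, hi) */
	return v > lo ? v : lo;                /* max(v, lo)   */
}

int main(void)
{
	/* requested 200us with window [4, 120] -> 120; requested 1us -> 4 */
	printf("%u %u\n", clamp_eqd(200, 4, 120), clamp_eqd(1, 4, 120));
	return 0;
}
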
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2c38cc40211..abde9747163 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -22,6 +22,7 @@
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
+#include <net/busy_poll.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -306,10 +307,14 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)
struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
return &cmd->hw_stats;
- } else {
+ } else if (BE3_chip(adapter)) {
struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
}
}
@@ -320,10 +325,14 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
return &hw_stats->erx;
- } else {
+ } else if (BE3_chip(adapter)) {
struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
}
}
@@ -422,6 +431,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v2 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_filtered = port_stats->rx_address_filtered;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+ if (be_roce_supported(adapter)) {
+ drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+ drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+ drvs->rx_roce_frames = port_stats->roce_frames_received;
+ drvs->roce_drops_crc = port_stats->roce_drops_crc;
+ drvs->roce_drops_payload_len =
+ port_stats->roce_drops_payload_len;
+ }
+}
+
static void populate_lancer_stats(struct be_adapter *adapter)
{
@@ -489,7 +552,7 @@ static void populate_erx_stats(struct be_adapter *adapter,
void be_parse_stats(struct be_adapter *adapter)
{
- struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
struct be_rx_obj *rxo;
int i;
u32 erx_stat;
@@ -499,11 +562,13 @@ void be_parse_stats(struct be_adapter *adapter)
} else {
if (BE2_chip(adapter))
populate_be_v0_stats(adapter);
- else
- /* for BE3 and Skyhawk */
+ else if (BE3_chip(adapter))
+ /* for BE3 */
populate_be_v1_stats(adapter);
+ else
+ populate_be_v2_stats(adapter);
- /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+ /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
for_all_rx_queues(adapter, rxo, i) {
erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
populate_erx_stats(adapter, rxo, erx_stat);
@@ -935,8 +1000,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 start = txq->head;
skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
- if (!skb)
+ if (!skb) {
+ tx_stats(txo)->tx_drv_drops++;
return NETDEV_TX_OK;
+ }
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
@@ -965,6 +1032,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
txq->head = start;
+ tx_stats(txo)->tx_drv_drops++;
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
@@ -1011,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter)
vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vids, num, 1, 0);
+ vids, num, 0);
if (status) {
/* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1275,53 +1343,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+ ulong now)
{
- struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
- ulong now = jiffies;
- ulong delta = now - stats->rx_jiffies;
- u64 pkts;
- unsigned int start, eqd;
+ aic->rx_pkts_prev = rx_pkts;
+ aic->tx_reqs_prev = tx_pkts;
+ aic->jiffies = now;
+}
- if (!eqo->enable_aic) {
- eqd = eqo->eqd;
- goto modify_eqd;
- }
+static void be_eqd_update(struct be_adapter *adapter)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ int eqd, i, num = 0, start;
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 rx_pkts, tx_pkts;
+ ulong now;
+ u32 pps, delta;
- if (eqo->idx >= adapter->num_rx_qs)
- return;
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!aic->enable) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ goto modify_eqd;
+ }
- stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+ rxo = &adapter->rx_obj[eqo->idx];
+ do {
+ start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+ rx_pkts = rxo->stats.rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
- /* Wrapped around */
- if (time_before(now, stats->rx_jiffies)) {
- stats->rx_jiffies = now;
- return;
- }
+ txo = &adapter->tx_obj[eqo->idx];
+ do {
+ start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+ tx_pkts = txo->stats.tx_reqs;
+ } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
- /* Update once a second */
- if (delta < HZ)
- return;
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
- pkts = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
- stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
- stats->rx_pkts_prev = pkts;
- stats->rx_jiffies = now;
- eqd = (stats->rx_pps / 110000) << 3;
- eqd = min(eqd, eqo->max_eqd);
- eqd = max(eqd, eqo->min_eqd);
- if (eqd < 10)
- eqd = 0;
+ /* Skip, if wrapped around or first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ continue;
+ }
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
+
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
- if (eqd != eqo->cur_eqd) {
- be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
- eqo->cur_eqd = eqd;
+ if (eqd != aic->prev_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = eqo->q.id;
+ aic->prev_eqd = eqd;
+ num++;
+ }
}
+
+ if (num)
+ be_cmd_modify_eqd(adapter, set_eqd, num);
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
@@ -1463,7 +1557,7 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_rx_obj *rxo,
+static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
struct be_rx_compl_info *rxcp)
{
struct be_adapter *adapter = rxo->adapter;
@@ -1488,7 +1582,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
-
+ skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1546,6 +1640,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
+ skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1726,6 +1821,8 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if (posted) {
atomic_add(posted, &rxq->used);
+ if (rxo->rx_post_starved)
+ rxo->rx_post_starved = false;
be_rxq_notify(adapter, rxq->id, posted);
} else if (atomic_read(&rxq->used) == 0) {
/* Let be_worker replenish when memory is available */
@@ -1928,6 +2025,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
@@ -1938,6 +2036,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq;
struct be_eq_obj *eqo;
+ struct be_aic_obj *aic;
int i, rc;
adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1946,11 +2045,13 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
+ aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
- eqo->max_eqd = BE_MAX_EQD;
- eqo->enable_aic = true;
+ aic->max_eqd = BE_MAX_EQD;
+ aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2047,6 +2148,9 @@ static int be_tx_qs_create(struct be_adapter *adapter)
if (status)
return status;
+ u64_stats_init(&txo->stats.sync);
+ u64_stats_init(&txo->stats.sync_compl);
+
/* If num_evt_qs is less than num_tx_qs, then more than
* one txq share an eq
*/
@@ -2108,6 +2212,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
if (rc)
return rc;
+ u64_stats_init(&rxo->stats.sync);
eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (rc)
@@ -2167,7 +2272,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2198,10 +2303,12 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
goto loop_continue;
}
- if (do_gro(rxcp))
+ /* Don't do gro when we're busy_polling */
+ if (do_gro(rxcp) && polling != BUSY_POLLING)
be_rx_compl_process_gro(rxo, napi, rxcp);
else
- be_rx_compl_process(rxo, rxcp);
+ be_rx_compl_process(rxo, napi, rxcp);
+
loop_continue:
be_rx_stats_update(rxo, rxcp);
}
@@ -2209,7 +2316,11 @@ loop_continue:
if (work_done) {
be_cq_notify(adapter, rx_cq->id, true, work_done);
- if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ /* When an rx-obj gets into post_starved state, just
+ * let be_worker do the posting.
+ */
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
+ !rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_ATOMIC);
}
@@ -2254,6 +2365,7 @@ int be_poll(struct napi_struct *napi, int budget)
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter = eqo->adapter;
int max_work = 0, work, i, num_evts;
+ struct be_rx_obj *rxo;
bool tx_done;
num_evts = events_get(eqo);
@@ -2266,13 +2378,18 @@ int be_poll(struct napi_struct *napi, int budget)
max_work = budget;
}
- /* This loop will iterate twice for EQ0 in which
- * completions of the last RXQ (default one) are also processed
- * For other EQs the loop iterates only once
- */
- for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
- work = be_process_rx(&adapter->rx_obj[i], napi, budget);
- max_work = max(work, max_work);
+ if (be_lock_napi(eqo)) {
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed
+ * For other EQs the loop iterates only once
+ */
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
+ max_work = max(work, max_work);
+ }
+ be_unlock_napi(eqo);
+ } else {
+ max_work = budget;
}
if (is_mcc_eqo(eqo))
@@ -2288,6 +2405,28 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int be_busy_poll(struct napi_struct *napi)
+{
+ struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_rx_obj *rxo;
+ int i, work = 0;
+
+ if (!be_lock_busy_poll(eqo))
+ return LL_FLUSH_BUSY;
+
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
+ if (work)
+ break;
+ }
+
+ be_unlock_busy_poll(eqo);
+ return work;
+}
+#endif
+
void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
@@ -2519,9 +2658,11 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
- for_all_evt_queues(adapter, eqo, i)
+ for_all_evt_queues(adapter, eqo, i) {
+ if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
napi_disable(&eqo->napi);
+ be_disable_busy_poll(eqo);
+ }
adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
}
@@ -2535,6 +2676,11 @@ static int be_close(struct net_device *netdev)
be_rx_qs_destroy(adapter);
+ for (i = 1; i < (adapter->uc_macs + 1); i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
+
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
synchronize_irq(be_msix_vec_get(adapter, eqo));
@@ -2632,6 +2778,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
+ be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -2937,7 +3084,8 @@ static int be_vf_setup(struct be_adapter *adapter)
goto err;
vf_cfg->def_vid = def_vlan;
- be_cmd_enable_vf(adapter, vf + 1);
+ if (!old_vfs)
+ be_cmd_enable_vf(adapter, vf + 1);
}
if (!old_vfs) {
@@ -2962,12 +3110,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
- if (BE3_chip(adapter) && be_physfn(adapter)) {
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
int max_vfs;
max_vfs = pci_sriov_get_totalvfs(pdev);
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
- use_sriov = res->max_vfs && num_vfs;
+ use_sriov = res->max_vfs;
}
if (be_physfn(adapter))
@@ -2983,8 +3131,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
res->max_mcast_mac = BE_MAX_MC;
+ /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
- !be_physfn(adapter))
+ !be_physfn(adapter) || (adapter->port_num > 1))
res->max_tx_qs = 1;
else
res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3026,14 +3175,6 @@ static int be_get_resources(struct be_adapter *adapter)
adapter->res = res;
}
- /* For BE3 only check if FW suggests a different max-txqs value */
- if (BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res, 0);
- if (!status && res.max_tx_qs)
- adapter->res.max_tx_qs =
- min(adapter->res.max_tx_qs, res.max_tx_qs);
- }
-
/* For Lancer, SH etc read per-function resource limits from FW.
* GET_FUNC_CONFIG returns per function guaranteed limits.
* GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3247,6 +3388,12 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
+ if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
+ dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
+ adapter->fw_ver);
+ dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
+ }
+
if (adapter->vlans_added)
be_vid_config(adapter);
@@ -3258,7 +3405,7 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter) && num_vfs) {
+ if (sriov_want(adapter)) {
if (be_max_vfs(adapter))
be_vf_setup(adapter);
else
@@ -3900,6 +4047,9 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = be_busy_poll
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3960,11 +4110,6 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- u32 sli_intf;
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
if (BEx_chip(adapter) && be_physfn(adapter)) {
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
@@ -4077,9 +4222,11 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
else if (BE2_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- else
- /* BE3 and Skyhawk */
+ else if (BE3_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ else
+ /* ALL non-BE ASICs */
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
GFP_KERNEL);
@@ -4113,7 +4260,6 @@ static void be_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -4262,7 +4408,6 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
- struct be_eq_obj *eqo;
int i;
/* when interrupts are not yet enabled, just reap any pending
@@ -4287,14 +4432,14 @@ static void be_worker(struct work_struct *work)
be_cmd_get_die_temperature(adapter);
for_all_rx_queues(adapter, rxo, i) {
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
+ /* Replenish RX-queues starved due to memory
+ * allocation failures.
+ */
+ if (rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_KERNEL);
- }
}
- for_all_evt_queues(adapter, eqo, i)
- be_eqd_update(adapter, eqo);
+ be_eqd_update(adapter);
reschedule:
adapter->work_counter++;
@@ -4351,28 +4496,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
- status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (status < 0) {
- dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
- goto free_netdev;
- }
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (!status)
- status = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
}
}
- status = pci_enable_pcie_error_reporting(pdev);
- if (status)
- dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
+ if (be_physfn(adapter)) {
+ status = pci_enable_pcie_error_reporting(pdev);
+ if (!status)
+ dev_info(&pdev->dev, "PCIe error reporting enabled\n");
+ }
status = be_ctrl_init(adapter);
if (status)
@@ -4443,7 +4582,6 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
free_netdev(netdev);
- pci_set_drvdata(pdev, NULL);
rel_reg:
pci_release_regions(pdev);
disable_dev:
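
be_probe() above collapses the separate dma_set_mask()/dma_set_coherent_mask() calls into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together and fails as one unit. A hedged sketch of the resulting 64-bit-with-32-bit-fallback pattern; foo_set_dma_mask() is a hypothetical helper, not the driver's code:

/* Sketch of the probe-time DMA mask setup pattern used above. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* Prefer a 64-bit mask; both streaming and coherent masks are set. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err)
		return 0;

	/* Fall back to 32-bit DMA if the platform cannot do 64-bit. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return err;
}
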
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c706b7a9397..4b22a9579f8 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, np->mem);
free_netdev(dev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
} else
printk(KERN_ERR "fealnx: remove for unknown device\n");
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b2793b91cc5..4cbebf3d80e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -386,7 +386,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
-
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ bdp->cbd_bufaddr = 0;
+ fep->tx_skbuff[index] = NULL;
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
@@ -861,6 +868,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
struct bufdesc_ex *ebdp = NULL;
bool vlan_packet_rcvd = false;
u16 vlan_tag;
+ int index = 0;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -916,10 +924,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
ndev->stats.rx_packets++;
pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len;
- data = (__u8*)__va(bdp->cbd_bufaddr);
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->rx_bd_base;
+ else
+ index = bdp - fep->rx_bd_base;
+ data = fep->rx_skbuff[index]->data;
+ dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, pkt_len);
@@ -999,8 +1012,8 @@ fec_enet_rx(struct net_device *ndev, int budget)
napi_gro_receive(&fep->napi, skb);
}
- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
- FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -1719,6 +1732,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ fec_enet_free_buffers(ndev);
+ if (net_ratelimit())
+ netdev_err(ndev, "Rx DMA memory map failed\n");
+ return -ENOMEM;
+ }
bdp->cbd_sc = BD_ENET_RX_EMPTY;
if (fep->bufdesc_ex) {
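
The fec_main.c hunks above add dma_mapping_error() checks after dma_map_single() and, on the RX path, switch recycled buffers from unmap/remap to dma_sync_single_for_cpu()/dma_sync_single_for_device(). A hedged sketch of that check-and-sync pattern; the foo_* helpers are hypothetical, not FEC code:

/* Sketch of the map-check and sync-for-reuse pattern above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int foo_map_tx(struct device *dev, void *buf, size_t len, dma_addr_t *addr)
{
	*addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;	/* drop the packet rather than hand a bad address to HW */
	return 0;
}

static void foo_reuse_rx(struct device *dev, dma_addr_t addr, size_t len)
{
	/* Give the (already mapped) buffer to the CPU for inspection ... */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... then hand it back to the device, without unmapping in between. */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
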
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6b60582ce8c..56f2f608a9f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
mac_addr = of_get_mac_address(ofdev->dev.of_node);
if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, 6);
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 7583a9572bc..f8b92864fc5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -32,7 +32,9 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <asm/immap_cpm2.h>
@@ -88,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
- fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
goto out;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 9ae6cdbcac2..a9a00f39521 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -31,7 +31,9 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <asm/irq.h>
@@ -98,7 +100,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 22a02a76706..d37cd4ebac6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -31,6 +31,8 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/irq.h>
@@ -98,7 +100,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
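
The three fs_enet MAC backends above replace of_irq_to_resource() with irq_of_parse_and_map(), which resolves the node's interrupt specifier straight to a Linux virq. A hedged sketch of the lookup; foo_get_irq() is hypothetical, only the OF call itself is real API:

/* Sketch of the device-tree interrupt lookup used above. */
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/errno.h>

static int foo_get_irq(struct platform_device *ofdev)
{
	unsigned int irq;

	/* Parse interrupt index 0 of this node and map it to a virq. */
	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!irq)	/* the drivers above compare against NO_IRQ, which is 0 here */
		return -EINVAL;

	return irq;
}
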
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 844ecfa84d1..67caaacd19e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -22,6 +22,7 @@
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 2f1c46a12f0..ac5d447ff8c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -31,6 +31,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/pgtable.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9fbe4dda7a0..b14d7904a07 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -78,6 +78,8 @@
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
@@ -2918,7 +2920,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
struct gfar_priv_rx_q *rx_queue = NULL;
int work_done = 0, work_done_per_q = 0;
int i, budget_per_q = 0;
- int has_tx_work;
+ int has_tx_work = 0;
unsigned long rstat_rxf;
int num_act_queues;
@@ -2933,62 +2935,51 @@ static int gfar_poll(struct napi_struct *napi, int budget)
if (num_act_queues)
budget_per_q = budget/num_act_queues;
- while (1) {
- has_tx_work = 0;
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
}
+ }
- for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- /* skip queue if not active */
- if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
- continue;
-
- rx_queue = priv->rx_queue[i];
- work_done_per_q =
- gfar_clean_rx_ring(rx_queue, budget_per_q);
- work_done += work_done_per_q;
-
- /* finished processing this queue */
- if (work_done_per_q < budget_per_q) {
- /* clear active queue hw indication */
- gfar_write(&regs->rstat,
- RSTAT_CLEAR_RXF0 >> i);
- rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
- num_act_queues--;
-
- if (!num_act_queues)
- break;
- /* recompute budget per Rx queue */
- budget_per_q =
- (budget - work_done) / num_act_queues;
- }
- }
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+ continue;
- if (work_done >= budget)
- break;
+ rx_queue = priv->rx_queue[i];
+ work_done_per_q =
+ gfar_clean_rx_ring(rx_queue, budget_per_q);
+ work_done += work_done_per_q;
+
+ /* finished processing this queue */
+ if (work_done_per_q < budget_per_q) {
+ /* clear active queue hw indication */
+ gfar_write(&regs->rstat,
+ RSTAT_CLEAR_RXF0 >> i);
+ num_act_queues--;
+
+ if (!num_act_queues)
+ break;
+ }
+ }
- if (!num_act_queues && !has_tx_work) {
+ if (!num_act_queues && !has_tx_work) {
- napi_complete(napi);
+ napi_complete(napi);
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
- break;
- }
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return work_done;
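
gfar_poll() above drops the outer while(1): the handler now makes a single pass over the TX and RX queues and, only when no queue has work left, completes NAPI and re-arms interrupts; otherwise it returns its work count and lets the NAPI core schedule another poll. A hedged skeleton of that single-pass shape; the foo_* names and struct are hypothetical stand-ins, not gianfar code:

/* Skeleton of a single-pass NAPI poll handler. */
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* per-queue state would live here */
};

void foo_clean_tx(struct foo_priv *priv);
int  foo_clean_rx(struct foo_priv *priv, int budget);
void foo_enable_irqs(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done;

	foo_clean_tx(priv);			/* TX cleanup runs to completion */
	work_done = foo_clean_rx(priv, budget);	/* RX work bounded by the budget */

	if (work_done < budget) {
		napi_complete(napi);
		foo_enable_irqs(priv);		/* re-arm interrupts only when idle */
	}
	return work_done;
}
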
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04112b98ff5..114c58f9d8d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
-extern void lock_rx_qs(struct gfar_private *priv);
-extern void lock_tx_qs(struct gfar_private *priv);
-extern void unlock_rx_qs(struct gfar_private *priv);
-extern void unlock_tx_qs(struct gfar_private *priv);
-extern irqreturn_t gfar_receive(int irq, void *dev_id);
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
-extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
- int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing_all(struct gfar_private *priv);
+void lock_rx_qs(struct gfar_private *priv);
+void lock_tx_qs(struct gfar_private *priv);
+void unlock_rx_qs(struct gfar_private *priv);
+void unlock_tx_qs(struct gfar_private *priv);
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_halt(struct net_device *dev);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+ u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
-extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_check_rx_parser_mode(struct gfar_private *priv);
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5930c39672d..5548b6d00c3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -31,6 +31,8 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
@@ -3899,7 +3901,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index c1b6e7e31aa..d449fcb9019 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 6231bc02b96..1085257385d 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FUJITSU
bool "Fujitsu devices"
default y
- depends on ISA || PCMCIA
+ depends on PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 91227d03274..37860096f74 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
if (request_irq(dev->irq, hp100_interrupt,
lp->bus == HP100_BUS_PCI || lp->bus ==
HP100_BUS_EISA ? IRQF_SHARED : 0,
- "hp100", dev)) {
+ dev->name, dev)) {
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
}
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index e3881614539..7ce6379fd1a 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
- memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
lp->sa_cmd.cmd.command = CmdSASetup;
i596_add_cmd(dev, &lp->sa_cmd.cmd);
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
err = -ENODEV;
goto out;
}
- memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
+ memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
dev->base_addr = MVME_I596_BASE;
dev->irq = (unsigned) MVME16x_IRQ_I596;
goto found;
@@ -1527,9 +1527,7 @@ int __init init_module(void)
if (debug >= 0)
i596_debug = debug;
dev_82596 = i82596_probe(-1);
- if (IS_ERR(dev_82596))
- return PTR_ERR(dev_82596);
- return 0;
+ return PTR_ERR_OR_ZERO(dev_82596);
}
void __exit cleanup_module(void)
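
init_module() above folds the IS_ERR()/PTR_ERR()/return-0 sequence into PTR_ERR_OR_ZERO(), which performs the same test in one expression. A hedged sketch; foo_probe() and foo_init() are hypothetical, only PTR_ERR_OR_ZERO() is real API from <linux/err.h>:

/* Sketch of the PTR_ERR_OR_ZERO() conversion shown above. */
#include <linux/err.h>

struct net_device;
struct net_device *foo_probe(int unit);	/* returns a device or ERR_PTR() */

static struct net_device *foo_dev;

static int foo_init(void)
{
	foo_dev = foo_probe(-1);

	/* Same as: if (IS_ERR(foo_dev)) return PTR_ERR(foo_dev); return 0; */
	return PTR_ERR_OR_ZERO(foo_dev);
}
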
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index d653bac4cfc..861fa15e1e8 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
i596_add_cmd(dev, &dma->cf_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
- memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+ memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, ha->addr, 6);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
"%s: Adding address %pM\n",
dev->name, cp));
- cp += 6;
+ cp += ETH_ALEN;
}
DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b5c7222342..ae342fdb42c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -39,6 +39,8 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/slab.h>
@@ -2676,7 +2678,7 @@ static int emac_init_config(struct emac_instance *dev)
np->full_name);
return -ENXIO;
}
- memcpy(dev->ndev->dev_addr, p, 6);
+ memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 59a92d5870b..9c45efe4c8f 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -29,13 +29,13 @@
struct emac_instance;
struct mal_instance;
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
# define DBG_LEVEL 1
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index dac564c2544..9d75fef6396 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include "core.h"
#include <asm/dcr-regs.h>
@@ -263,7 +264,9 @@ static inline void mal_schedule_poll(struct mal_instance *mal)
{
if (likely(napi_schedule_prep(&mal->napi))) {
MAL_DBG2(mal, "schedule_poll" NL);
+ spin_lock(&mal->lock);
mal_disable_eob_irq(mal);
+ spin_unlock(&mal->lock);
__napi_schedule(&mal->napi);
} else
MAL_DBG2(mal, "already in poll" NL);
@@ -442,15 +445,13 @@ static int mal_poll(struct napi_struct *napi, int budget)
if (unlikely(mc->ops->peek_rx(mc->dev) ||
test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
MAL_DBG2(mal, "rotting packet" NL);
- if (napi_reschedule(napi))
- mal_disable_eob_irq(mal);
- else
- MAL_DBG2(mal, "already in poll list" NL);
-
- if (budget > 0)
- goto again;
- else
+ if (!napi_reschedule(napi))
goto more_work;
+
+ spin_lock_irqsave(&mal->lock, flags);
+ mal_disable_eob_irq(mal);
+ spin_unlock_irqrestore(&mal->lock, flags);
+ goto again;
}
mc->ops->poll_tx(mc->dev);
}
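
The mal.c hunks above take the MAL spinlock around mal_disable_eob_irq() so the poll path and the interrupt path no longer race on the EOB-interrupt enable state: the hard-IRQ side can use plain spin_lock(), while the poll side uses spin_lock_irqsave(). A generic, hedged sketch of that split; foo_* is hypothetical, not MAL code:

/* Generic sketch: serializing an interrupt-enable register update
 * between poll context and the hard-IRQ handler.
 */
#include <linux/spinlock.h>

struct foo_ctx {
	spinlock_t lock;
	/* hardware enable/disable state guarded by this lock */
};

/* Called from softirq/poll context: must also block the local IRQ. */
static void foo_disable_from_poll(struct foo_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	/* write the interrupt-disable register here */
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* Called from the hard-IRQ handler: interrupts are already off. */
static void foo_disable_from_irq(struct foo_ctx *ctx)
{
	spin_lock(&ctx->lock);
	/* write the interrupt-disable register here */
	spin_unlock(&ctx->lock);
}
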
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c47e23d6eea..4fb2f96da23 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 668bceeff4a..d4f1374d190 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -56,15 +56,15 @@ struct rgmii_instance {
#ifdef CONFIG_IBM_EMAC_RGMII
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index c231a4a32c4..9f24769ed82 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -18,6 +18,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/of_address.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 350b7096a04..4d5f336f07b 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -72,13 +72,13 @@ struct tah_instance {
#ifdef CONFIG_IBM_EMAC_TAH
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 4cdf286f7ee..9ca67a38c06 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 455bfb08549..0959c55b145 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -53,15 +53,15 @@ struct zmii_instance {
#ifdef CONFIG_IBM_EMAC_ZMII
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define zmii_init() 0
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5d41aee69d1..952d795230a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, 6);
+ memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index bdf5023724e..25045ae0717 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev)
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops ipg_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e210279..cbaba4442d4 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2985,7 +2985,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
err_out_free_dev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 26d9cd59ec7..58c147271a3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -325,7 +325,7 @@ enum e1000_state_t {
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
#define e_dbg(format, arg...) \
netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007dd5a..e38622825fa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1018,19 +1018,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* according to DMA-API-HOWTO, coherent calls will always
- * succeed if the set call did
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
@@ -3917,8 +3912,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
- (unsigned long)((tx_ring - adapter->tx_ring) /
- sizeof(struct e1000_tx_ring)),
+ (unsigned long)(tx_ring - adapter->tx_ring),
readl(hw->hw_addr + tx_ring->tdh),
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ad0edd11015..0150f7fc893 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -472,26 +472,25 @@ enum latency_range {
extern char e1000e_driver_name[];
extern const char e1000e_driver_version[];
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef786775ac..8d3945ab733 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
+ struct hwtstamp_config *config)
{
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config *config = &adapter->hwtstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 rxmtrl = 0;
@@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
return -ERANGE;
}
+ adapter->hwtstamp_config = *config;
+
/* enable/disable Tx h/w time stamping */
regval = er32(TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
/* initialize systim and reset the ns time counter */
- e1000e_config_hwtstamp(adapter);
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
/* Set EEE advertisement as appropriate */
if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- adapter->hwtstamp_config = config;
-
- ret_val = e1000e_config_hwtstamp(adapter);
+ ret_val = e1000e_config_hwtstamp(adapter, &config);
if (ret_val)
return ret_val;
- config = adapter->hwtstamp_config;
-
switch (config.rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@@ -6553,21 +6551,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb8a6c..1ca9834cdfd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -46,7 +46,6 @@
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
-#include <linux/version.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
@@ -347,9 +346,9 @@ struct i40e_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
- /* These are arrays of rings, allocated at run-time */
- struct i40e_ring *rx_rings;
- struct i40e_ring *tx_rings;
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +365,7 @@ struct i40e_vsi {
u8 dtype;
/* List of q_vectors allocated to this VSI */
- struct i40e_q_vector *q_vectors;
+ struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
@@ -422,8 +421,9 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
- char name[IFNAMSIZ + 9];
cpumask_t affinity_mask;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -544,6 +544,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f64b7..ef4cb1cf31f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
- char dump_request_buf[16];
bool seid_found = false;
- int bytes_not_copied;
long seid = -1;
int buflen = 0;
int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(dump_request_buf))
- return -ENOSPC;
-
- bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- if (bytes_not_copied > 0)
- count -= bytes_not_copied;
- dump_request_buf[count] = '\0';
/* decode the SEID given to be dumped */
- ret = kstrtol(dump_request_buf, 0, &seid);
- if (ret < 0) {
- dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
- dump_request_buf);
+ ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "bad seid value\n");
} else if (seid == 0) {
seid_found = true;
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
memcpy(p, vsi, len);
p += len;
- len = (sizeof(struct i40e_q_vector)
- * vsi->num_q_vectors);
- memcpy(p, vsi->q_vectors, len);
- p += len;
-
- len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
- memcpy(p, vsi->tx_rings, len);
- p += len;
- memcpy(p, vsi->rx_rings, len);
- p += len;
+ if (vsi->num_q_vectors) {
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- len = sizeof(struct i40e_tx_buffer);
- memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ if (vsi->num_queue_pairs) {
+ len = (sizeof(struct i40e_ring) *
+ vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
p += len;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+ if (vsi->tx_rings[0]) {
+ len = sizeof(struct i40e_tx_buffer);
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+ p += len;
+ }
len = sizeof(struct i40e_rx_buffer);
- memcpy(p, vsi->rx_rings[i].rx_bi, len);
- p += len;
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+ p += len;
+ }
}
/* macvlan filter list */
@@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, vsi->rx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, vsi->rx_rings[i].dev,
- vsi->rx_rings[i].netdev,
- vsi->rx_rings[i].rx_bi);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->rx_rings[i].state,
- vsi->rx_rings[i].queue_index,
- vsi->rx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, vsi->rx_rings[i].rx_hdr_len,
- vsi->rx_rings[i].rx_buf_len,
- vsi->rx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->rx_rings[i].hsplit,
- vsi->rx_rings[i].next_to_use,
- vsi->rx_rings[i].next_to_clean,
- vsi->rx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
- i, vsi->rx_rings[i].rx_stats.packets,
- vsi->rx_rings[i].rx_stats.bytes,
- vsi->rx_rings[i].rx_stats.non_eop_descs);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
- i,
- vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
- vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->rx_rings[i].size,
- (long unsigned int)vsi->rx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->rx_rings[i].vsi,
- vsi->rx_rings[i].q_vector);
- }
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, rx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, rx_ring->dev,
+ rx_ring->netdev,
+ rx_ring->rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, rx_ring->rx_hdr_len,
+ rx_ring->rx_buf_len,
+ rx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, rx_ring->hsplit,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_rx_page_failed,
+ rx_ring->rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, rx_ring->size,
+ (long unsigned int)rx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, rx_ring->vsi,
+ rx_ring->q_vector);
}
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, vsi->tx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, vsi->tx_rings[i].dev,
- vsi->tx_rings[i].netdev,
- vsi->tx_rings[i].tx_bi);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->tx_rings[i].state,
- vsi->tx_rings[i].queue_index,
- vsi->tx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, vsi->tx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->tx_rings[i].hsplit,
- vsi->tx_rings[i].next_to_use,
- vsi->tx_rings[i].next_to_clean,
- vsi->tx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
- i, vsi->tx_rings[i].tx_stats.packets,
- vsi->tx_rings[i].tx_stats.bytes,
- vsi->tx_rings[i].tx_stats.restart_queue);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
- i,
- vsi->tx_rings[i].tx_stats.tx_busy,
- vsi->tx_rings[i].tx_stats.completed,
- vsi->tx_rings[i].tx_stats.tx_done_old);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->tx_rings[i].size,
- (long unsigned int)vsi->tx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->tx_rings[i].vsi,
- vsi->tx_rings[i].q_vector);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: DCB tc = %d\n",
- i, vsi->tx_rings[i].dcb_tc);
- }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, tx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, tx_ring->dev,
+ tx_ring->netdev,
+ tx_ring->tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, tx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, tx_ring->hsplit,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, tx_ring->size,
+ (long unsigned int)tx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, tx_ring->vsi,
+ tx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
}
+ rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
- if (vsi->q_vectors) {
- for (i = 0; i < vsi->num_q_vectors; i++) {
- dev_info(&pf->pdev->dev,
- " q_vectors[%i]: base index = %ld\n",
- i, ((long int)*vsi->q_vectors[i].rx.ring-
- (long int)*vsi->q_vectors[0].rx.ring)/
- sizeof(struct i40e_ring));
- }
- }
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
return;
}
if (is_rx_ring)
- ring = vsi->rx_rings[ring_id];
+ ring = *vsi->rx_rings[ring_id];
else
- ring = vsi->tx_rings[ring_id];
+ ring = *vsi->tx_rings[ring_id];
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
u8 *print_buf_start;
u8 *print_buf;
- char *cmd_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count -= bytes_not_copied;
cmd_buf[count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
if (!print_buf_start)
goto command_write_done;
@@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- u8 ma[6];
- int vlan = 0;
struct i40e_mac_filter *f;
+ int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- u8 ma[6];
int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- int v;
- u16 vid;
i40e_status ret;
+ u16 vid;
+ int v;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
- int ret;
u16 packet_len, i, j = 0;
char *asc_packet;
bool add = false;
+ int ret;
asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
GFP_KERNEL);
@@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
+ u16 llen, rlen;
int ret, i;
u8 *buff;
- u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ u16 llen, rlen;
int ret, i;
u8 *buff;
- u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- /* Read at least 512 words */
- if (buffer_len == 0)
- buffer_len = 512;
+ /* set the max length */
+ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
bytes = 2 * buffer_len;
+
+ /* read at least 1k bytes, no more than 4kB */
+ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
buff = kzalloc(bytes, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1903,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
struct i40e_pf *pf = filp->private_data;
int bytes_not_copied;
struct i40e_vsi *vsi;
+ char *buf_tmp;
int vsi_seid;
int i, cnt;
@@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count -= bytes_not_copied;
i40e_dbg_netdev_ops_buf[count] = '\0';
+ buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ if (buf_tmp) {
+ *buf_tmp = '\0';
+ count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ }
+
if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
@@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
goto netdev_ops_write_done;
}
for (i = 0; i < vsi->num_q_vectors; i++)
- napi_schedule(&vsi->q_vectors[i].napi);
+ napi_schedule(&vsi->q_vectors[i]->napi);
dev_info(&pf->pdev->dev, "napi called\n");
} else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile __attribute__((unused));
+ struct dentry *pfile;
const char *name = pci_name(pf->pdev);
+ const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (pf->i40e_dbg_pf) {
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_command_fops);
- pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_dump_fops);
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_netdev_ops_fops);
- } else {
- dev_info(&pf->pdev->dev,
- "debugfs entry for %s failed\n", name);
- }
+ if (!pf->i40e_dbg_pf)
+ return;
+
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_info(dev, "debugfs dir/file for %s failed\n", name);
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ return;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76..1b86138fa9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = vsi->rx_rings[0].count;
- ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == vsi->tx_rings[0].count) &&
- (new_rx_count == vsi->rx_rings[0].count))
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- vsi->tx_rings[i].count = new_tx_count;
- vsi->rx_rings[i].count = new_rx_count;
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
}
goto done;
}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
/* alloc updated Tx resources */
- if (new_tx_count != vsi->tx_rings[0].count) {
+ if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
- vsi->tx_rings[0].count, new_tx_count);
+ vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
/* alloc updated Rx resources */
- if (new_rx_count != vsi->rx_rings[0].count) {
+ if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n",
- vsi->rx_rings[0].count, new_rx_count);
+ vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(&vsi->tx_rings[i]);
- vsi->tx_rings[i] = tx_rings[i];
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);
tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_rx_resources(&vsi->rx_rings[i]);
- vsi->rx_rings[i] = rx_rings[i];
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ *vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
char *p;
int j;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+ unsigned int start;
i40e_update_stats(vsi);
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->tx_rings[j].tx_stats.packets;
- data[i++] = vsi->tx_rings[j].tx_stats.bytes;
- }
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->rx_rings[j].rx_stats.packets;
- data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ rcu_read_lock();
+ for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ struct i40e_ring *rx_ring;
+
+ if (!tx_ring)
+ continue;
+
+ /* process Tx ring statistics */
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ data[i] = tx_ring->stats.packets;
+ data[i + 1] = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+ /* Rx ring is the 2nd half of the queue pair */
+ rx_ring = &tx_ring[1];
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ data[i + 2] = rx_ring->stats.packets;
+ data[i + 3] = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
}
+ rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
}
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa479501..be15938ba21 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
**/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u64 bytes, packets;
+ unsigned int start;
+
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
- *storage = *i40e_get_vsi_stats_struct(vsi);
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ packets = tx_ring->stats.packets;
+ bytes = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ rx_ring = &tx_ring[1];
- return storage;
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ packets = rx_ring->stats.packets;
+ bytes = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ rcu_read_unlock();
+
+ /* following stats updated by ixgbe_watchdog_task() */
+ stats->multicast = vsi_stats->multicast;
+ stats->tx_errors = vsi_stats->tx_errors;
+ stats->tx_dropped = vsi_stats->tx_dropped;
+ stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+ return stats;
}
/**
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i].rx_stats, 0 ,
- sizeof(vsi->rx_rings[i].rx_stats));
- memset(&vsi->tx_rings[i].tx_stats, 0,
- sizeof(vsi->tx_rings[i].tx_stats));
+ memset(&vsi->rx_rings[i]->stats, 0 ,
+ sizeof(vsi->rx_rings[i]->stats));
+ memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+ sizeof(vsi->rx_rings[i]->rx_stats));
+ memset(&vsi->tx_rings[i]->stats, 0 ,
+ sizeof(vsi->tx_rings[i]->stats));
+ memset(&vsi->tx_rings[i]->tx_stats, 0,
+ sizeof(vsi->tx_rings[i]->tx_stats));
}
vsi->stat_offsets_loaded = false;
}
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
}
}
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
tc = ring->dcb_tc;
if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = 0;
rx_page = 0;
rx_buf = 0;
+ rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
struct i40e_ring *p;
+ u64 bytes, packets;
+ unsigned int start;
- p = &vsi->rx_rings[q];
- rx_b += p->rx_stats.bytes;
- rx_p += p->rx_stats.packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ /* locate Tx ring */
+ p = ACCESS_ONCE(vsi->tx_rings[q]);
- p = &vsi->tx_rings[q];
- tx_b += p->tx_stats.bytes;
- tx_p += p->tx_stats.packets;
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
+
+ /* Rx queue is part of the same block as Tx queue */
+ p = &p[1];
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ rx_b += bytes;
+ rx_p += packets;
+ rx_buf += p->rx_stats.alloc_rx_buff_failed;
+ rx_page += p->rx_stats.alloc_rx_page_failed;
}
+ rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+ err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
return err;
}
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i].desc)
- i40e_free_tx_resources(&vsi->tx_rings[i]);
+ if (vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
}
/**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
return err;
}
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i].desc)
- i40e_free_rx_resources(&vsi->rx_rings[i]);
+ if (vsi->rx_rings[i]->desc)
+ i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
@@ -2114,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* Now associate this queue with this PCI function */
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
- err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->tx_rings[i]);
return err;
}
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+ err = i40e_configure_rx_ring(vsi->rx_rings[i]);
return err;
}
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *rx_ring = vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
*/
qp = vsi->base_queue;
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
- struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2472,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2500,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- i40e_flush(hw);
+ /* skip the flush */
}
/**
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int vector, err;
for (vector = 0; vector < q_vectors; vector++) {
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
- if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring[0]) {
+ } else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring[0]) {
+ } else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
}
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
i40e_irq_dynamic_enable_icr0(pf);
}
+ i40e_flush(&pf->hw);
return 0;
}
@@ -2681,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0 = rd32(hw, I40E_PFINT_ICR0);
- /* if sharing a legacy IRQ, we might get called w/o an intr pending */
- if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
-
val = rd32(hw, I40E_PFINT_DYN_CTL0);
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ return IRQ_NONE;
+
ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
}
/**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
* @vsi: the VSI being configured
* @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
**/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
- rx_ring->q_vector = q_vector;
- q_vector->rx.ring[q_vector->rx.count] = rx_ring;
- q_vector->rx.count++;
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+ struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
tx_ring->q_vector = q_vector;
- q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
- q_vector->num_ringpairs++;
- q_vector->vsi = vsi;
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
}
/**
@@ -2823,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
int qp_remaining = vsi->num_queue_pairs;
int q_vectors = vsi->num_q_vectors;
- int qp_per_vector;
+ int num_ringpairs;
int v_start = 0;
int qp_idx = 0;
@@ -2831,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
* group them so there are multiple queues per vector.
*/
for (; v_start < q_vectors && qp_remaining; v_start++) {
- qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
- for (; qp_per_vector;
- qp_per_vector--, qp_idx++, qp_remaining--) {
- map_vector_to_rxq(vsi, v_start, qp_idx);
- map_vector_to_txq(vsi, v_start, qp_idx);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+ num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+ q_vector->num_ringpairs = num_ringpairs;
+
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+
+ while (num_ringpairs--) {
+ map_vector_to_qp(vsi, v_start, qp_idx);
+ qp_idx++;
+ qp_remaining--;
}
}
}
@@ -2887,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++)
- i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
i40e_intr(pf->pdev->irq, netdev);
}
@@ -3073,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i].num_ringpairs == 0)
+ if (vsi->q_vectors[i]->num_ringpairs == 0)
continue;
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
free_irq(pf->msix_entries[vector].vector,
- &vsi->q_vectors[i]);
+ vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
@@ -3164,6 +3219,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
}
/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *ring;
+
+ if (!q_vector)
+ return;
+
+ /* disassociate q_vector from rings */
+ i40e_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ vsi->q_vectors[v_idx] = NULL;
+
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
* @vsi: the VSI being un-configured
*
@@ -3174,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
- struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
- int r_idx;
-
- if (!q_vector)
- continue;
-
- /* disassociate q_vector from rings */
- for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
- q_vector->tx.ring[r_idx]->q_vector = NULL;
- for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
- q_vector->rx.ring[r_idx]->q_vector = NULL;
-
- /* only VSI w/ an associated netdev is set up w/ NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
- }
- kfree(vsi->q_vectors);
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ i40e_free_q_vector(vsi, v_idx);
}
/**
@@ -3241,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx].napi);
+ napi_enable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3256,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx].napi);
+ napi_disable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3703,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
+ netdev_info(vsi->netdev, "NIC Link is Up\n");
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ } else if (vsi->netdev) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
}
i40e_service_event_schedule(pf);
@@ -3772,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_clean_tx_ring(&vsi->tx_rings[i]);
- i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ i40e_clean_tx_ring(vsi->tx_rings[i]);
+ i40e_clean_rx_ring(vsi->rx_rings[i]);
}
}
@@ -4153,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+ netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+ "NIC Link is %s\n", (new_link ? "Up" : "Down"));
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(&vsi->tx_rings[i]);
+ set_check_for_tx_hang(vsi->tx_rings[i]);
if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i].state))
+ &vsi->tx_rings[i]->state))
armed++;
}
@@ -4537,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED)))
return;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
+ int sz_vectors;
+ int sz_rings;
int vsi_idx;
int i;
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
- goto err_alloc_vsi; /* out of VSI slots! */
+ goto unlock_pf; /* out of VSI slots! */
}
pf->next_vsi = ++i;
vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
if (!vsi) {
ret = -ENOMEM;
- goto err_alloc_vsi;
+ goto unlock_pf;
}
vsi->type = type;
vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
- i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_set_num_rings_in_vsi(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* allocate memory for ring pointers */
+ sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ goto err_rings;
+ }
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ /* allocate memory for q_vector pointers */
+ sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
-err_alloc_vsi:
+ goto unlock_pf;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+err_rings:
+ pf->next_vsi = i - 1;
+ kfree(vsi);
+unlock_pf:
mutex_unlock(&pf->switch_mutex);
return ret;
}
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ /* free the ring and vector containers */
+ kfree(vsi->q_vectors);
+ kfree(vsi->tx_rings);
+
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
@@ -5043,34 +5152,40 @@ free_vsi:
}
/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings[0])
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ vsi->rx_rings[i] = NULL;
+ }
+
+ return 0;
+}
+
+/**
* i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being configured
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int ret = 0;
int i;
- vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->rx_rings) {
- ret = -ENOMEM;
- goto err_alloc_rings;
- }
-
- vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
- kfree(vsi->rx_rings);
- goto err_alloc_rings;
- }
-
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
tx_ring->queue_index = i;
tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ vsi->tx_rings[i] = tx_ring;
+ rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->reg_idx = vsi->base_queue + i;
rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
- }
-
-err_alloc_rings:
- return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
- if (vsi) {
- kfree(vsi->rx_rings);
- kfree(vsi->tx_rings);
+ vsi->rx_rings[i] = rx_ring;
}
return 0;
+
+err_out:
+ i40e_vsi_clear_rings(vsi);
+ return -ENOMEM;
}
/**
@@ -5249,6 +5356,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi,
+ i40e_napi_poll, vsi->work_limit);
+
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+ /* tie q_vector and vsi together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
* i40e_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
@@ -5259,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
+ int err;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- vsi->q_vectors = kcalloc(num_q_vectors,
- sizeof(struct i40e_q_vector),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- return -ENOMEM;
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- vsi->q_vectors[v_idx].vsi = vsi;
- vsi->q_vectors[v_idx].v_idx = v_idx;
- cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
- i40e_napi_poll, vsi->work_limit);
+ err = i40e_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
}
return 0;
+
+err_out:
+ while (v_idx--)
+ i40e_free_q_vector(vsi, v_idx);
+
+ return err;
}
/**
@@ -5297,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
I40E_FLAG_MQ_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
@@ -5312,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
- dev_info(&pf->pdev->dev,
- "MSI init failed (%d), trying legacy.\n", err);
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
}
+ if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
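
i40e_init_interrupt_scheme() degrades from MSI-X to MSI to legacy INTx by clearing capability flags as each step fails; the fix above also clears I40E_FLAG_MSIX_ENABLED itself so nothing downstream can still take the MSI-X path. A compact sketch of that flag-driven fallback (the flag names and enable helpers below are made up):

    #include <stdio.h>

    #define FLAG_MSIX (1u << 0)
    #define FLAG_MSI  (1u << 1)
    #define FLAG_RSS  (1u << 2)   /* stands in for features that need MSI-X */

    static int enable_msix(void) { return -1; }  /* pretend MSI-X setup fails */
    static int enable_msi(void)  { return 0; }

    static unsigned int init_interrupt_scheme(unsigned int flags)
    {
            if ((flags & FLAG_MSIX) && enable_msix() < 0)
                    /* drop MSI-X itself and everything that depends on it */
                    flags &= ~(FLAG_MSIX | FLAG_RSS);

            if (!(flags & FLAG_MSIX) && (flags & FLAG_MSI) &&
                enable_msi() < 0)
                    flags &= ~FLAG_MSI;

            if (!(flags & (FLAG_MSIX | FLAG_MSI)))
                    printf("falling back to legacy IRQ\n");

            return flags;
    }

    int main(void)
    {
            unsigned int flags = FLAG_MSIX | FLAG_MSI | FLAG_RSS;

            printf("final flags: %#x\n", init_interrupt_scheme(flags));
            return 0;
    }
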
@@ -5950,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
int ret = -ENOENT;
struct i40e_pf *pf = vsi->back;
- if (vsi->q_vectors) {
+ if (vsi->q_vectors[0]) {
dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->seid);
return -EEXIST;
@@ -5972,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out;
}
- vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
- vsi->num_q_vectors, vsi->idx);
+ if (vsi->num_q_vectors)
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
"failed to get q tracking for VSI %d, err=%d\n",
@@ -7062,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
pf->vsi = kzalloc(len, GFP_KERNEL);
- if (!pf->vsi)
+ if (!pf->vsi) {
+ err = -ENOMEM;
goto err_switch_setup;
+ }
err = i40e_setup_pf_switch(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa9b0c..f1f03bc5c72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_program_fdir_filter - Program a Flow Director filter
* @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
+ unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
if (!vsi)
return -ENOENT;
- tx_ring = &vsi->tx_rings[0];
+ tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
dma = dma_map_single(dev, fdir_data->raw_packet,
- I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_fail;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
- << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
- & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+ fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
- << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
- & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+ fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
- << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
- & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+ fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK;
/* Use LAN VSI Id if not programmed by user */
if (fdir_data->dest_vsi == 0)
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
else
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((fdir_data->dest_vsi
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
- & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+ fpt |= ((u32)fdir_data->dest_vsi <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
- fdir_desc->dtype_cmd_cntindex =
- cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+ dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add)
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
else
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
- << I40E_TXD_FLTR_QW1_DEST_SHIFT)
- & I40E_TXD_FLTR_QW1_DEST_MASK);
+ dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
- & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+ dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
if (fdir_data->cnt_index != 0) {
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32((fdir_data->cnt_index
- << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
- & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dcc |= ((u32)fdir_data->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
/* Now program a dummy descriptor */
- tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
tx_desc->buffer_addr = cpu_to_le64(dma);
- td_cmd = I40E_TX_DESC_CMD_EOP |
- I40E_TX_DESC_CMD_RS |
- I40E_TX_DESC_CMD_DUMMY;
+ td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
- /* Mark the data descriptor to be watched */
- tx_buf->next_to_watch = tx_desc;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
*/
wmb();
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
writel(tx_ring->next_to_use, tx_ring->tail);
return 0;
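
The filter-programming rewrite above builds each descriptor word in a plain u32 local (fpt, dcc) with shift-and-mask steps and performs a single cpu_to_le32() at the end, rather than OR-ing already-converted values field by field. A stand-alone sketch of that build-then-convert pattern (the field layout below is invented for the example):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* invented layout: qindex in bits 0-10, flexoff in 11-15, pctype in 17-22 */
    #define QW_QINDEX_SHIFT  0
    #define QW_QINDEX_MASK   (0x7FFu << QW_QINDEX_SHIFT)
    #define QW_FLEXOFF_SHIFT 11
    #define QW_FLEXOFF_MASK  (0x1Fu << QW_FLEXOFF_SHIFT)
    #define QW_PCTYPE_SHIFT  17
    #define QW_PCTYPE_MASK   (0x3Fu << QW_PCTYPE_SHIFT)

    /* stand-in for cpu_to_le32(); a no-op on a little-endian host */
    static uint32_t to_le32(uint32_t v) { return v; }

    int main(void)
    {
            uint32_t fpt = 0;
            uint32_t desc_word;

            /* build the whole word in CPU byte order ... */
            fpt |= (5u << QW_QINDEX_SHIFT) & QW_QINDEX_MASK;
            fpt |= (3u << QW_FLEXOFF_SHIFT) & QW_FLEXOFF_MASK;
            fpt |= (17u << QW_PCTYPE_SHIFT) & QW_PCTYPE_MASK;

            /* ... and convert to device byte order exactly once at the end */
            desc_word = to_le32(fpt);

            printf("descriptor word: 0x%08" PRIx32 "\n", desc_word);
            return 0;
    }
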
@@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
}
/**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free
**/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
- struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
{
- if (tx_buffer->dma) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
- dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
- DMA_TO_DEVICE);
- else
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- tx_buffer->dma = 0;
- tx_buffer->time_stamp = 0;
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
/**
@@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
**/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
- struct i40e_tx_buffer *tx_buffer;
unsigned long bi_size;
u16 i;
@@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer->skb)
- dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
- }
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
}
/**
@@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
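
The hang check above arms itself on the first pass that sees pending work with no completion progress and only reports a hang if a second consecutive pass still sees none; in the driver test_and_set_bit() provides both the "already armed?" test and the arming write. A user-space sketch of the same two-strike logic using an ordinary flag instead of atomic bitops:

    #include <stdbool.h>
    #include <stdio.h>

    struct hang_state {
            unsigned long done_old;  /* completions seen at the previous check */
            bool armed;              /* first strike already recorded */
    };

    /* report a hang only when two consecutive checks see zero progress */
    static bool check_hang(struct hang_state *st, unsigned long done_now,
                           bool work_pending)
    {
            if (st->done_old == done_now && work_pending) {
                    bool was_armed = st->armed;

                    st->armed = true;   /* first strike arms, second one fires */
                    return was_armed;
            }
            st->done_old = done_now;    /* progress was made: record and disarm */
            st->armed = false;
            return false;
    }

    int main(void)
    {
            struct hang_state st = { 0, false };

            printf("%d\n", check_hang(&st, 10, true));  /* 0 - arms the check */
            printf("%d\n", check_hang(&st, 10, true));  /* 1 - hang reported  */
            printf("%d\n", check_hang(&st, 12, true));  /* 0 - progress seen  */
            return 0;
    }
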
@@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
- for (; budget; budget--) {
- struct i40e_tx_desc *eop_desc;
-
- eop_desc = tx_buf->next_to_watch;
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* count the packet as being completed */
- tx_ring->tx_stats.completed++;
+ /* clear next_to_watch to prevent false hangs */
tx_buf->next_to_watch = NULL;
- tx_buf->time_stamp = 0;
-
- /* set memory barrier before eop_desc is verified */
- rmb();
- do {
- i40e_unmap_tx_resource(tx_ring, tx_buf);
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
- /* clear dtype status */
- tx_desc->cmd_type_offset_bsz &=
- ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
- if (likely(tx_desc == eop_desc)) {
- eop_desc = NULL;
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
- total_bytes += tx_buf->bytecount;
- total_packets += tx_buf->gso_segs;
- }
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
tx_buf++;
tx_desc++;
i++;
- if (unlikely(i == tx_ring->count)) {
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
- } while (eop_desc);
- }
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +434,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
return true;
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
i40e_set_new_dynamic_itr(&q_vector->tx);
if (old_itr != q_vector->tx.itr)
wr32(hw, reg_addr, q_vector->tx.itr);
-
- i40e_flush(hw);
}
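
The rewritten i40e_clean_tx_irq() above biases the ring index negative up front (i -= tx_ring->count), so the wrap check inside the loop is just "if (!i) i -= count" instead of a compare against the ring size, and the real index is recovered at the end with i += count. The trick in isolation, on a toy four-entry ring:

    #include <stdio.h>

    #define RING_COUNT 4

    int main(void)
    {
            int value[RING_COUNT] = { 10, 11, 12, 13 };
            int next_to_clean = 2;
            /* bias the index so it counts up toward zero instead of toward count */
            int i = next_to_clean - RING_COUNT;
            int step;

            for (step = 0; step < 6; step++) {
                    printf("clean slot %d (value %d)\n",
                           i + RING_COUNT, value[i + RING_COUNT]);
                    i++;
                    if (!i)                 /* wrapped: re-bias instead of i = 0 */
                            i -= RING_COUNT;
            }

            next_to_clean = i + RING_COUNT; /* recover the real ring index */
            printf("next_to_clean = %d\n", next_to_clean);
            return 0;
    }
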
/**
@@ -1042,8 +1064,10 @@ next_desc:
}
rx_ring->next_to_clean = i;
- rx_ring->rx_stats.packets += total_rx_packets;
- rx_ring->rx_stats.bytes += total_rx_bytes;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
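
Both cleanup paths now bump ring->stats inside u64_stats_update_begin()/end(); on 32-bit hosts that pairing acts as a sequence counter so readers never observe a torn 64-bit value. A single-threaded analogue of the retry protocol such a counter gives readers (only an illustration, not the kernel implementation, and it omits the memory barriers a real concurrent version needs):

    #include <stdint.h>
    #include <stdio.h>

    struct stats_sync { unsigned int seq; };
    struct ring_stats { struct stats_sync syncp; uint64_t packets, bytes; };

    static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t bytes)
    {
            s->syncp.seq++;          /* odd sequence: update in progress */
            s->packets += pkts;
            s->bytes += bytes;
            s->syncp.seq++;          /* even again: update complete */
    }

    static void stats_read(const struct ring_stats *s,
                           uint64_t *pkts, uint64_t *bytes)
    {
            unsigned int start;

            do {
                    start = s->syncp.seq;
                    *pkts = s->packets;
                    *bytes = s->bytes;
                    /* retry if a writer was active or ran in between */
            } while ((start & 1) || start != s->syncp.seq);
    }

    int main(void)
    {
            struct ring_stats s = { { 0 }, 0, 0 };
            uint64_t p, b;

            stats_update(&s, 3, 4096);
            stats_read(&s, &p, &b);
            printf("packets=%llu bytes=%llu\n",
                   (unsigned long long)p, (unsigned long long)b);
            return 0;
    }
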
@@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
bool clean_complete = true;
int budget_per_ring;
- int i;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
return 0;
}
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
- * Since the actual Tx work is minimal, we can give the Tx a larger
- * budget and be more aggressive about cleaning up the Tx descriptors.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- for (i = 0; i < q_vector->num_ringpairs; i++) {
- clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
- vsi->work_limit);
- clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
- budget_per_ring);
- }
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
if (!clean_complete)
@@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
}
@@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct tcphdr *th;
unsigned int hlen;
u32 flex_ptype, dtype_cmd;
+ u16 i;
/* make sure ATR is enabled */
if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->atr_count = 0;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
@@ -1276,27 +1303,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
}
/**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring: ptr to the ring to send
- * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol)
-{
- if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
- !(tx_flags & I40E_TX_FLAGS_TXSW)) {
- if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
- return false;
- }
-
- return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-/**
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
@@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
const u32 cd_tunneling, const u32 cd_l2tag2)
{
struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
- context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* cpu_to_le32 and assign to struct fields */
context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct device *dev = tx_ring->dev;
- u32 paylen = skb->len - hdr_len;
- u16 i = tx_ring->next_to_use;
+ struct skb_frag_struct *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
- u32 buf_offset = 0;
+ u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
-
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
I40E_TX_FLAGS_VLAN_SHIFT;
}
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
tx_desc = I40E_TX_DESC(tx_ring, i);
- for (;;) {
- while (size > I40E_MAX_DATA_PER_TXD) {
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
I40E_MAX_DATA_PER_TXD, td_tag);
- buf_offset += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
-
tx_desc++;
i++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
}
- }
- tx_bi = &tx_ring->tx_bi[i];
- tx_bi->length = buf_offset + size;
- tx_bi->tx_flags = tx_flags;
- tx_bi->dma = dma;
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
if (likely(!data_len))
break;
- size = skb_frag_size(frag);
- data_len -= size;
- buf_offset = 0;
- tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- frag++;
- }
-
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ size = skb_frag_size(frag);
+ data_len -= size;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
- tx_ring->next_to_use = i;
+ tx_bi = &tx_ring->tx_bi[i];
+ }
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
- /* multiply data chunks by size of headers */
- tx_bi->bytecount = paylen + (gso_segs * hdr_len);
- tx_bi->gso_segs = gso_segs;
- tx_bi->skb = skb;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
- /* set the timestamp and next to watch values */
+ /* set the timestamp */
first->time_stamp = jiffies;
- first->next_to_watch = tx_desc;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
*/
wmb();
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
writel(i, tx_ring->tail);
+
return;
dma_error:
- dev_info(dev, "TX DMA map failed\n");
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_bi map */
for (;;) {
tx_bi = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_bi);
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first)
break;
if (i == 0)
@@ -1632,8 +1647,6 @@ dma_error:
i--;
}
- dev_kfree_skb_any(skb);
-
tx_ring->next_to_use = i;
}
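
i40e_tx_map() now walks oversized fragments by advancing the DMA address with dma += I40E_MAX_DATA_PER_TXD and shrinking size, instead of carrying a separate buf_offset. The chunking loop on its own (the sizes here are arbitrary):

    #include <stdio.h>

    #define MAX_DATA_PER_DESC 4096u

    int main(void)
    {
            unsigned long dma = 0x10000;  /* pretend DMA address of the fragment */
            unsigned int size = 10000;    /* bytes left in the fragment */
            int desc = 0;

            /* emit full descriptors while the fragment is too large for one */
            while (size > MAX_DATA_PER_DESC) {
                    printf("desc %d: addr=%#lx len=%u\n",
                           desc++, dma, MAX_DATA_PER_DESC);
                    dma += MAX_DATA_PER_DESC;
                    size -= MAX_DATA_PER_DESC;
            }
            /* the final descriptor carries whatever remains */
            printf("desc %d: addr=%#lx len=%u\n", desc, dma, size);
            return 0;
    }
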
@@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
/* Always offload the checksum, since it's in the data descriptor */
- if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- /* always enable offload insertion */
- td_cmd |= I40E_TX_DESC_CMD_ICRC;
-
- if (tx_flags & I40E_TX_FLAGS_CSUM)
i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
+ }
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
@@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+ struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1d7722d98a..db55d9947f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,23 +102,20 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
- u16 length;
- u32 tx_flags;
struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
unsigned int bytecount;
- u16 gso_segs;
- u8 mapped_as_page;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
unsigned int page_offset;
};
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
u64 packets;
u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
- u64 completed;
u64 tx_done_old;
};
struct i40e_rx_queue_stats {
- u64 packets;
- u64 bytes;
u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
/* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
union {
struct i40e_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
};
struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
/* array of pointers to rings */
- struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
u16 itr;
};
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
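
With ring containers now holding a singly linked list rather than a fixed array, i40e_for_each_ring() walks rings through their embedded next pointer. A stand-alone sketch of the same macro plus list construction (type names shortened for the example):

    #include <stdio.h>

    struct ring {
            struct ring *next;      /* next ring handled by this vector */
            int queue_index;
    };

    struct ring_container {
            struct ring *ring;      /* head of the singly linked list */
            int count;
    };

    #define for_each_ring(pos, head) \
            for (pos = (head).ring; pos != NULL; pos = pos->next)

    int main(void)
    {
            struct ring r0 = { NULL, 0 }, r1 = { NULL, 1 }, r2 = { NULL, 2 };
            struct ring_container tx = { NULL, 0 };
            struct ring *pos;

            /* push each ring onto the head of the container's list */
            r0.next = tx.ring; tx.ring = &r0; tx.count++;
            r1.next = tx.ring; tx.ring = &r1; tx.count++;
            r2.next = tx.ring; tx.ring = &r2; tx.count++;

            for_each_ring(pos, tx)
                    printf("cleaning ring %d\n", pos->queue_index);
            return 0;
    }
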
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e240..07596982a47 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
- ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+ (pf->hw.func_caps.num_msix_vectors_vf
* vf->vf_id) + (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
& I40E_QTX_CTL_PF_INDX_MASK);
qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
<< I40E_QTX_CTL_VFVM_INDX_SHIFT)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506b423..8c2437722aa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -28,14 +28,14 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 37a9c06a6c6..2e166b22d52 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -562,11 +562,11 @@ struct e1000_hw {
u8 revision_id;
};
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
/* These functions must be implemented by drivers */
-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index dde3c4b7ea9..2d913716573 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -28,26 +28,24 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83cc60..e4cbe8ef67b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e7266759a10..c4c4fe332c7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -708,11 +708,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
goto out;
}
- if (phy->type == e1000_phy_i210) {
- ret_val = igb_set_master_slave_mode(hw);
- if (ret_val)
- return ret_val;
- }
out:
return ret_val;
@@ -806,6 +801,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
return ret_val;
}
+ ret_val = igb_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 6807b098eda..5e9ed89403a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -483,40 +483,38 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
- struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
- struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+ struct sk_buff *skb);
static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
#ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 151e00cad11..b0f3666b1d7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -146,6 +146,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
+ status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
@@ -169,13 +170,22 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
} else {
ecmd->supported = (SUPPORTED_FIBRE |
+ SUPPORTED_1000baseKX_Full |
SUPPORTED_Autoneg |
SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_FIBRE;
-
- if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ ecmd->advertising = (ADVERTISED_FIBRE |
+ ADVERTISED_1000baseKX_Full);
+ if (hw->mac.type == e1000_i354) {
+ if ((hw->device_id ==
+ E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->supported |= SUPPORTED_2500baseX_Full;
+ ecmd->supported &=
+ ~SUPPORTED_1000baseKX_Full;
+ ecmd->advertising |= ADVERTISED_2500baseX_Full;
+ ecmd->advertising &=
+ ~ADVERTISED_1000baseKX_Full;
+ }
}
if (eth_flags->e100_base_fx) {
ecmd->supported |= SUPPORTED_100baseT_Full;
@@ -187,35 +197,29 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
}
-
if (hw->mac.autoneg != 1)
ecmd->advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- if (hw->fc.requested_mode == e1000_fc_full)
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
ecmd->advertising |= ADVERTISED_Pause;
- else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ break;
+ case e1000_fc_rx_pause:
ecmd->advertising |= (ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ break;
+ case e1000_fc_tx_pause:
ecmd->advertising |= ADVERTISED_Asym_Pause;
- else
+ break;
+ default:
ecmd->advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
-
- status = rd32(E1000_STATUS);
-
+ }
if (status & E1000_STATUS_LU) {
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->supported = SUPPORTED_2500baseX_Full;
- ecmd->advertising = ADVERTISED_2500baseX_Full;
- ecmd->speed = SPEED_2500;
- } else {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
- }
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->speed = SPEED_2500;
} else if (status & E1000_STATUS_SPEED_1000) {
ecmd->speed = SPEED_1000;
} else if (status & E1000_STATUS_SPEED_100) {
@@ -232,7 +236,6 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->speed = -1;
ecmd->duplex = -1;
}
-
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
@@ -771,8 +774,10 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (hw->mac.type == e1000_i211)
+ if ((hw->mac.type >= e1000_i210) &&
+ !igb_get_flash_presence_i210(hw)) {
return -EOPNOTSUPP;
+ }
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
@@ -1659,7 +1664,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1725,7 +1731,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
@@ -2877,6 +2884,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
return 0;
}
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int max_combined = 0;
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ max_combined = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ max_combined = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ case e1000_i354:
+ default:
+ max_combined = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+ return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = igb_max_channels(adapter);
+
+ /* Report info for other vector */
+ if (adapter->msix_entries) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int count = ch->combined_count;
+
+ /* Verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* Verify other_count is valid and has not been changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* Verify the number of channels doesn't exceed hw limits */
+ if (count > igb_max_channels(adapter))
+ return -EINVAL;
+
+ if (count != adapter->rss_queues) {
+ adapter->rss_queues = count;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match the new configuration.
+ */
+ return igb_reinit_queues(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2913,6 +3002,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh_indir = igb_get_rxfh_indir,
.set_rxfh_indir = igb_set_rxfh_indir,
+ .get_channels = igb_get_channels,
+ .set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8cf44f2a8cc..025e5f4b748 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -182,6 +182,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif
#ifdef CONFIG_PM
@@ -1223,6 +1224,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
+ u64_stats_init(&ring->tx_syncp);
+ u64_stats_init(&ring->tx_syncp2);
+
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -1256,6 +1260,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
+ u64_stats_init(&ring->rx_syncp);
+
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
}
@@ -2034,21 +2040,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
@@ -2429,7 +2429,7 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_disable_sriov(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2470,27 +2470,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries) {
+ if (!adapter->msix_entries || num_vfs > 7) {
err = -EPERM;
goto out;
}
-
if (!num_vfs)
goto out;
- else if (old_vfs && old_vfs == num_vfs)
- goto out;
- else if (old_vfs && old_vfs != num_vfs)
- err = igb_disable_sriov(pdev);
- if (err)
- goto out;
-
- if (num_vfs > 7) {
- err = -EPERM;
- goto out;
- }
-
- adapter->vfs_allocated_count = num_vfs;
+ if (old_vfs) {
+ dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+ old_vfs, max_vfs);
+ adapter->vfs_allocated_count = old_vfs;
+ } else
+ adapter->vfs_allocated_count = num_vfs;
adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
sizeof(struct vf_data_storage), GFP_KERNEL);
@@ -2504,10 +2496,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
}
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
-
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs) {
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -2623,7 +2617,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_pci_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -5708,7 +5702,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, 6);
+ memcpy(addr, vf_mac, ETH_ALEN);
igb_write_mbx(hw, msgbuf, 3, vf);
}
@@ -7838,4 +7832,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_SUCCESS;
}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igb_open(netdev);
+
+ return err;
+}
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a1463e3d14c..7d6a25c8f88 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -312,17 +312,17 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
extern const char igbvf_driver_version[];
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 93eb7ee06d3..04bf22e5ee3 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2343,10 +2343,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
struct igbvf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+ if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+ max_frame > MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- }
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2638,21 +2637,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
}
@@ -2699,7 +2692,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ei->get_variants) {
err = ei->get_variants(adapter);
if (err)
- goto err_ioremap;
+ goto err_get_variants;
}
/* setup adapter struct */
@@ -2796,6 +2789,7 @@ err_hw_init:
kfree(adapter->rx_ring);
err_sw_init:
igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index eea0e10ce12..955ad8c2c53 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
- memcpy(hw->mac.perm_addr, addr, 6);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
else
ret_val = -E1000_ERR_MAC_INIT;
}
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
memset(msgbuf, 0, 12);
msgbuf[0] = E1000_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 4d2ae97ff1b..2224cc2edf1 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -187,21 +187,21 @@ enum ixgb_state_t {
};
/* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 2a99a35c33a..0bd5d72e1af 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
};
/* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
- u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, u32 pad);
/* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
- u32 offset,
- u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9f6b236828e..57e390cbe6d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
}
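dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, which is why the hunk above (and the matching ixgbe_probe() hunk later in this diff) can drop the separate dma_set_coherent_mask() fallback chain. A sketch of the resulting probe-time pattern, with a hypothetical helper name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* try 64-bit DMA first, fall back to 32-bit; one helper covers both masks */
static int example_setup_dma(struct pci_dev *pdev, int *using_dac)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
                *using_dac = 1;
                return 0;
        }

        *using_dac = 0;
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
        return err;
}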
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0ac6b11c6e4..f38fc0a343a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,7 +55,7 @@
#include <net/busy_poll.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
@@ -67,7 +67,11 @@
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
+#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD 512
+#else
+#define IXGBE_DEFAULT_RXD 128
+#endif
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
@@ -187,11 +191,11 @@ struct ixgbe_rx_buffer {
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
u64 yields;
u64 misses;
u64 cleaned;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
};
struct ixgbe_tx_queue_stats {
@@ -219,6 +223,15 @@ enum ixgbe_ring_state_t {
__IXGBE_RX_FCOE,
};
+struct ixgbe_fwd_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct net_device *netdev;
+ struct ixgbe_adapter *real_adapter;
+ unsigned int tx_base_queue;
+ unsigned int rx_base_queue;
+ int pool;
+};
+
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
@@ -236,6 +249,7 @@ struct ixgbe_ring {
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
struct device *dev; /* device for DMA mapping */
+ struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -293,6 +307,12 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS 31
+#define IXGBE_MAX_DCBMACVLANS 8
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -369,11 +389,13 @@ struct ixgbe_q_vector {
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
@@ -394,18 +416,18 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = true;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBE_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
} else
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
- spin_unlock(&q_vector->lock);
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -413,14 +435,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = false;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
IXGBE_QV_STATE_NAPI_YIELD));
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
- spin_unlock(&q_vector->lock);
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -432,7 +455,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
if ((q_vector->state & IXGBE_QV_LOCKED)) {
q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
} else
@@ -451,17 +474,32 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
- WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+ WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBE_QV_OWNED)
+ rc = false;
+ q_vector->state |= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+
+ return rc;
+}
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@@ -487,10 +525,16 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
return false;
}
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ return true;
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
@@ -738,6 +782,7 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
+ unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
};
struct ixgbe_fdir_filter {
@@ -786,93 +831,89 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+ struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
- struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +925,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -906,13 +947,16 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+ int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring);
#endif /* _IXGBE_H_ */
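The busy-poll changes above add a DISABLED lock state so that ixgbe_napi_disable_all() can mark a q_vector unusable even while NAPI or a socket poll still owns it: the unlock paths now do "state &= IXGBE_QV_STATE_DISABLED" instead of resetting to IDLE, so a pending disable survives the unlock and later lock attempts fail. A standalone sketch of that state machine (same bit values as the patch; spinlocks and yield statistics omitted, function names illustrative):

#include <stdio.h>

#define QV_STATE_IDLE      0
#define QV_STATE_NAPI      1
#define QV_STATE_POLL      2
#define QV_STATE_DISABLED  4
#define QV_OWNED           (QV_STATE_NAPI | QV_STATE_POLL)
#define QV_LOCKED          (QV_OWNED | QV_STATE_DISABLED)

static unsigned int state = QV_STATE_NAPI;      /* NAPI currently owns the vector */

static int qv_disable(void)
{
        int ok = !(state & QV_OWNED);           /* caller retries until this is true */

        state |= QV_STATE_DISABLED;
        return ok;
}

static void qv_unlock_napi(void)
{
        state &= QV_STATE_DISABLED;             /* back to idle unless a disable is pending */
}

static int qv_lock_napi(void)
{
        if (state & QV_LOCKED)
                return 0;                       /* refused: owned or disabled */
        state = QV_STATE_NAPI;
        return 1;
}

int main(void)
{
        printf("disable while owned: %d\n", qv_disable());        /* 0, must retry */
        qv_unlock_napi();
        printf("disable after unlock: %d\n", qv_disable());       /* 1 */
        printf("lock_napi while disabled: %d\n", qv_lock_napi()); /* 0 */
        return 0;
}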
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e8649abf97c..4e7c9b098b5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1129
+#define IXGBE_REGS_LEN 1139
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
/* DCB */
- regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
- regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
- for (i = 0; i < 8; i++)
- regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
- for (i = 0; i < 8; i++)
- regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
- for (i = 0; i < 8; i++)
- regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
- for (i = 0; i < 8; i++)
- regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < 8; i++)
- regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+ regs_buff[865 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
for (i = 0; i < 8; i++)
- regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+ regs_buff[873 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
/* Statistics */
regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* 82599 X540 specific registers */
regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* 82599 X540 specific DCB registers */
+ regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < 4; i++)
+ regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+ regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+ /* same as RTTQCNRM */
+ regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+ /* same as RTTQCNRR */
+
+ /* X540 specific DCB registers */
+ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+ regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_misses", i);
+ sprintf(p, "tx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_cleaned", i);
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_misses", i);
+ sprintf(p, "rx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_cleaned", i);
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
sprintf(p, "tx_pb_%u_pxon", i);
@@ -2212,13 +2257,13 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
#if IS_ENABLED(CONFIG_BQL)
/* detect ITR changes that require update of TXDCTL.WTHRESH */
- if ((adapter->tx_itr_setting > 1) &&
+ if ((adapter->tx_itr_setting != 1) &&
(adapter->tx_itr_setting < IXGBE_100K_ITR)) {
if ((tx_itr_prev == 1) ||
- (tx_itr_prev > IXGBE_100K_ITR))
+ (tx_itr_prev >= IXGBE_100K_ITR))
need_reset = true;
} else {
- if ((tx_itr_prev > 1) &&
+ if ((tx_itr_prev != 1) &&
(tx_itr_prev < IXGBE_100K_ITR))
need_reset = true;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 90b4e1089ec..32e3eaaa160 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -498,6 +498,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
+ bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +511,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4)) {
+ if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -852,7 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
+ if (adapter->num_rx_pools > 1)
+ ring->queue_index =
+ txr_idx % adapter->num_rx_queues_per_pool;
+ else
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +900,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
+ if (adapter->num_rx_pools > 1)
+ ring->queue_index =
+ rxr_idx % adapter->num_rx_queues_per_pool;
+ else
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd5ef5..0c55079ebee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -132,7 +133,7 @@ static struct notifier_block dca_notifier = {
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
- "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
+ "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
static unsigned int allow_unsupported_sfp;
@@ -153,7 +154,6 @@ MODULE_VERSION(DRV_VERSION);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
- int pos = 0;
struct pci_dev *parent_dev;
struct pci_bus *parent_bus;
@@ -165,11 +165,10 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
if (!parent_dev)
return -1;
- pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
- if (!pos)
+ if (!pci_is_pcie(parent_dev))
return -1;
- pci_read_config_word(parent_dev, pos + reg, value);
+ pcie_capability_read_word(parent_dev, reg, value);
return 0;
}
@@ -247,7 +246,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
max_gts = 4 * width;
break;
case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding only reduces throughput by 1% */
+ /* 128b/130b encoding reduces throughput by less than 2% */
max_gts = 8 * width;
break;
default:
@@ -265,7 +264,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
width,
(speed == PCIE_SPEED_2_5GT ? "20%" :
speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "N/a" :
+ speed == PCIE_SPEED_8_0GT ? "<2%" :
"Unknown"));
if (max_gts < expected_gts) {
@@ -872,11 +871,18 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+ u32 head, tail;
- u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+ if (ring->l2_accel_priv)
+ adapter = ring->l2_accel_priv->real_adapter;
+ else
+ adapter = netdev_priv(ring->netdev);
+
+ hw = &adapter->hw;
+ head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
if (head != tail)
return (head < tail) ?
@@ -1585,7 +1591,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- if (ixgbe_qv_ll_polling(q_vector))
+ if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb);
else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2103,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
ixgbe_for_each_ring(ring, q_vector->rx) {
found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
if (found)
ring->stats.cleaned += found;
else
@@ -3005,7 +3011,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_q_vector *q_vector = ring->q_vector;
if (q_vector)
- netif_set_xps_queue(adapter->netdev,
+ netif_set_xps_queue(ring->netdev,
&q_vector->affinity_mask,
ring->queue_index);
}
@@ -3395,7 +3401,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- int p;
+ u16 pool;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -3412,9 +3418,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1 << 29;
- for (p = 0; p < adapter->num_rx_pools; p++)
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
- psrtype);
+ for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@ -3683,7 +3688,11 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring->l2_accel_priv)
+ continue;
+ j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3713,7 +3722,11 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring->l2_accel_priv)
+ continue;
+ j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3750,7 +3763,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV mode significantly less RAR entries are available */
+ /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
@@ -3825,14 +3838,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
- } else {
- /*
- * Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
}
ixgbe_vlan_filter_enable(adapter);
hw->addr_ctrl.user_set_promisc = false;
@@ -3849,6 +3854,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
vmolr |= IXGBE_VMOLR_ROPE;
}
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
@@ -3893,15 +3905,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- local_bh_disable(); /* for ixgbe_qv_lock_napi() */
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+ while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
pr_info("QV %d locked\n", q_idx);
- mdelay(1);
+ usleep_range(1000, 20000);
}
}
- local_bh_enable();
}
#ifdef CONFIG_IXGBE_DCB
@@ -4118,6 +4128,228 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
+static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
+ struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /* No unicast promiscuous support for VMDQ devices. */
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+ vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+
+ /* clear the affected bit */
+ vmolr &= ~IXGBE_VMOLR_MPE;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ hw->mac.ops.update_mc_addr_list(hw, dev);
+ }
+ ixgbe_write_uc_addr_list(adapter->netdev);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+}
+
+static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 pool)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int entry;
+
+ entry = hw->mac.num_rar_entries - pool;
+ hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
+}
+
+static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
+{
+ struct ixgbe_adapter *adapter = vadapter->real_adapter;
+ int rss_i = adapter->num_rx_queues_per_pool;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 pool = vadapter->pool;
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (rss_i > 3)
+ psrtype |= 2 << 29;
+ else if (rss_i > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
+}
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+ if (IXGBE_CB(skb)->page_released) {
+ dma_unmap_page(dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ }
+ dev_kfree_skb(skb);
+ }
+ rx_buffer->skb = NULL;
+ if (rx_buffer->dma)
+ dma_unmap_page(dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+ if (rx_buffer->page)
+ __free_pages(rx_buffer->page,
+ ixgbe_rx_pg_order(rx_ring));
+ rx_buffer->page = NULL;
+ }
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
+ struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_adapter *adapter = vadapter->real_adapter;
+ int index = rx_ring->queue_index + vadapter->rx_base_queue;
+
+ /* shutdown specific queue receive and wait for dma to settle */
+ ixgbe_disable_rx_queue(adapter, rx_ring);
+ usleep_range(10000, 20000);
+ ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_clean_rx_ring(rx_ring);
+ rx_ring->l2_accel_priv = NULL;
+}
+
+int ixgbe_fwd_ring_down(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
+{
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int txbase = accel->tx_base_queue;
+ int i;
+
+ netif_tx_stop_all_queues(vdev);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
+ adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
+ }
+
+ return 0;
+}
+
+static int ixgbe_fwd_ring_up(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
+{
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase, txbase, queues;
+ int i, baseq, err = 0;
+
+ if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ return 0;
+
+ baseq = accel->pool * adapter->num_rx_queues_per_pool;
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ accel->pool, adapter->num_rx_pools,
+ baseq, baseq + adapter->num_rx_queues_per_pool,
+ adapter->fwd_bitmask);
+
+ accel->netdev = vdev;
+ accel->rx_base_queue = rxbase = baseq;
+ accel->tx_base_queue = txbase = baseq;
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->rx_ring[rxbase + i]->netdev = vdev;
+ adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
+ }
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->tx_ring[txbase + i]->netdev = vdev;
+ adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ }
+
+ queues = min_t(unsigned int,
+ adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
+ err = netif_set_real_num_tx_queues(vdev, queues);
+ if (err)
+ goto fwd_queue_err;
+
+ err = netif_set_real_num_rx_queues(vdev, queues);
+ if (err)
+ goto fwd_queue_err;
+
+ if (is_valid_ether_addr(vdev->dev_addr))
+ ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+
+ ixgbe_fwd_psrtype(accel);
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return err;
+fwd_queue_err:
+ ixgbe_fwd_ring_down(vdev, accel);
+ return err;
+}
+
+static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (dfwd->fwd_priv) {
+ err = ixgbe_fwd_ring_up(upper, vadapter);
+ if (err)
+ continue;
+ }
+ }
+ }
+}
+
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -4169,6 +4401,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
+ ixgbe_configure_dfwd(adapter);
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -4322,6 +4555,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
int err;
u32 ctrl_ext;
@@ -4365,6 +4600,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
+ /* enable any upper devices */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_start_all_queues(upper);
+ }
+ }
+
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4456,59 +4701,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- **/
-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
-
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_buffer->skb) {
- struct sk_buff *skb = rx_buffer->skb;
- if (IXGBE_CB(skb)->page_released) {
- dma_unmap_page(dev,
- IXGBE_CB(skb)->dma,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- }
- dev_kfree_skb(skb);
- }
- rx_buffer->skb = NULL;
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_pages(rx_buffer->page,
- ixgbe_rx_pg_order(rx_ring));
- rx_buffer->page = NULL;
- }
-
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-}
-
-/**
* ixgbe_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
@@ -4585,6 +4777,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
u32 rxctrl;
int i;
@@ -4608,6 +4802,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ /* disable any upper devices */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv) {
+ netif_tx_stop_all_queues(upper);
+ netif_carrier_off(upper);
+ netif_tx_disable(upper);
+ }
+ }
+ }
+
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -4816,11 +5023,20 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
+ if (max_vfs > 0)
+ e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
+
/* assign number of SR-IOV VFs */
- if (hw->mac.type != ixgbe_mac_82598EB)
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (max_vfs > 63) {
+ adapter->num_vfs = 0;
+ e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
+ } else {
+ adapter->num_vfs = max_vfs;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
-#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -4838,6 +5054,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
return -EIO;
}
+ /* PF holds first pool slot */
+ set_bit(0, &adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4867,6 +5085,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
if (!tx_ring->tx_buffer_info)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -4949,6 +5169,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -5143,7 +5365,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int err;
+ int err, queues;
/* disallow open during test */
if (test_bit(__IXGBE_TESTING, &adapter->state))
@@ -5168,16 +5390,21 @@ static int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev,
- adapter->num_rx_pools > 1 ? 1 :
- adapter->num_tx_queues);
+ if (adapter->num_rx_pools > 1)
+ queues = adapter->num_rx_queues_per_pool;
+ else
+ queues = adapter->num_tx_queues;
+
+ err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev,
- adapter->num_rx_pools > 1 ? 1 :
- adapter->num_rx_queues);
+ if (adapter->num_rx_pools > 1 &&
+ adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
+ queues = IXGBE_MAX_L2A_QUEUES;
+ else
+ queues = adapter->num_rx_queues;
+ err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -6767,8 +6994,9 @@ out_drop:
return NETDEV_TX_OK;
}
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct ixgbe_ring *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
@@ -6784,10 +7012,17 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
skb_set_tail_pointer(skb, 17);
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
+ tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ return __ixgbe_xmit_frame(skb, netdev, NULL);
+}
+
/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -7044,6 +7279,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
+ bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
@@ -7051,6 +7287,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
tc < MAX_TRAFFIC_CLASS))
return -EINVAL;
+ pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
+ if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
+ return -EBUSY;
+
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -7305,6 +7545,104 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
}
+static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = NULL;
+ struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ unsigned int limit;
+ int pool, err;
+
+#ifdef CONFIG_RPS
+ if (vdev->num_rx_queues != vdev->num_tx_queues) {
+ netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
+ vdev->name);
+ return ERR_PTR(-EINVAL);
+ }
+#endif
+ /* Check for hardware restriction on number of rx/tx queues */
+ if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
+ vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
+ netdev_info(pdev,
+ "%s: Supports RX/TX Queue counts 1,2, and 4\n",
+ pdev->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
+ return ERR_PTR(-EBUSY);
+
+ fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+ if (!fwd_adapter)
+ return ERR_PTR(-ENOMEM);
+
+ pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
+ adapter->num_rx_pools++;
+ set_bit(pool, &adapter->fwd_bitmask);
+ limit = find_last_bit(&adapter->fwd_bitmask, 32);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
+
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+ if (err)
+ goto fwd_add_err;
+ fwd_adapter->pool = pool;
+ fwd_adapter->real_adapter = adapter;
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ return fwd_adapter;
+fwd_add_err:
+ /* unwind counter and free adapter struct */
+ netdev_info(pdev,
+ "%s: dfwd hardware acceleration failed\n", vdev->name);
+ clear_bit(pool, &adapter->fwd_bitmask);
+ adapter->num_rx_pools--;
+ kfree(fwd_adapter);
+ return ERR_PTR(err);
+}
+
+static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = priv;
+ struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
+ unsigned int limit;
+
+ clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
+ adapter->num_rx_pools--;
+
+ limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
+ ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ fwd_adapter->pool, adapter->num_rx_pools,
+ fwd_adapter->rx_base_queue,
+ fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
+ adapter->fwd_bitmask);
+ kfree(fwd_adapter);
+}
+
+static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
+ struct net_device *dev,
+ void *priv)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = priv;
+ unsigned int queue;
+ struct ixgbe_ring *tx_ring;
+
+ queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
+ tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
+
+ return __ixgbe_xmit_frame(skb, dev, tx_ring);
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
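The ndo_dfwd_* hooks above are how the stack hands a macvlan upper device to the driver for L2 forwarding offload (typically created with iproute2, e.g. "ip link add link <ixgbe-if> macvlan0 type macvlan"). ixgbe_fwd_add() claims a free VMDq pool in adapter->fwd_bitmask, with pool 0 reserved for the PF, and raises the RING_F_VMDQ limit to cover the highest pool still in use. A minimal sketch of that bookkeeping outside the driver context, with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>

static unsigned long fwd_bitmask = BIT(0);      /* pool 0 belongs to the PF */

int example_claim_pool(unsigned int *vmdq_limit)
{
        int pool = find_first_zero_bit(&fwd_bitmask, 32);

        if (pool >= 32)
                return -EBUSY;
        set_bit(pool, &fwd_bitmask);
        /* VMDq limit must cover the highest pool currently in use */
        *vmdq_limit = find_last_bit(&fwd_bitmask, 32) + 1;
        return pool;
}

void example_release_pool(int pool, unsigned int *vmdq_limit)
{
        clear_bit(pool, &fwd_bitmask);
        *vmdq_limit = find_last_bit(&fwd_bitmask, 32) + 1;
}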
@@ -7349,6 +7687,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_add = ixgbe_ndo_fdb_add,
.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
+ .ndo_dfwd_add_station = ixgbe_fwd_add,
+ .ndo_dfwd_del_station = ixgbe_fwd_del,
+ .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
};
/**
@@ -7362,19 +7703,16 @@ static const struct net_device_ops ixgbe_netdev_ops = {
**/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *entry;
int physfns = 0;
- /* Some cards can not use the generic count PCIe functions method, and
- * so must be hardcoded to the correct value.
+ /* Some cards can not use the generic count PCIe functions method,
+ * because they are behind a parent switch, so we hardcode these with
+ * the correct number of functions.
*/
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_SFP_SF_QP:
- case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ if (ixgbe_pcie_from_parent(&adapter->hw)) {
physfns = 4;
- break;
- default:
+ } else {
list_for_each(entry, &adapter->pdev->bus_list) {
struct pci_dev *pdev =
list_entry(entry, struct pci_dev, bus_list);
@@ -7490,19 +7828,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
@@ -7653,7 +7986,8 @@ skip_sriov:
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_L2FW_DOFFLOAD;
netdev->hw_features = netdev->features;
@@ -7759,29 +8093,6 @@ skip_sriov:
if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
- /* print bus type/speed/width info */
- e_dev_info("(PCI Express:%s:%s) %pM\n",
- (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"),
- netdev->dev_addr);
-
- err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
- if (err)
- strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
- if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
- else
- e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, part_str);
-
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
* bandwidth due to being older generation PCIe parts. We clamp these
@@ -7797,6 +8108,19 @@ skip_sriov:
}
ixgbe_check_minimum_link(adapter, expected_gts);
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ part_str);
+ else
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ e_dev_info("%pM\n", netdev->dev_addr);
+
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
if (err == IXGBE_ERR_EEPROM_VERSION) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 24af12e3719..aae900a256d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -57,28 +57,28 @@
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
-#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
-#define IXGBE_SFF_ADDRESSING_MODE 0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
-#define IXGBE_I2C_EEPROM_READ_MASK 0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 276d7b13533..d6f0c0d8cf1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -129,10 +129,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
if (!pre_existing_vfs && !adapter->num_vfs)
return;
- if (!pre_existing_vfs)
- dev_warn(&adapter->pdev->dev,
- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
-
/* If there are pre-existing VFs then we have to force
 * use of that many - override any module parameter value.
* This may result from the user unloading the PF driver
@@ -223,17 +219,19 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
IXGBE_WRITE_FLUSH(hw);
/* Disable VMDq flag so device will be set in VM mode */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
- adapter->ring_feature[RING_F_VMDQ].offset = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ } else {
+ rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
+ }
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_VMDQ].offset = 0;
adapter->ring_feature[RING_F_RSS].limit = rss;
/* take a breather then clean up driver data */
msleep(100);
-
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
return 0;
}
@@ -298,13 +296,10 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
- if (!err && current_flags != adapter->flags) {
- /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
- adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
#ifdef CONFIG_PCI_IOV
+ if (!err && current_flags != adapter->flags)
ixgbe_sriov_reinit(adapter);
#endif
- }
return err;
}
@@ -558,7 +553,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
@@ -621,16 +616,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
- unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
- if (enable) {
- eth_zero_addr(vf_mac_addr);
- memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
- }
+ if (enable)
+ eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 10775cb9b6d..7c19e969576 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTDQSEL 0x04904
#define IXGBE_RTTDT1C 0x04908
#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTQCNCR 0x08B00
+#define IXGBE_RTTQCNTG 0x04A90
+#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTQCNRR 0x0498C
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
#define IXGBE_RTTBCNRM 0x04980
+#define IXGBE_RTTQCNRM 0x04980
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 389324f5929..24b80a6cfca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,12 +32,12 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES 128
-#define IXGBE_X540_MC_TBL_SIZE 128
-#define IXGBE_X540_VFT_TBL_SIZE 128
-#define IXGBE_X540_RX_PB_SIZE 384
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c9d0c12d6f0..54d9acef9c4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
+ struct {
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+ };
};
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
- offsetof(struct ixgbevf_adapter, m), \
- offsetof(struct ixgbevf_adapter, b), \
- offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+ .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = -1, \
+ .saved_reset_offset = -1 \
+}
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
stats.saved_reset_vfgorc)},
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
- zero_base)},
- {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
- zero_base)},
- {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
- zero_base)},
+ {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+ {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+ {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+ {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+ {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+ {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+ {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+ {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+ {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
};
#define IXGBE_QUEUE_STATS_LEN 0
@@ -140,58 +156,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-static char *ixgbevf_reg_names[] = {
- "IXGBE_VFCTRL",
- "IXGBE_VFSTATUS",
- "IXGBE_VFLINKS",
- "IXGBE_VFRXMEMWRAP",
- "IXGBE_VFFRTIMER",
- "IXGBE_VTEICR",
- "IXGBE_VTEICS",
- "IXGBE_VTEIMS",
- "IXGBE_VTEIMC",
- "IXGBE_VTEIAC",
- "IXGBE_VTEIAM",
- "IXGBE_VTEITR",
- "IXGBE_VTIVAR",
- "IXGBE_VTIVAR_MISC",
- "IXGBE_VFRDBAL0",
- "IXGBE_VFRDBAL1",
- "IXGBE_VFRDBAH0",
- "IXGBE_VFRDBAH1",
- "IXGBE_VFRDLEN0",
- "IXGBE_VFRDLEN1",
- "IXGBE_VFRDH0",
- "IXGBE_VFRDH1",
- "IXGBE_VFRDT0",
- "IXGBE_VFRDT1",
- "IXGBE_VFRXDCTL0",
- "IXGBE_VFRXDCTL1",
- "IXGBE_VFSRRCTL0",
- "IXGBE_VFSRRCTL1",
- "IXGBE_VFPSRTYPE",
- "IXGBE_VFTDBAL0",
- "IXGBE_VFTDBAL1",
- "IXGBE_VFTDBAH0",
- "IXGBE_VFTDBAH1",
- "IXGBE_VFTDLEN0",
- "IXGBE_VFTDLEN1",
- "IXGBE_VFTDH0",
- "IXGBE_VFTDH1",
- "IXGBE_VFTDT0",
- "IXGBE_VFTDT1",
- "IXGBE_VFTXDCTL0",
- "IXGBE_VFTXDCTL1",
- "IXGBE_VFTDWBAL0",
- "IXGBE_VFTDWBAL1",
- "IXGBE_VFTDWBAH0",
- "IXGBE_VFTDWBAH1"
-};
-
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
- return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+ return IXGBE_REGS_LEN * sizeof(u32);
}
static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +232,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
- for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
- hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
@@ -441,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ char *base = (char *) adapter;
int i;
+#ifdef BP_EXTENDED_STATS
+ u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+ tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_yields += adapter->rx_ring[i].bp_yields;
+ rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+ rx_missed += adapter->rx_ring[i].bp_misses;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_yields += adapter->tx_ring[i].bp_yields;
+ tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+ tx_missed += adapter->tx_ring[i].bp_misses;
+ }
+
+ adapter->bp_rx_yields = rx_yields;
+ adapter->bp_rx_cleaned = rx_cleaned;
+ adapter->bp_rx_missed = rx_missed;
+
+ adapter->bp_tx_yields = tx_yields;
+ adapter->bp_tx_cleaned = tx_cleaned;
+ adapter->bp_tx_missed = tx_missed;
+#endif
ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter +
- ixgbe_gstrings_stats[i].stat_offset;
- char *b = (char *)adapter +
- ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = (char *)adapter +
- ixgbe_gstrings_stats[i].saved_reset_offset;
- data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+ char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+ char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+ char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+ if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+ else
+ data[i] = *(u64 *)p;
+ } else {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -685,6 +678,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
return 0;
}
+static int ixgbevf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only valid if in constant ITR mode */
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+ return 0;
+}
+
+static int ixgbevf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_q_vector *q_vector;
+ int num_vectors, i;
+ u16 tx_itr_param, rx_itr_param;
+
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
+ && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
+
+
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
+
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ ixgbevf_write_eitr(q_vector);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
@@ -700,6 +772,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_sset_count = ixgbevf_get_sset_count,
.get_strings = ixgbevf_get_strings,
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
+ .get_coalesce = ixgbevf_get_coalesce,
+ .set_coalesce = ixgbevf_set_coalesce,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
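
The IXGBEVF_STAT/IXGBEVF_ZSTAT rework above drives the ethtool stats loop purely from sizeof/offsetof descriptors, with a negative base offset marking counters that have nothing to subtract. A simplified standalone sketch of that technique (invented names, and a single base offset instead of base plus saved-reset):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_adapter {
	uint64_t rx_packets;
	uint64_t rx_packets_base;	/* value latched at last reset */
	uint32_t tx_busy;		/* no base counterpart */
};

struct demo_stat {
	char name[32];
	int size;
	int offset;
	int base_offset;	/* -1 means "no base to subtract" */
};

#define DEMO_STAT(m, b)  { #m, sizeof(((struct demo_adapter *)0)->m), \
			   offsetof(struct demo_adapter, m), \
			   offsetof(struct demo_adapter, b) }
#define DEMO_ZSTAT(m)    { #m, sizeof(((struct demo_adapter *)0)->m), \
			   offsetof(struct demo_adapter, m), -1 }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(rx_packets, rx_packets_base),
	DEMO_ZSTAT(tx_busy),
};

int main(void)
{
	struct demo_adapter a = { .rx_packets = 100, .rx_packets_base = 40,
				  .tx_busy = 3 };
	char *base = (char *)&a;

	for (size_t i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const struct demo_stat *s = &demo_stats[i];
		uint64_t val;

		if (s->size == sizeof(uint64_t))
			val = *(uint64_t *)(base + s->offset);
		else
			val = *(uint32_t *)(base + s->offset);

		if (s->base_offset >= 0)
			val -= *(uint64_t *)(base + s->base_offset);

		printf("%s: %llu\n", s->name, (unsigned long long)val);
	}
	return 0;
}
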
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fff0d986752..8971e2d0a98 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
#include "vf.h"
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@ struct ixgbevf_ring {
struct u64_stats_sync syncp;
u64 hw_csum_rx_error;
u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_yields;
+ u64 bp_misses;
+ u64 bp_cleaned;
+#endif
u16 head;
u16 tail;
@@ -145,7 +155,118 @@ struct ixgbevf_q_vector {
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE 0
+#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+ spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+
+ spin_lock_init(&q_vector->lock);
+ q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_LOCKED) {
+ WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+ q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->tx.ring->bp_yields++;
+#endif
+ } else {
+ /* we don't care if someone yielded */
+ q_vector->state = IXGBEVF_QV_STATE_NAPI;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+ IXGBEVF_QV_STATE_NAPI_YIELD));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+ q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->rx.ring->bp_yields++;
+#endif
+ } else {
+ /* preserve yield marks */
+ q_vector->state |= IXGBEVF_QV_STATE_POLL;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+ WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+ return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_OWNED)
+ rc = false;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
* microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -165,9 +286,13 @@ struct ixgbevf_q_vector {
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-#define IXGBE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -240,7 +365,6 @@ struct ixgbevf_adapter {
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbevf_hw_stats stats;
- u64 zero_base;
/* Interrupt Throttle Rate */
u32 eitr_param;
@@ -249,6 +373,16 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_rx_yields;
+ u64 bp_rx_cleaned;
+ u64 bp_rx_missed;
+
+ u64 bp_tx_yields;
+ u64 bp_tx_cleaned;
+ u64 bp_tx_missed;
+#endif
+
u32 link_speed;
bool link_up;
@@ -281,27 +415,25 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
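
ixgbevf_desc_unused() above replaces the old IXGBE_DESC_UNUSED macro with the same ring arithmetic. A standalone sketch of that arithmetic on a toy ring, showing the wrap-around case and the slot that is always kept free so a full ring can be told apart from an empty one:

#include <stdio.h>
#include <stdint.h>

static uint16_t desc_unused(uint16_t count, uint16_t ntc, uint16_t ntu)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* 8-entry ring: clean == use means the ring is empty -> 7 usable slots */
	printf("%u\n", desc_unused(8, 0, 0));	/* 7 */
	/* software has used entries 0..4, hardware cleaned up to 2 */
	printf("%u\n", desc_unused(8, 2, 5));	/* 4 */
	/* use pointer wrapped: entries 6, 7, 0, 1 in flight, cleaned up to 6 */
	printf("%u\n", desc_unused(8, 6, 2));	/* 3 */
	return 0;
}
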
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59a62bbfb37..92ef4cb5a8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.7.12-k"
+#define DRV_VERSION "2.11.3-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -300,6 +300,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
}
/**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ skb_mark_napi_id(skb, &q_vector->napi);
+
+ if (ixgbevf_qv_busy_polling(q_vector)) {
+ netif_receive_skb(skb);
+ /* exit early if we busy polled */
+ return;
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+ ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
@@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *rx_ring,
- int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -473,15 +497,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- /*
- * Work around issue of some types of VM to VM loop back
- * packets not getting split correctly
- */
- if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb_headlen(skb);
- if (header_fixup_len < 14)
- skb_push(skb, header_fixup_len);
- }
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* Workaround hardware that can't do proper VEPA multicast
@@ -494,7 +509,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+ ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -514,7 +529,7 @@ next_desc:
}
rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+ cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -526,7 +541,7 @@ next_desc:
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- return !!budget;
+ return total_rx_packets;
}
/**
@@ -549,6 +564,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!ixgbevf_qv_lock_napi(q_vector))
+ return budget;
+#endif
+
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
if (q_vector->rx.count > 1)
@@ -558,10 +578,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
- clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
- per_ring_budget);
+ clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget)
+ < per_ring_budget);
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -580,7 +605,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
* ixgbevf_write_eitr - write VTEITR register in hardware specific way
* @q_vector: structure containing interrupt and ring information
*/
-static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
@@ -596,6 +621,40 @@ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *ring;
+ int found = 0;
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return LL_FLUSH_FAILED;
+
+ if (!ixgbevf_qv_lock_poll(q_vector))
+ return LL_FLUSH_BUSY;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
+ found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+ if (found)
+ ring->bp_cleaned += found;
+ else
+ ring->bp_misses++;
+#endif
+ if (found)
+ break;
+ }
+
+ ixgbevf_qv_unlock_poll(q_vector);
+
+ return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -756,37 +815,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
- struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 msg;
- bool got_ack = false;
hw->mac.get_link_status = 1;
- if (!hw->mbx.ops.check_for_ack(hw))
- got_ack = true;
-
- if (!hw->mbx.ops.check_for_msg(hw)) {
- hw->mbx.ops.read(hw, &msg, 1);
-
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 1));
- adapter->link_up = false;
- }
- if (msg & IXGBE_VT_MSGTYPE_NACK)
- dev_info(&pdev->dev,
- "Last Request of type %2.2x to PF Nacked\n",
- msg & 0xFF);
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
- }
-
- /* checking for the ack clears the PFACK bit. Place
- * it back in the v2p_mailbox cache so that anyone
- * polling for an ack will not miss it
- */
- if (got_ack)
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1107,6 +1141,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
+static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+
+ if (adapter->num_rx_queues > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+}
+
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1154,8 +1203,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
int i, j;
u32 rdlen;
- /* PSRTYPE must be initialized in 82599 */
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
@@ -1293,6 +1341,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
napi_enable(&q_vector->napi);
}
}
@@ -1306,6 +1357,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+ pr_info("QV %d locked\n", q_idx);
+ usleep_range(1000, 20000);
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
@@ -1323,31 +1380,55 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
ixgbevf_alloc_rx_buffers(adapter, ring,
- IXGBE_DESC_UNUSED(ring));
+ ixgbevf_desc_unused(ring));
}
}
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
int j = adapter->rx_ring[rxr].reg_idx;
- int k;
- for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- if (k >= IXGBE_MAX_RX_DESC_POLL) {
- hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
- "not set within the polling period\n", rxr);
- }
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
- adapter->rx_ring[rxr].count - 1);
+ if (!wait_loop)
+ hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+ rxr);
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1626,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_negotiate_api(adapter);
-
ixgbevf_reset_queues(adapter);
ixgbevf_configure(adapter);
@@ -1679,7 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBEVF_DOWN, &adapter->state);
- /* disable receives */
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1733,10 +1815,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- if (hw->mac.ops.reset_hw(hw))
+ if (hw->mac.ops.reset_hw(hw)) {
hw_dbg(hw, "PF still resetting\n");
- else
+ } else {
hw->mac.ops.init_hw(hw);
+ ixgbevf_negotiate_api(adapter);
+ }
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1929,6 +2013,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_add(&q_vector->napi);
+#endif
adapter->q_vector[q_idx] = q_vector;
}
@@ -1938,6 +2025,9 @@ err_out:
while (q_idx) {
q_idx--;
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[q_idx] = NULL;
@@ -1961,6 +2051,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
@@ -2072,6 +2165,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->mac.max_tx_queues = 2;
hw->mac.max_rx_queues = 2;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -2082,6 +2178,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
+ ixgbevf_negotiate_api(adapter);
err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
if (err)
dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2194,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
}
- /* lock to protect mailbox accesses */
- spin_lock_init(&adapter->mbx_lock);
-
/* Enable dynamic interrupt throttling rates */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -2620,8 +2714,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- ixgbevf_negotiate_api(adapter);
-
/* setup queue reg_idx and Rx queue count */
err = ixgbevf_setup_queues(adapter);
if (err)
@@ -3010,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* We need to check again in a case another CPU has just
* made room available. */
- if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ if (likely(ixgbevf_desc_unused(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3021,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ if (likely(ixgbevf_desc_unused(tx_ring) >= size))
return 0;
return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
@@ -3216,6 +3308,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
}
pci_set_master(pdev);
+ ixgbevf_reset(adapter);
+
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
rtnl_unlock();
@@ -3224,8 +3318,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
- ixgbevf_reset(adapter);
-
if (netif_running(netdev)) {
err = ixgbevf_open(netdev);
if (err)
@@ -3293,6 +3385,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = ixgbevf_busy_poll_recv,
+#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3326,19 +3421,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
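
In the ixgbevf_poll() change above, each Rx ring handled by a vector gets a fair slice of the NAPI budget, and polling only counts as complete when every ring cleaned less than its slice. A minimal sketch of that bookkeeping with made-up backlog numbers:

#include <stdio.h>
#include <stdbool.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* stand-in for ixgbevf_clean_rx_irq(); returns packets cleaned */
static int clean_ring(int pending, int budget)
{
	return pending < budget ? pending : budget;
}

int main(void)
{
	int budget = 64, rx_rings = 3;
	int pending[3] = { 5, 100, 0 };	/* toy per-ring backlog */
	int per_ring_budget = max(budget / rx_rings, 1);
	bool clean_complete = true;

	for (int i = 0; i < rx_rings; i++) {
		int cleaned = clean_ring(pending[i], per_ring_budget);
		clean_complete &= (cleaned < per_ring_budget);
	}

	printf(clean_complete ? "napi_complete\n" : "keep polling\n");
	return 0;
}
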
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 387b52635bc..4d44d64ae38 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 23de82a9da8..f5685c0d057 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -309,7 +309,7 @@ static void
jme_load_macaddr(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- unsigned char macaddr[6];
+ unsigned char macaddr[ETH_ALEN];
u32 val;
spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, 6);
+ memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev,
err_out_unmap:
iounmap(jme->regs);
err_out_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
err_out_release_regions:
pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev)
unregister_netdev(netdev);
iounmap(jme->regs);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
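
jme_load_macaddr() above assembles the station address from two 32-bit registers (four bytes from the low register, two from the high one). A standalone sketch of that unpacking, using arbitrary register values:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void load_macaddr(uint32_t lo, uint32_t hi, unsigned char mac[ETH_ALEN])
{
	mac[0] = (lo >>  0) & 0xFF;
	mac[1] = (lo >>  8) & 0xFF;
	mac[2] = (lo >> 16) & 0xFF;
	mac[3] = (lo >> 24) & 0xFF;
	mac[4] = (hi >>  0) & 0xFF;
	mac[5] = (hi >>  8) & 0xFF;
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	load_macaddr(0x563412AB, 0x0000DEBC, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
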
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 3efc897c991..58cd67c0c8e 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -28,7 +28,6 @@
#define DRV_NAME "jme"
#define DRV_VERSION "1.0.8"
-#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
#define PCI_DEVICE_ID_JMICRON_JMC260 0x0260
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index a36fa80968e..4a5e3b0f712 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
bif->dev = dev;
- memcpy(dev->dev_addr, bif->mac, 6);
+ memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2c210ec35d5..61088a6a942 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2513,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
mac_addr = of_get_mac_address(pnp);
if (mac_addr)
- memcpy(ppd.mac_addr, mac_addr, 6);
+ memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, 6);
+ memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
else
uc_addr_get(mp, dev->dev_addr);
@@ -2890,6 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
PHY_INTERFACE_MODE_GMII);
if (!mp->phy)
err = -ENODEV;
+ else
+ phy_addr_set(mp, mp->phy->addr);
} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
mp->phy = phy_scan(mp, pd->phy_addr);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index e2f66266031..7354960b583 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -4,11 +4,9 @@
* Since the MDIO interface of Marvell network interfaces is shared
* between all network interfaces, having a single driver allows to
* handle concurrent accesses properly (you may have four Ethernet
- * ports, but they in fact share the same SMI interface to access the
- * MDIO bus). Moreover, this MDIO interface code is similar between
- * the mv643xx_eth driver and the mvneta driver. For now, it is only
- * used by the mvneta driver, but it could later be used by the
- * mv643xx_eth driver as well.
+ * ports, but they in fact share the same SMI interface to access
+ * the MDIO bus). This driver is currently used by the mvneta and
+ * mv643xx_eth drivers.
*
* Copyright (C) 2012 Marvell
*
@@ -44,6 +42,15 @@
#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
#define MVMDIO_ERR_INT_MASK 0x0080
+/*
+ * SMI Timeout measurements:
+ * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
+ * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
+ */
+#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+
struct orion_mdio_dev {
struct mutex lock;
void __iomem *regs;
@@ -68,77 +75,68 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
static int orion_mdio_wait_ready(struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
- int count;
-
- if (dev->err_interrupt <= 0) {
- count = 0;
- while (1) {
- if (orion_mdio_smi_is_done(dev))
- break;
-
- if (count > 100) {
- dev_err(bus->parent,
- "Timeout: SMI busy for too long\n");
- return -ETIMEDOUT;
- }
-
- udelay(10);
- count++;
- }
- } else {
- if (!orion_mdio_smi_is_done(dev)) {
+ unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+ unsigned long end = jiffies + timeout;
+ int timedout = 0;
+
+ while (1) {
+ if (orion_mdio_smi_is_done(dev))
+ return 0;
+ else if (timedout)
+ break;
+
+ if (dev->err_interrupt <= 0) {
+ usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
+ MVMDIO_SMI_POLL_INTERVAL_MAX);
+
+ if (time_is_before_jiffies(end))
+ ++timedout;
+ } else {
wait_event_timeout(dev->smi_busy_wait,
- orion_mdio_smi_is_done(dev),
- msecs_to_jiffies(100));
- if (!orion_mdio_smi_is_done(dev))
- return -ETIMEDOUT;
- }
+ orion_mdio_smi_is_done(dev),
+ timeout);
+
+ ++timedout;
+ }
}
- return 0;
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
}
static int orion_mdio_read(struct mii_bus *bus, int mii_id,
int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
- int count;
u32 val;
int ret;
mutex_lock(&dev->lock);
ret = orion_mdio_wait_ready(bus);
- if (ret < 0) {
- mutex_unlock(&dev->lock);
- return ret;
- }
+ if (ret < 0)
+ goto out;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
dev->regs);
- /* Wait for the value to become available */
- count = 0;
- while (1) {
- val = readl(dev->regs);
- if (val & MVMDIO_SMI_READ_VALID)
- break;
-
- if (count > 100) {
- dev_err(bus->parent, "Timeout when reading PHY\n");
- mutex_unlock(&dev->lock);
- return -ETIMEDOUT;
- }
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
- udelay(10);
- count++;
+ val = readl(dev->regs);
+ if (!(val & MVMDIO_SMI_READ_VALID)) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
}
+ ret = val & 0xFFFF;
+out:
mutex_unlock(&dev->lock);
-
- return val & 0xFFFF;
+ return ret;
}
static int orion_mdio_write(struct mii_bus *bus, int mii_id,
@@ -150,10 +148,8 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
mutex_lock(&dev->lock);
ret = orion_mdio_wait_ready(bus);
- if (ret < 0) {
- mutex_unlock(&dev->lock);
- return ret;
- }
+ if (ret < 0)
+ goto out;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
@@ -161,9 +157,9 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
(value << MVMDIO_SMI_DATA_SHIFT)),
dev->regs);
+out:
mutex_unlock(&dev->lock);
-
- return 0;
+ return ret;
}
static int orion_mdio_reset(struct mii_bus *bus)
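
The reworked orion_mdio_wait_ready() above polls at a fixed interval and gives up once a deadline passes, instead of counting a fixed number of iterations. A userspace sketch of the same deadline-bounded wait pattern; the timings and the fake ready flag are placeholders:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool hw_ready(int *countdown)
{
	return --(*countdown) <= 0;	/* stand-in: "hardware" finishes after a few polls */
}

static int wait_ready(int *hw_state, long timeout_us, long poll_us)
{
	struct timespec now, end, interval = { 0, poll_us * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_nsec += timeout_us * 1000L;
	end.tv_sec  += end.tv_nsec / 1000000000L;
	end.tv_nsec %= 1000000000L;

	for (;;) {
		if (hw_ready(hw_state))
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > end.tv_sec ||
		    (now.tv_sec == end.tv_sec && now.tv_nsec >= end.tv_nsec))
			return -1;	/* deadline passed without success */

		nanosleep(&interval, NULL);
	}
}

int main(void)
{
	int polls_needed = 5;

	if (wait_ready(&polls_needed, 1000, 50) == 0)
		printf("SMI ready\n");
	else
		printf("Timeout: SMI busy for too long\n");
	return 0;
}
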
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e35bac7cfdf..b8e232b4ea2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2792,6 +2792,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
+ u64_stats_init(&pp->tx_stats.syncp);
+ u64_stats_init(&pp->rx_stats.syncp);
+
pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2811,7 +2814,7 @@ static int mvneta_probe(struct platform_device *pdev)
}
dt_mac_addr = of_get_mac_address(dn);
- if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+ if (dt_mac_addr) {
mac_from = "device tree";
memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
} else {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ecc7f7b696b..59784619386 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4046,7 +4046,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -4090,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev)
iounmap(hw->regs);
kfree(hw);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e09a8c6f853..43aa7acd84a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4763,6 +4763,9 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
sky2->hw = hw;
sky2->msg_enable = netif_msg_init(debug, default_msg);
+ u64_stats_init(&sky2->tx_stats.syncp);
+ u64_stats_init(&sky2->rx_stats.syncp);
+
/* Auto speed and flow control */
sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
if (hw->chip_id != CHIP_ID_YUKON_XL)
@@ -5081,7 +5084,6 @@ err_out_free_regions:
err_out_disable:
pci_disable_device(pdev);
err_out:
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -5124,8 +5126,6 @@ static void sky2_remove(struct pci_dev *pdev)
iounmap(hw->regs);
kfree(hw);
-
- pci_set_drvdata(pdev, NULL);
}
static int sky2_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index ea20182c696..1e9970d2f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1539,11 +1539,6 @@ out:
return ret;
}
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
- return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
@@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
struct mlx4_dev *dev = &(priv->dev);
int err;
int admin_vlan_ix = NO_INDX;
- enum mlx4_vlan_transition vlan_trans;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.link_state == vp_admin->link_state)
return 0;
- vlan_trans = calculate_transition(vp_oper->state.default_vlan,
- vp_admin->default_vlan);
-
if (!(priv->mfunc.master.slave_state[slave].active &&
- dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
- vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+ dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
/* even if the UPDATE_QP command isn't supported, we still want
* to set this VF link according to the admin directive
*/
@@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
return -ENOMEM;
if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
- err = __mlx4_register_vlan(&priv->dev, port,
- vp_admin->default_vlan,
- &admin_vlan_ix);
- if (err) {
- kfree(work);
- mlx4_warn((&priv->dev),
- "No vlan resources slave %d, port %d\n",
- slave, port);
- return err;
+ if (MLX4_VGT != vp_admin->default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &admin_vlan_ix);
+ if (err) {
+ kfree(work);
+ mlx4_warn((&priv->dev),
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ } else {
+ admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg((&(priv->dev)),
@@ -1687,11 +1681,11 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
- port, vp_oper->vlan_idx);
+ port, vp_oper->state.default_vlan);
vp_oper->vlan_idx = NO_INDX;
}
if (NO_INDX != vp_oper->mac_idx) {
- __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
+ __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
vp_oper->mac_idx = NO_INDX;
}
}
@@ -1718,6 +1712,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
+ slave_state[slave].old_vlan_api = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2198,6 +2193,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
return ERR_PTR(-ENOMEM);
}
+ memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
@@ -2253,7 +2250,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_vport_oper_state *vf_oper;
struct mlx4_vport_state *vf_admin;
int slave;
@@ -2269,7 +2265,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
return -EINVAL;
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if ((0 == vlan) && (0 == qos))
vf_admin->default_vlan = MLX4_VGT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 004e4231af6..22fcbe78311 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->cq_max_count = cpu_to_be16(count);
cq_context->cq_period = cpu_to_be16(period);
@@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
cq_context->log_page_size = mtt->page_shift - 12;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
}
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->flags = cpu_to_be32(!!collapsed << 18);
if (timestamp_en)
cq_context->flags |= cpu_to_be32(1 << 19);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d5047cdb..3a098cc4d34 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,23 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
- struct mlx4_en_cq *cq,
- int entries, int ring, enum cq_type mode)
+ struct mlx4_en_cq **pcq,
+ int entries, int ring, enum cq_type mode,
+ int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
int err;
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+ if (!cq) {
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ en_err(priv, "Failed to allocate CQ structure\n");
+ return -ENOMEM;
+ }
+ }
+
cq->size = entries;
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
@@ -57,17 +68,30 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->is_tx = mode;
spin_lock_init(&cq->lock);
+ /* Allocate HW buffers on provided NUMA node.
+ * dev->numa_node is used in mtt range allocation flow.
+ */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err)
- return err;
+ goto err_cq;
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- else
- cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+ goto err_res;
+ cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+ *pcq = cq;
+
+ return 0;
+
+err_res:
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+ kfree(cq);
+ *pcq = NULL;
return err;
}
@@ -117,12 +141,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_cq *rx_cq;
cq_idx = cq_idx % priv->rx_ring_num;
- rx_cq = &priv->rx_cq[cq_idx];
+ rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
if (!cq->is_tx)
- cq->size = priv->rx_ring[cq->ring].actual_size;
+ cq->size = priv->rx_ring[cq->ring]->actual_size;
if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
(!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +170,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
return 0;
}
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq = *pcq;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +182,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
+ kfree(cq);
+ *pcq = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
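
mlx4_en_create_cq() above switches to goto-based unwinding so each failure path releases exactly what was acquired so far, in reverse order. A standalone sketch of that error-handling shape with invented resources:

#include <stdio.h>
#include <stdlib.h>

struct demo_cq {
	void *hw_buf;
};

static int create_cq(struct demo_cq **pcq, size_t buf_size)
{
	struct demo_cq *cq;
	int err = -1;

	cq = calloc(1, sizeof(*cq));
	if (!cq)
		return -1;

	cq->hw_buf = malloc(buf_size);
	if (!cq->hw_buf)
		goto err_cq;		/* only the struct to free */

	/* pretend a later mapping step can also fail */
	if (buf_size == 0)
		goto err_buf;

	*pcq = cq;
	return 0;

err_buf:
	free(cq->hw_buf);
err_cq:
	free(cq);
	*pcq = NULL;
	return err;
}

int main(void)
{
	struct demo_cq *cq;

	if (create_cq(&cq, 4096) == 0) {
		printf("cq created\n");
		free(cq->hw_buf);
		free(cq);
	}
	return 0;
}
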
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c750985f47..0596f9f85a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
int err = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i].moder_cnt = priv->tx_frames;
- priv->tx_cq[i].moder_time = priv->tx_usecs;
+ priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+ priv->tx_cq[i]->moder_time = priv->tx_usecs;
if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
if (err)
return err;
}
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
return 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i].moder_cnt = priv->rx_frames;
- priv->rx_cq[i].moder_time = priv->rx_usecs;
+ priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+ priv->rx_cq[i]->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
if (err)
return err;
}
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
}
}
for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i].packets;
- data[index++] = priv->tx_ring[i].bytes;
+ data[index++] = priv->tx_ring[i]->packets;
+ data[index++] = priv->tx_ring[i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- data[index++] = priv->rx_ring[i].packets;
- data[index++] = priv->rx_ring[i].bytes;
+ data[index++] = priv->rx_ring[i]->packets;
+ data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
- data[index++] = priv->rx_ring[i].yields;
- data[index++] = priv->rx_ring[i].misses;
- data[index++] = priv->rx_ring[i].cleaned;
+ data[index++] = priv->rx_ring[i]->yields;
+ data[index++] = priv->rx_ring[i]->misses;
+ data[index++] = priv->rx_ring[i]->cleaned;
#endif
}
spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
- if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
- priv->rx_ring[0].size) &&
- tx_size == priv->tx_ring[0].size)
+ if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+ priv->rx_ring[0]->size) &&
+ tx_size == priv->tx_ring[0]->size)
return 0;
mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
- priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
- param->tx_pending = priv->tx_ring[0].size;
+ priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+ param->tx_pending = priv->tx_ring[0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
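
The ethtool hunks are mechanical fallout of the same change: priv->tx_ring, priv->rx_ring, priv->tx_cq and priv->rx_cq become arrays of pointers, so every &priv->x[i] turns into priv->x[i] and member access switches from '.' to '->'. A small sketch of the resulting access pattern, with hypothetical names:

#include <linux/slab.h>

struct my_ring {
	unsigned long packets;
	unsigned long bytes;
};

/* Table of pointers: entries can be allocated one by one, on chosen nodes. */
static struct my_ring **my_alloc_ring_table(int n)
{
	return kcalloc(n, sizeof(struct my_ring *), GFP_KERNEL);
}

static unsigned long my_sum_packets(struct my_ring **tab, int n)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < n; i++)
		if (tab[i])			/* a slot may still be empty */
			sum += tab[i]->packets;	/* '->' instead of '.' */
	return sum;
}
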
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a071cda2dd0..0d087b03a7b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
+ /* Initialize time stamp mechanism */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_init_timestamp(mdev);
+
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
if (!dev->caps.comp_pool) {
mdev->profile.prof[i].rx_ring_num =
@@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->pndev[i] = NULL;
}
- /* Initialize time stamp mechanism */
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_init_timestamp(mdev);
-
return mdev;
err_mr:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fa37b7a6121..e72d8a112a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
int done;
if (!priv->port_up)
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
struct list_head next;
struct work_struct work;
+ u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case IPPROTO_UDP:
+ return MLX4_NET_TRANS_RULE_ID_UDP;
+ case IPPROTO_TCP:
+ return MLX4_NET_TRANS_RULE_ID_TCP;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+};
+
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
struct mlx4_en_filter,
work);
struct mlx4_en_priv *priv = filter->priv;
- struct mlx4_spec_list spec_tcp = {
- .id = MLX4_NET_TRANS_RULE_ID_TCP,
+ struct mlx4_spec_list spec_tcp_udp = {
+ .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
{
.tcp_udp = {
.dst_port = filter->dst_port,
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+ if (spec_tcp_udp.id < 0) {
+ en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+ filter->ip_proto);
+ goto ignore;
+ }
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
- list_add_tail(&spec_tcp.list, &rule.list);
+ list_add_tail(&spec_tcp_udp.list, &rule.list);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
if (rc)
en_err(priv, "Error attaching flow. err = %d\n", rc);
+ignore:
mlx4_en_filter_rfs_expire(priv);
filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
- __be32 dst_ip, __be16 src_port, __be16 dst_port,
- u32 flow_id)
+ __be32 dst_ip, u8 ip_proto, __be16 src_port,
+ __be16 dst_port, u32 flow_id)
{
struct mlx4_en_filter *filter = NULL;
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
filter->src_ip = src_ip;
filter->dst_ip = dst_ip;
+ filter->ip_proto = ip_proto;
filter->src_port = src_port;
filter->dst_port = dst_port;
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
- __be16 src_port, __be16 dst_port)
+ u8 ip_proto, __be16 src_port, __be16 dst_port)
{
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
filter_chain) {
if (filter->src_ip == src_ip &&
filter->dst_ip == dst_ip &&
+ filter->ip_proto == ip_proto &&
filter->src_port == src_port &&
filter->dst_port == dst_port) {
ret = filter;
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct mlx4_en_filter *filter;
const struct iphdr *ip;
const __be16 *ports;
+ u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
+ if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ ip_proto = ip->protocol;
src_ip = ip->saddr;
dst_ip = ip->daddr;
src_port = ports[0];
dst_port = ports[1];
- if (ip->protocol != IPPROTO_TCP)
- return -EPROTONOSUPPORT;
-
spin_lock_bh(&priv->filters_lock);
- filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+ src_port, dst_port);
if (filter) {
if (filter->rxq_index == rxq_index)
goto out;
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
filter->rxq_index = rxq_index;
} else {
filter = mlx4_en_filter_alloc(priv, rxq_index,
- src_ip, dst_ip,
+ src_ip, dst_ip, ip_proto,
src_port, dst_port, flow_id);
if (!filter) {
ret = -ENOMEM;
@@ -332,8 +355,7 @@ err:
return ret;
}
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter, *tmp;
LIST_HEAD(del_list);
@@ -417,7 +439,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- int idx;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -425,10 +446,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
- if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
- mlx4_unregister_vlan(mdev->dev, priv->port, idx);
- else
- en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
+ mlx4_unregister_vlan(mdev->dev, priv->port, vid);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1223,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
spin_lock_irqsave(&cq->lock, flags);
napi_synchronize(&cq->napi);
mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1245,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
- priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+ i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+ priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
}
priv->port_stats.tx_timeout++;
@@ -1286,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1295,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
}
for (i = 0; i < priv->tx_ring_num; i++) {
- cq = &priv->tx_cq[i];
+ cq = priv->tx_cq[i];
cq->moder_cnt = priv->tx_frames;
cq->moder_time = priv->tx_usecs;
}
@@ -1329,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
for (ring = 0; ring < priv->rx_ring_num; ring++) {
spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->rx_ring[ring].packets;
- rx_bytes = priv->rx_ring[ring].bytes;
+ rx_packets = priv->rx_ring[ring]->packets;
+ rx_bytes = priv->rx_ring[ring]->bytes;
spin_unlock_bh(&priv->stats_lock);
rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1359,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
if (moder_time != priv->last_moder_time[ring]) {
priv->last_moder_time[ring] = moder_time;
- cq = &priv->rx_cq[ring];
+ cq = priv->rx_cq[ring];
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
@@ -1482,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
mlx4_en_cq_init_lock(cq);
@@ -1500,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
- priv->rx_ring[i].cqn = cq->mcq.cqn;
+ priv->rx_ring[i]->cqn = cq->mcq.cqn;
++rx_index;
}
@@ -1526,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
- cq = &priv->tx_cq[i];
+ cq = priv->tx_cq[i];
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
@@ -1542,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
- tx_ring = &priv->tx_ring[i];
+ tx_ring = priv->tx_ring[i];
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
i / priv->num_tx_rings_p_up);
if (err) {
@@ -1612,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
tx_err:
while (tx_index--) {
- mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
- mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
@@ -1622,9 +1640,9 @@ mac_err:
mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
- mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+ mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
for (i = 0; i < priv->rx_ring_num; i++)
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
return err; /* need to close devices */
}
@@ -1720,25 +1738,25 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
- mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
- mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
}
msleep(10);
for (i = 0; i < priv->tx_ring_num; i++)
- mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+ mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
mlx4_en_put_qp(priv);
- if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
- struct mlx4_en_cq *cq = &priv->rx_cq[i];
+ struct mlx4_en_cq *cq = priv->rx_cq[i];
local_bh_disable();
while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1749,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
msleep(1);
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
}
}
@@ -1787,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i].bytes = 0;
- priv->tx_ring[i].packets = 0;
- priv->tx_ring[i].tx_csum = 0;
+ priv->tx_ring[i]->bytes = 0;
+ priv->tx_ring[i]->packets = 0;
+ priv->tx_ring[i]->tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_ring[i].bytes = 0;
- priv->rx_ring[i].packets = 0;
- priv->rx_ring[i].csum_ok = 0;
- priv->rx_ring[i].csum_none = 0;
+ priv->rx_ring[i]->bytes = 0;
+ priv->rx_ring[i]->packets = 0;
+ priv->rx_ring[i]->csum_ok = 0;
+ priv->rx_ring[i]->csum_none = 0;
}
}
@@ -1852,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
#endif
for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring[i].tx_info)
+ if (priv->tx_ring && priv->tx_ring[i])
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq[i].buf)
+ if (priv->tx_cq && priv->tx_cq[i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- if (priv->rx_ring[i].rx_info)
+ if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
priv->prof->rx_ring_size, priv->stride);
- if (priv->rx_cq[i].buf)
+ if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
@@ -1877,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
struct mlx4_en_port_profile *prof = priv->prof;
int i;
int err;
+ int node;
err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
@@ -1886,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
/* Create tx Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
+ node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
- prof->tx_ring_size, i, TX))
+ prof->tx_ring_size, i, TX, node))
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE))
+ prof->tx_ring_size, TXBB_SIZE, node))
goto err;
}
/* Create rx Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
+ node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
- prof->rx_ring_size, i, RX))
+ prof->rx_ring_size, i, RX, node))
goto err;
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
- prof->rx_ring_size, priv->stride))
+ prof->rx_ring_size, priv->stride,
+ node))
goto err;
}
@@ -1918,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (priv->rx_ring[i])
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ prof->rx_ring_size,
+ priv->stride);
+ if (priv->rx_cq[i])
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ }
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (priv->tx_ring[i])
+ mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+ if (priv->tx_cq[i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ }
return -ENOMEM;
}
@@ -2211,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+ priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
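
Besides the pointer conversion, two things happen in the netdev hunks: RFS filters now carry ip_proto so UDP flows can be steered as well as TCP, and mlx4_en_alloc_resources() picks a NUMA node per ring by walking the online CPUs round-robin, tearing down every already-created ring and CQ if a later allocation fails. A minimal sketch of that node-selection heuristic (the exact ring-to-CPU mapping is a driver policy choice):

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Map ring i to a CPU, then to that CPU's memory node; the node is then
 * passed down to the CQ/ring constructors shown earlier.
 */
static int my_ring_to_node(int ring_index)
{
	int cpu = ring_index % num_online_cpus();

	return cpu_to_node(cpu);
}
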
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 331791467a2..dae1a1f4ae5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
return PTR_ERR(mailbox);
filter = mailbox->buf;
- memset(filter, 0, sizeof(*filter));
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
@@ -143,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- stats->rx_packets += priv->rx_ring[i].packets;
- stats->rx_bytes += priv->rx_ring[i].bytes;
- priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
- priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+ stats->rx_packets += priv->rx_ring[i]->packets;
+ stats->rx_bytes += priv->rx_ring[i]->bytes;
+ priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+ priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
- stats->tx_packets += priv->tx_ring[i].packets;
- stats->tx_bytes += priv->tx_ring[i].bytes;
- priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+ stats->tx_packets += priv->tx_ring[i]->packets;
+ stats->tx_bytes += priv->tx_ring[i]->bytes;
+ priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa69c8..07a1d0fbae4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
reduce_rings:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
@@ -319,12 +319,23 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+ if (!ring) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed to allocate RX ring structure\n");
+ return -ENOMEM;
+ }
+ }
+
ring->prod = 0;
ring->cons = 0;
ring->size = size;
@@ -335,17 +346,25 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vmalloc(tmp);
- if (!ring->rx_info)
- return -ENOMEM;
+ ring->rx_info = vmalloc_node(tmp, node);
+ if (!ring->rx_info) {
+ ring->rx_info = vmalloc(tmp);
+ if (!ring->rx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
+ }
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
+ /* Allocate HW buffers on provided NUMA node */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err)
- goto err_ring;
+ goto err_info;
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
@@ -356,13 +375,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
+ *pring = ring;
return 0;
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_ring:
+ kfree(ring);
+ *pring = NULL;
+
return err;
}
@@ -376,12 +400,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
DS_SIZE * priv->num_frags);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
ring->prod = 0;
ring->cons = 0;
ring->actual_size = 0;
- ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+ ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
if (ring->stride <= TXBB_SIZE)
@@ -412,7 +436,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +446,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err_buffers:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
- mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+ mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
- if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
- priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+ if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+ mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring = *pring;
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
+ kfree(ring);
+ *pring = NULL;
#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv, ring);
+ mlx4_en_cleanup_filters(priv);
#endif
}
@@ -592,7 +620,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_cqe *cqe;
- struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
@@ -991,7 +1019,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
for (i = 0; i < priv->rx_ring_num; i++) {
qpn = rss_map->base_qpn + i;
- err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+ err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
&rss_map->state[i],
&rss_map->qps[i]);
if (err)
@@ -1008,7 +1036,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
- priv->rx_ring[0].cqn, -1, &context);
+ priv->rx_ring[0]->cqn, -1, &context);
if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
rss_rings = priv->rx_ring_num;
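
The RX ring constructor uses the same preferred-node/fallback allocations as the CQ path, and additionally points the PCI device at the target node around mlx4_alloc_hwq_res() so allocations keyed off the device's node land there, restoring dev->numa_node afterwards. A sketch of that temporary-override pattern, with dma_alloc_coherent() standing in for the driver's hwq helper:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/errno.h>

static int my_alloc_buf_on_node(struct device *dev, int node, int home_node,
				size_t size, dma_addr_t *dma, void **cpu_addr)
{
	/* Note: only safe when nothing else allocates against this device
	 * concurrently, which holds here because rings are built at init time.
	 */
	set_dev_node(dev, node);
	*cpu_addr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, home_node);

	return *cpu_addr ? 0 : -ENOMEM;
}
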
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d669e..40626690e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@ retry_tx:
* since we turned the carrier off */
msleep(200);
for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
- tx_ring = &priv->tx_ring[i];
+ tx_ring = priv->tx_ring[i];
if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
goto retry_tx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82d6ff..f54ebd5a170 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,23 @@ module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring, int qpn, u32 size,
- u16 stride)
+ struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+ u16 stride, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring;
int tmp;
int err;
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+ if (!ring) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
+ }
+ }
+
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
@@ -68,22 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
inline_thold = min(inline_thold, MAX_INLINE);
tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = vmalloc(tmp);
- if (!ring->tx_info)
- return -ENOMEM;
+ ring->tx_info = vmalloc_node(tmp, node);
+ if (!ring->tx_info) {
+ ring->tx_info = vmalloc(tmp);
+ if (!ring->tx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
+ }
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
if (!ring->bounce_buf) {
- err = -ENOMEM;
- goto err_tx;
+ ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ if (!ring->bounce_buf) {
+ err = -ENOMEM;
+ goto err_info;
+ }
}
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+ /* Allocate HW buffers on provided NUMA node */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
@@ -109,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
}
ring->qp.event = mlx4_en_sqp_event;
- err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+ err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
en_dbg(DRV, priv, "working without blueflame (%d)", err);
ring->bf.uar = &mdev->priv_uar;
@@ -120,6 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ *pring = ring;
return 0;
err_map:
@@ -129,16 +151,20 @@ err_hwq_res:
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
-err_tx:
+err_info:
vfree(ring->tx_info);
ring->tx_info = NULL;
+err_ring:
+ kfree(ring);
+ *pring = NULL;
return err;
}
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring)
+ struct mlx4_en_tx_ring **pring)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring = *pring;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
if (ring->bf_enabled)
@@ -151,6 +177,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
ring->bounce_buf = NULL;
vfree(ring->tx_info);
ring->tx_info = NULL;
+ kfree(ring);
+ *pring = NULL;
}
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +358,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
- struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe;
u16 index;
u16 new_index, ring_index, stamp_index;
@@ -622,7 +650,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_ind = skb->queue_mapping;
- ring = &priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[tx_ind];
if (vlan_tx_tag_present(skb))
vlan_tag = vlan_tx_tag_get(skb);
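
The TX ring constructor follows the same pattern for its larger metadata arrays: vmalloc_node() first, plain vmalloc() as a fallback, with matching err_info/err_ring unwind labels. The vmalloc half of that, sketched:

#include <linux/vmalloc.h>
#include <linux/types.h>

static void *my_big_array_alloc(size_t size, int node)
{
	void *p = vmalloc_node(size, node);

	return p ? p : vmalloc(size);	/* fall back to any node */
}
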
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0416c5b3b35..c9cdb2a2c59 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
if (err)
goto err_out_free_mtt;
- memset(eq_context, 0, sizeof *eq_context);
eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
MLX4_EQ_STATE_ARMED);
eq_context->log_eq_size = ilog2(eq->nent);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 0d63daa2f42..19492821460 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
@@ -177,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
u8 field;
u32 size;
int err = 0;
@@ -185,18 +184,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
-#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
-#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
-#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
-#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
-#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
-#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
+
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
+#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
@@ -237,8 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
} else if (vhcr->op_modifier == 0) {
- /* enable rdma and ethernet interfaces */
- field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
+ /* enable rdma and ethernet interfaces, and new quota locations */
+ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
+ QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports;
@@ -250,14 +258,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = 0; /* protected FMR support not available as yet */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
- size = dev->caps.num_qps;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
- size = dev->caps.num_srqs;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
- size = dev->caps.num_cqs;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
size = dev->caps.num_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
@@ -265,14 +279,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
- size = dev->caps.num_mpts;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
- size = dev->caps.num_mtts;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
} else
err = -EINVAL;
@@ -287,7 +306,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u32 *outbox;
u8 field, op_modifier;
u32 size;
- int err = 0;
+ int err = 0, quotas = 0;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
@@ -311,6 +330,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
func_cap->flags = field;
+ quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
@@ -318,29 +338,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
func_cap->pf_context_behaviour = size;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
- func_cap->qp_quota = size & 0xFFFFFF;
+ if (quotas) {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
- func_cap->srq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
- func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ } else {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+ }
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
func_cap->max_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
- func_cap->mpt_quota = size & 0xFFFFFF;
-
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
- func_cap->mtt_quota = size & 0xFFFFFF;
-
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
- func_cap->mcg_quota = size & 0xFFFFFF;
goto out;
}
@@ -652,7 +693,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
- dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -924,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
pages = mailbox->buf;
for (mlx4_icm_first(icm, &iter);
@@ -1273,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, INIT_HCA_IN_SIZE);
-
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1573,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, INIT_PORT_IN_SIZE);
-
flags = 0;
flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
@@ -1713,7 +1749,6 @@ void mlx4_opreq_action(struct work_struct *work)
u32 *outbox;
u32 modifier;
u16 token;
- u16 type_m;
u16 type;
int err;
u32 num_qps;
@@ -1739,14 +1774,13 @@ void mlx4_opreq_action(struct work_struct *work)
MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
- mlx4_err(dev, "Failed to retreive required operation: %d\n",
+ mlx4_err(dev, "Failed to retrieve required operation: %d\n",
err);
return;
}
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
- type_m = type >> 12;
type &= 0xfff;
switch (type) {
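
The QUERY_FUNC_CAP changes let the PF report real per-function quotas at new offsets (0x50 and up) while keeping the old offsets (now suffixed _DEP) filled with the global capability sizes for older guests; a new flag bit tells the slave which set to read. A minimal sketch of that gating on the slave side, with hypothetical MY_* names standing in for the driver's offsets and for MLX4_GET():

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define MY_FLAG_QUOTAS		0x10	/* mirrors QUERY_FUNC_CAP_FLAG_QUOTAS */
#define MY_QP_QUOTA_OFFSET	0x50
#define MY_QP_QUOTA_OFFSET_DEP	0x10

static u32 my_read_qp_quota(const u8 *outbox, u8 flags)
{
	int off = (flags & MY_FLAG_QUOTAS) ?
		  MY_QP_QUOTA_OFFSET : MY_QP_QUOTA_OFFSET_DEP;
	__be32 raw;

	memcpy(&raw, outbox + off, sizeof(raw));	/* stand-in for MLX4_GET */
	return be32_to_cpu(raw) & 0xFFFFFF;		/* quota sits in the low 24 bits */
}
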
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 31d02649be4..5fbf4924c27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
kfree(icm);
}
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+ gfp_t gfp_mask, int node)
{
struct page *page;
- page = alloc_pages(gfp_mask, order);
- if (!page)
- return -ENOMEM;
+ page = alloc_pages_node(node, gfp_mask, order);
+ if (!page) {
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return -ENOMEM;
+ }
sg_set_page(mem, page, PAGE_SIZE << order, 0);
return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
/* We use sg_set_buf for coherent allocs, which assumes low memory */
BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
- icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!icm)
- return NULL;
+ icm = kmalloc_node(sizeof(*icm),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+ dev->numa_node);
+ if (!icm) {
+ icm = kmalloc(sizeof(*icm),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!icm)
+ return NULL;
+ }
icm->refcount = 0;
INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (npages > 0) {
if (!chunk) {
- chunk = kmalloc(sizeof *chunk,
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!chunk)
- goto fail;
+ chunk = kmalloc_node(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM |
+ __GFP_NOWARN),
+ dev->numa_node);
+ if (!chunk) {
+ chunk = kmalloc(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM |
+ __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+ }
sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
cur_order, gfp_mask);
else
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
- cur_order, gfp_mask);
+ cur_order, gfp_mask,
+ dev->numa_node);
if (ret) {
if (--cur_order < 0)
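
ICM allocations get the same treatment down at the page level: chunk metadata via kmalloc_node() and the pages themselves via alloc_pages_node(), each falling back to a node-agnostic call if the preferred node has no memory. The page half of that, sketched:

#include <linux/gfp.h>

static struct page *my_alloc_order_pages(int node, gfp_t gfp_mask, int order)
{
	struct page *page = alloc_pages_node(node, gfp_mask, order);

	return page ? page : alloc_pages(gfp_mask, order);
}
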
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60c9f4f103f..5789ea2c934 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/kmod.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -561,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.num_ports = func_cap.num_ports;
- dev->caps.num_qps = func_cap.qp_quota;
- dev->caps.num_srqs = func_cap.srq_quota;
- dev->caps.num_cqs = func_cap.cq_quota;
- dev->caps.num_eqs = func_cap.max_eq;
- dev->caps.reserved_eqs = func_cap.reserved_eq;
- dev->caps.num_mpts = func_cap.mpt_quota;
- dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->quotas.qp = func_cap.qp_quota;
+ dev->quotas.srq = func_cap.srq_quota;
+ dev->quotas.cq = func_cap.cq_quota;
+ dev->quotas.mpt = func_cap.mpt_quota;
+ dev->quotas.mtt = func_cap.mtt_quota;
+ dev->caps.num_qps = 1 << hca_param.log_num_qps;
+ dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
+ dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
+ dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
dev->caps.num_pds = MLX4_NUM_PDS;
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
@@ -650,6 +655,27 @@ err_mem:
return err;
}
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+ int port;
+ int has_ib_port = false;
+ int has_eth_port = false;
+#define EN_DRV_NAME "mlx4_en"
+#define IB_DRV_NAME "mlx4_ib"
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+ has_ib_port = true;
+ else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ has_eth_port = true;
+ }
+
+ if (has_ib_port)
+ request_module_nowait(IB_DRV_NAME);
+ if (has_eth_port)
+ request_module_nowait(EN_DRV_NAME);
+}
+
/*
* Change the port configuration of the device.
* Every user of this function must hold the port mutex.
@@ -681,6 +707,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
}
mlx4_set_port_mask(dev);
err = mlx4_register_device(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to register device\n");
+ goto out;
+ }
+ mlx4_request_modules(dev);
}
out:
@@ -2075,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
"aborting.\n");
return err;
}
- if (num_vfs > MLX4_MAX_NUM_VF) {
- printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
- num_vfs, MLX4_MAX_NUM_VF);
+
+ /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
+ * per port, we must limit the number of VFs to 63 (since there are
+ * 128 MACs)
+ */
+ if (num_vfs >= MLX4_MAX_NUM_VF) {
+ dev_err(&pdev->dev,
+ "Requested more VF's (%d) than allowed (%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF - 1);
return -EINVAL;
}
@@ -2154,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ dev->numa_node = dev_to_node(&pdev->dev);
/* Detect if this device is a virtual function */
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as pf, we normally skip vfs unless explicitly
@@ -2295,6 +2333,8 @@ slave_start:
if (err)
goto err_steer;
+ mlx4_init_quotas(dev);
+
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
if (err)
@@ -2305,6 +2345,8 @@ slave_start:
if (err)
goto err_port;
+ mlx4_request_modules(dev);
+
mlx4_sense_init(dev);
mlx4_start_sense(dev);
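
mlx4_request_modules() is called once at probe (after port init) and again after mlx4_change_port_types() re-registers the device, so the matching protocol drivers are pulled in automatically whenever an IB or Ethernet port is present. The non-blocking request keeps probe from stalling on the module loader; a stripped-down sketch:

#include <linux/kmod.h>
#include <linux/types.h>

static void my_request_port_drivers(bool has_eth_port, bool has_ib_port)
{
	if (has_ib_port)
		request_module_nowait("mlx4_ib");
	if (has_eth_port)
		request_module_nowait("mlx4_en");
}
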
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 55f6245efb6..acf9d5f1f92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
goto out_list;
}
mgm = mailbox->buf;
- memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -645,7 +644,7 @@ static const u8 __promisc_mode[] = {
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type)
{
- if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+ if (flow_type >= MLX4_FS_MODE_NUM) {
mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
return -EINVAL;
}
@@ -681,7 +680,7 @@ const u16 __sw_id_hw[] = {
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
- if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
@@ -706,7 +705,7 @@ static const int __rule_hw_sz[] = {
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
- if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
@@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
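
The dropped '< 0' halves of the range checks in mcg.c were tautologies: the ids are enum parameters that the compiler may treat as unsigned, so the comparison is effectively always false and tends to trigger compiler warnings. The surviving upper-bound check is the one that matters, as in this trivial illustration with a hypothetical enum:

#include <linux/errno.h>

enum my_rule_id { MY_RULE_IPV4, MY_RULE_TCP, MY_RULE_UDP, MY_RULE_NUM };

static int my_check_rule(enum my_rule_id id)
{
	if (id >= MY_RULE_NUM)	/* 'id < 0' would add nothing here */
		return -EINVAL;
	return 0;
}
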
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 348bb8c7d9a..e582a41a802 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -455,6 +455,7 @@ struct mlx4_slave_state {
u8 last_cmd;
u8 init_port_mask;
bool active;
+ bool old_vlan_api;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -503,12 +504,28 @@ struct slave_list {
struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
};
+struct resource_allocator {
+ spinlock_t alloc_lock; /* protect quotas */
+ union {
+ int res_reserved;
+ int res_port_rsvd[MLX4_MAX_PORTS];
+ };
+ union {
+ int res_free;
+ int res_port_free[MLX4_MAX_PORTS];
+ };
+ int *quota;
+ int *allocated;
+ int *guaranteed;
+};
+
struct mlx4_resource_tracker {
spinlock_t lock;
/* tree for each resources */
struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
+ struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
};
#define SLAVE_EVENT_EQ_SIZE 128
@@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
@@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
+void mlx4_init_quotas(struct mlx4_dev *dev);
+
#endif /* MLX4_H */
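
The new struct resource_allocator gives the PF per-slave quota, allocated and guaranteed counters plus a free pool, per resource type (and per port for port-bound resources). The enforcement code lives in resource_tracker.c, which is not part of these hunks, so the following is only an illustrative sketch of how such a structure is typically consulted, not the driver's actual logic:

#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_res_alloc {
	spinlock_t lock;
	int *quota;	/* hard cap per slave */
	int *allocated;	/* currently held per slave */
	int res_free;	/* shared free pool */
};

static int my_res_try_take(struct my_res_alloc *ra, int slave, int count)
{
	int err = -EDQUOT;

	spin_lock(&ra->lock);
	if (ra->allocated[slave] + count <= ra->quota[slave] &&
	    ra->res_free >= count) {
		ra->allocated[slave] += count;
		ra->res_free -= count;
		err = 0;
	}
	spin_unlock(&ra->lock);
	return err;
}
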
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e3610d2..f3758de59c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
u16 num_frags;
u16 log_rx_info;
- struct mlx4_en_tx_ring *tx_ring;
- struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
- struct mlx4_en_cq *tx_cq;
- struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq **tx_cq;
+ struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
if ((cq->state & MLX4_CQ_LOCKED)) {
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
- int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+ int entries, int ring, enum cq_type mode, int node);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
- int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring,
+ int qpn, u32 size, u16 stride, int node);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- u32 size, u16 stride);
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST 5
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index f91719a08cb..b3ee9bafff5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
goto err_table;
}
mpt_entry = mailbox->buf;
-
- memset(mpt_entry, 0, sizeof *mpt_entry);
-
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
@@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
}
mpt_entry = mailbox->buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
* off, thus creating a memory window and not a memory region.
*/
@@ -755,14 +750,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
- if (!is_power_of_2(dev->caps.num_mpts))
- return -EINVAL;
-
/* Nothing to do for slaves - all MR handling is forwarded
* to the master */
if (mlx4_is_slave(dev))
return 0;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223acada..84cfb40bf45 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
err = -ENOMEM;
goto out;
}
- uar = kmalloc(sizeof *uar, GFP_KERNEL);
+ uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
if (!uar) {
- err = -ENOMEM;
- goto out;
+ uar = kmalloc(sizeof(*uar), GFP_KERNEL);
+ if (!uar) {
+ err = -ENOMEM;
+ goto out;
+ }
}
err = mlx4_uar_alloc(dev, uar);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 946e0af5fae..97d342fa503 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
u64 out_param = 0;
- int err;
+ int err = -EINVAL;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
- RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+ err = mlx4_cmd_imm(dev, mac, &out_param,
+ ((u32) port) << 8 | (u32) RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ }
+ if (err && err == -EINVAL && mlx4_is_slave(dev)) {
+ /* retry using old REG_MAC format */
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ dev->flags |= MLX4_FLAG_OLD_REG_MAC;
+ }
if (err)
return err;
@@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
- RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+ (void) mlx4_cmd_imm(dev, mac, &out_param,
+ ((u32) port) << 8 | (u32) RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ } else {
+ /* use old unregister mac format */
+ set_param_l(&out_param, port);
+ (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ }
return;
}
__mlx4_unregister_mac(dev, port, mac);
@@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
u64 out_param = 0;
int err;
+ if (vlan > 4095)
+ return -EINVAL;
+
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ err = mlx4_cmd_imm(dev, vlan, &out_param,
+ ((u32) port) << 8 | (u32) RES_VLAN,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
@@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int index;
- if (index < MLX4_VLAN_REGULAR) {
- mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
- return;
+ mutex_lock(&table->mutex);
+ if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
+ mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
+ goto out;
}
- mutex_lock(&table->mutex);
- if (!table->refs[index]) {
- mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ if (index < MLX4_VLAN_REGULAR) {
+ mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
goto out;
}
+
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify vlan table\n", index);
+ mlx4_dbg(dev, "Have %d more references for index %d,"
+ "no need to modify vlan table\n", table->refs[index],
+ index);
goto out;
}
table->entries[index] = 0;
@@ -410,23 +435,19 @@ out:
mutex_unlock(&table->mutex);
}
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
- u64 in_param = 0;
- int err;
+ u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&in_param, port);
- err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
- MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
- if (!err)
- mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
- index);
-
+ (void) mlx4_cmd_imm(dev, vlan, &out_param,
+ ((u32) port) << 8 | (u32) RES_VLAN,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
return;
}
- __mlx4_unregister_vlan(dev, port, index);
+ __mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
@@ -448,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
inbuf = inmailbox->buf;
outbuf = outmailbox->buf;
- memset(inbuf, 0, 256);
- memset(outbuf, 0, 256);
inbuf[0] = 1;
inbuf[1] = 1;
inbuf[2] = 1;
@@ -632,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, 256);
-
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -671,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
@@ -706,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
context->base_qpn = cpu_to_be32(base_qpn);
context->n_mac = dev->caps.log_num_macs;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -740,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
for (i = 0; i < MLX4_NUM_UP; i += 2)
context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
@@ -767,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
for (i = 0; i < MLX4_NUM_TC; i++) {
struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
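The MAC and VLAN registration paths above now pack the port number into the upper byte of the wrapped command's resource argument, ((u32) port) << 8 | RES_MAC, and a slave that gets -EINVAL back retries once with the old port-less format, latching MLX4_FLAG_OLD_REG_MAC so later calls skip the new encoding. A compact sketch of the encoding and the try-new-then-fall-back handshake; the command function is a stub, not the mlx4 firmware interface:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#define RES_MAC 3    /* illustrative resource id */

static inline uint32_t encode_port_res(uint8_t port, uint8_t res)
{
    return ((uint32_t)port << 8) | res;    /* port in bits 15:8, resource in 7:0 */
}

/* Stub for the wrapped command: an "old" master only understands a bare
 * resource id and rejects the port-in-upper-byte encoding. */
static int do_alloc_res(uint32_t in_mod, bool master_is_old)
{
    if (master_is_old && (in_mod >> 8))
        return -EINVAL;
    return 0;
}

int main(void)
{
    bool old_reg_mac = false;    /* per-device flag, latched on first fallback */
    bool master_is_old = true;
    uint8_t port = 2;
    int err = -EINVAL;

    if (!old_reg_mac)
        err = do_alloc_res(encode_port_res(port, RES_MAC), master_is_old);
    if (err == -EINVAL) {
        /* retry with the old, port-less format and remember it worked */
        err = do_alloc_res(RES_MAC, master_is_old);
        if (!err)
            old_reg_mac = true;
    }
    printf("err=%d old_reg_mac=%d\n", err, old_reg_mac);
    return 0;
}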
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index e891b058c1b..2715e61dbb7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
*/
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
- 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
+ (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
reserved_from_top);
if (err)
return err;
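The open-coded reserved-QP count in mlx4_init_qp_table() is replaced by a helper; judging from the removed expression, mlx4_num_reserved_sqps() presumably returns base_sqpn + 8, plus 16 * MLX4_MFUNC_MAX when the function is the master. A tiny sketch of that arithmetic (the constant and the is_master flag are illustrative):

#include <stdio.h>

#define MLX4_MFUNC_MAX 80    /* illustrative value */

/* Mirrors the removed expression: 8 QPs above base_sqpn are always
 * reserved, and a master additionally reserves 16 per possible function. */
static int num_reserved_sqps(int base_sqpn, int is_master)
{
    return base_sqpn + 8 + 16 * MLX4_MFUNC_MAX * !!is_master;
}

int main(void)
{
    printf("slave:  %d\n", num_reserved_sqps(64, 0));
    printf("master: %d\n", num_reserved_sqps(64, 1));
    return 0;
}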
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd687632111..2f3f2bc7f28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -55,6 +55,14 @@ struct mac_res {
u8 port;
};
+struct vlan_res {
+ struct list_head list;
+ u16 vlan;
+ int ref_count;
+ int vlan_index;
+ u8 port;
+};
+
struct res_common {
struct list_head list;
struct rb_node node;
@@ -102,7 +110,14 @@ struct res_qp {
int local_qpn;
atomic_t ref_count;
u32 qpc_flags;
+ /* saved qp params before VST enforcement in order to restore on VGT */
u8 sched_queue;
+ __be32 param3;
+ u8 vlan_control;
+ u8 fvl_rx;
+ u8 pri_path_fl;
+ u8 vlan_index;
+ u8 feup;
};
enum res_mtt_states {
@@ -266,6 +281,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
+ case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
@@ -274,10 +290,139 @@ static const char *ResourceType(enum mlx4_resource rt)
};
}
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
+static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int err = -EINVAL;
+ int allocated, free, reserved, guaranteed, from_free;
+
+ if (slave > dev->num_vfs)
+ return -EINVAL;
+
+ spin_lock(&res_alloc->alloc_lock);
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ free = (port > 0) ? res_alloc->res_port_free[port - 1] :
+ res_alloc->res_free;
+ reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
+ res_alloc->res_reserved;
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated + count > res_alloc->quota[slave])
+ goto out;
+
+ if (allocated + count <= guaranteed) {
+ err = 0;
+ } else {
+ /* portion may need to be obtained from free area */
+ if (guaranteed - allocated > 0)
+ from_free = count - (guaranteed - allocated);
+ else
+ from_free = count;
+
+ if (free - from_free > reserved)
+ err = 0;
+ }
+
+ if (!err) {
+ /* grant the request */
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+ res_alloc->res_port_free[port - 1] -= count;
+ } else {
+ res_alloc->allocated[slave] += count;
+ res_alloc->res_free -= count;
+ }
+ }
+
+out:
+ spin_unlock(&res_alloc->alloc_lock);
+ return err;
+}
+
+static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+
+ if (slave > dev->num_vfs)
+ return;
+
+ spin_lock(&res_alloc->alloc_lock);
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+ res_alloc->res_port_free[port - 1] += count;
+ } else {
+ res_alloc->allocated[slave] -= count;
+ res_alloc->res_free += count;
+ }
+
+ spin_unlock(&res_alloc->alloc_lock);
+ return;
+}
+
+static inline void initialize_res_quotas(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ enum mlx4_resource res_type,
+ int vf, int num_instances)
+{
+ res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+ res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
+ if (vf == mlx4_master_func_num(dev)) {
+ res_alloc->res_free = num_instances;
+ if (res_type == RES_MTT) {
+ /* reserved mtts will be taken out of the PF allocation */
+ res_alloc->res_free += dev->caps.reserved_mtts;
+ res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
+ res_alloc->quota[vf] += dev->caps.reserved_mtts;
+ }
+ }
+}
+
+void mlx4_init_quotas(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int pf;
+
+ /* quotas for VFs are initialized in mlx4_slave_cap */
+ if (mlx4_is_slave(dev))
+ return;
+
+ if (!mlx4_is_mfunc(dev)) {
+ dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev);
+ dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
+ dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
+ dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
+ dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
+ return;
+ }
+
+ pf = mlx4_master_func_num(dev);
+ dev->quotas.qp =
+ priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
+ dev->quotas.cq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
+ dev->quotas.srq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
+ dev->quotas.mtt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
+ dev->quotas.mpt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
+ int i, j;
int t;
priv->mfunc.master.res_tracker.slave_list =
@@ -298,8 +443,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[i];
+ res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ if (i == RES_MAC || i == RES_VLAN)
+ res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
+ (dev->num_vfs + 1) * sizeof(int),
+ GFP_KERNEL);
+ else
+ res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+
+ if (!res_alloc->quota || !res_alloc->guaranteed ||
+ !res_alloc->allocated)
+ goto no_mem_err;
+
+ spin_lock_init(&res_alloc->alloc_lock);
+ for (t = 0; t < dev->num_vfs + 1; t++) {
+ switch (i) {
+ case RES_QP:
+ initialize_res_quotas(dev, res_alloc, RES_QP,
+ t, dev->caps.num_qps -
+ dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev));
+ break;
+ case RES_CQ:
+ initialize_res_quotas(dev, res_alloc, RES_CQ,
+ t, dev->caps.num_cqs -
+ dev->caps.reserved_cqs);
+ break;
+ case RES_SRQ:
+ initialize_res_quotas(dev, res_alloc, RES_SRQ,
+ t, dev->caps.num_srqs -
+ dev->caps.reserved_srqs);
+ break;
+ case RES_MPT:
+ initialize_res_quotas(dev, res_alloc, RES_MPT,
+ t, dev->caps.num_mpts -
+ dev->caps.reserved_mrws);
+ break;
+ case RES_MTT:
+ initialize_res_quotas(dev, res_alloc, RES_MTT,
+ t, dev->caps.num_mtts -
+ dev->caps.reserved_mtts);
+ break;
+ case RES_MAC:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ }
+ break;
+ case RES_VLAN:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
+ res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] =
+ res_alloc->quota[t];
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
+ res_alloc->guaranteed[t] = 0;
+ }
+ break;
+ case RES_COUNTER:
+ res_alloc->quota[t] = dev->caps.max_counters;
+ res_alloc->guaranteed[t] = 0;
+ if (t == mlx4_master_func_num(dev))
+ res_alloc->res_free = res_alloc->quota[t];
+ break;
+ default:
+ break;
+ }
+ if (i == RES_MAC || i == RES_VLAN) {
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_rsvd[j] +=
+ res_alloc->guaranteed[t];
+ } else {
+ res_alloc->res_reserved += res_alloc->guaranteed[t];
+ }
+ }
+ }
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
- return 0 ;
+ return 0;
+
+no_mem_err:
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
+ return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -309,13 +551,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
int i;
if (priv->mfunc.master.res_tracker.slave_list) {
- if (type != RES_TR_FREE_STRUCTS_ONLY)
- for (i = 0 ; i < dev->num_slaves; i++)
+ if (type != RES_TR_FREE_STRUCTS_ONLY) {
+ for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);
+ }
+ /* free master's vlans */
+ i = dev->caps.function;
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ rem_slave_vlans(dev, i);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
if (type != RES_TR_FREE_SLAVES_ONLY) {
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
}
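The quota, guaranteed and allocated arrays set up in mlx4_init_resource_tracker() (and freed here) back the new mlx4_grant_resource()/mlx4_release_resource() accounting: a request is charged against the slave's guaranteed share first and only dips into the shared free pool while that pool still covers the guarantees reserved for everyone else. A stripped-down, single-resource model of that bookkeeping (struct layout and numbers are illustrative):

#include <stdio.h>

#define NUM_SLAVES 3

struct res_alloc {
    int quota[NUM_SLAVES];        /* hard per-function cap */
    int guaranteed[NUM_SLAVES];   /* always-available share */
    int allocated[NUM_SLAVES];    /* currently held */
    int res_free;                 /* shared free pool */
    int res_reserved;             /* total guaranteed across functions */
};

static int grant(struct res_alloc *ra, int slave, int count)
{
    int from_free, guaranteed = ra->guaranteed[slave];

    if (ra->allocated[slave] + count > ra->quota[slave])
        return -1;                       /* over quota */
    if (ra->allocated[slave] + count > guaranteed) {
        /* part (or all) of the request must come from the free pool */
        from_free = count;
        if (guaranteed > ra->allocated[slave])
            from_free = count - (guaranteed - ra->allocated[slave]);
        if (ra->res_free - from_free <= ra->res_reserved)
            return -1;                   /* would eat into others' guarantees */
    }
    ra->allocated[slave] += count;
    ra->res_free -= count;
    return 0;
}

static void release(struct res_alloc *ra, int slave, int count)
{
    ra->allocated[slave] -= count;
    ra->res_free += count;
}

int main(void)
{
    struct res_alloc ra = {
        .quota        = { 20, 20, 20 },
        .guaranteed   = { 4, 4, 4 },
        .res_free     = 32,
        .res_reserved = 12,    /* 3 functions x 4 guaranteed */
    };

    printf("grant 4  -> %d\n", grant(&ra, 0, 4));     /* within the guarantee */
    printf("grant 8  -> %d\n", grant(&ra, 0, 8));     /* dips into the free pool */
    printf("grant 12 -> %d\n", grant(&ra, 0, 12));    /* would exceed the quota */
    release(&ra, 0, 8);
    printf("free=%d allocated[0]=%d\n", ra.res_free, ra.allocated[0]);
    return 0;
}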
@@ -1229,12 +1486,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
case RES_OP_RESERVE:
count = get_param_l(&in_param);
align = get_param_h(&in_param);
- err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
return err;
}
@@ -1282,15 +1546,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
order = get_param_l(&in_param);
+
+ err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
+ if (err)
+ return err;
+
base = __mlx4_alloc_mtt_range(dev, order);
- if (base == -1)
+ if (base == -1) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM;
+ }
err = add_res_range(dev, slave, base, 1, RES_MTT, order);
- if (err)
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
- else
+ } else {
set_param_l(out_param, base);
+ }
return err;
}
@@ -1305,13 +1578,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
+ err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
+ if (err)
+ break;
+
index = __mlx4_mpt_reserve(dev);
- if (index == -1)
+ if (index == -1) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break;
+ }
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
}
@@ -1345,12 +1625,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_cq_alloc_icm(dev, &cqn);
+ err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err)
break;
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
}
@@ -1373,12 +1660,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_srq_alloc_icm(dev, &srqn);
+ err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err)
break;
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
}
@@ -1399,9 +1693,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct mac_res *res;
+ if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
+ return -EINVAL;
res = kzalloc(sizeof *res, GFP_KERNEL);
- if (!res)
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM;
+ }
res->mac = mac;
res->port = (u8) port;
list_add_tail(&res->list,
@@ -1421,6 +1719,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
kfree(res);
break;
}
@@ -1438,12 +1737,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
__mlx4_unregister_mac(dev, res->port, res->mac);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int err = -EINVAL;
int port;
@@ -1452,7 +1752,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE_AND_MAP)
return err;
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
@@ -1469,12 +1769,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
-static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port, int vlan_index)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ /* vlan found. update ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
+
+ if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
+ return -EINVAL;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
+ return -ENOMEM;
+ }
+ res->vlan = vlan;
+ res->port = (u8) port;
+ res->vlan_index = vlan_index;
+ res->ref_count = 1;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_VLAN]);
return 0;
}
+
+static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_VLAN,
+ 1, port);
+ kfree(res);
+ }
+ break;
+ }
+ }
+}
+
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+ int i;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ list_del(&res->list);
+ /* dereference the vlan the num times the slave referenced it */
+ /* dereference the vlan the number of times the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_vlan(dev, res->port, res->vlan);
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
+ kfree(res);
+ }
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int in_port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err;
+ u16 vlan;
+ int vlan_index;
+ int port;
+
+ port = !in_port ? get_param_l(out_param) : in_port;
+
+ if (!port || op != RES_OP_RESERVE_AND_MAP)
+ return -EINVAL;
+
+ /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
+ if (!in_port && port > 0 && port <= dev->caps.num_ports) {
+ slave_state[slave].old_vlan_api = true;
+ return 0;
+ }
+
+ vlan = (u16) in_param;
+
+ err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
+ if (!err) {
+ set_param_l(out_param, (u32) vlan_index);
+ err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
+ if (err)
+ __mlx4_unregister_vlan(dev, port, vlan);
+ }
+ return err;
+}
+
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
@@ -1484,15 +1886,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE)
return -EINVAL;
- err = __mlx4_counter_alloc(dev, &index);
+ err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err)
return err;
+ err = __mlx4_counter_alloc(dev, &index);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
- if (err)
+ if (err) {
__mlx4_counter_free(dev, index);
- else
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ } else {
set_param_l(out_param, index);
+ }
return err;
}
@@ -1528,7 +1938,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
int err;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
@@ -1556,12 +1966,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -1597,6 +2009,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
break;
case RES_OP_MAP_ICM:
@@ -1634,8 +2047,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
- if (!err)
+ if (!err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
+ }
return err;
}
@@ -1660,6 +2075,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
@@ -1694,6 +2110,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
@@ -1718,6 +2135,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
@@ -1730,14 +2148,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int port;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
@@ -1751,9 +2169,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
- return 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ if (slave_state[slave].old_vlan_api)
+ return 0;
+ if (!port)
+ return -EINVAL;
+ vlan_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_vlan(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1771,6 +2207,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
@@ -1803,7 +2240,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
int err = -EINVAL;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
@@ -1831,12 +2268,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -2136,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return err;
qp->local_qpn = local_qpn;
qp->sched_queue = 0;
+ qp->param3 = 0;
+ qp->vlan_control = 0;
+ qp->fvl_rx = 0;
+ qp->pri_path_fl = 0;
+ qp->vlan_index = 0;
+ qp->feup = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -2862,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
+ __be32 orig_param3 = qpc->param3;
+ u8 orig_vlan_control = qpc->pri_path.vlan_control;
+ u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+ u8 orig_pri_path_fl = qpc->pri_path.fl;
+ u8 orig_vlan_index = qpc->pri_path.vlan_index;
+ u8 orig_feup = qpc->pri_path.feup;
err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
@@ -2889,9 +3340,15 @@ out:
* essentially the QOS value provided by the VF. This will be useful
* if we allow dynamic changes from VST back to VGT
*/
- if (!err)
+ if (!err) {
qp->sched_queue = orig_sched_queue;
-
+ qp->param3 = orig_param3;
+ qp->vlan_control = orig_vlan_control;
+ qp->fvl_rx = orig_fvl_rx;
+ qp->pri_path_fl = orig_pri_path_fl;
+ qp->vlan_index = orig_vlan_index;
+ qp->feup = orig_feup;
+ }
put_res(dev, slave, qpn, RES_QP);
return err;
}
@@ -3498,6 +3955,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ if (!valid_reserved(dev, slave, qpn)) {
+ __mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_release_resource(dev, slave,
+ RES_QP, 1, 0);
+ }
kfree(qp);
state = 0;
break;
@@ -3569,6 +4031,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_SRQ, 1, 0);
kfree(srq);
state = 0;
break;
@@ -3635,6 +4099,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_CQ, 1, 0);
kfree(cq);
state = 0;
break;
@@ -3698,6 +4164,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_MPT, 1, 0);
kfree(mpt);
state = 0;
break;
@@ -3767,6 +4235,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave, RES_MTT,
+ 1 << mtt->order, 0);
kfree(mtt);
state = 0;
break;
@@ -3925,6 +4395,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -3964,7 +4435,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
- /*VLAN*/
+ rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave);
@@ -3991,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
&tracker->slave_list[work->slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
- u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+ u64 qp_path_mask_vlan_ctrl =
+ ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+ u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
int err;
@@ -4029,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
- upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
- upd_context->qp_context.pri_path.vlan_control = vlan_control;
- upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4049,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
spin_lock_irq(mlx4_tlock(dev));
continue;
}
- upd_context->qp_context.pri_path.sched_queue =
- qp->sched_queue & 0xC7;
- upd_context->qp_context.pri_path.sched_queue |=
- ((work->qos & 0x7) << 3);
+ if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+ upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+ else
+ upd_context->primary_addr_path_mask =
+ cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+ if (work->vlan_id == MLX4_VGT) {
+ upd_context->qp_context.param3 = qp->param3;
+ upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+ upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+ upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+ upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+ upd_context->qp_context.pri_path.feup = qp->feup;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue;
+ } else {
+ upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+ upd_context->qp_context.pri_path.vlan_control = vlan_control;
+ upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_context.pri_path.fvl_rx =
+ qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.fl =
+ qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ upd_context->qp_context.pri_path.feup =
+ qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue & 0xC7;
+ upd_context->qp_context.pri_path.sched_queue |=
+ ((work->qos & 0x7) << 3);
+ }
err = mlx4_cmd(dev, mailbox->dma,
qp->local_qpn & 0xffffff,
@@ -4081,7 +4582,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port,
- work->orig_vlan_ix);
+ work->orig_vlan_id);
out:
kfree(work);
return;
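The per-slave VLAN tracking added throughout this file (vlan_add_to_slave(), vlan_del_from_slave(), rem_slave_vlans()) keeps one list node per (vlan, port) pair with a reference count, so repeated registrations by a slave only bump the count and cleanup can unwind exactly that many hardware registrations. A compact userspace model of the same idea; the list and the "unregister" step are stand-ins, not the mlx4 calls:

#include <stdio.h>
#include <stdlib.h>

struct vlan_res {
    struct vlan_res *next;
    int vlan, port, ref_count;
};

static struct vlan_res *vlans;    /* models one slave's RES_VLAN list */

static int vlan_add(int vlan, int port)
{
    struct vlan_res *res;

    for (res = vlans; res; res = res->next)
        if (res->vlan == vlan && res->port == port) {
            res->ref_count++;    /* already tracked: just count it */
            return 0;
        }
    res = calloc(1, sizeof(*res));
    if (!res)
        return -1;
    res->vlan = vlan;
    res->port = port;
    res->ref_count = 1;
    res->next = vlans;
    vlans = res;
    return 0;
}

static void vlan_cleanup(void)    /* models rem_slave_vlans() */
{
    struct vlan_res *res, *next;
    int i;

    for (res = vlans; res; res = next) {
        next = res->next;
        /* undo one hardware registration per reference taken */
        for (i = 0; i < res->ref_count; i++)
            printf("unregister vlan %d on port %d\n", res->vlan, res->port);
        free(res);
    }
    vlans = NULL;
}

int main(void)
{
    vlan_add(100, 1);
    vlan_add(100, 1);    /* second registration: ref_count becomes 2 */
    vlan_add(200, 2);
    vlan_cleanup();
    return 0;
}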
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 79fd269e2c5..8fdf2375377 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -188,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
}
srq_context = mailbox->buf;
- memset(srq_context, 0, sizeof *srq_context);
-
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
srq->srqn);
srq_context->logstride = srq->wqe_shift - 4;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6ca30739625..8675d26a678 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -98,6 +98,7 @@ enum {
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
+ void *uout, int uout_size,
mlx5_cmd_cbk_t cbk,
void *context, int page_queue)
{
@@ -110,6 +111,8 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
ent->in = in;
ent->out = out;
+ ent->uout = uout;
+ ent->uout_size = uout_size;
ent->callback = cbk;
ent->context = context;
ent->cmd = cmd;
@@ -534,6 +537,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+ ent->op = be32_to_cpu(lay->in[0]) >> 16;
if (ent->in->next)
lay->in_ptr = cpu_to_be64(ent->in->next->dma);
lay->inlen = cpu_to_be32(ent->in->len);
@@ -628,7 +632,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
* 2. page queue commands do not support asynchrous completion
*/
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
- struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
+ struct mlx5_cmd_msg *out, void *uout, int uout_size,
+ mlx5_cmd_cbk_t callback,
void *context, int page_queue, u8 *status)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -642,7 +647,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (callback && page_queue)
return -EINVAL;
- ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
+ ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
+ page_queue);
if (IS_ERR(ent))
return PTR_ERR(ent);
@@ -670,10 +676,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
- spin_lock(&stats->lock);
+ spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
- spin_unlock(&stats->lock);
+ spin_unlock_irq(&stats->lock);
}
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
@@ -826,7 +832,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
int n;
int i;
- msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg), flags);
if (!msg)
return ERR_PTR(-ENOMEM);
@@ -1109,6 +1115,19 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
up(&cmd->sem);
}
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
+{
+ unsigned long flags;
+
+ if (msg->cache) {
+ spin_lock_irqsave(&msg->cache->lock, flags);
+ list_add_tail(&msg->list, &msg->cache->head);
+ spin_unlock_irqrestore(&msg->cache->lock, flags);
+ } else {
+ mlx5_free_cmd_msg(dev, msg);
+ }
+}
+
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1117,6 +1136,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
void *context;
int err;
int i;
+ ktime_t t1, t2, delta;
+ s64 ds;
+ struct mlx5_cmd_stats *stats;
+ unsigned long flags;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
@@ -1141,9 +1164,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
}
free_ent(cmd, ent->idx);
if (ent->callback) {
+ t1 = timespec_to_ktime(ent->ts1);
+ t2 = timespec_to_ktime(ent->ts2);
+ delta = ktime_sub(t2, t1);
+ ds = ktime_to_ns(delta);
+ if (ent->op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[ent->op];
+ spin_lock_irqsave(&stats->lock, flags);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irqrestore(&stats->lock, flags);
+ }
+
callback = ent->callback;
context = ent->context;
err = ent->ret;
+ if (!err)
+ err = mlx5_copy_from_msg(ent->uout,
+ ent->out,
+ ent->uout_size);
+
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+
free_cmd(ent);
callback(err, context);
} else {
@@ -1160,7 +1203,8 @@ static int status_to_err(u8 status)
return status ? -1 : 0; /* TBD more meaningful codes */
}
-static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
+static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+ gfp_t gfp)
{
struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1172,7 +1216,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
ent = &cmd->cache.med;
if (ent) {
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
msg = list_entry(ent->head.next, typeof(*msg), list);
/* For cached lists, we must explicitly state what is
@@ -1181,43 +1225,34 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
msg->len = in_size;
list_del(&msg->list);
}
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
}
if (IS_ERR(msg))
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
return msg;
}
-static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
-{
- if (msg->cache) {
- spin_lock(&msg->cache->lock);
- list_add_tail(&msg->list, &msg->cache->head);
- spin_unlock(&msg->cache->lock);
- } else {
- mlx5_free_cmd_msg(dev, msg);
- }
-}
-
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
-int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
- int out_size)
+static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size, mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_cmd_msg *inb;
struct mlx5_cmd_msg *outb;
int pages_queue;
+ gfp_t gfp;
int err;
u8 status = 0;
pages_queue = is_manage_pages(in);
+ gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
- inb = alloc_msg(dev, in_size);
+ inb = alloc_msg(dev, in_size, gfp);
if (IS_ERR(inb)) {
err = PTR_ERR(inb);
return err;
@@ -1229,13 +1264,14 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
goto out_in;
}
- outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
+ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
if (IS_ERR(outb)) {
err = PTR_ERR(outb);
goto out_in;
}
- err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
+ err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+ pages_queue, &status);
if (err)
goto out_out;
@@ -1248,14 +1284,30 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
- mlx5_free_cmd_msg(dev, outb);
+ if (!callback)
+ mlx5_free_cmd_msg(dev, outb);
out_in:
- free_msg(dev, inb);
+ if (!callback)
+ free_msg(dev, inb);
return err;
}
+
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size)
+{
+ return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+}
EXPORT_SYMBOL(mlx5_cmd_exec);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context)
+{
+ return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
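cmd_exec() now threads an optional (callback, context) pair and a user output buffer down to the command machinery: with a callback, message buffers come from GFP_ATOMIC, the completion handler copies the result into ent->uout and invokes the callback, and the stats locks become IRQ-safe because they are now also taken from that completion path. A minimal model of one execution routine shared by the synchronous and callback-based entry points; none of this is the mlx5 API, just the dispatch shape:

#include <stdio.h>

typedef void (*cmd_cbk_t)(int err, void *context);

/* One execution path for both modes: the reply always lands in the
 * caller's "out" buffer; with a callback the caller is notified instead
 * of reading the status from the return value (in the driver that
 * notification comes from the completion handler). */
static int cmd_exec_common(const char *in, char *out, size_t out_size,
                           cmd_cbk_t callback, void *context)
{
    int err = 0;

    snprintf(out, out_size, "reply-to:%s", in);    /* "execute" the command */
    if (callback) {
        callback(err, context);
        return 0;
    }
    return err;
}

static int cmd_exec(const char *in, char *out, size_t out_size)
{
    return cmd_exec_common(in, out, out_size, NULL, NULL);
}

static int cmd_exec_cb(const char *in, char *out, size_t out_size,
                       cmd_cbk_t callback, void *context)
{
    return cmd_exec_common(in, out, out_size, callback, context);
}

static void done(int err, void *context)
{
    printf("async completion: err=%d, out=\"%s\"\n", err, (char *)context);
}

int main(void)
{
    char out[64];

    cmd_exec("QUERY_HCA_CAP", out, sizeof(out));
    printf("sync reply: %s\n", out);

    cmd_exec_cb("CREATE_MKEY", out, sizeof(out), done, out);
    return 0;
}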
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 9c7194b26ee..80f6d127257 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -154,10 +154,10 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return 0;
stats = filp->private_data;
- spin_lock(&stats->lock);
+ spin_lock_irq(&stats->lock);
if (stats->n)
field = div64_u64(stats->sum, stats->n);
- spin_unlock(&stats->lock);
+ spin_unlock_irq(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
@@ -175,10 +175,10 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
struct mlx5_cmd_stats *stats;
stats = filp->private_data;
- spin_lock(&stats->lock);
+ spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
- spin_unlock(&stats->lock);
+ spin_unlock_irq(&stats->lock);
*pos += count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2231d93cc7a..64a61b286b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -354,7 +354,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
in->ctx.intr = vecidx;
- in->ctx.log_page_size = PAGE_SHIFT - 12;
+ in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->events_mask = cpu_to_be64(mask);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
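The EQ context no longer hard-codes PAGE_SHIFT - 12: the firmware counts in 4 KB units (MLX5_ADAPTER_PAGE_SHIFT), so the context now reports the EQ buffer's actual page shift relative to that unit instead of assuming the buffer is built from host PAGE_SIZE pages. A tiny demonstration of the arithmetic:

#include <stdio.h>

#define ADAPTER_PAGE_SHIFT 12    /* firmware pages are always 4 KB */

/* log_page_size is expressed in 4 KB units of the buffer's page size */
static int log_page_size(int buf_page_shift)
{
    return buf_page_shift - ADAPTER_PAGE_SHIFT;
}

int main(void)
{
    printf("4K pages:  log_page_size = %d\n", log_page_size(12));
    printf("64K pages: log_page_size = %d\n", log_page_size(16));
    return 0;
}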
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bc0f5fb66e2..40a9f5ed814 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -159,6 +159,36 @@ struct mlx5_reg_host_endianess {
u8 rsvd[15];
};
+
+#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
+
+enum {
+ MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
+ CAP_MASK(MLX5_CAP_OFF_DCT, 1),
+};
+
+/* selectively copy writable fields clearing any reserved area
+ */
+static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from)
+{
+ u64 v64;
+
+ to->log_max_qp = from->log_max_qp & 0x1f;
+ to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f;
+ to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
+ to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
+ to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
+ to->log_max_atomic_size_qp = from->log_max_atomic_size_qp;
+ to->log_max_atomic_size_dc = from->log_max_atomic_size_dc;
+ v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK;
+ to->flags = cpu_to_be64(v64);
+}
+
+enum {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
@@ -180,7 +210,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
}
query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
- query_ctx.hdr.opmod = cpu_to_be16(0x1);
+ query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR);
err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
query_out, sizeof(*query_out));
if (err)
@@ -192,8 +222,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
goto query_ex;
}
- memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
- sizeof(set_ctx->hca_cap));
+ copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
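Instead of copying the whole queried HCA capability structure back into the SET_HCA_CAP command, copy_rw_fields() copies only the writable fields and masks the flags word down to the bits that may legally be written (CMDIF_CSUM and DCT in this patch), leaving reserved areas zeroed. A small sketch of building and applying such a read-write mask; the bit positions below are illustrative, not the real MLX5_CAP_OFF_* values:

#include <stdio.h>
#include <stdint.h>

/* Build a mask of "size" bits starting at bit "pos" */
#define CAP_MASK(pos, size) ((uint64_t)((1 << (size)) - 1) << (pos))

#define OFF_CMDIF_CSUM 46    /* illustrative bit positions */
#define OFF_DCT        41

static const uint64_t rw_mask = CAP_MASK(OFF_CMDIF_CSUM, 2) | CAP_MASK(OFF_DCT, 1);

int main(void)
{
    uint64_t queried_flags = 0xffffffffffffffffULL;    /* everything reported set */
    uint64_t to_set = queried_flags & rw_mask;         /* keep only writable bits */

    printf("rw_mask = 0x%016llx\n", (unsigned long long)rw_mask);
    printf("to_set  = 0x%016llx\n", (unsigned long long)to_set);
    return 0;
}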
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 5b44e2e46da..35e514dc7b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -37,31 +37,41 @@
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
- struct mlx5_create_mkey_mbox_in *in, int inlen)
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out)
{
- struct mlx5_create_mkey_mbox_out out;
+ struct mlx5_create_mkey_mbox_out lout;
int err;
u8 key;
- memset(&out, 0, sizeof(out));
- spin_lock(&dev->priv.mkey_lock);
+ memset(&lout, 0, sizeof(lout));
+ spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
- spin_unlock(&dev->priv.mkey_lock);
+ spin_unlock_irq(&dev->priv.mkey_lock);
in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (callback) {
+ err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
+ callback, context);
+ return err;
+ } else {
+ err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
+ }
+
if (err) {
mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
return err;
}
- if (out.hdr.status) {
- mlx5_core_dbg(dev, "status %d\n", out.hdr.status);
- return mlx5_cmd_status_to_err(&out.hdr);
+ if (lout.hdr.status) {
+ mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
+ return mlx5_cmd_status_to_err(&lout.hdr);
}
- mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
- mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
+ mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+ mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
+ be32_to_cpu(lout.mkey), key, mr->key);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 7b12acf210f..37b6ad1f9a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -57,10 +57,13 @@ struct mlx5_pages_req {
};
struct fw_page {
- struct rb_node rb_node;
- u64 addr;
- struct page *page;
- u16 func_id;
+ struct rb_node rb_node;
+ u64 addr;
+ struct page *page;
+ u16 func_id;
+ unsigned long bitmask;
+ struct list_head list;
+ unsigned free_count;
};
struct mlx5_query_pages_inbox {
@@ -94,6 +97,11 @@ enum {
MAX_RECLAIM_TIME_MSECS = 5000,
};
+enum {
+ MLX5_MAX_RECLAIM_TIME_MILI = 5000,
+ MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096,
+};
+
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
struct rb_root *root = &dev->priv.page_root;
@@ -101,6 +109,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct rb_node *parent = NULL;
struct fw_page *nfp;
struct fw_page *tfp;
+ int i;
while (*new) {
parent = *new;
@@ -113,25 +122,29 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
return -EEXIST;
}
- nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
+ nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp)
return -ENOMEM;
nfp->addr = addr;
nfp->page = page;
nfp->func_id = func_id;
+ nfp->free_count = MLX5_NUM_4K_IN_PAGE;
+ for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
+ set_bit(i, &nfp->bitmask);
rb_link_node(&nfp->rb_node, parent, new);
rb_insert_color(&nfp->rb_node, root);
+ list_add(&nfp->list, &dev->priv.free_list);
return 0;
}
-static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
struct rb_root *root = &dev->priv.page_root;
struct rb_node *tmp = root->rb_node;
- struct page *result = NULL;
+ struct fw_page *result = NULL;
struct fw_page *tfp;
while (tmp) {
@@ -141,9 +154,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
} else if (tfp->addr > addr) {
tmp = tmp->rb_right;
} else {
- rb_erase(&tfp->rb_node, root);
- result = tfp->page;
- kfree(tfp);
+ result = tfp;
break;
}
}
@@ -176,12 +187,98 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+{
+ struct fw_page *fp;
+ unsigned n;
+
+ if (list_empty(&dev->priv.free_list)) {
+ return -ENOMEM;
+ mlx5_core_warn(dev, "\n");
+ }
+
+ fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
+ n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
+ if (n >= MLX5_NUM_4K_IN_PAGE) {
+ mlx5_core_warn(dev, "alloc 4k bug\n");
+ return -ENOENT;
+ }
+ clear_bit(n, &fp->bitmask);
+ fp->free_count--;
+ if (!fp->free_count)
+ list_del(&fp->list);
+
+ *addr = fp->addr + n * 4096;
+
+ return 0;
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+{
+ struct fw_page *fwp;
+ int n;
+
+ fwp = find_fw_page(dev, addr & PAGE_MASK);
+ if (!fwp) {
+ mlx5_core_warn(dev, "page not found\n");
+ return;
+ }
+
+ n = (addr & ~PAGE_MASK) % 4096;
+ fwp->free_count++;
+ set_bit(n, &fwp->bitmask);
+ if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
+ rb_erase(&fwp->rb_node, &dev->priv.page_root);
+ if (fwp->free_count != 1)
+ list_del(&fwp->list);
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(fwp->page);
+ kfree(fwp);
+ } else if (fwp->free_count == 1) {
+ list_add(&fwp->list, &dev->priv.free_list);
+ }
+}
+
+static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+{
+ struct page *page;
+ u64 addr;
+ int err;
+
+ page = alloc_page(GFP_HIGHUSER);
+ if (!page) {
+ mlx5_core_warn(dev, "failed to allocate page\n");
+ return -ENOMEM;
+ }
+ addr = dma_map_page(&dev->pdev->dev, page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ mlx5_core_warn(dev, "failed dma mapping page\n");
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ err = insert_page(dev, addr, page, func_id);
+ if (err) {
+ mlx5_core_err(dev, "failed to track allocated page\n");
+ goto out_mapping;
+ }
+
+ return 0;
+
+out_mapping:
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+out_alloc:
+ __free_page(page);
+
+ return err;
+}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
- struct page *page;
+ struct mlx5_manage_pages_inbox *nin;
int inlen;
u64 addr;
int err;
@@ -196,27 +293,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page) {
- err = -ENOMEM;
- mlx5_core_warn(dev, "failed to allocate page\n");
- goto out_alloc;
- }
- addr = dma_map_page(&dev->pdev->dev, page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->pdev->dev, addr)) {
- mlx5_core_warn(dev, "failed dma mapping page\n");
- __free_page(page);
- err = -ENOMEM;
- goto out_alloc;
- }
- err = insert_page(dev, addr, page, func_id);
+retry:
+ err = alloc_4k(dev, &addr);
if (err) {
- mlx5_core_err(dev, "failed to track allocated page\n");
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(page);
- err = -ENOMEM;
- goto out_alloc;
+ if (err == -ENOMEM)
+ err = alloc_system_page(dev, func_id);
+ if (err)
+ goto out_4k;
+
+ goto retry;
}
in->pas[i] = cpu_to_be64(addr);
}
@@ -226,7 +311,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->func_id = cpu_to_be16(func_id);
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
goto out_alloc;
@@ -247,25 +331,22 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
out_alloc:
if (notify_fail) {
- memset(in, 0, inlen);
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
- mlx5_core_warn(dev, "\n");
- }
- for (i--; i >= 0; i--) {
- addr = be64_to_cpu(in->pas[i]);
- page = remove_page(dev, addr);
- if (!page) {
- mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
- addr);
- continue;
+ nin = kzalloc(sizeof(*nin), GFP_KERNEL);
+ if (!nin) {
+ mlx5_core_warn(dev, "allocation failed\n");
+ goto out_4k;
}
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(page);
+ memset(&out, 0, sizeof(out));
+ nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+ if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
+ mlx5_core_warn(dev, "page notify failed\n");
+ kfree(nin);
}
+out_4k:
+ for (i--; i >= 0; i--)
+ free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
mlx5_vfree(in);
return err;
@@ -276,7 +357,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
{
struct mlx5_manage_pages_inbox in;
struct mlx5_manage_pages_outbox *out;
- struct page *page;
int num_claimed;
int outlen;
u64 addr;
@@ -315,13 +395,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
- page = remove_page(dev, addr);
- if (!page) {
- mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
- } else {
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(page);
- }
+ free_4k(dev, addr);
}
out_free:
@@ -381,14 +455,19 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
return give_pages(dev, func_id, npages, 0);
}
+enum {
+ MLX5_BLKS_FOR_RECLAIM_PAGES = 12
+};
+
static int optimal_reclaimed_pages(void)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_layout *lay;
int ret;
- ret = (sizeof(lay->in) + sizeof(block->data) -
- sizeof(struct mlx5_manage_pages_outbox)) / 8;
+ ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
+ sizeof(struct mlx5_manage_pages_outbox)) /
+ FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
return ret;
}
@@ -427,6 +506,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
dev->priv.page_root = RB_ROOT;
+ INIT_LIST_HEAD(&dev->priv.free_list);
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0951f7aca1e..822616e3c37 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
- &ctl->sg, 1, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!ctl->adesc)
return NETDEV_TX_BUSY;
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
- sg, 1, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!ctl->adesc)
goto out;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 075f4e21d33..c83d16dc7cd 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
ks_wrreg16(ks, KS_MARL, w);
- memcpy(ks->mac_addr, data, 6);
+ memcpy(ks->mac_addr, data, ETH_ALEN);
if (ks->enabled)
ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
}
netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
+ memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
ks_set_mac(ks, netdev->dev_addr);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 8ebc352bcbe..ddd252a3da9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev)
struct platform_info *info = pci_get_drvdata(pdev);
struct dev_info *hw_priv = &info->dev_info;
- pci_set_drvdata(pdev, NULL);
-
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
static char pcidev_name[] = "ksz884xp";
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index ea54d95e5b9..cbd01337925 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -26,7 +26,6 @@
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
#include "moxart_ether.h"
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 149355b52ad..68026f7e8ba 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
{
- int rc = true;
+ bool rc = true;
spin_lock(&ss->lock);
if ((ss->state & SLICE_LOCKED)) {
WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
{
- int rc = true;
+ bool rc = true;
spin_lock_bh(&ss->lock);
if ((ss->state & SLICE_LOCKED)) {
ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
/* Walk the multicast list, and add each address */
netdev_for_each_mc_addr(ha, dev) {
- memcpy(data, &ha->addr, 6);
+ memcpy(data, &ha->addr, ETH_ALEN);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, 6);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return 0;
}
@@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 7a5e295588b..64ec2a437f4 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
err_pci_request_regions:
free_netdev(dev);
@@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
pci_release_regions (pdev);
iounmap(ioaddr);
free_netdev (dev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 4da172ac559..7007d212f3e 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -264,6 +264,7 @@ int xtsonic_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
lp->device = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
netdev_boot_setup_check(dev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 51b00941302..9eeddbd0b2c 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8185,7 +8185,6 @@ mem_alloc_failed:
free_shared_mem(sp);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return ret;
@@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev)
iounmap(sp->bar0);
iounmap(sp->bar1);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5a20eaf903d..f9876ea8c8b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2072,6 +2072,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->config.tx_steering_type;
vpath->fifo.ndev = vdev->ndev;
vpath->fifo.pdev = vdev->pdev;
+
+ u64_stats_init(&vpath->fifo.stats.syncp);
+ u64_stats_init(&vpath->ring.stats.syncp);
+
if (vdev->config.tx_steering_type)
vpath->fifo.txq =
netdev_get_tx_queue(vdev->ndev, i);
@@ -4739,7 +4743,6 @@ _exit6:
_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
pci_disable_sriov(pdev);
_exit3:
@@ -4782,7 +4785,6 @@ static void vxge_remove(struct pci_dev *pdev)
vxge_free_mac_add_list(&vdev->vpaths[i]);
vxge_device_unregister(hldev);
- pci_set_drvdata(pdev, NULL);
/* Do not call pci_disable_sriov here, as it will break child devices */
vxge_hw_device_terminate(hldev);
iounmap(vdev->bar0);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 098b96dad66..2d045be4b5c 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5619,6 +5619,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
spin_lock_init(&np->lock);
spin_lock_init(&np->hwstats_lock);
SET_NETDEV_DEV(dev, &pci_dev->dev);
+ u64_stats_init(&np->swstats_rx_syncp);
+ u64_stats_init(&np->swstats_tx_syncp);
init_timer(&np->oom_kick);
np->oom_kick.data = (unsigned long) dev;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index a061b93efe6..ba3ca18611f 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
if (pldat->dma_buff_base_v == 0) {
- pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
- pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_out_free_irq;
+
pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
/* Allocate a chunk of memory for the DMA ethernet buffers
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 622aa75904c..7dc3e9b06d7 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1545,15 +1545,16 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac && is_valid_ether_addr(mac))
+ if (mac)
memcpy(netdev->dev_addr, mac, ETH_ALEN);
else
eth_hw_addr_random(netdev);
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (result)
+ goto err;
netif_carrier_off(netdev);
result = register_netdev(netdev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 6797b107587..2a9003071d5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
extern const char pch_driver_version[];
/* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
/* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
/* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
- u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 5a0f04c2c81..27ffe0ebf0a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/* Get ieee1588's dev information */
pdev = adapter->ptp_pdev;
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_OFF:
- adapter->hwts_tx_en = 0;
- break;
- case HWTSTAMP_TX_ON:
- adapter->hwts_tx_en = 1;
- break;
- default:
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- }
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
@@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
+ adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+
/* Clear out any old time stamps. */
pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index cac33e5f9bc..b6bdeb3c197 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev)
iounmap(hmp->base);
free_netdev(dev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d28593b1fc3..07a890eb72a 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -513,7 +513,6 @@ err_out_unmap_rx:
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
pci_release_regions (pdev);
free_netdev (dev);
- pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 5b65356e756..dbaa49e58b0 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev)
pasemi_dma_free_chan(&mac->tx->chan);
pasemi_dma_free_chan(&mac->rx->chan);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 32675e16021..9adcdbb4947 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
- int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
extern const struct ethtool_ops netxen_nic_ethtool_ops;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 32c790659f9..0c64c82b9ac 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -958,6 +958,7 @@ enum {
#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
/* MiniDIMM related macros */
#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 8375cbde996..67efe754367 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
mac_req = (nx_mac_req_t *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index cbd75f97ffb..3bec8cfebf9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
return 0;
}
+#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+ u32 temp;
+
+ /* Print ULA info only once for an adapter */
+ if (adapter->portnum != 0)
+ return;
+
+ temp = NXRD32(adapter, NETXEN_ULA_KEY);
+ switch (temp) {
+ case NETXEN_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "ULA adapter");
+ break;
+ case NETXEN_NON_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "non ULA adapter");
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
#ifdef CONFIG_PCIEAER
static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
{
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_msi;
}
+ netxen_read_ula_info(adapter);
+
err = netxen_setup_netdev(adapter, netdev);
if (err)
goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 91a8fcd6c24..0758b943535 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3916,7 +3916,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
iounmap(qdev->mem_map_registers);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81bf83604c4..631ea0ac1cd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 50
-#define QLCNIC_LINUX_VERSIONID "5.3.50"
+#define _QLCNIC_LINUX_SUBVERSION 52
+#define QLCNIC_LINUX_VERSIONID "5.3.52"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,8 +98,22 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-#define QLCNIC_MAX_TX_RINGS 8
-#define QLCNIC_MAX_SDS_RINGS 8
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
/*
* Following are the states of the Phantom. Phantom will set them and
@@ -533,6 +547,14 @@ struct qlcnic_host_sds_ring {
char name[IFNAMSIZ + 12];
} ____cacheline_internodealigned_in_smp;
+struct qlcnic_tx_queue_stats {
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+ u64 tx_bytes;
+};
+
struct qlcnic_host_tx_ring {
int irq;
void __iomem *crb_intr_mask;
@@ -544,10 +566,7 @@ struct qlcnic_host_tx_ring {
u32 sw_consumer;
u32 num_desc;
- u64 xmit_on;
- u64 xmit_off;
- u64 xmit_called;
- u64 xmit_finished;
+ struct qlcnic_tx_queue_stats tx_stats;
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
@@ -940,8 +959,6 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
-#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
-#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -961,8 +978,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
#define __QLCNIC_DIAG_MODE 13
-#define __QLCNIC_DCB_STATE 14
-#define __QLCNIC_DCB_IN_AEN 15
+#define __QLCNIC_MAINTENANCE_MODE 16
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -1013,7 +1029,6 @@ struct qlcnic_adapter {
unsigned long state;
u32 flags;
- int max_drv_tx_rings;
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
@@ -1021,7 +1036,13 @@ struct qlcnic_adapter {
u16 max_jumbo_rxd;
u8 max_rds_rings;
- u8 max_sds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
u8 rx_csum;
u8 portnum;
@@ -1199,6 +1220,7 @@ struct qlcnic_npar_info {
u8 promisc_mode;
u8 offload_flags;
u8 pci_func;
+ u8 mac[ETH_ALEN];
};
struct qlcnic_eswitch {
@@ -1543,12 +1565,13 @@ int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
-int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
-int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1641,19 +1664,18 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int err, tx_q;
-
- tx_q = adapter->max_drv_tx_rings;
+ int err;
- netdev->num_tx_queues = tx_q;
- netdev->real_num_tx_queues = tx_q;
+ netdev->num_tx_queues = adapter->drv_tx_rings;
+ netdev->real_num_tx_queues = adapter->drv_tx_rings;
- err = netif_set_real_num_tx_queues(netdev, tx_q);
+ err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- tx_q);
+ adapter->drv_tx_rings);
else
- dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+ dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
@@ -1695,7 +1717,7 @@ struct qlcnic_hardware_ops {
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
- int (*setup_intr) (struct qlcnic_adapter *, u8, int);
+ int (*setup_intr) (struct qlcnic_adapter *);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1766,10 +1788,9 @@ static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
- u8 num_intr, int txq)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
+ return adapter->ahw->hw_ops->setup_intr(adapter);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -2005,7 +2026,7 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
/* When operating in a multi tx mode, driver needs to write 0x1
@@ -2115,98 +2136,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
-
-static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_hw_capability)
- return dcb->ops->get_hw_capability(adapter);
-
- return 0;
-}
-
-static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->free)
- dcb->ops->free(adapter);
-}
-
-static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->attach)
- return dcb->ops->attach(adapter);
-
- return 0;
-}
-
-static inline int
-qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->query_hw_capability)
- return dcb->ops->query_hw_capability(adapter, buf);
-
- return 0;
-}
-
-static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_info)
- dcb->ops->get_info(adapter);
-}
-
-static inline int
-qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->query_cee_param)
- return dcb->ops->query_cee_param(adapter, buf, type);
-
- return 0;
-}
-
-static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_cee_cfg)
- return dcb->ops->get_cee_cfg(adapter);
-
- return 0;
-}
-
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(adapter, flag);
-}
-
-static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *msg)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->handle_aen)
- dcb->ops->handle_aen(adapter, msg);
-}
-
-static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->init_dcbnl_ops)
- dcb->ops->init_dcbnl_ops(adapter);
-}
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 3ca00e05f23..b1cb0ffb15c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
-#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
@@ -268,20 +267,18 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
+ num_msix = adapter->drv_sds_rings;
+
/* account for AEN interrupt MSI-X based interrupts */
num_msix += 1;
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- num_msix += adapter->max_drv_tx_rings;
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
@@ -325,7 +322,8 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
- writel(1, adapter->tgt_mask_reg);
+ if (adapter->tgt_mask_reg)
+ writel(1, adapter->tgt_mask_reg);
}
/* Enable MSI-x and INT-x interrupts */
@@ -498,8 +496,11 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
num_msix = 0;
msleep(20);
- synchronize_irq(adapter->msix_entries[num_msix].vector);
- free_irq(adapter->msix_entries[num_msix].vector, adapter);
+
+ if (adapter->msix_entries) {
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+ }
}
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
@@ -760,6 +761,9 @@ int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
int cmd_type, err, opcode;
unsigned long timeout;
+ if (!mbx)
+ return -EIO;
+
opcode = LSW(cmd->req.arg[0]);
cmd_type = cmd->type;
err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
@@ -902,7 +906,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
QLCNIC_MBX_RSP(event[0]));
break;
case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
- qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
break;
default:
dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -979,14 +983,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
- num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_ADD_RCV_RINGS);
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
index = 2;
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
memset(&sds_mbx, 0, sds_mbx_size);
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
@@ -1021,7 +1025,7 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
index = 0;
/* status descriptor ring */
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
sds = &recv_ctx->sds_rings[i];
sds->crb_sts_consumer = ahw->pci_base0 +
mbx_out->host_csmr[index];
@@ -1079,10 +1083,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
num_rds = adapter->max_rds_rings;
- if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
- num_sds = adapter->max_sds_rings;
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
else
- num_sds = QLCNIC_MAX_RING_SETS;
+ num_sds = QLCNIC_MAX_SDS_RINGS;
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
@@ -1183,7 +1187,7 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
err = qlcnic_83xx_add_rings(adapter);
out:
qlcnic_free_mbx_args(&cmd);
@@ -1239,9 +1243,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx.size = tx->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- msix_vector = adapter->max_sds_rings + ring;
+ msix_vector = adapter->drv_sds_rings + ring;
else
- msix_vector = adapter->max_sds_rings - 1;
+ msix_vector = adapter->drv_sds_rings - 1;
msix_id = ahw->intr_tbl[msix_vector].id;
} else {
msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
@@ -1264,7 +1268,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
- cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
buf = &cmd.req.arg[6];
memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
/* send the mailbox command*/
@@ -1279,7 +1284,7 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
tx->ctx_id = mbx_out->ctx_id;
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
@@ -1290,7 +1295,7 @@ out:
}
static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
- int num_sds_ring)
+ u8 num_sds_ring)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
@@ -1306,7 +1311,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
@@ -1320,7 +1325,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (ret) {
qlcnic_detach(adapter);
if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
- adapter->max_sds_rings = num_sds_ring;
+ adapter->drv_sds_rings = num_sds_ring;
qlcnic_attach(adapter);
}
netif_device_attach(netdev);
@@ -1333,7 +1338,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_enable_intr(adapter, sds_ring);
}
@@ -1354,7 +1359,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
- int max_sds_rings)
+ u8 drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
@@ -1362,7 +1367,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -1386,7 +1391,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
}
}
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1648,7 +1653,9 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ int ret = 0, loop = 0;
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
@@ -1670,7 +1677,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
mode == QLCNIC_ILB_MODE ? "internal" : "external");
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_alloc;
@@ -1708,10 +1715,11 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
qlcnic_83xx_clear_lb_mode(adapter, mode);
free_diag_res:
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_alloc:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
@@ -1722,7 +1730,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
struct qlcnic_hardware_context *ahw = adapter->ahw;
int temp;
- netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n",
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
ahw->extend_lb_time);
temp = ahw->extend_lb_time * 1000;
*max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
@@ -2276,9 +2284,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
npar_info->max_linkspeed_reg_offset = temp;
}
- if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS)
- memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
- sizeof(ahw->extra_capability));
+
+ memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+ sizeof(ahw->extra_capability));
out:
qlcnic_free_mbx_args(&cmd);
@@ -2321,19 +2329,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
i++;
memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
i = i + 3;
- if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "id = %d active = %d type = %d\n"
- "\tport = %d min bw = %d max bw = %d\n"
- "\tmac_addr = %pM\n", pci_info->id,
- pci_info->active, pci_info->type,
- pci_info->default_port,
- pci_info->tx_min_bw,
- pci_info->tx_max_bw, pci_info->mac);
}
- if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
-
} else {
dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
err = -EIO;
@@ -3061,11 +3057,14 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
int status = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* Get port configuration info */
- status = qlcnic_83xx_get_port_info(adapter);
- /* Get Link Status related info */
- config = qlcnic_83xx_test_link(adapter);
- ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ }
+
/* hard code until there is a way to get it from flash */
ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
@@ -3279,12 +3278,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
{
return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
- sizeof(adapter->ahw->ext_reg_tbl)) +
- (ARRAY_SIZE(qlcnic_83xx_reg_tbl) +
- sizeof(adapter->ahw->reg_tbl));
+ sizeof(*adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(*adapter->ahw->reg_tbl));
}
int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
@@ -3305,10 +3304,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
u32 data;
u16 intrpt_id, id;
- u8 val;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret;
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev, "Device is resetting\n");
@@ -3321,7 +3321,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
}
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_irq;
@@ -3358,10 +3358,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
done:
qlcnic_free_mbx_args(&cmd);
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
@@ -3381,10 +3382,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
}
config = ahw->port_config;
if (config & QLC_83XX_CFG_STD_PAUSE) {
- if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+ switch (MSW(config)) {
+ case QLC_83XX_TX_PAUSE:
+ pause->tx_pause = 1;
+ break;
+ case QLC_83XX_RX_PAUSE:
+ pause->rx_pause = 1;
+ break;
+ case QLC_83XX_TX_RX_PAUSE:
+ default:
+ /* Backward compatibility for existing
+ * flash definitions
+ */
pause->tx_pause = 1;
- if (config & QLC_83XX_CFG_STD_RX_PAUSE)
pause->rx_pause = 1;
+ }
}
if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3439,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
} else if (!pause->rx_pause && !pause->tx_pause) {
- ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+ QLC_83XX_CFG_STD_PAUSE);
}
status = qlcnic_83xx_set_port_config(adapter);
if (status) {
@@ -3503,7 +3516,7 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
if (err)
return err;
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
qlcnic_83xx_set_vnic_opmode(adapter);
} else {
@@ -3524,12 +3537,15 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
{
- INIT_COMPLETION(mbx->completion);
+ reinit_completion(&mbx->completion);
set_bit(QLC_83XX_MBX_READY, &mbx->status);
}
void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
{
+ if (!mbx)
+ return;
+
destroy_workqueue(mbx->work_q);
kfree(mbx);
}
@@ -3650,6 +3666,9 @@ void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ if (!mbx)
+ return;
+
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
complete(&mbx->completion);
cancel_work_sync(&mbx->work);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 533e150503a..4cae6caa6bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -61,7 +61,6 @@
#define QLC_83XX_HOST_SDS_MBX_IDX 8
#define QLCNIC_HOST_RDS_MBX_IDX 88
-#define QLCNIC_MAX_RING_SETS 8
/* Pause control registers */
#define QLC_83XX_SRE_SHIM_REG 0x0D200284
@@ -183,8 +182,8 @@ struct qlcnic_rcv_mbx_out {
u8 num_pci_func;
u8 state;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
struct qlcnic_add_rings_mbx_out {
@@ -197,8 +196,8 @@ struct qlcnic_add_rings_mbx_out {
u8 sts_num;
u8 rcv_num;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
/* Transmit context mailbox inbox registers
@@ -363,6 +362,9 @@ enum qlcnic_83xx_states {
#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE 0x10
+#define QLC_83XX_RX_PAUSE 0x20
+#define QLC_83XX_TX_RX_PAUSE 0x30
#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
@@ -412,8 +414,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
-#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
-#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
#define QLCNIC_BRDTYPE_83XX_10G 0x0083
@@ -521,7 +521,7 @@ enum qlc_83xx_ext_regs {
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -626,7 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u8);
int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f09e787af0b..89208e5b25d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 owner;
u32 val;
/* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ } else {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_dump_fw(adapter);
}
return -EIO;
}
@@ -897,7 +902,7 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
- if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
if (qlcnic_check_diag_status(adapter)) {
@@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
qlcnic_83xx_periodic_tasks(adapter);
+ /* Do not reschedule if firmware is in a hung state and auto
+ * recovery is disabled
+ */
+ if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
+ return;
+
/* Re-schedule the function */
if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2022,6 +2033,8 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
+ adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = ahw->max_rx_ques;
/* eSwitch capability indicates vNIC mode.
* vNIC and SRIOV are mutually exclusive operational modes.
* If SR-IOV capability is detected, SR-IOV physical function
@@ -2034,7 +2047,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
return QLC_83XX_DEFAULT_OPMODE;
if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
- return QLC_83XX_VIRTUAL_NIC_MODE;
+ return QLCNIC_VNIC_MODE;
return QLC_83XX_DEFAULT_OPMODE;
}
@@ -2048,15 +2061,20 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
if (ret == -EIO)
return -EIO;
- if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ adapter->max_sds_rings = ahw->max_rx_ques;
+ adapter->max_tx_rings = ahw->max_tx_ques;
} else {
return -EIO;
}
@@ -2159,13 +2177,34 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
return err;
}
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+ u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (!adapter->ahw->msix_supported) {
+ rx_cnt = QLCNIC_SINGLE_RING;
+ tx_cnt = QLCNIC_SINGLE_RING;
+ }
+
+ /* compute and set driver tx and sds ring counts */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_dcb *dcb;
int err = 0;
ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ qlcnic_83xx_init_rings(adapter);
+
err = qlcnic_83xx_init_mailbox_work(adapter);
if (err)
goto exit;
@@ -2178,22 +2217,26 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
return err;
}
+ if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+ qlcnic_83xx_read_flash_mfg_id(adapter)) {
+ dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+ err = -ENOTRECOVERABLE;
+ goto detach_mbx;
+ }
+
err = qlcnic_83xx_check_hw_status(adapter);
if (err)
goto detach_mbx;
- if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
- qlcnic_83xx_read_flash_mfg_id(adapter);
-
err = qlcnic_83xx_get_fw_info(adapter);
if (err)
goto detach_mbx;
err = qlcnic_83xx_idc_init(adapter);
if (err)
- goto clear_fw_info;
+ goto detach_mbx;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto disable_intr;
@@ -2215,13 +2258,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
+
/* Perform operating mode specific initialization */
err = adapter->nic_ops->init_driver(adapter);
if (err)
goto disable_mbx_intr;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
@@ -2233,12 +2279,10 @@ disable_mbx_intr:
disable_intr:
qlcnic_teardown_intr(adapter);
-clear_fw_info:
- kfree(ahw->fw_info);
-
detach_mbx:
qlcnic_83xx_detach_mailbox_work(adapter);
qlcnic_83xx_free_mailbox(ahw->mailbox);
+ ahw->mailbox = NULL;
exit:
return err;
}
@@ -2251,7 +2295,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
clear_bit(QLC_83XX_MBX_READY, &idc->status);
cancel_delayed_work_sync(&adapter->fw_work);
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 0248a4c2f5d..734d28602ac 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
{
- int err = -EIO;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_npar_info *npar;
+ int i, err = -EIO;
qlcnic_83xx_get_minidump_template(adapter);
+
if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
if (qlcnic_init_pci_info(adapter))
return err;
+ npar = adapter->npars;
+
+ for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+ npar->pci_func, npar->active, npar->type,
+ npar->phy_port, npar->min_bw, npar->max_bw,
+ npar->mac);
+ }
+
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
+ ahw->max_pci_func, ahw->act_pci_func);
+
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
@@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
return err;
qlcnic_83xx_config_vnic_buff_descriptors(adapter);
- adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
qlcnic_83xx_enable_vnic_mode(adapter, 1);
- dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
- adapter->ahw->fw_hal_version);
+ dev_info(dev, "HAL Version: %d, Management function\n",
+ ahw->fw_hal_version);
return 0;
}
@@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
- int func, int *port_id)
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
{
struct qlcnic_info nic_info;
int err = 0;
@@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
else
err = -EIO;
- return err;
-}
-
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
-{
- int id, err = 0;
-
- err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
- if (err)
- return err;
-
- if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
- if (!qlcnic_enable_eswitch(adapter, id, 1))
- adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
- else
- err = -EIO;
- }
+ if (!err)
+ adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 86850dd633a..859cb161fc6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -270,7 +270,7 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
int err;
nrds_rings = adapter->max_rds_rings;
- nsds_rings = adapter->max_sds_rings;
+ nsds_rings = adapter->drv_sds_rings;
rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
nsds_rings);
@@ -475,7 +475,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- temp_nsds_rings = adapter->max_sds_rings;
+ temp_nsds_rings = adapter->drv_sds_rings;
index = temp_nsds_rings + ring;
msix_id = ahw->intr_tbl[index].id;
prq->msi_index = cpu_to_le16(msix_id);
@@ -512,7 +512,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
intr_mask = ahw->intr_tbl[index].src;
tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
@@ -582,7 +582,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
@@ -616,7 +616,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
@@ -664,7 +664,7 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
if (err)
goto err_out;
- for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
err = qlcnic_fw_cmd_create_tx_ctx(dev,
&dev->tx_ring[ring],
ring);
@@ -703,7 +703,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_del_rx_ctx(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
@@ -733,7 +733,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->hw_consumer != NULL) {
dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
@@ -764,7 +764,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
@@ -895,6 +895,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ adapter->max_tx_rings = npar_info->max_tx_ques;
+ adapter->max_sds_rings = npar_info->max_rx_ques;
}
qlcnic_free_mbx_args(&cmd);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index d62d5ce432e..86bca7c14f9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
static void qlcnic_dcb_aen_work(struct work_struct *);
static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
-static void __qlcnic_dcb_free(struct qlcnic_adapter *);
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
-
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
-
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
bool tsa_capability;
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
.register_aen = qlcnic_83xx_dcb_register_aen,
- .handle_aen = qlcnic_83xx_dcb_handle_aen,
+ .aen_handler = qlcnic_83xx_dcb_aen_handler,
};
static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
.get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_82xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
- .handle_aen = qlcnic_82xx_dcb_handle_aen,
+ .aen_handler = qlcnic_82xx_dcb_aen_handler,
};
static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map)
return j;
}
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
{
- if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
- adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+ if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
}
static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
@@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
}
-int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
@@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
adapter->dcb = dcb;
dcb->adapter = adapter;
qlcnic_set_dcb_ops(adapter);
+ dcb->state = 0;
return 0;
}
-static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
+ struct qlcnic_adapter *adapter;
if (!dcb)
return;
- qlcnic_dcb_register_aen(adapter, 0);
+ adapter = dcb->adapter;
+ qlcnic_dcb_register_aen(dcb, 0);
- while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
adapter->dcb = NULL;
}
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
- qlcnic_dcb_get_hw_capability(adapter);
- qlcnic_dcb_get_cee_cfg(adapter);
- qlcnic_dcb_register_aen(adapter, 1);
+ qlcnic_dcb_get_hw_capability(dcb);
+ qlcnic_dcb_get_cee_cfg(dcb);
+ qlcnic_dcb_register_aen(dcb, 1);
}
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
int err = 0;
INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
if (!dcb->wq) {
- dev_err(&adapter->pdev->dev,
+ dev_err(&dcb->adapter->pdev->dev,
"DCB workqueue allocation failed. DCB will be disabled\n");
return -1;
}
@@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
goto out_free_cfg;
}
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(dcb);
return 0;
out_free_cfg:
@@ -345,9 +346,9 @@ out_free_wq:
return err;
}
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
- char *buf)
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
{
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_cmd_args cmd;
u32 mbx_out;
int err;
@@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
return err;
}
-static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
{
- struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
u32 mbx_out;
int err;
memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
- err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+ err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
if (err)
return err;
@@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
if (cap->max_num_tc > QLC_DCB_MAX_TC ||
cap->max_ets_tc > cap->max_num_tc ||
cap->max_pfc_tc > cap->max_num_tc) {
- dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+ dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
return -EINVAL;
}
return err;
}
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cfg *cfg = dcb->cfg;
struct qlcnic_dcb_capability *cap;
u32 mbx_out;
int err;
- err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
if (err)
return err;
@@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
return err;
}
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
char *buf, u8 type)
{
u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
struct device *dev = &adapter->pdev->dev;
dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
return -EINVAL;
}
- addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
- GFP_KERNEL);
+ addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
@@ -488,72 +489,67 @@ out:
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
- dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+ dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
return err;
}
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
{
struct qlcnic_dcb_mbx_params *mbx;
int err;
- mbx = adapter->dcb->param;
+ mbx = dcb->param;
if (!mbx)
return 0;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
QLC_DCB_LOCAL_PARAM_FWID);
if (err)
return err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
QLC_DCB_OPER_PARAM_FWID);
if (err)
return err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
QLC_DCB_PEER_PARAM_FWID);
if (err)
return err;
mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
- qlcnic_dcb_data_cee_param_map(adapter);
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
return err;
}
static void qlcnic_dcb_aen_work(struct work_struct *work)
{
- struct qlcnic_adapter *adapter;
struct qlcnic_dcb *dcb;
dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
- adapter = dcb->adapter;
- qlcnic_dcb_get_cee_cfg(adapter);
- clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+ qlcnic_dcb_get_cee_cfg(dcb);
+ clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
}
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *data)
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
return;
queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
}
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
u32 mbx_out;
int err;
- err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
if (err)
return err;
@@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
return err;
}
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
char *buf, u8 idx)
{
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_dcb_mbx_params mbx_out;
int err, i, j, k, max_app, size;
struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@ out:
return err;
}
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
int err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
if (err)
return err;
- qlcnic_dcb_data_cee_param_map(adapter);
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
- bool flag)
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
{
u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_cmd_args cmd;
int err;
@@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
return err;
}
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *data)
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
u32 *val = data;
- if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
return;
if (*val & BIT_8)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
else
- clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ clear_bit(QLCNIC_DCB_STATE, &dcb->state);
queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
}
@@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
}
static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
{
- memcpy(addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(addr, netdev->perm_addr, netdev->addr_len);
}
static void
@@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
*prio = *pgid = *bw_per = *up_tc_map = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->tc_param_valid)
return;
@@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
*bw_pct = 0;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->tc_param_valid)
return;
@@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
*setting = 0;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->pfc_mode_enable)
return;
@@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
switch (capid) {
@@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return -EINVAL;
switch (attr) {
@@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
.protocol = id,
};
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb *dcb = adapter->dcb;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
return 0;
return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cee *type;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 1;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
*app_count = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
struct qlcnic_dcb_app *app;
int i, j;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
struct qlcnic_dcb_cee *peer;
u8 i, j, k, map;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
pfc->pfc_en = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &cfg->type[QLC_DCB_PEER_IDX];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index b87ce9fb503..c04ae0cdc10 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -8,26 +8,29 @@
#ifndef __QLCNIC_DCBX_H
#define __QLCNIC_DCBX_H
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+#define QLCNIC_DCB_STATE 0
+#define QLCNIC_DCB_AEN_MODE 1
#ifdef CONFIG_QLCNIC_DCB
-int __qlcnic_register_dcb(struct qlcnic_adapter *);
+int qlcnic_register_dcb(struct qlcnic_adapter *);
#else
-static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{ return 0; }
#endif
+struct qlcnic_dcb;
+
struct qlcnic_dcb_ops {
- void (*init_dcbnl_ops) (struct qlcnic_adapter *);
- void (*free) (struct qlcnic_adapter *);
- int (*attach) (struct qlcnic_adapter *);
- int (*query_hw_capability) (struct qlcnic_adapter *, char *);
- int (*get_hw_capability) (struct qlcnic_adapter *);
- void (*get_info) (struct qlcnic_adapter *);
- int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
- int (*get_cee_cfg) (struct qlcnic_adapter *);
- int (*register_aen) (struct qlcnic_adapter *, bool);
- void (*handle_aen) (struct qlcnic_adapter *, void *);
+ int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+ int (*get_hw_capability) (struct qlcnic_dcb *);
+ int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+ void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+ int (*register_aen) (struct qlcnic_dcb *, bool);
+ void (*aen_handler) (struct qlcnic_dcb *, void *);
+ int (*get_cee_cfg) (struct qlcnic_dcb *);
+ void (*get_info) (struct qlcnic_dcb *);
+ int (*attach) (struct qlcnic_dcb *);
+ void (*free) (struct qlcnic_dcb *);
};
struct qlcnic_dcb {
@@ -37,5 +40,85 @@ struct qlcnic_dcb {
struct workqueue_struct *wq;
struct qlcnic_dcb_ops *ops;
struct qlcnic_dcb_cfg *cfg;
+ unsigned long state;
};
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+ kfree(dcb);
+ dcb = NULL;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(dcb);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(dcb, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(dcb, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(dcb);
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
+{
+ if (dcb && dcb->ops->register_aen)
+ dcb->ops->register_aen(dcb, flag);
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+ if (dcb && dcb->ops->aen_handler)
+ dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(dcb);
+}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index ff83a9fcd4c..b36c02fafcf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -27,43 +27,36 @@ static const u32 qlcnic_fw_dump_level[] = {
};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"xmit_called", QLC_SIZEOF(stats.xmitcalled),
- QLC_OFF(stats.xmitcalled)},
+ QLC_OFF(stats.xmitcalled)},
{"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
- QLC_OFF(stats.xmitfinished)},
- {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ QLC_OFF(stats.xmitfinished)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
{"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
{"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
- {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
- QLC_OFF(stats.rx_dma_map_error)},
- {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
- QLC_OFF(stats.tx_dma_map_error)},
{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
- QLC_OFF(stats.mac_filter_limit_overrun)},
+ QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
- "rx unicast frames",
- "rx multicast frames",
- "rx broadcast frames",
- "rx dropped frames",
- "rx errors",
- "rx local frames",
- "rx numbytes",
"tx unicast frames",
"tx multicast frames",
"tx broadcast frames",
@@ -71,6 +64,13 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx errors",
"tx local frames",
"tx numbytes",
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
};
static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
@@ -126,13 +126,16 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
"xmit_on",
"xmit_off",
"xmit_called",
"xmit_finished",
+ "tx_bytes",
};
+#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
@@ -187,8 +190,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
return -1;
}
-#define QLCNIC_RING_REGS_COUNT 20
-#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
+
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
@@ -219,7 +222,15 @@ static const u32 ext_diag_registers[] = {
};
#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_ETHTOOL_REGS_VER 3
+#define QLCNIC_ETHTOOL_REGS_VER 4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+ (adapter->max_rds_rings * 2) +
+ (adapter->drv_sds_rings * 3) + 5;
+ return ring_regs_cnt * sizeof(u32);
+}
static int qlcnic_get_regs_len(struct net_device *dev)
{
@@ -231,7 +242,9 @@ static int qlcnic_get_regs_len(struct net_device *dev)
else
len = sizeof(ext_diag_registers) + sizeof(diag_registers);
- return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
+ len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+ len += qlcnic_get_ring_regs_len(adapter);
+ return len;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +506,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_tx_ring *tx_ring;
u32 *regs_buff = p;
int ring, i = 0;
@@ -512,21 +527,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
-
- regs_buff[i++] = 1; /* No. of tx ring */
- regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
- regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
-
- regs_buff[i++] = 2; /* No. of rx ring */
- regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
- regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+	/* Marker between regs and TX ring count */
+ regs_buff[i++] = 0xFFEFCDAB;
+
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+ regs_buff[i++] = tx_ring->sw_consumer;
+ regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+ regs_buff[i++] = tx_ring->producer;
+ if (tx_ring->crb_intr_mask)
+ regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+ else
+ regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+ }
- regs_buff[i++] = adapter->max_sds_rings;
+ regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_rings = &recv_ctx->rds_rings[ring];
+ regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+ regs_buff[i++] = rds_rings->producer;
+ }
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &(recv_ctx->sds_rings[ring]);
regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ regs_buff[i++] = sds_ring->consumer;
+ regs_buff[i++] = readl(sds_ring->crb_intr_mask);
}
}
@@ -635,46 +664,88 @@ qlcnic_set_ringparam(struct net_device *dev,
return qlcnic_reset_context(adapter);
}
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+ if (rx_ring != 0) {
+ if (rx_ring > adapter->max_sds_rings) {
+ netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ rx_ring, adapter->max_sds_rings);
+ return -EINVAL;
+ }
+ }
+
+ if (tx_ring != 0) {
+ if (qlcnic_82xx_check(adapter) &&
+ (tx_ring > adapter->max_tx_rings)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+ tx_ring, adapter->max_tx_rings);
+ return -EINVAL;
+ }
+
+ if (qlcnic_83xx_check(adapter) &&
+ (tx_ring > QLCNIC_SINGLE_RING)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
+ tx_ring, QLCNIC_SINGLE_RING);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int min;
-
- min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
- channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
- channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->max_drv_tx_rings;
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
- struct ethtool_channels *channel)
+ struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
- int txq = 0;
if (channel->other_count || channel->combined_count)
return -EINVAL;
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
+ if (err)
+ return err;
+
if (channel->rx_count) {
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
return err;
+ }
}
- if (qlcnic_82xx_check(adapter) && channel->tx_count) {
- err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
- if (err)
+ if (channel->tx_count) {
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
return err;
- txq = channel->tx_count;
+ }
}
- err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
- netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
- adapter->max_sds_rings, adapter->max_drv_tx_rings);
+ err = qlcnic_setup_rings(adapter, channel->rx_count,
+ channel->tx_count);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
return err;
}
@@ -876,7 +947,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
@@ -905,10 +976,10 @@ done:
qlcnic_free_mbx_args(&cmd);
free_diag_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -984,8 +1055,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_drv_tx_rings = adapter->max_drv_tx_rings;
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
@@ -1040,11 +1111,11 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
qlcnic_clear_lb_mode(adapter, mode);
free_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_it:
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_drv_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -1097,11 +1168,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
- for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
for (index = 0; index < num_stats; index++) {
- sprintf(data, "tx_ring_%d %s", i,
- qlcnic_tx_ring_stats_strings[index]);
+ sprintf(data, "tx_queue_%d %s", i,
+ qlcnic_tx_queue_stats_strings[index]);
data += ETH_GSTRING_LEN;
}
}
@@ -1199,6 +1270,36 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
return data;
}
+static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
+ adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
+ adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
+ adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
+ adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+ }
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+ return data;
+}
+
static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1206,19 +1307,20 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size, ring;
+ int index, ret, length, size, tx_size, ring;
char *p;
- memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+
+ memset(data, 0, tx_size * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
tx_ring = &adapter->tx_ring[ring];
- *data++ = tx_ring->xmit_on;
- *data++ = tx_ring->xmit_off;
- *data++ = tx_ring->xmit_called;
- *data++ = tx_ring->xmit_finished;
+ data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+ qlcnic_update_stats(adapter);
}
}
+
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
@@ -1260,7 +1362,7 @@ static int qlcnic_set_led(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
@@ -1318,7 +1420,7 @@ static int qlcnic_set_led(struct net_device *dev,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(dev, max_sds_rings);
+ qlcnic_diag_free_res(dev, drv_sds_rings);
if (!active || err)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
@@ -1659,7 +1761,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
bool valid_mask = false;
int i, ret = 0;
- u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
@@ -1712,9 +1813,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- netdev_info(netdev, "Device in FAILED state\n");
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ netdev_info(netdev, "Device is in non-operational state\n");
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f8adc7b01f1..6f7f60c09f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mac_req = (struct qlcnic_mac_req *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
vlan_req->vlan_id = cpu_to_le16(vlan_id);
@@ -785,8 +785,6 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
#define QLCNIC_ENABLE_IPV4_LRO 1
#define QLCNIC_ENABLE_IPV6_LRO 2
-#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8)
-#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8)
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
@@ -806,11 +804,10 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
word = 0;
if (enable) {
- word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
+ word = QLCNIC_ENABLE_IPV4_LRO;
if (adapter->ahw->extra_capability[0] &
QLCNIC_FW_CAP2_HW_LRO_IPV6)
- word |= QLCNIC_ENABLE_IPV6_LRO |
- QLCNIC_NO_DEST_IPV6_CHECK;
+ word |= QLCNIC_ENABLE_IPV6_LRO;
}
req.words[0] = cpu_to_le64(word);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 272c356cf9b..13303e7d1ed 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -146,6 +146,12 @@ struct qlcnic_mailbox_metadata {
#define QLCNIC_MBX_PORT_RSP_OK 0x1a
#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
struct qlcnic_pci_info;
struct qlcnic_info;
struct qlcnic_cmd_args;
@@ -176,7 +182,7 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 66c26cf7a2b..e9c21e5d0ca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -236,7 +236,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
spin_lock_init(&rds_ring->lock);
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 11b4bb83b93..0149c949534 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,10 +581,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
- if (qlcnic_check_multi_tx(adapter))
- tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
- else
- tx_ring = &adapter->tx_ring[0];
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -607,8 +604,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_tx_start_queue(tx_ring->txq);
} else {
- adapter->stats.xmit_off++;
- tx_ring->xmit_off++;
+ tx_ring->tx_stats.xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -669,9 +665,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (adapter->drv_mac_learn)
qlcnic_send_filter(adapter, first_desc, skb);
- adapter->stats.txbytes += skb->len;
- adapter->stats.xmitcalled++;
- tx_ring->xmit_called++;
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -789,6 +784,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
@@ -805,8 +803,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
- adapter->stats.xmitfinished++;
- tx_ring->xmit_finished++;
+ tx_ring->tx_stats.xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
@@ -823,8 +820,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_tx_wake_queue(tx_ring->txq);
- adapter->stats.xmit_on++;
- tx_ring->xmit_on++;
+ tx_ring->tx_stats.xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -844,6 +840,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
return done;
}
@@ -1011,7 +1008,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
break;
case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
- qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
break;
default:
break;
@@ -1463,18 +1460,18 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
- if (ring == (adapter->max_sds_rings - 1))
+ if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_poll,
NAPI_POLL_WEIGHT);
@@ -1491,7 +1488,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
}
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
NAPI_POLL_WEIGHT);
@@ -1508,7 +1505,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
@@ -1516,7 +1513,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_sds_rings(adapter->recv_ctx);
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
@@ -1535,7 +1532,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
@@ -1544,8 +1541,8 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1563,7 +1560,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
napi_synchronize(&sds_ring->napi);
@@ -1573,7 +1570,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
qlcnic_check_multi_tx(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_disable_tx_int(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
@@ -1911,7 +1908,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1920,7 +1917,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
@@ -1938,7 +1935,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
qlcnic_83xx_disable_intr(adapter, sds_ring);
@@ -1948,7 +1945,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
@@ -1965,10 +1962,10 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
@@ -1994,7 +1991,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
@@ -2012,7 +2009,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
@@ -2021,7 +2018,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 9e61eb86745..05c1eef8df1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -548,36 +548,75 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.io_resume = qlcnic_82xx_io_resume,
};
-static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int num_tx_q;
- if (ahw->msix_supported &&
+ if (qlcnic_82xx_check(adapter) &&
(ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
- num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
- num_online_cpus());
- if (num_tx_q > 1) {
- test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
- &adapter->state);
- adapter->max_drv_tx_rings = num_tx_q;
- }
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
} else {
- adapter->max_drv_tx_rings = 1;
+ return 1;
}
}
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+	/* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
+ adapter->drv_tx_rings);
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+	/* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
+ adapter->drv_sds_rings);
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int max_tx_rings, max_sds_rings, tx_vector;
+ int drv_tx_rings, drv_sds_rings, tx_vector;
int err = -1, i;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- max_tx_rings = 0;
+ drv_tx_rings = 0;
tx_vector = 0;
} else {
- max_tx_rings = adapter->max_drv_tx_rings;
+ drv_tx_rings = adapter->drv_tx_rings;
tx_vector = 1;
}
@@ -589,7 +628,7 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return -ENOMEM;
}
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
@@ -602,18 +641,18 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
adapter->ahw->num_msix = num_msix;
/* subtract mail box and tx ring vectors */
- adapter->max_sds_rings = num_msix -
- max_tx_rings - 1;
+ adapter->drv_sds_rings = num_msix -
+ drv_tx_rings - 1;
} else {
adapter->ahw->num_msix = num_msix;
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1))
- max_sds_rings = num_msix - max_tx_rings;
+ (adapter->drv_tx_rings > 1))
+ drv_sds_rings = num_msix - drv_tx_rings;
else
- max_sds_rings = num_msix;
+ drv_sds_rings = num_msix;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
@@ -624,13 +663,13 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
return err;
- err -= (max_tx_rings + 1);
+ err -= drv_tx_rings + 1;
num_msix = rounddown_pow_of_two(err);
- num_msix += (max_tx_rings + 1);
+ num_msix += drv_tx_rings + 1;
} else {
num_msix = rounddown_pow_of_two(err);
if (qlcnic_check_multi_tx(adapter))
- num_msix += max_tx_rings;
+ num_msix += drv_tx_rings;
}
if (num_msix) {
@@ -683,25 +722,14 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = adapter->drv_sds_rings;
- if (ahw->msix_supported) {
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
- if (qlcnic_check_multi_tx(adapter)) {
- if (txq)
- adapter->max_drv_tx_rings = txq;
- num_msix += adapter->max_drv_tx_rings;
- }
- } else {
- num_msix = 1;
- }
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
@@ -819,7 +847,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
- int i, ret = 0, j = 0;
+ int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
@@ -860,7 +888,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
continue;
if (qlcnic_port_eswitch_cfg_capability(adapter)) {
- if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+ if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+ &id))
adapter->npars[j].eswitch_status = true;
else
continue;
@@ -875,15 +904,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
j++;
}
- if (qlcnic_82xx_check(adapter)) {
+ /* Update eSwitch status for adapters without per port eSwitch
+ * configuration capability
+ */
+ if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
- } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- qlcnic_enable_eswitch(adapter, i, 1);
}
kfree(pci_info);
@@ -1131,18 +1161,25 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
if (err == -EIO)
return err;
adapter->ahw->extra_capability[0] = temp;
+ } else {
+ adapter->ahw->extra_capability[0] = 0;
}
+
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
- /* Disable NPAR for 83XX */
- if (qlcnic_83xx_check(adapter))
- return err;
-
- if (adapter->ahw->capabilities & BIT_6)
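+ /* BIT_6 of the firmware capabilities indicates eSwitch (vNIC) mode,
+ * which uses different Tx/SDS ring limits.
+ */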
+ if (adapter->ahw->capabilities & BIT_6) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
return err;
}
@@ -1290,6 +1327,8 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
"HAL Version: %d, Privileged function\n",
adapter->ahw->fw_hal_version);
}
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
}
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
@@ -1549,7 +1588,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- num_sds_rings = adapter->max_sds_rings;
+ num_sds_rings = adapter->drv_sds_rings;
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
@@ -1583,7 +1622,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
snprintf(tx_ring->name, sizeof(tx_ring->name),
@@ -1611,7 +1650,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
free_irq(sds_ring->irq, sds_ring);
}
@@ -1620,7 +1659,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
(qlcnic_82xx_check(adapter) &&
qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->irq)
@@ -1674,7 +1713,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->ahw->linkup = 0;
- if (adapter->max_sds_rings > 1)
+ if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
qlcnic_config_intr_coalesce(adapter);
@@ -1716,6 +1755,7 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
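+ /* Hold tx_clean_lock across the teardown below so releasing the
+ * Tx buffers does not race with the Tx clean path.
+ */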
+ spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1734,8 +1774,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_reset_rx_buffers_list(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+ spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -1811,16 +1852,16 @@ void qlcnic_detach(struct qlcnic_adapter *adapter)
adapter->is_up = 0;
}
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int max_tx_rings = adapter->max_drv_tx_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
}
@@ -1831,8 +1872,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
qlcnic_detach(adapter);
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1898,10 +1939,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
- adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1922,7 +1963,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
@@ -2069,7 +2110,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
return err;
}
- qlcnic_dcb_init_dcbnl_ops(adapter);
+ qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
return 0;
}
@@ -2095,7 +2136,7 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
vfree(tx_ring->cmd_buf_arr);
@@ -2113,14 +2154,14 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *cmd_buf_arr;
- tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ tx_ring = kcalloc(adapter->drv_tx_rings,
sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
return -ENOMEM;
adapter->tx_ring = tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, ring);
@@ -2135,11 +2176,11 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
if (qlcnic_83xx_check(adapter) ||
(qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
vector = adapter->msix_entries[index].vector;
tx_ring->irq = vector;
}
@@ -2159,22 +2200,10 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
else if (qlcnic_83xx_check(adapter))
fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
- if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) &&
- (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER))
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
-static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
-{
- return __qlcnic_register_dcb(adapter);
-}
-
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
-{
- kfree(adapter->dcb);
- adapter->dcb = NULL;
-}
-
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2183,6 +2212,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2269,6 +2299,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
+ spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
@@ -2283,38 +2314,51 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_maintenance_mode;
}
- qlcnic_get_multiq_capability(adapter);
-
- if ((adapter->ahw->act_pci_func > 2) &&
- qlcnic_check_multi_tx(adapter)) {
- adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
- dev_info(&adapter->pdev->dev,
- "vNIC mode enabled, Set max TX rings = %d\n",
- adapter->max_drv_tx_rings);
+ /* compute and set default and max tx/sds rings */
+ if (adapter->ahw->msix_supported) {
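+ /* Fall back to a single Tx ring when the firmware does not
+ * advertise multi Tx queue support.
+ */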
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
}
- if (!qlcnic_check_multi_tx(adapter)) {
- clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
- }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
- adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
if (err) {
- dev_err(&pdev->dev, "%s: failed\n", __func__);
- goto err_out_free_hw;
+ switch (err) {
+ case -ENOTRECOVERABLE:
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
+ dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+ goto err_out_free_hw;
+ case -ENOMEM:
+ dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+ goto err_out_free_hw;
+ default:
+ dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
+ dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ goto err_out_maintenance_mode;
+ }
}
+
if (qlcnic_sriov_vf_check(adapter))
return 0;
} else {
@@ -2342,7 +2386,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -2412,13 +2456,20 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
err_out_maintenance_mode:
+ set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
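+ /* Keep the device registered with restricted netdev/ethtool ops so
+ * it stays accessible while in maintenance mode.
+ */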
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ ahw->port_type = QLCNIC_XGBE;
+
+ if (qlcnic_83xx_check(adapter))
+ adapter->tgt_status_reg = NULL;
+ else
+ ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
err = register_netdev(netdev);
if (err) {
@@ -2449,7 +2500,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter);
+ qlcnic_dcb_free(adapter->dcb);
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
@@ -2488,7 +2539,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (adapter->qlcnic_wq) {
destroy_workqueue(adapter->qlcnic_wq);
@@ -2541,12 +2591,11 @@ static int qlcnic_resume(struct pci_dev *pdev)
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state;
int err;
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
- netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(netdev, "%s: Device is in non-operational state\n",
+ __func__);
return -EIO;
}
@@ -2708,24 +2757,21 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- if (qlcnic_82xx_check(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
- ring++) {
- tx_ring = &adapter->tx_ring[ring];
- dev_info(&netdev->dev, "ring=%d\n", ring);
- dev_info(&netdev->dev, "crb_intr_mask=%d\n",
- readl(tx_ring->crb_intr_mask));
- dev_info(&netdev->dev, "producer=%d\n",
- readl(tx_ring->crb_cmd_producer));
- dev_info(&netdev->dev, "sw_consumer = %d\n",
- tx_ring->sw_consumer);
- dev_info(&netdev->dev, "hw_consumer = %d\n",
- le32_to_cpu(*(tx_ring->hw_consumer)));
- dev_info(&netdev->dev, "xmit-on=%llu\n",
- tx_ring->xmit_on);
- dev_info(&netdev->dev, "xmit-off=%llu\n",
- tx_ring->xmit_off);
- }
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d\n", ring);
+ netdev_info(netdev,
+ "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_intr_mask),
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
}
adapter->ahw->reset_context = 1;
}
@@ -2839,7 +2885,7 @@ static void qlcnic_poll_controller(struct net_device *netdev)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_intr(adapter->irq, sds_ring);
}
@@ -3259,8 +3305,9 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
return;
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
- netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
__func__);
qlcnic_api_unlock(adapter);
@@ -3327,7 +3374,7 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
@@ -3352,6 +3399,8 @@ done:
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
u32 state = 0, heartbeat;
u32 peg_status;
int err = 0;
@@ -3376,7 +3425,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
+ if (ahw->reset_context && qlcnic_auto_fw_reset)
qlcnic_reset_hw_context(adapter);
return 0;
@@ -3419,6 +3468,9 @@ detach:
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+ adapter->flags & QLCNIC_FW_RESET_OWNER) {
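+ /* With automatic FW reset disabled, capture the firmware dump
+ * here while this function still owns the reset.
+ */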
+ qlcnic_dump_fw(adapter);
}
return 1;
@@ -3500,7 +3552,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
@@ -3645,130 +3697,90 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw = QLCNIC_MAX_TX_RINGS;
- u32 max_allowed;
+ u8 max_hw_rings = 0;
+ char buf[8];
+ int cur_rings;
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
- return -EINVAL;
+ if (queue_type == QLCNIC_RX_QUEUE) {
+ max_hw_rings = adapter->max_sds_rings;
+ cur_rings = adapter->drv_sds_rings;
+ strcpy(buf, "SDS");
+ } else if (queue_type == QLCNIC_TX_QUEUE) {
+ max_hw_rings = adapter->max_tx_rings;
+ cur_rings = adapter->drv_tx_rings;
+ strcpy(buf, "Tx");
}
- if (!qlcnic_check_multi_tx(adapter)) {
- netdev_err(netdev, "No Multi TX-Q support\n");
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
- if (txq > QLCNIC_MAX_TX_RINGS) {
- netdev_err(netdev, "Invalid ring count\n");
+ if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((txq > max_allowed) || !is_power_of_2(txq)) {
- if (!is_power_of_2(txq))
- netdev_err(netdev,
- "TX queue should be a power of 2\n");
- if (txq > num_online_cpus())
- netdev_err(netdev,
- "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
- num_online_cpus());
- netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ if (ring_cnt < 2) {
+ netdev_err(netdev,
+ "%s rings value should not be lower than 2\n", buf);
return -EINVAL;
}
- return 0;
-}
-
-int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
-{
- struct net_device *netdev = adapter->netdev;
- u8 max_hw = adapter->ahw->max_rx_ques;
- u32 max_allowed;
-
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
return -EINVAL;
}
- if (val > QLCNIC_MAX_SDS_RINGS) {
- netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLCNIC_MAX_SDS_RINGS);
- return -EINVAL;
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
- if (!is_power_of_2(val))
- netdev_err(netdev, "RSS value should be a power of 2\n");
-
- if (val < 2)
- netdev_err(netdev, "RSS value should not be lower than 2\n");
-
- if (val > max_hw)
- netdev_err(netdev,
- "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
- max_hw);
-
- if (val > num_online_cpus())
- netdev_err(netdev,
- "RSS value should not be higher than[%u], number of online CPUs in the system\n",
- num_online_cpus());
-
- netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
-
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+ "%s value[%u] should not be higher than, number of online CPUs\n",
+ buf, num_online_cpus());
return -EINVAL;
}
+
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
{
- int err;
struct net_device *netdev = adapter->netdev;
- int num_msix;
+ int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
- if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
- !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
- return -EINVAL;
- }
-
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
- if (qlcnic_82xx_check(adapter)) {
- if (txq != 0)
- adapter->max_drv_tx_rings = txq;
-
- if (qlcnic_check_multi_tx(adapter) &&
- (txq > adapter->max_drv_tx_rings))
- num_msix = adapter->max_drv_tx_rings;
- else
- num_msix = data;
- }
-
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
- netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
-
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data, txq);
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 15513608d48..7763962e2ec 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
}
if (ops_index == ops_cnt) {
- dev_info(&adapter->pdev->dev,
- "Invalid entry type %d, exiting dump\n",
+ dev_info(dev, "Skipping unknown entry opcode %d\n",
entry->hdr.type);
- goto error;
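+ /* Skip unknown template entries instead of aborting so
+ * the rest of the dump can still be collected.
+ */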
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
}
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
+ if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
}
- if (dump_size != buf_offset) {
- dev_info(&adapter->pdev->dev,
- "Captured(%d) and expected size(%d) do not match\n",
- buf_offset, dump_size);
- goto error;
- } else {
- fw_dump->clr = 1;
- snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
- adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
- adapter->netdev->name, fw_dump->size);
- /* Send a udev event to notify availability of FW dump */
- kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
- return 0;
- }
-error:
+
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+ dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
+ adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
if (fw_dump->use_pex_dma)
dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
fw_dump->dma_buffer, fw_dump->phys_addr);
- vfree(fw_dump->data);
- return -EINVAL;
+
+ return 0;
}
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 392b9bd12b4..21a4b274d2e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
+ struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -507,7 +508,11 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1, 0);
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -533,8 +538,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
@@ -1577,7 +1584,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 019f4377307..1a9f8a400e5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -156,7 +156,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
const char *buf, size_t len)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int err, max_sds_rings = adapter->max_sds_rings;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
u8 h_beacon_state, b_state, b_rate;
@@ -211,7 +211,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
out:
if (!ahw->beacon_state)
@@ -1272,7 +1272,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -1286,8 +1285,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
@@ -1313,7 +1311,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state;
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -1323,8 +1320,7 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
device_remove_bin_file(dev, &bin_attr_pci_config);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 89943377846..0c9c4e89559 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "1.00.00.33"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+ int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
/* #define QL_OB_DUMP */
#ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
#endif
#ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif
#ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif
#ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#endif
#ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#endif
#ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf4503b..a245dc18d76 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
}
}
+/**
+ * ql_update_mac_hdr_len - helper routine to update the MAC header length
+ * based on VLAN tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ return;
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
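+ /* tags indexes 16-bit words of the frame: word 6 is the outer
+ * EtherType following the two MAC addresses, word 8 the inner
+ * EtherType when an outer VLAN tag is present.
+ */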
+ if (tags[6] == ETH_P_8021Q &&
+ tags[8] == ETH_P_8021Q)
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
goto err_out;
}
+ /* Update the MAC header length */
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
- if (skb->len > ndev->mtu + ETH_HLEN) {
+ if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset+ETH_HLEN,
- length-ETH_HLEN);
- skb->len += length-ETH_HLEN;
- skb->data_len += length-ETH_HLEN;
- skb->truesize += length-ETH_HLEN;
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
- (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+ (struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
/*
* Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->data_len += length;
skb->truesize += length;
length -= length;
- __pskb_pull_tail(skb,
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
} else {
/*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
}
- __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
return skb;
}
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+ if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
}
}
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * to enable or disable hardware VLAN acceleration based on the requested features
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status = 0;
+
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
+
+ /* update the features with the recent changes */
+ ndev->features = features;
+
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
+ return status;
+}
+
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
+ int err;
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
+
return features;
}
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
- mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
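+ /* Only request the hardware receive VLAN handling (NIC_RCV_CFG_RV)
+ * when CTAG_RX acceleration is enabled.
+ */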
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ value |= NIC_RCV_CFG_RV;
+ mask |= (NIC_RCV_CFG_RV << 16);
+ }
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->doorbell_area);
vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index e9dc84943cf..1e49ec5b223 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1231,7 +1231,6 @@ err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
netif_napi_del(&lp->napi);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d2e591955bd..f2a2128165d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2052,7 +2052,6 @@ static void cp_remove_one (struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ccedeb8aba..da5972eefdd 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
pci_release_regions (pdev);
free_netdev(dev);
- pci_set_drvdata (pdev, NULL);
}
@@ -791,6 +790,9 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
pci_set_master (pdev);
+ u64_stats_init(&tp->rx_stats.syncp);
+ u64_stats_init(&tp->tx_stats.syncp);
+
retry:
/* PIO bar register comes first. */
bar = !use_io;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3397cee8977..79938757076 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6811,7 +6811,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
rtl_disable_msi(pdev, tp);
rtl8169_release_board(pdev, dev, tp->mmio_addr);
- pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops rtl_netdev_ops = {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b57c278d3b4..d256ce19d4d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -483,7 +483,7 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -561,7 +561,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000072f,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -689,7 +689,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.apr = 1,
.mpr = 1,
@@ -872,7 +872,7 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, 6);
+ memcpy(ndev->dev_addr, mac, ETH_ALEN);
} else {
ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
@@ -2663,6 +2663,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "no platform data\n");
+ ret = -EINVAL;
+ goto out_release;
+ }
+
/* get PHY ID */
mdp->phy_id = pd->phy;
mdp->phy_interface = pd->phy_interface;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a0db02c63b1..f32c1692d31 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -321,6 +321,9 @@ enum TD_STS_BIT {
#define TD_TFP (TD_TFP1|TD_TFP0)
/* RMCR */
+enum RMCR_BIT {
+ RMCR_RNC = 0x00000001,
+};
#define DEFAULT_RMCR_VALUE 0x00000000
/* ECMR */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21f9ad6392e..676c3c057bf 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
return rc;
}
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+ unsigned int i;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+ for (i = 0; i < nic_data->n_piobufs; i++) {
+ MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[i]);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc);
+ }
+
+ nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+ unsigned int i;
+ size_t outlen;
+ int rc = 0;
+
+ BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+ for (i = 0; i < n; i++) {
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ break;
+ if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = -EIO;
+ break;
+ }
+ nic_data->piobuf_handle[i] =
+ MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated PIO buffer %u handle %x\n", i,
+ nic_data->piobuf_handle[i]);
+ }
+
+ nic_data->n_piobufs = i;
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned int offset, index;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+ /* Link a buffer to each VI in the write-combining mapping */
+ for (index = 0; index < nic_data->n_piobufs; ++index) {
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ nic_data->pio_write_vi_base + index, index,
+ rc);
+ goto fail;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u\n",
+ nic_data->pio_write_vi_base + index, index);
+ }
+
+ /* Link a buffer to each TX queue */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ /* We assign the PIO buffers to queues in
+ * reverse order to allow for the following
+ * special case.
+ */
+ offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+ tx_queue->channel->channel - 1) *
+ efx_piobuf_size);
+ index = offset / ER_DZ_TX_PIOBUF_SIZE;
+ offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+ /* When the host page size is 4K, the first
+ * host page in the WC mapping may be within
+ * the same VI page as the last TX queue. We
+ * can only link one buffer to each VI.
+ */
+ if (tx_queue->queue == nic_data->pio_write_vi_base) {
+ BUG_ON(index != 0);
+ rc = 0;
+ } else {
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_TXQ_INSTANCE,
+ tx_queue->queue);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+
+ if (rc) {
+ /* This is non-fatal; the TX path just
+ * won't use PIO for this queue
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ tx_queue->queue, index, rc);
+ tx_queue->piobuf = NULL;
+ } else {
+ tx_queue->piobuf =
+ nic_data->pio_write_base +
+ index * EFX_VI_PAGE_SIZE + offset;
+ tx_queue->piobuf_offset = offset;
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u offset %x addr %p\n",
+ tx_queue->queue, index,
+ tx_queue->piobuf_offset,
+ tx_queue->piobuf);
+ }
+ }
+ }
+
+ return 0;
+
+fail:
+ while (index--) {
+ MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+ inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+ return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
static void efx_ef10_remove(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
/* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
+ if (nic_data->wc_membase)
+ iounmap(nic_data->wc_membase);
+
rc = efx_ef10_free_vis(efx);
WARN_ON(rc != 0);
+ if (!nic_data->must_restore_piobufs)
+ efx_ef10_free_piobufs(efx);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
return 0;
}
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
- unsigned int n_vis =
- max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int uc_mem_map_size, wc_mem_map_size;
+ unsigned int min_vis, pio_write_vi_base, max_vis;
+ void __iomem *membase;
+ int rc;
+
+ min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+ /* Try to allocate PIO buffers if wanted and if the full
+ * number of PIO buffers would be sufficient to allocate one
+ * copy-buffer per TX channel. Failure is non-fatal, as there
+ * are only a small number of PIO buffers shared between all
+ * functions of the controller.
+ */
+ if (efx_piobuf_size != 0 &&
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ efx->n_tx_channels) {
+ unsigned int n_piobufs =
+ DIV_ROUND_UP(efx->n_tx_channels,
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+ rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "failed to allocate PIO buffers (%d)\n", rc);
+ else
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated %u PIO buffers\n", n_piobufs);
+ }
+#else
+ nic_data->n_piobufs = 0;
+#endif
- return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+ /* PIO buffers should be mapped with write-combining enabled,
+ * and we want to make single UC and WC mappings rather than
+ * several of each (in fact that's the only option if host
+ * page size is >4K). So we may allocate some extra VIs just
+ * for writing PIO buffers through.
+ */
+ uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ ER_DZ_TX_PIOBUF);
+ if (nic_data->n_piobufs) {
+ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+ nic_data->n_piobufs) *
+ EFX_VI_PAGE_SIZE) -
+ uc_mem_map_size);
+ max_vis = pio_write_vi_base + nic_data->n_piobufs;
+ } else {
+ pio_write_vi_base = 0;
+ wc_mem_map_size = 0;
+ max_vis = min_vis;
+ }
+
+ /* In case the last attached driver failed to free VIs, do it now */
+ rc = efx_ef10_free_vis(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+ if (rc != 0)
+ return rc;
+
+ /* If we didn't get enough VIs to map all the PIO buffers, free the
+ * PIO buffers
+ */
+ if (nic_data->n_piobufs &&
+ nic_data->n_allocated_vis <
+ pio_write_vi_base + nic_data->n_piobufs) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%u VIs are not sufficient to map %u PIO buffers\n",
+ nic_data->n_allocated_vis, nic_data->n_piobufs);
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Shrink the original UC mapping of the memory BAR */
+ membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ if (!membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not shrink memory BAR to %x\n",
+ uc_mem_map_size);
+ return -ENOMEM;
+ }
+ iounmap(efx->membase);
+ efx->membase = membase;
+
+ /* Set up the WC mapping if needed */
+ if (wc_mem_map_size) {
+ nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+ uc_mem_map_size,
+ wc_mem_map_size);
+ if (!nic_data->wc_membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not allocate WC mapping of size %x\n",
+ wc_mem_map_size);
+ return -ENOMEM;
+ }
+ nic_data->pio_write_vi_base = pio_write_vi_base;
+ nic_data->pio_write_base =
+ nic_data->wc_membase +
+ (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ uc_mem_map_size);
+
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+ &efx->membase_phys, efx->membase, uc_mem_map_size,
+ nic_data->wc_membase, wc_mem_map_size);
+
+ return 0;
}
static int efx_ef10_init_nic(struct efx_nic *efx)
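As a rough aid to reading the new sizing code in efx_ef10_dimension_resources() above, here is a standalone sketch of the same arithmetic. The VI page size, PIO aperture offset and per-buffer size used below are illustrative stand-ins, not values taken from the real ef10 register headers.

/* Standalone sketch of the sizing arithmetic in efx_ef10_dimension_resources().
 * All constants here are illustrative stand-ins for the real header values.
 */
#include <stdio.h>

#define PAGE_SIZE		4096u
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define EFX_VI_PAGE_SIZE	4096u	/* assumed per-VI register window */
#define ER_DZ_TX_PIOBUF		2048u	/* assumed PIO aperture offset in a VI */
#define ER_DZ_TX_PIOBUF_SIZE	2048u	/* assumed size of one PIO buffer */

int main(void)
{
	unsigned int n_tx_channels = 20, piobuf_size = 256;	/* example inputs */
	unsigned int min_vis = 20;	/* max(n_channels, n_tx_channels * types) */
	unsigned int n_piobufs, uc, wc, pio_write_vi_base, max_vis;

	/* One PIO buffer is carved into copy regions of piobuf_size bytes,
	 * so this many buffers give one region per TX channel. */
	n_piobufs = DIV_ROUND_UP(n_tx_channels,
				 ER_DZ_TX_PIOBUF_SIZE / piobuf_size);

	/* The UC mapping covers the ordinary VIs; PIO-write VIs start on the
	 * first VI page after it and are mapped write-combined. */
	uc = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF);
	pio_write_vi_base = uc / EFX_VI_PAGE_SIZE;
	wc = PAGE_ALIGN((pio_write_vi_base + n_piobufs) * EFX_VI_PAGE_SIZE) - uc;
	max_vis = pio_write_vi_base + n_piobufs;

	printf("%u PIO buffers, UC %#x bytes, WC %#x bytes, VIs %u..%u for PIO\n",
	       n_piobufs, uc, wc, pio_write_vi_base, max_vis - 1);
	return 0;
}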
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_realloc_vis = false;
}
+ if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+ rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+ if (rc == 0) {
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Log an error on failure, but this is non-fatal */
+ if (rc)
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PIO buffers (%d)\n", rc);
+ nic_data->must_restore_piobufs = false;
+ }
+
efx_ef10_rx_push_indir_table(efx);
return 0;
}
@@ -759,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* The datapath firmware might have been changed */
@@ -2180,7 +2491,7 @@ out_unlock:
return rc;
}
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
/* no need to do anything here on EF10 */
}
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index b3f4e3755fd..207ac9a1e3d 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -315,6 +315,7 @@
#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
#define ESF_DZ_TX_PIO_OPT_LBN 60
#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_PIO 1
#define ESF_DZ_TX_PIO_CONT_LBN 59
#define ESF_DZ_TX_PIO_CONT_WIDTH 1
#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 07c9bc4c61b..2e27837ce6a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1121,7 +1121,7 @@ static int efx_init_io(struct efx_nic *efx)
*/
while (dma_mask > 0x7fffffffUL) {
if (dma_supported(&pci_dev->dev, dma_mask)) {
- rc = dma_set_mask(&pci_dev->dev, dma_mask);
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc == 0)
break;
}
@@ -1134,16 +1134,6 @@ static int efx_init_io(struct efx_nic *efx)
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
- if (rc) {
- /* dma_set_coherent_mask() is not *allowed* to
- * fail with a mask that dma_set_mask() accepted,
- * but just in case...
- */
- netif_err(efx, probe, efx->net_dev,
- "failed to set consistent DMA mask\n");
- goto fail2;
- }
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
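The efx_init_io() change above folds the old dma_set_mask()/dma_set_coherent_mask() pair into a single dma_set_mask_and_coherent() call. A minimal sketch of the same descending-mask probe follows; the 46-bit starting width is an arbitrary choice for illustration.

/* Sketch only: probe downwards for the widest DMA mask the platform accepts,
 * setting streaming and coherent masks together.
 */
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev)
{
	u64 dma_mask = DMA_BIT_MASK(46);	/* assumed starting width */
	int rc = -EIO;

	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(dev, dma_mask)) {
			rc = dma_set_mask_and_coherent(dev, dma_mask);
			if (rc == 0)
				break;	/* both masks are now set */
		}
		dma_mask >>= 1;
	}
	return rc;
}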
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5771f..b8235ee5d7d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -18,37 +18,36 @@
#define EFX_MEM_BAR 2
/* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
/* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
- unsigned int index, unsigned int n_frags,
- unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
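This and the later header hunks drop the redundant extern keyword from function prototypes. File-scope functions have external linkage by default, so the two declarations below are equivalent; efx_probe_tx_queue is used only as a representative name.

struct efx_tx_queue;	/* opaque here; only the declarations matter */

extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);	/* old style */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);		/* new style */

Note that extern is kept where it still matters, i.e. on object declarations such as efx_piobuf_size, efx_ethtool_ops and the *_nic_type structures.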
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
/* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
/* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
- unsigned int rx_usecs, bool rx_adaptive,
- bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
- unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
/* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
- size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
return efx->type->mtd_probe(efx);
}
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
{
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf5c32..1f529fa2edb 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
return 0;
}
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96ce507d860..4d3f119b67b 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,6 +66,11 @@
#define EFX_USE_QWORD_IO 1
#endif
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c34d0d4e10e..656a3277c2b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
}
#endif
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+ size_t inlen, efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual);
-
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
efx_dword_t *outbuf,
size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen,
- efx_mcdi_async_completer *complete,
- unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
-extern void efx_mcdi_process_event(struct efx_channel *channel,
- efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
/* We expect that 16- and 32-bit fields in MCDI requests and responses
* are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
- u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
- u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
- size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
- const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+ int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
#else
static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
#endif
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index 16824fecc5e..4a2dc4c281b 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -20,7 +20,7 @@
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
return sync;
}
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
*
* This function will sleep
*/
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
- int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support though PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
/* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
- int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+ unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
u8 efx_mdio_get_pause(struct efx_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
- unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
}
/* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b172ed13305..b14a717ac3e 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@ struct efx_special_buffer {
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
union {
@@ -154,6 +156,7 @@ struct efx_tx_buffer {
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
+ unsigned short dma_offset;
};
#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
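A cut-down model of what the new @dma_offset field buys: the old unmap arithmetic (dma_addr + len - unmap_len) relied on the buffer that carries @unmap_len being the final fragment of its DMA mapping, whereas storing the offset lets the driver recover the mapping start directly, as the tx.c hunk further below does in efx_dequeue_buffer(). The struct here is a stand-in, not the real struct efx_tx_buffer.

#include <linux/dma-mapping.h>

struct example_tx_buffer {			/* stand-in fields only */
	dma_addr_t dma_addr;			/* address of this fragment */
	unsigned short len;			/* length of this fragment */
	unsigned short unmap_len;		/* mapping length, or 0 */
	unsigned short dma_offset;		/* dma_addr - mapping start */
};

static dma_addr_t example_unmap_addr(const struct example_tx_buffer *b)
{
	/* Old arithmetic, valid only when this fragment ends the mapping:
	 *	return b->dma_addr + b->len - b->unmap_len;
	 */
	return b->dma_addr - b->dma_offset;	/* valid for any fragment */
}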
@@ -182,6 +185,9 @@ struct efx_tx_buffer {
* @tsoh_page: Array of pages of TSO header buffers
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -209,6 +215,7 @@ struct efx_tx_buffer {
* blocks
* @tso_packets: Number of packets via the TSO xmit path
* @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +230,8 @@ struct efx_tx_queue {
struct efx_buffer *tsoh_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
+ void __iomem *piobuf;
+ unsigned int piobuf_offset;
bool initialised;
/* Members used mainly on the completion path */
@@ -238,6 +247,7 @@ struct efx_tx_queue {
unsigned int tso_long_headers;
unsigned int tso_packets;
unsigned int pushes;
+ unsigned int pio_packets;
/* Members shared between paths and sometimes updated */
unsigned int empty_read_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9826594c8a4..9c90bf56090 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -19,6 +19,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
+#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Register dump */
-#define REGISTER_REVISION_A 1
-#define REGISTER_REVISION_B 2
-#define REGISTER_REVISION_C 3
-#define REGISTER_REVISION_Z 3 /* latest revision */
+#define REGISTER_REVISION_FA 1
+#define REGISTER_REVISION_FB 2
+#define REGISTER_REVISION_FC 3
+#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
+#define REGISTER_REVISION_ED 4
+#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
struct efx_nic_reg {
u32 offset:24;
- u32 min_revision:2, max_revision:2;
+ u32 min_revision:3, max_revision:3;
};
-#define REGISTER(name, min_rev, max_rev) { \
- FR_ ## min_rev ## max_rev ## _ ## name, \
- REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+#define REGISTER(name, arch, min_rev, max_rev) { \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev \
}
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
+ REGISTER_DZ(BIU_HW_REV_ID),
+ REGISTER_DZ(MC_DB_LWRD),
+ REGISTER_DZ(MC_DB_HWRD),
};
struct efx_nic_reg_table {
u32 offset:24;
- u32 min_revision:2, max_revision:2;
+ u32 min_revision:3, max_revision:3;
u32 step:6, rows:21;
};
-#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
offset, \
- REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev, \
step, rows \
}
-#define REGISTER_TABLE(name, min_rev, max_rev) \
+#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
- name, FR_ ## min_rev ## max_rev ## _ ## name, \
- min_rev, max_rev, \
- FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
- FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+ name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ arch, min_rev, max_rev, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
- A, A, 8, 1024),
+ F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
- B, Z, 8, 1024),
+ F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+ REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
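A standalone illustration of the arch-aware token pasting that the reworked REGISTER() macros above depend on; the register offset below is a made-up value, only the expansion is of interest.

#include <stdio.h>

#define ER_DZ_BIU_HW_REV_ID	0x1000	/* stand-in offset */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4

#define REGISTER(name, arch, min_rev, max_rev) { \
	arch ## R_ ## min_rev ## max_rev ## _ ## name, \
	REGISTER_REVISION_ ## arch ## min_rev, \
	REGISTER_REVISION_ ## arch ## max_rev \
}
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)

struct reg { unsigned int offset, min_revision, max_revision; };

int main(void)
{
	/* REGISTER_DZ(BIU_HW_REV_ID) pastes to
	 * { ER_DZ_BIU_HW_REV_ID, REGISTER_REVISION_ED, REGISTER_REVISION_EZ } */
	struct reg r = REGISTER_DZ(BIU_HW_REV_ID);

	printf("offset %#x, revisions %u..%u\n",
	       r.offset, r.min_revision, r.max_revision);
	return 0;
}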
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 890bbbe8320..11b6112d924 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push. May return a false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+ return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
+ bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
+ return was_empty && tx_queue->write_count - write_count == 1;
}
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
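A userspace model of the empty-queue test factored out above into __efx_nic_tx_is_empty(). EFX_EMPTY_COUNT_VALID is assumed here to be a single flag bit that the completion path ORs into empty_read_count, with zero meaning the queue is not known to be empty.

#include <stdbool.h>
#include <stdio.h>

#define EFX_EMPTY_COUNT_VALID 0x80000000u	/* assumed flag bit */

static bool example_tx_is_empty(unsigned int empty_read_count,
				unsigned int write_count)
{
	if (empty_read_count == 0)
		return false;	/* completion path has not reported empty */

	/* Empty iff nothing has been written since the empty report:
	 * the counts match once the VALID flag is masked off. */
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

int main(void)
{
	printf("%d %d\n",
	       example_tx_is_empty(42 | EFX_EMPTY_COUNT_VALID, 42),	/* 1 */
	       example_tx_is_empty(42 | EFX_EMPTY_COUNT_VALID, 43));	/* 0 */
	return 0;
}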
@@ -401,6 +417,12 @@ enum {
EF10_STAT_COUNT
};
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
/**
* struct efx_ef10_nic_data - EF10 architecture NIC state
* @mcdi_buf: DMA buffer for MCDI
@@ -409,6 +431,13 @@ enum {
* @n_allocated_vis: Number of VIs allocated to this function
* @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ * reboot
* @rx_rss_context: Firmware handle for our RSS context
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -424,6 +453,11 @@ struct efx_ef10_nic_data {
unsigned int n_allocated_vis;
bool must_realloc_vis;
bool must_restore_filters;
+ unsigned int n_piobufs;
+ void __iomem *wc_membase, *pio_write_base;
+ unsigned int pio_write_vi_base;
+ unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ bool must_restore_piobufs;
u32 rx_rss_context;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
@@ -475,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
return 1 << efx->vi_scale;
}
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
#else
@@ -512,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
#endif
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
@@ -541,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
**************************************************************************
*/
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -609,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
/* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 filter_id,
+ struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 *buf,
+ u32 size);
#ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
#endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -681,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
}
/* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
/* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -703,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
}
/* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
efx->type->rx_push_indir_table(efx);
}
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
@@ -728,24 +760,22 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_farch_test_registers(struct efx_nic *efx,
- const struct efx_farch_register_test *regs,
- size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask,
- u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u64 *stats,
+ const void *dma_buf, bool accumulate);
#define EFX_MAX_FLUSH_TIME 5000
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 45eeb707515..803bf445c08 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -15,7 +15,7 @@
*/
extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a596725023..8f09e686fc2 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -12,6 +12,7 @@
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const struct iphdr *ip;
const __be16 *ports;
+ __be16 ether_type;
int nhoff;
int rc;
- nhoff = skb_network_offset(skb);
+ /* The core RPS/RFS code has already parsed and validated
+ * VLAN, IP and transport headers. We assume they are in the
+ * header area.
+ */
if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
- if (((const struct vlan_hdr *)skb->data + nhoff)->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
+ const struct vlan_hdr *vh =
+ (const struct vlan_hdr *)skb->data;
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
+ /* We can't filter on the IP 5-tuple and the vlan
+ * together, so just strip the vlan header and filter
+ * on the IP part.
*/
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+ ether_type = vh->h_vlan_encapsulated_proto;
+ nhoff = sizeof(struct vlan_hdr);
+ } else {
+ ether_type = skb->protocol;
+ nhoff = 0;
}
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
+ if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
+ spec.match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec.ether_type = ether_type;
+
+ if (ether_type == htons(ETH_P_IP)) {
+ const struct iphdr *ip =
+ (const struct iphdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ spec.ip_proto = ip->protocol;
+ spec.rem_host[0] = ip->saddr;
+ spec.loc_host[0] = ip->daddr;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ } else {
+ const struct ipv6hdr *ip6 =
+ (const struct ipv6hdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(*ip6) + 4);
+ spec.ip_proto = ip6->nexthdr;
+ memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+ memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+ ports = (const __be16 *)(ip6 + 1);
+ }
+
+ spec.rem_port = ports[0];
+ spec.loc_port = ports[1];
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
channel = efx_get_channel(efx, skb_get_rx_queue(skb));
++channel->rfs_filters_added;
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
+ if (ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
return rc;
}
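A userspace model of the 5-tuple extraction in the reworked efx_filter_rfs() above, showing the IPv4 versus IPv6 header walk: IPv4 has a variable-length header of 4 * ip_hl bytes, IPv6 a fixed 40-byte one with the TCP/UDP ports immediately after it. For simplicity ether_type is taken in host byte order here, and the packet is assumed linear, as the driver comment above also assumes.

#include <stdint.h>
#include <string.h>
#include <net/ethernet.h>	/* ETHERTYPE_IP, ETHERTYPE_IPV6 */
#include <netinet/ip.h>		/* struct ip */
#include <netinet/ip6.h>	/* struct ip6_hdr */

struct five_tuple {
	uint8_t  proto;
	uint8_t  saddr[16], daddr[16];	/* IPv4 uses the first 4 bytes */
	uint16_t sport, dport;		/* kept in network byte order */
};

static int example_extract(const uint8_t *pkt, uint16_t ether_type,
			   struct five_tuple *t)
{
	const uint16_t *ports;

	if (ether_type == ETHERTYPE_IP) {
		const struct ip *ip4 = (const struct ip *)pkt;

		t->proto = ip4->ip_p;
		memcpy(t->saddr, &ip4->ip_src, 4);
		memcpy(t->daddr, &ip4->ip_dst, 4);
		ports = (const uint16_t *)(pkt + 4 * ip4->ip_hl);
	} else if (ether_type == ETHERTYPE_IPV6) {
		const struct ip6_hdr *ip6 = (const struct ip6_hdr *)pkt;

		t->proto = ip6->ip6_nxt;
		memcpy(t->saddr, &ip6->ip6_src, 16);
		memcpy(t->daddr, &ip6->ip6_dst, 16);
		ports = (const uint16_t *)(ip6 + 1);
	} else {
		return -1;	/* the driver returns -EPROTONOSUPPORT here */
	}
	t->sport = ports[0];
	t->dport = ports[1];
	return 0;
}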
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87698ae0bf7..a2f4a06ffa4 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -43,13 +43,12 @@ struct efx_self_tests {
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
- const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
- struct efx_self_tests *tests,
- unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+ int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2ac91c5b5ee..c49d1fb1696 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -17,10 +17,46 @@
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
+#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
+#include "io.h"
#include "nic.h"
#include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer =
+ __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->flags);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
@@ -29,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
- buffer->unmap_len);
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
@@ -83,8 +118,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
*/
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
- /* Possibly one more per segment for the alignment workaround */
- if (EFX_WORKAROUND_5391(efx))
+ /* Possibly one more per segment for the alignment workaround,
+ * or for option descriptors
+ */
+ if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
max_descs += EFX_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +182,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
}
}
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+ int used;
+ u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+ memcpy_toio(*piobuf, data, block_len);
+ *piobuf += block_len;
+ len -= block_len;
+
+ if (len) {
+ data += block_len;
+ BUG_ON(copy_buf->used);
+ BUG_ON(len > sizeof(copy_buf->buf));
+ memcpy(copy_buf->buf, data, len);
+ copy_buf->used = len;
+ }
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ if (copy_buf->used) {
+ /* if the copy buffer is partially full, fill it up and write */
+ int copy_to_buf =
+ min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+ memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+ copy_buf->used += copy_to_buf;
+
+ /* if we didn't fill it up then we're done for now */
+ if (copy_buf->used < sizeof(copy_buf->buf))
+ return;
+
+ memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ *piobuf += sizeof(copy_buf->buf);
+ data += copy_to_buf;
+ len -= copy_to_buf;
+ copy_buf->used = 0;
+ }
+
+ efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ /* if there's anything in it, write the whole buffer, including junk */
+ if (copy_buf->used)
+ memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+}
+
+/* Traverse skb structure and copy fragments in to PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+ u8 __iomem **piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int i;
+
+ efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+ copy_buf);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ skb_frag_size(f), copy_buf);
+ kunmap_atomic(vaddr);
+ }
+
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_tx_buffer *buffer =
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ u8 __iomem *piobuf = tx_queue->piobuf;
+
+ /* Copy to PIO buffer. Ensure the writes are padded to the end
+ * of a cache line, as this is required for write-combining to be
+ * effective on at least x86.
+ */
+
+ if (skb_shinfo(skb)->nr_frags) {
+ /* The size of the copy buffer will ensure all writes
+ * are the size of a cache line.
+ */
+ struct efx_short_copy_buffer copy_buf;
+
+ copy_buf.used = 0;
+
+ efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+ &piobuf, &copy_buf);
+ efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+ } else {
+ /* Pad the write to the size of a cache line.
+ * We can do this because we know the skb_shared_info struct is
+ * after the source, and the destination buffer is big enough.
+ */
+ BUILD_BUG_ON(L1_CACHE_BYTES >
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ memcpy_toio(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
+ }
+
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+ ESF_DZ_TX_PIO_BUF_ADDR,
+ tx_queue->piobuf_offset);
+ ++tx_queue->pio_packets;
+ ++tx_queue->insert_count;
+ return buffer;
+}
+#endif /* EFX_USE_PIO */
+
/*
* Add a socket buffer to a TX queue
*
@@ -167,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
- unsigned int len, unmap_len = 0, insert_ptr;
+ unsigned int len, unmap_len = 0;
dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len;
unsigned short dma_flags;
@@ -189,6 +365,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+ /* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+ if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
+ efx_nic_tx_is_empty(tx_queue) &&
+ efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+ buffer = efx_enqueue_skb_pio(tx_queue, skb);
+ dma_flags = EFX_TX_BUF_OPTION;
+ goto finish_packet;
+ }
+#endif
+
/* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
@@ -208,11 +395,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Add to TX queue, splitting across DMA boundaries */
do {
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
- EFX_BUG_ON_PARANOID(buffer->flags);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
dma_len = efx_max_tx_len(efx, dma_addr);
if (likely(dma_len >= len))
@@ -230,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Transfer ownership of the unmapping to the final buffer */
buffer->flags = EFX_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
unmap_len = 0;
/* Get address and size of next fragment */
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+finish_packet:
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
while (tx_queue->insert_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* @tcp_off: Offset of TCP header
* @header_len: Number of bytes of header
* @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ * descriptors
*
* The state used during segmentation. It is put into this data structure
* just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
/* Output position */
unsigned out_len;
unsigned seqnum;
- unsigned ipv4_id;
+ u16 ipv4_id;
unsigned packet_space;
/* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
unsigned int tcp_off;
unsigned header_len;
unsigned int ip_base_len;
+ dma_addr_t header_dma_addr;
+ unsigned int header_unmap_len;
};
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
{
struct efx_tx_buffer *buffer;
struct efx_nic *efx = tx_queue->efx;
- unsigned dma_len, insert_ptr;
+ unsigned dma_len;
EFX_BUG_ON_PARANOID(len <= 0);
while (1) {
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
++tx_queue->insert_count;
EFX_BUG_ON_PARANOID(tx_queue->insert_count -
tx_queue->read_count >=
efx->txq_entries);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
- EFX_BUG_ON_PARANOID(buffer->flags);
-
buffer->dma_addr = dma_addr;
dma_len = efx_max_tx_len(efx, dma_addr);
@@ -796,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
return -ENOMEM;
}
buffer->unmap_len = buffer->len;
+ buffer->dma_offset = 0;
buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
}
@@ -814,19 +999,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
- buffer = &tx_queue->buffer[tx_queue->insert_count &
- tx_queue->ptr_mask];
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
}
}
/* Parse the SKB header and initialise state. */
-static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
{
+ bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int header_len, in_len;
+ dma_addr_t dma_addr;
+
st->ip_off = skb_network_header(skb) - skb->data;
st->tcp_off = skb_transport_header(skb) - skb->data;
- st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ in_len = skb_headlen(skb) - header_len;
+ st->header_len = header_len;
+ st->in_len = in_len;
if (st->protocol == htons(ETH_P_IP)) {
st->ip_base_len = st->header_len - st->ip_off;
st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1033,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->out_len = skb->len - st->header_len;
- st->unmap_len = 0;
- st->dma_flags = 0;
+ st->out_len = skb->len - header_len;
+
+ if (!use_options) {
+ st->header_unmap_len = 0;
+
+ if (likely(in_len == 0)) {
+ st->dma_flags = 0;
+ st->unmap_len = 0;
+ return 0;
+ }
+
+ dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+ in_len, DMA_TO_DEVICE);
+ st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ st->dma_addr = dma_addr;
+ st->unmap_addr = dma_addr;
+ st->unmap_len = in_len;
+ } else {
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ st->header_dma_addr = dma_addr;
+ st->header_unmap_len = skb_headlen(skb);
+ st->dma_flags = 0;
+ st->dma_addr = dma_addr + header_len;
+ st->unmap_len = 0;
+ }
+
+ return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1078,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
return -ENOMEM;
}
-static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
- const struct sk_buff *skb)
-{
- int hl = st->header_len;
- int len = skb_headlen(skb) - hl;
-
- st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
- len, DMA_TO_DEVICE);
- if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
- st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
- st->unmap_len = len;
- st->in_len = len;
- st->dma_addr = st->unmap_addr;
- return 0;
- }
- return -ENOMEM;
-}
-
/**
* tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -922,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
if (st->in_len == 0) {
/* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
+ buffer->dma_offset = buffer->unmap_len - buffer->len;
buffer->flags |= st->dma_flags;
st->unmap_len = 0;
}
@@ -944,55 +1145,98 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
struct tso_state *st)
{
struct efx_tx_buffer *buffer =
- &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
- struct tcphdr *tsoh_th;
- unsigned ip_length;
- u8 *header;
- int rc;
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+ u8 tcp_flags_clear;
- /* Allocate and insert a DMA-mapped header buffer. */
- header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
- if (!header)
- return -ENOMEM;
-
- tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
- /* Copy and update the headers. */
- memcpy(header, skb->data, st->header_len);
-
- tsoh_th->seq = htonl(st->seqnum);
- st->seqnum += skb_shinfo(skb)->gso_size;
- if (st->out_len > skb_shinfo(skb)->gso_size) {
- /* This packet will not finish the TSO burst. */
+ if (!is_last) {
st->packet_space = skb_shinfo(skb)->gso_size;
- tsoh_th->fin = 0;
- tsoh_th->psh = 0;
+ tcp_flags_clear = 0x09; /* mask out FIN and PSH */
} else {
- /* This packet will be the last in the TSO burst. */
st->packet_space = st->out_len;
- tsoh_th->fin = tcp_hdr(skb)->fin;
- tsoh_th->psh = tcp_hdr(skb)->psh;
+ tcp_flags_clear = 0x00;
}
- ip_length = st->ip_base_len + st->packet_space;
- if (st->protocol == htons(ETH_P_IP)) {
- struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
+ if (!st->header_unmap_len) {
+ /* Allocate and insert a DMA-mapped header buffer. */
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+ int rc;
+
+ header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+ if (!header)
+ return -ENOMEM;
- tsoh_iph->tot_len = htons(ip_length);
+ tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->header_len);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+ ip_length = st->ip_base_len + st->packet_space;
+
+ if (st->protocol == htons(ETH_P_IP)) {
+ struct iphdr *tsoh_iph =
+ (struct iphdr *)(header + st->ip_off);
+
+ tsoh_iph->tot_len = htons(ip_length);
+ tsoh_iph->id = htons(st->ipv4_id);
+ } else {
+ struct ipv6hdr *tsoh_iph =
+ (struct ipv6hdr *)(header + st->ip_off);
+
+ tsoh_iph->payload_len = htons(ip_length);
+ }
- /* Linux leaves suitable gaps in the IP ID space for us to fill. */
- tsoh_iph->id = htons(st->ipv4_id);
- st->ipv4_id++;
+ rc = efx_tso_put_header(tx_queue, buffer, header);
+ if (unlikely(rc))
+ return rc;
} else {
- struct ipv6hdr *tsoh_iph =
- (struct ipv6hdr *)(header + st->ip_off);
+ /* Send the original headers with a TSO option descriptor
+ * in front
+ */
+ u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
- tsoh_iph->payload_len = htons(ip_length);
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+ ++tx_queue->insert_count;
+
+ /* We mapped the headers in tso_start(). Unmap them
+ * when the last segment is completed.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->dma_addr = st->header_dma_addr;
+ buffer->len = st->header_len;
+ if (is_last) {
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+ buffer->unmap_len = st->header_unmap_len;
+ buffer->dma_offset = 0;
+ /* Ensure we only unmap them once in case of a
+ * later DMA mapping error and rollback
+ */
+ st->header_unmap_len = 0;
+ } else {
+ buffer->flags = EFX_TX_BUF_CONT;
+ buffer->unmap_len = 0;
+ }
+ ++tx_queue->insert_count;
}
- rc = efx_tso_put_header(tx_queue, buffer, header);
- if (unlikely(rc))
- return rc;
+ st->seqnum += skb_shinfo(skb)->gso_size;
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ ++st->ipv4_id;
++tx_queue->tso_packets;
@@ -1023,12 +1267,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
- tso_start(&state, skb);
+ rc = tso_start(&state, efx, skb);
+ if (rc)
+ goto mem_err;
- /* Assume that skb header area contains exactly the headers, and
- * all payload is in the frag list.
- */
- if (skb_headlen(skb) == state.header_len) {
+ if (likely(state.in_len == 0)) {
/* Grab the first payload fragment. */
EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
frag_i = 0;
@@ -1037,9 +1280,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
if (rc)
goto mem_err;
} else {
- rc = tso_get_head_fragment(&state, efx, skb);
- if (rc)
- goto mem_err;
+ /* Payload starts in the header area. */
frag_i = -1;
}
@@ -1091,6 +1332,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
state.unmap_len, DMA_TO_DEVICE);
}
+ /* Free the header DMA mapping, if using option descriptors */
+ if (state.header_unmap_len)
+ dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+ state.header_unmap_len, DMA_TO_DEVICE);
+
efx_enqueue_unwind(tx_queue);
return NETDEV_TX_OK;
}
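
The sfc/tx.c PIO path above copies the skb into the NIC's PIO buffer in cache-line-sized chunks, carrying any leftover tail bytes in a small spill buffer so every write to the device is a full, aligned block. The following is a minimal userspace sketch of that carry logic only; CHUNK, struct spill and the pio_write_* names are illustrative, and plain memcpy() into an ordinary array stands in for the kernel's memcpy_toio() into a real PIO region.

/*
 * Userspace model of "copy whole cache-line chunks, carry the remainder".
 * Names and the 64-byte CHUNK are assumptions for illustration only.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define CHUNK 64                      /* stand-in for L1_CACHE_BYTES */

struct spill {                        /* cf. struct efx_short_copy_buffer */
	size_t used;
	unsigned char buf[CHUNK];
};

/* Copy only whole CHUNK-sized blocks to *dst; stash the tail in the spill. */
static void pio_write_aligned(unsigned char **dst, const unsigned char *data,
			      size_t len, struct spill *sp)
{
	size_t block = len & ~(sizeof(sp->buf) - 1);

	memcpy(*dst, data, block);            /* kernel: memcpy_toio() */
	*dst += block;
	len -= block;

	if (len) {                            /* remainder waits for more data */
		assert(sp->used == 0 && len <= sizeof(sp->buf));
		memcpy(sp->buf, data + block, len);
		sp->used = len;
	}
}

/* As above, but first top up and drain a partially filled spill buffer. */
static void pio_write_aligned_cb(unsigned char **dst, const unsigned char *data,
				 size_t len, struct spill *sp)
{
	if (sp->used) {
		size_t take = sizeof(sp->buf) - sp->used;

		if (take > len)
			take = len;
		memcpy(sp->buf + sp->used, data, take);
		sp->used += take;
		if (sp->used < sizeof(sp->buf))
			return;               /* still not a full chunk */

		memcpy(*dst, sp->buf, sizeof(sp->buf));
		*dst += sizeof(sp->buf);
		data += take;
		len -= take;
		sp->used = 0;
	}
	pio_write_aligned(dst, data, len, sp);
}

/* Write whatever is left as one full chunk, padding junk included. */
static void pio_flush(unsigned char *dst, struct spill *sp)
{
	if (sp->used)
		memcpy(dst, sp->buf, sizeof(sp->buf));
}

int main(void)
{
	unsigned char pio[4 * CHUNK] = { 0 };   /* fake PIO buffer */
	unsigned char frag1[100], frag2[70];
	unsigned char *cursor = pio;
	struct spill sp = { 0 };

	memset(frag1, 0xaa, sizeof(frag1));
	memset(frag2, 0xbb, sizeof(frag2));

	pio_write_aligned(&cursor, frag1, sizeof(frag1), &sp);     /* head */
	pio_write_aligned_cb(&cursor, frag2, sizeof(frag2), &sp);  /* fragment */
	pio_flush(cursor, &sp);

	printf("wrote %zu aligned bytes plus a padded tail chunk\n",
	       (size_t)(cursor - pio));
	return 0;
}

As in the driver, only the final flush may emit bytes beyond the payload, which is why the real code pads the destination rather than issuing a short device write.
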
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 770036bc2d8..513ed8b1ba5 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
dev->watchdog_timeo = timeout;
dev->irq = MACE_ETHERNET_IRQ;
dev->base_addr = (unsigned long)&mace->eth;
- memcpy(dev->dev_addr, o2meth_eaddr, 6);
+ memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
priv = netdev_priv(dev);
spin_lock_init(&priv->meth_lock);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index ee18e6f7b4f..acbbe48a519 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->phy_task);
unregister_netdev(dev);
sis190_release_board(pdev);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver sis190_pci_driver = {
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 068fc44d37e..753630f5d3d 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
- BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+ BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -39,7 +39,7 @@ config SMC91X
select CRC32
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE || ARM64)
+ MN10300 || COLDFIRE || ARM64 || XTENSA)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 03b256af7ed..8ae1f8a7bf3 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -91,9 +91,9 @@ static int rx_copybreak;
/* These identify the driver base version and may not be removed. */
static char version[] =
-DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
+DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
-" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
@@ -332,9 +332,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(KERN_INFO "%s%s", version, version2);
+ pr_info_once("%s%s\n", version, version2);
#endif
card_idx++;
@@ -423,9 +421,9 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
if (debug > 2) {
- dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
+ dev_dbg(&pdev->dev, "EEPROM contents:\n");
for (i = 0; i < 64; i++)
- printk(" %4.4x%s", read_eeprom(ep, i),
+ pr_cont(" %4.4x%s", read_eeprom(ep, i),
i % 16 == 15 ? "\n" : "");
}
@@ -490,10 +488,10 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto err_out_unmap_rx;
- printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
- dev->name, pci_id_tbl[chip_idx].name,
- (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
+ pci_id_tbl[chip_idx].name,
+ (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
+ dev->dev_addr);
out:
return ret;
@@ -703,9 +701,8 @@ static int epic_open(struct net_device *dev)
mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
if (dev->if_port == 1) {
if (debug > 1)
- printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
- "status %4.4x.\n",
- dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
+ netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
+ mdio_read(dev, ep->phys[0], MII_BMSR));
}
} else {
int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
@@ -715,10 +712,10 @@ static int epic_open(struct net_device *dev)
else if (! (mii_lpa & LPA_LPACK))
mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
if (debug > 1)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
- " register read of %4.4x.\n", dev->name,
- ep->mii.full_duplex ? "full" : "half",
- ep->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
+ ep->mii.full_duplex ? "full"
+ : "half",
+ ep->phys[0], mii_lpa);
}
}
@@ -738,10 +735,9 @@ static int epic_open(struct net_device *dev)
TxUnderrun);
if (debug > 1) {
- printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
- "status %4.4x %s-duplex.\n",
- dev->name, ioaddr, irq, er32(GENCTL),
- ep->mii.full_duplex ? "full" : "half");
+ netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
+ ioaddr, irq, er32(GENCTL),
+ ep->mii.full_duplex ? "full" : "half");
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -790,8 +786,8 @@ static void epic_restart(struct net_device *dev)
/* Soft reset the chip. */
ew32(GENCTL, 0x4001);
- printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
- dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
udelay(1);
/* This magic is documented in SMSC app note 7.15 */
@@ -827,9 +823,8 @@ static void epic_restart(struct net_device *dev)
((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
TxUnderrun);
- printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
- " interrupt %4.4x.\n",
- dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
+ netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
+ er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
static void check_media(struct net_device *dev)
@@ -846,9 +841,9 @@ static void check_media(struct net_device *dev)
return;
if (ep->mii.full_duplex != duplex) {
ep->mii.full_duplex = duplex;
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
- " partner capability of %4.4x.\n", dev->name,
- ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
+ ep->mii.full_duplex ? "full" : "half",
+ ep->phys[0], mii_lpa);
ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
}
}
@@ -861,11 +856,10 @@ static void epic_timer(unsigned long data)
int next_tick = 5*HZ;
if (debug > 3) {
- printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
- dev->name, er32(TxSTAT));
- printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
- "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
- er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
+ netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
+ er32(TxSTAT));
+ netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
+ er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
}
check_media(dev);
@@ -880,11 +874,11 @@ static void epic_tx_timeout(struct net_device *dev)
void __iomem *ioaddr = ep->ioaddr;
if (debug > 0) {
- printk(KERN_WARNING "%s: Transmit timeout using MII device, "
- "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
+ netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
+ er16(TxSTAT));
if (debug > 1) {
- printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
- dev->name, ep->dirty_tx, ep->cur_tx);
+ netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
+ ep->dirty_tx, ep->cur_tx);
}
}
if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
@@ -994,9 +988,8 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
ew32(COMMAND, TxQueued);
if (debug > 4)
- printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
- "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
- entry, ctrl_word, er32(TxSTAT));
+ netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
+ skb->len, entry, ctrl_word, er32(TxSTAT));
return NETDEV_TX_OK;
}
@@ -1009,8 +1002,8 @@ static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
#ifndef final_version
/* There was an major error, log it. */
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
+ status);
#endif
stats->tx_errors++;
if (status & 0x1050)
@@ -1057,9 +1050,8 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
#ifndef final_version
if (cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_WARNING
- "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, cur_tx, ep->tx_full);
+ netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, cur_tx, ep->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -1086,8 +1078,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
ew32(INTSTAT, status & EpicNormalEvent);
if (debug > 4) {
- printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
- "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
+ netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
+ status, er32(INTSTAT));
}
if ((status & IntrSummary) == 0)
@@ -1125,8 +1117,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
ew32(COMMAND, RestartTx);
}
if (status & PCIBusErr170) {
- printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
- dev->name, status);
+ netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
+ status);
epic_pause(dev);
epic_restart(dev);
}
@@ -1136,8 +1128,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
out:
if (debug > 3) {
- printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
- dev->name, status);
+ netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
+ status);
}
return IRQ_RETVAL(handled);
@@ -1151,7 +1143,7 @@ static int epic_rx(struct net_device *dev, int budget)
int work_done = 0;
if (debug > 4)
- printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+ netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
ep->rx_ring[entry].rxstatus);
if (rx_work_limit > budget)
@@ -1162,16 +1154,17 @@ static int epic_rx(struct net_device *dev, int budget)
int status = ep->rx_ring[entry].rxstatus;
if (debug > 4)
- printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+ netdev_dbg(dev, " epic_rx() status was %8.8x.\n",
+ status);
if (--rx_work_limit < 0)
break;
if (status & 0x2006) {
if (debug > 2)
- printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
+ status);
if (status & 0x2000) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, status %4.4x!\n", dev->name, status);
+ netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
+ status);
dev->stats.rx_length_errors++;
} else if (status & 0x0006)
/* Rx Frame errors are counted in hardware. */
@@ -1183,9 +1176,8 @@ static int epic_rx(struct net_device *dev, int budget)
struct sk_buff *skb;
if (pkt_len > PKT_BUF_SZ - 4) {
- printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
- "%d bytes.\n",
- dev->name, status, pkt_len);
+ netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
+ status, pkt_len);
pkt_len = 1514;
}
/* Check if the packet is long enough to accept without copying
@@ -1305,8 +1297,8 @@ static int epic_close(struct net_device *dev)
napi_disable(&ep->napi);
if (debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, er32(INTSTAT));
+ netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
+ er32(INTSTAT));
del_timer_sync(&ep->timer);
@@ -1324,7 +1316,7 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].buflength = 0;
if (skb) {
pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
}
ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1535,7 +1527,6 @@ static void epic_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* pci_power_off(pdev, -1); */
}
@@ -1588,8 +1579,7 @@ static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk (KERN_INFO "%s%s",
- version, version2);
+ pr_info("%s%s\n", version, version2);
#endif
return pci_register_driver(&epic_driver);
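
The epic100 hunks above (like the smc911x ones that follow) replace open-coded printk("%s: ...", dev->name, ...) calls with netdev_* helpers, which prepend the device name themselves. As a rough userspace model of why the call sites shrink, the snippet below wraps a name-carrying struct in a variadic macro; struct fakedev and fdev_info() are invented for illustration and are not the kernel API.

/* Userspace model of a dev-aware logging macro: the wrapper supplies the
 * "name: " prefix once, so call sites no longer pass dev->name by hand.
 */
#include <stdarg.h>
#include <stdio.h>

struct fakedev {
	const char *name;
};

static void fdev_printf(const struct fakedev *dev, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", dev->name);     /* prefix added in one place */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

#define fdev_info(dev, fmt, ...) \
	fdev_printf((dev), fmt, ##__VA_ARGS__)

int main(void)
{
	struct fakedev eth0 = { .name = "eth0" };

	/* before: printf("%s: link is up, speed %d\n", eth0.name, 100); */
	fdev_info(&eth0, "link is up, speed %d\n", 100);
	return 0;
}
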
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index afe01c4088a..0f096a89005 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -106,16 +106,16 @@ MODULE_ALIAS("platform:smc911x");
#define POWER_DOWN 1
#if SMC_DEBUG > 0
-#define DBG(n, args...) \
+#define DBG(n, dev, args...) \
do { \
if (SMC_DEBUG & (n)) \
- printk(args); \
+ netdev_dbg(dev, args); \
} while (0)
-#define PRINTK(args...) printk(args)
+#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, args...) do { } while (0)
-#define PRINTK(args...) printk(KERN_DEBUG args)
+#define DBG(n, dev, args...) do { } while (0)
+#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
#if SMC_DEBUG_PKTS > 0
@@ -130,21 +130,23 @@ static void PRINT_PKT(u_char *buf, int length)
for (i = 0; i < lines ; i ++) {
int cur;
+ printk(KERN_DEBUG);
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
#else
#define PRINT_PKT(x...) do { } while (0)
@@ -176,7 +178,7 @@ static void smc911x_reset(struct net_device *dev)
unsigned int reg, timeout=0, resets=1, irq_cfg;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* Take out of PM setting first */
if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -188,7 +190,7 @@ static void smc911x_reset(struct net_device *dev)
reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
} while (--timeout && !reg);
if (timeout == 0) {
- PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
return;
}
}
@@ -206,14 +208,14 @@ static void smc911x_reset(struct net_device *dev)
reg = SMC_GET_HW_CFG(lp);
/* If chip indicates reset timeout then try again */
if (reg & HW_CFG_SRST_TO_) {
- PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+ PRINTK(dev, "chip reset timeout, retrying...\n");
resets++;
break;
}
} while (--timeout && (reg & HW_CFG_SRST_));
}
if (timeout == 0) {
- PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
return;
}
@@ -223,7 +225,7 @@ static void smc911x_reset(struct net_device *dev)
udelay(10);
if (timeout == 0){
- PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
return;
}
@@ -270,7 +272,7 @@ static void smc911x_enable(struct net_device *dev)
unsigned mask, cfg, cr;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
@@ -296,7 +298,7 @@ static void smc911x_enable(struct net_device *dev)
/* Turn on receiver and enable RX */
if (cr & MAC_CR_RXEN_)
- DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");
SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
@@ -327,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev)
unsigned cr;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
/* Disable IRQ's */
SMC_SET_INT_EN(lp, 0);
@@ -346,7 +348,8 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
struct smc911x_local *lp = netdev_priv(dev);
unsigned int fifo_count, timeout, reg;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
+ CARDNAME, __func__);
fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
if (fifo_count <= 4) {
/* Manually dump the packet data */
@@ -361,7 +364,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
} while (--timeout && reg);
if (timeout == 0) {
- PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+ PRINTK(dev, "timeout waiting for RX fast forward\n");
}
}
}
@@ -379,11 +382,11 @@ static inline void smc911x_rcv(struct net_device *dev)
struct sk_buff *skb;
unsigned char *data;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
+ __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
- dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
+ (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
/* Deal with a bad packet */
@@ -403,8 +406,7 @@ static inline void smc911x_rcv(struct net_device *dev)
/* Alloc a buffer with extra room for DMA alignment */
skb = netdev_alloc_skb(dev, pkt_len+32);
if (unlikely(skb == NULL)) {
- PRINTK( "%s: Low memory, rcvd packet dropped.\n",
- dev->name);
+ PRINTK(dev, "Low memory, rcvd packet dropped.\n");
dev->stats.rx_dropped++;
smc911x_drop_pkt(dev);
return;
@@ -422,8 +424,8 @@ static inline void smc911x_rcv(struct net_device *dev)
/* Lower the FIFO threshold if possible */
fifo = SMC_GET_FIFO_INT(lp);
if (fifo & 0xFF) fifo--;
- DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
- dev->name, fifo & 0xff);
+ DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
+ fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
/* Setup RX DMA */
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
@@ -436,7 +438,7 @@ static inline void smc911x_rcv(struct net_device *dev)
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
SMC_PULL_DATA(lp, data, pkt_len+2+3);
- DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -456,7 +458,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
unsigned int cmdA, cmdB, len;
unsigned char *buf;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
BUG_ON(lp->pending_tx_skb == NULL);
skb = lp->pending_tx_skb;
@@ -481,12 +483,12 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
/* tag is packet length so we can use this in stats update later */
cmdB = (skb->len << 16) | (skb->len & 0x7FF);
- DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
- dev->name, len, len, buf, cmdA, cmdB);
+ DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ len, len, buf, cmdA, cmdB);
SMC_SET_TX_FIFO(lp, cmdA);
SMC_SET_TX_FIFO(lp, cmdB);
- DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
PRINT_PKT(buf, len <= 64 ? len : 64);
/* Send pkt via PIO or DMA */
@@ -517,20 +519,20 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int free;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+ __func__);
spin_lock_irqsave(&lp->lock, flags);
BUG_ON(lp->pending_tx_skb != NULL);
free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
- DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+ DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
/* Turn off the flow when running out of space in FIFO */
if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
- DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
- dev->name, free);
+ DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
+ free);
/* Reenable when at least 1 packet of size MTU present */
SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
lp->tx_throttle = 1;
@@ -545,8 +547,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* End padding 15 bytes
*/
if (unlikely(free < (skb->len + 8 + 15 + 15))) {
- printk("%s: No Tx free space %d < %d\n",
- dev->name, free, skb->len);
+ netdev_warn(dev, "No Tx free space %d < %d\n",
+ free, skb->len);
lp->pending_tx_skb = NULL;
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
@@ -561,13 +563,13 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the DMA IRQ starts it
*/
if (lp->txdma_active) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
lp->pending_tx_skb = skb;
netif_stop_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
} else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
lp->txdma_active = 1;
}
}
@@ -589,20 +591,19 @@ static void smc911x_tx(struct net_device *dev)
struct smc911x_local *lp = netdev_priv(dev);
unsigned int tx_status;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+ __func__);
/* Collect the TX status */
while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
- DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
- dev->name,
- (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
+ DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
+ (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
tx_status = SMC_GET_TX_STS_FIFO(lp);
dev->stats.tx_packets++;
dev->stats.tx_bytes+=tx_status>>16;
- DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
- dev->name, (tx_status & 0xffff0000) >> 16,
- tx_status & 0x0000ffff);
+ DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
+ (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
/* count Tx errors, but ignore lost carrier errors when in
* full-duplex mode */
if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
@@ -640,8 +641,8 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
SMC_GET_MII(lp, phyreg, phyaddr, phydata);
- DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __func__, phyaddr, phyreg, phydata);
return phydata;
}
@@ -654,8 +655,8 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SET_MII(lp, phyreg, phyaddr, phydata);
}
@@ -670,7 +671,7 @@ static void smc911x_phy_detect(struct net_device *dev)
int phyaddr;
unsigned int cfg, id1, id2;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
lp->phy_type = 0;
@@ -731,8 +732,8 @@ static void smc911x_phy_detect(struct net_device *dev)
lp->phy_type = id1 << 16 | id2;
}
- DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
- dev->name, id1, id2, lp->mii.phy_id);
+ DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+ id1, id2, lp->mii.phy_id);
}
/*
@@ -745,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int bmcr;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* Enter Link Disable state */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -792,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
unsigned long flags;
unsigned int reg;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
reg = SMC_GET_PMT_CTRL(lp);
@@ -851,18 +852,18 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
int phyaddr = lp->mii.phy_id;
unsigned int bmcr, cr;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
/* duplex state has changed */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
SMC_GET_MAC_CR(lp, cr);
if (lp->mii.full_duplex) {
- DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
bmcr |= BMCR_FULLDPLX;
cr |= MAC_CR_RCVOWN_;
} else {
- DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
bmcr &= ~BMCR_FULLDPLX;
cr &= ~MAC_CR_RCVOWN_;
}
@@ -891,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work)
int status;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
/*
* We should not be called if phy_type is zero.
@@ -900,7 +901,7 @@ static void smc911x_phy_configure(struct work_struct *work)
return;
if (smc911x_phy_reset(dev, phyaddr)) {
- printk("%s: PHY reset timed out\n", dev->name);
+ netdev_info(dev, "PHY reset timed out\n");
return;
}
spin_lock_irqsave(&lp->lock, flags);
@@ -922,7 +923,7 @@ static void smc911x_phy_configure(struct work_struct *work)
/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- printk(KERN_INFO "Auto negotiation NOT supported\n");
+ netdev_info(dev, "Auto negotiation NOT supported\n");
smc911x_phy_fixed(dev);
goto smc911x_phy_configure_exit;
}
@@ -960,8 +961,8 @@ static void smc911x_phy_configure(struct work_struct *work)
udelay(10);
SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
- DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
- DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+ DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
+ DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -984,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int status;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
if (lp->phy_type == 0)
return;
@@ -992,10 +993,10 @@ static void smc911x_phy_interrupt(struct net_device *dev)
smc911x_phy_check_media(dev, 0);
/* read to clear status bits */
SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
- DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
- dev->name, status & 0xffff);
- DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
- dev->name, SMC_GET_AFC_CFG(lp));
+ DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
+ status & 0xffff);
+ DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
+ SMC_GET_AFC_CFG(lp));
}
/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
@@ -1012,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
unsigned int rx_overrun=0, cr, pkts;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
@@ -1033,8 +1034,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
do {
status = SMC_GET_INT(lp);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
- dev->name, status, mask, status & ~mask);
+ DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+ status, mask, status & ~mask);
status &= mask;
if (!status)
@@ -1066,7 +1067,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
SMC_GET_MAC_CR(lp, cr);
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
- DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
@@ -1078,7 +1079,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
rx_overrun=1;
- DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
@@ -1087,23 +1088,23 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle receive condition */
if ((status & INT_STS_RSFL_) || rx_overrun) {
unsigned int fifo;
- DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX irq\n");
fifo = SMC_GET_RX_FIFO_INF(lp);
pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
- DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
- dev->name, pkts, fifo & 0xFFFF );
+ DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
+ pkts, fifo & 0xFFFF);
if (pkts != 0) {
#ifdef SMC_USE_DMA
unsigned int fifo;
if (lp->rxdma_active){
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
- "%s: RX DMA active\n", dev->name);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+ "RX DMA active\n");
/* The DMA is already running so up the IRQ threshold */
fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
fifo |= pkts & 0xFF;
- DBG(SMC_DEBUG_RX,
- "%s: Setting RX stat FIFO threshold to %d\n",
- dev->name, fifo & 0xff);
+ DBG(SMC_DEBUG_RX, dev,
+ "Setting RX stat FIFO threshold to %d\n",
+ fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
} else
#endif
@@ -1113,7 +1114,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
/* Handle transmit FIFO available */
if (status & INT_STS_TDFA_) {
- DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+ DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
SMC_SET_FIFO_TDA(lp, 0xFF);
lp->tx_throttle = 0;
#ifdef SMC_USE_DMA
@@ -1125,9 +1126,9 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle transmit done condition */
#if 1
if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
- "%s: Tx stat FIFO limit (%d) /GPT irq\n",
- dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
+ "Tx stat FIFO limit (%d) /GPT irq\n",
+ (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
smc911x_tx(dev);
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_TSFL_);
@@ -1135,23 +1136,20 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
+ DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
if (status & INT_STS_GPT_INT_) {
- DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
- dev->name,
- SMC_GET_IRQ_CFG(lp),
- SMC_GET_FIFO_INT(lp),
- SMC_GET_RX_CFG(lp));
- DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
- "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
- dev->name,
- (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
- SMC_GET_RX_FIFO_INF(lp) & 0xffff,
- SMC_GET_RX_STS_FIFO_PEEK(lp));
+ DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ SMC_GET_IRQ_CFG(lp),
+ SMC_GET_FIFO_INT(lp),
+ SMC_GET_RX_CFG(lp));
+ DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF(lp) & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK(lp));
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_GPT_INT_);
}
@@ -1159,7 +1157,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle PHY interrupt condition */
if (status & INT_STS_PHY_INT_) {
- DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
smc911x_phy_interrupt(dev);
SMC_ACK_INT(lp, INT_STS_PHY_INT_);
}
@@ -1168,8 +1166,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* restore mask state */
SMC_SET_INT_EN(lp, mask);
- DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
- dev->name, 8-timeout);
+ DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
+ 8-timeout);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1185,9 +1183,9 @@ smc911x_tx_dma_irq(int dma, void *data)
struct sk_buff *skb = lp->current_tx_skb;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
BUG_ON(skb == NULL);
@@ -1198,8 +1196,8 @@ smc911x_tx_dma_irq(int dma, void *data)
if (lp->pending_tx_skb != NULL)
smc911x_hardware_send_pkt(dev);
else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
- "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+ "No pending Tx packets. DMA disabled\n");
spin_lock_irqsave(&lp->lock, flags);
lp->txdma_active = 0;
if (!lp->tx_throttle) {
@@ -1208,8 +1206,8 @@ smc911x_tx_dma_irq(int dma, void *data)
spin_unlock_irqrestore(&lp->lock, flags);
}
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
- "%s: TX DMA irq completed\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+ "TX DMA irq completed\n");
}
static void
smc911x_rx_dma_irq(int dma, void *data)
@@ -1221,8 +1219,8 @@ smc911x_rx_dma_irq(int dma, void *data)
unsigned long flags;
unsigned int pkts;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
@@ -1242,9 +1240,9 @@ smc911x_rx_dma_irq(int dma, void *data)
lp->rxdma_active = 0;
}
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
- "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
- dev->name, pkts);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+ "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ pkts);
}
#endif /* SMC_USE_DMA */
@@ -1268,14 +1266,14 @@ static void smc911x_timeout(struct net_device *dev)
int status, mask;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
- dev->name, status, mask);
+ DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
+ status, mask);
/* Dump the current TX FIFO contents and restart */
mask = SMC_GET_TX_CFG(lp);
@@ -1306,7 +1304,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
unsigned int mcr, update_multicast = 0;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CR(lp, mcr);
@@ -1314,7 +1312,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
- DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
mcr |= MAC_CR_PRMS_;
}
/*
@@ -1323,7 +1321,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
mcr |= MAC_CR_MCPAS_;
}
@@ -1363,8 +1361,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
- DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
- dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
/*
@@ -1378,9 +1375,9 @@ static void smc911x_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
SMC_SET_MAC_CR(lp, mcr);
if (update_multicast) {
- DBG(SMC_DEBUG_MISC,
- "%s: update mcast hash table 0x%08x 0x%08x\n",
- dev->name, multicast_table[0], multicast_table[1]);
+ DBG(SMC_DEBUG_MISC, dev,
+ "update mcast hash table 0x%08x 0x%08x\n",
+ multicast_table[0], multicast_table[1]);
SMC_SET_HASHL(lp, multicast_table[0]);
SMC_SET_HASHH(lp, multicast_table[1]);
}
@@ -1398,7 +1395,7 @@ smc911x_open(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* reset the hardware */
smc911x_reset(dev);
@@ -1425,7 +1422,7 @@ static int smc911x_close(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1459,7 +1456,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
int ret, status;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;
@@ -1597,16 +1594,16 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
e2p_cmd = SMC_GET_E2P_CMD(lp);
for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
- PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
- dev->name, __func__);
+ PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
+ __func__);
return -EFAULT;
}
mdelay(1);
e2p_cmd = SMC_GET_E2P_CMD(lp);
}
if (timeout == 0) {
- PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
- dev->name, __func__);
+ PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
+ __func__);
return -ETIMEDOUT;
}
return 0;
@@ -1719,7 +1716,7 @@ static int smc911x_findirq(struct net_device *dev)
int timeout = 20;
unsigned long cookie;
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
cookie = probe_irq_on();
@@ -1799,13 +1796,14 @@ static int smc911x_probe(struct net_device *dev)
const char *version_string;
unsigned long irq_flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* First, see if the endian word is recognized */
val = SMC_GET_BYTE_TEST(lp);
- DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
+ CARDNAME, val);
if (val != 0x87654321) {
- printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+ netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
retval = -ENODEV;
goto err_out;
}
@@ -1816,26 +1814,29 @@ static int smc911x_probe(struct net_device *dev)
* as future revisions could be added.
*/
chip_id = SMC_GET_PN(lp);
- DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
+ CARDNAME, chip_id);
for(i=0;chip_ids[i].id != 0; i++) {
if (chip_ids[i].id == chip_id) break;
}
if (!chip_ids[i].id) {
- printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
retval = -ENODEV;
goto err_out;
}
version_string = chip_ids[i].name;
revision = SMC_GET_REV(lp);
- DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+ DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);
/* At this point I'll assume that the chip is an SMC911x. */
- DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+ DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
+ CARDNAME, chip_ids[i].name);
/* Validate the TX FIFO size requested */
if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
- printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+ netdev_err(dev, "Invalid TX FIFO size requested %d\n",
+ tx_fifo_kb);
retval = -EINVAL;
goto err_out;
}
@@ -1887,14 +1888,13 @@ static int smc911x_probe(struct net_device *dev)
case 14:/* 1920 Rx Data Fifo Size */
lp->afc_cfg=0x0006032F;break;
default:
- PRINTK("%s: ERROR -- no AFC_CFG setting found",
- dev->name);
+ PRINTK(dev, "ERROR -- no AFC_CFG setting found");
break;
}
- DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
- "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
- lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+ DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
+ "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+ lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
spin_lock_init(&lp->lock);
@@ -1924,8 +1924,7 @@ static int smc911x_probe(struct net_device *dev)
}
}
if (dev->irq == 0) {
- printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
- dev->name);
+ netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
retval = -ENODEV;
goto err_out;
}
@@ -1980,33 +1979,32 @@ static int smc911x_probe(struct net_device *dev)
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %#lx IRQ %d",
- dev->name, version_string, lp->revision,
- dev->base_addr, dev->irq);
+ netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
+ version_string, lp->revision,
+ dev->base_addr, dev->irq);
#ifdef SMC_USE_DMA
if (lp->rxdma != -1)
- printk(" RXDMA %d ", lp->rxdma);
+ pr_cont(" RXDMA %d", lp->rxdma);
if (lp->txdma != -1)
- printk("TXDMA %d", lp->txdma);
+ pr_cont(" TXDMA %d", lp->txdma);
#endif
- printk("\n");
+ pr_cont("\n");
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", dev->name);
+ netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Ethernet addr: %pM\n",
+ dev->dev_addr);
}
if (lp->phy_type == 0) {
- PRINTK("%s: No PHY found\n", dev->name);
+ PRINTK(dev, "No PHY found\n");
} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
- PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+ PRINTK(dev, "LAN911x Internal PHY\n");
} else {
- PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+ PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
}
}
@@ -2025,7 +2023,7 @@ err_out:
}
/*
- * smc911x_init(void)
+ * smc911x_drv_probe(void)
*
* Output:
* 0 --> there is a device
@@ -2039,6 +2037,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
+ /* ndev is not valid yet, so avoid passing it in. */
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -2093,7 +2092,7 @@ release_both:
release_1:
release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
+ pr_info("%s: not found (%d).\n", CARDNAME, ret);
}
#ifdef SMC_USE_DMA
else {
@@ -2111,7 +2110,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
struct smc911x_local *lp = netdev_priv(ndev);
struct resource *res;
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
unregister_netdev(ndev);
@@ -2140,7 +2139,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
struct net_device *ndev = platform_get_drvdata(dev);
struct smc911x_local *lp = netdev_priv(ndev);
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
if (ndev) {
if (netif_running(ndev)) {
netif_device_detach(ndev);
@@ -2158,7 +2157,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
{
struct net_device *ndev = platform_get_drvdata(dev);
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
if (ndev) {
struct smc911x_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index d51261ba464..9965da39281 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -227,7 +227,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
#define SMC_DMA_ACK_IRQ(dev, dma) \
{ \
if (DCSR(dma) & DCSR_BUSERR) { \
- printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ netdev_err(dev, "DMA %d bus error!\n", dma); \
} \
DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index e85c2e7e824..67d9fdeedd8 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -55,7 +55,7 @@
----------------------------------------------------------------------------*/
static const char version[] =
- "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+ "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)";
#include <linux/module.h>
#include <linux/kernel.h>
@@ -95,14 +95,6 @@ static const char version[] =
#define USE_32_BIT 1
#endif
-#if defined(__H8300H__) || defined(__H8300S__)
-#define NO_AUTOPROBE
-#undef insl
-#undef outsl
-#define insl(a,b,l) io_insl_noswap(a,b,l)
-#define outsl(a,b,l) io_outsl_noswap(a,b,l)
-#endif
-
/*
.the SMC9194 can be at any of the following port addresses. To change,
.for a slightly different card, you can add it to the array. Keep in
@@ -114,12 +106,6 @@ struct devlist {
unsigned int irq;
};
-#if defined(CONFIG_H8S_EDOSK2674)
-static struct devlist smc_devlist[] __initdata = {
- {.port = 0xf80000, .irq = 16},
- {.port = 0, .irq = 0 },
-};
-#else
static struct devlist smc_devlist[] __initdata = {
{.port = 0x200, .irq = 0},
{.port = 0x220, .irq = 0},
@@ -139,7 +125,6 @@ static struct devlist smc_devlist[] __initdata = {
{.port = 0x3E0, .irq = 0},
{.port = 0, .irq = 0},
};
-#endif
/*
. Wait time for memory to be free. This probably shouldn't be
. tuned that much, as waiting for this means nothing else happens
@@ -612,7 +597,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
+ netdev_dbg(dev, CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -625,7 +610,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* point to the beginning of the packet */
outw( PTR_AUTOINC , ioaddr + POINTER );
- PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length));
#if SMC_DEBUG > 2
print_packet( buf, length );
#endif
@@ -651,11 +636,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
#ifdef USE_32_BIT
if ( length & 0x2 ) {
outsl(ioaddr + DATA_1, buf, length >> 2 );
-#if !defined(__H8300H__) && !defined(__H8300S__)
outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#else
- ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#endif
}
else
outsl(ioaddr + DATA_1, buf, length >> 2 );
@@ -865,7 +846,6 @@ static const struct net_device_ops smc_netdev_ops = {
static int __init smc_probe(struct net_device *dev, int ioaddr)
{
int i, memory, retval;
- static unsigned version_printed;
unsigned int bank;
const char *version_string;
@@ -899,7 +879,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
retval = -ENODEV;
goto err_out;
}
-#if !defined(CONFIG_H8S_EDOSK2674)
/* well, we've already written once, so hopefully another time won't
hurt. This time, I need to switch the bank register to bank 1,
so I can access the base address register */
@@ -914,10 +893,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
retval = -ENODEV;
goto err_out;
}
-#else
- (void)base_address_register; /* Warning suppression */
-#endif
-
/* check if the revision register is something that I recognize.
These might need to be added to later, as future revisions
@@ -937,8 +912,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
It might be prudent to check a listing of MAC addresses
against the hardware address, or do some other tests. */
- if (version_printed++ == 0)
- printk("%s", version);
+ pr_info_once("%s\n", version);
/* fill in some of the fields */
dev->base_addr = ioaddr;
@@ -1027,21 +1001,21 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
/* now, print out the card info, in a short format.. */
- printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name,
- version_string, revision_register & 0xF, ioaddr, dev->irq,
- if_string, memory );
+ netdev_info(dev, "%s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ",
+ version_string, revision_register & 0xF, ioaddr, dev->irq,
+ if_string, memory);
/*
. Print the Ethernet address
*/
- printk("ADDR: %pM\n", dev->dev_addr);
+ netdev_info(dev, "ADDR: %pM\n", dev->dev_addr);
/* Grab the IRQ */
- retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
- dev->irq, retval);
- goto err_out;
- }
+ retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ netdev_warn(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
+ DRV_NAME, dev->irq, retval);
+ goto err_out;
+ }
dev->netdev_ops = &smc_netdev_ops;
dev->watchdog_timeo = HZ/20;
@@ -1061,30 +1035,32 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d\n", length);
+ pr_debug("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
for ( i = 0; i < lines ; i ++ ) {
int cur;
+ printk(KERN_DEBUG);
for ( cur = 0; cur < 8; cur ++ ) {
byte a, b;
a = *(buf ++ );
b = *(buf ++ );
- printk("%02x%02x ", a, b );
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for ( i = 0; i < remainder/2 ; i++ ) {
byte a, b;
a = *(buf ++ );
b = *(buf ++ );
- printk("%02x%02x ", a, b );
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
#endif
}
#endif
@@ -1151,9 +1127,8 @@ static void smc_timeout(struct net_device *dev)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
- printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
- tx_done(dev) ? "IRQ conflict" :
- "network cable problem");
+ netdev_warn(dev, CARDNAME": transmit timed out, %s?\n",
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
@@ -1323,8 +1298,7 @@ static void smc_tx( struct net_device * dev )
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
if ( tx_status & TS_LATCOL ) {
- printk(KERN_DEBUG CARDNAME
- ": Late collision occurred on last xmit.\n");
+ netdev_dbg(dev, CARDNAME": Late collision occurred on last xmit.\n");
dev->stats.tx_window_errors++;
}
#if 0
@@ -1332,7 +1306,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt\n");
+ netdev_info(dev, CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
@@ -1571,9 +1545,7 @@ int __init init_module(void)
/* copy the parameters from insmod into the device structure */
devSMC9194 = smc_init(-1);
- if (IS_ERR(devSMC9194))
- return PTR_ERR(devSMC9194);
- return 0;
+ return PTR_ERR_OR_ZERO(devSMC9194);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 656d2e2ebfc..8ef70d9c20c 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -740,7 +740,7 @@ static int smc91c92_resume(struct pcmcia_device *link)
(smc->cardid == PRODID_PSION_NET100))) {
i = osi_load_firmware(link);
if (i) {
- pr_err("smc91c92_cs: Failed to load firmware\n");
+ netdev_err(dev, "Failed to load firmware\n");
return i;
}
}
@@ -793,7 +793,7 @@ static int check_sig(struct pcmcia_device *link)
}
if (width) {
- pr_info("using 8-bit IO window\n");
+ netdev_info(dev, "using 8-bit IO window\n");
smc91c92_suspend(link);
pcmcia_fixup_iowidth(link);
@@ -1036,7 +1036,7 @@ static void smc_dump(struct net_device *dev)
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
SMC_SELECT_BANK(w);
- netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
+ netdev_dbg(dev, "bank %d: ", w);
for (i = 0; i < 14; i += 2)
pr_cont(" %04x", inw(ioaddr + i));
pr_cont("\n");
@@ -1213,8 +1213,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
if (smc->saved_skb) {
/* THIS SHOULD NEVER HAPPEN. */
dev->stats.tx_aborted_errors++;
- netdev_printk(KERN_DEBUG, dev,
- "Internal error -- sent packet while busy\n");
+ netdev_dbg(dev, "Internal error -- sent packet while busy\n");
return NETDEV_TX_BUSY;
}
smc->saved_skb = skb;
@@ -1254,7 +1253,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
}
/* Otherwise defer until the Tx-space-allocated interrupt. */
- pr_debug("%s: memory allocation deferred.\n", dev->name);
+ netdev_dbg(dev, "memory allocation deferred.\n");
outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
spin_unlock_irqrestore(&smc->lock, flags);
@@ -1317,8 +1316,8 @@ static void smc_eph_irq(struct net_device *dev)
SMC_SELECT_BANK(0);
ephs = inw(ioaddr + EPH);
- pr_debug("%s: Ethernet protocol handler interrupt, status"
- " %4.4x.\n", dev->name, ephs);
+ netdev_dbg(dev, "Ethernet protocol handler interrupt, status %4.4x.\n",
+ ephs);
/* Could be a counter roll-over warning: update stats. */
card_stats = inw(ioaddr + COUNTER);
/* single collisions */
@@ -1357,8 +1356,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
ioaddr = dev->base_addr;
- pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
- irq, ioaddr);
+ netdev_dbg(dev, "SMC91c92 interrupt %d at %#x.\n",
+ irq, ioaddr);
spin_lock(&smc->lock);
smc->watchdog = 0;
@@ -1366,8 +1365,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
if ((saved_bank & 0xff00) != 0x3300) {
/* The device does not exist -- the card could be off-line, or
maybe it has been ejected. */
- pr_debug("%s: SMC91c92 interrupt %d for non-existent"
- "/ejected device.\n", dev->name, irq);
+ netdev_dbg(dev, "SMC91c92 interrupt %d for non-existent/ejected device.\n",
+ irq);
handled = 0;
goto irq_done;
}
@@ -1380,8 +1379,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
do { /* read the status flag, and mask it */
status = inw(ioaddr + INTERRUPT) & 0xff;
- pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
- status, mask);
+ netdev_dbg(dev, "Status is %#2.2x (mask %#2.2x).\n",
+ status, mask);
if ((status & mask) == 0) {
if (bogus_cnt == INTR_WORK)
handled = 0;
@@ -1425,15 +1424,15 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_eph_irq(dev);
} while (--bogus_cnt);
- pr_debug(" Restoring saved registers mask %2.2x bank %4.4x"
- " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+ netdev_dbg(dev, " Restoring saved registers mask %2.2x bank %4.4x pointer %4.4x.\n",
+ mask, saved_bank, saved_pointer);
/* restore state register */
outw((mask<<8), ioaddr + INTERRUPT);
outw(saved_pointer, ioaddr + POINTER);
SMC_SELECT_BANK(saved_bank);
- pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+ netdev_dbg(dev, "Exiting interrupt IRQ%d.\n", irq);
irq_done:
@@ -1491,10 +1490,10 @@ static void smc_rx(struct net_device *dev)
rx_status = inw(ioaddr + DATA_1);
packet_length = inw(ioaddr + DATA_1) & 0x07ff;
- pr_debug("%s: Receive status %4.4x length %d.\n",
- dev->name, rx_status, packet_length);
+ netdev_dbg(dev, "Receive status %4.4x length %d.\n",
+ rx_status, packet_length);
- if (!(rx_status & RS_ERRORS)) {
+ if (!(rx_status & RS_ERRORS)) {
/* do stuff to make a new packet */
struct sk_buff *skb;
@@ -1502,7 +1501,7 @@ static void smc_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, packet_length+2);
if (skb == NULL) {
- pr_debug("%s: Low memory, packet dropped.\n", dev->name);
+ netdev_dbg(dev, "Low memory, packet dropped.\n");
dev->stats.rx_dropped++;
outw(MC_RELEASE, ioaddr + MMU_CMD);
return;
@@ -1643,7 +1642,7 @@ static void smc_reset(struct net_device *dev)
struct smc_private *smc = netdev_priv(dev);
int i;
- pr_debug("%s: smc91c92 reset called.\n", dev->name);
+ netdev_dbg(dev, "smc91c92 reset called.\n");
/* The first interaction must be a write to bring the chip out
of sleep mode. */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 73be7f3982e..0c9b5d94154 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -58,7 +58,7 @@
* 22/09/04 Nicolas Pitre big update (see commit log for details)
*/
static const char version[] =
- "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n";
+ "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>";
/* Debugging level */
#ifndef SMC_DEBUG
@@ -149,16 +149,16 @@ MODULE_ALIAS("platform:smc91x");
#define MII_DELAY 1
#if SMC_DEBUG > 0
-#define DBG(n, args...) \
+#define DBG(n, dev, args...) \
do { \
if (SMC_DEBUG >= (n)) \
- printk(args); \
+ netdev_dbg(dev, args); \
} while (0)
-#define PRINTK(args...) printk(args)
+#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, args...) do { } while(0)
-#define PRINTK(args...) printk(KERN_DEBUG args)
+#define DBG(n, dev, args...) do { } while (0)
+#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
#if SMC_DEBUG > 3
@@ -173,24 +173,26 @@ static void PRINT_PKT(u_char *buf, int length)
for (i = 0; i < lines ; i ++) {
int cur;
+ printk(KERN_DEBUG);
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
#else
-#define PRINT_PKT(x...) do { } while(0)
+#define PRINT_PKT(x...) do { } while (0)
#endif
@@ -226,8 +228,8 @@ static void PRINT_PKT(u_char *buf, int length)
unsigned long timeout = jiffies + 2; \
while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \
if (time_after(jiffies, timeout)) { \
- printk("%s: timeout %s line %d\n", \
- dev->name, __FILE__, __LINE__); \
+ netdev_dbg(dev, "timeout %s line %d\n", \
+ __FILE__, __LINE__); \
break; \
} \
cpu_relax(); \
@@ -246,7 +248,7 @@ static void smc_reset(struct net_device *dev)
unsigned int ctl, cfg;
struct sk_buff *pending_skb;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* Disable all interrupts, block TX tasklet */
spin_lock_irq(&lp->lock);
@@ -339,7 +341,7 @@ static void smc_enable(struct net_device *dev)
void __iomem *ioaddr = lp->base;
int mask;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* see the header file for options in TCR/RCR DEFAULT */
SMC_SELECT_BANK(lp, 0);
@@ -373,7 +375,7 @@ static void smc_shutdown(struct net_device *dev)
void __iomem *ioaddr = lp->base;
struct sk_buff *pending_skb;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
/* no more interrupts for me */
spin_lock_irq(&lp->lock);
@@ -406,11 +408,11 @@ static inline void smc_rcv(struct net_device *dev)
void __iomem *ioaddr = lp->base;
unsigned int packet_number, status, packet_len;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
packet_number = SMC_GET_RXFIFO(lp);
if (unlikely(packet_number & RXFIFO_REMPTY)) {
- PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
+ PRINTK(dev, "smc_rcv with nothing on FIFO.\n");
return;
}
@@ -420,9 +422,8 @@ static inline void smc_rcv(struct net_device *dev)
/* First two words are status and packet length */
SMC_GET_PKT_HDR(lp, status, packet_len);
packet_len &= 0x07ff; /* mask off top bits */
- DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
- dev->name, packet_number, status,
- packet_len, packet_len);
+ DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
+ packet_number, status, packet_len, packet_len);
back:
if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
@@ -433,8 +434,8 @@ static inline void smc_rcv(struct net_device *dev)
}
if (packet_len < 6) {
/* bloody hardware */
- printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
- dev->name, packet_len, status);
+ netdev_err(dev, "fubar (rxlen %u status %x\n",
+ packet_len, status);
status |= RS_TOOSHORT;
}
SMC_WAIT_MMU_BUSY(lp);
@@ -551,7 +552,7 @@ static void smc_hardware_send_pkt(unsigned long data)
unsigned char *buf;
unsigned long flags;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
if (!smc_special_trylock(&lp->lock, flags)) {
netif_stop_queue(dev);
@@ -568,7 +569,7 @@ static void smc_hardware_send_pkt(unsigned long data)
packet_no = SMC_GET_AR(lp);
if (unlikely(packet_no & AR_FAILED)) {
- printk("%s: Memory allocation failed.\n", dev->name);
+ netdev_err(dev, "Memory allocation failed.\n");
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
smc_special_unlock(&lp->lock, flags);
@@ -581,8 +582,8 @@ static void smc_hardware_send_pkt(unsigned long data)
buf = skb->data;
len = skb->len;
- DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
- dev->name, packet_no, len, len, buf);
+ DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
+ packet_no, len, len, buf);
PRINT_PKT(buf, len);
/*
@@ -637,7 +638,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int numPages, poll_count, status;
unsigned long flags;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
BUG_ON(lp->pending_tx_skb != NULL);
@@ -654,7 +655,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
if (unlikely(numPages > 7)) {
- printk("%s: Far too big packet error.\n", dev->name);
+ netdev_warn(dev, "Far too big packet error.\n");
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
@@ -685,7 +686,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!poll_count) {
/* oh well, wait until the chip finds memory later */
netif_stop_queue(dev);
- DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
+ DBG(2, dev, "TX memory allocation deferred.\n");
SMC_ENABLE_INT(lp, IM_ALLOC_INT);
} else {
/*
@@ -709,12 +710,12 @@ static void smc_tx(struct net_device *dev)
void __iomem *ioaddr = lp->base;
unsigned int saved_packet, packet_no, tx_status, pkt_len;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
/* If the TX FIFO is empty then nothing to do */
packet_no = SMC_GET_TXFIFO(lp);
if (unlikely(packet_no & TXFIFO_TEMPTY)) {
- PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
+ PRINTK(dev, "smc_tx with nothing on FIFO.\n");
return;
}
@@ -725,8 +726,8 @@ static void smc_tx(struct net_device *dev)
/* read the first word (status word) from this packet */
SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
- DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
- dev->name, tx_status, packet_no);
+ DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n",
+ tx_status, packet_no);
if (!(tx_status & ES_TX_SUC))
dev->stats.tx_errors++;
@@ -735,14 +736,12 @@ static void smc_tx(struct net_device *dev)
dev->stats.tx_carrier_errors++;
if (tx_status & (ES_LATCOL | ES_16COL)) {
- PRINTK("%s: %s occurred on last xmit\n", dev->name,
+ PRINTK(dev, "%s occurred on last xmit\n",
(tx_status & ES_LATCOL) ?
"late collision" : "too many collisions");
dev->stats.tx_window_errors++;
if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
- printk(KERN_INFO "%s: unexpectedly large number of "
- "bad collisions. Please check duplex "
- "setting.\n", dev->name);
+ netdev_info(dev, "unexpectedly large number of bad collisions. Please check duplex setting.\n");
}
}
@@ -830,8 +829,8 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
/* Return to idle state */
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
- DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SELECT_BANK(lp, 2);
return phydata;
@@ -857,8 +856,8 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
/* Return to idle state */
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
- DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SELECT_BANK(lp, 2);
}
@@ -871,7 +870,7 @@ static void smc_phy_detect(struct net_device *dev)
struct smc_local *lp = netdev_priv(dev);
int phyaddr;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
lp->phy_type = 0;
@@ -886,8 +885,8 @@ static void smc_phy_detect(struct net_device *dev)
id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
- DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
- dev->name, id1, id2);
+ DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n",
+ id1, id2);
/* Make sure it is a valid identifier */
if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
@@ -910,7 +909,7 @@ static int smc_phy_fixed(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int bmcr, cfg1;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
/* Enter Link Disable state */
cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1044,7 +1043,7 @@ static void smc_phy_configure(struct work_struct *work)
int my_ad_caps; /* My Advertised capabilities */
int status;
- DBG(3, "%s:smc_program_phy()\n", dev->name);
+ DBG(3, dev, "smc_program_phy()\n");
spin_lock_irq(&lp->lock);
@@ -1055,7 +1054,7 @@ static void smc_phy_configure(struct work_struct *work)
goto smc_phy_configure_exit;
if (smc_phy_reset(dev, phyaddr)) {
- printk("%s: PHY reset timed out\n", dev->name);
+ netdev_info(dev, "PHY reset timed out\n");
goto smc_phy_configure_exit;
}
@@ -1082,7 +1081,7 @@ static void smc_phy_configure(struct work_struct *work)
my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- printk(KERN_INFO "Auto negotiation NOT supported\n");
+ netdev_info(dev, "Auto negotiation NOT supported\n");
smc_phy_fixed(dev);
goto smc_phy_configure_exit;
}
@@ -1118,8 +1117,8 @@ static void smc_phy_configure(struct work_struct *work)
*/
status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
- DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
- DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
+ DBG(2, dev, "phy caps=%x\n", my_phy_caps);
+ DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -1143,7 +1142,7 @@ static void smc_phy_interrupt(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int phy18;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
if (lp->phy_type == 0)
return;
@@ -1179,8 +1178,8 @@ static void smc_10bt_check_media(struct net_device *dev, int init)
netif_carrier_on(dev);
}
if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link %s\n", dev->name,
- new_carrier ? "up" : "down");
+ netdev_info(dev, "link %s\n",
+ new_carrier ? "up" : "down");
}
}
@@ -1211,7 +1210,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
int status, mask, timeout, card_stats;
int saved_pointer;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
spin_lock(&lp->lock);
@@ -1230,12 +1229,12 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
do {
status = SMC_GET_INT(lp);
- DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
- dev->name, status, mask,
- ({ int meminfo; SMC_SELECT_BANK(lp, 0);
- meminfo = SMC_GET_MIR(lp);
- SMC_SELECT_BANK(lp, 2); meminfo; }),
- SMC_GET_FIFO(lp));
+ DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+ status, mask,
+ ({ int meminfo; SMC_SELECT_BANK(lp, 0);
+ meminfo = SMC_GET_MIR(lp);
+ SMC_SELECT_BANK(lp, 2); meminfo; }),
+ SMC_GET_FIFO(lp));
status &= mask;
if (!status)
@@ -1243,20 +1242,20 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
if (status & IM_TX_INT) {
/* do this before RX as it will free memory quickly */
- DBG(3, "%s: TX int\n", dev->name);
+ DBG(3, dev, "TX int\n");
smc_tx(dev);
SMC_ACK_INT(lp, IM_TX_INT);
if (THROTTLE_TX_PKTS)
netif_wake_queue(dev);
} else if (status & IM_RCV_INT) {
- DBG(3, "%s: RX irq\n", dev->name);
+ DBG(3, dev, "RX irq\n");
smc_rcv(dev);
} else if (status & IM_ALLOC_INT) {
- DBG(3, "%s: Allocation irq\n", dev->name);
+ DBG(3, dev, "Allocation irq\n");
tasklet_hi_schedule(&lp->tx_task);
mask &= ~IM_ALLOC_INT;
} else if (status & IM_TX_EMPTY_INT) {
- DBG(3, "%s: TX empty\n", dev->name);
+ DBG(3, dev, "TX empty\n");
mask &= ~IM_TX_EMPTY_INT;
/* update stats */
@@ -1271,10 +1270,10 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
/* multiple collisions */
dev->stats.collisions += card_stats & 0xF;
} else if (status & IM_RX_OVRN_INT) {
- DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
- ({ int eph_st; SMC_SELECT_BANK(lp, 0);
- eph_st = SMC_GET_EPH_STATUS(lp);
- SMC_SELECT_BANK(lp, 2); eph_st; }));
+ DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n",
+ ({ int eph_st; SMC_SELECT_BANK(lp, 0);
+ eph_st = SMC_GET_EPH_STATUS(lp);
+ SMC_SELECT_BANK(lp, 2); eph_st; }));
SMC_ACK_INT(lp, IM_RX_OVRN_INT);
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
@@ -1285,7 +1284,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
+ PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n");
}
} while (--timeout);
@@ -1296,11 +1295,11 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
#ifndef CONFIG_NET_POLL_CONTROLLER
if (timeout == MAX_IRQ_LOOPS)
- PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
- dev->name, mask);
+ PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n",
+ mask);
#endif
- DBG(3, "%s: Interrupt done (%d loops)\n",
- dev->name, MAX_IRQ_LOOPS - timeout);
+ DBG(3, dev, "Interrupt done (%d loops)\n",
+ MAX_IRQ_LOOPS - timeout);
/*
* We return IRQ_HANDLED unconditionally here even if there was
@@ -1333,7 +1332,7 @@ static void smc_timeout(struct net_device *dev)
void __iomem *ioaddr = lp->base;
int status, mask, eph_st, meminfo, fifo;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
spin_lock_irq(&lp->lock);
status = SMC_GET_INT(lp);
@@ -1344,9 +1343,8 @@ static void smc_timeout(struct net_device *dev)
meminfo = SMC_GET_MIR(lp);
SMC_SELECT_BANK(lp, 2);
spin_unlock_irq(&lp->lock);
- PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
- "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
- dev->name, status, mask, meminfo, fifo, eph_st );
+ PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
+ status, mask, meminfo, fifo, eph_st);
smc_reset(dev);
smc_enable(dev);
@@ -1377,10 +1375,10 @@ static void smc_set_multicast_list(struct net_device *dev)
unsigned char multicast_table[8];
int update_multicast = 0;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
if (dev->flags & IFF_PROMISC) {
- DBG(2, "%s: RCR_PRMS\n", dev->name);
+ DBG(2, dev, "RCR_PRMS\n");
lp->rcr_cur_mode |= RCR_PRMS;
}
@@ -1395,7 +1393,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(2, "%s: RCR_ALMUL\n", dev->name);
+ DBG(2, dev, "RCR_ALMUL\n");
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1437,7 +1435,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
- DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
+ DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n");
lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
/*
@@ -1470,7 +1468,7 @@ smc_open(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* Setup the default Register Modes */
lp->tcr_cur_mode = TCR_DEFAULT;
@@ -1514,7 +1512,7 @@ static int smc_close(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1694,7 +1692,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev,
int i;
int imax;
- DBG(1, "Reading %d bytes at %d(0x%x)\n",
+ DBG(1, dev, "Reading %d bytes at %d(0x%x)\n",
eeprom->len, eeprom->offset, eeprom->offset);
imax = smc_ethtool_geteeprom_len(dev);
for (i = 0; i < eeprom->len; i += 2) {
@@ -1706,7 +1704,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev,
ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
if (ret != 0)
return ret;
- DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
+ DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
data[i] = (wbuf >> 8) & 0xff;
data[i+1] = wbuf & 0xff;
}
@@ -1719,8 +1717,8 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
int i;
int imax;
- DBG(1, "Writing %d bytes to %d(0x%x)\n",
- eeprom->len, eeprom->offset, eeprom->offset);
+ DBG(1, dev, "Writing %d bytes to %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
imax = smc_ethtool_geteeprom_len(dev);
for (i = 0; i < eeprom->len; i += 2) {
int ret;
@@ -1729,7 +1727,7 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
if (offset > imax)
break;
wbuf = (data[i] << 8) | data[i + 1];
- DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
+ DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
if (ret != 0)
return ret;
@@ -1784,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
int timeout = 20;
unsigned long cookie;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
cookie = probe_irq_on();
@@ -1856,21 +1854,21 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
unsigned long irq_flags)
{
struct smc_local *lp = netdev_priv(dev);
- static int version_printed = 0;
int retval;
unsigned int val, revision_register;
const char *version_string;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
/* First, see if the high byte is 0x33 */
val = SMC_CURRENT_BANK(lp);
- DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
+ DBG(2, dev, "%s: bank signature probe returned 0x%04x\n",
+ CARDNAME, val);
if ((val & 0xFF00) != 0x3300) {
if ((val & 0xFF) == 0x33) {
- printk(KERN_WARNING
- "%s: Detected possible byte-swapped interface"
- " at IOADDR %p\n", CARDNAME, ioaddr);
+ netdev_warn(dev,
+ "%s: Detected possible byte-swapped interface at IOADDR %p\n",
+ CARDNAME, ioaddr);
}
retval = -ENODEV;
goto err_out;
@@ -1897,8 +1895,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
val = SMC_GET_BASE(lp);
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
- printk("%s: IOADDR %p doesn't match configuration (%x).\n",
- CARDNAME, ioaddr, val);
+ netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
+ CARDNAME, ioaddr, val);
}
/*
@@ -1908,21 +1906,19 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
*/
SMC_SELECT_BANK(lp, 3);
revision_register = SMC_GET_REV(lp);
- DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
+ DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
version_string = chip_ids[ (revision_register >> 4) & 0xF];
if (!version_string || (revision_register & 0xff00) != 0x3300) {
/* I don't recognize this chip, so... */
- printk("%s: IO %p: Unrecognized revision register 0x%04x"
- ", Contact author.\n", CARDNAME,
- ioaddr, revision_register);
+ netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n",
+ CARDNAME, ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
}
/* At this point I'll assume that the chip is an SMC91x. */
- if (version_printed++ == 0)
- printk("%s", version);
+ pr_info_once("%s\n", version);
/* fill in some of the fields */
dev->base_addr = (unsigned long)ioaddr;
@@ -1940,7 +1936,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/*
* If dev->irq is 0, then the device has to be banged on to see
* what the IRQ is.
- *
+ *
* This banging doesn't always detect the IRQ, for unknown reasons.
* a workaround is to reset the chip and try again.
*
@@ -1965,8 +1961,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
}
}
if (dev->irq == 0) {
- printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
- dev->name);
+ netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
retval = -ENODEV;
goto err_out;
}
@@ -2030,32 +2025,31 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %p IRQ %d",
- dev->name, version_string, revision_register & 0x0f,
- lp->base, dev->irq);
+ netdev_info(dev, "%s (rev %d) at %p IRQ %d",
+ version_string, revision_register & 0x0f,
+ lp->base, dev->irq);
if (dev->dma != (unsigned char)-1)
- printk(" DMA %d", dev->dma);
+ pr_cont(" DMA %d", dev->dma);
- printk("%s%s\n",
+ pr_cont("%s%s\n",
lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", dev->name);
+ netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Ethernet addr: %pM\n",
+ dev->dev_addr);
}
if (lp->phy_type == 0) {
- PRINTK("%s: No PHY found\n", dev->name);
+ PRINTK(dev, "No PHY found\n");
} else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
- PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
+ PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n");
} else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
- PRINTK("%s: PHY LAN83C180\n", dev->name);
+ PRINTK(dev, "PHY LAN83C180\n");
}
}
@@ -2165,7 +2159,8 @@ static inline void smc_request_datacs(struct platform_device *pdev, struct net_d
return;
if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
- printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ netdev_info(ndev, "%s: failed to request datacs memory region.\n",
+ CARDNAME);
return;
}
@@ -2307,7 +2302,7 @@ static int smc_drv_probe(struct platform_device *pdev)
out_free_netdev:
free_netdev(ndev);
out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
+ pr_info("%s: not found (%d).\n", CARDNAME, ret);
return ret;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 98eedb90cdc..c9d4c872e81 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -907,8 +907,8 @@ static const char * chip_ids[ 16 ] = {
({ \
int __b = SMC_CURRENT_BANK(lp); \
if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
- printk( "%s: bank reg screwed (0x%04x)\n", \
- CARDNAME, __b ); \
+ pr_err("%s: bank reg screwed (0x%04x)\n", \
+ CARDNAME, __b); \
BUG(); \
} \
reg<<SMC_IO_SHIFT; \
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 5fdbc2686eb..8564f23a679 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2167,7 +2167,7 @@ static int smsc911x_init(struct net_device *dev)
udelay(1000);
if (to == 0) {
- pr_err("Device not READY in 100ms aborting\n");
+ netdev_err(dev, "Device not READY in 100ms aborting\n");
return -ENODEV;
}
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(pdata, probe,
"MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
- memcpy(dev->dev_addr, pdata->config.mac, 6);
+ memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
SMSC_TRACE(pdata, probe,
"MAC Address specified by platform data");
} else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 5f9e79f7f2d..f433d97aa09 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -19,6 +19,8 @@
***************************************************************************
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -33,7 +35,6 @@
#include "smsc9420.h"
#define DRV_NAME "smsc9420"
-#define PFX DRV_NAME ": "
#define DRV_MDIONAME "smsc9420-mdio"
#define DRV_DESCRIPTION "SMSC LAN9420 driver"
#define DRV_VERSION "1.01"
@@ -97,21 +98,6 @@ static uint debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "debug level");
-#define smsc_dbg(TYPE, f, a...) \
-do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f "\n", ## a); \
-} while (0)
-
-#define smsc_info(TYPE, f, a...) \
-do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f "\n", ## a); \
-} while (0)
-
-#define smsc_warn(TYPE, f, a...) \
-do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f "\n", ## a); \
-} while (0)
-
static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
{
return ioread32(pd->ioaddr + offset);
@@ -140,7 +126,7 @@ static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
/* confirm MII not busy */
if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
- smsc_warn(DRV, "MII is busy???");
+ netif_warn(pd, drv, pd->dev, "MII is busy???\n");
goto out;
}
@@ -159,7 +145,7 @@ static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
udelay(10);
}
- smsc_warn(DRV, "MII busy timeout!");
+ netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
out:
spin_unlock_irqrestore(&pd->phy_lock, flags);
@@ -178,7 +164,7 @@ static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
/* confirm MII not busy */
if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
- smsc_warn(DRV, "MII is busy???");
+ netif_warn(pd, drv, pd->dev, "MII is busy???\n");
goto out;
}
@@ -200,7 +186,7 @@ static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
udelay(10);
}
- smsc_warn(DRV, "MII busy timeout!");
+ netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
out:
spin_unlock_irqrestore(&pd->phy_lock, flags);
@@ -222,7 +208,7 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
BUG_ON(!pd);
if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
- smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy");
+ netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__);
return -EIO;
}
@@ -235,7 +221,7 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
return 0;
} while (timeout--);
- smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out");
+ netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__);
return -EIO;
}
@@ -347,9 +333,9 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
int timeout = 100;
u32 e2cmd;
- smsc_dbg(HW, "op 0x%08x", op);
+ netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op);
if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
- smsc_warn(HW, "Busy at start");
+ netif_warn(pd, hw, pd->dev, "Busy at start\n");
return -EBUSY;
}
@@ -362,12 +348,13 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
} while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
if (!timeout) {
- smsc_info(HW, "TIMED OUT");
+ netif_info(pd, hw, pd->dev, "TIMED OUT\n");
return -EAGAIN;
}
if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
- smsc_info(HW, "Error occurred during eeprom operation");
+ netif_info(pd, hw, pd->dev,
+ "Error occurred during eeprom operation\n");
return -EINVAL;
}
@@ -380,7 +367,7 @@ static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd,
u32 op = E2P_CMD_EPC_CMD_READ_ | address;
int ret;
- smsc_dbg(HW, "address 0x%x", address);
+ netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address);
ret = smsc9420_eeprom_send_cmd(pd, op);
if (!ret)
@@ -395,7 +382,7 @@ static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd,
u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
int ret;
- smsc_dbg(HW, "address 0x%x, data 0x%x", address, data);
+ netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data);
ret = smsc9420_eeprom_send_cmd(pd, op);
if (!ret) {
@@ -492,7 +479,8 @@ static void smsc9420_check_mac_address(struct net_device *dev)
/* Check if mac address has been specified when bringing interface up */
if (is_valid_ether_addr(dev->dev_addr)) {
smsc9420_set_mac_address(dev);
- smsc_dbg(PROBE, "MAC Address is specified by configuration");
+ netif_dbg(pd, probe, pd->dev,
+ "MAC Address is specified by configuration\n");
} else {
/* Try reading mac address from device. if EEPROM is present
* it will already have been set */
@@ -507,12 +495,14 @@ static void smsc9420_check_mac_address(struct net_device *dev)
if (is_valid_ether_addr(dev->dev_addr)) {
/* eeprom values are valid so use them */
- smsc_dbg(PROBE, "Mac Address is read from EEPROM");
+ netif_dbg(pd, probe, pd->dev,
+ "Mac Address is read from EEPROM\n");
} else {
/* eeprom values are invalid, generate random MAC */
eth_hw_addr_random(dev);
smsc9420_set_mac_address(dev);
- smsc_dbg(PROBE, "MAC Address is set to random");
+ netif_dbg(pd, probe, pd->dev,
+ "MAC Address is set to random\n");
}
}
}
@@ -535,7 +525,7 @@ static void smsc9420_stop_tx(struct smsc9420_pdata *pd)
}
if (!timeout)
- smsc_warn(IFDOWN, "TX DMAC failed to stop");
+ netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n");
/* ACK Tx DMAC stop bit */
smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);
@@ -646,7 +636,8 @@ static void smsc9420_stop_rx(struct smsc9420_pdata *pd)
}
if (!timeout)
- smsc_warn(IFDOWN, "RX DMAC did not stop! timeout.");
+ netif_warn(pd, ifdown, pd->dev,
+ "RX DMAC did not stop! timeout\n");
/* ACK the Rx DMAC stop bit */
smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_);
@@ -736,7 +727,7 @@ static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd)
smsc9420_reg_read(pd, BUS_MODE);
udelay(2);
if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_)
- smsc_warn(DRV, "Software reset not cleared");
+ netif_warn(pd, drv, pd->dev, "Software reset not cleared\n");
}
static int smsc9420_stop(struct net_device *dev)
@@ -855,7 +846,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pd->pdev, mapping)) {
dev_kfree_skb_any(skb);
- smsc_warn(RX_ERR, "pci_map_single failed!");
+ netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
return -ENOMEM;
}
@@ -1004,7 +995,8 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
mapping = pci_map_single(pd->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pd->pdev, mapping)) {
- smsc_warn(TX_ERR, "pci_map_single failed, dropping packet");
+ netif_warn(pd, tx_err, pd->dev,
+ "pci_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
}
@@ -1056,12 +1048,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
if (dev->flags & IFF_PROMISC) {
- smsc_dbg(HW, "Promiscuous Mode Enabled");
+ netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n");
mac_cr |= MAC_CR_PRMS_;
mac_cr &= (~MAC_CR_MCPAS_);
mac_cr &= (~MAC_CR_HPFILT_);
} else if (dev->flags & IFF_ALLMULTI) {
- smsc_dbg(HW, "Receive all Multicast Enabled");
+ netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n");
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
@@ -1069,7 +1061,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
struct netdev_hw_addr *ha;
u32 hash_lo = 0, hash_hi = 0;
- smsc_dbg(HW, "Multicast filter enabled");
+ netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n");
netdev_for_each_mc_addr(ha, dev) {
u32 bit_num = smsc9420_hash(ha->addr);
u32 mask = 1 << (bit_num & 0x1F);
@@ -1087,7 +1079,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr &= (~MAC_CR_MCPAS_);
mac_cr |= MAC_CR_HPFILT_;
} else {
- smsc_dbg(HW, "Receive own packets only.");
+ netif_dbg(pd, hw, pd->dev, "Receive own packets only\n");
smsc9420_reg_write(pd, HASHH, 0);
smsc9420_reg_write(pd, HASHL, 0);
@@ -1115,11 +1107,11 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
else
flow = 0;
- smsc_info(LINK, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n",
+ cap & FLOW_CTRL_RX ? "enabled" : "disabled",
+ cap & FLOW_CTRL_TX ? "enabled" : "disabled");
} else {
- smsc_info(LINK, "half duplex");
+ netif_info(pd, link, pd->dev, "half duplex\n");
flow = 0;
}
@@ -1137,10 +1129,10 @@ static void smsc9420_phy_adjust_link(struct net_device *dev)
if (phy_dev->duplex != pd->last_duplex) {
u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
if (phy_dev->duplex) {
- smsc_dbg(LINK, "full duplex mode");
+ netif_dbg(pd, link, pd->dev, "full duplex mode\n");
mac_cr |= MAC_CR_FDPX_;
} else {
- smsc_dbg(LINK, "half duplex mode");
+ netif_dbg(pd, link, pd->dev, "half duplex mode\n");
mac_cr &= ~MAC_CR_FDPX_;
}
smsc9420_reg_write(pd, MAC_CR, mac_cr);
@@ -1152,9 +1144,9 @@ static void smsc9420_phy_adjust_link(struct net_device *dev)
carrier = netif_carrier_ok(dev);
if (carrier != pd->last_carrier) {
if (carrier)
- smsc_dbg(LINK, "carrier OK");
+ netif_dbg(pd, link, pd->dev, "carrier OK\n");
else
- smsc_dbg(LINK, "no carrier");
+ netif_dbg(pd, link, pd->dev, "no carrier\n");
pd->last_carrier = carrier;
}
}
@@ -1168,24 +1160,24 @@ static int smsc9420_mii_probe(struct net_device *dev)
/* Device only supports internal PHY at address 1 */
if (!pd->mii_bus->phy_map[1]) {
- pr_err("%s: no PHY found at address 1\n", dev->name);
+ netdev_err(dev, "no PHY found at address 1\n");
return -ENODEV;
}
phydev = pd->mii_bus->phy_map[1];
- smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr,
- phydev->phy_id);
+ netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n",
+ phydev->addr, phydev->phy_id);
phydev = phy_connect(dev, dev_name(&phydev->dev),
smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- pr_err("%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
- pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
- dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
/* mask with MAC supported features */
phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -1223,12 +1215,12 @@ static int smsc9420_mii_init(struct net_device *dev)
pd->mii_bus->phy_mask = ~(1 << 1);
if (mdiobus_register(pd->mii_bus)) {
- smsc_warn(PROBE, "Error registering mii bus");
+ netif_warn(pd, probe, pd->dev, "Error registering mii bus\n");
goto err_out_free_bus_2;
}
if (smsc9420_mii_probe(dev) < 0) {
- smsc_warn(PROBE, "Error probing mii bus");
+ netif_warn(pd, probe, pd->dev, "Error probing mii bus\n");
goto err_out_unregister_bus_3;
}
@@ -1281,12 +1273,11 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
BUG_ON(!pd->rx_ring);
- pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
- RX_RING_SIZE), GFP_KERNEL);
- if (pd->rx_buffers == NULL) {
- smsc_warn(IFUP, "Failed to allocated rx_buffers");
+ pd->rx_buffers = kmalloc_array(RX_RING_SIZE,
+ sizeof(struct smsc9420_ring_info),
+ GFP_KERNEL);
+ if (pd->rx_buffers == NULL)
goto out;
- }
/* initialize the rx ring */
for (i = 0; i < RX_RING_SIZE; i++) {
@@ -1301,7 +1292,8 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
/* now allocate the entire ring of skbs */
for (i = 0; i < RX_RING_SIZE; i++) {
if (smsc9420_alloc_rx_buffer(pd, i)) {
- smsc_warn(IFUP, "failed to allocate rx skb %d", i);
+ netif_warn(pd, ifup, pd->dev,
+ "failed to allocate rx skb %d\n", i);
goto out_free_rx_skbs;
}
}
@@ -1310,13 +1302,14 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
pd->rx_ring_tail = 0;
smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
- smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1));
+ netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n",
+ smsc9420_reg_read(pd, VLAN1));
if (pd->rx_csum) {
/* Enable RX COE */
u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
smsc9420_reg_write(pd, COE_CR, coe);
- smsc_dbg(IFUP, "COE_CR = 0x%08x", coe);
+ netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe);
}
smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
@@ -1339,7 +1332,8 @@ static int smsc9420_open(struct net_device *dev)
int result = 0, timeout;
if (!is_valid_ether_addr(dev->dev_addr)) {
- smsc_warn(IFUP, "dev_addr is not a valid MAC address");
+ netif_warn(pd, ifup, pd->dev,
+ "dev_addr is not a valid MAC address\n");
result = -EADDRNOTAVAIL;
goto out_0;
}
@@ -1358,7 +1352,7 @@ static int smsc9420_open(struct net_device *dev)
result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
if (result) {
- smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
+ netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq);
result = -ENODEV;
goto out_0;
}
@@ -1393,7 +1387,7 @@ static int smsc9420_open(struct net_device *dev)
smsc9420_pci_flush_write(pd);
/* test the IRQ connection to the ISR */
- smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
+ netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq);
pd->software_irq_signal = false;
spin_lock_irqsave(&pd->int_lock, flags);
@@ -1423,30 +1417,32 @@ static int smsc9420_open(struct net_device *dev)
spin_unlock_irqrestore(&pd->int_lock, flags);
if (!pd->software_irq_signal) {
- smsc_warn(IFUP, "ISR failed signaling test");
+ netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n");
result = -ENODEV;
goto out_free_irq_1;
}
- smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
+ netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq);
result = smsc9420_alloc_tx_ring(pd);
if (result) {
- smsc_warn(IFUP, "Failed to Initialize tx dma ring");
+ netif_warn(pd, ifup, pd->dev,
+ "Failed to Initialize tx dma ring\n");
result = -ENOMEM;
goto out_free_irq_1;
}
result = smsc9420_alloc_rx_ring(pd);
if (result) {
- smsc_warn(IFUP, "Failed to Initialize rx dma ring");
+ netif_warn(pd, ifup, pd->dev,
+ "Failed to Initialize rx dma ring\n");
result = -ENOMEM;
goto out_free_tx_ring_2;
}
result = smsc9420_mii_init(dev);
if (result) {
- smsc_warn(IFUP, "Failed to initialize Phy");
+ netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n");
result = -ENODEV;
goto out_free_rx_ring_3;
}
@@ -1547,7 +1543,8 @@ static int smsc9420_resume(struct pci_dev *pdev)
err = pci_enable_wake(pdev, 0, 0);
if (err)
- smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
+ netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
+ err);
if (netif_running(dev)) {
/* FIXME: gross. It looks like ancient PM relic.*/
@@ -1582,12 +1579,12 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int result = 0;
u32 id_rev;
- printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n");
+ pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION);
/* First do the PCI initialisation */
result = pci_enable_device(pdev);
if (unlikely(result)) {
- printk(KERN_ERR "Cannot enable smsc9420\n");
+ pr_err("Cannot enable smsc9420\n");
goto out_0;
}
@@ -1600,24 +1597,24 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
SET_NETDEV_DEV(dev, &pdev->dev);
if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
- printk(KERN_ERR "Cannot find PCI device base address\n");
+ netdev_err(dev, "Cannot find PCI device base address\n");
goto out_free_netdev_2;
}
if ((pci_request_regions(pdev, DRV_NAME))) {
- printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
+ netdev_err(dev, "Cannot obtain PCI resources, aborting\n");
goto out_free_netdev_2;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "No usable DMA configuration, aborting.\n");
+ netdev_err(dev, "No usable DMA configuration, aborting\n");
goto out_free_regions_3;
}
virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
pci_resource_len(pdev, SMSC_BAR));
if (!virt_addr) {
- printk(KERN_ERR "Cannot map device registers, aborting.\n");
+ netdev_err(dev, "Cannot map device registers, aborting\n");
goto out_free_regions_3;
}
@@ -1646,16 +1643,17 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd->msg_enable = smsc_debug;
pd->rx_csum = true;
- smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr);
+ netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr);
id_rev = smsc9420_reg_read(pd, ID_REV);
switch (id_rev & 0xFFFF0000) {
case 0x94200000:
- smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev);
+ netif_info(pd, probe, pd->dev,
+ "LAN9420 identified, ID_REV=0x%08X\n", id_rev);
break;
default:
- smsc_warn(PROBE, "LAN9420 NOT identified");
- smsc_warn(PROBE, "ID_REV=0x%08X", id_rev);
+ netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n");
+ netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev);
goto out_free_dmadesc_5;
}
@@ -1670,7 +1668,8 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
result = register_netdev(dev);
if (result) {
- smsc_warn(PROBE, "error %i registering device", result);
+ netif_warn(pd, probe, pd->dev, "error %i registering device\n",
+ result);
goto out_free_dmadesc_5;
}
@@ -1707,8 +1706,6 @@ static void smsc9420_remove(struct pci_dev *pdev)
if (!dev)
return;
- pci_set_drvdata(pdev, NULL);
-
pd = netdev_priv(dev);
unregister_netdev(dev);
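
The smsc9420 conversion above replaces driver-private smsc_dbg/smsc_warn macros with the generic netif message-level helpers, gated by the msg_enable bitmap the probe path already fills in. A minimal sketch of how such a driver typically wires this up; the struct and function names (my_priv, my_open_example) are illustrative, not taken from the patch:

#include <linux/module.h>
#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;
	u32 msg_enable;		/* bitmap consulted by netif_dbg()/netif_warn() */
};

static int debug = -1;		/* -1 means: fall back to the default mask below */
module_param(debug, int, 0);

static void my_open_example(struct my_priv *pd)
{
	/* Resolve the module parameter into a NETIF_MSG_* bitmap once. */
	pd->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_IFUP);

	/* Emitted only when NETIF_MSG_IFUP is set in pd->msg_enable. */
	netif_dbg(pd, ifup, pd->dev, "bringing interface up\n");
	netif_warn(pd, ifup, pd->dev, "example warning, IRQ %d\n", pd->dev->irq);
}
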
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7eb8babed2c..fc94f202a43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -451,14 +451,14 @@ struct mac_device_info {
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
- unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
- unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
extern const struct stmmac_chain_mode_ops chain_mode_ops;
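
The header hunks here and in the following stmmac/cpts/skfp/gelic headers all drop the extern keyword from function prototypes. For functions, extern is implicit, so the two forms below are equivalent and the shorter one is the preferred kernel style; object declarations, by contrast, still need it:

/* Equivalent function declarations: 'extern' adds nothing here. */
extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
void stmmac_set_mac(void __iomem *ioaddr, bool enable);

/* For data objects exported from another translation unit,
 * 'extern' remains required in the header. */
extern const struct stmmac_ring_mode_ops ring_mode_ops;
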
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 8e5662ce488..def266da55d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -104,14 +104,13 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 48ec001566b..8607488cbcf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -128,8 +128,8 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_err_octets;
};
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f16a9bdf45b..22f89ffdfd9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -110,14 +110,14 @@ struct stmmac_priv {
extern int phyaddr;
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8d4ccd35a01..8a7a23a84ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
if (config.flags)
return -EINVAL;
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- priv->hwts_tx_en = 0;
- break;
- case HWTSTAMP_TX_ON:
- priv->hwts_tx_en = 1;
- break;
- default:
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- }
if (priv->adv_ts) {
switch (config.rx_filter) {
@@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
}
}
priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
if (!priv->hwts_tx_en && !priv->hwts_rx_en)
priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
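
The tx_type handling above (and the analogous cpsw and ixp4xx hunks later) is restructured so the value is only validated up front and hwts_tx_en is assigned after the rx_filter checks have passed; a rejected request can no longer leave tx timestamping half-enabled. A minimal sketch of the resulting pattern under assumed names (my_ts_priv, example_hwtstamp_set are illustrative):

#include <linux/errno.h>
#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/uaccess.h>

struct my_ts_priv {		/* illustrative, not from the patch */
	int hwts_tx_en;
};

static int example_hwtstamp_set(struct my_ts_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* Validate first; do not touch driver state yet. */
	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* ... validate cfg.rx_filter here, returning -ERANGE on failure ... */

	/* Commit only once every field has been accepted. */
	priv->hwts_tx_en = (cfg.tx_type == HWTSTAMP_TX_ON);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
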
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 023b7c29cb2..644d80ece06 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(ndev);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, priv->ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 759441b29e5..b4d50d74ba1 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
#if defined(CONFIG_SPARC)
addr = of_get_property(cp->of_node, "local-mac-address", NULL);
if (addr != NULL) {
- memcpy(dev_addr, addr, 6);
+ memcpy(dev_addr, addr, ETH_ALEN);
goto done;
}
#endif
@@ -5168,7 +5168,6 @@ err_out_free_netdev:
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
@@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f28460ce24a..388540fcb97 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9875,7 +9875,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -9900,7 +9899,6 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e62df2b8130..b5655b79bd3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
return -1;
#endif
}
- memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
@@ -2806,8 +2806,6 @@ static void gem_remove_one(struct pci_dev *pdev)
iounmap(gp->regs);
pci_release_regions(pdev);
free_netdev(dev);
-
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index e37b587b386..0dbf46f08ed 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
addr = of_get_property(dp, "local-mac-address", &len);
- if (qfe_slot != -1 && addr && len == 6)
- memcpy(dev->dev_addr, addr, 6);
+ if (qfe_slot != -1 && addr && len == ETH_ALEN)
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
else
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
}
hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
(addr = of_get_property(dp, "local-mac-address", &len))
!= NULL &&
len == 6) {
- memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
} else {
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
}
#else
get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
pci_release_regions(hp->happy_dev);
free_netdev(net_dev);
-
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b072f4dba03..5695ae2411d 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
if (!dev)
return -ENOMEM;
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
qe = netdev_priv(dev);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 571452e786d..dd0dd6279b4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2447,7 +2447,6 @@ static void bdx_remove(struct pci_dev *pdev)
iounmap(nic->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
vfree(nic);
RET();
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de71b1ec462..53150c25a96 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA
To compile this driver as a module, choose M here: the module
will be called davinci_cpdma. This is recommended.
+config TI_CPSW_PHY_SEL
+ boolean "TI CPSW Switch Phy sel Support"
+ depends on TI_CPSW
+ ---help---
+ This driver supports configuring the PHY mode of the PHYs
+ connected to the CPSW.
+
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
+ select TI_CPSW_PHY_SEL
---help---
This driver supports TI's CPSW Ethernet Switch.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index c65148e8aa1..9cfaab8152b 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 00000000000..148da9ae836
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,161 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII 0
+#define AM33XX_GMII_SEL_MODE_RMII 1
+#define AM33XX_GMII_SEL_MODE_RGMII 2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+
+struct cpsw_phy_sel_priv {
+ struct device *dev;
+ u32 __iomem *gmii_sel;
+ bool rmii_clock_external;
+ void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
+ mask = 0x3 << (slave * 2) | BIT(slave + 6);
+ mode <<= slave * 2;
+
+ if (priv->rmii_clock_external) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+ else
+ mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+ }
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+ struct device_node *node = (struct device_node *)data;
+ return dev->of_node == node &&
+ dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+ struct device_node *node;
+ struct cpsw_phy_sel_priv *priv;
+
+ node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+ if (!node) {
+ dev_err(dev, "Phy mode driver DT not found\n");
+ return;
+ }
+
+ dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ priv = dev_get_drvdata(dev);
+
+ priv->cpsw_phy_sel(priv, phy_mode, slave);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+ {
+ .compatible = "ti,am3352-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct of_device_id *of_id;
+ struct cpsw_phy_sel_priv *priv;
+
+ of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+ return -ENOMEM;
+ }
+
+ priv->cpsw_phy_sel = of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+ priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->gmii_sel))
+ return PTR_ERR(priv->gmii_sel);
+
+ if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+ priv->rmii_clock_external = true;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+ .probe = cpsw_phy_sel_probe,
+ .driver = {
+ .name = "cpsw-phy-sel",
+ .owner = THIS_MODULE,
+ .of_match_table = cpsw_phy_sel_id_table,
+ },
+};
+
+module_platform_driver(cpsw_phy_sel_driver);
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_LICENSE("GPL v2");
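
The entry point this new file exports, cpsw_phy_sel(), is consumed from the CPSW slave-open path in the cpsw.c hunks further below. For reference, a hedged sketch of such a call site; the helper name example_configure_gmii_sel is illustrative only:

#include <linux/phy.h>
#include "cpsw.h"

/* During slave bring-up, ask the phy-sel helper to program GMII_SEL
 * for this slave according to the PHY's interface mode. */
static void example_configure_gmii_sel(struct device *cpsw_dev,
				       struct phy_device *phy, int slave_num)
{
	cpsw_phy_sel(cpsw_dev, phy->interface, slave_num);
}
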
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cc3ce557e4a..7536a4c0129 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,8 +367,6 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct resource *cpsw_res;
- struct resource *cpsw_wr_res;
struct napi_struct napi;
struct device *dev;
struct cpsw_platform_data data;
@@ -969,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
priv->host_port, ALE_VLAN, slave->port_vlan);
}
-static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+static void soft_reset_slave(struct cpsw_slave *slave)
{
char name[32];
- u32 slave_port;
-
- sprintf(name, "slave-%d", slave->slave_num);
+ snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
soft_reset(name, &slave->sliver->soft_reset);
+}
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ u32 slave_port;
+
+ soft_reset_slave(slave);
/* setup priority mapping */
__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
@@ -1016,6 +1019,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
dev_info(priv->dev, "phy found : id is : 0x%x\n",
slave->phy->phy_id);
phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
+ slave->slave_num);
}
}
@@ -1321,6 +1328,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
+ if (priv->version != CPSW_VERSION_1 &&
+ priv->version != CPSW_VERSION_2)
+ return -EOPNOTSUPP;
+
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
@@ -1328,16 +1339,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
if (cfg.flags)
return -EINVAL;
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_OFF:
- cpts->tx_enable = 0;
- break;
- case HWTSTAMP_TX_ON:
- cpts->tx_enable = 1;
- break;
- default:
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- }
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
@@ -1364,6 +1367,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
+ cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;
+
switch (priv->version) {
case CPSW_VERSION_1:
cpsw_hwtstamp_v1(priv);
@@ -1372,7 +1377,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
cpsw_hwtstamp_v2(priv);
break;
default:
- return -ENOTSUPP;
+ WARN_ON(1);
}
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
@@ -1705,62 +1710,55 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (of_property_read_u32(node, "active_slave", &prop)) {
pr_err("Missing active_slave property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
pr_err("Missing cpts_clock_mult property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->cpts_clock_mult = prop;
if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
pr_err("Missing cpts_clock_shift property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->cpts_clock_shift = prop;
- data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
- GFP_KERNEL);
+ data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
+ * sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
if (!data->slave_data)
- return -EINVAL;
+ return -ENOMEM;
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
pr_err("Missing cpdma_channels property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
pr_err("Missing ale_entries property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->ale_entries = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
pr_err("Missing bd_ram_size property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "rx_descs", &prop)) {
pr_err("Missing rx_descs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->rx_descs = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
pr_err("Missing mac_control property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->mac_control = prop;
@@ -1791,8 +1789,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
@@ -1822,10 +1819,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
return 0;
-
-error_ret:
- kfree(data->slave_data);
- return ret;
}
static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1867,7 +1860,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->coal_intvl = 0;
priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
- priv_sl2->cpsw_res = priv->cpsw_res;
priv_sl2->regs = priv->regs;
priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1911,8 +1903,8 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpsw_priv *priv;
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
- void __iomem *ss_regs, *wr_regs;
- struct resource *res;
+ void __iomem *ss_regs;
+ struct resource *res, *ss_res;
u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i, k = 0;
@@ -1948,7 +1940,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (cpsw_probe_dt(&priv->data, pdev)) {
pr_err("cpsw: platform data missing\n");
ret = -ENODEV;
- goto clean_ndev_ret;
+ goto clean_runtime_disable_ret;
}
data = &priv->data;
@@ -1962,11 +1954,12 @@ static int cpsw_probe(struct platform_device *pdev)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
- priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
- GFP_KERNEL);
+ priv->slaves = devm_kzalloc(&pdev->dev,
+ sizeof(struct cpsw_slave) * data->slaves,
+ GFP_KERNEL);
if (!priv->slaves) {
- ret = -EBUSY;
- goto clean_ndev_ret;
+ ret = -ENOMEM;
+ goto clean_runtime_disable_ret;
}
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
@@ -1974,55 +1967,31 @@ static int cpsw_probe(struct platform_device *pdev)
priv->slaves[0].ndev = ndev;
priv->emac_port = 0;
- priv->clk = clk_get(&pdev->dev, "fck");
+ priv->clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "fck is not found\n");
+ dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
- goto clean_slave_ret;
+ goto clean_runtime_disable_ret;
}
priv->coal_intvl = 0;
priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
- priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!priv->cpsw_res) {
- dev_err(priv->dev, "error getting i/o resource\n");
- ret = -ENOENT;
- goto clean_clk_ret;
- }
- if (!request_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res), ndev->name)) {
- dev_err(priv->dev, "failed request i/o region\n");
- ret = -ENXIO;
- goto clean_clk_ret;
- }
- ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
- if (!ss_regs) {
- dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_iores_ret;
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
priv->version = __raw_readl(&priv->regs->id_ver);
priv->host_port = HOST_PORT_NUM;
- priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!priv->cpsw_wr_res) {
- dev_err(priv->dev, "error getting i/o resource\n");
- ret = -ENOENT;
- goto clean_iomap_ret;
- }
- if (!request_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res), ndev->name)) {
- dev_err(priv->dev, "failed request i/o region\n");
- ret = -ENXIO;
- goto clean_iomap_ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->wr_regs)) {
+ ret = PTR_ERR(priv->wr_regs);
+ goto clean_runtime_disable_ret;
}
- wr_regs = ioremap(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
- if (!wr_regs) {
- dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_wr_iores_ret;
- }
- priv->wr_regs = wr_regs;
memset(&dma_params, 0, sizeof(dma_params));
memset(&ale_params, 0, sizeof(ale_params));
@@ -2053,12 +2022,12 @@ static int cpsw_probe(struct platform_device *pdev)
slave_size = CPSW2_SLAVE_SIZE;
sliver_offset = CPSW2_SLIVER_OFFSET;
dma_params.desc_mem_phys =
- (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+ (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
break;
default:
dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
ret = -ENODEV;
- goto clean_cpsw_wr_iores_ret;
+ goto clean_runtime_disable_ret;
}
for (i = 0; i < priv->data.slaves; i++) {
struct cpsw_slave *slave = &priv->slaves[i];
@@ -2086,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (!priv->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_wr_iomap_ret;
+ goto clean_runtime_disable_ret;
}
priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2121,8 +2090,8 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, cpsw_interrupt, 0,
- dev_name(&pdev->dev), priv)) {
+ if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
+ dev_name(priv->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
@@ -2144,7 +2113,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret) {
dev_err(priv->dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_irq_ret;
+ goto clean_ale_ret;
}
if (cpts_register(&pdev->dev, priv->cpts,
@@ -2152,44 +2121,27 @@ static int cpsw_probe(struct platform_device *pdev)
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- priv->cpsw_res->start, ndev->irq);
+ ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_irq_ret;
+ goto clean_ale_ret;
}
}
return 0;
-clean_irq_ret:
- for (i = 0; i < priv->num_irqs; i++)
- free_irq(priv->irqs_table[i], priv);
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
-clean_wr_iomap_ret:
- iounmap(priv->wr_regs);
-clean_cpsw_wr_iores_ret:
- release_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
-clean_iomap_ret:
- iounmap(priv->regs);
-clean_cpsw_iores_ret:
- release_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res));
-clean_clk_ret:
- clk_put(priv->clk);
-clean_slave_ret:
+clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
- kfree(priv->slaves);
clean_ndev_ret:
- kfree(priv->data.slave_data);
free_netdev(priv->ndev);
return ret;
}
@@ -2198,30 +2150,18 @@ static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- int i;
if (priv->data.dual_emac)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
unregister_netdev(ndev);
cpts_unregister(priv->cpts);
- for (i = 0; i < priv->num_irqs; i++)
- free_irq(priv->irqs_table[i], priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
- iounmap(priv->regs);
- release_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res));
- iounmap(priv->wr_regs);
- release_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
pm_runtime_disable(&pdev->dev);
- clk_put(priv->clk);
- kfree(priv->slaves);
- kfree(priv->data.slave_data);
if (priv->data.dual_emac)
free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
@@ -2236,8 +2176,9 @@ static int cpsw_suspend(struct device *dev)
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
- soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
- soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
+
+ for_each_slave(priv, soft_reset_slave);
+
pm_runtime_put_sync(&pdev->dev);
/* Select sleep pin state */
@@ -2277,7 +2218,7 @@ static struct platform_driver cpsw_driver = {
.name = "cpsw",
.owner = THIS_MODULE,
.pm = &cpsw_pm_ops,
- .of_match_table = of_match_ptr(cpsw_of_mtable),
+ .of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
.remove = cpsw_remove,
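
The cpsw probe/remove rework above leans on device-managed (devm_*) resources: the long manual unwind chain collapses into a single clean_runtime_disable_ret label, and the remove path no longer unmaps registers, releases regions, or puts the clock by hand. A stripped-down sketch of the pattern, with an illustrative probe function name:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	struct clk *fck;

	/* The mapping is tied to the device lifetime; no iounmap() or
	 * release_mem_region() is needed on error or removal. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* Likewise, no clk_put() on the error or remove paths. */
	fck = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	return 0;
}
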
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index eb3e101ec04..574f49da693 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -39,4 +39,6 @@ struct cpsw_platform_data {
bool dual_emac; /* Enable Dual EMAC mode */
};
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index fe993cdd7e2..1a581ef7eee 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -127,8 +127,8 @@ struct cpts {
};
#ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
#else
static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
}
#endif
-extern int cpts_register(struct device *dev, struct cpts *cpts,
- u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6a32ef9d63a..41ba974bf37 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1852,7 +1852,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
- memcpy(priv->mac_addr, pdata->mac_addr, 6);
+ memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 591437e59b9..62b19be5183 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -319,7 +319,6 @@ static void tlan_remove_one(struct pci_dev *pdev)
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
cancel_work_sync(&priv->tlan_tqueue);
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 13e6fff8ca2..628b736e5ae 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
nz_addr |= mac[i];
if (nz_addr) {
- memcpy(dev->dev_addr, mac, 6);
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
dev->addr_len = 6;
} else {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 106be47716e..edb2e12a0fe 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1008,6 +1008,8 @@ static void tile_net_register(void *dev_ptr)
info->egress_timer.data = (long)info;
info->egress_timer.function = tile_net_handle_egress_timer;
+ u64_stats_init(&info->stats.syncp);
+
priv->cpu[my_cpu] = info;
/*
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 309abb472aa..8505196be9f 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
}
#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
#else
static inline void udbg_shutdown_ps3gelic(void) {}
#endif
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
- struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index f7e51b7d704..11f443d8e4e 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5734480c1ec..3f4a32e39d2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2478,7 +2478,6 @@ out_release_regions:
pci_release_regions(pdev);
out_disable_dev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return NULL;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 4ba2135474d..9b6af0845a1 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -29,8 +29,8 @@
#include <linux/sungem_phy.h>
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
extern const struct ethtool_ops spider_net_ethtool_ops;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index a971b9cca56..1322546d92a 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -887,7 +887,6 @@ static void tc35815_remove_one(struct pci_dev *pdev)
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
}
static int
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index bdf697b184a..cce6c4bc556 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -987,6 +987,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rp->base = ioaddr;
+ u64_stats_init(&rp->tx_stats.syncp);
+ u64_stats_init(&rp->rx_stats.syncp);
+
/* Get chip registers into a sane state */
rhine_power_init(dev);
rhine_hw_init(dev, pioaddr);
@@ -2292,7 +2295,6 @@ static void rhine_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void rhine_shutdown (struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 0029148077a..1f236412632 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -36,6 +36,7 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 4c619ea5189..74234a51c85 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -31,7 +31,7 @@
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
-#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
@@ -63,13 +63,13 @@
#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
/* Global Interrupt Enable Register (GIER) Bit Masks */
-#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
+#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
/* Transmit Status Register (TSR) Bit Masks */
-#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
-#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
-#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
-#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
+#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
+#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
* in the HW spec */
@@ -77,21 +77,21 @@
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
/* Receive Status Register (RSR) */
-#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
-#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
+#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
/* Transmit Packet Length Register (TPLR) */
-#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
+#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
/* Receive Packet Length Register (RPLR) */
-#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
+#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
-#define XEL_HEADER_OFFSET 12 /* Offset to length field */
-#define XEL_HEADER_SHIFT 16 /* Shift value for length */
+#define XEL_HEADER_OFFSET 12 /* Offset to length field */
+#define XEL_HEADER_SHIFT 16 /* Shift value for length */
/* General Ethernet Definitions */
-#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
-#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
+#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
@@ -1075,14 +1075,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
* This function un maps the IO region of the Emaclite device and frees the net
* device.
*/
-static void xemaclite_remove_ndev(struct net_device *ndev,
- struct platform_device *pdev)
+static void xemaclite_remove_ndev(struct net_device *ndev)
{
if (ndev) {
- struct net_local *lp = netdev_priv(ndev);
-
- if (lp->base_addr)
- devm_iounmap(&pdev->dev, lp->base_addr);
free_netdev(ndev);
}
}
@@ -1177,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (mac_address)
/* Set the MAC address. */
- memcpy(ndev->dev_addr, mac_address, 6);
+ memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
else
dev_warn(dev, "No MAC address found\n");
@@ -1214,7 +1209,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
return 0;
error:
- xemaclite_remove_ndev(ndev, ofdev);
+ xemaclite_remove_ndev(ndev);
return rc;
}
@@ -1248,7 +1243,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- xemaclite_remove_ndev(ndev, of_dev);
+ xemaclite_remove_ndev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index e78802e75ea..bcc224a8373 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
ch = PORT2CHANNEL(port);
regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_OFF:
- port->hwts_tx_en = 0;
- break;
- case HWTSTAMP_TX_ON:
- port->hwts_tx_en = 1;
- break;
- default:
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- }
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
@@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
+ port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+
/* Clear out any old time stamps. */
__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
&regs->channel[ch].ch_event);
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index a20ed1a9809..f8399359017 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
*/
* (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
a[1] = 0 ;
- memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+ memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
CHECK_NPP() ;
/* set memory address reg for writes */
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3ca308b2821..bd1166bf8f6 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -469,20 +469,20 @@ struct s_smc {
extern const struct fddi_addr fddi_broadcast;
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
#endif /* _SCMECM_ */
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index f5d7305a578..713d303a06a 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -436,7 +436,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
smt_reset_defaults(smc, 0);
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
init_smt(smc, NULL);
smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
if ((unsigned short) frame[1 + 10] != 0)
return;
SRBit = frame[1 + 6] & 0x01;
- memcpy(&frame[1 + 6], hw_addr, 6);
+ memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
frame[8] |= SRBit;
} // CheckSourceAddress
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index a974727dd9a..636b65c66d4 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser_fdx", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index e349d867449..f9a8976195b 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser12", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index bc1d5217038..4bc6ee8e798 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1734,7 +1734,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
{
if (request_irq(hwcfg.irq, scc_isr,
- IRQF_DISABLED, "AX.25 SCC",
+ 0, "AX.25 SCC",
(void *)(long) hwcfg.irq))
printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
else
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5af1c3e5032..1971411574d 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -888,7 +888,7 @@ static int yam_open(struct net_device *dev)
goto out_release_base;
}
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
ret = -EBUSY;
goto out_release_base;
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 6f10b496472..2cbe1c24999 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -561,7 +561,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
spin_lock_irqsave(&lp->lock, flags);
lp->is_tx = 1;
- INIT_COMPLETION(lp->tx_complete);
+ reinit_completion(&lp->tx_complete);
spin_unlock_irqrestore(&lp->lock, flags);
rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 0632d34905c..c6e46d6e9f7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -343,7 +343,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
if (ret)
goto err;
- INIT_COMPLETION(devrec->tx_complete);
+ reinit_completion(&devrec->tx_complete);
/* Set TXNTRIG bit of TXNCON to send packet */
ret = read_short_reg(devrec, REG_TXNCON, &val);
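
The two ieee802154 drivers above switch from the old INIT_COMPLETION() macro to reinit_completion(), which resets an already-initialized completion's done count without touching its wait queue. A minimal sketch of the intended usage; the my_xfer names are illustrative:

#include <linux/completion.h>

struct my_xfer {			/* illustrative */
	struct completion tx_complete;
};

static void my_xfer_init(struct my_xfer *x)
{
	init_completion(&x->tx_complete);	/* once, at setup time */
}

static void my_xfer_start(struct my_xfer *x)
{
	/* Re-arm before every transfer so a stale complete() is not consumed. */
	reinit_completion(&x->tx_complete);
	/* ... kick off hardware, then wait_for_completion(&x->tx_complete) ... */
}
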
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index a3bed28197d..c14d39bf32d 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -265,6 +265,7 @@ MODULE_PARM_DESC(numifbs, "Number of ifb devices");
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
+ struct ifb_private *dp;
int err;
dev_ifb = alloc_netdev(sizeof(struct ifb_private),
@@ -273,6 +274,10 @@ static int __init ifb_init_one(int index)
if (!dev_ifb)
return -ENOMEM;
+ dp = netdev_priv(dev_ifb);
+ u64_stats_init(&dp->rsync);
+ u64_stats_init(&dp->tsync);
+
dev_ifb->rtnl_link_ops = &ifb_link_ops;
err = register_netdevice(dev_ifb);
if (err < 0)
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 7bbd318bc93..befa45f809c 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -627,7 +627,7 @@ static int ali_ircc_setup(chipio_t *info)
/*
* Function ali_ircc_read_dongle_id (int index, info)
*
- * Try to read dongle indentification. This procedure needs to be executed
+ * Try to read dongle identification. This procedure needs to be executed
* once after power-on/reset. It also needs to be used whenever you suspect
* that the user may have plugged/unplugged the IrDA Dongle.
*/
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c74f384c87d..303c4bd26e1 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -411,12 +411,12 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
#else
- if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+ if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
return -EBUSY;
}
- if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+ if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
free_irq(port->irq, dev);
return -EBUSY;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 31bcb98ef35..768dfe9a931 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1352,7 +1352,7 @@ toshoboe_net_open (struct net_device *dev)
return 0;
rc = request_irq (self->io.irq, toshoboe_interrupt,
- IRQF_SHARED | IRQF_DISABLED, dev->name, self);
+ IRQF_SHARED, dev->name, self);
if (rc)
return rc;
@@ -1559,7 +1559,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
self->io.fir_base = self->base;
self->io.fir_ext = OBOE_IO_EXTENT;
self->io.irq = pci_dev->irq;
- self->io.irqflags = IRQF_SHARED | IRQF_DISABLED;
+ self->io.irqflags = IRQF_SHARED;
self->speed = self->io.speed = 9600;
self->async = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index ceeb53737f8..66bc03bdb13 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1035,7 +1035,7 @@ static int nsc_ircc_setup(chipio_t *info)
/*
* Function nsc_ircc_read_dongle_id (void)
*
- * Try to read dongle indentification. This procedure needs to be executed
+ * Try to read dongle identification. This procedure needs to be executed
* once after power-on/reset. It also needs to be used whenever you suspect
* that the user may have plugged/unplugged the IrDA Dongle.
*/
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4455425f1c7..ff45cd0d60e 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+ err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 89682b49900..8d9ae5a086d 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+ err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 6d5b1e2b128..f50b9c1c063 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -102,28 +102,29 @@ struct sir_driver {
/* exported */
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+ const char *name);
+int sirdev_put_instance(struct sir_dev *self);
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
/* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
/* not exported */
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
/* inline helpers */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a17d85a331f..ac24c27b4b2 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -137,10 +137,16 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
+ int i;
dev->lstats = alloc_percpu(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_lstats *lb_stats;
+ lb_stats = per_cpu_ptr(dev->lstats, i);
+ u64_stats_init(&lb_stats->syncp);
+ }
return 0;
}
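
Editor's note: the same u64_stats_init() pattern recurs below in macvlan, nlmon, team and the team loadbalance mode. A minimal sketch of the idiom, with hypothetical struct and function names, including the reader side that the seqcount protects:

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static struct demo_pcpu_stats __percpu *demo_stats_alloc(void)
{
	struct demo_pcpu_stats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct demo_pcpu_stats);
	if (!stats)
		return NULL;

	/* Seed each per-CPU seqcount before the first update. */
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);

	return stats;
}

/* Reader side: retry until a consistent snapshot of the counters is seen. */
static void demo_stats_read(struct demo_pcpu_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
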
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9bf46bd19b8..acf93798dc6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -297,7 +297,13 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
int ret;
const struct macvlan_dev *vlan = netdev_priv(dev);
- ret = macvlan_queue_xmit(skb, dev);
+ if (vlan->fwd_priv) {
+ skb->dev = vlan->lowerdev;
+ ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+ } else {
+ ret = macvlan_queue_xmit(skb, dev);
+ }
+
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct macvlan_pcpu_stats *pcpu_stats;
@@ -347,6 +353,21 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+ vlan->fwd_priv =
+ lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
+
+ /* If we get a NULL pointer back, or if we get an error
+ * then we should just fall through to the non-accelerated path
+ */
+ if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
+ vlan->fwd_priv = NULL;
+ } else {
+ dev->features &= ~NETIF_F_LLTX;
+ return 0;
+ }
+ }
+
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
@@ -367,6 +388,11 @@ hash_add:
del_unicast:
dev_uc_del(lowerdev, dev->dev_addr);
out:
+ if (vlan->fwd_priv) {
+ lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+ vlan->fwd_priv);
+ vlan->fwd_priv = NULL;
+ }
return err;
}
@@ -375,6 +401,13 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
+ if (vlan->fwd_priv) {
+ lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+ vlan->fwd_priv);
+ vlan->fwd_priv = NULL;
+ return 0;
+ }
+
dev_uc_unsync(lowerdev, dev);
dev_mc_unsync(lowerdev, dev);
@@ -501,6 +534,7 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
+ int i;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -516,6 +550,12 @@ static int macvlan_init(struct net_device *dev)
if (!vlan->pcpu_stats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct macvlan_pcpu_stats *mvlstats;
+ mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
+ u64_stats_init(&mvlstats->syncp);
+ }
+
return 0;
}
@@ -828,22 +868,22 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_inherit(dev, lowerdev);
}
+ port->count += 1;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto destroy_port;
+
+ dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
goto destroy_port;
- port->count += 1;
- err = register_netdevice(dev);
- if (err < 0)
- goto upper_dev_unlink;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
-upper_dev_unlink:
- netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
port->count -= 1;
if (!port->count)
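
Editor's note: the macvlan hunks above wire the device into the lower device's L2 forwarding offload when NETIF_F_HW_L2FW_DOFFLOAD is advertised. A condensed sketch of that handshake (error handling trimmed, function names hypothetical, the surrounding driver glue omitted):

#include <linux/netdevice.h>
#include <linux/err.h>

/* Sketch only: try to claim a hardware forwarding slot on open and
 * fall back to the software path if the lower device refuses. */
static void *demo_l2fwd_open(struct net_device *lowerdev,
			     struct net_device *vdev)
{
	void *fwd_priv;

	if (!(lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD))
		return NULL;

	fwd_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, vdev);
	if (IS_ERR_OR_NULL(fwd_priv))
		return NULL;	/* use the software path */

	return fwd_priv;
}

static void demo_l2fwd_close(struct net_device *lowerdev, void *fwd_priv)
{
	if (fwd_priv)
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, fwd_priv);
}
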
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 9dccb1edfd2..dc76670c2f2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
const struct iovec *iv, unsigned long total_len,
size_t count, int noblock)
{
+ int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
struct sk_buff *skb;
struct macvlan_dev *vlan;
unsigned long len = total_len;
@@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+ if (copylen > good_linear)
+ copylen = good_linear;
linear = copylen;
if (iov_pages(iv, vnet_hdr_len + copylen, count)
<= MAX_SKB_FRAGS)
@@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (!zerocopy) {
copylen = len;
- linear = vnet_hdr.hdr_len;
+ if (vnet_hdr.hdr_len > good_linear)
+ linear = good_linear;
+ else
+ linear = vnet_hdr.hdr_len;
}
skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
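
Editor's note: both the macvtap hunk above and the matching tun hunk later in this patch bound the linear part of the skb by SKB_MAX_HEAD() so an oversized hdr_len supplied by userspace cannot force a huge contiguous head allocation. The clamp, reduced to its essence (names are illustrative):

#include <linux/skbuff.h>

/* Clamp the requested linear (head) length to roughly one page minus
 * the skb_shared_info overhead and the alignment prefix. */
static size_t demo_clamp_linear(size_t requested_hdr_len, int align)
{
	size_t good_linear = SKB_MAX_HEAD(align);

	return min(requested_hdr_len, good_linear);
}
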
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index adeee615dd1..ba2f5e710af 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -34,6 +34,8 @@
*
****************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -310,6 +312,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
const char *buf,
size_t count)
{
+ unsigned long flags;
int enabled;
int err;
@@ -319,14 +322,12 @@ static ssize_t store_enabled(struct netconsole_target *nt,
if (enabled < 0 || enabled > 1)
return -EINVAL;
if (enabled == nt->enabled) {
- printk(KERN_INFO "netconsole: network logging has already %s\n",
- nt->enabled ? "started" : "stopped");
+ pr_info("network logging has already %s\n",
+ nt->enabled ? "started" : "stopped");
return -EINVAL;
}
- mutex_lock(&nt->mutex);
if (enabled) { /* 1 */
-
/*
* Skip netpoll_parse_options() -- all the attributes are
* already configured via configfs. Just print them out.
@@ -334,19 +335,22 @@ static ssize_t store_enabled(struct netconsole_target *nt,
netpoll_print_options(&nt->np);
err = netpoll_setup(&nt->np);
- if (err) {
- mutex_unlock(&nt->mutex);
+ if (err)
return err;
- }
-
- printk(KERN_INFO "netconsole: network logging started\n");
+ pr_info("netconsole: network logging started\n");
} else { /* 0 */
+ /* We need to disable the netconsole before cleaning it up,
+ * otherwise we might end up in write_msg() with
+ * nt->np.dev == NULL and nt->enabled == 1
+ */
+ spin_lock_irqsave(&target_list_lock, flags);
+ nt->enabled = 0;
+ spin_unlock_irqrestore(&target_list_lock, flags);
netpoll_cleanup(&nt->np);
}
nt->enabled = enabled;
- mutex_unlock(&nt->mutex);
return strnlen(buf, count);
}
@@ -358,9 +362,8 @@ static ssize_t store_dev_name(struct netconsole_target *nt,
size_t len;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -381,9 +384,8 @@ static ssize_t store_local_port(struct netconsole_target *nt,
int rv;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -400,9 +402,8 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
int rv;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -417,9 +418,8 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
size_t count)
{
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -427,7 +427,7 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
const char *end;
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
- printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ pr_err("invalid IPv6 address at: <%c>\n", *end);
return -EINVAL;
}
nt->np.ipv6 = true;
@@ -448,9 +448,8 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
size_t count)
{
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -458,7 +457,7 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
const char *end;
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
- printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ pr_err("invalid IPv6 address at: <%c>\n", *end);
return -EINVAL;
}
nt->np.ipv6 = true;
@@ -481,9 +480,8 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
u8 remote_mac[ETH_ALEN];
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -563,8 +561,10 @@ static ssize_t netconsole_target_attr_store(struct config_item *item,
struct netconsole_target_attr *na =
container_of(attr, struct netconsole_target_attr, attr);
+ mutex_lock(&nt->mutex);
if (na->store)
ret = na->store(nt, buf, count);
+ mutex_unlock(&nt->mutex);
return ret;
}
@@ -704,19 +704,20 @@ restart:
}
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
- printk(KERN_INFO "netconsole: network logging stopped on "
- "interface %s as it ", dev->name);
+ const char *msg = "had an event";
switch (event) {
case NETDEV_UNREGISTER:
- printk(KERN_CONT "unregistered\n");
+ msg = "unregistered";
break;
case NETDEV_RELEASE:
- printk(KERN_CONT "released slaves\n");
+ msg = "released slaves";
break;
case NETDEV_JOIN:
- printk(KERN_CONT "is joining a master device\n");
+ msg = "is joining a master device";
break;
}
+ pr_info("network logging stopped on interface %s as it %s\n",
+ dev->name, msg);
}
done:
@@ -802,7 +803,7 @@ static int __init init_netconsole(void)
goto undonotifier;
register_console(&netconsole);
- printk(KERN_INFO "netconsole: network logging started\n");
+ pr_info("network logging started\n");
return err;
@@ -810,7 +811,7 @@ undonotifier:
unregister_netdevice_notifier(&netconsole_netdev_notifier);
fail:
- printk(KERN_ERR "netconsole: cleaning up\n");
+ pr_err("cleaning up\n");
/*
* Remove all targets and destroy them (only targets created
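
Editor's note: the netconsole conversion above relies on the pr_fmt() definition added at the top of the file: every pr_info()/pr_err() call then picks up the module-name prefix automatically, which is why the literal "netconsole: " strings are dropped from the messages. A minimal sketch of the mechanism, for a hypothetical module:

/* Must be defined before printk.h is pulled in (usually via kernel.h). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* Prints "demo: network logging started" when built as demo.ko. */
	pr_info("network logging started\n");
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");
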
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index b57ce5f4896..d2bb12bfabd 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,8 +47,16 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
static int nlmon_dev_init(struct net_device *dev)
{
+ int i;
+
dev->lstats = alloc_percpu(struct pcpu_lstats);
+ for_each_possible_cpu(i) {
+ struct pcpu_lstats *nlmstats;
+ nlmstats = per_cpu_ptr(dev->lstats, i);
+ u64_stats_init(&nlmstats->syncp);
+ }
+
return dev->lstats == NULL ? -ENOMEM : 0;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 342561ad315..9b5d46c03ee 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -154,6 +154,13 @@ config MDIO_SUN4I
interface units of the Allwinner SoC that have an EMAC (A10,
A12, A10s, etc.)
+config MDIO_MOXART
+ tristate "MOXA ART MDIO interface support"
+ depends on ARCH_MOXART
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the MOXA ART SoC
+
config MDIO_BUS_MUX
tristate
depends on OF_MDIO
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 23a2ab2e847..9013dfa12aa 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ac22283aaf2..bc71947b1ec 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->wolopts |= WAKE_MAGIC;
}
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_WOL_ENABLE;
+
+ value = phy_read(phydev, MII_BMCR);
+
+ if (wol_enabled)
+ value |= BMCR_ISOLATE;
+ else
+ value |= BMCR_PDOWN;
+
+ phy_write(phydev, MII_BMCR, value);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ int value;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, MII_BMCR);
+ value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+ phy_write(phydev, MII_BMCR, value);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e91477362d..2e3c778ea9b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -34,9 +34,9 @@
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define MII_MARVELL_PHY_PAGE 22
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
new file mode 100644
index 00000000000..a5741cb0304
--- /dev/null
+++ b/drivers/net/phy/mdio-moxart.c
@@ -0,0 +1,201 @@
+/* MOXA ART Ethernet (RTL8201CP) MDIO interface driver
+ *
+ * Copyright (C) 2013 Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define REG_PHY_CTRL 0
+#define REG_PHY_WRITE_DATA 4
+
+/* REG_PHY_CTRL */
+#define MIIWR BIT(27) /* init write sequence (auto cleared) */
+#define MIIRD BIT(26)
+#define REGAD_MASK 0x3e00000
+#define PHYAD_MASK 0x1f0000
+#define MIIRDATA_MASK 0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK 0xffff
+
+struct moxart_mdio_data {
+ void __iomem *base;
+};
+
+static int moxart_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct moxart_mdio_data *data = bus->priv;
+ u32 ctrl = 0;
+ unsigned int count = 5;
+
+ dev_dbg(&bus->dev, "%s\n", __func__);
+
+ ctrl |= MIIRD | ((mii_id << 16) & PHYAD_MASK) |
+ ((regnum << 21) & REGAD_MASK);
+
+ writel(ctrl, data->base + REG_PHY_CTRL);
+
+ do {
+ ctrl = readl(data->base + REG_PHY_CTRL);
+
+ if (!(ctrl & MIIRD))
+ return ctrl & MIIRDATA_MASK;
+
+ mdelay(10);
+ count--;
+ } while (count > 0);
+
+ dev_dbg(&bus->dev, "%s timed out\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int moxart_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct moxart_mdio_data *data = bus->priv;
+ u32 ctrl = 0;
+ unsigned int count = 5;
+
+ dev_dbg(&bus->dev, "%s\n", __func__);
+
+ ctrl |= MIIWR | ((mii_id << 16) & PHYAD_MASK) |
+ ((regnum << 21) & REGAD_MASK);
+
+ value &= MIIWDATA_MASK;
+
+ writel(value, data->base + REG_PHY_WRITE_DATA);
+ writel(ctrl, data->base + REG_PHY_CTRL);
+
+ do {
+ ctrl = readl(data->base + REG_PHY_CTRL);
+
+ if (!(ctrl & MIIWR))
+ return 0;
+
+ mdelay(10);
+ count--;
+ } while (count > 0);
+
+ dev_dbg(&bus->dev, "%s timed out\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int moxart_mdio_reset(struct mii_bus *bus)
+{
+ int data, i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ data = moxart_mdio_read(bus, i, MII_BMCR);
+ if (data < 0)
+ continue;
+
+ data |= BMCR_RESET;
+ if (moxart_mdio_write(bus, i, MII_BMCR, data) < 0)
+ continue;
+ }
+
+ return 0;
+}
+
+static int moxart_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct moxart_mdio_data *data;
+ struct resource *res;
+ int ret, i;
+
+ bus = mdiobus_alloc_size(sizeof(*data));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "MOXA ART Ethernet MII";
+ bus->read = &moxart_mdio_read;
+ bus->write = &moxart_mdio_write;
+ bus->reset = &moxart_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
+ if (!bus->irq) {
+ ret = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ /* Set PHY_IGNORE_INTERRUPT here even though it has no effect:
+ * of_mdiobus_register() overrides these with PHY_POLL.
+ * Ideally, the interrupt from the MAC controller could be used to
+ * detect link state changes instead of polling, i.e. if there were
+ * a way the phy_driver could set PHY_HAS_INTERRUPT but have that
+ * interrupt handled in the ethernet driver code.
+ */
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->irq[i] = PHY_IGNORE_INTERRUPT;
+
+ data = bus->priv;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->base)) {
+ ret = PTR_ERR(data->base);
+ goto err_out_free_mdiobus;
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0)
+ goto err_out_free_mdiobus;
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_out_free_mdiobus:
+ mdiobus_free(bus);
+ return ret;
+}
+
+static int moxart_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_mdio_dt_ids[] = {
+ { .compatible = "moxa,moxart-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids);
+
+static struct platform_driver moxart_mdio_driver = {
+ .probe = moxart_mdio_probe,
+ .remove = moxart_mdio_remove,
+ .driver = {
+ .name = "moxart-mdio",
+ .of_match_table = moxart_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(moxart_mdio_driver);
+
+MODULE_DESCRIPTION("MOXA ART MDIO interface driver");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index dc920974204..56178761ce9 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -438,17 +438,19 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
}
+static DEVICE_ATTR_RO(phy_id);
-static struct device_attribute mdio_dev_attrs[] = {
- __ATTR_RO(phy_id),
- __ATTR_NULL
+static struct attribute *mdio_dev_attrs[] = {
+ &dev_attr_phy_id.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mdio_dev);
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
.pm = MDIO_BUS_PM_OPS,
- .dev_attrs = mdio_dev_attrs,
+ .dev_groups = mdio_dev_groups,
};
EXPORT_SYMBOL(mdio_bus_type);
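
Editor's note: the mdio_bus.c hunk above is part of the tree-wide move from bus_type.dev_attrs to dev_groups. DEVICE_ATTR_RO(phy_id) expects a phy_id_show() function and defines dev_attr_phy_id, while ATTRIBUTE_GROUPS(mdio_dev) turns the mdio_dev_attrs[] array into the mdio_dev_groups pointer used by the bus. A stripped-down sketch of the same pattern for a hypothetical bus:

#include <linux/device.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(foo) requires a foo_show() with this signature and
 * defines a struct device_attribute named dev_attr_foo. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *demo_dev_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
/* Generates demo_dev_groups[], a NULL-terminated list of attribute groups. */
ATTRIBUTE_GROUPS(demo_dev);

static struct bus_type demo_bus_type = {
	.name		= "demo_bus",
	.dev_groups	= demo_dev_groups,
};
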
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c31aad0004c..3ae28f42086 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -287,6 +287,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ks8737_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
} };
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 74630e94fa3..d6447b3f740 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -697,7 +697,7 @@ static int genphy_config_advert(struct phy_device *phydev)
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
-static int genphy_setup_forced(struct phy_device *phydev)
+int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;
@@ -716,7 +716,7 @@ static int genphy_setup_forced(struct phy_device *phydev)
return err;
}
-
+EXPORT_SYMBOL(genphy_setup_forced);
/**
* genphy_restart_aneg - Enable and Restart Autonegotiation
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 138de837977..fa1d69a38cc 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,6 +64,18 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
return err;
}
+/* RTL8201CP */
+static struct phy_driver rtl8201cp_driver = {
+ .phy_id = 0x00008201,
+ .name = "RTL8201CP Ethernet",
+ .phy_id_mask = 0x0000ffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
/* RTL8211B */
static struct phy_driver rtl8211b_driver = {
.phy_id = 0x001cc912,
@@ -98,6 +110,9 @@ static int __init realtek_init(void)
{
int ret;
+ ret = phy_driver_register(&rtl8201cp_driver);
+ if (ret < 0)
+ return -ENODEV;
ret = phy_driver_register(&rtl8211b_driver);
if (ret < 0)
return -ENODEV;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 69b482bce7d..508e4359338 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -3,7 +3,7 @@
*
* Author: Kriston Carson
*
- * Copyright (c) 2005, 2009 Freescale Semiconductor, Inc.
+ * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -18,6 +18,11 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+/* Vitesse Extended Page Magic Register(s) */
+#define MII_VSC82X4_EXT_PAGE_16E 0x10
+#define MII_VSC82X4_EXT_PAGE_17E 0x11
+#define MII_VSC82X4_EXT_PAGE_18E 0x12
+
/* Vitesse Extended Control Register 1 */
#define MII_VSC8244_EXT_CON1 0x17
#define MII_VSC8244_EXTCON1_INIT 0x0000
@@ -54,7 +59,13 @@
#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
+/* Vitesse Extended Page Access Register */
+#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+
+#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
+#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8662 0x00070660
#define PHY_ID_VSC8221 0x000fc550
#define PHY_ID_VSC8211 0x000fc4b0
@@ -118,7 +129,9 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_VSC8244_IMASK,
- phydev->drv->phy_id == PHY_ID_VSC8244 ?
+ (phydev->drv->phy_id == PHY_ID_VSC8234 ||
+ phydev->drv->phy_id == PHY_ID_VSC8244 ||
+ phydev->drv->phy_id == PHY_ID_VSC8574) ?
MII_VSC8244_IMASK_MASK :
MII_VSC8221_IMASK_MASK);
else {
@@ -149,21 +162,114 @@ static int vsc8221_config_init(struct phy_device *phydev)
*/
}
-/* Vitesse 824x */
+/* vsc82x4_config_autocross_enable - Enable auto MDI/MDI-X for forced links
+ * @phydev: target phy_device struct
+ *
+ * Enable auto MDI/MDI-X when in 10/100 forced link speeds by writing
+ * special values in the VSC8234/VSC8244 extended reserved registers
+ */
+static int vsc82x4_config_autocross_enable(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed > SPEED_100)
+ return 0;
+
+ /* map extended registers set 0x10 - 0x1e */
+ ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x52b5);
+ if (ret >= 0)
+ ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_18E, 0x0012);
+ if (ret >= 0)
+ ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_17E, 0x2803);
+ if (ret >= 0)
+ ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_16E, 0x87fa);
+ /* map standard registers set 0x10 - 0x1e */
+ if (ret >= 0)
+ ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000);
+ else
+ phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000);
+
+ return ret;
+}
+
+/* vsc82x4_config_aneg - restart auto-negotiation or write BMCR
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR and also start the auto
+ * MDI/MDI-X feature
+ */
+static int vsc82x4_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable auto MDI/MDI-X when in 10/100 forced link speeds by
+ * writing special values in the VSC8234 extended reserved registers
+ */
+ if (phydev->autoneg != AUTONEG_ENABLE && phydev->speed <= SPEED_100) {
+ ret = genphy_setup_forced(phydev);
+
+ if (ret < 0) /* error */
+ return ret;
+
+ return vsc82x4_config_autocross_enable(phydev);
+ }
+
+ return genphy_config_aneg(phydev);
+}
+
+/* Vitesse 82xx */
static struct phy_driver vsc82xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8234,
+ .name = "Vitesse VSC8234",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &vsc82x4_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_VSC8244,
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
- .config_aneg = &genphy_config_aneg,
+ .config_aneg = &vsc82x4_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_VSC8574,
+ .name = "Vitesse VSC8574",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &vsc82x4_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ .phy_id = PHY_ID_VSC8662,
+ .name = "Vitesse VSC8662",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &vsc82x4_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
/* Vitesse 8221 */
.phy_id = PHY_ID_VSC8221,
.phy_id_mask = 0x000ffff0,
@@ -207,7 +313,10 @@ module_init(vsc82xx_init);
module_exit(vsc82xx_exit);
static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
+ { PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
+ { PHY_ID_VSC8574, 0x000ffff0 },
+ { PHY_ID_VSC8662, 0x000ffff0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
{ PHY_ID_VSC8211, 0x000ffff0 },
{ }
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 1f7bef90b46..7b4ff35c8bf 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
/* Any address will do - we take the first */
const struct in_ifaddr *ifa = in_dev->ifa_list;
if (ifa) {
- memcpy(eth->h_source, dev->dev_addr, 6);
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memset(eth->h_dest, 0xfc, 2);
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5f66e30d982..82ee6ed954c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
if (error < 0)
goto end;
- m->msg_namelen = 0;
-
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 50e43e64d51..34b0de09d88 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1540,6 +1540,12 @@ static int team_init(struct net_device *dev)
if (!team->pcpu_stats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct team_pcpu_stats *team_stats;
+ team_stats = per_cpu_ptr(team->pcpu_stats, i);
+ u64_stats_init(&team_stats->syncp);
+ }
+
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
@@ -2644,7 +2650,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static struct genl_ops team_nl_ops[] = {
+static const struct genl_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
.doit = team_nl_cmd_noop,
@@ -2670,15 +2676,15 @@ static struct genl_ops team_nl_ops[] = {
},
};
-static struct genl_multicast_group team_change_event_mcgrp = {
- .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+static const struct genl_multicast_group team_nl_mcgrps[] = {
+ { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
static int team_nl_send_multicast(struct sk_buff *skb,
struct team *team, u32 portid)
{
- return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
- team_change_event_mcgrp.id, GFP_KERNEL);
+ return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
+ skb, 0, 0, GFP_KERNEL);
}
static int team_nl_send_event_options_get(struct team *team,
@@ -2697,23 +2703,8 @@ static int team_nl_send_event_port_get(struct team *team,
static int team_nl_init(void)
{
- int err;
-
- err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
- ARRAY_SIZE(team_nl_ops));
- if (err)
- return err;
-
- err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
- if (err)
- goto err_change_event_grp_reg;
-
- return 0;
-
-err_change_event_grp_reg:
- genl_unregister_family(&team_nl_family);
-
- return err;
+ return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
+ team_nl_mcgrps);
}
static void team_nl_fini(void)
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 829a9cd2b4d..d671fc3ac5a 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -570,7 +570,7 @@ static int lb_init(struct team *team)
{
struct lb_priv *lb_priv = get_lb_priv(team);
lb_select_tx_port_func_t *func;
- int err;
+ int i, err;
/* set default tx port selector */
func = lb_select_tx_port_get_func("hash");
@@ -588,6 +588,13 @@ static int lb_init(struct team *team)
goto err_alloc_pcpu_stats;
}
+ for_each_possible_cpu(i) {
+ struct lb_pcpu_stats *team_lb_stats;
+ team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ u64_stats_init(&team_lb_stats->syncp);
+ }
+
+
INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7cb105c103f..782e38bfc1e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct sk_buff *skb;
size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
+ int good_linear;
int offset = 0;
int copylen;
bool zerocopy = false;
@@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
}
+ good_linear = SKB_MAX_HEAD(align);
+
if (msg_control) {
/* There are 256 bytes to be copied in skb, so there is
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+ if (copylen > good_linear)
+ copylen = good_linear;
linear = copylen;
if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
zerocopy = true;
@@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (!zerocopy) {
copylen = len;
- linear = gso.hdr_len;
+ if (gso.hdr_len > good_linear)
+ linear = good_linear;
+ else
+ linear = gso.hdr_len;
}
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 40db3123331..85e4a01670f 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -242,6 +242,21 @@ config USB_NET_CDC_NCM
* ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
* Ericsson F5521gw Mobile Broadband Module
+config USB_NET_HUAWEI_CDC_NCM
+ tristate "Huawei NCM embedded AT channel support"
+ depends on USB_USBNET
+ select USB_WDM
+ select USB_NET_CDC_NCM
+ help
+ This driver supports Huawei-style NCM devices that use NCM as a
+ transport for other protocols, usually an embedded AT channel.
+ Good examples are:
+ * Huawei E3131
+ * Huawei E3251
+
+ To compile this driver as a module, choose M here: the module will be
+ called huawei_cdc_ncm.ko.
+
config USB_NET_CDC_MBIM
tristate "CDC MBIM support"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 8b342cf992f..b17b5e88bba 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
+obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 846cc19c04f..8e8d0fcd497 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -78,7 +78,6 @@
#define AX_MEDIUM_STATUS_MODE 0x22
#define AX_MEDIUM_GIGAMODE 0x01
#define AX_MEDIUM_FULL_DUPLEX 0x02
- #define AX_MEDIUM_ALWAYS_ONE 0x04
#define AX_MEDIUM_EN_125MHZ 0x08
#define AX_MEDIUM_RXFLOW_CTRLEN 0x10
#define AX_MEDIUM_TXFLOW_CTRLEN 0x20
@@ -1065,8 +1064,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
/* Configure default medium type => giga */
*tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
- AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
+ AX_MEDIUM_GIGAMODE;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
2, 2, tmp16);
@@ -1225,7 +1224,7 @@ static int ax88179_link_reset(struct usbnet *dev)
}
mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;
+ AX_MEDIUM_RXFLOW_CTRLEN;
ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
1, 1, &link_sts);
@@ -1339,8 +1338,8 @@ static int ax88179_reset(struct usbnet *dev)
/* Configure default medium type => giga */
*tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
- AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
+ AX_MEDIUM_GIGAMODE;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
2, 2, tmp16);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8d5cac2d8e3..df507e6dbb9 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 broadcast[6];
+ u8 broadcast[ETH_ALEN];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- memset(broadcast, 0xff, 6);
+ memset(broadcast, 0xff, ETH_ALEN);
memset(catc->multicast, 0, 64);
catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct usb_device *usbdev = interface_to_usbdev(intf);
struct net_device *netdev;
struct catc *catc;
- u8 broadcast[6];
+ u8 broadcast[ETH_ALEN];
int i, pktsz;
if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "Filling the multicast list.\n");
- memset(broadcast, 0xff, 6);
+ memset(broadcast, 0xff, ETH_ALEN);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 7d78669000d..6358d420e18 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,7 +328,7 @@ MODULE_DEVICE_TABLE(usb, usbpn_ids);
static struct usb_driver usbpn_driver;
-int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
static const char ifname[] = "usbpn%d";
const struct usb_cdc_union_desc *union_header = NULL;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 25ba7eca9a1..c9f3281506a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,8 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
#include <linux/usb/cdc_ncm.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
/* driver specific data - must match cdc_ncm usage */
struct cdc_mbim_state {
@@ -42,13 +44,11 @@ static int cdc_mbim_manage_power(struct usbnet *dev, int on)
if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
/* need autopm_get/put here to ensure the usbcore sees the new value */
rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
+ if (!rv)
+ usb_autopm_put_interface(dev->intf);
}
-err:
- return rv;
+ return 0;
}
static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
@@ -173,7 +173,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
}
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -184,6 +184,60 @@ error:
return NULL;
}
+/* Some devices are known to send Neighbor Solicitation messages and
+ * require Neighbor Advertisement replies. The IPv6 core will not
+ * respond since IFF_NOARP is set, so we must handle them ourselves.
+ */
+static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
+{
+ struct ipv6hdr *iph = (void *)buf;
+ struct nd_msg *msg = (void *)(iph + 1);
+ struct net_device *netdev;
+ struct inet6_dev *in6_dev;
+ bool is_router;
+
+ /* we'll only respond to requests from unicast addresses to
+ * our solicited node addresses.
+ */
+ if (!ipv6_addr_is_solict_mult(&iph->daddr) ||
+ !(ipv6_addr_type(&iph->saddr) & IPV6_ADDR_UNICAST))
+ return;
+
+ /* need to send the NA on the VLAN dev, if any */
+ if (tci)
+ netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
+ tci);
+ else
+ netdev = dev->net;
+ if (!netdev)
+ return;
+
+ in6_dev = in6_dev_get(netdev);
+ if (!in6_dev)
+ return;
+ is_router = !!in6_dev->cnf.forwarding;
+ in6_dev_put(in6_dev);
+
+ /* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
+ ipv6_stub->ndisc_send_na(netdev, NULL, &iph->saddr, &msg->target,
+ is_router /* router */,
+ true /* solicited */,
+ false /* override */,
+ true /* inc_opt */);
+}
+
+static bool is_neigh_solicit(u8 *buf, size_t len)
+{
+ struct ipv6hdr *iph = (void *)buf;
+ struct nd_msg *msg = (void *)(iph + 1);
+
+ return (len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
+ iph->nexthdr == IPPROTO_ICMPV6 &&
+ msg->icmph.icmp6_code == 0 &&
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION);
+}
+
+
static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
{
__be16 proto = htons(ETH_P_802_3);
@@ -198,6 +252,8 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
proto = htons(ETH_P_IP);
break;
case 0x60:
+ if (is_neigh_solicit(buf, len))
+ do_neigh_solicit(dev, buf, tci);
proto = htons(ETH_P_IPV6);
break;
default:
@@ -313,15 +369,13 @@ error:
static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
{
- int ret = 0;
+ int ret = -ENODEV;
struct usbnet *dev = usb_get_intfdata(intf);
struct cdc_mbim_state *info = (void *)&dev->data;
struct cdc_ncm_ctx *ctx = info->ctx;
- if (ctx == NULL) {
- ret = -1;
+ if (!ctx)
goto error;
- }
/*
* Both usbnet_suspend() and subdriver->suspend() MUST return 0
@@ -354,7 +408,7 @@ static int cdc_mbim_resume(struct usb_interface *intf)
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && callsub && info->subdriver->suspend)
+ if (ret < 0 && callsub)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
@@ -371,9 +425,18 @@ static const struct driver_info cdc_mbim_info = {
};
/* MBIM and NCM devices should not need a ZLP after NTBs with
- * dwNtbOutMaxSize length. This driver_info is for the exceptional
- * devices requiring it anyway, allowing them to be supported without
- * forcing the performance penalty on all the sane devices.
+ * dwNtbOutMaxSize length. Nevertheless, a number of devices from
+ * different vendor IDs will fail unless we send ZLPs, forcing us
+ * to make this the default.
+ *
+ * This default may cause a performance penalty for spec conforming
+ * devices wanting to take advantage of optimizations possible without
+ * ZLPs. A whitelist is added in an attempt to avoid this for devices
+ * known to conform to the MBIM specification.
+ *
+ * All known devices supporting NCM compatibility mode are also
+ * conforming to the NCM and MBIM specifications. For this reason, the
+ * NCM subclass entry is also in the ZLP whitelist.
*/
static const struct driver_info cdc_mbim_info_zlp = {
.description = "CDC MBIM",
@@ -396,16 +459,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Sierra Wireless MC7710 need ZLPs */
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info_zlp,
- },
- /* HP hs2434 Mobile Broadband Module needs ZLPs */
- { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ /* ZLP conformance whitelist: All Ericsson MBIM devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info,
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
},
{
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 43afde8f48d..e15ec2b1203 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,8 +53,6 @@
#include <linux/usb/cdc.h>
#include <linux/usb/cdc_ncm.h>
-#define DRIVER_VERSION "14-Mar-2012"
-
#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
static bool prefer_mbim = true;
#else
@@ -68,71 +66,67 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
-static void
-cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- struct usbnet *dev = netdev_priv(net);
-
- strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, dev->driver_info->description,
- sizeof(info->fw_version));
- usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
-}
-
-static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+static int cdc_ncm_setup(struct usbnet *dev)
{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
int err;
int eth_hlen;
u16 ntb_fmt_supported;
- u32 min_dgram_size;
- u32 min_hdr_size;
- struct usbnet *dev = netdev_priv(ctx->netdev);
+ __le16 max_datagram_size;
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ctx->ncm_parm,
- sizeof(ctx->ncm_parm));
+ 0, iface_no, &ncm_parm,
+ sizeof(ncm_parm));
if (err < 0) {
- pr_debug("failed GET_NTB_PARAMETERS\n");
- return 1;
+ dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
+ return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
- eth_hlen = ETH_HLEN;
- min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
- if (ctx->mbim_desc != NULL) {
- flags = ctx->mbim_desc->bmNetworkCapabilities;
+ /* there are some minor differences in NCM and MBIM defaults */
+ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
+ if (!ctx->mbim_desc)
+ return -EINVAL;
eth_hlen = 0;
- min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
- min_hdr_size = 0;
- } else if (ctx->func_desc != NULL) {
- flags = ctx->func_desc->bmNetworkCapabilities;
+ flags = ctx->mbim_desc->bmNetworkCapabilities;
+ ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
} else {
- flags = 0;
+ if (!ctx->func_desc)
+ return -EINVAL;
+ eth_hlen = ETH_HLEN;
+ flags = ctx->func_desc->bmNetworkCapabilities;
+ ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
}
- pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
- "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
- "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
- ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
- ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+ /* common absolute max for NCM and MBIM */
+ if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+
+ dev_dbg(&dev->intf->dev,
+ "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
+ ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
/* max count of tx datagrams */
if ((ctx->tx_max_datagrams == 0) ||
@@ -141,19 +135,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* verify maximum size of received NTB in bytes */
if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
- pr_debug("Using min receive length=%d\n",
- USB_CDC_NCM_NTB_MIN_IN_SIZE);
+ dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
+ USB_CDC_NCM_NTB_MIN_IN_SIZE);
ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
}
if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
- pr_debug("Using default maximum receive length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_RX);
+ dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_RX);
ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -161,16 +155,22 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
| USB_RECIP_INTERFACE,
0, iface_no, &dwNtbInMaxSize, 4);
if (err < 0)
- pr_debug("Setting NTB Input Size failed\n");
+ dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
}
/* verify maximum size of transmitted NTB in bytes */
- if ((ctx->tx_max <
- (min_hdr_size + min_dgram_size)) ||
- (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
- pr_debug("Using default maximum transmit length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_TX);
+ if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
+ dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+
+ /* Adding a pad byte here simplifies the handling in
+ * cdc_ncm_fill_tx_frame, by making tx_max always
+ * represent the real skb max size.
+ */
+ if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
}
/*
@@ -183,7 +183,7 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
- pr_debug("Using default alignment: 4 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default alignment: 4 bytes\n");
ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
@@ -197,13 +197,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
- pr_debug("Using default transmit modulus: 4 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default transmit modulus: 4 bytes\n");
ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
/* verify the payload remainder */
if (ctx->tx_remainder >= ctx->tx_modulus) {
- pr_debug("Using default transmit remainder: 0 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default transmit remainder: 0 bytes\n");
ctx->tx_remainder = 0;
}
@@ -221,7 +221,7 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
USB_CDC_NCM_CRC_NOT_APPENDED,
iface_no, NULL, 0);
if (err < 0)
- pr_debug("Setting CRC mode off failed\n");
+ dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
@@ -232,69 +232,43 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
USB_CDC_NCM_NTB16_FORMAT,
iface_no, NULL, 0);
if (err < 0)
- pr_debug("Setting NTB format to 16-bit failed\n");
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
}
- ctx->max_datagram_size = min_dgram_size;
+ /* inform the device about the selected Max Datagram Size */
+ if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+ goto out;
- /* set Max Datagram Size (MTU) */
- if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 max_datagram_size;
- u16 eth_max_sz;
- if (ctx->ether_desc != NULL)
- eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- else if (ctx->mbim_desc != NULL)
- eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
- else
- goto max_dgram_err;
-
- err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
- pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
- min_dgram_size);
- } else {
- ctx->max_datagram_size =
- le16_to_cpu(max_datagram_size);
- /* Check Eth descriptor value */
- if (ctx->max_datagram_size > eth_max_sz)
- ctx->max_datagram_size = eth_max_sz;
-
- if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
-
- if (ctx->max_datagram_size < min_dgram_size)
- ctx->max_datagram_size = min_dgram_size;
-
- /* if value changed, update device */
- if (ctx->max_datagram_size !=
- le16_to_cpu(max_datagram_size)) {
- err = usbnet_write_cmd(dev,
- USB_CDC_SET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0,
- iface_no, &max_datagram_size,
- 2);
- if (err < 0)
- pr_debug("SET_MAX_DGRAM_SIZE failed\n");
- }
- }
+ /* read current mtu value from device */
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0) {
+ dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ goto out;
}
-max_dgram_err:
- if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
- ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
+ if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+ goto out;
+
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0)
+ dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+out:
+ /* set MTU to max supported by the device if necessary */
+ if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
+ dev->net->mtu = ctx->max_datagram_size - eth_hlen;
return 0;
}
static void
-cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
- struct usb_host_endpoint *e;
+ struct usb_host_endpoint *e, *in = NULL, *out = NULL;
u8 ep;
for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
@@ -303,18 +277,18 @@ cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(&e->desc)) {
- if (ctx->status_ep == NULL)
- ctx->status_ep = e;
+ if (!dev->status)
+ dev->status = e;
}
break;
case USB_ENDPOINT_XFER_BULK:
if (usb_endpoint_dir_in(&e->desc)) {
- if (ctx->in_ep == NULL)
- ctx->in_ep = e;
+ if (!in)
+ in = e;
} else {
- if (ctx->out_ep == NULL)
- ctx->out_ep = e;
+ if (!out)
+ out = e;
}
break;
@@ -322,6 +296,14 @@ cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
break;
}
}
+ if (in && !dev->in)
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ if (out && !dev->out)
+ dev->out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
}
static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
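For reference, the usb_rcvbulkpipe()/usb_sndbulkpipe() calls above only need the endpoint number, which is why bEndpointAddress is masked with USB_ENDPOINT_NUMBER_MASK; bit 7 of the same byte carries the direction. A minimal userspace sketch of that decoding (0x81, endpoint 1 IN, is an assumed example address):

#include <stdint.h>
#include <stdio.h>

#define USB_DIR_IN		 0x80	/* bit 7: set for IN endpoints */
#define USB_ENDPOINT_NUMBER_MASK 0x0f	/* bits 0..3: endpoint number */

int main(void)
{
	uint8_t bEndpointAddress = 0x81;	/* assumed example: endpoint 1, IN */

	printf("endpoint %u, direction %s\n",
	       bEndpointAddress & USB_ENDPOINT_NUMBER_MASK,
	       (bEndpointAddress & USB_DIR_IN) ? "IN" : "OUT");
	return 0;
}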
@@ -342,18 +324,9 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
kfree(ctx);
}
-static const struct ethtool_ops cdc_ncm_ethtool_ops = {
- .get_drvinfo = cdc_ncm_get_drvinfo,
- .get_link = usbnet_get_link,
- .get_msglevel = usbnet_get_msglevel,
- .set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
- .nway_reset = usbnet_nway_reset,
-};
-
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
{
+ const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
u8 *buf;
@@ -367,23 +340,22 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- ctx->bh.data = (unsigned long)ctx;
+ ctx->bh.data = (unsigned long)dev;
ctx->bh.func = cdc_ncm_txpath_bh;
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
- ctx->netdev = dev->net;
/* store ctx pointer in device data field */
dev->data[0] = (unsigned long)ctx;
+ /* only the control interface can be successfully probed */
+ ctx->control = intf;
+
/* get some pointers */
driver = driver_of(intf);
buf = intf->cur_altsetting->extra;
len = intf->cur_altsetting->extralen;
- ctx->udev = dev->udev;
- ctx->intf = intf;
-
/* parse through descriptors associated with control interface */
while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
@@ -392,16 +364,18 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
switch (buf[2]) {
case USB_CDC_UNION_TYPE:
- if (buf[0] < sizeof(*(ctx->union_desc)))
+ if (buf[0] < sizeof(*union_desc))
break;
- ctx->union_desc =
- (const struct usb_cdc_union_desc *)buf;
-
- ctx->control = usb_ifnum_to_if(dev->udev,
- ctx->union_desc->bMasterInterface0);
+ union_desc = (const struct usb_cdc_union_desc *)buf;
+ /* the master must be the interface we are probing */
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ union_desc->bMasterInterface0) {
+ dev_dbg(&intf->dev, "bogus CDC Union\n");
+ goto error;
+ }
ctx->data = usb_ifnum_to_if(dev->udev,
- ctx->union_desc->bSlaveInterface0);
+ union_desc->bSlaveInterface0);
break;
case USB_CDC_ETHERNET_TYPE:
@@ -410,13 +384,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->ether_desc =
(const struct usb_cdc_ether_desc *)buf;
- dev->hard_mtu =
- le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
- if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
- dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
- else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
- dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
break;
case USB_CDC_NCM_TYPE:
@@ -444,69 +411,71 @@ advance:
}
/* some buggy devices have an IAD but no CDC Union */
- if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
- ctx->control = intf;
+ if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
}
/* check if we got everything */
- if ((ctx->control == NULL) || (ctx->data == NULL) ||
- ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
+ if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
+ dev_dbg(&intf->dev, "CDC descriptors missing\n");
goto error;
+ }
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
temp = usb_driver_claim_interface(driver, ctx->data, dev);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "failed to claim data intf\n");
goto error;
+ }
}
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* reset data interface */
temp = usb_set_interface(dev->udev, iface_no, 0);
- if (temp)
- goto error2;
-
- /* initialize data interface */
- if (cdc_ncm_setup(ctx))
+ if (temp) {
+ dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
+ }
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
+ }
- cdc_ncm_find_endpoints(ctx, ctx->data);
- cdc_ncm_find_endpoints(ctx, ctx->control);
-
- if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
- (ctx->status_ep == NULL))
+ cdc_ncm_find_endpoints(dev, ctx->data);
+ cdc_ncm_find_endpoints(dev, ctx->control);
+ if (!dev->in || !dev->out || !dev->status) {
+ dev_dbg(&intf->dev, "failed to collect endpoints\n");
goto error2;
+ }
- dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev)) {
+ dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
+ goto error2;
+ }
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
- usb_set_intfdata(ctx->intf, dev);
if (ctx->ether_desc) {
temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "failed to get mac address\n");
goto error2;
- dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+ }
+ dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
}
-
- dev->in = usb_rcvbulkpipe(dev->udev,
- ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->out = usb_sndbulkpipe(dev->udev,
- ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->status = ctx->status_ep;
+ /* usbnet uses these values for sizing tx/rx queues */
+ dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
- ctx->tx_speed = ctx->rx_speed = 0;
return 0;
error2:
@@ -517,7 +486,7 @@ error2:
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
- dev_info(&dev->udev->dev, "bind() failure\n");
+ dev_info(&intf->dev, "bind() failure\n");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
@@ -553,7 +522,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
ctx->control = NULL;
}
- usb_set_intfdata(ctx->intf, NULL);
+ usb_set_intfdata(intf, NULL);
cdc_ncm_free(ctx);
}
EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
@@ -662,8 +631,9 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
}
struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
struct sk_buff *skb_out;
@@ -683,11 +653,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* allocate a new OUT skb */
if (!skb_out) {
- skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
+ skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
dev_kfree_skb_any(skb);
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
goto exit_no_skb;
}
@@ -725,12 +695,12 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
skb = NULL;
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
} else {
/* no room for skb - store for later */
if (ctx->tx_rem_skb != NULL) {
dev_kfree_skb_any(ctx->tx_rem_skb);
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
ctx->tx_rem_skb = skb;
ctx->tx_rem_sign = sign;
@@ -763,7 +733,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
if (skb != NULL) {
dev_kfree_skb_any(skb);
skb = NULL;
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
ctx->tx_curr_frame_num = n;
@@ -788,19 +758,20 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* variables will be reset at next call */
}
- /*
- * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
- * we send buffers as it is. If we get more data, it would be more
- * efficient for USB HS mobile device with DMA engine to receive a full
- * size NTB, than canceling DMA transfer and receiving a short packet.
+ /* If the collected data size is less than or equal to
+ * CDC_NCM_MIN_TX_PKT bytes, we send the buffer as is. If we
+ * get more data, it is more efficient for a USB HS mobile
+ * device with a DMA engine to receive a full-size NTB than to
+ * cancel the DMA transfer and receive a short packet.
+ *
+ * This optimization is pointless if we end up sending a ZLP
+ * after full-sized NTBs anyway.
+ */
- if (skb_out->len > CDC_NCM_MIN_TX_PKT)
- /* final zero padding */
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
-
- /* do we need to prevent a ZLP? */
- if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
- (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+ skb_out->len > CDC_NCM_MIN_TX_PKT)
+ memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
+ ctx->tx_max - skb_out->len);
+ else if ((skb_out->len % dev->maxpacket) == 0)
*skb_put(skb_out, 1) = 0; /* force short packet */
/* set final frame length */
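The two-branch termination logic above can be exercised on its own: drivers that do not set FLAG_SEND_ZLP zero-pad the NTB up to tx_max once it carries more than CDC_NCM_MIN_TX_PKT bytes, while ZLP-capable ones only append a single byte when the length happens to be an exact multiple of the endpoint's maxpacket. A hedged userspace sketch, with 512 standing in for both CDC_NCM_MIN_TX_PKT and the bulk maxpacket:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN_TX_PKT 512		/* stand-in for CDC_NCM_MIN_TX_PKT */
#define MAXPACKET  512		/* assumed bulk wMaxPacketSize */

/* Terminate an NTB: either pad it out to tx_max, or force a short packet. */
static size_t finish_ntb(uint8_t *buf, size_t len, size_t tx_max, bool send_zlp)
{
	if (!send_zlp && len > MIN_TX_PKT) {
		memset(buf + len, 0, tx_max - len);	/* zero padding */
		return tx_max;
	}
	if (len % MAXPACKET == 0)
		buf[len++] = 0;		/* one extra byte instead of a ZLP */
	return len;
}

int main(void)
{
	static uint8_t buf[16385];	/* tx_max plus the reserved pad byte */

	printf("%zu\n", finish_ntb(buf, 2048, 16384, false));	/* 16384, padded   */
	printf("%zu\n", finish_ntb(buf, 1024, 16384, true));	/* 1025, short pkt */
	return 0;
}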
@@ -809,7 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
- ctx->netdev->stats.tx_packets += ctx->tx_curr_frame_num;
+ dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
return skb_out;
exit_no_skb:
@@ -841,24 +812,25 @@ static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer)
static void cdc_ncm_txpath_bh(unsigned long param)
{
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;
+ struct usbnet *dev = (struct usbnet *)param;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
spin_lock_bh(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
spin_unlock_bh(&ctx->mtx);
- } else if (ctx->netdev != NULL) {
+ } else if (dev->net != NULL) {
spin_unlock_bh(&ctx->mtx);
- netif_tx_lock_bh(ctx->netdev);
- usbnet_start_xmit(NULL, ctx->netdev);
- netif_tx_unlock_bh(ctx->netdev);
+ netif_tx_lock_bh(dev->net);
+ usbnet_start_xmit(NULL, dev->net);
+ netif_tx_unlock_bh(dev->net);
} else {
spin_unlock_bh(&ctx->mtx);
}
}
-static struct sk_buff *
+struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct sk_buff *skb_out;
@@ -875,7 +847,7 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -885,10 +857,12 @@ error:
return NULL;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_tx_fixup);
/* verify NTB header and return offset of first NDP, or negative error */
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_nth16 *nth16;
int len;
int ret = -EINVAL;
@@ -898,30 +872,33 @@ int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) +
sizeof(struct usb_cdc_ncm_ndp16))) {
- pr_debug("frame too short\n");
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
goto error;
}
nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data;
- if (le32_to_cpu(nth16->dwSignature) != USB_CDC_NCM_NTH16_SIGN) {
- pr_debug("invalid NTH16 signature <%u>\n",
- le32_to_cpu(nth16->dwSignature));
+ if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH16 signature <%#010x>\n",
+ le32_to_cpu(nth16->dwSignature));
goto error;
}
len = le16_to_cpu(nth16->wBlockLength);
if (len > ctx->rx_max) {
- pr_debug("unsupported NTB block length %u/%u\n", len,
- ctx->rx_max);
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
goto error;
}
if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
- (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
- !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
- pr_debug("sequence number glitch prev=%d curr=%d\n",
- ctx->rx_seq, le16_to_cpu(nth16->wSequence));
+ (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth16->wSequence));
}
ctx->rx_seq = le16_to_cpu(nth16->wSequence);
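The signature checks above now compare the raw little-endian field against a constant converted once with cpu_to_le32(), instead of byte-swapping the field on every check; the two forms are equivalent, but the new one lets the byte swap happen on the constant at compile time. A userspace sketch of the same idiom using the glibc/musl <endian.h> helpers (the signature value is assumed for illustration):

#include <endian.h>	/* htole32()/le32toh() */
#include <stdint.h>
#include <stdio.h>

#define NTH16_SIGN 0x484d434e	/* assumed "NCMH" signature value */

struct nth16 {
	uint32_t dwSignature;	/* stored little-endian on the wire */
};

int main(void)
{
	struct nth16 hdr = { .dwSignature = htole32(NTH16_SIGN) };

	/* Convert the constant once instead of converting the field each time. */
	if (hdr.dwSignature != htole32(NTH16_SIGN))
		printf("invalid NTH16 signature <%#010x>\n",
		       (unsigned int)le32toh(hdr.dwSignature));
	else
		printf("signature ok\n");
	return 0;
}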
@@ -934,18 +911,20 @@ EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp16 *ndp16;
int ret = -EINVAL;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
- pr_debug("invalid NDP offset <%u>\n", ndpoffset);
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
goto error;
}
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
- pr_debug("invalid DPT16 length <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n",
+ le16_to_cpu(ndp16->wLength));
goto error;
}
@@ -954,9 +933,9 @@ int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* we process NDP entries except for the last one */
- if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
- skb_in->len) {
- pr_debug("Invalid nframes = %d\n", ret);
+ if ((sizeof(struct usb_cdc_ncm_ndp16) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
@@ -965,7 +944,7 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
-static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -989,9 +968,10 @@ next_ndp:
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
- pr_debug("invalid DPT16 signature <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp16->dwSignature));
goto err_ndp;
}
dpe16 = ndp16->dpe16;
@@ -1013,9 +993,9 @@ next_ndp:
/* sanity checking */
if (((offset + len) > skb_in->len) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
- pr_debug("invalid frame detected (ignored)"
- "offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, len, skb_in);
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, len, skb_in);
if (!x)
goto err_ndp;
break;
@@ -1040,9 +1020,10 @@ err_ndp:
error:
return 0;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_fixup);
static void
-cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+cdc_ncm_speed_change(struct usbnet *dev,
struct usb_cdc_speed_change *data)
{
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
@@ -1052,25 +1033,16 @@ cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
*/
- if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
- ctx->tx_speed = tx_speed;
- ctx->rx_speed = rx_speed;
-
- if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
- printk(KERN_INFO KBUILD_MODNAME
- ": %s: %u mbit/s downlink "
- "%u mbit/s uplink\n",
- ctx->netdev->name,
- (unsigned int)(rx_speed / 1000000U),
- (unsigned int)(tx_speed / 1000000U));
- } else {
- printk(KERN_INFO KBUILD_MODNAME
- ": %s: %u kbit/s downlink "
- "%u kbit/s uplink\n",
- ctx->netdev->name,
- (unsigned int)(rx_speed / 1000U),
- (unsigned int)(tx_speed / 1000U));
- }
+ if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+ netif_info(dev, link, dev->net,
+ "%u mbit/s downlink %u mbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
+ } else {
+ netif_info(dev, link, dev->net,
+ "%u kbit/s downlink %u kbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
}
}
@@ -1086,7 +1058,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
/* test for split data in 8-byte chunks */
if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
- cdc_ncm_speed_change(ctx,
+ cdc_ncm_speed_change(dev,
(struct usb_cdc_speed_change *)urb->transfer_buffer);
return;
}
@@ -1101,14 +1073,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
ctx->connected = le16_to_cpu(event->wValue);
-
- printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
- " %sconnected\n",
- ctx->netdev->name, ctx->connected ? "" : "dis");
-
+ netif_info(dev, link, dev->net,
+ "network connection: %sconnected\n",
+ ctx->connected ? "" : "dis");
usbnet_link_change(dev, ctx->connected, 0);
- if (!ctx->connected)
- ctx->tx_speed = ctx->rx_speed = 0;
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1116,8 +1084,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
sizeof(struct usb_cdc_speed_change)))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
- cdc_ncm_speed_change(ctx,
- (struct usb_cdc_speed_change *) &event[1]);
+ cdc_ncm_speed_change(dev,
+ (struct usb_cdc_speed_change *)&event[1]);
break;
default:
@@ -1139,22 +1107,6 @@ static int cdc_ncm_check_connect(struct usbnet *dev)
return !ctx->connected;
}
-static int
-cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
-{
- return usbnet_probe(udev, prod);
-}
-
-static void cdc_ncm_disconnect(struct usb_interface *intf)
-{
- struct usbnet *dev = usb_get_intfdata(intf);
-
- if (dev == NULL)
- return; /* already disconnected */
-
- usbnet_disconnect(intf);
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
@@ -1234,17 +1186,6 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
- /* Huawei NCM devices disguised as vendor specific */
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
- .driver_info = (unsigned long)&wwan_info,
- },
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
- .driver_info = (unsigned long)&wwan_info,
- },
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
- .driver_info = (unsigned long)&wwan_info,
- },
-
/* Infineon(now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
USB_CLASS_COMM,
@@ -1265,8 +1206,8 @@ MODULE_DEVICE_TABLE(usb, cdc_devs);
static struct usb_driver cdc_ncm_driver = {
.name = "cdc_ncm",
.id_table = cdc_devs,
- .probe = cdc_ncm_probe,
- .disconnect = cdc_ncm_disconnect,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.reset_resume = usbnet_resume,
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
new file mode 100644
index 00000000000..312178d7b69
--- /dev/null
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -0,0 +1,230 @@
+/* huawei_cdc_ncm.c - handles Huawei devices using the CDC NCM protocol as
+ * transport layer.
+ * Copyright (C) 2013 Enrico Mioso <mrkiko.rs@gmail.com>
+ *
+ *
+ * ABSTRACT:
+ * This driver handles devices resembling the CDC NCM standard, but
+ * encapsulating another protocol inside it. Examples are some Huawei 3G
+ * devices, exposing an embedded AT channel where you can set up the NCM
+ * connection.
+ * This code has been heavily inspired by the cdc_mbim.c driver, which is
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc-wdm.h>
+#include <linux/usb/cdc_ncm.h>
+
+/* Driver data */
+struct huawei_cdc_ncm_state {
+ struct cdc_ncm_ctx *ctx;
+ atomic_t pmcount;
+ struct usb_driver *subdriver;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+static int huawei_cdc_ncm_manage_power(struct usbnet *usbnet_dev, int on)
+{
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ int rv;
+
+ if ((on && atomic_add_return(1, &drvstate->pmcount) == 1) ||
+ (!on && atomic_dec_and_test(&drvstate->pmcount))) {
+ rv = usb_autopm_get_interface(usbnet_dev->intf);
+ usbnet_dev->intf->needs_remote_wakeup = on;
+ if (!rv)
+ usb_autopm_put_interface(usbnet_dev->intf);
+ }
+ return 0;
+}
+
+static int huawei_cdc_ncm_wdm_manage_power(struct usb_interface *intf,
+ int status)
+{
+ struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!usbnet_dev)
+ return 0;
+
+ return huawei_cdc_ncm_manage_power(usbnet_dev, status);
+}
+
+
+static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
+ struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_driver *subdriver = ERR_PTR(-ENODEV);
+ int ret = -ENODEV;
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+
+ /* altsetting should always be 1 for NCM devices - so we hard-coded
+ * it here
+ */
+ ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+ if (ret)
+ goto err;
+
+ ctx = drvstate->ctx;
+
+ if (usbnet_dev->status)
+ /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
+ * decimal (0x100)"
+ */
+ subdriver = usb_cdc_wdm_register(ctx->control,
+ &usbnet_dev->status->desc,
+ 256, /* wMaxCommand */
+ huawei_cdc_ncm_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ ret = PTR_ERR(subdriver);
+ cdc_ncm_unbind(usbnet_dev, intf);
+ goto err;
+ }
+
+ /* Prevent usbnet from using the status descriptor */
+ usbnet_dev->status = NULL;
+
+ drvstate->subdriver = subdriver;
+
+err:
+ return ret;
+}
+
+static void huawei_cdc_ncm_unbind(struct usbnet *usbnet_dev,
+ struct usb_interface *intf)
+{
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+ if (drvstate->subdriver && drvstate->subdriver->disconnect)
+ drvstate->subdriver->disconnect(ctx->control);
+ drvstate->subdriver = NULL;
+
+ cdc_ncm_unbind(usbnet_dev, intf);
+}
+
+static int huawei_cdc_ncm_suspend(struct usb_interface *intf,
+ pm_message_t message)
+{
+ int ret = 0;
+ struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+ if (ctx == NULL) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = usbnet_suspend(intf, message);
+ if (ret < 0)
+ goto error;
+
+ if (intf == ctx->control &&
+ drvstate->subdriver &&
+ drvstate->subdriver->suspend)
+ ret = drvstate->subdriver->suspend(intf, message);
+ if (ret < 0)
+ usbnet_resume(intf);
+
+error:
+ return ret;
+}
+
+static int huawei_cdc_ncm_resume(struct usb_interface *intf)
+{
+ int ret = 0;
+ struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ bool callsub;
+ struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+ /* should we call subdriver's resume function? */
+ callsub =
+ (intf == ctx->control &&
+ drvstate->subdriver &&
+ drvstate->subdriver->resume);
+
+ if (callsub)
+ ret = drvstate->subdriver->resume(intf);
+ if (ret < 0)
+ goto err;
+ ret = usbnet_resume(intf);
+ if (ret < 0 && callsub)
+ drvstate->subdriver->suspend(intf, PMSG_SUSPEND);
+err:
+ return ret;
+}
+
+static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
+{
+ struct cdc_ncm_ctx *ctx;
+
+ ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
+
+ if (ctx == NULL)
+ return 1; /* disconnected */
+
+ return !ctx->connected;
+}
+
+static const struct driver_info huawei_cdc_ncm_info = {
+ .description = "Huawei CDC NCM device",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .bind = huawei_cdc_ncm_bind,
+ .unbind = huawei_cdc_ncm_unbind,
+ .check_connect = huawei_cdc_ncm_check_connect,
+ .manage_power = huawei_cdc_ncm_manage_power,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static const struct usb_device_id huawei_cdc_ncm_devs[] = {
+ /* Huawei NCM devices disguised as vendor specific */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
+
+ /* Terminating entry */
+ {
+ },
+};
+MODULE_DEVICE_TABLE(usb, huawei_cdc_ncm_devs);
+
+static struct usb_driver huawei_cdc_ncm_driver = {
+ .name = "huawei_cdc_ncm",
+ .id_table = huawei_cdc_ncm_devs,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = huawei_cdc_ncm_suspend,
+ .resume = huawei_cdc_ncm_resume,
+ .reset_resume = huawei_cdc_ncm_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(huawei_cdc_ncm_driver);
+MODULE_AUTHOR("Enrico Mioso <mrkiko.rs@gmail.com>");
+MODULE_DESCRIPTION("USB CDC NCM host driver with encapsulated protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 818ce90185b..23bdd5b9274 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -143,24 +143,28 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
struct qmi_wwan_state *info = (void *)&dev->data;
- int rv = 0;
+ int rv;
- dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+ atomic_read(&info->pmcount), on);
- if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
- /* need autopm_get/put here to ensure the usbcore sees the new value */
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+ (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees
+ * the new value
+ */
rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
+ if (!rv)
+ usb_autopm_put_interface(dev->intf);
}
-err:
- return rv;
+ return 0;
}
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
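The same counter idiom appears in both qmi_wwan above and the new huawei_cdc_ncm driver: every user that needs the device awake bumps an atomic counter, and only the 0-to-1 and 1-to-0 transitions actually touch the remote-wakeup setting. A standalone sketch of the idiom (names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pmcount;

/* Only the 0->1 and 1->0 transitions change the wakeup configuration. */
static void manage_power(bool on)
{
	if ((on && atomic_fetch_add(&pmcount, 1) + 1 == 1) ||
	    (!on && atomic_fetch_sub(&pmcount, 1) - 1 == 0))
		printf("needs_remote_wakeup = %d\n", on);
}

int main(void)
{
	manage_power(true);	/* prints: needs_remote_wakeup = 1 */
	manage_power(true);	/* merged, no output */
	manage_power(false);	/* merged, no output */
	manage_power(false);	/* prints: needs_remote_wakeup = 0 */
	return 0;
}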
@@ -199,7 +203,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
atomic_set(&info->pmcount, 0);
/* register subdriver */
- subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+ 4096, &qmi_wwan_cdc_wdm_manage_power);
if (IS_ERR(subdriver)) {
dev_err(&info->control->dev, "subdriver registration failed\n");
rv = PTR_ERR(subdriver);
@@ -228,7 +233,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_driver *driver = driver_of(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
- BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+ sizeof(struct qmi_wwan_state)));
/* set up initial state */
info->control = intf;
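The BUILD_BUG_ON above guards the trick of overlaying struct qmi_wwan_state on the scratch words of struct usbnet (huawei_cdc_ncm does the same with its drvstate). Outside the kernel the equivalent compile-time check is a C11 _Static_assert; a sketch with stand-in structures:

#include <stddef.h>

struct host {
	unsigned long data[5];		/* stand-in for the usbnet scratch area */
};

struct state {
	void *control;
	void *data;
	void *subdriver;
	int   pmcount;			/* stand-in private driver state */
};

/* Refuse to build if the private state ever outgrows the scratch area. */
_Static_assert(sizeof(struct state) <= sizeof(((struct host *)0)->data),
	       "struct state does not fit in struct host::data");

int main(void)
{
	return 0;
}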
@@ -250,7 +256,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
- dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC header len %u\n",
+ h->bLength);
goto err;
}
break;
@@ -260,7 +267,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
- dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC union len %u\n",
+ h->bLength);
goto err;
}
cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +279,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
- dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC ether len %u\n",
+ h->bLength);
goto err;
}
cdc_ether = (struct usb_cdc_ether_desc *)buf;
break;
}
- /*
- * Remember which CDC functional descriptors we've seen. Works
+ /* Remember which CDC functional descriptors we've seen. Works
* for all types we care about, of which USB_CDC_ETHERNET_TYPE
* (0x0f) is the highest numbered
*/
@@ -293,10 +301,14 @@ next_desc:
/* Use separate control and data interfaces if we found a CDC Union */
if (cdc_union) {
- info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
- if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
- dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
- cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+ info->data = usb_ifnum_to_if(dev->udev,
+ cdc_union->bSlaveInterface0);
+ if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+ !info->data) {
+ dev_err(&intf->dev,
+ "bogus CDC Union: master=%u, slave=%u\n",
+ cdc_union->bMasterInterface0,
+ cdc_union->bSlaveInterface0);
goto err;
}
}
@@ -374,8 +386,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
- /*
- * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+ /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
* in system sleep context, otherwise, the resume callback has
* to recover device from previous suspend failure.
*/
@@ -383,7 +394,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
if (ret < 0)
goto err;
- if (intf == info->control && info->subdriver && info->subdriver->suspend)
+ if (intf == info->control && info->subdriver &&
+ info->subdriver->suspend)
ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
@@ -396,14 +408,15 @@ static int qmi_wwan_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
- bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+ bool callsub = (intf == info->control && info->subdriver &&
+ info->subdriver->resume);
if (callsub)
ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && callsub && info->subdriver->suspend)
+ if (ret < 0 && callsub)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
@@ -714,6 +727,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
@@ -777,7 +791,8 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+ const struct usb_device_id *prod)
{
struct usb_device_id *id = (struct usb_device_id *)prod;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f3fce412c0c..51073721e22 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,7 +24,7 @@
#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
+#define DRIVER_VERSION "v1.02.0 (2013/10/28)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
@@ -307,22 +307,22 @@ enum rtl8152_flags {
#define MCU_TYPE_USB 0x0000
struct rx_desc {
- u32 opts1;
+ __le32 opts1;
#define RX_LEN_MASK 0x7fff
- u32 opts2;
- u32 opts3;
- u32 opts4;
- u32 opts5;
- u32 opts6;
+ __le32 opts2;
+ __le32 opts3;
+ __le32 opts4;
+ __le32 opts5;
+ __le32 opts6;
};
struct tx_desc {
- u32 opts1;
+ __le32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
#define TX_LEN_MASK 0x3ffff
- u32 opts2;
+ __le32 opts2;
#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
@@ -365,6 +365,7 @@ struct r8152 {
struct mii_if_info mii;
int intr_interval;
u32 msg_enable;
+ u32 tx_qlen;
u16 ocp_base;
u8 *intr_buff;
u8 version;
@@ -876,7 +877,7 @@ static void write_bulk_callback(struct urb *urb)
static void intr_callback(struct urb *urb)
{
struct r8152 *tp;
- __u16 *d;
+ __le16 *d;
int status = urb->status;
int res;
@@ -1136,14 +1137,14 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
{
- u32 remain;
+ int remain;
u8 *tx_data;
tx_data = agg->head;
agg->skb_num = agg->skb_len = 0;
- remain = rx_buf_sz - sizeof(struct tx_desc);
+ remain = rx_buf_sz;
- while (remain >= ETH_ZLEN) {
+ while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) {
struct tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int len;
@@ -1152,12 +1153,14 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
if (!skb)
break;
+ remain -= sizeof(*tx_desc);
len = skb->len;
if (remain < len) {
skb_queue_head(&tp->tx_queue, skb);
break;
}
+ tx_data = tx_agg_align(tx_data);
tx_desc = (struct tx_desc *)tx_data;
tx_data += sizeof(*tx_desc);
@@ -1167,11 +1170,18 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
agg->skb_len += len;
dev_kfree_skb_any(skb);
- tx_data = tx_agg_align(tx_data + len);
- remain = rx_buf_sz - sizeof(*tx_desc) -
- (u32)((void *)tx_data - agg->head);
+ tx_data += len;
+ remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
}
+ netif_tx_lock(tp->netdev);
+
+ if (netif_queue_stopped(tp->netdev) &&
+ skb_queue_len(&tp->tx_queue) < tp->tx_qlen)
+ netif_wake_queue(tp->netdev);
+
+ netif_tx_unlock(tp->netdev);
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
agg->head, (int)(tx_data - (u8 *)agg->head),
(usb_complete_t)write_bulk_callback, agg);
@@ -1188,7 +1198,6 @@ static void rx_bottom(struct r8152 *tp)
list_for_each_safe(cursor, next, &tp->rx_done) {
struct rx_desc *rx_desc;
struct rx_agg *agg;
- unsigned pkt_len;
int len_used = 0;
struct urb *urb;
u8 *rx_data;
@@ -1204,17 +1213,22 @@ static void rx_bottom(struct r8152 *tp)
rx_desc = agg->head;
rx_data = agg->head;
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
- len_used += sizeof(struct rx_desc) + pkt_len;
+ len_used += sizeof(struct rx_desc);
- while (urb->actual_length >= len_used) {
+ while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
struct net_device_stats *stats;
+ unsigned int pkt_len;
struct sk_buff *skb;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
if (pkt_len < ETH_ZLEN)
break;
+ len_used += pkt_len;
+ if (urb->actual_length < len_used)
+ break;
+
stats = rtl8152_get_stats(netdev);
pkt_len -= 4; /* CRC */
@@ -1234,9 +1248,8 @@ static void rx_bottom(struct r8152 *tp)
rx_data = rx_agg_align(rx_data + pkt_len + 4);
rx_desc = (struct rx_desc *)rx_data;
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
len_used = (int)(rx_data - (u8 *)agg->head);
- len_used += sizeof(struct rx_desc) + pkt_len;
+ len_used += sizeof(struct rx_desc);
}
submit:
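The reworked loop above validates the remaining URB length twice per frame: once before reading a descriptor and once more before trusting the payload length it announces. A simplified, standalone sketch of that bounds-checked walk (the real rx_desc is larger, and the driver additionally realigns the cursor and strips the 4-byte CRC):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rx_desc {
	uint32_t opts1;			/* simplified: low 15 bits hold the length */
};
#define RX_LEN_MASK 0x7fff
#define MIN_FRAME   60			/* stand-in for ETH_ZLEN */

static void parse(const uint8_t *buf, size_t actual_length)
{
	size_t len_used = 0;

	while (actual_length > len_used + sizeof(struct rx_desc)) {
		struct rx_desc desc;
		size_t pkt_len;

		memcpy(&desc, buf + len_used, sizeof(desc));
		len_used += sizeof(desc);

		pkt_len = desc.opts1 & RX_LEN_MASK;
		if (pkt_len < MIN_FRAME)
			break;
		if (actual_length < len_used + pkt_len)
			break;			/* descriptor lies about the payload */

		printf("frame of %zu bytes at offset %zu\n", pkt_len, len_used);
		len_used += pkt_len;
	}
}

int main(void)
{
	uint8_t buf[256] = { 0 };
	struct rx_desc d = { .opts1 = 100 };

	memcpy(buf, &d, sizeof(d));
	parse(buf, sizeof(d) + 100);		/* one 100-byte frame */
	return 0;
}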
@@ -1384,53 +1397,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- struct net_device_stats *stats = rtl8152_get_stats(netdev);
- unsigned long flags;
- struct tx_agg *agg = NULL;
- struct tx_desc *tx_desc;
- unsigned int len;
- u8 *tx_data;
- int res;
skb_tx_timestamp(skb);
- /* If tx_queue is not empty, it means at least one previous packt */
- /* is waiting for sending. Don't send current one before it. */
- if (skb_queue_empty(&tp->tx_queue))
- agg = r8152_get_tx_agg(tp);
-
- if (!agg) {
- skb_queue_tail(&tp->tx_queue, skb);
- return NETDEV_TX_OK;
- }
+ skb_queue_tail(&tp->tx_queue, skb);
- tx_desc = (struct tx_desc *)agg->head;
- tx_data = agg->head + sizeof(*tx_desc);
- agg->skb_num = agg->skb_len = 0;
+ if (list_empty(&tp->tx_free) &&
+ skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
+ netif_stop_queue(netdev);
- len = skb->len;
- r8152_tx_csum(tp, tx_desc, skb);
- memcpy(tx_data, skb->data, len);
- dev_kfree_skb_any(skb);
- agg->skb_num++;
- agg->skb_len += len;
- usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
- agg->head, len + sizeof(*tx_desc),
- (usb_complete_t)write_bulk_callback, agg);
- res = usb_submit_urb(agg->urb, GFP_ATOMIC);
- if (res) {
- /* Can we get/handle EPIPE here? */
- if (res == -ENODEV) {
- netif_device_detach(tp->netdev);
- } else {
- netif_warn(tp, tx_err, netdev,
- "failed tx_urb %d\n", res);
- stats->tx_dropped++;
- spin_lock_irqsave(&tp->tx_lock, flags);
- list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock_irqrestore(&tp->tx_lock, flags);
- }
- }
+ if (!list_empty(&tp->tx_free))
+ tasklet_schedule(&tp->tl);
return NETDEV_TX_OK;
}
@@ -1459,6 +1436,14 @@ static void rtl8152_nic_reset(struct r8152 *tp)
}
}
+static void set_tx_qlen(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+
+ tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN +
+ sizeof(struct tx_desc));
+}
+
static inline u8 rtl8152_get_speed(struct r8152 *tp)
{
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
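set_tx_qlen() above just computes how many worst-case frames, each a full MTU plus a VLAN-tagged Ethernet header plus one tx descriptor, fit into a single aggregation buffer. A worked example, assuming a 16 KiB aggregation buffer and the usual constants:

#include <stdio.h>

int main(void)
{
	int rx_buf_sz     = 16 * 1024;	/* assumed aggregation buffer size */
	int mtu           = 1500;
	int vlan_eth_hlen = 18;		/* 14-byte Ethernet header + 4-byte VLAN tag */
	int vlan_hlen     = 4;		/* room for a second tag */
	int tx_desc_size  = 8;		/* two 32-bit descriptor words */

	int tx_qlen = rx_buf_sz / (mtu + vlan_eth_hlen + vlan_hlen + tx_desc_size);
	printf("tx_qlen = %d\n", tx_qlen);	/* 16384 / 1530 = 10 */
	return 0;
}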
@@ -1470,6 +1455,7 @@ static int rtl8152_enable(struct r8152 *tp)
int i, ret;
u8 speed;
+ set_tx_qlen(tp);
speed = rtl8152_get_speed(tp);
if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 90a429b7eba..8494bb53ebd 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb)
break;
}
- if (!netif_running (dev->net))
- return;
-
status = usb_submit_urb (urb, GFP_ATOMIC);
if (status != 0)
netif_err(dev, timer, dev->net,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index eee1f19ef1e..2ec2041b62d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -188,6 +188,11 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
return tot;
}
+/* fake multicast ability */
+static void veth_set_multicast_list(struct net_device *dev)
+{
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -230,10 +235,18 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
+ int i;
+
dev->vstats = alloc_percpu(struct pcpu_vstats);
if (!dev->vstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_vstats *veth_stats;
+ veth_stats = per_cpu_ptr(dev->vstats, i);
+ u64_stats_init(&veth_stats->syncp);
+ }
+
return 0;
}
@@ -250,11 +263,14 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_start_xmit = veth_xmit,
.ndo_change_mtu = veth_change_mtu,
.ndo_get_stats64 = veth_get_stats64,
+ .ndo_set_rx_mode = veth_set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
@@ -273,6 +289,7 @@ static void veth_setup(struct net_device *dev)
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
+ dev->hw_enc_features = VETH_FEATURES;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9fbdfcd1e1a..7bab4de658a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -36,7 +36,10 @@ module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
-#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
+ sizeof(struct virtio_net_hdr_mrg_rxbuf), \
+ L1_CACHE_BYTES))
#define GOOD_COPY_LEN 128
#define VIRTNET_DRIVER_VERSION "1.0.0"
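The new MERGE_BUFFER_LEN macro sizes each mergeable receive buffer to one good packet plus the mergeable-rx header, rounded up to a cache line. A worked expansion with assumed constants (14-byte Ethernet header, 4-byte VLAN tag, 1500 bytes of data, a 12-byte mergeable-rx header, 64-byte cache lines):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int good_packet_len = 14 + 4 + 1500;	/* ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN */
	int mrg_hdr_len     = 12;		/* assumed sizeof(virtio_net_hdr_mrg_rxbuf) */
	int l1_cache_bytes  = 64;		/* assumed L1_CACHE_BYTES */

	printf("MERGE_BUFFER_LEN = %d\n",
	       ALIGN_UP(good_packet_len + mrg_hdr_len, l1_cache_bytes));	/* 1536 */
	return 0;
}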
@@ -124,12 +127,14 @@ struct virtnet_info {
/* Lock for config space updates */
struct mutex config_lock;
+ /* Page_frag for GFP_KERNEL packet buffer allocation when we run
+ * low on memory.
+ */
+ struct page_frag alloc_frag;
+
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
- /* Per-cpu variable to show the mapping from CPU to virtqueue */
- int __percpu *vq_index;
-
/* CPU hot plug notifier */
struct notifier_block nb;
};
@@ -217,33 +222,18 @@ static void skb_xmit_done(struct virtqueue *vq)
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
-static void set_skb_frag(struct sk_buff *skb, struct page *page,
- unsigned int offset, unsigned int *len)
-{
- int size = min((unsigned)PAGE_SIZE - offset, *len);
- int i = skb_shinfo(skb)->nr_frags;
-
- __skb_fill_page_desc(skb, i, page, offset, size);
-
- skb->data_len += size;
- skb->len += size;
- skb->truesize += PAGE_SIZE;
- skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
- *len -= size;
-}
-
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
- struct page *page, unsigned int len)
+ struct page *page, unsigned int offset,
+ unsigned int len, unsigned int truesize)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- unsigned int copy, hdr_len, offset;
+ unsigned int copy, hdr_len, hdr_padded_len;
char *p;
- p = page_address(page);
+ p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +244,17 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
if (vi->mergeable_rx_bufs) {
hdr_len = sizeof hdr->mhdr;
- offset = hdr_len;
+ hdr_padded_len = sizeof hdr->mhdr;
} else {
hdr_len = sizeof hdr->hdr;
- offset = sizeof(struct padded_vnet_hdr);
+ hdr_padded_len = sizeof(struct padded_vnet_hdr);
}
memcpy(hdr, p, hdr_len);
len -= hdr_len;
- p += offset;
+ offset += hdr_padded_len;
+ p += hdr_padded_len;
copy = len;
if (copy > skb_tailroom(skb))
@@ -273,6 +264,14 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
len -= copy;
offset += copy;
+ if (vi->mergeable_rx_bufs) {
+ if (len)
+ skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+ else
+ put_page(page);
+ return skb;
+ }
+
/*
* Verify that we can indeed put this data into a skb.
* This is here to handle cases when the device erroneously
@@ -284,9 +283,12 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
dev_kfree_skb(skb);
return NULL;
}
-
+ BUG_ON(offset >= PAGE_SIZE);
while (len) {
- set_skb_frag(skb, page, offset, &len);
+ unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+ frag_size, truesize);
+ len -= frag_size;
page = (struct page *)page->private;
offset = 0;
}
@@ -297,33 +299,58 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb;
}
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+ struct sk_buff *curr_skb = head_skb;
+ char *buf;
struct page *page;
- int num_buf, i, len;
+ int num_buf, len, offset;
num_buf = hdr->mhdr.num_buffers;
while (--num_buf) {
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long\n", skb->dev->name);
- skb->dev->stats.rx_length_errors++;
- return -EINVAL;
- }
- page = virtqueue_get_buf(rq->vq, &len);
- if (!page) {
+ int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
- skb->dev->name, hdr->mhdr.num_buffers);
- skb->dev->stats.rx_length_errors++;
+ head_skb->dev->name, hdr->mhdr.num_buffers);
+ head_skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
-
- set_skb_frag(skb, page, 0, &len);
-
+ if (unlikely(len > MERGE_BUFFER_LEN)) {
+ pr_debug("%s: rx error: merge buffer too long\n",
+ head_skb->dev->name);
+ len = MERGE_BUFFER_LEN;
+ }
+ if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ head_skb->dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ if (curr_skb == head_skb)
+ skb_shinfo(curr_skb)->frag_list = nskb;
+ else
+ curr_skb->next = nskb;
+ curr_skb = nskb;
+ head_skb->truesize += nskb->truesize;
+ num_skb_frags = 0;
+ }
+ if (curr_skb != head_skb) {
+ head_skb->data_len += len;
+ head_skb->len += len;
+ head_skb->truesize += MERGE_BUFFER_LEN;
+ }
+ page = virt_to_head_page(buf);
+ offset = buf - (char *)page_address(page);
+ if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+ put_page(page);
+ skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+ len, MERGE_BUFFER_LEN);
+ } else {
+ skb_add_rx_frag(curr_skb, num_skb_frags, page,
+ offset, len, MERGE_BUFFER_LEN);
+ }
--rq->num;
}
return 0;
@@ -341,8 +368,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs || vi->big_packets)
+ if (vi->big_packets)
give_pages(rq, buf);
+ else if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
else
dev_kfree_skb(buf);
return;
@@ -352,19 +381,28 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
skb = buf;
len -= sizeof(struct virtio_net_hdr);
skb_trim(skb, len);
+ } else if (vi->mergeable_rx_bufs) {
+ struct page *page = virt_to_head_page(buf);
+ skb = page_to_skb(rq, page,
+ (char *)buf - (char *)page_address(page),
+ len, MERGE_BUFFER_LEN);
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ put_page(page);
+ return;
+ }
+ if (receive_mergeable(rq, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
} else {
page = buf;
- skb = page_to_skb(rq, page, len);
+ skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
give_pages(rq, page);
return;
}
- if (vi->mergeable_rx_bufs)
- if (receive_mergeable(rq, skb)) {
- dev_kfree_skb(skb);
- return;
- }
}
hdr = skb_vnet_hdr(skb);
@@ -435,11 +473,11 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
struct skb_vnet_hdr *hdr;
int err;
- skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
+ skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
if (unlikely(!skb))
return -ENOMEM;
- skb_put(skb, MAX_PACKET_LEN);
+ skb_put(skb, GOOD_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
@@ -501,18 +539,28 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
- struct page *page;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ char *buf = NULL;
int err;
- page = get_a_page(rq, gfp);
- if (!page)
+ if (gfp & __GFP_WAIT) {
+ if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
+ gfp)) {
+ buf = (char *)page_address(vi->alloc_frag.page) +
+ vi->alloc_frag.offset;
+ get_page(vi->alloc_frag.page);
+ vi->alloc_frag.offset += MERGE_BUFFER_LEN;
+ }
+ } else {
+ buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
+ }
+ if (!buf)
return -ENOMEM;
- sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
+ sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
- give_pages(rq, page);
+ put_page(virt_to_head_page(buf));
return err;
}
@@ -545,7 +593,8 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
} while (rq->vq->num_free);
if (unlikely(rq->num > rq->max))
rq->max = rq->num;
- virtqueue_kick(rq->vq);
+ if (unlikely(!virtqueue_kick(rq->vq)))
+ return false;
return !oom;
}
@@ -751,7 +800,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err)) {
+ if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@@ -760,7 +809,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
return NETDEV_TX_OK;
}
- virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -819,12 +867,14 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
< 0);
- virtqueue_kick(vi->cvq);
+ if (unlikely(!virtqueue_kick(vi->cvq)))
+ return status == VIRTIO_NET_OK;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!virtqueue_get_buf(vi->cvq, &tmp))
+ while (!virtqueue_get_buf(vi->cvq, &tmp) &&
+ !virtqueue_is_broken(vi->cvq))
cpu_relax();
return status == VIRTIO_NET_OK;
@@ -852,8 +902,13 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
return -EINVAL;
}
} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
- vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
- addr->sa_data, dev->addr_len);
+ unsigned int i;
+
+ /* Naturally, this has an atomicity problem. */
+ for (i = 0; i < dev->addr_len; i++)
+ virtio_cwrite8(vdev,
+ offsetof(struct virtio_net_config, mac) +
+ i, addr->sa_data[i]);
}
eth_commit_mac_addr_change(dev, p);
@@ -1065,7 +1120,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
- int cpu;
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1075,16 +1129,6 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
vi->affinity_hint_set = false;
}
-
- i = 0;
- for_each_online_cpu(cpu) {
- if (cpu == hcpu) {
- *per_cpu_ptr(vi->vq_index, cpu) = -1;
- } else {
- *per_cpu_ptr(vi->vq_index, cpu) =
- ++i % vi->curr_queue_pairs;
- }
- }
}
static void virtnet_set_affinity(struct virtnet_info *vi)
@@ -1106,7 +1150,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
- *per_cpu_ptr(vi->vq_index, cpu) = i;
+ netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
i++;
}
@@ -1118,11 +1162,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
{
struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
- mutex_lock(&vi->config_lock);
-
- if (!vi->config_enable)
- goto done;
-
switch(action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
@@ -1136,8 +1175,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
break;
}
-done:
- mutex_unlock(&vi->config_lock);
return NOTIFY_OK;
}
@@ -1227,28 +1264,6 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-/* To avoid contending a lock hold by a vcpu who would exit to host, select the
- * txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- int txq;
- struct virtnet_info *vi = netdev_priv(dev);
-
- if (skb_rx_queue_recorded(skb)) {
- txq = skb_get_rx_queue(skb);
- } else {
- txq = *__this_cpu_ptr(vi->vq_index);
- if (txq == -1)
- txq = 0;
- }
-
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
-
- return txq;
-}
-
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -1260,7 +1275,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
- .ndo_select_queue = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
@@ -1276,9 +1290,8 @@ static void virtnet_config_changed_work(struct work_struct *work)
if (!vi->config_enable)
goto done;
- if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
- offsetof(struct virtio_net_config, status),
- &v) < 0)
+ if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
+ struct virtio_net_config, status, &v) < 0)
goto done;
if (v & VIRTIO_NET_S_ANNOUNCE) {
@@ -1343,8 +1356,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs || vi->big_packets)
+ if (vi->big_packets)
give_pages(&vi->rq[i], buf);
+ else if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
else
dev_kfree_skb(buf);
--vi->rq[i].num;
@@ -1500,9 +1515,9 @@ static int virtnet_probe(struct virtio_device *vdev)
u16 max_queue_pairs;
/* Find if host supports multiqueue virtio_net device */
- err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
- offsetof(struct virtio_net_config,
- max_virtqueue_pairs), &max_queue_pairs);
+ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
+ struct virtio_net_config,
+ max_virtqueue_pairs, &max_queue_pairs);
/* We need at least 2 queue's */
if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
@@ -1554,9 +1569,11 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->vlan_features = dev->features;
/* Configuration may specify what MAC to use. Otherwise random. */
- if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
- offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len) < 0)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ virtio_cread_bytes(vdev,
+ offsetof(struct virtio_net_config, mac),
+ dev->dev_addr, dev->addr_len);
+ else
eth_hw_addr_random(dev);
/* Set up our device-specific information */
@@ -1569,9 +1586,12 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->stats == NULL)
goto free;
- vi->vq_index = alloc_percpu(int);
- if (vi->vq_index == NULL)
- goto free_stats;
+ for_each_possible_cpu(i) {
+ struct virtnet_stats *virtnet_stats;
+ virtnet_stats = per_cpu_ptr(vi->stats, i);
+ u64_stats_init(&virtnet_stats->tx_syncp);
+ u64_stats_init(&virtnet_stats->rx_syncp);
+ }
mutex_init(&vi->config_lock);
vi->config_enable = true;
@@ -1599,10 +1619,10 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_index;
+ goto free_stats;
- netif_set_real_num_tx_queues(dev, 1);
- netif_set_real_num_rx_queues(dev, 1);
+ netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
+ netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
err = register_netdev(dev);
if (err) {
@@ -1650,8 +1670,8 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
-free_index:
- free_percpu(vi->vq_index);
+ if (vi->alloc_frag.page)
+ put_page(vi->alloc_frag.page);
free_stats:
free_percpu(vi->stats);
free:
@@ -1685,20 +1705,23 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
+ if (vi->alloc_frag.page)
+ put_page(vi->alloc_frag.page);
flush_work(&vi->config_work);
- free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
int i;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
@@ -1747,6 +1770,10 @@ static int virtnet_restore(struct virtio_device *vdev)
virtnet_set_queues(vi, vi->curr_queue_pairs);
rtnl_unlock();
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err)
+ return err;
+
return 0;
}
#endif
@@ -1778,7 +1805,7 @@ static struct virtio_driver virtio_net_driver = {
.probe = virtnet_probe,
.remove = virtnet_remove,
.config_changed = virtnet_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtnet_freeze,
.restore = virtnet_restore,
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a03f358fd58..12040a35d95 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -410,9 +410,9 @@ int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
extern char vmxnet3_driver_name[];
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2ef5b6219f3..0358c07f766 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -60,10 +60,6 @@
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@@ -1884,11 +1880,19 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
+ int i;
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *vxlan_stats;
+ vxlan_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&vxlan_stats->syncp);
+ }
+
+
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
@@ -2087,7 +2091,7 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(dev_net(dev), &low, &high);
vxlan->port_min = low;
vxlan->port_max = high;
vxlan->dst_port = htons(vxlan_port);
@@ -2180,7 +2184,7 @@ static void vxlan_del_work(struct work_struct *work)
* could be used for both IPv4 and IPv6 communications, but
* users may set bindv6only=1.
*/
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
{
struct sock *sk;
struct socket *sock;
@@ -2193,7 +2197,7 @@ static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDPv6 socket create failed\n");
- return rc;
+ return ERR_PTR(rc);
}
/* Put in proper namespace */
@@ -2208,28 +2212,27 @@ static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
&vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
sk_release_kernel(sk);
- return rc;
+ return ERR_PTR(rc);
}
/* At this point, IPv6 module should have been loaded in
* sock_create_kern().
*/
BUG_ON(!ipv6_stub);
- *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
- return 0;
+ return sock;
}
#else
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
{
- return -EPFNOSUPPORT;
+ return ERR_PTR(-EPFNOSUPPORT);
}
#endif
-static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v4_sock(struct net *net, __be16 port)
{
struct sock *sk;
struct socket *sock;
@@ -2244,7 +2247,7 @@ static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDP socket create failed\n");
- return rc;
+ return ERR_PTR(rc);
}
/* Put in proper namespace */
@@ -2257,13 +2260,12 @@ static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
pr_debug("bind for UDP socket %pI4:%u (%d)\n",
&vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
sk_release_kernel(sk);
- return rc;
+ return ERR_PTR(rc);
}
- *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
- return 0;
+ return sock;
}
/* Create new listen socket if needed */
@@ -2274,7 +2276,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct vxlan_sock *vs;
struct socket *sock;
struct sock *sk;
- int rc = 0;
unsigned int h;
vs = kmalloc(sizeof(*vs), GFP_KERNEL);
@@ -2287,12 +2288,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
INIT_WORK(&vs->del_work, vxlan_del_work);
if (ipv6)
- rc = create_v6_sock(net, port, &sock);
+ sock = create_v6_sock(net, port);
else
- rc = create_v4_sock(net, port, &sock);
- if (rc < 0) {
+ sock = create_v4_sock(net, port);
+ if (IS_ERR(sock)) {
kfree(vs);
- return ERR_PTR(rc);
+ return ERR_CAST(sock);
}
vs->sock = sock;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 3d80e4267de..3d741663fd6 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -220,7 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
- if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, z8530_interrupt, 0,
"Hostess SV11", sv) < 0) {
pr_warn("IRQ %d already in use\n", irq);
goto err_irq;
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 5bbcb5e3ee0..388ddf60a66 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -148,10 +148,6 @@ static int enslave( struct net_device *, struct net_device * );
static int emancipate( struct net_device * );
#endif
-#ifdef __i386__
-#define ASM_CRC 1
-#endif
-
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
@@ -1551,88 +1547,6 @@ __setup( "sbni=", sbni_setup );
/* -------------------------------------------------------------------------- */
-#ifdef ASM_CRC
-
-static u32
-calc_crc32( u32 crc, u8 *p, u32 len )
-{
- register u32 _crc;
- _crc = crc;
-
- __asm__ __volatile__ (
- "xorl %%ebx, %%ebx\n"
- "movl %2, %%esi\n"
- "movl %3, %%ecx\n"
- "movl $crc32tab, %%edi\n"
- "shrl $2, %%ecx\n"
- "jz 1f\n"
-
- ".align 4\n"
- "0:\n"
- "movb %%al, %%bl\n"
- "movl (%%esi), %%edx\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "shrl $8, %%edx\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "shrl $8, %%edx\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "movb %%dh, %%dl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "addl $4, %%esi\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "decl %%ecx\n"
- "jnz 0b\n"
-
- "1:\n"
- "movl %3, %%ecx\n"
- "andl $3, %%ecx\n"
- "jz 2f\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb (%%esi), %%bl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "decl %%ecx\n"
- "jz 2f\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb 1(%%esi), %%bl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "decl %%ecx\n"
- "jz 2f\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb 2(%%esi), %%bl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
- "2:\n"
- : "=a" (_crc)
- : "0" (_crc), "g" (p), "g" (len)
- : "bx", "cx", "dx", "si", "di"
- );
-
- return _crc;
-}
-
-#else /* ASM_CRC */
-
static u32
calc_crc32( u32 crc, u8 *p, u32 len )
{
@@ -1642,9 +1556,6 @@ calc_crc32( u32 crc, u8 *p, u32 len )
return crc;
}
-#endif /* ASM_CRC */
-
-
static u32 crc32tab[] __attribute__ ((aligned(8))) = {
0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4f774847898..27860b4f590 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
- if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, z8530_interrupt, 0,
"SeaLevel", dev) < 0) {
pr_warn("IRQ %d already in use\n", irq);
goto err_request_irq;
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index 8f0fc2e57e2..f57ee67836a 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -41,6 +41,6 @@ struct x25_asy {
#define X25_ASY_MAGIC 0x5303
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
#endif /* _LINUX_X25_ASY.H */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index f29d554fc07..2416a9d60bd 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -395,20 +395,19 @@ struct z8530_dev
extern u8 z8530_dead_port[];
extern u8 z8530_hdlc_kilostream_85230[];
extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
- struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
/*
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 9f1e947f355..649ecad6844 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
i2400mu->rx_size_auto_shrink = 1;
}
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
- const struct i2400m_bootrom_header *,
- size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
- struct i2400m_bootrom_header *,
- size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+ const struct i2400m_bootrom_header *, size_t,
+ int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+ struct i2400m_bootrom_header *, size_t);
#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 79c6505b5c2..5a34e72bab9 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -710,18 +710,18 @@ enum i2400m_bri {
I2400M_BRI_MAC_REINIT = 1 << 3,
};
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
static inline
int i2400m_is_d2h_barker(const void *buf)
{
const __le32 *barker = buf;
return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
}
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
/* Make/grok boot-rom header commands */
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
/*
* Driver / device setup and internal functions
*/
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
-
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
-
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
-
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
- const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
- enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+ int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
#ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
#else
static inline int i2400m_debugfs_add(struct i2400m *i2400m)
{
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
#endif
/* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
extern struct attribute_group i2400m_dev_attr_group;
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
dev_put(i2400m->wimax_dev.net_dev);
}
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
/*
* _setup()/_release() are called by the probe/disconnect functions of
* the bus-specific drivers.
*/
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
/*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
return i2400m->wimax_dev.net_dev->dev.parent;
}
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
- char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
static inline
struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
return &iface->cur_altsetting->endpoint[ep].desc;
}
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
- enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
- struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+ const struct i2400m_tlv_rf_switches_status *);
/*
* Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
/* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9a24e599de..cfce83e1f27 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
pci_iounmap(pdev, priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7fe19648f10..edf4b57c4aa 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1abf1d42117..c63d1159db5 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,6 +25,23 @@ config ATH_DEBUG
Say Y, if you want to debug atheros wireless drivers.
Right now only ath9k makes use of this.
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+ bool "Atheros dynamic user regulatory hints"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled in countries where
+ this feature is explicitly allowed and only on cards that
+ specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+ bool "Atheros dynamic user regulatory testing"
+ depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems
+ undergoing certification testing.
+
source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
@@ -32,5 +49,6 @@ source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index fb05cfd1936..7d023b0f13b 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -5,13 +5,16 @@ obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
+obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH_COMMON) += ath.o
ath-objs := main.o \
regd.o \
hw.o \
- key.o
+ key.o \
+ dfs_pattern_detector.o \
+ dfs_pri_detector.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 17d7fece35d..280fc3d53a3 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d1c40..a1f09962885 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
ar->bmi.done_sent = false;
}
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
if (ar->bmi.done_sent) {
- ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
return 0;
}
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, *param);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
- __func__, ar, address, *param);
-
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f518f..e46951b8fb9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *indicator_addr;
-
- if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
- return;
- }
-
- /* workaround for QCA988x_1.0 HW CE */
- indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
- if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
- iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
- } else {
- unsigned long irq_flags;
- local_irq_save(irq_flags);
- iowrite32(1, indicator_addr);
-
- /*
- * PCIE write waits for ACK in IPQ8K, there is no
- * need to read back value.
- */
- (void)ioread32(indicator_addr);
- (void)ioread32(indicator_addr); /* conservative */
-
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
- iowrite32(0, indicator_addr);
- local_irq_restore(irq_flags);
- }
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc *desc, *sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
@@ -306,11 +277,13 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
- ret = -EIO;
+ ret = -ENOSR;
goto exit;
}
@@ -346,7 +319,7 @@ exit:
return ret;
}
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -365,77 +338,26 @@ int ath10k_ce_send(struct ce_state *ce_state,
return ret;
}
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
- unsigned int nbytes, u32 flags)
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
- unsigned int num_items = sendlist->num_items;
- struct ce_sendlist_item *item;
-
- item = &sendlist->item[num_items];
- item->data = buffer;
- item->u.nbytes = nbytes;
- item->flags = flags;
- sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_context,
- struct ce_sendlist *sendlist,
- unsigned int transfer_id)
-{
- struct ce_ring_state *src_ring = ce_state->src_ring;
- struct ce_sendlist_item *item;
- struct ath10k *ar = ce_state->ar;
+ struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int num_items = sendlist->num_items;
- unsigned int sw_index;
- unsigned int write_index;
- int i, delta, ret = -ENOMEM;
+ int delta;
spin_lock_bh(&ar_pci->ce_lock);
-
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
-
- delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
-
- if (delta >= num_items) {
- /*
- * Handle all but the last item uniformly.
- */
- for (i = 0; i < num_items - 1; i++) {
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state,
- CE_SENDLIST_ITEM_CTXT,
- (u32) item->data,
- item->u.nbytes, transfer_id,
- item->flags |
- CE_SEND_FLAG_GATHER);
- if (ret)
- ath10k_warn("CE send failed for item: %d\n", i);
- }
- /*
- * Provide valid context pointer for final item.
- */
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
- (u32) item->data, item->u.nbytes,
- transfer_id, item->flags);
- if (ret)
- ath10k_warn("CE send failed for last item: %d\n", i);
- }
-
+ delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+ pipe->src_ring->write_index,
+ pipe->src_ring->sw_index - 1);
spin_unlock_bh(&ar_pci->ce_lock);
- return ret;
+ return delta;
}
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +370,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ goto out;
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +394,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
ret = -EIO;
}
ath10k_pci_sleep(ar);
+
+out:
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -479,14 +405,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int sw_index = dest_ring->sw_index;
@@ -535,7 +461,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
return 0;
}
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -556,11 +482,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
{
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -612,19 +538,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
unsigned int read_index;
- int ret = -EIO;
+ int ret;
if (src_ring->hw_index == sw_index) {
/*
@@ -634,48 +561,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
- ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
+
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
+
ath10k_pci_sleep(ar);
}
+
read_index = src_ring->hw_index;
- if ((read_index != sw_index) && (read_index != 0xffffffff)) {
- struct ce_desc *sbase = src_ring->shadow_base;
- struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+ if ((read_index == sw_index) || (read_index == 0xffffffff))
+ return -EIO;
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(sdesc->addr);
- *nbytesp = __le16_to_cpu(sdesc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
- CE_DESC_FLAGS_META_DATA);
+ sbase = src_ring->shadow_base;
+ sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
- if (per_transfer_contextp)
- *per_transfer_contextp =
- src_ring->per_transfer_context[sw_index];
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
- /* sanity */
- src_ring->per_transfer_context[sw_index] = NULL;
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- ret = 0;
- }
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
- return ret;
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -727,7 +660,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -756,53 +689,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
- void *transfer_context;
- u32 buf;
- unsigned int nbytes;
- unsigned int id;
- unsigned int flags;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
- if (ce_state->recv_cb) {
- /*
- * Pop completed recv buffers and call the registered
- * recv callback for each
- */
- while (ath10k_ce_completed_recv_next_nolock(ce_state,
- &transfer_context,
- &buf, &nbytes,
- &id, &flags) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->recv_cb(ce_state, transfer_context, buf,
- nbytes, id, flags);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ spin_unlock_bh(&ar_pci->ce_lock);
- if (ce_state->send_cb) {
- /*
- * Pop completed send buffers and call the registered
- * send callback for each
- */
- while (ath10k_ce_completed_send_next_nolock(ce_state,
- &transfer_context,
- &buf,
- &nbytes,
- &id) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->send_cb(ce_state, transfer_context,
- buf, nbytes, id);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+
+ spin_lock_bh(&ar_pci->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@@ -823,10 +732,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
u32 intr_summary;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +761,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
+ int ret;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +786,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +801,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
@@ -900,13 +814,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
spin_unlock_bh(&ar_pci->ce_lock);
}
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +828,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries = attr->src_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +846,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->src_ring = (struct ce_ring_state *)ptr;
+ ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +865,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@@ -970,6 +877,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
+
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +899,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->shadow_base_unaligned =
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned) {
+ pci_free_consistent(ar_pci->pdev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +921,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +949,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->dest_ring = (struct ce_ring_state *)ptr;
+ ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
@@ -1055,6 +978,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->dest_ring);
+ ce_state->dest_ring = NULL;
+ return -ENOMEM;
+ }
+
dest_ring->base_addr_ce_space_unaligned = base_addr;
/*
@@ -1071,44 +1000,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = NULL;
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
- if (!ar_pci->ce_id_to_state[ce_id]) {
- ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
- if (ce_state == NULL) {
- spin_unlock_bh(&ar_pci->ce_lock);
- return NULL;
- }
-
- ar_pci->ce_id_to_state[ce_id] = ce_state;
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->state = CE_RUNNING;
- /* Save attribute flags */
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- }
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ctrl_addr;
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1122,12 +1042,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
- struct ce_state *ce_state;
+ struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return NULL;
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
@@ -1136,40 +1061,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
}
if (attr->src_nentries) {
- if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE src ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
if (attr->dest_nentries) {
- if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
/* Enable CE error interrupts */
- ath10k_pci_wake(ar);
ath10k_ce_error_intr_enable(ar, ctrl_addr);
+
ath10k_pci_sleep(ar);
return ce_state;
}
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
- unsigned int ce_id = ce_state->id;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ce_state->state = CE_UNUSED;
- ar_pci->ce_id_to_state[ce_id] = NULL;
-
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1113,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
- kfree(ce_state);
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c026f..15d45b5b761 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
-#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
@@ -36,16 +35,9 @@
* how to use copy engines.
*/
-struct ce_state;
+struct ath10k_ce_pipe;
-/* Copy Engine operational state */
-enum ce_op_state {
- CE_UNUSED,
- CE_PAUSED,
- CE_RUNNING,
-};
-
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
void **per_transfer_context;
};
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
- enum ce_op_state state;
-
- void (*send_cb) (struct ce_state *ce_state,
- void *per_transfer_send_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id);
- void (*recv_cb) (struct ce_state *ce_state,
- void *per_transfer_recv_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags);
-
- unsigned int src_sz_max;
- struct ce_ring_state *src_ring;
- struct ce_ring_state *dest_ring;
-};
-struct ce_sendlist_item {
- /* e.g. buffer or desc list */
- dma_addr_t data;
- union {
- /* simple buffer */
- unsigned int nbytes;
- /* Rx descriptor list */
- unsigned int ndesc;
- } u;
- /* externally-specified flags; OR-ed with internal flags */
- u32 flags;
-};
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
-struct ce_sendlist {
- unsigned int num_items;
- struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
*
* Implementation note: pushes 1 buffer to Source ring
*/
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
@@ -190,36 +152,11 @@ int ath10k_ce_send(struct ce_state *ce_state,
unsigned int transfer_id,
unsigned int flags);
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
- u32 buffer,
- unsigned int nbytes,
- /* OR-ed with internal flags */
- u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- * ce - which copy engine to use
- * sendlist - list of simple buffers to send using gather
- * transfer_id - arbitrary ID; reflected to destination
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_send_context,
- struct ce_sendlist *sendlist,
- /* 14 bits */
- unsigned int transfer_id);
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
@@ -233,17 +170,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
*
 * Implementation note: Pushes a buffer to Dest ring.
*/
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
@@ -253,7 +185,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -263,7 +195,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -272,7 +204,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
@@ -282,7 +214,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
@@ -291,13 +223,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* pending sends. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +254,6 @@ struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
- /* currently not in use */
- unsigned int priority;
-
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
@@ -336,21 +265,8 @@ struct ce_attr {
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
-
- /* Future use */
- void *reserved;
};
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
-
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7226c23b956..1129994fb10 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -39,17 +39,6 @@ MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
- .id = QCA988X_HW_1_0_VERSION,
- .name = "qca988x hw1.0",
- .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
- .fw = {
- .dir = QCA988X_HW_1_0_FW_DIR,
- .fw = QCA988X_HW_1_0_FW_FILE,
- .otp = QCA988X_HW_1_0_OTP_FILE,
- .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
- },
- },
- {
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
@@ -64,33 +53,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
}
-static int ath10k_check_fw_version(struct ath10k *ar)
-{
- char version[32];
-
- if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
- ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
- ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
- ar->fw_version_build >= SUPPORTED_FW_BUILD)
- return 0;
-
- snprintf(version, sizeof(version), "%u.%u.%u.%u",
- SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
- SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
-
- ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
- ar->hw->wiphy->fw_version);
- ath10k_warn("Please upgrade to version %s (or newer)\n", version);
-
- return 0;
-}
-
static int ath10k_init_connect_htc(struct ath10k *ar)
{
int status;
@@ -112,7 +80,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
- ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
@@ -200,8 +168,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
-static int ath10k_push_board_ext_data(struct ath10k *ar,
- const struct firmware *fw)
+static int ath10k_push_board_ext_data(struct ath10k *ar)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -214,21 +181,21 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "ath10k: Board extended Data download addr: 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
return 0;
- if (fw->size != (board_data_size + board_ext_data_size)) {
+ if (ar->board_len != (board_data_size + board_ext_data_size)) {
ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
- fw->size, board_data_size, board_ext_data_size);
+ ar->board_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
- fw->data + board_data_size,
+ ar->board_data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err("could not write board ext data (%d)\n", ret);
@@ -247,12 +214,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
static int ath10k_download_board_data(struct ath10k *ar)
{
- const struct firmware *fw = ar->board_data;
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
- ret = ath10k_push_board_ext_data(ar, fw);
+ ret = ath10k_push_board_ext_data(ar);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
goto exit;
@@ -264,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
- ret = ath10k_bmi_write_memory(ar, address, fw->data,
- min_t(u32, board_data_size, fw->size));
+ ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+ min_t(u32, board_data_size,
+ ar->board_len));
if (ret) {
ath10k_err("could not write board data (%d)\n", ret);
goto exit;
@@ -283,17 +250,16 @@ exit:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- const struct firmware *fw = ar->otp;
u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
int ret;
/* OTP is optional */
- if (!ar->otp)
+ if (!ar->otp_data || !ar->otp_len)
return 0;
- ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
goto exit;
@@ -312,13 +278,13 @@ exit:
static int ath10k_download_fw(struct ath10k *ar)
{
- const struct firmware *fw = ar->firmware;
u32 address;
int ret;
address = ar->hw_params.patch_load_addr;
- ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
+ ar->firmware_len);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
goto exit;
@@ -330,8 +296,8 @@ exit:
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (ar->board_data && !IS_ERR(ar->board_data))
- release_firmware(ar->board_data);
+ if (ar->board && !IS_ERR(ar->board))
+ release_firmware(ar->board);
if (ar->otp && !IS_ERR(ar->otp))
release_firmware(ar->otp);
@@ -339,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
+ ar->board = NULL;
ar->board_data = NULL;
+ ar->board_len = 0;
+
ar->otp = NULL;
+ ar->otp_data = NULL;
+ ar->otp_len = 0;
+
ar->firmware = NULL;
+ ar->firmware_data = NULL;
+ ar->firmware_len = 0;
}
-static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
{
int ret = 0;
@@ -358,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
return -EINVAL;
}
- ar->board_data = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board_data)) {
- ret = PTR_ERR(ar->board_data);
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
goto err;
}
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.fw);
@@ -376,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
+ ar->firmware_data = ar->firmware->data;
+ ar->firmware_len = ar->firmware->size;
+
/* OTP may be undefined. If so, don't fetch it at all */
if (ar->hw_params.fw.otp == NULL)
return 0;
@@ -389,6 +369,172 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
+ ar->otp_data = ar->otp->data;
+ ar->otp_len = ar->otp->size;
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ /* first fetch the firmware file (firmware-*.bin) */
+ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+ if (IS_ERR(ar->firmware)) {
+ ath10k_err("Could not fetch firmware file '%s': %ld\n",
+ name, PTR_ERR(ar->firmware));
+ return PTR_ERR(ar->firmware);
+ }
+
+ data = ar->firmware->data;
+ len = ar->firmware->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath10k_err("firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath10k_err("Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_FW_IE_FW_VERSION:
+ if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ break;
+
+ memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+ ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found fw version %s\n",
+ ar->hw->wiphy->fw_version);
+ break;
+ case ATH10K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH10K_FW_IE_FEATURES:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ar->fw_features);
+ }
+
+ ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
+ ar->fw_features,
+ sizeof(ar->fw_features));
+ break;
+ case ATH10K_FW_IE_FW_IMAGE:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ar->firmware_data = data;
+ ar->firmware_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_OTP_IMAGE:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found otp image ie (%zd B)\n",
+ ie_len);
+
+ ar->otp_data = data;
+ ar->otp_len = ie_len;
+
+ break;
+ default:
+ ath10k_warn("Unknown FW IE: %u\n",
+ le32_to_cpu(hdr->id));
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ if (!ar->firmware_data || !ar->firmware_len) {
+ ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
+ name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
+
+ /* now fetch the board file */
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err("board data file not defined");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
+ ath10k_err("could not fetch board data (%d)\n", ret);
+ goto err;
+ }
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
return 0;
err:
@@ -396,6 +542,28 @@ err:
return ret;
}
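For reference, the IE walk above assumes the following container layout for the API-2 firmware file. The real struct ath10k_fw_ie definition lives in a header outside this diff, but the parser's use of le32_to_cpu(hdr->id) and hdr->len implies a shape like this sketch:

/* firmware-2.bin (assumed layout):
 *
 *   ATH10K_FIRMWARE_MAGIC '\0'  padded to a 4-byte boundary
 *   { __le32 id; __le32 len; u8 payload[len]; }  padded to 4 bytes
 *   ... repeated until the end of the file ...
 */
struct ath10k_fw_ie {
	__le32 id;	/* ATH10K_FW_IE_FW_VERSION, _TIMESTAMP, _FEATURES,
			 * _FW_IMAGE or _OTP_IMAGE as handled above */
	__le32 len;	/* payload length, excluding the 4-byte padding */
	u8 data[0];
};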
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+ if (ret == 0) {
+ ar->fw_api = 2;
+ goto out;
+ }
+
+ ret = ath10k_core_fetch_firmware_api_1(ar);
+ if (ret)
+ return ret;
+
+ ar->fw_api = 1;
+
+out:
+ ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
static int ath10k_init_download_firmware(struct ath10k *ar)
{
int ret;
@@ -446,6 +614,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn("could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
ath10k_info("UART prints enabled\n");
return 0;
}
@@ -545,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
init_waitqueue_head(&ar->event_queue);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
@@ -559,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
+ ath10k_debug_destroy(ar);
+
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
@@ -570,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar)
{
int status;
+ lockdep_assert_held(&ar->conf_mutex);
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -620,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar)
ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
- status = ath10k_check_fw_version(ar);
- if (status)
- goto err_disconnect_htc;
-
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
@@ -641,7 +819,12 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_disconnect_htc;
+
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ INIT_LIST_HEAD(&ar->arvifs);
return 0;
@@ -658,6 +841,9 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
@@ -704,23 +890,65 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
+ mutex_lock(&ar->conf_mutex);
+
ret = ath10k_core_start(ar);
if (ret) {
ath10k_err("could not init core (%d)\n", ret);
ath10k_core_free_firmware_files(ar);
ath10k_hif_power_down(ar);
+ mutex_unlock(&ar->conf_mutex);
return ret;
}
ath10k_core_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
ath10k_hif_power_down(ar);
return 0;
}
-int ath10k_core_register(struct ath10k *ar)
+static int ath10k_core_check_chip_id(struct ath10k *ar)
+{
+ u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+ ar->chip_id, hw_revision);
+
+ /* Check that we are not using hw1.0 (some of them have the same PCI
+ * id as hw2.0) before doing anything else, as ath10k crashes horribly
+ * due to missing hw1.0 workarounds. */
+ switch (hw_revision) {
+ case QCA988X_HW_1_0_CHIP_ID_REV:
+ ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+ return -EOPNOTSUPP;
+
+ case QCA988X_HW_2_0_CHIP_ID_REV:
+ /* known hardware revision, continue normally */
+ return 0;
+
+ default:
+ ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+ ar->chip_id);
+ return 0;
+ }
+
+ return 0;
+}
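MS() is the usual ath-style mask/shift helper, so the revision extraction above is just a mask-and-shift of chip_id. The field macros live in hw.h and are not part of this hunk, so the sketch below is an assumption about their shape rather than a copy of them:

/* assumed helper shape, for illustration only */
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)

/* hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV) is then compared
 * against QCA988X_HW_1_0_CHIP_ID_REV / QCA988X_HW_2_0_CHIP_ID_REV. */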
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
int status;
+ ar->chip_id = chip_id;
+
+ status = ath10k_core_check_chip_id(ar);
+ if (status) {
+ ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ return status;
+ }
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +983,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
+
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
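The registration API change means the bus glue must read the chip id before handing the device to the core. A hedged sketch of a caller; the register-read helper below is purely illustrative and the real PCI-side change is not in this file:

static int example_bus_register(struct ath10k *ar)
{
	u32 chip_id;

	/* illustrative helper; the real code reads the SoC chip id
	 * register through its own bus accessors */
	chip_id = example_read_chip_id(ar);

	/* the core now rejects qca988x hw1.0 before probing firmware */
	return ath10k_core_register(ar, chip_id);
}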
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba563ed4..0934f7633de 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -43,27 +43,23 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
+#define ATH10K_MAX_NUM_MGMT_PENDING 16
+
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
bool is_aborted;
+ u8 vdev_id;
struct {
- u8 vdev_id;
- u16 msdu_id;
u8 tid;
bool is_offchan;
- bool is_conf;
- bool discard;
- bool no_ack;
- u8 refcount;
- struct sk_buff *txfrag;
- struct sk_buff *msdu;
- } __packed htt;
- /* 4 bytes left on 64bit arch */
+ u8 frag_len;
+ u8 pad_len;
+ } __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -108,15 +104,26 @@ struct ath10k_bmi {
bool done_sent;
};
+#define ATH10K_MAX_MEM_REQS 16
+
+struct ath10k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
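These chunks describe host memory handed to the firmware; the allocation itself happens in the WMI code outside this hunk. A minimal sketch of how one entry could be populated, assuming DMA-coherent backing (the helper name is hypothetical):

static int example_alloc_mem_chunk(struct ath10k *ar, u32 req_id, u32 len)
{
	struct ath10k_mem_chunk *chunk;

	if (ar->wmi.num_mem_chunks >= ATH10K_MAX_MEM_REQS)
		return -ENOMEM;

	chunk = &ar->wmi.mem_chunks[ar->wmi.num_mem_chunks];
	chunk->vaddr = dma_alloc_coherent(ar->dev, len, &chunk->paddr,
					  GFP_ATOMIC);
	if (!chunk->vaddr)
		return -ENOMEM;

	chunk->len = len;
	chunk->req_id = req_id;
	ar->wmi.num_mem_chunks++;

	return 0;
}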
+
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
- atomic_t pending_tx_count;
- wait_queue_head_t wq;
+ wait_queue_head_t tx_credits_wq;
+ struct wmi_cmd_map *cmd;
+ struct wmi_vdev_param_map *vdev_param;
+ struct wmi_pdev_param_map *pdev_param;
- struct sk_buff_head wmi_event_list;
- struct work_struct wmi_event_work;
+ u32 num_mem_chunks;
+ struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};
struct ath10k_peer_stat {
@@ -198,17 +205,22 @@ struct ath10k_peer {
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
+ struct list_head list;
+
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
+ struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
+ struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
- u8 def_wep_key_index;
+ u8 def_wep_key_idx;
+ u8 def_wep_key_newidx;
u16 tx_seq_no;
@@ -246,6 +258,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
+
+ unsigned long htt_stats_mask;
+ struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
@@ -270,12 +285,27 @@ enum ath10k_state {
ATH10K_STATE_WEDGED,
};
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* firmware from 10X branch */
+ ATH10K_FW_FEATURE_WMI_10X = 1,
+
+ /* firmware supports management frame tx over WMI; otherwise HTT is used */
+ ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
+ u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@@ -288,6 +318,8 @@ struct ath10k {
u32 vht_cap_info;
u32 num_rf_chains;
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
struct targetdef *targetdef;
struct hostdef *hostdef;
@@ -319,9 +351,19 @@ struct ath10k {
} fw;
} hw_params;
- const struct firmware *board_data;
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
const struct firmware *otp;
+ const void *otp_data;
+ size_t otp_len;
+
const struct firmware *firmware;
+ const void *firmware_data;
+ size_t firmware_len;
+
+ int fw_api;
struct {
struct completion started;
@@ -364,6 +406,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
+ struct list_head arvifs;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
@@ -372,6 +415,9 @@ struct ath10k {
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
enum ath10k_state state;
struct work_struct restart_work;
@@ -393,7 +439,7 @@ void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594fa09..760ff2289e3 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
spin_unlock_bh(&ar->data_lock);
- mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@@ -499,6 +501,144 @@ static const struct file_operations fops_simulate_fw_crash = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ cookie);
+ if (ret) {
+ ath10k_warn("failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 8 bit masks (for now) */
+ if (mask > 0xff)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
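The mask written through this file is forwarded verbatim to ath10k_htt_h2t_stats_req(), so its bits line up with enum htt_dbg_stats_type from htt.h (also touched later in this diff). A small sketch of the equivalent in-kernel request:

static int example_request_basic_htt_stats(struct ath10k *ar)
{
	/* bit 0 = HTT_DBG_STATS_WAL_PDEV_TXRX, bit 1 = HTT_DBG_STATS_RX_REORDER,
	 * i.e. the same as writing 0x3 to the htt_stats_mask file */
	u8 mask = HTT_DBG_STATS_WAL_PDEV_TXRX | HTT_DBG_STATS_RX_REORDER;

	return ath10k_htt_h2t_stats_req(&ar->htt, mask, get_jiffies_64());
}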
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+
+ return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Must not use _sync here to avoid a deadlock; the synchronous cancel
+ * is done in ath10k_debug_destroy(). The check for htt_stats_mask is
+ * to avoid a warning from del_timer(). */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
+}
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +647,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +661,20 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
+ debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_htt_stats_mask);
+
return 0;
}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c5402..3cfe3ee90db 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,22 +27,26 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
- ATH10K_DBG_CORE = 0x00000020,
+ ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
extern unsigned int ath10k_debug_mask;
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
@@ -50,11 +54,24 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
}
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
@@ -68,7 +85,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329ef52f..edae50b5280 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
- memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
-static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- struct sk_buff *skb,
- u8 credits)
-{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- int ret;
-
- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
- ep->eid, skb);
-
- ath10k_htc_prepare_tx_skb(ep, skb);
-
- ret = ath10k_skb_map(htc->ar->dev, skb);
- if (ret)
- goto err;
-
- ret = ath10k_hif_send_head(htc->ar,
- ep->ul_pipe_id,
- ep->eid,
- skb->len,
- skb);
- if (unlikely(ret))
- goto err;
-
- return 0;
-err:
- ath10k_warn("HTC issue failed: %d\n", ret);
-
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- spin_unlock_bh(&htc->tx_lock);
-
- /* this is the simplest way to handle out-of-resources for non-credit
- * based endpoints. credit based endpoints can still get -ENOSR, but
- * this is highly unlikely as credit reservation should prevent that */
- if (ret == -ENOSR) {
- spin_lock_bh(&htc->tx_lock);
- __skb_queue_head(&ep->tx_queue, skb);
- spin_unlock_bh(&htc->tx_lock);
-
- return ret;
- }
-
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
-
- return ret;
-}
-
-static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- u8 *credits)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
- int credits_required;
- int remainder;
- unsigned int transfer_len;
-
- lockdep_assert_held(&htc->tx_lock);
-
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- return NULL;
-
- skb_cb = ATH10K_SKB_CB(skb);
- transfer_len = skb->len;
-
- if (likely(transfer_len <= htc->target_credit_size)) {
- credits_required = 1;
- } else {
- /* figure out how many credits this message requires */
- credits_required = transfer_len / htc->target_credit_size;
- remainder = transfer_len % htc->target_credit_size;
-
- if (remainder)
- credits_required++;
- }
-
- ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
- credits_required, ep->tx_credits);
-
- if (ep->tx_credits < credits_required) {
- __skb_queue_head(&ep->tx_queue, skb);
- return NULL;
- }
-
- ep->tx_credits -= credits_required;
- *credits = credits_required;
- return skb;
-}
-
-static void ath10k_htc_send_work(struct work_struct *work)
-{
- struct ath10k_htc_ep *ep = container_of(work,
- struct ath10k_htc_ep, send_work);
- struct ath10k_htc *htc = ep->htc;
- struct sk_buff *skb;
- u8 credits = 0;
- int ret;
-
- while (true) {
- if (ep->ul_is_polled)
- ath10k_htc_send_complete_check(ep, 0);
-
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credit_flow_enabled)
- skb = ath10k_htc_get_skb_credit_based(htc, ep,
- &credits);
- else
- skb = __skb_dequeue(&ep->tx_queue);
- spin_unlock_bh(&htc->tx_lock);
-
- if (!skb)
- break;
-
- ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
- if (ret == -ENOSR)
- break;
- }
-}
-
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ int credits = 0;
+ int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
+ /* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
+ spin_unlock_bh(&htc->tx_lock);
- __skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- spin_unlock_bh(&htc->tx_lock);
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ ret = ath10k_skb_map(htc->ar->dev, skb);
+ if (ret)
+ goto err_credits;
+
+ ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
+ skb->len, skb);
+ if (ret)
+ goto err_unmap;
+
return 0;
+
+err_unmap:
+ ath10k_skb_unmap(htc->ar->dev, skb);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
}
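For a sense of the credit arithmetic above: with an illustrative target_credit_size of 1792 bytes (the real value comes from the ready message in ath10k_htc_wait_target()), a 3000-byte skb reserves DIV_ROUND_UP(3000, 1792) = 2 credits up front; if the HIF submission then fails, the err_credits path returns both credits and, via the new ep_tx_credits hook, lets the upper layer retry.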
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
- /* note: when using TX credit flow, the re-checking of queues happens
- * when credits flow back from the target. in the non-TX credit case,
- * we recheck after the packet completes */
- spin_lock_bh(&htc->tx_lock);
- if (!ep->tx_credit_flow_enabled && !htc->stopped)
- queue_work(ar->workqueue, &ep->send_work);
- spin_unlock_bh(&htc->tx_lock);
-
return 0;
}
-/* flush endpoint TX queue */
-static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
-
- spin_lock_bh(&htc->tx_lock);
- for (;;) {
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- break;
-
- skb_cb = ATH10K_SKB_CB(skb);
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
- }
- spin_unlock_bh(&htc->tx_lock);
-
- cancel_work_sync(&ep->send_work);
-}
-
/***********/
/* Receive */
/***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
- if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
}
spin_unlock_bh(&htc->tx_lock);
}
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
- skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
- INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
@@ -647,7 +534,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count;
u16 credit_size;
- INIT_COMPLETION(htc->ctl_resp);
+ reinit_completion(&htc->ctl_resp);
status = ath10k_hif_start(htc->ar);
if (status) {
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC Service %s does not allocate target credits\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,17 +659,17 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
- req_msg = &msg->connect_service;
- req_msg->flags = __cpu_to_le16(flags);
- req_msg->service_id = __cpu_to_le16(conn_req->service_id);
-
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
- INIT_COMPLETION(htc->ctl_resp);
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
@@ -873,19 +760,19 @@ setup:
if (status)
return status;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
- ath10k_dbg(ATH10K_DBG_HTC,
- "EP %d UL polled: %d, DL polled: %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s eid: %d TX flow control disabled\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
- int i;
- struct ath10k_htc_ep *ep;
-
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
- for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
- ep = &htc->endpoint[i];
- ath10k_htc_flush_endpoint_tx(htc, ep);
- }
-
ath10k_hif_stop(htc->ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e1dd8c76185..4716d331e6b 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
};
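With the per-endpoint tx queues removed (see htc.c above), a caller that gets -EAGAIN from ath10k_htc_send() relies on the new ep_tx_credits hook to learn when credits return. A sketch of what such a hook might do, assuming the WMI layer's tx_credits_wq added in core.h is the wakeup target (the function name is illustrative):

static void example_ep_tx_credits(struct ath10k *ar)
{
	/* credits flowed back from the target; wake anyone waiting to
	 * resubmit a WMI command */
	wake_up(&ar->wmi.tx_credits_wq);
}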
/* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
- struct sk_buff_head tx_queue;
-
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
-
- struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 39342c5cfcb..5f7eeebc543 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,21 +104,16 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt target version %d.%d; host version %d.%d\n",
- htt->target_version_major,
- htt->target_version_minor,
- HTT_CURRENT_VERSION_MAJOR,
- HTT_CURRENT_VERSION_MINOR);
-
- if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
- ath10k_err("htt major versions are incompatible!\n");
+ ath10k_info("htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
return -ENOTSUPP;
}
- if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
- ath10k_warn("htt minor version differ but still compatible\n");
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 318be4629cd..1a337e93b7e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -19,13 +19,11 @@
#define _HTT_H_
#include <linux/bug.h>
+#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
-#define HTT_CURRENT_VERSION_MAJOR 2
-#define HTT_CURRENT_VERSION_MINOR 1
-
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
HTT_DBG_STATS_RX_REORDER = 1 << 1,
@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_SYNC = 4,
HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
+ struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e784c40b904..90d4f74c28d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -20,6 +20,7 @@
#include "htt.h"
#include "txrx.h"
#include "debug.h"
+#include "trace.h"
#include <linux/log2.h>
@@ -40,6 +41,10 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
- int ret, num_to_fill;
+ int ret, num_deficit, num_to_fill;
+ /* Refilling the whole RX ring buffer proves to be a bad idea. The
+ * reason is that RX may take up a significant amount of CPU cycles and
+ * starve other tasks, e.g. TX on an ethernet device while acting as a
+ * bridge with an ath10k wlan interface. This ended up with very poor
+ * performance once the host system's CPU was overwhelmed with RX on
+ * ath10k.
+ *
+ * By limiting the number of refills the replenishing occurs
+ * progressively. This in turn makes use of the fact that tasklets are
+ * processed in FIFO order. This means actual RX processing can starve
+ * out refilling. If there are not enough buffers on the RX ring the FW
+ * will not report RX until it is refilled with enough buffers. This
+ * automatically balances load wrt CPU power.
+ *
+ * This probably comes at a cost of lower maximum throughput but
+ * improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
- num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ tasklet_schedule(&htt->rx_replenish_task);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ tasklet_kill(&htt->rx_replenish_task);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
goto err_fill_ring;
- ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+ tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+ (unsigned long)htt);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
return false;
}
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
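For reference, the LLC/SNAP encapsulation that the Ethernet II decap path re-inserts is the standard RFC 1042 header; the constants below are the well-known values, listed only to make the struct above concrete:

/* RFC 1042 LLC/SNAP header re-inserted by the Ethernet II decap path:
 *
 *	llc_dsap = 0xaa, llc_ssap = 0xaa, llc_ctrl = 0x03,
 *	snap_oui = 00:00:00, snap_type = original EtherType
 *
 * which is 8 bytes in total, i.e. sizeof(struct rfc1042_hdr) above.
 */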
+
+static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+ struct htt_rx_info *info)
{
struct htt_rx_desc *rxd;
- struct sk_buff *amsdu;
struct sk_buff *first;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ struct ieee80211_hdr *hdr;
+ u8 hdr_buf[64], addr[ETH_ALEN], *qos;
unsigned int hdr_len;
- int crypto_len;
rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- /* FIXME: No idea what assumptions are safe here. Need logs */
- if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
- (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOTSUPP;
- }
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(hdr_buf, hdr, hdr_len);
+ hdr = (struct ieee80211_hdr *)hdr_buf;
- /* A-MSDU max is a little less than 8K */
- amsdu = dev_alloc_skb(8*1024);
- if (!amsdu) {
- ath10k_warn("A-MSDU allocation failed\n");
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOMEM;
- }
-
- if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
- int hdrlen;
-
- hdr = (void *)rxd->rx_hdr_status;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
- }
+ /* FIXME: Hopefully this is a temporary measure.
+ *
+ * Reporting individual A-MSDU subframes means each reported frame
+ * shares the same sequence number.
+ *
+ * mac80211 drops frames it recognizes as duplicates, i.e.
+ * retransmission flag is set and sequence number matches sequence
+ * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
+ * "Duplicate detection and recovery")
+ *
+ * To avoid frames being dropped, clear the retransmission flag for all
+ * received A-MSDUs.
+ *
+ * Worst case: actual duplicate frames will be reported but this should
+ * still be handled gracefully by other OSI/ISO layers. */
+ hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
first = skb;
while (skb) {
void *decap_hdr;
- int decap_len = 0;
+ int len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
decap_hdr = (void *)rxd->rx_hdr_status;
- if (skb == first) {
- /* We receive linked A-MSDU subframe skbuffs. The
- * first one contains the original 802.11 header (and
- * possible crypto param) in the RX descriptor. The
- * A-MSDU subframe header follows that. Each part is
- * aligned to 4 byte boundary. */
-
- hdr = (void *)amsdu->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
- decap_hdr += roundup(hdr_len, 4);
- decap_hdr += roundup(crypto_len, 4);
- }
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- /* Ethernet2 decap inserts ethernet header in place of
- * A-MSDU subframe header. */
- skb_pull(skb, 6 + 6 + 2);
-
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
-
- /* Ethernet2 decap also strips the LLC/SNAP so we need
- * to re-insert it. The LLC/SNAP follows A-MSDU
- * subframe header. */
- /* FIXME: Not all LLCs are 8 bytes long */
- decap_len += 8;
-
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ /* First frame in an A-MSDU chain has more decapped data. */
+ if (skb == first) {
+ len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+ 4);
+ decap_hdr += len;
}
- if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
- /* Native Wifi decap inserts regular 802.11 header
- * in place of A-MSDU subframe header. */
+ switch (fmt) {
+ case RX_MSDU_DECAP_RAW:
+ /* remove trailing FCS */
+ skb_trim(skb, skb->len - FCS_LEN);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ /* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
- skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ skb_pull(skb, hdr_len);
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)hdr_buf;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
- }
+ /* the original A-MSDU header has the A-MSDU-present bit set but
+ * we're not including the A-MSDU subframe header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (fmt == RX_MSDU_DECAP_RAW)
- skb_trim(skb, skb->len - 4); /* remove FCS */
+ /* original 802.11 header has a different DA */
+ memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ /* strip ethernet header and insert decapped 802.11
+ * header, amsdu subframe header and rfc1042 header */
- memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+ len = 0;
+ len += sizeof(struct rfc1042_hdr);
+ len += sizeof(struct amsdu_subframe_hdr);
- /* A-MSDU subframes are padded to 4bytes
- * but relative to first subframe, not the whole MPDU */
- if (skb->next && ((decap_len + skb->len) & 3)) {
- int padlen = 4 - ((decap_len + skb->len) & 3);
- memset(skb_put(amsdu, padlen), 0, padlen);
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, len), decap_hdr, len);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* insert decapped 802.11 header making a single
+ * A-MSDU */
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
+ info->skb = skb;
+ info->encrypt_type = enctype;
skb = skb->next;
- }
+ info->skb->next = NULL;
- info->skb = amsdu;
- info->encrypt_type = enctype;
-
- ath10k_htt_rx_free_msdu_chain(first);
+ ath10k_process_rx(htt->ar, info);
+ }
- return 0;
+ /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+ * monitor interface active for sniffing purposes. */
}
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ int hdr_len;
+ void *rfc1042;
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
@@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
- skb_trim(skb, skb->len - 4);
+ skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* nothing to do here */
+ /* Pull decapped header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ skb_pull(skb, hdr_len);
+
+ /* Push original header */
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* macaddr[6] + macaddr[6] + ethertype[2] */
- skb_pull(skb, 6 + 6 + 2);
- break;
- case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* macaddr[6] + macaddr[6] + len[2] */
- /* we don't need this for non-A-MSDU */
- skb_pull(skb, 6 + 6 + 2);
- break;
- }
+ /* strip ethernet header and insert decapped 802.11 header and
+ * rfc1042 header */
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- void *llc;
- int llclen;
+ rfc1042 = hdr;
+ rfc1042 += roundup(hdr_len, 4);
+ rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
- llclen = 8;
- llc = hdr;
- llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
- llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
-
- skb_push(skb, llclen);
- memcpy(skb->data, llc, llclen);
- }
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+ rfc1042, sizeof(struct rfc1042_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* remove A-MSDU subframe header and insert
+ * decapped 802.11 header. rfc1042 header is already there */
- if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
- int len = ieee80211_hdrlen(hdr->frame_control);
- skb_push(skb, len);
- memcpy(skb->data, hdr, len);
+ skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
info->skb = skb;
info->encrypt_type = enctype;
- return 0;
+
+ ath10k_process_rx(htt->ar, info);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
int fw_desc_len;
u8 *fw_desc;
int i, j;
- int ret;
- int ip_summed;
memset(&info, 0, sizeof(info));
@@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
- /* The skb is not yet processed and it may be
- * reallocated. Since the offload is in the original
- * skb extract the checksum now and assign it later */
- ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ret = ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, &info);
else
- ret = ath10k_htt_rx_msdu(htt, &info);
-
- if (ret && !info.fcs_err) {
- ath10k_warn("error processing msdus %d\n", ret);
- dev_kfree_skb_any(info.skb);
- continue;
- }
-
- if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
- ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
-
- info.skb->ip_summed = ip_summed;
-
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ ath10k_htt_rx_msdu(htt, &info);
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
+ tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
@@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
- case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_RX_ADDBA:
case HTT_T2H_MSG_TYPE_RX_DELBA:
case HTT_T2H_MSG_TYPE_RX_FLUSH:
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c2546b29..d9335e9d0d0 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
- struct sk_buff *txdesc;
+ struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
- txdesc = htt->pending_tx[msdu_id];
- if (!txdesc)
- continue;
-
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
- ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
- ATH10K_SKB_CB(txdesc)->htt.discard = true;
- ath10k_txrx_tx_unref(htt, txdesc);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
}
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = &ar->htt;
-
- if (skb_cb->htt.is_conf) {
- dev_kfree_skb_any(skb);
- return;
- }
-
- if (skb_cb->is_aborted) {
- skb_cb->htt.discard = true;
-
- /* if the skbuff is aborted we need to make sure we'll free up
- * the tx resources, we can't simply run tx_unref() 2 times
- * because if htt tx completion came in earlier we'd access
- * unallocated memory */
- if (skb_cb->htt.refcount > 1)
- skb_cb->htt.refcount = 1;
- }
-
- ath10k_txrx_tx_unref(htt, skb);
+ dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently we support only 8-bit masks at most, so there is no need
+ * to worry about endianness */
+ req->upload_types[0] = mask;
+ req->reset_types[0] = mask;
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
+ ath10k_warn("failed to send htt type stats request: %d", ret);
dev_kfree_skb_any(skb);
return ret;
}
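ath10k_htt_h2t_stats_req() above splits the caller's 64-bit cookie into two little-endian 32-bit words before handing them to the firmware. A minimal sketch of that split, using an arbitrary example cookie:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cookie = 0x1122334455667788ULL;	/* example value only */

	uint32_t lsb = (uint32_t)(cookie & 0xffffffff);
	uint32_t msb = (uint32_t)(cookie >> 32);

	/* prints cookie_lsb=0x55667788 cookie_msb=0x11223344 */
	printf("cookie_lsb=0x%08x cookie_msb=0x%08x\n", lsb, msb);
	return 0;
}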
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
#undef desc_offset
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
- struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- txdesc = ath10k_htc_alloc_skb(len);
- if (!txdesc) {
- res = -ENOMEM;
- goto err;
- }
-
spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ goto err_tx_dec;
}
- htt->pending_tx[msdu_id] = txdesc;
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
+ txdesc = ath10k_htc_alloc_skb(len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
+ goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.msdu = msdu;
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
-
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- struct ath10k_skb_cb *skb_cb;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
- struct sk_buff *txfrag = NULL;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ bool use_frags;
+ u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
u8 tid;
- int prefetch_len, desc_len, frag_len;
- dma_addr_t frags_paddr;
+ int prefetch_len, desc_len;
int msdu_id = -1;
int res;
u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ goto err_tx_dec;
+ }
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
+ spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
- frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
- goto err;
+ goto err_free_msdu_id;
}
- txfrag = dev_alloc_skb(frag_len);
- if (!txfrag) {
- res = -ENOMEM;
- goto err;
- }
+ /* Since HTT 3.0 there is no separate mgmt tx command. However in case
+ * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
+ * fragment list the host driver specifies the frame pointer directly. */
+ use_frags = htt->target_version_major < 3 ||
+ !ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
- goto err;
+ goto err_free_txdesc;
}
- spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
- spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ if (use_frags) {
+ skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+ skb_cb->htt.pad_len = (unsigned long)msdu->data -
+ round_down((unsigned long)msdu->data, 4);
+
+ skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+ } else {
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
}
- htt->pending_tx[msdu_id] = txdesc;
- spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
-
- /* tx fragment list must be terminated with zero-entry */
- skb_put(txfrag, frag_len);
- tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
- tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
- tx_frags[0].len = __cpu_to_le32(msdu->len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
-
- res = ath10k_skb_map(dev, txfrag);
- if (res)
- goto err;
+ goto err_pull_txfrag;
+
+ if (use_frags) {
+ dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ /* tx fragment list must be terminated with zero-entry */
+ tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+ tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+ skb_cb->htt.frag_len +
+ skb_cb->htt.pad_len);
+ tx_frags[0].len = __cpu_to_le32(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
+ tx_frags[1].paddr = __cpu_to_le32(0);
+ tx_frags[1].len = __cpu_to_le32(0);
+
+ dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
- ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
- (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+ ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
- txfrag->data, frag_len);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
- memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ if (use_frags)
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ else
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len);
+ cmd->data_tx.len = __cpu_to_le16(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.txfrag = txfrag;
- skb_cb->htt.msdu = msdu;
+ memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
- if (txfrag)
- ath10k_skb_unmap(dev, txfrag);
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (txfrag)
- dev_kfree_skb_any(txfrag);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
- ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+ ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 44ed5af0a20..8aeb46d9b53 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,28 +20,37 @@
#include "targaddrs.h"
-/* Supported FW version */
-#define SUPPORTED_FW_MAJOR 1
-#define SUPPORTED_FW_MINOR 0
-#define SUPPORTED_FW_RELEASE 0
-#define SUPPORTED_FW_BUILD 629
-
-/* QCA988X 1.0 definitions */
-#define QCA988X_HW_1_0_VERSION 0x4000002c
-#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
-#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
-#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+#define ATH10K_FW_API2_FILE "firmware-2.bin"
+
+/* also includes the null byte */
+#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+
+struct ath10k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+ ATH10K_FW_IE_FW_VERSION = 0,
+ ATH10K_FW_IE_TIMESTAMP = 1,
+ ATH10K_FW_IE_FEATURES = 2,
+ ATH10K_FW_IE_FW_IMAGE = 3,
+ ATH10K_FW_IE_OTP_IMAGE = 4,
+};
+
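The struct ath10k_fw_ie above describes firmware-2.bin as a stream of {id, len, data} records. A sketch of walking such a little-endian TLV stream is below; the sample buffer and the assumption that each record is padded to 4 bytes are illustrative, not taken from the loader.

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* hand-built example: one ATH10K_FW_IE_FW_VERSION record, data "1.0" */
	uint8_t buf[] = { 0, 0, 0, 0,  3, 0, 0, 0,  '1', '.', '0', 0 };
	size_t off = 0;

	while (off + 8 <= sizeof(buf)) {
		uint32_t id = get_le32(buf + off);	/* __le32 id  */
		uint32_t len = get_le32(buf + off + 4);	/* __le32 len */

		printf("ie id=%u len=%u\n", id, len);
		/* assumed here: records are padded to a 4-byte boundary */
		off += 8 + ((len + 3) & ~3u);
	}
	return 0;
}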
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
@@ -53,6 +62,9 @@ enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
@@ -60,6 +72,7 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
+/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
@@ -75,7 +88,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
-#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@@ -90,6 +107,36 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS 16
+#define TARGET_10X_NUM_PEER_AST 2
+#define TARGET_10X_NUM_WDS_ENTRIES 32
+#define TARGET_10X_DMA_BURST_SIZE 0
+#define TARGET_10X_MAC_AGGR_DELIM 0
+#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS 0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_10X_NUM_PEER_KEYS 2
+#define TARGET_10X_NUM_TIDS 256
+#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
+#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10X_NUM_MCAST_GROUPS 0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE 1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG 0
+#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
@@ -169,6 +216,10 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_CHIP_ID_ADDRESS 0x000000ec
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
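The new SOC_CHIP_ID_ADDRESS definitions above let the driver read the chip revision out of a single register. A small sketch of the mask/shift, with a made-up register value chosen so the result matches QCA988X_HW_2_0_CHIP_ID_REV:

#include <stdint.h>
#include <stdio.h>

#define SOC_CHIP_ID_REV_LSB	8
#define SOC_CHIP_ID_REV_MASK	0x00000f00

int main(void)
{
	uint32_t chip_id = 0x043102ff;	/* made-up register readback */
	uint32_t rev = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;

	printf("chip rev = 0x%x\n", rev);	/* 0x2 */
	return 0;
}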
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cf2ba4d850c..97ac8c87cba 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -92,7 +92,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
- INIT_COMPLETION(ar->install_key_done);
+ reinit_completion(&ar->install_key_done);
ret = ath10k_send_key(arvif, key, cmd, macaddr);
if (ret)
@@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
if (value != 0xFFFFFFFF)
value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
ATH10K_RTS_MAX);
- return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_RTS_THRESHOLD,
- value);
+ vdev_param = ar->wmi.vdev_param->rts_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
if (value != 0xFFFFFFFF)
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
ATH10K_FRAGMT_THRESHOLD_MIN,
ATH10K_FRAGMT_THRESHOLD_MAX);
- return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
- value);
+ vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
@@ -434,7 +438,7 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
lockdep_assert_held(&ar->conf_mutex);
- INIT_COMPLETION(ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_setup_done);
arg.vdev_id = arvif->vdev_id;
arg.dtim_period = arvif->dtim_period;
@@ -460,6 +464,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -482,7 +491,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
lockdep_assert_held(&ar->conf_mutex);
- INIT_COMPLETION(ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_setup_done);
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
@@ -503,13 +512,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
struct wmi_vdev_start_request_arg arg = {};
- enum nl80211_channel_type type;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -560,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
- /* For some reasons, ath10k_wmi_vdev_down() here couse
- * often ath10k_wmi_vdev_stop() to fail. Next we could
- * not run monitor vdev and driver reload
- * required. Don't see such problems we skip
- * ath10k_wmi_vdev_down() here.
- */
+ ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn("Monitor vdev down failed: %d\n", ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
@@ -607,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
goto vdev_fail;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
ar->monitor_present = true;
@@ -639,7 +642,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
ar->monitor_present = false;
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
@@ -668,13 +671,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->vdev_id);
return;
}
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info,
const u8 self_peer[ETH_ALEN])
{
+ u32 vdev_param;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -708,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
return;
}
- ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_ATIM_WINDOW,
+ vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
@@ -719,47 +723,45 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
/*
* Review this when mac80211 gains per-interface powersave support.
*/
-static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
- struct ath10k_generic_iter *ar_iter = data;
- struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
if (conf->flags & IEEE80211_CONF_PS) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
- ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
- arvif->vdev_id,
- param,
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
arvif->vdev_id);
- return;
+ return ret;
}
-
- ar_iter->ret = ret;
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
- ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
- psmode);
- if (ar_iter->ret)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+ if (ret) {
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
+ return ret;
+ }
+
+ return 0;
}
/**********************/
@@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.num_rates = n;
arg->peer_num_spatial_streams = max((n+7) / 8, 1);
- ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
}
@@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_QOS;
if (sta->wme && sta->uapsd_queues) {
- ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
arg->peer_flags |= WMI_PEER_APSD;
- arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1028,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ u8 ampdu_factor;
if (!vht_cap->vht_supported)
return;
arg->peer_flags |= WMI_PEER_VHT;
-
arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller. */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= WMI_PEER_80MHZ;
@@ -1048,7 +1064,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1093,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- /* FIXME: add VHT */
-
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1106,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
case IEEE80211_BAND_5GHZ:
- if (sta->ht_cap.ht_supported) {
+ /*
+ * Check VHT first.
+ */
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AC_VHT80;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
@@ -1105,30 +1130,32 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
-static int ath10k_peer_assoc(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf)
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
{
- struct wmi_peer_assoc_complete_arg arg;
-
lockdep_assert_held(&ar->conf_mutex);
- memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+ memset(arg, 0, sizeof(*arg));
- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
- ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
- ath10k_peer_assoc_h_rates(ar, sta, &arg);
- ath10k_peer_assoc_h_ht(ar, sta, &arg);
- ath10k_peer_assoc_h_vht(ar, sta, &arg);
- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
- ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+ ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
+ ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+ ath10k_peer_assoc_h_rates(ar, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
+ ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
- return ath10k_wmi_peer_assoc(ar, &arg);
+ return 0;
}
/* can be called only in mac80211 callbacks due to `key_count` usage */
@@ -1138,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
@@ -1153,24 +1181,33 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+ ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
+ bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+ ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
+ bss_conf->bssid, ret);
rcu_read_unlock();
return;
}
rcu_read_unlock();
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn("Peer assoc failed for %pM\n: %d",
+ bss_conf->bssid, ret);
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
bss_conf->bssid);
if (ret)
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d associated, BSSID: %pM, AID: %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
/*
@@ -1191,10 +1228,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
+ arvif->vdev_id);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
- arvif->vdev_id);
/*
* If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,26 +1241,33 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* interfaces as it expects there is no rx when no interface is
* running.
*/
- ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
- arvif->vdev_id, ret);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
- ath10k_wmi_flush_tx(ar);
+ /* FIXME: why don't we print error if wmi call fails? */
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- arvif->def_wep_key_index = 0;
+ arvif->def_wep_key_idx = 0;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
struct ieee80211_sta *sta)
{
+ struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+ ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
+ if (ret) {
+ ath10k_warn("WMI peer assoc prepare failed for %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+ ath10k_warn("Peer assoc failed for STA %pM\n: %d",
+ sta->addr, ret);
return ret;
}
@@ -1333,8 +1378,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
continue;
ath10k_dbg(ATH10K_DBG_WMI,
- "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
- __func__, ch - arg.channels, arg.n_channels,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ch->max_antenna_gain, ch->mode);
@@ -1391,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
+ struct ieee80211_tx_info *info)
+{
+ if (info->control.vif)
+ return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
+
+ if (ar->monitor_enabled)
+ return ar->monitor_vdev_id;
+
+ ath10k_warn("could not resolve vdev id\n");
+ return 0;
+}
+
/*
* Frames sent to the FW have to be in "Native Wifi" format.
* Strip the QoS field from the 802.11 header.
@@ -1411,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
+static void ath10k_tx_wep_key_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ wep_key_work);
+ int ret, keyidx = arvif->def_wep_key_newidx;
+
+ if (arvif->def_wep_key_idx == keyidx)
+ return;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+ if (ret) {
+ ath10k_warn("could not update wep keyidx (%d)\n", ret);
+ return;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+}
+
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1419,11 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
struct ath10k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
- int ret;
-
- /* TODO AP mode should be implemented */
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
if (!ieee80211_has_protected(hdr->frame_control))
return;
@@ -1435,20 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
key->cipher != WLAN_CIPHER_SUITE_WEP104)
return;
- if (key->keyidx == arvif->def_wep_key_index)
+ if (key->keyidx == arvif->def_wep_key_idx)
return;
- ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
-
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_DEF_KEYID,
- key->keyidx);
- if (ret) {
- ath10k_warn("could not update wep keyidx (%d)\n", ret);
- return;
- }
-
- arvif->def_wep_key_index = key->keyidx;
+ /* FIXME: Most likely a few frames will be TXed with an old key. Simply
+ * queueing frames until key index is updated is not an option because
+ * sk_buff may need more processing to be done, e.g. offchannel */
+ arvif->def_wep_key_newidx = key->keyidx;
+ ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
}
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
@@ -1478,19 +1563,42 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int ret;
+ int ret = 0;
- if (ieee80211_is_mgmt(hdr->frame_control))
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- else if (ieee80211_is_nullfunc(hdr->frame_control))
+ if (ar->htt.target_version_major >= 3) {
+ /* Since HTT 3.0 there is no separate mgmt tx command */
+ ret = ath10k_htt_tx(&ar->htt, skb);
+ goto exit;
+ }
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features)) {
+ if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+ ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn("wmi mgmt_tx queue limit reached\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ } else {
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+ }
+ } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features) &&
+ ieee80211_is_nullfunc(hdr->frame_control)) {
/* FW does not report tx status properly for NullFunc frames
* unless they are sent through mgmt tx path. mac80211 sends
- * those frames when it detects link/beacon loss and depends on
- * the tx status to be correct. */
+ * those frames when it detects link/beacon loss and depends
+ * on the tx status to be correct. */
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- else
+ } else {
ret = ath10k_htt_tx(&ar->htt, skb);
+ }
+exit:
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@@ -1534,18 +1642,19 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
- vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+ vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
if (peer)
+ /* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
@@ -1557,7 +1666,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
spin_lock_bh(&ar->data_lock);
- INIT_COMPLETION(ar->offchan_tx_completed);
+ reinit_completion(&ar->offchan_tx_completed);
ar->offchan_tx_skb = skb;
spin_unlock_bh(&ar->data_lock);
@@ -1580,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
}
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+ struct sk_buff *skb;
+ int ret;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret)
+ ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ }
+}
+
/************/
/* Scanning */
/************/
@@ -1643,8 +1782,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
return -EIO;
}
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0)
ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1815,6 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- /* make sure we submit the command so the completion
- * timeout makes sense */
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
if (ret == 0) {
ath10k_abort_scan(ar);
@@ -1709,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = NULL;
- u32 vdev_id = 0;
- u8 tid;
-
- if (info->control.vif) {
- arvif = ath10k_vif_to_arvif(info->control.vif);
- vdev_id = arvif->vdev_id;
- } else if (ar->monitor_enabled) {
- vdev_id = ar->monitor_vdev_id;
- }
+ u8 tid, vdev_id;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
@@ -1726,12 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* we must calculate tid before we apply qos workaround
* as we'd lose the qos control field */
- tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- if (ieee80211_is_data_qos(hdr->frame_control) &&
- is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- }
+ tid = ath10k_tx_h_get_tid(hdr);
+ vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
/* it makes no sense to process injected frames like that */
if (info->control.vif &&
@@ -1742,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
- memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
- ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.tid = tid;
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
ATH10K_SKB_CB(skb)->htt.is_offchan = true;
- ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+ ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
@@ -1771,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar)
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
@@ -1817,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
else if (ar->state == ATH10K_STATE_RESTARTING)
ar->state = ATH10K_STATE_RESTARTED;
- ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
if (ret)
ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
ret);
- ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
if (ret)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
@@ -1847,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw)
ar->state = ATH10K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
-static void ath10k_config_ps(struct ath10k *ar)
+static int ath10k_config_ps(struct ath10k *ar)
{
- struct ath10k_generic_iter ar_iter;
+ struct ath10k_vif *arvif;
+ int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar->state == ATH10K_STATE_RESTARTED)
- return;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
-
- ieee80211_iterate_active_interfaces_atomic(
- ar->hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_ps_iter, &ar_iter);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn("could not setup powersave (%d)\n", ret);
+ break;
+ }
+ }
- if (ar_iter.ret)
- ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+ return ret;
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1884,7 +2002,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
conf->chandef.chan->center_freq);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +2019,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
- ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1922,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u32 value;
int bit;
+ u32 vdev_param;
mutex_lock(&ar->conf_mutex);
@@ -1930,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->ar = ar;
arvif->vif = vif;
+ INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+
if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
ath10k_warn("Only one monitor interface allowed\n");
ret = -EBUSY;
- goto exit;
+ goto err;
}
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
- goto exit;
+ goto err;
}
arvif->vdev_id = bit - 1;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
- ar->free_vdev_map &= ~(1 << arvif->vdev_id);
if (ar->p2p)
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
@@ -1973,32 +2092,41 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
ath10k_warn("WMI vdev create failed: ret %d\n", ret);
- goto exit;
+ goto err;
}
- ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
- arvif->def_wep_key_index);
- if (ret)
+ ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+ list_add(&arvif->list, &ar->arvifs);
+
+ vdev_param = ar->wmi.vdev_param->def_keyid;
+ ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
+ arvif->def_wep_key_idx);
+ if (ret) {
ath10k_warn("Failed to set default keyid: %d\n", ret);
+ goto err_vdev_delete;
+ }
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
ATH10K_HW_TXRX_NATIVE_WIFI);
- if (ret)
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
ath10k_warn("Failed to set TX encap: %d\n", ret);
+ goto err_vdev_delete;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
ath10k_warn("Failed to create peer for AP: %d\n", ret);
- goto exit;
+ goto err_vdev_delete;
}
}
@@ -2007,39 +2135,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+ goto err_peer_delete;
+ }
param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+ goto err_peer_delete;
+ }
param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+ goto err_peer_delete;
+ }
}
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
- if (ret)
+ if (ret) {
ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
- if (ret)
+ if (ret) {
ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;
-exit:
mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_peer_delete:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+ list_del(&arvif->list);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -2052,9 +2203,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+ cancel_work_sync(&arvif->wep_key_work);
+
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+ }
+ spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
+ list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
@@ -2064,6 +2223,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+ arvif->vdev_id);
+
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2267,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
!ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Unable to start monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_stop(ar);
if (ret)
ath10k_warn("Unable to stop monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
}
mutex_unlock(&ar->conf_mutex);
@@ -2130,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
+ u32 vdev_param, pdev_param;
mutex_lock(&ar->conf_mutex);
@@ -2138,44 +2303,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BEACON_INTERVAL,
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->beacon_interval);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Beacon interval: %d set for VDEV: %d\n",
- arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
- ret = ath10k_wmi_pdev_set_param(ar,
- WMI_PDEV_PARAM_BEACON_TX_MODE,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+ pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set staggered beacon mode for VDEV: %d\n",
- arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_DTIM_PERIOD,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+ vdev_param = ar->wmi.vdev_param->dtim_period;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set dtim period: %d for VDEV: %d\n",
- arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2353,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(info->bssid)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d create peer %pM\n",
+ arvif->vdev_id, info->bssid);
+
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- info->bssid, arvif->vdev_id);
-
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@@ -2207,11 +2371,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(arvif->u.sta.bssid, info->bssid,
ETH_ALEN);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start %pM\n",
+ arvif->vdev_id, info->bssid);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d started with BSSID: %pM\n",
- arvif->vdev_id, info->bssid);
}
/*
@@ -2235,16 +2400,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
cts_prot = 0;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ arvif->vdev_id, cts_prot);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set CTS prot: %d for VDEV: %d\n",
- cts_prot, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2419,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_SLOT_TIME,
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set slottime: %d for VDEV: %d\n",
- slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2437,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
preamble = WMI_VDEV_PREAMBLE_LONG;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_PREAMBLE,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d preamble %dn",
+ arvif->vdev_id, preamble);
+
+ vdev_param = ar->wmi.vdev_param->preamble;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set preamble: %d for VDEV: %d\n",
- preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
@@ -2313,8 +2476,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
goto exit;
}
- INIT_COMPLETION(ar->scan.started);
- INIT_COMPLETION(ar->scan.completed);
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
ar->scan.in_progress = true;
ar->scan.aborting = false;
ar->scan.is_roc = false;
@@ -2474,27 +2637,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta)\n",
+ arvif->vdev_id, sta->addr);
+
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM (sta gone)\n",
+ arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2667,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to assoc state\n",
- sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2681,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to disassociated state\n",
- sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@@ -2672,9 +2832,9 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
goto exit;
}
- INIT_COMPLETION(ar->scan.started);
- INIT_COMPLETION(ar->scan.completed);
- INIT_COMPLETION(ar->scan.on_channel);
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
ar->scan.in_progress = true;
ar->scan.aborting = false;
ar->scan.is_roc = true;
@@ -2732,88 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
* Both RTS and Fragmentation threshold are interface-specific
* in ath10k, but device-specific in mac80211.
*/
-static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath10k_generic_iter *ar_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
-
- lockdep_assert_held(&arvif->ar->conf_mutex);
-
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
- return;
-
- ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
- if (ar_iter->ret)
- ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
- arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set RTS threshold: %d for VDEV: %d\n",
- rts, arvif->vdev_id);
-}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
+ struct ath10k_vif *arvif;
+ int ret = 0;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(
- hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_set_rts_iter, &ar_iter);
- mutex_unlock(&ar->conf_mutex);
-
- return ar_iter.ret;
-}
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
-static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath10k_generic_iter *ar_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
-
- lockdep_assert_held(&arvif->ar->conf_mutex);
-
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
- return;
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+ ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
- ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
- if (ar_iter->ret)
- ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
- arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set frag threshold: %d for VDEV: %d\n",
- frag, arvif->vdev_id);
+ return ret;
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
+ struct ath10k_vif *arvif;
+ int ret = 0;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(
- hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_set_frag_iter, &ar_iter);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_frag(arvif, value);
+ if (ret) {
+ ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
mutex_unlock(&ar->conf_mutex);
- return ar_iter.ret;
+ return ret;
}
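The two hunks above drop the ieee80211_iterate_active_interfaces_atomic() helpers and instead walk the driver's own arvifs list under conf_mutex, applying the device-wide value to every vdev and propagating the first error. A small self-contained sketch of that "apply to all, stop on first failure" walk, with the locking elided and all names (vif, set_rts, set_rts_all) invented for illustration:

#include <stdio.h>

struct vif {
	int vdev_id;
	struct vif *next;
};

/* stand-in for the per-vdev setter; returns 0 or a negative errno */
static int set_rts(struct vif *v, unsigned int value)
{
	printf("vdev %d rts threshold %u\n", v->vdev_id, value);
	return 0;
}

/* Apply one device-wide value to every vif; stop and report the
 * first failure, mirroring the structure of the patched helpers. */
static int set_rts_all(struct vif *head, unsigned int value)
{
	struct vif *v;
	int ret = 0;

	for (v = head; v; v = v->next) {
		ret = set_rts(v, value);
		if (ret) {
			fprintf(stderr, "could not set rts for vdev %d (%d)\n",
				v->vdev_id, ret);
			break;
		}
	}
	return ret;
}

int main(void)
{
	struct vif b = { .vdev_id = 1, .next = NULL };
	struct vif a = { .vdev_id = 0, .next = &b };

	return set_rts_all(&a, 2347) ? 1 : 0;
}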
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
@@ -2836,8 +2959,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
- empty = bitmap_empty(ar->htt.used_msdu_ids,
- ar->htt.max_num_pending_tx);
+ empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3448,10 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
+ /* MSDU can have an HTT TX fragment descriptor pushed in front. The
+ * additional 4 bytes are used for padding/alignment if necessary. */
+ ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6fce9bfb19a..ba1021997b8 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef50b1b..9e86a811086 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
-#define QCA988X_1_0_DEVICE_ID (0xabcd)
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
- { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{0}
};
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
- /* could be moved to share CE3 */
- /* target->host HTT + HTC control */
- { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
- /* target->host WMI */
- { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
- /* host->target WMI */
- { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
- /* host->target HTT */
- { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
- CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
- /* unused */
- { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* Target autonomous hif_memcpy */
- { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* ce_diag, the Diagnostic Window */
- { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: unused */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
- /* target->host HTT + HTC control */
- { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
- /* target->host WMI */
- { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target WMI */
- { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target HTT */
- { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = 0,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = 1,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 512,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = 2,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = 3,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = 4,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 256,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* NB: 50% of src nentries, since tx has 2 frags */
- /* unused */
- { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* Reserved for target autonomous hif_memcpy */
- { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+ /* CE5: unused */
+ {
+ .pipenum = 5,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = 6,
+ .pipedir = PIPEDIR_INOUT,
+ .nentries = 32,
+ .nbytes_max = 4096,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* CE7 used only by Host */
};
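Both Copy Engine tables above move from positional initializers to C99 designated initializers, so every field is named at the point of use and any omitted member defaults to zero. A tiny standalone example of the same idiom; the pipe_cfg struct and its values are made up for illustration:

#include <stdio.h>

struct pipe_cfg {
	unsigned int pipenum;
	unsigned int nentries;
	unsigned int nbytes_max;
	unsigned int flags;
};

/* Designated initializers: order-independent, self-documenting,
 * and unnamed fields (here: flags) are zero-initialized. */
static const struct pipe_cfg cfgs[] = {
	{ .pipenum = 0, .nentries = 32, .nbytes_max = 256 },
	{ .pipenum = 1, .nentries = 32, .nbytes_max = 512 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
		printf("pipe %u: %u entries, max %u bytes, flags 0x%x\n",
		       cfgs[i].pipenum, cfgs[i].nentries,
		       cfgs[i].nbytes_max, cfgs[i].flags);
	return 0;
}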
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
ath10k_warn("Unable to wakeup target\n");
}
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
atomic_inc(&ar_pci->keep_awake_count);
if (ar_pci->verified_awake)
- return;
+ return 0;
for (;;) {
if (ath10k_pci_target_is_awake(ar)) {
ar_pci->verified_awake = true;
- break;
+ return 0;
}
if (tot_delay > PCIE_WAKE_TIMEOUT) {
- ath10k_warn("target takes too long to wake up (awake count %d)\n",
+ ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+ PCIE_WAKE_TIMEOUT,
atomic_read(&ar_pci->keep_awake_count));
- break;
+ return -ETIMEDOUT;
}
udelay(curr_delay);
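With this change ath10k_do_pci_wake() returns an int and reports -ETIMEDOUT instead of silently giving up, so callers such as the probe path can fail cleanly. A standalone user-space sketch of a bounded poll loop with that contract; target_is_awake, do_wake and the timeout value are invented for the example:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WAKE_TIMEOUT_US 5000

/* stand-in for a hardware readiness check */
static bool target_is_awake(void)
{
	static int polls;
	return ++polls > 3;	/* pretend it wakes up after a few polls */
}

/* Poll until the target is awake or the budget is spent.
 * Returns 0 on success, -ETIMEDOUT on failure, like the patched helper. */
static int do_wake(void)
{
	unsigned int waited_us = 0;
	unsigned int delay_us = 100;

	for (;;) {
		if (target_is_awake())
			return 0;
		if (waited_us > WAKE_TIMEOUT_US)
			return -ETIMEDOUT;
		usleep(delay_us);
		waited_us += delay_us;
	}
}

int main(void)
{
	int ret = do_wake();

	if (ret)
		fprintf(stderr, "wake failed: %d\n", ret);
	return ret ? 1 : 0;
}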
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
* FIXME: Handle OOM properly.
*/
static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k_pci_compl *compl = NULL;
@@ -511,39 +612,28 @@ exit:
}
/* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
- bool process = false;
-
- do {
- /*
- * For the send completion of an item in sendlist, just
- * increment num_sends_allowed. The upper layer callback will
- * be triggered when last fragment is done with send.
- */
- if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
- spin_lock_bh(&pipe_info->pipe_lock);
- pipe_info->num_sends_allowed++;
- spin_unlock_bh(&pipe_info->pipe_lock);
- continue;
- }
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+ &ce_data, &nbytes,
+ &transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+ compl->state = ATH10K_PCI_COMPL_SEND;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- process = true;
- } while (ath10k_ce_completed_send_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id) == 0);
-
- /*
- * If only some of the items within a sendlist have completed,
- * don't invoke completion processing until the entire sendlist
- * has been sent.
- */
- if (!process)
- return;
+ }
ath10k_pci_process_ce(ar);
}
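The send-done callback above no longer receives one completed transfer per invocation; it drains the Copy Engine itself via ath10k_ce_completed_send_next() in a while loop until nothing is pending. A standalone sketch of that drain-loop shape over a toy queue; pop_next, handle and on_completion_irq are invented names:

#include <stdio.h>

#define QLEN 4

static int queue[QLEN] = { 10, 20, 30 };
static int head, count = 3;

/* Returns 0 and pops one item, or -1 when nothing is pending. */
static int pop_next(int *item)
{
	if (!count)
		return -1;
	*item = queue[head];
	head = (head + 1) % QLEN;
	count--;
	return 0;
}

static void handle(int item)
{
	printf("completed transfer %d\n", item);
}

/* One callback invocation drains everything completed so far,
 * mirroring the while (...completed_send_next(...) == 0) loop above. */
static void on_completion_irq(void)
{
	int item;

	while (pop_next(&item) == 0)
		handle(item);
}

int main(void)
{
	on_completion_irq();
	return 0;
}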
/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
- void *transfer_context, u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
- do {
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &ce_data, &nbytes, &transfer_id,
+ &flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+ compl->state = ATH10K_PCI_COMPL_RECV;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- } while (ath10k_ce_completed_recv_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id,
- &flags) == 0);
+ }
ath10k_pci_process_ce(ar);
}
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ce_state *ce_hdl = pipe_info->ce_hdl;
- struct ce_sendlist sendlist;
+ struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+ struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
unsigned int len;
u32 flags = 0;
int ret;
- memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
len = min(bytes, nbuf->len);
bytes -= len;
@@ -648,19 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
- ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
- /* Make sure we have resources to handle this request */
- spin_lock_bh(&pipe_info->pipe_lock);
- if (!pipe_info->num_sends_allowed) {
- ath10k_warn("Pipe: %d is full\n", pipe_id);
- spin_unlock_bh(&pipe_info->pipe_lock);
- return -ENOSR;
- }
- pipe_info->num_sends_allowed--;
- spin_unlock_bh(&pipe_info->pipe_lock);
-
- ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+ ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
+ flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
@@ -670,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
- int ret;
-
- spin_lock_bh(&pipe_info->pipe_lock);
- ret = pipe_info->num_sends_allowed;
- spin_unlock_bh(&pipe_info->pipe_lock);
-
- return ret;
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -764,9 +818,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
static int ath10k_pci_start_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_diag = ar_pci->ce_diag;
+ struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
int i, pipe_num, completions, disable_interrupts;
@@ -792,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
ath10k_pci_ce_send_done,
disable_interrupts);
completions += attr->src_nentries;
- pipe_info->num_sends_allowed = attr->src_nentries - 1;
}
if (attr->dest_nentries) {
@@ -805,15 +858,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
continue;
for (i = 0; i < completions; i++) {
- compl = kmalloc(sizeof(struct ath10k_pci_compl),
- GFP_KERNEL);
+ compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
ath10k_pci_stop_ce(ar);
return -ENOMEM;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
list_add_tail(&compl->list, &pipe_info->compl_free);
}
}
@@ -840,7 +892,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
* their associated resources */
spin_lock_bh(&ar_pci->compl_lock);
list_for_each_entry(compl, &ar_pci->compl_process, list) {
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
ATH10K_SKB_CB(skb)->is_aborted = true;
}
spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +902,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl, *tmp;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct sk_buff *netbuf;
int pipe_num;
@@ -861,7 +913,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
list_del(&compl->list);
- netbuf = (struct sk_buff *)compl->transfer_context;
+ netbuf = compl->skb;
dev_kfree_skb_any(netbuf);
kfree(compl);
}
@@ -912,12 +964,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
list_del(&compl->list);
spin_unlock_bh(&ar_pci->compl_lock);
- if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+ switch (compl->state) {
+ case ATH10K_PCI_COMPL_SEND:
cb->tx_completion(ar,
- compl->transfer_context,
+ compl->skb,
compl->transfer_id);
send_done = 1;
- } else {
+ break;
+ case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +979,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
break;
}
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
nbytes = compl->nbytes;
ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,16 +998,23 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
nbytes,
skb->len + skb_tailroom(skb));
}
+ break;
+ case ATH10K_PCI_COMPL_FREE:
+ ath10k_warn("free completion cannot be processed\n");
+ break;
+ default:
+ ath10k_warn("invalid completion state (%d)\n",
+ compl->state);
+ break;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
/*
* Add completion back to the pipe's free list.
*/
spin_lock_bh(&compl->pipe_info->pipe_lock);
list_add_tail(&compl->list, &compl->pipe_info->compl_free);
- compl->pipe_info->num_sends_allowed += send_done;
spin_unlock_bh(&compl->pipe_info->pipe_lock);
}
@@ -1037,12 +1098,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num)
{
struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = pipe_info->ce_hdl;
+ struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
struct sk_buff *skb;
dma_addr_t ce_data;
int i, ret = 0;
@@ -1097,7 +1158,7 @@ err:
static int ath10k_pci_post_rx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num, ret = 0;
@@ -1147,11 +1208,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
@@ -1179,11 +1240,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
}
}
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
@@ -1206,15 +1267,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- if (netbuf != CE_SENDLIST_ITEM_CTXT)
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
+ /*
+ * Indicate the completion to the higher layer to free
+ * the buffer
+ */
+ ATH10K_SKB_CB(netbuf)->is_aborted = true;
+ ar_pci->msg_callbacks_current.tx_completion(ar,
+ netbuf,
+ id);
}
}
@@ -1232,7 +1292,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1303,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1353,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
- struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
dma_addr_t req_paddr = 0;
dma_addr_t resp_paddr = 0;
struct bmi_xfer xfer = {};
@@ -1378,13 +1440,16 @@ err_dma:
return ret;
}
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id))
+ return;
if (xfer->wait_for_resp)
return;
@@ -1392,14 +1457,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
complete(&xfer->done);
}
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id, &flags))
+ return;
if (!xfer->wait_for_resp) {
ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1747,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num;
@@ -1895,7 +1963,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
- struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+ struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
struct ath10k_pci *ar_pci = pipe->ar_pci;
ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2280,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
static void ath10k_pci_device_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *mem = ar_pci->mem;
int i;
u32 val;
if (!SOC_GLOBAL_RESET_ADDRESS)
return;
- if (!mem)
- return;
-
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2295,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
}
/* Put Target, including PCIe, into RESET. */
- val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK)
break;
msleep(1);
@@ -2245,16 +2308,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK))
break;
msleep(1);
}
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2330,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
switch (i) {
case ATH10K_PCI_FEATURE_MSI_X:
- ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
- break;
- case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
break;
}
}
@@ -2286,7 +2346,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- u32 lcr_val;
+ u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
@@ -2298,15 +2358,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
switch (pci_dev->device) {
- case QCA988X_1_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
- break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
break;
default:
ret = -ENODEV;
- ath10k_err("Unkown device ID: %d\n", pci_dev->device);
+ ath10k_err("Unknown device ID: %d\n", pci_dev->device);
goto err_ar_pci;
}
@@ -2322,10 +2379,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- /* Enable QCA988X_1.0 HW workarounds */
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
- spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2448,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
spin_lock_init(&ar_pci->ce_lock);
- ar_pci->cacheline_sz = dma_get_cache_alignment();
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("Failed to get chip id: %d\n", ret);
+ return ret;
+ }
+
+ chip_id = ath10k_pci_read32(ar,
+ RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+ ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
- ret = ath10k_core_register(ar);
+ ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
goto err_iomap;
@@ -2414,7 +2478,6 @@ err_region:
err_device:
pci_disable_device(pdev);
err_ar:
- pci_set_drvdata(pdev, NULL);
ath10k_core_destroy(ar);
err_ar_pci:
/* call HIF PCI free here */
@@ -2442,7 +2505,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
pci_clear_master(pdev);
@@ -2483,9 +2545,6 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 871bb339d56..52fb7b97357 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,22 +43,23 @@ struct bmi_xfer {
u32 resp_len;
};
+enum ath10k_pci_compl_state {
+ ATH10K_PCI_COMPL_FREE = 0,
+ ATH10K_PCI_COMPL_SEND,
+ ATH10K_PCI_COMPL_RECV,
+};
+
struct ath10k_pci_compl {
struct list_head list;
- int send_or_recv;
- struct ce_state *ce_state;
- struct hif_ce_pipe_info *pipe_info;
- void *transfer_context;
+ enum ath10k_pci_compl_state state;
+ struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci_pipe *pipe_info;
+ struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
-/* compl_state.send_or_recv */
-#define HIF_CE_COMPLETE_FREE 0
-#define HIF_CE_COMPLETE_SEND 1
-#define HIF_CE_COMPLETE_RECV 2
-
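ath10k_pci_compl now tags completions with an enum instead of the HIF_CE_COMPLETE_* defines, which lets the processing loop switch over the state and warn on anything unexpected. A standalone sketch of the same idiom; compl_state, compl and process are illustrative names only:

#include <stdio.h>

enum compl_state {
	COMPL_FREE = 0,
	COMPL_SEND,
	COMPL_RECV,
};

struct compl {
	enum compl_state state;
};

static void process(struct compl *c)
{
	switch (c->state) {
	case COMPL_SEND:
		printf("tx completion\n");
		break;
	case COMPL_RECV:
		printf("rx completion\n");
		break;
	case COMPL_FREE:
		fprintf(stderr, "free completion cannot be processed\n");
		break;
	default:
		fprintf(stderr, "invalid completion state (%d)\n", c->state);
		break;
	}
	c->state = COMPL_FREE;	/* return the slot to the free pool */
}

int main(void)
{
	struct compl c = { .state = COMPL_SEND };

	process(&c);
	process(&c);	/* second call hits the COMPL_FREE branch */
	return 0;
}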
/*
* PCI-specific Target state
*
@@ -152,17 +153,16 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
- ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
- ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
+ ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
-struct hif_ce_pipe_info {
+struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitiates use of pipe_info ptrs. */
u8 pipe_num;
@@ -178,9 +178,6 @@ struct hif_ce_pipe_info {
/* List of free CE completion slots */
struct list_head compl_free;
- /* Limit the number of outstanding send requests. */
- int num_sends_allowed;
-
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
@@ -190,7 +187,6 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
- int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
@@ -219,7 +215,7 @@ struct ath10k_pci {
bool compl_processing;
- struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
@@ -227,16 +223,13 @@ struct ath10k_pci {
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
- struct ce_state *ce_id_to_state[CE_COUNT_MAX];
-
- /* makes sure that dummy reads are atomic */
- spinlock_t hw_v1_workaround_lock;
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +237,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
return ar->hif.priv;
}
-static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
- return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
-static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
- iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +307,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *addr = ar_pci->mem;
-
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- unsigned long irq_flags;
- spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
-
- ioread32(addr+offset+4); /* 3rd read prior to write */
- ioread32(addr+offset+4); /* 2nd read prior to write */
- ioread32(addr+offset+4); /* 1st read prior to write */
- iowrite32(value, addr+offset);
-
- spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
- irq_flags);
- } else {
- iowrite32(value, addr+offset);
- }
+ iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +318,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
-void ath10k_do_pci_wake(struct ath10k *ar);
+int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
-static inline void ath10k_pci_wake(struct ath10k *ar)
+static inline int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- ath10k_do_pci_wake(ar);
+ return ath10k_do_pci_wake(ar);
+
+ return 0;
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bfec6c8f2ec..1c584c4b019 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In the case of an A-MSDU, only the first frame in the sequence contains (a) and (b). */
enum rx_msdu_decap_format {
- RX_MSDU_DECAP_RAW = 0,
- RX_MSDU_DECAP_NATIVE_WIFI = 1,
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header. */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
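The new comment above spells out the layout of rx_hdr_status: an 802.11 header and an optional crypto parameter (0/4/8 bytes), each padded to 4 bytes, followed by an A-MSDU subframe header and LLC/SNAP where applicable. The helper below just turns that description into arithmetic; it is purely illustrative under the stated sizes and is not an ath10k function:

#include <stdio.h>

#define ALIGN4(x) (((x) + 3U) & ~3U)

/* Decapped header length per the layout described above:
 * 802.11 header and crypto parameter are each padded to 4 bytes,
 * then 14 bytes of A-MSDU subframe header (if present) and
 * 8 bytes of RFC1042 LLC/SNAP. */
static unsigned int decap_hdr_len(unsigned int ieee80211_hdr_len,
				  unsigned int crypto_param_len,
				  int amsdu_subframe)
{
	unsigned int len = 0;

	len += ALIGN4(ieee80211_hdr_len);
	len += ALIGN4(crypto_param_len);	/* 0, 4 (WEP) or 8 (TKIP/AES) */
	if (amsdu_subframe)
		len += 14;
	len += 8;				/* LLC/SNAP */
	return len;
}

int main(void)
{
	/* e.g. a QoS data header (26 bytes) with AES and no A-MSDU */
	printf("%u bytes\n", decap_hdr_len(26, 8, 0));
	return 0;
}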
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 85e806bf725..90817ddc92b 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(int id, void *buf, size_t buf_len),
+ TP_PROTO(int id, void *buf, size_t buf_len, int ret),
- TP_ARGS(id, buf, buf_len),
+ TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
+ __field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
+ __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "id %d len %zu",
+ "id %d len %zu ret %d",
__entry->id,
- __entry->buf_len
+ __entry->buf_len,
+ __entry->ret
)
);
@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "len %zu",
+ __entry->buf_len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 68b6faefd1d..5ae373a1e29 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,40 +44,39 @@ out:
spin_unlock_bh(&ar->data_lock);
}
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
- struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
- struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *msdu;
int ret;
- if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
- return;
-
- ATH10K_SKB_CB(txdesc)->htt.refcount--;
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
return;
-
- if (txfrag) {
- ret = ath10k_skb_unmap(dev, txfrag);
- if (ret)
- ath10k_warn("txfrag unmap failed (%d)\n", ret);
-
- dev_kfree_skb_any(txfrag);
}
+ msdu = htt->pending_tx[tx_done->msdu_id];
+ skb_cb = ATH10K_SKB_CB(msdu);
+
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
+ if (skb_cb->htt.frag_len)
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
- memset(&info->status, 0, sizeof(info->status));
- if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+ if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+ if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
exit:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+ htt->pending_tx[tx_done->msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
- if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+ if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
-
- dev_kfree_skb_any(txdesc);
-}
-
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
-{
- struct sk_buff *txdesc;
-
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
-
- if (tx_done->msdu_id >= htt->max_num_pending_tx) {
- ath10k_warn("warning: msdu_id %d too big, ignoring\n",
- tx_done->msdu_id);
- return;
- }
-
- txdesc = htt->pending_tx[tx_done->msdu_id];
-
- ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
- ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
-
- ath10k_txrx_tx_unref(htt, txdesc);
}
static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e78632a76df..356dc9c04c9 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,9 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 55f90c76186..ccf3597fd9e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,29 +23,470 @@
#include "wmi.h"
#include "mac.h"
-void ath10k_wmi_flush_tx(struct ath10k *ar)
-{
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- if (ar->state == ATH10K_STATE_WEDGED) {
- ath10k_warn("wmi flush skipped - device is wedged anyway\n");
- return;
- }
-
- ret = wait_event_timeout(ar->wmi.wq,
- atomic_read(&ar->wmi.pending_tx_count) == 0,
- 5*HZ);
- if (atomic_read(&ar->wmi.pending_tx_count) == 0)
- return;
-
- if (ret == 0)
- ret = -ETIMEDOUT;
-
- if (ret < 0)
- ath10k_warn("wmi flush failed (%d)\n", ret);
-}
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+ .init_cmdid = WMI_INIT_CMDID,
+ .start_scan_cmdid = WMI_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+ .echo_cmdid = WMI_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+};
+
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+ .init_cmdid = WMI_10X_INIT_CMDID,
+ .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10X_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+};
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+ .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_VDEV_PARAM_WDS,
+ .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_VDEV_PARAM_SGI,
+ .ldpc = WMI_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dcs = WMI_PDEV_PARAM_DCS,
+ .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
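
The maps above are what the rest of the patch dereferences through ar->wmi.cmd, ar->wmi.vdev_param and ar->wmi.pdev_param, so one caller works against both MAIN and 10.X firmware. As a rough sketch of the intended lookup pattern (a hypothetical helper, not part of this patch; it only assumes the beacon_tx_mode map field and the ath10k_wmi_pdev_set_param() change shown further down in this diff):

/* Hypothetical caller: resolve the abstract pdev param through the
 * per-firmware map; MAIN and 10.X firmware use different numeric ids,
 * and params a given firmware lacks resolve to WMI_PDEV_PARAM_UNSUPPORTED. */
static int ath10k_wmi_set_beacon_tx_mode(struct ath10k *ar, u32 mode)
{
	u32 param = ar->wmi.pdev_param->beacon_tx_mode;

	/* ath10k_wmi_pdev_set_param() rejects WMI_PDEV_PARAM_UNSUPPORTED
	 * with -EOPNOTSUPP, so callers need no per-firmware checks. */
	return ath10k_wmi_pdev_set_param(ar, param, mode);
}
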
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
@@ -85,18 +526,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb(skb);
-
- if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
- wake_up(&ar->wmi.wq);
}
-/* WMI command API */
-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
- enum wmi_cmd_id cmd_id)
+static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
- int status;
+ int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +544,146 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
- if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
- WMI_MAX_PENDING_TX_COUNT) {
- /* avoid using up memory when FW hangs */
- atomic_dec(&ar->wmi.pending_tx_count);
- return -EBUSY;
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+ trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct wmi_bcn_tx_arg arg = {0};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->data_lock);
+
+ if (arvif->beacon == NULL)
+ return;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.tx_rate = 0;
+ arg.tx_power = 0;
+ arg.bcn = arvif->beacon->data;
+ arg.bcn_len = arvif->beacon->len;
+
+ ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+ if (ret)
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ ath10k_warn("wmi command %d is not supported by firmware\n",
+ cmd_id);
+ return ret;
}
- memset(skb_cb, 0, sizeof(*skb_cb));
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
- trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+ (ret != -EAGAIN);
+ }), 3*HZ);
- status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- if (status) {
+ if (ret)
dev_kfree_skb_any(skb);
- atomic_dec(&ar->wmi.pending_tx_count);
- return status;
+
+ return ret;
+}
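
The send path above now blocks (note the might_sleep()) for up to 3*HZ waiting for HTC tx credits: each time the credit waitqueue is woken, pending beacons are flushed first and the command is retried until ath10k_wmi_cmd_send_nowait() stops returning -EAGAIN. A minimal, hypothetical caller sketch (not part of this patch; it only reuses ath10k_wmi_alloc_skb() and the echo_cmdid map entry):

/* Hypothetical example of driving the blocking send path. */
static int ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(__le32));
	if (!skb)
		return -ENOMEM;

	*(__le32 *)skb->data = __cpu_to_le32(value);

	/* Sleeps until credits become available or the 3 second timeout
	 * expires; ath10k_wmi_cmd_send() frees the skb on failure. */
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->echo_cmdid);
}
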
+
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret = 0;
+ struct wmi_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *wmi_skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int len;
+ u16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr) + skb->len;
+ len = round_up(len, 4);
+
+ wmi_skb = ath10k_wmi_alloc_skb(len);
+ if (!wmi_skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
+
+ cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
+ cmd->hdr.tx_rate = 0;
+ cmd->hdr.tx_power = 0;
+ cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+
+ memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(cmd->buf, skb->data, skb->len);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
+ fc & IEEE80211_FCTL_STYPE);
+
+ /* Send the management frame buffer to the target */
+ ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
}
- return 0;
+	/* TODO: report tx status to mac80211 - for now just report ACK */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+
+ return ret;
}
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@@ -315,7 +873,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u32 rx_status;
@@ -325,13 +885,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rate;
u32 buf_len;
u16 fc;
+ int pull_len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
- channel = __le32_to_cpu(event->hdr.channel);
- buf_len = __le32_to_cpu(event->hdr.buf_len);
- rx_status = __le32_to_cpu(event->hdr.status);
- snr = __le32_to_cpu(event->hdr.snr);
- phy_mode = __le32_to_cpu(event->hdr.phy_mode);
- rate = __le32_to_cpu(event->hdr.rate);
+ channel = __le32_to_cpu(ev_hdr->channel);
+ buf_len = __le32_to_cpu(ev_hdr->buf_len);
+ rx_status = __le32_to_cpu(ev_hdr->status);
+ snr = __le32_to_cpu(ev_hdr->snr);
+ phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
+ rate = __le32_to_cpu(ev_hdr->rate);
memset(status, 0, sizeof(*status));
@@ -358,7 +929,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
- skb_pull(skb, sizeof(event->hdr));
+ skb_pull(skb, pull_len);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +1305,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
int i = -1;
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
- struct wmi_bcn_tx_arg arg;
struct sk_buff *bcn;
int vdev_id = 0;
- int ret;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@@ -794,17 +1363,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
- arg.vdev_id = arvif->vdev_id;
- arg.tx_rate = 0;
- arg.tx_power = 0;
- arg.bcn = bcn->data;
- arg.bcn_len = bcn->len;
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ ath10k_warn("SWBA overrun on vdev %d\n",
+ arvif->vdev_id);
+ dev_kfree_skb_any(arvif->beacon);
+ }
- ret = ath10k_wmi_beacon_send(ar, &arg);
- if (ret)
- ath10k_warn("could not send beacon (%d)\n", ret);
+ arvif->beacon = bcn;
- dev_kfree_skb_any(bcn);
+ ath10k_wmi_tx_beacon_nowait(arvif);
+ spin_unlock_bh(&ar->data_lock);
}
}
@@ -919,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
+static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ dma_addr_t paddr;
+ u32 pool_size;
+ int idx = ar->wmi.num_mem_chunks;
+
+ pool_size = num_units * round_up(unit_len, 4);
+
+ if (!pool_size)
+ return -EINVAL;
+
+ ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+ pool_size,
+ &paddr,
+ GFP_ATOMIC);
+ if (!ar->wmi.mem_chunks[idx].vaddr) {
+ ath10k_warn("failed to allocate memory chunk\n");
+ return -ENOMEM;
+ }
+
+ memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+ ar->wmi.mem_chunks[idx].paddr = paddr;
+ ar->wmi.mem_chunks[idx].len = pool_size;
+ ar->wmi.mem_chunks[idx].req_id = req_id;
+ ar->wmi.num_mem_chunks++;
+
+ return 0;
+}
+
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -943,6 +1561,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+ /* only manually set fw features when not using FW IE format */
+ if (ar->fw_api == 1 && ar->fw_version_build > 636)
+ set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -987,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
complete(&ar->wmi.service_ready);
}
+static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
+ struct wmi_service_ready_event_10x *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+ skb->len, sizeof(*ev));
+ return;
+ }
+
+ ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+ ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+ ar->fw_version_major =
+ (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+ ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+ ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
+
+ ar->ath_common.regulatory.current_rd =
+ __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+ ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+ sizeof(ev->wmi_service_bitmap));
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor);
+ }
+
+ num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
+
+ if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+		ath10k_warn("number of requested memory chunks (%d) exceeds the limit\n",
+			    num_mem_reqs);
+ return;
+ }
+
+ if (!num_mem_reqs)
+ goto exit;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+ num_mem_reqs);
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
+ num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
+ unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
+ num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+			/* number of units to allocate is number of
+			 * peers, 1 extra for self peer on target */
+			/* this value must be kept in sync between host
+			 * and target, otherwise they can drift apart */
+ num_units = TARGET_10X_NUM_PEERS + 1;
+ else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+ num_units = TARGET_10X_NUM_VDEVS + 1;
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+ __le32_to_cpu(ev->mem_reqs[i].num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+
+ ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+ unit_size);
+ if (ret)
+ return;
+ }
+
+exit:
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+ __le32_to_cpu(ev->sw_version),
+ __le32_to_cpu(ev->abi_version),
+ __le32_to_cpu(ev->phy_capability),
+ __le32_to_cpu(ev->ht_cap_info),
+ __le32_to_cpu(ev->vht_cap_info),
+ __le32_to_cpu(ev->vht_supp_mcs),
+ __le32_to_cpu(ev->sys_cap_info),
+ __le32_to_cpu(ev->num_mem_reqs),
+ __le32_to_cpu(ev->num_rf_chains));
+
+ complete(&ar->wmi.service_ready);
+}
+
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -1007,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
@@ -1126,64 +1850,158 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-static void ath10k_wmi_event_work(struct work_struct *work)
+static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k *ar = container_of(work, struct ath10k,
- wmi.wmi_event_work);
- struct sk_buff *skb;
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+ u16 len;
- for (;;) {
- skb = skb_dequeue(&ar->wmi.wmi_event_list);
- if (!skb)
- break;
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
- ath10k_wmi_event_process(ar, skb);
- }
-}
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
-{
- struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
- enum wmi_event_id event_id;
+ len = skb->len;
- event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+ trace_ath10k_wmi_event(id, skb->data, skb->len);
- /* some events require to be handled ASAP
- * thus can't be defered to a worker thread */
- switch (event_id) {
- case WMI_HOST_SWBA_EVENTID:
- case WMI_MGMT_RX_EVENTID:
- ath10k_wmi_event_process(ar, skb);
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
return;
+ case WMI_10X_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10X_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10X_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10X_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10X_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10X_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10X_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10X_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10X_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_10X_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10X_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10X_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10X_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10X_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+ ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ break;
+ case WMI_10X_READY_EVENTID:
+ ath10k_wmi_ready_event_rx(ar, skb);
+ break;
default:
+ ath10k_warn("Unknown eventid: %d\n", id);
break;
}
- skb_queue_tail(&ar->wmi.wmi_event_list, skb);
- queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+ dev_kfree_skb(skb);
+}
+
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ath10k_wmi_10x_process_rx(ar, skb);
+ else
+ ath10k_wmi_main_process_rx(ar, skb);
}
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ } else {
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ }
+
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
- init_waitqueue_head(&ar->wmi.wq);
-
- skb_queue_head_init(&ar->wmi.wmi_event_list);
- INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
- /* HTC should've drained the packets already */
- if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
- ath10k_warn("there are still pending packets\n");
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
- cancel_work_sync(&ar->wmi.wmi_event_work);
- skb_queue_purge(&ar->wmi.wmi_event_list);
+ ar->wmi.num_mem_chunks = 0;
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +2016,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -1234,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -1264,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
"wmi set channel mode %d freq %d\n",
arg->mode, arg->freq);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_channel_cmdid);
}
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
@@ -1279,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->suspend_opt = WMI_PDEV_SUSPEND;
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
@@ -1290,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
if (skb == NULL)
return -ENOMEM;
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
- u32 value)
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+ ath10k_warn("pdev param %d not supported by firmware\n", id);
+ return -EOPNOTSUPP;
+ }
+
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1309,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
id, value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
-int ath10k_wmi_cmd_init(struct ath10k *ar)
+static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
- u32 val;
+ u32 len, val;
+ int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
@@ -1370,23 +2196,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
- buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(len);
if (!buf)
return -ENOMEM;
cmd = (struct wmi_init_cmd *)buf->data;
- cmd->num_host_mem_chunks = 0;
+
+ if (ar->wmi.num_mem_chunks == 0) {
+ cmd->num_host_mem_chunks = 0;
+ goto out;
+ }
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks\n",
+		   ar->wmi.num_mem_chunks);
+
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ cmd->host_mem_chunks[i].ptr =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ cmd->host_mem_chunks[i].size =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ cmd->host_mem_chunks[i].req_id =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%x\n",
+ i,
+ cmd->host_mem_chunks[i].size,
+ cmd->host_mem_chunks[i].ptr);
+ }
+out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
- return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+ return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
-static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val;
+ int i;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(len);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+ if (ar->wmi.num_mem_chunks == 0) {
+ cmd->num_host_mem_chunks = 0;
+ goto out;
+ }
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks\n",
+		   ar->wmi.num_mem_chunks);
+
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ cmd->host_mem_chunks[i].ptr =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ cmd->host_mem_chunks[i].size =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ cmd->host_mem_chunks[i].req_id =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%x\n",
+ i,
+ cmd->host_mem_chunks[i].size,
+ cmd->host_mem_chunks[i].ptr);
+ }
+out:
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+ return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ int ret;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ret = ath10k_wmi_10x_cmd_init(ar);
+ else
+ ret = ath10k_wmi_main_cmd_init(ar);
+
+ return ret;
+}
+
+static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
{
int len;
- len = sizeof(struct wmi_start_scan_cmd);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ len = sizeof(struct wmi_start_scan_cmd_10x);
+ else
+ len = sizeof(struct wmi_start_scan_cmd);
if (arg->ie_len) {
if (!arg->ie)
@@ -1446,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
int len = 0;
int i;
- len = ath10k_wmi_start_scan_calc_len(arg);
+ len = ath10k_wmi_start_scan_calc_len(ar, arg);
if (len < 0)
return len; /* len contains error code here */
@@ -1478,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
/* TLV list starts after fields included in the struct */
- off = sizeof(*cmd);
+	/* There's just one field that differs between the two start_scan
+	 * structures - burst_duration, which we are not using anyway, so
+	 * there is no point in splitting the code here; just pick the
+	 * offset that matches the layout expected by the given FW. */
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ off = sizeof(struct wmi_start_scan_cmd_10x);
+ else
+ off = sizeof(struct wmi_start_scan_cmd);
if (arg->n_channels) {
channels = (void *)skb->data + off;
@@ -1540,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
- return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1556,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
arg->repeat_probe_time = 0;
arg->probe_spacing_time = 0;
arg->idle_time = 0;
- arg->max_scan_time = 5000;
+ arg->max_scan_time = 20000;
arg->probe_delay = 5;
arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
| WMI_SCAN_EVENT_COMPLETED
@@ -1600,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
arg->req_id, arg->req_type, arg->u.scan_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
"WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
vdev_id, type, subtype, macaddr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
@@ -1643,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"WMI vdev delete id %d\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg,
- enum wmi_cmd_id cmd_id)
+ u32 cmd_id)
{
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
- if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
- cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+ if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
+ cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
return -EINVAL;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
return -EINVAL;
@@ -1665,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
- if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+ if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
cmdname = "start";
- else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+ else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
cmdname = "restart";
else
return -EINVAL; /* should not happen, we already check cmd_id */
@@ -1718,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
int ath10k_wmi_vdev_start(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
- return ath10k_wmi_vdev_start_restart(ar, arg,
- WMI_VDEV_START_REQUEST_CMDID);
+ u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
+
+ return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
- return ath10k_wmi_vdev_start_restart(ar, arg,
- WMI_VDEV_RESTART_REQUEST_CMDID);
+ u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
+
+ return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
@@ -1743,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
@@ -1758,13 +2728,13 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd = (struct wmi_vdev_up_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->vdev_assoc_id = __cpu_to_le32(aid);
- memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+ memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
@@ -1782,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev down id 0x%x\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_param param_id, u32 param_value)
+ u32 param_id, u32 param_value)
{
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+ return -EOPNOTSUPP;
+ }
+
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1804,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
"wmi vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
@@ -1839,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
}
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -1859,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
@@ -1879,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}
int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
@@ -1900,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, tid_bitmap);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}
int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
@@ -1918,13 +2896,13 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
- memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+ memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
@@ -1945,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
"wmi set powersave id 0x%x mode %d\n",
vdev_id, psmode);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
}
int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
@@ -1967,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
}
int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
@@ -1993,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
"wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
vdev_id, param_id, value, mac);
- return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
@@ -2046,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->flags |= __cpu_to_le32(flags);
}
- return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
int ath10k_wmi_peer_assoc(struct ath10k *ar,
@@ -2105,10 +3086,11 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM\n",
arg->vdev_id, arg->addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg)
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
@@ -2124,7 +3106,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+ return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -2155,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
@@ -2171,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
cmd->stats_id = __cpu_to_le32(stats_id);
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
@@ -2190,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
- return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c5a4f8daf2..78c991aec7f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -208,6 +208,118 @@ struct wmi_mac_addr {
(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
} while (0)
+struct wmi_cmd_map {
+ u32 init_cmdid;
+ u32 start_scan_cmdid;
+ u32 stop_scan_cmdid;
+ u32 scan_chan_list_cmdid;
+ u32 scan_sch_prio_tbl_cmdid;
+ u32 pdev_set_regdomain_cmdid;
+ u32 pdev_set_channel_cmdid;
+ u32 pdev_set_param_cmdid;
+ u32 pdev_pktlog_enable_cmdid;
+ u32 pdev_pktlog_disable_cmdid;
+ u32 pdev_set_wmm_params_cmdid;
+ u32 pdev_set_ht_cap_ie_cmdid;
+ u32 pdev_set_vht_cap_ie_cmdid;
+ u32 pdev_set_dscp_tid_map_cmdid;
+ u32 pdev_set_quiet_mode_cmdid;
+ u32 pdev_green_ap_ps_enable_cmdid;
+ u32 pdev_get_tpc_config_cmdid;
+ u32 pdev_set_base_macaddr_cmdid;
+ u32 vdev_create_cmdid;
+ u32 vdev_delete_cmdid;
+ u32 vdev_start_request_cmdid;
+ u32 vdev_restart_request_cmdid;
+ u32 vdev_up_cmdid;
+ u32 vdev_stop_cmdid;
+ u32 vdev_down_cmdid;
+ u32 vdev_set_param_cmdid;
+ u32 vdev_install_key_cmdid;
+ u32 peer_create_cmdid;
+ u32 peer_delete_cmdid;
+ u32 peer_flush_tids_cmdid;
+ u32 peer_set_param_cmdid;
+ u32 peer_assoc_cmdid;
+ u32 peer_add_wds_entry_cmdid;
+ u32 peer_remove_wds_entry_cmdid;
+ u32 peer_mcast_group_cmdid;
+ u32 bcn_tx_cmdid;
+ u32 pdev_send_bcn_cmdid;
+ u32 bcn_tmpl_cmdid;
+ u32 bcn_filter_rx_cmdid;
+ u32 prb_req_filter_rx_cmdid;
+ u32 mgmt_tx_cmdid;
+ u32 prb_tmpl_cmdid;
+ u32 addba_clear_resp_cmdid;
+ u32 addba_send_cmdid;
+ u32 addba_status_cmdid;
+ u32 delba_send_cmdid;
+ u32 addba_set_resp_cmdid;
+ u32 send_singleamsdu_cmdid;
+ u32 sta_powersave_mode_cmdid;
+ u32 sta_powersave_param_cmdid;
+ u32 sta_mimo_ps_mode_cmdid;
+ u32 pdev_dfs_enable_cmdid;
+ u32 pdev_dfs_disable_cmdid;
+ u32 roam_scan_mode;
+ u32 roam_scan_rssi_threshold;
+ u32 roam_scan_period;
+ u32 roam_scan_rssi_change_threshold;
+ u32 roam_ap_profile;
+ u32 ofl_scan_add_ap_profile;
+ u32 ofl_scan_remove_ap_profile;
+ u32 ofl_scan_period;
+ u32 p2p_dev_set_device_info;
+ u32 p2p_dev_set_discoverability;
+ u32 p2p_go_set_beacon_ie;
+ u32 p2p_go_set_probe_resp_ie;
+ u32 p2p_set_vendor_ie_data_cmdid;
+ u32 ap_ps_peer_param_cmdid;
+ u32 ap_ps_peer_uapsd_coex_cmdid;
+ u32 peer_rate_retry_sched_cmdid;
+ u32 wlan_profile_trigger_cmdid;
+ u32 wlan_profile_set_hist_intvl_cmdid;
+ u32 wlan_profile_get_profile_data_cmdid;
+ u32 wlan_profile_enable_profile_id_cmdid;
+ u32 wlan_profile_list_profile_id_cmdid;
+ u32 pdev_suspend_cmdid;
+ u32 pdev_resume_cmdid;
+ u32 add_bcn_filter_cmdid;
+ u32 rmv_bcn_filter_cmdid;
+ u32 wow_add_wake_pattern_cmdid;
+ u32 wow_del_wake_pattern_cmdid;
+ u32 wow_enable_disable_wake_event_cmdid;
+ u32 wow_enable_cmdid;
+ u32 wow_hostwakeup_from_sleep_cmdid;
+ u32 rtt_measreq_cmdid;
+ u32 rtt_tsf_cmdid;
+ u32 vdev_spectral_scan_configure_cmdid;
+ u32 vdev_spectral_scan_enable_cmdid;
+ u32 request_stats_cmdid;
+ u32 set_arp_ns_offload_cmdid;
+ u32 network_list_offload_config_cmdid;
+ u32 gtk_offload_cmdid;
+ u32 csa_offload_enable_cmdid;
+ u32 csa_offload_chanswitch_cmdid;
+ u32 chatter_set_mode_cmdid;
+ u32 peer_tid_addba_cmdid;
+ u32 peer_tid_delba_cmdid;
+ u32 sta_dtim_ps_method_cmdid;
+ u32 sta_uapsd_auto_trig_cmdid;
+ u32 sta_keepalive_cmd;
+ u32 echo_cmdid;
+ u32 pdev_utf_cmdid;
+ u32 dbglog_cfg_cmdid;
+ u32 pdev_qvit_cmdid;
+ u32 pdev_ftm_intg_cmdid;
+ u32 vdev_set_keepalive_cmdid;
+ u32 vdev_get_keepalive_cmdid;
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
+};
+
/*
* wmi command groups.
*/
@@ -247,7 +359,9 @@ enum wmi_cmd_group {
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
-/* Command IDs and commande events. */
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and command events for MAIN FW. */
enum wmi_cmd_id {
WMI_INIT_CMDID = 0x1,
@@ -488,6 +602,217 @@ enum wmi_event_id {
WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
};
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+ WMI_10X_START_CMDID = 0x9000,
+ WMI_10X_END_CMDID = 0x9FFF,
+
+ /* initialize the wlan sub system */
+ WMI_10X_INIT_CMDID,
+
+ /* Scan specific commands */
+
+ WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+ WMI_10X_STOP_SCAN_CMDID,
+ WMI_10X_SCAN_CHAN_LIST_CMDID,
+ WMI_10X_ECHO_CMDID,
+
+ /* PDEV(physical device) specific commands */
+ WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ WMI_10X_PDEV_SET_PARAM_CMDID,
+ WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+ /* VDEV(virtual device) specific commands */
+ WMI_10X_VDEV_CREATE_CMDID,
+ WMI_10X_VDEV_DELETE_CMDID,
+ WMI_10X_VDEV_START_REQUEST_CMDID,
+ WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10X_VDEV_UP_CMDID,
+ WMI_10X_VDEV_STOP_CMDID,
+ WMI_10X_VDEV_DOWN_CMDID,
+ WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10X_VDEV_SET_PARAM_CMDID,
+ WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_10X_PEER_CREATE_CMDID,
+ WMI_10X_PEER_DELETE_CMDID,
+ WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ WMI_10X_PEER_SET_PARAM_CMDID,
+ WMI_10X_PEER_ASSOC_CMDID,
+ WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+
+ WMI_10X_BCN_TX_CMDID,
+ WMI_10X_BCN_PRB_TMPL_CMDID,
+ WMI_10X_BCN_FILTER_RX_CMDID,
+ WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10X_MGMT_TX_CMDID,
+
+ /* commands to control ba negotiation directly from host. */
+ WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10X_ADDBA_SEND_CMDID,
+ WMI_10X_ADDBA_STATUS_CMDID,
+ WMI_10X_DELBA_SEND_CMDID,
+ WMI_10X_ADDBA_SET_RESP_CMDID,
+ WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+ /* set debug log config */
+ WMI_10X_DBGLOG_CFG_CMDID,
+
+ /* DFS-specific commands */
+ WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+ /* QVIT specific command id */
+ WMI_10X_PDEV_QVIT_CMDID,
+
+ /* Offload Scan and Roaming related commands */
+ WMI_10X_ROAM_SCAN_MODE,
+ WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10X_ROAM_SCAN_PERIOD,
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10X_ROAM_AP_PROFILE,
+ WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10X_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10X_P2P_GO_SET_BEACON_IE,
+ WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+ /* AP power save specific config */
+ WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+ /* WLAN Profiling commands. */
+ WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_10X_PDEV_SUSPEND_CMDID,
+ WMI_10X_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_10X_ADD_BCN_FILTER_CMDID,
+ WMI_10X_RMV_BCN_FILTER_CMDID,
+
+ /* WOW Specific WMI commands*/
+ WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10X_WOW_ENABLE_CMDID,
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_10X_RTT_MEASREQ_CMDID,
+ WMI_10X_RTT_TSF_CMDID,
+
+ /* transmit beacon by value */
+ WMI_10X_PDEV_SEND_BCN_CMDID,
+
+ /* F/W stats */
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10X_REQUEST_STATS_CMDID,
+
+ /* GPIO Configuration */
+ WMI_10X_GPIO_CONFIG_CMDID,
+ WMI_10X_GPIO_OUTPUT_CMDID,
+
+ WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+ WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10X_READY_EVENTID,
+ WMI_10X_START_EVENTID = 0x9000,
+ WMI_10X_END_EVENTID = 0x9FFF,
+
+ /* Scan specific events */
+ WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+ WMI_10X_ECHO_EVENTID,
+ WMI_10X_DEBUG_MESG_EVENTID,
+ WMI_10X_UPDATE_STATS_EVENTID,
+
+ /* Instantaneous RSSI event */
+ WMI_10X_INST_RSSI_STATS_EVENTID,
+
+ /* VDEV specific events */
+ WMI_10X_VDEV_START_RESP_EVENTID,
+ WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10X_VDEV_RESUME_REQ_EVENTID,
+ WMI_10X_VDEV_STOPPED_EVENTID,
+
+ /* peer specific events */
+ WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+ /* beacon/mgmt specific events */
+ WMI_10X_HOST_SWBA_EVENTID,
+ WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10X_MGMT_RX_EVENTID,
+
+ /* Channel stats event */
+ WMI_10X_CHAN_INFO_EVENTID,
+
+ /* PHY Error specific WMI event */
+ WMI_10X_PHYERR_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_10X_ROAM_EVENTID,
+
+ /* matching AP found from list of profiles */
+ WMI_10X_PROFILE_MATCH,
+
+ /* debug print message used for tracing FW code while debugging */
+ WMI_10X_DEBUG_PRINT_EVENTID,
+ /* VI specific event */
+ WMI_10X_PDEV_QVIT_EVENTID,
+ /* FW code profile data in response to profile request */
+ WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+ /*RTT related event ID*/
+ WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+ WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+ /* TPC config for the current operating channel */
+ WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+ WMI_10X_GPIO_INPUT_EVENTID,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
+
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
@@ -508,6 +833,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled */
+ };
+
+ return "<unknown>";
+}
+
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
@@ -763,13 +1130,45 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
-/*
- * status consists of upper 16 bits fo int status and lower 16 bits of
- * module ID that retuned status
- */
-#define WLAN_INIT_STATUS_SUCCESS 0x0
-#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+/* This is the definition from 10.X firmware branch */
+struct wmi_service_ready_event_10x {
+ __le32 sw_version;
+ __le32 abi_version;
+
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+ __le32 num_rf_chains;
+
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+
+ struct hal_reg_capabilities hal_reg_capabilities;
+
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+ /*
+ * request to host to allocate a chunk of memory and pass it down to FW
+ * via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly (or) by DMA.
+ */
+ __le32 num_mem_reqs;
+
+ struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@@ -978,6 +1377,192 @@ struct wmi_resource_config {
__le32 max_frag_entries;
} __packed;
+struct wmi_resource_config_10x {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size. For target PCI the limit can be:
+ * 0 - default, 1 - 256 bytes
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
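For illustration only, a sketch of filling a few of the 10.X resource-config fields above in CPU order and converting them to the little-endian wire format; the struct and values here are placeholders, not recommended settings, and htole32() stands in for the kernel's cpu_to_le32():

#include <endian.h>
#include <stdint.h>

struct res_cfg_10x_sketch {
	uint32_t num_vdevs;
	uint32_t num_peers;
	uint32_t mcast2ucast_mode;
	uint32_t num_msdu_desc;
};

static void sketch_fill_res_cfg(struct res_cfg_10x_sketch *cfg)
{
	cfg->num_vdevs = htole32(8);
	cfg->num_peers = htole32(16);
	/* 2: convert mcast to ucast when a group entry exists, otherwise
	 * transmit the frame as multicast (per the comment above). */
	cfg->mcast2ucast_mode = htole32(2);
	cfg->num_msdu_desc = htole32(1024);
}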
+
+
+#define NUM_UNITS_IS_NUM_VDEVS 0x1
+#define NUM_UNITS_IS_NUM_PEERS 0x2
+
/* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
@@ -999,6 +1584,18 @@ struct wmi_init_cmd {
struct host_memory_chunk host_mem_chunks[1];
} __packed;
+/* _10x structure is from 10.X FW API */
+struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+ __le32 num_host_mem_chunks;
+
+ /*
+ * variable number of host memory chunks.
+ * This should be the last element in the structure
+ */
+ struct host_memory_chunk host_mem_chunks[1];
+} __packed;
+
/* TLV for channel list */
struct wmi_chan_list {
__le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1118,6 +1715,88 @@ struct wmi_start_scan_cmd {
*/
} __packed;
+/* This is the definition from 10.X firmware branch */
+struct wmi_start_scan_cmd_10x {
+ /* Scan ID */
+ __le32 scan_id;
+
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+
+ /* VDEV id (interface) that is requesting scan */
+ __le32 vdev_id;
+
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ */
+ /*
+ * The scanner will rest on the BSS channel for at least min_rest_time.
+ * After min_rest_time the scanner will start checking for tx/rx
+ * activity on all VDEVs. If there is no activity the scanner will
+ * switch to off channel. If there is activity the scanner will leave
+ * the radio on the BSS channel until max_rest_time expires. At
+ * max_rest_time the scanner will switch to off channel irrespective of
+ * activity. Activity is determined by the idle_time parameter.
+ */
+ __le32 max_rest_time;
+
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe requests transmission with
+ * period specified by repeat_probe_time.
+ * The number of probe requests specified depends on the ssid_list
+ * and bssid_list
+ */
+ __le32 repeat_probe_time;
+
+ /* time in msec between 2 consecutive probe requests within a set. */
+ __le32 probe_spacing_time;
+
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+
+ /*
+ * TLV (tag length value) parameters follow the scan_cmd structure.
+ * The TLV can contain a channel list, bssid list, ssid list and
+ * IEs. The TLV tags are defined above.
+ */
+} __packed;
+
+
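A small sketch of the rest-time behaviour the comments above describe, assuming the decision reduces to three checks (minimum rest, hard cap, idle timeout); the helper and its parameter names are illustrative, not firmware code:

#include <stdbool.h>
#include <stdint.h>

static bool sketch_should_leave_bss_chan(uint32_t time_on_chan_ms,
					 uint32_t idle_for_ms,
					 uint32_t min_rest_ms,
					 uint32_t max_rest_ms,
					 uint32_t idle_time_ms)
{
	if (time_on_chan_ms < min_rest_ms)
		return false;		/* must rest at least min_rest_time */
	if (time_on_chan_ms >= max_rest_ms)
		return true;		/* hard cap, regardless of activity */
	return idle_for_ms >= idle_time_ms;	/* no recent tx/rx activity */
}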
struct wmi_ssid_arg {
int len;
const u8 *ssid;
@@ -1268,7 +1947,7 @@ struct wmi_scan_event {
* good idea to pass all the fields in the RX status
* descriptor up to the host.
*/
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
__le32 channel;
__le32 snr;
__le32 rate;
@@ -1277,8 +1956,18 @@ struct wmi_mgmt_rx_hdr {
__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
-struct wmi_mgmt_rx_event {
- struct wmi_mgmt_rx_hdr hdr;
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
u8 buf[0];
} __packed;
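Since the v2 header simply wraps the v1 header and appends rssi_ctl[4], the management frame always starts right after whichever header is in use. A hedged sketch of that offset calculation with stand-in struct names (endianness conversion and length validation omitted):

#include <stddef.h>
#include <stdint.h>

struct rx_hdr_v1_sketch { uint32_t channel, snr, rate, phy_mode, buf_len, status; };
struct rx_hdr_v2_sketch { struct rx_hdr_v1_sketch v1; uint32_t rssi_ctl[4]; };

static const uint8_t *sketch_mgmt_rx_payload(const void *event, int is_v2,
					     uint32_t *buf_len)
{
	if (is_v2) {
		const struct rx_hdr_v2_sketch *h = event;

		*buf_len = h->v1.buf_len;
		return (const uint8_t *)(h + 1);	/* frame follows v2 header */
	} else {
		const struct rx_hdr_v1_sketch *h = event;

		*buf_len = h->buf_len;
		return (const uint8_t *)(h + 1);	/* frame follows v1 header */
	}
}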
@@ -1465,6 +2154,60 @@ struct wmi_csa_event {
#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+struct wmi_pdev_param_map {
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 txpower_limit2g;
+ u32 txpower_limit5g;
+ u32 txpower_scale;
+ u32 beacon_gen_mode;
+ u32 beacon_tx_mode;
+ u32 resmgr_offchan_mode;
+ u32 protection_mode;
+ u32 dynamic_bw;
+ u32 non_agg_sw_retry_th;
+ u32 agg_sw_retry_th;
+ u32 sta_kickout_th;
+ u32 ac_aggrsize_scaling;
+ u32 ltr_enable;
+ u32 ltr_ac_latency_be;
+ u32 ltr_ac_latency_bk;
+ u32 ltr_ac_latency_vi;
+ u32 ltr_ac_latency_vo;
+ u32 ltr_ac_latency_timeout;
+ u32 ltr_sleep_override;
+ u32 ltr_rx_override;
+ u32 ltr_tx_activity_timeout;
+ u32 l1ss_enable;
+ u32 dsleep_enable;
+ u32 pcielp_txbuf_flush;
+ u32 pcielp_txbuf_watermark;
+ u32 pcielp_txbuf_tmo_en;
+ u32 pcielp_txbuf_tmo_value;
+ u32 pdev_stats_update_period;
+ u32 vdev_stats_update_period;
+ u32 peer_stats_update_period;
+ u32 bcnflt_stats_update_period;
+ u32 pmf_qos;
+ u32 arp_ac_override;
+ u32 arpdhcp_ac_override;
+ u32 dcs;
+ u32 ani_enable;
+ u32 ani_poll_period;
+ u32 ani_listen_period;
+ u32 ani_ofdm_level;
+ u32 ani_cck_level;
+ u32 dyntxchain;
+ u32 proxy_sta;
+ u32 idle_ps_config;
+ u32 power_gating_sleep;
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
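A minimal sketch of how such a map entry might be consulted before issuing a set-param command, assuming the caller resolves the firmware-specific ID first and treats the 0 sentinel as unsupported; the helper and its function-pointer argument are illustrative, not ath10k code:

#include <errno.h>
#include <stdint.h>

static int sketch_set_ani_enable(uint32_t ani_enable_id, uint32_t value,
				 int (*send_set_param)(uint32_t id, uint32_t val))
{
	if (ani_enable_id == 0)		/* WMI_PDEV_PARAM_UNSUPPORTED */
		return -EOPNOTSUPP;	/* this firmware branch lacks the param */
	return send_set_param(ani_enable_id, value);
}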
enum wmi_pdev_param {
/* TX chain mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
@@ -1564,6 +2307,97 @@ enum wmi_pdev_param {
WMI_PDEV_PARAM_POWER_GATING_SLEEP,
};
+enum wmi_10x_pdev_param {
+ /* TX chain mask */
+ WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off off-channel mode, 1: turn on off-channel mode
+ */
+ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+ */
+ WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth. 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0: disable */
+ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* Aggregate sw retry threshold. 0: disable */
+ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0: disable */
+ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ /* pdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP and DHCP frames are sent */
+ WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_10X_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable Fast channel reset*/
+ WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ /* Set Bursting DUR */
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Set Bursting Enable*/
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
+
struct wmi_pdev_set_param_cmd {
__le32 param_id;
__le32 param_value;
@@ -2088,6 +2922,61 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
+struct wmi_vdev_param_map {
+ u32 rts_threshold;
+ u32 fragmentation_threshold;
+ u32 beacon_interval;
+ u32 listen_interval;
+ u32 multicast_rate;
+ u32 mgmt_tx_rate;
+ u32 slot_time;
+ u32 preamble;
+ u32 swba_time;
+ u32 wmi_vdev_stats_update_period;
+ u32 wmi_vdev_pwrsave_ageout_time;
+ u32 wmi_vdev_host_swba_interval;
+ u32 dtim_period;
+ u32 wmi_vdev_oc_scheduler_air_time_limit;
+ u32 wds;
+ u32 atim_window;
+ u32 bmiss_count_max;
+ u32 bmiss_first_bcnt;
+ u32 bmiss_final_bcnt;
+ u32 feature_wmm;
+ u32 chwidth;
+ u32 chextoffset;
+ u32 disable_htprotection;
+ u32 sta_quickkickout;
+ u32 mgmt_rate;
+ u32 protection_mode;
+ u32 fixed_rate;
+ u32 sgi;
+ u32 ldpc;
+ u32 tx_stbc;
+ u32 rx_stbc;
+ u32 intra_bss_fwd;
+ u32 def_keyid;
+ u32 nss;
+ u32 bcast_data_rate;
+ u32 mcast_data_rate;
+ u32 mcast_indicate;
+ u32 dhcp_indicate;
+ u32 unknown_dest_indicate;
+ u32 ap_keepalive_min_idle_inactive_time_secs;
+ u32 ap_keepalive_max_idle_inactive_time_secs;
+ u32 ap_keepalive_max_unresponsive_time_secs;
+ u32 ap_enable_nawds;
+ u32 mcast2ucast_set;
+ u32 enable_rtscts;
+ u32 txbf;
+ u32 packet_powersave;
+ u32 drop_unencry;
+ u32 tx_encap_type;
+ u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
/* the definition of different VDEV parameters */
enum wmi_vdev_param {
/* RTS Threshold */
@@ -2219,6 +3108,121 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_TX_ENCAP_TYPE,
};
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+ /* RTS Threshold */
+ WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_10X_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_10X_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_10X_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_10X_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* WMM enabled/disabled */
+ WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_10X_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_10X_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_10X_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_10X_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_10X_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_10X_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_10X_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_10X_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_10X_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum amount of idle time before the AP considers the STA inactive */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+ WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ /* Enable/Disable RTS-CTS */
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@@ -3000,7 +4004,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
@@ -3013,7 +4016,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3022,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
- u32 value);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3043,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
const u8 *bssid);
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_param param_id, u32 param_value);
+ u32 param_id, u32 param_value);
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg);
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3066,11 +4067,13 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e9bc9e616b6..79bffe165ca 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
-
-
- bcfg = pdev->dev.platform_data;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
/*Initialization*/
static int ath_ahb_probe(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int ret = 0;
u32 reg;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
static int ath_ahb_remove(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;
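The hunks above switch from dereferencing pdev->dev.platform_data to the dev_get_platdata() accessor. A minimal probe sketch of that pattern, with my_board_config/my_probe as illustrative names only:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

struct my_board_config { int id; };

static int my_probe(struct platform_device *pdev)
{
	/* Fetch platform data via the accessor instead of poking the field */
	struct my_board_config *cfg = dev_get_platdata(&pdev->dev);

	if (!cfg) {
		dev_err(&pdev->dev, "no platform data specified\n");
		return -EINVAL;
	}

	return 0;
}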
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 48161edec8d..69f58b073e8 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
ah->stats.tx_bytes_count += skb->len;
info = IEEE80211_SKB_CB(skb);
+ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+ memcpy(info->status.rates, bf->rates, size);
+
tries[0] = info->status.rates[0].count;
tries[1] = info->status.rates[1].count;
tries[2] = info->status.rates[2].count;
ieee80211_tx_info_clear_status(info);
- size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
- memcpy(info->status.rates, bf->rates, size);
-
for (i = 0; i < ts->ts_final_idx; i++) {
struct ieee80211_tx_rate *r =
&info->status.rates[i];
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index ce86f158423..ba200b24be6 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -661,7 +661,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
- /* Currently this is not much usefull since we treat
+ /* Currently this is not much useful since we treat
* all queues the same way if we get a TXURN (update
* tx trigger level) but we might need it later on*/
if (pisr & AR5K_ISR_TXURN)
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 98a886154d9..05debf700a8 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,8 +22,7 @@
#define ATH6KL_MAX_IE 256
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 74369de00fb..ca9ba005f28 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
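Dropping the redundant extern keeps the __printf(m, n) annotation, which tells the compiler where the format string and variadic arguments sit so callers get -Wformat checking. A plain-C sketch of the same idea using the underlying attribute (my_printk is an illustrative name, not the ath6kl helper):

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 2, 3)))
static int my_printk(const char *level, const char *fmt, ...)
{
	va_list ap;
	int ret;

	(void)level;			/* unused in this sketch */
	va_start(ap, fmt);
	ret = vfprintf(stderr, fmt, ap);	/* stand-in for the real log sink */
	va_end(ap);
	return ret;
}

/* my_printk("err", "%d\n", "oops"); would now trigger a -Wformat warning. */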
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index a2c8ff80979..14cab1403dd 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -60,7 +60,7 @@
/* disable credit flow control on a specific service */
#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
-#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
/* connect response status codes */
#define HTC_SERVICE_SUCCESS 0
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7944c25c9a4..32f139e2e89 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,6 +84,26 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
+config ATH9K_TX99
+ bool "Atheros ath9k TX99 testing support"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems undergoing
+ certification testing and evaluation in a controlled environment.
+ Enabling this will only enable TX99 support, all other modes of
+ operation will be disabled.
+
+ TX99 support enables Specific Absorption Rate (SAR) testing.
+ SAR is the unit of measurement for the amount of radio frequency (RF)
+ absorbed by the body when using a wireless device. The RF exposure
+ limits used are expressed in terms of SAR, which is a measure
+ of the electric and magnetic field strength and power density for
+ transmitters operating at frequencies from 300 kHz to 100 GHz.
+ Regulatory bodies around the world require that wireless devices
+ be evaluated to meet the RF exposure limits set forth in the
+ governmental SAR regulations.
+
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 75ee9e7704c..6205ef5a932 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -14,9 +14,7 @@ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
- dfs.o \
- dfs_pattern_detector.o \
- dfs_pri_detector.o
+ dfs.o
ath9k-$(CONFIG_PM_SLEEP) += wow.o
obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 072e4b53106..2dff2765769 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
- pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
struct ath_hw *ah;
char hw_name[64];
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index be466b0ef7a..d28923b7435 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -338,10 +338,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
ath_dbg(common, ANI,
- "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
* restore historical levels for this channel
*/
ath_dbg(common, ANI,
- "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index dd1cc73d794..bd048cc69a3 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+ div_ant_conf->lna1_lna2_switch_delta)
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
else
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x39;
+ ant_conf->fast_div_bias = 0x3f;
break;
case 0x13: /* LNA2 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x4;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x6;
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x23: /* LNA1 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x6;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_sub = alt_rssi_avg;
antcomb->scan = false;
if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
/* use LNA2 as main LNA */
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
(antcomb->rssi_add > antcomb->rssi_sub)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 08656473c63..ff415e863ee 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+ val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
-
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
@@ -667,14 +666,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_FC_DYN2040_EN;
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
REG_WRITE(ah, AR_PHY_TURBO, phymode);
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
ENABLE_REGWRITE_BUFFER(ah);
@@ -692,31 +690,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
int i, regWrites = 0;
u32 modesIndex, freqIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
+ if (IS_CHAN_5GHZ(chan)) {
freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ } else {
freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
}
/*
@@ -815,8 +794,10 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (!AR_SREV_9280_20_OR_LATER(ah))
rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 9f589744a9f..cdc74005650 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -33,15 +33,12 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
bool supported = false;
switch (ah->supp_cals & cal_type) {
case IQ_MISMATCH_CAL:
- /* Run IQ Mismatch for non-CCK only */
- if (!IS_CHAN_B(chan))
- supported = true;
+ supported = true;
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
- if (!IS_CHAN_B(chan) &&
- !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
IS_CHAN_HT20(chan)))
supported = true;
break;
@@ -671,7 +668,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
- nfcal_pending = ah->caldata->nfcal_pending;
+ nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (currCal && !nfcal &&
(currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index fb61b081d17..5c95fd9e9c9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -419,28 +419,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
u32 modesIndex;
int i;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
ENABLE_REGWRITE_BUFFER(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 1fc1fa955d4..f087117b2e6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
if (IS_CHAN_HT40(ah->curchan))
nfarray[3] = sign_extend32(nf, 8);
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ if (!(ah->rxchainmask & BIT(1)))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
AR_PHY_9285_FAST_DIV_BIAS_S;
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -679,6 +680,26 @@ static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
}
}
+static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9002_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -700,6 +721,8 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
#endif
+ ops->tx99_start = ar9002_hw_tx99_start;
+ ops->tx99_stop = ar9002_hw_tx99_stop;
ar9002_hw_set_nf_limits(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6988e1d081f..22934d3ca54 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
- if (caldata)
- caldata->done_txiqcal_once = is_reusable;
+ if (caldata) {
+ if (is_reusable)
+ set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ else
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ }
return;
}
@@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
}
static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+ return;
+
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i)))
continue;
ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
}
+
+ if (caldata)
+ set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+ if (IS_CHAN_2GHZ(chan)) {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ } else {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ }
+ }
}
static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
- if (caldata->done_txclcal_once) {
+ if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
continue;
@@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
caldata->tx_clcal[i][j] =
REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
}
- caldata->done_txclcal_once = true;
+ set_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
}
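These hunks replace individual bool fields (done_txiqcal_once, done_txclcal_once, nfcal_pending) with named bits in a shared cal_flags word manipulated through set_bit()/clear_bit()/test_bit(). A minimal sketch of that pattern with illustrative flag and struct names, not the driver's full flag set:

#include <linux/bitops.h>
#include <linux/types.h>

enum { SKETCH_TXIQCAL_DONE, SKETCH_TXCLCAL_DONE, SKETCH_SW_PKDET_DONE };

struct sketch_cal_data {
	unsigned long cal_flags;
};

static void sketch_mark_txiqcal(struct sketch_cal_data *cal, bool is_reusable)
{
	if (is_reusable)
		set_bit(SKETCH_TXIQCAL_DONE, &cal->cal_flags);
	else
		clear_bit(SKETCH_TXIQCAL_DONE, &cal->cal_flags);
}

static bool sketch_txiqcal_done(struct sketch_cal_data *cal)
{
	return test_bit(SKETCH_TXIQCAL_DONE, &cal->cal_flags);
}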
@@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
ar9003_hw_rtt_clear_hist(ah);
}
- if (rtt && !run_rtt_cal) {
- agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
- agc_supp_cals &= agc_ctrl;
- agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
- AR_PHY_AGC_CONTROL_FLTR_CAL |
- AR_PHY_AGC_CONTROL_PKDET_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ if (rtt) {
+ if (!run_rtt_cal) {
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_supp_cals &= agc_ctrl;
+ agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ } else {
+ if (ah->ah_flags & AH_FASTCC)
+ run_agc_cal = true;
+ }
}
if (ah->enabled_cals & TX_CL_CAL) {
- if (caldata && caldata->done_txclcal_once)
+ if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
else {
@@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
* AGC calibration
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- if (caldata && !caldata->done_txiqcal_once)
+ if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- } else if (caldata && !caldata->done_txiqcal_once) {
+ } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
run_agc_cal = true;
sep_iq_cal = true;
}
@@ -1099,6 +1135,15 @@ skip_tx_iqcal:
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+ /* Disable BB_active */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- ar9003_hw_do_manual_peak_cal(ah, chan);
+ ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+ }
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+ udelay(5);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@ skip_tx_iqcal:
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
- else if (caldata && caldata->done_txiqcal_once)
+ else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
- if (!ath9k_hw_rfbus_req(ah))
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah),
"Could not stop baseband\n");
- else
+ } else {
ar9003_hw_rtt_fill_hist(ah);
+ if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+ ar9003_hw_rtt_load_hist(ah);
+ }
+
ath9k_hw_rfbus_done(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f4864807e15..1ec52356b5a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
- return eep->base_ext1.ant_div_control;
+ if (AR_SREV_9565(ah))
+ return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+ else
+ return eep->base_ext1.ant_div_control;
case EEP_ANTENNA_GAIN_5G:
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct ar9300_base_eep_hdr *pBase;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
@@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- ah->eeprom.ar9300_eep.macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ ah->eeprom.ar9300_eep.macAddr);
out:
if (len > size)
len = size;
@@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9565(ah)) {
if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+ /* Force WLAN LNA diversity ON */
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+ /* Force WLAN LNA diversity OFF */
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
}
}
@@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
- if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ && common->bt_ant_diversity)
regval |= AR_FAST_DIV_ENABLE;
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
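The dump-eeprom hunks above switch from snprintf() to scnprintf() when appending to a bounded buffer: scnprintf() returns the number of characters actually stored (excluding the trailing NUL), so accumulating its return value into len can never push buf + len past the end of the buffer, which an over-long snprintf() return value can. A minimal, hypothetical sketch of that pattern (the helper and labels below are illustrative, not from ath9k):

#include <linux/kernel.h>

/* Hypothetical helper, not part of the driver: append two labelled
 * values to a fixed-size buffer using the scnprintf() accumulation
 * pattern shown in the hunks above. */
static ssize_t example_dump(char *buf, size_t size)
{
	ssize_t len = 0;

	len += scnprintf(buf + len, size - len, "%20s : %10d\n", "Tx Gain", 4);
	len += scnprintf(buf + len, size - len, "%20s : %10d\n", "Rx Gain", 2);

	return len;
}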
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 75d4fb41962..0e5daa58a4f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -52,6 +52,8 @@
#define AR9300_PAPRD_SCALE_2 0x70000000
#define AR9300_PAPRD_SCALE_2_S 28
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
* schemes. In open loop power control, it is not really needed, but for
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 608bb4824e2..20e49095db2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -187,17 +187,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
- /* Load PCIE SERDES settings from INI */
-
- /* Awake Setting */
-
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9485_1_1_pcie_phy_clkreq_disable_L1);
-
- /* Sleep Setting */
-
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ if (ah->config.no_pll_pwrsave) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+ }
} else if (AR_SREV_9462_21(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar9462_2p1_mac_core);
@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8dd069259e7..7b94a6c7db3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_done = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ clear_bit(RTT_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
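The hunks above (and the matching ones in ar9003_calib.c and ar9003_rtt.c) replace the separate done_txiqcal_once/done_txclcal_once/rtt_done booleans with bits in a single cal_flags word driven by test_bit()/set_bit()/clear_bit(). A hypothetical, simplified sketch of that pattern; the enum and struct below are illustrative only, since the real definitions live in a header that is not part of this diff:

#include <linux/bitops.h>

/* Illustrative flag bits and calibration-state container. */
enum example_cal_flags {
	EXAMPLE_TXIQCAL_DONE,
	EXAMPLE_TXCLCAL_DONE,
	EXAMPLE_RTT_DONE,
};

struct example_caldata {
	unsigned long cal_flags;	/* replaces the per-feature bools */
};

static void example_reset_cal_state(struct example_caldata *cd)
{
	/* Equivalent of the three clear_bit() calls in the hunk above. */
	clear_bit(EXAMPLE_TXIQCAL_DONE, &cd->cal_flags);
	clear_bit(EXAMPLE_TXCLCAL_DONE, &cd->cal_flags);
	clear_bit(EXAMPLE_RTT_DONE, &cd->cal_flags);
}

static bool example_txiqcal_done(const struct example_caldata *cd)
{
	return test_bit(EXAMPLE_TXIQCAL_DONE, &cd->cal_flags);
}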
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e897648d323..d39b79f5e84 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -551,8 +551,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_GC_DYN2040_EN;
/* Configure control (primary) channel at +-10MHz */
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_GC_DYN2040_PRI_CH;
}
@@ -565,7 +564,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
/* Configure MAC for 20/40 operation */
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
/* global transmit timeout (25 TUs default)*/
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
* MAC addr only will fail.
*/
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
- REG_WRITE(ah, AR_PCU_MISC_MODE2,
- val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
-
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ val |= AR_AGG_WEP_ENABLE_FIX |
+ AR_AGG_WEP_ENABLE |
+ AR_PCU_MISC_MODE2_CFP_IGNORE;
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,43 +681,72 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
{
int ret;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- if (chan->channel <= 5350)
- ret = 1;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 3;
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 7;
else
- ret = 5;
- break;
+ return 8;
+ }
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- if (chan->channel <= 5350)
- ret = 2;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 4;
- else
- ret = 6;
- break;
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- ret = 8;
- break;
+ if (IS_CHAN_HT40(chan))
+ ret++;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- ret = 7;
- break;
+ return ret;
+}
- default:
- ret = -EINVAL;
+static void ar9003_doubler_fix(struct ath_hw *ah)
+{
+ if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) {
+ REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
+ REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
+ REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
+
+ udelay(200);
+
+ REG_CLR_BIT(ah, AR_PHY_65NM_CH0_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
+ REG_CLR_BIT(ah, AR_PHY_65NM_CH1_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
+ REG_CLR_BIT(ah, AR_PHY_65NM_CH2_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
+
+ udelay(1);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
+
+ udelay(200);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH12,
+ AR_PHY_65NM_CH0_SYNTH12_VREFMUL3, 0xf);
+
+ REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, 0,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
+ REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, 0,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
+ REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, 0,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
}
-
- return ret;
}
static int ar9003_hw_process_ini(struct ath_hw *ah,
@@ -728,28 +755,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
unsigned int regWrites = 0, i;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
/*
* SOC, MAC, BB, RADIO initvals.
@@ -765,6 +774,8 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
modesIndex);
}
+ ar9003_doubler_fix(ah);
+
/*
* RXGAIN initvals.
*/
@@ -847,8 +858,10 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1287,11 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1387,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 1;
} else if (AR_SREV_9485(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
} else if (AR_SREV_9565(ah)) {
- antconf->lna1_lna2_delta = -3;
+ antconf->lna1_lna2_switch_delta = 3;
+ antconf->lna1_lna2_delta = -9;
antconf->div_group = 3;
} else {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -1489,17 +1505,24 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
} else if (AR_SREV_9565(ah)) {
if (enable) {
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
- REG_SET_BIT(ah, AR_PHY_RESTART,
- AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
@@ -1526,28 +1549,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
unsigned int regWrites = 0;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
if (modesIndex == ah->modes_index) {
*ini_reloaded = false;
@@ -1662,6 +1667,98 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
}
}
+static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9003_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
+{
+ static s16 p_pwr_array[ar9300RateSize] = { 0 };
+ unsigned int i;
+
+ if (txpower <= MAX_RATE_POWER) {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = txpower;
+ } else {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = MAX_RATE_POWER;
+ }
+
+ REG_WRITE(ah, 0xa458, 0);
+
+ REG_WRITE(ah, 0xa3c0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3cc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3d0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
+ REG_WRITE(ah, 0xa3d4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
+ REG_WRITE(ah, 0xa3e4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
+ REG_WRITE(ah, 0xa3e8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
+ REG_WRITE(ah, 0xa3d8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
+ REG_WRITE(ah, 0xa3dc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
+ REG_WRITE(ah, 0xa3ec,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1701,6 +1798,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
#endif
+ ops->tx99_start = ar9003_hw_tx99_start;
+ ops->tx99_stop = ar9003_hw_tx99_stop;
+ ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6fd752321e3..2af667beb27 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -343,8 +343,12 @@
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
@@ -652,13 +656,24 @@
#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
#define AR_PHY_65NM_CH0_SYNTH7 0x16098
+#define AR_PHY_65NM_CH0_SYNTH12 0x160ac
#define AR_PHY_65NM_CH0_BIAS1 0x160c0
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
#define AR_PHY_65NM_CH0_BIAS4 0x160cc
+#define AR_PHY_65NM_CH0_RXTX2 0x16104
+#define AR_PHY_65NM_CH1_RXTX2 0x16504
+#define AR_PHY_65NM_CH2_RXTX2 0x16904
#define AR_PHY_65NM_CH0_RXTX4 0x1610c
#define AR_PHY_65NM_CH1_RXTX4 0x1650c
#define AR_PHY_65NM_CH2_RXTX4 0x1690c
+#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000
+#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S 2
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3
+
#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
(((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 74de3539c2c..934418872e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
}
}
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+ int agc, caldac;
+
+ if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ return;
+
+ if ((index != 5) || (chain >= 2))
+ return;
+
+ agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+ if (!agc)
+ return;
+
+ caldac = ah->caldata->caldac[chain];
+ ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+ caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+ ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
{
u32 val;
@@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ah->caldata->rtt_table[chain][i] =
ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+ ar9003_hw_patch_rtt(ah, i, chain);
+
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"RTT value at idx %d, chain %d is: 0x%x\n",
i, chain, ah->caldata->rtt_table[chain][i]);
}
}
- ah->caldata->rtt_done = true;
+ set_bit(RTT_DONE, &ah->caldata->cal_flags);
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
}
if (ah->caldata)
- ah->caldata->rtt_done = false;
+ clear_bit(RTT_DONE, &ah->caldata->cal_flags);
}
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ah->caldata)
return false;
- if (!ah->caldata->rtt_done)
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ }
+
+ if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
return false;
ar9003_hw_rtt_enable(ah);
- ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ ar9003_hw_rtt_set_mask(ah, 0x30);
+ else
+ ar9003_hw_rtt_set_mask(ah, 0x10);
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index 4dbc294df7e..57fc5f459d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -361,7 +361,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -400,7 +400,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
@@ -472,7 +472,7 @@ static const u32 ar9462_2p1_radio_postamble[][5] = {
static const u32 ar9462_2p1_soc_preamble[][2] = {
/* Addr allmodes */
- {0x000040a4, 0x00a0c1c9},
+ {0x000040a4, 0x00a0c9c9},
{0x00007020, 0x00000000},
{0x00007034, 0x00000002},
{0x00007038, 0x000004c2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 88ff1d7b53a..7c1845221e1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,13 +20,16 @@
/* AR9485 1.1 */
-#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
-
-static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18012e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
@@ -34,6 +37,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
{0x00009e00, 0x037216a0},
{0x00009e04, 0x00182020},
{0x00009e18, 0x00000000},
+ {0x00009e20, 0x000003a8},
{0x00009e2c, 0x00004121},
{0x00009e44, 0x02282324},
{0x0000a000, 0x00060005},
@@ -174,7 +178,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +204,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +267,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +306,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,6 +366,100 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -427,7 +546,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
@@ -521,12 +640,15 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
-
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
{0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +665,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
{0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
{0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
{0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+ {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +961,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x00009e00, 0x03721b20},
{0x00009e04, 0x00082020},
{0x00009e18, 0x0300501e},
+ {0x00009e20, 0x000003ba},
{0x00009e2c, 0x00002e21},
{0x00009e44, 0x02182324},
{0x0000a000, 0x00060005},
@@ -955,20 +1094,6 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x0000a1fc, 0x00000296},
};
-static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18052e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
-static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18053e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
static const u32 ar9485_1_1_soc_preamble[][2] = {
/* Addr allmodes */
{0x00004014, 0xba280400},
@@ -1001,7 +1126,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1144,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1028,13 +1152,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18013e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
static const u32 ar9485_1_1_radio_postamble[][2] = {
/* Addr allmodes */
{0x0001609c, 0x0b283f31},
@@ -1206,6 +1323,25 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x1801265e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index e85a8b076c2..a8c757b6124 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0xaaaaaa6e},
+ {0x0000a3ac, 0x3c466455},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a404, 0x00000000},
{0x0000a408, 0x0e79e5c6},
{0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
+ {0x0000a414, 0x1ce739c5},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
+ {0x0000a41c, 0x1ce739c5},
{0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
+ {0x0000a424, 0x1ce739c5},
{0x0000a428, 0x000001ce},
{0x0000a42c, 0x1ce739ce},
{0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
/* Addr allmodes */
{0x00004050, 0x00300300},
{0x0000406c, 0x00100000},
+ {0x00009e20, 0x000003b6},
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
+static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2ee35f677c0..60a5da53668 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -64,7 +64,6 @@ struct ath_node;
struct ath_config {
u16 txpowlimit;
- u8 cabqReadytime;
};
/*************************/
@@ -207,6 +206,14 @@ struct ath_frame_info {
u8 baw_tracked : 1;
};
+struct ath_rxbuf {
+ struct list_head list;
+ struct sk_buff *bf_mpdu;
+ void *bf_desc;
+ dma_addr_t bf_daddr;
+ dma_addr_t bf_buf_addr;
+};
+
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -307,7 +314,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
- struct ath_buf *buf_hold;
+ struct ath_rxbuf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@@ -459,8 +466,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
#define ATH_DUMP_BTCOEX(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, \
- "%20s : %10d\n", _s, (_val)); \
+ len += scnprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
} while (0)
enum bt_op_flags {
@@ -581,7 +588,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
-#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
/* Main driver core */
/********************/
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW 0x0008
-#define ATH9K_PCI_BT_ANT_DIV 0x0010
-#define ATH9K_PCI_D3_L1_WAR 0x0020
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_CUS252 0x0008
+#define ATH9K_PCI_WOW 0x0010
+#define ATH9K_PCI_BT_ANT_DIV 0x0020
+#define ATH9K_PCI_D3_L1_WAR 0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
+#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
/*
* Default cache line size, in bytes.
@@ -769,6 +779,11 @@ struct ath_softc {
enum spectral_mode spectral_mode;
struct ath_spec_scan spec_config;
+ struct ieee80211_vif *tx99_vif;
+ struct sk_buff *tx99_skb;
+ bool tx99_state;
+ s16 tx99_power;
+
#ifdef CONFIG_PM_SLEEP
atomic_t wow_got_bmiss_intr;
atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
@@ -877,6 +892,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
*/
enum ath_fft_sample_type {
ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
};
struct fft_sample_tlv {
@@ -903,6 +919,39 @@ struct fft_sample_ht20 {
u8 data[SPECTRAL_HT20_NUM_BINS];
} __packed;
+struct fft_sample_ht20_40 {
+ struct fft_sample_tlv tlv;
+
+ u8 channel_type;
+ __be16 freq;
+
+ s8 lower_rssi;
+ s8 upper_rssi;
+
+ __be64 tsf;
+
+ s8 lower_noise;
+ s8 upper_noise;
+
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+
+ u8 lower_max_index;
+ u8 upper_max_index;
+
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+
+ u8 max_exp;
+
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+int ath9k_tx99_init(struct ath_softc *sc);
+void ath9k_tx99_deinit(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+
void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
@@ -924,7 +973,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-bool ath9k_uses_beacons(int type);
void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
enum spectral_mode spectral_mode);
@@ -952,7 +1000,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ath9k_vif_iter_data *iter_data);
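The new struct fft_sample_ht20_40 added to ath9k.h above uses fixed-width big-endian fields (__be16/__be64) and __packed so the record layout stays stable when samples are exported to user space. A hypothetical sketch of filling such a record (a cut-down struct, not the driver's actual one), showing the cpu_to_be*() conversions that keep the wire format endian-independent:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Cut-down, illustrative record; the real struct carries many more fields. */
struct example_fft_record {
	__be16 freq;
	__be64 tsf;
	s8 lower_rssi;
	s8 upper_rssi;
} __packed;

static void example_fill_record(struct example_fft_record *rec,
				u16 freq, u64 tsf, s8 lower, s8 upper)
{
	rec->freq = cpu_to_be16(freq);	/* CPU-endian -> big-endian */
	rec->tsf = cpu_to_be64(tsf);
	rec->lower_rssi = lower;
	rec->upper_rssi = upper;
}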
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b5c16b3a37b..17be35392bb 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
sc->beacon.bmisscnt++;
+ ath9k_hw_check_nav(ah);
+
if (!ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 5e8219a91e2..278365b8a89 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -63,13 +63,13 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf)
{
s8 noise = ATH_DEFAULT_NOISE_FLOOR;
- if (chan && chan->noisefloor) {
- s8 delta = chan->noisefloor -
- ATH9K_NF_CAL_NOISE_THRESH -
+ if (nf) {
+ s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
ath9k_hw_get_default_nf(ah, chan);
if (delta > 0)
noise += delta;
@@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
- (cal->nfcal_interference ?
+ (test_bit(NFCAL_INTF, &cal->cal_flags) ?
"not corrected (due to interference)" :
"correcting to MAX"));
@@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* we bypass this limit here in order to better deal
* with our environment.
*/
- if (!cal->nfcal_interference)
+ if (!test_bit(NFCAL_INTF, &cal->cal_flags))
h[i].privNF = limit->max;
}
}
@@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* Re-enable the enforcement of the NF maximum again.
*/
if (!high_nf_mid)
- cal->nfcal_interference = false;
+ clear_bit(NFCAL_INTF, &cal->cal_flags);
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (!ah->caldata)
@@ -208,7 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, conf->chandef.chan->center_freq);
+ currCal->calData->calType, ah->curchan->chan->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
int32_t val;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
if (ah->caldata)
@@ -252,7 +250,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (chainmask & (1 << i)) {
s16 nfval;
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
if (h)
@@ -314,7 +312,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < NUM_NF_READINGS; i++) {
if (chainmask & (1 << i)) {
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,10 +389,10 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
h = caldata->nfCalHist;
- caldata->nfcal_pending = false;
+ clear_bit(NFCAL_PENDING, &caldata->cal_flags);
ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
chan->noisefloor = h[0].privNF;
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
return true;
}
EXPORT_SYMBOL(ath9k_hw_getnf);
@@ -408,7 +406,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags;
- ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
* the baseband update the internal NF value itself, similar to
* what is being done after a full reset.
*/
- if (!caldata->nfcal_pending)
+ if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
- caldata->nfcal_interference = true;
+ set_bit(NFCAL_INTF, &caldata->cal_flags);
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3d70b8c2bcd..b8ed95e9a33 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -116,7 +116,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
void ath9k_hw_reset_calibration(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf);
#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index d3063c21e16..a7e5a05b2ef 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,103 +49,64 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
-static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
-{
- u32 chanmode = 0;
-
- switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_G_HT40PLUS;
- else
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_A_HT40PLUS;
- else
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
/*
* Update internal channel flags.
*/
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef)
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct cfg80211_chan_def *chandef)
{
- ichan->channel = chandef->chan->center_freq;
- ichan->chan = chandef->chan;
-
- if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
+ struct ieee80211_channel *chan = chandef->chan;
+ u16 flags = 0;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ flags |= CHANNEL_5GHZ;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_5:
- ichan->channelFlags |= CHANNEL_QUARTER;
+ flags |= CHANNEL_QUARTER;
break;
case NL80211_CHAN_WIDTH_10:
- ichan->channelFlags |= CHANNEL_HALF;
+ flags |= CHANNEL_HALF;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
+ flags |= CHANNEL_HT;
+ break;
case NL80211_CHAN_WIDTH_40:
- ichan->chanmode = ath9k_get_extchanmode(chandef);
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+ else
+ flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
break;
default:
WARN_ON(1);
}
+
+ ichan->channelFlags = flags;
}
-EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
/*
* Get the internal channel reference.
*/
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah)
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef)
{
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
u8 chan_idx;
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
+ ath9k_cmn_update_ichannel(channel, chandef);
return channel;
}
-EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
int ath9k_cmn_count_streams(unsigned int chainmask, int max)
{
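
[Editor's note] With the chanmode enum gone, common.c describes a channel purely through channelFlags bits derived from the cfg80211 chandef, and ath9k_cmn_get_curchannel() is replaced by ath9k_cmn_get_channel(), which takes the chandef explicitly instead of implicitly reading hw->conf. A hedged call-site sketch of the new entry point; the chandef source shown here is only an example, real callers pass whatever definition they are switching to:

	struct ath9k_channel *ch;

	/* old: ch = ath9k_cmn_get_curchannel(hw, ah);  -- always used hw->conf.chandef */
	ch = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
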
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e039bcbfbd7..eb85e1bdca8 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -43,10 +43,9 @@
(((x) + ((mul)/2)) / (mul))
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef);
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c088744a6bf..83a2c59f680 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
return -ENOMEM;
if (common->disable_ani) {
- len += snprintf(buf + len, size - len, "%s: %s\n",
- "ANI", "DISABLED");
+ len += scnprintf(buf + len, size - len, "%s: %s\n",
+ "ANI", "DISABLED");
goto exit;
}
- len += snprintf(buf + len, size - len, "%15s: %s\n",
- "ANI", "ENABLED");
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "ANI RESET", ah->stats.ast_ani_reset);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR UP", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR DOWN", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK ON", ah->stats.ast_ani_ccklow);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP UP", ah->stats.ast_ani_stepup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %s\n",
+ "ANI", "ENABLED");
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "ANI RESET", ah->stats.ast_ani_reset);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR UP", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR DOWN", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP UP", ah->stats.ast_ani_stepup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "CCK ERRORS", ah->stats.ast_ani_cckerrs);
exit:
if (len > size)
len = size;
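
[Editor's note] Every debugfs dump in this file switches from snprintf() to scnprintf(). The difference matters when output is accumulated into a fixed buffer: snprintf() returns the length the string would have had, so once the buffer fills up "len" keeps growing past "size" and the next "size - len" underflows, whereas scnprintf() returns only what was actually stored. A minimal sketch of the accumulation pattern, using a generic counter array rather than the driver's stats (scnprintf() is declared in <linux/kernel.h>):

static int dump_counters(char *buf, size_t size, const unsigned int *ctr, int n)
{
	int len = 0;
	int i;

	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, size - len,
				 "counter%d: %u\n", i, ctr[i]);

	return len;	/* never exceeds size - 1 */
}
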
@@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
return -ENOMEM;
if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
- len += snprintf(buf + len, size - len, "%s\n",
- "Antenna Diversity Combining is disabled");
+ len += scnprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
goto exit;
}
ath9k_ps_wakeup(sc);
ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
- len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
- lna_conf_str[div_ant_conf.main_lna_conf]);
- len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
- lna_conf_str[div_ant_conf.alt_lna_conf]);
- len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
- as_main->rssi_avg);
- len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
- as_alt->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
- len += snprintf(buf + len, size - len, "-------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "TOTAL COUNT",
- as_main->recv_cnt,
- as_alt->recv_cnt);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
-
- len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
- len += snprintf(buf + len, size - len, "--------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += scnprintf(buf + len, size - len, "-------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += scnprintf(buf + len, size - len, "--------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
exit:
if (len > size)
@@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Raw DMA Debug values:\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
- i, val[i]);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
+ i, val[i]);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
if (i == 8) {
@@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "%2d %2x %1x %2x %2x\n",
- i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
- (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
- val[2] & (0x7 << (i * 3)) >> (i * 3),
- (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+ val[2] & (0x7 << (i * 3)) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
- REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
+ REG_READ_D(ah, AR_OBS_BUS_1));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
#define PR_IS(a, s) \
do { \
- len += snprintf(buf + len, mxlen - len, \
- "%21s: %10u\n", a, \
- sc->debug.stats.istats.s); \
+ len += scnprintf(buf + len, mxlen - len, \
+ "%21s: %10u\n", a, \
+ sc->debug.stats.istats.s); \
} while (0)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
- len += snprintf(buf + len, mxlen - len,
- "SYNC_CAUSE stats:\n");
+ len += scnprintf(buf + len, mxlen - len,
+ "SYNC_CAUSE stats:\n");
PR_IS("Sync-All", sync_cause_all);
PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += snprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += snprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
+ len += scnprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += scnprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += scnprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
ath_txq_unlock(sc, txq);
return len;
@@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
+ len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
len += print_queue(sc, txq, buf + len, size - len);
}
- len += snprintf(buf + len, size - len, "(CAB): ");
+ len += scnprintf(buf + len, size - len, "(CAB): ");
len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
if (len > size)
@@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int reg;
u32 rxfilter;
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID: %pM\n", common->curbssid);
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID-MASK: %pM\n", common->bssidmask);
- len += snprintf(buf + len, sizeof(buf) - len,
- "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID: %pM\n", common->curbssid);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID-MASK: %pM\n", common->bssidmask);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "OPMODE: %s\n",
+ ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, sizeof(buf) - len,
- "RXFILTER: 0x%x", rxfilter);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "RXFILTER: 0x%x", rxfilter);
if (rxfilter & ATH9K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (rxfilter & ATH9K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (rxfilter & ATH9K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (rxfilter & ATH9K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (rxfilter & ATH9K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (rxfilter & ATH9K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (rxfilter & ATH9K_RX_FILTER_PHYERR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
- len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
reg = sc->sc_ah->imask;
- len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "INTERRUPT-MASK: 0x%x", reg);
if (reg & ATH9K_INT_SWBA)
- len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
+ len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
if (reg & ATH9K_INT_BMISS)
- len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
if (reg & ATH9K_INT_CST)
- len += snprintf(buf + len, sizeof(buf) - len, " CST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CST");
if (reg & ATH9K_INT_RX)
- len += snprintf(buf + len, sizeof(buf) - len, " RX");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RX");
if (reg & ATH9K_INT_RXHP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
if (reg & ATH9K_INT_RXLP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
- len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
ath9k_calculate_iter_data(hw, NULL, &iter_data);
- len += snprintf(buf + len, sizeof(buf) - len,
- "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
- " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
- iter_data.naps, iter_data.nstations, iter_data.nmeshes,
- iter_data.nwds, iter_data.nadhocs,
- sc->nvifs, sc->nbcnvifs);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs,
+ sc->nvifs, sc->nbcnvifs);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Hang",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Watchdog",
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Fatal HW Error",
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX HW error",
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX Path Hang",
- sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "PLL RX Hang",
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MCI Reset",
- sc->debug.stats.reset[RESET_TYPE_MCI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Hang",
+ sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Watchdog",
+ sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Fatal HW Error",
+ sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX HW error",
+ sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX Path Hang",
+ sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "PLL RX Hang",
+ sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MCI Reset",
+ sc->debug.stats.reset[RESET_TYPE_MCI]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
#define RXS_ERR(s, e) \
do { \
- len += snprintf(buf + len, size - len, \
- "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.e); \
+ len += scnprintf(buf + len, size - len, \
+ "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.e);\
} while (0)
struct ath_softc *sc = file->private_data;
@@ -1048,6 +1050,9 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
char buf[32];
ssize_t len;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
@@ -1439,22 +1444,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "Channel Noise Floor : %d\n", ah->noise);
- len += snprintf(buf + len, size - len,
- "Chain | privNF | # Readings | NF Readings\n");
+ len += scnprintf(buf + len, size - len,
+ "Channel Noise Floor : %d\n", ah->noise);
+ len += scnprintf(buf + len, size - len,
+ "Chain | privNF | # Readings | NF Readings\n");
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
continue;
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
- len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
- i, h[i].privNF, nread);
+ len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+ i, h[i].privNF, nread);
for (j = 0; j < nread; j++)
- len += snprintf(buf + len, size - len,
- " %d", h[i].nfCalBuffer[j]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += scnprintf(buf + len, size - len, "\n");
}
if (len > size)
@@ -1543,8 +1548,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!sc->sc_ah->common.btcoex_enabled) {
- len = snprintf(buf, size, "%s\n",
- "BTCOEX is disabled");
+ len = scnprintf(buf, size, "%s\n",
+ "BTCOEX is disabled");
goto exit;
}
@@ -1582,43 +1587,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!an->sta->ht_cap.ht_supported) {
- len = snprintf(buf, size, "%s\n",
- "HT not supported");
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
goto exit;
}
- len = snprintf(buf, size, "Max-AMPDU: %d\n",
- an->maxampdu);
- len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
- an->mpdudensity);
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
- len += snprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
txq = ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
ath_txq_unlock(sc, txq);
}
- len += snprintf(buf + len, size - len,
- "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
- "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
- "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
txq = tid->ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
- tid->tidno, tid->seq_start, tid->seq_next,
- tid->baw_size, tid->baw_head, tid->baw_tail,
- tid->bar_index, tid->sched, tid->paused);
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno, tid->seq_start, tid->seq_next,
+ tid->baw_size, tid->baw_head, tid->baw_tail,
+ tid->bar_index, tid->sched, tid->paused);
ath_txq_unlock(sc, txq);
}
exit:
@@ -1773,6 +1778,111 @@ void ath9k_deinit_debug(struct ath_softc *sc)
}
}
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[3];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->tx99_state);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ bool start;
+ ssize_t len;
+ int r;
+
+ if (sc->nvifs > 1)
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ if (strtobool(buf, &start))
+ return -EINVAL;
+
+ if (start == sc->tx99_state) {
+ if (!start)
+ return count;
+ ath_dbg(common, XMIT, "Resetting TX99\n");
+ ath9k_tx99_deinit(sc);
+ }
+
+ if (!start) {
+ ath9k_tx99_deinit(sc);
+ return count;
+ }
+
+ r = ath9k_tx99_init(sc);
+ if (r)
+ return r;
+
+ return count;
+}
+
+static const struct file_operations fops_tx99 = {
+ .read = read_file_tx99,
+ .write = write_file_tx99,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ int r;
+ u8 tx_power;
+
+ r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+ if (r)
+ return r;
+
+ if (tx_power > MAX_RATE_POWER)
+ return -EINVAL;
+
+ sc->tx99_power = tx_power;
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+ ath9k_ps_restore(sc);
+
+ return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+ .read = read_file_tx99_power,
+ .write = write_file_tx99_power,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1864,5 +1974,15 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
+ if (config_enabled(CONFIG_ATH9K_TX99) &&
+ AR_SREV_9300_20_OR_LATER(ah)) {
+ debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99);
+ debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99_power);
+ }
+
return 0;
}
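
[Editor's note] The new tx99/tx99_power debugfs files follow the usual toggle-and-value pattern: the write handler copies at most sizeof(buf) - 1 bytes from userspace, parses a boolean with strtobool() (or a u8 with kstrtou8_from_user() for the power file), and the files are only registered when CONFIG_ATH9K_TX99 is enabled on AR9300-class hardware. A generic sketch of the boolean half, with a hypothetical private struct standing in for ath_softc (needs <linux/fs.h>, <linux/uaccess.h>, <linux/string.h>):

struct toggle_state {			/* hypothetical, stands in for ath_softc */
	bool enabled;
};

static ssize_t write_file_toggle(struct file *file,
				 const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct toggle_state *st = file->private_data;
	char buf[32];
	ssize_t len;
	bool enable;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';		/* terminate before parsing */

	if (strtobool(buf, &enable))
		return -EINVAL;

	st->enabled = enable;
	return count;
}
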
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 6e1556fa2f3..d6e3fa4299a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -193,12 +193,12 @@ struct ath_tx_stats {
#define TXSTATS sc->debug.stats.txstats
#define PR(str, elem) \
do { \
- len += snprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ len += scnprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index 3c839f06a06..c6fa3d5b5d7 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,7 +17,7 @@
#ifndef ATH9K_DFS_H
#define ATH9K_DFS_H
-#include "dfs_pattern_detector.h"
+#include "../dfs_pattern_detector.h"
#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
/**
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 3c6e4138a95..8824610c21f 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -20,16 +20,16 @@
#include "ath9k.h"
#include "dfs_debug.h"
+#include "../dfs_pattern_detector.h"
-
-struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
+static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
#define ATH9K_DFS_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- sc->debug.stats.dfs_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
#define ATH9K_DFS_POOL_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- global_dfs_pool_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ dfs_pool_stats.p);
static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -44,12 +44,21 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len, "DFS support for "
- "macVersion = 0x%x, macRev = 0x%x: %s\n",
- hw_ver->macVersion, hw_ver->macRev,
- (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ len += scnprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
"enabled" : "disabled");
- len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+ if (!sc->dfs_detector) {
+ len += scnprintf(buf + len, size - len,
+ "DFS detector not enabled\n");
+ goto exit;
+ }
+
+ dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
ATH9K_DFS_STAT("pulse events reported ", pulses_total);
ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
@@ -59,11 +68,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
- len += snprintf(buf + len, size - len, "Radar detector statistics "
- "(current DFS region: %d)\n", sc->dfs_detector->region);
+ len += scnprintf(buf + len, size - len, "Radar detector statistics "
+ "(current DFS region: %d)\n",
+ sc->dfs_detector->region);
ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
ATH9K_DFS_STAT("Radars detected ", radar_detected);
- len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
@@ -72,6 +82,7 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error);
ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used);
+exit:
if (len > size)
len = size;
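
[Editor's note] The dfs_debug.c hunks stop relying on a globally exported pool-statistics object: read_file_dfs() now bails out early when no detector is attached and otherwise snapshots the numbers through the detector's get_stats() callback. A hedged sketch of that callback-snapshot shape; the struct and field names here are illustrative, not the ath DFS API:

struct dfs_pool_stats {
	unsigned int pool_reference;
	unsigned int pulse_allocated;
};

struct dfs_detector {
	struct dfs_pool_stats (*get_stats)(struct dfs_detector *d);
};

static int show_pool_stats(struct dfs_detector *det, char *buf, size_t size)
{
	struct dfs_pool_stats st;

	if (!det)
		return scnprintf(buf, size, "DFS detector not enabled\n");

	st = det->get_stats(det);	/* local copy, not a shared global */
	return scnprintf(buf, size,
			 "Pool references : %u\nPulses allocated : %u\n",
			 st.pool_reference, st.pulse_allocated);
}
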
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index e36810a4b58..0a7ddf4c88c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -51,25 +51,11 @@ struct ath_dfs_stats {
u32 radar_detected;
};
-/**
- * struct ath_dfs_pool_stats - DFS Statistics for global pools
- */
-struct ath_dfs_pool_stats {
- u32 pool_reference;
- u32 pulse_allocated;
- u32 pulse_alloc_error;
- u32 pulse_used;
- u32 pseq_allocated;
- u32 pseq_alloc_error;
- u32 pseq_used;
-};
#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
void ath9k_dfs_init_debug(struct ath_softc *sc);
-#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
-#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
extern struct ath_dfs_pool_stats global_dfs_pool_stats;
#else
@@ -77,8 +63,6 @@ extern struct ath_dfs_pool_stats global_dfs_pool_stats;
#define DFS_STAT_INC(sc, c) do { } while (0)
static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
-#define DFS_POOL_STAT_INC(c) do { } while (0)
-#define DFS_POOL_STAT_DEC(c) do { } while (0)
#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9ea8e4b779c..b4091716e9b 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_dump_4k_modal_eeprom(buf, len, size,
- &eep->modalHeader);
+ &eep->modalHeader);
goto out;
}
@@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("TX Gain type", pBase->txGainType);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 3ae1f3df063..e1d0c217c10 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
@@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Power Table Offset", pBase->pwrTableOffset);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 1c25368b383..39107e31e79 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
@@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4b412aaf4f3..c34f21241da 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
- len += snprintf(buf + len, size - len, "BT Weights: ");
+ len += scnprintf(buf + len, size - len, "BT Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->bt_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "WLAN Weights: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "WLAN Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->wlan_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "Tx Priorities: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "Tx Priorities: ");
for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
- len += snprintf(buf + len, size - len, "%08x ",
+ len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->tx_prio[i]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index c1b45e2f848..fb071ee4fcf 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RX",
- be32_to_cpu(cmd_rsp.rx));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RX",
+ be32_to_cpu(cmd_rsp.rx));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXORN",
- be32_to_cpu(cmd_rsp.rxorn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXORN",
+ be32_to_cpu(cmd_rsp.rxorn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXEOL",
- be32_to_cpu(cmd_rsp.rxeol));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXEOL",
+ be32_to_cpu(cmd_rsp.rxeol));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXURN",
- be32_to_cpu(cmd_rsp.txurn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXURN",
+ be32_to_cpu(cmd_rsp.txurn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXTO",
- be32_to_cpu(cmd_rsp.txto));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXTO",
+ be32_to_cpu(cmd_rsp.txto));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CST",
- be32_to_cpu(cmd_rsp.cst));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CST",
+ be32_to_cpu(cmd_rsp.cst));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Xretries",
- be32_to_cpu(cmd_rsp.xretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Xretries",
+ be32_to_cpu(cmd_rsp.xretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "FifoErr",
- be32_to_cpu(cmd_rsp.fifoerr));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "FifoErr",
+ be32_to_cpu(cmd_rsp.fifoerr));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Filtered",
- be32_to_cpu(cmd_rsp.filtered));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Filtered",
+ be32_to_cpu(cmd_rsp.filtered));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TimerExp",
- be32_to_cpu(cmd_rsp.timer_exp));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TimerExp",
+ be32_to_cpu(cmd_rsp.timer_exp));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "ShortRetries",
- be32_to_cpu(cmd_rsp.shortretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "ShortRetries",
+ be32_to_cpu(cmd_rsp.shortretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "LongRetries",
- be32_to_cpu(cmd_rsp.longretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "LongRetries",
+ be32_to_cpu(cmd_rsp.longretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "QueueNull",
- be32_to_cpu(cmd_rsp.qnull));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "QueueNull",
+ be32_to_cpu(cmd_rsp.qnull));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "EncapFail",
- be32_to_cpu(cmd_rsp.encap_fail));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "EncapFail",
+ be32_to_cpu(cmd_rsp.encap_fail));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostSend",
- be32_to_cpu(cmd_rsp.host_send));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostSend",
+ be32_to_cpu(cmd_rsp.host_send));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostDone",
- be32_to_cpu(cmd_rsp.host_done));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostDone",
+ be32_to_cpu(cmd_rsp.host_done));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers queued",
- priv->debug.tx_stats.buf_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers completed",
- priv->debug.tx_stats.buf_completed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs queued",
- priv->debug.tx_stats.skb_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs success",
- priv->debug.tx_stats.skb_success);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs failed",
- priv->debug.tx_stats.skb_failed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CAB queued",
- priv->debug.tx_stats.cab_queued);
-
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BE queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BK queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VI queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VO queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs success",
+ priv->debug.tx_stats.skb_success);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs failed",
+ priv->debug.tx_stats.skb_failed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CAB queued",
+ priv->debug.tx_stats.cab_queued);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BE queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BK queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VI queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VO queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \
- priv->debug.rx_stats.err_phy_stats[p]);
+ len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
+ priv->debug.rx_stats.err_phy_stats[p]);
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
@@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs allocated",
- priv->debug.rx_stats.skb_allocated);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs completed",
- priv->debug.rx_stats.skb_completed);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs Dropped",
- priv->debug.rx_stats.skb_dropped);
-
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "CRC ERR",
- priv->debug.rx_stats.err_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT CRC ERR",
- priv->debug.rx_stats.err_decrypt_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "MIC ERR",
- priv->debug.rx_stats.err_mic);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "PRE-DELIM CRC ERR",
- priv->debug.rx_stats.err_pre_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "POST-DELIM CRC ERR",
- priv->debug.rx_stats.err_post_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT BUSY ERR",
- priv->debug.rx_stats.err_decrypt_busy);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "TOTAL PHY ERR",
- priv->debug.rx_stats.err_phy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "CRC ERR",
+ priv->debug.rx_stats.err_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT CRC ERR",
+ priv->debug.rx_stats.err_decrypt_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "MIC ERR",
+ priv->debug.rx_stats.err_mic);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "PRE-DELIM CRC ERR",
+ priv->debug.rx_stats.err_pre_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "POST-DELIM CRC ERR",
+ priv->debug.rx_stats.err_post_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT BUSY ERR",
+ priv->debug.rx_stats.err_decrypt_busy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "TOTAL PHY ERR",
+ priv->debug.rx_stats.err_phy);
PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
+ len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
priv->tx.tx_slot, MAX_TX_BUF_NUM);
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
- "Used slots : %d\n",
- bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "Used slots : %d\n",
+ bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
spin_unlock_bh(&priv->tx.tx_lock);
@@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Failed queue", skb_queue_len(&priv->tx.tx_failed));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Failed queue", skb_queue_len(&priv->tx.tx_failed));
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Queued count", priv->tx.queued_cnt);
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Queued count", priv->tx.queued_cnt);
spin_unlock_bh(&priv->tx.tx_lock);
if (len > sizeof(buf))
@@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Major Version",
- pBase->version >> 12);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Minor Version",
- pBase->version & 0xFFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Checksum",
- pBase->checksum);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Length",
- pBase->length);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain1",
- pBase->regDmn[0]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain2",
- pBase->regDmn[1]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Mask", pBase->txMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "RX Mask", pBase->rxMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 5GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 2GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Big Endian",
- !!(pBase->eepMisc & 0x01));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Major Ver",
- (pBase->binBuildNumber >> 24) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Minor Ver",
- (pBase->binBuildNumber >> 16) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Build",
- (pBase->binBuildNumber >> 8) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Major Version",
+ pBase->version >> 12);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Minor Version",
+ pBase->version & 0xFFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Checksum",
+ pBase->checksum);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Length",
+ pBase->length);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain1",
+ pBase->regDmn[0]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain2",
+ pBase->regDmn[1]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Mask", pBase->txMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "RX Mask", pBase->rxMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 5GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 2GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Big Endian",
+ !!(pBase->eepMisc & 0x01));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Major Ver",
+ (pBase->binBuildNumber >> 24) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Minor Ver",
+ (pBase->binBuildNumber >> 16) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Build",
+ (pBase->binBuildNumber >> 8) & 0xFF);
/*
* UB91 specific data.
@@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_header_4k *pBase4k =
&priv->ah->eeprom.map4k.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Gain type",
- pBase4k->txGainType);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Gain type",
+ pBase4k->txGainType);
}
/*
@@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_ar9287_header *pBase9287 =
&priv->ah->eeprom.map9287.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10ddB\n",
- "Power Table Offset",
- pBase9287->pwrTableOffset);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10ddB\n",
+ "Power Table Offset",
+ pBase9287->pwrTableOffset);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "OpenLoop Power Ctrl",
- pBase9287->openLoopPwrCntl);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "OpenLoop Power Ctrl",
+ pBase9287->openLoopPwrCntl);
}
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
if (len > size)
len = size;
@@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
do { \
if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
pModal = &priv->ah->eeprom.def.modalHeader[1]; \
- len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
- _s, (_val), "|"); \
+ len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
+ _s, (_val), "|"); \
} \
if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
pModal = &priv->ah->eeprom.def.modalHeader[0]; \
- len += snprintf(buf + len, size - len, "%9d\n", \
+ len += scnprintf(buf + len, size - len, "%9d\n",\
(_val)); \
} \
} while (0)
@@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%31s %15s\n", "2G", "5G");
- len += snprintf(buf + len, size - len,
- "%32s %16s\n", "====", "====\n");
+ len += scnprintf(buf + len, size - len,
+ "%31s %15s\n", "2G", "5G");
+ len += scnprintf(buf + len, size - len,
+ "%32s %16s\n", "====", "====\n");
PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index d44258172c0..9a2657fdd9c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,30 +24,10 @@
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
{
- enum htc_phymode mode;
-
- mode = -EINVAL;
-
- switch (ichan->chanmode) {
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- mode = HTC_MODE_11NG;
- break;
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- mode = HTC_MODE_11NA;
- break;
- default:
- break;
- }
+ if (IS_CHAN_5GHZ(ichan))
+ return HTC_MODE_11NA;
- WARN_ON(mode < 0);
-
- return mode;
+ return HTC_MODE_11NG;
}
bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
@@ -926,7 +906,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
WMI_CMD(WMI_FLUSH_RECV_CMDID);
/* setup initial channel */
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
@@ -1208,9 +1188,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
- ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
- &hw->conf.chandef);
-
+ ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 83f4927aeac..4f9378ddf07 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,22 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}
+static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ ath9k_hw_ops(ah)->tx99_start(ah, qnum);
+}
+
+static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->tx99_stop(ah);
+}
+
+static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
+{
+ if (ath9k_hw_ops(ah)->tx99_set_txpower)
+ ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
+}
+
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ecc6ec4a1ed..54b04155e43 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,29 +130,29 @@ void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
unsigned int clockrate;
/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
clockrate = 117;
- else if (!ah->curchan) /* should really check for CCK instead */
+ else if (!chan) /* should really check for CCK instead */
clockrate = ATH9K_CLOCK_RATE_CCK;
- else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
+ else if (IS_CHAN_2GHZ(chan))
clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
else
clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
- if (conf_is_ht40(conf))
+ if (IS_CHAN_HT40(chan))
clockrate *= 2;
if (ah->curchan) {
- if (IS_CHAN_HALF_RATE(ah->curchan))
+ if (IS_CHAN_HALF_RATE(chan))
clockrate /= 2;
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ if (IS_CHAN_QUARTER_RATE(chan))
clockrate /= 4;
}
@@ -190,10 +190,7 @@ EXPORT_SYMBOL(ath9k_hw_wait);
void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
int hw_delay)
{
- if (IS_CHAN_B(chan))
- hw_delay = (4 * hw_delay) / 22;
- else
- hw_delay /= 10;
+ hw_delay /= 10;
if (IS_CHAN_HALF_RATE(chan))
hw_delay *= 2;
@@ -294,8 +291,7 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
return;
}
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+ if (IS_CHAN_HT40PLUS(chan)) {
centers->synth_center =
chan->channel + HT40_CHANNEL_CENTER_SHIFT;
extoff = 1;
@@ -549,6 +545,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath9k_hw_ani_init(ah);
+ /*
+ * EEPROM needs to be initialized before we do this.
+ * This is required for regulatory compliance.
+ */
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+ if ((regdmn & 0xF0) == CTL_FCC) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+ }
+ }
+
return 0;
}
@@ -1030,7 +1038,6 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
const struct ath9k_channel *chan = ah->curchan;
int acktimeout, ctstimeout, ack_offset = 0;
int slottime;
@@ -1105,8 +1112,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
- if (conf->chandef.chan &&
- conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
+ if (IS_CHAN_2GHZ(chan) &&
!IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
acktimeout += 64 - sifstime - ah->slottime;
ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
- if (IS_CHAN_B(chan))
- ctl |= CTL_11B;
- else if (IS_CHAN_G(chan))
+ if (IS_CHAN_2GHZ(chan))
ctl |= CTL_11G;
else
ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
int r;
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
- u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- band_switch = (cur != new);
- mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
+ mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
}
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@ hang_check_iter:
return true;
}
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 val;
+
+ val = REG_READ(ah, AR_NAV);
+ if (val != 0xdeadbeef && val > 0x7fff) {
+ ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+ REG_WRITE(ah, AR_NAV, 0);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
+
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
@@ -1799,20 +1812,11 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
goto fail;
/*
- * If cross-band fcc is not supoprted, bail out if
- * either channelFlags or chanmode differ.
- *
- * chanmode will be different if the HT operating mode
- * changes because of CSA.
+ * If cross-band fcc is not supported, bail out if channelFlags differ.
*/
- if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
- if ((chan->channelFlags & CHANNEL_ALL) !=
- (ah->curchan->channelFlags & CHANNEL_ALL))
- goto fail;
-
- if (chan->chanmode != ah->curchan->chanmode)
- goto fail;
- }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+ chan->channelFlags != ah->curchan->channelFlags)
+ goto fail;
if (!ath9k_hw_check_alive(ah))
goto fail;
@@ -1822,9 +1826,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* re-using are present.
*/
if (AR_SREV_9462(ah) && (ah->caldata &&
- (!ah->caldata->done_txiqcal_once ||
- !ah->caldata->done_txclcal_once ||
- !ah->caldata->rtt_done)))
+ (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,15 +1878,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->caldata = caldata;
if (caldata && (chan->channel != caldata->channel ||
- chan->channelFlags != caldata->channelFlags ||
- chan->chanmode != caldata->chanmode)) {
+ chan->channelFlags != caldata->channelFlags)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
} else if (caldata) {
- caldata->paprd_packet_sent = false;
+ clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
}
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
if (fastcc) {
r = ath9k_hw_do_fastcc(ah, chan);
@@ -1964,9 +1967,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_mfp(ah);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
@@ -2017,8 +2018,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_bb(ah, chan);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
@@ -2943,12 +2944,11 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
u32 macmode;
- if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
+ if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
macmode = AR_2040_JOINED_RX_CLEAR;
else
macmode = 0;
@@ -3240,19 +3240,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
/* chipsets >= AR9280 are single-chip */
if (AR_SREV_9280_20_OR_LATER(ah)) {
- used = snprintf(hw_name, len,
- "Atheros AR%s Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev);
}
else {
- used = snprintf(hw_name, len,
- "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev,
- ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
- AR_RADIO_SREV_MAJOR)),
- ah->hw_version.phyRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev,
+ ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+ & AR_RADIO_SREV_MAJOR)),
+ ah->hw_version.phyRev);
}
hw_name[used] = '\0';
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 69a907b55a7..a2c9a5dbac6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -98,8 +98,8 @@
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
#define SM(_v, _f) (((_v) << _f##_S) & _f)
@@ -316,6 +316,7 @@ struct ath9k_ops_config {
u32 ant_ctrl_comm2g_switch_enable;
bool xatten_margin_cfg;
bool alt_mingainidx;
+ bool no_pll_pwrsave;
};
enum ath9k_int {
@@ -369,55 +370,30 @@ enum ath9k_int {
ATH9K_INT_NOCARD = 0xffffffff
};
-#define CHANNEL_CCK 0x00020
-#define CHANNEL_OFDM 0x00040
-#define CHANNEL_2GHZ 0x00080
-#define CHANNEL_5GHZ 0x00100
-#define CHANNEL_PASSIVE 0x00200
-#define CHANNEL_DYN 0x00400
-#define CHANNEL_HALF 0x04000
-#define CHANNEL_QUARTER 0x08000
-#define CHANNEL_HT20 0x10000
-#define CHANNEL_HT40PLUS 0x20000
-#define CHANNEL_HT40MINUS 0x40000
-
-#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
-#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
-#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_ALL \
- (CHANNEL_OFDM| \
- CHANNEL_CCK| \
- CHANNEL_2GHZ | \
- CHANNEL_5GHZ | \
- CHANNEL_HT20 | \
- CHANNEL_HT40PLUS | \
- CHANNEL_HT40MINUS)
-
#define MAX_RTT_TABLE_ENTRY 6
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
+enum ath9k_cal_flags {
+ RTT_DONE,
+ PAPRD_PACKET_SENT,
+ PAPRD_DONE,
+ NFCAL_PENDING,
+ NFCAL_INTF,
+ TXIQCAL_DONE,
+ TXCLCAL_DONE,
+ SW_PKDET_DONE,
+};
+
struct ath9k_hw_cal_data {
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
+ unsigned long cal_flags;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
- bool rtt_done;
- bool paprd_packet_sent;
- bool paprd_done;
- bool nfcal_pending;
- bool nfcal_interference;
- bool done_txiqcal_once;
- bool done_txclcal_once;
+ u8 caldac[2];
u16 small_signal_gain[AR9300_MAX_CHAINS];
u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
u32 num_measures[AR9300_MAX_CHAINS];
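The per-calibration booleans (rtt_done, paprd_done, done_txiqcal_once and friends) are folded into the single cal_flags word above, indexed by enum ath9k_cal_flags and driven through set_bit()/clear_bit()/test_bit() in the rest of the patch. A small userspace sketch of the same flag-word pattern (the helpers below are simplified, non-atomic stand-ins for the kernel bitops):

#include <stdbool.h>
#include <stdio.h>

enum cal_flags { RTT_DONE, PAPRD_DONE, TXIQCAL_DONE, TXCLCAL_DONE };

static void set_flag(unsigned long *word, int bit)   { *word |=  1UL << bit; }
static void clear_flag(unsigned long *word, int bit) { *word &= ~(1UL << bit); }
static bool test_flag(unsigned long word, int bit)   { return word & (1UL << bit); }

int main(void)
{
	unsigned long cal_flags = 0;

	set_flag(&cal_flags, TXIQCAL_DONE);
	set_flag(&cal_flags, TXCLCAL_DONE);

	/* fast channel change may re-use results only once both TX cals are done */
	if (test_flag(cal_flags, TXIQCAL_DONE) && test_flag(cal_flags, TXCLCAL_DONE))
		printf("TX calibration results can be re-used\n");

	clear_flag(&cal_flags, TXCLCAL_DONE);	/* e.g. after a full reset */
	printf("cal_flags=0x%lx\n", cal_flags);
	return 0;
}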
@@ -430,33 +406,34 @@ struct ath9k_hw_cal_data {
struct ath9k_channel {
struct ieee80211_channel *chan;
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
s16 noisefloor;
};
-#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
- (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
- (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
- (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
-#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
-#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
-#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+#define CHANNEL_5GHZ BIT(0)
+#define CHANNEL_HALF BIT(1)
+#define CHANNEL_QUARTER BIT(2)
+#define CHANNEL_HT BIT(3)
+#define CHANNEL_HT40PLUS BIT(4)
+#define CHANNEL_HT40MINUS BIT(5)
+
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
- ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
-
-/* These macros check chanmode and not channelFlags */
-#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
-#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
- ((_c)->chanmode == CHANNEL_G_HT20))
-#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+ (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+ (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
enum ath9k_power_mode {
ATH9K_PM_AWAKE = 0,
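With the hunk above, a channel is no longer described by a chanmode value; band, HT and rate properties are all derived from the compact channelFlags bits. A userspace sketch of how the new encoding composes (the defines and IS_CHAN_* macros mirror the ones added to hw.h; struct chan is a trimmed stand-in for struct ath9k_channel):

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define CHANNEL_5GHZ		BIT(0)
#define CHANNEL_HALF		BIT(1)
#define CHANNEL_QUARTER		BIT(2)
#define CHANNEL_HT		BIT(3)
#define CHANNEL_HT40PLUS	BIT(4)
#define CHANNEL_HT40MINUS	BIT(5)

struct chan { unsigned short channelFlags; };

#define IS_CHAN_5GHZ(_c)	(!!((_c)->channelFlags & CHANNEL_5GHZ))
#define IS_CHAN_2GHZ(_c)	(!IS_CHAN_5GHZ(_c))
#define IS_CHAN_HT(_c)		((_c)->channelFlags & CHANNEL_HT)
#define IS_CHAN_HT40(_c)	\
	(!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
#define IS_CHAN_HT20(_c)	(IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))

int main(void)
{
	/* what used to be CHANNEL_A_HT40PLUS: 5 GHz + HT + HT40PLUS */
	struct chan c = { CHANNEL_5GHZ | CHANNEL_HT | CHANNEL_HT40PLUS };

	printf("5GHz=%d 2GHz=%d HT20=%d HT40=%d\n",
	       IS_CHAN_5GHZ(&c), IS_CHAN_2GHZ(&c),
	       IS_CHAN_HT20(&c), IS_CHAN_HT40(&c));
	return 0;
}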
@@ -558,6 +535,7 @@ struct ath_hw_antcomb_conf {
u8 main_gaintb;
u8 alt_gaintb;
int lna1_lna2_delta;
+ int lna1_lna2_switch_delta;
u8 div_group;
};
@@ -726,6 +704,10 @@ struct ath_hw_ops {
void (*spectral_scan_trigger)(struct ath_hw *ah);
void (*spectral_scan_wait)(struct ath_hw *ah);
+ void (*tx99_start)(struct ath_hw *ah, u32 qnum);
+ void (*tx99_stop)(struct ath_hw *ah);
+ void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
+
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
#endif
@@ -1026,10 +1008,11 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 9a1f349f926..710192ed27e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -347,7 +347,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
- struct ath_buf *bf;
int i, bsize, desc_len;
ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
- if (!bf)
- return -ENOMEM;
+ if (is_tx) {
+ struct ath_buf *bf;
+
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
- for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += (desc_len * ndesc);
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
}
+ list_add_tail(&bf->list, head);
+ }
+ } else {
+ struct ath_rxbuf *bf;
+
+ bsize = sizeof(struct ath_rxbuf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
}
- list_add_tail(&bf->list, head);
}
return 0;
}
@@ -437,7 +471,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
+ if (sc->driver_data & ATH9K_PCI_CUS252)
+ ath_info(common, "CUS252 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+ ath_info(common, "WB335 1-ANT card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+ ath_info(common, "WB335 2-ANT card detected\n");
+
+ /*
+ * Some WB335 cards do not support antenna diversity. Since
+ * we use a hardcoded value for AR9565 instead of using the
+ * EEPROM/OTP data, remove the combining feature from
+ * the HW capabilities bitmap.
+ */
+ if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+ if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+ }
+
if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -556,6 +609,11 @@ static void ath9k_init_platform(struct ath_softc *sc)
ah->config.pcie_waen = 0x0040473b;
ath_info(common, "Enable WAR for ASPM D3/L1\n");
}
+
+ if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
+ ah->config.no_pll_pwrsave = true;
+ ath_info(common, "Disable PLL PowerSave\n");
+ }
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -627,7 +685,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
sc->sc_ah = ah;
pCap = &ah->caps;
- sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
+ common = ath9k_hw_common(ah);
+ sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
+ sc->tx99_power = MAX_RATE_POWER + 1;
if (!pdata) {
ah->ah_flags |= AH_USE_EEPROM;
@@ -641,7 +701,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->external_reset = pdata->external_reset;
}
- common = ath9k_hw_common(ah);
common->ops = &ah->reg_ops;
common->bus_ops = bus_ops;
common->ah = ah;
@@ -732,6 +791,7 @@ err_queues:
ath9k_hw_deinit(ah);
err_hw:
ath9k_eeprom_release(sc);
+ dev_kfree_skb_any(sc->tx99_skb);
return ret;
}
@@ -748,7 +808,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
- ath9k_cmn_update_ichannel(ah->curchan, &chandef);
+ ath9k_cmn_get_channel(sc->hw, ah, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
@@ -789,9 +849,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) },
};
-
static const struct ieee80211_iface_limit if_dfs_limits[] = {
- { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) },
};
static const struct ieee80211_iface_combination if_comb[] = {
@@ -808,8 +868,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
.max_interfaces = 1,
.num_different_channels = 1,
.beacon_int_infra_match = true,
- .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) |
- BIT(NL80211_CHAN_HT20),
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20),
}
};
@@ -850,17 +910,18 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_WDS) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->iface_combinations = if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_WDS) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ }
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2f831db396a..aed7e29dc50 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,6 +28,13 @@ void ath_tx_complete_poll_work(struct work_struct *work)
int i;
bool needreset = false;
+
+ if (sc->tx99_state) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "skip tx hung detection on tx99\n");
+ return;
+ }
+
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
@@ -70,7 +77,7 @@ void ath_hw_check(struct work_struct *work)
ath9k_ps_wakeup(sc);
is_alive = ath9k_hw_check_alive(sc->sc_ah);
- if (is_alive && !AR_SREV_9300(sc->sc_ah))
+ if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
goto out;
else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
ath_dbg(common, RESET,
@@ -141,6 +148,9 @@ void ath_hw_pll_work(struct work_struct *work)
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
return;
+ if (sc->tx99_state)
+ return;
+
ath9k_ps_wakeup(sc);
pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
ath9k_ps_restore(sc);
@@ -184,7 +194,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
struct ath9k_hw_cal_data *caldata = ah->caldata;
int chain;
- if (!caldata || !caldata->paprd_done) {
+ if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
return;
}
@@ -256,7 +266,9 @@ void ath_paprd_calibrate(struct work_struct *work)
int len = 1800;
int ret;
- if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+ if (!caldata ||
+ !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+ test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
return;
}
@@ -316,7 +328,7 @@ void ath_paprd_calibrate(struct work_struct *work)
kfree_skb(skb);
if (chain_ok) {
- caldata->paprd_done = true;
+ set_bit(PAPRD_DONE, &caldata->cal_flags);
ath_paprd_activate(sc);
}
@@ -343,7 +355,7 @@ void ath_ani_calibrate(unsigned long data)
u32 cal_interval, short_cal_interval, long_cal_interval;
unsigned long flags;
- if (ah->caldata && ah->caldata->nfcal_interference)
+ if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
long_cal_interval = ATH_LONG_CALINTERVAL_INT;
else
long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +444,7 @@ set_timer:
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
- if (!ah->caldata->paprd_done) {
+ if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
ieee80211_queue_work(sc->hw, &sc->paprd_work);
} else if (!ah->paprd_table_write_done) {
ath9k_ps_wakeup(sc);
@@ -516,7 +528,8 @@ void ath_update_survey_nf(struct ath_softc *sc, int channel)
if (chan->noisefloor) {
survey->filled |= SURVEY_INFO_NOISE_DBM;
- survey->noise = ath9k_hw_getchan_noise(ah, chan);
+ survey->noise = ath9k_hw_getchan_noise(ah, chan,
+ chan->noisefloor);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index a3eff0986a3..6a18f9d3e9c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -374,7 +374,6 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_channel *chan = ah->curchan;
struct ath9k_tx_queue_info *qi;
u32 cwMin, chanCwMin, value;
@@ -387,10 +386,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
- if (chan && IS_CHAN_B(chan))
- chanCwMin = INIT_CWMIN_11B;
- else
- chanCwMin = INIT_CWMIN;
+ chanCwMin = INIT_CWMIN;
for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
} else
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index bfccaceed44..e3eed81f243 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -603,8 +603,6 @@ enum ath9k_tx_queue_flags {
#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
#define ATH9K_DECOMP_MASK_SIZE 128
-#define ATH9K_READY_TIME_LO_BOUND 50
-#define ATH9K_READY_TIME_HI_BOUND 96
enum ath9k_pkt_type {
ATH9K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 709301f88dc..74f452c7b16 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -312,17 +312,91 @@ out:
 * by resetting the chip. To accomplish this we must first clean up any pending
* DMA, then restart stuff.
*/
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *hchan)
+static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath9k_channel *hchan;
+ struct ieee80211_channel *chan = chandef->chan;
+ unsigned long flags;
+ bool offchannel;
+ int pos = chan->hw_value;
+ int old_pos = -1;
int r;
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
+ offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
+
+ if (ah->curchan)
+ old_pos = ah->curchan - &ah->channels[0];
+
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ chan->center_freq, chandef->width);
+
+ /* update survey stats for the old channel before switching */
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath9k_cmn_get_channel(hw, ah, chandef);
+
+ /*
+ * If the operating channel changes, change the survey in-use flags
+ * along with it.
+ * Reset the survey data for the new channel, unless we're switching
+ * back to the operating channel from an off-channel operation.
+ */
+ if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
+ if (sc->cur_survey)
+ sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+ sc->cur_survey = &sc->survey[pos];
+
+ memset(sc->cur_survey, 0, sizeof(struct survey_info));
+ sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+ } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+ memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+ }
+
+ hchan = &sc->sc_ah->channels[pos];
r = ath_reset_internal(sc, hchan);
+ if (r)
+ return r;
- return r;
+ /*
+ * The most recent snapshot of channel->noisefloor for the old
+ * channel is only available after the hardware reset. Copy it to
+ * the survey stats now.
+ */
+ if (old_pos >= 0)
+ ath_update_survey_nf(sc, old_pos);
+
+ /*
+ * Enable radar pulse detection if on a DFS channel. Spectral
+ * scanning and radar detection can not be used concurrently.
+ */
+ if (hw->conf.radar_enabled) {
+ u32 rxfilter;
+
+ /* set HW specific DFS configuration */
+ ath9k_hw_set_radar_params(ah);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR;
+ ath9k_hw_setrxfilter(ah, rxfilter);
+ ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+ chan->center_freq);
+ } else {
+ /* perform spectral scan if requested. */
+ if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+ sc->spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_spectral_scan_trigger(hw);
+ }
+
+ return 0;
}
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -372,6 +446,13 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -410,10 +491,9 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
-out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
-
+out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}
@@ -594,7 +674,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, false);
@@ -797,7 +877,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+ ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
ath9k_hw_phy_disable(ah);
@@ -816,7 +896,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG, "Driver halt\n");
}
-bool ath9k_uses_beacons(int type)
+static bool ath9k_uses_beacons(int type)
{
switch (type) {
case NL80211_IFTYPE_AP:
@@ -966,6 +1046,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (sc->nvifs >= 1) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+ sc->tx99_vif = vif;
+ }
+
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -994,9 +1082,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_dbg(common, CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+
+ ath_dbg(common, CONFIG, "Change Interface\n");
+
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1026,6 +1120,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
sc->nvifs--;
+ sc->tx99_vif = NULL;
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1047,6 +1142,9 @@ static void ath9k_enable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
sc->ps_enabled = true;
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -1063,6 +1161,9 @@ static void ath9k_disable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
sc->ps_enabled = false;
ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1086,6 +1187,9 @@ void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
struct ath_common *common = ath9k_hw_common(ah);
u32 rxfilter;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
ath_err(common, "spectrum analyzer not implemented on this hardware\n");
return;
@@ -1201,81 +1305,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- int pos = curchan->hw_value;
- int old_pos = -1;
- unsigned long flags;
-
- if (ah->curchan)
- old_pos = ah->curchan - &ah->channels[0];
-
- ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
- curchan->center_freq, hw->conf.chandef.width);
-
- /* update survey stats for the old channel before switching */
- spin_lock_irqsave(&common->cc_lock, flags);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- &conf->chandef);
-
- /*
- * If the operating channel changes, change the survey in-use flags
- * along with it.
- * Reset the survey data for the new channel, unless we're switching
- * back to the operating channel from an off-channel operation.
- */
- if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
- sc->cur_survey != &sc->survey[pos]) {
-
- if (sc->cur_survey)
- sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
- sc->cur_survey = &sc->survey[pos];
-
- memset(sc->cur_survey, 0, sizeof(struct survey_info));
- sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
- } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
- memset(&sc->survey[pos], 0, sizeof(struct survey_info));
- }
-
- if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+ if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return -EINVAL;
}
-
- /*
- * The most recent snapshot of channel->noisefloor for the old
- * channel is only available after the hardware reset. Copy it to
- * the survey stats now.
- */
- if (old_pos >= 0)
- ath_update_survey_nf(sc, old_pos);
-
- /*
- * Enable radar pulse detection if on a DFS channel. Spectral
- * scanning and radar detection can not be used concurrently.
- */
- if (hw->conf.radar_enabled) {
- u32 rxfilter;
-
- /* set HW specific DFS configuration */
- ath9k_hw_set_radar_params(ah);
- rxfilter = ath9k_hw_getrxfilter(ah);
- rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
- ATH9K_RX_FILTER_PHYERR;
- ath9k_hw_setrxfilter(ah, rxfilter);
- ath_dbg(common, DFS, "DFS enabled at freq %d\n",
- curchan->center_freq);
- } else {
- /* perform spectral scan if requested. */
- if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
- sc->spectral_mode == SPECTRAL_CHANSCAN)
- ath9k_spectral_scan_trigger(hw);
- }
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1734,6 +1769,9 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
unsigned long flags;
int pos;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
spin_lock_irqsave(&common->cc_lock, flags);
if (idx == 0)
ath_update_survey_stats(sc);
@@ -1766,6 +1804,9 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
mutex_lock(&sc->mutex);
ah->coverage_class = coverage_class;
@@ -2332,6 +2373,134 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
sc->csa_vif = vif;
}
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_drain_all_txq(sc);
+ ath_startrecv(sc);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ ieee80211_wake_queues(sc->hw);
+
+ kfree_skb(sc->tx99_skb);
+ sc->tx99_skb = NULL;
+ sc->tx99_state = false;
+
+ ath9k_hw_tx99_stop(sc->sc_ah);
+ ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+ static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+ 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+ 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+ 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+ 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+ 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+ 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+ 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+ u32 len = 1200;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, len);
+
+ memset(skb->data, 0, len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+ hdr->duration_id = 0;
+
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = hw->conf.chandef.chan->band;
+ tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.vif = sc->tx99_vif;
+
+ memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+ return skb;
+}
+
+void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_tx99_stop(sc);
+ ath9k_ps_restore(sc);
+}
+
+int ath9k_tx99_init(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int r;
+
+ if (sc->sc_flags & SC_OP_INVALID) {
+ ath_err(common,
+ "driver is in invalid state unable to use TX99");
+ return -EINVAL;
+ }
+
+ sc->tx99_skb = ath9k_build_tx99_skb(sc);
+ if (!sc->tx99_skb)
+ return -ENOMEM;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_hw_disable_interrupts(ah);
+ atomic_set(&ah->intr_ref_cnt, -1);
+ ath_drain_all_txq(sc);
+ ath_stoprecv(sc);
+
+ sc->tx99_state = true;
+
+ ieee80211_stop_queues(hw);
+
+ if (sc->tx99_power == MAX_RATE_POWER + 1)
+ sc->tx99_power = MAX_RATE_POWER;
+
+ ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+ r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+ if (r) {
+ ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+ return r;
+ }
+
+ ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+	/* We leave the hardware awake as it will be chugging on */
+
+ return 0;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 815bee21c19..0ac1b5f0425 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -661,9 +661,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
chan_start = wlan_chan - 10;
chan_end = wlan_chan + 10;
- if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ if (IS_CHAN_HT40PLUS(chan))
chan_end += 20;
- else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ else if (IS_CHAN_HT40MINUS(chan))
chan_start -= 20;
/* adjust side band */
@@ -707,11 +707,11 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
if (setchannel) {
struct ath9k_hw_cal_data *caldata = &sc->caldata;
- if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+ if (IS_CHAN_HT40PLUS(ah->curchan) &&
(ah->curchan->channel > caldata->channel) &&
(ah->curchan->channel <= caldata->channel + 20))
return;
- if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+ if (IS_CHAN_HT40MINUS(ah->curchan) &&
(ah->curchan->channel < caldata->channel) &&
(ah->curchan->channel >= caldata->channel - 20))
return;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d089a7cf01c..b5656fce4ff 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -195,6 +195,93 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x3219),
.driver_data = ATH9K_PCI_BT_ANT_DIV },
+ /* AR9485 cards with PLL power-save disabled by default. */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C97),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2100),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1C56, /* ASKEY */
+ 0x4001),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x11AD, /* LITEON */
+ 0x6627),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x11AD, /* LITEON */
+ 0x6628),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE04E),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE04F),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x144F, /* ASKEY */
+ 0x7197),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x2000),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x2001),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1186),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1F86),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1195),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1F95),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x1C00),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x1C01),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x850D),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
@@ -269,7 +356,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
- { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
+
+ /* CUS252 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3028),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2176),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 1-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE068),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA119),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0632),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x6671),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2811),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2812),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+ /* WB335 1-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3025),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302B),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE069),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3028),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0622),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0672),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0662),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x18E3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x217F),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 2-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411D),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+
+ /* WB335 2-ANT / Antenna-Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0642),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0652),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0612),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2130),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x144F, /* ASKEY */
+ 0x7202),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2810),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* PCI-E AR9565 (WB335) */
+ { PCI_VDEVICE(ATHEROS, 0x0036),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
{ 0 }
};
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index d3d7c51fa6c..d829bb62a3f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
int used_mcs = 0, used_htmode = 0;
if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = snprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
+ used_mcs = scnprintf(mcs, 5, "%d",
+ rc->rate_table->info[i].ratecode);
if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT40");
+ used_htmode = scnprintf(htmode, 5, "HT40");
else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT20");
+ used_htmode = scnprintf(htmode, 5, "HT20");
else
- used_htmode = snprintf(htmode, 5, "????");
+ used_htmode = scnprintf(htmode, 5, "????");
}
mcs[used_mcs] = '\0';
htmode[used_htmode] = '\0';
- len += snprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
+ len += scnprintf(buf + len, max - len,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
+ stats->per);
}
if (len > max)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ab9e3a8410b..95ddca5495d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,7 +19,7 @@
#include "ath9k.h"
#include "ar9003_mac.h"
-#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
@@ -35,7 +35,7 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
{
if (sc->rx.buf_hold)
ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
rx_edma = &sc->rx.rx_edma[qtype];
if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
return false;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
list_del_init(&bf->list);
skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (list_empty(&sc->rx.rxbuf)) {
ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
static void ath_rx_remove_buffer(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
@@ -171,7 +171,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0, i;
u32 size;
@@ -211,7 +211,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
ah->caps.rx_hp_qdepth);
- size = sizeof(struct ath_buf) * nbufs;
+ size = sizeof(struct ath_rxbuf) * nbufs;
bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
if (!bf)
return -ENOMEM;
@@ -271,7 +271,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_rx_edma_cleanup(sc);
@@ -375,6 +375,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
{
u32 rfilt;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return 0;
+
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST;
@@ -427,7 +430,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
int ath_startrecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_edma_start_recv(sc);
@@ -447,7 +450,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
ath9k_hw_putrxbuf(ah, bf->bf_daddr);
ath9k_hw_rxena(ah);
@@ -603,13 +606,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype,
struct ath_rx_status *rs,
- struct ath_buf **dest)
+ struct ath_rxbuf **dest)
{
struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +656,11 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
return true;
}
-static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf = NULL;
+ struct ath_rxbuf *bf = NULL;
while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
if (!bf)
@@ -668,13 +671,13 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
-static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +685,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
if (bf == sc->rx.buf_hold)
return NULL;
@@ -702,7 +705,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
ret = ath9k_hw_rxprocdesc(ah, ds, rs);
if (ret == -EINPROGRESS) {
struct ath_rx_status trs;
- struct ath_buf *tbf;
+ struct ath_rxbuf *tbf;
struct ath_desc *tds;
memset(&trs, 0, sizeof(trs));
@@ -711,7 +714,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- tbf = list_entry(bf->list.next, struct ath_buf, list);
+ tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
/*
* On some hardware the descriptor status words could
@@ -972,14 +975,15 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
{
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath_hw *ah = sc->sc_ah;
- u8 bins[SPECTRAL_HT20_NUM_BINS];
- u8 *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample;
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
struct ath_radar_info *radar_info;
- struct ath_ht20_mag_info *mag_info;
int len = rs->rs_datalen;
int dc_pos;
- u16 length, max_magnitude;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -997,45 +1001,44 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
- /* Variation in the data length is possible and will be fixed later.
- * Note that we only support HT20 for now.
- *
- * TODO: add HT20_40 support as well.
- */
- if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
- (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
- return 1;
-
- fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
- length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
- fft_sample.tlv.length = __cpu_to_be16(length);
+ chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+ } else {
+ fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ }
- fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
- fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample.noise = ah->noise;
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
- switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+ switch (len - fft_len) {
case 0:
/* length correct, nothing to do. */
- memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+ memcpy(bins, vdata, num_bins);
break;
case -1:
/* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+ memcpy(&bins[1], vdata, num_bins - 1);
bins[0] = vdata[0];
break;
case 2:
/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
memcpy(bins, vdata, 30);
bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+ memcpy(&bins[31], &vdata[33], num_bins - 31);
break;
case 1:
/* MAC added 2 extra bytes AND first byte is missing. */
bins[0] = vdata[0];
- memcpy(&bins[0], vdata, 30);
+ memcpy(&bins[1], vdata, 30);
bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+ memcpy(&bins[32], &vdata[33], num_bins - 32);
break;
default:
return 1;
@@ -1044,23 +1047,93 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
- dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ dc_pos = num_bins / 2;
bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
- /* mag data is at the end of the frame, in front of radar_info */
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- /* copy raw bins without scaling them */
- memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
- fft_sample.max_exp = mag_info->max_exp & 0xf;
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- max_magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
- fft_sample.max_index = spectral_max_index(mag_info->all_bins);
- fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample.tsf = __cpu_to_be64(tsf);
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ } else {
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ }
- ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+ ath_debug_send_fft_sample(sc, tlv);
return 1;
#else
return 0;
@@ -1308,7 +1381,7 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
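
The bin-length fixup and DC-bin interpolation in ath_process_fft() above are easier to follow outside diff form. The sketch below is a minimal user-space illustration of the same idea, not driver code: NUM_BINS is a made-up bin count standing in for the SPECTRAL_*_NUM_BINS constants, and only the "length correct" and "first byte missing" cases are shown.

/* Illustrative sketch, not part of the patch: mirrors the length-fixup
 * switch and the DC-bin interpolation from ath_process_fft() above.
 */
#include <stdio.h>
#include <string.h>

#define NUM_BINS 8	/* hypothetical; the driver uses SPECTRAL_*_NUM_BINS */

static int fixup_bins(unsigned char *bins, const unsigned char *vdata, int len)
{
	switch (len - NUM_BINS) {
	case 0:		/* length correct, plain copy */
		memcpy(bins, vdata, NUM_BINS);
		break;
	case -1:	/* first byte missing, duplicate it */
		memcpy(&bins[1], vdata, NUM_BINS - 1);
		bins[0] = vdata[0];
		break;
	default:	/* other deviations are simply dropped here */
		return -1;
	}
	/* The DC bin (middle of the sample) is invalid, interpolate it */
	bins[NUM_BINS / 2] =
		(bins[NUM_BINS / 2 + 1] + bins[NUM_BINS / 2 - 1]) / 2;
	return 0;
}

int main(void)
{
	unsigned char vdata[NUM_BINS] = { 10, 20, 30, 40, 0, 60, 70, 80 };
	unsigned char bins[NUM_BINS];

	if (!fixup_bins(bins, vdata, NUM_BINS))
		printf("DC bin interpolated to %d\n", bins[NUM_BINS / 2]);
	return 0;
}

With the sample data above this prints 50, the average of the two neighbouring bins.
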
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index fde6da619f3..0db37f23001 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_fw_version {
struct wmi_event_swba {
__be64 tsf;
u8 beacon_pending;
-};
+} __packed;
/*
* 64 - HTC header - WMI header - 1 / txstatus
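
A note on the __packed added above: wmi_event_swba is parsed straight out of a WMI event buffer coming from the firmware, so its in-memory layout must match the wire format byte for byte; without packing the compiler is free to pad the structure after the u8. A small stand-alone sketch (plain C types rather than the kernel ones) of the size difference:

/* Illustrative sketch only: why packing matters for a struct with a
 * 64-bit field followed by a single byte.
 */
#include <stdio.h>
#include <stdint.h>

struct swba_unpacked {
	uint64_t tsf;
	uint8_t beacon_pending;
};				/* tail-padded up to the 8-byte alignment */

struct swba_packed {
	uint64_t tsf;
	uint8_t beacon_pending;
} __attribute__((packed));	/* no padding, matches the 9-byte wire layout */

int main(void)
{
	printf("unpacked: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct swba_unpacked), sizeof(struct swba_packed));
	return 0;
}

On a typical 64-bit ABI this prints 16 and 9 bytes respectively.
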
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index dd30452df96..09cdbcd0973 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1241,12 +1241,13 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
if (bf->bf_next)
info.link = bf->bf_next->bf_daddr;
else
- info.link = 0;
+ info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
if (!bf_first) {
bf_first = bf;
- info.flags = ATH9K_TXDESC_INTREQ;
+ if (!sc->tx99_state)
+ info.flags = ATH9K_TXDESC_INTREQ;
if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
txq == sc->tx.uapsdq)
info.flags |= ATH9K_TXDESC_CLRDMASK;
@@ -1704,16 +1705,9 @@ int ath_cabq_update(struct ath_softc *sc)
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
- /*
- * Ensure the readytime % is within the bounds.
- */
- if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
- else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
qi.tqi_readyTime = (cur_conf->beacon_interval *
- sc->config.cabqReadytime) / 100;
+ ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
return 0;
@@ -1948,7 +1942,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
- if (!edma) {
+ if (!edma || sc->tx99_state) {
TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
@@ -2027,6 +2021,9 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+
+ if (!rate)
+ return;
fi->rtscts_rate = rate->hw_value;
if (short_preamble)
fi->rtscts_rate |= rate->hw_value_short;
@@ -2037,8 +2034,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
struct ath_hw *ah = sc->sc_ah;
struct ath9k_channel *curchan = ah->curchan;
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
- (curchan->channelFlags & CHANNEL_5GHZ) &&
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
(chainmask == 0x7) && (rate < 0x90))
return 0x3;
else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2329,7 +2325,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (sc->sc_ah->caldata)
- sc->sc_ah->caldata->paprd_packet_sent = true;
+ set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
@@ -2379,6 +2375,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
bf->bf_buf_addr = 0;
+ if (sc->tx99_state)
+ goto skip_tx_complete;
if (bf->bf_state.bfs_paprd) {
if (time_after(jiffies,
@@ -2391,6 +2389,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
ath_tx_complete(sc, skb, tx_flags, txq);
}
+skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
* accidentally reference it later.
*/
@@ -2749,3 +2748,46 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ath_txq_unlock(sc, txq);
}
}
+
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_buf *bf;
+ int padpos, padsize;
+
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize) {
+ ath_dbg(common, XMIT,
+ "tx99 padding failed\n");
+ return -EINVAL;
+ }
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->framelen = skb->len + FCS_LEN;
+ fi->keytype = ATH9K_KEY_TYPE_CLEAR;
+
+ bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
+ if (!bf) {
+ ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
+ return -EINVAL;
+ }
+
+ ath_set_rates(sc->tx99_vif, NULL, bf);
+
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
+ ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
+
+ ath_tx_send_normal(sc, txctl->txq, NULL, skb);
+
+ return 0;
+}
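
The padding block at the top of ath9k_tx99_send() above is the usual ath9k idiom for keeping the payload 4-byte aligned: 802.11 header lengths are even, so hdrlen & 3 is either 0 or 2, and that is exactly the gap that skb_push() plus memmove() open up between the header and the payload. A tiny stand-alone sketch of the arithmetic (the header lengths are just examples):

/* Illustrative sketch only: the 4-byte alignment padding computed in
 * ath9k_tx99_send() above, evaluated for a few example header lengths.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hdrlens[] = { 24, 26, 30, 32 };	/* example 802.11 header sizes */
	unsigned int i;

	for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
		unsigned int padsize = hdrlens[i] & 3;	/* same expression as the driver */

		printf("hdrlen %2u -> padsize %u, payload offset %u\n",
		       hdrlens[i], padsize, hdrlens[i] + padsize);
	}
	return 0;
}

Every resulting payload offset is a multiple of four.
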
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 307bc0ddff9..ca115f33746 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -773,7 +773,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
complete_all(&ar->cmd_wait);
/* This is required to prevent an early completion on _start */
- INIT_COMPLETION(ar->cmd_wait);
+ reinit_completion(&ar->cmd_wait);
/*
* Note:
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 491305c81fc..a1a69c5db40 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -19,7 +19,7 @@
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
-#include "ath9k.h"
+#include "ath.h"
/*
* tolerated deviation of radar time stamp in usecs on both sides
@@ -143,7 +143,6 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
u32 sz, i;
struct channel_detector *cd;
- struct ath_common *common = ath9k_hw_common(dpd->ah);
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
if (cd == NULL)
@@ -167,7 +166,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
return cd;
fail:
- ath_dbg(common, DFS,
+ ath_dbg(dpd->common, DFS,
"failed to allocate channel_detector for freq=%d\n", freq);
channel_detector_exit(dpd, cd);
return NULL;
@@ -242,7 +241,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
struct pri_detector *pd = cd->detectors[i];
struct pri_sequence *ps = pd->add_pulse(pd, event);
if (ps != NULL) {
- ath_dbg(ath9k_hw_common(dpd->ah), DFS,
+ ath_dbg(dpd->common, DFS,
"DFS: radar found on freq=%d: id=%d, pri=%d, "
"count=%d, count_false=%d\n",
event->freq, pd->rs->type_id,
@@ -254,6 +253,12 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
return false;
}
+static struct ath_dfs_pool_stats
+dpd_get_stats(struct dfs_pattern_detector *dpd)
+{
+ return global_dfs_pool_stats;
+}
+
static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
enum nl80211_dfs_regions region)
{
@@ -284,14 +289,18 @@ static struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
+ .get_stats = dpd_get_stats,
.region = NL80211_DFS_UNSET,
};
struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region)
{
struct dfs_pattern_detector *dpd;
- struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
if (dpd == NULL)
@@ -300,7 +309,7 @@ dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
- dpd->ah = ah;
+ dpd->common = common;
if (dpd->set_dfs_domain(dpd, region))
return dpd;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index 90a5abcc426..dde2652b787 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -22,6 +22,19 @@
#include <linux/nl80211.h>
/**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+ u32 pool_reference;
+ u32 pulse_allocated;
+ u32 pulse_alloc_error;
+ u32 pulse_used;
+ u32 pseq_allocated;
+ u32 pseq_alloc_error;
+ u32 pseq_used;
+};
+
+/**
* struct pulse_event - describing pulses reported by PHY
* @ts: pulse time stamp in us
* @freq: channel frequency in MHz
@@ -77,11 +90,12 @@ struct dfs_pattern_detector {
bool (*add_pulse)(struct dfs_pattern_detector *dpd,
struct pulse_event *pe);
+ struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
enum nl80211_dfs_regions region;
u8 num_radar_types;
u64 last_pulse_ts;
/* needed for ath_dbg() */
- struct ath_hw *ah;
+ struct ath_common *common;
const struct radar_detector_specs *radar_spec;
struct list_head channel_detectors;
@@ -92,15 +106,7 @@ struct dfs_pattern_detector {
* @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
* @return instance pointer on success, NULL otherwise
*/
-#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
extern struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region);
-#else
-static inline struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
-{
- return NULL;
-}
-#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
-
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region);
#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 5ba4b6fe37c..43b60817888 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -17,10 +17,14 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "ath9k.h"
+#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
-#include "dfs_debug.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/**
* struct pulse_elem - elements in pulse queue
@@ -392,7 +396,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
pri_detector_reset(de, ts);
- return false;
+ return NULL;
}
ps = pseq_handler_check_detection(de);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h
index 723962d1abc..79f0fff4d1e 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
+++ b/drivers/net/wireless/ath/dfs_pri_detector.h
@@ -19,6 +19,8 @@
#include <linux/list.h>
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
/**
* struct pri_sequence - sequence of pulses matching one PRI
* @head: list_head
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7d077c752dd..1217c52ab28 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -356,14 +356,132 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
return -1;
}
+static int __ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ u16 country_code;
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ !ath_is_world_regd(reg))
+ return -EINVAL;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ return -EINVAL;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
+ return 0;
+}
+
+static void ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (__ath_reg_dyn_country(wiphy, reg, request))
+ return;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x "
+ "dynamically updated by %s\n",
+ reg->current_rd,
+ reg_initiator_name(request->initiator));
+}
+
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+ if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ return true;
+
+ switch (reg->country_code) {
+ case CTRY_UNITED_STATES:
+ case CTRY_JAPAN1:
+ case CTRY_JAPAN2:
+ case CTRY_JAPAN3:
+ case CTRY_JAPAN4:
+ case CTRY_JAPAN5:
+ case CTRY_JAPAN6:
+ case CTRY_JAPAN7:
+ case CTRY_JAPAN8:
+ case CTRY_JAPAN9:
+ case CTRY_JAPAN10:
+ case CTRY_JAPAN11:
+ case CTRY_JAPAN12:
+ case CTRY_JAPAN13:
+ case CTRY_JAPAN14:
+ case CTRY_JAPAN15:
+ case CTRY_JAPAN16:
+ case CTRY_JAPAN17:
+ case CTRY_JAPAN18:
+ case CTRY_JAPAN19:
+ case CTRY_JAPAN20:
+ case CTRY_JAPAN21:
+ case CTRY_JAPAN22:
+ case CTRY_JAPAN23:
+ case CTRY_JAPAN24:
+ case CTRY_JAPAN25:
+ case CTRY_JAPAN26:
+ case CTRY_JAPAN27:
+ case CTRY_JAPAN28:
+ case CTRY_JAPAN29:
+ case CTRY_JAPAN30:
+ case CTRY_JAPAN31:
+ case CTRY_JAPAN32:
+ case CTRY_JAPAN33:
+ case CTRY_JAPAN34:
+ case CTRY_JAPAN35:
+ case CTRY_JAPAN36:
+ case CTRY_JAPAN37:
+ case CTRY_JAPAN38:
+ case CTRY_JAPAN39:
+ case CTRY_JAPAN40:
+ case CTRY_JAPAN41:
+ case CTRY_JAPAN42:
+ case CTRY_JAPAN43:
+ case CTRY_JAPAN44:
+ case CTRY_JAPAN45:
+ case CTRY_JAPAN46:
+ case CTRY_JAPAN47:
+ case CTRY_JAPAN48:
+ case CTRY_JAPAN49:
+ case CTRY_JAPAN50:
+ case CTRY_JAPAN51:
+ case CTRY_JAPAN52:
+ case CTRY_JAPAN53:
+ case CTRY_JAPAN54:
+ case CTRY_JAPAN55:
+ case CTRY_JAPAN56:
+ case CTRY_JAPAN57:
+ case CTRY_JAPAN58:
+ case CTRY_JAPAN59:
+ return false;
+ }
+
+ return true;
+}
+
+static void ath_reg_dyn_country_user(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ return;
+ if (!dynamic_country_user_possible(reg))
+ return;
+ ath_reg_dyn_country(wiphy, reg, request);
+}
+
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
- u16 country_code;
-
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -388,25 +506,12 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
sizeof(struct ath_regulatory));
break;
case NL80211_REGDOM_SET_BY_DRIVER:
+ break;
case NL80211_REGDOM_SET_BY_USER:
+ ath_reg_dyn_country_user(wiphy, reg, request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (!ath_is_world_regd(reg))
- break;
-
- country_code = ath_regd_find_country_by_name(request->alpha2);
- if (country_code == (u16) -1)
- break;
-
- reg->current_rd = COUNTRY_ERD_FLAG;
- reg->current_rd |= country_code;
-
- printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
- reg->current_rd);
- __ath_regd_init(reg);
-
- ath_reg_apply_world_flags(wiphy, request->initiator, reg);
-
+ ath_reg_dyn_country(wiphy, reg, request);
break;
}
}
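
In __ath_reg_dyn_country() above, a country hint becomes the new regdomain by OR-ing the looked-up country code with COUNTRY_ERD_FLAG, which marks the regdomain word as carrying a country code. A toy illustration follows; the flag value and the country code in it are assumptions made only for the example:

/* Illustrative sketch only: folding a country code into the regdomain
 * word, as __ath_reg_dyn_country() does above.
 */
#include <stdio.h>

#define COUNTRY_ERD_FLAG 0x8000		/* assumed value, for illustration only */

int main(void)
{
	unsigned int country_code = 0x348;	/* made-up country code */
	unsigned int current_rd;

	current_rd = COUNTRY_ERD_FLAG;		/* mark "this is a country code" */
	current_rd |= country_code;

	printf("current_rd = 0x%04x (country 0x%03x, flag %s)\n",
	       current_rd, current_rd & ~COUNTRY_ERD_FLAG,
	       (current_rd & COUNTRY_ERD_FLAG) ? "set" : "clear");
	return 0;
}
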
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 00000000000..591ebaea826
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
+config WCN36XX
+ tristate "Qualcomm Atheros WCN3660/3680 support"
+ depends on MAC80211 && HAS_DMA
+ ---help---
+ This module adds support for wireless adapters based on
+ Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+ If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+ bool "WCN36XX debugfs support"
+ depends on WCN36XX
+ ---help---
+	  Enable debugfs support.
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 00000000000..50c43b4382b
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y += main.o \
+ dxe.o \
+ txrx.o \
+ smd.o \
+ pmc.o \
+ debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 00000000000..ef44a2da644
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ char buf[3];
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ if (vif_priv->pw_state == WCN36XX_BMPS)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ break;
+ }
+ }
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ char buf[32];
+ int buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ }
+ }
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+ .open = simple_open,
+ .read = read_file_bool_bmps,
+ .write = write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ char buf[255], *tmp;
+ int buf_size;
+ u32 arg[WCN36xx_MAX_DUMP_ARGS];
+ int i;
+
+ memset(buf, 0, sizeof(buf));
+ memset(arg, 0, sizeof(arg));
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ tmp = buf;
+
+ for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+ char *begin;
+ begin = strsep(&tmp, " ");
+ if (begin == NULL)
+ break;
+
+ if (kstrtou32(begin, 0, &arg[i]) != 0)
+ break;
+ }
+
+	wcn36xx_info("DUMP args are %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+ arg[3], arg[4]);
+ wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+ .open = simple_open,
+ .write = write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data) \
+ do { \
+ struct dentry *d; \
+ d = debugfs_create_file(__stringify(name), \
+ mode, dfs->rootdir, \
+ priv_data, fop); \
+ dfs->file_##name.dentry = d; \
+ if (IS_ERR(d)) { \
+ wcn36xx_warn("Create the debugfs entry failed");\
+ dfs->file_##name.dentry = NULL; \
+ } \
+ } while (0)
+
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+ dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wcn->hw->wiphy->debugfsdir);
+ if (IS_ERR(dfs->rootdir)) {
+ wcn36xx_warn("Create the debugfs failed\n");
+ dfs->rootdir = NULL;
+ }
+
+ ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+ &fops_wcn36xx_bmps, wcn);
+ ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+ debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 00000000000..46307aa562d
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS 5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+ struct dentry *dentry;
+ u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+ struct dentry *rootdir;
+ struct wcn36xx_dfs_file file_bmps_switcher;
+ struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 00000000000..ee25786b444
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * we have 2 channels (high priority and low priority) for TX and 2 for RX.
+ * Data packets are transferred through the low-priority channels,
+ * management packets through the high-priority channels.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+ struct wcn36xx_dxe_ch *ch = is_low ?
+ &wcn->dxe_tx_l_ch :
+ &wcn->dxe_tx_h_ch;
+
+ return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+ addr, data);
+
+ writel(data, wcn->mmio + addr);
+}
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+ *data = readl(wcn->mmio + addr);
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+ addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+ int i;
+
+ for (i = 0; i < ch->desc_num && ctl; i++) {
+ next = ctl->next;
+ kfree(ctl);
+ ctl = next;
+ }
+}
+
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ int i;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+ if (!cur_ctl)
+ goto out_fail;
+
+ cur_ctl->ctl_blk_order = i;
+ if (i == 0) {
+ ch->head_blk_ctl = cur_ctl;
+ ch->tail_blk_ctl = cur_ctl;
+ } else if (ch->desc_num - 1 == i) {
+ prev_ctl->next = cur_ctl;
+ cur_ctl->next = ch->head_blk_ctl;
+ } else {
+ prev_ctl->next = cur_ctl;
+ }
+ prev_ctl = cur_ctl;
+ }
+
+ return 0;
+
+out_fail:
+ wcn36xx_dxe_free_ctl_block(ch);
+ return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+ int ret;
+
+ wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+ wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+ wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+ wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+ wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+ wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+ wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
+ wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
+
+ wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+ wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+ wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+ wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+ wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+ wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+ wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+ wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+ /* DXE control block allocation */
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+ if (ret)
+ goto out_err;
+
+	/* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
+ ret = wcn->ctrl_ops->smsm_change_state(
+ WCN36XX_SMSM_WLAN_TX_ENABLE,
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+ return 0;
+
+out_err:
+ wcn36xx_err("Failed to allocate DXE control blocks\n");
+ wcn36xx_dxe_free_ctl_blks(wcn);
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_desc *cur_dxe = NULL;
+ struct wcn36xx_dxe_desc *prev_dxe = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ size_t size;
+ int i;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
+ if (!wcn_ch->cpu_addr)
+ return -ENOMEM;
+
+ memset(wcn_ch->cpu_addr, 0, size);
+
+ cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ cur_ctl->desc = cur_dxe;
+ cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+ i * sizeof(struct wcn36xx_dxe_desc);
+
+ switch (wcn_ch->ch_type) {
+ case WCN36XX_DXE_CH_TX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+ break;
+ case WCN36XX_DXE_CH_TX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+ break;
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ }
+ if (0 == i) {
+ cur_dxe->phy_next_l = 0;
+ } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ } else if (i == (wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ cur_dxe->phy_next_l =
+ wcn_ch->head_blk_ctl->desc_phy_addr;
+ }
+ cur_ctl = cur_ctl->next;
+ prev_dxe = cur_dxe;
+ cur_dxe++;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+ struct wcn36xx_dxe_mem_pool *pool)
+{
+ int i, chunk_size = pool->chunk_size;
+ dma_addr_t bd_phy_addr = pool->phy_addr;
+ void *bd_cpu_addr = pool->virt_addr;
+ struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+ for (i = 0; i < ch->desc_num; i++) {
+		/* Only every second dxe needs a bd pointer,
+		 * the other one points to the skb data. */
+ if (!(i & 1)) {
+ cur->bd_phy_addr = bd_phy_addr;
+ cur->bd_cpu_addr = bd_cpu_addr;
+ bd_phy_addr += chunk_size;
+ bd_cpu_addr += chunk_size;
+ } else {
+ cur->bd_phy_addr = 0;
+ cur->bd_cpu_addr = NULL;
+ }
+ cur = cur->next;
+ }
+}
+
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+ int reg_data = 0;
+
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ &reg_data);
+
+ reg_data |= wcn_ch;
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ (int)reg_data);
+ return 0;
+}
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ dxe->dst_addr_l = dma_map_single(NULL,
+ skb_tail_pointer(skb),
+ WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ ctl->skb = skb;
+
+ return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ int i;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ wcn36xx_dxe_fill_skb(cur_ctl);
+ cur_ctl = cur_ctl->next;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+ int i;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ kfree_skb(cur->skb);
+ cur = cur->next;
+ }
+}
+
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb) {
+ wcn36xx_warn("Spurious TX complete indication\n");
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (status == 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /*
+	 * Use a do-while loop so that at least one element is processed:
+	 * when the ring is completely full, head and tail point to the same
+	 * element, and a plain while loop would not run at all.
+ */
+ do {
+ if (ctl->skb) {
+ dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ ctl->skb->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(ctl->skb);
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+ /* Keep frame until TX status comes */
+ ieee80211_free_txskb(wcn->hw, ctl->skb);
+ }
+ spin_lock_irqsave(&ctl->skb_lock, flags);
+ if (wcn->queues_stopped) {
+ wcn->queues_stopped = false;
+ ieee80211_wake_queues(wcn->hw);
+ }
+ spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+ ctl->skb = NULL;
+ }
+ ctl = ctl->next;
+ } while (ctl != ch->head_blk_ctl &&
+ !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+ ch->tail_blk_ctl = ctl;
+}
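
The comment above reap_tx_dxes() gives the reason for the do-while loop; the point is easiest to see on a toy ring. In the sketch below (plain indices instead of the driver's linked control blocks), head == tail means the ring is completely full, and a plain while loop would reap nothing:

/* Illustrative sketch only: why the loop above is do-while.  When the ring
 * is completely full, head and tail reference the same element, so a plain
 * while loop would exit immediately and reap nothing.
 */
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 4

static int reap(const bool *done, int tail, int head)
{
	int reaped = 0;

	do {
		if (!done[tail])	/* descriptor still owned by the hardware */
			break;
		reaped++;
		tail = (tail + 1) % RING_SIZE;
	} while (tail != head);

	return reaped;
}

int main(void)
{
	/* Ring completely full: head == tail and every slot has completed */
	bool done[RING_SIZE] = { true, true, true, true };

	printf("reaped %d of %d slots (head == tail)\n",
	       reap(done, 0, 0), RING_SIZE);
	return 0;
}

Run with head and tail both at slot 0, the do-while variant drains all four completed slots.
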
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+ int int_src, int_reason;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+ &int_reason);
+
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ }
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+ &int_reason);
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+ disable_irq_nosync(wcn->rx_irq);
+ wcn36xx_dxe_rx_frame(wcn);
+ enable_irq(wcn->rx_irq);
+ return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+ int ret;
+
+ ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+ IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc tx irq\n");
+ goto out_err;
+ }
+
+ ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+ "wcn36xx_rx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc rx irq\n");
+ goto out_txirq;
+ }
+
+ enable_irq_wake(wcn->rx_irq);
+
+ return 0;
+
+out_txirq:
+ free_irq(wcn->tx_irq, wcn);
+out_err:
+ return ret;
+
+}
+
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = ctl->skb;
+ dma_addr = dxe->dst_addr_l;
+ wcn36xx_dxe_fill_skb(ctl);
+
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ break;
+ default:
+ wcn36xx_warn("Unknown channel\n");
+ }
+
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ ctl = ctl->next;
+ dxe = ctl->desc;
+ }
+
+ ch->head_blk_ctl = ctl;
+
+ return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+ int int_src;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }
+
+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ /* Clean up all the INT within this channel */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+ }
+
+ if (!int_src)
+ wcn36xx_warn("No DXE interrupt pending\n");
+}
+
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+ size_t s;
+ void *cpu_addr;
+
+ /* Allocate BD headers for MGMT frames */
+
+	/* Where this comes from, ask QC */
+ wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ /* Allocate BD headers for DATA frames */
+
+	/* Where this comes from, ask QC */
+ wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->data_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ return 0;
+
+out_err:
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_err("Failed to allocate BD mempool\n");
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+ if (wcn->mgmt_mem_pool.virt_addr)
+ dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+ wcn->mgmt_mem_pool.virt_addr,
+ wcn->mgmt_mem_pool.phy_addr);
+
+ if (wcn->data_mem_pool.virt_addr) {
+ dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+ wcn->data_mem_pool.virt_addr,
+ wcn->data_mem_pool.phy_addr);
+ }
+}
+
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low)
+{
+ struct wcn36xx_dxe_ctl *ctl = NULL;
+ struct wcn36xx_dxe_desc *desc = NULL;
+ struct wcn36xx_dxe_ch *ch = NULL;
+ unsigned long flags;
+
+ ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+ ctl = ch->head_blk_ctl;
+
+ spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+ /*
+	 * If skb is not NULL, we have reached the tail of the ring and the
+	 * ring is full. Stop the queues to let mac80211 back off until the
+	 * ring has an empty slot again.
+ */
+ if (NULL != ctl->next->skb) {
+ ieee80211_stop_queues(wcn->hw);
+ wcn->queues_stopped = true;
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+ ctl->skb = NULL;
+ desc = ctl->desc;
+
+ /* Set source address of the BD we send */
+ desc->src_addr_l = ctl->bd_phy_addr;
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+ desc->ctrl = ch->ctrl_bd;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+ "BD >>> ", (char *)ctl->bd_cpu_addr,
+ sizeof(struct wcn36xx_tx_bd));
+
+ /* Set source address of the SKB we send */
+ ctl = ctl->next;
+ ctl->skb = skb;
+ desc = ctl->desc;
+ if (ctl->bd_cpu_addr) {
+ wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+ return -EINVAL;
+ }
+
+ desc->src_addr_l = dma_map_single(NULL,
+ ctl->skb->data,
+ ctl->skb->len,
+ DMA_TO_DEVICE);
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = ctl->skb->len;
+
+ /* set dxe descriptor to VALID */
+ desc->ctrl = ch->ctrl_skb;
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
+ (char *)ctl->skb->data, ctl->skb->len);
+
+ /* Move the head of the ring to the next empty descriptor */
+ ch->head_blk_ctl = ctl->next;
+
+ /*
+	 * When connected and trying to send a data frame, the chip can be in
+	 * sleep mode, and writing to the register will not wake it up.
+	 * Instead, notify the chip about the new frame through the SMSM bus.
+ */
+ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+ wcn->ctrl_ops->smsm_change_state(
+ 0,
+ WCN36XX_SMSM_WLAN_TX_ENABLE);
+ } else {
+ /* indicate End Of Packet and generate interrupt on descriptor
+ * done.
+ */
+ wcn36xx_dxe_write_register(wcn,
+ ch->reg_ctrl, ch->def_ctrl);
+ }
+
+ return 0;
+}
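
wcn36xx_dxe_tx_frame() above consumes two ring slots per frame, one descriptor pointing at the preallocated buffer descriptor (BD) and the next one pointing at the skb payload, and a still-occupied next slot is treated as a full ring. A toy model of that producer side (arrays instead of the driver's circular control-block list):

/* Illustrative sketch only: two ring slots per frame and the "next slot
 * still busy means the ring is full" check from the TX path above.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define RING_SIZE 8	/* even: slots are used in BD/payload pairs */

struct slot {
	bool has_bd;		/* even slots carry a BD pointer */
	const void *payload;	/* odd slots carry the frame payload */
};

static int enqueue_frame(struct slot *ring, int *head, const void *frame)
{
	int bd_slot = *head;
	int skb_slot = (*head + 1) % RING_SIZE;

	/* Same idea as "if (ctl->next->skb) stop the queues": the payload
	 * slot of this pair is still in flight, so the ring is full.
	 */
	if (ring[skb_slot].payload)
		return -1;

	ring[bd_slot].has_bd = true;	/* descriptor 1: the buffer descriptor */
	ring[skb_slot].payload = frame;	/* descriptor 2: the skb data */
	*head = (skb_slot + 1) % RING_SIZE;
	return 0;
}

int main(void)
{
	struct slot ring[RING_SIZE] = { { false, NULL } };
	static const char frame[] = "frame";
	int head = 0, queued = 0;

	while (enqueue_frame(ring, &head, frame) == 0)
		queued++;

	printf("queued %d frames before the ring filled up\n", queued);
	return 0;
}
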
+
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+ int reg_data = 0, ret;
+
+ reg_data = WCN36XX_DXE_REG_RESET;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+ /* Setting interrupt path */
+ reg_data = WCN36XX_DXE_CCU_INT;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+ /***************************************/
+ /* Init descriptors for TX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+ wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX LOW */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+ WCN36XX_DXE_WQ_TX_L);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+ /***************************************/
+ /* Init descriptors for TX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+ wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX HIGH */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+ WCN36XX_DXE_WQ_TX_H);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+ /***************************************/
+ /* Init descriptors for RX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+ WCN36XX_DXE_WQ_RX_L);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_L,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+ /***************************************/
+ /* Init descriptors for RX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+ WCN36XX_DXE_WQ_RX_H);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_H,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+ ret = wcn36xx_dxe_request_irqs(wcn);
+ if (ret < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+ free_irq(wcn->tx_irq, wcn);
+ free_irq(wcn->rx_irq, wcn);
+
+ if (wcn->tx_ack_skb) {
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ }
+
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 00000000000..c88562f85de
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+TX_LOW = DMA0
+TX_HIGH = DMA4
+RX_LOW = DMA1
+RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
+*/
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_BASE 0x03000000
+#define WCN36XX_DXE_MEM_REG 0x202000
+
+#define WCN36XX_DXE_CCU_INT 0xA0011
+#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+
+/* TODO These values must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L 0x328a44
+#define WCN36XX_DXE_CTRL_TX_H 0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H 0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d
+
+/* TODO These values must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_WQ_TX_L 0x17
+#define WCN36XX_DXE_WQ_TX_H 0x17
+#define WCN36XX_DXE_WQ_RX_L 0xB
+#define WCN36XX_DXE_WQ_RX_H 0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
+/* TODO These values must be calculated properly, not hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
+ /* #define WCN36XX_DXE_INT_CH6_MASK 0x00000040 */
+ /* #define WCN36XX_DXE_INT_CH5_MASK 0x00000020 */
+ #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
+ #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
+ /* #define WCN36XX_DXE_INT_CH2_MASK 0x00000004 */
+ #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
+ #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET 0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW 0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH 0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET 0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET 0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET 0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET 0x4C0
+
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR 0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR 0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE Descriptor address destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR 0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR 0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L 0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L 0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H 0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H 0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE 128
+
+#define WCN36XX_PKT_SIZE 0xF20
+enum wcn36xx_dxe_ch_type {
+ WCN36XX_DXE_CH_TX_L,
+ WCN36XX_DXE_CH_TX_H,
+ WCN36XX_DXE_CH_RX_L,
+ WCN36XX_DXE_CH_RX_H
+};
+
+/* amount of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L = 128,
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H = 10,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_L = 512,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_H = 40
+};
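+
+/*
+ * Rough sizing example: struct wcn36xx_dxe_desc below is 8 * 4 = 32 bytes,
+ * so a ring of WCN36XX_DXE_CH_DESC_NUMB_RX_L == 512 descriptors needs
+ * 512 * 32 = 16 KiB of descriptor memory (assuming the descriptors are
+ * laid out back to back in a single DMA-coherent allocation).
+ */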
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: is a union that consists of following bits:
+ * union {
+ * u32 valid :1; //0 = DMA stop, 1 = DMA continue with this
+ * //descriptor
+ * u32 transfer_type :2; //0 = Host to Host space
+ * u32 eop :1; //End of Packet
+ * u32 bd_handling :1; //if transferType = Host to BMU, then 0
+ * // means first 128 bytes contain BD, and 1
+ * // means create new empty BD
+ * u32 siq :1; // SIQ
+ * u32 diq :1; // DIQ
+ * u32 pdu_rel :1; //0 = don't release BD and PDUs when done,
+ * // 1 = release them
+ * u32 bthld_sel :4; //BMU Threshold Select
+ * u32 prio :3; //Specifies the priority level to use for
+ * // the transfer
+ * u32 stop_channel :1; //1 = DMA stops processing further, channel
+ * //requires re-enabling after this
+ * u32 intr :1; //Interrupt on Descriptor Done
+ * u32 rsvd :1; //reserved
+ * u32 size :14;//14 bits used - ignored for BMU transfers,
+ * //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+ u32 ctrl;
+ u32 fr_len;
+
+ u32 src_addr_l;
+ u32 dst_addr_l;
+ u32 phy_next_l;
+ u32 src_addr_h;
+ u32 dst_addr_h;
+ u32 phy_next_h;
+} __packed;
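+
+/*
+ * Bit 0 of ctrl is the valid bit (WCN36XX_DXE_CTRL_VALID_MASK above):
+ * 0 tells the engine to stop, 1 to continue with this descriptor. A
+ * minimal sketch of testing and setting it (illustrative only; the
+ * driver normally writes the precomputed ch->ctrl_bd / ch->ctrl_skb
+ * values wholesale):
+ *
+ * bool busy = desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK;
+ * desc->ctrl |= WCN36XX_DXE_CTRL_VALID_MASK;
+ */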
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+ struct wcn36xx_dxe_ctl *next;
+ struct wcn36xx_dxe_desc *desc;
+ unsigned int desc_phy_addr;
+ int ctl_blk_order;
+ struct sk_buff *skb;
+ spinlock_t skb_lock;
+ void *bd_cpu_addr;
+ dma_addr_t bd_phy_addr;
+};
+
+struct wcn36xx_dxe_ch {
+ enum wcn36xx_dxe_ch_type ch_type;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ enum wcn36xx_dxe_ch_desc_num desc_num;
+ /* DXE control block ring */
+ struct wcn36xx_dxe_ctl *head_blk_ctl;
+ struct wcn36xx_dxe_ctl *tail_blk_ctl;
+
+ /* DXE channel specific configs */
+ u32 dxe_wq;
+ u32 ctrl_bd;
+ u32 ctrl_skb;
+ u32 reg_ctrl;
+ u32 def_ctrl;
+};
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+ int chunk_size;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
+#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 00000000000..c02dbc61872
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4657 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+ API VERSIONING INFORMATION
+
+ The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+ The MAJOR is incremented for major product/architecture changes
+ (and then MINOR/VERSION/REVISION are zeroed)
+ The MINOR is incremented for minor product/architecture changes
+ (and then VERSION/REVISION are zeroed)
+ The VERSION is incremented if a significant API change occurs
+ (and then REVISION is zeroed)
+ The REVISION is incremented if an insignificant API change occurs
+ or if a new API is added
+ All values are in the range 0..255 (ie they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
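+
+/*
+ * Taken together these correspond to host API version 1.4.1.2
+ * (major.minor.version.revision); in struct wcnss_wlan_version defined
+ * below that would be, roughly,
+ * { .major = 1, .minor = 4, .version = 1, .revision = 2 }.
+ */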
+
+/* This is to force the compiler to use the maximum size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE 0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE 0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC 8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC 4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN 4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED (1 << 2)
+#define PARAM_llACOEXIST_CHANGED (1 << 3)
+#define PARAM_llBCOEXIST_CHANGED (1 << 4)
+#define PARAM_llGCOEXIST_CHANGED (1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED (1<<6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED (1<<7)
+#define PARAM_RIFS_MODE_CHANGED (1<<8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED (1<<9)
+#define PARAM_OBSS_MODE_CHANGED (1<<10)
+#define PARAM_BEACON_UPDATE_MASK \
+ (PARAM_BCN_INTERVAL_CHANGED | \
+ PARAM_SHORT_PREAMBLE_CHANGED | \
+ PARAM_SHORT_SLOT_TIME_CHANGED | \
+ PARAM_llACOEXIST_CHANGED | \
+ PARAM_llBCOEXIST_CHANGED | \
+ PARAM_llGCOEXIST_CHANGED | \
+ PARAM_HT20MHZCOEXIST_CHANGED | \
+ PARAM_NON_GF_DEVICES_PRESENT_CHANGED | \
+ PARAM_RIFS_MODE_CHANGED | \
+ PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED | \
+ PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH 64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+ /* Init/De-Init */
+ WCN36XX_HAL_START_REQ = 0,
+ WCN36XX_HAL_START_RSP = 1,
+ WCN36XX_HAL_STOP_REQ = 2,
+ WCN36XX_HAL_STOP_RSP = 3,
+
+ /* Scan */
+ WCN36XX_HAL_INIT_SCAN_REQ = 4,
+ WCN36XX_HAL_INIT_SCAN_RSP = 5,
+ WCN36XX_HAL_START_SCAN_REQ = 6,
+ WCN36XX_HAL_START_SCAN_RSP = 7,
+ WCN36XX_HAL_END_SCAN_REQ = 8,
+ WCN36XX_HAL_END_SCAN_RSP = 9,
+ WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+ WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+ /* HW STA configuration/deconfiguration */
+ WCN36XX_HAL_CONFIG_STA_REQ = 12,
+ WCN36XX_HAL_CONFIG_STA_RSP = 13,
+ WCN36XX_HAL_DELETE_STA_REQ = 14,
+ WCN36XX_HAL_DELETE_STA_RSP = 15,
+ WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+ WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+ WCN36XX_HAL_DELETE_BSS_REQ = 18,
+ WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+ /* Infra STA association */
+ WCN36XX_HAL_JOIN_REQ = 20,
+ WCN36XX_HAL_JOIN_RSP = 21,
+ WCN36XX_HAL_POST_ASSOC_REQ = 22,
+ WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+ /* Security */
+ WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+ WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+ WCN36XX_HAL_SET_STAKEY_REQ = 26,
+ WCN36XX_HAL_SET_STAKEY_RSP = 27,
+ WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+ WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+ WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+ WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+ /* QoS Related */
+ WCN36XX_HAL_ADD_TS_REQ = 32,
+ WCN36XX_HAL_ADD_TS_RSP = 33,
+ WCN36XX_HAL_DEL_TS_REQ = 34,
+ WCN36XX_HAL_DEL_TS_RSP = 35,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+ WCN36XX_HAL_ADD_BA_REQ = 38,
+ WCN36XX_HAL_ADD_BA_RSP = 39,
+ WCN36XX_HAL_DEL_BA_REQ = 40,
+ WCN36XX_HAL_DEL_BA_RSP = 41,
+
+ WCN36XX_HAL_CH_SWITCH_REQ = 42,
+ WCN36XX_HAL_CH_SWITCH_RSP = 43,
+ WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+ WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+ WCN36XX_HAL_GET_STATS_REQ = 46,
+ WCN36XX_HAL_GET_STATS_RSP = 47,
+ WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+ WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+ WCN36XX_HAL_MISSED_BEACON_IND = 50,
+ WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+ WCN36XX_HAL_MIC_FAILURE_IND = 52,
+ WCN36XX_HAL_FATAL_ERROR_IND = 53,
+ WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+ /* NV Interface */
+ WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+ WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+ WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+ WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+ WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+ WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+ WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+ WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+ WCN36XX_HAL_SEND_BEACON_REQ = 63,
+ WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+ WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+ WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+ WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+ /* PTT interface support */
+ WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+ WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+ /* BTAMP related events */
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+ WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+ WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+ WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+ WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+ WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+ WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+ WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+ WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+ WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+ WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+ WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+ WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+ WCN36XX_HAL_GET_RSSI_REQ = 92,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+ WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+ WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+ WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+ WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+ WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+ WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+ WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+ WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+ WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+ WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+ WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+ WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+ WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+ WCN36XX_HAL_GET_RSSI_RSP = 113,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+ /* 11k related events */
+ WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+ WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+ /* 11R related msgs */
+ WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+ WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+ /* P2P WLAN_FEATURE_P2P */
+ WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+ WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+ /* WLAN Dump commands */
+ WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+ WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+ /* OEM_DATA FEATURE SUPPORT */
+ WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+ WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+ /* ADD SELF STA REQ and RSP */
+ WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+ WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+ /* DEL SELF STA SUPPORT */
+ WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+ WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+ /* Coex Indication */
+ WCN36XX_HAL_COEX_IND = 129,
+
+ /* Tx Complete Indication */
+ WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+ /* Host Suspend/resume messages */
+ WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+ WCN36XX_HAL_HOST_RESUME_REQ = 132,
+ WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+ WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+ WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+ WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+ WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+ WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+ WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+ WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+ WCN36XX_HAL_RADAR_DETECT_IND = 143,
+ WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+ WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+ WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+ /* PNO messages */
+ WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+ WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+ WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+ WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+ WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+ WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+ WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+ WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+ WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+ WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+ WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+ WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+ /*
+ * This is a temporary fix. It should be removed once the Host and
+ * Riva code are in sync.
+ */
+ WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+ WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+ WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+ WCN36XX_HAL_TSM_STATS_REQ = 168,
+ WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+ /* wake reason indication (WOW) */
+ WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+ /* GTK offload support */
+ WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+ WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+ WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+ WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+ WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+ WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+ WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+ WCN36XX_HAL_DEL_BA_IND = 188,
+ WCN36XX_HAL_DHCP_START_IND = 189,
+ WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+ WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+ WCN36XX_HAL_MSG_VERSION0 = 0,
+ WCN36XX_HAL_MSG_VERSION1 = 1,
+ /* defined as 2 bytes of data */
+ WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+ WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+ DRIVER_TYPE_PRODUCTION = 0,
+ DRIVER_TYPE_MFG = 1,
+ DRIVER_TYPE_DVT = 2,
+ DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+ HAL_STOP_TYPE_SYS_RESET,
+ HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+ HAL_STOP_TYPE_RF_KILL,
+ HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+ HAL_SYS_MODE_NORMAL,
+ HAL_SYS_MODE_LEARN,
+ HAL_SYS_MODE_SCAN,
+ HAL_SYS_MODE_PROMISC,
+ HAL_SYS_MODE_SUSPEND_LINK,
+ HAL_SYS_MODE_ROAM_SCAN,
+ HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+ HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+ /* 20MHz IF bandwidth centered on IF carrier */
+ PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+ /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+ /* 40MHz IF bandwidth centered on IF carrier */
+ PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+ /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+ /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+ /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+ PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing(SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+ /* Static SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+ /* Dynamic SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+ /* reserved */
+ WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+ /* SM Power Save disabled */
+ WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+ WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+ STA_TAURUS = 0,
+ STA_TITAN,
+ STA_POLARIS,
+ STA_11b,
+ STA_11bg,
+ STA_11a,
+ STA_11n,
+ STA_11ac,
+ STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES 4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES 8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES 3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET 16
+
+enum wcn36xx_hal_bss_type {
+ WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+ /* Added for softAP support */
+ WCN36XX_HAL_INFRA_AP_MODE,
+
+ WCN36XX_HAL_IBSS_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_STA_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_AP_MODE,
+
+ WCN36XX_HAL_AUTO_MODE,
+
+ WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+ WCN36XX_HAL_11A_NW_TYPE,
+ WCN36XX_HAL_11B_NW_TYPE,
+ WCN36XX_HAL_11G_NW_TYPE,
+ WCN36XX_HAL_11N_NW_TYPE,
+ WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX 12
+
+enum wcn36xx_hal_ht_operating_mode {
+ /* No Protection */
+ WCN36XX_HAL_HT_OP_MODE_PURE,
+
+ /* Overlap Legacy device present, protection is optional */
+ WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+ /* No legacy device, but 20 MHz HT present */
+ WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+ /* Protection is required */
+ WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+ WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+ WCN36XX_HAL_ED_NONE,
+ WCN36XX_HAL_ED_WEP40,
+ WCN36XX_HAL_ED_WEP104,
+ WCN36XX_HAL_ED_TKIP,
+ WCN36XX_HAL_ED_CCMP,
+ WCN36XX_HAL_ED_WPI,
+ WCN36XX_HAL_ED_AES_128_CMAC,
+ WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN 16
+#define WLAN_WAPI_KEY_RSC_LEN 16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH 32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+ WCN36XX_HAL_TX_ONLY,
+ WCN36XX_HAL_RX_ONLY,
+ WCN36XX_HAL_TX_RX,
+ WCN36XX_HAL_TX_DEFAULT,
+ WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+ WCN36XX_HAL_WEP_STATIC,
+ WCN36XX_HAL_WEP_DYNAMIC,
+ WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+ WCN36XX_HAL_LINK_IDLE_STATE = 0,
+ WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+ WCN36XX_HAL_LINK_AP_STATE = 3,
+ WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+ /* BT-AMP Case */
+ WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+ WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+ WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+ WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+ /* Reserved for HAL Internal Use */
+ WCN36XX_HAL_LINK_LEARN_STATE = 9,
+ WCN36XX_HAL_LINK_SCAN_STATE = 10,
+ WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+ WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+ WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+ WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+ WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+ HAL_SUMMARY_STATS_INFO = 0x00000001,
+ HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ HAL_PER_STA_STATS_INFO = 0x00000020
+};
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+ BTAMP_EVENT_CONNECTION_START,
+ BTAMP_EVENT_CONNECTION_STOP,
+ BTAMP_EVENT_CONNECTION_TERMINATED,
+
+ /* This and beyond are invalid values */
+ BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+ PE_SUMMARY_STATS_INFO = 0x00000001,
+ PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ PE_PER_STA_STATS_INFO = 0x00000020,
+
+ /* This and beyond are invalid values */
+ PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID 0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA 1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA 2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE 3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN 4
+#define WCN36XX_HAL_CFG_CAL_PERIOD 5
+#define WCN36XX_HAL_CFG_CAL_CONTROL 6
+#define WCN36XX_HAL_CFG_PROXIMITY 7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY 8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME 9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU 10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD 11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT 12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT 13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD 14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO 15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE 16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO 17
+#define WCN36XX_HAL_CFG_FIXED_RATE 18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY 19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY 20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY 21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION 22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ 23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ 24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ 25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ 26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS 27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT 28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER 29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR 30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE 31
+#define WCN36XX_HAL_CFG_STATS_PERIOD 32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION 33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED 34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD 35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK 36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE 37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO 38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI 39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH 40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS 41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD 42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG 43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG 44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG 45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG 46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS 47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL 48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD 49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER 50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL 51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD 52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD 53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM 55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM 56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE 57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT 58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN 59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI 60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS 61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI 62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS 63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE 64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST 65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20 66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40 67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20 68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40 69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING 70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4 72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5 73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD 74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP 75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE 76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK 77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT 79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION 80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT 81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT 82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST 83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT 84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT 85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT 86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT 87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN 88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN 89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN 90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN 91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT 92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN 93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC 94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP 95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO 96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER 97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT 98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION 99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER 100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT 101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
+#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+
+/* Message definitions - all the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+ u8 revision;
+ u8 version;
+ u8 minor;
+ u8 major;
+} __packed;
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+ u8 id;
+
+ /* 0 for multicast */
+ u8 unicast;
+
+ enum ani_key_direction direction;
+
+ /* Usage is unknown */
+ u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+ /* = 1 for authenticator, = 0 for supplicant */
+ u8 pae_role;
+
+ u16 length;
+ u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params - moved here since it is shared by
+ * configbss/setstakey msgs
+ */
+struct wcn36xx_hal_set_sta_key_params {
+ /* STA Index */
+ u16 sta_index;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* STATIC/DYNAMIC - valid only for WEP */
+ enum ani_wep_type wep_type;
+
+ /* Default WEP key, valid only for static WEP, must be between 0 and 3. */
+ u8 def_wep_idx;
+
+ /* valid only for non-static WEP encryptions */
+ struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /*
+ * Control for Replay Count: 1 = single TID based replay count on TX,
+ * 0 = per TID based replay count on TX
+ */
+ u8 single_tid_rc;
+
+} __packed;
+
+/* 4-byte control message header used by HAL*/
+struct wcn36xx_hal_msg_header {
+ enum wcn36xx_hal_host_msg_type msg_type:16;
+ enum wcn36xx_hal_host_msg_version msg_version:16;
+ u32 len;
+} __packed;
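+
+/*
+ * A sketch of how a request header might be filled in, using the MAC
+ * start request defined further below (field values are illustrative;
+ * the actual messages are built elsewhere in the driver):
+ *
+ * struct wcn36xx_hal_mac_start_req_msg msg;
+ *
+ * msg.header.msg_type = WCN36XX_HAL_START_REQ;
+ * msg.header.msg_version = WCN36XX_HAL_MSG_VERSION0;
+ * msg.header.len = sizeof(msg);
+ */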
+
+/* Config format required by HAL for each CFG item*/
+struct wcn36xx_hal_cfg {
+ /* Cfg Id. The Id required by HAL is exported by HAL
+ * in a shared header file between UMAC and HAL. */
+ u16 id;
+
+ /* Length of the Cfg. This parameter is used to go to the next cfg
+ * in the TLV format.*/
+ u16 len;
+
+ /* Padding bytes for unaligned addresses */
+ u16 pad_bytes;
+
+ /* Reserve bytes for making cfgVal to align address */
+ u16 reserve;
+
+ /* Following the uCfgLen field there should be 'uCfgLen' bytes
+ * containing the uCfgValue; u8 uCfgValue[uCfgLen] */
+} __packed;
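+
+/*
+ * Example layout (a sketch, assuming a 4-byte value): a single
+ * WCN36XX_HAL_CFG_RTS_THRESHOLD entry would be the 8-byte header
+ * { .id = WCN36XX_HAL_CFG_RTS_THRESHOLD, .len = 4, .pad_bytes = 0,
+ * .reserve = 0 } immediately followed by the 4 value bytes; the next
+ * cfg header then starts after those len (plus any pad_bytes) bytes.
+ */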
+
+struct wcn36xx_hal_mac_start_parameters {
+ /* Driver Type - Production or FTM etc. */
+ enum driver_type type;
+
+ /* Length of the config buffer */
+ u32 len;
+
+ /* Following this there is a TLV formatted buffer of length
+ * "len" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+ /* config buffer must start in TLV format just here */
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+ /* success or failure */
+ u16 status;
+
+ /* Max number of STA supported by the device */
+ u8 stations;
+
+ /* Max number of BSS supported by the device */
+ u8 bssids;
+
+ /* API Version */
+ struct wcnss_wlan_version version;
+
+ /* CRM build information */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+ /* hardware/chipset/misc version information */
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+ /* The reason for which the device is being stopped */
+ enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+ /*
+ * Note: The length specified in tHalUpdateCfgReqMsg messages should be
+ * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
+ */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Length of the config buffer. Allows UMAC to update multiple CFGs */
+ u32 len;
+
+ /*
+ * Following this there is a TLV formatted buffer of length
+ * "uConfigBufferLen" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+
+} __packed;
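+
+/*
+ * Worked example of the note above (assuming one cfg entry with a
+ * 4-byte value): uConfigBufferLen = 8-byte cfg header + 4 value bytes
+ * = 12, so header.len = sizeof(struct wcn36xx_hal_update_cfg_req_msg)
+ * + 12 = (8 + 4) + 12 = 24 bytes.
+ */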
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+ u8 subType:4;
+ u8 type:2;
+ u8 protVer:2;
+
+ u8 order:1;
+ u8 wep:1;
+ u8 moreData:1;
+ u8 powerMgmt:1;
+ u8 retry:1;
+ u8 moreFrag:1;
+ u8 fromDS:1;
+ u8 toDS:1;
+
+#else
+
+ u8 protVer:2;
+ u8 type:2;
+ u8 subType:4;
+
+ u8 toDS:1;
+ u8 fromDS:1;
+ u8 moreFrag:1;
+ u8 retry:1;
+ u8 powerMgmt:1;
+ u8 moreData:1;
+ u8 wep:1;
+ u8 order:1;
+
+#endif
+
+};
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+ u8 fragNum:4;
+ u8 seqNumLo:4;
+ u8 seqNumHi:8;
+};
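+
+/*
+ * Example: an 802.11 sequence number of 0x2A5 with fragment 0 splits as
+ * fragNum = 0, seqNumLo = 0x5 (low 4 bits) and seqNumHi = 0x2A (high
+ * 8 bits).
+ */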
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+ struct wcn36xx_hal_mac_frame_ctl fc;
+ u8 durationLo;
+ u8 durationHi;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssId[6];
+ struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID 2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+ u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+ u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_len;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+ /* Single NoA usage in Scanning */
+ u8 use_noa;
+
+ /* Indicates the scan duration (in ms) */
+ u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to scan */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u32 start_tsf[2];
+ u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to stop scanning. Not really used, but
+ * retained for symmetry with the "start scan" message. It can also
+ * help with error checking if needed. */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Identifies the operational state of the AP/STA
+ * LEARN - AP Role SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* Operating channel to tune to. */
+ u8 oper_channel;
+
+ /* Channel Bonding state. If 20/40 MHz is operational, this will
+ * indicate the 40 MHz extension channel in combination with the
+ * control channel */
+ enum phy_chan_bond_state cb_state;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+ HW_RATE_INDEX_1MBPS = 0x82,
+ HW_RATE_INDEX_2MBPS = 0x84,
+ HW_RATE_INDEX_5_5MBPS = 0x8B,
+ HW_RATE_INDEX_6MBPS = 0x0C,
+ HW_RATE_INDEX_9MBPS = 0x12,
+ HW_RATE_INDEX_11MBPS = 0x96,
+ HW_RATE_INDEX_12MBPS = 0x18,
+ HW_RATE_INDEX_18MBPS = 0x24,
+ HW_RATE_INDEX_24MBPS = 0x30,
+ HW_RATE_INDEX_36MBPS = 0x48,
+ HW_RATE_INDEX_48MBPS = 0x60,
+ HW_RATE_INDEX_54MBPS = 0x6C
+};
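+
+/*
+ * Observation from the values above (not a documented encoding): each
+ * value is the rate in 500 kbps units, with the top bit (0x80) set for
+ * the DSSS/CCK rates, e.g. 54 Mbps -> 108 units -> 0x6C and
+ * 11 Mbps -> 22 units -> 0x80 | 22 = 0x96.
+ */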
+
+struct wcn36xx_hal_supported_rates {
+ /*
+ * For Self STA Entry: this represents Self Mode.
+ * For Peer Stations, this represents the mode of the peer.
+ * On Station:
+ *
+ * --this mode is updated when PE adds the Self Entry.
+ *
+ * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+ * is used to indicate the mode of the AP.
+ *
+ * ON AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+ * for that BSS is used to indicate the self mode of the AP.
+ *
+ * -- OR when a station is associated, PE sends 'ADD_STA' message
+ * with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+ /* 11b, 11a and aniLegacyRates are IE rates which give the rate in
+ * units of 500 kbps */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+ /* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+ * supported). The first 26 bits are reserved for those Titan rates,
+ * the last 4 bits (bit 28-31) are for Taurus, and 2 bits (bit 26-27)
+ * are reserved. */
+ /* Titan and Taurus Rates */
+ u32 enhanced_rate_bitmap;
+
+ /*
+ * 0-76 bits used, remaining reserved
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /*
+ * RX Highest Supported Data Rate defines the highest data
+ * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from the "Supported MCS Set" field inside
+ * the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_params {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* TODO move this parameter to the end for 3680 */
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+ * to HAL and HAL uses this flag to pick up appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+ * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+ * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* TODO add this parameter for 3680. */
+ /* Reserved to align next field on a dword boundary */
+ /* u8 reserved; */
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+ * to HAL and HAL uses this flag to pick up appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+ * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+ * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+ /* success or failure */
+ u32 status;
+
+ /* Station index; valid only when the 'status' field value is SUCCESS */
+ u8 sta_index;
+
+ /* BSSID Index of BSS to which the station is associated */
+ u8 bssid_index;
+
+ /* DPU Index for PTK */
+ u8 dpu_index;
+
+ /* DPU Index for GTK */
+ u8 bcast_dpu_index;
+
+ /* DPU Index for IGTK */
+ u8 bcast_mgmt_dpu_idx;
+
+ /* PTK DPU signature */
+ u8 uc_ucast_sig;
+
+ /* GTK DPU signature */
+ u8 uc_bcast_sig;
+
+ /* IGTK DPU signature */
+ u8 uc_mgmt_sig;
+
+ u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Index of STA to delete */
+ u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Index of STA deleted */
+ u8 sta_id;
+} __packed;
+
+/* 12 bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 */
+struct wcn36xx_hal_rate_set {
+ u8 num_rates;
+ u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:1;
+ u8 aci:2;
+ u8 acm:1;
+ u8 aifsn:4;
+#else
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 rsvd:1;
+#endif
+} __packed;
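+
+/* A minimal illustrative sketch, not part of the original interface,
+ * showing how the little-endian layout above packs the EDCA ACI/AIFSN
+ * octet: AIFSN in b0..b3, ACM in b4, ACI in b5..b6. The helper name is
+ * an assumption. */
+static inline u8 wcn36xx_hal_pack_aci_aifsn(u8 aci, u8 acm, u8 aifsn)
+{
+	return (aifsn & 0xf) | ((acm & 0x1) << 4) | ((aci & 0x3) << 5);
+}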
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 max:4;
+ u8 min:4;
+#else
+ u8 min:4;
+ u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+ struct wcn36xx_hal_aci_aifsn aci;
+ struct wcn36xx_hal_mac_cw cw;
+ u16 txop_limit;
+} __packed;
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ * in the software system. */
+enum wcn36xx_hal_con_mode {
+ WCN36XX_HAL_STA_MODE = 0,
+
+	/* To support softAP mode. This is misleading:
+	   it means AP mode only. */
+ WCN36XX_HAL_STA_SAP_MODE = 1,
+
+ WCN36XX_HAL_P2P_CLIENT_MODE,
+ WCN36XX_HAL_P2P_GO_MODE,
+ WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+ HAL_STA = 1,
+ HAL_SAP = 2,
+
+	/* To support STA and softAP mode, i.e. STA+AP mode */
+ HAL_STA_SAP = 3,
+
+ HAL_P2P_CLIENT = 4,
+ HAL_P2P_GO = 8,
+ HAL_MAX_CONCURRENCY_PERSONA = 4
+};
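+
+/* A minimal illustrative sketch, not part of the original interface: the
+ * values above are per-mode bits, so concurrent personas are expressed
+ * by OR-ing them (HAL_STA_SAP above is simply HAL_STA | HAL_SAP). The
+ * helper name is an assumption. */
+static inline u8 wcn36xx_hal_concurrency_mask(bool sta, bool sap,
+					      bool p2p_cli, bool p2p_go)
+{
+	return (sta ? HAL_STA : 0) | (sap ? HAL_SAP : 0) |
+	       (p2p_cli ? HAL_P2P_CLIENT : 0) | (p2p_go ? HAL_P2P_GO : 0);
+}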
+
+struct wcn36xx_hal_config_bss_params {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+	/* Operational Mode: AP = 0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* TODO move sta to the end for 3680 */
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params sta;
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+	/* HAL should update the existing BSS entry, if this flag is set.
+	 * UMAC will set this flag in case of reassoc, where we want to
+	 * reuse the old BSSID and still return success. 0 = Add, 1 =
+	 * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+	/* HT operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+	/* To enable hidden SSID */
+	u8 hidden_ssid;
+
+	/* To enable/disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+	/* Boolean to indicate if EDCA params are valid. UMAC might not
+	 * have valid EDCA params or might not desire to apply EDCA params
+	 * during config BSS. 0 implies not valid; non-zero implies
+	 * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+	/* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+	/* Persona for the BSS; can be STA, AP, GO or CLIENT. Values are the
+	 * same as enum wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+	/* Operational Mode: AP = 0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+	/* HAL should update the existing BSS entry, if this flag is set.
+	 * UMAC will set this flag in case of reassoc, where we want to
+	 * reuse the old BSSID and still return success. 0 = Add, 1 =
+	 * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+	/* HT operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+	/* To enable hidden SSID */
+	u8 hidden_ssid;
+
+	/* To enable/disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+	/* Boolean to indicate if EDCA params are valid. UMAC might not
+	 * have valid EDCA params or might not desire to apply EDCA params
+	 * during config BSS. 0 implies not valid; non-zero implies
+	 * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+	/* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+	/* Persona for the BSS; can be STA, AP, GO or CLIENT. Values are the
+	 * same as enum wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index allocated by HAL */
+ u8 bss_index;
+
+ /* DPU descriptor index for PTK */
+ u8 dpu_desc_index;
+
+ /* PTK DPU signature */
+ u8 ucast_dpu_signature;
+
+ /* DPU descriptor index for GTK */
+ u8 bcast_dpu_desc_indx;
+
+ /* GTK DPU signature */
+ u8 bcast_dpu_signature;
+
+ /* DPU descriptor for IGTK */
+ u8 mgmt_dpu_desc_index;
+
+ /* IGTK DPU signature */
+ u8 mgmt_dpu_signature;
+
+ /* Station Index for BSS entry */
+ u8 bss_sta_index;
+
+ /* Self station index for this BSS */
+ u8 bss_self_sta_index;
+
+ /* Bcast station for buffering bcast frames in AP role */
+ u8 bss_bcast_sta_idx;
+
+ /* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
+ u8 mac[ETH_ALEN];
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS index to be deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index that has been deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the BSSID to which STA is going to associate */
+ u8 bssid[ETH_ALEN];
+
+ /* Indicates the channel to switch to. */
+ u8 channel;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* link State */
+ enum wcn36xx_hal_link_state link_state;
+
+ /* Max TX power */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct wcn36xx_hal_config_sta_params sta_params;
+ struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct config_sta_rsp_params sta_rsp_params;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Number of keys */
+ u8 num_keys;
+
+ /* Array of keys. */
+ struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /* Control for Replay Count, 1= Single TID based replay count on Tx
+ * 0 = Per TID based replay count on TX */
+ u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+ struct wcn36xx_hal_set_bss_key_req_msg Msg;
+ u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used to configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from a BSS the station is associated with; otherwise
+ * a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+	/* STATIC/DYNAMIC. Used in nullifying key descriptors for
+	 * static/dynamic keys */
+ enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used by PE to Remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA Index */
+ u16 sta_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* Whether to invalidate the Broadcast key or Unicast key. In case
+ * of WEP, the same key is used for both broadcast and unicast. */
+ u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ tSirMacAddr self_mac_addr;
+ u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Channel number */
+ u8 channel_number;
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ u8 tx_mgmt_power;
+
+ /* Max TX power */
+ u8 max_tx_power;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+	/* VO WIFI comment: BSSID needed to identify session. As the
+	 * request has power constraints, this should be applied only to
+	 * that session. Since MTU timing and EDCA are sessionized, this
+	 * struct needs to be sessionized and bssid needs to be out of the
+	 * VOWifi feature flag. V IMP: Keep the bssId field at the end of
+	 * this msg. It is used to maintain backward compatibility by way
+	 * of ignoring it if using new host/old FW or old host/new FW,
+	 * since it is at the end of this struct
+	 */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Status */
+ u32 status;
+
+ /* Channel number - same as in request */
+ u8 channel_number;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+
+ /* BSSID needed to identify session - same as in request */
+ u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*BSS Index */
+ u16 bss_index;
+
+ /* Best Effort */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* Background */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* Video */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* Voice */
+ struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct dpu_stats_params {
+	/* Index of STA to which the statistics apply */
+ u16 sta_index;
+
+ /* Encryption mode */
+ u8 enc_mode;
+
+ /* status */
+ u32 status;
+
+ /* Statistics */
+ u32 send_blocks;
+ u32 recv_blocks;
+ u32 replays;
+ u8 mic_error_cnt;
+ u32 prot_excl_cnt;
+ u16 format_err_cnt;
+ u16 un_decryptable_cnt;
+ u32 decrypt_err_cnt;
+ u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+
+ /* Categories of stats requested as specified in eHalStatsMask */
+ u32 stats_mask;
+};
+
+struct ani_summary_stats_info {
+ /* Total number of packets(per AC) that were successfully
+ * transmitted with retries */
+ u32 retry_cnt[4];
+
+ /* The number of MSDU packets and MMPDU frames per AC that the
+ * 802.11 station successfully transmitted after more than one
+ * retransmission attempt */
+ u32 multiple_retry_cnt[4];
+
+ /* Total number of packets(per AC) that were successfully
+ * transmitted (with and without retries, including multi-cast,
+ * broadcast) */
+ u32 tx_frm_cnt[4];
+
+ /* Total number of packets that were successfully received (after
+ * appropriate filter rules including multi-cast, broadcast) */
+ u32 rx_frm_cnt;
+
+ /* Total number of duplicate frames received successfully */
+ u32 frm_dup_cnt;
+
+	/* Total number of packets (per AC) that failed to transmit */
+ u32 fail_cnt[4];
+
+ /* Total number of RTS/CTS sequence failures for transmission of a
+ * packet */
+ u32 rts_fail_cnt;
+
+	/* Total number of packets that failed to transmit because no ACK
+	 * was received from the remote entity */
+ u32 ack_fail_cnt;
+
+	/* Total number of RTS/CTS sequence successes for transmission of a
+	 * packet */
+ u32 rts_succ_cnt;
+
+ /* The sum of the receive error count and dropped-receive-buffer
+ * error count. HAL will provide this as a sum of (FCS error) +
+ * (Fail get BD/PDU in HW) */
+ u32 rx_discard_cnt;
+
+ /*
+ * The receive error count. HAL will provide the RxP FCS error
+ * global counter. */
+ u32 rx_error_cnt;
+
+ /* The sum of the transmit-directed byte count, transmit-multicast
+ * byte count and transmit-broadcast byte count. HAL will sum TPE
+ * UC/MC/BCAST global counters to provide this. */
+ u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+ /* Legacy rates */
+ HAL_TX_RATE_LEGACY = 0x1,
+
+ /* HT20 rates */
+ HAL_TX_RATE_HT20 = 0x2,
+
+ /* HT40 rates */
+ HAL_TX_RATE_HT40 = 0x4,
+
+ /* Rate with Short guard interval */
+ HAL_TX_RATE_SGI = 0x8,
+
+ /* Rate with Long guard interval */
+ HAL_TX_RATE_LGI = 0x10
+};
+
+struct ani_global_class_a_stats_info {
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames */
+ u32 rx_frag_cnt;
+
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames when a promiscuous packet filter
+ * was enabled */
+ u32 promiscuous_rx_frag_cnt;
+
+	/* The receiver input sensitivity referenced to a FER of 8% at an
+	 * MPDU length of 1024 bytes at the antenna connector. Each element
+	 * of the array shall correspond to a supported rate and the order
+	 * shall be the same as the supportedRates parameter. */
+ u32 rx_input_sensitivity;
+
+	/* The maximum transmit power in dBm, up to one decimal place. For
+	 * example, if it is 10.5 dBm, the value would be 105 */
+ u32 max_pwr;
+
+ /* Number of times the receiver failed to synchronize with the
+ * incoming signal after detecting the sync in the preamble of the
+ * transmitted PLCP protocol data unit. */
+ u32 sync_fail_cnt;
+
+ /* Legacy transmit rate, in units of 500 kbit/sec, for the most
+ * recently transmitted frame */
+ u32 tx_rate;
+
+ /* mcs index for HT20 and HT40 rates */
+ u32 mcs_index;
+
+ /* to differentiate between HT20 and HT40 rates; short and long
+ * guard interval */
+ u32 tx_rate_flags;
+};
+
+struct ani_global_security_stats {
+ /* The number of unencrypted received MPDU frames that the MAC
+ * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+ * management information base (MIB) object is enabled */
+ u32 rx_wep_unencrypted_frm_cnt;
+
+	/* The number of received MSDU packets that the 802.11 station
+	 * discarded because of MIC failures */
+ u32 rx_mic_fail_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a TKIP ICV error */
+ u32 tkip_icv_err;
+
+	/* The number of received MPDU frames that the 802.11 station
+	 * discarded because of an invalid AES-CCMP format */
+ u32 aes_ccmp_format_err;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of the AES-CCMP replay protection procedure */
+ u32 aes_ccmp_replay_cnt;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of errors detected by the AES-CCMP decryption
+ * algorithm */
+ u32 aes_ccmp_decrpt_err;
+
+ /* The number of encrypted MPDU frames received for which a WEP
+ * decryption key was not available on the 802.11 station */
+ u32 wep_undecryptable_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a WEP ICV error */
+ u32 wep_icv_err;
+
+ /* The number of received encrypted packets that the 802.11 station
+ * successfully decrypted */
+ u32 rx_decrypt_succ_cnt;
+
+ /* The number of encrypted packets that the 802.11 station failed
+ * to decrypt */
+ u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+ struct ani_global_security_stats uc_stats;
+ struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+	/* This counter shall be incremented for a received A-MSDU frame
+	 * with the station's MAC address in the address 1 field or an
+	 * A-MSDU frame with a group address in the address 1 field */
+ u32 rx_amsdu_cnt;
+
+ /* This counter shall be incremented when the MAC receives an AMPDU
+ * from the PHY */
+ u32 rx_ampdu_cnt;
+
+ /* This counter shall be incremented when a Frame is transmitted
+ * only on the primary channel */
+ u32 tx_20_frm_cnt;
+
+ /* This counter shall be incremented when a Frame is received only
+ * on the primary channel */
+ u32 rx_20_frm_cnt;
+
+ /* This counter shall be incremented by the number of MPDUs
+ * received in the A-MPDU when an A-MPDU is received */
+ u32 rx_mpdu_in_ampdu_cnt;
+
+ /* This counter shall be incremented when an MPDU delimiter has a
+ * CRC error when this is the first CRC error in the received AMPDU
+ * or when the previous delimiter has been decoded correctly */
+ u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+ /* The number of MPDU frames that the 802.11 station transmitted
+ * and acknowledged through a received 802.11 ACK frame */
+ u32 tx_frag_cnt[4];
+
+ /* This counter shall be incremented when an A-MPDU is transmitted */
+ u32 tx_ampdu_cnt;
+
+ /* This counter shall increment by the number of MPDUs in the AMPDU
+ * when an A-MPDU is transmitted */
+ u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* STA Idx */
+ u32 sta_index;
+
+ /* Categories of STATS being returned as per eHalStatsMask */
+ u32 stats_mask;
+
+ /* message type is same as the request type */
+ u16 msg_type;
+
+ /* length of the entire request, includes the pStatsBuf length too */
+ u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ enum wcn36xx_hal_link_state state;
+ u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u16 ackPolicy:2;
+ u16 userPrio:3;
+ u16 psb:1;
+ u16 aggregation:1;
+ u16 accessPolicy:2;
+ u16 direction:2;
+ u16 tsid:4;
+ u16 trafficType:1;
+#else
+ u16 trafficType:1;
+ u16 tsid:4;
+ u16 direction:2;
+ u16 accessPolicy:2;
+ u16 aggregation:1;
+ u16 psb:1;
+ u16 userPrio:3;
+ u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:7;
+ u8 schedule:1;
+#else
+ u8 schedule:1;
+ u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+ struct wcn36xx_hal_ts_info_tfc traffic;
+ struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+ u8 type;
+ u8 length;
+ struct wcn36xx_hal_ts_info ts_info;
+ u16 nom_msdu_size;
+ u16 max_msdu_size;
+ u32 min_svc_interval;
+ u32 max_svc_interval;
+ u32 inact_interval;
+ u32 suspend_interval;
+ u32 svc_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_sz;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surplus_bw;
+ u16 medium_time;
+};
+
+struct add_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct add_rs_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct del_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To lookup station id using the mac address */
+ u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* Peer MAC Address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* ADDBA Action Frame dialog token
+ HAL will not interpret this object */
+ u8 dialog_token;
+
+ /* TID for which the BA is being setup
+ This identifies the TC or TS of interest */
+ u8 tid;
+
+ /* 0 - Delayed BA (Not supported)
+ 1 - Immediate BA */
+ u8 policy;
+
+	/* Indicates the number of buffers for this TID (baTID)
+	   NOTE - This is the requested buffer size. When this
+	   is processed by HAL and subsequently by HDD, it is
+	   possible that HDD may change this buffer size. Any
+	   change in the buffer size should be noted by PE and
+	   advertised appropriately in the ADDBA response */
+ u16 buffer_size;
+
+	/* BA timeout in TUs; 0 means no timeout will occur */
+ u16 timeout;
+
+ /* b0..b3 - Fragment Number - Always set to 0
+ b4..b15 - Starting Sequence Number of first MSDU
+ for which this BA is setup */
+ u16 ssn;
+
+ /* ADDBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
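+
+/* A minimal illustrative sketch, not part of the original interface, of
+ * the ssn encoding documented above: fragment number 0 in b0..b3 and the
+ * starting MSDU sequence number in b4..b15. The helper name is an
+ * assumption. */
+static inline u16 wcn36xx_hal_ba_ssn(u16 start_seq_num)
+{
+	return (start_seq_num & 0xfff) << 4;
+}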
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+
+ /* TID for which the BA session has been setup */
+ u8 ba_tid;
+
+ /* BA Buffer Size allocated for the current BA session */
+ u8 ba_buffer_size;
+
+ u8 ba_session_id;
+
+ /* Reordering Window buffer */
+ u8 win_size;
+
+	/* Station index identifying the STA */
+ u8 sta_index;
+
+ /* Starting Sequence Number */
+ u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* Reorder Window Size */
+ u8 win_size;
+/* Old FW 1.2.2.4 does not support this*/
+#ifdef FEATURE_ON_CHIP_REORDERING
+ u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+ u16 ba_enable:1;
+ u16 starting_seq_num:12;
+ u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+ u8 sta_addr[ETH_ALEN];
+ struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigget_ba_req_candidate {
+ u8 sta_index;
+ u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* baCandidateCnt is followed by trigger BA
+ * Candidate List(tTriggerBaCandidate)
+ */
+ u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+
+ /* baCandidateCnt is followed by trigger BA
+ * Rsp Candidate List(tTriggerRspBaCandidate)
+ */
+ u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TID for which the BA session is being deleted */
+ u8 tid;
+
+ /* DELBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Traffic Id */
+ u8 tid;
+
+ u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+ /* Uplink Packet Queue delay */
+ u16 uplink_pkt_queue_delay;
+
+ /* Uplink Packet Queue delay histogram */
+ u16 uplink_pkt_queue_delay_hist[4];
+
+ /* Uplink Packet Transmit delay */
+ u32 uplink_pkt_tx_delay;
+
+ /* Uplink Packet loss */
+ u16 uplink_pkt_loss;
+
+ /* Uplink Packet count */
+ u16 uplink_pkt_count;
+
+ /* Roaming count */
+ u8 roaming_count;
+
+ /* Roaming Delay */
+ u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* BSS index of the keys */
+	u8 bssidx;
+ u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+ /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+ * messages should be
+ * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+ * nv_img_buffer_size */
+ struct wcn36xx_hal_msg_header header;
+
+	/* Fragment sequence number of the NV Image. Note that the NV Image
+	 * might not fit into one message due to the size limitation of the
+	 * SMD channel FIFO. UMAC can hence choose to chop the NV blob into
+	 * multiple fragments starting with sequence number 0, 1, 2 etc.
+	 * The last fragment MUST be indicated by marking the
+	 * isLastFragment field to 1. Note that all the NV blobs would be
+	 * concatenated together by HAL without any padding bytes in
+	 * between. */
+ u16 frag_number;
+
+	/* Is this the last fragment? When set to 1 it indicates that no
+	 * more fragments will be sent by UMAC and HAL can concatenate all
+	 * the NV blobs received and proceed with parsing. HAL would
+	 * generate a WCN36XX_HAL_DOWNLOAD_NV_RSP to the
+	 * WCN36XX_HAL_DOWNLOAD_NV_REQ after it receives each fragment */
+ u16 last_fragment;
+
+ /* NV Image size (number of bytes) */
+ u32 nv_img_buffer_size;
+
+ /* Following the 'nv_img_buffer_size', there should be
+ * nv_img_buffer_size bytes of NV Image i.e.
+ * u8[nv_img_buffer_size] */
+} __packed;
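+
+/* A minimal illustrative sketch, not part of the original interface, of
+ * the length rule noted above: each fragment's header.len covers the
+ * fixed request followed by the NV payload bytes carried by that
+ * message. The helper name is an assumption. */
+static inline u32 wcn36xx_hal_nv_frag_msg_len(u32 frag_payload_size)
+{
+	return sizeof(struct wcn36xx_hal_nv_img_download_req_msg) +
+	       frag_payload_size;
+}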
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure. HAL would generate a
+ * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+ /* Note: The length specified in tHalNvStoreInd messages should be
+ * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+ struct wcn36xx_hal_msg_header header;
+
+ /* NV Item */
+ u32 table_id;
+
+ /* Size of NV Blob */
+ u32 nv_blob_size;
+
+ /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+ * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for MIC failure indication. MAC reports this each time a MIC
+ * failure occurs on an Rx TKIP packet
+ */
+struct mic_failure_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+
+ /* address used to compute MIC */
+ u8 src_addr[ETH_ALEN];
+
+ /* transmitter address */
+ u8 ta_addr[ETH_ALEN];
+
+ u8 dst_addr[ETH_ALEN];
+
+ u8 multicast;
+
+ /* first byte of IV */
+ u8 iv1;
+
+ /* second byte of IV */
+ u8 key_id;
+
+ /* sequence number */
+ u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+ /* receive address */
+ u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 op_mode;
+ u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+};
+
+struct update_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* shortPreamble mode. HAL should update all the STA rates when it
+ * receives this message */
+ u8 short_preamble;
+
+ /* short Slot time. */
+ u8 short_slot_time;
+
+ /* Beacon Interval */
+ u16 beacon_interval;
+
+ /* Protection related */
+ u8 lla_coexist;
+ u8 llb_coexist;
+ u8 llg_coexist;
+ u8 ht20_coexist;
+ u8 lln_non_gf_coexist;
+ u8 lsig_tx_op_protection_full_support;
+ u8 rifs_mode;
+
+ u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* length of the template. */
+ u32 beacon_length;
+
+ /* Beacon data. */
+ u8 beacon[BEACON_TEMPLATE_SIZE];
+
+ u8 bssid[ETH_ALEN];
+
+ /* TIM IE offset from the beginning of the template. */
+ u32 tim_ie_offset;
+
+	/* P2P IE offset from the beginning of the template */
+ u16 p2p_ie_offset;
+} __packed;
+
+struct send_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Link Parameters */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* channel number on which the RADAR was detected */
+	u8 channel_number;
+
+	/* RADAR pulse width in microseconds */
+ u16 radar_pulse_width;
+
+ /* Number of RADAR pulses */
+ u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta[ETH_ALEN];
+ u8 dialog_token;
+ u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+ u32 probe_resp_template_len;
+ u32 proxy_probe_req_valid_ie_bmap[8];
+ u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 aid;
+ u16 sta_id;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* HAL copies bssid from the sta table. */
+ u8 addr2[ETH_ALEN];
+
+	/* To unify the keepalive / unknown A2 / tim-based disassociation */
+ u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+ struct wcn36xx_hal_msg_header header;
+ u8 aid;
+ u8 sta_index;
+ u8 bss_index;
+ u8 reason_code;
+ u32 status;
+};
+
+struct bt_amp_event_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+ u64 tbtt;
+#endif
+ u8 dtim_count;
+
+ /* DTIM period given to HAL during association may not be valid, if
+ * association is based on ProbeRsp instead of beacon. */
+ u8 dtim_period;
+
+ /* For CCX and 11R Roaming */
+ u32 rssi_filter_period;
+
+ u32 num_beacon_per_rssi_average;
+ u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 send_data_null;
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The above structure would be followed by multiple instances of the
+ * structure below
+ */
+struct beacon_filter_ie {
+ u8 element_id;
+ u8 check_ie_presence;
+ u8 offset;
+ u8 value;
+ u8 bitmask;
+ u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 capability_info;
+ u16 capability_mask;
+ u16 beacon_interval;
+ u16 ie_num;
+ u8 bss_index;
+ u8 reserved;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 ie_Count;
+ u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD 2
+#define WCN36XX_HAL_IPV6_ADDR_LEN 16
+#define WCN36XX_HAL_OFFLOAD_DISABLE 0
+#define WCN36XX_HAL_OFFLOAD_ENABLE 1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE	\
+	(WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+ u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+	/* Only 2 possible Network Advertisement IPv6 addresses are supported */
+ u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ u8 self_addr[ETH_ALEN];
+ u8 src_ipv6_addr_valid:1;
+ u8 target_ipv6_addr1_valid:1;
+ u8 target_ipv6_addr2_valid:1;
+ u8 reserved1:5;
+
+ /* make it DWORD aligned */
+ u8 reserved2;
+
+ /* slot index for this offload */
+ u32 slot_index;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+ u8 offload_Type;
+
+ /* enable or disable */
+ u8 enable;
+
+ union {
+ u8 host_ipv4_addr[4];
+ u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ } u;
+};
+
+struct wcn36xx_hal_host_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_host_offload_req host_offload_params;
+ struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
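+
+/* A minimal illustrative sketch, not part of the original interface,
+ * showing how the request above could be set up for IPv4 ARP reply
+ * offload with broadcast filtering, using the flags defined earlier in
+ * this file. The helper name is an assumption; address fields are left
+ * for the caller. */
+static inline void
+wcn36xx_hal_arp_offload_init(struct wcn36xx_hal_host_offload_req *req)
+{
+	req->offload_Type = WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+	req->enable = WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE;
+}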
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE 0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE 1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD 30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 packet_type;
+ u32 time_period;
+ u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_addr[ETH_ALEN];
+ u8 bss_index;
+} __packed;
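+
+/* A minimal illustrative sketch, not part of the original interface,
+ * applying the defaults documented above: a NULL keep-alive frame every
+ * WCN36XX_KEEP_ALIVE_TIME_PERIOD seconds on the given BSS. The helper
+ * name is an assumption; address fields are left for the caller. */
+static inline void
+wcn36xx_hal_keep_alive_defaults(struct wcn36xx_hal_keep_alive_req_msg *msg,
+				u8 bss_index)
+{
+	msg->packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+	msg->time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+	msg->bss_index = bss_index;
+}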
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ s8 threshold1:8;
+ s8 threshold2:8;
+ s8 threshold3:8;
+ u8 thres1_pos_notify:1;
+ u8 thres1_neg_notify:1;
+ u8 thres2_pos_notify:1;
+ u8 thres2_neg_notify:1;
+ u8 thres3_pos_notify:1;
+ u8 thres3_neg_notify:1;
+ u8 reserved10:2;
+};
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bk_delivery:1;
+ u8 be_delivery:1;
+ u8 vi_delivery:1;
+ u8 vo_delivery:1;
+ u8 bk_trigger:1;
+ u8 be_trigger:1;
+ u8 vi_trigger:1;
+ u8 vo_trigger:1;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID */
+ u8 id;
+
+ /* Pattern byte offset from beginning of the 802.11 packet to start
+ * of the wake-up pattern */
+ u8 byte_Offset;
+
+ /* Non-Zero Pattern size */
+ u8 size;
+
+ /* Pattern */
+ u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Non-zero pattern mask size */
+ u8 mask_size;
+
+ /* Pattern mask */
+ u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern */
+ u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern mask */
+ u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID of the wakeup pattern to be deleted */
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enables/disables magic packet filtering */
+ u8 magic_packet_enable;
+
+ /* Magic pattern */
+ u8 magic_pattern[ETH_ALEN];
+
+	/* Enables/disables packet pattern filtering in firmware. Enabling
+	 * this flag enables broadcast pattern matching in firmware. If
+	 * unicast pattern matching is also desired, the
+	 * ucUcastPatternFilteringEnable flag must be set to true as well
+	 */
+ u8 pattern_filtering_enable;
+
+ /* Enables/disables unicast packet pattern filtering. This flag
+ * specifies whether we want to do pattern match on unicast packets
+ * as well and not just broadcast packets. This flag has no effect
+ * if the ucPatternFilteringEnable (main controlling flag) is set
+ * to false
+ */
+ u8 ucast_pattern_filtering_enable;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Channel Switch
+ * Action Frame.
+ */
+ u8 wow_channel_switch_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the
+ * Deauthentication Frame.
+ */
+ u8 wow_deauth_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Disassociation
+ * Frame.
+ */
+ u8 wow_disassoc_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it has missed consecutive
+ * beacons. This is a hardware register configuration (NOT a
+ * firmware configuration).
+ */
+ u8 wow_max_missed_beacons;
+
+	/* This configuration is valid only when magicPktEnable=1. This is
+	 * a timeout value in units of microseconds. It requests hardware
+	 * to unconditionally wake up after it has stayed in WoWLAN mode
+	 * for some time. Set 0 to disable this feature.
+	 */
+ u8 wow_max_sleep;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAP-ID requests embedded in EAPOL frames and use this as a wake
+ * source.
+ */
+ u8 wow_eap_id_request_enable;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAPOL-4WAY requests and use this as a wake source.
+ */
+ u8 wow_eapol_4way_enable;
+
+	/* This configuration allows a host wakeup on a network scan
+ * offload match.
+ */
+ u8 wow_net_scan_offload_match;
+
+ /* This configuration allows a host wakeup on any GTK rekeying
+ * error.
+ */
+ u8 wow_gtk_rekey_error;
+
+ /* This configuration allows a host wakeup on BSS connection loss.
+ */
+ u8 wow_bss_connection_loss;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA index */
+ u8 sta_idx;
+
+ /* Access Category */
+ u8 ac;
+
+ /* User Priority */
+ u8 up;
+
+ /* Service Interval */
+ u32 service_interval;
+
+ /* Suspend Interval */
+ u32 suspend_interval;
+
+ /* Delay Interval */
+ u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 set_mcst_bcst_filter_setting;
+ u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 rssi_thres1_pos_cross:1;
+ u32 rssi_thres1_neg_cross:1;
+ u32 rssi_thres2_pos_cross:1;
+ u32 rssi_thres2_neg_cross:1;
+ u32 rssi_thres3_pos_cross:1;
+ u32 rssi_thres3_neg_cross:1;
+ u32 avg_rssi:8;
+ u32 reserved:18;
+
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ s8 rssi;
+
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 sta_id;
+ s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_max_tx_pwr_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSID is needed to identify which session issued this request.
+ * As the request has power constraints, this should be applied
+ * only to that session */
+ u8 bssid[ETH_ALEN];
+
+ u8 self_addr[ETH_ALEN];
+
+ /* In request, power == MaxTx power to be used. */
+ u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* power == tx power used for management frames */
+ u8 power;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* TX power in milliwatts */
+ u32 tx_power;
+
+ u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+	/* TX power in milliwatts */
+ u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 opp_ps;
+ u32 ct_window;
+ u8 count;
+ u32 duration;
+ u32 interval;
+ u32 single_noa_duration;
+ u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Self STA Index */
+ u8 self_sta_index;
+
+ /* DPU Index (IGTK, PTK, GTK all same) */
+ u8 dpu_index;
+
+ /* DPU Signature */
+ u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_idx;
+
+	/* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+	 * This will carry the bitmap with the bit positions representing
+	 * different ACs */
+ u16 tspec_index;
+
+ /* Tspec info per AC To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status0;
+
+ /* FIXME PRIMA for future use for 11R */
+ u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+ u32 arg4;
+ u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+	/* Length of the response message */
+ u32 rsp_length;
+
+	/* FIXME: Currently considering that the response will be less than
+	 * 100 bytes */
+ u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Coex Indication Type */
+ u32 type;
+
+ /* Coex Indication Data */
+ u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Tx Complete Indication Success or Failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 configured_mcst_bcst_filter_setting;
+ u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 dot11_exclude_unencrypted;
+ u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 index;
+ u8 opp_ps_flag;
+ u16 ctwin;
+
+ u16 noa1_interval_count;
+ u16 bss_index;
+ u32 noa1_duration;
+ u32 noa1_interval;
+ u32 noa1_starttime;
+
+ u16 noa2_interval_count;
+ u16 reserved2;
+ u32 noa2_duration;
+ u32 noa2_interval;
+ u32 noa2_start_time;
+
+ u32 status;
+};
+
+struct noa_start_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 sta_idx;
+
+ /* Peer MAC Address, whose BA session has timed out */
+ u8 peer_addr[ETH_ALEN];
+
+ /* TID for which a BA session timeout is being triggered */
+ u8 ba_tid;
+
+ /* DELBA direction
+ * 1 - Originator
+ * 0 - Recipient
+ */
+ u8 direction;
+
+ u32 reason_code;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS 26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX 60
+
+/* Maximum numbers of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS 16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS 10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE 450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+ PNO_MODE_IMMEDIATE,
+ PNO_MODE_ON_SUSPEND,
+ PNO_MODE_ON_RESUME,
+ PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+ AUTH_TYPE_ANY = 0,
+ AUTH_TYPE_OPEN_SYSTEM = 1,
+
+ /* Upper layer authentication types */
+ AUTH_TYPE_WPA = 2,
+ AUTH_TYPE_WPA_PSK = 3,
+
+ AUTH_TYPE_RSN = 4,
+ AUTH_TYPE_RSN_PSK = 5,
+ AUTH_TYPE_FT_RSN = 6,
+ AUTH_TYPE_FT_RSN_PSK = 7,
+ AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+ AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+ AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+ ED_ANY = 0,
+ ED_NONE = 1,
+ ED_WEP = 2,
+ ED_TKIP = 3,
+ ED_CCMP = 4,
+ ED_WPI = 5,
+
+ ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast type */
+enum ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+
+ BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* The network description that PNO will have to look for */
+struct network_type {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+	/* Indicates the channels on which the network can be found;
+	 * 0 - if all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+struct scan_timer {
+	/* How long it should wait */
+ u32 value;
+
+	/* How many times it should repeat that wait; value 0 - keep using
+	 * this timer until PNO is disabled */
+ u32 repeat;
+
+	/* e.g.: 2 3 4 0 - it will wait 2 s between consecutive scans 3
+	 * times; after that it will wait 4 s between consecutive scans
+	 * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+ /* set to 0 if you wish for PNO to use its default telescopic timer */
+ u8 count;
+
+	/* Each value represents the amount of time that PNO will wait between
+	 * two consecutive scan procedures. For a uniform timer that always
+	 * fires at the exact same interval, set one single value. For a more
+	 * complex, telescopic-like timer, set multiple values; once PNO
+	 * reaches the end of the array it will continue scanning at the
+	 * interval given by the last value */
+ struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
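+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the HAL
+ * interface): programming the telescopic timer example above, i.e. values
+ * of {2, 3} followed by {4, 0} make PNO wait 2s between the first three
+ * scans and then 4s between all later scans until PNO is disabled.
+ *
+ *	static void example_fill_scan_timers(struct scan_timers_type *t)
+ *	{
+ *		t->count = 2;
+ *		t->values[0].value = 2;
+ *		t->values[0].repeat = 3;
+ *		t->values[1].value = 4;
+ *		t->values[1].repeat = 0;
+ *	}
+ */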
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* The network description that PNO will have to look for */
+struct network_type_new {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* SSID broadcast type, normal, hidden or unknown */
+ enum ssid_bcast_type bcast_network_type;
+
+	/* Indicates the channels on which the network can be found; 0 means
+	 * all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request - just to indicate that PNO has
+ * acknowledged the request and will start scanning */
+ u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Network that was found with the highest RSSI */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Indicates the RSSI */
+ u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* RSSI Threshold */
+ u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+	/* Minimum active scan channel time */
+ u16 active_min_ch_time;
+
+	/* Maximum active scan channel time */
+ u16 active_max_ch_time;
+
+	/* Minimum passive scan channel time */
+ u16 passive_min_ch_time;
+
+	/* Maximum passive scan channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+	/* Minimum active scan channel time */
+ u16 active_min_ch_time;
+
+	/* Maximum active scan channel time */
+ u16 active_max_ch_time;
+
+	/* Minimum passive scan channel time */
+ u16 passive_min_ch_time;
+
+	/* Maximum passive scan channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+};
+
+/* Update scan params response */
+struct wcn36xx_hal_update_scan_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* 0: disable, 1:enable */
+ u8 tx_per_tracking_enable;
+
+ /* Check period, unit is sec. */
+ u8 tx_per_tracking_period;
+
+	/* (Failed TX packets)/(Total TX packets) ratio, in units of 10%. */
+ u8 tx_per_tracking_ratio;
+
+	/* Watermark of TX packet count; once the number of TX packets exceeds
+	 * this value the check is performed. Default is 5. */
+ u32 tx_per_tracking_watermark;
+};
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+};
+
+struct tx_per_hit_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define WCN36XX_HAL_PROTOCOL_DATA_LEN 8
+#define WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS 240
+#define WCN36XX_HAL_MAX_NUM_FILTERS 20
+#define WCN36XX_HAL_MAX_CMP_PER_FILTER 10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+ HAL_RCV_FILTER_TYPE_INVALID,
+ HAL_RCV_FILTER_TYPE_FILTER_PKT,
+ HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+ HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+ HAL_FILTER_PROTO_TYPE_INVALID,
+ HAL_FILTER_PROTO_TYPE_MAC,
+ HAL_FILTER_PROTO_TYPE_ARP,
+ HAL_FILTER_PROTO_TYPE_IPV4,
+ HAL_FILTER_PROTO_TYPE_IPV6,
+ HAL_FILTER_PROTO_TYPE_UDP,
+ HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+ HAL_FILTER_CMP_TYPE_INVALID,
+ HAL_FILTER_CMP_TYPE_EQUAL,
+ HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+ HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+ HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+ u8 protocol_layer;
+ u8 cmp_flag;
+
+ /* Length of the data to compare */
+ u16 data_length;
+
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ /* Reserved field */
+ u8 reserved;
+
+ /* Data to compare */
+ u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+ /* Mask to be applied on the received packet data before compare */
+ u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coleasce_time;
+ u8 bss_index;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coalesce_time;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
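+
+/*
+ * Illustrative sketch only (hypothetical values, not taken from firmware
+ * documentation): a request with a single comparison that matches the
+ * EtherType of an ARP frame (0x0806 at offset 12 of the MAC header) might
+ * be filled roughly like this, assuming data_offset counts from the start
+ * of the respective frame header as noted above:
+ *
+ *	req->id = 0;
+ *	req->type = HAL_RCV_FILTER_TYPE_FILTER_PKT;
+ *	req->params_count = 1;
+ *	req->coalesce_time = 0;
+ *	req->params[0].protocol_layer = HAL_FILTER_PROTO_TYPE_MAC;
+ *	req->params[0].cmp_flag = HAL_FILTER_CMP_TYPE_EQUAL;
+ *	req->params[0].data_offset = 12;
+ *	req->params[0].data_length = 2;
+ *	req->params[0].compare_data[0] = 0x08;
+ *	req->params[0].compare_data[1] = 0x06;
+ */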
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ u32 mc_addr_count;
+ u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+ u8 id;
+ u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ u32 match_count;
+ struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+ matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+ /* only valid for response message */
+ u32 status;
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+ u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Ignore DTIM */
+ u32 ignore_dtim;
+
+ /* DTIM Period */
+ u32 dtim_period;
+
+ /* Listen Interval */
+ u32 listen_interval;
+
+ /* Broadcast Multicast Filter */
+ u32 bcast_mcast_filter;
+
+ /* Beacon Early Termination */
+ u32 enable_bet;
+
+ /* Beacon Early Termination Interval */
+ u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum place_holder_in_cap_bitmap {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+struct wcn36xx_hal_feat_caps_msg {
+
+ struct wcn36xx_hal_msg_header header;
+
+ u32 feat_caps[4];
+} __packed;
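+
+/*
+ * feat_caps[4] gives 4 x 32 = 128 bits, matching MAX_FEATURE_SUPPORTED:
+ * capability bit n from enum place_holder_in_cap_bitmap is expected to sit
+ * in word n / 32 at bit position n % 32. A minimal sketch of helpers for
+ * this layout (hypothetical names, not part of this patch):
+ *
+ *	static void example_set_feat_cap(u32 *bitmap, int cap)
+ *	{
+ *		bitmap[cap / 32] |= BIT(cap % 32);
+ *	}
+ *
+ *	static int example_get_feat_cap(u32 *bitmap, int cap)
+ *	{
+ *		return !!(bitmap[cap / 32] & BIT(cap % 32));
+ *	}
+ */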
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+ WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+ /* rekey detected, but not handled */
+ WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+ /* MIC check error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+ /* decryption error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+ /* M1 replay detected */
+ WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+ /* missing GTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+ /* missing iGTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+ /* key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+ /* iGTK key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+ /* GTK rekey M2 response TX error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+ /* non-specific general error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+ WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+ /* magic packet match */
+ WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+ /* host defined pattern match */
+ WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+ /* EAP-ID frame detected */
+ WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+ /* start of EAPOL 4-way handshake detected */
+ WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+ /* network scan offload match */
+ WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+ /* GTK rekey status wakeup (see status) */
+ WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+ /* BSS connection lost */
+ WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+ Wake Packet which is saved at tWakeReasonParams.DataStart
+ This data is sent for any wake reasons that involve a packet-based wakeup:
+
+ WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
+ WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
+
+ The information is provided to the host for auditing and debug purposes
+
+*/
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+ struct wcn36xx_hal_msg_header header;
+
+	/* see enum wake_reason_type */
+ u32 reason;
+
+ /* argument specific to the reason type */
+ u32 reason_arg;
+
+	/* length of optional data stored in this message; if HAL truncates
+	 * the data (i.e. data packets), this length will be less than the
+	 * actual length */
+ u32 stored_data_len;
+
+ /* actual length of data */
+ u32 actual_data_len;
+
+	/* variable-length start of data (length == stored_data_len); see the
+	 * specific wake type */
+ u8 data_start[1];
+
+ u32 bss_index:8;
+ u32 reserved:24;
+};
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG 0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* optional flags */
+ u32 flags;
+
+ /* Key confirmation key */
+ u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+ /* key encryption key */
+ u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+ /* replay counter */
+ u64 key_replay_counter;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* last rekey status when the rekey was offloaded */
+ u32 last_rekey_status;
+
+ /* current replay counter value */
+ u64 key_replay_counter;
+
+ /* total rekey attempts */
+ u32 total_rekey_count;
+
+ /* successful GTK rekeys */
+ u32 gtk_rekey_count;
+
+ /* successful iGTK rekeys */
+ u32 igtk_rekey_count;
+
+ u8 bss_index;
+};
+
+struct dhcp_info {
+	/* Indicates the device mode associated with the DHCP activity */
+ u8 device_mode;
+
+ u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/*
+ * Thermal Mitigation mode of operation.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - mitigation based on disabling
+ * AMPDU aggregation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - mitigation based on disabling
+ * AMPDU aggregation and reducing transmit power
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+ HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+ HAL_THERMAL_MITIGATION_MODE_0,
+ HAL_THERMAL_MITIGATION_MODE_1,
+ HAL_THERMAL_MITIGATION_MODE_2,
+ HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/*
+ * Thermal Mitigation level.
+ * Note that the levels are incremental, i.e.
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+ HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+ HAL_THERMAL_MITIGATION_LEVEL_0,
+ HAL_THERMAL_MITIGATION_LEVEL_1,
+ HAL_THERMAL_MITIGATION_LEVEL_2,
+ HAL_THERMAL_MITIGATION_LEVEL_3,
+ HAL_THERMAL_MITIGATION_LEVEL_4,
+ HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Thermal Mitigation Operation Mode */
+ enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+ /* Thermal Mitigation Level */
+ enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
+
+struct set_thermal_mitigation_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+ struct wcn36xx_hal_msg_header header;
+
+	/* Duration over which these stats were collected */
+ u32 duration;
+
+ /* Per STA Stats */
+
+ /* TX stats */
+ u32 tx_bytes_pushed;
+ u32 tx_packets_pushed;
+
+ /* RX stats */
+ u32 rx_bytes_rcvd;
+ u32 rx_packets_rcvd;
+ u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 00000000000..7839b31e482
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+ CHAN2G(2412, 1), /* Channel 1 */
+ CHAN2G(2417, 2), /* Channel 2 */
+ CHAN2G(2422, 3), /* Channel 3 */
+ CHAN2G(2427, 4), /* Channel 4 */
+ CHAN2G(2432, 5), /* Channel 5 */
+ CHAN2G(2437, 6), /* Channel 6 */
+ CHAN2G(2442, 7), /* Channel 7 */
+ CHAN2G(2447, 8), /* Channel 8 */
+ CHAN2G(2452, 9), /* Channel 9 */
+ CHAN2G(2457, 10), /* Channel 10 */
+ CHAN2G(2462, 11), /* Channel 11 */
+ CHAN2G(2467, 12), /* Channel 12 */
+ CHAN2G(2472, 13), /* Channel 13 */
+ CHAN2G(2484, 14) /* Channel 14 */
+
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+ CHAN5G(5180, 36),
+ CHAN5G(5200, 40),
+ CHAN5G(5220, 44),
+ CHAN5G(5240, 48),
+ CHAN5G(5260, 52),
+ CHAN5G(5280, 56),
+ CHAN5G(5300, 60),
+ CHAN5G(5320, 64),
+ CHAN5G(5500, 100),
+ CHAN5G(5520, 104),
+ CHAN5G(5540, 108),
+ CHAN5G(5560, 112),
+ CHAN5G(5580, 116),
+ CHAN5G(5600, 120),
+ CHAN5G(5620, 124),
+ CHAN5G(5640, 128),
+ CHAN5G(5660, 132),
+ CHAN5G(5700, 140),
+ CHAN5G(5745, 149),
+ CHAN5G(5765, 153),
+ CHAN5G(5785, 157),
+ CHAN5G(5805, 161),
+ CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (_hw_rate) \
+}
+
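+/* .bitrate is in units of 100 kbit/s, so e.g. RATE(55, ...) below is the
+ * 5.5 Mbit/s DSSS rate and RATE(540, ...) is the 54 Mbit/s OFDM rate. */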
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+ RATE(10, HW_RATE_INDEX_1MBPS, 0),
+ RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+ .channels = wcn_2ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_2ghz_channels),
+ .bitrates = wcn_2ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+ .channels = wcn_5ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_5ghz_channels),
+ .bitrates = wcn_5ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
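+/*
+ * Firmware addresses stations by index: in STA mode the only peer is the
+ * BSS (our AP), reached through bss_sta_index; in the other modes each
+ * associated peer has its own sta_index.
+ */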
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+ struct wcn36xx_sta *sta_priv)
+{
+ return NL80211_IFTYPE_STATION == vif->type ?
+ sta_priv->bss_sta_index :
+ sta_priv->sta_index;
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+ int ret;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+ /* SMD initialization */
+ ret = wcn36xx_smd_open(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to open smd channel: %d\n", ret);
+ goto out_err;
+ }
+
+ /* Allocate memory pools for Mgmt BD headers and Data BD headers */
+ ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+ goto out_smd_close;
+ }
+
+ ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+ goto out_free_dxe_pool;
+ }
+
+ wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ wcn36xx_err("Failed to allocate smd buf\n");
+ ret = -ENOMEM;
+ goto out_free_dxe_ctl;
+ }
+
+ ret = wcn36xx_smd_load_nv(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to push NV to chip\n");
+ goto out_free_smd_buf;
+ }
+
+ ret = wcn36xx_smd_start(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to start chip\n");
+ goto out_free_smd_buf;
+ }
+
+ /* DMA channel initialization */
+ ret = wcn36xx_dxe_init(wcn);
+ if (ret) {
+ wcn36xx_err("DXE init failed\n");
+ goto out_smd_stop;
+ }
+
+ wcn36xx_debugfs_init(wcn);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ }
+ INIT_LIST_HEAD(&wcn->vif_list);
+ return 0;
+
+out_smd_stop:
+ wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+ kfree(wcn->hal_buf);
+out_free_dxe_pool:
+ wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+ wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+ wcn36xx_smd_close(wcn);
+out_err:
+ return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+ wcn36xx_debugfs_exit(wcn);
+ wcn36xx_smd_stop(wcn);
+ wcn36xx_dxe_deinit(wcn);
+ wcn36xx_smd_close(wcn);
+
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_dxe_free_ctl_blks(wcn);
+
+ kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ int ch = WCN36XX_HW_CHANNEL(wcn);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+ ch);
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+ }
+
+ return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed,
+ unsigned int *total, u64 multicast)
+{
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+ *total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ if (control->sta)
+ sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+ if (wcn36xx_start_tx(wcn, sta_priv, skb))
+ ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ int ret = 0;
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+ cmd, key_conf->cipher, key_conf->keyidx,
+ key_conf->keylen, key_conf->flags);
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+ key_conf->key,
+ key_conf->keylen);
+
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+ break;
+ default:
+ wcn36xx_err("Unsupported key type 0x%x\n",
+ key_conf->cipher);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+ /*
+ * Supplicant is sending key in the wrong order:
+ * Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
+ * but HW expects it to be in the order as described in
+ * IEEE 802.11 spec (see chapter 11.7) like this:
+ * Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
+ */
+ memcpy(key, key_conf->key, 16);
+ memcpy(key + 16, key_conf->key + 24, 8);
+ memcpy(key + 24, key_conf->key + 16, 8);
+ } else {
+ memcpy(key, key_conf->key, key_conf->keylen);
+ }
+
+ if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+ sta_priv->is_data_encrypted = true;
+ /* Reconfigure bss with encrypt_type */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_smd_config_bss(wcn,
+ vif,
+ sta,
+ sta->addr,
+ true);
+
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ } else {
+ wcn36xx_smd_set_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key);
+ if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+ (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
+ }
+ break;
+ case DISABLE_KEY:
+ if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ wcn36xx_smd_remove_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx);
+ } else {
+ sta_priv->is_data_encrypted = false;
+ /* do not remove key if disassociated */
+ if (sta_priv->aid)
+ wcn36xx_smd_remove_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ get_sta_index(vif, sta_priv));
+ }
+ break;
+ default:
+ wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+ ret = -EOPNOTSUPP;
+ goto out;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+ wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_end_scan(wcn);
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, size;
+ u16 *rates_table;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ u32 rates = sta->supp_rates[band];
+
+ memset(&sta_priv->supported_rates, 0,
+ sizeof(sta_priv->supported_rates));
+ sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+ rates_table = sta_priv->supported_rates.dsss_rates;
+ if (band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_2ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+ }
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+ rates_table = sta_priv->supported_rates.ofdm_rates;
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_5ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
+ memcpy(sta_priv->supported_rates.supported_mcs_set,
+ sta->ht_cap.mcs.rx_mask,
+ sizeof(sta->ht_cap.mcs.rx_mask));
+ }
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+ HW_RATE_INDEX_6MBPS,
+ HW_RATE_INDEX_9MBPS,
+ HW_RATE_INDEX_12MBPS,
+ HW_RATE_INDEX_18MBPS,
+ HW_RATE_INDEX_24MBPS,
+ HW_RATE_INDEX_36MBPS,
+ HW_RATE_INDEX_48MBPS,
+ HW_RATE_INDEX_54MBPS
+ };
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+ HW_RATE_INDEX_1MBPS,
+ HW_RATE_INDEX_2MBPS,
+ HW_RATE_INDEX_5_5MBPS,
+ HW_RATE_INDEX_11MBPS
+ };
+
+ rates->op_rate_mode = STA_11n;
+ memcpy(rates->dsss_rates, dsss_rates,
+ sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+ memcpy(rates->ofdm_rates, ofdm_rates,
+ sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
+ rates->supported_mcs_set[0] = 0xFF;
+}
+
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct sk_buff *skb = NULL;
+ u16 tim_off, tim_len;
+ enum wcn36xx_hal_link_state link_state;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ vif, changed);
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed dtim period %d\n",
+ bss_conf->dtim_period);
+
+ vif_priv->dtim_period = bss_conf->dtim_period;
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss PS set %d\n",
+ bss_conf->ps);
+ if (bss_conf->ps) {
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+ bss_conf->bssid);
+
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ vif_priv->is_joining = true;
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_join(wcn, bss_conf->bssid,
+ vif->addr, WCN36XX_HW_CHANNEL(wcn));
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ bss_conf->bssid, false);
+ } else {
+ vif_priv->is_joining = false;
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_SSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed ssid\n");
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+ bss_conf->ssid, bss_conf->ssid_len);
+
+ vif_priv->ssid.length = bss_conf->ssid_len;
+ memcpy(&vif_priv->ssid.ssid,
+ bss_conf->ssid,
+ bss_conf->ssid_len);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ vif_priv->is_joining = false;
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta;
+ struct wcn36xx_sta *sta_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac assoc bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ wcn36xx_err("sta %pM is not found\n",
+ bss_conf->bssid);
+ rcu_read_unlock();
+ goto out;
+ }
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+ wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE);
+ wcn36xx_smd_config_bss(wcn, vif, sta,
+ bss_conf->bssid,
+ true);
+ sta_priv->aid = bss_conf->aid;
+ /*
+			 * config_sta must be called from here because this is
+			 * the place where the AID is available.
+ */
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ rcu_read_unlock();
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "disassociated bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+ wcn36xx_smd_set_link_st(wcn,
+ bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+ skb = ieee80211_proberesp_get(hw, vif);
+ if (!skb) {
+			wcn36xx_err("failed to alloc probe response skb\n");
+ goto out;
+ }
+
+ wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+ dev_kfree_skb(skb);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed beacon enabled %d\n",
+ bss_conf->enable_beacon);
+
+ if (bss_conf->enable_beacon) {
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ vif->addr, false);
+ skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+ &tim_len);
+ if (!skb) {
+ wcn36xx_err("failed to alloc beacon skb\n");
+ goto out;
+ }
+ wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+ dev_kfree_skb(skb);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
+ link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+ else
+ link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ link_state);
+ } else {
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+out:
+ return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+ return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+ list_del(&vif_priv->list);
+ wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+ vif, vif->type);
+
+ if (!(NL80211_IFTYPE_STATION == vif->type ||
+ NL80211_IFTYPE_AP == vif->type ||
+ NL80211_IFTYPE_ADHOC == vif->type ||
+ NL80211_IFTYPE_MESH_POINT == vif->type)) {
+ wcn36xx_warn("Unsupported interface type requested: %d\n",
+ vif->type);
+ return -EOPNOTSUPP;
+ }
+
+ list_add(&vif_priv->list, &wcn->vif_list);
+ wcn36xx_smd_add_sta_self(wcn, vif);
+
+ return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+ vif, sta->addr);
+
+ vif_priv->sta = sta_priv;
+ sta_priv->vif = vif_priv;
+ /*
+ * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
+ * at this stage AID is not available yet.
+ */
+ if (NL80211_IFTYPE_STATION != vif->type) {
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+ sta_priv->aid = sta->aid;
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
+ return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+ vif, sta->addr, sta_priv->sta_index);
+
+ wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+ vif_priv->sta = NULL;
+ sta_priv->vif = NULL;
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, true);
+ return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, false);
+ return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+ action, tid);
+
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ sta_priv->tid = tid;
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+ get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ default:
+ wcn36xx_err("Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+ .start = wcn36xx_start,
+ .stop = wcn36xx_stop,
+ .add_interface = wcn36xx_add_interface,
+ .remove_interface = wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+ .suspend = wcn36xx_suspend,
+ .resume = wcn36xx_resume,
+#endif
+ .config = wcn36xx_config,
+ .configure_filter = wcn36xx_configure_filter,
+ .tx = wcn36xx_tx,
+ .set_key = wcn36xx_set_key,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
+ .bss_info_changed = wcn36xx_bss_info_changed,
+ .set_rts_threshold = wcn36xx_set_rts_threshold,
+ .sta_add = wcn36xx_sta_add,
+ .sta_remove = wcn36xx_sta_remove,
+ .ampdu_action = wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+ int ret = 0;
+
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+ wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
+
+ wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+ wcn->hw->wiphy->cipher_suites = cipher_suites;
+ wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+ wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+ wcn->hw->max_listen_interval = 200;
+
+ wcn->hw->queues = 4;
+
+ SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+ wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+ wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+ return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ /* Set TX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlantx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get tx_irq\n");
+ return -ENOENT;
+ }
+ wcn->tx_irq = res->start;
+
+ /* Set RX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlanrx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get rx_irq\n");
+ return -ENOENT;
+ }
+ wcn->rx_irq = res->start;
+
+ /* Map the memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wcnss_mmio");
+ if (!res) {
+ wcn36xx_err("failed to get mmio\n");
+ return -ENOENT;
+ }
+ wcn->mmio = ioremap(res->start, resource_size(res));
+ if (!wcn->mmio) {
+ wcn36xx_err("failed to map io memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct wcn36xx *wcn;
+ int ret;
+ u8 addr[ETH_ALEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+ hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+ if (!hw) {
+ wcn36xx_err("failed to alloc hw\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ platform_set_drvdata(pdev, hw);
+ wcn = hw->priv;
+ wcn->hw = hw;
+ wcn->dev = &pdev->dev;
+ wcn->ctrl_ops = pdev->dev.platform_data;
+
+ mutex_init(&wcn->hal_mutex);
+
+ if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ wcn36xx_info("mac address: %pM\n", addr);
+ SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+ }
+
+ ret = wcn36xx_platform_get_resources(wcn, pdev);
+ if (ret)
+ goto out_wq;
+
+ wcn36xx_init_ieee80211(wcn);
+ ret = ieee80211_register_hw(wcn->hw);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(wcn->mmio);
+out_wq:
+ ieee80211_free_hw(hw);
+out_err:
+ return ret;
+}
+
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+ mutex_destroy(&wcn->hal_mutex);
+
+ ieee80211_unregister_hw(hw);
+ iounmap(wcn->mmio);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+ {
+ .name = "wcn36xx",
+ .driver_data = 0
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+ .probe = wcn36xx_probe,
+ .remove = wcn36xx_remove,
+ .driver = {
+ .name = "wcn36xx",
+ .owner = THIS_MODULE,
+ },
+ .id_table = wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+ platform_driver_register(&wcn36xx_driver);
+ return 0;
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 00000000000..28b515c81b0
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ int ret = 0;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	/* TODO: Make sure the TX chain is clean */
+ ret = wcn36xx_smd_enter_bmps(wcn, vif);
+ if (!ret) {
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+ vif_priv->pw_state = WCN36XX_BMPS;
+ } else {
+ /*
+		 * One of the reasons why HW will not enter BMPS is that the
+		 * driver tries to enter BMPS before the first beacon has been
+		 * received, right after authentication completes.
+ */
+ wcn36xx_err("Can not enter BMPS!\n");
+ }
+ return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (WCN36XX_BMPS != vif_priv->pw_state) {
+ wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
+ return -EINVAL;
+ }
+ wcn36xx_smd_exit_bmps(wcn, vif);
+ vif_priv->pw_state = WCN36XX_FULL_POWER;
+ return 0;
+}
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+ return wcn36xx_smd_keep_alive_req(wcn, vif,
+ WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 00000000000..f72ed68b5a0
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+ WCN36XX_FULL_POWER,
+ WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 00000000000..de9eb2cfbf4
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2129 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+ struct wcn36xx_hal_cfg *entry;
+ u32 *val;
+
+ if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+ wcn36xx_err("Not enough room for TLV entry\n");
+ return -ENOMEM;
+ }
+
+ entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+ entry->id = id;
+ entry->len = sizeof(u32);
+ entry->pad_bytes = 0;
+ entry->reserve = 0;
+
+ val = (u32 *) (entry + 1);
+ *val = value;
+
+ *len += sizeof(*entry) + sizeof(u32);
+
+ return 0;
+}
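+
+/*
+ * put_cfg_tlv_u32() appends one { struct wcn36xx_hal_cfg, u32 value } pair
+ * to wcn->hal_buf and advances *len, so callers can chain several
+ * configuration items into one HAL message. Sketch only:
+ *
+ *	size_t len = sizeof(msg_body);
+ *
+ *	put_cfg_tlv_u32(wcn, &len, WCN36XX_HAL_CFG_RTS_THRESHOLD, 2347);
+ */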
+
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+ else if (sta && sta->ht_cap.ht_supported)
+ bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+ else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+ else
+ bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+ return caps & flag ? 1 : 0;
+}
+
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (sta && sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ bss_params->ht = sta->ht_cap.ht_supported;
+ bss_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ bss_params->lsig_tx_op_protection_full_support =
+ is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+ bss_params->lln_non_gf_coexist =
+ !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+ bss_params->dual_cts_protection = 0;
+ /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+ bss_params->ht20_coexist = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ sta_params->ht_capable = sta->ht_cap.ht_supported;
+ sta_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ sta_params->lsig_txop_protection = is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_amsdu_size = is_cap_supported(caps,
+ IEEE80211_HT_CAP_MAX_AMSDU);
+ sta_params->sgi_20Mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_20);
+ sta_params->sgi_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_40);
+ sta_params->green_field_capable = is_cap_supported(caps,
+ IEEE80211_HT_CAP_GRN_FLD);
+ sta_params->delayed_ba_support = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DELAY_BA);
+ sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DSSSCCK40);
+ }
+}
+
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *priv_sta = NULL;
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ sta_params->type = 1;
+ sta_params->sta_index = 0xFF;
+ } else {
+ sta_params->type = 0;
+ sta_params->sta_index = 1;
+ }
+
+ sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ /*
+	 * In STA mode ieee80211_sta contains the BSSID and ieee80211_vif
+	 * contains our MAC address. In AP mode we are the BSS, so the vif
+	 * contains the BSSID and ieee80211_sta contains the peer's MAC address.
+ */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+ sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->short_preamble_supported =
+ !(WCN36XX_FLAGS(wcn) &
+ IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+ sta_params->rifs_mode = 0;
+ sta_params->rmf = 0;
+ sta_params->action = 0;
+ sta_params->uapsd = 0;
+ sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+ sta_params->max_ampdu_duration = 0;
+ sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->p2p = 0;
+
+ if (sta) {
+ priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+ sta_params->wmm_enabled = sta->wme;
+ sta_params->max_sp_len = sta->max_sp;
+ sta_params->aid = priv_sta->aid;
+ wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+ memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+ sizeof(priv_sta->supported_rates));
+ } else {
+ wcn36xx_set_default_rates(&sta_params->supported_rates);
+ }
+}
+
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+ int ret = 0;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
+ init_completion(&wcn->hal_rsp_compl);
+ ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ if (ret) {
+ wcn36xx_err("HAL TX failed\n");
+ goto out;
+ }
+ if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
+ msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
+		wcn36xx_err("Timeout while waiting for SMD response\n");
+ ret = -ETIME;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+#define INIT_HAL_MSG(msg_body, type) \
+ do { \
+ memset(&msg_body, 0, sizeof(msg_body)); \
+ msg_body.header.msg_type = type; \
+ msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.len = sizeof(msg_body); \
+ } while (0) \
+
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+ do { \
+ memset(send_buf, 0, msg_body.header.len); \
+ memcpy(send_buf, &msg_body, sizeof(msg_body)); \
+ } while (0) \
+
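+/*
+ * Typical request/response flow, mirroring wcn36xx_smd_stop() below.
+ * Sketch only; hal_buf is shared, so wcn->hal_mutex must be held around
+ * the whole sequence:
+ *
+ *	struct wcn36xx_hal_mac_stop_req_msg msg_body;
+ *
+ *	mutex_lock(&wcn->hal_mutex);
+ *	INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+ *	msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+ *	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+ *	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ *	if (!ret)
+ *		ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+ *						   wcn->hal_rsp_len);
+ *	mutex_unlock(&wcn->hal_mutex);
+ */
+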
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp *rsp;
+
+ if (len < sizeof(struct wcn36xx_hal_msg_header) +
+ sizeof(struct wcn36xx_fw_msg_status_rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_fw_msg_status_rsp *)
+ (buf + sizeof(struct wcn36xx_hal_msg_header));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+ const struct firmware *nv;
+ struct nv_data *nv_d;
+ struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+ int fw_bytes_left;
+ int ret;
+ u16 fm_offset = 0;
+
+ ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out_free_nv;
+ }
+
+ nv_d = (struct nv_data *)nv->data;
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+ msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+ msg_body.frag_number = 0;
+ /* hal_buf must be protected with mutex */
+ mutex_lock(&wcn->hal_mutex);
+
+ do {
+ fw_bytes_left = nv->size - fm_offset - 4;
+ if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+ msg_body.last_fragment = 0;
+ msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+ } else {
+ msg_body.last_fragment = 1;
+ msg_body.nv_img_buffer_size = fw_bytes_left;
+
+			/* Do not forget to update the overall message length */
+ msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+
+ }
+
+ /* Add load NV request message header */
+ memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+ /* Add NV body itself */
+ memcpy(wcn->hal_buf + sizeof(msg_body),
+ &nv_d->table + fm_offset,
+ msg_body.nv_img_buffer_size);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret)
+ goto out_unlock;
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_load_nv response failed err=%d\n",
+ ret);
+ goto out_unlock;
+ }
+ msg_body.frag_number++;
+ fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+
+ } while (msg_body.last_fragment != 1);
+
+out_unlock:
+ mutex_unlock(&wcn->hal_mutex);
+out_free_nv:
+ release_firmware(nv);
+
+ return ret;
+}
+
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+ return -EIO;
+
+ memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+ memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+
+ /* null terminate the strings, just in case */
+ wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+ wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+ wcn->fw_revision = rsp->start_rsp_params.version.revision;
+ wcn->fw_version = rsp->start_rsp_params.version.version;
+ wcn->fw_minor = rsp->start_rsp_params.version.minor;
+ wcn->fw_major = rsp->start_rsp_params.version.major;
+
+ wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+ wcn->wlan_version, wcn->crm_version);
+
+ wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+ wcn->fw_major, wcn->fw_minor,
+ wcn->fw_version, wcn->fw_revision,
+ rsp->start_rsp_params.stations,
+ rsp->start_rsp_params.bssids);
+
+ return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_start_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+ msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+ msg_body.params.len = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+ msg_body.params.type);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start response failed err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_stop_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+ msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_init_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_init_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_start_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_end_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_end_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_finish_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+ msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_finish_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+ int ret = 0;
+
+ ret = wcn36xx_smd_rsp_status_check(buf, len);
+ if (ret)
+ return ret;
+ rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+ rsp->channel_number, rsp->status);
+ return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch)
+{
+ struct wcn36xx_hal_switch_channel_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+ msg_body.channel_number = (u8)ch;
+ msg_body.tx_mgmt_power = 0xbf;
+ msg_body.max_tx_power = 0xbf;
+ memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_switch_channel failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+ rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+ /* Remove the PNO version bit */
+ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
+ wcn36xx_warn("error response from update scan\n");
+ return rsp->status;
+ }
+
+ return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_update_scan_params_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
+ msg_body.dot11d_enabled = 0;
+ msg_body.dot11d_resolved = 0;
+ msg_body.channel_count = 26;
+ msg_body.active_min_ch_time = 60;
+ msg_body.active_max_ch_time = 120;
+ msg_body.passive_min_ch_time = 60;
+ msg_body.passive_max_ch_time = 110;
+ msg_body.state = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update scan params channel_count %d\n",
+ msg_body.channel_count);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_scan_params failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal add sta self failure: %d\n",
+ rsp->status);
+ return rsp->status;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+ rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+ priv_vif->self_sta_index = rsp->self_sta_index;
+ priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+ return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_add_sta_self_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self self_addr %pM status %d\n",
+ msg_body.self_addr, msg_body.status);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+ struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_delete_sta_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+ msg_body.sta_index = sta_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal delete sta sta_index %d\n",
+ msg_body.sta_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_join_rsp_msg *rsp;
+
+ if (wcn36xx_smd_rsp_status_check(buf, len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal rsp join status %d tx_mgmt_power %d\n",
+ rsp->status, rsp->tx_mgmt_power);
+
+ return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+ struct wcn36xx_hal_join_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+ msg_body.channel = ch;
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+ else
+ msg_body.secondary_channel_offset =
+ PHY_SINGLE_CHANNEL_CENTERED;
+
+ msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+ msg_body.max_tx_power = 0xbf;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+ msg_body.bssid, msg_body.self_sta_mac_addr,
+ msg_body.channel, msg_body.link_state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_join failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_join response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state)
+{
+ struct wcn36xx_hal_set_link_state_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+ msg_body.state = state;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+ msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_link_st failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
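+/*
+ * Repack the common STA parameters into the v1 message layout expected
+ * by firmware builds other than 1.2.2.24 (see wcn36xx_smd_config_sta()).
+ */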
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_params *orig,
+ struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+ /* convert orig to v1 format */
+ memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+ memcpy(&v1->mac, orig->mac, ETH_ALEN);
+ v1->aid = orig->aid;
+ v1->type = orig->type;
+ v1->listen_interval = orig->listen_interval;
+ v1->ht_capable = orig->ht_capable;
+
+ v1->max_ampdu_size = orig->max_ampdu_size;
+ v1->max_ampdu_density = orig->max_ampdu_density;
+ v1->sgi_40mhz = orig->sgi_40mhz;
+ v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+ memcpy(&v1->supported_rates, &orig->supported_rates,
+ sizeof(orig->supported_rates));
+ v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+ struct config_sta_rsp_params *params;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+ params = &rsp->params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config sta response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ sta_priv->sta_index = params->sta_index;
+ sta_priv->dpu_desc_index = params->dpu_index;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ params->status, params->sta_index, params->bssid_index,
+ params->p2p);
+
+ return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+ struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+ &msg_body.sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta->action, sta->sta_index, sta->bssid_index,
+ sta->bssid, sta->type, sta->mac, sta->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx_hal_config_sta_req_msg msg;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ sta_params = &msg.sta_params;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_sta_rsp(wcn,
+ sta,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
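+/*
+ * Build and send the v1 variant of CONFIG_BSS: every field of the
+ * common request is copied into the v1 layout, including the embedded
+ * STA parameters.
+ */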
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ /* convert orig to v1 */
+ memcpy(&msg_body.bss_params.bssid,
+ &orig->bss_params.bssid, ETH_ALEN);
+ memcpy(&msg_body.bss_params.self_mac_addr,
+ &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+ msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+ msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+ msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+ msg_body.bss_params.short_slot_time_supported =
+ orig->bss_params.short_slot_time_supported;
+ msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+ msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+ msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+ msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+ msg_body.bss_params.lln_non_gf_coexist =
+ orig->bss_params.lln_non_gf_coexist;
+
+ msg_body.bss_params.lsig_tx_op_protection_full_support =
+ orig->bss_params.lsig_tx_op_protection_full_support;
+ msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+ msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+ msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+ msg_body.bss_params.tx_channel_width_set =
+ orig->bss_params.tx_channel_width_set;
+ msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+ msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+ msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+ memcpy(&msg_body.bss_params.ssid,
+ &orig->bss_params.ssid,
+ sizeof(orig->bss_params.ssid));
+
+ msg_body.bss_params.action = orig->bss_params.action;
+ msg_body.bss_params.rateset = orig->bss_params.rateset;
+ msg_body.bss_params.ht = orig->bss_params.ht;
+ msg_body.bss_params.obss_prot_enabled =
+ orig->bss_params.obss_prot_enabled;
+ msg_body.bss_params.rmf = orig->bss_params.rmf;
+ msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+ msg_body.bss_params.dual_cts_protection =
+ orig->bss_params.dual_cts_protection;
+
+ msg_body.bss_params.max_probe_resp_retry_limit =
+ orig->bss_params.max_probe_resp_retry_limit;
+ msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+ msg_body.bss_params.proxy_probe_resp =
+ orig->bss_params.proxy_probe_resp;
+ msg_body.bss_params.edca_params_valid =
+ orig->bss_params.edca_params_valid;
+
+ memcpy(&msg_body.bss_params.acbe,
+ &orig->bss_params.acbe,
+ sizeof(orig->bss_params.acbe));
+ memcpy(&msg_body.bss_params.acbk,
+ &orig->bss_params.acbk,
+ sizeof(orig->bss_params.acbk));
+ memcpy(&msg_body.bss_params.acvi,
+ &orig->bss_params.acvi,
+ sizeof(orig->bss_params.acvi));
+ memcpy(&msg_body.bss_params.acvo,
+ &orig->bss_params.acvo,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.ext_set_sta_key_param_valid =
+ orig->bss_params.ext_set_sta_key_param_valid;
+
+ memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+ &orig->bss_params.ext_set_sta_key_param,
+	       sizeof(orig->bss_params.ext_set_sta_key_param));
+
+ msg_body.bss_params.wcn36xx_hal_persona =
+ orig->bss_params.wcn36xx_hal_persona;
+ msg_body.bss_params.spectrum_mgt_enable =
+ orig->bss_params.spectrum_mgt_enable;
+ msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+ msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+ &msg_body.bss_params.sta);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
+
+ priv_vif->bss_index = params->bss_index;
+
+ if (priv_vif->sta) {
+ priv_vif->sta->bss_sta_index = params->bss_sta_index;
+ priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ }
+
+ priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg.bss_params;
+ sta_params = &bss->sta;
+
+ WARN_ON(is_zero_ether_addr(bssid));
+
+ memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+ memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+ /* AP */
+ bss->oper_mode = 0;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+ } else if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ } else {
+ wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+ else
+ bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+ bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+ bss->lla_coexist = 0;
+ bss->llb_coexist = 0;
+ bss->llg_coexist = 0;
+ bss->rifs_mode = 0;
+ bss->beacon_interval = vif->bss_conf.beacon_int;
+ bss->dtim_period = vif_priv->dtim_period;
+
+ wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+ bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ bss->reserved = 0;
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+	/* vif_priv->ssid is only valid in AP and IBSS mode */
+ bss->ssid.length = vif_priv->ssid.length;
+ memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+ bss->obss_prot_enabled = 0;
+ bss->rmf = 0;
+ bss->max_probe_resp_retry_limit = 0;
+ bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+ bss->proxy_probe_resp = 0;
+ bss->edca_params_valid = 0;
+
+ /* FIXME: set acbe, acbk, acvi and acvo */
+
+ bss->ext_set_sta_key_param_valid = 0;
+
+ /* FIXME: set ext_set_sta_key_param */
+
+ bss->spectrum_mgt_enable = 0;
+ bss->tx_mgmt_power = 0;
+ bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
+ bss->action = update;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta_params->bssid, sta_params->action,
+ sta_params->sta_index, sta_params->bssid_index,
+ sta_params->aid, sta_params->type,
+ sta_params->mac);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_bss_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_delete_bss_req_msg msg_body;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+ msg_body.bss_index = priv_vif->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off)
+{
+ struct wcn36xx_hal_send_beacon_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+	/* TODO: find out why the extra 6 bytes are needed here */
+ msg_body.beacon_length = skb_beacon->len + 6;
+
+ if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+ memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+ memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+ skb_beacon->len);
+ } else {
+		wcn36xx_err("Beacon is too big: beacon size=%d\n",
+ msg_body.beacon_length);
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+	/* TODO: find out why the 4-byte offset is needed here */
+	msg_body.tim_ie_offset = tim_off + 4;
+ msg_body.p2p_ie_offset = p2p_off;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal send beacon beacon_length %d\n",
+ msg_body.beacon_length);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_send_beacon failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wcn36xx_hal_send_probe_resp_req_msg msg;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+ if (skb->len > BEACON_TEMPLATE_SIZE) {
+ wcn36xx_warn("probe response template is too big: %d\n",
+ skb->len);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ msg.probe_resp_template_len = skb->len;
+ memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+ memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update probe rsp len %d bssid %pM\n",
+ msg.probe_resp_template_len, msg.bssid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+ msg_body.set_sta_key_params.sta_index = sta_index;
+ msg_body.set_sta_key_params.enc_type = enc_type;
+
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ msg_body.set_sta_key_params.single_tid_rc = 1;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key)
+{
+ struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.num_keys = 1;
+ msg_body.keys[0].id = keyidx;
+ msg_body.keys[0].unicast = 0;
+ msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+ msg_body.keys[0].pae_role = 0;
+ msg_body.keys[0].length = keylen;
+ memcpy(msg_body.keys[0].key, key, keylen);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+ msg_body.sta_idx = sta_index;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx)
+{
+ struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.tbtt = vif->bss_conf.sync_tsf;
+ msg_body.dtim_period = vif_priv->dtim_period;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+ struct wcn36xx_hal_set_power_params_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+	/*
+	 * When the host is down, ignore every second DTIM
+	 */
+ if (ignore_dtim) {
+ msg_body.ignore_dtim = 1;
+ msg_body.dtim_period = 2;
+ }
+ msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_power_params failed\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+/* Note: this function should only be called after association,
+ * otherwise the request is invalid.
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type)
+{
+ struct wcn36xx_hal_keep_alive_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+ if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+		/* TODO: the ARP response packet type is not handled yet */
+ } else {
+		wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+		wcn36xx_err("Sending hal_keep_alive failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+		wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5)
+{
+ struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+ msg_body.arg1 = arg1;
+ msg_body.arg2 = arg2;
+ msg_body.arg3 = arg3;
+ msg_body.arg4 = arg4;
+ msg_body.arg5 = arg5;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_dump_cmd failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
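+/*
+ * Helpers for the feature capability bitmap exchanged with the
+ * firmware: 128 capability bits stored as four u32 words.
+ */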
+static inline void set_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+static inline int get_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+ int ret = 0;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+ return ret;
+}
+
+static inline void clear_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_feat_caps_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+ set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+ msg_body.sta_index = sta_index;
+ memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+ msg_body.dialog_token = 0x10;
+ msg_body.tid = tid;
+
+ /* Immediate BA because Delayed BA is not supported */
+ msg_body.policy = 1;
+ msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+ msg_body.timeout = 0;
+ if (ssn)
+ msg_body.ssn = *ssn;
+ msg_body.direction = direction;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba_session failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_add_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+ struct wcn36xx_hal_del_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+ msg_body.sta_index = sta_index;
+ msg_body.tid = tid;
+ msg_body.direction = 0;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_del_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+ struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.candidate_cnt = 1;
+ msg_body.header.len += sizeof(*candidate);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+ (wcn->hal_buf + sizeof(msg_body));
+ candidate->sta_index = sta_index;
+ candidate->tid_bitmap = 1;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_trigger_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Bad TX complete indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+ return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+	/* Old firmware does not report the bss_index in this indication */
+ if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ tmp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ }
+ return 0;
+ }
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted missed beacon indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (tmp->bss_index == rsp->bss_index) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ rsp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+ return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_sta *sta = NULL;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete sta indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+		if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
+ sta = container_of((void *)tmp->sta,
+ struct ieee80211_sta,
+ drv_priv);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d\n",
+ rsp->addr2,
+ rsp->sta_id);
+ ieee80211_report_low_ack(sta, 0);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("STA with addr %pM and index %d not found\n",
+ rsp->addr2,
+ rsp->sta_id);
+ return -ENOENT;
+}
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+ struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+ size_t len;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+ len = msg_body.header.len;
+
+ put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+ body->header.len = len;
+ body->len = len - sizeof(*body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_cfg failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
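+
+/*
+ * SMD receive callback: synchronous responses are copied back into
+ * hal_buf and complete the waiter in wcn36xx_smd_send_and_wait(), while
+ * indications are queued and handled later by wcn36xx_ind_smd_work().
+ */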
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_msg_header *msg_header = buf;
+ struct wcn36xx_hal_ind_msg *msg_ind;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_START_RSP:
+ case WCN36XX_HAL_CONFIG_STA_RSP:
+ case WCN36XX_HAL_CONFIG_BSS_RSP:
+ case WCN36XX_HAL_ADD_STA_SELF_RSP:
+ case WCN36XX_HAL_STOP_RSP:
+ case WCN36XX_HAL_DEL_STA_SELF_RSP:
+ case WCN36XX_HAL_DELETE_STA_RSP:
+ case WCN36XX_HAL_INIT_SCAN_RSP:
+ case WCN36XX_HAL_START_SCAN_RSP:
+ case WCN36XX_HAL_END_SCAN_RSP:
+ case WCN36XX_HAL_FINISH_SCAN_RSP:
+ case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+ case WCN36XX_HAL_DELETE_BSS_RSP:
+ case WCN36XX_HAL_SEND_BEACON_RSP:
+ case WCN36XX_HAL_SET_LINK_ST_RSP:
+ case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+ case WCN36XX_HAL_SET_BSSKEY_RSP:
+ case WCN36XX_HAL_SET_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_BSSKEY_RSP:
+ case WCN36XX_HAL_ENTER_BMPS_RSP:
+ case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+ case WCN36XX_HAL_EXIT_BMPS_RSP:
+ case WCN36XX_HAL_KEEP_ALIVE_RSP:
+ case WCN36XX_HAL_DUMP_COMMAND_RSP:
+ case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+ case WCN36XX_HAL_ADD_BA_RSP:
+ case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_TRIGGER_BA_RSP:
+ case WCN36XX_HAL_UPDATE_CFG_RSP:
+ case WCN36XX_HAL_JOIN_RSP:
+ case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+ case WCN36XX_HAL_CH_SWITCH_RSP:
+ case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ memcpy(wcn->hal_buf, buf, len);
+ wcn->hal_rsp_len = len;
+ complete(&wcn->hal_rsp_compl);
+ break;
+
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ mutex_lock(&wcn->hal_ind_mutex);
+ msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ memcpy(msg_ind->msg, buf, len);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ mutex_unlock(&wcn->hal_ind_mutex);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+}
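+
+/* Drain one queued indication from hal_ind_queue and dispatch it. */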
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, hal_ind_work);
+ struct wcn36xx_hal_msg_header *msg_header;
+ struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+ mutex_lock(&wcn->hal_ind_mutex);
+
+ hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+ struct wcn36xx_hal_ind_msg,
+ list);
+
+ msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ wcn36xx_smd_tx_compl_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ wcn36xx_smd_missed_beacon_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ wcn36xx_smd_delete_sta_context_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+ list_del(wcn->hal_ind_queue.next);
+ kfree(hal_ind_msg->msg);
+ kfree(hal_ind_msg);
+ mutex_unlock(&wcn->hal_ind_mutex);
+}
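+
+/*
+ * Create the indication workqueue and open the platform SMD control
+ * channel, registering wcn36xx_smd_rsp_process() as the receive handler.
+ */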
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+ int ret = 0;
+ wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+ if (!wcn->hal_ind_wq) {
+ wcn36xx_err("failed to allocate wq\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+ INIT_LIST_HEAD(&wcn->hal_ind_queue);
+ mutex_init(&wcn->hal_ind_mutex);
+
+ ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+ if (ret) {
+ wcn36xx_err("failed to open control channel\n");
+ goto free_wq;
+ }
+
+ return ret;
+
+free_wq:
+ destroy_workqueue(wcn->hal_ind_wq);
+out:
+ return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+ wcn->ctrl_ops->close();
+ destroy_workqueue(wcn->hal_ind_wq);
+ mutex_destroy(&wcn->hal_ind_mutex);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 00000000000..e7c39019c6f
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE 3072
+
+#define WCN36XX_HAL_BUF_SIZE 4096
+
+#define HAL_MSG_TIMEOUT 200
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+/* The PNO version info is contained in the rsp msg */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK 0x8000
+
+enum wcn36xx_fw_msg_result {
+ WCN36XX_FW_MSG_RESULT_SUCCESS = 0,
+ WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC = 1,
+
+ WCN36XX_FW_MSG_RESULT_MEM_FAIL = 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+ struct list_head list;
+ u8 *msg;
+ size_t msg_len;
+};
+
+struct wcn36xx;
+
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 00000000000..b2b60e30caa
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
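+/*
+ * The top byte of phy_stat0 carries the raw RSSI reported by the firmware;
+ * wcn36xx_rx_skb() negates the result for mac80211's signal field.
+ */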
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+ return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
+
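+/*
+ * Strip the RX buffer descriptor prepended by the firmware, fill in the
+ * mac80211 RX status and hand the frame over to mac80211.
+ */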
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status status;
+ struct ieee80211_hdr *hdr;
+ struct wcn36xx_rx_bd *bd;
+ u16 fc, sn;
+
+ /*
+ * All fields must be zeroed, otherwise stale values can lead to
+ * unexpected behaviour in mac80211.
+ */
+ memset(&status, 0, sizeof(status));
+
+ bd = (struct wcn36xx_rx_bd *)skb->data;
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+ "BD <<< ", (char *)bd,
+ sizeof(struct wcn36xx_rx_bd));
+
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);
+
+ status.mactime = 10;
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
+ status.signal = -get_rssi0(bd);
+ status.antenna = 1;
+ status.rate_idx = 1;
+ status.flag = 0;
+ status.rx_flags = 0;
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_DECRYPTED;
+
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
+ status.flag, status.vendor_radiotap_len);
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = __le16_to_cpu(hdr->frame_control);
+ sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ }
+
+ ieee80211_rx_irqsafe(wcn->hw, skb);
+
+ return 0;
+}
+
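+/*
+ * Fill the PDU part of the TX BD: the 802.11 header starts right after the
+ * BD and the payload follows the header.
+ */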
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+ u32 mpdu_header_len,
+ u32 len,
+ u16 tid)
+{
+ bd->pdu.mpdu_header_len = mpdu_header_len;
+ bd->pdu.mpdu_header_off = sizeof(*bd);
+ bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+ bd->pdu.mpdu_header_off;
+ bd->pdu.mpdu_len = len;
+ bd->pdu.tid = tid;
+}
+
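+/*
+ * Look up the vif private data whose MAC address matches @addr; used when
+ * mac80211 does not provide a station pointer for the frame.
+ */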
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+ u8 *addr)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+ return vif_priv;
+ }
+ wcn36xx_warn("vif %pM not found\n", addr);
+ return NULL;
+}
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct wcn36xx_sta *sta_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *__vif_priv = NULL;
+ bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+ /*
+ * For non-unicast frames mac80211 does not set the sta pointer, so
+ * use self_sta_index instead.
+ */
+ if (sta_priv) {
+ __vif_priv = sta_priv->vif;
+ vif = container_of((void *)__vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bd->sta_index = sta_priv->bss_sta_index;
+ bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bd->sta_index = sta_priv->sta_index;
+ bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+ }
+ } else {
+ __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ }
+
+ bd->dpu_sign = __vif_priv->ucast_dpu_signature;
+
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ (sta_priv && !sta_priv->is_data_encrypted))
+ bd->dpu_ne = 1;
+
+ if (bcast) {
+ bd->ub = 1;
+ bd->ack_policy = 1;
+ }
+ *vif_priv = __vif_priv;
+}
+
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct wcn36xx_vif *__vif_priv =
+ get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_ne = 1;
+
+ /* default rate for unicast */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ WCN36XX_BD_RATE_CTRL :
+ WCN36XX_BD_RATE_MGMT;
+ else if (ieee80211_is_ctl(hdr->frame_control))
+ bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+ else
+ wcn36xx_warn("frame control type unknown\n");
+
+ /*
+ * While joining, trick the hardware into treating the probe request
+ * as unicast even if the address is broadcast.
+ */
+ if (__vif_priv->is_joining &&
+ ieee80211_is_probe_req(hdr->frame_control))
+ bcast = false;
+
+ if (bcast) {
+ /* broadcast */
+ bd->ub = 1;
+ /* No ACK needed since the frame is not unicast */
+ bd->ack_policy = 1;
+ bd->queue_id = WCN36XX_TX_B_WQ_ID;
+ } else
+ bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ *vif_priv = __vif_priv;
+}
+
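+/*
+ * Build the TX buffer descriptor for the frame and hand it to the DXE
+ * layer. Data frames use the low priority channel, management and control
+ * frames the high priority one.
+ */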
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
+ bool is_low = ieee80211_is_data(hdr->frame_control);
+ bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+ if (!bd) {
+ /*
+ * TX DXE descriptors are used in pairs: one for the BD and one for
+ * the actual frame. The BD DXE has a preallocated buffer while the
+ * skb one does not. If this isn't true something is really weird.
+ * TODO: Recover from this situation
+ */
+
+ wcn36xx_err("bd address may not be NULL for BD DXE\n");
+ return -EINVAL;
+ }
+
+ memset(bd, 0, sizeof(*bd));
+
+ wcn36xx_dbg(WCN36XX_DBG_TX,
+ "tx skb %p len %d fc %04x sn %d %s %s\n",
+ skb, skb->len, __le16_to_cpu(hdr->frame_control),
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+ is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+ bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+ bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+ if (bd->tx_comp) {
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ if (wcn->tx_ack_skb) {
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+ wcn36xx_warn("tx_ack_skb already set\n");
+ return -EINVAL;
+ }
+
+ wcn->tx_ack_skb = skb;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ /* Only one ack status request at a time is supported by the
+ * firmware. Stop the TX queues until the ack status gets back.
+ *
+ * TODO: Add watchdog in case FW does not answer
+ */
+ ieee80211_stop_queues(wcn->hw);
+ }
+
+ /* Data frames are served first */
+ if (is_low) {
+ wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, sta_priv ? sta_priv->tid : 0);
+ } else {
+ /* MGMT and CTRL frames are handled here */
+ wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, WCN36XX_TID);
+ }
+
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ bd->tx_bd_sign = 0xbdbdbdbd;
+
+ return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 00000000000..bbfbcf808c7
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN 24
+#define WCN36XX_BMU_WQ_TX 25
+#define WCN36XX_TID 7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID 0xA
+#define WCN36XX_TX_U_WQ_ID 0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
+
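+/*
+ * Buffer descriptor layouts shared by the RX and TX paths. The bitfields
+ * mirror the format expected by the firmware.
+ */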
+struct wcn36xx_pdu {
+ u32 dpu_fb:8;
+ u32 adu_fb:8;
+ u32 pdu_id:16;
+
+ /* 0x04*/
+ u32 tail_pdu_idx:16;
+ u32 head_pdu_idx:16;
+
+ /* 0x08*/
+ u32 pdu_count:7;
+ u32 mpdu_data_off:9;
+ u32 mpdu_header_off:8;
+ u32 mpdu_header_len:8;
+
+ /* 0x0c*/
+ u32 reserved4:8;
+ u32 tid:4;
+ u32 reserved3:4;
+ u32 mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 rx_key_id:3;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 uma_bypass:1;
+ u32 csr11:1;
+ u32 reserved0:1;
+ u32 scan_learn:1;
+ u32 rx_ch:4;
+ u32 rtsf:1;
+ u32 bsf:1;
+ u32 a2hf:1;
+ u32 st_auf:1;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 addr3:8;
+ u32 addr2:8;
+ u32 addr1:8;
+ u32 dpu_desc_idx:8;
+
+ /* 0x18*/
+ u32 rxp_flags:23;
+ u32 rate_id:9;
+
+ u32 phy_stat0;
+ u32 phy_stat1;
+
+ /* 0x24 */
+ u32 rx_times;
+
+ u32 pmi_cmd[6];
+
+ /* 0x40 */
+ u32 reserved7:4;
+ u32 reorder_slot_id:6;
+ u32 reorder_fwd_id:6;
+ u32 reserved6:12;
+ u32 reorder_code:4;
+
+ /* 0x44 */
+ u32 exp_seq_num:12;
+ u32 cur_seq_num:12;
+ u32 fr_type_subtype:8;
+
+ /* 0x48 */
+ u32 msdu_size:16;
+ u32 sub_fr_id:4;
+ u32 proc_order:4;
+ u32 reserved9:4;
+ u32 aef:1;
+ u32 lsf:1;
+ u32 esf:1;
+ u32 asf:1;
+};
+
+struct wcn36xx_tx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 fw_tx_comp:1;
+ u32 tx_comp:1;
+ u32 reserved1:1;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 reserved0:12;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 reserved5:7;
+ u32 queue_id:5;
+ u32 bd_rate:2;
+ u32 ack_policy:2;
+ u32 sta_index:8;
+ u32 dpu_desc_idx:8;
+
+ u32 tx_bd_sign;
+ u32 reserved6;
+ u32 dxe_start_time;
+ u32 dxe_end_time;
+
+ /*u32 tcp_udp_start_off:10;
+ u32 header_cks:16;
+ u32 reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb);
+
+#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 00000000000..58b63833e8e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+ WCN36XX_DBG_DXE = 0x00000001,
+ WCN36XX_DBG_DXE_DUMP = 0x00000002,
+ WCN36XX_DBG_SMD = 0x00000004,
+ WCN36XX_DBG_SMD_DUMP = 0x00000008,
+ WCN36XX_DBG_RX = 0x00000010,
+ WCN36XX_DBG_RX_DUMP = 0x00000020,
+ WCN36XX_DBG_TX = 0x00000040,
+ WCN36XX_DBG_TX_DUMP = 0x00000080,
+ WCN36XX_DBG_HAL = 0x00000100,
+ WCN36XX_DBG_HAL_DUMP = 0x00000200,
+ WCN36XX_DBG_MAC = 0x00000400,
+ WCN36XX_DBG_BEACON = 0x00000800,
+ WCN36XX_DBG_BEACON_DUMP = 0x00001000,
+ WCN36XX_DBG_PMC = 0x00002000,
+ WCN36XX_DBG_PMC_DUMP = 0x00004000,
+ WCN36XX_DBG_ANY = 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...) \
+ printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+
+#define wcn36xx_warn(fmt, arg...) \
+ printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...) \
+ printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##arg); \
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str), \
+ DUMP_PREFIX_OFFSET, 32, 1, \
+ buf, len, false); \
+} while (0)
+
+#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+
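+/* Convert a buffer of 32-bit words to big-endian in place; used on buffer
+ * descriptors exchanged with the hardware.
+ */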
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+}
+
+struct nv_data {
+ int is_valid;
+ u8 table;
+};
+
+/* Interface for the platform control path
+ *
+ * @open: called when wcn36xx wants to open the control channel.
+ * @close: closes the control channel.
+ * @tx: sends a buffer over the control channel.
+ * @get_hw_mac: retrieves the hardware MAC address from the platform.
+ * @smsm_change_state: clears and sets bits in the SMSM shared state.
+ */
+struct wcn36xx_platform_ctrl_ops {
+ int (*open)(void *drv_priv, void *rsp_cb);
+ void (*close)(void);
+ int (*tx)(char *buf, size_t len);
+ int (*get_hw_mac)(u8 *addr);
+ int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: initially set to 0xFF; received from HW after the first
+ * config_bss call and must be used in delete_bss and enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+ struct list_head list;
+ struct wcn36xx_sta *sta;
+ u8 dtim_period;
+ enum ani_ed_type encrypt_type;
+ bool is_joining;
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Power management */
+ enum wcn36xx_power_state pw_state;
+
+ u8 bss_index;
+ u8 ucast_dpu_signature;
+ /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+ u8 self_sta_index;
+ u8 self_dpu_desc_index;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ * used in both SMD channel and TX BD. See the table below for when it is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * | | STA | AP |
+ * |______________|_____________|_______________|
+ * | TX BD |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |smd_delete_sta| sta_index | sta_index |
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+ struct wcn36xx_vif *vif;
+ u16 aid;
+ u16 tid;
+ u8 sta_index;
+ u8 dpu_desc_index;
+ u8 bss_sta_index;
+ u8 bss_dpu_desc_index;
+ bool is_data_encrypted;
+ /* Rates */
+ struct wcn36xx_hal_supported_rates supported_rates;
+};
+struct wcn36xx_dxe_ch;
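+
+/* Per-device driver context */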
+struct wcn36xx {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ struct list_head vif_list;
+
+ u8 fw_revision;
+ u8 fw_version;
+ u8 fw_minor;
+ u8 fw_major;
+
+ /* extra byte for the NULL termination */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+ /* IRQs */
+ int tx_irq;
+ int rx_irq;
+ void __iomem *mmio;
+
+ struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+ /*
+ * hal_buf must be protected with hal_mutex to guarantee
+ * that all messages are sent one after another
+ */
+ u8 *hal_buf;
+ size_t hal_rsp_len;
+ struct mutex hal_mutex;
+ struct completion hal_rsp_compl;
+ struct workqueue_struct *hal_ind_wq;
+ struct work_struct hal_ind_work;
+ struct mutex hal_ind_mutex;
+ struct list_head hal_ind_queue;
+
+ /* DXE channels */
+ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
+ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
+ struct wcn36xx_dxe_ch dxe_rx_l_ch; /* RX low */
+ struct wcn36xx_dxe_ch dxe_rx_h_ch; /* RX high */
+
+ /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+ spinlock_t dxe_lock;
+ bool queues_stopped;
+
+ /* Memory pools */
+ struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+ struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+ struct sk_buff *tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+ /* Debug file system entry */
+ struct wcn36xx_dfs_entry dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+ u8 major,
+ u8 minor,
+ u8 version,
+ u8 revision)
+{
+ return (wcn->fw_major == major &&
+ wcn->fw_minor == minor &&
+ wcn->fw_version == version &&
+ wcn->fw_revision == revision);
+}
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+#endif /* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 61c302a6bde..5b340769d5b 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
conn.channel = ch - 1;
- memcpy(conn.bssid, bss->bssid, 6);
- memcpy(conn.dst_mac, bss->bssid, 6);
+ memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+ memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
/*
* FW don't support scan after connection attempt
*/
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 0a2844c48a6..fd30cddd588 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -250,7 +250,7 @@ int wil_reset(struct wil6210_priv *wil)
/* init after reset */
wil->pending_connect_cid = -1;
- INIT_COMPLETION(wil->wmi_ready);
+ reinit_completion(&wil->wmi_ready);
/* TODO: release MAC reset */
wil6210_enable_irq(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eb1dc7ad80f..eeceab39cda 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, wil->csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index b827d51c30a..0d950f209da 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if (priv->wep_is_on)
frame_ctl |= IEEE80211_FCTL_PROTECTED;
if (priv->operating_mode == IW_MODE_ADHOC) {
- skb_copy_from_linear_data(skb, &header.addr1, 6);
- memcpy(&header.addr2, dev->dev_addr, 6);
- memcpy(&header.addr3, priv->BSSID, 6);
+ skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+ memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
} else {
frame_ctl |= IEEE80211_FCTL_TODS;
- memcpy(&header.addr1, priv->CurrentBSSID, 6);
- memcpy(&header.addr2, dev->dev_addr, 6);
- skb_copy_from_linear_data(skb, &header.addr3, 6);
+ memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+ skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
}
if (priv->use_wpa)
- memcpy(&header.addr4, SNAP_RFC1024, 6);
+ memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
header.frame_control = cpu_to_le16(frame_ctl);
/* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
}
}
- memcpy(skbp, header->addr1, 6); /* destination address */
+ memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(&skbp[6], header->addr3, 6);
+ memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
else
- memcpy(&skbp[6], header->addr2, 6); /* source address */
+ memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
u8 frag_no, int more_frags)
{
- u8 mac4[6];
- u8 source[6];
+ u8 mac4[ETH_ALEN];
+ u8 source[ETH_ALEN];
struct sk_buff *skb;
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(source, header->addr3, 6);
+ memcpy(source, header->addr3, ETH_ALEN);
else
- memcpy(source, header->addr2, 6);
+ memcpy(source, header->addr2, ETH_ALEN);
rx_packet_loc += 24; /* skip header */
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
msdu_size -= 4;
if (frag_no == 0) { /* first fragment */
- atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
- msdu_size -= 6;
- rx_packet_loc += 6;
+ atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+ msdu_size -= ETH_ALEN;
+ rx_packet_loc += ETH_ALEN;
if (priv->do_rx_crc)
crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_seq = seq_no;
priv->frag_no = 1;
priv->frag_len = msdu_size;
- memcpy(priv->frag_source, source, 6);
- memcpy(&priv->rx_buf[6], source, 6);
- memcpy(priv->rx_buf, header->addr1, 6);
+ memcpy(priv->frag_source, source, ETH_ALEN);
+ memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+ memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
}
}
} else if (priv->frag_no == frag_no &&
priv->frag_seq == seq_no &&
- memcmp(priv->frag_source, source, 6) == 0) {
+ memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
more_frags = 1; /* don't send broken assembly */
}
}
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_no++;
if (!more_frags) { /* last one */
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
priv->dev->stats.rx_dropped++;
} else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
/* we use the same buffer for frag reassembly and control packets */
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
if (priv->do_rx_crc) {
/* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->last_qual = jiffies;
priv->last_beacon_timestamp = 0;
memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
- memset(priv->BSSID, 0, 6);
+ memset(priv->BSSID, 0, ETH_ALEN);
priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
priv->station_was_associated = 0;
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
- memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+ memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
for (i = 0; i < priv->BSS_list_entries; i++) {
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+ memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
static void atmel_scan(struct atmel_private *priv, int specific_ssid)
{
struct {
- u8 BSSID[6];
+ u8 BSSID[ETH_ALEN];
u8 SSID[MAX_SSID_LENGTH];
u8 scan_type;
u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
u8 SSID_size;
} cmd;
- memset(cmd.BSSID, 0xff, 6);
+ memset(cmd.BSSID, 0xff, ETH_ALEN);
if (priv->fast_scan) {
cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+ memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
cmd.channel = (priv->channel & 0x7f);
cmd.BSS_type = type;
cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->BSSID, 6);
+ memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
cmd.BSS_type = type;
cmd.channel = (priv->channel & 0x7f);
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, 6);
- memcpy(header.addr2, priv->dev->dev_addr, 6);
- memcpy(header.addr3, priv->CurrentBSSID, 6);
+ memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+ memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
/* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
struct ass_req_format {
__le16 capability;
__le16 listen_interval;
- u8 ap[6]; /* nothing after here directly accessible */
+ u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
u8 ssid_el_id;
u8 ssid_len;
u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, 6);
- memcpy(header.addr2, priv->dev->dev_addr, 6);
- memcpy(header.addr3, priv->CurrentBSSID, 6);
+ memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+ memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
/* current AP address - only in reassoc frame */
if (is_reassoc) {
- memcpy(body.ap, priv->CurrentBSSID, 6);
+ memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
int i, index;
for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
- if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+ if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
index = i;
/* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
return;
index = priv->BSS_list_entries++;
- memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+ memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
priv->BSSinfo[index].RSSI = rssi;
} else {
if (rssi > priv->BSSinfo[index].RSSI)
@@ -3212,7 +3212,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
status != WLAN_STATUS_ASSOC_DENIED_RATES &&
status != WLAN_STATUS_CAPS_UNSUPPORTED &&
- priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+ priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->ReAssociationRequestRetryCnt++;
send_association_request(priv, 1);
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
{
struct bss_info *bss = &priv->BSSinfo[bss_index];
- memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+ memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
/* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
0x00, 0x04, 0x25, 0x00, 0x00, 0x00
};
printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
- memcpy(dev->dev_addr, default_mac, 6);
+ memcpy(dev->dev_addr, default_mac, ETH_ALEN);
}
}
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
struct { /* NB this is matched to the hardware, don't change. */
u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
- u8 receiver_address[6];
+ u8 receiver_address[ETH_ALEN];
u8 wep_is_on;
u8 default_key; /* 0..3 */
u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
mib.wep_is_on = priv->wep_is_on;
mib.exclude_unencrypted = priv->exclude_unencrypted;
- memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+ memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
/* zero all the keys before adding in valid ones. */
memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index c51d2dc489e..1d7982afc0a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1065,12 +1065,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->dev->dma_dev, mask);
- if (!err) {
- err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
- if (!err)
- break;
- }
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 7c970d3ae35..05ee7f10cc8 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -164,7 +164,8 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
}
en_addr = en_addrs[override][i];
- val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+ if (e)
+ val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
if (off) {
b43_phy_mask(dev, en_addr, ~en_mask);
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8cb206a8908..4ae63f4ddfb 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
else
txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
txhdr->mac_frame_ctl = wlhdr->frame_control;
- memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+ memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
/* Calculate duration for fallback rate */
if ((rate_fb == rate) ||
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 42eb26c99e1..b2ed1795130 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -806,12 +806,9 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->dev->dma_dev, mask);
- if (!err) {
- err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
- if (!err)
- break;
- }
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 849a28c8030..86588c9ff0f 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
txhdr->mac_frame_ctl = wlhdr->frame_control;
- memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+ memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
/* Calculate duration for fallback rate */
if ((rate_fb->hw_value == rate) ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e13b1a65c65..3e10b801eee 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -26,7 +26,6 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <defs.h>
@@ -239,7 +238,9 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num = SDIO_FUNC_1;
reg_size = 4;
- brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ if (ret)
+ goto done;
}
do {
@@ -255,6 +256,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num, addr, data, 4);
} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+done:
if (ret != 0)
brcmf_err("failed with %d\n", ret);
@@ -315,8 +317,36 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
*ret = retval;
}
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr, struct sk_buff *pkt)
+{
+ unsigned int req_sz;
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* Single skb use the standard mmc interface */
+ req_sz = pkt->len + 3;
+ req_sz &= (uint)~3;
+
+ if (write)
+ return sdio_memcpy_toio(sdiodev->func[fn], addr,
+ ((u8 *)(pkt->data)),
+ req_sz);
+ else if (fn == 1)
+ return sdio_memcpy_fromio(sdiodev->func[fn],
+ ((u8 *)(pkt->data)),
+ addr, req_sz);
+ else
+ /* function 2 read is a FIFO operation */
+ return sdio_readsb(sdiodev->func[fn],
+ ((u8 *)(pkt->data)), addr,
+ req_sz);
+}
+
/**
- * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * brcmf_sdio_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
@@ -327,12 +357,13 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr, struct sk_buff_head *pktlist)
+static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr,
+ struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
- unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
- unsigned short max_seg_sz, seg_sz;
+ unsigned int max_req_sz, orig_offset, dst_offset;
+ unsigned short max_seg_cnt, seg_sz;
unsigned char *pkt_data, *orig_data, *dst_data;
struct sk_buff *pkt_next = NULL, *local_pkt_next;
struct sk_buff_head local_list, *target_list;
@@ -341,7 +372,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
struct mmc_data mmc_dat;
struct sg_table st;
struct scatterlist *sgl;
- struct mmc_host *host;
int ret = 0;
if (!pktlist->qlen)
@@ -351,27 +381,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Single skb use the standard mmc interface */
- if (pktlist->qlen == 1) {
- pkt_next = pktlist->next;
- req_sz = pkt_next->len + 3;
- req_sz &= (uint)~3;
-
- if (write)
- return sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt_next->data)),
- req_sz);
- else if (fn == 1)
- return sdio_memcpy_fromio(sdiodev->func[fn],
- ((u8 *)(pkt_next->data)),
- addr, req_sz);
- else
- /* function 2 read is FIFO operation */
- return sdio_readsb(sdiodev->func[fn],
- ((u8 *)(pkt_next->data)), addr,
- req_sz);
- }
-
target_list = pktlist;
/* for host with broken sg support, prepare a page aligned list */
__skb_queue_head_init(&local_list);
@@ -398,38 +407,46 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = &local_list;
}
- host = sdiodev->func[fn]->card->host;
func_blk_sz = sdiodev->func[fn]->cur_blksize;
- /* Blocks per command is limited by host count, host transfer
- * size and the maximum for IO_RW_EXTENDED of 511 blocks.
- */
- max_blks = min_t(unsigned int, host->max_blk_count, 511u);
- max_req_sz = min_t(unsigned int, host->max_req_size,
- max_blks * func_blk_sz);
- max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
- max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
+ max_req_sz = sdiodev->max_request_size;
+ max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
+ target_list->qlen);
seg_sz = target_list->qlen;
pkt_offset = 0;
pkt_next = target_list->next;
- if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
+ if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
ret = -ENOMEM;
goto exit;
}
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+ mmc_dat.sg = st.sgl;
+ mmc_dat.blksz = func_blk_sz;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+ mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
+ mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
+ mmc_cmd.arg |= 1<<27; /* block mode */
+ /* for function 1 the addr will be incremented */
+ mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+
while (seg_sz) {
req_sz = 0;
sg_cnt = 0;
- memset(&mmc_req, 0, sizeof(struct mmc_request));
- memset(&mmc_cmd, 0, sizeof(struct mmc_command));
- memset(&mmc_dat, 0, sizeof(struct mmc_data));
sgl = st.sgl;
/* prep sg table */
while (pkt_next != (struct sk_buff *)target_list) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
- if (sg_data_sz > host->max_seg_size)
- sg_data_sz = host->max_seg_size;
+ if (sg_data_sz > sdiodev->max_segment_size)
+ sg_data_sz = sdiodev->max_segment_size;
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
@@ -444,7 +461,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
pkt_next = pkt_next->next;
}
- if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
break;
}
seg_sz -= sg_cnt;
@@ -455,27 +472,17 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
ret = -ENOTBLK;
goto exit;
}
- mmc_dat.sg = st.sgl;
+
mmc_dat.sg_len = sg_cnt;
- mmc_dat.blksz = func_blk_sz;
mmc_dat.blocks = req_sz / func_blk_sz;
- mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
- mmc_cmd.opcode = SD_IO_RW_EXTENDED;
- mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
- mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
- mmc_cmd.arg |= 1<<27; /* block mode */
- /* incrementing addr for function 1 */
- mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
- mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
- mmc_req.cmd = &mmc_cmd;
- mmc_req.data = &mmc_dat;
+ /* incrementing addr for function 1 */
if (fn == 1)
addr += req_sz;
mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
- mmc_wait_for_req(host, &mmc_req);
+ mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret != 0) {
@@ -546,7 +553,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
{
uint width;
int err = 0;
- struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
@@ -556,19 +562,17 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
- skb_queue_head_init(&pkt_list);
- skb_queue_tail(&pkt_list, pkt);
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
done:
return err;
}
int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq)
+ uint flags, struct sk_buff_head *pktq, uint totlen)
{
- uint incr_fix;
+ struct sk_buff *glom_skb;
+ struct sk_buff *skb;
uint width;
int err = 0;
@@ -580,8 +584,22 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
+ if (pktq->qlen == 1)
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+ else if (!sdiodev->sg_support) {
+ glom_skb = brcmu_pkt_buf_get_skb(totlen);
+ if (!glom_skb)
+ return -ENOMEM;
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+ if (err)
+ goto done;
+
+ skb_queue_walk(pktq, skb) {
+ memcpy(skb->data, glom_skb->data, skb->len);
+ skb_pull(glom_skb, skb->len);
+ }
+ } else
+ err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
done:
return err;
@@ -592,7 +610,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- struct sk_buff_head pktq;
+ uint width;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -603,10 +621,12 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
}
memcpy(mypkt->data, buf, nbytes);
- __skb_queue_head_init(&pktq);
- __skb_queue_tail(&pktq, mypkt);
- err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
- __skb_dequeue_tail(&pktq);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+
+ if (!err)
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
brcmu_pkt_buf_free_skb(mypkt);
return err;
@@ -617,16 +637,26 @@ int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff_head *pktq)
{
+ struct sk_buff *skb;
uint width;
- int err = 0;
+ int err;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ if (err)
+ return err;
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
+ if (pktq->qlen == 1 || !sdiodev->sg_support)
+ skb_queue_walk(pktq, skb) {
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+ if (err)
+ break;
+ }
+ else
+ err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
return err;
}
@@ -639,7 +669,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
- struct sk_buff_head pkt_list;
dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
pkt = dev_alloc_skb(dsize);
@@ -648,7 +677,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return -EIO;
}
pkt->priority = 0;
- skb_queue_head_init(&pkt_list);
/* Determine initial transfer parameters */
sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -676,10 +704,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- skb_queue_tail(&pkt_list, pkt);
bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ sdaddr, pkt);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3462b75bd0..905704e335d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -21,6 +21,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/sched.h> /* request_irq() */
@@ -34,6 +35,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
@@ -41,13 +43,6 @@
#define DMA_ALIGN_MASK 0x03
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
-#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
-
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -58,7 +53,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -320,6 +316,8 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
+ struct mmc_host *host;
+ uint max_blocks;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -366,6 +364,20 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_err("F2 error, probe failed %d...\n", err);
goto fail;
}
+
+ /*
+ * determine host related variables after brcmf_sdio_probe()
+ * as func->cur_blksize is properly set and F2 init has been
+ * completed successfully.
+ */
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
brcmf_dbg(SDIO, "F2 init completed...\n");
return 0;
@@ -466,7 +478,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");
- brcmfmac_sdio_pdata = pdev->dev.platform_data;
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
if (brcmfmac_sdio_pdata->power_on)
brcmfmac_sdio_pdata->power_on();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 2eb9e642c9b..899a2ada5b8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -97,8 +97,6 @@
#define WLC_PHY_TYPE_LCN 8
#define WLC_PHY_TYPE_NULL 0xf
-#define BRCMF_EVENTING_MASK_LEN 16
-
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
@@ -632,29 +630,29 @@ struct brcmf_skb_reorder_data {
u8 *reorder;
};
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
/* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
/* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *rxp);
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
- s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+ char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+ bool success);
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 74156f84180..a6eb09e5d46 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -132,35 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
* interface functions from common layer
*/
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
- struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+ int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
/* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
- bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
#ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
-extern void brcmf_sdio_register(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
#endif
#ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_register(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
#endif
#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 40e7f854e10..64e9cff241b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -509,9 +509,8 @@ netif_rx:
}
}
-void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
- struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
- brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
- skb_queue_len(skb_list));
+ brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- skb_queue_walk_safe(skb_list, skb, pnext) {
- skb_unlink(skb, skb_list);
-
- /* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
- ifp = drvr->iflist[ifidx];
-
- if (ret || !ifp || !ifp->ndev) {
- if ((ret != -ENODATA) && ifp)
- ifp->stats.rx_errors++;
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+ ifp = drvr->iflist[ifidx];
- rd = (struct brcmf_skb_reorder_data *)skb->cb;
- if (rd->reorder)
- brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
- else
- brcmf_netif_rx(ifp, skb);
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return;
}
+
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ if (rd->reorder)
+ brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+ else
+ brcmf_netif_rx(ifp, skb);
}
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index ef917988374..53c6e710f2c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -22,21 +22,21 @@
*/
/* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
/* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
/* Add any protocol-specific data header.
* Caller must reserve prot_hdrlen prepend space.
*/
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
- struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+ struct sk_buff *txp);
/* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1aa75d5951b..b02953c4ade 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -275,11 +275,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
-#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
-MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
-MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
-
#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
@@ -454,9 +449,6 @@ struct brcmf_sdio {
struct work_struct datawork;
atomic_t dpc_tskcnt;
- const struct firmware *firmware;
- u32 fw_ptr;
-
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
+#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
+#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+ u32 chipid;
+ u32 revmsk;
+ const char *bin;
+ const char *nv;
+};
+
+enum brcmf_firmware_type {
+ BRCMF_FIRMWARE_BIN,
+ BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+ name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+ { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+ { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+ { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+ { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+ { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+};
+
+
+static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+ enum brcmf_firmware_type type)
+{
+ const struct firmware *fw;
+ const char *name;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+ if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+ brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+ switch (type) {
+ case BRCMF_FIRMWARE_BIN:
+ name = brcmf_fwname_data[i].bin;
+ break;
+ case BRCMF_FIRMWARE_NVRAM:
+ name = brcmf_fwname_data[i].nv;
+ break;
+ default:
+ brcmf_err("invalid firmware type (%d)\n", type);
+ return NULL;
+ }
+ goto found;
+ }
+ }
+ brcmf_err("Unknown chipid %d [%d]\n",
+ bus->ci->chip, bus->ci->chiprev);
+ return NULL;
+
+found:
+ err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+ if ((err) || (!fw)) {
+ brcmf_err("fail to request firmware %s (%d)\n", name, err);
+ return NULL;
+ }
+
+ return fw;
+}
+
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
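
Annotation: the brcmf_fwname_data[] table plus brcmf_sdbrcm_get_fw() added above select a firmware/NVRAM pair per chip: chipid must match and revmsk is a bitmask over chip revisions, so 0x0000001F selects revisions 0-4 while 0xFFFFFFE0 selects the rest (that is how the two BCM43241 entries split). A standalone sketch of the matching logic follows; the numeric chip IDs are placeholders standing in for the BCM*_CHIP_ID constants, the firmware paths are taken from the table above.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    struct fw_name {
            uint32_t chipid;
            uint32_t revmsk;        /* bit N set => chip revision N uses this entry */
            const char *bin;
            const char *nv;
    };

    /* illustrative subset; the real table is brcmf_fwname_data[] above */
    static const struct fw_name fw_names[] = {
            { 0x4324, 0x0000001F, "brcm/brcmfmac43241b0-sdio.bin", "brcm/brcmfmac43241b0-sdio.txt" },
            { 0x4324, 0xFFFFFFE0, "brcm/brcmfmac43241b4-sdio.bin", "brcm/brcmfmac43241b4-sdio.txt" },
            { 0x4329, 0xFFFFFFFF, "brcm/brcmfmac4329-sdio.bin",    "brcm/brcmfmac4329-sdio.txt" },
    };

    static const struct fw_name *lookup_fw(uint32_t chip, uint32_t chiprev)
    {
            size_t i;

            for (i = 0; i < sizeof(fw_names) / sizeof(fw_names[0]); i++)
                    if (fw_names[i].chipid == chip &&
                        (fw_names[i].revmsk & BIT(chiprev)))
                            return &fw_names[i];
            return NULL;    /* corresponds to the "Unknown chipid" error path above */
    }

    int main(void)
    {
            const struct fw_name *fw = lookup_fw(0x4324, 5);  /* rev 5 hits the b4 entry */

            if (fw)
                    printf("firmware=%s nvram=%s\n", fw->bin, fw->nv);
            return 0;
    }
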
@@ -1061,6 +1147,8 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
u8 rx_seq, fc, tx_seq_max;
u32 swheader;
+ trace_brcmf_sdpcm_hdr(false, header);
+
/* hw header */
len = get_unaligned_le16(header);
checksum = get_unaligned_le16(header + sizeof(u16));
@@ -1183,6 +1271,7 @@ static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
SDPCM_DOFFSET_MASK;
*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
*(((__le32 *)header) + 2) = 0;
+ trace_brcmf_sdpcm_hdr(true, header);
}
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1303,7 +1392,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
sdio_claim_host(bus->sdiodev->func[1]);
errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom);
+ SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -1406,13 +1495,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
+ skb_unlink(pfirst, &bus->glom);
+ brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+ bus->sdcnt.rxglompkts++;
}
- /* sent any remaining packets up */
- if (bus->glom.qlen)
- brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
- bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@@ -1557,7 +1645,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
- struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
@@ -1759,9 +1846,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
continue;
}
- skb_queue_head_init(&pktlist);
- skb_queue_tail(&pktlist, pkt);
- brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
+ brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@@ -1786,10 +1871,65 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
+/**
+ * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
+ * bus layer usage.
+ */
/* flag marking a dummy skb added for DMA alignment requirement */
-#define DUMMY_SKB_FLAG 0x10000
+#define ALIGN_SKB_FLAG 0x8000
/* bit mask of data length chopped from the previous packet */
-#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
+#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
+
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq,
+ struct sk_buff *pkt, uint chan)
+{
+ struct sk_buff *pkt_pad;
+ u16 tail_pad, tail_chop, sg_align;
+ unsigned int blksize;
+ u8 *dat_buf;
+ int ntail;
+
+ blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ sg_align = 4;
+ if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
+ sg_align = sdiodev->pdata->sd_sgentry_align;
+ /* sg entry alignment should be a divisor of block size */
+ WARN_ON(blksize % sg_align);
+
+ /* Check tail padding */
+ pkt_pad = NULL;
+ tail_chop = pkt->len % sg_align;
+ tail_pad = sg_align - tail_chop;
+ tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+ if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ if (pkt_pad == NULL)
+ return -ENOMEM;
+ memcpy(pkt_pad->data,
+ pkt->data + pkt->len - tail_chop,
+ tail_chop);
+ *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ skb_trim(pkt, pkt->len - tail_chop);
+ __skb_queue_after(pktq, pkt, pkt_pad);
+ } else {
+ ntail = pkt->data_len + tail_pad -
+ (pkt->end - pkt->tail);
+ if (skb_cloned(pkt) || ntail > 0)
+ if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+ return -ENOMEM;
+ if (skb_linearize(pkt))
+ return -ENOMEM;
+ dat_buf = (u8 *)(pkt->data);
+ __skb_put(pkt, tail_pad);
+ }
+
+ if (pkt_pad)
+ return pkt->len + tail_chop;
+ else
+ return pkt->len - tail_pad;
+}
+
/**
* brcmf_sdio_txpkt_prep - packet preparation for transmit
* @bus: brcmf_sdio structure pointer
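
Annotation: brcmf_sdio_txpkt_prep_sg() above pads the tail of the last packet so the overall transfer length is a multiple of the SDIO block size and every scatter-gather entry stays aligned; when there is no tailroom, the chopped bytes move into a dummy skb and the chop length is recorded in the first two bytes of skb->cb (the bus_flags area reserved in fwsignal.c later in this diff) together with ALIGN_SKB_FLAG, so brcmf_sdio_txpkt_postp() can copy them back. A standalone sketch of the padding arithmetic and the cb encoding, with an arbitrary block size, alignment and packet length chosen only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_SKB_FLAG          0x8000
    #define ALIGN_SKB_CHOP_LEN_MASK 0x7fff

    int main(void)
    {
            unsigned int blksize = 512;     /* illustrative SDIO block size */
            unsigned int sg_align = 4;      /* illustrative sg entry alignment */
            unsigned int len = 1510;        /* illustrative packet length */
            unsigned int tail_chop, tail_pad;
            uint32_t cb;

            /* same arithmetic as brcmf_sdio_txpkt_prep_sg() above */
            tail_chop = len % sg_align;
            tail_pad = sg_align - tail_chop;
            tail_pad += blksize - (len + tail_pad) % blksize;

            /* dummy-skb bookkeeping: flag plus chopped length in the cb bytes */
            cb = ALIGN_SKB_FLAG + tail_chop;

            printf("chop=%u pad=%u padded_len=%u chop_from_cb=%u\n",
                   tail_chop, tail_pad, len + tail_pad,
                   (unsigned int)(cb & ALIGN_SKB_CHOP_LEN_MASK));
            return 0;
    }

For len=1510, blksize=512 and sg_align=4 this yields a padded length of 1536, i.e. three full blocks with 4-byte alignment preserved.
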
@@ -1806,24 +1946,16 @@ static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
- u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
- int ntail;
- struct sk_buff *pkt_next, *pkt_new;
+ u16 head_pad, head_align;
+ struct sk_buff *pkt_next;
u8 *dat_buf;
- unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ int err;
struct brcmf_sdio_hdrinfo hd_info = {0};
/* SDIO ADMA requires at least 32 bit alignment */
head_align = 4;
- sg_align = 4;
- if (bus->sdiodev->pdata) {
- head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
- bus->sdiodev->pdata->sd_head_align : 4;
- sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
- bus->sdiodev->pdata->sd_sgentry_align : 4;
- }
- /* sg entry alignment should be a divisor of block size */
- WARN_ON(blksize % sg_align);
+ if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
+ head_align = bus->sdiodev->pdata->sd_head_align;
pkt_next = pktq->next;
dat_buf = (u8 *)(pkt_next->data);
@@ -1842,40 +1974,20 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
}
- /* Check tail padding */
- pkt_new = NULL;
- tail_chop = pkt_next->len % sg_align;
- tail_pad = sg_align - tail_chop;
- tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
- if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
- pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
- if (pkt_new == NULL)
- return -ENOMEM;
- memcpy(pkt_new->data,
- pkt_next->data + pkt_next->len - tail_chop,
- tail_chop);
- *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
- skb_trim(pkt_next, pkt_next->len - tail_chop);
- __skb_queue_after(pktq, pkt_next, pkt_new);
+ if (bus->sdiodev->sg_support && pktq->qlen > 1) {
+ err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
+ pkt_next, chan);
+ if (err < 0)
+ return err;
+ hd_info.len = (u16)err;
} else {
- ntail = pkt_next->data_len + tail_pad -
- (pkt_next->end - pkt_next->tail);
- if (skb_cloned(pkt_next) || ntail > 0)
- if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
- return -ENOMEM;
- if (skb_linearize(pkt_next))
- return -ENOMEM;
- dat_buf = (u8 *)(pkt_next->data);
- __skb_put(pkt_next, tail_pad);
+ hd_info.len = pkt_next->len;
}
- /* Now prep the header */
- if (pkt_new)
- hd_info.len = pkt_next->len + tail_chop;
- else
- hd_info.len = pkt_next->len - tail_pad;
hd_info.channel = chan;
hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+
+ /* Now fill the header */
brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
if (BRCMF_BYTES_ON() &&
@@ -1908,8 +2020,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
skb_queue_walk_safe(pktq, pkt_next, tmp) {
dummy_flags = *(u32 *)(pkt_next->cb);
- if (dummy_flags & DUMMY_SKB_FLAG) {
- chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+ if (dummy_flags & ALIGN_SKB_FLAG) {
+ chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
memcpy(pkt_prev->data + pkt_prev->len,
@@ -3037,69 +3149,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
return true;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
-{
- if (bus->firmware->size < bus->fw_ptr + len)
- len = bus->firmware->size - bus->fw_ptr;
-
- memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
- bus->fw_ptr += len;
- return len;
-}
-
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
+ const struct firmware *fw;
+ int err;
int offset;
- uint len;
- u8 *memblock = NULL, *memptr;
- int ret;
- u8 idx;
-
- brcmf_dbg(INFO, "Enter\n");
-
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request firmware %d\n", ret);
- return ret;
- }
- bus->fw_ptr = 0;
-
- memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
- if (memblock == NULL) {
- ret = -ENOMEM;
- goto err;
- }
- if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
- memptr += (BRCMF_SDALIGN -
- ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
- offset = bus->ci->rambase;
-
- /* Download image */
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != idx)
- memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
- while (len) {
- ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
- if (ret) {
+ int address;
+ int len;
+
+ fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ if (fw == NULL)
+ return -ENOENT;
+
+ if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
+ BRCMF_MAX_CORENUM)
+ memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+
+ err = 0;
+ offset = 0;
+ address = bus->ci->rambase;
+ while (offset < fw->size) {
+ len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+ fw->size - offset;
+ err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
+ if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- ret, MEMBLOCK, offset);
- goto err;
+ err, len, address);
+ goto failure;
}
-
- offset += MEMBLOCK;
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
+ offset += len;
+ address += len;
}
-err:
- kfree(memblock);
-
- release_firmware(bus->firmware);
- bus->fw_ptr = 0;
+failure:
+ release_firmware(fw);
- return ret;
+ return err;
}
/*
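
Annotation: brcmf_sdbrcm_download_code_file() no longer stages the image through a bounce buffer; it walks the firmware blob directly in MEMBLOCK-sized chunks, advancing a file offset and a device RAM address in lockstep. A standalone sketch of the chunking arithmetic follows, with an invented blob size and chunk size (MEMBLOCK itself is defined elsewhere in this file and its value is not shown here); the 0x180000 base matches the BCM4339 rambase added in sdio_chip.c later in this diff.

    #include <stdio.h>

    int main(void)
    {
            unsigned int fw_size = 250000;          /* illustrative firmware size */
            unsigned int memblock = 2048;           /* stand-in for MEMBLOCK */
            unsigned int rambase = 0x180000;        /* device RAM base, as per sdio_chip.c */
            unsigned int offset = 0, address = rambase, len;

            while (offset < fw_size) {
                    len = (offset + memblock < fw_size) ? memblock
                                                        : fw_size - offset;
                    /* here the driver calls brcmf_sdio_ramrw(..., address,
                     * &fw->data[offset], len) */
                    offset += len;
                    address += len;
            }
            printf("wrote %u bytes, last address 0x%x\n", offset, address);
            return 0;
    }
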
@@ -3111,7 +3197,8 @@ err:
* by two NULs.
*/
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
+ const struct firmware *nv)
{
char *varbuf;
char *dp;
@@ -3120,12 +3207,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
int ret = 0;
uint buf_len, n, len;
- len = bus->firmware->size;
+ len = nv->size;
varbuf = vmalloc(len);
if (!varbuf)
return -ENOMEM;
- memcpy(varbuf, bus->firmware->data, len);
+ memcpy(varbuf, nv->data, len);
dp = varbuf;
findNewline = false;
@@ -3177,18 +3264,16 @@ err:
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
+ const struct firmware *nv;
int ret;
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request nvram %d\n", ret);
- return ret;
- }
+ nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ if (nv == NULL)
+ return -ENOENT;
- ret = brcmf_process_nvram_vars(bus);
+ ret = brcmf_process_nvram_vars(bus, nv);
- release_firmware(bus->firmware);
+ release_firmware(nv);
return ret;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index e679214b3c9..14bc24dc5ba 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,7 +102,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+ BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
};
#undef BRCMF_ENUM_DEF
+#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
+
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
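
Annotation: the new PSTA_PRIMARY_INTF_IND event (code 128) pushes the highest event number past the previous byte boundary, and BRCMF_EVENTING_MASK_LEN now sizes the event bitmask as DIV_ROUND_UP(BRCMF_E_LAST, 8) so one bit per event code always fits. A small sketch of that sizing and of setting a bit for an event code; the e_last value is an assumption used only for illustration, and the bit layout shown is generic rather than a claim about the firmware's exact mask format.

    #include <stdio.h>
    #include <string.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int e_last = 129;      /* assumed: one past event code 128 */
            unsigned int mask_len = DIV_ROUND_UP(e_last, 8);
            unsigned char mask[32];
            unsigned int code = 128;        /* PSTA_PRIMARY_INTF_IND */

            memset(mask, 0, sizeof(mask));
            mask[code / 8] |= 1u << (code % 8);     /* enable event 128 */

            printf("mask_len=%u, event %u lives in byte %u\n",
                   mask_len, code, code / 8);       /* 17 bytes, byte 16 */
            return 0;
    }
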
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 82f9140f3d3..d0cd0bf95c5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
/**
* struct brcmf_skbuff_cb - control buffer associated with skbuff.
*
+ * @bus_flags: 2 bytes reserved for bus specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
* @state: transmit state of the packet.
@@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
* provides 48 bytes of storage so this structure should not exceed that.
*/
struct brcmf_skbuff_cb {
+ u16 bus_flags;
u16 if_flags;
u32 htod;
enum brcmf_fws_skb_state state;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d7a97453290..4a229304182 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -823,6 +823,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
}
err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
action, P2PAPI_BSSCFG_DEVICE);
+ kfree(chanspecs);
}
exit:
if (err)
@@ -1148,7 +1149,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
- INIT_COMPLETION(afx_hdl->act_frm_scan);
+ reinit_completion(&afx_hdl->act_frm_scan);
set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
afx_hdl->is_active = true;
afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
@@ -1501,7 +1502,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
brcmf_dbg(TRACE, "Enter\n");
- INIT_COMPLETION(p2p->send_af_done);
+ reinit_completion(&p2p->send_af_done);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index ca72177388b..2096a14ef1f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
NULL);
@@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* if core is already in reset, just return */
regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/*
* Must do the disable sequence first to work for
@@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ ci->chiprev >= 2)
+ ci->chip = BCM4339_CHIP_ID;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
+ case BCM4339_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2e084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x15004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x04084411;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
default:
brcmf_err("chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 83c041f1bf4..507c61c991f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,6 +54,14 @@
#define BRCMF_MAX_CORENUM 6
+/* SDIO device ID */
+#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+
struct chip_core_info {
u16 id;
u16 rev;
@@ -215,17 +223,16 @@ struct sdpcmd_regs {
u16 PAD[0x80];
};
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci,
- u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, char *nvram_dat,
+ uint nvram_sz);
#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 2b5407f002e..fc0d4f0129d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -178,21 +178,25 @@ struct brcmf_sdio_dev {
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
bool irq_wake; /* irq wake enable flags */
+ bool sg_support;
+ uint max_request_size;
+ ushort max_segment_count;
+ uint max_segment_size;
};
/* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+ int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
@@ -206,22 +210,17 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* Returns 0 or error code.
* NOTE: Async operation is not currently supported.
*/
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes);
+
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq, uint totlen);
/* Flags bits */
@@ -237,46 +236,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
* nbytes: number of bytes to transfer to/from buf
* Returns 0 or error code.
*/
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
- u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
- u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+ u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size);
/* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
/* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
/* attach, return handler on success, NULL if failed.
* The handler shall be provided by all subsequent calls. No local cache
* cfghdl points to the starting address of pci device mapped memory
*/
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
/* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
- uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+ uint addr, u8 *byte);
/* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
- uint rw, uint fnc, uint addr,
- u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+ uint addr, u32 *word, uint nbyte);
/* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
- bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
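
Annotation: sdio_host.h grows sg_support, max_request_size, max_segment_count and max_segment_size so the bus code can size scatter-gather transfers to what the MMC host controller advertises. A hedged sketch of how such fields might be filled in from struct mmc_host limits; the helper below is hypothetical and not part of this diff, only the mmc-side max_req_size/max_segs/max_seg_size fields and the new brcmf_sdio_dev members are real.

    #include <linux/mmc/host.h>
    #include <linux/mmc/card.h>
    #include <linux/mmc/sdio_func.h>

    #include "sdio_host.h"

    /* hypothetical helper, for this sketch only */
    static void my_fill_host_limits(struct brcmf_sdio_dev *sdiodev,
                                    struct sdio_func *func)
    {
            struct mmc_host *host = func->card->host;

            /* treat multi-segment capable hosts as scatter-gather capable */
            sdiodev->sg_support = host->max_segs > 1;
            sdiodev->max_request_size = host->max_req_size;
            sdiodev->max_segment_count = host->max_segs;
            sdiodev->max_segment_size = host->max_seg_size;
    }
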
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index bc291711289..3c67529b907 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -78,13 +78,15 @@ TRACE_EVENT(brcmf_hexdump,
TP_ARGS(data, len),
TP_STRUCT__entry(
__field(unsigned long, len)
+ __field(unsigned long, addr)
__dynamic_array(u8, hdata, len)
),
TP_fast_assign(
__entry->len = len;
+ __entry->addr = (unsigned long)data;
memcpy(__get_dynamic_array(hdata), data, len);
),
- TP_printk("hexdump [length=%lu]", __entry->len)
+ TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
);
TRACE_EVENT(brcmf_bdchdr,
@@ -108,6 +110,23 @@ TRACE_EVENT(brcmf_bdchdr,
TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
);
+TRACE_EVENT(brcmf_sdpcm_hdr,
+ TP_PROTO(bool tx, void *data),
+ TP_ARGS(tx, data),
+ TP_STRUCT__entry(
+ __field(u8, tx)
+ __field(u16, len)
+ __array(u8, hdr, 12)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->hdr, data, 12);
+ __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
+ __entry->tx = tx ? 1 : 0;
+ ),
+ TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
+ __entry->len, __entry->hdr[4])
+);
+
#ifdef CONFIG_BRCM_TRACING
#undef TRACE_INCLUDE_PATH
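
Annotation: the new brcmf_sdpcm_hdr tracepoint snapshots the first 12 bytes of the SDPCM header and derives the frame length from the first two bytes, little-endian, with the sequence number at byte 4; the hdparse/hdpack hunks in dhd_sdio.c above wire it into both the RX and TX paths. A standalone sketch of that decode, using an invented header purely for illustration; the checksum handling shown is the hardware-header length complement that brcmf_sdio_hdparse() validates.

    #include <stdint.h>
    #include <stdio.h>

    /* little-endian 16-bit read, same role as get_unaligned_le16() in hdparse */
    static uint16_t le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
            /* invented 12-byte SDPCM header: len, len complement, then sw header */
            uint8_t hdr[12] = { 0x34, 0x02, 0xcb, 0xfd, 0x07, 0, 0, 0, 0, 0, 0, 0 };

            uint16_t len = le16(hdr);               /* 0x0234 = 564 bytes */
            uint16_t checksum = le16(hdr + 2);      /* second hw-header word */
            uint8_t seq = hdr[4];                   /* what the tracepoint prints as seq */

            printf("len=%u csum_ok=%d seq=%u\n",
                   len, (uint16_t)(len ^ checksum) == 0xffff, seq);
            return 0;
    }
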
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index f4aea47e073..422f44c6317 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
- struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
- skb_queue_head_init(&skbq);
- skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
- brcmf_rx_frames(devinfo->dev, &skbq);
+ brcmf_rx_frame(devinfo->dev, skb);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index a8a267b5b87..2d08c155c23 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -172,19 +172,19 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 73d01e58610..03bdcf29bd5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
u16 dma_len;
};
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
- struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
- struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+ struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+ struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
#endif /* _BRCM_AMPDU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
index 97ea3881a8e..a3d487ab196 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
@@ -17,13 +17,11 @@
#ifndef _BRCM_ANTSEL_H_
#define _BRCM_ANTSEL_H_
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
- bool sel,
- u8 id, u8 fbid, u8 *antcfg,
- u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+ u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
#endif /* _BRCM_ANTSEL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 006483a0abe..39dd3a5b297 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -32,20 +32,16 @@
#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
- u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
- u16 chanspec,
- struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
- u16 chanspec,
- u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 4090032e81a..198053dfc31 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -88,26 +88,26 @@ struct brcms_info {
};
/* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
- bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
/* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg), void *arg,
+ const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 4608e0eb149..8138f1cff4e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
/* If macaddr exists, use it (Sromrev4, CIS, ...). */
if (!is_zero_ether_addr(sprom->il0mac)) {
- memcpy(etheraddr, sprom->il0mac, 6);
+ memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
return;
}
if (wlc_hw->_nbands > 1)
- memcpy(etheraddr, sprom->et1mac, 6);
+ memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
else
- memcpy(etheraddr, sprom->il0mac, 6);
+ memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
}
/* power both the pll and external oscillator on/off */
@@ -5695,7 +5695,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
- if (device == BCM4313_D11N2G_ID)
+ if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
return true;
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index b5d7a38b53f..c4d135cff04 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
struct brcms_bss_info *current_bss;
};
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
- struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
- uint *blocks);
-
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
- uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
- u32 rspec,
- bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
- u32 rts_rate,
- u32 frame_rate,
- u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len,
- bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
- struct ieee80211_sta *sta,
- void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
- u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
- u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
- u16 chanspec,
- bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
- u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
- u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
- int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
- uint offset, const void *buf, int len,
- u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
- void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
- u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks);
+
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+ bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ u32 rts_rate, u32 frame_rate,
+ u8 rts_preamble_type, u8 frame_preamble_type,
+ uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+ struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+ bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+ int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+ int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index e34a71e7d24..4d3734f48d9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -179,121 +179,106 @@ struct shared_phy_params {
};
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct bcma_device *d11core,
- int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
-
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
- u16 *phyrev, u16 *radioid,
- u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
- u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
- u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
- struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
- bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
- struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
- uint band);
-
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
- u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
- uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
- uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *,
- u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
- bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
- bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
- bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
- u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
- u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
-
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
-
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
- struct tx_power *power, uint channel);
-
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
- u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
- bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
- u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+ struct bcma_device *d11core, int bandtype,
+ struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+ u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+ bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+ struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
+
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+ u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+ u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+ s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+ u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+ struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+ struct tx_power *power, uint channel);
+
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+ u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
#endif /* _BRCM_PHY_HAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 1dc767c3165..4960f7d2680 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
u8 do_init_g;
};
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
- u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_read_table(struct brcms_phy *pi,
- const struct phytbl_info *ptbl_info,
- u16 tblAddr, u16 tblDataHi,
- u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
- const struct phytbl_info *ptbl_info,
- u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
- uint tbl_offset, u16 tblAddr, u16 tblDataHi,
- u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
- struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
- const struct radio_regs *radioregs,
- u16 core_offset);
-
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
- s32 *eps_imag);
-
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
- u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
- u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
- u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
-
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
- u16 max_val, bool iqcalmode);
-
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
- u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
- u8 rate_mcs_end,
- u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
- u8 rate_ofdm_start,
- u8 rate_ofdm_end,
- u8 rate_mcs_start);
-
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+
+void wlc_phy_read_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+ u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+ struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+ const struct radio_regs *radioregs,
+ u16 core_offset);
+
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
+
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+ bool iqcalmode);
+
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+ u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+ u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+ u8 rate_ofdm_end, u8 rate_mcs_start);
+
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
#define LCNPHY_TX_POWER_TABLE_SIZE 128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TX_PWR_CTRL_TEMPBASED 0xE001
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
- const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
- struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
- u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
- s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+ u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
#define NPHY_MAX_HPVGA1_INDEX 10
#define NPHY_DEF_HPVGA1_INDEXLIMIT 7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
u32 q_pwr;
};
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
- bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_phy_write_table_nphy(pi, pti) \
wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_nphy_table_data_write(pi, w, v) \
wlc_phy_table_data_write((pi), (w), (v))
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
- u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
- u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+ void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+ const void *);
#define PHY_IPA(pi) \
((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
- u16 num_samps, u8 wait_time,
- u8 wait_for_crs);
-
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
- struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
- u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
- struct nphy_txgains target_gain,
- bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
- struct nphy_txgains target_gain,
- u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
- s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
- s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
- s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
- u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
- u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
- struct d11rxhdr *rxh);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+ u16 num_samps, u8 wait_time, u8 wait_for_crs);
+
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+ struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+ struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+ u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+ s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+ s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+ bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+ u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+ u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
#define NPHY_TESTPATTERN_BPHY_EVM 0
#define NPHY_TESTPATTERN_BPHY_RFCS 1
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
- u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
#endif /* _BRCM_PHY_INT_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 2c5b66b7597..dd8774717ad 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -124,56 +124,49 @@
struct brcms_phy;
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
- struct brcms_info *wl,
- struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+ struct brcms_info *wl,
+ struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn) (struct brcms_phy *pi),
- void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
- u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
- u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
- u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
- u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
- int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
- u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
- void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
- const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
- u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+ void (*fn)(struct brcms_phy *pi),
+ void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+ int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+ int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+ int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+ int, u32);
+
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 20e2012d5a3..a014bbc4f93 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -20,7 +20,7 @@
#include "types.h"
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index d36ea5e1cc4..4da38cb4f31 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
};
/* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
- bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
- struct sk_buff *sdu,
- struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
- int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
- int match_reg_offset,
- const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
- const struct ieee80211_tx_queue_params *arg,
- bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
- struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
- u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
- const char *name, struct brcms_info *hdl,
- int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
- struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+ struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
+
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
+
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+ const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+ const struct ieee80211_tx_queue_params *arg,
+ bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+ u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+ u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl,
+ int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
- struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
- u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
- const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
- struct sk_buff *beacon, u16 tim_offset,
- u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
- struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
- size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+ u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+ u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+ struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index 980d578825c..5bb88b78ed6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
/* sanitize, and sort a rateset with the basic bit(s) preserved, validate
* rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
- const struct brcms_c_rateset *hw_rs,
- bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+ const struct brcms_c_rateset *hw_rs,
+ bool check_brate, u8 txstreams);
/* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
- struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst);
/* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
- struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
- bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
- const struct brcms_c_rateset *rs_hw, uint phy_type,
- int bandtype, bool cck_only, uint rate_mask,
- bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
- u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
- u8 bw);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst, bool basic_only,
+ u8 rates, uint xmask, bool mcsallow);
+
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+ const struct brcms_c_rateset *rs_hw, uint phy_type,
+ int bandtype, bool cck_only, uint rate_mask,
+ bool mcsallow, u8 bw, u8 txstreams);
+
+s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
index 19f6580f69b..ba9493009a3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
@@ -19,24 +19,19 @@
#include "types.h"
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
- u16 *ss_algo_channel,
- u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
- struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
- bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
- u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
- u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+ u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
#endif /* _BRCM_STF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
index 18750a814b4..c87dd89bcb7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
@@ -43,16 +43,14 @@ struct brcms_ucode {
u32 *bcm43xx_bomminor;
};
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
- unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
- unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+ unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int brcms_check_firmwares(struct brcms_info *wl);
#endif /* _BRCM_UCODE_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index c1fe245bb07..84113ea16f8 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,5 +41,6 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
+#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 92623f02b1c..8660a2cba09 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
void (*decchspec)(struct brcmu_chan *ch);
};
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
#endif /* _BRCMU_CHANNELS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 898cacb8d01..8ba445b3fd7 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
return skb_peek_tail(&pq->q[prec].skblist);
}
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
- bool (*match_fn)(struct sk_buff *p,
- void *arg),
- void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+ bool (*match_fn)(struct sk_buff *p,
+ void *arg),
+ void *arg);
/* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
/* Empty the queue at particular precedence level */
/* callback function fn(pkt, arg) returns true if pkt belongs to if */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
- bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
/* operations on a set of precedences in packet queue */
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
- int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
/* operations on packet queue as a whole */
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
return pq->len == 0;
}
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
- bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
/* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
/* externs */
/* format/print */
#ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif /* DEBUG */
#ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
#else
__printf(3, 4)
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 755a0c8edfe..40078f5f932 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -365,7 +365,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
static int cw1200_spi_probe(struct spi_device *func)
{
const struct cw1200_platform_data_spi *plat_data =
- func->dev.platform_data;
+ dev_get_platdata(&func->dev);
struct hwbus_priv *self;
int status;
@@ -443,7 +443,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
kfree(self);
}
- cw1200_spi_off(func->dev.platform_data);
+ cw1200_spi_off(dev_get_platdata(&func->dev));
return 0;
}
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 970a48baaf8..de7c4ffec30 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
}
}
- memcpy(req.bssid, selected->bssid, 6);
+ memcpy(req.bssid, selected->bssid, ETH_ALEN);
req.channel = selected->chid;
spin_unlock_irqrestore(&local->lock, flags);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6b823a1ab78..81903e33d5b 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
/* data's copy of the eeprom data */
static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
{
- memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+ memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
}
static void ipw_read_eeprom(struct ipw_priv *priv)
@@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out_free_libipw:
free_libipw(priv->net_dev, 0);
out:
@@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
iounmap(priv->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 6eede52ad8c..5ce2f59d337 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
}
/* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
-extern void libipw_networks_age(struct libipw_device *ieee,
- unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
/* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
- struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
/* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
- struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
- struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+ struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+ struct libipw_rx_stats *rx_stats);
/* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
- struct libipw_hdr_4addr *header,
- struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+ struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
/* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
- *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
- const struct libipw_geo *geo);
-
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
- u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
- u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
- u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
- libipw_device
- *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
- u8 channel);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+ u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
/* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
static inline void libipw_increment_scans(struct libipw_device *ieee)
{
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 9581d07a424..dea3b50d68b 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3811,7 +3811,6 @@ out_iounmap:
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il_free_channel_map(il);
il_free_geos(il);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 9a8703def0b..00030d43a19 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
* for use by iwl-*.c
*
*****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
- struct ieee80211_hdr *hdr,
- int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
- char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+ bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
/******************************************************************************
*
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
* il3945_mac_ <-- mac80211 callback
*
****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
- struct il_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset,
- u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
- struct il3945_frame *frame,
- u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame, u8 rate);
void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
/**
* il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
* not yet been merged into a single common layer for managing the
* station tables.
*/
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
/* scanning */
int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 5ab50a5b48b..3982ab76f37 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6706,7 +6706,6 @@ out_free_eeprom:
out_iounmap:
iounmap(il->hw_base);
out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il4965_uninit_drv(il);
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1b15b0b2292..337dfcf3bbd 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
((t) < IL_TX_POWER_TEMPERATURE_MIN || \
(t) > IL_TX_POWER_TEMPERATURE_MAX)
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
/********************* END TEMPERATURE ***************************************/
/********************* START TXPOWER *****************************************/
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 83f8ed8a552..ad123d66ab6 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -858,9 +858,9 @@ struct il_hw_params {
* il4965_mac_ <-- mac80211 callback
*
****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
static inline int
il_queue_used(const struct il_queue *q, int i)
{
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
#ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
#else
static inline void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
/*****************************************************
* TX
******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
/*****************************************************
* TX power
****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
return il_is_ready(il);
}
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
int il_apm_init(struct il_priv *il);
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
irqreturn_t il_isr(int irq, void *data);
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
* The specific throughput table used is based on the type of network
 * the STA is associated with, including A, B, G, and G w/ TGG protection
*/
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
/* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
- u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
- u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
/**
* il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
* ieee80211_register_hw
*
*/
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
/**
* il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
extern u32 il_debug_level;
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index f2a86ffc3b4..23d5f0275ce 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
return cpu_to_le32(flags|(u32)rate);
}
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a79fdd137f9..7434d9edf3b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
* iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
*
****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
#define IWL_OPERATION_MODE_AUTO 0
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 5d83cab22d6..26fc550cd68 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
/* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ u8 sta_id);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
* ieee80211_register_hw
*
*/
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
#endif /* __iwl_agn__rs__ */
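The kerneldoc kept above spells out the expected call order: register the rate-control algorithm from the driver's init code before ieee80211_register_hw(), and unregister it after ieee80211_unregister_hw() but before the module is unloaded. A hedged sketch of that ordering (the module_init/module_exit wrappers are illustrative; only the iwlagn_* calls come from this header):

    static int __init example_rs_init(void)
    {
            /* before ieee80211_register_hw() is called for the device */
            return iwlagn_rate_control_register();
    }

    static void __exit example_rs_exit(void)
    {
            /* after ieee80211_unregister_hw(), before the module goes away */
            iwlagn_rate_control_unregister();
    }

    module_init(example_rs_init);
    module_exit(example_rs_exit);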
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da442b81370..1fef5240e6a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
+ txq_id = info->hw_queue;
+
if (is_agg)
txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
/*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- txq_id = ctx->mcast_queue;
-
- /*
* The microcode will clear the more data
* bit in the last frame it transmits.
*/
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txq_id = IWL_AUX_QUEUE;
- else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+ }
- WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
WARN_ON_ONCE(is_agg &&
priv->queue_to_mac80211[txq_id] != info->hw_queue);
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 86270b69cd0..63637949a14 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -330,15 +330,14 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type old_type;
static const u8 alive_cmd[] = { REPLY_ALIVE };
- old_type = priv->cur_ucode;
- priv->cur_ucode = ucode_type;
fw = iwl_get_ucode_image(priv, ucode_type);
+ if (WARN_ON(!fw))
+ return -EINVAL;
+ old_type = priv->cur_ucode;
+ priv->cur_ucode = ucode_type;
priv->ucode_loaded = false;
- if (!fw)
- return -EINVAL;
-
iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
iwl_alive_fn, &alive_data);
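The reordering above is a validate-before-mutate fix: iwl_get_ucode_image() is now checked (with a WARN_ON) before old_type, priv->cur_ucode and priv->ucode_loaded are touched, so a missing image can no longer leave the state half-updated. The shape of the fix as a self-contained sketch with illustrative names:

    #include <errno.h>
    #include <stdbool.h>

    struct loader_state { int cur_type; bool loaded; };

    static int load_image(struct loader_state *st, int type, const void *img)
    {
            if (!img)                /* validate first ...               */
                    return -EINVAL;  /* ... so no state is left half-set */

            st->cur_type = type;     /* only now mutate the state        */
            st->loaded = false;
            return 0;
    }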
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 76e14c046d9..85879dbaa40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -83,6 +83,8 @@
#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3160_NVM_VERSION 0x709
#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL7265_NVM_VERSION 0x0a1d
+#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -90,6 +92,9 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+#define IWL7265_FW_PRE "iwlwifi-7265-"
+#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -182,5 +187,14 @@ const struct iwl_cfg iwl3160_n_cfg = {
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
};
+const struct iwl_cfg iwl7265_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 7265",
+ .fw_name_pre = IWL7265_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7265_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
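One observation on the hunk above: it defines IWL7265_FW_PRE and IWL7265_MODULE_FIRMWARE, but the MODULE_FIRMWARE() hints at the end of the file still list only the 7260 and 3160 images. A matching hint for the new config would normally follow the same pattern; which API constant the 7265 image ends up sharing is an assumption here:

    MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); /* assumed API constant */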
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index b03c25e1490..18f232e8e81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -293,6 +293,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
extern const struct iwl_cfg iwl3160_2ac_cfg;
extern const struct iwl_cfg iwl3160_2n_cfg;
extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl7265_2ac_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a276af476e2..54a4fdc631b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -394,6 +394,38 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+/* SECURE boot registers */
+#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
+#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
+enum secure_boot_status_reg {
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
+enum secure_load_status_reg {
+ CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
+ CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
+ CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
+#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
+
+#define CSR_SECURE_TIME_OUT (100)
+
+#define FH_TCSR_0_REG0 (0x1D00)
+
/*
* HBUS (Host-side Bus)
*
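The new CSR block above only defines register offsets and bit masks; no user of them appears in this patch. A hedged sketch of how the verification status could be polled with the existing iwl_poll_bit()/iwl_read32() helpers (the real secure-boot flow is added elsewhere and may differ):

    static int example_wait_secure_boot(struct iwl_trans *trans)
    {
            int ret;

            ret = iwl_poll_bit(trans, CSR_SECURE_BOOT_CPU1_STATUS_ADDR,
                               CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
                               CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
                               CSR_SECURE_TIME_OUT);
            if (ret < 0)
                    return ret;

            if (iwl_read32(trans, CSR_SECURE_BOOT_CPU1_STATUS_ADDR) &
                CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL)
                    return -EIO;

            return 0;
    }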
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 99e1da3123c..ff570027e9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -483,6 +483,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const u8 *tlv_data;
char buildstr[25];
u32 build;
+ int num_of_cpus;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -692,6 +693,42 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
goto invalid_tlv_len;
drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_SECURE_SEC_RT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_SECURE_SEC_INIT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_INIT].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_NUM_OF_CPU:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ num_of_cpus =
+ le32_to_cpup((__le32 *)tlv_data);
+
+ if (num_of_cpus == 2) {
+ drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
+ true;
+ drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
+ true;
+ drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
+ true;
+ } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
+ IWL_ERR(drv, "Driver support upto 2 CPUs\n");
+ return -EINVAL;
+ }
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
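Each new TLV case above follows the parser's usual shape: validate tlv_len against the expected payload size, decode the little-endian value, then either record it or fail the whole parse. The NUM_OF_CPU case distilled into a stand-alone sketch (the real code uses le32_to_cpup(); the memcpy here assumes a little-endian host):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    static int parse_num_cpus_tlv(const uint8_t *data, size_t len, int *num_cpus)
    {
            uint32_t v;

            if (len != sizeof(v))        /* reject malformed TLVs first    */
                    return -EINVAL;

            memcpy(&v, data, sizeof(v));
            if (v < 1 || v > 2)          /* the driver handles 1 or 2 CPUs */
                    return -EINVAL;

            *num_cpus = (int)v;
            return 0;
    }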
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 8b6c6fd95ed..6c6c35c5228 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -121,6 +121,10 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SEC_WOWLAN = 21,
IWL_UCODE_TLV_DEF_CALIB = 22,
IWL_UCODE_TLV_PHY_SKU = 23,
+ IWL_UCODE_TLV_SECURE_SEC_RT = 24,
+ IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
+ IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+ IWL_UCODE_TLV_NUM_OF_CPU = 27,
};
struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index a1223680bc7..75db087120c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -75,11 +75,23 @@
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
+ * connection when going back to D0
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
+ * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
+ * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
+ * containing CAM (Continuous Active Mode) indication.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -87,11 +99,21 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
+ IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
+ IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+ IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
+ IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
};
/* The default calibrate table size if not specified by firmware file */
@@ -133,7 +155,8 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 4
+#define IWL_UCODE_SECTION_MAX 6
+#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -150,6 +173,8 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+ bool is_secure;
+ bool is_dual_cpus;
};
/* uCode version contains 4 values: Major/Minor/API/Serial */
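With IWL_UCODE_SECTION_MAX raised to 6 and IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU defined as half of it, a dual-CPU image's section array is implicitly split into two halves, one per CPU; the new is_secure and is_dual_cpus flags record per image what the loader should do with them. The index split, expressed as an illustrative helper (the loader itself is not part of this patch):

    static int cpu_for_section(int idx)
    {
            /* sections 0..2 feed the first CPU, 3..5 the second */
            return idx < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU ? 1 : 2;
    }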
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index dfa4d2e3aaa..ad8e19a56ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -34,7 +34,6 @@
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-fh.h"
-#include "iwl-csr.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ff8cc75c189..a70c7b9d9ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,6 +97,8 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_RTC_INT_STT_RFKILL (0x10000000)
+
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 80b47508647..143292b4dbb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -344,7 +344,7 @@ struct iwl_trans_config {
u8 cmd_queue;
u8 cmd_fifo;
const u8 *no_reclaim_cmds;
- int n_no_reclaim_cmds;
+ unsigned int n_no_reclaim_cmds;
bool rx_buf_size_8k;
bool bc_table_dword;
@@ -601,7 +601,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
{
int ret;
- if (trans->state != IWL_TRANS_FW_ALIVE) {
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return -EIO;
}
@@ -640,8 +640,8 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->tx(trans, skb, dev_cmd, queue);
}
@@ -649,16 +649,16 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
int ssn, struct sk_buff_head *skbs)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->reclaim(trans, queue, ssn, skbs);
}
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->txq_disable(trans, queue);
}
@@ -669,8 +669,8 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
{
might_sleep();
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
@@ -685,8 +685,8 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->wait_tx_queue_empty(trans);
}
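The hunks above keep the state check in each inline wrapper but downgrade it from a WARN_ONCE splat to a plain error log wrapped in unlikely(); iwl_trans_send_cmd() already returned -EIO and only gains the branch hint. Since the same three lines now repeat across the Tx-path wrappers, a hypothetical helper could express the check once (not part of the patch):

    static inline bool iwl_trans_fw_alive_or_log(struct iwl_trans *trans,
                                                 const char *func)
    {
            if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
                    IWL_ERR(trans, "%s bad state = %d", func, trans->state);
                    return false;
            }
            return true;
    }

Callers would then read, for example, if (!iwl_trans_fw_alive_or_log(trans, __func__)) return -EIO;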
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 0fad98b85f6..5d066cbc5ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -98,126 +98,258 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#undef EVENT_PRIO_ANT
-/* BT Antenna Coupling Threshold (dB) */
-#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
-#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD (3)
-
#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
-#define BT_REDUCED_TX_POWER_BIT BIT(7)
-
-static inline bool is_loose_coex(void)
-{
- return iwlwifi_mod_params.ant_coupling >
- IWL_BT_ANTENNA_COUPLING_THRESHOLD;
-}
+#define BT_ANTENNA_COUPLING_THRESHOLD (30)
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return 0;
+
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
}
-static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
-{
- struct iwl_bt_coex_prot_env_cmd env_cmd;
- int ret;
-
- env_cmd.action = action;
- env_cmd.type = type;
- ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
- sizeof(env_cmd), &env_cmd);
- if (ret)
- IWL_ERR(mvm, "failed to send BT env command\n");
- return ret;
-}
-
-enum iwl_bt_kill_msk {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_SCO_HID_A2DP,
- BT_KILL_MSK_REDUCED_TXPOW,
- BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xffff0000,
[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
[BT_KILL_MSK_REDUCED_TXPOW] = 0,
};
-static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xffff0000,
[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
[BT_KILL_MSK_REDUCED_TXPOW] = 0,
};
-#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
-
-/* Tight Coex */
-static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+ cpu_to_le32(0xf0f0f0f0),
+ cpu_to_le32(0xc0c0c0c0),
+ cpu_to_le32(0xfcfcfcfc),
+ cpu_to_le32(0xff00ff00),
};
-/* Loose Coex */
-static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
};
-/* Full concurrency */
-static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+ {
+ /* Tight */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaeaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ /* Loose */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ /* Tx Tx disabled */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xC0004000),
+ cpu_to_le32(0xC0004000),
+ cpu_to_le32(0xF0005000),
+ cpu_to_le32(0xF0005000),
+ },
};
-/* single shared antenna */
-static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
+/* 20MHz / 40MHz below / 40MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+ /* dummy entry for channel 0 */
+ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+ {
+ cpu_to_le64(0x0000001FFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000000FFFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000003FFFCULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ },
+ {
+ cpu_to_le64(0x00001FFFE0ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ },
+ {
+ cpu_to_le64(0x00007FFF80ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ },
+ {
+ cpu_to_le64(0x0003FFFC00ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ },
+ {
+ cpu_to_le64(0x000FFFF000ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ },
+ {
+ cpu_to_le64(0x007FFF8000ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ },
+ {
+ cpu_to_le64(0x01FFFE0000ULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ },
+ {
+ cpu_to_le64(0x0FFFF00000ULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ cpu_to_le64(0x0ULL),
+ },
+ {
+ cpu_to_le64(0x3FFFC00000ULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFFE000000ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFF8000000ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0)
+ },
};
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+ cpu_to_le32(0x22002200),
+ cpu_to_le32(0x33113311),
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum iwl_bt_coex_lut_type ret;
+ u16 phy_ctx_id;
+
+ /*
+ * Checking that we hold mvm->mutex is a good idea, but the rate
+ * control can't acquire the mutex since it runs in Tx path.
+ * So this is racy in that case, but in the worst case, the AMPDU
+ * size limit will be wrong for a short time which is not a big
+ * issue.
+ */
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return BT_COEX_LOOSE_LUT;
+ }
+
+ ret = BT_COEX_TX_DIS_LUT;
+
+ if (mvm->cfg->bt_shared_single_ant) {
+ rcu_read_unlock();
+ return ret;
+ }
+
+ phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+ if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+ else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+ /* else - default = TX TX disallowed */
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd *bt_cmd;
@@ -228,17 +360,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
.flags = CMD_SYNC,
};
int ret;
+ u32 flags;
- /* go to CALIB state in internal BT-Coex state machine */
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
-
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@@ -246,40 +371,52 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
- bt_cmd->bt3_time_t7_value = 1;
- bt_cmd->bt3_prio_sample_time = 2;
- bt_cmd->bt3_timer_t2_value = 0xc;
+ bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
+ bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
+ bt_cmd->bt4_tx_rx_max_freq0 = 15,
- bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
+ flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+ flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
+ bt_cmd->flags = cpu_to_le32(flags);
- bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+ bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
BT_VALID_BT_PRIO_BOOST |
BT_VALID_MAX_KILL |
BT_VALID_3W_TMRS |
BT_VALID_KILL_ACK |
BT_VALID_KILL_CTS |
BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT);
+ BT_VALID_LUT |
+ BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+ BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+ BT_VALID_MULTI_PRIO_LUT |
+ BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40 |
+ BT_VALID_ANT_ISOLATION |
+ BT_VALID_ANT_ISOLATION_THRS |
+ BT_VALID_TXTX_DELTA_FREQ_THRS |
+ BT_VALID_TXRX_MAX_FREQ_0);
if (mvm->cfg->bt_shared_single_ant)
- memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
- sizeof(iwl_single_shared_ant_lookup));
- else if (is_loose_coex())
- memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
- sizeof(iwl_tight_lookup));
+ memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+ sizeof(iwl_single_shared_ant));
else
- memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
- sizeof(iwl_tight_lookup));
+ memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+ sizeof(iwl_combined_lookup));
- bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+ memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+ sizeof(iwl_bt_prio_boost));
+ memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+ sizeof(iwl_bt_mprio_lut));
bt_cmd->kill_ack_msk =
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
bt_cmd->kill_cts_msk =
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -334,13 +471,17 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
- bt_cmd->valid_bit_msk =
- cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+ BT_VALID_KILL_ACK |
+ BT_VALID_KILL_CTS);
- IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
+ IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+ iwl_bt_ack_kill_msk[bt_kill_msk],
+ iwl_bt_cts_kill_msk[bt_kill_msk]);
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -364,12 +505,16 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
int ret;
- /* This can happen if the station has been removed right now */
if (sta_id == IWL_MVM_STATION_COUNT)
return 0;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has been removed right now */
+ if (IS_ERR_OR_NULL(sta))
+ return 0;
+
mvmsta = (void *)sta->drv_priv;
/* nothing to do */
@@ -380,8 +525,10 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
- bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
+ bt_cmd->valid_bit_msk =
+ cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
bt_cmd->bt_reduced_tx_power = sta_id;
if (enable)
@@ -403,8 +550,25 @@ struct iwl_bt_iterator_data {
struct iwl_mvm *mvm;
u32 num_bss_ifaces;
bool reduced_tx_power;
+ struct ieee80211_chanctx_conf *primary;
+ struct ieee80211_chanctx_conf *secondary;
};
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, int rssi)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->bf_data.last_bt_coex_event = rssi;
+ mvmvif->bf_data.bt_coex_max_thold =
+ enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+ mvmvif->bf_data.bt_coex_min_thold =
+ enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+}
+
+/* must be called under rcu_read_lock */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -413,65 +577,94 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
- enum ieee80211_band band;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf && chanctx_conf->def.chan)
- band = chanctx_conf->def.chan->band;
- else
- band = -1;
- rcu_read_unlock();
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- /* non associated BSSes aren't to be considered */
- if (!vif->bss_conf.assoc)
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ /* If channel context is invalid or not on 2.4GHz .. */
+ if ((!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+ /* ... and it is an associated STATION, relax constraints */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
+ }
+
+ /* SoftAP / GO will always be primary */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
- if (band != IEEE80211_BAND_2GHZ) {
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
+
+ if (chanctx_conf == data->primary)
+ return;
+
+ /* downgrade the current primary no matter what its type is */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
return;
}
- if (data->notif->bt_status)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
+ data->num_bss_ifaces++;
+
+ /* we are now a STA / P2P Client, and take associated ones only */
+ if (!vif->bss_conf.assoc)
+ return;
+
+ /* STA / P2P Client, try to be primary if first vif */
+ if (!data->primary || data->primary == chanctx_conf)
+ data->primary = chanctx_conf;
+ else if (!data->secondary)
+ /* if secondary is not NULL, it might be a GO */
+ data->secondary = chanctx_conf;
- if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD)
+ if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
smps_mode = IEEE80211_SMPS_STATIC;
+ else if (le32_to_cpu(data->notif->bt_activity_grading) >=
+ BT_LOW_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d traffic_load %d smps_req %d\n",
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status,
- data->notif->bt_traffic_load, smps_mode);
+ data->notif->bt_activity_grading, smps_mode);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
/* don't reduce the Tx power if in loose scheme */
- if (is_loose_coex())
+ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+ mvm->cfg->bt_shared_single_ant) {
+ data->reduced_tx_power = false;
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
+ }
- data->num_bss_ifaces++;
-
- /* reduced Txpower only if there are open BT connections, so ...*/
- if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
+ /* reduced Txpower only if BT is on, so ...*/
+ if (!data->notif->bt_status) {
/* ... cancel reduced Tx power ... */
if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
data->reduced_tx_power = false;
/* ... and there is no need to get reports on RSSI any more. */
- ieee80211_disable_rssi_reports(vif);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- ave_rssi = ieee80211_ave_rssi(vif);
+ /* try to get the avg rssi from fw */
+ ave_rssi = mvmvif->bf_data.ave_beacon_signal;
/* if the RSSI isn't valid, fake it is very low */
if (!ave_rssi)
@@ -499,8 +692,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
}
/* Begin to monitor the RSSI: it may influence the reduced Tx power */
- ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD,
- BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
}
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
@@ -510,11 +702,72 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
.notif = &mvm->last_bt_notif,
.reduced_tx_power = true,
};
+ struct iwl_bt_coex_ci_cmd cmd = {};
+ u8 ci_bw_idx;
+ rcu_read_lock();
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (data.primary) {
+ struct ieee80211_chanctx_conf *chan = data.primary;
+ if (WARN_ON(!chan->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ cmd.co_run_bw_primary = 0;
+ } else {
+ cmd.co_run_bw_primary = 1;
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_primary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+ }
+
+ if (data.secondary) {
+ struct ieee80211_chanctx_conf *chan = data.secondary;
+ if (WARN_ON(!data.secondary->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ cmd.co_run_bw_secondary = 0;
+ } else {
+ cmd.co_run_bw_secondary = 1;
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_secondary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+ }
+
+ rcu_read_unlock();
+
+ /* Don't spam the fw with the same command over and over */
+ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send BT_CI cmd");
+ memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+ }
+
/*
* If there are no BSS / P2P client interfaces, reduced Tx Power is
* irrelevant since it is based on the RSSI coming from the beacon.
@@ -536,12 +789,18 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
+ IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+ notif->bt_status ? "ON" : "OFF");
IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
- IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
notif->bt_agg_traffic_load);
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
/* remember this notification for future use: rssi fluctuations */
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -565,6 +824,18 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ /* If channel context is invalid or not on 2.4GHz - don't count it */
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
if (vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
@@ -594,15 +865,15 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
};
int ret;
- mutex_lock(&mvm->mutex);
+ lockdep_assert_held(&mvm->mutex);
/* Rssi update while not associated ?! */
if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
- goto out_unlock;
+ return;
- /* No open connection - reports should be disabled */
- if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2))
- goto out_unlock;
+ /* No BT - reports should be disabled */
+ if (!mvm->last_bt_notif.bt_status)
+ return;
IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
@@ -611,7 +882,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Check if rssi is good enough for reduced Tx power, but not in loose
* scheme.
*/
- if (rssi_event == RSSI_EVENT_LOW || is_loose_coex())
+ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+ iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
false);
else
@@ -633,12 +905,52 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
- out_unlock:
- mutex_unlock(&mvm->mutex);
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
+
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_LOW_TRAFFIC)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+ if (lut_type == BT_COEX_LOOSE_LUT)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ /* tight coex, high bt traffic, reduce AGG time limit */
+ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
}
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return true;
+
+ /*
+ * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
+ * already killed.
+ * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
+ * Tx.
+ */
+ return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
+}
+
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return;
+
iwl_mvm_bt_coex_notif_handle(mvm);
}
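To recap the new flow in this file: iwl_get_coex_type() maps a vif's channel context to a LUT (loose when off 2.4 GHz, Tx/Tx-disallowed for shared-antenna or unknown PHY, otherwise whatever the last notification reported for that PHY id), and iwl_mvm_bt_coex_notif_handle() sends a BT_COEX_CI command whose masks come from iwl_ci_mask indexed by channel number and bandwidth position. The bandwidth index chosen twice in that function, expressed as a hedged helper (hypothetical name):

    static u8 example_ci_bw_idx(const struct ieee80211_chanctx_conf *chan)
    {
            if (chan->def.width < NL80211_CHAN_WIDTH_40)
                    return 0;       /* 20 MHz column                   */
            if (chan->def.center_freq1 > chan->def.chan->center_freq)
                    return 2;       /* 40 MHz, secondary channel above */
            return 1;               /* 40 MHz, secondary channel below */
    }

The caller then picks iwl_ci_mask[chan->def.chan->hw_value][idx] for the CI bitmap, as the hunk does for both the primary and the secondary context.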
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 2bf29f7992e..4b6d670c350 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -70,7 +70,9 @@
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
-#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 417639f77b0..6f45966817b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -67,6 +67,7 @@
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
@@ -381,14 +382,74 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = CMD_SYNC,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v3l.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v3l.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ if (mvmvif->num_target_ipv6_addrs)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+ /*
+ * For each address we have (and that will fit) fill a target
+ * address struct and combine for NS offload structs with the
+ * solicited node addresses.
+ */
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+ else
+ cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
@@ -419,7 +480,13 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
}
#endif
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v3l.common;
+ size = sizeof(cmd.v3l);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
@@ -438,8 +505,8 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
common->enabled = cpu_to_le32(enabled);
- return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
- size, &cmd);
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
}
enum iwl_mvm_tcp_packet_type {
@@ -793,6 +860,74 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NON_QOS_TX_COUNTER_CMD,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ int err;
+ u32 size;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
+ }
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ if (err)
+ return err;
+
+ size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ size -= sizeof(cmd.resp_pkt->hdr);
+ if (size < sizeof(__le16)) {
+ err = -EINVAL;
+ } else {
+ err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+ /* new API returns next, not last-used seqno */
+ if (mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+ err -= 0x10;
+ }
+
+ iwl_free_resp(&cmd);
+ return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .value = cpu_to_le16(mvmvif->seqno),
+ };
+
+ /* return if called during restart, not resume from D3 */
+ if (!mvmvif->seqno_valid)
+ return;
+
+ mvmvif->seqno_valid = false;
+
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
+ return;
+
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+ sizeof(query_cmd), &query_cmd))
+ IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@@ -829,7 +964,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret, i;
int len __maybe_unused;
- u16 seq;
u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
@@ -872,26 +1006,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
- /*
- * We know the last used seqno, and the uCode expects to know that
- * one, it will increment before TX.
- */
- seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
- wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+ /* Query the last used seqno and set it */
+ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+ if (ret < 0)
+ goto out_noreset;
+ wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
/*
* For QoS counters, we store the one to use next, so subtract 0x10
@@ -899,7 +1022,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* increment after using the value (i.e. store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
- seq = mvm_ap_sta->tid_data[i].seq_number;
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
}
@@ -945,6 +1068,16 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. Store the real STA ID here
+ * and assign 0. When we leave this function, we'll restore
+ * the original value for the resume code.
+ */
+ old_ap_sta_id = mvm_ap_sta->sta_id;
+ mvm_ap_sta->sta_id = 0;
+ mvmvif->ap_sta_id = 0;
+
+ /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1059,6 +1192,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ if (ret)
+ goto out;
+
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
goto out;
@@ -1109,16 +1246,26 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return __iwl_mvm_suspend(hw, wowlan, false);
}
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+ u16 pattern_number;
+ u16 qos_seq_ctr[8];
+ u32 wakeup_reasons;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ const u8 *wake_packet;
+};
+
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status *status)
+ struct iwl_wowlan_status_data *status)
{
struct sk_buff *pkt = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- u32 reasons = le32_to_cpu(status->wakeup_reasons);
+ u32 reasons = status->wakeup_reasons;
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
@@ -1130,7 +1277,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
wakeup.pattern_idx =
- le16_to_cpu(status->pattern_number);
+ status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
@@ -1158,8 +1305,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
wakeup.tcp_match = true;
if (status->wake_packet_bufsize) {
- int pktsize = le32_to_cpu(status->wake_packet_bufsize);
- int pktlen = le32_to_cpu(status->wake_packet_length);
+ int pktsize = status->wake_packet_bufsize;
+ int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
struct ieee80211_hdr *hdr = (void *)pktdata;
int truncated = pktlen - pktsize;
@@ -1239,8 +1386,229 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
kfree_skb(pkt);
}
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ u64 pn;
+
+ pn = le64_to_cpu(sc->pn);
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
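
/*
 * Note on the byte order above: mac80211 keeps the CCMP PN with pn[0] as
 * the most significant byte, so the 48-bit counter is split high byte
 * first.  Illustrative inverse conversion (a sketch, not part of the
 * patch):
 */
static inline u64 iwl_mvm_seq_to_pn_sketch(const struct ieee80211_key_seq *seq)
{
	return ((u64)seq->ccmp.pn[0] << 40) |
	       ((u64)seq->ccmp.pn[1] << 32) |
	       ((u64)seq->ccmp.pn[2] << 24) |
	       ((u64)seq->ccmp.pn[3] << 16) |
	       ((u64)seq->ccmp.pn[4] << 8) |
		(u64)seq->ccmp.pn[5];
}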
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+ seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_v6 *status)
+{
+ union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+ struct iwl_wowlan_status_v6 *status;
+ void *last_gtk;
+ u32 cipher;
+ bool find_phase, unhandled_cipher;
+ int num_keys;
+};
+
+static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* we support these */
+ break;
+ default:
+ /* everything else (even CMAC for MFP) - disconnect from AP */
+ data->unhandled_cipher = true;
+ return;
+ }
+
+ data->num_keys++;
+
+ /*
+ * pairwise key - update sequence counters only;
+ * note that this assumes no TDLS sessions are active
+ */
+ if (sta) {
+ struct ieee80211_key_seq seq = {};
+ union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
+ if (data->find_phase)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
+ iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+ iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+ break;
+ }
+ ieee80211_set_key_tx_seq(key, &seq);
+
+ /* that's it for this key */
+ return;
+ }
+
+ if (data->find_phase) {
+ data->last_gtk = key;
+ data->cipher = key->cipher;
+ return;
+ }
+
+ if (data->status->num_of_gtk_rekeys)
+ ieee80211_remove_key(key);
+ else if (data->last_gtk == key)
+ iwl_mvm_set_key_rx_seq(key, data->status);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_v6 *status)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+ .status = status,
+ };
+
+ if (!status || !vif->bss_conf.bssid)
+ return false;
+
+ /* find last GTK that we used initially, if any */
+ gtkdata.find_phase = true;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_gtks, &gtkdata);
+ /* not trying to keep connections with MFP/unhandled ciphers */
+ if (gtkdata.unhandled_cipher)
+ return false;
+ if (!gtkdata.num_keys)
+ return true;
+ if (!gtkdata.last_gtk)
+ return false;
+
+ /*
+ * invalidate all other GTKs that might still exist and update
+ * the one that we used
+ */
+ gtkdata.find_phase = false;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_gtks, &gtkdata);
+
+ if (status->num_of_gtk_rekeys) {
+ struct ieee80211_key_conf *key;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {
+ .conf.cipher = gtkdata.cipher,
+ .conf.keyidx = status->gtk.key_index,
+ };
+
+ switch (gtkdata.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key,
+ WLAN_KEY_LEN_CCMP);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+ /* leave TX MIC key zeroed, we don't use it anyway */
+ memcpy(conf.conf.key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ status->gtk.tkip_mic_key, 8);
+ break;
+ }
+
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ if (IS_ERR(key))
+ return false;
+ iwl_mvm_set_key_rx_seq(key, status);
+ }
+
+ if (status->num_of_gtk_rekeys) {
+ __be64 replay_ctr =
+ cpu_to_be64(le64_to_cpu(status->replay_ctr));
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ }
+
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+ return true;
+}
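
/*
 * Why the +/- 0x10 adjustments above and in __iwl_mvm_suspend(): sequence
 * numbers are kept in 802.11 Sequence Control encoding, where the 12-bit
 * sequence number occupies bits 4..15 (IEEE80211_SCTL_SEQ) and the fragment
 * number bits 0..3.  Stepping to the next sequence number therefore means
 * adding 0x10, for example (illustrative values):
 *
 *	u16 last_used = 0x0123 & IEEE80211_SCTL_SEQ;	   -> 0x0120
 *	u16 next_to_use = last_used + 0x10;		   -> 0x0130
 */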
+
/* releases the MVM mutex */
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
@@ -1253,8 +1621,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
- struct iwl_wowlan_status *status;
- int ret, len;
+ struct iwl_wowlan_status_data status;
+ struct iwl_wowlan_status_v6 *status_v6;
+ int ret, len, status_size, i;
+ bool keep;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
@@ -1287,32 +1659,83 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+ status_size = sizeof(struct iwl_wowlan_status_v6);
+ else
+ status_size = sizeof(struct iwl_wowlan_status_v4);
+
len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ if (len - sizeof(struct iwl_cmd_header) < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
- status = (void *)cmd.resp_pkt->data;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+ status_v6 = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(status_v6->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(status_v6->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(status_v6->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(status_v6->wake_packet_bufsize);
+ status.wake_packet = status_v6->wake_packet;
+ } else {
+ struct iwl_wowlan_status_v4 *status_v4;
+ status_v6 = NULL;
+ status_v4 = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(status_v4->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(status_v4->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(status_v4->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(status_v4->wake_packet_bufsize);
+ status.wake_packet = status_v4->wake_packet;
+ }
if (len - sizeof(struct iwl_cmd_header) !=
- sizeof(*status) +
- ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+ status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
+ /* still at hard-coded place 0 for D3 image */
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[0],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out_free_resp;
+
+ mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = status.qos_seq_ctr[i];
+ /* firmware stores last-used value, we store next value */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+
/* now we have all the data we need, unlock to avoid mac80211 issues */
mutex_unlock(&mvm->mutex);
- iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+ iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+
iwl_free_resp(&cmd);
- return;
+ return keep;
out_free_resp:
iwl_free_resp(&cmd);
out_unlock:
mutex_unlock(&mvm->mutex);
+ return false;
}
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1335,6 +1758,17 @@ static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
#endif
}
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep connection on */
+ if (data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_resume_disconnect(vif);
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct iwl_d3_iter_data resume_iter_data = {
@@ -1343,6 +1777,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
struct ieee80211_vif *vif = NULL;
int ret;
enum iwl_d3_status d3_status;
+ bool keep = false;
mutex_lock(&mvm->mutex);
@@ -1368,7 +1803,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
- iwl_mvm_query_wakeup_reasons(mvm, vif);
+ keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
@@ -1376,8 +1811,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_unlock(&mvm->mutex);
out:
- if (!test && vif)
- ieee80211_resume_disconnect(vif);
+ if (!test)
+ ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index aac81b8984b..9864d713eb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -246,58 +246,56 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+ char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
- int allow;
-
- if (!mvm->ucode_loaded)
- return -EIO;
-
- if (copy_from_user(buf, user_buf, sizeof(buf)))
- return -EFAULT;
-
- if (sscanf(buf, "%d", &allow) != 1)
- return -EINVAL;
-
- IWL_DEBUG_POWER(mvm, "%s device power down\n",
- allow ? "allow" : "prevent");
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos = 0;
- /*
- * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it
- */
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+ mvm->disable_power_off);
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+ mvm->disable_power_off_d3);
- return count;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
- int allow;
+ char buf[64] = {};
+ int ret;
+ int val;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ if (!mvm->ucode_loaded)
+ return -EIO;
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- if (sscanf(buf, "%d", &allow) != 1)
+ if (!strncmp("disable_power_off_d0=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off = val;
+ } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off_d3 = val;
+ } else {
return -EINVAL;
+ }
- IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
- allow ? "allow" : "prevent");
-
- /*
- * TODO: When WoWLAN FW alive notification happens, driver will send
- * REPLY_DEBUG_CMD setting power_down_allow flag according to
- * mvm->prevent_power_down_d3
- */
- mvm->prevent_power_down_d3 = !allow;
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ mutex_unlock(&mvm->mutex);
- return count;
+ return ret ?: count;
}
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -344,6 +342,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
dbgfs_pm->disable_power_off = val;
+ break;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
@@ -371,7 +370,8 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
int val;
int ret;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (!strncmp("keep_alive=", buf, 11)) {
@@ -394,7 +394,9 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18)) {
+ } else if (!strncmp("disable_power_off=", buf, 18) &&
+ !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
if (sscanf(buf + 18, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
@@ -581,15 +583,21 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
- notif->bt_status);
+ notif->bt_status);
pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
- notif->bt_open_conn);
+ notif->bt_open_conn);
pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
- notif->bt_traffic_load);
+ notif->bt_traffic_load);
pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
- notif->bt_agg_traffic_load);
+ notif->bt_agg_traffic_load);
pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
+ notif->bt_ci_compliance);
+ pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
mutex_unlock(&mvm->mutex);
@@ -600,6 +608,38 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
}
#undef BT_MBOX_PRINT
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+ char buf[256];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
+ le64_to_cpu(cmd->bt_primary_ci),
+ !!cmd->co_run_bw_primary);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
+ le64_to_cpu(cmd->bt_secondary_ci),
+ !!cmd->co_run_bw_secondary);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+ pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+ iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+ pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+ iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
@@ -615,9 +655,11 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
int pos = 0;
char *buf;
int ret;
- int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
- sizeof(struct mvm_statistics_rx_non_phy) * 10 +
- sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+ /* 43 is the size of each data line, 33 is the size of each header */
+ size_t bufsz =
+ ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
+ (4 * 33) + 1;
+
struct mvm_statistics_rx_phy *ofdm;
struct mvm_statistics_rx_phy *cck;
struct mvm_statistics_rx_non_phy *general;
@@ -712,6 +754,7 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+ PRINT_STATS_LE32("mac_id", general->mac_id);
PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
@@ -757,6 +800,59 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
}
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ /* print which antennas were set for the scan command by the user */
+ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+ if (mvm->scan_rx_ant & ANT_A)
+ pos += scnprintf(buf + pos, bufsz - pos, "A");
+ if (mvm->scan_rx_ant & ANT_B)
+ pos += scnprintf(buf + pos, bufsz - pos, "B");
+ if (mvm->scan_rx_ant & ANT_C)
+ pos += scnprintf(buf + pos, bufsz - pos, "C");
+ pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8];
+ int buf_size;
+ u8 scan_rx_ant;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+
+ /* get the argument from the user and check if it is valid */
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+ return -EINVAL;
+ if (scan_rx_ant > ANT_ABC)
+ return -EINVAL;
+ if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+ return -EINVAL;
+
+ /* change the rx antennas for scan command */
+ mvm->scan_rx_ant = scan_rx_ant;
+
+ return count;
+}
+
+
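/*
 * Usage note for the new scan_ant_rxchain file: the value written is a hex
 * antenna bitmap (ANT_A/ANT_B/ANT_C, i.e. bits 0..2 under the usual iwlwifi
 * convention), so e.g.
 *
 *	echo 3 > .../iwlmvm/scan_ant_rxchain
 *
 * selects antennas A and B for subsequent scan commands, provided both are
 * valid RX antennas for this firmware (the exact debugfs path depends on
 * the platform).
 */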
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -968,7 +1064,8 @@ static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
char buf[8] = {};
int store;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (sscanf(buf, "%d", &store) != 1)
@@ -1063,10 +1160,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
#endif
@@ -1087,10 +1186,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
- MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+ S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
+ S_IWUSR | S_IRUSR);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 05c61d6f384..4ea5e24ca92 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -82,6 +82,8 @@
* @BT_USE_DEFAULTS:
* @BT_SYNC_2_BT_DISABLE:
* @BT_COEX_CORUNNING_TBL_EN:
+ *
+ * The COEX_MODE must be set for each command, even if it is not changed.
*/
enum iwl_bt_coex_flags {
BT_CH_PRIMARY_EN = BIT(0),
@@ -95,14 +97,16 @@ enum iwl_bt_coex_flags {
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
BT_USE_DEFAULTS = BIT(6),
BT_SYNC_2_BT_DISABLE = BIT(7),
- /*
- * For future use - when the flags will be enlarged
- * BT_COEX_CORUNNING_TBL_EN = BIT(8),
- */
+ BT_COEX_CORUNNING_TBL_EN = BIT(8),
+ BT_COEX_MPLUT_TBL_EN = BIT(9),
+ /* Bit 10 is reserved */
+ BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
};
/*
* indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * be discarded by the firmware.
*/
enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ENABLE = BIT(0),
@@ -121,11 +125,8 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_CORUN_LUT_40 = BIT(13),
BT_VALID_ANT_ISOLATION = BIT(14),
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
- /*
- * For future use - when the valid flags will be enlarged
- * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
- * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
- */
+ BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
+ BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
};
/**
@@ -142,48 +143,88 @@ enum iwl_bt_reduced_tx_power {
BT_REDUCED_TX_POWER_DATA = BIT(1),
};
+enum iwl_bt_coex_lut_type {
+ BT_COEX_TIGHT_LUT = 0,
+ BT_COEX_LOOSE_LUT,
+ BT_COEX_TX_DIS_LUT,
+
+ BT_COEX_MAX_LUT,
+};
+
#define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
/**
* struct iwl_bt_coex_cmd - bt coex configuration command
* @flags:&enum iwl_bt_coex_flags
- * @lead_time:
* @max_kill:
- * @bt3_time_t7_value:
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @bt3_prio_sample_time:
- * @bt3_timer_t2_value:
- * @bt4_reaction_time:
- * @decision_lut[12]:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- * @bt_prio_boost: values for PTA boost register
+ * @bt4_antenna_isolation:
+ * @bt4_antenna_isolation_thr:
+ * @bt4_tx_tx_delta_freq_thr:
+ * @bt4_tx_rx_max_freq0:
+ * @bt_prio_boost:
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @decision_lut:
+ * @bt4_multiprio_lut:
+ * @bt4_corun_lut20:
+ * @bt4_corun_lut40:
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
*/
struct iwl_bt_coex_cmd {
- u8 flags;
- u8 lead_time;
+ __le32 flags;
u8 max_kill;
- u8 bt3_time_t7_value;
+ u8 bt_reduced_tx_power;
+ u8 reserved[2];
+
+ u8 bt4_antenna_isolation;
+ u8 bt4_antenna_isolation_thr;
+ u8 bt4_tx_tx_delta_freq_thr;
+ u8 bt4_tx_rx_max_freq0;
+
+ __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
+ __le32 wifi_tx_prio_boost;
+ __le32 wifi_rx_prio_boost;
__le32 kill_ack_msk;
__le32 kill_cts_msk;
- u8 bt3_prio_sample_time;
- u8 bt3_timer_t2_value;
- __le16 bt4_reaction_time;
- __le32 decision_lut[BT_COEX_LUT_SIZE];
- u8 bt_reduced_tx_power;
- u8 reserved;
- __le16 valid_bit_msk;
- __le32 bt_prio_boost;
- u8 reserved2;
- u8 wifi_tx_prio_boost;
- __le16 wifi_rx_prio_boost;
+
+ __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+ __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+ __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+ __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+
+ __le32 valid_bit_msk;
} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+ __le64 bt_primary_ci;
+ __le64 bt_secondary_ci;
+
+ u8 co_run_bw_primary;
+ u8 co_run_bw_secondary;
+ u8 primary_ch_phy_id;
+ u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
@@ -244,23 +285,39 @@ enum iwl_bt_mxbox_dw3 {
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
+enum iwl_bt_activity_grading {
+ BT_OFF = 0,
+ BT_ON_NO_CONNECTION = 1,
+ BT_LOW_TRAFFIC = 2,
+ BT_HIGH_TRAFFIC = 3,
+};
+
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
* @mbox_msg: message from BT to WiFi
- * @:bt_status: 0 - off, 1 - on
- * @:bt_open_conn: number of BT connections open
- * @:bt_traffic_load: load of BT traffic
- * @:bt_agg_traffic_load: aggregated load of BT traffic
- * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT, see enum %iwl_bt_activity_grading
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
+ __le32 msg_idx;
u8 bt_status;
u8 bt_open_conn;
u8 bt_traffic_load;
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 reserved[3];
+
+ __le32 primary_ch_lut;
+ __le32 secondary_ch_lut;
+ __le32 bt_activity_grading;
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
enum iwl_bt_coex_prio_table_event {
@@ -300,20 +357,4 @@ struct iwl_bt_coex_prio_tbl_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;
-enum iwl_bt_coex_env_action {
- BT_COEX_ENV_CLOSE = 0,
- BT_COEX_ENV_OPEN = 1,
-}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
-
-/**
- * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
- * @action: enum %iwl_bt_coex_env_action
- * @type: enum %iwl_bt_coex_prio_table_event
- */
-struct iwl_bt_coex_prot_env_cmd {
- u8 action; /* 0 = closed, 1 = open */
- u8 type; /* 0 .. 15 */
- u8 reserved[2];
-} __packed;
-
#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index df72fcdf817..4e7dd8cf87d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -100,7 +100,12 @@ enum iwl_proto_offloads {
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
/**
* struct iwl_proto_offload_cmd_common - ARP/NS offload common part
@@ -155,6 +160,43 @@ struct iwl_proto_offload_cmd_v2 {
u8 reserved2[3];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+struct iwl_ns_config {
+ struct in6_addr source_ipv6_addr;
+ struct in6_addr dest_ipv6_addr;
+ u8 target_mac_addr[ETH_ALEN];
+ __le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+ struct in6_addr addr;
+ __le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
/*
* WOWLAN_PATTERNS
@@ -293,7 +335,7 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v4 {
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -308,6 +350,29 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+struct iwl_wowlan_gtk_status {
+ u8 key_index;
+ u8 reserved[3];
+ u8 decrypt_key[16];
+ u8 tkip_mic_key[8];
+ struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed;
+
+struct iwl_wowlan_status_v6 {
+ struct iwl_wowlan_gtk_status gtk;
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 98b1feb43d3..39c3148bdfa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -170,12 +170,14 @@ struct iwl_mac_data_ap {
* @beacon_tsf: beacon transmit time in TSF
* @bi: beacon interval in TU
* @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
*/
struct iwl_mac_data_ibss {
__le32 beacon_time;
__le64 beacon_tsf;
__le32 bi;
__le32 bi_reciprocal;
+ __le32 beacon_template;
} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
/**
@@ -372,4 +374,13 @@ static inline u32 iwl_mvm_reciprocal(u32 v)
return 0xFFFFFFFF / v;
}
+#define IWL_NONQOS_SEQ_GET 0x1
+#define IWL_NONQOS_SEQ_SET 0x2
+struct iwl_nonqos_seq_query_cmd {
+ __le32 get_set_flag;
+ __le32 mac_id_n_color;
+ __le16 value;
+ __le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 8e7ab41079c..5cb93ae5cd2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -132,6 +132,33 @@ struct iwl_powertable_cmd {
} __packed;
/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow saving power by turning off
+ * receiver and transmitter. '0' - does not allow. This flag should be
+ * always set to '1' unless one needs to disable actual power down for debug
+ * purposes.
+ * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
+ * that power management is disabled. '0' Power management is enabled, one
+ * of power schemes is applied.
+*/
+enum iwl_device_power_flags {
+ DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ DEVICE_POWER_FLAGS_CAM_MSK = BIT(13),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
+ */
+struct iwl_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ __le16 reserved;
+} __packed;
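
/*
 * Illustrative sketch only, not the actual iwl_mvm_power_update_device_mode()
 * implementation: one plausible way the command above could be filled from
 * the new disable_power_off debugfs knob.  The policy shown is an assumption.
 */
static inline void
iwl_mvm_fill_device_power_sketch(struct iwl_device_power_cmd *cmd,
				 bool disable_power_off)
{
	/* allow the device to power down unless the debug knob forbids it */
	cmd->flags = disable_power_off ?
		0 : cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
	cmd->reserved = 0;
}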
+
+/**
* struct iwl_mac_power_cmd - New power command containing uAPSD support
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
* @id_and_color: MAC context identifier
@@ -290,7 +317,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
-#define IWL_BA_ESCAPE_TIMER_D3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index fdd33bc0a59..538f1c7a596 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -68,6 +68,7 @@
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
*/
enum {
IWL_RATE_1M_INDEX = 0,
@@ -78,18 +79,31 @@ enum {
IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
IWL_RATE_6M_INDEX,
IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+ IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+ IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX,
+ IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
IWL_RATE_18M_INDEX,
+ IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
IWL_RATE_24M_INDEX,
+ IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
IWL_RATE_36M_INDEX,
+ IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX,
+ IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
IWL_RATE_54M_INDEX,
+ IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
IWL_RATE_60M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+ IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+ IWL_RATE_MCS_8_INDEX,
+ IWL_RATE_MCS_9_INDEX,
+ IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT,
+ IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -108,6 +122,7 @@ enum {
IWL_RATE_2M_PLCP = 20,
IWL_RATE_5M_PLCP = 55,
IWL_RATE_11M_PLCP = 110,
+ IWL_RATE_INVM_PLCP = -1,
};
/*
@@ -164,6 +179,8 @@ enum {
* which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
*/
#define RATE_HT_MCS_RATE_CODE_MSK 0x7
+#define RATE_HT_MCS_NSS_POS 3
+#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS)
/* Bit 10: (1) Use Green Field preamble */
#define RATE_HT_MCS_GF_POS 10
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 83cb9b992ea..c3782b48ded 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -356,6 +356,7 @@ struct iwl_scan_complete_notif {
/* scan offload */
#define IWL_MAX_SCAN_CHANNELS 40
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
#define IWL_SCAN_MAX_PROFILES 11
#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
@@ -368,6 +369,12 @@ struct iwl_scan_complete_notif {
#define IWL_FULL_SCAN_MULTIPLIER 5
#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+enum scan_framework_client {
+ SCAN_CLIENT_SCHED_SCAN = BIT(0),
+ SCAN_CLIENT_NETDETECT = BIT(1),
+ SCAN_CLIENT_ASSET_TRACKING = BIT(2),
+};
+
/**
* struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
* @scan_flags: see enum iwl_scan_flags
@@ -449,11 +456,12 @@ struct iwl_scan_offload_cfg {
* iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
* @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
*/
struct iwl_scan_offload_blacklist {
u8 ssid[ETH_ALEN];
u8 reported_rssi;
- u8 reserved;
+ u8 client_bitmap;
} __packed;
enum iwl_scan_offload_network_type {
@@ -475,6 +483,7 @@ enum iwl_scan_offload_band_selection {
* @auth_alg: authentication algorithm to match - bitmap
* @network_type: enum iwl_scan_offload_network_type
* @band_selection: enum iwl_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
*/
struct iwl_scan_offload_profile {
u8 ssid_index;
@@ -482,7 +491,8 @@ struct iwl_scan_offload_profile {
u8 auth_alg;
u8 network_type;
u8 band_selection;
- u8 reserved[3];
+ u8 client_bitmap;
+ u8 reserved[2];
} __packed;
/**
@@ -491,13 +501,18 @@ struct iwl_scan_offload_profile {
* @profiles: profiles to search for match
* @blacklist_len: length of blacklist
* @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
*/
struct iwl_scan_offload_profile_cfg {
- struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
u8 blacklist_len;
u8 num_profiles;
- u8 reserved[2];
+ u8 match_notify;
+ u8 pass_match;
+ u8 active_clients;
+ u8 reserved[3];
} __packed;
/**
@@ -560,4 +575,15 @@ struct iwl_scan_offload_complete {
u8 reserved;
} __packed;
+/**
+ * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
+ * @ssid_bitmap: SSID indexes found in this iteration
+ * @client_bitmap: clients that are active and wait for this notification
+ */
+struct iwl_sched_scan_results {
+ __le16 ssid_bitmap;
+ u8 client_bitmap;
+ u8 reserved;
+};
+
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index a30691a8a85..4aca5933a65 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -247,7 +247,7 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
- * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
* @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
@@ -286,7 +286,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwl_mvm_add_sta_cmd {
+struct iwl_mvm_add_sta_cmd_v5 {
u8 add_modify;
u8 unicast_tx_key_id;
u8 multicast_tx_key_id;
@@ -313,6 +313,57 @@ struct iwl_mvm_add_sta_cmd {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
+ * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
+ * VER_6 of this command is quite similar to VER_5, except that all
+ * fields related to security key installation are excluded.
+ */
+struct iwl_mvm_add_sta_cmd_v6 {
+ u8 add_modify;
+ u8 reserved1;
+ __le16 tid_disable_tx;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwl_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd {
+ u8 sta_id;
+ u8 key_offset;
+ __le16 key_flags;
+ u8 key[16];
+ u8 key2[16];
+ u8 rx_secur_seq_cnt[16];
+ u8 tkip_rx_tsc_byte2;
+ u8 reserved;
+ __le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
* @ADD_STA_SUCCESS: operation was executed successfully
* @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 66264cc5a01..bad5a552dd8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -72,17 +72,17 @@
#include "fw-api-d3.h"
#include "fw-api-bt-coex.h"
-/* queue and FIFO numbers by usage */
+/* maximal number of Tx queues in any platform */
+#define IWL_MVM_MAX_QUEUES 20
+
+/* Tx queue numbers */
enum {
IWL_MVM_OFFCHANNEL_QUEUE = 8,
IWL_MVM_CMD_QUEUE = 9,
- IWL_MVM_AUX_QUEUE = 15,
- IWL_MVM_FIRST_AGG_QUEUE = 16,
- IWL_MVM_NUM_QUEUES = 20,
- IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
- IWL_MVM_CMD_FIFO = 7
};
+#define IWL_MVM_CMD_FIFO 7
+
#define IWL_MVM_STATION_COUNT 16
/* commands */
@@ -97,6 +97,7 @@ enum {
DBG_CFG = 0x9,
/* station table */
+ ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
REMOVE_STA = 0x19,
@@ -114,6 +115,7 @@ enum {
TIME_EVENT_NOTIFICATION = 0x2a,
BINDING_CONTEXT_CMD = 0x2b,
TIME_QUOTA_CMD = 0x2c,
+ NON_QOS_TX_COUNTER_CMD = 0x2d,
LQ_CMD = 0x4e,
@@ -130,6 +132,7 @@ enum {
SCAN_OFFLOAD_COMPLETE = 0x6D,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+ MATCH_FOUND_NOTIFICATION = 0xd9,
/* Phy */
PHY_CONFIGURATION_CMD = 0x6a,
@@ -178,6 +181,7 @@ enum {
BT_COEX_PRIO_TABLE = 0xcc,
BT_COEX_PROT_ENV = 0xcd,
BT_PROFILE_NOTIFICATION = 0xce,
+ BT_COEX_CI = 0x5d,
REPLY_BEACON_FILTERING_CMD = 0xd2,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c76299a3a1e..70e5297646b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -151,13 +151,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type old_type = mvm->cur_ucode;
static const u8 alive_cmd[] = { MVM_ALIVE };
- mvm->cur_ucode = ucode_type;
fw = iwl_get_ucode_image(mvm, ucode_type);
-
- mvm->ucode_loaded = false;
-
- if (!fw)
+ if (WARN_ON(!fw))
return -EINVAL;
+ mvm->cur_ucode = ucode_type;
+ mvm->ucode_loaded = false;
iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
@@ -199,7 +197,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
- if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+ if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
mvm->queue_to_mac80211[i] = i;
else
mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
@@ -243,7 +241,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->init_ucode_run)
+ if (mvm->init_ucode_complete)
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -264,6 +262,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
if (ret)
goto error;
+ /* Read the NVM only at driver load time, no need to do this twice */
if (read_nvm) {
/* Read nvm */
ret = iwl_nvm_init(mvm);
@@ -273,6 +272,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
}
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (iwlwifi_mod_params.nvm_file)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
WARN_ON(ret);
@@ -310,7 +313,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
MVM_UCODE_CALIB_TIMEOUT);
if (!ret)
- mvm->init_ucode_run = true;
+ mvm->init_ucode_complete = true;
goto out;
error:
@@ -353,8 +356,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
return ret;
- /* If we were in RFKILL during module loading, load init ucode now */
- if (!mvm->init_ucode_run) {
+ /*
+ * If we haven't completed the run of the init ucode during
+ * module loading, load init ucode now
+ * (for example, if we were in RFKILL)
+ */
+ if (!mvm->init_ucode_complete) {
ret = iwl_run_init_mvm_ucode(mvm, false);
if (ret && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
@@ -424,6 +431,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ if (ret)
+ goto error;
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5fe23a5ea9b..f41f9b07983 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -80,7 +80,7 @@ struct iwl_mvm_mac_iface_iterator_data {
struct ieee80211_vif *vif;
unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
- unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+ unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)];
enum iwl_tsf_id preferred_tsf;
bool found_vif;
};
@@ -218,7 +218,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.preferred_tsf = NUM_TSF_IDS,
.used_hw_queues = {
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(IWL_MVM_AUX_QUEUE) |
+ BIT(mvm->aux_queue) |
BIT(IWL_MVM_CMD_QUEUE)
},
.found_vif = false,
@@ -242,9 +242,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
* that we should share it with another interface.
*/
- /* Currently, MAC ID 0 should be used only for the managed vif */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ break;
+ /* fall through */
+ default:
__clear_bit(0, data.available_mac_ids);
+ }
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -302,9 +310,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(data.used_hw_queues,
- IWL_MVM_FIRST_AGG_QUEUE);
+ mvm->first_agg_queue);
- if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate queue\n");
ret = -EIO;
goto exit_fail;
@@ -317,9 +325,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) {
u8 queue = find_first_zero_bit(data.used_hw_queues,
- IWL_MVM_FIRST_AGG_QUEUE);
+ mvm->first_agg_queue);
- if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate cab queue\n");
ret = -EIO;
goto exit_fail;
@@ -559,8 +567,12 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
/* Don't use cts to self as the fw doesn't support it currently. */
- if (vif->bss_conf.use_cts_prot)
+ if (vif->bss_conf.use_cts_prot) {
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+ cmd->protection_flags |=
+ cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+ }
/*
* I think that we should enable these 2 flags regardless the HT PROT
@@ -707,8 +719,35 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
MAC_FILTER_IN_CONTROL_AND_MGMT |
MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_CRC32);
+ mvm->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
MAC_FILTER_IN_PROBE_REQUEST);
+ /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ cmd.ibss.bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+ /* TODO: Assumes that the beacon id == mac context id */
+ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
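
/*
 * Worked example for the bi/bi_reciprocal pair filled in above: with a
 * beacon interval of 100 TU, iwl_mvm_reciprocal(100) = 0xFFFFFFFF / 100
 * = 42949672, i.e. roughly 2^32 / bi as documented in fw-api-mac.h.  The
 * presumable point is to let the firmware replace a division by the beacon
 * interval with a multiplication (the firmware-side use is an assumption).
 */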
@@ -721,7 +760,8 @@ static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
struct iwl_mvm_go_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->ap_ibss_active)
data->go_active = true;
}
@@ -833,9 +873,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
/* Set up TX beacon command fields */
- iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
- beacon->data,
- beacon_skb_len);
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+ beacon->data,
+ beacon_skb_len);
/* Submit command */
cmd.len[0] = sizeof(beacon_cmd);
@@ -848,14 +889,15 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd(mvm, &cmd);
}
-/* The beacon template for the AP/GO context has changed and needs update */
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct sk_buff *beacon;
int ret;
- WARN_ON(vif->type != NL80211_IFTYPE_AP);
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
beacon = ieee80211_beacon_get(mvm->hw, vif);
if (!beacon)
@@ -1018,6 +1060,8 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
case NL80211_IFTYPE_P2P_DEVICE:
return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ case NL80211_IFTYPE_ADHOC:
+ return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
default:
break;
}
@@ -1038,6 +1082,9 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (ret)
return ret;
+ /* will only do anything at resume from D3 time */
+ iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
mvmvif->uploaded = true;
return 0;
}
@@ -1077,6 +1124,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
mvmvif->uploaded = false;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9833cdf6177..74bc2c8af06 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -77,6 +77,7 @@
#include "iwl-eeprom-parse.h"
#include "fw-api-scan.h"
#include "iwl-phy-db.h"
+#include "testmode.h"
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
@@ -138,6 +139,14 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
}
}
+static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+ /* we create the 802.11 header and SSID element */
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
+ return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
+ return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+}
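
/*
 * The arithmetic above: a probe request starts with a 24-byte 802.11
 * management header.  With IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID only the
 * 2-byte SSID element header needs to be reserved, otherwise room for a
 * maximum-size SSID element (2 + 32 = 34 bytes) is reserved.  Whatever is
 * left of max_probe_length is available for user-supplied IEs, e.g. a
 * hypothetical max_probe_length of 512 leaves 512 - 24 - 34 = 454 bytes.
 */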
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -155,10 +164,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_UAPSD;
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
- hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
+ hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->rate_control_algorithm = "iwl-mvm-rs";
@@ -171,6 +179,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+ hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
+ hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ }
+
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
@@ -181,6 +195,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE);
+ /* IBSS has bugs in older firmware versions */
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
@@ -191,8 +209,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- hw->uapsd_queues = IWL_UAPSD_AC_INFO;
- hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -212,9 +228,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
- /* we create the 802.11 header and a max-length SSID element */
- hw->wiphy->max_scan_ie_len =
- mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -231,6 +246,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+ }
+
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
@@ -548,7 +572,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
* In short: there's not much we can do at this point, other than
* allocating resources :)
*/
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
qmask);
@@ -698,7 +723,14 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
* For AP/GO interface, the tear down of the resources allocated to the
* interface is handled as part of the stop_ap flow.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+ if (vif == mvm->noa_vif) {
+ mvm->noa_vif = NULL;
+ mvm->noa_duration = 0;
+ }
+#endif
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
goto out_release;
}
@@ -796,6 +828,27 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
return;
}
iwl_mvm_configure_mcast_filter(mvm, vif);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ /*
+ * If we're restarting then the firmware will
+ * obviously have lost synchronisation with
+ * the AP. It will attempt to synchronise by
+ * itself, but we can make it more reliable by
+ * scheduling a session protection time event.
+ *
+ * The firmware needs to receive a beacon to
+ * catch up with synchronisation, use 110% of
+ * the beacon interval.
+ *
+ * Set a large maximum delay to allow for more
+ * than a single interface.
+ */
+ u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+ iwl_mvm_protect_session(mvm, vif, dur, dur,
+ 5 * dur);
+ }
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
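The session-protection numbers in the restart path above are easy to check with a typical (assumed) 100 TU beacon interval:

	dur       = (11 * 100) / 10 = 110 TU    (about 113 ms, slightly more than one beacon interval)
	max_delay = 5 * 110         = 550 TU    (about 0.56 s, leaving room for other interfaces)

Both duration and min_duration are passed as dur, so the firmware is asked for at least one full beacon interval plus 10 %.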
@@ -811,7 +864,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
/* Workaround for FW bug, otherwise FW disables device
* power save upon disassociation
*/
@@ -819,7 +873,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
- iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+ iwl_mvm_bt_coex_vif_change(mvm);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -848,7 +902,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
-static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -871,7 +926,7 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (ret)
goto out_remove;
- mvmvif->ap_active = true;
+ mvmvif->ap_ibss_active = true;
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
@@ -883,10 +938,12 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (ret)
goto out_rm_bcast;
- /* Need to update the P2P Device MAC */
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_bt_coex_vif_change(mvm);
+
mutex_unlock(&mvm->mutex);
return 0;
@@ -901,7 +958,8 @@ out_unlock:
return ret;
}
-static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -910,9 +968,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&mvm->mutex);
- mvmvif->ap_active = false;
+ mvmvif->ap_ibss_active = false;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
- /* Need to update the P2P Device MAC */
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -924,10 +984,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_unlock(&mvm->mutex);
}
-static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
{
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
@@ -950,7 +1011,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
break;
case NL80211_IFTYPE_AP:
- iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
default:
/* shouldn't happen */
@@ -1163,7 +1225,54 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
/* Try really hard to protect the session and hear a beacon */
- iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
+ IWL_DEBUG_SCAN(mvm,
+ "SCHED SCAN request during internal scan - abort\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+
+ ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+ if (ret)
+ goto err;
+
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ goto err;
+
+ ret = iwl_mvm_sched_scan_start(mvm, req);
+ if (!ret)
+ goto out;
+err:
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
}
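The start handler above behaves as a small state machine: it refuses with -EBUSY while any scan already owns scan_status, commits IWL_MVM_SCAN_SCHED before configuring the firmware, and rolls the status back to IWL_MVM_SCAN_NONE if any of the three configuration steps fails. The firmware-restart hunk in ops.c further down relies on the same state to report ieee80211_sched_scan_stopped() to mac80211.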
@@ -1207,8 +1316,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (cmd) {
case SET_KEY:
- if (vif->type == NL80211_IFTYPE_AP && !sta) {
- /* GTK on AP interface is a TX-only key, return 0 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta) {
+ /*
+ * GTK on AP interface is a TX-only key, return 0;
+ * on IBSS they're per-station and because we're lazy
+ * we don't support them for RX, so do the same.
+ */
ret = 0;
key->hw_key_idx = STA_KEY_IDX_INVALID;
break;
@@ -1252,6 +1366,9 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
}
@@ -1445,6 +1562,7 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
+ iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -1464,14 +1582,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
/*
* The AP binding flow is handled as part of the start_ap flow
- * (in bss_info_changed).
+ * (in bss_info_changed), similarly for IBSS.
*/
ret = 0;
goto out_unlock;
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MONITOR:
break;
default:
@@ -1517,10 +1635,10 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
- if (vif->type == NL80211_IFTYPE_AP)
- goto out_unlock;
-
switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ goto out_unlock;
case NL80211_IFTYPE_MONITOR:
mvmvif->monitor_active = false;
iwl_mvm_update_quotas(mvm, NULL);
@@ -1550,14 +1668,72 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}
-static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+ [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event)
+ void *data, int len)
+{
+ struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+ int err;
+ u32 noa_duration;
+
+ err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[IWL_MVM_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+ case IWL_MVM_TM_CMD_SET_NOA:
+ if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+ !vif->bss_conf.enable_beacon ||
+ !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+ return -EINVAL;
+
+ noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+ if (noa_duration >= vif->bss_conf.beacon_int)
+ return -EINVAL;
+
+ mvm->noa_duration = noa_duration;
+ mvm->noa_vif = vif;
+
+ return iwl_mvm_update_quotas(mvm, NULL);
+ case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+ /* must be associated client vif - ignore authorized */
+ if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+ !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+ return -EINVAL;
+
+ if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int err;
- iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
+ mutex_lock(&mvm->mutex);
+ err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+ mutex_unlock(&mvm->mutex);
+
+ return err;
}
+#endif
struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
@@ -1578,23 +1754,27 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+ .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
.set_key = iwl_mvm_mac_set_key,
.update_tkip_key = iwl_mvm_mac_update_tkip_key,
.remain_on_channel = iwl_mvm_roc,
.cancel_remain_on_channel = iwl_mvm_cancel_roc,
- .rssi_callback = iwl_mvm_mac_rssi_callback,
-
.add_chanctx = iwl_mvm_add_chanctx,
.remove_chanctx = iwl_mvm_remove_chanctx,
.change_chanctx = iwl_mvm_change_chanctx,
.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
- .start_ap = iwl_mvm_start_ap,
- .stop_ap = iwl_mvm_stop_ap,
+ .start_ap = iwl_mvm_start_ap_ibss,
+ .stop_ap = iwl_mvm_stop_ap_ibss,
+ .join_ibss = iwl_mvm_start_ap_ibss,
+ .leave_ibss = iwl_mvm_stop_ap_ibss,
.set_tim = iwl_mvm_set_tim,
+ CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
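CFG80211_TESTMODE_CMD() in the ops table above expands to a .testmode_cmd initializer only when CONFIG_NL80211_TESTMODE is enabled, matching the #ifdef around iwl_mvm_mac_testmode_cmd(). The SET_NOA branch restricts itself to a beaconing P2P GO vif with a NoA duration strictly shorter than the beacon interval; a standalone sketch of just that validation step, using hypothetical types that are not part of the driver:

#include <errno.h>
#include <stdbool.h>

struct tm_vif {
	bool is_p2p_go;            /* AP-type vif with vif->p2p set */
	bool beacon_enabled;       /* bss_conf.enable_beacon        */
	unsigned int beacon_int;   /* beacon interval in TU         */
};

static int validate_noa(const struct tm_vif *vif, unsigned int noa_duration)
{
	if (!vif || !vif->is_p2p_go || !vif->beacon_enabled)
		return -EINVAL;    /* only a beaconing P2P GO qualifies */
	if (noa_duration >= vif->beacon_int)
		return -EINVAL;    /* NoA must fit inside one interval  */
	return 0;                  /* accepted; the quota code rescales */
}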
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b0389279cc1..fed21ef4162 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -73,7 +73,6 @@
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
-#include "iwl-trans.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
@@ -162,6 +161,7 @@ enum iwl_power_scheme {
struct iwl_mvm_power_ops {
int (*power_update_mode)(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+ int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -241,12 +241,18 @@ enum iwl_mvm_smps_type_request {
* @last_beacon_signal: last beacon rssi signal in dbm
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
+* @bt_coex_min_thold: minimum threshold for BT coex
+* @bt_coex_max_thold: maximum threshold for BT coex
+* @last_bt_coex_event: rssi of the last BT coex event
*/
struct iwl_mvm_vif_bf_data {
bool bf_enabled;
bool ba_enabled;
s8 ave_beacon_signal;
s8 last_cqm_event;
+ s8 bt_coex_min_thold;
+ s8 bt_coex_max_thold;
+ s8 last_bt_coex_event;
};
/**
@@ -255,8 +261,8 @@ struct iwl_mvm_vif_bf_data {
* @color: to solve races upon MAC addition and removal
* @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
* @uploaded: indicates the MAC context has been added to the device
- * @ap_active: indicates that ap context is configured, and that the interface
- * should get quota etc.
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ * should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @queue_params: QoS params for this MAC
@@ -272,7 +278,7 @@ struct iwl_mvm_vif {
u8 ap_sta_id;
bool uploaded;
- bool ap_active;
+ bool ap_ibss_active;
bool monitor_active;
struct iwl_mvm_vif_bf_data bf_data;
@@ -306,6 +312,9 @@ struct iwl_mvm_vif {
int tx_key_idx;
+ bool seqno_valid;
+ u16 seqno;
+
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
@@ -333,6 +342,7 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
+ IWL_MVM_SCAN_SCHED,
};
/**
@@ -434,7 +444,7 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
- bool init_ucode_run;
+ bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
@@ -470,6 +480,9 @@ struct iwl_mvm {
enum iwl_scan_status scan_status;
struct iwl_scan_cmd *scan_cmd;
+ /* rx chain antennas set through debugfs for the scan command */
+ u8 scan_rx_ant;
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -479,7 +492,8 @@ struct iwl_mvm {
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool prevent_power_down_d3;
+ bool disable_power_off;
+ bool disable_power_off_d3;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -523,12 +537,23 @@ struct iwl_mvm {
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
+ struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
const struct iwl_mvm_power_ops *pm_ops;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ u32 noa_duration;
+ struct ieee80211_vif *noa_vif;
+#endif
+
+ /* Tx queues */
+ u8 aux_queue;
+ u8 first_agg_queue;
+ u8 last_agg_queue;
};
/* Extract MVM priv from op_mode and _hw */
@@ -570,6 +595,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum ieee80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
@@ -608,6 +636,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -682,6 +711,23 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+/* Scheduled scan */
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req);
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -720,6 +766,13 @@ static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
return mvm->pm_ops->power_disable(mvm, vif);
}
+static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
+{
+ if (mvm->pm_ops->power_update_device_mode)
+ return mvm->pm_ops->power_update_device_mode(mvm);
+ return 0;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -745,6 +798,15 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM_SLEEP
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#else
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -754,7 +816,20 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+
+enum iwl_bt_kill_msk {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_SCO_HID_A2DP,
+ BT_KILL_MSK_REDUCED_TXPOW,
+ BT_KILL_MSK_MAX,
+};
+extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
+extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index edb94ea3165..2beffd028b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -77,7 +77,7 @@ static const int nvm_to_read[] = {
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 6000
+#define IWL_MAX_NVM_SECTION_SIZE 7000
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -259,6 +259,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
#define MAX_NVM_FILE_LEN 16384
/*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
* HOW TO CREATE THE NVM FILE FORMAT:
* ------------------------------
* 1. create hex file, format:
@@ -277,20 +279,23 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
*
* 4. save as "iNVM_xxx.bin" under /lib/firmware
*/
-static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
- int ret, section_id, section_size;
+ int ret, section_size;
+ u16 section_id;
const struct firmware *fw_entry;
const struct {
__le16 word1;
__le16 word2;
u8 data[];
} *file_sec;
- const u8 *eof;
+ const u8 *eof, *temp;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
+
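A worked example of the two header macros just defined, using illustrative 16-bit values:

	NVM_WORD1_LEN(0x0003) = 8 * (0x0003 & 0x03FF) = 24    (section payload length in bytes)
	NVM_WORD2_ID(0x1000)  = 0x1000 >> 12          = 1     (section id)

so each section in the file is a little-endian length/id word pair followed by that many data bytes, which is what the parsing loop below walks over.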
/*
* Obtain NVM image via request_firmware. Since we already used
* request_firmware_nowait() for the firmware binary load and only
@@ -362,12 +367,18 @@ static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
break;
}
- ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
- section_size);
- if (ret < 0) {
- IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
+ IWL_ERR(mvm, "Invalid NVM section ID\n");
+ ret = -EINVAL;
break;
}
+ mvm->nvm_sections[section_id].data = temp;
+ mvm->nvm_sections[section_id].length = section_size;
/* advance to the next section */
file_sec = (void *)(file_sec->data + section_size);
@@ -377,6 +388,28 @@ out:
return ret;
}
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+ int i, ret;
+ u16 section_id;
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
+
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section_id = nvm_to_read[i];
+ ret = iwl_nvm_write_section(mvm, section_id,
+ sections[section_id].data,
+ sections[section_id].length);
+ if (ret < 0) {
+ IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
int iwl_nvm_init(struct iwl_mvm *mvm)
{
int ret, i, section;
@@ -385,36 +418,36 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
/* move to External NVM flow */
- ret = iwl_mvm_load_external_nvm(mvm);
+ ret = iwl_mvm_read_external_nvm(mvm);
if (ret)
return ret;
- }
-
- /* Read From FW NVM */
- IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
-
- /* TODO: find correct NVM max size for a section */
- nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
- GFP_KERNEL);
- if (!nvm_buffer)
- return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
- section = nvm_to_read[i];
- /* we override the constness for initial read */
- ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
- if (ret < 0)
- break;
- temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
- if (!temp) {
- ret = -ENOMEM;
- break;
+ } else {
+ /* Read From FW NVM */
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+ /* TODO: find correct NVM max size for a section */
+ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section = nvm_to_read[i];
+ /* we override the constness for initial read */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+ if (ret < 0)
+ break;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
}
- mvm->nvm_sections[section].data = temp;
- mvm->nvm_sections[section].length = ret;
+ kfree(nvm_buffer);
+ if (ret < 0)
+ return ret;
}
- kfree(nvm_buffer);
- if (ret < 0)
- return ret;
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data)
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2fcc8ef88a6..d86083c6f44 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -224,6 +224,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+ RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+ iwl_mvm_rx_scan_offload_complete_notif, false),
+ RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+ false),
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -249,6 +253,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TIME_EVENT_NOTIFICATION),
CMD(BINDING_CONTEXT_CMD),
CMD(TIME_QUOTA_CMD),
+ CMD(NON_QOS_TX_COUNTER_CMD),
CMD(RADIO_VERSION_NOTIFICATION),
CMD(SCAN_REQUEST_CMD),
CMD(SCAN_ABORT_CMD),
@@ -260,10 +265,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+ CMD(ADD_STA_KEY),
CMD(ADD_STA),
CMD(REMOVE_STA),
CMD(LQ_CMD),
CMD(SCAN_OFFLOAD_CONFIG_CMD),
+ CMD(MATCH_FOUND_NOTIFICATION),
CMD(SCAN_OFFLOAD_REQUEST_CMD),
CMD(SCAN_OFFLOAD_ABORT_CMD),
CMD(SCAN_OFFLOAD_COMPLETE),
@@ -303,6 +310,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
+ CMD(BT_COEX_CI),
};
#undef CMD
@@ -344,6 +352,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
+ mvm->aux_queue = 15;
+ mvm->first_agg_queue = 16;
+ mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ if (mvm->cfg->base_params->num_of_queues == 16) {
+ mvm->aux_queue = 11;
+ mvm->first_agg_queue = 12;
+ }
+
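With the defaults above, hardware exposing N transmit queues (N = base_params->num_of_queues) gets aux_queue 15 and aggregation queues 16 through N - 1; only 16-queue hardware shrinks to aux_queue 11 and aggregation queues 12 through 15. For an illustrative N = 31:

	aux_queue       = 15
	first_agg_queue = 16
	last_agg_queue  = 31 - 1 = 30    (15 aggregation queues)

and for N = 16 the same fields become 11, 12 and 15 (4 aggregation queues).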
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
@@ -401,24 +417,32 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
iwl_mvm_tt_initialize(mvm);
- mutex_lock(&mvm->mutex);
- err = iwl_run_init_mvm_ucode(mvm, true);
- mutex_unlock(&mvm->mutex);
- /* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
+ /*
+ * If the NVM exists in an external file,
+ * there is no need to unnecessarily power up the NIC at driver load
+ */
+ if (iwlwifi_mod_params.nvm_file) {
+ iwl_nvm_init(mvm);
+ } else {
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
+ goto out_free;
+
+ mutex_lock(&mvm->mutex);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ mutex_unlock(&mvm->mutex);
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
+ }
- /* Stop the hw after the ALIVE and NVM has been read */
- if (!iwlmvm_mod_params.init_dbg)
- iwl_trans_stop_hw(mvm->trans, false);
+ /* Stop the hw after the ALIVE and NVM has been read */
+ if (!iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_hw(mvm->trans, false);
+ }
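With nvm_file set, the probe path above now reads the NVM sections straight from the file (iwl_nvm_init() calling iwl_mvm_read_external_nvm()) and skips powering the transport up and running the INIT ucode entirely; the cached mvm->nvm_sections are written to the device later through the new iwl_mvm_load_nvm_to_nic() helper declared in mvm.h above. Without the module parameter the original start_hw / INIT-ucode / stop_hw sequence is kept, just moved into the else branch.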
scan_size = sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
@@ -435,7 +459,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
mvm->pm_ops = &pm_mac_ops;
else
mvm->pm_ops = &pm_legacy_ops;
@@ -449,7 +473,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- iwl_trans_stop_hw(trans, true);
+ if (!iwlwifi_mod_params.nvm_file)
+ iwl_trans_stop_hw(trans, true);
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -715,6 +740,9 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
case IWL_MVM_SCAN_OS:
ieee80211_scan_completed(mvm->hw, true);
break;
+ case IWL_MVM_SCAN_SCHED:
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
}
if (mvm->restart_fw > 0)
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d58e393324e..550824aa84e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -300,11 +300,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
-
if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
BIT(IEEE80211_AC_VI) |
BIT(IEEE80211_AC_BE) |
@@ -319,10 +314,31 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
cmd->heavy_tx_thld_percentage =
IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
cmd->heavy_rx_thld_percentage =
@@ -430,6 +446,32 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
}
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ struct iwl_device_power_cmd cmd = {
+ .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+ };
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ return 0;
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
+ mvm->disable_power_off)
+ cmd.flags &=
+ cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ IWL_DEBUG_POWER(mvm,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+ &cmd);
+}
+
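The flag handling in the new device power command reduces to a small table (and the command is not sent at all when the firmware lacks IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD):

	default                                 flags = POWER_SAVE_ENA
	power_scheme == IWL_POWER_SCHEME_CAM    flags = POWER_SAVE_ENA | CAM
	debugfs disable_power_off(_d3) set      POWER_SAVE_ENA cleared again, any CAM bit kept

so the debugfs knob overrides the default for the currently running ucode, with the _d3 variant applying to the WoWLAN image.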
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
@@ -440,10 +482,11 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -609,6 +652,7 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
+ .power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 5c6ae16ec52..17e2bc827f9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -110,7 +110,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
data->n_interfaces[id]++;
break;
case NL80211_IFTYPE_AP:
- if (mvmvif->ap_active)
+ case NL80211_IFTYPE_ADHOC:
+ if (mvmvif->ap_ibss_active)
data->n_interfaces[id]++;
break;
case NL80211_IFTYPE_MONITOR:
@@ -119,16 +120,45 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
break;
case NL80211_IFTYPE_P2P_DEVICE:
break;
- case NL80211_IFTYPE_ADHOC:
- if (vif->bss_conf.ibss_joined)
- data->n_interfaces[id]++;
- break;
default:
WARN_ON_ONCE(1);
break;
}
}
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct iwl_mvm_vif *mvmvif;
+ int i, phy_id = -1, beacon_int = 0;
+
+ if (!mvm->noa_duration || !mvm->noa_vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ phy_id = mvmvif->phy_ctxt->id;
+ beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+ u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+ u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+
+ if (id != phy_id)
+ continue;
+
+ quota *= (beacon_int - mvm->noa_duration);
+ quota /= beacon_int;
+
+ cmd->quotas[i].quota = cpu_to_le32(quota);
+ }
+#endif
+}
+
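The adjustment above rescales only the quota entries bound to the NoA vif's PHY context, using integer arithmetic. A worked example with illustrative values beacon_int = 100 TU, noa_duration = 25 TU and an original quota of 64:

	quota = 64 * (100 - 25) / 100 = 4800 / 100 = 48    (75 % of the original share)

The multiplication happens before the division, so the rounding error stays within one quota unit.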
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
@@ -196,6 +226,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
/* Give the remainder of the session to the first binding */
le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 4ffaa3fa153..a0b4cc8d9c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -82,13 +82,24 @@ static const u8 ant_toggle_lookup[] = {
[ANT_ABC] = ANT_ABC,
};
-#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##rp##M_INDEX, \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+ IWL_RATE_##rp##M_INDEX, \
IWL_RATE_##rn##M_INDEX }
+#define IWL_DECLARE_MCS_RATE(s) \
+ [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_INVM_INDEX, \
+ IWL_RATE_INVM_INDEX }
+
/*
* Parameter order:
* rate, ht rate, prev rate, next rate
@@ -102,16 +113,17 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
- /* FIXME:RS: ^^ should be INV (legacy) */
+ IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
+ IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
+ IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
+ IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
+ IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
+ IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
+ IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
+ IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
+ IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
+ IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
};
static inline u8 rs_extract_rate(u32 rate_n_flags)
@@ -124,26 +136,30 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
- /* HT rate format */
if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = rs_extract_rate(rate_n_flags);
-
- WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+ idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
- idx += IWL_FIRST_OFDM_RATE;
- /* skip 9M not supported in ht*/
+ /* skip 9M not supported in HT*/
if (idx >= IWL_RATE_9M_INDEX)
idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
- /* legacy rate format, search for match in table */
+ /* skip 9M not supported in VHT*/
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx++;
+ if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+ return idx;
} else {
+ /* legacy rate format, search for match in table */
+
+ u8 legacy_rate = rs_extract_rate(rate_n_flags);
for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
- if (iwl_rates[idx].plcp ==
- rs_extract_rate(rate_n_flags))
+ if (iwl_rates[idx].plcp == legacy_rate)
return idx;
}
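The HT and VHT branches above turn the MCS code into a rate index and then skip the 9 Mbps slot, which has no MCS equivalent in the rate table earlier in this file's diff (6 Mbps carries MCS 0, 12 Mbps carries MCS 1, and so on). Assuming IWL_RATE_MCS_0_INDEX aliases the 6 Mbps index, as that table suggests:

	MCS 0: idx = 0 + IWL_RATE_MCS_0_INDEX                    stays on the 6 Mbps slot
	MCS 1: idx = 1 + IWL_RATE_MCS_0_INDEX (the 9 Mbps slot)  bumped by one to the 12 Mbps slot

which matches the legacy branch, where the PLCP value is simply looked up in the same table.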
@@ -155,6 +171,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
@@ -180,35 +197,52 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
*/
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
- {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
+static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
+ {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
+ {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
- {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
+ {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
+ {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
+ {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
+ {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+ {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
- {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
- {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
+ {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+ {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
- {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
+ {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+ {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
+ {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
+ {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+ {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
};
/* mbps, mcs */
@@ -263,7 +297,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
if (lq_sta->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
}
@@ -275,17 +309,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
{
int ret = -EAGAIN;
- /*
- * Don't create TX aggregation sessions when in high
- * BT traffic, as they would just be disrupted by BT.
- */
- if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
- IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
- BT_MBOX_MSG(&mvm->last_bt_notif,
- 3, TRAFFIC_LOAD));
- return ret;
- }
-
IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -416,49 +439,54 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
*/
/* FIXME:RS:remove this function and put the flags statically in the table */
static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
+ struct iwl_scale_tbl_info *tbl, int index)
{
u32 rate_n_flags = 0;
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwl_rates[index].plcp;
+ rate_n_flags |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
rate_n_flags |= RATE_MCS_CCK_MSK;
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
+ return rate_n_flags;
+ }
+
+ if (is_ht(tbl->lq_type)) {
+ if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
+ index = IWL_LAST_HT_RATE;
}
- rate_n_flags = RATE_MCS_HT_MSK;
+ rate_n_flags |= RATE_MCS_HT_MSK;
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_siso;
- else if (is_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_mimo2;
+ if (is_ht_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
else
WARN_ON_ONCE(1);
+ } else if (is_vht(tbl->lq_type)) {
+ if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+ IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+ index = IWL_LAST_VHT_RATE;
+ }
+ rate_n_flags |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+ else
+ WARN_ON_ONCE(1);
+
} else {
IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40)
- rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_HT_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(mvm, "GF was set with SGI:SISO\n");
- }
- }
- }
+ rate_n_flags |= tbl->bw;
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
return rate_n_flags;
}
@@ -473,7 +501,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
+ u8 nss;
memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
@@ -483,41 +511,62 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
return -EINVAL;
}
tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
+ tbl->bw = 0;
tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl->lq_type = LQ_NONE;
tbl->max_search = IWL_MAX_SEARCH;
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ /* Legacy */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
+ !(rate_n_flags & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
+ tbl->lq_type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_G;
+ tbl->lq_type = LQ_LEGACY_G;
}
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
- tbl->is_ht40 = 1;
-
- mcs = rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
- if (num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
+
+ return 0;
+ }
+
+ /* HT or VHT */
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ RATE_HT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ tbl->lq_type = LQ_HT_SISO;
+ WARN_ON_ONCE(num_of_ant != 1);
+ } else if (nss == 2) {
+ tbl->lq_type = LQ_HT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ tbl->lq_type = LQ_VHT_SISO;
+ WARN_ON_ONCE(num_of_ant != 1);
+ } else if (nss == 2) {
+ tbl->lq_type = LQ_VHT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
} else {
- WARN_ON_ONCE(num_of_ant == 3);
+ WARN_ON_ONCE(1);
}
}
+
+ WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
+ WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_vht(tbl->lq_type));
+
return 0;
}
@@ -550,22 +599,6 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
}
/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool rs_use_green(struct ieee80211_sta *sta)
-{
- /*
- * There's a bug somewhere in this code that causes the
- * scaling to get stuck because GF+SGI can't be combined
- * in SISO rates. Until we find that bug, disable GF, it
- * has only limited benefit and we still interoperate with
- * GF APs since we can always receive GF transmissions.
- */
- return false;
-}
-
-/**
* rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -576,16 +609,15 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
- if (is_legacy(rate_type)) {
+ if (is_legacy(rate_type))
return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else {
- WARN_ON_ONCE(!is_mimo2(rate_type));
- return lq_sta->active_mimo2_rate;
- }
- }
+ else if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else if (is_mimo2(rate_type))
+ return lq_sta->active_mimo2_rate;
+
+ WARN_ON_ONCE(1);
+ return 0;
}
static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
@@ -652,7 +684,6 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
u16 rate_mask;
u16 high_low;
u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
struct iwl_mvm *mvm = lq_sta->drv;
/* check if we need to switch from HT to legacy rates.
@@ -662,15 +693,15 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
switch_to_legacy = 1;
scale_index = rs_ht_to_legacy[scale_index];
if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
+ tbl->lq_type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_G;
+ tbl->lq_type = LQ_LEGACY_G;
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- tbl->is_ht40 = 0;
+ tbl->bw = 0;
tbl->is_SGI = 0;
tbl->max_search = IWL_MAX_SEARCH;
}
@@ -701,7 +732,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
low = scale_index;
out:
- return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+ return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
}
/*
@@ -714,6 +745,18 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
(a->is_SGI == b->is_SGI);
}
+static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+{
+ if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_40;
+ else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_80;
+ else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_160;
+
+ return RATE_MCS_CHAN_WIDTH_20;
+}
+
/*
* mac80211 sends us Tx status
*/
@@ -783,16 +826,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
*/
if (info->band == IEEE80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
+ } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
+ mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
}
+
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
(tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
(tbl_type.ant_type != info->status.antenna) ||
(!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
(!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
(rs_index != mac_index)) {
IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n",
@@ -947,7 +997,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
+ !(is_vht(tbl->lq_type)))) {
tbl->expected_tpt = expected_tpt_legacy;
return;
}
@@ -958,18 +1009,40 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
return;
}
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
/* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
* status */
- if (is_siso(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else {
- WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ if (is_siso(tbl->lq_type)) {
+ switch (tbl->bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_siso_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_siso_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_siso_80MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else if (is_mimo2(tbl->lq_type)) {
+ switch (tbl->bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(1);
}
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
@@ -1084,9 +1157,47 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
return new_rate;
}
-static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+/* Move to the next action and wrap around to the first action in case
+ * we're at the last action. Assumes actions start at 0.
+ */
+static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
+ u8 last_action)
+{
+ BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
+ BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
+ BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
+
+ tbl->action = (tbl->action + 1) % (last_action + 1);
+}
+
+static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
+ tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+ else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ tbl->bw = RATE_MCS_CHAN_WIDTH_40;
+ else
+ tbl->bw = RATE_MCS_CHAN_WIDTH_20;
+}
+
+static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
+ struct ieee80211_sta *sta)
{
- return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (is_ht20(tbl) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ return true;
+ if (is_ht40(tbl) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ return true;
+ if (is_ht80(tbl) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+ return true;
+
+ return false;
}
/*
@@ -1099,7 +1210,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
{
u16 rate_mask;
s32 rate;
- s8 is_green = lq_sta->is_green;
if (!sta->ht_cap.ht_supported)
return -1;
@@ -1113,16 +1223,12 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
- tbl->lq_type = LQ_MIMO2;
+ tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
+ rs_set_bw_from_sta(tbl, sta);
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1134,10 +1240,10 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
rate, rate_mask);
return -1;
}
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+ tbl->current_rate);
return 0;
}
@@ -1150,7 +1256,6 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
- u8 is_green = lq_sta->is_green;
s32 rate;
if (!sta->ht_cap.ht_supported)
@@ -1158,19 +1263,12 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
- tbl->lq_type = LQ_SISO;
+ tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
- tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
+ rs_set_bw_from_sta(tbl, sta);
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1181,9 +1279,9 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
rate, rate_mask);
return -1;
}
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+ tbl->current_rate);
return 0;
}
@@ -1211,14 +1309,10 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
+ case IWL_LEGACY_SWITCH_ANTENNA:
IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
+ if (tx_chains_num <= 1)
break;
/* Don't change antenna if success has been great */
@@ -1273,9 +1367,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1285,9 +1377,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
@@ -1300,12 +1390,10 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
- u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1314,40 +1402,17 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
u8 update_search_tbl_counter = 0;
int ret;
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_MIMO2;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- valid_tx_ant =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ tbl->action = IWL_SISO_SWITCH_ANTENNA;
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
+ case IWL_SISO_SWITCH_ANTENNA:
IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
+ if (tx_chains_num <= 1)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
@@ -1380,23 +1445,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
goto out;
break;
case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
+ if (!rs_sgi_allowed(tbl, sta))
break;
IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(mvm,
- "SGI was set in GF+SISO\n");
- }
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
if (tbl->is_SGI) {
@@ -1405,16 +1459,13 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
break;
}
search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
+ rate_n_flags_from_tbl(mvm, search_tbl, index);
update_search_tbl_counter = 1;
goto out;
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1424,9 +1475,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1440,63 +1489,20 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
- s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
u8 update_search_tbl_counter = 0;
int ret;
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
- tbl->action = IWL_MIMO2_SWITCH_SISO_A;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- tbl->action = IWL_MIMO2_SWITCH_SISO_A;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
-
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
@@ -1521,11 +1527,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
+ if (!rs_sgi_allowed(tbl, sta))
break;
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
@@ -1546,16 +1548,13 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
}
search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
+ rate_n_flags_from_tbl(mvm, search_tbl, index);
update_search_tbl_counter = 1;
goto out;
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1564,9 +1563,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
return 0;
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1660,15 +1657,16 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* setup rate table in uCode
*/
static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
+ int index)
{
u32 rate;
/* Update uCode's rate table. */
- rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
- rs_fill_link_cmd(mvm, lq_sta, rate);
+ rate = rate_n_flags_from_tbl(mvm, tbl, index);
+ rs_fill_link_cmd(mvm, sta, lq_sta, rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
}
@@ -1712,7 +1710,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
u16 rate_scale_index_msk = 0;
- u8 is_green = 0;
u8 active_tbl = 0;
u8 done_search = 0;
u16 high_low;
@@ -1754,11 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = rs_use_green(sta);
- is_green = lq_sta->is_green;
/* current tx rate */
index = lq_sta->last_txrate_idx;
@@ -1797,7 +1789,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
/* get "active" rate info */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
- rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
}
return;
}
@@ -1978,24 +1970,24 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
(current_tpt > (100 * tbl->expected_tpt[low]))))
scale_action = 0;
- if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
if (lq_sta->last_bt_traffic >
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
/*
* don't set scale_action, don't want to scale up if
* the rate scale doesn't otherwise think that is a
* good idea.
*/
} else if (lq_sta->last_bt_traffic <=
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
scale_action = -1;
}
}
lq_sta->last_bt_traffic =
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
@@ -2032,7 +2024,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq)
- rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
rs_stay_in_table(lq_sta, false);
@@ -2071,7 +2063,7 @@ lq_update:
IWL_DEBUG_RATE(mvm,
"Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
- rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+ rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
} else {
done_search = 1;
@@ -2113,7 +2105,7 @@ lq_update:
}
out:
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
lq_sta->last_txrate_idx = index;
}
@@ -2140,7 +2132,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
int rate_idx;
int i;
u32 rate;
- u8 use_green = rs_use_green(sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
@@ -2172,10 +2163,10 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
rs_toggle_antenna(valid_tx_ant, &rate, tbl);
- rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+ rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
- rs_fill_link_cmd(NULL, lq_sta, rate);
+ rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
}
@@ -2190,7 +2181,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
- int rate_idx;
IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
@@ -2215,36 +2205,9 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
if (rate_control_send_low(sta, mvm_sta, txrc))
return;
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
- /* 6M and 9M shared same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO3_6M_PLCP);
- if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
- info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
- info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
+ iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+ info->band, &info->control.rates[0]);
+
info->control.rates[0].count = 1;
}
@@ -2261,6 +2224,24 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
return &sta_priv->lq_sta;
}
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+ return IWL_RATE_MCS_7_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+ return IWL_RATE_MCS_8_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+ return IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ return -1;
+}
+
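
rs_vht_highest_rx_mcs_index() pulls the 2-bit per-stream field out of the VHT RX MCS map and maps it onto the driver's rate indices. A self-contained sketch of that decoding; the SUPPORT_0_7/0_8/0_9 field values are written out as local constants and are assumed to mirror mac80211's IEEE80211_VHT_MCS_* definitions:

#include <stdio.h>
#include <stdint.h>

/* 2-bit field values used in the VHT RX MCS map (assumed to mirror
 * mac80211's IEEE80211_VHT_MCS_SUPPORT_0_7/0_8/0_9, NOT_SUPPORTED).
 */
#define VHT_MCS_SUPPORT_0_7    0
#define VHT_MCS_SUPPORT_0_8    1
#define VHT_MCS_SUPPORT_0_9    2
#define VHT_MCS_NOT_SUPPORTED  3

/* Return the highest supported MCS (7, 8 or 9) for a given number of
 * spatial streams, or -1 if that stream count is not supported.
 */
static int vht_highest_rx_mcs(uint16_t rx_mcs_map, int nss)
{
    int field = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

    switch (field) {
    case VHT_MCS_SUPPORT_0_7:
        return 7;
    case VHT_MCS_SUPPORT_0_8:
        return 8;
    case VHT_MCS_SUPPORT_0_9:
        return 9;
    default:
        return -1;    /* VHT_MCS_NOT_SUPPORTED */
    }
}

int main(void)
{
    /* Example map: 1SS supports MCS 0-9, 2SS supports MCS 0-8,
     * streams 3-8 not supported (remaining fields all set to 3).
     */
    uint16_t map = 0xfff0 | (VHT_MCS_SUPPORT_0_8 << 2) | VHT_MCS_SUPPORT_0_9;

    printf("1SS highest MCS: %d\n", vht_highest_rx_mcs(map, 1)); /* 9 */
    printf("2SS highest MCS: %d\n", vht_highest_rx_mcs(map, 2)); /* 8 */
    printf("3SS highest MCS: %d\n", vht_highest_rx_mcs(map, 3)); /* -1 */
    return 0;
}
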
/*
* Called after adding a new station to initialize rate scaling
*/
@@ -2270,6 +2251,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int i, j;
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct iwl_mvm_sta *sta_priv;
struct iwl_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2298,7 +2280,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = rs_use_green(sta);
lq_sta->band = sband->band;
/*
* active legacy rates as per supported rates bitmap
@@ -2308,25 +2289,54 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+ /* TODO: should probably account for rx_highest for both HT/VHT */
+ if (!vht_cap || !vht_cap->vht_supported) {
+ /* active_siso_rate mask includes 9 MBits (bit 5),
+ * and CCK (bits 0-3), supp_rates[] does not;
+ * shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->is_vht = false;
+ } else {
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ lq_sta->active_siso_rate |= BIT(i);
+ }
+ }
+
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+ lq_sta->active_mimo2_rate |= BIT(i);
+ }
+ }
+
+ /* TODO: avoid MCS9 in 20MHz which isn't valid for 11ac */
+ lq_sta->is_vht = true;
+ }
IWL_DEBUG_RATE(mvm,
- "SISO-RATE=%X MIMO2-RATE=%X\n",
+ "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
lq_sta->active_siso_rate,
- lq_sta->active_mimo2_rate);
+ lq_sta->active_mimo2_rate,
+ lq_sta->is_vht);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
@@ -2358,6 +2368,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta, u32 new_rate)
{
struct iwl_scale_tbl_info tbl_type;
@@ -2429,7 +2440,6 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
&rate_idx);
-
/* Indicate to uCode which entries might be MIMO.
* If initial rate was MIMO, this will finally end up
* as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2455,7 +2465,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
}
/* Don't allow HT rates after next pass.
- * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ * rs_get_lower_rate() will change type to LQ_LEGACY_A
+ * or LQ_LEGACY_G.
+ */
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
@@ -2474,12 +2486,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
lq_cmd->agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
- /*
- * overwrite if needed, pass aggregation time limit
- * to uCode in uSec - This is racy - but heh, at least it helps...
- */
- if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
- lq_cmd->agg_time_limit = cpu_to_le16(1200);
+ if (sta)
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2586,16 +2595,18 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
+ (is_legacy(tbl->lq_type)) ? "legacy" :
+ is_vht(tbl->lq_type) ? "VHT" : "HT");
+ if (is_ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
+ (is_ht20(tbl)) ? "20MHz" :
+ (is_ht40(tbl)) ? "40MHz" :
+ (is_ht80(tbl)) ? "80MHz" : "BAD BW");
+ desc += sprintf(buff+desc, " %s %s\n",
(tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
+ (lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
@@ -2653,7 +2664,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
int desc = 0;
int i, j;
ssize_t ret;
-
+ struct iwl_scale_tbl_info *tbl;
struct iwl_lq_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
@@ -2661,21 +2672,23 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
return -ENOMEM;
for (i = 0; i < LQ_SIZE; i++) {
+ tbl = &(lq_sta->lq_info[i]);
desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+ "%s type=%d SGI=%d BW=%s DUP=0\n"
"rate=0x%X\n",
lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
+ tbl->lq_type,
+ tbl->is_SGI,
+ is_ht20(tbl) ? "20MHz" :
+ is_ht40(tbl) ? "40MHz" :
+ is_ht80(tbl) ? "80MHz" : "ERR",
+ tbl->current_rate);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
+ tbl->win[j].counter,
+ tbl->win[j].success_counter,
+ tbl->win[j].success_ratio);
}
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 335cf168290..5d5344f7070 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -35,9 +35,11 @@
#include "iwl-trans.h"
struct iwl_rs_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_vht_siso;
+ u8 plcp_vht_mimo2;
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
};
@@ -83,35 +85,52 @@ enum {
#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-/* uCode API values for OFDM high-throughput (HT) bit rates */
+/* uCode API values for HT/VHT bit rates */
enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_MIMO3_6M_PLCP = 0x10,
- IWL_RATE_MIMO3_12M_PLCP = 0x11,
- IWL_RATE_MIMO3_18M_PLCP = 0x12,
- IWL_RATE_MIMO3_24M_PLCP = 0x13,
- IWL_RATE_MIMO3_36M_PLCP = 0x14,
- IWL_RATE_MIMO3_48M_PLCP = 0x15,
- IWL_RATE_MIMO3_54M_PLCP = 0x16,
- IWL_RATE_MIMO3_60M_PLCP = 0x17,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+ IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+ IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+ IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+ IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+ IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+ IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+ IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+ IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+ IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+ IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+ IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+ IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+ IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+ IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+ IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+ IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+ IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+ IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+ IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+ IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
};
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
@@ -139,25 +158,33 @@ enum {
#define IWL_RATE_DECREASE_TH 1920 /* 15% */
/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2 3
+enum {
+ IWL_LEGACY_SWITCH_ANTENNA,
+ IWL_LEGACY_SWITCH_SISO,
+ IWL_LEGACY_SWITCH_MIMO2,
+ IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
+ IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
+};
/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2 2
-#define IWL_SISO_SWITCH_GI 3
+enum {
+ IWL_SISO_SWITCH_ANTENNA,
+ IWL_SISO_SWITCH_MIMO2,
+ IWL_SISO_SWITCH_GI,
+ IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
+ IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
+};
/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_GI 4
+enum {
+ IWL_MIMO2_SWITCH_SISO_A,
+ IWL_MIMO2_SWITCH_SISO_B,
+ IWL_MIMO2_SWITCH_GI,
+ IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
+ IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
+};
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
#define IWL_ACTION_LIMIT 3 /* # possible actions */
@@ -188,20 +215,31 @@ enum {
enum iwl_table_type {
LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
+ LQ_LEGACY_G, /* legacy types */
+ LQ_LEGACY_A,
+ LQ_HT_SISO, /* HT types */
+ LQ_HT_MIMO2,
+ LQ_VHT_SISO, /* VHT types */
+ LQ_VHT_MIMO2,
LQ_MAX,
};
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) is_mimo2(tbl)
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
+#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
+#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
+#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
+#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
+#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
+#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
+#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
+#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
+#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
+
+#define is_ht20(tbl) ((tbl)->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(tbl) ((tbl)->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(tbl) ((tbl)->bw == RATE_MCS_CHAN_WIDTH_80)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -232,7 +270,7 @@ struct iwl_scale_tbl_info {
enum iwl_table_type lq_type;
u8 ant_type;
u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
+ u32 bw; /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
@@ -262,7 +300,7 @@ struct iwl_lq_sta {
u64 flush_timer; /* time staying in mode before new search */
u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
+ bool is_vht;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -314,9 +352,8 @@ static inline u8 num_of_ant(u8 mask)
}
/* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum ieee80211_band band);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +365,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
* ieee80211_register_hw
*
*/
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +373,7 @@ extern int iwl_mvm_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 2a8cb5a6053..3a1f3982109 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -300,10 +300,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+ /*
+ * Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
!(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
- return 0;
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* This will be used in several places later */
@@ -422,6 +426,27 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->bf_data.ave_beacon_signal = sig;
+ /* BT Coex */
+ if (mvmvif->bf_data.bt_coex_min_thold !=
+ mvmvif->bf_data.bt_coex_max_thold) {
+ last_event = mvmvif->bf_data.last_bt_coex_event;
+ if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+ (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+ } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+ (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+ }
+ }
+
if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return;
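
The BT-coex block added above is a hysteresis: RSSI_EVENT_HIGH is reported only once the signal climbs above bt_coex_max_thold, RSSI_EVENT_LOW only once it drops below bt_coex_min_thold, and the last reported level suppresses repeats while the signal wanders between the two. A standalone sketch of that state machine; the threshold and signal values are arbitrary illustration, not driver defaults:

#include <stdio.h>

enum rssi_event { RSSI_NONE, RSSI_HIGH, RSSI_LOW };

struct coex_thold {
    int min_thold;   /* lower threshold (dBm) */
    int max_thold;   /* upper threshold (dBm) */
    int last_event;  /* signal at the last reported event, 0 = none yet */
};

/* Mirror of the iterator's logic: report HIGH/LOW only on a genuine
 * crossing, never while the signal stays between the two thresholds.
 */
static enum rssi_event coex_update(struct coex_thold *t, int sig)
{
    if (t->min_thold == t->max_thold)
        return RSSI_NONE;    /* feature disabled */

    if (sig > t->max_thold &&
        (t->last_event <= t->min_thold || t->last_event == 0)) {
        t->last_event = sig;
        return RSSI_HIGH;
    }
    if (sig < t->min_thold &&
        (t->last_event >= t->max_thold || t->last_event == 0)) {
        t->last_event = sig;
        return RSSI_LOW;
    }
    return RSSI_NONE;
}

int main(void)
{
    struct coex_thold t = { .min_thold = -80, .max_thold = -65 };
    int samples[] = { -60, -70, -72, -85, -75, -60 };
    unsigned int i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("sig=%d -> event=%d\n", samples[i],
               coex_update(&t, samples[i]));
    return 0;    /* events fire at -60 (HIGH), -85 (LOW), -60 (HIGH) */
}
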
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 621fb71f282..dff7592e1ff 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -74,8 +74,12 @@
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
u16 rx_chain;
- u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+ u8 rx_ant;
+ if (mvm->scan_rx_ant != ANT_NONE)
+ rx_ant = mvm->scan_rx_ant;
+ else
+ rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -93,10 +97,10 @@ static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
{
- if (vif->bss_conf.assoc)
- return cpu_to_le32(vif->bss_conf.beacon_int);
- else
+ if (!vif->bss_conf.assoc)
return 0;
+
+ return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
}
static inline __le32
@@ -133,11 +137,12 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
* request.
*/
static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req)
+ struct cfg80211_scan_request *req,
+ int first)
{
int fw_idx, req_idx;
- for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+ for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
req_idx--, fw_idx++) {
cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
@@ -153,9 +158,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). The first SSID is already
- * included in the probe template, so we need to set only req->n_ssids - 1 bits
- * in addition to the first bit.
+ * one for each SSID, and set the active bit (first). If the first SSID is
+ * already included in the probe template, we need to set only
+ * req->n_ssids - 1 bits in addition to the first bit.
*/
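
Concretely, the per-channel type word ends up holding one bit per SSID, plus the extra bit needed when the firmware does not take the first SSID from the probe template (the IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID case), and the active bit is cleared again on passive channels. A small sketch of that computation; the value of SCAN_CHANNEL_TYPE_ACTIVE is an assumption made only for the illustration:

#include <stdio.h>

#define SCAN_CHANNEL_TYPE_ACTIVE    0x1    /* assumption for this sketch */

/* Compute the SSID/active bits placed into each channel's type field,
 * mirroring the n_ssids handling in iwl_mvm_scan_fill_channels().
 */
static unsigned int scan_channel_type(int n_ssids, int basic_ssid,
                                      int passive_channel)
{
    unsigned int type = (1u << n_ssids) - 1;    /* one bit per SSID */

    if (!basic_ssid)
        type |= 1u << n_ssids;    /* first SSID not in probe template */

    if (passive_channel)
        type &= ~SCAN_CHANNEL_TYPE_ACTIVE;

    return type;
}

int main(void)
{
    printf("2 SSIDs, basic ssid in template: 0x%x\n",
           scan_channel_type(2, 1, 0));
    printf("2 SSIDs, no basic ssid:          0x%x\n",
           scan_channel_type(2, 0, 0));
    printf("2 SSIDs, passive channel:        0x%x\n",
           scan_channel_type(2, 1, 1));
    return 0;
}
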
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
@@ -170,7 +175,8 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
}
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req)
+ struct cfg80211_scan_request *req,
+ bool basic_ssid)
{
u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
@@ -178,10 +184,14 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
+ int type = BIT(req->n_ssids) - 1;
+
+ if (!basic_ssid)
+ type |= BIT(req->n_ssids);
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
- chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
+ chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
@@ -268,6 +278,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
+ bool basic_ssid = !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
lockdep_assert_held(&mvm->mutex);
BUG_ON(mvm->scan_cmd == NULL);
@@ -302,14 +314,16 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
if (req->n_ssids > 0) {
cmd->passive2active = cpu_to_le16(1);
cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ if (basic_ssid) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ }
} else {
cmd->passive2active = 0;
cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
}
- iwl_mvm_scan_fill_ssids(cmd, req);
+ iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
@@ -326,7 +340,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
req->ie, req->ie_len,
mvm->fw->ucode_capa.max_probe_length));
- iwl_mvm_scan_fill_channels(cmd, req);
+ iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
@@ -377,6 +391,21 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sched_scan_results *notif = (void *)pkt->data;
+
+ if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ }
+
+ return 0;
+}
+
static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@@ -447,3 +476,406 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
}
+
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted");
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_sched_scan_stopped(mvm->hw);
+
+ return 0;
+}
+
+static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sched_scan_ies *ies,
+ enum ieee80211_band band,
+ struct iwl_tx_cmd *cmd,
+ u8 *data)
+{
+ u16 cmd_len;
+
+ cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ cmd->sta_id = mvm->aux_sta.sta_id;
+
+ cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
+
+ cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
+ vif->addr,
+ 1, NULL, 0,
+ ies->ie[band], ies->len[band],
+ SCAN_OFFLOAD_PROBE_REQ_SIZE);
+ cmd->len = cpu_to_le16(cmd_len);
+}
+
+static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_offload_cmd *scan)
+{
+ scan->channel_count =
+ mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+ scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+ scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
+ scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+ scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
+ scan->rep_count = cpu_to_le32(1);
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ int i;
+
+ for (i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ break;
+ if (ssid_list[i].len == ssid_len &&
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+ return -1;
+}
+
+static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_offload_cmd *scan,
+ u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+
+ /*
+ * Copy SSIDs from the match list.
+ * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs
+ * to configure the match list.
+ */
+ for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ scan->direct_scan[i].id = WLAN_EID_SSID;
+ scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
+ memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
+ scan->direct_scan[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ *ssid_bitmap = 0;
+ for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
+ index = iwl_ssid_exist(req->ssids[j].ssid,
+ req->ssids[j].ssid_len,
+ scan->direct_scan);
+ if (index < 0) {
+ if (!req->ssids[j].ssid_len)
+ continue;
+ scan->direct_scan[i].id = WLAN_EID_SSID;
+ scan->direct_scan[i].len = req->ssids[j].ssid_len;
+ memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
+ scan->direct_scan[i].len);
+ *ssid_bitmap |= BIT(i + 1);
+ i++;
+ } else {
+ *ssid_bitmap |= BIT(index + 1);
+ }
+ }
+}
+
+static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_channel_cfg *channels,
+ enum ieee80211_band band,
+ int *head, int *tail,
+ u32 ssid_bitmap)
+{
+ struct ieee80211_supported_band *s_band;
+ int n_probes = req->n_ssids;
+ int n_channels = req->n_channels;
+ u8 active_dwell, passive_dwell;
+ int i, j, index = 0;
+ bool partial;
+
+ /*
+ * We have to configure all supported channels, even if we don't want to
+ * scan on them, but we have to send channels in the order that we want
+ * to scan. So add the requested channels to the head of the list and
+ * the others to the end.
+ */
+ active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
+ passive_dwell = iwl_mvm_get_passive_dwell(band);
+ s_band = &mvm->nvm_data->bands[band];
+
+ for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
+ partial = false;
+ for (j = 0; j < n_channels; j++)
+ if (s_band->channels[i].center_freq ==
+ req->channels[j]->center_freq) {
+ index = *head;
+ (*head)++;
+ /*
+ * Channels that came with the request will be
+ * part of the partial scan.
+ */
+ partial = true;
+ break;
+ }
+ if (!partial) {
+ index = *tail;
+ (*tail)--;
+ }
+ channels->channel_number[index] =
+ cpu_to_le16(ieee80211_frequency_to_channel(
+ s_band->channels[i].center_freq));
+ channels->dwell_time[index][0] = active_dwell;
+ channels->dwell_time[index][1] = passive_dwell;
+
+ channels->iter_count[index] = cpu_to_le16(1);
+ channels->iter_interval[index] = 0;
+
+ if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
+
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
+ if (partial)
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+
+ if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
+
+ /* scan for all SSIDs from req->ssids */
+ channels->type[index] |= cpu_to_le32(ssid_bitmap);
+ }
+}
+
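
iwl_build_channel_cfg() has to describe every supported channel but wants the requested ones scanned first, so it fills the channel array from both ends: requested channels take slots from the head index, everything else falls back from the tail. A standalone sketch of that two-ended placement with made-up channel numbers:

#include <stdio.h>

#define N_SUPPORTED 8

/* Place requested channels at the front of the output array and all
 * other supported channels at the back, as iwl_build_channel_cfg()
 * does with its head/tail indices.
 */
static void order_channels(const int *supported, int n_supported,
                           const int *requested, int n_requested,
                           int *out)
{
    int head = 0, tail = n_supported - 1;
    int i, j, wanted;

    for (i = 0; i < n_supported; i++) {
        wanted = 0;
        for (j = 0; j < n_requested; j++)
            if (supported[i] == requested[j])
                wanted = 1;

        if (wanted)
            out[head++] = supported[i];   /* partial-scan slot */
        else
            out[tail--] = supported[i];   /* full-scan-only slot */
    }
}

int main(void)
{
    int supported[N_SUPPORTED] = { 1, 2, 3, 4, 5, 6, 11, 36 };
    int requested[] = { 6, 36 };
    int out[N_SUPPORTED];
    int i;

    order_channels(supported, N_SUPPORTED, requested, 2, out);
    for (i = 0; i < N_SUPPORTED; i++)
        printf("%d ", out[i]);
    printf("\n");    /* 6 36 come first, the rest fill from the back */
    return 0;
}
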
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ int supported_bands = 0;
+ int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
+ int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ int head = 0;
+ int tail = band_2ghz + band_5ghz;
+ u32 ssid_bitmap;
+ int cmd_len;
+ int ret;
+
+ struct iwl_scan_offload_cfg *scan_cfg;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_CONFIG_CMD,
+ .flags = CMD_SYNC,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (band_2ghz)
+ supported_bands++;
+ if (band_5ghz)
+ supported_bands++;
+
+ cmd_len = sizeof(struct iwl_scan_offload_cfg) +
+ supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+ scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
+ if (!scan_cfg)
+ return -ENOMEM;
+
+ iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+ scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
+
+ iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
+ /* build tx frames for supported bands */
+ if (band_2ghz) {
+ iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+ IEEE80211_BAND_2GHZ,
+ &scan_cfg->scan_cmd.tx_cmd[0],
+ scan_cfg->data);
+ iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+ IEEE80211_BAND_2GHZ, &head, &tail,
+ ssid_bitmap);
+ }
+ if (band_5ghz) {
+ iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+ IEEE80211_BAND_5GHZ,
+ &scan_cfg->scan_cmd.tx_cmd[1],
+ scan_cfg->data +
+ SCAN_OFFLOAD_PROBE_REQ_SIZE);
+ iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+ IEEE80211_BAND_5GHZ, &head, &tail,
+ ssid_bitmap);
+ }
+
+ cmd.data[0] = scan_cfg;
+ cmd.len[0] = cmd_len;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(scan_cfg);
+ return ret;
+}
+
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_blacklist *blacklist;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .flags = CMD_SYNC,
+ .len[1] = sizeof(*profile_cfg),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int blacklist_len;
+ int i;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+ return -EIO;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+ blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+ else
+ blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+ blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
+ if (!blacklist)
+ return -ENOMEM;
+
+ profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+ if (!profile_cfg) {
+ ret = -ENOMEM;
+ goto free_blacklist;
+ }
+
+ cmd.data[0] = blacklist;
+ cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+ cmd.data[1] = profile_cfg;
+
+ /* No blacklist configuration */
+
+ profile_cfg->num_profiles = req->n_match_sets;
+ profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg->profiles[i];
+ profile->ssid_index = i;
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = 0xff;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(profile_cfg);
+free_blacklist:
+ kfree(blacklist);
+
+ return ret;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_req scan_req = {
+ .watchdog = IWL_SCHED_SCAN_WATCHDOG,
+
+ .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
+ .schedule_line[0].delay = req->interval / 1000,
+ .schedule_line[0].full_scan_mul = 1,
+
+ .schedule_line[1].iterations = 0xff,
+ .schedule_line[1].delay = req->interval / 1000,
+ .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
+ };
+
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, filter len %d\n",
+ req->n_match_sets);
+ scan_req.flags |=
+ cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
+ } else {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending Scheduled scan without filtering\n");
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+ sizeof(scan_req), &scan_req);
+}
+
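
The scheduled-scan request is expressed as two schedule lines: a short first line at the requested interval, then an open-ended line (0xff iterations) in which every Nth iteration becomes a full scan. The sketch below lays out such a plan; the iteration count and multiplier are placeholders rather than the driver's IWL_FAST_SCHED_SCAN_ITERATIONS / IWL_FULL_SCAN_MULTIPLIER values, and the delay appears to be the cfg80211 interval converted from milliseconds to seconds:

#include <stdio.h>

/* Placeholder values -- illustrative only, not the driver's constants. */
#define FAST_ITERATIONS        3
#define FULL_SCAN_MULTIPLIER   2

struct schedule_line {
    unsigned int iterations;     /* 0xff = run until stopped */
    unsigned int delay;          /* seconds between iterations (assumed) */
    unsigned int full_scan_mul;  /* every Nth iteration is a full scan */
};

int main(void)
{
    unsigned int interval_ms = 10000;    /* from the cfg80211 request */
    struct schedule_line plan[2] = {
        { FAST_ITERATIONS, interval_ms / 1000, 1 },
        { 0xff, interval_ms / 1000, FULL_SCAN_MULTIPLIER },
    };
    int i;

    for (i = 0; i < 2; i++)
        printf("line %d: iterations=0x%x delay=%us full_scan_mul=%u\n",
               i, plan[i].iterations, plan[i].delay,
               plan[i].full_scan_mul);
    return 0;
}
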
+static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_ABORT_CMD,
+ .flags = CMD_SYNC,
+ };
+ u32 status;
+
+ /* Exit instantly with an error if the device is not ready
+ * to receive the scan abort command or is not currently
+ * performing a scheduled scan */
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
+ return -EIO;
+
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+ if (ret)
+ return ret;
+
+ if (status != CAN_ABORT_STATUS) {
+ /*
+ * The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan, which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan has completed.
+ */
+ IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
+ IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
+ return;
+ }
+
+ ret = iwl_mvm_send_sched_scan_abort(mvm);
+ if (ret)
+ IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
+ else
+ IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 44add291531..329952363a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,6 +66,115 @@
#include "sta.h"
#include "rs.h"
+static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+ struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
+{
+ memset(cmd_v5, 0, sizeof(*cmd_v5));
+
+ cmd_v5->add_modify = cmd_v6->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v6->sta_id;
+ cmd_v5->modify_mask = cmd_v6->modify_mask;
+ cmd_v5->station_flags = cmd_v6->station_flags;
+ cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v6->assoc_id;
+ cmd_v5->beamform_flags = cmd_v6->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+}
+
+static void
+iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
+ struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
+ u32 mac_id_n_color)
+{
+ memset(sta_cmd, 0, sizeof(*sta_cmd));
+
+ sta_cmd->sta_id = key_cmd->sta_id;
+ sta_cmd->add_modify = STA_MODE_MODIFY;
+ sta_cmd->modify_mask = STA_MODIFY_KEY;
+ sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
+
+ sta_cmd->key.key_offset = key_cmd->key_offset;
+ sta_cmd->key.key_flags = key_cmd->key_flags;
+ memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
+ sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
+ memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
+ sizeof(sta_cmd->key.tkip_rx_ttak));
+}
+
+static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ int *status)
+{
+ struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
+ cmd, status);
+
+ iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
+ &cmd_v5, status);
+}
+
+static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
+ struct iwl_mvm_add_sta_cmd_v6 *cmd)
+{
+ struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
+ &cmd_v5);
+}
+
+static int
+iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_key_cmd *cmd,
+ u32 mac_id_n_color,
+ int *status)
+{
+ struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
+ sizeof(*cmd), cmd, status);
+
+ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
+ &sta_cmd, status);
+}
+
+static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
+ u32 flags,
+ struct iwl_mvm_add_sta_key_cmd *cmd,
+ u32 mac_id_n_color)
+{
+ struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
+ &sta_cmd);
+}
+
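
All of the ADD_STA wrappers above follow one pattern: if the firmware advertises IWL_UCODE_TLV_FLAGS_STA_KEY_CMD, the new v6 layout (or the dedicated key command) is sent as-is; otherwise the payload is repacked into the older v5 structure first. A condensed sketch of that capability-driven fallback, with made-up structures, a made-up flag value and a stub send function standing in for the real firmware API:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define UCODE_FLAG_NEW_STA_API    (1u << 0)    /* illustrative only */

struct sta_cmd_v6 { uint8_t sta_id; uint8_t add_modify; uint32_t new_field; };
struct sta_cmd_v5 { uint8_t sta_id; uint8_t add_modify; };

/* Stub transport: a real driver would hand the buffer to the firmware. */
static int send_cmd(const void *buf, size_t len)
{
    printf("sending %zu-byte ADD_STA payload\n", len);
    return 0;
}

static void cmd_v6_to_v5(const struct sta_cmd_v6 *v6, struct sta_cmd_v5 *v5)
{
    memset(v5, 0, sizeof(*v5));
    v5->sta_id = v6->sta_id;        /* copy only the fields v5 knows */
    v5->add_modify = v6->add_modify;
}

static int send_add_sta(uint32_t ucode_flags, const struct sta_cmd_v6 *cmd)
{
    struct sta_cmd_v5 v5;

    if (ucode_flags & UCODE_FLAG_NEW_STA_API)
        return send_cmd(cmd, sizeof(*cmd));    /* new firmware: v6 as-is */

    cmd_v6_to_v5(cmd, &v5);                    /* old firmware: downgrade */
    return send_cmd(&v5, sizeof(v5));
}

int main(void)
{
    struct sta_cmd_v6 cmd = { .sta_id = 2, .add_modify = 1, .new_field = 7 };

    send_add_sta(UCODE_FLAG_NEW_STA_API, &cmd);
    send_add_sta(0, &cmd);
    return 0;
}
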
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
{
int sta_id;
@@ -87,7 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -175,8 +284,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
- &add_sta_cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
if (ret)
return ret;
@@ -229,8 +337,12 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
- /* for HW restart - need to reset the seq_number etc... */
- memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+ /* for HW restart - reset everything but the sequence number */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_sta->tid_data[i].seq_number;
+ memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+ mvm_sta->tid_data[i].seq_number = seq;
+ }
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
@@ -256,7 +368,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -269,8 +381,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -469,13 +580,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd cmd;
+ struct iwl_mvm_add_sta_cmd_v6 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -485,8 +596,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -534,10 +644,14 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *bsta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ static const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ baddr = vif->bss_conf.bssid;
+
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
@@ -614,7 +728,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -638,8 +752,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -674,7 +787,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -696,8 +809,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -743,13 +855,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
- for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
- txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+ for (txq_id = mvm->first_agg_queue;
+ txq_id <= mvm->last_agg_queue; txq_id++)
if (mvm->queue_to_mac80211[txq_id] ==
IWL_INVALID_MAC80211_QUEUE)
break;
- if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+ if (txq_id > mvm->last_agg_queue) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
return -EIO;
}
@@ -987,10 +1099,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u32 cmd_flags)
{
__le16 key_flags;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
int ret, status;
u16 keyidx;
int i;
+ u32 mac_id_n_color = mvm_sta->mac_id_n_color;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1000,14 +1113,14 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
- cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ cmd.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
- cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
- memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
- memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
default:
WARN_ON(1);
@@ -1017,20 +1130,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.key.key_offset = keyconf->hw_key_idx;
- cmd.key.key_flags = key_flags;
- cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.key_offset = keyconf->hw_key_idx;
+ cmd.key_flags = key_flags;
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
if (cmd_flags == CMD_SYNC)
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+ mac_id_n_color,
+ &status);
else
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
+ mac_id_n_color);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1197,7 +1308,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_sta *mvm_sta;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
int ret, status;
u8 sta_id;
@@ -1252,17 +1363,14 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.key.key_flags = key_flags;
- cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.key_flags = key_flags;
+ cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
- cmd.modify_mask = STA_MODIFY_KEY;
- cmd.add_modify = STA_MODE_MODIFY;
-
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+ mvm_sta->mac_id_n_color,
+ &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1309,7 +1417,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1317,7 +1425,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1331,7 +1439,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1346,7 +1454,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
int ret;
/* TODO: somehow the fw doesn't seem to take PS_POLL into account */
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
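
Taken together, the sta.c hunks stop open-coding the ADD_STA payload size at every call site: each send now goes through an iwl_mvm_send_add_sta*() wrapper, and the callers build the explicitly-versioned v6 layout, which suggests the wrapper picks the on-the-wire format from what the loaded firmware supports. A minimal userspace sketch of that pattern follows; cmd_v6/cmd_v7, fw_has_new_api() and send_pdu() are invented stand-ins, not the driver's real API, and the version numbers are only illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct add_sta_cmd_v6 { uint8_t sta_id; uint8_t addr[6]; };              /* older layout */
struct add_sta_cmd_v7 { uint8_t sta_id; uint8_t addr[6]; uint8_t extra; };   /* newer layout */

static int fw_has_new_api(void) { return 0; }        /* stand-in for a firmware capability bit */

static int send_pdu(const void *buf, size_t len)     /* stand-in for the transport call */
{
    printf("sending %zu byte command\n", len);
    return 0;
}

/* One wrapper hides the version choice from every caller. */
static int send_add_sta_cmd(const struct add_sta_cmd_v7 *cmd)
{
    if (fw_has_new_api())
        return send_pdu(cmd, sizeof(*cmd));

    /* older firmware: copy only the fields it knows about */
    struct add_sta_cmd_v6 v6;
    memset(&v6, 0, sizeof(v6));
    v6.sta_id = cmd->sta_id;
    memcpy(v6.addr, cmd->addr, sizeof(v6.addr));
    return send_pdu(&v6, sizeof(v6));
}

int main(void)
{
    struct add_sta_cmd_v7 cmd = { .sta_id = 2 };
    return send_add_sta_cmd(&cmd);
}

The benefit shows in the hunks above: when the command layout changes again, only the wrapper needs to learn the new size, not every ADD_STA caller.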
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 94b265eb32b..4dfc359a4bd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -293,10 +293,6 @@ struct iwl_mvm_sta {
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
-#ifdef CONFIG_PM_SLEEP
- u16 last_seq_ctl;
-#endif
-
/* Temporary, until the new TLC will control the Tx protection */
s8 tx_protection;
bool tt_tx_protection;
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
new file mode 100644
index 00000000000..eb74391d91c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+ IWL_MVM_TM_ATTR_UNSPEC,
+ IWL_MVM_TM_ATTR_CMD,
+ IWL_MVM_TM_ATTR_NOA_DURATION,
+ IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+ /* keep last */
+ NUM_IWL_MVM_TM_ATTRS,
+ IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+ IWL_MVM_TM_CMD_SET_NOA,
+ IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 76a3c177e10..33cf56fdfc4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -387,7 +387,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration)
+ u32 duration, u32 min_duration,
+ u32 max_delay)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -426,7 +427,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
time_cmd.max_frags = TE_V2_FRAG_NONE;
- time_cmd.max_delay = cpu_to_le32(500);
+ time_cmd.max_delay = cpu_to_le32(max_delay);
/* TODO: why do we need to interval = bi if it is not periodic? */
time_cmd.interval = cpu_to_le32(1);
time_cmd.duration = cpu_to_le32(duration);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index f86c51065ed..d9c8d6cfa2d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -123,6 +123,7 @@
* @duration: the duration of the session in TU.
* @min_duration: will start a new session if the current session will end
* in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
*
* This function can be used to start a session protection which means that the
* fw will stay on the channel for %duration_ms milliseconds. This function
@@ -133,7 +134,8 @@
*/
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration);
+ u32 duration, u32 min_duration,
+ u32 max_delay);
/**
* iwl_mvm_stop_session_protection - cancel the session protection.
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index e05440d9031..43d97c33a75 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -417,7 +417,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+ if (txq_id < mvm->first_agg_queue)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -511,16 +511,10 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
}
#endif /* CONFIG_IWLWIFI_DEBUG */
-/**
- * translate ucode response to mac80211 tx status control values
- */
-static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
- struct ieee80211_tx_info *info)
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct ieee80211_tx_rate *r)
{
- struct ieee80211_tx_rate *r = &info->status.rates[0];
-
- info->status.antenna =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
@@ -549,10 +543,23 @@ static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
r->flags |= IEEE80211_TX_RC_VHT_MCS;
} else {
r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- info->band);
+ band);
}
}
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+ info->status.antenna =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -602,11 +609,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
}
info->status.rates[0].count = tx_resp->failure_frame + 1;
- iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+ info);
/* Single frame failure in an AMPDU queue => send BAR */
- if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE &&
+ if (txq_id >= mvm->first_agg_queue &&
!(info->flags & IEEE80211_TX_STAT_ACK))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -619,7 +626,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status_ni(mvm->hw, skb);
}
- if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which
@@ -668,10 +675,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
-
-#ifdef CONFIG_PM_SLEEP
- mvmsta->last_seq_ctl = seq_ctl;
-#endif
} else {
sta = NULL;
mvmsta = NULL;
@@ -681,7 +684,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+ if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
if (mvmsta) {
/*
@@ -777,7 +780,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct ieee80211_sta *sta;
- if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE))
+ if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -904,8 +907,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
- iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
- info);
+ iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
+ info);
}
}
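
The rate-translation rework in tx.c factors the band-dependent conversion out of the status path, so a caller can translate a firmware rate word without carrying a full ieee80211_tx_info around. A toy version of that split, with invented field layouts and bit positions:

#include <stdio.h>
#include <stdint.h>

enum band { BAND_2GHZ, BAND_5GHZ };
struct tx_rate { int idx; };
struct tx_info { enum band band; struct tx_rate rate; int antenna; };

/* Reusable core: needs only the raw rate word and the band. */
static void hwrate_to_tx_rate(uint32_t rate_n_flags, enum band band, struct tx_rate *r)
{
    /* toy mapping: 5 GHz has no CCK rates, so legacy indices shift down by 4 */
    r->idx = (int)(rate_n_flags & 0xff) - (band == BAND_5GHZ ? 4 : 0);
}

/* Thin wrapper: extracts the status-only fields, then delegates. */
static void hwrate_to_tx_status(uint32_t rate_n_flags, struct tx_info *info)
{
    info->antenna = (rate_n_flags >> 14) & 0x7;
    hwrate_to_tx_rate(rate_n_flags, info->band, &info->rate);
}

int main(void)
{
    struct tx_info info = { .band = BAND_5GHZ };

    hwrate_to_tx_status(0x4007, &info);
    printf("idx=%d antenna=%d\n", info.rate.idx, info.antenna);
    return 0;
}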
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a9c35749143..ed69e9b78e8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -466,7 +466,7 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
len = img->sec[IWL_UCODE_SECTION_DATA].len;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 26108a1a29f..941c0c88f98 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -268,7 +268,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
-/* 7000 Series */
+/* 7260 Series */
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
@@ -350,6 +350,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -391,7 +394,6 @@ out_free_drv:
iwl_drv_stop(trans_pcie->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -402,8 +404,6 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_drv_stop(trans_pcie->drv);
iwl_trans_pcie_free(trans);
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index c3f904d422b..5d9337bec67 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -220,6 +220,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+
set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
out:
@@ -443,22 +446,138 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
+static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+{
+ int shift_param;
+ u32 address;
+ int ret = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ } else {
+ shift_param = 16;
+ address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ }
+
+ /* set CPU to started */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_LOADING_STARTED << shift_param,
+ 1);
+
+ /* set last complete descriptor number */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
+ << shift_param,
+ 1);
+
+ /* set last loaded block */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
+ << shift_param,
+ 1);
+
+ /* image loading complete */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_LOADING_COMPLETED
+ << shift_param,
+ 1);
+
+ /* set FH_TCSR_0_REG */
+ iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
+
+ /* verify image verification started */
+ ret = iwl_poll_bit(trans, address,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+ CSR_SECURE_TIME_OUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "secure boot process didn't start\n");
+ return ret;
+ }
+
+ /* wait for image verification to complete */
+ ret = iwl_poll_bit(trans, address,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+ CSR_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
int i, ret = 0;
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ IWL_DEBUG_FW(trans,
+ "working with %s image\n",
+ image->is_secure ? "Secured" : "Non Secured");
+ IWL_DEBUG_FW(trans,
+ "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ if (image->is_secure) {
+ /* set secure boot inspector addresses */
+ iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
+ iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
+
+ /* release CPU1 reset if secure inspector image burned in OTP */
+ iwl_write32(trans, CSR_RESET, 0);
+ }
+
+ /* load to FW the binary sections of CPU1 */
+ IWL_DEBUG_INFO(trans, "Loading CPU1\n");
+ for (i = 0;
+ i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+ i++) {
if (!image->sec[i].data)
break;
-
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
+ /* configure the ucode to start secure process on CPU1 */
+ if (image->is_secure) {
+ /* config CPU1 to start secure protocol */
+ ret = iwl_pcie_secure_set(trans, 1);
+ if (ret)
+ return ret;
+ } else {
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(trans, CSR_RESET, 0);
+ }
+
+ if (image->is_dual_cpus) {
+ /* load to FW the binary sections of CPU2 */
+ IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
+ for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+ i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].data)
+ break;
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (image->is_secure) {
+ /* set CPU2 for secure protocol */
+ ret = iwl_pcie_secure_set(trans, 2);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
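
The new load path splits the firmware image at a fixed section index: everything below it goes to the first CPU, the remainder (when the image is dual-CPU) to the second, with the secure handshake run per CPU. A compact userspace sketch of that split, using invented section data instead of the real CSR accesses:

#include <stdio.h>
#include <stddef.h>

#define SECTION_MAX                 8
#define FIRST_SECTION_OF_SECOND_CPU 4   /* illustrative split point */

struct fw_section { const char *name; size_t len; };
struct fw_image {
    int is_dual_cpus;
    struct fw_section sec[SECTION_MAX];
};

static int load_section(int idx, const struct fw_section *s)
{
    printf("loading section %d (%s, %zu bytes)\n", idx, s->name, s->len);
    return 0;
}

static int load_image(const struct fw_image *img)
{
    int i, ret;

    /* CPU1 sections: stop at the split point or the first empty slot */
    for (i = 0; i < FIRST_SECTION_OF_SECOND_CPU; i++) {
        if (!img->sec[i].name)
            break;
        ret = load_section(i, &img->sec[i]);
        if (ret)
            return ret;
    }

    if (!img->is_dual_cpus)
        return 0;

    /* CPU2 sections: the remainder of the array */
    for (i = FIRST_SECTION_OF_SECOND_CPU; i < SECTION_MAX; i++) {
        if (!img->sec[i].name)
            break;
        ret = load_section(i, &img->sec[i]);
        if (ret)
            return ret;
    }
    return 0;
}

int main(void)
{
    struct fw_image img = {
        .is_dual_cpus = 1,
        .sec = { { "cpu1-code", 1024 }, { "cpu1-data", 512 },
                 { NULL, 0 },           { NULL, 0 },
                 { "cpu2-code", 768 } },
    };
    return load_image(&img);
}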
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 1424335163b..059c5acad3a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1465,7 +1465,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_unlock_bh(&txq->lock);
}
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define COMMAND_POKE_TIMEOUT (HZ / 10)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1493,16 +1494,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
+ int timeout = HOST_COMPLETE_TIMEOUT;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
- if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))) {
- IWL_ERR(trans, "Command %s: a command is already active!\n",
- get_cmd_string(trans_pcie, cmd->id));
+ if (WARN(test_and_set_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ "Command %s: a command is already active!\n",
+ get_cmd_string(trans_pcie, cmd->id)))
return -EIO;
- }
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
@@ -1517,10 +1518,29 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
return ret;
}
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
- HOST_COMPLETE_TIMEOUT);
+ while (timeout > 0) {
+ unsigned long flags;
+
+ timeout -= COMMAND_POKE_TIMEOUT;
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ COMMAND_POKE_TIMEOUT);
+ if (ret)
+ break;
+ /* poke the device - it may have lost the command */
+ if (iwl_trans_grab_nic_access(trans, true, &flags)) {
+ iwl_trans_release_nic_access(trans, &flags);
+ IWL_DEBUG_INFO(trans,
+ "Tried to wake NIC for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ } else {
+ IWL_ERR(trans, "Failed to poke NIC for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ break;
+ }
+ }
+
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
struct iwl_txq *txq =
@@ -1541,6 +1561,9 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
"Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
+
+ iwl_op_mode_nic_error(trans->op_mode);
+
goto cancel;
}
}
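
Instead of one long wait_event_timeout(), the sync-command path above now waits in short slices and, between slices, grabs NIC access so the device is woken in case it dropped the command while asleep; only after the full budget expires does it declare an error. Roughly, as a plain-C loop with stand-in wait/poke helpers:

#include <stdio.h>
#include <stdbool.h>

#define TOTAL_TIMEOUT_MS 2000
#define POKE_INTERVAL_MS 100

static int slices_waited;

/* Stand-in for wait_event_timeout(): returns true once the response arrives. */
static bool wait_for_response(int ms)
{
    (void)ms;
    return ++slices_waited >= 7;    /* pretend the answer shows up on the 7th slice */
}

/* Stand-in for grabbing NIC access, which wakes the device as a side effect. */
static bool poke_device(void)
{
    printf("poking the device to make sure it is awake\n");
    return true;
}

int main(void)
{
    int timeout = TOTAL_TIMEOUT_MS;
    bool done = false;

    while (timeout > 0) {
        timeout -= POKE_INTERVAL_MS;
        if (wait_for_response(POKE_INTERVAL_MS)) {
            done = true;
            break;
        }
        if (!poke_device())
            break;                  /* device unreachable: give up early */
    }

    printf(done ? "command completed\n" : "command timed out\n");
    return done ? 0 : 1;
}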
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 668dd27616a..cc6a0a586f0 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
char *p2;
struct debug_data *d = f->private_data;
- pdata = kmalloc(cnt, GFP_KERNEL);
+ if (cnt == 0)
+ return 0;
+
+ pdata = kmalloc(cnt + 1, GFP_KERNEL);
if (pdata == NULL)
return 0;
@@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
kfree(pdata);
return 0;
}
+ pdata[cnt] = '\0';
p0 = pdata;
for (i = 0; i < num_of_items; i++) {
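
The debugfs fix allocates one byte more than the user supplied and NUL-terminates the copy, because the parsing code below treats the buffer as a C string. The same pattern in a self-contained form, with read() standing in for copy_from_user():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Parse a number from up to cnt bytes of fd; the raw bytes are NOT NUL-terminated. */
static long parse_number(int fd, size_t cnt)
{
    if (cnt == 0)
        return -1;

    char *buf = malloc(cnt + 1);      /* +1 for the terminator */
    if (!buf)
        return -1;

    ssize_t n = read(fd, buf, cnt);
    if (n <= 0) {
        free(buf);
        return -1;
    }
    buf[n] = '\0';                    /* now strtol() cannot run off the end */

    long val = strtol(buf, NULL, 0);
    free(buf);
    return val;
}

int main(void)
{
    long v = parse_number(STDIN_FILENO, 16);

    printf("parsed: %ld\n", v);
    return 0;
}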
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index c0f9e7e862f..51b92b5df11 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -53,6 +53,11 @@ static void main_firmware_cb(const struct firmware *firmware, void *context)
/* Firmware found! */
lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
+ if (priv->helper_fw) {
+ release_firmware (priv->helper_fw);
+ priv->helper_fw = NULL;
+ }
+ release_firmware (firmware);
}
static void helper_firmware_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index c94dd680267..f499efc6abc 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -754,14 +754,14 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
if (ret == 0 && (card->model != MODEL_8305))
ret = if_cs_prog_real(card, mainfw);
if (ret)
- goto out;
+ return;
/* Now actually get the IRQ */
ret = request_irq(card->p_dev->irq, if_cs_interrupt,
IRQF_SHARED, DRV_NAME, card);
if (ret) {
pr_err("error in request_irq\n");
- goto out;
+ return;
}
/*
@@ -777,10 +777,6 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
pr_err("could not activate card\n");
free_irq(card->p_dev->irq, card);
}
-
-out:
- release_firmware(helper);
- release_firmware(mainfw);
}
@@ -906,6 +902,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
if (card->model == MODEL_UNKNOWN) {
pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n",
p_dev->manf_id, p_dev->card_id);
+ ret = -ENODEV;
goto out2;
}
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 45578335e42..991238afd1b 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -708,20 +708,16 @@ static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
ret = if_sdio_prog_helper(card, helper);
if (ret)
- goto out;
+ return;
lbs_deb_sdio("Helper firmware loaded\n");
ret = if_sdio_prog_real(card, mainfw);
if (ret)
- goto out;
+ return;
lbs_deb_sdio("Firmware loaded\n");
if_sdio_finish_power_on(card);
-
-out:
- release_firmware(helper);
- release_firmware(mainfw);
}
static int if_sdio_prog_firmware(struct if_sdio_card *card)
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 4bb6574f407..83669151bb8 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1094,11 +1094,7 @@ static int if_spi_init_card(struct if_spi_card *card)
goto out;
out:
- release_firmware(helper);
- release_firmware(mainfw);
-
lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
-
return err;
}
@@ -1128,7 +1124,7 @@ static int if_spi_probe(struct spi_device *spi)
{
struct if_spi_card *card;
struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
int err = 0;
lbs_deb_enter(LBS_DEB_SPI);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 27980778d99..dff08a2896a 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -844,7 +844,7 @@ static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
cardp->fw = fw;
if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
ret = -EINVAL;
- goto release_fw;
+ goto done;
}
/* Cancel any pending usb business */
@@ -861,7 +861,7 @@ restart:
if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
ret = -EIO;
- goto release_fw;
+ goto done;
}
cardp->bootcmdresp = 0;
@@ -883,14 +883,14 @@ restart:
usb_kill_urb(cardp->tx_urb);
if (if_usb_submit_rx_urb(cardp) < 0)
ret = -EIO;
- goto release_fw;
+ goto done;
} else if (cardp->bootcmdresp <= 0) {
if (--reset_count >= 0) {
if_usb_reset_device(cardp);
goto restart;
}
ret = -EIO;
- goto release_fw;
+ goto done;
}
i = 0;
@@ -921,14 +921,14 @@ restart:
pr_info("FW download failure, time = %d ms\n", i * 100);
ret = -EIO;
- goto release_fw;
+ goto done;
}
cardp->priv->fw_ready = 1;
if_usb_submit_rx_urb(cardp);
if (lbs_start_card(priv))
- goto release_fw;
+ goto done;
if_usb_setup_firmware(priv);
@@ -939,11 +939,8 @@ restart:
if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
priv->ehs_remove_supported = false;
- release_fw:
- release_firmware(cardp->fw);
- cardp->fw = NULL;
-
done:
+ cardp->fw = NULL;
lbs_deb_leave(LBS_DEB_USB);
}
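
Across firmware.c and the if_cs/if_sdio/if_spi/if_usb callbacks, the release_firmware() calls move out of the per-interface code and into the common completion path, so the firmware buffers are freed exactly once, by their single owner, on both success and error. A generic sketch of that ownership rule (fw_blob, load_done_cb and finish_load are invented names, not the libertas API):

#include <stdio.h>
#include <stdlib.h>

struct fw_blob { char *data; size_t len; };

typedef void (*load_done_cb)(const struct fw_blob *helper, const struct fw_blob *main_fw);

/* Per-interface callback: may use the blobs, must NOT free them. */
static void program_device(const struct fw_blob *helper, const struct fw_blob *main_fw)
{
    if (helper)
        printf("programming helper (%zu bytes)\n", helper->len);
    printf("programming main image (%zu bytes)\n", main_fw->len);
}

/* Common completion path: sole owner of the blobs, frees them after the callback. */
static void finish_load(struct fw_blob *helper, struct fw_blob *main_fw, load_done_cb cb)
{
    cb(helper, main_fw);

    if (helper) {
        free(helper->data);
        free(helper);
    }
    free(main_fw->data);
    free(main_fw);
}

static struct fw_blob *fake_blob(size_t len)   /* allocation errors elided for brevity */
{
    struct fw_blob *b = malloc(sizeof(*b));

    b->data = calloc(1, len);
    b->len = len;
    return b;
}

int main(void)
{
    finish_load(fake_blob(256), fake_blob(4096), program_device);
    return 0;
}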
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2cd3f54e1ef..9df7bc91a26 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -167,6 +167,7 @@ struct hwsim_vif_priv {
u32 magic;
u8 bssid[ETH_ALEN];
bool assoc;
+ bool bcn_en;
u16 aid;
};
@@ -1170,6 +1171,16 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
*total_flags = data->rx_filter;
}
+static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ unsigned int *count = data;
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+ if (vp->bcn_en)
+ (*count)++;
+}
+
static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -1180,7 +1191,8 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
hwsim_check_magic(vif);
- wiphy_debug(hw->wiphy, "%s(changed=0x%x)\n", __func__, changed);
+ wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
+ __func__, changed, vif->addr);
if (changed & BSS_CHANGED_BSSID) {
wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n",
@@ -1202,6 +1214,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
wiphy_debug(hw->wiphy, " BCN EN: %d\n", info->enable_beacon);
+ vp->bcn_en = info->enable_beacon;
if (data->started &&
!hrtimer_is_queued(&data->beacon_timer.timer) &&
info->enable_beacon) {
@@ -1215,8 +1228,16 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
tasklet_hrtimer_start(&data->beacon_timer,
ns_to_ktime(until_tbtt * 1000),
HRTIMER_MODE_REL);
- } else if (!info->enable_beacon)
- tasklet_hrtimer_cancel(&data->beacon_timer);
+ } else if (!info->enable_beacon) {
+ unsigned int count = 0;
+ ieee80211_iterate_active_interfaces(
+ data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ mac80211_hwsim_bcn_en_iter, &count);
+ wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
+ count);
+ if (count == 0)
+ tasklet_hrtimer_cancel(&data->beacon_timer);
+ }
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
@@ -2076,7 +2097,7 @@ out:
}
/* Generic Netlink operations array */
-static struct genl_ops hwsim_ops[] = {
+static const struct genl_ops hwsim_ops[] = {
{
.cmd = HWSIM_CMD_REGISTER,
.policy = hwsim_genl_policy,
@@ -2127,8 +2148,7 @@ static int hwsim_init_netlink(void)
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- rc = genl_register_family_with_ops(&hwsim_genl_family,
- hwsim_ops, ARRAY_SIZE(hwsim_ops));
+ rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
if (rc)
goto failure;
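
The beacon-timer change above stops cancelling the shared hrtimer as soon as one interface disables beaconing; it first counts, over all active interfaces, how many still have bcn_en set and cancels only when that count reaches zero. The same idea in miniature, with a plain array standing in for the interface iterator:

#include <stdio.h>
#include <stdbool.h>

struct vif { const char *name; bool bcn_en; };

static bool beacon_timer_armed = true;

static void count_beaconing(void *data, const struct vif *vif)
{
    unsigned int *count = data;

    if (vif->bcn_en)
        (*count)++;
}

/* Called when one interface turns beaconing off. */
static void beacon_disabled(struct vif *vifs, int n, int idx)
{
    unsigned int count = 0;
    int i;

    vifs[idx].bcn_en = false;

    /* stand-in for ieee80211_iterate_active_interfaces() */
    for (i = 0; i < n; i++)
        count_beaconing(&count, &vifs[i]);

    printf("beaconing vifs remaining: %u\n", count);
    if (count == 0)
        beacon_timer_armed = false;   /* only now cancel the shared timer */
}

int main(void)
{
    struct vif vifs[] = { { "ap0", true }, { "ap1", true } };

    beacon_disabled(vifs, 2, 0);   /* timer stays armed for ap1 */
    beacon_disabled(vifs, 2, 1);   /* last beaconer gone: timer cancelled */
    printf("timer armed: %d\n", beacon_timer_armed);
    return 0;
}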
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index fbad00a5abc..aeaea0e3b4c 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2210,8 +2210,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_started = 0;
priv->bss_num = 0;
- if (mwifiex_cfg80211_init_p2p_client(priv))
- return ERR_PTR(-EFAULT);
+ if (mwifiex_cfg80211_init_p2p_client(priv)) {
+ wdev = ERR_PTR(-EFAULT);
+ goto done;
+ }
break;
default:
@@ -2224,7 +2226,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
if (!dev) {
wiphy_err(wiphy, "no memory available for netdevice\n");
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-ENOMEM);
+ wdev = ERR_PTR(-ENOMEM);
+ goto done;
}
mwifiex_init_priv_params(priv, dev);
@@ -2264,7 +2267,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
wiphy_err(wiphy, "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-EFAULT);
+ priv->netdev = NULL;
+ wdev = ERR_PTR(-EFAULT);
+ goto done;
}
sema_init(&priv->async_sem, 1);
@@ -2274,6 +2279,13 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_init(priv);
#endif
+
+done:
+ if (IS_ERR(wdev)) {
+ kfree(priv->wdev);
+ priv->wdev = NULL;
+ }
+
return wdev;
}
EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
@@ -2298,7 +2310,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
unregister_netdevice(wdev->netdev);
/* Clear the priv in adapter */
+ priv->netdev->ieee80211_ptr = NULL;
priv->netdev = NULL;
+ kfree(wdev);
+ priv->wdev = NULL;
priv->media_connected = false;
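
mwifiex_add_virtual_intf() used to return early from each failure point; the patch funnels every error through one done: label so the cleanup of the half-initialised wdev lives in a single place. The general shape of that error-handling style, with invented structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wdev { char name[16]; int registered; };

static struct wdev *add_interface(const char *name, int fail_at)
{
    struct wdev *wdev = calloc(1, sizeof(*wdev));
    int err = 0;

    if (!wdev)
        return NULL;

    strncpy(wdev->name, name, sizeof(wdev->name) - 1);

    if (fail_at == 1) {            /* e.g. netdev allocation failed */
        err = -1;
        goto done;
    }

    if (fail_at == 2) {            /* e.g. registration failed */
        err = -1;
        goto done;
    }
    wdev->registered = 1;

done:
    if (err) {                     /* one cleanup path for every failure above */
        free(wdev);
        wdev = NULL;
    }
    return wdev;
}

int main(void)
{
    struct wdev *ok  = add_interface("mlan0", 0);
    struct wdev *bad = add_interface("mlan1", 2);

    printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
    free(ok);
    return 0;
}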
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index a6c46f3b6e3..e47f4e3012b 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1048,7 +1048,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
- uint16_t cancel_scan_cmd = false;
+ bool cancel_scan_cmd = false;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index f80f30b6160..c8385ec77a8 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -1020,8 +1020,8 @@ struct mwifiex_power_group {
} __packed;
struct mwifiex_types_power_group {
- u16 type;
- u16 length;
+ __le16 type;
+ __le16 length;
} __packed;
struct host_cmd_ds_txpwr_cfg {
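
Declaring the TLV header fields as __le16 lets sparse enforce what the other mwifiex hunks do by hand: every read of type/length must go through le16_to_cpu(), and every write through cpu_to_le16(). Outside the kernel, the same discipline looks like reading the wire bytes explicitly instead of casting, e.g.:

#include <stdio.h>
#include <stdint.h>

/* Read a little-endian u16 from a wire buffer, independent of host endianness. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    /* type = 0x0156, length = 0x0008, as they appear on the wire */
    const uint8_t hdr[4] = { 0x56, 0x01, 0x08, 0x00 };

    printf("type=0x%04x length=%u\n", get_le16(hdr), get_le16(hdr + 2));
    return 0;
}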
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 220af4fe0fc..81ac001ee74 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -82,7 +82,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
struct mwifiex_ie_list *ie_list)
{
u16 travel_len, index, mask;
- s16 input_len;
+ s16 input_len, tlv_len;
struct mwifiex_ie *ie;
u8 *tmp;
@@ -91,11 +91,13 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
ie_list->len = 0;
- while (input_len > 0) {
+ while (input_len >= sizeof(struct mwifiex_ie_types_header)) {
ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
- input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE;
- travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE;
+ tlv_len = le16_to_cpu(ie->ie_length);
+ travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE;
+ if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE)
+ return -1;
index = le16_to_cpu(ie->ie_index);
mask = le16_to_cpu(ie->mgmt_subtype_mask);
@@ -132,6 +134,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
le16_add_cpu(&ie_list->len,
le16_to_cpu(priv->mgmt_ie[index].ie_length) +
MWIFIEX_IE_HDR_SIZE);
+ input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
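
The reworked loop only dereferences a TLV after checking that a full header fits in what is left, and only consumes a payload after checking the advertised length against the remaining bytes, which keeps a malformed ie_length from walking past the buffer. A standalone version of that walk, assuming little-endian header fields:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define TLV_HDR_LEN 4   /* u16 type + u16 length, both little-endian on the wire */

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
    size_t off = 0;

    while (buf_len - off >= TLV_HDR_LEN) {
        uint16_t type = get_le16(buf + off);
        uint16_t len  = get_le16(buf + off + 2);

        /* reject a TLV whose claimed payload runs past the buffer */
        if (buf_len - off - TLV_HDR_LEN < len) {
            printf("truncated TLV at offset %zu, stopping\n", off);
            return;
        }
        printf("TLV type=%u len=%u at offset %zu\n", type, len, off);
        off += TLV_HDR_LEN + len;
    }
}

int main(void)
{
    /* two well-formed TLVs, then one that claims more bytes than remain */
    const uint8_t buf[] = {
        1, 0, 2, 0, 0xaa, 0xbb,
        2, 0, 1, 0, 0xcc,
        3, 0, 9, 0, 0xdd,
    };

    walk_tlvs(buf, sizeof(buf));
    return 0;
}

The sta_cmdresp.c and wmm.c hunks further down apply the same check to their own TLV loops.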
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 37f873bb342..4e4686e6ac0 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -621,7 +621,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
int ret = 0;
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
- u8 enable_data = true;
+ bool enable_data = true;
u16 cap_info, status_code;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index c2b91f566e0..78e8a6666cc 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -411,13 +411,14 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
*/
static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
{
- int ret, i;
+ int ret;
char fmt[64];
struct mwifiex_private *priv;
struct mwifiex_adapter *adapter = context;
struct mwifiex_fw_image fw;
struct semaphore *sem = adapter->card_sem;
bool init_failed = false;
+ struct wireless_dev *wdev;
if (!firmware) {
dev_err(adapter->dev,
@@ -469,14 +470,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
if (mwifiex_register_cfg80211(adapter)) {
dev_err(adapter->dev, "cannot register with cfg80211\n");
- goto err_register_cfg80211;
+ goto err_init_fw;
}
rtnl_lock();
/* Create station interface by default */
- if (!mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
- NL80211_IFTYPE_STATION, NULL, NULL)) {
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
+ NL80211_IFTYPE_STATION, NULL, NULL);
+ if (IS_ERR(wdev)) {
dev_err(adapter->dev, "cannot create default STA interface\n");
+ rtnl_unlock();
goto err_add_intf;
}
rtnl_unlock();
@@ -486,17 +489,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- for (i = 0; i < adapter->priv_num; i++) {
- priv = adapter->priv[i];
-
- if (!priv)
- continue;
-
- if (priv->wdev && priv->netdev)
- mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
- }
- rtnl_unlock();
-err_register_cfg80211:
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
err_init_fw:
@@ -882,7 +874,9 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
+ adapter->workqueue =
+ alloc_workqueue("MWIFIEX_WORK_QUEUE",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!adapter->workqueue)
goto err_kmalloc;
@@ -1004,12 +998,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
wiphy_unregister(priv->wdev->wiphy);
wiphy_free(priv->wdev->wiphy);
- for (i = 0; i < adapter->priv_num; i++) {
- priv = adapter->priv[i];
- if (priv)
- kfree(priv->wdev);
- }
-
mwifiex_terminate_workqueue(adapter);
/* Unregister device */
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 52da8ee7599..03688aa14e8 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -232,7 +232,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
}
mwifiex_remove_card(card->adapter, &add_remove_card_sem);
- kfree(card);
}
static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
@@ -2037,7 +2036,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
goto exit;
}
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
card ? card->adapter : NULL);
@@ -2313,6 +2312,7 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
pci_release_region(pdev, 0);
pci_set_drvdata(pdev, NULL);
}
+ kfree(card);
}
/*
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 1576104e3d9..b44a3152346 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -196,7 +196,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
}
mwifiex_remove_card(card->adapter, &add_remove_card_sem);
- kfree(card);
}
/*
@@ -1029,7 +1028,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb, u32 upld_typ)
{
u8 *cmd_buf;
+ __le16 *curr_ptr = (__le16 *)skb->data;
+ u16 pkt_len = le16_to_cpu(*curr_ptr);
+ skb_trim(skb, pkt_len);
skb_pull(skb, INTF_HEADER_LEN);
switch (upld_typ) {
@@ -1742,7 +1744,6 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
sdio_claim_host(card->func);
sdio_disable_func(card->func);
sdio_release_host(card->func);
- sdio_set_drvdata(card->func, NULL);
}
}
@@ -1770,7 +1771,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return ret;
}
- sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
@@ -1798,6 +1798,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
int ret;
u8 sdio_ireg;
+ sdio_set_drvdata(card->func, card);
+
/*
* Read the HOST_INT_STATUS_REG for ACK the first interrupt got
* from the bootloader. If we don't do this we get a interrupt
@@ -1880,6 +1882,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
kfree(card->mpa_rx.len_arr);
kfree(card->mpa_tx.buf);
kfree(card->mpa_rx.buf);
+ sdio_set_drvdata(card->func, NULL);
+ kfree(card);
}
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c0268b59774..2181ee283d8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -239,14 +239,14 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
memmove(cmd_txp_cfg, txp,
sizeof(struct host_cmd_ds_txpwr_cfg) +
sizeof(struct mwifiex_types_power_group) +
- pg_tlv->length);
+ le16_to_cpu(pg_tlv->length));
pg_tlv = (struct mwifiex_types_power_group *) ((u8 *)
cmd_txp_cfg +
sizeof(struct host_cmd_ds_txpwr_cfg));
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) +
sizeof(struct mwifiex_types_power_group) +
- pg_tlv->length);
+ le16_to_cpu(pg_tlv->length));
} else {
memmove(cmd_txp_cfg, txp, sizeof(*txp));
}
@@ -327,7 +327,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
- u16 hs_activate = false;
+ bool hs_activate = false;
if (!hscfg_param)
/* New Activate command */
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 58a6013712d..2675ca7f8d1 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -274,17 +274,20 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
struct mwifiex_ie_types_header *head;
- u16 tlv, tlv_buf_len;
+ u16 tlv, tlv_buf_len, tlv_buf_left;
u8 *tlv_buf;
u32 i;
- tlv_buf = ((u8 *)rate_cfg) +
- sizeof(struct host_cmd_ds_tx_rate_cfg);
- tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16)));
+ tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg);
+ tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*rate_cfg);
- while (tlv_buf && tlv_buf_len > 0) {
- tlv = (*tlv_buf);
- tlv = tlv | (*(tlv_buf + 1) << 8);
+ while (tlv_buf_left >= sizeof(*head)) {
+ head = (struct mwifiex_ie_types_header *)tlv_buf;
+ tlv = le16_to_cpu(head->type);
+ tlv_buf_len = le16_to_cpu(head->len);
+
+ if (tlv_buf_left < (sizeof(*head) + tlv_buf_len))
+ break;
switch (tlv) {
case TLV_TYPE_RATE_SCOPE:
@@ -304,9 +307,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
/* Add RATE_DROP tlv here */
}
- head = (struct mwifiex_ie_types_header *) tlv_buf;
- tlv_buf += le16_to_cpu(head->len) + sizeof(*head);
- tlv_buf_len -= le16_to_cpu(head->len);
+ tlv_buf += (sizeof(*head) + tlv_buf_len);
+ tlv_buf_left -= (sizeof(*head) + tlv_buf_len);
}
priv->is_data_rate_auto = mwifiex_is_rate_auto(priv);
@@ -340,13 +342,17 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg));
pg = (struct mwifiex_power_group *)
((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group));
- length = pg_tlv_hdr->length;
- if (length > 0) {
- max_power = pg->power_max;
- min_power = pg->power_min;
- length -= sizeof(struct mwifiex_power_group);
- }
- while (length) {
+ length = le16_to_cpu(pg_tlv_hdr->length);
+
+ /* At least one structure required to update power */
+ if (length < sizeof(struct mwifiex_power_group))
+ return 0;
+
+ max_power = pg->power_max;
+ min_power = pg->power_min;
+ length -= sizeof(struct mwifiex_power_group);
+
+ while (length >= sizeof(struct mwifiex_power_group)) {
pg++;
if (max_power < pg->power_max)
max_power = pg->power_max;
@@ -356,10 +362,8 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
length -= sizeof(struct mwifiex_power_group);
}
- if (pg_tlv_hdr->length > 0) {
- priv->min_tx_power_level = (u8) min_power;
- priv->max_tx_power_level = (u8) max_power;
- }
+ priv->min_tx_power_level = (u8) min_power;
+ priv->max_tx_power_level = (u8) max_power;
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index f084412eee0..c8e029df770 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -638,8 +638,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
txp_cfg->mode = cpu_to_le32(1);
pg_tlv = (struct mwifiex_types_power_group *)
(buf + sizeof(struct host_cmd_ds_txpwr_cfg));
- pg_tlv->type = TLV_TYPE_POWER_GROUP;
- pg_tlv->length = 4 * sizeof(struct mwifiex_power_group);
+ pg_tlv->type = cpu_to_le16(TLV_TYPE_POWER_GROUP);
+ pg_tlv->length =
+ cpu_to_le16(4 * sizeof(struct mwifiex_power_group));
pg = (struct mwifiex_power_group *)
(buf + sizeof(struct host_cmd_ds_txpwr_cfg)
+ sizeof(struct mwifiex_types_power_group));
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 1cfe5a738c4..92f76d655e6 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
struct mwifiex_txinfo *tx_info;
int hdr_chop;
struct timeval tv;
+ struct ethhdr *p_ethhdr;
u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
uap_rx_pd = (struct uap_rxpd *)(skb->data);
@@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
}
if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
+ rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+ /* Replace the 803 header and rfc1042 header (llc/snap) with
+ * an Ethernet II header, keep the src/dst and snap_type
+ * (ethertype).
+ *
+ * The firmware only passes up SNAP frames converting all RX
+ * data from 802.11 to 802.2/LLC/SNAP frames.
+ *
+ * To create the Ethernet II, just move the src, dst address
+ * right before the snap_type.
+ */
+ p_ethhdr = (struct ethhdr *)
+ ((u8 *)(&rx_pkt_hdr->eth803_hdr)
+ + sizeof(rx_pkt_hdr->eth803_hdr)
+ + sizeof(rx_pkt_hdr->rfc1042_hdr)
+ - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
+ - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
+ - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
+ memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
+ sizeof(p_ethhdr->h_source));
+ memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
+ sizeof(p_ethhdr->h_dest));
/* Chop off the rxpd + the excess memory from
* 802.2/llc/snap header that was removed.
*/
- hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
- else
+ hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
+ } else {
/* Chop off the rxpd */
hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
+ }
/* Chop off the leading header bytes so the it points
* to the start of either the reconstructed EthII frame
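
The bridged-packet path rebuilds an Ethernet II header in place: the firmware delivers 802.3 + LLC/SNAP frames, and since the SNAP header already ends with the EtherType, moving the destination and source addresses to sit immediately before it yields a valid Ethernet II frame without copying the payload. A byte-level illustration of that trick (the offsets are the standard 802.3/SNAP ones, not driver structures):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ADDR_LEN   6
#define E8023_HDR  14   /* dst + src + length */
#define SNAP_HDR   8    /* DSAP, SSAP, ctrl, OUI, EtherType */

/* Convert an 802.3 + LLC/SNAP frame to Ethernet II in place.
 * Returns the offset of the new frame start inside the same buffer. */
static size_t snap_to_ethernet2(uint8_t *frame)
{
    /* The SNAP EtherType already sits where an Ethernet II header ends,
     * so only the two addresses need to move forward. */
    size_t new_start = E8023_HDR + SNAP_HDR - 14;   /* = 8 */

    memmove(frame + new_start, frame, 2 * ADDR_LEN);
    return new_start;
}

int main(void)
{
    uint8_t frame[64] = {
        /* dst */ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
        /* src */ 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,
        /* 802.3 length */ 0x00, 0x2e,
        /* LLC/SNAP */ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00,
        /* EtherType (IPv4) */ 0x08, 0x00,
        /* payload follows ... */
    };
    size_t off = snap_to_ethernet2(frame);

    printf("Ethernet II frame starts at offset %zu, ethertype %02x%02x\n",
           off, frame[off + 12], frame[off + 13]);
    return 0;
}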
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 1c70b8d0922..edf5b7a2490 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -350,7 +350,6 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
card->udev = udev;
card->intf = intf;
- usb_card = card;
pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
@@ -525,25 +524,28 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
static void mwifiex_usb_disconnect(struct usb_interface *intf)
{
struct usb_card_rec *card = usb_get_intfdata(intf);
- struct mwifiex_adapter *adapter;
- if (!card || !card->adapter) {
- pr_err("%s: card or card->adapter is NULL\n", __func__);
+ if (!card) {
+ pr_err("%s: card is NULL\n", __func__);
return;
}
- adapter = card->adapter;
- if (!adapter->priv_num)
- return;
-
mwifiex_usb_free(card);
- dev_dbg(adapter->dev, "%s: removing card\n", __func__);
- mwifiex_remove_card(adapter, &add_remove_card_sem);
+ if (card->adapter) {
+ struct mwifiex_adapter *adapter = card->adapter;
+
+ if (!adapter->priv_num)
+ return;
+
+ dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+ mwifiex_remove_card(adapter, &add_remove_card_sem);
+ }
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
kfree(card);
+ usb_card = NULL;
return;
}
@@ -754,6 +756,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
card->adapter = adapter;
adapter->dev = &card->udev->dev;
strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
+ usb_card = card;
return 0;
}
@@ -762,7 +765,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
- usb_set_intfdata(card->intf, NULL);
+ card->adapter = NULL;
}
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
@@ -1004,7 +1007,7 @@ static void mwifiex_usb_cleanup_module(void)
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
- if (usb_card) {
+ if (usb_card && usb_card->adapter) {
struct mwifiex_adapter *adapter = usb_card->adapter;
int i;
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 95fa3599b40..13eaeed0389 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -708,7 +708,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
{
u8 *curr = (u8 *) &resp->params.get_wmm_status;
uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
- int valid = true;
+ bool valid = true;
struct mwifiex_ie_types_data *tlv_hdr;
struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
@@ -722,6 +722,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
tlv_hdr = (struct mwifiex_ie_types_data *) curr;
tlv_len = le16_to_cpu(tlv_hdr->header.len);
+ if (resp_len < tlv_len + sizeof(tlv_hdr->header))
+ break;
+
switch (le16_to_cpu(tlv_hdr->header.type)) {
case TLV_TYPE_WMMQSTATUS:
tlv_wmm_qstatus =
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 644d6e0c51c..0f129d498fb 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
}
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
- struct sk_buff *skb);
+ struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ra,
- int tid);
+ struct mwifiex_ra_list_tbl *ra, int tid);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list, int tid);
u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
- const struct sk_buff *skb);
+ const struct sk_buff *skb);
void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
- u8 **assoc_buf,
- struct ieee_types_wmm_parameter
- *wmmie,
- struct ieee80211_ht_cap
- *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+ u8 **assoc_buf,
+ struct ieee_types_wmm_parameter *wmmie,
+ struct ieee80211_ht_cap *htcap);
void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
- struct ieee_types_wmm_parameter
- *wmm_ie);
+ struct ieee_types_wmm_parameter *wmm_ie);
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
- const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+ const struct host_cmd_ds_command *resp);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a3707fd4ef6..b953ad621e0 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -6093,7 +6093,6 @@ err_iounmap:
if (priv->sram != NULL)
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
err_free_reg:
@@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 3bb936b9558..eebd2be21ee 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -182,23 +182,20 @@ extern int orinoco_debug;
/* Exported prototypes */
/********************************************************************/
-extern struct orinoco_private *alloc_orinocodev(
- int sizeof_card, struct device *device,
- int (*hard_reset)(struct orinoco_private *),
- int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
- unsigned long base_addr,
- unsigned int irq,
- const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+ int (*hard_reset)(struct orinoco_private *),
+ int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+ unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
int orinoco_process_xmit_skb(struct sk_buff *skb,
struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index d73fdf6185a..ffb2469eb67 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 677bf14eca8..5ae1191d253 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2559dbd6184..bbd36d1676f 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 42afeeea2c4..04b08de5fd5 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->bridge_io);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 57e3af8ebb4..f9a07b0d83a 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
p54_free_common(dev);
err_free_reg:
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7fc46f26cf2..de15171e2cd 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -636,7 +636,7 @@ static int p54spi_probe(struct spi_device *spi)
gpio_direction_input(p54spi_gpio_irq);
ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
- p54spi_interrupt, IRQF_DISABLED, "p54spi",
+ p54spi_interrupt, 0, "p54spi",
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
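For context on the flags change above: IRQF_DISABLED had long been a no-op, so passing 0 to request_irq() is the plain replacement. A minimal sketch with hypothetical names:

#include <linux/interrupt.h>

/* Hypothetical handler, for illustration only. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(int irq, void *priv)
{
	/* 0 replaces the obsolete IRQF_DISABLED flag */
	return request_irq(irq, example_isr, 0, "example", priv);
}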
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 1c22b81e6ef..8863a6cb238 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
data = r.ptr;
/* copy this MAC to the bss */
- memcpy(bss.address, data, 6);
+ memcpy(bss.address, data, ETH_ALEN);
kfree(data);
/* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
return -EINVAL;
/* prepare the structure for the set object */
- memcpy(&bssid[0], awrq->sa_data, 6);
+ memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
/* set the bssid -- does this make sense when in AP mode? */
rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
- memcpy(awrq->sa_data, r.ptr, 6);
+ memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
kfree(r.ptr);
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
size_t wpa_ie_len;
/* The first entry must be the MAC address */
- memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+ memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
iwe.cmd = SIOCGIWAP;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
&((struct sockaddr *) addr)->sa_data);
if (!ret)
memcpy(priv->ndev->dev_addr,
- &((struct sockaddr *) addr)->sa_data, 6);
+ &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
return ret;
}
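The replacements above swap the magic number 6 for the named ETH_ALEN constant. A self-contained sketch of the idiom, using a hypothetical helper:

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: copy a MAC address using the named constant. */
static void example_copy_mac(u8 *dst, const u8 *src)
{
	memcpy(dst, src, ETH_ALEN);	/* ETH_ALEN == 6 */
}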
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5970ff6f40c..e05d9b4c831 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static struct device_type wlan_type = {
+ .name = "wlan",
+};
+
struct net_device *
islpci_setup(struct pci_dev *pdev)
{
@@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev)
return ndev;
pci_set_drvdata(pdev, ndev);
-#if defined(SET_NETDEV_DEV)
SET_NETDEV_DEV(ndev, &pdev->dev);
-#endif
+ SET_NETDEV_DEVTYPE(ndev, &wlan_type);
/* setup the structure members */
ndev->base_addr = pci_resource_start(pdev, 0);
@@ -837,7 +840,7 @@ islpci_setup(struct pci_dev *pdev)
/* ndev->set_multicast_list = &islpci_set_multicast_list; */
ndev->addr_len = ETH_ALEN;
/* Get a non-zero dummy MAC address for nameif. Jean II */
- memcpy(ndev->dev_addr, dummy_mac, 6);
+ memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
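The islpci_setup() changes above drop the SET_NETDEV_DEV guard (the macro is always available) and tag the interface with a "wlan" device type for userspace. A minimal sketch of the same pattern, with hypothetical names:

#include <linux/netdevice.h>

static struct device_type example_wlan_type = {
	.name = "wlan",
};

/* Hypothetical setup helper: attach the parent device and mark the type. */
static void example_setup(struct net_device *ndev, struct device *parent)
{
	SET_NETDEV_DEV(ndev, parent);
	SET_NETDEV_DEVTYPE(ndev, &example_wlan_type);
}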
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index a01606b36e0..056af38e72e 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
isl_oid[GEN_OID_MACADDRESS].size, &res);
if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
- memcpy(priv->ndev->dev_addr, res->data, 6);
+ memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
else
ret = -EIO;
if (res)
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 68dbbb9c6d1..006b8bcb2e3 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -58,11 +58,11 @@ config RT61PCI
config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
- depends on PCI || SOC_RT288X || SOC_RT305X
+ depends on PCI
select RT2800_LIB
+ select RT2800_LIB_MMIO
select RT2X00_LIB_MMIO
- select RT2X00_LIB_PCI if PCI
- select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
+ select RT2X00_LIB_PCI
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_CCITT
@@ -199,9 +199,30 @@ config RT2800USB_UNKNOWN
endif
+config RT2800SOC
+ tristate "Ralink WiSoC support"
+ depends on SOC_RT288X || SOC_RT305X
+ select RT2X00_LIB_SOC
+ select RT2X00_LIB_MMIO
+ select RT2X00_LIB_CRYPTO
+ select RT2X00_LIB_FIRMWARE
+ select RT2800_LIB
+ select RT2800_LIB_MMIO
+ ---help---
+ This adds support for Ralink WiSoC devices.
+ Supported chips: RT2880, RT3050, RT3052, RT3350, RT3352.
+
+ When compiled as a module, this driver will be called rt2800soc.
+
+
config RT2800_LIB
tristate
+config RT2800_LIB_MMIO
+ tristate
+ select RT2X00_LIB_MMIO
+ select RT2800_LIB
+
config RT2X00_LIB_MMIO
tristate
@@ -219,6 +240,7 @@ config RT2X00_LIB_USB
config RT2X00_LIB
tristate
+ select AVERAGE
config RT2X00_LIB_FIRMWARE
boolean
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index f069d8bc5b6..24a66015a49 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
+obj-$(CONFIG_RT2800_LIB_MMIO) += rt2800mmio.o
obj-$(CONFIG_RT2400PCI) += rt2400pci.o
obj-$(CONFIG_RT2500PCI) += rt2500pci.o
obj-$(CONFIG_RT61PCI) += rt61pci.o
@@ -21,3 +22,4 @@ obj-$(CONFIG_RT2800PCI) += rt2800pci.o
obj-$(CONFIG_RT2500USB) += rt2500usb.o
obj-$(CONFIG_RT73USB) += rt73usb.o
obj-$(CONFIG_RT2800USB) += rt2800usb.o
+obj-$(CONFIG_RT2800SOC) += rt2800soc.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3d53a09da5a..38ed9a3e44c 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
+ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
entry->queue->rt2x00dev->rssi_offset;
rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index fa33b5edf93..aab6b5e4f5d 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -52,6 +52,7 @@
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
#define RF3322 0x000c
#define RF3053 0x000d
#define RF5592 0x000f
+#define RF3070 0x3070
#define RF3290 0x3290
#define RF5360 0x5360
#define RF5370 0x5370
@@ -122,7 +124,7 @@
/*
* MAC_CSR0_3290: MAC_CSR0 for RT3290 to identify MAC version number.
*/
-#define MAC_CSR0_3290 0x0000
+#define MAC_CSR0_3290 0x0000
/*
* E2PROM_CSR: PCI EEPROM control register.
@@ -211,17 +213,17 @@
/*
* COEX_CFG_0
*/
-#define COEX_CFG0 0x0040
+#define COEX_CFG0 0x0040
#define COEX_CFG_ANT FIELD32(0xff000000)
/*
* COEX_CFG_1
*/
-#define COEX_CFG1 0x0044
+#define COEX_CFG1 0x0044
/*
* COEX_CFG_2
*/
-#define COEX_CFG2 0x0048
+#define COEX_CFG2 0x0048
#define BT_COEX_CFG1 FIELD32(0xff000000)
#define BT_COEX_CFG0 FIELD32(0x00ff0000)
#define WL_COEX_CFG1 FIELD32(0x0000ff00)
@@ -235,8 +237,8 @@
#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
#define PLL_CONTROL FIELD32(0x00070000)
#define PLL_LPF_R1 FIELD32(0x00080000)
-#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
-#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
+#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
#define PLL_LOCK_CTRL FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
-#define RFCSR6_TXDIV FIELD8(0x0c)
+#define RFCSR6_TXDIV FIELD8(0x0c)
/* bits for RF3053 */
#define RFCSR6_VCO_IC FIELD8(0xc0)
@@ -2202,13 +2204,13 @@ struct mac_iveiv_entry {
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
-#define RFCSR12_DR0 FIELD8(0xe0)
+#define RFCSR12_DR0 FIELD8(0xe0)
/*
* RFCSR 13:
*/
#define RFCSR13_TX_POWER FIELD8(0x1f)
-#define RFCSR13_DR0 FIELD8(0xe0)
+#define RFCSR13_DR0 FIELD8(0xe0)
/*
* RFCSR 15:
@@ -2226,7 +2228,7 @@ struct mac_iveiv_entry {
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
#define RFCSR17_TX_LO1_EN FIELD8(0x08)
#define RFCSR17_R FIELD8(0x20)
-#define RFCSR17_CODE FIELD8(0x7f)
+#define RFCSR17_CODE FIELD8(0x7f)
/* RFCSR 18 */
#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
@@ -2449,7 +2451,7 @@ enum rt2800_eeprom_word {
*/
#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
-#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
+#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
/*
* EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@ enum rt2800_eeprom_word {
* DAC_TEST: 0: disable, 1: enable
*/
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
-#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
-#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
-#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
@@ -2521,9 +2523,9 @@ enum rt2800_eeprom_word {
* TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
* CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
*/
-#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
-#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
-#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
/*
* EEPROM LNA
@@ -2790,7 +2792,7 @@ enum rt2800_eeprom_word {
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
-#define MCU_LED_AG_CONF 0x52
+#define MCU_LED_AG_CONF 0x52
#define MCU_LED_ACT_CONF 0x53
#define MCU_LED_LED_POLARITY 0x54
#define MCU_RADAR 0x60
@@ -2799,7 +2801,7 @@ enum rt2800_eeprom_word {
#define MCU_FREQ_OFFSET 0x74
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
-#define MCU_BAND_SELECT 0x91
+#define MCU_BAND_SELECT 0x91
/*
* MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88ce656f96c..776aff3678f 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -278,12 +278,9 @@ static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
[EEPROM_LNA] = 0x0026,
[EEPROM_EXT_LNA2] = 0x0027,
[EEPROM_RSSI_BG] = 0x0028,
- [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
[EEPROM_RSSI_BG2] = 0x0029,
- [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
[EEPROM_RSSI_A] = 0x002a,
[EEPROM_RSSI_A2] = 0x002b,
- [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
[EEPROM_TXPOWER_BG1] = 0x0030,
[EEPROM_TXPOWER_BG2] = 0x0037,
[EEPROM_EXT_TXPOWER_BG3] = 0x003e,
@@ -1783,7 +1780,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_read(rt2x00dev, 3, &r3);
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2800_config_3572bt_ant(rt2x00dev);
/*
@@ -1795,7 +1792,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
else
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ rt2x00_has_cap_bt_coexist(rt2x00dev)) {
rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
@@ -2029,13 +2026,6 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2x00dev->default_ant.tx_chain_num <= 2);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2650,7 +2640,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
- if (info->default_power1 > POWER_BOUND)
+ if (info->default_power2 > POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR50_TX,
@@ -2674,7 +2664,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
int idx = rf->channel-1;
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
+ case RF3070:
case RF5360:
case RF5370:
case RF5372:
@@ -3166,7 +3157,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
- if (rt2x00_rf(rt2x00dev, RF3290) ||
+ if (rt2x00_rf(rt2x00dev, RF3070) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5392)) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
- &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
@@ -3244,7 +3235,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3593))
rt2800_bbp_write(rt2x00dev, 83, 0x9a);
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
else
rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/* Turn on primary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
rf->channel > 14);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
else
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
- if (rt2x00_rt(rt2x00dev, RT3572))
+ if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x1c + (2 * rt2x00dev->lna_gain);
+ else
+ reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3593)) {
- if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
- /* Band selection. GPIO #8 controls all paths */
+ /* Band selection */
+ if (rt2x00_is_usb(rt2x00dev) ||
+ rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #8 controls all paths */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
if (rf->channel <= 14)
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
else
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+ }
+ /* LNA PE control. */
+ if (rt2x00_is_usb(rt2x00dev)) {
+ /* GPIO #4 controls PE0 and PE1,
+ * GPIO #7 controls PE2
+ */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
- /* LNA PE control.
- * GPIO #4 controls PE0 and PE1,
- * GPIO #7 controls PE2
- */
rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
-
- rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #4 controls PE0, PE1 and PE2 */
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
}
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
/* AGC init */
if (rf->channel <= 14)
reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
{
int delta;
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_power_limit(rt2x00dev))
return 0;
/*
@@ -3594,7 +3602,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
if (rt2x00_rt(rt2x00dev, RT3593))
return min_t(u8, txpower, 0xc);
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_power_limit(rt2x00dev)) {
/*
* Check if eirp txpower exceed txpower_limit.
* We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
@@ -4405,6 +4414,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
- if (rt2x00_rt(rt2x00dev, RT3572))
- vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
else if (rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x24 + (2 * rt2x00dev->lna_gain);
else {
@@ -4431,11 +4441,17 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, u8 vgc_level)
{
if (qual->vgc_level != vgc_level) {
- if (rt2x00_rt(rt2x00dev, RT5592)) {
+ if (rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
+ vgc_level);
+ } else if (rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
- } else
+ } else {
rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+ }
+
qual->vgc_level = vgc_level;
qual->vgc_level_reg = vgc_level;
}
@@ -4454,17 +4470,35 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
- /*
- * When RSSI is better then -80 increase VGC level with 0x10, except
- * for rt5592 chip.
+
+ /* When RSSI is better than a certain threshold, increase VGC
+ * with a chip specific value in order to improve the balance
+ * between sensitivity and noise isolation.
*/
vgc = rt2800_get_default_vgc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
- vgc += 0x20;
- else if (qual->rssi > -80)
- vgc += 0x10;
+ switch (rt2x00dev->chip.rt) {
+ case RT3572:
+ case RT3593:
+ if (qual->rssi > -65) {
+ if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+ vgc += 0x20;
+ else
+ vgc += 0x10;
+ }
+ break;
+
+ case RT5592:
+ if (qual->rssi > -65)
+ vgc += 0x20;
+ break;
+
+ default:
+ if (qual->rssi > -80)
+ vgc += 0x10;
+ break;
+ }
rt2800_set_vgc(rt2x00dev, qual, vgc);
}
@@ -5489,7 +5523,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
ant = (div_mode == 3) ? 1 : 0;
/* check if this is a Bluetooth combo card */
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
u32 reg;
rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
@@ -5985,7 +6019,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
rt2800_rf_init_calibration(rt2x00dev, 2);
rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
u16 word;
/*
- * Initialize all registers.
+ * Initialize MAC registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev)))
return -EIO;
+ /*
+ * Wait BBP/RF to wake up.
+ */
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
return -EIO;
/*
- * Send signal to firmware during boot time.
+ * Send signal during boot time to initialize firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
+ /*
+ * Make sure BBP is up and running.
+ */
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
+ /*
+ * Initialize BBP/RF registers.
+ */
rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
@@ -7021,6 +7063,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF3320:
case RF3322:
@@ -7203,7 +7246,7 @@ static const struct rf_channel rf_vals[] = {
/*
* RF value list for rt3xxx
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
*/
static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
@@ -7399,72 +7442,6 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
-static const struct rf_channel rf_vals_3053[] = {
- /* Channel, N, R, K */
- {1, 241, 2, 2},
- {2, 241, 2, 7},
- {3, 242, 2, 2},
- {4, 242, 2, 7},
- {5, 243, 2, 2},
- {6, 243, 2, 7},
- {7, 244, 2, 2},
- {8, 244, 2, 7},
- {9, 245, 2, 2},
- {10, 245, 2, 7},
- {11, 246, 2, 2},
- {12, 246, 2, 7},
- {13, 247, 2, 2},
- {14, 248, 2, 4},
-
- {36, 0x56, 0, 4},
- {38, 0x56, 0, 6},
- {40, 0x56, 0, 8},
- {44, 0x57, 0, 0},
- {46, 0x57, 0, 2},
- {48, 0x57, 0, 4},
- {52, 0x57, 0, 8},
- {54, 0x57, 0, 10},
- {56, 0x58, 0, 0},
- {60, 0x58, 0, 4},
- {62, 0x58, 0, 6},
- {64, 0x58, 0, 8},
-
- {100, 0x5B, 0, 8},
- {102, 0x5B, 0, 10},
- {104, 0x5C, 0, 0},
- {108, 0x5C, 0, 4},
- {110, 0x5C, 0, 6},
- {112, 0x5C, 0, 8},
-
- /* NOTE: Channel 114 has been removed intentionally.
- * The EEPROM contains no TX power values for that,
- * and it is disabled in the vendor driver as well.
- */
-
- {116, 0x5D, 0, 0},
- {118, 0x5D, 0, 2},
- {120, 0x5D, 0, 4},
- {124, 0x5D, 0, 8},
- {126, 0x5D, 0, 10},
- {128, 0x5E, 0, 0},
- {132, 0x5E, 0, 4},
- {134, 0x5E, 0, 6},
- {136, 0x5E, 0, 8},
- {140, 0x5F, 0, 0},
-
- {149, 0x5F, 0, 9},
- {151, 0x5F, 0, 11},
- {153, 0x60, 0, 1},
- {157, 0x60, 0, 5},
- {159, 0x60, 0, 7},
- {161, 0x60, 0, 9},
- {165, 0x61, 0, 1},
- {167, 0x61, 0, 3},
- {169, 0x61, 0, 5},
- {171, 0x61, 0, 7},
- {173, 0x61, 0, 9},
-};
-
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7473,7 +7450,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power2;
char *default_power3;
unsigned int i;
- u16 eeprom;
u32 reg;
/*
@@ -7522,48 +7498,48 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-
/*
* Initialize hw_mode information.
*/
- spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(rt2x00dev, RF2820) ||
- rt2x00_rf(rt2x00dev, RF2720)) {
+ switch (rt2x00dev->chip.rf) {
+ case RF2720:
+ case RF2820:
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF2850) ||
- rt2x00_rf(rt2x00dev, RF2750)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+
+ case RF2750:
+ case RF2850:
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3290) ||
- rt2x00_rf(rt2x00dev, RF3320) ||
- rt2x00_rf(rt2x00dev, RF3322) ||
- rt2x00_rf(rt2x00dev, RF5360) ||
- rt2x00_rf(rt2x00dev, RF5370) ||
- rt2x00_rf(rt2x00dev, RF5372) ||
- rt2x00_rf(rt2x00dev, RF5390) ||
- rt2x00_rf(rt2x00dev, RF5392)) {
+ break;
+
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3070:
+ case RF3290:
+ case RF3320:
+ case RF3322:
+ case RF5360:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+ case RF5392:
spec->num_channels = 14;
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3052)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+
+ case RF3052:
+ case RF3053:
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3053)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
- spec->num_channels = ARRAY_SIZE(rf_vals_3053);
- spec->channels = rf_vals_3053;
- } else if (rt2x00_rf(rt2x00dev, RF5592)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+ case RF5592:
rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
@@ -7572,11 +7548,16 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
spec->channels = rf_vals_5592_xtal20;
}
+ break;
}
if (WARN_ON_ONCE(!spec->channels))
return -ENODEV;
+ spec->supported_bands = SUPPORT_BAND_2GHZ;
+ if (spec->num_channels > 14)
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
/*
* Initialize HT information.
*/
@@ -7591,22 +7572,21 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40;
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
+ if (rt2x00dev->default_ant.tx_chain_num >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
- spec->ht.cap |=
- rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
- IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
+ IEEE80211_HT_CAP_RX_STBC_SHIFT;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
IEEE80211_HT_MCS_TX_DEFINED |
IEEE80211_HT_MCS_TX_RX_DIFF |
- ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ ((rt2x00dev->default_ant.tx_chain_num - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
+ switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
case 2:
@@ -7671,6 +7651,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3320:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
new file mode 100644
index 00000000000..a8cc736b506
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -0,0 +1,873 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800mmio
+ * Abstract: rt2800 MMIO device routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/*
+ * TX descriptor initialization
+ */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
+{
+ return (__le32 *) entry->skb->data;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
+
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ __le32 *txd = entry_priv->desc;
+ u32 word;
+ const unsigned int txwi_size = entry->queue->winfo_size;
+
+ /*
+ * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+ * must contain a TXWI structure + 802.11 header + padding + 802.11
+ * data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI and
+ * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
+ * data. It means that LAST_SEC0 is always 0.
+ */
+
+ /*
+ * Initialize TX descriptor
+ */
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+ rt2x00_desc_write(txd, 0, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
+ rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+ !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W1_BURST,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
+ rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+ rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+ rt2x00_desc_write(txd, 1, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+ skbdesc->skb_dma + txwi_size);
+ rt2x00_desc_write(txd, 2, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W3_WIV,
+ !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+ rt2x00_desc_write(txd, 3, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
+
+/*
+ * RX control handlers
+ */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ __le32 *rxd = entry_priv->desc;
+ u32 word;
+
+ rt2x00_desc_read(rxd, 3, &word);
+
+ if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+ /*
+ * Unfortunately we don't know the cipher type used during
+ * decryption. This prevents us from providing correct
+ * statistics through debugfs.
+ */
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
+
+ if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
+ /*
+ * Hardware has stripped IV/EIV data from 802.11 frame during
+ * decryption. Unfortunately the descriptor doesn't contain
+ * any fields with the EIV/IV data either, so they can't
+ * be restored by rt2x00lib.
+ */
+ rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+ if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+ rxdesc->flags |= RX_FLAG_DECRYPTED;
+ else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+ rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+ }
+
+ if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
+ rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+ if (rt2x00_get_field32(word, RXD_W3_L2PAD))
+ rxdesc->dev_flags |= RXDONE_L2PAD;
+
+ /*
+ * Process the RXWI structure that is at the start of the buffer.
+ */
+ rt2800_process_rxwi(entry, rxdesc);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
+static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+ __le32 *txwi;
+ u32 word;
+ int wcid, tx_wcid;
+
+ wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+ txwi = rt2800_drv_get_txwi(entry);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+ return (tx_wcid == wcid);
+}
+
+static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * rt2800pci hardware might reorder frames when exchanging traffic
+ * with multiple BA enabled STAs.
+ *
+ * For example, a tx queue
+ * [ STA1 | STA2 | STA1 | STA2 ]
+ * can result in tx status reports
+ * [ STA1 | STA1 | STA2 | STA2 ]
+ * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+ *
+ * To mitigate this effect, associate the tx status to the first frame
+ * in the tx queue with a matching wcid.
+ */
+ if (rt2800mmio_txdone_entry_check(entry, status) &&
+ !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * Got a matching frame, associate the tx status with
+ * the frame
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * Find the first frame without tx status and assign this status to it
+ * regardless if it matches or not.
+ */
+ if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * Got a matching frame, associate the tx status with
+ * the frame
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
+ void *data)
+{
+ if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ rt2800_txdone_entry(entry, entry->status,
+ rt2800mmio_get_txwi(entry));
+ return false;
+ }
+
+ /* No more frames to release */
+ return true;
+}
+
+static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ u32 status;
+ u8 qid;
+ int max_tx_done = 16;
+
+ while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
+ qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
+ if (unlikely(qid >= QID_RX)) {
+ /*
+ * Unknown queue, this shouldn't happen. Just drop
+ * this tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
+ qid);
+ break;
+ }
+
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+ if (unlikely(queue == NULL)) {
+ /*
+ * The queue is NULL, this shouldn't happen. Stop
+ * processing here and drop the tx status
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ if (unlikely(rt2x00queue_empty(queue))) {
+ /*
+ * The queue is empty. Stop processing here
+ * and drop the tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ /*
+ * Let's associate this tx status with the first
+ * matching frame.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800mmio_txdone_find_entry)) {
+ /*
+ * We cannot match the tx status to any frame, so just
+ * use the first one.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800mmio_txdone_match_first)) {
+ rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
+ qid);
+ break;
+ }
+ }
+
+ /*
+ * Release all frames with a valid tx status.
+ */
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, NULL,
+ rt2800mmio_txdone_release_entries);
+
+ if (--max_tx_done == 0)
+ break;
+ }
+
+ return !max_tx_done;
+}
+
+static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
+{
+ u32 reg;
+
+ /*
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
+ */
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
+ rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 1);
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ if (rt2800mmio_txdone(rt2x00dev))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+ /*
+ * No need to enable the tx status interrupt here as we always
+ * leave it enabled to minimize the possibility of a tx status
+ * register overflow. See comment in interrupt handler.
+ */
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
+
+void rt2800mmio_pretbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_pretbtt(rt2x00dev);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
+
+void rt2800mmio_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u32 reg;
+
+ rt2x00lib_beacondone(rt2x00dev);
+
+ if (rt2x00dev->intf_ap_count) {
+ /*
+ * The rt2800pci hardware tbtt timer is off by 1us per tbtt
+ * causing beacon skew and as a result causing problems with
+ * some powersaving clients over time. Shorten the beacon
+ * interval every 64 beacons by 64us to mitigate this effect.
+ */
+ if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16) - 1);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16));
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ }
+ drv_data->tbtt_tick++;
+ drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
+ }
+
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
+
+void rt2800mmio_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ if (rt2x00mmio_rxdone(rt2x00dev))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
+
+void rt2800mmio_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2800mmio_wakeup(rt2x00dev);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev,
+ INT_MASK_CSR_AUTO_WAKEUP);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
+
+static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ u32 status;
+ int i;
+
+ /*
+ * The TX_FIFO_STATUS interrupt needs special care. We should
+ * read TX_STA_FIFO but we should do it immediately as otherwise
+ * the register can overflow and we would lose status reports.
+ *
+ * Hence, read the TX_STA_FIFO register and copy all tx status
+ * reports into a kernel FIFO which is handled in the txstatus
+ * tasklet. We use a tasklet to process the tx status reports
+ * because we can schedule the tasklet multiple times (when the
+ * interrupt fires again during tx status processing).
+ *
+ * Furthermore we don't disable the TX_FIFO_STATUS
+ * interrupt here but leave it enabled so that the TX_STA_FIFO
+ * can also be read while the tx status tasklet gets executed.
+ *
+ * Since we have only one producer and one consumer we don't
+ * need to lock the kfifo.
+ */
+ for (i = 0; i < rt2x00dev->tx->limit; i++) {
+ rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
+
+ if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
+ break;
+
+ if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
+ rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
+ break;
+ }
+ }
+
+ /* Schedule the tasklet for processing the tx status. */
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+}
+
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
+{
+ struct rt2x00_dev *rt2x00dev = dev_instance;
+ u32 reg, mask;
+
+ /* Read status and ACK all interrupts */
+ rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ if (!reg)
+ return IRQ_NONE;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
+
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = ~reg;
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2800mmio_txstatus_interrupt(rt2x00dev);
+ /*
+ * Never disable the TX_FIFO_STATUS interrupt.
+ */
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ }
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+ rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg &= mask;
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
+
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ u32 reg;
+ unsigned long flags;
+
+ /*
+ * When interrupts are being enabled, the interrupt status register
+ * should be cleared to assure a clean state.
+ */
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+ }
+
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ reg = 0;
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+ }
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Wait for possibly running tasklets to finish.
+ */
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
+
+/*
+ * Queue handlers.
+ */
+void rt2800mmio_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+ rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+
+void rt2800mmio_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ struct queue_entry *entry;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
+ entry->entry_idx);
+ break;
+ case QID_MGMT:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
+ entry->entry_idx);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
+
+void rt2800mmio_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+ rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+ /*
+ * Wait for current invocation to finish. The tasklet
+ * won't be scheduled anymore afterwards since we disabled
+ * the TBTT and PRE TBTT timer.
+ */
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
+
+void rt2800mmio_queue_init(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ unsigned short txwi_size, rxwi_size;
+
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 128;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->winfo_size = rxwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 64;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_BEACON:
+ queue->limit = 8;
+ queue->data_size = 0; /* No DMA required for beacons */
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
+
+/*
+ * Initialization functions.
+ */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ u32 word;
+
+ if (entry->queue->qid == QID_RX) {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+ return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+ } else {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+ return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
+
+void rt2800mmio_clear_entry(struct queue_entry *entry)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 word;
+
+ if (entry->queue->qid == QID_RX) {
+ rt2x00_desc_read(entry_priv->desc, 0, &word);
+ rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+ rt2x00_desc_write(entry_priv->desc, 0, word);
+
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+ rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Set RX IDX in register to inform hardware that we have
+ * handled this entry and it is available for reuse again.
+ */
+ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+ entry->entry_idx);
+ } else {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+ rt2x00_desc_write(entry_priv->desc, 1, word);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
+
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct queue_entry_priv_mmio *entry_priv;
+
+ /*
+ * Initialize registers.
+ */
+ entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
+ rt2x00dev->tx[0].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+ entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
+ rt2x00dev->tx[1].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+ entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
+ rt2x00dev->tx[2].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+ entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
+ rt2x00dev->tx[3].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
+
+ entry_priv = rt2x00dev->rx->entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
+ rt2x00dev->rx[0].limit);
+ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+ rt2x00dev->rx[0].limit - 1);
+ rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+ rt2800_disable_wpdma(rt2x00dev);
+
+ rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
+
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+
+ /*
+ * Reset DMA indexes
+ */
+ rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+ if (rt2x00_is_pcie(rt2x00dev) &&
+ (rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592))) {
+ rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
+
+ rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+ reg = 0;
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
+
+/*
+ * Device state switch handlers.
+ */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ /* Wait for DMA, ignore error until we initialize queues. */
+ rt2800_wait_wpdma_ready(rt2x00dev);
+
+ if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
+ return -EIO;
+
+ return rt2800_enable_radio(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2800 MMIO library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
new file mode 100644
index 00000000000..6a10de3eee3
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -0,0 +1,165 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800mmio
+ * Abstract: forward declarations for the rt2800mmio module.
+ */
+
+#ifndef RT2800MMIO_H
+#define RT2800MMIO_H
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET 0x10
+#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE (4 * sizeof(__le32))
+#define RXD_DESC_SIZE (4 * sizeof(__le32))
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
+#define TXD_W1_BURST FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ * 0:MGMT, 1:HCCA, 2:EDCA
+ */
+#define TXD_W3_WIV FIELD32(0x01000000)
+#define TXD_W3_QSEL FIELD32(0x06000000)
+#define TXD_W3_TCO FIELD32(0x20000000)
+#define TXD_W3_UCO FIELD32(0x40000000)
+#define TXD_W3_ICO FIELD32(0x80000000)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1 FIELD32(0x00003fff)
+#define RXD_W1_SDL0 FIELD32(0x3fff0000)
+#define RXD_W1_LS0 FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame has been decrypted by the hardware.
+ */
+#define RXD_W3_BA FIELD32(0x00000001)
+#define RXD_W3_DATA FIELD32(0x00000002)
+#define RXD_W3_NULLDATA FIELD32(0x00000004)
+#define RXD_W3_FRAG FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W3_MULTICAST FIELD32(0x00000020)
+#define RXD_W3_BROADCAST FIELD32(0x00000040)
+#define RXD_W3_MY_BSS FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W3_AMSDU FIELD32(0x00000800)
+#define RXD_W3_HTC FIELD32(0x00001000)
+#define RXD_W3_RSSI FIELD32(0x00002000)
+#define RXD_W3_L2PAD FIELD32(0x00004000)
+#define RXD_W3_AMPDU FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
+
+/* TX descriptor initialization */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
+
+/* RX control handlers */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc);
+
+/* Interrupt functions */
+void rt2800mmio_txstatus_tasklet(unsigned long data);
+void rt2800mmio_pretbtt_tasklet(unsigned long data);
+void rt2800mmio_tbtt_tasklet(unsigned long data);
+void rt2800mmio_rxdone_tasklet(unsigned long data);
+void rt2800mmio_autowake_tasklet(unsigned long data);
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state);
+
+/* Queue handlers */
+void rt2800mmio_start_queue(struct data_queue *queue);
+void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_stop_queue(struct data_queue *queue);
+void rt2800mmio_queue_init(struct data_queue *queue);
+
+/* Initialization functions */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry);
+void rt2800mmio_clear_entry(struct queue_entry *entry);
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev);
+
+/* Device state switch handlers. */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2800MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index f8f2abbfbb6..b504455b4fe 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -37,14 +37,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <linux/eeprom_93cx6.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
-#include "rt2x00soc.h"
#include "rt2800lib.h"
+#include "rt2800mmio.h"
#include "rt2800.h"
#include "rt2800pci.h"
@@ -90,27 +89,6 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
- void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
-
- if (!base_addr)
- return -ENOMEM;
-
- memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
-
- iounmap(base_addr);
- return 0;
-}
-#else
-static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
- return -ENOMEM;
-}
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
{
struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -183,112 +161,6 @@ static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
return rt2800_read_eeprom_efuse(rt2x00dev);
}
-#else
-static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
-{
- return 0;
-}
-
-static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
-{
- return -EOPNOTSUPP;
-}
-#endif /* CONFIG_PCI */
-
-/*
- * Queue handlers.
- */
-static void rt2800pci_start_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- switch (queue->qid) {
- case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- break;
- case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
- rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
- break;
- default:
- break;
- }
-}
-
-static void rt2800pci_kick_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- struct queue_entry *entry;
-
- switch (queue->qid) {
- case QID_AC_VO:
- case QID_AC_VI:
- case QID_AC_BE:
- case QID_AC_BK:
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
- entry->entry_idx);
- break;
- case QID_MGMT:
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
- entry->entry_idx);
- break;
- default:
- break;
- }
-}
-
-static void rt2800pci_stop_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- switch (queue->qid) {
- case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- break;
- case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
- rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
- /*
- * Wait for current invocation to finish. The tasklet
- * won't be scheduled anymore afterwards since we disabled
- * the TBTT and PRE TBTT timer.
- */
- tasklet_kill(&rt2x00dev->tbtt_tasklet);
- tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-
- break;
- default:
- break;
- }
-}
/*
* Firmware functions
@@ -332,217 +204,13 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
}
/*
- * Initialization functions.
- */
-static bool rt2800pci_get_entry_state(struct queue_entry *entry)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- u32 word;
-
- if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
-
- return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
- } else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
-
- return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
- }
-}
-
-static void rt2800pci_clear_entry(struct queue_entry *entry)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 word;
-
- if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
- rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 0, word);
-
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
- rt2x00_desc_write(entry_priv->desc, 1, word);
-
- /*
- * Set RX IDX in register to inform hardware that we have
- * handled this entry and it is available for reuse again.
- */
- rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
- entry->entry_idx);
- } else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
- rt2x00_desc_write(entry_priv->desc, 1, word);
- }
-}
-
-static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
-{
- struct queue_entry_priv_mmio *entry_priv;
-
- /*
- * Initialize registers.
- */
- entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
- rt2x00dev->tx[0].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
-
- entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
- rt2x00dev->tx[1].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
-
- entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
- rt2x00dev->tx[2].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
-
- entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
- rt2x00dev->tx[3].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
-
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
-
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
-
- entry_priv = rt2x00dev->rx->entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
- rt2x00dev->rx[0].limit);
- rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
- rt2x00dev->rx[0].limit - 1);
- rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
-
- rt2800_disable_wpdma(rt2x00dev);
-
- rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
-
- return 0;
-}
-
-/*
* Device state switch handlers.
*/
-static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
- unsigned long flags;
-
- /*
- * When interrupts are being enabled, the interrupt registers
- * should clear the register to assure a clean state.
- */
- if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
- rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
- }
-
- spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- reg = 0;
- if (state == STATE_RADIO_IRQ_ON) {
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
- }
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
-
- if (state == STATE_RADIO_IRQ_OFF) {
- /*
- * Wait for possibly running tasklets to finish.
- */
- tasklet_kill(&rt2x00dev->txstatus_tasklet);
- tasklet_kill(&rt2x00dev->rxdone_tasklet);
- tasklet_kill(&rt2x00dev->autowake_tasklet);
- tasklet_kill(&rt2x00dev->tbtt_tasklet);
- tasklet_kill(&rt2x00dev->pretbtt_tasklet);
- }
-}
-
-static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
-{
- u32 reg;
-
- /*
- * Reset DMA indexes
- */
- rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
- if (rt2x00_is_pcie(rt2x00dev) &&
- (rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT5592))) {
- rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
- rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
- rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
- rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
- }
-
- rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
- reg = 0;
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
- return 0;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
int retval;
- /* Wait for DMA, ignore error until we initialize queues. */
- rt2800_wait_wpdma_ready(rt2x00dev);
-
- if (unlikely(rt2800pci_init_queues(rt2x00dev)))
- return -EIO;
-
- retval = rt2800_enable_radio(rt2x00dev);
+ retval = rt2800mmio_enable_radio(rt2x00dev);
if (retval)
return retval;
@@ -559,15 +227,6 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
return retval;
}
-static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
-{
- if (rt2x00_is_soc(rt2x00dev)) {
- rt2800_disable_radio(rt2x00dev);
- rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
- }
-}
-
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
@@ -601,12 +260,11 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
* After the radio has been disabled, the device should
* be put to sleep for powersaving.
*/
- rt2800pci_disable_radio(rt2x00dev);
rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
case STATE_RADIO_IRQ_OFF:
- rt2800pci_toggle_irq(rt2x00dev, state);
+ rt2800mmio_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
case STATE_SLEEP:
@@ -627,479 +285,13 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
}
/*
- * TX descriptor initialization
- */
-static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
-{
- return (__le32 *) entry->skb->data;
-}
-
-static void rt2800pci_write_tx_desc(struct queue_entry *entry,
- struct txentry_desc *txdesc)
-{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- __le32 *txd = entry_priv->desc;
- u32 word;
- const unsigned int txwi_size = entry->queue->winfo_size;
-
- /*
- * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
- * must contain a TXWI structure + 802.11 header + padding + 802.11
- * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
- * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
- * data. This means that LAST_SEC0 is always 0.
- */
-
- /*
- * Initialize TX descriptor
- */
- word = 0;
- rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
- rt2x00_desc_write(txd, 0, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
- rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
- !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_BURST,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
- rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
- rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
- rt2x00_desc_write(txd, 1, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
- skbdesc->skb_dma + txwi_size);
- rt2x00_desc_write(txd, 2, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W3_WIV,
- !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
- rt2x00_desc_write(txd, 3, word);
-
- /*
- * Register descriptor details in skb frame descriptor.
- */
- skbdesc->desc = txd;
- skbdesc->desc_len = TXD_DESC_SIZE;
-}
-
-/*
- * RX control handlers
- */
-static void rt2800pci_fill_rxdone(struct queue_entry *entry,
- struct rxdone_entry_desc *rxdesc)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- __le32 *rxd = entry_priv->desc;
- u32 word;
-
- rt2x00_desc_read(rxd, 3, &word);
-
- if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
- rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
-
- /*
- * Unfortunately we don't know the cipher type used during
- * decryption. This prevents us from providing correct
- * statistics through debugfs.
- */
- rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
-
- if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
- /*
- * Hardware has stripped IV/EIV data from 802.11 frame during
- * decryption. Unfortunately the descriptor doesn't contain
- * any fields with the EIV/IV data either, so they can't
- * be restored by rt2x00lib.
- */
- rxdesc->flags |= RX_FLAG_IV_STRIPPED;
-
- /*
- * The hardware has already checked the Michael Mic and has
- * stripped it from the frame. Signal this to mac80211.
- */
- rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
- if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
- rxdesc->flags |= RX_FLAG_DECRYPTED;
- else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
- rxdesc->flags |= RX_FLAG_MMIC_ERROR;
- }
-
- if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
- rxdesc->dev_flags |= RXDONE_MY_BSS;
-
- if (rt2x00_get_field32(word, RXD_W3_L2PAD))
- rxdesc->dev_flags |= RXDONE_L2PAD;
-
- /*
- * Process the RXWI structure that is at the start of the buffer.
- */
- rt2800_process_rxwi(entry, rxdesc);
-}
-
-/*
- * Interrupt functions.
- */
-static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
-{
- struct ieee80211_conf conf = { .flags = 0 };
- struct rt2x00lib_conf libconf = { .conf = &conf };
-
- rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
-}
-
-static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
- __le32 *txwi;
- u32 word;
- int wcid, tx_wcid;
-
- wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
- txwi = rt2800_drv_get_txwi(entry);
- rt2x00_desc_read(txwi, 1, &word);
- tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
- return (tx_wcid == wcid);
-}
-
-static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * rt2800pci hardware might reorder frames when exchanging traffic
- * with multiple BA enabled STAs.
- *
- * For example, a tx queue
- * [ STA1 | STA2 | STA1 | STA2 ]
- * can result in tx status reports
- * [ STA1 | STA1 | STA2 | STA2 ]
- * when the hw decides to aggregate the frames for STA1 into one AMPDU.
- *
- * To mitigate this effect, associate the tx status to the first frame
- * in the tx queue with a matching wcid.
- */
- if (rt2800pci_txdone_entry_check(entry, status) &&
- !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-
-static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * Find the first frame without tx status and assign this status to it
- * regardless if it matches or not.
- */
- if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
- void *data)
-{
- if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- rt2800_txdone_entry(entry, entry->status,
- rt2800pci_get_txwi(entry));
- return false;
- }
-
- /* No more frames to release */
- return true;
-}
-
-static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- u32 status;
- u8 qid;
- int max_tx_done = 16;
-
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
- qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
- if (unlikely(qid >= QID_RX)) {
- /*
- * Unknown queue, this shouldn't happen. Just drop
- * this tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
- qid);
- break;
- }
-
- queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
- if (unlikely(queue == NULL)) {
- /*
- * The queue is NULL, this shouldn't happen. Stop
- * processing here and drop the tx status
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
- qid);
- break;
- }
-
- if (unlikely(rt2x00queue_empty(queue))) {
- /*
- * The queue is empty. Stop processing here
- * and drop the tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
- break;
- }
-
- /*
- * Let's associate this tx status with the first
- * matching frame.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800pci_txdone_find_entry)) {
- /*
- * We cannot match the tx status to any frame, so just
- * use the first one.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800pci_txdone_match_first)) {
- rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
- qid);
- break;
- }
- }
-
- /*
- * Release all frames with a valid tx status.
- */
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, NULL,
- rt2800pci_txdone_release_entries);
-
- if (--max_tx_done == 0)
- break;
- }
-
- return !max_tx_done;
-}
-
-static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_field32 irq_field)
-{
- u32 reg;
-
- /*
- * Enable a single interrupt. The interrupt mask register
- * access needs locking.
- */
- spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, irq_field, 1);
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock_irq(&rt2x00dev->irqmask_lock);
-}
-
-static void rt2800pci_txstatus_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2800pci_txdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
- /*
- * No need to enable the tx status interrupt here as we always
- * leave it enabled to minimize the possibility of a tx status
- * register overflow. See comment in interrupt handler.
- */
-}
-
-static void rt2800pci_pretbtt_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- rt2x00lib_pretbtt(rt2x00dev);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
-}
-
-static void rt2800pci_tbtt_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
- u32 reg;
-
- rt2x00lib_beacondone(rt2x00dev);
-
- if (rt2x00dev->intf_ap_count) {
- /*
- * The rt2800pci hardware tbtt timer is off by 1us per tbtt
- * causing beacon skew and as a result causing problems with
- * some powersaving clients over time. Shorten the beacon
- * interval every 64 beacons by 64us to mitigate this effect.
- */
- if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
- (rt2x00dev->beacon_int * 16) - 1);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
- (rt2x00dev->beacon_int * 16));
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
- drv_data->tbtt_tick++;
- drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
- }
-
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
-}
-
-static void rt2800pci_rxdone_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2x00mmio_rxdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
-}
-
-static void rt2800pci_autowake_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- rt2800pci_wakeup(rt2x00dev);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
-}
-
-static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
-{
- u32 status;
- int i;
-
- /*
- * The TX_FIFO_STATUS interrupt needs special care. We should
- * read TX_STA_FIFO but we should do it immediately as otherwise
- * the register can overflow and we would lose status reports.
- *
- * Hence, read the TX_STA_FIFO register and copy all tx status
- * reports into a kernel FIFO which is handled in the txstatus
- * tasklet. We use a tasklet to process the tx status reports
- * because we can schedule the tasklet multiple times (when the
- * interrupt fires again during tx status processing).
- *
- * Furthermore we don't disable the TX_FIFO_STATUS
- * interrupt here but leave it enabled so that the TX_STA_FIFO
- * can also be read while the tx status tasklet gets executed.
- *
- * Since we have only one producer and one consumer we don't
- * need to lock the kfifo.
- */
- for (i = 0; i < rt2x00dev->tx->limit; i++) {
- rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
-
- if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
- break;
-
- if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
- rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
- break;
- }
- }
-
- /* Schedule the tasklet for processing the tx status. */
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-}
-
-static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
-{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg, mask;
-
- /* Read status and ACK all interrupts */
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
- rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-
- if (!reg)
- return IRQ_NONE;
-
- if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- return IRQ_HANDLED;
-
- /*
- * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
- * for interrupts and interrupt masks we can just use the value of
- * INT_SOURCE_CSR to create the interrupt mask.
- */
- mask = ~reg;
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
- rt2800pci_txstatus_interrupt(rt2x00dev);
- /*
- * Never disable the TX_FIFO_STATUS interrupt.
- */
- rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- }
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
- tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
- tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
- tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
- tasklet_schedule(&rt2x00dev->autowake_tasklet);
-
- /*
- * Disable all interrupts for which a tasklet was scheduled right now,
- * the tasklet will reenable the appropriate interrupts.
- */
- spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- reg &= mask;
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock(&rt2x00dev->irqmask_lock);
-
- return IRQ_HANDLED;
-}
-
-/*
* Device probe functions.
*/
static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (rt2x00_is_soc(rt2x00dev))
- retval = rt2800pci_read_eeprom_soc(rt2x00dev);
- else if (rt2800pci_efuse_detect(rt2x00dev))
+ if (rt2800pci_efuse_detect(rt2x00dev))
retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
else
retval = rt2800pci_read_eeprom_pci(rt2x00dev);
@@ -1145,25 +337,25 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
.read_eeprom = rt2800pci_read_eeprom,
.hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
.drv_write_firmware = rt2800pci_write_firmware,
- .drv_init_registers = rt2800pci_init_registers,
- .drv_get_txwi = rt2800pci_get_txwi,
+ .drv_init_registers = rt2800mmio_init_registers,
+ .drv_get_txwi = rt2800mmio_get_txwi,
};
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
- .irq_handler = rt2800pci_interrupt,
- .txstatus_tasklet = rt2800pci_txstatus_tasklet,
- .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
- .tbtt_tasklet = rt2800pci_tbtt_tasklet,
- .rxdone_tasklet = rt2800pci_rxdone_tasklet,
- .autowake_tasklet = rt2800pci_autowake_tasklet,
+ .irq_handler = rt2800mmio_interrupt,
+ .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
+ .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
+ .autowake_tasklet = rt2800mmio_autowake_tasklet,
.probe_hw = rt2800_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
.load_firmware = rt2800_load_firmware,
.initialize = rt2x00mmio_initialize,
.uninitialize = rt2x00mmio_uninitialize,
- .get_entry_state = rt2800pci_get_entry_state,
- .clear_entry = rt2800pci_clear_entry,
+ .get_entry_state = rt2800mmio_get_entry_state,
+ .clear_entry = rt2800mmio_clear_entry,
.set_device_state = rt2800pci_set_device_state,
.rfkill_poll = rt2800_rfkill_poll,
.link_stats = rt2800_link_stats,
@@ -1171,15 +363,15 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
- .start_queue = rt2800pci_start_queue,
- .kick_queue = rt2800pci_kick_queue,
- .stop_queue = rt2800pci_stop_queue,
+ .start_queue = rt2800mmio_start_queue,
+ .kick_queue = rt2800mmio_kick_queue,
+ .stop_queue = rt2800mmio_stop_queue,
.flush_queue = rt2x00mmio_flush_queue,
- .write_tx_desc = rt2800pci_write_tx_desc,
+ .write_tx_desc = rt2800mmio_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
.clear_beacon = rt2800_clear_beacon,
- .fill_rxdone = rt2800pci_fill_rxdone,
+ .fill_rxdone = rt2800mmio_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
.config_filter = rt2800_config_filter,
@@ -1191,49 +383,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.sta_remove = rt2800_sta_remove,
};
-static void rt2800pci_queue_init(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- unsigned short txwi_size, rxwi_size;
-
- rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
-
- switch (queue->qid) {
- case QID_RX:
- queue->limit = 128;
- queue->data_size = AGGREGATION_SIZE;
- queue->desc_size = RXD_DESC_SIZE;
- queue->winfo_size = rxwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_AC_VO:
- case QID_AC_VI:
- case QID_AC_BE:
- case QID_AC_BK:
- queue->limit = 64;
- queue->data_size = AGGREGATION_SIZE;
- queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = txwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_BEACON:
- queue->limit = 8;
- queue->data_size = 0; /* No DMA required for beacons */
- queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = txwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_ATIM:
- /* fallthrough */
- default:
- BUG();
- break;
- }
-}
-
static const struct rt2x00_ops rt2800pci_ops = {
.name = KBUILD_MODNAME,
.drv_data_size = sizeof(struct rt2800_drv_data),
@@ -1241,7 +390,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .queue_init = rt2800pci_queue_init,
+ .queue_init = rt2800mmio_queue_init,
.lib = &rt2800pci_rt2x00_ops,
.drv = &rt2800pci_rt2800_ops,
.hw = &rt2800pci_mac80211_ops,
@@ -1253,7 +402,6 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-#ifdef CONFIG_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601) },
{ PCI_DEVICE(0x1814, 0x0681) },
@@ -1298,38 +446,15 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
#endif
{ 0, }
};
-#endif /* CONFIG_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
-#ifdef CONFIG_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
-#endif /* CONFIG_PCI */
MODULE_LICENSE("GPL");
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800soc_probe(struct platform_device *pdev)
-{
- return rt2x00soc_probe(pdev, &rt2800pci_ops);
-}
-
-static struct platform_driver rt2800soc_driver = {
- .driver = {
- .name = "rt2800_wmac",
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
- .probe = rt2800soc_probe,
- .remove = rt2x00soc_remove,
- .suspend = rt2x00soc_suspend,
- .resume = rt2x00soc_resume,
-};
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
static int rt2800pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -1344,39 +469,5 @@ static struct pci_driver rt2800pci_driver = {
.suspend = rt2x00pci_suspend,
.resume = rt2x00pci_resume,
};
-#endif /* CONFIG_PCI */
-
-static int __init rt2800pci_init(void)
-{
- int ret = 0;
-
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- ret = platform_driver_register(&rt2800soc_driver);
- if (ret)
- return ret;
-#endif
-#ifdef CONFIG_PCI
- ret = pci_register_driver(&rt2800pci_driver);
- if (ret) {
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- platform_driver_unregister(&rt2800soc_driver);
-#endif
- return ret;
- }
-#endif
-
- return ret;
-}
-
-static void __exit rt2800pci_exit(void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver(&rt2800pci_driver);
-#endif
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- platform_driver_unregister(&rt2800soc_driver);
-#endif
-}
-module_init(rt2800pci_init);
-module_exit(rt2800pci_exit);
+module_pci_driver(rt2800pci_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index ab22a087c50..a81c9ee281c 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,107 +35,10 @@
#define RT2800PCI_H
/*
- * Queue register offset macros
- */
-#define TX_QUEUE_REG_OFFSET 0x10
-#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-
-/*
* 8051 firmware image.
*/
#define FIRMWARE_RT2860 "rt2860.bin"
#define FIRMWARE_RT3290 "rt3290.bin"
#define FIRMWARE_IMAGE_BASE 0x2000
-/*
- * DMA descriptor defines.
- */
-#define TXD_DESC_SIZE (4 * sizeof(__le32))
-#define RXD_DESC_SIZE (4 * sizeof(__le32))
-
-/*
- * TX descriptor format for TX, PRIO and Beacon Ring.
- */
-
-/*
- * Word0
- */
-#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
-#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
-#define TXD_W1_BURST FIELD32(0x00008000)
-#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
-#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
-#define TXD_W1_DMA_DONE FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
-
-/*
- * Word3
- * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
- * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
- * 0:MGMT, 1:HCCA 2:EDCA
- */
-#define TXD_W3_WIV FIELD32(0x01000000)
-#define TXD_W3_QSEL FIELD32(0x06000000)
-#define TXD_W3_TCO FIELD32(0x20000000)
-#define TXD_W3_UCO FIELD32(0x40000000)
-#define TXD_W3_ICO FIELD32(0x80000000)
-
-/*
- * RX descriptor format for RX Ring.
- */
-
-/*
- * Word0
- */
-#define RXD_W0_SDP0 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define RXD_W1_SDL1 FIELD32(0x00003fff)
-#define RXD_W1_SDL0 FIELD32(0x3fff0000)
-#define RXD_W1_LS0 FIELD32(0x40000000)
-#define RXD_W1_DMA_DONE FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define RXD_W2_SDP1 FIELD32(0xffffffff)
-
-/*
- * Word3
- * AMSDU: RX with 802.3 header, not 802.11 header.
- * DECRYPTED: This frame is being decrypted.
- */
-#define RXD_W3_BA FIELD32(0x00000001)
-#define RXD_W3_DATA FIELD32(0x00000002)
-#define RXD_W3_NULLDATA FIELD32(0x00000004)
-#define RXD_W3_FRAG FIELD32(0x00000008)
-#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXD_W3_MULTICAST FIELD32(0x00000020)
-#define RXD_W3_BROADCAST FIELD32(0x00000040)
-#define RXD_W3_MY_BSS FIELD32(0x00000080)
-#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
-#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
-#define RXD_W3_AMSDU FIELD32(0x00000800)
-#define RXD_W3_HTC FIELD32(0x00001000)
-#define RXD_W3_RSSI FIELD32(0x00002000)
-#define RXD_W3_L2PAD FIELD32(0x00004000)
-#define RXD_W3_AMPDU FIELD32(0x00008000)
-#define RXD_W3_DECRYPTED FIELD32(0x00010000)
-#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
-#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
-
#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
new file mode 100644
index 00000000000..1359227ca41
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -0,0 +1,263 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800soc
+ * Abstract: rt2800 WiSoC specific routines.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2x00soc.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/* Allow hardware encryption to be disabled. */
+static bool modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+ return modparam_nohwcrypt;
+}
+
+static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_disable_radio(rt2x00dev);
+ rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+}
+
+static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ int retval = 0;
+
+ switch (state) {
+ case STATE_RADIO_ON:
+ retval = rt2800mmio_enable_radio(rt2x00dev);
+ break;
+
+ case STATE_RADIO_OFF:
+ rt2800soc_disable_radio(rt2x00dev);
+ break;
+
+ case STATE_RADIO_IRQ_ON:
+ case STATE_RADIO_IRQ_OFF:
+ rt2800mmio_toggle_irq(rt2x00dev, state);
+ break;
+
+ case STATE_DEEP_SLEEP:
+ case STATE_SLEEP:
+ case STATE_STANDBY:
+ case STATE_AWAKE:
+ /* These states are not supported, but don't report an error */
+ retval = 0;
+ break;
+
+ default:
+ retval = -ENOTSUPP;
+ break;
+ }
+
+ if (unlikely(retval))
+ rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
+ state, retval);
+
+ return retval;
+}
+
+static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+ void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
+
+ if (!base_addr)
+ return -ENOMEM;
+
+ memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+
+ iounmap(base_addr);
+ return 0;
+}
+
+/* Firmware functions */
+static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .tx = rt2x00mac_tx,
+ .start = rt2x00mac_start,
+ .stop = rt2x00mac_stop,
+ .add_interface = rt2x00mac_add_interface,
+ .remove_interface = rt2x00mac_remove_interface,
+ .config = rt2x00mac_config,
+ .configure_filter = rt2x00mac_configure_filter,
+ .set_key = rt2x00mac_set_key,
+ .sw_scan_start = rt2x00mac_sw_scan_start,
+ .sw_scan_complete = rt2x00mac_sw_scan_complete,
+ .get_stats = rt2x00mac_get_stats,
+ .get_tkip_seq = rt2800_get_tkip_seq,
+ .set_rts_threshold = rt2800_set_rts_threshold,
+ .sta_add = rt2x00mac_sta_add,
+ .sta_remove = rt2x00mac_sta_remove,
+ .bss_info_changed = rt2x00mac_bss_info_changed,
+ .conf_tx = rt2800_conf_tx,
+ .get_tsf = rt2800_get_tsf,
+ .rfkill_poll = rt2x00mac_rfkill_poll,
+ .ampdu_action = rt2800_ampdu_action,
+ .flush = rt2x00mac_flush,
+ .get_survey = rt2800_get_survey,
+ .get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
+};
+
+static const struct rt2800_ops rt2800soc_rt2800_ops = {
+ .register_read = rt2x00mmio_register_read,
+ .register_read_lock = rt2x00mmio_register_read, /* same for SoCs */
+ .register_write = rt2x00mmio_register_write,
+ .register_write_lock = rt2x00mmio_register_write, /* same for SoCs */
+ .register_multiread = rt2x00mmio_register_multiread,
+ .register_multiwrite = rt2x00mmio_register_multiwrite,
+ .regbusy_read = rt2x00mmio_regbusy_read,
+ .read_eeprom = rt2800soc_read_eeprom,
+ .hwcrypt_disabled = rt2800soc_hwcrypt_disabled,
+ .drv_write_firmware = rt2800soc_write_firmware,
+ .drv_init_registers = rt2800mmio_init_registers,
+ .drv_get_txwi = rt2800mmio_get_txwi,
+};
+
+static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
+ .irq_handler = rt2800mmio_interrupt,
+ .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
+ .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
+ .autowake_tasklet = rt2800mmio_autowake_tasklet,
+ .probe_hw = rt2800_probe_hw,
+ .get_firmware_name = rt2800soc_get_firmware_name,
+ .check_firmware = rt2800soc_check_firmware,
+ .load_firmware = rt2800soc_load_firmware,
+ .initialize = rt2x00mmio_initialize,
+ .uninitialize = rt2x00mmio_uninitialize,
+ .get_entry_state = rt2800mmio_get_entry_state,
+ .clear_entry = rt2800mmio_clear_entry,
+ .set_device_state = rt2800soc_set_device_state,
+ .rfkill_poll = rt2800_rfkill_poll,
+ .link_stats = rt2800_link_stats,
+ .reset_tuner = rt2800_reset_tuner,
+ .link_tuner = rt2800_link_tuner,
+ .gain_calibration = rt2800_gain_calibration,
+ .vco_calibration = rt2800_vco_calibration,
+ .start_queue = rt2800mmio_start_queue,
+ .kick_queue = rt2800mmio_kick_queue,
+ .stop_queue = rt2800mmio_stop_queue,
+ .flush_queue = rt2x00mmio_flush_queue,
+ .write_tx_desc = rt2800mmio_write_tx_desc,
+ .write_tx_data = rt2800_write_tx_data,
+ .write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
+ .fill_rxdone = rt2800mmio_fill_rxdone,
+ .config_shared_key = rt2800_config_shared_key,
+ .config_pairwise_key = rt2800_config_pairwise_key,
+ .config_filter = rt2800_config_filter,
+ .config_intf = rt2800_config_intf,
+ .config_erp = rt2800_config_erp,
+ .config_ant = rt2800_config_ant,
+ .config = rt2800_config,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
+};
+
+static const struct rt2x00_ops rt2800soc_ops = {
+ .name = KBUILD_MODNAME,
+ .drv_data_size = sizeof(struct rt2800_drv_data),
+ .max_ap_intf = 8,
+ .eeprom_size = EEPROM_SIZE,
+ .rf_size = RF_SIZE,
+ .tx_queues = NUM_TX_QUEUES,
+ .queue_init = rt2800mmio_queue_init,
+ .lib = &rt2800soc_rt2x00_ops,
+ .drv = &rt2800soc_rt2800_ops,
+ .hw = &rt2800soc_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+ .debugfs = &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+ return rt2x00soc_probe(pdev, &rt2800soc_ops);
+}
+
+static struct platform_driver rt2800soc_driver = {
+ .driver = {
+ .name = "rt2800_wmac",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+ .probe = rt2800soc_probe,
+ .remove = rt2x00soc_remove,
+ .suspend = rt2x00soc_suspend,
+ .resume = rt2x00soc_resume,
+};
+
+module_platform_driver(rt2800soc_driver);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96961b9a395..a81ceb61d74 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
return false;
}
+#define TXSTATUS_READ_INTERVAL 1000000
+
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
int urb_status, u32 tx_status)
{
@@ -162,7 +164,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
if (valid) {
- if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status))
+ if (!kfifo_put(&rt2x00dev->txstatus_fifo, tx_status))
rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n");
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
if (rt2800usb_txstatus_pending(rt2x00dev)) {
- /* Read register after 250 us */
- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+ /* Read register after 1 ms */
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ ktime_set(0, TXSTATUS_READ_INTERVAL),
HRTIMER_MODE_REL);
return false;
}
@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
return;
- /* Read TX_STA_FIFO register after 500 us */
- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+ /* Read TX_STA_FIFO register after 2 ms */
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
HRTIMER_MODE_REL);
}
@@ -1176,6 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Linksys */
{ USB_DEVICE(0x13b1, 0x002f) },
{ USB_DEVICE(0x1737, 0x0079) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0170) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3572) },
/* Sitecom */
@@ -1199,6 +1205,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x1103) },
/* Cameo */
{ USB_DEVICE(0x148f, 0xf301) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c1f) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7733) },
/* Hawking */
@@ -1212,6 +1220,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0789, 0x016b) },
/* NETGEAR */
{ USB_DEVICE(0x0846, 0x9012) },
+ { USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
{ USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1229,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0067) },
{ USB_DEVICE(0x0df6, 0x006a) },
+ { USB_DEVICE(0x0df6, 0x006e) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3421) },
#endif
@@ -1236,6 +1246,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1c) },
{ USB_DEVICE(0x2001, 0x3c1d) },
{ USB_DEVICE(0x2001, 0x3c1e) },
+ { USB_DEVICE(0x2001, 0x3c20) },
+ { USB_DEVICE(0x2001, 0x3c22) },
+ { USB_DEVICE(0x2001, 0x3c23) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
{ USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1271,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x043e, 0x7a32) },
/* AVM GmbH */
{ USB_DEVICE(0x057c, 0x8501) },
- /* D-Link DWA-160-B2 */
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x0241) },
+ /* D-Link */
{ USB_DEVICE(0x2001, 0x3c1a) },
+ { USB_DEVICE(0x2001, 0x3c21) },
/* Proware */
{ USB_DEVICE(0x043e, 0x7a13) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5572) },
+ /* TRENDnet */
+ { USB_DEVICE(0x20f4, 0x724a) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1333,6 +1351,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24) },
+ { USB_DEVICE(0x2019, 0xab29) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259) },
/* RadioShack */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index fe4c572db52..e4ba2ce0f21 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
+#include <linux/average.h>
#include <net/mac80211.h>
@@ -138,17 +139,6 @@
#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-/*
- * Structure for average calculation
- * The avg field contains the actual average value,
- * but avg_weight is internally used during calculations
- * to prevent rounding errors.
- */
-struct avg_val {
- int avg;
- int avg_weight;
-};
-
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@ struct link_ant {
* Similar to the avg_rssi in the link_qual structure
* this value is updated by using the walking average.
*/
- struct avg_val rssi_ant;
+ struct ewma rssi_ant;
};
/*
@@ -326,7 +316,7 @@ struct link {
/*
* Currently active average RSSI value
*/
- struct avg_val avg_rssi;
+ struct ewma avg_rssi;
/*
* Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
}
+/* Helpers for capability flags */
+
+static inline bool
+rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
+ enum rt2x00_capability_flags cap_flag)
+{
+ return test_bit(cap_flag, &rt2x00dev->cap_flags);
+}
+
+static inline bool
+rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
+}
+
+static inline bool
+rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
+}
+
+static inline bool
+rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
+}
+
+static inline bool
+rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
+}
+
+static inline bool
+rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
+}
+
+static inline bool
+rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
+}
+
+static inline bool
+rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
+}
+
+static inline bool
+rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
+}
+
+static inline bool
+rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
+}
+
+static inline bool
+rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
+}
+
+static inline bool
+rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
+}
+
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
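
The helpers added above are thin wrappers around test_bit() on rt2x00dev->cap_flags. As a hedged sketch of the same pattern, a hypothetical new capability (CAPABILITY_EXAMPLE_FEATURE is a placeholder flag, not defined by this patch) would be wrapped and used like this:

static inline bool
rt2x00_has_cap_example_feature(struct rt2x00_dev *rt2x00dev)
{
	/* Same shape as the helpers above: test one bit in cap_flags. */
	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXAMPLE_FEATURE);
}

/* Call sites then read
 *	if (rt2x00_has_cap_example_feature(rt2x00dev))
 * instead of the open-coded
 *	if (test_bit(CAPABILITY_EXAMPLE_FEATURE, &rt2x00dev->cap_flags))
 */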
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1ca4c7ffc18..3db0d99d9da 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
return;
__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
struct ieee80211_key_conf *key = tx_info->control.hw_key;
unsigned int overhead = 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
return overhead;
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fe7a7f63a9e..7f7baae5ae0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -750,7 +750,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf, &rt2x00debug_fop_queue_stats);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
intf->crypto_stats_entry =
debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
intf, &rt2x00debug_fop_crypto_stats);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 712eea9d398..9dd92a70044 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,7 +88,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00queue_start_queues(rt2x00dev);
rt2x00link_start_tuner(rt2x00dev);
rt2x00link_start_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_start_vcocal(rt2x00dev);
/*
@@ -113,7 +113,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
* Stop all queues
*/
rt2x00link_stop_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_stop_vcocal(rt2x00dev);
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
@@ -181,6 +181,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ieee80211_tx_control control = {};
struct rt2x00_dev *rt2x00dev = data;
struct sk_buff *skb;
@@ -195,7 +196,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
*/
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
while (skb) {
- rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
+ rt2x00mac_tx(rt2x00dev->hw, &control, skb);
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
}
}
@@ -234,7 +235,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
* here as they will fetch the next beacon directly prior to
* transmission.
*/
- if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
return;
/* fetch next beacon */
@@ -358,7 +359,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* mac80211 will expect the same data to be present in the
* frame as it was passed to us.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
rt2x00crypto_tx_insert_iv(entry->skb, header_length);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a0935987fa3..7f40ab8e1bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
* @local: frame is not from mac80211
*/
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
- bool local);
+ struct ieee80211_sta *sta, bool local);
/**
* rt2x00queue_update_beacon - Send new beacon from mac80211
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 8368aab86f2..c2b3b662918 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -35,50 +35,28 @@
*/
#define DEFAULT_RSSI -128
-/*
- * Helper struct and macro to work with moving/walking averages.
- * When adding a value to the average value the following calculation
- * is needed:
- *
- * avg_rssi = ((avg_rssi * 7) + rssi) / 8;
- *
- * The advantage of this approach is that we only need 1 variable
- * to store the average in (No need for a count and a total).
- * But more importantly, normal average values will over time
- * move less and less towards newly added values this results
- * that with link tuning, the device can have a very good RSSI
- * for a few minutes but when the device is moved away from the AP
- * the average will not decrease fast enough to compensate.
- * The walking average compensates this and will move towards
- * the new values correctly allowing a effective link tuning,
- * the speed of the average moving towards other values depends
- * on the value for the number of samples. The higher the number
- * of samples, the slower the average will move.
- * We use two variables to keep track of the average value to
- * compensate for the rounding errors. This can be a significant
- * error (>5dBm) if the factor is too low.
- */
-#define AVG_SAMPLES 8
-#define AVG_FACTOR 1000
-#define MOVING_AVERAGE(__avg, __val) \
-({ \
- struct avg_val __new; \
- __new.avg_weight = \
- (__avg).avg_weight ? \
- ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
- ((__val) * (AVG_FACTOR))) / \
- (AVG_SAMPLES)) : \
- ((__val) * (AVG_FACTOR)); \
- __new.avg = __new.avg_weight / (AVG_FACTOR); \
- __new; \
-})
+/* Constants for EWMA calculations. */
+#define RT2X00_EWMA_FACTOR 1024
+#define RT2X00_EWMA_WEIGHT 8
+
+static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+{
+ unsigned long avg;
+
+ avg = ewma_read(ewma);
+ if (avg)
+ return -avg;
+
+ return DEFAULT_RSSI;
+}
static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
{
struct link_ant *ant = &rt2x00dev->link.ant;
- if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success)
- return ant->rssi_ant.avg;
+ if (rt2x00dev->link.qual.rx_success)
+ return rt2x00link_get_avg_rssi(&ant->rssi_ant);
+
return DEFAULT_RSSI;
}
@@ -100,8 +78,8 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
{
- rt2x00dev->link.ant.rssi_ant.avg = 0;
- rt2x00dev->link.ant.rssi_ant.avg_weight = 0;
+ ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
}
static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
/*
* Update global RSSI
*/
- link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi);
+ ewma_add(&link->avg_rssi, -rxdesc->rssi);
/*
* Update antenna RSSI
*/
- ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
+ ewma_add(&ant->rssi_ant, -rxdesc->rssi);
}
void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
*/
rt2x00dev->link.count = 0;
memset(qual, 0, sizeof(*qual));
+ ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
/*
* Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@ static void rt2x00link_tuner(struct work_struct *work)
* collect the RSSI data we could use this. Otherwise we
* must fallback to the default RSSI value.
*/
- if (!link->avg_rssi.avg || !qual->rx_success)
+ if (!qual->rx_success)
qual->rssi = DEFAULT_RSSI;
else
- qual->rssi = link->avg_rssi.avg;
+ qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
/*
* Check if link tuning is supported by the hardware, some hardware
* do not support link tuning at all, while other devices can disable
* the feature from the EEPROM.
*/
- if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_link_tuning(rt2x00dev))
rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
/*
@@ -513,7 +493,7 @@ static void rt2x00link_vcocal(struct work_struct *work)
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
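
The rt2x00link.c hunks above drop the hand-rolled MOVING_AVERAGE macro in favour of the kernel's struct ewma from <linux/average.h>. A minimal sketch of that API as used here (the sample value is made up): struct ewma only tracks unsigned longs, so the negative dBm RSSI is stored negated and flipped back on read, while factor 1024 and weight 8 approximate the old (avg * 7 + rssi) / 8 walking average.

#include <linux/average.h>
#include <linux/kernel.h>

static void example_rssi_average(void)
{
	struct ewma avg;
	int rssi = -61;			/* hypothetical RSSI sample in dBm */
	int avg_rssi;

	/* factor 1024 keeps precision, weight 8 sets the decay rate */
	ewma_init(&avg, 1024, 8);

	/* store the magnitude, since ewma values are unsigned ... */
	ewma_add(&avg, -rssi);

	/* ... and negate again when reading the average back as dBm */
	avg_rssi = -(int)ewma_read(&avg);
	pr_debug("average rssi: %d dBm\n", avg_rssi);
}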
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f883802f350..2183e797839 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
frag_skb->data, data_length, tx_info,
(struct ieee80211_rts *)(skb->data));
- retval = rt2x00queue_write_tx_frame(queue, skb, true);
+ retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
if (retval) {
dev_kfree_skb_any(skb);
rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
@@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
goto exit_fail;
}
- if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
+ if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
goto exit_fail;
/*
@@ -382,11 +382,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
* of different types, but has no separate filter for PS Poll frames,
* FIF_CONTROL flag implies FIF_PSPOLL.
*/
- if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
*total_flags |= FIF_CONTROL | FIF_PSPOLL;
}
- if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
if (*total_flags & FIF_CONTROL)
*total_flags |= FIF_PSPOLL;
}
@@ -469,7 +469,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
return -EOPNOTSUPP;
/*
@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_flush_queue(queue, drop);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index dc49e525ae5..25da20e7e1f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -119,7 +119,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
rt2x00dev->ops = ops;
rt2x00dev->hw = hw;
rt2x00dev->irq = pci_dev->irq;
- rt2x00dev->name = pci_name(pci_dev);
+ rt2x00dev->name = ops->name;
if (pci_is_pcie(pci_dev))
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c8a33b6ee2..a5d38e8ad9e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -61,7 +61,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
* at least 8 bytes available in headroom for IV/EIV
* and 8 bytes for ICV data as tailroom.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
head_size += 8;
tail_size += 8;
}
@@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
- bool local)
+ struct ieee80211_sta *sta, bool local)
{
struct ieee80211_tx_info *tx_info;
struct queue_entry *entry;
@@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* after that we are free to use the skb->cb array
* for our information.
*/
- rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
+ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
/*
* All information is retrieved from the skb->cb array,
@@ -1033,38 +1033,21 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
- bool started;
bool tx_queue =
(queue->qid == QID_AC_VO) ||
(queue->qid == QID_AC_VI) ||
(queue->qid == QID_AC_BE) ||
(queue->qid == QID_AC_BK);
- mutex_lock(&queue->status_lock);
/*
- * If the queue has been started, we must stop it temporarily
- * to prevent any new frames to be queued on the device. If
- * we are not dropping the pending frames, the queue must
- * only be stopped in the software and not the hardware,
- * otherwise the queue will never become empty on its own.
+ * If we are not supposed to drop any pending
+ * frames, this means we must force a start (=kick)
+ * to the queue to make sure the hardware will
+ * start transmitting.
*/
- started = test_bit(QUEUE_STARTED, &queue->flags);
- if (started) {
- /*
- * Pause the queue
- */
- rt2x00queue_pause_queue(queue);
-
- /*
- * If we are not supposed to drop any pending
- * frames, this means we must force a start (=kick)
- * to the queue to make sure the hardware will
- * start transmitting.
- */
- if (!drop && tx_queue)
- queue->rt2x00dev->ops->lib->kick_queue(queue);
- }
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
/*
* Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
if (unlikely(!rt2x00queue_empty(queue)))
rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
queue->qid);
-
- /*
- * Restore the queue to the previous status
- */
- if (started)
- rt2x00queue_unpause_queue(queue);
-
- mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 88289873c0c..4e121627925 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -523,7 +523,9 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue->qid);
+ rt2x00queue_stop_queue(queue);
rt2x00queue_flush_queue(queue, true);
+ rt2x00queue_start_queue(queue);
}
static int rt2x00usb_dma_timeout(struct data_queue *queue)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 54d3ddfc988..a5b69cb4901 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -685,7 +685,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -813,10 +813,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else if (rt2x00_rf(rt2x00dev, RF2529)) {
- if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_double_antenna(rt2x00dev))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
} else {
low_bound = 0x20;
up_bound = 0x40;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -2578,7 +2578,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* eeprom word.
*/
if (rt2x00_rf(rt2x00dev, RF2529) &&
- !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
+ !rt2x00_has_cap_double_antenna(rt2x00dev)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
spec->num_channels = 14;
spec->channels = rf_vals_noseq;
} else {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1d3880e09a1..1baf9c896dc 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,8 +595,8 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
switch (ant->rx) {
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
- temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
- && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+ temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
+ (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
up_bound = 0x1c;
}
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x14;
up_bound += 0x10;
}
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
}
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
if (lna == 3 || lna == 2)
offset += 10;
} else {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index fc207b268e4..a91506b12a6 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8bb4a9a01a1..ff784072fb4 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -37,6 +37,7 @@
#include <linux/ip.h>
#include <linux/module.h>
+#include <linux/udp.h>
/*
*NOTICE!!!: This file will be very big, we should
@@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
if (!ieee80211_is_data(fc))
return false;
+ ip = (const struct iphdr *)(skb->data + mac_hdr_len +
+ SNAP_SIZE + PROTOC_TYPE_SIZE);
+ ether_type = be16_to_cpup((__be16 *)
+ (skb->data + mac_hdr_len + SNAP_SIZE));
- ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
- SNAP_SIZE + PROTOC_TYPE_SIZE);
- ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
- /* ether_type = ntohs(ether_type); */
-
- if (ETH_P_IP == ether_type) {
- if (IPPROTO_UDP == ip->protocol) {
- struct udphdr *udp = (struct udphdr *)((u8 *) ip +
- (ip->ihl << 2));
- if (((((u8 *) udp)[1] == 68) &&
- (((u8 *) udp)[3] == 67)) ||
- ((((u8 *) udp)[1] == 67) &&
- (((u8 *) udp)[3] == 68))) {
- /*
- * 68 : UDP BOOTP client
- * 67 : UDP BOOTP server
- */
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- is_tx ? "Tx" : "Rx");
-
- if (is_tx) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->
- works.lps_change_work);
- ppsc->last_delaylps_stamp_jiffies =
- jiffies;
- }
+ switch (ether_type) {
+ case ETH_P_IP: {
+ struct udphdr *udp;
+ u16 src;
+ u16 dst;
- return true;
- }
- }
- } else if (ETH_P_ARP == ether_type) {
- if (is_tx) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
- }
+ if (ip->protocol != IPPROTO_UDP)
+ return false;
+ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
+ src = be16_to_cpu(udp->source);
+ dst = be16_to_cpu(udp->dest);
+
+ /* If this case involves port 68 (UDP BOOTP client) connecting
+ * with port 67 (UDP BOOTP server), then return true so that
+ * the lowest speed is used.
+ */
+ if (!((src == 68 && dst == 67) || (src == 67 && dst == 68)))
+ return false;
- return true;
- } else if (ETH_P_PAE == ether_type) {
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "dhcp %s !!\n", is_tx ? "Tx" : "Rx");
+ break;
+ }
+ case ETH_P_ARP:
+ break;
+ case ETH_P_PAE:
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
-
- if (is_tx) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
- }
-
- return true;
- } else if (ETH_P_IPV6 == ether_type) {
- /* IPv6 */
- return true;
+ break;
+ case ETH_P_IPV6:
+ /* TODO: Is this right? */
+ return false;
+ default:
+ return false;
}
-
- return false;
+ if (is_tx) {
+ rtlpriv->enter_ps = false;
+ schedule_work(&rtlpriv->works.lps_change_work);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+ return true;
}
EXPORT_SYMBOL_GPL(rtl_is_special_data);
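
For clarity, the port test introduced above can be read as the following standalone predicate (is_bootp_ports is an illustrative helper, not part of the patch): a frame counts as "special" when it is UDP traffic between port 68 (BOOTP/DHCP client) and port 67 (BOOTP/DHCP server) in either direction, which then triggers the same low-rate/power-save handling applied to ARP and EAPOL frames.

#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/udp.h>

static bool is_bootp_ports(const struct udphdr *udp)
{
	u16 src = be16_to_cpu(udp->source);
	u16 dst = be16_to_cpu(udp->dest);

	/* 68 = BOOTP/DHCP client, 67 = BOOTP/DHCP server */
	return (src == 68 && dst == 67) || (src == 67 && dst == 68);
}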
@@ -1613,6 +1602,35 @@ err_free:
}
EXPORT_SYMBOL(rtl_send_smps_action);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Unknown Scan Backup operation.\n");
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
+
/* There seem to be issues in mac80211 regarding when del ba frames can be
* received. As a work around, we make a fake del_ba if we receive a ba_req;
* however, rx_agg was opened to let mac80211 release some ba related
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 0e5fe0902da..0cd07420777 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
u8 *sa, u8 *bssid, u16 tid);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index 35e00086a52..0105e6c1901 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -41,12 +41,12 @@
#define CAM_CONFIG_USEDK 1
#define CAM_CONFIG_NO_USEDK 0
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id);
+ u32 ul_key_id);
void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 733b7ce7f0e..210ce7cd94d 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
mutex_lock(&rtlpriv->locks.conf_mutex);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
/*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mac->p2p = 0;
mac->vif = NULL;
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_LINKED;
mac->cnt_after_linked = 0;
mac->assoc_id = bss_conf->aid;
- memcpy(mac->bssid, bss_conf->bssid, 6);
+ memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
if (rtlpriv->cfg->ops->linked_set_reg)
rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
- memcpy(mac->bssid, bss_conf->bssid, 6);
+ memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
rcu_read_lock();
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 838a1ed3f19..2ffc7298f68 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -262,9 +262,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
sizeof(u8), GFP_ATOMIC);
if (!efuse_tbl)
return;
- efuse_word = kmalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+ efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
if (!efuse_word)
- goto done;
+ goto out;
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16),
GFP_ATOMIC);
@@ -378,6 +378,7 @@ done:
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
kfree(efuse_word[i]);
kfree(efuse_word);
+out:
kfree(efuse_tbl);
}
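
A minimal sketch of the allocation/unwind shape the read_efuse() fix above establishes (all names here are placeholders): allocating the pointer array with kzalloc() makes it safe for the common cleanup path to kfree() slots that were never filled in, and the new out: label frees the first buffer when the second allocation itself fails.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static int example_two_stage_alloc(size_t n)
{
	u8 *tbl;
	u16 **words;
	size_t i;
	int ret = -ENOMEM;

	tbl = kzalloc(n, GFP_ATOMIC);
	if (!tbl)
		return -ENOMEM;

	/* kzalloc, not kmalloc: every slot starts NULL, so the unwind
	 * below may safely kfree() slots that were never allocated. */
	words = kzalloc(n * sizeof(*words), GFP_ATOMIC);
	if (!words)
		goto out;

	for (i = 0; i < n; i++) {
		words[i] = kmalloc(sizeof(**words), GFP_ATOMIC);
		if (!words[i])
			goto done;
	}
	ret = 0;	/* ... fill tbl/words and copy results out here ... */
done:
	for (i = 0; i < n; i++)
		kfree(words[i]);
	kfree(words);
out:
	kfree(tbl);
	return ret;
}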
@@ -1203,20 +1204,18 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
- int continual = true;
u16 efuse_addr = 0;
u8 hworden;
u8 efuse_data, word_cnts;
- while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
- && (efuse_addr < EFUSE_MAX_SIZE)) {
- if (efuse_data != 0xFF) {
- hworden = efuse_data & 0x0F;
- word_cnts = efuse_calculate_word_cnts(hworden);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- continual = false;
- }
+ while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ efuse_addr < EFUSE_MAX_SIZE) {
+ if (efuse_data == 0xFF)
+ break;
+
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
}
return efuse_addr;
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 395a326acfb..1663b3afd41 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -104,20 +104,19 @@ struct efuse_priv {
u8 tx_power_g[14];
};
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
- u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 703f839af6c..0f494444bcd 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -736,7 +736,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@ fail2:
fail1:
if (hw)
ieee80211_free_hw(hw);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
@@ -2064,8 +2062,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtl_pci_disable_aspm(hw);
- pci_set_drvdata(pdev, NULL);
-
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index b68cae3024f..e06971be7df 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
} else {
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index e655c047322..d67f9c731cc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -1136,34 +1136,6 @@ void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
&bw40_pwr[0], channel);
}
-void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
index f1acd6d27e4..89f0f1ef146 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -200,37 +200,35 @@ enum _ANT_DIV_TYPE {
CGCS_RX_SW_ANTDIV = 0x05,
};
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index c254693a1e6..347af1e4f43 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -244,7 +245,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
.set_bw_mode = rtl88e_phy_set_bw_mode,
.switch_channel = rtl88e_phy_sw_chnl,
.dm_watchdog = rtl88e_dm_watchdog,
- .scan_operation_backup = rtl88e_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl88e_phy_set_rf_power_state,
.led_control = rtl88ee_led_control,
.set_desc = rtl88ee_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 68685a89825..aece6c9cccf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -478,7 +478,6 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index d2d57a27a7c..e9caa5d4cff 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -541,29 +541,6 @@ EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
- u8 h2c_parameter[3] = { 0 };
-
- return;
-
- if (tmpentry_max_pwdb != 0) {
- rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
- } else {
- rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
- }
-
- if (tmpentry_min_pwdb != 0xff) {
- rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
- } else {
- rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
- }
-
- h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0;
-
- rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
s8 cck_index = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
- s8 txpwr_level[2] = {0, 0};
+ s8 txpwr_level[3] = {0, 0, 0};
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 246e5352f2e..0c0e78263a6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -592,36 +592,6 @@ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
-
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index cec10d69649..e79dabe9ba1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -205,8 +203,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 3cfa1bb0f47..fa24de43ce7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -152,8 +152,6 @@ enum version_8192c {
#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
- ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
#define IS_CHIP_VENDOR_UMC(version) \
((version & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index d5e3b704f93..94486cca400 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -188,36 +186,29 @@ struct tx_power_struct {
};
bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
+ enum radio_path rfpath);
void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
+ enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
- u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -225,28 +216,25 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
- u32 rfpath);
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+ u32 rfpath);
bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+ enum rf_pwrstate rfpwr_state);
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
void rtl92c_phy_set_io(struct ieee80211_hw *hw);
void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 offset);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
+ enum radio_path rfpath, u32 offset);
u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
+ enum radio_path rfpath, u32 offset, u32 data);
void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
+ enum radio_path rfpath, u32 offset,
+ u32 data);
void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask,
- u32 data);
+ u32 regaddr, u32 bitmask, u32 data);
bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index bd4aef74c05..8922ecb47ad 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -560,7 +560,6 @@
#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
#define EEPROM_DEFAULT_HT20_DIFF 2
-#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
@@ -639,17 +638,8 @@
#define EEPROM_TXPWR_GROUP 0x6F
-#define EEPROM_TSSI_A 0x76
-#define EEPROM_TSSI_B 0x77
-#define EEPROM_THERMAL_METER 0x78
-
#define EEPROM_CHANNELPLAN 0x75
-#define RF_OPTION1 0x79
-#define RF_OPTION2 0x7A
-#define RF_OPTION3 0x7B
-#define RF_OPTION4 0x7C
-
#define STOPBECON BIT(6)
#define STOPHIGHT BIT(5)
#define STOPMGT BIT(4)
@@ -689,13 +679,6 @@
#define RSV_CTRL 0x001C
#define RD_CTRL 0x0524
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
#define REG_USB_VID 0xFE60
#define REG_USB_PID 0xFE62
#define REG_USB_OPTIONAL 0xFE64
@@ -1196,9 +1179,6 @@
#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_LOAD 1
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 6c8d56efcea..d8fe68b389d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -34,11 +34,10 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 14203561b6e..b790320d203 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -219,7 +220,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92c_phy_set_rf_power_state,
.led_control = rtl92ce_led_control,
.set_desc = rtl92ce_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 6ad23b413eb..52abf0a862f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -420,7 +420,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->recvsignalpower + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index da4f587199e..e26312fb435 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -32,6 +32,7 @@
#include "../usb.h"
#include "../ps.h"
#include "../cam.h"
+#include "../stats.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -738,16 +739,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
return ret_val;
}
-static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
long currsig)
{
@@ -778,7 +769,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
- struct rx_desc_92c *pdesc,
+ struct rx_desc_92c *p_desc,
struct rx_fwinfo_92c *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
@@ -793,11 +784,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
u32 rssi, total_rssi = 0;
bool in_powersavemode = false;
bool is_cck_rate;
+ u8 *pdesc = (u8 *)p_desc;
- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+ is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc);
pstats->packet_matchbssid = packet_match_bssid;
pstats->packet_toself = packet_toself;
- pstats->is_cck = is_cck_rate;
pstats->packet_beacon = packet_beacon;
pstats->is_cck = is_cck_rate;
pstats->RX_SIGQ[0] = -1;
@@ -913,180 +904,6 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
(hw, total_rssi /= rf_rx_num));
}
-static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
- u32 last_rssi, tmpval;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- rtlpriv->stats.rssi_calculate_cnt++;
- if (rtlpriv->stats.ui_rssi.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi =
- rtlpriv->stats.ui_rssi.elements[rtlpriv->
- stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
- rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
- index++] = pstats->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength =
- _rtl92c_translate_todbm(hw, (u8) tmpval);
- pstats->rssi = rtlpriv->stats.signal_strength;
- }
- if (!pstats->is_cck && pstats->packet_toself) {
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
- continue;
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstats->rx_mimo_signalstrength[rfpath];
- }
- if (pstats->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
-
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] +
- 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
-}
-
-static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
- if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power =
- (rtlpriv->stats.recv_signal_power * 5 +
- pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undec_sm_pwdb = 0;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- return;
- } else {
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- }
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb += 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92c_update_rxsignalstatistics(hw, pstats);
- }
-}
-
-static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm = 0, n_stream, tmpval;
-
- if (pstats->signalquality != 0) {
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (rtlpriv->stats.LINK_Q.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.LINK_Q.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm =
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index];
- rtlpriv->stats.LINK_Q.total_val -=
- last_evm;
- }
- rtlpriv->stats.LINK_Q.total_val +=
- pstats->signalquality;
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index++] =
- pstats->signalquality;
- if (rtlpriv->stats.LINK_Q.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.LINK_Q.index = 0;
- tmpval = rtlpriv->stats.LINK_Q.total_val /
- rtlpriv->stats.LINK_Q.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- for (n_stream = 0; n_stream < 2;
- n_stream++) {
- if (pstats->RX_SIGQ[n_stream] != -1) {
- if (!rtlpriv->stats.RX_EVM[n_stream]) {
- rtlpriv->stats.RX_EVM[n_stream]
- = pstats->RX_SIGQ[n_stream];
- }
- rtlpriv->stats.RX_EVM[n_stream] =
- ((rtlpriv->stats.RX_EVM
- [n_stream] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->RX_SIGQ
- [n_stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
- } else {
- ;
- }
-}
-
-static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
-{
- if (!pcurrent_stats->packet_matchbssid &&
- !pcurrent_stats->packet_beacon)
- return;
- _rtl92c_process_ui_rssi(hw, pcurrent_stats);
- _rtl92c_process_pwdb(hw, pcurrent_stats);
- _rtl92c_process_LINK_Q(hw, pcurrent_stats);
-}
-
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
packet_matchbssid, packet_toself,
packet_beacon);
- _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+ rtl_process_phyinfo(hw, tmp_buf, pstats);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 090fd33a158..11b439d6b67 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -34,15 +34,14 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
+ enum radio_path rfpath);
void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel);
void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 2bd59852621..9936de716ad 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -31,6 +31,7 @@
#include "../core.h"
#include "../usb.h"
#include "../efuse.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -117,7 +118,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
.led_control = rtl92cu_led_control,
.enable_hw_sec = rtl92cu_enable_hw_security_config,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 5a060e537fb..1bc21ccfa71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -303,10 +303,10 @@ out:
bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
- u8 *p_desc, struct sk_buff *skb)
+ u8 *pdesc, struct sk_buff *skb)
{
struct rx_fwinfo_92c *p_drvinfo;
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc;
u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
@@ -345,12 +345,11 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
stats->rx_bufshift);
- rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
- rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
+ rx_status->signal = stats->recvsignalpower + 10;
return true;
}
@@ -365,7 +364,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
u8 *rxdesc;
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
struct rx_fwinfo_92c *p_drvinfo;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index f700f7a614b..7908e1c8581 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -840,9 +840,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[2];
+ u8 ofdm_index[3];
s8 cck_index = 0;
- u8 ofdm_index_old[2] = {0, 0};
+ u8 ofdm_index_old[3] = {0, 0, 0};
s8 cck_index_old = 0;
u8 index;
int i;
@@ -1118,6 +1118,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
+ if (cck_index >= CCK_TABLE_SIZE)
+ cck_index = CCK_TABLE_SIZE - 1;
+ if (cck_index < 0)
+ cck_index = 0;
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 7dd8f6de055..c4a7db9135d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1194,25 +1194,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
* mac80211 will send pkt when scan */
void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92d_dm_init_edca_turbo(hw);
- return;
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
- break;
- case AC0_BE:
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
- break;
- default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
- break;
- }
}
void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
index 7c9f7a2f1e4..1bc7b1a96d4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all);
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
- u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
- u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+ u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
void rtl92de_suspend(struct ieee80211_hw *hw);
void rtl92de_resume(struct ieee80211_hw *hw);
void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 840bac5fa2f..13196cc4b1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1022,34 +1022,6 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
}
-void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- rtlhal->current_bandtypebackup =
- rtlhal->current_bandtype;
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index f074952bf25..48d5c6835b6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -127,34 +125,32 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
*flag);
}
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum rf_content content,
enum radio_path rfpath);
bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@ void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
unsigned long *flag);
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
-void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
index 0fe1a48593e..7303d12c266 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -30,15 +30,13 @@
#ifndef __RTL92D_RF_H__
#define __RTL92D_RF_H__
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
- bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index c18c04bf0c1..edab5a5351b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -236,7 +237,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
.dm_watchdog = rtl92d_dm_watchdog,
- .scan_operation_backup = rtl92d_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index b8ec718a0fa..0eb0f4ae592 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -525,8 +525,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
- rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
+ rx_status->signal = stats->recvsignalpower + 10;
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 84d1181795b..c81c8359194 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -425,14 +425,9 @@
#define EXT_IMEM_CODE_DONE BIT(2)
#define IMEM_CHK_RPT BIT(1)
#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CHK_RPT BIT(1)
#define EMEM_CODE_DONE BIT(2)
#define EMEM_CHK_RPT BIT(3)
-#define DMEM_CODE_DONE BIT(4)
#define IMEM_RDY BIT(5)
-#define BASECHG BIT(6)
-#define FWRDY BIT(7)
#define LOAD_FW_READY (IMEM_CODE_DONE | \
IMEM_CHK_RPT | \
EMEM_CODE_DONE | \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 5061f1db3f0..92d38ab3c60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
rtlefuse->pwrgroup_ht40
[RF90_PATH_A][chnl - 1]) {
pwrdiff_limit[i] =
- rtlefuse->pwrgroup_ht20
+ rtlefuse->pwrgroup_ht40
[RF90_PATH_A][chnl - 1];
}
} else {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index c7095118de6..27efbcdac6a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -329,8 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
/*rx_status->qual = stats->signal; */
- rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
+ rx_status->signal = stats->recvsignalpower + 10;
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index eafbb18dd48..5d318a85eda 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -934,35 +934,6 @@ static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
return pwrout_dbm;
}
-void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index e7a59eba351..007ebdbbe10 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -183,42 +183,40 @@ struct tx_power_struct {
u32 mcs_original_offset[4][16];
};
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
- u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
- long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+ u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+ long power_indbm);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
index d0f9dd79abe..57f1933ee66 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -32,12 +32,11 @@
#define RF6052_MAX_TX_PWR 0x3F
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index d9ee2efffe5..62b204faf77 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -33,6 +33,7 @@
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -220,7 +221,7 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.set_bw_mode = rtl8723ae_phy_set_bw_mode,
.switch_channel = rtl8723ae_phy_sw_chnl,
.dm_watchdog = rtl8723ae_dm_watchdog,
- .scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
.led_control = rtl8723ae_led_control,
.set_desc = rtl8723ae_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index bcd82a1020a..50b7be3f3a6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -359,7 +359,6 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e56778cac9b..6e2b5c5c83c 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -455,7 +455,6 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -498,7 +497,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -582,12 +580,15 @@ static void _rtl_rx_work(unsigned long param)
static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
unsigned int len)
{
+#if NET_IP_ALIGN != 0
unsigned int padding = 0;
+#endif
/* make function no-op when possible */
if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
return 0;
+#if NET_IP_ALIGN != 0
/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
/* TODO: deduplicate common code, define helper function instead? */
@@ -608,6 +609,7 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
padding ^= NET_IP_ALIGN;
return padding;
+#endif
}
#define __RADIO_TAP_SIZE_RSV 32
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 703258742d2..0c65386fa30 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -77,11 +77,7 @@
#define RTL_SLOT_TIME_9 9
#define RTL_SLOT_TIME_20 20
-/*related with tcp/ip. */
-/*if_ehther.h*/
-#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
-#define ETH_P_IP 0x0800 /*Internet Protocol packet */
-#define ETH_P_ARP 0x0806 /*Address Resolution packet */
+/*related to tcp/ip. */
#define SNAP_SIZE 6
#define PROTOC_TYPE_SIZE 2
@@ -192,8 +188,6 @@ enum hardware_type {
(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
#define IS_HARDWARE_TYPE_8723(rtlhal) \
(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
-#define IS_HARDWARE_TYPE_8723U(rtlhal) \
- (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
#define RX_HAL_IS_CCK_RATE(_pdesc)\
(_pdesc->rxmcs == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index c7dc6feab2f..1342f81e683 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -243,7 +243,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
struct wl1251 *wl;
int ret;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (!pdata) {
wl1251_error("no platform data");
return -ENODEV;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index fd02060038d..2c3bd1bff3f 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -424,8 +424,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
-#define WL1251_FW_NAME "wl1251-fw.bin"
-#define WL1251_NVS_NAME "wl1251-nvs.bin"
+#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
+#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 1c627da8508..be7129ba16a 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -333,11 +333,11 @@ static struct wlcore_conf wl12xx_conf = {
.always = 0,
},
.fwlog = {
- .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mode = WL12XX_FWLOG_CONTINUOUS,
.mem_blocks = 2,
.severity = 0,
.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
- .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .output = WL12XX_FWLOG_OUTPUT_DBG_PINS,
.threshold = 0,
},
.rate = {
@@ -717,6 +717,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
goto out;
}
+ wl->fw_mem_block_size = 256;
+ wl->fwlog_end = 0x2000000;
+
/* common settings */
wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
@@ -1262,9 +1265,10 @@ static int wl12xx_boot(struct wl1271 *wl)
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
- MAX_TX_RETRY_EVENT_ID |
CHANNEL_SWITCH_COMPLETE_EVENT_ID;
+ wl->ap_event_mask = MAX_TX_RETRY_EVENT_ID;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -1648,6 +1652,11 @@ static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
return true;
}
+static u32 wl12xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+ return hwaddr << 5;
+}
+
static int wl12xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl12xx_ops = {
@@ -1684,6 +1693,7 @@ static struct wlcore_ops wl12xx_ops = {
.channel_switch = wl12xx_cmd_channel_switch,
.pre_pkt_send = NULL,
.set_peer_cap = wl12xx_set_peer_cap,
+ .convert_hwaddr = wl12xx_convert_hwaddr,
.lnk_high_prio = wl12xx_lnk_high_prio,
.lnk_low_prio = wl12xx_lnk_low_prio,
};
@@ -1704,7 +1714,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
- struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
wl->rtable = wl12xx_rtable;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7aa0eb848c5..ec37b16585d 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -456,11 +456,11 @@ static struct wlcore_conf wl18xx_conf = {
.always = 0,
},
.fwlog = {
- .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mode = WL12XX_FWLOG_CONTINUOUS,
.mem_blocks = 2,
.severity = 0,
.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
- .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .output = WL12XX_FWLOG_OUTPUT_DBG_PINS,
.threshold = 0,
},
.rate = {
@@ -505,7 +505,7 @@ static struct wlcore_conf wl18xx_conf = {
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.ht = {
- .mode = HT_MODE_DEFAULT,
+ .mode = HT_MODE_WIDE,
},
.phy = {
.phy_standalone = 0x00,
@@ -516,7 +516,7 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.auto_detect = 0x00,
.dedicated_fem = FEM_NONE,
.low_band_component = COMPONENT_3_WAY_SWITCH,
- .low_band_component_type = 0x04,
+ .low_band_component_type = 0x05,
.high_band_component = COMPONENT_2_WAY_SWITCH,
.high_band_component_type = 0x09,
.tcxo_ldo_voltage = 0x00,
@@ -556,15 +556,15 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.per_chan_pwr_limit_arr_11p = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff },
.psat = 0,
- .low_power_val = 0x08,
- .med_power_val = 0x12,
- .high_power_val = 0x18,
- .low_power_val_2nd = 0x05,
- .med_power_val_2nd = 0x0a,
- .high_power_val_2nd = 0x14,
.external_pa_dc2dc = 0,
.number_of_assembled_ant2_4 = 2,
.number_of_assembled_ant5 = 1,
+ .low_power_val = 0xff,
+ .med_power_val = 0xff,
+ .high_power_val = 0xff,
+ .low_power_val_2nd = 0xff,
+ .med_power_val_2nd = 0xff,
+ .high_power_val_2nd = 0xff,
.tx_rf_margin = 1,
},
};
@@ -623,6 +623,18 @@ static const int wl18xx_rtable[REG_TABLE_LEN] = {
[REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
};
+static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
+ [CLOCK_CONFIG_16_2_M] = { 8, 121, 0, 0, false },
+ [CLOCK_CONFIG_16_368_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_16_8_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_19_2_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_26_M] = { 11, 104, 0, 0, false },
+ [CLOCK_CONFIG_32_736_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_33_6_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_52_M] = { 11, 104, 0, 0, false },
+};
+
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
[CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
[CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
@@ -674,6 +686,9 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
goto out;
}
+ wl->fw_mem_block_size = 272;
+ wl->fwlog_end = 0x40000000;
+
wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
@@ -704,6 +719,23 @@ static int wl18xx_set_clk(struct wl1271 *wl)
wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
+ /* coex PLL configuration */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
+ wl18xx_clk_table_coex[clk_freq].n);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
+ wl18xx_clk_table_coex[clk_freq].m);
+ if (ret < 0)
+ goto out;
+
+ /* bypass the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
wl18xx_clk_table[clk_freq].n);
if (ret < 0)
@@ -745,6 +777,30 @@ static int wl18xx_set_clk(struct wl1271 *wl)
PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
}
+ /* choose WCS PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
+ PLLSH_WL_PLL_SEL_WCS_PLL);
+ if (ret < 0)
+ goto out;
+
+ /* enable both PLLs */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
+ udelay(1000);
+
+ /* disable coex PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
+ /* reset the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
@@ -935,10 +991,11 @@ static int wl18xx_boot(struct wl1271 *wl)
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
- MAX_TX_FAILURE_EVENT_ID |
CHANNEL_SWITCH_COMPLETE_EVENT_ID |
DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+ wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -1175,16 +1232,48 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
}
}
+static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
+{
+ switch (rdl_num) {
+ case RDL_1_HP:
+ return "183xH";
+ case RDL_2_SP:
+ return "183x or 180x";
+ case RDL_3_HP:
+ return "187xH";
+ case RDL_4_SP:
+ return "187x";
+ case RDL_5_SP:
+ return "RDL11 - Not Supported";
+ case RDL_6_SP:
+ return "180xD";
+ case RDL_7_SP:
+ return "RDL13 - Not Supported (1893Q)";
+ case RDL_8_SP:
+ return "18xxQ";
+ case RDL_NONE:
+ return "UNTRIMMED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
u32 fuse;
- s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
+ s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
+ if (ret < 0)
+ goto out;
+
+ package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
+
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
if (ret < 0)
goto out;
@@ -1192,7 +1281,7 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
- if (rom <= 0xE)
+ if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
metal = (fuse & WL18XX_METAL_VER_MASK) >>
WL18XX_METAL_VER_OFFSET;
else
@@ -1204,11 +1293,9 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
goto out;
rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
- if (rdl_ver > RDL_MAX)
- rdl_ver = RDL_NONE;
- wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
- rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
+ wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
+ wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
if (ver)
*ver = pg_ver;
@@ -1521,6 +1608,11 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
return lnk->allocated_pkts < thold;
}
+static u32 wl18xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+ return hwaddr & ~0x80000000;
+}
+
static int wl18xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl18xx_ops = {
@@ -1558,6 +1650,7 @@ static struct wlcore_ops wl18xx_ops = {
.pre_pkt_send = wl18xx_pre_pkt_send,
.sta_rc_update = wl18xx_sta_rc_update,
.set_peer_cap = wl18xx_set_peer_cap,
+ .convert_hwaddr = wl18xx_convert_hwaddr,
.lnk_high_prio = wl18xx_lnk_high_prio,
.lnk_low_prio = wl18xx_lnk_low_prio,
};
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 05dd8bad274..a433a75f3cd 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -114,6 +114,11 @@
#define PLATFORM_DETECTION 0xA0E3E0
#define OCS_EN 0xA02080
#define PRIMARY_CLK_DETECT 0xA020A6
+#define PLLSH_COEX_PLL_N 0xA02384
+#define PLLSH_COEX_PLL_M 0xA02382
+#define PLLSH_COEX_PLL_SWALLOW_EN 0xA0238E
+#define PLLSH_WL_PLL_SEL 0xA02398
+
#define PLLSH_WCS_PLL_N 0xA02362
#define PLLSH_WCS_PLL_M 0xA02360
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
@@ -128,19 +133,30 @@
#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
+#define PLLSH_WL_PLL_EN_VAL1 0x7
+#define PLLSH_WL_PLL_EN_VAL2 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2 0x11
+
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
+#define PLLSH_WL_PLL_SEL_WCS_PLL 0x0
+#define PLLSH_WL_PLL_SEL_COEX_PLL 0x1
+
#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
#define WL18XX_PG_VER_MASK 0x70
#define WL18XX_PG_VER_OFFSET 4
-#define WL18XX_ROM_VER_MASK 0x3
-#define WL18XX_ROM_VER_OFFSET 0
+#define WL18XX_ROM_VER_MASK 0x3e00
+#define WL18XX_ROM_VER_OFFSET 9
#define WL18XX_METAL_VER_MASK 0xC
#define WL18XX_METAL_VER_OFFSET 2
#define WL18XX_NEW_METAL_VER_MASK 0x180
#define WL18XX_NEW_METAL_VER_OFFSET 7
+#define WL18XX_PACKAGE_TYPE_OFFSET 13
+#define WL18XX_PACKAGE_TYPE_WSP 0
+
#define WL18XX_REG_FUSE_DATA_2_3 0xA02614
#define WL18XX_RDL_VER_MASK 0x1f00
#define WL18XX_RDL_VER_OFFSET 8
@@ -201,24 +217,21 @@ enum {
NUM_BOARD_TYPES,
};
-enum {
+enum wl18xx_rdl_num {
RDL_NONE = 0,
RDL_1_HP = 1,
RDL_2_SP = 2,
RDL_3_HP = 3,
RDL_4_SP = 4,
+ RDL_5_SP = 0x11,
+ RDL_6_SP = 0x12,
+ RDL_7_SP = 0x13,
+ RDL_8_SP = 0x14,
_RDL_LAST,
RDL_MAX = _RDL_LAST - 1,
};
-static const char * const rdl_names[] = {
- [RDL_NONE] = "",
- [RDL_1_HP] = "1853 SISO",
- [RDL_2_SP] = "1857 MIMO",
- [RDL_3_HP] = "1893 SISO",
- [RDL_4_SP] = "1897 MIMO",
-};
/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 7a970cd9c55..ec83675a244 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -162,7 +162,8 @@ int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
wl1271_debug(DEBUG_ACX, "acx mem map");
- ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len);
+ ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map,
+ sizeof(struct acx_header), len);
if (ret < 0)
return ret;
@@ -722,6 +723,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
wl1271_debug(DEBUG_ACX, "acx statistics");
ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
+ sizeof(struct acx_header),
wl->stats.fw_stats_len);
if (ret < 0) {
wl1271_warning("acx statistics failed: %d", ret);
@@ -1470,8 +1472,8 @@ int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
tsf_info->role_id = wlvif->role_id;
- ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO,
- tsf_info, sizeof(*tsf_info));
+ ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, tsf_info,
+ sizeof(struct acx_header), sizeof(*tsf_info));
if (ret < 0) {
wl1271_warning("acx tsf info interrogate failed");
goto out;
@@ -1752,7 +1754,7 @@ int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
acx->role_id = wlvif->role_id;
ret = wl1271_cmd_interrogate(wl, ACX_ROAMING_STATISTICS_TBL,
- acx, sizeof(*acx));
+ acx, sizeof(*acx), sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx roaming statistics failed: %d", ret);
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c9e060795d1..34d9dfff2ad 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,7 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
u16 status;
u16 poll_count = 0;
- if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
+ if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
+ id != CMD_STOP_FWLOGGER))
return -EIO;
cmd = buf;
@@ -845,7 +846,8 @@ EXPORT_SYMBOL_GPL(wl1271_cmd_test);
* @buf: buffer for the response, including all headers, must work with dma
* @len: length of buf
*/
-int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
+ size_t cmd_len, size_t res_len)
{
struct acx_header *acx = buf;
int ret;
@@ -854,10 +856,10 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
acx->id = cpu_to_le16(id);
- /* payload length, does not include any headers */
- acx->len = cpu_to_le16(len - sizeof(*acx));
+ /* response payload length, does not include any headers */
+ acx->len = cpu_to_le16(res_len - sizeof(*acx));
- ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len);
+ ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, cmd_len, res_len);
if (ret < 0)
wl1271_error("INTERROGATE command failed");
@@ -1126,6 +1128,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 template_id_2_4 = wl->scan_templ_id_2_4;
u16 template_id_5 = wl->scan_templ_id_5;
+ wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
+
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie_len);
if (!skb) {
@@ -1135,8 +1139,6 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (ie_len)
memcpy(skb_put(skb, ie_len), ie, ie_len);
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
-
if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1174,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
if (!skb)
goto out;
- wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+ wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1609,43 @@ out:
static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
{
- int idx = -1;
-
+ /*
+ * map the given band/channel to the respective predefined
+ * bit expected by the fw
+ */
switch (band) {
- case IEEE80211_BAND_5GHZ:
- if (ch >= 8 && ch <= 16)
- idx = ((ch-8)/4 + 18);
- else if (ch >= 34 && ch <= 64)
- idx = ((ch-34)/2 + 3 + 18);
- else if (ch >= 100 && ch <= 140)
- idx = ((ch-100)/4 + 15 + 18);
- else if (ch >= 149 && ch <= 165)
- idx = ((ch-149)/4 + 26 + 18);
- else
- idx = -1;
- break;
case IEEE80211_BAND_2GHZ:
+ /* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
- idx = ch - 1;
- else
- idx = -1;
+ return ch - 1;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (ch) {
+ case 8 ... 16:
+ /* channels 8,12,16 are mapped to 18,19,20 */
+ return 18 + (ch-8)/4;
+ case 34 ... 48:
+ /* channels 34,36..48 are mapped to 21..28 */
+ return 21 + (ch-34)/2;
+ case 52 ... 64:
+ /* channels 52,56..64 are mapped to 29..32 */
+ return 29 + (ch-52)/4;
+ case 100 ... 140:
+ /* channels 100,104..140 are mapped to 33..43 */
+ return 33 + (ch-100)/4;
+ case 149 ... 165:
+ /* channels 149,153..165 are mapped to 44..48 */
+ return 44 + (ch-149)/4;
+ default:
+ break;
+ }
break;
default:
- wl1271_error("get reg conf ch idx - unknown band: %d",
- (int)band);
+ break;
}
- return idx;
+ wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
+ return -1;
}
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1658,7 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
- if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+ if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index fd34123047c..323d4a856e4 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -45,7 +45,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum ieee80211_band band, int channel);
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
-int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
+ size_t cmd_len, size_t res_len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
size_t len, unsigned long valid_rets);
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 2b96ff82134..40995c42bef 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1274,6 +1274,9 @@ struct conf_rx_streaming_settings {
u8 always;
} __packed;
+#define CONF_FWLOG_MIN_MEM_BLOCKS 2
+#define CONF_FWLOG_MAX_MEM_BLOCKS 16
+
struct conf_fwlog {
/* Continuous or on-demand */
u8 mode;
@@ -1281,7 +1284,7 @@ struct conf_fwlog {
/*
* Number of memory blocks dedicated for the FW logger
*
- * Range: 1-3, or 0 to disable the FW logger
+ * Range: 2-16, or 0 to disable the FW logger
*/
u8 mem_blocks;
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index e17630c2a84..89893c71702 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -437,6 +437,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
int res = 0;
ssize_t ret;
char *buf;
+ struct wl12xx_vif *wlvif;
#define DRIVER_STATE_BUF_LEN 1024
@@ -450,12 +451,28 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
+#define DRIVER_STATE_PRINT_GENERIC(x, fmt, args...) \
+ (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
+ #x " = " fmt "\n", args))
+
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
#define DRIVER_STATE_PRINT_INT(x) DRIVER_STATE_PRINT(x, "%d")
#define DRIVER_STATE_PRINT_STR(x) DRIVER_STATE_PRINT(x, "%s")
#define DRIVER_STATE_PRINT_LHEX(x) DRIVER_STATE_PRINT(x, "0x%lx")
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ continue;
+
+ DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
+ wlvif->p2p ? "P2P-CL" : "STA");
+ }
+
+ wl12xx_for_each_wlvif_ap(wl, wlvif)
+ DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
+ wlvif->p2p ? "P2P-GO" : "AP");
+
DRIVER_STATE_PRINT_INT(tx_blocks_available);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
@@ -474,7 +491,6 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
DRIVER_STATE_PRINT_INT(rx_counter);
DRIVER_STATE_PRINT_INT(state);
- DRIVER_STATE_PRINT_INT(channel);
DRIVER_STATE_PRINT_INT(band);
DRIVER_STATE_PRINT_INT(power_level);
DRIVER_STATE_PRINT_INT(sg_enabled);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 67f61689b49..8d3b34965db 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -266,6 +266,7 @@ int wl1271_event_unmask(struct wl1271 *wl)
{
int ret;
+ wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 7fd260c02a0..51f8d634d32 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -222,6 +222,15 @@ wlcore_hw_set_peer_cap(struct wl1271 *wl,
return 0;
}
+static inline u32
+wlcore_hw_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+ if (!wl->ops->convert_hwaddr)
+ BUG_ON(1);
+
+ return wl->ops->convert_hwaddr(wl, hwaddr);
+}
+
static inline bool
wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk)
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5c6f11e157d..7699f9d07e2 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -571,6 +571,12 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
if (ret < 0)
return ret;
+
+ /* unmask ap events */
+ wl->event_mask |= wl->ap_event_mask;
+ ret = wl1271_event_unmask(wl);
+ if (ret < 0)
+ return ret;
/* first STA, no APs */
} else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
u8 sta_auth = wl->conf.conn.sta_sleep_auth;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index af7d9f9b3b4..07e3d6a049a 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -165,8 +165,8 @@ static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr,
int physical;
int addr;
- /* Addresses are stored internally as addresses to 32 bytes blocks */
- addr = hwaddr << 5;
+ /* Convert from FW internal address which is chip arch dependent */
+ addr = wl->ops->convert_hwaddr(wl, hwaddr);
physical = wlcore_translate_addr(wl, addr);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 38995f90040..0368b9cbfb8 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -44,6 +44,7 @@
#define WL1271_BOOT_RETRIES 3
static char *fwlog_param;
+static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;
@@ -291,6 +292,18 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
{
/* Adjust settings according to optional module parameters */
+ /* Firmware Logger params */
+ if (fwlog_mem_blocks != -1) {
+ if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
+ fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
+ wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
+ } else {
+ wl1271_error(
+ "Illegal fwlog_mem_blocks=%d using default %d",
+ fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
+ }
+ }
+
if (fwlog_param) {
if (!strcmp(fwlog_param, "continuous")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
@@ -780,6 +793,7 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
if (wl->state == WLCORE_STATE_ON) {
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+ wl1271_ps_elp_wakeup(wl);
wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
@@ -787,19 +801,10 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
- size_t len = 0;
-
- /* The FW log is a length-value list, find where the log end */
- while (len < maxlen) {
- if (memblock[len] == 0)
- break;
- if (len + memblock[len] + 1 > maxlen)
- break;
- len += memblock[len] + 1;
- }
+ size_t len;
/* Make sure we have enough room */
- len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
+ len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
/* Fill the FW log file, consumed by the sysfs fwlog entry */
memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -808,10 +813,9 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
return len;
}
-#define WLCORE_FW_LOG_END 0x2000000
-
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
+ struct wlcore_partition_set part, old_part;
u32 addr;
u32 offset;
u32 end_of_log;
@@ -824,7 +828,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl1271_info("Reading FW panic log");
- block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
+ block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
if (!block)
return;
@@ -850,17 +854,31 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
- end_of_log = WLCORE_FW_LOG_END;
+ end_of_log = wl->fwlog_end;
} else {
offset = sizeof(addr);
end_of_log = addr;
}
+ old_part = wl->curr_part;
+ memset(&part, 0, sizeof(part));
+
/* Traverse the memory blocks linked list */
do {
- memset(block, 0, WL12XX_HW_BLOCK_SIZE);
- ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
- false);
+ part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
+ part.mem.size = PAGE_SIZE;
+
+ ret = wlcore_set_partition(wl, &part);
+ if (ret < 0) {
+ wl1271_error("%s: set_partition start=0x%X size=%d",
+ __func__, part.mem.start, part.mem.size);
+ goto out;
+ }
+
+ memset(block, 0, wl->fw_mem_block_size);
+ ret = wlcore_read_hwaddr(wl, addr, block,
+ wl->fw_mem_block_size, false);
+
if (ret < 0)
goto out;
@@ -871,8 +889,9 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* on demand mode and is equal to 0x2000000 in continuous mode.
*/
addr = le32_to_cpup((__le32 *)block);
+
if (!wl12xx_copy_fwlog(wl, block + offset,
- WL12XX_HW_BLOCK_SIZE - offset))
+ wl->fw_mem_block_size - offset))
break;
} while (addr && (addr != end_of_log));
@@ -880,6 +899,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
out:
kfree(block);
+ wlcore_set_partition(wl, &old_part);
}
static void wlcore_print_recovery(struct wl1271 *wl)
@@ -924,7 +944,8 @@ static void wl1271_recovery_work(struct work_struct *work)
goto out_unlock;
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
- wl12xx_read_fwlog_panic(wl);
+ if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
+ wl12xx_read_fwlog_panic(wl);
wlcore_print_recovery(wl);
}
@@ -1062,7 +1083,8 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
static const char* const PLT_MODE[] = {
"PLT_OFF",
"PLT_ON",
- "PLT_FEM_DETECT"
+ "PLT_FEM_DETECT",
+ "PLT_CHIP_AWAKE"
};
int ret;
@@ -1088,9 +1110,11 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
if (ret < 0)
goto power_off;
- ret = wl->ops->plt_init(wl);
- if (ret < 0)
- goto power_off;
+ if (plt_mode != PLT_CHIP_AWAKE) {
+ ret = wl->ops->plt_init(wl);
+ if (ret < 0)
+ goto power_off;
+ }
wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -1925,8 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
/*
* FW channels must be re-calibrated after recovery,
- * clear the last Reg-Domain channel configuration.
+ * save current Reg-Domain channel configuration and clear it.
*/
+ memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
+ sizeof(wl->reg_ch_conf_pending));
memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
@@ -2008,6 +2034,47 @@ out:
mutex_unlock(&wl->mutex);
}
+static void wlcore_pending_auth_complete_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
+ unsigned long time_spare;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif,
+ pending_auth_complete_work);
+ wl = wlvif->wl;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /*
+ * Make sure a second really passed since the last auth reply. Maybe
+ * a second auth reply arrived while we were stuck on the mutex.
+ * Check for a little less than the timeout to protect from scheduler
+ * irregularities.
+ */
+ time_spare = jiffies +
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
+ if (!time_after(time_spare, wlvif->pending_auth_reply_time))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* cancel the ROC if active */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, false);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2159,6 +2226,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_channel_switch_work);
INIT_DELAYED_WORK(&wlvif->connection_loss_work,
wlcore_connection_loss_work);
+ INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
+ wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2445,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u8 role_type;
+ if (wl->plt) {
+ wl1271_error("Adding Interface not allowed while in PLT mode");
+ return -EBUSY;
+ }
+
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2572,6 +2646,12 @@ deinit:
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
goto unlock;
+ if (wl->ap_count == 0 && is_ap) {
+ /* mask ap events */
+ wl->event_mask &= ~wl->ap_event_mask;
+ wl1271_event_unmask(wl);
+ }
+
if (wl->ap_count == 0 && is_ap && wl->sta_count) {
u8 sta_auth = wl->conf.conn.sta_sleep_auth;
/* Configure for power according to debugfs */
@@ -2590,6 +2670,7 @@ unlock:
cancel_work_sync(&wlvif->rx_streaming_disable_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
cancel_delayed_work_sync(&wlvif->channel_switch_work);
+ cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
mutex_lock(&wl->mutex);
}
@@ -2875,6 +2956,25 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->rate_set = wlvif->basic_rate_set;
}
+static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
+{
+ bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return;
+
+ if (idle) {
+ clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ } else {
+ /* The current firmware only supports sched_scan in idle */
+ if (wl->sched_vif == wlvif)
+ wl->ops->sched_scan_stop(wl, wlvif);
+
+ set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ }
+}
+
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
@@ -3969,6 +4069,13 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
}
} else {
if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ /*
+ * AP might be in ROC in case we have just
+ * sent an auth reply. Handle it.
+ */
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+
ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
@@ -4120,6 +4227,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE && !is_ibss)
+ wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4766,49 @@ static void wlcore_roc_if_possible(struct wl1271 *wl,
wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct wl1271_station *wl_sta,
- bool in_connection)
+/*
+ * when wl_sta is NULL, we treat this call as if coming from a
+ * pending auth reply.
+ * wl->mutex must be taken and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn)
{
- if (in_connection) {
- if (WARN_ON(wl_sta->in_connection))
+ if (in_conn) {
+ if (WARN_ON(wl_sta && wl_sta->in_connection))
return;
- wl_sta->in_connection = true;
- if (!wlvif->inconn_count++)
+
+ if (!wlvif->ap_pending_auth_reply &&
+ !wlvif->inconn_count)
wlcore_roc_if_possible(wl, wlvif);
+
+ if (wl_sta) {
+ wl_sta->in_connection = true;
+ wlvif->inconn_count++;
+ } else {
+ wlvif->ap_pending_auth_reply = true;
+ }
} else {
- if (!wl_sta->in_connection)
+ if (wl_sta && !wl_sta->in_connection)
return;
- wl_sta->in_connection = false;
- wlvif->inconn_count--;
- if (WARN_ON(wlvif->inconn_count < 0))
+ if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
return;
- if (!wlvif->inconn_count)
- if (test_bit(wlvif->role_id, wl->roc_map))
- wl12xx_croc(wl, wlvif->role_id);
+ if (WARN_ON(wl_sta && !wlvif->inconn_count))
+ return;
+
+ if (wl_sta) {
+ wl_sta->in_connection = false;
+ wlvif->inconn_count--;
+ } else {
+ wlvif->ap_pending_auth_reply = false;
+ }
+
+ if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+ test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
}
}
@@ -5313,10 +5443,7 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +6023,20 @@ static const struct wiphy_wowlan_support wlcore_wowlan_support = {
};
#endif
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
- struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
unsigned long irqflags;
int ret;
+ irq_handler_t hardirq_fn = NULL;
if (fw) {
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6065,14 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
wl->platform_quirks = pdata->platform_quirks;
wl->if_ops = pdev_data->if_ops;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
irqflags = IRQF_TRIGGER_RISING;
- else
+ hardirq_fn = wlcore_hardirq;
+ } else {
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ }
- ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+ ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
irqflags, pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
@@ -6046,6 +6181,9 @@ module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
"FW logger options: continuous, ondemand, dbgpins or disable");
+module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
+
module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
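[Editor's note] The edge-IRQ hunk above relies on a standard threaded-IRQ pattern. Below is a minimal sketch of it, not the driver's code: the names my_chip/my_hardirq/my_thread_fn/"my-chip" are made up, while request_threaded_irq(), IRQ_WAKE_THREAD and the IRQF_* trigger flags are the real kernel API.

#include <linux/interrupt.h>

/*
 * A NULL primary handler requires IRQF_ONESHOT, which keeps the line
 * masked until the thread finishes; that is fine for level-triggered
 * interrupts but can lose edges on an edge-triggered line. So for the
 * edge quirk a trivial primary handler that only wakes the thread is
 * supplied instead.
 */
static irqreturn_t my_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *cookie)
{
	/* all real work happens here, in process context */
	return IRQ_HANDLED;
}

static int my_request_irq(unsigned int irq, void *cookie, bool edge_quirk)
{
	irq_handler_t hardirq = edge_quirk ? my_hardirq : NULL;
	unsigned long flags = edge_quirk ? IRQF_TRIGGER_RISING
					 : (IRQF_TRIGGER_HIGH | IRQF_ONESHOT);

	return request_threaded_irq(irq, hardirq, my_thread_fn,
				    flags, "my-chip", cookie);
}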
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 98066d40c2a..26bfc365ba7 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -83,6 +83,10 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
struct wl12xx_vif *wlvif;
u32 timeout;
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return;
+
if (wl->sleep_auth != WL1271_PSM_ELP)
return;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index f407101e525..7ed86203304 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -92,9 +92,31 @@ out:
static void wlcore_started_vifs_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool active = false;
int *count = (int *)data;
- if (!vif->bss_conf.idle)
+ /*
+ * count active interfaces according to interface type.
+ * checking only bss_conf.idle is bad for some cases, e.g.
+ * we don't want to count a sta in p2p_find as an active interface.
+ */
+ switch (wlvif->bss_type) {
+ case BSS_TYPE_STA_BSS:
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ active = true;
+ break;
+
+ case BSS_TYPE_AP_BSS:
+ if (wlvif->wl->active_sta_count > 0)
+ active = true;
+ break;
+
+ default:
+ break;
+ }
+
+ if (active)
(*count)++;
}
@@ -174,17 +196,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
- wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req_channels[i]->band,
- req_channels[i]->center_freq);
- wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req_channels[i]->hw_value,
- req_channels[i]->flags);
- wl1271_debug(DEBUG_SCAN, "max_power %d",
- req_channels[i]->max_power);
- wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
- min_dwell_time_active,
- max_dwell_time_active);
+
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +234,17 @@ wlcore_scan_get_channels(struct wl1271 *wl,
*n_pactive_ch);
}
+ wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
+ req_channels[i]->center_freq,
+ req_channels[i]->hw_value,
+ req_channels[i]->flags,
+ req_channels[i]->max_power,
+ min_dwell_time_active,
+ max_dwell_time_active,
+ flags & IEEE80211_CHAN_RADAR ?
+ ", DFS" : "",
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ ", PASSIVE" : "");
j++;
}
}
@@ -364,7 +387,7 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct cfg80211_ssid *ssids = req->ssids;
int ret = 0, type, i, j, n_match_ssids = 0;
- wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+ wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
/* count the match sets that contain SSIDs */
for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +465,6 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
}
}
- wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
-
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 1b0cd98e35f..b2c018dccf1 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -335,7 +335,7 @@ static int wl1271_probe(struct spi_device *spi)
if (!pdev_data)
goto out;
- pdev_data->pdata = spi->dev.platform_data;
+ pdev_data->pdata = dev_get_platdata(&spi->dev);
if (!pdev_data->pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 527590f2adf..ddad58f614d 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -179,7 +179,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out_sleep;
}
- ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
+ ret = wl1271_cmd_interrogate(wl, ie_id, cmd,
+ sizeof(struct acx_header), sizeof(*cmd));
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
goto out_free;
@@ -297,7 +298,8 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
ret = wl1271_plt_stop(wl);
break;
case PLT_ON:
- ret = wl1271_plt_start(wl, PLT_ON);
+ case PLT_CHIP_AWAKE:
+ ret = wl1271_plt_start(wl, val);
break;
case PLT_FEM_DETECT:
ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +363,7 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+ u32 nla_cmd;
int err;
err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +373,14 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!tb[WL1271_TM_ATTR_CMD_ID])
return -EINVAL;
- switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+ nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
+
+ /* Only SET_PLT_MODE is allowed while in PLT_CHIP_AWAKE mode */
+ if (wl->plt_mode == PLT_CHIP_AWAKE &&
+ nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
+ return -EOPNOTSUPP;
+
+ switch (nla_cmd) {
case WL1271_TM_CMD_TEST:
return wl1271_tm_cmd_test(wl, tb);
case WL1271_TM_CMD_INTERROGATE:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7e93fe63a2c..87cd707affa 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -86,19 +86,34 @@ void wl1271_free_tx_id(struct wl1271 *wl, int id)
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (!ieee80211_is_auth(hdr->frame_control))
+ return;
+
/*
* add the station to the known list before transmitting the
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- hdr = (struct ieee80211_hdr *)(skb->data +
- sizeof(struct wl1271_tx_hw_descr));
- if (ieee80211_is_auth(hdr->frame_control))
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+ /*
+ * ROC for 1 second on the AP channel for completing the connection.
+ * Note the ROC will be continued by the update_sta_state callbacks
+ * once the station reaches the associated state.
+ */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+ wlvif->pending_auth_reply_time = jiffies;
+ cancel_delayed_work(&wlvif->pending_auth_complete_work);
+ ieee80211_queue_delayed_work(wl->hw,
+ &wlvif->pending_auth_complete_work,
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+ if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
@@ -404,7 +419,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
- wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
wl1271_tx_regulate_link(wl, wlvif, hlid);
}
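[Editor's note] The pending-auth handling above pairs a jiffies timestamp taken at TX time with a delayed work that re-checks the elapsed time once it finally gets the lock. A sketch of that pattern under assumed names (my_vif, my_arm_timeout, MY_TIMEOUT_MS) and with generic schedule_delayed_work() instead of the mac80211 queue helper; the jiffies and workqueue calls are the real APIs.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_TIMEOUT_MS	1000

struct my_vif {
	unsigned long		last_event_time;
	struct delayed_work	timeout_work;
};

static void my_arm_timeout(struct my_vif *vif)
{
	/* remember when the event happened, then (re)arm the work */
	vif->last_event_time = jiffies;
	cancel_delayed_work(&vif->timeout_work);
	schedule_delayed_work(&vif->timeout_work,
			      msecs_to_jiffies(MY_TIMEOUT_MS));
}

static void my_timeout_work(struct work_struct *work)
{
	struct my_vif *vif = container_of(to_delayed_work(work),
					  struct my_vif, timeout_work);
	/* allow ~50ms of scheduler slack, as the hunks above do */
	unsigned long deadline = vif->last_event_time +
				 msecs_to_jiffies(MY_TIMEOUT_MS - 50);

	if (time_before(jiffies, deadline))
		return;	/* a newer event re-armed us; let that one expire */

	/* ...tear down whatever state was tied to the event... */
}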
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 55aa4acf910..35489c300da 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -56,6 +56,9 @@
/* Used for management frames and dummy packets */
#define WL1271_TID_MGMT 7
+/* stop a ROC for pending authentication reply after this time (ms) */
+#define WLCORE_PEND_AUTH_ROC_TIMEOUT 1000
+
struct wl127x_tx_mem {
/*
* Number of extra memory blocks to allocate for this packet
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0034979e97c..06efc12a39e 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -110,6 +110,7 @@ struct wlcore_ops {
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
u32 rate_set, u8 hlid);
+ u32 (*convert_hwaddr)(struct wl1271 *wl, u32 hwaddr);
bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk);
bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
@@ -290,6 +291,12 @@ struct wl1271 {
/* Number of valid bytes in the FW log buffer */
ssize_t fwlog_size;
+ /* FW log end marker */
+ u32 fwlog_end;
+
+ /* FW memory block size */
+ u32 fw_mem_block_size;
+
/* Sysfs FW log entry readers wait queue */
wait_queue_head_t fwlog_waitq;
@@ -307,6 +314,8 @@ struct wl1271 {
/* The mbox event mask */
u32 event_mask;
+ /* events to unmask only when ap interface is up */
+ u32 ap_event_mask;
/* Mailbox pointers */
u32 mbox_size;
@@ -481,6 +490,8 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
void wlcore_regdomain_config(struct wl1271 *wl);
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn);
static inline void
wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e5e146435fe..ce7261ce8b5 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -255,6 +255,7 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_CS_PROGRESS,
WLVIF_FLAG_AP_PROBE_RESP_SET,
WLVIF_FLAG_IN_USE,
+ WLVIF_FLAG_ACTIVE,
};
struct wl12xx_vif;
@@ -307,6 +308,7 @@ enum plt_mode {
PLT_OFF = 0,
PLT_ON = 1,
PLT_FEM_DETECT = 2,
+ PLT_CHIP_AWAKE = 3
};
struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@ struct wl12xx_vif {
*/
int hw_queue_base;
+ /* do we have a pending auth reply? (and ROC) */
+ bool ap_pending_auth_reply;
+
+ /* time when we sent the pending auth reply */
+ unsigned long pending_auth_reply_time;
+
+ /* work for canceling ROC after pending auth reply */
+ struct delayed_work pending_auth_complete_work;
+
/*
* This struct must be last!
* data that has to be saved across reconfigs (e.g. recovery)
@@ -539,6 +550,4 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
#define HW_HT_RATES_OFFSET 16
#define HW_MIMO_RATES_OFFSET 24
-#define WL12XX_HW_BLOCK_SIZE 256
-
#endif /* __WLCORE_I_H__ */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 7ef0b4a181e..84d94f572a4 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1619,7 +1619,7 @@ static void prepare_read_regs_int(struct zd_usb *usb,
atomic_set(&intr->read_regs_enabled, 1);
intr->read_regs.req = req;
intr->read_regs.req_count = count;
- INIT_COMPLETION(intr->read_regs.completion);
+ reinit_completion(&intr->read_regs.completion);
spin_unlock_irq(&intr->lock);
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5715318d6ba..08ae01b41c8 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@ struct pending_tx_info {
struct xenvif_rx_meta {
int id;
int size;
+ int gso_type;
int gso_size;
};
+#define GSO_BIT(type) \
+ (1 << XEN_NETIF_GSO_TYPE_ ## type)
+
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
@@ -150,10 +154,12 @@ struct xenvif {
u8 fe_dev_addr[6];
/* Frontend feature information. */
+ int gso_mask;
+ int gso_prefix_mask;
+
u8 can_sg:1;
- u8 gso:1;
- u8 gso_prefix:1;
- u8 csum:1;
+ u8 ip_csum:1;
+ u8 ipv6_csum:1;
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
@@ -163,6 +169,7 @@ struct xenvif {
unsigned long credit_usec;
unsigned long remaining_credit;
struct timer_list credit_timeout;
+ u64 credit_window_start;
/* Statistics */
unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 01bb854c7f6..2329cccf1fa 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -214,10 +214,14 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
if (!vif->can_sg)
features &= ~NETIF_F_SG;
- if (!vif->gso && !vif->gso_prefix)
+ if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
features &= ~NETIF_F_TSO;
- if (!vif->csum)
+ if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+ features &= ~NETIF_F_TSO6;
+ if (!vif->ip_csum)
features &= ~NETIF_F_IP_CSUM;
+ if (!vif->ipv6_csum)
+ features &= ~NETIF_F_IPV6_CSUM;
return features;
}
@@ -306,18 +310,19 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
- vif->csum = 1;
+ vif->ip_csum = 1;
vif->dev = dev;
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
- /* Initialize 'expires' now: it's used to track the credit window. */
- vif->credit_timeout.expires = jiffies;
+ vif->credit_window_start = get_jiffies_64();
dev->netdev_ops = &xenvif_netdev_ops;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- dev->features = dev->hw_features;
+ dev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+ dev->features = dev->hw_features | NETIF_F_RXCSUM;
SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
@@ -456,6 +461,9 @@ void xenvif_disconnect(struct xenvif *vif)
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
+ if (vif->task)
+ kthread_stop(vif->task);
+
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
unbind_from_irqhandler(vif->tx_irq, vif);
@@ -466,9 +474,6 @@ void xenvif_disconnect(struct xenvif *vif)
vif->tx_irq = 0;
}
- if (vif->task)
- kthread_stop(vif->task);
-
xenvif_unmap_frontend_rings(vif);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f3e591c611d..919b6509455 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 and most likely
+ * IPv6 headers.
*/
-#define PKT_PROT_LEN (ETH_HLEN + \
- VLAN_HLEN + \
- sizeof(struct iphdr) + MAX_IPOPTLEN + \
- sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso || vif->gso_prefix)
+ if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
return max;
@@ -317,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
@@ -339,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
+ int gso_type;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/* Leave a gap for the GSO descriptor. */
- if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ else
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+ if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
unsigned char *data;
int head = 1;
int old_meta_prod;
+ int gso_type;
+ int gso_size;
old_meta_prod = npo->meta_prod;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso_size = skb_shinfo(skb)->gso_size;
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ gso_size = skb_shinfo(skb)->gso_size;
+ } else {
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ gso_size = 0;
+ }
+
/* Set up a GSO prefix descriptor, if necessary */
- if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+ if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- meta->gso_size = skb_shinfo(skb)->gso_size;
+ meta->gso_type = gso_type;
+ meta->gso_size = gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -443,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- if (!vif->gso_prefix)
- meta->gso_size = skb_shinfo(skb)->gso_size;
- else
+ if ((1 << gso_type) & vif->gso_mask) {
+ meta->gso_type = gso_type;
+ meta->gso_size = gso_size;
+ } else {
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
+ }
meta->size = 0;
meta->id = req->id;
@@ -592,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
vif = netdev_priv(skb->dev);
- if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+ if ((1 << vif->meta[npo.meta_cons].gso_type) &
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
@@ -629,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
vif->meta[npo.meta_cons].size,
flags);
- if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+ if ((1 << vif->meta[npo.meta_cons].gso_type) &
+ vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
resp->flags |= XEN_NETRXF_extra_info;
+ gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
- gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -1101,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return -EINVAL;
}
- /* Currently only TCPv4 S.O. is supported. */
- if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ switch (gso->u.gso.type) {
+ case XEN_NETIF_GSO_TYPE_TCPV4:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ break;
+ case XEN_NETIF_GSO_TYPE_TCPV6:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+ default:
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
xenvif_fatal_tx_err(vif);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+ /* If we need to pullup then pullup to the max, so we
+ * won't need to do it again.
+ */
+ int target = min_t(int, skb->len, MAX_TCP_HEADER);
+ __pskb_pull_tail(skb, target - skb_headlen(skb));
+ }
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+ int recalculate_partial_csum)
{
- struct iphdr *iph;
+ struct iphdr *iph = (void *)skb->data;
+ unsigned int header_size;
+ unsigned int off;
int err = -EPROTO;
- int recalculate_partial_csum = 0;
- /*
- * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
- * peers can fail to set NETRXF_csum_blank when sending a GSO
- * frame. In this case force the SKB to CHECKSUM_PARTIAL and
- * recalculate the partial checksum.
- */
- if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- vif->rx_gso_checksum_fixup++;
- skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
- }
+ off = sizeof(struct iphdr);
- /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ header_size = skb->network_header + off + MAX_IPOPTLEN;
+ maybe_pull_tail(skb, header_size);
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
+ off = iph->ihl * 4;
- iph = (void *)skb->data;
switch (iph->protocol) {
case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+ if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct tcphdr *tcph = tcp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct tcphdr);
+ maybe_pull_tail(skb, header_size);
+
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
+ skb->len - off,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+ if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct udphdr *udph = udp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct udphdr);
+ maybe_pull_tail(skb, header_size);
+
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
+ skb->len - off,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+ "Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet\n",
iph->protocol);
goto out;
}
@@ -1183,11 +1226,162 @@ out:
return err;
}
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+ int recalculate_partial_csum)
+{
+ int err = -EPROTO;
+ struct ipv6hdr *ipv6h = (void *)skb->data;
+ u8 nexthdr;
+ unsigned int header_size;
+ unsigned int off;
+ bool fragment;
+ bool done;
+
+ fragment = false; /* tested below even when no FRAGMENT header is seen */
+ done = false;
+
+ off = sizeof(struct ipv6hdr);
+
+ header_size = skb->network_header + off;
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = ipv6h->nexthdr;
+
+ while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+ !done) {
+ switch (nexthdr) {
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING: {
+ struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct ipv6_opt_hdr);
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ break;
+ }
+ case IPPROTO_AH: {
+ struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct ip_auth_hdr);
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = hp->nexthdr;
+ off += (hp->hdrlen+2)<<2;
+ break;
+ }
+ case IPPROTO_FRAGMENT:
+ fragment = true;
+ /* fall through */
+ default:
+ done = true;
+ break;
+ }
+ }
+
+ if (!done) {
+ if (net_ratelimit())
+ netdev_err(vif->dev, "Failed to parse packet header\n");
+ goto out;
+ }
+
+ if (fragment) {
+ if (net_ratelimit())
+ netdev_err(vif->dev, "Packet is a fragment!\n");
+ goto out;
+ }
+
+ switch (nexthdr) {
+ case IPPROTO_TCP:
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check)))
+ goto out;
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct tcphdr);
+ maybe_pull_tail(skb, header_size);
+
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
+ }
+ break;
+ case IPPROTO_UDP:
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check)))
+ goto out;
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct udphdr);
+ maybe_pull_tail(skb, header_size);
+
+ udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
+ }
+ break;
+ default:
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet\n",
+ nexthdr);
+ goto out;
+ }
+
+ err = 0;
+
+out:
+ return err;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+ int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ vif->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
+
+ return err;
+}
+
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
- unsigned long now = jiffies;
- unsigned long next_credit =
- vif->credit_timeout.expires +
+ u64 now = get_jiffies_64();
+ u64 next_credit = vif->credit_window_start +
msecs_to_jiffies(vif->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
@@ -1195,8 +1389,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return true;
/* Passed the point where we can replenish credit? */
- if (time_after_eq(now, next_credit)) {
- vif->credit_timeout.expires = now;
+ if (time_after_eq64(now, next_credit)) {
+ vif->credit_window_start = now;
tx_add_credit(vif);
}
@@ -1208,6 +1402,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
tx_credit_callback;
mod_timer(&vif->credit_timeout,
next_credit);
+ vif->credit_window_start = next_credit;
return true;
}
@@ -1428,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
xenvif_fill_frags(vif, skb);
- /*
- * If the initial fragment was < PKT_PROT_LEN then
- * pull through some bytes from the other fragments to
- * increase the linear region to PKT_PROT_LEN bytes.
- */
- if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+ if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
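[Editor's note] The RX-side changes above replace the single gso/gso_prefix booleans with per-type masks: whether a GSO extra slot is emitted now depends both on the type carried by the skb and on what the frontend advertised. The stand-alone sketch below shows only that decision, with made-up constants (GSO_NONE/GSO_TCPV4/GSO_TCPV6) standing in for the real SKB_GSO_* and XEN_NETIF_GSO_TYPE_* values, so it can be read and compiled on its own.

#include <stdbool.h>
#include <stdio.h>

enum gso_type { GSO_NONE = 0, GSO_TCPV4 = 1, GSO_TCPV6 = 2 };
#define GSO_BIT(t)	(1 << (t))

/* pick the wire GSO type from the skb's segmentation flags */
static enum gso_type classify(bool is_tcpv4, bool is_tcpv6)
{
	if (is_tcpv4)
		return GSO_TCPV4;
	if (is_tcpv6)
		return GSO_TCPV6;
	return GSO_NONE;
}

/* only emit a GSO extra-info slot if the frontend advertised the type */
static bool needs_gso_slot(enum gso_type t, int gso_mask)
{
	return (GSO_BIT(t) & gso_mask) != 0;
}

int main(void)
{
	int frontend_mask = GSO_BIT(GSO_TCPV4);	/* e.g. only TCPv4 offered */

	printf("tcpv4 skb -> slot: %d\n",
	       needs_gso_slot(classify(true, false), frontend_mask));
	printf("tcpv6 skb -> slot: %d\n",
	       needs_gso_slot(classify(false, true), frontend_mask));
	return 0;
}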
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1b08d879837..f0358992b04 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -105,6 +105,22 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ /* We support partial checksum setup for IPv6 packets */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-ipv6-csum-offload",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
/* We support rx-copy path. */
err = xenbus_printf(xbt, dev->nodename,
"feature-rx-copy", "%d", 1);
@@ -561,20 +577,50 @@ static int connect_rings(struct backend_info *be)
val = 0;
vif->can_sg = !!val;
+ vif->gso_mask = 0;
+ vif->gso_prefix_mask = 0;
+
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
"%d", &val) < 0)
val = 0;
- vif->gso = !!val;
+ if (val)
+ vif->gso_mask |= GSO_BIT(TCPV4);
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
"%d", &val) < 0)
val = 0;
- vif->gso_prefix = !!val;
+ if (val)
+ vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+ "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->gso_mask |= GSO_BIT(TCPV6);
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+ "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+ if (vif->gso_mask & vif->gso_prefix_mask) {
+ xenbus_dev_fatal(dev, err,
+ "%s: gso and gso prefix flags are not "
+ "mutually exclusive",
+ dev->otherend);
+ return -EOPNOTSUPP;
+ }
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
"%d", &val) < 0)
val = 0;
- vif->csum = !val;
+ vif->ip_csum = !val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
+ "%d", &val) < 0)
+ val = 0;
+ vif->ipv6_csum = !!val;
/* Map the shared frame, irq etc. */
err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
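[Editor's note] The connect_rings() hunk repeats the same read-or-default idiom for each xenstore feature key. A condensed sketch of that idiom, assuming kernel context and a hypothetical read_feature() helper; xenbus_scanf(), XBT_NIL and the GSO_BIT() macro from common.h are the real ones.

static int read_feature(struct xenbus_device *dev, const char *node)
{
	int val;

	/* a missing key simply means the frontend does not offer it */
	if (xenbus_scanf(XBT_NIL, dev->otherend, node, "%d", &val) < 0)
		val = 0;
	return !!val;
}

static int read_gso_masks(struct xenbus_device *dev,
			  int *gso_mask, int *gso_prefix_mask)
{
	*gso_mask = 0;
	*gso_prefix_mask = 0;

	if (read_feature(dev, "feature-gso-tcpv4"))
		*gso_mask |= GSO_BIT(TCPV4);
	if (read_feature(dev, "feature-gso-tcpv4-prefix"))
		*gso_prefix_mask |= GSO_BIT(TCPV4);
	if (read_feature(dev, "feature-gso-tcpv6"))
		*gso_mask |= GSO_BIT(TCPV6);
	if (read_feature(dev, "feature-gso-tcpv6-prefix"))
		*gso_prefix_mask |= GSO_BIT(TCPV6);

	/* a type may be offered either way, but not both at once */
	if (*gso_mask & *gso_prefix_mask)
		return -EOPNOTSUPP;
	return 0;
}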
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 36808bf2567..e59acb1daa2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -277,12 +277,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
if (!page) {
kfree_skb(skb);
no_skb:
- /* Any skbuffs queued for refill? Force them out. */
- if (i != 0)
- goto refill;
/* Could not allocate any skbuffs. Try again later. */
mod_timer(&np->rx_refill_timer,
jiffies + (HZ/10));
+
+ /* Any skbuffs queued for refill? Force them out. */
+ if (i != 0)
+ goto refill;
break;
}
@@ -952,7 +953,7 @@ static int handle_incoming_queue(struct net_device *dev,
u64_stats_update_end(&stats->syncp);
/* Pass it up. */
- netif_receive_skb(skb);
+ napi_gro_receive(&np->napi, skb);
}
return packets_dropped;
@@ -1051,6 +1052,8 @@ err:
if (work_done < budget) {
int more_to_do = 0;
+ napi_gro_flush(napi, false);
+
local_irq_save(flags);
RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
@@ -1338,6 +1341,12 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
if (np->stats == NULL)
goto exit;
+ for_each_possible_cpu(i) {
+ struct netfront_stats *xen_nf_stats;
+ xen_nf_stats = per_cpu_ptr(np->stats, i);
+ u64_stats_init(&xen_nf_stats->syncp);
+ }
+
/* Initialise tx_skbs as a free chain containing every entry. */
np->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
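[Editor's note] The netfront hunk above initialises the new u64_stats seqcount for every possible CPU right after allocating the per-CPU statistics; on 32-bit SMP the syncp is a real seqcount that must be initialised before first use, while on 64-bit it compiles away. A minimal sketch of the same idiom with a hypothetical my_stats structure; alloc_percpu(), for_each_possible_cpu(), per_cpu_ptr() and u64_stats_init() are the real kernel APIs.

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static int my_stats_alloc(struct my_stats __percpu **out)
{
	struct my_stats __percpu *stats = alloc_percpu(struct my_stats);
	int cpu;

	if (!stats)
		return -ENOMEM;

	/* initialise the seqcount on every CPU that may ever come up */
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);

	*out = stats;
	return 0;
}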
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index b0b64ccb7d7..c1fb2060333 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -46,6 +46,16 @@ config NFC_SIM
If unsure, say N.
+config NFC_PORT100
+ tristate "Sony NFC Port-100 Series USB device support"
+ depends on USB
+ depends on NFC_DIGITAL
+ help
+ This adds support for Sony Port-100 chip based USB devices such as the
+ RC-S380 dongle.
+
+ If unsure, say N.
+
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index be7636abcb3..c715fe8582a 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
+obj-$(CONFIG_NFC_PORT100) += port100.o
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 606bf55e76e..85f90090cc1 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nfc.h>
@@ -60,13 +62,13 @@ int nfc_mei_phy_enable(void *phy_id)
r = mei_cl_enable_device(phy->device);
if (r < 0) {
- pr_err("MEI_PHY: Could not enable device\n");
+ pr_err("Could not enable device\n");
return r;
}
r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
if (r) {
- pr_err("MEY_PHY: Event cb registration failed\n");
+ pr_err("Event cb registration failed\n");
mei_cl_disable_device(phy->device);
phy->powered = 0;
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 101089495bf..696e3467ecc 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/delay.h>
@@ -95,12 +97,8 @@ static int check_crc(struct sk_buff *skb)
crc = crc ^ skb->data[i];
if (crc != skb->data[skb->len-1]) {
- pr_err(MICROREAD_I2C_DRIVER_NAME
- ": CRC error 0x%x != 0x%x\n",
- crc, skb->data[skb->len-1]);
-
- pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
-
+ pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]);
+ pr_info("%s: BAD CRC\n", __func__);
return -EPERM;
}
@@ -160,18 +158,15 @@ static int microread_i2c_read(struct microread_i2c_phy *phy,
u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
struct i2c_client *client = phy->i2c_dev;
- pr_debug("%s\n", __func__);
-
r = i2c_master_recv(client, &len, 1);
if (r != 1) {
- dev_err(&client->dev, "cannot read len byte\n");
+ nfc_err(&client->dev, "cannot read len byte\n");
return -EREMOTEIO;
}
if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
(len > MICROREAD_I2C_LLC_MAX_SIZE)) {
- dev_err(&client->dev, "invalid len byte\n");
- pr_err("invalid len byte\n");
+ nfc_err(&client->dev, "invalid len byte\n");
r = -EBADMSG;
goto flush;
}
@@ -228,7 +223,6 @@ static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
}
client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
if (phy->hard_fault != 0)
return IRQ_HANDLED;
@@ -263,20 +257,18 @@ static int microread_i2c_probe(struct i2c_client *client,
dev_get_platdata(&client->dev);
int r;
- dev_dbg(&client->dev, "client %p", client);
+ dev_dbg(&client->dev, "client %p\n", client);
if (!pdata) {
- dev_err(&client->dev, "client %p: missing platform data",
+ nfc_err(&client->dev, "client %p: missing platform data\n",
client);
return -EINVAL;
}
phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
GFP_KERNEL);
- if (!phy) {
- dev_err(&client->dev, "Can't allocate microread phy");
+ if (!phy)
return -ENOMEM;
- }
i2c_set_clientdata(client, phy);
phy->i2c_dev = client;
@@ -285,7 +277,7 @@ static int microread_i2c_probe(struct i2c_client *client,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
MICROREAD_I2C_DRIVER_NAME, phy);
if (r) {
- dev_err(&client->dev, "Unable to register IRQ handler");
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
return r;
}
@@ -296,7 +288,7 @@ static int microread_i2c_probe(struct i2c_client *client,
if (r < 0)
goto err_irq;
- dev_info(&client->dev, "Probed");
+ nfc_info(&client->dev, "Probed");
return 0;
@@ -310,8 +302,6 @@ static int microread_i2c_remove(struct i2c_client *client)
{
struct microread_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
microread_remove(phy->hdev);
free_irq(client->irq, phy);
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index cdf1bc53b25..72fafec3d46 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nfc.h>
@@ -59,8 +61,6 @@ static int microread_mei_remove(struct mei_cl_device *device)
{
struct nfc_mei_phy *phy = mei_cl_get_drvdata(device);
- pr_info("Removing microread\n");
-
microread_remove(phy->hdev);
nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index cdb9f6de132..970ded6bfcf 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -546,7 +548,7 @@ exit:
kfree_skb(skb);
if (r)
- pr_err("Failed to handle discovered target err=%d", r);
+ pr_err("Failed to handle discovered target err=%d\n", r);
}
static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
@@ -656,7 +658,6 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
info = kzalloc(sizeof(struct microread_info), GFP_KERNEL);
if (!info) {
- pr_err("Cannot allocate memory for microread_info.\n");
r = -ENOMEM;
goto err_info_alloc;
}
@@ -686,7 +687,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
MICROREAD_CMD_TAILROOM,
phy_payload);
if (!info->hdev) {
- pr_err("Cannot allocate nfc hdev.\n");
+ pr_err("Cannot allocate nfc hdev\n");
r = -ENOMEM;
goto err_alloc_hdev;
}
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 9a53f13c88d..93111fa8d28 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -19,10 +19,10 @@
#include <linux/nfc.h>
#include <net/nfc/nfc.h>
-#define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \
+#define DEV_ERR(_dev, fmt, args...) nfc_err(&_dev->nfc_dev->dev, \
"%s: " fmt, __func__, ## args)
-#define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \
+#define DEV_DBG(_dev, fmt, args...) dev_dbg(&_dev->nfc_dev->dev, \
"%s: " fmt, __func__, ## args)
#define NFCSIM_VERSION "0.1"
@@ -64,7 +64,7 @@ static struct workqueue_struct *wq;
static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
{
- DEV_DBG(dev, "shutdown=%d", shutdown);
+ DEV_DBG(dev, "shutdown=%d\n", shutdown);
mutex_lock(&dev->lock);
@@ -84,7 +84,7 @@ static int nfcsim_target_found(struct nfcsim *dev)
{
struct nfc_target nfc_tgt;
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
memset(&nfc_tgt, 0, sizeof(struct nfc_target));
@@ -98,7 +98,7 @@ static int nfcsim_dev_up(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
mutex_lock(&dev->lock);
@@ -113,7 +113,7 @@ static int nfcsim_dev_down(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
mutex_lock(&dev->lock);
@@ -143,7 +143,7 @@ static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len);
if (!remote_gb) {
- DEV_ERR(peer, "Can't get remote general bytes");
+ DEV_ERR(peer, "Can't get remote general bytes\n");
mutex_unlock(&peer->lock);
return -EINVAL;
@@ -155,7 +155,7 @@ static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len);
if (rc) {
- DEV_ERR(dev, "Can't set remote general bytes");
+ DEV_ERR(dev, "Can't set remote general bytes\n");
mutex_unlock(&dev->lock);
return rc;
}
@@ -172,7 +172,7 @@ static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
nfcsim_cleanup_dev(dev, 0);
@@ -188,7 +188,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
mutex_lock(&dev->lock);
if (dev->polling_mode != NFCSIM_POLL_NONE) {
- DEV_ERR(dev, "Already in polling mode");
+ DEV_ERR(dev, "Already in polling mode\n");
rc = -EBUSY;
goto exit;
}
@@ -200,7 +200,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
dev->polling_mode |= NFCSIM_POLL_TARGET;
if (dev->polling_mode == NFCSIM_POLL_NONE) {
- DEV_ERR(dev, "Unsupported polling mode");
+ DEV_ERR(dev, "Unsupported polling mode\n");
rc = -EINVAL;
goto exit;
}
@@ -210,7 +210,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
queue_delayed_work(wq, &dev->poll_work, 0);
- DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols,
+ DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X\n", im_protocols,
tm_protocols);
rc = 0;
@@ -224,7 +224,7 @@ static void nfcsim_stop_poll(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "Stop poll");
+ DEV_DBG(dev, "Stop poll\n");
mutex_lock(&dev->lock);
@@ -240,7 +240,7 @@ static int nfcsim_activate_target(struct nfc_dev *nfc_dev,
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
return -ENOTSUPP;
}
@@ -250,7 +250,7 @@ static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev,
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
}
static void nfcsim_wq_recv(struct work_struct *work)
@@ -267,7 +267,7 @@ static void nfcsim_wq_recv(struct work_struct *work)
if (dev->initiator) {
if (!dev->cb) {
- DEV_ERR(dev, "Null recv callback");
+ DEV_ERR(dev, "Null recv callback\n");
dev_kfree_skb(dev->clone_skb);
goto exit;
}
@@ -310,7 +310,7 @@ static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target,
peer->clone_skb = skb_clone(skb, GFP_KERNEL);
if (!peer->clone_skb) {
- DEV_ERR(dev, "skb_clone failed");
+ DEV_ERR(dev, "skb_clone failed\n");
mutex_unlock(&peer->lock);
err = -ENOMEM;
goto exit;
@@ -397,13 +397,13 @@ static void nfcsim_wq_poll(struct work_struct *work)
nfcsim_set_polling_mode(dev);
if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
- DEV_DBG(dev, "Not polling");
+ DEV_DBG(dev, "Not polling\n");
goto unlock;
}
DEV_DBG(dev, "Polling as %s",
dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ?
- "initiator" : "target");
+ "initiator\n" : "target\n");
if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
goto sched_work;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 59f95d8fc98..71308645593 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -146,13 +146,11 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
unsigned long comp_ret;
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "get_bts_file_name entry");
-
skb = nfcwilink_skb_alloc(sizeof(struct nci_vs_nfcc_info_cmd),
GFP_KERNEL);
if (!skb) {
- nfc_dev_err(&drv->pdev->dev,
- "no memory for nci_vs_nfcc_info_cmd");
+ nfc_err(&drv->pdev->dev,
+ "no memory for nci_vs_nfcc_info_cmd\n");
return -ENOMEM;
}
@@ -170,21 +168,19 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
comp_ret = wait_for_completion_timeout(&drv->completed,
msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
- nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
- comp_ret);
+ dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+ comp_ret);
if (comp_ret == 0) {
- nfc_dev_err(&drv->pdev->dev,
- "timeout on wait_for_completion_timeout");
+ nfc_err(&drv->pdev->dev,
+ "timeout on wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
- nfc_dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d",
- drv->nfcc_info.plen,
- drv->nfcc_info.status);
+ dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d\n",
+ drv->nfcc_info.plen, drv->nfcc_info.status);
if ((drv->nfcc_info.plen != 5) || (drv->nfcc_info.status != 0)) {
- nfc_dev_err(&drv->pdev->dev,
- "invalid nci_vs_nfcc_info_rsp");
+ nfc_err(&drv->pdev->dev, "invalid nci_vs_nfcc_info_rsp\n");
return -EINVAL;
}
@@ -195,7 +191,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
drv->nfcc_info.sw_ver_z,
drv->nfcc_info.patch_id);
- nfc_dev_info(&drv->pdev->dev, "nfcwilink FW file name: %s", file_name);
+ nfc_info(&drv->pdev->dev, "nfcwilink FW file name: %s\n", file_name);
return 0;
}
@@ -207,15 +203,13 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
unsigned long comp_ret;
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "send_bts_cmd entry");
-
/* verify valid cmd for the NFC channel */
if ((len <= sizeof(struct nfcwilink_hdr)) ||
(len > BTS_FILE_CMD_MAX_LEN) ||
(hdr->chnl != NFCWILINK_CHNL) ||
(hdr->opcode != NFCWILINK_OPCODE)) {
- nfc_dev_err(&drv->pdev->dev,
- "ignoring invalid bts cmd, len %d, chnl %d, opcode %d",
+ nfc_err(&drv->pdev->dev,
+ "ignoring invalid bts cmd, len %d, chnl %d, opcode %d\n",
len, hdr->chnl, hdr->opcode);
return 0;
}
@@ -226,7 +220,7 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
skb = nfcwilink_skb_alloc(len, GFP_KERNEL);
if (!skb) {
- nfc_dev_err(&drv->pdev->dev, "no memory for bts cmd");
+ nfc_err(&drv->pdev->dev, "no memory for bts cmd\n");
return -ENOMEM;
}
@@ -238,11 +232,11 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
comp_ret = wait_for_completion_timeout(&drv->completed,
msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
- nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
- comp_ret);
+ dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+ comp_ret);
if (comp_ret == 0) {
- nfc_dev_err(&drv->pdev->dev,
- "timeout on wait_for_completion_timeout");
+ nfc_err(&drv->pdev->dev,
+ "timeout on wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
@@ -257,8 +251,6 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
__u8 *ptr;
int len, rc;
- nfc_dev_dbg(&drv->pdev->dev, "download_fw entry");
-
set_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags);
rc = nfcwilink_get_bts_file_name(drv, file_name);
@@ -267,7 +259,7 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
rc = request_firmware(&fw, file_name, &drv->pdev->dev);
if (rc) {
- nfc_dev_err(&drv->pdev->dev, "request_firmware failed %d", rc);
+ nfc_err(&drv->pdev->dev, "request_firmware failed %d\n", rc);
/* if the file is not found, don't exit with failure */
if (rc == -ENOENT)
@@ -280,14 +272,14 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
ptr = (__u8 *)fw->data;
if ((len == 0) || (ptr == NULL)) {
- nfc_dev_dbg(&drv->pdev->dev,
- "request_firmware returned size %d", len);
+ dev_dbg(&drv->pdev->dev,
+ "request_firmware returned size %d\n", len);
goto release_fw;
}
if (__le32_to_cpu(((struct bts_file_hdr *)ptr)->magic) !=
BTS_FILE_HDR_MAGIC) {
- nfc_dev_err(&drv->pdev->dev, "wrong bts magic number");
+ nfc_err(&drv->pdev->dev, "wrong bts magic number\n");
rc = -EINVAL;
goto release_fw;
}
@@ -302,8 +294,8 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
action_len =
__le16_to_cpu(((struct bts_file_action *)ptr)->len);
- nfc_dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d",
- action_type, action_len);
+ dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d\n",
+ action_type, action_len);
switch (action_type) {
case BTS_FILE_ACTION_TYPE_SEND_CMD:
@@ -333,8 +325,6 @@ static void nfcwilink_register_complete(void *priv_data, char data)
{
struct nfcwilink *drv = priv_data;
- nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
-
/* store ST registration status */
drv->st_register_cb_status = data;
@@ -356,7 +346,7 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
return -EFAULT;
}
- nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+ dev_dbg(&drv->pdev->dev, "receive entry, len %d\n", skb->len);
/* strip the ST header
(apart for the chnl byte, which is not received in the hdr) */
@@ -370,7 +360,7 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
/* Forward skb to NCI core layer */
rc = nci_recv_frame(drv->ndev, skb);
if (rc < 0) {
- nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+ nfc_err(&drv->pdev->dev, "nci_recv_frame failed %d\n", rc);
return rc;
}
@@ -396,8 +386,6 @@ static int nfcwilink_open(struct nci_dev *ndev)
unsigned long comp_ret;
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "open entry");
-
if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
rc = -EBUSY;
goto exit;
@@ -415,9 +403,9 @@ static int nfcwilink_open(struct nci_dev *ndev)
&drv->completed,
msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
- nfc_dev_dbg(&drv->pdev->dev,
- "wait_for_completion_timeout returned %ld",
- comp_ret);
+ dev_dbg(&drv->pdev->dev,
+ "wait_for_completion_timeout returned %ld\n",
+ comp_ret);
if (comp_ret == 0) {
/* timeout */
@@ -425,13 +413,12 @@ static int nfcwilink_open(struct nci_dev *ndev)
goto clear_exit;
} else if (drv->st_register_cb_status != 0) {
rc = drv->st_register_cb_status;
- nfc_dev_err(&drv->pdev->dev,
- "st_register_cb failed %d", rc);
+ nfc_err(&drv->pdev->dev,
+ "st_register_cb failed %d\n", rc);
goto clear_exit;
}
} else {
- nfc_dev_err(&drv->pdev->dev,
- "st_register failed %d", rc);
+ nfc_err(&drv->pdev->dev, "st_register failed %d\n", rc);
goto clear_exit;
}
}
@@ -441,8 +428,8 @@ static int nfcwilink_open(struct nci_dev *ndev)
drv->st_write = nfcwilink_proto.write;
if (nfcwilink_download_fw(drv)) {
- nfc_dev_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d",
- rc);
+ nfc_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d\n",
+ rc);
/* open should succeed, even if the FW download failed */
}
@@ -460,14 +447,12 @@ static int nfcwilink_close(struct nci_dev *ndev)
struct nfcwilink *drv = nci_get_drvdata(ndev);
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "close entry");
-
if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
return 0;
rc = st_unregister(&nfcwilink_proto);
if (rc)
- nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+ nfc_err(&drv->pdev->dev, "st_unregister failed %d\n", rc);
drv->st_write = NULL;
@@ -480,7 +465,7 @@ static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
long len;
- nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+ dev_dbg(&drv->pdev->dev, "send entry, len %d\n", skb->len);
if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) {
kfree_skb(skb);
@@ -498,7 +483,7 @@ static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
len = drv->st_write(skb);
if (len < 0) {
kfree_skb(skb);
- nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+ nfc_err(&drv->pdev->dev, "st_write failed %ld\n", len);
return -EFAULT;
}
@@ -517,8 +502,6 @@ static int nfcwilink_probe(struct platform_device *pdev)
int rc;
__u32 protocols;
- nfc_dev_dbg(&pdev->dev, "probe entry");
-
drv = devm_kzalloc(&pdev->dev, sizeof(struct nfcwilink), GFP_KERNEL);
if (!drv) {
rc = -ENOMEM;
@@ -538,7 +521,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
NFCWILINK_HDR_LEN,
0);
if (!drv->ndev) {
- nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+ nfc_err(&pdev->dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
goto exit;
}
@@ -548,7 +531,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
rc = nci_register_device(drv->ndev);
if (rc < 0) {
- nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+ nfc_err(&pdev->dev, "nci_register_device failed %d\n", rc);
goto free_dev_exit;
}
@@ -568,8 +551,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
struct nci_dev *ndev;
- nfc_dev_dbg(&pdev->dev, "remove entry");
-
if (!drv)
return -EFAULT;
@@ -578,8 +559,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
nci_unregister_device(ndev);
nci_free_device(ndev);
- dev_set_drvdata(&pdev->dev, NULL);
-
return 0;
}
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 5df730be88a..2daf04c0733 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -150,6 +150,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
#define PN533_CMD_TG_GET_DATA 0x86
#define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_TG_SET_META_DATA 0x94
#define PN533_CMD_UNDEF 0xff
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -373,6 +374,8 @@ struct pn533 {
struct delayed_work poll_work;
struct work_struct mi_rx_work;
struct work_struct mi_tx_work;
+ struct work_struct mi_tm_rx_work;
+ struct work_struct mi_tm_tx_work;
struct work_struct tg_work;
struct work_struct rf_work;
@@ -387,6 +390,7 @@ struct pn533 {
struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
u8 poll_mod_count;
u8 poll_mod_curr;
+ u8 poll_dep;
u32 poll_protocols;
u32 listen_protocols;
struct timer_list listen_timer;
@@ -722,32 +726,32 @@ static void pn533_recv_response(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_dev_dbg(&dev->interface->dev,
- "The urb has been canceled (status %d)",
- urb->status);
+ dev_dbg(&dev->interface->dev,
+ "The urb has been canceled (status %d)\n",
+ urb->status);
goto sched_wq;
case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev,
- "Urb failure (status %d)", urb->status);
+ nfc_err(&dev->interface->dev,
+ "Urb failure (status %d)\n", urb->status);
goto sched_wq;
}
in_frame = dev->in_urb->transfer_buffer;
- nfc_dev_dbg(&dev->interface->dev, "Received a frame.");
+ dev_dbg(&dev->interface->dev, "Received a frame\n");
print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
dev->ops->rx_frame_size(in_frame), false);
if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
- nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
+ nfc_err(&dev->interface->dev, "Received an invalid frame\n");
cmd->status = -EIO;
goto sched_wq;
}
if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
- nfc_dev_err(&dev->interface->dev,
- "It it not the response to the last command");
+ nfc_err(&dev->interface->dev,
+ "It it not the response to the last command\n");
cmd->status = -EIO;
goto sched_wq;
}
@@ -777,29 +781,29 @@ static void pn533_recv_ack(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_dev_dbg(&dev->interface->dev,
- "The urb has been stopped (status %d)",
- urb->status);
+ dev_dbg(&dev->interface->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
goto sched_wq;
case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev,
- "Urb failure (status %d)", urb->status);
+ nfc_err(&dev->interface->dev,
+ "Urb failure (status %d)\n", urb->status);
goto sched_wq;
}
in_frame = dev->in_urb->transfer_buffer;
if (!pn533_std_rx_frame_is_ack(in_frame)) {
- nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
+ nfc_err(&dev->interface->dev, "Received an invalid ack\n");
cmd->status = -EIO;
goto sched_wq;
}
rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "usb_submit_urb failed with result %d", rc);
+ nfc_err(&dev->interface->dev,
+ "usb_submit_urb failed with result %d\n", rc);
cmd->status = rc;
goto sched_wq;
}
@@ -823,8 +827,6 @@ static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
/* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
dev->out_urb->transfer_buffer = ack;
dev->out_urb->transfer_buffer_length = sizeof(ack);
rc = usb_submit_urb(dev->out_urb, flags);
@@ -927,7 +929,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
struct pn533_cmd *cmd;
int rc = 0;
- nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code);
+ dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
@@ -954,8 +956,8 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
goto unlock;
}
- nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__,
- cmd_code);
+ dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n",
+ __func__, cmd_code);
INIT_LIST_HEAD(&cmd->queue);
list_add_tail(&cmd->queue, &dev->cmd_queue);
@@ -1168,14 +1170,14 @@ static void pn533_send_complete(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_dev_dbg(&dev->interface->dev,
- "The urb has been stopped (status %d)",
- urb->status);
+ dev_dbg(&dev->interface->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
break;
case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev,
- "Urb failure (status %d)", urb->status);
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+ urb->status);
}
}
@@ -1452,8 +1454,8 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
struct nfc_target nfc_tgt;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__,
- dev->poll_mod_curr);
+ dev_dbg(&dev->interface->dev, "%s: modulation=%d\n",
+ __func__, dev->poll_mod_curr);
if (tg != 1)
return -EPROTO;
@@ -1475,8 +1477,8 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
break;
default:
- nfc_dev_err(&dev->interface->dev,
- "Unknown current poll modulation");
+ nfc_err(&dev->interface->dev,
+ "Unknown current poll modulation\n");
return -EPROTO;
}
@@ -1484,14 +1486,14 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
return rc;
if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
- nfc_dev_dbg(&dev->interface->dev,
- "The Tg found doesn't have the desired protocol");
+ dev_dbg(&dev->interface->dev,
+ "The Tg found doesn't have the desired protocol\n");
return -EAGAIN;
}
- nfc_dev_dbg(&dev->interface->dev,
- "Target found - supported protocols: 0x%x",
- nfc_tgt.supported_protocols);
+ dev_dbg(&dev->interface->dev,
+ "Target found - supported protocols: 0x%x\n",
+ nfc_tgt.supported_protocols);
dev->tgt_available_prots = nfc_tgt.supported_protocols;
@@ -1548,7 +1550,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
u8 nbtg, tg, *tgdata;
int rc, tgdata_len;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	/* Enable DEP polling for the next poll cycle */
+ dev->poll_dep = 1;
nbtg = resp->data[0];
tg = resp->data[1];
@@ -1624,37 +1627,130 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
#define PN533_CMD_DATAEXCH_HEAD_LEN 1
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+static void pn533_wq_tm_mi_recv(struct work_struct *work);
+static struct sk_buff *pn533_build_response(struct pn533 *dev);
+
static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
- u8 status;
+ struct sk_buff *skb;
+ u8 status, ret, mi;
+ int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
- if (IS_ERR(resp))
+ if (IS_ERR(resp)) {
+ skb_queue_purge(&dev->resp_q);
return PTR_ERR(resp);
+ }
status = resp->data[0];
+
+ ret = status & PN533_CMD_RET_MASK;
+ mi = status & PN533_CMD_MI_MASK;
+
skb_pull(resp, sizeof(status));
- if (status != 0) {
- nfc_tm_deactivated(dev->nfc_dev);
- dev->tgt_mode = 0;
- dev_kfree_skb(resp);
- return 0;
+ if (ret != PN533_CMD_RET_SUCCESS) {
+ rc = -EIO;
+ goto error;
}
- return nfc_tm_data_received(dev->nfc_dev, resp);
+ skb_queue_tail(&dev->resp_q, resp);
+
+ if (mi) {
+ queue_work(dev->wq, &dev->mi_tm_rx_work);
+ return -EINPROGRESS;
+ }
+
+ skb = pn533_build_response(dev);
+ if (!skb) {
+ rc = -EIO;
+ goto error;
+ }
+
+ return nfc_tm_data_received(dev->nfc_dev, skb);
+
+error:
+ nfc_tm_deactivated(dev->nfc_dev);
+ dev->tgt_mode = 0;
+ skb_queue_purge(&dev->resp_q);
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static void pn533_wq_tm_mi_recv(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
+ struct sk_buff *skb;
+ int rc;
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+ skb = pn533_alloc_skb(dev, 0);
+ if (!skb)
+ return;
+
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_TG_GET_DATA,
+ skb,
+ pn533_tm_get_data_complete,
+ NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+
+ return;
+}
+
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp);
+static void pn533_wq_tm_mi_send(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
+ struct sk_buff *skb;
+ int rc;
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+ /* Grab the first skb in the queue */
+ skb = skb_dequeue(&dev->fragment_skb);
+ if (skb == NULL) { /* No more data */
+ /* Reset the queue for future use */
+ skb_queue_head_init(&dev->fragment_skb);
+ goto error;
+ }
+
+ /* last entry - remove MI bit */
+ if (skb_queue_len(&dev->fragment_skb) == 0) {
+ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA,
+ skb, pn533_tm_send_complete, NULL);
+ } else
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_TG_SET_META_DATA,
+ skb, pn533_tm_send_complete, NULL);
+
+ if (rc == 0) /* success */
+ return;
+
+ dev_err(&dev->interface->dev,
+ "Error %d when trying to perform set meta data_exchange", rc);
+
+ dev_kfree_skb(skb);
+
+error:
+ pn533_send_ack(dev, GFP_KERNEL);
+ queue_work(dev->wq, &dev->cmd_work);
}
static void pn533_wq_tg_get_data(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, tg_work);
-
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 0);
if (!skb)
@@ -1676,7 +1772,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
size_t gb_len;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (resp->len < ATR_REQ_GB_OFFSET + 1)
return -EINVAL;
@@ -1684,8 +1780,8 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
mode = resp->data[0];
cmd = &resp->data[1];
- nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
- mode, resp->len);
+ dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
+ mode, resp->len);
if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
PN533_INIT_TARGET_RESP_ACTIVE)
@@ -1700,8 +1796,8 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
comm_mode, gb, gb_len);
if (rc < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error when signaling target activation");
+ nfc_err(&dev->interface->dev,
+ "Error when signaling target activation\n");
return rc;
}
@@ -1715,7 +1811,7 @@ static void pn533_listen_mode_timer(unsigned long data)
{
struct pn533 *dev = (struct pn533 *)data;
- nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
+ dev_dbg(&dev->interface->dev, "Listen mode timeout\n");
dev->cancel_listen = 1;
@@ -1730,13 +1826,12 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
- __func__, rc);
+		nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
return rc;
}
@@ -1754,7 +1849,7 @@ static void pn533_wq_rf(struct work_struct *work)
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 2);
if (!skb)
@@ -1767,25 +1862,136 @@ static void pn533_wq_rf(struct work_struct *work)
pn533_rf_complete, NULL);
if (rc < 0) {
dev_kfree_skb(skb);
- nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+ nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
}
return;
}
+static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct pn533_cmd_jump_dep_response *rsp;
+ struct nfc_target nfc_target;
+ u8 target_gt_len;
+ int rc;
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+ rc = rsp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+		/* No target found, turn radio off */
+ queue_work(dev->wq, &dev->rf_work);
+
+ dev_kfree_skb(resp);
+ return 0;
+ }
+
+	dev_dbg(&dev->interface->dev, "Creating new target\n");
+
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ nfc_target.nfcid1_len = 10;
+ memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+ rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+ if (rc)
+ goto error;
+
+ dev->tgt_available_prots = 0;
+ dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+ /* ATR_RES general bytes are located at offset 17 */
+ target_gt_len = resp->len - 17;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+ rsp->gt, target_gt_len);
+ if (!rc) {
+ rc = nfc_dep_link_is_up(dev->nfc_dev,
+ dev->nfc_dev->targets[0].idx,
+ 0, NFC_RF_INITIATOR);
+
+ if (!rc)
+ pn533_poll_reset_mod_list(dev);
+ }
+error:
+ dev_kfree_skb(resp);
+ return rc;
+}
+
+#define PASSIVE_DATA_LEN 5
+static int pn533_poll_dep(struct nfc_dev *nfc_dev)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct sk_buff *skb;
+ int rc, skb_len;
+ u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
+ u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+ if (!dev->gb) {
+ dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+
+ if (!dev->gb || !dev->gb_len) {
+ dev->poll_dep = 0;
+ queue_work(dev->wq, &dev->rf_work);
+ }
+ }
+
+ skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */
+ skb_len += PASSIVE_DATA_LEN;
+
+ /* NFCID3 */
+ skb_len += NFC_NFCID3_MAXSIZE;
+ nfcid3[0] = 0x1;
+ nfcid3[1] = 0xfe;
+ get_random_bytes(nfcid3 + 2, 6);
+
+ skb = pn533_alloc_skb(dev, skb_len);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = 0x01; /* Active */
+ *skb_put(skb, 1) = 0x02; /* 424 kbps */
+
+ next = skb_put(skb, 1); /* Next */
+ *next = 0;
+
+ /* Copy passive data */
+ memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+ *next |= 1;
+
+ /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+ memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+ NFC_NFCID3_MAXSIZE);
+ *next |= 2;
+
+ memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
+ *next |= 4; /* We have some Gi */
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+ pn533_poll_dep_complete, NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+
+ return rc;
+}
+
static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
struct pn533_poll_modulations *cur_mod;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_dev_err(&dev->interface->dev, "%s Poll complete error %d",
- __func__, rc);
+ nfc_err(&dev->interface->dev, "%s Poll complete error %d\n",
+ __func__, rc);
if (rc == -ENOENT) {
if (dev->poll_mod_count != 0)
@@ -1793,8 +1999,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
else
goto stop_poll;
} else if (rc < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when running poll", rc);
+ nfc_err(&dev->interface->dev,
+ "Error %d when running poll\n", rc);
goto stop_poll;
}
}
@@ -1813,7 +2019,7 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
goto done;
if (!dev->poll_mod_count) {
- nfc_dev_dbg(&dev->interface->dev, "Polling has been stopped.");
+ dev_dbg(&dev->interface->dev, "Polling has been stopped\n");
goto done;
}
@@ -1826,7 +2032,7 @@ done:
return rc;
stop_poll:
- nfc_dev_err(&dev->interface->dev, "Polling operation has been stopped");
+ nfc_err(&dev->interface->dev, "Polling operation has been stopped\n");
pn533_poll_reset_mod_list(dev);
dev->poll_protocols = 0;
@@ -1856,8 +2062,13 @@ static int pn533_send_poll_frame(struct pn533 *dev)
mod = dev->poll_mod_active[dev->poll_mod_curr];
- nfc_dev_dbg(&dev->interface->dev, "%s mod len %d\n",
- __func__, mod->len);
+ dev_dbg(&dev->interface->dev, "%s mod len %d\n",
+ __func__, mod->len);
+
+ if (dev->poll_dep) {
+ dev->poll_dep = 0;
+ return pn533_poll_dep(dev->nfc_dev);
+ }
if (mod->len == 0) { /* Listen mode */
cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
@@ -1868,7 +2079,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
}
if (!skb) {
- nfc_dev_err(&dev->interface->dev, "Failed to allocate skb.");
+ nfc_err(&dev->interface->dev, "Failed to allocate skb\n");
return -ENOMEM;
}
@@ -1876,7 +2087,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
NULL);
if (rc < 0) {
dev_kfree_skb(skb);
- nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+ nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc);
}
return rc;
@@ -1890,9 +2101,9 @@ static void pn533_wq_poll(struct work_struct *work)
cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
- nfc_dev_dbg(&dev->interface->dev,
- "%s cancel_listen %d modulation len %d",
- __func__, dev->cancel_listen, cur_mod->len);
+ dev_dbg(&dev->interface->dev,
+ "%s cancel_listen %d modulation len %d\n",
+ __func__, dev->cancel_listen, cur_mod->len);
if (dev->cancel_listen == 1) {
dev->cancel_listen = 0;
@@ -1913,21 +2124,23 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
u32 im_protocols, u32 tm_protocols)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_poll_modulations *cur_mod;
u8 rand_mod;
+ int rc;
- nfc_dev_dbg(&dev->interface->dev,
- "%s: im protocols 0x%x tm protocols 0x%x",
- __func__, im_protocols, tm_protocols);
+ dev_dbg(&dev->interface->dev,
+ "%s: im protocols 0x%x tm protocols 0x%x\n",
+ __func__, im_protocols, tm_protocols);
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot poll with a target already activated");
+ nfc_err(&dev->interface->dev,
+ "Cannot poll with a target already activated\n");
return -EBUSY;
}
if (dev->tgt_mode) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot poll while already being activated");
+ nfc_err(&dev->interface->dev,
+ "Cannot poll while already being activated\n");
return -EBUSY;
}
@@ -1946,20 +2159,26 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
rand_mod %= dev->poll_mod_count;
dev->poll_mod_curr = rand_mod;
- return pn533_send_poll_frame(dev);
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ rc = pn533_send_poll_frame(dev);
+
+ /* Start listen timer */
+ if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1)
+ mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
+
+ return rc;
}
static void pn533_stop_poll(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
del_timer(&dev->listen_timer);
if (!dev->poll_mod_count) {
- nfc_dev_dbg(&dev->interface->dev,
- "Polling operation was not running");
+ dev_dbg(&dev->interface->dev,
+ "Polling operation was not running\n");
return;
}
@@ -1973,11 +2192,10 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
struct pn533_cmd_activate_response *rsp;
u16 gt_len;
int rc;
-
struct sk_buff *skb;
struct sk_buff *resp;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
if (!skb)
@@ -1993,8 +2211,8 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
rsp = (struct pn533_cmd_activate_response *)resp->data;
rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev,
- "Target activation failed (error 0x%x)", rc);
+ nfc_err(&dev->interface->dev,
+ "Target activation failed (error 0x%x)\n", rc);
dev_kfree_skb(resp);
return -EIO;
}
@@ -2013,39 +2231,38 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__,
- protocol);
+ dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol);
if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot activate while polling");
+ nfc_err(&dev->interface->dev,
+ "Cannot activate while polling\n");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "There is already an active target");
+ nfc_err(&dev->interface->dev,
+ "There is already an active target\n");
return -EBUSY;
}
if (!dev->tgt_available_prots) {
- nfc_dev_err(&dev->interface->dev,
- "There is no available target to activate");
+ nfc_err(&dev->interface->dev,
+ "There is no available target to activate\n");
return -EINVAL;
}
if (!(dev->tgt_available_prots & (1 << protocol))) {
- nfc_dev_err(&dev->interface->dev,
- "Target doesn't support requested proto %u",
- protocol);
+ nfc_err(&dev->interface->dev,
+ "Target doesn't support requested proto %u\n",
+ protocol);
return -EINVAL;
}
if (protocol == NFC_PROTO_NFC_DEP) {
rc = pn533_activate_target_nfcdep(dev);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Activating target with DEP failed %d", rc);
+ nfc_err(&dev->interface->dev,
+ "Activating target with DEP failed %d\n", rc);
return rc;
}
}
@@ -2060,16 +2277,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-
struct sk_buff *skb;
struct sk_buff *resp;
-
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (!dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev, "There is no active target");
+ nfc_err(&dev->interface->dev, "There is no active target\n");
return;
}
@@ -2088,8 +2303,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
rc = resp->data[0] & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS)
- nfc_dev_err(&dev->interface->dev,
- "Error 0x%x when releasing the target", rc);
+ nfc_err(&dev->interface->dev,
+ "Error 0x%x when releasing the target\n", rc);
dev_kfree_skb(resp);
return;
@@ -2111,8 +2326,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
if (dev->tgt_available_prots &&
!(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
- nfc_dev_err(&dev->interface->dev,
- "The target does not support DEP");
+ nfc_err(&dev->interface->dev,
+ "The target does not support DEP\n");
rc = -EINVAL;
goto error;
}
@@ -2121,15 +2336,15 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev,
- "Bringing DEP link up failed (error 0x%x)", rc);
+ nfc_err(&dev->interface->dev,
+ "Bringing DEP link up failed (error 0x%x)\n", rc);
goto error;
}
if (!dev->tgt_available_prots) {
struct nfc_target nfc_target;
- nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+ dev_dbg(&dev->interface->dev, "Creating new target\n");
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
nfc_target.nfcid1_len = 10;
@@ -2158,7 +2373,6 @@ error:
}
static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
-#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8 *gb, size_t gb_len)
{
@@ -2166,20 +2380,19 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb;
int rc, skb_len;
u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
-
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot bring the DEP link up while polling");
+ nfc_err(&dev->interface->dev,
+ "Cannot bring the DEP link up while polling\n");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "There is already an active target");
+ nfc_err(&dev->interface->dev,
+ "There is already an active target\n");
return -EBUSY;
}
@@ -2249,7 +2462,7 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
pn533_poll_reset_mod_list(dev);
@@ -2274,7 +2487,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
struct sk_buff *skb, *tmp, *t;
unsigned int skb_len = 0, tmp_len = 0;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (skb_queue_empty(&dev->resp_q))
return NULL;
@@ -2287,8 +2500,8 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
skb_queue_walk_safe(&dev->resp_q, tmp, t)
skb_len += tmp->len;
- nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
- __func__, skb_len);
+ dev_dbg(&dev->interface->dev, "%s total length %d\n",
+ __func__, skb_len);
skb = alloc_skb(skb_len, GFP_KERNEL);
if (skb == NULL)
@@ -2315,7 +2528,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
int rc = 0;
u8 status, ret, mi;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -2329,8 +2542,8 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
skb_pull(resp, sizeof(status));
if (ret != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev,
- "Exchanging data failed (error 0x%x)", ret);
+ nfc_err(&dev->interface->dev,
+ "Exchanging data failed (error 0x%x)\n", ret);
rc = -EIO;
goto error;
}
@@ -2388,14 +2601,17 @@ static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
break;
}
- /* Reserve the TG/MI byte */
- skb_reserve(frag, 1);
+ if (!dev->tgt_mode) {
+ /* Reserve the TG/MI byte */
+ skb_reserve(frag, 1);
- /* MI + TG */
- if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
- *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
- else
- *skb_push(frag, sizeof(u8)) = 1; /* TG */
+ /* MI + TG */
+ if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
+ *skb_push(frag, sizeof(u8)) =
+ (PN533_CMD_MI_MASK | 1);
+ else
+ *skb_push(frag, sizeof(u8)) = 1; /* TG */
+ }
memcpy(skb_put(frag, frag_size), skb->data, frag_size);
@@ -2420,11 +2636,11 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
struct pn533_data_exchange_arg *arg = NULL;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (!dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "Can't exchange data if there is no active target");
+ nfc_err(&dev->interface->dev,
+ "Can't exchange data if there is no active target\n");
rc = -EINVAL;
goto error;
}
@@ -2487,13 +2703,18 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
{
u8 status;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp))
return PTR_ERR(resp);
status = resp->data[0];
+ /* Prepare for the next round */
+ if (skb_queue_len(&dev->fragment_skb) > 0) {
+ queue_work(dev->wq, &dev->mi_tm_tx_work);
+ return -EINPROGRESS;
+ }
dev_kfree_skb(resp);
if (status != 0) {
@@ -2514,19 +2735,34 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+	/* let's split it into multiple chunks if the size is too big */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
- nfc_dev_err(&dev->interface->dev,
- "Data length greater than the max allowed: %d",
- PN533_CMD_DATAEXCH_DATA_MAXLEN);
- return -ENOSYS;
+ rc = pn533_fill_fragment_skbs(dev, skb);
+ if (rc <= 0)
+ goto error;
+
+ /* get the first skb */
+ skb = skb_dequeue(&dev->fragment_skb);
+ if (!skb) {
+ rc = -EIO;
+ goto error;
+ }
+
+ rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
+ pn533_tm_send_complete, NULL);
+ } else {
+		/* Send the skb */
+ rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
+ pn533_tm_send_complete, NULL);
}
- rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
- pn533_tm_send_complete, NULL);
- if (rc < 0)
+error:
+ if (rc < 0) {
dev_kfree_skb(skb);
+ skb_queue_purge(&dev->fragment_skb);
+ }
return rc;
}
@@ -2534,11 +2770,10 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
static void pn533_wq_mi_recv(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
-
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
if (!skb)
@@ -2570,8 +2805,8 @@ static void pn533_wq_mi_recv(struct work_struct *work)
if (rc == 0) /* success */
return;
- nfc_dev_err(&dev->interface->dev,
- "Error %d when trying to perform data_exchange", rc);
+ nfc_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange\n", rc);
dev_kfree_skb(skb);
kfree(dev->cmd_complete_mi_arg);
@@ -2587,7 +2822,7 @@ static void pn533_wq_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
@@ -2625,8 +2860,8 @@ static void pn533_wq_mi_send(struct work_struct *work)
if (rc == 0) /* success */
return;
- nfc_dev_err(&dev->interface->dev,
- "Error %d when trying to perform data_exchange", rc);
+ nfc_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange\n", rc);
dev_kfree_skb(skb);
kfree(dev->cmd_complete_dep_arg);
@@ -2641,10 +2876,9 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
{
struct sk_buff *skb;
struct sk_buff *resp;
-
int skb_len;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
@@ -2691,7 +2925,7 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, sizeof(u8));
if (!skb)
@@ -2717,7 +2951,7 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
{
struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
- nfc_dev_dbg(&urb->dev->dev, "%s", __func__);
+ dev_dbg(&urb->dev->dev, "%s\n", __func__);
print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
urb->transfer_buffer, urb->transfer_buffer_length,
@@ -2737,7 +2971,7 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
void *cntx;
struct pn533_acr122_poweron_rdr_arg arg;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
init_completion(&arg.done);
cntx = dev->in_urb->context; /* backup context */
@@ -2755,16 +2989,15 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Reader power on cmd error %d", rc);
+ nfc_err(&dev->interface->dev,
+ "Reader power on cmd error %d\n", rc);
return rc;
}
rc = usb_submit_urb(dev->in_urb, GFP_KERNEL);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Can't submit for reader power on cmd response %d",
- rc);
+ nfc_err(&dev->interface->dev,
+ "Can't submit reader poweron cmd response %d\n", rc);
return rc;
}
@@ -2785,20 +3018,19 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
(u8 *)&rf_field, 1);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error on setting RF field");
+ nfc_err(&dev->interface->dev, "Error on setting RF field\n");
return rc;
}
return rc;
}
-int pn533_dev_up(struct nfc_dev *nfc_dev)
+static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
return pn533_rf_field(nfc_dev, 1);
}
-int pn533_dev_down(struct nfc_dev *nfc_dev)
+static int pn533_dev_down(struct nfc_dev *nfc_dev)
{
return pn533_rf_field(nfc_dev, 0);
}
@@ -2839,16 +3071,16 @@ static int pn533_setup(struct pn533 *dev)
break;
default:
- nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
- dev->device_type);
+ nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
return -EINVAL;
}
rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
(u8 *)&max_retries, sizeof(max_retries));
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error on setting MAX_RETRIES config");
+ nfc_err(&dev->interface->dev,
+ "Error on setting MAX_RETRIES config\n");
return rc;
}
@@ -2856,8 +3088,7 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
(u8 *)&timing, sizeof(timing));
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error on setting RF timings");
+ nfc_err(&dev->interface->dev, "Error on setting RF timings\n");
return rc;
}
@@ -2871,8 +3102,8 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
pasori_cfg, 3);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error while settings PASORI config");
+ nfc_err(&dev->interface->dev,
+ "Error while settings PASORI config\n");
return rc;
}
@@ -2917,8 +3148,8 @@ static int pn533_probe(struct usb_interface *interface,
}
if (!in_endpoint || !out_endpoint) {
- nfc_dev_err(&interface->dev,
- "Could not find bulk-in or bulk-out endpoint");
+ nfc_err(&interface->dev,
+ "Could not find bulk-in or bulk-out endpoint\n");
rc = -ENODEV;
goto error;
}
@@ -2941,6 +3172,8 @@ static int pn533_probe(struct usb_interface *interface,
INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
+ INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv);
+ INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send);
INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
INIT_WORK(&dev->rf_work, pn533_wq_rf);
dev->wq = alloc_ordered_workqueue("pn533", 0);
@@ -2978,16 +3211,15 @@ static int pn533_probe(struct usb_interface *interface,
rc = pn533_acr122_poweron_rdr(dev);
if (rc < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Couldn't poweron the reader (error %d)",
- rc);
+ nfc_err(&dev->interface->dev,
+ "Couldn't poweron the reader (error %d)\n", rc);
goto destroy_wq;
}
break;
default:
- nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
- dev->device_type);
+ nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
rc = -EINVAL;
goto destroy_wq;
}
@@ -2997,9 +3229,9 @@ static int pn533_probe(struct usb_interface *interface,
if (rc < 0)
goto destroy_wq;
- nfc_dev_info(&dev->interface->dev,
- "NXP PN5%02X firmware ver %d.%d now attached",
- fw_ver.ic, fw_ver.ver, fw_ver.rev);
+ nfc_info(&dev->interface->dev,
+ "NXP PN5%02X firmware ver %d.%d now attached\n",
+ fw_ver.ic, fw_ver.ver, fw_ver.rev);
dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
@@ -3070,7 +3302,7 @@ static void pn533_disconnect(struct usb_interface *interface)
usb_free_urb(dev->out_urb);
kfree(dev);
- nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
+ nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
}
static struct usb_driver pn533_driver = {
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 01e27d4bdd0..b158ee1c2ac 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/crc-ccitt.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -151,8 +153,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
int count = sizeof(rset_cmd);
- pr_info(DRIVER_DESC ": %s\n", __func__);
- dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
+ nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
/* Disable fw download */
gpio_set_value(phy->gpio_fw, 0);
@@ -173,7 +174,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
if (ret == count) {
- dev_info(&phy->i2c_dev->dev,
+ nfc_info(&phy->i2c_dev->dev,
"nfc_en polarity : active %s\n",
(polarity == 0 ? "low" : "high"));
goto out;
@@ -181,7 +182,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
}
}
- dev_err(&phy->i2c_dev->dev,
+ nfc_err(&phy->i2c_dev->dev,
"Could not detect nfc_en polarity, fallback to active high\n");
out:
@@ -201,7 +202,7 @@ static int pn544_hci_i2c_enable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info(DRIVER_DESC ": %s\n", __func__);
+ pr_info("%s\n", __func__);
pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
@@ -214,8 +215,6 @@ static void pn544_hci_i2c_disable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info(DRIVER_DESC ": %s\n", __func__);
-
gpio_set_value(phy->gpio_fw, 0);
gpio_set_value(phy->gpio_en, !phy->en_polarity);
usleep_range(10000, 15000);
@@ -298,11 +297,9 @@ static int check_crc(u8 *buf, int buflen)
crc = ~crc;
if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
- pr_err(PN544_HCI_I2C_DRIVER_NAME
- ": CRC error 0x%x != 0x%x 0x%x\n",
+ pr_err("CRC error 0x%x != 0x%x 0x%x\n",
crc, buf[len - 1], buf[len - 2]);
-
- pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+ pr_info("%s: BAD CRC\n", __func__);
print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
16, 2, buf, buflen, false);
return -EPERM;
@@ -328,13 +325,13 @@ static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
r = i2c_master_recv(client, &len, 1);
if (r != 1) {
- dev_err(&client->dev, "cannot read len byte\n");
+ nfc_err(&client->dev, "cannot read len byte\n");
return -EREMOTEIO;
}
if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
(len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
- dev_err(&client->dev, "invalid len byte\n");
+ nfc_err(&client->dev, "invalid len byte\n");
r = -EBADMSG;
goto flush;
}
@@ -386,7 +383,7 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
r = i2c_master_recv(client, (char *) &response, sizeof(response));
if (r != sizeof(response)) {
- dev_err(&client->dev, "cannot read fw status\n");
+ nfc_err(&client->dev, "cannot read fw status\n");
return -EIO;
}
@@ -478,8 +475,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
- firmware_name);
+ pr_info("Starting Firmware Download (%s)\n", firmware_name);
strcpy(phy->firmware_name, firmware_name);
@@ -493,7 +489,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
int result)
{
- pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+ pr_info("Firmware Download Complete, result=%d\n", result);
pn544_hci_i2c_disable(phy);
@@ -694,14 +690,14 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
}
phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
GFP_KERNEL);
if (!phy) {
- dev_err(&client->dev,
+ nfc_err(&client->dev,
"Cannot allocate memory for pn544 i2c phy.\n");
return -ENOMEM;
}
@@ -714,18 +710,18 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
if (pdata == NULL) {
- dev_err(&client->dev, "No platform data\n");
+ nfc_err(&client->dev, "No platform data\n");
return -EINVAL;
}
if (pdata->request_resources == NULL) {
- dev_err(&client->dev, "request_resources() missing\n");
+ nfc_err(&client->dev, "request_resources() missing\n");
return -EINVAL;
}
r = pdata->request_resources(client);
if (r) {
- dev_err(&client->dev, "Cannot get platform resources\n");
+ nfc_err(&client->dev, "Cannot get platform resources\n");
return r;
}
@@ -739,7 +735,7 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
PN544_HCI_I2C_DRIVER_NAME, phy);
if (r < 0) {
- dev_err(&client->dev, "Unable to register IRQ handler\n");
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
goto err_rti;
}
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 078e62feba1..74cfa0a88b9 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -41,6 +43,7 @@ enum pn544_state {
/* Proprietary commands */
#define PN544_WRITE 0x3f
+#define PN544_TEST_SWP 0x21
/* Proprietary gates, events, commands and registers */
@@ -81,14 +84,17 @@ enum pn544_state {
#define PN544_PL_NFCT_DEACTIVATED 0x09
#define PN544_SWP_MGMT_GATE 0xA0
+#define PN544_SWP_DEFAULT_MODE 0x01
#define PN544_NFC_WI_MGMT_GATE 0xA1
+#define PN544_NFC_ESE_DEFAULT_MODE 0x01
#define PN544_HCI_EVT_SND_DATA 0x01
#define PN544_HCI_EVT_ACTIVATED 0x02
#define PN544_HCI_EVT_DEACTIVATED 0x03
#define PN544_HCI_EVT_RCV_DATA 0x04
#define PN544_HCI_EVT_CONTINUE_MI 0x05
+#define PN544_HCI_EVT_SWITCH_MODE 0x03
#define PN544_HCI_CMD_ATTREQUEST 0x12
#define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13
@@ -187,13 +193,6 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
{{0x9e, 0xb4}, 0x00},
- {{0x9e, 0xd9}, 0xff},
- {{0x9e, 0xda}, 0xff},
- {{0x9e, 0xdb}, 0x23},
- {{0x9e, 0xdc}, 0x21},
- {{0x9e, 0xdd}, 0x22},
- {{0x9e, 0xde}, 0x24},
-
{{0x9c, 0x01}, 0x08},
{{0x9e, 0xaa}, 0x01},
@@ -394,7 +393,7 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
&hdev->gb_len);
- pr_debug("generate local bytes %p", hdev->gb);
+ pr_debug("generate local bytes %p\n", hdev->gb);
if (hdev->gb == NULL || hdev->gb_len == 0) {
im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
@@ -696,7 +695,7 @@ static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
- pr_debug("supported protocol %d", target->supported_protocols);
+ pr_debug("supported protocol %d\b", target->supported_protocols);
if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
NFC_PROTO_ISO14443_B_MASK)) {
return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
@@ -733,7 +732,7 @@ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
struct sk_buff *rgb_skb = NULL;
int r;
- pr_debug("hci event %d", event);
+ pr_debug("hci event %d\n", event);
switch (event) {
case PN544_HCI_EVT_ACTIVATED:
if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) {
@@ -764,7 +763,7 @@ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
}
if (skb->data[0] != 0) {
- pr_debug("data0 %d", skb->data[0]);
+ pr_debug("data0 %d\n", skb->data[0]);
r = -EPROTO;
goto exit;
}
@@ -792,6 +791,108 @@ static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
return info->fw_download(info->phy_id, firmware_name);
}
+static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
+{
+ u32 se_idx = 0;
+ u8 ese_mode = 0x01; /* Default mode */
+ struct sk_buff *res_skb;
+ int r;
+
+ r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_TEST_SWP,
+ NULL, 0, &res_skb);
+
+ if (r == 0) {
+ if (res_skb->len == 2 && res_skb->data[0] == 0x00)
+ nfc_add_se(hdev->ndev, se_idx++, NFC_SE_UICC);
+
+ kfree_skb(res_skb);
+ }
+
+ r = nfc_hci_send_event(hdev, PN544_NFC_WI_MGMT_GATE,
+ PN544_HCI_EVT_SWITCH_MODE,
+ &ese_mode, 1);
+ if (r == 0)
+ nfc_add_se(hdev->ndev, se_idx++, NFC_SE_EMBEDDED);
+
+ return !se_idx;
+}
+
+#define PN544_SE_MODE_OFF 0x00
+#define PN544_SE_MODE_ON 0x01
+static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+ struct nfc_se *se;
+ u8 enable = PN544_SE_MODE_ON;
+ static struct uicc_gatelist {
+ u8 head;
+ u8 adr[2];
+ u8 value;
+ } uicc_gatelist[] = {
+ {0x00, {0x9e, 0xd9}, 0x23},
+ {0x00, {0x9e, 0xda}, 0x21},
+ {0x00, {0x9e, 0xdb}, 0x22},
+ {0x00, {0x9e, 0xdc}, 0x24},
+ };
+ struct uicc_gatelist *p = uicc_gatelist;
+ int count = ARRAY_SIZE(uicc_gatelist);
+ struct sk_buff *res_skb;
+ int r;
+
+ se = nfc_find_se(hdev->ndev, se_idx);
+
+ switch (se->type) {
+ case NFC_SE_UICC:
+ while (count--) {
+ r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE,
+ PN544_WRITE, (u8 *)p, 4, &res_skb);
+ if (r < 0)
+ return r;
+
+ if (res_skb->len != 1) {
+ kfree_skb(res_skb);
+ return -EPROTO;
+ }
+
+ if (res_skb->data[0] != p->value) {
+ kfree_skb(res_skb);
+ return -EIO;
+ }
+
+ kfree_skb(res_skb);
+
+ p++;
+ }
+
+ return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+ PN544_SWP_DEFAULT_MODE, &enable, 1);
+ case NFC_SE_EMBEDDED:
+ return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+ PN544_NFC_ESE_DEFAULT_MODE, &enable, 1);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+ struct nfc_se *se;
+ u8 disable = PN544_SE_MODE_OFF;
+
+ se = nfc_find_se(hdev->ndev, se_idx);
+
+ switch (se->type) {
+ case NFC_SE_UICC:
+ return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+ PN544_SWP_DEFAULT_MODE, &disable, 1);
+ case NFC_SE_EMBEDDED:
+ return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+ PN544_NFC_ESE_DEFAULT_MODE, &disable, 1);
+ default:
+ return -EINVAL;
+ }
+}
+
static struct nfc_hci_ops pn544_hci_ops = {
.open = pn544_hci_open,
.close = pn544_hci_close,
@@ -807,6 +908,9 @@ static struct nfc_hci_ops pn544_hci_ops = {
.check_presence = pn544_hci_check_presence,
.event_received = pn544_hci_event_received,
.fw_download = pn544_hci_fw_download,
+ .discover_se = pn544_hci_discover_se,
+ .enable_se = pn544_hci_enable_se,
+ .disable_se = pn544_hci_disable_se,
};
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
@@ -820,7 +924,6 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
if (!info) {
- pr_err("Cannot allocate memory for pn544_hci_info.\n");
r = -ENOMEM;
goto err_info_alloc;
}
@@ -853,7 +956,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
phy_headroom + PN544_CMDS_HEADROOM,
phy_tailroom, phy_payload);
if (!info->hdev) {
- pr_err("Cannot allocate nfc hdev.\n");
+ pr_err("Cannot allocate nfc hdev\n");
r = -ENOMEM;
goto err_alloc_hdev;
}
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
new file mode 100644
index 00000000000..8a0571eb262
--- /dev/null
+++ b/drivers/nfc/port100.c
@@ -0,0 +1,1529 @@
+/*
+ * Sony NFC Port-100 Series driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Partly based on and inspired by Stephen Tiedemann's nfcpy
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <net/nfc/digital.h>
+
+#define VERSION "0.1"
+
+#define SONY_VENDOR_ID 0x054c
+#define RCS380_PRODUCT_ID 0x06c1
+
+#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_NFC_DEP_MASK)
+
+#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
+ NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+/* Standard port100 frame definitions */
+#define PORT100_FRAME_HEADER_LEN (sizeof(struct port100_frame) \
+ + 2) /* data[0] CC, data[1] SCC */
+#define PORT100_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble */
+
+#define PORT100_COMM_RF_HEAD_MAX_LEN (sizeof(struct port100_tg_comm_rf_cmd))
+
+/*
+ * Max extended frame payload len, excluding CC and SCC
+ * which are already in PORT100_FRAME_HEADER_LEN.
+ */
+#define PORT100_FRAME_MAX_PAYLOAD_LEN 1001
+
+#define PORT100_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
+ Postamble (1) */
+static u8 ack_frame[PORT100_FRAME_ACK_SIZE] = {
+ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00
+};
+
+#define PORT100_FRAME_CHECKSUM(f) (f->data[le16_to_cpu(f->datalen)])
+#define PORT100_FRAME_POSTAMBLE(f) (f->data[le16_to_cpu(f->datalen) + 1])
+
+/* start of frame */
+#define PORT100_FRAME_SOF 0x00FF
+#define PORT100_FRAME_EXT 0xFFFF
+#define PORT100_FRAME_ACK 0x00FF
+
+/* Port-100 command: in or out */
+#define PORT100_FRAME_DIRECTION(f) (f->data[0]) /* CC */
+#define PORT100_FRAME_DIR_OUT 0xD6
+#define PORT100_FRAME_DIR_IN 0xD7
+
+/* Port-100 sub-command */
+#define PORT100_FRAME_CMD(f) (f->data[1]) /* SCC */
+
+#define PORT100_CMD_GET_FIRMWARE_VERSION 0x20
+#define PORT100_CMD_GET_COMMAND_TYPE 0x28
+#define PORT100_CMD_SET_COMMAND_TYPE 0x2A
+
+#define PORT100_CMD_IN_SET_RF 0x00
+#define PORT100_CMD_IN_SET_PROTOCOL 0x02
+#define PORT100_CMD_IN_COMM_RF 0x04
+
+#define PORT100_CMD_TG_SET_RF 0x40
+#define PORT100_CMD_TG_SET_PROTOCOL 0x42
+#define PORT100_CMD_TG_SET_RF_OFF 0x46
+#define PORT100_CMD_TG_COMM_RF 0x48
+
+#define PORT100_CMD_SWITCH_RF 0x06
+
+#define PORT100_CMD_RESPONSE(cmd) (cmd + 1)
+
+#define PORT100_CMD_TYPE_IS_SUPPORTED(mask, cmd_type) \
+ ((mask) & (0x01 << (cmd_type)))
+#define PORT100_CMD_TYPE_0 0
+#define PORT100_CMD_TYPE_1 1
+
+#define PORT100_CMD_STATUS_OK 0x00
+#define PORT100_CMD_STATUS_TIMEOUT 0x80
+
+#define PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK 0x01
+#define PORT100_MDAA_TGT_WAS_ACTIVATED_MASK 0x02
+
+struct port100;
+
+typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
+ struct sk_buff *resp);
+
+/**
+ * Setting sets structure for in_set_rf command
+ *
+ * @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
+ * This table contains multiple RF setting sets required for RF
+ * communication.
+ *
+ * @in_*_comm_type: These fields set the communication type to be used.
+ */
+struct port100_in_rf_setting {
+ u8 in_send_set_number;
+ u8 in_send_comm_type;
+ u8 in_recv_set_number;
+ u8 in_recv_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_IN_212F 0x01
+#define PORT100_COMM_TYPE_IN_424F 0x02
+#define PORT100_COMM_TYPE_IN_106A 0x03
+
+static const struct port100_in_rf_setting in_rf_settings[] = {
+ [NFC_DIGITAL_RF_TECH_212F] = {
+ .in_send_set_number = 1,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_212F,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_212F,
+ },
+ [NFC_DIGITAL_RF_TECH_424F] = {
+ .in_send_set_number = 1,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_424F,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_424F,
+ },
+ [NFC_DIGITAL_RF_TECH_106A] = {
+ .in_send_set_number = 2,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_106A,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_106A,
+ },
+};
+
+/**
+ * Setting sets structure for tg_set_rf command
+ *
+ * @tg_set_number: Represents the entry index in the port-100 RF Base Table.
+ * This table contains multiple RF setting sets required for RF
+ * communication. This field is used for both send and receive
+ * settings.
+ *
+ * @tg_comm_type: Sets the communication type to be used to send and receive
+ * data.
+ */
+struct port100_tg_rf_setting {
+ u8 tg_set_number;
+ u8 tg_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_TG_106A 0x0B
+#define PORT100_COMM_TYPE_TG_212F 0x0C
+#define PORT100_COMM_TYPE_TG_424F 0x0D
+
+static const struct port100_tg_rf_setting tg_rf_settings[] = {
+ [NFC_DIGITAL_RF_TECH_106A] = {
+ .tg_set_number = 8,
+ .tg_comm_type = PORT100_COMM_TYPE_TG_106A,
+ },
+ [NFC_DIGITAL_RF_TECH_212F] = {
+ .tg_set_number = 8,
+ .tg_comm_type = PORT100_COMM_TYPE_TG_212F,
+ },
+ [NFC_DIGITAL_RF_TECH_424F] = {
+ .tg_set_number = 8,
+ .tg_comm_type = PORT100_COMM_TYPE_TG_424F,
+ },
+};
+
+#define PORT100_IN_PROT_INITIAL_GUARD_TIME 0x00
+#define PORT100_IN_PROT_ADD_CRC 0x01
+#define PORT100_IN_PROT_CHECK_CRC 0x02
+#define PORT100_IN_PROT_MULTI_CARD 0x03
+#define PORT100_IN_PROT_ADD_PARITY 0x04
+#define PORT100_IN_PROT_CHECK_PARITY 0x05
+#define PORT100_IN_PROT_BITWISE_AC_RECV_MODE 0x06
+#define PORT100_IN_PROT_VALID_BIT_NUMBER 0x07
+#define PORT100_IN_PROT_CRYPTO1 0x08
+#define PORT100_IN_PROT_ADD_SOF 0x09
+#define PORT100_IN_PROT_CHECK_SOF 0x0A
+#define PORT100_IN_PROT_ADD_EOF 0x0B
+#define PORT100_IN_PROT_CHECK_EOF 0x0C
+#define PORT100_IN_PROT_DEAF_TIME 0x0E
+#define PORT100_IN_PROT_CRM 0x0F
+#define PORT100_IN_PROT_CRM_MIN_LEN 0x10
+#define PORT100_IN_PROT_T1_TAG_FRAME 0x11
+#define PORT100_IN_PROT_RFCA 0x12
+#define PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR 0x13
+#define PORT100_IN_PROT_END 0x14
+
+#define PORT100_IN_MAX_NUM_PROTOCOLS 19
+
+#define PORT100_TG_PROT_TU 0x00
+#define PORT100_TG_PROT_RF_OFF 0x01
+#define PORT100_TG_PROT_CRM 0x02
+#define PORT100_TG_PROT_END 0x03
+
+#define PORT100_TG_MAX_NUM_PROTOCOLS 3
+
+struct port100_protocol {
+ u8 number;
+ u8 value;
+} __packed;
+
+static struct port100_protocol
+in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
+ [NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
+ { PORT100_IN_PROT_ADD_CRC, 0 },
+ { PORT100_IN_PROT_CHECK_CRC, 0 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 0 },
+ { PORT100_IN_PROT_CHECK_PARITY, 1 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 7 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
+ { PORT100_IN_PROT_ADD_CRC, 0 },
+ { PORT100_IN_PROT_CHECK_CRC, 0 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 1 },
+ { PORT100_IN_PROT_CHECK_PARITY, 1 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 1 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 1 },
+ { PORT100_IN_PROT_CHECK_PARITY, 1 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+ /* nfc_digital_framing_nfca_short */
+ { PORT100_IN_PROT_ADD_CRC, 2 },
+ { PORT100_IN_PROT_CHECK_CRC, 2 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 2 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+ /* nfc_digital_framing_nfca_standard */
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 0 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+ /* nfc_digital_framing_nfca_standard */
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 },
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 1 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 0 },
+ { PORT100_IN_PROT_CHECK_PARITY, 0 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+ /* nfc_digital_framing_nfcf */
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+ /* nfc_digital_framing_nfcf */
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+ { PORT100_IN_PROT_END, 0 },
+ },
+};
+
+static struct port100_protocol
+tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
+ [NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+ { PORT100_TG_PROT_TU, 1 },
+ { PORT100_TG_PROT_RF_OFF, 0 },
+ { PORT100_TG_PROT_CRM, 7 },
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+ { PORT100_TG_PROT_TU, 1 },
+ { PORT100_TG_PROT_RF_OFF, 0 },
+ { PORT100_TG_PROT_CRM, 7 },
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+ { PORT100_TG_PROT_RF_OFF, 1 },
+ { PORT100_TG_PROT_END, 0 },
+ },
+};
+
+struct port100 {
+ struct nfc_digital_dev *nfc_digital_dev;
+
+ int skb_headroom;
+ int skb_tailroom;
+
+ struct usb_device *udev;
+ struct usb_interface *interface;
+
+ struct urb *out_urb;
+ struct urb *in_urb;
+
+ struct work_struct cmd_complete_work;
+
+ u8 cmd_type;
+
+ /* The digital stack serializes commands to be sent. There is no need
+ * for any queuing/locking mechanism at driver level.
+ */
+ struct port100_cmd *cmd;
+};
+
+struct port100_cmd {
+ u8 code;
+ int status;
+ struct sk_buff *req;
+ struct sk_buff *resp;
+ int resp_len;
+ port100_send_async_complete_t complete_cb;
+ void *complete_cb_context;
+};
+
+struct port100_frame {
+ u8 preamble;
+ __be16 start_frame;
+ __be16 extended_frame;
+ __le16 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
+
+struct port100_ack_frame {
+ u8 preamble;
+ __be16 start_frame;
+ __be16 ack_frame;
+ u8 postambule;
+} __packed;
+
+struct port100_cb_arg {
+ nfc_digital_cmd_complete_t complete_cb;
+ void *complete_arg;
+ u8 mdaa;
+};
+
+struct port100_tg_comm_rf_cmd {
+ __le16 guard_time;
+ __le16 send_timeout;
+ u8 mdaa;
+ u8 nfca_param[6];
+ u8 nfcf_param[18];
+ u8 mf_halted;
+ u8 arae_flag;
+ __le16 recv_timeout;
+ u8 data[];
+} __packed;
+
+struct port100_tg_comm_rf_res {
+ u8 comm_type;
+ u8 ar_status;
+ u8 target_activated;
+ __le32 status;
+ u8 data[];
+} __packed;
+
+/* The rule: value + checksum = 0 */
+static inline u8 port100_checksum(u16 value)
+{
+ return ~(((u8 *)&value)[0] + ((u8 *)&value)[1]) + 1;
+}
+
+/* The rule: sum(data elements) + checksum = 0 */
+static u8 port100_data_checksum(u8 *data, int datalen)
+{
+ u8 sum = 0;
+ int i;
+
+ for (i = 0; i < datalen; i++)
+ sum += data[i];
+
+ return port100_checksum(sum);
+}
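+
+/*
+ * Editor's illustration (not part of the original patch): a worked example
+ * of the two rules above for a hypothetical 2-byte data area { 0xd6, 0x2a }
+ * with datalen = 2:
+ *
+ *   length checksum: the bytes of datalen sum to 0x02 + 0x00 = 0x02, so
+ *                    LCS = (~0x02 + 1) & 0xff = 0xfe and 0x02 + 0xfe wraps
+ *                    to 0x00.
+ *   data checksum:   0xd6 + 0x2a wraps to 0x00, so DCS = (~0x00 + 1) & 0xff
+ *                    = 0x00 and the data plus DCS again sum to 0x00.
+ *
+ * port100_rx_frame_is_valid() recomputes both checksums on reception and
+ * rejects the frame on any mismatch.
+ */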
+
+static void port100_tx_frame_init(void *_frame, u8 cmd_code)
+{
+ struct port100_frame *frame = _frame;
+
+ frame->preamble = 0;
+ frame->start_frame = cpu_to_be16(PORT100_FRAME_SOF);
+ frame->extended_frame = cpu_to_be16(PORT100_FRAME_EXT);
+ PORT100_FRAME_DIRECTION(frame) = PORT100_FRAME_DIR_OUT;
+ PORT100_FRAME_CMD(frame) = cmd_code;
+ frame->datalen = cpu_to_le16(2);
+}
+
+static void port100_tx_frame_finish(void *_frame)
+{
+ struct port100_frame *frame = _frame;
+
+ frame->datalen_checksum = port100_checksum(le16_to_cpu(frame->datalen));
+
+ PORT100_FRAME_CHECKSUM(frame) =
+ port100_data_checksum(frame->data, le16_to_cpu(frame->datalen));
+
+ PORT100_FRAME_POSTAMBLE(frame) = 0;
+}
+
+static void port100_tx_update_payload_len(void *_frame, int len)
+{
+ struct port100_frame *frame = _frame;
+
+ frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+}
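+
+/*
+ * Editor's illustration (not part of the original patch): a sketch of the
+ * outgoing command frame after the three helpers above have run:
+ *
+ *   | preamble | SOF (be16) | EXT (be16) | datalen (le16) | LCS |
+ *   | direction | cmd code | payload ... | DCS | postamble |
+ *
+ * datalen counts the direction byte, the command code and the payload;
+ * LCS is the checksum of datalen and DCS the checksum of the data area.
+ * The direction, command, DCS and postamble bytes are accessed through
+ * the PORT100_FRAME_* macros defined earlier in this file.
+ */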
+
+static bool port100_rx_frame_is_valid(void *_frame)
+{
+ u8 checksum;
+ struct port100_frame *frame = _frame;
+
+ if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) ||
+ frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT))
+ return false;
+
+ checksum = port100_checksum(le16_to_cpu(frame->datalen));
+ if (checksum != frame->datalen_checksum)
+ return false;
+
+ checksum = port100_data_checksum(frame->data,
+ le16_to_cpu(frame->datalen));
+ if (checksum != PORT100_FRAME_CHECKSUM(frame))
+ return false;
+
+ return true;
+}
+
+static bool port100_rx_frame_is_ack(struct port100_ack_frame *frame)
+{
+ return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) &&
+ frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK));
+}
+
+static inline int port100_rx_frame_size(void *frame)
+{
+ struct port100_frame *f = frame;
+
+ return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) +
+ PORT100_FRAME_TAIL_LEN;
+}
+
+static bool port100_rx_frame_is_cmd_response(struct port100 *dev, void *frame)
+{
+ struct port100_frame *f = frame;
+
+ return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code));
+}
+
+static void port100_recv_response(struct urb *urb)
+{
+ struct port100 *dev = urb->context;
+ struct port100_cmd *cmd = dev->cmd;
+ u8 *in_frame;
+
+ cmd->status = urb->status;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ nfc_err(&dev->interface->dev,
+ "The urb has been canceled (status %d)", urb->status);
+ goto sched_wq;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ urb->status);
+ goto sched_wq;
+ }
+
+ in_frame = dev->in_urb->transfer_buffer;
+
+ if (!port100_rx_frame_is_valid(in_frame)) {
+ nfc_err(&dev->interface->dev, "Received an invalid frame");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+ print_hex_dump_debug("PORT100 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
+ port100_rx_frame_size(in_frame), false);
+
+ if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
+ nfc_err(&dev->interface->dev,
+ "It's not the response to the last command");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+sched_wq:
+ schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_response(struct port100 *dev, gfp_t flags)
+{
+ dev->in_urb->complete = port100_recv_response;
+
+ return usb_submit_urb(dev->in_urb, flags);
+}
+
+static void port100_recv_ack(struct urb *urb)
+{
+ struct port100 *dev = urb->context;
+ struct port100_cmd *cmd = dev->cmd;
+ struct port100_ack_frame *in_frame;
+ int rc;
+
+ cmd->status = urb->status;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ nfc_err(&dev->interface->dev,
+ "The urb has been stopped (status %d)", urb->status);
+ goto sched_wq;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ urb->status);
+ goto sched_wq;
+ }
+
+ in_frame = dev->in_urb->transfer_buffer;
+
+ if (!port100_rx_frame_is_ack(in_frame)) {
+ nfc_err(&dev->interface->dev, "Received an invalid ack");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+ rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
+ if (rc) {
+ nfc_err(&dev->interface->dev,
+ "usb_submit_urb failed with result %d", rc);
+ cmd->status = rc;
+ goto sched_wq;
+ }
+
+ return;
+
+sched_wq:
+ schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
+{
+ dev->in_urb->complete = port100_recv_ack;
+
+ return usb_submit_urb(dev->in_urb, flags);
+}
+
+static int port100_send_ack(struct port100 *dev)
+{
+ int rc;
+
+ dev->out_urb->transfer_buffer = ack_frame;
+ dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
+ rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+
+ return rc;
+}
+
+static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
+ struct sk_buff *in, int in_len)
+{
+ int rc;
+
+ dev->out_urb->transfer_buffer = out->data;
+ dev->out_urb->transfer_buffer_length = out->len;
+
+ dev->in_urb->transfer_buffer = in->data;
+ dev->in_urb->transfer_buffer_length = in_len;
+
+ print_hex_dump_debug("PORT100 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ usb_unlink_urb(dev->out_urb);
+ return rc;
+}
+
+static void port100_build_cmd_frame(struct port100 *dev, u8 cmd_code,
+ struct sk_buff *skb)
+{
+ /* payload is already there, just update datalen */
+ int payload_len = skb->len;
+
+ skb_push(skb, PORT100_FRAME_HEADER_LEN);
+ skb_put(skb, PORT100_FRAME_TAIL_LEN);
+
+ port100_tx_frame_init(skb->data, cmd_code);
+ port100_tx_update_payload_len(skb->data, payload_len);
+ port100_tx_frame_finish(skb->data);
+}
+
+static void port100_send_async_complete(struct port100 *dev)
+{
+ struct port100_cmd *cmd = dev->cmd;
+ int status = cmd->status;
+
+ struct sk_buff *req = cmd->req;
+ struct sk_buff *resp = cmd->resp;
+
+ dev_kfree_skb(req);
+
+ dev->cmd = NULL;
+
+ if (status < 0) {
+ cmd->complete_cb(dev, cmd->complete_cb_context,
+ ERR_PTR(status));
+ dev_kfree_skb(resp);
+ goto done;
+ }
+
+ skb_put(resp, port100_rx_frame_size(resp->data));
+ skb_pull(resp, PORT100_FRAME_HEADER_LEN);
+ skb_trim(resp, resp->len - PORT100_FRAME_TAIL_LEN);
+
+ cmd->complete_cb(dev, cmd->complete_cb_context, resp);
+
+done:
+ kfree(cmd);
+}
+
+static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code,
+ struct sk_buff *req,
+ port100_send_async_complete_t complete_cb,
+ void *complete_cb_context)
+{
+ struct port100_cmd *cmd;
+ struct sk_buff *resp;
+ int rc;
+ int resp_len = PORT100_FRAME_HEADER_LEN +
+ PORT100_FRAME_MAX_PAYLOAD_LEN +
+ PORT100_FRAME_TAIL_LEN;
+
+ resp = alloc_skb(resp_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ dev_kfree_skb(resp);
+ return -ENOMEM;
+ }
+
+ cmd->code = cmd_code;
+ cmd->req = req;
+ cmd->resp = resp;
+ cmd->resp_len = resp_len;
+ cmd->complete_cb = complete_cb;
+ cmd->complete_cb_context = complete_cb_context;
+
+ port100_build_cmd_frame(dev, cmd_code, req);
+
+ dev->cmd = cmd;
+
+ rc = port100_send_frame_async(dev, req, resp, resp_len);
+ if (rc) {
+ kfree(cmd);
+ dev_kfree_skb(resp);
+ dev->cmd = NULL;
+ }
+
+ return rc;
+}
+
+struct port100_sync_cmd_response {
+ struct sk_buff *resp;
+ struct completion done;
+};
+
+static void port100_wq_cmd_complete(struct work_struct *work)
+{
+ struct port100 *dev = container_of(work, struct port100,
+ cmd_complete_work);
+
+ port100_send_async_complete(dev);
+}
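+
+/*
+ * Editor's illustration (not part of the original patch): the asynchronous
+ * command path assembled from the functions above runs as
+ *
+ *   port100_send_cmd_async()
+ *     -> port100_send_frame_async()     submit out_urb (command frame) and
+ *                                       in_urb for the ACK
+ *     -> port100_recv_ack()             in_urb completion: check the ACK,
+ *                                       resubmit in_urb for the response
+ *     -> port100_recv_response()        in_urb completion: validate the
+ *                                       response frame
+ *     -> port100_wq_cmd_complete()      workqueue context
+ *     -> port100_send_async_complete()  strip frame header/tail and invoke
+ *                                       the caller's completion callback
+ *
+ * URB completion errors record a negative status in dev->cmd->status and
+ * still schedule the workqueue, so the completion callback always runs;
+ * submission failures are instead returned synchronously to the caller.
+ */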
+
+static void port100_send_sync_complete(struct port100 *dev, void *_arg,
+ struct sk_buff *resp)
+{
+ struct port100_sync_cmd_response *arg = _arg;
+
+ arg->resp = resp;
+ complete(&arg->done);
+}
+
+static struct sk_buff *port100_send_cmd_sync(struct port100 *dev, u8 cmd_code,
+ struct sk_buff *req)
+{
+ int rc;
+ struct port100_sync_cmd_response arg;
+
+ init_completion(&arg.done);
+
+ rc = port100_send_cmd_async(dev, cmd_code, req,
+ port100_send_sync_complete, &arg);
+ if (rc) {
+ dev_kfree_skb(req);
+ return ERR_PTR(rc);
+ }
+
+ wait_for_completion(&arg.done);
+
+ return arg.resp;
+}
+
+static void port100_send_complete(struct urb *urb)
+{
+ struct port100 *dev = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ nfc_err(&dev->interface->dev,
+ "The urb has been stopped (status %d)", urb->status);
+ break;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ urb->status);
+ }
+}
+
+static void port100_abort_cmd(struct nfc_digital_dev *ddev)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+
+ /* An ack will cancel the last issued command */
+ port100_send_ack(dev);
+
+ /* cancel the urb request */
+ usb_kill_urb(dev->in_urb);
+}
+
+static struct sk_buff *port100_alloc_skb(struct port100 *dev, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size,
+ GFP_KERNEL);
+ if (skb)
+ skb_reserve(skb, dev->skb_headroom);
+
+ return skb;
+}
+
+static int port100_set_command_type(struct port100 *dev, u8 command_type)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+
+ skb = port100_alloc_skb(dev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, sizeof(u8)) = command_type;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static u64 port100_get_command_type_mask(struct port100 *dev)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ u64 mask;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ if (resp->len < 8)
+ mask = 0;
+ else
+ mask = be64_to_cpu(*(__be64 *)resp->data);
+
+ dev_kfree_skb(resp);
+
+ return mask;
+}
+
+static u16 port100_get_firmware_version(struct port100 *dev)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ u16 fw_ver;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+ return 0;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_FIRMWARE_VERSION,
+ skb);
+ if (IS_ERR(resp))
+ return 0;
+
+ fw_ver = le16_to_cpu(*(__le16 *)resp->data);
+
+ dev_kfree_skb(resp);
+
+ return fw_ver;
+}
+
+static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb, *resp;
+
+ skb = port100_alloc_skb(dev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = on ? 1 : 0;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ dev_kfree_skb(resp);
+
+ return 0;
+}
+
+static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+
+ if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+ return -EINVAL;
+
+ skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting));
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, sizeof(struct port100_in_rf_setting)),
+ &in_rf_settings[rf],
+ sizeof(struct port100_in_rf_setting));
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_protocol *protocols;
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int num_protocols;
+ size_t size;
+ int rc;
+
+ if (param >= NFC_DIGITAL_FRAMING_LAST)
+ return -EINVAL;
+
+ protocols = in_protocols[param];
+
+ num_protocols = 0;
+ while (protocols[num_protocols].number != PORT100_IN_PROT_END)
+ num_protocols++;
+
+ if (!num_protocols)
+ return 0;
+
+ size = sizeof(struct port100_protocol) * num_protocols;
+
+ skb = port100_alloc_skb(dev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, size), protocols, size);
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+ int param)
+{
+ if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+ return port100_in_set_rf(ddev, param);
+
+ if (type == NFC_DIGITAL_CONFIG_FRAMING)
+ return port100_in_set_framing(ddev, param);
+
+ return -EINVAL;
+}
+
+static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct port100_cb_arg *cb_arg = arg;
+ nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+ u32 status;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ goto exit;
+ }
+
+ if (resp->len < 4) {
+ nfc_err(&dev->interface->dev,
+ "Invalid packet length received.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ status = le32_to_cpu(*(__le32 *)resp->data);
+
+ skb_pull(resp, sizeof(u32));
+
+ if (status == PORT100_CMD_STATUS_TIMEOUT) {
+ rc = -ETIMEDOUT;
+ goto error;
+ }
+
+ if (status != PORT100_CMD_STATUS_OK) {
+ nfc_err(&dev->interface->dev,
+ "in_comm_rf failed with status 0x%08x\n", status);
+ rc = -EIO;
+ goto error;
+ }
+
+ /* Remove collision bits byte */
+ skb_pull(resp, 1);
+
+ goto exit;
+
+error:
+ kfree_skb(resp);
+ resp = ERR_PTR(rc);
+
+exit:
+ cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+ kfree(cb_arg);
+}
+
+static int port100_in_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 _timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_cb_arg *cb_arg;
+ __le16 timeout;
+
+ cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->complete_cb = cb;
+ cb_arg->complete_arg = arg;
+
+ timeout = cpu_to_le16(_timeout * 10);
+
+ memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16));
+
+ return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb,
+ port100_in_comm_rf_complete, cb_arg);
+}
+
+static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+
+ if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+ return -EINVAL;
+
+ skb = port100_alloc_skb(dev, sizeof(struct port100_tg_rf_setting));
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, sizeof(struct port100_tg_rf_setting)),
+ &tg_rf_settings[rf],
+ sizeof(struct port100_tg_rf_setting));
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_protocol *protocols;
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+ int num_protocols;
+ size_t size;
+
+ if (param >= NFC_DIGITAL_FRAMING_LAST)
+ return -EINVAL;
+
+ protocols = tg_protocols[param];
+
+ num_protocols = 0;
+ while (protocols[num_protocols].number != PORT100_TG_PROT_END)
+ num_protocols++;
+
+ if (!num_protocols)
+ return 0;
+
+ size = sizeof(struct port100_protocol) * num_protocols;
+
+ skb = port100_alloc_skb(dev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, size), protocols, size);
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_tg_configure_hw(struct nfc_digital_dev *ddev, int type,
+ int param)
+{
+ if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+ return port100_tg_set_rf(ddev, param);
+
+ if (type == NFC_DIGITAL_CONFIG_FRAMING)
+ return port100_tg_set_framing(ddev, param);
+
+ return -EINVAL;
+}
+
+static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated)
+{
+ u8 mask;
+
+ switch (dev->cmd_type) {
+ case PORT100_CMD_TYPE_0:
+ mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK;
+ break;
+ case PORT100_CMD_TYPE_1:
+ mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK |
+ PORT100_MDAA_TGT_WAS_ACTIVATED_MASK;
+ break;
+ default:
+		nfc_err(&dev->interface->dev, "Unknown command type.\n");
+ return false;
+ }
+
+ return ((tgt_activated & mask) == mask);
+}
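+
+/*
+ * Editor's illustration (not part of the original patch): with command
+ * type 1, a target only counts as activated when both the "has been
+ * activated" and "was activated" bits are set, so a target_activated
+ * value of 0x03 passes while 0x01 or 0x02 alone makes the MDAA listen
+ * path below report the tg_comm_rf result as a timeout.
+ */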
+
+static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ u32 status;
+ struct port100_cb_arg *cb_arg = arg;
+ nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+ struct port100_tg_comm_rf_res *hdr;
+
+ if (IS_ERR(resp))
+ goto exit;
+
+ hdr = (struct port100_tg_comm_rf_res *)resp->data;
+
+ status = le32_to_cpu(hdr->status);
+
+ if (cb_arg->mdaa &&
+ !port100_tg_target_activated(dev, hdr->target_activated)) {
+ kfree_skb(resp);
+ resp = ERR_PTR(-ETIMEDOUT);
+
+ goto exit;
+ }
+
+ skb_pull(resp, sizeof(struct port100_tg_comm_rf_res));
+
+ if (status != PORT100_CMD_STATUS_OK) {
+ kfree_skb(resp);
+
+ if (status == PORT100_CMD_STATUS_TIMEOUT)
+ resp = ERR_PTR(-ETIMEDOUT);
+ else
+ resp = ERR_PTR(-EIO);
+ }
+
+exit:
+ cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+ kfree(cb_arg);
+}
+
+static int port100_tg_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_tg_comm_rf_cmd *hdr;
+ struct port100_cb_arg *cb_arg;
+
+ cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->complete_cb = cb;
+ cb_arg->complete_arg = arg;
+
+ skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+
+ hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+ memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+ hdr->guard_time = cpu_to_le16(500);
+ hdr->send_timeout = cpu_to_le16(0xFFFF);
+ hdr->recv_timeout = cpu_to_le16(timeout);
+
+ return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+ port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen_mdaa(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *params,
+ u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_tg_comm_rf_cmd *hdr;
+ struct port100_cb_arg *cb_arg;
+ struct sk_buff *skb;
+ int rc;
+
+ rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+ NFC_DIGITAL_RF_TECH_106A);
+ if (rc)
+ return rc;
+
+ rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+ if (rc)
+ return rc;
+
+ cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->complete_cb = cb;
+ cb_arg->complete_arg = arg;
+ cb_arg->mdaa = 1;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb) {
+ kfree(cb_arg);
+ return -ENOMEM;
+ }
+
+ skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+ hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+ memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+
+ hdr->guard_time = 0;
+ hdr->send_timeout = cpu_to_le16(0xFFFF);
+ hdr->mdaa = 1;
+ hdr->nfca_param[0] = (params->sens_res >> 8) & 0xFF;
+ hdr->nfca_param[1] = params->sens_res & 0xFF;
+ memcpy(hdr->nfca_param + 2, params->nfcid1, 3);
+ hdr->nfca_param[5] = params->sel_res;
+ memcpy(hdr->nfcf_param, params->nfcid2, 8);
+ hdr->nfcf_param[16] = (params->sc >> 8) & 0xFF;
+ hdr->nfcf_param[17] = params->sc & 0xFF;
+ hdr->recv_timeout = cpu_to_le16(timeout);
+
+ return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+ port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
+}
+
+static struct nfc_digital_ops port100_digital_ops = {
+ .in_configure_hw = port100_in_configure_hw,
+ .in_send_cmd = port100_in_send_cmd,
+
+ .tg_listen_mdaa = port100_listen_mdaa,
+ .tg_listen = port100_listen,
+ .tg_configure_hw = port100_tg_configure_hw,
+ .tg_send_cmd = port100_tg_send_cmd,
+
+ .switch_rf = port100_switch_rf,
+ .abort_cmd = port100_abort_cmd,
+};
+
+static const struct usb_device_id port100_table[] = {
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = SONY_VENDOR_ID,
+ .idProduct = RCS380_PRODUCT_ID,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, port100_table);
+
+static int port100_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct port100 *dev;
+ int rc;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int in_endpoint;
+ int out_endpoint;
+ u16 fw_version;
+ u64 cmd_type_mask;
+ int i;
+
+ dev = devm_kzalloc(&interface->dev, sizeof(struct port100), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->udev = usb_get_dev(interface_to_usbdev(interface));
+ dev->interface = interface;
+ usb_set_intfdata(interface, dev);
+
+ in_endpoint = out_endpoint = 0;
+ iface_desc = interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
+ in_endpoint = endpoint->bEndpointAddress;
+
+ if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
+ out_endpoint = endpoint->bEndpointAddress;
+ }
+
+ if (!in_endpoint || !out_endpoint) {
+ nfc_err(&interface->dev,
+ "Could not find bulk-in or bulk-out endpoint\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!dev->in_urb || !dev->out_urb) {
+ nfc_err(&interface->dev, "Could not allocate USB URBs\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ usb_fill_bulk_urb(dev->in_urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev, in_endpoint),
+ NULL, 0, NULL, dev);
+ usb_fill_bulk_urb(dev->out_urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, out_endpoint),
+ NULL, 0, port100_send_complete, dev);
+
+ dev->skb_headroom = PORT100_FRAME_HEADER_LEN +
+ PORT100_COMM_RF_HEAD_MAX_LEN;
+ dev->skb_tailroom = PORT100_FRAME_TAIL_LEN;
+
+ INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete);
+
+ /* The first thing to do with the Port-100 is to set the command type
+	 * to be used. Command type 1 is used if supported, command type 0 otherwise.
+ */
+ cmd_type_mask = port100_get_command_type_mask(dev);
+ if (!cmd_type_mask) {
+ nfc_err(&interface->dev,
+ "Could not get supported command types.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (PORT100_CMD_TYPE_IS_SUPPORTED(cmd_type_mask, PORT100_CMD_TYPE_1))
+ dev->cmd_type = PORT100_CMD_TYPE_1;
+ else
+ dev->cmd_type = PORT100_CMD_TYPE_0;
+
+ rc = port100_set_command_type(dev, dev->cmd_type);
+ if (rc) {
+ nfc_err(&interface->dev,
+ "The device does not support command type %u.\n",
+ dev->cmd_type);
+ goto error;
+ }
+
+ fw_version = port100_get_firmware_version(dev);
+ if (!fw_version)
+ nfc_err(&interface->dev,
+ "Could not get device firmware version.\n");
+
+ nfc_info(&interface->dev,
+ "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n",
+ (fw_version & 0xFF00) >> 8, fw_version & 0xFF);
+
+ dev->nfc_digital_dev = nfc_digital_allocate_device(&port100_digital_ops,
+ PORT100_PROTOCOLS,
+ PORT100_CAPABILITIES,
+ dev->skb_headroom,
+ dev->skb_tailroom);
+ if (!dev->nfc_digital_dev) {
+ nfc_err(&interface->dev,
+ "Could not allocate nfc_digital_dev.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ nfc_digital_set_parent_dev(dev->nfc_digital_dev, &interface->dev);
+ nfc_digital_set_drvdata(dev->nfc_digital_dev, dev);
+
+ rc = nfc_digital_register_device(dev->nfc_digital_dev);
+ if (rc) {
+ nfc_err(&interface->dev,
+ "Could not register digital device.\n");
+ goto free_nfc_dev;
+ }
+
+ return 0;
+
+free_nfc_dev:
+ nfc_digital_free_device(dev->nfc_digital_dev);
+
+error:
+ usb_free_urb(dev->in_urb);
+ usb_free_urb(dev->out_urb);
+ usb_put_dev(dev->udev);
+
+ return rc;
+}
+
+static void port100_disconnect(struct usb_interface *interface)
+{
+ struct port100 *dev;
+
+ dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ nfc_digital_unregister_device(dev->nfc_digital_dev);
+ nfc_digital_free_device(dev->nfc_digital_dev);
+
+ usb_kill_urb(dev->in_urb);
+ usb_kill_urb(dev->out_urb);
+
+ usb_free_urb(dev->in_urb);
+ usb_free_urb(dev->out_urb);
+
+ kfree(dev->cmd);
+
+ nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected");
+}
+
+static struct usb_driver port100_driver = {
+ .name = "port100",
+ .probe = port100_probe,
+ .disconnect = port100_disconnect,
+ .id_table = port100_table,
+};
+
+module_usb_driver(port100_driver);
+
+MODULE_DESCRIPTION("NFC Port-100 series usb driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 12a9e83c008..d0222f13d15 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1034,10 +1034,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
size_t pay_off, buff_off;
- dma_addr_t src, dest;
+ struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- unsigned long flags;
entry->len = len;
@@ -1045,35 +1044,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
goto err;
if (len < copy_bytes)
- goto err1;
+ goto err_wait;
device = chan->device;
pay_off = (size_t) offset & ~PAGE_MASK;
buff_off = (size_t) buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
- goto err1;
+ goto err_wait;
- dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(device->dev, dest))
- goto err1;
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+ if (!unmap)
+ goto err_wait;
- src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
- if (dma_mapping_error(device->dev, src))
- goto err2;
+ unmap->len = len;
+ unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+ pay_off, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(device->dev, unmap->addr[0]))
+ goto err_get_unmap;
+
+ unmap->to_cnt = 1;
- flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
- DMA_PREP_INTERRUPT;
- txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+ buff_off, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(device->dev, unmap->addr[1]))
+ goto err_get_unmap;
+
+ unmap->from_cnt = 1;
+
+ txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (!txd)
- goto err3;
+ goto err_get_unmap;
txd->callback = ntb_rx_copy_callback;
txd->callback_param = entry;
+ dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
- goto err3;
+ goto err_set_unmap;
+
+ dmaengine_unmap_put(unmap);
qp->last_cookie = cookie;
@@ -1081,11 +1094,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
return;
-err3:
- dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
-err2:
- dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
-err1:
+err_set_unmap:
+ dmaengine_unmap_put(unmap);
+err_get_unmap:
+ dmaengine_unmap_put(unmap);
+err_wait:
/* If the callbacks come out of order, the writing of the index to the
* last completed will be out of order. This may result in the
* receive stalling forever.
@@ -1245,12 +1258,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
size_t dest_off, buff_off;
- dma_addr_t src, dest;
+ struct dmaengine_unmap_data *unmap;
+ dma_addr_t dest;
dma_cookie_t cookie;
void __iomem *offset;
size_t len = entry->len;
void *buf = entry->buf;
- unsigned long flags;
offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1273,28 +1286,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
goto err;
- src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
- if (dma_mapping_error(device->dev, src))
+ unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+ if (!unmap)
goto err;
- flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
- txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ unmap->len = len;
+ unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+ buff_off, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(device->dev, unmap->addr[0]))
+ goto err_get_unmap;
+
+ unmap->to_cnt = 1;
+
+ txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (!txd)
- goto err1;
+ goto err_get_unmap;
txd->callback = ntb_tx_copy_callback;
txd->callback_param = entry;
+ dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
- goto err1;
+ goto err_set_unmap;
+
+ dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
qp->tx_async++;
return;
-err1:
- dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err_set_unmap:
+ dmaengine_unmap_put(unmap);
+err_get_unmap:
+ dmaengine_unmap_put(unmap);
err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 78cc7605332..de6f8990246 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -17,6 +17,7 @@ config PROC_DEVICETREE
config OF_SELFTEST
bool "Device Tree Runtime self tests"
+ depends on OF_IRQ
help
This option builds in test cases for the device tree infrastructure
	  that are executed once at boot time, and the results dumped to the
diff --git a/drivers/of/address.c b/drivers/of/address.c
index b55c2189076..4b9317bdb81 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -489,7 +489,7 @@ static u64 __of_translate_address(struct device_node *dev,
int na, ns, pna, pns;
u64 result = OF_BAD_ADDR;
- pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+ pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev));
/* Increase refcount at current level */
of_node_get(dev);
@@ -504,13 +504,13 @@ static u64 __of_translate_address(struct device_node *dev,
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
- dev->full_name);
+ of_node_full_name(dev));
goto bail;
}
memcpy(addr, in_addr, na * 4);
pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
- bus->name, na, ns, parent->full_name);
+ bus->name, na, ns, of_node_full_name(parent));
of_dump_addr("OF: translating address:", addr, na);
/* Translate */
@@ -532,12 +532,12 @@ static u64 __of_translate_address(struct device_node *dev,
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
- dev->full_name);
+ of_node_full_name(dev));
break;
}
pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
- pbus->name, pna, pns, parent->full_name);
+ pbus->name, pna, pns, of_node_full_name(parent));
/* Apply bus translation */
if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
@@ -626,6 +626,14 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
+unsigned long __weak pci_address_to_pio(phys_addr_t address)
+{
+ if (address > IO_SPACE_LIMIT)
+ return (unsigned long)-1;
+
+ return (unsigned long) address;
+}
+
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
const char *name, struct resource *r)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7d4c70f859e..f807d0edabf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -74,6 +74,13 @@ int of_n_size_cells(struct device_node *np)
}
EXPORT_SYMBOL(of_n_size_cells);
+#ifdef CONFIG_NUMA
+int __weak of_node_to_nid(struct device_node *np)
+{
+ return numa_node_id();
+}
+#endif
+
#if defined(CONFIG_OF_DYNAMIC)
/**
* of_node_get - Increment refcount of a node
@@ -265,9 +272,9 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
- if (!cell)
+ if (!cell || !ac)
return false;
- prop_len /= sizeof(*cell);
+ prop_len /= sizeof(*cell) * ac;
for (tid = 0; tid < prop_len; tid++) {
hwid = of_read_number(cell, ac);
if (arch_match_cpu_phys_id(cpu, hwid)) {
@@ -280,6 +287,31 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
return false;
}
+/*
+ * arch_find_n_match_cpu_physical_id - See if the given device node is
+ * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
+ * else false. If 'thread' is non-NULL, the local thread number within the
+ * core is returned in it.
+ */
+bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread)
+{
+ /* Check for non-standard "ibm,ppc-interrupt-server#s" property
+ * for thread ids on PowerPC. If it doesn't exist fallback to
+ * standard "reg" property.
+ */
+ if (IS_ENABLED(CONFIG_PPC) &&
+ __of_find_n_match_cpu_property(cpun,
+ "ibm,ppc-interrupt-server#s",
+ cpu, thread))
+ return true;
+
+ if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
+ return true;
+
+ return false;
+}
+
/**
* of_get_cpu_node - Get device node associated with the given logical CPU
*
@@ -300,24 +332,10 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
- struct device_node *cpun, *cpus;
+ struct device_node *cpun;
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return NULL;
-
- for_each_child_of_node(cpus, cpun) {
- if (of_node_cmp(cpun->type, "cpu"))
- continue;
- /* Check for non-standard "ibm,ppc-interrupt-server#s" property
- * for thread ids on PowerPC. If it doesn't exist fallback to
- * standard "reg" property.
- */
- if (IS_ENABLED(CONFIG_PPC) &&
- __of_find_n_match_cpu_property(cpun,
- "ibm,ppc-interrupt-server#s", cpu, thread))
- return cpun;
- if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
+ for_each_node_by_type(cpun, "cpu") {
+ if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
return cpun;
}
return NULL;
@@ -1174,6 +1192,15 @@ int of_property_count_strings(struct device_node *np, const char *propname)
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
+void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
+{
+ int i;
+ printk("%s %s", msg, of_node_full_name(args->np));
+ for (i = 0; i < args->args_count; i++)
+ printk(i ? ",%08x" : ":%08x", args->args[i]);
+ printk("\n");
+}
+
static int __of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name,
@@ -1882,3 +1909,34 @@ int of_device_is_stdout_path(struct device_node *dn)
return of_stdout == dn;
}
EXPORT_SYMBOL_GPL(of_device_is_stdout_path);
+
+/**
+ * of_find_next_cache_node - Find a node's subsidiary cache
+ * @np: node of type "cpu" or "cache"
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done. Caller should hold a reference
+ * to np.
+ */
+struct device_node *of_find_next_cache_node(const struct device_node *np)
+{
+ struct device_node *child;
+ const phandle *handle;
+
+ handle = of_get_property(np, "l2-cache", NULL);
+ if (!handle)
+ handle = of_get_property(np, "next-level-cache", NULL);
+
+ if (handle)
+ return of_find_node_by_phandle(be32_to_cpup(handle));
+
+ /* OF on pmac has nodes instead of properties named "l2-cache"
+ * beneath CPU nodes.
+ */
+ if (!strcmp(np->type, "cpu"))
+ for_each_child_of_node(np, child)
+ if (!strcmp(child->type, "cache"))
+ return child;
+
+ return NULL;
+}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a4fa9ad31b8..2fa024b97c4 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -618,12 +618,72 @@ int __init of_scan_flat_dt_by_path(const char *path,
return ret;
}
+const char * __init of_flat_dt_get_machine_name(void)
+{
+ const char *name;
+ unsigned long dt_root = of_get_flat_dt_root();
+
+ name = of_get_flat_dt_prop(dt_root, "model", NULL);
+ if (!name)
+ name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+ return name;
+}
+
+/**
+ * of_flat_dt_match_machine - Iterate match tables to find matching machine.
+ *
+ * @default_match: A machine specific ptr to return in case of no match.
+ * @get_next_compat: callback function to return next compatible match table.
+ *
+ * Iterate through machine match tables to find the best match for the machine
+ * compatible string in the FDT.
+ */
+const void * __init of_flat_dt_match_machine(const void *default_match,
+ const void * (*get_next_compat)(const char * const**))
+{
+ const void *data = NULL;
+ const void *best_data = default_match;
+ const char *const *compat;
+ unsigned long dt_root;
+ unsigned int best_score = ~1, score = 0;
+
+ dt_root = of_get_flat_dt_root();
+ while ((data = get_next_compat(&compat))) {
+ score = of_flat_dt_match(dt_root, compat);
+ if (score > 0 && score < best_score) {
+ best_data = data;
+ best_score = score;
+ }
+ }
+ if (!best_data) {
+ const char *prop;
+ long size;
+
+ pr_err("\n unrecognized device tree list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ if (prop) {
+ while (size > 0) {
+ printk("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ }
+ printk("]\n\n");
+ return NULL;
+ }
+
+ pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
+
+ return best_data;
+}
+
#ifdef CONFIG_BLK_DEV_INITRD
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
*/
-void __init early_init_dt_check_for_initrd(unsigned long node)
+static void __init early_init_dt_check_for_initrd(unsigned long node)
{
u64 start, end;
unsigned long len;
@@ -641,12 +701,15 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
return;
end = of_read_number(prop, len/4);
- early_init_dt_setup_initrd_arch(start, end);
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
(unsigned long long)start, (unsigned long long)end);
}
#else
-inline void early_init_dt_check_for_initrd(unsigned long node)
+static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -774,6 +837,25 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
}
#ifdef CONFIG_HAVE_MEMBLOCK
+void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ const u64 phys_offset = __pa(PAGE_OFFSET);
+ base &= PAGE_MASK;
+ size &= PAGE_MASK;
+ if (base + size < phys_offset) {
+ pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
+ if (base < phys_offset) {
+ pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, phys_offset);
+ size -= phys_offset - base;
+ base = phys_offset;
+ }
+ memblock_add(base, size);
+}
+
/*
* called from unflatten_device_tree() to bootstrap devicetree itself
* Architectures can override this definition if memblock isn't used
@@ -784,6 +866,32 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#endif
+bool __init early_init_dt_scan(void *params)
+{
+ if (!params)
+ return false;
+
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /* check device tree validity */
+ if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
+ initial_boot_params = NULL;
+ return false;
+ }
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ return true;
+}
+
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
@@ -801,4 +909,28 @@ void __init unflatten_device_tree(void)
of_alias_scan(early_init_dt_alloc_memory_arch);
}
+/**
+ * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
+ *
+ * Copies and unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used. This should only be used when the FDT memory has not been
+ * reserved such is the case when the FDT is built-in to the kernel init
+ * section. If the FDT memory is reserved already then unflatten_device_tree
+ * should be used instead.
+ */
+void __init unflatten_and_copy_device_tree(void)
+{
+ int size = __be32_to_cpu(initial_boot_params->totalsize);
+ void *dt = early_init_dt_alloc_memory_arch(size,
+ __alignof__(struct boot_param_header));
+
+ if (dt) {
+ memcpy(dt, initial_boot_params, size);
+ initial_boot_params = dt;
+ }
+ unflatten_device_tree();
+}
+
#endif /* CONFIG_OF_EARLY_FLATTREE */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1752988d6aa..786b0b47fae 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -31,18 +31,17 @@
* @dev: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
- * This function is a wrapper that chains of_irq_map_one() and
+ * This function is a wrapper that chains of_irq_parse_one() and
* irq_create_of_mapping() to make things easier to callers
*/
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
- struct of_irq oirq;
+ struct of_phandle_args oirq;
- if (of_irq_map_one(dev, index, &oirq))
+ if (of_irq_parse_one(dev, index, &oirq))
return 0;
- return irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
+ return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
@@ -79,33 +78,34 @@ struct device_node *of_irq_find_parent(struct device_node *child)
}
/**
- * of_irq_map_raw - Low level interrupt tree parsing
+ * of_irq_parse_raw - Low level interrupt tree parsing
* @parent: the device interrupt parent
- * @intspec: interrupt specifier ("interrupts" property of the device)
- * @ointsize: size of the passed in interrupt specifier
- * @addr: address specifier (start of "reg" property of the device)
- * @out_irq: structure of_irq filled by this function
+ * @addr: address specifier (start of "reg" property of the device) in be32 format
+ * @out_irq: structure of_irq updated by this function
*
* Returns 0 on success and a negative number on error
*
* This function is a low-level interrupt tree walking function. It
 * can be used to do a partial walk with synthesized reg and interrupts
* properties, for example when resolving PCI interrupts when no device
- * node exist for the parent.
+ * node exists for the parent. It takes an interrupt specifier structure as
+ * input, walks the tree looking for any interrupt-map properties, translates
+ * the specifier for each map, and then returns the translated map.
*/
-int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
- u32 ointsize, const __be32 *addr, struct of_irq *out_irq)
+int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
- const __be32 *tmp, *imap, *imask;
+ __be32 initial_match_array[MAX_PHANDLE_ARGS];
+ const __be32 *match_array = initial_match_array;
+ const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
int imaplen, match, i;
- pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
- parent->full_name, be32_to_cpup(intspec),
- be32_to_cpup(intspec + 1), ointsize);
+#ifdef DEBUG
+ of_print_phandle_args("of_irq_parse_raw: ", out_irq);
+#endif
- ipar = of_node_get(parent);
+ ipar = of_node_get(out_irq->np);
/* First get the #interrupt-cells property of the current cursor
* that tells us how to interpret the passed-in intspec. If there
@@ -126,9 +126,9 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
goto fail;
}
- pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
+ pr_debug("of_irq_parse_raw: ipar=%s, size=%d\n", of_node_full_name(ipar), intsize);
- if (ointsize != intsize)
+ if (out_irq->args_count != intsize)
return -EINVAL;
/* Look for this #address-cells. We have to implement the old linux
@@ -147,6 +147,16 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
pr_debug(" -> addrsize=%d\n", addrsize);
+ /* Range check so that the temporary buffer doesn't overflow */
+ if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS))
+ goto fail;
+
+ /* Precalculate the match array - this simplifies match loop */
+ for (i = 0; i < addrsize; i++)
+ initial_match_array[i] = addr ? addr[i] : 0;
+ for (i = 0; i < intsize; i++)
+ initial_match_array[addrsize + i] = cpu_to_be32(out_irq->args[i]);
+
/* Now start the actual "proper" walk of the interrupt tree */
while (ipar != NULL) {
/* Now check if cursor is an interrupt-controller and if it is
@@ -155,15 +165,19 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
- for (i = 0; i < intsize; i++)
- out_irq->specifier[i] =
- of_read_number(intspec +i, 1);
- out_irq->size = intsize;
- out_irq->controller = ipar;
of_node_put(old);
return 0;
}
+ /*
+ * interrupt-map parsing does not work without a reg
+ * property when #address-cells != 0
+ */
+ if (addrsize && !addr) {
+ pr_debug(" -> no reg passed in when needed !\n");
+ goto fail;
+ }
+
/* Now look for an interrupt-map */
imap = of_get_property(ipar, "interrupt-map", &imaplen);
/* No interrupt map, check for an interrupt parent */
@@ -176,34 +190,16 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
/* Look for a mask */
imask = of_get_property(ipar, "interrupt-map-mask", NULL);
-
- /* If we were passed no "reg" property and we attempt to parse
- * an interrupt-map, then #address-cells must be 0.
- * Fail if it's not.
- */
- if (addr == NULL && addrsize != 0) {
- pr_debug(" -> no reg passed in when needed !\n");
- goto fail;
- }
+ if (!imask)
+ imask = dummy_imask;
/* Parse interrupt-map */
match = 0;
while (imaplen > (addrsize + intsize + 1) && !match) {
/* Compare specifiers */
match = 1;
- for (i = 0; i < addrsize && match; ++i) {
- __be32 mask = imask ? imask[i]
- : cpu_to_be32(0xffffffffu);
- match = ((addr[i] ^ imap[i]) & mask) == 0;
- }
- for (; i < (addrsize + intsize) && match; ++i) {
- __be32 mask = imask ? imask[i]
- : cpu_to_be32(0xffffffffu);
- match =
- ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
- }
- imap += addrsize + intsize;
- imaplen -= addrsize + intsize;
+ for (i = 0; i < (addrsize + intsize); i++, imaplen--)
+ match &= !((match_array[i] ^ *imap++) & imask[i]);
pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
@@ -237,6 +233,8 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
newintsize, newaddrsize);
/* Check for malformed properties */
+ if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS))
+ goto fail;
if (imaplen < (newaddrsize + newintsize))
goto fail;
@@ -248,12 +246,18 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
if (!match)
goto fail;
- of_node_put(old);
- old = of_node_get(newpar);
+ /*
+	 * Successfully parsed an interrupt-map translation; copy new
+ * interrupt specifier into the out_irq structure
+ */
+ of_node_put(out_irq->np);
+ out_irq->np = of_node_get(newpar);
+
+ match_array = imap - newaddrsize - newintsize;
+ for (i = 0; i < newintsize; i++)
+ out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
+ out_irq->args_count = intsize = newintsize;
addrsize = newaddrsize;
- intsize = newintsize;
- intspec = imap - intsize;
- addr = intspec - addrsize;
skiplevel:
/* Iterate again with new parent */
@@ -264,46 +268,53 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
}
fail:
of_node_put(ipar);
- of_node_put(old);
+ of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(of_irq_map_raw);
+EXPORT_SYMBOL_GPL(of_irq_parse_raw);
/**
- * of_irq_map_one - Resolve an interrupt for a device
+ * of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
* @out_irq: structure of_irq filled by this function
*
- * This function resolves an interrupt, walking the tree, for a given
- * device-tree node. It's the high level pendant to of_irq_map_raw().
+ * This function resolves an interrupt for a node by walking the interrupt tree,
+ * finding which interrupt controller node it is attached to, and returning the
+ * interrupt specifier that can be used to retrieve a Linux IRQ number.
*/
-int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
+int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
{
struct device_node *p;
const __be32 *intspec, *tmp, *addr;
u32 intsize, intlen;
- int res = -EINVAL;
+ int i, res = -EINVAL;
- pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
+ pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
/* OldWorld mac stuff is "special", handle out of line */
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
- return of_irq_map_oldworld(device, index, out_irq);
+ return of_irq_parse_oldworld(device, index, out_irq);
+
+ /* Get the reg property (if any) */
+ addr = of_get_property(device, "reg", NULL);
/* Get the interrupts property */
intspec = of_get_property(device, "interrupts", &intlen);
- if (intspec == NULL)
- return -EINVAL;
+ if (intspec == NULL) {
+ /* Try the new-style interrupts-extended */
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
+ "#interrupt-cells", index, out_irq);
+ if (res)
+ return -EINVAL;
+ return of_irq_parse_raw(addr, out_irq);
+ }
intlen /= sizeof(*intspec);
pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
- /* Get the reg property (if any) */
- addr = of_get_property(device, "reg", NULL);
-
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
if (p == NULL)
@@ -321,14 +332,20 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
if ((index + 1) * intsize > intlen)
goto out;
- /* Get new specifier and map it */
- res = of_irq_map_raw(p, intspec + index * intsize, intsize,
- addr, out_irq);
+ /* Copy intspec into irq structure */
+ intspec += index * intsize;
+ out_irq->np = p;
+ out_irq->args_count = intsize;
+ for (i = 0; i < intsize; i++)
+ out_irq->args[i] = be32_to_cpup(intspec++);
+
+ /* Check if there are any interrupt-map translations to process */
+ res = of_irq_parse_raw(addr, out_irq);
out:
of_node_put(p);
return res;
}
-EXPORT_SYMBOL_GPL(of_irq_map_one);
+EXPORT_SYMBOL_GPL(of_irq_parse_one);
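
For reference, a minimal sketch (not part of the patch) of how a caller might consume the renamed API; the helper name example_get_virq is hypothetical, and only interfaces already used elsewhere in this series (of_irq_parse_one() and the of_phandle_args flavour of irq_create_of_mapping()) are assumed:

#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

/* Sketch: resolve interrupt <index> of a node into a Linux virq (0 = failure) */
static unsigned int example_get_virq(struct device_node *np, int index)
{
	struct of_phandle_args oirq;

	if (of_irq_parse_one(np, index, &oirq))
		return 0;

	return irq_create_of_mapping(&oirq);
}

This is the same parse-then-map split that of_irq_parse_and_map_pci() below performs for PCI devices.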
/**
* of_irq_to_resource - Decode a node's IRQ and return it as a resource
@@ -354,8 +371,8 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
&name);
r->start = r->end = irq;
- r->flags = IORESOURCE_IRQ;
- r->name = name ? name : dev->full_name;
+ r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
+ r->name = name ? name : of_node_full_name(dev);
}
return irq;
@@ -368,9 +385,10 @@ EXPORT_SYMBOL_GPL(of_irq_to_resource);
*/
int of_irq_count(struct device_node *dev)
{
+ struct of_phandle_args irq;
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL))
+ while (of_irq_parse_one(dev, nr, &irq) == 0)
nr++;
return nr;
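
of_irq_count() now counts interrupts by parsing them directly instead of going through of_irq_to_resource(). A short consumer sketch (the function name is hypothetical; irq_of_parse_and_map() is the long-standing mapping helper, not something introduced by this patch):

#include <linux/of.h>
#include <linux/of_irq.h>

/* Sketch: map every interrupt listed in "np" and report how many there were */
static int example_map_all_irqs(struct device_node *np)
{
	int i, n = of_irq_count(np);

	for (i = 0; i < n; i++)
		if (!irq_of_parse_and_map(np, i))
			return -EINVAL;

	return n;
}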
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index e5ca00893c0..84819963379 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -2,7 +2,6 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_pci.h>
-#include <asm/prom.h>
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 67705381321..8736bc7676c 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -2,10 +2,9 @@
#include <linux/of_pci.h>
#include <linux/of_irq.h>
#include <linux/export.h>
-#include <asm/prom.h>
/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * of_irq_parse_pci - Resolve the interrupt for a PCI device
* @pdev: the device whose interrupt is to be resolved
* @out_irq: structure of_irq filled by this function
*
@@ -15,7 +14,7 @@
 * PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
-int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
struct device_node *dn, *ppnode;
struct pci_dev *ppdev;
@@ -30,7 +29,7 @@ int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
*/
dn = pci_device_to_OF_node(pdev);
if (dn) {
- rc = of_irq_map_one(dn, 0, out_irq);
+ rc = of_irq_parse_one(dn, 0, out_irq);
if (!rc)
return rc;
}
@@ -85,9 +84,37 @@ int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
pdev = ppdev;
}
+ out_irq->np = ppnode;
+ out_irq->args_count = 1;
+ out_irq->args[0] = lspec;
lspec_be = cpu_to_be32(lspec);
laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
- laddr[1] = laddr[2] = cpu_to_be32(0);
- return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+ laddr[1] = laddr[2] = cpu_to_be32(0);
+ return of_irq_parse_raw(laddr, out_irq);
}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
+EXPORT_SYMBOL_GPL(of_irq_parse_pci);
+
+/**
+ * of_irq_parse_and_map_pci() - Decode a PCI irq from the device tree and map to a virq
+ * @dev: The pci device needing an irq
+ * @slot: PCI slot number; passed when used as map_irq callback. Unused
+ * @pin: PCI irq pin number; passed when used as map_irq callback. Unused
+ *
+ * @slot and @pin are unused, but included in the function so that this
+ * function can be used directly as the map_irq callback to pci_fixup_irqs().
+ */
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct of_phandle_args oirq;
+ int ret;
+
+ ret = of_irq_parse_pci(dev, &oirq);
+ if (ret) {
+ dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
+ return 0; /* Proper return code 0 == NO_IRQ */
+ }
+
+ return irq_create_of_mapping(&oirq);
+}
+EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
+
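
As the kernel-doc above notes, the signature deliberately matches the map_irq callback; the pci-mvebu hunk further down in this diff drops its private wrapper and plugs the helper in directly. A sketch of that pattern for an ARM host driver (the example_* ops and setup callback are hypothetical; struct hw_pci and pci_common_init() are the same interfaces used by the mvebu and R-Car code in this diff):

static struct hw_pci example_hw_pci __initdata = {
	.nr_controllers	= 1,
	.ops		= &example_pci_ops,		/* driver's pci_ops */
	.setup		= example_pcie_setup,		/* driver's setup() hook */
	.map_irq	= of_irq_parse_and_map_pci,	/* DT-based IRQ mapping */
};

The driver would then hand example_hw_pci to pci_common_init() from its probe path, as mvebu_pcie_enable() does below.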
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 4ec19cbee57..7b666736c16 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_pdt.h>
-#include <asm/prom.h>
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f6dcde22082..404d1daebef 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -215,6 +215,8 @@ static struct platform_device *of_platform_device_create_pdata(
dev->archdata.dma_mask = 0xffffffffUL;
#endif
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ if (!dev->dev.dma_mask)
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
@@ -280,9 +282,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
else
of_device_make_bus_id(&dev->dev);
- /* setup amba-specific device info */
- dev->dma_mask = ~0;
-
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
if (prop)
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index 0eb5c38b4e0..e21012bde63 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -9,18 +9,24 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/device.h>
-static bool selftest_passed = true;
+static struct selftest_results {
+ int passed;
+ int failed;
+} selftest_results;
+
#define selftest(result, fmt, ...) { \
if (!(result)) { \
- pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
- selftest_passed = false; \
+ selftest_results.failed++; \
+ pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} else { \
- pr_info("pass %s:%i\n", __FILE__, __LINE__); \
+ selftest_results.passed++; \
+ pr_debug("pass %s():%i\n", __func__, __LINE__); \
} \
}
@@ -131,7 +137,6 @@ static void __init of_selftest_property_match_string(void)
struct device_node *np;
int rc;
- pr_info("start\n");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
if (!np) {
pr_err("No testcase data in device tree\n");
@@ -154,6 +159,147 @@ static void __init of_selftest_property_match_string(void)
selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
}
+static void __init of_selftest_parse_interrupts(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int i, rc;
+
+ np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ bool passed = true;
+ args.args_count = 0;
+ rc = of_irq_parse_one(np, i, &args);
+
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
+ }
+ of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/interrupts/interrupts1");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ bool passed = true;
+ args.args_count = 0;
+ rc = of_irq_parse_one(np, i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 9);
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 10);
+ passed &= (args.args[1] == 11);
+ passed &= (args.args[2] == 12);
+ break;
+ case 2:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 13);
+ passed &= (args.args[1] == 14);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 15);
+ passed &= (args.args[1] == 16);
+ break;
+ default:
+ passed = false;
+ }
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
+ }
+ of_node_put(np);
+}
+
+static void __init of_selftest_parse_interrupts_extended(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int i, rc;
+
+ np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 7; i++) {
+ bool passed = true;
+ rc = of_irq_parse_one(np, i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 1);
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 2);
+ passed &= (args.args[1] == 3);
+ passed &= (args.args[2] == 4);
+ break;
+ case 2:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 5);
+ passed &= (args.args[1] == 6);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 9);
+ break;
+ case 4:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 10);
+ passed &= (args.args[1] == 11);
+ passed &= (args.args[2] == 12);
+ break;
+ case 5:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 13);
+ passed &= (args.args[1] == 14);
+ break;
+ case 6:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 15);
+ break;
+ default:
+ passed = false;
+ }
+
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
+ }
+ of_node_put(np);
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -168,7 +314,10 @@ static int __init of_selftest(void)
pr_info("start of selftest - you will see error messages\n");
of_selftest_parse_phandle_with_args();
of_selftest_property_match_string();
- pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ of_selftest_parse_interrupts();
+ of_selftest_parse_interrupts_extended();
+ pr_info("end of selftest - %i passed, %i failed\n",
+ selftest_results.passed, selftest_results.failed);
return 0;
}
late_initcall(of_selftest);
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 70694ce38be..2872ece81f3 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -31,15 +31,18 @@ menuconfig PARPORT
If unsure, say Y.
+config ARCH_MIGHT_HAVE_PC_PARPORT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might have PC parallel port hardware.
+
if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
- depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
- (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
- !XTENSA && !CRIS && !H8300
-
- ---help---
+ depends on ARCH_MIGHT_HAVE_PC_PARPORT
+ help
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
parallel ports. PA-RISC owners should only say Y here if they
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index d4716273651..c864f82bd37 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -1331,7 +1331,7 @@ static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
break;
/* Initialize mutex used to take interrupts into account */
- INIT_COMPLETION(priv->irq_complete);
+ reinit_completion(&priv->irq_complete);
/* Enable serviceIntr */
parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
@@ -1446,7 +1446,7 @@ static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
priv->irq_mode = PARPORT_IP32_IRQ_HERE;
parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
- INIT_COMPLETION(priv->irq_complete);
+ reinit_completion(&priv->irq_complete);
parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
nfault_timeout = min((unsigned long)physport->cad->timeout,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 903e1285fda..96376152622 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2004,6 +2004,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
struct resource *ECR_res = NULL;
struct resource *EPP_res = NULL;
struct platform_device *pdev = NULL;
+ int ret;
if (!dev) {
/* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@ struct parport *parport_pc_probe_port(unsigned long int base,
return NULL;
dev = &pdev->dev;
- dev->coherent_dma_mask = DMA_BIT_MASK(24);
- dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+ if (ret) {
+ dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+ dma = PARPORT_DMA_NONE;
+ }
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
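
For readers unfamiliar with the new helper: dma_coerce_mask_and_coherent() is expected to be roughly equivalent to the removed open-coded assignment plus a proper mask check, along the lines of this sketch (not the kernel's literal implementation):

	/* Sketch of what dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24)) does */
	dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));

which is why the probe path can now detect a failure and fall back to PARPORT_DMA_NONE instead of silently assuming the 24-bit mask is usable.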
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 95655d7c0d0..e52d7ffa38b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
* Otherwise is returns a bitmask with supported features. Current
* features reported are:
* PCI_PASID_CAP_EXEC - Execute permission supported
- * PCI_PASID_CAP_PRIV - Priviledged mode supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
*/
int pci_pasid_features(struct pci_dev *pdev)
{
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 3d950481112..47d46c6d846 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -3,7 +3,7 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_KIRKWOOD
+ depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD
depends on OF
config PCIE_DW
@@ -15,8 +15,22 @@ config PCI_EXYNOS
select PCIEPORTBUS
select PCIE_DW
+config PCI_IMX6
+ bool "Freescale i.MX6 PCIe controller"
+ depends on SOC_IMX6Q
+ select PCIEPORTBUS
+ select PCIE_DW
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA
+config PCI_RCAR_GEN2
+ bool "Renesas R-Car Gen2 Internal PCI controller"
+ depends on ARM && (ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST)
+ help
+ Say Y here if you want internal PCI support on R-Car Gen2 SoC.
+	  There are 3 internal PCI controllers, each with a single
+	  built-in EHCI/OHCI host controller.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index c9a997b2690..13fb3333aa0 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 94e096bb2d0..24beed38ddc 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -48,6 +48,7 @@ struct exynos_pcie {
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
+#define IRQ_MSI_ENABLE (0x1 << 2)
#define PCIE_IRQ_EN_SPECIAL 0x014
#define PCIE_PWR_RESET 0x018
#define PCIE_CORE_RESET 0x01c
@@ -77,18 +78,28 @@ struct exynos_pcie {
#define PCIE_PHY_PLL_BIAS 0x00c
#define PCIE_PHY_DCC_FEEDBACK 0x014
#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_COMMON_POWER 0x064
+#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
#define PCIE_PHY_TRSV0_EMP_LVL 0x084
#define PCIE_PHY_TRSV0_DRV_LVL 0x088
#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_POWER 0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV0_LVCC 0x0dc
#define PCIE_PHY_TRSV1_EMP_LVL 0x144
#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_POWER 0x184
+#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV1_LVCC 0x19c
#define PCIE_PHY_TRSV2_EMP_LVL 0x204
#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_POWER 0x244
+#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV2_LVCC 0x25c
#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_POWER 0x304
+#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val &= ~PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val &= ~PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val &= ~PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val &= ~PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val &= ~PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val |= PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val |= PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val |= PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val |= PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val |= PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
static void exynos_pcie_init_phy(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
/* de-assert phy reset */
exynos_pcie_deassert_phy_reset(pp);
+ /* power on phy */
+ exynos_pcie_power_on_phy(pp);
+
/* initialize phy */
exynos_pcie_init_phy(pp);
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
PCIE_PHY_PLL_LOCKED);
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
}
+ /* power off phy */
+ exynos_pcie_power_off_phy(pp);
+
dev_err(pp->dev, "PCIe Link Fail\n");
return -EINVAL;
}
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ dw_handle_msi_irq(pp);
+
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ dw_pcie_msi_init(pp);
+
+ /* enable MSI interrupt */
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+ val |= IRQ_MSI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+ return;
+}
+
static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
{
exynos_pcie_enable_irq_pulse(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ exynos_pcie_msi_init(pp);
+
return;
}
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
return ret;
}
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (!pp->msi_irq) {
+ dev_err(&pdev->dev, "failed to get msi irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ exynos_pcie_msi_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request msi irq\n");
+ return ret;
+ }
+ }
+
pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
@@ -487,18 +599,24 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
- if (IS_ERR(exynos_pcie->elbi_base))
- return PTR_ERR(exynos_pcie->elbi_base);
+ if (IS_ERR(exynos_pcie->elbi_base)) {
+ ret = PTR_ERR(exynos_pcie->elbi_base);
+ goto fail_bus_clk;
+ }
phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
- if (IS_ERR(exynos_pcie->phy_base))
- return PTR_ERR(exynos_pcie->phy_base);
+ if (IS_ERR(exynos_pcie->phy_base)) {
+ ret = PTR_ERR(exynos_pcie->phy_base);
+ goto fail_bus_clk;
+ }
block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
- if (IS_ERR(exynos_pcie->block_base))
- return PTR_ERR(exynos_pcie->block_base);
+ if (IS_ERR(exynos_pcie->block_base)) {
+ ret = PTR_ERR(exynos_pcie->block_base);
+ goto fail_bus_clk;
+ }
ret = add_pcie_port(pp, pdev);
if (ret < 0)
@@ -535,7 +653,7 @@ static struct platform_driver exynos_pcie_driver = {
.driver = {
.name = "exynos-pcie",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(exynos_pcie_of_match),
+ .of_match_table = exynos_pcie_of_match,
},
};
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 00000000000..bd70af8f31a
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,568 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ * http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+ int reset_gpio;
+ int power_on_gpio;
+ int wake_up_gpio;
+ int disable_gpio;
+ struct clk *lvds_gate;
+ struct clk *sata_ref_100m;
+ struct clk *pcie_ref_125m;
+ struct clk *pcie_axi;
+ struct pcie_port pp;
+ struct regmap *iomuxc_gpr;
+ void __iomem *mem_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+ u32 val;
+ u32 max_iterations = 10;
+ u32 wait_counter = 0;
+
+ do {
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ wait_counter++;
+
+ if (val == exp_val)
+ return 0;
+
+ udelay(1);
+ } while (wait_counter < max_iterations);
+
+ return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+ u32 val;
+ int ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+{
+ u32 val, phy_ctl;
+ int ret;
+
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ /* assert Read signal */
+ phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ *data = val & 0xffff;
+
+ /* deassert Read signal */
+ writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+ u32 var;
+ int ret;
+
+ /* write addr */
+ /* cap addr */
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* capture data */
+ var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert cap data */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ /* assert wr signal */
+ var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack */
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert wr signal */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+ return 0;
+}
+
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ return 0;
+}
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+ msleep(100);
+ gpio_set_value(imx6_pcie->reset_gpio, 1);
+
+ return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ int ret;
+
+ if (gpio_is_valid(imx6_pcie->power_on_gpio))
+ gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+ ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable sata_ref_100m\n");
+ goto err_sata_ref;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
+ goto err_pcie_ref;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable lvds_gate\n");
+ goto err_lvds_gate;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_axi\n");
+ goto err_pcie_axi;
+ }
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+
+ return 0;
+
+err_pcie_axi:
+ clk_disable_unprepare(imx6_pcie->lvds_gate);
+err_lvds_gate:
+ clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
+err_pcie_ref:
+ clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+err_sata_ref:
+ return ret;
+
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
+ int count = 0;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ imx6_pcie_assert_core_reset(pp);
+
+ imx6_pcie_init_phy(pp);
+
+ imx6_pcie_deassert_core_reset(pp);
+
+ dw_pcie_setup_rc(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ count++;
+ if (count >= 200) {
+ dev_err(pp->dev, "phy link never came up\n");
+ dev_dbg(pp->dev,
+ "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ break;
+ }
+ }
+
+ return;
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+ u32 rc, ltssm, rx_valid, temp;
+
+ /* link is debug bit 36, debug register 1 starts at bit 32 */
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+ if (rc)
+ return -EAGAIN;
+
+ /*
+ * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+ * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+ * If (MAC/LTSSM.state == Recovery.RcvrLock)
+ * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
+ * to gen2 is stuck
+ */
+ pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+ ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+
+ if (rx_valid & 0x01)
+ return 0;
+
+ if (ltssm != 0x0d)
+ return 0;
+
+ dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+
+ pcie_phy_read(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, &temp);
+ temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
+ | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, temp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, &temp);
+ temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
+ | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, temp);
+
+ return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+ .link_up = imx6_pcie_link_up,
+ .host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (!pp->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &imx6_pcie_host_ops;
+
+ spin_lock_init(&pp->conf_lock);
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+ return -ENOMEM;
+
+ pp = &imx6_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ /* Added for PCI abort handling */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dbi_base) {
+ dev_err(&pdev->dev, "dbi_base memory resource not found\n");
+ return -ENODEV;
+ }
+
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pp->dbi_base)) {
+ ret = PTR_ERR(pp->dbi_base);
+ goto err;
+ }
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
+ dev_err(&pdev->dev, "no reset-gpio defined\n");
+ ret = -ENODEV;
+ }
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ goto err;
+ }
+
+ imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->power_on_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "PCIe power enable");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get power-on gpio\n");
+ goto err;
+ }
+ }
+
+ imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->wake_up_gpio,
+ GPIOF_IN,
+ "PCIe wake up");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+ goto err;
+ }
+ }
+
+ imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->disable_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->disable_gpio,
+ GPIOF_OUT_INIT_HIGH,
+ "PCIe disable endpoint");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+ goto err;
+ }
+ }
+
+ /* Fetch clocks */
+ imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
+ if (IS_ERR(imx6_pcie->lvds_gate)) {
+ dev_err(&pdev->dev,
+ "lvds_gate clock select missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->lvds_gate);
+ goto err;
+ }
+
+ imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+ if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+ dev_err(&pdev->dev,
+ "sata_ref_100m clock source missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->sata_ref_100m);
+ goto err;
+ }
+
+ imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+ if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+ dev_err(&pdev->dev,
+ "pcie_ref_125m clock source missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
+ goto err;
+ }
+
+ imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+ if (IS_ERR(imx6_pcie->pcie_axi)) {
+ dev_err(&pdev->dev,
+ "pcie_axi clock source missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->pcie_axi);
+ goto err;
+ }
+
+ /* Grab GPR config register range */
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+ ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
+ goto err;
+ }
+
+ ret = imx6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto err;
+
+ platform_set_drvdata(pdev, imx6_pcie);
+ return 0;
+
+err:
+ return ret;
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+ .driver = {
+ .name = "imx6q-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = imx6_pcie_of_match,
+ },
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+ return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+fs_initcall(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 729d5a101d6..c269e430c76 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -9,13 +9,17 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/mbus.h>
+#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
-#include <linux/of_pci.h>
#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
#include <linux/of_platform.h>
/*
@@ -103,6 +107,7 @@ struct mvebu_pcie_port;
struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
+ struct msi_chip *msi;
struct resource io;
struct resource realio;
struct resource mem;
@@ -115,7 +120,6 @@ struct mvebu_pcie_port {
char *name;
void __iomem *base;
spinlock_t conf_lock;
- int haslink;
u32 port;
u32 lane;
int devfn;
@@ -124,6 +128,9 @@ struct mvebu_pcie_port {
unsigned int io_target;
unsigned int io_attr;
struct clk *clk;
+ int reset_gpio;
+ int reset_active_low;
+ char *reset_name;
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
@@ -133,29 +140,39 @@ struct mvebu_pcie_port {
size_t iowin_size;
};
+static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->base + reg);
+}
+
+static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
+{
+ return readl(port->base + reg);
+}
+
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
- return !(readl(port->base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+ return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
u32 stat;
- stat = readl(port->base + PCIE_STAT_OFF);
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
stat &= ~PCIE_STAT_BUS;
stat |= nr << 8;
- writel(stat, port->base + PCIE_STAT_OFF);
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
}
static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
{
u32 stat;
- stat = readl(port->base + PCIE_STAT_OFF);
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
stat &= ~PCIE_STAT_DEV;
stat |= nr << 16;
- writel(stat, port->base + PCIE_STAT_OFF);
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
}
/*
@@ -163,7 +180,7 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
* BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
* WIN[0-3] -> DRAM bank[0-3]
*/
-static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
const struct mbus_dram_target_info *dram;
u32 size;
@@ -173,33 +190,34 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
/* First, disable and clear BARs and windows. */
for (i = 1; i < 3; i++) {
- writel(0, port->base + PCIE_BAR_CTRL_OFF(i));
- writel(0, port->base + PCIE_BAR_LO_OFF(i));
- writel(0, port->base + PCIE_BAR_HI_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
}
for (i = 0; i < 5; i++) {
- writel(0, port->base + PCIE_WIN04_CTRL_OFF(i));
- writel(0, port->base + PCIE_WIN04_BASE_OFF(i));
- writel(0, port->base + PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
}
- writel(0, port->base + PCIE_WIN5_CTRL_OFF);
- writel(0, port->base + PCIE_WIN5_BASE_OFF);
- writel(0, port->base + PCIE_WIN5_REMAP_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
/* Setup windows for DDR banks. Count total DDR size on the fly. */
size = 0;
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
- writel(cs->base & 0xffff0000,
- port->base + PCIE_WIN04_BASE_OFF(i));
- writel(0, port->base + PCIE_WIN04_REMAP_OFF(i));
- writel(((cs->size - 1) & 0xffff0000) |
- (cs->mbus_attr << 8) |
- (dram->mbus_dram_target_id << 4) | 1,
- port->base + PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, cs->base & 0xffff0000,
+ PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port,
+ ((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ PCIE_WIN04_CTRL_OFF(i));
size += cs->size;
}
@@ -209,41 +227,40 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
size = 1 << fls(size);
/* Setup BAR[1] to all DRAM banks. */
- writel(dram->cs[0].base, port->base + PCIE_BAR_LO_OFF(1));
- writel(0, port->base + PCIE_BAR_HI_OFF(1));
- writel(((size - 1) & 0xffff0000) | 1,
- port->base + PCIE_BAR_CTRL_OFF(1));
+ mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
+ mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
+ PCIE_BAR_CTRL_OFF(1));
}
-static void __init mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
- u16 cmd;
- u32 mask;
+ u32 cmd, mask;
/* Point PCIe unit MBUS decode windows to DRAM space. */
mvebu_pcie_setup_wins(port);
/* Master + slave enable. */
- cmd = readw(port->base + PCIE_CMD_OFF);
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
cmd |= PCI_COMMAND_IO;
cmd |= PCI_COMMAND_MEMORY;
cmd |= PCI_COMMAND_MASTER;
- writew(cmd, port->base + PCIE_CMD_OFF);
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
/* Enable interrupt lines A-D. */
- mask = readl(port->base + PCIE_MASK_OFF);
+ mask = mvebu_readl(port, PCIE_MASK_OFF);
mask |= PCIE_MASK_ENABLE_INTS;
- writel(mask, port->base + PCIE_MASK_OFF);
+ mvebu_writel(port, mask, PCIE_MASK_OFF);
}
static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
- writel(PCIE_CONF_ADDR(bus->number, devfn, where),
- port->base + PCIE_CONF_ADDR_OFF);
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
- *val = readl(port->base + PCIE_CONF_DATA_OFF);
+ *val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
@@ -257,21 +274,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
- int ret = PCIBIOS_SUCCESSFUL;
+ u32 _val, shift = 8 * (where & 3);
- writel(PCIE_CONF_ADDR(bus->number, devfn, where),
- port->base + PCIE_CONF_ADDR_OFF);
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+ _val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
if (size == 4)
- writel(val, port->base + PCIE_CONF_DATA_OFF);
+ _val = val;
else if (size == 2)
- writew(val, port->base + PCIE_CONF_DATA_OFF + (where & 3));
+ _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift);
else if (size == 1)
- writeb(val, port->base + PCIE_CONF_DATA_OFF + (where & 3));
+ _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift);
else
- ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
- return ret;
+ mvebu_writel(port, _val, PCIE_CONF_DATA_OFF);
+
+ return PCIBIOS_SUCCESSFUL;
}
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
@@ -552,7 +572,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (bus->number == 0)
return mvebu_sw_pci_bridge_write(port, where, size, val);
- if (!port->haslink)
+ if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
/*
@@ -594,7 +614,7 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
if (bus->number == 0)
return mvebu_sw_pci_bridge_read(port, where, size, val);
- if (!port->haslink) {
+ if (!mvebu_pcie_link_up(port)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -626,7 +646,7 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
-static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
+static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
@@ -645,19 +665,6 @@ static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
-static int __init mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct of_irq oirq;
- int ret;
-
- ret = of_irq_map_pci(dev, &oirq);
- if (ret)
- return ret;
-
- return irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
-}
-
static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
@@ -673,11 +680,17 @@ static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
return bus;
}
-resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align)
+static void mvebu_pcie_add_bus(struct pci_bus *bus)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ bus->msi = pcie->msi;
+}
+
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align)
{
if (dev->bus->number != 0)
return start;
@@ -696,7 +709,7 @@ resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
return start;
}
-static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
+static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
{
struct hw_pci hw;
@@ -706,9 +719,10 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
hw.private_data = (void **)&pcie;
hw.setup = mvebu_pcie_setup;
hw.scan = mvebu_pcie_scan_bus;
- hw.map_irq = mvebu_pcie_map_irq;
+ hw.map_irq = of_irq_parse_and_map_pci;
hw.ops = &mvebu_pcie_ops;
hw.align_resource = mvebu_pcie_align_resource;
+ hw.add_bus = mvebu_pcie_add_bus;
pci_common_init(&hw);
}
@@ -718,10 +732,8 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
* <...> property for one that matches the given port/lane. Once
* found, maps it.
*/
-static void __iomem * __init
-mvebu_pcie_map_registers(struct platform_device *pdev,
- struct device_node *np,
- struct mvebu_pcie_port *port)
+static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+ struct device_node *np, struct mvebu_pcie_port *port)
{
struct resource regs;
int ret = 0;
@@ -777,7 +789,22 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-static int __init mvebu_pcie_probe(struct platform_device *pdev)
+static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
+{
+ struct device_node *msi_node;
+
+ msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
+ "msi-parent", 0);
+ if (!msi_node)
+ return;
+
+ pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+
+ if (pcie->msi)
+ pcie->msi->dev = &pcie->pdev->dev;
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
@@ -790,6 +817,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
@@ -818,13 +846,14 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
return ret;
}
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
- pcie->nports++;
+ i++;
}
- pcie->ports = devm_kzalloc(&pdev->dev, pcie->nports *
+ pcie->ports = devm_kzalloc(&pdev->dev, i *
sizeof(struct mvebu_pcie_port),
GFP_KERNEL);
if (!pcie->ports)
@@ -833,6 +862,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
i = 0;
for_each_child_of_node(pdev->dev.of_node, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
+ enum of_gpio_flags flags;
if (!of_device_is_available(child))
continue;
@@ -873,45 +903,68 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ port->reset_gpio = of_get_named_gpio_flags(child,
+ "reset-gpios", 0, &flags);
+ if (gpio_is_valid(port->reset_gpio)) {
+ u32 reset_udelay = 20000;
+
+ port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ port->reset_name = kasprintf(GFP_KERNEL,
+ "pcie%d.%d-reset", port->port, port->lane);
+ of_property_read_u32(child, "reset-delay-us",
+ &reset_udelay);
+
+ ret = devm_gpio_request_one(&pdev->dev,
+ port->reset_gpio, GPIOF_DIR_OUT, port->reset_name);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ continue;
+ }
+
+ gpio_set_value(port->reset_gpio,
+ (port->reset_active_low) ? 1 : 0);
+ msleep(reset_udelay/1000);
+ }
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+ port->port, port->lane);
+ continue;
+ }
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ continue;
+
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
port->port, port->lane);
port->base = NULL;
+ clk_disable_unprepare(port->clk);
continue;
}
mvebu_pcie_set_local_dev_nr(port, 1);
- if (mvebu_pcie_link_up(port)) {
- port->haslink = 1;
- dev_info(&pdev->dev, "PCIe%d.%d: link up\n",
- port->port, port->lane);
- } else {
- port->haslink = 0;
- dev_info(&pdev->dev, "PCIe%d.%d: link down\n",
- port->port, port->lane);
- }
-
port->clk = of_clk_get_by_name(child, NULL);
if (IS_ERR(port->clk)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
port->port, port->lane);
iounmap(port->base);
- port->haslink = 0;
continue;
}
port->dn = child;
-
- clk_prepare_enable(port->clk);
spin_lock_init(&port->conf_lock);
-
mvebu_sw_pci_bridge_init(port);
-
i++;
}
+ pcie->nports = i;
+ mvebu_pcie_msi_enable(pcie);
mvebu_pcie_enable(pcie);
return 0;
@@ -920,6 +973,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,armada-xp-pcie", },
{ .compatible = "marvell,armada-370-pcie", },
+ { .compatible = "marvell,dove-pcie", },
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
@@ -931,16 +985,12 @@ static struct platform_driver mvebu_pcie_driver = {
.name = "mvebu-pcie",
.of_match_table =
of_match_ptr(mvebu_pcie_of_match_table),
+ /* driver unloading/unbinding currently not supported */
+ .suppress_bind_attrs = true,
},
+ .probe = mvebu_pcie_probe,
};
-
-static int __init mvebu_pcie_init(void)
-{
- return platform_driver_probe(&mvebu_pcie_driver,
- mvebu_pcie_probe);
-}
-
-subsys_initcall(mvebu_pcie_init);
+module_platform_driver(mvebu_pcie_driver);
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell EBU PCIe driver");
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
new file mode 100644
index 00000000000..cbaa5c4397e
--- /dev/null
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -0,0 +1,333 @@
+/*
+ * pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0 0x0
+#define RCAR_PCIAHB_PREFETCH4 0x1
+#define RCAR_PCIAHB_PREFETCH8 0x2
+#define RCAR_PCIAHB_PREFETCH16 0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST (1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_A (1 << 16)
+#define RCAR_PCI_INT_B (1 << 17)
+#define RCAR_PCI_INT_PME (1 << 19)
+
+#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17)
+#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \
+ RCAR_AHB_BUS_MMODE_BYTE_BURST | \
+ RCAR_AHB_BUS_MMODE_WR_INCR | \
+ RCAR_AHB_BUS_MMODE_HBUS_REQ | \
+ RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST (1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK (1 << 1)
+#define RCAR_USBCTR_PLL_RST (1 << 2)
+#define RCAR_USBCTR_DIRPD (1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+/* Number of internal PCI controllers */
+#define RCAR_PCI_NR_CONTROLLERS 3
+
+struct rcar_pci_priv {
+ void __iomem *reg;
+ struct resource io_res;
+ struct resource mem_res;
+ struct resource *cfg_res;
+ int irq;
+};
+
+/* PCI configuration space operations */
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+ int slot, val;
+
+ if (sys->busnr != bus->number || PCI_FUNC(devfn))
+ return NULL;
+
+ /* Only one EHCI/OHCI device built-in */
+ slot = PCI_SLOT(devfn);
+ if (slot > 2)
+ return NULL;
+
+ val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+ RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
+ iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ return priv->reg + (slot >> 1) * 0x100 + where;
+}
+
+static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+ if (!reg)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *val = ioread8(reg);
+ break;
+ case 2:
+ *val = ioread16(reg);
+ break;
+ default:
+ *val = ioread32(reg);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+ if (!reg)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ iowrite8(val, reg);
+ break;
+ case 2:
+ iowrite16(val, reg);
+ break;
+ default:
+ iowrite32(val, reg);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI interrupt mapping */
+static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_sys_data *sys = dev->bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+
+ return priv->irq;
+}
+
+/* PCI host controller setup */
+static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pci_priv *priv = sys->private_data;
+ void __iomem *reg = priv->reg;
+ u32 val;
+
+ val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+ pr_info("PCI: bus%u revision %x\n", sys->busnr, val);
+
+ /* Disable Direct Power Down State and assert reset */
+ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+ val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+ udelay(4);
+
+ /* De-assert reset and set PCIAHB window1 size to 1GB */
+ val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+ RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+ iowrite32(val | RCAR_USBCTR_PCIAHB_WIN1_1G, reg + RCAR_USBCTR_REG);
+
+ /* Configure AHB master and slave modes */
+ iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+ /* Configure PCI arbiter */
+ val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+ val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+ RCAR_PCI_ARBITER_PCIBP_MODE;
+ iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+ /* PCI-AHB mapping: 0x40000000-0x80000000 */
+ iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
+ reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+ /* AHB-PCI mapping: OHCI/EHCI registers */
+ val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+ iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+ /* Enable AHB-PCI bridge PCI configuration access */
+ iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+ reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ /* Set PCI-AHB Window1 address */
+ iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ reg + PCI_BASE_ADDRESS_1);
+ /* Set AHB-PCI bridge PCI communication area address */
+ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+ iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+ val = ioread32(reg + PCI_COMMAND);
+ val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ iowrite32(val, reg + PCI_COMMAND);
+
+ /* Enable PCI interrupts */
+ iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+ reg + RCAR_PCI_INT_ENABLE_REG);
+
+ /* Add PCI resources */
+ pci_add_resource(&sys->resources, &priv->io_res);
+ pci_add_resource(&sys->resources, &priv->mem_res);
+
+ return 1;
+}
+
+static struct pci_ops rcar_pci_ops = {
+ .read = rcar_pci_read_config,
+ .write = rcar_pci_write_config,
+};
+
+static struct hw_pci rcar_hw_pci __initdata = {
+ .map_irq = rcar_pci_map_irq,
+ .ops = &rcar_pci_ops,
+ .setup = rcar_pci_setup,
+};
+
+static int rcar_pci_count __initdata;
+
+static int __init rcar_pci_add_controller(struct rcar_pci_priv *priv)
+{
+ void **private_data;
+ int count;
+
+ if (rcar_hw_pci.nr_controllers < rcar_pci_count)
+ goto add_priv;
+
+ /* (Re)allocate private data pointer array if needed */
+ count = rcar_pci_count + RCAR_PCI_NR_CONTROLLERS;
+ private_data = kzalloc(count * sizeof(void *), GFP_KERNEL);
+ if (!private_data)
+ return -ENOMEM;
+
+ rcar_pci_count = count;
+ if (rcar_hw_pci.private_data) {
+ memcpy(private_data, rcar_hw_pci.private_data,
+ rcar_hw_pci.nr_controllers * sizeof(void *));
+ kfree(rcar_hw_pci.private_data);
+ }
+
+ rcar_hw_pci.private_data = private_data;
+
+add_priv:
+ /* Add private data pointer to the array */
+ rcar_hw_pci.private_data[rcar_hw_pci.nr_controllers++] = priv;
+ return 0;
+}
+
+static int __init rcar_pci_probe(struct platform_device *pdev)
+{
+ struct resource *cfg_res, *mem_res;
+ struct rcar_pci_priv *priv;
+ void __iomem *reg;
+
+ cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem_res || !mem_res->start)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct rcar_pci_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mem_res = *mem_res;
+ /*
+ * The controller does not support/use port I/O,
+ * so set up a dummy port I/O region here.
+ */
+ priv->io_res.start = priv->mem_res.start;
+ priv->io_res.end = priv->mem_res.end;
+ priv->io_res.flags = IORESOURCE_IO;
+
+ priv->cfg_res = cfg_res;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ priv->reg = reg;
+
+ return rcar_pci_add_controller(priv);
+}
+
+static struct platform_driver rcar_pci_driver = {
+ .driver = {
+ .name = "pci-rcar-gen2",
+ },
+};
+
+static int __init rcar_pci_init(void)
+{
+ int retval;
+
+ retval = platform_driver_probe(&rcar_pci_driver, rcar_pci_probe);
+ if (!retval)
+ pci_common_init(&rcar_hw_pci);
+
+ /* Private data pointer array is not needed any more */
+ kfree(rcar_hw_pci.private_data);
+ rcar_hw_pci.private_data = NULL;
+
+ return retval;
+}
+
+subsys_initcall(rcar_pci_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
+MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
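The configuration accessor above serves the AHB-PCI host bridge from slot 0 and the single built-in OHCI/EHCI pair from slots 1 and 2, selecting the window via RCAR_AHBPCI_WIN1_CTR_REG and deriving the register offset as (slot >> 1) * 0x100 + where. A minimal standalone sketch of that devfn decoding and offset arithmetic (plain C, illustrative only; the 0x100 stride is taken from rcar_pci_cfg_base() above):

#include <stdio.h>

/* Standard PCI devfn layout: slot in bits 7:3, function in bits 2:0 */
#define SLOT_OF(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC_OF(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int devfn, where = 0x04;	/* e.g. the command register */

	for (devfn = 0; devfn < (3 << 3); devfn += (1 << 3)) {
		unsigned int slot = SLOT_OF(devfn);

		/* slot 0 -> host bridge window, slots 1-2 -> device window */
		printf("slot %u func %u -> %s config window, offset 0x%x\n",
		       slot, FUNC_OF(devfn),
		       slot ? "device" : "host",
		       (slot >> 1) * 0x100 + where);
	}
	return 0;
}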
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 2e9888a0635..0afbbbc55c8 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -249,7 +249,7 @@ struct tegra_pcie {
void __iomem *afi;
int irq;
- struct list_head busses;
+ struct list_head buses;
struct resource *cs;
struct resource io;
@@ -399,24 +399,24 @@ free:
/*
* Look up a virtual address mapping for the specified bus number. If no such
- * mapping existis, try to create one.
+ * mapping exists, try to create one.
*/
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct tegra_pcie_bus *bus;
- list_for_each_entry(bus, &pcie->busses, list)
+ list_for_each_entry(bus, &pcie->buses, list)
if (bus->nr == busnr)
- return bus->area->addr;
+ return (void __iomem *)bus->area->addr;
bus = tegra_pcie_bus_alloc(pcie, busnr);
if (IS_ERR(bus))
return NULL;
- list_add_tail(&bus->list, &pcie->busses);
+ list_add_tail(&bus->list, &pcie->buses);
- return bus->area->addr;
+ return (void __iomem *)bus->area->addr;
}
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
@@ -808,7 +808,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
afi_writel(pcie, value, AFI_FUSE);
- /* initialze internal PHY, enable up to 16 PCIE lanes */
+ /* initialize internal PHY, enable up to 16 PCIE lanes */
pads_writel(pcie, 0x0, PADS_CTL_SEL);
/* override IDDQ to 1 on all 4 lanes */
@@ -1624,7 +1624,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
- INIT_LIST_HEAD(&pcie->busses);
+ INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
pcie->soc_data = match->data;
pcie->dev = &pdev->dev;
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c10e9ac9bbb..e33b68be039 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -11,8 +11,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -64,7 +67,7 @@
static struct hw_pci dw_pci;
-unsigned long global_io_offset;
+static unsigned long global_io_offset;
static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
@@ -115,8 +118,8 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
writel(val, pp->dbi_base + reg);
}
-int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
{
int ret;
@@ -128,8 +131,8 @@ int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
return ret;
}
-int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
{
int ret;
@@ -142,6 +145,205 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
return ret;
}
+static struct irq_chip dw_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+void dw_handle_msi_irq(struct pcie_port *pp)
+{
+ unsigned long val;
+ int i, pos, irq;
+
+ for (i = 0; i < MAX_MSI_CTRLS; i++) {
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+ (u32 *)&val);
+ if (val) {
+ pos = 0;
+ while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+ irq = irq_find_mapping(pp->irq_domain,
+ i * 32 + pos);
+ generic_handle_irq(irq);
+ pos++;
+ }
+ }
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
+ }
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+ pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+ /* program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ virt_to_phys((void *)pp->msi_data));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+ int flag = 1;
+
+ do {
+ pos = find_next_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos);
+ /* if we have reached the end, get out of here */
+ if (pos == MAX_MSI_IRQS)
+ return -ENOSPC;
+ /*
+ * Check whether this position is at the correct offset: nvec is
+ * always a power of two, so pos0 must be aligned to nvec.
+ */
+ if (pos % msgvec)
+ pos += msgvec - (pos % msgvec);
+ else
+ flag = 0;
+ } while (flag);
+
+ *pos0 = pos;
+ return 0;
+}
+
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+ int res, bit, irq, pos0, pos1, i;
+ u32 val;
+ struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS);
+ if (pos0 % no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+ goto no_valid_irq;
+ }
+ if (no_irqs > 1) {
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ /* there must be nvec consecutive free bits */
+ while ((pos1 - pos0) < no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+ goto no_valid_irq;
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ }
+ }
+
+ irq = irq_find_mapping(pp->irq_domain, pos0);
+ if (!irq)
+ goto no_valid_irq;
+
+ i = 0;
+ while (i < no_irqs) {
+ set_bit(pos0 + i, pp->msi_irq_in_use);
+ irq_alloc_descs((irq + i), (irq + i), 1, 0);
+ irq_set_msi_desc(irq + i, desc);
+ /* Enable the corresponding interrupt in the MSI interrupt controller */
+ res = ((pos0 + i) / 32) * 12;
+ bit = (pos0 + i) % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ i++;
+ }
+
+ *pos = pos0;
+ return irq;
+
+no_valid_irq:
+ *pos = pos0;
+ return -ENOSPC;
+}
+
+static void clear_irq(unsigned int irq)
+{
+ int res, bit, val, pos;
+ struct irq_desc *desc;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ /* get the port structure */
+ desc = irq_to_desc(irq);
+ msi = irq_desc_get_msi_desc(desc);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ if (!pp) {
+ BUG();
+ return;
+ }
+
+ pos = data->hwirq;
+
+ irq_free_desc(irq);
+
+ clear_bit(pos, pp->msi_irq_in_use);
+
+ /* Disable the corresponding interrupt on the MSI interrupt controller */
+ res = (pos / 32) * 12;
+ bit = pos % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ int irq, pos, msgvec;
+ u16 msg_ctr;
+ struct msi_msg msg;
+ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ &msg_ctr);
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (msgvec == 0)
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+ if (msgvec > 5)
+ msgvec = 0;
+
+ irq = assign_irq((1 << msgvec), desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
+ msg_ctr |= msgvec << 4;
+ pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ msg_ctr);
+ desc->msi_attrib.multiple = msgvec;
+
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+ msg.data = pos;
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
+};
+
int dw_pcie_link_up(struct pcie_port *pp)
{
if (pp->ops->link_up)
@@ -150,12 +352,27 @@ int dw_pcie_link_up(struct pcie_port *pp)
return 0;
}
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = dw_pcie_msi_map,
+};
+
int __init dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct of_pci_range range;
struct of_pci_range_parser parser;
u32 val;
+ int i;
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pp->dev, "missing ranges property\n");
@@ -223,6 +440,19 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
return -EINVAL;
}
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+ MAX_MSI_IRQS, &msi_domain_ops,
+ &dw_pcie_msi_chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+ }
+
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -438,7 +668,7 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct pcie_port *pp;
@@ -461,7 +691,7 @@ int dw_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
-struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct pci_bus *bus;
struct pcie_port *pp = sys_to_pcie(sys);
@@ -478,17 +708,28 @@ struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
return bus;
}
-int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
return pp->irq;
}
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+ dw_pcie_msi_chip.dev = pp->dev;
+ bus->msi = &dw_pcie_msi_chip;
+ }
+}
+
static struct hw_pci dw_pci = {
.setup = dw_pcie_setup,
.scan = dw_pcie_scan_bus,
.map_irq = dw_pcie_map_irq,
+ .add_bus = dw_pcie_add_bus,
};
void dw_pcie_setup_rc(struct pcie_port *pp)
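The MSI plumbing above tracks up to MAX_MSI_IRQS vectors and, for each hardware IRQ number, touches an enable/status register selected by (pos / 32) * 12 with bit pos % 32 — the same arithmetic used in assign_irq(), clear_irq() and dw_handle_msi_irq(). A standalone sketch of that mapping (plain C; the 0x828 base is only an illustrative stand-in for PCIE_MSI_INTR0_ENABLE, and the 96-vector range is hypothetical):

#include <stdio.h>

#define MSI_INTR0_ENABLE	0x828	/* illustrative stand-in offset */

int main(void)
{
	unsigned int pos;

	/* pretend MAX_MSI_IRQS were raised to 96 (three banks of 32) */
	for (pos = 0; pos < 96; pos += 16) {
		unsigned int res = (pos / 32) * 12;	/* 12-byte bank stride */
		unsigned int bit = pos % 32;

		printf("hwirq %2u -> enable reg 0x%x, bit %u\n",
		       pos, MSI_INTR0_ENABLE + res, bit);
	}
	return 0;
}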
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 133820f1da9..c15379be237 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -11,6 +11,9 @@
* published by the Free Software Foundation.
*/
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
struct pcie_port_info {
u32 cfg0_size;
u32 cfg1_size;
@@ -20,6 +23,14 @@ struct pcie_port_info {
phys_addr_t mem_bus_addr;
};
+/*
+ * The maximum number of MSI IRQs is 256 per controller, but keep it
+ * at 32 for now; we will probably never need more than 32. If needed,
+ * increase it in multiples of 32.
+ */
+#define MAX_MSI_IRQS 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
@@ -38,6 +49,10 @@ struct pcie_port {
int irq;
u32 lanes;
struct pcie_host_ops *ops;
+ int msi_irq;
+ struct irq_domain *irq_domain;
+ unsigned long msi_data;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
struct pcie_host_ops {
@@ -51,15 +66,12 @@ struct pcie_host_ops {
void (*host_init)(struct pcie_port *pp);
};
-extern unsigned long global_io_offset;
-
int cfg_read(void __iomem *addr, int where, int size, u32 *val);
int cfg_write(void __iomem *addr, int where, int size, u32 val);
-int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
-int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+void dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
-int dw_pcie_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
-int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+#endif /* _PCIE_DESIGNWARE_H */
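find_valid_pos0() above enforces the Multi-MSI rule that a block of nvec vectors (nvec a power of two) must start at an nvec-aligned position in the msi_irq_in_use bitmap declared here. A small standalone sketch of that rule, with a plain bit mask standing in for the kernel bitmap and its find_next_zero_bit()/find_next_bit() helpers (occupancy pattern is made up for illustration):

#include <stdio.h>

#define MAX_MSI_IRQS	32

/* 1 = vector already in use; toy occupancy: bits 0, 1, 3, 4 taken */
static const unsigned int in_use = 0x0000001b;

/* Return the first free position >= pos that is aligned to nvec, or -1 */
static int find_aligned_free(unsigned int nvec, unsigned int pos)
{
	for (; pos < MAX_MSI_IRQS; pos += nvec) {
		unsigned int i, busy = 0;

		for (i = 0; i < nvec; i++)
			busy |= in_use & (1u << (pos + i));
		if (!busy)
			return pos;
	}
	return -1;
}

int main(void)
{
	printf("1 vector  -> pos %d\n", find_aligned_free(1, 0));	/* 2 */
	printf("4 vectors -> pos %d\n", find_aligned_free(4, 0));	/* 8 */
	return 0;
}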
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 0a648af8953..df8caec5978 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -133,8 +133,8 @@ config HOTPLUG_PCI_RPA_DLPAR
To compile this driver as a module, choose M here: the
module will be called rpadlpar_io.
-
- When in doubt, say N.
+
+ When in doubt, say N.
config HOTPLUG_PCI_SGI
tristate "SGI PCI Hotplug Support"
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 47ec8c80e16..3e6532b945c 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -31,7 +31,7 @@ pci_hotplug-objs += cpci_hotplug_core.o \
cpci_hotplug_pci.o
endif
ifdef CONFIG_ACPI
-pci_hotplug-objs += acpi_pcihp.o
+pci_hotplug-objs += acpi_pcihp.o
endif
cpqphp-objs := cpqphp_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2a47e82821d..a94d850ae22 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,7 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+ flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
}
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
@@ -411,13 +411,10 @@ EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
static int pcihp_is_ejectable(acpi_handle handle)
{
acpi_status status;
- acpi_handle tmp;
unsigned long long removable;
- status = acpi_get_handle(handle, "_ADR", &tmp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "_ADR"))
return 0;
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "_EJ0"))
return 1;
status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
if (ACPI_SUCCESS(status) && removable)
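The acpi_has_method() calls above replace the old probe-and-discard pattern: the helper simply reports whether acpi_get_handle() can resolve the named object under the given handle. A sketch of that equivalence, assuming the usual ACPICA signatures:

#include <linux/acpi.h>

/* Sketch only: the kernel's acpi_has_method() has this shape */
static bool has_method(acpi_handle handle, char *name)
{
	acpi_handle tmp;

	return ACPI_SUCCESS(acpi_get_handle(handle, name, &tmp));
}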
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index f4e02892466..1592dbe4f90 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -39,16 +39,6 @@
#include <linux/mutex.h>
#include <linux/pci_hotplug.h>
-#define dbg(format, arg...) \
- do { \
- if (acpiphp_debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
- } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-
struct acpiphp_context;
struct acpiphp_bridge;
struct acpiphp_slot;
@@ -186,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
/* variables */
-extern bool acpiphp_debug;
extern bool acpiphp_disabled;
#endif /* _ACPIPHP_H */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index bf2203ef130..dca66bc4457 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -31,6 +31,8 @@
*
*/
+#define pr_fmt(fmt) "acpiphp: " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -43,12 +45,9 @@
#include <linux/smp.h>
#include "acpiphp.h"
-#define MY_NAME "acpiphp"
-
/* name size which is used for entries in pcihpfs */
#define SLOT_NAME_SIZE 21 /* {_SUN} */
-bool acpiphp_debug;
bool acpiphp_disabled;
/* local variables */
@@ -61,9 +60,7 @@ static struct acpiphp_attention_info *attention_info;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(disable, "disable acpiphp driver");
-module_param_named(debug, acpiphp_debug, bool, 0644);
module_param_named(disable, acpiphp_disabled, bool, 0444);
/* export the attention callback registration methods */
@@ -114,7 +111,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
* @info: must match the pointer used to register
*
* Description: This is used to un-register a hardware specific acpi
- * driver that manipulates the attention LED. The pointer to the
+ * driver that manipulates the attention LED. The pointer to the
* info struct must be the same as the one used to set it.
*/
int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
@@ -139,7 +136,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* enable the specified slot */
return acpiphp_enable_slot(slot->acpi_slot);
@@ -156,7 +153,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* disable the specified slot */
return acpiphp_disable_and_eject_slot(slot->acpi_slot);
@@ -172,20 +169,21 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
* was registered with us. This allows hardware specific
* ACPI implementations to blink the light for us.
*/
- static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
- {
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
int retval = -ENODEV;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
-
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
+
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->set_attn(hotplug_slot, status);
module_put(attention_info->owner);
} else
attention_info = NULL;
return retval;
- }
-
+}
+
/**
* get_power_status - get power status of a slot
@@ -199,7 +197,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_power_status(slot->acpi_slot);
@@ -221,7 +219,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval = -EINVAL;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->get_attn(hotplug_slot, value);
@@ -244,7 +243,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_latch_status(slot->acpi_slot);
@@ -264,7 +263,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_adapter_status(slot->acpi_slot);
@@ -279,7 +278,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot);
kfree(slot);
@@ -322,11 +321,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
if (retval == -EBUSY)
goto error_hpslot;
if (retval) {
- err("pci_hp_register failed with error %d\n", retval);
+ pr_err("pci_hp_register failed with error %d\n", retval);
goto error_hpslot;
- }
+ }
- info("Slot [%s] registered\n", slot_name(slot));
+ pr_info("Slot [%s] registered\n", slot_name(slot));
return 0;
error_hpslot:
@@ -343,17 +342,17 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
struct slot *slot = acpiphp_slot->slot;
int retval = 0;
- info("Slot [%s] unregistered\n", slot_name(slot));
+ pr_info("Slot [%s] unregistered\n", slot_name(slot));
retval = pci_hp_deregister(slot->hotplug_slot);
if (retval)
- err("pci_hp_deregister failed with error %d\n", retval);
+ pr_err("pci_hp_deregister failed with error %d\n", retval);
}
void __init acpiphp_init(void)
{
- info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
acpiphp_disabled ? ", disabled by user; please report a bug"
: "");
}
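With pr_fmt() defined before the printk helpers are used, every pr_info()/pr_err()/pr_debug() call in the file picks up the "acpiphp: " prefix automatically, which is what makes the old MY_NAME-based dbg()/err()/info()/warn() macros redundant. A standalone sketch of the mechanism, with printf standing in for printk:

#include <stdio.h>

/* Must be defined before the helpers that expand it, as in printk.h */
#define pr_fmt(fmt)	"acpiphp: " fmt

#define pr_info(fmt, ...)	printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Slot [%s] registered\n", "3");
	pr_err("pci_hp_register failed with error %d\n", -16);
	return 0;
}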
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1ea75236a15..1cf605f6767 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -39,6 +39,8 @@
 * bus. It loses the refcount when the driver unloads.
*/
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
#include <linux/init.h>
#include <linux/module.h>
@@ -58,8 +60,6 @@ static LIST_HEAD(bridge_list);
static DEFINE_MUTEX(bridge_mutex);
static DEFINE_MUTEX(acpiphp_context_lock);
-#define MY_NAME "acpiphp_glue"
-
static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
@@ -325,7 +325,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
list_add_tail(&slot->node, &bridge->slots);
- /* Register slots for ejectable funtions only. */
+ /* Register slots for ejectable functions only. */
if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
unsigned long long sun;
int retval;
@@ -335,7 +335,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
if (ACPI_FAILURE(status))
sun = bridge->nr_slots;
- dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+ pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
sun, pci_domain_nr(pbus), pbus->number, device);
retval = acpiphp_register_hotplug_slot(slot, sun);
@@ -343,10 +343,10 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
slot->slot = NULL;
bridge->nr_slots--;
if (retval == -EBUSY)
- warn("Slot %llu already registered by another "
+ pr_warn("Slot %llu already registered by another "
"hotplug driver\n", sun);
else
- warn("acpiphp_register_hotplug_slot failed "
+ pr_warn("acpiphp_register_hotplug_slot failed "
"(err code = 0x%x)\n", retval);
}
/* Even if the slot registration fails, we can still use it. */
@@ -369,7 +369,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
if (register_hotplug_dock_device(handle,
&acpiphp_dock_ops, context,
acpiphp_dock_init, acpiphp_dock_release))
- dbg("failed to register dock device\n");
+ pr_debug("failed to register dock device\n");
}
/* install notify handler */
@@ -427,7 +427,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
ACPI_SYSTEM_NOTIFY,
handle_hotplug_event);
if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
+ pr_err("failed to remove notify handler\n");
}
}
if (slot->slot)
@@ -826,8 +826,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
- dbg("%s: Bus check notify on %s\n", __func__, objname);
- dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+ pr_debug("%s: Bus check notify on %s\n", __func__, objname);
+ pr_debug("%s: re-enumerating slots under %s\n",
+ __func__, objname);
if (bridge) {
acpiphp_check_bridge(bridge);
} else {
@@ -841,7 +842,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
- dbg("%s: Device check notify on %s\n", __func__, objname);
+ pr_debug("%s: Device check notify on %s\n", __func__, objname);
if (bridge) {
acpiphp_check_bridge(bridge);
} else {
@@ -862,7 +863,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
- dbg("%s: Device eject notify on %s\n", __func__, objname);
+ pr_debug("%s: Device eject notify on %s\n", __func__, objname);
acpiphp_disable_and_eject_slot(func->slot);
break;
}
@@ -871,21 +872,17 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
put_bridge(bridge);
}
-static void hotplug_event_work(struct work_struct *work)
+static void hotplug_event_work(void *data, u32 type)
{
- struct acpiphp_context *context;
- struct acpi_hp_work *hp_work;
+ struct acpiphp_context *context = data;
+ acpi_handle handle = context->handle;
- hp_work = container_of(work, struct acpi_hp_work, work);
- context = hp_work->context;
acpi_scan_lock_acquire();
- hotplug_event(hp_work->handle, hp_work->type, context);
+ hotplug_event(handle, type, context);
acpi_scan_lock_release();
- acpi_evaluate_hotplug_ost(hp_work->handle, hp_work->type,
- ACPI_OST_SC_SUCCESS, NULL);
- kfree(hp_work); /* allocated in handle_hotplug_event() */
+ acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
put_bridge(context->func.parent);
}
@@ -936,10 +933,10 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_lock(&acpiphp_context_lock);
context = acpiphp_get_context(handle);
- if (context) {
+ if (context && !WARN_ON(context->handle != handle)) {
get_bridge(context->func.parent);
acpiphp_put_context(context);
- alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
+ acpi_hotplug_execute(hotplug_event_work, context, type);
mutex_unlock(&acpiphp_context_lock);
return;
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2f5786c8522..ecfac7e72d9 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -43,23 +45,11 @@
#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension"
-static bool debug;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, " Debugging mode enabled or not");
-#define MY_NAME "acpiphp_ibm"
-
-#undef dbg
-#define dbg(format, arg...) \
-do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
-} while (0)
#define FOUND_APCI 0x61504349
/* these are the names for the IBM ACPI pseudo-device */
@@ -126,7 +116,7 @@ static struct bin_attribute ibm_apci_table_attr = {
.read = ibm_read_apci_table,
.write = NULL,
};
-static struct acpiphp_attention_info ibm_attention_info =
+static struct acpiphp_attention_info ibm_attention_info =
{
.set_attn = ibm_set_attention_status,
.get_attn = ibm_get_attention_status,
@@ -181,15 +171,15 @@ ibm_slot_done:
*/
static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
{
- union acpi_object args[2];
+ union acpi_object args[2];
struct acpi_object_list params = { .pointer = args, .count = 2 };
- acpi_status stat;
+ acpi_status stat;
unsigned long long rc;
union apci_descriptor *ibm_slot;
ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
- dbg("%s: set slot %d (%d) attention status to %d\n", __func__,
+ pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
(status ? 1 : 0));
@@ -202,10 +192,10 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
if (ACPI_FAILURE(stat)) {
- err("APLS evaluation failed: 0x%08x\n", stat);
+ pr_err("APLS evaluation failed: 0x%08x\n", stat);
return -ENODEV;
} else if (!rc) {
- err("APLS method failed: 0x%08llx\n", rc);
+ pr_err("APLS method failed: 0x%08llx\n", rc);
return -ERANGE;
}
return 0;
@@ -218,7 +208,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
*
* Description: This method is registered with the acpiphp module as a
* callback to do the device specific task of getting the LED status.
- *
+ *
* Because there is no direct method of getting the LED status directly
* from an ACPI call, we read the aPCI table and parse out our
* slot descriptor to read the status from that.
@@ -234,7 +224,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status)
else
*status = 0;
- dbg("%s: get slot %d (%d) attention status is %d\n", __func__,
+ pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
*status);
@@ -266,10 +256,10 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
u8 subevent = event & 0xf0;
struct notification *note = context;
- dbg("%s: Received notification %02x\n", __func__, event);
+ pr_debug("%s: Received notification %02x\n", __func__, event);
if (subevent == 0x80) {
- dbg("%s: generationg bus event\n", __func__);
+ pr_debug("%s: generating bus event\n", __func__);
acpi_bus_generate_netlink_event(note->device->pnp.device_class,
dev_name(&note->device->dev),
note->event, detail);
@@ -301,7 +291,7 @@ static int ibm_get_table_from_acpi(char **bufp)
status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- err("%s: APCI evaluation failed\n", __func__);
+ pr_err("%s: APCI evaluation failed\n", __func__);
return -ENODEV;
}
@@ -309,13 +299,13 @@ static int ibm_get_table_from_acpi(char **bufp)
if (!(package) ||
(package->type != ACPI_TYPE_PACKAGE) ||
!(package->package.elements)) {
- err("%s: Invalid APCI object\n", __func__);
+ pr_err("%s: Invalid APCI object\n", __func__);
goto read_table_done;
}
for(size = 0, i = 0; i < package->package.count; i++) {
if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
- err("%s: Invalid APCI element %d\n", __func__, i);
+ pr_err("%s: Invalid APCI element %d\n", __func__, i);
goto read_table_done;
}
size += package->package.elements[i].buffer.length;
@@ -325,7 +315,7 @@ static int ibm_get_table_from_acpi(char **bufp)
goto read_table_done;
lbuf = kzalloc(size, GFP_KERNEL);
- dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+ pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
__func__, package->package.count, size, lbuf);
if (lbuf) {
@@ -370,8 +360,8 @@ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
{
int bytes_read = -EINVAL;
char *table = NULL;
-
- dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+ pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
if (pos == 0) {
bytes_read = ibm_get_table_from_acpi(&table);
@@ -397,13 +387,13 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
acpi_handle *phandle = (acpi_handle *)context;
- acpi_status status;
+ acpi_status status;
struct acpi_device_info *info;
int retval = 0;
status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status)) {
- err("%s: Failed to get device information status=0x%x\n",
+ pr_err("%s: Failed to get device information status=0x%x\n",
__func__, status);
return retval;
}
@@ -411,11 +401,11 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
if (info->current_status && (info->valid & ACPI_VALID_HID) &&
(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
!strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
- dbg("found hardware: %s, handle: %p\n",
+ pr_debug("found hardware: %s, handle: %p\n",
info->hardware_id.string, handle);
*phandle = handle;
/* returning non-zero causes the search to stop
- * and returns this value to the caller of
+ * and returns this value to the caller of
* acpi_walk_namespace, but it also causes some warnings
* in the acpi debug code to print...
*/
@@ -432,18 +422,18 @@ static int __init ibm_acpiphp_init(void)
struct acpi_device *device;
struct kobject *sysdir = &pci_slots_kset->kobj;
- dbg("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
&ibm_acpi_handle, NULL) != FOUND_APCI) {
- err("%s: acpi_walk_namespace failed\n", __func__);
+ pr_err("%s: acpi_walk_namespace failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
- dbg("%s: found IBM aPCI device\n", __func__);
+ pr_debug("%s: found IBM aPCI device\n", __func__);
if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
- err("%s: acpi_bus_get_device failed\n", __func__);
+ pr_err("%s: acpi_bus_get_device failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
@@ -457,7 +447,7 @@ static int __init ibm_acpiphp_init(void)
ACPI_DEVICE_NOTIFY, ibm_handle_events,
&ibm_note);
if (ACPI_FAILURE(status)) {
- err("%s: Failed to register notification handler\n",
+ pr_err("%s: Failed to register notification handler\n",
__func__);
retval = -EBUSY;
goto init_cleanup;
@@ -479,17 +469,17 @@ static void __exit ibm_acpiphp_exit(void)
acpi_status status;
struct kobject *sysdir = &pci_slots_kset->kobj;
- dbg("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (acpiphp_unregister_attention(&ibm_attention_info))
- err("%s: attention info deregistration failed", __func__);
+ pr_err("%s: attention info deregistration failed", __func__);
status = acpi_remove_notify_handler(
ibm_acpi_handle,
ACPI_DEVICE_NOTIFY,
ibm_handle_events);
if (ACPI_FAILURE(status))
- err("%s: Notification handler removal failed\n", __func__);
+ pr_err("%s: Notification handler removal failed\n", __func__);
/* remove the /sys entries */
sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
}
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 2b4c412f94c..00c81a3cefc 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -46,7 +46,7 @@
do { \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index d8add34177f..d3add9819f6 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -39,7 +39,7 @@ extern int cpci_debug;
do { \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index a6a71c41cdf..7536eef620b 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -53,9 +53,9 @@
#define dbg(format, arg...) \
do { \
- if(debug) \
+ if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while(0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 449b4bbc830..e8c4a7ccf57 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -48,9 +48,9 @@
#define dbg(format, arg...) \
do { \
- if(debug) \
+ if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while(0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -285,7 +285,7 @@ static struct pci_device_id zt5550_hc_pci_tbl[] = {
{ 0, }
};
MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl);
-
+
static struct pci_driver zt5550_hc_driver = {
.name = "zt5550_hc",
.id_table = zt5550_hc_pci_tbl,
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h
index bebc6060a55..9a57fda5348 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.h
+++ b/drivers/pci/hotplug/cpcihp_zt5550.h
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -55,7 +55,7 @@
#define HC_CMD_REG 0x0C
#define ARB_CONFIG_GNT_REG 0x10
#define ARB_CONFIG_CFG_REG 0x12
-#define ARB_CONFIG_REG 0x10
+#define ARB_CONFIG_REG 0x10
#define ISOL_CONFIG_REG 0x18
#define FAULT_STATUS_REG 0x20
#define FAULT_CONFIG_REG 0x24
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c8eaeb43fa5..31273e155e6 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -862,10 +862,10 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- /* Check for the proper subsystem ID's
+ /* Check for the proper subsystem IDs
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
- * Also Intel HPC's may have RID=0.
+ * Also Intel HPCs may have RID=0.
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index d282019cda5..11845b79679 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1231,7 +1231,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
/* Only if mode change...*/
if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
- ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
+ ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
@@ -1828,7 +1828,7 @@ static void interrupt_event_handler(struct controller *ctrl)
if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
dbg("button pressed\n");
- } else if (ctrl->event_queue[loop].event_type ==
+ } else if (ctrl->event_queue[loop].event_type ==
INT_BUTTON_CANCEL) {
dbg("button cancel\n");
del_timer(&p_slot->task_event);
@@ -2411,11 +2411,11 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- /* find range of busses to use */
+ /* find range of buses to use */
dbg("find ranges of buses to use\n");
bus_node = get_max_resource(&(resources->bus_head), 1);
- /* If we don't have any busses to allocate, we can't continue */
+ /* If we don't have any buses to allocate, we can't continue */
if (!bus_node)
return -ENOMEM;
@@ -2900,7 +2900,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
- * alread mapped, set this one the same */
+ * already mapped, set this one the same */
if (temp_byte && resources->irqs &&
(resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 09801c6945c..6e4a12c91ad 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -291,7 +291,7 @@ int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 s
*
* Reads configuration for all slots in a PCI bus and saves info.
*
- * Note: For non-hot plug busses, the slot # saved is the device #
+ * Note: For non-hot plug buses, the slot # saved is the device #
*
* returns 0 if success
*/
@@ -455,7 +455,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
* cpqhp_save_slot_config
*
* Saves configuration info for all PCI devices in a given slot
- * including subordinate busses.
+ * including subordinate buses.
*
* returns 0 if success
*/
@@ -1556,4 +1556,3 @@ void cpqhp_destroy_board_resources (struct pci_func * func)
kfree(tres);
}
}
-
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 8c5b25871d0..e3e46a7b3ee 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -59,7 +59,7 @@ extern int ibmphp_debug;
/************************************************************
-* RESOURE TYPE *
+* RESOURCE TYPE *
************************************************************/
#define EBDA_RSRC_TYPE_MASK 0x03
@@ -103,7 +103,7 @@ extern int ibmphp_debug;
//--------------------------------------------------------------
struct rio_table_hdr {
- u8 ver_num;
+ u8 ver_num;
u8 scal_count;
u8 riodev_count;
u16 offset;
@@ -127,7 +127,7 @@ struct scal_detail {
};
//--------------------------------------------------------------
-// RIO DETAIL
+// RIO DETAIL
//--------------------------------------------------------------
struct rio_detail {
@@ -152,7 +152,7 @@ struct opt_rio {
u8 first_slot_num;
u8 middle_num;
struct list_head opt_rio_list;
-};
+};
struct opt_rio_lo {
u8 rio_type;
@@ -161,7 +161,7 @@ struct opt_rio_lo {
u8 middle_num;
u8 pack_count;
struct list_head opt_rio_lo_list;
-};
+};
/****************************************************************
* HPC DESCRIPTOR NODE *
@@ -574,7 +574,7 @@ void ibmphp_hpc_stop_poll_thread(void);
#define HPC_CTLR_IRQ_PENDG 0x80
//----------------------------------------------------------------------------
-// HPC_CTLR_WROKING status return codes
+// HPC_CTLR_WORKING status return codes
//----------------------------------------------------------------------------
#define HPC_CTLR_WORKING_NO 0x00
#define HPC_CTLR_WORKING_YES 0x01
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index cbd72d81d25..efdc13adbe4 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -58,7 +58,7 @@ MODULE_DESCRIPTION (DRIVER_DESC);
struct pci_bus *ibmphp_pci_bus;
static int max_slots;
-static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS
+static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
* tables don't provide default info for empty slots */
static int init_flag;
@@ -71,20 +71,20 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
return get_max_adapter_speed_1 (hs, value, 1);
}
*/
-static inline int get_cur_bus_info(struct slot **sl)
+static inline int get_cur_bus_info(struct slot **sl)
{
int rc = 1;
struct slot * slot_cur = *sl;
debug("options = %x\n", slot_cur->ctrl->options);
- debug("revision = %x\n", slot_cur->ctrl->revision);
+ debug("revision = %x\n", slot_cur->ctrl->revision);
- if (READ_BUS_STATUS(slot_cur->ctrl))
+ if (READ_BUS_STATUS(slot_cur->ctrl))
rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL);
-
- if (rc)
+
+ if (rc)
return rc;
-
+
slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus);
if (READ_BUS_MODE(slot_cur->ctrl))
slot_cur->bus_on->current_bus_mode =
@@ -96,7 +96,7 @@ static inline int get_cur_bus_info(struct slot **sl)
slot_cur->busstatus,
slot_cur->bus_on->current_speed,
slot_cur->bus_on->current_bus_mode);
-
+
*sl = slot_cur;
return 0;
}
@@ -104,8 +104,8 @@ static inline int get_cur_bus_info(struct slot **sl)
static inline int slot_update(struct slot **sl)
{
int rc;
- rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
- if (rc)
+ rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
+ if (rc)
return rc;
if (!init_flag)
rc = get_cur_bus_info(sl);
@@ -172,7 +172,7 @@ int ibmphp_init_devno(struct slot **cur_slot)
debug("(*cur_slot)->irq[3] = %x\n",
(*cur_slot)->irq[3]);
- debug("rtable->exlusive_irqs = %x\n",
+ debug("rtable->exclusive_irqs = %x\n",
rtable->exclusive_irqs);
debug("rtable->slots[loop].irq[0].bitmap = %x\n",
rtable->slots[loop].irq[0].bitmap);
@@ -271,7 +271,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
else
rc = -ENODEV;
}
- } else
+ } else
rc = -ENODEV;
ibmphp_unlock_operations();
@@ -288,7 +288,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
(ulong) hotplug_slot, (ulong) value);
-
+
ibmphp_lock_operations();
if (hotplug_slot) {
pslot = hotplug_slot->private;
@@ -406,14 +406,14 @@ static int get_max_bus_speed(struct slot *slot)
ibmphp_lock_operations();
mode = slot->supported_bus_mode;
- speed = slot->supported_speed;
+ speed = slot->supported_speed;
ibmphp_unlock_operations();
switch (speed) {
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
+ if (mode == BUS_MODE_PCIX)
speed += 0x01;
break;
case BUS_SPEED_100:
@@ -515,13 +515,13 @@ static int __init init_ops(void)
debug("BEFORE GETTING SLOT STATUS, slot # %x\n",
slot_cur->number);
- if (slot_cur->ctrl->revision == 0xFF)
+ if (slot_cur->ctrl->revision == 0xFF)
if (get_ctrl_revision(slot_cur,
&slot_cur->ctrl->revision))
return -1;
- if (slot_cur->bus_on->current_speed == 0xFF)
- if (get_cur_bus_info(&slot_cur))
+ if (slot_cur->bus_on->current_speed == 0xFF)
+ if (get_cur_bus_info(&slot_cur))
return -1;
get_max_bus_speed(slot_cur);
@@ -539,8 +539,8 @@ static int __init init_ops(void)
debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status));
debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status));
- if ((SLOT_PWRGD(slot_cur->status)) &&
- !(SLOT_PRESENT(slot_cur->status)) &&
+ if ((SLOT_PWRGD(slot_cur->status)) &&
+ !(SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status))) {
debug("BEFORE POWER OFF COMMAND\n");
rc = power_off(slot_cur);
@@ -581,13 +581,13 @@ static int validate(struct slot *slot_cur, int opn)
switch (opn) {
case ENABLE:
- if (!(SLOT_PWRGD(slot_cur->status)) &&
- (SLOT_PRESENT(slot_cur->status)) &&
+ if (!(SLOT_PWRGD(slot_cur->status)) &&
+ (SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status)))
return 0;
break;
case DISABLE:
- if ((SLOT_PWRGD(slot_cur->status)) &&
+ if ((SLOT_PWRGD(slot_cur->status)) &&
(SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status)))
return 0;
@@ -617,7 +617,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
err("out of system memory\n");
return -ENOMEM;
}
-
+
info->power_status = SLOT_PWRGD(slot_cur->status);
info->attention_status = SLOT_ATTN(slot_cur->status,
slot_cur->ext_status);
@@ -638,7 +638,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
+ if (mode == BUS_MODE_PCIX)
bus_speed += 0x01;
else if (mode == BUS_MODE_PCI)
;
@@ -654,8 +654,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
}
bus->cur_bus_speed = bus_speed;
- // To do: bus_names
-
+ // To do: bus_names
+
rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
kfree(info);
return rc;
@@ -729,8 +729,8 @@ static void ibm_unconfigure_device(struct pci_func *func)
}
/*
- * The following function is to fix kernel bug regarding
- * getting bus entries, here we manually add those primary
+ * The following function is to fix kernel bug regarding
+ * getting bus entries, here we manually add those primary
* bus entries to kernel bus structure whenever apply
*/
static u8 bus_structure_fixup(u8 busno)
@@ -814,7 +814,7 @@ static int ibm_configure_device(struct pci_func *func)
}
/*******************************************************
- * Returns whether the bus is empty or not
+ * Returns whether the bus is empty or not
*******************************************************/
static int is_bus_empty(struct slot * slot_cur)
{
@@ -842,7 +842,7 @@ static int is_bus_empty(struct slot * slot_cur)
}
/***********************************************************
- * If the HPC permits and the bus currently empty, tries to set the
+ * If the HPC permits and the bus currently empty, tries to set the
* bus speed and mode at the maximum card and bus capability
* Parameters: slot
* Returns: bus is set (0) or error code
@@ -856,7 +856,7 @@ static int set_bus(struct slot * slot_cur)
static struct pci_device_id ciobx[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) },
{ },
- };
+ };
debug("%s - entry slot # %d\n", __func__, slot_cur->number);
if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) {
@@ -877,7 +877,7 @@ static int set_bus(struct slot * slot_cur)
else if (!SLOT_BUS_MODE(slot_cur->ext_status))
/* if max slot/bus capability is 66 pci
and there's no bus mode mismatch, then
- the adapter supports 66 pci */
+ the adapter supports 66 pci */
cmd = HPC_BUS_66CONVMODE;
else
cmd = HPC_BUS_33CONVMODE;
@@ -930,7 +930,7 @@ static int set_bus(struct slot * slot_cur)
return -EIO;
}
}
- /* This is for x440, once Brandon fixes the firmware,
+ /* This is for x440, once Brandon fixes the firmware,
will not need this delay */
msleep(1000);
debug("%s -Exit\n", __func__);
@@ -938,9 +938,9 @@ static int set_bus(struct slot * slot_cur)
}
/* This routine checks the bus limitations that the slot is on from the BIOS.
- * This is used in deciding whether or not to power up the slot.
+ * This is used in deciding whether or not to power up the slot.
* (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on
- * same bus)
+ * same bus)
* Parameters: slot
* Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus
*/
@@ -986,7 +986,7 @@ static int check_limitations(struct slot *slot_cur)
static inline void print_card_capability(struct slot *slot_cur)
{
info("capability of the card is ");
- if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
+ if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
info(" 133 MHz PCI-X\n");
else if ((slot_cur->ext_status & CARD_INFO) == PCIX66)
info(" 66 MHz PCI-X\n");
@@ -1020,7 +1020,7 @@ static int enable_slot(struct hotplug_slot *hs)
}
attn_LED_blink(slot_cur);
-
+
rc = set_bus(slot_cur);
if (rc) {
err("was not able to set the bus\n");
@@ -1082,7 +1082,7 @@ static int enable_slot(struct hotplug_slot *hs)
rc = slot_update(&slot_cur);
if (rc)
goto error_power;
-
+
rc = -EINVAL;
if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) {
err("power fault occurred trying to power up...\n");
@@ -1093,7 +1093,7 @@ static int enable_slot(struct hotplug_slot *hs)
"speed and card capability\n");
print_card_capability(slot_cur);
goto error_power;
- }
+ }
/* Don't think this case will happen after above checks...
* but just in case, for paranoia sake */
if (!(SLOT_POWER(slot_cur->status))) {
@@ -1144,7 +1144,7 @@ static int enable_slot(struct hotplug_slot *hs)
ibmphp_print_test();
rc = ibmphp_update_slot_info(slot_cur);
exit:
- ibmphp_unlock_operations();
+ ibmphp_unlock_operations();
return rc;
error_nopower:
@@ -1180,7 +1180,7 @@ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int rc;
-
+
ibmphp_lock_operations();
rc = ibmphp_do_disable_slot(slot);
ibmphp_unlock_operations();
@@ -1192,12 +1192,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
int rc;
u8 flag;
- debug("DISABLING SLOT...\n");
-
+ debug("DISABLING SLOT...\n");
+
if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) {
return -ENODEV;
}
-
+
flag = slot_cur->flag;
slot_cur->flag = 1;
@@ -1210,7 +1210,7 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
attn_LED_blink(slot_cur);
if (slot_cur->func == NULL) {
- /* We need this for fncs's that were there on bootup */
+ /* We need this for functions that were there on bootup */
slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
if (!slot_cur->func) {
err("out of system memory\n");
@@ -1222,12 +1222,13 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
}
ibm_unconfigure_device(slot_cur->func);
-
- /* If we got here from latch suddenly opening on operating card or
- a power fault, there's no power to the card, so cannot
- read from it to determine what resources it occupied. This operation
- is forbidden anyhow. The best we can do is remove it from kernel
- lists at least */
+
+ /*
+ * If we got here from latch suddenly opening on operating card or
+ * a power fault, there's no power to the card, so cannot
+ * read from it to determine what resources it occupied. This operation
+ * is forbidden anyhow. The best we can do is remove it from kernel
+ * lists at least */
if (!flag) {
attn_off(slot_cur);
@@ -1264,7 +1265,7 @@ error:
rc = -EFAULT;
goto exit;
}
- if (flag)
+ if (flag)
ibmphp_update_slot_info(slot_cur);
goto exit;
}
@@ -1339,7 +1340,7 @@ static int __init ibmphp_init(void)
debug("AFTER Resource & EBDA INITIALIZATIONS\n");
max_slots = get_max_slots();
-
+
if ((rc = ibmphp_register_pci()))
goto error;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 9df78bc1454..bd044158b36 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,7 +123,7 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
static void __init print_bus_info (void)
{
struct bus_info *ptr;
-
+
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
@@ -131,7 +131,7 @@ static void __init print_bus_info (void)
debug ("%s - bus# = %x\n", __func__, ptr->busno);
debug ("%s - current_speed = %x\n", __func__, ptr->current_speed);
debug ("%s - controller_id = %x\n", __func__, ptr->controller_id);
-
+
debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv);
debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv);
debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix);
@@ -144,7 +144,7 @@ static void __init print_bus_info (void)
static void print_lo_info (void)
{
struct rio_detail *ptr;
- debug ("print_lo_info ----\n");
+ debug ("print_lo_info ----\n");
list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
@@ -176,7 +176,7 @@ static void __init print_ebda_pci_rsrc (void)
struct ebda_pci_rsrc *ptr;
list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
- debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
__func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
}
}
@@ -259,7 +259,7 @@ int __init ibmphp_access_ebda (void)
ebda_seg = readw (io_mem);
iounmap (io_mem);
debug ("returned ebda segment: %x\n", ebda_seg);
-
+
io_mem = ioremap(ebda_seg<<4, 1);
if (!io_mem)
return -ENOMEM;
@@ -310,7 +310,7 @@ int __init ibmphp_access_ebda (void)
re = readw (io_mem + sub_addr); /* next sub blk */
sub_addr += 2;
- rc_id = readw (io_mem + sub_addr); /* sub blk id */
+ rc_id = readw (io_mem + sub_addr); /* sub blk id */
sub_addr += 2;
if (rc_id != 0x5243)
@@ -330,7 +330,7 @@ int __init ibmphp_access_ebda (void)
debug ("info about hpc descriptor---\n");
debug ("hot blk format: %x\n", format);
debug ("num of controller: %x\n", num_ctlrs);
- debug ("offset of hpc data structure enteries: %x\n ", sub_addr);
+ debug ("offset of hpc data structure entries: %x\n ", sub_addr);
sub_addr = base + re; /* re sub blk */
/* FIXME: rc is never used/checked */
@@ -359,7 +359,7 @@ int __init ibmphp_access_ebda (void)
debug ("info about rsrc descriptor---\n");
debug ("format: %x\n", format);
debug ("num of rsrc: %x\n", num_entries);
- debug ("offset of rsrc data structure enteries: %x\n ", sub_addr);
+ debug ("offset of rsrc data structure entries: %x\n ", sub_addr);
hs_complete = 1;
} else {
@@ -376,7 +376,7 @@ int __init ibmphp_access_ebda (void)
rio_table_ptr->scal_count = readb (io_mem + offset + 1);
rio_table_ptr->riodev_count = readb (io_mem + offset + 2);
rio_table_ptr->offset = offset +3 ;
-
+
debug("info about rio table hdr ---\n");
debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ",
rio_table_ptr->ver_num, rio_table_ptr->scal_count,
@@ -440,12 +440,12 @@ static int __init ebda_rio_table (void)
rio_detail_ptr->chassis_num = readb (io_mem + offset + 14);
// debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status);
//create linked list of chassis
- if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
+ if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head);
- //create linked list of expansion box
- else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
+ //create linked list of expansion box
+ else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head);
- else
+ else
// not in my concern
kfree (rio_detail_ptr);
offset += 15;
@@ -456,7 +456,7 @@ static int __init ebda_rio_table (void)
}
/*
- * reorganizing linked list of chassis
+ * reorganizing linked list of chassis
*/
static struct opt_rio *search_opt_vg (u8 chassis_num)
{
@@ -464,7 +464,7 @@ static struct opt_rio *search_opt_vg (u8 chassis_num)
list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
if (ptr->chassis_num == chassis_num)
return ptr;
- }
+ }
return NULL;
}
@@ -472,7 +472,7 @@ static int __init combine_wpg_for_chassis (void)
{
struct opt_rio *opt_rio_ptr = NULL;
struct rio_detail *rio_detail_ptr = NULL;
-
+
list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
if (!opt_rio_ptr) {
@@ -484,14 +484,14 @@ static int __init combine_wpg_for_chassis (void)
opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num;
list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head);
- } else {
+ } else {
opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num);
- }
+ }
}
print_opt_vg ();
- return 0;
-}
+ return 0;
+}
/*
* reorganizing linked list of expansion box
@@ -502,7 +502,7 @@ static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
if (ptr->chassis_num == chassis_num)
return ptr;
- }
+ }
return NULL;
}
@@ -510,7 +510,7 @@ static int combine_wpg_for_expansion (void)
{
struct opt_rio_lo *opt_rio_lo_ptr = NULL;
struct rio_detail *rio_detail_ptr = NULL;
-
+
list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
if (!opt_rio_lo_ptr) {
@@ -522,22 +522,22 @@ static int combine_wpg_for_expansion (void)
opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num;
opt_rio_lo_ptr->pack_count = 1;
-
+
list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head);
- } else {
+ } else {
opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num);
opt_rio_lo_ptr->pack_count = 2;
- }
+ }
}
- return 0;
+ return 0;
}
-
+
/* Since we don't know the max slot number per each chassis, hence go
* through the list of all chassis to find out the range
- * Arguments: slot_num, 1st slot number of the chassis we think we are on,
- * var (0 = chassis, 1 = expansion box)
+ * Arguments: slot_num, 1st slot number of the chassis we think we are on,
+ * var (0 = chassis, 1 = expansion box)
*/
static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
{
@@ -547,7 +547,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
if (!var) {
list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
- if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
+ if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
rc = -ENODEV;
break;
}
@@ -569,7 +569,7 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num)
list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
//check to see if this slot_num belongs to expansion box
- if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
+ if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
return opt_lo_ptr;
}
return NULL;
@@ -580,8 +580,8 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
struct opt_rio *opt_vg_ptr;
list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
- //check to see if this slot_num belongs to chassis
- if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
+ //check to see if this slot_num belongs to chassis
+ if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
return opt_vg_ptr;
}
return NULL;
@@ -594,13 +594,13 @@ static u8 calculate_first_slot (u8 slot_num)
{
u8 first_slot = 1;
struct slot * slot_cur;
-
+
list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
if (slot_cur->ctrl) {
- if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
+ if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
first_slot = slot_cur->ctrl->ending_slot_num;
}
- }
+ }
return first_slot + 1;
}
@@ -622,11 +622,11 @@ static char *create_file_name (struct slot * slot_cur)
err ("Structure passed is empty\n");
return NULL;
}
-
+
slot_num = slot_cur->number;
memset (str, 0, sizeof(str));
-
+
if (rio_table_ptr) {
if (rio_table_ptr->ver_num == 3) {
opt_vg_ptr = find_chassis_num (slot_num);
@@ -660,7 +660,7 @@ static char *create_file_name (struct slot * slot_cur)
/* if both NULL and we DO have correct RIO table in BIOS */
return NULL;
}
- }
+ }
if (!flag) {
if (slot_cur->ctrl->ctlr_type == 4) {
first_slot = calculate_first_slot (slot_num);
@@ -798,7 +798,7 @@ static int __init ebda_rsrc_controller (void)
slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num);
slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num);
- // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
+ // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num);
if (!bus_info_ptr2) {
@@ -814,9 +814,9 @@ static int __init ebda_rsrc_controller (void)
bus_info_ptr1->index = bus_index++;
bus_info_ptr1->current_speed = 0xff;
bus_info_ptr1->current_bus_mode = 0xff;
-
+
bus_info_ptr1->controller_id = hpc_ptr->ctlr_id;
-
+
list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head);
} else {
@@ -851,7 +851,7 @@ static int __init ebda_rsrc_controller (void)
bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv;
bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix;
bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix;
- bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
+ bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
}
bus_ptr++;
}
@@ -864,7 +864,7 @@ static int __init ebda_rsrc_controller (void)
hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1);
hpc_ptr->irq = readb (io_mem + addr + 2);
addr += 3;
- debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
+ debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
hpc_ptr->u.pci_ctlr.bus,
hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq);
break;
@@ -932,7 +932,7 @@ static int __init ebda_rsrc_controller (void)
tmp_slot->supported_speed = 2;
else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX)
tmp_slot->supported_speed = 1;
-
+
if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP)
tmp_slot->supported_bus_mode = 1;
else
@@ -1000,7 +1000,7 @@ error_no_hpc:
return rc;
}
-/*
+/*
* map info (bus, devfun, start addr, end addr..) of i/o, memory,
* pfm from the physical addr to a list of resource.
*/
@@ -1057,7 +1057,7 @@ static int __init ebda_rsrc_rsrc (void)
addr += 10;
debug ("rsrc from mem or pfm ---\n");
- debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
@@ -1096,7 +1096,7 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num)
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
- if (ptr->busno == num)
+ if (ptr->busno == num)
return ptr;
}
return NULL;
@@ -1110,7 +1110,7 @@ int ibmphp_get_bus_index (u8 num)
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
- if (ptr->busno == num)
+ if (ptr->busno == num)
return ptr->index;
}
return -ENODEV;
@@ -1168,7 +1168,7 @@ static struct pci_device_id id_table[] = {
.subdevice = HPC_SUBSYSTEM_ID,
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
}, {}
-};
+};
MODULE_DEVICE_TABLE(pci, id_table);
@@ -1197,7 +1197,7 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
struct controller *ctrl;
debug ("inside ibmphp_probe\n");
-
+
list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
if (ctrl->ctlr_type == 1) {
if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
@@ -1210,4 +1210,3 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
}
return -ENODEV;
}
-
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index f59ed30512b..5fc7a089f53 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
{
u8 rc;
void __iomem *wpg_addr; // base addr + offset
- unsigned long wpg_data; // data to/from WPG LOHI format
+ unsigned long wpg_data; // data to/from WPG LOHI format
unsigned long ultemp;
unsigned long data; // actual data HILO format
int i;
@@ -351,7 +351,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
}
//------------------------------------------------------------
-// Read from ISA type HPC
+// Read from ISA type HPC
//------------------------------------------------------------
static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset)
{
@@ -372,7 +372,7 @@ static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data)
{
u16 start_address;
u16 port_address;
-
+
start_address = ctlr_ptr->u.isa_ctlr.io_start;
port_address = start_address + (u16) offset;
outb (data, port_address);
@@ -656,11 +656,11 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
//--------------------------------------------------------------------
// cleanup
//--------------------------------------------------------------------
-
+
// remove physical to logical address mapping
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
iounmap (wpg_bbar);
-
+
free_hpc_access ();
debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
@@ -835,7 +835,7 @@ static int poll_hpc(void *data)
down (&semOperations);
switch (poll_state) {
- case POLL_LATCH_REGISTER:
+ case POLL_LATCH_REGISTER:
oldlatchlow = curlatchlow;
ctrl_count = 0x00;
list_for_each (pslotlist, &ibmphp_slot_head) {
@@ -892,16 +892,16 @@ static int poll_hpc(void *data)
if (kthread_should_stop())
goto out_sleep;
-
+
down (&semOperations);
-
+
if (poll_count >= POLL_LATCH_CNT) {
poll_count = 0;
poll_state = POLL_SLOTS;
} else
poll_state = POLL_LATCH_REGISTER;
break;
- }
+ }
/* give up the hardware semaphore */
up (&semOperations);
/* sleep for a short time just for good measure */
@@ -958,7 +958,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
// bit 5 - HPC_SLOT_PWRGD
if ((pslot->status & 0x20) != (poldslot->status & 0x20))
// OFF -> ON: ignore, ON -> OFF: disable slot
- if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
+ if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
disable = 1;
// bit 6 - HPC_SLOT_BUS_SPEED
@@ -980,7 +980,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
pslot->status &= ~HPC_SLOT_POWER;
}
}
- // CLOSE -> OPEN
+ // CLOSE -> OPEN
else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD)
&& (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) {
disable = 1;
@@ -1075,7 +1075,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
debug ("before locking operations \n");
ibmphp_lock_operations ();
debug ("after locking operations \n");
-
+
// wait for poll thread to exit
debug ("before sem_exit down \n");
down (&sem_exit);
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index c60f5f3e838..639ea3a75e1 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1,8 +1,8 @@
/*
* IBM Hot Plug Controller Driver
- *
+ *
* Written By: Irene Zubarev, IBM Corporation
- *
+ *
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001,2002 IBM Corp.
*
@@ -42,7 +42,7 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno);
/*
* NOTE..... If BIOS doesn't provide default routing, we assign:
- * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
+ * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
* If adapter is bridged, then we assign 11 to it and devices behind it.
* We also assign the same irq numbers for multi function devices.
* These are PIC mode, so shouldn't matter n.e.ways (hopefully)
@@ -71,11 +71,11 @@ static void assign_alt_irq (struct pci_func * cur_func, u8 class_code)
* Configures the device to be added (will allocate needed resources if it
* can), the device can be a bridge or a regular pci device, can also be
* multi-functional
- *
+ *
* Input: function to be added
- *
+ *
* TO DO: The error case with Multifunction device or multi function bridge,
- * if there is an error, will need to go through all previous functions and
+ * if there is an error, will need to go through all previous functions and
* unconfigure....or can add some code into unconfigure_card....
*/
int ibmphp_configure_card (struct pci_func *func, u8 slotno)
@@ -98,7 +98,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func = func;
/* We only get bus and device from IRQ routing table. So at this point,
- * func->busno is correct, and func->device contains only device (at the 5
+ * func->busno is correct, and func->device contains only device (at the 5
* highest bits)
*/
@@ -151,7 +151,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func->device, cur_func->busno);
cleanup_count = 6;
goto error;
- }
+ }
cur_func->next = NULL;
function = 0x8;
break;
@@ -339,7 +339,7 @@ error:
}
/*
- * This function configures the pci BARs of a single device.
+ * This function configures the pci BARs of a single device.
* Input: pointer to the pci_func
* Output: configured PCI, 0, or error
*/
@@ -371,17 +371,17 @@ static int configure_device (struct pci_func *func)
for (count = 0; address[count]; count++) { /* for 6 BARs */
- /* not sure if i need this. per scott, said maybe need smth like this
+ /* not sure if i need this. per scott, said maybe need something like this
if devices don't adhere 100% to the spec, so don't want to write
to the reserved bits
- pcibios_read_config_byte(cur_func->busno, cur_func->device,
+ pcibios_read_config_byte(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, &tmp);
if (tmp & 0x01) // IO
- pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD);
else // Memory
- pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF);
*/
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
@@ -421,8 +421,8 @@ static int configure_device (struct pci_func *func)
return -EIO;
}
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start);
-
- /* _______________This is for debugging purposes only_____________________ */
+
+ /* _______________This is for debugging purposes only_____________________ */
debug ("b4 writing, the IO address is %x\n", func->io[count]->start);
pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
debug ("after writing.... the start address is %x\n", bar[count]);
@@ -484,7 +484,7 @@ static int configure_device (struct pci_func *func)
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start);
- /*_______________This is for debugging purposes only______________________________*/
+ /*_______________This is for debugging purposes only______________________________*/
debug ("b4 writing, start address is %x\n", func->pfmem[count]->start);
pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
debug ("after writing, start address is %x\n", bar[count]);
@@ -559,7 +559,7 @@ static int configure_device (struct pci_func *func)
/******************************************************************************
* This routine configures a PCI-2-PCI bridge and the functions behind it
* Parameters: pci_func
- * Returns:
+ * Returns:
******************************************************************************/
static int configure_bridge (struct pci_func **func_passed, u8 slotno)
{
@@ -622,7 +622,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number);
-
+
/* __________________For debugging purposes only __________________________________
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
debug ("sec_number after write/read is %x\n", sec_number);
@@ -644,7 +644,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
+ !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
@@ -670,7 +670,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("len[count] in IO = %x\n", len[count]);
bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
-
+
if (!bus_io[count]) {
err ("out of system memory\n");
retval = -ENOMEM;
@@ -735,7 +735,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
ibmphp_add_pfmem_from_mem (bus_pfmem[count]);
func->pfmem[count] = bus_pfmem[count];
} else {
- err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
+ err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
func->busno, func->device, len[count]);
kfree (mem_tmp);
kfree (bus_pfmem[count]);
@@ -805,7 +805,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("amount_needed->mem = %x\n", amount_needed->mem);
debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem);
- if (amount_needed->not_correct) {
+ if (amount_needed->not_correct) {
debug ("amount_needed is not correct\n");
for (count = 0; address[count]; count++) {
/* for 2 BARs */
@@ -830,7 +830,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
} else {
debug ("it wants %x IO behind the bridge\n", amount_needed->io);
io = kzalloc(sizeof(*io), GFP_KERNEL);
-
+
if (!io) {
err ("out of system memory\n");
retval = -ENOMEM;
@@ -959,7 +959,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
if (bus->noIORanges) {
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8);
- pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
/* _______________This is for debugging purposes only ____________________
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp);
@@ -980,7 +980,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
if (bus->noMemRanges) {
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16);
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16);
-
+
/* ____________________This is for debugging purposes only ________________________
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp);
debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
@@ -1017,7 +1017,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq);
if ((irq > 0x00) && (irq < 0x05))
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]);
- /*
+ /*
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR);
@@ -1071,7 +1071,7 @@ error:
* This function adds up the amount of resources needed behind the PPB bridge
* and passes it to the configure_bridge function
* Input: bridge function
- * Ouput: amount of resources needed
+ * Output: amount of resources needed
*****************************************************************************/
static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
{
@@ -1204,9 +1204,9 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
return amount;
}
-/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
- * upon bootup in the system, since we don't allocate func to such case, we need to read
- * the start addresses from pci config space and then find the corresponding entries in
+/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
+ * upon bootup in the system, since we don't allocate func to such case, we need to read
+ * the start addresses from pci config space and then find the corresponding entries in
* our resource lists. The functions return either 0, -ENODEV, or -1 (general failure)
* Change: we also call these functions even if we configured the card ourselves (i.e., not
* the bootup case), since it should work same way
@@ -1561,8 +1561,8 @@ static int unconfigure_boot_card (struct slot *slot_cur)
* unconfiguring the device
* TO DO: will probably need to add some code in case there was some resource,
* to remove it... this is from when we have errors in the configure_card...
- * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
- * Returns: 0, -1, -ENODEV
+ * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
+ * Returns: 0, -1, -ENODEV
*/
int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
{
@@ -1634,7 +1634,7 @@ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
* Input: bus and the amount of resources needed (we know we can assign those,
* since they've been checked already
* Output: bus added to the correct spot
- * 0, -1, error
+ * 0, -1, error
*/
static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno)
{
@@ -1650,7 +1650,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
err ("strange, cannot find bus which is supposed to be at the system... something is terribly wrong...\n");
return -ENODEV;
}
-
+
list_add (&bus->bus_list, &cur_bus->bus_list);
}
if (io) {
@@ -1679,7 +1679,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
}
if (pfmem) {
pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL);
- if (!pfmem_range) {
+ if (!pfmem_range) {
err ("out of system memory\n");
return -ENOMEM;
}
@@ -1726,4 +1726,3 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno)
return busno;
return 0xff;
}
-
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index e2dc289f767..a265acb2d51 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -72,7 +72,7 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8
static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr)
{
struct resource_node *rs;
-
+
if (!curr) {
err ("NULL passed to allocate\n");
return NULL;
@@ -128,7 +128,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
}
newrange->start = curr->start_addr;
newrange->end = curr->end_addr;
-
+
if (first_bus || (!num_ranges))
newrange->rangeno = 1;
else {
@@ -162,7 +162,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
newbus->rangePFMem = newrange;
if (first_bus)
newbus->noPFMemRanges = 1;
- else {
+ else {
debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
++newbus->noPFMemRanges;
fix_resources (newbus);
@@ -190,7 +190,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
* This is the Resource Management initialization function. It will go through
* the Resource list taken from EBDA and fill in this module's data structures
*
- * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
+ * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
* SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW
*
* Input: ptr to the head of the resource list from EBDA
@@ -382,7 +382,7 @@ int __init ibmphp_rsrc_init (void)
* pci devices' resources for the appropriate resource
*
* Input: type of the resource, range to add, current bus
- * Output: 0 or -1, bus and range ptrs
+ * Output: 0 or -1, bus and range ptrs
********************************************************************************/
static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
@@ -466,7 +466,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno)
switch (type) {
case MEM:
- if (bus_cur->firstMem)
+ if (bus_cur->firstMem)
res = bus_cur->firstMem;
break;
case PFMEM:
@@ -583,7 +583,7 @@ static void fix_resources (struct bus_node *bus_cur)
}
/*******************************************************************************
- * This routine adds a resource to the list of resources to the appropriate bus
+ * This routine adds a resource to the list of resources to the appropriate bus
* based on their resource type and sorted by their starting addresses. It assigns
* the ptrs to next and nextRange if needed.
*
@@ -605,11 +605,11 @@ int ibmphp_add_resource (struct resource_node *res)
err ("NULL passed to add\n");
return -ENODEV;
}
-
+
bus_cur = find_bus_wprev (res->busno, NULL, 0);
-
+
if (!bus_cur) {
- /* didn't find a bus, smth's wrong!!! */
+ /* didn't find a bus, something's wrong!!! */
debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
return -ENODEV;
}
@@ -648,7 +648,7 @@ int ibmphp_add_resource (struct resource_node *res)
if (!range_cur) {
switch (res->type) {
case IO:
- ++bus_cur->needIOUpdate;
+ ++bus_cur->needIOUpdate;
break;
case MEM:
++bus_cur->needMemUpdate;
@@ -659,13 +659,13 @@ int ibmphp_add_resource (struct resource_node *res)
}
res->rangeno = -1;
}
-
+
debug ("The range is %d\n", res->rangeno);
if (!res_start) {
/* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */
switch (res->type) {
case IO:
- bus_cur->firstIO = res;
+ bus_cur->firstIO = res;
break;
case MEM:
bus_cur->firstMem = res;
@@ -673,7 +673,7 @@ int ibmphp_add_resource (struct resource_node *res)
case PFMEM:
bus_cur->firstPFMem = res;
break;
- }
+ }
res->next = NULL;
res->nextRange = NULL;
} else {
@@ -770,7 +770,7 @@ int ibmphp_add_resource (struct resource_node *res)
* This routine will remove the resource from the list of resources
*
* Input: io, mem, and/or pfmem resource to be deleted
- * Ouput: modified resource list
+ * Output: modified resource list
* 0 or error code
****************************************************************************/
int ibmphp_remove_resource (struct resource_node *res)
@@ -825,7 +825,7 @@ int ibmphp_remove_resource (struct resource_node *res)
if (!res_cur) {
if (res->type == PFMEM) {
- /*
+ /*
* case where pfmem might be in the PFMemFromMem list
* so will also need to remove the corresponding mem
* entry
@@ -961,12 +961,12 @@ static struct range_node * find_range (struct bus_node *bus_cur, struct resource
}
/*****************************************************************************
- * This routine will check to make sure the io/mem/pfmem->len that the device asked for
+ * This routine will check to make sure the io/mem/pfmem->len that the device asked for
* can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL,
* otherwise, returns 0
*
* Input: resource
- * Ouput: the correct start and end address are inputted into the resource node,
+ * Output: the correct start and end address are inputted into the resource node,
* 0 or -EINVAL
*****************************************************************************/
int ibmphp_check_resource (struct resource_node *res, u8 bridge)
@@ -996,7 +996,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
bus_cur = find_bus_wprev (res->busno, NULL, 0);
if (!bus_cur) {
- /* didn't find a bus, smth's wrong!!! */
+ /* didn't find a bus, something's wrong!!! */
debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
return -EINVAL;
}
@@ -1066,7 +1066,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
break;
}
}
-
+
if (flag && len_cur == res->len) {
debug ("but we are not here, right?\n");
res->start = start_cur;
@@ -1118,10 +1118,10 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
if (res_prev) {
if (res_prev->rangeno != res_cur->rangeno) {
/* 1st device on this range */
- if ((res_cur->start != range->start) &&
+ if ((res_cur->start != range->start) &&
((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
- if ((range->start % tmp_divide) == 0) {
+ if ((range->start % tmp_divide) == 0) {
/* just perfect, starting address is divisible by length */
flag = 1;
len_cur = len_tmp;
@@ -1344,7 +1344,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
* This routine is called from remove_card if the card contained PPB.
* It will remove all the resources on the bus as well as the bus itself
* Input: Bus
- * Ouput: 0, -ENODEV
+ * Output: 0, -ENODEV
********************************************************************************/
int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
{
@@ -1353,7 +1353,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
struct bus_node *prev_bus;
int rc;
- prev_bus = find_bus_wprev (parent_busno, NULL, 0);
+ prev_bus = find_bus_wprev (parent_busno, NULL, 0);
if (!prev_bus) {
debug ("something terribly wrong. Cannot find parent bus to the one to remove\n");
@@ -1424,7 +1424,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
}
/******************************************************************************
- * This routine deletes the ranges from a given bus, and the entries from the
+ * This routine deletes the ranges from a given bus, and the entries from the
* parent's bus in the resources
* Input: current bus, previous bus
* Output: 0, -EINVAL
@@ -1453,7 +1453,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
if (bus_cur->noMemRanges) {
range_cur = bus_cur->rangeMem;
for (i = 0; i < bus_cur->noMemRanges; i++) {
- if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
return -EINVAL;
ibmphp_remove_resource (res);
@@ -1467,7 +1467,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
if (bus_cur->noPFMemRanges) {
range_cur = bus_cur->rangePFMem;
for (i = 0; i < bus_cur->noPFMemRanges; i++) {
- if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
return -EINVAL;
ibmphp_remove_resource (res);
@@ -1482,7 +1482,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
}
/*
- * find the resource node in the bus
+ * find the resource node in the bus
* Input: Resource needed, start address of the resource, type of resource
*/
int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag)
@@ -1512,7 +1512,7 @@ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resour
err ("wrong type of flag\n");
return -EINVAL;
}
-
+
while (res_cur) {
if (res_cur->start == start_address) {
*res = res_cur;
@@ -1718,7 +1718,7 @@ static int __init once_over (void)
} /* end for pfmem */
} /* end if */
} /* end list_for_each bus */
- return 0;
+ return 0;
}
int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem)
@@ -1760,9 +1760,9 @@ static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u
list_for_each (tmp, &gbuses) {
tmp_prev = tmp->prev;
bus_cur = list_entry (tmp, struct bus_node, bus_list);
- if (flag)
+ if (flag)
*prev = list_entry (tmp_prev, struct bus_node, bus_list);
- if (bus_cur->busno == bus_number)
+ if (bus_cur->busno == bus_number)
return bus_cur;
}
@@ -1776,7 +1776,7 @@ void ibmphp_print_test (void)
struct range_node *range;
struct resource_node *res;
struct list_head *tmp;
-
+
debug_pci ("*****************START**********************\n");
if ((!list_empty(&gbuses)) && flags) {
@@ -1906,7 +1906,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
return 1;
range_cur = range_cur->next;
}
-
+
return 0;
}
@@ -1920,7 +1920,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
* Returns: none
* Note: this function doesn't take into account IO restrictions etc,
* so will only work for bridges with no video/ISA devices behind them It
- * also will not work for onboard PPB's that can have more than 1 *bus
+ * also will not work for onboard PPBs that can have more than 1 *bus
* behind them All these are TO DO.
* Also need to add more error checkings... (from fnc returns etc)
*/
@@ -1963,7 +1963,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
case PCI_HEADER_TYPE_BRIDGE:
function = 0x8;
case PCI_HEADER_TYPE_MULTIBRIDGE:
- /* We assume here that only 1 bus behind the bridge
+ /* We assume here that only 1 bus behind the bridge
TO DO: add functionality for several:
temp = secondary;
while (temp < subordinate) {
@@ -1972,7 +1972,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
}
*/
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno);
- bus_sec = find_bus_wprev (sec_busno, NULL, 0);
+ bus_sec = find_bus_wprev (sec_busno, NULL, 0);
/* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */
if (!bus_sec) {
bus_sec = alloc_error_bus (NULL, sec_busno, 1);
@@ -2028,7 +2028,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
io->len = io->end - io->start + 1;
ibmphp_add_resource (io);
}
- }
+ }
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address);
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index ec20f74c898..cfa92a984e6 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -131,7 +131,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
}
module_put(slot->ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -177,7 +177,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
retval = ops->set_attention_status(slot->hotplug, attention);
module_put(ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -247,7 +247,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
retval = slot->ops->hardware_test(slot, test);
module_put(slot->ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -512,7 +512,7 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
* @hotplug: pointer to the slot whose info has changed
* @info: pointer to the info copy into the slot's info structure
*
- * @slot must have been registered with the pci
+ * @slot must have been registered with the pci
* hotplug subsystem previously with a call to pci_hp_register().
*
* Returns 0 if successful, anything else for an error.
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 541bbe6d534..21e865ded1d 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -180,5 +180,5 @@ static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
return 0;
}
-#endif /* CONFIG_ACPI */
+#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index ead7c534095..eddddd447d0 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
if (slot_detection_mode != PCIEHP_DETECT_ACPI)
return 0;
- if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
+ if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
return 0;
return -ENODEV;
}
@@ -78,7 +78,7 @@ static int __initdata dup_slot_id;
static int __initdata acpi_slot_detected;
static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
-/* Dummy driver for dumplicate name detection */
+/* Dummy driver for duplicate name detection */
static int __init dummy_probe(struct pcie_device *dev)
{
u32 slot_cap;
@@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev)
dup_slot_id++;
}
list_add_tail(&slot->list, &dummy_slots);
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
acpi_slot_detected = 1;
return -ENODEV; /* dummy driver always returns error */
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f4a18f51a29..bbd48bbe4e9 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -351,8 +351,8 @@ static int __init pcied_init(void)
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
- dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval)
dbg("Failure to register service\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 51f56ef4ab6..3eea3fdd4b0 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -92,7 +92,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
{
/* Clamp to sane value */
if ((sec <= 0) || (sec > 60))
- sec = 2;
+ sec = 2;
ctrl->poll_timer.function = &int_poll_timeout;
ctrl->poll_timer.data = (unsigned long)ctrl;
@@ -194,7 +194,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
} else if (!NO_CMD_CMPL(ctrl)) {
/*
- * This controller semms to notify of command completed
+ * This controller seems to notify of command completed
* event even though it supports none of power
* controller, attention led, power led and EMI.
*/
@@ -926,7 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev)
if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
goto abort_ctrl;
- /* Disable sotfware notification */
+ /* Disable software notification */
pcie_disable_notification(ctrl);
ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index 1f00b937f72..ac69094e4b2 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -52,7 +52,7 @@ static LIST_HEAD(slot_list);
do { \
if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -287,7 +287,7 @@ static int __init init_slots(void)
hotplug_slot->release = &release_slot;
make_slot_name(slot);
hotplug_slot->ops = &skel_hotplug_slot_ops;
-
+
/*
* Initialize the slot info structure with some known
* good values.
@@ -296,7 +296,7 @@ static int __init init_slots(void)
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
-
+
dbg("registering slot %d\n", i);
retval = pci_hp_register(slot->hotplug_slot);
if (retval) {
@@ -336,7 +336,7 @@ static void __exit cleanup_slots(void)
pci_hp_deregister(slot->hotplug_slot);
}
}
-
+
static int __init pcihp_skel_init(void)
{
int retval;
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index bb7af78e4ee..e9c044d15ad 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -217,7 +217,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
if (!pcibios_find_pci_bus(dn))
return -EINVAL;
- /* If pci slot is hotplugable, use hotplug to remove it */
+ /* If pci slot is hotpluggable, use hotplug to remove it */
slot = find_php_slot(dn);
if (slot && rpaphp_deregister_slot(slot)) {
printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 3135856e5e1..b2593e876a0 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -49,9 +49,9 @@
extern bool rpaphp_debug;
#define dbg(format, arg...) \
do { \
- if (rpaphp_debug) \
+ if (rpaphp_debug) \
printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
@@ -99,5 +99,5 @@ void dealloc_slot_struct(struct slot *slot);
struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
int rpaphp_register_slot(struct slot *slot);
int rpaphp_deregister_slot(struct slot *slot);
-
+
#endif /* _PPC64PHP_H */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 127d6e60018..b7fc5c9255a 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -226,7 +226,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
for (i = 0; i < indexes[0]; i++) {
if ((unsigned int) indexes[i + 1] == *my_index) {
if (drc_name)
- *drc_name = name_tmp;
+ *drc_name = name_tmp;
if (drc_type)
*drc_type = type_tmp;
if (drc_index)
@@ -289,7 +289,7 @@ static int is_php_dn(struct device_node *dn, const int **indexes,
* rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
* @dn: device node of slot
*
- * This subroutine will register a hotplugable slot with the
+ * This subroutine will register a hotpluggable slot with the
* PCI hotplug infrastructure. This routine is typically called
* during boot time, if the hotplug slots are present at boot time,
* or is called later, by the dlpar add code, if the slot is
@@ -328,7 +328,7 @@ int rpaphp_add_slot(struct device_node *dn)
return -ENOMEM;
slot->type = simple_strtoul(type, NULL, 10);
-
+
dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
indexes[i + 1], name, type);
@@ -356,7 +356,7 @@ static void __exit cleanup_slots(void)
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
- * memory will be freed in release_slot callback.
+ * memory will be freed in release_slot callback.
*/
list_for_each_safe(tmp, n, &rpaphp_slot_head) {
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 513e1e28239..9243f3e7a1c 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -44,7 +44,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
dbg("%s: slot must be power up to get sensor-state\n",
__func__);
- /* some slots have to be powered up
+ /* some slots have to be powered up
* before get-sensor will succeed.
*/
rc = rtas_set_power_level(slot->power_domain, POWER_ON,
@@ -133,4 +133,3 @@ int rpaphp_enable_slot(struct slot *slot)
return 0;
}
-
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index b283bbea6d2..a6082cc263f 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -1,5 +1,5 @@
/*
- * RPA Virtual I/O device functions
+ * RPA Virtual I/O device functions
* Copyright (C) 2004 Linda Xie <lxie@us.ibm.com>
*
* All rights reserved.
@@ -51,27 +51,27 @@ struct slot *alloc_slot_struct(struct device_node *dn,
int drc_index, char *drc_name, int power_domain)
{
struct slot *slot;
-
+
slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
if (!slot)
goto error_nomem;
slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
if (!slot->hotplug_slot)
- goto error_slot;
+ goto error_slot;
slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!slot->hotplug_slot->info)
goto error_hpslot;
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
- goto error_info;
+ goto error_info;
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
slot->hotplug_slot->release = &rpaphp_release_slot;
-
+
return (slot);
error_info:
@@ -91,7 +91,7 @@ static int is_registered(struct slot *slot)
list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) {
if (!strcmp(tmp_slot->name, slot->name))
return 1;
- }
+ }
return 0;
}
@@ -104,7 +104,7 @@ int rpaphp_deregister_slot(struct slot *slot)
__func__, slot->name);
list_del(&slot->rpaphp_slot_list);
-
+
retval = pci_hp_deregister(php_slot);
if (retval)
err("Problem unregistering a slot %s\n", slot->name);
@@ -120,7 +120,7 @@ int rpaphp_register_slot(struct slot *slot)
int retval;
int slotno;
- dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
+ dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
__func__, slot->dn->full_name, slot->index, slot->name,
slot->power_domain, slot->type);
@@ -128,7 +128,7 @@ int rpaphp_register_slot(struct slot *slot)
if (is_registered(slot)) {
err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name);
return -EAGAIN;
- }
+ }
if (slot->dn->child)
slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
@@ -145,4 +145,3 @@ int rpaphp_register_slot(struct slot *slot)
info("Slot [%s] registered\n", slot->name);
return 0;
}
-
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 66e505ca24e..3c7eb5dd91c 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -133,7 +133,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
@@ -183,10 +182,9 @@ int zpci_init_slot(struct zpci_dev *zdev)
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
- if (rc) {
- pr_err("pci_hp_register failed with error %d\n", rc);
+ if (rc)
goto error_reg;
- }
+
list_add(&slot->slot_list, &s390_hotplug_slot_list);
return 0;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b2781dfe60e..5b05a68cca6 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -9,6 +9,7 @@
* Work to add BIOS PROM support was completed by Mike Habeck.
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/types.h>
-#include <linux/acpi.h>
#include <asm/sn/acpi.h>
#include "../pci.h"
@@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_handle rethandle;
acpi_status ret;
- phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
if (acpi_bus_get_device(phandle, &pdevice)) {
dev_dbg(&slot->pci_bus->self->dev,
@@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
/* free the ACPI resources for the slot */
if (SN_ACPI_BASE_SUPPORT() &&
- PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
+ PCI_CONTROLLER(slot->pci_bus)->companion) {
unsigned long long adr;
struct acpi_device *device;
acpi_handle phandle;
@@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_status ret;
/* Get the rootbus node pointer */
- phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
acpi_scan_lock_acquire();
/*
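
The sgi_hotplug.c hunks above switch from a cached acpi_handle to the controller's ACPI companion device and derive the handle on demand. A minimal sketch of that pattern, assuming a bus-specific controller struct that now stores a struct acpi_device *companion (the struct and helper below are illustrative, not kernel API):

#include <linux/acpi.h>

struct demo_controller {
        struct acpi_device *companion;  /* set when the bridge is bound to ACPI */
};

static acpi_handle demo_controller_handle(struct demo_controller *ctrl)
{
        /* acpi_device_handle() is a plain accessor for companion->handle */
        return ctrl->companion ? acpi_device_handle(ctrl->companion) : NULL;
}
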
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index e260f207a90..61529097464 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -191,7 +191,7 @@ static inline const char *slot_name(struct slot *slot)
#include <linux/pci-acpi.h>
static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
- u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
+ u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
#else
@@ -216,13 +216,13 @@ struct ctrl_reg {
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
- SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
+ BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
+ SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2),
- SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
+ SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config),
MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl),
- PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
+ PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
CMD = offsetof(struct ctrl_reg, cmd),
CMD_STATUS = offsetof(struct ctrl_reg, cmd_status),
INTR_LOC = offsetof(struct ctrl_reg, intr_loc),
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index d3f757df691..faf13abd5b9 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -143,11 +143,11 @@ static int init_slots(struct controller *ctrl)
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
- "hp_slot=%x sun=%x slot_device_offset=%x\n",
- pci_domain_nr(ctrl->pci_dev->subordinate),
- slot->bus, slot->device, slot->hp_slot, slot->number,
- ctrl->slot_device_offset);
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
+ "hp_slot=%x sun=%x slot_device_offset=%x\n",
+ pci_domain_nr(ctrl->pci_dev->subordinate),
+ slot->bus, slot->device, slot->hp_slot, slot->number,
+ ctrl->slot_device_offset);
retval = pci_hp_register(slot->hotplug_slot,
ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 75ba2311b54..2d7f474ca0e 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -116,7 +116,7 @@
#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
/*
- * SHPC Command Code definitnions
+ * SHPC Command Code definitions
*
* Slot Operation 00h - 3Fh
* Set Bus Segment Speed/Mode A 40h - 47h
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 1b90579b233..50ce6809829 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
char *type;
struct resource *res;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle)
return -EINVAL;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 21a7182dccd..1fe2d6fb19d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -610,7 +610,7 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
struct resource tmp;
enum pci_bar_type type;
int reg = pci_iov_resource_bar(dev, resno, &type);
-
+
if (!reg)
return 0;
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index b008cf86b9c..6684f153ab5 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -25,7 +25,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
/**
* pci_lost_interrupt - reports a lost PCI interrupt
* @pdev: device whose interrupt is lost
- *
+ *
* The primary function of this routine is to report a lost interrupt
* in a standard way which users can recognise (instead of blaming the
* driver).
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d5f90d6383b..3fcd67a1667 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -185,7 +185,7 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
@@ -199,9 +199,14 @@ static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
return mask_bits;
}
+__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ return default_msi_mask_irq(desc, mask, flag);
+}
+
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- desc->masked = __msi_mask_irq(desc, mask, flag);
+ desc->masked = arch_msi_mask_irq(desc, mask, flag);
}
/*
@@ -211,7 +216,7 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
* file. This saves a few milliseconds when initialising devices with lots
* of MSI-X interrupts.
*/
-static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
+u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
@@ -224,9 +229,14 @@ static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
return mask_bits;
}
+__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ return default_msix_mask_irq(desc, flag);
+}
+
static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
- desc->masked = __msix_mask_irq(desc, flag);
+ desc->masked = arch_msix_mask_irq(desc, flag);
}
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -774,7 +784,7 @@ error:
* @nvec: how many MSIs have been requested ?
* @type: are we checking for MSI or MSI-X ?
*
- * Look at global flags, the device itself, and its parent busses
+ * Look at global flags, the device itself, and its parent buses
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 0, else return an error code.
**/
@@ -831,7 +841,7 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
int status, maxvec;
u16 msgctl;
- if (!dev->msi_cap)
+ if (!dev->msi_cap || dev->current_state != PCI_D0)
return -EINVAL;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
@@ -862,7 +872,7 @@ int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
int ret, nvec;
u16 msgctl;
- if (!dev->msi_cap)
+ if (!dev->msi_cap || dev->current_state != PCI_D0)
return -EINVAL;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
@@ -902,7 +912,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
/* Keep cached state to be restored */
- __msi_mask_irq(desc, mask, ~mask);
+ arch_msi_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -955,7 +965,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
int status, nr_entries;
int i, j;
- if (!entries || !dev->msix_cap)
+ if (!entries || !dev->msix_cap || dev->current_state != PCI_D0)
return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
@@ -998,7 +1008,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
/* Return the device with MSI-X masked as initial states */
list_for_each_entry(entry, &dev->msi_list, list) {
/* Keep cached states to be restored */
- __msix_mask_irq(entry, 1);
+ arch_msix_mask_irq(entry, 1);
}
msix_set_enable(dev, 0);
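
The msi.c hunks above expose the generic mask helpers as default_msi{,x}_mask_irq() and add __weak arch_msi{,x}_mask_irq() wrappers so an architecture can interpose its own masking while still reaching the generic code. A minimal, self-contained sketch of that default/override split (names are illustrative, not the real MSI entry points):

#include <linux/types.h>
#include <linux/compiler.h>

u32 default_mask_op(u32 mask, u32 flag)
{
        return flag ? mask : 0;         /* generic implementation */
}

/* weak: an architecture may supply a strong definition that replaces this */
__weak u32 arch_mask_op(u32 mask, u32 flag)
{
        return default_mask_op(mask, flag);
}

Callers only ever invoke arch_mask_op(); a per-arch strong definition can still fall back to default_mask_op() for the common case, which is exactly what the hunks above arrange for the MSI/MSI-X mask paths.
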
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b0299e6d9a3..577074efbe6 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -141,7 +141,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
* if (_PRW at S-state x)
* choose from highest power _SxD to lowest power _SxW
* else // no _PRW at S-state x
- * choose highest power _SxD or any lower power
+ * choose highest power _SxD or any lower power
*/
static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
@@ -173,15 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_power_manageable(handle) : false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
- acpi_handle tmp;
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
@@ -192,7 +191,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
int error = -EINVAL;
/* If the ACPI device has _EJ0, ignore the device */
- if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+ if (!handle || acpi_has_method(handle, "_EJ0"))
return -ENODEV;
switch (state) {
@@ -218,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_can_wakeup(handle) : false;
}
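
The pci-acpi.c hunk above replaces the acpi_get_handle(handle, "_EJ0", &tmp) probe with acpi_has_method(), dropping the scratch handle. A short sketch of the same check (the helper name is illustrative):

#include <linux/acpi.h>

static bool demo_dev_is_ejectable(struct device *dev)
{
        acpi_handle handle = ACPI_HANDLE(dev);

        /* true only when the device has an ACPI companion that defines _EJ0 */
        return handle && acpi_has_method(handle, "_EJ0");
}
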
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 98f7b9b8950..9042fdbd724 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -135,6 +135,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
return retval;
return count;
}
+static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
/**
* store_remove_id - remove a PCI device ID from this driver
@@ -180,12 +181,14 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
return retval;
return count;
}
+static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
-static struct driver_attribute pci_drv_attrs[] = {
- __ATTR(new_id, S_IWUSR, NULL, store_new_id),
- __ATTR(remove_id, S_IWUSR, NULL, store_remove_id),
- __ATTR_NULL,
+static struct attribute *pci_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pci_drv);
/**
* pci_match_id - See if a pci device matches a given pci_id table
@@ -264,11 +267,19 @@ static long local_pci_probe(void *_ddi)
pm_runtime_get_sync(dev);
pci_dev->driver = pci_drv;
rc = pci_drv->probe(pci_dev, ddi->id);
- if (rc) {
+ if (!rc)
+ return rc;
+ if (rc < 0) {
pci_dev->driver = NULL;
pm_runtime_put_sync(dev);
+ return rc;
}
- return rc;
+ /*
+ * Probe function should return < 0 for failure, 0 for success
+ * Treat values > 0 as success, but warn.
+ */
+ dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+ return 0;
}
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
@@ -301,7 +312,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* __pci_device_probe - check if a driver wants to claim a specific PCI device
* @drv: driver to call to check if it wants the PCI device
* @pci_dev: PCI device being probed
- *
+ *
* returns 0 on success, else error.
* side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
*/
@@ -367,7 +378,7 @@ static int pci_device_remove(struct device * dev)
* We would love to complain here if pci_dev->is_enabled is set, that
* the driver should have called pci_disable_device(), but the
* unfortunate fact is there are too many odd BIOS and bridge setups
- * that don't like drivers doing that all of the time.
+ * that don't like drivers doing that all of the time.
* Oh well, we can dream of sane hardware when we sleep, no matter how
* horrible the crap we have to deal with is when we are awake...
*/
@@ -599,18 +610,10 @@ static int pci_pm_prepare(struct device *dev)
return error;
}
-static void pci_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
#else /* !CONFIG_PM_SLEEP */
#define pci_pm_prepare NULL
-#define pci_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
@@ -1121,9 +1124,8 @@ static int pci_pm_runtime_idle(struct device *dev)
#ifdef CONFIG_PM
-const struct dev_pm_ops pci_dev_pm_ops = {
+static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
- .complete = pci_pm_complete,
.suspend = pci_pm_suspend,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
@@ -1154,10 +1156,10 @@ const struct dev_pm_ops pci_dev_pm_ops = {
* @drv: the driver structure to register
* @owner: owner module of drv
* @mod_name: module name string
- *
+ *
* Adds the driver structure to the list of registered drivers.
- * Returns a negative value on error, otherwise 0.
- * If no error occurred, the driver remains registered even if
+ * Returns a negative value on error, otherwise 0.
+ * If no error occurred, the driver remains registered even if
* no device was claimed during registration.
*/
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
@@ -1179,7 +1181,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
/**
* pci_unregister_driver - unregister a pci driver
* @drv: the driver structure to unregister
- *
+ *
* Deletes the driver structure from the list of registered PCI drivers,
* gives it a chance to clean up by calling its remove() function for
* each device it was responsible for, and marks those devices as
@@ -1201,7 +1203,7 @@ static struct pci_driver pci_compat_driver = {
* pci_dev_driver - get the pci_driver of a device
* @dev: the device to query
*
- * Returns the appropriate pci_driver structure or %NULL if there is no
+ * Returns the appropriate pci_driver structure or %NULL if there is no
* registered driver for the device.
*/
struct pci_driver *
@@ -1222,7 +1224,7 @@ pci_dev_driver(const struct pci_dev *dev)
* pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
* @dev: the PCI device structure to match against
* @drv: the device driver to search for matching PCI device id structures
- *
+ *
* Used by a driver to check whether a PCI device present in the
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
@@ -1316,9 +1318,9 @@ struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
- .dev_attrs = pci_dev_attrs,
- .bus_attrs = pci_bus_attrs,
- .drv_attrs = pci_drv_attrs,
+ .dev_groups = pci_dev_groups,
+ .bus_groups = pci_bus_groups,
+ .drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
};
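
The pci-driver.c hunks above convert the bus from a .drv_attrs array terminated by __ATTR_NULL to .drv_groups, using DRIVER_ATTR() plus ATTRIBUTE_GROUPS(). A self-contained sketch of that conversion with one illustrative write-only attribute:

#include <linux/device.h>

static ssize_t demo_store(struct device_driver *drv, const char *buf,
                          size_t count)
{
        return count;                   /* accept and discard the input */
}
static DRIVER_ATTR(demo, S_IWUSR, NULL, demo_store);

static struct attribute *demo_drv_attrs[] = {
        &driver_attr_demo.attr,
        NULL,
};
ATTRIBUTE_GROUPS(demo_drv);             /* emits demo_drv_groups[] */

/* a struct bus_type would then set .drv_groups = demo_drv_groups */
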
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index edaed6f4da6..d51f45aa669 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -263,7 +263,7 @@ device_has_dsm(struct device *dev)
acpi_handle handle;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle)
return FALSE;
@@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
acpi_handle handle;
int length;
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle)
return -1;
@@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
acpi_handle handle;
int length;
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle)
return -1;
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 6e47c519c51..2ff77509d8e 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -2,13 +2,13 @@
*
* Copyright (C) 2008 Red Hat, Inc.
* Author:
- * Chris Wright
+ * Chris Wright
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Usage is simple, allocate a new id to the stub driver and bind the
* device to it. For example:
- *
+ *
* # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7128cfdd64a..c91e6c18deb 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -10,7 +10,7 @@
*
* File attributes for PCI devices
*
- * Modeled after usb's driverfs.c
+ * Modeled after usb's driverfs.c
*
*/
@@ -42,7 +42,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
\
pdev = to_pci_dev (dev); \
return sprintf (buf, format_string, pdev->field); \
-}
+} \
+static DEVICE_ATTR_RO(field)
pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
@@ -73,10 +74,13 @@ static ssize_t broken_parity_status_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(broken_parity_status);
-static ssize_t local_cpus_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
+static ssize_t pci_dev_show_local_cpu(struct device *dev,
+ int type,
+ struct device_attribute *attr,
+ char *buf)
+{
const struct cpumask *mask;
int len;
@@ -86,30 +90,28 @@ static ssize_t local_cpus_show(struct device *dev,
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
- len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
+ len = type ?
+ cpumask_scnprintf(buf, PAGE_SIZE-2, mask) :
+ cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
+
buf[len++] = '\n';
buf[len] = '\0';
return len;
}
+static ssize_t local_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_dev_show_local_cpu(dev, 1, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpus);
static ssize_t local_cpulist_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct cpumask *mask;
- int len;
-
-#ifdef CONFIG_NUMA
- mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
- cpumask_of_node(dev_to_node(dev));
-#else
- mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
-#endif
- len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
+ return pci_dev_show_local_cpu(dev, 0, attr, buf);
}
+static DEVICE_ATTR_RO(local_cpulist);
/*
* PCI Bus Class Devices
@@ -170,6 +172,7 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
}
return (str - buf);
}
+static DEVICE_ATTR_RO(resource);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -181,10 +184,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
(u8)(pci_dev->class));
}
+static DEVICE_ATTR_RO(modalias);
-static ssize_t is_enabled_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
@@ -208,14 +212,15 @@ static ssize_t is_enabled_store(struct device *dev,
return result < 0 ? result : count;
}
-static ssize_t is_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
pdev = to_pci_dev (dev);
return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
+static DEVICE_ATTR_RW(enabled);
#ifdef CONFIG_NUMA
static ssize_t
@@ -223,6 +228,7 @@ numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf (buf, "%d\n", dev->numa_node);
}
+static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t
@@ -232,6 +238,7 @@ dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
}
+static DEVICE_ATTR_RO(dma_mask_bits);
static ssize_t
consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
@@ -239,6 +246,7 @@ consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
{
return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
}
+static DEVICE_ATTR_RO(consistent_dma_mask_bits);
static ssize_t
msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -262,13 +270,17 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /* bad things may happen if the no_msi flag is changed
- * while some drivers are loaded */
+ /*
+ * Bad things may happen if the no_msi flag is changed
+ * while drivers are loaded.
+ */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Maybe pci devices without subordinate busses shouldn't even have this
- * attribute in the first place? */
+ /*
+ * Maybe devices without subordinate buses shouldn't have this
+ * attribute in the first place?
+ */
if (!pdev->subordinate)
return count;
@@ -283,6 +295,7 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(msi_bus);
static DEFINE_MUTEX(pci_remove_rescan_mutex);
static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
@@ -302,10 +315,20 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
}
return count;
}
+static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
+
+static struct attribute *pci_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL,
+};
-struct bus_attribute pci_bus_attrs[] = {
- __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
- __ATTR_NULL
+static const struct attribute_group pci_bus_group = {
+ .attrs = pci_bus_attrs,
+};
+
+const struct attribute_group *pci_bus_groups[] = {
+ &pci_bus_group,
+ NULL,
};
static ssize_t
@@ -325,8 +348,9 @@ dev_rescan_store(struct device *dev, struct device_attribute *attr,
}
return count;
}
-struct device_attribute dev_rescan_attr = __ATTR(rescan, (S_IWUSR|S_IWGRP),
- NULL, dev_rescan_store);
+static struct device_attribute dev_rescan_attr = __ATTR(rescan,
+ (S_IWUSR|S_IWGRP),
+ NULL, dev_rescan_store);
static void remove_callback(struct device *dev)
{
@@ -356,8 +380,9 @@ remove_store(struct device *dev, struct device_attribute *dummy,
count = ret;
return count;
}
-struct device_attribute dev_remove_attr = __ATTR(remove, (S_IWUSR|S_IWGRP),
- NULL, remove_store);
+static struct device_attribute dev_remove_attr = __ATTR(remove,
+ (S_IWUSR|S_IWGRP),
+ NULL, remove_store);
static ssize_t
dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
@@ -404,6 +429,7 @@ static ssize_t d3cold_allowed_show(struct device *dev,
struct pci_dev *pdev = to_pci_dev(dev);
return sprintf (buf, "%u\n", pdev->d3cold_allowed);
}
+static DEVICE_ATTR_RW(d3cold_allowed);
#endif
#ifdef CONFIG_PCI_IOV
@@ -489,30 +515,38 @@ static struct device_attribute sriov_numvfs_attr =
sriov_numvfs_show, sriov_numvfs_store);
#endif /* CONFIG_PCI_IOV */
-struct device_attribute pci_dev_attrs[] = {
- __ATTR_RO(resource),
- __ATTR_RO(vendor),
- __ATTR_RO(device),
- __ATTR_RO(subsystem_vendor),
- __ATTR_RO(subsystem_device),
- __ATTR_RO(class),
- __ATTR_RO(irq),
- __ATTR_RO(local_cpus),
- __ATTR_RO(local_cpulist),
- __ATTR_RO(modalias),
+static struct attribute *pci_dev_attrs[] = {
+ &dev_attr_resource.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_class.attr,
+ &dev_attr_irq.attr,
+ &dev_attr_local_cpus.attr,
+ &dev_attr_local_cpulist.attr,
+ &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
- __ATTR_RO(numa_node),
+ &dev_attr_numa_node.attr,
#endif
- __ATTR_RO(dma_mask_bits),
- __ATTR_RO(consistent_dma_mask_bits),
- __ATTR(enable, 0600, is_enabled_show, is_enabled_store),
- __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
- broken_parity_status_show,broken_parity_status_store),
- __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
+ &dev_attr_dma_mask_bits.attr,
+ &dev_attr_consistent_dma_mask_bits.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_broken_parity_status.attr,
+ &dev_attr_msi_bus.attr,
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
- __ATTR(d3cold_allowed, 0644, d3cold_allowed_show, d3cold_allowed_store),
+ &dev_attr_d3cold_allowed.attr,
#endif
- __ATTR_NULL,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_group = {
+ .attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+ &pci_dev_group,
+ NULL,
};
static struct attribute *pcibus_attrs[] = {
@@ -544,7 +578,7 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
!!(pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW));
}
-struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
@@ -640,7 +674,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
size = dev->cfg_size - off;
count = size;
}
-
+
pci_config_pm_runtime_get(dev);
if ((off & 1) && size) {
@@ -648,7 +682,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
off++;
size--;
}
-
+
if ((off & 3) && size > 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
@@ -666,7 +700,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
off += 4;
size -= 4;
}
-
+
if (size >= 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
@@ -1199,21 +1233,21 @@ pci_read_rom(struct file *filp, struct kobject *kobj,
if (!pdev->rom_attr_enabled)
return -EINVAL;
-
+
rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
if (!rom || !size)
return -EIO;
-
+
if (off >= size)
count = 0;
else {
if (off + count > size)
count = size - off;
-
+
memcpy_fromio(buf, rom + off, count);
}
pci_unmap_rom(pdev, rom);
-
+
return count;
}
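
The pci-sysfs.c hunks above move each attribute to DEVICE_ATTR_RO()/DEVICE_ATTR_RW() markers and collect them into attribute groups instead of the old pci_dev_attrs[] table. A condensed sketch of that shape with one illustrative read-only attribute:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t answer_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(answer);          /* defines dev_attr_answer */

static struct attribute *demo_dev_attrs[] = {
        &dev_attr_answer.attr,
        NULL,
};

static const struct attribute_group demo_dev_group = {
        .attrs = demo_dev_attrs,
};

const struct attribute_group *demo_dev_groups[] = {
        &demo_dev_group,
        NULL,                           /* handed to bus_type.dev_groups */
};
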
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index bdd64b1b481..33120d15666 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -198,7 +198,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
}
/**
- * pci_find_capability - query for devices' capabilities
+ * pci_find_capability - query for devices' capabilities
* @dev: PCI device to query
* @cap: capability code
*
@@ -207,12 +207,12 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
* device's PCI configuration space or 0 in case the device does not
* support it. Possible values for @cap:
*
- * %PCI_CAP_ID_PM Power Management
- * %PCI_CAP_ID_AGP Accelerated Graphics Port
- * %PCI_CAP_ID_VPD Vital Product Data
- * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
* %PCI_CAP_ID_MSI Message Signalled Interrupts
- * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
* %PCI_CAP_ID_PCIX PCI-X
* %PCI_CAP_ID_EXP PCI Express
*/
@@ -228,13 +228,13 @@ int pci_find_capability(struct pci_dev *dev, int cap)
}
/**
- * pci_bus_find_capability - query for devices' capabilities
+ * pci_bus_find_capability - query for devices' capabilities
* @bus: the PCI bus to query
* @devfn: PCI device to query
* @cap: capability code
*
* Like pci_find_capability() but works for pci devices that do not have a
- * pci_dev structure set up yet.
+ * pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
@@ -515,7 +515,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
/* Validate current state:
- * Can enter D0 from any state, but if we can only go deeper
+ * Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
@@ -998,7 +998,7 @@ static void pci_restore_config_space(struct pci_dev *pdev)
}
}
-/**
+/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
@@ -1030,7 +1030,7 @@ struct pci_saved_state {
* the device saved state.
* @dev: PCI device that we're dealing with
*
- * Rerturn NULL if no state or error.
+ * Return NULL if no state or error.
*/
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
@@ -1148,18 +1148,16 @@ int pci_reenable_device(struct pci_dev *dev)
static void pci_enable_bridge(struct pci_dev *dev)
{
+ struct pci_dev *bridge;
int retval;
- if (!dev)
- return;
-
- pci_enable_bridge(dev->bus->self);
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_enable_bridge(bridge);
if (pci_is_enabled(dev)) {
- if (!dev->is_busmaster) {
- dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
+ if (!dev->is_busmaster)
pci_set_master(dev);
- }
return;
}
@@ -1172,6 +1170,7 @@ static void pci_enable_bridge(struct pci_dev *dev)
static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
+ struct pci_dev *bridge;
int err;
int i, bars = 0;
@@ -1190,7 +1189,9 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
- pci_enable_bridge(dev->bus->self);
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_enable_bridge(bridge);
/* only skip sriov related */
for (i = 0; i <= PCI_ROM_RESOURCE; i++)
@@ -1644,8 +1645,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
if (enable) {
pme_dev = kmalloc(sizeof(struct pci_pme_device),
GFP_KERNEL);
- if (!pme_dev)
- goto out;
+ if (!pme_dev) {
+ dev_warn(&dev->dev, "can't enable PME#\n");
+ return;
+ }
pme_dev->dev = dev;
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
@@ -1666,7 +1669,6 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
}
-out:
dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
@@ -1878,7 +1880,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
* pci_dev_run_wake - Check if device can generate run-time wake-up events.
* @dev: Device to check.
*
- * Return true if the device itself is cabable of generating wake-up events
+ * Return true if the device itself is capable of generating wake-up events
* (through the platform or using the native PCIe PME) or if the device supports
* PME and one of its upstream bridges can generate wake-up events.
*/
@@ -2445,7 +2447,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
switch (pci_pcie_type(pdev)) {
/*
* PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
- * but since their primary inteface is PCI/X, we conservatively
+ * but since their primary interface is PCI/X, we conservatively
* handle them as we would a non-PCIe device.
*/
case PCI_EXP_TYPE_PCIE_BRIDGE:
@@ -2469,7 +2471,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
/*
* PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
* implemented by the remaining PCIe types to indicate peer-to-peer
- * capabilities, but only when they are part of a multifunciton
+ * capabilities, but only when they are part of a multifunction
* device. The footnote for section 6.12 indicates the specific
* PCIe types included here.
*/
@@ -2484,7 +2486,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
}
/*
- * PCIe 3.0, 6.12.1.3 specifies no ACS capabilties are applicable
+ * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
* to single function devices with the exception of downstream ports.
*/
return true;
@@ -2620,7 +2622,7 @@ void pci_release_region(struct pci_dev *pdev, int bar)
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
- * sysfs MMIO access.
+ * sysfs MMIO access.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -2632,7 +2634,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
if (pci_resource_len(pdev, bar) == 0)
return 0;
-
+
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name))
@@ -2692,7 +2694,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
*
* The key difference that _exclusive makes it that userspace is
* explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
+ * sysfs.
*/
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
@@ -2797,7 +2799,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
* successfully.
*
* pci_request_regions_exclusive() will mark the region so that
- * /dev/mem and the sysfs MMIO access will not be allowed.
+ * /dev/mem and the sysfs MMIO access will not be allowed.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -2860,7 +2862,7 @@ void __weak pcibios_set_master(struct pci_dev *dev)
lat = pcibios_max_latency;
else
return;
- dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
@@ -2965,7 +2967,7 @@ pci_set_mwi(struct pci_dev *dev)
cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
-
+
return 0;
}
@@ -3290,7 +3292,7 @@ clear:
*
* NOTE: This causes the caller to sleep for twice the device power transition
* cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
- * by devault (i.e. unless the @dev's d3_delay field has a different value).
+ * by default (i.e. unless the @dev's d3_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
static int pci_pm_reset(struct pci_dev *dev, int probe)
@@ -3339,7 +3341,7 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
/*
* PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
- * this to 2ms to ensure that we meet the minium requirement.
+ * this to 2ms to ensure that we meet the minimum requirement.
*/
msleep(2);
@@ -3978,6 +3980,7 @@ int pcie_get_mps(struct pci_dev *dev)
return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
+EXPORT_SYMBOL(pcie_get_mps);
/**
* pcie_set_mps - set PCI Express maximum payload size
@@ -3995,13 +3998,14 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
return -EINVAL;
v = ffs(mps) - 8;
- if (v > dev->pcie_mpss)
+ if (v > dev->pcie_mpss)
return -EINVAL;
v <<= 5;
return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_PAYLOAD, v);
}
+EXPORT_SYMBOL(pcie_set_mps);
/**
* pcie_get_minimum_link - determine minimum link settings of a PCI device
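
The pci_enable_bridge()/pci_enable_device_flags() hunks above stop dereferencing dev->bus->self directly and climb via pci_upstream_bridge(), which returns NULL once a root bus is reached. A small sketch of that walk (count_upstream_bridges() is an illustrative helper, not kernel API):

#include <linux/pci.h>

static int count_upstream_bridges(struct pci_dev *dev)
{
        struct pci_dev *bridge = pci_upstream_bridge(dev);
        int n = 0;

        while (bridge) {                /* NULL once we reach the root bus */
                n++;
                bridge = pci_upstream_bridge(bridge);
        }
        return n;
}
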
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8a00c063d7b..9c91ecc1301 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -153,10 +153,10 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
-extern struct device_attribute pci_dev_attrs[];
+extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
extern struct device_type pci_dev_type;
-extern struct bus_attribute pci_bus_attrs[];
+extern const struct attribute_group *pci_bus_groups[];
/**
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 85ca36f2136..b2c8881da76 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -525,7 +525,7 @@ static void handle_error_source(struct pcie_device *aerdev,
if (info->severity == AER_CORRECTABLE) {
/*
- * Correctable error does not need software intevention.
+ * Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -574,7 +574,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
};
spin_lock_irqsave(&aer_recover_ring_lock, flags);
- if (kfifo_put(&aer_recover_ring, &entry))
+ if (kfifo_put(&aer_recover_ring, entry))
schedule_work(&aer_recover_work);
else
pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 403a44374ed..f1272dc54de 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -548,7 +548,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
- * It is called after the pcie and its children devices are scaned.
+ * It is called after the pcie and its children devices are scanned.
* @pdev: the root port or switch downstream port
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index e56e594ce11..bbc3bdd2b18 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -419,8 +419,8 @@ static void pcie_pme_remove(struct pcie_device *srv)
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
- .port_type = PCI_EXP_TYPE_ROOT_PORT,
- .service = PCIE_PORT_SERVICE_PME,
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d2eb80aab56..d525548404d 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -14,7 +14,7 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 4
/*
* According to the PCI Express Base Specification 2.0, the indices of
- * the MSI-X table entires used by port services must not exceed 31
+ * the MSI-X table entries used by port services must not exceed 31
*/
#define PCIE_PORT_MAX_MSIX_ENTRIES 32
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 67be55a7f26..87e79a6ffb5 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -18,8 +18,8 @@
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
struct bus_type pcie_port_bus_type = {
- .name = "pci_express",
- .match = pcie_port_bus_match,
+ .name = "pci_express",
+ .match = pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 31063ac3099..0b6e7660406 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -46,7 +46,7 @@ static void release_pcie_device(struct device *dev)
* pcie_port_msix_add_entry - add entry to given array of MSI-X entries
* @entries: Array of MSI-X entries
* @new_entry: Index of the entry to add to the array
- * @nr_entries: Number of entries aleady in the array
+ * @nr_entries: Number of entries already in the array
*
* Return value: Position of the added entry in the array
*/
@@ -260,13 +260,14 @@ static int get_port_device_capability(struct pci_dev *dev)
if (pcie_ports_disabled)
return 0;
- err = pcie_port_platform_notify(dev, &cap_mask);
- if (!pcie_ports_auto) {
- cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
- | PCIE_PORT_SERVICE_VC;
- if (pci_aer_available())
- cap_mask |= PCIE_PORT_SERVICE_AER;
- } else if (err) {
+ cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+ | PCIE_PORT_SERVICE_VC;
+ if (pci_aer_available())
+ cap_mask |= PCIE_PORT_SERVICE_AER;
+
+ if (pcie_ports_auto) {
+ err = pcie_port_platform_notify(dev, &cap_mask);
+ if (err)
return 0;
}
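
The portdrv_core.c hunk above reorders get_port_device_capability() so the default PME/HP/VC (plus AER when available) mask is built first and the platform is consulted only in the pcie_ports_auto case. A condensed, self-contained restatement of that flow (flag values and the notify callback are illustrative):

#define SVC_PME 0x1
#define SVC_HP  0x2
#define SVC_VC  0x4
#define SVC_AER 0x8

static int capability_mask(int aer_available, int ports_auto,
                           int (*platform_notify)(int *mask))
{
        int mask = SVC_PME | SVC_HP | SVC_VC;

        if (aer_available)
                mask |= SVC_AER;

        /* the platform (ACPI _OSC) trims the mask only in "auto" mode */
        if (ports_auto && platform_notify && platform_notify(&mask))
                return 0;               /* notification failed: no native services */

        return mask;
}
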
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 696caed5fdf..0d8fdc48e64 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -223,7 +223,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
static void pcie_portdrv_remove(struct pci_dev *dev)
{
pcie_port_device_remove(dev);
- pci_disable_device(dev);
}
static int error_detected_iter(struct device *device, void *data)
@@ -390,9 +389,9 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
- .err_handler = &pcie_portdrv_err_handler,
+ .err_handler = &pcie_portdrv_err_handler,
- .driver.pm = PCIE_PORTDRV_PM_OPS,
+ .driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
@@ -412,7 +411,7 @@ static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
.ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
- "MICRO-STAR INTERNATIONAL CO., LTD"),
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
},
},
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7ef0f868b3e..38e403dddf6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -582,7 +582,7 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat)
index = 1;
else
goto out;
-
+
if (agp3) {
index += 2;
if (index == 5)
@@ -641,8 +641,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
return;
}
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
- if (pos) {
+ if (pci_is_pcie(bridge)) {
u32 linkcap;
u16 linksta;
@@ -790,7 +789,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
}
/* Disable MasterAbortMode during probing to avoid reporting
- of bus errors (in some architectures) */
+ of bus errors (in some architectures) */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
@@ -984,7 +983,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
- pdev->is_pcie = 1;
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
@@ -1007,7 +1005,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
* pci_setup_device - fill in class and map information of a device
* @dev: the device structure to fill
*
- * Initialize the device structure with information about the device's
+ * Initialize the device structure with information about the device's
* vendor,class,memory and IO-space addresses,IRQ lines etc.
* Called at initialisation of the PCI subsystem and by CardBus services.
* Returns 0 on success and negative if unknown type of device (not normal,
@@ -1113,7 +1111,7 @@ int pci_setup_device(struct pci_dev *dev)
goto bad;
/* The PCI-to-PCI bridge spec requires that subtractive
decoding (i.e. transparent) bridge must have programming
- interface code of 0x01. */
+ interface code of 0x01. */
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
@@ -1572,7 +1570,7 @@ static void pcie_write_mrrs(struct pci_dev *dev)
* subsequent read will verify if the value is acceptable or not.
* If the MRRS value provided is not acceptable (e.g., too large),
* shrink the value until it is acceptable to the HW.
- */
+ */
while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
rc = pcie_set_readrq(dev, mrrs);
if (!rc)
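
The probe.c hunk above relies on pci_is_pcie(), which tests the pcie_cap offset cached by set_pcie_port_type(), so the extra pci_find_capability(PCI_CAP_ID_EXP) lookup (and the now-redundant is_pcie flag) can go. A short sketch of the same test feeding the pcie_capability_*() accessors (the helper name is illustrative):

#include <linux/pci.h>

static bool demo_read_linkcap(struct pci_dev *bridge, u32 *linkcap)
{
        if (!pci_is_pcie(bridge))       /* no PCIe capability cached */
                return false;

        /* pcie_capability_*() helpers reuse the same cached offset */
        return pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, linkcap) == 0;
}
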
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index cdc7836d7e3..46d1378f2e9 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -222,7 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
default:
ret = -EINVAL;
break;
- };
+ }
return ret;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f6c31fabf3a..b3b1b9aa886 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -55,7 +55,7 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
-/* Deal with broken BIOS'es that neglect to enable passive release,
+/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
{
@@ -78,11 +78,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
but VIA don't answer queries. If you happen to have good contacts at VIA
- ask them for me please -- Alan
-
- This appears to be BIOS not version dependent. So presumably there is a
+ ask them for me please -- Alan
+
+ This appears to be BIOS not version dependent. So presumably there is a
chipset level fix */
-
+
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
@@ -97,7 +97,7 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
@@ -157,10 +157,10 @@ static void quirk_triton(struct pci_dev *dev)
pci_pci_problems |= PCIPCI_TRITON;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
* VIA Apollo KT133 needs PCI latency patch
@@ -171,7 +171,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir
* the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
- * information provided by VIA
+ * information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
@@ -179,7 +179,7 @@ static void quirk_vialatency(struct pci_dev *dev)
u8 busarb;
/* Ok we have a potential problem chipset here. Now see if we have
a buggy southbridge */
-
+
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p!=NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
@@ -194,9 +194,9 @@ static void quirk_vialatency(struct pci_dev *dev)
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
-
+
/*
- * Ok we have the problem. Now set the PCI master grant to
+ * Ok we have the problem. Now set the PCI master grant to
* occur every master grant. The apparent bug is that under high
* PCI load (quite common in Linux of course) you can get data
* loss when the CPU is held off the bus for 3 bus master requests
@@ -209,7 +209,7 @@ static void quirk_vialatency(struct pci_dev *dev)
*/
pci_read_config_byte(dev, 0x76, &busarb);
- /* Set bit 4 and bi 5 of byte 76 to 0x01
+ /* Set bit 4 and bi 5 of byte 76 to 0x01
"Master priority rotation on every PCI master grant */
busarb &= ~(1<<5);
busarb |= (1<<4);
@@ -252,7 +252,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
* that DMA to AGP space. Latency must be set to 0xA and triton
* workaround applied too
* [Info kindly provided by ALi]
- */
+ */
static void quirk_alimagik(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
@@ -260,8 +260,8 @@ static void quirk_alimagik(struct pci_dev *dev)
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
/*
* Natoma has some interesting boundary conditions with Zoran stuff
@@ -274,12 +274,12 @@ static void quirk_natoma(struct pci_dev *dev)
pci_pci_problems |= PCIPCI_NATOMA;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
* This chip can cause PCI parity errors if config register 0xA0 is read
@@ -400,7 +400,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
/*
* For now we only print it out. Eventually we'll want to
* reserve it (at least if it's in the 0x1000+ range), but
- * let's get enough confirmation reports first.
+ * let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
@@ -425,7 +425,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
}
/*
* For now we only print it out. Eventually we'll want to
- * reserve it, but let's get enough confirmation reports first.
+ * reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
@@ -682,7 +682,7 @@ static void quirk_xio2000a(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
quirk_xio2000a);
-#ifdef CONFIG_X86_IO_APIC
+#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
@@ -696,12 +696,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
static void quirk_via_ioapic(struct pci_dev *dev)
{
u8 tmp;
-
+
if (nr_ioapics < 1)
tmp = 0; /* nothing routed to external APIC */
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
-
+
dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
tmp == 0 ? "Disa" : "Ena");
@@ -712,7 +712,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_i
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/*
- * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
+ * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
* This leads to doubled level interrupt rates.
* Set this bit to get rid of cycle wastage.
* Otherwise uncritical.
@@ -986,7 +986,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
-
+
if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
@@ -1094,11 +1094,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
* users to be irritated by just another PCI Device in the Win98 device
- * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
+ * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
* package 2.7.0 for details)
*
- * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
- * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
+ * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
+ * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- the chosen trigger
* is either the Host bridge (preferred) or on-board VGA controller.
*
@@ -1253,7 +1253,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asu
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
u16 val;
-
+
if (likely(!asus_hides_smbus))
return;
@@ -1640,8 +1640,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
/*
* disable boot interrupts on HT-1000
@@ -1673,8 +1673,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
/*
* disable boot interrupts on AMD and ATI chipsets
@@ -1730,8 +1730,8 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
#endif /* CONFIG_X86_IO_APIC */
/*
@@ -2127,8 +2127,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other busses controlled by the chipset even if Linux is not
- * aware of it. Instead of setting the flag on all busses in the
+ * some other buses controlled by the chipset even if Linux is not
+ * aware of it. Instead of setting the flag on all buses in the
* machine, simply disable MSI globally.
*/
static void quirk_disable_all_msi(struct pci_dev *dev)
@@ -2288,14 +2288,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
nvenet_msi_disable);
/*
- * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
- * config register. This register controls the routing of legacy interrupts
- * from devices that route through the MCP55. If this register is misprogramed
- * interrupts are only sent to the bsp, unlike conventional systems where the
- * irq is broadxast to all online cpus. Not having this register set
- * properly prevents kdump from booting up properly, so lets make sure that
- * we have it set correctly.
- * Note this is an undocumented register.
+ * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
+ * config register. This register controls the routing of legacy
+ * interrupts from devices that route through the MCP55. If this register
+ * is misprogrammed, interrupts are only sent to the BSP, unlike
+ * conventional systems where the IRQ is broadcast to all online CPUs. Not
+ * having this register set properly prevents kdump from booting up
+ * properly, so let's make sure that we have it set correctly.
+ * Note that this is an undocumented register.
*/
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
@@ -2626,7 +2626,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
- * kernel fails to allocate resources when hotplug device is
+ * kernel fails to allocate resources when hotplug device is
* inserted and PCI bus is rescanned.
*/
static void quirk_hotplug_bridge(struct pci_dev *dev)
@@ -2955,6 +2955,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
/*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+ dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
+/*
* Some devices may pass our check in pci_intx_mask_supported if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
* support this feature.
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 8fc54b7327b..1576851028d 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -7,7 +7,7 @@ static void pci_free_resources(struct pci_dev *dev)
{
int i;
- msi_remove_pci_irq_vectors(dev);
+ msi_remove_pci_irq_vectors(dev);
pci_cleanup_rom(dev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index d0627fa9f36..3ff2ac7c14e 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -1,5 +1,5 @@
/*
- * PCI searching functions.
+ * PCI searching functions.
*
* Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang
@@ -96,12 +96,12 @@ struct pci_bus * pci_find_bus(int domain, int busnr)
* pci_find_next_bus - begin or continue searching for a PCI bus
* @from: Previous PCI bus found, or %NULL for new search.
*
- * Iterates through the list of known PCI busses. A new search is
+ * Iterates through the list of known PCI buses. A new search is
* initiated by passing %NULL as the @from argument. Otherwise if
* @from is not %NULL, searches continue from next device on the
* global list.
*/
-struct pci_bus *
+struct pci_bus *
pci_find_next_bus(const struct pci_bus *from)
{
struct list_head *n;
@@ -119,11 +119,11 @@ pci_find_next_bus(const struct pci_bus *from)
/**
* pci_get_slot - locate PCI device for a given PCI slot
* @bus: PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
+ * @devfn: encodes number of PCI slot in which the desired PCI
+ * device resides and the logical device number within that slot
* in case of multi-function devices.
*
- * Given a PCI bus and slot/function number, the desired PCI device
+ * Given a PCI bus and slot/function number, the desired PCI device
* is located in the list of PCI devices.
* If the device is found, its reference count is increased and this
* function returns a pointer to its data structure. The caller must
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index bc26d7990cc..219a4106480 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -292,8 +292,8 @@ static void assign_requested_resources_sorted(struct list_head *head,
(!(res->flags & IORESOURCE_ROM_ENABLE))))
add_to_list(fail_head,
dev_res->dev, res,
- 0 /* dont care */,
- 0 /* dont care */);
+ 0 /* don't care */,
+ 0 /* don't care */);
}
reset_resource(res);
}
@@ -667,9 +667,9 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
if (!io) {
pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io)
b_res[0].flags |= IORESOURCE_IO;
/* DECchip 21050 pass 2 errata: the bridge may miss an address
disconnect boundary by one PCI data phase.
@@ -819,7 +819,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
resource_size_t min_align, align;
if (!b_res)
- return;
+ return;
min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -950,7 +950,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (realloc_head && i >= PCI_IOV_RESOURCES &&
i <= PCI_IOV_RESOURCE_END) {
r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
+ add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
children_add_size += r_size;
continue;
}
@@ -982,7 +982,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
min_align = calculate_mem_align(aligns, max_order);
- min_align = max(min_align, window_alignment(bus, b_res->flags & mask));
+ min_align = max(min_align, window_alignment(bus, b_res->flags));
size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
if (children_add_size > add_size)
add_size = children_add_size;
@@ -1136,7 +1136,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
}
/* The root bus? */
- if (!bus->self)
+ if (pci_is_root_bus(bus))
return;
switch (bus->self->class >> 8) {
@@ -1456,8 +1456,8 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
/*
* first try will not touch pci bridge res
- * second and later try will clear small leaf bridge res
- * will stop till to the max deepth if can not find good one
+ * second and later tries will clear small leaf bridge res
+ * will stop at the max depth if it cannot find a good one
*/
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 07f2eddc09c..83c4d3bc47a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -159,7 +159,7 @@ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
return 0;
}
-static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
int resno, resource_size_t size)
{
struct resource *root, *conflict;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index c1e9284a677..448ca562d1f 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -53,7 +53,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
- "66 MHz PCI-X", /* 0x02 */
+ "66 MHz PCI-X", /* 0x02 */
"100 MHz PCI-X", /* 0x03 */
"133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index e1c1ec54089..24750a1b39b 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -44,7 +44,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
default:
err = -EINVAL;
goto error;
- };
+ }
err = -EIO;
if (cfg_ret != PCIBIOS_SUCCESSFUL)
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index b8f5acf0226..de24232c519 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -245,7 +245,7 @@ static int at91_cf_dt_init(struct platform_device *pdev)
}
#endif
-static int __init at91_cf_probe(struct platform_device *pdev)
+static int at91_cf_probe(struct platform_device *pdev)
{
struct at91_cf_socket *cf;
struct at91_cf_data *board = pdev->dev.platform_data;
@@ -354,7 +354,7 @@ fail0a:
return status;
}
-static int __exit at91_cf_remove(struct platform_device *pdev)
+static int at91_cf_remove(struct platform_device *pdev)
{
struct at91_cf_socket *cf = platform_get_drvdata(pdev);
@@ -404,14 +404,13 @@ static struct platform_driver at91_cf_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_cf_dt_ids),
},
- .remove = __exit_p(at91_cf_remove),
+ .probe = at91_cf_probe,
+ .remove = at91_cf_remove,
.suspend = at91_cf_suspend,
.resume = at91_cf_resume,
};
-/*--------------------------------------------------------------------------*/
-
-module_platform_driver_probe(at91_cf_driver, at91_cf_probe);
+module_platform_driver(at91_cf_driver);
MODULE_DESCRIPTION("AT91 Compact Flash Driver");
MODULE_AUTHOR("David Brownell");
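The at91_cf conversion above is not just churn: module_platform_driver_probe() registers the driver through platform_driver_probe(), which calls a (possibly __init) probe routine exactly once and cannot take part in deferred probing. Switching to module_platform_driver() keeps .probe in the driver structure for the lifetime of the module, which is why the __init/__exit annotations on at91_cf_probe() and at91_cf_remove() had to go as well. Roughly, the new helper expands to the boilerplate below (an approximation of the generated code):

/* Approximate expansion of module_platform_driver(at91_cf_driver). */
static int __init at91_cf_driver_init(void)
{
	return platform_driver_register(&at91_cf_driver);
}
module_init(at91_cf_driver_init);

static void __exit at91_cf_driver_exit(void)
{
	platform_driver_unregister(&at91_cf_driver);
}
module_exit(at91_cf_driver_exit);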
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 2deacbb2ffd..757119b8714 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -992,16 +992,17 @@ static ssize_t field##_show (struct device *dev, struct device_attribute *attr,
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \
-}
+} \
+static DEVICE_ATTR_RO(field);
#define pcmcia_device_stringattr(name, field) \
static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
return p_dev->field ? sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \
-}
+} \
+static DEVICE_ATTR_RO(name);
-pcmcia_device_attr(func, socket, "0x%02x\n");
pcmcia_device_attr(func_id, has_func_id, "0x%02x\n");
pcmcia_device_attr(manf_id, has_manf_id, "0x%04x\n");
pcmcia_device_attr(card_id, has_card_id, "0x%04x\n");
@@ -1010,8 +1011,16 @@ pcmcia_device_stringattr(prod_id2, prod_id[1]);
pcmcia_device_stringattr(prod_id3, prod_id[2]);
pcmcia_device_stringattr(prod_id4, prod_id[3]);
-static ssize_t pcmcia_show_resources(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t function_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ return p_dev->socket ? sprintf(buf, "0x%02x\n", p_dev->func) : -ENODEV;
+}
+static DEVICE_ATTR_RO(function);
+
+static ssize_t resources_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
char *str = buf;
@@ -1022,8 +1031,9 @@ static ssize_t pcmcia_show_resources(struct device *dev,
return str - buf;
}
+static DEVICE_ATTR_RO(resources);
-static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
@@ -1033,8 +1043,8 @@ static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute
return sprintf(buf, "on\n");
}
-static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pm_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
int ret = 0;
@@ -1049,7 +1059,7 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
return ret ? ret : count;
}
-
+static DEVICE_ATTR_RW(pm_state);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -1072,8 +1082,9 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
p_dev->func, p_dev->device_no,
hash[0], hash[1], hash[2], hash[3]);
}
+static DEVICE_ATTR_RO(modalias);
-static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
+static ssize_t allow_func_id_match_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
@@ -1088,22 +1099,24 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
return count;
}
-
-static struct device_attribute pcmcia_dev_attrs[] = {
- __ATTR(function, 0444, func_show, NULL),
- __ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state),
- __ATTR(resources, 0444, pcmcia_show_resources, NULL),
- __ATTR_RO(func_id),
- __ATTR_RO(manf_id),
- __ATTR_RO(card_id),
- __ATTR_RO(prod_id1),
- __ATTR_RO(prod_id2),
- __ATTR_RO(prod_id3),
- __ATTR_RO(prod_id4),
- __ATTR_RO(modalias),
- __ATTR(allow_func_id_match, 0200, NULL, pcmcia_store_allow_func_id_match),
- __ATTR_NULL,
+static DEVICE_ATTR_WO(allow_func_id_match);
+
+static struct attribute *pcmcia_dev_attrs[] = {
+ &dev_attr_resources.attr,
+ &dev_attr_pm_state.attr,
+ &dev_attr_function.attr,
+ &dev_attr_func_id.attr,
+ &dev_attr_manf_id.attr,
+ &dev_attr_card_id.attr,
+ &dev_attr_prod_id1.attr,
+ &dev_attr_prod_id2.attr,
+ &dev_attr_prod_id3.attr,
+ &dev_attr_prod_id4.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_allow_func_id_match.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pcmcia_dev);
/* PM support, also needed for reset */
@@ -1389,7 +1402,7 @@ struct bus_type pcmcia_bus_type = {
.name = "pcmcia",
.uevent = pcmcia_bus_uevent,
.match = pcmcia_bus_match,
- .dev_attrs = pcmcia_dev_attrs,
+ .dev_groups = pcmcia_dev_groups,
.probe = pcmcia_device_probe,
.remove = pcmcia_device_remove,
.suspend = pcmcia_dev_suspend,
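The ds.c hunks above replace the open-coded array of struct device_attribute (.dev_attrs) with per-attribute DEVICE_ATTR_*() declarations gathered into an attribute group that the bus exposes via .dev_groups. The same pattern in isolation, with hypothetical names, looks roughly like this:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only sysfs attribute using the new-style helpers. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "hello\n");
}
static DEVICE_ATTR_RO(example);		/* defines dev_attr_example, mode 0444 */

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* defines example_groups */

/* A bus or class then points .dev_groups at example_groups instead of
 * filling a .dev_attrs array as before. */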
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index a007321ad31..1b206eac5f9 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -30,6 +30,8 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 18c0d8d1ddf..182034d2ef5 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -48,7 +48,9 @@
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/bitops.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index a4c16ee5c71..622dd6fe734 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -777,15 +777,4 @@ static struct pci_driver pd6729_pci_driver = {
.remove = pd6729_pci_remove,
};
-static int pd6729_module_init(void)
-{
- return pci_register_driver(&pd6729_pci_driver);
-}
-
-static void pd6729_module_exit(void)
-{
- pci_unregister_driver(&pd6729_pci_driver);
-}
-
-module_init(pd6729_module_init);
-module_exit(pd6729_module_exit);
+module_pci_driver(pd6729_pci_driver);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 6b4ff099fb1..dc18a3a5e01 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1439,20 +1439,6 @@ static struct pci_driver yenta_cardbus_driver = {
.driver.pm = YENTA_PM_OPS,
};
-
-static int __init yenta_socket_init(void)
-{
- return pci_register_driver(&yenta_cardbus_driver);
-}
-
-
-static void __exit yenta_socket_exit(void)
-{
- pci_unregister_driver(&yenta_cardbus_driver);
-}
-
-
-module_init(yenta_socket_init);
-module_exit(yenta_socket_exit);
+module_pci_driver(yenta_cardbus_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
new file mode 100644
index 00000000000..a344f3d5236
--- /dev/null
+++ b/drivers/phy/Kconfig
@@ -0,0 +1,54 @@
+#
+# PHY
+#
+
+menu "PHY Subsystem"
+
+config GENERIC_PHY
+ tristate "PHY Core"
+ help
+ Generic PHY support.
+
+ This framework is designed to provide a generic interface for PHY
+ devices present in the kernel. This layer will have the generic
+ API by which phy drivers can create PHY using the phy framework and
+ phy users can obtain reference to the PHY. All the users of this
+ framework should select this config.
+
+config PHY_EXYNOS_MIPI_VIDEO
+ tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
+ help
+ Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
+ and EXYNOS SoCs.
+
+config OMAP_USB2
+ tristate "OMAP USB2 PHY Driver"
+ depends on ARCH_OMAP2PLUS
+ select GENERIC_PHY
+ select USB_PHY
+ select OMAP_CONTROL_USB
+ help
+ Enable this to support the transceiver that is part of SOC. This
+ driver takes care of all the PHY functionality apart from comparator.
+ The USB OTG controller communicates with the comparator using this
+ driver.
+
+config TWL4030_USB
+ tristate "TWL4030 USB Transceiver Driver"
+ depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+ select GENERIC_PHY
+ select USB_PHY
+ help
+ Enable this to support the USB OTG transceiver on TWL4030
+ family chips (including the TWL5030 and TPS659x0 devices).
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
+
+config PHY_EXYNOS_DP_VIDEO
+ tristate "EXYNOS SoC series Display Port PHY driver"
+ depends on OF
+ select GENERIC_PHY
+ help
+ Support for Display Port PHY found on Samsung EXYNOS SoCs.
+
+endmenu
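The GENERIC_PHY help text above describes the provider/consumer split that the new framework enforces. For orientation, a PHY consumer built on the API added in drivers/phy/phy-core.c below might look roughly like the sketch that follows; the driver and the "usb2-phy" name are hypothetical, while the calls themselves (devm_phy_get(), phy_init(), phy_power_on()) are the ones introduced by this series.

#include <linux/err.h>
#include <linux/phy/phy.h>

/*
 * Hypothetical consumer: look up a PHY by the name given in the DT
 * "phy-names" property, initialize it and power it on.
 */
static int example_consumer_enable(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	return phy_power_on(phy);
}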
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
new file mode 100644
index 00000000000..d0caae9cfb8
--- /dev/null
+++ b/drivers/phy/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the phy drivers.
+#
+
+obj-$(CONFIG_GENERIC_PHY) += phy-core.o
+obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
+obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
+obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
+obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
new file mode 100644
index 00000000000..03cf8fb8155
--- /dev/null
+++ b/drivers/phy/phy-core.c
@@ -0,0 +1,698 @@
+/*
+ * phy-core.c -- Generic Phy framework.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+
+static struct class *phy_class;
+static DEFINE_MUTEX(phy_provider_mutex);
+static LIST_HEAD(phy_provider_list);
+static DEFINE_IDA(phy_ida);
+
+static void devm_phy_release(struct device *dev, void *res)
+{
+ struct phy *phy = *(struct phy **)res;
+
+ phy_put(phy);
+}
+
+static void devm_phy_provider_release(struct device *dev, void *res)
+{
+ struct phy_provider *phy_provider = *(struct phy_provider **)res;
+
+ of_phy_provider_unregister(phy_provider);
+}
+
+static void devm_phy_consume(struct device *dev, void *res)
+{
+ struct phy *phy = *(struct phy **)res;
+
+ phy_destroy(phy);
+}
+
+static int devm_phy_match(struct device *dev, void *res, void *match_data)
+{
+ return res == match_data;
+}
+
+static struct phy *phy_lookup(struct device *device, const char *port)
+{
+ unsigned int count;
+ struct phy *phy;
+ struct device *dev;
+ struct phy_consumer *consumers;
+ struct class_dev_iter iter;
+
+ class_dev_iter_init(&iter, phy_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ phy = to_phy(dev);
+ count = phy->init_data->num_consumers;
+ consumers = phy->init_data->consumers;
+ while (count--) {
+ if (!strcmp(consumers->dev_name, dev_name(device)) &&
+ !strcmp(consumers->port, port)) {
+ class_dev_iter_exit(&iter);
+ return phy;
+ }
+ consumers++;
+ }
+ }
+
+ class_dev_iter_exit(&iter);
+ return ERR_PTR(-ENODEV);
+}
+
+static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
+{
+ struct phy_provider *phy_provider;
+
+ list_for_each_entry(phy_provider, &phy_provider_list, list) {
+ if (phy_provider->dev->of_node == node)
+ return phy_provider;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+int phy_pm_runtime_get(struct phy *phy)
+{
+ if (!pm_runtime_enabled(&phy->dev))
+ return -ENOTSUPP;
+
+ return pm_runtime_get(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
+
+int phy_pm_runtime_get_sync(struct phy *phy)
+{
+ if (!pm_runtime_enabled(&phy->dev))
+ return -ENOTSUPP;
+
+ return pm_runtime_get_sync(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
+
+int phy_pm_runtime_put(struct phy *phy)
+{
+ if (!pm_runtime_enabled(&phy->dev))
+ return -ENOTSUPP;
+
+ return pm_runtime_put(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
+
+int phy_pm_runtime_put_sync(struct phy *phy)
+{
+ if (!pm_runtime_enabled(&phy->dev))
+ return -ENOTSUPP;
+
+ return pm_runtime_put_sync(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
+
+void phy_pm_runtime_allow(struct phy *phy)
+{
+ if (!pm_runtime_enabled(&phy->dev))
+ return;
+
+ pm_runtime_allow(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
+
+void phy_pm_runtime_forbid(struct phy *phy)
+{
+ if (!pm_runtime_enabled(&phy->dev))
+ return;
+
+ pm_runtime_forbid(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
+
+int phy_init(struct phy *phy)
+{
+ int ret;
+
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
+
+ mutex_lock(&phy->mutex);
+ if (phy->init_count++ == 0 && phy->ops->init) {
+ ret = phy->ops->init(phy);
+ if (ret < 0) {
+ dev_err(&phy->dev, "phy init failed --> %d\n", ret);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put(phy);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_init);
+
+int phy_exit(struct phy *phy)
+{
+ int ret;
+
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
+
+ mutex_lock(&phy->mutex);
+ if (--phy->init_count == 0 && phy->ops->exit) {
+ ret = phy->ops->exit(phy);
+ if (ret < 0) {
+ dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put(phy);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_exit);
+
+int phy_power_on(struct phy *phy)
+{
+ int ret = -ENOTSUPP;
+
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
+
+ mutex_lock(&phy->mutex);
+ if (phy->power_count++ == 0 && phy->ops->power_on) {
+ ret = phy->ops->power_on(phy);
+ if (ret < 0) {
+ dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_power_on);
+
+int phy_power_off(struct phy *phy)
+{
+ int ret = -ENOTSUPP;
+
+ mutex_lock(&phy->mutex);
+ if (--phy->power_count == 0 && phy->ops->power_off) {
+ ret = phy->ops->power_off(phy);
+ if (ret < 0) {
+ dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put(phy);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_power_off);
+
+/**
+ * of_phy_get() - lookup and obtain a reference to a phy by phandle
+ * @dev: device that requests this phy
+ * @index: the index of the phy
+ *
+ * Returns the phy associated with the given phandle value,
+ * after getting a refcount to it or -ENODEV if there is no such phy or
+ * -EPROBE_DEFER if there is a phandle to the phy, but the device is
+ * not yet loaded. This function uses of_xlate call back function provided
+ * while registering the phy_provider to find the phy instance.
+ */
+static struct phy *of_phy_get(struct device *dev, int index)
+{
+ int ret;
+ struct phy_provider *phy_provider;
+ struct phy *phy = NULL;
+ struct of_phandle_args args;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "phys", "#phy-cells",
+ index, &args);
+ if (ret) {
+ dev_dbg(dev, "failed to get phy in %s node\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&phy_provider_mutex);
+ phy_provider = of_phy_provider_lookup(args.np);
+ if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
+ phy = ERR_PTR(-EPROBE_DEFER);
+ goto err0;
+ }
+
+ phy = phy_provider->of_xlate(phy_provider->dev, &args);
+ module_put(phy_provider->owner);
+
+err0:
+ mutex_unlock(&phy_provider_mutex);
+ of_node_put(args.np);
+
+ return phy;
+}
+
+/**
+ * phy_put() - release the PHY
+ * @phy: the phy returned by phy_get()
+ *
+ * Releases a refcount the caller received from phy_get().
+ */
+void phy_put(struct phy *phy)
+{
+ if (IS_ERR(phy))
+ return;
+
+ module_put(phy->ops->owner);
+ put_device(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_put);
+
+/**
+ * devm_phy_put() - release the PHY
+ * @dev: device that wants to release this phy
+ * @phy: the phy returned by devm_phy_get()
+ *
+ * destroys the devres associated with this phy and invokes phy_put
+ * to release the phy.
+ */
+void devm_phy_put(struct device *dev, struct phy *phy)
+{
+ int r;
+
+ r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
+ dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_phy_put);
+
+/**
+ * of_phy_simple_xlate() - returns the phy instance from phy provider
+ * @dev: the PHY provider device
+ * @args: of_phandle_args (not used here)
+ *
+ * Intended to be used by phy provider for the common case where #phy-cells is
+ * 0. For other cases where #phy-cells is greater than '0', the phy provider
+ * should provide a custom of_xlate function that reads the *args* and returns
+ * the appropriate phy.
+ */
+struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
+ *args)
+{
+ struct phy *phy;
+ struct class_dev_iter iter;
+ struct device_node *node = dev->of_node;
+
+ class_dev_iter_init(&iter, phy_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ phy = to_phy(dev);
+ if (node != phy->dev.of_node)
+ continue;
+
+ class_dev_iter_exit(&iter);
+ return phy;
+ }
+
+ class_dev_iter_exit(&iter);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
+
+/**
+ * phy_get() - lookup and obtain a reference to a phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * -ENODEV if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_get(struct device *dev, const char *string)
+{
+ int index = 0;
+ struct phy *phy = NULL;
+
+ if (string == NULL) {
+ dev_WARN(dev, "missing string\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dev->of_node) {
+ index = of_property_match_string(dev->of_node, "phy-names",
+ string);
+ phy = of_phy_get(dev, index);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "unable to find phy\n");
+ return phy;
+ }
+ } else {
+ phy = phy_lookup(dev, string);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "unable to find phy\n");
+ return phy;
+ }
+ }
+
+ if (!try_module_get(phy->ops->owner))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&phy->dev);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(phy_get);
+
+/**
+ * devm_phy_get() - lookup and obtain a reference to a phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using phy_get(), and associates a device with it using
+ * devres. On driver detach, release function is invoked on the devres data,
+ * then, devres data is freed.
+ */
+struct phy *devm_phy_get(struct device *dev, const char *string)
+{
+ struct phy **ptr, *phy;
+
+ ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ phy = phy_get(dev, string);
+ if (!IS_ERR(phy)) {
+ *ptr = phy;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_get);
+
+/**
+ * phy_create() - create a new phy
+ * @dev: device that is creating the new phy
+ * @ops: function pointers for performing phy operations
+ * @init_data: contains the list of PHY consumers or NULL
+ *
+ * Called to create a phy using phy framework.
+ */
+struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
+ struct phy_init_data *init_data)
+{
+ int ret;
+ int id;
+ struct phy *phy;
+
+ if (!dev) {
+ dev_WARN(dev, "no device provided for PHY\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ phy = kzalloc(sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "unable to get id\n");
+ ret = id;
+ goto err0;
+ }
+
+ device_initialize(&phy->dev);
+ mutex_init(&phy->mutex);
+
+ phy->dev.class = phy_class;
+ phy->dev.parent = dev;
+ phy->dev.of_node = dev->of_node;
+ phy->id = id;
+ phy->ops = ops;
+ phy->init_data = init_data;
+
+ ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
+ if (ret)
+ goto err1;
+
+ ret = device_add(&phy->dev);
+ if (ret)
+ goto err1;
+
+ if (pm_runtime_enabled(dev)) {
+ pm_runtime_enable(&phy->dev);
+ pm_runtime_no_callbacks(&phy->dev);
+ }
+
+ return phy;
+
+err1:
+ ida_remove(&phy_ida, phy->id);
+ put_device(&phy->dev);
+ kfree(phy);
+
+err0:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(phy_create);
+
+/**
+ * devm_phy_create() - create a new phy
+ * @dev: device that is creating the new phy
+ * @ops: function pointers for performing phy operations
+ * @init_data: contains the list of PHY consumers or NULL
+ *
+ * Creates a new PHY device adding it to the PHY class.
+ * While at that, it also associates the device with the phy using devres.
+ * On driver detach, release function is invoked on the devres data,
+ * then, devres data is freed.
+ */
+struct phy *devm_phy_create(struct device *dev, const struct phy_ops *ops,
+ struct phy_init_data *init_data)
+{
+ struct phy **ptr, *phy;
+
+ ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ phy = phy_create(dev, ops, init_data);
+ if (!IS_ERR(phy)) {
+ *ptr = phy;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_create);
+
+/**
+ * phy_destroy() - destroy the phy
+ * @phy: the phy to be destroyed
+ *
+ * Called to destroy the phy.
+ */
+void phy_destroy(struct phy *phy)
+{
+ pm_runtime_disable(&phy->dev);
+ device_unregister(&phy->dev);
+}
+EXPORT_SYMBOL_GPL(phy_destroy);
+
+/**
+ * devm_phy_destroy() - destroy the PHY
+ * @dev: device that wants to release this phy
+ * @phy: the phy returned by devm_phy_get()
+ *
+ * destroys the devres associated with this phy and invokes phy_destroy
+ * to destroy the phy.
+ */
+void devm_phy_destroy(struct device *dev, struct phy *phy)
+{
+ int r;
+
+ r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
+ dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_phy_destroy);
+
+/**
+ * __of_phy_provider_register() - create/register phy provider with the framework
+ * @dev: struct device of the phy provider
+ * @owner: the module owner containing of_xlate
+ * @of_xlate: function pointer to obtain phy instance from phy provider
+ *
+ * Creates struct phy_provider from dev and of_xlate function pointer.
+ * This is used in the case of dt boot for finding the phy instance from
+ * phy provider.
+ */
+struct phy_provider *__of_phy_provider_register(struct device *dev,
+ struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ struct phy_provider *phy_provider;
+
+ phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
+ if (!phy_provider)
+ return ERR_PTR(-ENOMEM);
+
+ phy_provider->dev = dev;
+ phy_provider->owner = owner;
+ phy_provider->of_xlate = of_xlate;
+
+ mutex_lock(&phy_provider_mutex);
+ list_add_tail(&phy_provider->list, &phy_provider_list);
+ mutex_unlock(&phy_provider_mutex);
+
+ return phy_provider;
+}
+EXPORT_SYMBOL_GPL(__of_phy_provider_register);
+
+/**
+ * __devm_of_phy_provider_register() - create/register phy provider with the
+ * framework
+ * @dev: struct device of the phy provider
+ * @owner: the module owner containing of_xlate
+ * @of_xlate: function pointer to obtain phy instance from phy provider
+ *
+ * Creates struct phy_provider from dev and of_xlate function pointer.
+ * This is used in the case of dt boot for finding the phy instance from
+ * phy provider. While at that, it also associates the device with the
+ * phy provider using devres. On driver detach, release function is invoked
+ * on the devres data, then, devres data is freed.
+ */
+struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
+ struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ struct phy_provider **ptr, *phy_provider;
+
+ ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
+ if (!IS_ERR(phy_provider)) {
+ *ptr = phy_provider;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return phy_provider;
+}
+EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
+
+/**
+ * of_phy_provider_unregister() - unregister phy provider from the framework
+ * @phy_provider: phy provider returned by of_phy_provider_register()
+ *
+ * Removes the phy_provider created using of_phy_provider_register().
+ */
+void of_phy_provider_unregister(struct phy_provider *phy_provider)
+{
+ if (IS_ERR(phy_provider))
+ return;
+
+ mutex_lock(&phy_provider_mutex);
+ list_del(&phy_provider->list);
+ kfree(phy_provider);
+ mutex_unlock(&phy_provider_mutex);
+}
+EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
+
+/**
+ * devm_of_phy_provider_unregister() - remove phy provider from the framework
+ * @dev: struct device of the phy provider
+ *
+ * destroys the devres associated with this phy provider and invokes
+ * of_phy_provider_unregister to unregister the phy provider.
+ */
+void devm_of_phy_provider_unregister(struct device *dev,
+ struct phy_provider *phy_provider) {
+ int r;
+
+ r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
+ phy_provider);
+ dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
+
+/**
+ * phy_release() - release the phy
+ * @dev: the dev member within phy
+ *
+ * When the last reference to the device is removed, it is called
+ * from the embedded kobject as release method.
+ */
+static void phy_release(struct device *dev)
+{
+ struct phy *phy;
+
+ phy = to_phy(dev);
+ dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
+ ida_remove(&phy_ida, phy->id);
+ kfree(phy);
+}
+
+static int __init phy_core_init(void)
+{
+ phy_class = class_create(THIS_MODULE, "phy");
+ if (IS_ERR(phy_class)) {
+ pr_err("failed to create phy class --> %ld\n",
+ PTR_ERR(phy_class));
+ return PTR_ERR(phy_class);
+ }
+
+ phy_class->dev_release = phy_release;
+
+ return 0;
+}
+module_init(phy_core_init);
+
+static void __exit phy_core_exit(void)
+{
+ class_destroy(phy_class);
+}
+module_exit(phy_core_exit);
+
+MODULE_DESCRIPTION("Generic PHY Framework");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
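On the provider side, the pattern that the Exynos and OMAP drivers below follow is: fill in a struct phy_ops, create the PHY with devm_phy_create(), and register an of_xlate callback with devm_of_phy_provider_register(). A stripped-down sketch with hypothetical names and no real hardware access, assuming a device-tree node with #phy-cells = <0>:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/* Hypothetical single-PHY provider built on the core added above. */
static int example_phy_power_on(struct phy *phy)
{
	/* the hardware enable sequence would go here */
	return 0;
}

static const struct phy_ops example_phy_ops = {
	.power_on	= example_phy_power_on,
	.owner		= THIS_MODULE,
};

static int example_phy_probe(struct platform_device *pdev)
{
	struct phy_provider *provider;
	struct phy *phy;

	phy = devm_phy_create(&pdev->dev, &example_phy_ops, NULL);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* #phy-cells = <0>, so the generic translation function is enough */
	provider = devm_of_phy_provider_register(&pdev->dev,
						 of_phy_simple_xlate);
	return PTR_ERR_OR_ZERO(provider);
}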
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
new file mode 100644
index 00000000000..1dbe6ce7b2c
--- /dev/null
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -0,0 +1,111 @@
+/*
+ * Samsung EXYNOS SoC series Display Port PHY driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* DPTX_PHY_CONTROL register */
+#define EXYNOS_DPTX_PHY_ENABLE (1 << 0)
+
+struct exynos_dp_video_phy {
+ void __iomem *regs;
+};
+
+static int __set_phy_state(struct exynos_dp_video_phy *state, unsigned int on)
+{
+ u32 reg;
+
+ reg = readl(state->regs);
+ if (on)
+ reg |= EXYNOS_DPTX_PHY_ENABLE;
+ else
+ reg &= ~EXYNOS_DPTX_PHY_ENABLE;
+ writel(reg, state->regs);
+
+ return 0;
+}
+
+static int exynos_dp_video_phy_power_on(struct phy *phy)
+{
+ struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
+
+ return __set_phy_state(state, 1);
+}
+
+static int exynos_dp_video_phy_power_off(struct phy *phy)
+{
+ struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
+
+ return __set_phy_state(state, 0);
+}
+
+static struct phy_ops exynos_dp_video_phy_ops = {
+ .power_on = exynos_dp_video_phy_power_on,
+ .power_off = exynos_dp_video_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int exynos_dp_video_phy_probe(struct platform_device *pdev)
+{
+ struct exynos_dp_video_phy *state;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ state->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create Display Port PHY\n");
+ return PTR_ERR(phy);
+ }
+ phy_set_drvdata(phy, state);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_dp_video_phy_of_match[] = {
+ { .compatible = "samsung,exynos5250-dp-video-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_dp_video_phy_of_match);
+
+static struct platform_driver exynos_dp_video_phy_driver = {
+ .probe = exynos_dp_video_phy_probe,
+ .driver = {
+ .name = "exynos-dp-video-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = exynos_dp_video_phy_of_match,
+ }
+};
+module_platform_driver(exynos_dp_video_phy_driver);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung EXYNOS SoC DP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
new file mode 100644
index 00000000000..0c5efab11af
--- /dev/null
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -0,0 +1,176 @@
+/*
+ * Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
+#define EXYNOS_MIPI_PHY_CONTROL(n) ((n) * 4)
+#define EXYNOS_MIPI_PHY_ENABLE (1 << 0)
+#define EXYNOS_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS_MIPI_PHY_RESET_MASK (3 << 1)
+
+enum exynos_mipi_phy_id {
+ EXYNOS_MIPI_PHY_ID_CSIS0,
+ EXYNOS_MIPI_PHY_ID_DSIM0,
+ EXYNOS_MIPI_PHY_ID_CSIS1,
+ EXYNOS_MIPI_PHY_ID_DSIM1,
+ EXYNOS_MIPI_PHYS_NUM
+};
+
+#define is_mipi_dsim_phy_id(id) \
+ ((id) == EXYNOS_MIPI_PHY_ID_DSIM0 || (id) == EXYNOS_MIPI_PHY_ID_DSIM1)
+
+struct exynos_mipi_video_phy {
+ spinlock_t slock;
+ struct video_phy_desc {
+ struct phy *phy;
+ unsigned int index;
+ } phys[EXYNOS_MIPI_PHYS_NUM];
+ void __iomem *regs;
+};
+
+static int __set_phy_state(struct exynos_mipi_video_phy *state,
+ enum exynos_mipi_phy_id id, unsigned int on)
+{
+ void __iomem *addr;
+ u32 reg, reset;
+
+ addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
+
+ if (is_mipi_dsim_phy_id(id))
+ reset = EXYNOS_MIPI_PHY_MRESETN;
+ else
+ reset = EXYNOS_MIPI_PHY_SRESETN;
+
+ spin_lock(&state->slock);
+ reg = readl(addr);
+ if (on)
+ reg |= reset;
+ else
+ reg &= ~reset;
+ writel(reg, addr);
+
+ /* Clear ENABLE bit only if MRESETN, SRESETN bits are not set. */
+ if (on)
+ reg |= EXYNOS_MIPI_PHY_ENABLE;
+ else if (!(reg & EXYNOS_MIPI_PHY_RESET_MASK))
+ reg &= ~EXYNOS_MIPI_PHY_ENABLE;
+
+ writel(reg, addr);
+ spin_unlock(&state->slock);
+ return 0;
+}
+
+#define to_mipi_video_phy(desc) \
+ container_of((desc), struct exynos_mipi_video_phy, phys[(desc)->index]);
+
+static int exynos_mipi_video_phy_power_on(struct phy *phy)
+{
+ struct video_phy_desc *phy_desc = phy_get_drvdata(phy);
+ struct exynos_mipi_video_phy *state = to_mipi_video_phy(phy_desc);
+
+ return __set_phy_state(state, phy_desc->index, 1);
+}
+
+static int exynos_mipi_video_phy_power_off(struct phy *phy)
+{
+ struct video_phy_desc *phy_desc = phy_get_drvdata(phy);
+ struct exynos_mipi_video_phy *state = to_mipi_video_phy(phy_desc);
+
+ return __set_phy_state(state, phy_desc->index, 0);
+}
+
+static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct exynos_mipi_video_phy *state = dev_get_drvdata(dev);
+
+ if (WARN_ON(args->args[0] > EXYNOS_MIPI_PHYS_NUM))
+ return ERR_PTR(-ENODEV);
+
+ return state->phys[args->args[0]].phy;
+}
+
+static struct phy_ops exynos_mipi_video_phy_ops = {
+ .power_on = exynos_mipi_video_phy_power_on,
+ .power_off = exynos_mipi_video_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
+{
+ struct exynos_mipi_video_phy *state;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ unsigned int i;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ state->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+
+ dev_set_drvdata(dev, state);
+ spin_lock_init(&state->slock);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ exynos_mipi_video_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
+ struct phy *phy = devm_phy_create(dev,
+ &exynos_mipi_video_phy_ops, NULL);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create PHY %d\n", i);
+ return PTR_ERR(phy);
+ }
+
+ state->phys[i].phy = phy;
+ state->phys[i].index = i;
+ phy_set_drvdata(phy, &state->phys[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id exynos_mipi_video_phy_of_match[] = {
+ { .compatible = "samsung,s5pv210-mipi-video-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_mipi_video_phy_of_match);
+
+static struct platform_driver exynos_mipi_video_phy_driver = {
+ .probe = exynos_mipi_video_phy_probe,
+ .driver = {
+ .of_match_table = exynos_mipi_video_phy_of_match,
+ .name = "exynos-mipi-video-phy",
+ .owner = THIS_MODULE,
+ }
+};
+module_platform_driver(exynos_mipi_video_phy_driver);
+
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI CSI-2/DSI PHY driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index d266861d24f..bfc5c337f99 100644
--- a/drivers/usb/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -28,6 +28,8 @@
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/phy.h>
+#include <linux/of_platform.h>
/**
 * omap_usb2_set_comparator - links the comparator present in the system with
@@ -118,10 +120,42 @@ static int omap_usb2_suspend(struct usb_phy *x, int suspend)
return 0;
}
+static int omap_usb_power_off(struct phy *x)
+{
+ struct omap_usb *phy = phy_get_drvdata(x);
+
+ omap_control_usb_phy_power(phy->control_dev, 0);
+
+ return 0;
+}
+
+static int omap_usb_power_on(struct phy *x)
+{
+ struct omap_usb *phy = phy_get_drvdata(x);
+
+ omap_control_usb_phy_power(phy->control_dev, 1);
+
+ return 0;
+}
+
+static struct phy_ops ops = {
+ .power_on = omap_usb_power_on,
+ .power_off = omap_usb_power_off,
+ .owner = THIS_MODULE,
+};
+
static int omap_usb2_probe(struct platform_device *pdev)
{
- struct omap_usb *phy;
- struct usb_otg *otg;
+ struct omap_usb *phy;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ struct usb_otg *otg;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+
+ if (!node)
+ return -EINVAL;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
@@ -143,12 +177,25 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
- phy->control_dev = omap_get_control_dev();
- if (IS_ERR(phy->control_dev)) {
- dev_dbg(&pdev->dev, "Failed to get control device\n");
- return -ENODEV;
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ control_node = of_parse_phandle(node, "ctrl-module", 0);
+ if (!control_node) {
+ dev_err(&pdev->dev, "Failed to get control device phandle\n");
+ return -EINVAL;
+ }
+
+ control_pdev = of_find_device_by_node(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ return -EINVAL;
}
+ phy->control_dev = &control_pdev->dev;
+
phy->is_suspended = 1;
omap_control_usb_phy_power(phy->control_dev, 0);
@@ -158,6 +205,15 @@ static int omap_usb2_probe(struct platform_device *pdev)
otg->start_srp = omap_usb_start_srp;
otg->phy = &phy->phy;
+ platform_set_drvdata(pdev, phy);
+ pm_runtime_enable(phy->dev);
+
+ generic_phy = devm_phy_create(phy->dev, &ops, NULL);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy);
+
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
@@ -173,10 +229,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
usb_add_phy_dev(&phy->phy);
- platform_set_drvdata(pdev, phy);
-
- pm_runtime_enable(phy->dev);
-
return 0;
}
diff --git a/drivers/usb/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 90730c8762b..daf65e68aaa 100644
--- a/drivers/usb/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
+#include <linux/phy/phy.h>
#include <linux/usb/musb-omap.h>
#include <linux/usb/ulpi.h>
#include <linux/i2c/twl.h>
@@ -421,17 +422,20 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
}
}
-static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
+static int twl4030_phy_power_off(struct phy *phy)
{
+ struct twl4030_usb *twl = phy_get_drvdata(phy);
+
if (twl->asleep)
- return;
+ return 0;
twl4030_phy_power(twl, 0);
twl->asleep = 1;
dev_dbg(twl->dev, "%s\n", __func__);
+ return 0;
}
-static void __twl4030_phy_resume(struct twl4030_usb *twl)
+static void __twl4030_phy_power_on(struct twl4030_usb *twl)
{
twl4030_phy_power(twl, 1);
twl4030_i2c_access(twl, 1);
@@ -440,11 +444,13 @@ static void __twl4030_phy_resume(struct twl4030_usb *twl)
twl4030_i2c_access(twl, 0);
}
-static void twl4030_phy_resume(struct twl4030_usb *twl)
+static int twl4030_phy_power_on(struct phy *phy)
{
+ struct twl4030_usb *twl = phy_get_drvdata(phy);
+
if (!twl->asleep)
- return;
- __twl4030_phy_resume(twl);
+ return 0;
+ __twl4030_phy_power_on(twl);
twl->asleep = 0;
dev_dbg(twl->dev, "%s\n", __func__);
@@ -457,6 +463,7 @@ static void twl4030_phy_resume(struct twl4030_usb *twl)
cancel_delayed_work(&twl->id_workaround_work);
schedule_delayed_work(&twl->id_workaround_work, HZ);
}
+ return 0;
}
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -587,9 +594,9 @@ static void twl4030_id_workaround_work(struct work_struct *work)
}
}
-static int twl4030_usb_phy_init(struct usb_phy *phy)
+static int twl4030_phy_init(struct phy *phy)
{
- struct twl4030_usb *twl = phy_to_twl(phy);
+ struct twl4030_usb *twl = phy_get_drvdata(phy);
enum omap_musb_vbus_id_status status;
/*
@@ -602,25 +609,15 @@ static int twl4030_usb_phy_init(struct usb_phy *phy)
status = twl4030_usb_linkstat(twl);
twl->linkstat = status;
- if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
+ if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
omap_musb_mailbox(twl->linkstat);
+ twl4030_phy_power_on(phy);
+ }
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
return 0;
}
-static int twl4030_set_suspend(struct usb_phy *x, int suspend)
-{
- struct twl4030_usb *twl = phy_to_twl(x);
-
- if (suspend)
- twl4030_phy_suspend(twl, 1);
- else
- twl4030_phy_resume(twl);
-
- return 0;
-}
-
static int twl4030_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
@@ -646,13 +643,23 @@ static int twl4030_set_host(struct usb_otg *otg, struct usb_bus *host)
return 0;
}
+static const struct phy_ops ops = {
+ .init = twl4030_phy_init,
+ .power_on = twl4030_phy_power_on,
+ .power_off = twl4030_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
static int twl4030_usb_probe(struct platform_device *pdev)
{
struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
struct twl4030_usb *twl;
+ struct phy *phy;
int status, err;
struct usb_otg *otg;
struct device_node *np = pdev->dev.of_node;
+ struct phy_provider *phy_provider;
+ struct phy_init_data *init_data = NULL;
twl = devm_kzalloc(&pdev->dev, sizeof *twl, GFP_KERNEL);
if (!twl)
@@ -661,9 +668,10 @@ static int twl4030_usb_probe(struct platform_device *pdev)
if (np)
of_property_read_u32(np, "usb_mode",
(enum twl4030_usb_mode *)&twl->usb_mode);
- else if (pdata)
+ else if (pdata) {
twl->usb_mode = pdata->usb_mode;
- else {
+ init_data = pdata->init_data;
+ } else {
dev_err(&pdev->dev, "twl4030 initialized without pdata\n");
return -EINVAL;
}
@@ -682,13 +690,24 @@ static int twl4030_usb_probe(struct platform_device *pdev)
twl->phy.label = "twl4030";
twl->phy.otg = otg;
twl->phy.type = USB_PHY_TYPE_USB2;
- twl->phy.set_suspend = twl4030_set_suspend;
- twl->phy.init = twl4030_usb_phy_init;
otg->phy = &twl->phy;
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
+ phy_provider = devm_of_phy_provider_register(twl->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ phy = devm_phy_create(twl->dev, &ops, init_data);
+ if (IS_ERR(phy)) {
+ dev_dbg(&pdev->dev, "Failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, twl);
+
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -705,6 +724,8 @@ static int twl4030_usb_probe(struct platform_device *pdev)
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
+ ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
+
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
*
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b6e864e8c9e..33f9dc1f14f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -49,6 +49,35 @@ config PINCTRL_AB8505
bool "AB8505 pin controller driver"
depends on PINCTRL_ABX500 && ARCH_U8500
+config PINCTRL_ADI2
+ bool "ADI pin controller driver"
+ depends on BLACKFIN
+ select PINMUX
+ select IRQ_DOMAIN
+ help
+ This is the pin controller and gpio driver for ADI BF54x, BF60x and
+ future processors. This option is selected automatically when specific
+ machine and arch are selected to build.
+
+config PINCTRL_AS3722
+ bool "Pinctrl and GPIO driver for ams AS3722 PMIC"
+ depends on MFD_AS3722 && GPIOLIB
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ AS3722 device supports the configuration of GPIO pins for different
+ functionality. This driver supports the pinmux, push-pull and
+ open drain configuration for the GPIO pins of AS3722 devices. It also
+ supports the GPIO functionality through gpiolib.
+
+config PINCTRL_BF54x
+ def_bool y if BF54x
+ select PINCTRL_ADI2
+
+config PINCTRL_BF60x
+ def_bool y if BF60x
+ select PINCTRL_ADI2
+
config PINCTRL_AT91
bool "AT91 pinctrl driver"
depends on OF
@@ -80,6 +109,19 @@ config PINCTRL_IMX
select PINMUX
select PINCONF
+config PINCTRL_IMX1_CORE
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_IMX27
+ bool "IMX27 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX27
+ select PINCTRL_IMX1_CORE
+ help
+ Say Y here to enable the imx27 pinctrl driver
+
config PINCTRL_IMX35
bool "IMX35 pinctrl driver"
depends on OF
@@ -88,6 +130,14 @@ config PINCTRL_IMX35
help
Say Y here to enable the imx35 pinctrl driver
+config PINCTRL_IMX50
+ bool "IMX50 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX50
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx50 pinctrl driver
+
config PINCTRL_IMX51
bool "IMX51 pinctrl driver"
depends on OF
@@ -292,6 +342,10 @@ config PINCTRL_XWAY
depends on SOC_TYPE_XWAY
depends on PINCTRL_LANTIQ
+config PINCTRL_TB10X
+ bool
+ depends on ARC_PLAT_TB10X
+
endmenu
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 496d9bf9e1b..4f7be2921aa 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -14,11 +14,18 @@ obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o
obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o
obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o
obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o
+obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
+obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
+obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
+obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
+obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
+obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o
+obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o
obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o
obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o
obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o
@@ -52,6 +59,7 @@ obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
+obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 92f86ab30a1..5ee61a47001 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -462,6 +462,20 @@ struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
}
EXPORT_SYMBOL_GPL(pinctrl_find_and_add_gpio_range);
+int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group,
+ const unsigned **pins, unsigned *num_pins)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ int gs;
+
+ gs = pinctrl_get_group_selector(pctldev, pin_group);
+ if (gs < 0)
+ return gs;
+
+ return pctlops->get_group_pins(pctldev, gs, pins, num_pins);
+}
+EXPORT_SYMBOL_GPL(pinctrl_get_group_pins);
+
/**
* pinctrl_find_gpio_range_from_pin() - locate the GPIO range for a pin
* @pctldev: the pin controller device to look in
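For context, a minimal sketch of how a pin controller driver could consume the pinctrl_get_group_pins() helper exported above; the caller name example_count_group_pins() and the group name "uart0grp" are hypothetical, used purely for illustration and not part of this patch:

	static int example_count_group_pins(struct pinctrl_dev *pctldev)
	{
		const unsigned *pins;
		unsigned num_pins;
		int ret;

		/* Look up the pins of an assumed group; a negative errno is
		 * returned if the group does not exist on this controller.
		 */
		ret = pinctrl_get_group_pins(pctldev, "uart0grp", &pins, &num_pins);
		if (ret)
			return ret;

		return num_pins;
	}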
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 48e21a22948..ae1f760cbdd 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -408,7 +408,7 @@ static struct platform_driver armada_370_pinctrl_driver = {
.driver = {
.name = "armada-370-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(armada_370_pinctrl_of_match),
+ .of_match_table = armada_370_pinctrl_of_match,
},
.probe = armada_370_pinctrl_probe,
.remove = armada_370_pinctrl_remove,
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index ab5dc04b3e8..843a51f9d12 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -455,7 +455,7 @@ static struct platform_driver armada_xp_pinctrl_driver = {
.driver = {
.name = "armada-xp-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(armada_xp_pinctrl_of_match),
+ .of_match_table = armada_xp_pinctrl_of_match,
},
.probe = armada_xp_pinctrl_probe,
.remove = armada_xp_pinctrl_remove,
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 29f7e4fc7ca..47268393af3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -335,7 +335,7 @@ static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1;
- gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION2);
+ gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION3);
switch (config) {
case 1:
@@ -806,7 +806,7 @@ static struct platform_driver dove_pinctrl_driver = {
.driver = {
.name = "dove-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(dove_pinctrl_of_match),
+ .of_match_table = dove_pinctrl_of_match,
},
.probe = dove_pinctrl_probe,
.remove = dove_pinctrl_remove,
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index cdd483df673..6b504b5935a 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -471,7 +471,7 @@ static struct platform_driver kirkwood_pinctrl_driver = {
.driver = {
.name = "kirkwood-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(kirkwood_pinctrl_of_match),
+ .of_match_table = kirkwood_pinctrl_of_match,
},
.probe = kirkwood_pinctrl_probe,
.remove = kirkwood_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-adi2-bf54x.c b/drivers/pinctrl/pinctrl-adi2-bf54x.c
new file mode 100644
index 00000000000..ea9d9ab9cda
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-adi2-bf54x.c
@@ -0,0 +1,592 @@
+/*
+ * Pinctrl Driver for ADI GPIO2 controller
+ *
+ * Copyright 2007-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPLv2 or later
+ */
+
+#include <asm/portmux.h>
+#include "pinctrl-adi2.h"
+
+static const struct pinctrl_pin_desc adi_pads[] = {
+ PINCTRL_PIN(0, "PA0"),
+ PINCTRL_PIN(1, "PA1"),
+ PINCTRL_PIN(2, "PA2"),
+ PINCTRL_PIN(3, "PG3"),
+ PINCTRL_PIN(4, "PA4"),
+ PINCTRL_PIN(5, "PA5"),
+ PINCTRL_PIN(6, "PA6"),
+ PINCTRL_PIN(7, "PA7"),
+ PINCTRL_PIN(8, "PA8"),
+ PINCTRL_PIN(9, "PA9"),
+ PINCTRL_PIN(10, "PA10"),
+ PINCTRL_PIN(11, "PA11"),
+ PINCTRL_PIN(12, "PA12"),
+ PINCTRL_PIN(13, "PA13"),
+ PINCTRL_PIN(14, "PA14"),
+ PINCTRL_PIN(15, "PA15"),
+ PINCTRL_PIN(16, "PB0"),
+ PINCTRL_PIN(17, "PB1"),
+ PINCTRL_PIN(18, "PB2"),
+ PINCTRL_PIN(19, "PB3"),
+ PINCTRL_PIN(20, "PB4"),
+ PINCTRL_PIN(21, "PB5"),
+ PINCTRL_PIN(22, "PB6"),
+ PINCTRL_PIN(23, "PB7"),
+ PINCTRL_PIN(24, "PB8"),
+ PINCTRL_PIN(25, "PB9"),
+ PINCTRL_PIN(26, "PB10"),
+ PINCTRL_PIN(27, "PB11"),
+ PINCTRL_PIN(28, "PB12"),
+ PINCTRL_PIN(29, "PB13"),
+ PINCTRL_PIN(30, "PB14"),
+ PINCTRL_PIN(32, "PC0"),
+ PINCTRL_PIN(33, "PC1"),
+ PINCTRL_PIN(34, "PC2"),
+ PINCTRL_PIN(35, "PC3"),
+ PINCTRL_PIN(36, "PC4"),
+ PINCTRL_PIN(37, "PC5"),
+ PINCTRL_PIN(38, "PC6"),
+ PINCTRL_PIN(39, "PC7"),
+ PINCTRL_PIN(40, "PC8"),
+ PINCTRL_PIN(41, "PC9"),
+ PINCTRL_PIN(42, "PC10"),
+ PINCTRL_PIN(43, "PC11"),
+ PINCTRL_PIN(44, "PC12"),
+ PINCTRL_PIN(45, "PC13"),
+ PINCTRL_PIN(48, "PD0"),
+ PINCTRL_PIN(49, "PD1"),
+ PINCTRL_PIN(50, "PD2"),
+ PINCTRL_PIN(51, "PD3"),
+ PINCTRL_PIN(52, "PD4"),
+ PINCTRL_PIN(53, "PD5"),
+ PINCTRL_PIN(54, "PD6"),
+ PINCTRL_PIN(55, "PD7"),
+ PINCTRL_PIN(56, "PD8"),
+ PINCTRL_PIN(57, "PD9"),
+ PINCTRL_PIN(58, "PD10"),
+ PINCTRL_PIN(59, "PD11"),
+ PINCTRL_PIN(60, "PD12"),
+ PINCTRL_PIN(61, "PD13"),
+ PINCTRL_PIN(62, "PD14"),
+ PINCTRL_PIN(63, "PD15"),
+ PINCTRL_PIN(64, "PE0"),
+ PINCTRL_PIN(65, "PE1"),
+ PINCTRL_PIN(66, "PE2"),
+ PINCTRL_PIN(67, "PE3"),
+ PINCTRL_PIN(68, "PE4"),
+ PINCTRL_PIN(69, "PE5"),
+ PINCTRL_PIN(70, "PE6"),
+ PINCTRL_PIN(71, "PE7"),
+ PINCTRL_PIN(72, "PE8"),
+ PINCTRL_PIN(73, "PE9"),
+ PINCTRL_PIN(74, "PE10"),
+ PINCTRL_PIN(75, "PE11"),
+ PINCTRL_PIN(76, "PE12"),
+ PINCTRL_PIN(77, "PE13"),
+ PINCTRL_PIN(78, "PE14"),
+ PINCTRL_PIN(79, "PE15"),
+ PINCTRL_PIN(80, "PF0"),
+ PINCTRL_PIN(81, "PF1"),
+ PINCTRL_PIN(82, "PF2"),
+ PINCTRL_PIN(83, "PF3"),
+ PINCTRL_PIN(84, "PF4"),
+ PINCTRL_PIN(85, "PF5"),
+ PINCTRL_PIN(86, "PF6"),
+ PINCTRL_PIN(87, "PF7"),
+ PINCTRL_PIN(88, "PF8"),
+ PINCTRL_PIN(89, "PF9"),
+ PINCTRL_PIN(90, "PF10"),
+ PINCTRL_PIN(91, "PF11"),
+ PINCTRL_PIN(92, "PF12"),
+ PINCTRL_PIN(93, "PF13"),
+ PINCTRL_PIN(94, "PF14"),
+ PINCTRL_PIN(95, "PF15"),
+ PINCTRL_PIN(96, "PG0"),
+ PINCTRL_PIN(97, "PG1"),
+ PINCTRL_PIN(98, "PG2"),
+ PINCTRL_PIN(99, "PG3"),
+ PINCTRL_PIN(100, "PG4"),
+ PINCTRL_PIN(101, "PG5"),
+ PINCTRL_PIN(102, "PG6"),
+ PINCTRL_PIN(103, "PG7"),
+ PINCTRL_PIN(104, "PG8"),
+ PINCTRL_PIN(105, "PG9"),
+ PINCTRL_PIN(106, "PG10"),
+ PINCTRL_PIN(107, "PG11"),
+ PINCTRL_PIN(108, "PG12"),
+ PINCTRL_PIN(109, "PG13"),
+ PINCTRL_PIN(110, "PG14"),
+ PINCTRL_PIN(111, "PG15"),
+ PINCTRL_PIN(112, "PH0"),
+ PINCTRL_PIN(113, "PH1"),
+ PINCTRL_PIN(114, "PH2"),
+ PINCTRL_PIN(115, "PH3"),
+ PINCTRL_PIN(116, "PH4"),
+ PINCTRL_PIN(117, "PH5"),
+ PINCTRL_PIN(118, "PH6"),
+ PINCTRL_PIN(119, "PH7"),
+ PINCTRL_PIN(120, "PH8"),
+ PINCTRL_PIN(121, "PH9"),
+ PINCTRL_PIN(122, "PH10"),
+ PINCTRL_PIN(123, "PH11"),
+ PINCTRL_PIN(124, "PH12"),
+ PINCTRL_PIN(125, "PH13"),
+ PINCTRL_PIN(128, "PI0"),
+ PINCTRL_PIN(129, "PI1"),
+ PINCTRL_PIN(130, "PI2"),
+ PINCTRL_PIN(131, "PI3"),
+ PINCTRL_PIN(132, "PI4"),
+ PINCTRL_PIN(133, "PI5"),
+ PINCTRL_PIN(134, "PI6"),
+ PINCTRL_PIN(135, "PI7"),
+ PINCTRL_PIN(136, "PI8"),
+ PINCTRL_PIN(137, "PI9"),
+ PINCTRL_PIN(138, "PI10"),
+ PINCTRL_PIN(139, "PI11"),
+ PINCTRL_PIN(140, "PI12"),
+ PINCTRL_PIN(141, "PI13"),
+ PINCTRL_PIN(142, "PI14"),
+ PINCTRL_PIN(143, "PI15"),
+ PINCTRL_PIN(144, "PJ0"),
+ PINCTRL_PIN(145, "PJ1"),
+ PINCTRL_PIN(146, "PJ2"),
+ PINCTRL_PIN(147, "PJ3"),
+ PINCTRL_PIN(148, "PJ4"),
+ PINCTRL_PIN(149, "PJ5"),
+ PINCTRL_PIN(150, "PJ6"),
+ PINCTRL_PIN(151, "PJ7"),
+ PINCTRL_PIN(152, "PJ8"),
+ PINCTRL_PIN(153, "PJ9"),
+ PINCTRL_PIN(154, "PJ10"),
+ PINCTRL_PIN(155, "PJ11"),
+ PINCTRL_PIN(156, "PJ12"),
+ PINCTRL_PIN(157, "PJ13"),
+};
+
+static const unsigned uart0_pins[] = {
+ GPIO_PE7, GPIO_PE8,
+};
+
+static const unsigned uart1_pins[] = {
+ GPIO_PH0, GPIO_PH1,
+};
+
+static const unsigned uart1_ctsrts_pins[] = {
+ GPIO_PE9, GPIO_PE10,
+};
+
+static const unsigned uart2_pins[] = {
+ GPIO_PB4, GPIO_PB5,
+};
+
+static const unsigned uart3_pins[] = {
+ GPIO_PB6, GPIO_PB7,
+};
+
+static const unsigned uart3_ctsrts_pins[] = {
+ GPIO_PB2, GPIO_PB3,
+};
+
+static const unsigned rsi0_pins[] = {
+ GPIO_PC8, GPIO_PC9, GPIO_PC10, GPIO_PC11, GPIO_PC12, GPIO_PC13,
+};
+
+static const unsigned spi0_pins[] = {
+ GPIO_PE0, GPIO_PE1, GPIO_PE2,
+};
+
+static const unsigned spi1_pins[] = {
+ GPIO_PG8, GPIO_PG9, GPIO_PG10,
+};
+
+static const unsigned twi0_pins[] = {
+ GPIO_PE14, GPIO_PE15,
+};
+
+static const unsigned twi1_pins[] = {
+ GPIO_PB0, GPIO_PB1,
+};
+
+static const unsigned rotary_pins[] = {
+ GPIO_PH4, GPIO_PH3, GPIO_PH5,
+};
+
+static const unsigned can0_pins[] = {
+ GPIO_PG13, GPIO_PG12,
+};
+
+static const unsigned can1_pins[] = {
+ GPIO_PG14, GPIO_PG15,
+};
+
+static const unsigned smc0_pins[] = {
+ GPIO_PH8, GPIO_PH9, GPIO_PH10, GPIO_PH11, GPIO_PH12, GPIO_PH13,
+ GPIO_PI0, GPIO_PI1, GPIO_PI2, GPIO_PI3, GPIO_PI4, GPIO_PI5, GPIO_PI6,
+ GPIO_PI7, GPIO_PI8, GPIO_PI9, GPIO_PI10, GPIO_PI11,
+ GPIO_PI12, GPIO_PI13, GPIO_PI14, GPIO_PI15,
+};
+
+static const unsigned sport0_pins[] = {
+ GPIO_PC0, GPIO_PC2, GPIO_PC3, GPIO_PC4, GPIO_PC6, GPIO_PC7,
+};
+
+static const unsigned sport1_pins[] = {
+ GPIO_PD0, GPIO_PD2, GPIO_PD3, GPIO_PD4, GPIO_PD6, GPIO_PD7,
+};
+
+static const unsigned sport2_pins[] = {
+ GPIO_PA0, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA6, GPIO_PA7,
+};
+
+static const unsigned sport3_pins[] = {
+ GPIO_PA8, GPIO_PA10, GPIO_PA11, GPIO_PA12, GPIO_PA14, GPIO_PA15,
+};
+
+static const unsigned ppi0_8b_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF13, GPIO_PG0, GPIO_PG1, GPIO_PG2,
+};
+
+static const unsigned ppi0_16b_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF9, GPIO_PF10, GPIO_PF11, GPIO_PF12,
+ GPIO_PF13, GPIO_PF14, GPIO_PF15,
+ GPIO_PG0, GPIO_PG1, GPIO_PG2,
+};
+
+static const unsigned ppi0_24b_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF8, GPIO_PF9, GPIO_PF10, GPIO_PF11, GPIO_PF12,
+ GPIO_PF13, GPIO_PF14, GPIO_PF15, GPIO_PD0, GPIO_PD1, GPIO_PD2,
+ GPIO_PD3, GPIO_PD4, GPIO_PD5, GPIO_PG3, GPIO_PG4,
+ GPIO_PG0, GPIO_PG1, GPIO_PG2,
+};
+
+static const unsigned ppi1_8b_pins[] = {
+ GPIO_PD0, GPIO_PD1, GPIO_PD2, GPIO_PD3, GPIO_PD4, GPIO_PD5, GPIO_PD6,
+ GPIO_PD7, GPIO_PE11, GPIO_PE12, GPIO_PE13,
+};
+
+static const unsigned ppi1_16b_pins[] = {
+ GPIO_PD0, GPIO_PD1, GPIO_PD2, GPIO_PD3, GPIO_PD4, GPIO_PD5, GPIO_PD6,
+ GPIO_PD7, GPIO_PD8, GPIO_PD9, GPIO_PD10, GPIO_PD11, GPIO_PD12,
+ GPIO_PD13, GPIO_PD14, GPIO_PD15,
+ GPIO_PE11, GPIO_PE12, GPIO_PE13,
+};
+
+static const unsigned ppi2_8b_pins[] = {
+ GPIO_PD8, GPIO_PD9, GPIO_PD10, GPIO_PD11, GPIO_PD12,
+ GPIO_PD13, GPIO_PD14, GPIO_PD15,
+ GPIO_PA7, GPIO_PB0, GPIO_PB1, GPIO_PB2, GPIO_PB3,
+};
+
+static const unsigned atapi_pins[] = {
+ GPIO_PH2, GPIO_PJ3, GPIO_PJ4, GPIO_PJ5, GPIO_PJ6,
+ GPIO_PJ7, GPIO_PJ8, GPIO_PJ9, GPIO_PJ10,
+};
+
+static const unsigned atapi_alter_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF8, GPIO_PF9, GPIO_PF10, GPIO_PF11, GPIO_PF12,
+ GPIO_PF13, GPIO_PF14, GPIO_PF15, GPIO_PG2, GPIO_PG3, GPIO_PG4,
+};
+
+static const unsigned nfc0_pins[] = {
+ GPIO_PJ1, GPIO_PJ2,
+};
+
+static const unsigned keys_4x4_pins[] = {
+ GPIO_PD8, GPIO_PD9, GPIO_PD10, GPIO_PD11,
+ GPIO_PD12, GPIO_PD13, GPIO_PD14, GPIO_PD15,
+};
+
+static const unsigned keys_8x8_pins[] = {
+ GPIO_PD8, GPIO_PD9, GPIO_PD10, GPIO_PD11,
+ GPIO_PD12, GPIO_PD13, GPIO_PD14, GPIO_PD15,
+ GPIO_PE0, GPIO_PE1, GPIO_PE2, GPIO_PE3,
+ GPIO_PE4, GPIO_PE5, GPIO_PE6, GPIO_PE7,
+};
+
+static const struct adi_pin_group adi_pin_groups[] = {
+ ADI_PIN_GROUP("uart0grp", uart0_pins),
+ ADI_PIN_GROUP("uart1grp", uart1_pins),
+ ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
+ ADI_PIN_GROUP("uart2grp", uart2_pins),
+ ADI_PIN_GROUP("uart3grp", uart3_pins),
+ ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins),
+ ADI_PIN_GROUP("rsi0grp", rsi0_pins),
+ ADI_PIN_GROUP("spi0grp", spi0_pins),
+ ADI_PIN_GROUP("spi1grp", spi1_pins),
+ ADI_PIN_GROUP("twi0grp", twi0_pins),
+ ADI_PIN_GROUP("twi1grp", twi1_pins),
+ ADI_PIN_GROUP("rotarygrp", rotary_pins),
+ ADI_PIN_GROUP("can0grp", can0_pins),
+ ADI_PIN_GROUP("can1grp", can1_pins),
+ ADI_PIN_GROUP("smc0grp", smc0_pins),
+ ADI_PIN_GROUP("sport0grp", sport0_pins),
+ ADI_PIN_GROUP("sport1grp", sport1_pins),
+ ADI_PIN_GROUP("sport2grp", sport2_pins),
+ ADI_PIN_GROUP("sport3grp", sport3_pins),
+ ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
+ ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
+ ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
+ ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
+ ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
+ ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
+ ADI_PIN_GROUP("atapigrp", atapi_pins),
+ ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins),
+ ADI_PIN_GROUP("nfc0grp", nfc0_pins),
+ ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins),
+ ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins),
+};
+
+static const unsigned short uart0_mux[] = {
+ P_UART0_TX, P_UART0_RX,
+ 0
+};
+
+static const unsigned short uart1_mux[] = {
+ P_UART1_TX, P_UART1_RX,
+ 0
+};
+
+static const unsigned short uart1_ctsrts_mux[] = {
+ P_UART1_RTS, P_UART1_CTS,
+ 0
+};
+
+static const unsigned short uart2_mux[] = {
+ P_UART2_TX, P_UART2_RX,
+ 0
+};
+
+static const unsigned short uart3_mux[] = {
+ P_UART3_TX, P_UART3_RX,
+ 0
+};
+
+static const unsigned short uart3_ctsrts_mux[] = {
+ P_UART3_RTS, P_UART3_CTS,
+ 0
+};
+
+static const unsigned short rsi0_mux[] = {
+ P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3, P_SD_CLK, P_SD_CMD,
+ 0
+};
+
+static const unsigned short spi0_mux[] = {
+ P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0
+};
+
+static const unsigned short spi1_mux[] = {
+ P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0
+};
+
+static const unsigned short twi0_mux[] = {
+ P_TWI0_SCL, P_TWI0_SDA, 0
+};
+
+static const unsigned short twi1_mux[] = {
+ P_TWI1_SCL, P_TWI1_SDA, 0
+};
+
+static const unsigned short rotary_mux[] = {
+ P_CNT_CUD, P_CNT_CDG, P_CNT_CZM, 0
+};
+
+static const unsigned short sport0_mux[] = {
+ P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
+};
+
+static const unsigned short sport1_mux[] = {
+ P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
+};
+
+static const unsigned short sport2_mux[] = {
+ P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
+ P_SPORT2_DRPRI, P_SPORT2_RSCLK, 0
+};
+
+static const unsigned short sport3_mux[] = {
+ P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
+ P_SPORT3_DRPRI, P_SPORT3_RSCLK, 0
+};
+
+static const unsigned short can0_mux[] = {
+ P_CAN0_RX, P_CAN0_TX, 0
+};
+
+static const unsigned short can1_mux[] = {
+ P_CAN1_RX, P_CAN1_TX, 0
+};
+
+static const unsigned short smc0_mux[] = {
+ P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
+ P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
+ P_A22, P_A23, P_A24, P_A25, P_NOR_CLK, 0,
+};
+
+static const unsigned short ppi0_8b_mux[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const unsigned short ppi0_16b_mux[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const unsigned short ppi0_24b_mux[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
+ P_PPI0_D16, P_PPI0_D17, P_PPI0_D18, P_PPI0_D19,
+ P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const unsigned short ppi1_8b_mux[] = {
+ P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
+ P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
+ P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
+ 0,
+};
+
+static const unsigned short ppi1_16b_mux[] = {
+ P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
+ P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
+ P_PPI1_D8, P_PPI1_D9, P_PPI1_D10, P_PPI1_D11,
+ P_PPI1_D12, P_PPI1_D13, P_PPI1_D14, P_PPI1_D15,
+ P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
+ 0,
+};
+
+static const unsigned short ppi2_8b_mux[] = {
+ P_PPI2_D0, P_PPI2_D1, P_PPI2_D2, P_PPI2_D3,
+ P_PPI2_D4, P_PPI2_D5, P_PPI2_D6, P_PPI2_D7,
+ P_PPI2_CLK, P_PPI2_FS1, P_PPI2_FS2,
+ 0,
+};
+
+static const unsigned short atapi_mux[] = {
+ P_ATAPI_RESET, P_ATAPI_DIOR, P_ATAPI_DIOW, P_ATAPI_CS0, P_ATAPI_CS1,
+ P_ATAPI_DMACK, P_ATAPI_DMARQ, P_ATAPI_INTRQ, P_ATAPI_IORDY,
+};
+
+static const unsigned short atapi_alter_mux[] = {
+ P_ATAPI_D0A, P_ATAPI_D1A, P_ATAPI_D2A, P_ATAPI_D3A, P_ATAPI_D4A,
+ P_ATAPI_D5A, P_ATAPI_D6A, P_ATAPI_D7A, P_ATAPI_D8A, P_ATAPI_D9A,
+ P_ATAPI_D10A, P_ATAPI_D11A, P_ATAPI_D12A, P_ATAPI_D13A, P_ATAPI_D14A,
+ P_ATAPI_D15A, P_ATAPI_A0A, P_ATAPI_A1A, P_ATAPI_A2A,
+ 0
+};
+
+static const unsigned short nfc0_mux[] = {
+ P_NAND_CE, P_NAND_RB,
+ 0
+};
+
+static const unsigned short keys_4x4_mux[] = {
+ P_KEY_ROW3, P_KEY_ROW2, P_KEY_ROW1, P_KEY_ROW0,
+ P_KEY_COL3, P_KEY_COL2, P_KEY_COL1, P_KEY_COL0,
+ 0
+};
+
+static const unsigned short keys_8x8_mux[] = {
+ P_KEY_ROW7, P_KEY_ROW6, P_KEY_ROW5, P_KEY_ROW4,
+ P_KEY_ROW3, P_KEY_ROW2, P_KEY_ROW1, P_KEY_ROW0,
+ P_KEY_COL7, P_KEY_COL6, P_KEY_COL5, P_KEY_COL4,
+ P_KEY_COL3, P_KEY_COL2, P_KEY_COL1, P_KEY_COL0,
+ 0
+};
+
+static const char * const uart0grp[] = { "uart0grp" };
+static const char * const uart1grp[] = { "uart1grp" };
+static const char * const uart1ctsrtsgrp[] = { "uart1ctsrtsgrp" };
+static const char * const uart2grp[] = { "uart2grp" };
+static const char * const uart3grp[] = { "uart3grp" };
+static const char * const uart3ctsrtsgrp[] = { "uart3ctsrtsgrp" };
+static const char * const rsi0grp[] = { "rsi0grp" };
+static const char * const spi0grp[] = { "spi0grp" };
+static const char * const spi1grp[] = { "spi1grp" };
+static const char * const twi0grp[] = { "twi0grp" };
+static const char * const twi1grp[] = { "twi1grp" };
+static const char * const rotarygrp[] = { "rotarygrp" };
+static const char * const can0grp[] = { "can0grp" };
+static const char * const can1grp[] = { "can1grp" };
+static const char * const smc0grp[] = { "smc0grp" };
+static const char * const sport0grp[] = { "sport0grp" };
+static const char * const sport1grp[] = { "sport1grp" };
+static const char * const sport2grp[] = { "sport2grp" };
+static const char * const sport3grp[] = { "sport3grp" };
+static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
+static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
+static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
+static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
+static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
+static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
+static const char * const atapigrp[] = { "atapigrp" };
+static const char * const atapialtergrp[] = { "atapialtergrp" };
+static const char * const nfc0grp[] = { "nfc0grp" };
+static const char * const keys_4x4grp[] = { "keys_4x4grp" };
+static const char * const keys_8x8grp[] = { "keys_8x8grp" };
+
+static const struct adi_pmx_func adi_pmx_functions[] = {
+ ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
+ ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
+ ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
+ ADI_PMX_FUNCTION("uart2", uart2grp, uart2_mux),
+ ADI_PMX_FUNCTION("uart3", uart3grp, uart3_mux),
+ ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp, uart3_ctsrts_mux),
+ ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
+ ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
+ ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
+ ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
+ ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
+ ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
+ ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
+ ADI_PMX_FUNCTION("can1", can1grp, can1_mux),
+ ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
+ ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
+ ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
+ ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
+ ADI_PMX_FUNCTION("sport3", sport3grp, sport3_mux),
+ ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
+ ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
+ ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
+ ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
+ ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
+ ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
+ ADI_PMX_FUNCTION("atapi", atapigrp, atapi_mux),
+ ADI_PMX_FUNCTION("atapi_alter", atapialtergrp, atapi_alter_mux),
+ ADI_PMX_FUNCTION("nfc0", nfc0grp, nfc0_mux),
+ ADI_PMX_FUNCTION("keys_4x4", keys_4x4grp, keys_4x4_mux),
+ ADI_PMX_FUNCTION("keys_8x8", keys_8x8grp, keys_8x8_mux),
+};
+
+static const struct adi_pinctrl_soc_data adi_bf54x_soc = {
+ .functions = adi_pmx_functions,
+ .nfunctions = ARRAY_SIZE(adi_pmx_functions),
+ .groups = adi_pin_groups,
+ .ngroups = ARRAY_SIZE(adi_pin_groups),
+ .pins = adi_pads,
+ .npins = ARRAY_SIZE(adi_pads),
+};
+
+void adi_pinctrl_soc_init(const struct adi_pinctrl_soc_data **soc)
+{
+ *soc = &adi_bf54x_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
new file mode 100644
index 00000000000..bf57aea2826
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -0,0 +1,521 @@
+/*
+ * Pinctrl Driver for ADI GPIO2 controller
+ *
+ * Copyright 2007-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPLv2 or later
+ */
+
+#include <asm/portmux.h>
+#include "pinctrl-adi2.h"
+
+static const struct pinctrl_pin_desc adi_pads[] = {
+ PINCTRL_PIN(0, "PA0"),
+ PINCTRL_PIN(1, "PA1"),
+ PINCTRL_PIN(2, "PA2"),
+ PINCTRL_PIN(3, "PG3"),
+ PINCTRL_PIN(4, "PA4"),
+ PINCTRL_PIN(5, "PA5"),
+ PINCTRL_PIN(6, "PA6"),
+ PINCTRL_PIN(7, "PA7"),
+ PINCTRL_PIN(8, "PA8"),
+ PINCTRL_PIN(9, "PA9"),
+ PINCTRL_PIN(10, "PA10"),
+ PINCTRL_PIN(11, "PA11"),
+ PINCTRL_PIN(12, "PA12"),
+ PINCTRL_PIN(13, "PA13"),
+ PINCTRL_PIN(14, "PA14"),
+ PINCTRL_PIN(15, "PA15"),
+ PINCTRL_PIN(16, "PB0"),
+ PINCTRL_PIN(17, "PB1"),
+ PINCTRL_PIN(18, "PB2"),
+ PINCTRL_PIN(19, "PB3"),
+ PINCTRL_PIN(20, "PB4"),
+ PINCTRL_PIN(21, "PB5"),
+ PINCTRL_PIN(22, "PB6"),
+ PINCTRL_PIN(23, "PB7"),
+ PINCTRL_PIN(24, "PB8"),
+ PINCTRL_PIN(25, "PB9"),
+ PINCTRL_PIN(26, "PB10"),
+ PINCTRL_PIN(27, "PB11"),
+ PINCTRL_PIN(28, "PB12"),
+ PINCTRL_PIN(29, "PB13"),
+ PINCTRL_PIN(30, "PB14"),
+ PINCTRL_PIN(31, "PB15"),
+ PINCTRL_PIN(32, "PC0"),
+ PINCTRL_PIN(33, "PC1"),
+ PINCTRL_PIN(34, "PC2"),
+ PINCTRL_PIN(35, "PC3"),
+ PINCTRL_PIN(36, "PC4"),
+ PINCTRL_PIN(37, "PC5"),
+ PINCTRL_PIN(38, "PC6"),
+ PINCTRL_PIN(39, "PC7"),
+ PINCTRL_PIN(40, "PC8"),
+ PINCTRL_PIN(41, "PC9"),
+ PINCTRL_PIN(42, "PC10"),
+ PINCTRL_PIN(43, "PC11"),
+ PINCTRL_PIN(44, "PC12"),
+ PINCTRL_PIN(45, "PC13"),
+ PINCTRL_PIN(46, "PC14"),
+ PINCTRL_PIN(47, "PC15"),
+ PINCTRL_PIN(48, "PD0"),
+ PINCTRL_PIN(49, "PD1"),
+ PINCTRL_PIN(50, "PD2"),
+ PINCTRL_PIN(51, "PD3"),
+ PINCTRL_PIN(52, "PD4"),
+ PINCTRL_PIN(53, "PD5"),
+ PINCTRL_PIN(54, "PD6"),
+ PINCTRL_PIN(55, "PD7"),
+ PINCTRL_PIN(56, "PD8"),
+ PINCTRL_PIN(57, "PD9"),
+ PINCTRL_PIN(58, "PD10"),
+ PINCTRL_PIN(59, "PD11"),
+ PINCTRL_PIN(60, "PD12"),
+ PINCTRL_PIN(61, "PD13"),
+ PINCTRL_PIN(62, "PD14"),
+ PINCTRL_PIN(63, "PD15"),
+ PINCTRL_PIN(64, "PE0"),
+ PINCTRL_PIN(65, "PE1"),
+ PINCTRL_PIN(66, "PE2"),
+ PINCTRL_PIN(67, "PE3"),
+ PINCTRL_PIN(68, "PE4"),
+ PINCTRL_PIN(69, "PE5"),
+ PINCTRL_PIN(70, "PE6"),
+ PINCTRL_PIN(71, "PE7"),
+ PINCTRL_PIN(72, "PE8"),
+ PINCTRL_PIN(73, "PE9"),
+ PINCTRL_PIN(74, "PE10"),
+ PINCTRL_PIN(75, "PE11"),
+ PINCTRL_PIN(76, "PE12"),
+ PINCTRL_PIN(77, "PE13"),
+ PINCTRL_PIN(78, "PE14"),
+ PINCTRL_PIN(79, "PE15"),
+ PINCTRL_PIN(80, "PF0"),
+ PINCTRL_PIN(81, "PF1"),
+ PINCTRL_PIN(82, "PF2"),
+ PINCTRL_PIN(83, "PF3"),
+ PINCTRL_PIN(84, "PF4"),
+ PINCTRL_PIN(85, "PF5"),
+ PINCTRL_PIN(86, "PF6"),
+ PINCTRL_PIN(87, "PF7"),
+ PINCTRL_PIN(88, "PF8"),
+ PINCTRL_PIN(89, "PF9"),
+ PINCTRL_PIN(90, "PF10"),
+ PINCTRL_PIN(91, "PF11"),
+ PINCTRL_PIN(92, "PF12"),
+ PINCTRL_PIN(93, "PF13"),
+ PINCTRL_PIN(94, "PF14"),
+ PINCTRL_PIN(95, "PF15"),
+ PINCTRL_PIN(96, "PG0"),
+ PINCTRL_PIN(97, "PG1"),
+ PINCTRL_PIN(98, "PG2"),
+ PINCTRL_PIN(99, "PG3"),
+ PINCTRL_PIN(100, "PG4"),
+ PINCTRL_PIN(101, "PG5"),
+ PINCTRL_PIN(102, "PG6"),
+ PINCTRL_PIN(103, "PG7"),
+ PINCTRL_PIN(104, "PG8"),
+ PINCTRL_PIN(105, "PG9"),
+ PINCTRL_PIN(106, "PG10"),
+ PINCTRL_PIN(107, "PG11"),
+ PINCTRL_PIN(108, "PG12"),
+ PINCTRL_PIN(109, "PG13"),
+ PINCTRL_PIN(110, "PG14"),
+ PINCTRL_PIN(111, "PG15"),
+};
+
+static const unsigned uart0_pins[] = {
+ GPIO_PD7, GPIO_PD8,
+};
+
+static const unsigned uart0_ctsrts_pins[] = {
+ GPIO_PD9, GPIO_PD10,
+};
+
+static const unsigned uart1_pins[] = {
+ GPIO_PG15, GPIO_PG14,
+};
+
+static const unsigned uart1_ctsrts_pins[] = {
+ GPIO_PG10, GPIO_PG13,
+};
+
+static const unsigned rsi0_pins[] = {
+ GPIO_PG3, GPIO_PG2, GPIO_PG0, GPIO_PE15, GPIO_PG5, GPIO_PG6,
+};
+
+static const unsigned eth0_pins[] = {
+ GPIO_PC6, GPIO_PC7, GPIO_PC2, GPIO_PC0, GPIO_PC3, GPIO_PC1,
+ GPIO_PB13, GPIO_PD6, GPIO_PC5, GPIO_PC4, GPIO_PB14, GPIO_PB15,
+};
+
+static const unsigned eth1_pins[] = {
+ GPIO_PE10, GPIO_PE11, GPIO_PG3, GPIO_PG0, GPIO_PG2, GPIO_PE15,
+ GPIO_PG5, GPIO_PE12, GPIO_PE13, GPIO_PE14, GPIO_PG6, GPIO_PC9,
+};
+
+static const unsigned spi0_pins[] = {
+ GPIO_PD4, GPIO_PD2, GPIO_PD3,
+};
+
+static const unsigned spi1_pins[] = {
+ GPIO_PD5, GPIO_PD14, GPIO_PD13,
+};
+
+static const unsigned twi0_pins[] = {
+};
+
+static const unsigned twi1_pins[] = {
+};
+
+static const unsigned rotary_pins[] = {
+ GPIO_PG7, GPIO_PG11, GPIO_PG12,
+};
+
+static const unsigned can0_pins[] = {
+ GPIO_PG1, GPIO_PG4,
+};
+
+static const unsigned smc0_pins[] = {
+ GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA5, GPIO_PA6,
+ GPIO_PA7, GPIO_PA8, GPIO_PA9, GPIO_PB2, GPIO_PA10, GPIO_PA11,
+ GPIO_PB3, GPIO_PA12, GPIO_PA13, GPIO_PA14, GPIO_PA15, GPIO_PB6,
+ GPIO_PB7, GPIO_PB8, GPIO_PB10, GPIO_PB11, GPIO_PB0,
+};
+
+static const unsigned sport0_pins[] = {
+ GPIO_PB5, GPIO_PB4, GPIO_PB9, GPIO_PB8, GPIO_PB7, GPIO_PB11,
+};
+
+static const unsigned sport1_pins[] = {
+ GPIO_PE2, GPIO_PE5, GPIO_PD15, GPIO_PE4, GPIO_PE3, GPIO_PE1,
+};
+
+static const unsigned sport2_pins[] = {
+ GPIO_PG4, GPIO_PG1, GPIO_PG9, GPIO_PG10, GPIO_PG7, GPIO_PB12,
+};
+
+static const unsigned ppi0_8b_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF13, GPIO_PF14, GPIO_PF15,
+ GPIO_PE6, GPIO_PE7, GPIO_PE8, GPIO_PE9,
+};
+
+static const unsigned ppi0_16b_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF9, GPIO_PF10, GPIO_PF11, GPIO_PF12,
+ GPIO_PF13, GPIO_PF14, GPIO_PF15,
+ GPIO_PE6, GPIO_PE7, GPIO_PE8, GPIO_PE9,
+};
+
+static const unsigned ppi0_24b_pins[] = {
+ GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5, GPIO_PF6,
+ GPIO_PF7, GPIO_PF8, GPIO_PF9, GPIO_PF10, GPIO_PF11, GPIO_PF12,
+ GPIO_PF13, GPIO_PF14, GPIO_PF15, GPIO_PE0, GPIO_PE1, GPIO_PE2,
+ GPIO_PE3, GPIO_PE4, GPIO_PE5, GPIO_PE6, GPIO_PE7, GPIO_PE8,
+ GPIO_PE9, GPIO_PD12, GPIO_PD15,
+};
+
+static const unsigned ppi1_8b_pins[] = {
+ GPIO_PC0, GPIO_PC1, GPIO_PC2, GPIO_PC3, GPIO_PC4, GPIO_PC5, GPIO_PC6,
+ GPIO_PC7, GPIO_PC8, GPIO_PB13, GPIO_PB14, GPIO_PB15, GPIO_PD6,
+};
+
+static const unsigned ppi1_16b_pins[] = {
+ GPIO_PC0, GPIO_PC1, GPIO_PC2, GPIO_PC3, GPIO_PC4, GPIO_PC5, GPIO_PC6,
+ GPIO_PC7, GPIO_PC9, GPIO_PC10, GPIO_PC11, GPIO_PC12,
+ GPIO_PC13, GPIO_PC14, GPIO_PC15,
+ GPIO_PB13, GPIO_PB14, GPIO_PB15, GPIO_PD6,
+};
+
+static const unsigned ppi2_8b_pins[] = {
+ GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA5, GPIO_PA6,
+ GPIO_PA7, GPIO_PB0, GPIO_PB1, GPIO_PB2, GPIO_PB3,
+};
+
+static const unsigned ppi2_16b_pins[] = {
+ GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA5, GPIO_PA6,
+ GPIO_PA7, GPIO_PA8, GPIO_PA9, GPIO_PA10, GPIO_PA11, GPIO_PA12,
+ GPIO_PA13, GPIO_PA14, GPIO_PA15, GPIO_PB0, GPIO_PB1, GPIO_PB2,
+};
+
+static const unsigned lp0_pins[] = {
+ GPIO_PB0, GPIO_PB1, GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3,
+ GPIO_PA4, GPIO_PA5, GPIO_PA6, GPIO_PA7,
+};
+
+static const unsigned lp1_pins[] = {
+ GPIO_PB3, GPIO_PB2, GPIO_PA8, GPIO_PA9, GPIO_PA10, GPIO_PA11,
+ GPIO_PA12, GPIO_PA13, GPIO_PA14, GPIO_PA15,
+};
+
+static const unsigned lp2_pins[] = {
+ GPIO_PE6, GPIO_PE7, GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3,
+ GPIO_PF4, GPIO_PF5, GPIO_PF6, GPIO_PF7,
+};
+
+static const unsigned lp3_pins[] = {
+ GPIO_PE9, GPIO_PE8, GPIO_PF8, GPIO_PF9, GPIO_PF10, GPIO_PF11,
+ GPIO_PF12, GPIO_PF13, GPIO_PF14, GPIO_PF15,
+};
+
+static const struct adi_pin_group adi_pin_groups[] = {
+ ADI_PIN_GROUP("uart0grp", uart0_pins),
+ ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins),
+ ADI_PIN_GROUP("uart1grp", uart1_pins),
+ ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
+ ADI_PIN_GROUP("rsi0grp", rsi0_pins),
+ ADI_PIN_GROUP("eth0grp", eth0_pins),
+ ADI_PIN_GROUP("eth1grp", eth1_pins),
+ ADI_PIN_GROUP("spi0grp", spi0_pins),
+ ADI_PIN_GROUP("spi1grp", spi1_pins),
+ ADI_PIN_GROUP("twi0grp", twi0_pins),
+ ADI_PIN_GROUP("twi1grp", twi1_pins),
+ ADI_PIN_GROUP("rotarygrp", rotary_pins),
+ ADI_PIN_GROUP("can0grp", can0_pins),
+ ADI_PIN_GROUP("smc0grp", smc0_pins),
+ ADI_PIN_GROUP("sport0grp", sport0_pins),
+ ADI_PIN_GROUP("sport1grp", sport1_pins),
+ ADI_PIN_GROUP("sport2grp", sport2_pins),
+ ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
+ ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
+ ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
+ ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
+ ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
+ ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
+ ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins),
+ ADI_PIN_GROUP("lp0grp", lp0_pins),
+ ADI_PIN_GROUP("lp1grp", lp1_pins),
+ ADI_PIN_GROUP("lp2grp", lp2_pins),
+ ADI_PIN_GROUP("lp3grp", lp3_pins),
+};
+
+static const unsigned short uart0_mux[] = {
+ P_UART0_TX, P_UART0_RX,
+ 0
+};
+
+static const unsigned short uart0_ctsrts_mux[] = {
+ P_UART0_RTS, P_UART0_CTS,
+ 0
+};
+
+static const unsigned short uart1_mux[] = {
+ P_UART1_TX, P_UART1_RX,
+ 0
+};
+
+static const unsigned short uart1_ctsrts_mux[] = {
+ P_UART1_RTS, P_UART1_CTS,
+ 0
+};
+
+static const unsigned short rsi0_mux[] = {
+ P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3,
+ P_RSI_CMD, P_RSI_CLK, 0
+};
+
+static const unsigned short eth0_mux[] = P_RMII0;
+static const unsigned short eth1_mux[] = P_RMII1;
+
+static const unsigned short spi0_mux[] = {
+ P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0
+};
+
+static const unsigned short spi1_mux[] = {
+ P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0
+};
+
+static const unsigned short twi0_mux[] = {
+ P_TWI0_SCL, P_TWI0_SDA, 0
+};
+
+static const unsigned short twi1_mux[] = {
+ P_TWI1_SCL, P_TWI1_SDA, 0
+};
+
+static const unsigned short rotary_mux[] = {
+ P_CNT_CUD, P_CNT_CDG, P_CNT_CZM, 0
+};
+
+static const unsigned short sport0_mux[] = {
+ P_SPORT0_ACLK, P_SPORT0_AFS, P_SPORT0_AD0, P_SPORT0_BCLK,
+ P_SPORT0_BFS, P_SPORT0_BD0, 0,
+};
+
+static const unsigned short sport1_mux[] = {
+ P_SPORT1_ACLK, P_SPORT1_AFS, P_SPORT1_AD0, P_SPORT1_BCLK,
+ P_SPORT1_BFS, P_SPORT1_BD0, 0,
+};
+
+static const unsigned short sport2_mux[] = {
+ P_SPORT2_ACLK, P_SPORT2_AFS, P_SPORT2_AD0, P_SPORT2_BCLK,
+ P_SPORT2_BFS, P_SPORT2_BD0, 0,
+};
+
+static const unsigned short can0_mux[] = {
+ P_CAN0_RX, P_CAN0_TX, 0
+};
+
+static const unsigned short smc0_mux[] = {
+ P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
+ P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
+ P_A22, P_A23, P_A24, P_A25, P_NORCK, 0,
+};
+
+static const unsigned short ppi0_8b_mux[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const unsigned short ppi0_16b_mux[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const unsigned short ppi0_24b_mux[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
+ P_PPI0_D16, P_PPI0_D17, P_PPI0_D18, P_PPI0_D19,
+ P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const unsigned short ppi1_8b_mux[] = {
+ P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
+ P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
+ P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
+ 0,
+};
+
+static const unsigned short ppi1_16b_mux[] = {
+ P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
+ P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
+ P_PPI1_D8, P_PPI1_D9, P_PPI1_D10, P_PPI1_D11,
+ P_PPI1_D12, P_PPI1_D13, P_PPI1_D14, P_PPI1_D15,
+ P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
+ 0,
+};
+
+static const unsigned short ppi2_8b_mux[] = {
+ P_PPI2_D0, P_PPI2_D1, P_PPI2_D2, P_PPI2_D3,
+ P_PPI2_D4, P_PPI2_D5, P_PPI2_D6, P_PPI2_D7,
+ P_PPI2_CLK, P_PPI2_FS1, P_PPI2_FS2,
+ 0,
+};
+
+static const unsigned short ppi2_16b_mux[] = {
+ P_PPI2_D0, P_PPI2_D1, P_PPI2_D2, P_PPI2_D3,
+ P_PPI2_D4, P_PPI2_D5, P_PPI2_D6, P_PPI2_D7,
+ P_PPI2_D8, P_PPI2_D9, P_PPI2_D10, P_PPI2_D11,
+ P_PPI2_D12, P_PPI2_D13, P_PPI2_D14, P_PPI2_D15,
+ P_PPI2_CLK, P_PPI2_FS1, P_PPI2_FS2,
+ 0,
+};
+
+static const unsigned short lp0_mux[] = {
+ P_LP0_CLK, P_LP0_ACK, P_LP0_D0, P_LP0_D1, P_LP0_D2,
+ P_LP0_D3, P_LP0_D4, P_LP0_D5, P_LP0_D6, P_LP0_D7,
+ 0
+};
+
+static const unsigned short lp1_mux[] = {
+ P_LP1_CLK, P_LP1_ACK, P_LP1_D0, P_LP1_D1, P_LP1_D2,
+ P_LP1_D3, P_LP1_D4, P_LP1_D5, P_LP1_D6, P_LP1_D7,
+ 0
+};
+
+static const unsigned short lp2_mux[] = {
+ P_LP2_CLK, P_LP2_ACK, P_LP2_D0, P_LP2_D1, P_LP2_D2,
+ P_LP2_D3, P_LP2_D4, P_LP2_D5, P_LP2_D6, P_LP2_D7,
+ 0
+};
+
+static const unsigned short lp3_mux[] = {
+ P_LP3_CLK, P_LP3_ACK, P_LP3_D0, P_LP3_D1, P_LP3_D2,
+ P_LP3_D3, P_LP3_D4, P_LP3_D5, P_LP3_D6, P_LP3_D7,
+ 0
+};
+
+static const char * const uart0grp[] = { "uart0grp" };
+static const char * const uart0ctsrtsgrp[] = { "uart0ctsrtsgrp" };
+static const char * const uart1grp[] = { "uart1grp" };
+static const char * const uart1ctsrtsgrp[] = { "uart1ctsrtsgrp" };
+static const char * const rsi0grp[] = { "rsi0grp" };
+static const char * const eth0grp[] = { "eth0grp" };
+static const char * const eth1grp[] = { "eth1grp" };
+static const char * const spi0grp[] = { "spi0grp" };
+static const char * const spi1grp[] = { "spi1grp" };
+static const char * const twi0grp[] = { "twi0grp" };
+static const char * const twi1grp[] = { "twi1grp" };
+static const char * const rotarygrp[] = { "rotarygrp" };
+static const char * const can0grp[] = { "can0grp" };
+static const char * const smc0grp[] = { "smc0grp" };
+static const char * const sport0grp[] = { "sport0grp" };
+static const char * const sport1grp[] = { "sport1grp" };
+static const char * const sport2grp[] = { "sport2grp" };
+static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
+static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
+static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
+static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
+static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
+static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
+static const char * const ppi2_16bgrp[] = { "ppi2_16bgrp" };
+static const char * const lp0grp[] = { "lp0grp" };
+static const char * const lp1grp[] = { "lp1grp" };
+static const char * const lp2grp[] = { "lp2grp" };
+static const char * const lp3grp[] = { "lp3grp" };
+
+static const struct adi_pmx_func adi_pmx_functions[] = {
+ ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
+ ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp, uart0_ctsrts_mux),
+ ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
+ ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
+ ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
+ ADI_PMX_FUNCTION("eth0", eth0grp, eth0_mux),
+ ADI_PMX_FUNCTION("eth1", eth1grp, eth1_mux),
+ ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
+ ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
+ ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
+ ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
+ ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
+ ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
+ ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
+ ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
+ ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
+ ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
+ ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
+ ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
+ ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
+ ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
+ ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
+ ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
+ ADI_PMX_FUNCTION("ppi2_16b", ppi2_16bgrp, ppi2_16b_mux),
+ ADI_PMX_FUNCTION("lp0", lp0grp, lp0_mux),
+ ADI_PMX_FUNCTION("lp1", lp1grp, lp1_mux),
+ ADI_PMX_FUNCTION("lp2", lp2grp, lp2_mux),
+ ADI_PMX_FUNCTION("lp3", lp3grp, lp3_mux),
+};
+
+static const struct adi_pinctrl_soc_data adi_bf60x_soc = {
+ .functions = adi_pmx_functions,
+ .nfunctions = ARRAY_SIZE(adi_pmx_functions),
+ .groups = adi_pin_groups,
+ .ngroups = ARRAY_SIZE(adi_pin_groups),
+ .pins = adi_pads,
+ .npins = ARRAY_SIZE(adi_pads),
+};
+
+void adi_pinctrl_soc_init(const struct adi_pinctrl_soc_data **soc)
+{
+ *soc = &adi_bf60x_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
new file mode 100644
index 00000000000..7a39562c3e4
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -0,0 +1,1164 @@
+/*
+ * Pinctrl Driver for ADI GPIO2 controller
+ *
+ * Copyright 2007-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPLv2 or later
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/platform_data/pinctrl-adi2.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/syscore_ops.h>
+#include <linux/gpio.h>
+#include <asm/portmux.h>
+#include "pinctrl-adi2.h"
+#include "core.h"
+
+/*
+According to the BF54x HRM, pint means "pin interrupt".
+http://www.analog.com/static/imported-files/processor_manuals/ADSP-BF54x_hwr_rev1.2.pdf
+
+The ADSP-BF54x Blackfin processors have four SIC interrupt channels
+dedicated to pin interrupt purposes. These channels are managed by
+four hardware blocks, called PINT0, PINT1, PINT2, and PINT3. Every PINTx
+block can sense up to 32 pins. While PINT0 and PINT1 can sense the
+pins of port A and port B, PINT2 and PINT3 manage all the pins from port
+C to port J as shown in Figure 9-2.
+
+In the BF54x HRM:
+The ten GPIO ports are subdivided into 8-bit half ports, resulting in lower
+and upper half 8-bit units. The PINTx_ASSIGN registers control the 8-bit
+multiplexers shown in Figure 9-3. Lower half units of eight pins can be
+forwarded to either byte 0 or byte 2 of either associated PINTx block.
+Upper half units can be forwarded to either byte 1 or byte 3 of the pin
+interrupt blocks, without further restrictions.
+
+All MMR registers in the pin interrupt module are 32 bits wide. To simplify
+the mapping logic, this driver only maps a 16-bit GPIO port to the upper or
+lower 16 bits of a PINTx block. Figure 9-3 can be found on page 583.
+
+Each IRQ domain is bound to a GPIO bank device. Up to two GPIO bank devices
+can map to one PINT device. Two IRQ domain pointers in "struct gpio_pint"
+are kept to ease the PINT interrupt handler.
+
+The GPIO bank mapped to the lower 16 bits of the PINT device sets its IRQ
+domain pointer in domain[0]. The IRQ domain pointer of the other bank is set
+to domain[1]. The PINT interrupt handler adi_gpio_handle_pint_irq() selects
+the current domain pointer according to whether the interrupt request mask
+is in the lower 16 bits (domain[0]) or the upper 16 bits (domain[1]).
+
+A PINT device is not part of a GPIO port device in Blackfin. Multiple GPIO
+port devices can be mapped to the same PINT device.
+
+*/
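+/*
+ * A hedged illustration of the low/high 16-bit split described above: the
+ * bit mask for a hardware IRQ reduces to the computation below. The driver's
+ * actual helper is hwirq_to_pintbit() later in this file; this is only a
+ * restatement for readability.
+ *
+ *	mask = assigned_to_high_half ? BIT(hwirq) << 16 : BIT(hwirq);
+ */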
+
+static LIST_HEAD(adi_pint_list);
+static LIST_HEAD(adi_gpio_port_list);
+
+#define DRIVER_NAME "pinctrl-adi2"
+
+#define PINT_HI_OFFSET 16
+
+/**
+ * struct gpio_port_saved - GPIO port registers that should be saved between
+ * power suspend and resume operations.
+ *
+ * @fer: PORTx_FER register
+ * @data: PORTx_DATA register
+ * @dir: PORTx_DIR register
+ * @inen: PORTx_INEN register
+ * @mux: PORTx_MUX register
+ */
+struct gpio_port_saved {
+ u16 fer;
+ u16 data;
+ u16 dir;
+ u16 inen;
+ u32 mux;
+};
+
+/**
+ * struct gpio_pint - Pin interrupt controller device. Multiple ADI GPIO
+ * banks can be mapped into one Pin interrupt controller.
+ *
+ * @node: All gpio_pint instances are added to a global list.
+ * @base: PINT device register base address
+ * @irq: IRQ of the PINT device, it is the parent IRQ of all
+ * GPIO IRQs mapping to this device.
+ * @domain: [0] irq domain of the gpio port whose hardware interrupts are
+ * mapped to the low 16 bits of the pint registers.
+ * [1] irq domain of the gpio port whose hardware interrupts are
+ * mapped to the high 16 bits of the pint registers.
+ * @regs: address pointer to the PINT device
+ * @map_count: No more than 2 GPIO banks can be mapped to this PINT device.
+ * @lock: This lock makes sure the irq_chip operations on one PINT device
+ * for different GPIO interrupts are atomic.
+ * @pint_map_port: Set up the mapping between one PINT device and
+ * multiple GPIO banks.
+ */
+struct gpio_pint {
+ struct list_head node;
+ void __iomem *base;
+ int irq;
+ struct irq_domain *domain[2];
+ struct gpio_pint_regs *regs;
+ struct adi_pm_pint_save saved_data;
+ int map_count;
+ spinlock_t lock;
+
+ int (*pint_map_port)(struct gpio_pint *pint, bool assign,
+ u8 map, struct irq_domain *domain);
+};
+
+/**
+ * struct adi_pinctrl - ADI pin controller device
+ *
+ * @dev: a pointer back to containing device
+ * @pctl: the pinctrl device
+ * @soc: SoC data for this specific chip
+ */
+struct adi_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ const struct adi_pinctrl_soc_data *soc;
+};
+
+/**
+ * struct gpio_port - GPIO bank device. Multiple ADI GPIO banks can be mapped
+ * into one pin interrupt controller.
+ *
+ * @node: All gpio_port instances are added to a list.
+ * @base: GPIO bank device register base address
+ * @irq_base: base IRQ of the GPIO bank device
+ * @width: number of pins in the GPIO bank device
+ * @regs: address pointer to the GPIO bank device
+ * @saved_data: registers that should be saved between PM operations.
+ * @dev: device structure of this GPIO bank
+ * @pint: GPIO PINT device that this GPIO bank mapped to
+ * @pint_map: GPIO bank mapping code in the PINT device
+ * @pint_assign: The 32-bit PINT registers can be divided into 2 parts. A
+ * GPIO bank can be mapped into either low 16 bits[0] or high 16
+ * bits[1] of each PINT register.
+ * @lock: This lock makes sure the irq_chip operations on one PINT device
+ * for different GPIO interrupts are atomic.
+ * @chip: abstract a GPIO controller
+ * @domain: The irq domain owned by the GPIO port.
+ */
+struct gpio_port {
+ struct list_head node;
+ void __iomem *base;
+ unsigned int irq_base;
+ unsigned int width;
+ struct gpio_port_t *regs;
+ struct gpio_port_saved saved_data;
+ struct device *dev;
+
+ struct gpio_pint *pint;
+ u8 pint_map;
+ bool pint_assign;
+
+ spinlock_t lock;
+ struct gpio_chip chip;
+ struct irq_domain *domain;
+};
+
+static inline u8 pin_to_offset(struct pinctrl_gpio_range *range, unsigned pin)
+{
+ return pin - range->pin_base;
+}
+
+static inline u32 hwirq_to_pintbit(struct gpio_port *port, int hwirq)
+{
+ return port->pint_assign ? BIT(hwirq) << PINT_HI_OFFSET : BIT(hwirq);
+}
+
+static struct gpio_pint *find_gpio_pint(unsigned id)
+{
+ struct gpio_pint *pint;
+ int i = 0;
+
+ list_for_each_entry(pint, &adi_pint_list, node) {
+ if (id == i)
+ return pint;
+ i++;
+ }
+
+ return NULL;
+}
+
+static inline void port_setup(struct gpio_port *port, unsigned offset,
+ bool use_for_gpio)
+{
+ struct gpio_port_t *regs = port->regs;
+
+ if (use_for_gpio)
+ writew(readw(&regs->port_fer) & ~BIT(offset),
+ &regs->port_fer);
+ else
+ writew(readw(&regs->port_fer) | BIT(offset), &regs->port_fer);
+}
+
+static inline void portmux_setup(struct gpio_port *port, unsigned offset,
+ unsigned short function)
+{
+ struct gpio_port_t *regs = port->regs;
+ u32 pmux;
+
+ pmux = readl(&regs->port_mux);
+
+ /* The function field of each pin has 2 consecutive bits in
+ * the mux register.
+ */
+ pmux &= ~(0x3 << (2 * offset));
+ pmux |= (function & 0x3) << (2 * offset);
+
+ writel(pmux, &regs->port_mux);
+}
+
+static inline u16 get_portmux(struct gpio_port *port, unsigned offset)
+{
+ struct gpio_port_t *regs = port->regs;
+ u32 pmux = readl(&regs->port_mux);
+
+ /* The function field of each pin has 2 consecutive bits in
+ * the mux register.
+ */
+ return pmux >> (2 * offset) & 0x3;
+}
+
+static void adi_gpio_ack_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *regs = port->pint->regs;
+ unsigned pintbit = hwirq_to_pintbit(port, d->hwirq);
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
+ if (readl(&regs->invert_set) & pintbit)
+ writel(pintbit, &regs->invert_clear);
+ else
+ writel(pintbit, &regs->invert_set);
+ }
+
+ writel(pintbit, &regs->request);
+
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void adi_gpio_mask_ack_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *regs = port->pint->regs;
+ unsigned pintbit = hwirq_to_pintbit(port, d->hwirq);
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
+ if (readl(&regs->invert_set) & pintbit)
+ writel(pintbit, &regs->invert_clear);
+ else
+ writel(pintbit, &regs->invert_set);
+ }
+
+ writel(pintbit, &regs->request);
+ writel(pintbit, &regs->mask_clear);
+
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void adi_gpio_mask_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *regs = port->pint->regs;
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ writel(hwirq_to_pintbit(port, d->hwirq), &regs->mask_clear);
+
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void adi_gpio_unmask_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *regs = port->pint->regs;
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ writel(hwirq_to_pintbit(port, d->hwirq), &regs->mask_set);
+
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static unsigned int adi_gpio_irq_startup(struct irq_data *d)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *regs;
+
+ if (!port) {
+ pr_err("GPIO IRQ %d :Not exist\n", d->irq);
+ return -ENODEV;
+ }
+
+ regs = port->pint->regs;
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ port_setup(port, d->hwirq, true);
+ writew(BIT(d->hwirq), &port->regs->dir_clear);
+ writew(readw(&port->regs->inen) | BIT(d->hwirq), &port->regs->inen);
+
+ writel(hwirq_to_pintbit(port, d->hwirq), &regs->mask_set);
+
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void adi_gpio_irq_shutdown(struct irq_data *d)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *regs = port->pint->regs;
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ writel(hwirq_to_pintbit(port, d->hwirq), &regs->mask_clear);
+
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int adi_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ unsigned long flags;
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct gpio_pint_regs *pint_regs;
+ unsigned pintmask;
+ unsigned int irq = d->irq;
+ int ret = 0;
+ char buf[16];
+
+ if (!port) {
+ pr_err("GPIO IRQ %d :Not exist\n", d->irq);
+ return -ENODEV;
+ }
+
+ pint_regs = port->pint->regs;
+
+ pintmask = hwirq_to_pintbit(port, d->hwirq);
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->pint->lock);
+
+ /* In case of interrupt autodetect, set irq type to edge sensitive. */
+ if (type == IRQ_TYPE_PROBE)
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
+ IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ snprintf(buf, 16, "gpio-irq%d", irq);
+ port_setup(port, d->hwirq, true);
+ } else
+ goto out;
+
+ /* The GPIO interrupt is triggered only when its input value
+ * transitions from 0 to 1. So invert the input value if the
+ * irq type is level-low or falling-edge.
+ */
+ if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
+ writel(pintmask, &pint_regs->invert_set);
+ else
+ writel(pintmask, &pint_regs->invert_clear);
+
+ /* In edge sensitive case, if the input value of the requested irq
+ * is already 1, invert it.
+ */
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ if (gpio_get_value(port->chip.base + d->hwirq))
+ writel(pintmask, &pint_regs->invert_set);
+ else
+ writel(pintmask, &pint_regs->invert_clear);
+ }
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
+ writel(pintmask, &pint_regs->edge_set);
+ __irq_set_handler_locked(irq, handle_edge_irq);
+ } else {
+ writel(pintmask, &pint_regs->edge_clear);
+ __irq_set_handler_locked(irq, handle_level_irq);
+ }
+
+out:
+ spin_unlock(&port->pint->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int adi_gpio_set_wake(struct irq_data *d, unsigned int state)
+{
+ struct gpio_port *port = irq_data_get_irq_chip_data(d);
+
+ if (!port || !port->pint || port->pint->irq != d->irq)
+ return -EINVAL;
+
+#ifndef SEC_GCTL
+ adi_internal_set_wake(port->pint->irq, state);
+#endif
+
+ return 0;
+}
+
+static int adi_pint_suspend(void)
+{
+ struct gpio_pint *pint;
+
+ list_for_each_entry(pint, &adi_pint_list, node) {
+ writel(0xffffffff, &pint->regs->mask_clear);
+ pint->saved_data.assign = readl(&pint->regs->assign);
+ pint->saved_data.edge_set = readl(&pint->regs->edge_set);
+ pint->saved_data.invert_set = readl(&pint->regs->invert_set);
+ }
+
+ return 0;
+}
+
+static void adi_pint_resume(void)
+{
+ struct gpio_pint *pint;
+
+ list_for_each_entry(pint, &adi_pint_list, node) {
+ writel(pint->saved_data.assign, &pint->regs->assign);
+ writel(pint->saved_data.edge_set, &pint->regs->edge_set);
+ writel(pint->saved_data.invert_set, &pint->regs->invert_set);
+ }
+}
+
+static int adi_gpio_suspend(void)
+{
+ struct gpio_port *port;
+
+ list_for_each_entry(port, &adi_gpio_port_list, node) {
+ port->saved_data.fer = readw(&port->regs->port_fer);
+ port->saved_data.mux = readl(&port->regs->port_mux);
+ port->saved_data.data = readw(&port->regs->data);
+ port->saved_data.inen = readw(&port->regs->inen);
+ port->saved_data.dir = readw(&port->regs->dir_set);
+ }
+
+ return adi_pint_suspend();
+}
+
+static void adi_gpio_resume(void)
+{
+ struct gpio_port *port;
+
+ adi_pint_resume();
+
+ list_for_each_entry(port, &adi_gpio_port_list, node) {
+ writel(port->saved_data.mux, &port->regs->port_mux);
+ writew(port->saved_data.fer, &port->regs->port_fer);
+ writew(port->saved_data.inen, &port->regs->inen);
+ writew(port->saved_data.data & port->saved_data.dir,
+ &port->regs->data_set);
+ writew(port->saved_data.dir, &port->regs->dir_set);
+ }
+
+}
+
+static struct syscore_ops gpio_pm_syscore_ops = {
+ .suspend = adi_gpio_suspend,
+ .resume = adi_gpio_resume,
+};
+#else /* CONFIG_PM */
+#define adi_gpio_set_wake NULL
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void preflow_handler(struct irq_desc *desc)
+{
+ if (desc->preflow_handler)
+ desc->preflow_handler(&desc->irq_data);
+}
+#else
+static inline void preflow_handler(struct irq_desc *desc) { }
+#endif
+
+static void adi_gpio_handle_pint_irq(unsigned int inta_irq,
+ struct irq_desc *desc)
+{
+ u32 request;
+ u32 level_mask, hwirq;
+ bool umask = false;
+ struct gpio_pint *pint = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct gpio_pint_regs *regs = pint->regs;
+ struct irq_domain *domain;
+
+ preflow_handler(desc);
+ chained_irq_enter(chip, desc);
+
+ request = readl(&regs->request);
+ level_mask = readl(&regs->edge_set) & request;
+
+ hwirq = 0;
+ domain = pint->domain[0];
+ while (request) {
+ /* The domain pointer needs to be changed only once, at IRQ 16, as
+ * we walk through the IRQ requests from bit 0 to bit 31.
+ */
+ if (hwirq == PINT_HI_OFFSET)
+ domain = pint->domain[1];
+
+ if (request & 1) {
+ if (level_mask & BIT(hwirq)) {
+ umask = true;
+ chained_irq_exit(chip, desc);
+ }
+ generic_handle_irq(irq_find_mapping(domain,
+ hwirq % PINT_HI_OFFSET));
+ }
+
+ hwirq++;
+ request >>= 1;
+ }
+
+ if (!umask)
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip adi_gpio_irqchip = {
+ .name = "GPIO",
+ .irq_ack = adi_gpio_ack_irq,
+ .irq_mask = adi_gpio_mask_irq,
+ .irq_mask_ack = adi_gpio_mask_ack_irq,
+ .irq_unmask = adi_gpio_unmask_irq,
+ .irq_disable = adi_gpio_mask_irq,
+ .irq_enable = adi_gpio_unmask_irq,
+ .irq_set_type = adi_gpio_irq_type,
+ .irq_startup = adi_gpio_irq_startup,
+ .irq_shutdown = adi_gpio_irq_shutdown,
+ .irq_set_wake = adi_gpio_set_wake,
+};
+
+static int adi_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->soc->ngroups;
+}
+
+static const char *adi_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->soc->groups[selector].name;
+}
+
+static int adi_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pinctrl->soc->groups[selector].pins;
+ *num_pins = pinctrl->soc->groups[selector].num;
+ return 0;
+}
+
+static struct pinctrl_ops adi_pctrl_ops = {
+ .get_groups_count = adi_get_groups_count,
+ .get_group_name = adi_get_group_name,
+ .get_group_pins = adi_get_group_pins,
+};
+
+static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_port *port;
+ struct pinctrl_gpio_range *range;
+ unsigned long flags;
+ unsigned short *mux, pin;
+
+ mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+
+ while (*mux) {
+ pin = P_IDENT(*mux);
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+ if (range == NULL) /* should not happen */
+ return -ENODEV;
+
+ port = container_of(range->gc, struct gpio_port, chip);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ portmux_setup(port, pin_to_offset(range, pin),
+ P_FUNCT2MUX(*mux));
+ port_setup(port, pin_to_offset(range, pin), false);
+ mux++;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return 0;
+}
+
+static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_port *port;
+ struct pinctrl_gpio_range *range;
+ unsigned long flags;
+ unsigned short *mux, pin;
+
+ mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+
+ while (*mux) {
+ pin = P_IDENT(*mux);
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+ if (range == NULL) /* should not happen */
+ return;
+
+ port = container_of(range->gc, struct gpio_port, chip);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_setup(port, pin_to_offset(range, pin), true);
+ mux++;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+}
+
+static int adi_pinmux_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->soc->nfunctions;
+}
+
+static const char *adi_pinmux_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->soc->functions[selector].name;
+}
+
+static int adi_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pinctrl->soc->functions[selector].groups;
+ *num_groups = pinctrl->soc->functions[selector].num_groups;
+ return 0;
+}
+
+static int adi_pinmux_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned pin)
+{
+ struct gpio_port *port;
+ unsigned long flags;
+ u8 offset;
+
+ port = container_of(range->gc, struct gpio_port, chip);
+ offset = pin_to_offset(range, pin);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_setup(port, offset, true);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static struct pinmux_ops adi_pinmux_ops = {
+ .enable = adi_pinmux_enable,
+ .disable = adi_pinmux_disable,
+ .get_functions_count = adi_pinmux_get_funcs_count,
+ .get_function_name = adi_pinmux_get_func_name,
+ .get_function_groups = adi_pinmux_get_groups,
+ .gpio_request_enable = adi_pinmux_request_gpio,
+};
+
+
+static struct pinctrl_desc adi_pinmux_desc = {
+ .name = DRIVER_NAME,
+ .pctlops = &adi_pctrl_ops,
+ .pmxops = &adi_pinmux_ops,
+ .owner = THIS_MODULE,
+};
+
+static int adi_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void adi_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
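+/*
+ * A note on the register style used below: the data and dir registers have
+ * write-one-to-set and write-one-to-clear companions (data_set/data_clear,
+ * dir_set/dir_clear), so writing BIT(offset) updates a single pin without a
+ * read-modify-write cycle; inen has no such companion and is therefore
+ * read-modified-written under the port lock.
+ */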
+static int adi_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_port *port;
+ unsigned long flags;
+
+ port = container_of(chip, struct gpio_port, chip);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ writew(BIT(offset), &port->regs->dir_clear);
+ writew(readw(&port->regs->inen) | BIT(offset), &port->regs->inen);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void adi_gpio_set_value(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct gpio_port *port = container_of(chip, struct gpio_port, chip);
+ struct gpio_port_t *regs = port->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (value)
+ writew(BIT(offset), &regs->data_set);
+ else
+ writew(BIT(offset), &regs->data_clear);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int adi_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct gpio_port *port = container_of(chip, struct gpio_port, chip);
+ struct gpio_port_t *regs = port->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ writew(readw(&regs->inen) & ~BIT(offset), &regs->inen);
+ if (value)
+ writew(BIT(offset), &regs->data_set);
+ else
+ writew(BIT(offset), &regs->data_clear);
+ writew(BIT(offset), &regs->dir_set);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static int adi_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_port *port = container_of(chip, struct gpio_port, chip);
+ struct gpio_port_t *regs = port->regs;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ret = !!(readw(&regs->data) & BIT(offset));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+}
+
+static int adi_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_port *port = container_of(chip, struct gpio_port, chip);
+
+ if (port->irq_base >= 0)
+ return irq_find_mapping(port->domain, offset);
+ else
+ return irq_create_mapping(port->domain, offset);
+}
+
+static int adi_pint_map_port(struct gpio_pint *pint, bool assign, u8 map,
+ struct irq_domain *domain)
+{
+ struct gpio_pint_regs *regs = pint->regs;
+ u32 map_mask;
+
+ if (pint->map_count > 1)
+ return -EINVAL;
+
+ pint->map_count++;
+
+ /* The map_mask of each GPIO port is a 16-bit duplicate
+ * of the 8-bit map. It can be set to either the high 16 bits or the
+ * low 16 bits of the pint assignment register.
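+ *
+ * For example, a map value of 0x2a gives a map_mask of 0x2a2a; when
+ * "assign" is true the mask is shifted into the upper half of the
+ * assignment register, otherwise it is written to the lower half.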
+ */
+ map_mask = (map << 8) | map;
+ if (assign) {
+ map_mask <<= PINT_HI_OFFSET;
+ writel((readl(&regs->assign) & 0xFFFF) | map_mask,
+ &regs->assign);
+ } else
+ writel((readl(&regs->assign) & 0xFFFF0000) | map_mask,
+ &regs->assign);
+
+ pint->domain[assign] = domain;
+
+ return 0;
+}
+
+static int adi_gpio_pint_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct gpio_pint *pint;
+
+ pint = devm_kzalloc(dev, sizeof(struct gpio_pint), GFP_KERNEL);
+ if (!pint) {
+ dev_err(dev, "Memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pint->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pint->base))
+ return PTR_ERR(pint->base);
+
+ pint->regs = (struct gpio_pint_regs *)pint->base;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "Invalid IRQ resource\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&pint->lock);
+
+ pint->irq = res->start;
+ pint->pint_map_port = adi_pint_map_port;
+ platform_set_drvdata(pdev, pint);
+
+ irq_set_chained_handler(pint->irq, adi_gpio_handle_pint_irq);
+ irq_set_handler_data(pint->irq, pint);
+
+ list_add_tail(&pint->node, &adi_pint_list);
+
+ return 0;
+}
+
+static int adi_gpio_pint_remove(struct platform_device *pdev)
+{
+ struct gpio_pint *pint = platform_get_drvdata(pdev);
+
+ list_del(&pint->node);
+ irq_set_handler(pint->irq, handle_simple_irq);
+
+ return 0;
+}
+
+static int adi_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct gpio_port *port = d->host_data;
+
+ if (!port)
+ return -EINVAL;
+
+ irq_set_chip_data(irq, port);
+ irq_set_chip_and_handler(irq, &adi_gpio_irqchip,
+ handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops adi_gpio_irq_domain_ops = {
+ .map = adi_gpio_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int adi_gpio_init_int(struct gpio_port *port)
+{
+ struct device_node *node = port->dev->of_node;
+ struct gpio_pint *pint = port->pint;
+ int ret;
+
+ port->domain = irq_domain_add_linear(node, port->width,
+ &adi_gpio_irq_domain_ops, port);
+ if (!port->domain) {
+ dev_err(port->dev, "Failed to create irqdomain\n");
+ return -ENOSYS;
+ }
+
+ /* According to BF54x and BF60x HRM, pin interrupt devices are not
+ * part of the GPIO port device. In GPIO interrupt mode, the GPIO
+ * pins of multiple port devices can be routed into one pin interrupt
+ * device. The mapping can be configured by setting the pint assignment
+ * register with the mapping value of the desired GPIO port. This is
+ * done via function pint_map_port().
+ */
+ ret = pint->pint_map_port(port->pint, port->pint_assign,
+ port->pint_map, port->domain);
+ if (ret)
+ return ret;
+
+ if (port->irq_base >= 0) {
+ ret = irq_create_strict_mappings(port->domain, port->irq_base,
+ 0, port->width);
+ if (ret) {
+ dev_err(port->dev, "Couldn't associate to domain\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#define DEVNAME_SIZE 16
+
+static int adi_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct adi_pinctrl_gpio_platform_data *pdata;
+ struct resource *res;
+ struct gpio_port *port;
+ char pinctrl_devname[DEVNAME_SIZE];
+ static int gpio;
+ int ret = 0, ret1;
+
+ pdata = dev->platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ port = devm_kzalloc(dev, sizeof(struct gpio_port), GFP_KERNEL);
+ if (!port) {
+ dev_err(dev, "Memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ port->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->base))
+ return PTR_ERR(port->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ port->irq_base = -1;
+ else
+ port->irq_base = res->start;
+
+ port->width = pdata->port_width;
+ port->dev = dev;
+ port->regs = (struct gpio_port_t *)port->base;
+ port->pint_assign = pdata->pint_assign;
+ port->pint_map = pdata->pint_map;
+
+ port->pint = find_gpio_pint(pdata->pint_id);
+ if (port->pint) {
+ ret = adi_gpio_init_int(port);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_init(&port->lock);
+
+ platform_set_drvdata(pdev, port);
+
+ port->chip.label = "adi-gpio";
+ port->chip.direction_input = adi_gpio_direction_input;
+ port->chip.get = adi_gpio_get_value;
+ port->chip.direction_output = adi_gpio_direction_output;
+ port->chip.set = adi_gpio_set_value;
+ port->chip.request = adi_gpio_request;
+ port->chip.free = adi_gpio_free;
+ port->chip.to_irq = adi_gpio_to_irq;
+ if (pdata->port_gpio_base > 0)
+ port->chip.base = pdata->port_gpio_base;
+ else
+ port->chip.base = gpio;
+ port->chip.ngpio = port->width;
+ gpio = port->chip.base + port->width;
+
+ ret = gpiochip_add(&port->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to add GPIO chip.\n");
+ goto out_remove_domain;
+ }
+
+ /* Add gpio pin range */
+ snprintf(pinctrl_devname, DEVNAME_SIZE, "pinctrl-adi2.%d",
+ pdata->pinctrl_id);
+ pinctrl_devname[DEVNAME_SIZE - 1] = 0;
+ ret = gpiochip_add_pin_range(&port->chip, pinctrl_devname,
+ 0, pdata->port_pin_base, port->width);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to add pin range to %s.\n",
+ pinctrl_devname);
+ goto out_remove_gpiochip;
+ }
+
+ list_add_tail(&port->node, &adi_gpio_port_list);
+
+ return 0;
+
+out_remove_gpiochip:
+ ret1 = gpiochip_remove(&port->chip);
+out_remove_domain:
+ if (port->pint)
+ irq_domain_remove(port->domain);
+
+ return ret;
+}
+
+static int adi_gpio_remove(struct platform_device *pdev)
+{
+ struct gpio_port *port = platform_get_drvdata(pdev);
+ int ret;
+ u8 offset;
+
+ list_del(&port->node);
+ gpiochip_remove_pin_ranges(&port->chip);
+ ret = gpiochip_remove(&port->chip);
+ if (port->pint) {
+ for (offset = 0; offset < port->width; offset++)
+ irq_dispose_mapping(irq_find_mapping(port->domain,
+ offset));
+ irq_domain_remove(port->domain);
+ }
+
+ return ret;
+}
+
+static int adi_pinctrl_probe(struct platform_device *pdev)
+{
+ struct adi_pinctrl *pinctrl;
+
+ pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
+ if (!pinctrl)
+ return -ENOMEM;
+
+ pinctrl->dev = &pdev->dev;
+
+ adi_pinctrl_soc_init(&pinctrl->soc);
+
+ adi_pinmux_desc.pins = pinctrl->soc->pins;
+ adi_pinmux_desc.npins = pinctrl->soc->npins;
+
+ /* Now register the pin controller and all pins it handles */
+ pinctrl->pctl = pinctrl_register(&adi_pinmux_desc, &pdev->dev, pinctrl);
+ if (!pinctrl->pctl) {
+ dev_err(&pdev->dev, "could not register pinctrl ADI2 driver\n");
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, pinctrl);
+
+ return 0;
+}
+
+static int adi_pinctrl_remove(struct platform_device *pdev)
+{
+ struct adi_pinctrl *pinctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pinctrl->pctl);
+
+ return 0;
+}
+
+static struct platform_driver adi_pinctrl_driver = {
+ .probe = adi_pinctrl_probe,
+ .remove = adi_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static struct platform_driver adi_gpio_pint_driver = {
+ .probe = adi_gpio_pint_probe,
+ .remove = adi_gpio_pint_remove,
+ .driver = {
+ .name = "adi-gpio-pint",
+ },
+};
+
+static struct platform_driver adi_gpio_driver = {
+ .probe = adi_gpio_probe,
+ .remove = adi_gpio_remove,
+ .driver = {
+ .name = "adi-gpio",
+ },
+};
+
+static int __init adi_pinctrl_setup(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&adi_pinctrl_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&adi_gpio_pint_driver);
+ if (ret)
+ goto pint_error;
+
+ ret = platform_driver_register(&adi_gpio_driver);
+ if (ret)
+ goto gpio_error;
+
+#ifdef CONFIG_PM
+ register_syscore_ops(&gpio_pm_syscore_ops);
+#endif
+ return ret;
+gpio_error:
+ platform_driver_unregister(&adi_gpio_pint_driver);
+pint_error:
+ platform_driver_unregister(&adi_pinctrl_driver);
+
+ return ret;
+}
+arch_initcall(adi_pinctrl_setup);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("ADI gpio2 pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-adi2.h b/drivers/pinctrl/pinctrl-adi2.h
new file mode 100644
index 00000000000..1f06f8df1fa
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-adi2.h
@@ -0,0 +1,75 @@
+/*
+ * Pinctrl Driver for ADI GPIO2 controller
+ *
+ * Copyright 2007-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPLv2 or later
+ */
+
+#ifndef PINCTRL_PINCTRL_ADI2_H
+#define PINCTRL_PINCTRL_ADI2_H
+
+#include <linux/pinctrl/pinctrl.h>
+
+ /**
+ * struct adi_pin_group - describes a pin group
+ * @name: the name of this pin group
+ * @pins: an array of pins
+ * @num: the number of pins in this array
+ */
+struct adi_pin_group {
+ const char *name;
+ const unsigned *pins;
+ const unsigned num;
+};
+
+#define ADI_PIN_GROUP(n, p) \
+ { \
+ .name = n, \
+ .pins = p, \
+ .num = ARRAY_SIZE(p), \
+ }
+
+ /**
+ * struct adi_pmx_func - describes function mux setting of pin groups
+ * @name: the name of this function mux setting
+ * @groups: an array of pin groups
+ * @num_groups: the number of pin groups in this array
+ * @mux: the function mux setting array, terminated by zero
+ */
+struct adi_pmx_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+ const unsigned short *mux;
+};
+
+#define ADI_PMX_FUNCTION(n, g, m) \
+ { \
+ .name = n, \
+ .groups = g, \
+ .num_groups = ARRAY_SIZE(g), \
+ .mux = m, \
+ }
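+
+/*
+ * Example use of the two macros above (the pin, group and function names
+ * here are purely illustrative, not part of any SoC data):
+ *
+ *   static const unsigned short uart0_mux[] = { P_UART0_TX, P_UART0_RX, 0 };
+ *   static const unsigned uart0_pins[] = { GPIO_PD7, GPIO_PD8 };
+ *   static const char * const uart0_grps[] = { "uart0grp" };
+ *
+ *   static const struct adi_pin_group adi_pin_groups[] = {
+ *           ADI_PIN_GROUP("uart0grp", uart0_pins),
+ *   };
+ *
+ *   static const struct adi_pmx_func adi_pmx_functions[] = {
+ *           ADI_PMX_FUNCTION("uart0", uart0_grps, uart0_mux),
+ *   };
+ */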
+
+/**
+ * struct adi_pinctrl_soc_data - ADI pin controller per-SoC configuration
+ * @functions: The functions supported on this SoC.
+ * @nfunctions: The number of entries in @functions.
+ * @groups: An array describing all pin groups this SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @pins: An array describing all pins the pin controller affects.
+ * @npins: The number of entries in @pins.
+ */
+struct adi_pinctrl_soc_data {
+ const struct adi_pmx_func *functions;
+ int nfunctions;
+ const struct adi_pin_group *groups;
+ int ngroups;
+ const struct pinctrl_pin_desc *pins;
+ int npins;
+};
+
+void adi_pinctrl_soc_init(const struct adi_pinctrl_soc_data **soc);
+
+#endif /* PINCTRL_PINCTRL_ADI2_H */
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
new file mode 100644
index 00000000000..01bffc1d52f
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -0,0 +1,630 @@
+/*
+ * ams AS3722 pin control and GPIO driver.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/as3722.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+#define AS3722_PIN_GPIO0 0
+#define AS3722_PIN_GPIO1 1
+#define AS3722_PIN_GPIO2 2
+#define AS3722_PIN_GPIO3 3
+#define AS3722_PIN_GPIO4 4
+#define AS3722_PIN_GPIO5 5
+#define AS3722_PIN_GPIO6 6
+#define AS3722_PIN_GPIO7 7
+#define AS3722_PIN_NUM (AS3722_PIN_GPIO7 + 1)
+
+#define AS3722_GPIO_MODE_PULL_UP BIT(PIN_CONFIG_BIAS_PULL_UP)
+#define AS3722_GPIO_MODE_PULL_DOWN BIT(PIN_CONFIG_BIAS_PULL_DOWN)
+#define AS3722_GPIO_MODE_HIGH_IMPED BIT(PIN_CONFIG_BIAS_HIGH_IMPEDANCE)
+#define AS3722_GPIO_MODE_OPEN_DRAIN BIT(PIN_CONFIG_DRIVE_OPEN_DRAIN)
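+
+/*
+ * These mode flags reuse the generic PIN_CONFIG_* values as bit numbers, so
+ * the pinconf callbacks below can accumulate and test them in
+ * gpio_control[].mode_prop with plain bit operations.
+ */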
+
+struct as3722_pin_function {
+ const char *name;
+ const char * const *groups;
+ unsigned ngroups;
+ int mux_option;
+};
+
+struct as3722_gpio_pin_control {
+ bool enable_gpio_invert;
+ unsigned mode_prop;
+ int io_function;
+};
+
+struct as3722_pingroup {
+ const char *name;
+ const unsigned pins[1];
+ unsigned npins;
+};
+
+struct as3722_pctrl_info {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct as3722 *as3722;
+ struct gpio_chip gpio_chip;
+ int pins_current_opt[AS3722_PIN_NUM];
+ const struct as3722_pin_function *functions;
+ unsigned num_functions;
+ const struct as3722_pingroup *pin_groups;
+ int num_pin_groups;
+ const struct pinctrl_pin_desc *pins;
+ unsigned num_pins;
+ struct as3722_gpio_pin_control gpio_control[AS3722_PIN_NUM];
+};
+
+static const struct pinctrl_pin_desc as3722_pins_desc[] = {
+ PINCTRL_PIN(AS3722_PIN_GPIO0, "gpio0"),
+ PINCTRL_PIN(AS3722_PIN_GPIO1, "gpio1"),
+ PINCTRL_PIN(AS3722_PIN_GPIO2, "gpio2"),
+ PINCTRL_PIN(AS3722_PIN_GPIO3, "gpio3"),
+ PINCTRL_PIN(AS3722_PIN_GPIO4, "gpio4"),
+ PINCTRL_PIN(AS3722_PIN_GPIO5, "gpio5"),
+ PINCTRL_PIN(AS3722_PIN_GPIO6, "gpio6"),
+ PINCTRL_PIN(AS3722_PIN_GPIO7, "gpio7"),
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+};
+
+enum as3722_pinmux_option {
+ AS3722_PINMUX_GPIO = 0,
+ AS3722_PINMUX_INTERRUPT_OUT = 1,
+ AS3722_PINMUX_VSUB_VBAT_UNDEB_LOW_OUT = 2,
+ AS3722_PINMUX_GPIO_INTERRUPT = 3,
+ AS3722_PINMUX_PWM_INPUT = 4,
+ AS3722_PINMUX_VOLTAGE_IN_STBY = 5,
+ AS3722_PINMUX_OC_PG_SD0 = 6,
+ AS3722_PINMUX_PG_OUT = 7,
+ AS3722_PINMUX_CLK32K_OUT = 8,
+ AS3722_PINMUX_WATCHDOG_INPUT = 9,
+ AS3722_PINMUX_SOFT_RESET_IN = 11,
+ AS3722_PINMUX_PWM_OUTPUT = 12,
+ AS3722_PINMUX_VSUB_VBAT_LOW_DEB_OUT = 13,
+ AS3722_PINMUX_OC_PG_SD6 = 14,
+};
+
+#define FUNCTION_GROUP(fname, mux) \
+ { \
+ .name = #fname, \
+ .groups = gpio_groups, \
+ .ngroups = ARRAY_SIZE(gpio_groups), \
+ .mux_option = AS3722_PINMUX_##mux, \
+ }
+
+static const struct as3722_pin_function as3722_pin_function[] = {
+ FUNCTION_GROUP(gpio, GPIO),
+ FUNCTION_GROUP(interrupt-out, INTERRUPT_OUT),
+ FUNCTION_GROUP(gpio-in-interrupt, GPIO_INTERRUPT),
+ FUNCTION_GROUP(vsup-vbat-low-undebounce-out, VSUB_VBAT_UNDEB_LOW_OUT),
+ FUNCTION_GROUP(vsup-vbat-low-debounce-out, VSUB_VBAT_LOW_DEB_OUT),
+ FUNCTION_GROUP(voltage-in-standby, VOLTAGE_IN_STBY),
+ FUNCTION_GROUP(oc-pg-sd0, OC_PG_SD0),
+ FUNCTION_GROUP(oc-pg-sd6, OC_PG_SD6),
+ FUNCTION_GROUP(powergood-out, PG_OUT),
+ FUNCTION_GROUP(pwm-in, PWM_INPUT),
+ FUNCTION_GROUP(pwm-out, PWM_OUTPUT),
+ FUNCTION_GROUP(clk32k-out, CLK32K_OUT),
+ FUNCTION_GROUP(watchdog-in, WATCHDOG_INPUT),
+ FUNCTION_GROUP(soft-reset-in, SOFT_RESET_IN),
+};
+
+#define AS3722_PINGROUP(pg_name, pin_id) \
+ { \
+ .name = #pg_name, \
+ .pins = {AS3722_PIN_##pin_id}, \
+ .npins = 1, \
+ }
+
+static const struct as3722_pingroup as3722_pingroups[] = {
+ AS3722_PINGROUP(gpio0, GPIO0),
+ AS3722_PINGROUP(gpio1, GPIO1),
+ AS3722_PINGROUP(gpio2, GPIO2),
+ AS3722_PINGROUP(gpio3, GPIO3),
+ AS3722_PINGROUP(gpio4, GPIO4),
+ AS3722_PINGROUP(gpio5, GPIO5),
+ AS3722_PINGROUP(gpio6, GPIO6),
+ AS3722_PINGROUP(gpio7, GPIO7),
+};
+
+static int as3722_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return as_pci->num_pin_groups;
+}
+
+static const char *as3722_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return as_pci->pin_groups[group].name;
+}
+
+static int as3722_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins, unsigned *num_pins)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = as_pci->pin_groups[group].pins;
+ *num_pins = as_pci->pin_groups[group].npins;
+ return 0;
+}
+
+static const struct pinctrl_ops as3722_pinctrl_ops = {
+ .get_groups_count = as3722_pinctrl_get_groups_count,
+ .get_group_name = as3722_pinctrl_get_group_name,
+ .get_group_pins = as3722_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int as3722_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return as_pci->num_functions;
+}
+
+static const char *as3722_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return as_pci->functions[function].name;
+}
+
+static int as3722_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function, const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = as_pci->functions[function].groups;
+ *num_groups = as_pci->functions[function].ngroups;
+ return 0;
+}
+
+static int as3722_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+ int gpio_cntr_reg = AS3722_GPIOn_CONTROL_REG(group);
+ u8 val = AS3722_GPIO_IOSF_VAL(as_pci->functions[function].mux_option);
+ int ret;
+
+ dev_dbg(as_pci->dev, "%s(): GPIO %u pin to function %u and val %u\n",
+ __func__, group, function, val);
+
+ ret = as3722_update_bits(as_pci->as3722, gpio_cntr_reg,
+ AS3722_GPIO_IOSF_MASK, val);
+ if (ret < 0) {
+ dev_err(as_pci->dev, "GPIO%d_CTRL_REG update failed %d\n",
+ group, ret);
+ return ret;
+ }
+ as_pci->gpio_control[group].io_function = function;
+ return ret;
+}
+
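+/*
+ * Translate the accumulated pinconf flags and the requested direction into
+ * one of the AS3722_GPIO_MODE_* register values. For example, open-drain
+ * plus pull-up yields AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP, and a plain
+ * input with pull-down yields AS3722_GPIO_MODE_INPUT_PULL_DOWN, while
+ * high-impedance is rejected with -EINVAL.
+ */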
+static int as3722_pinctrl_gpio_get_mode(unsigned gpio_mode_prop, bool input)
+{
+ if (gpio_mode_prop & AS3722_GPIO_MODE_HIGH_IMPED)
+ return -EINVAL;
+
+ if (gpio_mode_prop & AS3722_GPIO_MODE_OPEN_DRAIN) {
+ if (gpio_mode_prop & AS3722_GPIO_MODE_PULL_UP)
+ return AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP;
+ return AS3722_GPIO_MODE_IO_OPEN_DRAIN;
+ }
+ if (input) {
+ if (gpio_mode_prop & AS3722_GPIO_MODE_PULL_UP)
+ return AS3722_GPIO_MODE_INPUT_PULL_UP;
+ else if (gpio_mode_prop & AS3722_GPIO_MODE_PULL_DOWN)
+ return AS3722_GPIO_MODE_INPUT_PULL_DOWN;
+ return AS3722_GPIO_MODE_INPUT;
+ }
+ if (gpio_mode_prop & AS3722_GPIO_MODE_PULL_DOWN)
+ return AS3722_GPIO_MODE_OUTPUT_VDDL;
+ return AS3722_GPIO_MODE_OUTPUT_VDDH;
+}
+
+static int as3722_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+
+ if (as_pci->gpio_control[offset].io_function)
+ return -EBUSY;
+ return 0;
+}
+
+static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset, bool input)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+ struct as3722 *as3722 = as_pci->as3722;
+ int mode;
+
+ mode = as3722_pinctrl_gpio_get_mode(
+ as_pci->gpio_control[offset].mode_prop, input);
+ if (mode < 0) {
+ dev_err(as_pci->dev, "%s direction for GPIO %d not supported\n",
+ (input) ? "Input" : "Output", offset);
+ return mode;
+ }
+
+ if (as_pci->gpio_control[offset].enable_gpio_invert)
+ mode |= AS3722_GPIO_INV;
+
+ return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+}
+
+static const struct pinmux_ops as3722_pinmux_ops = {
+ .get_functions_count = as3722_pinctrl_get_funcs_count,
+ .get_function_name = as3722_pinctrl_get_func_name,
+ .get_function_groups = as3722_pinctrl_get_func_groups,
+ .enable = as3722_pinctrl_enable,
+ .gpio_request_enable = as3722_pinctrl_gpio_request_enable,
+ .gpio_set_direction = as3722_pinctrl_gpio_set_direction,
+};
+
+static int as3722_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned pin, unsigned long *config)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ int arg = 0;
+ u16 prop;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ prop = AS3722_GPIO_MODE_PULL_UP |
+ AS3722_GPIO_MODE_PULL_DOWN;
+ if (!(as_pci->gpio_control[pin].mode_prop & prop))
+ arg = 1;
+ prop = 0;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ prop = AS3722_GPIO_MODE_PULL_UP;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ prop = AS3722_GPIO_MODE_PULL_DOWN;
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ prop = AS3722_GPIO_MODE_OPEN_DRAIN;
+ break;
+
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ prop = AS3722_GPIO_MODE_HIGH_IMPED;
+ break;
+
+ default:
+ dev_err(as_pci->dev, "Properties not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (as_pci->gpio_control[pin].mode_prop & prop)
+ arg = 1;
+
+ *config = pinconf_to_config_packed(param, (u16)arg);
+ return 0;
+}
+
+static int as3722_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
+{
+ struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ int mode_prop;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ mode_prop = as_pci->gpio_control[pin].mode_prop;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ mode_prop &= ~(AS3722_GPIO_MODE_PULL_UP |
+ AS3722_GPIO_MODE_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mode_prop |= AS3722_GPIO_MODE_PULL_UP;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mode_prop |= AS3722_GPIO_MODE_PULL_DOWN;
+ break;
+
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ mode_prop |= AS3722_GPIO_MODE_HIGH_IMPED;
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mode_prop |= AS3722_GPIO_MODE_OPEN_DRAIN;
+ break;
+
+ default:
+ dev_err(as_pci->dev, "Properties not supported\n");
+ return -ENOTSUPP;
+ }
+
+ as_pci->gpio_control[pin].mode_prop = mode_prop;
+ }
+ return 0;
+}
+
+static const struct pinconf_ops as3722_pinconf_ops = {
+ .pin_config_get = as3722_pinconf_get,
+ .pin_config_set = as3722_pinconf_set,
+};
+
+static struct pinctrl_desc as3722_pinctrl_desc = {
+ .pctlops = &as3722_pinctrl_ops,
+ .pmxops = &as3722_pinmux_ops,
+ .confops = &as3722_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static inline struct as3722_pctrl_info *to_as_pci(struct gpio_chip *chip)
+{
+ return container_of(chip, struct as3722_pctrl_info, gpio_chip);
+}
+
+static int as3722_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct as3722_pctrl_info *as_pci = to_as_pci(chip);
+ struct as3722 *as3722 = as_pci->as3722;
+ int ret;
+ u32 reg;
+ u32 control;
+ u32 val;
+ int mode;
+ int invert_enable;
+
+ ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &control);
+ if (ret < 0) {
+ dev_err(as_pci->dev,
+ "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+ return ret;
+ }
+
+ invert_enable = !!(control & AS3722_GPIO_INV);
+ mode = control & AS3722_GPIO_MODE_MASK;
+ switch (mode) {
+ case AS3722_GPIO_MODE_INPUT:
+ case AS3722_GPIO_MODE_INPUT_PULL_UP:
+ case AS3722_GPIO_MODE_INPUT_PULL_DOWN:
+ case AS3722_GPIO_MODE_IO_OPEN_DRAIN:
+ case AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP:
+ reg = AS3722_GPIO_SIGNAL_IN_REG;
+ break;
+ case AS3722_GPIO_MODE_OUTPUT_VDDH:
+ case AS3722_GPIO_MODE_OUTPUT_VDDL:
+ reg = AS3722_GPIO_SIGNAL_OUT_REG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = as3722_read(as3722, reg, &val);
+ if (ret < 0) {
+ dev_err(as_pci->dev,
+ "GPIO_SIGNAL_IN_REG read failed: %d\n", ret);
+ return ret;
+ }
+
+ val = !!(val & AS3722_GPIOn_SIGNAL(offset));
+ return (invert_enable) ? !val : val;
+}
+
+static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct as3722_pctrl_info *as_pci = to_as_pci(chip);
+ struct as3722 *as3722 = as_pci->as3722;
+ int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+ u32 val;
+ int ret;
+
+ if (value)
+ val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
+ else
+ val = (en_invert) ? AS3722_GPIOn_SIGNAL(offset) : 0;
+
+ ret = as3722_update_bits(as3722, AS3722_GPIO_SIGNAL_OUT_REG,
+ AS3722_GPIOn_SIGNAL(offset), val);
+ if (ret < 0)
+ dev_err(as_pci->dev,
+ "GPIO_SIGNAL_OUT_REG update failed: %d\n", ret);
+}
+
+static int as3722_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int as3722_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ as3722_gpio_set(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int as3722_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct as3722_pctrl_info *as_pci = to_as_pci(chip);
+
+ return as3722_irq_get_virq(as_pci->as3722, offset);
+}
+
+static int as3722_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void as3722_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static const struct gpio_chip as3722_gpio_chip = {
+ .label = "as3722-gpio",
+ .owner = THIS_MODULE,
+ .request = as3722_gpio_request,
+ .free = as3722_gpio_free,
+ .get = as3722_gpio_get,
+ .set = as3722_gpio_set,
+ .direction_input = as3722_gpio_direction_input,
+ .direction_output = as3722_gpio_direction_output,
+ .to_irq = as3722_gpio_to_irq,
+ .can_sleep = 1,
+ .ngpio = AS3722_PIN_NUM,
+ .base = -1,
+};
+
+static int as3722_pinctrl_probe(struct platform_device *pdev)
+{
+ struct as3722_pctrl_info *as_pci;
+ int ret;
+ int tret;
+
+ as_pci = devm_kzalloc(&pdev->dev, sizeof(*as_pci), GFP_KERNEL);
+ if (!as_pci)
+ return -ENOMEM;
+
+ as_pci->dev = &pdev->dev;
+ as_pci->dev->of_node = pdev->dev.parent->of_node;
+ as_pci->as3722 = dev_get_drvdata(pdev->dev.parent);
+ platform_set_drvdata(pdev, as_pci);
+
+ as_pci->pins = as3722_pins_desc;
+ as_pci->num_pins = ARRAY_SIZE(as3722_pins_desc);
+ as_pci->functions = as3722_pin_function;
+ as_pci->num_functions = ARRAY_SIZE(as3722_pin_function);
+ as_pci->pin_groups = as3722_pingroups;
+ as_pci->num_pin_groups = ARRAY_SIZE(as3722_pingroups);
+ as3722_pinctrl_desc.name = dev_name(&pdev->dev);
+ as3722_pinctrl_desc.pins = as3722_pins_desc;
+ as3722_pinctrl_desc.npins = ARRAY_SIZE(as3722_pins_desc);
+ as_pci->pctl = pinctrl_register(&as3722_pinctrl_desc,
+ &pdev->dev, as_pci);
+ if (!as_pci->pctl) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return -EINVAL;
+ }
+
+ as_pci->gpio_chip = as3722_gpio_chip;
+ as_pci->gpio_chip.dev = &pdev->dev;
+ as_pci->gpio_chip.of_node = pdev->dev.parent->of_node;
+ ret = gpiochip_add(&as_pci->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
+ goto fail_chip_add;
+ }
+
+ ret = gpiochip_add_pin_range(&as_pci->gpio_chip, dev_name(&pdev->dev),
+ 0, 0, AS3722_PIN_NUM);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't add pin range, %d\n", ret);
+ goto fail_range_add;
+ }
+
+ return 0;
+
+fail_range_add:
+ tret = gpiochip_remove(&as_pci->gpio_chip);
+ if (tret < 0)
+ dev_warn(&pdev->dev, "Couldn't remove gpio chip, %d\n", tret);
+
+fail_chip_add:
+ pinctrl_unregister(as_pci->pctl);
+ return ret;
+}
+
+static int as3722_pinctrl_remove(struct platform_device *pdev)
+{
+ struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&as_pci->gpio_chip);
+ if (ret < 0)
+ return ret;
+ pinctrl_unregister(as_pci->pctl);
+ return 0;
+}
+
+static const struct of_device_id as3722_pinctrl_of_match[] = {
+ { .compatible = "ams,as3722-pinctrl", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, as3722_pinctrl_of_match);
+
+static struct platform_driver as3722_pinctrl_driver = {
+ .driver = {
+ .name = "as3722-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = as3722_pinctrl_of_match,
+ },
+ .probe = as3722_pinctrl_probe,
+ .remove = as3722_pinctrl_remove,
+};
+module_platform_driver(as3722_pinctrl_driver);
+
+MODULE_ALIAS("platform:as3722-pinctrl");
+MODULE_DESCRIPTION("AS3722 pin control and GPIO driver");
+MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index f350fd2e170..a7549c4c83b 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -33,6 +33,7 @@
#include "core.h"
+#define MAX_GPIO_BANKS 5
#define MAX_NB_GPIO_PER_BANK 32
struct at91_pinctrl_mux_ops;
@@ -144,11 +145,11 @@ struct at91_pinctrl_mux_ops {
void (*mux_C_periph)(void __iomem *pio, unsigned mask);
void (*mux_D_periph)(void __iomem *pio, unsigned mask);
bool (*get_deglitch)(void __iomem *pio, unsigned pin);
- void (*set_deglitch)(void __iomem *pio, unsigned mask, bool in_on);
+ void (*set_deglitch)(void __iomem *pio, unsigned mask, bool is_on);
bool (*get_debounce)(void __iomem *pio, unsigned pin, u32 *div);
- void (*set_debounce)(void __iomem *pio, unsigned mask, bool in_on, u32 div);
+ void (*set_debounce)(void __iomem *pio, unsigned mask, bool is_on, u32 div);
bool (*get_pulldown)(void __iomem *pio, unsigned pin);
- void (*set_pulldown)(void __iomem *pio, unsigned mask, bool in_on);
+ void (*set_pulldown)(void __iomem *pio, unsigned mask, bool is_on);
bool (*get_schmitt_trig)(void __iomem *pio, unsigned pin);
void (*disable_schmitt_trig)(void __iomem *pio, unsigned mask);
/* irq */
@@ -243,7 +244,7 @@ static int at91_dt_node_to_map(struct pinctrl_dev *pctldev,
int i;
/*
- * first find the group of this node and check if we need create
+ * first find the group of this node and check if we need to create
* config maps for pins
*/
grp = at91_pinctrl_find_group_by_name(info, np->name);
@@ -417,6 +418,14 @@ static void at91_mux_set_deglitch(void __iomem *pio, unsigned mask, bool is_on)
__raw_writel(mask, pio + (is_on ? PIO_IFER : PIO_IFDR));
}
+static bool at91_mux_pio3_get_deglitch(void __iomem *pio, unsigned pin)
+{
+ if ((__raw_readl(pio + PIO_IFSR) >> pin) & 0x1)
+ return !((__raw_readl(pio + PIO_IFSCSR) >> pin) & 0x1);
+
+ return false;
+}
+
static void at91_mux_pio3_set_deglitch(void __iomem *pio, unsigned mask, bool is_on)
{
if (is_on)
@@ -428,7 +437,8 @@ static bool at91_mux_pio3_get_debounce(void __iomem *pio, unsigned pin, u32 *div
{
*div = __raw_readl(pio + PIO_SCDR);
- return (__raw_readl(pio + PIO_IFSCSR) >> pin) & 0x1;
+ return ((__raw_readl(pio + PIO_IFSR) >> pin) & 0x1) &&
+ ((__raw_readl(pio + PIO_IFSCSR) >> pin) & 0x1);
}
static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
@@ -438,9 +448,8 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
__raw_writel(mask, pio + PIO_IFSCER);
__raw_writel(div & PIO_SCDR_DIV, pio + PIO_SCDR);
__raw_writel(mask, pio + PIO_IFER);
- } else {
- __raw_writel(mask, pio + PIO_IFDR);
- }
+ } else
+ __raw_writel(mask, pio + PIO_IFSCDR);
}
static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
@@ -478,7 +487,7 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = {
.mux_B_periph = at91_mux_pio3_set_B_periph,
.mux_C_periph = at91_mux_pio3_set_C_periph,
.mux_D_periph = at91_mux_pio3_set_D_periph,
- .get_deglitch = at91_mux_get_deglitch,
+ .get_deglitch = at91_mux_pio3_get_deglitch,
.set_deglitch = at91_mux_pio3_set_deglitch,
.get_debounce = at91_mux_pio3_get_debounce,
.set_debounce = at91_mux_pio3_set_debounce,
@@ -564,7 +573,7 @@ static int at91_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
info->functions[selector].name, info->groups[group].name);
/* first check that all the pins of the group are valid with a valid
- * paramter */
+ * parameter */
for (i = 0; i < npins; i++) {
pin = &pins_conf[i];
ret = pin_check_config(info, info->groups[group].name, i, pin);
@@ -958,7 +967,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
at91_pinctrl_child_count(info, np);
if (info->nbanks < 1) {
- dev_err(&pdev->dev, "you need to specify atleast one gpio-controller\n");
+ dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
return -EINVAL;
}
@@ -1495,7 +1504,7 @@ static int at91_gpio_of_irq_setup(struct device_node *node,
if (at91_gpio->pioc_idx)
prev = gpio_chips[at91_gpio->pioc_idx - 1];
- /* The toplevel handler handles one bank of GPIOs, except
+ /* The top level handler handles one bank of GPIOs, except
* on some SoC it can handles up to three...
* We only set up the handler for the first of the list.
*/
@@ -1671,7 +1680,7 @@ static struct platform_driver at91_gpio_driver = {
.driver = {
.name = "gpio-at91",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(at91_gpio_of_match),
+ .of_match_table = at91_gpio_of_match,
},
.probe = at91_gpio_probe,
};
@@ -1680,7 +1689,7 @@ static struct platform_driver at91_pinctrl_driver = {
.driver = {
.name = "pinctrl-at91",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(at91_pinctrl_of_match),
+ .of_match_table = at91_pinctrl_of_match,
},
.probe = at91_pinctrl_probe,
.remove = at91_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index f22a2193d94..162ac0d7373 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -529,6 +529,10 @@ static void u300_gpio_irq_enable(struct irq_data *d)
dev_dbg(gpio->dev, "enable IRQ for hwirq %lu on port %s, offset %d\n",
d->hwirq, port->name, offset);
+ if (gpio_lock_as_irq(&gpio->chip, d->hwirq))
+ dev_err(gpio->dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
local_irq_save(flags);
val = readl(U300_PIN_REG(offset, ien));
writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
@@ -547,6 +551,7 @@ static void u300_gpio_irq_disable(struct irq_data *d)
val = readl(U300_PIN_REG(offset, ien));
writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
local_irq_restore(flags);
+ gpio_unlock_as_irq(&gpio->chip, d->hwirq);
}
static struct irq_chip u300_gpio_irqchip = {
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c
index 544d469c5a7..8fe2ab0a769 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/pinctrl-exynos5440.c
@@ -1048,7 +1048,7 @@ static struct platform_driver exynos5440_pinctrl_driver = {
.driver = {
.name = "exynos5440-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(exynos5440_pinctrl_dt_match),
+ .of_match_table = exynos5440_pinctrl_dt_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index d78dd813bff..4779b8e0eee 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -245,11 +245,11 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
* The input_reg[i] here is actually some IOMUXC general
* purpose register, not regular select input register.
*/
- val = readl(ipctl->base + pin->input_val);
+ val = readl(ipctl->base + pin->input_reg);
val &= ~mask;
val |= select << shift;
- writel(val, ipctl->base + pin->input_val);
- } else if (pin->input_val) {
+ writel(val, ipctl->base + pin->input_reg);
+ } else if (pin->input_reg) {
/*
* Regular select input register can never be at offset
* 0, and we only print register value for regular case.
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
new file mode 100644
index 00000000000..f77914ac081
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -0,0 +1,653 @@
+/*
+ * Core driver for the imx pin controller in imx1/21/27
+ *
+ * Copyright (C) 2013 Pengutronix
+ * Author: Markus Pargmann <mpa@pengutronix.de>
+ *
+ * Based on pinctrl-imx.c:
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinctrl-imx1.h"
+
+struct imx1_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ void __iomem *base;
+ const struct imx1_pinctrl_soc_info *info;
+};
+
+/*
+ * MX1 register offsets
+ */
+
+#define MX1_DDIR 0x00
+#define MX1_OCR 0x04
+#define MX1_ICONFA 0x0c
+#define MX1_ICONFB 0x10
+#define MX1_GIUS 0x20
+#define MX1_GPR 0x38
+#define MX1_PUEN 0x40
+
+#define MX1_PORT_STRIDE 0x100
+
+
+/*
+ * MUX_ID format defines
+ */
+#define MX1_MUX_FUNCTION(val) (BIT(0) & val)
+#define MX1_MUX_GPIO(val) ((BIT(1) & val) >> 1)
+#define MX1_MUX_DIR(val) ((BIT(2) & val) >> 2)
+#define MX1_MUX_OCONF(val) (((BIT(4) | BIT(5)) & val) >> 4)
+#define MX1_MUX_ICONFA(val) (((BIT(8) | BIT(9)) & val) >> 8)
+#define MX1_MUX_ICONFB(val) (((BIT(10) | BIT(11)) & val) >> 10)
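+
+/*
+ * Summary of the MUX_ID bit layout decoded by the macros above (see
+ * imx1_pmx_enable() for where each field ends up): bit 0 - function (GPR),
+ * bit 1 - GPIO in use (GIUS), bit 2 - direction (DDIR),
+ * bits 5:4 - OCONF (OCR), bits 9:8 - ICONFA, bits 11:10 - ICONFB.
+ * For example, a mux value of 0x112 decodes to function 0, GPIO in use 1,
+ * direction 0, OCONF 1, ICONFA 1, ICONFB 0.
+ */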
+
+
+/*
+ * IMX1 IOMUXC manages the pins based on ports. Each port has 32 pins. IOMUX
+ * control registers are separated into function, output configuration, input
+ * configuration A, input configuration B, GPIO in use and data direction.
+ *
+ * Those controls that are represented by 1 bit have a direct mapping between
+ * bit position and pin id. If they are represented by 2 bits, the lower 16 pins
+ * are in the first register and the upper 16 pins in the second (next)
+ * register. pin_id is stored in bit (pin_id%16)*2 and the bit above.
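+ *
+ * For example, pin_id 37 is pin 5 on the second port: its registers live
+ * at base + 1 * MX1_PORT_STRIDE, the 1-bit controls use bit 5, and the
+ * 2-bit controls use bits 11:10 of the first of the two registers, while
+ * pin_id 50 (pin 18 of the same port) falls into the second register
+ * (reg + 0x04) and uses bits 5:4 there.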
+ */
+
+/*
+ * Calculates the register offset from a pin_id
+ */
+static void __iomem *imx1_mem(struct imx1_pinctrl *ipctl, unsigned int pin_id)
+{
+ unsigned int port = pin_id / 32;
+ return ipctl->base + port * MX1_PORT_STRIDE;
+}
+
+/*
+ * Write to a register with 2 bits per pin. The function will automatically
+ * use the next register if the pin is managed in the second register.
+ */
+static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
+ u32 value, u32 reg_offset)
+{
+ void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
+ int offset = (pin_id % 16) * 2; /* offset, regardless of register used */
+ int mask = ~(0x3 << offset); /* Mask for 2 bits at offset */
+ u32 old_val;
+ u32 new_val;
+
+ dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+ reg, offset, value);
+
+ /* Use the next register if the pin's port pin number is >=16 */
+ if (pin_id % 32 >= 16)
+ reg += 0x04;
+
+ /* Get current state of pins */
+ old_val = readl(reg);
+ old_val &= mask;
+
+ new_val = value & 0x3; /* Make sure value is really 2 bits */
+ new_val <<= offset;
+ new_val |= old_val; /* Set new state for pin_id */
+
+ writel(new_val, reg);
+}
+
+static void imx1_write_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
+ u32 value, u32 reg_offset)
+{
+ void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
+ int offset = pin_id % 32;
+ int mask = ~BIT_MASK(offset);
+ u32 old_val;
+ u32 new_val;
+
+ /* Get current state of pins */
+ old_val = readl(reg);
+ old_val &= mask;
+
+ new_val = value & 0x1; /* Make sure value is really 1 bit */
+ new_val <<= offset;
+ new_val |= old_val; /* Set new state for pin_id */
+
+ writel(new_val, reg);
+}
+
+static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
+ u32 reg_offset)
+{
+ void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
+ int offset = (pin_id % 16) * 2;
+
+ /* Use the next register if the pin's port pin number is >=16 */
+ if (pin_id % 32 >= 16)
+ reg += 0x04;
+
+ return (readl(reg) & (BIT(offset) | BIT(offset+1))) >> offset;
+}
+
+static int imx1_read_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
+ u32 reg_offset)
+{
+ void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
+ int offset = pin_id % 32;
+
+ return !!(readl(reg) & BIT(offset));
+}
+
+static inline const struct imx1_pin_group *imx1_pinctrl_find_group_by_name(
+ const struct imx1_pinctrl_soc_info *info,
+ const char *name)
+{
+ const struct imx1_pin_group *grp = NULL;
+ int i;
+
+ for (i = 0; i < info->ngroups; i++) {
+ if (!strcmp(info->groups[i].name, name)) {
+ grp = &info->groups[i];
+ break;
+ }
+ }
+
+ return grp;
+}
+
+static int imx1_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+
+ return info->ngroups;
+}
+
+static const char *imx1_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+
+ return info->groups[selector].name;
+}
+
+static int imx1_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned int **pins,
+ unsigned *npins)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+
+ if (selector >= info->ngroups)
+ return -EINVAL;
+
+ *pins = info->groups[selector].pin_ids;
+ *npins = info->groups[selector].npins;
+
+ return 0;
+}
+
+static void imx1_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ seq_printf(s, "GPIO %d, function %d, direction %d, oconf %d, iconfa %d, iconfb %d",
+ imx1_read_bit(ipctl, offset, MX1_GIUS),
+ imx1_read_bit(ipctl, offset, MX1_GPR),
+ imx1_read_bit(ipctl, offset, MX1_DDIR),
+ imx1_read_2bit(ipctl, offset, MX1_OCR),
+ imx1_read_2bit(ipctl, offset, MX1_ICONFA),
+ imx1_read_2bit(ipctl, offset, MX1_ICONFB));
+}
+
+static int imx1_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+ const struct imx1_pin_group *grp;
+ struct pinctrl_map *new_map;
+ struct device_node *parent;
+ int map_num = 1;
+ int i, j;
+
+ /*
+ * first find the group of this node and check if we need to create
+ * config maps for pins
+ */
+ grp = imx1_pinctrl_find_group_by_name(info, np->name);
+ if (!grp) {
+ dev_err(info->dev, "unable to find group for node %s\n",
+ np->name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < grp->npins; i++)
+ map_num++;
+
+ new_map = kmalloc(sizeof(struct pinctrl_map) * map_num, GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+
+ *map = new_map;
+ *num_maps = map_num;
+
+ /* create mux map */
+ parent = of_get_parent(np);
+ if (!parent) {
+ kfree(new_map);
+ return -EINVAL;
+ }
+ new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
+ new_map[0].data.mux.function = parent->name;
+ new_map[0].data.mux.group = np->name;
+ of_node_put(parent);
+
+ /* create config map */
+ new_map++;
+ for (i = j = 0; i < grp->npins; i++) {
+ new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
+ new_map[j].data.configs.group_or_pin =
+ pin_get_name(pctldev, grp->pins[i].pin_id);
+ new_map[j].data.configs.configs = &grp->pins[i].config;
+ new_map[j].data.configs.num_configs = 1;
+ j++;
+ }
+
+ dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+ (*map)->data.mux.function, (*map)->data.mux.group, map_num);
+
+ return 0;
+}
+
+static void imx1_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+ kfree(map);
+}
+
+static const struct pinctrl_ops imx1_pctrl_ops = {
+ .get_groups_count = imx1_get_groups_count,
+ .get_group_name = imx1_get_group_name,
+ .get_group_pins = imx1_get_group_pins,
+ .pin_dbg_show = imx1_pin_dbg_show,
+ .dt_node_to_map = imx1_dt_node_to_map,
+ .dt_free_map = imx1_dt_free_map,
+
+};
+
+static int imx1_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+ const struct imx1_pin *pins;
+ unsigned int npins;
+ int i;
+
+ /*
+ * Configure the mux mode for each pin in the group for a specific
+ * function.
+ */
+ pins = info->groups[group].pins;
+ npins = info->groups[group].npins;
+
+ WARN_ON(!pins || !npins);
+
+ dev_dbg(ipctl->dev, "enable function %s group %s\n",
+ info->functions[selector].name, info->groups[group].name);
+
+ for (i = 0; i < npins; i++) {
+ unsigned int mux = pins[i].mux_id;
+ unsigned int pin_id = pins[i].pin_id;
+ unsigned int afunction = MX1_MUX_FUNCTION(mux);
+ unsigned int gpio_in_use = MX1_MUX_GPIO(mux);
+ unsigned int direction = MX1_MUX_DIR(mux);
+ unsigned int gpio_oconf = MX1_MUX_OCONF(mux);
+ unsigned int gpio_iconfa = MX1_MUX_ICONFA(mux);
+ unsigned int gpio_iconfb = MX1_MUX_ICONFB(mux);
+
+ dev_dbg(pctldev->dev, "%s, pin 0x%x, function %d, gpio %d, direction %d, oconf %d, iconfa %d, iconfb %d\n",
+ __func__, pin_id, afunction, gpio_in_use,
+ direction, gpio_oconf, gpio_iconfa,
+ gpio_iconfb);
+
+ imx1_write_bit(ipctl, pin_id, gpio_in_use, MX1_GIUS);
+ imx1_write_bit(ipctl, pin_id, direction, MX1_DDIR);
+
+ if (gpio_in_use) {
+ imx1_write_2bit(ipctl, pin_id, gpio_oconf, MX1_OCR);
+ imx1_write_2bit(ipctl, pin_id, gpio_iconfa,
+ MX1_ICONFA);
+ imx1_write_2bit(ipctl, pin_id, gpio_iconfb,
+ MX1_ICONFB);
+ } else {
+ imx1_write_bit(ipctl, pin_id, afunction, MX1_GPR);
+ }
+ }
+
+ return 0;
+}
+
+static int imx1_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+
+ return info->nfunctions;
+}
+
+static const char *imx1_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+
+ return info->functions[selector].name;
+}
+
+static int imx1_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+
+ *groups = info->functions[selector].groups;
+ *num_groups = info->functions[selector].num_groups;
+
+ return 0;
+}
+
+static const struct pinmux_ops imx1_pmx_ops = {
+ .get_functions_count = imx1_pmx_get_funcs_count,
+ .get_function_name = imx1_pmx_get_func_name,
+ .get_function_groups = imx1_pmx_get_groups,
+ .enable = imx1_pmx_enable,
+};
+
+static int imx1_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned pin_id, unsigned long *config)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *config = imx1_read_bit(ipctl, pin_id, MX1_PUEN);
+
+ return 0;
+}
+
+static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned pin_id, unsigned long *configs,
+ unsigned num_configs)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+ int i;
+
+ for (i = 0; i != num_configs; ++i) {
+ imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
+
+ dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
+ info->pins[pin_id].name);
+ }
+
+ return 0;
+}
+
+static void imx1_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned pin_id)
+{
+ unsigned long config;
+
+ imx1_pinconf_get(pctldev, pin_id, &config);
+ seq_printf(s, "0x%lx", config);
+}
+
+static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned group)
+{
+ struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx1_pinctrl_soc_info *info = ipctl->info;
+ struct imx1_pin_group *grp;
+ unsigned long config;
+ const char *name;
+ int i, ret;
+
+ if (group >= info->ngroups)
+ return;
+
+ seq_puts(s, "\n");
+ grp = &info->groups[group];
+ for (i = 0; i < grp->npins; i++) {
+ name = pin_get_name(pctldev, grp->pins[i].pin_id);
+ ret = imx1_pinconf_get(pctldev, grp->pins[i].pin_id, &config);
+ if (ret)
+ return;
+ seq_printf(s, "%s: 0x%lx", name, config);
+ }
+}
+
+static const struct pinconf_ops imx1_pinconf_ops = {
+ .pin_config_get = imx1_pinconf_get,
+ .pin_config_set = imx1_pinconf_set,
+ .pin_config_dbg_show = imx1_pinconf_dbg_show,
+ .pin_config_group_dbg_show = imx1_pinconf_group_dbg_show,
+};
+
+static struct pinctrl_desc imx1_pinctrl_desc = {
+ .pctlops = &imx1_pctrl_ops,
+ .pmxops = &imx1_pmx_ops,
+ .confops = &imx1_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int imx1_pinctrl_parse_groups(struct device_node *np,
+ struct imx1_pin_group *grp,
+ struct imx1_pinctrl_soc_info *info,
+ u32 index)
+{
+ int size;
+ const __be32 *list;
+ int i;
+
+ dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+
+ /* Initialise group */
+ grp->name = np->name;
+
+ /*
+ * the binding format is fsl,pins = <PIN MUX_ID CONFIG>
+ */
+ list = of_get_property(np, "fsl,pins", &size);
+ /* we do not check the return value since the node passed down is known to be safe */
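+ /*
+ * Each <PIN MUX_ID CONFIG> entry is three u32 cells (12 bytes), so a
+ * valid property is a non-zero multiple of 12 bytes and npins below is
+ * simply size / 12.
+ */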
+ if (!size || size % 12) {
+ dev_notice(info->dev, "Not a valid fsl,pins property (%s)\n",
+ np->name);
+ return -EINVAL;
+ }
+
+ grp->npins = size / 12;
+ grp->pins = devm_kzalloc(info->dev,
+ grp->npins * sizeof(struct imx1_pin), GFP_KERNEL);
+ grp->pin_ids = devm_kzalloc(info->dev,
+ grp->npins * sizeof(unsigned int), GFP_KERNEL);
+
+ if (!grp->pins || !grp->pin_ids)
+ return -ENOMEM;
+
+ for (i = 0; i < grp->npins; i++) {
+ grp->pins[i].pin_id = be32_to_cpu(*list++);
+ grp->pins[i].mux_id = be32_to_cpu(*list++);
+ grp->pins[i].config = be32_to_cpu(*list++);
+
+ grp->pin_ids[i] = grp->pins[i].pin_id;
+ }
+
+ return 0;
+}
+
+static int imx1_pinctrl_parse_functions(struct device_node *np,
+ struct imx1_pinctrl_soc_info *info,
+ u32 index)
+{
+ struct device_node *child;
+ struct imx1_pmx_func *func;
+ struct imx1_pin_group *grp;
+ int ret;
+ static u32 grp_index;
+ u32 i = 0;
+
+ dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
+
+ func = &info->functions[index];
+
+ /* Initialise function */
+ func->name = np->name;
+ func->num_groups = of_get_child_count(np);
+ if (func->num_groups <= 0)
+ return -EINVAL;
+
+ func->groups = devm_kzalloc(info->dev,
+ func->num_groups * sizeof(char *), GFP_KERNEL);
+
+ if (!func->groups)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ func->groups[i] = child->name;
+ grp = &info->groups[grp_index++];
+ ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
+ if (ret == -ENOMEM)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
+ struct imx1_pinctrl *pctl, struct imx1_pinctrl_soc_info *info)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ int ret;
+ u32 nfuncs = 0;
+ u32 ngroups = 0;
+ u32 ifunc = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ for_each_child_of_node(np, child) {
+ ++nfuncs;
+ ngroups += of_get_child_count(child);
+ }
+
+ if (!nfuncs) {
+ dev_err(&pdev->dev, "No pin functions defined\n");
+ return -EINVAL;
+ }
+
+ info->nfunctions = nfuncs;
+ info->functions = devm_kzalloc(&pdev->dev,
+ nfuncs * sizeof(struct imx1_pmx_func), GFP_KERNEL);
+
+ info->ngroups = ngroups;
+ info->groups = devm_kzalloc(&pdev->dev,
+ ngroups * sizeof(struct imx1_pin_group), GFP_KERNEL);
+
+
+ if (!info->functions || !info->groups)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
+ if (ret == -ENOMEM)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int imx1_pinctrl_core_probe(struct platform_device *pdev,
+ struct imx1_pinctrl_soc_info *info)
+{
+ struct imx1_pinctrl *ipctl;
+ struct resource *res;
+ struct pinctrl_desc *pctl_desc;
+ int ret;
+
+ if (!info || !info->pins || !info->npins) {
+ dev_err(&pdev->dev, "wrong pinctrl info\n");
+ return -EINVAL;
+ }
+ info->dev = &pdev->dev;
+
+ /* Create state holders etc for this driver */
+ ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL);
+ if (!ipctl)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ipctl->base)
+ return -ENOMEM;
+
+ pctl_desc = &imx1_pinctrl_desc;
+ pctl_desc->name = dev_name(&pdev->dev);
+ pctl_desc->pins = info->pins;
+ pctl_desc->npins = info->npins;
+
+ ret = imx1_pinctrl_parse_dt(pdev, ipctl, info);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to probe dt properties\n");
+ return ret;
+ }
+
+ ipctl->info = info;
+ ipctl->dev = info->dev;
+ platform_set_drvdata(pdev, ipctl);
+ ipctl->pctl = pinctrl_register(pctl_desc, &pdev->dev, ipctl);
+ if (!ipctl->pctl) {
+ dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
+
+ return 0;
+}
+
+int imx1_pinctrl_core_remove(struct platform_device *pdev)
+{
+ struct imx1_pinctrl *ipctl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(ipctl->pctl);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/pinctrl-imx1.h b/drivers/pinctrl/pinctrl-imx1.h
new file mode 100644
index 00000000000..692a54c15cd
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx1.h
@@ -0,0 +1,73 @@
+/*
+ * IMX pinmux core definitions
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DRIVERS_PINCTRL_IMX1_H
+#define __DRIVERS_PINCTRL_IMX1_H
+
+struct platform_device;
+
+/**
+ * struct imx1_pin - describes an IMX1/21/27 pin.
+ * @pin_id: ID of the described pin.
+ * @mux_id: ID of the mux setup.
+ * @config: Configuration of the pin (currently only pullup-enable).
+ */
+struct imx1_pin {
+ unsigned int pin_id;
+ unsigned int mux_id;
+ unsigned long config;
+};
+
+/**
+ * struct imx1_pin_group - describes an IMX pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of imx1_pin structs used in this group
+ * @npins: the number of pins in this group array, i.e. the number of
+ * elements in .pins so we can iterate over that array
+ */
+struct imx1_pin_group {
+ const char *name;
+ unsigned int *pin_ids;
+ struct imx1_pin *pins;
+ unsigned npins;
+};
+
+/**
+ * struct imx1_pmx_func - describes IMX pinmux functions
+ * @name: the name of this specific function
+ * @groups: corresponding pin groups
+ * @num_groups: the number of groups
+ */
+struct imx1_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned num_groups;
+};
+
+struct imx1_pinctrl_soc_info {
+ struct device *dev;
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ struct imx1_pin_group *groups;
+ unsigned int ngroups;
+ struct imx1_pmx_func *functions;
+ unsigned int nfunctions;
+};
+
+#define IMX_PINCTRL_PIN(pin) PINCTRL_PIN(pin, #pin)
+
+int imx1_pinctrl_core_probe(struct platform_device *pdev,
+ struct imx1_pinctrl_soc_info *info);
+int imx1_pinctrl_core_remove(struct platform_device *pdev);
+#endif /* __DRIVERS_PINCTRL_IMX1_H */
diff --git a/drivers/pinctrl/pinctrl-imx27.c b/drivers/pinctrl/pinctrl-imx27.c
new file mode 100644
index 00000000000..417c99205bc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx27.c
@@ -0,0 +1,477 @@
+/*
+ * imx27 pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2013 Pengutronix
+ *
+ * Author: Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx1.h"
+
+#define PAD_ID(port, pin) ((port) * 32 + (pin))
+#define PA 0
+#define PB 1
+#define PC 2
+#define PD 3
+#define PE 4
+#define PF 5
+
+enum imx27_pads {
+ MX27_PAD_USBH2_CLK = PAD_ID(PA, 0),
+ MX27_PAD_USBH2_DIR = PAD_ID(PA, 1),
+ MX27_PAD_USBH2_DATA7 = PAD_ID(PA, 2),
+ MX27_PAD_USBH2_NXT = PAD_ID(PA, 3),
+ MX27_PAD_USBH2_STP = PAD_ID(PA, 4),
+ MX27_PAD_LSCLK = PAD_ID(PA, 5),
+ MX27_PAD_LD0 = PAD_ID(PA, 6),
+ MX27_PAD_LD1 = PAD_ID(PA, 7),
+ MX27_PAD_LD2 = PAD_ID(PA, 8),
+ MX27_PAD_LD3 = PAD_ID(PA, 9),
+ MX27_PAD_LD4 = PAD_ID(PA, 10),
+ MX27_PAD_LD5 = PAD_ID(PA, 11),
+ MX27_PAD_LD6 = PAD_ID(PA, 12),
+ MX27_PAD_LD7 = PAD_ID(PA, 13),
+ MX27_PAD_LD8 = PAD_ID(PA, 14),
+ MX27_PAD_LD9 = PAD_ID(PA, 15),
+ MX27_PAD_LD10 = PAD_ID(PA, 16),
+ MX27_PAD_LD11 = PAD_ID(PA, 17),
+ MX27_PAD_LD12 = PAD_ID(PA, 18),
+ MX27_PAD_LD13 = PAD_ID(PA, 19),
+ MX27_PAD_LD14 = PAD_ID(PA, 20),
+ MX27_PAD_LD15 = PAD_ID(PA, 21),
+ MX27_PAD_LD16 = PAD_ID(PA, 22),
+ MX27_PAD_LD17 = PAD_ID(PA, 23),
+ MX27_PAD_REV = PAD_ID(PA, 24),
+ MX27_PAD_CLS = PAD_ID(PA, 25),
+ MX27_PAD_PS = PAD_ID(PA, 26),
+ MX27_PAD_SPL_SPR = PAD_ID(PA, 27),
+ MX27_PAD_HSYNC = PAD_ID(PA, 28),
+ MX27_PAD_VSYNC = PAD_ID(PA, 29),
+ MX27_PAD_CONTRAST = PAD_ID(PA, 30),
+ MX27_PAD_OE_ACD = PAD_ID(PA, 31),
+
+ MX27_PAD_UNUSED0 = PAD_ID(PB, 0),
+ MX27_PAD_UNUSED1 = PAD_ID(PB, 1),
+ MX27_PAD_UNUSED2 = PAD_ID(PB, 2),
+ MX27_PAD_UNUSED3 = PAD_ID(PB, 3),
+ MX27_PAD_SD2_D0 = PAD_ID(PB, 4),
+ MX27_PAD_SD2_D1 = PAD_ID(PB, 5),
+ MX27_PAD_SD2_D2 = PAD_ID(PB, 6),
+ MX27_PAD_SD2_D3 = PAD_ID(PB, 7),
+ MX27_PAD_SD2_CMD = PAD_ID(PB, 8),
+ MX27_PAD_SD2_CLK = PAD_ID(PB, 9),
+ MX27_PAD_CSI_D0 = PAD_ID(PB, 10),
+ MX27_PAD_CSI_D1 = PAD_ID(PB, 11),
+ MX27_PAD_CSI_D2 = PAD_ID(PB, 12),
+ MX27_PAD_CSI_D3 = PAD_ID(PB, 13),
+ MX27_PAD_CSI_D4 = PAD_ID(PB, 14),
+ MX27_PAD_CSI_MCLK = PAD_ID(PB, 15),
+ MX27_PAD_CSI_PIXCLK = PAD_ID(PB, 16),
+ MX27_PAD_CSI_D5 = PAD_ID(PB, 17),
+ MX27_PAD_CSI_D6 = PAD_ID(PB, 18),
+ MX27_PAD_CSI_D7 = PAD_ID(PB, 19),
+ MX27_PAD_CSI_VSYNC = PAD_ID(PB, 20),
+ MX27_PAD_CSI_HSYNC = PAD_ID(PB, 21),
+ MX27_PAD_USBH1_SUSP = PAD_ID(PB, 22),
+ MX27_PAD_USB_PWR = PAD_ID(PB, 23),
+ MX27_PAD_USB_OC_B = PAD_ID(PB, 24),
+ MX27_PAD_USBH1_RCV = PAD_ID(PB, 25),
+ MX27_PAD_USBH1_FS = PAD_ID(PB, 26),
+ MX27_PAD_USBH1_OE_B = PAD_ID(PB, 27),
+ MX27_PAD_USBH1_TXDM = PAD_ID(PB, 28),
+ MX27_PAD_USBH1_TXDP = PAD_ID(PB, 29),
+ MX27_PAD_USBH1_RXDM = PAD_ID(PB, 30),
+ MX27_PAD_USBH1_RXDP = PAD_ID(PB, 31),
+
+ MX27_PAD_UNUSED4 = PAD_ID(PC, 0),
+ MX27_PAD_UNUSED5 = PAD_ID(PC, 1),
+ MX27_PAD_UNUSED6 = PAD_ID(PC, 2),
+ MX27_PAD_UNUSED7 = PAD_ID(PC, 3),
+ MX27_PAD_UNUSED8 = PAD_ID(PC, 4),
+ MX27_PAD_I2C2_SDA = PAD_ID(PC, 5),
+ MX27_PAD_I2C2_SCL = PAD_ID(PC, 6),
+ MX27_PAD_USBOTG_DATA5 = PAD_ID(PC, 7),
+ MX27_PAD_USBOTG_DATA6 = PAD_ID(PC, 8),
+ MX27_PAD_USBOTG_DATA0 = PAD_ID(PC, 9),
+ MX27_PAD_USBOTG_DATA2 = PAD_ID(PC, 10),
+ MX27_PAD_USBOTG_DATA1 = PAD_ID(PC, 11),
+ MX27_PAD_USBOTG_DATA4 = PAD_ID(PC, 12),
+ MX27_PAD_USBOTG_DATA3 = PAD_ID(PC, 13),
+ MX27_PAD_TOUT = PAD_ID(PC, 14),
+ MX27_PAD_TIN = PAD_ID(PC, 15),
+ MX27_PAD_SSI4_FS = PAD_ID(PC, 16),
+ MX27_PAD_SSI4_RXDAT = PAD_ID(PC, 17),
+ MX27_PAD_SSI4_TXDAT = PAD_ID(PC, 18),
+ MX27_PAD_SSI4_CLK = PAD_ID(PC, 19),
+ MX27_PAD_SSI1_FS = PAD_ID(PC, 20),
+ MX27_PAD_SSI1_RXDAT = PAD_ID(PC, 21),
+ MX27_PAD_SSI1_TXDAT = PAD_ID(PC, 22),
+ MX27_PAD_SSI1_CLK = PAD_ID(PC, 23),
+ MX27_PAD_SSI2_FS = PAD_ID(PC, 24),
+ MX27_PAD_SSI2_RXDAT = PAD_ID(PC, 25),
+ MX27_PAD_SSI2_TXDAT = PAD_ID(PC, 26),
+ MX27_PAD_SSI2_CLK = PAD_ID(PC, 27),
+ MX27_PAD_SSI3_FS = PAD_ID(PC, 28),
+ MX27_PAD_SSI3_RXDAT = PAD_ID(PC, 29),
+ MX27_PAD_SSI3_TXDAT = PAD_ID(PC, 30),
+ MX27_PAD_SSI3_CLK = PAD_ID(PC, 31),
+
+ MX27_PAD_SD3_CMD = PAD_ID(PD, 0),
+ MX27_PAD_SD3_CLK = PAD_ID(PD, 1),
+ MX27_PAD_ATA_DATA0 = PAD_ID(PD, 2),
+ MX27_PAD_ATA_DATA1 = PAD_ID(PD, 3),
+ MX27_PAD_ATA_DATA2 = PAD_ID(PD, 4),
+ MX27_PAD_ATA_DATA3 = PAD_ID(PD, 5),
+ MX27_PAD_ATA_DATA4 = PAD_ID(PD, 6),
+ MX27_PAD_ATA_DATA5 = PAD_ID(PD, 7),
+ MX27_PAD_ATA_DATA6 = PAD_ID(PD, 8),
+ MX27_PAD_ATA_DATA7 = PAD_ID(PD, 9),
+ MX27_PAD_ATA_DATA8 = PAD_ID(PD, 10),
+ MX27_PAD_ATA_DATA9 = PAD_ID(PD, 11),
+ MX27_PAD_ATA_DATA10 = PAD_ID(PD, 12),
+ MX27_PAD_ATA_DATA11 = PAD_ID(PD, 13),
+ MX27_PAD_ATA_DATA12 = PAD_ID(PD, 14),
+ MX27_PAD_ATA_DATA13 = PAD_ID(PD, 15),
+ MX27_PAD_ATA_DATA14 = PAD_ID(PD, 16),
+ MX27_PAD_I2C_DATA = PAD_ID(PD, 17),
+ MX27_PAD_I2C_CLK = PAD_ID(PD, 18),
+ MX27_PAD_CSPI2_SS2 = PAD_ID(PD, 19),
+ MX27_PAD_CSPI2_SS1 = PAD_ID(PD, 20),
+ MX27_PAD_CSPI2_SS0 = PAD_ID(PD, 21),
+ MX27_PAD_CSPI2_SCLK = PAD_ID(PD, 22),
+ MX27_PAD_CSPI2_MISO = PAD_ID(PD, 23),
+ MX27_PAD_CSPI2_MOSI = PAD_ID(PD, 24),
+ MX27_PAD_CSPI1_RDY = PAD_ID(PD, 25),
+ MX27_PAD_CSPI1_SS2 = PAD_ID(PD, 26),
+ MX27_PAD_CSPI1_SS1 = PAD_ID(PD, 27),
+ MX27_PAD_CSPI1_SS0 = PAD_ID(PD, 28),
+ MX27_PAD_CSPI1_SCLK = PAD_ID(PD, 29),
+ MX27_PAD_CSPI1_MISO = PAD_ID(PD, 30),
+ MX27_PAD_CSPI1_MOSI = PAD_ID(PD, 31),
+
+ MX27_PAD_USBOTG_NXT = PAD_ID(PE, 0),
+ MX27_PAD_USBOTG_STP = PAD_ID(PE, 1),
+ MX27_PAD_USBOTG_DIR = PAD_ID(PE, 2),
+ MX27_PAD_UART2_CTS = PAD_ID(PE, 3),
+ MX27_PAD_UART2_RTS = PAD_ID(PE, 4),
+ MX27_PAD_PWMO = PAD_ID(PE, 5),
+ MX27_PAD_UART2_TXD = PAD_ID(PE, 6),
+ MX27_PAD_UART2_RXD = PAD_ID(PE, 7),
+ MX27_PAD_UART3_TXD = PAD_ID(PE, 8),
+ MX27_PAD_UART3_RXD = PAD_ID(PE, 9),
+ MX27_PAD_UART3_CTS = PAD_ID(PE, 10),
+ MX27_PAD_UART3_RTS = PAD_ID(PE, 11),
+ MX27_PAD_UART1_TXD = PAD_ID(PE, 12),
+ MX27_PAD_UART1_RXD = PAD_ID(PE, 13),
+ MX27_PAD_UART1_CTS = PAD_ID(PE, 14),
+ MX27_PAD_UART1_RTS = PAD_ID(PE, 15),
+ MX27_PAD_RTCK = PAD_ID(PE, 16),
+ MX27_PAD_RESET_OUT_B = PAD_ID(PE, 17),
+ MX27_PAD_SD1_D0 = PAD_ID(PE, 18),
+ MX27_PAD_SD1_D1 = PAD_ID(PE, 19),
+ MX27_PAD_SD1_D2 = PAD_ID(PE, 20),
+ MX27_PAD_SD1_D3 = PAD_ID(PE, 21),
+ MX27_PAD_SD1_CMD = PAD_ID(PE, 22),
+ MX27_PAD_SD1_CLK = PAD_ID(PE, 23),
+ MX27_PAD_USBOTG_CLK = PAD_ID(PE, 24),
+ MX27_PAD_USBOTG_DATA7 = PAD_ID(PE, 25),
+ MX27_PAD_UNUSED9 = PAD_ID(PE, 26),
+ MX27_PAD_UNUSED10 = PAD_ID(PE, 27),
+ MX27_PAD_UNUSED11 = PAD_ID(PE, 28),
+ MX27_PAD_UNUSED12 = PAD_ID(PE, 29),
+ MX27_PAD_UNUSED13 = PAD_ID(PE, 30),
+ MX27_PAD_UNUSED14 = PAD_ID(PE, 31),
+
+ MX27_PAD_NFRB = PAD_ID(PF, 0),
+ MX27_PAD_NFCLE = PAD_ID(PF, 1),
+ MX27_PAD_NFWP_B = PAD_ID(PF, 2),
+ MX27_PAD_NFCE_B = PAD_ID(PF, 3),
+ MX27_PAD_NFALE = PAD_ID(PF, 4),
+ MX27_PAD_NFRE_B = PAD_ID(PF, 5),
+ MX27_PAD_NFWE_B = PAD_ID(PF, 6),
+ MX27_PAD_PC_POE = PAD_ID(PF, 7),
+ MX27_PAD_PC_RW_B = PAD_ID(PF, 8),
+ MX27_PAD_IOIS16 = PAD_ID(PF, 9),
+ MX27_PAD_PC_RST = PAD_ID(PF, 10),
+ MX27_PAD_PC_BVD2 = PAD_ID(PF, 11),
+ MX27_PAD_PC_BVD1 = PAD_ID(PF, 12),
+ MX27_PAD_PC_VS2 = PAD_ID(PF, 13),
+ MX27_PAD_PC_VS1 = PAD_ID(PF, 14),
+ MX27_PAD_CLKO = PAD_ID(PF, 15),
+ MX27_PAD_PC_PWRON = PAD_ID(PF, 16),
+ MX27_PAD_PC_READY = PAD_ID(PF, 17),
+ MX27_PAD_PC_WAIT_B = PAD_ID(PF, 18),
+ MX27_PAD_PC_CD2_B = PAD_ID(PF, 19),
+ MX27_PAD_PC_CD1_B = PAD_ID(PF, 20),
+ MX27_PAD_CS4_B = PAD_ID(PF, 21),
+ MX27_PAD_CS5_B = PAD_ID(PF, 22),
+ MX27_PAD_ATA_DATA15 = PAD_ID(PF, 23),
+ MX27_PAD_UNUSED15 = PAD_ID(PF, 24),
+ MX27_PAD_UNUSED16 = PAD_ID(PF, 25),
+ MX27_PAD_UNUSED17 = PAD_ID(PF, 26),
+ MX27_PAD_UNUSED18 = PAD_ID(PF, 27),
+ MX27_PAD_UNUSED19 = PAD_ID(PF, 28),
+ MX27_PAD_UNUSED20 = PAD_ID(PF, 29),
+ MX27_PAD_UNUSED21 = PAD_ID(PF, 30),
+ MX27_PAD_UNUSED22 = PAD_ID(PF, 31),
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX27_PAD_USBH2_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH2_DIR),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH2_DATA7),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH2_NXT),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH2_STP),
+ IMX_PINCTRL_PIN(MX27_PAD_LSCLK),
+ IMX_PINCTRL_PIN(MX27_PAD_LD0),
+ IMX_PINCTRL_PIN(MX27_PAD_LD1),
+ IMX_PINCTRL_PIN(MX27_PAD_LD2),
+ IMX_PINCTRL_PIN(MX27_PAD_LD3),
+ IMX_PINCTRL_PIN(MX27_PAD_LD4),
+ IMX_PINCTRL_PIN(MX27_PAD_LD5),
+ IMX_PINCTRL_PIN(MX27_PAD_LD6),
+ IMX_PINCTRL_PIN(MX27_PAD_LD7),
+ IMX_PINCTRL_PIN(MX27_PAD_LD8),
+ IMX_PINCTRL_PIN(MX27_PAD_LD9),
+ IMX_PINCTRL_PIN(MX27_PAD_LD10),
+ IMX_PINCTRL_PIN(MX27_PAD_LD11),
+ IMX_PINCTRL_PIN(MX27_PAD_LD12),
+ IMX_PINCTRL_PIN(MX27_PAD_LD13),
+ IMX_PINCTRL_PIN(MX27_PAD_LD14),
+ IMX_PINCTRL_PIN(MX27_PAD_LD15),
+ IMX_PINCTRL_PIN(MX27_PAD_LD16),
+ IMX_PINCTRL_PIN(MX27_PAD_LD17),
+ IMX_PINCTRL_PIN(MX27_PAD_REV),
+ IMX_PINCTRL_PIN(MX27_PAD_CLS),
+ IMX_PINCTRL_PIN(MX27_PAD_PS),
+ IMX_PINCTRL_PIN(MX27_PAD_SPL_SPR),
+ IMX_PINCTRL_PIN(MX27_PAD_HSYNC),
+ IMX_PINCTRL_PIN(MX27_PAD_VSYNC),
+ IMX_PINCTRL_PIN(MX27_PAD_CONTRAST),
+ IMX_PINCTRL_PIN(MX27_PAD_OE_ACD),
+
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED0),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED1),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED2),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED3),
+ IMX_PINCTRL_PIN(MX27_PAD_SD2_D0),
+ IMX_PINCTRL_PIN(MX27_PAD_SD2_D1),
+ IMX_PINCTRL_PIN(MX27_PAD_SD2_D2),
+ IMX_PINCTRL_PIN(MX27_PAD_SD2_D3),
+ IMX_PINCTRL_PIN(MX27_PAD_SD2_CMD),
+ IMX_PINCTRL_PIN(MX27_PAD_SD2_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D0),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D1),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D2),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D3),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D4),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_MCLK),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_PIXCLK),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D5),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D6),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_D7),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_VSYNC),
+ IMX_PINCTRL_PIN(MX27_PAD_CSI_HSYNC),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_SUSP),
+ IMX_PINCTRL_PIN(MX27_PAD_USB_PWR),
+ IMX_PINCTRL_PIN(MX27_PAD_USB_OC_B),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_RCV),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_FS),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_OE_B),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_TXDM),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_TXDP),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_RXDM),
+ IMX_PINCTRL_PIN(MX27_PAD_USBH1_RXDP),
+
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED4),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED5),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED6),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED7),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED8),
+ IMX_PINCTRL_PIN(MX27_PAD_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX27_PAD_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA5),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA6),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA0),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA2),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA1),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA4),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA3),
+ IMX_PINCTRL_PIN(MX27_PAD_TOUT),
+ IMX_PINCTRL_PIN(MX27_PAD_TIN),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI4_FS),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI4_RXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI4_TXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI4_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI1_FS),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI1_RXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI1_TXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI1_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI2_FS),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI2_RXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI2_TXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI2_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI3_FS),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI3_RXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI3_TXDAT),
+ IMX_PINCTRL_PIN(MX27_PAD_SSI3_CLK),
+
+ IMX_PINCTRL_PIN(MX27_PAD_SD3_CMD),
+ IMX_PINCTRL_PIN(MX27_PAD_SD3_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA0),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA1),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA2),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA3),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA4),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA5),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA6),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA7),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA8),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA9),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA10),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA11),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA12),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA13),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA14),
+ IMX_PINCTRL_PIN(MX27_PAD_I2C_DATA),
+ IMX_PINCTRL_PIN(MX27_PAD_I2C_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI2_SS2),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI2_SS1),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI2_SS0),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI2_MISO),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_RDY),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_SS2),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_SS1),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_SS0),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_MISO),
+ IMX_PINCTRL_PIN(MX27_PAD_CSPI1_MOSI),
+
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_NXT),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_STP),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DIR),
+ IMX_PINCTRL_PIN(MX27_PAD_UART2_CTS),
+ IMX_PINCTRL_PIN(MX27_PAD_UART2_RTS),
+ IMX_PINCTRL_PIN(MX27_PAD_PWMO),
+ IMX_PINCTRL_PIN(MX27_PAD_UART2_TXD),
+ IMX_PINCTRL_PIN(MX27_PAD_UART2_RXD),
+ IMX_PINCTRL_PIN(MX27_PAD_UART3_TXD),
+ IMX_PINCTRL_PIN(MX27_PAD_UART3_RXD),
+ IMX_PINCTRL_PIN(MX27_PAD_UART3_CTS),
+ IMX_PINCTRL_PIN(MX27_PAD_UART3_RTS),
+ IMX_PINCTRL_PIN(MX27_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(MX27_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(MX27_PAD_UART1_CTS),
+ IMX_PINCTRL_PIN(MX27_PAD_UART1_RTS),
+ IMX_PINCTRL_PIN(MX27_PAD_RTCK),
+ IMX_PINCTRL_PIN(MX27_PAD_RESET_OUT_B),
+ IMX_PINCTRL_PIN(MX27_PAD_SD1_D0),
+ IMX_PINCTRL_PIN(MX27_PAD_SD1_D1),
+ IMX_PINCTRL_PIN(MX27_PAD_SD1_D2),
+ IMX_PINCTRL_PIN(MX27_PAD_SD1_D3),
+ IMX_PINCTRL_PIN(MX27_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX27_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_CLK),
+ IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA7),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED9),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED10),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED11),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED12),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED13),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED14),
+
+ IMX_PINCTRL_PIN(MX27_PAD_NFRB),
+ IMX_PINCTRL_PIN(MX27_PAD_NFCLE),
+ IMX_PINCTRL_PIN(MX27_PAD_NFWP_B),
+ IMX_PINCTRL_PIN(MX27_PAD_NFCE_B),
+ IMX_PINCTRL_PIN(MX27_PAD_NFALE),
+ IMX_PINCTRL_PIN(MX27_PAD_NFRE_B),
+ IMX_PINCTRL_PIN(MX27_PAD_NFWE_B),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_POE),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_RW_B),
+ IMX_PINCTRL_PIN(MX27_PAD_IOIS16),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_RST),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_BVD2),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_BVD1),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_VS2),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_VS1),
+ IMX_PINCTRL_PIN(MX27_PAD_CLKO),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_PWRON),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_READY),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_WAIT_B),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_CD2_B),
+ IMX_PINCTRL_PIN(MX27_PAD_PC_CD1_B),
+ IMX_PINCTRL_PIN(MX27_PAD_CS4_B),
+ IMX_PINCTRL_PIN(MX27_PAD_CS5_B),
+ IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA15),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED15),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED16),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED17),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED18),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED19),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED20),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED21),
+ IMX_PINCTRL_PIN(MX27_PAD_UNUSED22),
+};
+
+static struct imx1_pinctrl_soc_info imx27_pinctrl_info = {
+ .pins = imx27_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx27_pinctrl_pads),
+};
+
+static struct of_device_id imx27_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx27-iomuxc", },
+ { /* sentinel */ }
+};
+
+struct imx27_pinctrl_private {
+ int num_gpio_childs;
+ struct platform_device **gpio_dev;
+ struct mxc_gpio_platform_data *gpio_pdata;
+};
+
+static int imx27_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx1_pinctrl_core_probe(pdev, &imx27_pinctrl_info);
+}
+
+static struct platform_driver imx27_pinctrl_driver = {
+ .driver = {
+ .name = "imx27-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(imx27_pinctrl_of_match),
+ },
+ .probe = imx27_pinctrl_probe,
+ .remove = imx1_pinctrl_core_remove,
+};
+
+static int __init imx27_pinctrl_init(void)
+{
+ return platform_driver_register(&imx27_pinctrl_driver);
+}
+arch_initcall(imx27_pinctrl_init);
+
+static void __exit imx27_pinctrl_exit(void)
+{
+ platform_driver_unregister(&imx27_pinctrl_driver);
+}
+module_exit(imx27_pinctrl_exit);
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale IMX27 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx35.c b/drivers/pinctrl/pinctrl-imx35.c
index c4549829fc4..278a04ae894 100644
--- a/drivers/pinctrl/pinctrl-imx35.c
+++ b/drivers/pinctrl/pinctrl-imx35.c
@@ -1019,7 +1019,7 @@ static struct platform_driver imx35_pinctrl_driver = {
.driver = {
.name = "imx35-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx35_pinctrl_of_match),
+ .of_match_table = imx35_pinctrl_of_match,
},
.probe = imx35_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-imx50.c b/drivers/pinctrl/pinctrl-imx50.c
new file mode 100644
index 00000000000..b06feed1b03
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx50.c
@@ -0,0 +1,426 @@
+/*
+ * imx50 pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2013 Greg Ungerer <gerg@uclinux.org>
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx50_pads {
+ MX50_PAD_RESERVE0 = 0,
+ MX50_PAD_RESERVE1 = 1,
+ MX50_PAD_RESERVE2 = 2,
+ MX50_PAD_RESERVE3 = 3,
+ MX50_PAD_RESERVE4 = 4,
+ MX50_PAD_RESERVE5 = 5,
+ MX50_PAD_RESERVE6 = 6,
+ MX50_PAD_RESERVE7 = 7,
+ MX50_PAD_KEY_COL0 = 8,
+ MX50_PAD_KEY_ROW0 = 9,
+ MX50_PAD_KEY_COL1 = 10,
+ MX50_PAD_KEY_ROW1 = 11,
+ MX50_PAD_KEY_COL2 = 12,
+ MX50_PAD_KEY_ROW2 = 13,
+ MX50_PAD_KEY_COL3 = 14,
+ MX50_PAD_KEY_ROW3 = 15,
+ MX50_PAD_I2C1_SCL = 16,
+ MX50_PAD_I2C1_SDA = 17,
+ MX50_PAD_I2C2_SCL = 18,
+ MX50_PAD_I2C2_SDA = 19,
+ MX50_PAD_I2C3_SCL = 20,
+ MX50_PAD_I2C3_SDA = 21,
+ MX50_PAD_PWM1 = 22,
+ MX50_PAD_PWM2 = 23,
+ MX50_PAD_0WIRE = 24,
+ MX50_PAD_EPITO = 25,
+ MX50_PAD_WDOG = 26,
+ MX50_PAD_SSI_TXFS = 27,
+ MX50_PAD_SSI_TXC = 28,
+ MX50_PAD_SSI_TXD = 29,
+ MX50_PAD_SSI_RXD = 30,
+ MX50_PAD_SSI_RXF = 31,
+ MX50_PAD_SSI_RXC = 32,
+ MX50_PAD_UART1_TXD = 33,
+ MX50_PAD_UART1_RXD = 34,
+ MX50_PAD_UART1_CTS = 35,
+ MX50_PAD_UART1_RTS = 36,
+ MX50_PAD_UART2_TXD = 37,
+ MX50_PAD_UART2_RXD = 38,
+ MX50_PAD_UART2_CTS = 39,
+ MX50_PAD_UART2_RTS = 40,
+ MX50_PAD_UART3_TXD = 41,
+ MX50_PAD_UART3_RXD = 42,
+ MX50_PAD_UART4_TXD = 43,
+ MX50_PAD_UART4_RXD = 44,
+ MX50_PAD_CSPI_CLK = 45,
+ MX50_PAD_CSPI_MOSI = 46,
+ MX50_PAD_CSPI_MISO = 47,
+ MX50_PAD_CSPI_SS0 = 48,
+ MX50_PAD_ECSPI1_CLK = 49,
+ MX50_PAD_ECSPI1_MOSI = 50,
+ MX50_PAD_ECSPI1_MISO = 51,
+ MX50_PAD_ECSPI1_SS0 = 52,
+ MX50_PAD_ECSPI2_CLK = 53,
+ MX50_PAD_ECSPI2_MOSI = 54,
+ MX50_PAD_ECSPI2_MISO = 55,
+ MX50_PAD_ECSPI2_SS0 = 56,
+ MX50_PAD_SD1_CLK = 57,
+ MX50_PAD_SD1_CMD = 58,
+ MX50_PAD_SD1_D0 = 59,
+ MX50_PAD_SD1_D1 = 60,
+ MX50_PAD_SD1_D2 = 61,
+ MX50_PAD_SD1_D3 = 62,
+ MX50_PAD_SD2_CLK = 63,
+ MX50_PAD_SD2_CMD = 64,
+ MX50_PAD_SD2_D0 = 65,
+ MX50_PAD_SD2_D1 = 66,
+ MX50_PAD_SD2_D2 = 67,
+ MX50_PAD_SD2_D3 = 68,
+ MX50_PAD_SD2_D4 = 69,
+ MX50_PAD_SD2_D5 = 70,
+ MX50_PAD_SD2_D6 = 71,
+ MX50_PAD_SD2_D7 = 72,
+ MX50_PAD_SD2_WP = 73,
+ MX50_PAD_SD2_CD = 74,
+ MX50_PAD_DISP_D0 = 75,
+ MX50_PAD_DISP_D1 = 76,
+ MX50_PAD_DISP_D2 = 77,
+ MX50_PAD_DISP_D3 = 78,
+ MX50_PAD_DISP_D4 = 79,
+ MX50_PAD_DISP_D5 = 80,
+ MX50_PAD_DISP_D6 = 81,
+ MX50_PAD_DISP_D7 = 82,
+ MX50_PAD_DISP_WR = 83,
+ MX50_PAD_DISP_RD = 84,
+ MX50_PAD_DISP_RS = 85,
+ MX50_PAD_DISP_CS = 86,
+ MX50_PAD_DISP_BUSY = 87,
+ MX50_PAD_DISP_RESET = 88,
+ MX50_PAD_SD3_CLK = 89,
+ MX50_PAD_SD3_CMD = 90,
+ MX50_PAD_SD3_D0 = 91,
+ MX50_PAD_SD3_D1 = 92,
+ MX50_PAD_SD3_D2 = 93,
+ MX50_PAD_SD3_D3 = 94,
+ MX50_PAD_SD3_D4 = 95,
+ MX50_PAD_SD3_D5 = 96,
+ MX50_PAD_SD3_D6 = 97,
+ MX50_PAD_SD3_D7 = 98,
+ MX50_PAD_SD3_WP = 99,
+ MX50_PAD_DISP_D8 = 100,
+ MX50_PAD_DISP_D9 = 101,
+ MX50_PAD_DISP_D10 = 102,
+ MX50_PAD_DISP_D11 = 103,
+ MX50_PAD_DISP_D12 = 104,
+ MX50_PAD_DISP_D13 = 105,
+ MX50_PAD_DISP_D14 = 106,
+ MX50_PAD_DISP_D15 = 107,
+ MX50_PAD_EPDC_D0 = 108,
+ MX50_PAD_EPDC_D1 = 109,
+ MX50_PAD_EPDC_D2 = 110,
+ MX50_PAD_EPDC_D3 = 111,
+ MX50_PAD_EPDC_D4 = 112,
+ MX50_PAD_EPDC_D5 = 113,
+ MX50_PAD_EPDC_D6 = 114,
+ MX50_PAD_EPDC_D7 = 115,
+ MX50_PAD_EPDC_D8 = 116,
+ MX50_PAD_EPDC_D9 = 117,
+ MX50_PAD_EPDC_D10 = 118,
+ MX50_PAD_EPDC_D11 = 119,
+ MX50_PAD_EPDC_D12 = 120,
+ MX50_PAD_EPDC_D13 = 121,
+ MX50_PAD_EPDC_D14 = 122,
+ MX50_PAD_EPDC_D15 = 123,
+ MX50_PAD_EPDC_GDCLK = 124,
+ MX50_PAD_EPDC_GDSP = 125,
+ MX50_PAD_EPDC_GDOE = 126,
+ MX50_PAD_EPDC_GDRL = 127,
+ MX50_PAD_EPDC_SDCLK = 128,
+ MX50_PAD_EPDC_SDOEZ = 129,
+ MX50_PAD_EPDC_SDOED = 130,
+ MX50_PAD_EPDC_SDOE = 131,
+ MX50_PAD_EPDC_SDLE = 132,
+ MX50_PAD_EPDC_SDCLKN = 133,
+ MX50_PAD_EPDC_SDSHR = 134,
+ MX50_PAD_EPDC_PWRCOM = 135,
+ MX50_PAD_EPDC_PWRSTAT = 136,
+ MX50_PAD_EPDC_PWRCTRL0 = 137,
+ MX50_PAD_EPDC_PWRCTRL1 = 138,
+ MX50_PAD_EPDC_PWRCTRL2 = 139,
+ MX50_PAD_EPDC_PWRCTRL3 = 140,
+ MX50_PAD_EPDC_VCOM0 = 141,
+ MX50_PAD_EPDC_VCOM1 = 142,
+ MX50_PAD_EPDC_BDR0 = 143,
+ MX50_PAD_EPDC_BDR1 = 144,
+ MX50_PAD_EPDC_SDCE0 = 145,
+ MX50_PAD_EPDC_SDCE1 = 146,
+ MX50_PAD_EPDC_SDCE2 = 147,
+ MX50_PAD_EPDC_SDCE3 = 148,
+ MX50_PAD_EPDC_SDCE4 = 149,
+ MX50_PAD_EPDC_SDCE5 = 150,
+ MX50_PAD_EIM_DA0 = 151,
+ MX50_PAD_EIM_DA1 = 152,
+ MX50_PAD_EIM_DA2 = 153,
+ MX50_PAD_EIM_DA3 = 154,
+ MX50_PAD_EIM_DA4 = 155,
+ MX50_PAD_EIM_DA5 = 156,
+ MX50_PAD_EIM_DA6 = 157,
+ MX50_PAD_EIM_DA7 = 158,
+ MX50_PAD_EIM_DA8 = 159,
+ MX50_PAD_EIM_DA9 = 160,
+ MX50_PAD_EIM_DA10 = 161,
+ MX50_PAD_EIM_DA11 = 162,
+ MX50_PAD_EIM_DA12 = 163,
+ MX50_PAD_EIM_DA13 = 164,
+ MX50_PAD_EIM_DA14 = 165,
+ MX50_PAD_EIM_DA15 = 166,
+ MX50_PAD_EIM_CS2 = 167,
+ MX50_PAD_EIM_CS1 = 168,
+ MX50_PAD_EIM_CS0 = 169,
+ MX50_PAD_EIM_EB0 = 170,
+ MX50_PAD_EIM_EB1 = 171,
+ MX50_PAD_EIM_WAIT = 172,
+ MX50_PAD_EIM_BCLK = 173,
+ MX50_PAD_EIM_RDY = 174,
+ MX50_PAD_EIM_OE = 175,
+ MX50_PAD_EIM_RW = 176,
+ MX50_PAD_EIM_LBA = 177,
+ MX50_PAD_EIM_CRE = 178,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx50_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX50_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_COL0),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_ROW0),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_COL1),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_ROW1),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_COL2),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_ROW2),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_COL3),
+ IMX_PINCTRL_PIN(MX50_PAD_KEY_ROW3),
+ IMX_PINCTRL_PIN(MX50_PAD_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX50_PAD_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX50_PAD_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX50_PAD_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX50_PAD_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX50_PAD_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX50_PAD_PWM1),
+ IMX_PINCTRL_PIN(MX50_PAD_PWM2),
+ IMX_PINCTRL_PIN(MX50_PAD_0WIRE),
+ IMX_PINCTRL_PIN(MX50_PAD_EPITO),
+ IMX_PINCTRL_PIN(MX50_PAD_WDOG),
+ IMX_PINCTRL_PIN(MX50_PAD_SSI_TXFS),
+ IMX_PINCTRL_PIN(MX50_PAD_SSI_TXC),
+ IMX_PINCTRL_PIN(MX50_PAD_SSI_TXD),
+ IMX_PINCTRL_PIN(MX50_PAD_SSI_RXD),
+ IMX_PINCTRL_PIN(MX50_PAD_SSI_RXF),
+ IMX_PINCTRL_PIN(MX50_PAD_SSI_RXC),
+ IMX_PINCTRL_PIN(MX50_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART1_CTS),
+ IMX_PINCTRL_PIN(MX50_PAD_UART1_RTS),
+ IMX_PINCTRL_PIN(MX50_PAD_UART2_TXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART2_RXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART2_CTS),
+ IMX_PINCTRL_PIN(MX50_PAD_UART2_RTS),
+ IMX_PINCTRL_PIN(MX50_PAD_UART3_TXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART3_RXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART4_TXD),
+ IMX_PINCTRL_PIN(MX50_PAD_UART4_RXD),
+ IMX_PINCTRL_PIN(MX50_PAD_CSPI_CLK),
+ IMX_PINCTRL_PIN(MX50_PAD_CSPI_MOSI),
+ IMX_PINCTRL_PIN(MX50_PAD_CSPI_MISO),
+ IMX_PINCTRL_PIN(MX50_PAD_CSPI_SS0),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI1_CLK),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI2_CLK),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX50_PAD_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX50_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX50_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX50_PAD_SD1_D0),
+ IMX_PINCTRL_PIN(MX50_PAD_SD1_D1),
+ IMX_PINCTRL_PIN(MX50_PAD_SD1_D2),
+ IMX_PINCTRL_PIN(MX50_PAD_SD1_D3),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_CLK),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_CMD),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D0),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D1),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D2),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D3),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D4),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D5),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D6),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_D7),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_WP),
+ IMX_PINCTRL_PIN(MX50_PAD_SD2_CD),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D0),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D1),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D2),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D3),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D4),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D5),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D6),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D7),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_WR),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_RD),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_RS),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_CS),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_BUSY),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_RESET),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_CLK),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_CMD),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D0),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D1),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D2),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D3),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D4),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D5),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D6),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_D7),
+ IMX_PINCTRL_PIN(MX50_PAD_SD3_WP),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D8),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D9),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D10),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D11),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D12),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D13),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D14),
+ IMX_PINCTRL_PIN(MX50_PAD_DISP_D15),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D0),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D1),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D2),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D3),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D4),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D5),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D6),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D7),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D8),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D9),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D10),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D11),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D12),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D13),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D14),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_D15),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_GDCLK),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_GDSP),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_GDOE),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_GDRL),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCLK),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDOEZ),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDOED),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDOE),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDLE),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCLKN),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDSHR),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_PWRCOM),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_PWRSTAT),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_PWRCTRL0),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_PWRCTRL1),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_PWRCTRL2),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_PWRCTRL3),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_VCOM0),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_VCOM1),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_BDR0),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_BDR1),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCE0),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCE1),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCE2),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCE3),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCE4),
+ IMX_PINCTRL_PIN(MX50_PAD_EPDC_SDCE5),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA0),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA1),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA2),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA3),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA4),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA5),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA6),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA7),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA8),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA9),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA10),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA11),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA12),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA13),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA14),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_DA15),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_CS2),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_CS1),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_CS0),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_EB0),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_EB1),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_WAIT),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_BCLK),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_RDY),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_OE),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_RW),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_LBA),
+ IMX_PINCTRL_PIN(MX50_PAD_EIM_CRE),
+};
+
+static struct imx_pinctrl_soc_info imx50_pinctrl_info = {
+ .pins = imx50_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx50_pinctrl_pads),
+};
+
+static struct of_device_id imx50_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx50-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx50_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx50_pinctrl_info);
+}
+
+static struct platform_driver imx50_pinctrl_driver = {
+ .driver = {
+ .name = "imx50-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(imx50_pinctrl_of_match),
+ },
+ .probe = imx50_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
+};
+
+static int __init imx50_pinctrl_init(void)
+{
+ return platform_driver_register(&imx50_pinctrl_driver);
+}
+arch_initcall(imx50_pinctrl_init);
+
+static void __exit imx50_pinctrl_exit(void)
+{
+ platform_driver_unregister(&imx50_pinctrl_driver);
+}
+module_exit(imx50_pinctrl_exit);
+MODULE_DESCRIPTION("Freescale IMX50 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
index db268b92007..19ab182bef6 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -782,7 +782,7 @@ static struct platform_driver imx51_pinctrl_driver = {
.driver = {
.name = "imx51-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx51_pinctrl_of_match),
+ .of_match_table = imx51_pinctrl_of_match,
},
.probe = imx51_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-imx53.c b/drivers/pinctrl/pinctrl-imx53.c
index 17562ae9005..f8d45c4cfde 100644
--- a/drivers/pinctrl/pinctrl-imx53.c
+++ b/drivers/pinctrl/pinctrl-imx53.c
@@ -468,7 +468,7 @@ static struct platform_driver imx53_pinctrl_driver = {
.driver = {
.name = "imx53-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx53_pinctrl_of_match),
+ .of_match_table = imx53_pinctrl_of_match,
},
.probe = imx53_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-imx6dl.c b/drivers/pinctrl/pinctrl-imx6dl.c
index a76b7242793..db2a1489bd9 100644
--- a/drivers/pinctrl/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/pinctrl-imx6dl.c
@@ -474,7 +474,7 @@ static struct platform_driver imx6dl_pinctrl_driver = {
.driver = {
.name = "imx6dl-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx6dl_pinctrl_of_match),
+ .of_match_table = imx6dl_pinctrl_of_match,
},
.probe = imx6dl_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
index 76dd9c4949f..8eb5ac1bd5f 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -480,7 +480,7 @@ static struct platform_driver imx6q_pinctrl_driver = {
.driver = {
.name = "imx6q-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx6q_pinctrl_of_match),
+ .of_match_table = imx6q_pinctrl_of_match,
},
.probe = imx6q_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-imx6sl.c b/drivers/pinctrl/pinctrl-imx6sl.c
index 4eb7ccab5f2..f21b7389df3 100644
--- a/drivers/pinctrl/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/pinctrl-imx6sl.c
@@ -380,7 +380,7 @@ static struct platform_driver imx6sl_pinctrl_driver = {
.driver = {
.name = "imx6sl-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx6sl_pinctrl_of_match),
+ .of_match_table = imx6sl_pinctrl_of_match,
},
.probe = imx6sl_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index d7c3ae300fa..7111c3b5913 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -634,6 +634,10 @@ static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
{
struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+ if (gpio_lock_as_irq(&nmk_chip->chip, d->hwirq))
+ dev_err(nmk_chip->chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
clk_enable(nmk_chip->clk);
nmk_gpio_irq_unmask(d);
return 0;
@@ -645,6 +649,7 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
nmk_gpio_irq_mask(d);
clk_disable(nmk_chip->clk);
+ gpio_unlock_as_irq(&nmk_chip->chip, d->hwirq);
}
static struct irq_chip nmk_gpio_irq_chip = {
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 30c4d356cb3..f13d0e78a41 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -891,9 +891,6 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
param = pinconf_to_config_param(configs[i]);
param_val = pinconf_to_config_argument(configs[i]);
- if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
- continue;
-
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
@@ -962,26 +959,9 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
return 0;
}
-static int palmas_pinconf_group_get(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long *config)
-{
- dev_err(pctldev->dev, "palmas_pinconf_group_get op not supported\n");
- return -ENOTSUPP;
-}
-
-static int palmas_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long *configs,
- unsigned num_configs)
-{
- dev_err(pctldev->dev, "palmas_pinconf_group_set op not supported\n");
- return -ENOTSUPP;
-}
-
static const struct pinconf_ops palmas_pinconf_ops = {
.pin_config_get = palmas_pinconf_get,
.pin_config_set = palmas_pinconf_set,
- .pin_config_group_get = palmas_pinconf_group_get,
- .pin_config_group_set = palmas_pinconf_group_set,
};
static struct pinctrl_desc palmas_pinctrl_desc = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index e0718b7c4ab..e939c28cbf1 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -56,8 +56,20 @@
#define GPIO_EXT_PORT 0x50
#define GPIO_LS_SYNC 0x60
+enum rockchip_pinctrl_type {
+ RK2928,
+ RK3066B,
+ RK3188,
+};
+
+enum rockchip_pin_bank_type {
+ COMMON_BANK,
+ RK3188_BANK0,
+};
+
/**
* @reg_base: register base of the gpio bank
+ * @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
* @pin_base: first pin number
@@ -74,12 +86,14 @@
*/
struct rockchip_pin_bank {
void __iomem *reg_base;
+ void __iomem *reg_pull;
struct clk *clk;
int irq;
u32 pin_base;
u8 nr_pins;
char *name;
u8 bank_num;
+ enum rockchip_pin_bank_type bank_type;
bool valid;
struct device_node *of_node;
struct rockchip_pinctrl *drvdata;
@@ -87,7 +101,7 @@ struct rockchip_pin_bank {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range grange;
spinlock_t slock;
-
+ u32 toggle_edge_mode;
};
#define PIN_BANK(id, pins, label) \
@@ -98,18 +112,16 @@ struct rockchip_pin_bank {
}
/**
- * @pull_auto: some SoCs don't allow pulls to be specified as up or down, but
- * instead decide this automatically based on the pad-type.
*/
struct rockchip_pin_ctrl {
struct rockchip_pin_bank *pin_banks;
u32 nr_banks;
u32 nr_pins;
char *label;
+ enum rockchip_pinctrl_type type;
int mux_offset;
- int pull_offset;
- bool pull_auto;
- int pull_bank_stride;
+ void (*pull_calc_reg)(struct rockchip_pin_bank *bank, int pin_num,
+ void __iomem **reg, u8 *bit);
};
struct rockchip_pin_config {
@@ -148,6 +160,7 @@ struct rockchip_pmx_func {
struct rockchip_pinctrl {
void __iomem *reg_base;
+ void __iomem *reg_pull;
struct device *dev;
struct rockchip_pin_ctrl *ctrl;
struct pinctrl_desc pctl;
@@ -354,31 +367,92 @@ static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
spin_unlock_irqrestore(&bank->slock, flags);
}
+#define RK2928_PULL_OFFSET 0x118
+#define RK2928_PULL_PINS_PER_REG 16
+#define RK2928_PULL_BANK_STRIDE 8
+
+static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, void __iomem **reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *reg = info->reg_base + RK2928_PULL_OFFSET;
+ *reg += bank->bank_num * RK2928_PULL_BANK_STRIDE;
+ *reg += (pin_num / RK2928_PULL_PINS_PER_REG) * 4;
+
+ *bit = pin_num % RK2928_PULL_PINS_PER_REG;
+}
+
+#define RK3188_PULL_BITS_PER_PIN 2
+#define RK3188_PULL_PINS_PER_REG 8
+#define RK3188_PULL_BANK_STRIDE 16
+
+static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, void __iomem **reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 12 pins of the first bank are located elsewhere */
+ if (bank->bank_type == RK3188_BANK0 && pin_num < 12) {
+ *reg = bank->reg_pull +
+ ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3188_PULL_PINS_PER_REG;
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+ } else {
+ *reg = info->reg_pull - 4;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ /*
+ * The bits in these registers have an inverse ordering
+ * with the lowest pin being in bits 15:14 and the highest
+ * pin in bits 1:0
+ */
+ *bit = 7 - (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+ }
+}
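
A worked example of the arithmetic above, outside the PMU special case (illustrative only; the helper below is hypothetical and simply mirrors the non-PMU branch of rk3188_calc_pull_reg_and_bit()): with 2 bits per pin, 8 pins per 32-bit register and a 16-byte bank stride, bank 1 pin 3 lands at byte offset -4 + 1 * 16 + (3 / 8) * 4 = 12 from reg_pull, and because of the inverted ordering its field starts at bit (7 - 3) * 2 = 8.

#include <stdint.h>

/* Hypothetical standalone mirror of the non-PMU branch above */
static void rk3188_pull_field(unsigned int bank_num, unsigned int pin_num,
			      int *byte_offset, uint8_t *bit)
{
	*byte_offset = -4 + bank_num * 16 + (pin_num / 8) * 4;
	*bit = (7 - (pin_num % 8)) * 2;	/* highest pin sits in bits 1:0 */
}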
+
static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
void __iomem *reg;
u8 bit;
+ u32 data;
/* rk3066b doesn't support any pulls */
- if (!ctrl->pull_offset)
+ if (ctrl->type == RK3066B)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg_base + ctrl->pull_offset;
-
- if (ctrl->pull_auto) {
- reg += bank->bank_num * ctrl->pull_bank_stride;
- reg += (pin_num / 16) * 4;
- bit = pin_num % 16;
+ ctrl->pull_calc_reg(bank, pin_num, &reg, &bit);
+ switch (ctrl->type) {
+ case RK2928:
return !(readl_relaxed(reg) & BIT(bit))
? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT
: PIN_CONFIG_BIAS_DISABLE;
- } else {
- dev_err(info->dev, "pull support for rk31xx not implemented\n");
+ case RK3188:
+ data = readl_relaxed(reg) >> bit;
+ data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
+
+ switch (data) {
+ case 0:
+ return PIN_CONFIG_BIAS_DISABLE;
+ case 1:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case 2:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ case 3:
+ return PIN_CONFIG_BIAS_BUS_HOLD;
+ }
+
+ dev_err(info->dev, "unknown pull setting\n");
return -EIO;
- }
+ default:
+ dev_err(info->dev, "unsupported pinctrl type\n");
+ return -EINVAL;
+ }
}
static int rockchip_set_pull(struct rockchip_pin_bank *bank,
@@ -395,22 +469,13 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
bank->bank_num, pin_num, pull);
/* rk3066b doesn't support any pulls */
- if (!ctrl->pull_offset)
+ if (ctrl->type == RK3066B)
return pull ? -EINVAL : 0;
- reg = info->reg_base + ctrl->pull_offset;
-
- if (ctrl->pull_auto) {
- if (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT &&
- pull != PIN_CONFIG_BIAS_DISABLE) {
- dev_err(info->dev, "only PIN_DEFAULT and DISABLE allowed\n");
- return -EINVAL;
- }
-
- reg += bank->bank_num * ctrl->pull_bank_stride;
- reg += (pin_num / 16) * 4;
- bit = pin_num % 16;
+ ctrl->pull_calc_reg(bank, pin_num, &reg, &bit);
+ switch (ctrl->type) {
+ case RK2928:
spin_lock_irqsave(&bank->slock, flags);
data = BIT(bit + 16);
@@ -419,14 +484,38 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
writel(data, reg);
spin_unlock_irqrestore(&bank->slock, flags);
- } else {
- if (pull == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT) {
- dev_err(info->dev, "pull direction (up/down) needs to be specified\n");
+ break;
+ case RK3188:
+ spin_lock_irqsave(&bank->slock, flags);
+
+ /* enable the write to the equivalent lower bits */
+ data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16);
+
+ switch (pull) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ data |= (1 << bit);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ data |= (2 << bit);
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ data |= (3 << bit);
+ break;
+ default:
+ dev_err(info->dev, "unsupported pull setting %d\n",
+ pull);
return -EINVAL;
}
- dev_err(info->dev, "pull support for rk31xx not implemented\n");
- return -EIO;
+ writel(data, reg);
+
+ spin_unlock_irqrestore(&bank->slock, flags);
+ break;
+ default:
+ dev_err(info->dev, "unsupported pinctrl type\n");
+ return -EINVAL;
}
return 0;
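
The "enable the write to the equivalent lower bits" step above relies on the hiword-mask register layout: the upper 16 bits select which of the lower bits the write may change, so a single writel() updates one pin's 2-bit pull field without a read-modify-write. A small sketch of the value that gets written (hypothetical helper, not part of the patch):

#include <stdint.h>

/* e.g. pull-down (2) for the field at bit 8 yields 0x03000200 */
static uint32_t rk3188_pull_word(uint8_t bit, uint32_t pull_val)
{
	uint32_t data = 0x3u << (bit + 16);	/* write-enable the 2-bit field */

	data |= (pull_val & 0x3u) << bit;	/* new pull setting */
	return data;
}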
@@ -556,20 +645,17 @@ static const struct pinmux_ops rockchip_pmx_ops = {
static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
enum pin_config_param pull)
{
- /* rk3066b does support any pulls */
- if (!ctrl->pull_offset)
+ switch (ctrl->type) {
+ case RK2928:
+ return (pull == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT ||
+ pull == PIN_CONFIG_BIAS_DISABLE);
+ case RK3066B:
return pull ? false : true;
-
- if (ctrl->pull_auto) {
- if (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT &&
- pull != PIN_CONFIG_BIAS_DISABLE)
- return false;
- } else {
- if (pull == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
- return false;
+ case RK3188:
+ return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
- return true;
+ return false;
}
/* set the pin config settings for a specified pin */
@@ -597,6 +683,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_BUS_HOLD:
if (!rockchip_pinconf_pull_valid(info->ctrl, param))
return -ENOTSUPP;
@@ -635,6 +722,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_BUS_HOLD:
if (!rockchip_pinconf_pull_valid(info->ctrl, param))
return -ENOTSUPP;
@@ -656,7 +744,11 @@ static const struct pinconf_ops rockchip_pinconf_ops = {
.pin_config_set = rockchip_pinconf_set,
};
-static const char *gpio_compat = "rockchip,gpio-bank";
+static const struct of_device_id rockchip_bank_match[] = {
+ { .compatible = "rockchip,gpio-bank" },
+ { .compatible = "rockchip,rk3188-gpio-bank0" },
+ {},
+};
static void rockchip_pinctrl_child_count(struct rockchip_pinctrl *info,
struct device_node *np)
@@ -664,7 +756,7 @@ static void rockchip_pinctrl_child_count(struct rockchip_pinctrl *info,
struct device_node *child;
for_each_child_of_node(np, child) {
- if (of_device_is_compatible(child, gpio_compat))
+ if (of_match_node(rockchip_bank_match, child))
continue;
info->nfunctions++;
@@ -807,8 +899,9 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
i = 0;
for_each_child_of_node(np, child) {
- if (of_device_is_compatible(child, gpio_compat))
+ if (of_match_node(rockchip_bank_match, child))
continue;
+
ret = rockchip_pinctrl_parse_functions(child, info, i++);
if (ret) {
dev_err(&pdev->dev, "failed to parse function\n");
@@ -985,7 +1078,9 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
+ u32 polarity = 0, data = 0;
u32 pend;
+ bool edge_changed = false;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -993,6 +1088,12 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
+ if (bank->toggle_edge_mode) {
+ polarity = readl_relaxed(bank->reg_base +
+ GPIO_INT_POLARITY);
+ data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
+ }
+
while (pend) {
unsigned int virq;
@@ -1007,9 +1108,30 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
dev_dbg(bank->drvdata->dev, "handling irq %d\n", irq);
+ /*
+ * Triggering IRQ on both rising and falling edge
+ * needs manual intervention.
+ */
+ if (bank->toggle_edge_mode & BIT(irq)) {
+ if (data & BIT(irq))
+ polarity &= ~BIT(irq);
+ else
+ polarity |= BIT(irq);
+
+ edge_changed = true;
+ }
+
generic_handle_irq(virq);
}
+ if (bank->toggle_edge_mode && edge_changed) {
+ /* Interrupt params should only be set with ints disabled */
+ data = readl_relaxed(bank->reg_base + GPIO_INTEN);
+ writel_relaxed(0, bank->reg_base + GPIO_INTEN);
+ writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
+ writel(data, bank->reg_base + GPIO_INTEN);
+ }
+
chained_irq_exit(chip, desc);
}
@@ -1022,6 +1144,12 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
u32 level;
u32 data;
+ /* make sure the pin is configured as gpio input */
+ rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
+ data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
+ data &= ~mask;
+ writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
+
if (type & IRQ_TYPE_EDGE_BOTH)
__irq_set_handler_locked(d->irq, handle_edge_irq);
else
@@ -1033,19 +1161,37 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
polarity = readl_relaxed(gc->reg_base + GPIO_INT_POLARITY);
switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ bank->toggle_edge_mode |= mask;
+ level |= mask;
+
+ /*
+ * Determine the gpio state. If it is 1, the next interrupt
+ * should trigger on the falling edge, otherwise on the rising edge.
+ */
+ data = readl(bank->reg_base + GPIO_EXT_PORT);
+ if (data & mask)
+ polarity &= ~mask;
+ else
+ polarity |= mask;
+ break;
case IRQ_TYPE_EDGE_RISING:
+ bank->toggle_edge_mode &= ~mask;
level |= mask;
polarity |= mask;
break;
case IRQ_TYPE_EDGE_FALLING:
+ bank->toggle_edge_mode &= ~mask;
level |= mask;
polarity &= ~mask;
break;
case IRQ_TYPE_LEVEL_HIGH:
+ bank->toggle_edge_mode &= ~mask;
level &= ~mask;
polarity |= mask;
break;
case IRQ_TYPE_LEVEL_LOW:
+ bank->toggle_edge_mode &= ~mask;
level &= ~mask;
polarity &= ~mask;
break;
@@ -1059,12 +1205,6 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
irq_gc_unlock(gc);
- /* make sure the pin is configured as gpio input */
- rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
- data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
- data &= ~mask;
- writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
-
return 0;
}
@@ -1205,6 +1345,26 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
+ /*
+ * special case, where parts of the pull-setting registers are
+ * located in the PMU register space
+ */
+ if (of_device_is_compatible(bank->of_node,
+ "rockchip,rk3188-gpio-bank0")) {
+ bank->bank_type = RK3188_BANK0;
+
+ if (of_address_to_resource(bank->of_node, 1, &res)) {
+ dev_err(dev, "cannot find IO resource for bank\n");
+ return -ENOENT;
+ }
+
+ bank->reg_pull = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(bank->reg_pull))
+ return PTR_ERR(bank->reg_pull);
+ } else {
+ bank->bank_type = COMMON_BANK;
+ }
+
bank->irq = irq_of_parse_and_map(bank->of_node, 0);
bank->clk = of_clk_get(bank->of_node, 0);
@@ -1289,6 +1449,14 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(info->reg_base))
return PTR_ERR(info->reg_base);
+ /* The RK3188 has its pull registers in a separate place */
+ if (ctrl->type == RK3188) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ info->reg_pull = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->reg_pull))
+ return PTR_ERR(info->reg_pull);
+ }
+
ret = rockchip_gpiolib_register(pdev, info);
if (ret)
return ret;
@@ -1315,10 +1483,9 @@ static struct rockchip_pin_ctrl rk2928_pin_ctrl = {
.pin_banks = rk2928_pin_banks,
.nr_banks = ARRAY_SIZE(rk2928_pin_banks),
.label = "RK2928-GPIO",
+ .type = RK2928,
.mux_offset = 0xa8,
- .pull_offset = 0x118,
- .pull_auto = 1,
- .pull_bank_stride = 8,
+ .pull_calc_reg = rk2928_calc_pull_reg_and_bit,
};
static struct rockchip_pin_bank rk3066a_pin_banks[] = {
@@ -1334,10 +1501,9 @@ static struct rockchip_pin_ctrl rk3066a_pin_ctrl = {
.pin_banks = rk3066a_pin_banks,
.nr_banks = ARRAY_SIZE(rk3066a_pin_banks),
.label = "RK3066a-GPIO",
+ .type = RK2928,
.mux_offset = 0xa8,
- .pull_offset = 0x118,
- .pull_auto = 1,
- .pull_bank_stride = 8,
+ .pull_calc_reg = rk2928_calc_pull_reg_and_bit,
};
static struct rockchip_pin_bank rk3066b_pin_banks[] = {
@@ -1351,8 +1517,8 @@ static struct rockchip_pin_ctrl rk3066b_pin_ctrl = {
.pin_banks = rk3066b_pin_banks,
.nr_banks = ARRAY_SIZE(rk3066b_pin_banks),
.label = "RK3066b-GPIO",
+ .type = RK3066B,
.mux_offset = 0x60,
- .pull_offset = -EINVAL,
};
static struct rockchip_pin_bank rk3188_pin_banks[] = {
@@ -1366,9 +1532,9 @@ static struct rockchip_pin_ctrl rk3188_pin_ctrl = {
.pin_banks = rk3188_pin_banks,
.nr_banks = ARRAY_SIZE(rk3188_pin_banks),
.label = "RK3188-GPIO",
+ .type = RK3188,
.mux_offset = 0x68,
- .pull_offset = 0x164,
- .pull_bank_stride = 16,
+ .pull_calc_reg = rk3188_calc_pull_reg_and_bit,
};
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 92a9d6c8db0..47ec2e8741e 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -1148,7 +1148,7 @@ static struct platform_driver samsung_pinctrl_driver = {
.driver = {
.name = "samsung-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(samsung_pinctrl_dt_match),
+ .of_match_table = samsung_pinctrl_dt_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a82ace4d9a2..829b98c5c66 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -15,15 +15,21 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/list.h>
+#include <linux/interrupt.h>
+
+#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_data/pinctrl-single.h>
+
#include "core.h"
#include "pinconf.h"
@@ -150,19 +156,36 @@ struct pcs_name {
};
/**
+ * struct pcs_soc_data - SoC specific settings
+ * @flags: initial SoC specific PCS_FEAT_xxx values
+ * @irq: optional interrupt for the controller
+ * @irq_enable_mask: optional SoC specific interrupt enable mask
+ * @irq_status_mask: optional SoC specific interrupt status mask
+ * @rearm: optional SoC specific wake-up rearm function
+ */
+struct pcs_soc_data {
+ unsigned flags;
+ int irq;
+ unsigned irq_enable_mask;
+ unsigned irq_status_mask;
+ void (*rearm)(void);
+};
+
+/**
* struct pcs_device - pinctrl device instance
* @res: resources
* @base: virtual address of the controller
* @size: size of the ioremapped area
* @dev: device entry
* @pctl: pin controller device
+ * @flags: mask of PCS_FEAT_xxx values
+ * @lock: spinlock for register access
* @mutex: mutex protecting the lists
* @width: bits per mux register
* @fmask: function register mask
* @fshift: function register shift
* @foff: value to turn mux off
* @fmax: max number of functions in fmask
- * @is_pinconf: whether supports pinconf
* @bits_per_pin:number of bits per pin
* @names: array of register names for pins
* @pins: physical pins on the SoC
@@ -171,6 +194,9 @@ struct pcs_name {
* @pingroups: list of pingroups
* @functions: list of functions
* @gpiofuncs: list of gpio functions
+ * @irqs: list of interrupt registers
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
* @ngroups: number of pingroups
* @nfuncs: number of functions
* @desc: pin controller descriptor
@@ -183,6 +209,12 @@ struct pcs_device {
unsigned size;
struct device *dev;
struct pinctrl_dev *pctl;
+ unsigned flags;
+#define PCS_QUIRK_SHARED_IRQ (1 << 2)
+#define PCS_FEAT_IRQ (1 << 1)
+#define PCS_FEAT_PINCONF (1 << 0)
+ struct pcs_soc_data socdata;
+ raw_spinlock_t lock;
struct mutex mutex;
unsigned width;
unsigned fmask;
@@ -190,7 +222,6 @@ struct pcs_device {
unsigned foff;
unsigned fmax;
bool bits_per_mux;
- bool is_pinconf;
unsigned bits_per_pin;
struct pcs_name *names;
struct pcs_data pins;
@@ -199,6 +230,9 @@ struct pcs_device {
struct list_head pingroups;
struct list_head functions;
struct list_head gpiofuncs;
+ struct list_head irqs;
+ struct irq_chip chip;
+ struct irq_domain *domain;
unsigned ngroups;
unsigned nfuncs;
struct pinctrl_desc desc;
@@ -206,6 +240,10 @@ struct pcs_device {
void (*write)(unsigned val, void __iomem *reg);
};
+#define PCS_QUIRK_HAS_SHARED_IRQ (pcs->flags & PCS_QUIRK_SHARED_IRQ)
+#define PCS_HAS_IRQ (pcs->flags & PCS_FEAT_IRQ)
+#define PCS_HAS_PINCONF (pcs->flags & PCS_FEAT_PINCONF)
+
static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *config);
static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
@@ -429,9 +467,11 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
for (i = 0; i < func->nvals; i++) {
struct pcs_func_vals *vals;
+ unsigned long flags;
unsigned val, mask;
vals = &func->vals[i];
+ raw_spin_lock_irqsave(&pcs->lock, flags);
val = pcs->read(vals->reg);
if (pcs->bits_per_mux)
@@ -442,6 +482,7 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
val &= ~mask;
val |= (vals->val & mask);
pcs->write(val, vals->reg);
+ raw_spin_unlock_irqrestore(&pcs->lock, flags);
}
return 0;
@@ -483,13 +524,16 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
for (i = 0; i < func->nvals; i++) {
struct pcs_func_vals *vals;
+ unsigned long flags;
unsigned val;
vals = &func->vals[i];
+ raw_spin_lock_irqsave(&pcs->lock, flags);
val = pcs->read(vals->reg);
val &= ~pcs->fmask;
val |= pcs->foff << pcs->fshift;
pcs->write(val, vals->reg);
+ raw_spin_unlock_irqrestore(&pcs->lock, flags);
}
}
@@ -1060,7 +1104,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
};
/* If pinconf isn't supported, don't parse the properties below. */
- if (!pcs->is_pinconf)
+ if (!PCS_HAS_PINCONF)
return 0;
/* calculate how many properties are supported in the current node */
@@ -1184,7 +1228,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (pcs->is_pinconf) {
+ if (PCS_HAS_PINCONF) {
res = pcs_parse_pinconf(pcs, np, function, map);
if (res)
goto free_pingroups;
@@ -1305,7 +1349,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (pcs->is_pinconf) {
+ if (PCS_HAS_PINCONF) {
dev_err(pcs->dev, "pinconf not supported\n");
goto free_pingroups;
}
@@ -1440,11 +1484,33 @@ static void pcs_free_pingroups(struct pcs_device *pcs)
}
/**
+ * pcs_irq_free() - free interrupt
+ * @pcs: pcs driver instance
+ */
+static void pcs_irq_free(struct pcs_device *pcs)
+{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
+
+ if (pcs_soc->irq < 0)
+ return;
+
+ if (pcs->domain)
+ irq_domain_remove(pcs->domain);
+
+ if (PCS_QUIRK_HAS_SHARED_IRQ)
+ free_irq(pcs_soc->irq, pcs_soc);
+ else
+ irq_set_chained_handler(pcs_soc->irq, NULL);
+}
+
+/**
* pcs_free_resources() - free memory used by this driver
* @pcs: pcs driver instance
*/
static void pcs_free_resources(struct pcs_device *pcs)
{
+ pcs_irq_free(pcs);
+
if (pcs->pctl)
pinctrl_unregister(pcs->pctl);
@@ -1493,6 +1559,264 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
}
return ret;
}
+/**
+ * struct pcs_interrupt - per-register interrupt data
+ * @reg: virtual address of interrupt register
+ * @hwirq: hardware irq number
+ * @irq: virtual irq number
+ * @node: list node
+ */
+struct pcs_interrupt {
+ void __iomem *reg;
+ irq_hw_number_t hwirq;
+ unsigned int irq;
+ struct list_head node;
+};
+
+/**
+ * pcs_irq_set() - enables or disables an interrupt
+ * @pcs_soc: SoC specific settings
+ * @irq: virtual irq number
+ * @enable: true to enable, false to disable the interrupt
+ *
+ * Note that this currently assumes one interrupt per pinctrl
+ * register that is typically used for wake-up events.
+ */
+static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
+ int irq, const bool enable)
+{
+ struct pcs_device *pcs;
+ struct list_head *pos;
+ unsigned mask;
+
+ pcs = container_of(pcs_soc, struct pcs_device, socdata);
+ list_for_each(pos, &pcs->irqs) {
+ struct pcs_interrupt *pcswi;
+ unsigned soc_mask;
+
+ pcswi = list_entry(pos, struct pcs_interrupt, node);
+ if (irq != pcswi->irq)
+ continue;
+
+ soc_mask = pcs_soc->irq_enable_mask;
+ raw_spin_lock(&pcs->lock);
+ mask = pcs->read(pcswi->reg);
+ if (enable)
+ mask |= soc_mask;
+ else
+ mask &= ~soc_mask;
+ pcs->write(mask, pcswi->reg);
+ raw_spin_unlock(&pcs->lock);
+ }
+
+ if (pcs_soc->rearm)
+ pcs_soc->rearm();
+}
+
+/**
+ * pcs_irq_mask() - mask pinctrl interrupt
+ * @d: interrupt data
+ */
+static void pcs_irq_mask(struct irq_data *d)
+{
+ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d);
+
+ pcs_irq_set(pcs_soc, d->irq, false);
+}
+
+/**
+ * pcs_irq_unmask() - unmask pinctrl interrupt
+ * @d: interrupt data
+ */
+static void pcs_irq_unmask(struct irq_data *d)
+{
+ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d);
+
+ pcs_irq_set(pcs_soc, d->irq, true);
+}
+
+/**
+ * pcs_irq_set_wake() - toggle the suspend and resume wake up
+ * @d: interrupt data
+ * @state: wake-up state
+ *
+ * Note that this should be called only for suspend and resume.
+ * For runtime PM, the wake-up events should be enabled by default.
+ */
+static int pcs_irq_set_wake(struct irq_data *d, unsigned int state)
+{
+ if (state)
+ pcs_irq_unmask(d);
+ else
+ pcs_irq_mask(d);
+
+ return 0;
+}
+
+/**
+ * pcs_irq_handle() - common interrupt handler
+ * @pcs_soc: SoC specific settings
+ *
+ * Note that this currently assumes we have one interrupt bit per
+ * mux register. This interrupt is typically used for wake-up events.
+ * For more complex interrupts different handlers can be specified.
+ */
+static int pcs_irq_handle(struct pcs_soc_data *pcs_soc)
+{
+ struct pcs_device *pcs;
+ struct list_head *pos;
+ int count = 0;
+
+ pcs = container_of(pcs_soc, struct pcs_device, socdata);
+ list_for_each(pos, &pcs->irqs) {
+ struct pcs_interrupt *pcswi;
+ unsigned mask;
+
+ pcswi = list_entry(pos, struct pcs_interrupt, node);
+ raw_spin_lock(&pcs->lock);
+ mask = pcs->read(pcswi->reg);
+ raw_spin_unlock(&pcs->lock);
+ if (mask & pcs_soc->irq_status_mask) {
+ generic_handle_irq(irq_find_mapping(pcs->domain,
+ pcswi->hwirq));
+ count++;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * pcs_irq_handler() - handler for the shared interrupt case
+ * @irq: interrupt
+ * @d: data
+ *
+ * Use this for cases where multiple instances of
+ * pinctrl-single share a single interrupt like on omaps.
+ */
+static irqreturn_t pcs_irq_handler(int irq, void *d)
+{
+ struct pcs_soc_data *pcs_soc = d;
+
+ return pcs_irq_handle(pcs_soc) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * pcs_irq_chain_handler() - handler for the dedicated chained interrupt case
+ * @irq: interrupt
+ * @desc: interrupt descriptor
+ *
+ * Use this if you have a separate interrupt for each
+ * pinctrl-single instance.
+ */
+static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip;
+ int res;
+
+ chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
+ res = pcs_irq_handle(pcs_soc);
+ /* REVISIT: export and add handle_bad_irq(irq, desc)? */
+ chained_irq_exit(chip, desc);
+
+ return;
+}
+
+static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct pcs_soc_data *pcs_soc = d->host_data;
+ struct pcs_device *pcs;
+ struct pcs_interrupt *pcswi;
+
+ pcs = container_of(pcs_soc, struct pcs_device, socdata);
+ pcswi = devm_kzalloc(pcs->dev, sizeof(*pcswi), GFP_KERNEL);
+ if (!pcswi)
+ return -ENOMEM;
+
+ pcswi->reg = pcs->base + hwirq;
+ pcswi->hwirq = hwirq;
+ pcswi->irq = irq;
+
+ mutex_lock(&pcs->mutex);
+ list_add_tail(&pcswi->node, &pcs->irqs);
+ mutex_unlock(&pcs->mutex);
+
+ irq_set_chip_data(irq, pcs_soc);
+ irq_set_chip_and_handler(irq, &pcs->chip,
+ handle_level_irq);
+
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops pcs_irqdomain_ops = {
+ .map = pcs_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/**
+ * pcs_irq_init_chained_handler() - set up a chained interrupt handler
+ * @pcs: pcs driver instance
+ * @np: device node pointer
+ */
+static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
+ struct device_node *np)
+{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
+ const char *name = "pinctrl";
+ int num_irqs;
+
+ if (!pcs_soc->irq_enable_mask ||
+ !pcs_soc->irq_status_mask) {
+ pcs_soc->irq = -1;
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&pcs->irqs);
+ pcs->chip.name = name;
+ pcs->chip.irq_ack = pcs_irq_mask;
+ pcs->chip.irq_mask = pcs_irq_mask;
+ pcs->chip.irq_unmask = pcs_irq_unmask;
+ pcs->chip.irq_set_wake = pcs_irq_set_wake;
+
+ if (PCS_QUIRK_HAS_SHARED_IRQ) {
+ int res;
+
+ res = request_irq(pcs_soc->irq, pcs_irq_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ name, pcs_soc);
+ if (res) {
+ pcs_soc->irq = -1;
+ return res;
+ }
+ } else {
+ irq_set_handler_data(pcs_soc->irq, pcs_soc);
+ irq_set_chained_handler(pcs_soc->irq,
+ pcs_irq_chain_handler);
+ }
+
+ /*
+ * We can use the register offset as the hardirq
+ * number because irq_domain_add_simple maps them lazily.
+ * This way we can easily support more than one
+ * interrupt per function if needed.
+ */
+ num_irqs = pcs->size;
+
+ pcs->domain = irq_domain_add_simple(np, num_irqs, 0,
+ &pcs_irqdomain_ops,
+ pcs_soc);
+ if (!pcs->domain) {
+ irq_set_chained_handler(pcs_soc->irq, NULL);
+ return -EINVAL;
+ }
+
+ return 0;
+}
#ifdef CONFIG_PM
static int pinctrl_single_suspend(struct platform_device *pdev,
@@ -1523,8 +1847,10 @@ static int pcs_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
+ struct pcs_pdata *pdata;
struct resource *res;
struct pcs_device *pcs;
+ const struct pcs_soc_data *soc;
int ret;
match = of_match_device(pcs_of_match, &pdev->dev);
@@ -1537,11 +1863,14 @@ static int pcs_probe(struct platform_device *pdev)
return -ENOMEM;
}
pcs->dev = &pdev->dev;
+ raw_spin_lock_init(&pcs->lock);
mutex_init(&pcs->mutex);
INIT_LIST_HEAD(&pcs->pingroups);
INIT_LIST_HEAD(&pcs->functions);
INIT_LIST_HEAD(&pcs->gpiofuncs);
- pcs->is_pinconf = match->data;
+ soc = match->data;
+ pcs->flags = soc->flags;
+ memcpy(&pcs->socdata, soc, sizeof(*soc));
PCS_GET_PROP_U32("pinctrl-single,register-width", &pcs->width,
"register width not specified\n");
@@ -1610,7 +1939,7 @@ static int pcs_probe(struct platform_device *pdev)
pcs->desc.name = DRIVER_NAME;
pcs->desc.pctlops = &pcs_pinctrl_ops;
pcs->desc.pmxops = &pcs_pinmux_ops;
- if (pcs->is_pinconf)
+ if (PCS_HAS_PINCONF)
pcs->desc.confops = &pcs_pinconf_ops;
pcs->desc.owner = THIS_MODULE;
@@ -1629,6 +1958,27 @@ static int pcs_probe(struct platform_device *pdev)
if (ret < 0)
goto free;
+ pcs->socdata.irq = irq_of_parse_and_map(np, 0);
+ if (pcs->socdata.irq)
+ pcs->flags |= PCS_FEAT_IRQ;
+
+ /* We still need auxdata for some omaps for PRM interrupts */
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ if (pdata->rearm)
+ pcs->socdata.rearm = pdata->rearm;
+ if (pdata->irq) {
+ pcs->socdata.irq = pdata->irq;
+ pcs->flags |= PCS_FEAT_IRQ;
+ }
+ }
+
+ if (PCS_HAS_IRQ) {
+ ret = pcs_irq_init_chained_handler(pcs, np);
+ if (ret < 0)
+ dev_warn(pcs->dev, "initialized with no interrupts\n");
+ }
+
dev_info(pcs->dev, "%i pins at pa %p size %u\n",
pcs->desc.npins, pcs->base, pcs->size);
@@ -1652,9 +2002,25 @@ static int pcs_remove(struct platform_device *pdev)
return 0;
}
+static const struct pcs_soc_data pinctrl_single_omap_wkup = {
+ .flags = PCS_QUIRK_SHARED_IRQ,
+ .irq_enable_mask = (1 << 14), /* OMAP_WAKEUP_EN */
+ .irq_status_mask = (1 << 15), /* OMAP_WAKEUP_EVENT */
+};
+
+static const struct pcs_soc_data pinctrl_single = {
+};
+
+static const struct pcs_soc_data pinconf_single = {
+ .flags = PCS_FEAT_PINCONF,
+};
+
static struct of_device_id pcs_of_match[] = {
- { .compatible = "pinctrl-single", .data = (void *)false },
- { .compatible = "pinconf-single", .data = (void *)true },
+ { .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "pinctrl-single", .data = &pinctrl_single },
+ { .compatible = "pinconf-single", .data = &pinconf_single },
{ },
};
MODULE_DEVICE_TABLE(of, pcs_of_match);
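
For readers wiring this up from a board file, here is a minimal sketch of how the optional pcs_pdata consumed in pcs_probe() above could be handed over via OF auxdata. The callback body, IRQ number, base address and device name are placeholders, not taken from this patch; only the struct fields used by the probe path above are assumed.

#include <linux/of_platform.h>
#include <linux/platform_data/pinctrl-single.h>

/* Hypothetical SoC hook: rearm the wake-up event latch (e.g. in the PRM). */
static void example_pcs_rearm(void)
{
}

static struct pcs_pdata example_pcs_pdata = {
	.irq	= 7,			/* placeholder wake-up interrupt */
	.rearm	= example_pcs_rearm,
};

static struct of_dev_auxdata example_auxdata_lookup[] __initdata = {
	/* base address and device name below are placeholders */
	OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a100040,
		       "4a100040.pinmux", &example_pcs_pdata),
	{ /* sentinel */ },
};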
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
new file mode 100644
index 00000000000..c5e0f6973a3
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -0,0 +1,875 @@
+/*
+ * Abilis Systems TB10x pin control driver
+ *
+ * Copyright (C) Abilis Systems 2012
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/stringify.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "pinctrl-utils.h"
+
+#define TB10X_PORT1 (0)
+#define TB10X_PORT2 (16)
+#define TB10X_PORT3 (32)
+#define TB10X_PORT4 (48)
+#define TB10X_PORT5 (128)
+#define TB10X_PORT6 (64)
+#define TB10X_PORT7 (80)
+#define TB10X_PORT8 (96)
+#define TB10X_PORT9 (112)
+#define TB10X_GPIOS (256)
+
+#define PCFG_PORT_BITWIDTH (2)
+#define PCFG_PORT_MASK(PORT) \
+ (((1 << PCFG_PORT_BITWIDTH) - 1) << (PCFG_PORT_BITWIDTH * (PORT)))
+
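A short worked example of the port-mask arithmetic, following directly from the two macros above:

/*
 * PCFG_PORT_MASK(4) = ((1 << 2) - 1) << (2 * 4) = 0x3 << 8 = 0x300
 *
 * i.e. each port owns a two-bit mode field in the single configuration
 * register; tb10x_pinctrl_set_config() below clears that field and
 * writes the new mode value into it.
 */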
+static const struct pinctrl_pin_desc tb10x_pins[] = {
+ /* Port 1 */
+ PINCTRL_PIN(TB10X_PORT1 + 0, "MICLK_S0"),
+ PINCTRL_PIN(TB10X_PORT1 + 1, "MISTRT_S0"),
+ PINCTRL_PIN(TB10X_PORT1 + 2, "MIVAL_S0"),
+ PINCTRL_PIN(TB10X_PORT1 + 3, "MDI_S0"),
+ PINCTRL_PIN(TB10X_PORT1 + 4, "GPIOA0"),
+ PINCTRL_PIN(TB10X_PORT1 + 5, "GPIOA1"),
+ PINCTRL_PIN(TB10X_PORT1 + 6, "GPIOA2"),
+ PINCTRL_PIN(TB10X_PORT1 + 7, "MDI_S1"),
+ PINCTRL_PIN(TB10X_PORT1 + 8, "MIVAL_S1"),
+ PINCTRL_PIN(TB10X_PORT1 + 9, "MISTRT_S1"),
+ PINCTRL_PIN(TB10X_PORT1 + 10, "MICLK_S1"),
+ /* Port 2 */
+ PINCTRL_PIN(TB10X_PORT2 + 0, "MICLK_S2"),
+ PINCTRL_PIN(TB10X_PORT2 + 1, "MISTRT_S2"),
+ PINCTRL_PIN(TB10X_PORT2 + 2, "MIVAL_S2"),
+ PINCTRL_PIN(TB10X_PORT2 + 3, "MDI_S2"),
+ PINCTRL_PIN(TB10X_PORT2 + 4, "GPIOC0"),
+ PINCTRL_PIN(TB10X_PORT2 + 5, "GPIOC1"),
+ PINCTRL_PIN(TB10X_PORT2 + 6, "GPIOC2"),
+ PINCTRL_PIN(TB10X_PORT2 + 7, "MDI_S3"),
+ PINCTRL_PIN(TB10X_PORT2 + 8, "MIVAL_S3"),
+ PINCTRL_PIN(TB10X_PORT2 + 9, "MISTRT_S3"),
+ PINCTRL_PIN(TB10X_PORT2 + 10, "MICLK_S3"),
+ /* Port 3 */
+ PINCTRL_PIN(TB10X_PORT3 + 0, "MICLK_S4"),
+ PINCTRL_PIN(TB10X_PORT3 + 1, "MISTRT_S4"),
+ PINCTRL_PIN(TB10X_PORT3 + 2, "MIVAL_S4"),
+ PINCTRL_PIN(TB10X_PORT3 + 3, "MDI_S4"),
+ PINCTRL_PIN(TB10X_PORT3 + 4, "GPIOE0"),
+ PINCTRL_PIN(TB10X_PORT3 + 5, "GPIOE1"),
+ PINCTRL_PIN(TB10X_PORT3 + 6, "GPIOE2"),
+ PINCTRL_PIN(TB10X_PORT3 + 7, "MDI_S5"),
+ PINCTRL_PIN(TB10X_PORT3 + 8, "MIVAL_S5"),
+ PINCTRL_PIN(TB10X_PORT3 + 9, "MISTRT_S5"),
+ PINCTRL_PIN(TB10X_PORT3 + 10, "MICLK_S5"),
+ /* Port 4 */
+ PINCTRL_PIN(TB10X_PORT4 + 0, "MICLK_S6"),
+ PINCTRL_PIN(TB10X_PORT4 + 1, "MISTRT_S6"),
+ PINCTRL_PIN(TB10X_PORT4 + 2, "MIVAL_S6"),
+ PINCTRL_PIN(TB10X_PORT4 + 3, "MDI_S6"),
+ PINCTRL_PIN(TB10X_PORT4 + 4, "GPIOG0"),
+ PINCTRL_PIN(TB10X_PORT4 + 5, "GPIOG1"),
+ PINCTRL_PIN(TB10X_PORT4 + 6, "GPIOG2"),
+ PINCTRL_PIN(TB10X_PORT4 + 7, "MDI_S7"),
+ PINCTRL_PIN(TB10X_PORT4 + 8, "MIVAL_S7"),
+ PINCTRL_PIN(TB10X_PORT4 + 9, "MISTRT_S7"),
+ PINCTRL_PIN(TB10X_PORT4 + 10, "MICLK_S7"),
+ /* Port 5 */
+ PINCTRL_PIN(TB10X_PORT5 + 0, "PC_CE1N"),
+ PINCTRL_PIN(TB10X_PORT5 + 1, "PC_CE2N"),
+ PINCTRL_PIN(TB10X_PORT5 + 2, "PC_REGN"),
+ PINCTRL_PIN(TB10X_PORT5 + 3, "PC_INPACKN"),
+ PINCTRL_PIN(TB10X_PORT5 + 4, "PC_OEN"),
+ PINCTRL_PIN(TB10X_PORT5 + 5, "PC_WEN"),
+ PINCTRL_PIN(TB10X_PORT5 + 6, "PC_IORDN"),
+ PINCTRL_PIN(TB10X_PORT5 + 7, "PC_IOWRN"),
+ PINCTRL_PIN(TB10X_PORT5 + 8, "PC_RDYIRQN"),
+ PINCTRL_PIN(TB10X_PORT5 + 9, "PC_WAITN"),
+ PINCTRL_PIN(TB10X_PORT5 + 10, "PC_A0"),
+ PINCTRL_PIN(TB10X_PORT5 + 11, "PC_A1"),
+ PINCTRL_PIN(TB10X_PORT5 + 12, "PC_A2"),
+ PINCTRL_PIN(TB10X_PORT5 + 13, "PC_A3"),
+ PINCTRL_PIN(TB10X_PORT5 + 14, "PC_A4"),
+ PINCTRL_PIN(TB10X_PORT5 + 15, "PC_A5"),
+ PINCTRL_PIN(TB10X_PORT5 + 16, "PC_A6"),
+ PINCTRL_PIN(TB10X_PORT5 + 17, "PC_A7"),
+ PINCTRL_PIN(TB10X_PORT5 + 18, "PC_A8"),
+ PINCTRL_PIN(TB10X_PORT5 + 19, "PC_A9"),
+ PINCTRL_PIN(TB10X_PORT5 + 20, "PC_A10"),
+ PINCTRL_PIN(TB10X_PORT5 + 21, "PC_A11"),
+ PINCTRL_PIN(TB10X_PORT5 + 22, "PC_A12"),
+ PINCTRL_PIN(TB10X_PORT5 + 23, "PC_A13"),
+ PINCTRL_PIN(TB10X_PORT5 + 24, "PC_A14"),
+ PINCTRL_PIN(TB10X_PORT5 + 25, "PC_D0"),
+ PINCTRL_PIN(TB10X_PORT5 + 26, "PC_D1"),
+ PINCTRL_PIN(TB10X_PORT5 + 27, "PC_D2"),
+ PINCTRL_PIN(TB10X_PORT5 + 28, "PC_D3"),
+ PINCTRL_PIN(TB10X_PORT5 + 29, "PC_D4"),
+ PINCTRL_PIN(TB10X_PORT5 + 30, "PC_D5"),
+ PINCTRL_PIN(TB10X_PORT5 + 31, "PC_D6"),
+ PINCTRL_PIN(TB10X_PORT5 + 32, "PC_D7"),
+ PINCTRL_PIN(TB10X_PORT5 + 33, "PC_MOSTRT"),
+ PINCTRL_PIN(TB10X_PORT5 + 34, "PC_MOVAL"),
+ PINCTRL_PIN(TB10X_PORT5 + 35, "PC_MDO0"),
+ PINCTRL_PIN(TB10X_PORT5 + 36, "PC_MDO1"),
+ PINCTRL_PIN(TB10X_PORT5 + 37, "PC_MDO2"),
+ PINCTRL_PIN(TB10X_PORT5 + 38, "PC_MDO3"),
+ PINCTRL_PIN(TB10X_PORT5 + 39, "PC_MDO4"),
+ PINCTRL_PIN(TB10X_PORT5 + 40, "PC_MDO5"),
+ PINCTRL_PIN(TB10X_PORT5 + 41, "PC_MDO6"),
+ PINCTRL_PIN(TB10X_PORT5 + 42, "PC_MDO7"),
+ PINCTRL_PIN(TB10X_PORT5 + 43, "PC_MISTRT"),
+ PINCTRL_PIN(TB10X_PORT5 + 44, "PC_MIVAL"),
+ PINCTRL_PIN(TB10X_PORT5 + 45, "PC_MDI0"),
+ PINCTRL_PIN(TB10X_PORT5 + 46, "PC_MDI1"),
+ PINCTRL_PIN(TB10X_PORT5 + 47, "PC_MDI2"),
+ PINCTRL_PIN(TB10X_PORT5 + 48, "PC_MDI3"),
+ PINCTRL_PIN(TB10X_PORT5 + 49, "PC_MDI4"),
+ PINCTRL_PIN(TB10X_PORT5 + 50, "PC_MDI5"),
+ PINCTRL_PIN(TB10X_PORT5 + 51, "PC_MDI6"),
+ PINCTRL_PIN(TB10X_PORT5 + 52, "PC_MDI7"),
+ PINCTRL_PIN(TB10X_PORT5 + 53, "PC_MICLK"),
+ /* Port 6 */
+ PINCTRL_PIN(TB10X_PORT6 + 0, "T_MOSTRT_S0"),
+ PINCTRL_PIN(TB10X_PORT6 + 1, "T_MOVAL_S0"),
+ PINCTRL_PIN(TB10X_PORT6 + 2, "T_MDO_S0"),
+ PINCTRL_PIN(TB10X_PORT6 + 3, "T_MOSTRT_S1"),
+ PINCTRL_PIN(TB10X_PORT6 + 4, "T_MOVAL_S1"),
+ PINCTRL_PIN(TB10X_PORT6 + 5, "T_MDO_S1"),
+ PINCTRL_PIN(TB10X_PORT6 + 6, "T_MOSTRT_S2"),
+ PINCTRL_PIN(TB10X_PORT6 + 7, "T_MOVAL_S2"),
+ PINCTRL_PIN(TB10X_PORT6 + 8, "T_MDO_S2"),
+ PINCTRL_PIN(TB10X_PORT6 + 9, "T_MOSTRT_S3"),
+ /* Port 7 */
+ PINCTRL_PIN(TB10X_PORT7 + 0, "UART0_TXD"),
+ PINCTRL_PIN(TB10X_PORT7 + 1, "UART0_RXD"),
+ PINCTRL_PIN(TB10X_PORT7 + 2, "UART0_CTS"),
+ PINCTRL_PIN(TB10X_PORT7 + 3, "UART0_RTS"),
+ PINCTRL_PIN(TB10X_PORT7 + 4, "UART1_TXD"),
+ PINCTRL_PIN(TB10X_PORT7 + 5, "UART1_RXD"),
+ PINCTRL_PIN(TB10X_PORT7 + 6, "UART1_CTS"),
+ PINCTRL_PIN(TB10X_PORT7 + 7, "UART1_RTS"),
+ /* Port 8 */
+ PINCTRL_PIN(TB10X_PORT8 + 0, "SPI3_CLK"),
+ PINCTRL_PIN(TB10X_PORT8 + 1, "SPI3_MISO"),
+ PINCTRL_PIN(TB10X_PORT8 + 2, "SPI3_MOSI"),
+ PINCTRL_PIN(TB10X_PORT8 + 3, "SPI3_SSN"),
+ /* Port 9 */
+ PINCTRL_PIN(TB10X_PORT9 + 0, "SPI1_CLK"),
+ PINCTRL_PIN(TB10X_PORT9 + 1, "SPI1_MISO"),
+ PINCTRL_PIN(TB10X_PORT9 + 2, "SPI1_MOSI"),
+ PINCTRL_PIN(TB10X_PORT9 + 3, "SPI1_SSN0"),
+ PINCTRL_PIN(TB10X_PORT9 + 4, "SPI1_SSN1"),
+ /* Unmuxed GPIOs */
+ PINCTRL_PIN(TB10X_GPIOS + 0, "GPIOB0"),
+ PINCTRL_PIN(TB10X_GPIOS + 1, "GPIOB1"),
+
+ PINCTRL_PIN(TB10X_GPIOS + 2, "GPIOD0"),
+ PINCTRL_PIN(TB10X_GPIOS + 3, "GPIOD1"),
+
+ PINCTRL_PIN(TB10X_GPIOS + 4, "GPIOF0"),
+ PINCTRL_PIN(TB10X_GPIOS + 5, "GPIOF1"),
+
+ PINCTRL_PIN(TB10X_GPIOS + 6, "GPIOH0"),
+ PINCTRL_PIN(TB10X_GPIOS + 7, "GPIOH1"),
+
+ PINCTRL_PIN(TB10X_GPIOS + 8, "GPIOI0"),
+ PINCTRL_PIN(TB10X_GPIOS + 9, "GPIOI1"),
+ PINCTRL_PIN(TB10X_GPIOS + 10, "GPIOI2"),
+ PINCTRL_PIN(TB10X_GPIOS + 11, "GPIOI3"),
+ PINCTRL_PIN(TB10X_GPIOS + 12, "GPIOI4"),
+ PINCTRL_PIN(TB10X_GPIOS + 13, "GPIOI5"),
+ PINCTRL_PIN(TB10X_GPIOS + 14, "GPIOI6"),
+ PINCTRL_PIN(TB10X_GPIOS + 15, "GPIOI7"),
+ PINCTRL_PIN(TB10X_GPIOS + 16, "GPIOI8"),
+ PINCTRL_PIN(TB10X_GPIOS + 17, "GPIOI9"),
+ PINCTRL_PIN(TB10X_GPIOS + 18, "GPIOI10"),
+ PINCTRL_PIN(TB10X_GPIOS + 19, "GPIOI11"),
+
+ PINCTRL_PIN(TB10X_GPIOS + 20, "GPION0"),
+ PINCTRL_PIN(TB10X_GPIOS + 21, "GPION1"),
+ PINCTRL_PIN(TB10X_GPIOS + 22, "GPION2"),
+ PINCTRL_PIN(TB10X_GPIOS + 23, "GPION3"),
+#define MAX_PIN (TB10X_GPIOS + 24)
+ PINCTRL_PIN(MAX_PIN, "GPION4"),
+};
+
+/* Port 1 */
+static const unsigned mis0_pins[] = { TB10X_PORT1 + 0, TB10X_PORT1 + 1,
+ TB10X_PORT1 + 2, TB10X_PORT1 + 3};
+static const unsigned gpioa_pins[] = { TB10X_PORT1 + 4, TB10X_PORT1 + 5,
+ TB10X_PORT1 + 6};
+static const unsigned mis1_pins[] = { TB10X_PORT1 + 7, TB10X_PORT1 + 8,
+ TB10X_PORT1 + 9, TB10X_PORT1 + 10};
+static const unsigned mip1_pins[] = { TB10X_PORT1 + 0, TB10X_PORT1 + 1,
+ TB10X_PORT1 + 2, TB10X_PORT1 + 3,
+ TB10X_PORT1 + 4, TB10X_PORT1 + 5,
+ TB10X_PORT1 + 6, TB10X_PORT1 + 7,
+ TB10X_PORT1 + 8, TB10X_PORT1 + 9,
+ TB10X_PORT1 + 10};
+
+/* Port 2 */
+static const unsigned mis2_pins[] = { TB10X_PORT2 + 0, TB10X_PORT2 + 1,
+ TB10X_PORT2 + 2, TB10X_PORT2 + 3};
+static const unsigned gpioc_pins[] = { TB10X_PORT2 + 4, TB10X_PORT2 + 5,
+ TB10X_PORT2 + 6};
+static const unsigned mis3_pins[] = { TB10X_PORT2 + 7, TB10X_PORT2 + 8,
+ TB10X_PORT2 + 9, TB10X_PORT2 + 10};
+static const unsigned mip3_pins[] = { TB10X_PORT2 + 0, TB10X_PORT2 + 1,
+ TB10X_PORT2 + 2, TB10X_PORT2 + 3,
+ TB10X_PORT2 + 4, TB10X_PORT2 + 5,
+ TB10X_PORT2 + 6, TB10X_PORT2 + 7,
+ TB10X_PORT2 + 8, TB10X_PORT2 + 9,
+ TB10X_PORT2 + 10};
+
+/* Port 3 */
+static const unsigned mis4_pins[] = { TB10X_PORT3 + 0, TB10X_PORT3 + 1,
+ TB10X_PORT3 + 2, TB10X_PORT3 + 3};
+static const unsigned gpioe_pins[] = { TB10X_PORT3 + 4, TB10X_PORT3 + 5,
+ TB10X_PORT3 + 6};
+static const unsigned mis5_pins[] = { TB10X_PORT3 + 7, TB10X_PORT3 + 8,
+ TB10X_PORT3 + 9, TB10X_PORT3 + 10};
+static const unsigned mip5_pins[] = { TB10X_PORT3 + 0, TB10X_PORT3 + 1,
+ TB10X_PORT3 + 2, TB10X_PORT3 + 3,
+ TB10X_PORT3 + 4, TB10X_PORT3 + 5,
+ TB10X_PORT3 + 6, TB10X_PORT3 + 7,
+ TB10X_PORT3 + 8, TB10X_PORT3 + 9,
+ TB10X_PORT3 + 10};
+
+/* Port 4 */
+static const unsigned mis6_pins[] = { TB10X_PORT4 + 0, TB10X_PORT4 + 1,
+ TB10X_PORT4 + 2, TB10X_PORT4 + 3};
+static const unsigned gpiog_pins[] = { TB10X_PORT4 + 4, TB10X_PORT4 + 5,
+ TB10X_PORT4 + 6};
+static const unsigned mis7_pins[] = { TB10X_PORT4 + 7, TB10X_PORT4 + 8,
+ TB10X_PORT4 + 9, TB10X_PORT4 + 10};
+static const unsigned mip7_pins[] = { TB10X_PORT4 + 0, TB10X_PORT4 + 1,
+ TB10X_PORT4 + 2, TB10X_PORT4 + 3,
+ TB10X_PORT4 + 4, TB10X_PORT4 + 5,
+ TB10X_PORT4 + 6, TB10X_PORT4 + 7,
+ TB10X_PORT4 + 8, TB10X_PORT4 + 9,
+ TB10X_PORT4 + 10};
+
+/* Port 6 */
+static const unsigned mop_pins[] = { TB10X_PORT6 + 0, TB10X_PORT6 + 1,
+ TB10X_PORT6 + 2, TB10X_PORT6 + 3,
+ TB10X_PORT6 + 4, TB10X_PORT6 + 5,
+ TB10X_PORT6 + 6, TB10X_PORT6 + 7,
+ TB10X_PORT6 + 8, TB10X_PORT6 + 9};
+static const unsigned mos0_pins[] = { TB10X_PORT6 + 0, TB10X_PORT6 + 1,
+ TB10X_PORT6 + 2};
+static const unsigned mos1_pins[] = { TB10X_PORT6 + 3, TB10X_PORT6 + 4,
+ TB10X_PORT6 + 5};
+static const unsigned mos2_pins[] = { TB10X_PORT6 + 6, TB10X_PORT6 + 7,
+ TB10X_PORT6 + 8};
+static const unsigned mos3_pins[] = { TB10X_PORT6 + 9};
+
+/* Port 7 */
+static const unsigned uart0_pins[] = { TB10X_PORT7 + 0, TB10X_PORT7 + 1,
+ TB10X_PORT7 + 2, TB10X_PORT7 + 3};
+static const unsigned uart1_pins[] = { TB10X_PORT7 + 4, TB10X_PORT7 + 5,
+ TB10X_PORT7 + 6, TB10X_PORT7 + 7};
+static const unsigned gpiol_pins[] = { TB10X_PORT7 + 0, TB10X_PORT7 + 1,
+ TB10X_PORT7 + 2, TB10X_PORT7 + 3};
+static const unsigned gpiom_pins[] = { TB10X_PORT7 + 4, TB10X_PORT7 + 5,
+ TB10X_PORT7 + 6, TB10X_PORT7 + 7};
+
+/* Port 8 */
+static const unsigned spi3_pins[] = { TB10X_PORT8 + 0, TB10X_PORT8 + 1,
+ TB10X_PORT8 + 2, TB10X_PORT8 + 3};
+static const unsigned jtag_pins[] = { TB10X_PORT8 + 0, TB10X_PORT8 + 1,
+ TB10X_PORT8 + 2, TB10X_PORT8 + 3};
+
+/* Port 9 */
+static const unsigned spi1_pins[] = { TB10X_PORT9 + 0, TB10X_PORT9 + 1,
+ TB10X_PORT9 + 2, TB10X_PORT9 + 3,
+ TB10X_PORT9 + 4};
+static const unsigned gpion_pins[] = { TB10X_PORT9 + 0, TB10X_PORT9 + 1,
+ TB10X_PORT9 + 2, TB10X_PORT9 + 3,
+ TB10X_PORT9 + 4};
+
+/* Port 5 */
+static const unsigned gpioj_pins[] = { TB10X_PORT5 + 0, TB10X_PORT5 + 1,
+ TB10X_PORT5 + 2, TB10X_PORT5 + 3,
+ TB10X_PORT5 + 4, TB10X_PORT5 + 5,
+ TB10X_PORT5 + 6, TB10X_PORT5 + 7,
+ TB10X_PORT5 + 8, TB10X_PORT5 + 9,
+ TB10X_PORT5 + 10, TB10X_PORT5 + 11,
+ TB10X_PORT5 + 12, TB10X_PORT5 + 13,
+ TB10X_PORT5 + 14, TB10X_PORT5 + 15,
+ TB10X_PORT5 + 16, TB10X_PORT5 + 17,
+ TB10X_PORT5 + 18, TB10X_PORT5 + 19,
+ TB10X_PORT5 + 20, TB10X_PORT5 + 21,
+ TB10X_PORT5 + 22, TB10X_PORT5 + 23,
+ TB10X_PORT5 + 24, TB10X_PORT5 + 25,
+ TB10X_PORT5 + 26, TB10X_PORT5 + 27,
+ TB10X_PORT5 + 28, TB10X_PORT5 + 29,
+ TB10X_PORT5 + 30, TB10X_PORT5 + 31};
+static const unsigned gpiok_pins[] = { TB10X_PORT5 + 32, TB10X_PORT5 + 33,
+ TB10X_PORT5 + 34, TB10X_PORT5 + 35,
+ TB10X_PORT5 + 36, TB10X_PORT5 + 37,
+ TB10X_PORT5 + 38, TB10X_PORT5 + 39,
+ TB10X_PORT5 + 40, TB10X_PORT5 + 41,
+ TB10X_PORT5 + 42, TB10X_PORT5 + 43,
+ TB10X_PORT5 + 44, TB10X_PORT5 + 45,
+ TB10X_PORT5 + 46, TB10X_PORT5 + 47,
+ TB10X_PORT5 + 48, TB10X_PORT5 + 49,
+ TB10X_PORT5 + 50, TB10X_PORT5 + 51,
+ TB10X_PORT5 + 52, TB10X_PORT5 + 53};
+static const unsigned ciplus_pins[] = { TB10X_PORT5 + 0, TB10X_PORT5 + 1,
+ TB10X_PORT5 + 2, TB10X_PORT5 + 3,
+ TB10X_PORT5 + 4, TB10X_PORT5 + 5,
+ TB10X_PORT5 + 6, TB10X_PORT5 + 7,
+ TB10X_PORT5 + 8, TB10X_PORT5 + 9,
+ TB10X_PORT5 + 10, TB10X_PORT5 + 11,
+ TB10X_PORT5 + 12, TB10X_PORT5 + 13,
+ TB10X_PORT5 + 14, TB10X_PORT5 + 15,
+ TB10X_PORT5 + 16, TB10X_PORT5 + 17,
+ TB10X_PORT5 + 18, TB10X_PORT5 + 19,
+ TB10X_PORT5 + 20, TB10X_PORT5 + 21,
+ TB10X_PORT5 + 22, TB10X_PORT5 + 23,
+ TB10X_PORT5 + 24, TB10X_PORT5 + 25,
+ TB10X_PORT5 + 26, TB10X_PORT5 + 27,
+ TB10X_PORT5 + 28, TB10X_PORT5 + 29,
+ TB10X_PORT5 + 30, TB10X_PORT5 + 31,
+ TB10X_PORT5 + 32, TB10X_PORT5 + 33,
+ TB10X_PORT5 + 34, TB10X_PORT5 + 35,
+ TB10X_PORT5 + 36, TB10X_PORT5 + 37,
+ TB10X_PORT5 + 38, TB10X_PORT5 + 39,
+ TB10X_PORT5 + 40, TB10X_PORT5 + 41,
+ TB10X_PORT5 + 42, TB10X_PORT5 + 43,
+ TB10X_PORT5 + 44, TB10X_PORT5 + 45,
+ TB10X_PORT5 + 46, TB10X_PORT5 + 47,
+ TB10X_PORT5 + 48, TB10X_PORT5 + 49,
+ TB10X_PORT5 + 50, TB10X_PORT5 + 51,
+ TB10X_PORT5 + 52, TB10X_PORT5 + 53};
+static const unsigned mcard_pins[] = { TB10X_PORT5 + 3, TB10X_PORT5 + 10,
+ TB10X_PORT5 + 11, TB10X_PORT5 + 12,
+ TB10X_PORT5 + 22, TB10X_PORT5 + 23,
+ TB10X_PORT5 + 33, TB10X_PORT5 + 35,
+ TB10X_PORT5 + 36, TB10X_PORT5 + 37,
+ TB10X_PORT5 + 38, TB10X_PORT5 + 39,
+ TB10X_PORT5 + 40, TB10X_PORT5 + 41,
+ TB10X_PORT5 + 42, TB10X_PORT5 + 43,
+ TB10X_PORT5 + 45, TB10X_PORT5 + 46,
+ TB10X_PORT5 + 47, TB10X_PORT5 + 48,
+ TB10X_PORT5 + 49, TB10X_PORT5 + 50,
+ TB10X_PORT5 + 51, TB10X_PORT5 + 52,
+ TB10X_PORT5 + 53};
+static const unsigned stc0_pins[] = { TB10X_PORT5 + 34, TB10X_PORT5 + 35,
+ TB10X_PORT5 + 36, TB10X_PORT5 + 37,
+ TB10X_PORT5 + 38, TB10X_PORT5 + 39,
+ TB10X_PORT5 + 40};
+static const unsigned stc1_pins[] = { TB10X_PORT5 + 25, TB10X_PORT5 + 26,
+ TB10X_PORT5 + 27, TB10X_PORT5 + 28,
+ TB10X_PORT5 + 29, TB10X_PORT5 + 30,
+ TB10X_PORT5 + 44};
+
+/* Unmuxed GPIOs */
+static const unsigned gpiob_pins[] = { TB10X_GPIOS + 0, TB10X_GPIOS + 1};
+static const unsigned gpiod_pins[] = { TB10X_GPIOS + 2, TB10X_GPIOS + 3};
+static const unsigned gpiof_pins[] = { TB10X_GPIOS + 4, TB10X_GPIOS + 5};
+static const unsigned gpioh_pins[] = { TB10X_GPIOS + 6, TB10X_GPIOS + 7};
+static const unsigned gpioi_pins[] = { TB10X_GPIOS + 8, TB10X_GPIOS + 9,
+ TB10X_GPIOS + 10, TB10X_GPIOS + 11,
+ TB10X_GPIOS + 12, TB10X_GPIOS + 13,
+ TB10X_GPIOS + 14, TB10X_GPIOS + 15,
+ TB10X_GPIOS + 16, TB10X_GPIOS + 17,
+ TB10X_GPIOS + 18, TB10X_GPIOS + 19};
+
+struct tb10x_pinfuncgrp {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int pincnt;
+ const int port;
+ const unsigned int mode;
+ const int isgpio;
+};
+#define DEFPINFUNCGRP(NAME, PORT, MODE, ISGPIO) { \
+ .name = __stringify(NAME), \
+ .pins = NAME##_pins, .pincnt = ARRAY_SIZE(NAME##_pins), \
+ .port = (PORT), .mode = (MODE), \
+ .isgpio = (ISGPIO), \
+ }
+static const struct tb10x_pinfuncgrp tb10x_pingroups[] = {
+ DEFPINFUNCGRP(mis0, 0, 0, 0),
+ DEFPINFUNCGRP(gpioa, 0, 0, 1),
+ DEFPINFUNCGRP(mis1, 0, 0, 0),
+ DEFPINFUNCGRP(mip1, 0, 1, 0),
+ DEFPINFUNCGRP(mis2, 1, 0, 0),
+ DEFPINFUNCGRP(gpioc, 1, 0, 1),
+ DEFPINFUNCGRP(mis3, 1, 0, 0),
+ DEFPINFUNCGRP(mip3, 1, 1, 0),
+ DEFPINFUNCGRP(mis4, 2, 0, 0),
+ DEFPINFUNCGRP(gpioe, 2, 0, 1),
+ DEFPINFUNCGRP(mis5, 2, 0, 0),
+ DEFPINFUNCGRP(mip5, 2, 1, 0),
+ DEFPINFUNCGRP(mis6, 3, 0, 0),
+ DEFPINFUNCGRP(gpiog, 3, 0, 1),
+ DEFPINFUNCGRP(mis7, 3, 0, 0),
+ DEFPINFUNCGRP(mip7, 3, 1, 0),
+ DEFPINFUNCGRP(gpioj, 4, 0, 1),
+ DEFPINFUNCGRP(gpiok, 4, 0, 1),
+ DEFPINFUNCGRP(ciplus, 4, 1, 0),
+ DEFPINFUNCGRP(mcard, 4, 2, 0),
+ DEFPINFUNCGRP(stc0, 4, 3, 0),
+ DEFPINFUNCGRP(stc1, 4, 3, 0),
+ DEFPINFUNCGRP(mop, 5, 0, 0),
+ DEFPINFUNCGRP(mos0, 5, 1, 0),
+ DEFPINFUNCGRP(mos1, 5, 1, 0),
+ DEFPINFUNCGRP(mos2, 5, 1, 0),
+ DEFPINFUNCGRP(mos3, 5, 1, 0),
+ DEFPINFUNCGRP(uart0, 6, 0, 0),
+ DEFPINFUNCGRP(uart1, 6, 0, 0),
+ DEFPINFUNCGRP(gpiol, 6, 1, 1),
+ DEFPINFUNCGRP(gpiom, 6, 1, 1),
+ DEFPINFUNCGRP(spi3, 7, 0, 0),
+ DEFPINFUNCGRP(jtag, 7, 1, 0),
+ DEFPINFUNCGRP(spi1, 8, 0, 0),
+ DEFPINFUNCGRP(gpion, 8, 1, 1),
+ DEFPINFUNCGRP(gpiob, -1, 0, 1),
+ DEFPINFUNCGRP(gpiod, -1, 0, 1),
+ DEFPINFUNCGRP(gpiof, -1, 0, 1),
+ DEFPINFUNCGRP(gpioh, -1, 0, 1),
+ DEFPINFUNCGRP(gpioi, -1, 0, 1),
+};
+#undef DEFPINFUNCGRP
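
As a reading aid, the first entry of the table above, DEFPINFUNCGRP(mis0, 0, 0, 0), expands to the equivalent of the initializer below; the name example_mis0_entry is illustrative only.

static const struct tb10x_pinfuncgrp example_mis0_entry __maybe_unused = {
	.name	= "mis0",			/* __stringify(mis0) */
	.pins	= mis0_pins,
	.pincnt	= ARRAY_SIZE(mis0_pins),	/* 4 pins */
	.port	= 0,
	.mode	= 0,
	.isgpio	= 0,
};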
+
+struct tb10x_of_pinfunc {
+ const char *name;
+ const char *group;
+};
+
+#define TB10X_PORTS (9)
+
+/**
+ * struct tb10x_port - state of an I/O port
+ * @mode: Mode this port is currently in.
+ * @count: Number of enabled functions which require this port to be
+ * configured in @mode.
+ */
+struct tb10x_port {
+ unsigned int mode;
+ unsigned int count;
+};
+
+/**
+ * struct tb10x_pinctrl - TB10x pin controller internal state
+ * @pctl: pointer to the pinctrl_dev structure of this pin controller.
+ * @base: register set base address.
+ * @pingroups: pointer to an array of the pin groups this driver manages.
+ * @pinfuncgrpcnt: number of pingroups in @pingroups.
+ * @pinfuncs: pointer to an array of pin functions this driver manages.
+ * @pinfuncnt: number of pin functions in @pinfuncs.
+ * @mutex: mutex for exclusive access to a pin controller's state.
+ * @ports: current state of each port.
+ * @gpios: Indicates if a given pin is currently used as GPIO (1) or not (0).
+ */
+struct tb10x_pinctrl {
+ struct pinctrl_dev *pctl;
+ void __iomem *base;
+ const struct tb10x_pinfuncgrp *pingroups;
+ unsigned int pinfuncgrpcnt;
+ struct tb10x_of_pinfunc *pinfuncs;
+ unsigned int pinfuncnt;
+ struct mutex mutex;
+ struct tb10x_port ports[TB10X_PORTS];
+ DECLARE_BITMAP(gpios, MAX_PIN + 1);
+};
+
+static inline void tb10x_pinctrl_set_config(struct tb10x_pinctrl *state,
+ unsigned int port, unsigned int mode)
+{
+ u32 pcfg;
+
+ if (state->ports[port].count)
+ return;
+
+ state->ports[port].mode = mode;
+
+ pcfg = ioread32(state->base) & ~(PCFG_PORT_MASK(port));
+ pcfg |= (mode << (PCFG_PORT_BITWIDTH * port)) & PCFG_PORT_MASK(port);
+ iowrite32(pcfg, state->base);
+}
+
+static inline unsigned int tb10x_pinctrl_get_config(
+ struct tb10x_pinctrl *state,
+ unsigned int port)
+{
+ return (ioread32(state->base) & PCFG_PORT_MASK(port))
+ >> (PCFG_PORT_BITWIDTH * port);
+}
+
+static int tb10x_get_groups_count(struct pinctrl_dev *pctl)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ return state->pinfuncgrpcnt;
+}
+
+static const char *tb10x_get_group_name(struct pinctrl_dev *pctl, unsigned n)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ return state->pingroups[n].name;
+}
+
+static int tb10x_get_group_pins(struct pinctrl_dev *pctl, unsigned n,
+ unsigned const **pins,
+ unsigned * const num_pins)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+
+ *pins = state->pingroups[n].pins;
+ *num_pins = state->pingroups[n].pincnt;
+
+ return 0;
+}
+
+static int tb10x_dt_node_to_map(struct pinctrl_dev *pctl,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ const char *string;
+ unsigned reserved_maps = 0;
+ int ret = 0;
+
+ if (of_property_read_string(np_config, "abilis,function", &string)) {
+ pr_err("%s: No abilis,function property in device tree.\n",
+ np_config->full_name);
+ return -EINVAL;
+ }
+
+ *map = NULL;
+ *num_maps = 0;
+
+ ret = pinctrl_utils_reserve_map(pctl, map, &reserved_maps,
+ num_maps, 1);
+ if (ret)
+ goto out;
+
+ ret = pinctrl_utils_add_map_mux(pctl, map, &reserved_maps,
+ num_maps, string, np_config->name);
+
+out:
+ return ret;
+}
+
+static struct pinctrl_ops tb10x_pinctrl_ops = {
+ .get_groups_count = tb10x_get_groups_count,
+ .get_group_name = tb10x_get_group_name,
+ .get_group_pins = tb10x_get_group_pins,
+ .dt_node_to_map = tb10x_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int tb10x_get_functions_count(struct pinctrl_dev *pctl)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ return state->pinfuncnt;
+}
+
+static const char *tb10x_get_function_name(struct pinctrl_dev *pctl,
+ unsigned n)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ return state->pinfuncs[n].name;
+}
+
+static int tb10x_get_function_groups(struct pinctrl_dev *pctl,
+ unsigned n, const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+
+ *groups = &state->pinfuncs[n].group;
+ *num_groups = 1;
+
+ return 0;
+}
+
+static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
+ struct pinctrl_gpio_range *range,
+ unsigned pin)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ int muxport = -1;
+ int muxmode = -1;
+ int i;
+
+ mutex_lock(&state->mutex);
+
+ /*
+ * Figure out to which port the requested GPIO belongs and how to
+ * configure that port.
+ * This loop also checks for pin conflicts between GPIOs and other
+ * functions.
+ */
+ for (i = 0; i < state->pinfuncgrpcnt; i++) {
+ const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
+ int port = pfg->port;	/* may be -1 for always-mapped groups */
+ unsigned int mode = pfg->mode;
+ int j;
+
+ /*
+ * Skip pin groups which are always mapped and don't need
+ * to be configured.
+ */
+ if (port < 0)
+ continue;
+
+ for (j = 0; j < pfg->pincnt; j++) {
+ if (pin == pfg->pins[j]) {
+ if (pfg->isgpio) {
+ /*
+ * Remember the GPIO-only setting of
+ * the port this pin belongs to.
+ */
+ muxport = port;
+ muxmode = mode;
+ } else if (state->ports[port].count
+ && (state->ports[port].mode == mode)) {
+ /*
+ * Error: The requested pin is already
+ * used for something else.
+ */
+ mutex_unlock(&state->mutex);
+ return -EBUSY;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+ * If we haven't returned an error at this point, the GPIO pin is not
+ * used by another function and the GPIO request can be granted:
+ * Register pin as being used as GPIO so we don't allocate it to
+ * another function later.
+ */
+ set_bit(pin, state->gpios);
+
+ /*
+ * Potential conflicts between GPIOs and pin functions were caught
+ * earlier in this function and tb10x_pinctrl_set_config will do the
+ * Right Thing, either configure the port in GPIO only mode or leave
+ * another mode compatible with this GPIO request untouched.
+ */
+ if (muxport >= 0)
+ tb10x_pinctrl_set_config(state, muxport, muxmode);
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static void tb10x_gpio_disable_free(struct pinctrl_dev *pctl,
+ struct pinctrl_gpio_range *range,
+ unsigned pin)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+
+ mutex_lock(&state->mutex);
+
+ clear_bit(pin, state->gpios);
+
+ mutex_unlock(&state->mutex);
+}
+
+static int tb10x_pctl_enable(struct pinctrl_dev *pctl,
+ unsigned func_selector, unsigned group_selector)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ const struct tb10x_pinfuncgrp *grp = &state->pingroups[group_selector];
+ int i;
+
+ if (grp->port < 0)
+ return 0;
+
+ mutex_lock(&state->mutex);
+
+ /*
+ * Check if the requested function is compatible with previously
+ * requested functions.
+ */
+ if (state->ports[grp->port].count
+ && (state->ports[grp->port].mode != grp->mode)) {
+ mutex_unlock(&state->mutex);
+ return -EBUSY;
+ }
+
+ /*
+ * Check if the requested function is compatible with previously
+ * requested GPIOs.
+ */
+ for (i = 0; i < grp->pincnt; i++)
+ if (test_bit(grp->pins[i], state->gpios)) {
+ mutex_unlock(&state->mutex);
+ return -EBUSY;
+ }
+
+ tb10x_pinctrl_set_config(state, grp->port, grp->mode);
+
+ state->ports[grp->port].count++;
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static void tb10x_pctl_disable(struct pinctrl_dev *pctl,
+ unsigned func_selector, unsigned group_selector)
+{
+ struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
+ const struct tb10x_pinfuncgrp *grp = &state->pingroups[group_selector];
+
+ if (grp->port < 0)
+ return;
+
+ mutex_lock(&state->mutex);
+
+ state->ports[grp->port].count--;
+
+ mutex_unlock(&state->mutex);
+}
+
+static struct pinmux_ops tb10x_pinmux_ops = {
+ .get_functions_count = tb10x_get_functions_count,
+ .get_function_name = tb10x_get_function_name,
+ .get_function_groups = tb10x_get_function_groups,
+ .gpio_request_enable = tb10x_gpio_request_enable,
+ .gpio_disable_free = tb10x_gpio_disable_free,
+ .enable = tb10x_pctl_enable,
+ .disable = tb10x_pctl_disable,
+};
+
+static struct pinctrl_desc tb10x_pindesc = {
+ .name = "TB10x",
+ .pins = tb10x_pins,
+ .npins = ARRAY_SIZE(tb10x_pins),
+ .owner = THIS_MODULE,
+ .pctlops = &tb10x_pinctrl_ops,
+ .pmxops = &tb10x_pinmux_ops,
+};
+
+static int tb10x_pinctrl_probe(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct device_node *child;
+ struct tb10x_pinctrl *state;
+ int i;
+
+ if (!of_node) {
+ dev_err(dev, "No device tree node found.\n");
+ return -EINVAL;
+ }
+
+ if (!mem) {
+ dev_err(dev, "No memory resource defined.\n");
+ return -EINVAL;
+ }
+
+ state = devm_kzalloc(dev, sizeof(struct tb10x_pinctrl) +
+ of_get_child_count(of_node)
+ * sizeof(struct tb10x_of_pinfunc),
+ GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, state);
+ state->pinfuncs = (struct tb10x_of_pinfunc *)(state + 1);
+ mutex_init(&state->mutex);
+
+ state->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(state->base)) {
+ ret = PTR_ERR(state->base);
+ goto fail;
+ }
+
+ state->pingroups = tb10x_pingroups;
+ state->pinfuncgrpcnt = ARRAY_SIZE(tb10x_pingroups);
+
+ for (i = 0; i < TB10X_PORTS; i++)
+ state->ports[i].mode = tb10x_pinctrl_get_config(state, i);
+
+ for_each_child_of_node(of_node, child) {
+ const char *name;
+
+ if (!of_property_read_string(child, "abilis,function",
+ &name)) {
+ state->pinfuncs[state->pinfuncnt].name = child->name;
+ state->pinfuncs[state->pinfuncnt].group = name;
+ state->pinfuncnt++;
+ }
+ }
+
+ state->pctl = pinctrl_register(&tb10x_pindesc, dev, state);
+ if (!state->pctl) {
+ dev_err(dev, "could not register TB10x pin driver\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ mutex_destroy(&state->mutex);
+ return ret;
+}
+
+static int tb10x_pinctrl_remove(struct platform_device *pdev)
+{
+ struct tb10x_pinctrl *state = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(state->pctl);
+ mutex_destroy(&state->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id tb10x_pinctrl_dt_ids[] = {
+ { .compatible = "abilis,tb10x-iomux" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tb10x_pinctrl_dt_ids);
+
+static struct platform_driver tb10x_pinctrl_pdrv = {
+ .probe = tb10x_pinctrl_probe,
+ .remove = tb10x_pinctrl_remove,
+ .driver = {
+ .name = "tb10x_pinctrl",
+ .of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids),
+ .owner = THIS_MODULE
+ }
+};
+
+module_platform_driver(tb10x_pinctrl_pdrv);
+
+MODULE_AUTHOR("Christian Ruppert <christian.ruppert@abilis.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-vf610.c b/drivers/pinctrl/pinctrl-vf610.c
index 68a970b1dbc..bddd913d28b 100644
--- a/drivers/pinctrl/pinctrl-vf610.c
+++ b/drivers/pinctrl/pinctrl-vf610.c
@@ -316,7 +316,7 @@ static struct platform_driver vf610_pinctrl_driver = {
.driver = {
.name = "vf610-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(vf610_pinctrl_of_match),
+ .of_match_table = vf610_pinctrl_of_match,
},
.probe = vf610_pinctrl_probe,
.remove = imx_pinctrl_remove,
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 9d144a263dc..9248ce4efed 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -505,16 +505,14 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting)
pin_free(pctldev, pins[i], NULL);
} else {
const char *gname;
- const char *pname;
- pname = desc ? desc->name : "non-existing";
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
dev_warn(pctldev->dev,
"not freeing pin %d (%s) as part of "
"deactivating group %s - it is already "
"used for some other setting",
- pins[i], pname, gname);
+ pins[i], desc->name, gname);
}
}
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 636a882b406..26187aa5cf5 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -45,6 +45,11 @@ config PINCTRL_PFC_R8A7790
depends on ARCH_R8A7790
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A7791
+ def_bool y
+ depends on ARCH_R8A7791
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_SH7203
def_bool y
depends on CPU_SUBTYPE_SH7203
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 5e0c222c12d..ad8f4cf9faa 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 738f14f65cf..d77ece5217f 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -431,6 +431,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7790_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7791
+ {
+ .compatible = "renesas,pfc-r8a7791",
+ .data = &r8a7791_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH7372
{
.compatible = "renesas,pfc-sh7372",
@@ -558,6 +564,9 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_R8A7790
{ "pfc-r8a7790", (kernel_ulong_t)&r8a7790_pinmux_info },
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7791
+ { "pfc-r8a7791", (kernel_ulong_t)&r8a7791_pinmux_info },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH7203
{ "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
#endif
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index a1b23762ac9..11ea8726865 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -69,6 +69,7 @@ extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 428d2a6857e..8b1881c2059 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -1288,6 +1288,49 @@ static struct sh_pfc_pin pinmux_pins[] = {
arg5##_MARK, arg6##_MARK, \
arg7##_MARK, arg8##_MARK, }
+/* - AUDIO macro -------------------------------------------------------------*/
+#define AUDIO_PFC_PIN(name, pin) SH_PFC_PINS(name, pin)
+#define AUDIO_PFC_DAT(name, pin) SH_PFC_MUX1(name, pin)
+
+/* - AUDIO clock -------------------------------------------------------------*/
+AUDIO_PFC_PIN(audio_clk_a, RCAR_GP_PIN(2, 22));
+AUDIO_PFC_DAT(audio_clk_a, AUDIO_CLKA);
+AUDIO_PFC_PIN(audio_clk_b, RCAR_GP_PIN(2, 23));
+AUDIO_PFC_DAT(audio_clk_b, AUDIO_CLKB);
+AUDIO_PFC_PIN(audio_clk_c, RCAR_GP_PIN(2, 7));
+AUDIO_PFC_DAT(audio_clk_c, AUDIO_CLKC);
+AUDIO_PFC_PIN(audio_clkout_a, RCAR_GP_PIN(2, 16));
+AUDIO_PFC_DAT(audio_clkout_a, AUDIO_CLKOUT_A);
+AUDIO_PFC_PIN(audio_clkout_b, RCAR_GP_PIN(1, 16));
+AUDIO_PFC_DAT(audio_clkout_b, AUDIO_CLKOUT_B);
+
+/* - CAN macro ---------------------------------------------------------------- */
+#define CAN_PFC_PINS(name, args...) SH_PFC_PINS(name, args)
+#define CAN_PFC_DATA(name, tx, rx) SH_PFC_MUX2(name, tx, rx)
+#define CAN_PFC_CLK(name, clk) SH_PFC_MUX1(name, clk)
+
+/* - CAN0 ------------------------------------------------------------------- */
+CAN_PFC_PINS(can0_data_a, RCAR_GP_PIN(1, 30), RCAR_GP_PIN(1, 31));
+CAN_PFC_DATA(can0_data_a, CAN0_TX_A, CAN0_RX_A);
+CAN_PFC_PINS(can0_data_b, RCAR_GP_PIN(2, 26), RCAR_GP_PIN(2, 27));
+CAN_PFC_DATA(can0_data_b, CAN0_TX_B, CAN0_RX_B);
+
+/* - CAN1 ------------------------------------------------------------------- */
+CAN_PFC_PINS(can1_data_a, RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19));
+CAN_PFC_DATA(can1_data_a, CAN1_TX_A, CAN1_RX_A);
+CAN_PFC_PINS(can1_data_b, RCAR_GP_PIN(2, 28), RCAR_GP_PIN(2, 29));
+CAN_PFC_DATA(can1_data_b, CAN1_TX_B, CAN1_RX_B);
+
+/* - CAN_CLK --------------------------------------------------------------- */
+CAN_PFC_PINS(can_clk_a, RCAR_GP_PIN(3, 24));
+CAN_PFC_CLK(can_clk_a, CAN_CLK_A);
+CAN_PFC_PINS(can_clk_b, RCAR_GP_PIN(1, 16));
+CAN_PFC_CLK(can_clk_b, CAN_CLK_B);
+CAN_PFC_PINS(can_clk_c, RCAR_GP_PIN(4, 24));
+CAN_PFC_CLK(can_clk_c, CAN_CLK_C);
+CAN_PFC_PINS(can_clk_d, RCAR_GP_PIN(2, 25));
+CAN_PFC_CLK(can_clk_d, CAN_CLK_D);
+
/* - Ether ------------------------------------------------------------------ */
SH_PFC_PINS(ether_rmii, RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 9),
@@ -1577,6 +1620,59 @@ SDHI_PFC_WPPN(sdhi2_wp_a, SD2_WP_A);
SDHI_PFC_PINS(sdhi2_wp_b, RCAR_GP_PIN(3, 28));
SDHI_PFC_WPPN(sdhi2_wp_b, SD2_WP_B);
+/* - SSI macro -------------------------------------------------------------- */
+#define SSI_PFC_PINS(name, args...) SH_PFC_PINS(name, args)
+#define SSI_PFC_CTRL(name, sck, ws) SH_PFC_MUX2(name, sck, ws)
+#define SSI_PFC_DATA(name, d) SH_PFC_MUX1(name, d)
+
+/* - SSI 0/1/2 -------------------------------------------------------------- */
+SSI_PFC_PINS(ssi012_ctrl, RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7));
+SSI_PFC_CTRL(ssi012_ctrl, SSI_SCK012, SSI_WS012);
+SSI_PFC_PINS(ssi0_data, RCAR_GP_PIN(3, 10));
+SSI_PFC_DATA(ssi0_data, SSI_SDATA0);
+SSI_PFC_PINS(ssi1_a_ctrl, RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 21));
+SSI_PFC_CTRL(ssi1_a_ctrl, SSI_SCK1_A, SSI_WS1_A);
+SSI_PFC_PINS(ssi1_b_ctrl, PIN_NUMBER(3, 20), RCAR_GP_PIN(1, 3));
+SSI_PFC_CTRL(ssi1_b_ctrl, SSI_SCK1_B, SSI_WS1_B);
+SSI_PFC_PINS(ssi1_data, RCAR_GP_PIN(3, 9));
+SSI_PFC_DATA(ssi1_data, SSI_SDATA1);
+SSI_PFC_PINS(ssi2_a_ctrl, RCAR_GP_PIN(2, 26), RCAR_GP_PIN(3, 4));
+SSI_PFC_CTRL(ssi2_a_ctrl, SSI_SCK2_A, SSI_WS2_A);
+SSI_PFC_PINS(ssi2_b_ctrl, RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 17));
+SSI_PFC_CTRL(ssi2_b_ctrl, SSI_SCK2_B, SSI_WS2_B);
+SSI_PFC_PINS(ssi2_data, RCAR_GP_PIN(3, 8));
+SSI_PFC_DATA(ssi2_data, SSI_SDATA2);
+
+/* - SSI 3/4 ---------------------------------------------------------------- */
+SSI_PFC_PINS(ssi34_ctrl, RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3));
+SSI_PFC_CTRL(ssi34_ctrl, SSI_SCK34, SSI_WS34);
+SSI_PFC_PINS(ssi3_data, RCAR_GP_PIN(3, 5));
+SSI_PFC_DATA(ssi3_data, SSI_SDATA3);
+SSI_PFC_PINS(ssi4_ctrl, RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 23));
+SSI_PFC_CTRL(ssi4_ctrl, SSI_SCK4, SSI_WS4);
+SSI_PFC_PINS(ssi4_data, RCAR_GP_PIN(3, 4));
+SSI_PFC_DATA(ssi4_data, SSI_SDATA4);
+
+/* - SSI 5 ------------------------------------------------------------------ */
+SSI_PFC_PINS(ssi5_ctrl, RCAR_GP_PIN(2, 31), RCAR_GP_PIN(3, 0));
+SSI_PFC_CTRL(ssi5_ctrl, SSI_SCK5, SSI_WS5);
+SSI_PFC_PINS(ssi5_data, RCAR_GP_PIN(3, 1));
+SSI_PFC_DATA(ssi5_data, SSI_SDATA5);
+
+/* - SSI 6 ------------------------------------------------------------------ */
+SSI_PFC_PINS(ssi6_ctrl, RCAR_GP_PIN(2, 28), RCAR_GP_PIN(2, 29));
+SSI_PFC_CTRL(ssi6_ctrl, SSI_SCK6, SSI_WS6);
+SSI_PFC_PINS(ssi6_data, RCAR_GP_PIN(2, 30));
+SSI_PFC_DATA(ssi6_data, SSI_SDATA6);
+
+/* - SSI 7/8 --------------------------------------------------------------- */
+SSI_PFC_PINS(ssi78_ctrl, RCAR_GP_PIN(2, 24), RCAR_GP_PIN(2, 25));
+SSI_PFC_CTRL(ssi78_ctrl, SSI_SCK78, SSI_WS78);
+SSI_PFC_PINS(ssi7_data, RCAR_GP_PIN(2, 27));
+SSI_PFC_DATA(ssi7_data, SSI_SDATA7);
+SSI_PFC_PINS(ssi8_data, RCAR_GP_PIN(2, 26));
+SSI_PFC_DATA(ssi8_data, SSI_SDATA8);
+
/* - USB0 ------------------------------------------------------------------- */
SH_PFC_PINS(usb0, RCAR_GP_PIN(0, 1));
SH_PFC_MUX1(usb0, PENC0);
@@ -1624,6 +1720,19 @@ VIN_PFC_PINS(vin1_sync, RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 22));
VIN_PFC_SYNC(vin1_sync, VI1_HSYNC, VI1_VSYNC);
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clk_c),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data_a),
+ SH_PFC_PIN_GROUP(can1_data_b),
+ SH_PFC_PIN_GROUP(can_clk_a),
+ SH_PFC_PIN_GROUP(can_clk_b),
+ SH_PFC_PIN_GROUP(can_clk_c),
+ SH_PFC_PIN_GROUP(can_clk_d),
SH_PFC_PIN_GROUP(ether_rmii),
SH_PFC_PIN_GROUP(ether_link),
SH_PFC_PIN_GROUP(ether_magic),
@@ -1713,6 +1822,25 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi2_data4_b),
SH_PFC_PIN_GROUP(sdhi2_wp_a),
SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(ssi012_ctrl),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi1_a_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_b_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data),
+ SH_PFC_PIN_GROUP(ssi2_a_ctrl),
+ SH_PFC_PIN_GROUP(ssi2_b_ctrl),
+ SH_PFC_PIN_GROUP(ssi2_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi8_data),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb0_ovc),
SH_PFC_PIN_GROUP(usb1),
@@ -1725,6 +1853,32 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(vin1_sync),
};
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a",
+ "audio_clk_b",
+ "audio_clk_c",
+ "audio_clkout_a",
+ "audio_clkout_b",
+};
+
+static const char * const can0_groups[] = {
+ "can0_data_a",
+ "can0_data_b",
+ "can_clk_a",
+ "can_clk_b",
+ "can_clk_c",
+ "can_clk_d",
+};
+
+static const char * const can1_groups[] = {
+ "can1_data_a",
+ "can1_data_b",
+ "can_clk_a",
+ "can_clk_b",
+ "can_clk_c",
+ "can_clk_d",
+};
+
static const char * const ether_groups[] = {
"ether_rmii",
"ether_link",
@@ -1875,6 +2029,28 @@ static const char * const sdhi2_groups[] = {
"sdhi2_wp_b",
};
+static const char * const ssi_groups[] = {
+ "ssi012_ctrl",
+ "ssi0_data",
+ "ssi1_a_ctrl",
+ "ssi1_b_ctrl",
+ "ssi1_data",
+ "ssi2_a_ctrl",
+ "ssi2_b_ctrl",
+ "ssi2_data",
+ "ssi34_ctrl",
+ "ssi3_data",
+ "ssi4_ctrl",
+ "ssi4_data",
+ "ssi5_ctrl",
+ "ssi5_data",
+ "ssi6_ctrl",
+ "ssi6_data",
+ "ssi78_ctrl",
+ "ssi7_data",
+ "ssi8_data",
+};
+
static const char * const usb0_groups[] = {
"usb0",
"usb0_ovc",
@@ -1898,6 +2074,9 @@ static const char * const vin1_groups[] = {
};
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
SH_PFC_FUNCTION(ether),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
@@ -1918,6 +2097,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi0),
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(vin0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 64fcc00693b..72786fc9395 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -781,6 +781,8 @@ enum {
ADICS_SAMP_MARK, DU2_CDE_MARK, QPOLB_MARK, SCIFA2_RXD_B_MARK,
USB1_PWEN_MARK, AUDIO_CLKOUT_D_MARK, USB1_OVC_MARK,
TCLK1_B_MARK,
+
+ I2C3_SCL_MARK, I2C3_SDA_MARK,
PINMUX_MARK_END,
};
@@ -1719,10 +1721,22 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D),
PINMUX_IPSR_DATA(IP16_7, USB1_OVC),
PINMUX_IPSR_MODSEL_DATA(IP16_7, TCLK1_B, SEL_TMU1_1),
+
+ PINMUX_DATA(I2C3_SCL_MARK, FN_SEL_IICDVFS_1),
+ PINMUX_DATA(I2C3_SDA_MARK, FN_SEL_IICDVFS_1),
};
+/* R8A7790 has 6 banks with 32 GPIOs in each = 192 GPIOs */
+#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
+#define PIN_NUMBER(r, c) (((r) - 'A') * 31 + (c) + 200)
+#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+
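A worked example of the numbering scheme above, for the AJ15 ball used by I2C3 below:

/*
 * PIN_A_NUMBER('J', 15)
 *   = PIN_NUMBER('Z' - 'A' + 1 + 'J', 15)
 *   = ((26 + 'J') - 'A') * 31 + 15 + 200
 *   = (26 + 9) * 31 + 215
 *   = 1300
 *
 * so pins that are not part of a GPIO bank get stable numbers well above
 * the 0..191 range used by the GPIO-capable pins.
 */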
static struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+
+ /* Pins not associated with a GPIO port */
+ SH_PFC_PIN_NAMED(ROW_GROUP_A('J'), 15, AJ15),
+ SH_PFC_PIN_NAMED(ROW_GROUP_A('H'), 15, AH15),
};
/* - DU RGB ----------------------------------------------------------------- */
@@ -1990,6 +2004,72 @@ static const unsigned int hscif1_ctrl_b_pins[] = {
static const unsigned int hscif1_ctrl_b_mux[] = {
HRTS1_N_B_MARK, HCTS1_N_B_MARK,
};
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 17),
+};
+static const unsigned int i2c1_mux[] = {
+ I2C1_SCL_MARK, I2C1_SDA_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int i2c1_b_mux[] = {
+ I2C1_SCL_B_MARK, I2C1_SDA_B_MARK,
+};
+static const unsigned int i2c1_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 30), RCAR_GP_PIN(4, 27),
+};
+static const unsigned int i2c1_c_mux[] = {
+ I2C1_SCL_C_MARK, I2C1_SDA_C_MARK,
+};
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int i2c2_mux[] = {
+ I2C2_SCL_MARK, I2C2_SDA_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int i2c2_b_mux[] = {
+ I2C2_SCL_B_MARK, I2C2_SDA_B_MARK,
+};
+static const unsigned int i2c2_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int i2c2_c_mux[] = {
+ I2C2_SCL_C_MARK, I2C2_SDA_C_MARK,
+};
+static const unsigned int i2c2_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int i2c2_d_mux[] = {
+ I2C2_SCL_D_MARK, I2C2_SDA_D_MARK,
+};
+static const unsigned int i2c2_e_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int i2c2_e_mux[] = {
+ I2C2_SCL_E_MARK, I2C2_SDA_E_MARK,
+};
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SCL, SDA */
+ PIN_A_NUMBER('J', 15), PIN_A_NUMBER('H', 15),
+};
+static const unsigned int i2c3_mux[] = {
+ I2C3_SCL_MARK, I2C3_SDA_MARK,
+};
/* - INTC ------------------------------------------------------------------- */
static const unsigned int intc_irq0_pins[] = {
/* IRQ */
@@ -3047,6 +3127,15 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(hscif1_data_b),
SH_PFC_PIN_GROUP(hscif1_clk_b),
SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c2_e),
+ SH_PFC_PIN_GROUP(i2c3),
SH_PFC_PIN_GROUP(intc_irq0),
SH_PFC_PIN_GROUP(intc_irq1),
SH_PFC_PIN_GROUP(intc_irq2),
@@ -3243,6 +3332,24 @@ static const char * const hscif1_groups[] = {
"hscif1_ctrl_b",
};
+static const char * const i2c1_groups[] = {
+ "i2c1",
+ "i2c1_b",
+ "i2c1_c",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+ "i2c2_b",
+ "i2c2_c",
+ "i2c2_d",
+ "i2c2_e",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
static const char * const intc_groups[] = {
"intc_irq0",
"intc_irq1",
@@ -3469,6 +3576,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(eth),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(intc),
SH_PFC_FUNCTION(mmc0),
SH_PFC_FUNCTION(mmc1),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
new file mode 100644
index 00000000000..bf76a654c02
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -0,0 +1,4214 @@
+/*
+ * r8a7791 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/gpio-rcar.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
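+/*
+ * CPU_ALL_PORT declares the eight GPIO banks (GP0-GP7) of this SoC,
+ * 32 entries per bank, for use by the GP_ALL() helpers below.
+ */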
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_32(0, fn, sfx), \
+ PORT_GP_32(1, fn, sfx), \
+ PORT_GP_32(2, fn, sfx), \
+ PORT_GP_32(3, fn, sfx), \
+ PORT_GP_32(4, fn, sfx), \
+ PORT_GP_32(5, fn, sfx), \
+ PORT_GP_32(6, fn, sfx), \
+ PORT_GP_32(7, fn, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+
+ /* GPSR0 */
+ FN_IP0_0, FN_IP0_1, FN_IP0_2, FN_IP0_3, FN_IP0_4, FN_IP0_5,
+ FN_IP0_6, FN_IP0_7, FN_IP0_8, FN_IP0_9, FN_IP0_10, FN_IP0_11,
+ FN_IP0_12, FN_IP0_13, FN_IP0_14, FN_IP0_15, FN_IP0_18_16, FN_IP0_20_19,
+ FN_IP0_22_21, FN_IP0_24_23, FN_IP0_26_25, FN_IP0_28_27, FN_IP0_30_29,
+ FN_IP1_1_0, FN_IP1_3_2, FN_IP1_5_4, FN_IP1_7_6, FN_IP1_10_8,
+ FN_IP1_13_11, FN_IP1_16_14, FN_IP1_19_17, FN_IP1_22_20,
+
+ /* GPSR1 */
+ FN_IP1_25_23, FN_IP1_28_26, FN_IP1_31_29, FN_IP2_2_0, FN_IP2_4_3,
+ FN_IP2_6_5, FN_IP2_9_7, FN_IP2_12_10, FN_IP2_15_13, FN_IP2_18_16,
+ FN_IP2_20_19, FN_IP2_22_21, FN_EX_CS0_N, FN_IP2_24_23, FN_IP2_26_25,
+ FN_IP2_29_27, FN_IP3_2_0, FN_IP3_5_3, FN_IP3_8_6, FN_RD_N,
+ FN_IP3_11_9, FN_IP3_13_12, FN_IP3_15_14, FN_IP3_17_16, FN_IP3_19_18,
+ FN_IP3_21_20,
+
+ /* GPSR2 */
+ FN_IP3_27_25, FN_IP3_30_28, FN_IP4_1_0, FN_IP4_4_2, FN_IP4_7_5,
+ FN_IP4_9_8, FN_IP4_12_10, FN_IP4_15_13, FN_IP4_18_16, FN_IP4_19,
+ FN_IP4_20, FN_IP4_21, FN_IP4_23_22, FN_IP4_25_24, FN_IP4_27_26,
+ FN_IP4_30_28, FN_IP5_2_0, FN_IP5_5_3, FN_IP5_8_6, FN_IP5_11_9,
+ FN_IP5_14_12, FN_IP5_16_15, FN_IP5_19_17, FN_IP5_21_20, FN_IP5_23_22,
+ FN_IP5_25_24, FN_IP5_28_26, FN_IP5_31_29, FN_AUDIO_CLKA, FN_IP6_2_0,
+ FN_IP6_5_3, FN_IP6_7_6,
+
+ /* GPSR3 */
+ FN_IP7_5_3, FN_IP7_8_6, FN_IP7_10_9, FN_IP7_12_11, FN_IP7_14_13,
+ FN_IP7_16_15, FN_IP7_18_17, FN_IP7_20_19, FN_IP7_23_21, FN_IP7_26_24,
+ FN_IP7_29_27, FN_IP8_2_0, FN_IP8_5_3, FN_IP8_8_6, FN_IP8_11_9,
+ FN_IP8_14_12, FN_IP8_17_15, FN_IP8_20_18, FN_IP8_23_21, FN_IP8_25_24,
+ FN_IP8_27_26, FN_IP8_30_28, FN_IP9_2_0, FN_IP9_5_3, FN_IP9_6, FN_IP9_7,
+ FN_IP9_10_8, FN_IP9_11, FN_IP9_12, FN_IP9_15_13, FN_IP9_16,
+ FN_IP9_18_17,
+
+ /* GPSR4 */
+ FN_VI0_CLK, FN_IP9_20_19, FN_IP9_22_21, FN_IP9_24_23, FN_IP9_26_25,
+ FN_VI0_DATA0_VI0_B0, FN_VI0_DATA1_VI0_B1, FN_VI0_DATA2_VI0_B2,
+ FN_IP9_28_27, FN_VI0_DATA4_VI0_B4, FN_VI0_DATA5_VI0_B5,
+ FN_VI0_DATA6_VI0_B6, FN_VI0_DATA7_VI0_B7, FN_IP9_31_29, FN_IP10_2_0,
+ FN_IP10_5_3, FN_IP10_8_6, FN_IP10_11_9, FN_IP10_14_12, FN_IP10_16_15,
+ FN_IP10_18_17, FN_IP10_21_19, FN_IP10_24_22, FN_IP10_26_25,
+ FN_IP10_28_27, FN_IP10_31_29, FN_IP11_2_0, FN_IP11_5_3, FN_IP11_8_6,
+ FN_IP15_1_0, FN_IP15_3_2, FN_IP15_5_4,
+
+ /* GPSR5 */
+ FN_IP11_11_9, FN_IP11_14_12, FN_IP11_16_15, FN_IP11_18_17, FN_IP11_19,
+ FN_IP11_20, FN_IP11_21, FN_IP11_22, FN_IP11_23, FN_IP11_24,
+ FN_IP11_25, FN_IP11_26, FN_IP11_27, FN_IP11_29_28, FN_IP11_31_30,
+ FN_IP12_1_0, FN_IP12_3_2, FN_IP12_6_4, FN_IP12_9_7, FN_IP12_12_10,
+ FN_IP12_15_13, FN_IP12_17_16, FN_IP12_19_18, FN_IP12_21_20,
+ FN_IP12_23_22, FN_IP12_26_24, FN_IP12_29_27, FN_IP13_2_0, FN_IP13_4_3,
+ FN_IP13_6_5, FN_IP13_9_7, FN_IP3_24_22,
+
+ /* GPSR6 */
+ FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
+ FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23,
+ FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
+ FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
+ FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
+ FN_IP14_22_20, FN_IP14_25_23, FN_IP14_28_26, FN_IP14_31_29,
+ FN_USB1_OVC, FN_DU0_DOTCLKIN,
+
+ /* GPSR7 */
+ FN_IP15_17_15, FN_IP15_20_18, FN_IP15_23_21, FN_IP15_26_24,
+ FN_IP15_29_27, FN_IP16_2_0, FN_IP16_5_3, FN_IP16_7_6, FN_IP16_9_8,
+ FN_IP16_11_10, FN_IP6_9_8, FN_IP6_11_10, FN_IP6_13_12, FN_IP6_15_14,
+ FN_IP6_18_16, FN_IP6_20_19, FN_IP6_23_21, FN_IP6_26_24, FN_IP6_29_27,
+ FN_IP7_2_0, FN_IP15_8_6, FN_IP15_11_9, FN_IP15_14_12,
+ FN_USB0_PWEN, FN_USB0_OVC, FN_USB1_PWEN,
+
+ /* IPSR0 */
+ FN_D0, FN_D1, FN_D2, FN_D3, FN_D4, FN_D5, FN_D6, FN_D7, FN_D8,
+ FN_D9, FN_D10, FN_D11, FN_D12, FN_D13, FN_D14, FN_D15,
+ FN_A0, FN_ATAWR0_N_C, FN_MSIOF0_SCK_B, FN_SCL0_C, FN_PWM2_B,
+ FN_A1, FN_MSIOF0_SYNC_B, FN_A2, FN_MSIOF0_SS1_B,
+ FN_A3, FN_MSIOF0_SS2_B, FN_A4, FN_MSIOF0_TXD_B,
+ FN_A5, FN_MSIOF0_RXD_B, FN_A6, FN_MSIOF1_SCK,
+
+ /* IPSR1 */
+ FN_A7, FN_MSIOF1_SYNC, FN_A8, FN_MSIOF1_SS1, FN_SCL0,
+ FN_A9, FN_MSIOF1_SS2, FN_SDA0,
+ FN_A10, FN_MSIOF1_TXD, FN_MSIOF1_TXD_D,
+ FN_A11, FN_MSIOF1_RXD, FN_SCL3_D, FN_MSIOF1_RXD_D,
+ FN_A12, FN_FMCLK, FN_SDA3_D, FN_MSIOF1_SCK_D,
+ FN_A13, FN_ATAG0_N_C, FN_BPFCLK, FN_MSIOF1_SS1_D,
+ FN_A14, FN_ATADIR0_N_C, FN_FMIN, FN_FMIN_C, FN_MSIOF1_SYNC_D,
+ FN_A15, FN_BPFCLK_C,
+ FN_A16, FN_DREQ2_B, FN_FMCLK_C, FN_SCIFA1_SCK_B,
+ FN_A17, FN_DACK2_B, FN_SDA0_C,
+ FN_A18, FN_DREQ1, FN_SCIFA1_RXD_C, FN_SCIFB1_RXD_C,
+
+ /* IPSR2 */
+ FN_A19, FN_DACK1, FN_SCIFA1_TXD_C, FN_SCIFB1_TXD_C, FN_SCIFB1_SCK_B,
+ FN_A20, FN_SPCLK,
+ FN_A21, FN_ATAWR0_N_B, FN_MOSI_IO0,
+ FN_A22, FN_MISO_IO1, FN_FMCLK_B, FN_TX0, FN_SCIFA0_TXD,
+ FN_A23, FN_IO2, FN_BPFCLK_B, FN_RX0, FN_SCIFA0_RXD,
+ FN_A24, FN_DREQ2, FN_IO3, FN_TX1, FN_SCIFA1_TXD,
+ FN_A25, FN_DACK2, FN_SSL, FN_DREQ1_C, FN_RX1, FN_SCIFA1_RXD,
+ FN_CS0_N, FN_ATAG0_N_B, FN_SCL1,
+ FN_CS1_N_A26, FN_ATADIR0_N_B, FN_SDA1,
+ FN_EX_CS1_N, FN_MSIOF2_SCK,
+ FN_EX_CS2_N, FN_ATAWR0_N, FN_MSIOF2_SYNC,
+ FN_EX_CS3_N, FN_ATADIR0_N, FN_MSIOF2_TXD, FN_ATAG0_N, FN_EX_WAIT1,
+
+ /* IPSR3 */
+ FN_EX_CS4_N, FN_ATARD0_N, FN_MSIOF2_RXD, FN_EX_WAIT2,
+ FN_EX_CS5_N, FN_ATACS00_N, FN_MSIOF2_SS1, FN_HRX1_B,
+ FN_SCIFB1_RXD_B, FN_PWM1, FN_TPU_TO1,
+ FN_BS_N, FN_ATACS10_N, FN_MSIOF2_SS2, FN_HTX1_B,
+ FN_SCIFB1_TXD_B, FN_PWM2, FN_TPU_TO2,
+ FN_RD_WR_N, FN_HRX2_B, FN_FMIN_B, FN_SCIFB0_RXD_B, FN_DREQ1_D,
+ FN_WE0_N, FN_HCTS2_N_B, FN_SCIFB0_TXD_B,
+ FN_WE1_N, FN_ATARD0_N_B, FN_HTX2_B, FN_SCIFB0_RTS_N_B,
+ FN_EX_WAIT0, FN_HRTS2_N_B, FN_SCIFB0_CTS_N_B,
+ FN_DREQ0, FN_PWM3, FN_TPU_TO3,
+ FN_DACK0, FN_DRACK0, FN_REMOCON,
+ FN_SPEEDIN, FN_HSCK0_C, FN_HSCK2_C, FN_SCIFB0_SCK_B,
+ FN_SCIFB2_SCK_B, FN_DREQ2_C, FN_HTX2_D,
+ FN_SSI_SCK0129, FN_HRX0_C, FN_HRX2_C, FN_SCIFB0_RXD_C, FN_SCIFB2_RXD_C,
+ FN_SSI_WS0129, FN_HTX0_C, FN_HTX2_C, FN_SCIFB0_TXD_C, FN_SCIFB2_TXD_C,
+
+ /* IPSR4 */
+ FN_SSI_SDATA0, FN_SCL0_B, FN_SCL7_B, FN_MSIOF2_SCK_C,
+ FN_SSI_SCK1, FN_SDA0_B, FN_SDA7_B, FN_MSIOF2_SYNC_C, FN_GLO_I0_D,
+ FN_SSI_WS1, FN_SCL1_B, FN_SCL8_B, FN_MSIOF2_TXD_C, FN_GLO_I1_D,
+ FN_SSI_SDATA1, FN_SDA1_B, FN_SDA8_B, FN_MSIOF2_RXD_C,
+ FN_SSI_SCK2, FN_SCL2, FN_GPS_CLK_B, FN_GLO_Q0_D, FN_HSCK1_E,
+ FN_SSI_WS2, FN_SDA2, FN_GPS_SIGN_B, FN_RX2_E,
+ FN_GLO_Q1_D, FN_HCTS1_N_E,
+ FN_SSI_SDATA2, FN_GPS_MAG_B, FN_TX2_E, FN_HRTS1_N_E,
+ FN_SSI_SCK34, FN_SSI_WS34, FN_SSI_SDATA3,
+ FN_SSI_SCK4, FN_GLO_SS_D,
+ FN_SSI_WS4, FN_GLO_RFON_D,
+ FN_SSI_SDATA4, FN_MSIOF2_SCK_D,
+ FN_SSI_SCK5, FN_MSIOF1_SCK_C, FN_TS_SDATA0, FN_GLO_I0,
+ FN_MSIOF2_SYNC_D, FN_VI1_R2_B,
+
+ /* IPSR5 */
+ FN_SSI_WS5, FN_MSIOF1_SYNC_C, FN_TS_SCK0, FN_GLO_I1,
+ FN_MSIOF2_TXD_D, FN_VI1_R3_B,
+ FN_SSI_SDATA5, FN_MSIOF1_TXD_C, FN_TS_SDEN0, FN_GLO_Q0,
+ FN_MSIOF2_SS1_D, FN_VI1_R4_B,
+ FN_SSI_SCK6, FN_MSIOF1_RXD_C, FN_TS_SPSYNC0, FN_GLO_Q1,
+ FN_MSIOF2_RXD_D, FN_VI1_R5_B,
+ FN_SSI_WS6, FN_GLO_SCLK, FN_MSIOF2_SS2_D, FN_VI1_R6_B,
+ FN_SSI_SDATA6, FN_STP_IVCXO27_0_B, FN_GLO_SDATA, FN_VI1_R7_B,
+ FN_SSI_SCK78, FN_STP_ISCLK_0_B, FN_GLO_SS,
+ FN_SSI_WS78, FN_TX0_D, FN_STP_ISD_0_B, FN_GLO_RFON,
+ FN_SSI_SDATA7, FN_RX0_D, FN_STP_ISEN_0_B,
+ FN_SSI_SDATA8, FN_TX1_D, FN_STP_ISSYNC_0_B,
+ FN_SSI_SCK9, FN_RX1_D, FN_GLO_SCLK_D,
+ FN_SSI_WS9, FN_TX3_D, FN_CAN0_TX_D, FN_GLO_SDATA_D,
+ FN_SSI_SDATA9, FN_RX3_D, FN_CAN0_RX_D,
+
+ /* IPSR6 */
+ FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
+ FN_SCIF_CLK, FN_BPFCLK_E,
+ FN_AUDIO_CLKC, FN_SCIFB0_SCK_C, FN_MSIOF1_SYNC_B, FN_RX2,
+ FN_SCIFA2_RXD, FN_FMIN_E,
+ FN_AUDIO_CLKOUT, FN_MSIOF1_SS1_B, FN_TX2, FN_SCIFA2_TXD,
+ FN_IRQ0, FN_SCIFB1_RXD_D, FN_INTC_IRQ0_N,
+ FN_IRQ1, FN_SCIFB1_SCK_C, FN_INTC_IRQ1_N,
+ FN_IRQ2, FN_SCIFB1_TXD_D, FN_INTC_IRQ2_N,
+ FN_IRQ3, FN_SCL4_C, FN_MSIOF2_TXD_E, FN_INTC_IRQ3_N,
+ FN_IRQ4, FN_HRX1_C, FN_SDA4_C, FN_MSIOF2_RXD_E, FN_INTC_IRQ4_N,
+ FN_IRQ5, FN_HTX1_C, FN_SCL1_E, FN_MSIOF2_SCK_E,
+ FN_IRQ6, FN_HSCK1_C, FN_MSIOF1_SS2_B, FN_SDA1_E, FN_MSIOF2_SYNC_E,
+ FN_IRQ7, FN_HCTS1_N_C, FN_MSIOF1_TXD_B, FN_GPS_CLK_C, FN_GPS_CLK_D,
+ FN_IRQ8, FN_HRTS1_N_C, FN_MSIOF1_RXD_B, FN_GPS_SIGN_C, FN_GPS_SIGN_D,
+
+ /* IPSR7 */
+ FN_IRQ9, FN_DU1_DOTCLKIN_B, FN_CAN_CLK_D, FN_GPS_MAG_C,
+ FN_SCIF_CLK_B, FN_GPS_MAG_D,
+ FN_DU1_DR0, FN_LCDOUT0, FN_VI1_DATA0_B, FN_TX0_B,
+ FN_SCIFA0_TXD_B, FN_MSIOF2_SCK_B,
+ FN_DU1_DR1, FN_LCDOUT1, FN_VI1_DATA1_B, FN_RX0_B,
+ FN_SCIFA0_RXD_B, FN_MSIOF2_SYNC_B,
+ FN_DU1_DR2, FN_LCDOUT2, FN_SSI_SCK0129_B,
+ FN_DU1_DR3, FN_LCDOUT3, FN_SSI_WS0129_B,
+ FN_DU1_DR4, FN_LCDOUT4, FN_SSI_SDATA0_B,
+ FN_DU1_DR5, FN_LCDOUT5, FN_SSI_SCK1_B,
+ FN_DU1_DR6, FN_LCDOUT6, FN_SSI_WS1_B,
+ FN_DU1_DR7, FN_LCDOUT7, FN_SSI_SDATA1_B,
+ FN_DU1_DG0, FN_LCDOUT8, FN_VI1_DATA2_B, FN_TX1_B,
+ FN_SCIFA1_TXD_B, FN_MSIOF2_SS1_B,
+ FN_DU1_DG1, FN_LCDOUT9, FN_VI1_DATA3_B, FN_RX1_B,
+ FN_SCIFA1_RXD_B, FN_MSIOF2_SS2_B,
+ FN_DU1_DG2, FN_LCDOUT10, FN_VI1_DATA4_B, FN_SCIF1_SCK_B,
+ FN_SCIFA1_SCK, FN_SSI_SCK78_B,
+
+ /* IPSR8 */
+ FN_DU1_DG3, FN_LCDOUT11, FN_VI1_DATA5_B, FN_SSI_WS78_B,
+ FN_DU1_DG4, FN_LCDOUT12, FN_VI1_DATA6_B, FN_HRX0_B,
+ FN_SCIFB2_RXD_B, FN_SSI_SDATA7_B,
+ FN_DU1_DG5, FN_LCDOUT13, FN_VI1_DATA7_B, FN_HCTS0_N_B,
+ FN_SCIFB2_TXD_B, FN_SSI_SDATA8_B,
+ FN_DU1_DG6, FN_LCDOUT14, FN_HRTS0_N_B,
+ FN_SCIFB2_CTS_N_B, FN_SSI_SCK9_B,
+ FN_DU1_DG7, FN_LCDOUT15, FN_HTX0_B, FN_SCIFB2_RTS_N_B, FN_SSI_WS9_B,
+ FN_DU1_DB0, FN_LCDOUT16, FN_VI1_CLK_B, FN_TX2_B,
+ FN_SCIFA2_TXD_B, FN_MSIOF2_TXD_B,
+ FN_DU1_DB1, FN_LCDOUT17, FN_VI1_HSYNC_N_B, FN_RX2_B,
+ FN_SCIFA2_RXD_B, FN_MSIOF2_RXD_B,
+ FN_DU1_DB2, FN_LCDOUT18, FN_VI1_VSYNC_N_B, FN_SCIF2_SCK_B,
+ FN_SCIFA2_SCK, FN_SSI_SDATA9_B,
+ FN_DU1_DB3, FN_LCDOUT19, FN_VI1_CLKENB_B,
+ FN_DU1_DB4, FN_LCDOUT20, FN_VI1_FIELD_B, FN_CAN1_RX,
+ FN_DU1_DB5, FN_LCDOUT21, FN_TX3, FN_SCIFA3_TXD, FN_CAN1_TX,
+
+ /* IPSR9 */
+ FN_DU1_DB6, FN_LCDOUT22, FN_SCL3_C, FN_RX3, FN_SCIFA3_RXD,
+ FN_DU1_DB7, FN_LCDOUT23, FN_SDA3_C, FN_SCIF3_SCK, FN_SCIFA3_SCK,
+ FN_DU1_DOTCLKIN, FN_QSTVA_QVS,
+ FN_DU1_DOTCLKOUT0, FN_QCLK,
+ FN_DU1_DOTCLKOUT1, FN_QSTVB_QVE, FN_CAN0_TX,
+ FN_TX3_B, FN_SCL2_B, FN_PWM4,
+ FN_DU1_EXHSYNC_DU1_HSYNC, FN_QSTH_QHS,
+ FN_DU1_EXVSYNC_DU1_VSYNC, FN_QSTB_QHE,
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_QCPV_QDE,
+ FN_CAN0_RX, FN_RX3_B, FN_SDA2_B,
+ FN_DU1_DISP, FN_QPOLA,
+ FN_DU1_CDE, FN_QPOLB, FN_PWM4_B,
+ FN_VI0_CLKENB, FN_TX4, FN_SCIFA4_TXD, FN_TS_SDATA0_D,
+ FN_VI0_FIELD, FN_RX4, FN_SCIFA4_RXD, FN_TS_SCK0_D,
+ FN_VI0_HSYNC_N, FN_TX5, FN_SCIFA5_TXD, FN_TS_SDEN0_D,
+ FN_VI0_VSYNC_N, FN_RX5, FN_SCIFA5_RXD, FN_TS_SPSYNC0_D,
+ FN_VI0_DATA3_VI0_B3, FN_SCIF3_SCK_B, FN_SCIFA3_SCK_B,
+ FN_VI0_G0, FN_SCL8, FN_STP_IVCXO27_0_C, FN_SCL4,
+ FN_HCTS2_N, FN_SCIFB2_CTS_N, FN_ATAWR1_N,
+
+ /* IPSR10 */
+ FN_VI0_G1, FN_SDA8, FN_STP_ISCLK_0_C, FN_SDA4,
+ FN_HRTS2_N, FN_SCIFB2_RTS_N, FN_ATADIR1_N,
+ FN_VI0_G2, FN_VI2_HSYNC_N, FN_STP_ISD_0_C, FN_SCL3_B,
+ FN_HSCK2, FN_SCIFB2_SCK, FN_ATARD1_N,
+ FN_VI0_G3, FN_VI2_VSYNC_N, FN_STP_ISEN_0_C, FN_SDA3_B,
+ FN_HRX2, FN_SCIFB2_RXD, FN_ATACS01_N,
+ FN_VI0_G4, FN_VI2_CLKENB, FN_STP_ISSYNC_0_C,
+ FN_HTX2, FN_SCIFB2_TXD, FN_SCIFB0_SCK_D,
+ FN_VI0_G5, FN_VI2_FIELD, FN_STP_OPWM_0_C, FN_FMCLK_D,
+ FN_CAN0_TX_E, FN_HTX1_D, FN_SCIFB0_TXD_D,
+ FN_VI0_G6, FN_VI2_CLK, FN_BPFCLK_D,
+ FN_VI0_G7, FN_VI2_DATA0, FN_FMIN_D,
+ FN_VI0_R0, FN_VI2_DATA1, FN_GLO_I0_B,
+ FN_TS_SDATA0_C, FN_ATACS11_N,
+ FN_VI0_R1, FN_VI2_DATA2, FN_GLO_I1_B,
+ FN_TS_SCK0_C, FN_ATAG1_N,
+ FN_VI0_R2, FN_VI2_DATA3, FN_GLO_Q0_B, FN_TS_SDEN0_C,
+ FN_VI0_R3, FN_VI2_DATA4, FN_GLO_Q1_B, FN_TS_SPSYNC0_C,
+ FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_SCL1_D,
+
+ /* IPSR11 */
+ FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C, FN_SDA1_D,
+ FN_VI0_R6, FN_VI2_DATA7, FN_GLO_SS_B, FN_TX1_C, FN_SCL4_B,
+ FN_VI0_R7, FN_GLO_RFON_B, FN_RX1_C, FN_CAN0_RX_E,
+ FN_SDA4_B, FN_HRX1_D, FN_SCIFB0_RXD_D,
+ FN_VI1_HSYNC_N, FN_AVB_RXD0, FN_TS_SDATA0_B, FN_TX4_B, FN_SCIFA4_TXD_B,
+ FN_VI1_VSYNC_N, FN_AVB_RXD1, FN_TS_SCK0_B, FN_RX4_B, FN_SCIFA4_RXD_B,
+ FN_VI1_CLKENB, FN_AVB_RXD2, FN_TS_SDEN0_B,
+ FN_VI1_FIELD, FN_AVB_RXD3, FN_TS_SPSYNC0_B,
+ FN_VI1_CLK, FN_AVB_RXD4, FN_VI1_DATA0, FN_AVB_RXD5,
+ FN_VI1_DATA1, FN_AVB_RXD6, FN_VI1_DATA2, FN_AVB_RXD7,
+ FN_VI1_DATA3, FN_AVB_RX_ER, FN_VI1_DATA4, FN_AVB_MDIO,
+ FN_VI1_DATA5, FN_AVB_RX_DV, FN_VI1_DATA6, FN_AVB_MAGIC,
+ FN_VI1_DATA7, FN_AVB_MDC,
+ FN_ETH_MDIO, FN_AVB_RX_CLK, FN_SCL2_C,
+ FN_ETH_CRS_DV, FN_AVB_LINK, FN_SDA2_C,
+
+ /* IPSR12 */
+ FN_ETH_RX_ER, FN_AVB_CRS, FN_SCL3, FN_SCL7,
+ FN_ETH_RXD0, FN_AVB_PHY_INT, FN_SDA3, FN_SDA7,
+ FN_ETH_RXD1, FN_AVB_GTXREFCLK, FN_CAN0_TX_C,
+ FN_SCL2_D, FN_MSIOF1_RXD_E,
+ FN_ETH_LINK, FN_AVB_TXD0, FN_CAN0_RX_C, FN_SDA2_D, FN_MSIOF1_SCK_E,
+ FN_ETH_REFCLK, FN_AVB_TXD1, FN_SCIFA3_RXD_B,
+ FN_CAN1_RX_C, FN_MSIOF1_SYNC_E,
+ FN_ETH_TXD1, FN_AVB_TXD2, FN_SCIFA3_TXD_B,
+ FN_CAN1_TX_C, FN_MSIOF1_TXD_E,
+ FN_ETH_TX_EN, FN_AVB_TXD3, FN_TCLK1_B, FN_CAN_CLK_B,
+ FN_ETH_MAGIC, FN_AVB_TXD4, FN_IETX_C,
+ FN_ETH_TXD0, FN_AVB_TXD5, FN_IECLK_C,
+ FN_ETH_MDC, FN_AVB_TXD6, FN_IERX_C,
+ FN_STP_IVCXO27_0, FN_AVB_TXD7, FN_SCIFB2_TXD_D,
+ FN_ADIDATA_B, FN_MSIOF0_SYNC_C,
+ FN_STP_ISCLK_0, FN_AVB_TX_EN, FN_SCIFB2_RXD_D,
+ FN_ADICS_SAMP_B, FN_MSIOF0_SCK_C,
+
+ /* IPSR13 */
+ FN_STP_ISD_0, FN_AVB_TX_ER, FN_SCIFB2_SCK_C,
+ FN_ADICLK_B, FN_MSIOF0_SS1_C,
+ FN_STP_ISEN_0, FN_AVB_TX_CLK, FN_ADICHS0_B, FN_MSIOF0_SS2_C,
+ FN_STP_ISSYNC_0, FN_AVB_COL, FN_ADICHS1_B, FN_MSIOF0_RXD_C,
+ FN_STP_OPWM_0, FN_AVB_GTX_CLK, FN_PWM0_B,
+ FN_ADICHS2_B, FN_MSIOF0_TXD_C,
+ FN_SD0_CLK, FN_SPCLK_B, FN_SD0_CMD, FN_MOSI_IO0_B,
+ FN_SD0_DATA0, FN_MISO_IO1_B, FN_SD0_DATA1, FN_IO2_B,
+ FN_SD0_DATA2, FN_IO3_B, FN_SD0_DATA3, FN_SSL_B,
+ FN_SD0_CD, FN_MMC_D6_B, FN_SIM0_RST_B, FN_CAN0_RX_F,
+ FN_SCIFA5_TXD_B, FN_TX3_C,
+ FN_SD0_WP, FN_MMC_D7_B, FN_SIM0_D_B, FN_CAN0_TX_F,
+ FN_SCIFA5_RXD_B, FN_RX3_C,
+ FN_SD1_CMD, FN_REMOCON_B, FN_SD1_DATA0, FN_SPEEDIN_B,
+ FN_SD1_DATA1, FN_IETX_B, FN_SD1_DATA2, FN_IECLK_B,
+ FN_SD1_DATA3, FN_IERX_B,
+ FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_SCL1_C,
+
+ /* IPSR14 */
+ FN_SD1_WP, FN_PWM1_B, FN_SDA1_C,
+ FN_SD2_CLK, FN_MMC_CLK, FN_SD2_CMD, FN_MMC_CMD,
+ FN_SD2_DATA0, FN_MMC_D0, FN_SD2_DATA1, FN_MMC_D1,
+ FN_SD2_DATA2, FN_MMC_D2, FN_SD2_DATA3, FN_MMC_D3,
+ FN_SD2_CD, FN_MMC_D4, FN_SCL8_C, FN_TX5_B, FN_SCIFA5_TXD_C,
+ FN_SD2_WP, FN_MMC_D5, FN_SDA8_C, FN_RX5_B, FN_SCIFA5_RXD_C,
+ FN_MSIOF0_SCK, FN_RX2_C, FN_ADIDATA, FN_VI1_CLK_C, FN_VI1_G0_B,
+ FN_MSIOF0_SYNC, FN_TX2_C, FN_ADICS_SAMP, FN_VI1_CLKENB_C, FN_VI1_G1_B,
+ FN_MSIOF0_TXD, FN_ADICLK, FN_VI1_FIELD_C, FN_VI1_G2_B,
+ FN_MSIOF0_RXD, FN_ADICHS0, FN_VI1_DATA0_C, FN_VI1_G3_B,
+ FN_MSIOF0_SS1, FN_MMC_D6, FN_ADICHS1, FN_TX0_E,
+ FN_VI1_HSYNC_N_C, FN_SCL7_C, FN_VI1_G4_B,
+ FN_MSIOF0_SS2, FN_MMC_D7, FN_ADICHS2, FN_RX0_E,
+ FN_VI1_VSYNC_N_C, FN_SDA7_C, FN_VI1_G5_B,
+
+ /* IPSR15 */
+ FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D,
+ FN_SIM0_CLK, FN_IECLK, FN_CAN_CLK_C,
+ FN_SIM0_D, FN_IERX, FN_CAN1_RX_D,
+ FN_GPS_CLK, FN_DU1_DOTCLKIN_C, FN_AUDIO_CLKB_B,
+ FN_PWM5_B, FN_SCIFA3_TXD_C,
+ FN_GPS_SIGN, FN_TX4_C, FN_SCIFA4_TXD_C, FN_PWM5,
+ FN_VI1_G6_B, FN_SCIFA3_RXD_C,
+ FN_GPS_MAG, FN_RX4_C, FN_SCIFA4_RXD_C, FN_PWM6,
+ FN_VI1_G7_B, FN_SCIFA3_SCK_C,
+ FN_HCTS0_N, FN_SCIFB0_CTS_N, FN_GLO_I0_C, FN_TCLK1, FN_VI1_DATA1_C,
+ FN_HRTS0_N, FN_SCIFB0_RTS_N, FN_GLO_I1_C, FN_VI1_DATA2_C,
+ FN_HSCK0, FN_SCIFB0_SCK, FN_GLO_Q0_C, FN_CAN_CLK,
+ FN_TCLK2, FN_VI1_DATA3_C,
+ FN_HRX0, FN_SCIFB0_RXD, FN_GLO_Q1_C, FN_CAN0_RX_B, FN_VI1_DATA4_C,
+ FN_HTX0, FN_SCIFB0_TXD, FN_GLO_SCLK_C, FN_CAN0_TX_B, FN_VI1_DATA5_C,
+
+ /* IPSR16 */
+ FN_HRX1, FN_SCIFB1_RXD, FN_VI1_R0_B, FN_GLO_SDATA_C, FN_VI1_DATA6_C,
+ FN_HTX1, FN_SCIFB1_TXD, FN_VI1_R1_B, FN_GLO_SS_C, FN_VI1_DATA7_C,
+ FN_HSCK1, FN_SCIFB1_SCK, FN_MLB_CK, FN_GLO_RFON_C,
+ FN_HCTS1_N, FN_SCIFB1_CTS_N, FN_MLB_SIG, FN_CAN1_TX_B,
+ FN_HRTS1_N, FN_SCIFB1_RTS_N, FN_MLB_DAT, FN_CAN1_RX_B,
+
+ /* MOD_SEL */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ FN_SEL_SCIFB_0, FN_SEL_SCIFB_1, FN_SEL_SCIFB_2, FN_SEL_SCIFB_3,
+ FN_SEL_SCIFB2_0, FN_SEL_SCIFB2_1, FN_SEL_SCIFB2_2, FN_SEL_SCIFB2_3,
+ FN_SEL_SCIFB1_0, FN_SEL_SCIFB1_1, FN_SEL_SCIFB1_2, FN_SEL_SCIFB1_3,
+ FN_SEL_SCIFA1_0, FN_SEL_SCIFA1_1, FN_SEL_SCIFA1_2,
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1,
+ FN_SEL_SCFA_0, FN_SEL_SCFA_1,
+ FN_SEL_QSP_0, FN_SEL_QSP_1,
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1,
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2, FN_SEL_HSCIF1_3,
+ FN_SEL_HSCIF1_4,
+ FN_SEL_VI1_0, FN_SEL_VI1_1, FN_SEL_VI1_2,
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1,
+ FN_SEL_LBS_0, FN_SEL_LBS_1, FN_SEL_LBS_2, FN_SEL_LBS_3,
+ FN_SEL_TSIF0_0, FN_SEL_TSIF0_1, FN_SEL_TSIF0_2, FN_SEL_TSIF0_3,
+ FN_SEL_SOF0_0, FN_SEL_SOF0_1, FN_SEL_SOF0_2,
+
+ /* MOD_SEL2 */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ FN_SEL_SCIF0_4,
+ FN_SEL_SCIF_0, FN_SEL_SCIF_1,
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ FN_SEL_CAN0_4, FN_SEL_CAN0_5,
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2,
+ FN_SEL_ADG_0, FN_SEL_ADG_1,
+ FN_SEL_FM_0, FN_SEL_FM_1, FN_SEL_FM_2, FN_SEL_FM_3, FN_SEL_FM_4,
+ FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2,
+ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
+ FN_SEL_SCIFA4_0, FN_SEL_SCIFA4_1, FN_SEL_SCIFA4_2,
+ FN_SEL_SCIFA3_0, FN_SEL_SCIFA3_1, FN_SEL_SCIFA3_2,
+ FN_SEL_SIM_0, FN_SEL_SIM_1,
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1,
+
+ /* MOD_SEL3 */
+ FN_SEL_HSCIF2_0, FN_SEL_HSCIF2_1, FN_SEL_HSCIF2_2, FN_SEL_HSCIF2_3,
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, FN_SEL_CANCLK_3,
+ FN_SEL_IIC8_0, FN_SEL_IIC8_1, FN_SEL_IIC8_2,
+ FN_SEL_IIC7_0, FN_SEL_IIC7_1, FN_SEL_IIC7_2,
+ FN_SEL_IIC4_0, FN_SEL_IIC4_1, FN_SEL_IIC4_2,
+ FN_SEL_IIC3_0, FN_SEL_IIC3_1, FN_SEL_IIC3_2, FN_SEL_IIC3_3,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
+ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+ FN_SEL_MMC_0, FN_SEL_MMC_1,
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
+ FN_SEL_IIC2_0, FN_SEL_IIC2_1, FN_SEL_IIC2_2, FN_SEL_IIC2_3,
+ FN_SEL_IIC1_0, FN_SEL_IIC1_1, FN_SEL_IIC1_2, FN_SEL_IIC1_3,
+ FN_SEL_IIC1_4,
+ FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2,
+
+ /* MOD_SEL4 */
+ FN_SEL_SOF1_0, FN_SEL_SOF1_1, FN_SEL_SOF1_2, FN_SEL_SOF1_3,
+ FN_SEL_SOF1_4,
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, FN_SEL_HSCIF0_2,
+ FN_SEL_DIS_0, FN_SEL_DIS_1, FN_SEL_DIS_2,
+ FN_SEL_RAD_0, FN_SEL_RAD_1,
+ FN_SEL_RCN_0, FN_SEL_RCN_1,
+ FN_SEL_RSP_0, FN_SEL_RSP_1,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, FN_SEL_SCIF2_3,
+ FN_SEL_SCIF2_4,
+ FN_SEL_SOF2_0, FN_SEL_SOF2_1, FN_SEL_SOF2_2, FN_SEL_SOF2_3,
+ FN_SEL_SOF2_4,
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1,
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ FN_SEL_SSP_0, FN_SEL_SSP_1, FN_SEL_SSP_2,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ EX_CS0_N_MARK, RD_N_MARK,
+
+ AUDIO_CLKA_MARK,
+
+ VI0_CLK_MARK, VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK,
+ VI0_DATA2_VI0_B2_MARK, VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+ VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+
+ SD1_CLK_MARK,
+
+ USB0_PWEN_MARK, USB0_OVC_MARK, USB1_PWEN_MARK, USB1_OVC_MARK,
+ DU0_DOTCLKIN_MARK,
+
+ /* IPSR0 */
+ D0_MARK, D1_MARK, D2_MARK, D3_MARK, D4_MARK, D5_MARK,
+ D6_MARK, D7_MARK, D8_MARK,
+ D9_MARK, D10_MARK, D11_MARK, D12_MARK, D13_MARK, D14_MARK, D15_MARK,
+ A0_MARK, ATAWR0_N_C_MARK, MSIOF0_SCK_B_MARK, SCL0_C_MARK, PWM2_B_MARK,
+ A1_MARK, MSIOF0_SYNC_B_MARK, A2_MARK, MSIOF0_SS1_B_MARK,
+ A3_MARK, MSIOF0_SS2_B_MARK, A4_MARK, MSIOF0_TXD_B_MARK,
+ A5_MARK, MSIOF0_RXD_B_MARK, A6_MARK, MSIOF1_SCK_MARK,
+
+ /* IPSR1 */
+ A7_MARK, MSIOF1_SYNC_MARK, A8_MARK, MSIOF1_SS1_MARK, SCL0_MARK,
+ A9_MARK, MSIOF1_SS2_MARK, SDA0_MARK,
+ A10_MARK, MSIOF1_TXD_MARK, MSIOF1_TXD_D_MARK,
+ A11_MARK, MSIOF1_RXD_MARK, SCL3_D_MARK, MSIOF1_RXD_D_MARK,
+ A12_MARK, FMCLK_MARK, SDA3_D_MARK, MSIOF1_SCK_D_MARK,
+ A13_MARK, ATAG0_N_C_MARK, BPFCLK_MARK, MSIOF1_SS1_D_MARK,
+ A14_MARK, ATADIR0_N_C_MARK, FMIN_MARK, FMIN_C_MARK, MSIOF1_SYNC_D_MARK,
+ A15_MARK, BPFCLK_C_MARK,
+ A16_MARK, DREQ2_B_MARK, FMCLK_C_MARK, SCIFA1_SCK_B_MARK,
+ A17_MARK, DACK2_B_MARK, SDA0_C_MARK,
+ A18_MARK, DREQ1_MARK, SCIFA1_RXD_C_MARK, SCIFB1_RXD_C_MARK,
+
+ /* IPSR2 */
+ A19_MARK, DACK1_MARK, SCIFA1_TXD_C_MARK,
+ SCIFB1_TXD_C_MARK, SCIFB1_SCK_B_MARK,
+ A20_MARK, SPCLK_MARK,
+ A21_MARK, ATAWR0_N_B_MARK, MOSI_IO0_MARK,
+ A22_MARK, MISO_IO1_MARK, FMCLK_B_MARK, TX0_MARK, SCIFA0_TXD_MARK,
+ A23_MARK, IO2_MARK, BPFCLK_B_MARK, RX0_MARK, SCIFA0_RXD_MARK,
+ A24_MARK, DREQ2_MARK, IO3_MARK, TX1_MARK, SCIFA1_TXD_MARK,
+ A25_MARK, DACK2_MARK, SSL_MARK, DREQ1_C_MARK,
+ RX1_MARK, SCIFA1_RXD_MARK,
+ CS0_N_MARK, ATAG0_N_B_MARK, SCL1_MARK,
+ CS1_N_A26_MARK, ATADIR0_N_B_MARK, SDA1_MARK,
+ EX_CS1_N_MARK, MSIOF2_SCK_MARK,
+ EX_CS2_N_MARK, ATAWR0_N_MARK, MSIOF2_SYNC_MARK,
+ EX_CS3_N_MARK, ATADIR0_N_MARK, MSIOF2_TXD_MARK,
+ ATAG0_N_MARK, EX_WAIT1_MARK,
+
+ /* IPSR3 */
+ EX_CS4_N_MARK, ATARD0_N_MARK, MSIOF2_RXD_MARK, EX_WAIT2_MARK,
+ EX_CS5_N_MARK, ATACS00_N_MARK, MSIOF2_SS1_MARK, HRX1_B_MARK,
+ SCIFB1_RXD_B_MARK, PWM1_MARK, TPU_TO1_MARK,
+ BS_N_MARK, ATACS10_N_MARK, MSIOF2_SS2_MARK, HTX1_B_MARK,
+ SCIFB1_TXD_B_MARK, PWM2_MARK, TPU_TO2_MARK,
+ RD_WR_N_MARK, HRX2_B_MARK, FMIN_B_MARK,
+ SCIFB0_RXD_B_MARK, DREQ1_D_MARK,
+ WE0_N_MARK, HCTS2_N_B_MARK, SCIFB0_TXD_B_MARK,
+ WE1_N_MARK, ATARD0_N_B_MARK, HTX2_B_MARK, SCIFB0_RTS_N_B_MARK,
+ EX_WAIT0_MARK, HRTS2_N_B_MARK, SCIFB0_CTS_N_B_MARK,
+ DREQ0_MARK, PWM3_MARK, TPU_TO3_MARK,
+ DACK0_MARK, DRACK0_MARK, REMOCON_MARK,
+ SPEEDIN_MARK, HSCK0_C_MARK, HSCK2_C_MARK, SCIFB0_SCK_B_MARK,
+ SCIFB2_SCK_B_MARK, DREQ2_C_MARK, HTX2_D_MARK,
+ SSI_SCK0129_MARK, HRX0_C_MARK, HRX2_C_MARK,
+ SCIFB0_RXD_C_MARK, SCIFB2_RXD_C_MARK,
+ SSI_WS0129_MARK, HTX0_C_MARK, HTX2_C_MARK,
+ SCIFB0_TXD_C_MARK, SCIFB2_TXD_C_MARK,
+
+ /* IPSR4 */
+ SSI_SDATA0_MARK, SCL0_B_MARK, SCL7_B_MARK, MSIOF2_SCK_C_MARK,
+ SSI_SCK1_MARK, SDA0_B_MARK, SDA7_B_MARK,
+ MSIOF2_SYNC_C_MARK, GLO_I0_D_MARK,
+ SSI_WS1_MARK, SCL1_B_MARK, SCL8_B_MARK,
+ MSIOF2_TXD_C_MARK, GLO_I1_D_MARK,
+ SSI_SDATA1_MARK, SDA1_B_MARK, SDA8_B_MARK, MSIOF2_RXD_C_MARK,
+ SSI_SCK2_MARK, SCL2_MARK, GPS_CLK_B_MARK, GLO_Q0_D_MARK, HSCK1_E_MARK,
+ SSI_WS2_MARK, SDA2_MARK, GPS_SIGN_B_MARK, RX2_E_MARK,
+ GLO_Q1_D_MARK, HCTS1_N_E_MARK,
+ SSI_SDATA2_MARK, GPS_MAG_B_MARK, TX2_E_MARK, HRTS1_N_E_MARK,
+ SSI_SCK34_MARK, SSI_WS34_MARK, SSI_SDATA3_MARK,
+ SSI_SCK4_MARK, GLO_SS_D_MARK,
+ SSI_WS4_MARK, GLO_RFON_D_MARK,
+ SSI_SDATA4_MARK, MSIOF2_SCK_D_MARK,
+ SSI_SCK5_MARK, MSIOF1_SCK_C_MARK, TS_SDATA0_MARK, GLO_I0_MARK,
+ MSIOF2_SYNC_D_MARK, VI1_R2_B_MARK,
+
+ /* IPSR5 */
+ SSI_WS5_MARK, MSIOF1_SYNC_C_MARK, TS_SCK0_MARK, GLO_I1_MARK,
+ MSIOF2_TXD_D_MARK, VI1_R3_B_MARK,
+ SSI_SDATA5_MARK, MSIOF1_TXD_C_MARK, TS_SDEN0_MARK, GLO_Q0_MARK,
+ MSIOF2_SS1_D_MARK, VI1_R4_B_MARK,
+ SSI_SCK6_MARK, MSIOF1_RXD_C_MARK, TS_SPSYNC0_MARK, GLO_Q1_MARK,
+ MSIOF2_RXD_D_MARK, VI1_R5_B_MARK,
+ SSI_WS6_MARK, GLO_SCLK_MARK, MSIOF2_SS2_D_MARK, VI1_R6_B_MARK,
+ SSI_SDATA6_MARK, STP_IVCXO27_0_B_MARK, GLO_SDATA_MARK, VI1_R7_B_MARK,
+ SSI_SCK78_MARK, STP_ISCLK_0_B_MARK, GLO_SS_MARK,
+ SSI_WS78_MARK, TX0_D_MARK, STP_ISD_0_B_MARK, GLO_RFON_MARK,
+ SSI_SDATA7_MARK, RX0_D_MARK, STP_ISEN_0_B_MARK,
+ SSI_SDATA8_MARK, TX1_D_MARK, STP_ISSYNC_0_B_MARK,
+ SSI_SCK9_MARK, RX1_D_MARK, GLO_SCLK_D_MARK,
+ SSI_WS9_MARK, TX3_D_MARK, CAN0_TX_D_MARK, GLO_SDATA_D_MARK,
+ SSI_SDATA9_MARK, RX3_D_MARK, CAN0_RX_D_MARK,
+
+ /* IPSR6 */
+ AUDIO_CLKB_MARK, STP_OPWM_0_B_MARK, MSIOF1_SCK_B_MARK,
+ SCIF_CLK_MARK, BPFCLK_E_MARK,
+ AUDIO_CLKC_MARK, SCIFB0_SCK_C_MARK, MSIOF1_SYNC_B_MARK, RX2_MARK,
+ SCIFA2_RXD_MARK, FMIN_E_MARK,
+ AUDIO_CLKOUT_MARK, MSIOF1_SS1_B_MARK, TX2_MARK, SCIFA2_TXD_MARK,
+ IRQ0_MARK, SCIFB1_RXD_D_MARK, INTC_IRQ0_N_MARK,
+ IRQ1_MARK, SCIFB1_SCK_C_MARK, INTC_IRQ1_N_MARK,
+ IRQ2_MARK, SCIFB1_TXD_D_MARK, INTC_IRQ2_N_MARK,
+ IRQ3_MARK, SCL4_C_MARK, MSIOF2_TXD_E_MARK, INTC_IRQ3_N_MARK,
+ IRQ4_MARK, HRX1_C_MARK, SDA4_C_MARK,
+ MSIOF2_RXD_E_MARK, INTC_IRQ4_N_MARK,
+ IRQ5_MARK, HTX1_C_MARK, SCL1_E_MARK, MSIOF2_SCK_E_MARK,
+ IRQ6_MARK, HSCK1_C_MARK, MSIOF1_SS2_B_MARK,
+ SDA1_E_MARK, MSIOF2_SYNC_E_MARK,
+ IRQ7_MARK, HCTS1_N_C_MARK, MSIOF1_TXD_B_MARK,
+ GPS_CLK_C_MARK, GPS_CLK_D_MARK,
+ IRQ8_MARK, HRTS1_N_C_MARK, MSIOF1_RXD_B_MARK,
+ GPS_SIGN_C_MARK, GPS_SIGN_D_MARK,
+
+ /* IPSR7 */
+ IRQ9_MARK, DU1_DOTCLKIN_B_MARK, CAN_CLK_D_MARK, GPS_MAG_C_MARK,
+ SCIF_CLK_B_MARK, GPS_MAG_D_MARK,
+ DU1_DR0_MARK, LCDOUT0_MARK, VI1_DATA0_B_MARK, TX0_B_MARK,
+ SCIFA0_TXD_B_MARK, MSIOF2_SCK_B_MARK,
+ DU1_DR1_MARK, LCDOUT1_MARK, VI1_DATA1_B_MARK, RX0_B_MARK,
+ SCIFA0_RXD_B_MARK, MSIOF2_SYNC_B_MARK,
+ DU1_DR2_MARK, LCDOUT2_MARK, SSI_SCK0129_B_MARK,
+ DU1_DR3_MARK, LCDOUT3_MARK, SSI_WS0129_B_MARK,
+ DU1_DR4_MARK, LCDOUT4_MARK, SSI_SDATA0_B_MARK,
+ DU1_DR5_MARK, LCDOUT5_MARK, SSI_SCK1_B_MARK,
+ DU1_DR6_MARK, LCDOUT6_MARK, SSI_WS1_B_MARK,
+ DU1_DR7_MARK, LCDOUT7_MARK, SSI_SDATA1_B_MARK,
+ DU1_DG0_MARK, LCDOUT8_MARK, VI1_DATA2_B_MARK, TX1_B_MARK,
+ SCIFA1_TXD_B_MARK, MSIOF2_SS1_B_MARK,
+ DU1_DG1_MARK, LCDOUT9_MARK, VI1_DATA3_B_MARK, RX1_B_MARK,
+ SCIFA1_RXD_B_MARK, MSIOF2_SS2_B_MARK,
+ DU1_DG2_MARK, LCDOUT10_MARK, VI1_DATA4_B_MARK, SCIF1_SCK_B_MARK,
+ SCIFA1_SCK_MARK, SSI_SCK78_B_MARK,
+
+ /* IPSR8 */
+ DU1_DG3_MARK, LCDOUT11_MARK, VI1_DATA5_B_MARK, SSI_WS78_B_MARK,
+ DU1_DG4_MARK, LCDOUT12_MARK, VI1_DATA6_B_MARK, HRX0_B_MARK,
+ SCIFB2_RXD_B_MARK, SSI_SDATA7_B_MARK,
+ DU1_DG5_MARK, LCDOUT13_MARK, VI1_DATA7_B_MARK, HCTS0_N_B_MARK,
+ SCIFB2_TXD_B_MARK, SSI_SDATA8_B_MARK,
+ DU1_DG6_MARK, LCDOUT14_MARK, HRTS0_N_B_MARK,
+ SCIFB2_CTS_N_B_MARK, SSI_SCK9_B_MARK,
+ DU1_DG7_MARK, LCDOUT15_MARK, HTX0_B_MARK,
+ SCIFB2_RTS_N_B_MARK, SSI_WS9_B_MARK,
+ DU1_DB0_MARK, LCDOUT16_MARK, VI1_CLK_B_MARK, TX2_B_MARK,
+ SCIFA2_TXD_B_MARK, MSIOF2_TXD_B_MARK,
+ DU1_DB1_MARK, LCDOUT17_MARK, VI1_HSYNC_N_B_MARK, RX2_B_MARK,
+ SCIFA2_RXD_B_MARK, MSIOF2_RXD_B_MARK,
+ DU1_DB2_MARK, LCDOUT18_MARK, VI1_VSYNC_N_B_MARK, SCIF2_SCK_B_MARK,
+ SCIFA2_SCK_MARK, SSI_SDATA9_B_MARK,
+ DU1_DB3_MARK, LCDOUT19_MARK, VI1_CLKENB_B_MARK,
+ DU1_DB4_MARK, LCDOUT20_MARK, VI1_FIELD_B_MARK, CAN1_RX_MARK,
+ DU1_DB5_MARK, LCDOUT21_MARK, TX3_MARK, SCIFA3_TXD_MARK, CAN1_TX_MARK,
+
+ /* IPSR9 */
+ DU1_DB6_MARK, LCDOUT22_MARK, SCL3_C_MARK, RX3_MARK, SCIFA3_RXD_MARK,
+ DU1_DB7_MARK, LCDOUT23_MARK, SDA3_C_MARK,
+ SCIF3_SCK_MARK, SCIFA3_SCK_MARK,
+ DU1_DOTCLKIN_MARK, QSTVA_QVS_MARK,
+ DU1_DOTCLKOUT0_MARK, QCLK_MARK,
+ DU1_DOTCLKOUT1_MARK, QSTVB_QVE_MARK, CAN0_TX_MARK,
+ TX3_B_MARK, SCL2_B_MARK, PWM4_MARK,
+ DU1_EXHSYNC_DU1_HSYNC_MARK, QSTH_QHS_MARK,
+ DU1_EXVSYNC_DU1_VSYNC_MARK, QSTB_QHE_MARK,
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK,
+ CAN0_RX_MARK, RX3_B_MARK, SDA2_B_MARK,
+ DU1_DISP_MARK, QPOLA_MARK,
+ DU1_CDE_MARK, QPOLB_MARK, PWM4_B_MARK,
+ VI0_CLKENB_MARK, TX4_MARK, SCIFA4_TXD_MARK, TS_SDATA0_D_MARK,
+ VI0_FIELD_MARK, RX4_MARK, SCIFA4_RXD_MARK, TS_SCK0_D_MARK,
+ VI0_HSYNC_N_MARK, TX5_MARK, SCIFA5_TXD_MARK, TS_SDEN0_D_MARK,
+ VI0_VSYNC_N_MARK, RX5_MARK, SCIFA5_RXD_MARK, TS_SPSYNC0_D_MARK,
+ VI0_DATA3_VI0_B3_MARK, SCIF3_SCK_B_MARK, SCIFA3_SCK_B_MARK,
+ VI0_G0_MARK, SCL8_MARK, STP_IVCXO27_0_C_MARK, SCL4_MARK,
+ HCTS2_N_MARK, SCIFB2_CTS_N_MARK, ATAWR1_N_MARK,
+
+ /* IPSR10 */
+ VI0_G1_MARK, SDA8_MARK, STP_ISCLK_0_C_MARK, SDA4_MARK,
+ HRTS2_N_MARK, SCIFB2_RTS_N_MARK, ATADIR1_N_MARK,
+ VI0_G2_MARK, VI2_HSYNC_N_MARK, STP_ISD_0_C_MARK, SCL3_B_MARK,
+ HSCK2_MARK, SCIFB2_SCK_MARK, ATARD1_N_MARK,
+ VI0_G3_MARK, VI2_VSYNC_N_MARK, STP_ISEN_0_C_MARK, SDA3_B_MARK,
+ HRX2_MARK, SCIFB2_RXD_MARK, ATACS01_N_MARK,
+ VI0_G4_MARK, VI2_CLKENB_MARK, STP_ISSYNC_0_C_MARK,
+ HTX2_MARK, SCIFB2_TXD_MARK, SCIFB0_SCK_D_MARK,
+ VI0_G5_MARK, VI2_FIELD_MARK, STP_OPWM_0_C_MARK, FMCLK_D_MARK,
+ CAN0_TX_E_MARK, HTX1_D_MARK, SCIFB0_TXD_D_MARK,
+ VI0_G6_MARK, VI2_CLK_MARK, BPFCLK_D_MARK,
+ VI0_G7_MARK, VI2_DATA0_MARK, FMIN_D_MARK,
+ VI0_R0_MARK, VI2_DATA1_MARK, GLO_I0_B_MARK,
+ TS_SDATA0_C_MARK, ATACS11_N_MARK,
+ VI0_R1_MARK, VI2_DATA2_MARK, GLO_I1_B_MARK,
+ TS_SCK0_C_MARK, ATAG1_N_MARK,
+ VI0_R2_MARK, VI2_DATA3_MARK, GLO_Q0_B_MARK, TS_SDEN0_C_MARK,
+ VI0_R3_MARK, VI2_DATA4_MARK, GLO_Q1_B_MARK, TS_SPSYNC0_C_MARK,
+ VI0_R4_MARK, VI2_DATA5_MARK, GLO_SCLK_B_MARK, TX0_C_MARK, SCL1_D_MARK,
+
+ /* IPSR11 */
+ VI0_R5_MARK, VI2_DATA6_MARK, GLO_SDATA_B_MARK, RX0_C_MARK, SDA1_D_MARK,
+ VI0_R6_MARK, VI2_DATA7_MARK, GLO_SS_B_MARK, TX1_C_MARK, SCL4_B_MARK,
+ VI0_R7_MARK, GLO_RFON_B_MARK, RX1_C_MARK, CAN0_RX_E_MARK,
+ SDA4_B_MARK, HRX1_D_MARK, SCIFB0_RXD_D_MARK,
+ VI1_HSYNC_N_MARK, AVB_RXD0_MARK, TS_SDATA0_B_MARK,
+ TX4_B_MARK, SCIFA4_TXD_B_MARK,
+ VI1_VSYNC_N_MARK, AVB_RXD1_MARK, TS_SCK0_B_MARK,
+ RX4_B_MARK, SCIFA4_RXD_B_MARK,
+ VI1_CLKENB_MARK, AVB_RXD2_MARK, TS_SDEN0_B_MARK,
+ VI1_FIELD_MARK, AVB_RXD3_MARK, TS_SPSYNC0_B_MARK,
+ VI1_CLK_MARK, AVB_RXD4_MARK, VI1_DATA0_MARK, AVB_RXD5_MARK,
+ VI1_DATA1_MARK, AVB_RXD6_MARK, VI1_DATA2_MARK, AVB_RXD7_MARK,
+ VI1_DATA3_MARK, AVB_RX_ER_MARK, VI1_DATA4_MARK, AVB_MDIO_MARK,
+ VI1_DATA5_MARK, AVB_RX_DV_MARK, VI1_DATA6_MARK, AVB_MAGIC_MARK,
+ VI1_DATA7_MARK, AVB_MDC_MARK,
+ ETH_MDIO_MARK, AVB_RX_CLK_MARK, SCL2_C_MARK,
+ ETH_CRS_DV_MARK, AVB_LINK_MARK, SDA2_C_MARK,
+
+ /* IPSR12 */
+ ETH_RX_ER_MARK, AVB_CRS_MARK, SCL3_MARK, SCL7_MARK,
+ ETH_RXD0_MARK, AVB_PHY_INT_MARK, SDA3_MARK, SDA7_MARK,
+ ETH_RXD1_MARK, AVB_GTXREFCLK_MARK, CAN0_TX_C_MARK,
+ SCL2_D_MARK, MSIOF1_RXD_E_MARK,
+ ETH_LINK_MARK, AVB_TXD0_MARK, CAN0_RX_C_MARK,
+ SDA2_D_MARK, MSIOF1_SCK_E_MARK,
+ ETH_REFCLK_MARK, AVB_TXD1_MARK, SCIFA3_RXD_B_MARK,
+ CAN1_RX_C_MARK, MSIOF1_SYNC_E_MARK,
+ ETH_TXD1_MARK, AVB_TXD2_MARK, SCIFA3_TXD_B_MARK,
+ CAN1_TX_C_MARK, MSIOF1_TXD_E_MARK,
+ ETH_TX_EN_MARK, AVB_TXD3_MARK, TCLK1_B_MARK, CAN_CLK_B_MARK,
+ ETH_MAGIC_MARK, AVB_TXD4_MARK, IETX_C_MARK,
+ ETH_TXD0_MARK, AVB_TXD5_MARK, IECLK_C_MARK,
+ ETH_MDC_MARK, AVB_TXD6_MARK, IERX_C_MARK,
+ STP_IVCXO27_0_MARK, AVB_TXD7_MARK, SCIFB2_TXD_D_MARK,
+ ADIDATA_B_MARK, MSIOF0_SYNC_C_MARK,
+ STP_ISCLK_0_MARK, AVB_TX_EN_MARK, SCIFB2_RXD_D_MARK,
+ ADICS_SAMP_B_MARK, MSIOF0_SCK_C_MARK,
+
+ /* IPSR13 */
+ STP_ISD_0_MARK, AVB_TX_ER_MARK, SCIFB2_SCK_C_MARK,
+ ADICLK_B_MARK, MSIOF0_SS1_C_MARK,
+ STP_ISEN_0_MARK, AVB_TX_CLK_MARK, ADICHS0_B_MARK, MSIOF0_SS2_C_MARK,
+ STP_ISSYNC_0_MARK, AVB_COL_MARK, ADICHS1_B_MARK, MSIOF0_RXD_C_MARK,
+ STP_OPWM_0_MARK, AVB_GTX_CLK_MARK, PWM0_B_MARK,
+ ADICHS2_B_MARK, MSIOF0_TXD_C_MARK,
+ SD0_CLK_MARK, SPCLK_B_MARK, SD0_CMD_MARK, MOSI_IO0_B_MARK,
+ SD0_DATA0_MARK, MISO_IO1_B_MARK, SD0_DATA1_MARK, IO2_B_MARK,
+ SD0_DATA2_MARK, IO3_B_MARK, SD0_DATA3_MARK, SSL_B_MARK,
+ SD0_CD_MARK, MMC_D6_B_MARK, SIM0_RST_B_MARK, CAN0_RX_F_MARK,
+ SCIFA5_TXD_B_MARK, TX3_C_MARK,
+ SD0_WP_MARK, MMC_D7_B_MARK, SIM0_D_B_MARK, CAN0_TX_F_MARK,
+ SCIFA5_RXD_B_MARK, RX3_C_MARK,
+ SD1_CMD_MARK, REMOCON_B_MARK, SD1_DATA0_MARK, SPEEDIN_B_MARK,
+ SD1_DATA1_MARK, IETX_B_MARK, SD1_DATA2_MARK, IECLK_B_MARK,
+ SD1_DATA3_MARK, IERX_B_MARK,
+ SD1_CD_MARK, PWM0_MARK, TPU_TO0_MARK, SCL1_C_MARK,
+
+ /* IPSR14 */
+ SD1_WP_MARK, PWM1_B_MARK, SDA1_C_MARK,
+ SD2_CLK_MARK, MMC_CLK_MARK, SD2_CMD_MARK, MMC_CMD_MARK,
+ SD2_DATA0_MARK, MMC_D0_MARK, SD2_DATA1_MARK, MMC_D1_MARK,
+ SD2_DATA2_MARK, MMC_D2_MARK, SD2_DATA3_MARK, MMC_D3_MARK,
+ SD2_CD_MARK, MMC_D4_MARK, SCL8_C_MARK, TX5_B_MARK, SCIFA5_TXD_C_MARK,
+ SD2_WP_MARK, MMC_D5_MARK, SDA8_C_MARK, RX5_B_MARK, SCIFA5_RXD_C_MARK,
+ MSIOF0_SCK_MARK, RX2_C_MARK, ADIDATA_MARK,
+ VI1_CLK_C_MARK, VI1_G0_B_MARK,
+ MSIOF0_SYNC_MARK, TX2_C_MARK, ADICS_SAMP_MARK,
+ VI1_CLKENB_C_MARK, VI1_G1_B_MARK,
+ MSIOF0_TXD_MARK, ADICLK_MARK, VI1_FIELD_C_MARK, VI1_G2_B_MARK,
+ MSIOF0_RXD_MARK, ADICHS0_MARK, VI1_DATA0_C_MARK, VI1_G3_B_MARK,
+ MSIOF0_SS1_MARK, MMC_D6_MARK, ADICHS1_MARK, TX0_E_MARK,
+ VI1_HSYNC_N_C_MARK, SCL7_C_MARK, VI1_G4_B_MARK,
+ MSIOF0_SS2_MARK, MMC_D7_MARK, ADICHS2_MARK, RX0_E_MARK,
+ VI1_VSYNC_N_C_MARK, SDA7_C_MARK, VI1_G5_B_MARK,
+
+ /* IPSR15 */
+ SIM0_RST_MARK, IETX_MARK, CAN1_TX_D_MARK,
+ SIM0_CLK_MARK, IECLK_MARK, CAN_CLK_C_MARK,
+ SIM0_D_MARK, IERX_MARK, CAN1_RX_D_MARK,
+ GPS_CLK_MARK, DU1_DOTCLKIN_C_MARK, AUDIO_CLKB_B_MARK,
+ PWM5_B_MARK, SCIFA3_TXD_C_MARK,
+ GPS_SIGN_MARK, TX4_C_MARK, SCIFA4_TXD_C_MARK, PWM5_MARK,
+ VI1_G6_B_MARK, SCIFA3_RXD_C_MARK,
+ GPS_MAG_MARK, RX4_C_MARK, SCIFA4_RXD_C_MARK, PWM6_MARK,
+ VI1_G7_B_MARK, SCIFA3_SCK_C_MARK,
+ HCTS0_N_MARK, SCIFB0_CTS_N_MARK, GLO_I0_C_MARK,
+ TCLK1_MARK, VI1_DATA1_C_MARK,
+ HRTS0_N_MARK, SCIFB0_RTS_N_MARK, GLO_I1_C_MARK, VI1_DATA2_C_MARK,
+ HSCK0_MARK, SCIFB0_SCK_MARK, GLO_Q0_C_MARK, CAN_CLK_MARK,
+ TCLK2_MARK, VI1_DATA3_C_MARK,
+ HRX0_MARK, SCIFB0_RXD_MARK, GLO_Q1_C_MARK,
+ CAN0_RX_B_MARK, VI1_DATA4_C_MARK,
+ HTX0_MARK, SCIFB0_TXD_MARK, GLO_SCLK_C_MARK,
+ CAN0_TX_B_MARK, VI1_DATA5_C_MARK,
+
+ /* IPSR16 */
+ HRX1_MARK, SCIFB1_RXD_MARK, VI1_R0_B_MARK,
+ GLO_SDATA_C_MARK, VI1_DATA6_C_MARK,
+ HTX1_MARK, SCIFB1_TXD_MARK, VI1_R1_B_MARK,
+ GLO_SS_C_MARK, VI1_DATA7_C_MARK,
+ HSCK1_MARK, SCIFB1_SCK_MARK, MLB_CK_MARK, GLO_RFON_C_MARK,
+ HCTS1_N_MARK, SCIFB1_CTS_N_MARK, MLB_SIG_MARK, CAN1_TX_B_MARK,
+ HRTS1_N_MARK, SCIFB1_RTS_N_MARK, MLB_DAT_MARK, CAN1_RX_B_MARK,
+ PINMUX_MARK_END,
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
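+ /*
+  * Single-function pins are listed with plain PINMUX_DATA() entries.
+  * Functions selected through an IPSRx field use PINMUX_IPSR_DATA(),
+  * and those that also require a MOD_SELx setting use
+  * PINMUX_IPSR_MODSEL_DATA().
+  */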
+ PINMUX_DATA(EX_CS0_N_MARK, FN_EX_CS0_N),
+ PINMUX_DATA(RD_N_MARK, FN_RD_N),
+ PINMUX_DATA(AUDIO_CLKA_MARK, FN_AUDIO_CLKA),
+ PINMUX_DATA(VI0_CLK_MARK, FN_VI0_CLK),
+ PINMUX_DATA(VI0_DATA0_VI0_B0_MARK, FN_VI0_DATA0_VI0_B0),
+ PINMUX_DATA(VI0_DATA1_VI0_B1_MARK, FN_VI0_DATA1_VI0_B1),
+ PINMUX_DATA(VI0_DATA2_VI0_B2_MARK, FN_VI0_DATA2_VI0_B2),
+ PINMUX_DATA(VI0_DATA4_VI0_B4_MARK, FN_VI0_DATA4_VI0_B4),
+ PINMUX_DATA(VI0_DATA5_VI0_B5_MARK, FN_VI0_DATA5_VI0_B5),
+ PINMUX_DATA(VI0_DATA6_VI0_B6_MARK, FN_VI0_DATA6_VI0_B6),
+ PINMUX_DATA(VI0_DATA7_VI0_B7_MARK, FN_VI0_DATA7_VI0_B7),
+ PINMUX_DATA(USB0_PWEN_MARK, FN_USB0_PWEN),
+ PINMUX_DATA(USB0_OVC_MARK, FN_USB0_OVC),
+ PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
+ PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
+ PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
+
+ /* IPSR0 */
+ PINMUX_IPSR_DATA(IP0_0, D0),
+ PINMUX_IPSR_DATA(IP0_1, D1),
+ PINMUX_IPSR_DATA(IP0_2, D2),
+ PINMUX_IPSR_DATA(IP0_3, D3),
+ PINMUX_IPSR_DATA(IP0_4, D4),
+ PINMUX_IPSR_DATA(IP0_5, D5),
+ PINMUX_IPSR_DATA(IP0_6, D6),
+ PINMUX_IPSR_DATA(IP0_7, D7),
+ PINMUX_IPSR_DATA(IP0_8, D8),
+ PINMUX_IPSR_DATA(IP0_9, D9),
+ PINMUX_IPSR_DATA(IP0_10, D10),
+ PINMUX_IPSR_DATA(IP0_11, D11),
+ PINMUX_IPSR_DATA(IP0_12, D12),
+ PINMUX_IPSR_DATA(IP0_13, D13),
+ PINMUX_IPSR_DATA(IP0_14, D14),
+ PINMUX_IPSR_DATA(IP0_15, D15),
+ PINMUX_IPSR_DATA(IP0_18_16, A0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SCL0_C, SEL_IIC0_2),
+ PINMUX_IPSR_DATA(IP0_18_16, PWM2_B),
+ PINMUX_IPSR_DATA(IP0_20_19, A1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
+ PINMUX_IPSR_DATA(IP0_22_21, A2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1),
+ PINMUX_IPSR_DATA(IP0_24_23, A3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1),
+ PINMUX_IPSR_DATA(IP0_26_25, A4),
+ PINMUX_IPSR_MODSEL_DATA(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1),
+ PINMUX_IPSR_DATA(IP0_28_27, A5),
+ PINMUX_IPSR_MODSEL_DATA(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1),
+ PINMUX_IPSR_DATA(IP0_30_29, A6),
+ PINMUX_IPSR_MODSEL_DATA(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0),
+
+ /* IPSR1 */
+ PINMUX_IPSR_DATA(IP1_1_0, A7),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
+ PINMUX_IPSR_DATA(IP1_3_2, A8),
+ PINMUX_IPSR_MODSEL_DATA(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_3_2, SCL0, SEL_IIC0_0),
+ PINMUX_IPSR_DATA(IP1_5_4, A9),
+ PINMUX_IPSR_MODSEL_DATA(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_5_4, SDA0, SEL_IIC0_0),
+ PINMUX_IPSR_DATA(IP1_7_6, A10),
+ PINMUX_IPSR_MODSEL_DATA(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
+ PINMUX_IPSR_DATA(IP1_10_8, A11),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_8, SCL3_D, SEL_IIC3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
+ PINMUX_IPSR_DATA(IP1_13_11, A12),
+ PINMUX_IPSR_MODSEL_DATA(IP1_13_11, FMCLK, SEL_FM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_13_11, SDA3_D, SEL_IIC3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
+ PINMUX_IPSR_DATA(IP1_16_14, A13),
+ PINMUX_IPSR_MODSEL_DATA(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_16_14, BPFCLK, SEL_FM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3),
+ PINMUX_IPSR_DATA(IP1_19_17, A14),
+ PINMUX_IPSR_MODSEL_DATA(IP1_19_17, ATADIR0_N_C, SEL_LBS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_19_17, FMIN, SEL_FM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_19_17, FMIN_C, SEL_FM_2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3),
+ PINMUX_IPSR_DATA(IP1_22_20, A15),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, BPFCLK_C, SEL_FM_2),
+ PINMUX_IPSR_DATA(IP1_25_23, A16),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, DREQ2_B, SEL_LBS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FMCLK_C, SEL_FM_2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
+ PINMUX_IPSR_DATA(IP1_28_26, A17),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, DACK2_B, SEL_LBS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SDA0_C, SEL_IIC0_2),
+ PINMUX_IPSR_DATA(IP1_31_29, A18),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, DREQ1, SEL_LBS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2),
+
+ /* IPSR2 */
+ PINMUX_IPSR_DATA(IP2_2_0, A19),
+ PINMUX_IPSR_DATA(IP2_2_0, DACK1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_0),
+ PINMUX_IPSR_DATA(IP2_4_3, A20),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SPCLK, SEL_QSP_0),
+ PINMUX_IPSR_DATA(IP2_6_5, A21),
+ PINMUX_IPSR_MODSEL_DATA(IP2_6_5, ATAWR0_N_B, SEL_LBS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_6_5, MOSI_IO0, SEL_QSP_0),
+ PINMUX_IPSR_DATA(IP2_9_7, A22),
+ PINMUX_IPSR_MODSEL_DATA(IP2_9_7, MISO_IO1, SEL_QSP_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_9_7, FMCLK_B, SEL_FM_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_9_7, TX0, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0),
+ PINMUX_IPSR_DATA(IP2_12_10, A23),
+ PINMUX_IPSR_MODSEL_DATA(IP2_12_10, IO2, SEL_QSP_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_12_10, BPFCLK_B, SEL_FM_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_12_10, RX0, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0),
+ PINMUX_IPSR_DATA(IP2_15_13, A24),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_13, DREQ2, SEL_LBS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_13, IO3, SEL_QSP_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_13, TX1, SEL_SCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0),
+ PINMUX_IPSR_DATA(IP2_18_16, A25),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DACK2, SEL_LBS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, SSL, SEL_QSP_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ1_C, SEL_LBS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, RX1, SEL_SCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
+ PINMUX_IPSR_DATA(IP2_20_19, CS0_N),
+ PINMUX_IPSR_MODSEL_DATA(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_20_19, SCL1, SEL_IIC1_0),
+ PINMUX_IPSR_DATA(IP2_22_21, CS1_N_A26),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_21, SDA1, SEL_IIC1_0),
+ PINMUX_IPSR_DATA(IP2_24_23, EX_CS1_N),
+ PINMUX_IPSR_MODSEL_DATA(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
+ PINMUX_IPSR_DATA(IP2_26_25, EX_CS2_N),
+ PINMUX_IPSR_MODSEL_DATA(IP2_26_25, ATAWR0_N, SEL_LBS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0),
+ PINMUX_IPSR_DATA(IP2_29_27, EX_CS3_N),
+ PINMUX_IPSR_MODSEL_DATA(IP2_29_27, ATADIR0_N, SEL_LBS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_29_27, ATAG0_N, SEL_LBS_0),
+ PINMUX_IPSR_DATA(IP2_29_27, EX_WAIT1),
+
+ /* IPSR3 */
+ PINMUX_IPSR_DATA(IP3_2_0, EX_CS4_N),
+ PINMUX_IPSR_MODSEL_DATA(IP3_2_0, ATARD0_N, SEL_LBS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0),
+ PINMUX_IPSR_DATA(IP3_2_0, EX_WAIT2),
+ PINMUX_IPSR_DATA(IP3_5_3, EX_CS5_N),
+ PINMUX_IPSR_DATA(IP3_5_3, ATACS00_N),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1),
+ PINMUX_IPSR_DATA(IP3_5_3, PWM1),
+ PINMUX_IPSR_DATA(IP3_5_3, TPU_TO1),
+ PINMUX_IPSR_DATA(IP3_8_6, BS_N),
+ PINMUX_IPSR_DATA(IP3_8_6, ATACS10_N),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, HTX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1),
+ PINMUX_IPSR_DATA(IP3_8_6, PWM2),
+ PINMUX_IPSR_DATA(IP3_8_6, TPU_TO2),
+ PINMUX_IPSR_DATA(IP3_11_9, RD_WR_N),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, HRX2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, FMIN_B, SEL_FM_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, DREQ1_D, SEL_LBS_1),
+ PINMUX_IPSR_DATA(IP3_13_12, WE0_N),
+ PINMUX_IPSR_MODSEL_DATA(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1),
+ PINMUX_IPSR_DATA(IP3_15_14, WE1_N),
+ PINMUX_IPSR_MODSEL_DATA(IP3_15_14, ATARD0_N_B, SEL_LBS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_15_14, HTX2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1),
+ PINMUX_IPSR_DATA(IP3_17_16, EX_WAIT0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1),
+ PINMUX_IPSR_DATA(IP3_19_18, DREQ0),
+ PINMUX_IPSR_DATA(IP3_19_18, PWM3),
+ PINMUX_IPSR_DATA(IP3_19_18, TPU_TO3),
+ PINMUX_IPSR_DATA(IP3_21_20, DACK0),
+ PINMUX_IPSR_DATA(IP3_21_20, DRACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_21_20, REMOCON, SEL_RCN_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SPEEDIN, SEL_RSP_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HSCK0_C, SEL_HSCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HSCK2_C, SEL_HSCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SCIFB0_SCK_B, SEL_SCIFB_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SCIFB2_SCK_B, SEL_SCIFB2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, DREQ2_C, SEL_LBS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HTX2_D, SEL_HSCIF2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SSI_SCK0129, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_27_25, HRX0_C, SEL_HSCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_27_25, HRX2_C, SEL_HSCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SCIFB0_RXD_C, SEL_SCIFB_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SCIFB2_RXD_C, SEL_SCIFB2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SSI_WS0129, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX0_C, SEL_HSCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SCIFB0_TXD_C, SEL_SCIFB_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SCIFB2_TXD_C, SEL_SCIFB2_2),
+
+ /* IPSR4 */
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SSI_SDATA0, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCL0_B, SEL_IIC0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCL7_B, SEL_IIC7_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, MSIOF2_SCK_C, SEL_SOF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK1, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SDA0_B, SEL_IIC0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SDA7_B, SEL_IIC7_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, MSIOF2_SYNC_C, SEL_SOF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, GLO_I0_D, SEL_GPS_3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SSI_WS1, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCL1_B, SEL_IIC1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCL8_B, SEL_IIC8_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, MSIOF2_TXD_C, SEL_SOF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, GLO_I1_D, SEL_GPS_3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SSI_SDATA1, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SDA1_B, SEL_IIC1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SDA8_B, SEL_IIC8_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
+ PINMUX_IPSR_DATA(IP4_12_10, SSI_SCK2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_12_10, SCL2, SEL_IIC2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP4_15_13, SSI_WS2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_15_13, SDA2, SEL_IIC2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_15_13, RX2_E, SEL_SCIF2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP4_18_16, SSI_SDATA2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_18_16, TX2_E, SEL_SCIF2_4),
+ PINMUX_IPSR_DATA(IP4_19, SSI_SCK34),
+ PINMUX_IPSR_DATA(IP4_20, SSI_WS34),
+ PINMUX_IPSR_DATA(IP4_21, SSI_SDATA3),
+ PINMUX_IPSR_DATA(IP4_23_22, SSI_SCK4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_23_22, GLO_SS_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP4_25_24, SSI_WS4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_25_24, GLO_RFON_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP4_27_26, SSI_SDATA4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3),
+ PINMUX_IPSR_DATA(IP4_30_28, SSI_SCK5),
+ PINMUX_IPSR_MODSEL_DATA(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_30_28, TS_SDATA0, SEL_TSIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_30_28, GLO_I0, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3),
+ PINMUX_IPSR_DATA(IP4_30_28, VI1_R2_B),
+
+ /* IPSR5 */
+ PINMUX_IPSR_DATA(IP5_2_0, SSI_WS5),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, TS_SCK0, SEL_TSIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, GLO_I1, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3),
+ PINMUX_IPSR_DATA(IP5_2_0, VI1_R3_B),
+ PINMUX_IPSR_DATA(IP5_5_3, SSI_SDATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TS_SDEN0, SEL_TSIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, GLO_Q0, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3),
+ PINMUX_IPSR_DATA(IP5_5_3, VI1_R4_B),
+ PINMUX_IPSR_DATA(IP5_8_6, SSI_SCK6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, GLO_Q1, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3),
+ PINMUX_IPSR_DATA(IP5_8_6, VI1_R5_B),
+ PINMUX_IPSR_DATA(IP5_11_9, SSI_WS6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, GLO_SCLK, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3),
+ PINMUX_IPSR_DATA(IP5_11_9, VI1_R6_B),
+ PINMUX_IPSR_DATA(IP5_14_12, SSI_SDATA6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, GLO_SDATA, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP5_14_12, VI1_R7_B),
+ PINMUX_IPSR_MODSEL_DATA(IP5_16_15, SSI_SCK78, SEL_SSI7_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_16_15, GLO_SS, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_19_17, SSI_WS78, SEL_SSI7_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_19_17, TX0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_19_17, STP_ISD_0_B, SEL_SSP_1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_19_17, GLO_RFON, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_21_20, SSI_SDATA7, SEL_SSI7_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_21_20, RX0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_21_20, STP_ISEN_0_B, SEL_SSP_1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_22, SSI_SDATA8, SEL_SSI8_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_22, TX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_22, STP_ISSYNC_0_B, SEL_SSP_1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_25_24, SSI_SCK9, SEL_SSI9_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_25_24, RX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_25_24, GLO_SCLK_D, SEL_GPS_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_28_26, SSI_WS9, SEL_SSI9_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_28_26, TX3_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_28_26, CAN0_TX_D, SEL_CAN0_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_28_26, GLO_SDATA_D, SEL_GPS_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_31_29, SSI_SDATA9, SEL_SSI9_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_31_29, RX3_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_31_29, CAN0_RX_D, SEL_CAN0_3),
+
+ /* IPSR6 */
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, AUDIO_CLKB, SEL_ADG_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, BPFCLK_E, SEL_FM_4),
+ PINMUX_IPSR_DATA(IP6_5_3, AUDIO_CLKC),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, RX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, FMIN_E, SEL_FM_4),
+ PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, TX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
+ PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
+ PINMUX_IPSR_DATA(IP6_9_8, INTC_IRQ0_N),
+ PINMUX_IPSR_DATA(IP6_11_10, IRQ1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2),
+ PINMUX_IPSR_DATA(IP6_11_10, INTC_IRQ1_N),
+ PINMUX_IPSR_DATA(IP6_13_12, IRQ2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
+ PINMUX_IPSR_DATA(IP6_13_12, INTC_IRQ2_N),
+ PINMUX_IPSR_DATA(IP6_15_14, IRQ3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCL4_C, SEL_IIC4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
+ PINMUX_IPSR_DATA(IP6_15_14, INTC_IRQ3_N),
+ PINMUX_IPSR_DATA(IP6_18_16, IRQ4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_18_16, SDA4_C, SEL_IIC4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
+ PINMUX_IPSR_DATA(IP6_18_16, INTC_IRQ4_N),
+ PINMUX_IPSR_DATA(IP6_20_19, IRQ5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_19, SCL1_E, SEL_IIC1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
+ PINMUX_IPSR_DATA(IP6_23_21, IRQ6),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, SDA1_E, SEL_IIC1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
+ PINMUX_IPSR_DATA(IP6_26_24, IRQ7),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_24, GPS_CLK_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_24, GPS_CLK_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP6_29_27, IRQ8),
+ PINMUX_IPSR_MODSEL_DATA(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_29_27, GPS_SIGN_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_29_27, GPS_SIGN_D, SEL_GPS_3),
+
+ /* IPSR7 */
+ PINMUX_IPSR_DATA(IP7_2_0, IRQ9),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, GPS_MAG_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, GPS_MAG_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP7_5_3, DU1_DR0),
+ PINMUX_IPSR_DATA(IP7_5_3, LCDOUT0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, VI1_DATA0_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1),
+ PINMUX_IPSR_DATA(IP7_8_6, DU1_DR1),
+ PINMUX_IPSR_DATA(IP7_8_6, LCDOUT1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, VI1_DATA1_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1),
+ PINMUX_IPSR_DATA(IP7_10_9, DU1_DR2),
+ PINMUX_IPSR_DATA(IP7_10_9, LCDOUT2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1),
+ PINMUX_IPSR_DATA(IP7_12_11, DU1_DR3),
+ PINMUX_IPSR_DATA(IP7_12_11, LCDOUT3),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1),
+ PINMUX_IPSR_DATA(IP7_14_13, DU1_DR4),
+ PINMUX_IPSR_DATA(IP7_14_13, LCDOUT4),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1),
+ PINMUX_IPSR_DATA(IP7_16_15, DU1_DR5),
+ PINMUX_IPSR_DATA(IP7_16_15, LCDOUT5),
+ PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1),
+ PINMUX_IPSR_DATA(IP7_18_17, DU1_DR6),
+ PINMUX_IPSR_DATA(IP7_18_17, LCDOUT6),
+ PINMUX_IPSR_MODSEL_DATA(IP7_18_17, SSI_WS1_B, SEL_SSI1_1),
+ PINMUX_IPSR_DATA(IP7_20_19, DU1_DR7),
+ PINMUX_IPSR_DATA(IP7_20_19, LCDOUT7),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1),
+ PINMUX_IPSR_DATA(IP7_23_21, DU1_DG0),
+ PINMUX_IPSR_DATA(IP7_23_21, LCDOUT8),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, VI1_DATA2_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1),
+ PINMUX_IPSR_DATA(IP7_26_24, DU1_DG1),
+ PINMUX_IPSR_DATA(IP7_26_24, LCDOUT9),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, VI1_DATA3_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1),
+ PINMUX_IPSR_DATA(IP7_29_27, DU1_DG2),
+ PINMUX_IPSR_DATA(IP7_29_27, LCDOUT10),
+ PINMUX_IPSR_MODSEL_DATA(IP7_29_27, VI1_DATA4_B, SEL_VI1_1),
+ PINMUX_IPSR_DATA(IP7_29_27, SCIF1_SCK_B),
+ PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1),
+
+ /* IPSR8 */
+ PINMUX_IPSR_DATA(IP8_2_0, DU1_DG3),
+ PINMUX_IPSR_DATA(IP8_2_0, LCDOUT11),
+ PINMUX_IPSR_MODSEL_DATA(IP8_2_0, VI1_DATA5_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SSI_WS78_B, SEL_SSI7_1),
+ PINMUX_IPSR_DATA(IP8_5_3, DU1_DG4),
+ PINMUX_IPSR_DATA(IP8_5_3, LCDOUT12),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_3, VI1_DATA6_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_3, HRX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1),
+ PINMUX_IPSR_DATA(IP8_8_6, DU1_DG5),
+ PINMUX_IPSR_DATA(IP8_8_6, LCDOUT13),
+ PINMUX_IPSR_MODSEL_DATA(IP8_8_6, VI1_DATA7_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1),
+ PINMUX_IPSR_DATA(IP8_11_9, DU1_DG6),
+ PINMUX_IPSR_DATA(IP8_11_9, LCDOUT14),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1),
+ PINMUX_IPSR_DATA(IP8_14_12, DU1_DG7),
+ PINMUX_IPSR_DATA(IP8_14_12, LCDOUT15),
+ PINMUX_IPSR_MODSEL_DATA(IP8_14_12, HTX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SSI_WS9_B, SEL_SSI9_1),
+ PINMUX_IPSR_DATA(IP8_17_15, DU1_DB0),
+ PINMUX_IPSR_DATA(IP8_17_15, LCDOUT16),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_15, VI1_CLK_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_15, TX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1),
+ PINMUX_IPSR_DATA(IP8_20_18, DU1_DB1),
+ PINMUX_IPSR_DATA(IP8_20_18, LCDOUT17),
+ PINMUX_IPSR_MODSEL_DATA(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_20_18, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1),
+ PINMUX_IPSR_DATA(IP8_23_21, DU1_DB2),
+ PINMUX_IPSR_DATA(IP8_23_21, LCDOUT18),
+ PINMUX_IPSR_MODSEL_DATA(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1),
+ PINMUX_IPSR_DATA(IP8_23_21, SCIF2_SCK_B),
+ PINMUX_IPSR_MODSEL_DATA(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1),
+ PINMUX_IPSR_DATA(IP8_25_24, DU1_DB3),
+ PINMUX_IPSR_DATA(IP8_25_24, LCDOUT19),
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1),
+ PINMUX_IPSR_DATA(IP8_27_26, DU1_DB4),
+ PINMUX_IPSR_DATA(IP8_27_26, LCDOUT20),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, VI1_FIELD_B, SEL_VI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CAN1_RX, SEL_CAN1_0),
+ PINMUX_IPSR_DATA(IP8_30_28, DU1_DB5),
+ PINMUX_IPSR_DATA(IP8_30_28, LCDOUT21),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, TX3, SEL_SCIF3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, CAN1_TX, SEL_CAN1_0),
+
+ /* IPSR9 */
+ PINMUX_IPSR_DATA(IP9_2_0, DU1_DB6),
+ PINMUX_IPSR_DATA(IP9_2_0, LCDOUT22),
+ PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCL3_C, SEL_IIC3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RX3, SEL_SCIF3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
+ PINMUX_IPSR_DATA(IP9_5_3, DU1_DB7),
+ PINMUX_IPSR_DATA(IP9_5_3, LCDOUT23),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SDA3_C, SEL_IIC3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
+ PINMUX_IPSR_DATA(IP9_6, QSTVA_QVS),
+ PINMUX_IPSR_DATA(IP9_7, DU1_DOTCLKOUT0),
+ PINMUX_IPSR_DATA(IP9_7, QCLK),
+ PINMUX_IPSR_DATA(IP9_10_8, DU1_DOTCLKOUT1),
+ PINMUX_IPSR_DATA(IP9_10_8, QSTVB_QVE),
+ PINMUX_IPSR_MODSEL_DATA(IP9_10_8, CAN0_TX, SEL_CAN0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_10_8, TX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_10_8, SCL2_B, SEL_IIC2_1),
+ PINMUX_IPSR_DATA(IP9_10_8, PWM4),
+ PINMUX_IPSR_DATA(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_DATA(IP9_11, QSTH_QHS),
+ PINMUX_IPSR_DATA(IP9_12, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_DATA(IP9_12, QSTB_QHE),
+ PINMUX_IPSR_DATA(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_DATA(IP9_15_13, QCPV_QDE),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_13, CAN0_RX, SEL_CAN0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_13, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_13, SDA2_B, SEL_IIC2_1),
+ PINMUX_IPSR_DATA(IP9_16, DU1_DISP),
+ PINMUX_IPSR_DATA(IP9_16, QPOLA),
+ PINMUX_IPSR_DATA(IP9_18_17, DU1_CDE),
+ PINMUX_IPSR_DATA(IP9_18_17, QPOLB),
+ PINMUX_IPSR_DATA(IP9_18_17, PWM4_B),
+ PINMUX_IPSR_DATA(IP9_20_19, VI0_CLKENB),
+ PINMUX_IPSR_MODSEL_DATA(IP9_20_19, TX4, SEL_SCIF4_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_DATA(IP9_22_21, VI0_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP9_22_21, RX4, SEL_SCIF4_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_DATA(IP9_24_23, VI0_HSYNC_N),
+ PINMUX_IPSR_MODSEL_DATA(IP9_24_23, TX5, SEL_SCIF5_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_DATA(IP9_26_25, VI0_VSYNC_N),
+ PINMUX_IPSR_MODSEL_DATA(IP9_26_25, RX5, SEL_SCIF5_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_DATA(IP9_28_27, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
+ PINMUX_IPSR_DATA(IP9_31_29, VI0_G0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCL8, SEL_IIC8_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCL4, SEL_IIC4_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
+ PINMUX_IPSR_DATA(IP9_31_29, ATAWR1_N),
+
+ /* IPSR10 */
+ PINMUX_IPSR_DATA(IP10_2_0, VI0_G1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SDA8, SEL_IIC8_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SDA4, SEL_IIC4_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
+ PINMUX_IPSR_DATA(IP10_2_0, ATADIR1_N),
+ PINMUX_IPSR_DATA(IP10_5_3, VI0_G2),
+ PINMUX_IPSR_DATA(IP10_5_3, VI2_HSYNC_N),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCL3_B, SEL_IIC3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK2, SEL_HSCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
+ PINMUX_IPSR_DATA(IP10_5_3, ATARD1_N),
+ PINMUX_IPSR_DATA(IP10_8_6, VI0_G3),
+ PINMUX_IPSR_DATA(IP10_8_6, VI2_VSYNC_N),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SDA3_B, SEL_IIC3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX2, SEL_HSCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
+ PINMUX_IPSR_DATA(IP10_8_6, ATACS01_N),
+ PINMUX_IPSR_DATA(IP10_11_9, VI0_G4),
+ PINMUX_IPSR_DATA(IP10_11_9, VI2_CLKENB),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX2, SEL_HSCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3),
+ PINMUX_IPSR_DATA(IP10_14_12, VI0_G5),
+ PINMUX_IPSR_DATA(IP10_14_12, VI2_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, FMCLK_D, SEL_FM_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, CAN0_TX_E, SEL_CAN0_4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HTX1_D, SEL_HSCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3),
+ PINMUX_IPSR_DATA(IP10_16_15, VI0_G6),
+ PINMUX_IPSR_DATA(IP10_16_15, VI2_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_16_15, BPFCLK_D, SEL_FM_3),
+ PINMUX_IPSR_DATA(IP10_18_17, VI0_G7),
+ PINMUX_IPSR_DATA(IP10_18_17, VI2_DATA0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_17, FMIN_D, SEL_FM_3),
+ PINMUX_IPSR_DATA(IP10_21_19, VI0_R0),
+ PINMUX_IPSR_DATA(IP10_21_19, VI2_DATA1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, GLO_I0_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_DATA(IP10_21_19, ATACS11_N),
+ PINMUX_IPSR_DATA(IP10_24_22, VI0_R1),
+ PINMUX_IPSR_DATA(IP10_24_22, VI2_DATA2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_24_22, GLO_I1_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_DATA(IP10_24_22, ATAG1_N),
+ PINMUX_IPSR_DATA(IP10_26_25, VI0_R2),
+ PINMUX_IPSR_DATA(IP10_26_25, VI2_DATA3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_26_25, GLO_Q0_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_DATA(IP10_28_27, VI0_R3),
+ PINMUX_IPSR_DATA(IP10_28_27, VI2_DATA4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_28_27, GLO_Q1_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_DATA(IP10_31_29, VI0_R4),
+ PINMUX_IPSR_DATA(IP10_31_29, VI2_DATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, TX0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL1_D, SEL_IIC1_3),
+
+ /* IPSR11 */
+ PINMUX_IPSR_DATA(IP11_2_0, VI0_R5),
+ PINMUX_IPSR_DATA(IP11_2_0, VI2_DATA6),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2_0, RX0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SDA1_D, SEL_IIC1_3),
+ PINMUX_IPSR_DATA(IP11_5_3, VI0_R6),
+ PINMUX_IPSR_DATA(IP11_5_3, VI2_DATA7),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, GLO_SS_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, TX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SCL4_B, SEL_IIC4_1),
+ PINMUX_IPSR_DATA(IP11_8_6, VI0_R7),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, RX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SDA4_B, SEL_IIC4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_11_9, AVB_RXD0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_9, TX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_14_12, AVB_RXD1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_14_12, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_16_15, VI1_CLKENB, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_16_15, AVB_RXD2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_17, VI1_FIELD, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_18_17, AVB_RXD3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_19, VI1_CLK, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_19, AVB_RXD4),
+ PINMUX_IPSR_MODSEL_DATA(IP11_20, VI1_DATA0, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_20, AVB_RXD5),
+ PINMUX_IPSR_MODSEL_DATA(IP11_21, VI1_DATA1, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_21, AVB_RXD6),
+ PINMUX_IPSR_MODSEL_DATA(IP11_22, VI1_DATA2, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_22, AVB_RXD7),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23, VI1_DATA3, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_23, AVB_RX_ER),
+ PINMUX_IPSR_MODSEL_DATA(IP11_24, VI1_DATA4, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_24, AVB_MDIO),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25, VI1_DATA5, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_25, AVB_RX_DV),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26, VI1_DATA6, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_26, AVB_MAGIC),
+ PINMUX_IPSR_MODSEL_DATA(IP11_27, VI1_DATA7, SEL_VI1_0),
+ PINMUX_IPSR_DATA(IP11_27, AVB_MDC),
+ PINMUX_IPSR_DATA(IP11_29_28, ETH_MDIO),
+ PINMUX_IPSR_DATA(IP11_29_28, AVB_RX_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP11_29_28, SCL2_C, SEL_IIC2_2),
+ PINMUX_IPSR_DATA(IP11_31_30, ETH_CRS_DV),
+ PINMUX_IPSR_DATA(IP11_31_30, AVB_LINK),
+ PINMUX_IPSR_MODSEL_DATA(IP11_31_30, SDA2_C, SEL_IIC2_2),
+
+ /* IPSR12 */
+ PINMUX_IPSR_DATA(IP12_1_0, ETH_RX_ER),
+ PINMUX_IPSR_DATA(IP12_1_0, AVB_CRS),
+ PINMUX_IPSR_MODSEL_DATA(IP12_1_0, SCL3, SEL_IIC3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP12_1_0, SCL7, SEL_IIC7_0),
+ PINMUX_IPSR_DATA(IP12_3_2, ETH_RXD0),
+ PINMUX_IPSR_DATA(IP12_3_2, AVB_PHY_INT),
+ PINMUX_IPSR_MODSEL_DATA(IP12_3_2, SDA3, SEL_IIC3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP12_3_2, SDA7, SEL_IIC7_0),
+ PINMUX_IPSR_DATA(IP12_6_4, ETH_RXD1),
+ PINMUX_IPSR_DATA(IP12_6_4, AVB_GTXREFCLK),
+ PINMUX_IPSR_MODSEL_DATA(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_6_4, SCL2_D, SEL_IIC2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
+ PINMUX_IPSR_DATA(IP12_9_7, ETH_LINK),
+ PINMUX_IPSR_DATA(IP12_9_7, AVB_TXD0),
+ PINMUX_IPSR_MODSEL_DATA(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_9_7, SDA2_D, SEL_IIC2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
+ PINMUX_IPSR_DATA(IP12_12_10, ETH_REFCLK),
+ PINMUX_IPSR_DATA(IP12_12_10, AVB_TXD1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_12_10, CAN1_RX_C, SEL_CAN1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4),
+ PINMUX_IPSR_DATA(IP12_15_13, ETH_TXD1),
+ PINMUX_IPSR_DATA(IP12_15_13, AVB_TXD2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_15_13, CAN1_TX_C, SEL_CAN1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4),
+ PINMUX_IPSR_DATA(IP12_17_16, ETH_TX_EN),
+ PINMUX_IPSR_DATA(IP12_17_16, AVB_TXD3),
+ PINMUX_IPSR_MODSEL_DATA(IP12_17_16, TCLK1_B, SEL_TMU1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1),
+ PINMUX_IPSR_DATA(IP12_19_18, ETH_MAGIC),
+ PINMUX_IPSR_DATA(IP12_19_18, AVB_TXD4),
+ PINMUX_IPSR_MODSEL_DATA(IP12_19_18, IETX_C, SEL_IEB_2),
+ PINMUX_IPSR_DATA(IP12_21_20, ETH_TXD0),
+ PINMUX_IPSR_DATA(IP12_21_20, AVB_TXD5),
+ PINMUX_IPSR_MODSEL_DATA(IP12_21_20, IECLK_C, SEL_IEB_2),
+ PINMUX_IPSR_DATA(IP12_23_22, ETH_MDC),
+ PINMUX_IPSR_DATA(IP12_23_22, AVB_TXD6),
+ PINMUX_IPSR_MODSEL_DATA(IP12_23_22, IERX_C, SEL_IEB_2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0),
+ PINMUX_IPSR_DATA(IP12_26_24, AVB_TXD7),
+ PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP12_26_24, ADIDATA_B, SEL_RAD_1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_29_27, STP_ISCLK_0, SEL_SSP_0),
+ PINMUX_IPSR_DATA(IP12_29_27, AVB_TX_EN),
+ PINMUX_IPSR_MODSEL_DATA(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2),
+
+ /* IPSR13 */
+ PINMUX_IPSR_MODSEL_DATA(IP13_2_0, STP_ISD_0, SEL_SSP_0),
+ PINMUX_IPSR_DATA(IP13_2_0, AVB_TX_ER),
+ PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP13_2_0, ADICLK_B, SEL_RAD_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP13_4_3, STP_ISEN_0, SEL_SSP_0),
+ PINMUX_IPSR_DATA(IP13_4_3, AVB_TX_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP13_4_3, ADICHS0_B, SEL_RAD_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0),
+ PINMUX_IPSR_DATA(IP13_6_5, AVB_COL),
+ PINMUX_IPSR_MODSEL_DATA(IP13_6_5, ADICHS1_B, SEL_RAD_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP13_9_7, STP_OPWM_0, SEL_SSP_0),
+ PINMUX_IPSR_DATA(IP13_9_7, AVB_GTX_CLK),
+ PINMUX_IPSR_DATA(IP13_9_7, PWM0_B),
+ PINMUX_IPSR_MODSEL_DATA(IP13_9_7, ADICHS2_B, SEL_RAD_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2),
+ PINMUX_IPSR_DATA(IP13_10, SD0_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP13_10, SPCLK_B, SEL_QSP_1),
+ PINMUX_IPSR_DATA(IP13_11, SD0_CMD),
+ PINMUX_IPSR_MODSEL_DATA(IP13_11, MOSI_IO0_B, SEL_QSP_1),
+ PINMUX_IPSR_DATA(IP13_12, SD0_DATA0),
+ PINMUX_IPSR_MODSEL_DATA(IP13_12, MISO_IO1_B, SEL_QSP_1),
+ PINMUX_IPSR_DATA(IP13_13, SD0_DATA1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_13, IO2_B, SEL_QSP_1),
+ PINMUX_IPSR_DATA(IP13_14, SD0_DATA2),
+ PINMUX_IPSR_MODSEL_DATA(IP13_14, IO3_B, SEL_QSP_1),
+ PINMUX_IPSR_DATA(IP13_15, SD0_DATA3),
+ PINMUX_IPSR_MODSEL_DATA(IP13_15, SSL_B, SEL_QSP_1),
+ PINMUX_IPSR_DATA(IP13_18_16, SD0_CD),
+ PINMUX_IPSR_MODSEL_DATA(IP13_18_16, MMC_D6_B, SEL_MMC_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SIM0_RST_B, SEL_SIM_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_18_16, CAN0_RX_F, SEL_CAN0_5),
+ PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_18_16, TX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_DATA(IP13_21_19, SD0_WP),
+ PINMUX_IPSR_MODSEL_DATA(IP13_21_19, MMC_D7_B, SEL_MMC_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_21_19, SIM0_D_B, SEL_SIM_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_21_19, CAN0_TX_F, SEL_CAN0_5),
+ PINMUX_IPSR_MODSEL_DATA(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_21_19, RX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_DATA(IP13_22, SD1_CMD),
+ PINMUX_IPSR_MODSEL_DATA(IP13_22, REMOCON_B, SEL_RCN_1),
+ PINMUX_IPSR_DATA(IP13_24_23, SD1_DATA0),
+ PINMUX_IPSR_MODSEL_DATA(IP13_24_23, SPEEDIN_B, SEL_RSP_1),
+ PINMUX_IPSR_DATA(IP13_25, SD1_DATA1),
+ PINMUX_IPSR_MODSEL_DATA(IP13_25, IETX_B, SEL_IEB_1),
+ PINMUX_IPSR_DATA(IP13_26, SD1_DATA2),
+ PINMUX_IPSR_MODSEL_DATA(IP13_26, IECLK_B, SEL_IEB_1),
+ PINMUX_IPSR_DATA(IP13_27, SD1_DATA3),
+ PINMUX_IPSR_MODSEL_DATA(IP13_27, IERX_B, SEL_IEB_1),
+ PINMUX_IPSR_DATA(IP13_30_28, SD1_CD),
+ PINMUX_IPSR_DATA(IP13_30_28, PWM0),
+ PINMUX_IPSR_DATA(IP13_30_28, TPU_TO0),
+ PINMUX_IPSR_MODSEL_DATA(IP13_30_28, SCL1_C, SEL_IIC1_2),
+
+ /* IPSR14 */
+ PINMUX_IPSR_DATA(IP14_1_0, SD1_WP),
+ PINMUX_IPSR_DATA(IP14_1_0, PWM1_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_1_0, SDA1_C, SEL_IIC1_2),
+ PINMUX_IPSR_DATA(IP14_2, SD2_CLK),
+ PINMUX_IPSR_DATA(IP14_2, MMC_CLK),
+ PINMUX_IPSR_DATA(IP14_3, SD2_CMD),
+ PINMUX_IPSR_DATA(IP14_3, MMC_CMD),
+ PINMUX_IPSR_DATA(IP14_4, SD2_DATA0),
+ PINMUX_IPSR_DATA(IP14_4, MMC_D0),
+ PINMUX_IPSR_DATA(IP14_5, SD2_DATA1),
+ PINMUX_IPSR_DATA(IP14_5, MMC_D1),
+ PINMUX_IPSR_DATA(IP14_6, SD2_DATA2),
+ PINMUX_IPSR_DATA(IP14_6, MMC_D2),
+ PINMUX_IPSR_DATA(IP14_7, SD2_DATA3),
+ PINMUX_IPSR_DATA(IP14_7, MMC_D3),
+ PINMUX_IPSR_DATA(IP14_10_8, SD2_CD),
+ PINMUX_IPSR_DATA(IP14_10_8, MMC_D4),
+ PINMUX_IPSR_MODSEL_DATA(IP14_10_8, SCL8_C, SEL_IIC8_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_10_8, TX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
+ PINMUX_IPSR_DATA(IP14_13_11, SD2_WP),
+ PINMUX_IPSR_DATA(IP14_13_11, MMC_D5),
+ PINMUX_IPSR_MODSEL_DATA(IP14_13_11, SDA8_C, SEL_IIC8_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_13_11, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_16_14, MSIOF0_SCK, SEL_SOF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_16_14, RX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_16_14, ADIDATA, SEL_RAD_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_16_14, VI1_CLK_C, SEL_VI1_2),
+ PINMUX_IPSR_DATA(IP14_16_14, VI1_G0_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_19_17, TX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_19_17, ADICS_SAMP, SEL_RAD_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2),
+ PINMUX_IPSR_DATA(IP14_19_17, VI1_G1_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_22_20, ADICLK, SEL_RAD_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_22_20, VI1_FIELD_C, SEL_VI1_2),
+ PINMUX_IPSR_DATA(IP14_22_20, VI1_G2_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_25_23, ADICHS0, SEL_RAD_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_25_23, VI1_DATA0_C, SEL_VI1_2),
+ PINMUX_IPSR_DATA(IP14_25_23, VI1_G3_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_28_26, MMC_D6, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_28_26, ADICHS1, SEL_RAD_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_28_26, TX0_E, SEL_SCIF0_4),
+ PINMUX_IPSR_MODSEL_DATA(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_28_26, SCL7_C, SEL_IIC7_2),
+ PINMUX_IPSR_DATA(IP14_28_26, VI1_G4_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_31_29, MMC_D7, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_31_29, ADICHS2, SEL_RAD_0),
+ PINMUX_IPSR_MODSEL_DATA(IP14_31_29, RX0_E, SEL_SCIF0_4),
+ PINMUX_IPSR_MODSEL_DATA(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_31_29, SDA7_C, SEL_IIC7_2),
+ PINMUX_IPSR_DATA(IP14_31_29, VI1_G5_B),
+
+ /* IPSR15 */
+ PINMUX_IPSR_MODSEL_DATA(IP15_1_0, SIM0_RST, SEL_SIM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_1_0, IETX, SEL_IEB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_1_0, CAN1_TX_D, SEL_CAN1_3),
+ PINMUX_IPSR_DATA(IP15_3_2, SIM0_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP15_3_2, IECLK, SEL_IEB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_5_4, SIM0_D, SEL_SIM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_5_4, IERX, SEL_IEB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_5_4, CAN1_RX_D, SEL_CAN1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, GPS_CLK, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1),
+ PINMUX_IPSR_DATA(IP15_8_6, PWM5_B),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_11_9, GPS_SIGN, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_11_9, TX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
+ PINMUX_IPSR_DATA(IP15_11_9, PWM5),
+ PINMUX_IPSR_DATA(IP15_11_9, VI1_G6_B),
+ PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_14_12, GPS_MAG, SEL_GPS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_14_12, RX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2),
+ PINMUX_IPSR_DATA(IP15_14_12, PWM6),
+ PINMUX_IPSR_DATA(IP15_14_12, VI1_G7_B),
+ PINMUX_IPSR_MODSEL_DATA(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_17_15, HCTS0_N, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_17_15, GLO_I0_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_17_15, TCLK1, SEL_TMU1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_17_15, VI1_DATA1_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_20_18, HRTS0_N, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_20_18, SCIFB0_RTS_N, SEL_SCIFB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_20_18, GLO_I1_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_20_18, VI1_DATA2_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_23_21, HSCK0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_23_21, GLO_Q0_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_23_21, CAN_CLK, SEL_CANCLK_0),
+ PINMUX_IPSR_DATA(IP15_23_21, TCLK2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_23_21, VI1_DATA3_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_26_24, HRX0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_26_24, GLO_Q1_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_26_24, CAN0_RX_B, SEL_CAN0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP15_26_24, VI1_DATA4_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_29_27, HTX0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_29_27, SCIFB0_TXD, SEL_SCIFB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_29_27, GLO_SCLK_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP15_29_27, CAN0_TX_B, SEL_CAN0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP15_29_27, VI1_DATA5_C, SEL_VI1_2),
+
+ /* IPSR16 */
+ PINMUX_IPSR_MODSEL_DATA(IP16_2_0, HRX1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0),
+ PINMUX_IPSR_DATA(IP16_2_0, VI1_R0_B),
+ PINMUX_IPSR_MODSEL_DATA(IP16_2_0, GLO_SDATA_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP16_2_0, VI1_DATA6_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP16_5_3, HTX1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0),
+ PINMUX_IPSR_DATA(IP16_5_3, VI1_R1_B),
+ PINMUX_IPSR_MODSEL_DATA(IP16_5_3, GLO_SS_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP16_7_6, HSCK1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
+ PINMUX_IPSR_DATA(IP16_7_6, MLB_CK),
+ PINMUX_IPSR_MODSEL_DATA(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
+ PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N),
+ PINMUX_IPSR_DATA(IP16_9_8, MLB_SIG),
+ PINMUX_IPSR_MODSEL_DATA(IP16_9_8, CAN1_TX_B, SEL_CAN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP16_11_10, HRTS1_N, SEL_HSCIF1_0),
+ PINMUX_IPSR_DATA(IP16_11_10, SCIFB1_RTS_N),
+ PINMUX_IPSR_DATA(IP16_11_10, MLB_DAT),
+ PINMUX_IPSR_MODSEL_DATA(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 2),
+ RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 13),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 10),
+ RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 21),
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+};
+static const unsigned int du_rgb666_mux[] = {
+ DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+ DU1_DR3_MARK, DU1_DR2_MARK,
+ DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+ DU1_DG3_MARK, DU1_DG2_MARK,
+ DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+ DU1_DB3_MARK, DU1_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 2),
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 13),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 10),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+ RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 21),
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+ DU1_DR3_MARK, DU1_DR2_MARK, DU1_DR1_MARK, DU1_DR0_MARK,
+ DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+ DU1_DG3_MARK, DU1_DG2_MARK, DU1_DG1_MARK, DU1_DG0_MARK,
+ DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+ DU1_DB3_MARK, DU1_DB2_MARK, DU1_DB1_MARK, DU1_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(3, 25),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+ DU1_DOTCLKOUT0_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(3, 26),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+ DU1_DOTCLKOUT1_MARK
+};
+static const unsigned int du_sync_1_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC, EXDISP/EXODDF/EXCDE */
+ RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 27),
+};
+static const unsigned int du_sync_1_mux[] = {
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+ DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK
+};
+static const unsigned int du_cde_disp_pins[] = {
+ /* CDE, DISP */
+ RCAR_GP_PIN(3, 31), RCAR_GP_PIN(3, 30),
+};
+static const unsigned int du_cde_disp_mux[] = {
+ DU1_CDE_MARK, DU1_DISP_MARK
+};
+static const unsigned int du0_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int du0_clk_in_mux[] = {
+ DU0_DOTCLKIN_MARK
+};
+static const unsigned int du1_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19), RCAR_GP_PIN(3, 24),
+};
+static const unsigned int du1_clk_in_mux[] = {
+ DU1_DOTCLKIN_C_MARK, DU1_DOTCLKIN_B_MARK, DU1_DOTCLKIN_MARK
+};
+/* - ETH -------------------------------------------------------------------- */
+static const unsigned int eth_link_pins[] = {
+ /* LINK */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int eth_link_mux[] = {
+ ETH_LINK_MARK,
+};
+static const unsigned int eth_magic_pins[] = {
+ /* MAGIC */
+ RCAR_GP_PIN(5, 22),
+};
+static const unsigned int eth_magic_mux[] = {
+ ETH_MAGIC_MARK,
+};
+static const unsigned int eth_mdio_pins[] = {
+ /* MDC, MDIO */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 13),
+};
+static const unsigned int eth_mdio_mux[] = {
+ ETH_MDC_MARK, ETH_MDIO_MARK,
+};
+static const unsigned int eth_rmii_pins[] = {
+ /* RXD[0:1], RX_ER, CRS_DV, TXD[0:1], TX_EN, REF_CLK */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 15),
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 23), RCAR_GP_PIN(5, 20),
+ RCAR_GP_PIN(5, 21), RCAR_GP_PIN(5, 19),
+};
+static const unsigned int eth_rmii_mux[] = {
+ ETH_RXD0_MARK, ETH_RXD1_MARK, ETH_RX_ER_MARK, ETH_CRS_DV_MARK,
+ ETH_TXD0_MARK, ETH_TXD1_MARK, ETH_TX_EN_MARK, ETH_REFCLK_MARK,
+};
+/* - INTC ------------------------------------------------------------------- */
+static const unsigned int intc_irq0_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int intc_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_irq1_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(7, 11),
+};
+static const unsigned int intc_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_irq2_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(7, 12),
+};
+static const unsigned int intc_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_irq3_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(7, 13),
+};
+static const unsigned int intc_irq3_mux[] = {
+ IRQ3_MARK,
+};
+/* - MMCIF ------------------------------------------------------------------ */
+static const unsigned int mmc_data1_pins[] = {
+ /* D[0] */
+ RCAR_GP_PIN(6, 18),
+};
+static const unsigned int mmc_data1_mux[] = {
+ MMC_D0_MARK,
+};
+static const unsigned int mmc_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(6, 18), RCAR_GP_PIN(6, 19),
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int mmc_data4_mux[] = {
+ MMC_D0_MARK, MMC_D1_MARK, MMC_D2_MARK, MMC_D3_MARK,
+};
+static const unsigned int mmc_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(6, 18), RCAR_GP_PIN(6, 19),
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 21),
+ RCAR_GP_PIN(6, 22), RCAR_GP_PIN(6, 23),
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int mmc_data8_mux[] = {
+ MMC_D0_MARK, MMC_D1_MARK, MMC_D2_MARK, MMC_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK, MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 17),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_CLK_MARK, MMC_CMD_MARK,
+};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 25),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 27),
+};
+static const unsigned int msiof0_rx_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+static const unsigned int msiof0_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 26),
+};
+static const unsigned int msiof0_tx_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 22),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 23),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 24),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 25),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 27),
+};
+static const unsigned int msiof1_rx_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+static const unsigned int msiof1_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 26),
+};
+static const unsigned int msiof1_tx_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 18),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 16),
+};
+static const unsigned int msiof2_rx_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+static const unsigned int msiof2_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof2_tx_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+};
+static const unsigned int scif0_data_b_mux[] = {
+ RX0_B_MARK, TX0_B_MARK,
+};
+static const unsigned int scif0_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 26), RCAR_GP_PIN(4, 25),
+};
+static const unsigned int scif0_data_c_mux[] = {
+ RX0_C_MARK, TX0_C_MARK,
+};
+static const unsigned int scif0_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22),
+};
+static const unsigned int scif0_data_d_mux[] = {
+ RX0_D_MARK, TX0_D_MARK,
+};
+static const unsigned int scif0_data_e_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 29), RCAR_GP_PIN(6, 28),
+};
+static const unsigned int scif0_data_e_mux[] = {
+ RX0_E_MARK, TX0_E_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int scif1_clk_b_mux[] = {
+ SCIF1_SCK_B_MARK,
+};
+static const unsigned int scif1_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 27),
+};
+static const unsigned int scif1_data_c_mux[] = {
+ RX1_C_MARK, TX1_C_MARK,
+};
+static const unsigned int scif1_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 25), RCAR_GP_PIN(2, 24),
+};
+static const unsigned int scif1_data_d_mux[] = {
+ RX1_D_MARK, TX1_D_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 30), RCAR_GP_PIN(2, 31),
+};
+static const unsigned int scif2_data_mux[] = {
+ RX2_MARK, TX2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
+static const unsigned int scif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 18),
+};
+static const unsigned int scif2_clk_b_mux[] = {
+ SCIF2_SCK_B_MARK,
+};
+static const unsigned int scif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int scif2_data_c_mux[] = {
+ RX2_C_MARK, TX2_C_MARK,
+};
+static const unsigned int scif2_data_e_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int scif2_data_e_mux[] = {
+ RX2_E_MARK, TX2_E_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 21),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 23),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCIF3_SCK_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+static const unsigned int scif3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 8),
+};
+static const unsigned int scif3_clk_b_mux[] = {
+ SCIF3_SCK_B_MARK,
+};
+static const unsigned int scif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int scif3_data_c_mux[] = {
+ RX3_C_MARK, TX3_C_MARK,
+};
+static const unsigned int scif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 27), RCAR_GP_PIN(2, 26),
+};
+static const unsigned int scif3_data_d_mux[] = {
+ RX3_D_MARK, TX3_D_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif4_data_b_mux[] = {
+ RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(7, 22), RCAR_GP_PIN(7, 21),
+};
+static const unsigned int scif4_data_c_mux[] = {
+ RX4_C_MARK, TX4_C_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 3),
+};
+static const unsigned int scif5_data_mux[] = {
+ RX5_MARK, TX5_MARK,
+};
+static const unsigned int scif5_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 23), RCAR_GP_PIN(6, 22),
+};
+static const unsigned int scif5_data_b_mux[] = {
+ RX5_B_MARK, TX5_B_MARK,
+};
+/* - SCIFA0 ----------------------------------------------------------------- */
+static const unsigned int scifa0_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scifa0_data_mux[] = {
+ SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
+};
+static const unsigned int scifa0_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+};
+static const unsigned int scifa0_data_b_mux[] = {
+ SCIFA0_RXD_B_MARK, SCIFA0_TXD_B_MARK
+};
+/* - SCIFA1 ----------------------------------------------------------------- */
+static const unsigned int scifa1_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scifa1_data_mux[] = {
+ SCIFA1_RXD_MARK, SCIFA1_TXD_MARK,
+};
+static const unsigned int scifa1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int scifa1_clk_mux[] = {
+ SCIFA1_SCK_MARK,
+};
+static const unsigned int scifa1_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int scifa1_data_b_mux[] = {
+ SCIFA1_RXD_B_MARK, SCIFA1_TXD_B_MARK,
+};
+static const unsigned int scifa1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scifa1_clk_b_mux[] = {
+ SCIFA1_SCK_B_MARK,
+};
+static const unsigned int scifa1_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scifa1_data_c_mux[] = {
+ SCIFA1_RXD_C_MARK, SCIFA1_TXD_C_MARK,
+};
+/* - SCIFA2 ----------------------------------------------------------------- */
+static const unsigned int scifa2_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(2, 30), RCAR_GP_PIN(2, 31),
+};
+static const unsigned int scifa2_data_mux[] = {
+ SCIFA2_RXD_MARK, SCIFA2_TXD_MARK,
+};
+static const unsigned int scifa2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 18),
+};
+static const unsigned int scifa2_clk_mux[] = {
+ SCIFA2_SCK_MARK,
+};
+static const unsigned int scifa2_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int scifa2_data_b_mux[] = {
+ SCIFA2_RXD_B_MARK, SCIFA2_TXD_B_MARK,
+};
+/* - SCIFA3 ----------------------------------------------------------------- */
+static const unsigned int scifa3_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 21),
+};
+static const unsigned int scifa3_data_mux[] = {
+ SCIFA3_RXD_MARK, SCIFA3_TXD_MARK,
+};
+static const unsigned int scifa3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 23),
+};
+static const unsigned int scifa3_clk_mux[] = {
+ SCIFA3_SCK_MARK,
+};
+static const unsigned int scifa3_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 20),
+};
+static const unsigned int scifa3_data_b_mux[] = {
+ SCIFA3_RXD_B_MARK, SCIFA3_TXD_B_MARK,
+};
+static const unsigned int scifa3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 8),
+};
+static const unsigned int scifa3_clk_b_mux[] = {
+ SCIFA3_SCK_B_MARK,
+};
+static const unsigned int scifa3_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(7, 21), RCAR_GP_PIN(7, 20),
+};
+static const unsigned int scifa3_data_c_mux[] = {
+ SCIFA3_RXD_C_MARK, SCIFA3_TXD_C_MARK,
+};
+static const unsigned int scifa3_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 22),
+};
+static const unsigned int scifa3_clk_c_mux[] = {
+ SCIFA3_SCK_C_MARK,
+};
+/* - SCIFA4 ----------------------------------------------------------------- */
+static const unsigned int scifa4_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int scifa4_data_mux[] = {
+ SCIFA4_RXD_MARK, SCIFA4_TXD_MARK,
+};
+static const unsigned int scifa4_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scifa4_data_b_mux[] = {
+ SCIFA4_RXD_B_MARK, SCIFA4_TXD_B_MARK,
+};
+static const unsigned int scifa4_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(7, 22), RCAR_GP_PIN(7, 21),
+};
+static const unsigned int scifa4_data_c_mux[] = {
+ SCIFA4_RXD_C_MARK, SCIFA4_TXD_C_MARK,
+};
+/* - SCIFA5 ----------------------------------------------------------------- */
+static const unsigned int scifa5_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 3),
+};
+static const unsigned int scifa5_data_mux[] = {
+ SCIFA5_RXD_MARK, SCIFA5_TXD_MARK,
+};
+static const unsigned int scifa5_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int scifa5_data_b_mux[] = {
+ SCIFA5_RXD_B_MARK, SCIFA5_TXD_B_MARK,
+};
+static const unsigned int scifa5_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(6, 23), RCAR_GP_PIN(6, 22),
+};
+static const unsigned int scifa5_data_c_mux[] = {
+ SCIFA5_RXD_C_MARK, SCIFA5_TXD_C_MARK,
+};
+/* - SCIFB0 ----------------------------------------------------------------- */
+static const unsigned int scifb0_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(7, 3), RCAR_GP_PIN(7, 4),
+};
+static const unsigned int scifb0_data_mux[] = {
+ SCIFB0_RXD_MARK, SCIFB0_TXD_MARK,
+};
+static const unsigned int scifb0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int scifb0_clk_mux[] = {
+ SCIFB0_SCK_MARK,
+};
+static const unsigned int scifb0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(7, 1), RCAR_GP_PIN(7, 0),
+};
+static const unsigned int scifb0_ctrl_mux[] = {
+ SCIFB0_RTS_N_MARK, SCIFB0_CTS_N_MARK,
+};
+static const unsigned int scifb0_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(1, 20), RCAR_GP_PIN(1, 21),
+};
+static const unsigned int scifb0_data_b_mux[] = {
+ SCIFB0_RXD_B_MARK, SCIFB0_TXD_B_MARK,
+};
+static const unsigned int scifb0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 31),
+};
+static const unsigned int scifb0_clk_b_mux[] = {
+ SCIFB0_SCK_B_MARK,
+};
+static const unsigned int scifb0_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 23),
+};
+static const unsigned int scifb0_ctrl_b_mux[] = {
+ SCIFB0_RTS_N_B_MARK, SCIFB0_CTS_N_B_MARK,
+};
+static const unsigned int scifb0_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int scifb0_data_c_mux[] = {
+ SCIFB0_RXD_C_MARK, SCIFB0_TXD_C_MARK,
+};
+static const unsigned int scifb0_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 30),
+};
+static const unsigned int scifb0_clk_c_mux[] = {
+ SCIFB0_SCK_C_MARK,
+};
+static const unsigned int scifb0_data_d_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int scifb0_data_d_mux[] = {
+ SCIFB0_RXD_D_MARK, SCIFB0_TXD_D_MARK,
+};
+static const unsigned int scifb0_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int scifb0_clk_d_mux[] = {
+ SCIFB0_SCK_D_MARK,
+};
+/* - SCIFB1 ----------------------------------------------------------------- */
+static const unsigned int scifb1_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 6),
+};
+static const unsigned int scifb1_data_mux[] = {
+ SCIFB1_RXD_MARK, SCIFB1_TXD_MARK,
+};
+static const unsigned int scifb1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 7),
+};
+static const unsigned int scifb1_clk_mux[] = {
+ SCIFB1_SCK_MARK,
+};
+static const unsigned int scifb1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(7, 9), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int scifb1_ctrl_mux[] = {
+ SCIFB1_RTS_N_MARK, SCIFB1_CTS_N_MARK,
+};
+static const unsigned int scifb1_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18),
+};
+static const unsigned int scifb1_data_b_mux[] = {
+ SCIFB1_RXD_B_MARK, SCIFB1_TXD_B_MARK,
+};
+static const unsigned int scifb1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scifb1_clk_b_mux[] = {
+ SCIFB1_SCK_B_MARK,
+};
+static const unsigned int scifb1_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scifb1_data_c_mux[] = {
+ SCIFB1_RXD_C_MARK, SCIFB1_TXD_C_MARK,
+};
+static const unsigned int scifb1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 11),
+};
+static const unsigned int scifb1_clk_c_mux[] = {
+ SCIFB1_SCK_C_MARK,
+};
+static const unsigned int scifb1_data_d_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(7, 10), RCAR_GP_PIN(7, 12),
+};
+static const unsigned int scifb1_data_d_mux[] = {
+ SCIFB1_RXD_D_MARK, SCIFB1_TXD_D_MARK,
+};
+/* - SCIFB2 ----------------------------------------------------------------- */
+static const unsigned int scifb2_data_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 17),
+};
+static const unsigned int scifb2_data_mux[] = {
+ SCIFB2_RXD_MARK, SCIFB2_TXD_MARK,
+};
+static const unsigned int scifb2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int scifb2_clk_mux[] = {
+ SCIFB2_SCK_MARK,
+};
+static const unsigned int scifb2_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+};
+static const unsigned int scifb2_ctrl_mux[] = {
+ SCIFB2_RTS_N_MARK, SCIFB2_CTS_N_MARK,
+};
+static const unsigned int scifb2_data_b_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+};
+static const unsigned int scifb2_data_b_mux[] = {
+ SCIFB2_RXD_B_MARK, SCIFB2_TXD_B_MARK,
+};
+static const unsigned int scifb2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 31),
+};
+static const unsigned int scifb2_clk_b_mux[] = {
+ SCIFB2_SCK_B_MARK,
+};
+static const unsigned int scifb2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 14),
+};
+static const unsigned int scifb2_ctrl_b_mux[] = {
+ SCIFB2_RTS_N_B_MARK, SCIFB2_CTS_N_B_MARK,
+};
+static const unsigned int scifb2_data_c_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int scifb2_data_c_mux[] = {
+ SCIFB2_RXD_C_MARK, SCIFB2_TXD_C_MARK,
+};
+static const unsigned int scifb2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int scifb2_clk_c_mux[] = {
+ SCIFB2_SCK_C_MARK,
+};
+static const unsigned int scifb2_data_d_pins[] = {
+ /* RXD, TXD */
+ RCAR_GP_PIN(5, 26), RCAR_GP_PIN(5, 25),
+};
+static const unsigned int scifb2_data_d_mux[] = {
+ SCIFB2_RXD_D_MARK, SCIFB2_TXD_D_MARK,
+};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DATA0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3),
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DATA0_MARK, SD0_DATA1_MARK, SD0_DATA2_MARK, SD0_DATA3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(6, 6),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DATA0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 11),
+ RCAR_GP_PIN(6, 12), RCAR_GP_PIN(6, 13),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DATA0_MARK, SD1_DATA1_MARK, SD1_DATA2_MARK, SD1_DATA3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(6, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(6, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 18),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DATA0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(6, 18), RCAR_GP_PIN(6, 19),
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DATA0_MARK, SD2_DATA1_MARK, SD2_DATA2_MARK, SD2_DATA3_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 17),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(6, 22),
+};
+static const unsigned int sdhi2_cd_mux[] = {
+ SD2_CD_MARK,
+};
+static const unsigned int sdhi2_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int sdhi2_wp_mux[] = {
+ SD2_WP_MARK,
+};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pwen_pins[] = {
+ /* PWEN */
+ RCAR_GP_PIN(7, 23),
+};
+static const unsigned int usb0_pwen_mux[] = {
+ USB0_PWEN_MARK,
+};
+static const unsigned int usb0_ovc_pins[] = {
+ /* OVC */
+ RCAR_GP_PIN(7, 24),
+};
+static const unsigned int usb0_ovc_mux[] = {
+ USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pwen_pins[] = {
+ /* PWEN */
+ RCAR_GP_PIN(7, 25),
+};
+static const unsigned int usb1_pwen_mux[] = {
+ USB1_PWEN_MARK,
+};
+static const unsigned int usb1_ovc_pins[] = {
+ /* OVC */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int usb1_ovc_mux[] = {
+ USB1_OVC_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync_1),
+ SH_PFC_PIN_GROUP(du_cde_disp),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(eth_link),
+ SH_PFC_PIN_GROUP(eth_magic),
+ SH_PFC_PIN_GROUP(eth_mdio),
+ SH_PFC_PIN_GROUP(eth_rmii),
+ SH_PFC_PIN_GROUP(intc_irq0),
+ SH_PFC_PIN_GROUP(intc_irq1),
+ SH_PFC_PIN_GROUP(intc_irq2),
+ SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(mmc_data1),
+ SH_PFC_PIN_GROUP(mmc_data4),
+ SH_PFC_PIN_GROUP(mmc_data8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_rx),
+ SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_rx),
+ SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_rx),
+ SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_data_c),
+ SH_PFC_PIN_GROUP(scif0_data_d),
+ SH_PFC_PIN_GROUP(scif0_data_e),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_data_c),
+ SH_PFC_PIN_GROUP(scif1_data_d),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif2_clk_b),
+ SH_PFC_PIN_GROUP(scif2_data_c),
+ SH_PFC_PIN_GROUP(scif2_data_e),
+ SH_PFC_PIN_GROUP(scif3_data),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_clk_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif3_data_d),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif5_data),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scifa0_data),
+ SH_PFC_PIN_GROUP(scifa0_data_b),
+ SH_PFC_PIN_GROUP(scifa1_data),
+ SH_PFC_PIN_GROUP(scifa1_clk),
+ SH_PFC_PIN_GROUP(scifa1_data_b),
+ SH_PFC_PIN_GROUP(scifa1_clk_b),
+ SH_PFC_PIN_GROUP(scifa1_data_c),
+ SH_PFC_PIN_GROUP(scifa2_data),
+ SH_PFC_PIN_GROUP(scifa2_clk),
+ SH_PFC_PIN_GROUP(scifa2_data_b),
+ SH_PFC_PIN_GROUP(scifa3_data),
+ SH_PFC_PIN_GROUP(scifa3_clk),
+ SH_PFC_PIN_GROUP(scifa3_data_b),
+ SH_PFC_PIN_GROUP(scifa3_clk_b),
+ SH_PFC_PIN_GROUP(scifa3_data_c),
+ SH_PFC_PIN_GROUP(scifa3_clk_c),
+ SH_PFC_PIN_GROUP(scifa4_data),
+ SH_PFC_PIN_GROUP(scifa4_data_b),
+ SH_PFC_PIN_GROUP(scifa4_data_c),
+ SH_PFC_PIN_GROUP(scifa5_data),
+ SH_PFC_PIN_GROUP(scifa5_data_b),
+ SH_PFC_PIN_GROUP(scifa5_data_c),
+ SH_PFC_PIN_GROUP(scifb0_data),
+ SH_PFC_PIN_GROUP(scifb0_clk),
+ SH_PFC_PIN_GROUP(scifb0_ctrl),
+ SH_PFC_PIN_GROUP(scifb0_data_b),
+ SH_PFC_PIN_GROUP(scifb0_clk_b),
+ SH_PFC_PIN_GROUP(scifb0_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb0_data_c),
+ SH_PFC_PIN_GROUP(scifb0_clk_c),
+ SH_PFC_PIN_GROUP(scifb0_data_d),
+ SH_PFC_PIN_GROUP(scifb0_clk_d),
+ SH_PFC_PIN_GROUP(scifb1_data),
+ SH_PFC_PIN_GROUP(scifb1_clk),
+ SH_PFC_PIN_GROUP(scifb1_ctrl),
+ SH_PFC_PIN_GROUP(scifb1_data_b),
+ SH_PFC_PIN_GROUP(scifb1_clk_b),
+ SH_PFC_PIN_GROUP(scifb1_data_c),
+ SH_PFC_PIN_GROUP(scifb1_clk_c),
+ SH_PFC_PIN_GROUP(scifb1_data_d),
+ SH_PFC_PIN_GROUP(scifb2_data),
+ SH_PFC_PIN_GROUP(scifb2_clk),
+ SH_PFC_PIN_GROUP(scifb2_ctrl),
+ SH_PFC_PIN_GROUP(scifb2_data_b),
+ SH_PFC_PIN_GROUP(scifb2_clk_b),
+ SH_PFC_PIN_GROUP(scifb2_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb2_data_c),
+ SH_PFC_PIN_GROUP(scifb2_clk_c),
+ SH_PFC_PIN_GROUP(scifb2_data_d),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd),
+ SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(usb0_pwen),
+ SH_PFC_PIN_GROUP(usb0_ovc),
+ SH_PFC_PIN_GROUP(usb1_pwen),
+ SH_PFC_PIN_GROUP(usb1_ovc),
+};
+
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync_1",
+ "du_cde_disp",
+};
+
+static const char * const du0_groups[] = {
+ "du0_clk_in",
+};
+
+static const char * const du1_groups[] = {
+ "du1_clk_in",
+};
+
+static const char * const eth_groups[] = {
+ "eth_link",
+ "eth_magic",
+ "eth_mdio",
+ "eth_rmii",
+};
+
+static const char * const intc_groups[] = {
+ "intc_irq0",
+ "intc_irq1",
+ "intc_irq2",
+ "intc_irq3",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_ctrl",
+ "msiof0_data",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_ctrl",
+ "msiof1_data",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_ctrl",
+ "msiof2_data",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_data_b",
+ "scif0_data_c",
+ "scif0_data_d",
+ "scif0_data_e",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data",
+ "scif1_data_b",
+ "scif1_clk_b",
+ "scif1_data_c",
+ "scif1_data_d",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data",
+ "scif2_data_b",
+ "scif2_clk_b",
+ "scif2_data_c",
+ "scif2_data_e",
+};
+static const char * const scif3_groups[] = {
+ "scif3_data",
+ "scif3_clk",
+ "scif3_data_b",
+ "scif3_clk_b",
+ "scif3_data_c",
+ "scif3_data_d",
+};
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_data_b",
+ "scif4_data_c",
+};
+static const char * const scif5_groups[] = {
+ "scif5_data",
+ "scif5_data_b",
+};
+static const char * const scifa0_groups[] = {
+ "scifa0_data",
+ "scifa0_data_b",
+};
+static const char * const scifa1_groups[] = {
+ "scifa1_data",
+ "scifa1_clk",
+ "scifa1_data_b",
+ "scifa1_clk_b",
+ "scifa1_data_c",
+};
+static const char * const scifa2_groups[] = {
+ "scifa2_data",
+ "scifa2_clk",
+ "scifa2_data_b",
+};
+static const char * const scifa3_groups[] = {
+ "scifa3_data",
+ "scifa3_clk",
+ "scifa3_data_b",
+ "scifa3_clk_b",
+ "scifa3_data_c",
+ "scifa3_clk_c",
+};
+static const char * const scifa4_groups[] = {
+ "scifa4_data",
+ "scifa4_data_b",
+ "scifa4_data_c",
+};
+static const char * const scifa5_groups[] = {
+ "scifa5_data",
+ "scifa5_data_b",
+ "scifa5_data_c",
+};
+static const char * const scifb0_groups[] = {
+ "scifb0_data",
+ "scifb0_clk",
+ "scifb0_ctrl",
+ "scifb0_data_b",
+ "scifb0_clk_b",
+ "scifb0_ctrl_b",
+ "scifb0_data_c",
+ "scifb0_clk_c",
+ "scifb0_data_d",
+ "scifb0_clk_d",
+};
+static const char * const scifb1_groups[] = {
+ "scifb1_data",
+ "scifb1_clk",
+ "scifb1_ctrl",
+ "scifb1_data_b",
+ "scifb1_clk_b",
+ "scifb1_data_c",
+ "scifb1_clk_c",
+ "scifb1_data_d",
+};
+static const char * const scifb2_groups[] = {
+ "scifb2_data",
+ "scifb2_clk",
+ "scifb2_ctrl",
+ "scifb2_data_b",
+ "scifb2_clk_b",
+ "scifb2_ctrl_b",
+ "scifb0_data_c",
+ "scifb2_clk_c",
+ "scifb2_data_d",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_ctrl",
+ "sdhi2_cd",
+ "sdhi2_wp",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0_pwen",
+ "usb0_ovc",
+};
+static const char * const usb1_groups[] = {
+ "usb1_pwen",
+ "usb1_ovc",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(eth),
+ SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scifa0),
+ SH_PFC_FUNCTION(scifa1),
+ SH_PFC_FUNCTION(scifa2),
+ SH_PFC_FUNCTION(scifa3),
+ SH_PFC_FUNCTION(scifa4),
+ SH_PFC_FUNCTION(scifa5),
+ SH_PFC_FUNCTION(scifb0),
+ SH_PFC_FUNCTION(scifb1),
+ SH_PFC_FUNCTION(scifb2),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ GP_0_31_FN, FN_IP1_22_20,
+ GP_0_30_FN, FN_IP1_19_17,
+ GP_0_29_FN, FN_IP1_16_14,
+ GP_0_28_FN, FN_IP1_13_11,
+ GP_0_27_FN, FN_IP1_10_8,
+ GP_0_26_FN, FN_IP1_7_6,
+ GP_0_25_FN, FN_IP1_5_4,
+ GP_0_24_FN, FN_IP1_3_2,
+ GP_0_23_FN, FN_IP1_1_0,
+ GP_0_22_FN, FN_IP0_30_29,
+ GP_0_21_FN, FN_IP0_28_27,
+ GP_0_20_FN, FN_IP0_26_25,
+ GP_0_19_FN, FN_IP0_24_23,
+ GP_0_18_FN, FN_IP0_22_21,
+ GP_0_17_FN, FN_IP0_20_19,
+ GP_0_16_FN, FN_IP0_18_16,
+ GP_0_15_FN, FN_IP0_15,
+ GP_0_14_FN, FN_IP0_14,
+ GP_0_13_FN, FN_IP0_13,
+ GP_0_12_FN, FN_IP0_12,
+ GP_0_11_FN, FN_IP0_11,
+ GP_0_10_FN, FN_IP0_10,
+ GP_0_9_FN, FN_IP0_9,
+ GP_0_8_FN, FN_IP0_8,
+ GP_0_7_FN, FN_IP0_7,
+ GP_0_6_FN, FN_IP0_6,
+ GP_0_5_FN, FN_IP0_5,
+ GP_0_4_FN, FN_IP0_4,
+ GP_0_3_FN, FN_IP0_3,
+ GP_0_2_FN, FN_IP0_2,
+ GP_0_1_FN, FN_IP0_1,
+ GP_0_0_FN, FN_IP0_0, }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_25_FN, FN_IP3_21_20,
+ GP_1_24_FN, FN_IP3_19_18,
+ GP_1_23_FN, FN_IP3_17_16,
+ GP_1_22_FN, FN_IP3_15_14,
+ GP_1_21_FN, FN_IP3_13_12,
+ GP_1_20_FN, FN_IP3_11_9,
+ GP_1_19_FN, FN_RD_N,
+ GP_1_18_FN, FN_IP3_8_6,
+ GP_1_17_FN, FN_IP3_5_3,
+ GP_1_16_FN, FN_IP3_2_0,
+ GP_1_15_FN, FN_IP2_29_27,
+ GP_1_14_FN, FN_IP2_26_25,
+ GP_1_13_FN, FN_IP2_24_23,
+ GP_1_12_FN, FN_EX_CS0_N,
+ GP_1_11_FN, FN_IP2_22_21,
+ GP_1_10_FN, FN_IP2_20_19,
+ GP_1_9_FN, FN_IP2_18_16,
+ GP_1_8_FN, FN_IP2_15_13,
+ GP_1_7_FN, FN_IP2_12_10,
+ GP_1_6_FN, FN_IP2_9_7,
+ GP_1_5_FN, FN_IP2_6_5,
+ GP_1_4_FN, FN_IP2_4_3,
+ GP_1_3_FN, FN_IP2_2_0,
+ GP_1_2_FN, FN_IP1_31_29,
+ GP_1_1_FN, FN_IP1_28_26,
+ GP_1_0_FN, FN_IP1_25_23, }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ GP_2_31_FN, FN_IP6_7_6,
+ GP_2_30_FN, FN_IP6_5_3,
+ GP_2_29_FN, FN_IP6_2_0,
+ GP_2_28_FN, FN_AUDIO_CLKA,
+ GP_2_27_FN, FN_IP5_31_29,
+ GP_2_26_FN, FN_IP5_28_26,
+ GP_2_25_FN, FN_IP5_25_24,
+ GP_2_24_FN, FN_IP5_23_22,
+ GP_2_23_FN, FN_IP5_21_20,
+ GP_2_22_FN, FN_IP5_19_17,
+ GP_2_21_FN, FN_IP5_16_15,
+ GP_2_20_FN, FN_IP5_14_12,
+ GP_2_19_FN, FN_IP5_11_9,
+ GP_2_18_FN, FN_IP5_8_6,
+ GP_2_17_FN, FN_IP5_5_3,
+ GP_2_16_FN, FN_IP5_2_0,
+ GP_2_15_FN, FN_IP4_30_28,
+ GP_2_14_FN, FN_IP4_27_26,
+ GP_2_13_FN, FN_IP4_25_24,
+ GP_2_12_FN, FN_IP4_23_22,
+ GP_2_11_FN, FN_IP4_21,
+ GP_2_10_FN, FN_IP4_20,
+ GP_2_9_FN, FN_IP4_19,
+ GP_2_8_FN, FN_IP4_18_16,
+ GP_2_7_FN, FN_IP4_15_13,
+ GP_2_6_FN, FN_IP4_12_10,
+ GP_2_5_FN, FN_IP4_9_8,
+ GP_2_4_FN, FN_IP4_7_5,
+ GP_2_3_FN, FN_IP4_4_2,
+ GP_2_2_FN, FN_IP4_1_0,
+ GP_2_1_FN, FN_IP3_30_28,
+ GP_2_0_FN, FN_IP3_27_25 }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ GP_3_31_FN, FN_IP9_18_17,
+ GP_3_30_FN, FN_IP9_16,
+ GP_3_29_FN, FN_IP9_15_13,
+ GP_3_28_FN, FN_IP9_12,
+ GP_3_27_FN, FN_IP9_11,
+ GP_3_26_FN, FN_IP9_10_8,
+ GP_3_25_FN, FN_IP9_7,
+ GP_3_24_FN, FN_IP9_6,
+ GP_3_23_FN, FN_IP9_5_3,
+ GP_3_22_FN, FN_IP9_2_0,
+ GP_3_21_FN, FN_IP8_30_28,
+ GP_3_20_FN, FN_IP8_27_26,
+ GP_3_19_FN, FN_IP8_25_24,
+ GP_3_18_FN, FN_IP8_23_21,
+ GP_3_17_FN, FN_IP8_20_18,
+ GP_3_16_FN, FN_IP8_17_15,
+ GP_3_15_FN, FN_IP8_14_12,
+ GP_3_14_FN, FN_IP8_11_9,
+ GP_3_13_FN, FN_IP8_8_6,
+ GP_3_12_FN, FN_IP8_5_3,
+ GP_3_11_FN, FN_IP8_2_0,
+ GP_3_10_FN, FN_IP7_29_27,
+ GP_3_9_FN, FN_IP7_26_24,
+ GP_3_8_FN, FN_IP7_23_21,
+ GP_3_7_FN, FN_IP7_20_19,
+ GP_3_6_FN, FN_IP7_18_17,
+ GP_3_5_FN, FN_IP7_16_15,
+ GP_3_4_FN, FN_IP7_14_13,
+ GP_3_3_FN, FN_IP7_12_11,
+ GP_3_2_FN, FN_IP7_10_9,
+ GP_3_1_FN, FN_IP7_8_6,
+ GP_3_0_FN, FN_IP7_5_3 }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ GP_4_31_FN, FN_IP15_5_4,
+ GP_4_30_FN, FN_IP15_3_2,
+ GP_4_29_FN, FN_IP15_1_0,
+ GP_4_28_FN, FN_IP11_8_6,
+ GP_4_27_FN, FN_IP11_5_3,
+ GP_4_26_FN, FN_IP11_2_0,
+ GP_4_25_FN, FN_IP10_31_29,
+ GP_4_24_FN, FN_IP10_28_27,
+ GP_4_23_FN, FN_IP10_26_25,
+ GP_4_22_FN, FN_IP10_24_22,
+ GP_4_21_FN, FN_IP10_21_19,
+ GP_4_20_FN, FN_IP10_18_17,
+ GP_4_19_FN, FN_IP10_16_15,
+ GP_4_18_FN, FN_IP10_14_12,
+ GP_4_17_FN, FN_IP10_11_9,
+ GP_4_16_FN, FN_IP10_8_6,
+ GP_4_15_FN, FN_IP10_5_3,
+ GP_4_14_FN, FN_IP10_2_0,
+ GP_4_13_FN, FN_IP9_31_29,
+ GP_4_12_FN, FN_VI0_DATA7_VI0_B7,
+ GP_4_11_FN, FN_VI0_DATA6_VI0_B6,
+ GP_4_10_FN, FN_VI0_DATA5_VI0_B5,
+ GP_4_9_FN, FN_VI0_DATA4_VI0_B4,
+ GP_4_8_FN, FN_IP9_28_27,
+ GP_4_7_FN, FN_VI0_DATA2_VI0_B2,
+ GP_4_6_FN, FN_VI0_DATA1_VI0_B1,
+ GP_4_5_FN, FN_VI0_DATA0_VI0_B0,
+ GP_4_4_FN, FN_IP9_26_25,
+ GP_4_3_FN, FN_IP9_24_23,
+ GP_4_2_FN, FN_IP9_22_21,
+ GP_4_1_FN, FN_IP9_20_19,
+ GP_4_0_FN, FN_VI0_CLK }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ GP_5_31_FN, FN_IP3_24_22,
+ GP_5_30_FN, FN_IP13_9_7,
+ GP_5_29_FN, FN_IP13_6_5,
+ GP_5_28_FN, FN_IP13_4_3,
+ GP_5_27_FN, FN_IP13_2_0,
+ GP_5_26_FN, FN_IP12_29_27,
+ GP_5_25_FN, FN_IP12_26_24,
+ GP_5_24_FN, FN_IP12_23_22,
+ GP_5_23_FN, FN_IP12_21_20,
+ GP_5_22_FN, FN_IP12_19_18,
+ GP_5_21_FN, FN_IP12_17_16,
+ GP_5_20_FN, FN_IP12_15_13,
+ GP_5_19_FN, FN_IP12_12_10,
+ GP_5_18_FN, FN_IP12_9_7,
+ GP_5_17_FN, FN_IP12_6_4,
+ GP_5_16_FN, FN_IP12_3_2,
+ GP_5_15_FN, FN_IP12_1_0,
+ GP_5_14_FN, FN_IP11_31_30,
+ GP_5_13_FN, FN_IP11_29_28,
+ GP_5_12_FN, FN_IP11_27,
+ GP_5_11_FN, FN_IP11_26,
+ GP_5_10_FN, FN_IP11_25,
+ GP_5_9_FN, FN_IP11_24,
+ GP_5_8_FN, FN_IP11_23,
+ GP_5_7_FN, FN_IP11_22,
+ GP_5_6_FN, FN_IP11_21,
+ GP_5_5_FN, FN_IP11_20,
+ GP_5_4_FN, FN_IP11_19,
+ GP_5_3_FN, FN_IP11_18_17,
+ GP_5_2_FN, FN_IP11_16_15,
+ GP_5_1_FN, FN_IP11_14_12,
+ GP_5_0_FN, FN_IP11_11_9 }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1) {
+ GP_6_31_FN, FN_DU0_DOTCLKIN,
+ GP_6_30_FN, FN_USB1_OVC,
+ GP_6_29_FN, FN_IP14_31_29,
+ GP_6_28_FN, FN_IP14_28_26,
+ GP_6_27_FN, FN_IP14_25_23,
+ GP_6_26_FN, FN_IP14_22_20,
+ GP_6_25_FN, FN_IP14_19_17,
+ GP_6_24_FN, FN_IP14_16_14,
+ GP_6_23_FN, FN_IP14_13_11,
+ GP_6_22_FN, FN_IP14_10_8,
+ GP_6_21_FN, FN_IP14_7,
+ GP_6_20_FN, FN_IP14_6,
+ GP_6_19_FN, FN_IP14_5,
+ GP_6_18_FN, FN_IP14_4,
+ GP_6_17_FN, FN_IP14_3,
+ GP_6_16_FN, FN_IP14_2,
+ GP_6_15_FN, FN_IP14_1_0,
+ GP_6_14_FN, FN_IP13_30_28,
+ GP_6_13_FN, FN_IP13_27,
+ GP_6_12_FN, FN_IP13_26,
+ GP_6_11_FN, FN_IP13_25,
+ GP_6_10_FN, FN_IP13_24_23,
+ GP_6_9_FN, FN_IP13_22,
+ 0, 0,
+ GP_6_7_FN, FN_IP13_21_19,
+ GP_6_6_FN, FN_IP13_18_16,
+ GP_6_5_FN, FN_IP13_15,
+ GP_6_4_FN, FN_IP13_14,
+ GP_6_3_FN, FN_IP13_13,
+ GP_6_2_FN, FN_IP13_12,
+ GP_6_1_FN, FN_IP13_11,
+ GP_6_0_FN, FN_IP13_10 }
+ },
+ { PINMUX_CFG_REG("GPSR7", 0xE6060074, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_7_25_FN, FN_USB1_PWEN,
+ GP_7_24_FN, FN_USB0_OVC,
+ GP_7_23_FN, FN_USB0_PWEN,
+ GP_7_22_FN, FN_IP15_14_12,
+ GP_7_21_FN, FN_IP15_11_9,
+ GP_7_20_FN, FN_IP15_8_6,
+ GP_7_19_FN, FN_IP7_2_0,
+ GP_7_18_FN, FN_IP6_29_27,
+ GP_7_17_FN, FN_IP6_26_24,
+ GP_7_16_FN, FN_IP6_23_21,
+ GP_7_15_FN, FN_IP6_20_19,
+ GP_7_14_FN, FN_IP6_18_16,
+ GP_7_13_FN, FN_IP6_15_14,
+ GP_7_12_FN, FN_IP6_13_12,
+ GP_7_11_FN, FN_IP6_11_10,
+ GP_7_10_FN, FN_IP6_9_8,
+ GP_7_9_FN, FN_IP16_11_10,
+ GP_7_8_FN, FN_IP16_9_8,
+ GP_7_7_FN, FN_IP16_7_6,
+ GP_7_6_FN, FN_IP16_5_3,
+ GP_7_5_FN, FN_IP16_2_0,
+ GP_7_4_FN, FN_IP15_29_27,
+ GP_7_3_FN, FN_IP15_26_24,
+ GP_7_2_FN, FN_IP15_23_21,
+ GP_7_1_FN, FN_IP15_20_18,
+ GP_7_0_FN, FN_IP15_17_15 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
+ 1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP0_31 [1] */
+ 0, 0,
+ /* IP0_30_29 [2] */
+ FN_A6, FN_MSIOF1_SCK,
+ 0, 0,
+ /* IP0_28_27 [2] */
+ FN_A5, FN_MSIOF0_RXD_B,
+ 0, 0,
+ /* IP0_26_25 [2] */
+ FN_A4, FN_MSIOF0_TXD_B,
+ 0, 0,
+ /* IP0_24_23 [2] */
+ FN_A3, FN_MSIOF0_SS2_B,
+ 0, 0,
+ /* IP0_22_21 [2] */
+ FN_A2, FN_MSIOF0_SS1_B,
+ 0, 0,
+ /* IP0_20_19 [2] */
+ FN_A1, FN_MSIOF0_SYNC_B,
+ 0, 0,
+ /* IP0_18_16 [3] */
+ FN_A0, FN_ATAWR0_N_C, FN_MSIOF0_SCK_B, FN_SCL0_C, FN_PWM2_B,
+ 0, 0, 0,
+ /* IP0_15 [1] */
+ FN_D15, 0,
+ /* IP0_14 [1] */
+ FN_D14, 0,
+ /* IP0_13 [1] */
+ FN_D13, 0,
+ /* IP0_12 [1] */
+ FN_D12, 0,
+ /* IP0_11 [1] */
+ FN_D11, 0,
+ /* IP0_10 [1] */
+ FN_D10, 0,
+ /* IP0_9 [1] */
+ FN_D9, 0,
+ /* IP0_8 [1] */
+ FN_D8, 0,
+ /* IP0_7 [1] */
+ FN_D7, 0,
+ /* IP0_6 [1] */
+ FN_D6, 0,
+ /* IP0_5 [1] */
+ FN_D5, 0,
+ /* IP0_4 [1] */
+ FN_D4, 0,
+ /* IP0_3 [1] */
+ FN_D3, 0,
+ /* IP0_2 [1] */
+ FN_D2, 0,
+ /* IP0_1 [1] */
+ FN_D1, 0,
+ /* IP0_0 [1] */
+ FN_D0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2) {
+ /* IP1_31_29 [3] */
+ FN_A18, FN_DREQ1, FN_SCIFA1_RXD_C, 0, FN_SCIFB1_RXD_C,
+ 0, 0, 0,
+ /* IP1_28_26 [3] */
+ FN_A17, FN_DACK2_B, 0, FN_SDA0_C,
+ 0, 0, 0, 0,
+ /* IP1_25_23 [3] */
+ FN_A16, FN_DREQ2_B, FN_FMCLK_C, 0, FN_SCIFA1_SCK_B,
+ 0, 0, 0,
+ /* IP1_22_20 [3] */
+ FN_A15, FN_BPFCLK_C,
+ 0, 0, 0, 0, 0, 0,
+ /* IP1_19_17 [3] */
+ FN_A14, FN_ATADIR0_N_C, FN_FMIN, FN_FMIN_C, FN_MSIOF1_SYNC_D,
+ 0, 0, 0,
+ /* IP1_16_14 [3] */
+ FN_A13, FN_ATAG0_N_C, FN_BPFCLK, FN_MSIOF1_SS1_D,
+ 0, 0, 0, 0,
+ /* IP1_13_11 [3] */
+ FN_A12, FN_FMCLK, FN_SDA3_D, FN_MSIOF1_SCK_D,
+ 0, 0, 0, 0,
+ /* IP1_10_8 [3] */
+ FN_A11, FN_MSIOF1_RXD, FN_SCL3_D, FN_MSIOF1_RXD_D,
+ 0, 0, 0, 0,
+ /* IP1_7_6 [2] */
+ FN_A10, FN_MSIOF1_TXD, 0, FN_MSIOF1_TXD_D,
+ /* IP1_5_4 [2] */
+ FN_A9, FN_MSIOF1_SS2, FN_SDA0, 0,
+ /* IP1_3_2 [2] */
+ FN_A8, FN_MSIOF1_SS1, FN_SCL0, 0,
+ /* IP1_1_0 [2] */
+ FN_A7, FN_MSIOF1_SYNC,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
+ 2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3) {
+ /* IP2_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP2_29_27 [3] */
+ FN_EX_CS3_N, FN_ATADIR0_N, FN_MSIOF2_TXD,
+ FN_ATAG0_N, 0, FN_EX_WAIT1,
+ 0, 0,
+ /* IP2_26_25 [2] */
+ FN_EX_CS2_N, FN_ATAWR0_N, FN_MSIOF2_SYNC, 0,
+ /* IP2_24_23 [2] */
+ FN_EX_CS1_N, FN_MSIOF2_SCK, 0, 0,
+ /* IP2_22_21 [2] */
+ FN_CS1_N_A26, FN_ATADIR0_N_B, FN_SDA1, 0,
+ /* IP2_20_19 [2] */
+ FN_CS0_N, FN_ATAG0_N_B, FN_SCL1, 0,
+ /* IP2_18_16 [3] */
+ FN_A25, FN_DACK2, FN_SSL, FN_DREQ1_C, FN_RX1, FN_SCIFA1_RXD,
+ 0, 0,
+ /* IP2_15_13 [3] */
+ FN_A24, FN_DREQ2, FN_IO3, FN_TX1, FN_SCIFA1_TXD,
+ 0, 0, 0,
+ /* IP2_12_10 [3] */
+ FN_A23, FN_IO2, FN_BPFCLK_B, FN_RX0, FN_SCIFA0_RXD,
+ 0, 0, 0,
+ /* IP2_9_7 [3] */
+ FN_A22, FN_MISO_IO1, FN_FMCLK_B, FN_TX0, FN_SCIFA0_TXD,
+ 0, 0, 0,
+ /* IP2_6_5 [2] */
+ FN_A21, FN_ATAWR0_N_B, FN_MOSI_IO0, 0,
+ /* IP2_4_3 [2] */
+ FN_A20, FN_SPCLK, 0, 0,
+ /* IP2_2_0 [3] */
+ FN_A19, FN_DACK1, FN_SCIFA1_TXD_C, 0,
+ FN_SCIFB1_TXD_C, 0, FN_SCIFB1_SCK_B, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xE606002C, 32,
+ 1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3) {
+ /* IP3_31 [1] */
+ 0, 0,
+ /* IP3_30_28 [3] */
+ FN_SSI_WS0129, FN_HTX0_C, FN_HTX2_C,
+ FN_SCIFB0_TXD_C, FN_SCIFB2_TXD_C,
+ 0, 0, 0,
+ /* IP3_27_25 [3] */
+ FN_SSI_SCK0129, FN_HRX0_C, FN_HRX2_C,
+ FN_SCIFB0_RXD_C, FN_SCIFB2_RXD_C,
+ 0, 0, 0,
+ /* IP3_24_22 [3] */
+ FN_SPEEDIN, 0, FN_HSCK0_C, FN_HSCK2_C, FN_SCIFB0_SCK_B,
+ FN_SCIFB2_SCK_B, FN_DREQ2_C, FN_HTX2_D,
+ /* IP3_21_20 [2] */
+ FN_DACK0, FN_DRACK0, FN_REMOCON, 0,
+ /* IP3_19_18 [2] */
+ FN_DREQ0, FN_PWM3, FN_TPU_TO3, 0,
+ /* IP3_17_16 [2] */
+ FN_EX_WAIT0, FN_HRTS2_N_B, FN_SCIFB0_CTS_N_B, 0,
+ /* IP3_15_14 [2] */
+ FN_WE1_N, FN_ATARD0_N_B, FN_HTX2_B, FN_SCIFB0_RTS_N_B,
+ /* IP3_13_12 [2] */
+ FN_WE0_N, FN_HCTS2_N_B, FN_SCIFB0_TXD_B, 0,
+ /* IP3_11_9 [3] */
+ FN_RD_WR_N, FN_HRX2_B, FN_FMIN_B, FN_SCIFB0_RXD_B, FN_DREQ1_D,
+ 0, 0, 0,
+ /* IP3_8_6 [3] */
+ FN_BS_N, FN_ATACS10_N, FN_MSIOF2_SS2, FN_HTX1_B,
+ FN_SCIFB1_TXD_B, FN_PWM2, FN_TPU_TO2, 0,
+ /* IP3_5_3 [3] */
+ FN_EX_CS5_N, FN_ATACS00_N, FN_MSIOF2_SS1, FN_HRX1_B,
+ FN_SCIFB1_RXD_B, FN_PWM1, FN_TPU_TO1, 0,
+ /* IP3_2_0 [3] */
+ FN_EX_CS4_N, FN_ATARD0_N, FN_MSIOF2_RXD, 0, FN_EX_WAIT2,
+ 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
+ 1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2, 3, 3, 2) {
+ /* IP4_31 [1] */
+ 0, 0,
+ /* IP4_30_28 [3] */
+ FN_SSI_SCK5, FN_MSIOF1_SCK_C, FN_TS_SDATA0, FN_GLO_I0,
+ FN_MSIOF2_SYNC_D, FN_VI1_R2_B,
+ 0, 0,
+ /* IP4_27_26 [2] */
+ FN_SSI_SDATA4, FN_MSIOF2_SCK_D, 0, 0,
+ /* IP4_25_24 [2] */
+ FN_SSI_WS4, FN_GLO_RFON_D, 0, 0,
+ /* IP4_23_22 [2] */
+ FN_SSI_SCK4, FN_GLO_SS_D, 0, 0,
+ /* IP4_21 [1] */
+ FN_SSI_SDATA3, 0,
+ /* IP4_20 [1] */
+ FN_SSI_WS34, 0,
+ /* IP4_19 [1] */
+ FN_SSI_SCK34, 0,
+ /* IP4_18_16 [3] */
+ FN_SSI_SDATA2, FN_GPS_MAG_B, FN_TX2_E, FN_HRTS1_N_E,
+ 0, 0, 0, 0,
+ /* IP4_15_13 [3] */
+ FN_SSI_WS2, FN_SDA2, FN_GPS_SIGN_B, FN_RX2_E,
+ FN_GLO_Q1_D, FN_HCTS1_N_E,
+ 0, 0,
+ /* IP4_12_10 [3] */
+ FN_SSI_SCK2, FN_SCL2, FN_GPS_CLK_B, FN_GLO_Q0_D, FN_HSCK1_E,
+ 0, 0, 0,
+ /* IP4_9_8 [2] */
+ FN_SSI_SDATA1, FN_SDA1_B, FN_SDA8_B, FN_MSIOF2_RXD_C,
+ /* IP4_7_5 [3] */
+ FN_SSI_WS1, FN_SCL1_B, FN_SCL8_B, FN_MSIOF2_TXD_C, FN_GLO_I1_D,
+ 0, 0, 0,
+ /* IP4_4_2 [3] */
+ FN_SSI_SCK1, FN_SDA0_B, FN_SDA7_B,
+ FN_MSIOF2_SYNC_C, FN_GLO_I0_D,
+ 0, 0, 0,
+ /* IP4_1_0 [2] */
+ FN_SSI_SDATA0, FN_SCL0_B, FN_SCL7_B, FN_MSIOF2_SCK_C, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
+ 3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 3, 3) {
+ /* IP5_31_29 [3] */
+ FN_SSI_SDATA9, FN_RX3_D, FN_CAN0_RX_D,
+ 0, 0, 0, 0, 0,
+ /* IP5_28_26 [3] */
+ FN_SSI_WS9, FN_TX3_D, FN_CAN0_TX_D, FN_GLO_SDATA_D,
+ 0, 0, 0, 0,
+ /* IP5_25_24 [2] */
+ FN_SSI_SCK9, FN_RX1_D, FN_GLO_SCLK_D, 0,
+ /* IP5_23_22 [2] */
+ FN_SSI_SDATA8, FN_TX1_D, FN_STP_ISSYNC_0_B, 0,
+ /* IP5_21_20 [2] */
+ FN_SSI_SDATA7, FN_RX0_D, FN_STP_ISEN_0_B, 0,
+ /* IP5_19_17 [3] */
+ FN_SSI_WS78, FN_TX0_D, FN_STP_ISD_0_B, FN_GLO_RFON,
+ 0, 0, 0, 0,
+ /* IP5_16_15 [2] */
+ FN_SSI_SCK78, FN_STP_ISCLK_0_B, FN_GLO_SS, 0,
+ /* IP5_14_12 [3] */
+ FN_SSI_SDATA6, FN_STP_IVCXO27_0_B, FN_GLO_SDATA, FN_VI1_R7_B,
+ 0, 0, 0, 0,
+ /* IP5_11_9 [3] */
+ FN_SSI_WS6, FN_GLO_SCLK, FN_MSIOF2_SS2_D, FN_VI1_R6_B,
+ 0, 0, 0, 0,
+ /* IP5_8_6 [3] */
+ FN_SSI_SCK6, FN_MSIOF1_RXD_C, FN_TS_SPSYNC0, FN_GLO_Q1,
+ FN_MSIOF2_RXD_D, FN_VI1_R5_B,
+ 0, 0,
+ /* IP5_5_3 [3] */
+ FN_SSI_SDATA5, FN_MSIOF1_TXD_C, FN_TS_SDEN0, FN_GLO_Q0,
+ FN_MSIOF2_SS1_D, FN_VI1_R4_B,
+ 0, 0,
+ /* IP5_2_0 [3] */
+ FN_SSI_WS5, FN_MSIOF1_SYNC_C, FN_TS_SCK0, FN_GLO_I1,
+ FN_MSIOF2_TXD_D, FN_VI1_R3_B,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
+ 2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3) {
+ /* IP6_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP6_29_27 [3] */
+ FN_IRQ8, FN_HRTS1_N_C, FN_MSIOF1_RXD_B,
+ FN_GPS_SIGN_C, FN_GPS_SIGN_D,
+ 0, 0, 0,
+ /* IP6_26_24 [3] */
+ FN_IRQ7, FN_HCTS1_N_C, FN_MSIOF1_TXD_B,
+ FN_GPS_CLK_C, FN_GPS_CLK_D,
+ 0, 0, 0,
+ /* IP6_23_21 [3] */
+ FN_IRQ6, FN_HSCK1_C, FN_MSIOF1_SS2_B,
+ FN_SDA1_E, FN_MSIOF2_SYNC_E,
+ 0, 0, 0,
+ /* IP6_20_19 [2] */
+ FN_IRQ5, FN_HTX1_C, FN_SCL1_E, FN_MSIOF2_SCK_E,
+ /* IP6_18_16 [3] */
+ FN_IRQ4, FN_HRX1_C, FN_SDA4_C, FN_MSIOF2_RXD_E, FN_INTC_IRQ4_N,
+ 0, 0, 0,
+ /* IP6_15_14 [2] */
+ FN_IRQ3, FN_SCL4_C, FN_MSIOF2_TXD_E, FN_INTC_IRQ3_N,
+ /* IP6_13_12 [2] */
+ FN_IRQ2, FN_SCIFB1_TXD_D, FN_INTC_IRQ2_N, 0,
+ /* IP6_11_10 [2] */
+ FN_IRQ1, FN_SCIFB1_SCK_C, FN_INTC_IRQ1_N, 0,
+ /* IP6_9_8 [2] */
+ FN_IRQ0, FN_SCIFB1_RXD_D, FN_INTC_IRQ0_N, 0,
+ /* IP6_7_6 [2] */
+ FN_AUDIO_CLKOUT, FN_MSIOF1_SS1_B, FN_TX2, FN_SCIFA2_TXD,
+ /* IP6_5_3 [3] */
+ FN_AUDIO_CLKC, FN_SCIFB0_SCK_C, FN_MSIOF1_SYNC_B, FN_RX2,
+ FN_SCIFA2_RXD, FN_FMIN_E,
+ 0, 0,
+ /* IP6_2_0 [3] */
+ FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
+ FN_SCIF_CLK, 0, FN_BPFCLK_E,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
+ 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3) {
+ /* IP7_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP7_29_27 [3] */
+ FN_DU1_DG2, FN_LCDOUT10, FN_VI1_DATA4_B, FN_SCIF1_SCK_B,
+ FN_SCIFA1_SCK, FN_SSI_SCK78_B,
+ 0, 0,
+ /* IP7_26_24 [3] */
+ FN_DU1_DG1, FN_LCDOUT9, FN_VI1_DATA3_B, FN_RX1_B,
+ FN_SCIFA1_RXD_B, FN_MSIOF2_SS2_B,
+ 0, 0,
+ /* IP7_23_21 [3] */
+ FN_DU1_DG0, FN_LCDOUT8, FN_VI1_DATA2_B, FN_TX1_B,
+ FN_SCIFA1_TXD_B, FN_MSIOF2_SS1_B,
+ 0, 0,
+ /* IP7_20_19 [2] */
+ FN_DU1_DR7, FN_LCDOUT7, FN_SSI_SDATA1_B, 0,
+ /* IP7_18_17 [2] */
+ FN_DU1_DR6, FN_LCDOUT6, FN_SSI_WS1_B, 0,
+ /* IP7_16_15 [2] */
+ FN_DU1_DR5, FN_LCDOUT5, FN_SSI_SCK1_B, 0,
+ /* IP7_14_13 [2] */
+ FN_DU1_DR4, FN_LCDOUT4, FN_SSI_SDATA0_B, 0,
+ /* IP7_12_11 [2] */
+ FN_DU1_DR3, FN_LCDOUT3, FN_SSI_WS0129_B, 0,
+ /* IP7_10_9 [2] */
+ FN_DU1_DR2, FN_LCDOUT2, FN_SSI_SCK0129_B, 0,
+ /* IP7_8_6 [3] */
+ FN_DU1_DR1, FN_LCDOUT1, FN_VI1_DATA1_B, FN_RX0_B,
+ FN_SCIFA0_RXD_B, FN_MSIOF2_SYNC_B,
+ 0, 0,
+ /* IP7_5_3 [3] */
+ FN_DU1_DR0, FN_LCDOUT0, FN_VI1_DATA0_B, FN_TX0_B,
+ FN_SCIFA0_TXD_B, FN_MSIOF2_SCK_B,
+ 0, 0,
+ /* IP7_2_0 [3] */
+ FN_IRQ9, FN_DU1_DOTCLKIN_B, FN_CAN_CLK_D, FN_GPS_MAG_C,
+ FN_SCIF_CLK_B, FN_GPS_MAG_D,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
+ 1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP8_31 [1] */
+ 0, 0,
+ /* IP8_30_28 [3] */
+ FN_DU1_DB5, FN_LCDOUT21, FN_TX3, FN_SCIFA3_TXD, FN_CAN1_TX,
+ 0, 0, 0,
+ /* IP8_27_26 [2] */
+ FN_DU1_DB4, FN_LCDOUT20, FN_VI1_FIELD_B, FN_CAN1_RX,
+ /* IP8_25_24 [2] */
+ FN_DU1_DB3, FN_LCDOUT19, FN_VI1_CLKENB_B, 0,
+ /* IP8_23_21 [3] */
+ FN_DU1_DB2, FN_LCDOUT18, FN_VI1_VSYNC_N_B, FN_SCIF2_SCK_B,
+ FN_SCIFA2_SCK, FN_SSI_SDATA9_B,
+ 0, 0,
+ /* IP8_20_18 [3] */
+ FN_DU1_DB1, FN_LCDOUT17, FN_VI1_HSYNC_N_B, FN_RX2_B,
+ FN_SCIFA2_RXD_B, FN_MSIOF2_RXD_B,
+ 0, 0,
+ /* IP8_17_15 [3] */
+ FN_DU1_DB0, FN_LCDOUT16, FN_VI1_CLK_B, FN_TX2_B,
+ FN_SCIFA2_TXD_B, FN_MSIOF2_TXD_B,
+ 0, 0,
+ /* IP8_14_12 [3] */
+ FN_DU1_DG7, FN_LCDOUT15, FN_HTX0_B,
+ FN_SCIFB2_RTS_N_B, FN_SSI_WS9_B,
+ 0, 0, 0,
+ /* IP8_11_9 [3] */
+ FN_DU1_DG6, FN_LCDOUT14, FN_HRTS0_N_B,
+ FN_SCIFB2_CTS_N_B, FN_SSI_SCK9_B,
+ 0, 0, 0,
+ /* IP8_8_6 [3] */
+ FN_DU1_DG5, FN_LCDOUT13, FN_VI1_DATA7_B, FN_HCTS0_N_B,
+ FN_SCIFB2_TXD_B, FN_SSI_SDATA8_B,
+ 0, 0,
+ /* IP8_5_3 [3] */
+ FN_DU1_DG4, FN_LCDOUT12, FN_VI1_DATA6_B, FN_HRX0_B,
+ FN_SCIFB2_RXD_B, FN_SSI_SDATA7_B,
+ 0, 0,
+ /* IP8_2_0 [3] */
+ FN_DU1_DG3, FN_LCDOUT11, FN_VI1_DATA5_B, 0, FN_SSI_WS78_B,
+ 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
+ 3, 2, 2, 2, 2, 2, 2, 1, 3, 1, 1, 3, 1, 1, 3, 3) {
+ /* IP9_31_29 [3] */
+ FN_VI0_G0, FN_SCL8, FN_STP_IVCXO27_0_C, FN_SCL4,
+ FN_HCTS2_N, FN_SCIFB2_CTS_N, FN_ATAWR1_N, 0,
+ /* IP9_28_27 [2] */
+ FN_VI0_DATA3_VI0_B3, FN_SCIF3_SCK_B, FN_SCIFA3_SCK_B, 0,
+ /* IP9_26_25 [2] */
+ FN_VI0_VSYNC_N, FN_RX5, FN_SCIFA5_RXD, FN_TS_SPSYNC0_D,
+ /* IP9_24_23 [2] */
+ FN_VI0_HSYNC_N, FN_TX5, FN_SCIFA5_TXD, FN_TS_SDEN0_D,
+ /* IP9_22_21 [2] */
+ FN_VI0_FIELD, FN_RX4, FN_SCIFA4_RXD, FN_TS_SCK0_D,
+ /* IP9_20_19 [2] */
+ FN_VI0_CLKENB, FN_TX4, FN_SCIFA4_TXD, FN_TS_SDATA0_D,
+ /* IP9_18_17 [2] */
+ FN_DU1_CDE, FN_QPOLB, FN_PWM4_B, 0,
+ /* IP9_16 [1] */
+ FN_DU1_DISP, FN_QPOLA,
+ /* IP9_15_13 [3] */
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_QCPV_QDE,
+ FN_CAN0_RX, FN_RX3_B, FN_SDA2_B,
+ 0, 0, 0,
+ /* IP9_12 [1] */
+ FN_DU1_EXVSYNC_DU1_VSYNC, FN_QSTB_QHE,
+ /* IP9_11 [1] */
+ FN_DU1_EXHSYNC_DU1_HSYNC, FN_QSTH_QHS,
+ /* IP9_10_8 [3] */
+ FN_DU1_DOTCLKOUT1, FN_QSTVB_QVE, FN_CAN0_TX,
+ FN_TX3_B, FN_SCL2_B, FN_PWM4,
+ 0, 0,
+ /* IP9_7 [1] */
+ FN_DU1_DOTCLKOUT0, FN_QCLK,
+ /* IP9_6 [1] */
+ FN_DU1_DOTCLKIN, FN_QSTVA_QVS,
+ /* IP9_5_3 [3] */
+ FN_DU1_DB7, FN_LCDOUT23, FN_SDA3_C,
+ FN_SCIF3_SCK, FN_SCIFA3_SCK,
+ 0, 0, 0,
+ /* IP9_2_0 [3] */
+ FN_DU1_DB6, FN_LCDOUT22, FN_SCL3_C, FN_RX3, FN_SCIFA3_RXD,
+ 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
+ 3, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 3) {
+ /* IP10_31_29 [3] */
+ FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_SCL1_D,
+ 0, 0, 0,
+ /* IP10_28_27 [2] */
+ FN_VI0_R3, FN_VI2_DATA4, FN_GLO_Q1_B, FN_TS_SPSYNC0_C,
+ /* IP10_26_25 [2] */
+ FN_VI0_R2, FN_VI2_DATA3, FN_GLO_Q0_B, FN_TS_SDEN0_C,
+ /* IP10_24_22 [3] */
+ FN_VI0_R1, FN_VI2_DATA2, FN_GLO_I1_B, FN_TS_SCK0_C, FN_ATAG1_N,
+ 0, 0, 0,
+ /* IP10_21_19 [3] */
+ FN_VI0_R0, FN_VI2_DATA1, FN_GLO_I0_B,
+ FN_TS_SDATA0_C, FN_ATACS11_N,
+ 0, 0, 0,
+ /* IP10_18_17 [2] */
+ FN_VI0_G7, FN_VI2_DATA0, FN_FMIN_D, 0,
+ /* IP10_16_15 [2] */
+ FN_VI0_G6, FN_VI2_CLK, FN_BPFCLK_D, 0,
+ /* IP10_14_12 [3] */
+ FN_VI0_G5, FN_VI2_FIELD, FN_STP_OPWM_0_C, FN_FMCLK_D,
+ FN_CAN0_TX_E, FN_HTX1_D, FN_SCIFB0_TXD_D, 0,
+ /* IP10_11_9 [3] */
+ FN_VI0_G4, FN_VI2_CLKENB, FN_STP_ISSYNC_0_C,
+ FN_HTX2, FN_SCIFB2_TXD, FN_SCIFB0_SCK_D,
+ 0, 0,
+ /* IP10_8_6 [3] */
+ FN_VI0_G3, FN_VI2_VSYNC_N, FN_STP_ISEN_0_C, FN_SDA3_B,
+ FN_HRX2, FN_SCIFB2_RXD, FN_ATACS01_N, 0,
+ /* IP10_5_3 [3] */
+ FN_VI0_G2, FN_VI2_HSYNC_N, FN_STP_ISD_0_C, FN_SCL3_B,
+ FN_HSCK2, FN_SCIFB2_SCK, FN_ATARD1_N, 0,
+ /* IP10_2_0 [3] */
+ FN_VI0_G1, FN_SDA8, FN_STP_ISCLK_0_C, FN_SDA4,
+ FN_HRTS2_N, FN_SCIFB2_RTS_N, FN_ATADIR1_N, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
+ 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 3, 3, 3, 3, 3) {
+ /* IP11_31_30 [2] */
+ FN_ETH_CRS_DV, FN_AVB_LINK, FN_SDA2_C, 0,
+ /* IP11_29_28 [2] */
+ FN_ETH_MDIO, FN_AVB_RX_CLK, FN_SCL2_C, 0,
+ /* IP11_27 [1] */
+ FN_VI1_DATA7, FN_AVB_MDC,
+ /* IP11_26 [1] */
+ FN_VI1_DATA6, FN_AVB_MAGIC,
+ /* IP11_25 [1] */
+ FN_VI1_DATA5, FN_AVB_RX_DV,
+ /* IP11_24 [1] */
+ FN_VI1_DATA4, FN_AVB_MDIO,
+ /* IP11_23 [1] */
+ FN_VI1_DATA3, FN_AVB_RX_ER,
+ /* IP11_22 [1] */
+ FN_VI1_DATA2, FN_AVB_RXD7,
+ /* IP11_21 [1] */
+ FN_VI1_DATA1, FN_AVB_RXD6,
+ /* IP11_20 [1] */
+ FN_VI1_DATA0, FN_AVB_RXD5,
+ /* IP11_19 [1] */
+ FN_VI1_CLK, FN_AVB_RXD4,
+ /* IP11_18_17 [2] */
+ FN_VI1_FIELD, FN_AVB_RXD3, FN_TS_SPSYNC0_B, 0,
+ /* IP11_16_15 [2] */
+ FN_VI1_CLKENB, FN_AVB_RXD2, FN_TS_SDEN0_B, 0,
+ /* IP11_14_12 [3] */
+ FN_VI1_VSYNC_N, FN_AVB_RXD1, FN_TS_SCK0_B,
+ FN_RX4_B, FN_SCIFA4_RXD_B,
+ 0, 0, 0,
+ /* IP11_11_9 [3] */
+ FN_VI1_HSYNC_N, FN_AVB_RXD0, FN_TS_SDATA0_B,
+ FN_TX4_B, FN_SCIFA4_TXD_B,
+ 0, 0, 0,
+ /* IP11_8_6 [3] */
+ FN_VI0_R7, FN_GLO_RFON_B, FN_RX1_C, FN_CAN0_RX_E,
+ FN_SDA4_B, FN_HRX1_D, FN_SCIFB0_RXD_D, 0,
+ /* IP11_5_3 [3] */
+ FN_VI0_R6, FN_VI2_DATA7, FN_GLO_SS_B, FN_TX1_C, FN_SCL4_B,
+ 0, 0, 0,
+ /* IP11_2_0 [3] */
+ FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C, FN_SDA1_D,
+ 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
+ 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2) {
+ /* IP12_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP12_29_27 [3] */
+ FN_STP_ISCLK_0, FN_AVB_TX_EN, FN_SCIFB2_RXD_D,
+ FN_ADICS_SAMP_B, FN_MSIOF0_SCK_C,
+ 0, 0, 0,
+ /* IP12_26_24 [3] */
+ FN_STP_IVCXO27_0, FN_AVB_TXD7, FN_SCIFB2_TXD_D,
+ FN_ADIDATA_B, FN_MSIOF0_SYNC_C,
+ 0, 0, 0,
+ /* IP12_23_22 [2] */
+ FN_ETH_MDC, FN_AVB_TXD6, FN_IERX_C, 0,
+ /* IP12_21_20 [2] */
+ FN_ETH_TXD0, FN_AVB_TXD5, FN_IECLK_C, 0,
+ /* IP12_19_18 [2] */
+ FN_ETH_MAGIC, FN_AVB_TXD4, FN_IETX_C, 0,
+ /* IP12_17_16 [2] */
+ FN_ETH_TX_EN, FN_AVB_TXD3, FN_TCLK1_B, FN_CAN_CLK_B,
+ /* IP12_15_13 [3] */
+ FN_ETH_TXD1, FN_AVB_TXD2, FN_SCIFA3_TXD_B,
+ FN_CAN1_TX_C, FN_MSIOF1_TXD_E,
+ 0, 0, 0,
+ /* IP12_12_10 [3] */
+ FN_ETH_REFCLK, FN_AVB_TXD1, FN_SCIFA3_RXD_B,
+ FN_CAN1_RX_C, FN_MSIOF1_SYNC_E,
+ 0, 0, 0,
+ /* IP12_9_7 [3] */
+ FN_ETH_LINK, FN_AVB_TXD0, FN_CAN0_RX_C,
+ FN_SDA2_D, FN_MSIOF1_SCK_E,
+ 0, 0, 0,
+ /* IP12_6_4 [3] */
+ FN_ETH_RXD1, FN_AVB_GTXREFCLK, FN_CAN0_TX_C,
+ FN_SCL2_D, FN_MSIOF1_RXD_E,
+ 0, 0, 0,
+ /* IP12_3_2 [2] */
+ FN_ETH_RXD0, FN_AVB_PHY_INT, FN_SDA3, FN_SDA7,
+ /* IP12_1_0 [2] */
+ FN_ETH_RX_ER, FN_AVB_CRS, FN_SCL3, FN_SCL7, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
+ 1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1, 1, 1, 1,
+ 3, 2, 2, 3) {
+ /* IP13_31 [1] */
+ 0, 0,
+ /* IP13_30_28 [3] */
+ FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_SCL1_C,
+ 0, 0, 0, 0,
+ /* IP13_27 [1] */
+ FN_SD1_DATA3, FN_IERX_B,
+ /* IP13_26 [1] */
+ FN_SD1_DATA2, FN_IECLK_B,
+ /* IP13_25 [1] */
+ FN_SD1_DATA1, FN_IETX_B,
+ /* IP13_24_23 [2] */
+ FN_SD1_DATA0, FN_SPEEDIN_B, 0, 0,
+ /* IP13_22 [1] */
+ FN_SD1_CMD, FN_REMOCON_B,
+ /* IP13_21_19 [3] */
+ FN_SD0_WP, FN_MMC_D7_B, FN_SIM0_D_B, FN_CAN0_TX_F,
+ FN_SCIFA5_RXD_B, FN_RX3_C,
+ 0, 0,
+ /* IP13_18_16 [3] */
+ FN_SD0_CD, FN_MMC_D6_B, FN_SIM0_RST_B, FN_CAN0_RX_F,
+ FN_SCIFA5_TXD_B, FN_TX3_C,
+ 0, 0,
+ /* IP13_15 [1] */
+ FN_SD0_DATA3, FN_SSL_B,
+ /* IP13_14 [1] */
+ FN_SD0_DATA2, FN_IO3_B,
+ /* IP13_13 [1] */
+ FN_SD0_DATA1, FN_IO2_B,
+ /* IP13_12 [1] */
+ FN_SD0_DATA0, FN_MISO_IO1_B,
+ /* IP13_11 [1] */
+ FN_SD0_CMD, FN_MOSI_IO0_B,
+ /* IP13_10 [1] */
+ FN_SD0_CLK, FN_SPCLK_B,
+ /* IP13_9_7 [3] */
+ FN_STP_OPWM_0, FN_AVB_GTX_CLK, FN_PWM0_B,
+ FN_ADICHS2_B, FN_MSIOF0_TXD_C,
+ 0, 0, 0,
+ /* IP13_6_5 [2] */
+ FN_STP_ISSYNC_0, FN_AVB_COL, FN_ADICHS1_B, FN_MSIOF0_RXD_C,
+ /* IP13_4_3 [2] */
+ FN_STP_ISEN_0, FN_AVB_TX_CLK, FN_ADICHS0_B, FN_MSIOF0_SS2_C,
+ /* IP13_2_0 [3] */
+ FN_STP_ISD_0, FN_AVB_TX_ER, FN_SCIFB2_SCK_C,
+ FN_ADICLK_B, FN_MSIOF0_SS1_C,
+ 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR14", 0xE6060058, 32,
+ 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2) {
+ /* IP14_31_29 [3] */
+ FN_MSIOF0_SS2, FN_MMC_D7, FN_ADICHS2, FN_RX0_E,
+ FN_VI1_VSYNC_N_C, FN_SDA7_C, FN_VI1_G5_B, 0,
+ /* IP14_28_26 [3] */
+ FN_MSIOF0_SS1, FN_MMC_D6, FN_ADICHS1, FN_TX0_E,
+ FN_VI1_HSYNC_N_C, FN_SCL7_C, FN_VI1_G4_B, 0,
+ /* IP14_25_23 [3] */
+ FN_MSIOF0_RXD, FN_ADICHS0, 0, FN_VI1_DATA0_C, FN_VI1_G3_B,
+ 0, 0, 0,
+ /* IP14_22_20 [3] */
+ FN_MSIOF0_TXD, FN_ADICLK, 0, FN_VI1_FIELD_C, FN_VI1_G2_B,
+ 0, 0, 0,
+ /* IP14_19_17 [3] */
+ FN_MSIOF0_SYNC, FN_TX2_C, FN_ADICS_SAMP, 0,
+ FN_VI1_CLKENB_C, FN_VI1_G1_B,
+ 0, 0,
+ /* IP14_16_14 [3] */
+ FN_MSIOF0_SCK, FN_RX2_C, FN_ADIDATA, 0,
+ FN_VI1_CLK_C, FN_VI1_G0_B,
+ 0, 0,
+ /* IP14_13_11 [3] */
+ FN_SD2_WP, FN_MMC_D5, FN_SDA8_C, FN_RX5_B, FN_SCIFA5_RXD_C,
+ 0, 0, 0,
+ /* IP14_10_8 [3] */
+ FN_SD2_CD, FN_MMC_D4, FN_SCL8_C, FN_TX5_B, FN_SCIFA5_TXD_C,
+ 0, 0, 0,
+ /* IP14_7 [1] */
+ FN_SD2_DATA3, FN_MMC_D3,
+ /* IP14_6 [1] */
+ FN_SD2_DATA2, FN_MMC_D2,
+ /* IP14_5 [1] */
+ FN_SD2_DATA1, FN_MMC_D1,
+ /* IP14_4 [1] */
+ FN_SD2_DATA0, FN_MMC_D0,
+ /* IP14_3 [1] */
+ FN_SD2_CMD, FN_MMC_CMD,
+ /* IP14_2 [1] */
+ FN_SD2_CLK, FN_MMC_CLK,
+ /* IP14_1_0 [2] */
+ FN_SD1_WP, FN_PWM1_B, FN_SDA1_C, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2) {
+ /* IP15_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP15_29_27 [3] */
+ FN_HTX0, FN_SCIFB0_TXD, 0, FN_GLO_SCLK_C,
+ FN_CAN0_TX_B, FN_VI1_DATA5_C,
+ 0, 0,
+ /* IP15_26_24 [3] */
+ FN_HRX0, FN_SCIFB0_RXD, 0, FN_GLO_Q1_C,
+ FN_CAN0_RX_B, FN_VI1_DATA4_C,
+ 0, 0,
+ /* IP15_23_21 [3] */
+ FN_HSCK0, FN_SCIFB0_SCK, 0, FN_GLO_Q0_C, FN_CAN_CLK,
+ FN_TCLK2, FN_VI1_DATA3_C, 0,
+ /* IP15_20_18 [3] */
+ FN_HRTS0_N, FN_SCIFB0_RTS_N, 0, FN_GLO_I1_C, FN_VI1_DATA2_C,
+ 0, 0, 0,
+ /* IP15_17_15 [3] */
+ FN_HCTS0_N, FN_SCIFB0_CTS_N, 0, FN_GLO_I0_C,
+ FN_TCLK1, FN_VI1_DATA1_C,
+ 0, 0,
+ /* IP15_14_12 [3] */
+ FN_GPS_MAG, FN_RX4_C, FN_SCIFA4_RXD_C, FN_PWM6,
+ FN_VI1_G7_B, FN_SCIFA3_SCK_C,
+ 0, 0,
+ /* IP15_11_9 [3] */
+ FN_GPS_SIGN, FN_TX4_C, FN_SCIFA4_TXD_C, FN_PWM5,
+ FN_VI1_G6_B, FN_SCIFA3_RXD_C,
+ 0, 0,
+ /* IP15_8_6 [3] */
+ FN_GPS_CLK, FN_DU1_DOTCLKIN_C, FN_AUDIO_CLKB_B,
+ FN_PWM5_B, FN_SCIFA3_TXD_C,
+ 0, 0, 0,
+ /* IP15_5_4 [2] */
+ FN_SIM0_D, FN_IERX, FN_CAN1_RX_D, 0,
+ /* IP15_3_2 [2] */
+ FN_SIM0_CLK, FN_IECLK, FN_CAN_CLK_C, 0,
+ /* IP15_1_0 [2] */
+ FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32,
+ 4, 4, 4, 4, 4, 2, 2, 2, 3, 3) {
+ /* IP16_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_19_16 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_15_12 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_11_10 [2] */
+ FN_HRTS1_N, FN_SCIFB1_RTS_N, FN_MLB_DAT, FN_CAN1_RX_B,
+ /* IP16_9_8 [2] */
+ FN_HCTS1_N, FN_SCIFB1_CTS_N, FN_MLB_SIG, FN_CAN1_TX_B,
+ /* IP16_7_6 [2] */
+ FN_HSCK1, FN_SCIFB1_SCK, FN_MLB_CK, FN_GLO_RFON_C,
+ /* IP16_5_3 [3] */
+ FN_HTX1, FN_SCIFB1_TXD, FN_VI1_R1_B,
+ FN_GLO_SS_C, FN_VI1_DATA7_C,
+ 0, 0, 0,
+ /* IP16_2_0 [3] */
+ FN_HRX1, FN_SCIFB1_RXD, FN_VI1_R0_B,
+ FN_GLO_SDATA_C, FN_VI1_DATA6_C,
+ 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
+ 1, 2, 2, 2, 3, 2, 1, 1, 1, 1,
+ 3, 2, 2, 2, 1, 2, 2, 2) {
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_SCIF1 [2] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ /* SEL_SCIFB [2] */
+ FN_SEL_SCIFB_0, FN_SEL_SCIFB_1, FN_SEL_SCIFB_2, FN_SEL_SCIFB_3,
+ /* SEL_SCIFB2 [2] */
+ FN_SEL_SCIFB2_0, FN_SEL_SCIFB2_1,
+ FN_SEL_SCIFB2_2, FN_SEL_SCIFB2_3,
+ /* SEL_SCIFB1 [3] */
+ FN_SEL_SCIFB1_0, FN_SEL_SCIFB1_1,
+ FN_SEL_SCIFB1_2, FN_SEL_SCIFB1_3,
+ 0, 0, 0, 0,
+ /* SEL_SCIFA1 [2] */
+ FN_SEL_SCIFA1_0, FN_SEL_SCIFA1_1, FN_SEL_SCIFA1_2, 0,
+ /* SEL_SSI9 [1] */
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1,
+ /* SEL_SCFA [1] */
+ FN_SEL_SCFA_0, FN_SEL_SCFA_1,
+ /* SEL_QSP [1] */
+ FN_SEL_QSP_0, FN_SEL_QSP_1,
+ /* SEL_SSI7 [1] */
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1,
+ /* SEL_HSCIF1 [3] */
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2,
+ FN_SEL_HSCIF1_3, FN_SEL_HSCIF1_4,
+ 0, 0, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_VI1 [2] */
+ FN_SEL_VI1_0, FN_SEL_VI1_1, FN_SEL_VI1_2, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_TMU [1] */
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1,
+ /* SEL_LBS [2] */
+ FN_SEL_LBS_0, FN_SEL_LBS_1, FN_SEL_LBS_2, FN_SEL_LBS_3,
+ /* SEL_TSIF0 [2] */
+ FN_SEL_TSIF0_0, FN_SEL_TSIF0_1, FN_SEL_TSIF0_2, FN_SEL_TSIF0_3,
+ /* SEL_SOF0 [2] */
+ FN_SEL_SOF0_0, FN_SEL_SOF0_1, FN_SEL_SOF0_2, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
+ 3, 1, 1, 3, 2, 1, 1, 2, 2,
+ 1, 3, 2, 1, 2, 2, 2, 1, 1, 1) {
+ /* SEL_SCIF0 [3] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2,
+ FN_SEL_SCIF0_3, FN_SEL_SCIF0_4,
+ 0, 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_SCIF [1] */
+ FN_SEL_SCIF_0, FN_SEL_SCIF_1,
+ /* SEL_CAN0 [3] */
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ FN_SEL_CAN0_4, FN_SEL_CAN0_5,
+ 0, 0,
+ /* SEL_CAN1 [2] */
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_SCIFA2 [1] */
+ FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1,
+ /* SEL_SCIF4 [2] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_ADG [1] */
+ FN_SEL_ADG_0, FN_SEL_ADG_1,
+ /* SEL_FM [3] */
+ FN_SEL_FM_0, FN_SEL_FM_1, FN_SEL_FM_2,
+ FN_SEL_FM_3, FN_SEL_FM_4,
+ 0, 0, 0,
+ /* SEL_SCIFA5 [2] */
+ FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_GPS [2] */
+ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
+ /* SEL_SCIFA4 [2] */
+ FN_SEL_SCIFA4_0, FN_SEL_SCIFA4_1, FN_SEL_SCIFA4_2, 0,
+ /* SEL_SCIFA3 [2] */
+ FN_SEL_SCIFA3_0, FN_SEL_SCIFA3_1, FN_SEL_SCIFA3_2, 0,
+ /* SEL_SIM [1] */
+ FN_SEL_SIM_0, FN_SEL_SIM_1,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_SSI8 [1] */
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 2, 2, 3, 2, 2, 2, 1) {
+ /* SEL_HSCIF2 [2] */
+ FN_SEL_HSCIF2_0, FN_SEL_HSCIF2_1,
+ FN_SEL_HSCIF2_2, FN_SEL_HSCIF2_3,
+ /* SEL_CANCLK [2] */
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1,
+ FN_SEL_CANCLK_2, FN_SEL_CANCLK_3,
+ /* SEL_IIC8 [2] */
+ FN_SEL_IIC8_0, FN_SEL_IIC8_1, FN_SEL_IIC8_2, 0,
+ /* SEL_IIC7 [2] */
+ FN_SEL_IIC7_0, FN_SEL_IIC7_1, FN_SEL_IIC7_2, 0,
+ /* SEL_IIC4 [2] */
+ FN_SEL_IIC4_0, FN_SEL_IIC4_1, FN_SEL_IIC4_2, 0,
+ /* SEL_IIC3 [2] */
+ FN_SEL_IIC3_0, FN_SEL_IIC3_1, FN_SEL_IIC3_2, FN_SEL_IIC3_3,
+ /* SEL_SCIF3 [2] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
+ /* SEL_IEB [2] */
+ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
+ /* SEL_MMC [1] */
+ FN_SEL_MMC_0, FN_SEL_MMC_1,
+ /* SEL_SCIF5 [1] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_IIC2 [2] */
+ FN_SEL_IIC2_0, FN_SEL_IIC2_1, FN_SEL_IIC2_2, FN_SEL_IIC2_3,
+ /* SEL_IIC1 [3] */
+ FN_SEL_IIC1_0, FN_SEL_IIC1_1, FN_SEL_IIC1_2, FN_SEL_IIC1_3,
+ FN_SEL_IIC1_4,
+ 0, 0, 0,
+ /* SEL_IIC0 [2] */
+ FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* RESERVED [1] */
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE606009C, 32,
+ 3, 2, 2, 1, 1, 1, 1, 3, 2,
+ 2, 3, 1, 1, 1, 2, 2, 2, 2) {
+ /* SEL_SOF1 [3] */
+ FN_SEL_SOF1_0, FN_SEL_SOF1_1, FN_SEL_SOF1_2, FN_SEL_SOF1_3,
+ FN_SEL_SOF1_4,
+ 0, 0, 0,
+ /* SEL_HSCIF0 [2] */
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, FN_SEL_HSCIF0_2, 0,
+ /* SEL_DIS [2] */
+ FN_SEL_DIS_0, FN_SEL_DIS_1, FN_SEL_DIS_2, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_RAD [1] */
+ FN_SEL_RAD_0, FN_SEL_RAD_1,
+ /* SEL_RCN [1] */
+ FN_SEL_RCN_0, FN_SEL_RCN_1,
+ /* SEL_RSP [1] */
+ FN_SEL_RSP_0, FN_SEL_RSP_1,
+ /* SEL_SCIF2 [3] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_3, FN_SEL_SCIF2_4,
+ 0, 0, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_SOF2 [3] */
+ FN_SEL_SOF2_0, FN_SEL_SOF2_1, FN_SEL_SOF2_2,
+ FN_SEL_SOF2_3, FN_SEL_SOF2_4,
+ 0, 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_SSI1 [1] */
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1,
+ /* SEL_SSI0 [1] */
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ /* SEL_SSP [2] */
+ FN_SEL_SSP_0, FN_SEL_SSP_1, FN_SEL_SSP_2, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* RESERVED [2] */
+ 0, 0, 0, 0, }
+ },
+ { },
+};
+
+const struct sh_pfc_soc_info r8a7791_pinmux_info = {
+ .name = "r8a77910_pfc",
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index edf45a6940c..8ab7898d21b 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -122,6 +122,9 @@ static const struct pinctrl_pin_desc sirfsoc_pads[] = {
PINCTRL_PIN(100, "ac97_dout"),
PINCTRL_PIN(101, "ac97_din"),
PINCTRL_PIN(102, "x_rtc_io"),
+
+ PINCTRL_PIN(103, "x_usb1_dp"),
+ PINCTRL_PIN(104, "x_usb1_dn"),
};
static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
@@ -139,6 +142,7 @@ static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
static const struct sirfsoc_padmux lcd_16bits_padmux = {
.muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
.muxmask = lcd_16bits_sirfsoc_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = 0,
};
@@ -164,6 +168,7 @@ static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
static const struct sirfsoc_padmux lcd_18bits_padmux = {
.muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
.muxmask = lcd_18bits_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4) | BIT(15),
.funcval = 0,
};
@@ -189,6 +194,7 @@ static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
static const struct sirfsoc_padmux lcd_24bits_padmux = {
.muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
.muxmask = lcd_24bits_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4) | BIT(15),
.funcval = 0,
};
@@ -214,6 +220,7 @@ static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
static const struct sirfsoc_padmux lcdrom_padmux = {
.muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
.muxmask = lcdrom_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = BIT(4),
};
@@ -237,6 +244,7 @@ static const struct sirfsoc_muxmask uart0_muxmask[] = {
static const struct sirfsoc_padmux uart0_padmux = {
.muxmask_counts = ARRAY_SIZE(uart0_muxmask),
.muxmask = uart0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(9),
.funcval = BIT(9),
};
@@ -284,6 +292,7 @@ static const struct sirfsoc_muxmask uart2_muxmask[] = {
static const struct sirfsoc_padmux uart2_padmux = {
.muxmask_counts = ARRAY_SIZE(uart2_muxmask),
.muxmask = uart2_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(10),
.funcval = BIT(10),
};
@@ -317,6 +326,7 @@ static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
static const struct sirfsoc_padmux sdmmc3_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
.muxmask = sdmmc3_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(7),
.funcval = 0,
};
@@ -336,6 +346,7 @@ static const struct sirfsoc_muxmask spi0_muxmask[] = {
static const struct sirfsoc_padmux spi0_padmux = {
.muxmask_counts = ARRAY_SIZE(spi0_muxmask),
.muxmask = spi0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(7),
.funcval = BIT(7),
};
@@ -352,6 +363,7 @@ static const struct sirfsoc_muxmask cko1_muxmask[] = {
static const struct sirfsoc_padmux cko1_padmux = {
.muxmask_counts = ARRAY_SIZE(cko1_muxmask),
.muxmask = cko1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(3),
.funcval = 0,
};
@@ -371,6 +383,7 @@ static const struct sirfsoc_muxmask i2s_muxmask[] = {
static const struct sirfsoc_padmux i2s_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_muxmask),
.muxmask = i2s_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(3),
.funcval = BIT(3),
};
@@ -390,6 +403,7 @@ static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = {
static const struct sirfsoc_padmux i2s_no_din_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask),
.muxmask = i2s_no_din_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(3),
.funcval = BIT(3),
};
@@ -409,6 +423,7 @@ static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = {
static const struct sirfsoc_padmux i2s_6chn_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_6chn_muxmask),
.muxmask = i2s_6chn_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(1) | BIT(3) | BIT(9),
.funcval = BIT(1) | BIT(3) | BIT(9),
};
@@ -439,6 +454,7 @@ static const struct sirfsoc_muxmask spi1_muxmask[] = {
static const struct sirfsoc_padmux spi1_padmux = {
.muxmask_counts = ARRAY_SIZE(spi1_muxmask),
.muxmask = spi1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(16),
.funcval = 0,
};
@@ -455,6 +471,7 @@ static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
static const struct sirfsoc_padmux sdmmc1_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask),
.muxmask = sdmmc1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(5),
.funcval = BIT(5),
};
@@ -471,6 +488,7 @@ static const struct sirfsoc_muxmask gps_muxmask[] = {
static const struct sirfsoc_padmux gps_padmux = {
.muxmask_counts = ARRAY_SIZE(gps_muxmask),
.muxmask = gps_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(13),
.funcval = 0,
};
@@ -487,6 +505,7 @@ static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
static const struct sirfsoc_padmux sdmmc5_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
.muxmask = sdmmc5_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(13),
.funcval = BIT(13),
};
@@ -503,6 +522,7 @@ static const struct sirfsoc_muxmask usp0_muxmask[] = {
static const struct sirfsoc_padmux usp0_padmux = {
.muxmask_counts = ARRAY_SIZE(usp0_muxmask),
.muxmask = usp0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(1) | BIT(2) | BIT(9),
.funcval = 0,
};
@@ -535,6 +555,7 @@ static const struct sirfsoc_muxmask usp1_muxmask[] = {
static const struct sirfsoc_padmux usp1_padmux = {
.muxmask_counts = ARRAY_SIZE(usp1_muxmask),
.muxmask = usp1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(16),
.funcval = BIT(16),
};
@@ -554,6 +575,7 @@ static const struct sirfsoc_muxmask nand_muxmask[] = {
static const struct sirfsoc_padmux nand_padmux = {
.muxmask_counts = ARRAY_SIZE(nand_muxmask),
.muxmask = nand_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(5) | BIT(19),
.funcval = 0,
};
@@ -570,6 +592,7 @@ static const struct sirfsoc_muxmask sdmmc0_muxmask[] = {
static const struct sirfsoc_padmux sdmmc0_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc0_muxmask),
.muxmask = sdmmc0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(5) | BIT(19),
.funcval = BIT(19),
};
@@ -586,6 +609,7 @@ static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
static const struct sirfsoc_padmux sdmmc2_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
.muxmask = sdmmc2_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(11),
.funcval = 0,
};
@@ -602,6 +626,7 @@ static const struct sirfsoc_muxmask sdmmc2_nowp_muxmask[] = {
static const struct sirfsoc_padmux sdmmc2_nowp_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc2_nowp_muxmask),
.muxmask = sdmmc2_nowp_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(11),
.funcval = 0,
};
@@ -634,6 +659,7 @@ static const struct sirfsoc_muxmask vip_muxmask[] = {
static const struct sirfsoc_padmux vip_padmux = {
.muxmask_counts = ARRAY_SIZE(vip_muxmask),
.muxmask = vip_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(18),
.funcval = BIT(18),
};
@@ -654,6 +680,7 @@ static const struct sirfsoc_muxmask vip_noupli_muxmask[] = {
static const struct sirfsoc_padmux vip_noupli_padmux = {
.muxmask_counts = ARRAY_SIZE(vip_noupli_muxmask),
.muxmask = vip_noupli_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(15),
.funcval = BIT(15),
};
@@ -684,6 +711,7 @@ static const struct sirfsoc_muxmask i2c1_muxmask[] = {
static const struct sirfsoc_padmux i2c1_padmux = {
.muxmask_counts = ARRAY_SIZE(i2c1_muxmask),
.muxmask = i2c1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(16),
.funcval = 0,
};
@@ -700,6 +728,7 @@ static const struct sirfsoc_muxmask pwm0_muxmask[] = {
static const struct sirfsoc_padmux pwm0_padmux = {
.muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
.muxmask = pwm0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(12),
.funcval = 0,
};
@@ -772,6 +801,7 @@ static const struct sirfsoc_muxmask warm_rst_muxmask[] = {
static const struct sirfsoc_padmux warm_rst_padmux = {
.muxmask_counts = ARRAY_SIZE(warm_rst_muxmask),
.muxmask = warm_rst_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = 0,
};
@@ -789,6 +819,7 @@ static const struct sirfsoc_muxmask usb0_upli_drvbus_muxmask[] = {
static const struct sirfsoc_padmux usb0_upli_drvbus_padmux = {
.muxmask_counts = ARRAY_SIZE(usb0_upli_drvbus_muxmask),
.muxmask = usb0_upli_drvbus_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(18),
.funcval = 0,
};
@@ -805,12 +836,31 @@ static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
.muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
.muxmask = usb1_utmi_drvbus_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(11),
.funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
};
static const unsigned usb1_utmi_drvbus_pins[] = { 28 };
+static const struct sirfsoc_padmux usb1_dp_dn_padmux = {
+ .muxmask_counts = 0,
+ .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
+ .funcmask = BIT(2),
+ .funcval = BIT(2),
+};
+
+static const unsigned usb1_dp_dn_pins[] = { 103, 104 };
+
+static const struct sirfsoc_padmux uart1_route_io_usb1_padmux = {
+ .muxmask_counts = 0,
+ .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
+ .funcmask = BIT(2),
+ .funcval = 0,
+};
+
+static const unsigned uart1_route_io_usb1_pins[] = { 103, 104 };
+
static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
{
.group = 0,
@@ -859,6 +909,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
SIRFSOC_PIN_GROUP("usb0_upli_drvbusgrp", usb0_upli_drvbus_pins),
SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
+ SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins),
+ SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins),
SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins),
@@ -903,6 +955,8 @@ static const char * const sdmmc5grp[] = { "sdmmc5grp" };
static const char * const sdmmc2_nowpgrp[] = { "sdmmc2_nowpgrp" };
static const char * const usb0_upli_drvbusgrp[] = { "usb0_upli_drvbusgrp" };
static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
+static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" };
+static const char * const uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
static const char * const pulse_countgrp[] = { "pulse_countgrp" };
static const char * const i2sgrp[] = { "i2sgrp" };
static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" };
@@ -949,6 +1003,8 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("sdmmc2_nowp", sdmmc2_nowpgrp, sdmmc2_nowp_padmux),
SIRFSOC_PMX_FUNCTION("usb0_upli_drvbus", usb0_upli_drvbusgrp, usb0_upli_drvbus_padmux),
SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux),
+ SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 1f0ad1ef5a3..050777be0f1 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -126,6 +126,9 @@ static const struct pinctrl_pin_desc sirfsoc_pads[] = {
PINCTRL_PIN(112, "x_ldd[13]"),
PINCTRL_PIN(113, "x_ldd[14]"),
PINCTRL_PIN(114, "x_ldd[15]"),
+
+ PINCTRL_PIN(115, "x_usb1_dp"),
+ PINCTRL_PIN(116, "x_usb1_dn"),
};
static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
@@ -143,6 +146,7 @@ static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
static const struct sirfsoc_padmux lcd_16bits_padmux = {
.muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
.muxmask = lcd_16bits_sirfsoc_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = 0,
};
@@ -168,6 +172,7 @@ static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
static const struct sirfsoc_padmux lcd_18bits_padmux = {
.muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
.muxmask = lcd_18bits_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = 0,
};
@@ -193,6 +198,7 @@ static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
static const struct sirfsoc_padmux lcd_24bits_padmux = {
.muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
.muxmask = lcd_24bits_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = 0,
};
@@ -218,6 +224,7 @@ static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
static const struct sirfsoc_padmux lcdrom_padmux = {
.muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
.muxmask = lcdrom_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(4),
.funcval = BIT(4),
};
@@ -238,6 +245,7 @@ static const struct sirfsoc_muxmask uart0_muxmask[] = {
static const struct sirfsoc_padmux uart0_padmux = {
.muxmask_counts = ARRAY_SIZE(uart0_muxmask),
.muxmask = uart0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(9),
.funcval = BIT(9),
};
@@ -282,6 +290,7 @@ static const struct sirfsoc_muxmask uart2_muxmask[] = {
static const struct sirfsoc_padmux uart2_padmux = {
.muxmask_counts = ARRAY_SIZE(uart2_muxmask),
.muxmask = uart2_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(10),
.funcval = BIT(10),
};
@@ -315,6 +324,7 @@ static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
static const struct sirfsoc_padmux sdmmc3_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
.muxmask = sdmmc3_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(7),
.funcval = 0,
};
@@ -331,6 +341,7 @@ static const struct sirfsoc_muxmask spi0_muxmask[] = {
static const struct sirfsoc_padmux spi0_padmux = {
.muxmask_counts = ARRAY_SIZE(spi0_muxmask),
.muxmask = spi0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(7),
.funcval = BIT(7),
};
@@ -361,6 +372,7 @@ static const struct sirfsoc_muxmask cko1_muxmask[] = {
static const struct sirfsoc_padmux cko1_padmux = {
.muxmask_counts = ARRAY_SIZE(cko1_muxmask),
.muxmask = cko1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(3),
.funcval = 0,
};
@@ -379,6 +391,7 @@ static const struct sirfsoc_muxmask i2s_muxmask[] = {
static const struct sirfsoc_padmux i2s_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_muxmask),
.muxmask = i2s_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(3) | BIT(9),
.funcval = BIT(3),
};
@@ -395,6 +408,7 @@ static const struct sirfsoc_muxmask ac97_muxmask[] = {
static const struct sirfsoc_padmux ac97_padmux = {
.muxmask_counts = ARRAY_SIZE(ac97_muxmask),
.muxmask = ac97_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(8),
.funcval = 0,
};
@@ -411,6 +425,7 @@ static const struct sirfsoc_muxmask spi1_muxmask[] = {
static const struct sirfsoc_padmux spi1_padmux = {
.muxmask_counts = ARRAY_SIZE(spi1_muxmask),
.muxmask = spi1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(8),
.funcval = BIT(8),
};
@@ -441,6 +456,7 @@ static const struct sirfsoc_muxmask gps_muxmask[] = {
static const struct sirfsoc_padmux gps_padmux = {
.muxmask_counts = ARRAY_SIZE(gps_muxmask),
.muxmask = gps_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(12) | BIT(13) | BIT(14),
.funcval = BIT(12),
};
@@ -463,6 +479,7 @@ static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
static const struct sirfsoc_padmux sdmmc5_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
.muxmask = sdmmc5_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(13) | BIT(14),
.funcval = BIT(13) | BIT(14),
};
@@ -479,12 +496,27 @@ static const struct sirfsoc_muxmask usp0_muxmask[] = {
static const struct sirfsoc_padmux usp0_padmux = {
.muxmask_counts = ARRAY_SIZE(usp0_muxmask),
.muxmask = usp0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(1) | BIT(2) | BIT(6) | BIT(9),
.funcval = 0,
};
static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(20) | BIT(21),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
+ .muxmask = usp0_uart_nostreamctrl_muxmask,
+};
+
+static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
+
static const struct sirfsoc_muxmask usp1_muxmask[] = {
{
.group = 1,
@@ -495,12 +527,27 @@ static const struct sirfsoc_muxmask usp1_muxmask[] = {
static const struct sirfsoc_padmux usp1_padmux = {
.muxmask_counts = ARRAY_SIZE(usp1_muxmask),
.muxmask = usp1_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(1) | BIT(9) | BIT(10) | BIT(11),
.funcval = 0,
};
static const unsigned usp1_pins[] = { 56, 57, 58, 59, 60 };
+static const struct sirfsoc_muxmask usp1_uart_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(25) | BIT(26),
+ },
+};
+
+static const struct sirfsoc_padmux usp1_uart_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp1_uart_nostreamctrl_muxmask),
+ .muxmask = usp1_uart_nostreamctrl_muxmask,
+};
+
+static const unsigned usp1_uart_nostreamctrl_pins[] = { 57, 58 };
+
static const struct sirfsoc_muxmask usp2_muxmask[] = {
{
.group = 1,
@@ -514,12 +561,27 @@ static const struct sirfsoc_muxmask usp2_muxmask[] = {
static const struct sirfsoc_padmux usp2_padmux = {
.muxmask_counts = ARRAY_SIZE(usp2_muxmask),
.muxmask = usp2_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(13) | BIT(14),
.funcval = 0,
};
static const unsigned usp2_pins[] = { 61, 62, 63, 64, 65 };
+static const struct sirfsoc_muxmask usp2_uart_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(30) | BIT(31),
+ },
+};
+
+static const struct sirfsoc_padmux usp2_uart_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp2_uart_nostreamctrl_muxmask),
+ .muxmask = usp2_uart_nostreamctrl_muxmask,
+};
+
+static const unsigned usp2_uart_nostreamctrl_pins[] = { 62, 63 };
+
static const struct sirfsoc_muxmask nand_muxmask[] = {
{
.group = 2,
@@ -530,6 +592,7 @@ static const struct sirfsoc_muxmask nand_muxmask[] = {
static const struct sirfsoc_padmux nand_padmux = {
.muxmask_counts = ARRAY_SIZE(nand_muxmask),
.muxmask = nand_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(5),
.funcval = 0,
};
@@ -538,6 +601,7 @@ static const unsigned nand_pins[] = { 64, 65, 92, 93, 94 };
static const struct sirfsoc_padmux sdmmc0_padmux = {
.muxmask_counts = 0,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(5),
.funcval = 0,
};
@@ -554,6 +618,7 @@ static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
static const struct sirfsoc_padmux sdmmc2_padmux = {
.muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
.muxmask = sdmmc2_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(5),
.funcval = BIT(5),
};
@@ -586,6 +651,7 @@ static const struct sirfsoc_muxmask vip_muxmask[] = {
static const struct sirfsoc_padmux vip_padmux = {
.muxmask_counts = ARRAY_SIZE(vip_muxmask),
.muxmask = vip_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(0),
.funcval = 0,
};
@@ -635,6 +701,7 @@ static const struct sirfsoc_muxmask viprom_muxmask[] = {
static const struct sirfsoc_padmux viprom_padmux = {
.muxmask_counts = ARRAY_SIZE(viprom_muxmask),
.muxmask = viprom_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(0),
.funcval = BIT(0),
};
@@ -651,6 +718,7 @@ static const struct sirfsoc_muxmask pwm0_muxmask[] = {
static const struct sirfsoc_padmux pwm0_padmux = {
.muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
.muxmask = pwm0_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(12),
.funcval = 0,
};
@@ -722,6 +790,7 @@ static const struct sirfsoc_muxmask usb0_utmi_drvbus_muxmask[] = {
static const struct sirfsoc_padmux usb0_utmi_drvbus_padmux = {
.muxmask_counts = ARRAY_SIZE(usb0_utmi_drvbus_muxmask),
.muxmask = usb0_utmi_drvbus_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(6),
.funcval = BIT(6), /* refer to PAD_UTMI_DRVVBUS0_ENABLE */
};
@@ -738,12 +807,31 @@ static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
.muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
.muxmask = usb1_utmi_drvbus_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
.funcmask = BIT(11),
.funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
};
static const unsigned usb1_utmi_drvbus_pins[] = { 59 };
+static const struct sirfsoc_padmux usb1_dp_dn_padmux = {
+ .muxmask_counts = 0,
+ .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
+ .funcmask = BIT(2),
+ .funcval = BIT(2),
+};
+
+static const unsigned usb1_dp_dn_pins[] = { 115, 116 };
+
+static const struct sirfsoc_padmux uart1_route_io_usb1_padmux = {
+ .muxmask_counts = 0,
+ .ctrlreg = SIRFSOC_RSC_USB_UART_SHARE,
+ .funcmask = BIT(2),
+ .funcval = 0,
+};
+
+static const unsigned uart1_route_io_usb1_pins[] = { 115, 116 };
+
static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
{
.group = 0,
@@ -764,12 +852,19 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
+ SIRFSOC_PIN_GROUP("uart0_nostreamctrlgrp", uart0_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
+ SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
+ usp0_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
+ SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
+ usp1_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp2grp", usp2_pins),
+ SIRFSOC_PIN_GROUP("usp2_uart_nostreamctrl_grp",
+ usp2_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
@@ -789,6 +884,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
SIRFSOC_PIN_GROUP("usb0_utmi_drvbusgrp", usb0_utmi_drvbus_pins),
SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
+ SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins),
+ SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins),
SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
@@ -803,12 +900,19 @@ static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
static const char * const lcdromgrp[] = { "lcdromgrp" };
static const char * const uart0grp[] = { "uart0grp" };
+static const char * const uart0_nostreamctrlgrp[] = { "uart0_nostreamctrlgrp" };
static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart2grp[] = { "uart2grp" };
static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
static const char * const usp0grp[] = { "usp0grp" };
+static const char * const usp0_uart_nostreamctrl_grp[] =
+ { "usp0_uart_nostreamctrl_grp" };
static const char * const usp1grp[] = { "usp1grp" };
+static const char * const usp1_uart_nostreamctrl_grp[] =
+ { "usp1_uart_nostreamctrl_grp" };
static const char * const usp2grp[] = { "usp2grp" };
+static const char * const usp2_uart_nostreamctrl_grp[] =
+ { "usp2_uart_nostreamctrl_grp" };
static const char * const i2c0grp[] = { "i2c0grp" };
static const char * const i2c1grp[] = { "i2c1grp" };
static const char * const pwm0grp[] = { "pwm0grp" };
@@ -828,6 +932,8 @@ static const char * const sdmmc4grp[] = { "sdmmc4grp" };
static const char * const sdmmc5grp[] = { "sdmmc5grp" };
static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" };
static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
+static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" };
+static const char * const uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
static const char * const pulse_countgrp[] = { "pulse_countgrp" };
static const char * const i2sgrp[] = { "i2sgrp" };
static const char * const ac97grp[] = { "ac97grp" };
@@ -842,12 +948,19 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
+ SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl", uart0_nostreamctrlgrp, uart0_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
+ usp0_uart_nostreamctrl_grp, usp0_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
+ SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
+ usp1_uart_nostreamctrl_grp, usp1_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp2", usp2grp, usp2_padmux),
+ SIRFSOC_PMX_FUNCTION("usp2_uart_nostreamctrl",
+ usp2_uart_nostreamctrl_grp, usp2_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
@@ -867,6 +980,8 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux),
+ SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 26f946af793..b81e388c50d 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -166,12 +166,12 @@ static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector
if (mux->funcmask && enable) {
u32 func_en_val;
+
func_en_val =
- readl(spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
+ readl(spmx->rsc_virtbase + mux->ctrlreg);
func_en_val =
- (func_en_val & ~mux->funcmask) | (mux->
- funcval);
- writel(func_en_val, spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
+ (func_en_val & ~mux->funcmask) | (mux->funcval);
+ writel(func_en_val, spmx->rsc_virtbase + mux->ctrlreg);
}
}
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.h b/drivers/pinctrl/sirf/pinctrl-sirf.h
index 17cc108510b..d7f16b499ad 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.h
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.h
@@ -9,8 +9,9 @@
#ifndef __PINMUX_SIRF_H__
#define __PINMUX_SIRF_H__
-#define SIRFSOC_NUM_PADS 622
-#define SIRFSOC_RSC_PIN_MUX 0x4
+#define SIRFSOC_NUM_PADS 622
+#define SIRFSOC_RSC_USB_UART_SHARE 0
+#define SIRFSOC_RSC_PIN_MUX 0x4
#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
#define SIRFSOC_GPIO_PAD_EN_CLR(g) ((g)*0x100 + 0x90)
@@ -61,6 +62,7 @@ struct sirfsoc_padmux {
unsigned long muxmask_counts;
const struct sirfsoc_muxmask *muxmask;
/* RSC_PIN_MUX set */
+ unsigned long ctrlreg;
unsigned long funcmask;
unsigned long funcval;
};
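
For reference, a minimal sketch of how a ->ctrlreg-aware consumer reads and writes whichever RSC register a pad group names. This is not taken from the patch: sirfsoc_apply_padmux() is a made-up helper, and rsc_virtbase is assumed to be the ioremapped RSC block held in struct sirfsoc_pmx, exactly as sirfsoc_pinmux_endisable() uses it above.

#include <linux/io.h>
#include "pinctrl-sirf.h"

static void sirfsoc_apply_padmux(void __iomem *rsc_virtbase,
				 const struct sirfsoc_padmux *mux)
{
	u32 val;

	if (!mux->funcmask)
		return;

	/* Each pad group now selects its own RSC control register. */
	val = readl(rsc_virtbase + mux->ctrlreg);
	val = (val & ~mux->funcmask) | mux->funcval;
	writel(val, rsc_virtbase + mux->ctrlreg);
}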
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 0a7f0bdbaa7..ff2940e9f2a 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -735,7 +735,7 @@ static struct platform_driver plgpio_driver = {
.owner = THIS_MODULE,
.name = "spear-plgpio",
.pm = &plgpio_dev_pm_ops,
- .of_match_table = of_match_ptr(plgpio_of_match),
+ .of_match_table = plgpio_of_match,
},
};
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8eea2efbbb6..b9429fbf1cd 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -289,7 +289,7 @@ static int gmux_switchto(enum vga_switcheroo_client_id id)
static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
enum vga_switcheroo_state state)
{
- INIT_COMPLETION(gmux_data->powerchange_done);
+ reinit_completion(&gmux_data->powerchange_done);
if (state == VGA_SWITCHEROO_ON) {
gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
@@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
gmux_data->power_state = VGA_SWITCHEROO_ON;
- gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev);
+ gmux_data->dhandle = ACPI_HANDLE(&pnp->dev);
if (!gmux_data->dhandle) {
pr_err("Cannot find acpi handle for pnp device %s\n",
dev_name(&pnp->dev));
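
The two apple-gmux hunks are mechanical API updates: INIT_COMPLETION() becomes reinit_completion(), and DEVICE_ACPI_HANDLE() becomes ACPI_HANDLE(). A minimal sketch of the re-arm-then-wait pattern the first change preserves; the demo_* names are placeholders, not gmux code.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_ctx {
	struct completion done;		/* init_completion() once, at probe */
};

static int demo_wait_for_event(struct demo_ctx *ctx)
{
	/* Re-arm the completion before starting the next transaction ... */
	reinit_completion(&ctx->done);

	/* ... kick the hardware here; the IRQ handler calls complete(&ctx->done) ... */

	if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(200)))
		return -ETIMEDOUT;
	return 0;
}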
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a6afd4108be..aefcc32e563 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -190,16 +190,10 @@ struct eeepc_laptop {
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
- struct acpi_object_list params;
- union acpi_object in_obj;
acpi_status status;
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = val;
+ status = acpi_execute_simple_method(handle, (char *)method, val);
- status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
return (status == AE_OK ? 0 : -1);
}
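
This eeepc hunk, like several below, drops the hand-rolled acpi_object_list / union acpi_object plumbing in favour of acpi_execute_simple_method(), which takes the single integer argument directly. A minimal sketch of the helper's shape; "MTHD" is only an example method name.

#include <linux/acpi.h>
#include <linux/errno.h>

static int demo_write_acpi_int(acpi_handle handle, int val)
{
	acpi_status status;

	/* One integer argument in, no return object requested. */
	status = acpi_execute_simple_method(handle, "MTHD", val);

	return ACPI_SUCCESS(status) ? 0 : -EIO;
}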
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 52b8a97efde..9d30d69aa78 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -219,8 +219,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
{ .type = ACPI_TYPE_INTEGER }
};
struct acpi_object_list arg_list = { 4, &params[0] };
- struct acpi_buffer output;
- union acpi_object out_obj;
+ unsigned long long value;
acpi_handle handle = NULL;
status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
@@ -235,10 +234,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
params[2].integer.value = arg1;
params[3].integer.value = arg2;
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+ status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
@@ -246,18 +242,10 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return -ENODEV;
}
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
- "return an integer\n",
- cmd, arg0, arg1, arg2);
- return -ENODEV;
- }
-
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
- cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
- return out_obj.integer.value;
+ cmd, arg0, arg1, arg2, (int)value);
+ return value;
}
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -317,8 +305,6 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
static int set_lcd_level(int level)
{
acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
acpi_handle handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
@@ -333,9 +319,8 @@ static int set_lcd_level(int level)
return -ENODEV;
}
- arg0.integer.value = level;
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ status = acpi_execute_simple_method(handle, NULL, level);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -345,8 +330,6 @@ static int set_lcd_level(int level)
static int set_lcd_level_alt(int level)
{
acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
acpi_handle handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
@@ -361,9 +344,7 @@ static int set_lcd_level_alt(int level)
return -ENODEV;
}
- arg0.integer.value = level;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ status = acpi_execute_simple_method(handle, NULL, level);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -586,11 +567,10 @@ static struct platform_driver fujitsupf_driver = {
static void dmi_check_cb_common(const struct dmi_system_id *id)
{
- acpi_handle handle;
pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
- if (ACPI_SUCCESS(acpi_get_handle(NULL,
- "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+ if (acpi_has_method(NULL,
+ "\\_SB.PCI0.LPCB.FJEX.SBL2"))
use_alt_lcd_levels = 1;
else
use_alt_lcd_levels = 0;
@@ -653,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
static int acpi_fujitsu_add(struct acpi_device *device)
{
- acpi_handle handle;
int result = 0;
int state = 0;
struct input_dev *input;
@@ -702,8 +681,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
fujitsu->dev = device;
- if (ACPI_SUCCESS
- (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+ if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
(acpi_evaluate_object
@@ -803,7 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
{
- acpi_handle handle;
int result = 0;
int state = 0;
struct input_dev *input;
@@ -866,8 +843,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
fujitsu_hotkey->dev = device;
- if (ACPI_SUCCESS
- (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+ if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
(acpi_evaluate_object
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 89c4519d48a..6788acc22ab 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -72,8 +72,15 @@ enum {
VPCCMD_W_BL_POWER = 0x33,
};
+struct ideapad_rfk_priv {
+ int dev;
+ struct ideapad_private *priv;
+};
+
struct ideapad_private {
+ struct acpi_device *adev;
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+ struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
struct backlight_device *blightdev;
@@ -81,8 +88,6 @@ struct ideapad_private {
unsigned long cfg;
};
-static acpi_handle ideapad_handle;
-static struct ideapad_private *ideapad_priv;
static bool no_bt_rfkill;
module_param(no_bt_rfkill, bool, 0444);
MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -200,34 +205,38 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
*/
static int debugfs_status_show(struct seq_file *s, void *data)
{
+ struct ideapad_private *priv = s->private;
unsigned long value;
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+ if (!priv)
+ return -EINVAL;
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
seq_printf(s, "Backlight max:\t%lu\n", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
seq_printf(s, "Backlight now:\t%lu\n", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
seq_printf(s, "=====================\n");
- if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
seq_printf(s, "Radio status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
seq_printf(s, "Wifi status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
seq_printf(s, "BT status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
seq_printf(s, "3G status:\t%s(%lu)\n",
value ? "On" : "Off", value);
seq_printf(s, "=====================\n");
- if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
seq_printf(s, "Touchpad status:%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
seq_printf(s, "Camera status:\t%s(%lu)\n",
value ? "On" : "Off", value);
@@ -236,7 +245,7 @@ static int debugfs_status_show(struct seq_file *s, void *data)
static int debugfs_status_open(struct inode *inode, struct file *file)
{
- return single_open(file, debugfs_status_show, NULL);
+ return single_open(file, debugfs_status_show, inode->i_private);
}
static const struct file_operations debugfs_status_fops = {
@@ -249,21 +258,23 @@ static const struct file_operations debugfs_status_fops = {
static int debugfs_cfg_show(struct seq_file *s, void *data)
{
- if (!ideapad_priv) {
+ struct ideapad_private *priv = s->private;
+
+ if (!priv) {
seq_printf(s, "cfg: N/A\n");
} else {
seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
- ideapad_priv->cfg);
- if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+ priv->cfg);
+ if (test_bit(CFG_BT_BIT, &priv->cfg))
seq_printf(s, "Bluetooth ");
- if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_3G_BIT, &priv->cfg))
seq_printf(s, "3G ");
- if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_WIFI_BIT, &priv->cfg))
seq_printf(s, "Wireless ");
- if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
seq_printf(s, "Camera ");
seq_printf(s, "\nGraphic: ");
- switch ((ideapad_priv->cfg)&0x700) {
+ switch ((priv->cfg)&0x700) {
case 0x100:
seq_printf(s, "Intel");
break;
@@ -287,7 +298,7 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
static int debugfs_cfg_open(struct inode *inode, struct file *file)
{
- return single_open(file, debugfs_cfg_show, NULL);
+ return single_open(file, debugfs_cfg_show, inode->i_private);
}
static const struct file_operations debugfs_cfg_fops = {
@@ -308,14 +319,14 @@ static int ideapad_debugfs_init(struct ideapad_private *priv)
goto errout;
}
- node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+ node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv,
&debugfs_cfg_fops);
if (!node) {
pr_err("failed to create cfg in debugfs");
goto errout;
}
- node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+ node = debugfs_create_file("status", S_IRUGO, priv->debug, priv,
&debugfs_status_fops);
if (!node) {
pr_err("failed to create status in debugfs");
@@ -342,8 +353,9 @@ static ssize_t show_ideapad_cam(struct device *dev,
char *buf)
{
unsigned long result;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
- if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -353,12 +365,13 @@ static ssize_t store_ideapad_cam(struct device *dev,
const char *buf, size_t count)
{
int ret, state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
if (!count)
return 0;
if (sscanf(buf, "%i", &state) != 1)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
if (ret < 0)
return -EIO;
return count;
@@ -371,8 +384,9 @@ static ssize_t show_ideapad_fan(struct device *dev,
char *buf)
{
unsigned long result;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
- if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -382,6 +396,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
const char *buf, size_t count)
{
int ret, state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
if (!count)
return 0;
@@ -389,7 +404,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
return -EINVAL;
if (state < 0 || state > 4 || state == 3)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
if (ret < 0)
return -EIO;
return count;
@@ -415,7 +430,8 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
else if (attr == &dev_attr_fan_mode.attr) {
unsigned long value;
- supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+ supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
+ &value);
} else
supported = true;
@@ -445,9 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
static int ideapad_rfk_set(void *data, bool blocked)
{
- unsigned long opcode = (unsigned long)data;
+ struct ideapad_rfk_priv *priv = data;
- return write_ec_cmd(ideapad_handle, opcode, !blocked);
+ return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
}
static struct rfkill_ops ideapad_rfk_ops = {
@@ -459,7 +475,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
unsigned long hw_blocked;
int i;
- if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
return;
hw_blocked = !hw_blocked;
@@ -468,27 +484,30 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
rfkill_set_hw_state(priv->rfk[i], hw_blocked);
}
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int ret;
unsigned long sw_blocked;
if (no_bt_rfkill &&
(ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
/* Force to enable bluetooth when no_bt_rfkill=1 */
- write_ec_cmd(ideapad_handle,
+ write_ec_cmd(priv->adev->handle,
ideapad_rfk_data[dev].opcode, 1);
return 0;
}
-
- priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
- ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
- (void *)(long)dev);
+ priv->rfk_priv[dev].dev = dev;
+ priv->rfk_priv[dev].priv = priv;
+
+ priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+ &priv->platform_device->dev,
+ ideapad_rfk_data[dev].type,
+ &ideapad_rfk_ops,
+ &priv->rfk_priv[dev]);
if (!priv->rfk[dev])
return -ENOMEM;
- if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
+ if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
&sw_blocked)) {
rfkill_init_sw_state(priv->rfk[dev], 0);
} else {
@@ -504,10 +523,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
return 0;
}
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-
if (!priv->rfk[dev])
return;
@@ -518,37 +535,16 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
/*
* Platform device
*/
-static int ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_sysfs_init(struct ideapad_private *priv)
{
- int result;
-
- priv->platform_device = platform_device_alloc("ideapad", -1);
- if (!priv->platform_device)
- return -ENOMEM;
- platform_set_drvdata(priv->platform_device, priv);
-
- result = platform_device_add(priv->platform_device);
- if (result)
- goto fail_platform_device;
-
- result = sysfs_create_group(&priv->platform_device->dev.kobj,
+ return sysfs_create_group(&priv->platform_device->dev.kobj,
&ideapad_attribute_group);
- if (result)
- goto fail_sysfs;
- return 0;
-
-fail_sysfs:
- platform_device_del(priv->platform_device);
-fail_platform_device:
- platform_device_put(priv->platform_device);
- return result;
}
-static void ideapad_platform_exit(struct ideapad_private *priv)
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
{
sysfs_remove_group(&priv->platform_device->dev.kobj,
&ideapad_attribute_group);
- platform_device_unregister(priv->platform_device);
}
/*
@@ -623,7 +619,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
{
unsigned long long_pressed;
- if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
return;
if (long_pressed)
ideapad_input_report(priv, 17);
@@ -635,7 +631,7 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
{
unsigned long bit, value;
- read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+ read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
for (bit = 0; bit < 16; bit++) {
if (test_bit(bit, &value)) {
@@ -662,19 +658,28 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
*/
static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
{
+ struct ideapad_private *priv = bl_get_data(blightdev);
unsigned long now;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+ if (!priv)
+ return -EINVAL;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
return -EIO;
return now;
}
static int ideapad_backlight_update_status(struct backlight_device *blightdev)
{
- if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+ struct ideapad_private *priv = bl_get_data(blightdev);
+
+ if (!priv)
+ return -EINVAL;
+
+ if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
blightdev->props.brightness))
return -EIO;
- if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
+ if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
return -EIO;
@@ -692,11 +697,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
struct backlight_properties props;
unsigned long max, now, power;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
return -EIO;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
return -EIO;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return -EIO;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -734,7 +739,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
if (!blightdev)
return;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return;
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
@@ -745,7 +750,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/* if we control brightness via acpi video driver */
if (priv->blightdev == NULL) {
- read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
+ read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
return;
}
@@ -755,19 +760,12 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/*
* module init/exit
*/
-static const struct acpi_device_id ideapad_device_ids[] = {
- { "VPC2004", 0},
- { "", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-
-static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
unsigned long value;
/* Without reading from EC touchpad LED doesn't switch state */
- if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
* switch the LED state. We (de)activate KBC AUX port to turn
* touchpad off and on. We send KEY_TOUCHPAD_OFF and
@@ -779,26 +777,77 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
}
}
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct ideapad_private *priv = data;
+ unsigned long vpc1, vpc2, vpc_bit;
+
+ if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+ return;
+ if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+ return;
+
+ vpc1 = (vpc2 << 8) | vpc1;
+ for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
+ if (test_bit(vpc_bit, &vpc1)) {
+ switch (vpc_bit) {
+ case 9:
+ ideapad_sync_rfk_state(priv);
+ break;
+ case 13:
+ case 11:
+ case 7:
+ case 6:
+ ideapad_input_report(priv, vpc_bit);
+ break;
+ case 5:
+ ideapad_sync_touchpad_state(priv);
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 3:
+ ideapad_input_novokey(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ case 0:
+ ideapad_check_special_buttons(priv);
+ break;
+ default:
+ pr_info("Unknown event: %lu\n", vpc_bit);
+ }
+ }
+ }
+}
+
+static int ideapad_acpi_add(struct platform_device *pdev)
{
int ret, i;
int cfg;
struct ideapad_private *priv;
+ struct acpi_device *adev;
+
+ ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (ret)
+ return -ENODEV;
- if (read_method_int(adevice->handle, "_CFG", &cfg))
+ if (read_method_int(adev->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- dev_set_drvdata(&adevice->dev, priv);
- ideapad_priv = priv;
- ideapad_handle = adevice->handle;
+
+ dev_set_drvdata(&pdev->dev, priv);
priv->cfg = cfg;
+ priv->adev = adev;
+ priv->platform_device = pdev;
- ret = ideapad_platform_init(priv);
+ ret = ideapad_sysfs_init(priv);
if (ret)
- goto platform_failed;
+ goto sysfs_failed;
ret = ideapad_debugfs_init(priv);
if (ret)
@@ -810,117 +859,92 @@ static int ideapad_acpi_add(struct acpi_device *adevice)
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
- ideapad_register_rfkill(adevice, i);
+ ideapad_register_rfkill(priv, i);
else
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(priv);
- ideapad_sync_touchpad_state(adevice);
+ ideapad_sync_touchpad_state(priv);
if (!acpi_video_backlight_support()) {
ret = ideapad_backlight_init(priv);
if (ret && ret != -ENODEV)
goto backlight_failed;
}
+ ret = acpi_install_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
+ if (ret)
+ goto notification_failed;
return 0;
-
+notification_failed:
+ ideapad_backlight_exit(priv);
backlight_failed:
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
- ideapad_unregister_rfkill(adevice, i);
+ ideapad_unregister_rfkill(priv, i);
ideapad_input_exit(priv);
input_failed:
ideapad_debugfs_exit(priv);
debugfs_failed:
- ideapad_platform_exit(priv);
-platform_failed:
+ ideapad_sysfs_exit(priv);
+sysfs_failed:
kfree(priv);
return ret;
}
-static int ideapad_acpi_remove(struct acpi_device *adevice)
+static int ideapad_acpi_remove(struct platform_device *pdev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
int i;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
ideapad_backlight_exit(priv);
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
- ideapad_unregister_rfkill(adevice, i);
+ ideapad_unregister_rfkill(priv, i);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
- ideapad_platform_exit(priv);
- dev_set_drvdata(&adevice->dev, NULL);
+ ideapad_sysfs_exit(priv);
+ dev_set_drvdata(&pdev->dev, NULL);
kfree(priv);
return 0;
}
-static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *device)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
- acpi_handle handle = adevice->handle;
- unsigned long vpc1, vpc2, vpc_bit;
-
- if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
- return;
- if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
- return;
+ struct ideapad_private *priv;
- vpc1 = (vpc2 << 8) | vpc1;
- for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
- if (test_bit(vpc_bit, &vpc1)) {
- switch (vpc_bit) {
- case 9:
- ideapad_sync_rfk_state(priv);
- break;
- case 13:
- case 11:
- case 7:
- case 6:
- ideapad_input_report(priv, vpc_bit);
- break;
- case 5:
- ideapad_sync_touchpad_state(adevice);
- break;
- case 4:
- ideapad_backlight_notify_brightness(priv);
- break;
- case 3:
- ideapad_input_novokey(priv);
- break;
- case 2:
- ideapad_backlight_notify_power(priv);
- break;
- case 0:
- ideapad_check_special_buttons(priv);
- break;
- default:
- pr_info("Unknown event: %lu\n", vpc_bit);
- }
- }
- }
-}
+ if (!device)
+ return -EINVAL;
+ priv = dev_get_drvdata(device);
-static int ideapad_acpi_resume(struct device *device)
-{
- ideapad_sync_rfk_state(ideapad_priv);
- ideapad_sync_touchpad_state(to_acpi_device(device));
+ ideapad_sync_rfk_state(priv);
+ ideapad_sync_touchpad_state(priv);
return 0;
}
-
+#endif
static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
-static struct acpi_driver ideapad_acpi_driver = {
- .name = "ideapad_acpi",
- .class = "IdeaPad",
- .ids = ideapad_device_ids,
- .ops.add = ideapad_acpi_add,
- .ops.remove = ideapad_acpi_remove,
- .ops.notify = ideapad_acpi_notify,
- .drv.pm = &ideapad_pm,
- .owner = THIS_MODULE,
+static const struct acpi_device_id ideapad_device_ids[] = {
+ { "VPC2004", 0},
+ { "", 0},
};
-module_acpi_driver(ideapad_acpi_driver);
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+ .probe = ideapad_acpi_add,
+ .remove = ideapad_acpi_remove,
+ .driver = {
+ .name = "ideapad_acpi",
+ .owner = THIS_MODULE,
+ .pm = &ideapad_pm,
+ .acpi_match_table = ACPI_PTR(ideapad_device_ids),
+ },
+};
+
+module_platform_driver(ideapad_acpi_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("IdeaPad ACPI Extras");
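
The ideapad-laptop change is the largest of the batch: the driver is rebuilt as a platform driver matched via acpi_match_table, the ideapad_handle/ideapad_priv globals give way to per-device state, and the notify handler is installed explicitly with acpi_install_notify_handler(). Below is a bare skeleton of that shape, with invented demo_* names and a fake "DEMO0001" HID; only the structure mirrors the patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/slab.h>

struct demo_priv {
	struct acpi_device *adev;
};

static void demo_notify(acpi_handle handle, u32 event, void *data)
{
	struct demo_priv *priv = data;

	dev_info(&priv->adev->dev, "ACPI event 0x%x\n", event);
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;
	struct acpi_device *adev;
	acpi_status status;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return -ENODEV;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->adev = adev;
	platform_set_drvdata(pdev, priv);

	status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
					     demo_notify, priv);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}

static int demo_remove(struct platform_device *pdev)
{
	struct demo_priv *priv = platform_get_drvdata(pdev);

	acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY,
				   demo_notify);
	return 0;
}

static const struct acpi_device_id demo_ids[] = {
	{ "DEMO0001", 0 },	/* placeholder HID, not a real device */
	{ }
};
MODULE_DEVICE_TABLE(acpi, demo_ids);

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove = demo_remove,
	.driver = {
		.name = "demo_acpi",
		.owner = THIS_MODULE,
		.acpi_match_table = ACPI_PTR(demo_ids),
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");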
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 41b740cb28b..a2083a9e566 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -29,24 +29,16 @@ static ssize_t irst_show_wakeup_events(struct device *dev,
char *buf)
{
struct acpi_device *acpi;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
+ unsigned long long value;
acpi_status status;
acpi = to_acpi_device(dev);
- status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- return sprintf(buf, "%lld\n", result->integer.value);
+ return sprintf(buf, "%lld\n", value);
}
static ssize_t irst_store_wakeup_events(struct device *dev,
@@ -54,8 +46,6 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
const char *buf, size_t count)
{
struct acpi_device *acpi;
- struct acpi_object_list input;
- union acpi_object param;
acpi_status status;
unsigned long value;
int error;
@@ -67,13 +57,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
if (error)
return error;
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = value;
-
- input.count = 1;
- input.pointer = &param;
-
- status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
@@ -91,24 +75,16 @@ static ssize_t irst_show_wakeup_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
+ unsigned long long value;
acpi_status status;
acpi = to_acpi_device(dev);
- status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- return sprintf(buf, "%lld\n", result->integer.value);
+ return sprintf(buf, "%lld\n", value);
}
static ssize_t irst_store_wakeup_time(struct device *dev,
@@ -116,8 +92,6 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
const char *buf, size_t count)
{
struct acpi_device *acpi;
- struct acpi_object_list input;
- union acpi_object param;
acpi_status status;
unsigned long value;
int error;
@@ -129,13 +103,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
if (error)
return error;
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = value;
-
- input.count = 1;
- input.pointer = &param;
-
- status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
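
Here the GFFS/GFTV reads switch to acpi_evaluate_integer(), which replaces the allocate-buffer-and-type-check sequence with a typed out-parameter. A minimal sketch; demo_read_gffs() is a placeholder name, while "GFFS" is the method the driver itself evaluates.

#include <linux/acpi.h>
#include <linux/errno.h>

static int demo_read_gffs(acpi_handle handle, unsigned long long *out)
{
	acpi_status status;

	/* NULL argument list: GFFS takes no parameters. */
	status = acpi_evaluate_integer(handle, "GFFS", NULL, out);

	return ACPI_SUCCESS(status) ? 0 : -EINVAL;
}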
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 52259dcabec..1838400dc03 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -25,37 +25,18 @@ MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
{
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
- union acpi_object param;
+ unsigned long long value;
acpi_status status;
- status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- if (result->integer.value & 0x1) {
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = 0;
-
- input.count = 1;
- input.pointer = &param;
-
+ if (value & 0x1) {
dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
- status = acpi_evaluate_object(acpi->handle, "SAOS", &input,
- NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
}
- kfree(result);
-
return 0;
}
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index d6cfc1558c2..11244f8703c 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -156,19 +156,15 @@ static struct thermal_cooling_device_ops memory_cooling_ops = {
static int intel_menlow_memory_add(struct acpi_device *device)
{
int result = -ENODEV;
- acpi_status status = AE_OK;
- acpi_handle dummy;
struct thermal_cooling_device *cdev;
if (!device)
return -EINVAL;
- status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH))
goto end;
- status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH))
goto end;
cdev = thermal_cooling_device_register("Memory controller", device,
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 9215ed72bec..d654f831410 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <linux/sfi.h>
#include <linux/module.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
@@ -579,7 +579,7 @@ static struct pci_driver ipc_driver = {
static int __init intel_scu_ipc_init(void)
{
- platform = mrst_identify_cpu();
+ platform = intel_mid_identify_cpu();
if (platform == 0)
return -ENODEV;
return pci_register_driver(&ipc_driver);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 13ec195f0ca..47caab0ea7a 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1508,7 +1508,6 @@ static void sony_nc_function_resume(void)
static int sony_nc_resume(struct device *dev)
{
struct sony_nc_value *item;
- acpi_handle handle;
for (item = sony_nc_values; item->name; item++) {
int ret;
@@ -1523,15 +1522,13 @@ static int sony_nc_resume(struct device *dev)
}
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
int arg = 1;
if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle)))
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
sony_nc_function_resume();
return 0;
@@ -2682,7 +2679,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
static void sony_nc_backlight_setup(void)
{
- acpi_handle unused;
int max_brightness = 0;
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
@@ -2717,8 +2713,7 @@ static void sony_nc_backlight_setup(void)
sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
- } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
- &unused))) {
+ } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
ops = &sony_backlight_ops;
max_brightness = SONY_MAX_BRIGHTNESS - 1;
@@ -2750,7 +2745,6 @@ static int sony_nc_add(struct acpi_device *device)
{
acpi_status status;
int result = 0;
- acpi_handle handle;
struct sony_nc_value *item;
pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
@@ -2790,15 +2784,13 @@ static int sony_nc_add(struct acpi_device *device)
goto outplatform;
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
int arg = 1;
if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
dprintk("Doing SNC setup\n");
/* retrieve the available handles */
result = sony_nc_handles_setup(sony_pf_device);
@@ -2821,9 +2813,8 @@ static int sony_nc_add(struct acpi_device *device)
/* find the available acpiget as described in the DSDT */
for (; item->acpiget && *item->acpiget; ++item->acpiget) {
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
- *item->acpiget,
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiget)) {
dprintk("Found %s getter: %s\n",
item->name, *item->acpiget);
item->devattr.attr.mode |= S_IRUGO;
@@ -2833,9 +2824,8 @@ static int sony_nc_add(struct acpi_device *device)
/* find the available acpiset as described in the DSDT */
for (; item->acpiset && *item->acpiset; ++item->acpiset) {
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
- *item->acpiset,
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiset)) {
dprintk("Found %s setter: %s\n",
item->name, *item->acpiset);
item->devattr.attr.mode |= S_IWUSR;
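
The sony-laptop hunks, like the fujitsu and menlow ones, only need to know whether a method exists, so the acpi_get_handle()-into-a-throwaway-handle idiom collapses to acpi_has_method(). A one-function sketch; "_XYZ" is only an example name.

#include <linux/acpi.h>

static bool demo_supports_xyz(acpi_handle handle)
{
	/* true if the named object exists under this handle */
	return acpi_has_method(handle, "_XYZ");
}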
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 03ca6c139f1..05e046aa5e3 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -23,7 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define TPACPI_VERSION "0.24"
+#define TPACPI_VERSION "0.25"
#define TPACPI_SYSFS_VERSION 0x020700
/*
@@ -88,6 +88,7 @@
#include <linux/pci_ids.h>
+#include <linux/thinkpad_acpi.h>
/* ThinkPad CMOS commands */
#define TP_CMOS_VOLUME_DOWN 0
@@ -700,6 +701,14 @@ static void __init drv_acpi_handle_init(const char *name,
static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
u32 level, void *context, void **return_value)
{
+ struct acpi_device *dev;
+ if (!strcmp(context, "video")) {
+ if (acpi_bus_get_device(handle, &dev))
+ return AE_OK;
+ if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ }
+
*(acpi_handle *)return_value = handle;
return AE_CTRL_TERMINATE;
@@ -712,10 +721,10 @@ static void __init tpacpi_acpi_handle_locate(const char *name,
acpi_status status;
acpi_handle device_found;
- BUG_ON(!name || !hid || !handle);
+ BUG_ON(!name || !handle);
vdbg_printk(TPACPI_DBG_INIT,
"trying to locate ACPI handle for %s, using HID %s\n",
- name, hid);
+ name, hid ? hid : "NULL");
memset(&device_found, 0, sizeof(device_found));
status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
@@ -6090,19 +6099,28 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
+ struct acpi_device *device, *child;
int rc;
- if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
+ if (acpi_bus_get_device(handle, &device))
+ return 0;
+
+ rc = 0;
+ list_for_each_entry(child, &device->children, node) {
+ acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
+ NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
pr_err("Unknown _BCL data, please report this to %s\n",
- TPACPI_MAIL);
+ TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
}
- } else {
- return 0;
+ break;
}
kfree(buffer.pointer);
@@ -6118,7 +6136,7 @@ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
acpi_handle video_device;
int bcl_levels = 0;
- tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+ tpacpi_acpi_handle_locate("video", NULL, &video_device);
if (video_device)
bcl_levels = tpacpi_query_bcl_levels(video_device);
@@ -8350,6 +8368,91 @@ static struct ibm_struct fan_driver_data = {
.resume = fan_resume,
};
+/*************************************************************************
+ * Mute LED subdriver
+ */
+
+
+struct tp_led_table {
+ acpi_string name;
+ int on_value;
+ int off_value;
+ int state;
+};
+
+static struct tp_led_table led_tables[] = {
+ [TPACPI_LED_MUTE] = {
+ .name = "SSMS",
+ .on_value = 1,
+ .off_value = 0,
+ },
+ [TPACPI_LED_MICMUTE] = {
+ .name = "MMTS",
+ .on_value = 2,
+ .off_value = 0,
+ },
+};
+
+static int mute_led_on_off(struct tp_led_table *t, bool state)
+{
+ acpi_handle temp;
+ int output;
+
+ if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) {
+ pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
+ return -EIO;
+ }
+
+ if (!acpi_evalf(hkey_handle, &output, t->name, "dd",
+ state ? t->on_value : t->off_value))
+ return -EIO;
+
+ t->state = state;
+ return state;
+}
+
+int tpacpi_led_set(int whichled, bool on)
+{
+ struct tp_led_table *t;
+
+ if (whichled < 0 || whichled >= TPACPI_LED_MAX)
+ return -EINVAL;
+
+ t = &led_tables[whichled];
+ if (t->state < 0 || t->state == on)
+ return t->state;
+ return mute_led_on_off(t, on);
+}
+EXPORT_SYMBOL_GPL(tpacpi_led_set);
+
+static int mute_led_init(struct ibm_init_struct *iibm)
+{
+ acpi_handle temp;
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ struct tp_led_table *t = &led_tables[i];
+ if (ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp)))
+ mute_led_on_off(t, false);
+ else
+ t->state = -ENODEV;
+ }
+ return 0;
+}
+
+static void mute_led_exit(void)
+{
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++)
+ tpacpi_led_set(i, false);
+}
+
+static struct ibm_struct mute_led_driver_data = {
+ .name = "mute_led",
+ .exit = mute_led_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -8768,6 +8871,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = fan_init,
.data = &fan_driver_data,
},
+ {
+ .init = mute_led_init,
+ .data = &mute_led_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, struct kernel_param *kp)
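
The new mute-LED subdriver exports tpacpi_led_set() so that audio code can mirror mute state onto the hardware LEDs. A sketch of a caller; the my_codec_* hook is invented, and only tpacpi_led_set() and the TPACPI_LED_* indices come from the patch.

#include <linux/thinkpad_acpi.h>
#include <linux/printk.h>

static void my_codec_mute_changed(bool muted)
{
	/* A negative return means no LED interface or the ACPI call failed. */
	if (tpacpi_led_set(TPACPI_LED_MUTE, muted) < 0)
		pr_debug("ThinkPad mute LED not available\n");
}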
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 4ab618c63b4..67897c8740b 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -80,13 +80,9 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event)
static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
{
acpi_status status;
- union acpi_object fncx_params[1] = {
- { .type = ACPI_TYPE_INTEGER }
- };
- struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
- fncx_params[0].integer.value = state ? 0x86 : 0x87;
- status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+ status = acpi_execute_simple_method(device->handle, "FNCX",
+ state ? 0x86 : 0x87);
if (ACPI_FAILURE(status)) {
pr_err("Unable to switch FNCX notifications\n");
return -ENODEV;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index eb3467ea6d8..0cfadb65f7c 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -191,16 +191,9 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value)
static int write_acpi_int(const char *methodName, int val)
{
- struct acpi_object_list params;
- union acpi_object in_objs[1];
acpi_status status;
- params.count = ARRAY_SIZE(in_objs);
- params.pointer = in_objs;
- in_objs[0].type = ACPI_TYPE_INTEGER;
- in_objs[0].integer.value = val;
-
- status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+ status = acpi_execute_simple_method(NULL, (char *)methodName, val);
return (status == AE_OK) ? 0 : -EIO;
}
@@ -947,21 +940,17 @@ static void toshiba_acpi_hotkey_work(struct work_struct *work)
*/
static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
{
- struct acpi_buffer buf;
- union acpi_object out_obj;
+ unsigned long long value;
acpi_status status;
- buf.pointer = &out_obj;
- buf.length = sizeof(out_obj);
-
- status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
- NULL, &buf);
- if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
+ status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI INFO method execution failed\n");
return -EIO;
}
- return out_obj.integer.value;
+ return value;
}
static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
@@ -981,7 +970,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
acpi_status status;
- acpi_handle ec_handle, handle;
+ acpi_handle ec_handle;
int error;
u32 hci_result;
@@ -1008,10 +997,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
*/
status = AE_ERROR;
ec_handle = ec_get_handle();
- if (ec_handle)
- status = acpi_get_handle(ec_handle, "NTFY", &handle);
-
- if (ACPI_SUCCESS(status)) {
+ if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
error = i8042_install_filter(toshiba_acpi_i8042_filter);
@@ -1027,10 +1013,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
* Determine hotkey query interface. Prefer using the INFO
* method when it is available.
*/
- status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
dev->info_supported = 1;
- } else {
+ else {
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
if (hci_result == HCI_SUCCESS)
dev->system_event_supported = 1;
@@ -1155,15 +1140,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
static const char *find_hci_method(acpi_handle handle)
{
- acpi_status status;
- acpi_handle hci_handle;
-
- status = acpi_get_handle(handle, "GHCI", &hci_handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "GHCI"))
return "GHCI";
- status = acpi_get_handle(handle, "SPFC", &hci_handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "SPFC"))
return "SPFC";
return NULL;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 601ea951224..62e8c221d01 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -252,8 +252,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
{
struct guid_block *block = NULL;
char method[5];
- struct acpi_object_list input;
- union acpi_object params[1];
acpi_status status;
acpi_handle handle;
@@ -263,13 +261,9 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
if (!block)
return AE_NOT_EXIST;
- input.count = 1;
- input.pointer = params;
- params[0].type = ACPI_TYPE_INTEGER;
- params[0].integer.value = enable;
snprintf(method, 5, "WE%02X", block->notify_id);
- status = acpi_evaluate_object(handle, method, &input, NULL);
+ status = acpi_execute_simple_method(handle, method, enable);
if (status != AE_OK && status != AE_NOT_FOUND)
return status;
@@ -353,10 +347,10 @@ struct acpi_buffer *out)
{
struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
- acpi_handle handle, wc_handle;
+ acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
- struct acpi_object_list input, wc_input;
- union acpi_object wc_params[1], wq_params[1];
+ struct acpi_object_list input;
+ union acpi_object wq_params[1];
char method[5];
char wc_method[5] = "WC";
@@ -386,11 +380,6 @@ struct acpi_buffer *out)
* enable collection.
*/
if (block->flags & ACPI_WMI_EXPENSIVE) {
- wc_input.count = 1;
- wc_input.pointer = wc_params;
- wc_params[0].type = ACPI_TYPE_INTEGER;
- wc_params[0].integer.value = 1;
-
strncat(wc_method, block->object_id, 2);
/*
@@ -398,10 +387,9 @@ struct acpi_buffer *out)
* expensive, but have no corresponding WCxx method. So we
* should not fail if this happens.
*/
- wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
- if (ACPI_SUCCESS(wc_status))
- wc_status = acpi_evaluate_object(handle, wc_method,
- &wc_input, NULL);
+ if (acpi_has_method(handle, wc_method))
+ wc_status = acpi_execute_simple_method(handle,
+ wc_method, 1);
}
strcpy(method, "WQ");
@@ -414,9 +402,7 @@ struct acpi_buffer *out)
* the WQxx method failed - we should disable collection anyway.
*/
if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
- wc_params[0].integer.value = 0;
- status = acpi_evaluate_object(handle,
- wc_method, &wc_input, NULL);
+ status = acpi_execute_simple_method(handle, wc_method, 0);
}
return status;
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index ffd53e3eb92..c8873b0ca55 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -4,7 +4,7 @@
*/
extern spinlock_t pnp_lock;
-extern struct device_attribute pnp_interface_attrs[];
+extern const struct attribute_group *pnp_dev_groups[];
void *pnp_alloc(long size);
int pnp_register_protocol(struct pnp_protocol *protocol);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index a39ee38a941..6936e0acedc 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -246,7 +246,7 @@ struct bus_type pnp_bus_type = {
.remove = pnp_device_remove,
.shutdown = pnp_device_shutdown,
.pm = &pnp_bus_dev_pm_ops,
- .dev_attrs = pnp_interface_attrs,
+ .dev_groups = pnp_dev_groups,
};
int pnp_register_driver(struct pnp_driver *drv)
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 0c201317284..e6c403be09a 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -203,8 +203,8 @@ static void pnp_print_option(pnp_info_buffer_t * buffer, char *space,
}
}
-static ssize_t pnp_show_options(struct device *dmdev,
- struct device_attribute *attr, char *buf)
+static ssize_t options_show(struct device *dmdev, struct device_attribute *attr,
+ char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_info_buffer_t *buffer;
@@ -241,10 +241,10 @@ static ssize_t pnp_show_options(struct device *dmdev,
kfree(buffer);
return ret;
}
+static DEVICE_ATTR_RO(options);
-static ssize_t pnp_show_current_resources(struct device *dmdev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t resources_show(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_info_buffer_t *buffer;
@@ -331,9 +331,9 @@ static char *pnp_get_resource_value(char *buf,
return buf;
}
-static ssize_t pnp_set_current_resources(struct device *dmdev,
- struct device_attribute *attr,
- const char *ubuf, size_t count)
+static ssize_t resources_store(struct device *dmdev,
+ struct device_attribute *attr, const char *ubuf,
+ size_t count)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
char *buf = (void *)ubuf;
@@ -434,9 +434,10 @@ done:
return retval;
return count;
}
+static DEVICE_ATTR_RW(resources);
-static ssize_t pnp_show_current_ids(struct device *dmdev,
- struct device_attribute *attr, char *buf)
+static ssize_t id_show(struct device *dmdev, struct device_attribute *attr,
+ char *buf)
{
char *str = buf;
struct pnp_dev *dev = to_pnp_dev(dmdev);
@@ -448,12 +449,20 @@ static ssize_t pnp_show_current_ids(struct device *dmdev,
}
return (str - buf);
}
+static DEVICE_ATTR_RO(id);
-struct device_attribute pnp_interface_attrs[] = {
- __ATTR(resources, S_IRUGO | S_IWUSR,
- pnp_show_current_resources,
- pnp_set_current_resources),
- __ATTR(options, S_IRUGO, pnp_show_options, NULL),
- __ATTR(id, S_IRUGO, pnp_show_current_ids, NULL),
- __ATTR_NULL,
+static struct attribute *pnp_dev_attrs[] = {
+ &dev_attr_resources.attr,
+ &dev_attr_options.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group pnp_dev_group = {
+ .attrs = pnp_dev_attrs,
+};
+
+const struct attribute_group *pnp_dev_groups[] = {
+ &pnp_dev_group,
+ NULL,
};
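
The PNP sysfs hunks replace the deprecated dev_attrs array with named DEVICE_ATTR_*() attributes collected into an attribute_group and published through dev_groups. The same pattern in miniature, with placeholder demo names.

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR_RO(state);		/* expands to dev_attr_state */

static struct attribute *demo_dev_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};

static const struct attribute_group demo_dev_group = {
	.attrs = demo_dev_attrs,
};

/* Hooked up via struct bus_type .dev_groups (or struct device_driver .groups). */
const struct attribute_group *demo_dev_groups[] = {
	&demo_dev_group,
	NULL,
};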
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 34049b0b4c7..14655a0f043 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
pnp_dbg(&dev->dev, "set resources\n");
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
@@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
dev_dbg(&dev->dev, "disable resources\n");
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
@@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
struct acpi_device *acpi_dev;
acpi_handle handle;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return false;
@@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
acpi_handle handle;
int error = 0;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
@@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
static int pnpacpi_resume(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
int error = 0;
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
@@ -239,8 +239,6 @@ static char *__init pnpacpi_get_id(struct acpi_device *device)
static int __init pnpacpi_add_device(struct acpi_device *device)
{
- acpi_handle temp = NULL;
- acpi_status status;
struct pnp_dev *dev;
char *pnpid;
struct acpi_hardware_id *id;
@@ -253,8 +251,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
* If a PnPacpi device is not present, the device
* driver should not be loaded.
*/
- status = acpi_get_handle(device->handle, "_CRS", &temp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, "_CRS"))
return 0;
pnpid = pnpacpi_get_id(device);
@@ -271,16 +268,14 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
- status = acpi_get_handle(device->handle, "_SRS", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_SRS"))
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
dev->capabilities |= PNP_WRITE;
if (device->flags.removable)
dev->capabilities |= PNP_REMOVABLE;
- status = acpi_get_handle(device->handle, "_DIS", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_DIS"))
dev->capabilities |= PNP_DISABLE;
if (strlen(acpi_device_name(device)))
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e6f92b45091..5e2054afe84 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -346,6 +346,12 @@ config CHARGER_BQ24190
help
Say Y to enable support for the TI BQ24190 battery charger.
+config CHARGER_BQ24735
+ tristate "TI BQ24735 battery charger support"
+ depends on I2C && GPIOLIB
+ help
+ Say Y to enable support for the TI BQ24735 battery charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index a4b74177706..372b4e8ab59 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
+obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index a4c4a10b3a4..19110aa613a 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -766,7 +766,6 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
ret = -ENXIO;
break;
}
- break;
case USB_STAT_CARKIT_1:
case USB_STAT_CARKIT_2:
case USB_STAT_ACA_DOCK_CHARGER:
@@ -1387,8 +1386,12 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
* the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_ac) {
- regulator_enable(di->regu);
- di->vddadc_en_ac = true;
+ ret = regulator_enable(di->regu);
+ if (ret)
+ dev_warn(di->dev,
+ "Failed to enable regulator\n");
+ else
+ di->vddadc_en_ac = true;
}
/* Check if the requested voltage or current is valid */
@@ -1556,8 +1559,12 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
* the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_usb) {
- regulator_enable(di->regu);
- di->vddadc_en_usb = true;
+ ret = regulator_enable(di->regu);
+ if (ret)
+ dev_warn(di->dev,
+ "Failed to enable regulator\n");
+ else
+ di->vddadc_en_usb = true;
}
/* Enable USB charging */
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 754970717c3..3cb4178e397 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -574,8 +574,8 @@ int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
}
/* Return and WFI */
- INIT_COMPLETION(di->ab8500_fg_started);
- INIT_COMPLETION(di->ab8500_fg_complete);
+ reinit_completion(&di->ab8500_fg_started);
+ reinit_completion(&di->ab8500_fg_complete);
enable_irq(di->irq);
/* Note: cc_lock is still locked */
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index 0727f925613..df893dd1447 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -605,9 +605,13 @@ static int bq2415x_set_battery_regulation_voltage(struct bq2415x_device *bq,
{
int val = (mV/10 - 350) / 2;
+ /*
+ * According to datasheet, maximum battery regulation voltage is
+ * 4440mV which is b101111 = 47.
+ */
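+ /* For illustration: a 4200mV limit maps to (4200/10 - 350) / 2 = 35. */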
if (val < 0)
val = 0;
- else if (val > 94) /* FIXME: Max is 94 or 122 ? Set max value ? */
+ else if (val > 47)
return -EINVAL;
return bq2415x_i2c_write_mask(bq, BQ2415X_REG_VOLTAGE, val,
diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c
new file mode 100644
index 00000000000..d022b823305
--- /dev/null
+++ b/drivers/power/bq24735-charger.c
@@ -0,0 +1,419 @@
+/*
+ * Battery charger driver for TI BQ24735
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include <linux/power/bq24735-charger.h>
+
+#define BQ24735_CHG_OPT 0x12
+#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0)
+#define BQ24735_CHG_OPT_AC_PRESENT (1 << 4)
+#define BQ24735_CHARGE_CURRENT 0x14
+#define BQ24735_CHARGE_CURRENT_MASK 0x1fc0
+#define BQ24735_CHARGE_VOLTAGE 0x15
+#define BQ24735_CHARGE_VOLTAGE_MASK 0x7ff0
+#define BQ24735_INPUT_CURRENT 0x3f
+#define BQ24735_INPUT_CURRENT_MASK 0x1f80
+#define BQ24735_MANUFACTURER_ID 0xfe
+#define BQ24735_DEVICE_ID 0xff
+
+struct bq24735 {
+ struct power_supply charger;
+ struct i2c_client *client;
+ struct bq24735_platform *pdata;
+};
+
+static inline struct bq24735 *to_bq24735(struct power_supply *psy)
+{
+ return container_of(psy, struct bq24735, charger);
+}
+
+static enum power_supply_property bq24735_charger_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static inline int bq24735_write_word(struct i2c_client *client, u8 reg,
+ u16 value)
+{
+ return i2c_smbus_write_word_data(client, reg, le16_to_cpu(value));
+}
+
+static inline int bq24735_read_word(struct i2c_client *client, u8 reg)
+{
+ s32 ret = i2c_smbus_read_word_data(client, reg);
+
+ return ret < 0 ? ret : le16_to_cpu(ret);
+}
+
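+/*
+ * Read-modify-write helper: only the bits selected by mask are replaced
+ * with the corresponding bits of value; all other bits are preserved.
+ */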
+static int bq24735_update_word(struct i2c_client *client, u8 reg,
+ u16 mask, u16 value)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = bq24735_read_word(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp = ret & ~mask;
+ tmp |= value & mask;
+
+ return bq24735_write_word(client, reg, tmp);
+}
+
+static inline int bq24735_enable_charging(struct bq24735 *charger)
+{
+ return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+ BQ24735_CHG_OPT_CHARGE_DISABLE,
+ ~BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
+static inline int bq24735_disable_charging(struct bq24735 *charger)
+{
+ return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+ BQ24735_CHG_OPT_CHARGE_DISABLE,
+ BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
+static int bq24735_config_charger(struct bq24735 *charger)
+{
+ struct bq24735_platform *pdata = charger->pdata;
+ int ret;
+ u16 value;
+
+ if (pdata->charge_current) {
+ value = pdata->charge_current & BQ24735_CHARGE_CURRENT_MASK;
+
+ ret = bq24735_write_word(charger->client,
+ BQ24735_CHARGE_CURRENT, value);
+ if (ret < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to write charger current : %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (pdata->charge_voltage) {
+ value = pdata->charge_voltage & BQ24735_CHARGE_VOLTAGE_MASK;
+
+ ret = bq24735_write_word(charger->client,
+ BQ24735_CHARGE_VOLTAGE, value);
+ if (ret < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to write charger voltage : %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (pdata->input_current) {
+ value = pdata->input_current & BQ24735_INPUT_CURRENT_MASK;
+
+ ret = bq24735_write_word(charger->client,
+ BQ24735_INPUT_CURRENT, value);
+ if (ret < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to write input current : %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool bq24735_charger_is_present(struct bq24735 *charger)
+{
+ struct bq24735_platform *pdata = charger->pdata;
+ int ret;
+
+ if (pdata->status_gpio_valid) {
+ ret = gpio_get_value_cansleep(pdata->status_gpio);
+ return ret ^= pdata->status_gpio_active_low == 0;
+ } else {
+ int ac = 0;
+
+ ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+ if (ac < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to read charger options : %d\n",
+ ac);
+ return false;
+ }
+ return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false;
+ }
+
+ return false;
+}
+
+static irqreturn_t bq24735_charger_isr(int irq, void *devid)
+{
+ struct power_supply *psy = devid;
+ struct bq24735 *charger = to_bq24735(psy);
+
+ if (bq24735_charger_is_present(charger))
+ bq24735_enable_charging(charger);
+ else
+ bq24735_disable_charging(charger);
+
+ power_supply_changed(psy);
+
+ return IRQ_HANDLED;
+}
+
+static int bq24735_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq24735 *charger;
+
+ charger = container_of(psy, struct bq24735, charger);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = bq24735_charger_is_present(charger) ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client)
+{
+ struct bq24735_platform *pdata;
+ struct device_node *np = client->dev.of_node;
+ u32 val;
+ int ret;
+ enum of_gpio_flags flags;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&client->dev,
+ "Memory alloc for bq24735 pdata failed\n");
+ return NULL;
+ }
+
+ pdata->status_gpio = of_get_named_gpio_flags(np, "ti,ac-detect-gpios",
+ 0, &flags);
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ pdata->status_gpio_active_low = 1;
+
+ ret = of_property_read_u32(np, "ti,charge-current", &val);
+ if (!ret)
+ pdata->charge_current = val;
+
+ ret = of_property_read_u32(np, "ti,charge-voltage", &val);
+ if (!ret)
+ pdata->charge_voltage = val;
+
+ ret = of_property_read_u32(np, "ti,input-current", &val);
+ if (!ret)
+ pdata->input_current = val;
+
+ return pdata;
+}
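+
+/*
+ * The DT parser above accepts a node along these lines (the values and the
+ * controller/GPIO references are made-up examples, not recommendations):
+ *
+ *	charger@9 {
+ *		compatible = "ti,bq24735";
+ *		reg = <0x9>;
+ *		ti,ac-detect-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ *		ti,charge-current = <1024>;
+ *		ti,charge-voltage = <12000>;
+ *		ti,input-current = <2048>;
+ *	};
+ */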
+
+static int bq24735_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bq24735 *charger;
+ struct power_supply *supply;
+ char *name;
+
+ charger = devm_kzalloc(&client->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->pdata = client->dev.platform_data;
+
+ if (IS_ENABLED(CONFIG_OF) && !charger->pdata && client->dev.of_node)
+ charger->pdata = bq24735_parse_dt_data(client);
+
+ if (!charger->pdata) {
+ dev_err(&client->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
+
+ name = (char *)charger->pdata->name;
+ if (!name) {
+ name = kasprintf(GFP_KERNEL, "bq24735@%s",
+ dev_name(&client->dev));
+ if (!name) {
+ dev_err(&client->dev, "Failed to alloc device name\n");
+ return -ENOMEM;
+ }
+ }
+
+ charger->client = client;
+
+ supply = &charger->charger;
+
+ supply->name = name;
+ supply->type = POWER_SUPPLY_TYPE_MAINS;
+ supply->properties = bq24735_charger_properties;
+ supply->num_properties = ARRAY_SIZE(bq24735_charger_properties);
+ supply->get_property = bq24735_charger_get_property;
+ supply->supplied_to = charger->pdata->supplied_to;
+ supply->num_supplicants = charger->pdata->num_supplicants;
+ supply->of_node = client->dev.of_node;
+
+ i2c_set_clientdata(client, charger);
+
+ ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
+ ret);
+ goto err_free_name;
+ } else if (ret != 0x0040) {
+ dev_err(&client->dev,
+ "manufacturer id mismatch. 0x0040 != 0x%04x\n", ret);
+ ret = -ENODEV;
+ goto err_free_name;
+ }
+
+ ret = bq24735_read_word(client, BQ24735_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read device id : %d\n", ret);
+ goto err_free_name;
+ } else if (ret != 0x000B) {
+ dev_err(&client->dev,
+ "device id mismatch. 0x000b != 0x%04x\n", ret);
+ ret = -ENODEV;
+ goto err_free_name;
+ }
+
+ if (gpio_is_valid(charger->pdata->status_gpio)) {
+ ret = devm_gpio_request(&client->dev,
+ charger->pdata->status_gpio,
+ name);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed GPIO request for GPIO %d: %d\n",
+ charger->pdata->status_gpio, ret);
+ }
+
+ charger->pdata->status_gpio_valid = !ret;
+ }
+
+ ret = bq24735_config_charger(charger);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed in configuring charger");
+ goto err_free_name;
+ }
+
+ /* check for AC adapter presence */
+ if (bq24735_charger_is_present(charger)) {
+ ret = bq24735_enable_charging(charger);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to enable charging\n");
+ goto err_free_name;
+ }
+ }
+
+ ret = power_supply_register(&client->dev, supply);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register power supply: %d\n",
+ ret);
+ goto err_free_name;
+ }
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, bq24735_charger_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ supply->name, supply);
+ if (ret) {
+ dev_err(&client->dev,
+ "Unable to register IRQ %d err %d\n",
+ client->irq, ret);
+ goto err_unregister_supply;
+ }
+ }
+
+ return 0;
+err_unregister_supply:
+ power_supply_unregister(supply);
+err_free_name:
+ if (name != charger->pdata->name)
+ kfree(name);
+
+ return ret;
+}
+
+static int bq24735_charger_remove(struct i2c_client *client)
+{
+ struct bq24735 *charger = i2c_get_clientdata(client);
+
+ if (charger->client->irq)
+ devm_free_irq(&charger->client->dev, charger->client->irq,
+ &charger->charger);
+
+ power_supply_unregister(&charger->charger);
+
+ if (charger->charger.name != charger->pdata->name)
+ kfree(charger->charger.name);
+
+ return 0;
+}
+
+static const struct i2c_device_id bq24735_charger_id[] = {
+ { "bq24735-charger", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bq24735_charger_id);
+
+static const struct of_device_id bq24735_match_ids[] = {
+ { .compatible = "ti,bq24735", },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, bq24735_match_ids);
+
+static struct i2c_driver bq24735_charger_driver = {
+ .driver = {
+ .name = "bq24735-charger",
+ .owner = THIS_MODULE,
+ .of_match_table = bq24735_match_ids,
+ },
+ .probe = bq24735_charger_probe,
+ .remove = bq24735_charger_remove,
+ .id_table = bq24735_charger_id,
+};
+
+module_i2c_driver(bq24735_charger_driver);
+
+MODULE_DESCRIPTION("bq24735 battery charging driver");
+MODULE_AUTHOR("Darbha Sriharsha <dsriharsha@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index e30e847600b..7287c0efd6b 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -1378,7 +1378,8 @@ static int charger_manager_register_sysfs(struct charger_manager *cm)
charger = &desc->charger_regulators[i];
snprintf(buf, 10, "charger.%d", i);
- str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
+ str = devm_kzalloc(cm->dev,
+ sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto err;
@@ -1452,30 +1453,23 @@ static int charger_manager_probe(struct platform_device *pdev)
rtc_dev = NULL;
dev_err(&pdev->dev, "Cannot get RTC %s\n",
g_desc->rtc_name);
- ret = -ENODEV;
- goto err_alloc;
+ return -ENODEV;
}
}
if (!desc) {
dev_err(&pdev->dev, "No platform data (desc) found\n");
- ret = -ENODEV;
- goto err_alloc;
+ return -ENODEV;
}
- cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL);
- if (!cm) {
- ret = -ENOMEM;
- goto err_alloc;
- }
+ cm = devm_kzalloc(&pdev->dev,
+ sizeof(struct charger_manager), GFP_KERNEL);
+ if (!cm)
+ return -ENOMEM;
/* Basic Values. Unspecified are Null or 0 */
cm->dev = &pdev->dev;
- cm->desc = kmemdup(desc, sizeof(struct charger_desc), GFP_KERNEL);
- if (!cm->desc) {
- ret = -ENOMEM;
- goto err_alloc_desc;
- }
+ cm->desc = desc;
cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
/*
@@ -1498,27 +1492,23 @@ static int charger_manager_probe(struct platform_device *pdev)
}
if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
- ret = -EINVAL;
dev_err(&pdev->dev, "charger_regulators undefined\n");
- goto err_no_charger;
+ return -EINVAL;
}
if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) {
dev_err(&pdev->dev, "No power supply defined\n");
- ret = -EINVAL;
- goto err_no_charger_stat;
+ return -EINVAL;
}
/* Counting index only */
while (desc->psy_charger_stat[i])
i++;
- cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1),
- GFP_KERNEL);
- if (!cm->charger_stat) {
- ret = -ENOMEM;
- goto err_no_charger_stat;
- }
+ cm->charger_stat = devm_kzalloc(&pdev->dev,
+ sizeof(struct power_supply *) * i, GFP_KERNEL);
+ if (!cm->charger_stat)
+ return -ENOMEM;
for (i = 0; desc->psy_charger_stat[i]; i++) {
cm->charger_stat[i] = power_supply_get_by_name(
@@ -1526,8 +1516,7 @@ static int charger_manager_probe(struct platform_device *pdev)
if (!cm->charger_stat[i]) {
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
desc->psy_charger_stat[i]);
- ret = -ENODEV;
- goto err_chg_stat;
+ return -ENODEV;
}
}
@@ -1535,21 +1524,18 @@ static int charger_manager_probe(struct platform_device *pdev)
if (!cm->fuel_gauge) {
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
desc->psy_fuel_gauge);
- ret = -ENODEV;
- goto err_chg_stat;
+ return -ENODEV;
}
if (desc->polling_interval_ms == 0 ||
msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) {
dev_err(&pdev->dev, "polling_interval_ms is too small\n");
- ret = -EINVAL;
- goto err_chg_stat;
+ return -EINVAL;
}
if (!desc->temperature_out_of_range) {
dev_err(&pdev->dev, "there is no temperature_out_of_range\n");
- ret = -EINVAL;
- goto err_chg_stat;
+ return -EINVAL;
}
if (!desc->charging_max_duration_ms ||
@@ -1570,14 +1556,13 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.name = cm->psy_name_buf;
/* Allocate for psy properties because they may vary */
- cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property)
+ cm->charger_psy.properties = devm_kzalloc(&pdev->dev,
+ sizeof(enum power_supply_property)
* (ARRAY_SIZE(default_charger_props) +
- NUM_CHARGER_PSY_OPTIONAL),
- GFP_KERNEL);
- if (!cm->charger_psy.properties) {
- ret = -ENOMEM;
- goto err_chg_stat;
- }
+ NUM_CHARGER_PSY_OPTIONAL), GFP_KERNEL);
+ if (!cm->charger_psy.properties)
+ return -ENOMEM;
+
memcpy(cm->charger_psy.properties, default_charger_props,
sizeof(enum power_supply_property) *
ARRAY_SIZE(default_charger_props));
@@ -1614,7 +1599,7 @@ static int charger_manager_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Cannot register charger-manager with name \"%s\"\n",
cm->charger_psy.name);
- goto err_register;
+ return ret;
}
/* Register extcon device for charger cable */
@@ -1655,8 +1640,6 @@ err_reg_sysfs:
charger = &desc->charger_regulators[i];
sysfs_remove_group(&cm->charger_psy.dev->kobj,
&charger->attr_g);
-
- kfree(charger->attr_g.name);
}
err_reg_extcon:
for (i = 0; i < desc->num_charger_regulators; i++) {
@@ -1674,16 +1657,7 @@ err_reg_extcon:
}
power_supply_unregister(&cm->charger_psy);
-err_register:
- kfree(cm->charger_psy.properties);
-err_chg_stat:
- kfree(cm->charger_stat);
-err_no_charger_stat:
-err_no_charger:
- kfree(cm->desc);
-err_alloc_desc:
- kfree(cm);
-err_alloc:
+
return ret;
}
@@ -1718,11 +1692,6 @@ static int charger_manager_remove(struct platform_device *pdev)
try_charger_enable(cm, false);
- kfree(cm->charger_psy.properties);
- kfree(cm->charger_stat);
- kfree(cm->desc);
- kfree(cm);
-
return 0;
}
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index fc04d191579..1bb3a91b1ac 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -2,6 +2,7 @@
* ISP1704 USB Charger Detection driver
*
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2012 - 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,10 +66,6 @@ struct isp1704_charger {
unsigned present:1;
unsigned online:1;
unsigned current_max;
-
- /* temp storage variables */
- unsigned long event;
- unsigned max_power;
};
static inline int isp1704_read(struct isp1704_charger *isp, u32 reg)
@@ -231,56 +228,59 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp)
return ret;
}
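+/*
+ * True only when charger detection succeeds and the detected type is a
+ * dedicated charging port (DCP).
+ */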
+static inline int isp1704_charger_detect_dcp(struct isp1704_charger *isp)
+{
+ if (isp1704_charger_detect(isp) &&
+ isp1704_charger_type(isp) == POWER_SUPPLY_TYPE_USB_DCP)
+ return true;
+ else
+ return false;
+}
+
static void isp1704_charger_work(struct work_struct *data)
{
- int detect;
- unsigned long event;
- unsigned power;
struct isp1704_charger *isp =
container_of(data, struct isp1704_charger, work);
static DEFINE_MUTEX(lock);
- event = isp->event;
- power = isp->max_power;
-
mutex_lock(&lock);
- if (event != USB_EVENT_NONE)
- isp1704_charger_set_power(isp, 1);
-
- switch (event) {
+ switch (isp->phy->last_event) {
case USB_EVENT_VBUS:
- isp->online = true;
+ /* do not run wall charger detection again */
+ if (!isp->present) {
+ isp->online = true;
+ isp->present = 1;
+ isp1704_charger_set_power(isp, 1);
+
+ /* detect wall charger */
+ if (isp1704_charger_detect_dcp(isp)) {
+ isp->psy.type = POWER_SUPPLY_TYPE_USB_DCP;
+ isp->current_max = 1800;
+ } else {
+ isp->psy.type = POWER_SUPPLY_TYPE_USB;
+ isp->current_max = 500;
+ }
- /* detect charger */
- detect = isp1704_charger_detect(isp);
-
- if (detect) {
- isp->present = detect;
- isp->psy.type = isp1704_charger_type(isp);
+ /* enable data pullups */
+ if (isp->phy->otg->gadget)
+ usb_gadget_connect(isp->phy->otg->gadget);
}
- switch (isp->psy.type) {
- case POWER_SUPPLY_TYPE_USB_DCP:
- isp->current_max = 1800;
- break;
- case POWER_SUPPLY_TYPE_USB_CDP:
+ if (isp->psy.type != POWER_SUPPLY_TYPE_USB_DCP) {
/*
* Only 500mA here or high speed chirp
* handshaking may break
*/
- isp->current_max = 500;
- /* FALLTHROUGH */
- case POWER_SUPPLY_TYPE_USB:
- default:
- /* enable data pullups */
- if (isp->phy->otg->gadget)
- usb_gadget_connect(isp->phy->otg->gadget);
+ if (isp->current_max > 500)
+ isp->current_max = 500;
+
+ if (isp->current_max > 100)
+ isp->psy.type = POWER_SUPPLY_TYPE_USB_CDP;
}
break;
case USB_EVENT_NONE:
isp->online = false;
- isp->current_max = 0;
isp->present = 0;
isp->current_max = 0;
isp->psy.type = POWER_SUPPLY_TYPE_USB;
@@ -298,12 +298,6 @@ static void isp1704_charger_work(struct work_struct *data)
isp1704_charger_set_power(isp, 0);
break;
- case USB_EVENT_ENUMERATED:
- if (isp->present)
- isp->current_max = 1800;
- else
- isp->current_max = power;
- break;
default:
goto out;
}
@@ -314,16 +308,11 @@ out:
}
static int isp1704_notifier_call(struct notifier_block *nb,
- unsigned long event, void *power)
+ unsigned long val, void *v)
{
struct isp1704_charger *isp =
container_of(nb, struct isp1704_charger, nb);
- isp->event = event;
-
- if (power)
- isp->max_power = *((unsigned *)power);
-
schedule_work(&isp->work);
return NOTIFY_OK;
@@ -462,13 +451,13 @@ static int isp1704_charger_probe(struct platform_device *pdev)
if (isp->phy->otg->gadget)
usb_gadget_disconnect(isp->phy->otg->gadget);
+ if (isp->phy->last_event == USB_EVENT_NONE)
+ isp1704_charger_set_power(isp, 0);
+
/* Detect charger if VBUS is valid (the cable was already plugged). */
- ret = isp1704_read(isp, ULPI_USB_INT_STS);
- isp1704_charger_set_power(isp, 0);
- if ((ret & ULPI_INT_VBUS_VALID) && !isp->phy->otg->default_a) {
- isp->event = USB_EVENT_VBUS;
+ if (isp->phy->last_event == USB_EVENT_VBUS &&
+ !isp->phy->otg->default_a)
schedule_work(&isp->work);
- }
return 0;
fail2:
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index d9686aa9270..6c8931d4ad6 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -73,7 +73,7 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
mutex_lock(&battery->lock);
- INIT_COMPLETION(battery->read_completion);
+ reinit_completion(&battery->read_completion);
enable_irq(battery->irq);
battery->cell->enable(battery->pdev);
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index d664ef58afa..e0b22f9b6fd 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -33,6 +33,7 @@
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
#include <linux/of.h>
+#include <linux/regmap.h>
/* Status register bits */
#define STATUS_POR_BIT (1 << 1)
@@ -67,6 +68,7 @@
struct max17042_chip {
struct i2c_client *client;
+ struct regmap *regmap;
struct power_supply battery;
enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
@@ -74,35 +76,6 @@ struct max17042_chip {
int init_complete;
};
-static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
-{
- int ret = i2c_smbus_write_word_data(client, reg, value);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static int max17042_read_reg(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_word_data(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static void max17042_set_reg(struct i2c_client *client,
- struct max17042_reg_data *data, int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- max17042_write_reg(client, data[i].addr, data[i].data);
-}
-
static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CYCLE_COUNT,
@@ -125,96 +98,98 @@ static int max17042_get_property(struct power_supply *psy,
{
struct max17042_chip *chip = container_of(psy,
struct max17042_chip, battery);
+ struct regmap *map = chip->regmap;
int ret;
+ u32 data;
if (!chip->init_complete)
return -EAGAIN;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- ret = max17042_read_reg(chip->client, MAX17042_STATUS);
+ ret = regmap_read(map, MAX17042_STATUS, &data);
if (ret < 0)
return ret;
- if (ret & MAX17042_STATUS_BattAbsent)
+ if (data & MAX17042_STATUS_BattAbsent)
val->intval = 0;
else
val->intval = 1;
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
- ret = max17042_read_reg(chip->client, MAX17042_Cycles);
+ ret = regmap_read(map, MAX17042_Cycles, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt);
+ ret = regmap_read(map, MAX17042_MinMaxVolt, &data);
if (ret < 0)
return ret;
- val->intval = ret >> 8;
+ val->intval = data >> 8;
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
if (chip->chip_type == MAX17042)
- ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ ret = regmap_read(map, MAX17042_V_empty, &data);
else
- ret = max17042_read_reg(chip->client, MAX17047_V_empty);
+ ret = regmap_read(map, MAX17047_V_empty, &data);
if (ret < 0)
return ret;
- val->intval = ret >> 7;
+ val->intval = data >> 7;
val->intval *= 10000; /* Units of LSB = 10mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = max17042_read_reg(chip->client, MAX17042_VCELL);
+ ret = regmap_read(map, MAX17042_VCELL, &data);
if (ret < 0)
return ret;
- val->intval = ret * 625 / 8;
+ val->intval = data * 625 / 8;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
+ ret = regmap_read(map, MAX17042_AvgVCELL, &data);
if (ret < 0)
return ret;
- val->intval = ret * 625 / 8;
+ val->intval = data * 625 / 8;
break;
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+ ret = regmap_read(map, MAX17042_OCVInternal, &data);
if (ret < 0)
return ret;
- val->intval = ret * 625 / 8;
+ val->intval = data * 625 / 8;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+ ret = regmap_read(map, MAX17042_RepSOC, &data);
if (ret < 0)
return ret;
- val->intval = ret >> 8;
+ val->intval = data >> 8;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
+ ret = regmap_read(map, MAX17042_FullCAP, &data);
if (ret < 0)
return ret;
- val->intval = ret * 1000 / 2;
+ val->intval = data * 1000 / 2;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
- ret = max17042_read_reg(chip->client, MAX17042_QH);
+ ret = regmap_read(map, MAX17042_QH, &data);
if (ret < 0)
return ret;
- val->intval = ret * 1000 / 2;
+ val->intval = data * 1000 / 2;
break;
case POWER_SUPPLY_PROP_TEMP:
- ret = max17042_read_reg(chip->client, MAX17042_TEMP);
+ ret = regmap_read(map, MAX17042_TEMP, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
/* The value is signed. */
if (val->intval & 0x8000) {
val->intval = (0x7fff & ~val->intval) + 1;
@@ -226,11 +201,11 @@ static int max17042_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (chip->pdata->enable_current_sense) {
- ret = max17042_read_reg(chip->client, MAX17042_Current);
+ ret = regmap_read(map, MAX17042_Current, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
@@ -244,12 +219,11 @@ static int max17042_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
if (chip->pdata->enable_current_sense) {
- ret = max17042_read_reg(chip->client,
- MAX17042_AvgCurrent);
+ ret = regmap_read(map, MAX17042_AvgCurrent, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
@@ -267,16 +241,15 @@ static int max17042_get_property(struct power_supply *psy,
return 0;
}
-static int max17042_write_verify_reg(struct i2c_client *client,
- u8 reg, u16 value)
+static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value)
{
int retries = 8;
int ret;
- u16 read_value;
+ u32 read_value;
do {
- ret = i2c_smbus_write_word_data(client, reg, value);
- read_value = max17042_read_reg(client, reg);
+ ret = regmap_write(map, reg, value);
+ regmap_read(map, reg, &read_value);
if (read_value != value) {
ret = -EIO;
retries--;
@@ -284,50 +257,51 @@ static int max17042_write_verify_reg(struct i2c_client *client,
} while (retries && read_value != value);
if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+ pr_err("%s: err %d\n", __func__, ret);
return ret;
}
-static inline void max17042_override_por(
- struct i2c_client *client, u8 reg, u16 value)
+static inline void max17042_override_por(struct regmap *map,
+ u8 reg, u16 value)
{
if (value)
- max17042_write_reg(client, reg, value);
+ regmap_write(map, reg, value);
}
static inline void max10742_unlock_model(struct max17042_chip *chip)
{
- struct i2c_client *client = chip->client;
- max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
- max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
+ struct regmap *map = chip->regmap;
+ regmap_write(map, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
+ regmap_write(map, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
}
static inline void max10742_lock_model(struct max17042_chip *chip)
{
- struct i2c_client *client = chip->client;
- max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
- max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
+ struct regmap *map = chip->regmap;
+
+ regmap_write(map, MAX17042_MLOCKReg1, MODEL_LOCK1);
+ regmap_write(map, MAX17042_MLOCKReg2, MODEL_LOCK2);
}
static inline void max17042_write_model_data(struct max17042_chip *chip,
u8 addr, int size)
{
- struct i2c_client *client = chip->client;
+ struct regmap *map = chip->regmap;
int i;
for (i = 0; i < size; i++)
- max17042_write_reg(client, addr + i,
- chip->pdata->config_data->cell_char_tbl[i]);
+ regmap_write(map, addr + i,
+ chip->pdata->config_data->cell_char_tbl[i]);
}
static inline void max17042_read_model_data(struct max17042_chip *chip,
- u8 addr, u16 *data, int size)
+ u8 addr, u32 *data, int size)
{
- struct i2c_client *client = chip->client;
+ struct regmap *map = chip->regmap;
int i;
for (i = 0; i < size; i++)
- data[i] = max17042_read_reg(client, addr + i);
+ regmap_read(map, addr + i, &data[i]);
}
static inline int max17042_model_data_compare(struct max17042_chip *chip,
@@ -350,7 +324,7 @@ static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
- u16 *temp_data;
+ u32 *temp_data;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
@@ -365,7 +339,7 @@ static int max17042_init_model(struct max17042_chip *chip)
ret = max17042_model_data_compare(
chip,
chip->pdata->config_data->cell_char_tbl,
- temp_data,
+ (u16 *)temp_data,
table_size);
max10742_lock_model(chip);
@@ -378,7 +352,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
{
int i;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
- u16 *temp_data;
+ u32 *temp_data;
int ret = 0;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
@@ -398,40 +372,38 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
static void max17042_write_config_regs(struct max17042_chip *chip)
{
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- max17042_write_reg(chip->client, MAX17042_CONFIG, config->config);
- max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg);
- max17042_write_reg(chip->client, MAX17042_FilterCFG,
+ regmap_write(map, MAX17042_CONFIG, config->config);
+ regmap_write(map, MAX17042_LearnCFG, config->learn_cfg);
+ regmap_write(map, MAX17042_FilterCFG,
config->filter_cfg);
- max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+ regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg);
if (chip->chip_type == MAX17047)
- max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+ regmap_write(map, MAX17047_FullSOCThr,
config->full_soc_thresh);
}
static void max17042_write_custom_regs(struct max17042_chip *chip)
{
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
- config->rcomp0);
- max17042_write_verify_reg(chip->client, MAX17042_TempCo,
- config->tcompc0);
- max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
- config->ichgt_term);
+ max17042_write_verify_reg(map, MAX17042_RCOMP0, config->rcomp0);
+ max17042_write_verify_reg(map, MAX17042_TempCo, config->tcompc0);
+ max17042_write_verify_reg(map, MAX17042_ICHGTerm, config->ichgt_term);
if (chip->chip_type == MAX17042) {
- max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+ regmap_write(map, MAX17042_EmptyTempCo, config->empty_tempco);
+ max17042_write_verify_reg(map, MAX17042_K_empty0,
config->kempty0);
} else {
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+ max17042_write_verify_reg(map, MAX17047_QRTbl00,
config->qrtbl00);
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+ max17042_write_verify_reg(map, MAX17047_QRTbl10,
config->qrtbl10);
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+ max17042_write_verify_reg(map, MAX17047_QRTbl20,
config->qrtbl20);
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+ max17042_write_verify_reg(map, MAX17047_QRTbl30,
config->qrtbl30);
}
}
@@ -439,58 +411,60 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
static void max17042_update_capacity_regs(struct max17042_chip *chip)
{
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+ max17042_write_verify_reg(map, MAX17042_FullCAP,
config->fullcap);
- max17042_write_reg(chip->client, MAX17042_DesignCap,
- config->design_cap);
- max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+ regmap_write(map, MAX17042_DesignCap, config->design_cap);
+ max17042_write_verify_reg(map, MAX17042_FullCAPNom,
config->fullcapnom);
}
static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
{
- u16 vfSoc;
+ unsigned int vfSoc;
+ struct regmap *map = chip->regmap;
- vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
- max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
- max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
- max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
+ regmap_read(map, MAX17042_VFSOC, &vfSoc);
+ regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
+ max17042_write_verify_reg(map, MAX17042_VFSOC0, vfSoc);
+ regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
}
static void max17042_load_new_capacity_params(struct max17042_chip *chip)
{
- u16 full_cap0, rep_cap, dq_acc, vfSoc;
+ u32 full_cap0, rep_cap, dq_acc, vfSoc;
u32 rem_cap;
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
- vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+ regmap_read(map, MAX17042_FullCAP0, &full_cap0);
+ regmap_read(map, MAX17042_VFSOC, &vfSoc);
/* fg_vfSoc needs to be shifted by 8 bits to get the
 * percentage in 1% accuracy; to get the right rem_cap, multiply
 * full_cap0 by fg_vfSoc and divide by 100
*/
rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
- max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);
+ max17042_write_verify_reg(map, MAX17042_RemCap, rem_cap);
- rep_cap = (u16)rem_cap;
- max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);
+ rep_cap = rem_cap;
+ max17042_write_verify_reg(map, MAX17042_RepCap, rep_cap);
/* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
dq_acc = config->fullcap / dQ_ACC_DIV;
- max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
- max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);
+ max17042_write_verify_reg(map, MAX17042_dQacc, dq_acc);
+ max17042_write_verify_reg(map, MAX17042_dPacc, dP_ACC_200);
- max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+ max17042_write_verify_reg(map, MAX17042_FullCAP,
config->fullcap);
- max17042_write_reg(chip->client, MAX17042_DesignCap,
+ regmap_write(map, MAX17042_DesignCap,
config->design_cap);
- max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+ max17042_write_verify_reg(map, MAX17042_FullCAPNom,
config->fullcapnom);
/* Update SOC register with new SOC */
- max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
+ regmap_write(map, MAX17042_RepSOC, vfSoc);
}
/*
@@ -500,59 +474,60 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
*/
static inline void max17042_override_por_values(struct max17042_chip *chip)
{
- struct i2c_client *client = chip->client;
+ struct regmap *map = chip->regmap;
struct max17042_config_data *config = chip->pdata->config_data;
- max17042_override_por(client, MAX17042_TGAIN, config->tgain);
- max17042_override_por(client, MAx17042_TOFF, config->toff);
- max17042_override_por(client, MAX17042_CGAIN, config->cgain);
- max17042_override_por(client, MAX17042_COFF, config->coff);
-
- max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
- max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
- max17042_override_por(client, MAX17042_SALRT_Th,
- config->soc_alrt_thresh);
- max17042_override_por(client, MAX17042_CONFIG, config->config);
- max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);
-
- max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
- max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);
-
- max17042_override_por(client, MAX17042_AtRate, config->at_rate);
- max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
- max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
- max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
- max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
- max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);
-
- max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
- max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
+ max17042_override_por(map, MAX17042_TGAIN, config->tgain);
+ max17042_override_por(map, MAx17042_TOFF, config->toff);
+ max17042_override_por(map, MAX17042_CGAIN, config->cgain);
+ max17042_override_por(map, MAX17042_COFF, config->coff);
+
+ max17042_override_por(map, MAX17042_VALRT_Th, config->valrt_thresh);
+ max17042_override_por(map, MAX17042_TALRT_Th, config->talrt_thresh);
+ max17042_override_por(map, MAX17042_SALRT_Th,
+ config->soc_alrt_thresh);
+ max17042_override_por(map, MAX17042_CONFIG, config->config);
+ max17042_override_por(map, MAX17042_SHDNTIMER, config->shdntimer);
+
+ max17042_override_por(map, MAX17042_DesignCap, config->design_cap);
+ max17042_override_por(map, MAX17042_ICHGTerm, config->ichgt_term);
+
+ max17042_override_por(map, MAX17042_AtRate, config->at_rate);
+ max17042_override_por(map, MAX17042_LearnCFG, config->learn_cfg);
+ max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg);
+ max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg);
+ max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg);
+ max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
+
+ max17042_override_por(map, MAX17042_FullCAP, config->fullcap);
+ max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom);
if (chip->chip_type == MAX17042)
- max17042_override_por(client, MAX17042_SOC_empty,
+ max17042_override_por(map, MAX17042_SOC_empty,
config->socempty);
- max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
- max17042_override_por(client, MAX17042_dQacc, config->dqacc);
- max17042_override_por(client, MAX17042_dPacc, config->dpacc);
+ max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
+ max17042_override_por(map, MAX17042_dQacc, config->dqacc);
+ max17042_override_por(map, MAX17042_dPacc, config->dpacc);
if (chip->chip_type == MAX17042)
- max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ max17042_override_por(map, MAX17042_V_empty, config->vempty);
else
- max17042_override_por(client, MAX17047_V_empty, config->vempty);
- max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
- max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
- max17042_override_por(client, MAX17042_FCTC, config->fctc);
- max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
- max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
+ max17042_override_por(map, MAX17047_V_empty, config->vempty);
+ max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
+ max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
+ max17042_override_por(map, MAX17042_FCTC, config->fctc);
+ max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
+ max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
if (chip->chip_type) {
- max17042_override_por(client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_override_por(client, MAX17042_K_empty0,
- config->kempty0);
+ max17042_override_por(map, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_override_por(map, MAX17042_K_empty0,
+ config->kempty0);
}
}
static int max17042_init_chip(struct max17042_chip *chip)
{
+ struct regmap *map = chip->regmap;
int ret;
int val;
@@ -597,31 +572,32 @@ static int max17042_init_chip(struct max17042_chip *chip)
max17042_load_new_capacity_params(chip);
/* Init complete, Clear the POR bit */
- val = max17042_read_reg(chip->client, MAX17042_STATUS);
- max17042_write_reg(chip->client, MAX17042_STATUS,
- val & (~STATUS_POR_BIT));
+ regmap_read(map, MAX17042_STATUS, &val);
+ regmap_write(map, MAX17042_STATUS, val & (~STATUS_POR_BIT));
return 0;
}
static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
{
- u16 soc, soc_tr;
+ struct regmap *map = chip->regmap;
+ u32 soc, soc_tr;
/* program interrupt thresholds such that we should
 * get an interrupt for every 'off' percent change in the SOC
*/
- soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
+ regmap_read(map, MAX17042_RepSOC, &soc);
+ soc >>= 8;
soc_tr = (soc + off) << 8;
soc_tr |= (soc - off);
- max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
+ regmap_write(map, MAX17042_SALRT_Th, soc_tr);
}
static irqreturn_t max17042_thread_handler(int id, void *dev)
{
struct max17042_chip *chip = dev;
- u16 val;
+ u32 val;
- val = max17042_read_reg(chip->client, MAX17042_STATUS);
+ regmap_read(chip->regmap, MAX17042_STATUS, &val);
if ((val & STATUS_INTR_SOCMIN_BIT) ||
(val & STATUS_INTR_SOCMAX_BIT)) {
dev_info(&chip->client->dev, "SOC threshold INTR\n");
@@ -682,13 +658,20 @@ max17042_get_pdata(struct device *dev)
}
#endif
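+/* 8-bit register addresses holding 16-bit values, kept in native endianness */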
+static struct regmap_config max17042_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
static int max17042_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct max17042_chip *chip;
int ret;
- int reg;
+ int i;
+ u32 val;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
@@ -698,6 +681,12 @@ static int max17042_probe(struct i2c_client *client,
return -ENOMEM;
chip->client = client;
+ chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&client->dev, "Failed to initialize regmap\n");
+ return -EINVAL;
+ }
+
chip->pdata = max17042_get_pdata(&client->dev);
if (!chip->pdata) {
dev_err(&client->dev, "no platform data provided\n");
@@ -706,15 +695,15 @@ static int max17042_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- ret = max17042_read_reg(chip->client, MAX17042_DevName);
- if (ret == MAX17042_IC_VERSION) {
+ regmap_read(chip->regmap, MAX17042_DevName, &val);
+ if (val == MAX17042_IC_VERSION) {
dev_dbg(&client->dev, "chip type max17042 detected\n");
chip->chip_type = MAX17042;
- } else if (ret == MAX17047_IC_VERSION) {
+ } else if (val == MAX17047_IC_VERSION) {
dev_dbg(&client->dev, "chip type max17047/50 detected\n");
chip->chip_type = MAX17047;
} else {
- dev_err(&client->dev, "device version mismatch: %x\n", ret);
+ dev_err(&client->dev, "device version mismatch: %x\n", val);
return -EIO;
}
@@ -733,13 +722,15 @@ static int max17042_probe(struct i2c_client *client,
chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
if (chip->pdata->init_data)
- max17042_set_reg(client, chip->pdata->init_data,
- chip->pdata->num_init_data);
+ for (i = 0; i < chip->pdata->num_init_data; i++)
+ regmap_write(chip->regmap,
+ chip->pdata->init_data[i].addr,
+ chip->pdata->init_data[i].data);
if (!chip->pdata->enable_current_sense) {
- max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
- max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
- max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+ regmap_write(chip->regmap, MAX17042_CGAIN, 0x0000);
+ regmap_write(chip->regmap, MAX17042_MiscCFG, 0x0003);
+ regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007);
}
ret = power_supply_register(&client->dev, &chip->battery);
@@ -754,9 +745,9 @@ static int max17042_probe(struct i2c_client *client,
IRQF_TRIGGER_FALLING,
chip->battery.name, chip);
if (!ret) {
- reg = max17042_read_reg(client, MAX17042_CONFIG);
- reg |= CONFIG_ALRT_BIT_ENBL;
- max17042_write_reg(client, MAX17042_CONFIG, reg);
+ regmap_read(chip->regmap, MAX17042_CONFIG, &val);
+ val |= CONFIG_ALRT_BIT_ENBL;
+ regmap_write(chip->regmap, MAX17042_CONFIG, val);
max17042_set_soc_threshold(chip, 1);
} else {
client->irq = 0;
@@ -765,8 +756,8 @@ static int max17042_probe(struct i2c_client *client,
}
}
- reg = max17042_read_reg(chip->client, MAX17042_STATUS);
- if (reg & STATUS_POR_BIT) {
+ regmap_read(chip->regmap, MAX17042_STATUS, &val);
+ if (val & STATUS_POR_BIT) {
INIT_WORK(&chip->work, max17042_init_worker);
schedule_work(&chip->work);
} else {
@@ -786,7 +777,7 @@ static int max17042_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int max17042_suspend(struct device *dev)
{
struct max17042_chip *chip = dev_get_drvdata(dev);
@@ -816,17 +807,11 @@ static int max17042_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops max17042_pm_ops = {
- .suspend = max17042_suspend,
- .resume = max17042_resume,
-};
-
-#define MAX17042_PM_OPS (&max17042_pm_ops)
-#else
-#define MAX17042_PM_OPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(max17042_pm_ops, max17042_suspend,
+ max17042_resume);
+
#ifdef CONFIG_OF
static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
@@ -849,7 +834,7 @@ static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
.of_match_table = of_match_ptr(max17042_dt_match),
- .pm = MAX17042_PM_OPS,
+ .pm = &max17042_pm_ops,
},
.probe = max17042_probe,
.remove = max17042_remove,
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index ffa10ed83eb..62c15af58c9 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -205,7 +205,7 @@ static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
}
-int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
+static int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
{
queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
@@ -722,8 +722,12 @@ static int pm2xxx_charger_ac_en(struct ux500_charger *charger,
dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset);
if (!pm2->vddadc_en_ac) {
- regulator_enable(pm2->regu);
- pm2->vddadc_en_ac = true;
+ ret = regulator_enable(pm2->regu);
+ if (ret)
+ dev_warn(pm2->dev,
+ "Failed to enable vddadc regulator\n");
+ else
+ pm2->vddadc_en_ac = true;
}
ret = pm2xxx_charging_init(pm2);
@@ -953,37 +957,24 @@ static int pm2xxx_runtime_suspend(struct device *dev)
{
struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
struct pm2xxx_charger *pm2;
- int ret = 0;
pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client);
- if (!pm2) {
- dev_err(pm2->dev, "no pm2xxx_charger data supplied\n");
- ret = -EINVAL;
- return ret;
- }
-
clear_lpn_pin(pm2);
- return ret;
+ return 0;
}
static int pm2xxx_runtime_resume(struct device *dev)
{
struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
struct pm2xxx_charger *pm2;
- int ret = 0;
pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client);
- if (!pm2) {
- dev_err(pm2->dev, "no pm2xxx_charger data supplied\n");
- ret = -EINVAL;
- return ret;
- }
if (gpio_is_valid(pm2->lpn_pin) && gpio_get_value(pm2->lpn_pin) == 0)
set_lpn_pin(pm2);
- return ret;
+ return 0;
}
#endif
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c
index bdd7b9b2546..8fc9d6df87f 100644
--- a/drivers/power/tps65090-charger.c
+++ b/drivers/power/tps65090-charger.c
@@ -15,15 +15,17 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/slab.h>
+
#include <linux/mfd/tps65090.h>
#define TPS65090_REG_INTR_STS 0x00
@@ -185,10 +187,6 @@ static irqreturn_t tps65090_charger_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#if defined(CONFIG_OF)
-
-#include <linux/of_device.h>
-
static struct tps65090_platform_data *
tps65090_parse_dt_charger_data(struct platform_device *pdev)
{
@@ -210,13 +208,6 @@ static struct tps65090_platform_data *
return pdata;
}
-#else
-static struct tps65090_platform_data *
- tps65090_parse_dt_charger_data(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
static int tps65090_charger_probe(struct platform_device *pdev)
{
@@ -228,7 +219,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
pdata = dev_get_platdata(pdev->dev.parent);
- if (!pdata && pdev->dev.of_node)
+ if (IS_ENABLED(CONFIG_OF) && !pdata && pdev->dev.of_node)
pdata = tps65090_parse_dt_charger_data(pdev);
if (!pdata) {
@@ -277,13 +268,13 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (ret) {
dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq,
ret);
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
ret = tps65090_config_charger(cdata);
if (ret < 0) {
dev_err(&pdev->dev, "charger config failed, err %d\n", ret);
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
/* Check for charger presence */
@@ -292,14 +283,14 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(cdata->dev, "%s(): Error in reading reg 0x%x", __func__,
TPS65090_REG_CG_STATUS1);
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
if (status1 != 0) {
ret = tps65090_enable_charging(cdata);
if (ret < 0) {
dev_err(cdata->dev, "error enabling charger\n");
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
cdata->ac_online = 1;
power_supply_changed(&cdata->ac);
@@ -307,8 +298,6 @@ static int tps65090_charger_probe(struct platform_device *pdev)
return 0;
-fail_free_irq:
- devm_free_irq(cdata->dev, irq, cdata);
fail_unregister_supply:
power_supply_unregister(&cdata->ac);
@@ -319,7 +308,6 @@ static int tps65090_charger_remove(struct platform_device *pdev)
{
struct tps65090_charger *cdata = platform_get_drvdata(pdev);
- devm_free_irq(cdata->dev, cdata->irq, cdata);
power_supply_unregister(&cdata->ac);
return 0;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index d98abe911e3..f14108844e1 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -495,10 +495,38 @@ static enum power_supply_property twl4030_charger_props[] = {
POWER_SUPPLY_PROP_CURRENT_NOW,
};
+#ifdef CONFIG_OF
+static const struct twl4030_bci_platform_data *
+twl4030_bci_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct twl4030_bci_platform_data *pdata;
+ u32 num;
+
+ if (!np)
+ return NULL;
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return pdata;
+
+ if (of_property_read_u32(np, "ti,bb-uvolt", &num) == 0)
+ pdata->bb_uvolt = num;
+ if (of_property_read_u32(np, "ti,bb-uamp", &num) == 0)
+ pdata->bb_uamp = num;
+ return pdata;
+}
+#else
+static inline const struct twl4030_bci_platform_data *
+twl4030_bci_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int __init twl4030_bci_probe(struct platform_device *pdev)
{
struct twl4030_bci *bci;
- struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data;
+ const struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data;
int ret;
u32 reg;
@@ -506,6 +534,9 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
if (bci == NULL)
return -ENOMEM;
+ if (!pdata)
+ pdata = twl4030_bci_parse_dt(&pdev->dev);
+
bci->dev = &pdev->dev;
bci->irq_chg = platform_get_irq(pdev, 0);
bci->irq_bci = platform_get_irq(pdev, 1);
@@ -581,8 +612,11 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
twl4030_charger_enable_ac(true);
twl4030_charger_enable_usb(bci, true);
- twl4030_charger_enable_backup(pdata->bb_uvolt,
- pdata->bb_uamp);
+ if (pdata)
+ twl4030_charger_enable_backup(pdata->bb_uvolt,
+ pdata->bb_uamp);
+ else
+ twl4030_charger_enable_backup(0, 0);
return 0;
@@ -631,10 +665,17 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id twl_bci_of_match[] = {
+ {.compatible = "ti,twl4030-bci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, twl_bci_of_match);
+
static struct platform_driver twl4030_bci_driver = {
.driver = {
.name = "twl4030_bci",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(twl_bci_of_match),
},
.remove = __exit_p(twl4030_bci_remove),
};
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
new file mode 100644
index 00000000000..a7c81b53d88
--- /dev/null
+++ b/drivers/powercap/Kconfig
@@ -0,0 +1,32 @@
+#
+# Generic power capping sysfs interface configuration
+#
+
+menuconfig POWERCAP
+ bool "Generic powercap sysfs driver"
+ help
+ The power capping sysfs interface allows kernel subsystems to expose power
+ capping settings to user space in a consistent way. Usually, it consists
+ of multiple control types that determine which settings may be exposed and
+ power zones representing parts of the system that can be subject to power
+ capping.
+
+ If you want this code to be compiled in, say Y here.
+
+if POWERCAP
+# Client driver configurations go here.
+config INTEL_RAPL
+ tristate "Intel RAPL Support"
+ depends on X86
+ default n
+ ---help---
+ This enables support for the Intel Running Average Power Limit (RAPL)
+ technology which allows power limits to be enforced and monitored on
+ modern Intel processors (Sandy Bridge and later).
+
+ In RAPL, the platform level settings are divided into domains for
+ fine grained control. These domains include processor package, DRAM
+ controller, CPU core (Power Plane 0), graphics uncore (Power Plane
+ 1), etc.
+
+endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
new file mode 100644
index 00000000000..0a21ef31372
--- /dev/null
+++ b/drivers/powercap/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWERCAP) += powercap_sys.o
+obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
new file mode 100644
index 00000000000..2a786c50446
--- /dev/null
+++ b/drivers/powercap/intel_rapl.c
@@ -0,0 +1,1395 @@
+/*
+ * Intel Running Average Power Limit (RAPL) Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/powercap.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+/* bitmasks for RAPL MSRs, used by primitive access functions */
+#define ENERGY_STATUS_MASK 0xffffffff
+
+#define POWER_LIMIT1_MASK 0x7FFF
+#define POWER_LIMIT1_ENABLE BIT(15)
+#define POWER_LIMIT1_CLAMP BIT(16)
+
+#define POWER_LIMIT2_MASK (0x7FFFULL<<32)
+#define POWER_LIMIT2_ENABLE BIT_ULL(47)
+#define POWER_LIMIT2_CLAMP BIT_ULL(48)
+#define POWER_PACKAGE_LOCK BIT_ULL(63)
+#define POWER_PP_LOCK BIT(31)
+
+#define TIME_WINDOW1_MASK (0x7FULL<<17)
+#define TIME_WINDOW2_MASK (0x7FULL<<49)
+
+#define POWER_UNIT_OFFSET 0
+#define POWER_UNIT_MASK 0x0F
+
+#define ENERGY_UNIT_OFFSET 0x08
+#define ENERGY_UNIT_MASK 0x1F00
+
+#define TIME_UNIT_OFFSET 0x10
+#define TIME_UNIT_MASK 0xF0000
+
+#define POWER_INFO_MAX_MASK (0x7fffULL<<32)
+#define POWER_INFO_MIN_MASK (0x7fffULL<<16)
+#define POWER_INFO_MAX_TIME_WIN_MASK (0x3fULL<<48)
+#define POWER_INFO_THERMAL_SPEC_MASK 0x7fff
+
+#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
+#define PP_POLICY_MASK 0x1F
+
+/* Non HW constants */
+#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
+#define RAPL_PRIMITIVE_DUMMY BIT(2)
+
+/* scale RAPL units to avoid floating point math inside kernel */
+#define POWER_UNIT_SCALE (1000000)
+#define ENERGY_UNIT_SCALE (1000000)
+#define TIME_UNIT_SCALE (1000000)
+
+#define TIME_WINDOW_MAX_MSEC 40000
+#define TIME_WINDOW_MIN_MSEC 250
+
+enum unit_type {
+ ARBITRARY_UNIT, /* no translation */
+ POWER_UNIT,
+ ENERGY_UNIT,
+ TIME_UNIT,
+};
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM, /* DRAM control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_msr_id {
+ RAPL_DOMAIN_MSR_LIMIT,
+ RAPL_DOMAIN_MSR_STATUS,
+ RAPL_DOMAIN_MSR_PERF,
+ RAPL_DOMAIN_MSR_POLICY,
+ RAPL_DOMAIN_MSR_INFO,
+ RAPL_DOMAIN_MSR_MAX,
+};
+
+/* per domain data, some are optional */
+enum rapl_primitives {
+ ENERGY_COUNTER,
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ FW_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
+
+/* Can be expanded to include events, etc.*/
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+
+#define DOMAIN_STATE_INACTIVE BIT(0)
+#define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
+#define DOMAIN_STATE_BIOS_LOCKED BIT(2)
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ int prim_id; /* primitive ID used to enable */
+ struct rapl_domain *domain;
+ const char *name;
+};
+
+static const char pl1_name[] = "long_term";
+static const char pl2_name[] = "short_term";
+
+struct rapl_domain {
+ const char *name;
+ enum rapl_domain_type id;
+ int msrs[RAPL_DOMAIN_MSR_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ int package_id;
+};
+#define power_zone_to_rapl_domain(_zone) \
+ container_of(_zone, struct rapl_domain, power_zone)
+
+
+/* Each physical package contains multiple domains; these are the common
+ * data across RAPL domains within a package.
+ */
+struct rapl_package {
+ unsigned int id; /* physical package/socket id */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ unsigned int power_unit_divisor;
+ unsigned int energy_unit_divisor;
+ unsigned int time_unit_divisor;
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ int nr_cpus; /* active CPUs on the package; topology info is lost during
+ * CPU hotplug, so we have to track this ourselves.
+ */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+};
+#define PACKAGE_PLN_INT_SAVED BIT(0)
+#define MAX_PRIM_NAME (32)
+
+/* per domain data, used to describe individual knobs such that access functions
+ * can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+ const char *name;
+ u64 mask;
+ int shift;
+ enum rapl_domain_msr_id id;
+ enum unit_type unit;
+ u32 flag;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) { \
+ .name = #p, \
+ .mask = m, \
+ .shift = s, \
+ .id = i, \
+ .unit = u, \
+ .flag = f \
+ }
+
+static void rapl_init_domains(struct rapl_package *rp);
+static int rapl_read_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ bool xlate, u64 *data);
+static int rapl_write_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ unsigned long long value);
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ int to_raw);
+static void package_power_limit_irq_save(int package_id);
+
+static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
+
+static const char * const rapl_domain_names[] = {
+ "package",
+ "core",
+ "uncore",
+ "dram",
+};
+
+static struct powercap_control_type *control_type; /* PowerCap Controller */
+
+/* caller to ensure CPU hotplug lock is held */
+static struct rapl_package *find_package_by_id(int id)
+{
+ struct rapl_package *rp;
+
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (rp->id == id)
+ return rp;
+ }
+
+ return NULL;
+}
+
+/* caller to ensure CPU hotplug lock is held */
+static int find_active_cpu_on_package(int package_id)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ if (topology_physical_package_id(i) == package_id)
+ return i;
+ }
+ /* all CPUs on this package are offline */
+
+ return -ENODEV;
+}
+
+/* caller must hold cpu hotplug lock */
+static void rapl_cleanup_data(void)
+{
+ struct rapl_package *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
+ kfree(p->domains);
+ list_del(&p->plist);
+ kfree(p);
+ }
+}
+
+static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+{
+ struct rapl_domain *rd;
+ u64 energy_now;
+
+ /* prevent CPU hotplug, make sure the RAPL domain does not go
+ * away while reading the counter.
+ */
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+
+ if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
+ *energy_raw = energy_now;
+ put_online_cpus();
+
+ return 0;
+ }
+ put_online_cpus();
+
+ return -EIO;
+}
+
+static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+{
+ *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+ return 0;
+}
+
+static int release_zone(struct powercap_zone *power_zone)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ struct rapl_package *rp;
+
+ /* package zone is the last zone of a package, we can free
+ * memory here since all children have been unregistered.
+ */
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rp = find_package_by_id(rd->package_id);
+ if (!rp) {
+ dev_warn(&power_zone->dev, "no package id %s\n",
+ rd->name);
+ return -ENODEV;
+ }
+ kfree(rd);
+ rp->domains = NULL;
+ }
+
+ return 0;
+
+}
+
+static int find_nr_power_limit(struct rapl_domain *rd)
+{
+ int i;
+
+ for (i = 0; i < NR_POWER_LIMITS; i++) {
+ if (rd->rpl[i].name == NULL)
+ break;
+ }
+
+ return i;
+}
+
+static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ int nr_powerlimit;
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
+ return -EACCES;
+ get_online_cpus();
+ nr_powerlimit = find_nr_power_limit(rd);
+ /* here we activate/deactivate the hardware for power limiting */
+ rapl_write_data_raw(rd, PL1_ENABLE, mode);
+ /* always enable clamp such that p-state can go below OS requested
+ * range. Power capping takes priority over guaranteed frequency.
+ */
+ rapl_write_data_raw(rd, PL1_CLAMP, mode);
+ /* some domains have pl2 */
+ if (nr_powerlimit > 1) {
+ rapl_write_data_raw(rd, PL2_ENABLE, mode);
+ rapl_write_data_raw(rd, PL2_CLAMP, mode);
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ u64 val;
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ *mode = false;
+ return 0;
+ }
+ get_online_cpus();
+ if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
+ put_online_cpus();
+ return -EIO;
+ }
+ *mode = val;
+ put_online_cpus();
+
+ return 0;
+}
+
+/* per RAPL domain ops, in the order of rapl_domain_type */
+static struct powercap_zone_ops zone_ops[] = {
+ /* RAPL_DOMAIN_PACKAGE */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_PP0 */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_PP1 */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_DRAM */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+};
+
+static int set_power_limit(struct powercap_zone *power_zone, int id,
+ u64 power_limit)
+{
+ struct rapl_domain *rd;
+ struct rapl_package *rp;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ rp = find_package_by_id(rd->package_id);
+ if (!rp) {
+ ret = -ENODEV;
+ goto set_exit;
+ }
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
+ rd->name);
+ ret = -EACCES;
+ goto set_exit;
+ }
+
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
+ break;
+ case PL2_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ package_power_limit_irq_save(rd->package_id);
+set_exit:
+ put_online_cpus();
+ return ret;
+}
+
+static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+ u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int prim;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ prim = POWER_LIMIT1;
+ break;
+ case PL2_ENABLE:
+ prim = POWER_LIMIT2;
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (rapl_read_data_raw(rd, prim, true, &val))
+ ret = -EIO;
+ else
+ *data = val;
+
+ put_online_cpus();
+
+ return ret;
+}
+
+static int set_time_window(struct powercap_zone *power_zone, int id,
+ u64 window)
+{
+ struct rapl_domain *rd;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ rapl_write_data_raw(rd, TIME_WINDOW1, window);
+ break;
+ case PL2_ENABLE:
+ rapl_write_data_raw(rd, TIME_WINDOW2, window);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ put_online_cpus();
+ return ret;
+}
+
+static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (!ret)
+ *data = val;
+ put_online_cpus();
+
+ return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+{
+ struct rapl_power_limit *rpl;
+ struct rapl_domain *rd;
+
+ rd = power_zone_to_rapl_domain(power_zone);
+ rpl = (struct rapl_power_limit *) &rd->rpl[id];
+
+ return rpl->name;
+}
+
+
+static int get_max_power(struct powercap_zone *power_zone, int id,
+ u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int prim;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ prim = THERMAL_SPEC_POWER;
+ break;
+ case PL2_ENABLE:
+ prim = MAX_POWER;
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (rapl_read_data_raw(rd, prim, true, &val))
+ ret = -EIO;
+ else
+ *data = val;
+
+ put_online_cpus();
+
+ return ret;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+ .set_power_limit_uw = set_power_limit,
+ .get_power_limit_uw = get_current_power_limit,
+ .set_time_window_us = set_time_window,
+ .get_time_window_us = get_time_window,
+ .get_max_power_uw = get_max_power,
+ .get_name = get_constraint_name,
+};
+
+/* called after domain detection and package level data are set */
+static void rapl_init_domains(struct rapl_package *rp)
+{
+ int i;
+ struct rapl_domain *rd = rp->domains;
+
+ for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+ unsigned int mask = rp->domain_map & (1 << i);
+ switch (mask) {
+ case BIT(RAPL_DOMAIN_PACKAGE):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
+ rd->id = RAPL_DOMAIN_PACKAGE;
+ rd->msrs[0] = MSR_PKG_POWER_LIMIT;
+ rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
+ rd->msrs[2] = MSR_PKG_PERF_STATUS;
+ rd->msrs[3] = 0;
+ rd->msrs[4] = MSR_PKG_POWER_INFO;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ rd->rpl[1].prim_id = PL2_ENABLE;
+ rd->rpl[1].name = pl2_name;
+ break;
+ case BIT(RAPL_DOMAIN_PP0):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
+ rd->id = RAPL_DOMAIN_PP0;
+ rd->msrs[0] = MSR_PP0_POWER_LIMIT;
+ rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
+ rd->msrs[2] = 0;
+ rd->msrs[3] = MSR_PP0_POLICY;
+ rd->msrs[4] = 0;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ case BIT(RAPL_DOMAIN_PP1):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
+ rd->id = RAPL_DOMAIN_PP1;
+ rd->msrs[0] = MSR_PP1_POWER_LIMIT;
+ rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
+ rd->msrs[2] = 0;
+ rd->msrs[3] = MSR_PP1_POLICY;
+ rd->msrs[4] = 0;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ case BIT(RAPL_DOMAIN_DRAM):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
+ rd->id = RAPL_DOMAIN_DRAM;
+ rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
+ rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
+ rd->msrs[2] = MSR_DRAM_PERF_STATUS;
+ rd->msrs[3] = 0;
+ rd->msrs[4] = MSR_DRAM_POWER_INFO;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ }
+ if (mask) {
+ rd->package_id = rp->id;
+ rd++;
+ }
+ }
+}
+
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ int to_raw)
+{
+ u64 divisor = 1;
+ int scale = 1; /* scale to user friendly data without floating point */
+ u64 f, y; /* fraction and exp. used for time unit */
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package);
+ if (!rp)
+ return value;
+
+ switch (type) {
+ case POWER_UNIT:
+ divisor = rp->power_unit_divisor;
+ scale = POWER_UNIT_SCALE;
+ break;
+ case ENERGY_UNIT:
+ scale = ENERGY_UNIT_SCALE;
+ divisor = rp->energy_unit_divisor;
+ break;
+ case TIME_UNIT:
+ divisor = rp->time_unit_divisor;
+ scale = TIME_UNIT_SCALE;
+ /* special processing based on 2^Y * (1 + F/4) = val/divisor, refer
+ * to Intel Software Developer's manual Vol. 3a, CH 14.7.4.
+ */
+ if (!to_raw) {
+ f = (value & 0x60) >> 5;
+ y = value & 0x1f;
+ value = (1 << y) * (4 + f) * scale / 4;
+ return div64_u64(value, divisor);
+ } else {
+ do_div(value, scale);
+ value *= divisor;
+ y = ilog2(value);
+ f = div64_u64(4 * (value - (1 << y)), 1 << y);
+ value = (y & 0x1f) | ((f & 0x3) << 5);
+ return value;
+ }
+ break;
+ case ARBITRARY_UNIT:
+ default:
+ return value;
+ };
+
+ if (to_raw)
+ return div64_u64(value * divisor, scale);
+ else
+ return div64_u64(value * scale, divisor);
+}
+
+/* in the order of enum rapl_primitives */
+static struct rapl_primitive_info rpi[] = {
+ /* name, mask, shift, msr index, unit divisor */
+ PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+ RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+ RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+ RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+ RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+ RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+ 0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+ RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+ RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+ RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+ RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+ RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
+ /* non-hardware */
+ PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+ RAPL_PRIMITIVE_DERIVED),
+ {NULL, 0, 0, 0},
+};
+
+/* Read primitive data based on its related struct rapl_primitive_info.
+ * If the xlate flag is set, return translated data based on data units, i.e.
+ * time, energy, and power.
+ * RAPL MSRs are non-architectural and are not laid out consistently across
+ * domains. Here we use primitive info to allow writing consolidated access
+ * functions.
+ * For a given primitive, it is processed by MSR mask and shift. Unit conversion
+ * is pre-assigned based on RAPL unit MSRs read at init time.
+ * 63-------------------------- 31--------------------------- 0
+ * | xxxxx (mask) |
+ * | |<- shift ----------------|
+ * 63-------------------------- 31--------------------------- 0
+ */
+static int rapl_read_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ bool xlate, u64 *data)
+{
+ u64 value, final;
+ u32 msr;
+ struct rapl_primitive_info *rp = &rpi[prim];
+ int cpu;
+
+ if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+ return -EINVAL;
+
+ msr = rd->msrs[rp->id];
+ if (!msr)
+ return -EINVAL;
+ /* use physical package id to look up active cpus */
+ cpu = find_active_cpu_on_package(rd->package_id);
+ if (cpu < 0)
+ return cpu;
+
+ /* special-case package domain, which uses a different bit */
+ if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
+ rp->mask = POWER_PACKAGE_LOCK;
+ rp->shift = 63;
+ }
+ /* non-hardware data are collected by the polling thread */
+ if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+ *data = rd->rdd.primitives[prim];
+ return 0;
+ }
+
+ if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
+ pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+
+ final = value & rp->mask;
+ final = final >> rp->shift;
+ if (xlate)
+ *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+ else
+ *data = final;
+
+ return 0;
+}
+
+/* Similar use of primitive info in the read counterpart */
+static int rapl_write_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ unsigned long long value)
+{
+ u64 msr_val;
+ u32 msr;
+ struct rapl_primitive_info *rp = &rpi[prim];
+ int cpu;
+
+ cpu = find_active_cpu_on_package(rd->package_id);
+ if (cpu < 0)
+ return cpu;
+ msr = rd->msrs[rp->id];
+ if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
+ dev_dbg(&rd->power_zone.dev,
+ "failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+ value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+ msr_val &= ~rp->mask;
+ msr_val |= value << rp->shift;
+ if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+ dev_dbg(&rd->power_zone.dev,
+ "failed to write msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rapl_check_unit(struct rapl_package *rp, int cpu)
+{
+ u64 msr_val;
+ u32 value;
+
+ if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
+ pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
+ MSR_RAPL_POWER_UNIT, cpu);
+ return -ENODEV;
+ }
+
+ /* Raw RAPL data stored in MSRs are in certain scales. We need to
+ * convert them into standard units based on the divisors reported in
+ * the RAPL unit MSRs.
+ * i.e.
+ * energy unit: 1/energy_unit_divisor Joules
+ * power unit: 1/power_unit_divisor Watts
+ * time unit: 1/time_unit_divisor Seconds
+ */
+ value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+ rp->energy_unit_divisor = 1 << value;
+
+
+ value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+ rp->power_unit_divisor = 1 << value;
+
+ value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+ rp->time_unit_divisor = 1 << value;
+
+ pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n",
+ rp->id,
+ rp->energy_unit_divisor,
+ rp->time_unit_divisor,
+ rp->power_unit_divisor);
+
+ return 0;
+}
+
+/* REVISIT:
+ * When package power limit is set artificially low by RAPL, LVT
+ * thermal interrupt for package power limit should be ignored
+ * since we are not really exceeding the real limit. The intention
+ * is to avoid excessive interrupts while we are trying to save power.
+ * A useful feature might be routing the package_power_limit interrupt
+ * to userspace via eventfd. Once we have a use case, this is simple
+ * to do by adding an atomic notifier.
+ */
+
+static void package_power_limit_irq_save(int package_id)
+{
+ u32 l, h = 0;
+ int cpu;
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package_id);
+ if (!rp)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ cpu = find_active_cpu_on_package(package_id);
+ if (cpu < 0)
+ return;
+ /* save the state of PLN irq mask bit before disabling it */
+ rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+ rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+ rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+ }
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+ wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(int package_id)
+{
+ u32 l, h;
+ int cpu;
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package_id);
+ if (!rp)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ cpu = find_active_cpu_on_package(package_id);
+ if (cpu < 0)
+ return;
+
+ /* irq enable state not saved, nothing to restore */
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+ return;
+ rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+ if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+ l |= PACKAGE_THERM_INT_PLN_ENABLE;
+ else
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static const struct x86_cpu_id rapl_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
+ { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
+ { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+ /* TODO: Add more CPU IDs after testing */
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
+
+/* read once for all raw primitive data for all packages, domains */
+static void rapl_update_domain_data(void)
+{
+ int dmn, prim;
+ u64 val;
+ struct rapl_package *rp;
+
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+ pr_debug("update package %d domain %s data\n", rp->id,
+ rp->domains[dmn].name);
+ /* exclude non-raw primitives */
+ for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
+ if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+ rpi[prim].unit,
+ &val))
+ rp->domains[dmn].rdd.primitives[prim] =
+ val;
+ }
+ }
+
+}
+
+static int rapl_unregister_powercap(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd, *rd_package = NULL;
+
+ /* unregister all active rapl packages from the powercap layer,
+ * hotplug lock held
+ */
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ package_power_limit_irq_restore(rp->id);
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+ rd++) {
+ pr_debug("remove package, undo power limit on %d: %s\n",
+ rp->id, rd->name);
+ rapl_write_data_raw(rd, PL1_ENABLE, 0);
+ rapl_write_data_raw(rd, PL2_ENABLE, 0);
+ rapl_write_data_raw(rd, PL1_CLAMP, 0);
+ rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rd_package = rd;
+ continue;
+ }
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ /* do the package zone last */
+ if (rd_package)
+ powercap_unregister_zone(control_type,
+ &rd_package->power_zone);
+ }
+ powercap_unregister_control_type(control_type);
+
+ return 0;
+}
+
+static int rapl_package_register_powercap(struct rapl_package *rp)
+{
+ struct rapl_domain *rd;
+ int ret = 0;
+ char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
+ struct powercap_zone *power_zone = NULL;
+ int nr_pl;
+
+ /* first we register package domain as the parent zone */
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ nr_pl = find_nr_power_limit(rd);
+ pr_debug("register socket %d package domain %s\n",
+ rp->id, rd->name);
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, sizeof(dev_name), "%s-%d",
+ rd->name, rp->id);
+ power_zone = powercap_register_zone(&rd->power_zone,
+ control_type,
+ dev_name, NULL,
+ &zone_ops[rd->id],
+ nr_pl,
+ &constraint_ops);
+ if (IS_ERR(power_zone)) {
+ pr_debug("failed to register package, %d\n",
+ rp->id);
+ ret = PTR_ERR(power_zone);
+ goto exit_package;
+ }
+ /* track parent zone in per package/socket data */
+ rp->power_zone = power_zone;
+ /* done, only one package domain per socket */
+ break;
+ }
+ }
+ if (!power_zone) {
+ pr_err("no package domain found, unknown topology!\n");
+ ret = -ENODEV;
+ goto exit_package;
+ }
+ /* now register domains as children of the socket/package */
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE)
+ continue;
+ /* number of power limits per domain varies */
+ nr_pl = find_nr_power_limit(rd);
+ power_zone = powercap_register_zone(&rd->power_zone,
+ control_type, rd->name,
+ rp->power_zone,
+ &zone_ops[rd->id], nr_pl,
+ &constraint_ops);
+
+ if (IS_ERR(power_zone)) {
+ pr_debug("failed to register power_zone, %d:%s:%s\n",
+ rp->id, rd->name, dev_name);
+ ret = PTR_ERR(power_zone);
+ goto err_cleanup;
+ }
+ }
+
+exit_package:
+ return ret;
+err_cleanup:
+ /* clean up previously initialized domains within the package if we
+ * failed after the first domain setup.
+ */
+ while (--rd >= rp->domains) {
+ pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+
+ return ret;
+}
+
+static int rapl_register_powercap(void)
+{
+ struct rapl_domain *rd;
+ struct rapl_package *rp;
+ int ret = 0;
+
+ control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+ if (IS_ERR(control_type)) {
+ pr_debug("failed to register powercap control_type.\n");
+ return PTR_ERR(control_type);
+ }
+ /* read the initial data */
+ rapl_update_domain_data();
+ list_for_each_entry(rp, &rapl_packages, plist)
+ if (rapl_package_register_powercap(rp))
+ goto err_cleanup_package;
+ return ret;
+
+err_cleanup_package:
+ /* clean up previously initialized packages */
+ list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+ rd++) {
+ pr_debug("unregister zone/package %d, %s domain\n",
+ rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ }
+
+ return ret;
+}
+
+static int rapl_check_domain(int cpu, int domain)
+{
+ unsigned msr;
+ u64 val1, val2 = 0;
+ int retry = 0;
+
+ switch (domain) {
+ case RAPL_DOMAIN_PACKAGE:
+ msr = MSR_PKG_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_PP0:
+ msr = MSR_PP0_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_PP1:
+ msr = MSR_PP1_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_DRAM:
+ msr = MSR_DRAM_ENERGY_STATUS;
+ break;
+ default:
+ pr_err("invalid domain id %d\n", domain);
+ return -EINVAL;
+ }
+ if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+ return -ENODEV;
+
+ /* energy counters roll slowly on some domains */
+ while (++retry < 10) {
+ usleep_range(10000, 15000);
+ rdmsrl_safe_on_cpu(cpu, msr, &val2);
+ if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
+ return 0;
+ }
+ /* if energy counter does not change, report as bad domain */
+ pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
+ rapl_domain_names[domain], val1, val2);
+
+ return -ENODEV;
+}
+
+/* Detect active and valid domains for the given CPU; the caller must
+ * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
+ */
+static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+{
+ int i;
+ int ret = 0;
+ struct rapl_domain *rd;
+ u64 locked;
+
+ for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+ /* use physical package id to read counters */
+ if (!rapl_check_domain(cpu, i))
+ rp->domain_map |= 1 << i;
+ }
+ rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
+ if (!rp->nr_domains) {
+ pr_err("no valid rapl domains found in package %d\n", rp->id);
+ ret = -ENODEV;
+ goto done;
+ }
+ pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+
+ rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+ GFP_KERNEL);
+ if (!rp->domains) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ rapl_init_domains(rp);
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ /* check if the domain is locked by BIOS */
+ if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
+ pr_info("RAPL package %d domain %s locked by BIOS\n",
+ rp->id, rd->name);
+ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ }
+ }
+
+
+done:
+ return ret;
+}
+
+static bool is_package_new(int package)
+{
+ struct rapl_package *rp;
+
+ /* caller prevents CPU hotplug, so there will be no new packages added
+ * or deleted while traversing the package list; no locking is needed.
+ */
+ list_for_each_entry(rp, &rapl_packages, plist)
+ if (package == rp->id)
+ return false;
+
+ return true;
+}
+
+/* RAPL interface can be made of a two-level hierarchy: package level and domain
+ * level. We first detect the number of packages, then the domains of each package.
+ * We have to consider the possibility of CPU online/offline due to hotplug and
+ * other scenarios.
+ */
+static int rapl_detect_topology(void)
+{
+ int i;
+ int phy_package_id;
+ struct rapl_package *new_package, *rp;
+
+ for_each_online_cpu(i) {
+ phy_package_id = topology_physical_package_id(i);
+ if (is_package_new(phy_package_id)) {
+ new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
+ if (!new_package) {
+ rapl_cleanup_data();
+ return -ENOMEM;
+ }
+ /* add the new package to the list */
+ new_package->id = phy_package_id;
+ new_package->nr_cpus = 1;
+
+ /* check if the package contains valid domains */
+ if (rapl_detect_domains(new_package, i) ||
+ rapl_check_unit(new_package, i)) {
+ kfree(new_package->domains);
+ kfree(new_package);
+ /* free up the packages already initialized */
+ rapl_cleanup_data();
+ return -ENODEV;
+ }
+ INIT_LIST_HEAD(&new_package->plist);
+ list_add(&new_package->plist, &rapl_packages);
+ } else {
+ rp = find_package_by_id(phy_package_id);
+ if (rp)
+ ++rp->nr_cpus;
+ }
+ }
+
+ return 0;
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static void rapl_remove_package(struct rapl_package *rp)
+{
+ struct rapl_domain *rd, *rd_package = NULL;
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rd_package = rd;
+ continue;
+ }
+ pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ /* do parent zone last */
+ powercap_unregister_zone(control_type, &rd_package->power_zone);
+ list_del(&rp->plist);
+ kfree(rp);
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static int rapl_add_package(int cpu)
+{
+ int ret = 0;
+ int phy_package_id;
+ struct rapl_package *rp;
+
+ phy_package_id = topology_physical_package_id(cpu);
+ rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ /* add the new package to the list */
+ rp->id = phy_package_id;
+ rp->nr_cpus = 1;
+ /* check if the package contains valid domains */
+ if (rapl_detect_domains(rp, cpu) ||
+ rapl_check_unit(rp, cpu)) {
+ ret = -ENODEV;
+ goto err_free_package;
+ }
+ if (!rapl_package_register_powercap(rp)) {
+ INIT_LIST_HEAD(&rp->plist);
+ list_add(&rp->plist, &rapl_packages);
+ return ret;
+ }
+
+err_free_package:
+ kfree(rp->domains);
+ kfree(rp);
+
+ return ret;
+}
+
+/* Handles CPU hotplug on multi-socket systems.
+ * If a CPU goes online as the first CPU of the physical package
+ * we add the RAPL package to the system. Similarly, when the last
+ * CPU of the package is removed, we remove the RAPL package and its
+ * associated domains. Cooling devices are handled accordingly at
+ * per-domain level.
+ */
+static int rapl_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ int phy_package_id;
+ struct rapl_package *rp;
+
+ phy_package_id = topology_physical_package_id(cpu);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ rp = find_package_by_id(phy_package_id);
+ if (rp)
+ ++rp->nr_cpus;
+ else
+ rapl_add_package(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ rp = find_package_by_id(phy_package_id);
+ if (!rp)
+ break;
+ if (--rp->nr_cpus == 0)
+ rapl_remove_package(rp);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_cpu_notifier = {
+ .notifier_call = rapl_cpu_callback,
+};
+
+static int __init rapl_init(void)
+{
+ int ret = 0;
+
+ if (!x86_match_cpu(rapl_ids)) {
+ pr_err("driver does not support CPU family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ return -ENODEV;
+ }
+ /* prevent CPU hotplug during detection */
+ get_online_cpus();
+ ret = rapl_detect_topology();
+ if (ret)
+ goto done;
+
+ if (rapl_register_powercap()) {
+ rapl_cleanup_data();
+ ret = -ENODEV;
+ goto done;
+ }
+ register_hotcpu_notifier(&rapl_cpu_notifier);
+done:
+ put_online_cpus();
+
+ return ret;
+}
+
+static void __exit rapl_exit(void)
+{
+ get_online_cpus();
+ unregister_hotcpu_notifier(&rapl_cpu_notifier);
+ rapl_unregister_powercap();
+ rapl_cleanup_data();
+ put_online_cpus();
+}
+
+module_init(rapl_init);
+module_exit(rapl_exit);
+
+MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
+MODULE_LICENSE("GPL v2");
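
As a worked example of the TIME_UNIT branch in rapl_unit_xlate() above: the 7-bit window field is decoded as 2^Y * (1 + F/4) time units and reported in microseconds. The sketch below is standalone user-space C, not part of the driver; the raw field value 0x2a and the time_unit_divisor of 1024 (a 2^-10 s time unit, as commonly reported by MSR_RAPL_POWER_UNIT) are assumed inputs.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mirrors the !to_raw TIME_UNIT case of rapl_unit_xlate() */
	uint64_t raw = 0x2a;		/* assumed 7-bit time-window field */
	uint64_t divisor = 1024;	/* assumed time unit of 2^-10 seconds */
	uint64_t scale = 1000000;	/* TIME_UNIT_SCALE: report microseconds */
	uint64_t f = (raw & 0x60) >> 5;	/* fraction bits, F = 1 here */
	uint64_t y = raw & 0x1f;	/* exponent bits, Y = 10 here */
	uint64_t us = (1ULL << y) * (4 + f) * scale / 4 / divisor;

	/* 1024 * 5 / 4 = 1280 time units -> 1250000 us (1.25 s) */
	printf("time window = %llu us\n", (unsigned long long)us);
	return 0;
}
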
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
new file mode 100644
index 00000000000..8d0fe431dbd
--- /dev/null
+++ b/drivers/powercap/powercap_sys.c
@@ -0,0 +1,685 @@
+/*
+ * Power capping class
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/powercap.h>
+
+#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
+#define to_powercap_control_type(n) \
+ container_of(n, struct powercap_control_type, dev)
+
+/* Power zone show function */
+#define define_power_zone_show(_attr) \
+static ssize_t _attr##_show(struct device *dev, \
+ struct device_attribute *dev_attr,\
+ char *buf) \
+{ \
+ u64 value; \
+ ssize_t len = -EINVAL; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ \
+ if (power_zone->ops->get_##_attr) { \
+ if (!power_zone->ops->get_##_attr(power_zone, &value)) \
+ len = sprintf(buf, "%lld\n", value); \
+ } \
+ \
+ return len; \
+}
+
+/* The only meaningful input is 0 (reset); others are silently ignored */
+#define define_power_zone_store(_attr) \
+static ssize_t _attr##_store(struct device *dev,\
+ struct device_attribute *dev_attr, \
+ const char *buf, size_t count) \
+{ \
+ int err; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ u64 value; \
+ \
+ err = kstrtoull(buf, 10, &value); \
+ if (err) \
+ return -EINVAL; \
+ if (value) \
+ return count; \
+ if (power_zone->ops->reset_##_attr) { \
+ if (!power_zone->ops->reset_##_attr(power_zone)) \
+ return count; \
+ } \
+ \
+ return -EINVAL; \
+}
+
+/* Power zone constraint show function */
+#define define_power_zone_constraint_show(_attr) \
+static ssize_t show_constraint_##_attr(struct device *dev, \
+ struct device_attribute *dev_attr,\
+ char *buf) \
+{ \
+ u64 value; \
+ ssize_t len = -ENODATA; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ int id; \
+ struct powercap_zone_constraint *pconst;\
+ \
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ return -EINVAL; \
+ if (id >= power_zone->const_id_cnt) \
+ return -EINVAL; \
+ pconst = &power_zone->constraints[id]; \
+ if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
+ if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
+ len = sprintf(buf, "%lld\n", value); \
+ } \
+ \
+ return len; \
+}
+
+/* Power zone constraint store function */
+#define define_power_zone_constraint_store(_attr) \
+static ssize_t store_constraint_##_attr(struct device *dev,\
+ struct device_attribute *dev_attr, \
+ const char *buf, size_t count) \
+{ \
+ int err; \
+ u64 value; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ int id; \
+ struct powercap_zone_constraint *pconst;\
+ \
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ return -EINVAL; \
+ if (id >= power_zone->const_id_cnt) \
+ return -EINVAL; \
+ pconst = &power_zone->constraints[id]; \
+ err = kstrtoull(buf, 10, &value); \
+ if (err) \
+ return -EINVAL; \
+ if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
+ if (!pconst->ops->set_##_attr(power_zone, id, value)) \
+ return count; \
+ } \
+ \
+ return -ENODATA; \
+}
+
+/* Power zone information callbacks */
+define_power_zone_show(power_uw);
+define_power_zone_show(max_power_range_uw);
+define_power_zone_show(energy_uj);
+define_power_zone_store(energy_uj);
+define_power_zone_show(max_energy_range_uj);
+
+/* Power zone attributes */
+static DEVICE_ATTR_RO(max_power_range_uw);
+static DEVICE_ATTR_RO(power_uw);
+static DEVICE_ATTR_RO(max_energy_range_uj);
+static DEVICE_ATTR_RW(energy_uj);
+
+/* Power zone constraint attributes callbacks */
+define_power_zone_constraint_show(power_limit_uw);
+define_power_zone_constraint_store(power_limit_uw);
+define_power_zone_constraint_show(time_window_us);
+define_power_zone_constraint_store(time_window_us);
+define_power_zone_constraint_show(max_power_uw);
+define_power_zone_constraint_show(min_power_uw);
+define_power_zone_constraint_show(max_time_window_us);
+define_power_zone_constraint_show(min_time_window_us);
+
+/* For one-time seeding of constraint device attributes */
+struct powercap_constraint_attr {
+ struct device_attribute power_limit_attr;
+ struct device_attribute time_window_attr;
+ struct device_attribute max_power_attr;
+ struct device_attribute min_power_attr;
+ struct device_attribute max_time_window_attr;
+ struct device_attribute min_time_window_attr;
+ struct device_attribute name_attr;
+};
+
+static struct powercap_constraint_attr
+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+
+/* A list of powercap control_types */
+static LIST_HEAD(powercap_cntrl_list);
+/* Mutex to protect list of powercap control_types */
+static DEFINE_MUTEX(powercap_cntrl_list_lock);
+
+#define POWERCAP_CONSTRAINT_NAME_LEN 30 /* Some limit to avoid overflow */
+static ssize_t show_constraint_name(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ const char *name;
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ int id;
+ ssize_t len = -ENODATA;
+ struct powercap_zone_constraint *pconst;
+
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+ return -EINVAL;
+ if (id >= power_zone->const_id_cnt)
+ return -EINVAL;
+ pconst = &power_zone->constraints[id];
+
+ if (pconst && pconst->ops && pconst->ops->get_name) {
+ name = pconst->ops->get_name(power_zone, id);
+ if (name) {
+ snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN,
+ "%s\n", name);
+ buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0';
+ len = strlen(buf);
+ }
+ }
+
+ return len;
+}
+
+static int create_constraint_attribute(int id, const char *name,
+ int mode,
+ struct device_attribute *dev_attr,
+ ssize_t (*show)(struct device *,
+ struct device_attribute *, char *),
+ ssize_t (*store)(struct device *,
+ struct device_attribute *,
+ const char *, size_t)
+ )
+{
+
+ dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+ id, name);
+ if (!dev_attr->attr.name)
+ return -ENOMEM;
+ dev_attr->attr.mode = mode;
+ dev_attr->show = show;
+ dev_attr->store = store;
+
+ return 0;
+}
+
+static void free_constraint_attributes(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+ kfree(constraint_attrs[i].power_limit_attr.attr.name);
+ kfree(constraint_attrs[i].time_window_attr.attr.name);
+ kfree(constraint_attrs[i].name_attr.attr.name);
+ kfree(constraint_attrs[i].max_power_attr.attr.name);
+ kfree(constraint_attrs[i].min_power_attr.attr.name);
+ kfree(constraint_attrs[i].max_time_window_attr.attr.name);
+ kfree(constraint_attrs[i].min_time_window_attr.attr.name);
+ }
+}
+
+static int seed_constraint_attributes(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+ ret = create_constraint_attribute(i, "power_limit_uw",
+ S_IWUSR | S_IRUGO,
+ &constraint_attrs[i].power_limit_attr,
+ show_constraint_power_limit_uw,
+ store_constraint_power_limit_uw);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "time_window_us",
+ S_IWUSR | S_IRUGO,
+ &constraint_attrs[i].time_window_attr,
+ show_constraint_time_window_us,
+ store_constraint_time_window_us);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "name", S_IRUGO,
+ &constraint_attrs[i].name_attr,
+ show_constraint_name,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+ &constraint_attrs[i].max_power_attr,
+ show_constraint_max_power_uw,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+ &constraint_attrs[i].min_power_attr,
+ show_constraint_min_power_uw,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "max_time_window_us",
+ S_IRUGO,
+ &constraint_attrs[i].max_time_window_attr,
+ show_constraint_max_time_window_us,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "min_time_window_us",
+ S_IRUGO,
+ &constraint_attrs[i].min_time_window_attr,
+ show_constraint_min_time_window_us,
+ NULL);
+ if (ret)
+ goto err_alloc;
+
+ }
+
+ return 0;
+
+err_alloc:
+ free_constraint_attributes();
+
+ return ret;
+}
+
+static int create_constraints(struct powercap_zone *power_zone,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops)
+{
+ int i;
+ int ret = 0;
+ int count;
+ struct powercap_zone_constraint *pconst;
+
+ if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
+ !const_ops->set_power_limit_uw ||
+ !const_ops->get_time_window_us ||
+ !const_ops->set_time_window_us)
+ return -EINVAL;
+
+ count = power_zone->zone_attr_count;
+ for (i = 0; i < nr_constraints; ++i) {
+ pconst = &power_zone->constraints[i];
+ pconst->ops = const_ops;
+ pconst->id = power_zone->const_id_cnt;
+ power_zone->const_id_cnt++;
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].power_limit_attr.attr;
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].time_window_attr.attr;
+ if (pconst->ops->get_name)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].name_attr.attr;
+ if (pconst->ops->get_max_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].max_power_attr.attr;
+ if (pconst->ops->get_min_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].min_power_attr.attr;
+ if (pconst->ops->get_max_time_window_us)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].max_time_window_attr.attr;
+ if (pconst->ops->get_min_time_window_us)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].min_time_window_attr.attr;
+ }
+ power_zone->zone_attr_count = count;
+
+ return ret;
+}
+
+static bool control_type_valid(void *control_type)
+{
+ struct powercap_control_type *pos = NULL;
+ bool found = false;
+
+ mutex_lock(&powercap_cntrl_list_lock);
+
+ list_for_each_entry(pos, &powercap_cntrl_list, node) {
+ if (pos == control_type) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return found;
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+ return sprintf(buf, "%s\n", power_zone->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+/* Create zone and attributes in sysfs */
+static void create_power_zone_common_attributes(
+ struct powercap_zone *power_zone)
+{
+ int count = 0;
+
+ power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
+ if (power_zone->ops->get_max_energy_range_uj)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_max_energy_range_uj.attr;
+ if (power_zone->ops->get_energy_uj)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_energy_uj.attr;
+ if (power_zone->ops->get_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_power_uw.attr;
+ if (power_zone->ops->get_max_power_range_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_max_power_range_uw.attr;
+ power_zone->zone_dev_attrs[count] = NULL;
+ power_zone->zone_attr_count = count;
+}
+
+static void powercap_release(struct device *dev)
+{
+ bool allocated;
+
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+ /* Store flag as the release() may free memory */
+ allocated = power_zone->allocated;
+ /* Remove id from parent idr struct */
+ idr_remove(power_zone->parent_idr, power_zone->id);
+ /* Destroy idrs allocated for this zone */
+ idr_destroy(&power_zone->idr);
+ kfree(power_zone->name);
+ kfree(power_zone->zone_dev_attrs);
+ kfree(power_zone->constraints);
+ if (power_zone->ops->release)
+ power_zone->ops->release(power_zone);
+ if (allocated)
+ kfree(power_zone);
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+
+ /* Store flag as the release() may free memory */
+ allocated = control_type->allocated;
+ idr_destroy(&control_type->idr);
+ mutex_destroy(&control_type->lock);
+ if (control_type->ops && control_type->ops->release)
+ control_type->ops->release(control_type);
+ if (allocated)
+ kfree(control_type);
+ }
+}
+
+static ssize_t enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ bool mode = true;
+
+ /* Default is enabled */
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ if (power_zone->ops->get_enable)
+ if (power_zone->ops->get_enable(power_zone, &mode))
+ mode = false;
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+ if (control_type->ops && control_type->ops->get_enable)
+ if (control_type->ops->get_enable(control_type, &mode))
+ mode = false;
+ }
+
+ return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool mode;
+
+ if (strtobool(buf, &mode))
+ return -EINVAL;
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ if (power_zone->ops->set_enable)
+ if (!power_zone->ops->set_enable(power_zone, mode))
+ return len;
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+ if (control_type->ops && control_type->ops->set_enable)
+ if (!control_type->ops->set_enable(control_type, mode))
+ return len;
+ }
+
+ return -ENOSYS;
+}
+
+static DEVICE_ATTR_RW(enabled);
+
+static struct attribute *powercap_attrs[] = {
+ &dev_attr_enabled.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(powercap);
+
+static struct class powercap_class = {
+ .name = "powercap",
+ .dev_release = powercap_release,
+ .dev_groups = powercap_groups,
+};
+
+struct powercap_zone *powercap_register_zone(
+ struct powercap_zone *power_zone,
+ struct powercap_control_type *control_type,
+ const char *name,
+ struct powercap_zone *parent,
+ const struct powercap_zone_ops *ops,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops)
+{
+ int result;
+ int nr_attrs;
+
+ if (!name || !control_type || !ops ||
+ nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
+ (!ops->get_energy_uj && !ops->get_power_uw) ||
+ !control_type_valid(control_type))
+ return ERR_PTR(-EINVAL);
+
+ if (power_zone) {
+ if (!ops->release)
+ return ERR_PTR(-EINVAL);
+ memset(power_zone, 0, sizeof(*power_zone));
+ } else {
+ power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
+ if (!power_zone)
+ return ERR_PTR(-ENOMEM);
+ power_zone->allocated = true;
+ }
+ power_zone->ops = ops;
+ power_zone->control_type_inst = control_type;
+ if (!parent) {
+ power_zone->dev.parent = &control_type->dev;
+ power_zone->parent_idr = &control_type->idr;
+ } else {
+ power_zone->dev.parent = &parent->dev;
+ power_zone->parent_idr = &parent->idr;
+ }
+ power_zone->dev.class = &powercap_class;
+
+ mutex_lock(&control_type->lock);
+ /* Using idr to get the unique id */
+ result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
+ if (result < 0)
+ goto err_idr_alloc;
+
+ power_zone->id = result;
+ idr_init(&power_zone->idr);
+ power_zone->name = kstrdup(name, GFP_KERNEL);
+ if (!power_zone->name)
+ goto err_name_alloc;
+ dev_set_name(&power_zone->dev, "%s:%x",
+ dev_name(power_zone->dev.parent),
+ power_zone->id);
+ power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
+ nr_constraints, GFP_KERNEL);
+ if (!power_zone->constraints)
+ goto err_const_alloc;
+
+ nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
+ POWERCAP_ZONE_MAX_ATTRS + 1;
+ power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
+ nr_attrs, GFP_KERNEL);
+ if (!power_zone->zone_dev_attrs)
+ goto err_attr_alloc;
+ create_power_zone_common_attributes(power_zone);
+ result = create_constraints(power_zone, nr_constraints, const_ops);
+ if (result)
+ goto err_dev_ret;
+
+ power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
+ power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
+ power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+ power_zone->dev_attr_groups[1] = NULL;
+ power_zone->dev.groups = power_zone->dev_attr_groups;
+ result = device_register(&power_zone->dev);
+ if (result)
+ goto err_dev_ret;
+
+ control_type->nr_zones++;
+ mutex_unlock(&control_type->lock);
+
+ return power_zone;
+
+err_dev_ret:
+ kfree(power_zone->zone_dev_attrs);
+err_attr_alloc:
+ kfree(power_zone->constraints);
+err_const_alloc:
+ kfree(power_zone->name);
+err_name_alloc:
+ idr_remove(power_zone->parent_idr, power_zone->id);
+err_idr_alloc:
+ if (power_zone->allocated)
+ kfree(power_zone);
+ mutex_unlock(&control_type->lock);
+
+ return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(powercap_register_zone);
+
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+ struct powercap_zone *power_zone)
+{
+ if (!power_zone || !control_type)
+ return -EINVAL;
+
+ mutex_lock(&control_type->lock);
+ control_type->nr_zones--;
+ mutex_unlock(&control_type->lock);
+
+ device_unregister(&power_zone->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_zone);
+
+struct powercap_control_type *powercap_register_control_type(
+ struct powercap_control_type *control_type,
+ const char *name,
+ const struct powercap_control_type_ops *ops)
+{
+ int result;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+ if (control_type) {
+ if (!ops || !ops->release)
+ return ERR_PTR(-EINVAL);
+ memset(control_type, 0, sizeof(*control_type));
+ } else {
+ control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
+ if (!control_type)
+ return ERR_PTR(-ENOMEM);
+ control_type->allocated = true;
+ }
+ mutex_init(&control_type->lock);
+ control_type->ops = ops;
+ INIT_LIST_HEAD(&control_type->node);
+ control_type->dev.class = &powercap_class;
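+ /* the control type appears under /sys/class/powercap/<name> */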
+ dev_set_name(&control_type->dev, "%s", name);
+ result = device_register(&control_type->dev);
+ if (result) {
+ if (control_type->allocated)
+ kfree(control_type);
+ return ERR_PTR(result);
+ }
+ idr_init(&control_type->idr);
+
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_add_tail(&control_type->node, &powercap_cntrl_list);
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return control_type;
+}
+EXPORT_SYMBOL_GPL(powercap_register_control_type);
+
+int powercap_unregister_control_type(struct powercap_control_type *control_type)
+{
+ struct powercap_control_type *pos = NULL;
+
+ if (control_type->nr_zones) {
+ dev_err(&control_type->dev, "Zones of this type are still registered\n");
+ return -EINVAL;
+ }
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_for_each_entry(pos, &powercap_cntrl_list, node) {
+ if (pos == control_type) {
+ list_del(&control_type->node);
+ mutex_unlock(&powercap_cntrl_list_lock);
+ device_unregister(&control_type->dev);
+ return 0;
+ }
+ }
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
+
+static int __init powercap_init(void)
+{
+ int result = 0;
+
+ result = seed_constraint_attributes();
+ if (result)
+ return result;
+
+ result = class_register(&powercap_class);
+
+ return result;
+}
+
+device_initcall(powercap_init);
+
+MODULE_DESCRIPTION("PowerCap sysfs Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 9966124ad98..f41bacfdc3d 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -201,7 +201,7 @@ static struct platform_driver pps_gpio_driver = {
.driver = {
.name = PPS_GPIO_NAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(pps_gpio_dt_ids),
+ .of_match_table = pps_gpio_dt_ids,
},
};
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index d49b85164fd..4a08727fcaf 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -259,8 +259,15 @@ static struct ixp_clock ixp_clock;
static int setup_interrupt(int gpio)
{
int irq;
+ int err;
- gpio_line_config(gpio, IXP4XX_GPIO_IN);
+ err = gpio_request(gpio, "ixp4-ptp");
+ if (err)
+ return err;
+
+ err = gpio_direction_input(gpio);
+ if (err)
+ return err;
irq = gpio_to_irq(gpio);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 75840b5cea6..eece329d787 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -62,6 +62,15 @@ config PWM_BFIN
To compile this driver as a module, choose M here: the module
will be called pwm-bfin.
+config PWM_EP93XX
+ tristate "Cirrus Logic EP93xx PWM support"
+ depends on ARCH_EP93XX
+ help
+ Generic PWM framework driver for Cirrus Logic EP93xx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-ep93xx.
+
config PWM_IMX
tristate "i.MX PWM support"
depends on ARCH_MXC
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 77a8c185c5b..8b754e4dba4 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
+obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index ba6ce01035e..f3dcd02390f 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -249,6 +249,8 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
}
}
+ cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS);
+
__raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
if (index == 0)
@@ -305,7 +307,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
i = slowclk;
rate = 32768;
min = div_u64(NSEC_PER_SEC, rate);
- max = min << 16;
+ max = min << tc->tcb_config->counter_width;
/* If period is too big return ERANGE error */
if (max < period_ns)
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
new file mode 100644
index 00000000000..33aa4461e1c
--- /dev/null
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -0,0 +1,230 @@
+/*
+ * PWM framework driver for Cirrus Logic EP93xx
+ *
+ * Copyright (c) 2009 Matthieu Crapet <mcrapet@gmail.com>
+ * Copyright (c) 2009, 2013 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * EP9301/02 have only one channel:
+ * platform device ep93xx-pwm.1 - PWMOUT1 (EGPIO14)
+ *
+ * EP9307 has only one channel:
+ * platform device ep93xx-pwm.0 - PWMOUT
+ *
+ * EP9312/15 have two channels:
+ * platform device ep93xx-pwm.0 - PWMOUT
+ * platform device ep93xx-pwm.1 - PWMOUT1 (EGPIO14)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+
+#include <asm/div64.h>
+
+#include <mach/platform.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
+
+#define EP93XX_PWMx_TERM_COUNT 0x00
+#define EP93XX_PWMx_DUTY_CYCLE 0x04
+#define EP93XX_PWMx_ENABLE 0x08
+#define EP93XX_PWMx_INVERT 0x0c
+
+struct ep93xx_pwm {
+ void __iomem *base;
+ struct clk *clk;
+ struct pwm_chip chip;
+};
+
+static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ep93xx_pwm, chip);
+}
+
+static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct platform_device *pdev = to_platform_device(chip->dev);
+
+ return ep93xx_pwm_acquire_gpio(pdev);
+}
+
+static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct platform_device *pdev = to_platform_device(chip->dev);
+
+ ep93xx_pwm_release_gpio(pdev);
+}
+
+static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+ void __iomem *base = ep93xx_pwm->base;
+ unsigned long long c;
+ unsigned long period_cycles;
+ unsigned long duty_cycles;
+ unsigned long term;
+ int ret = 0;
+
+ /*
+ * The clock needs to be enabled to access the PWM registers.
+ * Configuration can be changed at any time.
+ */
+ if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ ret = clk_enable(ep93xx_pwm->clk);
+ if (ret)
+ return ret;
+ }
+
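+ /* convert the requested period and duty cycle from nanoseconds to
+ * ticks of the PWM clock
+ */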
+ c = clk_get_rate(ep93xx_pwm->clk);
+ c *= period_ns;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ c = period_cycles;
+ c *= duty_ns;
+ do_div(c, period_ns);
+ duty_cycles = c;
+
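+ /* both values must fit in the 16-bit terminal count and duty registers */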
+ if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
+ term = readw(base + EP93XX_PWMx_TERM_COUNT);
+
+ /* Order is important if PWM is running */
+ if (period_cycles > term) {
+ writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
+ writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
+ } else {
+ writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
+ writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ clk_disable(ep93xx_pwm->clk);
+
+ return ret;
+}
+
+static int ep93xx_pwm_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+ int ret;
+
+ /*
+ * The clock needs to be enabled to access the PWM registers.
+ * Polarity can only be changed when the PWM is disabled.
+ */
+ ret = clk_enable(ep93xx_pwm->clk);
+ if (ret)
+ return ret;
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
+ else
+ writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
+
+ clk_disable(ep93xx_pwm->clk);
+
+ return 0;
+}
+
+static int ep93xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+ int ret;
+
+ ret = clk_enable(ep93xx_pwm->clk);
+ if (ret)
+ return ret;
+
+ writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+
+ return 0;
+}
+
+static void ep93xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+
+ writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+ clk_disable(ep93xx_pwm->clk);
+}
+
+static const struct pwm_ops ep93xx_pwm_ops = {
+ .request = ep93xx_pwm_request,
+ .free = ep93xx_pwm_free,
+ .config = ep93xx_pwm_config,
+ .set_polarity = ep93xx_pwm_polarity,
+ .enable = ep93xx_pwm_enable,
+ .disable = ep93xx_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int ep93xx_pwm_probe(struct platform_device *pdev)
+{
+ struct ep93xx_pwm *ep93xx_pwm;
+ struct resource *res;
+ int ret;
+
+ ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL);
+ if (!ep93xx_pwm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ep93xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ep93xx_pwm->base))
+ return PTR_ERR(ep93xx_pwm->base);
+
+ ep93xx_pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk");
+ if (IS_ERR(ep93xx_pwm->clk))
+ return PTR_ERR(ep93xx_pwm->clk);
+
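+ /* each ep93xx-pwm platform device exposes a single PWM output */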
+ ep93xx_pwm->chip.dev = &pdev->dev;
+ ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
+ ep93xx_pwm->chip.base = -1;
+ ep93xx_pwm->chip.npwm = 1;
+
+ ret = pwmchip_add(&ep93xx_pwm->chip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, ep93xx_pwm);
+ return 0;
+}
+
+static int ep93xx_pwm_remove(struct platform_device *pdev)
+{
+ struct ep93xx_pwm *ep93xx_pwm = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&ep93xx_pwm->chip);
+}
+
+static struct platform_driver ep93xx_pwm_driver = {
+ .driver = {
+ .name = "ep93xx-pwm",
+ },
+ .probe = ep93xx_pwm_probe,
+ .remove = ep93xx_pwm_remove,
+};
+module_platform_driver(ep93xx_pwm_driver);
+
+MODULE_DESCRIPTION("Cirrus Logic EP93xx PWM driver");
+MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
+ "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_ALIAS("platform:ep93xx-pwm");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 2b7c4f88b46..cc477334487 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>
+#include <linux/of.h>
#include <linux/of_device.h>
/* i.MX1 and i.MX21 share the same PWM function block: */
@@ -296,7 +297,7 @@ static struct platform_driver imx_pwm_driver = {
.driver = {
.name = "imx-pwm",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(imx_pwm_dt_ids),
+ .of_match_table = imx_pwm_dt_ids,
},
.probe = imx_pwm_probe,
.remove = imx_pwm_remove,
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index efac99e03d5..9dc0f9d42bf 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -169,7 +169,7 @@ static struct platform_driver lpc32xx_pwm_driver = {
.driver = {
.name = "lpc32xx-pwm",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(lpc32xx_pwm_dt_ids),
+ .of_match_table = lpc32xx_pwm_dt_ids,
},
.probe = lpc32xx_pwm_probe,
.remove = lpc32xx_pwm_remove,
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index c2c5a4fd1b9..9475bc7a6f9 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -189,7 +189,7 @@ static struct platform_driver mxs_pwm_driver = {
.driver = {
.name = "mxs-pwm",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(mxs_pwm_dt_ids),
+ .of_match_table = mxs_pwm_dt_ids,
},
.probe = mxs_pwm_probe,
.remove = mxs_pwm_remove,
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index fcc8b9adde9..b59639e0c02 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -224,8 +225,8 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- pwm_set_chip_data(pwm, NULL);
devm_kfree(chip->dev, pwm_get_chip_data(pwm));
+ pwm_set_chip_data(pwm, NULL);
}
static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index c2e2e585236..4e5c3d13d4f 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -26,7 +26,6 @@
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
#include "pwm-tipwmss.h"
@@ -208,11 +207,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
struct clk *clk;
struct ecap_pwm_chip *pc;
u16 status;
- struct pinctrl *pinctrl;
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "unable to select pin group\n");
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 084f5524653..a4d8f519d96 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
#include "pwm-tipwmss.h"
@@ -439,11 +438,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
struct clk *clk;
struct ehrpwm_pwm_chip *pc;
u16 status;
- struct pinctrl *pinctrl;
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "unable to select pin group\n");
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 29d1bba4804..b964470025c 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -21,6 +21,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/i2c/twl.h>
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index eef910580ea..b99a50e626a 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -18,6 +18,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/i2c/twl.h>
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 3e9b6a78ad1..c9ae692d345 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -223,8 +223,8 @@ struct device rio_bus = {
struct bus_type rio_bus_type = {
.name = "rapidio",
.match = rio_match_bus,
- .dev_attrs = rio_dev_attrs,
- .bus_attrs = rio_bus_attrs,
+ .dev_groups = rio_dev_groups,
+ .bus_groups = rio_bus_groups,
.probe = rio_device_probe,
.remove = rio_device_remove,
.uevent = rio_uevent,
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 9331be646dc..e0221c6d0cc 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -27,6 +27,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
\
return sprintf(buf, format_string, rdev->field); \
} \
+static DEVICE_ATTR_RO(field);
rio_config_attr(did, "0x%04x\n");
rio_config_attr(vid, "0x%04x\n");
@@ -54,6 +55,7 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
return (str - buf);
}
+static DEVICE_ATTR_RO(routes);
static ssize_t lprev_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -63,6 +65,7 @@ static ssize_t lprev_show(struct device *dev,
return sprintf(buf, "%s\n",
(rdev->prev) ? rio_name(rdev->prev) : "root");
}
+static DEVICE_ATTR_RO(lprev);
static ssize_t lnext_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -83,6 +86,7 @@ static ssize_t lnext_show(struct device *dev,
return str - buf;
}
+static DEVICE_ATTR_RO(lnext);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -92,23 +96,29 @@ static ssize_t modalias_show(struct device *dev,
return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n",
rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did);
}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *rio_dev_attrs[] = {
+ &dev_attr_did.attr,
+ &dev_attr_vid.attr,
+ &dev_attr_device_rev.attr,
+ &dev_attr_asm_did.attr,
+ &dev_attr_asm_vid.attr,
+ &dev_attr_asm_rev.attr,
+ &dev_attr_lprev.attr,
+ &dev_attr_destid.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
-struct device_attribute rio_dev_attrs[] = {
- __ATTR_RO(did),
- __ATTR_RO(vid),
- __ATTR_RO(device_rev),
- __ATTR_RO(asm_did),
- __ATTR_RO(asm_vid),
- __ATTR_RO(asm_rev),
- __ATTR_RO(lprev),
- __ATTR_RO(destid),
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static const struct attribute_group rio_dev_group = {
+ .attrs = rio_dev_attrs,
};
-static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
-static DEVICE_ATTR(lnext, S_IRUGO, lnext_show, NULL);
-static DEVICE_ATTR(hopcount, S_IRUGO, hopcount_show, NULL);
+const struct attribute_group *rio_dev_groups[] = {
+ &rio_dev_group,
+ NULL,
+};
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
@@ -316,8 +326,18 @@ exit:
return rc;
}
+static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store);
+
+static struct attribute *rio_bus_attrs[] = {
+ &bus_attr_scan.attr,
+ NULL,
+};
+
+static const struct attribute_group rio_bus_group = {
+ .attrs = rio_bus_attrs,
+};
-struct bus_attribute rio_bus_attrs[] = {
- __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store),
- __ATTR_NULL
+const struct attribute_group *rio_bus_groups[] = {
+ &rio_bus_group,
+ NULL,
};
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 085215cd850..5f99d22ad0b 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -48,8 +48,8 @@ extern struct rio_mport *rio_find_mport(int mport_id);
extern int rio_mport_scan(int mport_id);
/* Structures internal to the RIO core code */
-extern struct device_attribute rio_dev_attrs[];
-extern struct bus_attribute rio_bus_attrs[];
+extern const struct attribute_group *rio_dev_groups[];
+extern const struct attribute_group *rio_bus_groups[];
#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 3459f60dcfd..d333f7eac10 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -141,18 +141,14 @@ struct pm800_regulators {
/* Ranges are sorted in ascending order. */
static const struct regulator_linear_range buck1_volt_range[] = {
- { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
- .uV_step = 12500 },
- { .min_uV = 1600000, .max_uV = 1800000, .min_sel = 0x50,
- .max_sel = 0x54, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
+ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
};
/* BUCK 2~5 have same ranges. */
static const struct regulator_linear_range buck2_5_volt_range[] = {
- { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
- .uV_step = 12500 },
- { .min_uV = 1600000, .max_uV = 3300000, .min_sel = 0x50,
- .max_sel = 0x72, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
+ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
};
static const unsigned int ldo1_volt_table[] = {
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 70230974468..f704d83c93c 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -391,7 +391,8 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
else
config.regmap = chip->regmap_companion;
- info->regulator = regulator_register(&info->desc, &config);
+ info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
@@ -402,14 +403,6 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pm8607_regulator_remove(struct platform_device *pdev)
-{
- struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
-
- regulator_unregister(info->regulator);
- return 0;
-}
-
static struct platform_device_id pm8607_regulator_driver_ids[] = {
{
.name = "88pm860x-regulator",
@@ -428,7 +421,6 @@ static struct platform_driver pm8607_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = pm8607_regulator_probe,
- .remove = pm8607_regulator_remove,
.id_table = pm8607_regulator_driver_ids,
};
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index dfe58096b37..ce785f48128 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -28,16 +28,6 @@ config REGULATOR_DEBUG
help
Say yes here to enable debugging support.
-config REGULATOR_DUMMY
- bool "Provide a dummy regulator if regulator lookups fail"
- help
- If this option is enabled then when a regulator lookup fails
- and the board has not specified that it has provided full
- constraints the regulator core will provide an always
- enabled dummy regulator, allowing consumer drivers to continue.
-
- A warning will be generated when this substitution is done.
-
config REGULATOR_FIXED_VOLTAGE
tristate "Fixed voltage regulator support"
help
@@ -133,6 +123,14 @@ config REGULATOR_AS3711
This driver provides support for the voltage regulators on the
AS3711 PMIC
+config REGULATOR_AS3722
+ tristate "AMS AS3722 PMIC Regulators"
+ depends on MFD_AS3722
+ help
+ This driver provides support for the voltage regulators on the
+ AS3722 PMIC. This will enable support for all the software
+ controllable DCDC/LDO regulators.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -429,6 +427,14 @@ config REGULATOR_TI_ABB
on TI SoCs may be unstable without enabling this as it provides
device specific optimized bias to allow/optimize functionality.
+config REGULATOR_STW481X_VMMC
+ bool "ST Microelectronics STW481X VMMC regulator"
+ depends on MFD_STW481X
+ default y if MFD_STW481X
+ help
+ This driver supports the internal VMMC regulator in the STw481x
+ PMIC chips.
+
config REGULATOR_TPS51632
tristate "TI TPS51632 Power Regulator"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 185cce24602..01c597ea174 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -3,7 +3,7 @@
#
-obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
@@ -18,6 +18,7 @@ obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
+obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 881159dfcb5..f70a9bfa5ff 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -176,7 +176,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
config.driver_data = ri;
config.init_data = dev_get_platdata(&pdev->dev);
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
@@ -187,21 +187,12 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int aat2870_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
- return 0;
-}
-
static struct platform_driver aat2870_regulator_driver = {
.driver = {
.name = "aat2870-regulator",
.owner = THIS_MODULE,
},
.probe = aat2870_regulator_probe,
- .remove = aat2870_regulator_remove,
};
static int __init aat2870_regulator_init(void)
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 7d5eaa874b2..77b46d0b37a 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -535,7 +535,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
config.dev = &pdev->dev;
config.driver_data = reg;
- rdev = regulator_register(desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
err = PTR_ERR(rdev);
dev_err(&pdev->dev,
@@ -616,7 +616,6 @@ static int ab3100_regulators_remove(struct platform_device *pdev)
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
struct ab3100_regulator *reg = &ab3100_regulators[i];
- regulator_unregister(reg->rdev);
reg->rdev = NULL;
}
return 0;
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 02ff691cdb8..29c0faaf8eb 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -413,16 +413,12 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
&pdata->ext_regulator[i];
/* register regulator with framework */
- info->rdev = regulator_register(&info->desc, &config);
+ info->rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- /* when we fail, un-register all earlier regulators */
- while (--i >= 0) {
- info = &ab8500_ext_regulator_info[i];
- regulator_unregister(info->rdev);
- }
return err;
}
@@ -433,26 +429,8 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int ab8500_ext_regulator_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
- struct ab8500_ext_regulator_info *info = NULL;
- info = &ab8500_ext_regulator_info[i];
-
- dev_vdbg(rdev_get_dev(info->rdev),
- "%s-remove\n", info->desc.name);
-
- regulator_unregister(info->rdev);
- }
-
- return 0;
-}
-
static struct platform_driver ab8500_ext_regulator_driver = {
.probe = ab8500_ext_regulator_probe,
- .remove = ab8500_ext_regulator_remove,
.driver = {
.name = "ab8500-ext-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index b2b203cb6b2..48016a050d5 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -219,7 +219,6 @@ static int ad5398_probe(struct i2c_client *client,
struct ad5398_chip_info *chip;
const struct ad5398_current_data_format *df =
(struct ad5398_current_data_format *)id->driver_data;
- int ret;
if (!init_data)
return -EINVAL;
@@ -240,33 +239,21 @@ static int ad5398_probe(struct i2c_client *client,
chip->current_offset = df->current_offset;
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
- chip->rdev = regulator_register(&ad5398_reg, &config);
+ chip->rdev = devm_regulator_register(&client->dev, &ad5398_reg,
+ &config);
if (IS_ERR(chip->rdev)) {
- ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
id->name, ad5398_reg.name);
- goto err;
+ return PTR_ERR(chip->rdev);
}
i2c_set_clientdata(client, chip);
dev_dbg(&client->dev, "%s regulator driver is registered.\n", id->name);
return 0;
-
-err:
- return ret;
-}
-
-static int ad5398_remove(struct i2c_client *client)
-{
- struct ad5398_chip_info *chip = i2c_get_clientdata(client);
-
- regulator_unregister(chip->rdev);
- return 0;
}
static struct i2c_driver ad5398_driver = {
.probe = ad5398_probe,
- .remove = ad5398_remove,
.driver = {
.name = "ad5398",
},
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 0d4a8ccbb53..c734d098082 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -200,7 +200,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
config.regmap = sreg->anatop;
/* register regulator */
- rdev = regulator_register(rdesc, &config);
+ rdev = devm_regulator_register(dev, rdesc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n",
rdesc->name);
@@ -223,7 +223,6 @@ static int anatop_regulator_remove(struct platform_device *pdev)
struct anatop_regulator *sreg = rdev_get_drvdata(rdev);
const char *name = sreg->name;
- regulator_unregister(rdev);
kfree(name);
return 0;
@@ -256,7 +255,7 @@ static void __exit anatop_regulator_exit(void)
}
module_exit(anatop_regulator_exit);
-MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>, "
- "Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
+MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>");
+MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
MODULE_DESCRIPTION("ANATOP Regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 81d8681c319..4f6c2055f6b 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -226,7 +226,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
else
config.init_data = &ldo1->init_data;
- ldo1->regulator = regulator_register(desc, &config);
+ ldo1->regulator = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(ldo1->regulator)) {
ret = PTR_ERR(ldo1->regulator);
dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n",
@@ -239,18 +239,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
return 0;
}
-static int arizona_ldo1_remove(struct platform_device *pdev)
-{
- struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo1->regulator);
-
- return 0;
-}
-
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
- .remove = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index e87536bf0be..724706a97dc 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -225,7 +225,9 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
regmap_update_bits(arizona->regmap, ARIZONA_MIC_CHARGE_PUMP_1,
ARIZONA_CPMIC_BYPASS, 0);
- micsupp->regulator = regulator_register(&arizona_micsupp, &config);
+ micsupp->regulator = devm_regulator_register(&pdev->dev,
+ &arizona_micsupp,
+ &config);
if (IS_ERR(micsupp->regulator)) {
ret = PTR_ERR(micsupp->regulator);
dev_err(arizona->dev, "Failed to register mic supply: %d\n",
@@ -238,18 +240,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
return 0;
}
-static int arizona_micsupp_remove(struct platform_device *pdev)
-{
- struct arizona_micsupp *micsupp = platform_get_drvdata(pdev);
-
- regulator_unregister(micsupp->regulator);
-
- return 0;
-}
-
static struct platform_driver arizona_micsupp_driver = {
.probe = arizona_micsupp_probe,
- .remove = arizona_micsupp_remove,
.driver = {
.name = "arizona-micsupp",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 8406cd745da..c77a58478cc 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -117,26 +117,19 @@ static struct regulator_ops as3711_dldo_ops = {
};
static const struct regulator_linear_range as3711_sd_ranges[] = {
- { .min_uV = 612500, .max_uV = 1400000,
- .min_sel = 0x1, .max_sel = 0x40, .uV_step = 12500 },
- { .min_uV = 1425000, .max_uV = 2600000,
- .min_sel = 0x41, .max_sel = 0x70, .uV_step = 25000 },
- { .min_uV = 2650000, .max_uV = 3350000,
- .min_sel = 0x71, .max_sel = 0x7f, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(612500, 0x1, 0x40, 12500),
+ REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
+ REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7f, 50000),
};
static const struct regulator_linear_range as3711_aldo_ranges[] = {
- { .min_uV = 1200000, .max_uV = 1950000,
- .min_sel = 0, .max_sel = 0xf, .uV_step = 50000 },
- { .min_uV = 1800000, .max_uV = 3300000,
- .min_sel = 0x10, .max_sel = 0x1f, .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(1200000, 0, 0xf, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x10, 0x1f, 100000),
};
static const struct regulator_linear_range as3711_dldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1700000,
- .min_sel = 0, .max_sel = 0x10, .uV_step = 50000 },
- { .min_uV = 1750000, .max_uV = 3300000,
- .min_sel = 0x20, .max_sel = 0x3f, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 0x10, 50000),
+ REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000),
};
#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx) \
@@ -273,33 +266,16 @@ static int as3711_regulator_probe(struct platform_device *pdev)
config.regmap = as3711->regmap;
config.of_node = of_node[id];
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
- ret = PTR_ERR(rdev);
- goto eregreg;
+ return PTR_ERR(rdev);
}
reg->rdev = rdev;
}
platform_set_drvdata(pdev, regs);
return 0;
-
-eregreg:
- while (--id >= 0)
- regulator_unregister(regs[id].rdev);
-
- return ret;
-}
-
-static int as3711_regulator_remove(struct platform_device *pdev)
-{
- struct as3711_regulator *regs = platform_get_drvdata(pdev);
- int id;
-
- for (id = 0; id < AS3711_REGULATOR_NUM; ++id)
- regulator_unregister(regs[id].rdev);
- return 0;
}
static struct platform_driver as3711_regulator_driver = {
@@ -308,7 +284,6 @@ static struct platform_driver as3711_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = as3711_regulator_probe,
- .remove = as3711_regulator_remove,
};
static int __init as3711_regulator_init(void)
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
new file mode 100644
index 00000000000..5917fe3dc98
--- /dev/null
+++ b/drivers/regulator/as3722-regulator.c
@@ -0,0 +1,908 @@
+/*
+ * Voltage regulator support for AMS AS3722 PMIC
+ *
+ * Copyright (C) 2013 ams
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/as3722.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* Regulator IDs */
+enum as3722_regulators_id {
+ AS3722_REGULATOR_ID_SD0,
+ AS3722_REGULATOR_ID_SD1,
+ AS3722_REGULATOR_ID_SD2,
+ AS3722_REGULATOR_ID_SD3,
+ AS3722_REGULATOR_ID_SD4,
+ AS3722_REGULATOR_ID_SD5,
+ AS3722_REGULATOR_ID_SD6,
+ AS3722_REGULATOR_ID_LDO0,
+ AS3722_REGULATOR_ID_LDO1,
+ AS3722_REGULATOR_ID_LDO2,
+ AS3722_REGULATOR_ID_LDO3,
+ AS3722_REGULATOR_ID_LDO4,
+ AS3722_REGULATOR_ID_LDO5,
+ AS3722_REGULATOR_ID_LDO6,
+ AS3722_REGULATOR_ID_LDO7,
+ AS3722_REGULATOR_ID_LDO9,
+ AS3722_REGULATOR_ID_LDO10,
+ AS3722_REGULATOR_ID_LDO11,
+ AS3722_REGULATOR_ID_MAX,
+};
+
+struct as3722_register_mapping {
+ u8 regulator_id;
+ const char *name;
+ const char *sname;
+ u8 vsel_reg;
+ u8 vsel_mask;
+ int n_voltages;
+ u32 enable_reg;
+ u8 enable_mask;
+ u32 control_reg;
+ u8 mode_mask;
+ u32 sleep_ctrl_reg;
+ u8 sleep_ctrl_mask;
+};
+
+struct as3722_regulator_config_data {
+ struct regulator_init_data *reg_init;
+ bool enable_tracking;
+ int ext_control;
+};
+
+struct as3722_regulators {
+ struct device *dev;
+ struct as3722 *as3722;
+ struct regulator_dev *rdevs[AS3722_REGULATOR_ID_MAX];
+ struct regulator_desc desc[AS3722_REGULATOR_ID_MAX];
+ struct as3722_regulator_config_data
+ reg_config_data[AS3722_REGULATOR_ID_MAX];
+};
+
+static const struct as3722_register_mapping as3722_reg_lookup[] = {
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD0,
+ .name = "as3722-sd0",
+ .vsel_reg = AS3722_SD0_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(0),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD0_CONTROL_REG,
+ .mode_mask = AS3722_SD0_MODE_FAST,
+ .n_voltages = AS3722_SD0_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD1,
+ .name = "as3722-sd1",
+ .vsel_reg = AS3722_SD1_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(1),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD1_CONTROL_REG,
+ .mode_mask = AS3722_SD1_MODE_FAST,
+ .n_voltages = AS3722_SD0_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD2,
+ .name = "as3722-sd2",
+ .sname = "vsup-sd2",
+ .vsel_reg = AS3722_SD2_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(2),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD2_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD23_CONTROL_REG,
+ .mode_mask = AS3722_SD2_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD3,
+ .name = "as3722-sd3",
+ .sname = "vsup-sd3",
+ .vsel_reg = AS3722_SD3_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(3),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD3_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD23_CONTROL_REG,
+ .mode_mask = AS3722_SD3_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD4,
+ .name = "as3722-sd4",
+ .sname = "vsup-sd4",
+ .vsel_reg = AS3722_SD4_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(4),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG,
+ .sleep_ctrl_mask = AS3722_SD4_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD4_CONTROL_REG,
+ .mode_mask = AS3722_SD4_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD5,
+ .name = "as3722-sd5",
+ .sname = "vsup-sd5",
+ .vsel_reg = AS3722_SD5_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(5),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG,
+ .sleep_ctrl_mask = AS3722_SD5_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD5_CONTROL_REG,
+ .mode_mask = AS3722_SD5_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD6,
+ .name = "as3722-sd6",
+ .vsel_reg = AS3722_SD6_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(6),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG,
+ .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD6_CONTROL_REG,
+ .mode_mask = AS3722_SD6_MODE_FAST,
+ .n_voltages = AS3722_SD0_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO0,
+ .name = "as3722-ldo0",
+ .sname = "vin-ldo0",
+ .vsel_reg = AS3722_LDO0_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO0_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO0_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO0_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO0_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO1,
+ .name = "as3722-ldo1",
+ .sname = "vin-ldo1-6",
+ .vsel_reg = AS3722_LDO1_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO1_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO1_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO2,
+ .name = "as3722-ldo2",
+ .sname = "vin-ldo2-5-7",
+ .vsel_reg = AS3722_LDO2_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO2_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO2_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO3,
+ .name = "as3722-ldo3",
+ .name = "vin-ldo3-4",
+ .vsel_reg = AS3722_LDO3_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO3_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO3_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO3_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO3_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO4,
+ .name = "as3722-ldo4",
+ .name = "vin-ldo3-4",
+ .vsel_reg = AS3722_LDO4_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO4_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO4_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO5,
+ .name = "as3722-ldo5",
+ .sname = "vin-ldo2-5-7",
+ .vsel_reg = AS3722_LDO5_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO5_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO5_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO6,
+ .name = "as3722-ldo6",
+ .sname = "vin-ldo1-6",
+ .vsel_reg = AS3722_LDO6_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO6_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO6_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO7,
+ .name = "as3722-ldo7",
+ .sname = "vin-ldo2-5-7",
+ .vsel_reg = AS3722_LDO7_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO7_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO7_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO9,
+ .name = "as3722-ldo9",
+ .sname = "vin-ldo9-10",
+ .vsel_reg = AS3722_LDO9_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL1_REG,
+ .enable_mask = AS3722_LDO9_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG,
+ .sleep_ctrl_mask = AS3722_LDO9_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO10,
+ .name = "as3722-ldo10",
+ .sname = "vin-ldo9-10",
+ .vsel_reg = AS3722_LDO10_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL1_REG,
+ .enable_mask = AS3722_LDO10_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG,
+ .sleep_ctrl_mask = AS3722_LDO10_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO11,
+ .name = "as3722-ldo11",
+ .sname = "vin-ldo11",
+ .vsel_reg = AS3722_LDO11_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL1_REG,
+ .enable_mask = AS3722_LDO11_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG,
+ .sleep_ctrl_mask = AS3722_LDO11_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+};
+
+
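+/* supported current limits in microamps, in ascending order */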
+static const int as3722_ldo_current[] = { 150000, 300000 };
+static const int as3722_sd016_current[] = { 2500000, 3000000, 3500000 };
+
+static int as3722_current_to_index(int min_uA, int max_uA,
+ const int *curr_table, int n_currents)
+{
+ int i;
+
+ for (i = n_currents - 1; i >= 0; i--) {
+ if ((min_uA <= curr_table[i]) && (curr_table[i] <= max_uA))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int as3722_ldo_get_current_limit(struct regulator_dev *rdev)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ u32 val;
+ int ret;
+
+ ret = as3722_read(as3722, as3722_reg_lookup[id].vsel_reg, &val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ as3722_reg_lookup[id].vsel_reg, ret);
+ return ret;
+ }
+ if (val & AS3722_LDO_ILIMIT_MASK)
+ return 300000;
+ return 150000;
+}
+
+static int as3722_ldo_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 reg = 0;
+
+ ret = as3722_current_to_index(min_uA, max_uA, as3722_ldo_current,
+ ARRAY_SIZE(as3722_ldo_current));
+ if (ret < 0) {
+ dev_err(as3722_regs->dev,
+ "Current range min:max = %d:%d does not support\n",
+ min_uA, max_uA);
+ return ret;
+ }
+ if (ret)
+ reg = AS3722_LDO_ILIMIT_BIT;
+ return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg,
+ AS3722_LDO_ILIMIT_MASK, reg);
+}
+
+static struct regulator_ops as3722_ldo0_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static struct regulator_ops as3722_ldo0_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static int as3722_ldo3_set_tracking_mode(struct as3722_regulators *as3722_reg,
+ int id, u8 mode)
+{
+ struct as3722 *as3722 = as3722_reg->as3722;
+
+ switch (mode) {
+ case AS3722_LDO3_MODE_PMOS:
+ case AS3722_LDO3_MODE_PMOS_TRACKING:
+ case AS3722_LDO3_MODE_NMOS:
+ case AS3722_LDO3_MODE_SWITCH:
+ return as3722_update_bits(as3722,
+ as3722_reg_lookup[id].vsel_reg,
+ AS3722_LDO3_MODE_MASK, mode);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int as3722_ldo3_get_current_limit(struct regulator_dev *rdev)
+{
+ return 150000;
+}
+
+static struct regulator_ops as3722_ldo3_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo3_get_current_limit,
+};
+
+static struct regulator_ops as3722_ldo3_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo3_get_current_limit,
+};
+
+static const struct regulator_linear_range as3722_ldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
+ REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
+};
+
+static struct regulator_ops as3722_ldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static struct regulator_ops as3722_ldo_extcntrl_ops = {
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static unsigned int as3722_sd_get_mode(struct regulator_dev *rdev)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ u32 val;
+ int ret;
+
+ if (!as3722_reg_lookup[id].control_reg)
+ return -ENOTSUPP;
+
+ ret = as3722_read(as3722, as3722_reg_lookup[id].control_reg, &val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ as3722_reg_lookup[id].control_reg, ret);
+ return ret;
+ }
+
+ if (val & as3722_reg_lookup[id].mode_mask)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int as3722_sd_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ u8 id = rdev_get_id(rdev);
+ u8 val = 0;
+ int ret;
+
+ if (!as3722_reg_lookup[id].control_reg)
+ return -ERANGE;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = as3722_reg_lookup[id].mode_mask;
+ /* fall through */
+ case REGULATOR_MODE_NORMAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = as3722_update_bits(as3722, as3722_reg_lookup[id].control_reg,
+ as3722_reg_lookup[id].mode_mask, val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n",
+ as3722_reg_lookup[id].control_reg, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int as3722_sd016_get_current_limit(struct regulator_dev *rdev)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ u32 val, reg;
+ int mask;
+ int ret;
+
+ switch (id) {
+ case AS3722_REGULATOR_ID_SD0:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD1:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD6:
+ reg = AS3722_OVCURRENT_DEB_REG;
+ mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = as3722_read(as3722, reg, &val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ reg, ret);
+ return ret;
+ }
+ val &= mask;
+ val >>= ffs(mask) - 1;
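+ /* a field value of 3 does not correspond to a defined current limit */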
+ if (val == 3)
+ return -EINVAL;
+ return as3722_sd016_current[val];
+}
+
+static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ int ret;
+ int val;
+ int mask;
+ u32 reg;
+
+ ret = as3722_current_to_index(min_uA, max_uA, as3722_sd016_current,
+ ARRAY_SIZE(as3722_sd016_current));
+ if (ret < 0) {
+ dev_err(as3722_regs->dev,
+ "Current range min:max = %d:%d does not support\n",
+ min_uA, max_uA);
+ return ret;
+ }
+
+ switch (id) {
+ case AS3722_REGULATOR_ID_SD0:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD1:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD6:
+ reg = AS3722_OVCURRENT_DEB_REG;
+ mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val = ret & mask;
+ val <<= ffs(mask) - 1;
+ return as3722_update_bits(as3722, reg, mask, val);
+}
+
+static const struct regulator_linear_range as3722_sd2345_ranges[] = {
+ REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
+ REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
+ REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
+};
+
+static struct regulator_ops as3722_sd016_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_sd016_get_current_limit,
+ .set_current_limit = as3722_sd016_set_current_limit,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static struct regulator_ops as3722_sd016_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_sd016_get_current_limit,
+ .set_current_limit = as3722_sd016_set_current_limit,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static struct regulator_ops as3722_sd2345_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static struct regulator_ops as3722_sd2345_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static int as3722_extreg_init(struct as3722_regulators *as3722_regs, int id,
+ int ext_pwr_ctrl)
+{
+ int ret;
+ unsigned int val;
+
+ if ((ext_pwr_ctrl < AS3722_EXT_CONTROL_ENABLE1) ||
+ (ext_pwr_ctrl > AS3722_EXT_CONTROL_ENABLE3))
+ return -EINVAL;
+
+ val = ext_pwr_ctrl << (ffs(as3722_reg_lookup[id].sleep_ctrl_mask) - 1);
+ ret = as3722_update_bits(as3722_regs->as3722,
+ as3722_reg_lookup[id].sleep_ctrl_reg,
+ as3722_reg_lookup[id].sleep_ctrl_mask, val);
+ if (ret < 0)
+ dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n",
+ as3722_reg_lookup[id].sleep_ctrl_reg, ret);
+ return ret;
+}
+
+static struct of_regulator_match as3722_regulator_matches[] = {
+ { .name = "sd0", },
+ { .name = "sd1", },
+ { .name = "sd2", },
+ { .name = "sd3", },
+ { .name = "sd4", },
+ { .name = "sd5", },
+ { .name = "sd6", },
+ { .name = "ldo0", },
+ { .name = "ldo1", },
+ { .name = "ldo2", },
+ { .name = "ldo3", },
+ { .name = "ldo4", },
+ { .name = "ldo5", },
+ { .name = "ldo6", },
+ { .name = "ldo7", },
+ { .name = "ldo9", },
+ { .name = "ldo10", },
+ { .name = "ldo11", },
+};
+
+static int as3722_get_regulator_dt_data(struct platform_device *pdev,
+ struct as3722_regulators *as3722_regs)
+{
+ struct device_node *np;
+ struct as3722_regulator_config_data *reg_config;
+ u32 prop;
+ int id;
+ int ret;
+
+ np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!np) {
+ dev_err(&pdev->dev, "Device is not having regulators node\n");
+ return -ENODEV;
+ }
+ pdev->dev.of_node = np;
+
+ ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches,
+ ARRAY_SIZE(as3722_regulator_matches));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(as3722_regulator_matches); ++id) {
+ struct device_node *reg_node;
+
+ reg_config = &as3722_regs->reg_config_data[id];
+ reg_config->reg_init = as3722_regulator_matches[id].init_data;
+ reg_node = as3722_regulator_matches[id].of_node;
+
+ if (!reg_config->reg_init || !reg_node)
+ continue;
+
+ ret = of_property_read_u32(reg_node, "ams,ext-control", &prop);
+ if (!ret) {
+ if (prop < 3)
+ reg_config->ext_control = prop;
+ else
+ dev_warn(&pdev->dev,
+ "ext-control have invalid option: %u\n",
+ prop);
+ }
+ reg_config->enable_tracking =
+ of_property_read_bool(reg_node, "ams,enable-tracking");
+ }
+ return 0;
+}
+
+static int as3722_regulator_probe(struct platform_device *pdev)
+{
+ struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
+ struct as3722_regulators *as3722_regs;
+ struct as3722_regulator_config_data *reg_config;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ struct regulator_ops *ops;
+ int id;
+ int ret;
+
+ as3722_regs = devm_kzalloc(&pdev->dev, sizeof(*as3722_regs),
+ GFP_KERNEL);
+ if (!as3722_regs)
+ return -ENOMEM;
+
+ as3722_regs->dev = &pdev->dev;
+ as3722_regs->as3722 = as3722;
+ platform_set_drvdata(pdev, as3722_regs);
+
+ ret = as3722_get_regulator_dt_data(pdev, as3722_regs);
+ if (ret < 0)
+ return ret;
+
+ config.dev = &pdev->dev;
+ config.driver_data = as3722_regs;
+ config.regmap = as3722->regmap;
+
+ for (id = 0; id < AS3722_REGULATOR_ID_MAX; id++) {
+ reg_config = &as3722_regs->reg_config_data[id];
+
+ as3722_regs->desc[id].name = as3722_reg_lookup[id].name;
+ as3722_regs->desc[id].supply_name = as3722_reg_lookup[id].sname;
+ as3722_regs->desc[id].id = as3722_reg_lookup[id].regulator_id;
+ as3722_regs->desc[id].n_voltages =
+ as3722_reg_lookup[id].n_voltages;
+ as3722_regs->desc[id].type = REGULATOR_VOLTAGE;
+ as3722_regs->desc[id].owner = THIS_MODULE;
+ as3722_regs->desc[id].enable_reg =
+ as3722_reg_lookup[id].enable_reg;
+ as3722_regs->desc[id].enable_mask =
+ as3722_reg_lookup[id].enable_mask;
+ as3722_regs->desc[id].vsel_reg = as3722_reg_lookup[id].vsel_reg;
+ as3722_regs->desc[id].vsel_mask =
+ as3722_reg_lookup[id].vsel_mask;
+ switch (id) {
+ case AS3722_REGULATOR_ID_LDO0:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo0_extcntrl_ops;
+ else
+ ops = &as3722_ldo0_ops;
+ as3722_regs->desc[id].min_uV = 825000;
+ as3722_regs->desc[id].uV_step = 25000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 500;
+ break;
+ case AS3722_REGULATOR_ID_LDO3:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo3_extcntrl_ops;
+ else
+ ops = &as3722_ldo3_ops;
+ as3722_regs->desc[id].min_uV = 620000;
+ as3722_regs->desc[id].uV_step = 20000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 500;
+ if (reg_config->enable_tracking) {
+ ret = as3722_ldo3_set_tracking_mode(as3722_regs,
+ id, AS3722_LDO3_MODE_PMOS_TRACKING);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "LDO3 tracking failed: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ break;
+ case AS3722_REGULATOR_ID_SD0:
+ case AS3722_REGULATOR_ID_SD1:
+ case AS3722_REGULATOR_ID_SD6:
+ if (reg_config->ext_control)
+ ops = &as3722_sd016_extcntrl_ops;
+ else
+ ops = &as3722_sd016_ops;
+ as3722_regs->desc[id].min_uV = 610000;
+ as3722_regs->desc[id].uV_step = 10000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ break;
+ case AS3722_REGULATOR_ID_SD2:
+ case AS3722_REGULATOR_ID_SD3:
+ case AS3722_REGULATOR_ID_SD4:
+ case AS3722_REGULATOR_ID_SD5:
+ if (reg_config->ext_control)
+ ops = &as3722_sd2345_extcntrl_ops;
+ else
+ ops = &as3722_sd2345_ops;
+ as3722_regs->desc[id].linear_ranges =
+ as3722_sd2345_ranges;
+ as3722_regs->desc[id].n_linear_ranges =
+ ARRAY_SIZE(as3722_sd2345_ranges);
+ break;
+ default:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo_extcntrl_ops;
+ else
+ ops = &as3722_ldo_ops;
+ as3722_regs->desc[id].min_uV = 825000;
+ as3722_regs->desc[id].uV_step = 25000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 500;
+ as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
+ as3722_regs->desc[id].n_linear_ranges =
+ ARRAY_SIZE(as3722_ldo_ranges);
+ break;
+ }
+ as3722_regs->desc[id].ops = ops;
+ config.init_data = reg_config->reg_init;
+ config.of_node = as3722_regulator_matches[id].of_node;
+ rdev = devm_regulator_register(&pdev->dev,
+ &as3722_regs->desc[id], &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "regulator %d register failed %d\n",
+ id, ret);
+ return ret;
+ }
+
+ as3722_regs->rdevs[id] = rdev;
+ if (reg_config->ext_control) {
+ ret = regulator_enable_regmap(rdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Regulator %d enable failed: %d\n",
+ id, ret);
+ return ret;
+ }
+ ret = as3722_extreg_init(as3722_regs, id,
+ reg_config->ext_control);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "AS3722 ext control failed: %d", ret);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id of_as3722_regulator_match[] = {
+ { .compatible = "ams,as3722-regulator", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_as3722_regulator_match);
+
+static struct platform_driver as3722_regulator_driver = {
+ .driver = {
+ .name = "as3722-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = of_as3722_regulator_match,
+ },
+ .probe = as3722_regulator_probe,
+};
+
+module_platform_driver(as3722_regulator_driver);
+
+MODULE_ALIAS("platform:as3722-regulator");
+MODULE_DESCRIPTION("AS3722 regulator driver");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a01b8b3b70c..6382f0af353 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -36,6 +36,7 @@
#include <trace/events/regulator.h>
#include "dummy.h"
+#include "internal.h"
#define rdev_crit(rdev, fmt, ...) \
pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
@@ -52,8 +53,8 @@ static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_list);
static LIST_HEAD(regulator_map_list);
static LIST_HEAD(regulator_ena_gpio_list);
+static LIST_HEAD(regulator_supply_alias_list);
static bool has_full_constraints;
-static bool board_wants_dummy_regulator;
static struct dentry *debugfs_root;
@@ -83,22 +84,16 @@ struct regulator_enable_gpio {
};
/*
- * struct regulator
+ * struct regulator_supply_alias
*
- * One for each consumer device.
+ * Used to map lookups for a supply onto an alternative device.
*/
-struct regulator {
- struct device *dev;
+struct regulator_supply_alias {
struct list_head list;
- unsigned int always_on:1;
- unsigned int bypass:1;
- int uA_load;
- int min_uV;
- int max_uV;
- char *supply_name;
- struct device_attribute dev_attr;
- struct regulator_dev *rdev;
- struct dentry *debugfs;
+ struct device *src_dev;
+ const char *src_supply;
+ struct device *alias_dev;
+ const char *alias_supply;
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
@@ -923,6 +918,36 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
return 0;
}
+static int machine_constraints_current(struct regulator_dev *rdev,
+ struct regulation_constraints *constraints)
+{
+ struct regulator_ops *ops = rdev->desc->ops;
+ int ret;
+
+ if (!constraints->min_uA && !constraints->max_uA)
+ return 0;
+
+ if (constraints->min_uA > constraints->max_uA) {
+ rdev_err(rdev, "Invalid current constraints\n");
+ return -EINVAL;
+ }
+
+ if (!ops->set_current_limit || !ops->get_current_limit) {
+ rdev_warn(rdev, "Operation of current configuration missing\n");
+ return 0;
+ }
+
+ /* Set regulator current in constraints range */
+ ret = ops->set_current_limit(rdev, constraints->min_uA,
+ constraints->max_uA);
+ if (ret < 0) {
+ rdev_err(rdev, "Failed to set current constraint, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -953,6 +978,10 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (ret != 0)
goto out;
+ ret = machine_constraints_current(rdev, rdev->constraints);
+ if (ret != 0)
+ goto out;
+
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
@@ -1186,11 +1215,39 @@ overflow_err:
static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
+ if (rdev->constraints && rdev->constraints->enable_time)
+ return rdev->constraints->enable_time;
if (!rdev->desc->ops->enable_time)
return rdev->desc->enable_time;
return rdev->desc->ops->enable_time(rdev);
}
+static struct regulator_supply_alias *regulator_find_supply_alias(
+ struct device *dev, const char *supply)
+{
+ struct regulator_supply_alias *map;
+
+ list_for_each_entry(map, &regulator_supply_alias_list, list)
+ if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0)
+ return map;
+
+ return NULL;
+}
+
+static void regulator_supply_alias(struct device **dev, const char **supply)
+{
+ struct regulator_supply_alias *map;
+
+ map = regulator_find_supply_alias(*dev, *supply);
+ if (map) {
+ dev_dbg(*dev, "Mapping supply %s to %s,%s\n",
+ *supply, map->alias_supply,
+ dev_name(map->alias_dev));
+ *dev = map->alias_dev;
+ *supply = map->alias_supply;
+ }
+}
+
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
const char *supply,
int *ret)
@@ -1200,6 +1257,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
struct regulator_map *map;
const char *devname = NULL;
+ regulator_supply_alias(&dev, &supply);
+
/* first do a dt based lookup */
if (dev && dev->of_node) {
node = of_get_regulator(dev, supply);
@@ -1243,16 +1302,16 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
- bool exclusive)
+ bool exclusive, bool allow_dummy)
{
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = 0;
+ int ret = -EPROBE_DEFER;
if (id == NULL) {
pr_err("get() with no identifier\n");
- return regulator;
+ return ERR_PTR(-EINVAL);
}
if (dev)
@@ -1264,34 +1323,32 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (rdev)
goto found;
+ regulator = ERR_PTR(ret);
+
/*
* If we have return value from dev_lookup fail, we do not expect to
* succeed, so, quit with appropriate error value
*/
- if (ret) {
- regulator = ERR_PTR(ret);
+ if (ret && ret != -ENODEV) {
goto out;
}
- if (board_wants_dummy_regulator) {
- rdev = dummy_regulator_rdev;
- goto found;
- }
-
-#ifdef CONFIG_REGULATOR_DUMMY
if (!devname)
devname = "deviceless";
- /* If the board didn't flag that it was fully constrained then
- * substitute in a dummy regulator so consumers can continue.
+ /*
+ * Assume that a regulator is physically present and enabled
+ * even if it isn't hooked up and just provide a dummy.
*/
- if (!has_full_constraints) {
+ if (has_full_constraints && allow_dummy) {
pr_warn("%s supply %s not found, using dummy regulator\n",
devname, id);
+
rdev = dummy_regulator_rdev;
goto found;
+ } else {
+ dev_err(dev, "dummy supplies not allowed\n");
}
-#endif
mutex_unlock(&regulator_list_mutex);
return regulator;
@@ -1349,44 +1406,10 @@ out:
*/
struct regulator *regulator_get(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, false);
+ return _regulator_get(dev, id, false, true);
}
EXPORT_SYMBOL_GPL(regulator_get);
-static void devm_regulator_release(struct device *dev, void *res)
-{
- regulator_put(*(struct regulator **)res);
-}
-
-/**
- * devm_regulator_get - Resource managed regulator_get()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Managed regulator_get(). Regulators returned from this function are
- * automatically regulator_put() on driver detach. See regulator_get() for more
- * information.
- */
-struct regulator *devm_regulator_get(struct device *dev, const char *id)
-{
- struct regulator **ptr, *regulator;
-
- ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regulator = regulator_get(dev, id);
- if (!IS_ERR(regulator)) {
- *ptr = regulator;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regulator;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_get);
-
/**
* regulator_get_exclusive - obtain exclusive access to a regulator.
* @dev: device for regulator "consumer"
@@ -1410,7 +1433,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_get);
*/
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, true);
+ return _regulator_get(dev, id, true, false);
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
@@ -1439,40 +1462,10 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
*/
struct regulator *regulator_get_optional(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, 0);
+ return _regulator_get(dev, id, false, false);
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/**
- * devm_regulator_get_optional - Resource managed regulator_get_optional()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Managed regulator_get_optional(). Regulators returned from this
- * function are automatically regulator_put() on driver detach. See
- * regulator_get_optional() for more information.
- */
-struct regulator *devm_regulator_get_optional(struct device *dev,
- const char *id)
-{
- struct regulator **ptr, *regulator;
-
- ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regulator = regulator_get_optional(dev, id);
- if (!IS_ERR(regulator)) {
- *ptr = regulator;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regulator;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
-
/* Locks held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
@@ -1499,36 +1492,6 @@ static void _regulator_put(struct regulator *regulator)
}
/**
- * devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Managed regulator_get_exclusive(). Regulators returned from this function
- * are automatically regulator_put() on driver detach. See regulator_get() for
- * more information.
- */
-struct regulator *devm_regulator_get_exclusive(struct device *dev,
- const char *id)
-{
- struct regulator **ptr, *regulator;
-
- ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regulator = _regulator_get(dev, id, 1);
- if (!IS_ERR(regulator)) {
- *ptr = regulator;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regulator;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
-
-/**
* regulator_put - "free" the regulator source
* @regulator: regulator source
*
@@ -1544,34 +1507,133 @@ void regulator_put(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_put);
-static int devm_regulator_match(struct device *dev, void *res, void *data)
+/**
+ * regulator_register_supply_alias - Provide device alias for supply lookup
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: Supply name or regulator ID that should be used to lookup the
+ * supply
+ *
+ * All lookups for id on dev will instead be conducted for alias_id on
+ * alias_dev.
+ */
+int regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ struct regulator_supply_alias *map;
+
+ map = regulator_find_supply_alias(dev, id);
+ if (map)
+ return -EEXIST;
+
+ map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ map->src_dev = dev;
+ map->src_supply = id;
+ map->alias_dev = alias_dev;
+ map->alias_supply = alias_id;
+
+ list_add(&map->list, &regulator_supply_alias_list);
+
+ pr_info("Adding alias for supply %s,%s -> %s,%s\n",
+ id, dev_name(dev), alias_id, dev_name(alias_dev));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_register_supply_alias);
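As a minimal usage sketch of the alias API added above (the parent/child devices and the "vdd"/"vsys" supply names are purely illustrative, not taken from this patch), a parent driver can redirect a child's supply lookup onto itself so that regulator_get(child, "vdd") is resolved as regulator_get(parent, "vsys"):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical helper; "vdd" and "vsys" are made-up supply names. */
static int example_bind_child_supply(struct device *parent,
				     struct device *child)
{
	/* regulator_get(child, "vdd") now looks up "vsys" on parent */
	return regulator_register_supply_alias(child, "vdd", parent, "vsys");
}

static void example_unbind_child_supply(struct device *child)
{
	/* drop the mapping again on teardown */
	regulator_unregister_supply_alias(child, "vdd");
}
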
+
+/**
+ * regulator_unregister_supply_alias - Remove device alias
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ *
+ * Remove a lookup alias if one exists for id on dev.
+ */
+void regulator_unregister_supply_alias(struct device *dev, const char *id)
{
- struct regulator **r = res;
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
+ struct regulator_supply_alias *map;
+
+ map = regulator_find_supply_alias(dev, id);
+ if (map) {
+ list_del(&map->list);
+ kfree(map);
}
- return *r == data;
}
+EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias);
/**
- * devm_regulator_put - Resource managed regulator_put()
- * @regulator: regulator to free
+ * regulator_bulk_register_supply_alias - register multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: List of supply names or regulator IDs that should be used to
+ * lookup the supply
+ * @num_id: Number of aliases to register
+ *
+ * @return 0 on success, an errno on failure.
*
- * Deallocate a regulator allocated with devm_regulator_get(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
+ * This helper function allows drivers to register several supply
+ * aliases in one operation. If any of the aliases cannot be
+ * registered, any aliases that were registered will be removed
+ * before returning to the caller.
*/
-void devm_regulator_put(struct regulator *regulator)
+int regulator_bulk_register_supply_alias(struct device *dev, const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id)
{
- int rc;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_id; ++i) {
+ ret = regulator_register_supply_alias(dev, id[i], alias_dev,
+ alias_id[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_err(dev,
+ "Failed to create supply alias %s,%s -> %s,%s\n",
+ id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));
+
+ while (--i >= 0)
+ regulator_unregister_supply_alias(dev, id[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias);
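The bulk variant registers several aliases at once; a short sketch, again with made-up supply names ("avdd"/"dvdd" on the consumer side, "ldo1"/"ldo2" on a hypothetical parent PMIC):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical supply names; the two arrays must be the same length. */
static const char *example_src_supplies[]   = { "avdd", "dvdd" };
static const char *example_alias_supplies[] = { "ldo1", "ldo2" };

static int example_alias_all(struct device *child, struct device *pmic)
{
	return regulator_bulk_register_supply_alias(child,
					example_src_supplies, pmic,
					example_alias_supplies,
					ARRAY_SIZE(example_src_supplies));
}
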
+
+/**
+ * regulator_bulk_unregister_supply_alias - unregister multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @num_id: Number of aliases to unregister
+ *
+ * This helper function allows drivers to unregister several supply
+ * aliases in one operation.
+ */
+void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id)
+{
+ int i;
- rc = devres_release(regulator->dev, devm_regulator_release,
- devm_regulator_match, regulator);
- if (rc != 0)
- WARN_ON(rc);
+ for (i = 0; i < num_id; ++i)
+ regulator_unregister_supply_alias(dev, id[i]);
}
-EXPORT_SYMBOL_GPL(devm_regulator_put);
+EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);
+
/* Manage enable GPIO list. Same GPIO pin can be shared among regulators */
static int regulator_ena_gpio_request(struct regulator_dev *rdev,
@@ -1704,11 +1766,39 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* together. */
trace_regulator_enable_delay(rdev_get_name(rdev));
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
+ /*
+ * Delay for the requested amount of time as per the guidelines in:
+ *
+ * Documentation/timers/timers-howto.txt
+ *
+ * The assumption here is that regulators will never be enabled in
+ * atomic context and therefore sleeping functions can be used.
+ */
+ if (delay) {
+ unsigned int ms = delay / 1000;
+ unsigned int us = delay % 1000;
+
+ if (ms > 0) {
+ /*
+ * For small enough values, handle super-millisecond
+ * delays in the usleep_range() call below.
+ */
+ if (ms < 20)
+ us += ms * 1000;
+ else
+ msleep(ms);
+ }
+
+ /*
+ * Give the scheduler some room to coalesce with any other
+ * wakeup sources. For delays shorter than 10 us, don't even
+ * bother setting up high-resolution timers and just busy-
+ * loop.
+ */
+ if (us >= 10)
+ usleep_range(us, us + 100);
+ else
+ udelay(us);
}
trace_regulator_enable_complete(rdev_get_name(rdev));
@@ -2489,6 +2579,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
ret = rdev->desc->ops->get_voltage(rdev);
} else if (rdev->desc->ops->list_voltage) {
ret = rdev->desc->ops->list_voltage(rdev, 0);
+ } else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
+ ret = rdev->desc->fixed_uV;
} else {
return -EINVAL;
}
@@ -2912,52 +3004,6 @@ err:
}
EXPORT_SYMBOL_GPL(regulator_bulk_get);
-/**
- * devm_regulator_bulk_get - managed get multiple regulator consumers
- *
- * @dev: Device to supply
- * @num_consumers: Number of consumers to register
- * @consumers: Configuration of consumers; clients are stored here.
- *
- * @return 0 on success, an errno on failure.
- *
- * This helper function allows drivers to get several regulator
- * consumers in one operation with management, the regulators will
- * automatically be freed when the device is unbound. If any of the
- * regulators cannot be acquired then any regulators that were
- * allocated will be freed before returning to the caller.
- */
-int devm_regulator_bulk_get(struct device *dev, int num_consumers,
- struct regulator_bulk_data *consumers)
-{
- int i;
- int ret;
-
- for (i = 0; i < num_consumers; i++)
- consumers[i].consumer = NULL;
-
- for (i = 0; i < num_consumers; i++) {
- consumers[i].consumer = devm_regulator_get(dev,
- consumers[i].supply);
- if (IS_ERR(consumers[i].consumer)) {
- ret = PTR_ERR(consumers[i].consumer);
- dev_err(dev, "Failed to get supply '%s': %d\n",
- consumers[i].supply, ret);
- consumers[i].consumer = NULL;
- goto err;
- }
- }
-
- return 0;
-
-err:
- for (i = 0; i < num_consumers && consumers[i].consumer; i++)
- devm_regulator_put(consumers[i].consumer);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
-
static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
{
struct regulator_bulk_data *bulk = data;
@@ -3170,7 +3216,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
/* some attributes need specific methods to be displayed */
if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
(ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
- (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0)) {
+ (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
+ (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
status = device_create_file(dev, &dev_attr_microvolts);
if (status < 0)
return status;
@@ -3614,22 +3661,6 @@ void regulator_has_full_constraints(void)
EXPORT_SYMBOL_GPL(regulator_has_full_constraints);
/**
- * regulator_use_dummy_regulator - Provide a dummy regulator when none is found
- *
- * Calling this function will cause the regulator API to provide a
- * dummy regulator to consumers if no physical regulator is found,
- * allowing most consumers to proceed as though a regulator were
- * configured. This allows systems such as those with software
- * controllable regulators for the CPU core only to be brought up more
- * readily.
- */
-void regulator_use_dummy_regulator(void)
-{
- board_wants_dummy_regulator = true;
-}
-EXPORT_SYMBOL_GPL(regulator_use_dummy_regulator);
-
-/**
* rdev_get_drvdata - get rdev regulator driver data
* @rdev: regulator
*
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index f06854cf8cf..b431ae357fc 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -253,10 +253,8 @@ static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
}
static const struct regulator_linear_range da9034_ldo12_ranges[] = {
- { .min_uV = 1700000, .max_uV = 2050000, .min_sel = 0, .max_sel = 7,
- .uV_step = 50000 },
- { .min_uV = 2700000, .max_uV = 3050000, .min_sel = 8, .max_sel = 15,
- .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 50000),
+ REGULATOR_LINEAR_RANGE(2700000, 8, 15, 50000),
};
static struct regulator_ops da903x_regulator_ldo_ops = {
@@ -463,7 +461,7 @@ static int da903x_regulator_probe(struct platform_device *pdev)
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = ri;
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
@@ -474,21 +472,12 @@ static int da903x_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int da903x_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
- return 0;
-}
-
static struct platform_driver da903x_regulator_driver = {
.driver = {
.name = "da903x-regulator",
.owner = THIS_MODULE,
},
.probe = da903x_regulator_probe,
- .remove = da903x_regulator_remove,
};
static int __init da903x_regulator_init(void)
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 1e4d483f616..3adeaeffc48 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -70,6 +70,7 @@ struct da9052_regulator_info {
int step_uV;
int min_uV;
int max_uV;
+ unsigned char activate_bit;
};
struct da9052_regulator {
@@ -209,6 +210,36 @@ static int da9052_map_voltage(struct regulator_dev *rdev,
return sel;
}
+static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_update(regulator->da9052, rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, selector);
+ if (ret < 0)
+ return ret;
+
+ /* Some LDOs and DCDCs are DVC controlled, which requires enabling of
+ * the activate bit to implement the changes on the output.
+ */
+ switch (id) {
+ case DA9052_ID_BUCK1:
+ case DA9052_ID_BUCK2:
+ case DA9052_ID_BUCK3:
+ case DA9052_ID_LDO2:
+ case DA9052_ID_LDO3:
+ ret = da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+ info->activate_bit, info->activate_bit);
+ break;
+ }
+
+ return ret;
+}
+
static struct regulator_ops da9052_dcdc_ops = {
.get_current_limit = da9052_dcdc_get_current_limit,
.set_current_limit = da9052_dcdc_set_current_limit,
@@ -216,7 +247,7 @@ static struct regulator_ops da9052_dcdc_ops = {
.list_voltage = da9052_list_voltage,
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_sel = da9052_regulator_set_voltage_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -226,7 +257,7 @@ static struct regulator_ops da9052_ldo_ops = {
.list_voltage = da9052_list_voltage,
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_sel = da9052_regulator_set_voltage_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -243,14 +274,13 @@ static struct regulator_ops da9052_ldo_ops = {
.owner = THIS_MODULE,\
.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.vsel_mask = (1 << (sbits)) - 1,\
- .apply_reg = DA9052_SUPPLY_REG, \
- .apply_bit = (abits), \
.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.enable_mask = 1 << (ebits),\
},\
.min_uV = (min) * 1000,\
.max_uV = (max) * 1000,\
.step_uV = (step) * 1000,\
+ .activate_bit = (abits),\
}
#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
@@ -264,14 +294,13 @@ static struct regulator_ops da9052_ldo_ops = {
.owner = THIS_MODULE,\
.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.vsel_mask = (1 << (sbits)) - 1,\
- .apply_reg = DA9052_SUPPLY_REG, \
- .apply_bit = (abits), \
.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.enable_mask = 1 << (ebits),\
},\
.min_uV = (min) * 1000,\
.max_uV = (max) * 1000,\
.step_uV = (step) * 1000,\
+ .activate_bit = (abits),\
}
static struct da9052_regulator_info da9052_regulator_info[] = {
@@ -389,8 +418,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
#endif
}
- regulator->rdev = regulator_register(&regulator->info->reg_desc,
- &config);
+ regulator->rdev = devm_regulator_register(&pdev->dev,
+ &regulator->info->reg_desc,
+ &config);
if (IS_ERR(regulator->rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
regulator->info->reg_desc.name);
@@ -402,17 +432,8 @@ static int da9052_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int da9052_regulator_remove(struct platform_device *pdev)
-{
- struct da9052_regulator *regulator = platform_get_drvdata(pdev);
-
- regulator_unregister(regulator->rdev);
- return 0;
-}
-
static struct platform_driver da9052_regulator_driver = {
.probe = da9052_regulator_probe,
- .remove = da9052_regulator_remove,
.driver = {
.name = "da9052-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 77b53e5a231..7f340206d32 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -564,13 +564,13 @@ static int da9055_regulator_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- regulator->rdev = regulator_register(&regulator->info->reg_desc,
- &config);
+ regulator->rdev = devm_regulator_register(&pdev->dev,
+ &regulator->info->reg_desc,
+ &config);
if (IS_ERR(regulator->rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
regulator->info->reg_desc.name);
- ret = PTR_ERR(regulator->rdev);
- return ret;
+ return PTR_ERR(regulator->rdev);
}
/* Only LDO 5 and 6 has got the over current interrupt */
@@ -588,7 +588,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Failed to request Regulator IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ return ret;
}
}
}
@@ -596,24 +596,10 @@ static int da9055_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, regulator);
return 0;
-
-err_regulator:
- regulator_unregister(regulator->rdev);
- return ret;
-}
-
-static int da9055_regulator_remove(struct platform_device *pdev)
-{
- struct da9055_regulator *regulator = platform_get_drvdata(pdev);
-
- regulator_unregister(regulator->rdev);
-
- return 0;
}
static struct platform_driver da9055_regulator_driver = {
.probe = da9055_regulator_probe,
- .remove = da9055_regulator_remove,
.driver = {
.name = "da9055-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index b9f2653e4ef..56727eb745d 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -717,7 +717,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
{
struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev);
- struct of_regulator_match *da9063_reg_matches;
+ struct of_regulator_match *da9063_reg_matches = NULL;
struct da9063_regulators_pdata *regl_pdata;
const struct da9063_dev_model *model;
struct da9063_regulators *regulators;
@@ -847,13 +847,13 @@ static int da9063_regulator_probe(struct platform_device *pdev)
if (da9063_reg_matches)
config.of_node = da9063_reg_matches[id].of_node;
config.regmap = da9063->regmap;
- regl->rdev = regulator_register(&regl->desc, &config);
+ regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc,
+ &config);
if (IS_ERR(regl->rdev)) {
dev_err(&pdev->dev,
"Failed to register %s regulator\n",
regl->desc.name);
- ret = PTR_ERR(regl->rdev);
- goto err;
+ return PTR_ERR(regl->rdev);
}
id++;
n++;
@@ -862,9 +862,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
/* LDOs overcurrent event support */
irq = platform_get_irq_byname(pdev, "LDO_LIM");
if (irq < 0) {
- ret = irq;
dev_err(&pdev->dev, "Failed to get IRQ.\n");
- goto err;
+ return irq;
}
regulators->irq_ldo_lim = regmap_irq_get_virq(da9063->regmap_irq, irq);
@@ -881,27 +880,15 @@ static int da9063_regulator_probe(struct platform_device *pdev)
}
return 0;
-
-err:
- /* Wind back regulators registeration */
- while (--n >= 0)
- regulator_unregister(regulators->regulator[n].rdev);
-
- return ret;
}
static int da9063_regulator_remove(struct platform_device *pdev)
{
struct da9063_regulators *regulators = platform_get_drvdata(pdev);
- struct da9063_regulator *regl;
free_irq(regulators->irq_ldo_lim, regulators);
free_irq(regulators->irq_uvov, regulators);
- for (regl = &regulators->regulator[regulators->n_regulators - 1];
- regl >= &regulators->regulator[0]; regl--)
- regulator_unregister(regl->rdev);
-
return 0;
}
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f0fe54b3897..6f5ecbe1132 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
#include "da9210-regulator.h"
@@ -126,7 +127,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct da9210 *chip;
- struct da9210_pdata *pdata = i2c->dev.platform_data;
+ struct device *dev = &i2c->dev;
+ struct da9210_pdata *pdata = dev_get_platdata(dev);
struct regulator_dev *rdev = NULL;
struct regulator_config config = { };
int error;
@@ -147,12 +149,13 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
}
config.dev = &i2c->dev;
- if (pdata)
- config.init_data = &pdata->da9210_constraints;
+ config.init_data = pdata ? &pdata->da9210_constraints :
+ of_get_regulator_init_data(dev, dev->of_node);
config.driver_data = chip;
config.regmap = chip->regmap;
+ config.of_node = dev->of_node;
- rdev = regulator_register(&da9210_reg, &config);
+ rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
return PTR_ERR(rdev);
@@ -165,13 +168,6 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int da9210_i2c_remove(struct i2c_client *i2c)
-{
- struct da9210 *chip = i2c_get_clientdata(i2c);
- regulator_unregister(chip->rdev);
- return 0;
-}
-
static const struct i2c_device_id da9210_i2c_id[] = {
{"da9210", 0},
{},
@@ -185,7 +181,6 @@ static struct i2c_driver da9210_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = da9210_i2c_probe,
- .remove = da9210_i2c_remove,
.id_table = da9210_i2c_id,
};
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
new file mode 100644
index 00000000000..f44818b838d
--- /dev/null
+++ b/drivers/regulator/devres.c
@@ -0,0 +1,415 @@
+/*
+ * devres.c -- Voltage/Current Regulator framework devres implementation.
+ *
+ * Copyright 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+enum {
+ NORMAL_GET,
+ EXCLUSIVE_GET,
+ OPTIONAL_GET,
+};
+
+static void devm_regulator_release(struct device *dev, void *res)
+{
+ regulator_put(*(struct regulator **)res);
+}
+
+static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
+ int get_type)
+{
+ struct regulator **ptr, *regulator;
+
+ ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ switch (get_type) {
+ case NORMAL_GET:
+ regulator = regulator_get(dev, id);
+ break;
+ case EXCLUSIVE_GET:
+ regulator = regulator_get_exclusive(dev, id);
+ break;
+ case OPTIONAL_GET:
+ regulator = regulator_get_optional(dev, id);
+ break;
+ default:
+ regulator = ERR_PTR(-EINVAL);
+ }
+
+ if (!IS_ERR(regulator)) {
+ *ptr = regulator;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return regulator;
+}
+
+/**
+ * devm_regulator_get - Resource managed regulator_get()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get(). Regulators returned from this function are
+ * automatically regulator_put() on driver detach. See regulator_get() for more
+ * information.
+ */
+struct regulator *devm_regulator_get(struct device *dev, const char *id)
+{
+ return _devm_regulator_get(dev, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get);
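A minimal consumer-side sketch (the "vcc" supply name and the probe function are hypothetical); the reference obtained here is dropped automatically when the driver is unbound, so no explicit regulator_put() is needed:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct regulator *vcc;

	/* "vcc" is an illustrative supply name */
	vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	dev_info(&pdev->dev, "vcc is at %d uV\n", regulator_get_voltage(vcc));
	return 0;
}
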
+
+/**
+ * devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_exclusive(). Regulators returned from this function
+ * are automatically regulator_put() on driver detach. See regulator_get() for
+ * more information.
+ */
+struct regulator *devm_regulator_get_exclusive(struct device *dev,
+ const char *id)
+{
+ return _devm_regulator_get(dev, id, EXCLUSIVE_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
+
+/**
+ * devm_regulator_get_optional - Resource managed regulator_get_optional()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_optional(). Regulators returned from this
+ * function are automatically regulator_put() on driver detach. See
+ * regulator_get_optional() for more information.
+ */
+struct regulator *devm_regulator_get_optional(struct device *dev,
+ const char *id)
+{
+ return _devm_regulator_get(dev, id, OPTIONAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
+
+static int devm_regulator_match(struct device *dev, void *res, void *data)
+{
+ struct regulator **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regulator_put - Resource managed regulator_put()
+ * @regulator: regulator to free
+ *
+ * Deallocate a regulator allocated with devm_regulator_get(). Normally
+ * this function will not need to be called and the resource management
+ * code will ensure that the resource is freed.
+ */
+void devm_regulator_put(struct regulator *regulator)
+{
+ int rc;
+
+ rc = devres_release(regulator->dev, devm_regulator_release,
+ devm_regulator_match, regulator);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_put);
+
+/**
+ * devm_regulator_bulk_get - managed get multiple regulator consumers
+ *
+ * @dev: Device to supply
+ * @num_consumers: Number of consumers to register
+ * @consumers: Configuration of consumers; clients are stored here.
+ *
+ * @return 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to get several regulator
+ * consumers in one operation with management, the regulators will
+ * automatically be freed when the device is unbound. If any of the
+ * regulators cannot be acquired then any regulators that were
+ * allocated will be freed before returning to the caller.
+ */
+int devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].consumer = NULL;
+
+ for (i = 0; i < num_consumers; i++) {
+ consumers[i].consumer = devm_regulator_get(dev,
+ consumers[i].supply);
+ if (IS_ERR(consumers[i].consumer)) {
+ ret = PTR_ERR(consumers[i].consumer);
+ dev_err(dev, "Failed to get supply '%s': %d\n",
+ consumers[i].supply, ret);
+ consumers[i].consumer = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < num_consumers && consumers[i].consumer; i++)
+ devm_regulator_put(consumers[i].consumer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
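A sketch of the bulk helper with two hypothetical supplies; the consumers are put automatically on detach, though a real driver would still balance the enable below with regulator_bulk_disable() on remove:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical supply names */
static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "avdd" },
	{ .supply = "dvdd" },
};

static int example_bulk_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(example_supplies),
				      example_supplies);
	if (ret < 0)
		return ret;

	/* all consumers are valid here; they are put automatically on detach */
	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}
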
+
+static void devm_rdev_release(struct device *dev, void *res)
+{
+ regulator_unregister(*(struct regulator_dev **)res);
+}
+
+/**
+ * devm_regulator_register - Resource managed regulator_register()
+ * @dev: device the regulator's lifetime is bound to
+ * @regulator_desc: regulator to register
+ * @config: runtime configuration for regulator
+ *
+ * Called by regulator drivers to register a regulator. Returns a
+ * valid pointer to struct regulator_dev on success or an ERR_PTR() on
+ * error. The regulator will automatically be released when the device
+ * is unbound.
+ */
+struct regulator_dev *devm_regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config)
+{
+ struct regulator_dev **ptr, *rdev;
+
+ ptr = devres_alloc(devm_rdev_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ rdev = regulator_register(regulator_desc, config);
+ if (!IS_ERR(rdev)) {
+ *ptr = rdev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return rdev;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_register);
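A provider-side sketch, assuming a fictional always-on 1.8 V rail; it also exercises the new desc->fixed_uV path from the core.c hunk above, where a regulator with n_voltages == 1 and fixed_uV set needs no voltage ops for the core to report its voltage:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

/* Fictional always-on rail with no controllable ops. */
static struct regulator_ops example_ops = {
	/* fixed_uV with n_voltages == 1 lets the core report the voltage */
};

static const struct regulator_desc example_desc = {
	.name		= "example-1v8",
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.ops		= &example_ops,
	.n_voltages	= 1,
	.fixed_uV	= 1800000,
};

static int example_reg_probe(struct platform_device *pdev)
{
	struct regulator_config cfg = { };
	struct regulator_dev *rdev;

	cfg.dev = &pdev->dev;
	cfg.init_data = dev_get_platdata(&pdev->dev);

	rdev = devm_regulator_register(&pdev->dev, &example_desc, &cfg);

	/* the rdev is unregistered automatically when the device is unbound */
	return PTR_ERR_OR_ZERO(rdev);
}
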
+
+static int devm_rdev_match(struct device *dev, void *res, void *data)
+{
+ struct regulator_dev **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regulator_unregister - Resource managed regulator_unregister()
+ * @dev: device to which the regulator was attached
+ * @rdev: regulator to free
+ *
+ * Unregister a regulator registered with devm_regulator_register().
+ * Normally this function will not need to be called and the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_rdev_release, devm_rdev_match, rdev);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_unregister);
+
+struct regulator_supply_alias_match {
+ struct device *dev;
+ const char *id;
+};
+
+static int devm_regulator_match_supply_alias(struct device *dev, void *res,
+ void *data)
+{
+ struct regulator_supply_alias_match *match = res;
+ struct regulator_supply_alias_match *target = data;
+
+ return match->dev == target->dev && strcmp(match->id, target->id) == 0;
+}
+
+static void devm_regulator_destroy_supply_alias(struct device *dev, void *res)
+{
+ struct regulator_supply_alias_match *match = res;
+
+ regulator_unregister_supply_alias(match->dev, match->id);
+}
+
+/**
+ * devm_regulator_register_supply_alias - Resource managed
+ * regulator_register_supply_alias()
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: Supply name or regulator ID that should be used to lookup the
+ * supply
+ *
+ * The supply alias will automatically be unregistered when the source
+ * device is unbound.
+ */
+int devm_regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ struct regulator_supply_alias_match *match;
+ int ret;
+
+ match = devres_alloc(devm_regulator_destroy_supply_alias,
+ sizeof(struct regulator_supply_alias_match),
+ GFP_KERNEL);
+ if (!match)
+ return -ENOMEM;
+
+ match->dev = dev;
+ match->id = id;
+
+ ret = regulator_register_supply_alias(dev, id, alias_dev, alias_id);
+ if (ret < 0) {
+ devres_free(match);
+ return ret;
+ }
+
+ devres_add(dev, match);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
+
+/**
+ * devm_regulator_unregister_supply_alias - Resource managed
+ * regulator_unregister_supply_alias()
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ *
+ * Unregister an alias registered with
+ * devm_regulator_register_supply_alias(). Normally this function
+ * will not need to be called and the resource management code
+ * will ensure that the resource is freed.
+ */
+void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
+{
+ struct regulator_supply_alias_match match;
+ int rc;
+
+ match.dev = dev;
+ match.id = id;
+
+ rc = devres_release(dev, devm_regulator_destroy_supply_alias,
+ devm_regulator_match_supply_alias, &match);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias);
+
+/**
+ * devm_regulator_bulk_register_supply_alias - Managed register
+ * multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: List of supply names or regulator IDs that should be used to
+ * lookup the supply
+ * @num_id: Number of aliases to register
+ *
+ * @return 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to register several supply
+ * aliases in one operation; the aliases will be automatically
+ * unregistered when the source device is unbound. If any of the
+ * aliases cannot be registered, any aliases that were registered
+ * will be removed before returning to the caller.
+ */
+int devm_regulator_bulk_register_supply_alias(struct device *dev,
+ const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_id; ++i) {
+ ret = devm_regulator_register_supply_alias(dev, id[i],
+ alias_dev,
+ alias_id[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_err(dev,
+ "Failed to create supply alias %s,%s -> %s,%s\n",
+ id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));
+
+ while (--i >= 0)
+ devm_regulator_unregister_supply_alias(dev, id[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
+
+/**
+ * devm_regulator_bulk_unregister_supply_alias - Managed unregister
+ * multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @num_id: Number of aliases to unregister
+ *
+ * Unregister aliases registered with
+ * devm_regulator_bulk_register_supply_alias(). Normally this function
+ * will not need to be called and the resource management code
+ * will ensure that the resource is freed.
+ */
+void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id)
+{
+ int i;
+
+ for (i = 0; i < num_id; ++i)
+ devm_regulator_unregister_supply_alias(dev, id[i]);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_unregister_supply_alias);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 70b7220c587..7ca3d9e3b0f 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -218,9 +218,8 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->vsel_mask = VSEL_NSEL_MASK;
rdesc->owner = THIS_MODULE;
- di->rdev = regulator_register(&di->desc, config);
+ di->rdev = devm_regulator_register(di->dev, &di->desc, config);
return PTR_ERR_OR_ZERO(di->rdev);
-
}
static struct regmap_config fan53555_regmap_config = {
@@ -291,14 +290,6 @@ static int fan53555_regulator_probe(struct i2c_client *client,
}
-static int fan53555_regulator_remove(struct i2c_client *client)
-{
- struct fan53555_device_info *di = i2c_get_clientdata(client);
-
- regulator_unregister(di->rdev);
- return 0;
-}
-
static const struct i2c_device_id fan53555_id[] = {
{"fan53555", -1},
{ },
@@ -309,7 +300,6 @@ static struct i2c_driver fan53555_regulator_driver = {
.name = "fan53555-regulator",
},
.probe = fan53555_regulator_probe,
- .remove = fan53555_regulator_remove,
.id_table = fan53555_id,
};
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 7610920014d..5ea64b94341 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -34,7 +34,6 @@
struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
- int microvolts;
};
@@ -108,30 +107,7 @@ of_get_fixed_voltage_config(struct device *dev)
return config;
}
-static int fixed_voltage_get_voltage(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- if (data->microvolts)
- return data->microvolts;
- else
- return -EINVAL;
-}
-
-static int fixed_voltage_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- if (selector != 0)
- return -EINVAL;
-
- return data->microvolts;
-}
-
static struct regulator_ops fixed_voltage_ops = {
- .get_voltage = fixed_voltage_get_voltage,
- .list_voltage = fixed_voltage_list_voltage,
};
static int reg_fixed_voltage_probe(struct platform_device *pdev)
@@ -186,23 +162,21 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
if (config->microvolts)
drvdata->desc.n_voltages = 1;
- drvdata->microvolts = config->microvolts;
+ drvdata->desc.fixed_uV = config->microvolts;
if (config->gpio >= 0)
cfg.ena_gpio = config->gpio;
cfg.ena_gpio_invert = !config->enable_high;
if (config->enabled_at_boot) {
- if (config->enable_high) {
+ if (config->enable_high)
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- } else {
+ else
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- }
} else {
- if (config->enable_high) {
+ if (config->enable_high)
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- } else {
+ else
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- }
}
if (config->gpio_is_open_drain)
cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN;
@@ -222,7 +196,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
dev_dbg(&pdev->dev, "%s supplying %duV\n", drvdata->desc.name,
- drvdata->microvolts);
+ drvdata->desc.fixed_uV);
return 0;
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 98a98ffa7fe..04406a918c0 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -283,7 +283,6 @@ static int gpio_regulator_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No regulator type set\n");
ret = -EINVAL;
goto err_memgpio;
- break;
}
drvdata->nr_gpios = config->nr_gpios;
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 6e30df14714..e221a271ba5 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -284,9 +284,13 @@ int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
}
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ int linear_max_uV;
+
range = &rdev->desc->linear_ranges[i];
+ linear_max_uV = range->min_uV +
+ (range->max_sel - range->min_sel) * range->uV_step;
- if (!(min_uV <= range->max_uV && max_uV >= range->min_uV))
+ if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV))
continue;
if (min_uV <= range->min_uV)
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
new file mode 100644
index 00000000000..84bbda10c39
--- /dev/null
+++ b/drivers/regulator/internal.h
@@ -0,0 +1,38 @@
+/*
+ * internal.h -- Voltage/Current Regulator framework internal code
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright 2008 SlimLogic Ltd.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __REGULATOR_INTERNAL_H
+#define __REGULATOR_INTERNAL_H
+
+/*
+ * struct regulator
+ *
+ * One for each consumer device.
+ */
+struct regulator {
+ struct device *dev;
+ struct list_head list;
+ unsigned int always_on:1;
+ unsigned int bypass:1;
+ int uA_load;
+ int min_uV;
+ int max_uV;
+ char *supply_name;
+ struct device_attribute dev_attr;
+ struct regulator_dev *rdev;
+ struct dentry *debugfs;
+};
+
+#endif
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 88c1a3acf56..6e5da95fa02 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -112,7 +112,7 @@ static int isl6271a_probe(struct i2c_client *i2c,
struct regulator_config config = { };
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct isl_pmic *pmic;
- int err, i;
+ int i;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
@@ -133,32 +133,17 @@ static int isl6271a_probe(struct i2c_client *i2c,
config.init_data = NULL;
config.driver_data = pmic;
- pmic->rdev[i] = regulator_register(&isl_rd[i], &config);
+ pmic->rdev[i] = devm_regulator_register(&i2c->dev, &isl_rd[i],
+ &config);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
- err = PTR_ERR(pmic->rdev[i]);
- goto error;
+ return PTR_ERR(pmic->rdev[i]);
}
}
i2c_set_clientdata(i2c, pmic);
return 0;
-
-error:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return err;
-}
-
-static int isl6271a_remove(struct i2c_client *i2c)
-{
- struct isl_pmic *pmic = i2c_get_clientdata(i2c);
- int i;
-
- for (i = 0; i < 3; i++)
- regulator_unregister(pmic->rdev[i]);
- return 0;
}
static const struct i2c_device_id isl6271a_id[] = {
@@ -174,7 +159,6 @@ static struct i2c_driver isl6271a_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = isl6271a_probe,
- .remove = isl6271a_remove,
.id_table = isl6271a_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 5a4604ee5ea..947c05ffe0a 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -474,8 +474,8 @@ static int lp3971_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id lp3971_i2c_id[] = {
- { "lp3971", 0 },
- { }
+ { "lp3971", 0 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 2b84b727a3c..2e4734ff79f 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -785,7 +785,7 @@ static int lp872x_regulator_register(struct lp872x *lp)
struct regulator_desc *desc;
struct regulator_config cfg = { };
struct regulator_dev *rdev;
- int i, ret;
+ int i;
for (i = 0; i < lp->num_regulators; i++) {
desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
@@ -796,34 +796,16 @@ static int lp872x_regulator_register(struct lp872x *lp)
cfg.driver_data = lp;
cfg.regmap = lp->regmap;
- rdev = regulator_register(desc, &cfg);
+ rdev = devm_regulator_register(lp->dev, desc, &cfg);
if (IS_ERR(rdev)) {
dev_err(lp->dev, "regulator register err");
- ret = PTR_ERR(rdev);
- goto err;
+ return PTR_ERR(rdev);
}
*(lp->regulators + i) = rdev;
}
return 0;
-err:
- while (--i >= 0) {
- rdev = *(lp->regulators + i);
- regulator_unregister(rdev);
- }
- return ret;
-}
-
-static void lp872x_regulator_unregister(struct lp872x *lp)
-{
- struct regulator_dev *rdev;
- int i;
-
- for (i = 0; i < lp->num_regulators; i++) {
- rdev = *(lp->regulators + i);
- regulator_unregister(rdev);
- }
}
static const struct regmap_config lp872x_regmap_config = {
@@ -979,14 +961,6 @@ err_dev:
return ret;
}
-static int lp872x_remove(struct i2c_client *cl)
-{
- struct lp872x *lp = i2c_get_clientdata(cl);
-
- lp872x_regulator_unregister(lp);
- return 0;
-}
-
static const struct of_device_id lp872x_dt_ids[] = {
{ .compatible = "ti,lp8720", },
{ .compatible = "ti,lp8725", },
@@ -1008,7 +982,6 @@ static struct i2c_driver lp872x_driver = {
.of_match_table = of_match_ptr(lp872x_dt_ids),
},
.probe = lp872x_probe,
- .remove = lp872x_remove,
.id_table = lp872x_ids,
};
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index 0b015f2a7fd..948afc249e2 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -515,7 +515,7 @@ static int lp8788_buck_probe(struct platform_device *pdev)
cfg.driver_data = buck;
cfg.regmap = lp->regmap;
- rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
+ rdev = devm_regulator_register(&pdev->dev, &lp8788_buck_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "BUCK%d regulator register err = %d\n",
@@ -529,18 +529,8 @@ static int lp8788_buck_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_buck_remove(struct platform_device *pdev)
-{
- struct lp8788_buck *buck = platform_get_drvdata(pdev);
-
- regulator_unregister(buck->regulator);
-
- return 0;
-}
-
static struct platform_driver lp8788_buck_driver = {
.probe = lp8788_buck_probe,
- .remove = lp8788_buck_remove,
.driver = {
.name = LP8788_DEV_BUCK,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 0527d87c6dd..b9a29a29933 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -543,7 +543,7 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
cfg.driver_data = ldo;
cfg.regmap = lp->regmap;
- rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
+ rdev = devm_regulator_register(&pdev->dev, &lp8788_dldo_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "DLDO%d regulator register err = %d\n",
@@ -557,18 +557,8 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_dldo_remove(struct platform_device *pdev)
-{
- struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver lp8788_dldo_driver = {
.probe = lp8788_dldo_probe,
- .remove = lp8788_dldo_remove,
.driver = {
.name = LP8788_DEV_DLDO,
.owner = THIS_MODULE,
@@ -603,7 +593,7 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
cfg.driver_data = ldo;
cfg.regmap = lp->regmap;
- rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
+ rdev = devm_regulator_register(&pdev->dev, &lp8788_aldo_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "ALDO%d regulator register err = %d\n",
@@ -617,18 +607,8 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_aldo_remove(struct platform_device *pdev)
-{
- struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver lp8788_aldo_driver = {
.probe = lp8788_aldo_probe,
- .remove = lp8788_aldo_remove,
.driver = {
.name = LP8788_DEV_ALDO,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 3a599ee0a45..e242dd316d3 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -166,7 +166,7 @@ static int max1586_pmic_probe(struct i2c_client *client,
struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max1586_data *max1586;
- int i, id, ret = -ENOMEM;
+ int i, id;
max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) +
sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
@@ -193,7 +193,7 @@ static int max1586_pmic_probe(struct i2c_client *client,
continue;
if (id < MAX1586_V3 || id > MAX1586_V6) {
dev_err(&client->dev, "invalid regulator id %d\n", id);
- goto err;
+ return -EINVAL;
}
if (id == MAX1586_V3) {
@@ -207,33 +207,18 @@ static int max1586_pmic_probe(struct i2c_client *client,
config.init_data = pdata->subdevs[i].platform_data;
config.driver_data = max1586;
- rdev[i] = regulator_register(&max1586_reg[id], &config);
+ rdev[i] = devm_regulator_register(&client->dev,
+ &max1586_reg[id], &config);
if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
max1586_reg[id].name);
- goto err;
+ return PTR_ERR(rdev[i]);
}
}
i2c_set_clientdata(client, max1586);
dev_info(&client->dev, "Maxim 1586 regulator driver loaded\n");
return 0;
-
-err:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
- return ret;
-}
-
-static int max1586_pmic_remove(struct i2c_client *client)
-{
- struct max1586_data *max1586 = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i <= MAX1586_V6; i++)
- regulator_unregister(max1586->rdev[i]);
- return 0;
}
static const struct i2c_device_id max1586_id[] = {
@@ -244,7 +229,6 @@ MODULE_DEVICE_TABLE(i2c, max1586_id);
static struct i2c_driver max1586_pmic_driver = {
.probe = max1586_pmic_probe,
- .remove = max1586_pmic_remove,
.driver = {
.name = "max1586",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index f563057e569..ae001ccf26f 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -478,32 +478,16 @@ static int max77686_pmic_probe(struct platform_device *pdev)
config.of_node = pdata->regulators[i].of_node;
max77686->opmode[i] = regulators[i].enable_mask;
- max77686->rdev[i] = regulator_register(&regulators[i], &config);
+ max77686->rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
if (IS_ERR(max77686->rdev[i])) {
- ret = PTR_ERR(max77686->rdev[i]);
dev_err(&pdev->dev,
"regulator init failed for %d\n", i);
- max77686->rdev[i] = NULL;
- goto err;
+ return PTR_ERR(max77686->rdev[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(max77686->rdev[i]);
- return ret;
-}
-
-static int max77686_pmic_remove(struct platform_device *pdev)
-{
- struct max77686_data *max77686 = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < MAX77686_REGULATORS; i++)
- regulator_unregister(max77686->rdev[i]);
-
- return 0;
}
static const struct platform_device_id max77686_pmic_id[] = {
@@ -518,7 +502,6 @@ static struct platform_driver max77686_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max77686_pmic_probe,
- .remove = max77686_pmic_remove,
.id_table = max77686_pmic_id,
};
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index ce4b96c15eb..feb20bf4cca 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -230,7 +230,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max77693_pmic_dev *max77693_pmic;
struct max77693_regulator_data *rdata = NULL;
- int num_rdata, i, ret;
+ int num_rdata, i;
struct regulator_config config;
num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
@@ -266,36 +266,16 @@ static int max77693_pmic_probe(struct platform_device *pdev)
config.init_data = rdata[i].initdata;
config.of_node = rdata[i].of_node;
- max77693_pmic->rdev[i] = regulator_register(&regulators[id],
- &config);
+ max77693_pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[id], &config);
if (IS_ERR(max77693_pmic->rdev[i])) {
- ret = PTR_ERR(max77693_pmic->rdev[i]);
dev_err(max77693_pmic->dev,
"Failed to initialize regulator-%d\n", id);
- max77693_pmic->rdev[i] = NULL;
- goto err;
+ return PTR_ERR(max77693_pmic->rdev[i]);
}
}
return 0;
- err:
- while (--i >= 0)
- regulator_unregister(max77693_pmic->rdev[i]);
-
- return ret;
-}
-
-static int max77693_pmic_remove(struct platform_device *pdev)
-{
- struct max77693_pmic_dev *max77693_pmic = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = max77693_pmic->rdev;
- int i;
-
- for (i = 0; i < max77693_pmic->num_regulators; i++)
- if (rdev[i])
- regulator_unregister(rdev[i]);
-
- return 0;
}
static const struct platform_device_id max77693_pmic_id[] = {
@@ -311,7 +291,6 @@ static struct platform_driver max77693_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max77693_pmic_probe,
- .remove = max77693_pmic_remove,
.id_table = max77693_pmic_id,
};
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 19c6f08eafd..7f049c92ee5 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -234,7 +234,8 @@ static int max8649_regulator_probe(struct i2c_client *client,
config.driver_data = info;
config.regmap = info->regmap;
- info->regulator = regulator_register(&dcdc_desc, &config);
+ info->regulator = devm_regulator_register(&client->dev, &dcdc_desc,
+ &config);
if (IS_ERR(info->regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
@@ -244,16 +245,6 @@ static int max8649_regulator_probe(struct i2c_client *client,
return 0;
}
-static int max8649_regulator_remove(struct i2c_client *client)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(client);
-
- if (info)
- regulator_unregister(info->regulator);
-
- return 0;
-}
-
static const struct i2c_device_id max8649_id[] = {
{ "max8649", 0 },
{ }
@@ -262,7 +253,6 @@ MODULE_DEVICE_TABLE(i2c, max8649_id);
static struct i2c_driver max8649_driver = {
.probe = max8649_regulator_probe,
- .remove = max8649_regulator_remove,
.driver = {
.name = "max8649",
},
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 144bcacd734..8d94d3d7f97 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -439,7 +439,7 @@ static int max8660_probe(struct i2c_client *client,
for (i = 0; i < pdata->num_subdevs; i++) {
if (!pdata->subdevs[i].platform_data)
- goto err_out;
+ return ret;
boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
@@ -465,7 +465,7 @@ static int max8660_probe(struct i2c_client *client,
case MAX8660_V7:
if (type == MAX8661) {
dev_err(dev, "Regulator not on this chip!\n");
- goto err_out;
+ return -EINVAL;
}
if (boot_on)
@@ -475,7 +475,7 @@ static int max8660_probe(struct i2c_client *client,
default:
dev_err(dev, "invalid regulator %s\n",
pdata->subdevs[i].name);
- goto err_out;
+ return ret;
}
}
@@ -489,33 +489,18 @@ static int max8660_probe(struct i2c_client *client,
config.of_node = of_node[i];
config.driver_data = max8660;
- rdev[i] = regulator_register(&max8660_reg[id], &config);
+ rdev[i] = devm_regulator_register(&client->dev,
+ &max8660_reg[id], &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
- dev_err(dev, "failed to register %s\n",
+ dev_err(&client->dev, "failed to register %s\n",
max8660_reg[id].name);
- goto err_unregister;
+ return PTR_ERR(rdev[i]);
}
}
i2c_set_clientdata(client, max8660);
return 0;
-
-err_unregister:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
-err_out:
- return ret;
-}
-
-static int max8660_remove(struct i2c_client *client)
-{
- struct max8660 *max8660 = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < MAX8660_V_END; i++)
- regulator_unregister(max8660->rdev[i]);
- return 0;
}
static const struct i2c_device_id max8660_id[] = {
@@ -527,7 +512,6 @@ MODULE_DEVICE_TABLE(i2c, max8660_id);
static struct i2c_driver max8660_driver = {
.probe = max8660_probe,
- .remove = max8660_remove,
.driver = {
.name = "max8660",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 4568c15fa78..0c5fe6c6ac2 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -350,33 +350,17 @@ static int max8907_regulator_probe(struct platform_device *pdev)
pmic->desc[i].ops = &max8907_out5v_hwctl_ops;
}
- pmic->rdev[i] = regulator_register(&pmic->desc[i], &config);
+ pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ &pmic->desc[i], &config);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pmic->desc[i].name);
- ret = PTR_ERR(pmic->rdev[i]);
- goto err_unregister_regulator;
+ return PTR_ERR(pmic->rdev[i]);
}
}
return 0;
-
-err_unregister_regulator:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return ret;
-}
-
-static int max8907_regulator_remove(struct platform_device *pdev)
-{
- struct max8907_regulator *pmic = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < MAX8907_NUM_REGULATORS; i++)
- regulator_unregister(pmic->rdev[i]);
-
- return 0;
}
static struct platform_driver max8907_regulator_driver = {
@@ -385,7 +369,6 @@ static struct platform_driver max8907_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = max8907_regulator_probe,
- .remove = max8907_regulator_remove,
};
static int __init max8907_regulator_init(void)
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index d80b5fa758a..759510789e7 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -312,7 +312,7 @@ static int max8925_regulator_probe(struct platform_device *pdev)
if (pdata)
config.init_data = pdata;
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
@@ -323,22 +323,12 @@ static int max8925_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int max8925_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver max8925_regulator_driver = {
.driver = {
.name = "max8925-regulator",
.owner = THIS_MODULE,
},
.probe = max8925_regulator_probe,
- .remove = max8925_regulator_remove,
};
static int __init max8925_regulator_init(void)
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 5b77ab7762e..892aa1e5b96 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -467,7 +467,7 @@ static int max8973_probe(struct i2c_client *client,
config.regmap = max->regmap;
/* Register the regulators */
- rdev = regulator_register(&max->desc, &config);
+ rdev = devm_regulator_register(&client->dev, &max->desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(max->dev, "regulator register failed, err %d\n", ret);
@@ -478,14 +478,6 @@ static int max8973_probe(struct i2c_client *client,
return 0;
}
-static int max8973_remove(struct i2c_client *client)
-{
- struct max8973_chip *max = i2c_get_clientdata(client);
-
- regulator_unregister(max->rdev);
- return 0;
-}
-
static const struct i2c_device_id max8973_id[] = {
{.name = "max8973",},
{},
@@ -499,7 +491,6 @@ static struct i2c_driver max8973_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max8973_probe,
- .remove = max8973_remove,
.id_table = max8973_id,
};
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index df20069f053..2d618fc9c1a 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -690,8 +690,9 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
if (max8997->ignore_gpiodvs_side_effect == false)
return -EINVAL;
- dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
- " %d -> %d\n", max8997->buck125_gpioindex, tmp_idx);
+ dev_warn(&rdev->dev,
+ "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET: %d -> %d\n",
+ max8997->buck125_gpioindex, tmp_idx);
out:
if (new_idx < 0 || new_val < 0)
@@ -1081,7 +1082,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
pdata->buck1_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
- goto err_out;
+ return ret;
max8997->buck2_vol[i] = ret =
max8997_get_voltage_proper_val(
@@ -1090,7 +1091,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
pdata->buck2_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
- goto err_out;
+ return ret;
max8997->buck5_vol[i] = ret =
max8997_get_voltage_proper_val(
@@ -1099,7 +1100,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
pdata->buck5_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
- goto err_out;
+ return ret;
if (max_buck1 < max8997->buck1_vol[i])
max_buck1 = max8997->buck1_vol[i];
@@ -1143,24 +1144,23 @@ static int max8997_pmic_probe(struct platform_device *pdev)
!gpio_is_valid(pdata->buck125_gpios[1]) ||
!gpio_is_valid(pdata->buck125_gpios[2])) {
dev_err(&pdev->dev, "GPIO NOT VALID\n");
- ret = -EINVAL;
- goto err_out;
+ return -EINVAL;
}
ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
"MAX8997 SET1");
if (ret)
- goto err_out;
+ return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
"MAX8997 SET2");
if (ret)
- goto err_out;
+ return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
"MAX8997 SET3");
if (ret)
- goto err_out;
+ return ret;
gpio_direction_output(pdata->buck125_gpios[0],
(max8997->buck125_gpioindex >> 2)
@@ -1205,33 +1205,16 @@ static int max8997_pmic_probe(struct platform_device *pdev)
config.driver_data = max8997;
config.of_node = pdata->regulators[i].reg_node;
- rdev[i] = regulator_register(&regulators[id], &config);
+ rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
+ &config);
if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
dev_err(max8997->dev, "regulator init failed for %d\n",
id);
- rdev[i] = NULL;
- goto err;
+ return PTR_ERR(rdev[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
-err_out:
- return ret;
-}
-
-static int max8997_pmic_remove(struct platform_device *pdev)
-{
- struct max8997_data *max8997 = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = max8997->rdev;
- int i;
-
- for (i = 0; i < max8997->num_regulators; i++)
- regulator_unregister(rdev[i]);
- return 0;
}
static const struct platform_device_id max8997_pmic_id[] = {
@@ -1246,7 +1229,6 @@ static struct platform_driver max8997_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max8997_pmic_probe,
- .remove = max8997_pmic_remove,
.id_table = max8997_pmic_id,
};
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index a4c53b2d1aa..ae3f0656feb 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -790,16 +790,14 @@ static int max8998_pmic_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"MAX8998 SET1 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set1);
- ret = -EIO;
- goto err_out;
+ return -EIO;
}
/* Check if SET2 is not equal to 0 */
if (!pdata->buck1_set2) {
dev_err(&pdev->dev,
"MAX8998 SET2 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set2);
- ret = -EIO;
- goto err_out;
+ return -EIO;
}
gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -823,7 +821,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
ret = max8998_write_reg(i2c,
MAX8998_REG_BUCK1_VOLTAGE1 + v, i);
if (ret)
- goto err_out;
+ return ret;
}
}
@@ -833,8 +831,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"MAX8998 SET3 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck2_set3);
- ret = -EIO;
- goto err_out;
+ return -EIO;
}
gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
gpio_direction_output(pdata->buck2_set3,
@@ -852,7 +849,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
ret = max8998_write_reg(i2c,
MAX8998_REG_BUCK2_VOLTAGE1 + v, i);
if (ret)
- goto err_out;
+ return ret;
}
}
@@ -875,34 +872,19 @@ static int max8998_pmic_probe(struct platform_device *pdev)
config.init_data = pdata->regulators[i].initdata;
config.driver_data = max8998;
- rdev[i] = regulator_register(&regulators[index], &config);
+ rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[index], &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8998->dev, "regulator %s init failed (%d)\n",
regulators[index].name, ret);
rdev[i] = NULL;
- goto err;
+ return ret;
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
-err_out:
- return ret;
-}
-
-static int max8998_pmic_remove(struct platform_device *pdev)
-{
- struct max8998_data *max8998 = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = max8998->rdev;
- int i;
-
- for (i = 0; i < max8998->num_regulators; i++)
- regulator_unregister(rdev[i]);
- return 0;
}
static const struct platform_device_id max8998_pmic_id[] = {
@@ -918,7 +900,6 @@ static struct platform_driver max8998_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max8998_pmic_probe,
- .remove = max8998_pmic_remove,
.id_table = max8998_pmic_id,
};
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 5ff99d2703d..7f4a67edf78 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -258,34 +258,34 @@ static struct mc13xxx_regulator mc13783_regulators[] = {
MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,
mc13783_violo_val),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
mc13783_vdig_val),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0,
mc13783_vgen_val),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfdig_val),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfref_val),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfcp_val),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0,
mc13783_vsim_val),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0,
mc13783_vesim_val),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
mc13783_vcam_val),
MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,
mc13783_vvib_val),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
@@ -400,7 +400,7 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
dev_get_platdata(&pdev->dev);
struct mc13xxx_regulator_init_data *mc13xxx_data;
struct regulator_config config = { };
- int i, ret, num_regulators;
+ int i, num_regulators;
num_regulators = mc13xxx_get_num_regulators_dt(pdev);
@@ -444,32 +444,16 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
config.driver_data = priv;
config.of_node = node;
- priv->regulators[i] = regulator_register(desc, &config);
+ priv->regulators[i] = devm_regulator_register(&pdev->dev, desc,
+ &config);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
mc13783_regulators[i].desc.name);
- ret = PTR_ERR(priv->regulators[i]);
- goto err;
+ return PTR_ERR(priv->regulators[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(priv->regulators[i]);
-
- return ret;
-}
-
-static int mc13783_regulator_remove(struct platform_device *pdev)
-{
- struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < priv->num_regulators; i++)
- regulator_unregister(priv->regulators[i]);
-
- return 0;
}
static struct platform_driver mc13783_regulator_driver = {
@@ -477,7 +461,6 @@ static struct platform_driver mc13783_regulator_driver = {
.name = "mc13783-regulator",
.owner = THIS_MODULE,
},
- .remove = mc13783_regulator_remove,
.probe = mc13783_regulator_probe,
};
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1037e07937c..96c9f80d955 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -611,43 +611,27 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
config.driver_data = priv;
config.of_node = node;
- priv->regulators[i] = regulator_register(desc, &config);
+ priv->regulators[i] = devm_regulator_register(&pdev->dev, desc,
+ &config);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
mc13892_regulators[i].desc.name);
- ret = PTR_ERR(priv->regulators[i]);
- goto err;
+ return PTR_ERR(priv->regulators[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(priv->regulators[i]);
- return ret;
err_unlock:
mc13xxx_unlock(mc13892);
return ret;
}
-static int mc13892_regulator_remove(struct platform_device *pdev)
-{
- struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < priv->num_regulators; i++)
- regulator_unregister(priv->regulators[i]);
-
- return 0;
-}
-
static struct platform_driver mc13892_regulator_driver = {
.driver = {
.name = "mc13892-regulator",
.owner = THIS_MODULE,
},
- .remove = mc13892_regulator_remove,
.probe = mc13892_regulator_probe,
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 7827384680d..ea4f36f2cbe 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -23,6 +23,8 @@ static void of_get_regulation_constraints(struct device_node *np,
const __be32 *min_uA, *max_uA, *ramp_delay;
struct property *prop;
struct regulation_constraints *constraints = &(*init_data)->constraints;
+ int ret;
+ u32 pval;
constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -73,6 +75,10 @@ static void of_get_regulation_constraints(struct device_node *np,
else
constraints->ramp_disable = true;
}
+
+ ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval);
+ if (!ret)
+ constraints->enable_time = pval;
}
/**
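The of_regulator.c hunk above adds an optional "regulator-enable-ramp-delay" constraint. of_property_read_u32() returns 0 only when the property exists and parses cleanly, so the constraint is touched only when the DT actually provides it. A small sketch of the same optional-u32 pattern (the helper name is made up):

#include <linux/of.h>
#include <linux/regulator/machine.h>

/* Sketch: override a constraint only if the optional DT property exists. */
static void foo_read_enable_ramp_delay(struct device_node *np,
				       struct regulation_constraints *c)
{
	u32 pval;

	if (!of_property_read_u32(np, "regulator-enable-ramp-delay", &pval))
		c->enable_time = pval;	/* value is in microseconds */
}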
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 7e2b165972e..9c62b1d3468 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -33,6 +33,7 @@ struct regs_info {
u8 vsel_addr;
u8 ctrl_addr;
u8 tstep_addr;
+ int sleep_id;
};
static const struct regs_info palmas_regs_info[] = {
@@ -42,6 +43,7 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS12_VOLTAGE,
.ctrl_addr = PALMAS_SMPS12_CTRL,
.tstep_addr = PALMAS_SMPS12_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
},
{
.name = "SMPS123",
@@ -49,12 +51,14 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS12_VOLTAGE,
.ctrl_addr = PALMAS_SMPS12_CTRL,
.tstep_addr = PALMAS_SMPS12_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
},
{
.name = "SMPS3",
.sname = "smps3-in",
.vsel_addr = PALMAS_SMPS3_VOLTAGE,
.ctrl_addr = PALMAS_SMPS3_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS3,
},
{
.name = "SMPS45",
@@ -62,6 +66,7 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS45_VOLTAGE,
.ctrl_addr = PALMAS_SMPS45_CTRL,
.tstep_addr = PALMAS_SMPS45_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
},
{
.name = "SMPS457",
@@ -69,6 +74,7 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS45_VOLTAGE,
.ctrl_addr = PALMAS_SMPS45_CTRL,
.tstep_addr = PALMAS_SMPS45_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
},
{
.name = "SMPS6",
@@ -76,12 +82,14 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS6_VOLTAGE,
.ctrl_addr = PALMAS_SMPS6_CTRL,
.tstep_addr = PALMAS_SMPS6_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS6,
},
{
.name = "SMPS7",
.sname = "smps7-in",
.vsel_addr = PALMAS_SMPS7_VOLTAGE,
.ctrl_addr = PALMAS_SMPS7_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS7,
},
{
.name = "SMPS8",
@@ -89,108 +97,128 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS8_VOLTAGE,
.ctrl_addr = PALMAS_SMPS8_CTRL,
.tstep_addr = PALMAS_SMPS8_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS8,
},
{
.name = "SMPS9",
.sname = "smps9-in",
.vsel_addr = PALMAS_SMPS9_VOLTAGE,
.ctrl_addr = PALMAS_SMPS9_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS9,
},
{
.name = "SMPS10_OUT2",
.sname = "smps10-in",
.ctrl_addr = PALMAS_SMPS10_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
},
{
.name = "SMPS10_OUT1",
.sname = "smps10-out2",
.ctrl_addr = PALMAS_SMPS10_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
},
{
.name = "LDO1",
.sname = "ldo1-in",
.vsel_addr = PALMAS_LDO1_VOLTAGE,
.ctrl_addr = PALMAS_LDO1_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO1,
},
{
.name = "LDO2",
.sname = "ldo2-in",
.vsel_addr = PALMAS_LDO2_VOLTAGE,
.ctrl_addr = PALMAS_LDO2_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO2,
},
{
.name = "LDO3",
.sname = "ldo3-in",
.vsel_addr = PALMAS_LDO3_VOLTAGE,
.ctrl_addr = PALMAS_LDO3_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO3,
},
{
.name = "LDO4",
.sname = "ldo4-in",
.vsel_addr = PALMAS_LDO4_VOLTAGE,
.ctrl_addr = PALMAS_LDO4_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO4,
},
{
.name = "LDO5",
.sname = "ldo5-in",
.vsel_addr = PALMAS_LDO5_VOLTAGE,
.ctrl_addr = PALMAS_LDO5_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO5,
},
{
.name = "LDO6",
.sname = "ldo6-in",
.vsel_addr = PALMAS_LDO6_VOLTAGE,
.ctrl_addr = PALMAS_LDO6_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO6,
},
{
.name = "LDO7",
.sname = "ldo7-in",
.vsel_addr = PALMAS_LDO7_VOLTAGE,
.ctrl_addr = PALMAS_LDO7_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO7,
},
{
.name = "LDO8",
.sname = "ldo8-in",
.vsel_addr = PALMAS_LDO8_VOLTAGE,
.ctrl_addr = PALMAS_LDO8_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO8,
},
{
.name = "LDO9",
.sname = "ldo9-in",
.vsel_addr = PALMAS_LDO9_VOLTAGE,
.ctrl_addr = PALMAS_LDO9_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO9,
},
{
.name = "LDOLN",
.sname = "ldoln-in",
.vsel_addr = PALMAS_LDOLN_VOLTAGE,
.ctrl_addr = PALMAS_LDOLN_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDOLN,
},
{
.name = "LDOUSB",
.sname = "ldousb-in",
.vsel_addr = PALMAS_LDOUSB_VOLTAGE,
.ctrl_addr = PALMAS_LDOUSB_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDOUSB,
},
{
.name = "REGEN1",
.ctrl_addr = PALMAS_REGEN1_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_REGEN1,
},
{
.name = "REGEN2",
.ctrl_addr = PALMAS_REGEN2_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_REGEN2,
},
{
.name = "REGEN3",
.ctrl_addr = PALMAS_REGEN3_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_REGEN3,
},
{
.name = "SYSEN1",
.ctrl_addr = PALMAS_SYSEN1_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SYSEN1,
},
{
.name = "SYSEN2",
.ctrl_addr = PALMAS_SYSEN2_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SYSEN2,
},
};
@@ -478,6 +506,17 @@ static struct regulator_ops palmas_ops_smps = {
.set_ramp_delay = palmas_smps_set_ramp_delay,
};
+static struct regulator_ops palmas_ops_ext_control_smps = {
+ .set_mode = palmas_set_mode_smps,
+ .get_mode = palmas_get_mode_smps,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = palmas_list_voltage_smps,
+ .map_voltage = palmas_map_voltage_smps,
+ .set_voltage_time_sel = palma_smps_set_voltage_smps_time_sel,
+ .set_ramp_delay = palmas_smps_set_ramp_delay,
+};
+
static struct regulator_ops palmas_ops_smps10 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -513,12 +552,37 @@ static struct regulator_ops palmas_ops_ldo = {
.map_voltage = regulator_map_voltage_linear,
};
+static struct regulator_ops palmas_ops_ext_control_ldo = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
static struct regulator_ops palmas_ops_extreg = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
+static struct regulator_ops palmas_ops_ext_control_extreg = {
+};
+
+static int palmas_regulator_config_external(struct palmas *palmas, int id,
+ struct palmas_reg_init *reg_init)
+{
+ int sleep_id = palmas_regs_info[id].sleep_id;
+ int ret;
+
+ ret = palmas_ext_control_req_config(palmas, sleep_id,
+ reg_init->roof_floor, true);
+ if (ret < 0)
+ dev_err(palmas->dev,
+ "Ext control config for regulator %d failed %d\n",
+ id, ret);
+ return ret;
+}
+
/*
* setup the hardware based sleep configuration of the SMPS/LDO regulators
* from the platform data. This is different to the software based control
@@ -577,7 +641,22 @@ static int palmas_smps_init(struct palmas *palmas, int id,
return ret;
}
+ if (reg_init->roof_floor && (id != PALMAS_REG_SMPS10_OUT1) &&
+ (id != PALMAS_REG_SMPS10_OUT2)) {
+ /* Enable externally controlled regulator */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_smps_read(palmas, addr, &reg);
+ if (ret < 0)
+ return ret;
+ if (!(reg & PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK)) {
+ reg |= SMPS_CTRL_MODE_ON;
+ ret = palmas_smps_write(palmas, addr, reg);
+ if (ret < 0)
+ return ret;
+ }
+ return palmas_regulator_config_external(palmas, id, reg_init);
+ }
return 0;
}
@@ -608,6 +687,20 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
if (ret)
return ret;
+ if (reg_init->roof_floor) {
+ /* Enable externally controlled regulator */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_update_bits(palmas, PALMAS_LDO_BASE,
+ addr, PALMAS_LDO1_CTRL_MODE_ACTIVE,
+ PALMAS_LDO1_CTRL_MODE_ACTIVE);
+ if (ret < 0) {
+ dev_err(palmas->dev,
+ "LDO Register 0x%02x update failed %d\n",
+ addr, ret);
+ return ret;
+ }
+ return palmas_regulator_config_external(palmas, id, reg_init);
+ }
return 0;
}
@@ -630,6 +723,21 @@ static int palmas_extreg_init(struct palmas *palmas, int id,
addr, ret);
return ret;
}
+
+ if (reg_init->roof_floor) {
+ /* Enable externally controlled regulator */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+ addr, PALMAS_REGEN1_CTRL_MODE_ACTIVE,
+ PALMAS_REGEN1_CTRL_MODE_ACTIVE);
+ if (ret < 0) {
+ dev_err(palmas->dev,
+ "Resource Register 0x%02x update failed %d\n",
+ addr, ret);
+ return ret;
+ }
+ return palmas_regulator_config_external(palmas, id, reg_init);
+ }
return 0;
}
@@ -712,7 +820,7 @@ static void palmas_dt_to_pdata(struct device *dev,
int idx, ret;
node = of_node_get(node);
- regulators = of_find_node_by_name(node, "regulators");
+ regulators = of_get_child_by_name(node, "regulators");
if (!regulators) {
dev_info(dev, "regulator node not found\n");
return;
@@ -740,9 +848,35 @@ static void palmas_dt_to_pdata(struct device *dev,
of_property_read_bool(palmas_matches[idx].of_node,
"ti,warm-reset");
- pdata->reg_init[idx]->roof_floor =
- of_property_read_bool(palmas_matches[idx].of_node,
- "ti,roof-floor");
+ ret = of_property_read_u32(palmas_matches[idx].of_node,
+ "ti,roof-floor", &prop);
+ /* EINVAL: Property not found */
+ if (ret != -EINVAL) {
+ int econtrol;
+
+ /* use default value, when no value is specified */
+ econtrol = PALMAS_EXT_CONTROL_NSLEEP;
+ if (!ret) {
+ switch (prop) {
+ case 1:
+ econtrol = PALMAS_EXT_CONTROL_ENABLE1;
+ break;
+ case 2:
+ econtrol = PALMAS_EXT_CONTROL_ENABLE2;
+ break;
+ case 3:
+ econtrol = PALMAS_EXT_CONTROL_NSLEEP;
+ break;
+ default:
+ WARN_ON(1);
+ dev_warn(dev,
+ "%s: Invalid roof-floor option: %u\n",
+ palmas_matches[idx].name, prop);
+ break;
+ }
+ }
+ pdata->reg_init[idx]->roof_floor = econtrol;
+ }
ret = of_property_read_u32(palmas_matches[idx].of_node,
"ti,mode-sleep", &prop);
@@ -856,7 +990,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"reading TSTEP reg failed: %d\n", ret);
- goto err_unregister_regulator;
+ return ret;
}
pmic->desc[id].ramp_delay =
palmas_smps_ramp_delay[reg & 0x3];
@@ -868,7 +1002,9 @@ static int palmas_regulators_probe(struct platform_device *pdev)
reg_init = pdata->reg_init[id];
ret = palmas_smps_init(palmas, id, reg_init);
if (ret)
- goto err_unregister_regulator;
+ return ret;
+ } else {
+ reg_init = NULL;
}
/* Register the regulators */
@@ -909,11 +1045,15 @@ static int palmas_regulators_probe(struct platform_device *pdev)
ret = palmas_smps_read(pmic->palmas, addr, &reg);
if (ret)
- goto err_unregister_regulator;
+ return ret;
if (reg & PALMAS_SMPS12_VOLTAGE_RANGE)
pmic->range[id] = 1;
- pmic->desc[id].ops = &palmas_ops_smps;
+ if (reg_init && reg_init->roof_floor)
+ pmic->desc[id].ops =
+ &palmas_ops_ext_control_smps;
+ else
+ pmic->desc[id].ops = &palmas_ops_smps;
pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
pmic->desc[id].vsel_reg =
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
@@ -925,7 +1065,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
addr = palmas_regs_info[id].ctrl_addr;
ret = palmas_smps_read(pmic->palmas, addr, &reg);
if (ret)
- goto err_unregister_regulator;
+ return ret;
pmic->current_reg_mode[id] = reg &
PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
}
@@ -941,13 +1081,13 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].supply_name = palmas_regs_info[id].sname;
config.of_node = palmas_matches[id].of_node;
- rdev = regulator_register(&pmic->desc[id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pdev->name);
- ret = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -956,6 +1096,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
/* Start this loop from the id left from previous loop */
for (; id < PALMAS_NUM_REGS; id++) {
+ if (pdata && pdata->reg_init[id])
+ reg_init = pdata->reg_init[id];
+ else
+ reg_init = NULL;
/* Miss out regulators which are not available due
* to alternate functions.
@@ -969,7 +1113,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
if (id < PALMAS_REG_REGEN1) {
pmic->desc[id].n_voltages = PALMAS_LDO_NUM_VOLTAGES;
- pmic->desc[id].ops = &palmas_ops_ldo;
+ if (reg_init && reg_init->roof_floor)
+ pmic->desc[id].ops =
+ &palmas_ops_ext_control_ldo;
+ else
+ pmic->desc[id].ops = &palmas_ops_ldo;
pmic->desc[id].min_uV = 900000;
pmic->desc[id].uV_step = 50000;
pmic->desc[id].linear_min_sel = 1;
@@ -999,7 +1147,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].enable_time = 2000;
} else {
pmic->desc[id].n_voltages = 1;
- pmic->desc[id].ops = &palmas_ops_extreg;
+ if (reg_init && reg_init->roof_floor)
+ pmic->desc[id].ops =
+ &palmas_ops_ext_control_extreg;
+ else
+ pmic->desc[id].ops = &palmas_ops_extreg;
pmic->desc[id].enable_reg =
PALMAS_BASE_TO_REG(PALMAS_RESOURCE_BASE,
palmas_regs_info[id].ctrl_addr);
@@ -1015,13 +1167,13 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].supply_name = palmas_regs_info[id].sname;
config.of_node = palmas_matches[id].of_node;
- rdev = regulator_register(&pmic->desc[id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pdev->name);
- ret = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -1037,31 +1189,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
else
ret = palmas_extreg_init(palmas,
id, reg_init);
- if (ret) {
- regulator_unregister(pmic->rdev[id]);
- goto err_unregister_regulator;
- }
+ if (ret)
+ return ret;
}
}
}
return 0;
-
-err_unregister_regulator:
- while (--id >= 0)
- regulator_unregister(pmic->rdev[id]);
- return ret;
-}
-
-static int palmas_regulators_remove(struct platform_device *pdev)
-{
- struct palmas_pmic *pmic = platform_get_drvdata(pdev);
- int id;
-
- for (id = 0; id < PALMAS_NUM_REGS; id++)
- regulator_unregister(pmic->rdev[id]);
- return 0;
}
static struct of_device_id of_palmas_match_tbl[] = {
@@ -1083,7 +1218,6 @@ static struct platform_driver palmas_driver = {
.owner = THIS_MODULE,
},
.probe = palmas_regulators_probe,
- .remove = palmas_regulators_remove,
};
static int __init palmas_init(void)
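Among the palmas changes above, the switch from of_find_node_by_name() to of_get_child_by_name() is easy to miss: the former continues a depth-first walk of the whole tree starting at the given node and can therefore match a "regulators" node belonging to some other device, while the latter inspects only direct children, which is what the binding intends. A small sketch (the wrapper name is hypothetical):

#include <linux/of.h>

/* Sketch: look up the "regulators" subnode strictly under the PMIC node. */
static struct device_node *foo_get_regulators_node(struct device_node *pmic_np)
{
	struct device_node *regs;

	regs = of_get_child_by_name(pmic_np, "regulators");
	/* NULL if the PMIC node has no such child; caller of_node_put()s it */
	return regs;
}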
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index b49eaeedea8..3727b7d0e9a 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -246,7 +246,8 @@ static int pcap_regulator_probe(struct platform_device *pdev)
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = pcap;
- rdev = regulator_register(&pcap_regulators[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pcap_regulators[pdev->id],
+ &config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -255,22 +256,12 @@ static int pcap_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pcap_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver pcap_regulator_driver = {
.driver = {
.name = "pcap-regulator",
.owner = THIS_MODULE,
},
.probe = pcap_regulator_probe,
- .remove = pcap_regulator_remove,
};
static int __init pcap_regulator_init(void)
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 0f3576d48ab..d7da1c15a6d 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -90,7 +90,8 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
config.driver_data = pcf;
config.regmap = pcf->regmap;
- rdev = regulator_register(&regulators[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
+ &config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -102,21 +103,11 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pcf50633_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver pcf50633_regulator_driver = {
.driver = {
.name = "pcf50633-regltr",
},
.probe = pcf50633_regulator_probe,
- .remove = pcf50633_regulator_remove,
};
static int __init pcf50633_regulator_init(void)
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 5885b450459..b58affb3314 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -173,33 +173,16 @@ skip_ext_pwr_config:
config.driver_data = reg;
config.regmap = rc5t583->regmap;
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
- ret = PTR_ERR(rdev);
- goto clean_exit;
+ return PTR_ERR(rdev);
}
reg->rdev = rdev;
}
platform_set_drvdata(pdev, regs);
return 0;
-
-clean_exit:
- while (--id >= 0)
- regulator_unregister(regs[id].rdev);
-
- return ret;
-}
-
-static int rc5t583_regulator_remove(struct platform_device *pdev)
-{
- struct rc5t583_regulator *regs = platform_get_drvdata(pdev);
- int id;
-
- for (id = 0; id < RC5T583_REGULATOR_MAX; ++id)
- regulator_unregister(regs[id].rdev);
- return 0;
}
static struct platform_driver rc5t583_regulator_driver = {
@@ -208,7 +191,6 @@ static struct platform_driver rc5t583_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = rc5t583_regulator_probe,
- .remove = rc5t583_regulator_remove,
};
static int __init rc5t583_regulator_init(void)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 5eba2ff8c0e..333677d68d0 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -448,33 +448,17 @@ common_reg:
config.of_node = rdata[i].of_node;
}
- s2mps11->rdev[i] = regulator_register(&regulators[i], &config);
+ s2mps11->rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
if (IS_ERR(s2mps11->rdev[i])) {
ret = PTR_ERR(s2mps11->rdev[i]);
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
- s2mps11->rdev[i] = NULL;
- goto err;
+ return ret;
}
}
return 0;
-err:
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
- regulator_unregister(s2mps11->rdev[i]);
-
- return ret;
-}
-
-static int s2mps11_pmic_remove(struct platform_device *pdev)
-{
- struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
- regulator_unregister(s2mps11->rdev[i]);
-
- return 0;
}
static const struct platform_device_id s2mps11_pmic_id[] = {
@@ -489,7 +473,6 @@ static struct platform_driver s2mps11_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = s2mps11_pmic_probe,
- .remove = s2mps11_pmic_remove,
.id_table = s2mps11_pmic_id,
};
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index c24448bc43c..cbf91e25cf7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -522,7 +522,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct device_node *pmic_np, *regulators_np, *reg_np;
struct sec_regulator_data *rdata;
struct sec_opmode_data *rmode;
- unsigned int i, dvs_voltage_nr = 1, ret;
+ unsigned int i, dvs_voltage_nr = 8, ret;
pmic_np = iodev->dev->of_node;
if (!pmic_np) {
@@ -586,15 +586,39 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rmode++;
}
- if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL))
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) {
pdata->buck2_gpiodvs = true;
- if (of_get_property(pmic_np, "s5m8767,pmic-buck3-uses-gpio-dvs", NULL))
+ if (of_property_read_u32_array(pmic_np,
+ "s5m8767,pmic-buck2-dvs-voltage",
+ pdata->buck2_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck2 voltages not specified\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck3-uses-gpio-dvs", NULL)) {
pdata->buck3_gpiodvs = true;
- if (of_get_property(pmic_np, "s5m8767,pmic-buck4-uses-gpio-dvs", NULL))
+ if (of_property_read_u32_array(pmic_np,
+ "s5m8767,pmic-buck3-dvs-voltage",
+ pdata->buck3_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck3 voltages not specified\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck4-uses-gpio-dvs", NULL)) {
pdata->buck4_gpiodvs = true;
+ if (of_property_read_u32_array(pmic_np,
+ "s5m8767,pmic-buck4-dvs-voltage",
+ pdata->buck4_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck4 voltages not specified\n");
+ return -EINVAL;
+ }
+ }
+
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
ret = s5m8767_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
@@ -612,32 +636,26 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
"invalid value for default dvs index, use 0\n");
}
}
- dvs_voltage_nr = 8;
}
ret = s5m8767_pmic_dt_parse_ds_gpio(iodev, pdata, pmic_np);
if (ret)
return -EINVAL;
- if (of_property_read_u32_array(pmic_np,
- "s5m8767,pmic-buck2-dvs-voltage",
- pdata->buck2_voltage, dvs_voltage_nr)) {
- dev_err(iodev->dev, "buck2 voltages not specified\n");
- return -EINVAL;
- }
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck2-ramp-enable", NULL))
+ pdata->buck2_ramp_enable = true;
- if (of_property_read_u32_array(pmic_np,
- "s5m8767,pmic-buck3-dvs-voltage",
- pdata->buck3_voltage, dvs_voltage_nr)) {
- dev_err(iodev->dev, "buck3 voltages not specified\n");
- return -EINVAL;
- }
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck3-ramp-enable", NULL))
+ pdata->buck3_ramp_enable = true;
- if (of_property_read_u32_array(pmic_np,
- "s5m8767,pmic-buck4-dvs-voltage",
- pdata->buck4_voltage, dvs_voltage_nr)) {
- dev_err(iodev->dev, "buck4 voltages not specified\n");
- return -EINVAL;
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck4-ramp-enable", NULL))
+ pdata->buck4_ramp_enable = true;
+
+ if (pdata->buck2_ramp_enable || pdata->buck3_ramp_enable
+ || pdata->buck4_ramp_enable) {
+ if (of_property_read_u32(pmic_np, "s5m8767,pmic-buck-ramp-delay",
+ &pdata->buck_ramp_delay))
+ pdata->buck_ramp_delay = 0;
}
return 0;
@@ -910,34 +928,17 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.regmap = iodev->regmap;
config.of_node = pdata->regulators[i].reg_node;
- rdev[i] = regulator_register(&regulators[id], &config);
+ rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
+ &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(s5m8767->dev, "regulator init failed for %d\n",
id);
- rdev[i] = NULL;
- goto err;
+ return ret;
}
}
return 0;
-err:
- for (i = 0; i < s5m8767->num_regulators; i++)
- regulator_unregister(rdev[i]);
-
- return ret;
-}
-
-static int s5m8767_pmic_remove(struct platform_device *pdev)
-{
- struct s5m8767_info *s5m8767 = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = s5m8767->rdev;
- int i;
-
- for (i = 0; i < s5m8767->num_regulators; i++)
- regulator_unregister(rdev[i]);
-
- return 0;
}
static const struct platform_device_id s5m8767_pmic_id[] = {
@@ -952,7 +953,6 @@ static struct platform_driver s5m8767_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = s5m8767_pmic_probe,
- .remove = s5m8767_pmic_remove,
.id_table = s5m8767_pmic_id,
};
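The s5m8767 DT parsing above now requires the eight-entry "...-dvs-voltage" array only when the matching "...-uses-gpio-dvs" flag is set (note dvs_voltage_nr defaulting to 8 instead of 1). A sketch of that conditional parse for one buck, with a hypothetical helper name and caller-supplied storage:

#include <linux/errno.h>
#include <linux/of.h>

#define FOO_DVS_LEVELS 8	/* eight GPIO-selected DVS voltages */

/* Sketch: read the DVS voltage table only if GPIO DVS is actually enabled. */
static int foo_parse_buck2_dvs(struct device_node *pmic_np,
			       u32 *buck2_voltage, bool *buck2_gpiodvs)
{
	if (!of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL))
		return 0;			/* DVS unused: array optional */

	*buck2_gpiodvs = true;
	if (of_property_read_u32_array(pmic_np,
				       "s5m8767,pmic-buck2-dvs-voltage",
				       buck2_voltage, FOO_DVS_LEVELS))
		return -EINVAL;			/* flag set but table missing */

	return 0;
}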
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
new file mode 100644
index 00000000000..f78857bd6a1
--- /dev/null
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -0,0 +1,111 @@
+/*
+ * Regulator driver for STw4810/STw4811 VMMC regulator.
+ *
+ * Copyright (C) 2013 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/stw481x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int stw481x_vmmc_voltages[] = {
+ 1800000,
+ 1800000,
+ 2850000,
+ 3000000,
+ 1850000,
+ 2600000,
+ 2700000,
+ 3300000,
+};
+
+static struct regulator_ops stw481x_vmmc_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static struct regulator_desc vmmc_regulator = {
+ .name = "VMMC",
+ .id = 0,
+ .ops = &stw481x_vmmc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(stw481x_vmmc_voltages),
+ .volt_table = stw481x_vmmc_voltages,
+ .enable_time = 200, /* FIXME: look this up */
+ .enable_reg = STW_CONF1,
+ .enable_mask = STW_CONF1_PDN_VMMC,
+ .vsel_reg = STW_CONF1,
+ .vsel_mask = STW_CONF1_VMMC_MASK,
+};
+
+static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
+{
+ struct stw481x *stw481x = dev_get_platdata(&pdev->dev);
+ struct regulator_config config = { };
+ int ret;
+
+ /* First disable the external VMMC if it's active */
+ ret = regmap_update_bits(stw481x->map, STW_CONF2,
+ STW_CONF2_VMMC_EXT, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "could not disable external VMMC\n");
+ return ret;
+ }
+
+ /* Register VMMC regulator */
+ config.dev = &pdev->dev;
+ config.driver_data = stw481x;
+ config.regmap = stw481x->map;
+ config.of_node = pdev->dev.of_node;
+ config.init_data = of_get_regulator_init_data(&pdev->dev,
+ pdev->dev.of_node);
+
+ stw481x->vmmc_regulator = regulator_register(&vmmc_regulator, &config);
+ if (IS_ERR(stw481x->vmmc_regulator)) {
+ dev_err(&pdev->dev,
+ "error initializing STw481x VMMC regulator\n");
+ return PTR_ERR(stw481x->vmmc_regulator);
+ }
+
+ dev_info(&pdev->dev, "initialized STw481x VMMC regulator\n");
+ return 0;
+}
+
+static int stw481x_vmmc_regulator_remove(struct platform_device *pdev)
+{
+ struct stw481x *stw481x = dev_get_platdata(&pdev->dev);
+
+ regulator_unregister(stw481x->vmmc_regulator);
+ return 0;
+}
+
+static const struct of_device_id stw481x_vmmc_match[] = {
+ { .compatible = "st,stw481x-vmmc", },
+ {},
+};
+
+static struct platform_driver stw481x_vmmc_regulator_driver = {
+ .driver = {
+ .name = "stw481x-vmmc-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = stw481x_vmmc_match,
+ },
+ .probe = stw481x_vmmc_regulator_probe,
+ .remove = stw481x_vmmc_regulator_remove,
+};
+
+module_platform_driver(stw481x_vmmc_regulator_driver);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 20c271d49dc..b187b6bba7a 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -615,7 +615,7 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
pname, *volt_table, vset_mask);
continue;
}
- info->vset = efuse_val & vset_mask >> __ffs(vset_mask);
+ info->vset = (efuse_val & vset_mask) >> __ffs(vset_mask);
dev_dbg(dev, "[%d]v=%d vset=%x\n", i, *volt_table, info->vset);
check_abb:
switch (info->opp_sel) {
@@ -708,39 +708,31 @@ static int ti_abb_probe(struct platform_device *pdev)
match = of_match_device(ti_abb_of_match, dev);
if (!match) {
/* We do not expect this to happen */
- ret = -ENODEV;
dev_err(dev, "%s: Unable to match device\n", __func__);
- goto err;
+ return -ENODEV;
}
if (!match->data) {
- ret = -EINVAL;
dev_err(dev, "%s: Bad data in match\n", __func__);
- goto err;
+ return -EINVAL;
}
abb = devm_kzalloc(dev, sizeof(struct ti_abb), GFP_KERNEL);
- if (!abb) {
- dev_err(dev, "%s: Unable to allocate ABB struct\n", __func__);
- ret = -ENOMEM;
- goto err;
- }
+ if (!abb)
+ return -ENOMEM;
abb->regs = match->data;
/* Map ABB resources */
pname = "base-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
abb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(abb->base)) {
- ret = PTR_ERR(abb->base);
- goto err;
- }
+ if (IS_ERR(abb->base))
+ return PTR_ERR(abb->base);
pname = "int-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
if (!res) {
dev_err(dev, "Missing '%s' IO resource\n", pname);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/*
* We may have shared interrupt register offsets which are
@@ -750,8 +742,7 @@ static int ti_abb_probe(struct platform_device *pdev)
resource_size(res));
if (!abb->int_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
/* Map Optional resources */
@@ -771,17 +762,19 @@ static int ti_abb_probe(struct platform_device *pdev)
resource_size(res));
if (!abb->efuse_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
pname = "ldo-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->ldo_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(abb->ldo_base)) {
- ret = PTR_ERR(abb->ldo_base);
- goto err;
+ if (!res) {
+ dev_dbg(dev, "Missing '%s' IO resource\n", pname);
+ ret = -ENODEV;
+ goto skip_opt;
}
+ abb->ldo_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->ldo_base))
+ return PTR_ERR(abb->ldo_base);
/* IF ldo_base is set, the following are mandatory */
pname = "ti,ldovbb-override-mask";
@@ -790,12 +783,11 @@ static int ti_abb_probe(struct platform_device *pdev)
&abb->ldovbb_override_mask);
if (ret) {
dev_err(dev, "Missing '%s' (%d)\n", pname, ret);
- goto err;
+ return ret;
}
if (!abb->ldovbb_override_mask) {
dev_err(dev, "Invalid property:'%s' set as 0!\n", pname);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
pname = "ti,ldovbb-vset-mask";
@@ -804,12 +796,11 @@ static int ti_abb_probe(struct platform_device *pdev)
&abb->ldovbb_vset_mask);
if (ret) {
dev_err(dev, "Missing '%s' (%d)\n", pname, ret);
- goto err;
+ return ret;
}
if (!abb->ldovbb_vset_mask) {
dev_err(dev, "Invalid property:'%s' set as 0!\n", pname);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
skip_opt:
@@ -819,31 +810,29 @@ skip_opt:
&abb->txdone_mask);
if (ret) {
dev_err(dev, "Missing '%s' (%d)\n", pname, ret);
- goto err;
+ return ret;
}
if (!abb->txdone_mask) {
dev_err(dev, "Invalid property:'%s' set as 0!\n", pname);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
initdata = of_get_regulator_init_data(dev, pdev->dev.of_node);
if (!initdata) {
- ret = -ENOMEM;
dev_err(dev, "%s: Unable to alloc regulator init data\n",
__func__);
- goto err;
+ return -ENOMEM;
}
/* init ABB opp_sel table */
ret = ti_abb_init_table(dev, abb, initdata);
if (ret)
- goto err;
+ return ret;
/* init ABB timing */
ret = ti_abb_init_timings(dev, abb);
if (ret)
- goto err;
+ return ret;
desc = &abb->rdesc;
desc->name = dev_name(dev);
@@ -861,12 +850,12 @@ skip_opt:
config.driver_data = abb;
config.of_node = pdev->dev.of_node;
- rdev = regulator_register(desc, &config);
+ rdev = devm_regulator_register(dev, desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(dev, "%s: failed to register regulator(%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, rdev);
@@ -874,31 +863,12 @@ skip_opt:
ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->regs->setup_reg, abb->base);
return 0;
-
-err:
- dev_err(dev, "%s: Failed to initialize(%d)\n", __func__, ret);
- return ret;
-}
-
-/**
- * ti_abb_remove() - cleanups
- * @pdev: ABB platform device
- *
- * Return: 0
- */
-static int ti_abb_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
- return 0;
}
MODULE_ALIAS("platform:ti_abb");
static struct platform_driver ti_abb_driver = {
.probe = ti_abb_probe,
- .remove = ti_abb_remove,
.driver = {
.name = "ti_abb",
.owner = THIS_MODULE,
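
Nearly every conversion in this series follows the same managed-probe shape, so the ti_abb hunks above are representative: once the regulator is registered with devm_regulator_register(), its lifetime is tied to the struct device, probe error paths can simply return, and the driver no longer needs a .remove callback or unwind labels. A minimal sketch of that shape, using a hypothetical "foo" regulator (the names and the empty ops are placeholders, not part of this series):

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/regulator/driver.h>

	/* Real drivers fill in voltage/enable ops; left empty here for brevity. */
	static struct regulator_ops foo_ops;

	static const struct regulator_desc foo_desc = {
		.name = "foo",
		.ops = &foo_ops,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct regulator_config config = { };
		struct regulator_dev *rdev;

		config.dev = &pdev->dev;

		/* Unregistered automatically when the device is unbound. */
		rdev = devm_regulator_register(&pdev->dev, &foo_desc, &config);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);

		platform_set_drvdata(pdev, rdev);
		return 0;
	}

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.driver = {
			.name = "foo-regulator",
			.owner = THIS_MODULE,
		},
	};
	module_platform_driver(foo_driver);

	MODULE_LICENSE("GPL");
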
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 9392a7ca3d2..b0a3f0917a2 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -343,7 +343,7 @@ static int tps51632_probe(struct i2c_client *client,
config.regmap = tps->regmap;
config.of_node = client->dev.of_node;
- rdev = regulator_register(&tps->desc, &config);
+ rdev = devm_regulator_register(&client->dev, &tps->desc, &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev, "regulator register failed\n");
return PTR_ERR(rdev);
@@ -353,14 +353,6 @@ static int tps51632_probe(struct i2c_client *client,
return 0;
}
-static int tps51632_remove(struct i2c_client *client)
-{
- struct tps51632_chip *tps = i2c_get_clientdata(client);
-
- regulator_unregister(tps->rdev);
- return 0;
-}
-
static const struct i2c_device_id tps51632_id[] = {
{.name = "tps51632",},
{},
@@ -375,7 +367,6 @@ static struct i2c_driver tps51632_i2c_driver = {
.of_match_table = of_match_ptr(tps51632_of_match),
},
.probe = tps51632_probe,
- .remove = tps51632_remove,
.id_table = tps51632_id,
};
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index ec9453ffb77..c1e33a3d397 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -137,7 +137,7 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
/* This instance is not set for regulator mode so bail out */
if (pdata->mode != TPS6105X_MODE_VOLTAGE) {
dev_info(&pdev->dev,
- "chip not in voltage mode mode, exit probe \n");
+ "chip not in voltage mode mode, exit probe\n");
return 0;
}
@@ -146,8 +146,9 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
config.driver_data = tps6105x;
/* Register regulator with framework */
- tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
- &config);
+ tps6105x->regulator = devm_regulator_register(&pdev->dev,
+ &tps6105x_regulator_desc,
+ &config);
if (IS_ERR(tps6105x->regulator)) {
ret = PTR_ERR(tps6105x->regulator);
dev_err(&tps6105x->client->dev,
@@ -159,20 +160,12 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int tps6105x_regulator_remove(struct platform_device *pdev)
-{
- struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
- regulator_unregister(tps6105x->regulator);
- return 0;
-}
-
static struct platform_driver tps6105x_regulator_driver = {
.driver = {
.name = "tps6105x-regulator",
.owner = THIS_MODULE,
},
.probe = tps6105x_regulator_probe,
- .remove = tps6105x_regulator_remove,
};
static __init int tps6105x_regulator_init(void)
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index 0b7ebb1ebf8..c2c0185a2dc 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -476,7 +476,7 @@ static int tps62360_probe(struct i2c_client *client,
config.of_node = client->dev.of_node;
/* Register the regulators */
- rdev = regulator_register(&tps->desc, &config);
+ rdev = devm_regulator_register(&client->dev, &tps->desc, &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev,
"%s(): regulator register failed with err %s\n",
@@ -488,20 +488,6 @@ static int tps62360_probe(struct i2c_client *client,
return 0;
}
-/**
- * tps62360_remove - tps62360 driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
-static int tps62360_remove(struct i2c_client *client)
-{
- struct tps62360_chip *tps = i2c_get_clientdata(client);
-
- regulator_unregister(tps->rdev);
- return 0;
-}
-
static void tps62360_shutdown(struct i2c_client *client)
{
struct tps62360_chip *tps = i2c_get_clientdata(client);
@@ -535,7 +521,6 @@ static struct i2c_driver tps62360_i2c_driver = {
.of_match_table = of_match_ptr(tps62360_of_match),
},
.probe = tps62360_probe,
- .remove = tps62360_remove,
.shutdown = tps62360_shutdown,
.id_table = tps62360_id,
};
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index a15263d4bdf..3ef67a86115 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -277,12 +277,12 @@ static int tps_65023_probe(struct i2c_client *client,
config.regmap = tps->regmap;
/* Register the regulators */
- rdev = regulator_register(&tps->desc[i], &config);
+ rdev = devm_regulator_register(&client->dev, &tps->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
- error = PTR_ERR(rdev);
- goto fail;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -293,24 +293,10 @@ static int tps_65023_probe(struct i2c_client *client,
/* Enable setting output voltage by I2C */
regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
+ TPS65023_REG_CTRL2_CORE_ADJ,
+ TPS65023_REG_CTRL2_CORE_ADJ);
return 0;
-
- fail:
- while (--i >= 0)
- regulator_unregister(tps->rdev[i]);
- return error;
-}
-
-static int tps_65023_remove(struct i2c_client *client)
-{
- struct tps_pmic *tps = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
- regulator_unregister(tps->rdev[i]);
- return 0;
}
static const struct tps_info tps65020_regs[] = {
@@ -430,7 +416,6 @@ static struct i2c_driver tps_65023_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = tps_65023_probe,
- .remove = tps_65023_remove,
.id_table = tps_65023_id,
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 4117ff52dba..162a0fae20b 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -508,13 +508,13 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
config.of_node = tps6507x_reg_matches[i].of_node;
}
- rdev = regulator_register(&tps->desc[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &tps->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
"failed to register %s regulator\n",
pdev->name);
- error = PTR_ERR(rdev);
- goto fail;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -525,22 +525,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tps6507x_dev);
return 0;
-
-fail:
- while (--i >= 0)
- regulator_unregister(tps->rdev[i]);
- return error;
-}
-
-static int tps6507x_pmic_remove(struct platform_device *pdev)
-{
- struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
- struct tps6507x_pmic *tps = tps6507x_dev->pmic;
- int i;
-
- for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
- regulator_unregister(tps->rdev[i]);
- return 0;
}
static struct platform_driver tps6507x_pmic_driver = {
@@ -549,7 +533,6 @@ static struct platform_driver tps6507x_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = tps6507x_pmic_probe,
- .remove = tps6507x_pmic_remove,
};
static int __init tps6507x_pmic_init(void)
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index c8e70451df3..676f75548f0 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -180,7 +180,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
return ERR_PTR(-ENOMEM);
}
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return ERR_PTR(-ENODEV);
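
The of_find_node_by_name() to of_get_child_by_name() changes here and in the tps6586x and tps65910 hunks below fix the lookup scope: of_find_node_by_name() walks the whole tree starting after the given node and can latch onto a "regulators" node that belongs to a different device, while of_get_child_by_name() only matches direct children. A small sketch of the intended usage; the count_regulators() helper is hypothetical:

	#include <linux/errno.h>
	#include <linux/of.h>

	static int count_regulators(struct device_node *np)
	{
		struct device_node *regulators, *child;
		int count = 0;

		/* Direct child of @np only, never a node elsewhere in the tree. */
		regulators = of_get_child_by_name(np, "regulators");
		if (!regulators)
			return -ENODEV;

		for_each_child_of_node(regulators, child)
			count++;

		/* of_get_child_by_name() returned the node with a reference held. */
		of_node_put(regulators);
		return count;
	}
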
@@ -279,7 +279,7 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"failed disable ext control\n");
- goto scrub;
+ return ret;
}
}
}
@@ -296,12 +296,11 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
else
config.of_node = NULL;
- rdev = regulator_register(ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc->name);
- ret = PTR_ERR(rdev);
- goto scrub;
+ return PTR_ERR(rdev);
}
ri->rdev = rdev;
@@ -309,36 +308,13 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
if (tps_pdata && is_dcdc(num) && tps_pdata->reg_init_data &&
tps_pdata->enable_ext_control) {
ret = tps65090_config_ext_control(ri, true);
- if (ret < 0) {
- /* Increment num to get unregister rdev */
- num++;
- goto scrub;
- }
+ if (ret < 0)
+ return ret;
}
}
platform_set_drvdata(pdev, pmic);
return 0;
-
-scrub:
- while (--num >= 0) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return ret;
-}
-
-static int tps65090_regulator_remove(struct platform_device *pdev)
-{
- struct tps65090_regulator *pmic = platform_get_drvdata(pdev);
- struct tps65090_regulator *ri;
- int num;
-
- for (num = 0; num < TPS65090_REGULATOR_MAX; ++num) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return 0;
}
static struct platform_driver tps65090_regulator_driver = {
@@ -347,7 +323,6 @@ static struct platform_driver tps65090_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps65090_regulator_probe,
- .remove = tps65090_regulator_remove,
};
static int __init tps65090_regulator_init(void)
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 90861d68a0b..9ea1bf26bd1 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -52,25 +52,17 @@ static const unsigned int LDO1_VSEL_table[] = {
};
static const struct regulator_linear_range tps65217_uv1_ranges[] = {
- { .min_uV = 900000, .max_uV = 1500000, .min_sel = 0, .max_sel = 24,
- .uV_step = 25000 },
- { .min_uV = 1550000, .max_uV = 1800000, .min_sel = 25, .max_sel = 30,
- .uV_step = 50000 },
- { .min_uV = 1850000, .max_uV = 2900000, .min_sel = 31, .max_sel = 52,
- .uV_step = 50000 },
- { .min_uV = 3000000, .max_uV = 3200000, .min_sel = 53, .max_sel = 55,
- .uV_step = 100000 },
- { .min_uV = 3300000, .max_uV = 3300000, .min_sel = 56, .max_sel = 62,
- .uV_step = 0 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000),
+ REGULATOR_LINEAR_RANGE(1550000, 25, 30, 50000),
+ REGULATOR_LINEAR_RANGE(1850000, 31, 52, 50000),
+ REGULATOR_LINEAR_RANGE(3000000, 53, 55, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 56, 62, 0),
};
static const struct regulator_linear_range tps65217_uv2_ranges[] = {
- { .min_uV = 1500000, .max_uV = 1900000, .min_sel = 0, .max_sel = 8,
- .uV_step = 50000 },
- { .min_uV = 2000000, .max_uV = 2400000, .min_sel = 9, .max_sel = 13,
- .uV_step = 100000 },
- { .min_uV = 2450000, .max_uV = 3300000, .min_sel = 14, .max_sel = 31,
- .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(1500000, 0, 8, 50000),
+ REGULATOR_LINEAR_RANGE(2000000, 9, 13, 100000),
+ REGULATOR_LINEAR_RANGE(2450000, 14, 31, 50000),
};
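
The REGULATOR_LINEAR_RANGE() conversions (here and in the tps65912, wm831x, wm8350 and wm8400 hunks later in this diff) are purely cosmetic: each entry still describes selectors min_sel..max_sel starting at min_uV in steps of uV_step, and the core resolves a selector exactly as before. A rough equivalent of the per-range calculation, for illustration only (the helper name is made up):

	#include <linux/errno.h>
	#include <linux/regulator/driver.h>

	static int linear_range_to_uV(const struct regulator_linear_range *r,
				      unsigned int sel)
	{
		if (sel < r->min_sel || sel > r->max_sel)
			return -EINVAL;

		/* e.g. tps65217_uv1_ranges, sel 27: 1550000 + 2 * 50000 = 1650000 uV */
		return r->min_uV + (sel - r->min_sel) * r->uV_step;
	}
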
static int tps65217_pmic_enable(struct regulator_dev *dev)
@@ -233,7 +225,7 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
struct regulator_config config = { };
- int i, ret;
+ int i;
if (tps->dev->of_node)
pdata = tps65217_parse_dt(pdev);
@@ -269,35 +261,18 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
if (tps->dev->of_node)
config.of_node = pdata->of_node[i];
- rdev = regulator_register(&regulators[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev, "failed to register %s regulator\n",
pdev->name);
- ret = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
tps->rdev[i] = rdev;
}
return 0;
-
-err_unregister_regulator:
- while (--i >= 0)
- regulator_unregister(tps->rdev[i]);
-
- return ret;
-}
-
-static int tps65217_regulator_remove(struct platform_device *pdev)
-{
- struct tps65217 *tps = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
- regulator_unregister(tps->rdev[i]);
-
- return 0;
}
static struct platform_driver tps65217_regulator_driver = {
@@ -305,7 +280,6 @@ static struct platform_driver tps65217_regulator_driver = {
.name = "tps65217-pmic",
},
.probe = tps65217_regulator_probe,
- .remove = tps65217_regulator_remove,
};
static int __init tps65217_regulator_init(void)
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 62e8d28beab..9f6bfda711b 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -577,21 +577,6 @@ static struct regulator_ops regulator_ops = {
.get_current_limit = get_current_limit,
};
-static int pmic_remove(struct spi_device *spi)
-{
- struct tps6524x *hw = spi_get_drvdata(spi);
- int i;
-
- if (!hw)
- return 0;
- for (i = 0; i < N_REGULATORS; i++) {
- regulator_unregister(hw->rdev[i]);
- hw->rdev[i] = NULL;
- }
- spi_set_drvdata(spi, NULL);
- return 0;
-}
-
static int pmic_probe(struct spi_device *spi)
{
struct tps6524x *hw;
@@ -599,7 +584,7 @@ static int pmic_probe(struct spi_device *spi)
const struct supply_info *info = supply_info;
struct regulator_init_data *init_data;
struct regulator_config config = { };
- int ret = 0, i;
+ int i;
init_data = dev_get_platdata(dev);
if (!init_data) {
@@ -632,24 +617,17 @@ static int pmic_probe(struct spi_device *spi)
config.init_data = init_data;
config.driver_data = hw;
- hw->rdev[i] = regulator_register(&hw->desc[i], &config);
- if (IS_ERR(hw->rdev[i])) {
- ret = PTR_ERR(hw->rdev[i]);
- hw->rdev[i] = NULL;
- goto fail;
- }
+ hw->rdev[i] = devm_regulator_register(dev, &hw->desc[i],
+ &config);
+ if (IS_ERR(hw->rdev[i]))
+ return PTR_ERR(hw->rdev[i]);
}
return 0;
-
-fail:
- pmic_remove(spi);
- return ret;
}
static struct spi_driver pmic_driver = {
.probe = pmic_probe,
- .remove = pmic_remove,
.driver = {
.name = "tps6524x",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 2c9155b66f0..e8e3a8afd3e 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -298,7 +298,7 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
struct tps6586x_platform_data *pdata;
int err;
- regs = of_find_node_by_name(np, "regulators");
+ regs = of_get_child_by_name(np, "regulators");
if (!regs) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
@@ -379,15 +379,14 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
ri = find_regulator_info(id);
if (!ri) {
dev_err(&pdev->dev, "invalid regulator ID specified\n");
- err = -EINVAL;
- goto fail;
+ return -EINVAL;
}
err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
if (err) {
dev_err(&pdev->dev,
"regulator %d preinit failed, e %d\n", id, err);
- goto fail;
+ return err;
}
config.dev = pdev->dev.parent;
@@ -397,12 +396,12 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
if (tps6586x_reg_matches)
config.of_node = tps6586x_reg_matches[id].of_node;
- rdev[id] = regulator_register(&ri->desc, &config);
+ rdev[id] = devm_regulator_register(&pdev->dev, &ri->desc,
+ &config);
if (IS_ERR(rdev[id])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
- err = PTR_ERR(rdev[id]);
- goto fail;
+ return PTR_ERR(rdev[id]);
}
if (reg_data) {
@@ -411,30 +410,13 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev,
"Slew rate config failed, e %d\n", err);
- regulator_unregister(rdev[id]);
- goto fail;
+ return err;
}
}
}
platform_set_drvdata(pdev, rdev);
return 0;
-
-fail:
- while (--id >= 0)
- regulator_unregister(rdev[id]);
- return err;
-}
-
-static int tps6586x_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev **rdev = platform_get_drvdata(pdev);
- int id = TPS6586X_ID_MAX_REGULATOR;
-
- while (--id >= 0)
- regulator_unregister(rdev[id]);
-
- return 0;
}
static struct platform_driver tps6586x_regulator_driver = {
@@ -443,7 +425,6 @@ static struct platform_driver tps6586x_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,
- .remove = tps6586x_regulator_remove,
};
static int __init tps6586x_regulator_init(void)
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 45c16447744..a00132e31ec 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -481,7 +481,7 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
/* multiplier 0 == 1 but 2,3 normal */
if (!mult)
- mult=1;
+ mult = 1;
if (sr) {
/* normalise to valid range */
@@ -685,7 +685,7 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
case TPS65910_REG_VDD2:
mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
volt = VDD1_2_MIN_VOLT +
- (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
+ (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -703,7 +703,7 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
int step_mv = 0, id = rdev_get_id(dev);
- switch(id) {
+ switch (id) {
case TPS65911_REG_LDO1:
case TPS65911_REG_LDO2:
case TPS65911_REG_LDO4:
@@ -906,7 +906,7 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
}
ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0);
if (ret < 0) {
- dev_err(mfd->dev, "Error in settting sr register\n");
+ dev_err(mfd->dev, "Error in setting sr register\n");
return ret;
}
}
@@ -982,7 +982,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
}
np = of_node_get(pdev->dev.parent->of_node);
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
@@ -1074,7 +1074,7 @@ static int tps65910_probe(struct platform_device *pdev)
tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
- switch(tps65910_chip_id(tps65910)) {
+ switch (tps65910_chip_id(tps65910)) {
case TPS65910:
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
@@ -1177,35 +1177,19 @@ static int tps65910_probe(struct platform_device *pdev)
if (tps65910_reg_matches)
config.of_node = tps65910_reg_matches[i].of_node;
- rdev = regulator_register(&pmic->desc[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps65910->dev,
"failed to register %s regulator\n",
pdev->name);
- err = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
pmic->rdev[i] = rdev;
}
return 0;
-
-err_unregister_regulator:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return err;
-}
-
-static int tps65910_remove(struct platform_device *pdev)
-{
- struct tps65910_reg *pmic = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < pmic->num_regulators; i++)
- regulator_unregister(pmic->rdev[i]);
-
- return 0;
}
static void tps65910_shutdown(struct platform_device *pdev)
@@ -1244,7 +1228,6 @@ static struct platform_driver tps65910_driver = {
.owner = THIS_MODULE,
},
.probe = tps65910_probe,
- .remove = tps65910_remove,
.shutdown = tps65910_shutdown,
};
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 281e52ac64b..9cafaa0f945 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -119,12 +119,9 @@ struct tps65912_reg {
};
static const struct regulator_linear_range tps65912_ldo_ranges[] = {
- { .min_uV = 800000, .max_uV = 1600000, .min_sel = 0, .max_sel = 32,
- .uV_step = 25000 },
- { .min_uV = 1650000, .max_uV = 3000000, .min_sel = 33, .max_sel = 60,
- .uV_step = 50000 },
- { .min_uV = 3100000, .max_uV = 3300000, .min_sel = 61, .max_sel = 63,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(800000, 0, 32, 25000),
+ REGULATOR_LINEAR_RANGE(1650000, 33, 60, 50000),
+ REGULATOR_LINEAR_RANGE(3100000, 61, 63, 100000),
};
static int tps65912_get_range(struct tps65912_reg *pmic, int id)
@@ -461,7 +458,7 @@ static int tps65912_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
struct tps65912_reg *pmic;
struct tps65912_board *pmic_plat_data;
- int i, err;
+ int i;
pmic_plat_data = dev_get_platdata(tps65912->dev);
if (!pmic_plat_data)
@@ -504,34 +501,19 @@ static int tps65912_probe(struct platform_device *pdev)
config.init_data = reg_data;
config.driver_data = pmic;
- rdev = regulator_register(&pmic->desc[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps65912->dev,
"failed to register %s regulator\n",
pdev->name);
- err = PTR_ERR(rdev);
- goto err;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
pmic->rdev[i] = rdev;
}
return 0;
-
-err:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return err;
-}
-
-static int tps65912_remove(struct platform_device *pdev)
-{
- struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
- regulator_unregister(tps65912_reg->rdev[i]);
- return 0;
}
static struct platform_driver tps65912_driver = {
@@ -540,7 +522,6 @@ static struct platform_driver tps65912_driver = {
.owner = THIS_MODULE,
},
.probe = tps65912_probe,
- .remove = tps65912_remove,
};
static int __init tps65912_init(void)
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 6511d0bfd89..71f457a4262 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -719,7 +719,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"regulator config failed, e %d\n", ret);
- goto fail;
+ return ret;
}
ret = tps80031_power_req_config(pdev->dev.parent,
@@ -727,41 +727,22 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"pwr_req config failed, err %d\n", ret);
- goto fail;
+ return ret;
}
}
- rdev = regulator_register(&ri->rinfo->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->rinfo->desc,
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"register regulator failed %s\n",
ri->rinfo->desc.name);
- ret = PTR_ERR(rdev);
- goto fail;
+ return PTR_ERR(rdev);
}
ri->rdev = rdev;
}
platform_set_drvdata(pdev, pmic);
return 0;
-fail:
- while (--num >= 0) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return ret;
-}
-
-static int tps80031_regulator_remove(struct platform_device *pdev)
-{
- struct tps80031_regulator *pmic = platform_get_drvdata(pdev);
- struct tps80031_regulator *ri = NULL;
- int num;
-
- for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return 0;
}
static struct platform_driver tps80031_regulator_driver = {
@@ -770,7 +751,6 @@ static struct platform_driver tps80031_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps80031_regulator_probe,
- .remove = tps80031_regulator_remove,
};
static int __init tps80031_regulator_init(void)
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 78aae4cbb00..8ebd785485c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1188,7 +1188,7 @@ static int twlreg_probe(struct platform_device *pdev)
config.driver_data = info;
config.of_node = pdev->dev.of_node;
- rdev = regulator_register(&info->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
info->desc.name, PTR_ERR(rdev));
@@ -1217,7 +1217,6 @@ static int twlreg_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct twlreg_info *info = rdev->reg_data;
- regulator_unregister(rdev);
kfree(info);
return 0;
}
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index 4668c7f8133..f3ae28a7e66 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -96,7 +96,7 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
config.driver_data = reg;
config.of_node = pdev->dev.of_node;
- reg->regdev = regulator_register(&reg->desc, &config);
+ reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config);
if (IS_ERR(reg->regdev)) {
err = PTR_ERR(reg->regdev);
goto error_regulator_register;
@@ -119,7 +119,6 @@ static int vexpress_regulator_remove(struct platform_device *pdev)
struct vexpress_regulator *reg = platform_get_drvdata(pdev);
vexpress_config_func_put(reg->func);
- regulator_unregister(reg->regdev);
return 0;
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 11861cb861d..6823e6f2b88 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -387,8 +387,9 @@ static struct regulator_ops wm831x_buckv_ops = {
* Set up DVS control. We just log errors since we can still run
* (with reduced performance) if we fail.
*/
-static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
- struct wm831x_buckv_pdata *pdata)
+static void wm831x_buckv_dvs_init(struct platform_device *pdev,
+ struct wm831x_dcdc *dcdc,
+ struct wm831x_buckv_pdata *pdata)
{
struct wm831x *wm831x = dcdc->wm831x;
int ret;
@@ -402,9 +403,9 @@ static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
*/
dcdc->dvs_gpio_state = pdata->dvs_init_state;
- ret = gpio_request_one(pdata->dvs_gpio,
- dcdc->dvs_gpio_state ? GPIOF_INIT_HIGH : 0,
- "DCDC DVS");
+ ret = devm_gpio_request_one(&pdev->dev, pdata->dvs_gpio,
+ dcdc->dvs_gpio_state ? GPIOF_INIT_HIGH : 0,
+ "DCDC DVS");
if (ret < 0) {
dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
dcdc->name, ret);
@@ -513,7 +514,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
if (pdata && pdata->dcdc[id])
- wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
+ wm831x_buckv_dvs_init(pdev, dcdc,
+ pdata->dcdc[id]->driver_data);
config.dev = pdev->dev.parent;
if (pdata)
@@ -521,7 +523,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -530,57 +533,35 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name, dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
- IRQF_TRIGGER_RISING, dcdc->name, dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_oc_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n",
irq, ret);
- goto err_uv;
+ goto err;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-err_uv:
- free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
-err_regulator:
- regulator_unregister(dcdc->regulator);
err:
- if (dcdc->dvs_gpio)
- gpio_free(dcdc->dvs_gpio);
return ret;
}
-static int wm831x_buckv_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
-
- free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
- dcdc);
- free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
- regulator_unregister(dcdc->regulator);
- if (dcdc->dvs_gpio)
- gpio_free(dcdc->dvs_gpio);
-
- return 0;
-}
-
static struct platform_driver wm831x_buckv_driver = {
.probe = wm831x_buckv_probe,
- .remove = wm831x_buckv_remove,
.driver = {
.name = "wm831x-buckv",
.owner = THIS_MODULE,
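
The wm831x hunks combine two managed helpers: devm_regulator_register() and devm_request_threaded_irq() are both released automatically, in reverse order of allocation, when probe fails or the device is unbound, which is why the err_uv/err_regulator unwind labels collapse into a bare err path and the remove callbacks disappear. A minimal sketch of the IRQ half, mirroring the request made above but with hypothetical foo names:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_uv_irq(int irq, void *data)
	{
		/* report the undervoltage event for the regulator in @data */
		return IRQ_HANDLED;
	}

	static int foo_request_uv_irq(struct platform_device *pdev, void *dcdc)
	{
		int irq = platform_get_irq_byname(pdev, "UV");

		if (irq < 0)
			return irq;

		/* Freed automatically on unbind or on a later probe failure. */
		return devm_request_threaded_irq(&pdev->dev, irq, NULL, foo_uv_irq,
						 IRQF_TRIGGER_RISING, "foo-uv",
						 dcdc);
	}
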
@@ -681,7 +662,8 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -690,38 +672,25 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name, dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-err_regulator:
- regulator_unregister(dcdc->regulator);
err:
return ret;
}
-static int wm831x_buckp_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
- regulator_unregister(dcdc->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_buckp_driver = {
.probe = wm831x_buckp_probe,
- .remove = wm831x_buckp_remove,
.driver = {
.name = "wm831x-buckp",
.owner = THIS_MODULE,
@@ -813,7 +782,8 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -822,39 +792,26 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name,
+ dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-err_regulator:
- regulator_unregister(dcdc->regulator);
err:
return ret;
}
-static int wm831x_boostp_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
- regulator_unregister(dcdc->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_boostp_driver = {
.probe = wm831x_boostp_probe,
- .remove = wm831x_boostp_remove,
.driver = {
.name = "wm831x-boostp",
.owner = THIS_MODULE,
@@ -914,7 +871,8 @@ static int wm831x_epe_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
@@ -930,18 +888,8 @@ err:
return ret;
}
-static int wm831x_epe_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-
- regulator_unregister(dcdc->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_epe_driver = {
.probe = wm831x_epe_probe,
- .remove = wm831x_epe_remove,
.driver = {
.name = "wm831x-epe",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 4eb373de1fa..0339b886df5 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -194,7 +194,8 @@ static int wm831x_isink_probe(struct platform_device *pdev)
config.init_data = pdata->isink[id];
config.driver_data = isink;
- isink->regulator = regulator_register(&isink->desc, &config);
+ isink->regulator = devm_regulator_register(&pdev->dev, &isink->desc,
+ &config);
if (IS_ERR(isink->regulator)) {
ret = PTR_ERR(isink->regulator);
dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
@@ -203,38 +204,26 @@ static int wm831x_isink_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
- ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
- IRQF_TRIGGER_RISING, isink->name, isink);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_isink_irq,
+ IRQF_TRIGGER_RISING, isink->name,
+ isink);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, isink);
return 0;
-err_regulator:
- regulator_unregister(isink->regulator);
err:
return ret;
}
-static int wm831x_isink_remove(struct platform_device *pdev)
-{
- struct wm831x_isink *isink = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
-
- regulator_unregister(isink->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_isink_driver = {
.probe = wm831x_isink_probe,
- .remove = wm831x_isink_remove,
.driver = {
.name = "wm831x-isink",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 2205fbc2c37..46d6700467b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,10 +63,8 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
*/
static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
- .uV_step = 50000 },
- { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -279,7 +277,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm831x->regmap;
- ldo->regulator = regulator_register(&ldo->desc, &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -288,39 +287,26 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name,
- ldo);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name,
+ ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
-err_regulator:
- regulator_unregister(ldo->regulator);
err:
return ret;
}
-static int wm831x_gp_ldo_remove(struct platform_device *pdev)
-{
- struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(ldo->wm831x,
- platform_get_irq_byname(pdev, "UV")), ldo);
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_gp_ldo_driver = {
.probe = wm831x_gp_ldo_probe,
- .remove = wm831x_gp_ldo_remove,
.driver = {
.name = "wm831x-ldo",
.owner = THIS_MODULE,
@@ -332,10 +318,8 @@ static struct platform_driver wm831x_gp_ldo_driver = {
*/
static const struct regulator_linear_range wm831x_aldo_ranges[] = {
- { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
- .uV_step = 50000 },
- { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(1000000, 0, 12, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 13, 31, 100000),
};
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -505,7 +489,8 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm831x->regmap;
- ldo->regulator = regulator_register(&ldo->desc, &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -514,38 +499,25 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name, ldo);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
-err_regulator:
- regulator_unregister(ldo->regulator);
err:
return ret;
}
-static int wm831x_aldo_remove(struct platform_device *pdev)
-{
- struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
- ldo);
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_aldo_driver = {
.probe = wm831x_aldo_probe,
- .remove = wm831x_aldo_remove,
.driver = {
.name = "wm831x-aldo",
.owner = THIS_MODULE,
@@ -663,7 +635,8 @@ static int wm831x_alive_ldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm831x->regmap;
- ldo->regulator = regulator_register(&ldo->desc, &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -679,18 +652,8 @@ err:
return ret;
}
-static int wm831x_alive_ldo_remove(struct platform_device *pdev)
-{
- struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_alive_ldo_driver = {
.probe = wm831x_alive_ldo_probe,
- .remove = wm831x_alive_ldo_remove,
.driver = {
.name = "wm831x-alive-ldo",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 61ca9292a42..de7b9c73e3f 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,10 +543,8 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
}
static const struct regulator_linear_range wm8350_ldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
- .uV_step = 50000 },
- { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 15, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 16, 31, 100000),
};
static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
@@ -1206,7 +1204,8 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
config.regmap = wm8350->regmap;
/* register regulator */
- rdev = regulator_register(&wm8350_reg[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &wm8350_reg[pdev->id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s\n",
wm8350_reg[pdev->id].name);
@@ -1217,7 +1216,6 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
ret = wm8350_register_irq(wm8350, wm8350_reg[pdev->id].irq,
pmic_uv_handler, 0, "UV", rdev);
if (ret < 0) {
- regulator_unregister(rdev);
dev_err(&pdev->dev, "failed to register regulator %s IRQ\n",
wm8350_reg[pdev->id].name);
return ret;
@@ -1233,8 +1231,6 @@ static int wm8350_regulator_remove(struct platform_device *pdev)
wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev);
- regulator_unregister(rdev);
-
return 0;
}
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 58f51bec13f..82d82900085 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -20,10 +20,8 @@
#include <linux/mfd/wm8400-private.h>
static const struct regulator_linear_range wm8400_ldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
- .uV_step = 50000 },
- { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
static struct regulator_ops wm8400_ldo_ops = {
@@ -219,7 +217,8 @@ static int wm8400_regulator_probe(struct platform_device *pdev)
config.driver_data = wm8400;
config.regmap = wm8400->regmap;
- rdev = regulator_register(&regulators[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
+ &config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -228,21 +227,11 @@ static int wm8400_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int wm8400_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver wm8400_regulator_driver = {
.driver = {
.name = "wm8400-regulator",
},
.probe = wm8400_regulator_probe,
- .remove = wm8400_regulator_remove,
};
/**
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 5ee2a208457..71c5911f2e7 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -165,7 +165,9 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
ldo->init_data = *pdata->ldo[id].init_data;
}
- ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8994_ldo_desc[id],
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
@@ -181,18 +183,8 @@ err:
return ret;
}
-static int wm8994_ldo_remove(struct platform_device *pdev)
-{
- struct wm8994_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm8994_ldo_driver = {
.probe = wm8994_ldo_probe,
- .remove = wm8994_ldo_remove,
.driver = {
.name = "wm8994-ldo",
.owner = THIS_MODULE,
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index b09c75c21b6..a34b50690b4 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -30,7 +30,7 @@
#include "remoteproc_internal.h"
/* kick the remote processor, and let it know which virtqueue to poke at */
-static void rproc_virtio_notify(struct virtqueue *vq)
+static bool rproc_virtio_notify(struct virtqueue *vq)
{
struct rproc_vring *rvring = vq->priv;
struct rproc *rproc = rvring->rvdev->rproc;
@@ -39,6 +39,7 @@ static void rproc_virtio_notify(struct virtqueue *vq)
dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
rproc->ops->kick(rproc, notifyid);
+ return true;
}
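
This follows the virtio core change that made virtqueue notify callbacks return bool: false tells the core the kick could not be delivered so the virtqueue can be treated as broken, and the remoteproc transport always reports success. A hypothetical transport that can actually fail might look like the sketch below; foo_dev and its doorbell register are made up for illustration:

	#include <linux/io.h>
	#include <linux/virtio.h>

	struct foo_dev {
		void __iomem *doorbell;	/* hypothetical kick register */
		bool dead;		/* set once the device has gone away */
	};

	static bool foo_vq_notify(struct virtqueue *vq)
	{
		struct foo_dev *fdev = vq->priv;

		if (fdev->dead)
			return false;	/* lets the core mark the vq broken */

		writel(vq->index, fdev->doorbell);
		return true;
	}
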
/**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 9654aa3c05c..00773022211 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -153,6 +153,16 @@ config RTC_DRV_88PM80X
This driver can also be built as a module. If so, the module
will be called rtc-88pm80x.
+config RTC_DRV_AS3722
+ tristate "ams AS3722 RTC driver"
+ depends on MFD_AS3722
+ help
+ If you say yes here you get support for the RTC of ams AS3722 PMIC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-as3722.
+
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
help
@@ -497,6 +507,16 @@ config RTC_DRV_RV3029C2
This driver can also be built as a module. If so, the module
will be called rtc-rv3029c2.
+config RTC_DRV_S5M
+ tristate "Samsung S5M series"
+ depends on MFD_SEC_CORE
+ help
+ If you say yes here you will get support for the
+ RTC of the Samsung S5M PMIC series.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-s5m.
+
endif # I2C
comment "SPI RTC drivers"
@@ -606,7 +626,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
- depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64
+ depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64
default y if X86
help
Say "yes" here to get direct support for the real time clock
@@ -623,6 +643,14 @@ config RTC_DRV_CMOS
This driver can also be built as a module. If so, the module
will be called rtc-cmos.
+config RTC_DRV_ALPHA
+ bool "Alpha PC-style CMOS"
+ depends on ALPHA
+ default y
+ help
+ Direct support for the real-time clock found on every Alpha
+ system, specifically MC146818 compatibles. If in doubt, say Y.
+
config RTC_DRV_VRTC
tristate "Virtual RTC for Intel MID platforms"
depends on X86_INTEL_MID
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2dff3d2009b..27b4bd88406 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
+obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
@@ -107,6 +108,7 @@ obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
+obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 72c5cdbe079..544be722937 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -72,6 +72,7 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
} else
err = -EINVAL;
+ pm_stay_awake(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
@@ -113,6 +114,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
err = -EINVAL;
}
+ pm_stay_awake(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
@@ -771,9 +773,10 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
alarm.time = rtc_ktime_to_tm(timer->node.expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
- if (err == -ETIME)
+ if (err == -ETIME) {
+ pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
- else if (err) {
+ } else if (err) {
timerqueue_del(&rtc->timerqueue, &timer->node);
timer->enabled = 0;
return err;
@@ -818,8 +821,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
- if (err == -ETIME)
+ if (err == -ETIME) {
+ pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
+ }
}
}
@@ -845,7 +850,6 @@ void rtc_timer_do_work(struct work_struct *work)
mutex_lock(&rtc->ops_lock);
again:
- pm_relax(rtc->dev.parent);
__rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
while ((next = timerqueue_getnext(&rtc->timerqueue))) {
@@ -880,6 +884,7 @@ again:
} else
rtc_alarm_disable(rtc);
+ pm_relax(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
}
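
The interface.c changes rebalance the wakeup-source handling: pm_stay_awake() is now taken by whichever path queues rtc->irqwork (set_time, set_mmss, timer enqueue and remove) and pm_relax() moves to the end of rtc_timer_do_work(), so the system can no longer suspend between schedule_work() and the work actually running. The generic shape of that handoff, with hypothetical foo helpers:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/pm_wakeup.h>
	#include <linux/workqueue.h>

	struct foo_rtc {
		struct device *parent;		/* the wakeup-capable parent device */
		struct work_struct irqwork;
	};

	static void foo_queue_irqwork(struct foo_rtc *foo)
	{
		pm_stay_awake(foo->parent);	/* held across the scheduling gap */
		schedule_work(&foo->irqwork);
	}

	static void foo_irqwork_fn(struct work_struct *work)
	{
		struct foo_rtc *foo = container_of(work, struct foo_rtc, irqwork);

		/* ... process whatever expired ... */

		pm_relax(foo->parent);		/* dropped only once the work has run */
	}
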
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 354c937a586..0916089c7c3 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -251,14 +251,15 @@ static SIMPLE_DEV_PM_OPS(pm80x_rtc_pm_ops, pm80x_rtc_suspend, pm80x_rtc_resume);
static int pm80x_rtc_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm80x_platform_data *pm80x_pdata;
+ struct pm80x_platform_data *pm80x_pdata =
+ dev_get_platdata(pdev->dev.parent);
struct pm80x_rtc_pdata *pdata = NULL;
struct pm80x_rtc_info *info;
struct rtc_time tm;
unsigned long ticks = 0;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata == NULL)
dev_warn(&pdev->dev, "No platform data!\n");
@@ -326,8 +327,7 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
PM800_RTC1_USE_XO);
- if (pdev->dev.parent->platform_data) {
- pm80x_pdata = pdev->dev.parent->platform_data;
+ if (pm80x_pdata) {
pdata = pm80x_pdata->rtc;
if (pdata)
info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 4e30c85728e..816504846cd 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -316,7 +316,7 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
unsigned long ticks = 0;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info),
GFP_KERNEL);
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
new file mode 100644
index 00000000000..9cfa8170a2d
--- /dev/null
+++ b/drivers/rtc/rtc-as3722.c
@@ -0,0 +1,275 @@
+/*
+ * rtc-as3722.c - Real Time Clock driver for ams AS3722 PMICs
+ *
+ * Copyright (C) 2013 ams AG
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bcd.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/as3722.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+
+#define AS3722_RTC_START_YEAR 2000
+struct as3722_rtc {
+ struct rtc_device *rtc;
+ struct device *dev;
+ struct as3722 *as3722;
+ int alarm_irq;
+ bool irq_enable;
+};
+
+static void as3722_time_to_reg(u8 *rbuff, struct rtc_time *tm)
+{
+ rbuff[0] = bin2bcd(tm->tm_sec);
+ rbuff[1] = bin2bcd(tm->tm_min);
+ rbuff[2] = bin2bcd(tm->tm_hour);
+ rbuff[3] = bin2bcd(tm->tm_mday);
+ rbuff[4] = bin2bcd(tm->tm_mon);
+ rbuff[5] = bin2bcd(tm->tm_year - (AS3722_RTC_START_YEAR - 1900));
+}
+
+static void as3722_reg_to_time(u8 *rbuff, struct rtc_time *tm)
+{
+ tm->tm_sec = bcd2bin(rbuff[0] & 0x7F);
+ tm->tm_min = bcd2bin(rbuff[1] & 0x7F);
+ tm->tm_hour = bcd2bin(rbuff[2] & 0x3F);
+ tm->tm_mday = bcd2bin(rbuff[3] & 0x3F);
+ tm->tm_mon = bcd2bin(rbuff[4] & 0x1F);
+ tm->tm_year = (AS3722_RTC_START_YEAR - 1900) + bcd2bin(rbuff[5] & 0x7F);
+ return;
+}
+
+static int as3722_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+ struct as3722 *as3722 = as3722_rtc->as3722;
+ u8 as_time_array[6];
+ int ret;
+
+ ret = as3722_block_read(as3722, AS3722_RTC_SECOND_REG,
+ 6, as_time_array);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECOND reg block read failed %d\n", ret);
+ return ret;
+ }
+ as3722_reg_to_time(as_time_array, tm);
+ return 0;
+}
+
+static int as3722_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+ struct as3722 *as3722 = as3722_rtc->as3722;
+ u8 as_time_array[6];
+ int ret;
+
+ if (tm->tm_year < (AS3722_RTC_START_YEAR - 1900))
+ return -EINVAL;
+
+ as3722_time_to_reg(as_time_array, tm);
+ ret = as3722_block_write(as3722, AS3722_RTC_SECOND_REG, 6,
+ as_time_array);
+ if (ret < 0)
+ dev_err(dev, "RTC_SECOND reg block write failed %d\n", ret);
+ return ret;
+}
+
+static int as3722_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+
+ if (enabled && !as3722_rtc->irq_enable) {
+ enable_irq(as3722_rtc->alarm_irq);
+ as3722_rtc->irq_enable = true;
+ } else if (!enabled && as3722_rtc->irq_enable) {
+ disable_irq(as3722_rtc->alarm_irq);
+ as3722_rtc->irq_enable = false;
+ }
+ return 0;
+}
+
+static int as3722_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+ struct as3722 *as3722 = as3722_rtc->as3722;
+ u8 as_time_array[6];
+ int ret;
+
+ ret = as3722_block_read(as3722, AS3722_RTC_ALARM_SECOND_REG, 6,
+ as_time_array);
+ if (ret < 0) {
+ dev_err(dev, "RTC_ALARM_SECOND block read failed %d\n", ret);
+ return ret;
+ }
+
+ as3722_reg_to_time(as_time_array, &alrm->time);
+ return 0;
+}
+
+static int as3722_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+ struct as3722 *as3722 = as3722_rtc->as3722;
+ u8 as_time_array[6];
+ int ret;
+
+ if (alrm->time.tm_year < (AS3722_RTC_START_YEAR - 1900))
+ return -EINVAL;
+
+ ret = as3722_rtc_alarm_irq_enable(dev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Disable RTC alarm failed\n");
+ return ret;
+ }
+
+ as3722_time_to_reg(as_time_array, &alrm->time);
+ ret = as3722_block_write(as3722, AS3722_RTC_ALARM_SECOND_REG, 6,
+ as_time_array);
+ if (ret < 0) {
+ dev_err(dev, "RTC_ALARM_SECOND block write failed %d\n", ret);
+ return ret;
+ }
+
+ if (alrm->enabled)
+ ret = as3722_rtc_alarm_irq_enable(dev, alrm->enabled);
+ return ret;
+}
+
+static irqreturn_t as3722_alarm_irq(int irq, void *data)
+{
+ struct as3722_rtc *as3722_rtc = data;
+
+ rtc_update_irq(as3722_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops as3722_rtc_ops = {
+ .read_time = as3722_rtc_read_time,
+ .set_time = as3722_rtc_set_time,
+ .read_alarm = as3722_rtc_read_alarm,
+ .set_alarm = as3722_rtc_set_alarm,
+ .alarm_irq_enable = as3722_rtc_alarm_irq_enable,
+};
+
+static int as3722_rtc_probe(struct platform_device *pdev)
+{
+ struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
+ struct as3722_rtc *as3722_rtc;
+ int ret;
+
+ as3722_rtc = devm_kzalloc(&pdev->dev, sizeof(*as3722_rtc), GFP_KERNEL);
+ if (!as3722_rtc)
+ return -ENOMEM;
+
+ as3722_rtc->as3722 = as3722;
+ as3722_rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, as3722_rtc);
+
+ /* Enable the RTC to make sure it is running. */
+ ret = as3722_update_bits(as3722, AS3722_RTC_CONTROL_REG,
+ AS3722_RTC_ON | AS3722_RTC_ALARM_WAKEUP_EN,
+ AS3722_RTC_ON | AS3722_RTC_ALARM_WAKEUP_EN);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RTC_CONTROL reg write failed: %d\n", ret);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ as3722_rtc->rtc = rtc_device_register("as3722", &pdev->dev,
+ &as3722_rtc_ops, THIS_MODULE);
+ if (IS_ERR(as3722_rtc->rtc)) {
+ ret = PTR_ERR(as3722_rtc->rtc);
+ dev_err(&pdev->dev, "RTC register failed: %d\n", ret);
+ return ret;
+ }
+
+ as3722_rtc->alarm_irq = platform_get_irq(pdev, 0);
+ dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
+
+ ret = request_threaded_irq(as3722_rtc->alarm_irq, NULL,
+ as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ "rtc-alarm", as3722_rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
+ as3722_rtc->alarm_irq, ret);
+ goto scrub;
+ }
+ disable_irq(as3722_rtc->alarm_irq);
+ return 0;
+scrub:
+ rtc_device_unregister(as3722_rtc->rtc);
+ return ret;
+}
+
+static int as3722_rtc_remove(struct platform_device *pdev)
+{
+ struct as3722_rtc *as3722_rtc = platform_get_drvdata(pdev);
+
+ free_irq(as3722_rtc->alarm_irq, as3722_rtc);
+ rtc_device_unregister(as3722_rtc->rtc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int as3722_rtc_suspend(struct device *dev)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(as3722_rtc->alarm_irq);
+
+ return 0;
+}
+
+static int as3722_rtc_resume(struct device *dev)
+{
+ struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(as3722_rtc->alarm_irq);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops as3722_rtc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(as3722_rtc_suspend, as3722_rtc_resume)
+};
+
+static struct platform_driver as3722_rtc_driver = {
+ .probe = as3722_rtc_probe,
+ .remove = as3722_rtc_remove,
+ .driver = {
+ .name = "as3722-rtc",
+ .pm = &as3722_rtc_pm_ops,
+ },
+};
+module_platform_driver(as3722_rtc_driver);
+
+MODULE_DESCRIPTION("RTC driver for AS3722 PMICs");
+MODULE_ALIAS("platform:as3722-rtc");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 741892632ae..c0da95e9570 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -376,7 +376,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
return -ENXIO;
}
- at91_rtc_regs = ioremap(regs->start, resource_size(regs));
+ at91_rtc_regs = devm_ioremap(&pdev->dev, regs->start,
+ resource_size(regs));
if (!at91_rtc_regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
return -ENOMEM;
@@ -390,12 +391,12 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- ret = request_irq(irq, at91_rtc_interrupt,
+ ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
- goto err_unmap;
+ return ret;
}
/* cpu init code should really have flagged this device as
@@ -404,23 +405,14 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
- rtc = rtc_device_register(pdev->name, &pdev->dev,
+ rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&at91_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto err_free_irq;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
platform_set_drvdata(pdev, rtc);
dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
return 0;
-
-err_free_irq:
- free_irq(irq, pdev);
-err_unmap:
- iounmap(at91_rtc_regs);
-
- return ret;
}
/*
@@ -428,20 +420,22 @@ err_unmap:
*/
static int __exit at91_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
/* Disable all interrupts */
at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- free_irq(irq, pdev);
-
- rtc_device_unregister(rtc);
- iounmap(at91_rtc_regs);
return 0;
}
+static void at91_rtc_shutdown(struct platform_device *pdev)
+{
+ /* Disable all interrupts */
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ AT91_RTC_SECEV | AT91_RTC_TIMEV |
+ AT91_RTC_CALEV);
+}
+
#ifdef CONFIG_PM_SLEEP
/* AT91RM9200 RTC Power management control */
@@ -480,6 +474,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
static struct platform_driver at91_rtc_driver = {
.remove = __exit_p(at91_rtc_remove),
+ .shutdown = at91_rtc_shutdown,
.driver = {
.name = "at91_rtc",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 24e733c98f8..f14876256a4 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -595,7 +595,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
static int INITSECTION
cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
{
- struct cmos_rtc_board_info *info = dev->platform_data;
+ struct cmos_rtc_board_info *info = dev_get_platdata(dev);
int retval = 0;
unsigned char rtc_control;
unsigned address_space;
@@ -789,7 +789,6 @@ static void __exit cmos_do_remove(struct device *dev)
cmos->iomem = NULL;
cmos->dev = NULL;
- dev_set_drvdata(dev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index e00642b6107..48cb2ac3bd3 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -278,7 +278,7 @@ static int da9055_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
rtc->da9055 = dev_get_drvdata(pdev->dev.parent);
- pdata = rtc->da9055->dev->platform_data;
+ pdata = dev_get_platdata(rtc->da9055->dev);
platform_set_drvdata(pdev, rtc);
ret = da9055_rtc_device_init(rtc->da9055, pdata);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index dd6170acde9..80f323731ee 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -606,7 +606,7 @@ static int ds1305_probe(struct spi_device *spi)
struct ds1305 *ds1305;
int status;
u8 addr, value;
- struct ds1305_platform_data *pdata = spi->dev.platform_data;
+ struct ds1305_platform_data *pdata = dev_get_platdata(&spi->dev);
bool write_ctrl = false;
/* Sanity check board setup data. This may be hooked up
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index ca18fd1433b..4e75345a559 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -670,9 +670,9 @@ static int ds1307_probe(struct i2c_client *client,
int tmp;
const struct chip_desc *chip = &chips[id->driver_data];
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- int want_irq = false;
+ bool want_irq = false;
unsigned char *buf;
- struct ds1307_platform_data *pdata = client->dev.platform_data;
+ struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
@@ -956,7 +956,7 @@ read_rtc:
GFP_KERNEL);
if (!ds1307->nvram) {
err = -ENOMEM;
- goto exit;
+ goto err_irq;
}
ds1307->nvram->attr.name = "nvram";
ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
@@ -967,13 +967,15 @@ read_rtc:
ds1307->nvram_offset = chip->nvram_offset;
err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram);
if (err)
- goto exit;
+ goto err_irq;
set_bit(HAS_NVRAM, &ds1307->flags);
dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size);
}
return 0;
+err_irq:
+ free_irq(client->irq, client);
exit:
return err;
}
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 2ca5a23aba8..fc209dc4e24 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -224,7 +224,7 @@ static const struct rtc_class_ops ds2404_rtc_ops = {
static int rtc_probe(struct platform_device *pdev)
{
- struct ds2404_platform_data *pdata = pdev->dev.platform_data;
+ struct ds2404_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ds2404 *chip;
int retval = -EBUSY;
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 580e7b56bde..5e4f5dc40ba 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -42,7 +42,7 @@ struct ep93xx_rtc {
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
unsigned short *delete)
{
- struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
unsigned long comp;
comp = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
@@ -60,7 +60,7 @@ static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
unsigned long time;
time = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
@@ -71,7 +71,7 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs)
{
- struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
__raw_writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
return 0;
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 4e2a81854f5..965a9da7086 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -209,7 +209,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
platform_get_drvdata(to_platform_device(dev));
int ret;
- INIT_COMPLETION(time_state->comp_last_time);
+ reinit_completion(&time_state->comp_last_time);
/* get a report with all values through requesting one value */
sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
@@ -236,7 +236,7 @@ static const struct rtc_class_ops hid_time_rtc_ops = {
static int hid_time_probe(struct platform_device *pdev)
{
int ret = 0;
- struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
struct hid_time_state *time_state = devm_kzalloc(&pdev->dev,
sizeof(struct hid_time_state), GFP_KERNEL);
@@ -275,24 +275,44 @@ static int hid_time_probe(struct platform_device *pdev)
return ret;
}
+ ret = sensor_hub_device_open(hsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to open sensor hub device!\n");
+ goto err_open;
+ }
+
+ /*
+ * Enable HID input processing early in order to be able to read the
+ * clock already in devm_rtc_device_register().
+ */
+ hid_device_io_start(hsdev->hdev);
+
time_state->rtc = devm_rtc_device_register(&pdev->dev,
"hid-sensor-time", &hid_time_rtc_ops,
THIS_MODULE);
if (IS_ERR_OR_NULL(time_state->rtc)) {
+ hid_device_io_stop(hsdev->hdev);
ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
time_state->rtc = NULL;
- sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
dev_err(&pdev->dev, "rtc device register failed!\n");
+ goto err_rtc;
}
return ret;
+
+err_rtc:
+ sensor_hub_device_close(hsdev);
+err_open:
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
+ return ret;
}
static int hid_time_remove(struct platform_device *pdev)
{
- struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
+ sensor_hub_device_close(hsdev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
return 0;
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index c016ad81767..c3c549d511b 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -144,11 +144,7 @@ isl1208_i2c_validate_client(struct i2c_client *client)
static int
isl1208_i2c_get_sr(struct i2c_client *client)
{
- int sr = i2c_smbus_read_byte_data(client, ISL1208_REG_SR);
- if (sr < 0)
- return -EIO;
-
- return sr;
+ return i2c_smbus_read_byte_data(client, ISL1208_REG_SR);
}
static int
@@ -647,10 +643,11 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
"chip found, driver version " DRV_VERSION "\n");
if (client->irq > 0) {
- rc = request_threaded_irq(client->irq, NULL,
- isl1208_rtc_interrupt,
- IRQF_SHARED,
- isl1208_driver.driver.name, client);
+ rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ isl1208_rtc_interrupt,
+ IRQF_SHARED,
+ isl1208_driver.driver.name,
+ client);
if (!rc) {
device_init_wakeup(&client->dev, 1);
enable_irq_wake(client->irq);
@@ -662,20 +659,18 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- rtc = rtc_device_register(isl1208_driver.driver.name,
- &client->dev, &isl1208_rtc_ops,
+ rtc = devm_rtc_device_register(&client->dev, isl1208_driver.driver.name,
+ &isl1208_rtc_ops,
THIS_MODULE);
- if (IS_ERR(rtc)) {
- rc = PTR_ERR(rtc);
- goto exit_free_irq;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
i2c_set_clientdata(client, rtc);
rc = isl1208_i2c_get_sr(client);
if (rc < 0) {
dev_err(&client->dev, "reading status failed\n");
- goto exit_unregister;
+ return rc;
}
if (rc & ISL1208_REG_SR_RTCF)
@@ -684,28 +679,15 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
if (rc)
- goto exit_unregister;
+ return rc;
return 0;
-
-exit_unregister:
- rtc_device_unregister(rtc);
-exit_free_irq:
- if (client->irq)
- free_irq(client->irq, client);
-
- return rc;
}
static int
isl1208_remove(struct i2c_client *client)
{
- struct rtc_device *rtc = i2c_get_clientdata(client);
-
sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
- rtc_device_unregister(rtc);
- if (client->irq)
- free_irq(client->irq, client);
return 0;
}
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index fcb03291f14..11880c1e9da 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -68,7 +68,7 @@ m48t59_mem_readb(struct device *dev, u32 ofs)
static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
unsigned long flags;
u8 val;
@@ -111,7 +111,7 @@ static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
unsigned long flags;
u8 val = 0;
@@ -158,7 +158,7 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
struct rtc_time *tm = &alrm->time;
unsigned long flags;
@@ -205,7 +205,7 @@ static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
struct rtc_time *tm = &alrm->time;
u8 mday, hour, min, sec;
@@ -266,7 +266,7 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
unsigned long flags;
@@ -283,7 +283,7 @@ static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
unsigned long flags;
u8 val;
@@ -304,7 +304,7 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
{
struct device *dev = (struct device *)dev_id;
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
u8 event;
@@ -340,7 +340,7 @@ static ssize_t m48t59_nvram_read(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
ssize_t cnt = 0;
unsigned long flags;
@@ -360,7 +360,7 @@ static ssize_t m48t59_nvram_write(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
ssize_t cnt = 0;
unsigned long flags;
@@ -385,7 +385,7 @@ static struct bin_attribute m48t59_nvram_attr = {
static int m48t59_rtc_probe(struct platform_device *pdev)
{
- struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct m48t59_private *m48t59 = NULL;
struct resource *res;
int ret = -ENOMEM;
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 2d30314fa07..32f64c94262 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -46,7 +46,7 @@ static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned char reg;
struct platform_device *pdev = to_platform_device(dev);
- struct m48t86_ops *ops = pdev->dev.platform_data;
+ struct m48t86_ops *ops = dev_get_platdata(&pdev->dev);
reg = ops->readbyte(M48T86_REG_B);
@@ -84,7 +84,7 @@ static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char reg;
struct platform_device *pdev = to_platform_device(dev);
- struct m48t86_ops *ops = pdev->dev.platform_data;
+ struct m48t86_ops *ops = dev_get_platdata(&pdev->dev);
reg = ops->readbyte(M48T86_REG_B);
@@ -123,7 +123,7 @@ static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned char reg;
struct platform_device *pdev = to_platform_device(dev);
- struct m48t86_ops *ops = pdev->dev.platform_data;
+ struct m48t86_ops *ops = dev_get_platdata(&pdev->dev);
reg = ops->readbyte(M48T86_REG_B);
@@ -147,7 +147,7 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
static int m48t86_rtc_probe(struct platform_device *dev)
{
unsigned char reg;
- struct m48t86_ops *ops = dev->dev.platform_data;
+ struct m48t86_ops *ops = dev_get_platdata(&dev->dev);
struct rtc_device *rtc;
rtc = devm_rtc_device_register(&dev->dev, "m48t86",
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 55969b1b771..4804985b876 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -164,14 +164,7 @@ static int max6900_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
static int max6900_i2c_clear_write_protect(struct i2c_client *client)
{
- int rc;
- rc = i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0);
- if (rc < 0) {
- dev_err(&client->dev, "%s: control register write failed\n",
- __func__);
- return -EIO;
- }
- return 0;
+ return i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0);
}
static int
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 9c8f6090379..dc4f14255cc 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -14,7 +14,9 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 578baf9d972..e2436d14017 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -38,8 +38,8 @@
#include <asm-generic/rtc.h>
#include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
struct mrst_rtc {
struct rtc_device *rtc;
@@ -380,7 +380,6 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
cleanup1:
rtc_device_unregister(mrst_rtc.rtc);
cleanup0:
- dev_set_drvdata(dev, NULL);
mrst_rtc.dev = NULL;
release_mem_region(iomem->start, resource_size(iomem));
dev_err(dev, "rtc-mrst: unable to initialise\n");
@@ -412,7 +411,6 @@ static void rtc_mrst_do_remove(struct device *dev)
mrst->iomem = NULL;
mrst->dev = NULL;
- dev_set_drvdata(dev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c7d97ee5932..26de5f8c2ae 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -553,7 +553,7 @@ static struct platform_driver omap_rtc_driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.pm = &omap_rtc_pm_ops,
- .of_match_table = of_match_ptr(omap_rtc_of_match),
+ .of_match_table = omap_rtc_of_match,
},
.id_table = omap_rtc_devtype,
};
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 1725b5090e3..d1953bb244c 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -327,7 +327,7 @@ kfree_exit:
static int pcf2123_remove(struct spi_device *spi)
{
- struct pcf2123_plat_data *pdata = spi->dev.platform_data;
+ struct pcf2123_plat_data *pdata = dev_get_platdata(&spi->dev);
int i;
if (pdata) {
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 22bacdbf913..f85a1a93e66 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -106,7 +106,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto err_req;
- rtc = kmalloc(sizeof(*rtc), GFP_KERNEL);
+ rtc = devm_kzalloc(&dev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc) {
ret = -ENOMEM;
goto err_rtc;
@@ -115,7 +115,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
- goto err_map;
+ goto err_rtc;
}
__raw_writel(0, rtc->base + RTC_CR);
@@ -141,8 +141,6 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
free_irq(dev->irq[0], rtc);
err_irq:
iounmap(rtc->base);
- err_map:
- kfree(rtc);
err_rtc:
amba_release_regions(dev);
err_req:
@@ -153,14 +151,11 @@ static int pl030_remove(struct amba_device *dev)
{
struct pl030_rtc *rtc = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
writel(0, rtc->base + RTC_CR);
free_irq(dev->irq[0], rtc);
rtc_device_unregister(rtc->rtc);
iounmap(rtc->base);
- kfree(rtc);
amba_release_regions(dev);
return 0;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 0f0609b1aa2..99181fff88f 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -305,7 +305,6 @@ static int pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
- amba_set_drvdata(adev, NULL);
free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
iounmap(ldata->base);
@@ -371,6 +370,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ device_init_wakeup(&adev->dev, 1);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
@@ -384,15 +384,12 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_irq;
}
- device_init_wakeup(&adev->dev, 1);
-
return 0;
out_no_irq:
rtc_device_unregister(ldata->rtc);
out_no_rtc:
iounmap(ldata->base);
- amba_set_drvdata(adev, NULL);
out_no_remap:
kfree(ldata);
out:
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 402732cfb32..1ecfe3bd92a 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -53,11 +53,11 @@ static irqreturn_t puv3_rtc_tickirq(int irq, void *id)
}
/* Update control registers */
-static void puv3_rtc_setaie(int to)
+static void puv3_rtc_setaie(struct device *dev, int to)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __func__, to);
+ dev_dbg(dev, "%s: aie=%d\n", __func__, to);
tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE;
@@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
{
unsigned int tmp;
- pr_debug("%s: pie=%d\n", __func__, enabled);
+ dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
spin_lock_irq(&puv3_rtc_pie_lock);
tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
@@ -90,7 +90,7 @@ static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
- pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
+ dev_dbg(dev, "read time %02x.%02x.%02x %02x/%02x/%02x\n",
rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
@@ -101,7 +101,7 @@ static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
{
unsigned long rtc_count = 0;
- pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
+ dev_dbg(dev, "set time %02d.%02d.%02d %02d/%02d/%02d\n",
tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -119,7 +119,7 @@ static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
- pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+ dev_dbg(dev, "read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
alrm->enabled,
alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
@@ -132,7 +132,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
unsigned long rtcalarm_count = 0;
- pr_debug("puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+ dev_dbg(dev, "puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
alrm->enabled,
tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
@@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
rtc_tm_to_time(tm, &rtcalarm_count);
writel(rtcalarm_count, RTC_RTAR);
- puv3_rtc_setaie(alrm->enabled);
+ puv3_rtc_setaie(dev, alrm->enabled);
if (alrm->enabled)
enable_irq_wake(puv3_rtc_alarmno);
@@ -227,7 +227,7 @@ static int puv3_rtc_remove(struct platform_device *dev)
rtc_device_unregister(rtc);
puv3_rtc_setpie(&dev->dev, 0);
- puv3_rtc_setaie(0);
+ puv3_rtc_setaie(&dev->dev, 0);
release_resource(puv3_rtc_mem);
kfree(puv3_rtc_mem);
@@ -241,7 +241,7 @@ static int puv3_rtc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- pr_debug("%s: probe=%p\n", __func__, pdev);
+ dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);
/* find the IRQs */
puv3_rtc_tickno = platform_get_irq(pdev, 1);
@@ -256,7 +256,7 @@ static int puv3_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- pr_debug("PKUnity_rtc: tick irq %d, alarm irq %d\n",
+ dev_dbg(&pdev->dev, "PKUnity_rtc: tick irq %d, alarm irq %d\n",
puv3_rtc_tickno, puv3_rtc_alarmno);
/* get the memory region */
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index f7a90a116a3..090a101c1c8 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -64,7 +64,7 @@ static int
rs5c348_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
- struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+ struct rs5c348_plat_data *pdata = dev_get_platdata(&spi->dev);
u8 txbuf[5+7], *txp;
int ret;
@@ -100,7 +100,7 @@ static int
rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
- struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+ struct rs5c348_plat_data *pdata = dev_get_platdata(&spi->dev);
u8 txbuf[5], rxbuf[7];
int ret;
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
new file mode 100644
index 00000000000..b7fd02bc0a1
--- /dev/null
+++ b/drivers/rtc/rtc-s5m.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * Copyright (C) 2013 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/bcd.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/rtc.h>
+
+struct s5m_rtc_info {
+ struct device *dev;
+ struct sec_pmic_dev *s5m87xx;
+ struct regmap *rtc;
+ struct rtc_device *rtc_dev;
+ int irq;
+ int device_type;
+ int rtc_24hr_mode;
+ bool wtsr_smpl;
+};
+
+static void s5m8767_data_to_tm(u8 *data, struct rtc_time *tm,
+ int rtc_24hr_mode)
+{
+ tm->tm_sec = data[RTC_SEC] & 0x7f;
+ tm->tm_min = data[RTC_MIN] & 0x7f;
+ if (rtc_24hr_mode) {
+ tm->tm_hour = data[RTC_HOUR] & 0x1f;
+ } else {
+ tm->tm_hour = data[RTC_HOUR] & 0x0f;
+ if (data[RTC_HOUR] & HOUR_PM_MASK)
+ tm->tm_hour += 12;
+ }
+
+ tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f);
+ tm->tm_mday = data[RTC_DATE] & 0x1f;
+ tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
+ tm->tm_year = (data[RTC_YEAR1] & 0x7f) + 100;
+ tm->tm_yday = 0;
+ tm->tm_isdst = 0;
+}
+
+static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = tm->tm_sec;
+ data[RTC_MIN] = tm->tm_min;
+
+ if (tm->tm_hour >= 12)
+ data[RTC_HOUR] = tm->tm_hour | HOUR_PM_MASK;
+ else
+ data[RTC_HOUR] = tm->tm_hour & ~HOUR_PM_MASK;
+
+ data[RTC_WEEKDAY] = 1 << tm->tm_wday;
+ data[RTC_DATE] = tm->tm_mday;
+ data[RTC_MONTH] = tm->tm_mon + 1;
+ data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+
+ if (tm->tm_year < 100) {
+ pr_err("s5m8767 RTC cannot handle the year %d.\n",
+ 1900 + tm->tm_year);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+}
+
+static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
+{
+ int ret;
+ unsigned int data;
+
+ ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to read update reg(%d)\n", ret);
+ return ret;
+ }
+
+ data |= RTC_TIME_EN_MASK;
+ data |= RTC_UDR_MASK;
+
+ ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to write update reg(%d)\n", ret);
+ return ret;
+ }
+
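+ /* Poll until the hardware clears the update request (UDR) bit. */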
+ do {
+ ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ } while ((data & RTC_UDR_MASK) && !ret);
+
+ return ret;
+}
+
+static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
+{
+ int ret;
+ unsigned int data;
+
+ ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read update reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ data &= ~RTC_TIME_EN_MASK;
+ data |= RTC_UDR_MASK;
+
+ ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write update reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ do {
+ ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ } while ((data & RTC_UDR_MASK) && !ret);
+
+ return ret;
+}
+
+static void s5m8763_data_to_tm(u8 *data, struct rtc_time *tm)
+{
+ tm->tm_sec = bcd2bin(data[RTC_SEC]);
+ tm->tm_min = bcd2bin(data[RTC_MIN]);
+
+ if (data[RTC_HOUR] & HOUR_12) {
+ tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x1f);
+ if (data[RTC_HOUR] & HOUR_PM)
+ tm->tm_hour += 12;
+ } else {
+ tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x3f);
+ }
+
+ tm->tm_wday = data[RTC_WEEKDAY] & 0x07;
+ tm->tm_mday = bcd2bin(data[RTC_DATE]);
+ tm->tm_mon = bcd2bin(data[RTC_MONTH]);
+ tm->tm_year = bcd2bin(data[RTC_YEAR1]) + bcd2bin(data[RTC_YEAR2]) * 100;
+ tm->tm_year -= 1900;
+}
+
+static void s5m8763_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = bin2bcd(tm->tm_sec);
+ data[RTC_MIN] = bin2bcd(tm->tm_min);
+ data[RTC_HOUR] = bin2bcd(tm->tm_hour);
+ data[RTC_WEEKDAY] = tm->tm_wday;
+ data[RTC_DATE] = bin2bcd(tm->tm_mday);
+ data[RTC_MONTH] = bin2bcd(tm->tm_mon);
+ data[RTC_YEAR1] = bin2bcd(tm->tm_year % 100);
+ data[RTC_YEAR2] = bin2bcd((tm->tm_year + 1900) / 100);
+}
+
+static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ int ret;
+
+ ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ switch (info->device_type) {
+ case S5M8763X:
+ s5m8763_data_to_tm(data, tm);
+ break;
+
+ case S5M8767X:
+ s5m8767_data_to_tm(data, tm, info->rtc_24hr_mode);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+
+ return rtc_valid_tm(tm);
+}
+
+static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ int ret = 0;
+
+ switch (info->device_type) {
+ case S5M8763X:
+ s5m8763_tm_to_data(tm, data);
+ break;
+ case S5M8767X:
+ ret = s5m8767_tm_to_data(tm, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+
+ ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ ret = s5m8767_rtc_set_time_reg(info);
+
+ return ret;
+}
+
+static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ unsigned int val;
+ int ret, i;
+
+ ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ switch (info->device_type) {
+ case S5M8763X:
+ s5m8763_data_to_tm(data, &alrm->time);
+ ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val);
+ if (ret < 0)
+ return ret;
+
+ alrm->enabled = !!val;
+
+ ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case S5M8767X:
+ s5m8767_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+ dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + alrm->time.tm_year, 1 + alrm->time.tm_mon,
+ alrm->time.tm_mday, alrm->time.tm_hour,
+ alrm->time.tm_min, alrm->time.tm_sec,
+ alrm->time.tm_wday);
+
+ alrm->enabled = 0;
+ for (i = 0; i < 7; i++) {
+ if (data[i] & ALARM_ENABLE_MASK) {
+ alrm->enabled = 1;
+ break;
+ }
+ }
+
+ alrm->pending = 0;
+ ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+ if (ret < 0)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (val & ALARM0_STATUS)
+ alrm->pending = 1;
+ else
+ alrm->pending = 0;
+
+ return 0;
+}
+
+static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
+{
+ u8 data[8];
+ int ret, i;
+ struct rtc_time tm;
+
+ ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ s5m8767_data_to_tm(data, &tm, info->rtc_24hr_mode);
+ dev_dbg(info->dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_wday);
+
+ switch (info->device_type) {
+ case S5M8763X:
+ ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0);
+ break;
+
+ case S5M8767X:
+ for (i = 0; i < 7; i++)
+ data[i] &= ~ALARM_ENABLE_MASK;
+
+ ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ ret = s5m8767_rtc_set_alarm_reg(info);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
+{
+ int ret;
+ u8 data[8];
+ u8 alarm0_conf;
+ struct rtc_time tm;
+
+ ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ s5m8767_data_to_tm(data, &tm, info->rtc_24hr_mode);
+ dev_dbg(info->dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_wday);
+
+ switch (info->device_type) {
+ case S5M8763X:
+ alarm0_conf = 0x77;
+ ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf);
+ break;
+
+ case S5M8767X:
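+ /* Enable matching on seconds/minutes/hours and on any non-zero date fields. */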
+ data[RTC_SEC] |= ALARM_ENABLE_MASK;
+ data[RTC_MIN] |= ALARM_ENABLE_MASK;
+ data[RTC_HOUR] |= ALARM_ENABLE_MASK;
+ data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+ if (data[RTC_DATE] & 0x1f)
+ data[RTC_DATE] |= ALARM_ENABLE_MASK;
+ if (data[RTC_MONTH] & 0xf)
+ data[RTC_MONTH] |= ALARM_ENABLE_MASK;
+ if (data[RTC_YEAR1] & 0x7f)
+ data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
+
+ ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+ ret = s5m8767_rtc_set_alarm_reg(info);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[8];
+ int ret;
+
+ switch (info->device_type) {
+ case S5M8763X:
+ s5m8763_tm_to_data(&alrm->time, data);
+ break;
+
+ case S5M8767X:
+ s5m8767_tm_to_data(&alrm->time, data);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + alrm->time.tm_year, 1 + alrm->time.tm_mon,
+ alrm->time.tm_mday, alrm->time.tm_hour, alrm->time.tm_min,
+ alrm->time.tm_sec, alrm->time.tm_wday);
+
+ ret = s5m_rtc_stop_alarm(info);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ if (ret < 0)
+ return ret;
+
+ ret = s5m8767_rtc_set_alarm_reg(info);
+ if (ret < 0)
+ return ret;
+
+ if (alrm->enabled)
+ ret = s5m_rtc_start_alarm(info);
+
+ return ret;
+}
+
+static int s5m_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+
+ if (enabled)
+ return s5m_rtc_start_alarm(info);
+ else
+ return s5m_rtc_stop_alarm(info);
+}
+
+static irqreturn_t s5m_rtc_alarm_irq(int irq, void *data)
+{
+ struct s5m_rtc_info *info = data;
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops s5m_rtc_ops = {
+ .read_time = s5m_rtc_read_time,
+ .set_time = s5m_rtc_set_time,
+ .read_alarm = s5m_rtc_read_alarm,
+ .set_alarm = s5m_rtc_set_alarm,
+ .alarm_irq_enable = s5m_rtc_alarm_irq_enable,
+};
+
+static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
+{
+ int ret;
+ ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+ WTSR_ENABLE_MASK,
+ enable ? WTSR_ENABLE_MASK : 0);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
+ __func__, ret);
+}
+
+static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
+{
+ int ret;
+ ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+ SMPL_ENABLE_MASK,
+ enable ? SMPL_ENABLE_MASK : 0);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
+ __func__, ret);
+}
+
+static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
+{
+ u8 data[2];
+ unsigned int tp_read;
+ int ret;
+ struct rtc_time tm;
+
+ ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read control reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Set RTC control register : Binary mode, 24hour mode */
+ data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+ info->rtc_24hr_mode = 1;
+ ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* On first boot, set the RTC time to 2012-01-01 00:00:00 (Sunday) */
+ if ((tp_read & RTC_TCON_MASK) == 0) {
+ dev_dbg(info->dev, "rtc init\n");
+ tm.tm_sec = 0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_wday = 0;
+ tm.tm_mday = 1;
+ tm.tm_mon = 0;
+ tm.tm_year = 112;
+ tm.tm_yday = 0;
+ tm.tm_isdst = 0;
+ ret = s5m_rtc_set_time(info->dev, &tm);
+ }
+
+ ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON,
+ RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int s5m_rtc_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
+ struct sec_platform_data *pdata = s5m87xx->pdata;
+ struct s5m_rtc_info *info;
+ int ret;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "Platform data not supplied\n");
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ info->s5m87xx = s5m87xx;
+ info->rtc = s5m87xx->rtc;
+ info->device_type = s5m87xx->device_type;
+ info->wtsr_smpl = s5m87xx->wtsr_smpl;
+
+ switch (pdata->device_type) {
+ case S5M8763X:
+ info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0;
+ break;
+
+ case S5M8767X:
+ info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1;
+ break;
+
+ default:
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Unsupported device type: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = s5m8767_rtc_init_reg(info);
+
+ if (info->wtsr_smpl) {
+ s5m_rtc_enable_wtsr(info, true);
+ s5m_rtc_enable_smpl(info, true);
+ }
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
+ &s5m_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ s5m_rtc_alarm_irq, 0, "rtc-alarm0",
+ info);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->irq, ret);
+
+ return ret;
+}
+
+static void s5m_rtc_shutdown(struct platform_device *pdev)
+{
+ struct s5m_rtc_info *info = platform_get_drvdata(pdev);
+ int i;
+ unsigned int val = 0;
+ if (info->wtsr_smpl) {
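+ /* Retry disabling WTSR a few times, verifying via the control register. */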
+ for (i = 0; i < 3; i++) {
+ s5m_rtc_enable_wtsr(info, false);
+ regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val);
+ pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
+ if (val & WTSR_ENABLE_MASK)
+ pr_emerg("%s: fail to disable WTSR\n",
+ __func__);
+ else {
+ pr_info("%s: success to disable WTSR\n",
+ __func__);
+ break;
+ }
+ }
+ }
+ /* Disable SMPL on power off */
+ s5m_rtc_enable_smpl(info, false);
+}
+
+static const struct platform_device_id s5m_rtc_id[] = {
+ { "s5m-rtc", 0 },
+};
+
+static struct platform_driver s5m_rtc_driver = {
+ .driver = {
+ .name = "s5m-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = s5m_rtc_probe,
+ .shutdown = s5m_rtc_shutdown,
+ .id_table = s5m_rtc_id,
+};
+
+module_platform_driver(s5m_rtc_driver);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5M RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s5m-rtc");
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 6d87e26355a..d0d2b047658 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -649,8 +649,9 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
clk_enable(rtc->clk);
rtc->capabilities = RTC_DEF_CAPABILITIES;
- if (pdev->dev.platform_data) {
- struct sh_rtc_platform_info *pinfo = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev)) {
+ struct sh_rtc_platform_info *pinfo =
+ dev_get_platdata(&pdev->dev);
/*
* Some CPUs have special capabilities in addition to the
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index 63460cf80f1..3eb3642ae29 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -59,7 +59,7 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
unsigned long rtc_alarm, rtc_count;
struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+ rtcdrv = dev_get_drvdata(dev);
local_irq_disable();
@@ -94,7 +94,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
{
unsigned long rtc_status_reg, rtc_alarm;
struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+ rtcdrv = dev_get_drvdata(dev);
if (alrm->enabled) {
rtc_tm_to_time(&(alrm->time), &rtc_alarm);
@@ -157,7 +157,7 @@ static int sirfsoc_rtc_read_time(struct device *dev,
{
unsigned long tmp_rtc = 0;
struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+ rtcdrv = dev_get_drvdata(dev);
/*
* This patch is taken from WinCE - Need to validate this for
* correctness. To work around sirfsoc RTC counter double sync logic
@@ -178,7 +178,7 @@ static int sirfsoc_rtc_set_time(struct device *dev,
{
unsigned long rtc_time;
struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+ rtcdrv = dev_get_drvdata(dev);
rtc_tm_to_time(tm, &rtc_time);
@@ -274,7 +274,7 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base);
if (err) {
dev_err(&pdev->dev, "unable to find base address of rtc node in dtb\n");
- goto error;
+ return err;
}
platform_set_drvdata(pdev, rtcdrv);
@@ -290,7 +290,7 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
rtc_div = ((32768 / RTC_HZ) / 2) - 1;
sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
- rtcdrv->rtc = rtc_device_register(pdev->name, &(pdev->dev),
+ rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&sirfsoc_rtc_ops, THIS_MODULE);
if (IS_ERR(rtcdrv->rtc)) {
err = PTR_ERR(rtcdrv->rtc);
@@ -322,24 +322,15 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
rtcdrv);
if (err) {
dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n");
- goto error;
+ return err;
}
return 0;
-
-error:
- if (rtcdrv->rtc)
- rtc_device_unregister(rtcdrv->rtc);
-
- return err;
}
static int sirfsoc_rtc_remove(struct platform_device *pdev)
{
- struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
-
device_init_wakeup(&pdev->dev, 0);
- rtc_device_unregister(rtcdrv->rtc);
return 0;
}
@@ -373,7 +364,7 @@ static int sirfsoc_rtc_thaw(struct device *dev)
{
u32 tmp;
struct sirfsoc_rtc_drv *rtcdrv;
- rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+ rtcdrv = dev_get_drvdata(dev);
/*
* if resume from snapshot and the rtc power is losed,
@@ -467,7 +458,7 @@ static struct platform_driver sirfsoc_rtc_driver = {
#ifdef CONFIG_PM
.pm = &sirfsoc_rtc_pm_ops,
#endif
- .of_match_table = of_match_ptr(sirfsoc_rtc_of_match),
+ .of_match_table = sirfsoc_rtc_of_match,
},
.probe = sirfsoc_rtc_probe,
.remove = sirfsoc_rtc_remove,
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 316a342115b..fa384fe2898 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -329,7 +329,7 @@ static struct platform_driver snvs_rtc_driver = {
.name = "snvs_rtc",
.owner = THIS_MODULE,
.pm = &snvs_rtc_pm_ops,
- .of_match_table = of_match_ptr(snvs_dt_ids),
+ .of_match_table = snvs_dt_ids,
},
.probe = snvs_rtc_probe,
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 26019531db1..ea96492357b 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -343,7 +343,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
.name = "stmp3xxx-rtc",
.owner = THIS_MODULE,
.pm = &stmp3xxx_rtc_pm_ops,
- .of_match_table = of_match_ptr(rtc_dt_ids),
+ .of_match_table = rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index a9caf043b0c..7af00208d63 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -22,7 +22,6 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index d07d8982302..25222cdccdc 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -303,7 +303,7 @@ static const struct rtc_class_ops v3020_rtc_ops = {
static int rtc_probe(struct platform_device *pdev)
{
- struct v3020_platform_data *pdata = pdev->dev.platform_data;
+ struct v3020_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct v3020 *chip;
int retval = -EBUSY;
int i;
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 54e104e197e..aabc22c587f 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -27,11 +28,10 @@
#include <linux/rtc.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/log2.h>
#include <asm/div64.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index c2d6331fc71..df2ef3eba7c 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -228,7 +228,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
if (vt8500_rtc->irq_alarm < 0) {
dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
- return -ENXIO;
+ return vt8500_rtc->irq_alarm;
}
vt8500_rtc->res = devm_request_mem_region(&pdev->dev,
@@ -296,7 +296,7 @@ static struct platform_driver vt8500_rtc_driver = {
.driver = {
.name = "vt8500-rtc",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(wmt_dt_ids),
+ .of_match_table = wmt_dt_ids,
},
};
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 451bf99582f..f302efa937e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -698,10 +698,11 @@ static void dasd_profile_start(struct dasd_block *block,
}
spin_lock(&block->profile.lock);
- if (block->profile.data)
+ if (block->profile.data) {
block->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
block->profile.data->dasd_read_nr_req[counter]++;
+ }
spin_unlock(&block->profile.lock);
/*
@@ -2978,12 +2979,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
elevator_exit(block->request_queue->elevator);
block->request_queue->elevator = NULL;
+ mutex_lock(&block->request_queue->sysfs_lock);
rc = elevator_init(block->request_queue, "deadline");
- if (rc) {
+ if (rc)
blk_cleanup_queue(block->request_queue);
- return rc;
- }
- return 0;
+ mutex_unlock(&block->request_queue->sysfs_lock);
+ return rc;
}
/*
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 5d73e6e49af..d0ab5019d88 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -118,22 +118,6 @@ static void scm_request_done(struct scm_request *scmrq)
spin_unlock_irqrestore(&list_lock, flags);
}
-static int scm_open(struct block_device *blkdev, fmode_t mode)
-{
- return scm_get_ref();
-}
-
-static void scm_release(struct gendisk *gendisk, fmode_t mode)
-{
- scm_put_ref();
-}
-
-static const struct block_device_operations scm_blk_devops = {
- .owner = THIS_MODULE,
- .open = scm_open,
- .release = scm_release,
-};
-
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
@@ -223,8 +207,12 @@ static void scm_blk_request(struct request_queue *rq)
int ret;
while ((req = blk_peek_request(rq))) {
- if (req->cmd_type != REQ_TYPE_FS)
+ if (req->cmd_type != REQ_TYPE_FS) {
+ blk_start_request(req);
+ blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
+ blk_end_request_all(req, -EIO);
continue;
+ }
if (!scm_permit_request(bdev, req)) {
scm_ensure_queue_restart(bdev);
@@ -252,7 +240,7 @@ static void scm_blk_request(struct request_queue *rq)
atomic_inc(&bdev->queued_reqs);
blk_start_request(req);
- ret = scm_start_aob(scmrq->aob);
+ ret = eadm_start_aob(scmrq->aob);
if (ret) {
SCM_LOG(5, "no subchannel");
scm_request_requeue(scmrq);
@@ -316,7 +304,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
}
restart:
- if (!scm_start_aob(scmrq->aob))
+ if (!eadm_start_aob(scmrq->aob))
return;
requeue:
@@ -359,6 +347,10 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
blk_run_queue(bdev->rq);
}
+static const struct block_device_operations scm_blk_devops = {
+ .owner = THIS_MODULE,
+};
+
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
struct request_queue *rq;
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 8b387b32fd6..e59331e6c2e 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -107,7 +107,7 @@ extern debug_info_t *scm_debug;
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
- if (level > scm_debug->level)
+ if (!debug_level_enabled(scm_debug, level))
return;
while (length > 0) {
debug_event(scm_debug, level, data, length);
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index c0d102e3a48..27f930cd657 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -187,7 +187,7 @@ bool scm_need_cluster_request(struct scm_request *scmrq)
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
scm_prepare_cluster_request(scmrq);
- if (scm_start_aob(scmrq->aob))
+ if (eadm_start_aob(scmrq->aob))
scm_request_requeue(scmrq);
}
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 17821a026c9..b69ab17f13f 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,8 @@
#
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
- sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o
+ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
+ sclp_early.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index f93cc32eb81..71e97473801 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -564,6 +564,7 @@ static void __exit
fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
+ device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 4600aa10a1c..668b32b0dc1 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
struct appldata_product_id id;
int rc;
- strcpy(id.prod_nr, "LNXAPPL");
+ strncpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 24a08e8f19e..2cdec21e892 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -615,10 +615,10 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
if (rp->state != RAW3270_STATE_RESET)
return;
- if (rq && rq->rc) {
+ if (rq->rc) {
/* Reset command failed. */
rp->state = RAW3270_STATE_INIT;
- } else if (0 && MACHINE_IS_VM) {
+ } else if (MACHINE_IS_VM) {
raw3270_size_device_vm(rp);
raw3270_size_device_done(rp);
} else
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 40d1406289e..6fbe09686d1 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -99,6 +99,7 @@ struct init_sccb {
} __attribute__((packed));
extern u64 sclp_facilities;
+
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
@@ -179,6 +180,10 @@ void sclp_sdias_exit(void);
extern int sclp_console_pages;
extern int sclp_console_drop;
extern unsigned long sclp_console_full;
+extern u8 sclp_fac84;
+extern unsigned long long sclp_rzm;
+extern unsigned long long sclp_rnmax;
+extern __initdata int sclp_early_read_info_sccb_valid;
/* useful inlines */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 77df9cb0068..eaa21d542c5 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -28,168 +28,6 @@
#include "sclp.h"
-#define SCLP_CMDW_READ_SCP_INFO 0x00020001
-#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
-
-struct read_info_sccb {
- struct sccb_header header; /* 0-7 */
- u16 rnmax; /* 8-9 */
- u8 rnsize; /* 10 */
- u8 _reserved0[24 - 11]; /* 11-15 */
- u8 loadparm[8]; /* 24-31 */
- u8 _reserved1[48 - 32]; /* 32-47 */
- u64 facilities; /* 48-55 */
- u8 _reserved2[84 - 56]; /* 56-83 */
- u8 fac84; /* 84 */
- u8 fac85; /* 85 */
- u8 _reserved3[91 - 86]; /* 86-90 */
- u8 flags; /* 91 */
- u8 _reserved4[100 - 92]; /* 92-99 */
- u32 rnsize2; /* 100-103 */
- u64 rnmax2; /* 104-111 */
- u8 _reserved5[4096 - 112]; /* 112-4095 */
-} __attribute__((packed, aligned(PAGE_SIZE)));
-
-static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
-static struct read_info_sccb __initdata early_read_info_sccb;
-static int __initdata early_read_info_sccb_valid;
-
-u64 sclp_facilities;
-static u8 sclp_fac84;
-static unsigned long long rzm;
-static unsigned long long rnmax;
-
-static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
-{
- int rc;
-
- __ctl_set_bit(0, 9);
- rc = sclp_service_call(cmd, sccb);
- if (rc)
- goto out;
- __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
- PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
- local_irq_disable();
-out:
- /* Contents of the sccb might have changed. */
- barrier();
- __ctl_clear_bit(0, 9);
- return rc;
-}
-
-static void __init sclp_read_info_early(void)
-{
- int rc;
- int i;
- struct read_info_sccb *sccb;
- sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
- SCLP_CMDW_READ_SCP_INFO};
-
- sccb = &early_read_info_sccb;
- for (i = 0; i < ARRAY_SIZE(commands); i++) {
- do {
- memset(sccb, 0, sizeof(*sccb));
- sccb->header.length = sizeof(*sccb);
- sccb->header.function_code = 0x80;
- sccb->header.control_mask[2] = 0x80;
- rc = sclp_cmd_sync_early(commands[i], sccb);
- } while (rc == -EBUSY);
-
- if (rc)
- break;
- if (sccb->header.response_code == 0x10) {
- early_read_info_sccb_valid = 1;
- break;
- }
- if (sccb->header.response_code != 0x1f0)
- break;
- }
-}
-
-static void __init sclp_event_mask_early(void)
-{
- struct init_sccb *sccb = &early_event_mask_sccb;
- int rc;
-
- do {
- memset(sccb, 0, sizeof(*sccb));
- sccb->header.length = sizeof(*sccb);
- sccb->mask_length = sizeof(sccb_mask_t);
- rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
- } while (rc == -EBUSY);
-}
-
-void __init sclp_facilities_detect(void)
-{
- struct read_info_sccb *sccb;
-
- sclp_read_info_early();
- if (!early_read_info_sccb_valid)
- return;
-
- sccb = &early_read_info_sccb;
- sclp_facilities = sccb->facilities;
- sclp_fac84 = sccb->fac84;
- if (sccb->fac85 & 0x02)
- S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
- rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
- rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
- rzm <<= 20;
-
- sclp_event_mask_early();
-}
-
-bool __init sclp_has_linemode(void)
-{
- struct init_sccb *sccb = &early_event_mask_sccb;
-
- if (sccb->header.response_code != 0x20)
- return 0;
- if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- return 1;
-}
-
-bool __init sclp_has_vt220(void)
-{
- struct init_sccb *sccb = &early_event_mask_sccb;
-
- if (sccb->header.response_code != 0x20)
- return 0;
- if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
- return 1;
- return 0;
-}
-
-unsigned long long sclp_get_rnmax(void)
-{
- return rnmax;
-}
-
-unsigned long long sclp_get_rzm(void)
-{
- return rzm;
-}
-
-/*
- * This function will be called after sclp_facilities_detect(), which gets
- * called from early.c code. Therefore the sccb should have valid contents.
- */
-void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
-{
- struct read_info_sccb *sccb;
-
- if (!early_read_info_sccb_valid)
- return;
- sccb = &early_read_info_sccb;
- info->is_valid = 1;
- if (sccb->flags & 0x2)
- info->has_dump = 1;
- memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
-}
-
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
@@ -356,14 +194,14 @@ struct assign_storage_sccb {
int arch_get_memory_phys_device(unsigned long start_pfn)
{
- if (!rzm)
+ if (!sclp_rzm)
return 0;
- return PFN_PHYS(start_pfn) >> ilog2(rzm);
+ return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm);
}
static unsigned long long rn2addr(u16 rn)
{
- return (unsigned long long) (rn - 1) * rzm;
+ return (unsigned long long) (rn - 1) * sclp_rzm;
}
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
@@ -404,7 +242,7 @@ static int sclp_assign_storage(u16 rn)
if (rc)
return rc;
start = rn2addr(rn);
- storage_key_init_range(start, start + rzm);
+ storage_key_init_range(start, start + sclp_rzm);
return 0;
}
@@ -462,7 +300,7 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size,
istart = rn2addr(incr->rn);
if (start + size - 1 < istart)
break;
- if (start > istart + rzm - 1)
+ if (start > istart + sclp_rzm - 1)
continue;
if (online)
rc |= sclp_assign_storage(incr->rn);
@@ -526,7 +364,7 @@ static void __init add_memory_merged(u16 rn)
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
- size = (unsigned long long ) num * rzm;
+ size = (unsigned long long) num * sclp_rzm;
if (start >= VMEM_MAX_PHYS)
goto skip_add;
if (start + size > VMEM_MAX_PHYS)
@@ -574,7 +412,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
}
if (!assigned)
new_incr->rn = last_rn + 1;
- if (new_incr->rn > rnmax) {
+ if (new_incr->rn > sclp_rnmax) {
kfree(new_incr);
return;
}
@@ -617,7 +455,7 @@ static int __init sclp_detect_standby_memory(void)
if (OLDMEM_BASE) /* No standby memory in kdump mode */
return 0;
- if (!early_read_info_sccb_valid)
+ if (!sclp_early_read_info_sccb_valid)
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
@@ -661,7 +499,7 @@ static int __init sclp_detect_standby_memory(void)
}
if (rc || list_empty(&sclp_mem_list))
goto out;
- for (i = 1; i <= rnmax - assigned; i++)
+ for (i = 1; i <= sclp_rnmax - assigned; i++)
insert_increment(0, 1, 0);
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
new file mode 100644
index 00000000000..f7aa080e9b2
--- /dev/null
+++ b/drivers/s390/char/sclp_early.c
@@ -0,0 +1,264 @@
+/*
+ * SCLP early driver
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#define KMSG_COMPONENT "sclp_early"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <asm/ctl_reg.h>
+#include <asm/sclp.h>
+#include <asm/ipl.h>
+#include "sclp_sdias.h"
+#include "sclp.h"
+
+#define SCLP_CMDW_READ_SCP_INFO 0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
+
+struct read_info_sccb {
+ struct sccb_header header; /* 0-7 */
+ u16 rnmax; /* 8-9 */
+ u8 rnsize; /* 10 */
+ u8 _reserved0[24 - 11]; /* 11-15 */
+ u8 loadparm[8]; /* 24-31 */
+ u8 _reserved1[48 - 32]; /* 32-47 */
+ u64 facilities; /* 48-55 */
+ u8 _reserved2[84 - 56]; /* 56-83 */
+ u8 fac84; /* 84 */
+ u8 fac85; /* 85 */
+ u8 _reserved3[91 - 86]; /* 86-90 */
+ u8 flags; /* 91 */
+ u8 _reserved4[100 - 92]; /* 92-99 */
+ u32 rnsize2; /* 100-103 */
+ u64 rnmax2; /* 104-111 */
+ u8 _reserved5[4096 - 112]; /* 112-4095 */
+} __packed __aligned(PAGE_SIZE);
+
+static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE);
+static __initdata struct read_info_sccb early_read_info_sccb;
+static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
+static unsigned long sclp_hsa_size;
+
+__initdata int sclp_early_read_info_sccb_valid;
+u64 sclp_facilities;
+u8 sclp_fac84;
+unsigned long long sclp_rzm;
+unsigned long long sclp_rnmax;
+
+static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
+{
+ int rc;
+
+ __ctl_set_bit(0, 9);
+ rc = sclp_service_call(cmd, sccb);
+ if (rc)
+ goto out;
+ __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
+ PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
+ local_irq_disable();
+out:
+ /* Contents of the sccb might have changed. */
+ barrier();
+ __ctl_clear_bit(0, 9);
+ return rc;
+}
+
+static void __init sclp_read_info_early(void)
+{
+ int rc;
+ int i;
+ struct read_info_sccb *sccb;
+ sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+ SCLP_CMDW_READ_SCP_INFO};
+
+ sccb = &early_read_info_sccb;
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ do {
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->header.function_code = 0x80;
+ sccb->header.control_mask[2] = 0x80;
+ rc = sclp_cmd_sync_early(commands[i], sccb);
+ } while (rc == -EBUSY);
+
+ if (rc)
+ break;
+ if (sccb->header.response_code == 0x10) {
+ sclp_early_read_info_sccb_valid = 1;
+ break;
+ }
+ if (sccb->header.response_code != 0x1f0)
+ break;
+ }
+}
+
+static void __init sclp_facilities_detect(void)
+{
+ struct read_info_sccb *sccb;
+
+ sclp_read_info_early();
+ if (!sclp_early_read_info_sccb_valid)
+ return;
+
+ sccb = &early_read_info_sccb;
+ sclp_facilities = sccb->facilities;
+ sclp_fac84 = sccb->fac84;
+ if (sccb->fac85 & 0x02)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+ sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+ sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+ sclp_rzm <<= 20;
+}
+
+bool __init sclp_has_linemode(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+
+ if (sccb->header.response_code != 0x20)
+ return 0;
+ if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ return 1;
+}
+
+bool __init sclp_has_vt220(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+
+ if (sccb->header.response_code != 0x20)
+ return 0;
+ if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+ return 1;
+ return 0;
+}
+
+unsigned long long sclp_get_rnmax(void)
+{
+ return sclp_rnmax;
+}
+
+unsigned long long sclp_get_rzm(void)
+{
+ return sclp_rzm;
+}
+
+/*
+ * This function will be called after sclp_facilities_detect(), which gets
+ * called from early.c code. Therefore the sccb should have valid contents.
+ */
+void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
+{
+ struct read_info_sccb *sccb;
+
+ if (!sclp_early_read_info_sccb_valid)
+ return;
+ sccb = &early_read_info_sccb;
+ info->is_valid = 1;
+ if (sccb->flags & 0x2)
+ info->has_dump = 1;
+ memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
+}
+
+static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
+{
+ int rc;
+
+ do {
+ rc = sclp_cmd_sync_early(cmd, sccb);
+ } while (rc == -EBUSY);
+
+ if (rc)
+ return -EIO;
+ if (((struct sccb_header *) sccb)->response_code != 0x0020)
+ return -EIO;
+ return 0;
+}
+
+static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
+{
+ memset(sccb, 0, sizeof(*sccb));
+
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.dbs = 1;
+}
+
+static int __init sclp_set_event_mask(unsigned long receive_mask,
+ unsigned long send_mask)
+{
+ struct init_sccb *sccb = (void *) &sccb_early;
+
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->mask_length = sizeof(sccb_mask_t);
+ sccb->receive_mask = receive_mask;
+ sccb->send_mask = send_mask;
+ return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
+}
+
+static long __init sclp_hsa_size_init(void)
+{
+ struct sdias_sccb *sccb = (void *) &sccb_early;
+
+ sccb_init_eq_size(sccb);
+ if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
+ return -EIO;
+ if (sccb->evbuf.blk_cnt != 0)
+ return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
+ return 0;
+}
+
+static long __init sclp_hsa_copy_wait(void)
+{
+ struct sccb_header *sccb = (void *) &sccb_early;
+
+ memset(sccb, 0, PAGE_SIZE);
+ sccb->length = PAGE_SIZE;
+ if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
+ return -EIO;
+ return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
+}
+
+unsigned long sclp_get_hsa_size(void)
+{
+ return sclp_hsa_size;
+}
+
+static void __init sclp_hsa_size_detect(void)
+{
+ long size;
+
+ /* First try synchronous interface (LPAR) */
+ if (sclp_set_event_mask(0, 0x40000010))
+ return;
+ size = sclp_hsa_size_init();
+ if (size < 0)
+ return;
+ if (size != 0)
+ goto out;
+ /* Then try asynchronous interface (z/VM) */
+ if (sclp_set_event_mask(0x00000010, 0x40000010))
+ return;
+ size = sclp_hsa_size_init();
+ if (size < 0)
+ return;
+ size = sclp_hsa_copy_wait();
+ if (size < 0)
+ return;
+out:
+ sclp_hsa_size = size;
+}
+
+void __init sclp_early_detect(void)
+{
+ sclp_facilities_detect();
+ sclp_hsa_size_detect();
+ sclp_set_event_mask(0, 0);
+}
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index b1032931a1c..561a0414b35 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -1,7 +1,7 @@
/*
- * Sclp "store data in absolut storage"
+ * SCLP "store data in absolute storage"
*
- * Copyright IBM Corp. 2003, 2007
+ * Copyright IBM Corp. 2003, 2013
* Author(s): Michael Holzheu
*/
@@ -14,6 +14,7 @@
#include <asm/debug.h>
#include <asm/ipl.h>
+#include "sclp_sdias.h"
#include "sclp.h"
#include "sclp_rw.h"
@@ -22,46 +23,12 @@
#define SDIAS_RETRIES 300
#define SDIAS_SLEEP_TICKS 50
-#define EQ_STORE_DATA 0x0
-#define EQ_SIZE 0x1
-#define DI_FCP_DUMP 0x0
-#define ASA_SIZE_32 0x0
-#define ASA_SIZE_64 0x1
-#define EVSTATE_ALL_STORED 0x0
-#define EVSTATE_NO_DATA 0x3
-#define EVSTATE_PART_STORED 0x10
-
static struct debug_info *sdias_dbf;
static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
-struct sdias_evbuf {
- struct evbuf_header hdr;
- u8 event_qual;
- u8 data_id;
- u64 reserved2;
- u32 event_id;
- u16 reserved3;
- u8 asa_size;
- u8 event_status;
- u32 reserved4;
- u32 blk_cnt;
- u64 asa;
- u32 reserved5;
- u32 fbn;
- u32 reserved6;
- u32 lbn;
- u16 reserved7;
- u16 dbs;
-} __attribute__((packed));
-
-struct sdias_sccb {
- struct sccb_header hdr;
- struct sdias_evbuf evbuf;
-} __attribute__((packed));
-
static struct sdias_sccb sccb __attribute__((aligned(4096)));
static struct sdias_evbuf sdias_evbuf;
@@ -148,8 +115,8 @@ int sclp_sdias_blk_count(void)
sccb.hdr.length = sizeof(sccb);
sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.event_qual = EQ_SIZE;
- sccb.evbuf.data_id = DI_FCP_DUMP;
+ sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
+ sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
sccb.evbuf.dbs = 1;
@@ -208,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb.evbuf.hdr.type = EVTYP_SDIAS;
sccb.evbuf.hdr.flags = 0;
- sccb.evbuf.event_qual = EQ_STORE_DATA;
- sccb.evbuf.data_id = DI_FCP_DUMP;
+ sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
+ sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
#ifdef CONFIG_64BIT
- sccb.evbuf.asa_size = ASA_SIZE_64;
+ sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
#else
- sccb.evbuf.asa_size = ASA_SIZE_32;
+ sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32;
#endif
sccb.evbuf.event_status = 0;
sccb.evbuf.blk_cnt = nr_blks;
@@ -240,20 +207,19 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
}
switch (sdias_evbuf.event_status) {
- case EVSTATE_ALL_STORED:
- TRACE("all stored\n");
- break;
- case EVSTATE_PART_STORED:
- TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
- break;
- case EVSTATE_NO_DATA:
- TRACE("no data\n");
- /* fall through */
- default:
- pr_err("Error from SCLP while copying hsa. "
- "Event status = %x\n",
- sdias_evbuf.event_status);
- rc = -EIO;
+ case SDIAS_EVSTATE_ALL_STORED:
+ TRACE("all stored\n");
+ break;
+ case SDIAS_EVSTATE_PART_STORED:
+ TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
+ break;
+ case SDIAS_EVSTATE_NO_DATA:
+ TRACE("no data\n");
+ /* fall through */
+ default:
+ pr_err("Error from SCLP while copying hsa. Event status = %x\n",
+ sdias_evbuf.event_status);
+ rc = -EIO;
}
out:
mutex_unlock(&sdias_mutex);
diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h
new file mode 100644
index 00000000000..f2431c41415
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.h
@@ -0,0 +1,46 @@
+/*
+ * SCLP "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003, 2013
+ */
+
+#ifndef SCLP_SDIAS_H
+#define SCLP_SDIAS_H
+
+#include "sclp.h"
+
+#define SDIAS_EQ_STORE_DATA 0x0
+#define SDIAS_EQ_SIZE 0x1
+#define SDIAS_DI_FCP_DUMP 0x0
+#define SDIAS_ASA_SIZE_32 0x0
+#define SDIAS_ASA_SIZE_64 0x1
+#define SDIAS_EVSTATE_ALL_STORED 0x0
+#define SDIAS_EVSTATE_NO_DATA 0x3
+#define SDIAS_EVSTATE_PART_STORED 0x10
+
+struct sdias_evbuf {
+ struct evbuf_header hdr;
+ u8 event_qual;
+ u8 data_id;
+ u64 reserved2;
+ u32 event_id;
+ u16 reserved3;
+ u8 asa_size;
+ u8 event_status;
+ u32 reserved4;
+ u32 blk_cnt;
+ u64 asa;
+ u32 reserved5;
+ u32 fbn;
+ u32 reserved6;
+ u32 lbn;
+ u16 reserved7;
+ u16 dbs;
+} __packed;
+
+struct sdias_sccb {
+ struct sccb_header hdr;
+ struct sdias_evbuf evbuf;
+} __packed;
+
+#endif /* SCLP_SDIAS_H */
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 794820a123d..3d8e4d63f51 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -151,7 +151,7 @@ static int __init init_cpu_info(enum arch_id arch)
/* get info for boot cpu from lowcore, stored in the HSA */
- sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+ sa = dump_save_area_create(0);
if (!sa)
return -ENOMEM;
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
@@ -159,7 +159,6 @@ static int __init init_cpu_info(enum arch_id arch)
kfree(sa);
return -EIO;
}
- zfcpdump_save_areas[0] = sa;
return 0;
}
@@ -246,24 +245,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
unsigned long end;
- int i = 0;
+ int i;
if (count == 0)
return 0;
end = start + count;
- while (zfcpdump_save_areas[i]) {
+ for (i = 0; i < dump_save_areas.count; i++) {
unsigned long cp_start, cp_end; /* copy range */
unsigned long sa_start, sa_end; /* save area range */
unsigned long prefix;
unsigned long sa_off, len, buf_off;
+ struct save_area *save_area = dump_save_areas.areas[i];
- prefix = zfcpdump_save_areas[i]->pref_reg;
+ prefix = save_area->pref_reg;
sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
if ((end < sa_start) || (start > sa_end))
- goto next;
+ continue;
cp_start = max(start, sa_start);
cp_end = min(end, sa_end);
@@ -272,10 +272,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
len = cp_end - cp_start;
TRACE("copy_lc for: %lx\n", start);
- if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+ if (copy_lc(buf + buf_off, save_area, sa_off, len))
return -EFAULT;
-next:
- i++;
}
return 0;
}
@@ -330,9 +328,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
mem_offs = 0;
/* Copy from HSA data */
- if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
- size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
- - mem_start));
+ if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) {
+ size = min((count - hdr_count),
+ (size_t) (sclp_get_hsa_size() - mem_start));
rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
if (rc)
goto fail;
@@ -492,7 +490,7 @@ static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
static char str[18];
if (hsa_available)
- snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE);
+ snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size());
else
snprintf(str, sizeof(str), "0\n");
return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
@@ -586,17 +584,9 @@ static int __init sys_info_init(enum arch_id arch, unsigned long mem_end)
static int __init check_sdias(void)
{
- int rc, act_hsa_size;
-
- rc = sclp_sdias_blk_count();
- if (rc < 0) {
+ if (!sclp_get_hsa_size()) {
TRACE("Could not determine HSA size\n");
- return rc;
- }
- act_hsa_size = (rc - 1) * PAGE_SIZE;
- if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
- TRACE("HSA size too small: %i\n", act_hsa_size);
- return -EINVAL;
+ return -ENODEV;
}
return 0;
}
@@ -637,8 +627,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
hdr->num_pages = mem_size / PAGE_SIZE;
hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- prefix = zfcpdump_save_areas[i]->pref_reg;
+ for (i = 0; i < dump_save_areas.count; i++) {
+ prefix = dump_save_areas.areas[i]->pref_reg;
hdr->real_cpu_cnt++;
if (!prefix)
continue;
@@ -664,7 +654,7 @@ static int __init zcore_reipl_init(void)
ipl_block = (void *) __get_free_page(GFP_KERNEL);
if (!ipl_block)
return -ENOMEM;
- if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
+ if (ipib_info.ipib < sclp_get_hsa_size())
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index d028fd800c9..f055df0b167 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -194,15 +194,14 @@ EXPORT_SYMBOL(airq_iv_release);
*/
unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
if (!iv->avail)
return -1UL;
spin_lock(&iv->lock);
- bit = find_first_bit_left(iv->avail, iv->bits);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
if (bit < iv->bits) {
- clear_bit(bit ^ be_to_le, iv->avail);
+ clear_bit_inv(bit, iv->avail);
if (bit >= iv->end)
iv->end = bit + 1;
} else
@@ -220,19 +219,17 @@ EXPORT_SYMBOL(airq_iv_alloc_bit);
*/
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
-
if (!iv->avail)
return;
spin_lock(&iv->lock);
/* Clear (possibly left over) interrupt bit */
- clear_bit(bit ^ be_to_le, iv->vector);
+ clear_bit_inv(bit, iv->vector);
/* Make the bit position available again */
- set_bit(bit ^ be_to_le, iv->avail);
+ set_bit_inv(bit, iv->avail);
if (bit == iv->end - 1) {
/* Find new end of bit-field */
while (--iv->end > 0)
- if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+ if (!test_bit_inv(iv->end - 1, iv->avail))
break;
}
spin_unlock(&iv->lock);
@@ -251,15 +248,13 @@ EXPORT_SYMBOL(airq_iv_free_bit);
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
/* Find non-zero bit starting from 'ivs->next'. */
- bit = find_next_bit_left(iv->vector, end, start);
+ bit = find_next_bit_inv(iv->vector, end, start);
if (bit >= end)
return -1UL;
- /* Clear interrupt bit (find left uses big-endian bit numbers) */
- clear_bit(bit ^ be_to_le, iv->vector);
+ clear_bit_inv(bit, iv->vector);
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index d9eddcba7e8..3a2ee4a740b 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel_stat.h>
+#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;
static void EADM_LOG_HEX(int level, void *data, int length)
{
- if (level > eadm_debug->level)
+ if (!debug_level_enabled(eadm_debug, level))
return;
while (length > 0) {
debug_event(eadm_debug, level, data, length);
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
}
scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
}
static struct subchannel *eadm_get_idle_sch(void)
@@ -186,7 +190,7 @@ static struct subchannel *eadm_get_idle_sch(void)
return NULL;
}
-static int eadm_start_aob(struct aob *aob)
+int eadm_start_aob(struct aob *aob)
{
struct eadm_private *private;
struct subchannel *sch;
@@ -214,6 +218,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(eadm_start_aob);
static int eadm_subchannel_probe(struct subchannel *sch)
{
@@ -255,13 +260,32 @@ out:
static void eadm_quiesce(struct subchannel *sch)
{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
int ret;
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
do {
- spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
- spin_unlock_irq(sch->lock);
} while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
}
static int eadm_subchannel_remove(struct subchannel *sch)
@@ -357,11 +381,6 @@ static struct css_driver eadm_subchannel_driver = {
.restore = eadm_subchannel_restore,
};
-static struct eadm_ops eadm_ops = {
- .eadm_start = eadm_start_aob,
- .owner = THIS_MODULE,
-};
-
static int __init eadm_sch_init(void)
{
int ret;
@@ -381,7 +400,6 @@ static int __init eadm_sch_init(void)
if (ret)
goto cleanup;
- register_eadm_ops(&eadm_ops);
return ret;
cleanup:
@@ -392,7 +410,6 @@ cleanup:
static void __exit eadm_sch_exit(void)
{
- unregister_eadm_ops(&eadm_ops);
css_driver_unregister(&eadm_subchannel_driver);
isc_unregister(EADM_SCH_ISC);
debug_unregister(eadm_debug);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
index 2779be09398..9664e4653f9 100644
--- a/drivers/s390/cio/eadm_sch.h
+++ b/drivers/s390/cio/eadm_sch.h
@@ -1,6 +1,7 @@
#ifndef EADM_SCH_H
#define EADM_SCH_H
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/list.h>
@@ -9,9 +10,10 @@
struct eadm_private {
union orb orb;
enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
struct timer_list timer;
struct list_head head;
- struct subchannel *sch;
} __aligned(8);
#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 647b422bb22..dfac9bfefea 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -16,12 +16,6 @@
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error;
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
#define DBF_DEV_EVENT(level, device, text...) \
do { \
char debug_buffer[QDIO_DBF_LEN]; \
- if (qdio_dbf_passes(device->debug_area, level)) { \
+ if (debug_level_enabled(device->debug_area, level)) { \
snprintf(debug_buffer, QDIO_DBF_LEN, text); \
debug_text_event(device->debug_area, level, debug_buffer); \
} \
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index bbd3e511c77..3e602e8affa 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -528,7 +528,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- if (atomic_sub(count, &q->nr_buf_used) == 0)
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 46ec25632e8..15268edc54a 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -15,8 +15,6 @@
#include "chsc.h"
static struct device *scm_root;
-static struct eadm_ops *eadm_ops;
-static DEFINE_MUTEX(eadm_ops_mutex);
#define to_scm_dev(n) container_of(n, struct scm_device, dev)
#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
@@ -73,49 +71,6 @@ void scm_driver_unregister(struct scm_driver *scmdrv)
}
EXPORT_SYMBOL_GPL(scm_driver_unregister);
-int scm_get_ref(void)
-{
- int ret = 0;
-
- mutex_lock(&eadm_ops_mutex);
- if (!eadm_ops || !try_module_get(eadm_ops->owner))
- ret = -ENOENT;
- mutex_unlock(&eadm_ops_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(scm_get_ref);
-
-void scm_put_ref(void)
-{
- mutex_lock(&eadm_ops_mutex);
- module_put(eadm_ops->owner);
- mutex_unlock(&eadm_ops_mutex);
-}
-EXPORT_SYMBOL_GPL(scm_put_ref);
-
-void register_eadm_ops(struct eadm_ops *ops)
-{
- mutex_lock(&eadm_ops_mutex);
- eadm_ops = ops;
- mutex_unlock(&eadm_ops_mutex);
-}
-EXPORT_SYMBOL_GPL(register_eadm_ops);
-
-void unregister_eadm_ops(struct eadm_ops *ops)
-{
- mutex_lock(&eadm_ops_mutex);
- eadm_ops = NULL;
- mutex_unlock(&eadm_ops_mutex);
-}
-EXPORT_SYMBOL_GPL(unregister_eadm_ops);
-
-int scm_start_aob(struct aob *aob)
-{
- return eadm_ops->eadm_start(aob);
-}
-EXPORT_SYMBOL_GPL(scm_start_aob);
-
void scm_irq_handler(struct aob *aob, int error)
{
struct aob_rq_header *aobrq = (void *) aob->request.data;
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 841ea72e4a4..28d9349de1a 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -11,12 +11,6 @@
/* that gives us 15 characters in the text event views */
#define ZCRYPT_DBF_LEN 16
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -25,7 +19,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_COMMON(level, text...) \
do { \
- if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+ if (debug_level_enabled(zcrypt_dbf_common, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_common, level, \
@@ -35,7 +29,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_DEVICES(level, text...) \
do { \
- if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+ if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_devices, level, \
@@ -45,7 +39,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_DEV(level, device, text...) \
do { \
- if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+ if (debug_level_enabled(device->dbf_area, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(device->dbf_area, level, \
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index af2166fa515..1abd0db2991 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -166,11 +166,15 @@ static void kvm_reset(struct virtio_device *vdev)
* make a hypercall. We hand the address of the virtqueue so the Host
* knows which virtqueue we're talking about.
*/
-static void kvm_notify(struct virtqueue *vq)
+static bool kvm_notify(struct virtqueue *vq)
{
+ long rc;
struct kvm_vqconfig *config = vq->priv;
- kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+ rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+ if (rc < 0)
+ return false;
+ return true;
}
/*
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 779dc513629..d6297176ab8 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -162,7 +162,7 @@ static inline long do_kvm_notify(struct subchannel_id schid,
return __rc;
}
-static void virtio_ccw_kvm_notify(struct virtqueue *vq)
+static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
struct virtio_ccw_vq_info *info = vq->priv;
struct virtio_ccw_device *vcdev;
@@ -171,6 +171,9 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq)
vcdev = to_vc_device(info->vq->vdev);
ccw_device_get_schid(vcdev->cdev, &schid);
info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
+ if (info->cookie < 0)
+ return false;
+ return true;
}
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1bc5904df19..3339b9b607b 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -114,15 +114,9 @@ do { \
debug_event(claw_dbf_##name,level,(void*)(addr),len); \
} while (0)
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define CLAW_DBF_TEXT_(level,name,text...) \
do { \
- if (claw_dbf_passes(claw_dbf_##name, level)) { \
+ if (debug_level_enabled(claw_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(claw_dbf_##name, level, \
debug_buffer); \
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 6514e1cb3f1..8363f1c966e 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -66,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
char dbf_txt_buf[64];
va_list args;
- if (level > (ctcm_dbf[dbf_nix].id)->level)
+ if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 8c03392ac83..150fcb4cebc 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -16,15 +16,9 @@ do { \
debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
} while (0)
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define LCS_DBF_TEXT_(level,name,text...) \
do { \
- if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+ if (debug_level_enabled(lcs_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(lcs_dbf_##name, level, debug_buffer); \
} \
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 279ad504ec3..9b333fcf1a4 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -105,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
- if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+ if (debug_level_enabled(iucv_dbf_##name, level)) { \
char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
sprintf(__buf, text); \
debug_text_event(iucv_dbf_##name, level, __buf); \
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0a328d0d11b..eb4e1f809fe 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4451,7 +4451,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct qeth_snmp_ureq *ureq;
- int req_len;
+ unsigned int req_len;
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
@@ -4467,6 +4467,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
/* skip 4 bytes (data_len struct member) to get req_len */
if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
return -EFAULT;
+ if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
+ sizeof(struct qeth_ipacmd_hdr) -
+ sizeof(struct qeth_ipacmd_setadpparms_hdr)))
+ return -EINVAL;
ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
if (IS_ERR(ureq)) {
QETH_CARD_TEXT(card, 2, "snmpnome");
@@ -5096,7 +5100,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
char dbf_txt_buf[32];
va_list args;
- if (level > id->level)
+ if (!debug_level_enabled(id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 132a905b6bd..0ca64484cfa 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -344,7 +344,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
/**
* zfcp_dbf_san_req - trace event for issued SAN request
- * @tag: indentifier for event
+ * @tag: identifier for event
* @fsf_req: request containing issued CT data
* d_id: destination ID
*/
@@ -361,7 +361,7 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
/**
* zfcp_dbf_san_res - trace event for received SAN request
- * @tag: indentifier for event
+ * @tag: identifier for event
* @fsf_req: request containing issued CT data
*/
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
@@ -377,7 +377,7 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
/**
* zfcp_dbf_san_in_els - trace event for incoming ELS
- * @tag: indentifier for event
+ * @tag: identifier for event
* @fsf_req: request containing issued CT data
*/
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3ac7a4b30dd..0be3d48681a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -278,7 +278,7 @@ struct zfcp_dbf {
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
- if (level <= req->adapter->dbf->hba->level)
+ if (debug_level_enabled(req->adapter->dbf->hba, level))
zfcp_dbf_hba_fsf_res(tag, req);
}
@@ -317,7 +317,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
struct zfcp_adapter *adapter = (struct zfcp_adapter *)
scmd->device->host->hostdata[0];
- if (level <= adapter->dbf->scsi->level)
+ if (debug_level_enabled(adapter->dbf->scsi, level))
zfcp_dbf_scsi(tag, scmd, req);
}
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 757eb0716d4..972f8176665 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -26,8 +26,8 @@
*/
-#define blogic_drvr_version "2.1.16"
-#define blogic_drvr_date "18 July 2002"
+#define blogic_drvr_version "2.1.17"
+#define blogic_drvr_date "12 September 2013"
#include <linux/module.h>
#include <linux/init.h>
@@ -311,12 +311,14 @@ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
caller.
*/
-static void blogic_dealloc_ccb(struct blogic_ccb *ccb)
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
{
struct blogic_adapter *adapter = ccb->adapter;
- scsi_dma_unmap(ccb->command);
- pci_unmap_single(adapter->pci_device, ccb->sensedata,
+ if (ccb->command != NULL)
+ scsi_dma_unmap(ccb->command);
+ if (dma_unmap)
+ pci_unmap_single(adapter->pci_device, ccb->sensedata,
ccb->sense_datalen, PCI_DMA_FROMDEVICE);
ccb->command = NULL;
@@ -2762,8 +2764,8 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Place CCB back on the Host Adapter's free list.
*/
- blogic_dealloc_ccb(ccb);
-#if 0 /* this needs to be redone different for new EH */
+ blogic_dealloc_ccb(ccb, 1);
+#if 0 /* this needs to be redone differently for new EH */
/*
Bus Device Reset CCBs have the command field
non-NULL only when a Bus Device Reset was requested
@@ -2791,7 +2793,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (ccb->status == BLOGIC_CCB_RESET &&
ccb->tgt_id == tgt_id) {
command = ccb->command;
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
adapter->active_cmds[tgt_id]--;
command->result = DID_RESET << 16;
command->scsi_done(command);
@@ -2862,7 +2864,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Place CCB back on the Host Adapter's free list.
*/
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
/*
Call the SCSI Command Completion Routine.
*/
@@ -3034,6 +3036,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
int buflen = scsi_bufflen(command);
int count;
struct blogic_ccb *ccb;
+ dma_addr_t sense_buf;
/*
SCSI REQUEST_SENSE commands will be executed automatically by the
@@ -3179,10 +3182,17 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
}
memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
- ccb->sensedata = pci_map_single(adapter->pci_device,
+ ccb->command = command;
+ sense_buf = pci_map_single(adapter->pci_device,
command->sense_buffer, ccb->sense_datalen,
PCI_DMA_FROMDEVICE);
- ccb->command = command;
+ if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
+ blogic_err("DMA mapping for sense data buffer failed\n",
+ adapter);
+ blogic_dealloc_ccb(ccb, 0);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ ccb->sensedata = sense_buf;
command->scsi_done = comp_cb;
if (blogic_multimaster_type(adapter)) {
/*
@@ -3203,7 +3213,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
ccb)) {
blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
command->scsi_done(command);
}
@@ -3337,7 +3347,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
if (ccb->status == BLOGIC_CCB_ACTIVE)
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
/*
* Wait a few seconds between the Host Adapter Hard Reset which
* initiates a SCSI Bus Reset and issuing any SCSI Commands. Some
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index d85ac1a9d2c..fbcd48d0bfc 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
+ if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+ (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
rcode = -EINVAL;
goto cleanup;
}
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index c67e401954c..d8145888e66 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2511,8 +2511,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %u, host_no %d, last_reset %d,\n",
- s->host_busy, s->host_no, (unsigned)s->last_reset);
+ printk(" host_busy %u, host_no %d,\n",
+ s->host_busy, s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n",
- shost->host_busy, shost->last_reset, shost->max_id,
+ " host_busy %u, max_id %u, max_lun %u, max_channel %u\n",
+ shost->host_busy, shost->max_id,
shost->max_lun, shost->max_channel);
seq_printf(m,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 33c52bc2c7b..97fd450aff0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1035,7 +1035,6 @@ static void arcmsr_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
scsi_host_put(host);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void arcmsr_shutdown(struct pci_dev *pdev)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 15a629d8ed0..a795d81ef87 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3144,8 +3144,6 @@ static void atp870u_remove (struct pci_dev *pdev)
atp870u_free_tables(pshost);
printk(KERN_INFO "scsi_host_put : %p\n",pshost);
scsi_host_put(pshost);
- printk(KERN_INFO "pci_set_drvdata : %p\n",pdev);
- pci_set_drvdata(pdev, NULL);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 777e7c0bbb4..2e28f6c419f 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -128,7 +128,7 @@ struct be_ctrl_info {
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout 120000 /* 5s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index e66aa7c11a8..3338391b64d 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -17,9 +17,9 @@
#include <scsi/iscsi_proto.h>
+#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"
-#include "be_main.h"
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
@@ -158,8 +158,10 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
struct be_cmd_resp_hdr *ioctl_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- if (beiscsi_error(phba))
+ if (beiscsi_error(phba)) {
+ free_mcc_tag(&phba->ctrl, tag);
return -EIO;
+ }
/* wait for the mccq completion */
rc = wait_event_interruptible_timeout(
@@ -173,7 +175,11 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n");
- rc = -EAGAIN;
+ rc = -EBUSY;
+
+ /* decrement the mccq used count */
+ atomic_dec(&phba->ctrl.mcc_obj.q.used);
+
goto release_mcc_tag;
} else
rc = 0;
@@ -208,10 +214,18 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
- if (ioctl_resp_hdr->response_length)
- goto release_mcc_tag;
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : Insufficent Buffer Error "
+ "Resp_Len : %d Actual_Resp_Len : %d\n",
+ ioctl_resp_hdr->response_length,
+ ioctl_resp_hdr->actual_resp_len);
+
+ rc = -EAGAIN;
+ goto release_mcc_tag;
}
- rc = -EAGAIN;
+ rc = -EIO;
}
release_mcc_tag:
@@ -363,7 +377,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_UP;
+ phba->state = BE_ADAPTER_LINK_UP;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -486,33 +500,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
**/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- uint32_t wait = 0;
+ unsigned long timeout;
+ bool read_flag = false;
+ int ret = 0, i;
u32 ready;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
- do {
+ if (beiscsi_error(phba))
+ return -EIO;
- if (beiscsi_error(phba))
- return -EIO;
+ timeout = jiffies + (HZ * 110);
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
- if (ready)
- break;
+ do {
+ for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+ ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ if (ready) {
+ read_flag = true;
+ break;
+ }
+ mdelay(1);
+ }
- if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
+ if (!read_flag) {
+ wait_event_timeout(rdybit_check_q,
+ (read_flag != true),
+ HZ * 5);
+ }
+ } while ((time_before(jiffies, timeout)) && !read_flag);
+
+ if (!read_flag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
phba->fw_timeout = true;
beiscsi_ue_detect(phba);
- return -EBUSY;
- }
+ ret = -EBUSY;
+ }
- mdelay(1);
- wait++;
- } while (true);
- return 0;
+ return ret;
}
/*
@@ -699,7 +727,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb;
- BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+ WARN_ON(atomic_read(&mccq->used) >= mccq->len);
wrb = queue_head_node(mccq);
memset(wrb, 0, sizeof(*wrb));
wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@@ -1009,10 +1037,29 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
return status;
}
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @length: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: Bind to which ULP
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsol PDUs are posted
+ * on this queue by the FW
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ *
+ **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
- int entry_size)
+ int entry_size, uint8_t is_header,
+ uint8_t ulp_num)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_defq_create_req *req = embedded_payload(wrb);
@@ -1030,6 +1077,11 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ if (phba->fw_config.dual_ulp_aware) {
+ req->ulp_num = ulp_num;
+ req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+ req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+ }
if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_be_default_pdu_context,
@@ -1067,22 +1119,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl);
if (!status) {
+ struct be_ring *defq_ring;
struct be_defq_create_resp *resp = embedded_payload(wrb);
dq->id = le16_to_cpu(resp->id);
dq->created = true;
+ if (is_header)
+ defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+ else
+ defq_ring = &phba->phwi_ctrlr->
+ default_pdu_data[ulp_num];
+
+ defq_ring->id = dq->id;
+
+ if (!phba->fw_config.dual_ulp_aware) {
+ defq_ring->ulp_num = BEISCSI_ULP0;
+ defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+ } else {
+ defq_ring->ulp_num = resp->ulp_num;
+ defq_ring->doorbell_offset = resp->doorbell_offset;
+ }
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
-int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
- struct be_queue_info *wrbq)
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem,
+ struct be_queue_info *wrbq,
+ struct hwi_wrb_context *pwrb_context,
+ uint8_t ulp_num)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_wrbq_create_req *req = embedded_payload(wrb);
struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
spin_lock(&ctrl->mbox_lock);
@@ -1093,17 +1176,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ if (phba->fw_config.dual_ulp_aware) {
+ req->ulp_num = ulp_num;
+ req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+ req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+ }
+
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
if (!status) {
wrbq->id = le16_to_cpu(resp->cid);
wrbq->created = true;
+
+ pwrb_context->cid = wrbq->id;
+ if (!phba->fw_config.dual_ulp_aware) {
+ pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+ pwrb_context->ulp_num = BEISCSI_ULP0;
+ } else {
+ pwrb_context->ulp_num = resp->ulp_num;
+ pwrb_context->doorbell_offset = resp->doorbell_offset;
+ }
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_template_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
+ sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_remove_template_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+ sizeof(*req));
+
+ req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+ status = be_mbox_notify(ctrl);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem,
u32 page_offset, u32 num_pages)
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 99073086dfe..627ebbe0172 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -40,6 +40,7 @@ struct be_mcc_wrb {
u32 tag1; /* dword 3 */
u32 rsvd; /* dword 4 */
union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE 220
u8 embedded_payload[236]; /* used by embedded cmds */
struct be_sge sgl[19]; /* used by non-embedded cmds */
} payload;
@@ -162,6 +163,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
@@ -217,6 +220,10 @@ struct phys_addr {
u32 hi;
};
+struct virt_addr {
+ u32 lo;
+ u32 hi;
+};
/**************************
* BE Command definitions *
**************************/
@@ -722,7 +729,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
- int entry_size);
+ int entry_size, uint8_t is_header,
+ uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, u32 page_offset,
@@ -731,7 +744,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
- struct be_queue_info *wrbq);
+ struct be_queue_info *wrbq,
+ struct hwi_wrb_context *pwrb_context,
+ uint8_t ulp_num);
bool is_link_state_evt(u32 trailer);
@@ -776,7 +791,9 @@ struct be_defq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u8 ulp_num;
- u8 rsvd0;
+#define BEISCSI_DUAL_ULP_AWARE_BIT 0 /* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT 1 /* Byte 3 - Bit 1 */
+ u8 dua_feature;
struct be_default_pdu_context context;
struct phys_addr pages[8];
} __packed;
@@ -784,6 +801,27 @@ struct be_defq_create_req {
struct be_defq_create_resp {
struct be_cmd_req_hdr hdr;
u16 id;
+ u8 rsvd0;
+ u8 ulp_num;
+ u32 doorbell_offset;
+ u16 register_set;
+ u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI 0x1
+ u16 type;
+ struct phys_addr scratch_pa;
+ struct virt_addr scratch_va;
+ struct virt_addr pages_va;
+ struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 type;
u16 rsvd0;
} __packed;
@@ -800,14 +838,18 @@ struct be_wrbq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u8 ulp_num;
- u8 rsvd0;
+ u8 dua_feature;
struct phys_addr pages[8];
} __packed;
struct be_wrbq_create_resp {
struct be_cmd_resp_hdr resp_hdr;
u16 cid;
- u16 rsvd0;
+ u8 rsvd0;
+ u8 ulp_num;
+ u32 doorbell_offset;
+ u16 register_set;
+ u16 doorbell_format;
} __packed;
#define SOL_CID_MASK 0x0000FFC0
@@ -1002,6 +1044,7 @@ union tcp_upload_params {
} __packed;
struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE 0x10
u32 ulp_mode;
u32 etx_base;
u32 etx_count;
@@ -1017,14 +1060,26 @@ struct be_ulp_fw_cfg {
u32 icd_count;
};
+struct be_ulp_chain_icd {
+ u32 chain_base;
+ u32 chain_count;
+};
+
struct be_fw_cfg {
struct be_cmd_req_hdr hdr;
u32 be_config_number;
u32 asic_revision;
u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE 0x10
+#define BEISCSI_FUNC_DUA_MODE 0x800
u32 function_mode;
struct be_ulp_fw_cfg ulp[2];
u32 function_caps;
+ u32 cqid_base;
+ u32 cqid_count;
+ u32 eqid_base;
+ u32 eqid_count;
+ struct be_ulp_chain_icd chain_icd[2];
} __packed;
struct be_cmd_get_all_if_id_req {
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ef36be003f6..ffadbee0b4d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
}
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
- shost = phba->shost;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_session_create\n");
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : PCI_ERROR Recovery\n");
+ return NULL;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_session_create\n");
+ }
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
}
+ shost = phba->shost;
cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
shost, cmds_max,
sizeof(*beiscsi_sess),
@@ -194,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
@@ -214,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EEXIST;
}
+ pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+ beiscsi_ep->ep_cid)];
+
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
+ beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@@ -265,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
{
- struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_get_if_info_resp *if_info;
- if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+ if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
beiscsi_create_ipv4_iface(phba);
+ kfree(if_info);
+ }
- if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+ if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
beiscsi_create_ipv6_iface(phba);
+ kfree(if_info);
+ }
}
void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@@ -467,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
uint32_t rm_len = dt_len;
int ret = 0 ;
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ }
+
nla_for_each_attr(attrib, data, dt_len, rm_len) {
iface_param = nla_data(attrib);
@@ -512,59 +534,60 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
struct iscsi_iface *iface, int param,
char *buf)
{
- struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_get_if_info_resp *if_info;
int len, ip_type = BE2_IPV4;
- memset(&if_info, 0, sizeof(if_info));
-
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
ip_type = BE2_IPV6;
len = mgmt_get_if_info(phba, ip_type, &if_info);
- if (len)
+ if (len) {
+ kfree(if_info);
return len;
+ }
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
- len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+ len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
break;
case ISCSI_NET_PARAM_IPV6_ADDR:
- len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+ len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
- if (!if_info.dhcp_state)
+ if (!if_info->dhcp_state)
len = sprintf(buf, "static\n");
else
len = sprintf(buf, "dhcp\n");
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
- len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+ len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
- (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
? "Disabled\n" : "Enabled\n");
break;
case ISCSI_NET_PARAM_VLAN_ID:
- if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL;
else
len = sprintf(buf, "%d\n",
- (if_info.vlan_priority &
+ (if_info->vlan_priority &
ISCSI_MAX_VLAN_ID));
break;
case ISCSI_NET_PARAM_VLAN_PRIORITY:
- if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL;
else
len = sprintf(buf, "%d\n",
- ((if_info.vlan_priority >> 13) &
+ ((if_info->vlan_priority >> 13) &
ISCSI_MAX_VLAN_PRIORITY));
break;
default:
WARN_ON(1);
}
+ kfree(if_info);
return len;
}
@@ -577,6 +600,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
struct be_cmd_get_def_gateway_resp gateway;
int len = -ENOSYS;
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ }
+
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
case ISCSI_NET_PARAM_IPV4_SUBNET:
@@ -672,8 +701,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
session->max_burst = 262144;
break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
- if ((conn->max_xmit_dlength > 65536) ||
- (conn->max_xmit_dlength == 0))
+ if (conn->max_xmit_dlength > 65536)
conn->max_xmit_dlength = 65536;
default:
return 0;
@@ -727,7 +755,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+ ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
}
@@ -795,9 +823,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_get_host_param,"
- " param= %d\n", param);
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_get_host_param,"
+ " param = %d\n", param);
+ }
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
@@ -840,7 +875,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
struct be_cmd_get_nic_conf_resp resp;
int rc;
- if (strlen(phba->mac_address))
+ if (phba->mac_addr_set)
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
memset(&resp, 0, sizeof(resp));
@@ -848,6 +883,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
if (rc)
return rc;
+ phba->mac_addr_set = true;
memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
}
@@ -923,6 +959,10 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
session->max_r2t);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1));
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ max_recv_data_segment_length, params,
+ conn->max_recv_dlength);
+
}
/**
@@ -935,10 +975,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
+ struct beiscsi_hba *phba;
- beiscsi_log(beiscsi_conn->phba, KERN_INFO,
- BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_conn_start\n");
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ } else {
+ beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start\n");
+ }
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
@@ -960,15 +1009,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
*/
static int beiscsi_get_cid(struct beiscsi_hba *phba)
{
- unsigned short cid = 0xFFFF;
-
- if (!phba->avlbl_cids)
- return cid;
-
- cid = phba->cid_array[phba->cid_alloc++];
- if (phba->cid_alloc == phba->params.cxns_per_ctrl)
- phba->cid_alloc = 0;
- phba->avlbl_cids--;
+ unsigned short cid = 0xFFFF, cid_from_ulp;
+ struct ulp_cid_info *cid_info = NULL;
+ uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+
+ /* Find the ULP which has more CIDs available */
+ cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
+ BEISCSI_ULP0_AVLBL_CID(phba) : 0;
+ cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
+ BEISCSI_ULP1_AVLBL_CID(phba) : 0;
+ cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
+ BEISCSI_ULP0 : BEISCSI_ULP1;
+
+ if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
+ cid_info = phba->cid_array_info[cid_from_ulp];
+ if (!cid_info->avlbl_cids)
+ return cid;
+
+ cid = cid_info->cid_array[cid_info->cid_alloc++];
+
+ if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
+ phba, cid_from_ulp))
+ cid_info->cid_alloc = 0;
+
+ cid_info->avlbl_cids--;
+ }
return cid;
}
@@ -979,10 +1044,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
*/
static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
{
- phba->avlbl_cids++;
- phba->cid_array[phba->cid_free++] = cid;
- if (phba->cid_free == phba->params.cxns_per_ctrl)
- phba->cid_free = 0;
+ uint16_t cid_post_ulp;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+ struct ulp_cid_info *cid_info = NULL;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+ cid_post_ulp = pwrb_context->ulp_num;
+
+ cid_info = phba->cid_array_info[cid_post_ulp];
+ cid_info->avlbl_cids++;
+
+ cid_info->cid_array[cid_info->cid_free++] = cid;
+ if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
+ cid_info->cid_free = 0;
}
/**
@@ -1135,7 +1212,12 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
- if (phba->state != BE_ADAPTER_UP) {
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ ret = -EBUSY;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return ERR_PTR(ret);
+ } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
ret = -EBUSY;
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BS_%d : The Adapter Port state is Down!!!\n");
@@ -1260,6 +1342,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
}
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : PCI_ERROR Recovery\n");
+ goto free_ep;
+ }
+
tag = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid,
mgmt_invalidate_flag,
@@ -1272,6 +1360,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
+free_ep:
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a1f5ac7a980..1f375051483 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -149,18 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
"\t\t\t\tMiscellaneous Events : 0x04\n"
"\t\t\t\tError Handling : 0x08\n"
"\t\t\t\tIO Path Events : 0x10\n"
- "\t\t\t\tConfiguration Path : 0x20\n");
+ "\t\t\t\tConfiguration Path : 0x20\n"
+ "\t\t\t\tiSCSI Protocol : 0x40\n");
DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
+DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+ beiscsi_active_session_disp, NULL);
+DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+ beiscsi_free_session_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
&dev_attr_beiscsi_drvr_ver,
&dev_attr_beiscsi_adapter_family,
&dev_attr_beiscsi_fw_ver,
- &dev_attr_beiscsi_active_cid_count,
+ &dev_attr_beiscsi_active_session_count,
+ &dev_attr_beiscsi_free_session_count,
+ &dev_attr_beiscsi_phys_port,
NULL,
};
@@ -239,6 +246,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
spin_unlock_bh(&session->lock);
+ /* Invalidate WRB Posted for this Task */
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ aborted_io_task->pwrb_handle->pwrb,
+ 1);
+
conn = aborted_task->conn;
beiscsi_conn = conn->dd_data;
phba = beiscsi_conn->phba;
@@ -316,6 +328,11 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
if (sc->device->lun != abrt_task->sc->device->lun)
continue;
+ /* Invalidate WRB Posted for this Task */
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ abrt_io_task->pwrb_handle->pwrb,
+ 1);
+
inv_tbl->cid = cid;
inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
num_invalidate++;
@@ -699,30 +716,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
return status;
}
+/**
+ * beiscsi_get_params()- Set the config parameters
+ * @phba: ptr to device priv structure
+ **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
- phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
- - (phba->fw_config.iscsi_cid_count
- + BE2_TMFS
- + BE2_NOPOUT_REQ));
- phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
- phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
- phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
+ uint32_t total_cid_count = 0;
+ uint32_t total_icd_count = 0;
+ uint8_t ulp_num = 0;
+
+ total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+ BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ uint32_t align_mask = 0;
+ uint32_t icd_post_per_page = 0;
+ uint32_t icd_count_unavailable = 0;
+ uint32_t icd_start = 0, icd_count = 0;
+ uint32_t icd_start_align = 0, icd_count_align = 0;
+
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+ icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+
+ /* Get ICD count that can be posted on each page */
+ icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
+ sizeof(struct iscsi_sge)));
+ align_mask = (icd_post_per_page - 1);
+
+ /* Check if icd_start is aligned to the per-page ICD posting count */
+ if (icd_start % icd_post_per_page) {
+ icd_start_align = ((icd_start +
+ icd_post_per_page) &
+ ~(align_mask));
+ phba->fw_config.
+ iscsi_icd_start[ulp_num] =
+ icd_start_align;
+ }
+
+ icd_count_align = (icd_count & ~align_mask);
+
+ /* ICD discarded in the process of alignment */
+ if (icd_start_align)
+ icd_count_unavailable = ((icd_start_align -
+ icd_start) +
+ (icd_count -
+ icd_count_align));
+
+ /* Updated ICD count available */
+ phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
+ icd_count_unavailable);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Aligned ICD values\n"
+ "\t ICD Start : %d\n"
+ "\t ICD Count : %d\n"
+ "\t ICD Discarded : %d\n",
+ phba->fw_config.
+ iscsi_icd_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ icd_count_unavailable);
+ break;
+ }
+ }
+
+ total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+ phba->params.ios_per_ctrl = (total_icd_count -
+ (total_cid_count +
+ BE2_TMFS + BE2_NOPOUT_REQ));
+ phba->params.cxns_per_ctrl = total_cid_count;
+ phba->params.asyncpdus_per_ctrl = total_cid_count;
+ phba->params.icds_per_ctrl = total_icd_count;
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
phba->params.eq_timer = 64;
- phba->params.num_eq_entries =
- (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
- + BE2_TMFS) / 512) + 1) * 512;
- phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
- ? 1024 : phba->params.num_eq_entries;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : phba->params.num_eq_entries=%d\n",
- phba->params.num_eq_entries);
- phba->params.num_cq_entries =
- (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
- + BE2_TMFS) / 512) + 1) * 512;
+ phba->params.num_eq_entries = 1024;
+ phba->params.num_cq_entries = 1024;
phba->params.wrbs_per_cxn = 256;
}
@@ -1613,8 +1685,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
WARN_ON(!pasync_handle);
- pasync_handle->cri =
- BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+ pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
pasync_handle->is_header = is_header;
pasync_handle->buffer_len = dpl;
*pcq_index = index;
@@ -1674,18 +1746,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba,
}
static void hwi_free_async_msg(struct beiscsi_hba *phba,
- unsigned int cri)
+ struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int cri)
{
- struct hwi_controller *phwi_ctrlr;
- struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle, *tmp_handle;
struct list_head *plist;
- phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
-
plist = &pasync_ctx->async_entry[cri].wait_queue.list;
-
list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
list_del(&pasync_handle->link);
@@ -1720,7 +1787,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
}
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
- unsigned int is_header)
+ unsigned int is_header, uint8_t ulp_num)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_async_pdu_context *pasync_ctx;
@@ -1728,13 +1795,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
struct list_head *pfree_link, *pbusy_list;
struct phys_addr *pasync_sge;
unsigned int ring_id, num_entries;
- unsigned int host_write_num;
+ unsigned int host_write_num, doorbell_offset;
unsigned int writables;
unsigned int i = 0;
u32 doorbell = 0;
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
num_entries = pasync_ctx->num_entries;
if (is_header) {
@@ -1742,13 +1809,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
pasync_ctx->async_header.free_entries);
pfree_link = pasync_ctx->async_header.free_list.next;
host_write_num = pasync_ctx->async_header.host_write_ptr;
- ring_id = phwi_ctrlr->default_pdu_hdr.id;
+ ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
+ doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
+ doorbell_offset;
} else {
writables = min(pasync_ctx->async_data.writables,
pasync_ctx->async_data.free_entries);
pfree_link = pasync_ctx->async_data.free_list.next;
host_write_num = pasync_ctx->async_data.host_write_ptr;
- ring_id = phwi_ctrlr->default_pdu_data.id;
+ ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
+ doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
+ doorbell_offset;
}
writables = (writables / 8) * 8;
@@ -1796,7 +1867,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
<< DB_DEF_PDU_CQPROC_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va + doorbell_offset);
}
}
@@ -1808,9 +1879,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle = NULL;
unsigned int cq_index = -1;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
pdpdu_cqe, &cq_index);
@@ -1819,8 +1894,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
hwi_update_async_writables(phba, pasync_ctx,
pasync_handle->is_header, cq_index);
- hwi_free_async_msg(phba, pasync_handle->cri);
- hwi_post_async_buffers(phba, pasync_handle->is_header);
+ hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
+ hwi_post_async_buffers(phba, pasync_handle->is_header,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
}
static unsigned int
@@ -1859,7 +1936,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
phdr, hdr_len, pfirst_buffer,
offset);
- hwi_free_async_msg(phba, cri);
+ hwi_free_async_msg(phba, pasync_ctx, cri);
return 0;
}
@@ -1875,13 +1952,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
struct pdu_base *ppdu;
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ BE_GET_CRI_FROM_CID(beiscsi_conn->
+ beiscsi_conn_cid)));
list_del(&pasync_handle->link);
if (pasync_handle->is_header) {
pasync_ctx->async_header.busy_entries--;
if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
- hwi_free_async_msg(phba, cri);
+ hwi_free_async_msg(phba, pasync_ctx, cri);
BUG();
}
@@ -1936,9 +2016,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle = NULL;
unsigned int cq_index = -1;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
+
pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
pdpdu_cqe, &cq_index);
@@ -1947,7 +2032,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
pasync_handle->is_header, cq_index);
hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
- hwi_post_async_buffers(phba, pasync_handle->is_header);
+ hwi_post_async_buffers(phba, pasync_handle->is_header,
+ BEISCSI_GET_ULP_FROM_CRI(
+ phwi_ctrlr, cri_index));
}
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
@@ -2072,8 +2159,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
break;
case UNSOL_DATA_NOTIFY:
beiscsi_log(phba, KERN_INFO,
@@ -2081,8 +2170,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
break;
case CXN_INVALIDATE_INDEX_NOTIFY:
case CMD_INVALIDATED_NOTIFY:
@@ -2110,8 +2201,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
(struct i_t_dpdu_cqe *) sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
break;
case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
case CXN_KILLED_BURST_LEN_MISMATCH:
@@ -2476,26 +2569,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
+/**
+ * beiscsi_find_mem_req()- Find the memory required
+ * @phba: ptr to HBA struct
+ **/
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
+ uint8_t mem_descr_index, ulp_num;
unsigned int num_cq_pages, num_async_pdu_buf_pages;
unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
sizeof(struct sol_cqe));
- num_async_pdu_buf_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- phba->params.defpdu_hdr_sz);
- num_async_pdu_buf_sgl_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- sizeof(struct phys_addr));
- num_async_pdu_data_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- phba->params.defpdu_data_sz);
- num_async_pdu_data_sgl_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- sizeof(struct phys_addr));
phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
@@ -2517,24 +2603,79 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
phba->params.icds_per_ctrl;
phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
-
- phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
- num_async_pdu_buf_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
- num_async_pdu_data_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
- num_async_pdu_buf_sgl_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
- num_async_pdu_data_sgl_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
- phba->params.asyncpdus_per_ctrl *
- sizeof(struct async_pdu_handle);
- phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
- phba->params.asyncpdus_per_ctrl *
- sizeof(struct async_pdu_handle);
- phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
- sizeof(struct hwi_async_pdu_context) +
- (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ num_async_pdu_buf_sgl_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ sizeof(struct phys_addr));
+
+ num_async_pdu_buf_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ phba->params.defpdu_hdr_sz);
+
+ num_async_pdu_data_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ phba->params.defpdu_data_sz);
+
+ num_async_pdu_data_sgl_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ sizeof(struct phys_addr));
+
+ mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_buf_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_data_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_buf_sgl_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_data_sgl_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct async_pdu_handle);
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct async_pdu_handle);
+
+ mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ sizeof(struct hwi_async_pdu_context) +
+ (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct hwi_async_entry));
+ }
+ }
}
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
@@ -2576,6 +2717,12 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
mem_descr = phba->init_mem;
for (i = 0; i < SE_MEM_MAX; i++) {
+ if (!phba->mem_req[i]) {
+ mem_descr->mem_array = NULL;
+ mem_descr++;
+ continue;
+ }
+
j = 0;
mem_arr = mem_arr_orig;
alloc_size = phba->mem_req[i];
@@ -2697,7 +2844,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
/* Allocate memory for WRBQ */
phwi_ctxt = phwi_ctrlr->phwi_ctxt;
phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
- phba->fw_config.iscsi_cid_count,
+ phba->params.cxns_per_ctrl,
GFP_KERNEL);
if (!phwi_ctxt->be_wrbq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -2779,6 +2926,7 @@ init_wrb_hndl_failed:
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
+ uint8_t ulp_num;
struct hwi_controller *phwi_ctrlr;
struct hba_parameters *p = &phba->params;
struct hwi_async_pdu_context *pasync_ctx;
@@ -2786,155 +2934,150 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
unsigned int index, idx, num_per_mem, num_async_data;
struct be_mem_descriptor *mem_descr;
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
-
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
+ (struct hwi_async_pdu_context *)
+ mem_descr->mem_array[0].virtual_address;
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+ pasync_ctx->async_entry =
+ (struct hwi_async_entry *)
+ ((long unsigned int)pasync_ctx +
+ sizeof(struct hwi_async_pdu_context));
+
+ pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+ ulp_num);
+ pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.va_base =
mem_descr->mem_array[0].virtual_address;
- pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
- memset(pasync_ctx, 0, sizeof(*pasync_ctx));
- pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
- phba->fw_config.iscsi_cid_count,
- GFP_KERNEL);
- if (!pasync_ctx->async_entry) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
- return -ENOMEM;
- }
-
- pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
- pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_header.va_base =
- mem_descr->mem_array[0].virtual_address;
-
- pasync_ctx->async_header.pa_base.u.a64.address =
- mem_descr->mem_array[0].bus_address.u.a64.address;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_RING;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_header.ring_base =
- mem_descr->mem_array[0].virtual_address;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_header.handle_base =
- mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_header.writables = 0;
- INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
-
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_RING;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_DATA_RING va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_data.ring_base =
- mem_descr->mem_array[0].virtual_address;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
- if (!mem_descr->mem_array[0].virtual_address)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
+ pasync_ctx->async_header.pa_base.u.a64.address =
+ mem_descr->mem_array[0].
+ bus_address.u.a64.address;
- pasync_ctx->async_data.handle_base =
- mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_data.writables = 0;
- INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.ring_base =
+ mem_descr->mem_array[0].virtual_address;
- pasync_header_h =
- (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
- pasync_data_h =
- (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_header.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_data.ring_base =
+ mem_descr->mem_array[0].virtual_address;
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_BUF;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (!mem_descr->mem_array[0].virtual_address)
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
- idx = 0;
- pasync_ctx->async_data.va_base =
- mem_descr->mem_array[idx].virtual_address;
- pasync_ctx->async_data.pa_base.u.a64.address =
- mem_descr->mem_array[idx].bus_address.u.a64.address;
-
- num_async_data = ((mem_descr->mem_array[idx].size) /
- phba->params.defpdu_data_sz);
- num_per_mem = 0;
-
- for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
- pasync_header_h->cri = -1;
- pasync_header_h->index = (char)index;
- INIT_LIST_HEAD(&pasync_header_h->link);
- pasync_header_h->pbuffer =
- (void *)((unsigned long)
- (pasync_ctx->async_header.va_base) +
- (p->defpdu_hdr_sz * index));
-
- pasync_header_h->pa.u.a64.address =
- pasync_ctx->async_header.pa_base.u.a64.address +
- (p->defpdu_hdr_sz * index);
-
- list_add_tail(&pasync_header_h->link,
- &pasync_ctx->async_header.free_list);
- pasync_header_h++;
- pasync_ctx->async_header.free_entries++;
- pasync_ctx->async_header.writables++;
-
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
- header_busy_list);
- pasync_data_h->cri = -1;
- pasync_data_h->index = (char)index;
- INIT_LIST_HEAD(&pasync_data_h->link);
-
- if (!num_async_data) {
- num_per_mem = 0;
- idx++;
+ pasync_ctx->async_data.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_data.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+
+ pasync_header_h =
+ (struct async_pdu_handle *)
+ pasync_ctx->async_header.handle_base;
+ pasync_data_h =
+ (struct async_pdu_handle *)
+ pasync_ctx->async_data.handle_base;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ idx = 0;
pasync_ctx->async_data.va_base =
mem_descr->mem_array[idx].virtual_address;
pasync_ctx->async_data.pa_base.u.a64.address =
@@ -2943,32 +3086,83 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
num_async_data = ((mem_descr->mem_array[idx].size) /
phba->params.defpdu_data_sz);
- }
- pasync_data_h->pbuffer =
- (void *)((unsigned long)
- (pasync_ctx->async_data.va_base) +
- (p->defpdu_data_sz * num_per_mem));
-
- pasync_data_h->pa.u.a64.address =
- pasync_ctx->async_data.pa_base.u.a64.address +
- (p->defpdu_data_sz * num_per_mem);
- num_per_mem++;
- num_async_data--;
+ num_per_mem = 0;
- list_add_tail(&pasync_data_h->link,
- &pasync_ctx->async_data.free_list);
- pasync_data_h++;
- pasync_ctx->async_data.free_entries++;
- pasync_ctx->async_data.writables++;
+ for (index = 0; index < BEISCSI_GET_CID_COUNT
+ (phba, ulp_num); index++) {
+ pasync_header_h->cri = -1;
+ pasync_header_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_header_h->link);
+ pasync_header_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->
+ async_header.va_base) +
+ (p->defpdu_hdr_sz * index));
+
+ pasync_header_h->pa.u.a64.address =
+ pasync_ctx->async_header.pa_base.u.a64.
+ address + (p->defpdu_hdr_sz * index);
+
+ list_add_tail(&pasync_header_h->link,
+ &pasync_ctx->async_header.
+ free_list);
+ pasync_header_h++;
+ pasync_ctx->async_header.free_entries++;
+ pasync_ctx->async_header.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ wait_queue.list);
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ header_busy_list);
+ pasync_data_h->cri = -1;
+ pasync_data_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_data_h->link);
+
+ if (!num_async_data) {
+ num_per_mem = 0;
+ idx++;
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[idx].
+ virtual_address;
+ pasync_ctx->async_data.pa_base.u.
+ a64.address =
+ mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+ num_async_data =
+ ((mem_descr->mem_array[idx].
+ size) /
+ phba->params.defpdu_data_sz);
+ }
+ pasync_data_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->async_data.va_base) +
+ (p->defpdu_data_sz * num_per_mem));
+
+ pasync_data_h->pa.u.a64.address =
+ pasync_ctx->async_data.pa_base.u.a64.
+ address + (p->defpdu_data_sz *
+ num_per_mem);
+ num_per_mem++;
+ num_async_data--;
+
+ list_add_tail(&pasync_data_h->link,
+ &pasync_ctx->async_data.
+ free_list);
+ pasync_data_h++;
+ pasync_ctx->async_data.free_entries++;
+ pasync_ctx->async_data.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ data_busy_list);
+ }
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
+ pasync_ctx->async_header.host_write_ptr = 0;
+ pasync_ctx->async_header.ep_read_ptr = -1;
+ pasync_ctx->async_data.host_write_ptr = 0;
+ pasync_ctx->async_data.ep_read_ptr = -1;
+ }
}
- pasync_ctx->async_header.host_write_ptr = 0;
- pasync_ctx->async_header.ep_read_ptr = -1;
- pasync_ctx->async_data.host_write_ptr = 0;
- pasync_ctx->async_data.ep_read_ptr = -1;
-
return 0;
}
@@ -3164,7 +3358,7 @@ static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context,
struct hwi_controller *phwi_ctrlr,
- unsigned int def_pdu_ring_sz)
+ unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
unsigned int idx;
int ret;
@@ -3174,36 +3368,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
void *dq_vaddress;
idx = 0;
- dq = &phwi_context->be_def_hdrq;
+ dq = &phwi_context->be_def_hdrq[ulp_num];
cq = &phwi_context->be_cq[0];
mem = &dq->dma_mem;
mem_descr = phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_RING;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
dq_vaddress = mem_descr->mem_array[idx].virtual_address;
ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
sizeof(struct phys_addr),
sizeof(struct phys_addr), dq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
+ "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
+ ulp_num);
+
return ret;
}
mem->dma = (unsigned long)mem_descr->mem_array[idx].
bus_address.u.a64.address;
ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
def_pdu_ring_sz,
- phba->params.defpdu_hdr_sz);
+ phba->params.defpdu_hdr_sz,
+ BEISCSI_DEFQ_HDR, ulp_num);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+ "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
+ ulp_num);
+
return ret;
}
- phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : iscsi def pdu id is %d\n",
- phwi_context->be_def_hdrq.id);
- hwi_post_async_buffers(phba, 1);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
+ ulp_num,
+ phwi_context->be_def_hdrq[ulp_num].id);
+ hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
return 0;
}
@@ -3211,7 +3411,7 @@ static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context,
struct hwi_controller *phwi_ctrlr,
- unsigned int def_pdu_ring_sz)
+ unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
unsigned int idx;
int ret;
@@ -3221,43 +3421,86 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
void *dq_vaddress;
idx = 0;
- dataq = &phwi_context->be_def_dataq;
+ dataq = &phwi_context->be_def_dataq[ulp_num];
cq = &phwi_context->be_cq[0];
mem = &dataq->dma_mem;
mem_descr = phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_RING;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
dq_vaddress = mem_descr->mem_array[idx].virtual_address;
ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
sizeof(struct phys_addr),
sizeof(struct phys_addr), dq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
+ "BM_%d : be_fill_queue Failed for DEF PDU "
+ "DATA on ULP : %d\n",
+ ulp_num);
+
return ret;
}
mem->dma = (unsigned long)mem_descr->mem_array[idx].
bus_address.u.a64.address;
ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
def_pdu_ring_sz,
- phba->params.defpdu_data_sz);
+ phba->params.defpdu_data_sz,
+ BEISCSI_DEFQ_DATA, ulp_num);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d be_cmd_create_default_pdu_queue"
- " Failed for DEF PDU DATA\n");
+ " Failed for DEF PDU DATA on ULP : %d\n",
+ ulp_num);
return ret;
}
- phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : iscsi def data id is %d\n",
- phwi_context->be_def_dataq.id);
+ "BM_%d : iscsi def data id on ULP : %d is %d\n",
+ ulp_num,
+ phwi_context->be_def_dataq[ulp_num].id);
- hwi_post_async_buffers(phba, 0);
+ hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : DEFAULT PDU DATA RING CREATED\n");
+ "BM_%d : DEFAULT PDU DATA RING CREATED"
+ "on ULP : %d\n", ulp_num);
return 0;
}
+
+static int
+beiscsi_post_template_hdr(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ struct mem_array *pm_arr;
+ struct be_dma_mem sgl;
+ int status, ulp_num;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ pm_arr = mem_descr->mem_array;
+
+ hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+ status = be_cmd_iscsi_post_template_hdr(
+ &phba->ctrl, &sgl);
+
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Post Template HDR Failed for"
+ "ULP_%d\n", ulp_num);
+ return status;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Template HDR Pages Posted for"
+ "ULP_%d\n", ulp_num);
+ }
+ }
+ return 0;
+}
+
static int
beiscsi_post_pages(struct beiscsi_hba *phba)
{
@@ -3265,14 +3508,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
struct mem_array *pm_arr;
unsigned int page_offset, i;
struct be_dma_mem sgl;
- int status;
+ int status, ulp_num = 0;
mem_descr = phba->init_mem;
mem_descr += HWI_MEM_SGE;
pm_arr = mem_descr->mem_array;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+
page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
- phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
+ phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
for (i = 0; i < mem_descr->num_elements; i++) {
hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
@@ -3324,13 +3571,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
{
unsigned int wrb_mem_index, offset, size, num_wrb_rings;
u64 pa_addr_lo;
- unsigned int idx, num, i;
+ unsigned int idx, num, i, ulp_num;
struct mem_array *pwrb_arr;
void *wrb_vaddr;
struct be_dma_mem sgl;
struct be_mem_descriptor *mem_descr;
struct hwi_wrb_context *pwrb_context;
int status;
+ uint8_t ulp_count = 0, ulp_base_num = 0;
+ uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
idx = 0;
mem_descr = phba->init_mem;
@@ -3374,14 +3623,37 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
num_wrb_rings--;
}
}
+
+ /* Get the ULP Count */
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ ulp_count++;
+ ulp_base_num = ulp_num;
+ cid_count_ulp[ulp_num] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num);
+ }
+
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
wrb_mem_index = 0;
offset = 0;
size = 0;
+ if (ulp_count > 1) {
+ ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
+
+ if (!cid_count_ulp[ulp_base_num])
+ ulp_base_num = (ulp_base_num + 1) %
+ BEISCSI_ULP_COUNT;
+
+ cid_count_ulp[ulp_base_num]--;
+ }
+
+
hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
- &phwi_context->be_wrbq[i]);
+ &phwi_context->be_wrbq[i],
+ &phwi_ctrlr->wrb_context[i],
+ ulp_base_num);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : wrbq create failed.");
@@ -3389,7 +3661,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
return status;
}
pwrb_context = &phwi_ctrlr->wrb_context[i];
- pwrb_context->cid = phwi_context->be_wrbq[i].id;
BE_SET_CID_TO_CRI(i, pwrb_context->cid);
}
kfree(pwrb_arr);
@@ -3433,10 +3704,13 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct hwi_async_pdu_context *pasync_ctx;
- int i, eq_num;
+ int i, eq_num, ulp_num;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ be_cmd_iscsi_remove_template_hdr(ctrl);
+
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
q = &phwi_context->be_wrbq[i];
if (q->created)
@@ -3445,13 +3719,20 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
kfree(phwi_context->be_wrbq);
free_wrb_handles(phba);
- q = &phwi_context->be_def_hdrq;
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
- q = &phwi_context->be_def_dataq;
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+ q = &phwi_context->be_def_hdrq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ q = &phwi_context->be_def_dataq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ }
+ }
beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
@@ -3470,9 +3751,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
}
be_mcc_queues_destroy(phba);
-
- pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
- kfree(pasync_ctx->async_entry);
be_cmd_fw_uninit(ctrl);
}
@@ -3538,8 +3816,19 @@ static void find_num_cpus(struct beiscsi_hba *phba)
BEISCSI_MAX_NUM_CPUS : num_cpus;
break;
case BE_GEN4:
- phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
- OC_SKH_MAX_NUM_CPUS : num_cpus;
+ /*
+ * If eqid_count == 1, fall back to
+ * the INTx mechanism.
+ */
+ if (phba->fw_config.eqid_count == 1) {
+ enable_msix = 0;
+ phba->num_cpus = 1;
+ return;
+ }
+
+ phba->num_cpus =
+ (num_cpus > (phba->fw_config.eqid_count - 1)) ?
+ (phba->fw_config.eqid_count - 1) : num_cpus;
break;
default:
phba->num_cpus = 1;
@@ -3552,10 +3841,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
struct hwi_context_memory *phwi_context;
unsigned int def_pdu_ring_sz;
struct be_ctrl_info *ctrl = &phba->ctrl;
- int status;
+ int status, ulp_num;
- def_pdu_ring_sz =
- phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
phwi_context->max_eqd = 0;
@@ -3588,27 +3875,48 @@ static int hwi_init_port(struct beiscsi_hba *phba)
goto error;
}
- status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
- def_pdu_ring_sz);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Default Header not created\n");
- goto error;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ def_pdu_ring_sz =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct phys_addr);
+
+ status = beiscsi_create_def_hdr(phba, phwi_context,
+ phwi_ctrlr,
+ def_pdu_ring_sz,
+ ulp_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Header not created for ULP : %d\n",
+ ulp_num);
+ goto error;
+ }
+
+ status = beiscsi_create_def_data(phba, phwi_context,
+ phwi_ctrlr,
+ def_pdu_ring_sz,
+ ulp_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Data not created for ULP : %d\n",
+ ulp_num);
+ goto error;
+ }
+ }
}
- status = beiscsi_create_def_data(phba, phwi_context,
- phwi_ctrlr, def_pdu_ring_sz);
+ status = beiscsi_post_pages(phba);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Default Data not created\n");
+ "BM_%d : Post SGL Pages Failed\n");
goto error;
}
- status = beiscsi_post_pages(phba);
+ status = beiscsi_post_template_hdr(phba);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Post SGL Pages Failed\n");
- goto error;
+ "BM_%d : Template HDR Posting for CXN Failed\n");
}
status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
@@ -3618,6 +3926,26 @@ static int hwi_init_port(struct beiscsi_hba *phba)
goto error;
}
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ uint16_t async_arr_idx = 0;
+
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ uint16_t cri = 0;
+ struct hwi_async_pdu_context *pasync_ctx;
+
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
+ phwi_ctrlr, ulp_num);
+ for (cri = 0; cri <
+ phba->params.cxns_per_ctrl; cri++) {
+ if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
+ (phwi_ctrlr, cri))
+ pasync_ctx->cid_to_async_cri_map[
+ phwi_ctrlr->wrb_context[cri].cid] =
+ async_arr_idx++;
+ }
+ }
+ }
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : hwi_init_port success\n");
return 0;
@@ -3682,6 +4010,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
(unsigned long)mem_descr->mem_array[j - 1].
bus_address.u.a64.address);
}
+
kfree(mem_descr->mem_array);
mem_descr++;
}
@@ -3721,6 +4050,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
struct sgl_handle *psgl_handle;
struct iscsi_sge *pfrag;
unsigned int arr_index, i, idx;
+ unsigned int ulp_icd_start, ulp_num = 0;
phba->io_sgl_hndl_avbl = 0;
phba->eh_sgl_hndl_avbl = 0;
@@ -3787,6 +4117,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
"\n BM_%d : mem_descr_sg->num_elements=%d\n",
mem_descr_sg->num_elements);
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+
+ ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+
arr_index = 0;
idx = 0;
while (idx < mem_descr_sg->num_elements) {
@@ -3805,8 +4141,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
pfrag += phba->params.num_sge_per_io;
- psgl_handle->sgl_index =
- phba->fw_config.iscsi_icd_start + arr_index++;
+ psgl_handle->sgl_index = ulp_icd_start + arr_index++;
}
idx++;
}
@@ -3819,15 +4154,46 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
- int i;
+ int ret;
+ uint16_t i, ulp_num;
+ struct ulp_cid_info *ptr_cid_info = NULL;
- phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
- GFP_KERNEL);
- if (!phba->cid_array) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory in "
- "hba_setup_cid_tbls\n");
- return -ENOMEM;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
+ GFP_KERNEL);
+
+ if (!ptr_cid_info) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory"
+ "for ULP_CID_INFO for ULP : %d\n",
+ ulp_num);
+ ret = -ENOMEM;
+ goto free_memory;
+
+ }
+
+ /* Allocate memory for CID array */
+ ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
+ BEISCSI_GET_CID_COUNT(phba,
+ ulp_num), GFP_KERNEL);
+ if (!ptr_cid_info->cid_array) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory"
+ "for CID_ARRAY for ULP : %d\n",
+ ulp_num);
+ kfree(ptr_cid_info);
+ ptr_cid_info = NULL;
+ ret = -ENOMEM;
+
+ goto free_memory;
+ }
+ ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
+ phba, ulp_num);
+
+ /* Save the cid_info_array ptr */
+ phba->cid_array_info[ulp_num] = ptr_cid_info;
+ }
}
phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
phba->params.cxns_per_ctrl, GFP_KERNEL);
@@ -3835,9 +4201,9 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory in "
"hba_setup_cid_tbls\n");
- kfree(phba->cid_array);
- phba->cid_array = NULL;
- return -ENOMEM;
+ ret = -ENOMEM;
+
+ goto free_memory;
}
phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
@@ -3847,18 +4213,44 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
"BM_%d : Failed to allocate memory in"
"hba_setup_cid_tbls\n");
- kfree(phba->cid_array);
kfree(phba->ep_array);
- phba->cid_array = NULL;
phba->ep_array = NULL;
- return -ENOMEM;
+ ret = -ENOMEM;
+ }
+
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
+
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+ ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
+ phba->phwi_ctrlr->wrb_context[i].cid;
+
}
- for (i = 0; i < phba->params.cxns_per_ctrl; i++)
- phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
- phba->avlbl_cids = phba->params.cxns_per_ctrl;
+ ptr_cid_info->cid_alloc = 0;
+ ptr_cid_info->cid_free = 0;
+ }
+ }
return 0;
+
+free_memory:
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ if (ptr_cid_info) {
+ kfree(ptr_cid_info->cid_array);
+ kfree(ptr_cid_info);
+ phba->cid_array_info[ulp_num] = NULL;
+ }
+ }
+ }
+
+ return ret;
}
static void hwi_enable_intr(struct beiscsi_hba *phba)
@@ -4113,20 +4505,39 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
static void beiscsi_clean_port(struct beiscsi_hba *phba)
{
- int mgmt_status;
-
- mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
- if (mgmt_status)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : mgmt_epfw_cleanup FAILED\n");
+ int mgmt_status, ulp_num;
+ struct ulp_cid_info *ptr_cid_info = NULL;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
+ if (mgmt_status)
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : mgmt_epfw_cleanup FAILED"
+ " for ULP_%d\n", ulp_num);
+ }
+ }
hwi_purge_eq(phba);
hwi_cleanup(phba);
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
- kfree(phba->cid_array);
kfree(phba->ep_array);
kfree(phba->conn_table);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ if (ptr_cid_info) {
+ kfree(ptr_cid_info->cid_array);
+ kfree(ptr_cid_info);
+ phba->cid_array_info[ulp_num] = NULL;
+ }
+ }
+ }
+
}
/**
@@ -4255,8 +4666,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
<< DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
}
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4481,7 +4892,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
DB_DEF_PDU_WRB_INDEX_MASK) <<
DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
return 0;
}
@@ -4536,7 +4948,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
return 0;
}
@@ -4638,7 +5051,8 @@ static int beiscsi_mtask(struct iscsi_task *task)
doorbell |= (io_task->pwrb_handle->wrb_index &
DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
return 0;
}
@@ -4663,8 +5077,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
struct beiscsi_hba *phba = NULL;
phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
- "BM_%d : scsi_dma_map Failed\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
+ "BM_%d : scsi_dma_map Failed "
+ "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
+ be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
+ io_task->libiscsi_itt, scsi_bufflen(sc));
return num_sg;
}
@@ -4769,10 +5187,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
/*
* beiscsi_quiesce()- Cleanup Driver resources
* @phba: Instance Priv structure
+ * @unload_state: Clean or EEH unload state
*
* Free the OS and HW resources held by the driver
**/
-static void beiscsi_quiesce(struct beiscsi_hba *phba)
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+ uint32_t unload_state)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
@@ -4785,28 +5205,37 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
if (phba->msix_enabled) {
for (i = 0; i <= phba->num_cpus; i++) {
msix_vec = phba->msix_entries[i].vector;
+ synchronize_irq(msix_vec);
free_irq(msix_vec, &phwi_context->be_eq[i]);
kfree(phba->msi_name[i]);
}
} else
- if (phba->pcidev->irq)
+ if (phba->pcidev->irq) {
+ synchronize_irq(phba->pcidev->irq);
free_irq(phba->pcidev->irq, phba);
+ }
pci_disable_msix(phba->pcidev);
- destroy_workqueue(phba->wq);
+
if (blk_iopoll_enabled)
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
blk_iopoll_disable(&pbe_eq->iopoll);
}
- beiscsi_clean_port(phba);
- beiscsi_free_mem(phba);
+ if (unload_state == BEISCSI_CLEAN_UNLOAD) {
+ destroy_workqueue(phba->wq);
+ beiscsi_clean_port(phba);
+ beiscsi_free_mem(phba);
- beiscsi_unmap_pci_function(phba);
- pci_free_consistent(phba->pcidev,
- phba->ctrl.mbox_mem_alloced.size,
- phba->ctrl.mbox_mem_alloced.va,
- phba->ctrl.mbox_mem_alloced.dma);
+ beiscsi_unmap_pci_function(phba);
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+ } else {
+ hwi_purge_eq(phba);
+ hwi_cleanup(phba);
+ }
cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
}
@@ -4823,11 +5252,13 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
beiscsi_destroy_def_ifaces(phba);
- beiscsi_quiesce(phba);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_disable_pcie_error_reporting(pcidev);
+ pci_set_drvdata(pcidev, NULL);
pci_disable_device(pcidev);
}
@@ -4842,7 +5273,7 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
return;
}
- beiscsi_quiesce(phba);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
pci_disable_device(pcidev);
}
@@ -4880,6 +5311,167 @@ beiscsi_hw_health_check(struct work_struct *work)
msecs_to_jiffies(1000));
}
+
+static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct beiscsi_hba *phba = NULL;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+ phba->state |= BE_ADAPTER_PCI_ERR;
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH error detected\n");
+
+ beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+
+ if (state == pci_channel_io_perm_failure) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH : State PERM Failure");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_disable_device(pdev);
+
+ /* The error could cause the FW to trigger a flash debug dump.
+ * Resetting the card while flash dump is in progress
+ * can cause it not to recover; wait for it to finish.
+ * Wait only for first function as it is needed only once per
+ * adapter.
+ **/
+ if (pdev->devfn == 0)
+ ssleep(30);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
+{
+ struct beiscsi_hba *phba = NULL;
+ int status = 0;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset\n");
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* Wait for the CHIP Reset to complete */
+ status = be_chk_reset_complete(phba);
+ if (!status) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset Completed\n");
+ } else {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset Completion Failure\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void beiscsi_eeh_resume(struct pci_dev *pdev)
+{
+ int ret = 0, i;
+ struct be_eq_obj *pbe_eq;
+ struct beiscsi_hba *phba = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+ pci_save_state(pdev);
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed\n");
+ goto ret_err;
+ }
+
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset.\n");
+ goto ret_err;
+ }
+
+ beiscsi_get_params(phba);
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->can_queue = phba->params.ios_per_ctrl;
+ ret = hwi_init_controller(phba);
+
+ for (i = 0; i < MAX_MCC_CMD; i++) {
+ init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+ phba->ctrl.mcc_tag[i] = i + 1;
+ phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_available++;
+ }
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ if (blk_iopoll_enabled) {
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
+ }
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+ } else {
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ } else {
+ pbe_eq = &phwi_context->be_eq[0];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ }
+
+ ret = beiscsi_init_irqs(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_eeh_resume - "
+ "Failed to beiscsi_init_irqs\n");
+ goto ret_err;
+ }
+
+ hwi_enable_intr(phba);
+ phba->state &= ~BE_ADAPTER_PCI_ERR;
+
+ return;
+ret_err:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : AER EEH Resume Failed\n");
+}
+
static int beiscsi_dev_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
@@ -4887,7 +5479,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
- int ret, i;
+ int ret = 0, i;
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
@@ -4903,10 +5495,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto disable_pci;
}
+ /* Enable EEH reporting */
+ ret = pci_enable_pcie_error_reporting(pcidev);
+ if (ret)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : PCIe Error Reporting "
+ "Enabling Failed\n");
+
+ pci_save_state(pcidev);
+
/* Initialize Driver configuration Paramters */
beiscsi_hba_attrs_init(phba);
phba->fw_timeout = false;
+ phba->mac_addr_set = false;
switch (pcidev->device) {
@@ -4929,20 +5531,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->generation = 0;
}
- if (enable_msix)
- find_num_cpus(phba);
- else
- phba->num_cpus = 1;
-
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : num_cpus = %d\n",
- phba->num_cpus);
-
- if (enable_msix) {
- beiscsi_msix_enable(phba);
- if (!phba->msix_enabled)
- phba->num_cpus = 1;
- }
ret = be_ctrl_init(phba, pcidev);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4954,27 +5542,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
ret = beiscsi_cmd_reset_function(phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed. Aborting Crashdump\n");
+ "BM_%d : Reset Failed\n");
goto hba_free;
}
ret = be_chk_reset_complete(phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset."
- "Aborting Crashdump\n");
+ "BM_%d : Failed to get out of reset.\n");
goto hba_free;
}
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
spin_lock_init(&phba->isr_lock);
+ spin_lock_init(&phba->async_pdu_lock);
ret = mgmt_get_fw_config(&phba->ctrl, phba);
if (ret != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Error getting fw config\n");
goto free_port;
}
- phba->shost->max_id = phba->fw_config.iscsi_cid_count;
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : num_cpus = %d\n",
+ phba->num_cpus);
+
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
beiscsi_get_params(phba);
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
@@ -4985,7 +5589,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_port;
}
- for (i = 0; i < MAX_MCC_CMD ; i++) {
+ for (i = 0; i < MAX_MCC_CMD; i++) {
init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
phba->ctrl.mcc_tag[i] = i + 1;
phba->ctrl.mcc_numtag[i + 1] = 0;
@@ -5089,6 +5693,12 @@ disable_pci:
return ret;
}
+static struct pci_error_handlers beiscsi_eeh_handlers = {
+ .error_detected = beiscsi_eeh_err_detected,
+ .slot_reset = beiscsi_eeh_reset,
+ .resume = beiscsi_eeh_resume,
+};
+
struct iscsi_transport beiscsi_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRV_NAME,
@@ -5127,7 +5737,8 @@ static struct pci_driver beiscsi_pci_driver = {
.probe = beiscsi_dev_probe,
.remove = beiscsi_remove,
.shutdown = beiscsi_shutdown,
- .id_table = beiscsi_pci_id_table
+ .id_table = beiscsi_pci_id_table,
+ .err_handler = &beiscsi_eeh_handlers
};
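
The be_main.c changes above hook PCI EEH/AER recovery into the driver through struct pci_error_handlers (.error_detected, .slot_reset, .resume), paired with pci_enable_pcie_error_reporting() and pci_save_state() at probe time. A minimal, driver-agnostic sketch of that callback pattern; the mydrv_* names are hypothetical, not be2iscsi code:

    #include <linux/pci.h>
    #include <linux/aer.h>

    /* Hypothetical quiesce/re-init helpers standing in for driver logic. */
    static void mydrv_stop_hw(struct pci_dev *pdev) { }
    static int  mydrv_restart_hw(struct pci_dev *pdev) { return 0; }

    static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
                                                 pci_channel_state_t state)
    {
            mydrv_stop_hw(pdev);                    /* stop I/O, free IRQs */
            if (state == pci_channel_io_perm_failure)
                    return PCI_ERS_RESULT_DISCONNECT;
            pci_disable_device(pdev);
            return PCI_ERS_RESULT_NEED_RESET;       /* ask the core for a slot reset */
    }

    static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
    {
            if (pci_enable_device(pdev))
                    return PCI_ERS_RESULT_DISCONNECT;
            pci_set_master(pdev);
            pci_restore_state(pdev);                /* state saved at probe time */
            pci_cleanup_aer_uncorrect_error_status(pdev);
            return PCI_ERS_RESULT_RECOVERED;
    }

    static void mydrv_resume(struct pci_dev *pdev)
    {
            mydrv_restart_hw(pdev);                 /* rebuild rings, re-enable IRQs */
    }

    static const struct pci_error_handlers mydrv_err_handlers = {
            .error_detected = mydrv_error_detected,
            .slot_reset     = mydrv_slot_reset,
            .resume         = mydrv_resume,
    };
    /* Registered via .err_handler = &mydrv_err_handlers in struct pci_driver. */
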
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 2c06ef3c02a..31fa27b4a9b 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -26,6 +26,7 @@
#include <linux/in.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <linux/aer.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -34,9 +35,8 @@
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
-#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.0.467.0"
+#define BUILD_STR "10.0.659.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -66,7 +66,6 @@
#define MAX_CPUS 64
#define BEISCSI_MAX_NUM_CPUS 7
-#define OC_SKH_MAX_NUM_CPUS 31
#define BEISCSI_VER_STRLEN 32
@@ -74,6 +73,7 @@
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
+#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@@ -97,14 +97,19 @@
#define INVALID_SESS_HANDLE 0xFFFFFFFF
-#define BE_ADAPTER_UP 0x00000000
-#define BE_ADAPTER_LINK_DOWN 0x00000001
+#define BE_ADAPTER_LINK_UP 0x001
+#define BE_ADAPTER_LINK_DOWN 0x002
+#define BE_ADAPTER_PCI_ERR 0x004
+
+#define BEISCSI_CLEAN_UNLOAD 0x01
+#define BEISCSI_EEH_UNLOAD 0x02
/**
* hardware needs the async PDU buffers to be posted in multiples of 8
* So have atleast 8 of them by default
*/
-#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
+#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \
+ (phwi->phwi_ctxt->pasync_ctx[ulp_num])
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
@@ -149,29 +154,41 @@
#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
-#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
- (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
-#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
- (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
+#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id)
+#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)
#define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
+#define MEM_DESCR_OFFSET 8
+#define BEISCSI_DEFQ_HDR 1
+#define BEISCSI_DEFQ_DATA 0
enum be_mem_enum {
HWI_MEM_ADDN_CONTEXT,
HWI_MEM_WRB,
HWI_MEM_WRBH,
HWI_MEM_SGLH,
HWI_MEM_SGE,
- HWI_MEM_ASYNC_HEADER_BUF, /* 5 */
- HWI_MEM_ASYNC_DATA_BUF,
- HWI_MEM_ASYNC_HEADER_RING,
- HWI_MEM_ASYNC_DATA_RING,
- HWI_MEM_ASYNC_HEADER_HANDLE,
- HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */
- HWI_MEM_ASYNC_PDU_CONTEXT,
+ HWI_MEM_TEMPLATE_HDR_ULP0,
+ HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */
+ HWI_MEM_ASYNC_DATA_BUF_ULP0,
+ HWI_MEM_ASYNC_HEADER_RING_ULP0,
+ HWI_MEM_ASYNC_DATA_RING_ULP0,
+ HWI_MEM_ASYNC_HEADER_HANDLE_ULP0,
+ HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */
+ HWI_MEM_ASYNC_PDU_CONTEXT_ULP0,
+ HWI_MEM_TEMPLATE_HDR_ULP1,
+ HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */
+ HWI_MEM_ASYNC_DATA_BUF_ULP1,
+ HWI_MEM_ASYNC_HEADER_RING_ULP1,
+ HWI_MEM_ASYNC_DATA_RING_ULP1,
+ HWI_MEM_ASYNC_HEADER_HANDLE_ULP1,
+ HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */
+ HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,
ISCSI_MEM_GLOBAL_HEADER,
SE_MEM_MAX
};
@@ -266,9 +283,49 @@ struct invalidate_command_table {
unsigned short cid;
} __packed;
+#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
+ (phwi_ctrlr->wrb_context[cri].ulp_num)
+struct hwi_wrb_context {
+ struct list_head wrb_handle_list;
+ struct list_head wrb_handle_drvr_list;
+ struct wrb_handle **pwrb_handle_base;
+ struct wrb_handle **pwrb_handle_basestd;
+ struct iscsi_wrb *plast_wrb;
+ unsigned short alloc_index;
+ unsigned short free_index;
+ unsigned short wrb_handles_available;
+ unsigned short cid;
+ uint8_t ulp_num; /* ULP to which the CID is bound */
+ uint16_t register_set;
+ uint16_t doorbell_format;
+ uint32_t doorbell_offset;
+};
+
+struct ulp_cid_info {
+ unsigned short *cid_array;
+ unsigned short avlbl_cids;
+ unsigned short cid_alloc;
+ unsigned short cid_free;
+};
+
+#include "be.h"
#define chip_be2(phba) (phba->generation == BE_GEN2)
#define chip_be3_r(phba) (phba->generation == BE_GEN3)
#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
+
+#define BEISCSI_ULP0 0
+#define BEISCSI_ULP1 1
+#define BEISCSI_ULP_COUNT 2
+#define BEISCSI_ULP0_LOADED 0x01
+#define BEISCSI_ULP1_LOADED 0x02
+
+#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \
+ (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids)
+#define BEISCSI_ULP0_AVLBL_CID(phba) \
+ BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0)
+#define BEISCSI_ULP1_AVLBL_CID(phba) \
+ BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1)
+
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
@@ -303,17 +360,15 @@ struct beiscsi_hba {
spinlock_t io_sgl_lock;
spinlock_t mgmt_sgl_lock;
spinlock_t isr_lock;
+ spinlock_t async_pdu_lock;
unsigned int age;
- unsigned short avlbl_cids;
- unsigned short cid_alloc;
- unsigned short cid_free;
struct list_head hba_queue;
#define BE_MAX_SESSION 2048
#define BE_SET_CID_TO_CRI(cri_index, cid) \
(phba->cid_to_cri_map[cid] = cri_index)
#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
unsigned short cid_to_cri_map[BE_MAX_SESSION];
- unsigned short *cid_array;
+ struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
struct iscsi_endpoint **ep_array;
struct beiscsi_conn **conn_table;
struct iscsi_boot_kset *boot_kset;
@@ -325,20 +380,21 @@ struct beiscsi_hba {
* group together since they are used most frequently
* for cid to cri conversion
*/
- unsigned int iscsi_cid_start;
unsigned int phys_port;
+ unsigned int eqid_count;
+ unsigned int cqid_count;
+ unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
+#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \
+ (phba->fw_config.iscsi_cid_count[ulp_num])
+ unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT];
- unsigned int isr_offset;
- unsigned int iscsi_icd_start;
- unsigned int iscsi_cid_count;
- unsigned int iscsi_icd_count;
- unsigned int pci_function;
-
- unsigned short cid_alloc;
- unsigned short cid_free;
- unsigned short avlbl_cids;
unsigned short iscsi_features;
- spinlock_t cid_lock;
+ uint16_t dual_ulp_aware;
+ unsigned long ulp_supported;
} fw_config;
unsigned int state;
@@ -346,6 +402,7 @@ struct beiscsi_hba {
bool ue_detected;
struct delayed_work beiscsi_hw_check_task;
+ bool mac_addr_set;
u8 mac_address[ETH_ALEN];
char fw_ver_str[BEISCSI_VER_STRLEN];
char wq_name[20];
@@ -374,6 +431,7 @@ struct beiscsi_conn {
struct iscsi_conn *conn;
struct beiscsi_hba *phba;
u32 exp_statsn;
+ u32 doorbell_offset;
u32 beiscsi_conn_cid;
struct beiscsi_endpoint *ep;
unsigned short login_in_progress;
@@ -474,7 +532,7 @@ struct amap_iscsi_sge {
};
struct beiscsi_offload_params {
- u32 dw[5];
+ u32 dw[6];
};
#define OFFLD_PARAMS_ERL 0x00000003
@@ -504,6 +562,7 @@ struct amap_beiscsi_offload_params {
u8 max_r2t[16];
u8 pad[8];
u8 exp_statsn[32];
+ u8 max_recv_data_segment_length[32];
};
/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
@@ -567,7 +626,8 @@ struct hwi_async_pdu_context {
unsigned int buffer_size;
unsigned int num_entries;
-
+#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
+ unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
/**
* This is a varying size list! Do not add anything
* after this entry!!
@@ -885,30 +945,32 @@ struct amap_iscsi_target_context_update_wrb_v2 {
u8 first_burst_length[24]; /* DWORD 3 */
u8 rsvd3[8]; /* DOWRD 3 */
u8 max_r2t[16]; /* DWORD 4 */
- u8 rsvd4[10]; /* DWORD 4 */
+ u8 rsvd4; /* DWORD 4 */
u8 hde; /* DWORD 4 */
u8 dde; /* DWORD 4 */
u8 erl[2]; /* DWORD 4 */
+ u8 rsvd5[6]; /* DWORD 4 */
u8 imd; /* DWORD 4 */
u8 ir2t; /* DWORD 4 */
+ u8 rsvd6[3]; /* DWORD 4 */
u8 stat_sn[32]; /* DWORD 5 */
- u8 rsvd5[32]; /* DWORD 6 */
- u8 rsvd6[32]; /* DWORD 7 */
+ u8 rsvd7[32]; /* DWORD 6 */
+ u8 rsvd8[32]; /* DWORD 7 */
u8 max_recv_dataseg_len[24]; /* DWORD 8 */
- u8 rsvd7[8]; /* DWORD 8 */
- u8 rsvd8[32]; /* DWORD 9 */
- u8 rsvd9[32]; /* DWORD 10 */
+ u8 rsvd9[8]; /* DWORD 8 */
+ u8 rsvd10[32]; /* DWORD 9 */
+ u8 rsvd11[32]; /* DWORD 10 */
u8 max_cxns[16]; /* DWORD 11 */
- u8 rsvd10[11]; /* DWORD 11*/
+ u8 rsvd12[11]; /* DWORD 11*/
u8 invld; /* DWORD 11 */
- u8 rsvd11;/* DWORD 11*/
+ u8 rsvd13;/* DWORD 11*/
u8 dmsg; /* DWORD 11 */
u8 data_seq_inorder; /* DWORD 11 */
u8 pdu_seq_inorder; /* DWORD 11 */
- u8 rsvd12[32]; /*DWORD 12 */
- u8 rsvd13[32]; /* DWORD 13 */
- u8 rsvd14[32]; /* DWORD 14 */
- u8 rsvd15[32]; /* DWORD 15 */
+ u8 rsvd14[32]; /*DWORD 12 */
+ u8 rsvd15[32]; /* DWORD 13 */
+ u8 rsvd16[32]; /* DWORD 14 */
+ u8 rsvd17[32]; /* DWORD 15 */
} __packed;
@@ -919,6 +981,10 @@ struct be_ring {
u32 cidx; /* consumer index */
u32 pidx; /* producer index -- not used by most rings */
u32 item_size; /* size in bytes of one object */
+ u8 ulp_num; /* ULP to which the CID is bound */
+ u16 register_set;
+ u16 doorbell_format;
+ u32 doorbell_offset;
void *va; /* The virtual address of the ring. This
* should be last to allow 32 & 64 bit debugger
@@ -926,18 +992,6 @@ struct be_ring {
*/
};
-struct hwi_wrb_context {
- struct list_head wrb_handle_list;
- struct list_head wrb_handle_drvr_list;
- struct wrb_handle **pwrb_handle_base;
- struct wrb_handle **pwrb_handle_basestd;
- struct iscsi_wrb *plast_wrb;
- unsigned short alloc_index;
- unsigned short free_index;
- unsigned short wrb_handles_available;
- unsigned short cid;
-};
-
struct hwi_controller {
struct list_head io_sgl_list;
struct list_head eh_sgl_list;
@@ -946,8 +1000,8 @@ struct hwi_controller {
struct hwi_wrb_context *wrb_context;
struct mcc_wrb *pmcc_wrb_base;
- struct be_ring default_pdu_hdr;
- struct be_ring default_pdu_data;
+ struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
+ struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
struct hwi_context_memory *phwi_ctxt;
};
@@ -978,11 +1032,10 @@ struct hwi_context_memory {
struct be_eq_obj be_eq[MAX_CPUS];
struct be_queue_info be_cq[MAX_CPUS - 1];
- struct be_queue_info be_def_hdrq;
- struct be_queue_info be_def_dataq;
-
struct be_queue_info *be_wrbq;
- struct hwi_async_pdu_context *pasync_ctx;
+ struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
+ struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
+ struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
};
/* Logging related definitions */
@@ -992,6 +1045,7 @@ struct hwi_context_memory {
#define BEISCSI_LOG_EH 0x0008 /* Error Handler */
#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
+#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */
#define beiscsi_log(phba, level, mask, fmt, arg...) \
do { \
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 245a9595a93..b2fcac78fea 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -278,6 +278,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
return tag;
}
+/**
+ * mgmt_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
@@ -291,31 +303,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
status = be_mbox_notify(ctrl);
if (!status) {
+ uint8_t ulp_num = 0;
struct be_fw_cfg *pfw_cfg;
pfw_cfg = req;
+
+ if (!is_chip_be2_be3r(phba)) {
+ phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+ phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_INIT,
+ "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+ phba->fw_config.eqid_count,
+ phba->fw_config.cqid_count);
+ }
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (pfw_cfg->ulp[ulp_num].ulp_mode &
+ BEISCSI_ULP_ISCSI_INI_MODE)
+ set_bit(ulp_num,
+ &phba->fw_config.ulp_supported);
+
phba->fw_config.phys_port = pfw_cfg->phys_port;
- phba->fw_config.iscsi_icd_start =
- pfw_cfg->ulp[0].icd_base;
- phba->fw_config.iscsi_icd_count =
- pfw_cfg->ulp[0].icd_count;
- phba->fw_config.iscsi_cid_start =
- pfw_cfg->ulp[0].sq_base;
- phba->fw_config.iscsi_cid_count =
- pfw_cfg->ulp[0].sq_count;
- if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : FW reported MAX CXNS as %d\t"
- "Max Supported = %d.\n",
- phba->fw_config.iscsi_cid_count,
- BE2_MAX_SESSIONS);
- phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ phba->fw_config.iscsi_cid_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_base;
+ phba->fw_config.iscsi_cid_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_count;
+
+ phba->fw_config.iscsi_icd_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_base;
+ phba->fw_config.iscsi_icd_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_count;
+
+ phba->fw_config.iscsi_chain_start[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_base;
+ phba->fw_config.iscsi_chain_count[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_count;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Function loaded on ULP : %d\n"
+ "\tiscsi_cid_count : %d\n"
+ "\tiscsi_cid_start : %d\n"
+ "\t iscsi_icd_count : %d\n"
+ "\t iscsi_icd_start : %d\n",
+ ulp_num,
+ phba->fw_config.
+ iscsi_cid_count[ulp_num],
+ phba->fw_config.
+ iscsi_cid_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ phba->fw_config.
+ iscsi_icd_start[ulp_num]);
+ }
}
+
+ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+ BEISCSI_FUNC_DUA_MODE);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : DUA Mode : 0x%x\n",
+ phba->fw_config.dual_ulp_aware);
+
} else {
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_get_fw_config\n");
+ status = -EINVAL;
}
spin_unlock(&ctrl->mbox_lock);
@@ -448,7 +508,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag;
}
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
+/**
+ * mgmt_epfw_cleanup()- Inform FW to clean up data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp_num: ULP number.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
@@ -462,9 +531,9 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
- req->chute = chute;
- req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
- req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));
+ req->chute = (1 << ulp_num);
+ req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
+ req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
status = be_mcc_notify_wait(phba);
if (status)
@@ -585,6 +654,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
return tag;
}
+/**
+ * mgmt_open_connection()- Establish a TCP CXN
+ * @dst_addr: Destination Address
+ * @beiscsi_ep: ptr to device endpoint struct
+ * @nonemb_cmd: ptr to memory allocated for command
+ *
+ * return
+ * Success: Tag number of the MBX Command issued
+ * Failure: Error code
+ **/
int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep,
@@ -602,14 +681,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address;
unsigned int tag = 0;
- unsigned int i;
+ unsigned int i, ulp_num;
unsigned short cid = beiscsi_ep->ep_cid;
struct be_sge *sge;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
- def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
+
+ ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num;
+
+ def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num);
+ def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
@@ -748,11 +830,14 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
if (rc) {
+ /* Check if the IOCTL needs to be re-issued */
+ if (rc == -EAGAIN)
+ return rc;
+
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
- rc = -EIO;
goto free_cmd;
}
@@ -861,7 +946,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
uint32_t boot_proto)
{
struct be_cmd_get_def_gateway_resp gtway_addr_set;
- struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_get_if_info_resp *if_info;
struct be_cmd_set_dhcp_req *dhcpreq;
struct be_cmd_rel_dhcp_req *reldhcp;
struct be_dma_mem nonemb_cmd;
@@ -872,16 +957,17 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (mgmt_get_all_if_id(phba))
return -EIO;
- memset(&if_info, 0, sizeof(if_info));
ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
BE2_IPV6 : BE2_IPV4 ;
rc = mgmt_get_if_info(phba, ip_type, &if_info);
- if (rc)
+ if (rc) {
+ kfree(if_info);
return rc;
+ }
if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
- if (if_info.dhcp_state) {
+ if (if_info->dhcp_state) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : DHCP Already Enabled\n");
return 0;
@@ -894,9 +980,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
IP_V6_LEN : IP_V4_LEN;
} else {
- if (if_info.dhcp_state) {
+ if (if_info->dhcp_state) {
- memset(&if_info, 0, sizeof(if_info));
+ memset(if_info, 0, sizeof(*if_info));
rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
sizeof(*reldhcp));
@@ -919,8 +1005,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
}
/* Delete the Static IP Set */
- if (if_info.ip_addr.addr[0]) {
- rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+ if (if_info->ip_addr.addr[0]) {
+ rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
IP_ACTION_DEL);
if (rc)
return rc;
@@ -966,7 +1052,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
} else {
- return mgmt_static_ip_modify(phba, &if_info, ip_param,
+ return mgmt_static_ip_modify(phba, if_info, ip_param,
subnet_param, IP_ACTION_ADD);
}
@@ -1031,27 +1117,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
}
int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_if_info_resp *if_info)
+ struct be_cmd_get_if_info_resp **if_info)
{
struct be_cmd_get_if_info_req *req;
struct be_dma_mem nonemb_cmd;
+ uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
int rc;
if (mgmt_get_all_if_id(phba))
return -EIO;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
- sizeof(*if_info));
- if (rc)
- return rc;
+ do {
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+ ioctl_size);
+ if (rc)
+ return rc;
- req = nonemb_cmd.va;
- req->interface_hndl = phba->interface_handle;
- req->ip_type = ip_type;
+ req = nonemb_cmd.va;
+ req->interface_hndl = phba->interface_handle;
+ req->ip_type = ip_type;
+
+ /* Allocate memory for if_info */
+ *if_info = kzalloc(ioctl_size, GFP_KERNEL);
+ if (!*if_info) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : Memory Allocation Failure\n");
+
+ /* Free the DMA memory for the IOCTL issuing */
+ pci_free_consistent(phba->ctrl.pdev,
+ nonemb_cmd.size,
+ nonemb_cmd.va,
+ nonemb_cmd.dma);
+ return -ENOMEM;
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
+ ioctl_size);
+
+ /* Check if the error is because of Insufficient_Buffer */
+ if (rc == -EAGAIN) {
+
+ /* Get the new memory size */
+ ioctl_size = ((struct be_cmd_resp_hdr *)
+ nonemb_cmd.va)->actual_resp_len;
+ ioctl_size += sizeof(struct be_cmd_req_hdr);
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
- sizeof(*if_info));
+ /* Free the previous allocated DMA memory */
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va,
+ nonemb_cmd.dma);
+
+ /* Free the virtual memory */
+ kfree(*if_info);
+ } else
+ break;
+ } while (true);
+ return rc;
}
int mgmt_get_nic_conf(struct beiscsi_hba *phba,
@@ -1281,7 +1404,7 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
}
/**
- * beiscsi_active_cid_disp()- Display Sessions Active
+ * beiscsi_active_session_disp()- Display Sessions Active
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text Session Count
@@ -1290,14 +1413,56 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
* size of the formatted string
**/
ssize_t
-beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
+ total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ (total_cids - avlbl_cids));
+ } else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
+ }
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+ return len;
+}
+
+/**
+ * beiscsi_free_session_disp()- Display Available Sessions
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint16_t ulp_num, len = 0;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
+ }
+
+ return len;
}
/**
@@ -1338,6 +1503,25 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
}
}
+/**
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text port identifier
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+ phba->fw_config.phys_port);
+}
void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle,
@@ -1411,10 +1595,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_burst_length, pwrb, params->dw[offsetof
- (struct amap_beiscsi_offload_params,
- max_burst_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_burst_length, pwrb, params->dw[offsetof
(struct amap_beiscsi_offload_params,
@@ -1436,7 +1616,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
params->dw[offsetof(struct amap_beiscsi_offload_params,
first_burst_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
- max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+ max_recv_dataseg_len, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_recv_data_segment_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_cxns, pwrb, BEISCSI_MAX_CXNS);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 04af7e74fe4..01b8c97284c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -294,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
struct be_cmd_get_nic_conf_resp *mac);
int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_if_info_resp *if_info);
+ struct be_cmd_get_if_info_resp **if_info);
int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
struct be_cmd_get_def_gateway_resp *gateway);
@@ -315,12 +315,19 @@ ssize_t beiscsi_drvr_ver_disp(struct device *dev,
ssize_t beiscsi_fw_ver_disp(struct device *dev,
struct device_attribute *attr, char *buf);
-ssize_t beiscsi_active_cid_disp(struct device *dev,
- struct device_attribute *attr, char *buf);
+ssize_t beiscsi_active_session_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
ssize_t beiscsi_adap_family_disp(struct device *dev,
struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_free_session_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_phys_port_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle,
struct be_mem_descriptor *mem_descr);
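
The mgmt_get_if_info() rework above retries the GET_IF_INFO mailbox command with a larger DMA buffer whenever the firmware reports that the response did not fit (-EAGAIN plus actual_resp_len). A generic sketch of that grow-and-retry pattern, with query_fw() standing in as a hypothetical firmware call:

    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Hypothetical query: fills buf, or returns -EAGAIN with the size the
     * firmware actually needs written back through *len. */
    int query_fw(void *buf, size_t *len);

    static void *query_fw_autosize(size_t *out_len)
    {
            size_t len = 256;               /* initial guess */
            void *buf;
            int rc;

            for (;;) {
                    buf = kzalloc(len, GFP_KERNEL);
                    if (!buf)
                            return NULL;

                    rc = query_fw(buf, &len);
                    if (rc != -EAGAIN)
                            break;          /* done, or a hard failure */

                    kfree(buf);             /* too small: retry with FW-reported size */
            }

            if (rc) {
                    kfree(buf);
                    return NULL;
            }
            *out_len = len;
            return buf;
    }
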
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index f8ca7becacc..fc80a325a1e 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
bfad->pcidev = pdev;
/* Adjust PCIe Maximum Read Request Size */
- if (pcie_max_read_reqsz > 0) {
- int pcie_cap_reg;
- u16 pcie_dev_ctl;
- u16 mask = 0xffff;
-
- switch (pcie_max_read_reqsz) {
- case 128:
- mask = 0x0;
- break;
- case 256:
- mask = 0x1000;
- break;
- case 512:
- mask = 0x2000;
- break;
- case 1024:
- mask = 0x3000;
- break;
- case 2048:
- mask = 0x4000;
- break;
- case 4096:
- mask = 0x5000;
- break;
- default:
- break;
- }
-
- pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (mask != 0xffff && pcie_cap_reg) {
- pcie_cap_reg += 0x08;
- pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
- if ((pcie_dev_ctl & 0x7000) != mask) {
- printk(KERN_WARNING "BFA[%s]: "
+ if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+ if (pcie_max_read_reqsz >= 128 &&
+ pcie_max_read_reqsz <= 4096 &&
+ is_power_of_2(pcie_max_read_reqsz)) {
+ int max_rq = pcie_get_readrq(pdev);
+ printk(KERN_WARNING "BFA[%s]: "
"pcie_max_read_request_size is %d, "
- "reset to %d\n", bfad->pci_name,
- (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+ "reset to %d\n", bfad->pci_name, max_rq,
pcie_max_read_reqsz);
-
- pcie_dev_ctl &= ~0x7000;
- pci_write_config_word(pdev, pcie_cap_reg,
- pcie_dev_ctl | mask);
- }
+ pcie_set_readrq(pdev, pcie_max_read_reqsz);
+ } else {
+ printk(KERN_WARNING "BFA[%s]: invalid "
+ "pcie_max_read_request_size %d ignored\n",
+ bfad->pci_name, pcie_max_read_reqsz);
}
}
@@ -833,7 +804,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
/* Disable PCIE Advanced Error Recovery (AER) */
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
bfa_status_t
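
The bfad.c hunk above swaps open-coded PCIe Device Control writes for the MRRS helpers pcie_get_readrq()/pcie_set_readrq(). A small sketch of validating and applying a module-supplied read request size; max_rrq is a hypothetical parameter:

    #include <linux/pci.h>
    #include <linux/log2.h>

    static void adjust_readrq(struct pci_dev *pdev, int max_rrq)
    {
            if (!pci_is_pcie(pdev) || !max_rrq)
                    return;

            /* MRRS must be a power of two between 128 and 4096 bytes. */
            if (max_rrq < 128 || max_rrq > 4096 || !is_power_of_2(max_rrq)) {
                    dev_warn(&pdev->dev, "invalid readrq %d ignored\n", max_rrq);
                    return;
            }

            if (pcie_get_readrq(pdev) != max_rrq)
                    pcie_set_readrq(pdev, max_rrq);
    }
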
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d7ca9305ff4..1ebf3fb683e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.14"
+#define BNX2FC_VERSION "2.4.1"
#define PFX "bnx2fc: "
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 69ac55495c1..9b948505d11 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Mar 08, 2013"
+#define DRV_MODULE_RELDATE "Sep 17, 2013"
static char version[] =
@@ -542,8 +542,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
if (vn_port) {
port = lport_priv(vn_port);
- if (compare_ether_addr(port->data_src_addr, dest_mac)
- != 0) {
+ if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
put_cpu();
kfree_skb(skb);
@@ -1381,6 +1380,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
return NULL;
}
ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ ctlr->cdev = ctlr_dev;
interface = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
kref_init(&interface->kref);
@@ -2004,6 +2004,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
+/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
+static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
+{
+ struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+ if (interface->enabled == true) {
+ if (!ctlr->lp) {
+ pr_err(PFX "__bnx2fc_disable: lport not found\n");
+ return -ENODEV;
+ } else {
+ interface->enabled = false;
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ }
+ }
+ return 0;
+}
+
/**
* Deperecated: Use bnx2fc_enabled()
*/
@@ -2018,20 +2036,34 @@ static int bnx2fc_disable(struct net_device *netdev)
interface = bnx2fc_interface_lookup(netdev);
ctlr = bnx2fc_to_ctlr(interface);
- if (!interface || !ctlr->lp) {
+
+ if (!interface) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
+ pr_err(PFX "bnx2fc_disable: interface not found\n");
} else {
- interface->enabled = false;
- fcoe_ctlr_link_down(ctlr);
- fcoe_clean_pending_queue(ctlr->lp);
+ rc = __bnx2fc_disable(ctlr);
}
-
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return rc;
}
+static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
+{
+ struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+ if (interface->enabled == false) {
+ if (!ctlr->lp) {
+ pr_err(PFX "__bnx2fc_enable: lport not found\n");
+ return -ENODEV;
+ } else if (!bnx2fc_link_ok(ctlr->lp)) {
+ fcoe_ctlr_link_up(ctlr);
+ interface->enabled = true;
+ }
+ }
+ return 0;
+}
+
/**
* Deprecated: Use bnx2fc_enabled()
*/
@@ -2046,12 +2078,11 @@ static int bnx2fc_enable(struct net_device *netdev)
interface = bnx2fc_interface_lookup(netdev);
ctlr = bnx2fc_to_ctlr(interface);
- if (!interface || !ctlr->lp) {
+ if (!interface) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
- } else if (!bnx2fc_link_ok(ctlr->lp)) {
- fcoe_ctlr_link_up(ctlr);
- interface->enabled = true;
+ pr_err(PFX "bnx2fc_enable: interface not found\n");
+ } else {
+ rc = __bnx2fc_enable(ctlr);
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -2072,14 +2103,12 @@ static int bnx2fc_enable(struct net_device *netdev)
static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
- struct fc_lport *lport = ctlr->lp;
- struct net_device *netdev = bnx2fc_netdev(lport);
switch (cdev->enabled) {
case FCOE_CTLR_ENABLED:
- return bnx2fc_enable(netdev);
+ return __bnx2fc_enable(ctlr);
case FCOE_CTLR_DISABLED:
- return bnx2fc_disable(netdev);
+ return __bnx2fc_disable(ctlr);
case FCOE_CTLR_UNUSED:
default:
return -ENOTSUPP;
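
The bnx2fc_fcoe.c changes factor the enable/disable work into __bnx2fc_enable()/__bnx2fc_disable(), which assume rtnl_lock and bnx2fc_dev_lock are already held, so the netdev wrappers and the fcoe_ctlr_device sysfs path share one implementation. A generic sketch of that locked-helper convention; struct drv_ctx and the function names are hypothetical:

    #include <linux/mutex.h>
    #include <linux/rtnetlink.h>

    struct drv_ctx {
            bool enabled;
    };

    static DEFINE_MUTEX(drv_lock);

    /* Core worker: caller must hold rtnl_lock and drv_lock. */
    static int __drv_disable(struct drv_ctx *ctx)
    {
            if (!ctx->enabled)
                    return 0;
            ctx->enabled = false;
            /* ... bring the link down, flush pending frames ... */
            return 0;
    }

    /* Wrapper for callers that do not already hold the locks. */
    static int drv_disable(struct drv_ctx *ctx)
    {
            int rc;

            rtnl_lock();
            mutex_lock(&drv_lock);
            rc = __drv_disable(ctx);
            mutex_unlock(&drv_lock);
            rtnl_unlock();
            return rc;
    }
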
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 575142e92d9..ed880891cb7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1246,6 +1246,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
rc = bnx2fc_expl_logo(lport, io_req);
+ /* This only occurs when a task abort was requested while ABTS
+ is in progress. Setting the IO_CLEANUP flag will skip the
+ RRQ process in the case when the fw generated SCSI_CMD cmpl
+ was a result from the ABTS request rather than the CLEANUP
+ request */
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
goto out;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5be718c241c..e4cf23df4b4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -126,7 +126,7 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
/**
* bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
* @action: action, ARM or DISARM. For now only ARM_CQE is used
*
* Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt
@@ -756,7 +756,7 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
/**
* bnx2i_send_conn_destroy - initiates iscsi connection teardown process
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
* iscsi connection context clean-up process
@@ -791,7 +791,7 @@ int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/**
* bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
@@ -851,7 +851,7 @@ static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
/**
* bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
@@ -920,7 +920,7 @@ static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
* bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
*
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
@@ -939,7 +939,7 @@ int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/**
* setup_qp_page_tables - iscsi QP page table setup function
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
* 64-bit address in big endian format. Whereas 10G/sec (57710) requires
@@ -1046,7 +1046,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
/**
* bnx2i_alloc_qp_resc - allocates required resources for QP.
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* Allocate QP (transport layer for iSCSI connection) resources, DMA'able
* memory for SQ/RQ/CQ and page tables. EP structure elements such
@@ -1191,7 +1191,7 @@ mem_alloc_err:
/**
* bnx2i_free_qp_resc - free memory resources held by QP
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* Free QP resources - SQ/RQ/CQ memory and page tables.
*/
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fabeb88602a..854dad7d5b0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -596,7 +596,7 @@ void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
/**
* bnx2i_ep_destroy_list_add - add an entry to EP destroy list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* EP destroy queue manager
*/
@@ -613,7 +613,7 @@ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
* bnx2i_ep_destroy_list_del - add an entry to EP destroy list
*
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* EP destroy queue manager
*/
@@ -630,7 +630,7 @@ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
/**
* bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* pending conn offload completion queue manager
*/
@@ -646,7 +646,7 @@ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
/**
* bnx2i_ep_ofld_list_del - add an entry to ep offload pending list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* pending conn offload completion queue manager
*/
@@ -721,7 +721,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
/**
* bnx2i_ep_active_list_add - add an entry to ep active list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* current active conn queue manager
*/
@@ -737,7 +737,7 @@ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
/**
* bnx2i_ep_active_list_del - deletes an entry to ep active list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* current active conn queue manager
*/
@@ -1695,7 +1695,7 @@ no_nx2_route:
/**
* bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
* @hba: pointer to adapter instance
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* destroys cm_sock structure and on chip iscsi context
*/
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 0eb35b9b378..0eaec474895 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw)
return 0;
}
-static void
-csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
-{
- uint16_t val;
- int pcie_cap;
-
- if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
- pci_read_config_word(hw->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, &val);
- val &= 0xfff0;
- val |= range ;
- pci_write_config_word(hw->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, val);
- }
-}
-
/*****************************************************************************/
/* HW State machine assists */
/*****************************************************************************/
@@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw)
goto out;
}
- /* Set pci completion timeout value to 4 seconds. */
- csio_set_pcie_completion_timeout(hw, 0xd);
+ /* Set PCIe completion timeout to 4 seconds */
+ if (pci_is_pcie(hw->pdev))
+ pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
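
The csio_hw.c hunk above replaces the hand-rolled capability lookup with pcie_capability_clear_and_set_word(). A minimal sketch of programming the PCIe completion-timeout range to roughly 4 seconds (encoding 0xd, as used in the patch):

    #include <linux/pci.h>

    static void set_cpl_timeout_4s(struct pci_dev *pdev)
    {
            if (!pci_is_pcie(pdev))
                    return;

            /* Clear the current range bits, then program encoding 0xd. */
            pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
                                               PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
    }
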
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 00346fe939d..1aafc331ee6 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1010,7 +1010,6 @@ err_lnode_exit:
csio_hw_stop(hw);
spin_unlock_irq(&hw->lock);
csio_lnodes_unblock_request(hw);
- pci_set_drvdata(hw->pdev, NULL);
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
err_pci_exit:
@@ -1044,7 +1043,6 @@ static void csio_remove_one(struct pci_dev *pdev)
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
- pci_set_drvdata(pdev, NULL);
csio_pci_exit(pdev, &bars);
}
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 694e13c45df..83d9bf6fa6c 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -308,6 +308,8 @@ struct AdapterCtlBlk {
struct timer_list waiting_timer;
struct timer_list selto_timer;
+ unsigned long last_reset;
+
u16 srb_count;
u8 sel_timeout;
@@ -860,9 +862,9 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
init_timer(&acb->waiting_timer);
acb->waiting_timer.function = waiting_timeout;
acb->waiting_timer.data = (unsigned long) acb;
- if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
+ if (time_before(jiffies + to, acb->last_reset - HZ / 2))
acb->waiting_timer.expires =
- acb->scsi_host->last_reset - HZ / 2 + 1;
+ acb->last_reset - HZ / 2 + 1;
else
acb->waiting_timer.expires = jiffies + to + 1;
add_timer(&acb->waiting_timer);
@@ -1319,7 +1321,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
udelay(500);
/* We may be in serious trouble. Wait some seconds */
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + 3 * HZ / 2 +
HZ * acb->eeprom.delay_time;
@@ -1462,9 +1464,9 @@ static void selto_timer(struct AdapterCtlBlk *acb)
acb->selto_timer.function = selection_timeout_missed;
acb->selto_timer.data = (unsigned long) acb;
if (time_before
- (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
+ (jiffies + HZ, acb->last_reset + HZ / 2))
acb->selto_timer.expires =
- acb->scsi_host->last_reset + HZ / 2 + 1;
+ acb->last_reset + HZ / 2 + 1;
else
acb->selto_timer.expires = jiffies + HZ + 1;
add_timer(&acb->selto_timer);
@@ -1535,7 +1537,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
}
/* Allow starting of SCSI commands half a second before we allow the mid-level
* to queue them again after a reset */
- if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
+ if (time_before(jiffies, acb->last_reset - HZ / 2)) {
dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
return 1;
}
@@ -3031,7 +3033,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
dprintkl(KERN_ERR, "disconnect: No such device\n");
udelay(500);
/* Suspend queue for a while */
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + HZ / 2 +
HZ * acb->eeprom.delay_time;
clear_fifo(acb, "disconnectEx");
@@ -3053,7 +3055,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
waiting_process_next(acb);
} else if (srb->state & SRB_ABORT_SENT) {
dcb->flag &= ~ABORT_DEV_;
- acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
+ acb->last_reset = jiffies + HZ / 2 + 1;
dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
waiting_process_next(acb);
@@ -3649,7 +3651,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb)
/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
udelay(500);
/* Maybe we locked up the bus? Then lets wait even longer ... */
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + 5 * HZ / 2 +
HZ * acb->eeprom.delay_time;
@@ -4426,7 +4428,7 @@ static void adapter_init_scsi_host(struct Scsi_Host *host)
host->dma_channel = -1;
host->unique_id = acb->io_port_base;
host->irq = acb->irq_level;
- host->last_reset = jiffies;
+ acb->last_reset = jiffies;
host->max_id = 16;
if (host->max_id - 1 == eeprom->scsi_id)
@@ -4484,7 +4486,7 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
/*spin_unlock_irq (&io_request_lock); */
udelay(500);
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + HZ / 2 +
HZ * acb->eeprom.delay_time;
@@ -4859,7 +4861,6 @@ static void dc395x_remove_one(struct pci_dev *dev)
adapter_uninit(acb);
pci_disable_device(dev);
scsi_host_put(scsi_host);
- pci_set_drvdata(dev, NULL);
}
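The dc395x.c changes above move the last_reset timestamp out of struct Scsi_Host and into the driver's private AdapterCtlBlk, with every jiffies comparison following it. A minimal sketch of the timestamp-plus-time_before() pattern is shown below; it is not taken from the patch, and the structure and function names are made up.

#include <linux/jiffies.h>

struct example_ctlblk {
        unsigned long last_reset;       /* jiffies deadline set after a bus reset */
};

/* Sketch only: refuse new commands until half a second before the settle time. */
static bool example_may_start_io(struct example_ctlblk *acb)
{
        return !time_before(jiffies, acb->last_reset - HZ / 2);
}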
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 68adb8955d2..5248c888552 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -481,6 +481,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
+ /*
+ * Device internal reset
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
/*
* Mode Parameters Changed
@@ -517,12 +522,13 @@ static int alua_check_sense(struct scsi_device *sdev,
/*
* alua_rtpg - Evaluate REPORT TARGET GROUP STATES
* @sdev: the device to be evaluated.
+ * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
*
* Evaluate the Target Port Group State.
* Returns SCSI_DH_DEV_OFFLINED if the path is
* found to be unusable.
*/
-static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
{
struct scsi_sense_hdr sense_hdr;
int len, k, off, valid_states = 0;
@@ -594,7 +600,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
else
h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
- if (orig_transition_tmo != h->transition_tmo) {
+ if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
sdev_printk(KERN_INFO, sdev,
"%s: transition timeout set to %d seconds\n",
ALUA_DH_NAME, h->transition_tmo);
@@ -632,14 +638,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
switch (h->state) {
case TPGS_STATE_TRANSITIONING:
- if (time_before(jiffies, expiry)) {
- /* State transition, retry */
- interval += 2000;
- msleep(interval);
- goto retry;
+ if (wait_for_transition) {
+ if (time_before(jiffies, expiry)) {
+ /* State transition, retry */
+ interval += 2000;
+ msleep(interval);
+ goto retry;
+ }
+ err = SCSI_DH_RETRY;
+ } else {
+ err = SCSI_DH_OK;
}
+
/* Transitioning time exceeded, set port to standby */
- err = SCSI_DH_RETRY;
h->state = TPGS_STATE_STANDBY;
break;
case TPGS_STATE_OFFLINE:
@@ -673,7 +684,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
if (err != SCSI_DH_OK)
goto out;
- err = alua_rtpg(sdev, h);
+ err = alua_rtpg(sdev, h, 0);
if (err != SCSI_DH_OK)
goto out;
@@ -733,7 +744,7 @@ static int alua_activate(struct scsi_device *sdev,
int err = SCSI_DH_OK;
int stpg = 0;
- err = alua_rtpg(sdev, h);
+ err = alua_rtpg(sdev, h, 1);
if (err != SCSI_DH_OK)
goto out;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 69c915aa77c..4b9cf93f3fb 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -786,6 +786,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1742"},
{"IBM", "1745"},
{"IBM", "1746"},
+ {"IBM", "1813"},
{"IBM", "1814"},
{"IBM", "1815"},
{"IBM", "1818"},
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 19e1b422260..c0ae8fa57a3 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -448,19 +448,8 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
}
rmb();
- /*
- * TODO: I need to block here if I am processing ioctl cmds
- * but if the outstanding cmds all finish before the ioctl,
- * the scsi-core will not know to start sending cmds to me again.
- * I need to a way to restart the scsi-cores queues or should I block
- * calling scsi_done on the outstanding cmds instead
- * for now we don't set the IOCTL state
- */
- if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
- pHba->host->last_reset = jiffies;
- pHba->host->resetting = 1;
- return 1;
- }
+ if ((pHba->state) & DPTI_STATE_RESET)
+ return SCSI_MLQUEUE_HOST_BUSY;
// TODO if the cmd->device if offline then I may need to issue a bus rescan
// followed by a get_lct to see if the device is there anymore
@@ -1811,21 +1800,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
}
do {
- if(pHba->host)
+ /*
+ * Stop any new commands from entering the
+ * controller while processing the ioctl
+ */
+ if (pHba->host) {
+ scsi_block_requests(pHba->host);
spin_lock_irqsave(pHba->host->host_lock, flags);
- // This state stops any new commands from enterring the
- // controller while processing the ioctl
-// pHba->state |= DPTI_STATE_IOCTL;
-// We can't set this now - The scsi subsystem sets host_blocked and
-// the queue empties and stops. We need a way to restart the queue
+ }
rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
if (rcode != 0)
printk("adpt_i2o_passthru: post wait failed %d %p\n",
rcode, reply);
-// pHba->state &= ~DPTI_STATE_IOCTL;
- if(pHba->host)
+ if (pHba->host) {
spin_unlock_irqrestore(pHba->host->host_lock, flags);
- } while(rcode == -ETIMEDOUT);
+ scsi_unblock_requests(pHba->host);
+ }
+ } while (rcode == -ETIMEDOUT);
if(rcode){
goto cleanup;
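The dpt_i2o.c hunks above replace the never-enabled DPTI_STATE_IOCTL flag with scsi_block_requests()/scsi_unblock_requests() around the pass-through wait, and make the queuecommand path return SCSI_MLQUEUE_HOST_BUSY while a reset is in progress. A minimal sketch of the quiesce pattern follows, assuming a hypothetical long-running management helper; it is not part of the patch.

#include <scsi/scsi_host.h>

/* Sketch only: stand-in for the real long-running management command. */
static int example_do_mgmt(struct Scsi_Host *shost)
{
        return 0;
}

static int example_mgmt_op(struct Scsi_Host *shost)
{
        int ret;

        scsi_block_requests(shost);     /* stop the midlayer queueing new commands */
        ret = example_do_mgmt(shost);
        scsi_unblock_requests(shost);   /* let queuecommand run again */

        return ret;
}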
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index beded716f93..aeb046186c8 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -202,7 +202,6 @@ struct adpt_channel {
// HBA state flags
#define DPTI_STATE_RESET (0x01)
-#define DPTI_STATE_IOCTL (0x02)
typedef struct _adpt_hba {
struct _adpt_hba *next;
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 0838e265e0b..3fd305d6b67 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -799,47 +799,47 @@ struct esas2r_adapter {
struct esas2r_target *targetdb_end;
unsigned char *regs;
unsigned char *data_window;
- u32 volatile flags;
- #define AF_PORT_CHANGE (u32)(0x00000001)
- #define AF_CHPRST_NEEDED (u32)(0x00000004)
- #define AF_CHPRST_PENDING (u32)(0x00000008)
- #define AF_CHPRST_DETECTED (u32)(0x00000010)
- #define AF_BUSRST_NEEDED (u32)(0x00000020)
- #define AF_BUSRST_PENDING (u32)(0x00000040)
- #define AF_BUSRST_DETECTED (u32)(0x00000080)
- #define AF_DISABLED (u32)(0x00000100)
- #define AF_FLASH_LOCK (u32)(0x00000200)
- #define AF_OS_RESET (u32)(0x00002000)
- #define AF_FLASHING (u32)(0x00004000)
- #define AF_POWER_MGT (u32)(0x00008000)
- #define AF_NVR_VALID (u32)(0x00010000)
- #define AF_DEGRADED_MODE (u32)(0x00020000)
- #define AF_DISC_PENDING (u32)(0x00040000)
- #define AF_TASKLET_SCHEDULED (u32)(0x00080000)
- #define AF_HEARTBEAT (u32)(0x00200000)
- #define AF_HEARTBEAT_ENB (u32)(0x00400000)
- #define AF_NOT_PRESENT (u32)(0x00800000)
- #define AF_CHPRST_STARTED (u32)(0x01000000)
- #define AF_FIRST_INIT (u32)(0x02000000)
- #define AF_POWER_DOWN (u32)(0x04000000)
- #define AF_DISC_IN_PROG (u32)(0x08000000)
- #define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
- #define AF_LEGACY_SGE_MODE (u32)(0x20000000)
- #define AF_DISC_POLLED (u32)(0x40000000)
- u32 volatile flags2;
- #define AF2_SERIAL_FLASH (u32)(0x00000001)
- #define AF2_DEV_SCAN (u32)(0x00000002)
- #define AF2_DEV_CNT_OK (u32)(0x00000004)
- #define AF2_COREDUMP_AVAIL (u32)(0x00000008)
- #define AF2_COREDUMP_SAVED (u32)(0x00000010)
- #define AF2_VDA_POWER_DOWN (u32)(0x00000100)
- #define AF2_THUNDERLINK (u32)(0x00000200)
- #define AF2_THUNDERBOLT (u32)(0x00000400)
- #define AF2_INIT_DONE (u32)(0x00000800)
- #define AF2_INT_PENDING (u32)(0x00001000)
- #define AF2_TIMER_TICK (u32)(0x00002000)
- #define AF2_IRQ_CLAIMED (u32)(0x00004000)
- #define AF2_MSI_ENABLED (u32)(0x00008000)
+ long flags;
+ #define AF_PORT_CHANGE 0
+ #define AF_CHPRST_NEEDED 1
+ #define AF_CHPRST_PENDING 2
+ #define AF_CHPRST_DETECTED 3
+ #define AF_BUSRST_NEEDED 4
+ #define AF_BUSRST_PENDING 5
+ #define AF_BUSRST_DETECTED 6
+ #define AF_DISABLED 7
+ #define AF_FLASH_LOCK 8
+ #define AF_OS_RESET 9
+ #define AF_FLASHING 10
+ #define AF_POWER_MGT 11
+ #define AF_NVR_VALID 12
+ #define AF_DEGRADED_MODE 13
+ #define AF_DISC_PENDING 14
+ #define AF_TASKLET_SCHEDULED 15
+ #define AF_HEARTBEAT 16
+ #define AF_HEARTBEAT_ENB 17
+ #define AF_NOT_PRESENT 18
+ #define AF_CHPRST_STARTED 19
+ #define AF_FIRST_INIT 20
+ #define AF_POWER_DOWN 21
+ #define AF_DISC_IN_PROG 22
+ #define AF_COMM_LIST_TOGGLE 23
+ #define AF_LEGACY_SGE_MODE 24
+ #define AF_DISC_POLLED 25
+ long flags2;
+ #define AF2_SERIAL_FLASH 0
+ #define AF2_DEV_SCAN 1
+ #define AF2_DEV_CNT_OK 2
+ #define AF2_COREDUMP_AVAIL 3
+ #define AF2_COREDUMP_SAVED 4
+ #define AF2_VDA_POWER_DOWN 5
+ #define AF2_THUNDERLINK 6
+ #define AF2_THUNDERBOLT 7
+ #define AF2_INIT_DONE 8
+ #define AF2_INT_PENDING 9
+ #define AF2_TIMER_TICK 10
+ #define AF2_IRQ_CLAIMED 11
+ #define AF2_MSI_ENABLED 12
atomic_t disable_cnt;
atomic_t dis_ints_cnt;
u32 int_stat;
@@ -1150,16 +1150,6 @@ void esas2r_queue_fw_event(struct esas2r_adapter *a,
int data_sz);
/* Inline functions */
-static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
-{
- return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
-}
-
-static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
-{
- return test_and_clear_bit(ilog2(bits),
- (volatile unsigned long *)flags);
-}
/* Allocate a chip scatter/gather list entry */
static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
@@ -1217,7 +1207,6 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
struct esas2r_adapter *a)
{
union atto_vda_req *vrq = rq->vrq;
- u32 handle;
INIT_LIST_HEAD(&rq->sg_table_head);
rq->data_buf = (void *)(vrq + 1);
@@ -1253,11 +1242,9 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
/*
* add a reference number to the handle to make it unique (until it
- * wraps of course) while preserving the upper word
+ * wraps of course) while preserving the least significant word
*/
-
- handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
- vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
+ vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
/*
* the following formats a SCSI request. the caller can override as
@@ -1303,10 +1290,13 @@ static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
{
- return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
- | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
- | AF_PORT_CHANGE))
- ? true : false;
+
+ return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
+ test_bit(AF_BUSRST_DETECTED, &a->flags) ||
+ test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+ test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+ test_bit(AF_PORT_CHANGE, &a->flags);
+
}
/*
@@ -1345,24 +1335,24 @@ static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
{
/* make sure we don't schedule twice */
- if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
- ilog2(AF_TASKLET_SCHEDULED)))
+ if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
tasklet_hi_schedule(&a->tasklet);
}
static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
{
- if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
- && (a->nvram->options2 & SASNVR2_HEARTBEAT))
- esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ (a->nvram->options2 & SASNVR2_HEARTBEAT))
+ set_bit(AF_HEARTBEAT_ENB, &a->flags);
else
- esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+ clear_bit(AF_HEARTBEAT_ENB, &a->flags);
}
static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
{
- esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
- esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+ clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+ clear_bit(AF_HEARTBEAT, &a->flags);
}
/* Set the initial state for resetting the adapter on the next pass through
@@ -1372,9 +1362,9 @@ static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
{
esas2r_disable_heartbeat(a);
- esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
- esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
- esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+ set_bit(AF_CHPRST_NEEDED, &a->flags);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
}
/* See if an interrupt is pending on the adapter. */
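The esas2r.h hunks above turn the two volatile u32 flag words into bit numbers on an unsigned long and replace the esas2r_lock_set_flags()/esas2r_lock_clear_flags() wrappers with the kernel's atomic bitops, which the remaining esas2r diffs then call directly. A minimal sketch of that conversion, with made-up flag and structure names, follows; it is not part of the patch.

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_RESET_PENDING        0       /* bit numbers, not masks */
#define EX_DISC_DONE            1

struct example_adapter {
        unsigned long flags;
};

/* Sketch only: set_bit()/clear_bit() are atomic, so no extra locking is needed. */
static void example_request_reset(struct example_adapter *a)
{
        set_bit(EX_RESET_PENDING, &a->flags);
}

/* test_and_set_bit() returns the previous value, so only one caller proceeds. */
static bool example_claim_discovery(struct example_adapter *a)
{
        if (test_and_set_bit(EX_DISC_DONE, &a->flags))
                return false;
        return !test_bit(EX_RESET_PENDING, &a->flags);
}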
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
index dec6c334ce3..1c079f4300a 100644
--- a/drivers/scsi/esas2r/esas2r_disc.c
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -86,9 +86,9 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)
esas2r_trace_enter();
- esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
- esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
- esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+ clear_bit(AF2_DEV_SCAN, &a->flags2);
+ clear_bit(AF2_DEV_CNT_OK, &a->flags2);
a->disc_start_time = jiffies_to_msecs(jiffies);
a->disc_wait_time = nvr->dev_wait_time * 1000;
@@ -107,7 +107,8 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)
a->general_req.interrupt_cx = NULL;
- if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+ test_bit(AF_POWER_MGT, &a->flags)) {
if (a->prev_dev_cnt == 0) {
/* Don't bother waiting if there is nothing to wait
* for.
@@ -212,9 +213,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
|| a->disc_wait_cnt == 0)) {
/* After three seconds of waiting, schedule a scan. */
if (time >= 3000
- && !(esas2r_lock_set_flags(&a->flags2,
- AF2_DEV_SCAN) &
- ilog2(AF2_DEV_SCAN))) {
+ && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags);
@@ -228,18 +227,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
* We are done waiting...we think. Adjust the wait time to
* consume events after the count is met.
*/
- if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
- & ilog2(AF2_DEV_CNT_OK)))
+ if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
a->disc_wait_time = time + 3000;
/* If we haven't done a full scan yet, do it now. */
- if (!(esas2r_lock_set_flags(&a->flags2,
- AF2_DEV_SCAN) &
- ilog2(AF2_DEV_SCAN))) {
+ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags);
-
esas2r_trace_exit();
return;
}
@@ -253,9 +248,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
return;
}
} else {
- if (!(esas2r_lock_set_flags(&a->flags2,
- AF2_DEV_SCAN) &
- ilog2(AF2_DEV_SCAN))) {
+ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags);
@@ -265,8 +258,8 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
/* We want to stop waiting for devices. */
a->disc_wait_time = 0;
- if ((a->flags & AF_DISC_POLLED)
- && (a->flags & AF_DISC_IN_PROG)) {
+ if (test_bit(AF_DISC_POLLED, &a->flags) &&
+ test_bit(AF_DISC_IN_PROG, &a->flags)) {
/*
* Polled discovery is still pending so continue the active
* discovery until it is done. At that point, we will stop
@@ -280,14 +273,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
* driven; i.e. There is no transition.
*/
esas2r_disc_fix_curr_requests(a);
- esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+ clear_bit(AF_DISC_PENDING, &a->flags);
/*
* We have deferred target state changes until now because we
* don't want to report any removals (due to the first arrival)
* until the device wait time expires.
*/
- esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+ set_bit(AF_PORT_CHANGE, &a->flags);
}
esas2r_trace_exit();
@@ -308,7 +301,8 @@ void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
* Don't start discovery before or during polled discovery. if we did,
* we would have a deadlock if we are in the ISR already.
*/
- if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_POLLED, &a->flags))
esas2r_disc_start_port(a);
esas2r_trace_exit();
@@ -322,7 +316,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
esas2r_trace_enter();
- if (a->flags & AF_DISC_IN_PROG) {
+ if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
esas2r_trace_exit();
return false;
@@ -330,7 +324,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
/* If there is a discovery waiting, process it. */
if (dc->disc_evt) {
- if ((a->flags & AF_DISC_POLLED)
+ if (test_bit(AF_DISC_POLLED, &a->flags)
&& a->disc_wait_time == 0) {
/*
* We are doing polled discovery, but we no longer want
@@ -347,7 +341,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
esas2r_hdebug("disc done");
- esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+ set_bit(AF_PORT_CHANGE, &a->flags);
esas2r_trace_exit();
@@ -356,10 +350,10 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
/* Handle the discovery context */
esas2r_trace("disc_evt: %d", dc->disc_evt);
- esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
+ set_bit(AF_DISC_IN_PROG, &a->flags);
dc->flags = 0;
- if (a->flags & AF_DISC_POLLED)
+ if (test_bit(AF_DISC_POLLED, &a->flags))
dc->flags |= DCF_POLLED;
rq->interrupt_cx = dc;
@@ -379,7 +373,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
}
/* Continue interrupt driven discovery */
- if (!(a->flags & AF_DISC_POLLED))
+ if (!test_bit(AF_DISC_POLLED, &a->flags))
ret = esas2r_disc_continue(a, rq);
else
ret = true;
@@ -453,10 +447,10 @@ static bool esas2r_disc_continue(struct esas2r_adapter *a,
/* Discovery is done...for now. */
rq->interrupt_cx = NULL;
- if (!(a->flags & AF_DISC_PENDING))
+ if (!test_bit(AF_DISC_PENDING, &a->flags))
esas2r_disc_fix_curr_requests(a);
- esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
/* Start the next discovery. */
return esas2r_disc_start_port(a);
@@ -480,7 +474,8 @@ static bool esas2r_disc_start_request(struct esas2r_adapter *a,
spin_lock_irqsave(&a->queue_lock, flags);
- if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_FLASHING, &a->flags))
esas2r_disc_local_start_request(a, rq);
else
list_add_tail(&rq->req_list, &a->defer_list);
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 2ec3c23275b..b7dc59fca7a 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -231,7 +231,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
* RS_PENDING, FM API tasks will continue.
*/
rq->req_stat = RS_PENDING;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
/* not suppported for now */;
else
build_flash_msg(a, rq);
@@ -315,7 +315,7 @@ static bool complete_fmapi_req(struct esas2r_adapter *a,
memset(fc->scratch, 0, FM_BUF_SZ);
esas2r_enable_heartbeat(a);
- esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
+ clear_bit(AF_FLASH_LOCK, &a->flags);
return false;
}
@@ -526,7 +526,7 @@ no_cfg:
* The download is complete. If in degraded mode,
* attempt a chip reset.
*/
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
esas2r_local_reset_adapter(a);
a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
@@ -890,7 +890,7 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
}
}
- if (a->flags & AF_DEGRADED_MODE) {
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
fs->status = ATTO_STS_DEGRADED;
return false;
}
@@ -945,8 +945,12 @@ static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
/* Now wait for the firmware to process it */
starttime = jiffies_to_msecs(jiffies);
- timeout = a->flags &
- (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+ test_bit(AF_DISC_PENDING, &a->flags))
+ timeout = 40000;
+ else
+ timeout = 5000;
while (true) {
intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
@@ -1008,7 +1012,7 @@ bool esas2r_read_flash_block(struct esas2r_adapter *a,
u32 offset;
u32 iatvr;
- if (a->flags2 & AF2_SERIAL_FLASH)
+ if (test_bit(AF2_SERIAL_FLASH, &a->flags2))
iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
else
iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
@@ -1236,9 +1240,9 @@ static void esas2r_nvram_callback(struct esas2r_adapter *a,
if (rq->req_stat != RS_PENDING) {
/* update the NVRAM state */
if (rq->req_stat == RS_SUCCESS)
- esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+ set_bit(AF_NVR_VALID, &a->flags);
else
- esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+ clear_bit(AF_NVR_VALID, &a->flags);
esas2r_enable_heartbeat(a);
@@ -1258,7 +1262,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return false;
if (down_interruptible(&a->nvram_semaphore))
@@ -1302,7 +1306,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram));
- if (a->flags & AF_LEGACY_SGE_MODE) {
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
vrq->data.sge[0].length =
cpu_to_le32(SGE_LAST |
@@ -1337,7 +1341,7 @@ bool esas2r_nvram_validate(struct esas2r_adapter *a)
} else if (n->version > SASNVR_VERSION) {
esas2r_hdebug("invalid NVRAM version");
} else {
- esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+ set_bit(AF_NVR_VALID, &a->flags);
rslt = true;
}
@@ -1359,7 +1363,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
struct esas2r_sas_nvram *n = a->nvram;
u32 time = jiffies_to_msecs(jiffies);
- esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+ clear_bit(AF_NVR_VALID, &a->flags);
*n = default_sas_nvram;
n->sas_addr[3] |= 0x0F;
n->sas_addr[4] = HIBYTE(LOWORD(time));
@@ -1389,7 +1393,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
u8 j;
struct esas2r_component_header *ch;
- if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
+ if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {
/* flag was already set */
fi->status = FI_STAT_BUSY;
return false;
@@ -1413,7 +1417,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
}
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
switch (fi->action) {
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index da1869df240..b9750e296d7 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -216,7 +216,7 @@ use_legacy_interrupts:
goto use_legacy_interrupts;
}
a->intr_mode = INTR_MODE_MSI;
- esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
+ set_bit(AF2_MSI_ENABLED, &a->flags2);
break;
@@ -252,7 +252,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
return;
}
- esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
+ set_bit(AF2_IRQ_CLAIMED, &a->flags2);
esas2r_log(ESAS2R_LOG_INFO,
"claimed IRQ %d flags: 0x%lx",
a->pcid->irq, flags);
@@ -380,10 +380,10 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
/* interrupts will be disabled until we are done with init */
atomic_inc(&a->dis_ints_cnt);
atomic_inc(&a->disable_cnt);
- a->flags |= AF_CHPRST_PENDING
- | AF_DISC_PENDING
- | AF_FIRST_INIT
- | AF_LEGACY_SGE_MODE;
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+ set_bit(AF_FIRST_INIT, &a->flags);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
a->init_msg = ESAS2R_INIT_MSG_START;
a->max_vdareq_size = 128;
@@ -440,11 +440,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_claim_interrupts(a);
- if (a->flags2 & AF2_IRQ_CLAIMED)
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
esas2r_enable_chip_interrupts(a);
- esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
- if (!(a->flags & AF_DEGRADED_MODE))
+ set_bit(AF2_INIT_DONE, &a->flags2);
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags))
esas2r_kickoff_timer(a);
esas2r_debug("esas2r_init_adapter done for %p (%d)",
a, a->disable_cnt);
@@ -457,8 +457,8 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
{
struct esas2r_mem_desc *memdesc, *next;
- if ((a->flags2 & AF2_INIT_DONE)
- && (!(a->flags & AF_DEGRADED_MODE))) {
+ if ((test_bit(AF2_INIT_DONE, &a->flags2))
+ && (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
if (!power_management) {
del_timer_sync(&a->timer);
tasklet_kill(&a->tasklet);
@@ -508,19 +508,19 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
}
/* Clean up interrupts */
- if (a->flags2 & AF2_IRQ_CLAIMED) {
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"free_irq(%d) called", a->pcid->irq);
free_irq(a->pcid->irq, a);
esas2r_debug("IRQ released");
- esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
+ clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
}
- if (a->flags2 & AF2_MSI_ENABLED) {
+ if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
pci_disable_msi(a->pcid);
- esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
+ clear_bit(AF2_MSI_ENABLED, &a->flags2);
esas2r_debug("MSI disabled");
}
@@ -641,12 +641,10 @@ void esas2r_kill_adapter(int i)
pci_set_drvdata(a->pcid, NULL);
esas2r_adapters[i] = NULL;
- if (a->flags2 & AF2_INIT_DONE) {
- esas2r_lock_clear_flags(&a->flags2,
- AF2_INIT_DONE);
+ if (test_bit(AF2_INIT_DONE, &a->flags2)) {
+ clear_bit(AF2_INIT_DONE, &a->flags2);
- esas2r_lock_set_flags(&a->flags,
- AF_DEGRADED_MODE);
+ set_bit(AF_DEGRADED_MODE, &a->flags);
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->host->shost_gendev),
@@ -759,7 +757,7 @@ int esas2r_resume(struct pci_dev *pdev)
esas2r_claim_interrupts(a);
- if (a->flags2 & AF2_IRQ_CLAIMED) {
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
/*
* Now that system interrupt(s) are claimed, we can enable
* chip interrupts.
@@ -781,7 +779,7 @@ error_exit:
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
- esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+ set_bit(AF_DEGRADED_MODE, &a->flags);
esas2r_log(ESAS2R_LOG_CRIT,
"setting adapter to degraded mode: %s\n", error_str);
return false;
@@ -809,7 +807,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
int pcie_cap_reg;
pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
- if (0xffff & pcie_cap_reg) {
+ if (pcie_cap_reg) {
u16 devcontrol;
pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -896,7 +894,7 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
&& (a->pcid->subsystem_device & ATTO_SSDID_TBT))
a->flags2 |= AF2_THUNDERBOLT;
- if (a->flags2 & AF2_THUNDERBOLT)
+ if (test_bit(AF2_THUNDERBOLT, &a->flags2))
a->flags2 |= AF2_SERIAL_FLASH;
if (a->pcid->subsystem_device == ATTO_TLSH_1068)
@@ -956,14 +954,14 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
a->outbound_copy = (u32 volatile *)high;
high += sizeof(u32);
- if (!(a->flags & AF_NVR_VALID))
+ if (!test_bit(AF_NVR_VALID, &a->flags))
esas2r_nvram_set_defaults(a);
/* update the caller's uncached memory area pointer */
*uncached_area = (void *)high;
/* initialize the allocated memory */
- if (a->flags & AF_FIRST_INIT) {
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
memset(a->req_table, 0,
(num_requests + num_ae_requests +
1) * sizeof(struct esas2r_request *));
@@ -1019,7 +1017,7 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)
* if the chip reset detected flag is set, we can bypass a bunch of
* stuff.
*/
- if (a->flags & AF_CHPRST_DETECTED)
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags))
goto skip_chip_reset;
/*
@@ -1057,14 +1055,12 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)
doorbell);
if (ver == DRBL_FW_VER_0) {
- esas2r_lock_set_flags(&a->flags,
- AF_LEGACY_SGE_MODE);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge;
} else if (ver == DRBL_FW_VER_1) {
- esas2r_lock_clear_flags(&a->flags,
- AF_LEGACY_SGE_MODE);
+ clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
a->max_vdareq_size = 1024;
a->build_sgl = esas2r_build_sg_list_prd;
@@ -1139,7 +1135,7 @@ skip_chip_reset:
*a->outbound_copy =
a->last_write =
a->last_read = a->list_size - 1;
- esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
a->last_write);
esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
@@ -1204,9 +1200,9 @@ skip_chip_reset:
*/
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
if (doorbell & DRBL_POWER_DOWN)
- esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+ set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
else
- esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+ clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
/*
* enable assertion of outbound queue and doorbell interrupts in the
@@ -1239,8 +1235,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
0,
NULL);
ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
- ci->sgl_page_size = sgl_page_size;
- ci->epoch_time = now.tv_sec;
+ ci->sgl_page_size = cpu_to_le32(sgl_page_size);
+ ci->epoch_time = cpu_to_le32(now.tv_sec);
rq->flags |= RF_FAILURE_OK;
a->init_msg = ESAS2R_INIT_MSG_INIT;
break;
@@ -1250,12 +1246,15 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
if (rq->req_stat == RS_SUCCESS) {
u32 major;
u32 minor;
+ u16 fw_release;
a->fw_version = le16_to_cpu(
rq->func_rsp.cfg_rsp.vda_version);
a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
- major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
- minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
+ fw_release = le16_to_cpu(
+ rq->func_rsp.cfg_rsp.fw_release);
+ major = LOBYTE(fw_release);
+ minor = HIBYTE(fw_release);
a->fw_version += (major << 16) + (minor << 24);
} else {
esas2r_hdebug("FAILED");
@@ -1266,9 +1265,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
* unsupported config requests correctly.
*/
- if ((a->flags2 & AF2_THUNDERBOLT)
- || (be32_to_cpu(a->fw_version) >
- be32_to_cpu(0x47020052))) {
+ if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
+ || (be32_to_cpu(a->fw_version) > 0x00524702)) {
esas2r_hdebug("CFG get init");
esas2r_build_cfg_req(a,
rq,
@@ -1361,10 +1359,10 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
struct esas2r_request *rq;
u32 i;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
goto exit;
- if (!(a->flags & AF_NVR_VALID)) {
+ if (!test_bit(AF_NVR_VALID, &a->flags)) {
if (!esas2r_nvram_read_direct(a))
esas2r_log(ESAS2R_LOG_WARN,
"invalid/missing NVRAM parameters");
@@ -1376,8 +1374,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
}
/* The firmware is ready. */
- esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+ clear_bit(AF_DEGRADED_MODE, &a->flags);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
/* Post all the async event requests */
for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
@@ -1398,8 +1396,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
esas2r_hdebug("firmware revision: %s", a->fw_rev);
- if ((a->flags & AF_CHPRST_DETECTED)
- && (a->flags & AF_FIRST_INIT)) {
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags)
+ && (test_bit(AF_FIRST_INIT, &a->flags))) {
esas2r_enable_chip_interrupts(a);
return true;
}
@@ -1423,18 +1421,18 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
* Block Tasklets from getting scheduled and indicate this is
* polled discovery.
*/
- esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
- esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
+ set_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ set_bit(AF_DISC_POLLED, &a->flags);
/*
* Temporarily bring the disable count to zero to enable
* deferred processing. Note that the count is already zero
* after the first initialization.
*/
- if (a->flags & AF_FIRST_INIT)
+ if (test_bit(AF_FIRST_INIT, &a->flags))
atomic_dec(&a->disable_cnt);
- while (a->flags & AF_DISC_PENDING) {
+ while (test_bit(AF_DISC_PENDING, &a->flags)) {
schedule_timeout_interruptible(msecs_to_jiffies(100));
/*
@@ -1453,7 +1451,7 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
* we have to make sure the timer tick processes the
* doorbell indicating the firmware is ready.
*/
- if (!(a->flags & AF_CHPRST_PENDING))
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags))
esas2r_disc_check_for_work(a);
/* Simulate a timer tick. */
@@ -1473,11 +1471,11 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
}
- if (a->flags & AF_FIRST_INIT)
+ if (test_bit(AF_FIRST_INIT, &a->flags))
atomic_inc(&a->disable_cnt);
- esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
- esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ clear_bit(AF_DISC_POLLED, &a->flags);
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
}
@@ -1504,26 +1502,26 @@ exit:
* need to get done before we exit.
*/
- if ((a->flags & AF_CHPRST_DETECTED)
- && (a->flags & AF_FIRST_INIT)) {
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
+ test_bit(AF_FIRST_INIT, &a->flags)) {
/*
* Reinitialization was performed during the first
* initialization. Only clear the chip reset flag so the
* original device polling is not cancelled.
*/
if (!rslt)
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
} else {
/* First initialization or a subsequent re-init is complete. */
if (!rslt) {
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
- esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ clear_bit(AF_DISC_PENDING, &a->flags);
}
/* Enable deferred processing after the first initialization. */
- if (a->flags & AF_FIRST_INIT) {
- esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ clear_bit(AF_FIRST_INIT, &a->flags);
if (atomic_dec_return(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
@@ -1535,7 +1533,7 @@ exit:
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
- esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+ set_bit(AF_OS_RESET, &a->flags);
esas2r_local_reset_adapter(a);
esas2r_schedule_tasklet(a);
}
@@ -1550,17 +1548,17 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
* dump is located in the upper 512KB of the onchip SRAM. Make sure
* to not overwrite a previous crash that was saved.
*/
- if ((a->flags2 & AF2_COREDUMP_AVAIL)
- && !(a->flags2 & AF2_COREDUMP_SAVED)) {
+ if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
+ !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
esas2r_read_mem_block(a,
a->fw_coredump_buff,
MW_DATA_ADDR_SRAM + 0x80000,
ESAS2R_FWCOREDUMP_SZ);
- esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
+ set_bit(AF2_COREDUMP_SAVED, &a->flags2);
}
- esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+ clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
/* Reset the chip */
if (a->pcid->revision == MVR_FREY_B2)
@@ -1606,10 +1604,10 @@ static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
*/
void esas2r_power_down(struct esas2r_adapter *a)
{
- esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
- esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
+ set_bit(AF_POWER_MGT, &a->flags);
+ set_bit(AF_POWER_DOWN, &a->flags);
- if (!(a->flags & AF_DEGRADED_MODE)) {
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
u32 starttime;
u32 doorbell;
@@ -1649,14 +1647,14 @@ void esas2r_power_down(struct esas2r_adapter *a)
* For versions of firmware that support it tell them the driver
* is powering down.
*/
- if (a->flags2 & AF2_VDA_POWER_DOWN)
+ if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
esas2r_power_down_notify_firmware(a);
}
/* Suspend I/O processing. */
- esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
- esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
- esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+ set_bit(AF_OS_RESET, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
esas2r_process_adapter_reset(a);
@@ -1673,9 +1671,9 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
{
bool ret;
- esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
+ clear_bit(AF_POWER_DOWN, &a->flags);
esas2r_init_pci_cfg_space(a);
- esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
+ set_bit(AF_FIRST_INIT, &a->flags);
atomic_inc(&a->disable_cnt);
/* reinitialize the adapter */
@@ -1687,17 +1685,17 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
esas2r_send_reset_ae(a, true);
/* clear this flag after initialization. */
- esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
+ clear_bit(AF_POWER_MGT, &a->flags);
return ret;
}
bool esas2r_is_adapter_present(struct esas2r_adapter *a)
{
- if (a->flags & AF_NOT_PRESENT)
+ if (test_bit(AF_NOT_PRESENT, &a->flags))
return false;
if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
- esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
+ set_bit(AF_NOT_PRESENT, &a->flags);
return false;
}
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
index c2d4ff57c5c..f16d6bcf9bb 100644
--- a/drivers/scsi/esas2r/esas2r_int.c
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -96,7 +96,7 @@ irqreturn_t esas2r_interrupt(int irq, void *dev_id)
if (!esas2r_adapter_interrupt_pending(a))
return IRQ_NONE;
- esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
+ set_bit(AF2_INT_PENDING, &a->flags2);
esas2r_schedule_tasklet(a);
return IRQ_HANDLED;
@@ -317,9 +317,10 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)
* = 2 - can start any request
*/
- if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
+ if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+ test_bit(AF_FLASHING, &a->flags))
startreqs = 0;
- else if (a->flags & AF_DISC_PENDING)
+ else if (test_bit(AF_DISC_PENDING, &a->flags))
startreqs = 1;
atomic_inc(&a->disable_cnt);
@@ -367,7 +368,7 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)
* Flashing could have been set by last local
* start
*/
- if (a->flags & AF_FLASHING)
+ if (test_bit(AF_FLASHING, &a->flags))
break;
}
}
@@ -404,7 +405,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)
dc->disc_evt = 0;
- esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
}
/*
@@ -425,7 +426,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)
a->last_write =
a->last_read = a->list_size - 1;
- esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
/* Kill all the requests on the active list */
list_for_each(element, &a->defer_list) {
@@ -470,7 +471,7 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)
if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
- esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
+ clear_bit(AF_OS_RESET, &a->flags);
esas2r_trace_exit();
}
@@ -478,10 +479,10 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
- esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
- esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
- esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+ clear_bit(AF_CHPRST_NEEDED, &a->flags);
+ clear_bit(AF_BUSRST_NEEDED, &a->flags);
+ clear_bit(AF_BUSRST_DETECTED, &a->flags);
+ clear_bit(AF_BUSRST_PENDING, &a->flags);
/*
* Make sure we don't get attempt more than 3 resets
* when the uptime between resets does not exceed one
@@ -507,10 +508,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
* prevent the heartbeat from trying to recover.
*/
- esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
- esas2r_lock_set_flags(&a->flags, AF_DISABLED);
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
- esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+ set_bit(AF_DISABLED, &a->flags);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ clear_bit(AF_DISC_PENDING, &a->flags);
esas2r_disable_chip_interrupts(a);
a->int_mask = 0;
@@ -519,18 +520,17 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
esas2r_log(ESAS2R_LOG_CRIT,
"Adapter disabled because of hardware failure");
} else {
- u32 flags =
- esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
+ bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
- if (!(flags & AF_CHPRST_STARTED))
+ if (!alrdyrst)
/*
* Only disable interrupts if this is
* the first reset attempt.
*/
esas2r_disable_chip_interrupts(a);
- if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
- !(flags & AF_CHPRST_STARTED)) {
+ if ((test_bit(AF_POWER_MGT, &a->flags)) &&
+ !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
/*
* Don't reset the chip on the first
* deferred power up attempt.
@@ -543,10 +543,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
/* Kick off the reinitialization */
a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
a->chip_init_time = jiffies_to_msecs(jiffies);
- if (!(a->flags & AF_POWER_MGT)) {
+ if (!test_bit(AF_POWER_MGT, &a->flags)) {
esas2r_process_adapter_reset(a);
- if (!(flags & AF_CHPRST_STARTED)) {
+ if (!alrdyrst) {
/* Remove devices now that I/O is cleaned up. */
a->prev_dev_cnt =
esas2r_targ_db_get_tgt_cnt(a);
@@ -560,38 +560,37 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
- while (a->flags & AF_CHPRST_DETECTED) {
+ while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
/*
* Balance the enable in esas2r_initadapter_hw.
* Esas2r_power_down already took care of it for power
* management.
*/
- if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
- AF_POWER_MGT))
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_POWER_MGT, &a->flags))
esas2r_disable_chip_interrupts(a);
/* Reinitialize the chip. */
esas2r_check_adapter(a);
esas2r_init_adapter_hw(a, 0);
- if (a->flags & AF_CHPRST_NEEDED)
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
break;
- if (a->flags & AF_POWER_MGT) {
+ if (test_bit(AF_POWER_MGT, &a->flags)) {
/* Recovery from power management. */
- if (a->flags & AF_FIRST_INIT) {
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
/* Chip reset during normal power up */
esas2r_log(ESAS2R_LOG_CRIT,
"The firmware was reset during a normal power-up sequence");
} else {
/* Deferred power up complete. */
- esas2r_lock_clear_flags(&a->flags,
- AF_POWER_MGT);
+ clear_bit(AF_POWER_MGT, &a->flags);
esas2r_send_reset_ae(a, true);
}
} else {
/* Recovery from online chip reset. */
- if (a->flags & AF_FIRST_INIT) {
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
/* Chip reset during driver load */
} else {
/* Chip reset after driver load */
@@ -602,14 +601,14 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
"Recovering from a chip reset while the chip was online");
}
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
+ clear_bit(AF_CHPRST_STARTED, &a->flags);
esas2r_enable_chip_interrupts(a);
/*
* Clear this flag last! this indicates that the chip has been
* reset already during initialization.
*/
- esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
+ clear_bit(AF_CHPRST_DETECTED, &a->flags);
}
}
@@ -617,26 +616,28 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
/* Perform deferred tasks when chip interrupts are disabled */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
- if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
- if (a->flags & AF_CHPRST_NEEDED)
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+ test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
esas2r_chip_rst_needed_during_tasklet(a);
esas2r_handle_chip_rst_during_tasklet(a);
}
- if (a->flags & AF_BUSRST_NEEDED) {
+ if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
esas2r_hdebug("hard resetting bus");
- esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+ clear_bit(AF_BUSRST_NEEDED, &a->flags);
- if (a->flags & AF_FLASHING)
- esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+ if (test_bit(AF_FLASHING, &a->flags))
+ set_bit(AF_BUSRST_DETECTED, &a->flags);
else
esas2r_write_register_dword(a, MU_DOORBELL_IN,
DRBL_RESET_BUS);
}
- if (a->flags & AF_BUSRST_DETECTED) {
+ if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
esas2r_process_bus_reset(a);
esas2r_log_dev(ESAS2R_LOG_WARN,
@@ -645,14 +646,14 @@ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
scsi_report_bus_reset(a->host, 0);
- esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
- esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+ clear_bit(AF_BUSRST_DETECTED, &a->flags);
+ clear_bit(AF_BUSRST_PENDING, &a->flags);
esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
}
- if (a->flags & AF_PORT_CHANGE) {
- esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
+ if (test_bit(AF_PORT_CHANGE, &a->flags)) {
+ clear_bit(AF_PORT_CHANGE, &a->flags);
esas2r_targ_db_report_changes(a);
}
@@ -672,10 +673,10 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
if (doorbell & DRBL_RESET_BUS)
- esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+ set_bit(AF_BUSRST_DETECTED, &a->flags);
if (doorbell & DRBL_FORCE_INT)
- esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+ clear_bit(AF_HEARTBEAT, &a->flags);
if (doorbell & DRBL_PANIC_REASON_MASK) {
esas2r_hdebug("*** Firmware Panic ***");
@@ -683,7 +684,7 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
}
if (doorbell & DRBL_FW_RESET) {
- esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+ set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
esas2r_local_reset_adapter(a);
}
@@ -918,7 +919,7 @@ void esas2r_complete_request(struct esas2r_adapter *a,
{
if (rq->vrq->scsi.function == VDA_FUNC_FLASH
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
- esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
+ clear_bit(AF_FLASHING, &a->flags);
/* See if we setup a callback to do special processing */
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
index 324e2626a08..a8df916cd57 100644
--- a/drivers/scsi/esas2r/esas2r_io.c
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -49,7 +49,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
struct esas2r_request *startrq = rq;
unsigned long flags;
- if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
+ if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
+ test_bit(AF_POWER_DOWN, &a->flags))) {
if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
rq->req_stat = RS_SEL2;
else
@@ -69,8 +70,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
* Note that if AF_DISC_PENDING is set than this will
* go on the defer queue.
*/
- if (unlikely(t->target_state != TS_PRESENT
- && !(a->flags & AF_DISC_PENDING)))
+ if (unlikely(t->target_state != TS_PRESENT &&
+ !test_bit(AF_DISC_PENDING, &a->flags)))
rq->req_stat = RS_SEL;
}
}
@@ -91,8 +92,9 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
spin_lock_irqsave(&a->queue_lock, flags);
if (likely(list_empty(&a->defer_list) &&
- !(a->flags &
- (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_FLASHING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)))
esas2r_local_start_request(a, startrq);
else
list_add_tail(&startrq->req_list, &a->defer_list);
@@ -124,7 +126,7 @@ void esas2r_local_start_request(struct esas2r_adapter *a,
if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
- esas2r_lock_set_flags(&a->flags, AF_FLASHING);
+ set_bit(AF_FLASHING, &a->flags);
list_add_tail(&rq->req_list, &a->active_list);
esas2r_start_vda_request(a, rq);
@@ -147,11 +149,10 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,
if (a->last_write >= a->list_size) {
a->last_write = 0;
/* update the toggle bit */
- if (a->flags & AF_COMM_LIST_TOGGLE)
- esas2r_lock_clear_flags(&a->flags,
- AF_COMM_LIST_TOGGLE);
+ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+ clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
else
- esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
}
element =
@@ -169,7 +170,7 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,
/* Update the write pointer */
dw = a->last_write;
- if (a->flags & AF_COMM_LIST_TOGGLE)
+ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
dw |= MU_ILW_TOGGLE;
esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
@@ -687,18 +688,14 @@ static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
if (ver == DRBL_FW_VER_0) {
- esas2r_lock_set_flags(&a->flags,
- AF_CHPRST_DETECTED);
- esas2r_lock_set_flags(&a->flags,
- AF_LEGACY_SGE_MODE);
+ set_bit(AF_CHPRST_DETECTED, &a->flags);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge;
} else if (ver == DRBL_FW_VER_1) {
- esas2r_lock_set_flags(&a->flags,
- AF_CHPRST_DETECTED);
- esas2r_lock_clear_flags(&a->flags,
- AF_LEGACY_SGE_MODE);
+ set_bit(AF_CHPRST_DETECTED, &a->flags);
+ clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
a->max_vdareq_size = 1024;
a->build_sgl = esas2r_build_sg_list_prd;
@@ -719,28 +716,27 @@ void esas2r_timer_tick(struct esas2r_adapter *a)
a->last_tick_time = currtime;
/* count down the uptime */
- if (a->chip_uptime
- && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+ if (a->chip_uptime &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)) {
if (deltatime >= a->chip_uptime)
a->chip_uptime = 0;
else
a->chip_uptime -= deltatime;
}
- if (a->flags & AF_CHPRST_PENDING) {
- if (!(a->flags & AF_CHPRST_NEEDED)
- && !(a->flags & AF_CHPRST_DETECTED))
+ if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
+ if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
+ !test_bit(AF_CHPRST_DETECTED, &a->flags))
esas2r_handle_pending_reset(a, currtime);
} else {
- if (a->flags & AF_DISC_PENDING)
+ if (test_bit(AF_DISC_PENDING, &a->flags))
esas2r_disc_check_complete(a);
-
- if (a->flags & AF_HEARTBEAT_ENB) {
- if (a->flags & AF_HEARTBEAT) {
+ if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
+ if (test_bit(AF_HEARTBEAT, &a->flags)) {
if ((currtime - a->heartbeat_time) >=
ESAS2R_HEARTBEAT_TIME) {
- esas2r_lock_clear_flags(&a->flags,
- AF_HEARTBEAT);
+ clear_bit(AF_HEARTBEAT, &a->flags);
esas2r_hdebug("heartbeat failed");
esas2r_log(ESAS2R_LOG_CRIT,
"heartbeat failed");
@@ -748,7 +744,7 @@ void esas2r_timer_tick(struct esas2r_adapter *a)
esas2r_local_reset_adapter(a);
}
} else {
- esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
+ set_bit(AF_HEARTBEAT, &a->flags);
a->heartbeat_time = currtime;
esas2r_force_interrupt(a);
}
@@ -812,7 +808,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
rqaux->vrq->scsi.flags |=
cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
- if (a->flags & AF_FLASHING) {
+ if (test_bit(AF_FLASHING, &a->flags)) {
/* Assume success. if there are active requests, return busy */
rqaux->req_stat = RS_SUCCESS;
@@ -831,7 +827,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
spin_unlock_irqrestore(&a->queue_lock, flags);
- if (!(a->flags & AF_FLASHING))
+ if (!test_bit(AF_FLASHING, &a->flags))
esas2r_start_request(a, rqaux);
esas2r_comp_list_drain(a, &comp_list);
@@ -848,11 +844,12 @@ void esas2r_reset_bus(struct esas2r_adapter *a)
{
esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
- if (!(a->flags & AF_DEGRADED_MODE)
- && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
- esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
- esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
- esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)) {
+ set_bit(AF_BUSRST_NEEDED, &a->flags);
+ set_bit(AF_BUSRST_PENDING, &a->flags);
+ set_bit(AF_OS_RESET, &a->flags);
esas2r_schedule_tasklet(a);
}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index e5b09027e06..d89a0277a8e 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -347,7 +347,7 @@ static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
{
struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return false;
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
@@ -463,7 +463,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
gcc->bios_build_rev = LOWORD(a->flash_ver);
- if (a->flags2 & AF2_THUNDERLINK)
+ if (test_bit(AF2_THUNDERLINK, &a->flags2))
gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
| CSMI_CNTLRF_SATA_HBA;
else
@@ -485,7 +485,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
{
struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
gcs->status = CSMI_CNTLR_STS_FAILED;
else
gcs->status = CSMI_CNTLR_STS_GOOD;
@@ -819,10 +819,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
gai->adap_type = ATTO_GAI_AT_ESASRAID2;
- if (a->flags2 & AF2_THUNDERLINK)
+ if (test_bit(AF2_THUNDERLINK, &a->flags2))
gai->adap_type = ATTO_GAI_AT_TLSASHBA;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
@@ -938,7 +938,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
u32 total_len = ESAS2R_FWCOREDUMP_SZ;
/* Size is zero if a core dump isn't present */
- if (!(a->flags2 & AF2_COREDUMP_SAVED))
+ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
total_len = 0;
if (len > total_len)
@@ -960,8 +960,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
memset(a->fw_coredump_buff, 0,
ESAS2R_FWCOREDUMP_SZ);
- esas2r_lock_clear_flags(&a->flags2,
- AF2_COREDUMP_SAVED);
+ clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
hi->status = ATTO_STS_UNSUPPORTED;
break;
@@ -973,7 +972,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
trc->total_length = ESAS2R_FWCOREDUMP_SZ;
/* Return zero length buffer if core dump not present */
- if (!(a->flags2 & AF2_COREDUMP_SAVED))
+ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
trc->total_length = 0;
} else {
hi->status = ATTO_STS_UNSUPPORTED;
@@ -1048,6 +1047,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
+
if (!esas2r_build_sg_list(a, rq, sgc)) {
hi->status = ATTO_STS_OUT_OF_RSRC;
break;
@@ -1139,15 +1139,15 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
break;
}
- if (a->flags & AF_CHPRST_NEEDED)
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
ac->adap_state = ATTO_AC_AS_RST_SCHED;
- else if (a->flags & AF_CHPRST_PENDING)
+ else if (test_bit(AF_CHPRST_PENDING, &a->flags))
ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
- else if (a->flags & AF_DISC_PENDING)
+ else if (test_bit(AF_DISC_PENDING, &a->flags))
ac->adap_state = ATTO_AC_AS_RST_DISC;
- else if (a->flags & AF_DISABLED)
+ else if (test_bit(AF_DISABLED, &a->flags))
ac->adap_state = ATTO_AC_AS_DISABLED;
- else if (a->flags & AF_DEGRADED_MODE)
+ else if (test_bit(AF_DEGRADED_MODE, &a->flags))
ac->adap_state = ATTO_AC_AS_DEGRADED;
else
ac->adap_state = ATTO_AC_AS_OK;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 4abf1272e1e..f37f3e3dd5d 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -889,7 +889,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Assume success, if it fails we will fix the result later. */
cmd->result = DID_OK << 16;
- if (unlikely(a->flags & AF_DEGRADED_MODE)) {
+ if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
@@ -1050,7 +1050,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
- if (a->flags & AF_DEGRADED_MODE) {
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
cmd->result = DID_ABORT << 16;
scsi_set_resid(cmd, 0);
@@ -1131,7 +1131,7 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
struct esas2r_adapter *a =
(struct esas2r_adapter *)cmd->device->host->hostdata;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED;
if (host_reset)
@@ -1141,14 +1141,14 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
/* above call sets the AF_OS_RESET flag. wait for it to clear. */
- while (a->flags & AF_OS_RESET) {
+ while (test_bit(AF_OS_RESET, &a->flags)) {
msleep(10);
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED;
}
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED;
return SUCCESS;
@@ -1176,7 +1176,7 @@ static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
u8 task_management_status = RS_PENDING;
bool completed;
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED;
retry:
@@ -1229,7 +1229,7 @@ retry:
msleep(10);
}
- if (a->flags & AF_DEGRADED_MODE)
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED;
if (task_management_status == RS_BUSY) {
@@ -1666,13 +1666,13 @@ void esas2r_adapter_tasklet(unsigned long context)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)context;
- if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
- esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
+ if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
+ clear_bit(AF2_TIMER_TICK, &a->flags2);
esas2r_timer_tick(a);
}
- if (likely(a->flags2 & AF2_INT_PENDING)) {
- esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
+ if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
+ clear_bit(AF2_INT_PENDING, &a->flags2);
esas2r_adapter_interrupt(a);
}
@@ -1680,12 +1680,12 @@ void esas2r_adapter_tasklet(unsigned long context)
esas2r_do_tasklet_tasks(a);
if (esas2r_is_tasklet_pending(a)
- || (a->flags2 & AF2_INT_PENDING)
- || (a->flags2 & AF2_TIMER_TICK)) {
- esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ || (test_bit(AF2_INT_PENDING, &a->flags2))
+ || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
esas2r_schedule_tasklet(a);
} else {
- esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
}
}
@@ -1707,7 +1707,7 @@ static void esas2r_timer_callback(unsigned long context)
{
struct esas2r_adapter *a = (struct esas2r_adapter *)context;
- esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
+ set_bit(AF2_TIMER_TICK, &a->flags2);
esas2r_schedule_tasklet(a);
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
index e540a2fa3d1..bf45beaad43 100644
--- a/drivers/scsi/esas2r/esas2r_targdb.c
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -86,7 +86,7 @@ void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
esas2r_trace_enter();
- if (a->flags & AF_DISC_PENDING) {
+ if (test_bit(AF_DISC_PENDING, &a->flags)) {
esas2r_trace_exit();
return;
}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
index fd139287964..30028e56df6 100644
--- a/drivers/scsi/esas2r/esas2r_vda.c
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -84,7 +84,7 @@ bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
return false;
}
- if (a->flags & AF_DEGRADED_MODE) {
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
vi->status = ATTO_STS_DEGRADED;
return false;
}
@@ -310,9 +310,9 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
le32_to_cpu(rsp->vda_version);
cfg->data.init.fw_build = rsp->fw_build;
- snprintf(buf, sizeof(buf), "%1d.%02d",
- (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
- (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+ snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
+ (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+ (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
memcpy(&cfg->data.init.fw_release, buf,
sizeof(cfg->data.init.fw_release));
@@ -389,7 +389,7 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a,
vrq->length = cpu_to_le32(length);
if (vrq->length) {
- if (a->flags & AF_LEGACY_SGE_MODE) {
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
vrq->sg_list_offset = (u8)offsetof(
struct atto_vda_mgmt_req, sge);
@@ -427,7 +427,7 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
- if (a->flags & AF_LEGACY_SGE_MODE) {
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
vrq->sg_list_offset =
(u8)offsetof(struct atto_vda_ae_req, sge);
vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
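[Editor's note] The esas2r hunks above all make the same conversion: open-coded flag masking (a->flags & AF_X) and the driver's esas2r_lock_set_flags()/esas2r_lock_clear_flags() helpers are replaced with the kernel's atomic bitops, which requires the AF_*/AF2_* values to be bit numbers in an unsigned long rather than masks (a header change not shown in this excerpt). A minimal sketch of the idiom, with hypothetical flag names, not code from the patch:

#include <linux/bitops.h>

#define MY_FLAG_BUSY	0	/* bit numbers, not masks */
#define MY_FLAG_ERROR	1

static unsigned long my_flags;

static void example(void)
{
	set_bit(MY_FLAG_BUSY, &my_flags);		/* atomic set   */
	if (test_bit(MY_FLAG_ERROR, &my_flags))		/* atomic test  */
		clear_bit(MY_FLAG_BUSY, &my_flags);	/* atomic clear */
}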
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 07453bbf05e..f3170008ae7 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -408,6 +408,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
}
ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ ctlr->cdev = ctlr_dev;
fcoe = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
@@ -1440,22 +1441,28 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
ctlr = fcoe_to_ctlr(fcoe);
lport = ctlr->lp;
if (unlikely(!lport)) {
- FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
+ FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n");
goto err2;
}
if (!lport->link_up)
goto err2;
- FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
- "data:%p tail:%p end:%p sum:%d dev:%s",
+ FCOE_NETDEV_DBG(netdev,
+ "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
skb->len, skb->data_len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb),
skb->csum, skb->dev ? skb->dev->name : "<NULL>");
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+
+ if (skb == NULL)
+ return NET_RX_DROP;
+
eh = eth_hdr(skb);
if (is_fip_mode(ctlr) &&
- compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
+ !ether_addr_equal(eh->h_source, ctlr->dest_addr)) {
FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
eh->h_source);
goto err;
@@ -1540,13 +1547,13 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
wake_up_process(fps->thread);
spin_unlock(&fps->fcoe_rx_list.lock);
- return 0;
+ return NET_RX_SUCCESS;
err:
per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
put_cpu();
err2:
kfree_skb(skb);
- return -1;
+ return NET_RX_DROP;
}
/**
@@ -1788,13 +1795,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
lport = fr->fr_dev;
if (unlikely(!lport)) {
if (skb->destructor != fcoe_percpu_flush_done)
- FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
+ FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
kfree_skb(skb);
return;
}
- FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
- "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+ FCOE_NETDEV_DBG(skb->dev,
+ "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
skb->len, skb->data_len,
skb->head, skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->csum,
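[Editor's note] Two recurring changes in the fcoe.c hunks: fcoe_rcv() now returns the standard NET_RX_SUCCESS/NET_RX_DROP codes instead of 0/-1, and compare_ether_addr() is replaced by ether_addr_equal(). The two helpers have opposite senses (compare_ether_addr() returns non-zero when the addresses differ, ether_addr_equal() returns true when they match), so every converted test is logically inverted. A minimal sketch of the new form, not code from the patch:

#include <linux/etherdevice.h>
#include <linux/types.h>

static bool mac_matches(const u8 *a, const u8 *b)
{
	/* true when the two 6-byte MAC addresses are identical */
	return ether_addr_equal(a, b);
}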
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 203415e0251..34a1b1f333b 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,74 +160,113 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
}
EXPORT_SYMBOL(fcoe_ctlr_init);
+/**
+ * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
+ * @new: The newly discovered FCF
+ *
+ * Called with fip->ctlr_mutex held
+ */
static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
{
struct fcoe_ctlr *fip = new->fip;
- struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
- struct fcoe_fcf_device temp, *fcf_dev;
- int rc = 0;
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_fcf_device *temp, *fcf_dev;
+ int rc = -ENOMEM;
LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
new->fabric_name, new->fcf_mac);
- mutex_lock(&ctlr_dev->lock);
-
- temp.fabric_name = new->fabric_name;
- temp.switch_name = new->switch_name;
- temp.fc_map = new->fc_map;
- temp.vfid = new->vfid;
- memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
- temp.priority = new->pri;
- temp.fka_period = new->fka_period;
- temp.selected = 0; /* default to unselected */
-
- fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
- if (unlikely(!fcf_dev)) {
- rc = -ENOMEM;
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
goto out;
- }
+
+ temp->fabric_name = new->fabric_name;
+ temp->switch_name = new->switch_name;
+ temp->fc_map = new->fc_map;
+ temp->vfid = new->vfid;
+ memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
+ temp->priority = new->pri;
+ temp->fka_period = new->fka_period;
+ temp->selected = 0; /* default to unselected */
/*
- * The fcoe_sysfs layer can return a CONNECTED fcf that
- * has a priv (fcf was never deleted) or a CONNECTED fcf
- * that doesn't have a priv (fcf was deleted). However,
- * libfcoe will always delete FCFs before trying to add
- * them. This is ensured because both recv_adv and
- * age_fcfs are protected by the the fcoe_ctlr's mutex.
- * This means that we should never get a FCF with a
- * non-NULL priv pointer.
+ * If ctlr_dev doesn't exist then it means we're a libfcoe user
+ * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device.
+ * fnic would be an example of a driver with this behavior. In this
+ * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+ * don't want to make sysfs changes.
*/
- BUG_ON(fcf_dev->priv);
- fcf_dev->priv = new;
- new->fcf_dev = fcf_dev;
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ if (ctlr_dev) {
+ mutex_lock(&ctlr_dev->lock);
+ fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
+ if (unlikely(!fcf_dev)) {
+ rc = -ENOMEM;
+ mutex_unlock(&ctlr_dev->lock);
+ goto out;
+ }
+
+ /*
+ * The fcoe_sysfs layer can return a CONNECTED fcf that
+ * has a priv (fcf was never deleted) or a CONNECTED fcf
+ * that doesn't have a priv (fcf was deleted). However,
+ * libfcoe will always delete FCFs before trying to add
+ * them. This is ensured because both recv_adv and
+ * age_fcfs are protected by the fcoe_ctlr's mutex.
+ * This means that we should never get an FCF with a
+ * non-NULL priv pointer.
+ */
+ BUG_ON(fcf_dev->priv);
+
+ fcf_dev->priv = new;
+ new->fcf_dev = fcf_dev;
+ mutex_unlock(&ctlr_dev->lock);
+ }
list_add(&new->list, &fip->fcfs);
fip->fcf_count++;
+ rc = 0;
out:
- mutex_unlock(&ctlr_dev->lock);
+ kfree(temp);
return rc;
}
+/**
+ * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} from a fcoe_ctlr{,_device}
+ * @new: The FCF to be removed
+ *
+ * Called with fip->ctlr_mutex held
+ */
static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
{
struct fcoe_ctlr *fip = new->fip;
- struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_ctlr_device *cdev;
struct fcoe_fcf_device *fcf_dev;
list_del(&new->list);
fip->fcf_count--;
- mutex_lock(&ctlr_dev->lock);
-
- fcf_dev = fcoe_fcf_to_fcf_dev(new);
- WARN_ON(!fcf_dev);
- new->fcf_dev = NULL;
- fcoe_fcf_device_delete(fcf_dev);
- kfree(new);
-
- mutex_unlock(&ctlr_dev->lock);
+ /*
+ * If ctlr_dev doesn't exist then it means we're a libfcoe user
+ * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device
+ * or a fcoe_fcf_device.
+ *
+ * fnic would be an example of a driver with this behavior. In this
+ * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+ * but we don't want to make sysfs changes.
+ */
+ cdev = fcoe_ctlr_to_ctlr_dev(fip);
+ if (cdev) {
+ mutex_lock(&cdev->lock);
+ fcf_dev = fcoe_fcf_to_fcf_dev(new);
+ WARN_ON(!fcf_dev);
+ new->fcf_dev = NULL;
+ fcoe_fcf_device_delete(fcf_dev);
+ kfree(new);
+ mutex_unlock(&cdev->lock);
+ }
}
/**
@@ -300,7 +339,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf;
- if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr))
+ if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
goto unlock;
if (!is_zero_ether_addr(fip->dest_addr)) {
printk(KERN_NOTICE "libfcoe: host%d: "
@@ -1000,7 +1039,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map &&
- compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
+ ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {
found = 1;
break;
}
@@ -1340,7 +1379,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
mp = (struct fip_mac_desc *)desc;
if (dlen < sizeof(*mp))
goto err;
- if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
+ if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac))
goto err;
desc_mask &= ~BIT(FIP_DT_MAC);
break;
@@ -1418,8 +1457,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* 'port_id' is already validated, check MAC address and
* wwpn
*/
- if (compare_ether_addr(fip->get_src_addr(vn_port),
- vp->fd_mac) != 0 ||
+ if (!ether_addr_equal(fip->get_src_addr(vn_port),
+ vp->fd_mac) ||
get_unaligned_be64(&vp->fd_wwpn) !=
vn_port->wwpn)
continue;
@@ -1453,6 +1492,9 @@ err:
*/
void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
skb_queue_tail(&fip->fip_recv_list, skb);
schedule_work(&fip->recv_work);
}
@@ -1479,12 +1521,12 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
eh = eth_hdr(skb);
if (fip->mode == FIP_MODE_VN2VN) {
- if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
- compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) &&
- compare_ether_addr(eh->h_dest, fcoe_all_p2p))
+ if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+ !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) &&
+ !ether_addr_equal(eh->h_dest, fcoe_all_p2p))
goto drop;
- } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
- compare_ether_addr(eh->h_dest, fcoe_all_enode))
+ } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+ !ether_addr_equal(eh->h_dest, fcoe_all_enode))
goto drop;
fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op);
@@ -1856,7 +1898,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
* address_mode flag to use FC_OUI-based Ethernet DA.
* Otherwise we use the FCoE gateway addr
*/
- if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
+ if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
fcoe_ctlr_map_dest(fip);
} else {
memcpy(fip->dest_addr, sa, ETH_ALEN);
@@ -2825,8 +2867,8 @@ unlock:
* disabled, so that should ensure that this routine is only called
* when nothing is happening.
*/
-void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
- enum fip_state fip_mode)
+static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
+ enum fip_state fip_mode)
{
void *priv;
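[Editor's note] Both fcoe receive paths above gain an skb_share_check() call before the frame is parsed or queued: if the skb is shared, a private copy is returned (or NULL on allocation failure), so the handler may safely modify and hold it. A minimal sketch of the pattern, not code from the patch:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static int example_rcv(struct sk_buff *skb)
{
	/* obtain a private skb, or drop if that fails */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;

	/* ... parse/queue the now-private skb here ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}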
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index c9382d6eee7..045c4e11ee5 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -300,29 +300,29 @@ static ssize_t store_ctlr_mode(struct device *dev,
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
- LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+ LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
return -EBUSY;
case FCOE_CTLR_DISABLED:
if (!ctlr->f->set_fcoe_ctlr_mode) {
LIBFCOE_SYSFS_DBG(ctlr,
- "Mode change not supported by LLD.");
+ "Mode change not supported by LLD.\n");
return -ENOTSUPP;
}
ctlr->mode = fcoe_parse_mode(mode);
if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
- LIBFCOE_SYSFS_DBG(ctlr,
- "Unknown mode %s provided.", buf);
+ LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
+ buf);
return -EINVAL;
}
ctlr->f->set_fcoe_ctlr_mode(ctlr);
- LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
return count;
case FCOE_CTLR_UNUSED:
default:
- LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
return -ENOTSUPP;
};
}
@@ -553,16 +553,20 @@ static struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
-static struct bus_attribute fcoe_bus_attr_group[] = {
- __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
- __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
- __ATTR_NULL
+static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
+static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);
+
+static struct attribute *fcoe_bus_attrs[] = {
+ &bus_attr_ctlr_create.attr,
+ &bus_attr_ctlr_destroy.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(fcoe_bus);
static struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
- .bus_attrs = fcoe_bus_attr_group,
+ .bus_groups = fcoe_bus_groups,
};
/**
@@ -653,7 +657,7 @@ static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
if (new->switch_name == old->switch_name &&
new->fabric_name == old->fabric_name &&
new->fc_map == old->fc_map &&
- compare_ether_addr(new->mac, old->mac) == 0)
+ ether_addr_equal(new->mac, old->mac))
return 1;
return 0;
}
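[Editor's note] The fcoe_sysfs hunk follows the tree-wide move from bus_type.bus_attrs arrays to attribute groups. A minimal sketch of the v3.13-era idiom with a hypothetical attribute, not the fcoe one:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/stat.h>

static ssize_t demo_store(struct bus_type *bus, const char *buf, size_t count)
{
	return count;		/* accept and ignore the write */
}
static BUS_ATTR(demo, S_IWUSR, NULL, demo_store);

static struct attribute *demo_bus_attrs[] = {
	&bus_attr_demo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_bus);	/* defines demo_bus_groups[] */

static struct bus_type demo_bus_type = {
	.name		= "demo",
	.bus_groups	= demo_bus_groups,
};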
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e4dd3d7cd23..528d43b7b56 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,6 +27,7 @@
#include "fnic_io.h"
#include "fnic_res.h"
#include "fnic_trace.h"
+#include "fnic_stats.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
@@ -38,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.23"
+#define DRV_VERSION "1.5.0.45"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -232,6 +233,13 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct dentry *fnic_stats_debugfs_host;
+ struct dentry *fnic_stats_debugfs_file;
+ struct dentry *fnic_reset_debugfs_file;
+ unsigned int reset_stats;
+ atomic64_t io_cmpl_skip;
+ struct fnic_stats fnic_stats;
+
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
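[Editor's note] The new struct fnic_stats comes from fnic_stats.h, which is not part of this excerpt. Judging only from the fields referenced in the hunks below, it is a bundle of per-area atomic64_t counter groups; a hedged reconstruction with inferred names, not the actual header:

#include <linux/atomic.h>

struct io_path_stats {			/* inferred from call sites */
	atomic64_t active_ios;		/* first field; preserved across a stats reset */
	atomic64_t max_active_ios;
	atomic64_t num_ios;
	atomic64_t io_completions;
	atomic64_t io_failures;
	atomic64_t ioreq_null;
	atomic64_t sc_null;
	atomic64_t io_not_found;
	atomic64_t alloc_failures;
	/* ... */
};

struct fnic_stats {			/* groups referenced below */
	struct io_path_stats io_stats;
	/* fw_stats, abts_stats, term_stats, reset_stats,
	 * misc_stats and vlan_stats follow the same shape */
};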
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index cbcb0121c84..b6073f87576 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -23,6 +23,58 @@
static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
+static struct dentry *fnic_stats_debugfs_root;
+
+/*
+ * fnic_debugfs_init - Initialize debugfs for fnic debug logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory and statistics directory for trace buffer and
+ * stats logging.
+ */
+int fnic_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG
+ "fnic root directory doesn't exist in debugfs\n");
+ return rc;
+ }
+
+ fnic_stats_debugfs_root = debugfs_create_dir("statistics",
+ fnic_trace_debugfs_root);
+ if (!fnic_stats_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create Statistics directory\n");
+ return rc;
+ }
+
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic.
+ */
+void fnic_debugfs_terminate(void)
+{
+ debugfs_remove(fnic_stats_debugfs_root);
+ fnic_stats_debugfs_root = NULL;
+
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+}
/*
* fnic_trace_ctrl_open - Open the trace_enable file
@@ -241,16 +293,16 @@ static const struct file_operations fnic_trace_debugfs_fops = {
* Description:
* When Debugfs is configured this routine sets up the fnic debugfs
* file system. If not already created, this routine will create the
- * fnic directory. It will create file trace to log fnic trace buffer
- * output into debugfs and it will also create file trace_enable to
- * control enable/disable of trace logging into trace buffer.
+ * file trace to log fnic trace buffer output into debugfs and it will
+ * also create file trace_enable to control enable/disable of trace
+ * logging into the trace buffer.
*/
int fnic_trace_debugfs_init(void)
{
int rc = -1;
- fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
if (!fnic_trace_debugfs_root) {
- printk(KERN_DEBUG "Cannot create debugfs root\n");
+ printk(KERN_DEBUG
+ "FNIC Debugfs root directory doesn't exist\n");
return rc;
}
fnic_trace_enable = debugfs_create_file("tracing_enable",
@@ -259,8 +311,8 @@ int fnic_trace_debugfs_init(void)
NULL, &fnic_trace_ctrl_fops);
if (!fnic_trace_enable) {
- printk(KERN_DEBUG "Cannot create trace_enable file"
- " under debugfs");
+ printk(KERN_DEBUG
+ "Cannot create trace_enable file under debugfs\n");
return rc;
}
@@ -271,7 +323,8 @@ int fnic_trace_debugfs_init(void)
&fnic_trace_debugfs_fops);
if (!fnic_trace_debugfs_file) {
- printk(KERN_DEBUG "Cannot create trace file under debugfs");
+ printk(KERN_DEBUG
+ "Cannot create trace file under debugfs\n");
return rc;
}
rc = 0;
@@ -295,8 +348,323 @@ void fnic_trace_debugfs_terminate(void)
debugfs_remove(fnic_trace_enable);
fnic_trace_enable = NULL;
}
- if (fnic_trace_debugfs_root) {
- debugfs_remove(fnic_trace_debugfs_root);
- fnic_trace_debugfs_root = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens the debugfs file reset_stats and stores i_private data
+ * in the debug structure for retrieval later while performing other
+ * file operations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+ struct stats_debug_info *debug;
+
+ debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the reset_stats variable
+ * and stores it into the local @buf. It will start reading the file at
+ * @ppos and copy up to @cnt bytes of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ struct fnic *fnic = (struct fnic *)debug->i_private;
+ char buf[64];
+ int len;
+
+ len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from the user buffer @ubuf to the buffer @buf
+ * and resets the cumulative stats of the fnic.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ struct fnic *fnic = (struct fnic *)debug->i_private;
+ struct fnic_stats *stats = &fnic->fnic_stats;
+ u64 *io_stats_p = (u64 *)&stats->io_stats;
+ u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic->reset_stats = val;
+
+ if (fnic->reset_stats) {
+ /* Skip variable is used to avoid discrepancies between Num IOs
+ * and IO Completions stats. Skip incrementing IO Compls
+ * for IOs that were still active when the stats were reset
+ */
+ atomic64_set(&fnic->io_cmpl_skip,
+ atomic64_read(&stats->io_stats.active_ios));
+ memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+ memset(&stats->term_stats, 0,
+ sizeof(struct terminate_stats));
+ memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+ memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+ memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+ memset(io_stats_p+1, 0,
+ sizeof(struct io_path_stats) - sizeof(u64));
+ memset(fw_stats_p+1, 0,
+ sizeof(struct fw_stats) - sizeof(u64));
}
+
+ (*ppos)++;
+ return cnt;
+}
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+ struct file *file)
+{
+ struct stats_debug_info *debug = file->private_data;
+ kfree(debug);
+ return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for specific host
+ * and get fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens the debugfs stats file of a specific host and prints
+ * the fnic stats.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ struct fnic *fnic = inode->i_private;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct stats_debug_info *debug;
+ int buf_size = 2 * PAGE_SIZE;
+
+ debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->debug_buffer = vmalloc(buf_size);
+ if (!debug->debug_buffer) {
+ kfree(debug);
+ return -ENOMEM;
+ }
+
+ debug->buf_size = buf_size;
+ memset((void *)debug->debug_buffer, 0, buf_size);
+ debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ debug->debug_buffer,
+ debug->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_stats_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ struct stats_debug_info *debug = file->private_data;
+ vfree(debug->debug_buffer);
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_stats_debugfs_open,
+ .read = fnic_stats_debugfs_read,
+ .release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_reset_stats_open,
+ .read = fnic_reset_stats_read,
+ .write = fnic_reset_stats_write,
+ .release = fnic_reset_stats_release,
+};
+
+/*
+ * fnic_stats_debugfs_init - Initialize stats struct and create stats file per fnic
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the stats file per fnic.
+ * It will create the stats and reset_stats files under the statistics/host#
+ * directory to log per-fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+ int rc = -1;
+ char name[16];
+
+ snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+ if (!fnic_stats_debugfs_root) {
+ printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+ return rc;
+ }
+ fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+ fnic_stats_debugfs_root);
+ if (!fnic->fnic_stats_debugfs_host) {
+ printk(KERN_DEBUG "Cannot create host directory\n");
+ return rc;
+ }
+
+ fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic->fnic_stats_debugfs_host,
+ fnic,
+ &fnic_stats_debugfs_fops);
+ if (!fnic->fnic_stats_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create host stats file\n");
+ return rc;
+ }
+
+ fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic->fnic_stats_debugfs_host,
+ fnic,
+ &fnic_reset_debugfs_fops);
+ if (!fnic->fnic_reset_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create host reset_stats file\n");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */
+void fnic_stats_debugfs_remove(struct fnic *fnic)
+{
+ if (!fnic)
+ return;
+
+ debugfs_remove(fnic->fnic_stats_debugfs_file);
+ fnic->fnic_stats_debugfs_file = NULL;
+
+ debugfs_remove(fnic->fnic_reset_debugfs_file);
+ fnic->fnic_reset_debugfs_file = NULL;
+
+ debugfs_remove(fnic->fnic_stats_debugfs_host);
+ fnic->fnic_stats_debugfs_host = NULL;
}
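[Editor's note] The new stats files above all follow the same debugfs shape: format a snapshot into a buffer at open time, serve reads from it with simple_read_from_buffer(), and free it on release. A compact, self-contained sketch of that shape, with hypothetical names rather than the fnic code:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

struct snap { char *buf; int len; };

static int snap_open(struct inode *inode, struct file *file)
{
	struct snap *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	s->buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!s->buf) {
		kfree(s);
		return -ENOMEM;
	}
	/* fill the snapshot once; reads only page through it */
	s->len = scnprintf(s->buf, PAGE_SIZE, "example snapshot\n");
	file->private_data = s;
	return 0;
}

static ssize_t snap_read(struct file *file, char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	struct snap *s = file->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos, s->buf, s->len);
}

static int snap_release(struct inode *inode, struct file *file)
{
	struct snap *s = file->private_data;

	kfree(s->buf);
	kfree(s);
	return 0;
}

static const struct file_operations snap_fops = {
	.owner   = THIS_MODULE,
	.open    = snap_open,
	.read    = snap_read,
	.release = snap_release,
};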
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 006fa92a02d..1671325aec7 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -302,6 +302,7 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
struct fcoe_ctlr *fip = &fnic->ctlr;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb;
char *eth_fr;
int fr_len;
@@ -337,6 +338,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP);
@@ -354,6 +356,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
struct fcoe_ctlr *fip = &fnic->ctlr;
struct fip_header *fiph;
struct fip_desc *desc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u16 vid;
size_t rlen;
size_t dlen;
@@ -402,6 +405,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
/* any VLAN descriptors present ? */
if (list_empty(&fnic->vlans)) {
/* retry from timer */
+ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
@@ -533,6 +537,7 @@ drop:
void fnic_handle_fip_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
struct sk_buff *skb;
struct ethhdr *eh;
@@ -567,6 +572,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
* fcf's & restart from scratch
*/
if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+ atomic64_inc(
+ &fnic_stats->vlan_stats.flogi_rejects);
shost_printk(KERN_INFO, fnic->lport->host,
"Trigger a Link down - VLAN Disc\n");
fcoe_ctlr_link_down(&fnic->ctlr);
@@ -651,13 +658,13 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
if (is_zero_ether_addr(new))
new = ctl;
- if (!compare_ether_addr(data, new))
+ if (ether_addr_equal(data, new))
return;
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
- if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
+ if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
memcpy(data, new, ETH_ALEN);
- if (compare_ether_addr(new, ctl))
+ if (!ether_addr_equal(new, ctl))
vnic_dev_add_addr(fnic->vdev, new);
}
@@ -753,6 +760,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
struct fc_frame *fp;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
@@ -803,6 +811,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written);
if (!fcs_ok) {
+ atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fcs error. dropping packet.\n");
goto drop;
@@ -818,6 +827,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
}
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
@@ -1205,6 +1215,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u64 sol_time;
spin_lock_irqsave(&fnic->fnic_lock, flags);
@@ -1273,6 +1284,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
vlan->state = FIP_VLAN_SENT; /* sent now */
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
vlan->sol_count++;
sol_time = jiffies + msecs_to_jiffies
(FCOE_CTLR_START_DELAY);
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 5c1f223cabc..7d9b54ae7f6 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
if (!pba)
return IRQ_NONE;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
if (pba & (1 << FNIC_INTX_NOTIFY)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
fnic_handle_link_event(fnic);
@@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
struct fnic *fnic = data;
unsigned long work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
struct fnic *fnic = data;
unsigned long rq_work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
rq_work_done,
@@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
struct fnic *fnic = data;
unsigned long wq_work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
wq_work_done,
@@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
wq_copy_work_done,
@@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
struct fnic *fnic = data;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
fnic_log_q_error(fnic);
fnic_handle_link_event(fnic);
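[Editor's note] The fnic ISR and SCSI-path hunks add atomic64_t counters in two shapes: a plain atomic64_inc() per event (isr_count, terminates, ...) and an increment followed by a best-effort high-water mark (active_fw_reqs/max_fw_reqs, active_ios/max_active_ios). A minimal sketch of the high-water-mark shape, not code from the patch:

#include <linux/atomic.h>

static atomic64_t active = ATOMIC64_INIT(0);
static atomic64_t max_active = ATOMIC64_INIT(0);

static void track_one(void)
{
	atomic64_inc(&active);
	/* read-then-set is not atomic as a whole, so the recorded
	 * maximum is a best-effort statistic, which is all the
	 * driver needs here */
	if (atomic64_read(&active) > atomic64_read(&max_active))
		atomic64_set(&max_active, atomic64_read(&active));
}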
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index bbf81ea3a25..33e4ec2bfe7 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -556,6 +556,13 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
host->transportt = fnic_fc_transport;
+ err = fnic_stats_debugfs_init(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to initialize debugfs for stats\n");
+ fnic_stats_debugfs_remove(fnic);
+ }
+
/* Setup PCI resources */
pci_set_drvdata(pdev, fnic);
@@ -917,6 +924,7 @@ err_out_release_regions:
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_hba:
+ fnic_stats_debugfs_remove(fnic);
scsi_host_put(lp->host);
err_out:
return err;
@@ -969,6 +977,7 @@ static void fnic_remove(struct pci_dev *pdev)
fcoe_ctlr_destroy(&fnic->ctlr);
fc_lport_destroy(lp);
+ fnic_stats_debugfs_remove(fnic);
/*
* This stops the fnic device, masks all interrupts. Completed
@@ -996,7 +1005,6 @@ static void fnic_remove(struct pci_dev *pdev)
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
scsi_host_put(lp->host);
}
@@ -1014,6 +1022,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Create debugfs entries for fnic */
+ err = fnic_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to create fnic directory "
+ "for tracing and stats logging\n");
+ fnic_debugfs_terminate();
+ }
+
/* Allocate memory for trace buffer */
err = fnic_trace_buf_init();
if (err < 0) {
@@ -1102,6 +1118,7 @@ err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
fnic_trace_free();
+ fnic_debugfs_terminate();
return err;
}
@@ -1118,6 +1135,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
+ fnic_debugfs_terminate();
}
module_init(fnic_init_module);
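[Editor's note] The fnic_scsi.c hunks below repeatedly use the io_cmpl_skip counter set up in the reset_stats write handler: after a stats reset, completions of IOs that were already in flight consume the skip budget instead of bumping io_completions, keeping num_ios and io_completions comparable. A minimal sketch of that accounting step, with names mirroring the patch:

#include <linux/atomic.h>

static void account_completion(atomic64_t *io_cmpl_skip,
			       atomic64_t *io_completions)
{
	if (atomic64_read(io_cmpl_skip))
		atomic64_dec(io_cmpl_skip);	/* was in flight at reset time */
	else
		atomic64_inc(io_completions);	/* normal completion */
}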
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d014aae1913..0521436d05d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -226,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
- else
+ else {
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(
+ &fnic->fnic_stats.fw_stats.active_fw_reqs));
+ }
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
- if (!ret)
+ if (!ret) {
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else {
+ } else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
@@ -291,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
fc_id, fnic->ctlr.map_dest, gw_mac);
}
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
flogi_reg_ioreq_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
return ret;
@@ -310,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
u8 pri_tag = 0;
unsigned int i;
unsigned long intr_flags;
@@ -358,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
+ atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -386,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
rport->maxframe_size, rp->r_a_tov,
rp->e_d_tov);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
return 0;
}
@@ -401,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
struct fnic *fnic = lport_priv(lp);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret;
u64 cmd_trace;
@@ -414,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
@@ -436,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -462,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
GFP_ATOMIC);
if (!io_req->sgl_list) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -509,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_free(io_req, fnic->io_req_pool);
}
} else {
+ atomic64_inc(&fnic_stats->io_stats.active_ios);
+ atomic64_inc(&fnic_stats->io_stats.num_ios);
+ if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+ atomic64_read(&fnic_stats->io_stats.max_active_ios))
+ atomic64_set(&fnic_stats->io_stats.max_active_ios,
+ atomic64_read(&fnic_stats->io_stats.active_ios));
+
/* REVISIT: Use per IO lock in the final code */
CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
@@ -542,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
struct fcpio_tag tag;
int ret = 0;
unsigned long flags;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+ atomic64_inc(&reset_stats->fw_reset_completions);
+
/* Clean up all outstanding io requests */
fnic_cleanup_io(fnic, SCSI_NO_TAG);
+ atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+ atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
spin_lock_irqsave(&fnic->fnic_lock, flags);
/* fnic should be in FC_TRANS_ETH_MODE */
@@ -571,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
* reset the firmware. Free the cached flogi
*/
fnic->state = FNIC_IN_FC_MODE;
+ atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
@@ -578,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic->lport->host,
"Unexpected state %s while processing"
" reset cmpl\n", fnic_state_to_str(fnic->state));
+ atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
@@ -701,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
if (is_ack_index_in_range(wq, request_out)) {
fnic->fw_ack_index[0] = request_out;
fnic->fw_ack_recd[0] = 1;
- }
+ } else
+ atomic64_inc(
+ &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@@ -726,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct fcpio_icmnd_cmpl *icmnd_cmpl;
struct fnic_io_req *io_req;
struct scsi_cmnd *sc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
spinlock_t *io_lock;
u64 cmd_trace;
@@ -746,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -766,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
@@ -824,31 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
xfer_len -= icmnd_cmpl->residual;
+ if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+ atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
break;
case FCPIO_TIMEOUT: /* request was timed out */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_ABORTED: /* request was aborted */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+ atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
scsi_set_resid(sc, icmnd_cmpl->residual);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
+ atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
break;
- case FCPIO_INVALID_HEADER: /* header contains invalid data */
- case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
- case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
+ atomic64_inc(&fnic_stats->io_stats.io_not_found);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
- case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
case FCPIO_FW_ERR: /* request was terminated due fw error */
+ atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_INVALID_HEADER: /* header contains invalid data */
+ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
+ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
default:
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
@@ -856,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
break;
}
+ if (hdr_status != FCPIO_SUCCESS) {
+ atomic64_inc(&fnic_stats->io_stats.io_failures);
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE;
@@ -889,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
@@ -906,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
u32 id;
struct scsi_cmnd *sc;
struct fnic_io_req *io_req;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time;
@@ -923,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), id);
@@ -933,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
@@ -957,6 +1045,31 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ break;
+ case FCPIO_TIMEOUT:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_fw_timeouts);
+ else
+ atomic64_inc(
+ &term_stats->terminate_fw_timeouts);
+ break;
+ case FCPIO_IO_NOT_FOUND:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_io_not_found);
+ else
+ atomic64_inc(
+ &term_stats->terminate_io_not_found);
+ break;
+ default:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_failures);
+ else
+ atomic64_inc(
+ &term_stats->terminate_failures);
+ break;
+ }
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
@@ -964,6 +1077,16 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
CMD_ABTS_STATUS(sc) = hdr_status;
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1067,6 +1190,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
struct fnic *fnic = vnic_dev_priv(vdev);
switch (desc->hdr.type) {
+ case FCPIO_ICMND_CMPL: /* fw completed a command */
+ case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+ case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+ case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+ case FCPIO_RESET_CMPL: /* fw completed reset */
+ atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ break;
+ default:
+ break;
+ }
+
+ switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
fnic_fcpio_ack_handler(fnic, cq_index, desc);
break;
@@ -1126,6 +1261,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
for (i = 0; i < fnic->fnic_max_tag_id; i++) {
if (i == exclude_id)
@@ -1179,6 +1315,11 @@ cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
" DID_TRANSPORT_DISRUPTED\n");
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
/* Complete the command to SCSI */
if (sc->scsi_done) {
FNIC_TRACE(fnic_cleanup_io,
@@ -1262,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
spin_lock_irqsave(host->host_lock, flags);
@@ -1283,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
atomic_dec(&fnic->in_flight);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_queue_abort_io_req: failure: no descriptors\n");
+ atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
0, task_req, tag, fc_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
atomic_dec(&fnic->in_flight);
@@ -1299,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
int abt_tag;
+ int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_cmnd *sc;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
@@ -1366,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n",
@@ -1402,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
}
}
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);
}
@@ -1411,6 +1568,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
int abt_tag;
+ int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1420,6 +1578,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
struct fc_lport *lport;
struct fnic *fnic;
struct fc_rport *cmd_rport;
+ struct reset_stats *reset_stats;
+ struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
if (!rport) {
@@ -1448,6 +1608,9 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove)
return;
+ reset_stats = &fnic->fnic_stats.reset_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
io_lock = fnic_io_lock_tag(fnic, tag);
@@ -1504,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@@ -1540,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
}
}
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);
}
@@ -1562,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
int ret = SUCCESS;
u32 task_req = 0;
struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct abort_stats *abts_stats;
+ struct terminate_stats *term_stats;
int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1572,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ abts_stats = &fnic->fnic_stats.abts_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
rport = starget_to_rport(scsi_target(sc->device));
tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
@@ -1630,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
*/
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
- else
+ else {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
+ }
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
@@ -1646,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
- if (task_req == FCPIO_ITMF_ABT_TASK)
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
- else
+ atomic64_inc(&fnic_stats->abts_stats.aborts);
+ } else {
CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ atomic64_inc(&fnic_stats->term_stats.terminates);
+ }
/*
* We queued an abort IO, wait for its completion.
@@ -1667,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
@@ -1677,6 +1858,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
+ FNIC_SCSI_DBG(KERN_INFO,
+ fnic->lport->host, "Abort Driver Timeout\n");
+ atomic64_inc(&abts_stats->abort_drv_timeouts);
+ } else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Terminate Driver Timeout\n");
+ atomic64_inc(&term_stats->terminate_drv_timeouts);
+ }
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
@@ -1721,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
@@ -1742,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
if (!vnic_wq_copy_desc_avail(wq)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"queue_dr_io_req failure - no descriptors\n");
+ atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1754,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
atomic_dec(&fnic->in_flight);
@@ -1988,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
unsigned long flags;
unsigned long start_time = 0;
struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct reset_stats *reset_stats;
int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
@@ -1999,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ reset_stats = &fnic->fnic_stats.reset_stats;
+
+ atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2009,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
goto fnic_device_reset_end;
/* Check if remote port up */
- if (fc_remote_port_chkready(rport))
+ if (fc_remote_port_chkready(rport)) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
goto fnic_device_reset_end;
+ }
CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
@@ -2086,6 +2292,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* gets cleaned up during higher levels of EH
*/
if (status == FCPIO_INVALID_CODE) {
+ atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
@@ -2199,6 +2406,10 @@ fnic_device_reset_end:
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
+
+ if (ret == FAILED)
+ atomic64_inc(&reset_stats->device_reset_failures);
+
return ret;
}
@@ -2207,26 +2418,34 @@ int fnic_reset(struct Scsi_Host *shost)
{
struct fc_lport *lp;
struct fnic *fnic;
- int ret = SUCCESS;
+ int ret = 0;
+ struct reset_stats *reset_stats;
lp = shost_priv(shost);
fnic = lport_priv(lp);
+ reset_stats = &fnic->fnic_stats.reset_stats;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_reset called\n");
+ atomic64_inc(&reset_stats->fnic_resets);
+
/*
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
- if (lp->tt.lport_reset(lp))
- ret = FAILED;
+ ret = lp->tt.lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
- (ret == SUCCESS) ?
+ (ret == 0) ?
"SUCCESS" : "FAILED");
+ if (ret == 0)
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+ else
+ atomic64_inc(&reset_stats->fnic_reset_failures);
+
return ret;
}
@@ -2251,7 +2470,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
* scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, fabric should be up
*/
- ret = fnic_reset(shost);
+ ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
if (ret == SUCCESS) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED;
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
new file mode 100644
index 00000000000..540cceb843c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_STATS_H_
+#define _FNIC_STATS_H_
+struct io_path_stats {
+ atomic64_t active_ios;
+ atomic64_t max_active_ios;
+ atomic64_t io_completions;
+ atomic64_t io_failures;
+ atomic64_t ioreq_null;
+ atomic64_t alloc_failures;
+ atomic64_t sc_null;
+ atomic64_t io_not_found;
+ atomic64_t num_ios;
+};
+
+struct abort_stats {
+ atomic64_t aborts;
+ atomic64_t abort_failures;
+ atomic64_t abort_drv_timeouts;
+ atomic64_t abort_fw_timeouts;
+ atomic64_t abort_io_not_found;
+};
+
+struct terminate_stats {
+ atomic64_t terminates;
+ atomic64_t max_terminates;
+ atomic64_t terminate_drv_timeouts;
+ atomic64_t terminate_fw_timeouts;
+ atomic64_t terminate_io_not_found;
+ atomic64_t terminate_failures;
+};
+
+struct reset_stats {
+ atomic64_t device_resets;
+ atomic64_t device_reset_failures;
+ atomic64_t device_reset_aborts;
+ atomic64_t device_reset_timeouts;
+ atomic64_t device_reset_terminates;
+ atomic64_t fw_resets;
+ atomic64_t fw_reset_completions;
+ atomic64_t fw_reset_failures;
+ atomic64_t fnic_resets;
+ atomic64_t fnic_reset_completions;
+ atomic64_t fnic_reset_failures;
+};
+
+struct fw_stats {
+ atomic64_t active_fw_reqs;
+ atomic64_t max_fw_reqs;
+ atomic64_t fw_out_of_resources;
+ atomic64_t io_fw_errs;
+};
+
+struct vlan_stats {
+ atomic64_t vlan_disc_reqs;
+ atomic64_t resp_withno_vlanID;
+ atomic64_t sol_expiry_count;
+ atomic64_t flogi_rejects;
+};
+
+struct misc_stats {
+ u64 last_isr_time;
+ u64 last_ack_time;
+ atomic64_t isr_count;
+ atomic64_t max_cq_entries;
+ atomic64_t ack_index_out_of_range;
+ atomic64_t data_count_mismatch;
+ atomic64_t fcpio_timeout;
+ atomic64_t fcpio_aborted;
+ atomic64_t sgl_invalid;
+ atomic64_t mss_invalid;
+ atomic64_t abts_cpwq_alloc_failures;
+ atomic64_t devrst_cpwq_alloc_failures;
+ atomic64_t io_cpwq_alloc_failures;
+ atomic64_t no_icmnd_itmf_cmpls;
+ atomic64_t queue_fulls;
+ atomic64_t rport_not_ready;
+ atomic64_t frame_errors;
+};
+
+struct fnic_stats {
+ struct io_path_stats io_stats;
+ struct abort_stats abts_stats;
+ struct terminate_stats term_stats;
+ struct reset_stats reset_stats;
+ struct fw_stats fw_stats;
+ struct vlan_stats vlan_stats;
+ struct misc_stats misc_stats;
+};
+
+struct stats_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buf_size;
+ int buffer_len;
+};
+
+int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
+int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_remove(struct fnic *);
+#endif /* _FNIC_STATS_H_ */
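As a usage sketch (hypothetical call site, not driver code): counters in these structs are bumped lock-free with atomic64_inc() on the I/O path and sampled with atomic64_read() when the debugfs file is built, so no additional locking is needed around them.

#include <linux/atomic.h>
#include <linux/printk.h>

static void fnic_stats_example(struct fnic_stats *stats)
{
	atomic64_inc(&stats->io_stats.num_ios);
	pr_debug("ios issued so far: %llu\n",
		 (u64)atomic64_read(&stats->io_stats.num_ios));
}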
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 23a60e3d852..e002e7187dc 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -189,6 +189,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
}
/*
+ * fnic_get_stats_data - Copy fnic stats buffer to a memory file
+ * @debug: pointer to debugfs fnic stats buffer
+ *
+ * Description:
+ * This routine gathers the fnic stats debugfs data from the fnic_stats struct
+ * and dumps it to stats_debug_info.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into
+ * stats_debug_info
+ */
+int fnic_get_stats_data(struct stats_debug_info *debug,
+ struct fnic_stats *stats)
+{
+ int len = 0;
+ int buf_size = debug->buf_size;
+ struct timespec val1, val2;
+
+ len = snprintf(debug->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\tIO Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
+ "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
+ "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
+ "Number of Memory alloc Failures: %lld\n"
+ "Number of IOREQ Null: %lld\n"
+ "Number of SCSI cmd pointer Null: %lld\n",
+ (u64)atomic64_read(&stats->io_stats.active_ios),
+ (u64)atomic64_read(&stats->io_stats.max_active_ios),
+ (u64)atomic64_read(&stats->io_stats.num_ios),
+ (u64)atomic64_read(&stats->io_stats.io_completions),
+ (u64)atomic64_read(&stats->io_stats.io_failures),
+ (u64)atomic64_read(&stats->io_stats.io_not_found),
+ (u64)atomic64_read(&stats->io_stats.alloc_failures),
+ (u64)atomic64_read(&stats->io_stats.ioreq_null),
+ (u64)atomic64_read(&stats->io_stats.sc_null));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tAbort Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Aborts: %lld\n"
+ "Number of Abort Failures: %lld\n"
+ "Number of Abort Driver Timeouts: %lld\n"
+ "Number of Abort FW Timeouts: %lld\n"
+ "Number of Abort IO NOT Found: %lld\n",
+ (u64)atomic64_read(&stats->abts_stats.aborts),
+ (u64)atomic64_read(&stats->abts_stats.abort_failures),
+ (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
+ (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
+ (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tTerminate Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Terminates: %lld\n"
+ "Maximum Terminates: %lld\n"
+ "Number of Terminate Driver Timeouts: %lld\n"
+ "Number of Terminate FW Timeouts: %lld\n"
+ "Number of Terminate IO NOT Found: %lld\n"
+ "Number of Terminate Failures: %lld\n",
+ (u64)atomic64_read(&stats->term_stats.terminates),
+ (u64)atomic64_read(&stats->term_stats.max_terminates),
+ (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
+ (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
+ (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
+ (u64)atomic64_read(&stats->term_stats.terminate_failures));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tReset Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Device Resets: %lld\n"
+ "Number of Device Reset Failures: %lld\n"
+ "Number of Device Reset Aborts: %lld\n"
+ "Number of Device Reset Timeouts: %lld\n"
+ "Number of Device Reset Terminates: %lld\n"
+ "Number of FW Resets: %lld\n"
+ "Number of FW Reset Completions: %lld\n"
+ "Number of FW Reset Failures: %lld\n"
+ "Number of Fnic Reset: %lld\n"
+ "Number of Fnic Reset Completions: %lld\n"
+ "Number of Fnic Reset Failures: %lld\n",
+ (u64)atomic64_read(&stats->reset_stats.device_resets),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
+ (u64)atomic64_read(
+ &stats->reset_stats.device_reset_terminates),
+ (u64)atomic64_read(&stats->reset_stats.fw_resets),
+ (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
+ (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
+ (u64)atomic64_read(&stats->reset_stats.fnic_resets),
+ (u64)atomic64_read(
+ &stats->reset_stats.fnic_reset_completions),
+ (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tFirmware Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Active FW Requests %lld\n"
+ "Maximum FW Requests: %lld\n"
+ "Number of FW out of resources: %lld\n"
+ "Number of FW IO errors: %lld\n",
+ (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
+ (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
+ (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
+ (u64)atomic64_read(&stats->fw_stats.io_fw_errs));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tVlan Discovery Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Vlan Discovery Requests Sent %lld\n"
+ "Vlan Response Received with no FCF VLAN ID: %lld\n"
+ "No solicitations recvd after vlan set, expiry count: %lld\n"
+ "Flogi rejects count: %lld\n",
+ (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
+ (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
+ (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
+ (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tOther Important Statistics\n"
+ "------------------------------------------\n");
+
+ jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Last ISR time: %llu (%8lu.%8lu)\n"
+ "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Number of ISRs: %lld\n"
+ "Maximum CQ Entries: %lld\n"
+ "Number of ACK index out of range: %lld\n"
+ "Number of data count mismatch: %lld\n"
+ "Number of FCPIO Timeouts: %lld\n"
+ "Number of FCPIO Aborted: %lld\n"
+ "Number of SGL Invalid: %lld\n"
+ "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
+ "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
+ "Number of Copy WQ Alloc Failures for IOs: %lld\n"
+ "Number of no icmnd itmf Completions: %lld\n"
+ "Number of QUEUE Fulls: %lld\n"
+ "Number of rport not ready: %lld\n"
+ "Number of receive frame errors: %lld\n",
+ (u64)stats->misc_stats.last_isr_time,
+ val1.tv_sec, val1.tv_nsec,
+ (u64)stats->misc_stats.last_ack_time,
+ val2.tv_sec, val2.tv_nsec,
+ (u64)atomic64_read(&stats->misc_stats.isr_count),
+ (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
+ (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
+ (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
+ (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
+ (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
+ (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
+ (u64)atomic64_read(
+ &stats->misc_stats.abts_cpwq_alloc_failures),
+ (u64)atomic64_read(
+ &stats->misc_stats.devrst_cpwq_alloc_failures),
+ (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
+ (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
+ (u64)atomic64_read(&stats->misc_stats.queue_fulls),
+ (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
+ (u64)atomic64_read(&stats->misc_stats.frame_errors));
+
+ return len;
+
+}
+
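The function above relies on the usual len += snprintf(buf + len, size - len, ...) accumulation. A standalone C sketch of that idiom (illustrative only, not driver code) — note that snprintf() reports the length it would have written, so len should be clamped or checked for truncation before being reused:

#include <stdio.h>

static int dump_example(char *buf, int size)
{
	int len = 0;

	len += snprintf(buf + len, size - len, "Counter A: %d\n", 1);
	if (len >= size)
		return size - 1;		/* output truncated */
	len += snprintf(buf + len, size - len, "Counter B: %d\n", 2);
	return len < size ? len : size - 1;
}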
+/*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
*
* Description:
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index cef42b4c4d6..d412f2ee3c4 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -84,7 +84,8 @@ fnic_trace_data_t *fnic_trace_get_buf(void);
int fnic_get_trace_data(fnic_dbgfs_t *);
int fnic_trace_buf_init(void);
void fnic_trace_free(void);
+int fnic_debugfs_init(void);
+void fnic_debugfs_terminate(void);
int fnic_trace_debugfs_init(void);
void fnic_trace_debugfs_terminate(void);
-
#endif
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 6d55b4e7e79..ee4fa40a50b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -594,8 +594,6 @@ static void gdth_pci_remove_one(struct pci_dev *pdev)
{
gdth_ha_str *ha = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, NULL);
-
list_del(&ha->list);
gdth_remove_one(ha);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index df0c3c71ea4..f334859024c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -316,6 +316,12 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
+static unsigned int shost_eh_deadline;
+
+module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(eh_deadline,
+ "SCSI EH timeout in seconds (should be between 1 and 2^32-1)");
+
static struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
@@ -388,6 +394,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->use_clustering = sht->use_clustering;
shost->ordered_tag = sht->ordered_tag;
+ shost->eh_deadline = shost_eh_deadline * HZ;
if (sht->supported_mode == MODE_UNKNOWN)
/* means we didn't set it ... default to INITIATOR */
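The hunk above wires a writable module parameter into host allocation. A minimal sketch of the same pattern with made-up names (example_timeout), assuming it would live in some module's source file:

#include <linux/module.h>
#include <linux/stat.h>

static unsigned int example_timeout;
module_param_named(example_timeout, example_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(example_timeout, "Example timeout in seconds");

/* Sampled once per new object, analogous to
 * shost->eh_deadline = shost_eh_deadline * HZ above. */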
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 891c86b6625..22f6432eb47 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -100,7 +100,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
@@ -5018,7 +5017,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
kfree(h->hba_inquiry_data);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(h);
}
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9e2588a6881..add6d1566ec 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
struct iscsi_conn *conn = sk->sk_user_data;
if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
+ (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
!atomic_read(&sk->sk_rmem_alloc)) {
ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 587992952b3..1b3a0947345 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/log2.h>
#include <scsi/fc/fc_fc2.h>
@@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
fr_eof(fp) = FC_EOF_N;
}
- /*
- * Initialize remainig fh fields
- * from fc_fill_fc_hdr
- */
+ /* Initialize remaining fh fields from fc_fill_fc_hdr */
fh->fh_ox_id = htons(ep->oxid);
fh->fh_rx_id = htons(ep->rxid);
fh->fh_seq_id = ep->seq.id;
@@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
- if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
- msecs_to_jiffies(timer_msec)))
- fc_exch_hold(ep); /* hold for timer */
+ fc_exch_hold(ep); /* hold for timer */
+ if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ fc_exch_release(ep);
}
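The reordering above pins down a reference-counting idiom: take the exchange hold unconditionally, then drop it when queue_delayed_work() reports the delayed work was already pending. A sketch restating the idiom with the same libfc helpers (treat it as illustrative, not additional driver code):

static void arm_exch_timer(struct fc_exch *ep, unsigned int timer_msec)
{
	fc_exch_hold(ep);			/* hold for the timer */
	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
				msecs_to_jiffies(timer_msec)))
		fc_exch_release(ep);		/* already queued: drop hold */
}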
/**
@@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
/**
* fc_exch_done_locked() - Complete an exchange with the exchange lock held
* @ep: The exchange that is complete
+ *
+ * Note: May sleep if invoked from outside a response handler.
*/
static int fc_exch_done_locked(struct fc_exch *ep)
{
@@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep)
* ep, and in that case we only clear the resp and set it as
* complete, so it can be reused by the timer to send the rrq.
*/
- ep->resp = NULL;
if (ep->state & FC_EX_DONE)
return rc;
ep->esb_stat |= ESB_ST_COMPLETE;
@@ -464,15 +464,21 @@ static void fc_exch_delete(struct fc_exch *ep)
}
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
- struct fc_frame *fp)
+ struct fc_frame *fp)
{
struct fc_exch *ep;
struct fc_frame_header *fh = fc_frame_header_get(fp);
- int error;
+ int error = -ENXIO;
u32 f_ctl;
u8 fh_type = fh->fh_type;
ep = fc_seq_exch(sp);
+
+ if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
+ fc_frame_free(fp);
+ goto out;
+ }
+
WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
f_ctl = ntoh24(fh->fh_f_ctl);
@@ -515,6 +521,9 @@ out:
* @lport: The local port that the exchange will be sent on
* @sp: The sequence to be sent
* @fp: The frame to be sent on the exchange
+ *
+ * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
+ * or indirectly by calling libfc_function_template.frame_send().
*/
static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame *fp)
@@ -581,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
/*
* Set the response handler for the exchange associated with a sequence.
+ *
+ * Note: May sleep if invoked from outside a response handler.
*/
static void fc_seq_set_resp(struct fc_seq *sp,
void (*resp)(struct fc_seq *, struct fc_frame *,
@@ -588,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp,
void *arg)
{
struct fc_exch *ep = fc_seq_exch(sp);
+ DEFINE_WAIT(wait);
spin_lock_bh(&ep->ex_lock);
+ while (ep->resp_active && ep->resp_task != current) {
+ prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&ep->ex_lock);
+
+ schedule();
+
+ spin_lock_bh(&ep->ex_lock);
+ }
+ finish_wait(&ep->resp_wq, &wait);
ep->resp = resp;
ep->arg = arg;
spin_unlock_bh(&ep->ex_lock);
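The new wait loop in fc_seq_set_resp() is the standard prepare_to_wait()/schedule()/finish_wait() pattern around a lock-protected condition. A generic sketch with placeholder names (wait_for_quiesce, busy), assuming the owner of the condition calls wake_up() on the same waitqueue when it clears busy:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static void wait_for_quiesce(spinlock_t *lock, wait_queue_head_t *wq, int *busy)
{
	DEFINE_WAIT(wait);

	spin_lock_bh(lock);
	while (*busy) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(lock);
		schedule();			/* sleep until woken */
		spin_lock_bh(lock);
	}
	finish_wait(wq, &wait);
	spin_unlock_bh(lock);
}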
@@ -622,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
if (!sp)
return -ENOMEM;
- ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
if (timer_msec)
fc_exch_timer_set_locked(ep, timer_msec);
- /*
- * If not logged into the fabric, don't send ABTS but leave
- * sequence active until next timeout.
- */
- if (!ep->sid)
- return 0;
-
- /*
- * Send an abort for the sequence that timed out.
- */
- fp = fc_frame_alloc(ep->lp, 0);
- if (fp) {
- fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
- FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
- error = fc_seq_send_locked(ep->lp, sp, fp);
- } else
- error = -ENOBUFS;
+ if (ep->sid) {
+ /*
+ * Send an abort for the sequence that timed out.
+ */
+ fp = fc_frame_alloc(ep->lp, 0);
+ if (fp) {
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+ FC_TYPE_BLS, FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ error = fc_seq_send_locked(ep->lp, sp, fp);
+ } else {
+ error = -ENOBUFS;
+ }
+ } else {
+ /*
+ * If not logged into the fabric, don't send ABTS but leave
+ * sequence active until next timeout.
+ */
+ error = 0;
+ }
+ ep->esb_stat |= ESB_ST_ABNORMAL;
return error;
}
@@ -669,6 +694,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
}
/**
+ * fc_invoke_resp() - invoke ep->resp()
+ *
+ * Notes:
+ * It is assumed that after initialization finished (this means the
+ * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+ * modified only via fc_seq_set_resp(). This guarantees that neither of
+ * these two variables changes while ep->resp_active > 0.
+ *
+ * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+ * this function is invoked, the first spin_lock_bh() call in this function
+ * will wait until fc_seq_set_resp() has finished modifying these variables.
+ *
+ * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that
+ * ep->resp() won't be invoked after fc_exch_done() has returned.
+ *
+ * The response handler itself may invoke fc_exch_done(), which will clear the
+ * ep->resp pointer.
+ *
+ * Return value:
+ * Returns true if and only if ep->resp has been invoked.
+ */
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *arg;
+ bool res = false;
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp_active++;
+ if (ep->resp_task != current)
+ ep->resp_task = !ep->resp_task ? current : NULL;
+ resp = ep->resp;
+ arg = ep->arg;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (resp) {
+ resp(sp, fp, arg);
+ res = true;
+ } else if (!IS_ERR(fp)) {
+ fc_frame_free(fp);
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ if (--ep->resp_active == 0)
+ ep->resp_task = NULL;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (ep->resp_active == 0)
+ wake_up(&ep->resp_wq);
+
+ return res;
+}
+
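For orientation, a simplified sketch of the handshake fc_invoke_resp() implements (the real function also tracks resp_task so the setter is not blocked by its own callback; this stripped-down version omits that):

static void invoke_guarded(struct fc_exch *ep,
			   void (*cb)(struct fc_seq *, struct fc_frame *, void *),
			   struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	spin_lock_bh(&ep->ex_lock);
	ep->resp_active++;			/* block fc_seq_set_resp() */
	spin_unlock_bh(&ep->ex_lock);

	if (cb)
		cb(sp, fp, arg);

	spin_lock_bh(&ep->ex_lock);
	if (--ep->resp_active == 0)
		wake_up(&ep->resp_wq);		/* let the setter proceed */
	spin_unlock_bh(&ep->ex_lock);
}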
+/**
* fc_exch_timeout() - Handle exchange timer expiration
* @work: The work_struct identifying the exchange that timed out
*/
@@ -677,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work)
struct fc_exch *ep = container_of(work, struct fc_exch,
timeout_work.work);
struct fc_seq *sp = &ep->seq;
- void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
- void *arg;
u32 e_stat;
int rc = 1;
@@ -696,16 +774,13 @@ static void fc_exch_timeout(struct work_struct *work)
fc_exch_rrq(ep);
goto done;
} else {
- resp = ep->resp;
- arg = ep->arg;
- ep->resp = NULL;
if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_delete(ep);
- if (resp)
- resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
+ fc_seq_set_resp(sp, NULL, ep->arg);
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done;
}
@@ -792,6 +867,8 @@ hit:
ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
ep->rxid = FC_XID_UNKNOWN;
ep->class = mp->class;
+ ep->resp_active = 0;
+ init_waitqueue_head(&ep->resp_wq);
INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
return ep;
@@ -838,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
- if (ep && ep->xid == xid)
+ if (ep) {
+ WARN_ON(ep->xid != xid);
fc_exch_hold(ep);
+ }
spin_unlock_bh(&pool->lock);
}
return ep;
@@ -850,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
* fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
* the memory allocated for the related objects may be freed.
* @sp: The sequence that has completed
+ *
+ * Note: May sleep if invoked from outside a response handler.
*/
static void fc_exch_done(struct fc_seq *sp)
{
@@ -859,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp)
spin_lock_bh(&ep->ex_lock);
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+
+ fc_seq_set_resp(sp, NULL, ep->arg);
if (!rc)
fc_exch_delete(ep);
}
@@ -987,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
}
}
+ spin_lock_bh(&ep->ex_lock);
/*
* At this point, we have the exchange held.
* Find or create the sequence.
@@ -1014,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
* sending RSP, hence write request on other
* end never finishes.
*/
- spin_lock_bh(&ep->ex_lock);
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
- spin_unlock_bh(&ep->ex_lock);
} else {
+ spin_unlock_bh(&ep->ex_lock);
+
/* sequence/exch should exist */
reject = FC_RJT_SEQ_ID;
goto rel;
@@ -1029,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
fr_seq(fp) = sp;
out:
@@ -1291,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
if (!ep)
goto reject;
+
+ fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+ if (!fp)
+ goto free;
+
spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock);
+
+ fc_frame_free(fp);
goto reject;
}
- if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
fc_exch_hold(ep); /* hold for REC_QUAL */
- ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
- fc_exch_timer_set_locked(ep, ep->r_a_tov);
-
- fp = fc_frame_alloc(ep->lp, sizeof(*ap));
- if (!fp) {
- spin_unlock_bh(&ep->ex_lock);
- goto free;
}
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
fh = fc_frame_header_get(fp);
ap = fc_frame_payload_get(fp, sizeof(*ap));
memset(ap, 0, sizeof(*ap));
@@ -1319,14 +1406,16 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
}
sp = fc_seq_start_next_locked(sp);
fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ ep->esb_stat |= ESB_ST_ABNORMAL;
spin_unlock_bh(&ep->ex_lock);
+
+free:
fc_frame_free(rx_fp);
return;
reject:
fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
-free:
- fc_frame_free(rx_fp);
+ goto free;
}
/**
@@ -1416,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* If new exch resp handler is valid then call that
* first.
*/
- if (ep->resp)
- ep->resp(sp, fp, ep->arg);
- else
+ if (!fc_invoke_resp(ep, sp, fp))
lport->tt.lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */
} else {
@@ -1442,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
struct fc_exch *ep;
enum fc_sof sof;
u32 f_ctl;
- void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
- void *ex_resp_arg;
int rc;
ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
@@ -1478,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = sp;
+
+ spin_lock_bh(&ep->ex_lock);
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
if (fc_sof_needs_ack(sof))
fc_seq_send_ack(sp, fp);
- resp = ep->resp;
- ex_resp_arg = ep->arg;
if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock);
- resp = ep->resp;
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
@@ -1511,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If new exch resp handler is valid then call that
* first.
*/
- if (resp)
- resp(sp, fp, ex_resp_arg);
- else
- fc_frame_free(fp);
+ fc_invoke_resp(ep, sp, fp);
+
fc_exch_release(ep);
return;
rel:
@@ -1553,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
*/
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
- void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
- void *ex_resp_arg;
struct fc_frame_header *fh;
struct fc_ba_acc *ap;
struct fc_seq *sp;
@@ -1599,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
break;
}
- resp = ep->resp;
- ex_resp_arg = ep->arg;
-
/* do we need to do some other checks here. Can we reuse more of
* fc_exch_recv_seq_resp
*/
@@ -1613,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+
+ fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
-
- if (resp)
- resp(sp, fp, ex_resp_arg);
- else
- fc_frame_free(fp);
-
+ fc_invoke_resp(ep, sp, fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
-
+ fc_exch_release(ep);
}
/**
@@ -1662,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
break;
default:
if (ep)
- FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+ FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl));
break;
@@ -1745,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
/**
* fc_exch_reset() - Reset an exchange
* @ep: The exchange to be reset
+ *
+ * Note: May sleep if invoked from outside a response handler.
*/
static void fc_exch_reset(struct fc_exch *ep)
{
struct fc_seq *sp;
- void (*resp)(struct fc_seq *, struct fc_frame *, void *);
- void *arg;
int rc = 1;
spin_lock_bh(&ep->ex_lock);
fc_exch_abort_locked(ep, 0);
ep->state |= FC_EX_RST_CLEANUP;
fc_exch_timer_cancel(ep);
- resp = ep->resp;
- ep->resp = NULL;
if (ep->esb_stat & ESB_ST_REC_QUAL)
atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
ep->esb_stat &= ~ESB_ST_REC_QUAL;
- arg = ep->arg;
sp = &ep->seq;
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+
+ fc_exch_hold(ep);
+
if (!rc)
fc_exch_delete(ep);
- if (resp)
- resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
}
/**
@@ -1956,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
switch (op) {
case ELS_LS_RJT:
- FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
+ FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
/* fall through */
case ELS_LS_ACC:
goto cleanup;
default:
- FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
- "for RRQ", op);
+ FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
+ op);
return;
}
@@ -2533,13 +2609,8 @@ int fc_setup_exch_mgr(void)
* cpu on which exchange originated by simple bitwise
* AND operation between fc_cpu_mask and exchange id.
*/
- fc_cpu_mask = 1;
- fc_cpu_order = 0;
- while (fc_cpu_mask < nr_cpu_ids) {
- fc_cpu_mask <<= 1;
- fc_cpu_order++;
- }
- fc_cpu_mask--;
+ fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
+ fc_cpu_mask = (1 << fc_cpu_order) - 1;
fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
if (!fc_exch_workqueue)
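The simplification above computes the same values the removed loop did, using the kernel's log2 helpers: with nr_cpu_ids == 6, roundup_pow_of_two(6) == 8, so fc_cpu_order == 3 and fc_cpu_mask == 0x7. A worked sketch:

#include <linux/log2.h>

static void compute_cpu_hash(unsigned int nr_cpus, u16 *order, u16 *mask)
{
	*order = ilog2(roundup_pow_of_two(nr_cpus));
	*mask  = (1 << *order) - 1;	/* low bits pick a per-CPU exchange pool */
}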
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5fd0f1fbe58..1d7e76e8b44 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -902,7 +902,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
/*
* Check for missing or extra data frames.
*/
- if (unlikely(fsp->xfer_len != expected_len)) {
+ if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
+ fsp->xfer_len != expected_len)) {
if (fsp->xfer_len < expected_len) {
/*
* Some data may be queued locally,
@@ -955,12 +956,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
* Test for transport underrun, independent of response
* underrun status.
*/
- if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+ if (fsp->cdb_status == SAM_STAT_GOOD &&
+ fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
- fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
fsp->status_code = FC_DATA_UNDRUN;
- fsp->io_status = 0;
- }
}
seq = fsp->seq_ptr;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index f04d15c67df..e01a29863c3 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -516,7 +516,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
* @lport: The local port receiving the LOGO
* @fp: The LOGO request frame
*
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
* this function.
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -1088,7 +1088,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
unsigned long delay = 0;
FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
- PTR_ERR(fp), fc_lport_state(lport),
+ IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
lport->retry_count);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index c710d908fda..589ff9aedd3 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1705,7 +1705,7 @@ reject:
* @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame
*
- * Locking Note: The rport lock is exected to be held before calling
+ * Locking Note: The rport lock is expected to be held before calling
* this function.
*/
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
@@ -1824,7 +1824,7 @@ drop:
* @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame
*
- * Locking Note: The rport lock is exected to be held before calling
+ * Locking Note: The rport lock is expected to be held before calling
* this function.
*/
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
@@ -1895,7 +1895,7 @@ drop:
* @lport: The local port that received the LOGO request
* @fp: The LOGO request frame
*
- * Locking Note: The rport lock is exected to be held before calling
+ * Locking Note: The rport lock is expected to be held before calling
* this function.
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b92aec989d6..82134d20e2d 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2629,7 +2629,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
rspiocbq,
(phba->fc_ratov * 2)
+ LPFC_DRVR_TIMEOUT);
- if (iocb_stat) {
+ if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
ret_val = -EIO;
goto err_get_xri_exit;
}
@@ -3204,8 +3204,9 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOCB_SUCCESS))) {
+ if ((iocb_stat != IOCB_SUCCESS) ||
+ ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (rsp->ulpStatus != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 02e8cd923d0..da61d8dc044 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -280,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context1 = NULL;
+ ctiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, ctiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7801601aa5d..883ea2d9f23 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4171,8 +4171,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
}
struct lpfc_nodelist *
@@ -4217,6 +4215,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+
if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state);
@@ -5617,6 +5618,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 086c3f28caa..5464b116d32 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3439,7 +3439,8 @@ struct els_request64_wqe {
#define els_req64_hopcnt_SHIFT 24
#define els_req64_hopcnt_MASK 0x000000ff
#define els_req64_hopcnt_WORD word13
- uint32_t reserved[2];
+ uint32_t word14;
+ uint32_t max_response_payload_len;
};
struct xmit_els_rsp64_wqe {
@@ -3554,7 +3555,8 @@ struct gen_req64_wqe {
uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4];
+ uint32_t rsvd_12_14[3];
+ uint32_t max_response_payload_len;
};
struct create_xri_wqe {
@@ -3584,7 +3586,13 @@ struct abort_cmd_wqe {
struct fcp_iwrite64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_offset_len;
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x00000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
@@ -3594,7 +3602,13 @@ struct fcp_iwrite64_wqe {
struct fcp_iread64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_offset_len; /* word 3 */
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x00000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -3604,7 +3618,13 @@ struct fcp_iread64_wqe {
struct fcp_icmnd64_wqe {
struct ulp_bde64 bde; /* words 0-2 */
- uint32_t rsrvd3; /* word 3 */
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x00000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
uint32_t rsrvd4; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
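The *_SHIFT/_MASK/_WORD triplets added above are consumed by lpfc's bf_set()/bf_get() style macros. The macro below is a simplified stand-in (not the driver's exact definition) showing how the three tokens combine to place a field into its word:

#define example_bf_set(name, ptr, value)				\
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

/* e.g. example_bf_set(payload_offset_len, &wqe->fcp_iwrite,
 *			xmit_len + sizeof(struct fcp_rsp)); */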
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 647f5bfb3bd..68c94cc85c3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4545,7 +4545,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_save_state(pdev);
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
- if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(pdev))
pdev->needs_freset = 1;
return 0;
@@ -4581,8 +4581,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
/* Release PCI resource and disable PCI device */
pci_release_selected_regions(pdev, bars);
pci_disable_device(pdev);
- /* Null out PCI private reference to driver */
- pci_set_drvdata(pdev, NULL);
return;
}
@@ -9429,7 +9427,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
- pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
/*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c913e8cc3b2..b2ede05a5f0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1012,20 +1012,25 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break;
}
- /* Allocate iotag for psb->cur_iocbq. */
- iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
- if (iotag == 0) {
+
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ psb->data, psb->dma_handle);
kfree(psb);
break;
}
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ psb->data, psb->dma_handle);
kfree(psb);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3368 Failed to allocated IOTAG for"
+ " XRI:0x%x\n", lxri);
+ lpfc_sli4_free_xri(phba, lxri);
break;
}
psb->cur_iocbq.sli4_lxritag = lxri;
@@ -4485,9 +4490,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb->ulpContext =
vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
}
- if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
- piocb->ulpFCP2Rcvy = 1;
- }
+ piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
/* ulpTimeout is only one byte */
@@ -4981,6 +4984,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
}
}
+
+/**
+ * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ *
+ * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t rsp_info;
+ uint32_t rsp_len;
+ uint8_t rsp_info_code;
+ int ret = FAILED;
+
+
+ if (fcprsp == NULL)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0703 fcp_rsp is missing\n");
+ else {
+ rsp_info = fcprsp->rspStatus2;
+ rsp_len = be32_to_cpu(fcprsp->rspRspLen);
+ rsp_info_code = fcprsp->rspInfo3;
+
+
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_FCP,
+ "0706 fcp_rsp valid 0x%x,"
+ " rsp len=%d code 0x%x\n",
+ rsp_info,
+ rsp_len, rsp_info_code);
+
+ if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+ switch (rsp_info_code) {
+ case RSP_NO_FAILURE:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0715 Task Mgmt No Failure\n");
+ ret = SUCCESS;
+ break;
+ case RSP_TM_NOT_SUPPORTED: /* TM rejected */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0716 Task Mgmt Target "
+ "reject\n");
+ break;
+ case RSP_TM_NOT_COMPLETED: /* TM failed */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0717 Task Mgmt Target "
+ "failed TM\n");
+ break;
+ case RSP_TM_INVALID_LU: /* TM to invalid LU! */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0718 Task Mgmt to invalid "
+ "LUN\n");
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
@@ -5042,12 +5112,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
- if (status != IOCB_SUCCESS) {
- if (status == IOCB_TIMEDOUT) {
- ret = TIMEOUT_ERROR;
- } else
- ret = FAILED;
- lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ if ((status != IOCB_SUCCESS) ||
+ (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
"iocb_flag x%x\n",
@@ -5055,9 +5121,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
iocbqrsp->iocb.un.ulpWord[4],
iocbq->iocb_flag);
- } else if (status == IOCB_BUSY)
- ret = FAILED;
- else
+ /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
+ if (status == IOCB_SUCCESS) {
+ if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ /* Something in the FCP_RSP was invalid.
+ * Check conditions */
+ ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
+ else
+ ret = FAILED;
+ } else if (status == IOCB_TIMEDOUT) {
+ ret = TIMEOUT_ERROR;
+ } else {
+ ret = FAILED;
+ }
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ } else
ret = SUCCESS;
lpfc_sli_release_iocbq(phba, iocbqrsp);
@@ -5181,7 +5259,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
- int status, ret = SUCCESS;
+ int status;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5222,9 +5300,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
- ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ if (status == SUCCESS)
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN);
- return ret;
+
+ return status;
}
/**
@@ -5248,7 +5328,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
- int status, ret = SUCCESS;
+ int status;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5289,9 +5369,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
- ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ if (status == SUCCESS)
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
- return ret;
+ return status;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index b1d9f7fcb91..852ff7def49 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -73,6 +73,7 @@ struct fcp_rsp {
#define RSP_RO_MISMATCH_ERR 0x03
#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
+#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */
uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 612f48973ff..8f580fda443 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -6566,6 +6568,108 @@ lpfc_mbox_timeout(unsigned long ptr)
return;
}
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ * are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+ uint32_t idx;
+ struct lpfc_queue *mcq;
+ struct lpfc_mcqe *mcqe;
+ bool pending_completions = false;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Check for completions on mailbox completion queue */
+
+ mcq = phba->sli4_hba.mbx_cq;
+ idx = mcq->hba_index;
+ while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+ mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+ if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+ (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+ pending_completions = true;
+ break;
+ }
+ idx = (idx + 1) % mcq->entry_count;
+ if (mcq->hba_index == idx)
+ break;
+ }
+ return pending_completions;
+
+}
+
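The scan above walks the mailbox CQ as a circular buffer. A generic sketch with hypothetical types and callbacks (cq_ring, entry_valid, entry_wanted), stopping at the first invalid entry or after a full wrap:

struct cq_ring {
	unsigned int hba_index;		/* next entry the driver would consume */
	unsigned int entry_count;
};

static bool ring_has_work(struct cq_ring *cq,
			  bool (*entry_valid)(unsigned int idx),
			  bool (*entry_wanted)(unsigned int idx))
{
	unsigned int idx = cq->hba_index;

	while (entry_valid(idx)) {
		if (entry_wanted(idx))
			return true;		/* pending completion found */
		idx = (idx + 1) % cq->entry_count;
		if (idx == cq->hba_index)
			break;			/* wrapped all the way around */
	}
	return false;
}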
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ * that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For sli4, it is possible to miss an interrupt. As such, mbox completions
+ * may be missed, causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+ uint32_t eqidx;
+ struct lpfc_queue *fpeq = NULL;
+ struct lpfc_eqe *eqe;
+ bool mbox_pending;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Find the eq associated with the mcq */
+
+ if (phba->sli4_hba.hba_eq)
+ for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+ if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+ phba->sli4_hba.mbx_cq->assoc_qid) {
+ fpeq = phba->sli4_hba.hba_eq[eqidx];
+ break;
+ }
+ if (!fpeq)
+ return false;
+
+ /* Turn off interrupts from this EQ */
+
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /* Check to see if a mbox completion is pending */
+
+ mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+ /*
+ * If a mbox completion is pending, process all the events on EQ
+ * associated with the mbox completion queue (this could include
+ * mailbox commands, async events, els commands, receive queue data
+ * and fcp commands)
+ */
+
+ if (mbox_pending)
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ return mbox_pending;
+
+}
/**
* lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@@ -6583,6 +6687,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
+ /* If the mailbox completed, process the completion and return */
+ if (lpfc_sli4_process_missed_mbox_completions(phba))
+ return;
+
/* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the
* worklist and the mailbox actually completing. When this
@@ -7077,6 +7185,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Make sure the mailbox is really active */
+ if (timeout)
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
@@ -8076,6 +8188,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ wqe->els_req.max_response_payload_len = total_len - xmit_len;
break;
case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@@ -8120,8 +8233,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iwrite.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iwrite,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@@ -8139,8 +8254,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iread.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iread,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iread,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@@ -8156,8 +8273,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd,
+ 0);
/* word3 iocb=IO_TAG wqe=reserved */
- wqe->fcp_icmd.rsrvd3 = 0;
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@@ -8203,6 +8325,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ wqe->gen_req.max_response_payload_len = total_len - xmit_len;
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
@@ -10073,6 +10196,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
if (iocb_completed) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n");
+ /* Note: we are not indicating whether the IOCB has a success
+ * status or not - that is for the caller to check.
+ * IOCB_SUCCESS only means that the command was sent and
+ * completed, not that it completed successfully.
+ */
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0338 IOCB wait timeout error - no "
@@ -11074,8 +11202,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ int numBdes, i;
unsigned long iflags;
- uint32_t status;
+ uint32_t status, max_response;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl, bde;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -11092,7 +11223,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ switch (pIocbOut->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+ max_response = bde.tus.f.bdeSize;
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ max_response = 0;
+ if (!pIocbOut->context3)
+ break;
+ numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+ sizeof(struct ulp_bde64);
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ max_response += bde.tus.f.bdeSize;
+ }
+ break;
+ default:
+ max_response = wcqe->total_data_placed;
+ break;
+ }
+ if (max_response < wcqe->total_data_placed)
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+ else
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+ wcqe->total_data_placed;
}
/* Convert BG errors for completion status */
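Editor's note: the hunk above caps the byte count copied from the WCQE at whatever the request's BDE list can actually receive, so a bad completion can never claim more data than the posted buffers could hold (in the driver, response BDEs are the entries whose bdeFlags is not BUFF_TYPE_BDE_64). A hedged userspace sketch of that clamp, with a made-up toy_bde type standing in for ulp_bde64:

#include <stdint.h>

struct toy_bde {
        uint32_t size;
        int is_response;        /* set for entries describing response buffers */
};

/* Sum the response-buffer capacity of a BDE list and clamp the
 * hardware-reported byte count to it. */
uint32_t toy_clamp_response_len(const struct toy_bde *bdes, int nbdes,
                                uint32_t total_data_placed)
{
        uint32_t max_response = 0;
        int i;

        for (i = 0; i < nbdes; i++)
                if (bdes[i].is_response)
                        max_response += bdes[i].size;

        return total_data_placed < max_response ?
               total_data_placed : max_response;
}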
@@ -15098,6 +15258,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
uint16_t max_rpi, rpi_limit;
uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
+ unsigned long iflag;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
@@ -15106,7 +15267,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@@ -15122,7 +15283,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -15131,7 +15292,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* extents.
*/
if (!phba->sli4_hba.rpi_hdrs_in_use) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -15142,7 +15303,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* how many are supported max by the device.
*/
rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
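Editor's note: switching from spin_lock_irq() to spin_lock_irqsave() matters when a function can be reached from a context where interrupts are already disabled, since the _irq variant unconditionally re-enables interrupts on unlock while _irqsave restores whatever state the caller had. As a loose, hedged analogue only, the same "save the previous state, restore it on the way out" discipline can be shown in userspace with POSIX signal masks instead of interrupt flags:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Block all signals around the critical section, then restore the
 * caller's previous mask instead of blindly unblocking everything. */
static void critical_section(void)
{
        sigset_t all, saved;

        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &saved);       /* "irqsave" */
        pthread_mutex_lock(&lock);

        /* ... touch shared state ... */

        pthread_mutex_unlock(&lock);
        pthread_sigmask(SIG_SETMASK, &saved, NULL);     /* "irqrestore" */
}

int main(void)
{
        critical_section();
        printf("previous signal mask restored\n");
        return 0;
}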
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 85120b77aa0..298c8cd1a89 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -673,6 +673,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f58f18342bc..e3094c4e143 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.42"
+#define LPFC_DRIVER_VERSION "8.3.43"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 515c9629e9f..d1a4b82836e 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -534,7 +534,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_cmm_unreg:
- pci_set_drvdata(pdev, NULL);
megaraid_cmm_unregister(adapter);
out_fini_mbox:
megaraid_fini_mbox(adapter);
@@ -594,11 +593,6 @@ megaraid_detach_one(struct pci_dev *pdev)
// detach from the IO sub-system
megaraid_io_detach(adapter);
- // reset the device state in the PCI structure. We check this
- // condition when we enter here. If the device state is NULL,
- // that would mean the device has already been removed
- pci_set_drvdata(pdev, NULL);
-
// Unregister from common management module
//
// FIXME: this must return success or failure for conditions if there
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 0c73ba4bf45..e9e543c5848 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1531,6 +1531,7 @@ struct megasas_instance {
struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
+ struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3020921a474..0a743a5d164 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3194,19 +3194,21 @@ megasas_get_pd_list(struct megasas_instance *instance)
(le32_to_cpu(ci->count) <
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
- memset(instance->pd_list, 0,
+ memset(instance->local_pd_list, 0,
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
- instance->pd_list[pd_addr->deviceId].tid =
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
le16_to_cpu(pd_addr->deviceId);
- instance->pd_list[pd_addr->deviceId].driveType =
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
pd_addr->scsiDevType;
- instance->pd_list[pd_addr->deviceId].driveState =
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
MR_PD_STATE_SYSTEM;
pd_addr++;
}
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
}
pci_free_consistent(instance->pdev,
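Editor's note: filling local_pd_list first and publishing it with a single memcpy means readers of pd_list never observe the window where the old contents have been memset away but the new entries are only partially written; they may still race with the copy itself, but the live list is never wiped while it is being rebuilt. A minimal sketch of that stage-then-publish pattern, using an invented toy_pd type:

#include <string.h>

#define TOY_MAX_PD 64

struct toy_pd {
        unsigned short tid;
        unsigned char drive_type;
        unsigned char drive_state;
};

struct toy_pd live_list[TOY_MAX_PD];    /* what the rest of the code reads */

void toy_refresh_pd_list(const struct toy_pd *fw_entries, int count)
{
        struct toy_pd staging[TOY_MAX_PD];
        int i;

        memset(staging, 0, sizeof(staging));
        for (i = 0; i < count; i++)
                if (fw_entries[i].tid < TOY_MAX_PD)
                        staging[fw_entries[i].tid] = fw_entries[i];

        /* publish the fully built snapshot in one step */
        memcpy(live_list, staging, sizeof(live_list));
}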
@@ -3998,7 +4000,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
* values
*/
if ((prev_aen.members.class <= curr_aen.members.class) &&
- !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
curr_aen.members.locale)) {
/*
* Previously issued event registration includes
@@ -4006,7 +4008,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*/
return 0;
} else {
- curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale);
+ curr_aen.members.locale |= prev_aen.members.locale;
if (prev_aen.members.class < curr_aen.members.class)
curr_aen.members.class = prev_aen.members.class;
@@ -4097,7 +4099,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
class_locale.members.class = MR_EVT_CLASS_DEBUG;
return megasas_register_aen(instance,
- le32_to_cpu(eli.newest_seq_num) + 1,
+ eli.newest_seq_num + 1,
class_locale.word);
}
@@ -4449,7 +4451,6 @@ retry_irq_register:
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
megasas_mgmt_info.max_index--;
- pci_set_drvdata(pdev, NULL);
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
for (i = 0 ; i < instance->msix_vectors; i++)
@@ -4805,8 +4806,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
}
}
- pci_set_drvdata(instance->pdev, NULL);
-
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
@@ -4848,8 +4847,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->evt_detail, instance->evt_detail_h);
scsi_host_put(host);
- pci_set_drvdata(pdev, NULL);
-
pci_disable_device(pdev);
return;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7b7381d7671..5ff978be249 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,7 +657,6 @@ static void mvs_pci_remove(struct pci_dev *pdev)
tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif
- pci_set_drvdata(pdev, NULL);
sas_unregister_ha(sha);
sas_remove_host(mvi->shost);
scsi_remove_host(mvi->shost);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6b1b4e91e53..6c1f223a8e1 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1411,7 +1411,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
if (res) {
del_timer(&task->slow_task->timer);
- mv_printk("executing internel task failed:%d\n", res);
+ mv_printk("executing internal task failed:%d\n", res);
goto ex_err;
}
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index c3601b57a80..edbee8dc62c 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2583,7 +2583,6 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
fail_io_attach:
- pci_set_drvdata(pdev, NULL);
mhba->instancet->disable_intr(mhba);
free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
@@ -2618,7 +2617,6 @@ static void mvumi_detach_one(struct pci_dev *pdev)
free_irq(mhba->pdev->irq, mhba);
mvumi_release_fw(mhba);
scsi_host_put(host);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
dev_dbg(&pdev->dev, "driver is removed!\n");
}
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 5982a587bab..7d014b11df6 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1615,7 +1615,7 @@ struct ncb {
spinlock_t smp_lock; /* Lock for SMP threading */
/*----------------------------------------------------------------
- ** Chip and controller indentification.
+ ** Chip and controller identification.
**----------------------------------------------------------------
*/
int unit; /* Unit number */
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index d99f41c2ca1..a04b4ff8c7f 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -309,6 +309,117 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
}
static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
/**
+ * pm8001_ctl_ib_queue_log_show - Inbound queue log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int offset;
+ char *str = buf;
+ int start = 0;
+#define IB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[IB].virt_ptr + \
+ pm8001_ha->evtlog_ib_offset + (c)))
+
+ for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+ if (pm8001_ha->chip_id != chip_8001)
+ str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+ else
+ str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+ start = start + 4;
+ }
+ pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
+ if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id != chip_8001))
+ pm8001_ha->evtlog_ib_offset = 0;
+ if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id == chip_8001))
+ pm8001_ha->evtlog_ib_offset = 0;
+
+ return str - buf;
+}
+
+static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
+/**
+ * pm8001_ctl_ob_queue_log_show - Outbound queue log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int offset;
+ char *str = buf;
+ int start = 0;
+#define OB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[OB].virt_ptr + \
+ pm8001_ha->evtlog_ob_offset + (c)))
+
+ for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+ if (pm8001_ha->chip_id != chip_8001)
+ str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+ else
+ str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+ start = start + 4;
+ }
+ pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
+ if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id != chip_8001))
+ pm8001_ha->evtlog_ob_offset = 0;
+ if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id == chip_8001))
+ pm8001_ha->evtlog_ob_offset = 0;
+
+ return str - buf;
+}
+static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
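Editor's note: both queue-log show routines emit IB_OB_READ_TIMES dwords per read, then advance a persistent offset by SYSFS_OFFSET and wrap it at the queue size, so successive sysfs reads cycle through the whole event log. A compact userspace sketch of that chunked, wrapping dump; the names and sizes below only loosely mirror the driver's defines:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_WORDS     256             /* dwords emitted per read */
#define LOG_BYTES       (16 * 1024)     /* total size of the event log */

static uint32_t log_mem[LOG_BYTES / 4];
static size_t log_offset;               /* persists between reads */

size_t toy_dump_chunk(char *buf)
{
        char *p = buf;
        size_t i;

        for (i = 0; i < CHUNK_WORDS; i++)
                p += sprintf(p, "0x%08x\n", log_mem[(log_offset / 4) + i]);

        log_offset += CHUNK_WORDS * 4;
        if (log_offset % LOG_BYTES == 0)        /* wrap back to the start */
                log_offset = 0;

        return (size_t)(p - buf);
}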
+/**
+ * pm8001_ctl_bios_version_show - BIOS version display
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ char *str = buf;
+ void *virt_addr;
+ int bios_index;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+
+ pm8001_ha->nvmd_completion = &completion;
+ payload.minor_function = 7;
+ payload.offset = 0;
+ payload.length = 4096;
+ payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ wait_for_completion(&completion);
+ virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+ for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
+ bios_index++)
+ str += sprintf(str, "%c",
+ *((u8 *)((u8 *)virt_addr+bios_index)));
+ return str - buf;
+}
+static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
+/**
* pm8001_ctl_iop_log_show - IOP event log
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -344,6 +455,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
+/**
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm80xx_get_fatal_dump(cdev, attr, buf);
+ return count;
+}
+
+static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+
+
+/**
+ * pm8001_ctl_gsm_log_show - GSM dump collection
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
+ return count;
+}
+
+static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
+
#define FLASH_CMD_NONE 0x00
#define FLASH_CMD_UPDATE 0x01
#define FLASH_CMD_SET_NVMD 0x02
@@ -603,12 +751,17 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_update_fw,
&dev_attr_aap_log,
&dev_attr_iop_log,
+ &dev_attr_fatal_log,
+ &dev_attr_gsm_log,
&dev_attr_max_out_io,
&dev_attr_max_devices,
&dev_attr_max_sg_list,
&dev_attr_sas_spec_support,
&dev_attr_logging_level,
&dev_attr_host_sas_address,
+ &dev_attr_bios_version,
+ &dev_attr_ib_log,
+ &dev_attr_ob_log,
NULL,
};
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 63ad4aa0c42..d0d43a250b9 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,6 +45,8 @@
#define HEADER_LEN 28
#define SIZE_OFFSET 16
+#define BIOSOFFSET 56
+#define BIOS_OFFSET_LIMIT 61
#define FLASH_OK 0x000000
#define FAIL_OPEN_BIOS_FILE 0x000100
@@ -53,5 +55,9 @@
#define FAIL_OUT_MEMORY 0x000c00
#define FLASH_IN_PROGRESS 0x001000
+#define IB_OB_READ_TIMES 256
+#define SYSFS_OFFSET 1024
+#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024)
+#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024)
#endif /* PM8001_CTL_H_INCLUDED */
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 479c5a7a863..74a4bb9af07 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -46,7 +46,10 @@ enum chip_flavors {
chip_8008,
chip_8009,
chip_8018,
- chip_8019
+ chip_8019,
+ chip_8074,
+ chip_8076,
+ chip_8077
};
enum phy_speed {
@@ -99,7 +102,8 @@ enum memory_region_num {
NVMD, /* NVM device */
DEV_MEM, /* memory for devices */
CCB_MEM, /* memory for command control block */
- FW_FLASH /* memory for fw flash update */
+ FW_FLASH, /* memory for fw flash update */
+ FORENSIC_MEM /* memory for fw forensic data */
};
#define PM8001_EVENT_LOG_SIZE (128 * 1024)
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4a219575219..f16ece91b94 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1868,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+ /* Print the SAS address of the device on which the IO failed */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW))
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2276,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 param;
u32 status;
u32 tag;
+ int i, j;
+ u8 sata_addr_low[4];
+ u32 temp_sata_addr_low;
+ u8 sata_addr_hi[4];
+ u32 temp_sata_addr_hi;
struct sata_completion_resp *psataPayload;
struct task_status_struct *ts;
struct ata_task_resp *resp ;
@@ -2325,7 +2337,46 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
-
+ /* Print the SAS address of the device on which the IO failed */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW)) {
+ if (!((t->dev->parent) &&
+ (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
+ sata_addr_low[i] = pm8001_ha->sas_addr[j];
+ for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
+ sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+ memcpy(&temp_sata_addr_low, sata_addr_low,
+ sizeof(sata_addr_low));
+ memcpy(&temp_sata_addr_hi, sata_addr_hi,
+ sizeof(sata_addr_hi));
+ temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+ |((temp_sata_addr_hi << 8) &
+ 0xff0000) |
+ ((temp_sata_addr_hi >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_hi << 24) &
+ 0xff000000));
+ temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+ & 0xff) |
+ ((temp_sata_addr_low << 8)
+ & 0xff0000) |
+ ((temp_sata_addr_low >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_low << 24)
+ & 0xff000000)) +
+ pm8001_dev->attached_phy +
+ 0x10);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%08x%08x", temp_sata_addr_hi,
+ temp_sata_addr_low));
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ }
+ }
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
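Editor's note: the open-coded shifts above just reverse the byte order of each 32-bit half of the HBA's SAS address (stored as a big-endian byte array) and then fold attached_phy + 0x10 into the low half to synthesize the direct-attached SATA device's address for the log message. The same reversal can be written much more compactly; a hedged sketch, not the driver's code:

#include <stdint.h>
#include <string.h>

static uint32_t bswap32(uint32_t v)
{
        return ((v >> 24) & 0x000000ff) |
               ((v >>  8) & 0x0000ff00) |
               ((v <<  8) & 0x00ff0000) |
               ((v << 24) & 0xff000000);
}

/* sas_addr is the 8-byte, big-endian SAS address of the HBA. */
void toy_sata_addr(const uint8_t sas_addr[8], uint32_t attached_phy,
                   uint32_t *hi, uint32_t *lo)
{
        uint32_t raw_hi, raw_lo;

        memcpy(&raw_hi, &sas_addr[0], 4);
        memcpy(&raw_lo, &sas_addr[4], 4);
        *hi = bswap32(raw_hi);
        *lo = bswap32(raw_lo) + attached_phy + 0x10;
}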
@@ -3087,8 +3138,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = ccb->device;
u32 status = le32_to_cpu(pPayload->status);
u32 device_id = le32_to_cpu(pPayload->device_id);
- u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS;
- u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS;
+ u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
+ u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
"from 0x%x to 0x%x status = 0x%x!\n",
device_id, pds, nds, status));
@@ -4700,6 +4751,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+ if (pm8001_ha->chip_id != chip_8001)
+ sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
return ret;
@@ -4778,6 +4831,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
break;
}
+ case IOP_RDUMP: {
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
default:
break;
}
@@ -4938,6 +5001,89 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
return rc;
}
+ssize_t
+pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
+{
+ u32 value, rem, offset = 0, bar = 0;
+ u32 index, work_offset, dw_length;
+ u32 shift_value, gsm_base, gsm_dump_offset;
+ char *direct_data;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ direct_data = buf;
+ gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;
+
+ /* check max is 1 Mbytes */
+ if ((length > 0x100000) || (gsm_dump_offset & 3) ||
+ ((gsm_dump_offset + length) > 0x1000000))
+ return 1;
+
+ if (pm8001_ha->chip_id == chip_8001)
+ bar = 2;
+ else
+ bar = 1;
+
+ work_offset = gsm_dump_offset & 0xFFFF0000;
+ offset = gsm_dump_offset & 0x0000FFFF;
+ gsm_dump_offset = work_offset;
+ /* adjust length to dword boundary */
+ rem = length & 3;
+ dw_length = length >> 2;
+
+ for (index = 0; index < dw_length; index++) {
+ if ((work_offset + offset) & 0xFFFF0000) {
+ if (pm8001_ha->chip_id == chip_8001)
+ shift_value = ((gsm_dump_offset + offset) &
+ SHIFT_REG_64K_MASK);
+ else
+ shift_value = (((gsm_dump_offset + offset) &
+ SHIFT_REG_64K_MASK) >>
+ SHIFT_REG_BIT_SHIFT);
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ gsm_base = GSM_BASE;
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ (gsm_base + shift_value)))
+ return 1;
+ } else {
+ gsm_base = 0;
+ if (-1 == pm80xx_bar4_shift(pm8001_ha,
+ (gsm_base + shift_value)))
+ return 1;
+ }
+ gsm_dump_offset = (gsm_dump_offset + offset) &
+ 0xFFFF0000;
+ work_offset = 0;
+ offset = offset & 0x0000FFFF;
+ }
+ value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+ 0x0000FFFF);
+ direct_data += sprintf(direct_data, "%08x ", value);
+ offset += 4;
+ }
+ if (rem != 0) {
+ value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+ 0x0000FFFF);
+ /* transfer the trailing, non-dword-aligned bytes */
+ direct_data += sprintf(direct_data, "%08x ", value);
+ }
+ /* Shift back to BAR4 original address */
+ if (pm8001_ha->chip_id == chip_8001) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+ return 1;
+ } else {
+ if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
+ return 1;
+ }
+ pm8001_ha->fatal_forensic_shift_offset += 1024;
+
+ if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ return direct_data - buf;
+}
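Editor's note: pm8001_get_gsm_dump reads GSM space through a 64 KiB BAR window: it programs a shift register with the upper bits of the target offset, reads dwords through the low 16 bits, and advances a persistent offset so each sysfs read returns the next 1 KiB slice. A simplified userspace sketch of the window arithmetic only; the register access and chip-specific shift encoding are stubbed out and every name below is invented:

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE     0x10000u        /* 64 KiB visible through the BAR */

/* Stand-ins for programming the shift register and reading the BAR. */
void set_window_base(uint32_t base)     { (void)base; }
uint32_t read_window(uint32_t offset)   { return offset; }

/* Dump 'len' bytes starting at 'dump_offset', re-aiming the window
 * whenever the running offset would leave the current 64 KiB page. */
size_t toy_gsm_dump(uint32_t dump_offset, uint32_t len, char *buf)
{
        char *p = buf;
        uint32_t base = dump_offset & ~(WINDOW_SIZE - 1);
        uint32_t off  = dump_offset &  (WINDOW_SIZE - 1);
        uint32_t i;

        set_window_base(base);
        for (i = 0; i < len / 4; i++) {
                if (off >= WINDOW_SIZE) {       /* crossed the window */
                        base += WINDOW_SIZE;
                        off  -= WINDOW_SIZE;
                        set_window_base(base);
                }
                p += sprintf(p, "%08x ", read_window(off));
                off += 4;
        }
        return (size_t)(p - buf);
}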
+
int
pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev, u32 state)
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d7c1e203422..6d91e244654 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -1027,5 +1027,8 @@ struct set_dev_state_resp {
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
+#define GSM_BASE 0x4F0000
+#define SHIFT_REG_64K_MASK 0xffff0000
+#define SHIFT_REG_BIT_SHIFT 8
#endif
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f7c189606b8..34f5f5ffef0 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -54,6 +54,9 @@ static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
[chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
+ [chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
+ [chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
+ [chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
};
static int pm8001_id;
@@ -344,6 +347,10 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
/* Memory region for fw flash */
pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_mem_alloc(pm8001_ha->pdev,
&pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -664,6 +671,31 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
#endif
}
+/**
+ * pm8001_get_phy_settings_info - read PHY setting values
+ * @pm8001_ha: our hba
+ */
+void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+{
+
+#ifdef PM8001_READ_VPD
+ /*OPTION ROM FLASH read for the SPC cards */
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+
+ pm8001_ha->nvmd_completion = &completion;
+ /* SAS ADDRESS read from flash / EEPROM */
+ payload.minor_function = 6;
+ payload.offset = 0;
+ payload.length = 4096;
+ payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ /* Read phy setting values from flash */
+ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ wait_for_completion(&completion);
+ pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+#endif
+}
+
#ifdef PM8001_USE_MSIX
/**
* pm8001_setup_msix - enable MSI-X interrupt
@@ -844,6 +876,10 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
}
pm8001_init_sas_add(pm8001_ha);
+ /* phy setting support for motherboard controller */
+ if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
+ pdev->subsystem_vendor != 0)
+ pm8001_get_phy_settings_info(pm8001_ha);
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc)
@@ -873,7 +909,6 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
struct pm8001_hba_info *pm8001_ha;
int i;
pm8001_ha = sha->lldd_ha;
- pci_set_drvdata(pdev, NULL);
sas_unregister_ha(sha);
sas_remove_host(pm8001_ha->shost);
list_del(&pm8001_ha->list);
@@ -1037,6 +1072,12 @@ static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
@@ -1057,6 +1098,24 @@ static struct pci_device_id pm8001_pci_table[] = {
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
{} /* terminate list */
};
@@ -1108,8 +1167,11 @@ module_init(pm8001_init);
module_exit(pm8001_exit);
MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
+MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
+MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
MODULE_DESCRIPTION(
- "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
+ "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+ "SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index a85d73de7c8..f4eb18e5163 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -447,7 +447,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
rc = pm8001_task_prep_ata(pm8001_ha, ccb);
break;
default:
@@ -704,6 +703,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
int res, retry;
struct sas_task *task = NULL;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
for (retry = 0; retry < 3; retry++) {
task = sas_alloc_slow_task(GFP_KERNEL);
@@ -729,6 +730,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
goto ex_err;
}
wait_for_completion(&task->slow_task->completion);
+ if (pm8001_ha->chip_id != chip_8001) {
+ pm8001_dev->setds_completion = &completion_setstate;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion_setstate);
+ }
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 570819464d9..6037d477a18 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -104,6 +104,9 @@ do { \
#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
+#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \
+ || (dev->device == 0X8076) \
+ || (dev->device == 0X8077))
#define PM8001_NAME_LENGTH 32/* generic length of strings */
extern struct list_head hba_list;
@@ -129,6 +132,61 @@ struct pm8001_ioctl_payload {
u8 *func_specific;
};
+#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
+#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24)
+#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */
+#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */
+#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */
+#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
+#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
+#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
+#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3
+#define TYPE_GSM_SPACE 1
+#define TYPE_QUEUE 2
+#define TYPE_FATAL 3
+#define TYPE_NON_FATAL 4
+#define TYPE_INBOUND 1
+#define TYPE_OUTBOUND 2
+struct forensic_data {
+ u32 data_type;
+ union {
+ struct {
+ u32 direct_len;
+ u32 direct_offset;
+ void *direct_data;
+ } gsm_buf;
+ struct {
+ u16 queue_type;
+ u16 queue_index;
+ u32 direct_len;
+ void *direct_data;
+ } queue_buf;
+ struct {
+ u32 direct_len;
+ u32 direct_offset;
+ u32 read_len;
+ void *direct_data;
+ } data_buf;
+ };
+};
+
+/* bit31-26 - mask bar */
+#define SCRATCH_PAD0_BAR_MASK 0xFC000000
+/* bit25-0 - offset mask */
+#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF
+/* if AAP error state */
+#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF
+/* Inbound doorbell bit7 */
+#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80
+/* Inbound doorbell bit7 SPCV */
+#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80
+#define MAIN_MERRDCTO_MERRDCES 0xA0 /* DWORD 0x28 */
+
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -343,6 +401,7 @@ union main_cfg_table {
u32 phy_attr_table_offset;
u32 port_recovery_timer;
u32 interrupt_reassertion_delay;
+ u32 fatal_n_non_fatal_dump; /* 0x28 */
} pm80xx_tbl;
};
@@ -417,6 +476,13 @@ struct pm8001_hba_info {
struct pm8001_hba_memspace io_mem[6];
struct mpi_mem_req memoryMap;
struct encrypt encrypt_info; /* support encryption */
+ struct forensic_data forensic_info;
+ u32 fatal_bar_loc;
+ u32 forensic_last_offset;
+ u32 fatal_forensic_shift_offset;
+ u32 forensic_fatal_step;
+ u32 evtlog_ib_offset;
+ u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
@@ -425,6 +491,7 @@ struct pm8001_hba_info {
void __iomem *pspa_q_tbl_addr;
/*MPI SAS PHY attributes Queue Config Table Addr*/
void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
+ void __iomem *fatal_tbl_addr; /* MPI Fatal Error Table Addr */
union main_cfg_table main_cfg_tbl;
union general_status_table gs_tbl;
struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
@@ -629,7 +696,12 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
-
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+ u32 length, u8 *buf);
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf);
+ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9f91030211e..8987b170621 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -45,6 +45,228 @@
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
+
+
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
+{
+ u32 reg_val;
+ unsigned long start;
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
+ /* confirm the setting is written */
+ start = jiffies + HZ; /* 1 sec */
+ do {
+ reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
+ } while ((reg_val != shift_value) && time_before(jiffies, start));
+ if (reg_val != shift_value) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+ " = 0x%x\n", reg_val));
+ return -1;
+ }
+ return 0;
+}
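Editor's note: pm80xx_bar4_shift writes the shift register and then polls it back until the value sticks or roughly one second elapses. The same bounded write-then-poll pattern in plain userspace C, with a hypothetical predicate in place of the register read:

#include <stdbool.h>
#include <time.h>

/* Poll 'done()' until it returns true or 'timeout_ms' expires. */
bool toy_poll_until(bool (*done)(void), unsigned int timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (done())
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 >=
                    (long)timeout_ms)
                        return false;
        }
}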
+
+void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
+ const void *destination,
+ u32 dw_count, u32 bus_base_number)
+{
+ u32 index, value, offset;
+ u32 *destination1;
+ destination1 = (u32 *)destination;
+
+ for (index = 0; index < dw_count; index += 4, destination1++) {
+ offset = (soffset + index / 4);
+ if (offset < (64 * 1024)) {
+ value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
+ *destination1 = cpu_to_le32(value);
+ }
+ }
+ return;
+}
+
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 status = 1;
+ u32 accum_len , reg_val, index, *temp;
+ unsigned long start;
+ u8 *direct_data;
+ char *fatal_error_data = buf;
+
+ pm8001_ha->forensic_info.data_buf.direct_data = buf;
+ if (pm8001_ha->chip_id == chip_8001) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "Not supported for SPC controller");
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+ direct_data = (u8 *)fatal_error_data;
+ pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
+ pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ }
+
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+ /* start to get data */
+ /* Program the MEMBASE II Shifting Register with 0x00.*/
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_ha->forensic_last_offset = 0;
+ pm8001_ha->forensic_fatal_step = 0;
+ pm8001_ha->fatal_bar_loc = 0;
+ }
+ /* Read until accum_len is retrieved */
+ accum_len = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
+ accum_len));
+ if (accum_len == 0xFFFFFFFF) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Possible PCI issue 0x%x not expected\n",
+ accum_len));
+ return status;
+ }
+ if (accum_len == 0 || accum_len >= 0x100000) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (pm8001_ha->forensic_fatal_step == 0) {
+moreData:
+ if (pm8001_ha->forensic_info.data_buf.direct_data) {
+ /* Data is in bar, copy to host memory */
+ pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len ,
+ 1);
+ }
+ pm8001_ha->fatal_bar_loc +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_info.data_buf.direct_offset +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_last_offset +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_info.data_buf.read_len =
+ pm8001_ha->forensic_info.data_buf.direct_len;
+
+ if (pm8001_ha->forensic_last_offset >= accum_len) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 3);
+ for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+
+ pm8001_ha->fatal_bar_loc = 0;
+ pm8001_ha->forensic_fatal_step = 1;
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ pm8001_ha->forensic_last_offset = 0;
+ status = 0;
+ return (char *)pm8001_ha->
+ forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", 2);
+ for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+ status = 0;
+ return (char *)pm8001_ha->
+ forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+
+ /* Increment the MEMBASE II Shifting Register value by 0x100.*/
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 2);
+ for (index = 0; index < 256; index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+ pm8001_ha->fatal_forensic_shift_offset += 0x100;
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_ha->fatal_bar_loc = 0;
+ status = 0;
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->forensic_fatal_step == 1) {
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ /* Read 64K of the debug data. */
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /* Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
+ } while ((reg_val) && time_before(jiffies, start));
+
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+ " = 0x%x\n", reg_val));
+ return -1;
+ }
+
+ /* Read the next 64K of the debug data. */
+ pm8001_ha->forensic_fatal_step = 0;
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) !=
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ status = 0;
+ }
+ }
+
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+}
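Editor's note: pm80xx_get_fatal_dump is deliberately stateful: each sysfs read returns one chunk, prefixed by an in-band status word (2 for "more data", 3 and 4 at phase boundaries), and the offsets stashed in pm8001_hba_info decide where the next read resumes. The general shape, a chunked reader that keeps its cursor in a context struct and signals "more" versus "done" in-band, can be sketched as follows; all names here are invented:

#include <stdio.h>
#include <string.h>

struct toy_dump_ctx {
        const char *data;       /* full dump to hand out */
        size_t len;
        size_t cursor;          /* persists between reads */
};

/* Copy up to 'max' payload bytes into buf, prefixed by 2 ("more") or
 * 4 ("done"); buf must also have room for the 9-byte status prefix. */
size_t toy_dump_read(struct toy_dump_ctx *ctx, char *buf, size_t max)
{
        size_t left = ctx->len - ctx->cursor;
        size_t chunk = left < max ? left : max;
        char *p = buf;

        p += sprintf(p, "%08x ", chunk < left ? 2 : 4);
        memcpy(p, ctx->data + ctx->cursor, chunk);
        p += chunk;

        ctx->cursor += chunk;
        if (ctx->cursor == ctx->len)
                ctx->cursor = 0;        /* reset for the next full dump */
        return (size_t)(p - buf);
}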
+
/**
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
@@ -430,7 +652,11 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
table is updated */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
- max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+ if (IS_SPCV_12G(pm8001_ha->pdev)) {
+ max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ } else {
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ }
do {
udelay(1);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -579,6 +805,9 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->pspa_q_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
0xFFFFFF);
+ pm8001_ha->fatal_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
+ 0xFFFFFF);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("GST OFFSET 0x%x\n",
@@ -913,7 +1142,11 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
/* wait until Inbound DoorBell Clear Register toggled */
- max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
+ if (IS_SPCV_12G(pm8001_ha->pdev)) {
+ max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ } else {
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ }
do {
udelay(1);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -959,6 +1192,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 regval;
u32 bootloader_state;
+ u32 ibutton0, ibutton1;
/* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) {
@@ -1017,7 +1251,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (-1 == check_fw_ready(pm8001_ha)) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Firmware is not ready!\n"));
- return -EBUSY;
+ /* check iButton feature support for motherboard controller */
+ if (pm8001_ha->pdev->subsystem_vendor !=
+ PCI_VENDOR_ID_ADAPTEC2 &&
+ pm8001_ha->pdev->subsystem_vendor != 0) {
+ ibutton0 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_6);
+ ibutton1 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_7);
+ if (!ibutton0 && !ibutton1) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("iButton Feature is"
+ " not Available!!!\n"));
+ return -EBUSY;
+ }
+ if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("CRC Check for iButton"
+ " Feature Failed!!!\n"));
+ return -EBUSY;
+ }
+ }
}
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("SPCv soft reset Complete\n"));
@@ -1268,6 +1522,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+ /* Print the SAS address of the device on which the IO failed */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW))
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive"
+ ":%016llx", SAS_ADDR(t->dev->sas_addr)));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha,
@@ -1691,6 +1952,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 param;
u32 status;
u32 tag;
+ int i, j;
+ u8 sata_addr_low[4];
+ u32 temp_sata_addr_low, temp_sata_addr_hi;
+ u8 sata_addr_hi[4];
struct sata_completion_resp *psataPayload;
struct task_status_struct *ts;
struct ata_task_resp *resp ;
@@ -1740,7 +2005,47 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+ /* Print the SAS address of the device on which the IO failed */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW)) {
+ if (!((t->dev->parent) &&
+ (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
+ sata_addr_low[i] = pm8001_ha->sas_addr[j];
+ for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
+ sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+ memcpy(&temp_sata_addr_low, sata_addr_low,
+ sizeof(sata_addr_low));
+ memcpy(&temp_sata_addr_hi, sata_addr_hi,
+ sizeof(sata_addr_hi));
+ temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+ |((temp_sata_addr_hi << 8) &
+ 0xff0000) |
+ ((temp_sata_addr_hi >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_hi << 24) &
+ 0xff000000));
+ temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+ & 0xff) |
+ ((temp_sata_addr_low << 8)
+ & 0xff0000) |
+ ((temp_sata_addr_low >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_low << 24)
+ & 0xff000000)) +
+ pm8001_dev->attached_phy +
+ 0x10);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%08x%08x", temp_sata_addr_hi,
+ temp_sata_addr_low));
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ }
+ }
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@@ -3103,9 +3408,27 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ u8 page_code;
+ struct set_phy_profile_resp *pPayload =
+ (struct set_phy_profile_resp *)(piomb + 4);
+ u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
+ u32 status = le32_to_cpu(pPayload->status);
+ page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
+ if (status) {
+ /* status is FAILED */
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("PhyProfile command failed with status "
+ "0x%08X \n", status));
+ return -1;
+ } else {
+ if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Invalid page code 0x%X\n",
+ page_code));
+ return -1;
+ }
+ }
return 0;
}
@@ -3484,8 +3807,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
else
pm8001_ha->smp_exp_mode = SMP_INDIRECT;
- /* DIRECT MODE support only in spcv/ve */
- pm8001_ha->smp_exp_mode = SMP_DIRECT;
tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
preq_dma_addr = (char *)phys_to_virt(tmp_addr);
@@ -3501,7 +3822,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
/* exclude top 4 bytes for SMP req header */
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address
- (&task->smp_task.smp_req) - 4);
+ (&task->smp_task.smp_req) + 4);
/* exclude 4 bytes for SMP req header and CRC */
smp_cmd.long_smp_req.long_req_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
@@ -3604,10 +3925,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
int ret;
- u64 phys_addr;
+ u64 phys_addr, start_addr, end_addr;
+ u32 end_addr_high, end_addr_low;
struct inbound_queue_table *circularQ;
- static u32 inb;
- static u32 outb;
+ u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -3626,7 +3947,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
@@ -3658,6 +3980,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + ssp_cmd.enc_len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != ssp_cmd.enc_addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.enc_len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ ssp_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ }
} else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0;
ssp_cmd.enc_addr_high = 0;
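Editor's note: the 4G-boundary test added above (and repeated for the non-encrypted SSP path and both SATA paths below) simply compares the upper 32 bits of the first and last byte of the transfer; if they differ, the buffer straddles a 4 GiB boundary that the single-descriptor mode cannot express, so the code falls back to an external scatter/gather entry. A standalone sketch of the check itself:

#include <stdbool.h>
#include <stdint.h>

/* True if [addr, addr + len) crosses a 4 GiB boundary; assumes len > 0. */
bool toy_crosses_4g(uint64_t addr, uint32_t len)
{
        uint64_t end = addr + len - 1;

        return (uint32_t)(addr >> 32) != (uint32_t)(end >> 32);
}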
@@ -3674,7 +4020,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
} else {
PM8001_IO_DBG(pm8001_ha, pm8001_printk(
"Sending Normal SAS command 0x%x inb q %x\n",
- task->ssp_task.cmd->cmnd[0], inb));
+ task->ssp_task.cmd->cmnd[0], q_index));
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem,
@@ -3693,6 +4039,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + ssp_cmd.len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != ssp_cmd.addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ ssp_cmd.addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.esgl = cpu_to_le32(1<<31);
+ }
} else if (task->num_scatter == 0) {
ssp_cmd.addr_low = 0;
ssp_cmd.addr_high = 0;
@@ -3700,11 +4070,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0;
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
-
- /* rotate the outb queue */
- outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
-
+ q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ &ssp_cmd, q_index);
return ret;
}
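
The hunk above falls back to an external SGL whenever a single DMA segment would straddle a 4 GiB boundary, i.e. whenever the upper 32 bits of the last byte's address differ from those of the first byte. A minimal standalone sketch of that test, using plain uint64_t arithmetic instead of the driver's cpu_to_le32/upper_32_bits helpers (the function name is illustrative and len is assumed non-zero):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns true when [addr, addr + len) stays within one 4 GiB window,
 * i.e. the upper 32 bits of the first and last byte addresses match. */
static bool fits_in_4g_window(uint64_t addr, uint32_t len)
{
    uint64_t end = addr + len - 1;            /* address of the last byte */
    return (addr >> 32) == (end >> 32);
}

int main(void)
{
    /* 0xFFFFF000 + 0x2000 crosses the first 4 GiB boundary. */
    printf("%d\n", fits_in_4g_window(0xFFFFF000ULL, 0x2000));   /* prints 0 */
    printf("%d\n", fits_in_4g_window(0x1FFFFF000ULL, 0x800));   /* prints 1 */
    return 0;
}
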
@@ -3716,18 +4084,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag;
int ret;
- static u32 inb;
- static u32 outb;
+ u32 q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- u64 phys_addr;
+ u64 phys_addr, start_addr, end_addr;
+ u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
u32 dir;
struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
if (task->data_dir == PCI_DMA_NONE) {
ATAP = 0x04; /* no data*/
@@ -3788,6 +4157,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + sata_cmd.enc_len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != sata_cmd.enc_addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low"
+ "=0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.enc_len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ sata_cmd.enc_addr_low =
+ lower_32_bits(phys_addr);
+ sata_cmd.enc_addr_high =
+ upper_32_bits(phys_addr);
+ sata_cmd.enc_esgl =
+ cpu_to_le32(1 << 31);
+ }
} else if (task->num_scatter == 0) {
sata_cmd.enc_addr_low = 0;
sata_cmd.enc_addr_high = 0;
@@ -3808,7 +4202,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
} else {
PM8001_IO_DBG(pm8001_ha, pm8001_printk(
"Sending Normal SATA command 0x%x inb %x\n",
- sata_cmd.sata_fis.command, inb));
+ sata_cmd.sata_fis.command, q_index));
/* dad (bit 0-1) is 0 */
sata_cmd.ncqtag_atap_dir_m_dad =
cpu_to_le32(((ncg_tag & 0xff)<<16) |
@@ -3829,6 +4223,30 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + sata_cmd.len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != sata_cmd.addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x"
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ sata_cmd.addr_low =
+ lower_32_bits(phys_addr);
+ sata_cmd.addr_high =
+ upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1 << 31);
+ }
} else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0;
sata_cmd.addr_high = 0;
@@ -3905,12 +4323,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
}
-
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, outb++);
-
- /* rotate the outb queue */
- outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+ &sata_cmd, q_index);
return ret;
}
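
Both I/O paths above now select the inbound and outbound queues by hashing the low 24 bits of the device id modulo the queue count, replacing the old shared static counters. A sketch of that mapping, with the queue counts as illustrative constants standing in for PM8001_MAX_INB_NUM / PM8001_MAX_OUTB_NUM:

#include <stdint.h>
#include <stdio.h>

#define MAX_INB_NUM  64u   /* illustrative stand-in for PM8001_MAX_INB_NUM  */
#define MAX_OUTB_NUM 64u   /* illustrative stand-in for PM8001_MAX_OUTB_NUM */

/* Map a device id onto a queue so a given device always lands on the same queue. */
static unsigned inbound_queue_for(uint32_t dev_id)
{
    return (dev_id & 0x00ffffff) % MAX_INB_NUM;
}

static unsigned outbound_queue_for(uint32_t dev_id)
{
    return (dev_id & 0x00ffffff) % MAX_OUTB_NUM;
}

int main(void)
{
    printf("dev 0x12345 -> inb %u, outb %u\n",
           inbound_queue_for(0x12345), outbound_queue_for(0x12345));
    return 0;
}
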
@@ -3941,9 +4356,16 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
** [14] 0b disable spin up hold; 1b enable spin up hold
 ** [15] 0b no change in current PHY analog setup; 1b enable using SPAST
*/
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | phy_id);
+ if (!IS_SPCV_12G(pm8001_ha->pdev))
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | phy_id);
+ else
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
+ phy_id);
+
/* SSC Disable and SAS Analog ST configuration */
/**
payload.ase_sh_lm_slr_phyid =
@@ -4102,6 +4524,45 @@ pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
return IRQ_HANDLED;
}
+void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
+ u32 operation, u32 phyid, u32 length, u32 *buf)
+{
+ u32 tag, i, j = 0;
+ int rc;
+ struct set_phy_profile_req payload;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SET_PHY_PROFILE;
+
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk(" phy profile command for phy %x ,length is %d\n",
+ payload.ppc_phyid, length));
+ for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
+ payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
+ j++;
+ }
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+ u32 length, u8 *buf)
+{
+ u32 page_code, i;
+
+ page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ mpi_set_phy_profile_req(pm8001_ha,
+ SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
+ length = length + PHY_DWORD_LENGTH;
+ }
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+}
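
pm8001_set_phy_profile() above walks every phy and issues one SET_PHY_PROFILE request per phy, advancing through the settings buffer by PHY_DWORD_LENGTH dwords each iteration. A sketch of that per-phy slicing, assuming a flat dword buffer and an illustrative stand-in for the request function:

#include <stdint.h>
#include <stdio.h>

#define PHY_DWORD_LENGTH 0xC   /* dwords of analog settings per phy (see the header hunk below) */

/* Stand-in for mpi_set_phy_profile_req(): just report which slice goes to which phy. */
static void send_phy_profile(unsigned phy, const uint32_t *dwords, unsigned count)
{
    printf("phy %u: first dword 0x%08x, %u dwords\n", phy, (unsigned)dwords[0], count);
}

static void set_phy_profiles(const uint32_t *buf, unsigned n_phy)
{
    unsigned offset = 0;

    for (unsigned phy = 0; phy < n_phy; phy++) {
        send_phy_profile(phy, buf + offset, PHY_DWORD_LENGTH);
        offset += PHY_DWORD_LENGTH;    /* next phy's analog-settings block */
    }
}

int main(void)
{
    uint32_t settings[8 * PHY_DWORD_LENGTH] = { [0] = 0xdeadbeef };
    set_phy_profiles(settings, 8);
    return 0;
}
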
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 2b760ba75d7..c86816bea42 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -168,6 +168,11 @@
#define LINKRATE_15 (0x01 << 8)
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x06 << 8)
+#define LINKRATE_120 (0x08 << 8)
+
+/* phy_profile */
+#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
+#define PHY_DWORD_LENGTH 0xC
/* Thermal related */
#define THERMAL_ENABLE 0x1
@@ -1223,10 +1228,10 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
/* MSGU CONFIGURATION TABLE*/
-#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01
-#define SPCv_MSGU_CFG_TABLE_RESET 0x02
-#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04
-#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08
+#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001
+#define SPCv_MSGU_CFG_TABLE_RESET 0x002
+#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008
#define MSGU_IBDB_SET 0x00
#define MSGU_HOST_INT_STATUS 0x08
#define MSGU_HOST_INT_MASK 0x0C
@@ -1520,4 +1525,6 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
+
+#define MEMBASE_II_SHIFT_REGISTER 0x1010
#endif
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 1eb7b0280a4..bd6f743d87a 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1512,7 +1512,8 @@ static int pmcraid_notify_aen(
}
result =
- genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
+ genlmsg_multicast(&pmcraid_event_family, skb, 0,
+ pmcraid_event_family.id, GFP_ATOMIC);
/* If there are no listeners, genlmsg_multicast may return non-zero
* value.
@@ -6049,7 +6050,6 @@ out_release_regions:
out_disable_device:
atomic_dec(&pmcraid_adapter_count);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 62ee7131b20..30d20e74e48 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* PCIe -- adjust Maximum Read Request Size (2048). */
- if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(ha->pdev))
pcie_set_readrq(ha->pdev, 2048);
ha->chip_revision = ha->pdev->revision;
@@ -660,10 +660,8 @@ char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
struct qla_hw_data *ha = vha->hw;
- int pcie_reg;
- pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
+ if (pci_is_pcie(ha->pdev)) {
strcpy(str, "PCIe iSA");
return str;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9f01bbbf3a2..52be35e0300 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -494,18 +494,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
static char *pci_bus_modes[] = { "33", "66", "100", "133", };
struct qla_hw_data *ha = vha->hw;
uint32_t pci_bus;
- int pcie_reg;
- pcie_reg = pci_pcie_cap(ha->pdev);
- if (pcie_reg) {
+ if (pci_is_pcie(ha->pdev)) {
char lwstr[6];
- uint16_t pcie_lstat, lspeed, lwidth;
+ uint32_t lstat, lspeed, lwidth;
- pcie_reg += PCI_EXP_LNKCAP;
- pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
- lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
- lwidth = (pcie_lstat &
- (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
+ pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+ lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+ lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
strcpy(str, "PCIe (");
switch (lspeed) {
@@ -3183,7 +3179,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void
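
The qla_os.c hunk above replaces a hand-rolled config-space read of the Link Capabilities register with pcie_capability_read_dword() and the standard PCI_EXP_LNKCAP_SLS / PCI_EXP_LNKCAP_MLW masks. A standalone sketch of decoding link speed and width from a raw LNKCAP value, with the mask values written out explicitly:

#include <stdint.h>
#include <stdio.h>

#define LNKCAP_SLS 0x0000000f   /* Supported Link Speeds field (PCI_EXP_LNKCAP_SLS) */
#define LNKCAP_MLW 0x000003f0   /* Maximum Link Width field   (PCI_EXP_LNKCAP_MLW) */

static void decode_lnkcap(uint32_t lnkcap)
{
    unsigned speed = lnkcap & LNKCAP_SLS;         /* 1 = 2.5 GT/s, 2 = 5 GT/s, ... */
    unsigned width = (lnkcap & LNKCAP_MLW) >> 4;  /* lane count: x1, x4, x8, ...   */

    printf("link speed code %u, x%u lanes\n", speed, width);
}

int main(void)
{
    decode_lnkcap(0x00000042);   /* speed code 2 (5 GT/s), x4 */
    return 0;
}
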
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index f85b9e5c1f0..7eb19be35d4 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+ return tpg->tpg_attrib.generate_node_acls;
}
static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
@@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+ return tpg->tpg_attrib.cache_dynamic_acls;
}
static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
@@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+ return tpg->tpg_attrib.demo_mode_write_protect;
}
static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
@@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+ return tpg->tpg_attrib.prod_mode_write_protect;
}
static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
@@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
+ return tpg->tpg_attrib.demo_mode_login_only;
}
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
@@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
\
- return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
} \
\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
@@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
* By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
* NodeACLs
*/
- QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
- QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
- QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
- QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
tcm_qla2xxx_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/
@@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void)
/*
* Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the npiv_fabric for use within TCM
*/
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 329327528a5..771f7b81644 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg {
struct se_portal_group se_tpg;
};
-#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
-
struct tcm_qla2xxx_fc_loopid {
struct se_node_acl *se_nacl;
};
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 41327d46ecf..084d1fd59c9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -306,6 +306,7 @@ struct ddb_entry {
struct qla_ddb_index {
struct list_head list;
uint16_t fw_ddb_idx;
+ uint16_t flash_ddb_idx;
struct dev_db_entry fw_ddb;
uint8_t flash_isid[6];
};
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 51d1a70f8b4..1243e5942b7 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -539,6 +539,10 @@ struct qla_flt_region {
#define ENABLE_INTERNAL_LOOPBACK 0x04
#define ENABLE_EXTERNAL_LOOPBACK 0x08
+/* generic defines to enable/disable params */
+#define QL4_PARAM_DISABLE 0
+#define QL4_PARAM_ENABLE 1
+
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index e6f2a2669db..5cef2527180 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
char *password, int bidi, uint16_t *chap_index);
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx, int bidi);
void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 8503ad643bd..655b7bb644d 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha)
__qla4xxx_disable_intrs(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+
+static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
+{
+ int type;
+
+ if (chap_entry->flags & BIT_7)
+ type = LOCAL_CHAP;
+ else
+ type = BIDI_CHAP;
+
+ return type;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 62d4208af21..22cbd005bdf 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1530,13 +1530,26 @@ exit_get_chap:
return ret;
}
-static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
- char *password, uint16_t idx, int bidi)
+/**
+ * qla4xxx_set_chap - Make a chap entry at the given index
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to set
+ * @password: CHAP password to set
+ * @idx: CHAP index at which to make the entry
+ * @bidi: type of chap entry (chap_in or chap_out)
+ *
+ * Create chap entry at the given index with the information provided.
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx, int bidi)
{
int ret = 0;
int rval = QLA_ERROR;
uint32_t offset = 0;
struct ql4_chap_table *chap_table;
+ uint32_t chap_size = 0;
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
@@ -1554,7 +1567,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
- offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
+
+ if (is_qla40XX(ha)) {
+ chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
+ offset = FLASH_CHAP_OFFSET;
+ } else { /* Single region contains CHAP info for both ports which is
+ * divided into half for each port.
+ */
+ chap_size = ha->hw.flt_chap_size / 2;
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ if (ha->port_num == 1)
+ offset += chap_size;
+ }
+
+ offset += (idx * sizeof(struct ql4_chap_table));
rval = qla4xxx_set_flash(ha, chap_dma, offset,
sizeof(struct ql4_chap_table),
FLASH_OPT_RMW_COMMIT);
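
The offset computation above now distinguishes the 40xx parts, which have a fixed CHAP region, from the newer parts, where a single flash region holds both ports' CHAP tables and port 1 starts half way in. A small sketch of the arithmetic, with the region base, region size and entry size as purely illustrative parameters:

#include <stdio.h>

/* Flash offset of CHAP entry 'idx' on a two-port part whose single CHAP
 * region is split in half between the ports (illustrative numbers). */
static unsigned chap_entry_offset(unsigned region_base, unsigned region_size,
                                  unsigned entry_size, int port_num, unsigned idx)
{
    unsigned half = region_size / 2;   /* each port owns half the region        */
    unsigned off  = region_base;

    if (port_num == 1)
        off += half;                   /* port 1's half starts after port 0's   */

    return off + idx * entry_size;
}

int main(void)
{
    /* e.g. region at 0x10000, 8 KiB region, 96-byte entries */
    printf("port0 idx3 -> 0x%x\n", chap_entry_offset(0x10000, 0x2000, 96, 0, 3));
    printf("port1 idx3 -> 0x%x\n", chap_entry_offset(0x10000, 0x2000, 96, 1, 3));
    return 0;
}
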
@@ -1611,7 +1637,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
goto exit_unlock_uni_chap;
}
- if (!(chap_table->flags & BIT_6)) {
+ if (!(chap_table->flags & BIT_7)) {
ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f8a0a26a3cd..a28d5e624aa 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -149,6 +149,8 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
+ int len);
/*
* SCSI host template entry points
@@ -252,6 +254,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.send_ping = qla4xxx_send_ping,
.get_chap = qla4xxx_get_chap_list,
.delete_chap = qla4xxx_delete_chap,
+ .set_chap = qla4xxx_set_chap_entry,
.get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
.set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
.new_flashnode = qla4xxx_sysfs_ddb_add,
@@ -508,6 +511,95 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
return 0;
}
+static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
+ int16_t chap_index,
+ struct ql4_chap_table **chap_entry)
+{
+ int rval = QLA_ERROR;
+ int max_chap_entries;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+ rval = QLA_ERROR;
+ goto exit_get_chap;
+ }
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_index > max_chap_entries) {
+ ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+ rval = QLA_ERROR;
+ goto exit_get_chap;
+ }
+
+ *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
+ if ((*chap_entry)->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ rval = QLA_ERROR;
+ *chap_entry = NULL;
+ } else {
+ rval = QLA_SUCCESS;
+ }
+
+exit_get_chap:
+ return rval;
+}
+
+/**
+ * qla4xxx_find_free_chap_index - Find the first free chap index
+ * @ha: pointer to adapter structure
+ * @chap_index: CHAP index to be returned
+ *
+ * Find the first free chap index available in the chap table
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
+ uint16_t *chap_index)
+{
+ int i, rval;
+ int free_index = -1;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+ rval = QLA_ERROR;
+ goto exit_find_chap;
+ }
+
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+
+ if ((chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+ (i > MAX_RESRV_CHAP_IDX)) {
+ free_index = i;
+ break;
+ }
+ }
+
+ if (free_index != -1) {
+ *chap_index = free_index;
+ rval = QLA_SUCCESS;
+ } else {
+ rval = QLA_ERROR;
+ }
+
+exit_find_chap:
+ return rval;
+}
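
qla4xxx_find_free_chap_index() above scans the cached CHAP table for the first slot whose cookie is not CHAP_VALID_COOKIE, skipping the reserved low indices. A plain-C sketch of that scan, with the cookie value and reserved-index limit as stand-ins:

#include <stdint.h>
#include <stdio.h>

#define VALID_COOKIE  0x4153   /* stand-in for CHAP_VALID_COOKIE  */
#define MAX_RESRV_IDX 7        /* stand-in for MAX_RESRV_CHAP_IDX */

struct chap_slot {
    uint16_t cookie;           /* VALID_COOKIE when the slot is in use */
};

/* Return the first free, non-reserved index, or -1 if none is available. */
static int find_free_chap_index(const struct chap_slot *tbl, int nentries)
{
    for (int i = 0; i < nentries; i++) {
        if (tbl[i].cookie != VALID_COOKIE && i > MAX_RESRV_IDX)
            return i;
    }
    return -1;
}

int main(void)
{
    struct chap_slot tbl[16] = { 0 };
    tbl[8].cookie = VALID_COOKIE;     /* index 8 already used */
    printf("first free index: %d\n", find_free_chap_index(tbl, 16));  /* -> 9 */
    return 0;
}
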
+
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf)
{
@@ -691,6 +783,111 @@ exit_delete_chap:
return ret;
}
+/**
+ * qla4xxx_set_chap_entry - Make chap entry with given information
+ * @shost: pointer to host
+ * @data: chap info - credentials, index and type to make chap entry
+ * @len: length of data
+ *
+ * Add or update chap entry with the given information
+ **/
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_chap_rec chap_rec;
+ struct ql4_chap_table *chap_entry = NULL;
+ struct iscsi_param_info *param_info;
+ struct nlattr *attr;
+ int max_chap_entries = 0;
+ int type;
+ int rem = len;
+ int rc = 0;
+
+ memset(&chap_rec, 0, sizeof(chap_rec));
+
+ nla_for_each_attr(attr, data, len, rem) {
+ param_info = nla_data(attr);
+
+ switch (param_info->param) {
+ case ISCSI_CHAP_PARAM_INDEX:
+ chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
+ break;
+ case ISCSI_CHAP_PARAM_CHAP_TYPE:
+ chap_rec.chap_type = param_info->value[0];
+ break;
+ case ISCSI_CHAP_PARAM_USERNAME:
+ memcpy(chap_rec.username, param_info->value,
+ param_info->len);
+ break;
+ case ISCSI_CHAP_PARAM_PASSWORD:
+ memcpy(chap_rec.password, param_info->value,
+ param_info->len);
+ break;
+ case ISCSI_CHAP_PARAM_PASSWORD_LEN:
+ chap_rec.password_length = param_info->value[0];
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha,
+ "%s: No such sysfs attribute\n", __func__);
+ rc = -ENOSYS;
+ goto exit_set_chap;
+ }
+ }
+
+ if (chap_rec.chap_type == CHAP_TYPE_IN)
+ type = BIDI_CHAP;
+ else
+ type = LOCAL_CHAP;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ mutex_lock(&ha->chap_sem);
+ if (chap_rec.chap_tbl_idx < max_chap_entries) {
+ rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
+ &chap_entry);
+ if (!rc) {
+ if (!(type == qla4xxx_get_chap_type(chap_entry))) {
+ ql4_printk(KERN_INFO, ha,
+ "Type mismatch for CHAP entry %d\n",
+ chap_rec.chap_tbl_idx);
+ rc = -EINVAL;
+ goto exit_unlock_chap;
+ }
+
+ /* If chap index is in use then don't modify it */
+ rc = qla4xxx_is_chap_active(shost,
+ chap_rec.chap_tbl_idx);
+ if (rc) {
+ ql4_printk(KERN_INFO, ha,
+ "CHAP entry %d is in use\n",
+ chap_rec.chap_tbl_idx);
+ rc = -EBUSY;
+ goto exit_unlock_chap;
+ }
+ }
+ } else {
+ rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
+ if (rc) {
+ ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
+ rc = -EBUSY;
+ goto exit_unlock_chap;
+ }
+ }
+
+ rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
+ chap_rec.chap_tbl_idx, type);
+
+exit_unlock_chap:
+ mutex_unlock(&ha->chap_sem);
+
+exit_set_chap:
+ return rc;
+}
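
qla4xxx_set_chap_entry() above takes two paths: if the caller supplied a valid index it checks that the entry type matches and that the entry is not currently in use, otherwise it allocates the first free index. A compact, simplified sketch of that decision flow, with the helper names purely illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/* Illustrative stand-ins for the driver's helpers. */
static bool index_valid(int idx, int max)         { return idx >= 0 && idx < max; }
static bool entry_type_matches(int idx, int type) { (void)idx; (void)type; return true; }
static bool entry_in_use(int idx)                 { (void)idx; return false; }
static int  find_free_index(void)                 { return 9; }

/* Returns the index to write, or a negative errno. */
static int pick_chap_index(int requested_idx, int type, int max_entries)
{
    if (index_valid(requested_idx, max_entries)) {
        if (!entry_type_matches(requested_idx, type))
            return -EINVAL;       /* don't silently flip CHAP-in <-> CHAP-out */
        if (entry_in_use(requested_idx))
            return -EBUSY;        /* an active session still references it    */
        return requested_idx;     /* update the existing entry                */
    }
    return find_free_index();     /* no usable index given: allocate one      */
}

int main(void)
{
    printf("picked index %d\n", pick_chap_index(3, 0, 128));
    return 0;
}
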
+
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf)
@@ -1455,9 +1652,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
struct iscsi_session *sess = cls_sess->dd_data;
struct ddb_entry *ddb_entry = sess->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha;
+ struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
+ struct ql4_chap_table chap_tbl;
int rval, len;
uint16_t idx;
+ memset(&chap_tbl, 0, sizeof(chap_tbl));
switch (param) {
case ISCSI_PARAM_CHAP_IN_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username_in,
@@ -1469,14 +1669,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
- rval = qla4xxx_get_chap_index(ha, sess->username,
- sess->password, LOCAL_CHAP,
- &idx);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+ idx = ddb_entry->chap_tbl_idx;
+ rval = QLA_SUCCESS;
+ } else {
+ rval = QLA_ERROR;
+ }
+ } else {
+ rval = qla4xxx_get_chap_index(ha, sess->username,
+ sess->password,
+ LOCAL_CHAP, &idx);
+ }
if (rval)
len = sprintf(buf, "\n");
else
len = sprintf(buf, "%hu\n", idx);
break;
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ /* First, populate session username and password for FLASH DDB,
+ * if not already done. This happens when session login fails
+ * for a FLASH DDB.
+ */
+ if (ddb_entry->ddb_type == FLASH_DDB &&
+ ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
+ !sess->username && !sess->password) {
+ idx = ddb_entry->chap_tbl_idx;
+ rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+ chap_tbl.secret,
+ idx);
+ if (!rval) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+ (char *)chap_tbl.name,
+ strlen((char *)chap_tbl.name));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+ (char *)chap_tbl.secret,
+ chap_tbl.secret_len);
+ }
+ }
+ /* allow fall-through */
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
@@ -2373,11 +2605,6 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
COPY_ISID(sess->isid, fw_ddb_entry->isid);
ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
- if (ddb_link < MAX_DDB_ENTRIES)
- sess->discovery_parent_idx = ddb_link;
- else
- sess->discovery_parent_idx = DDB_NO_LINK;
-
if (ddb_link == DDB_ISNS)
disc_parent = ISCSI_DISC_PARENT_ISNS;
else if (ddb_link == DDB_NO_LINK)
@@ -2402,6 +2629,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
int buflen = 0;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
+ struct ql4_chap_table chap_tbl;
struct iscsi_conn *conn;
char ip_addr[DDB_IPADDR_LEN];
uint16_t options = 0;
@@ -2409,6 +2637,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
conn = cls_conn->dd_data;
+ memset(&chap_tbl, 0, sizeof(chap_tbl));
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
@@ -2435,6 +2664,19 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
(char *)fw_ddb_entry->iscsi_name, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
(char *)ha->name_string, buflen);
+
+ if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+ if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+ chap_tbl.secret,
+ ddb_entry->chap_tbl_idx)) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+ (char *)chap_tbl.name,
+ strlen((char *)chap_tbl.name));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+ (char *)chap_tbl.secret,
+ chap_tbl.secret_len);
+ }
+ }
}
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -4937,7 +5179,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
}
static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
- struct dev_db_entry *fw_ddb_entry)
+ struct dev_db_entry *fw_ddb_entry,
+ uint32_t *index)
{
struct ddb_entry *ddb_entry;
struct ql4_tuple_ddb *fw_tddb = NULL;
@@ -4971,6 +5214,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
ret = QLA_SUCCESS; /* found */
+ if (index != NULL)
+ *index = idx;
goto exit_check;
}
}
@@ -5206,6 +5451,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
ddb_entry->ha = ha;
ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+ ddb_entry->chap_tbl_idx = INVALID_ENTRY;
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
@@ -5267,6 +5513,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
} while (time_after(wtime, jiffies));
}
+static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
+ struct dev_db_entry *flash_ddb_entry)
+{
+ uint16_t options = 0;
+ size_t ip_len = IP_ADDR_LEN;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ ip_len = IPv6_ADDR_LEN;
+
+ if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
+ return QLA_ERROR;
+
+ if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
+ sizeof(fw_ddb_entry->isid)))
+ return QLA_ERROR;
+
+ if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
+ sizeof(fw_ddb_entry->port)))
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint32_t fw_idx, uint32_t *flash_index)
+{
+ struct dev_db_entry *flash_ddb_entry;
+ dma_addr_t flash_ddb_entry_dma;
+ uint32_t idx = 0;
+ int max_ddbs;
+ int ret = QLA_ERROR, status;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &flash_ddb_entry_dma);
+ if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "Out of memory\n");
+ goto exit_find_st_idx;
+ }
+
+ status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+ flash_ddb_entry_dma, fw_idx);
+ if (status == QLA_SUCCESS) {
+ status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+ if (status == QLA_SUCCESS) {
+ *flash_index = fw_idx;
+ ret = QLA_SUCCESS;
+ goto exit_find_st_idx;
+ }
+ }
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+ flash_ddb_entry_dma, idx);
+ if (status == QLA_ERROR)
+ continue;
+
+ status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+ if (status == QLA_SUCCESS) {
+ *flash_index = idx;
+ ret = QLA_SUCCESS;
+ goto exit_find_st_idx;
+ }
+ }
+
+ if (idx == max_ddbs)
+ ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
+ fw_idx);
+
+exit_find_st_idx:
+ if (flash_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
+ flash_ddb_entry_dma);
+
+ return ret;
+}
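
qla4xxx_find_flash_st_idx() above first probes the same index in flash and only falls back to a full linear scan when the ip/isid/port tuple does not match. The same "fast path, then scan" shape in a standalone sketch, matching entries by a single key for brevity:

#include <stdio.h>

/* Try the hinted index first, then scan the whole table (illustrative). */
static int find_entry(const int *table, int nentries, int hint, int key)
{
    if (hint >= 0 && hint < nentries && table[hint] == key)
        return hint;                    /* fast path: firmware and flash agree */

    for (int i = 0; i < nentries; i++)  /* slow path: indices were reshuffled  */
        if (table[i] == key)
            return i;

    return -1;                          /* no flash entry for this target      */
}

int main(void)
{
    int flash[] = { 11, 22, 33, 44 };
    printf("%d\n", find_entry(flash, 4, 1, 22));   /* hint hits   -> 1 */
    printf("%d\n", find_entry(flash, 4, 0, 44));   /* hint misses -> 3 */
    return 0;
}
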
+
static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
struct list_head *list_st)
{
@@ -5278,6 +5605,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
+ uint32_t flash_index = -1;
uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5310,6 +5638,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
if (!st_ddb_idx)
break;
+ ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
+ &flash_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha,
+ "No flash entry for ST at idx [%d]\n", idx);
+ st_ddb_idx->flash_ddb_idx = idx;
+ } else {
+ ql4_printk(KERN_INFO, ha,
+ "ST at idx [%d] is stored at flash [%d]\n",
+ idx, flash_index);
+ st_ddb_idx->flash_ddb_idx = flash_index;
+ }
+
st_ddb_idx->fw_ddb_idx = idx;
list_add_tail(&st_ddb_idx->list, list_st);
@@ -5354,6 +5695,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
}
}
+static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ uint32_t max_ddbs = 0;
+ uint16_t ddb_link = -1;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < max_ddbs)
+ sess->discovery_parent_idx = ddb_link;
+ else
+ sess->discovery_parent_idx = DDB_NO_LINK;
+}
+
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
int is_reset, uint16_t idx)
@@ -5418,6 +5781,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
/* Update sess/conn params */
qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+ qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
if (is_reset == RESET_ADAPTER) {
iscsi_block_session(cls_sess);
@@ -5434,17 +5798,43 @@ exit_setup:
return ret;
}
+static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
+ struct list_head *list_ddb,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint16_t ddb_link;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ if (ddb_idx->fw_ddb_idx == ddb_link) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Updating NT parent idx from [%d] to [%d]\n",
+ ddb_link, ddb_idx->flash_ddb_idx));
+ fw_ddb_entry->ddb_link =
+ cpu_to_le16(ddb_idx->flash_ddb_idx);
+ return;
+ }
+ }
+}
+
static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
- struct list_head *list_nt, int is_reset)
+ struct list_head *list_nt,
+ struct list_head *list_st,
+ int is_reset)
{
struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
dma_addr_t fw_ddb_dma;
int max_ddbs;
int fw_idx_size;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
+ uint32_t ddb_idx = -1;
uint16_t conn_id = 0;
+ uint16_t ddb_link = -1;
struct qla_ddb_index *nt_ddb_idx;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5471,12 +5861,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt;
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < max_ddbs)
+ qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
+
if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED))
+ state == DDB_DS_SESSION_FAILED) &&
+ (is_reset == INIT_ADAPTER))
goto continue_next_nt;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Adding DDB to session = 0x%x\n", idx));
+
if (is_reset == INIT_ADAPTER) {
nt_ddb_idx = vmalloc(fw_idx_size);
if (!nt_ddb_idx)
@@ -5506,9 +5902,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
list_add_tail(&nt_ddb_idx->list, list_nt);
} else if (is_reset == RESET_ADAPTER) {
- if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
- QLA_SUCCESS)
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
+ &ddb_idx);
+ if (ret == QLA_SUCCESS) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+ ddb_idx);
+ if (ddb_entry != NULL)
+ qla4xxx_update_sess_disc_idx(ha,
+ ddb_entry,
+ fw_ddb_entry);
goto continue_next_nt;
+ }
}
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
@@ -5526,7 +5930,8 @@ exit_nt_list:
}
static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
- struct list_head *list_nt)
+ struct list_head *list_nt,
+ uint16_t target_id)
{
struct dev_db_entry *fw_ddb_entry;
dma_addr_t fw_ddb_dma;
@@ -5571,13 +5976,16 @@ static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
nt_ddb_idx->fw_ddb_idx = idx;
- ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
if (ret == QLA_SUCCESS) {
/* free nt_ddb_idx and do not add to list_nt */
vfree(nt_ddb_idx);
goto continue_next_new_nt;
}
+ if (target_id < max_ddbs)
+ fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
+
list_add_tail(&nt_ddb_idx->list, list_nt);
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
@@ -5894,7 +6302,8 @@ exit_ddb_conn_open:
}
static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
- struct dev_db_entry *fw_ddb_entry)
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t target_id)
{
struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
struct list_head list_nt;
@@ -5919,7 +6328,7 @@ static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
if (ret == QLA_ERROR)
goto exit_login_st;
- qla4xxx_build_new_nt_list(ha, &list_nt);
+ qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
list_del_init(&ddb_idx->list);
@@ -5946,7 +6355,7 @@ static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
{
int ret = QLA_ERROR;
- ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
if (ret != QLA_SUCCESS)
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
idx);
@@ -6001,7 +6410,8 @@ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
fw_ddb_entry->cookie = DDB_VALID_COOKIE;
if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
- ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
+ ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
+ fnode_sess->target_id);
else
ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
fnode_sess->target_id);
@@ -6522,10 +6932,13 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
struct scsi_qla_host *ha = to_qla_host(shost);
struct iscsi_flashnode_param_info *fnode_param;
+ struct ql4_chap_table chap_tbl;
struct nlattr *attr;
+ uint16_t chap_out_idx = INVALID_ENTRY;
int rc = QLA_ERROR;
uint32_t rem = len;
+ memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) {
fnode_param = nla_data(attr);
@@ -6567,6 +6980,10 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
break;
case ISCSI_FLASHNODE_CHAP_AUTH_EN:
fnode_sess->chap_auth_en = fnode_param->value[0];
+ /* Invalidate chap index if chap auth is disabled */
+ if (!fnode_sess->chap_auth_en)
+ fnode_sess->chap_out_idx = INVALID_ENTRY;
+
break;
case ISCSI_FLASHNODE_SNACK_REQ_EN:
fnode_conn->snack_req_en = fnode_param->value[0];
@@ -6705,6 +7122,17 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
fnode_conn->exp_statsn =
*(uint32_t *)fnode_param->value;
break;
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ chap_out_idx = *(uint16_t *)fnode_param->value;
+ if (!qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ chap_out_idx)) {
+ fnode_sess->chap_out_idx = chap_out_idx;
+ /* Enable chap auth if chap index is valid */
+ fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
+ }
+ break;
default:
ql4_printk(KERN_ERR, ha,
"%s: No such sysfs attribute\n", __func__);
@@ -6926,11 +7354,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
schedule_timeout_uninterruptible(HZ / 10);
} while (time_after(wtime, jiffies));
- /* Free up the sendtargets list */
- qla4xxx_free_ddb_list(&list_st);
- qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+ qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
+ qla4xxx_free_ddb_list(&list_st);
qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha);
@@ -7400,7 +7827,6 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/**
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index eaa808e6ba9..fe0bcb18fb2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -78,11 +78,6 @@ static void scsi_done(struct scsi_cmnd *cmd);
* Definitions and constants.
*/
-#define MIN_RESET_DELAY (2*HZ)
-
-/* Do not call reset on error if we just did a reset within 15 sec. */
-#define MIN_RESET_PERIOD (15*HZ)
-
/*
* Note - the initial logging level can be set here to log events at boot time.
* After the system is up, you may enable logging via the /proc interface.
@@ -658,7 +653,6 @@ EXPORT_SYMBOL(scsi_cmd_get_serial);
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
- unsigned long timeout;
int rtn = 0;
atomic_inc(&cmd->device->iorequest_cnt);
@@ -704,28 +698,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
(cmd->device->lun << 5 & 0xe0);
}
- /*
- * We will wait MIN_RESET_DELAY clock ticks after the last reset so
- * we can avoid the drive not being ready.
- */
- timeout = host->last_reset + MIN_RESET_DELAY;
-
- if (host->resetting && time_before(jiffies, timeout)) {
- int ticks_remaining = timeout - jiffies;
- /*
- * NOTE: This may be executed from within an interrupt
- * handler! This is bad, but for now, it'll do. The irq
- * level of the interrupt handler has been masked out by the
- * platform dependent interrupt handling code already, so the
- * sti() here will not cause another call to the SCSI host's
- * interrupt handler (assuming there is one irq-level per
- * host).
- */
- while (--ticks_remaining >= 0)
- mdelay(1 + 999 / HZ);
- host->resetting = 0;
- }
-
scsi_log_send(cmd);
/*
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 01c0ffa3127..80b8b10edf4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -169,7 +169,7 @@ static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
-static int scsi_debug_guard = DEF_GUARD;
+static unsigned int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
@@ -293,6 +293,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
0, 0, 0x0, 0x0};
+static void *fake_store(unsigned long long lba)
+{
+ lba = do_div(lba, sdebug_store_sectors);
+
+ return fake_storep + lba * scsi_debug_sector_size;
+}
+
+static struct sd_dif_tuple *dif_store(sector_t sector)
+{
+ sector = do_div(sector, sdebug_store_sectors);
+
+ return dif_storep + sector;
+}
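
The new fake_store()/dif_store() helpers above fold any LBA or sector back into the simulated store with do_div(), which returns the remainder; that is how the ramdisk wraps instead of overrunning its buffer. A userspace sketch of the same wrap-around indexing using the plain % operator:

#include <stdint.h>
#include <stdio.h>

#define STORE_SECTORS 2048u   /* illustrative size of the simulated store */
#define SECTOR_SIZE   512u

static unsigned char fake_storep[STORE_SECTORS * SECTOR_SIZE];

/* Return a pointer to the backing bytes for 'lba', wrapping past the end. */
static unsigned char *fake_store(uint64_t lba)
{
    lba %= STORE_SECTORS;     /* do_div(lba, sdebug_store_sectors) in the driver */
    return fake_storep + lba * SECTOR_SIZE;
}

int main(void)
{
    /* LBA 2050 wraps onto sector 2 of the store. */
    printf("offset %ld\n", (long)(fake_store(2050) - fake_storep));  /* 1024 */
    return 0;
}
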
+
static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);
@@ -1731,25 +1745,22 @@ static int do_device_access(struct scsi_cmnd *scmd,
return ret;
}
-static u16 dif_compute_csum(const void *buf, int len)
+static __be16 dif_compute_csum(const void *buf, int len)
{
- u16 csum;
+ __be16 csum;
- switch (scsi_debug_guard) {
- case 1:
- csum = ip_compute_csum(buf, len);
- break;
- case 0:
+ if (scsi_debug_guard)
+ csum = (__force __be16)ip_compute_csum(buf, len);
+ else
csum = cpu_to_be16(crc_t10dif(buf, len));
- break;
- }
+
return csum;
}
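
dif_compute_csum() above now selects between the IP checksum (guard != 0) and crc_t10dif() (guard == 0) for the GUARD tag. A bit-at-a-time sketch of the T10-DIF CRC that the second case relies on, assuming the standard 0x8BB7 polynomial with a zero initial value and no reflection:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* CRC-16/T10-DIF: poly 0x8BB7, init 0x0000, MSB first, no final xor. */
static uint16_t crc_t10dif_sketch(const uint8_t *buf, size_t len)
{
    uint16_t crc = 0;

    for (size_t i = 0; i < len; i++) {
        crc ^= (uint16_t)buf[i] << 8;
        for (int bit = 0; bit < 8; bit++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}

int main(void)
{
    uint8_t sector[512] = { 0 };
    printf("guard tag 0x%04x\n", crc_t10dif_sketch(sector, sizeof(sector)));
    return 0;
}
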
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
sector_t sector, u32 ei_lba)
{
- u16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+ __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
if (sdt->guard_tag != csum) {
pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@@ -1775,59 +1786,71 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
return 0;
}
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
- unsigned int sectors, u32 ei_lba)
+static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+ unsigned int sectors, bool read)
{
unsigned int i, resid;
struct scatterlist *psgl;
- struct sd_dif_tuple *sdt;
- sector_t sector;
- sector_t tmp_sec = start_sec;
void *paddr;
+ const void *dif_store_end = dif_storep + sdebug_store_sectors;
- start_sec = do_div(tmp_sec, sdebug_store_sectors);
+ /* Bytes of protection data to copy into sgl */
+ resid = sectors * sizeof(*dif_storep);
- sdt = dif_storep + start_sec;
+ scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
+ int len = min(psgl->length, resid);
+ void *start = dif_store(sector);
+ int rest = 0;
- for (i = 0 ; i < sectors ; i++) {
- int ret;
+ if (dif_store_end < start + len)
+ rest = start + len - dif_store_end;
- if (sdt[i].app_tag == 0xffff)
- continue;
+ paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
- sector = start_sec + i;
+ if (read)
+ memcpy(paddr, start, len - rest);
+ else
+ memcpy(start, paddr, len - rest);
- ret = dif_verify(&sdt[i],
- fake_storep + sector * scsi_debug_sector_size,
- sector, ei_lba);
- if (ret) {
- dif_errors++;
- return ret;
+ if (rest) {
+ if (read)
+ memcpy(paddr + len - rest, dif_storep, rest);
+ else
+ memcpy(dif_storep, paddr + len - rest, rest);
}
- ei_lba++;
+ sector += len / sizeof(*dif_storep);
+ resid -= len;
+ kunmap_atomic(paddr);
}
+}
- /* Bytes of protection data to copy into sgl */
- resid = sectors * sizeof(*dif_storep);
- sector = start_sec;
+static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+ unsigned int sectors, u32 ei_lba)
+{
+ unsigned int i;
+ struct sd_dif_tuple *sdt;
+ sector_t sector;
- scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
- int len = min(psgl->length, resid);
+ for (i = 0; i < sectors; i++) {
+ int ret;
- paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
- memcpy(paddr, dif_storep + sector, len);
+ sector = start_sec + i;
+ sdt = dif_store(sector);
- sector += len / sizeof(*dif_storep);
- if (sector >= sdebug_store_sectors) {
- /* Force wrap */
- tmp_sec = sector;
- sector = do_div(tmp_sec, sdebug_store_sectors);
+ if (sdt->app_tag == cpu_to_be16(0xffff))
+ continue;
+
+ ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ return ret;
}
- resid -= len;
- kunmap_atomic(paddr);
+
+ ei_lba++;
}
+ dif_copy_prot(SCpnt, start_sec, sectors, true);
dix_reads++;
return 0;
@@ -1910,15 +1933,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
{
int i, j, ret;
struct sd_dif_tuple *sdt;
- struct scatterlist *dsgl = scsi_sglist(SCpnt);
+ struct scatterlist *dsgl;
struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
void *daddr, *paddr;
- sector_t tmp_sec = start_sec;
- sector_t sector;
+ sector_t sector = start_sec;
int ppage_offset;
- sector = do_div(tmp_sec, sdebug_store_sectors);
-
BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
@@ -1946,25 +1966,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
sdt = paddr + ppage_offset;
- ret = dif_verify(sdt, daddr + j, start_sec, ei_lba);
+ ret = dif_verify(sdt, daddr + j, sector, ei_lba);
if (ret) {
dump_sector(daddr + j, scsi_debug_sector_size);
goto out;
}
- /* Would be great to copy this in bigger
- * chunks. However, for the sake of
- * correctness we need to verify each sector
- * before writing it to "stable" storage
- */
- memcpy(dif_storep + sector, sdt, sizeof(*sdt));
-
sector++;
-
- if (sector == sdebug_store_sectors)
- sector = 0; /* Force wrap */
-
- start_sec++;
ei_lba++;
ppage_offset += sizeof(struct sd_dif_tuple);
}
@@ -1973,6 +1981,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
kunmap_atomic(daddr);
}
+ dif_copy_prot(SCpnt, start_sec, sectors, false);
dix_writes++;
return 0;
@@ -2742,7 +2751,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
@@ -3172,7 +3181,7 @@ DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
}
DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 83e591b6019..e8bee9f0ad0 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -87,6 +87,18 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);
+static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
+{
+ if (!shost->last_reset || !shost->eh_deadline)
+ return 0;
+
+ if (time_before(jiffies,
+ shost->last_reset + shost->eh_deadline))
+ return 0;
+
+ return 1;
+}
+
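
scsi_host_eh_past_deadline() above gates each error-handling stage on a per-host deadline measured from last_reset; a zero last_reset or eh_deadline disables the check entirely. The same predicate as a standalone sketch with plain integer "jiffies", ignoring counter wrap-around that time_before() handles in the kernel:

#include <stdio.h>

/* Deadline check: 0 for either field means "no deadline configured". */
static int past_eh_deadline(unsigned long now, unsigned long last_reset,
                            unsigned long eh_deadline)
{
    if (!last_reset || !eh_deadline)
        return 0;                 /* feature disabled: never give up early */

    /* The kernel uses time_before(now, last_reset + eh_deadline); a plain
     * comparison is enough here because this sketch ignores wrap-around. */
    return now >= last_reset + eh_deadline;
}

int main(void)
{
    printf("%d\n", past_eh_deadline(1000, 900, 50));   /* 1: deadline blown    */
    printf("%d\n", past_eh_deadline(1000, 990, 50));   /* 0: still within time */
    printf("%d\n", past_eh_deadline(1000, 900, 0));    /* 0: disabled          */
    return 0;
}
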
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -109,6 +121,9 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
goto out_unlock;
+ if (shost->eh_deadline && !shost->last_reset)
+ shost->last_reset = jiffies;
+
ret = 1;
scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
@@ -138,6 +153,9 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
+ if (host->eh_deadline && !host->last_reset)
+ host->last_reset = jiffies;
+
if (host->transportt->eh_timed_out)
rtn = host->transportt->eh_timed_out(scmd);
else if (host->hostt->eh_timed_out)
@@ -990,13 +1008,26 @@ int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ struct Scsi_Host *shost;
int rtn;
+ unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
SCSI_SENSE_VALID(scmd))
continue;
+ shost = scmd->device->host;
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
"%s: requesting sense\n",
current->comm));
@@ -1082,11 +1113,28 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
struct scsi_cmnd *scmd, *next;
struct scsi_device *sdev;
int finish_cmds;
+ unsigned long flags;
while (!list_empty(cmd_list)) {
scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
sdev = scmd->device;
+ if (!try_stu) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ /* Push items back onto work_q */
+ list_splice_init(cmd_list, work_q);
+ spin_unlock_irqrestore(sdev->host->host_lock,
+ flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, sdev->host,
+ "skip %s, past eh deadline",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ }
+
finish_cmds = !scsi_device_online(scmd->device) ||
(try_stu && !scsi_eh_try_stu(scmd) &&
!scsi_eh_tur(scmd)) ||
@@ -1122,26 +1170,42 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct scsi_cmnd *scmd, *next;
LIST_HEAD(check_list);
int rtn;
+ struct Scsi_Host *shost;
+ unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
continue;
+ shost = scmd->device->host;
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ list_splice_init(&check_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ return list_empty(work_q);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
"0x%p\n", current->comm,
scmd));
- rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
- if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
- scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (rtn == FAST_IO_FAIL)
- scsi_eh_finish_cmd(scmd, done_q);
- else
- list_move_tail(&scmd->eh_entry, &check_list);
- } else
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn == FAILED) {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
"0x%p\n",
current->comm,
scmd));
+ list_splice_init(&check_list, work_q);
+ return list_empty(work_q);
+ }
+ scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
+ if (rtn == FAST_IO_FAIL)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
}
return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
@@ -1187,8 +1251,19 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *stu_scmd, *next;
struct scsi_device *sdev;
+ unsigned long flags;
shost_for_each_device(sdev, shost) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
stu_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
@@ -1241,9 +1316,20 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *bdr_scmd, *next;
struct scsi_device *sdev;
+ unsigned long flags;
int rtn;
shost_for_each_device(sdev, shost) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
bdr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev) {
@@ -1303,6 +1389,21 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct scsi_cmnd *next, *scmd;
int rtn;
unsigned int id;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ /* push back on work queue for further processing */
+ list_splice_init(&check_list, work_q);
+ list_splice_init(&tmp_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ return list_empty(work_q);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
id = scmd_id(scmd);
@@ -1347,6 +1448,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
LIST_HEAD(check_list);
unsigned int channel;
int rtn;
+ unsigned long flags;
/*
* we really want to loop over the various channels, and do this on
@@ -1356,6 +1458,18 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
*/
for (channel = 0; channel <= shost->max_channel; channel++) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ list_splice_init(&check_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ return list_empty(work_q);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
chan_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) {
if (channel == scmd_channel(scmd)) {
@@ -1755,8 +1869,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* will be requests for character device operations, and also for
* ioctls to queued block devices.
*/
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
- __func__));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ printk("scsi_eh_%d waking up host to restart\n",
+ shost->host_no));
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1883,6 +1998,10 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->eh_deadline)
+ shost->last_reset = 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
scsi_eh_flush_done_q(&eh_done_q);
}
@@ -1909,7 +2028,7 @@ int scsi_error_handler(void *data)
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1,
- printk("Error handler scsi_eh_%d sleeping\n",
+ printk("scsi_eh_%d: sleeping\n",
shost->host_no));
schedule();
continue;
@@ -1917,8 +2036,9 @@ int scsi_error_handler(void *data)
__set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1,
- printk("Error handler scsi_eh_%d waking up\n",
- shost->host_no));
+ printk("scsi_eh_%d: waking up %d/%d/%d\n",
+ shost->host_no, shost->host_eh_scheduled,
+ shost->host_failed, shost->host_busy));
/*
* We have a host that is failing for some reason. Figure out
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d1549b74e2d..7bd7f0d5f05 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = *host_dev->dma_mask;
+ bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
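scsi_calculate_bounce_limit() now derives the limit from dma_max_pfn() instead of the raw DMA mask, which matters on platforms (ARM LPAE boards in particular) where DMA-capable memory does not start at physical address zero. A standalone sketch of the arithmetic with assumed example values and a local stand-in for dma_max_pfn() shows how the two expressions can diverge; the generic kernel fallback is simply mask >> PAGE_SHIFT, while the ARM version also adds a per-device PFN offset:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12			/* 4 KiB pages, for the example */

/* assumed example values, not taken from any real platform */
static const uint64_t dma_mask = 0xffffffffULL;	/* 32-bit DMA mask */
static const uint64_t dma_pfn_offset = 0x80000;	/* RAM starts at 2 GiB */

/* models the ARM flavour of dma_max_pfn(): highest reachable PFN incl. offset */
static uint64_t example_dma_max_pfn(void)
{
	return (dma_mask >> EXAMPLE_PAGE_SHIFT) + dma_pfn_offset;
}

int main(void)
{
	printf("old bounce limit: %#llx\n", (unsigned long long)dma_mask);
	printf("new bounce limit: %#llx\n",
	       (unsigned long long)(example_dma_max_pfn() << EXAMPLE_PAGE_SHIFT));
	return 0;
}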
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 4c5aabe2175..af4c050ce6e 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -54,7 +54,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
/*
* All the high-level SCSI drivers that implement runtime
* PM treat runtime suspend, system suspend, and system
- * hibernate identically.
+ * hibernate nearly identically. In all cases the requirements
+ * for runtime suspension are stricter.
*/
if (pm_runtime_suspended(dev))
return 0;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 40c639491b2..8ff62c26a41 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -281,6 +281,42 @@ exit_store_host_reset:
static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
+static ssize_t
+show_shost_eh_deadline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ return sprintf(buf, "%d\n", shost->eh_deadline / HZ);
+}
+
+static ssize_t
+store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ int ret = -EINVAL;
+ int deadline;
+ unsigned long flags;
+
+ if (shost->transportt && shost->transportt->eh_strategy_handler)
+ return ret;
+
+ if (sscanf(buf, "%d\n", &deadline) == 1) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_in_recovery(shost))
+ ret = -EBUSY;
+ else {
+ shost->eh_deadline = deadline * HZ;
+ ret = count;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
+
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -308,6 +344,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_prot_capabilities.attr,
&dev_attr_prot_guard_type.attr,
&dev_attr_host_reset.attr,
+ &dev_attr_eh_deadline.attr,
NULL
};
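The new eh_deadline attribute takes and reports whole seconds; the store path multiplies by HZ, rejects writes with -EINVAL when the transport installs its own eh_strategy_handler, and with -EBUSY while the host is already in recovery. A hypothetical userspace snippet (the host number and the 30-second value are examples only) that sets the deadline:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical host; substitute the adapter actually being tuned */
	const char *path = "/sys/class/scsi_host/host0/eh_deadline";
	const char *deadline = "30\n";		/* seconds, per the new attribute */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open eh_deadline");
		return 1;
	}
	if (write(fd, deadline, strlen(deadline)) < 0)
		perror("write eh_deadline");	/* e.g. EBUSY during recovery */
	close(fd);
	return 0;
}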
@@ -529,6 +566,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)
*/
sdev_rd_attr (device_blocked, "%d\n");
sdev_rd_attr (queue_depth, "%d\n");
+sdev_rd_attr (device_busy, "%d\n");
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
@@ -750,6 +788,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_device_blocked.attr,
&dev_attr_type.attr,
&dev_attr_scsi_level.attr,
+ &dev_attr_device_busy.attr,
&dev_attr_vendor.attr,
&dev_attr_model.attr,
&dev_attr_rev.attr,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e4a989fa477..63a6ca49d4e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2744,6 +2744,28 @@ exit_get_chap:
return err;
}
+static int iscsi_set_chap(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err = 0;
+
+ if (!transport->set_chap)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_path.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.set_path.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_chap(shost, data, len);
+ scsi_host_put(shost);
+ return err;
+}
+
static int iscsi_delete_chap(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -3234,6 +3256,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
err = iscsi_logout_flashnode_sid(transport, ev);
break;
+ case ISCSI_UEVENT_SET_CHAP:
+ err = iscsi_set_chap(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
default:
err = -ENOSYS;
break;
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index f379c7f3034..2700a5a09bd 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,12 +24,15 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
+#include "scsi_priv.h"
#include "scsi_transport_srp_internal.h"
struct srp_host_attrs {
@@ -38,7 +41,7 @@ struct srp_host_attrs {
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
#define SRP_HOST_ATTRS 0
-#define SRP_RPORT_ATTRS 3
+#define SRP_RPORT_ATTRS 8
struct srp_internal {
struct scsi_transport_template t;
@@ -54,6 +57,36 @@ struct srp_internal {
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
+static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
+{
+ return dev_to_shost(r->dev.parent);
+}
+
+/**
+ * srp_tmo_valid() - check timeout combination validity
+ *
+ * The combination of the timeout parameters must be such that SCSI commands
+ * are finished in a reasonable time. Hence do not allow the fast I/O fail
+ * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these
+ * parameters must be such that multipath can detect failed paths in a
+ * timely manner. Hence do not allow all three parameters to be disabled
+ * simultaneously.
+ */
+int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
+{
+ if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
+ return -EINVAL;
+ if (reconnect_delay == 0)
+ return -EINVAL;
+ if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
+ if (dev_loss_tmo >= LONG_MAX / HZ)
+ return -EINVAL;
+ if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
+ fast_io_fail_tmo >= dev_loss_tmo)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(srp_tmo_valid);
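srp_tmo_valid() accepts a combination only if at least one of the three mechanisms is enabled, reconnect_delay is not zero, fast_io_fail_tmo does not exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT, dev_loss_tmo stays below LONG_MAX / HZ, and fast_io_fail_tmo fires strictly before dev_loss_tmo whenever both are set. The same checks restated as a self-contained program; the HZ and block-timeout constants below are placeholders for illustration, not the kernel's values:

#include <limits.h>
#include <stdio.h>

#define EXAMPLE_HZ 100			/* assumed tick rate for the example */
#define BLOCK_MAX_TIMEOUT (3 * 60)	/* stand-in for SCSI_DEVICE_BLOCK_MAX_TIMEOUT */

/* a negative value means "disabled", mirroring the sysfs "off" setting */
static int tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
{
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -1;		/* nothing left to detect a dead path */
	if (reconnect_delay == 0)
		return -1;		/* zero delay would retry in a tight loop */
	if (fast_io_fail_tmo > BLOCK_MAX_TIMEOUT)
		return -1;
	if (dev_loss_tmo >= LONG_MAX / EXAMPLE_HZ)
		return -1;		/* would overflow the jiffies conversion */
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -1;		/* fail-fast must fire before device loss */
	return 0;
}

int main(void)
{
	printf("(10, 15, 60) -> %d\n", tmo_valid(10, 15, 60));	/* accepted */
	printf("(10, 60, 15) -> %d\n", tmo_valid(10, 60, 15));	/* rejected */
	return 0;
}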
static int srp_host_setup(struct transport_container *tc, struct device *dev,
struct device *cdev)
@@ -134,10 +167,465 @@ static ssize_t store_srp_rport_delete(struct device *dev,
static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
+static ssize_t show_srp_rport_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ static const char *const state_name[] = {
+ [SRP_RPORT_RUNNING] = "running",
+ [SRP_RPORT_BLOCKED] = "blocked",
+ [SRP_RPORT_FAIL_FAST] = "fail-fast",
+ [SRP_RPORT_LOST] = "lost",
+ };
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ enum srp_rport_state state = rport->state;
+
+ return sprintf(buf, "%s\n",
+ (unsigned)state < ARRAY_SIZE(state_name) ?
+ state_name[state] : "???");
+}
+
+static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);
+
+static ssize_t srp_show_tmo(char *buf, int tmo)
+{
+ return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
+}
+
+static int srp_parse_tmo(int *tmo, const char *buf)
+{
+ int res = 0;
+
+ if (strncmp(buf, "off", 3) != 0)
+ res = kstrtoint(buf, 0, tmo);
+ else
+ *tmo = -1;
+
+ return res;
+}
+
+static ssize_t show_reconnect_delay(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->reconnect_delay);
+}
+
+static ssize_t store_reconnect_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, const size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res, delay;
+
+ res = srp_parse_tmo(&delay, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
+ rport->dev_loss_tmo);
+ if (res)
+ goto out;
+
+ if (rport->reconnect_delay <= 0 && delay > 0 &&
+ rport->state != SRP_RPORT_RUNNING) {
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ delay * HZ);
+ } else if (delay <= 0) {
+ cancel_delayed_work(&rport->reconnect_work);
+ }
+ rport->reconnect_delay = delay;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
+ store_reconnect_delay);
+
+static ssize_t show_failed_reconnects(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return sprintf(buf, "%d\n", rport->failed_reconnects);
+}
+
+static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);
+
+static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->fast_io_fail_tmo);
+}
+
+static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res;
+ int fast_io_fail_tmo;
+
+ res = srp_parse_tmo(&fast_io_fail_tmo, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
+ rport->dev_loss_tmo);
+ if (res)
+ goto out;
+ rport->fast_io_fail_tmo = fast_io_fail_tmo;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ show_srp_rport_fast_io_fail_tmo,
+ store_srp_rport_fast_io_fail_tmo);
+
+static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->dev_loss_tmo);
+}
+
+static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res;
+ int dev_loss_tmo;
+
+ res = srp_parse_tmo(&dev_loss_tmo, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
+ dev_loss_tmo);
+ if (res)
+ goto out;
+ rport->dev_loss_tmo = dev_loss_tmo;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_srp_rport_dev_loss_tmo,
+ store_srp_rport_dev_loss_tmo);
+
+static int srp_rport_set_state(struct srp_rport *rport,
+ enum srp_rport_state new_state)
+{
+ enum srp_rport_state old_state = rport->state;
+
+ lockdep_assert_held(&rport->mutex);
+
+ switch (new_state) {
+ case SRP_RPORT_RUNNING:
+ switch (old_state) {
+ case SRP_RPORT_LOST:
+ goto invalid;
+ default:
+ break;
+ }
+ break;
+ case SRP_RPORT_BLOCKED:
+ switch (old_state) {
+ case SRP_RPORT_RUNNING:
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case SRP_RPORT_FAIL_FAST:
+ switch (old_state) {
+ case SRP_RPORT_LOST:
+ goto invalid;
+ default:
+ break;
+ }
+ break;
+ case SRP_RPORT_LOST:
+ break;
+ }
+ rport->state = new_state;
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
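srp_rport_set_state() encodes a small state machine: RUNNING and FAIL_FAST may be entered from any state except LOST, BLOCKED may only be entered from RUNNING, and LOST is reachable from anywhere but terminal. A compact restatement of those rules, with the enum re-declared locally since this sketch is not built against the kernel headers:

#include <stdbool.h>
#include <stdio.h>

/* local copy of the states, only for this sketch */
enum rport_state { RPORT_RUNNING, RPORT_BLOCKED, RPORT_FAIL_FAST, RPORT_LOST };

/* same rules as srp_rport_set_state(): LOST is terminal, BLOCKED only follows RUNNING */
static bool transition_ok(enum rport_state old_state, enum rport_state new_state)
{
	switch (new_state) {
	case RPORT_RUNNING:
	case RPORT_FAIL_FAST:
		return old_state != RPORT_LOST;
	case RPORT_BLOCKED:
		return old_state == RPORT_RUNNING;
	case RPORT_LOST:
		return true;
	}
	return false;
}

int main(void)
{
	printf("BLOCKED -> RUNNING: %d\n",
	       transition_ok(RPORT_BLOCKED, RPORT_RUNNING));	/* 1 */
	printf("LOST -> RUNNING:    %d\n",
	       transition_ok(RPORT_LOST, RPORT_RUNNING));	/* 0 */
	return 0;
}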
+
+/**
+ * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
+ */
+static void srp_reconnect_work(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, reconnect_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ int delay, res;
+
+ res = srp_reconnect_rport(rport);
+ if (res != 0) {
+ shost_printk(KERN_ERR, shost,
+ "reconnect attempt %d failed (%d)\n",
+ ++rport->failed_reconnects, res);
+ delay = rport->reconnect_delay *
+ min(100, max(1, rport->failed_reconnects - 10));
+ if (delay > 0)
+ queue_delayed_work(system_long_wq,
+ &rport->reconnect_work, delay * HZ);
+ }
+}
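On a failed attempt the worker reschedules itself with delay = reconnect_delay * min(100, max(1, failed_reconnects - 10)), so the configured delay is kept for the first attempts and then grows linearly up to a factor of 100. A quick table of the resulting delays, assuming the 10-second default that srp_rport_add() installs when the transport does not supply its own reconnect_delay:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	const int reconnect_delay = 10;	/* seconds; assumed default */
	int failed;

	for (failed = 1; failed <= 121; failed += 20) {
		int factor = min_int(100, max_int(1, failed - 10));

		printf("after %3d failures -> next delay %4d s\n",
		       failed, reconnect_delay * factor);
	}
	return 0;
}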
+
+static void __rport_fail_io_fast(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i;
+
+ lockdep_assert_held(&rport->mutex);
+
+ if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
+ return;
+ scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
+
+ /* Involve the LLD if possible to terminate all I/O on the rport. */
+ i = to_srp_internal(shost->transportt);
+ if (i->f->terminate_rport_io)
+ i->f->terminate_rport_io(rport);
+}
+
+/**
+ * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
+ */
+static void rport_fast_io_fail_timedout(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, fast_io_fail_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+
+ pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
+ dev_name(&rport->dev), dev_name(&shost->shost_gendev));
+
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ mutex_unlock(&rport->mutex);
+}
+
+/**
+ * rport_dev_loss_timedout() - device loss timeout handler
+ */
+static void rport_dev_loss_timedout(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, dev_loss_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
+ dev_name(&rport->dev), dev_name(&shost->shost_gendev));
+
+ mutex_lock(&rport->mutex);
+ WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
+ scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
+ mutex_unlock(&rport->mutex);
+
+ i->f->rport_delete(rport);
+}
+
+static void __srp_start_tl_fail_timers(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ int delay, fast_io_fail_tmo, dev_loss_tmo;
+
+ lockdep_assert_held(&rport->mutex);
+
+ if (!rport->deleted) {
+ delay = rport->reconnect_delay;
+ fast_io_fail_tmo = rport->fast_io_fail_tmo;
+ dev_loss_tmo = rport->dev_loss_tmo;
+ pr_debug("%s current state: %d\n",
+ dev_name(&shost->shost_gendev), rport->state);
+
+ if (delay > 0)
+ queue_delayed_work(system_long_wq,
+ &rport->reconnect_work,
+ 1UL * delay * HZ);
+ if (fast_io_fail_tmo >= 0 &&
+ srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
+ pr_debug("%s new state: %d\n",
+ dev_name(&shost->shost_gendev),
+ rport->state);
+ scsi_target_block(&shost->shost_gendev);
+ queue_delayed_work(system_long_wq,
+ &rport->fast_io_fail_work,
+ 1UL * fast_io_fail_tmo * HZ);
+ }
+ if (dev_loss_tmo >= 0)
+ queue_delayed_work(system_long_wq,
+ &rport->dev_loss_work,
+ 1UL * dev_loss_tmo * HZ);
+ } else {
+ pr_debug("%s has already been deleted\n",
+ dev_name(&shost->shost_gendev));
+ srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST);
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ }
+}
+
+/**
+ * srp_start_tl_fail_timers() - start the transport layer failure timers
+ *
+ * Start the transport layer fast I/O failure and device loss timers. Do not
+ * modify a timer that was already started.
+ */
+void srp_start_tl_fail_timers(struct srp_rport *rport)
+{
+ mutex_lock(&rport->mutex);
+ __srp_start_tl_fail_timers(rport);
+ mutex_unlock(&rport->mutex);
+}
+EXPORT_SYMBOL(srp_start_tl_fail_timers);
+
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ */
+static int scsi_request_fn_active(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ int request_fn_active = 0;
+
+ shost_for_each_device(sdev, shost) {
+ q = sdev->request_queue;
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active += q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return request_fn_active;
+}
+
+/**
+ * srp_reconnect_rport() - reconnect to an SRP target port
+ *
+ * Blocks SCSI command queueing before invoking reconnect() such that
+ * queuecommand() won't be invoked concurrently with reconnect() from outside
+ * the SCSI EH. This is important since a reconnect() implementation may
+ * reallocate resources needed by queuecommand().
+ *
+ * Notes:
+ * - This function neither waits until outstanding requests have finished nor
+ * tries to abort these. It is the responsibility of the reconnect()
+ * function to finish outstanding commands before reconnecting to the target
+ * port.
+ * - It is the responsibility of the caller to ensure that the resources
+ * reallocated by the reconnect() function won't be used while this function
+ * is in progress. One possible strategy is to invoke this function from
+ * the context of the SCSI EH thread only. Another possible strategy is to
+ * lock the rport mutex inside each SCSI LLD callback that can be invoked by
+ * the SCSI EH (the scsi_host_template.eh_*() functions and also the
+ * scsi_host_template.queuecommand() function).
+ */
+int srp_reconnect_rport(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+ struct scsi_device *sdev;
+ int res;
+
+ pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));
+
+ res = mutex_lock_interruptible(&rport->mutex);
+ if (res)
+ goto out;
+ scsi_target_block(&shost->shost_gendev);
+ while (scsi_request_fn_active(shost))
+ msleep(20);
+ res = i->f->reconnect(rport);
+ pr_debug("%s (state %d): transport.reconnect() returned %d\n",
+ dev_name(&shost->shost_gendev), rport->state, res);
+ if (res == 0) {
+ cancel_delayed_work(&rport->fast_io_fail_work);
+ cancel_delayed_work(&rport->dev_loss_work);
+
+ rport->failed_reconnects = 0;
+ srp_rport_set_state(rport, SRP_RPORT_RUNNING);
+ scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
+ /*
+ * If the SCSI error handler has offlined one or more devices,
+ * invoking scsi_target_unblock() won't change the state of
+ * these devices into running so do that explicitly.
+ */
+ spin_lock_irq(shost->host_lock);
+ __shost_for_each_device(sdev, shost)
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ sdev->sdev_state = SDEV_RUNNING;
+ spin_unlock_irq(shost->host_lock);
+ } else if (rport->state == SRP_RPORT_RUNNING) {
+ /*
+ * srp_reconnect_rport() was invoked with fast_io_fail
+ * off. Mark the port as failed and start the TL failure
+ * timers if these had not yet been started.
+ */
+ __rport_fail_io_fast(rport);
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ __srp_start_tl_fail_timers(rport);
+ } else if (rport->state != SRP_RPORT_BLOCKED) {
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ }
+ mutex_unlock(&rport->mutex);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL(srp_reconnect_rport);
+
+/**
+ * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
+ *
+ * If a timeout occurs while an rport is in the blocked state, ask the SCSI
+ * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
+ * handle the timeout (BLK_EH_NOT_HANDLED).
+ *
+ * Note: This function is called from soft-IRQ context and with the request
+ * queue lock held.
+ */
+static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
+ return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
+ BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
+ cancel_delayed_work_sync(&rport->reconnect_work);
+ cancel_delayed_work_sync(&rport->fast_io_fail_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+
put_device(dev->parent);
kfree(rport);
}
@@ -185,6 +673,24 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)
}
/**
+ * srp_rport_get() - increment rport reference count
+ */
+void srp_rport_get(struct srp_rport *rport)
+{
+ get_device(&rport->dev);
+}
+EXPORT_SYMBOL(srp_rport_get);
+
+/**
+ * srp_rport_put() - decrement rport reference count
+ */
+void srp_rport_put(struct srp_rport *rport)
+{
+ put_device(&rport->dev);
+}
+EXPORT_SYMBOL(srp_rport_put);
+
+/**
* srp_rport_add - add a SRP remote port to the device hierarchy
* @shost: scsi host the remote port is connected to.
* @ids: The port id for the remote port.
@@ -196,12 +702,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
{
struct srp_rport *rport;
struct device *parent = &shost->shost_gendev;
+ struct srp_internal *i = to_srp_internal(shost->transportt);
int id, ret;
rport = kzalloc(sizeof(*rport), GFP_KERNEL);
if (!rport)
return ERR_PTR(-ENOMEM);
+ mutex_init(&rport->mutex);
+
device_initialize(&rport->dev);
rport->dev.parent = get_device(parent);
@@ -210,6 +719,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
rport->roles = ids->roles;
+ if (i->f->reconnect)
+ rport->reconnect_delay = i->f->reconnect_delay ?
+ *i->f->reconnect_delay : 10;
+ INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
+ rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
+ *i->f->fast_io_fail_tmo : 15;
+ rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
+ INIT_DELAYED_WORK(&rport->fast_io_fail_work,
+ rport_fast_io_fail_timedout);
+ INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
+
id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
@@ -259,6 +779,13 @@ void srp_rport_del(struct srp_rport *rport)
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
+
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ rport->deleted = true;
+ mutex_unlock(&rport->mutex);
+
put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);
@@ -310,6 +837,8 @@ srp_attach_transport(struct srp_function_template *ft)
if (!i)
return NULL;
+ i->t.eh_timed_out = srp_timed_out;
+
i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
i->t.it_nexus_response = srp_it_nexus_response;
@@ -327,6 +856,15 @@ srp_attach_transport(struct srp_function_template *ft)
count = 0;
i->rport_attrs[count++] = &dev_attr_port_id;
i->rport_attrs[count++] = &dev_attr_roles;
+ if (ft->has_rport_state) {
+ i->rport_attrs[count++] = &dev_attr_state;
+ i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
+ i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
+ }
+ if (ft->reconnect) {
+ i->rport_attrs[count++] = &dev_attr_reconnect_delay;
+ i->rport_attrs[count++] = &dev_attr_failed_reconnects;
+ }
if (ft->rport_delete)
i->rport_attrs[count++] = &dev_attr_delete;
i->rport_attrs[count++] = NULL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5693f6d7edd..e6c4bff0433 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -105,7 +105,8 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *);
+static int sd_suspend_system(struct device *);
+static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
@@ -484,11 +485,11 @@ static struct class sd_disk_class = {
};
static const struct dev_pm_ops sd_pm_ops = {
- .suspend = sd_suspend,
+ .suspend = sd_suspend_system,
.resume = sd_resume,
- .poweroff = sd_suspend,
+ .poweroff = sd_suspend_system,
.restore = sd_resume,
- .runtime_suspend = sd_suspend,
+ .runtime_suspend = sd_suspend_runtime,
.runtime_resume = sd_resume,
};
@@ -829,7 +830,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
{
- rq->timeout = SD_FLUSH_TIMEOUT;
+ rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
rq->retries = SD_MAX_RETRIES;
rq->cmd[0] = SYNCHRONIZE_CACHE;
rq->cmd_len = 10;
@@ -1002,7 +1003,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
goto out;
}
@@ -1433,12 +1434,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
{
int retries, res;
struct scsi_device *sdp = sdkp->device;
+ const int timeout = sdp->request_queue->rq_timeout
+ * SD_FLUSH_TIMEOUT_MULTIPLIER;
struct scsi_sense_hdr sshdr;
if (!scsi_device_online(sdp))
return -ENODEV;
-
for (retries = 3; retries > 0; --retries) {
unsigned char cmd[10] = { 0 };
@@ -1448,20 +1450,39 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
* flush everything.
*/
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
- &sshdr, SD_FLUSH_TIMEOUT,
- SD_MAX_RETRIES, NULL, REQ_PM);
+ &sshdr, timeout, SD_MAX_RETRIES,
+ NULL, REQ_PM);
if (res == 0)
break;
}
if (res) {
sd_print_result(sdkp, res);
+
if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
+ /* we need to evaluate the error return */
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ /* this is no error here */
+ return 0;
+
+ switch (host_byte(res)) {
+ /* ignore errors due to racing a disconnection */
+ case DID_BAD_TARGET:
+ case DID_NO_CONNECT:
+ return 0;
+ /* signal the upper layer it might try again */
+ case DID_BUS_BUSY:
+ case DID_IMM_RETRY:
+ case DID_REQUEUE:
+ case DID_SOFT_ERROR:
+ return -EBUSY;
+ default:
+ return -EIO;
+ }
}
-
- if (res)
- return -EIO;
return 0;
}
@@ -2639,13 +2660,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_device *sdev = sdkp->device;
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+ /* too large values might cause issues with arcmsr */
+ int vpd_buf_len = 64;
+
sdev->no_report_opcodes = 1;
/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
* CODES is unsupported and the device has an ATA
* Information VPD page (SAT).
*/
- if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
+ if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
sdev->no_write_same = 1;
}
@@ -3058,9 +3082,17 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
sd_print_result(sdkp, res);
if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ res = 0;
}
- return res;
+ /* SCSI error codes must not go to the generic layer */
+ if (res)
+ return -EIO;
+
+ return 0;
}
/*
@@ -3078,7 +3110,7 @@ static void sd_shutdown(struct device *dev)
if (pm_runtime_suspended(dev))
goto exit;
- if (sdkp->WCE) {
+ if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
sd_sync_cache(sdkp);
}
@@ -3092,7 +3124,7 @@ exit:
scsi_disk_put(sdkp);
}
-static int sd_suspend(struct device *dev)
+static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0;
@@ -3100,16 +3132,23 @@ static int sd_suspend(struct device *dev)
if (!sdkp)
return 0; /* this can happen */
- if (sdkp->WCE) {
+ if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp);
- if (ret)
+ if (ret) {
+ /* ignore OFFLINE device */
+ if (ret == -ENODEV)
+ ret = 0;
goto done;
+ }
}
if (sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ /* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
+ if (ignore_stop_errors)
+ ret = 0;
}
done:
@@ -3117,6 +3156,16 @@ done:
return ret;
}
+static int sd_suspend_system(struct device *dev)
+{
+ return sd_suspend_common(dev, true);
+}
+
+static int sd_suspend_runtime(struct device *dev)
+{
+ return sd_suspend_common(dev, false);
+}
+
static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 7a049de2205..26895ff247c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -13,7 +13,11 @@
*/
#define SD_TIMEOUT (30 * HZ)
#define SD_MOD_TIMEOUT (75 * HZ)
-#define SD_FLUSH_TIMEOUT (60 * HZ)
+/*
+ * The flush timeout is a multiplier applied to the standard device timeout,
+ * which is user-modifiable via sysfs but initially set to SD_TIMEOUT.
+ */
+#define SD_FLUSH_TIMEOUT_MULTIPLIER 2
#define SD_WRITE_SAME_TIMEOUT (120 * HZ)
/*
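With the fixed SD_FLUSH_TIMEOUT gone, a SYNCHRONIZE CACHE now times out after SD_FLUSH_TIMEOUT_MULTIPLIER times the request-queue timeout of the device, so tuning the per-device timeout through sysfs scales the flush timeout along with it. A trivial calculation for a few example queue timeouts (the sysfs path in the comment is the usual block-layer knob; the values are illustrative, with 30 s matching the SD_TIMEOUT default):

#include <stdio.h>

#define SD_FLUSH_TIMEOUT_MULTIPLIER 2	/* same multiplier as the sd.h change */

int main(void)
{
	/* example per-device timeouts in seconds, e.g. /sys/block/sdX/device/timeout */
	const int rq_timeout[] = { 30, 60, 90 };
	unsigned int i;

	for (i = 0; i < sizeof(rq_timeout) / sizeof(rq_timeout[0]); i++)
		printf("device timeout %2d s -> flush timeout %3d s\n",
		       rq_timeout[i],
		       rq_timeout[i] * SD_FLUSH_TIMEOUT_MULTIPLIER);
	return 0;
}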
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 325c31caa6e..1aa4befcfbd 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1790,8 +1790,6 @@ static void stex_remove(struct pci_dev *pdev)
scsi_remove_host(hba->host);
- pci_set_drvdata(pdev, NULL);
-
stex_hba_stop(hba);
stex_hba_free(hba);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index b80bf709f10..805369521df 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -174,7 +174,7 @@ struct sym_slcb {
*/
struct sym_shcb {
/*
- * Chip and controller indentification.
+ * Chip and controller identification.
*/
int unit;
char inst_name[16];
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 9327f5fcec4..b006cf789ba 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -521,7 +521,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
pACB->SelConn++;
return 1;
}
- if (time_before (jiffies, pACB->pScsiHost->last_reset))
+ if (time_before (jiffies, pACB->last_reset))
{
DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
return 1;
@@ -1863,7 +1863,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB )
/* delay half a second */
udelay (1000);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
- pACB->pScsiHost->last_reset = jiffies + 5*HZ/2
+ pACB->last_reset = jiffies + 5*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
pACB->Connected = 0;
@@ -2048,9 +2048,9 @@ static int DC390_bus_reset (struct scsi_cmnd *cmd)
dc390_ResetDevParam(pACB);
mdelay(1);
- pACB->pScsiHost->last_reset = jiffies + 3*HZ/2
+ pACB->last_reset = jiffies + 3*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
-
+
DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
DC390_read8(INT_Status); /* Reset Pending INT */
@@ -2383,7 +2383,7 @@ static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
if (pACB->Gmode2 & RST_SCSI_BUS) {
dc390_ResetSCSIBus(pACB);
udelay(1000);
- shost->last_reset = jiffies + HZ/2 +
+ pACB->last_reset = jiffies + HZ/2 +
HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
}
@@ -2455,8 +2455,8 @@ static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->irq = pdev->irq;
shost->base = io_port;
shost->unique_id = io_port;
- shost->last_reset = jiffies;
-
+
+ pACB->last_reset = jiffies;
pACB->pScsiHost = shost;
pACB->IOPortBase = (u16) io_port;
pACB->IRQLevel = pdev->irq;
@@ -2553,7 +2553,6 @@ static void dc390_remove_one(struct pci_dev *dev)
pci_disable_device(dev);
scsi_host_put(scsi_host);
- pci_set_drvdata(dev, NULL);
}
static struct pci_device_id tmscsim_pci_tbl[] = {
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index 77adc54dbd1..3d1bb4ad182 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -143,6 +143,7 @@ u8 Ignore_IRQ; /* Not used */
struct pci_dev *pdev;
+unsigned long last_reset;
unsigned long Cmds;
u32 SelLost;
u32 SelConn;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index a823cf44e94..8b9531204c2 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -132,7 +132,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- pci_set_drvdata(pdev, NULL);
}
/**
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 74b88efde6a..c3173dced87 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -224,6 +224,9 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
virtqueue_disable_cb(vq);
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(vscsi, buf);
+
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
@@ -710,19 +713,15 @@ static struct scsi_host_template virtscsi_host_template_multi = {
#define virtscsi_config_get(vdev, fld) \
({ \
typeof(((struct virtio_scsi_config *)0)->fld) __val; \
- vdev->config->get(vdev, \
- offsetof(struct virtio_scsi_config, fld), \
- &__val, sizeof(__val)); \
+ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
- (void)({ \
+ do { \
typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
- vdev->config->set(vdev, \
- offsetof(struct virtio_scsi_config, fld), \
- &__val, sizeof(__val)); \
- })
+ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+ } while(0)
static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
@@ -954,7 +953,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
scsi_host_put(shost);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
virtscsi_remove_vqs(vdev);
@@ -988,7 +987,7 @@ static struct virtio_driver virtio_scsi_driver = {
.id_table = id_table,
.probe = virtscsi_probe,
.scan = virtscsi_scan,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
#endif
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 3bfaa66fa0d..b9755ec0e81 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1405,7 +1405,6 @@ out_release_resources:
out_free_host:
scsi_host_put(host);
out_disable_device:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return error;
@@ -1445,7 +1444,6 @@ static void pvscsi_remove(struct pci_dev *pdev)
scsi_host_put(host);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b9c53cc40e1..eb1f1ef5fa2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -264,6 +264,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select SPI_BITBANG
+ depends on SOC_VF610 || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -369,7 +370,7 @@ config SPI_PXA2XX_PCI
config SPI_RSPI
tristate "Renesas RSPI controller"
- depends on SUPERH && SH_DMAE_BASE
+ depends on (SUPERH || ARCH_SHMOBILE) && SH_DMAE_BASE
help
SPI driver for Renesas RSPI blocks.
@@ -393,7 +394,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+ depends on PLAT_SAMSUNG
select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 9a64c3fee21..595b62cb545 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -219,7 +219,7 @@ static int altera_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = spi_master_get(master);
+ hw->bitbang.master = master;
if (!hw->bitbang.master)
return err;
hw->bitbang.chipselect = altera_spi_chipsel;
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 37bad952ab3..821bf7ac218 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -231,7 +231,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->num_chipselect = pdata->num_chipselect;
}
- sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index d4ac60b4a56..273db0beb2b 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -170,18 +170,18 @@
/* Bit manipulation macros */
#define SPI_BIT(name) \
(1 << SPI_##name##_OFFSET)
-#define SPI_BF(name,value) \
+#define SPI_BF(name, value) \
(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
-#define SPI_BFEXT(name,value) \
+#define SPI_BFEXT(name, value) \
(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
-#define SPI_BFINS(name,value,old) \
- ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
- | SPI_BF(name,value))
+#define SPI_BFINS(name, value, old) \
+ (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name, value))
/* Register access macros */
-#define spi_readl(port,reg) \
+#define spi_readl(port, reg) \
__raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port,reg,value) \
+#define spi_writel(port, reg, value) \
__raw_writel((value), (port)->regs + SPI_##reg)
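The SPI_BF/SPI_BFEXT/SPI_BFINS macros being reformatted here are ordinary bit-field insert/extract helpers driven by a name##_OFFSET / name##_SIZE pair. A standalone demonstration with an invented 4-bit field at bit 8 (the field name is made up; the arithmetic mirrors the macros above):

#include <stdint.h>
#include <stdio.h>

/* made-up field: 4 bits wide, starting at bit 8 */
#define DEMO_FIELD_OFFSET	8
#define DEMO_FIELD_SIZE		4

#define BF(name, value) \
	(((value) & ((1 << name##_SIZE) - 1)) << name##_OFFSET)
#define BFEXT(name, value) \
	(((value) >> name##_OFFSET) & ((1 << name##_SIZE) - 1))
#define BFINS(name, value, old) \
	(((old) & ~(((1 << name##_SIZE) - 1) << name##_OFFSET)) | BF(name, value))

int main(void)
{
	uint32_t reg = 0xffff0000;

	reg = BFINS(DEMO_FIELD, 0x5, reg);	/* write 0x5 into bits 11:8 */
	printf("reg = %#x, field = %#x\n",
	       (unsigned int)reg, (unsigned int)BFEXT(DEMO_FIELD, reg));
	return 0;
}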
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
@@ -1401,8 +1401,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev, "you can't yet change "
- "bits_per_word in transfers\n");
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
return -ENOPROTOOPT;
}
}
@@ -1516,7 +1516,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
/* setup spi core then atmel-specific driver state */
ret = -ENOMEM;
- master = spi_alloc_master(&pdev->dev, sizeof *as);
+ master = spi_alloc_master(&pdev->dev, sizeof(*as));
if (!master)
goto out_free;
@@ -1546,9 +1546,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
- as->regs = ioremap(regs->start, resource_size(regs));
- if (!as->regs)
+ as->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(as->regs)) {
+ ret = PTR_ERR(as->regs);
goto out_free_buffer;
+ }
as->phybase = regs->start;
as->irq = irq;
as->clk = clk;
@@ -1617,7 +1619,6 @@ out_free_dma:
out_free_irq:
free_irq(irq, master);
out_unmap_regs:
- iounmap(as->regs);
out_free_buffer:
if (!as->use_pdc)
tasklet_kill(&as->tasklet);
@@ -1669,36 +1670,36 @@ static int atmel_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(as->clk);
clk_put(as->clk);
free_irq(as->irq, master);
- iounmap(as->regs);
spi_unregister_master(master);
return 0;
}
-#ifdef CONFIG_PM
-
-static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int atmel_spi_suspend(struct device *dev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
clk_disable_unprepare(as->clk);
return 0;
}
-static int atmel_spi_resume(struct platform_device *pdev)
+static int atmel_spi_resume(struct device *dev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
- return clk_prepare_enable(as->clk);
+ clk_prepare_enable(as->clk);
return 0;
}
+static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
+
+#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
#else
-#define atmel_spi_suspend NULL
-#define atmel_spi_resume NULL
+#define ATMEL_SPI_PM_OPS NULL
#endif
#if defined(CONFIG_OF)
@@ -1714,10 +1715,9 @@ static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.owner = THIS_MODULE,
+ .pm = ATMEL_SPI_PM_OPS,
.of_match_table = of_match_ptr(atmel_spi_dt_ids),
},
- .suspend = atmel_spi_suspend,
- .resume = atmel_spi_resume,
.probe = atmel_spi_probe,
.remove = atmel_spi_remove,
};
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 1d00d9b397d..c4141c92bcf 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -775,7 +775,7 @@ static int au1550_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
@@ -985,6 +985,7 @@ static int au1550_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:au1550-spi");
static struct platform_driver au1550_spi_drv = {
+ .probe = au1550_spi_probe,
.remove = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
@@ -1004,7 +1005,7 @@ static int __init au1550_spi_init(void)
printk(KERN_ERR "au1550-spi: cannot add memory"
"dbdma device\n");
}
- return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe);
+ return platform_driver_register(&au1550_spi_drv);
}
module_init(au1550_spi_init);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 52c81481c5c..3ed666fe840 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -217,7 +217,7 @@ static int bcm2835_spi_start_transfer(struct spi_device *spi,
cs |= spi->chip_select;
}
- INIT_COMPLETION(bs->done);
+ reinit_completion(&bs->done);
bs->tx_buf = tfr->tx_buf;
bs->rx_buf = tfr->rx_buf;
bs->len = tfr->len;
@@ -358,7 +358,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
- err = spi_register_master(master);
+ err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
goto out_free_irq;
@@ -381,14 +381,12 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
struct bcm2835_spi *bs = spi_master_get_devdata(master);
free_irq(bs->irq, master);
- spi_unregister_master(master);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
clk_disable_unprepare(bs->clk);
- spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 536b0e36382..80d56b214eb 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -412,7 +412,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(dev, "spi register failed\n");
goto out_clk_disable;
@@ -438,8 +438,6 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- spi_unregister_master(master);
-
/* reset spi block */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
@@ -447,8 +445,6 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(bs->clk);
clk_put(bs->clk);
- spi_master_put(master);
-
return 0;
}
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 91921b5f581..38941e5920b 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -592,7 +592,7 @@ bfin_sport_spi_setup(struct spi_device *spi)
*/
if (chip_info->ctl_reg || chip_info->enable_dma) {
ret = -EINVAL;
- dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
goto error;
}
chip->cs_chg_udelay = chip_info->cs_chg_udelay;
@@ -879,11 +879,10 @@ static int bfin_sport_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int
-bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int bfin_sport_spi_suspend(struct device *dev)
{
- struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
int status;
status = bfin_sport_spi_stop_queue(drv_data);
@@ -896,10 +895,9 @@ bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
return status;
}
-static int
-bfin_sport_spi_resume(struct platform_device *pdev)
+static int bfin_sport_spi_resume(struct device *dev)
{
- struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
int status;
/* Enable the SPI interface */
@@ -912,19 +910,22 @@ bfin_sport_spi_resume(struct platform_device *pdev)
return status;
}
+
+static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
+ bfin_sport_spi_resume);
+
+#define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops)
#else
-# define bfin_sport_spi_suspend NULL
-# define bfin_sport_spi_resume NULL
+#define BFIN_SPORT_SPI_PM_OPS NULL
#endif
static struct platform_driver bfin_sport_spi_driver = {
.driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = BFIN_SPORT_SPI_PM_OPS,
},
.probe = bfin_sport_spi_probe,
.remove = bfin_sport_spi_remove,
- .suspend = bfin_sport_spi_suspend,
- .resume = bfin_sport_spi_resume,
};
module_platform_driver(bfin_sport_spi_driver);
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c
index f4bf81347d6..8f8598834b3 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-bfin-v3.c
@@ -867,7 +867,7 @@ static int bfin_spi_probe(struct platform_device *pdev)
tasklet_init(&drv_data->pump_transfers,
bfin_spi_pump_transfers, (unsigned long)drv_data);
/* register with the SPI framework */
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(dev, "can not register spi master\n");
goto err_free_peripheral;
@@ -898,7 +898,6 @@ static int bfin_spi_remove(struct platform_device *pdev)
free_dma(drv_data->rx_dma);
free_dma(drv_data->tx_dma);
- spi_unregister_master(drv_data->master);
return 0;
}
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 45bdf73d686..f0f195af75d 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -524,7 +524,7 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
timeout = jiffies + HZ;
while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
if (!time_before(jiffies, timeout)) {
- dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
+ dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n");
break;
} else
cpu_relax();
@@ -913,8 +913,9 @@ static void bfin_spi_pump_messages(struct work_struct *work)
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
struct spi_transfer, transfer_list);
- dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
- "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
+ dev_dbg(&drv_data->pdev->dev,
+ "got a message to pump, state is set to: baud "
+ "%d, flag 0x%x, ctl 0x%x\n",
drv_data->cur_chip->baud, drv_data->cur_chip->flag,
drv_data->cur_chip->ctl_reg);
@@ -1013,8 +1014,8 @@ static int bfin_spi_setup(struct spi_device *spi)
* but let's assume (for now) they do.
*/
if (chip_info->ctl_reg & ~bfin_ctl_reg) {
- dev_err(&spi->dev, "do not set bits in ctl_reg "
- "that the SPI framework manages\n");
+ dev_err(&spi->dev,
+ "do not set bits in ctl_reg that the SPI framework manages\n");
goto error;
}
chip->enable_dma = chip_info->enable_dma != 0
@@ -1050,17 +1051,17 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->chip_select_num = spi->chip_select;
if (chip->chip_select_num < MAX_CTRL_CS) {
if (!(spi->mode & SPI_CPHA))
- dev_warn(&spi->dev, "Warning: SPI CPHA not set:"
- " Slave Select not under software control!\n"
- " See Documentation/blackfin/bfin-spi-notes.txt");
+ dev_warn(&spi->dev,
+ "Warning: SPI CPHA not set: Slave Select not under software control!\n"
+ "See Documentation/blackfin/bfin-spi-notes.txt\n");
chip->flag = (1 << spi->chip_select) << 8;
} else
chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS;
if (chip->enable_dma && chip->pio_interrupt) {
- dev_err(&spi->dev, "enable_dma is set, "
- "do not set pio_interrupt\n");
+ dev_err(&spi->dev,
+ "enable_dma is set, do not set pio_interrupt\n");
goto error;
}
/*
@@ -1410,10 +1411,10 @@ static int bfin_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int bfin_spi_suspend(struct device *dev)
{
- struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
int status = 0;
status = bfin_spi_stop_queue(drv_data);
@@ -1432,9 +1433,9 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int bfin_spi_resume(struct platform_device *pdev)
+static int bfin_spi_resume(struct device *dev)
{
- struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
int status = 0;
bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
@@ -1443,31 +1444,34 @@ static int bfin_spi_resume(struct platform_device *pdev)
/* Start the queue running */
status = bfin_spi_start_queue(drv_data);
if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
+ dev_err(dev, "problem starting queue (%d)\n", status);
return status;
}
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(bfin_spi_pm_ops, bfin_spi_suspend, bfin_spi_resume);
+
+#define BFIN_SPI_PM_OPS (&bfin_spi_pm_ops)
#else
-#define bfin_spi_suspend NULL
-#define bfin_spi_resume NULL
-#endif /* CONFIG_PM */
+#define BFIN_SPI_PM_OPS NULL
+#endif
MODULE_ALIAS("platform:bfin-spi");
static struct platform_driver bfin_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = BFIN_SPI_PM_OPS,
},
- .suspend = bfin_spi_suspend,
- .resume = bfin_spi_resume,
+ .probe = bfin_spi_probe,
.remove = bfin_spi_remove,
};
static int __init bfin_spi_init(void)
{
- return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe);
+ return platform_driver_register(&bfin_spi_driver);
}
subsys_initcall(bfin_spi_init);
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 8c11355dec2..bd222f6b677 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -191,7 +191,7 @@ int spi_bitbang_setup(struct spi_device *spi)
bitbang = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -258,7 +258,7 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned long flags;
bitbang = spi_master_get_devdata(spi);
@@ -273,7 +273,7 @@ static int spi_bitbang_prepare_hardware(struct spi_master *spi)
static int spi_bitbang_transfer_one(struct spi_master *master,
struct spi_message *m)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned nsecs;
struct spi_transfer *t = NULL;
unsigned cs_change;
@@ -292,7 +292,7 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
cs_change = 1;
status = 0;
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
/* override speed or wordsize? */
if (t->speed_hz || t->bits_per_word)
@@ -349,7 +349,8 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
if (t->delay_usecs)
udelay(t->delay_usecs);
- if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
+ if (cs_change &&
+ !list_is_last(&t->transfer_list, &m->transfers)) {
/* sometimes a short mid-message deselect of the chip
* may be needed to terminate a mode or command
*/
@@ -378,7 +379,7 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned long flags;
bitbang = spi_master_get_devdata(spi);
@@ -414,10 +415,16 @@ static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
* This routine registers the spi_master, which will process requests in a
* dedicated task, keeping IRQs unblocked most of the time. To stop
* processing those requests, call spi_bitbang_stop().
+ *
+ * On success, this routine will take a reference to master. The caller is
+ * responsible for calling spi_bitbang_stop() to decrement the reference and
+ * spi_master_put() as the counterpart of spi_alloc_master() to prevent a
+ * memory leak.
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
+ int ret;
if (!master || !bitbang->chipselect)
return -EINVAL;
@@ -449,7 +456,11 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
- return spi_register_master(master);
+ ret = spi_register_master(spi_master_get(master));
+ if (ret)
+ spi_master_put(master);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 5ed08e53743..8081f96bd1d 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -147,8 +147,8 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
/* we only needed to implement one mode here, and choose SPI_MODE_0 */
-#define spidelay(X) do{}while(0)
-//#define spidelay ndelay
+#define spidelay(X) do { } while (0)
+/* #define spidelay ndelay */
#include "spi-bitbang-txrx.h"
@@ -171,15 +171,15 @@ static struct mtd_partition partitions[] = { {
/* sector 0 = 8 pages * 264 bytes/page (1 block)
* sector 1 = 248 pages * 264 bytes/page
*/
- .name = "bookkeeping", // 66 KB
+ .name = "bookkeeping", /* 66 KB */
.offset = 0,
.size = (8 + 248) * 264,
-// .mask_flags = MTD_WRITEABLE,
+ /* .mask_flags = MTD_WRITEABLE, */
}, {
/* sector 2 = 256 pages * 264 bytes/page
* sectors 3-5 = 512 pages * 264 bytes/page
*/
- .name = "filesystem", // 462 KB
+ .name = "filesystem", /* 462 KB */
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
} };
@@ -209,7 +209,7 @@ static void butterfly_attach(struct parport *p)
* and no way to be selective about what it binds to.
*/
- master = spi_alloc_master(dev, sizeof *pp);
+ master = spi_alloc_master(dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto done;
@@ -225,7 +225,7 @@ static void butterfly_attach(struct parport *p)
master->bus_num = 42;
master->num_chipselect = 2;
- pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.master = master;
pp->bitbang.chipselect = butterfly_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
@@ -289,7 +289,6 @@ static void butterfly_attach(struct parport *p)
pr_debug("%s: dataflash at %s\n", p->name,
dev_name(&pp->dataflash->dev));
- // dev_info(_what?_, ...)
pr_info("%s: AVR Butterfly\n", p->name);
butterfly = pp;
return;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 6416798828e..6f03d7e6435 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -105,7 +105,7 @@ static int spi_clps711x_transfer_one_message(struct spi_master *master,
gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
- INIT_COMPLETION(hw->done);
+ reinit_completion(&hw->done);
hw->count = 0;
hw->len = xfer->len;
@@ -226,10 +226,10 @@ static int spi_clps711x_probe(struct platform_device *pdev)
dev_name(&pdev->dev), hw);
if (ret) {
dev_err(&pdev->dev, "Can't request IRQ\n");
- goto clk_out;
+ goto err_out;
}
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (!ret) {
dev_info(&pdev->dev,
"SPI bus driver initialized. Master clock %u Hz\n",
@@ -239,7 +239,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register master\n");
-clk_out:
err_out:
while (--i >= 0)
if (gpio_is_valid(hw->chipselect[i]))
@@ -260,8 +259,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
if (gpio_is_valid(hw->chipselect[i]))
gpio_free(hw->chipselect[i]);
- spi_unregister_master(master);
-
return 0;
}
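A minimal sketch, for illustration only, of the devm_spi_register_master() pattern this conversion (and several more below) follows; "foo" is a hypothetical driver and everything except the registration calls is elided:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* ... clocks, ioremap, IRQ, master callbacks ... */

	platform_set_drvdata(pdev, master);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);	/* nothing was registered, drop the alloc reference */

	return ret;
}

static int foo_spi_remove(struct platform_device *pdev)
{
	/*
	 * No spi_unregister_master() here: the devres core unregisters the
	 * master automatically when the device is unbound, so remove() only
	 * has to undo what probe() set up by hand.
	 */
	return 0;
}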
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 8fbfe2483ff..50b2d88c819 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -279,7 +279,8 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
struct davinci_spi *dspi;
struct davinci_spi_config *spicfg;
u8 bits_per_word = 0;
- u32 hz = 0, spifmt = 0, prescale = 0;
+ u32 hz = 0, spifmt = 0;
+ int prescale;
dspi = spi_master_get_devdata(spi->master);
spicfg = (struct davinci_spi_config *)spi->controller_data;
@@ -553,7 +554,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
- INIT_COMPLETION(dspi->done);
+ reinit_completion(&dspi->done);
if (spicfg->io_type == SPI_IO_TYPE_INTR)
set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
@@ -916,7 +917,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (ret)
goto unmap_io;
- dspi->bitbang.master = spi_master_get(master);
+ dspi->bitbang.master = master;
if (dspi->bitbang.master == NULL) {
ret = -ENODEV;
goto irq_free;
@@ -925,7 +926,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
- goto put_master;
+ goto irq_free;
}
clk_prepare_enable(dspi->clk);
@@ -1015,8 +1016,6 @@ free_dma:
free_clk:
clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
-put_master:
- spi_master_put(master);
irq_free:
free_irq(dspi->irq, dspi);
unmap_io:
@@ -1024,7 +1023,7 @@ unmap_io:
release_region:
release_mem_region(dspi->pbase, resource_size(r));
free_master:
- kfree(master);
+ spi_master_put(master);
err:
return ret;
}
@@ -1051,11 +1050,11 @@ static int davinci_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
- spi_master_put(master);
free_irq(dspi->irq, dspi);
iounmap(dspi->base);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(dspi->pbase, resource_size(r));
+ spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192758d..6d207afec8c 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 4aa8be865cc..168c620947f 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -74,7 +74,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dwsmmio->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dwsmmio->clk)) {
ret = PTR_ERR(dwsmmio->clk);
- goto err_irq;
+ goto err_unmap;
}
clk_enable(dwsmmio->clk);
@@ -94,8 +94,6 @@ err_clk:
clk_disable(dwsmmio->clk);
clk_put(dwsmmio->clk);
dwsmmio->clk = NULL;
-err_irq:
- free_irq(dws->irq, dws);
err_unmap:
iounmap(dws->regs);
err_release_reg:
@@ -115,7 +113,6 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
clk_put(dwsmmio->clk);
dwsmmio->clk = NULL;
- free_irq(dwsmmio->dws.irq, &dwsmmio->dws);
dw_spi_remove_host(&dwsmmio->dws);
iounmap(dwsmmio->dws.regs);
kfree(dwsmmio);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 6055c8d9fdd..66fa9955ea1 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -40,7 +40,7 @@ static int spi_pci_probe(struct pci_dev *pdev,
int pci_bar = 0;
int ret;
- printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
+ dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
ret = pci_enable_device(pdev);
@@ -109,7 +109,6 @@ static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, NULL);
dw_spi_remove_host(&dwpci->dws);
iounmap(dwpci->dws.regs);
pci_release_region(pdev, 0);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 79c958e49f6..b897c4adb39 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -870,8 +870,8 @@ void dw_spi_remove_host(struct dw_spi *dws)
/* Remove the queue */
status = destroy_queue(dws);
if (status != 0)
- dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
- "complete, message memory not freed\n");
+ dev_err(&dws->master->dev,
+ "dw_spi_remove: workqueue will not complete, message memory not freed\n");
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index 7d84418a01d..d4d3cc53479 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -280,10 +280,6 @@ static irqreturn_t efm32_spi_txirq(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct efm32_spi_pdata efm32_spi_pdata_default = {
- .location = 1,
-};
-
static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
{
u32 reg = efm32_spi_read32(ddata, REG_ROUTE);
@@ -347,7 +343,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
ddata = spi_master_get_devdata(master);
- ddata->bitbang.master = spi_master_get(master);
+ ddata->bitbang.master = master;
ddata->bitbang.chipselect = efm32_spi_chipselect;
ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
@@ -387,7 +383,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- if (resource_size(res) < 60) {
+ if (resource_size(res) < 0x60) {
ret = -EINVAL;
dev_err(&pdev->dev, "memory resource too small\n");
goto err;
@@ -467,7 +463,6 @@ err_disable_clk:
clk_disable_unprepare(ddata->clk);
err:
spi_master_put(master);
- kfree(master);
}
return ret;
@@ -478,13 +473,14 @@ static int efm32_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct efm32_spi_ddata *ddata = spi_master_get_devdata(master);
+ spi_bitbang_stop(&ddata->bitbang);
+
efm32_spi_write32(ddata, 0, REG_IEN);
free_irq(ddata->txirq, ddata);
free_irq(ddata->rxirq, ddata);
clk_disable_unprepare(ddata->clk);
spi_master_put(master);
- kfree(master);
return 0;
}
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index d22c00a227b..1bfaed6e407 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -330,7 +330,7 @@ static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
chip->spi->mode, div_cpsr, div_scr, dss);
- dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
+ dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
ep93xx_spi_write_u16(espi, SSPCR0, cr0);
@@ -509,7 +509,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
}
if (WARN_ON(len)) {
- dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len);
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
return ERR_PTR(-EINVAL);
}
@@ -942,7 +942,7 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
/* make sure that the hardware is disabled */
ep93xx_spi_write_u8(espi, SSPCR1, 0);
- error = spi_register_master(master);
+ error = devm_spi_register_master(&pdev->dev, master);
if (error) {
dev_err(&pdev->dev, "failed to register SPI master\n");
goto fail_free_dma;
@@ -968,7 +968,6 @@ static int ep93xx_spi_remove(struct platform_device *pdev)
ep93xx_spi_release_dma(espi);
- spi_unregister_master(master);
return 0;
}
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 07971e3fe58..54b06376f03 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <asm/cpm.h>
#include <asm/qe.h>
@@ -299,7 +300,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
switch (mspi->subblock) {
default:
- dev_warn(dev, "cell-index unspecified, assuming SPI1");
+ dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
/* fall through */
case 0:
mspi->subblock = QE_CR_SUBBLOCK_SPI1;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4e44575bd87..8641b03bdd7 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -108,7 +108,7 @@ struct fsl_dspi {
struct spi_bitbang bitbang;
struct platform_device *pdev;
- void *base;
+ void __iomem *base;
int irq;
struct clk *clk;
@@ -165,7 +165,7 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
}
}
- pr_warn("Can not find valid buad rate,speed_hz is %d,clkrate is %ld\
+ pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld\
,we use the max prescaler value.\n", speed_hz, clkrate);
*pbr = ARRAY_SIZE(pbr_tbl) - 1;
*br = ARRAY_SIZE(brs) - 1;
@@ -450,7 +450,7 @@ static int dspi_probe(struct platform_device *pdev)
dspi = spi_master_get_devdata(master);
dspi->pdev = pdev;
- dspi->bitbang.master = spi_master_get(master);
+ dspi->bitbang.master = master;
dspi->bitbang.chipselect = dspi_chipselect;
dspi->bitbang.setup_transfer = dspi_setup_transfer;
dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
@@ -520,7 +520,6 @@ out_clk_put:
clk_disable_unprepare(dspi->clk);
out_master_put:
spi_master_put(master);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -531,6 +530,7 @@ static int dspi_remove(struct platform_device *pdev)
/* Disconnect from the SPI framework */
spi_bitbang_stop(&dspi->bitbang);
+ clk_disable_unprepare(dspi->clk);
spi_master_put(dspi->bitbang.master);
return 0;
@@ -547,5 +547,5 @@ static struct platform_driver fsl_dspi_driver = {
module_platform_driver(fsl_dspi_driver);
MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index b8f1103fe28..80d8f40f7e0 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -16,6 +16,8 @@
#include <linux/fsl_devices.h>
#include <linux/mm.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/err.h>
@@ -230,7 +232,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
- INIT_COMPLETION(mpc8xxx_spi->done);
+ reinit_completion(&mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
@@ -289,8 +291,8 @@ static void fsl_espi_do_trans(struct spi_message *m,
if ((first->bits_per_word != t->bits_per_word) ||
(first->speed_hz != t->speed_hz)) {
espi_trans->status = -EINVAL;
- dev_err(mspi->dev, "bits_per_word/speed_hz should be"
- " same for the same SPI transfer\n");
+ dev_err(mspi->dev,
+ "bits_per_word/speed_hz should be same for the same SPI transfer\n");
return;
}
@@ -687,7 +689,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- struct resource irq;
+ unsigned int irq;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev);
@@ -702,13 +704,13 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
if (ret)
goto err;
- ret = of_irq_to_resource(np, 0, &irq);
+ irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
goto err;
}
- master = fsl_espi_probe(dev, &mem, irq.start);
+ master = fsl_espi_probe(dev, &mem, irq);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 2129fcd1c31..119f7af9453 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -339,7 +339,7 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
- INIT_COMPLETION(mpc8xxx_spi->done);
+ reinit_completion(&mpc8xxx_spi->done);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 68b69fec13a..3fb09f98198 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -467,7 +468,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
}
#endif
- spi_gpio->bitbang.master = spi_master_get(master);
+ spi_gpio->bitbang.master = master;
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
@@ -486,7 +487,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
status = spi_bitbang_start(&spi_gpio->bitbang);
if (status < 0) {
- spi_master_put(spi_gpio->bitbang.master);
gpio_free:
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
@@ -510,13 +510,13 @@ static int spi_gpio_remove(struct platform_device *pdev)
/* stop() unregisters child devices too */
status = spi_bitbang_stop(&spi_gpio->bitbang);
- spi_master_put(spi_gpio->bitbang.master);
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
gpio_free(SPI_MOSI_GPIO);
gpio_free(SPI_SCK_GPIO);
+ spi_master_put(spi_gpio->bitbang.master);
return status;
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 15323d8bd9c..b80f2f70fef 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -749,6 +749,35 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
+static int
+spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
+ return 0;
+}
+
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -786,7 +815,7 @@ static int spi_imx_probe(struct platform_device *pdev)
master->num_chipselect = num_cs;
spi_imx = spi_master_get_devdata(master);
- spi_imx->bitbang.master = spi_master_get(master);
+ spi_imx->bitbang.master = master;
for (i = 0; i < master->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -810,6 +839,8 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
spi_imx->bitbang.master->setup = spi_imx_setup;
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
+ spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
+ spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
init_completion(&spi_imx->xfer_done);
@@ -872,6 +903,8 @@ static int spi_imx_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "probed\n");
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
return ret;
out_clk_put:
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 0759b5db988..41c5765be74 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -222,7 +222,7 @@ static void spi_lm70llp_attach(struct parport *p)
/*
* SPI and bitbang hookup.
*/
- pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.master = master;
pp->bitbang.chipselect = lm70_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
pp->bitbang.flags = SPI_3WIRE;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 6adf4e35816..9602bbd8d7e 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/completion.h>
#include <linux/io.h>
@@ -166,7 +167,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
}
/* have the ISR trigger when the TX FIFO is empty */
- INIT_COMPLETION(mps->txisrdone);
+ reinit_completion(&mps->txisrdone);
out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
wait_for_completion(&mps->txisrdone);
@@ -536,7 +537,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
if (ret < 0)
goto free_clock;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret < 0)
goto free_clock;
@@ -559,12 +560,10 @@ static int mpc512x_psc_spi_do_remove(struct device *dev)
struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
- spi_unregister_master(master);
clk_disable_unprepare(mps->clk_mclk);
free_irq(mps->irq, mps);
if (mps->psc)
iounmap(mps->psc);
- spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 6e925dc3439..00ba910ab30 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -383,8 +383,8 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->irq = irq;
if (pdata == NULL) {
- dev_warn(dev, "probe called without platform data, no "
- "cs_control function will be called\n");
+ dev_warn(dev,
+ "probe called without platform data, no cs_control function will be called\n");
mps->cs_control = NULL;
mps->sysclk = 0;
master->bus_num = bus_num;
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index de7b1141b90..73afb56c08c 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -57,34 +57,53 @@
#define SG_MAXLEN 0xff00
+/*
+ * Flags for txrx functions. More efficient than using an argument register for
+ * each one.
+ */
+#define TXRX_WRITE (1<<0) /* This is a write */
+#define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */
+
struct mxs_spi {
struct mxs_ssp ssp;
struct completion c;
+ unsigned int sck; /* Rate requested (vs actual) */
};
static int mxs_spi_setup_transfer(struct spi_device *dev,
- struct spi_transfer *t)
+ const struct spi_transfer *t)
{
struct mxs_spi *spi = spi_master_get_devdata(dev->master);
struct mxs_ssp *ssp = &spi->ssp;
- uint32_t hz = 0;
+ const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
- hz = dev->max_speed_hz;
- if (t && t->speed_hz)
- hz = min(hz, t->speed_hz);
if (hz == 0) {
- dev_err(&dev->dev, "Cannot continue with zero clock\n");
+ dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
return -EINVAL;
}
- mxs_ssp_set_clk_rate(ssp, hz);
+ if (hz != spi->sck) {
+ mxs_ssp_set_clk_rate(ssp, hz);
+ /*
+ * Save requested rate, hz, rather than the actual rate,
+ * ssp->clk_rate. Otherwise we would set the rate on every transfer
+ * when the actual rate is not quite the same as the requested rate.
+ */
+ spi->sck = hz;
+ /*
+ * Perhaps we should return an error if the actual clock is
+ * nowhere close to what was requested?
+ */
+ }
+
+ writel(BM_SSP_CTRL0_LOCK_CS,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
- BF_SSP_CTRL1_WORD_LENGTH
- (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
- ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
- ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
- ssp->base + HW_SSP_CTRL1(ssp));
+ BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
+ ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+ ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
+ ssp->base + HW_SSP_CTRL1(ssp));
writel(0x0, ssp->base + HW_SSP_CMD0);
writel(0x0, ssp->base + HW_SSP_CMD1);
@@ -94,26 +113,15 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
static int mxs_spi_setup(struct spi_device *dev)
{
- int err = 0;
-
if (!dev->bits_per_word)
dev->bits_per_word = 8;
- if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
- return -EINVAL;
-
- err = mxs_spi_setup_transfer(dev, NULL);
- if (err) {
- dev_err(&dev->dev,
- "Failed to setup transfer, error = %d\n", err);
- }
-
- return err;
+ return 0;
}
-static uint32_t mxs_spi_cs_to_reg(unsigned cs)
+static u32 mxs_spi_cs_to_reg(unsigned cs)
{
- uint32_t select = 0;
+ u32 select = 0;
/*
* i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
@@ -131,43 +139,11 @@ static uint32_t mxs_spi_cs_to_reg(unsigned cs)
return select;
}
-static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
-{
- const uint32_t mask =
- BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
- uint32_t select;
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
- select = mxs_spi_cs_to_reg(cs);
- writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
-}
-
-static inline void mxs_spi_enable(struct mxs_spi *spi)
-{
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(BM_SSP_CTRL0_LOCK_CS,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- writel(BM_SSP_CTRL0_IGNORE_CRC,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
-}
-
-static inline void mxs_spi_disable(struct mxs_spi *spi)
-{
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(BM_SSP_CTRL0_LOCK_CS,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(BM_SSP_CTRL0_IGNORE_CRC,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
-}
-
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
struct mxs_ssp *ssp = &spi->ssp;
- uint32_t reg;
+ u32 reg;
do {
reg = readl_relaxed(ssp->base + offset);
@@ -200,9 +176,9 @@ static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
+static int mxs_spi_txrx_dma(struct mxs_spi *spi,
unsigned char *buf, int len,
- int *first, int *last, int write)
+ unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
struct dma_async_tx_descriptor *desc = NULL;
@@ -211,11 +187,11 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
const int sgs = DIV_ROUND_UP(len, desc_len);
int sg_count;
int min, ret;
- uint32_t ctrl0;
+ u32 ctrl0;
struct page *vm_page;
void *sg_buf;
struct {
- uint32_t pio[4];
+ u32 pio[4];
struct scatterlist sg;
} *dma_xfer;
@@ -226,23 +202,27 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
if (!dma_xfer)
return -ENOMEM;
- INIT_COMPLETION(spi->c);
+ reinit_completion(&spi->c);
+ /* Chip select was already programmed into CTRL0 */
ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
- ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
- ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
+ ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
+ BM_SSP_CTRL0_READ);
+ ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
- if (*first)
- ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
- if (!write)
+ if (!(flags & TXRX_WRITE))
ctrl0 |= BM_SSP_CTRL0_READ;
/* Queue the DMA data transfer. */
for (sg_count = 0; sg_count < sgs; sg_count++) {
+ /* Prepare the transfer descriptor. */
min = min(len, desc_len);
- /* Prepare the transfer descriptor. */
- if ((sg_count + 1 == sgs) && *last)
+ /*
+ * De-assert CS on last segment if flag is set (i.e., no more
+ * transfers will follow)
+ */
+ if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
if (ssp->devid == IMX23_SSP) {
@@ -267,7 +247,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
len -= min;
buf += min;
@@ -287,7 +267,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
- write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
@@ -324,7 +304,7 @@ err_vmalloc:
while (--sg_count >= 0) {
err_mapped:
dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
kfree(dma_xfer);
@@ -332,20 +312,19 @@ err_mapped:
return ret;
}
-static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
+static int mxs_spi_txrx_pio(struct mxs_spi *spi,
unsigned char *buf, int len,
- int *first, int *last, int write)
+ unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
- if (*first)
- mxs_spi_enable(spi);
-
- mxs_spi_set_cs(spi, cs);
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
while (len--) {
- if (*last && len == 0)
- mxs_spi_disable(spi);
+ if (len == 0 && (flags & TXRX_DEASSERT_CS))
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (ssp->devid == IMX23_SSP) {
writel(BM_SSP_CTRL0_XFER_COUNT,
@@ -356,7 +335,7 @@ static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
writel(1, ssp->base + HW_SSP_XFER_SIZE);
}
- if (write)
+ if (flags & TXRX_WRITE)
writel(BM_SSP_CTRL0_READ,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
else
@@ -369,13 +348,13 @@ static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
return -ETIMEDOUT;
- if (write)
+ if (flags & TXRX_WRITE)
writel(*buf, ssp->base + HW_SSP_DATA(ssp));
writel(BM_SSP_CTRL0_DATA_XFER,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- if (!write) {
+ if (!(flags & TXRX_WRITE)) {
if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
BM_SSP_STATUS_FIFO_EMPTY, 0))
return -ETIMEDOUT;
@@ -400,14 +379,15 @@ static int mxs_spi_transfer_one(struct spi_master *master,
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
- int first, last;
struct spi_transfer *t, *tmp_t;
+ unsigned int flag;
int status = 0;
- int cs;
-
- first = last = 0;
- cs = m->spi->chip_select;
+ /* Program the CS register bits here; they will be used for all transfers. */
+ writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(mxs_spi_cs_to_reg(m->spi->chip_select),
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
@@ -415,16 +395,9 @@ static int mxs_spi_transfer_one(struct spi_master *master,
if (status)
break;
- if (&t->transfer_list == m->transfers.next)
- first = 1;
- if (&t->transfer_list == m->transfers.prev)
- last = 1;
- if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
- dev_err(ssp->dev,
- "Cannot send and receive simultaneously\n");
- status = -EINVAL;
- break;
- }
+ /* De-assert on last transfer, inverted by cs_change flag */
+ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
+ TXRX_DEASSERT_CS : 0;
/*
* Small blocks can be transfered via PIO.
@@ -441,26 +414,26 @@ static int mxs_spi_transfer_one(struct spi_master *master,
STMP_OFFSET_REG_CLR);
if (t->tx_buf)
- status = mxs_spi_txrx_pio(spi, cs,
+ status = mxs_spi_txrx_pio(spi,
(void *)t->tx_buf,
- t->len, &first, &last, 1);
+ t->len, flag | TXRX_WRITE);
if (t->rx_buf)
- status = mxs_spi_txrx_pio(spi, cs,
+ status = mxs_spi_txrx_pio(spi,
t->rx_buf, t->len,
- &first, &last, 0);
+ flag);
} else {
writel(BM_SSP_CTRL1_DMA_ENABLE,
ssp->base + HW_SSP_CTRL1(ssp) +
STMP_OFFSET_REG_SET);
if (t->tx_buf)
- status = mxs_spi_txrx_dma(spi, cs,
+ status = mxs_spi_txrx_dma(spi,
(void *)t->tx_buf, t->len,
- &first, &last, 1);
+ flag | TXRX_WRITE);
if (t->rx_buf)
- status = mxs_spi_txrx_dma(spi, cs,
+ status = mxs_spi_txrx_dma(spi,
t->rx_buf, t->len,
- &first, &last, 0);
+ flag);
}
if (status) {
@@ -469,7 +442,6 @@ static int mxs_spi_transfer_one(struct spi_master *master,
}
m->actual_length += t->len;
- first = last = 0;
}
m->status = status;
@@ -563,7 +535,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_dma_release;
clk_set_rate(ssp->clk, clk_freq);
- ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
ret = stmp_reset_block(ssp->base);
if (ret)
@@ -571,7 +542,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
goto out_disable_clk;
@@ -598,10 +569,8 @@ static int mxs_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- spi_unregister_master(master);
clk_disable_unprepare(ssp->clk);
dma_release_channel(ssp->dmach);
- spi_master_put(master);
return 0;
}
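A short sketch, for illustration only, of how the new per-transfer flag word is built; the mxs_spi_txrx_flags() helper below is hypothetical (the driver computes the same expression inline in mxs_spi_transfer_one()), and the table spells out the last-transfer/cs_change combinations:

/*
 *   last  cs_change   de-assert CS?
 *    no      no        no  (keep CS asserted for the next transfer)
 *    no      yes       yes (caller asked for a mid-message deselect)
 *    yes     no        yes (normal end of message)
 *    yes     yes       no  (caller wants CS left asserted)
 */
static unsigned int mxs_spi_txrx_flags(bool last, bool cs_change, bool write)
{
	unsigned int flags = 0;

	if (write)
		flags |= TXRX_WRITE;
	if (last ^ cs_change)
		flags |= TXRX_DEASSERT_CS;
	return flags;
}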
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 47a68b43bcd..e0c32bc69ee 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -349,7 +349,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
}
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
@@ -435,7 +435,6 @@ err_iomap:
kfree(hw->ioarea);
err_pdata:
spi_master_put(hw->master);
-
err_nomem:
return err;
}
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 333cb1badcd..91c66859620 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -306,7 +306,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = spi_master_get(master);
+ hw->bitbang.master = master;
if (!hw->bitbang.master)
return err;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 5f28ddbe4f7..67249a48b39 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -272,7 +272,7 @@ static int octeon_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
- err = spi_register_master(master);
+ err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "register master failed: %d\n", err);
goto fail;
@@ -292,8 +292,6 @@ static int octeon_spi_remove(struct platform_device *pdev)
struct octeon_spi *p = spi_master_get_devdata(master);
u64 register_base = p->register_base;
- spi_unregister_master(master);
-
/* Clear the CSENA* and put everything in a known state. */
cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 69ecf05757d..b6ed82beb01 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -457,7 +457,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
goto err;
}
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto err;
@@ -485,8 +485,6 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi_unregister_master(master);
-
return 0;
}
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index a6a8f096175..9313fd3b413 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -557,7 +557,8 @@ static struct platform_driver uwire_driver = {
.name = "omap_uwire",
.owner = THIS_MODULE,
},
- .remove = uwire_remove,
+ .probe = uwire_probe,
+ .remove = uwire_remove,
// suspend ... unuse ck
// resume ... use ck
};
@@ -579,7 +580,7 @@ static int __init omap_uwire_init(void)
omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
}
- return platform_driver_probe(&uwire_driver, uwire_probe);
+ return platform_driver_register(&uwire_driver);
}
static void __exit omap_uwire_exit(void)
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index ed4af4708d9..443df39840b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -276,7 +276,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
unsigned int wcnt;
- int fifo_depth, bytes_per_word;
+ int max_fifo_depth, fifo_depth, bytes_per_word;
u32 chconf, xferlevel;
mcspi = spi_master_get_devdata(master);
@@ -287,7 +287,12 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
if (t->len % bytes_per_word != 0)
goto disable_fifo;
- fifo_depth = gcd(t->len, OMAP2_MCSPI_MAX_FIFODEPTH);
+ if (t->rx_buf != NULL && t->tx_buf != NULL)
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+ else
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
+
+ fifo_depth = gcd(t->len, max_fifo_depth);
if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
goto disable_fifo;
@@ -299,7 +304,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
xferlevel |= (fifo_depth - 1) << 8;
- } else {
+ }
+ if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
xferlevel |= fifo_depth - 1;
}
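Worked example for the FIFO split above, assuming the usual 64-byte McSPI FIFO (OMAP2_MCSPI_MAX_FIFODEPTH == 64): a 64-byte full-duplex transfer now gets max_fifo_depth = 32, so fifo_depth = gcd(64, 32) = 32 and both FFER and FFET are enabled with half of the FIFO per direction, whereas a 64-byte RX-only transfer can still use the full depth, gcd(64, 64) = 64.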
@@ -498,7 +504,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
((u32 *)xfer->rx_buf)[elements++] = w;
} else {
int bytes_per_word = mcspi_bytes_per_word(word_len);
- dev_err(&spi->dev, "DMA RX penultimate word empty");
+ dev_err(&spi->dev, "DMA RX penultimate word empty\n");
count -= (bytes_per_word << 1);
omap2_mcspi_set_enable(spi, 1);
return count;
@@ -516,7 +522,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
else /* word_len <= 32 */
((u32 *)xfer->rx_buf)[elements] = w;
} else {
- dev_err(&spi->dev, "DMA RX last word empty");
+ dev_err(&spi->dev, "DMA RX last word empty\n");
count -= mcspi_bytes_per_word(word_len);
}
omap2_mcspi_set_enable(spi, 1);
@@ -1407,7 +1413,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
if (status < 0)
goto disable_pm;
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto disable_pm;
@@ -1435,7 +1441,6 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
kfree(dma_channels);
return 0;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 1d1d321d90c..744841e095e 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -84,8 +84,8 @@ static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
ORION_SPI_IF_8_16_BIT_MODE);
} else {
- pr_debug("Bad bits per word value %d (only 8 or 16 are "
- "allowed).\n", size);
+ pr_debug("Bad bits per word value %d (only 8 or 16 are allowed).\n",
+ size);
return -EINVAL;
}
@@ -407,7 +407,7 @@ static int orion_spi_probe(struct platform_device *pdev)
const u32 *iprop;
int size;
- master = spi_alloc_master(&pdev->dev, sizeof *spi);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
@@ -457,7 +457,7 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out_rel_clk;
master->dev.of_node = pdev->dev.of_node;
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto out_rel_clk;
@@ -483,8 +483,6 @@ static int orion_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(spi->clk);
clk_put(spi->clk);
- spi_unregister_master(master);
-
return 0;
}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 9c511a954d2..2789b452e71 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1619,7 +1619,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
- break;
}
switch (chip_info->tx_lev_trig) {
case SSP_TX_1_OR_MORE_EMPTY_LOC:
@@ -1645,7 +1644,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
- break;
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -2175,8 +2173,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
status = -ENOMEM;
goto err_no_ioremap;
}
- printk(KERN_INFO "pl022: mapped registers from %pa to %p\n",
- &adev->res.start, pl022->virtbase);
+ dev_info(&adev->dev, "mapped registers from %pa to %p\n",
+ &adev->res.start, pl022->virtbase);
pl022->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
@@ -2227,7 +2225,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
/* Register with the SPI framework */
amba_set_drvdata(adev, pl022);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&adev->dev, master);
if (status != 0) {
dev_err(&adev->dev,
"probe - problem registering spi master\n");
@@ -2287,8 +2285,6 @@ pl022_remove(struct amba_device *adev)
clk_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
- spi_unregister_master(pl022->master);
- amba_set_drvdata(adev, NULL);
return 0;
}
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 0ee53c25ba5..5ee56726f8d 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -29,6 +29,8 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/interrupt.h>
@@ -396,7 +398,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
master->dev.of_node = np;
platform_set_drvdata(op, master);
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->dev = dev;
init_completion(&hw->done);
@@ -558,6 +560,7 @@ static int spi_ppc4xx_of_remove(struct platform_device *op)
free_irq(hw->irqnum, hw);
iounmap(hw->regs);
free_gpios(hw);
+ spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index c1a50674c1e..cb0e1f1137a 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -573,8 +573,8 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
write_SSTO(0, reg);
write_SSSR_CS(drv_data, drv_data->clear_sr);
- dev_err(&drv_data->pdev->dev, "bad message state "
- "in interrupt handler\n");
+ dev_err(&drv_data->pdev->dev,
+ "bad message state in interrupt handler\n");
/* Never fail */
return IRQ_HANDLED;
@@ -651,8 +651,8 @@ static void pump_transfers(unsigned long data)
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&drv_data->pdev->dev,
- "pump_transfers: mapped transfer length "
- "of %u is greater than %d\n",
+ "pump_transfers: mapped transfer length of "
+ "%u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
message->status = -EINVAL;
giveback(drv_data);
@@ -660,11 +660,10 @@ static void pump_transfers(unsigned long data)
}
/* warn ... we force this to PIO mode */
- if (printk_ratelimit())
- dev_warn(&message->spi->dev, "pump_transfers: "
- "DMA disabled for transfer length %ld "
- "greater than %d\n",
- (long)drv_data->len, MAX_DMA_LEN);
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA disabled for transfer length %ld "
+ "greater than %d\n",
+ (long)drv_data->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
@@ -726,11 +725,8 @@ static void pump_transfers(unsigned long data)
message->spi,
bits, &dma_burst,
&dma_thresh))
- if (printk_ratelimit())
- dev_warn(&message->spi->dev,
- "pump_transfers: "
- "DMA burst size reduced to "
- "match bits_per_word\n");
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA burst size reduced to match bits_per_word\n");
}
cr0 = clk_div
@@ -854,8 +850,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
if (gpio_is_valid(chip_info->gpio_cs)) {
err = gpio_request(chip_info->gpio_cs, "SPI_CS");
if (err) {
- dev_err(&spi->dev, "failed to request chip select "
- "GPIO%d\n", chip_info->gpio_cs);
+ dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
+ chip_info->gpio_cs);
return err;
}
@@ -899,8 +895,8 @@ static int setup(struct spi_device *spi)
if (drv_data->ssp_type == CE4100_SSP) {
if (spi->chip_select > 4) {
- dev_err(&spi->dev, "failed setup: "
- "cs number must not be > 4.\n");
+ dev_err(&spi->dev,
+ "failed setup: cs number must not be > 4.\n");
kfree(chip);
return -EINVAL;
}
@@ -956,8 +952,8 @@ static int setup(struct spi_device *spi)
spi->bits_per_word,
&chip->dma_burst_size,
&chip->dma_threshold)) {
- dev_warn(&spi->dev, "in setup: DMA burst size reduced "
- "to match bits_per_word\n");
+ dev_warn(&spi->dev,
+ "in setup: DMA burst size reduced to match bits_per_word\n");
}
}
@@ -1205,7 +1201,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
goto out_error_clock_enabled;
@@ -1257,9 +1253,6 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
/* Release SSP */
pxa_ssp_free(ssp);
- /* Disconnect from the SPI framework */
- spi_unregister_master(drv_data->master);
-
return 0;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 8719206a03a..58449ad4ad0 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -59,6 +59,14 @@
#define RSPI_SPCMD6 0x1c
#define RSPI_SPCMD7 0x1e
+/* qspi only */
+#define QSPI_SPBFCR 0x18
+#define QSPI_SPBDCR 0x1a
+#define QSPI_SPBMUL0 0x1c
+#define QSPI_SPBMUL1 0x20
+#define QSPI_SPBMUL2 0x24
+#define QSPI_SPBMUL3 0x28
+
/* SPCR */
#define SPCR_SPRIE 0x80
#define SPCR_SPE 0x40
@@ -126,6 +134,8 @@
#define SPCMD_LSBF 0x1000
#define SPCMD_SPB_MASK 0x0f00
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
+#define SPCMD_SPB_8BIT 0x0000 /* qspi only */
+#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
#define SPCMD_SPB_32BIT 0x0200
@@ -135,6 +145,10 @@
#define SPCMD_CPOL 0x0002
#define SPCMD_CPHA 0x0001
+/* SPBFCR */
+#define SPBFCR_TXRST 0x80 /* qspi only */
+#define SPBFCR_RXRST 0x40 /* qspi only */
+
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
@@ -145,6 +159,7 @@ struct rspi_data {
spinlock_t lock;
struct clk *clk;
unsigned char spsr;
+ const struct spi_ops *ops;
/* for dmaengine */
struct dma_chan *chan_tx;
@@ -165,6 +180,11 @@ static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
iowrite16(data, rspi->addr + offset);
}
+static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
+{
+ iowrite32(data, rspi->addr + offset);
+}
+
static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
{
return ioread8(rspi->addr + offset);
@@ -175,17 +195,103 @@ static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
return ioread16(rspi->addr + offset);
}
-static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
+/* optional functions */
+struct spi_ops {
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t);
+ int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t);
+
+};
+
+/*
+ * functions for RSPI
+ */
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
+ rspi_write8(rspi, 0x00, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Sets number of frames to be used: 1 frame */
+ rspi_write8(rspi, 0x00, RSPI_SPDCR);
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
+ RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for QSPI
+ */
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- int tmp;
- unsigned char spbr;
+ u16 spcmd;
+ int spbr;
+
+ /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
+ rspi_write8(rspi, 0x00, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Sets number of frames to be used: 1 frame */
+ rspi_write8(rspi, 0x00, RSPI_SPDCR);
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Data Length Setting */
+ if (access_size == 8)
+ spcmd = SPCMD_SPB_8BIT;
+ else if (access_size == 16)
+ spcmd = SPCMD_SPB_16BIT;
+ else if (access_size == 32)
+ spcmd = SPCMD_SPB_32BIT;
+
+ spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
+
+ /* Resets transfer data length */
+ rspi_write32(rspi, 0, QSPI_SPBMUL0);
+
+ /* Resets transmit and receive buffer */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ /* Sets buffer to allow normal operation */
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, spcmd, RSPI_SPCMD0);
- tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
- spbr = clamp(tmp, 0, 255);
+ /* Enables SPI function in master mode */
+ rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
- return spbr;
+ return 0;
}
+#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
+
static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -220,54 +326,60 @@ static void rspi_negate_ssl(struct rspi_data *rspi)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}
-static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
{
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
-
- /* Sets transfer bit rate */
- rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);
-
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ int remain = t->len;
+ u8 *data;
- /* Sets RSPCK, SSL, next-access delay value */
- rspi_write8(rspi, 0x00, RSPI_SPCKD);
- rspi_write8(rspi, 0x00, RSPI_SSLND);
- rspi_write8(rspi, 0x00, RSPI_SPND);
+ data = (u8 *)t->tx_buf;
+ while (remain > 0) {
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
+ RSPI_SPCR);
- /* Sets parity, interrupt mask */
- rspi_write8(rspi, 0x00, RSPI_SPCR2);
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: tx empty timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
- /* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
- RSPI_SPCMD0);
+ rspi_write16(rspi, *data, RSPI_SPDR);
+ data++;
+ remain--;
+ }
- /* Sets RSPI mode */
- rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
return 0;
}
-static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
+static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
struct spi_transfer *t)
{
int remain = t->len;
u8 *data;
+ rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
data = (u8 *)t->tx_buf;
while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
- RSPI_SPCR);
if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
dev_err(&rspi->master->dev,
"%s: tx empty timeout\n", __func__);
return -ETIMEDOUT;
}
+ rspi_write8(rspi, *data++, RSPI_SPDR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: receive timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ rspi_read8(rspi, RSPI_SPDR);
- rspi_write16(rspi, *data, RSPI_SPDR);
- data++;
remain--;
}
@@ -277,6 +389,8 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
+#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
+
static void rspi_dma_complete(void *arg)
{
struct rspi_data *rspi = arg;
@@ -442,6 +556,51 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
+static void qspi_receive_init(struct rspi_data *rspi)
+{
+ unsigned char spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read8(rspi, RSPI_SPDR); /* dummy read */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+}
+
+static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int remain = t->len;
+ u8 *data;
+
+ qspi_receive_init(rspi);
+
+ data = (u8 *)t->rx_buf;
+ while (remain > 0) {
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: tx empty timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ /* dummy write to generate the clock */
+ rspi_write8(rspi, 0x00, RSPI_SPDR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: receive timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ /* SPDR allows 8, 16 or 32-bit access */
+ *data++ = rspi_read8(rspi, RSPI_SPDR);
+ remain--;
+ }
+
+ return 0;
+}
+
+#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
+
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg, sg_dummy;
@@ -581,7 +740,7 @@ static void rspi_work(struct work_struct *work)
if (rspi_is_dma(rspi, t))
ret = rspi_send_dma(rspi, t);
else
- ret = rspi_send_pio(rspi, mesg, t);
+ ret = send_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
@@ -589,7 +748,7 @@ static void rspi_work(struct work_struct *work)
if (rspi_is_dma(rspi, t))
ret = rspi_receive_dma(rspi, t);
else
- ret = rspi_receive_pio(rspi, mesg, t);
+ ret = receive_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
@@ -616,7 +775,7 @@ static int rspi_setup(struct spi_device *spi)
spi->bits_per_word = 8;
rspi->max_speed_hz = spi->max_speed_hz;
- rspi_set_config_register(rspi, 8);
+ set_config_register(rspi, 8);
return 0;
}
@@ -745,7 +904,16 @@ static int rspi_probe(struct platform_device *pdev)
struct rspi_data *rspi;
int ret, irq;
char clk_name[16];
-
+ struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ const struct spi_ops *ops;
+ const struct platform_device_id *id_entry = pdev->id_entry;
+
+ ops = (struct spi_ops *)id_entry->driver_data;
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ return -ENODEV;
+ }
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
@@ -767,7 +935,7 @@ static int rspi_probe(struct platform_device *pdev)
rspi = spi_master_get_devdata(master);
platform_set_drvdata(pdev, rspi);
-
+ rspi->ops = ops;
rspi->master = master;
rspi->addr = ioremap(res->start, resource_size(res));
if (rspi->addr == NULL) {
@@ -776,7 +944,7 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
- snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
+ snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
rspi->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
@@ -790,7 +958,10 @@ static int rspi_probe(struct platform_device *pdev)
INIT_WORK(&rspi->ws, rspi_work);
init_waitqueue_head(&rspi->wait);
- master->num_chipselect = 2;
+ master->num_chipselect = rspi_pd->num_chipselect;
+ if (!master->num_chipselect)
+ master->num_chipselect = 2; /* default */
+
master->bus_num = pdev->id;
master->setup = rspi_setup;
master->transfer = rspi_transfer;
@@ -832,11 +1003,32 @@ error1:
return ret;
}
+static struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .send_pio = rspi_send_pio,
+ .receive_pio = rspi_receive_pio,
+};
+
+static struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .send_pio = qspi_send_pio,
+ .receive_pio = qspi_receive_pio,
+};
+
+static struct platform_device_id spi_driver_ids[] = {
+ { "rspi", (kernel_ulong_t)&rspi_ops },
+ { "qspi", (kernel_ulong_t)&qspi_ops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
+ .id_table = spi_driver_ids,
.driver = {
- .name = "rspi",
+ .name = "renesas_spi",
.owner = THIS_MODULE,
},
};
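A sketch, for illustration only, of the board-code side that the new id_table enables: registering a platform device named "qspi" selects qspi_ops through id_entry->driver_data, num_chipselect comes from struct rspi_plat_data, and the clock is looked up as "<name><id>" (e.g. "qspi0"). The addresses, IRQ number and helper name below are made up:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/spi/rspi.h>

static const struct resource board_qspi_resources[] = {
	DEFINE_RES_MEM(0xe6b10000, 0x100),	/* hypothetical register window */
	DEFINE_RES_IRQ(184),			/* hypothetical SPI interrupt */
};

static const struct rspi_plat_data board_qspi_pdata = {
	.num_chipselect	= 1,
};

static void __init board_register_qspi(void)
{
	platform_device_register_resndata(NULL, "qspi", 0,
					  board_qspi_resources,
					  ARRAY_SIZE(board_qspi_resources),
					  &board_qspi_pdata,
					  sizeof(board_qspi_pdata));
}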
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index ce318d95a6e..0dc32a11bd3 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -280,7 +280,7 @@ static inline u32 ack_bit(unsigned int irq)
* so the caller does not need to do anything more than start the transfer
* as normal, since the IRQ will have been re-routed to the FIQ handler.
*/
-void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
{
struct pt_regs regs;
enum spi_fiq_mode mode;
@@ -524,7 +524,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
memset(hw, 0, sizeof(struct s3c24xx_spi));
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->pdata = pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index a80376dc3a1..4c4b0a1219a 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -205,7 +205,6 @@ struct s3c64xx_spi_driver_data {
#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
- unsigned long gpios[4];
bool cs_gpio;
};
@@ -559,25 +558,18 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
- struct s3c64xx_spi_csinfo *cs;
-
if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
- cs = sdd->tgl_spi->controller_data;
- if (sdd->cs_gpio)
- gpio_set_value(cs->line,
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio,
spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
- cs = spi->controller_data;
- if (sdd->cs_gpio)
- gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
-
- /* Start the signals */
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -702,16 +694,11 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
- struct s3c64xx_spi_csinfo *cs = spi->controller_data;
-
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
- if (sdd->cs_gpio)
- gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
-
- /* Quiese the signals */
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -862,16 +849,12 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
}
}
-static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int s3c64xx_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
- struct spi_transfer *xfer;
- int status = 0, cs_toggle = 0;
- u32 speed;
- u8 bpw;
/* If Master's(controller) state differs from that needed by Slave */
if (sdd->cur_speed != spi->max_speed_hz
@@ -887,106 +870,98 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
if (s3c64xx_spi_map_mssg(sdd, msg)) {
dev_err(&spi->dev,
"Xfer: Unable to map message buffers!\n");
- status = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- unsigned long flags;
- int use_dma;
-
- INIT_COMPLETION(sdd->xfer_completion);
-
- /* Only BPW and Speed may change across transfers */
- bpw = xfer->bits_per_word;
- speed = xfer->speed_hz ? : spi->max_speed_hz;
-
- if (xfer->len % (bpw / 8)) {
- dev_err(&spi->dev,
- "Xfer length(%u) not a multiple of word size(%u)\n",
- xfer->len, bpw / 8);
- status = -EIO;
- goto out;
- }
+ return 0;
+}
- if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
- sdd->cur_bpw = bpw;
- sdd->cur_speed = speed;
- s3c64xx_spi_config(sdd);
- }
+static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int status;
+ u32 speed;
+ u8 bpw;
+ unsigned long flags;
+ int use_dma;
- /* Polling method for xfers not bigger than FIFO capacity */
- use_dma = 0;
- if (!is_polling(sdd) &&
- (sdd->rx_dma.ch && sdd->tx_dma.ch &&
- (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
- use_dma = 1;
+ reinit_completion(&sdd->xfer_completion);
- spin_lock_irqsave(&sdd->lock, flags);
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word;
+ speed = xfer->speed_hz ? : spi->max_speed_hz;
- /* Pending only which is to be done */
- sdd->state &= ~RXBUSY;
- sdd->state &= ~TXBUSY;
+ if (xfer->len % (bpw / 8)) {
+ dev_err(&spi->dev,
+ "Xfer length(%u) not a multiple of word size(%u)\n",
+ xfer->len, bpw / 8);
+ return -EIO;
+ }
- enable_datapath(sdd, spi, xfer, use_dma);
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ s3c64xx_spi_config(sdd);
+ }
- /* Slave Select */
- enable_cs(sdd, spi);
+ /* Polling method for xfers not bigger than FIFO capacity */
+ use_dma = 0;
+ if (!is_polling(sdd) &&
+ (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+ (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
+ use_dma = 1;
- spin_unlock_irqrestore(&sdd->lock, flags);
+ spin_lock_irqsave(&sdd->lock, flags);
- status = wait_for_xfer(sdd, xfer, use_dma);
+ /* Pending only which is to be done */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
- if (status) {
- dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
- xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
- (sdd->state & RXBUSY) ? 'f' : 'p',
- (sdd->state & TXBUSY) ? 'f' : 'p',
- xfer->len);
+ enable_datapath(sdd, spi, xfer, use_dma);
- if (use_dma) {
- if (xfer->tx_buf != NULL
- && (sdd->state & TXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
- if (xfer->rx_buf != NULL
- && (sdd->state & RXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
- }
+ /* Start the signals */
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- goto out;
- }
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ spin_unlock_irqrestore(&sdd->lock, flags);
- if (xfer->cs_change) {
- /* Hint that the next mssg is gonna be
- for the same device */
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers))
- cs_toggle = 1;
+ status = wait_for_xfer(sdd, xfer, use_dma);
+
+ if (status) {
+ dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len);
+
+ if (use_dma) {
+ if (xfer->tx_buf != NULL
+ && (sdd->state & TXBUSY))
+ s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
+ if (xfer->rx_buf != NULL
+ && (sdd->state & RXBUSY))
+ s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
}
-
- msg->actual_length += xfer->len;
-
+ } else {
flush_fifo(sdd);
}
-out:
- if (!cs_toggle || status)
- disable_cs(sdd, spi);
- else
- sdd->tgl_spi = spi;
-
- s3c64xx_spi_unmap_mssg(sdd, msg);
+ return status;
+}
- msg->status = status;
+static int s3c64xx_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- spi_finalize_current_message(master);
+ s3c64xx_spi_unmap_mssg(sdd, msg);
return 0;
}
@@ -1071,6 +1046,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
cs->line, err);
goto err_gpio_req;
}
+
+ spi->cs_gpio = cs->line;
}
spi_set_ctldata(spi, cs);
@@ -1117,11 +1094,14 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
pm_runtime_put(&sdd->pdev->dev);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
disable_cs(sdd, spi);
return 0;
setup_exit:
+ pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
disable_cs(sdd, spi);
gpio_free(cs->line);
@@ -1140,8 +1120,8 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi)
struct s3c64xx_spi_driver_data *sdd;
sdd = spi_master_get_devdata(spi->master);
- if (cs && sdd->cs_gpio) {
- gpio_free(cs->line);
+ if (spi->cs_gpio) {
+ gpio_free(spi->cs_gpio);
if (spi->dev.of_node)
kfree(cs);
}
@@ -1359,7 +1339,9 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->setup = s3c64xx_spi_setup;
master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
- master->transfer_one_message = s3c64xx_spi_transfer_one_message;
+ master->prepare_message = s3c64xx_spi_prepare_message;
+ master->transfer_one = s3c64xx_spi_transfer_one;
+ master->unprepare_message = s3c64xx_spi_unprepare_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
@@ -1428,11 +1410,12 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- if (spi_register_master(master)) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
- ret = -EBUSY;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
goto err3;
}
@@ -1461,16 +1444,12 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
-
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
clk_disable_unprepare(sdd->src_clk);
clk_disable_unprepare(sdd->clk);
- spi_master_put(master);
-
return 0;
}
@@ -1480,11 +1459,14 @@ static int s3c64xx_spi_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- spi_master_suspend(master);
+ int ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
- /* Disable the clock */
- clk_disable_unprepare(sdd->src_clk);
- clk_disable_unprepare(sdd->clk);
+ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ }
sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1500,15 +1482,14 @@ static int s3c64xx_spi_resume(struct device *dev)
if (sci->cfg_gpio)
sci->cfg_gpio();
- /* Enable the clock */
- clk_prepare_enable(sdd->src_clk);
- clk_prepare_enable(sdd->clk);
+ if (!pm_runtime_suspended(dev)) {
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
+ }
s3c64xx_spi_hwinit(sdd, sdd->port_id);
- spi_master_resume(master);
-
- return 0;
+ return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
@@ -1528,9 +1509,17 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int ret;
- clk_prepare_enable(sdd->src_clk);
- clk_prepare_enable(sdd->clk);
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret != 0)
+ return ret;
+
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret != 0) {
+ clk_disable_unprepare(sdd->src_clk);
+ return ret;
+ }
return 0;
}
@@ -1616,6 +1605,18 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
};
static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,s3c2443-spi",
+ .data = (void *)&s3c2443_spi_port_config,
+ },
+ { .compatible = "samsung,s3c6410-spi",
+ .data = (void *)&s3c6410_spi_port_config,
+ },
+ { .compatible = "samsung,s5pc100-spi",
+ .data = (void *)&s5pc100_spi_port_config,
+ },
+ { .compatible = "samsung,s5pv210-spi",
+ .data = (void *)&s5pv210_spi_port_config,
+ },
{ .compatible = "samsung,exynos4210-spi",
.data = (void *)&exynos4_spi_port_config,
},
@@ -1633,22 +1634,13 @@ static struct platform_driver s3c64xx_spi_driver = {
.pm = &s3c64xx_spi_pm,
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
+ .probe = s3c64xx_spi_probe,
.remove = s3c64xx_spi_remove,
.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
-static int __init s3c64xx_spi_init(void)
-{
- return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
-}
-subsys_initcall(s3c64xx_spi_init);
-
-static void __exit s3c64xx_spi_exit(void)
-{
- platform_driver_unregister(&s3c64xx_spi_driver);
-}
-module_exit(s3c64xx_spi_exit);
+module_platform_driver(s3c64xx_spi_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index e488a90a98b..292567ab4c6 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -137,7 +137,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
rate /= 16;
/* CLKCx calculation */
- rate /= (((idiv_clk & 0x1F) + 1) * 2) ;
+ rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
tmp = abs(target_rate - rate);
@@ -303,9 +303,10 @@ static int hspi_probe(struct platform_device *pdev)
master->setup = hspi_setup;
master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
goto error1;
@@ -328,17 +329,23 @@ static int hspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_put(hspi->clk);
- spi_unregister_master(hspi->master);
return 0;
}
+static struct of_device_id hspi_of_match[] = {
+ { .compatible = "renesas,hspi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hspi_of_match);
+
static struct platform_driver hspi_driver = {
.probe = hspi_probe,
.remove = hspi_remove,
.driver = {
.name = "sh-hspi",
.owner = THIS_MODULE,
+ .of_match_table = hspi_of_match,
},
};
module_platform_driver(hspi_driver);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2a95435a6a1..c74298cf70e 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -465,7 +465,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
/* start by setting frame bit */
- INIT_COMPLETION(p->done);
+ reinit_completion(&p->done);
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
if (ret) {
dev_err(&p->pdev->dev, "failed to start hardware\n");
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 8eefeb6007d..38eb24df796 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -133,7 +133,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
sp->info = dev_get_platdata(&dev->dev);
/* setup spi bitbang adaptor */
- sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master = master;
sp->bitbang.master->bus_num = sp->info->bus_num;
sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
sp->bitbang.chipselect = sh_sci_spi_chipselect;
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index a1f21b74773..ed5e501c465 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -305,8 +305,8 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
- INIT_COMPLETION(sspi->rx_done);
- INIT_COMPLETION(sspi->tx_done);
+ reinit_completion(&sspi->rx_done);
+ reinit_completion(&sspi->tx_done);
writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
@@ -632,7 +632,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
if (ret)
goto free_master;
- sspi->bitbang.master = spi_master_get(master);
+ sspi->bitbang.master = master;
sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 145dd435483..aaecfb3ebf5 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -182,6 +182,7 @@ struct tegra_spi_data {
u32 cur_speed;
struct spi_device *cur_spi;
+ struct spi_device *cs_control;
unsigned cur_pos;
unsigned cur_len;
unsigned words_per_32bit;
@@ -267,7 +268,7 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
unsigned max_len;
unsigned total_fifo_words;
- tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if (bits_per_word == 8 || bits_per_word == 16) {
tspi->is_packed = 1;
@@ -450,7 +451,7 @@ static void tegra_spi_dma_complete(void *args)
static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
{
- INIT_COMPLETION(tspi->tx_dma_complete);
+ reinit_completion(&tspi->tx_dma_complete);
tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -469,7 +470,7 @@ static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
{
- INIT_COMPLETION(tspi->rx_dma_complete);
+ reinit_completion(&tspi->rx_dma_complete);
tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -676,15 +677,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
-static int tegra_spi_start_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, bool is_first_of_msg,
- bool is_single_xfer)
+static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
- unsigned total_fifo_words;
- int ret;
unsigned long command1;
int req_mode;
@@ -698,7 +696,6 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
tspi->cur_rx_pos = 0;
tspi->cur_tx_pos = 0;
tspi->curr_xfer = t;
- total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
if (is_first_of_msg) {
tegra_spi_clear_status(tspi);
@@ -717,7 +714,12 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
else if (req_mode == SPI_MODE_3)
command1 |= SPI_CONTROL_MODE_3;
- tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ if (tspi->cs_control) {
+ if (tspi->cs_control != spi)
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->cs_control = NULL;
+ } else
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
@@ -732,6 +734,18 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
}
+ return command1;
+}
+
+static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, unsigned long command1)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned total_fifo_words;
+ int ret;
+
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+
if (tspi->is_packed)
command1 |= SPI_PACKED;
@@ -803,29 +817,50 @@ static int tegra_spi_setup(struct spi_device *spi)
return 0;
}
+static void tegra_spi_transfer_delay(int delay)
+{
+ if (!delay)
+ return;
+
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+
+ udelay(delay % 1000);
+}
+
static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
bool is_first_msg = true;
- int single_xfer;
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
+ bool skip = false;
msg->status = 0;
msg->actual_length = 0;
- single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- INIT_COMPLETION(tspi->xfer_completion);
- ret = tegra_spi_start_transfer_one(spi, xfer,
- is_first_msg, single_xfer);
+ unsigned long cmd1;
+
+ reinit_completion(&tspi->xfer_completion);
+
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
+
+ if (!xfer->len) {
+ ret = 0;
+ skip = true;
+ goto complete_xfer;
+ }
+
+ ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
if (ret < 0) {
dev_err(tspi->dev,
"spi can not start transfer, err %d\n", ret);
- goto exit;
+ goto complete_xfer;
}
+
is_first_msg = false;
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SPI_DMA_TIMEOUT);
@@ -833,24 +868,40 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
dev_err(tspi->dev,
"spi trasfer timeout, err %d\n", ret);
ret = -EIO;
- goto exit;
+ goto complete_xfer;
}
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "Error in Transfer\n");
ret = -EIO;
- goto exit;
+ goto complete_xfer;
}
msg->actual_length += xfer->len;
- if (xfer->cs_change && xfer->delay_usecs) {
+
+complete_xfer:
+ if (ret < 0 || skip) {
tegra_spi_writel(tspi, tspi->def_command1_reg,
SPI_COMMAND1);
- udelay(xfer->delay_usecs);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ goto exit;
+ } else if (msg->transfers.prev == &xfer->transfer_list) {
+ /* This is the last transfer in message */
+ if (xfer->cs_change)
+ tspi->cs_control = spi;
+ else {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ }
+ } else if (xfer->cs_change) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
}
+
}
ret = 0;
exit:
- tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
msg->status = ret;
spi_finalize_current_message(master);
return ret;
@@ -1115,7 +1166,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
@@ -1142,7 +1193,6 @@ static int tegra_spi_remove(struct platform_device *pdev)
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
free_irq(tspi->irq, tspi);
- spi_unregister_master(master);
if (tspi->tx_dma_chan)
tegra_spi_deinit_dma_param(tspi, false);
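The bytes_per_word change here (and the matching hunks in spi-tegra20-sflash.c and spi-tegra20-slink.c below) is purely a readability cleanup: for any bits_per_word of at least 1, (bits_per_word - 1) / 8 + 1 and DIV_ROUND_UP(bits_per_word, 8) are the same value. A minimal illustration, assuming only <linux/kernel.h>:

#include <linux/kernel.h>	/* DIV_ROUND_UP(n, d) is ((n) + (d) - 1) / (d) */

static inline unsigned int example_bytes_per_word(unsigned int bits_per_word)
{
	/* 1..8 bits -> 1 byte, 9..16 -> 2, 17..24 -> 3, 25..32 -> 4 */
	return DIV_ROUND_UP(bits_per_word, 8);
}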
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 1d814dc6e00..4dc8e812945 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -173,7 +173,7 @@ static unsigned tegra_sflash_calculate_curr_xfer_param(
unsigned remain_len = t->len - tsd->cur_pos;
unsigned max_word;
- tsd->bytes_per_word = (t->bits_per_word - 1) / 8 + 1;
+ tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
max_word = remain_len / tsd->bytes_per_word;
if (max_word > SPI_FIFO_DEPTH)
max_word = SPI_FIFO_DEPTH;
@@ -339,7 +339,7 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
msg->actual_length = 0;
single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- INIT_COMPLETION(tsd->xfer_completion);
+ reinit_completion(&tsd->xfer_completion);
ret = tegra_sflash_start_transfer_one(spi, xfer,
is_first_msg, single_xfer);
if (ret < 0) {
@@ -529,7 +529,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
@@ -553,7 +553,6 @@ static int tegra_sflash_remove(struct platform_device *pdev)
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
free_irq(tsd->irq, tsd);
- spi_unregister_master(master);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index c70353672a2..e66715ba37e 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -278,12 +278,12 @@ static unsigned tegra_slink_calculate_curr_xfer_param(
{
unsigned remain_len = t->len - tspi->cur_pos;
unsigned max_word;
- unsigned bits_per_word ;
+ unsigned bits_per_word;
unsigned max_len;
unsigned total_fifo_words;
bits_per_word = t->bits_per_word;
- tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if (bits_per_word == 8 || bits_per_word == 16) {
tspi->is_packed = 1;
@@ -462,7 +462,7 @@ static void tegra_slink_dma_complete(void *args)
static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
{
- INIT_COMPLETION(tspi->tx_dma_complete);
+ reinit_completion(&tspi->tx_dma_complete);
tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -481,7 +481,7 @@ static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
{
- INIT_COMPLETION(tspi->rx_dma_complete);
+ reinit_completion(&tspi->rx_dma_complete);
tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -707,8 +707,7 @@ static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
}
static int tegra_slink_start_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, bool is_first_of_msg,
- bool is_single_xfer)
+ struct spi_transfer *t)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
u32 speed;
@@ -732,32 +731,12 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
tspi->curr_xfer = t;
total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
- if (is_first_of_msg) {
- tegra_slink_clear_status(tspi);
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
- command = tspi->def_command_reg;
- command |= SLINK_BIT_LENGTH(bits_per_word - 1);
- command |= SLINK_CS_SW | SLINK_CS_VALUE;
-
- command2 = tspi->def_command2_reg;
- command2 |= SLINK_SS_EN_CS(spi->chip_select);
-
- command &= ~SLINK_MODES;
- if (spi->mode & SPI_CPHA)
- command |= SLINK_CK_SDA;
-
- if (spi->mode & SPI_CPOL)
- command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
- else
- command |= SLINK_IDLE_SCLK_DRIVE_LOW;
- } else {
- command = tspi->command_reg;
- command &= ~SLINK_BIT_LENGTH(~0);
- command |= SLINK_BIT_LENGTH(bits_per_word - 1);
-
- command2 = tspi->command2_reg;
- command2 &= ~(SLINK_RXEN | SLINK_TXEN);
- }
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
tegra_slink_writel(tspi, command, SLINK_COMMAND);
tspi->command_reg = command;
@@ -824,58 +803,72 @@ static int tegra_slink_setup(struct spi_device *spi)
return 0;
}
-static int tegra_slink_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int tegra_slink_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- bool is_first_msg = true;
- int single_xfer;
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
- int ret;
- msg->status = 0;
- msg->actual_length = 0;
+ tegra_slink_clear_status(tspi);
- single_xfer = list_is_singular(&msg->transfers);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- INIT_COMPLETION(tspi->xfer_completion);
- ret = tegra_slink_start_transfer_one(spi, xfer,
- is_first_msg, single_xfer);
- if (ret < 0) {
- dev_err(tspi->dev,
- "spi can not start transfer, err %d\n", ret);
- goto exit;
- }
- is_first_msg = false;
- ret = wait_for_completion_timeout(&tspi->xfer_completion,
- SLINK_DMA_TIMEOUT);
- if (WARN_ON(ret == 0)) {
- dev_err(tspi->dev,
- "spi trasfer timeout, err %d\n", ret);
- ret = -EIO;
- goto exit;
- }
+ tspi->command_reg = tspi->def_command_reg;
+ tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;
- if (tspi->tx_status || tspi->rx_status) {
- dev_err(tspi->dev, "Error in Transfer\n");
- ret = -EIO;
- goto exit;
- }
- msg->actual_length += xfer->len;
- if (xfer->cs_change && xfer->delay_usecs) {
- tegra_slink_writel(tspi, tspi->def_command_reg,
- SLINK_COMMAND);
- udelay(xfer->delay_usecs);
- }
+ tspi->command2_reg = tspi->def_command2_reg;
+ tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select);
+
+ tspi->command_reg &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ tspi->command_reg |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ return 0;
+}
+
+static int tegra_slink_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ reinit_completion(&tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ return ret;
}
- ret = 0;
-exit:
+
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ return -EIO;
+ }
+
+ if (tspi->tx_status)
+ return tspi->tx_status;
+ if (tspi->rx_status)
+ return tspi->rx_status;
+
+ return 0;
+}
+
+static int tegra_slink_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
- msg->status = ret;
- spi_finalize_current_message(master);
- return ret;
+
+ return 0;
}
static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
@@ -1078,7 +1071,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_slink_setup;
- master->transfer_one_message = tegra_slink_transfer_one_message;
+ master->prepare_message = tegra_slink_prepare_message;
+ master->transfer_one = tegra_slink_transfer_one;
+ master->unprepare_message = tegra_slink_unprepare_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
@@ -1164,7 +1159,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
@@ -1191,7 +1186,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
free_irq(tspi->irq, tspi);
- spi_unregister_master(master);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
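Both the s3c64xx and tegra20-slink conversions move per-message setup into prepare_message()/unprepare_message() and per-transfer work into transfer_one(). The core's spi_transfer_one_message() (added to drivers/spi/spi.c later in this diff) drives those hooks roughly in the order sketched below; this is a simplified illustration with chip-select handling, delays and the xfer_completion wait omitted, and example_pump_message() is an invented name.

#include <linux/list.h>
#include <linux/spi/spi.h>

static int example_pump_message(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret;

	ret = master->prepare_message(master, msg);	/* once per message */
	if (ret)
		return ret;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = master->transfer_one(master, msg->spi, xfer);
		if (ret < 0)
			break;
		msg->actual_length += xfer->len;
	}

	master->unprepare_message(master, msg);		/* once per message */
	return ret < 0 ? ret : 0;
}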
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index e12d962a289..0b71270fbf6 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -41,9 +41,6 @@ struct ti_qspi_regs {
struct ti_qspi {
struct completion transfer_complete;
- /* IRQ synchronization */
- spinlock_t lock;
-
/* list synchronization */
struct mutex list_lock;
@@ -57,7 +54,6 @@ struct ti_qspi {
u32 spi_max_frequency;
u32 cmd;
u32 dc;
- u32 stat;
};
#define QSPI_PID (0x0)
@@ -397,13 +393,12 @@ static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
{
struct ti_qspi *qspi = dev_id;
u16 int_stat;
+ u32 stat;
irqreturn_t ret = IRQ_HANDLED;
- spin_lock(&qspi->lock);
-
int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
- qspi->stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
if (!int_stat) {
dev_dbg(qspi->dev, "No IRQ triggered\n");
@@ -411,35 +406,14 @@ static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
goto out;
}
- ret = IRQ_WAKE_THREAD;
-
- ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
QSPI_INTR_STATUS_ENABLED_CLEAR);
-
+ if (stat & WC)
+ complete(&qspi->transfer_complete);
out:
- spin_unlock(&qspi->lock);
-
return ret;
}
-static irqreturn_t ti_qspi_threaded_isr(int this_irq, void *dev_id)
-{
- struct ti_qspi *qspi = dev_id;
- unsigned long flags;
-
- spin_lock_irqsave(&qspi->lock, flags);
-
- if (qspi->stat & WC)
- complete(&qspi->transfer_complete);
-
- spin_unlock_irqrestore(&qspi->lock, flags);
-
- ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
-
- return IRQ_HANDLED;
-}
-
static int ti_qspi_runtime_resume(struct device *dev)
{
struct ti_qspi *qspi;
@@ -472,7 +446,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
master->bus_num = -1;
master->flags = SPI_MASTER_HALF_DUPLEX;
@@ -499,7 +473,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
return irq;
}
- spin_lock_init(&qspi->lock);
mutex_init(&qspi->list_lock);
qspi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -508,8 +481,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, ti_qspi_isr,
- ti_qspi_threaded_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
dev_name(&pdev->dev), qspi);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
@@ -532,7 +504,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
qspi->spi_max_frequency = max_freq;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto free_master;
@@ -547,7 +519,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
{
struct ti_qspi *qspi = platform_get_drvdata(pdev);
- spi_unregister_master(qspi->master);
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
return 0;
}
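With the threaded handler gone, the ti-qspi ISR above acknowledges the word-complete event and signals the waiter directly from hard-IRQ context, which complete() allows. A self-contained sketch of that shape; the register offsets, bit name and example_* identifiers are all invented for illustration.

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_STATUS_REG	0x10	/* invented offsets and bits */
#define EXAMPLE_STATUS_CLEAR	0x14
#define EXAMPLE_WC		BIT(0)

struct example_qspi {
	void __iomem *base;
	struct completion transfer_complete;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_qspi *qspi = dev_id;
	u32 stat = readl(qspi->base + EXAMPLE_STATUS_REG);

	if (!(stat & EXAMPLE_WC))
		return IRQ_NONE;

	writel(EXAMPLE_WC, qspi->base + EXAMPLE_STATUS_CLEAR);	/* ack */
	complete(&qspi->transfer_complete);	/* safe from hard-IRQ context */
	return IRQ_HANDLED;
}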
@@ -558,7 +530,7 @@ static const struct dev_pm_ops ti_qspi_pm_ops = {
static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
- .remove = ti_qspi_remove,
+ .remove = ti_qspi_remove,
.driver = {
.name = "ti,dra7xxx-qspi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index eaeeed51bbb..446131308ac 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -506,8 +506,8 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
goto err_out;
}
- dev_dbg(&pspi->dev, "%s Transfer List not empty. "
- "Transfer Speed is set.\n", __func__);
+ dev_dbg(&pspi->dev,
+ "%s Transfer List not empty. Transfer Speed is set.\n", __func__);
spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
@@ -526,8 +526,9 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
goto err_return_spinlock;
}
- dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
- " valid\n", __func__);
+ dev_dbg(&pspi->dev,
+ "%s Tx/Rx buffer valid. Transfer length valid\n",
+ __func__);
/* if baud rate has been specified validate the same */
if (transfer->speed_hz > PCH_MAX_BAUDRATE)
@@ -1181,8 +1182,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
spin_lock(&data->lock);
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
- "flushing queue\n", __func__);
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n", __func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
@@ -1410,13 +1411,13 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
/* baseaddress + address offset) */
data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
PCH_ADDRESS_SIZE * plat_dev->id;
- data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
- PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
if (!data->io_remap_addr) {
dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
ret = -ENOMEM;
goto err_pci_iomap;
}
+ data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 7c6d15766c7..637cce2b8bd 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -177,7 +177,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
| 0x08,
TXx9_SPCR0);
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
const void *txbuf = t->tx_buf;
void *rxbuf = t->rx_buf;
u32 data;
@@ -308,7 +308,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
m->actual_length = 0;
/* check each transfer's parameters */
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
u8 bits_per_word = t->bits_per_word;
@@ -406,7 +406,7 @@ static int txx9spi_probe(struct platform_device *dev)
master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&dev->dev, master);
if (ret)
goto exit;
return 0;
@@ -428,11 +428,9 @@ static int txx9spi_remove(struct platform_device *dev)
struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
struct txx9spi *c = spi_master_get_devdata(master);
- spi_unregister_master(master);
destroy_workqueue(c->workqueue);
clk_disable(c->clk);
clk_put(c->clk);
- spi_master_put(master);
return 0;
}
@@ -440,6 +438,7 @@ static int txx9spi_remove(struct platform_device *dev)
MODULE_ALIAS("platform:spi_txx9");
static struct platform_driver txx9spi_driver = {
+ .probe = txx9spi_probe,
.remove = txx9spi_remove,
.driver = {
.name = "spi_txx9",
@@ -449,7 +448,7 @@ static struct platform_driver txx9spi_driver = {
static int __init txx9spi_init(void)
{
- return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
+ return platform_driver_register(&txx9spi_driver);
}
subsys_initcall(txx9spi_init);
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 0bf1b2c457a..6d4ce461516 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -258,7 +258,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->tx_ptr = t->tx_buf;
xspi->rx_ptr = t->rx_buf;
xspi->remaining_bytes = t->len;
- INIT_COMPLETION(xspi->done);
+ reinit_completion(&xspi->done);
/* Enable the transmit empty interrupt, which we use to determine
@@ -372,7 +372,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA;
xspi = spi_master_get_devdata(master);
- xspi->bitbang.master = spi_master_get(master);
+ xspi->bitbang.master = master;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9e039c60c06..18cc625d887 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -39,6 +39,9 @@
#include <linux/ioport.h>
#include <linux/acpi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/spi.h>
+
static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
@@ -58,11 +61,13 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute spi_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *spi_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(spi_dev);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
@@ -229,7 +234,7 @@ static const struct dev_pm_ops spi_pm = {
struct bus_type spi_bus_type = {
.name = "spi",
- .dev_attrs = spi_dev_attrs,
+ .dev_groups = spi_dev_groups,
.match = spi_match_device,
.uevent = spi_uevent,
.pm = &spi_pm,
@@ -240,15 +245,27 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ acpi_dev_pm_attach(&spi->dev, true);
+ ret = sdrv->probe(spi);
+ if (ret)
+ acpi_dev_pm_detach(&spi->dev, true);
- return sdrv->probe(to_spi_device(dev));
+ return ret;
}
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ ret = sdrv->remove(spi);
+ acpi_dev_pm_detach(&spi->dev, true);
- return sdrv->remove(to_spi_device(dev));
+ return ret;
}
static void spi_drv_shutdown(struct device *dev)
@@ -323,7 +340,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
if (!spi_master_get(master))
return NULL;
- spi = kzalloc(sizeof *spi, GFP_KERNEL);
+ spi = kzalloc(sizeof(*spi), GFP_KERNEL);
if (!spi) {
dev_err(dev, "cannot alloc spi_device\n");
spi_master_put(master);
@@ -340,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
+static void spi_dev_set_name(struct spi_device *spi)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+
+ if (adev) {
+ dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+ spi->chip_select);
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -366,9 +396,7 @@ int spi_add_device(struct spi_device *spi)
}
/* Set the bus ID string */
- dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
- spi->chip_select);
-
+ spi_dev_set_name(spi);
/* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
@@ -523,6 +551,95 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
/*-------------------------------------------------------------------------*/
+static void spi_set_cs(struct spi_device *spi, bool enable)
+{
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !enable);
+ else if (spi->master->set_cs)
+ spi->master->set_cs(spi, !enable);
+}
+
+/*
+ * spi_transfer_one_message - Default implementation of transfer_one_message()
+ *
+ * This is a standard implementation of transfer_one_message() for
+ * drivers which implement a transfer_one() operation. It provides
+ * standard handling of delays and chip select management.
+ */
+static int spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ bool cur_cs = true;
+ bool keep_cs = false;
+ int ret = 0;
+
+ spi_set_cs(msg->spi, true);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ trace_spi_transfer_start(msg, xfer);
+
+ reinit_completion(&master->xfer_completion);
+
+ ret = master->transfer_one(master, msg->spi, xfer);
+ if (ret < 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer failed: %d\n", ret);
+ goto out;
+ }
+
+ if (ret > 0)
+ wait_for_completion(&master->xfer_completion);
+
+ trace_spi_transfer_stop(msg, xfer);
+
+ if (msg->status != -EINPROGRESS)
+ goto out;
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ cur_cs = !cur_cs;
+ spi_set_cs(msg->spi, cur_cs);
+ }
+ }
+
+ msg->actual_length += xfer->len;
+ }
+
+out:
+ if (ret != 0 || !keep_cs)
+ spi_set_cs(msg->spi, false);
+
+ if (msg->status == -EINPROGRESS)
+ msg->status = ret;
+
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+/**
+ * spi_finalize_current_transfer - report completion of a transfer
+ *
+ * Called by SPI drivers using the core transfer_one_message()
+ * implementation to notify it that the current interrupt-driven
+ * transfer has finished and the next one may be scheduled.
+ */
+void spi_finalize_current_transfer(struct spi_master *master)
+{
+ complete(&master->xfer_completion);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
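A hedged usage sketch for the helper just added: a transfer_one() implementation that starts an interrupt-driven transfer returns a positive value, the core then waits on master->xfer_completion, and the driver's interrupt handler releases it with spi_finalize_current_transfer(). This is illustrative only; the example_* names are placeholders.

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

static int example_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* ... program FIFOs/DMA and enable the "done" interrupt here ... */

	return 1;	/* positive: the core waits for the completion */
}

static irqreturn_t example_done_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	/* Wakes spi_transfer_one_message() for the transfer in flight. */
	spi_finalize_current_transfer(master);
	return IRQ_HANDLED;
}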
+
/**
* spi_pump_messages - kthread work function which processes spi message queue
* @work: pointer to kthread work struct contained in the master struct
@@ -557,6 +674,7 @@ static void spi_pump_messages(struct kthread_work *work)
pm_runtime_mark_last_busy(master->dev.parent);
pm_runtime_put_autosuspend(master->dev.parent);
}
+ trace_spi_master_idle(master);
return;
}
@@ -585,6 +703,9 @@ static void spi_pump_messages(struct kthread_work *work)
}
}
+ if (!was_busy)
+ trace_spi_master_busy(master);
+
if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
@@ -597,6 +718,20 @@ static void spi_pump_messages(struct kthread_work *work)
}
}
+ trace_spi_message_start(master->cur_msg);
+
+ if (master->prepare_message) {
+ ret = master->prepare_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare message: %d\n", ret);
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+ master->cur_msg_prepared = true;
+ }
+
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
@@ -678,6 +813,7 @@ void spi_finalize_current_message(struct spi_master *master)
{
struct spi_message *mesg;
unsigned long flags;
+ int ret;
spin_lock_irqsave(&master->queue_lock, flags);
mesg = master->cur_msg;
@@ -686,9 +822,20 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (master->cur_msg_prepared && master->unprepare_message) {
+ ret = master->unprepare_message(master, mesg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to unprepare message: %d\n", ret);
+ }
+ }
+ master->cur_msg_prepared = false;
+
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
+
+ trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
@@ -803,6 +950,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
master->queued = true;
master->transfer = spi_queued_transfer;
+ if (!master->transfer_one_message)
+ master->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(master);
@@ -838,10 +987,8 @@ static void of_register_spi_devices(struct spi_master *master)
{
struct spi_device *spi;
struct device_node *nc;
- const __be32 *prop;
- char modalias[SPI_NAME_SIZE + 4];
int rc;
- int len;
+ u32 value;
if (!master->dev.of_node)
return;
@@ -866,14 +1013,14 @@ static void of_register_spi_devices(struct spi_master *master)
}
/* Device address */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'reg' property\n",
- nc->full_name);
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
spi_dev_put(spi);
continue;
}
- spi->chip_select = be32_to_cpup(prop);
+ spi->chip_select = value;
/* Mode (clock phase/polarity/etc.) */
if (of_find_property(nc, "spi-cpha", NULL))
@@ -886,55 +1033,53 @@ static void of_register_spi_devices(struct spi_master *master)
spi->mode |= SPI_3WIRE;
/* Device DUAL/QUAD mode */
- prop = of_get_property(nc, "spi-tx-bus-width", &len);
- if (prop && len == sizeof(*prop)) {
- switch (be32_to_cpup(prop)) {
- case SPI_NBITS_SINGLE:
+ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+ switch (value) {
+ case 1:
break;
- case SPI_NBITS_DUAL:
+ case 2:
spi->mode |= SPI_TX_DUAL;
break;
- case SPI_NBITS_QUAD:
+ case 4:
spi->mode |= SPI_TX_QUAD;
break;
default:
dev_err(&master->dev,
"spi-tx-bus-width %d not supported\n",
- be32_to_cpup(prop));
+ value);
spi_dev_put(spi);
continue;
}
}
- prop = of_get_property(nc, "spi-rx-bus-width", &len);
- if (prop && len == sizeof(*prop)) {
- switch (be32_to_cpup(prop)) {
- case SPI_NBITS_SINGLE:
+ if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+ switch (value) {
+ case 1:
break;
- case SPI_NBITS_DUAL:
+ case 2:
spi->mode |= SPI_RX_DUAL;
break;
- case SPI_NBITS_QUAD:
+ case 4:
spi->mode |= SPI_RX_QUAD;
break;
default:
dev_err(&master->dev,
"spi-rx-bus-width %d not supported\n",
- be32_to_cpup(prop));
+ value);
spi_dev_put(spi);
continue;
}
}
/* Device speed */
- prop = of_get_property(nc, "spi-max-frequency", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
- nc->full_name);
+ rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+ nc->full_name, rc);
spi_dev_put(spi);
continue;
}
- spi->max_speed_hz = be32_to_cpup(prop);
+ spi->max_speed_hz = value;
/* IRQ */
spi->irq = irq_of_parse_and_map(nc, 0);
@@ -944,9 +1089,7 @@ static void of_register_spi_devices(struct spi_master *master)
spi->dev.of_node = nc;
/* Register the new device */
- snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
- spi->modalias);
- request_module(modalias);
+ request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
@@ -1012,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_NO_MEMORY;
}
- ACPI_HANDLE_SET(&spi->dev, handle);
+ ACPI_COMPANION_SET(&spi->dev, adev);
spi->irq = -1;
INIT_LIST_HEAD(&resource_list);
@@ -1025,8 +1168,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_OK;
}
- strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
+ adev->power.flags.ignore_parent = true;
+ strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
if (spi_add_device(spi)) {
+ adev->power.flags.ignore_parent = false;
dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
dev_name(&adev->dev));
spi_dev_put(spi);
@@ -1097,7 +1242,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
if (!dev)
return NULL;
- master = kzalloc(size + sizeof *master, GFP_KERNEL);
+ master = kzalloc(size + sizeof(*master), GFP_KERNEL);
if (!master)
return NULL;
@@ -1122,7 +1267,7 @@ static int of_spi_register_master(struct spi_master *master)
return 0;
nb = of_gpio_named_count(np, "cs-gpios");
- master->num_chipselect = max(nb, (int)master->num_chipselect);
+ master->num_chipselect = max_t(int, nb, master->num_chipselect);
/* Return error only for an incorrectly formed cs-gpios property */
if (nb == 0 || nb == -ENOENT)
@@ -1209,6 +1354,7 @@ int spi_register_master(struct spi_master *master)
spin_lock_init(&master->bus_lock_spinlock);
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
+ init_completion(&master->xfer_completion);
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
@@ -1245,6 +1391,41 @@ done:
}
EXPORT_SYMBOL_GPL(spi_register_master);
+static void devm_spi_unregister(struct device *dev, void *res)
+{
+ spi_unregister_master(*(struct spi_master **)res);
+}
+
+/**
+ * devm_spi_register_master - register managed SPI master controller
+ * @dev: device managing SPI master
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+ *
+ * Register a SPI master as with spi_register_master(), which will
+ * automatically be unregistered when @dev is unbound.
+ */
+int devm_spi_register_master(struct device *dev, struct spi_master *master)
+{
+ struct spi_master **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = spi_register_master(master);
+ if (!ret) {
+ *ptr = master;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_spi_register_master);
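For reference, a minimal probe sketch showing how the devm_spi_register_master() conversions earlier in this diff are meant to be used; example_spi_probe() is an invented name and the setup of the master is elided.

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* ... set bus_num, num_chipselect, transfer_one() and friends ... */

	/* Unregistration now happens automatically when the device is
	 * unbound, so remove() no longer calls spi_unregister_master(). */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);
	return ret;
}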
+
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
@@ -1402,8 +1583,7 @@ int spi_setup(struct spi_device *spi)
if (spi->master->setup)
status = spi->master->setup(spi);
- dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
- "%u bits/w, %u Hz max --> %d\n",
+ dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
@@ -1421,6 +1601,10 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
+ message->spi = spi;
+
+ trace_spi_message_submit(message);
+
if (list_empty(&message->transfers))
return -EINVAL;
if (!message->complete)
@@ -1520,7 +1704,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
}
}
- message->spi = spi;
message->status = -EINPROGRESS;
return master->transfer(spi, message);
}
@@ -1762,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *master)
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
+#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
static u8 *buf;
@@ -1811,7 +1994,7 @@ int spi_write_then_read(struct spi_device *spi,
}
spi_message_init(&message);
- memset(x, 0, sizeof x);
+ memset(x, 0, sizeof(x));
if (n_tx) {
x[0].len = n_tx;
spi_message_add_tail(&x[0], &message);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index ca5bcfe874d..d7c6e36021e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -37,7 +37,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
@@ -206,9 +206,9 @@ spidev_write(struct file *filp, const char __user *buf,
mutex_lock(&spidev->buf_lock);
missing = copy_from_user(spidev->buffer, buf, count);
- if (missing == 0) {
+ if (missing == 0)
status = spidev_sync_write(spidev, count);
- } else
+ else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
@@ -629,7 +629,6 @@ static int spidev_remove(struct spi_device *spi)
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
- spi_set_drvdata(spi, NULL);
spin_unlock_irq(&spidev->spi_lock);
/* prevent new opens */
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index e55ddf7cd7c..32a811d11c2 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -374,7 +374,8 @@ static ssize_t \
attrib##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
return sprintf(buf, format_string, dev_to_ssb_dev(dev)->field); \
-}
+} \
+static DEVICE_ATTR_RO(attrib);
ssb_config_attr(core_num, core_index, "%u\n")
ssb_config_attr(coreid, id.coreid, "0x%04x\n")
@@ -387,16 +388,18 @@ name_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n",
ssb_core_name(dev_to_ssb_dev(dev)->id.coreid));
}
-
-static struct device_attribute ssb_device_attrs[] = {
- __ATTR_RO(name),
- __ATTR_RO(core_num),
- __ATTR_RO(coreid),
- __ATTR_RO(vendor),
- __ATTR_RO(revision),
- __ATTR_RO(irq),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *ssb_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_core_num.attr,
+ &dev_attr_coreid.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_irq.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(ssb_device);
static struct bus_type ssb_bustype = {
.name = "ssb",
@@ -407,7 +410,7 @@ static struct bus_type ssb_bustype = {
.suspend = ssb_device_suspend,
.resume = ssb_device_resume,
.uevent = ssb_device_uevent,
- .dev_attrs = ssb_device_attrs,
+ .dev_groups = ssb_device_groups,
};
static void ssb_buses_lock(void)
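The spi and ssb hunks above perform the same dev_attrs to dev_groups migration. A self-contained sketch of the pattern, with "example" standing in for the real attribute prefix:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(example);		/* defines dev_attr_example */

static struct attribute *example_dev_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_dev);		/* defines example_dev_groups */

/* The bus_type then points at the group array instead of a
 * device_attribute array:
 *	.dev_groups = example_dev_groups,
 */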
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3626dbc8eb0..3bfdaa8d80a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -136,6 +136,8 @@ source "drivers/staging/goldfish/Kconfig"
source "drivers/staging/netlogic/Kconfig"
+source "drivers/staging/mt29f_spinand/Kconfig"
+
source "drivers/staging/dwc2/Kconfig"
source "drivers/staging/lustre/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index d1b4b8003c2..b0d3303b468 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -66,3 +66,4 @@ obj-$(CONFIG_USB_BTMTK) += btmtk_usb/
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_DGAP) += dgap/
+obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index c0c95be0f96..1e9ab6dfc90 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -10,6 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
+ depends on MMU
default n
---help---
Binder is used in Android for both communication between processes,
@@ -76,7 +77,7 @@ config SYNC
bool "Synchronization framework"
default n
select ANON_INODES
- help
+ ---help---
This option enables the framework for synchronization between multiple
drivers. Sync implementations can take advantage of hardware
synchronization built into devices like GPUs.
@@ -85,7 +86,7 @@ config SW_SYNC
bool "Software synchronization objects"
default n
depends on SYNC
- help
+ ---help---
A sync object driver that uses a 32bit counter to coordinate
syncrhronization. Useful when there is no hardware primitive backing
the synchronization.
@@ -94,7 +95,7 @@ config SW_SYNC_USER
bool "Userspace API for SW_SYNC"
default n
depends on SW_SYNC
- help
+ ---help---
Provides a user space API to the sw sync object.
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index 6dc27dac679..647694f43dc 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -60,7 +60,12 @@ struct devalarm {
static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
-
+/**
+ * is_wakeup() - Checks to see if this alarm can wake the device
+ * @type: The type of alarm being checked
+ *
+ * Return: 1 if this is a wakeup alarm, otherwise 0
+ */
static int is_wakeup(enum android_alarm_type type)
{
return (type == ANDROID_ALARM_RTC_WAKEUP ||
@@ -76,7 +81,6 @@ static void devalarm_start(struct devalarm *alrm, ktime_t exp)
hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS);
}
-
static int devalarm_try_to_cancel(struct devalarm *alrm)
{
if (is_wakeup(alrm->type))
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8e76ddca099..23948f16701 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -37,41 +37,59 @@
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
-/*
- * ashmem_area - anonymous shared memory area
- * Lifecycle: From our parent file's open() until its release()
- * Locking: Protected by `ashmem_mutex'
- * Big Note: Mappings do NOT pin this structure; it dies on close()
+/**
+ * struct ashmem_area - The anonymous shared memory area
+ * @name: The optional name in /proc/pid/maps
+ * @unpinned_list: The list of all ashmem areas
+ * @file: The shmem-based backing file
+ * @size: The size of the mapping, in bytes
+ * @prot_masks: The allowed protection bits, as vm_flags
+ *
+ * The lifecycle of this structure is from our parent file's open() until
+ * its release(). It is also protected by 'ashmem_mutex'
+ *
+ * Warning: Mappings do NOT pin this structure; It dies on close()
*/
struct ashmem_area {
- char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
- struct list_head unpinned_list; /* list of all ashmem areas */
- struct file *file; /* the shmem-based backing file */
- size_t size; /* size of the mapping, in bytes */
- unsigned long prot_mask; /* allowed prot bits, as vm_flags */
+ char name[ASHMEM_FULL_NAME_LEN];
+ struct list_head unpinned_list;
+ struct file *file;
+ size_t size;
+ unsigned long prot_mask;
};
-/*
- * ashmem_range - represents an interval of unpinned (evictable) pages
- * Lifecycle: From unpin to pin
- * Locking: Protected by `ashmem_mutex'
+/**
+ * struct ashmem_range - A range of unpinned/evictable pages
+ * @lru: The entry in the LRU list
+ * @unpinned: The entry in its area's unpinned list
+ * @asma: The associated anonymous shared memory area.
+ * @pgstart: The starting page (inclusive)
+ * @pgend: The ending page (inclusive)
+ * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
+ *
+ * The lifecycle of this structure is from unpin to pin.
+ * It is protected by 'ashmem_mutex'
*/
struct ashmem_range {
- struct list_head lru; /* entry in LRU list */
- struct list_head unpinned; /* entry in its area's unpinned list */
- struct ashmem_area *asma; /* associated area */
- size_t pgstart; /* starting page, inclusive */
- size_t pgend; /* ending page, inclusive */
- unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+ struct list_head lru;
+ struct list_head unpinned;
+ struct ashmem_area *asma;
+ size_t pgstart;
+ size_t pgend;
+ unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
-/* Count of pages on our LRU list, protected by ashmem_mutex */
+/**
+ * long lru_count - The count of pages on our LRU list.
+ *
+ * This is protected by ashmem_mutex.
+ */
static unsigned long lru_count;
-/*
+/**
* ashmem_mutex - protects the list of and each individual ashmem_area
*
* Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
@@ -105,28 +123,43 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
+/**
+ * lru_add() - Adds a range of memory to the LRU list
+ * @range: The memory range being added.
+ *
+ * The range is first added to the end (tail) of the LRU list.
+ * After this, the size of the range is added to @lru_count
+ */
static inline void lru_add(struct ashmem_range *range)
{
list_add_tail(&range->lru, &ashmem_lru_list);
lru_count += range_size(range);
}
+/**
+ * lru_del() - Removes a range of memory from the LRU list
+ * @range: The memory range being removed
+ *
+ * The range is first deleted from the LRU list.
+ * After this, the size of the range is removed from @lru_count
+ */
static inline void lru_del(struct ashmem_range *range)
{
list_del(&range->lru);
lru_count -= range_size(range);
}
-/*
- * range_alloc - allocate and initialize a new ashmem_range structure
+/**
+ * range_alloc() - Allocates and initializes a new ashmem_range structure
+ * @asma: The associated ashmem_area
+ * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
+ * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * @start: The starting page (inclusive)
+ * @end: The ending page (inclusive)
*
- * 'asma' - associated ashmem_area
- * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
- * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
- * 'start' - starting page, inclusive
- * 'end' - ending page, inclusive
+ * This function is protected by ashmem_mutex.
*
- * Caller must hold ashmem_mutex.
+ * Return: 0 if successful, or -ENOMEM if there is an error
*/
static int range_alloc(struct ashmem_area *asma,
struct ashmem_range *prev_range, unsigned int purged,
@@ -151,6 +184,10 @@ static int range_alloc(struct ashmem_area *asma,
return 0;
}
+/**
+ * range_del() - Deletes and deallocates an ashmem_range structure
+ * @range: The associated ashmem_range that has previously been allocated
+ */
static void range_del(struct ashmem_range *range)
{
list_del(&range->unpinned);
@@ -159,10 +196,17 @@ static void range_del(struct ashmem_range *range)
kmem_cache_free(ashmem_range_cachep, range);
}
-/*
- * range_shrink - shrinks a range
+/**
+ * range_shrink() - Shrinks an ashmem_range
+ * @range: The associated ashmem_range being shrunk
+ * @start: The starting byte of the new range
+ * @end: The ending byte of the new range
*
- * Caller must hold ashmem_mutex.
+ * This does not modify the data inside the existing range in any way - It
+ * simply shrinks the boundaries of the range.
+ *
+ * Theoretically, with a little tweaking, this could eventually be changed
+ * to range_resize, and expand the lru_count if the new range is larger.
*/
static inline void range_shrink(struct ashmem_range *range,
size_t start, size_t end)
@@ -176,6 +220,16 @@ static inline void range_shrink(struct ashmem_range *range,
lru_count -= pre - range_size(range);
}
+/**
+ * ashmem_open() - Opens an Anonymous Shared Memory structure
+ * @inode: The backing file's index node(?)
+ * @file: The backing file
+ *
+ * Please note that the ashmem_area is not returned by this function - It is
+ * instead written to "file->private_data".
+ *
+ * Return: 0 if successful, or another code if unsuccessful.
+ */
static int ashmem_open(struct inode *inode, struct file *file)
{
struct ashmem_area *asma;
@@ -197,6 +251,14 @@ static int ashmem_open(struct inode *inode, struct file *file)
return 0;
}
+/**
+ * ashmem_release() - Releases an Anonymous Shared Memory structure
+ * @ignored: The backing file's Index Node(?) - It is ignored here.
+ * @file: The backing file
+ *
+ * Return: 0 if successful. If it is anything else, go have a coffee and
+ * try again.
+ */
static int ashmem_release(struct inode *ignored, struct file *file)
{
struct ashmem_area *asma = file->private_data;
@@ -214,6 +276,15 @@ static int ashmem_release(struct inode *ignored, struct file *file)
return 0;
}
+/**
+ * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
+ * @file: The associated backing file.
+ * @buf: The buffer of data being written to
+ * @len: The number of bytes being read
+ * @pos: The position of the first byte to read.
+ *
+ * Return: 0 if successful, or another return code if not.
+ */
static ssize_t ashmem_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
@@ -706,7 +777,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
.gfp_mask = GFP_KERNEL,
.nr_to_scan = LONG_MAX,
};
-
+ ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
nodes_setall(sc.nodes_to_scan);
ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
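The ashmem hunks above are mostly a conversion of free-form comments into kernel-doc. A minimal sketch of the layout they adopt, using an invented helper rather than anything from ashmem:

#include <linux/list.h>

/**
 * widget_count() - Count the widgets on a list (invented example)
 * @list: The list of registered widgets
 *
 * Context: caller must hold widget_mutex.
 *
 * Return: the number of entries on @list.
 */
static int widget_count(struct list_head *list)
{
	struct list_head *pos;
	int n = 0;

	list_for_each(pos, list)
		n++;
	return n;
}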
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 98ac020bf91..eaec1dab7fe 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -1700,7 +1700,8 @@ err_no_context_mgr_node:
thread->return_error = return_error;
}
-int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+static int binder_thread_write(struct binder_proc *proc,
+ struct binder_thread *thread,
void __user *buffer, size_t size, size_t *consumed)
{
uint32_t cmd;
@@ -1773,7 +1774,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
void __user *node_ptr;
- void *cookie;
+ void __user *cookie;
struct binder_node *node;
if (get_user(node_ptr, (void * __user *)ptr))
@@ -2055,8 +2056,8 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
return 0;
}
-void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
- uint32_t cmd)
+static void binder_stat_br(struct binder_proc *proc,
+ struct binder_thread *thread, uint32_t cmd)
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
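Two things happen in the binder hunk: functions used only in this file become static, and the cookie read with get_user() is annotated __user, since the value is a pointer into the caller's address space. A small illustrative helper (names are hypothetical, not binder API) showing why the annotation matters — sparse can then warn if the pointer is dereferenced directly instead of being handed to the uaccess helpers:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper, not binder code: pull a userspace pointer out of a
 * userspace buffer. */
static int fetch_user_cookie(void __user *buffer, void __user **cookiep)
{
	void __user *cookie;

	/* get_user() copies only the pointer value; cookie still points at
	 * userspace memory, so it may be passed to copy_from_user() and
	 * friends but must never be dereferenced directly. */
	if (get_user(cookie, (void __user * __user *)buffer))
		return -EFAULT;

	*cookiep = cookie;
	return 0;
}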
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
index ec907ab2ff5..905c7cc9588 100644
--- a/drivers/staging/android/timed_output.h
+++ b/drivers/staging/android/timed_output.h
@@ -31,7 +31,7 @@ struct timed_output_dev {
int state;
};
-extern int timed_output_dev_register(struct timed_output_dev *dev);
-extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+int timed_output_dev_register(struct timed_output_dev *dev);
+void timed_output_dev_unregister(struct timed_output_dev *dev);
#endif
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index 1d8bf08b5bf..9cd59871adb 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -35,7 +35,7 @@ struct bcm_link_request {
#define MAX_PROTOCOL_LENGTH 32
#define IPV6_ADDRESS_SIZEINBYTES 0x10
-typedef union _U_IP_ADDRESS {
+union u_ip_address {
struct {
ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH]; /* Source Ip Address Range */
ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH]; /* Source Ip Mask Address Range */
@@ -52,7 +52,7 @@ typedef union _U_IP_ADDRESS {
UCHAR ucIpv6Address[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES];
UCHAR ucIpv6Mask[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES];
};
-} U_IP_ADDRESS;
+};
struct bcm_hdr_suppression_contextinfo {
UCHAR ucaHdrSuppressionInBuf[MAX_PHS_LENGTHS]; /* Intermediate buffer to accumulate pkt Header for PHS */
@@ -63,13 +63,13 @@ struct bcm_classifier_rule {
ULONG ulSFID;
UCHAR ucReserved[2];
B_UINT16 uiClassifierRuleIndex;
- BOOLEAN bUsed;
+ bool bUsed;
USHORT usVCID_Value;
B_UINT8 u8ClassifierRulePriority; /* This field detemines the Classifier Priority */
- U_IP_ADDRESS stSrcIpAddress;
+ union u_ip_address stSrcIpAddress;
UCHAR ucIPSourceAddressLength; /* Ip Source Address Length */
- U_IP_ADDRESS stDestIpAddress;
+ union u_ip_address stDestIpAddress;
UCHAR ucIPDestinationAddressLength; /* Ip Destination Address Length */
UCHAR ucIPTypeOfServiceLength; /* Type of service Length */
UCHAR ucTosLow; /* Tos Low */
@@ -86,14 +86,14 @@ struct bcm_classifier_rule {
USHORT usDestPortRangeHi[MAX_PORT_RANGE];
UCHAR ucDestPortRangeLength;
- BOOLEAN bProtocolValid;
- BOOLEAN bTOSValid;
- BOOLEAN bDestIpValid;
- BOOLEAN bSrcIpValid;
+ bool bProtocolValid;
+ bool bTOSValid;
+ bool bDestIpValid;
+ bool bSrcIpValid;
/* For IPv6 Addressing */
UCHAR ucDirection;
- BOOLEAN bIpv6Protocol;
+ bool bIpv6Protocol;
UINT32 u32PHSRuleID;
struct bcm_phs_rule sPhsRule;
UCHAR u8AssociatedPHSI;
@@ -113,11 +113,11 @@ struct bcm_classifier_rule {
};
struct bcm_fragmented_packet_info {
- BOOLEAN bUsed;
+ bool bUsed;
ULONG ulSrcIpAddress;
USHORT usIpIdentification;
struct bcm_classifier_rule *pstMatchedClassifierEntry;
- BOOLEAN bOutOfOrderFragment;
+ bool bOutOfOrderFragment;
};
struct bcm_packet_info {
@@ -128,9 +128,9 @@ struct bcm_packet_info {
/* This field determines the priority of the SF Queues */
B_UINT8 u8TrafficPriority;
- BOOLEAN bValid;
- BOOLEAN bActive;
- BOOLEAN bActivateRequestSent;
+ bool bValid;
+ bool bActive;
+ bool bActivateRequestSent;
B_UINT8 u8QueueType; /* BE or rtPS */
@@ -170,17 +170,17 @@ struct bcm_packet_info {
};
};
- BOOLEAN bProtocolValid;
- BOOLEAN bTOSValid;
- BOOLEAN bDestIpValid;
- BOOLEAN bSrcIpValid;
+ bool bProtocolValid;
+ bool bTOSValid;
+ bool bDestIpValid;
+ bool bSrcIpValid;
- BOOLEAN bActiveSet;
- BOOLEAN bAdmittedSet;
- BOOLEAN bAuthorizedSet;
- BOOLEAN bClassifierPriority;
+ bool bActiveSet;
+ bool bAdmittedSet;
+ bool bAuthorizedSet;
+ bool bClassifierPriority;
UCHAR ucServiceClassName[MAX_CLASS_NAME_LENGTH];
- BOOLEAN bHeaderSuppressionEnabled;
+ bool bHeaderSuppressionEnabled;
spinlock_t SFQueueLock;
void *pstSFIndication;
struct timeval stLastUpdateTokenAt;
@@ -196,8 +196,8 @@ struct bcm_tarang_data {
struct sk_buff *RxAppControlHead;
struct sk_buff *RxAppControlTail;
int AppCtrlQueueLen;
- BOOLEAN MacTracingEnabled;
- BOOLEAN bApplicationToExit;
+ bool MacTracingEnabled;
+ bool bApplicationToExit;
struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs;
ULONG RxCntrlMsgBitMask;
};
@@ -205,7 +205,7 @@ struct bcm_tarang_data {
struct bcm_targetdsx_buffer {
ULONG ulTargetDsxBuffer;
B_UINT16 tid;
- BOOLEAN valid;
+ bool valid;
};
typedef int (*FP_FLASH_WRITE)(struct bcm_mini_adapter *, UINT, PVOID);
@@ -221,11 +221,11 @@ struct bcm_mini_adapter {
u32 msg_enable;
CHAR *caDsxReqResp;
atomic_t ApplicationRunning;
- BOOLEAN AppCtrlQueueOverFlow;
+ bool AppCtrlQueueOverFlow;
atomic_t CurrentApplicationCount;
atomic_t RegisteredApplicationCount;
- BOOLEAN LinkUpStatus;
- BOOLEAN TimerActive;
+ bool LinkUpStatus;
+ bool TimerActive;
u32 StatisticsPointer;
struct sk_buff *RxControlHead;
struct sk_buff *RxControlTail;
@@ -249,25 +249,25 @@ struct bcm_mini_adapter {
UINT u32TotalDSD;
struct bcm_packet_info PackInfo[NO_OF_QUEUES];
struct bcm_classifier_rule astClassifierTable[MAX_CLASSIFIERS];
- BOOLEAN TransferMode;
+ bool TransferMode;
/*************** qos ******************/
- BOOLEAN bETHCSEnabled;
+ bool bETHCSEnabled;
ULONG BEBucketSize;
ULONG rtPSBucketSize;
UCHAR LinkStatus;
- BOOLEAN AutoLinkUp;
- BOOLEAN AutoSyncup;
+ bool AutoLinkUp;
+ bool AutoSyncup;
int major;
int minor;
wait_queue_head_t tx_packet_wait_queue;
wait_queue_head_t process_rx_cntrlpkt;
atomic_t process_waiting;
- BOOLEAN fw_download_done;
+ bool fw_download_done;
char *txctlpacket[MAX_CNTRL_PKTS];
- atomic_t cntrlpktCnt ;
+ atomic_t cntrlpktCnt;
atomic_t index_app_read_cntrlpkt;
atomic_t index_wr_txcntrlpkt;
atomic_t index_rd_txcntrlpkt;
@@ -280,19 +280,19 @@ struct bcm_mini_adapter {
ULONG ulTotalTargetBuffersAvailable;
unsigned long chip_id;
wait_queue_head_t lowpower_mode_wait_queue;
- BOOLEAN bFlashBoot;
- BOOLEAN bBinDownloaded;
- BOOLEAN bCfgDownloaded;
- BOOLEAN bSyncUpRequestSent;
+ bool bFlashBoot;
+ bool bBinDownloaded;
+ bool bCfgDownloaded;
+ bool bSyncUpRequestSent;
USHORT usBestEffortQueueIndex;
wait_queue_head_t ioctl_fw_dnld_wait_queue;
- BOOLEAN waiting_to_fw_download_done;
+ bool waiting_to_fw_download_done;
pid_t fw_download_process_pid;
struct bcm_target_params *pstargetparams;
- BOOLEAN device_removed;
- BOOLEAN DeviceAccess;
- BOOLEAN bIsAutoCorrectEnabled;
- BOOLEAN bDDRInitDone;
+ bool device_removed;
+ bool DeviceAccess;
+ bool bIsAutoCorrectEnabled;
+ bool bDDRInitDone;
int DDRSetting;
ULONG ulPowerSaveMode;
spinlock_t txtransmitlock;
@@ -324,22 +324,22 @@ struct bcm_mini_adapter {
PVOID,
int);
int (*interface_transmit)(PVOID, PVOID , UINT);
- BOOLEAN IdleMode;
- BOOLEAN bDregRequestSentInIdleMode;
- BOOLEAN bTriedToWakeUpFromlowPowerMode;
- BOOLEAN bShutStatus;
- BOOLEAN bWakeUpDevice;
+ bool IdleMode;
+ bool bDregRequestSentInIdleMode;
+ bool bTriedToWakeUpFromlowPowerMode;
+ bool bShutStatus;
+ bool bWakeUpDevice;
unsigned int usIdleModePattern;
/* BOOLEAN bTriedToWakeUpFromShutdown; */
- BOOLEAN bLinkDownRequested;
+ bool bLinkDownRequested;
int downloadDDR;
struct bcm_phs_extension stBCMPhsContext;
struct bcm_hdr_suppression_contextinfo stPhsTxContextInfo;
uint8_t ucaPHSPktRestoreBuf[2048];
uint8_t bPHSEnabled;
- BOOLEAN AutoFirmDld;
- BOOLEAN bMipsConfig;
- BOOLEAN bDPLLConfig;
+ bool AutoFirmDld;
+ bool bMipsConfig;
+ bool bDPLLConfig;
UINT32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
UINT32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
struct bcm_fragmented_packet_info astFragmentedPktClassifierTable[MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES];
@@ -348,8 +348,8 @@ struct bcm_mini_adapter {
enum bcm_nvm_type eNVMType;
UINT uiSectorSize;
UINT uiSectorSizeInCFG;
- BOOLEAN bSectorSizeOverride;
- BOOLEAN bStatusWrite;
+ bool bSectorSizeOverride;
+ bool bStatusWrite;
UINT uiNVMDSDSize;
UINT uiVendorExtnFlag;
/* it will always represent chosen DSD at any point of time.
@@ -376,18 +376,18 @@ struct bcm_mini_adapter {
UINT uiActiveDSDOffsetAtFwDld; /* For accessing Active DSD chosen before f/w download */
UINT uiFlashLayoutMajorVersion;
UINT uiFlashLayoutMinorVersion;
- BOOLEAN bAllDSDWriteAllow;
- BOOLEAN bSigCorrupted;
+ bool bAllDSDWriteAllow;
+ bool bSigCorrupted;
/* this should be set who so ever want to change the Headers. after Wrtie it should be reset immediately. */
- BOOLEAN bHeaderChangeAllowed;
+ bool bHeaderChangeAllowed;
int SelectedChip;
- BOOLEAN bEndPointHalted;
+ bool bEndPointHalted;
/* while bFlashRawRead will be true, Driver ignore map lay out and consider flash as of without any map. */
- BOOLEAN bFlashRawRead;
- BOOLEAN bPreparingForLowPowerMode;
- BOOLEAN bDoSuspend;
+ bool bFlashRawRead;
+ bool bPreparingForLowPowerMode;
+ bool bDoSuspend;
UINT syscfgBefFwDld;
- BOOLEAN StopAllXaction;
+ bool StopAllXaction;
UINT32 liTimeSinceLastNetEntry; /* Used to Support extended CAPI requirements from */
struct semaphore LowPowerModeSync;
ULONG liDrainCalculated;
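The Adapter.h changes above swap the driver-private BOOLEAN for the kernel's bool and drop the typedef'd union in favour of a plain tagged union, in line with the usual coding-style guidance against typedefs for structs and unions. A minimal sketch of the preferred form, with invented names rather than the driver's:

#include <linux/types.h>	/* bool, u8, __be32 */

/* Plain tagged union, referred to as "union example_addr" at use sites. */
union example_addr {
	__be32	v4;
	u8	v6[16];
};

struct example_rule {
	union example_addr	src;	/* was: U_IP_ADDRESS stSrcIpAddress */
	bool			used;	/* was: BOOLEAN bUsed */
};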
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 639ba96adb3..87b74ca84c4 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -49,11 +49,8 @@ static int bcm_char_release(struct inode *inode, struct file *filp)
pTarang = (struct bcm_tarang_data *)filp->private_data;
- if (pTarang == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "ptarang is null\n");
+ if (pTarang == NULL)
return 0;
- }
Adapter = pTarang->Adapter;
@@ -119,7 +116,7 @@ static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size,
return -ENODEV;
}
- if (FALSE == Adapter->fw_download_done)
+ if (false == Adapter->fw_download_done)
return -EACCES;
down(&Adapter->RxAppControlQueuelock);
@@ -180,7 +177,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Adapter->device_removed)
return -EFAULT;
- if (FALSE == Adapter->fw_download_done) {
+ if (false == Adapter->fw_download_done) {
switch (cmd) {
case IOCTL_MAC_ADDR_REQ:
case IOCTL_LINK_REQ:
@@ -425,7 +422,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
uiOperation = gpio_info.uiGpioValue;
value = (1<<uiBit);
- if (IsReqGpioIsLedInNVM(Adapter, value) == FALSE) {
+ if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", value);
Status = -EINVAL;
break;
@@ -572,7 +569,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == FALSE) {
+ if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
@@ -665,7 +662,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
/* Validating the request */
- if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == FALSE) {
+ if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
@@ -768,10 +765,10 @@ cntrlEnd:
if (down_trylock(&Adapter->fw_download_sema))
return -EBUSY;
- Adapter->bBinDownloaded = FALSE;
+ Adapter->bBinDownloaded = false;
Adapter->fw_download_process_pid = current->pid;
- Adapter->bCfgDownloaded = FALSE;
- Adapter->fw_download_done = FALSE;
+ Adapter->bCfgDownloaded = false;
+ Adapter->fw_download_done = false;
netif_carrier_off(Adapter->dev);
netif_stop_queue(Adapter->dev);
Status = reset_card_proc(Adapter);
@@ -848,7 +845,7 @@ cntrlEnd:
if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bLedInitDone = false;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
}
@@ -900,7 +897,7 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = FALSE;
+ Adapter->waiting_to_fw_download_done = false;
wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
Adapter->waiting_to_fw_download_done, timeout);
Adapter->fw_download_process_pid = INVALID_PID;
@@ -1052,7 +1049,7 @@ cntrlEnd:
if (tracing_flag)
Adapter->pTarangs->MacTracingEnabled = TRUE;
else
- Adapter->pTarangs->MacTracingEnabled = FALSE;
+ Adapter->pTarangs->MacTracingEnabled = false;
break;
}
@@ -1109,7 +1106,7 @@ cntrlEnd:
}
case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
- if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) {
+ if ((false == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) {
Adapter->usIdleModePattern = ABORT_IDLE_MODE;
Adapter->bWakeUpDevice = TRUE;
wake_up(&Adapter->process_rx_cntrlpkt);
@@ -1168,7 +1165,7 @@ cntrlEnd:
break;
}
- if (pBulkBuffer->SwapEndian == FALSE)
+ if (pBulkBuffer->SwapEndian == false)
Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
else
Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
@@ -1387,7 +1384,7 @@ cntrlEnd:
if (IsFlash2x(Adapter))
BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
- Adapter->bHeaderChangeAllowed = FALSE;
+ Adapter->bHeaderChangeAllowed = false;
up(&Adapter->NVMRdmWrmLock);
@@ -1432,7 +1429,7 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
/* This was internal to driver for raw read. now it has ben exposed to user space app. */
- if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == FALSE)
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == false)
return STATUS_FAILURE;
NOB = sFlash2xRead.numOfBytes;
@@ -1510,7 +1507,7 @@ cntrlEnd:
}
/* First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite */
- Adapter->bAllDSDWriteAllow = FALSE;
+ Adapter->bAllDSDWriteAllow = false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
@@ -1531,7 +1528,7 @@ cntrlEnd:
return -EINVAL;
}
- if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == FALSE)
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == false)
return STATUS_FAILURE;
InputAddr = sFlash2xWrite.pDataBuff;
@@ -1686,7 +1683,7 @@ cntrlEnd:
case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: {
/* Right Now we are taking care of only DSD */
- Adapter->bAllDSDWriteAllow = FALSE;
+ Adapter->bAllDSDWriteAllow = false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
Status = STATUS_SUCCESS;
}
@@ -1697,7 +1694,7 @@ cntrlEnd:
Status = STATUS_SUCCESS;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called");
- Adapter->bAllDSDWriteAllow = FALSE;
+ Adapter->bAllDSDWriteAllow = false;
if (IsFlash2x(Adapter) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
return -EINVAL;
@@ -1720,12 +1717,12 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
- if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == FALSE) {
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection);
return -EINVAL;
}
- if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == FALSE) {
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection);
return -EINVAL;
}
@@ -1924,7 +1921,7 @@ cntrlEnd:
OutPutBuff = OutPutBuff + ReadBytes;
}
}
- Adapter->bFlashRawRead = FALSE;
+ Adapter->bFlashRawRead = false;
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
break;
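The Bcmchar.c hunk is a mechanical FALSE -> false substitution; a likely follow-up cleanup (not part of this patch) would be to test the flags directly instead of comparing against false, for example:

/* Hypothetical helper; assumes the driver's headers.h for
 * struct bcm_mini_adapter. */
static int example_check(struct bcm_mini_adapter *Adapter)
{
	if (!Adapter->fw_download_done)	/* instead of: false == Adapter->... */
		return -EACCES;
	return 0;
}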
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 4e470d4bb4e..53fee2f9a49 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -6,7 +6,7 @@ static INT bcm_open(struct net_device *dev)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(dev);
- if (Adapter->fw_download_done == FALSE) {
+ if (Adapter->fw_download_done == false) {
pr_notice(PFX "%s: link up failed (download in progress)\n",
dev->name);
return -EBUSY;
@@ -142,7 +142,8 @@ static void bcm_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(dev);
- struct bcm_interface_adapter *psIntfAdapter = Adapter->pvInterfaceAdapter;
+ struct bcm_interface_adapter *psIntfAdapter =
+ Adapter->pvInterfaceAdapter;
struct usb_device *udev = interface_to_usbdev(psIntfAdapter->interface);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index 97651450292..cc91b5e934a 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -113,7 +113,7 @@ static VOID deleteSFBySfid(struct bcm_mini_adapter *Adapter, UINT uiSearchRuleIn
static inline VOID
CopyIpAddrToClassifier(struct bcm_classifier_rule *pstClassifierEntry,
B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
- BOOLEAN bIpVersion6, enum bcm_ipaddr_context eIpAddrContext)
+ bool bIpVersion6, enum bcm_ipaddr_context eIpAddrContext)
{
int i = 0;
UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
@@ -213,7 +213,7 @@ CopyIpAddrToClassifier(struct bcm_classifier_rule *pstClassifierEntry,
}
}
-void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, BOOLEAN bFreeAll)
+void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, bool bFreeAll)
{
int i;
@@ -256,7 +256,7 @@ static inline VOID CopyClassifierRuleToSF(struct bcm_mini_adapter *Adapter, stru
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
if (pstClassifierEntry) {
/* Store if Ipv6 */
- pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE;
+ pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : false;
/* Destinaiton Port */
pstClassifierEntry->ucDestPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength / 4;
@@ -301,7 +301,7 @@ static inline VOID CopyClassifierRuleToSF(struct bcm_mini_adapter *Adapter, stru
psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
- TRUE : FALSE, eDestIpAddress);
+ TRUE : false, eDestIpAddress);
/* Source Ip Address and Mask */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Source Parameters : ");
@@ -309,7 +309,7 @@ static inline VOID CopyClassifierRuleToSF(struct bcm_mini_adapter *Adapter, stru
CopyIpAddrToClassifier(pstClassifierEntry,
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : false,
eSrcIpAddress);
/* TOS */
@@ -383,7 +383,7 @@ static inline VOID DeleteClassifierRuleFromSF(struct bcm_mini_adapter *Adapter,
u16PacketClassificationRuleIndex = Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
if (pstClassifierEntry) {
- pstClassifierEntry->bUsed = FALSE;
+ pstClassifierEntry->bUsed = false;
pstClassifierEntry->uiClassifierRuleIndex = 0;
memset(pstClassifierEntry, 0, sizeof(struct bcm_classifier_rule));
@@ -685,7 +685,7 @@ static VOID CopyToAdapter(register struct bcm_mini_adapter *Adapter, /* <Pointer
memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
sPhsRule.u8RefCnt = 0;
- sPhsRule.bUnclassifiedPHSRule = FALSE;
+ sPhsRule.bUnclassifiedPHSRule = false;
sPhsRule.PHSModifiedBytes = 0;
sPhsRule.PHSModifiedNumPackets = 0;
sPhsRule.PHSErrorNumPackets = 0;
@@ -837,7 +837,7 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
UINT nCurClassifierCnt;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- pstAddIndication = (struct bcm_add_indication_alt *)pvBuffer;
+ pstAddIndication = pvBuffer;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
@@ -1339,14 +1339,14 @@ ULONG StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, PVOID pvBu
UINT uiSearchRuleIndex;
ULONG ulSFID;
- pstAddIndicationAlt = (struct bcm_add_indication_alt *)(pvBuffer);
+ pstAddIndicationAlt = pvBuffer;
/*
* In case of DSD Req By MS, we should immediately delete this SF so that
* we can stop the further classifying the pkt for this SF.
*/
if (pstAddIndicationAlt->u8Type == DSD_REQ) {
- pstDeletionRequest = (struct bcm_del_request *)pvBuffer;
+ pstDeletionRequest = pvBuffer;
ulSFID = ntohl(pstDeletionRequest->u32SFID);
uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
@@ -1452,12 +1452,12 @@ static inline struct bcm_add_indication_alt
struct bcm_add_indication *pstAddIndication = NULL;
struct bcm_add_indication_alt *pstAddIndicationDest = NULL;
- pstAddIndication = (struct bcm_add_indication *)(pvBuffer);
+ pstAddIndication = pvBuffer;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>");
if ((pstAddIndication->u8Type == DSD_REQ) ||
(pstAddIndication->u8Type == DSD_RSP) ||
(pstAddIndication->u8Type == DSD_ACK))
- return (struct bcm_add_indication_alt *)pvBuffer;
+ return pvBuffer;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
/*
@@ -1577,7 +1577,7 @@ static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter, B_UIN
ULONG idx, max_try;
if ((Adapter->ulTotalTargetBuffersAvailable == 0) || (Adapter->ulFreeTargetBufferCnt == 0)) {
- ClearTargetDSXBuffer(Adapter, tid, FALSE);
+ ClearTargetDSXBuffer(Adapter, tid, false);
return 0;
}
@@ -1590,7 +1590,7 @@ static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter, B_UIN
if (max_try == 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", Adapter->ulFreeTargetBufferCnt);
- ClearTargetDSXBuffer(Adapter, tid, FALSE);
+ ClearTargetDSXBuffer(Adapter, tid, false);
return 0;
}
@@ -1630,7 +1630,7 @@ int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter)
* for the Connection Management.
* @return - Queue index for the free SFID else returns Invalid Index.
*/
-BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
+bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
PVOID pvBuffer /* Starting Address of the Buffer, that contains the AddIndication Data */)
{
struct bcm_connect_mgr_params *psfLocalSet = NULL;
@@ -1644,9 +1644,9 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
*/
pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
if (pstAddIndication == NULL) {
- ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication *)pvBuffer)->u16TID, FALSE);
+ ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication *)pvBuffer)->u16TID, false);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
- return FALSE;
+ return false;
}
DumpCmControlPacket(pstAddIndication);
@@ -1656,7 +1656,7 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
pLeader->Vcid = 0;
- ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, FALSE);
+ ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, false);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
switch (pstAddIndication->u8Type) {
case DSA_REQ:
@@ -1708,9 +1708,9 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
- if (pstAddIndication->sfActiveSet.bValid == FALSE) {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+ if (pstAddIndication->sfActiveSet.bValid == false) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false;
if (pstAddIndication->sfAdmittedSet.bValid)
psfLocalSet = &pstAddIndication->sfAdmittedSet;
else if (pstAddIndication->sfAuthorizedSet.bValid)
@@ -1722,8 +1722,8 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
if (!psfLocalSet) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
} else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
@@ -1759,15 +1759,15 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
}
}
} else {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
}
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
kfree(pstAddIndication);
- return FALSE;
+ return false;
}
}
break;
@@ -1812,9 +1812,9 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
- if (pstChangeIndication->sfActiveSet.bValid == FALSE) {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+ if (pstChangeIndication->sfActiveSet.bValid == false) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false;
if (pstChangeIndication->sfAdmittedSet.bValid)
psfLocalSet = &pstChangeIndication->sfAdmittedSet;
@@ -1827,8 +1827,8 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
if (!psfLocalSet) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
} else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
@@ -1847,7 +1847,7 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
kfree(pstAddIndication);
- return FALSE;
+ return false;
}
}
break;
@@ -1883,7 +1883,7 @@ BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer
break;
default:
kfree(pstAddIndication);
- return FALSE;
+ return false;
}
return TRUE;
}
@@ -1934,13 +1934,13 @@ VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter, PUINT puiBuffer
continue;
}
- if (pHostInfo->RetainSF == FALSE) {
+ if (pHostInfo->RetainSF == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Going to Delete SF");
deleteSFBySfid(Adapter, uiSearchRuleIndex);
} else {
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID);
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID);
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "pHostInfo->QoSParamSet: 0x%x\n", pHostInfo->QoSParamSet);
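Several CmHost.c hunks drop explicit casts on assignments from void * (pstAddIndication = pvBuffer and similar). In C a void * converts implicitly to any object pointer type, so the cast is redundant and can hide type mistakes; a tiny sketch with invented types:

#include <linux/types.h>

struct example_msg {		/* invented type for illustration */
	u8	type;
	u32	sfid;
};

static bool example_is_delete(void *pvBuffer)
{
	/* No cast needed: void * converts implicitly to any object pointer. */
	struct example_msg *msg = pvBuffer;

	return msg->type == 0;
}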
diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h
index 4ddfc3d45bc..0887d3f49e2 100644
--- a/drivers/staging/bcm/CmHost.h
+++ b/drivers/staging/bcm/CmHost.h
@@ -55,7 +55,7 @@ unsigned long StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, vo
int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
unsigned long SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter);
-BOOLEAN CmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer);
+bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer);
#pragma pack(pop)
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index f5eda96f0f8..9f7e30f637e 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -1106,7 +1106,7 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
unsigned long ul_ddr_setting_load_addr = DDR_DUMP_INTERNAL_DEVICE_MEMORY;
UINT value = 0;
int retval = STATUS_SUCCESS;
- BOOLEAN bOverrideSelfRefresh = FALSE;
+ bool bOverrideSelfRefresh = false;
switch (Adapter->chip_id)
{
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
index 1bb53e247a6..495fe3dc514 100644
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ b/drivers/staging/bcm/HandleControlPacket.c
@@ -14,10 +14,10 @@
static VOID handle_rx_control_packet(struct bcm_mini_adapter *Adapter, struct sk_buff *skb)
{
struct bcm_tarang_data *pTarang = NULL;
- BOOLEAN HighPriorityMessage = FALSE;
+ bool HighPriorityMessage = false;
struct sk_buff *newPacket = NULL;
CHAR cntrl_msg_mask_bit = 0;
- BOOLEAN drop_pkt_flag = TRUE;
+ bool drop_pkt_flag = TRUE;
USHORT usStatus = *(PUSHORT)(skb->data);
if (netif_msg_pktdata(Adapter))
@@ -91,13 +91,13 @@ static VOID handle_rx_control_packet(struct bcm_mini_adapter *Adapter, struct sk
* cntrl_msg_mask_bit);
*/
if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
- drop_pkt_flag = FALSE;
+ drop_pkt_flag = false;
if ((drop_pkt_flag == TRUE) ||
(pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN)
|| ((pTarang->AppCtrlQueueLen >
MAX_APP_QUEUE_LEN / 2) &&
- (HighPriorityMessage == FALSE))) {
+ (HighPriorityMessage == false))) {
/*
* Assumption:-
* 1. every tarang manages it own dropped pkt
@@ -175,8 +175,8 @@ int control_packet_handler(struct bcm_mini_adapter *Adapter /* pointer to adapte
return 0;
}
if (TRUE == Adapter->bWakeUpDevice) {
- Adapter->bWakeUpDevice = FALSE;
- if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode)
+ Adapter->bWakeUpDevice = false;
+ if ((false == Adapter->bTriedToWakeUpFromlowPowerMode)
&& ((TRUE == Adapter->IdleMode) ||
(TRUE == Adapter->bShutStatus))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c
index 6d803e7b094..cd160670e02 100644
--- a/drivers/staging/bcm/IPv6Protocol.c
+++ b/drivers/staging/bcm/IPv6Protocol.c
@@ -1,13 +1,13 @@
#include "headers.h"
-static BOOLEAN MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
+static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
struct bcm_ipv6_hdr *pstIpv6Header);
-static BOOLEAN MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
+static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
struct bcm_ipv6_hdr *pstIpv6Header);
static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header);
static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
- UCHAR *pucNextHeader, BOOLEAN *bParseDone, USHORT *pusPayloadLength)
+ UCHAR *pucNextHeader, bool *bParseDone, USHORT *pusPayloadLength)
{
UCHAR *pucRetHeaderPtr = NULL;
UCHAR *pucPayloadPtr = NULL;
@@ -29,7 +29,7 @@ static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
}
/* Get the Nextt Header Type */
- *bParseDone = FALSE;
+ *bParseDone = false;
switch (*pucNextHeader) {
@@ -124,7 +124,7 @@ static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
}
- if (*bParseDone == FALSE) {
+ if (*bParseDone == false) {
if (*pusPayloadLength <= usNextHeaderOffset) {
*bParseDone = TRUE;
} else {
@@ -144,7 +144,7 @@ static UCHAR GetIpv6ProtocolPorts(UCHAR *pucPayload, USHORT *pusSrcPort,
USHORT *pusDestPort, USHORT usPayloadLength, UCHAR ucNextHeader)
{
UCHAR *pIpv6HdrScanContext = pucPayload;
- BOOLEAN bDone = FALSE;
+ bool bDone = false;
UCHAR ucHeaderType = 0;
UCHAR *pucNextHeader = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -187,12 +187,12 @@ USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
USHORT ushSrcPort = 0;
UCHAR ucNextProtocolAboveIP = 0;
struct bcm_ipv6_hdr *pstIpv6Header = NULL;
- BOOLEAN bClassificationSucceed = FALSE;
+ bool bClassificationSucceed = false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL, "IpVersion6 ==========>\n");
- pstIpv6Header = (struct bcm_ipv6_hdr *)pcIpHeader;
+ pstIpv6Header = pcIpHeader;
DumpIpv6Header(pstIpv6Header);
@@ -277,10 +277,10 @@ USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
INT iMatchedSFQueueIndex = 0;
iMatchedSFQueueIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
if (iMatchedSFQueueIndex >= NO_OF_QUEUES) {
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
} else {
- if (Adapter->PackInfo[iMatchedSFQueueIndex].bActive == FALSE)
- bClassificationSucceed = FALSE;
+ if (Adapter->PackInfo[iMatchedSFQueueIndex].bActive == false)
+ bClassificationSucceed = false;
}
}
@@ -288,7 +288,7 @@ USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
}
-static BOOLEAN MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
+static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
struct bcm_ipv6_hdr *pstIpv6Header)
{
UINT uiLoopIndex = 0;
@@ -341,10 +341,10 @@ static BOOLEAN MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule
}
}
}
- return FALSE;
+ return false;
}
-static BOOLEAN MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
+static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
struct bcm_ipv6_hdr *pstIpv6Header)
{
UINT uiLoopIndex = 0;
@@ -398,7 +398,7 @@ static BOOLEAN MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRul
}
}
}
- return FALSE;
+ return false;
}
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index 348ad75b340..463bdee9dfc 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -6,7 +6,7 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
mm_segment_t oldfs = {0};
int errno = 0, len = 0; /* ,is_config_file = 0 */
loff_t pos = 0;
- struct bcm_interface_adapter *psIntfAdapter = (struct bcm_interface_adapter *)arg;
+ struct bcm_interface_adapter *psIntfAdapter = arg;
/* struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; */
char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
@@ -61,7 +61,7 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
loff_t pos = 0;
static int fw_down;
INT Status = STATUS_SUCCESS;
- struct bcm_interface_adapter *psIntfAdapter = (struct bcm_interface_adapter *)arg;
+ struct bcm_interface_adapter *psIntfAdapter = arg;
int bytes;
buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
@@ -166,7 +166,7 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
}
if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bLedInitDone = false;
Adapter->DriverState = DRIVER_INIT;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
@@ -214,7 +214,7 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
* Firmware. Check for the Config file to be first to be sent from the
* Application
*/
- atomic_set(&Adapter->uiMBupdate, FALSE);
+ atomic_set(&Adapter->uiMBupdate, false);
if (!Adapter->bCfgDownloaded && psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
/* Can't Download Firmware. */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Download the config File first\n");
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 53478286604..5959fbdcd1b 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -89,8 +89,8 @@ int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *pui
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device Up from Idle Mode");
/* Set Idle Mode Flag to False and Clear IdleMode reg. */
- Adapter->IdleMode = FALSE;
- Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
+ Adapter->IdleMode = false;
+ Adapter->bTriedToWakeUpFromlowPowerMode = false;
wake_up(&Adapter->lowpower_mode_wait_queue);
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 79058ce5b33..3acdb58a10f 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -332,7 +332,7 @@ static int device_run(struct bcm_interface_adapter *psIntfAdapter)
* now register the cntrl interface.
* after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
*/
- psIntfAdapter->psAdapter->waiting_to_fw_download_done = FALSE;
+ psIntfAdapter->psAdapter->waiting_to_fw_download_done = false;
value = wait_event_timeout(psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
psIntfAdapter->psAdapter->waiting_to_fw_download_done, 5*HZ);
@@ -430,7 +430,7 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
unsigned long value;
int retval = 0;
int usedIntOutForBulkTransfer = 0 ;
- BOOLEAN bBcm16 = FALSE;
+ bool bBcm16 = false;
UINT uiData = 0;
int bytes;
@@ -472,7 +472,7 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
retval = usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"BCM16 is applicable on this dongle\n");
- if (retval || (psIntfAdapter->bHighSpeedDevice == FALSE)) {
+ if (retval || (psIntfAdapter->bHighSpeedDevice == false)) {
usedIntOutForBulkTransfer = EP2 ;
endpoint = &iface_desc->endpoint[EP2].desc;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
@@ -481,8 +481,8 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
* If Modem is high speed device EP2 should be INT OUT End point
* If Mode is FS then EP2 should be bulk end point
*/
- if (((psIntfAdapter->bHighSpeedDevice == TRUE) && (bcm_usb_endpoint_is_int_out(endpoint) == FALSE))
- || ((psIntfAdapter->bHighSpeedDevice == FALSE) && (bcm_usb_endpoint_is_bulk_out(endpoint) == FALSE))) {
+ if (((psIntfAdapter->bHighSpeedDevice == TRUE) && (bcm_usb_endpoint_is_int_out(endpoint) == false))
+ || ((psIntfAdapter->bHighSpeedDevice == false) && (bcm_usb_endpoint_is_bulk_out(endpoint) == false))) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"Configuring the EEPROM\n");
/* change the EP2, EP4 to INT OUT end point */
@@ -501,7 +501,7 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
}
}
- if ((psIntfAdapter->bHighSpeedDevice == FALSE) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+ if ((psIntfAdapter->bHighSpeedDevice == false) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
/* Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail. */
UINT _uiData = ntohl(EP2_CFG_INT);
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
@@ -513,7 +513,7 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
endpoint = &iface_desc->endpoint[EP4].desc;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"Choosing AltSetting as a default setting.\n");
- if (bcm_usb_endpoint_is_int_out(endpoint) == FALSE) {
+ if (bcm_usb_endpoint_is_int_out(endpoint) == false) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"Dongle does not have BCM16 Fix.\n");
/* change the EP2, EP4 to INT OUT end point and use EP4 in altsetting */
@@ -619,7 +619,7 @@ static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
psIntfAdapter->bSuspended = TRUE;
if (TRUE == psIntfAdapter->bPreparingForBusSuspend) {
- psIntfAdapter->bPreparingForBusSuspend = FALSE;
+ psIntfAdapter->bPreparingForBusSuspend = false;
if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
psIntfAdapter->psAdapter->IdleMode = TRUE ;
@@ -631,7 +631,7 @@ static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
"Host Entered in PMU Shutdown Mode.\n");
}
}
- psIntfAdapter->psAdapter->bPreparingForLowPowerMode = FALSE;
+ psIntfAdapter->psAdapter->bPreparingForLowPowerMode = false;
/* Signaling the control pkt path */
wake_up(&psIntfAdapter->psAdapter->lowpower_mode_wait_queue);
@@ -644,7 +644,7 @@ static int InterfaceResume(struct usb_interface *intf)
struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
mdelay(100);
- psIntfAdapter->bSuspended = FALSE;
+ psIntfAdapter->bSuspended = false;
StartInterruptUrb(psIntfAdapter);
InterfaceRx(psIntfAdapter);
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
index 8322f1b76e2..7b39f4f5f1a 100644
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ b/drivers/staging/bcm/InterfaceIsr.c
@@ -60,7 +60,7 @@ static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
psIntfAdapter->psAdapter->downloadDDR +=1;
wake_up(&Adapter->tx_packet_wait_queue);
}
- if(FALSE == Adapter->waiting_to_fw_download_done)
+ if(false == Adapter->waiting_to_fw_download_done)
{
Adapter->waiting_to_fw_download_done = TRUE;
wake_up(&Adapter->ioctl_fw_dnld_wait_queue);
@@ -147,11 +147,11 @@ INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
{
INT status = 0;
- if( FALSE == psIntfAdapter->psAdapter->device_removed &&
- FALSE == psIntfAdapter->psAdapter->bEndPointHalted &&
- FALSE == psIntfAdapter->bSuspended &&
- FALSE == psIntfAdapter->bPreparingForBusSuspend &&
- FALSE == psIntfAdapter->psAdapter->StopAllXaction)
+ if( false == psIntfAdapter->psAdapter->device_removed &&
+ false == psIntfAdapter->psAdapter->bEndPointHalted &&
+ false == psIntfAdapter->bSuspended &&
+ false == psIntfAdapter->bPreparingForBusSuspend &&
+ false == psIntfAdapter->psAdapter->StopAllXaction)
{
status = usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
if (status)
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index afca010f9db..4173fd7d671 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -44,7 +44,7 @@ int InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter,
else
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", bytes);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ psIntfAdapter->psAdapter->DeviceAccess = false;
return bytes;
}
@@ -90,10 +90,10 @@ int InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter,
if (retval < 0) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "WRM failed status :%d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ psIntfAdapter->psAdapter->DeviceAccess = false;
return retval;
} else {
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ psIntfAdapter->psAdapter->DeviceAccess = false;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "WRM sent %d", retval);
return STATUS_SUCCESS;
}
@@ -104,7 +104,7 @@ int BcmRDM(void *arg,
void *buff,
int len)
{
- return InterfaceRDM((struct bcm_interface_adapter*)arg, addr, buff, len);
+ return InterfaceRDM((struct bcm_interface_adapter *)arg, addr, buff, len);
}
int BcmWRM(void *arg,
@@ -211,7 +211,7 @@ void putUsbSuspend(struct work_struct *work)
psIntfAdapter = container_of(work, struct bcm_interface_adapter, usbSuspendWork);
intf = psIntfAdapter->interface;
- if (psIntfAdapter->bSuspended == FALSE)
+ if (psIntfAdapter->bSuspended == false)
usb_autopm_put_interface(intf);
}
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
index 26f5bc76111..f2973f5e503 100644
--- a/drivers/staging/bcm/InterfaceRx.c
+++ b/drivers/staging/bcm/InterfaceRx.c
@@ -19,7 +19,7 @@ GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter)
UINT index = 0;
if((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == FALSE))
+ (psIntfAdapter->psAdapter->StopAllXaction == false))
{
index = atomic_read(&psIntfAdapter->uCurrRcb);
pRcb = &psIntfAdapter->asUsbRcb[index];
@@ -38,7 +38,7 @@ GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter)
static void read_bulk_callback(struct urb *urb)
{
struct sk_buff *skb = NULL;
- BOOLEAN bHeaderSupressionEnabled = FALSE;
+ bool bHeaderSupressionEnabled = false;
int QueueIndex = NO_OF_QUEUES + 1;
UINT uiIndex=0;
int process_done = 1;
@@ -57,7 +57,7 @@ static void read_bulk_callback(struct urb *urb)
(0 == urb->actual_length)
)
{
- pRcb->bUsed = FALSE;
+ pRcb->bUsed = false;
atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
@@ -73,7 +73,7 @@ static void read_bulk_callback(struct urb *urb)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"Rx URB has got cancelled. status :%d", urb->status);
}
- pRcb->bUsed = FALSE;
+ pRcb->bUsed = false;
atomic_dec(&psIntfAdapter->uNumRcbUsed);
urb->status = STATUS_SUCCESS ;
return ;
@@ -192,7 +192,7 @@ static void read_bulk_callback(struct urb *urb)
}
}
Adapter->PrevNumRecvDescs++;
- pRcb->bUsed = FALSE;
+ pRcb->bUsed = false;
atomic_dec(&psIntfAdapter->uNumRcbUsed);
}
@@ -205,10 +205,10 @@ static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_us
psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback,
pRcb);
- if(FALSE == psIntfAdapter->psAdapter->device_removed &&
- FALSE == psIntfAdapter->psAdapter->bEndPointHalted &&
- FALSE == psIntfAdapter->bSuspended &&
- FALSE == psIntfAdapter->bPreparingForBusSuspend)
+ if(false == psIntfAdapter->psAdapter->device_removed &&
+ false == psIntfAdapter->psAdapter->bEndPointHalted &&
+ false == psIntfAdapter->bSuspended &&
+ false == psIntfAdapter->bPreparingForBusSuspend)
{
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
@@ -240,7 +240,7 @@ Return: TRUE - If Rx was successful.
Other - If an error occurred.
*/
-BOOLEAN InterfaceRx (struct bcm_interface_adapter *psIntfAdapter)
+bool InterfaceRx (struct bcm_interface_adapter *psIntfAdapter)
{
USHORT RxDescCount = NUM_RX_DESC - atomic_read(&psIntfAdapter->uNumRcbUsed);
struct bcm_usb_rcb *pRcb = NULL;
@@ -253,7 +253,7 @@ BOOLEAN InterfaceRx (struct bcm_interface_adapter *psIntfAdapter)
if(pRcb == NULL)
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer");
- return FALSE;
+ return false;
}
//atomic_inc(&psIntfAdapter->uNumRcbUsed);
ReceiveRcb(psIntfAdapter, pRcb);
diff --git a/drivers/staging/bcm/InterfaceRx.h b/drivers/staging/bcm/InterfaceRx.h
index 424645e9e47..b4e858bcda3 100644
--- a/drivers/staging/bcm/InterfaceRx.h
+++ b/drivers/staging/bcm/InterfaceRx.h
@@ -1,7 +1,7 @@
#ifndef _INTERFACE_RX_H
#define _INTERFACE_RX_H
-BOOLEAN InterfaceRx(struct bcm_interface_adapter *Adapter);
+bool InterfaceRx(struct bcm_interface_adapter *Adapter);
#endif
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
index b8c785556dd..b9c2784e981 100644
--- a/drivers/staging/bcm/InterfaceTx.c
+++ b/drivers/staging/bcm/InterfaceTx.c
@@ -7,7 +7,7 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter;
struct bcm_link_request *pControlMsg = (struct bcm_link_request *)urb->transfer_buffer;
struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter ;
- BOOLEAN bpowerDownMsg = FALSE ;
+ bool bpowerDownMsg = false ;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
if (unlikely(netif_msg_tx_done(Adapter)))
@@ -26,7 +26,7 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
}
}
- pTcb->bUsed = FALSE;
+ pTcb->bUsed = false;
atomic_dec(&psIntfAdapter->uNumTcbUsed);
@@ -42,7 +42,7 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
//This covers the bus err while Idle Request msg sent down.
if(urb->status != STATUS_SUCCESS)
{
- psAdapter->bPreparingForLowPowerMode = FALSE ;
+ psAdapter->bPreparingForLowPowerMode = false ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Idle Mode Request msg failed to reach to Modem");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
@@ -50,11 +50,11 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
goto err_exit;
}
- if(psAdapter->bDoSuspend == FALSE)
+ if(psAdapter->bDoSuspend == false)
{
psAdapter->IdleMode = TRUE;
//since going in Idle mode completed hence making this var false;
- psAdapter->bPreparingForLowPowerMode = FALSE ;
+ psAdapter->bPreparingForLowPowerMode = false ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State...");
//Signalling the cntrl pkt path in Ioctl
@@ -70,7 +70,7 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
//This covers the bus err while shutdown Request msg sent down.
if(urb->status != STATUS_SUCCESS)
{
- psAdapter->bPreparingForLowPowerMode = FALSE ;
+ psAdapter->bPreparingForLowPowerMode = false ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Shutdown Request Msg failed to reach to Modem");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
@@ -79,11 +79,11 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
}
bpowerDownMsg = TRUE ;
- if(psAdapter->bDoSuspend == FALSE)
+ if(psAdapter->bDoSuspend == false)
{
psAdapter->bShutStatus = TRUE;
//since going in shutdown mode completed hence making this var false;
- psAdapter->bPreparingForLowPowerMode = FALSE ;
+ psAdapter->bPreparingForLowPowerMode = false ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Host Entered in shutdown Mode State...");
//Signalling the cntrl pkt path in Ioctl
wake_up(&psAdapter->lowpower_mode_wait_queue);
@@ -113,7 +113,7 @@ static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAda
UINT index = 0;
if((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction ==FALSE))
+ (psIntfAdapter->psAdapter->StopAllXaction ==false))
{
index = atomic_read(&psIntfAdapter->uCurrTcb);
pTcb = &psIntfAdapter->asUsbTcb[index];
@@ -161,10 +161,10 @@ static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u
}
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */
- if(FALSE == psIntfAdapter->psAdapter->device_removed &&
- FALSE == psIntfAdapter->psAdapter->bEndPointHalted &&
- FALSE == psIntfAdapter->bSuspended &&
- FALSE == psIntfAdapter->bPreparingForBusSuspend)
+ if(false == psIntfAdapter->psAdapter->device_removed &&
+ false == psIntfAdapter->psAdapter->bEndPointHalted &&
+ false == psIntfAdapter->bSuspended &&
+ false == psIntfAdapter->bPreparingForBusSuspend)
{
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
@@ -184,7 +184,7 @@ int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
{
struct bcm_usb_tcb *pTcb= NULL;
- struct bcm_interface_adapter *psIntfAdapter = (struct bcm_interface_adapter *)arg;
+ struct bcm_interface_adapter *psIntfAdapter = arg;
pTcb= GetBulkOutTcb(psIntfAdapter);
if(pTcb == NULL)
{
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
index bc486163332..f95b06713a2 100644
--- a/drivers/staging/bcm/LeakyBucket.c
+++ b/drivers/staging/bcm/LeakyBucket.c
@@ -82,7 +82,7 @@ static ULONG GetSFTokenCount(struct bcm_mini_adapter *Adapter, struct bcm_packet
return 0;
}
- if (FALSE != psSF->bValid && psSF->ucDirection) {
+ if (false != psSF->bValid && psSF->ucDirection) {
if (0 != psSF->uiCurrentTokenCount) {
return psSF->uiCurrentTokenCount;
} else {
@@ -188,7 +188,7 @@ static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter, struct
spin_unlock_bh(&psSF->SFQueueLock);
Status = SendPacketFromQueue(Adapter, psSF, QueuePacket);
- psSF->uiPendedLast = FALSE;
+ psSF->uiPendedLast = false;
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
@@ -250,7 +250,7 @@ VOID transmit_packets(struct bcm_mini_adapter *Adapter)
UINT uiPrevTotalCount = 0;
int iIndex = 0;
- BOOLEAN exit_flag = TRUE;
+ bool exit_flag = TRUE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "=====>");
@@ -299,7 +299,7 @@ VOID transmit_packets(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
uiPrevTotalCount--;
- exit_flag = FALSE;
+ exit_flag = false;
}
}
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index 4cfc2c33c69..7b2fa0f4a2e 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -53,7 +53,7 @@ int InitAdapter(struct bcm_mini_adapter *psAdapter)
init_waitqueue_head(&psAdapter->ioctl_fw_dnld_wait_queue);
init_waitqueue_head(&psAdapter->lowpower_mode_wait_queue);
psAdapter->waiting_to_fw_download_done = TRUE;
- psAdapter->fw_download_done = FALSE;
+ psAdapter->fw_download_done = false;
default_wimax_protocol_initialize(psAdapter);
for (i = 0; i < MAX_CNTRL_PKTS; i++) {
@@ -255,7 +255,7 @@ int CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter, void *ioBuffer)
if (Adapter->bShutStatus == TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC UP IN SHUTDOWN..Device WakeUp\n");
- if (Adapter->bTriedToWakeUpFromlowPowerMode == FALSE) {
+ if (Adapter->bTriedToWakeUpFromlowPowerMode == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Waking up for the First Time..\n");
Adapter->usIdleModePattern = ABORT_SHUTDOWN_MODE; /* change it to 1 for current support. */
Adapter->bWakeUpDevice = TRUE;
@@ -346,7 +346,7 @@ int CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter, void *ioBuffer)
pktlen = pLeader->PLength;
Status = StoreCmControlResponseMessage(Adapter, pucAddIndication, &pktlen);
if (Status != 1) {
- ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication_alt *)pucAddIndication)->u16TID, FALSE);
+ ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication_alt *)pucAddIndication)->u16TID, false);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, " Error Restoring The DSX Control Packet. Dsx Buffers on Target may not be Setup Properly ");
return STATUS_FAILURE;
}
@@ -499,7 +499,7 @@ void LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuff
Adapter->bETHCSEnabled = *(pucBuffer+4) & ETH_CS_MASK;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHS Support Status Received In LinkUp Ack : %x\n", Adapter->bPHSEnabled);
- if ((FALSE == Adapter->bShutStatus) && (FALSE == Adapter->IdleMode)) {
+ if ((false == Adapter->bShutStatus) && (false == Adapter->IdleMode)) {
if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = NORMAL_OPERATION;
wake_up(&Adapter->LEDInfo.notify_led_event);
@@ -517,8 +517,8 @@ void LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuff
Adapter->LinkUpStatus = 0;
Adapter->LinkStatus = 0;
Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
- Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
- Adapter->IdleMode = FALSE;
+ Adapter->bTriedToWakeUpFromlowPowerMode = false;
+ Adapter->IdleMode = false;
beceem_protocol_reset(Adapter);
break;
@@ -578,7 +578,7 @@ void SendIdleModeResponse(struct bcm_mini_adapter *Adapter)
stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "HOST IS NACKING Idle mode To F/W!!!!!!!!");
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Adapter->bPreparingForLowPowerMode = false;
} else {
stIdleResponse.szData[1] = TARGET_CAN_GO_TO_IDLE_MODE; /* 2; Idle ACK */
Adapter->StatisticsPointer = 0;
@@ -613,7 +613,7 @@ void SendIdleModeResponse(struct bcm_mini_adapter *Adapter)
if (Adapter->bDoSuspend == TRUE)
Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
} else {
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Adapter->bPreparingForLowPowerMode = false;
}
if (!NVMAccess)
@@ -626,7 +626,7 @@ void SendIdleModeResponse(struct bcm_mini_adapter *Adapter)
status = CopyBufferToControlPacket(Adapter, &stIdleResponse);
if ((status != STATUS_SUCCESS)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "fail to send the Idle mode Request\n");
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Adapter->bPreparingForLowPowerMode = false;
StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
}
do_gettimeofday(&tv);
@@ -651,8 +651,8 @@ void DumpPackInfo(struct bcm_mini_adapter *Adapter)
for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "*********** Showing Details Of Queue %d***** ******", uiLoopIndex);
- if (FALSE == Adapter->PackInfo[uiLoopIndex].bValid) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid is FALSE for %X index\n", uiLoopIndex);
+ if (false == Adapter->PackInfo[uiLoopIndex].bValid) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid is false for %X index\n", uiLoopIndex);
continue;
}
@@ -783,7 +783,7 @@ int reset_card_proc(struct bcm_mini_adapter *ps_adapter)
int bytes;
psIntfAdapter = ((struct bcm_interface_adapter *)(ps_adapter->pvInterfaceAdapter));
- ps_adapter->bDDRInitDone = FALSE;
+ ps_adapter->bDDRInitDone = false;
if (ps_adapter->chip_id >= T3LPB) {
/* SYS_CFG register is write protected hence for modifying this reg value, it should be read twice before */
@@ -803,7 +803,7 @@ int reset_card_proc(struct bcm_mini_adapter *ps_adapter)
if (ps_adapter->chip_id >= T3LPB) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Resetting UMA-B\n");
retval = usb_reset_device(psIntfAdapter->udev);
- psIntfAdapter->psAdapter->StopAllXaction = FALSE;
+ psIntfAdapter->psAdapter->StopAllXaction = false;
if (retval != STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reset failed with ret value :%d", retval);
@@ -888,7 +888,7 @@ int reset_card_proc(struct bcm_mini_adapter *ps_adapter)
wrmalt(ps_adapter, 0x0f01186c, &uiResetValue, sizeof(uiResetValue));
err_exit:
- psIntfAdapter->psAdapter->StopAllXaction = FALSE;
+ psIntfAdapter->psAdapter->StopAllXaction = false;
return retval;
}
@@ -968,7 +968,7 @@ int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter)
return -EIO;
}
- if (FALSE == ps_adapter->AutoFirmDld) {
+ if (false == ps_adapter->AutoFirmDld) {
BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoFirmDld Disabled in CFG File..\n");
/* If Auto f/w download is disable, register the control interface, */
/* register the control interface after the mailbox. */
@@ -1094,7 +1094,7 @@ void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter)
if (ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE) {
pr_info(DRV_NAME ": AutoSyncup is Disabled\n");
- Adapter->AutoSyncup = FALSE;
+ Adapter->AutoSyncup = false;
} else {
pr_info(DRV_NAME ": AutoSyncup is Enabled\n");
Adapter->AutoSyncup = TRUE;
@@ -1105,7 +1105,7 @@ void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter)
Adapter->AutoLinkUp = TRUE;
} else {
pr_info(DRV_NAME ": Disabling autolink up");
- Adapter->AutoLinkUp = FALSE;
+ Adapter->AutoLinkUp = false;
}
/* Setting the DDR Setting.. */
Adapter->DDRSetting = (ntohl(Adapter->pstargetparams->HostDrvrConfig6) >> 8)&0x0F;
@@ -1117,7 +1117,7 @@ void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter)
Adapter->AutoFirmDld = TRUE;
} else {
pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = FALSE;
+ Adapter->AutoFirmDld = false;
}
uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6);
Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01;
@@ -1155,21 +1155,21 @@ static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter)
if (reporting_mode == TRUE) {
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "can't do suspen/resume as reporting mode is enable");
- psAdapter->bDoSuspend = FALSE;
+ psAdapter->bDoSuspend = false;
}
if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB)) {
/* If reporting mode is enable, switch PMU to PMC */
{
psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING;
- psAdapter->bDoSuspend = FALSE;
+ psAdapter->bDoSuspend = false;
}
/* clearing space bit[15..12] */
psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl((0xF << 12)));
/* placing the power save mode option */
psAdapter->pstargetparams->HostDrvrConfig6 |= htonl((psAdapter->ulPowerSaveMode << 12));
- } else if (psAdapter->bIsAutoCorrectEnabled == FALSE) {
+ } else if (psAdapter->bIsAutoCorrectEnabled == false) {
/* remove the autocorrect disable bit set before dumping. */
psAdapter->ulPowerSaveMode &= ~(1 << 3);
psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl(1 << 15));
@@ -1302,8 +1302,8 @@ static void HandleShutDownModeWakeup(struct bcm_mini_adapter *Adapter)
wake_up(&Adapter->LEDInfo.notify_led_event);
}
- Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
- Adapter->bShutStatus = FALSE;
+ Adapter->bTriedToWakeUpFromlowPowerMode = false;
+ Adapter->bShutStatus = false;
wake_up(&Adapter->lowpower_mode_wait_queue);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
}
@@ -1341,7 +1341,7 @@ static void SendShutModeResponse(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Device Access is going on NACK the Shut Down MODE\n");
stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Adapter->bPreparingForLowPowerMode = false;
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Sending SHUTDOWN MODE ACK\n");
stShutdownResponse.szData[2] = SHUTDOWN_ACK_FROM_DRIVER; /* ShutDown ACK */
@@ -1374,7 +1374,7 @@ static void SendShutModeResponse(struct bcm_mini_adapter *Adapter)
if (Adapter->bDoSuspend == TRUE)
Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
} else {
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Adapter->bPreparingForLowPowerMode = false;
}
if (!NVMAccess)
@@ -1387,7 +1387,7 @@ static void SendShutModeResponse(struct bcm_mini_adapter *Adapter)
Status = CopyBufferToControlPacket(Adapter, &stShutdownResponse);
if ((Status != STATUS_SUCCESS)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "fail to send the Idle mode Request\n");
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Adapter->bPreparingForLowPowerMode = false;
StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
}
}
@@ -1430,11 +1430,11 @@ void ResetCounters(struct bcm_mini_adapter *Adapter)
Adapter->LinkStatus = 0;
atomic_set(&Adapter->cntrlpktCnt, 0);
atomic_set(&Adapter->TotalPacketCount, 0);
- Adapter->fw_download_done = FALSE;
+ Adapter->fw_download_done = false;
Adapter->LinkStatus = 0;
- Adapter->AutoLinkUp = FALSE;
- Adapter->IdleMode = FALSE;
- Adapter->bShutStatus = FALSE;
+ Adapter->AutoLinkUp = false;
+ Adapter->IdleMode = false;
+ Adapter->bShutStatus = false;
}
struct bcm_classifier_rule *GetFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIP)
@@ -1521,7 +1521,7 @@ void update_per_sf_desc_cnts(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid VCID : %x\n", Adapter->PackInfo[iIndex].usVCID_Value);
}
}
- atomic_set(&Adapter->uiMBupdate, FALSE);
+ atomic_set(&Adapter->uiMBupdate, false);
}
void flush_queue(struct bcm_mini_adapter *Adapter, unsigned int iQIndex)
@@ -1557,8 +1557,8 @@ static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter)
netif_carrier_off(Adapter->dev);
netif_stop_queue(Adapter->dev);
- Adapter->IdleMode = FALSE;
- Adapter->LinkUpStatus = FALSE;
+ Adapter->IdleMode = false;
+ Adapter->LinkUpStatus = false;
ClearTargetDSXBuffer(Adapter, 0, TRUE);
/* Delete All Classifier Rules */
@@ -1568,7 +1568,7 @@ static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter)
flush_all_queues(Adapter);
if (Adapter->TimerActive == TRUE)
- Adapter->TimerActive = FALSE;
+ Adapter->TimerActive = false;
memset(Adapter->astFragmentedPktClassifierTable, 0, sizeof(struct bcm_fragmented_packet_info) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index af5d22faa7f..892ebc65cdd 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -8,9 +8,9 @@ static UINT CreateClassifierPHSRule(B_UINT16 uiClsId, struct bcm_phs_classifier
static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId, struct bcm_phs_classifier_entry *pstClassifierEntry, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
-static BOOLEAN ValidatePHSRuleComplete(struct bcm_phs_rule *psPhsRule);
+static bool ValidatePHSRuleComplete(struct bcm_phs_rule *psPhsRule);
-static BOOLEAN DerefPhsRule(B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule);
+static bool DerefPhsRule(B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule);
static UINT GetClassifierEntry(struct bcm_phs_classifier_table *pstClassifierTable, B_UINT32 uiClsid, enum bcm_phs_classifier_context eClsContext, struct bcm_phs_classifier_entry **ppstClassifierEntry);
@@ -67,7 +67,7 @@ int PHSTransmit(struct bcm_mini_adapter *Adapter,
struct sk_buff **pPacket,
USHORT Vcid,
B_UINT16 uiClassifierRuleID,
- BOOLEAN bHeaderSuppressionEnabled,
+ bool bHeaderSuppressionEnabled,
UINT *PacketLen,
UCHAR bEthCSSupport)
{
@@ -81,7 +81,7 @@ int PHSTransmit(struct bcm_mini_adapter *Adapter,
PUCHAR pucPHSPktHdrOutBuf = Adapter->stPhsTxContextInfo.ucaHdrSuppressionOutBuf;
UINT usPacketType;
UINT BytesToRemove = 0;
- BOOLEAN bPHSI = 0;
+ bool bPHSI = 0;
LONG ulPhsStatus = 0;
UINT numBytesCompressed = 0;
struct sk_buff *newPacket = NULL;
@@ -569,7 +569,7 @@ ULONG PhsDeleteSFRules(IN void *pvContext, IN B_UINT16 uiVcid)
memset(&pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex], 0, sizeof(struct bcm_phs_classifier_entry));
}
}
- pstServiceFlowEntry->bUsed = FALSE;
+ pstServiceFlowEntry->bUsed = false;
pstServiceFlowEntry->uiVcid = 0;
}
@@ -596,7 +596,7 @@ ULONG PhsDeleteSFRules(IN void *pvContext, IN B_UINT16 uiVcid)
* 0 if successful,
* >0 Error.
*/
-ULONG PhsCompress(IN void *pvContext,
+static ULONG PhsCompress(IN void *pvContext,
IN B_UINT16 uiVcid,
IN B_UINT16 uiClsId,
IN void *pvInputBuffer,
@@ -677,7 +677,7 @@ ULONG PhsCompress(IN void *pvContext,
* 0 if successful,
* >0 Error.
*/
-ULONG PhsDeCompress(IN void *pvContext,
+static ULONG PhsDeCompress(IN void *pvContext,
IN B_UINT16 uiVcid,
IN void *pvInputBuffer,
OUT void *pvOutputBuffer,
@@ -788,26 +788,26 @@ static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesT
psServiceFlowRulesTable = NULL;
}
-static BOOLEAN ValidatePHSRuleComplete(IN struct bcm_phs_rule *psPhsRule)
+static bool ValidatePHSRuleComplete(IN struct bcm_phs_rule *psPhsRule)
{
if (psPhsRule) {
if (!psPhsRule->u8PHSI) {
/* PHSI is not valid */
- return FALSE;
+ return false;
}
if (!psPhsRule->u8PHSS) {
/* PHSS Is Undefined */
- return FALSE;
+ return false;
}
/* Check if PHSF is defines for the PHS Rule */
if (!psPhsRule->u8PHSFLength) /* If any part of PHSF is valid then Rule contains valid PHSF */
- return FALSE;
+ return false;
return TRUE;
} else
- return FALSE;
+ return false;
}
UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable,
@@ -829,7 +829,7 @@ UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable,
return PHS_INVALID_TABLE_INDEX;
}
-UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
+static UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
IN B_UINT32 uiClsid, enum bcm_phs_classifier_context eClsContext,
OUT struct bcm_phs_classifier_entry **ppstClassifierEntry)
{
@@ -880,7 +880,7 @@ static UINT GetPhsRuleEntry(IN struct bcm_phs_classifier_table *pstClassifierTab
return PHS_INVALID_TABLE_INDEX;
}
-UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid, IN B_UINT16 uiClsId,
+static UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid, IN B_UINT16 uiClsId,
IN struct bcm_phs_table *psServiceFlowTable,
struct bcm_phs_rule *psPhsRule,
B_UINT8 u8AssociatedPHSI)
@@ -888,7 +888,7 @@ UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid, IN B_UINT16 uiClsId,
struct bcm_phs_classifier_table *psaClassifiertable = NULL;
UINT uiStatus = 0;
int iSfIndex;
- BOOLEAN bFreeEntryFound = FALSE;
+ bool bFreeEntryFound = false;
/* Check for a free entry in SFID table */
for (iSfIndex = 0; iSfIndex < MAX_SERVICEFLOWS; iSfIndex++) {
@@ -913,7 +913,7 @@ UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid, IN B_UINT16 uiClsId,
return uiStatus;
}
-UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
+static UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
IN B_UINT16 uiClsId,
IN struct bcm_phs_entry *pstServiceFlowEntry,
struct bcm_phs_rule *psPhsRule,
@@ -1009,7 +1009,7 @@ static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
B_UINT8 u8AssociatedPHSI)
{
UINT iClassifierIndex = 0;
- BOOLEAN bFreeEntryFound = FALSE;
+ bool bFreeEntryFound = false;
struct bcm_phs_classifier_entry *psClassifierRules = NULL;
UINT nStatus = PHS_SUCCESS;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -1102,7 +1102,7 @@ static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
{
struct bcm_phs_rule *pstAddPhsRule = NULL;
UINT nPhsRuleIndex = 0;
- BOOLEAN bPHSRuleOrphaned = FALSE;
+ bool bPHSRuleOrphaned = false;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
psPhsRule->u8RefCnt = 0;
@@ -1124,7 +1124,7 @@ static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
}
/* Step 2.a PHS Rule Does Not Exist .Create New PHS Rule for uiClsId */
- if (FALSE == bPHSRuleOrphaned) {
+ if (false == bPHSRuleOrphaned) {
pstClassifierEntry->pstPhsRule = kmalloc(sizeof(struct bcm_phs_rule), GFP_KERNEL);
if (NULL == pstClassifierEntry->pstPhsRule)
@@ -1150,10 +1150,10 @@ static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
return PHS_SUCCESS;
}
-static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule)
+static bool DerefPhsRule(IN B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule)
{
if (pstPhsRule == NULL)
- return FALSE;
+ return false;
if (pstPhsRule->u8RefCnt)
pstPhsRule->u8RefCnt--;
@@ -1166,7 +1166,7 @@ static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId, struct bcm_phs_classifier_tabl
*/
return TRUE;
} else
- return FALSE;
+ return false;
}
void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension)
@@ -1239,7 +1239,7 @@ void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension)
* header.
* 0 -If PHS rule is NULL.If PHSI is 0 indicateing packet as uncompressed.
*/
-int phs_decompress(unsigned char *in_buf,
+static int phs_decompress(unsigned char *in_buf,
unsigned char *out_buf,
struct bcm_phs_rule *decomp_phs_rules,
UINT *header_size)
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
index 82d86828418..d697f9c860c 100644
--- a/drivers/staging/bcm/PHSModule.h
+++ b/drivers/staging/bcm/PHSModule.h
@@ -5,7 +5,7 @@ int PHSTransmit(struct bcm_mini_adapter *Adapter,
struct sk_buff **pPacket,
USHORT Vcid,
B_UINT16 uiClassifierRuleID,
- BOOLEAN bHeaderSuppressionEnabled,
+ bool bHeaderSuppressionEnabled,
PUINT PacketLen,
UCHAR bEthCSSupport);
@@ -39,7 +39,7 @@ ULONG PhsDeleteClassifierRule(void* pvContext, B_UINT16 uiVcid ,B_UINT16 uiClsI
ULONG PhsDeleteSFRules(void* pvContext,B_UINT16 uiVcid) ;
-BOOLEAN ValidatePHSRule(struct bcm_phs_rule *psPhsRule);
+bool ValidatePHSRule(struct bcm_phs_rule *psPhsRule);
UINT GetServiceFlowEntry(struct bcm_phs_table *psServiceFlowTable,B_UINT16 uiVcid, struct bcm_phs_entry **ppstServiceFlowEntry);
diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h
index 2a673b125f0..fb53a00591e 100644
--- a/drivers/staging/bcm/Prototypes.h
+++ b/drivers/staging/bcm/Prototypes.h
@@ -111,7 +111,7 @@ void update_per_cid_rx (struct bcm_mini_adapter *Adapter);
void update_per_sf_desc_cnts( struct bcm_mini_adapter *Adapter);
-void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter,B_UINT16 TID,BOOLEAN bFreeAll);
+void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter,B_UINT16 TID,bool bFreeAll);
void flush_queue(struct bcm_mini_adapter *Adapter, UINT iQIndex);
@@ -138,7 +138,7 @@ INT BeceemEEPROMBulkWrite(
PUCHAR pBuffer,
UINT uiOffset,
UINT uiNumBytes,
- BOOLEAN bVerify);
+ bool bVerify);
INT ReadBeceemEEPROM(struct bcm_mini_adapter *Adapter,UINT dwAddress, UINT *pdwData);
@@ -155,13 +155,13 @@ INT BeceemNVMWrite(
PUINT pBuffer,
UINT uiOffset,
UINT uiNumBytes,
- BOOLEAN bVerify);
+ bool bVerify);
INT BcmInitNVM(struct bcm_mini_adapter *Adapter);
INT BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter,UINT uiSectorSize);
-BOOLEAN IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section);
+bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section);
INT BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap);
@@ -198,7 +198,7 @@ INT BcmCopySection(struct bcm_mini_adapter *Adapter,
UINT numOfBytes);
-BOOLEAN IsNonCDLessDevice(struct bcm_mini_adapter *Adapter);
+bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter);
VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter,PUINT puiBuffer);
@@ -212,7 +212,7 @@ INT buffDnldVerify(struct bcm_mini_adapter *Adapter, unsigned char *mappedbuffer
VOID putUsbSuspend(struct work_struct *work);
-BOOLEAN IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios);
+bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios);
#endif
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 2d4a77cca91..1609a2bdc52 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -5,7 +5,7 @@ This file contains the routines related to Quality of Service.
#include "headers.h"
static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter, PVOID pvEthPayload, struct bcm_eth_packet_info *pstEthCsPktInfo);
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo, struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
+static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo, struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, struct iphdr *iphd,
struct bcm_classifier_rule *pstClassifierRule);
@@ -24,7 +24,7 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
+bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
{
UCHAR ucLoopIndex = 0;
@@ -43,7 +43,7 @@ BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG u
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Not Matched");
- return FALSE;
+ return false;
}
@@ -58,7 +58,7 @@ BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG u
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
+bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
{
UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -77,7 +77,7 @@ BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address Not Matched");
- return FALSE;
+ return false;
}
@@ -91,7 +91,7 @@ BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG
*
* Returns - TRUE(If address matches) else FAIL.
**************************************************************************/
-BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
+bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -103,7 +103,7 @@ BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfSe
return TRUE;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Type Of Service Not Matched");
- return FALSE;
+ return false;
}
@@ -132,7 +132,7 @@ bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucProtoc
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Not Matched");
- return FALSE;
+ return false;
}
@@ -164,7 +164,7 @@ bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPo
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port: %x Not Matched ", ushSrcPort);
- return FALSE;
+ return false;
}
@@ -197,7 +197,7 @@ bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushDest
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dest Port: %x Not Matched", ushDestPort);
- return FALSE;
+ return false;
}
/**
@ingroup tx_functions
@@ -209,7 +209,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
struct bcm_classifier_rule *pstClassifierRule)
{
struct bcm_transport_header *xprt_hdr = NULL;
- BOOLEAN bClassificationSucceed = FALSE;
+ bool bClassificationSucceed = false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "========>");
@@ -223,7 +223,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
//Checking classifier validity
if (!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
{
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
break;
}
@@ -233,17 +233,17 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
//**************Checking IP header parameter**************************//
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
- if (FALSE == (bClassificationSucceed =
+ if (false == (bClassificationSucceed =
MatchSrcIpAddress(pstClassifierRule, iphd->saddr)))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
- if (FALSE == (bClassificationSucceed =
+ if (false == (bClassificationSucceed =
MatchDestIpAddress(pstClassifierRule, iphd->daddr)))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
- if (FALSE == (bClassificationSucceed =
+ if (false == (bClassificationSucceed =
MatchTos(pstClassifierRule, iphd->tos)))
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
@@ -251,7 +251,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
- if (FALSE == (bClassificationSucceed =
+ if (false == (bClassificationSucceed =
MatchProtocol(pstClassifierRule, iphd->protocol)))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
@@ -263,7 +263,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
(iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
- if (FALSE == (bClassificationSucceed =
+ if (false == (bClassificationSucceed =
MatchSrcPort(pstClassifierRule,
ntohs((iphd->protocol == UDP) ?
xprt_hdr->uhdr.source : xprt_hdr->thdr.source))))
@@ -273,7 +273,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
(iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
xprt_hdr->thdr.dest);
- if (FALSE == (bClassificationSucceed =
+ if (false == (bClassificationSucceed =
MatchDestPort(pstClassifierRule,
ntohs((iphd->protocol == UDP) ?
xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest))))
@@ -286,13 +286,13 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
iMatchedSFQueueIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
if (iMatchedSFQueueIndex >= NO_OF_QUEUES)
{
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
}
else
{
- if (FALSE == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
+ if (false == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
{
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
}
}
}
@@ -451,7 +451,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
struct iphdr *pIpHeader = NULL;
INT uiSfIndex = 0;
USHORT usIndex = Adapter->usBestEffortQueueIndex;
- BOOLEAN bFragmentedPkt = FALSE, bClassificationSucceed = FALSE;
+ bool bFragmentedPkt = false, bClassificationSucceed = false;
USHORT usCurrFragment = 0;
struct bcm_tcp_header *pTcpHeader;
@@ -529,16 +529,16 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
//to classify the packet until match found
do
{
- if (FALSE == Adapter->astClassifierTable[uiLoopIndex].bUsed)
+ if (false == Adapter->astClassifierTable[uiLoopIndex].bUsed)
{
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
break;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Adapter->PackInfo[%d].bvalid=True\n", uiLoopIndex);
if (0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection)
{
- bClassificationSucceed = FALSE;//cannot be processed for classification.
+ bClassificationSucceed = false;//cannot be processed for classification.
break; // it is a down link connection
}
@@ -556,7 +556,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
if (eEthUnsupportedFrame == stEthCsPktInfo.eNwpktEthFrameType)
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame\n");
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
break;
}
@@ -577,7 +577,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
if (eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType)
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF\n");
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
break;
}
}
@@ -590,7 +590,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
if (stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket)
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet is Not an IP Packet\n");
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
break;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dump IP Header :\n");
@@ -636,7 +636,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
stFragPktInfo.ulSrcIpAddress = pIpHeader->saddr;
stFragPktInfo.usIpIdentification = pIpHeader->id;
stFragPktInfo.pstMatchedClassifierEntry = pstClassifierRule;
- stFragPktInfo.bOutOfOrderFragment = FALSE;
+ stFragPktInfo.bOutOfOrderFragment = false;
AddFragIPClsEntry(Adapter, &stFragPktInfo);
}
@@ -649,7 +649,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
return INVALID_QUEUE_INDEX;
}
-static BOOLEAN EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
+static bool EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
{
UINT i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -661,12 +661,12 @@ static BOOLEAN EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifier
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", i, Mac[i], pstClassifierRule->au8EThCSSrcMAC[i], pstClassifierRule->au8EThCSSrcMACMask[i]);
if ((pstClassifierRule->au8EThCSSrcMAC[i] & pstClassifierRule->au8EThCSSrcMACMask[i]) !=
(Mac[i] & pstClassifierRule->au8EThCSSrcMACMask[i]))
- return FALSE;
+ return false;
}
return TRUE;
}
-static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
+static bool EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
{
UINT i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -678,12 +678,12 @@ static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifie
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", i, Mac[i], pstClassifierRule->au8EThCSDestMAC[i], pstClassifierRule->au8EThCSDestMACMask[i]);
if ((pstClassifierRule->au8EThCSDestMAC[i] & pstClassifierRule->au8EThCSDestMACMask[i]) !=
(Mac[i] & pstClassifierRule->au8EThCSDestMACMask[i]))
- return FALSE;
+ return false;
}
return TRUE;
}
-static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
+static bool EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
if ((pstClassifierRule->ucEtherTypeLen == 0) ||
@@ -698,29 +698,29 @@ static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRul
if (memcmp(&pstEthCsPktInfo->usEtherType, &pstClassifierRule->au8EthCSEtherType[1], 2) == 0)
return TRUE;
else
- return FALSE;
+ return false;
}
if (pstClassifierRule->au8EthCSEtherType[0] == 2)
{
if (eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
- return FALSE;
+ return false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s EthCS DSAP:%x EtherType[2]:%x\n", __FUNCTION__, pstEthCsPktInfo->ucDSAP, pstClassifierRule->au8EthCSEtherType[2]);
if (pstEthCsPktInfo->ucDSAP == pstClassifierRule->au8EthCSEtherType[2])
return TRUE;
else
- return FALSE;
+ return false;
}
- return FALSE;
+ return false;
}
-static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
+static bool EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
- BOOLEAN bClassificationSucceed = FALSE;
+ bool bClassificationSucceed = false;
USHORT usVLANID;
B_UINT8 uPriority = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -731,7 +731,7 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
if (pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID))
{
if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
- return FALSE;
+ return false;
uPriority = (ntohs(*(USHORT *)(skb->data + sizeof(struct bcm_eth_header))) & 0xF000) >> 13;
@@ -739,17 +739,17 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
bClassificationSucceed = TRUE;
if (!bClassificationSucceed)
- return FALSE;
+ return false;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS 802.1 D User Priority Rule Matched\n");
- bClassificationSucceed = FALSE;
+ bClassificationSucceed = false;
if (pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_VLANID_VALID))
{
if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
- return FALSE;
+ return false;
usVLANID = ntohs(*(USHORT *)(skb->data + sizeof(struct bcm_eth_header))) & 0xFFF;
@@ -759,7 +759,7 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
bClassificationSucceed = TRUE;
if (!bClassificationSucceed)
- return FALSE;
+ return false;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS 802.1 Q VLAN ID Rule Matched\n");
@@ -768,26 +768,26 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
}
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb,
+static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb,
struct bcm_eth_packet_info *pstEthCsPktInfo,
struct bcm_classifier_rule *pstClassifierRule,
B_UINT8 EthCSCupport)
{
- BOOLEAN bClassificationSucceed = FALSE;
+ bool bClassificationSucceed = false;
bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule, ((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
if (!bClassificationSucceed)
- return FALSE;
+ return false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS SrcMAC Matched\n");
bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule, ((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
if (!bClassificationSucceed)
- return FALSE;
+ return false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS DestMAC Matched\n");
//classify on ETHType/802.2SAP TLV
bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule, skb, pstEthCsPktInfo);
if (!bClassificationSucceed)
- return FALSE;
+ return false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS EthType/802.2SAP Matched\n");
@@ -795,7 +795,7 @@ static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff
bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule, skb, pstEthCsPktInfo);
if (!bClassificationSucceed)
- return FALSE;
+ return false;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS 802.1 VLAN Rules Matched\n");
return bClassificationSucceed;
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
index f8dc3e20b47..2ed4836b965 100644
--- a/drivers/staging/bcm/Transmit.c
+++ b/drivers/staging/bcm/Transmit.c
@@ -84,7 +84,7 @@ int SendControlPacket(struct bcm_mini_adapter *Adapter, char *pControlPacket)
int SetupNextSend(struct bcm_mini_adapter *Adapter, struct sk_buff *Packet, USHORT Vcid)
{
int status = 0;
- BOOLEAN bHeaderSupressionEnabled = FALSE;
+ bool bHeaderSupressionEnabled = false;
B_UINT16 uiClassifierRuleID;
u16 QueueIndex = skb_get_queue_mapping(Packet);
struct bcm_leader Leader = {0};
@@ -204,7 +204,7 @@ int tx_pkt_handler(struct bcm_mini_adapter *Adapter /**< pointer to adapter obje
/* Check end point for halt/stall. */
if (Adapter->bEndPointHalted == TRUE) {
Bcm_clear_halt_of_endpoints(Adapter);
- Adapter->bEndPointHalted = FALSE;
+ Adapter->bEndPointHalted = false;
StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
}
diff --git a/drivers/staging/bcm/Typedefs.h b/drivers/staging/bcm/Typedefs.h
index a985abf194f..832adcfd1e3 100644
--- a/drivers/staging/bcm/Typedefs.h
+++ b/drivers/staging/bcm/Typedefs.h
@@ -6,10 +6,10 @@
#define STATUS_SUCCESS 0
#define STATUS_FAILURE -1
-#define FALSE 0
+
#define TRUE 1
-typedef char BOOLEAN;
+
typedef char CHAR;
typedef int INT;
typedef short SHORT;
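
For readers unfamiliar with the kernel's native boolean support that this conversion relies on: bool is a typedef of C99 _Bool provided by <linux/types.h>, and the true/false constants come from <linux/stddef.h>, so the driver-local BOOLEAN type and FALSE macro removed above are redundant. The sketch below shows the resulting idiom only; the structure and function names are hypothetical and not taken from this driver.

    #include <linux/types.h>    /* bool (typedef of _Bool) */
    #include <linux/stddef.h>   /* true, false */

    struct example_adapter {
            bool device_removed;    /* was: BOOLEAN DeviceRemoved; */
            bool link_up;
    };

    /* Returns true only when the device is present and the link is up. */
    static bool example_can_transmit(const struct example_adapter *ad)
    {
            if (ad->device_removed)
                    return false;
            return ad->link_up;
    }

checkpatch.pl also tends to flag explicit comparisons such as "flag == false", so "!flag" is generally preferred over the comparison style kept by this patch.
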
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 05a948a3698..eee4f4795a7 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -13,12 +13,12 @@ static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
return u16CheckSum;
}
-BOOLEAN IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios)
+bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios)
{
INT Status;
Status = (Adapter->gpioBitMap & gpios) ^ gpios;
if (Status)
- return FALSE;
+ return false;
else
return TRUE;
}
@@ -27,7 +27,7 @@ static INT LED_Blink(struct bcm_mini_adapter *Adapter, UINT GPIO_Num, UCHAR uiLe
ULONG timeout, INT num_of_time, enum bcm_led_events currdriverstate)
{
int Status = STATUS_SUCCESS;
- BOOLEAN bInfinite = FALSE;
+ bool bInfinite = false;
/* Check if num_of_time is -ve. If yes, blink led in infinite loop */
if (num_of_time < 0) {
@@ -67,7 +67,7 @@ static INT LED_Blink(struct bcm_mini_adapter *Adapter, UINT GPIO_Num, UCHAR uiLe
currdriverstate != Adapter->DriverState ||
kthread_should_stop(),
msecs_to_jiffies(timeout));
- if (bInfinite == FALSE)
+ if (bInfinite == false)
num_of_time--;
}
return Status;
@@ -108,7 +108,7 @@ static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter, UCHAR GPIO_N
int Status = STATUS_SUCCESS;
INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
UINT remDelay = 0;
- BOOLEAN bBlinkBothLED = TRUE;
+ bool bBlinkBothLED = TRUE;
/* UINT GPIO_num = DISABLE_GPIO_NUM; */
ulong timeout = 0;
@@ -120,7 +120,7 @@ static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter, UCHAR GPIO_N
num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
- while ((Adapter->device_removed == FALSE)) {
+ while ((Adapter->device_removed == false)) {
timeout = 50;
/*
* Blink Tx and Rx LED when both Tx and Rx is
@@ -478,7 +478,7 @@ static int ReadLEDInformationFromEEPROM(struct bcm_mini_adapter *Adapter,
static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter,
- BOOLEAN *bEnableThread)
+ bool *bEnableThread)
{
int Status = STATUS_SUCCESS;
/* Array to store GPIO numbers from EEPROM */
@@ -499,10 +499,10 @@ static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter,
/* Read the GPIO numbers from EEPROM */
Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
- *bEnableThread = FALSE;
+ *bEnableThread = false;
return STATUS_SUCCESS;
} else if (Status) {
- *bEnableThread = FALSE;
+ *bEnableThread = false;
return Status;
}
@@ -561,7 +561,7 @@ static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter,
uiNum_of_LED_Type++;
}
if (uiNum_of_LED_Type >= NUM_OF_LEDS)
- *bEnableThread = FALSE;
+ *bEnableThread = false;
return Status;
}
@@ -602,7 +602,7 @@ static VOID LedGpioInit(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "LED Thread: WRM Failed\n");
- Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = false;
}
static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter, UCHAR *GPIO_num_tx,
@@ -660,7 +660,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
UCHAR dummyIndex = 0;
/* currdriverstate = Adapter->DriverState; */
- Adapter->LEDInfo.bIdleMode_tx_from_host = FALSE;
+ Adapter->LEDInfo.bIdleMode_tx_from_host = false;
/*
* Wait till event is triggered
@@ -698,7 +698,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
if (GPIO_num != DISABLE_GPIO_NUM)
TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
- if (Adapter->LEDInfo.bLedInitDone == FALSE) {
+ if (Adapter->LEDInfo.bLedInitDone == false) {
LedGpioInit(Adapter);
Adapter->LEDInfo.bLedInitDone = TRUE;
}
@@ -757,7 +757,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
UCHAR uiLEDTx = 0;
UCHAR uiLEDRx = 0;
currdriverstate = NORMAL_OPERATION;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = false;
BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx,
&GPIO_num_rx, &uiLEDTx, &uiLEDRx,
@@ -803,7 +803,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
}
/* Turn off LED And WAKE-UP for Sendinf IDLE mode ACK */
- Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bLedInitDone = false;
Adapter->LEDInfo.bIdle_led_off = TRUE;
wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
GPIO_num = DISABLE_GPIO_NUM;
@@ -830,7 +830,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
currdriverstate = LED_THREAD_INACTIVE;
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_RUNNING_INACTIVELY;
- Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bLedInitDone = false;
/* disable ALL LED */
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
@@ -841,7 +841,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
case LED_THREAD_ACTIVE:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "Activating LED thread again...");
- if (Adapter->LinkUpStatus == FALSE)
+ if (Adapter->LinkUpStatus == false)
Adapter->DriverState = NO_NETWORK_ENTRY;
else
Adapter->DriverState = NORMAL_OPERATION;
@@ -860,7 +860,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
int InitLedSettings(struct bcm_mini_adapter *Adapter)
{
int Status = STATUS_SUCCESS;
- BOOLEAN bEnableThread = TRUE;
+ bool bEnableThread = TRUE;
UCHAR uiIndex = 0;
/*
@@ -899,7 +899,7 @@ int InitLedSettings(struct bcm_mini_adapter *Adapter)
init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_RUNNING_ACTIVELY;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = false;
Adapter->LEDInfo.led_cntrl_threadid =
kthread_run((int (*)(void *)) LEDControlThread,
Adapter, "led_control_thread");
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 91a5715964b..9e5f955a1a0 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -45,7 +45,7 @@ static int BeceemFlashBulkWrite(
PUINT pBuffer,
unsigned int uiOffset,
unsigned int uiNumBytes,
- BOOLEAN bVerify);
+ bool bVerify);
static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter);
@@ -103,7 +103,7 @@ static UCHAR ReadEEPROMStatusRegister(struct bcm_mini_adapter *Adapter)
}
if (!(dwRetries%RETRIES_PER_DELAY))
udelay(1000);
- uiStatus = 0 ;
+ uiStatus = 0;
}
return uiData;
} /* ReadEEPROMStatusRegister */
@@ -1034,7 +1034,7 @@ static int BeceemFlashBulkWrite(struct bcm_mini_adapter *Adapter,
PUINT pBuffer,
unsigned int uiOffset,
unsigned int uiNumBytes,
- BOOLEAN bVerify)
+ bool bVerify)
{
PCHAR pTempBuff = NULL;
PUCHAR pcBuffer = (PUCHAR)pBuffer;
@@ -1084,18 +1084,18 @@ static int BeceemFlashBulkWrite(struct bcm_mini_adapter *Adapter,
* for DSD calibration, allow it without checking of sector permission
*/
- if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == FALSE)) {
+ if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) {
index = 0;
uiTemp = uiNumSectTobeRead;
while (uiTemp) {
- if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == FALSE) {
+ if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%X> is not writable",
(uiOffsetFromSectStart + index * Adapter->uiSectorSize));
Status = SECTOR_IS_NOT_WRITABLE;
goto BeceemFlashBulkWrite_EXIT;
}
uiTemp = uiTemp - 1;
- index = index + 1 ;
+ index = index + 1;
}
}
Adapter->SelectedChip = RESET_CHIP_SELECT;
@@ -1222,7 +1222,7 @@ static int BeceemFlashBulkWriteStatus(struct bcm_mini_adapter *Adapter,
PUINT pBuffer,
unsigned int uiOffset,
unsigned int uiNumBytes,
- BOOLEAN bVerify)
+ bool bVerify)
{
PCHAR pTempBuff = NULL;
PUCHAR pcBuffer = (PUCHAR)pBuffer;
@@ -1265,18 +1265,18 @@ static int BeceemFlashBulkWriteStatus(struct bcm_mini_adapter *Adapter,
uiNumSectTobeRead++;
}
- if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == FALSE)) {
+ if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) {
index = 0;
uiTemp = uiNumSectTobeRead;
while (uiTemp) {
- if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == FALSE) {
+ if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%x> is not writable",
(uiOffsetFromSectStart + index * Adapter->uiSectorSize));
Status = SECTOR_IS_NOT_WRITABLE;
goto BeceemFlashBulkWriteStatus_EXIT;
}
uiTemp = uiTemp - 1;
- index = index + 1 ;
+ index = index + 1;
}
}
@@ -1525,7 +1525,7 @@ static int BeceemEEPROMReadBackandVerify(struct bcm_mini_adapter *Adapter,
if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE)) {
/* re-write */
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, MAX_RW_SIZE, FALSE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, MAX_RW_SIZE, false);
mdelay(3);
BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE);
@@ -1539,7 +1539,7 @@ static int BeceemEEPROMReadBackandVerify(struct bcm_mini_adapter *Adapter,
BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4);
if (uiData != pBuffer[uiIndex]) {
/* re-write */
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, 4, FALSE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, 4, false);
mdelay(3);
BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4);
if (uiData != pBuffer[uiIndex])
@@ -1724,7 +1724,7 @@ int BeceemEEPROMBulkWrite(struct bcm_mini_adapter *Adapter,
PUCHAR pBuffer,
unsigned int uiOffset,
unsigned int uiNumBytes,
- BOOLEAN bVerify)
+ bool bVerify)
{
unsigned int uiBytesToCopy = uiNumBytes;
/* unsigned int uiRdbk = 0; */
@@ -1819,7 +1819,7 @@ int BeceemNVMRead(struct bcm_mini_adapter *Adapter,
#endif
if (Adapter->eNVMType == NVM_FLASH) {
- if (Adapter->bFlashRawRead == FALSE) {
+ if (Adapter->bFlashRawRead == false) {
if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD))
return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes);
@@ -1870,7 +1870,7 @@ int BeceemNVMWrite(struct bcm_mini_adapter *Adapter,
PUINT pBuffer,
unsigned int uiOffset,
unsigned int uiNumBytes,
- BOOLEAN bVerify)
+ bool bVerify)
{
int Status = 0;
unsigned int uiTemp = 0;
@@ -2425,7 +2425,7 @@ static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
B_UINT32 i = 0;
unsigned int uiSizeSection = 0;
- Adapter->uiVendorExtnFlag = FALSE;
+ Adapter->uiVendorExtnFlag = false;
for (i = 0; i < TOTAL_SECTIONS; i++)
Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart = UNINIT_PTR_IN_CS;
@@ -2685,12 +2685,12 @@ int BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash
switch (eFlashSectionVal) {
case ISO_IMAGE1:
if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == FALSE))
+ (IsNonCDLessDevice(Adapter) == false))
SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start);
break;
case ISO_IMAGE2:
if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == FALSE))
+ (IsNonCDLessDevice(Adapter) == false))
SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start);
break;
case DSD0:
@@ -2770,12 +2770,12 @@ int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x
switch (eFlash2xSectionVal) {
case ISO_IMAGE1:
if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == FALSE))
+ (IsNonCDLessDevice(Adapter) == false))
SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End);
break;
case ISO_IMAGE2:
if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == FALSE))
+ (IsNonCDLessDevice(Adapter) == false))
SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End);
break;
case DSD0:
@@ -2831,7 +2831,7 @@ int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x
SectEndOffset = INVALID_OFFSET;
}
- return SectEndOffset ;
+ return SectEndOffset;
}
/*
@@ -3037,7 +3037,7 @@ static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter)
*
* Return Value:-
* Success:-TRUE , offset is writable
- * Failure:-FALSE, offset is RO
+ * Failure:-false, offset is RO
*
*/
@@ -3062,7 +3062,7 @@ B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset
if (permissionBits == SECTOR_READWRITE_PERMISSION)
return TRUE;
else
- return FALSE;
+ return false;
}
static int BcmDumpFlash2xSectionBitMap(struct bcm_flash2x_bitmap *psFlash2xBitMap)
@@ -3105,13 +3105,13 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
struct bcm_flash2x_cs_info *psFlash2xCSInfo = Adapter->psFlash2xCSInfo;
enum bcm_flash2x_section_val uiHighestPriDSD = 0;
enum bcm_flash2x_section_val uiHighestPriISO = 0;
- BOOLEAN SetActiveDSDDone = FALSE;
- BOOLEAN SetActiveISODone = FALSE;
+ bool SetActiveDSDDone = false;
+ bool SetActiveISODone = false;
/* For 1.x map all the section except DSD0 will be shown as not present
* This part will be used by calibration tool to detect the number of DSD present in Flash.
*/
- if (IsFlash2x(Adapter) == FALSE) {
+ if (IsFlash2x(Adapter) == false) {
psFlash2xBitMap->ISO_IMAGE2 = 0;
psFlash2xBitMap->ISO_IMAGE1 = 0;
psFlash2xBitMap->DSD0 = FLASH2X_SECTION_VALID | FLASH2X_SECTION_ACT | FLASH2X_SECTION_PRESENT; /* 0xF; 0000(Reseved)1(Active)0(RW)1(valid)1(present) */
@@ -3143,10 +3143,10 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_VALID;
/* Calculation for extrating the Access permission */
- if (IsSectionWritable(Adapter, ISO_IMAGE2) == FALSE)
+ if (IsSectionWritable(Adapter, ISO_IMAGE2) == false)
psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_RO;
- if (SetActiveISODone == FALSE && uiHighestPriISO == ISO_IMAGE2) {
+ if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE2) {
psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_ACT;
SetActiveISODone = TRUE;
}
@@ -3163,10 +3163,10 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_VALID;
/* Calculation for extrating the Access permission */
- if (IsSectionWritable(Adapter, ISO_IMAGE1) == FALSE)
+ if (IsSectionWritable(Adapter, ISO_IMAGE1) == false)
psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_RO;
- if (SetActiveISODone == FALSE && uiHighestPriISO == ISO_IMAGE1) {
+ if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE1) {
psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_ACT;
SetActiveISODone = TRUE;
}
@@ -3183,11 +3183,11 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_VALID;
/* Calculation for extrating the Access permission */
- if (IsSectionWritable(Adapter, DSD2) == FALSE) {
+ if (IsSectionWritable(Adapter, DSD2) == false) {
psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_RO;
} else {
/* Means section is writable */
- if ((SetActiveDSDDone == FALSE) && (uiHighestPriDSD == DSD2)) {
+ if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD2)) {
psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_ACT;
SetActiveDSDDone = TRUE;
}
@@ -3205,11 +3205,11 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_VALID;
/* Calculation for extrating the Access permission */
- if (IsSectionWritable(Adapter, DSD1) == FALSE) {
+ if (IsSectionWritable(Adapter, DSD1) == false) {
psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_RO;
} else {
/* Means section is writable */
- if ((SetActiveDSDDone == FALSE) && (uiHighestPriDSD == DSD1)) {
+ if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD1)) {
psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_ACT;
SetActiveDSDDone = TRUE;
}
@@ -3227,11 +3227,11 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_VALID;
/* Setting Access permission */
- if (IsSectionWritable(Adapter, DSD0) == FALSE) {
+ if (IsSectionWritable(Adapter, DSD0) == false) {
psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_RO;
} else {
/* Means section is writable */
- if ((SetActiveDSDDone == FALSE) && (uiHighestPriDSD == DSD0)) {
+ if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD0)) {
psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_ACT;
SetActiveDSDDone = TRUE;
}
@@ -3249,7 +3249,7 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_VALID;
/* Calculation for extrating the Access permission */
- if (IsSectionWritable(Adapter, VSA0) == FALSE)
+ if (IsSectionWritable(Adapter, VSA0) == false)
psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_RO;
/* By Default section is Active */
@@ -3267,7 +3267,7 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_VALID;
/* Checking For Access permission */
- if (IsSectionWritable(Adapter, VSA1) == FALSE)
+ if (IsSectionWritable(Adapter, VSA1) == false)
psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_RO;
/* By Default section is Active */
@@ -3285,7 +3285,7 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_VALID;
/* Checking For Access permission */
- if (IsSectionWritable(Adapter, VSA2) == FALSE)
+ if (IsSectionWritable(Adapter, VSA2) == false)
psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_RO;
/* By Default section is Active */
@@ -3303,7 +3303,7 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->SCSI |= FLASH2X_SECTION_VALID;
/* Checking For Access permission */
- if (IsSectionWritable(Adapter, SCSI) == FALSE)
+ if (IsSectionWritable(Adapter, SCSI) == false)
psFlash2xBitMap->SCSI |= FLASH2X_SECTION_RO;
/* By Default section is Active */
@@ -3321,7 +3321,7 @@ int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_fl
psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_VALID;
/* Checking For Access permission */
- if (IsSectionWritable(Adapter, CONTROL_SECTION) == FALSE)
+ if (IsSectionWritable(Adapter, CONTROL_SECTION) == false)
psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_RO;
/* By Default section is Active */
@@ -3358,7 +3358,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
/* struct bcm_dsd_header sDSD = {0};
* struct bcm_iso_header sISO = {0};
*/
- int HighestPriDSD = 0 ;
+ int HighestPriDSD = 0;
int HighestPriISO = 0;
Status = IsSectionWritable(Adapter, eFlash2xSectVal);
@@ -3517,7 +3517,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
break;
}
- Adapter->bHeaderChangeAllowed = FALSE;
+ Adapter->bHeaderChangeAllowed = false;
return Status;
}
@@ -3536,7 +3536,7 @@ int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section
enum bcm_flash2x_section_val eISOReadPart = 0, eISOWritePart = 0;
unsigned int uiReadOffsetWithinPart = 0, uiWriteOffsetWithinPart = 0;
unsigned int uiTotalDataToCopy = 0;
- BOOLEAN IsThisHeaderSector = FALSE;
+ bool IsThisHeaderSector = false;
unsigned int sigOffset = 0;
unsigned int ISOLength = 0;
unsigned int Status = STATUS_SUCCESS;
@@ -3669,14 +3669,14 @@ int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section
break;
}
- Adapter->bHeaderChangeAllowed = FALSE;
+ Adapter->bHeaderChangeAllowed = false;
if (IsThisHeaderSector == TRUE) {
WriteToFlashWithoutSectorErase(Adapter,
SigBuff,
eISOWritePart,
sigOffset,
MAX_RW_SIZE);
- IsThisHeaderSector = FALSE;
+ IsThisHeaderSector = false;
}
/* subtracting the written Data */
uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize;
@@ -3782,7 +3782,7 @@ int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section
break;
}
- Adapter->bHeaderChangeAllowed = FALSE;
+ Adapter->bHeaderChangeAllowed = false;
if (IsThisHeaderSector == TRUE) {
WriteToFlashWithoutSectorErase(Adapter,
SigBuff,
@@ -3790,7 +3790,7 @@ int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section
sigOffset,
MAX_RW_SIZE);
- IsThisHeaderSector = FALSE;
+ IsThisHeaderSector = false;
}
/* subtracting the written Data */
@@ -3848,13 +3848,13 @@ int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sectio
unsigned int uiOffset = 0;
/* struct bcm_dsd_header dsdHeader = {0}; */
- if (Adapter->bSigCorrupted == FALSE) {
+ if (Adapter->bSigCorrupted == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is not corrupted by driver, hence not restoring\n");
return STATUS_SUCCESS;
}
- if (Adapter->bAllDSDWriteAllow == FALSE) {
- if (IsSectionWritable(Adapter, eFlashSectionVal) == FALSE) {
+ if (Adapter->bAllDSDWriteAllow == false) {
+ if (IsSectionWritable(Adapter, eFlashSectionVal) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Write signature");
return SECTOR_IS_NOT_WRITABLE;
}
@@ -3886,9 +3886,9 @@ int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sectio
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature");
Adapter->bHeaderChangeAllowed = TRUE;
- Adapter->bSigCorrupted = FALSE;
+ Adapter->bSigCorrupted = false;
BcmFlash2xBulkWrite(Adapter, &uiSignature, eFlashSectionVal, uiOffset, SIGNATURE_SIZE, TRUE);
- Adapter->bHeaderChangeAllowed = FALSE;
+ Adapter->bHeaderChangeAllowed = false;
return STATUS_SUCCESS;
}
@@ -3899,7 +3899,7 @@ int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sectio
* @Adapater :- Bcm Driver Private Data Structure
* @psFlash2xReadWrite :-Flash2x Read/write structure pointer
*
- * Return values:-Return TRUE is request is valid else FALSE.
+ * Return values:-Return TRUE is request is valid else false.
*/
int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_readwrite *psFlash2xReadWrite)
@@ -3912,7 +3912,7 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2
if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exixt in Flash", psFlash2xReadWrite->Section);
- return FALSE;
+ return false;
}
uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Start offset :%x ,section :%d\n", uiSectStartOffset, psFlash2xReadWrite->Section);
@@ -3949,7 +3949,7 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2
return TRUE;
else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return FALSE;
+ return false;
}
}
@@ -3966,7 +3966,7 @@ int IsFlash2x(struct bcm_mini_adapter *Adapter)
if (Adapter->uiFlashLayoutMajorVersion >= FLASH_2X_MAJOR_NUMBER)
return TRUE;
else
- return FALSE;
+ return false;
}
/*
@@ -3986,7 +3986,7 @@ static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter)
* For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr
* In case of Raw Read... use the default value
*/
- if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == FALSE) &&
+ if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) &&
!((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
uiBaseAddr = Adapter->uiFlashBaseAdd;
else
@@ -3996,7 +3996,7 @@ static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter)
* For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr
* In case of Raw Read... use the default value
*/
- if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == FALSE) &&
+ if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) &&
!((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
uiBaseAddr = Adapter->uiFlashBaseAdd | FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
else
@@ -4094,7 +4094,7 @@ int BcmCopySection(struct bcm_mini_adapter *Adapter,
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed at offset :%d for NOB :%d", SrcSection, BytesToBeCopied);
break;
}
- Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pBuff, DstSection, offset, BytesToBeCopied, FALSE);
+ Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pBuff, DstSection, offset, BytesToBeCopied, false);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed at offset :%d for NOB :%d", DstSection, BytesToBeCopied);
break;
@@ -4110,7 +4110,7 @@ int BcmCopySection(struct bcm_mini_adapter *Adapter,
} while (numOfBytes > 0);
kfree(pBuff);
- Adapter->bHeaderChangeAllowed = FALSE;
+ Adapter->bHeaderChangeAllowed = false;
return Status;
}
@@ -4129,7 +4129,7 @@ int BcmCopySection(struct bcm_mini_adapter *Adapter,
int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
{
unsigned int offsetToProtect = 0, HeaderSizeToProtect = 0;
- BOOLEAN bHasHeader = FALSE;
+ bool bHasHeader = false;
PUCHAR pTempBuff = NULL;
unsigned int uiSectAlignAddr = 0;
unsigned int sig = 0;
@@ -4153,7 +4153,7 @@ int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned
bHasHeader = TRUE;
}
/* If Header is present overwrite passed buffer with this */
- if (bHasHeader && (Adapter->bHeaderChangeAllowed == FALSE)) {
+ if (bHasHeader && (Adapter->bHeaderChangeAllowed == false)) {
pTempBuff = kzalloc(HeaderSizeToProtect, GFP_KERNEL);
if (!pTempBuff) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed");
@@ -4172,13 +4172,13 @@ int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned
sig = ntohl(sig);
if ((sig & 0xFF000000) != CORRUPTED_PATTERN) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Desired pattern is not at sig offset. Hence won't restore");
- Adapter->bSigCorrupted = FALSE;
+ Adapter->bSigCorrupted = false;
return STATUS_SUCCESS;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Corrupted sig is :%X", sig);
*((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber))) = htonl(DSD_IMAGE_MAGIC_NUMBER);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature in Header Write only");
- Adapter->bSigCorrupted = FALSE;
+ Adapter->bSigCorrupted = false;
}
return STATUS_SUCCESS;
@@ -4450,7 +4450,7 @@ int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
BcmDoChipSelect(Adapter, uiOffset);
uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
- for (i = 0 ; i < uiNumBytes; i += Adapter->ulFlashWriteSize) {
+ for (i = 0; i < uiNumBytes; i += Adapter->ulFlashWriteSize) {
if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT)
Status = flashByteWrite(Adapter, uiPartOffset, pcBuff);
else
@@ -4469,19 +4469,19 @@ int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
return Status;
}
-BOOLEAN IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section)
+bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section)
{
- BOOLEAN SectionPresent = FALSE;
+ bool SectionPresent = false;
switch (section) {
case ISO_IMAGE1:
if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == FALSE))
+ (IsNonCDLessDevice(Adapter) == false))
SectionPresent = TRUE;
break;
case ISO_IMAGE2:
if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == FALSE))
+ (IsNonCDLessDevice(Adapter) == false))
SectionPresent = TRUE;
break;
case DSD0:
@@ -4518,7 +4518,7 @@ BOOLEAN IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x
break;
default:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x");
- SectionPresent = FALSE;
+ SectionPresent = false;
}
return SectionPresent;
@@ -4527,17 +4527,17 @@ BOOLEAN IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x
int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
{
int offset = STATUS_FAILURE;
- int Status = FALSE;
+ int Status = false;
- if (IsSectionExistInFlash(Adapter, Section) == FALSE) {
+ if (IsSectionExistInFlash(Adapter, Section) == false) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exixt", Section);
- return FALSE;
+ return false;
}
offset = BcmGetSectionValStartOffset(Adapter, Section);
if (offset == INVALID_OFFSET) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exixt", Section);
- return FALSE;
+ return false;
}
if (IsSectionExistInVendorInfo(Adapter, Section))
@@ -4555,8 +4555,8 @@ static int CorruptDSDSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sect
unsigned int BlockStatus = 0;
unsigned int uiSectAlignAddr = 0;
- Adapter->bSigCorrupted = FALSE;
- if (Adapter->bAllDSDWriteAllow == FALSE) {
+ Adapter->bSigCorrupted = false;
+ if (Adapter->bAllDSDWriteAllow == false) {
if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature");
return SECTOR_IS_NOT_WRITABLE;
@@ -4615,7 +4615,7 @@ static int CorruptISOSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sect
unsigned int sig = 0;
unsigned int uiOffset = 0;
- Adapter->bSigCorrupted = FALSE;
+ Adapter->bSigCorrupted = false;
if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature");
@@ -4656,10 +4656,10 @@ static int CorruptISOSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sect
return STATUS_SUCCESS;
}
-BOOLEAN IsNonCDLessDevice(struct bcm_mini_adapter *Adapter)
+bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter)
{
if (Adapter->psFlash2xCSInfo->IsCDLessDeviceBootSig == NON_CDLESS_DEVICE_BOOT_SIG)
return TRUE;
else
- return FALSE;
+ return false;
}
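
Editor's note: the nvm.c hunks above replace the driver-private BOOLEAN/FALSE with the kernel's native bool/false from <linux/types.h>. A minimal sketch of the resulting style follows; the function names and the permission value are assumptions for illustration, not taken from the driver.

#include <linux/errno.h>
#include <linux/types.h>	/* bool, true, false */

/*
 * Hypothetical helper in the style the hunks converge on: return bool
 * and test with !x instead of comparing against FALSE.
 */
static bool example_offset_writable(unsigned int permission_bits)
{
	return permission_bits == 0x3;	/* assumed read/write value */
}

static int example_write_check(unsigned int permission_bits)
{
	if (!example_offset_writable(permission_bits))
		return -EROFS;
	return 0;
}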
diff --git a/drivers/staging/bcm/vendorspecificextn.c b/drivers/staging/bcm/vendorspecificextn.c
index d38a06f762d..2c57a16788c 100644
--- a/drivers/staging/bcm/vendorspecificextn.c
+++ b/drivers/staging/bcm/vendorspecificextn.c
@@ -113,7 +113,7 @@ INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_sect
* STATUS_SUCCESS/STATUS_FAILURE
*/
INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes, BOOLEAN bVerify)
+ UINT offset, UINT numOfBytes, bool bVerify)
{
return STATUS_FAILURE;
}
diff --git a/drivers/staging/bcm/vendorspecificextn.h b/drivers/staging/bcm/vendorspecificextn.h
index 52890d216ed..ff57f057045 100644
--- a/drivers/staging/bcm/vendorspecificextn.h
+++ b/drivers/staging/bcm/vendorspecificextn.h
@@ -11,7 +11,7 @@ INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg);
INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
UINT offset, UINT numOfBytes);
INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes, BOOLEAN bVerify);
+ UINT offset, UINT numOfBytes, bool bVerify);
INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
UINT offset, UINT numOfBytes);
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
index 0e783e8d71c..7a9bf3b5781 100644
--- a/drivers/staging/btmtk_usb/btmtk_usb.c
+++ b/drivers/staging/btmtk_usb/btmtk_usb.c
@@ -16,7 +16,8 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- * or on the worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ * or on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
*/
@@ -72,8 +73,9 @@ static int btmtk_usb_reset(struct usb_device *udev)
BT_DBG("%s\n", __func__);
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, DEVICE_VENDOR_REQUEST_OUT,
- 0x01, 0x00, NULL, 0x00, CONTROL_TIMEOUT_JIFFIES);
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
+ DEVICE_VENDOR_REQUEST_OUT, 0x01, 0x00,
+ NULL, 0x00, CONTROL_TIMEOUT_JIFFIES);
if (ret < 0) {
BT_ERR("%s error(%d)\n", __func__, ret);
@@ -91,20 +93,22 @@ static int btmtk_usb_io_read32(struct btmtk_usb_data *data, u32 reg, u32 *val)
u8 request = data->r_request;
struct usb_device *udev = data->udev;
int ret;
+ __le32 val_le;
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, DEVICE_VENDOR_REQUEST_IN,
- 0x0, reg, data->io_buf, 4,
- CONTROL_TIMEOUT_JIFFIES);
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
+ DEVICE_VENDOR_REQUEST_IN, 0x0, reg, data->io_buf,
+ 4, CONTROL_TIMEOUT_JIFFIES);
if (ret < 0) {
*val = 0xffffffff;
- BT_ERR("%s error(%d), reg=%x, value=%x\n", __func__, ret, reg, *val);
+ BT_ERR("%s error(%d), reg=%x, value=%x\n",
+ __func__, ret, reg, *val);
return ret;
}
- memmove(val, data->io_buf, 4);
+ memmove(&val_le, data->io_buf, 4);
- *val = le32_to_cpu(*val);
+ *val = le32_to_cpu(val_le);
if (ret > 0)
ret = 0;
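
Editor's note: the hunk above copies the little-endian register value into a __le32 temporary and converts it with le32_to_cpu(), so the endianness annotation stays visible to sparse and the swap happens exactly once. A hedged sketch of the same pattern with invented names; the generic get_unaligned_le32() helper would also do the job.

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Illustration only: extract a 32-bit little-endian value from a raw
 * transfer buffer (e.g. a USB control-transfer io_buf).
 */
static u32 example_get_le32(const void *buf)
{
	__le32 tmp;

	memcpy(&tmp, buf, sizeof(tmp));	/* keep the __le32 annotation */
	return le32_to_cpu(tmp);
}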
@@ -122,12 +126,13 @@ static int btmtk_usb_io_write32(struct btmtk_usb_data *data, u32 reg, u32 val)
index = (u16)reg;
value = val & 0x0000ffff;
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, DEVICE_VENDOR_REQUEST_OUT,
- value, index, NULL, 0,
- CONTROL_TIMEOUT_JIFFIES);
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
+ DEVICE_VENDOR_REQUEST_OUT, value, index,
+ NULL, 0, CONTROL_TIMEOUT_JIFFIES);
if (ret < 0) {
- BT_ERR("%s error(%d), reg=%x, value=%x\n", __func__, ret, reg, val);
+ BT_ERR("%s error(%d), reg=%x, value=%x\n",
+ __func__, ret, reg, val);
return ret;
}
@@ -139,7 +144,8 @@ static int btmtk_usb_io_write32(struct btmtk_usb_data *data, u32 reg, u32 val)
value, index, NULL, 0, CONTROL_TIMEOUT_JIFFIES);
if (ret < 0) {
- BT_ERR("%s error(%d), reg=%x, value=%x\n", __func__, ret, reg, val);
+ BT_ERR("%s error(%d), reg=%x, value=%x\n",
+ __func__, ret, reg, val);
return ret;
}
@@ -186,13 +192,15 @@ static void btmtk_usb_cap_init(struct btmtk_usb_data *data)
ret = request_firmware(&firmware, MT7650_FIRMWARE, &udev->dev);
if (ret < 0) {
if (ret == -ENOENT) {
- BT_ERR("Firmware file \"%s\" not found \n", MT7650_FIRMWARE);
+ BT_ERR("Firmware file \"%s\" not found\n",
+ MT7650_FIRMWARE);
} else {
- BT_ERR("Firmware file \"%s\" request failed (err=%d) \n",
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
MT7650_FIRMWARE, ret);
}
} else {
- BT_DBG("Firmware file \"%s\" Found \n", MT7650_FIRMWARE);
+ BT_DBG("Firmware file \"%s\" Found\n",
+ MT7650_FIRMWARE);
/* load firmware here */
data->firmware = firmware;
btmtk_usb_load_fw(data);
@@ -205,7 +213,8 @@ static void btmtk_usb_cap_init(struct btmtk_usb_data *data)
ret = request_firmware(&firmware, MT7662_FIRMWARE, &udev->dev);
if (ret < 0) {
if (ret == -ENOENT) {
- BT_ERR("Firmware file \"%s\" not found\n", MT7662_FIRMWARE);
+ BT_ERR("Firmware file \"%s\" not found\n",
+ MT7662_FIRMWARE);
} else {
BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
MT7662_FIRMWARE, ret);
@@ -241,9 +250,8 @@ static u16 checksume16(u8 *pData, int len)
if (len)
sum += *((u8 *)pData);
- while (sum >> 16) {
+ while (sum >> 16)
sum = (sum & 0xFFFF) + (sum >> 16);
- }
return ~sum;
}
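
Editor's note: the loop kept above is the usual 16-bit ones' complement "end-around carry" fold. A standalone worked example, not taken from the driver, showing why the loop terminates and what it produces:

#include <stdint.h>
#include <stdio.h>

/* Fold carries out of the top 16 bits back into the low 16 bits until
 * none remain, then invert - the same arithmetic as checksume16().
 */
static uint16_t fold_ones_complement(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 0xFFFF + 0x0001 = 0x10000 -> folds to 0x0001 -> ~ = 0xFFFE */
	printf("0x%04x\n", fold_ones_complement(0xFFFFu + 0x0001u));
	return 0;
}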
@@ -258,13 +266,12 @@ static int btmtk_usb_chk_crc(struct btmtk_usb_data *data, u32 checksum_len)
memmove(data->io_buf, &data->rom_patch_offset, 4);
memmove(&data->io_buf[4], &checksum_len, 4);
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x1, DEVICE_VENDOR_REQUEST_IN,
- 0x20, 0x00, data->io_buf, 8,
- CONTROL_TIMEOUT_JIFFIES);
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x1,
+ DEVICE_VENDOR_REQUEST_IN, 0x20, 0x00, data->io_buf,
+ 8, CONTROL_TIMEOUT_JIFFIES);
- if (ret < 0) {
+ if (ret < 0)
BT_ERR("%s error(%d)\n", __func__, ret);
- }
return ret;
}
@@ -274,6 +281,7 @@ static u16 btmtk_usb_get_crc(struct btmtk_usb_data *data)
int ret = 0;
struct usb_device *udev = data->udev;
u16 crc, count = 0;
+ __le16 crc_le;
BT_DBG("%s\n", __func__);
@@ -288,9 +296,9 @@ static u16 btmtk_usb_get_crc(struct btmtk_usb_data *data)
BT_ERR("%s error(%d)\n", __func__, ret);
}
- memmove(&crc, data->io_buf, 2);
+ memmove(&crc_le, data->io_buf, 2);
- crc = le16_to_cpu(crc);
+ crc = le16_to_cpu(crc_le);
if (crc != 0xFFFF)
break;
@@ -318,8 +326,8 @@ static int btmtk_usb_reset_wmt(struct btmtk_usb_data *data)
BT_DBG("%s\n", __func__);
ret = usb_control_msg(data->udev, usb_sndctrlpipe(data->udev, 0), 0x01,
- DEVICE_CLASS_REQUEST_OUT, 0x12, 0x00, data->io_buf,
- 8, CONTROL_TIMEOUT_JIFFIES);
+ DEVICE_CLASS_REQUEST_OUT, 0x12, 0x00,
+ data->io_buf, 8, CONTROL_TIMEOUT_JIFFIES);
if (ret)
BT_ERR("%s:(%d)\n", __func__, ret);
@@ -350,7 +358,8 @@ static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *data)
unsigned char phase;
void *buf;
char *pos;
- unsigned int pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
+ unsigned int pipe;
+ pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
if (!data->firmware) {
BT_ERR("%s:please assign a rom patch\n", __func__);
@@ -391,7 +400,8 @@ load_patch_protect:
goto error0;
}
- buf = usb_alloc_coherent(data->udev, UPLOAD_PATCH_UNIT, GFP_ATOMIC, &data_dma);
+ buf = usb_alloc_coherent(data->udev, UPLOAD_PATCH_UNIT,
+ GFP_ATOMIC, &data_dma);
if (!buf) {
ret = -ENOMEM;
@@ -409,78 +419,82 @@ load_patch_protect:
/* loading rom patch */
while (1) {
s32 sent_len_max = UPLOAD_PATCH_UNIT - PATCH_HEADER_SIZE;
- sent_len = (patch_len - cur_len) >= sent_len_max ? sent_len_max : (patch_len - cur_len);
+ sent_len = min_t(s32, (patch_len - cur_len), sent_len_max);
BT_DBG("patch_len = %d\n", patch_len);
BT_DBG("cur_len = %d\n", cur_len);
BT_DBG("sent_len = %d\n", sent_len);
- if (sent_len > 0) {
- if (first_block == 1) {
- if (sent_len < sent_len_max)
- phase = PATCH_PHASE3;
- else
- phase = PATCH_PHASE1;
- first_block = 0;
- } else if (sent_len == sent_len_max) {
- phase = PATCH_PHASE2;
- } else {
+ if (sent_len <= 0)
+ break;
+
+ if (first_block == 1) {
+ if (sent_len < sent_len_max)
phase = PATCH_PHASE3;
- }
+ else
+ phase = PATCH_PHASE1;
+ first_block = 0;
+ } else if (sent_len == sent_len_max) {
+ phase = PATCH_PHASE2;
+ } else {
+ phase = PATCH_PHASE3;
+ }
- /* prepare HCI header */
- pos[0] = 0x6F;
- pos[1] = 0xFC;
- pos[2] = (sent_len + 5) & 0xFF;
- pos[3] = ((sent_len + 5) >> 8) & 0xFF;
+ /* prepare HCI header */
+ pos[0] = 0x6F;
+ pos[1] = 0xFC;
+ pos[2] = (sent_len + 5) & 0xFF;
+ pos[3] = ((sent_len + 5) >> 8) & 0xFF;
- /* prepare WMT header */
- pos[4] = 0x01;
- pos[5] = 0x01;
- pos[6] = (sent_len + 1) & 0xFF;
- pos[7] = ((sent_len + 1) >> 8) & 0xFF;
+ /* prepare WMT header */
+ pos[4] = 0x01;
+ pos[5] = 0x01;
+ pos[6] = (sent_len + 1) & 0xFF;
+ pos[7] = ((sent_len + 1) >> 8) & 0xFF;
- pos[8] = phase;
+ pos[8] = phase;
- memcpy(&pos[9], data->firmware->data + PATCH_INFO_SIZE + cur_len, sent_len);
+ memcpy(&pos[9],
+ data->firmware->data + PATCH_INFO_SIZE + cur_len,
+ sent_len);
- BT_DBG("sent_len + PATCH_HEADER_SIZE = %d, phase = %d\n",
- sent_len + PATCH_HEADER_SIZE, phase);
+ BT_DBG("sent_len + PATCH_HEADER_SIZE = %d, phase = %d\n",
+ sent_len + PATCH_HEADER_SIZE, phase);
- usb_fill_bulk_urb(urb,
- data->udev,
- pipe,
- buf,
- sent_len + PATCH_HEADER_SIZE,
- load_rom_patch_complete,
- &sent_to_mcu_done);
+ usb_fill_bulk_urb(urb,
+ data->udev,
+ pipe,
+ buf,
+ sent_len + PATCH_HEADER_SIZE,
+ load_rom_patch_complete,
+ &sent_to_mcu_done);
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_dma = data_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret)
- goto error2;
+ if (ret)
+ goto error2;
- if (!wait_for_completion_timeout(&sent_to_mcu_done, msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload rom_patch timeout\n");
- goto error2;
- }
+ if (!wait_for_completion_timeout(&sent_to_mcu_done,
+ msecs_to_jiffies(1000))) {
+ usb_kill_urb(urb);
+ BT_ERR("upload rom_patch timeout\n");
+ goto error2;
+ }
- BT_DBG(".");
+ BT_DBG(".");
- mdelay(200);
+ mdelay(200);
- cur_len += sent_len;
+ cur_len += sent_len;
- } else {
- break;
- }
}
- total_checksum = checksume16((u8 *)data->firmware->data + PATCH_INFO_SIZE, patch_len);
+ total_checksum = checksume16(
+ (u8 *)data->firmware->data + PATCH_INFO_SIZE,
+ patch_len);
BT_DBG("Send checksum req..\n");
@@ -520,8 +534,8 @@ static int load_fw_iv(struct btmtk_usb_data *data)
memmove(buf, data->firmware->data + 32, 64);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
- DEVICE_VENDOR_REQUEST_OUT, 0x12, 0x0, buf, 64,
- CONTROL_TIMEOUT_JIFFIES);
+ DEVICE_VENDOR_REQUEST_OUT, 0x12, 0x0, buf, 64,
+ CONTROL_TIMEOUT_JIFFIES);
if (ret < 0) {
BT_ERR("%s error(%d) step4\n", __func__, ret);
@@ -552,6 +566,7 @@ static int btmtk_usb_load_fw(struct btmtk_usb_data *data)
void *buf;
u32 cur_len = 0;
u32 packet_header = 0;
+ __le32 packet_header_le;
u32 value;
u32 ilm_len = 0, dlm_len = 0;
u16 fw_ver, build_ver;
@@ -559,7 +574,8 @@ static int btmtk_usb_load_fw(struct btmtk_usb_data *data)
dma_addr_t data_dma;
int ret = 0, sent_len;
struct completion sent_to_mcu_done;
- unsigned int pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
+ unsigned int pipe;
+ pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
if (!data->firmware) {
BT_ERR("%s:please assign a fw\n", __func__);
@@ -598,9 +614,11 @@ loadfw_protect:
| (*(data->firmware->data + 5) << 8)
| (*(data->firmware->data + 4));
- fw_ver = (*(data->firmware->data + 11) << 8) | (*(data->firmware->data + 10));
+ fw_ver = (*(data->firmware->data + 11) << 8) |
+ (*(data->firmware->data + 10));
- build_ver = (*(data->firmware->data + 9) << 8) | (*(data->firmware->data + 8));
+ build_ver = (*(data->firmware->data + 9) << 8) |
+ (*(data->firmware->data + 8));
BT_DBG("fw version:%d.%d.%02d ",
(fw_ver & 0xf000) >> 8,
@@ -657,22 +675,22 @@ loadfw_protect:
/* Loading ILM */
while (1) {
- sent_len = (ilm_len - cur_len) >= 14336 ? 14336 : (ilm_len - cur_len);
+ sent_len = min_t(s32, (ilm_len - cur_len), 14336);
if (sent_len > 0) {
packet_header &= ~(0xffffffff);
packet_header |= (sent_len << 16);
- packet_header = cpu_to_le32(packet_header);
+ packet_header_le = cpu_to_le32(packet_header);
- memmove(buf, &packet_header, 4);
- memmove(buf + 4, data->firmware->data + 32 + cur_len, sent_len);
+ memmove(buf, &packet_header_le, 4);
+ memmove(buf + 4, data->firmware->data + 32 + cur_len,
+ sent_len);
/* U2M_PDMA descriptor */
btmtk_usb_io_write32(data, 0x230, cur_len);
- while ((sent_len % 4) != 0) {
+ while ((sent_len % 4) != 0)
sent_len++;
- }
/* U2M_PDMA length */
btmtk_usb_io_write32(data, 0x234, sent_len << 16);
@@ -693,7 +711,8 @@ loadfw_protect:
if (ret)
goto error3;
- if (!wait_for_completion_timeout(&sent_to_mcu_done, msecs_to_jiffies(1000))) {
+ if (!wait_for_completion_timeout(&sent_to_mcu_done,
+ msecs_to_jiffies(1000))) {
usb_kill_urb(urb);
BT_ERR("upload ilm fw timeout\n");
goto error3;
@@ -714,58 +733,60 @@ loadfw_protect:
/* Loading DLM */
while (1) {
- sent_len = (dlm_len - cur_len) >= 14336 ? 14336 : (dlm_len - cur_len);
+ sent_len = min_t(s32, (dlm_len - cur_len), 14336);
- if (sent_len > 0) {
- packet_header &= ~(0xffffffff);
- packet_header |= (sent_len << 16);
- packet_header = cpu_to_le32(packet_header);
+ if (sent_len <= 0)
+ break;
- memmove(buf, &packet_header, 4);
- memmove(buf + 4, data->firmware->data + 32 + ilm_len + cur_len, sent_len);
+ packet_header &= ~(0xffffffff);
+ packet_header |= (sent_len << 16);
+ packet_header_le = cpu_to_le32(packet_header);
- /* U2M_PDMA descriptor */
- btmtk_usb_io_write32(data, 0x230, 0x80000 + cur_len);
+ memmove(buf, &packet_header_le, 4);
+ memmove(buf + 4,
+ data->firmware->data + 32 + ilm_len + cur_len,
+ sent_len);
- while ((sent_len % 4) != 0) {
- BT_DBG("sent_len is not divided by 4\n");
- sent_len++;
- }
+ /* U2M_PDMA descriptor */
+ btmtk_usb_io_write32(data, 0x230, 0x80000 + cur_len);
- /* U2M_PDMA length */
- btmtk_usb_io_write32(data, 0x234, sent_len << 16);
+ while ((sent_len % 4) != 0) {
+ BT_DBG("sent_len is not divided by 4\n");
+ sent_len++;
+ }
- usb_fill_bulk_urb(urb,
- udev,
- pipe,
- buf,
- sent_len + 4,
- load_fw_complete,
- &sent_to_mcu_done);
+ /* U2M_PDMA length */
+ btmtk_usb_io_write32(data, 0x234, sent_len << 16);
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_fill_bulk_urb(urb,
+ udev,
+ pipe,
+ buf,
+ sent_len + 4,
+ load_fw_complete,
+ &sent_to_mcu_done);
- ret = usb_submit_urb(urb, GFP_ATOMIC);
+ urb->transfer_dma = data_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- if (ret)
- goto error3;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (!wait_for_completion_timeout(&sent_to_mcu_done, msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload dlm fw timeout\n");
- goto error3;
- }
+ if (ret)
+ goto error3;
- BT_DBG(".");
+ if (!wait_for_completion_timeout(&sent_to_mcu_done,
+ msecs_to_jiffies(1000))) {
+ usb_kill_urb(urb);
+ BT_ERR("upload dlm fw timeout\n");
+ goto error3;
+ }
- mdelay(500);
+ BT_DBG(".");
- cur_len += sent_len;
+ mdelay(500);
+
+ cur_len += sent_len;
- } else {
- break;
- }
}
/* upload 64bytes interrupt vector */
@@ -921,9 +942,8 @@ static void btmtk_usb_bulk_in_complete(struct urb *urb)
BT_DBG("%s:%s urb %p status %d count %d", __func__, hdev->name,
urb, urb->status, urb->actual_length);
- if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
return;
- }
if (urb->status == 0) {
hdev->stat.byte_rx += urb->actual_length;
@@ -978,8 +998,8 @@ static int btmtk_usb_submit_bulk_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress);
- usb_fill_bulk_urb(urb, data->udev, pipe,
- buf, size, btmtk_usb_bulk_in_complete, hdev);
+ usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
+ btmtk_usb_bulk_in_complete, hdev);
urb->transfer_flags |= URB_FREE_BUFFER;
@@ -1015,7 +1035,8 @@ static void btmtk_usb_isoc_in_complete(struct urb *urb)
if (urb->status == 0) {
for (i = 0; i < urb->number_of_packets; i++) {
unsigned int offset = urb->iso_frame_desc[i].offset;
- unsigned int length = urb->iso_frame_desc[i].actual_length;
+ unsigned int length;
+ length = urb->iso_frame_desc[i].actual_length;
if (urb->iso_frame_desc[i].status)
continue;
@@ -1096,8 +1117,9 @@ static int btmtk_usb_submit_isoc_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
- usb_fill_int_urb(urb, data->udev, pipe, buf, size, btmtk_usb_isoc_in_complete,
- hdev, data->isoc_rx_ep->bInterval);
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size,
+ btmtk_usb_isoc_in_complete, hdev,
+ data->isoc_rx_ep->bInterval);
urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
@@ -1306,7 +1328,8 @@ static int btmtk_usb_send_frame(struct sk_buff *skb)
}
usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
- skb->data, skb->len, btmtk_usb_tx_complete, skb);
+ skb->data, skb->len,
+ btmtk_usb_tx_complete, skb);
hdev->stat.cmd_tx++;
break;
@@ -1322,8 +1345,8 @@ static int btmtk_usb_send_frame(struct sk_buff *skb)
pipe = usb_sndbulkpipe(data->udev,
data->bulk_tx_ep->bEndpointAddress);
- usb_fill_bulk_urb(urb, data->udev, pipe,
- skb->data, skb->len, btmtk_usb_tx_complete, skb);
+ usb_fill_bulk_urb(urb, data->udev, pipe, skb->data,
+ skb->len, btmtk_usb_tx_complete, skb);
hdev->stat.acl_tx++;
BT_DBG("HCI_ACLDATA_PKT:\n");
@@ -1442,7 +1465,8 @@ static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
static void btmtk_usb_work(struct work_struct *work)
{
- struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data, work);
+ struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data,
+ work);
struct hci_dev *hdev = data->hdev;
int new_alts;
int err;
@@ -1451,7 +1475,8 @@ static void btmtk_usb_work(struct work_struct *work)
if (hdev->conn_hash.sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
+ err = usb_autopm_get_interface(data->isoc ?
+ data->isoc : data->intf);
if (err < 0) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -1489,13 +1514,15 @@ static void btmtk_usb_work(struct work_struct *work)
__set_isoc_interface(hdev, 0);
if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
+ usb_autopm_put_interface(data->isoc ?
+ data->isoc : data->intf);
}
}
static void btmtk_usb_waker(struct work_struct *work)
{
- struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data, waker);
+ struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data,
+ waker);
int err;
err = usb_autopm_get_interface(data->intf);
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index 2dbaf39e2fc..62efd74b8c0 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -692,10 +692,7 @@ static int SetArea(DEVICE_EXTENSION *pdx, int nArea, char __user *puBuf,
__func__, puBuf, dwLength, bCircular);
/* To pin down user pages we must first acquire the mapping semaphore. */
- down_read(&current->mm->mmap_sem); /* get memory map semaphore */
- nPages = get_user_pages(current, current->mm, ulStart, len, 1, 0,
- pPages, NULL);
- up_read(&current->mm->mmap_sem); /* release the semaphore */
+ nPages = get_user_pages_fast(ulStart, len, 1, pPages);
dev_dbg(&pdx->interface->dev, "%s nPages = %d", __func__, nPages);
if (nPages > 0) { /* if we succeeded */
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index f73287eab37..bfa27e7fc01 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -485,6 +485,7 @@ config COMEDI_NI_ATMIO
tristate "NI AT-MIO E series ISA-PNP card support"
select COMEDI_8255
select COMEDI_NI_TIO
+ select COMEDI_FC
---help---
Enable support for National Instruments AT-MIO E series cards
National Instruments AT-MIO-16E-1 (ni_atmio),
@@ -990,8 +991,6 @@ config COMEDI_ME_DAQ
config COMEDI_NI_6527
tristate "NI 6527 support"
- depends on HAS_DMA
- select COMEDI_MITE
---help---
Enable support for the National Instruments 6527 PCI card
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 94b2385fb0a..4e26bd7fc84 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -344,7 +344,7 @@ unsigned int comedi_buf_read_free(struct comedi_async *async,
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
-int comedi_buf_put(struct comedi_async *async, short x)
+int comedi_buf_put(struct comedi_async *async, unsigned short x)
{
unsigned int n = __comedi_buf_write_alloc(async, sizeof(short), 1);
@@ -352,20 +352,20 @@ int comedi_buf_put(struct comedi_async *async, short x)
async->events |= COMEDI_CB_ERROR;
return 0;
}
- *(short *)(async->prealloc_buf + async->buf_write_ptr) = x;
+ *(unsigned short *)(async->prealloc_buf + async->buf_write_ptr) = x;
comedi_buf_write_free(async, sizeof(short));
return 1;
}
EXPORT_SYMBOL_GPL(comedi_buf_put);
-int comedi_buf_get(struct comedi_async *async, short *x)
+int comedi_buf_get(struct comedi_async *async, unsigned short *x)
{
unsigned int n = comedi_buf_read_n_available(async);
if (n < sizeof(short))
return 0;
comedi_buf_read_alloc(async, sizeof(short));
- *x = *(short *)(async->prealloc_buf + async->buf_read_ptr);
+ *x = *(unsigned short *)(async->prealloc_buf + async->buf_read_ptr);
comedi_buf_read_free(async, sizeof(short));
return 1;
}
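
Editor's note: comedi_buf_put()/comedi_buf_get() now traffic in unsigned short, so a raw 16-bit sample is never sign-extended when it is promoted to int. A tiny standalone demonstration of the hazard being avoided (values and assumptions are the editor's, not the driver's):

#include <stdio.h>

int main(void)
{
	short s = -32768;		/* same bits as raw sample 0x8000 */
	unsigned short u = 0x8000;

	/* Default promotion to int sign-extends only the signed one. */
	printf("signed:   0x%08x\n", (unsigned int)(int)s); /* 0xffff8000 */
	printf("unsigned: 0x%08x\n", (unsigned int)(int)u); /* 0x00008000 */
	return 0;
}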
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 2dfb06aedb1..1e9da405d83 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -86,9 +86,6 @@ struct comedi32_insnlist_struct {
static int translated_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- if (!file->f_op)
- return -ENOTTY;
-
if (file->f_op->unlocked_ioctl)
return file->f_op->unlocked_ioctl(file, cmd, arg);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 1636c7ca57e..f3d59e2a115 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -543,7 +543,7 @@ void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size)
{
s->private = kzalloc(size, GFP_KERNEL);
if (s->private)
- comedi_set_subdevice_runflags(s, ~0, SRF_FREE_SPRIV);
+ s->runflags |= SRF_FREE_SPRIV;
return s->private;
}
EXPORT_SYMBOL_GPL(comedi_alloc_spriv);
@@ -806,7 +806,6 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
} else {
us->range_type = 0; /* XXX */
}
- us->flags = s->flags;
if (s->busy)
us->subd_flags |= SDF_BUSY;
@@ -818,8 +817,6 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
us->subd_flags |= SDF_LOCK_OWNER;
if (!s->maxdata && s->maxdata_list)
us->subd_flags |= SDF_MAXDATA;
- if (s->flaglist)
- us->subd_flags |= SDF_FLAGS;
if (s->range_table_list)
us->subd_flags |= SDF_RANGETYPE;
if (s->do_cmd)
@@ -829,8 +826,6 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
us->insn_bits_support = COMEDI_SUPPORTED;
else
us->insn_bits_support = COMEDI_UNSUPPORTED;
-
- us->settling_time_0 = s->settling_time_0;
}
ret = copy_to_user(arg, tmp, dev->n_subdevices * sizeof(*tmp));
@@ -875,13 +870,8 @@ static int do_chaninfo_ioctl(struct comedi_device *dev,
return -EFAULT;
}
- if (it.flaglist) {
- if (!s->flaglist)
- return -EINVAL;
- if (copy_to_user(it.flaglist, s->flaglist,
- s->n_chan * sizeof(unsigned int)))
- return -EFAULT;
- }
+ if (it.flaglist)
+ return -EINVAL; /* flaglist not supported */
if (it.rangelist) {
int i;
@@ -1431,17 +1421,11 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd = cmd;
async->cmd.data = NULL;
/* load channel/gain list */
- async->cmd.chanlist =
- kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
- if (!async->cmd.chanlist) {
- DPRINTK("allocation failed\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(async->cmd.chanlist, user_chanlist,
- async->cmd.chanlist_len * sizeof(int))) {
- DPRINTK("fault reading chanlist\n");
- ret = -EFAULT;
+ async->cmd.chanlist = memdup_user(user_chanlist,
+ async->cmd.chanlist_len * sizeof(int));
+ if (IS_ERR(async->cmd.chanlist)) {
+ ret = PTR_ERR(async->cmd.chanlist);
+ DPRINTK("memdup_user failed with code %d\n", ret);
goto cleanup;
}
@@ -1485,7 +1469,8 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & TRIG_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
- comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+ comedi_set_subdevice_runflags(s, SRF_USER | SRF_ERROR | SRF_RUNNING,
+ SRF_USER | SRF_RUNNING);
/* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
* comedi_read() or comedi_write() */
@@ -1558,18 +1543,11 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
/* load channel/gain list */
if (cmd.chanlist) {
- chanlist =
- kmalloc(cmd.chanlist_len * sizeof(int), GFP_KERNEL);
- if (!chanlist) {
- DPRINTK("allocation failed\n");
- ret = -ENOMEM;
- goto cleanup;
- }
-
- if (copy_from_user(chanlist, user_chanlist,
- cmd.chanlist_len * sizeof(int))) {
- DPRINTK("fault reading chanlist\n");
- ret = -EFAULT;
+ chanlist = memdup_user(user_chanlist,
+ cmd.chanlist_len * sizeof(int));
+ if (IS_ERR(chanlist)) {
+ ret = PTR_ERR(chanlist);
+ DPRINTK("memdup_user exited with code %d", ret);
goto cleanup;
}
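
Editor's note: both chanlist loaders above switch to memdup_user(), which allocates and copies in one step and reports failure as an ERR_PTR value instead of a separate -ENOMEM/-EFAULT path. A hedged sketch of the pattern with invented names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */
#include <linux/uaccess.h>

/*
 * Hypothetical: copy a user-supplied array of n channel descriptors
 * into kernel memory.  Real code should also bound-check n.
 */
static int example_use_chanlist(const void __user *uptr, unsigned int n)
{
	unsigned int *list;

	list = memdup_user(uptr, n * sizeof(*list));
	if (IS_ERR(list))
		return PTR_ERR(list);

	/* ... validate and use list[0..n-1] ... */

	kfree(list);
	return 0;
}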
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 2e19f659cd2..143be8076a2 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -57,11 +57,6 @@ struct comedi_subdevice {
unsigned int maxdata; /* if maxdata==0, use list */
const unsigned int *maxdata_list; /* list is channel specific */
- unsigned int flags;
- const unsigned int *flaglist;
-
- unsigned int settling_time_0;
-
const struct comedi_lrange *range_table;
const struct comedi_lrange *const *range_table_list;
@@ -307,7 +302,26 @@ static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
return s->range_table->range[range].min >= 0;
}
-/* some silly little inline functions */
+static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min < 0;
+}
+
+static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min >= 0;
+}
+
+/* munge between offset binary and two's complement values */
+static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
+ unsigned int val)
+{
+ return val ^ s->maxdata ^ (s->maxdata >> 1);
+}
static inline unsigned int bytes_per_sample(const struct comedi_subdevice *subd)
{
@@ -332,8 +346,8 @@ unsigned int comedi_buf_read_n_available(struct comedi_async *);
unsigned int comedi_buf_read_alloc(struct comedi_async *, unsigned int);
unsigned int comedi_buf_read_free(struct comedi_async *, unsigned int);
-int comedi_buf_put(struct comedi_async *, short);
-int comedi_buf_get(struct comedi_async *, short *);
+int comedi_buf_put(struct comedi_async *, unsigned short);
+int comedi_buf_get(struct comedi_async *, unsigned short *);
void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
const void *source, unsigned int num_bytes);
@@ -345,6 +359,8 @@ void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *data,
unsigned int mask);
+unsigned int comedi_dio_update_state(struct comedi_subdevice *,
+ unsigned int *data);
void *comedi_alloc_devpriv(struct comedi_device *, size_t);
int comedi_alloc_subdevices(struct comedi_device *, int);
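
Editor's note: the new comedi_offset_munge() inline converts between offset-binary and two's-complement sample encodings; for maxdata = 2^n - 1 the expression val ^ maxdata ^ (maxdata >> 1) reduces to flipping the top bit. A standalone worked example of the arithmetic (editor's illustration):

#include <stdint.h>
#include <stdio.h>

static unsigned int offset_munge(unsigned int val, unsigned int maxdata)
{
	return val ^ maxdata ^ (maxdata >> 1);
}

int main(void)
{
	unsigned int maxdata = 0xffff;	/* 16-bit converter */

	/* offset-binary mid-scale 0x8000 -> two's complement 0x0000 */
	printf("0x%04x\n", offset_munge(0x8000, maxdata));
	/* offset-binary 0x0000 (full negative) -> 0x8000 (-32768) */
	printf("0x%04x\n", offset_munge(0x0000, maxdata));
	return 0;
}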
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 317a821b790..8f02bf66e20 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -190,6 +190,28 @@ int comedi_dio_insn_config(struct comedi_device *dev,
}
EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
+/**
+ * comedi_dio_update_state() - update the internal state of DIO subdevices.
+ * @s: comedi_subdevice struct
+ * @data: the channel mask and bits to update
+ */
+unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ unsigned int *data)
+{
+ unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
+ : 0xffffffff;
+ unsigned int mask = data[0] & chanmask;
+ unsigned int bits = data[1];
+
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+ }
+
+ return mask;
+}
+EXPORT_SYMBOL_GPL(comedi_dio_update_state);
+
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -285,6 +307,13 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
if (s->type == COMEDI_SUBD_UNUSED)
continue;
+ if (s->type == COMEDI_SUBD_DO) {
+ if (s->n_chan < 32)
+ s->io_bits = (1 << s->n_chan) - 1;
+ else
+ s->io_bits = 0xffffffff;
+ }
+
if (s->len_chanlist == 0)
s->len_chanlist = 1;
diff --git a/drivers/staging/comedi/drivers/8253.h b/drivers/staging/comedi/drivers/8253.h
index 3abedcd2527..e3d737cf730 100644
--- a/drivers/staging/comedi/drivers/8253.h
+++ b/drivers/staging/comedi/drivers/8253.h
@@ -21,6 +21,15 @@
#include "../comedi.h"
+/*
+ * Common oscillator base values in nanoseconds
+ */
+#define I8254_OSC_BASE_10MHZ 100
+#define I8254_OSC_BASE_5MHZ 200
+#define I8254_OSC_BASE_4MHZ 250
+#define I8254_OSC_BASE_2MHZ 500
+#define I8254_OSC_BASE_1MHZ 1000
+
#define i8253_cascade_ns_to_timer i8253_cascade_ns_to_timer_2div
static inline void i8253_cascade_ns_to_timer_2div_old(int i8253_osc_base,
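
Editor's note: the new I8254_OSC_BASE_* constants are simply the oscillator period in nanoseconds (1000 / f_MHz), so 10 MHz gives 100 ns and 1 MHz gives 1000 ns; a driver derives a counter divisor from a requested period by dividing by this base. A small arithmetic sketch, editor's illustration only (no rounding mode shown):

#include <stdio.h>

#define I8254_OSC_BASE_10MHZ	100	/* ns per tick = 1000 / 10 */
#define I8254_OSC_BASE_1MHZ	1000	/* ns per tick = 1000 / 1  */

int main(void)
{
	unsigned int period_ns = 1000000;	/* desired 1 ms period */

	printf("10 MHz divisor: %u\n", period_ns / I8254_OSC_BASE_10MHZ); /* 10000 */
	printf("1 MHz divisor:  %u\n", period_ns / I8254_OSC_BASE_1MHZ);  /* 1000 */
	return 0;
}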
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index 2f070fdbbb1..b4009e86341 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -112,7 +112,7 @@ void subdev_8255_interrupt(struct comedi_device *dev,
{
struct subdev_8255_private *spriv = s->private;
unsigned long iobase = spriv->iobase;
- short d;
+ unsigned short d;
d = spriv->io(0, _8255_DATA, 0, iobase);
d |= (spriv->io(0, _8255_DATA + 1, 0, iobase) << 8);
@@ -126,30 +126,24 @@ EXPORT_SYMBOL_GPL(subdev_8255_interrupt);
static int subdev_8255_insn(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct subdev_8255_private *spriv = s->private;
unsigned long iobase = spriv->iobase;
unsigned int mask;
- unsigned int bits;
unsigned int v;
- mask = data[0];
- bits = data[1];
-
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- v = s->state;
- v &= ~mask;
- v |= (bits & mask);
-
if (mask & 0xff)
- spriv->io(1, _8255_DATA, v & 0xff, iobase);
+ spriv->io(1, _8255_DATA, s->state & 0xff, iobase);
if (mask & 0xff00)
- spriv->io(1, _8255_DATA + 1, (v >> 8) & 0xff, iobase);
+ spriv->io(1, _8255_DATA + 1, (s->state >> 8) & 0xff,
+ iobase);
if (mask & 0xff0000)
- spriv->io(1, _8255_DATA + 2, (v >> 16) & 0xff, iobase);
-
- s->state = v;
+ spriv->io(1, _8255_DATA + 2, (s->state >> 16) & 0xff,
+ iobase);
}
v = spriv->io(0, _8255_DATA, 0, iobase);
@@ -288,9 +282,6 @@ int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
s->insn_bits = subdev_8255_insn;
s->insn_config = subdev_8255_insn_config;
- s->state = 0;
- s->io_bits = 0;
-
subdev_8255_do_config(dev, s);
return 0;
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 63dff7729ea..dc87df03220 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -204,7 +204,6 @@ static int addi_auto_attach(struct comedi_device *dev,
s->len_chanlist =
devpriv->s_EeParameters.i_NbrDiChannel;
s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
s->insn_config = this_board->di_config;
s->insn_read = this_board->di_read;
s->insn_write = this_board->di_write;
@@ -223,7 +222,6 @@ static int addi_auto_attach(struct comedi_device *dev,
s->len_chanlist =
devpriv->s_EeParameters.i_NbrDoChannel;
s->range_table = &range_digital;
- s->io_bits = 0xf; /* all bits output */
/* insn_config - for digital output memory */
s->insn_config = this_board->do_config;
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.h b/drivers/staging/comedi/drivers/addi-data/addi_common.h
index dfd1e666cc1..2ed2da3499f 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.h
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.h
@@ -133,7 +133,7 @@ struct addi_private {
unsigned short us_UseDma; /* To use Dma or not */
unsigned char b_DmaDoubleBuffer; /* we can use double buffering */
unsigned int ui_DmaActualBuffer; /* which buffer is used now */
- short *ul_DmaBufferVirtual[2]; /* pointers to begin of DMA buffer */
+ unsigned short *ul_DmaBufferVirtual[2]; /* pointers to DMA buffer */
unsigned int ul_DmaBufferHw[2]; /* hw address of DMA buff */
unsigned int ui_DmaBufferSize[2]; /* size of dma buffer in bytes */
unsigned int ui_DmaBufferUsesize[2]; /* which size we may now used for transfer */
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index e3cc429403c..84668544f52 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -260,18 +260,13 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
s->state = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
APCI1564_DIGITAL_OP_RW);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outl(s->state, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
APCI1564_DIGITAL_OP_RW);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index 1449b92403e..3c9eec84f0e 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -1391,7 +1391,7 @@ static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
*/
static void v_APCI3120_InterruptDmaMoveBlock16bit(struct comedi_device *dev,
struct comedi_subdevice *s,
- short *dma_buffer,
+ unsigned short *dma_buffer,
unsigned int num_samples)
{
struct addi_private *devpriv = dev->private;
@@ -2175,21 +2175,16 @@ static int apci3120_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
- unsigned int val;
- /* The do channels are bits 7:4 of the do register */
- val = devpriv->b_DigitalOutputRegister >> 4;
- if (mask) {
- val &= ~mask;
- val |= (bits & mask);
- devpriv->b_DigitalOutputRegister = val << 4;
+ if (comedi_dio_update_state(s, data)) {
+ /* The do channels are bits 7:4 of the do register */
+ devpriv->b_DigitalOutputRegister = s->state << 4;
- outb(val << 4, devpriv->iobase + APCI3120_DIGITAL_OUTPUT);
+ outb(devpriv->b_DigitalOutputRegister,
+ devpriv->iobase + APCI3120_DIGITAL_OUTPUT);
}
- data[1] = val;
+ data[1] = s->state;
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index 32dce0329fd..dc73d4d348e 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -623,16 +623,11 @@ static int apci3200_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
s->state = inl(devpriv->i_IobaseAddon) & 0xf;
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outl(s->state, devpriv->i_IobaseAddon);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index 08674c18cf4..9d1b1425c60 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -90,16 +90,10 @@ static int apci1516_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
s->state = inw(dev->iobase + APCI1516_DO_REG);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + APCI1516_DO_REG);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 96523744b8d..5ee204bcbee 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -87,17 +87,8 @@ static int apci16xx_dio_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- /* Only update the channels configured as outputs */
- mask &= s->io_bits;
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
outl(s->state, dev->iobase + APCI16XX_OUT_REG(s->index));
- }
data[1] = inl(dev->iobase + APCI16XX_IN_REG(s->index));
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index 6b0ea16ff54..c77ee8732d3 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -57,16 +57,10 @@ static int apci2032_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
s->state = inl(dev->iobase + APCI2032_DO_REG);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outl(s->state, dev->iobase + APCI2032_DO_REG);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 92ac8ece849..7fb32e778d8 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -50,16 +50,10 @@ static int apci2200_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
s->state = inw(dev->iobase + APCI2200_DO_REG);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + APCI2200_DO_REG);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index d804957018a..67d09e8afb2 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -164,7 +164,6 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->len_chanlist = this_board->i_NbrDiChannel;
s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
s->insn_bits = apci3120_di_insn_bits;
/* Allocate and Initialise DO Subdevice Structures */
@@ -176,7 +175,6 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->maxdata = this_board->i_DoMaxdata;
s->len_chanlist = this_board->i_NbrDoChannel;
s->range_table = &range_digital;
- s->io_bits = 0xf; /* all bits output */
s->insn_bits = apci3120_do_insn_bits;
/* Allocate and Initialise Timer Subdevice Structures */
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index d9650ffb7d2..6138440b919 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -161,16 +161,10 @@ static int apci3501_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
s->state = inl(dev->iobase + APCI3501_DO_REG);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outl(s->state, dev->iobase + APCI3501_DO_REG);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index cf5dd10eaf9..761cbf8f964 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -664,16 +664,10 @@ static int apci3xxx_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
s->state = inl(dev->iobase + 48) & 0xf;
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outl(s->state, dev->iobase + 48);
- }
data[1] = s->state;
@@ -717,16 +711,11 @@ static int apci3xxx_dio_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
+ unsigned int mask;
unsigned int val;
- /* only update output channels */
- mask &= s->io_bits;
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
if (mask & 0xff)
outl(s->state & 0xff, dev->iobase + 80);
if (mask & 0xff0000)
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index a67ad57cefc..dd092c7954a 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -1,44 +1,35 @@
/*
- comedi/drivers/adl_pci6208.c
-
- Hardware driver for ADLink 6208 series cards:
- card | voltage output | current output
- -------------+-------------------+---------------
- PCI-6208V | 8 channels | -
- PCI-6216V | 16 channels | -
- PCI-6208A | 8 channels | 8 channels
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * adl_pci6208.c
+ * Comedi driver for ADLink 6208 series cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: adl_pci6208
-Description: ADLink PCI-6208/6216 Series Multi-channel Analog Output Cards
-Devices: (ADLink) PCI-6208 [adl_pci6208]
- (ADLink) PCI-6216 [adl_pci6216]
-Author: nsyeow <nsyeow@pd.jaring.my>
-Updated: Fri, 30 Jan 2004 14:44:27 +0800
-Status: untested
-
-Configuration Options: not applicable, uses PCI auto config
-
-References:
- - ni_660x.c
- - adl_pci9111.c copied the entire pci setup section
- - adl_pci9118.c
-*/
+ * Driver: adl_pci6208
+ * Description: ADLink PCI-6208/6216 Series Multi-channel Analog Output Cards
+ * Devices: (ADLink) PCI-6208 [adl_pci6208]
+ * (ADLink) PCI-6216 [adl_pci6216]
+ * Author: nsyeow <nsyeow@pd.jaring.my>
+ * Updated: Fri, 30 Jan 2004 14:44:27 +0800
+ * Status: untested
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -82,37 +73,56 @@ struct pci6208_private {
unsigned int ao_readback[PCI6208_MAX_AO_CHANNELS];
};
-static int pci6208_ao_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pci6208_ao_wait_for_data_send(struct comedi_device *dev,
+ unsigned int timeout)
+{
+ unsigned int status;
+
+ while (timeout--) {
+ status = inw(dev->iobase + PCI6208_AO_STATUS);
+ if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int pci6208_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pci6208_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- unsigned long invert = 1 << (16 - 1);
- unsigned long value = 0;
- unsigned short status;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
- value = data[i] ^ invert;
+ val = data[i];
- do {
- status = inw(dev->iobase + PCI6208_AO_STATUS);
- } while (status & PCI6208_AO_STATUS_DATA_SEND);
+ /* D/A transfer rate is 2.2us, wait up to 10us */
+ ret = pci6208_ao_wait_for_data_send(dev, 10);
+ if (ret)
+ return ret;
- outw(value, dev->iobase + PCI6208_AO_CONTROL(chan));
+ /* the hardware expects two's complement values */
+ outw(comedi_offset_munge(s, val),
+ dev->iobase + PCI6208_AO_CONTROL(chan));
}
- devpriv->ao_readback[chan] = value;
+ devpriv->ao_readback[chan] = val;
return insn->n;
}
-static int pci6208_ao_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pci6208_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pci6208_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++)
@@ -141,15 +151,8 @@ static int pci6208_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + PCI6208_DIO);
- }
data[1] = s->state;
@@ -193,8 +196,8 @@ static int pci6208_auto_attach(struct comedi_device *dev,
s->n_chan = boardinfo->ao_chans;
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
- s->insn_write = pci6208_ao_winsn;
- s->insn_read = pci6208_ao_rinsn;
+ s->insn_write = pci6208_ao_insn_write;
+ s->insn_read = pci6208_ao_insn_read;
s = &dev->subdevices[1];
/* digital input subdevice */
@@ -221,10 +224,6 @@ static int pci6208_auto_attach(struct comedi_device *dev,
val = inw(dev->iobase + PCI6208_DIO);
val = (val & PCI6208_DIO_DO_MASK) >> PCI6208_DIO_DO_SHIFT;
s->state = val;
- s->io_bits = 0x0f;
-
- dev_info(dev->class_dev, "%s: %s, I/O base=0x%04lx\n",
- dev->driver->driver_name, dev->board_name, dev->iobase);
return 0;
}
@@ -259,5 +258,5 @@ static struct pci_driver adl_pci6208_pci_driver = {
module_comedi_pci_driver(adl_pci6208_driver, adl_pci6208_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for ADLink 6208 series cards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 81b7203f824..5617f5ca384 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -112,21 +112,10 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
outl(s->state, dev->iobase + reg);
- }
- /*
- * NOTE: The output register is not readable.
- * This returned state will not be correct until all the
- * outputs have been updated.
- */
data[1] = s->state;
return insn->n;
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 78cea193504..eab8da2c3d6 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -86,8 +86,6 @@ TODO:
#define PCI9111_AI_INSTANT_READ_UDELAY_US 2
#define PCI9111_AI_INSTANT_READ_TIMEOUT 100
-#define PCI9111_8254_CLOCK_PERIOD_NS 500
-
/*
* IO address map and bit defines
*/
@@ -153,7 +151,7 @@ struct pci9111_private_data {
unsigned int div1;
unsigned int div2;
- short ai_bounce_buffer[2 * PCI9111_FIFO_HALF_SIZE];
+ unsigned short ai_bounce_buffer[2 * PCI9111_FIFO_HALF_SIZE];
};
static void plx9050_interrupt_control(unsigned long io_base,
@@ -393,11 +391,10 @@ static int pci9111_ai_do_cmd_test(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer_2div(PCI9111_8254_CLOCK_PERIOD_NS,
- &dev_private->div1,
- &dev_private->div2,
- &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
+ &dev_private->div1,
+ &dev_private->div2,
+ &cmd->convert_arg, cmd->flags);
if (tmp != cmd->convert_arg)
error++;
}
@@ -570,7 +567,7 @@ static void pci9111_ai_munge(struct comedi_device *dev,
unsigned int num_bytes,
unsigned int start_chan_index)
{
- short *array = data;
+ unsigned short *array = data;
unsigned int maxdata = s->maxdata;
unsigned int invert = (maxdata + 1) >> 1;
unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
@@ -813,15 +810,8 @@ static int pci9111_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + PCI9111_DIO_REG);
- }
data[1] = s->state;
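
The driver-local PCI9111_8254_CLOCK_PERIOD_NS (500) removed above is the same quantity as I8254_OSC_BASE_2MHZ: the 8254 timebase expressed as a period in nanoseconds. A hedged usage sketch of the divisor calculation, with illustrative numbers only (the helper and constants come from the comedi 8253 support header; the exact div1/div2 split is chosen by the helper):

/* Illustrative only: 100 kHz conversion rate on a 2 MHz (500 ns) base. */
static void pci9111_timer_example(void)
{
	unsigned int div1 = 0, div2 = 0;
	unsigned int ns = 10000;	/* requested 10 us period */

	i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ, &div1, &div2,
				  &ns, TRIG_ROUND_NEAREST);
	/* any split with div1 * div2 == 20 reproduces 10000 ns exactly */
}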
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 22196ada036..986489641ed 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -352,12 +352,11 @@ struct pci9118_private {
* on external start
*/
unsigned int ai_data_len;
- short *ai_data;
- short ao_data[2]; /* data output buffer */
+ unsigned short ao_data[2]; /* data output buffer */
unsigned int ai_scans; /* number of scans to do */
char dma_doublebuf; /* we can use double buffering */
unsigned int dma_actbuf; /* which buffer is used now */
- short *dmabuf_virt[2]; /*
+ unsigned short *dmabuf_virt[2]; /*
* pointers to begin of
* DMA buffer
*/
@@ -671,13 +670,12 @@ static int pci9118_insn_bits_di(struct comedi_device *dev,
static int pci9118_insn_bits_do(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data))
outl(s->state & 0x0f, dev->iobase + PCI9118_DO);
- }
+
data[1] = s->state;
return insn->n;
@@ -701,7 +699,7 @@ static void interrupt_pci9118_ai_mode4_switch(struct comedi_device *dev)
static unsigned int defragment_dma_buffer(struct comedi_device *dev,
struct comedi_subdevice *s,
- short *dma_buffer,
+ unsigned short *dma_buffer,
unsigned int num_samples)
{
struct pci9118_private *devpriv = dev->private;
@@ -725,7 +723,7 @@ static unsigned int defragment_dma_buffer(struct comedi_device *dev,
static int move_block_from_dma(struct comedi_device *dev,
struct comedi_subdevice *s,
- short *dma_buffer,
+ unsigned short *dma_buffer,
unsigned int num_samples)
{
struct pci9118_private *devpriv = dev->private;
@@ -793,7 +791,8 @@ static void pci9118_calc_divisors(char mode, struct comedi_device *dev,
case 4:
if (*tim2 < this_board->ai_ns_min)
*tim2 = this_board->ai_ns_min;
- i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, div1, div2,
+ i8253_cascade_ns_to_timer(devpriv->i8254_osc_base,
+ div1, div2,
tim2, flags & TRIG_ROUND_NEAREST);
break;
case 2:
@@ -925,7 +924,7 @@ static void pci9118_ai_munge(struct comedi_device *dev,
{
struct pci9118_private *devpriv = dev->private;
unsigned int i, num_samples = num_bytes / sizeof(short);
- short *array = data;
+ unsigned short *array = data;
for (i = 0; i < num_samples; i++) {
if (devpriv->usedma)
@@ -945,7 +944,7 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
unsigned short int_daq)
{
struct pci9118_private *devpriv = dev->private;
- register short sampl;
+ unsigned short sampl;
s->async->events = 0;
@@ -1278,9 +1277,9 @@ static int pci9118_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, &divisor1,
- &divisor2, &cmd->scan_begin_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->i8254_osc_base,
+ &divisor1, &divisor2,
+ &cmd->scan_begin_arg, cmd->flags);
if (cmd->scan_begin_arg < this_board->ai_ns_min)
cmd->scan_begin_arg = this_board->ai_ns_min;
if (tmp != cmd->scan_begin_arg)
@@ -1289,9 +1288,9 @@ static int pci9118_ai_cmdtest(struct comedi_device *dev,
if (cmd->convert_src & (TRIG_TIMER | TRIG_NOW)) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, &divisor1,
- &divisor2, &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->i8254_osc_base,
+ &divisor1, &divisor2,
+ &cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < this_board->ai_ns_min)
cmd->convert_arg = this_board->ai_ns_min;
if (tmp != cmd->convert_arg)
@@ -1613,7 +1612,6 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_n_scanlen = cmd->scan_end_arg;
devpriv->ai_chanlist = cmd->chanlist;
- devpriv->ai_data = s->async->prealloc_buf;
devpriv->ai_data_len = s->async->prealloc_bufsz;
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
@@ -1987,8 +1985,8 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
for (i = 0; i < 2; i++) {
for (pages = 4; pages >= 0; pages--) {
devpriv->dmabuf_virt[i] =
- (short *)__get_free_pages(GFP_KERNEL,
- pages);
+ (unsigned short *)
+ __get_free_pages(GFP_KERNEL, pages);
if (devpriv->dmabuf_virt[i])
break;
}
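
The allocation loop above asks for progressively smaller contiguous blocks until __get_free_pages() succeeds; with 4 KiB pages the attempts are 64, 32, 16, 8 and 4 KiB. A minimal sketch of the same fallback pattern with hypothetical names (the matching release is free_pages() with the order that succeeded):

/* Sketch only: order-based fallback for a contiguous DMA bounce buffer. */
static unsigned short *alloc_bounce_sketch(unsigned int *order_out)
{
	int order;

	for (order = 4; order >= 0; order--) {
		unsigned long addr = __get_free_pages(GFP_KERNEL, order);

		if (addr) {
			*order_out = order;
			return (unsigned short *)addr;
		}
	}
	return NULL;
}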
@@ -2075,7 +2073,6 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
s->maxdata = 1;
s->len_chanlist = 4;
s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
s->insn_bits = pci9118_insn_bits_di;
s = &dev->subdevices[3];
@@ -2085,11 +2082,10 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
s->maxdata = 1;
s->len_chanlist = 4;
s->range_table = &range_digital;
- s->io_bits = 0xf; /* all bits output */
s->insn_bits = pci9118_insn_bits_do;
devpriv->valid = 1;
- devpriv->i8254_osc_base = 250; /* 250ns=4MHz */
+ devpriv->i8254_osc_base = I8254_OSC_BASE_4MHZ;
devpriv->ai_maskharderr = 0x10a;
/* default measure crash condition */
if (hw_err_mask) /* disable some requested */
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index cdf5ba26c59..8150a67cd1f 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -119,7 +119,6 @@ struct adq12b_private {
int differential; /* option 3 of comedi_config */
int last_channel;
int last_range;
- unsigned int digital_state;
};
/*
@@ -186,23 +185,25 @@ static int adq12b_di_insn_bits(struct comedi_device *dev,
static int adq12b_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct adq12b_private *devpriv = dev->private;
- int channel;
-
- for (channel = 0; channel < 8; channel++)
- if (((data[0] >> channel) & 0x01) != 0)
- outb((((data[1] >> channel) & 0x01) << 3) | channel,
- dev->iobase + ADQ12B_OUTBR);
-
- /* store information to retrieve when asked for reading */
- if (data[0]) {
- devpriv->digital_state &= ~data[0];
- devpriv->digital_state |= (data[0] & data[1]);
+ unsigned int mask;
+ unsigned int chan;
+ unsigned int val;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ for (chan = 0; chan < 8; chan++) {
+ if ((mask >> chan) & 0x01) {
+ val = (s->state >> chan) & 0x01;
+ outb((val << 3) | chan,
+ dev->iobase + ADQ12B_OUTBR);
+ }
+ }
}
- data[1] = devpriv->digital_state;
+ data[1] = s->state;
return insn->n;
}
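
Because the ADQ12B output register takes one (value << 3) | channel pair per write, the new code walks the change mask returned by comedi_dio_update_state() and only touches the channels that moved. For reference, a hedged userspace sketch of how that mask and those bits are typically produced with comedilib (the device path, subdevice number and comedi_dio_bitfield2() usage are assumptions for illustration, not part of this driver):

#include <comedilib.h>

/* Drive channels 0 and 2 high, leave the remaining outputs untouched. */
int set_two_outputs(const char *path, unsigned int subdev)
{
	comedi_t *dev = comedi_open(path);	/* e.g. "/dev/comedi0" */
	unsigned int mask = 0x05;		/* data[0]: channels to modify */
	unsigned int bits = 0x05;		/* data[1]: new values */
	int ret;

	if (!dev)
		return -1;
	ret = comedi_dio_bitfield2(dev, subdev, mask, &bits, 0);
	comedi_close(dev);
	/* on success, "bits" now holds the state reported back in data[1] */
	return ret < 0 ? -1 : 0;
}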
@@ -223,7 +224,6 @@ static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->unipolar = it->options[1];
devpriv->differential = it->options[2];
- devpriv->digital_state = 0;
/*
* initialize channel and range to -1 so we make sure we
* always write at least once to the CTREG in the instruction
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index f84df46d326..c3fdcabe9ae 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -314,10 +314,9 @@ struct pci1710_private {
unsigned int *ai_chanlist; /* actaul chanlist */
unsigned int ai_flags; /* flaglist */
unsigned int ai_data_len; /* len of data buffer */
- short *ai_data; /* data buffer */
unsigned int ai_timer1; /* timers */
unsigned int ai_timer2;
- short ao_data[4]; /* data output buffer */
+ unsigned short ao_data[4]; /* data output buffer */
unsigned int cnt0_write_wait; /* after a write, wait for update of the
* internal state */
};
@@ -544,18 +543,14 @@ static int pci171x_insn_bits_di(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
-*/
static int pci171x_insn_bits_do(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + PCI171x_DO);
- }
+
data[1] = s->state;
return insn->n;
@@ -740,7 +735,7 @@ static void interrupt_pci1710_every_sample(void *d)
int m;
#ifdef PCI171x_PARANOIDCHECK
const struct boardtype *this_board = comedi_board(dev);
- short sampl;
+ unsigned short sampl;
#endif
m = inw(dev->iobase + PCI171x_STATUS);
@@ -821,7 +816,7 @@ static int move_block_from_fifo(struct comedi_device *dev,
int i, j;
#ifdef PCI171x_PARANOIDCHECK
const struct boardtype *this_board = comedi_board(dev);
- int sampl;
+ unsigned short sampl;
#endif
j = s->async->cur_chan;
@@ -1009,9 +1004,10 @@ static int pci171x_ai_docmd_and_mode(int mode, struct comedi_device *dev,
} else {
devpriv->ai_et = 0;
}
- i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, &divisor1,
- &divisor2, &devpriv->ai_timer1,
- devpriv->ai_flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->i8254_osc_base,
+ &divisor1, &divisor2,
+ &devpriv->ai_timer1,
+ devpriv->ai_flags);
outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
if (mode != 2) {
/* start pacer */
@@ -1090,9 +1086,9 @@ static int pci171x_ai_cmdtest(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, &divisor1,
- &divisor2, &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->i8254_osc_base,
+ &divisor1, &divisor2,
+ &cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < this_board->ai_ns_min)
cmd->convert_arg = this_board->ai_ns_min;
if (tmp != cmd->convert_arg)
@@ -1125,7 +1121,6 @@ static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_data = s->async->prealloc_buf;
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
@@ -1288,7 +1283,7 @@ static int pci1710_auto_attach(struct comedi_device *dev,
s->do_cmdtest = pci171x_ai_cmdtest;
s->do_cmd = pci171x_ai_cmd;
}
- devpriv->i8254_osc_base = 100; /* 100ns=10MHz */
+ devpriv->i8254_osc_base = I8254_OSC_BASE_10MHZ;
subdev++;
}
@@ -1320,7 +1315,6 @@ static int pci1710_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->len_chanlist = this_board->n_dichan;
s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
s->insn_bits = pci171x_insn_bits_di;
subdev++;
}
@@ -1333,9 +1327,6 @@ static int pci1710_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->len_chanlist = this_board->n_dochan;
s->range_table = &range_digital;
- /* all bits output */
- s->io_bits = (1 << this_board->n_dochan) - 1;
- s->state = 0;
s->insn_bits = pci171x_insn_bits_do;
subdev++;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index b793d6987b8..bd4f781b4b2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -105,7 +105,7 @@ TODO:
struct pci1723_private {
unsigned char da_range[8]; /* D/A output range for each channel */
- short ao_data[8]; /* data output buffer */
+ unsigned short ao_data[8]; /* data output buffer */
};
/*
@@ -205,19 +205,16 @@ static int pci1723_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
-/*
- digital i/o bits read/write
-*/
static int pci1723_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + PCI1723_WRITE_DIGITAL_OUTPUT_CMD);
- }
+
data[1] = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
+
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index f091fa0d304..6bac665261f 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -448,45 +448,39 @@ static int pci_dio_insn_bits_di_w(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
-*/
static int pci_dio_insn_bits_do_b(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct diosubd_data *d = (const struct diosubd_data *)s->private;
int i;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
for (i = 0; i < d->regs; i++)
outb((s->state >> (8 * i)) & 0xff,
dev->iobase + d->addr + i);
}
+
data[1] = s->state;
return insn->n;
}
-/*
-==============================================================================
-*/
static int pci_dio_insn_bits_do_w(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct diosubd_data *d = (const struct diosubd_data *)s->private;
int i;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
for (i = 0; i < d->regs; i++)
outw((s->state >> (16 * i)) & 0xffff,
dev->iobase + d->addr + 2 * i);
}
+
data[1] = s->state;
return insn->n;
@@ -641,12 +635,10 @@ static int pci1760_insn_bits_di(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
-*/
static int pci1760_insn_bits_do(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int ret;
unsigned char omb[4] = {
@@ -657,14 +649,13 @@ static int pci1760_insn_bits_do(struct comedi_device *dev,
};
unsigned char imb[4];
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
omb[0] = s->state;
ret = pci1760_mbxrequest(dev, omb, imb);
if (!ret)
return ret;
}
+
data[1] = s->state;
return insn->n;
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index afe87cc8976..22b3dda135f 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -45,9 +45,7 @@ static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
+ if (comedi_dio_update_state(s, data)) {
outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7);
outb((s->state >> 8) & 0xff,
dev->iobase + AIO_IIRO_16_RELAY_8_15);
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index c1f723e8614..2e4bf284d52 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -941,31 +941,34 @@ static void dio200_subdev_8255_set_dir(struct comedi_device *dev,
dio200_write8(dev, subpriv->ofs + 3, config);
}
-/*
- * Handle 'insn_bits' for an '8255' DIO subdevice.
- */
static int dio200_subdev_8255_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dio200_subdev_8255 *subpriv = s->private;
+ unsigned int mask;
+ unsigned int val;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
- if (data[0] & 0xff)
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ if (mask & 0xff)
dio200_write8(dev, subpriv->ofs, s->state & 0xff);
- if (data[0] & 0xff00)
+ if (mask & 0xff00)
dio200_write8(dev, subpriv->ofs + 1,
(s->state >> 8) & 0xff);
- if (data[0] & 0xff0000)
+ if (mask & 0xff0000)
dio200_write8(dev, subpriv->ofs + 2,
(s->state >> 16) & 0xff);
}
- data[1] = dio200_read8(dev, subpriv->ofs);
- data[1] |= dio200_read8(dev, subpriv->ofs + 1) << 8;
- data[1] |= dio200_read8(dev, subpriv->ofs + 2) << 16;
- return 2;
+
+ val = dio200_read8(dev, subpriv->ofs);
+ val |= dio200_read8(dev, subpriv->ofs + 1) << 8;
+ val |= dio200_read8(dev, subpriv->ofs + 2) << 16;
+
+ data[1] = val;
+
+ return insn->n;
}
/*
@@ -1022,8 +1025,6 @@ static int dio200_subdev_8255_init(struct comedi_device *dev,
s->maxdata = 1;
s->insn_bits = dio200_subdev_8255_bits;
s->insn_config = dio200_subdev_8255_config;
- s->state = 0;
- s->io_bits = 0;
dio200_subdev_8255_set_dir(dev, s);
return 0;
}
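
Two things change in dio200_subdev_8255_bits() above: only the 8-bit port registers whose byte of the change mask is non-zero get rewritten, and the function now returns insn->n instead of the hard-coded 2. The per-byte update boils down to the following loop (a restatement of the hunk with a hypothetical helper name, not new driver code):

/* Sketch only: write register ofs + i when byte i of the mask changed. */
static void write_changed_bytes_sketch(struct comedi_device *dev,
				       unsigned int ofs,
				       unsigned int mask, unsigned int state)
{
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (mask & (0xff << (8 * i)))
			dio200_write8(dev, ofs + i, (state >> (8 * i)) & 0xff);
	}
}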
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index e7108045f55..5b4b5ab34e2 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -57,17 +57,16 @@ static const struct pc263_board pc263_boards[] = {
static int pc263_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- /* Write out the new digital output lines */
- outb(s->state & 0xFF, dev->iobase);
- outb(s->state >> 8, dev->iobase + 1);
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase);
+ outb((s->state >> 8) & 0xff, dev->iobase + 1);
}
+
+ data[1] = s->state;
+
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 179de53a86f..810e397d8fd 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -215,12 +215,6 @@ Caveats:
#define CLK_EXT 7 /* external clock */
/* Macro to construct clock input configuration register value. */
#define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
-/* Timebases in ns. */
-#define TIMEBASE_10MHZ 100
-#define TIMEBASE_1MHZ 1000
-#define TIMEBASE_100KHZ 10000
-#define TIMEBASE_10KHZ 100000
-#define TIMEBASE_1KHZ 1000000
/*
* Counter/timer gate input configuration sources.
@@ -379,7 +373,7 @@ struct pci224_private {
unsigned long state;
spinlock_t ao_spinlock;
unsigned int *ao_readback;
- short *ao_scan_vals;
+ unsigned short *ao_scan_vals;
unsigned char *ao_scan_order;
int intr_cpuid;
short intr_running;
@@ -843,26 +837,26 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
switch (round_mode) {
case TRIG_ROUND_NEAREST:
default:
- round = TIMEBASE_10MHZ / 2;
+ round = I8254_OSC_BASE_10MHZ / 2;
break;
case TRIG_ROUND_DOWN:
round = 0;
break;
case TRIG_ROUND_UP:
- round = TIMEBASE_10MHZ - 1;
+ round = I8254_OSC_BASE_10MHZ - 1;
break;
}
/* Be careful to avoid overflow! */
- div2 = cmd->scan_begin_arg / TIMEBASE_10MHZ;
- div2 += (round + cmd->scan_begin_arg % TIMEBASE_10MHZ) /
- TIMEBASE_10MHZ;
+ div2 = cmd->scan_begin_arg / I8254_OSC_BASE_10MHZ;
+ div2 += (round + cmd->scan_begin_arg % I8254_OSC_BASE_10MHZ) /
+ I8254_OSC_BASE_10MHZ;
if (div2 <= 0x10000) {
/* A single timer will suffice. */
if (div2 < 2)
div2 = 2;
- cmd->scan_begin_arg = div2 * TIMEBASE_10MHZ;
+ cmd->scan_begin_arg = div2 * I8254_OSC_BASE_10MHZ;
if (cmd->scan_begin_arg < div2 ||
- cmd->scan_begin_arg < TIMEBASE_10MHZ) {
+ cmd->scan_begin_arg < I8254_OSC_BASE_10MHZ) {
/* Overflow! */
cmd->scan_begin_arg = MAX_SCAN_PERIOD;
}
@@ -870,7 +864,8 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
/* Use two timers. */
div1 = devpriv->cached_div1;
div2 = devpriv->cached_div2;
- pci224_cascade_ns_to_timer(TIMEBASE_10MHZ, &div1, &div2,
+ pci224_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &div1, &div2,
&cmd->scan_begin_arg,
round_mode);
devpriv->cached_div1 = div1;
@@ -1002,19 +997,19 @@ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
switch (round_mode) {
case TRIG_ROUND_NEAREST:
default:
- round = TIMEBASE_10MHZ / 2;
+ round = I8254_OSC_BASE_10MHZ / 2;
break;
case TRIG_ROUND_DOWN:
round = 0;
break;
case TRIG_ROUND_UP:
- round = TIMEBASE_10MHZ - 1;
+ round = I8254_OSC_BASE_10MHZ - 1;
break;
}
/* Be careful to avoid overflow! */
- div2 = cmd->scan_begin_arg / TIMEBASE_10MHZ;
- div2 += (round + cmd->scan_begin_arg % TIMEBASE_10MHZ) /
- TIMEBASE_10MHZ;
+ div2 = cmd->scan_begin_arg / I8254_OSC_BASE_10MHZ;
+ div2 += (round + cmd->scan_begin_arg % I8254_OSC_BASE_10MHZ) /
+ I8254_OSC_BASE_10MHZ;
if (div2 <= 0x10000) {
/* A single timer will suffice. */
if (div2 < 2)
@@ -1025,7 +1020,8 @@ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* Use two timers. */
div1 = devpriv->cached_div1;
div2 = devpriv->cached_div2;
- pci224_cascade_ns_to_timer(TIMEBASE_10MHZ, &div1, &div2,
+ pci224_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &div1, &div2,
&ns, round_mode);
}
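
TIMEBASE_10MHZ, removed earlier in this file, was 100 ns, the same value as I8254_OSC_BASE_10MHZ, so only the name changes and the arithmetic is untouched. A worked example of the single-timer versus cascaded-timer decision above (illustrative figures, TRIG_ROUND_NEAREST, 100 ns base):

/*
 * scan period  1 ms: div2 = 1000000 / 100 + (50 + 0) / 100 = 10000,
 *                    which is <= 0x10000, so one 16-bit divisor suffices;
 * scan period 10 ms: div2 = 100000 > 0x10000, so the two divisors from
 *                    pci224_cascade_ns_to_timer() are used instead.
 */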
@@ -1116,7 +1112,7 @@ pci224_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
const struct pci224_board *thisboard = comedi_board(dev);
struct pci224_private *devpriv = dev->private;
struct comedi_async *async = s->async;
- short *array = data;
+ unsigned short *array = data;
unsigned int length = num_bytes / sizeof(*array);
unsigned int offset;
unsigned int shift;
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 43059c25d5e..a97bbd6ca3d 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -573,14 +573,14 @@ static const struct comedi_lrange pci230_ao_range = { 2, {
/* PCI230 daccon bipolar flag for each analogue output range. */
static const unsigned char pci230_ao_bipolar[2] = { 0, 1 };
-static short pci230_ai_read(struct comedi_device *dev)
+static unsigned short pci230_ai_read(struct comedi_device *dev)
{
const struct pci230_board *thisboard = comedi_board(dev);
struct pci230_private *devpriv = dev->private;
- short data;
+ unsigned short data;
/* Read sample. */
- data = (short)inw(dev->iobase + PCI230_ADCDATA);
+ data = inw(dev->iobase + PCI230_ADCDATA);
/* PCI230 is 12 bit - stored in upper bits of 16 bit register (lower
* four bits reserved for expansion). */
/* PCI230+ is 16 bit AI. */
@@ -595,7 +595,7 @@ static short pci230_ai_read(struct comedi_device *dev)
}
static inline unsigned short pci230_ao_mangle_datum(struct comedi_device *dev,
- short datum)
+ unsigned short datum)
{
const struct pci230_board *thisboard = comedi_board(dev);
struct pci230_private *devpriv = dev->private;
@@ -609,11 +609,12 @@ static inline unsigned short pci230_ao_mangle_datum(struct comedi_device *dev,
* four bits reserved for expansion). */
/* PCI230+ is also 12 bit AO. */
datum <<= (16 - thisboard->ao_bits);
- return (unsigned short)datum;
+ return datum;
}
static inline void pci230_ao_write_nofifo(struct comedi_device *dev,
- short datum, unsigned int chan)
+ unsigned short datum,
+ unsigned int chan)
{
struct pci230_private *devpriv = dev->private;
@@ -627,8 +628,8 @@ static inline void pci230_ao_write_nofifo(struct comedi_device *dev,
PCI230_DACOUT2));
}
-static inline void pci230_ao_write_fifo(struct comedi_device *dev, short datum,
- unsigned int chan)
+static inline void pci230_ao_write_fifo(struct comedi_device *dev,
+ unsigned short datum, unsigned int chan)
{
struct pci230_private *devpriv = dev->private;
@@ -1165,7 +1166,7 @@ static void pci230_handle_ao_nofifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pci230_private *devpriv = dev->private;
- short data;
+ unsigned short data;
int i, ret;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -1258,7 +1259,7 @@ static int pci230_handle_ao_fifo(struct comedi_device *dev,
/* Process scans. */
for (n = 0; n < num_scans; n++) {
for (i = 0; i < cmd->chanlist_len; i++) {
- short datum;
+ unsigned short datum;
comedi_buf_get(async, &datum);
pci230_ao_write_fifo(dev, datum,
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 145bb48f618..4bd4ef8e88c 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -44,17 +44,16 @@ The state of the outputs can be read.
static int pci263_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- /* Write out the new digital output lines */
- outb(s->state & 0xFF, dev->iobase);
- outb(s->state >> 8, dev->iobase + 1);
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase);
+ outb((s->state >> 8) & 0xff, dev->iobase + 1);
}
+
+ data[1] = s->state;
+
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 0ce93da7084..64d5f291553 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -234,9 +234,9 @@ static int das16cs_ai_cmdtest(struct comedi_device *dev,
unsigned int div1 = 0, div2 = 0;
tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer(100, &div1, &div2,
- &cmd->scan_begin_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &div1, &div2,
+ &cmd->scan_begin_arg, cmd->flags);
if (tmp != cmd->scan_begin_arg)
err++;
}
@@ -244,9 +244,9 @@ static int das16cs_ai_cmdtest(struct comedi_device *dev,
unsigned int div1 = 0, div2 = 0;
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(100, &div1, &div2,
- &cmd->scan_begin_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &div1, &div2,
+ &cmd->scan_begin_arg, cmd->flags);
if (tmp != cmd->convert_arg)
err++;
if (cmd->scan_begin_src == TRIG_TIMER &&
@@ -325,14 +325,11 @@ static int das16cs_ao_rinsn(struct comedi_device *dev,
static int das16cs_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
-
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + DAS16CS_DIO);
- }
data[1] = inw(dev->iobase + DAS16CS_DIO);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 41d89ee7fa3..e72a403db17 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -73,7 +73,6 @@ analog triggering on 1602 series
#include "amcc_s5933.h"
#include "comedi_fc.h"
-#define TIMER_BASE 100 /* 10MHz master clock */
#define AI_BUFFER_SIZE 1024 /* max ai fifo size */
#define AO_BUFFER_SIZE 1024 /* max ao fifo size */
#define NUM_CHANNELS_8800 8
@@ -358,15 +357,15 @@ struct cb_pcidas_private {
unsigned int s5933_intcsr_bits;
unsigned int ao_control_bits;
/* fifo buffers */
- short ai_buffer[AI_BUFFER_SIZE];
- short ao_buffer[AO_BUFFER_SIZE];
+ unsigned short ai_buffer[AI_BUFFER_SIZE];
+ unsigned short ao_buffer[AO_BUFFER_SIZE];
/* divisors of master clock for analog output pacing */
unsigned int ao_divisor1;
unsigned int ao_divisor2;
/* number of analog output samples remaining */
unsigned int ao_count;
/* cached values for readback */
- int ao_value[2];
+ unsigned short ao_value[2];
unsigned int caldac_value[NUM_CHANNELS_8800];
unsigned int trimpot_value[NUM_CHANNELS_8402];
unsigned int dac08_value;
@@ -880,21 +879,19 @@ static int cb_pcidas_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->scan_begin_arg),
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->scan_begin_arg, cmd->flags);
if (tmp != cmd->scan_begin_arg)
err++;
}
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->convert_arg),
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg, cmd->flags);
if (tmp != cmd->convert_arg)
err++;
}
@@ -932,9 +929,9 @@ static void cb_pcidas_load_counters(struct comedi_device *dev, unsigned int *ns,
{
struct cb_pcidas_private *devpriv = dev->private;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1),
- &(devpriv->divisor2), ns,
- rounding_flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1, &devpriv->divisor2,
+ ns, rounding_flags);
/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 1,
@@ -1084,11 +1081,10 @@ static int cb_pcidas_ao_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &(devpriv->ao_divisor1),
- &(devpriv->ao_divisor2),
- &(cmd->scan_begin_arg),
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->ao_divisor1,
+ &devpriv->ao_divisor2,
+ &cmd->scan_begin_arg, cmd->flags);
if (tmp != cmd->scan_begin_arg)
err++;
}
@@ -1209,11 +1205,10 @@ static int cb_pcidas_ao_cmd(struct comedi_device *dev,
/* load counters */
if (cmd->scan_begin_src == TRIG_TIMER) {
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &(devpriv->ao_divisor1),
- &(devpriv->ao_divisor2),
- &(cmd->scan_begin_arg),
- cmd->flags);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->ao_divisor1,
+ &devpriv->ao_divisor2,
+ &cmd->scan_begin_arg, cmd->flags);
/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 1,
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 388dbd7a5d2..ff5206536be 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1137,7 +1137,7 @@ struct pcidas64_private {
volatile short ai_cmd_running;
unsigned int ai_fifo_segment_length;
struct ext_clock_info ext_clock;
- short ao_bounce_buffer[DAC_FIFO_SIZE];
+ unsigned short ao_bounce_buffer[DAC_FIFO_SIZE];
};
static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
@@ -3490,18 +3490,15 @@ static int di_rbits(struct comedi_device *dev, struct comedi_subdevice *s,
return insn->n;
}
-static int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int do_wbits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcidas64_private *devpriv = dev->private;
- data[0] &= 0xf;
- /* zero bits we are going to change */
- s->state &= ~data[0];
- /* set new bits */
- s->state |= data[0] & data[1];
-
- writeb(s->state, devpriv->dio_counter_iobase + DO_REG);
+ if (comedi_dio_update_state(s, data))
+ writeb(s->state, devpriv->dio_counter_iobase + DO_REG);
data[1] = s->state;
@@ -3526,14 +3523,14 @@ static int dio_60xx_config_insn(struct comedi_device *dev,
return insn->n;
}
-static int dio_60xx_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int dio_60xx_wbits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcidas64_private *devpriv = dev->private;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
writeb(s->state,
devpriv->dio_counter_iobase + DIO_DATA_60XX_REG);
}
diff --git a/drivers/staging/comedi/drivers/comedi_fc.h b/drivers/staging/comedi/drivers/comedi_fc.h
index a4dea7cb86b..8558b07f8df 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.h
+++ b/drivers/staging/comedi/drivers/comedi_fc.h
@@ -30,7 +30,7 @@ extern unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
unsigned int num_bytes);
static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *subd,
- short data)
+ unsigned short data)
{
return cfc_write_array_to_buffer(subd, &data, sizeof(data));
};
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index f28a15f0274..9de81c7712f 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -1,168 +1,153 @@
/*
- comedi/drivers/comedi_parport.c
- hardware driver for standard parallel port
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: comedi_parport
-Description: Standard PC parallel port
-Author: ds
-Status: works in immediate mode
-Devices: [standard] parallel port (comedi_parport)
-Updated: Tue, 30 Apr 2002 21:11:45 -0700
-
-A cheap and easy way to get a few more digital I/O lines. Steal
-additional parallel ports from old computers or your neighbors'
-computers.
-
-Option list:
- 0: I/O port base for the parallel port.
- 1: IRQ
-
-Parallel Port Lines:
-
-pin subdev chan aka
---- ------ ---- ---
-1 2 0 strobe
-2 0 0 data 0
-3 0 1 data 1
-4 0 2 data 2
-5 0 3 data 3
-6 0 4 data 4
-7 0 5 data 5
-8 0 6 data 6
-9 0 7 data 7
-10 1 3 acknowledge
-11 1 4 busy
-12 1 2 output
-13 1 1 printer selected
-14 2 1 auto LF
-15 1 0 error
-16 2 2 init
-17 2 3 select printer
-18-25 ground
-
-Notes:
-
-Subdevices 0 is digital I/O, subdevice 1 is digital input, and
-subdevice 2 is digital output. Unlike other Comedi devices,
-subdevice 0 defaults to output.
-
-Pins 13 and 14 are inverted once by Comedi and once by the
-hardware, thus cancelling the effect.
-
-Pin 1 is a strobe, thus acts like one. There's no way in software
-to change this, at least on a standard parallel port.
-
-Subdevice 3 pretends to be a digital input subdevice, but it always
-returns 0 when read. However, if you run a command with
-scan_begin_src=TRIG_EXT, it uses pin 10 as a external triggering
-pin, which can be used to wake up tasks.
-*/
+ * comedi_parport.c
+ * Comedi driver for standard parallel port
+ *
+ * For more information see:
+ * http://retired.beyondlogic.org/spp/parallel.htm
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
- see http://www.beyondlogic.org/ for information.
- or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html
+ * Driver: comedi_parport
+ * Description: Standard PC parallel port
+ * Author: ds
+ * Status: works in immediate mode
+ * Devices: (standard) parallel port [comedi_parport]
+ * Updated: Tue, 30 Apr 2002 21:11:45 -0700
+ *
+ * A cheap and easy way to get a few more digital I/O lines. Steal
+ * additional parallel ports from old computers or your neighbors'
+ * computers.
+ *
+ * Option list:
+ * 0: I/O port base for the parallel port.
+ * 1: IRQ (optional)
+ *
+ * Parallel Port Lines:
+ *
+ * pin subdev chan type name
+ * ----- ------ ---- ---- --------------
+ * 1 2 0 DO strobe
+ * 2 0 0 DIO data 0
+ * 3 0 1 DIO data 1
+ * 4 0 2 DIO data 2
+ * 5 0 3 DIO data 3
+ * 6 0 4 DIO data 4
+ * 7 0 5 DIO data 5
+ * 8 0 6 DIO data 6
+ * 9 0 7 DIO data 7
+ * 10 1 3 DI ack
+ * 11 1 4 DI busy
+ * 12 1 2 DI paper out
+ * 13 1 1 DI select in
+ * 14 2 1 DO auto LF
+ * 15 1 0 DI error
+ * 16 2 2 DO init
+ * 17 2 3 DO select printer
+ * 18-25 ground
+ *
+ * When an IRQ is configured subdevice 3 pretends to be a digital
+ * input subdevice, but it always returns 0 when read. However, if
+ * you run a command with scan_begin_src=TRIG_EXT, it uses pin 10
+ * as an external trigger, which can be used to wake up tasks.
*/
#include <linux/module.h>
-#include "../comedidev.h"
#include <linux/interrupt.h>
-#include "comedi_fc.h"
-
-#define PARPORT_SIZE 3
-
-#define PARPORT_A 0
-#define PARPORT_B 1
-#define PARPORT_C 2
+#include "../comedidev.h"
-struct parport_private {
- unsigned int a_data;
- unsigned int c_data;
- int enable_irq;
-};
+#include "comedi_fc.h"
-static int parport_insn_a(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+/*
+ * Register map
+ */
+#define PARPORT_DATA_REG 0x00
+#define PARPORT_STATUS_REG 0x01
+#define PARPORT_CTRL_REG 0x02
+#define PARPORT_CTRL_IRQ_ENA (1 << 4)
+#define PARPORT_CTRL_BIDIR_ENA (1 << 5)
+
+static int parport_data_reg_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct parport_private *devpriv = dev->private;
-
- if (data[0]) {
- devpriv->a_data &= ~data[0];
- devpriv->a_data |= (data[0] & data[1]);
-
- outb(devpriv->a_data, dev->iobase + PARPORT_A);
- }
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + PARPORT_DATA_REG);
- data[1] = inb(dev->iobase + PARPORT_A);
+ data[1] = inb(dev->iobase + PARPORT_DATA_REG);
return insn->n;
}
-static int parport_insn_config_a(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int parport_data_reg_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct parport_private *devpriv = dev->private;
-
- if (data[0]) {
- s->io_bits = 0xff;
- devpriv->c_data &= ~(1 << 5);
- } else {
- s->io_bits = 0;
- devpriv->c_data |= (1 << 5);
- }
- outb(devpriv->c_data, dev->iobase + PARPORT_C);
+ unsigned int ctrl;
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
+ if (ret)
+ return ret;
+
+ ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
+ if (s->io_bits)
+ ctrl &= ~PARPORT_CTRL_BIDIR_ENA;
+ else
+ ctrl |= PARPORT_CTRL_BIDIR_ENA;
+ outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
- return 1;
+ return insn->n;
}
-static int parport_insn_b(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int parport_status_reg_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- /* should writes be ignored? */
- /* anyone??? */
- }
-
- data[1] = (inb(dev->iobase + PARPORT_B) >> 3);
+ data[1] = inb(dev->iobase + PARPORT_STATUS_REG) >> 3;
return insn->n;
}
-static int parport_insn_c(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int parport_ctrl_reg_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct parport_private *devpriv = dev->private;
-
- data[0] &= 0x0f;
- if (data[0]) {
- devpriv->c_data &= ~data[0];
- devpriv->c_data |= (data[0] & data[1]);
+ unsigned int ctrl;
- outb(devpriv->c_data, dev->iobase + PARPORT_C);
+ if (comedi_dio_update_state(s, data)) {
+ ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
+ ctrl &= (PARPORT_CTRL_IRQ_ENA | PARPORT_CTRL_BIDIR_ENA);
+ ctrl |= s->state;
+ outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
}
- data[1] = devpriv->c_data & 0xf;
+ data[1] = s->state;
return insn->n;
}
-static int parport_intr_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int parport_intr_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[1] = 0;
return insn->n;
@@ -213,12 +198,11 @@ static int parport_intr_cmdtest(struct comedi_device *dev,
static int parport_intr_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct parport_private *devpriv = dev->private;
+ unsigned int ctrl;
- devpriv->c_data |= 0x10;
- outb(devpriv->c_data, dev->iobase + PARPORT_C);
-
- devpriv->enable_irq = 1;
+ ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
+ ctrl |= PARPORT_CTRL_IRQ_ENA;
+ outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
return 0;
}
@@ -226,12 +210,11 @@ static int parport_intr_cmd(struct comedi_device *dev,
static int parport_intr_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct parport_private *devpriv = dev->private;
-
- devpriv->c_data &= ~0x10;
- outb(devpriv->c_data, dev->iobase + PARPORT_C);
+ unsigned int ctrl;
- devpriv->enable_irq = 0;
+ ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
+ ctrl &= ~PARPORT_CTRL_IRQ_ENA;
+ outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
return 0;
}
@@ -239,10 +222,11 @@ static int parport_intr_cancel(struct comedi_device *dev,
static irqreturn_t parport_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct parport_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[3];
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int ctrl;
- if (!devpriv->enable_irq)
+ ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
+ if (!(ctrl & PARPORT_CTRL_IRQ_ENA))
return IRQ_NONE;
comedi_buf_put(s->async, 0);
@@ -255,79 +239,69 @@ static irqreturn_t parport_interrupt(int irq, void *d)
static int parport_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct parport_private *devpriv;
struct comedi_subdevice *s;
- unsigned int irq;
int ret;
- ret = comedi_request_region(dev, it->options[0], PARPORT_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x03);
if (ret)
return ret;
- irq = it->options[1];
- if (irq) {
- ret = request_irq(irq, parport_interrupt, 0, dev->board_name,
- dev);
- if (ret < 0) {
- dev_err(dev->class_dev, "irq not available\n");
- return -EINVAL;
- }
- dev->irq = irq;
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], parport_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- ret = comedi_alloc_subdevices(dev, 4);
+ ret = comedi_alloc_subdevices(dev, dev->irq ? 4 : 3);
if (ret)
return ret;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
+ /* Digital I/O subdevice - Parallel port DATA register */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_insn_a;
- s->insn_config = parport_insn_config_a;
-
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = parport_data_reg_insn_bits;
+ s->insn_config = parport_data_reg_insn_config;
+
+ /* Digital Input subdevice - Parallel port STATUS register */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 5;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_insn_b;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 5;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = parport_status_reg_insn_bits;
+
+ /* Digital Output subdevice - Parallel port CONTROL register */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_insn_c;
-
- s = &dev->subdevices[3];
- if (irq) {
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = parport_ctrl_reg_insn_bits;
+
+ if (dev->irq) {
+ /* Digital Input subdevice - Interrupt support */
+ s = &dev->subdevices[3];
dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_intr_insn;
- s->do_cmdtest = parport_intr_cmdtest;
- s->do_cmd = parport_intr_cmd;
- s->cancel = parport_intr_cancel;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = parport_intr_insn_bits;
+ s->do_cmdtest = parport_intr_cmdtest;
+ s->do_cmd = parport_intr_cmd;
+ s->cancel = parport_intr_cancel;
}
- devpriv->a_data = 0;
- outb(devpriv->a_data, dev->iobase + PARPORT_A);
- devpriv->c_data = 0;
- outb(devpriv->c_data, dev->iobase + PARPORT_C);
+ outb(0, dev->iobase + PARPORT_DATA_REG);
+ outb(0, dev->iobase + PARPORT_CTRL_REG);
return 0;
}
@@ -341,5 +315,5 @@ static struct comedi_driver parport_driver = {
module_comedi_driver(parport_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi: Standard parallel port driver");
MODULE_LICENSE("GPL");
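The interrupt enable/disable paths above now derive the control-register contents from the hardware instead of a cached copy in private data. A minimal sketch of that read-modify-write pattern, assuming the control register sits at base + 2 and the IRQ enable is bit 4 (mirroring the PARPORT_CTRL_* macros used in the hunks); not taken from the tree:

	/*
	 * Sketch only: toggle the parallel-port IRQ enable bit by reading
	 * the control register, changing the one bit of interest and
	 * writing the value back.
	 */
	static void parport_irq_set(struct comedi_device *dev, bool enable)
	{
		unsigned int ctrl;

		ctrl = inb(dev->iobase + 0x02);	/* current control bits */
		if (enable)
			ctrl |= (1 << 4);	/* set IRQ enable */
		else
			ctrl &= ~(1 << 4);	/* clear IRQ enable */
		outb(ctrl, dev->iobase + 0x02);	/* write back */
	}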
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index e781716bf35..89836c0828d 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -40,17 +40,11 @@ Configuration Options: not applicable, uses comedi PCI auto config
static int contec_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + PIO1616L_DO_REG);
- }
data[1] = s->state;
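The conversions in this and the following hunks all funnel the mask/bits handling through comedi_dio_update_state(). A rough sketch of what that helper does, inferred from how the call sites use it (the core implementation may differ in detail):

	/*
	 * Sketch of the helper's semantics: data[0] is a mask of channels
	 * to update, data[1] holds the new bit values.  The subdevice's
	 * software copy (s->state) is updated and the mask is returned, so
	 * callers only touch the hardware when something actually changed.
	 */
	static unsigned int dio_update_state_sketch(struct comedi_subdevice *s,
						    unsigned int *data)
	{
		unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
							 : 0xffffffff;
		unsigned int mask = data[0] & chanmask;
		unsigned int bits = data[1];

		if (mask) {
			s->state &= ~mask;
			s->state |= (bits & mask);
		}
		return mask;
	}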
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 5f669709501..15dd33e3e1c 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -279,27 +279,23 @@ static int das08_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s,
return insn->n;
}
-static int das08_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das08_do_wbits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct das08_private_struct *devpriv = dev->private;
- int wbits;
-
- /* get current settings of digital output lines */
- wbits = (devpriv->do_mux_bits >> 4) & 0xf;
- /* null bits we are going to set */
- wbits &= ~data[0];
- /* set new bit values */
- wbits |= data[0] & data[1];
- /* remember digital output bits */
- /* prevent race with setting of analog input mux */
- spin_lock(&dev->spinlock);
- devpriv->do_mux_bits &= ~DAS08_DO_MASK;
- devpriv->do_mux_bits |= DAS08_OP(wbits);
- outb(devpriv->do_mux_bits, dev->iobase + DAS08_CONTROL);
- spin_unlock(&dev->spinlock);
- data[1] = wbits;
+ if (comedi_dio_update_state(s, data)) {
+ /* prevent race with setting of analog input mux */
+ spin_lock(&dev->spinlock);
+ devpriv->do_mux_bits &= ~DAS08_DO_MASK;
+ devpriv->do_mux_bits |= DAS08_OP(s->state);
+ outb(devpriv->do_mux_bits, dev->iobase + DAS08_CONTROL);
+ spin_unlock(&dev->spinlock);
+ }
+
+ data[1] = s->state;
return insn->n;
}
@@ -316,17 +312,13 @@ static int das08jr_di_rbits(struct comedi_device *dev,
static int das08jr_do_wbits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das08_private_struct *devpriv = dev->private;
-
- /* null bits we are going to set */
- devpriv->do_bits &= ~data[0];
- /* set new bit values */
- devpriv->do_bits |= data[0] & data[1];
- outb(devpriv->do_bits, dev->iobase + DAS08JR_DIO);
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + DAS08JR_DIO);
- data[1] = devpriv->do_bits;
+ data[1] = s->state;
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/das08.h b/drivers/staging/comedi/drivers/das08.h
index cce1b584200..46a314c5113 100644
--- a/drivers/staging/comedi/drivers/das08.h
+++ b/drivers/staging/comedi/drivers/das08.h
@@ -41,7 +41,6 @@ struct das08_board_struct {
struct das08_private_struct {
unsigned int do_mux_bits; /* bits for do/mux register on boards without separate do register */
- unsigned int do_bits; /* bits for do register on boards with register dedicated to digital out only */
const unsigned int *pg_gainlist;
unsigned int ao_readback[2]; /* assume 2 AO channels */
};
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 1b0793f33b9..a8446ca0411 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -675,21 +675,19 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->scan_begin_src == TRIG_TIMER) {
unsigned int tmp = cmd->scan_begin_arg;
/* set divisors, correct timing arguments */
- i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &devpriv->divisor1,
- &devpriv->divisor2,
- &cmd->scan_begin_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->clockbase,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->scan_begin_arg, cmd->flags);
err += (tmp != cmd->scan_begin_arg);
}
if (cmd->convert_src == TRIG_TIMER) {
unsigned int tmp = cmd->convert_arg;
/* set divisors, correct timing arguments */
- i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &devpriv->divisor1,
- &devpriv->divisor2,
- &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->clockbase,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg, cmd->flags);
err += (tmp != cmd->convert_arg);
}
if (err)
@@ -725,11 +723,9 @@ static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns,
struct das16_private_struct *devpriv = dev->private;
unsigned long timer_base = dev->iobase + DAS16_TIMER_BASE_REG;
- i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &devpriv->divisor1,
- &devpriv->divisor2,
- &ns,
- rounding_flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->clockbase,
+ &devpriv->divisor1, &devpriv->divisor2,
+ &ns, rounding_flags);
/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
i8254_load(timer_base, 0, 1, devpriv->divisor1, 2);
@@ -850,7 +846,7 @@ static void das16_ai_munge(struct comedi_device *dev,
unsigned int start_chan_index)
{
unsigned int i, num_samples = num_bytes / sizeof(short);
- short *data = array;
+ unsigned short *data = array;
for (i = 0; i < num_samples; i++) {
data[i] = le16_to_cpu(data[i]);
@@ -952,15 +948,8 @@ static int das16_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DAS16_DIO_REG);
- }
data[1] = s->state;
@@ -1043,14 +1032,15 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
status = inb(dev->iobase + DAS1600_STATUS_REG);
if (status & DAS1600_STATUS_CLK_10MHZ)
- devpriv->clockbase = 100;
+ devpriv->clockbase = I8254_OSC_BASE_10MHZ;
else
- devpriv->clockbase = 1000;
+ devpriv->clockbase = I8254_OSC_BASE_1MHZ;
} else {
if (it->options[3])
- devpriv->clockbase = 1000 / it->options[3];
+ devpriv->clockbase = I8254_OSC_BASE_1MHZ /
+ it->options[3];
else
- devpriv->clockbase = 1000; /* 1 MHz default */
+ devpriv->clockbase = I8254_OSC_BASE_1MHZ;
}
/* initialize dma */
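The pacer changes above pass cmd->flags straight through to i8253_cascade_ns_to_timer() and express the clock base with the I8254_OSC_BASE_* constants (the base period in nanoseconds per tick). A hedged sketch of the relationship the call sites rely on (the real helper also searches for the divisor pair and honors the TRIG_ROUND_* flags):

	/*
	 * The two cascaded 16-bit counters divide the base clock, so the
	 * generated pacer period is base_ns * divisor1 * divisor2.  For
	 * example, with a 1 MHz base (assumed 1000 ns/tick), divisor1 = 2
	 * and divisor2 = 50 give a 100000 ns (10 kHz) pacer.
	 */
	static unsigned int cascade_period_ns(unsigned int base_ns,
					      unsigned int divisor1,
					      unsigned int divisor2)
	{
		return base_ns * divisor1 * divisor2;
	}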
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index b943c449b69..fce9acfe808 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -63,8 +63,6 @@ irq can be omitted, although the cmd interface will not work without it.
#define DAS16M1_SIZE 16
#define DAS16M1_SIZE2 8
-#define DAS16M1_XTAL 100 /* 10 MHz master clock */
-
#define FIFO_SIZE 1024 /* 1024 sample fifo */
/*
@@ -133,19 +131,18 @@ struct das16m1_private_struct {
* needed to keep track of whether new count has been loaded into
* counter yet (loaded by first sample conversion) */
u16 initial_hw_count;
- short ai_buffer[FIFO_SIZE];
- unsigned int do_bits; /* saves status of digital output bits */
+ unsigned short ai_buffer[FIFO_SIZE];
unsigned int divisor1; /* divides master clock to obtain conversion speed */
unsigned int divisor2; /* divides master clock to obtain conversion speed */
unsigned long extra_iobase;
};
-static inline short munge_sample(short data)
+static inline unsigned short munge_sample(unsigned short data)
{
return (data >> 4) & 0xfff;
}
-static void munge_sample_array(short *array, unsigned int num_elements)
+static void munge_sample_array(unsigned short *array, unsigned int num_elements)
{
unsigned int i;
@@ -208,11 +205,10 @@ static int das16m1_cmd_test(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
/* calculate counter values that give desired timing */
- i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->convert_arg),
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg, cmd->flags);
if (tmp != cmd->convert_arg)
err++;
}
@@ -251,9 +247,10 @@ static unsigned int das16m1_set_pacer(struct comedi_device *dev,
{
struct das16m1_private_struct *devpriv = dev->private;
- i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL, &(devpriv->divisor1),
- &(devpriv->divisor2), &ns,
- rounding_flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &ns, rounding_flags);
/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 1, devpriv->divisor1,
@@ -393,22 +390,13 @@ static int das16m1_di_rbits(struct comedi_device *dev,
static int das16m1_do_wbits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das16m1_private_struct *devpriv = dev->private;
- unsigned int wbits;
-
- /* only set bits that have been masked */
- data[0] &= 0xf;
- wbits = devpriv->do_bits;
- /* zero bits that have been masked */
- wbits &= ~data[0];
- /* set masked bits */
- wbits |= data[0] & data[1];
- devpriv->do_bits = wbits;
- data[1] = wbits;
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + DAS16M1_DIO);
- outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO);
+ data[1] = s->state;
return insn->n;
}
@@ -649,7 +637,7 @@ static int das16m1_attach(struct comedi_device *dev,
outb(TOTAL_CLEAR, dev->iobase + DAS16M1_8254_FIRST_CNTRL);
/* initialize digital output lines */
- outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO);
+ outb(0, dev->iobase + DAS16M1_DIO);
/* set the interrupt level */
if (dev->irq)
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 5b300294d32..1880038956d 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -108,7 +108,6 @@ TODO:
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
#define FIFO_SIZE 1024 /* 1024 sample fifo */
-#define TIMER_BASE 200 /* 5 Mhz master clock */
#define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */
#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */
@@ -427,7 +426,6 @@ struct das1800_private {
volatile unsigned int count; /* number of data points left to be taken */
unsigned int divisor1; /* value to load into board's counter 1 for timed conversions */
unsigned int divisor2; /* value to load into board's counter 2 for timed conversions */
- int do_bits; /* digital output bits */
int irq_dma_bits; /* bits for control register b */
/* dma bits for control register b, stored so that dma can be
* turned on and off */
@@ -440,7 +438,8 @@ struct das1800_private {
uint16_t *dma_current_buf; /* pointer to dma buffer currently being used */
unsigned int dma_transfer_size; /* size of transfer currently used, in bytes */
unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */
- short ao_update_bits; /* remembers the last write to the 'update' dac */
+ unsigned short ao_update_bits; /* remembers the last write to the
+ * 'update' dac */
};
/* analog out range for 'ao' boards */
@@ -503,7 +502,7 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
- short dpnt;
+ unsigned short dpnt;
int unipolar;
struct comedi_cmd *cmd = &s->async->cmd;
@@ -840,12 +839,11 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_FOLLOW) {
tmp_arg = cmd->convert_arg;
/* calculate counter values that give desired timing */
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->convert_arg),
- cmd->
- flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_5MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg,
+ cmd->flags);
if (tmp_arg != cmd->convert_arg)
err++;
}
@@ -870,16 +868,11 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
}
tmp_arg = cmd->scan_begin_arg;
/* calculate counter values that give desired timing */
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &(devpriv->
- divisor1),
- &(devpriv->
- divisor2),
- &(cmd->
- scan_begin_arg),
- cmd->
- flags &
- TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_5MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->scan_begin_arg,
+ cmd->flags);
if (tmp_arg != cmd->scan_begin_arg)
err++;
}
@@ -1011,12 +1004,10 @@ static int setup_counters(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
/* set conversion frequency */
period = cmd->convert_arg;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &devpriv->divisor1,
- &devpriv->divisor2,
- &period,
- cmd->flags &
- TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_5MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &period, cmd->flags);
if (das1800_set_frequency(dev) < 0)
return -1;
}
@@ -1024,9 +1015,10 @@ static int setup_counters(struct comedi_device *dev,
case TRIG_TIMER: /* in burst mode */
/* set scan frequency */
period = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE, &devpriv->divisor1,
- &devpriv->divisor2, &period,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_5MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &period, cmd->flags);
if (das1800_set_frequency(dev) < 0)
return -1;
break;
@@ -1220,7 +1212,7 @@ static int das1800_ai_rinsn(struct comedi_device *dev,
int i, n;
int chan, range, aref, chan_range;
int timeout = 1000;
- short dpnt;
+ unsigned short dpnt;
int conv_flags = 0;
unsigned long irq_flags;
@@ -1285,7 +1277,7 @@ static int das1800_ao_winsn(struct comedi_device *dev,
int chan = CR_CHAN(insn->chanspec);
/* int range = CR_RANGE(insn->chanspec); */
int update_chan = thisboard->ao_n_chan - 1;
- short output;
+ unsigned short output;
unsigned long irq_flags;
/* card expects two's complement data */
@@ -1319,24 +1311,15 @@ static int das1800_di_rbits(struct comedi_device *dev,
return insn->n;
}
-/* writes to digital output channels */
static int das1800_do_wbits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das1800_private *devpriv = dev->private;
- unsigned int wbits;
-
- /* only set bits that have been masked */
- data[0] &= (1 << s->n_chan) - 1;
- wbits = devpriv->do_bits;
- wbits &= ~data[0];
- wbits |= data[0] & data[1];
- devpriv->do_bits = wbits;
-
- outb(devpriv->do_bits, dev->iobase + DAS1800_DIGITAL);
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + DAS1800_DIGITAL);
- data[1] = devpriv->do_bits;
+ data[1] = s->state;
return insn->n;
}
@@ -1644,7 +1627,7 @@ static int das1800_attach(struct comedi_device *dev,
das1800_cancel(dev, dev->read_subdev);
/* initialize digital out channels */
- outb(devpriv->do_bits, dev->iobase + DAS1800_DIGITAL);
+ outb(0, dev->iobase + DAS1800_DIGITAL);
/* initialize analog out channels */
if (thisboard->ao_ability == 1) {
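The s/short/unsigned short/ changes in the das1800 (and das16) hunks treat raw sample words as bit patterns rather than signed values. A standalone illustration, not taken from the tree, of the sign-extension hazard they avoid:

	#include <stdio.h>

	int main(void)
	{
		short s = (short)0x8001;	/* raw 16-bit sample, MSB set */
		unsigned short u = 0x8001;

		/* widening the signed value sign-extends the top bit */
		printf("%08x\n", (unsigned int)s);	/* ffff8001 */
		printf("%08x\n", (unsigned int)u);	/* 00008001 */
		return 0;
	}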
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 11e16114e4e..5af0a5764a8 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -66,7 +66,6 @@ cmd triggers supported:
#include "comedi_fc.h"
#define DAS800_SIZE 8
-#define TIMER_BASE 1000
#define N_CHAN_AI 8 /* number of analog input channels */
/* Registers for the das800 */
@@ -356,11 +355,10 @@ static int das800_ai_do_cmdtest(struct comedi_device *dev,
int tmp = cmd->convert_arg;
/* calculate counter values that give desired timing */
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &devpriv->divisor1,
- &devpriv->divisor2,
- &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_1MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg, cmd->flags);
if (tmp != cmd->convert_arg)
err++;
}
@@ -630,13 +628,9 @@ static int das800_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct das800_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
unsigned long irq_flags;
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data)) {
devpriv->do_bits = s->state << 4;
spin_lock_irqsave(&dev->spinlock, irq_flags);
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 118a4fd129f..b04a5633f75 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -596,52 +596,40 @@ static int dmm32at_ao_rinsn(struct comedi_device *dev,
static int dmm32at_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dmm32at_private *devpriv = dev->private;
- unsigned char diobits;
-
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- /* Write out the new digital output lines */
- /* outw(s->state,dev->iobase + DMM32AT_DIO); */
+ unsigned int mask;
+ unsigned int val;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ /* get access to the DIO regs */
+ outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
+
+ /* if either part of dio is set for output */
+ if (((devpriv->dio_config & DMM32AT_DIRCL) == 0) ||
+ ((devpriv->dio_config & DMM32AT_DIRCH) == 0)) {
+ val = (s->state & 0x00ff0000) >> 16;
+ outb(val, dev->iobase + DMM32AT_DIOC);
+ }
+ if ((devpriv->dio_config & DMM32AT_DIRB) == 0) {
+ val = (s->state & 0x0000ff00) >> 8;
+ outb(val, dev->iobase + DMM32AT_DIOB);
+ }
+ if ((devpriv->dio_config & DMM32AT_DIRA) == 0) {
+ val = (s->state & 0x000000ff);
+ outb(val, dev->iobase + DMM32AT_DIOA);
+ }
}
- /* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
-
- /* if either part of dio is set for output */
- if (((devpriv->dio_config & DMM32AT_DIRCL) == 0) ||
- ((devpriv->dio_config & DMM32AT_DIRCH) == 0)) {
- diobits = (s->state & 0x00ff0000) >> 16;
- outb(diobits, dev->iobase + DMM32AT_DIOC);
- }
- if ((devpriv->dio_config & DMM32AT_DIRB) == 0) {
- diobits = (s->state & 0x0000ff00) >> 8;
- outb(diobits, dev->iobase + DMM32AT_DIOB);
- }
- if ((devpriv->dio_config & DMM32AT_DIRA) == 0) {
- diobits = (s->state & 0x000000ff);
- outb(diobits, dev->iobase + DMM32AT_DIOA);
- }
+ val = inb(dev->iobase + DMM32AT_DIOA);
+ val |= inb(dev->iobase + DMM32AT_DIOB) << 8;
+ val |= inb(dev->iobase + DMM32AT_DIOC) << 16;
+ s->state = val;
- /* now read the state back in */
- s->state = inb(dev->iobase + DMM32AT_DIOC);
- s->state <<= 8;
- s->state |= inb(dev->iobase + DMM32AT_DIOB);
- s->state <<= 8;
- s->state |= inb(dev->iobase + DMM32AT_DIOA);
- data[1] = s->state;
-
- /* on return, data[1] contains the value of the digital
- * input and output lines. */
- /* data[1]=inw(dev->iobase + DMM32AT_DIO); */
- /* or we could just return the software copy of the output values if
- * it was a purely digital output subdevice */
- /* data[1]=s->state; */
+ data[1] = val;
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 38918a1198a..811c8c59c01 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -260,7 +260,8 @@ static int dt2801_readdata(struct comedi_device *dev, int *data)
static int dt2801_readdata2(struct comedi_device *dev, int *data)
{
- int lb, hb;
+ int lb = 0;
+ int hb = 0;
int ret;
ret = dt2801_readdata(dev, &lb);
@@ -528,23 +529,23 @@ static int dt2801_ao_insn_write(struct comedi_device *dev,
static int dt2801_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int which = 0;
-
- if (s == &dev->subdevices[3])
- which = 1;
+ int which = (s == &dev->subdevices[3]) ? 1 : 0;
+ unsigned int val = 0;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
dt2801_writecmd(dev, DT_C_WRITE_DIG);
dt2801_writedata(dev, which);
dt2801_writedata(dev, s->state);
}
+
dt2801_writecmd(dev, DT_C_READ_DIG);
dt2801_writedata(dev, which);
- dt2801_readdata(dev, data + 1);
+ dt2801_readdata(dev, &val);
+
+ data[1] = val;
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index a41a5716f35..0ca02fa7ba1 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -353,11 +353,11 @@ static int dt2811_di_insn_bits(struct comedi_device *dev,
static int dt2811_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- outb(s->state, dev->iobase + DT2811_DIO);
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + DT2811_DIO);
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index f4a8529239b..bf589936e54 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -80,36 +80,31 @@ static int dt2817_dio_insn_config(struct comedi_device *dev,
static int dt2817_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int changed;
-
- /* It's questionable whether it is more important in
- * a driver like this to be deterministic or fast.
- * We choose fast. */
-
- if (data[0]) {
- changed = s->state;
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
- changed ^= s->state;
- changed &= s->io_bits;
- if (changed & 0x000000ff)
- outb(s->state & 0xff, dev->iobase + DT2817_DATA + 0);
- if (changed & 0x0000ff00)
- outb((s->state >> 8) & 0xff,
- dev->iobase + DT2817_DATA + 1);
- if (changed & 0x00ff0000)
- outb((s->state >> 16) & 0xff,
- dev->iobase + DT2817_DATA + 2);
- if (changed & 0xff000000)
- outb((s->state >> 24) & 0xff,
- dev->iobase + DT2817_DATA + 3);
+ unsigned long iobase = dev->iobase + DT2817_DATA;
+ unsigned int mask;
+ unsigned int val;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ if (mask & 0x000000ff)
+ outb(s->state & 0xff, iobase + 0);
+ if (mask & 0x0000ff00)
+ outb((s->state >> 8) & 0xff, iobase + 1);
+ if (mask & 0x00ff0000)
+ outb((s->state >> 16) & 0xff, iobase + 2);
+ if (mask & 0xff000000)
+ outb((s->state >> 24) & 0xff, iobase + 3);
}
- data[1] = inb(dev->iobase + DT2817_DATA + 0);
- data[1] |= (inb(dev->iobase + DT2817_DATA + 1) << 8);
- data[1] |= (inb(dev->iobase + DT2817_DATA + 2) << 16);
- data[1] |= (inb(dev->iobase + DT2817_DATA + 3) << 24);
+
+ val = inb(iobase + 0);
+ val |= (inb(iobase + 1) << 8);
+ val |= (inb(iobase + 2) << 16);
+ val |= (inb(iobase + 3) << 24);
+
+ data[1] = val;
return insn->n;
}
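dt2817, like dmm32at above, splits its channels across several 8-bit data ports, so after comedi_dio_update_state() reports which channels changed, only the affected ports are rewritten. A simplified sketch of that byte-lane pattern, with placeholder register offsets:

	static void write_changed_bytes(struct comedi_device *dev,
					struct comedi_subdevice *s,
					unsigned int mask)
	{
		unsigned long iobase = dev->iobase;	/* assumed: 4 byte-wide ports */
		int i;

		for (i = 0; i < 4; i++) {
			/* rewrite a port only if one of its 8 channels changed */
			if (mask & (0xffu << (8 * i)))
				outb((s->state >> (8 * i)) & 0xff, iobase + i);
		}
	}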
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index da3ee859bdb..a01e6b55388 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -226,7 +226,7 @@ struct dt282x_private {
const struct comedi_lrange *darangelist[2];
- short ao[2];
+ unsigned short ao[2];
volatile int dacsr; /* software copies of registers */
volatile int adcsr;
@@ -237,7 +237,7 @@ struct dt282x_private {
struct {
int chan;
- short *buf; /* DMA buffer */
+ unsigned short *buf; /* DMA buffer */
volatile int size; /* size of current transfer */
} dma[2];
int dma_maxsize; /* max size of DMA transfer (in bytes) */
@@ -283,7 +283,7 @@ static void dt282x_disable_dma(struct comedi_device *dev);
static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2);
-static void dt282x_munge(struct comedi_device *dev, short *buf,
+static void dt282x_munge(struct comedi_device *dev, unsigned short *buf,
unsigned int nbytes)
{
const struct dt282x_board *board = comedi_board(dev);
@@ -496,9 +496,9 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
#if 0
if (adcsr & DT2821_ADDONE) {
int ret;
- short data;
+ unsigned short data;
- data = (short)inw(dev->iobase + DT2821_ADDAT);
+ data = inw(dev->iobase + DT2821_ADDAT);
data &= (1 << board->adbits) - 1;
if (devpriv->ad_2scomp)
@@ -796,7 +796,7 @@ static int dt282x_ao_insn_write(struct comedi_device *dev,
{
const struct dt282x_board *board = comedi_board(dev);
struct dt282x_private *devpriv = dev->private;
- short d;
+ unsigned short d;
unsigned int chan;
chan = CR_CHAN(insn->chanspec);
@@ -967,14 +967,12 @@ static int dt282x_ao_cancel(struct comedi_device *dev,
static int dt282x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
-
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + DT2821_DIODAT);
- }
+
data[1] = inw(dev->iobase + DT2821_DIODAT);
return insn->n;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 64ef87598b6..292226eeff9 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -331,7 +331,7 @@ static void dt3k_ai_empty_fifo(struct comedi_device *dev,
int rear;
int count;
int i;
- short data;
+ unsigned short data;
front = readw(devpriv->io_addr + DPR_AD_Buf_Front);
count = front - devpriv->ai_front;
@@ -665,13 +665,12 @@ static int dt3k_dio_insn_config(struct comedi_device *dev,
static int dt3k_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[1] & data[0];
+ if (comedi_dio_update_state(s, data))
dt3k_writesingle(dev, SUBS_DOUT, 0, s->state);
- }
+
data[1] = dt3k_readsingle(dev, SUBS_DIN, 0, 0);
return insn->n;
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index b5e6f33dc21..73af600c172 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -85,13 +85,9 @@ for my needs.
#define F020_MASK_DACxCN_DACxEN 0x80
enum {
- /* A/D D/A DI DO CT */
- DT9812_DEVID_DT9812_10, /* 8 2 8 8 1 +/- 10V */
- DT9812_DEVID_DT9812_2PT5, /* 8 2 8 8 1 0-2.44V */
-#if 0
- DT9812_DEVID_DT9813, /* 16 2 4 4 1 +/- 10V */
- DT9812_DEVID_DT9814 /* 24 2 0 0 1 +/- 10V */
-#endif
+ /* A/D D/A DI DO CT */
+ DT9812_DEVID_DT9812_10, /* 8 2 8 8 1 +/- 10V */
+ DT9812_DEVID_DT9812_2PT5, /* 8 2 8 8 1 0-2.44V */
};
enum dt9812_gain {
@@ -580,15 +576,8 @@ static int dt9812_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
dt9812_digital_out(dev, s->state);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index fd525f499f2..f2a9f1c2f3b 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -147,33 +147,23 @@ static int dyna_pci10xx_di_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/* digital output bit interface */
static int dyna_pci10xx_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dyna_pci10xx_private *devpriv = dev->private;
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit.
- * s->state contains the previous write data
- */
mutex_lock(&devpriv->mutex);
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
smp_mb();
outw_p(s->state, devpriv->BADR3);
udelay(10);
}
- /*
- * On return, data[1] contains the value of the digital
- * input and output lines. We just return the software copy of the
- * output values if it was a purely digital output subdevice.
- */
data[1] = s->state;
mutex_unlock(&devpriv->mutex);
+
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index 8d70f64b157..e3ff4c43897 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -25,8 +25,7 @@ Configuration options:
#define FL512_SIZE 16 /* the size of the used memory */
struct fl512_private {
-
- short ao_readback[2];
+ unsigned short ao_readback[2];
};
static const struct comedi_lrange range_fl512 = { 4, {
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 3889d23292d..1e16641ec52 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -118,9 +118,7 @@ struct icp_multi_private {
unsigned char act_chanlist_len; /* len of scanlist */
unsigned char act_chanlist_pos; /* actual position in MUX list */
unsigned int *ai_chanlist; /* actaul chanlist */
- short *ai_data; /* data buffer */
- short ao_data[4]; /* data output buffer */
- short di_data; /* Digital input data */
+ unsigned short ao_data[4]; /* data output buffer */
unsigned int do_data; /* Remember digital output data */
};
@@ -348,18 +346,13 @@ static int icp_multi_insn_bits_di(struct comedi_device *dev,
static int icp_multi_insn_bits_do(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct icp_multi_private *devpriv = dev->private;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
-
- printk(KERN_DEBUG "Digital outputs = %4x \n", s->state);
-
+ if (comedi_dio_update_state(s, data))
writew(s->state, devpriv->io_addr + ICP_MULTI_DO);
- }
data[1] = readw(devpriv->io_addr + ICP_MULTI_DI);
@@ -548,7 +541,6 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->len_chanlist = 16;
s->range_table = &range_digital;
- s->io_bits = 0;
s->insn_bits = icp_multi_insn_bits_di;
s = &dev->subdevices[3];
@@ -558,8 +550,6 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->len_chanlist = 8;
s->range_table = &range_digital;
- s->io_bits = 0xff;
- s->state = 0;
s->insn_bits = icp_multi_insn_bits_do;
s = &dev->subdevices[4];
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 5c3a318b464..8577778441f 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -378,13 +378,10 @@ static int ii20k_dio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct ii20k_private *devpriv = dev->private;
- unsigned int mask = data[0] & s->io_bits; /* outputs only */
- unsigned int bits = data[1];
+ unsigned int mask;
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
if (mask & 0x000000ff)
writeb((s->state >> 0) & 0xff,
devpriv->ioaddr + II20K_DIO0_REG);
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 8f4afadab76..3d12e913592 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -427,7 +427,7 @@ static int xilinx_download(struct comedi_device *dev)
static void me4000_reset(struct comedi_device *dev)
{
struct me4000_info *info = dev->private;
- unsigned long val;
+ unsigned int val;
int chan;
/* Make a hardware reset */
@@ -480,9 +480,9 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
int rang = CR_RANGE(insn->chanspec);
int aref = CR_AREF(insn->chanspec);
- unsigned long entry = 0;
- unsigned long tmp;
- long lval;
+ unsigned int entry = 0;
+ unsigned int tmp;
+ unsigned int lval;
if (insn->n == 0) {
return 0;
@@ -586,7 +586,7 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
static int me4000_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- unsigned long tmp;
+ unsigned int tmp;
/* Stop any running conversion */
tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
@@ -783,7 +783,7 @@ static int ai_prepare(struct comedi_device *dev,
unsigned int scan_ticks, unsigned int chan_ticks)
{
- unsigned long tmp = 0;
+ unsigned int tmp = 0;
/* Write timer arguments */
ai_write_timer(dev, init_ticks, scan_ticks, chan_ticks);
@@ -1108,7 +1108,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
struct comedi_subdevice *s = &dev->subdevices[0];
int i;
int c = 0;
- long lval;
+ unsigned int lval;
if (!dev->attached)
return IRQ_NONE;
@@ -1252,7 +1252,7 @@ static int me4000_ao_insn_write(struct comedi_device *dev,
int chan = CR_CHAN(insn->chanspec);
int rang = CR_RANGE(insn->chanspec);
int aref = CR_AREF(insn->chanspec);
- unsigned long tmp;
+ unsigned int tmp;
if (insn->n == 0) {
return 0;
@@ -1313,29 +1313,12 @@ static int me4000_ao_insn_read(struct comedi_device *dev,
return 1;
}
-/*=============================================================================
- Digital I/O section
- ===========================================================================*/
-
static int me4000_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /*
- * The insn data consists of a mask in data[0] and the new data
- * in data[1]. The mask defines which bits we are concerning about.
- * The new data must be anded with the mask.
- * Each channel corresponds to a bit.
- */
- if (data[0]) {
- /* Check if requested ports are configured for output */
- if ((s->io_bits & data[0]) != data[0])
- return -EIO;
-
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
-
- /* Write out the new digital output lines */
+ if (comedi_dio_update_state(s, data)) {
outl((s->state >> 0) & 0xFF,
dev->iobase + ME4000_DIO_PORT_0_REG);
outl((s->state >> 8) & 0xFF,
@@ -1346,8 +1329,6 @@ static int me4000_dio_insn_bits(struct comedi_device *dev,
dev->iobase + ME4000_DIO_PORT_3_REG);
}
- /* On return, data[1] contains the value of
- the digital input and output lines. */
data[1] = ((inl(dev->iobase + ME4000_DIO_PORT_0_REG) & 0xFF) << 0) |
((inl(dev->iobase + ME4000_DIO_PORT_1_REG) & 0xFF) << 8) |
((inl(dev->iobase + ME4000_DIO_PORT_2_REG) & 0xFF) << 16) |
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index a6f6d4a4658..24ec9ef9b1a 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -222,15 +222,11 @@ static int me_dio_insn_bits(struct comedi_device *dev,
struct me_private_data *dev_private = dev->private;
void __iomem *mmio_porta = dev_private->me_regbase + ME_DIO_PORT_A;
void __iomem *mmio_portb = dev_private->me_regbase + ME_DIO_PORT_B;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
+ unsigned int mask;
unsigned int val;
- mask &= s->io_bits; /* only update the COMEDI_OUTPUT channels */
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
if (mask & 0x0000ffff)
writew((s->state & 0xffff), mmio_porta);
if (mask & 0xffff0000)
@@ -545,7 +541,6 @@ static int me_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = me_dio_insn_bits;
s->insn_config = me_dio_insn_config;
- s->io_bits = 0;
dev_info(dev->class_dev, "%s: %s attached\n",
dev->driver->driver_name, dev->board_name);
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index 9d75ea4e201..3ca755eca28 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -163,11 +163,11 @@ static int multiq3_di_insn_bits(struct comedi_device *dev,
static int multiq3_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
- outw(s->state, dev->iobase + MULTIQ3_DIGOUT_PORT);
+ if (comedi_dio_update_state(s, data))
+ outw(s->state, dev->iobase + MULTIQ3_DIGOUT_PORT);
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index c2745f201f2..85aa9609d6a 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -1,41 +1,33 @@
/*
- comedi/drivers/ni_6527.c
- driver for National Instruments PCI-6527
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_6527
-Description: National Instruments 6527
-Author: ds
-Status: works
-Devices: [National Instruments] PCI-6527 (ni6527), PXI-6527
-Updated: Sat, 25 Jan 2003 13:24:40 -0800
-
-
-*/
+ * ni_6527.c
+ * Comedi driver for National Instruments PCI-6527
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- Manuals (available from ftp://ftp.natinst.com/support/manuals)
-
- 370106b.pdf 6527 Register Level Programmer Manual
-
+ * Driver: ni_6527
+ * Description: National Instruments 6527
+ * Devices: (National Instruments) PCI-6527 [pci-6527]
+ * (National Instruments) PXI-6527 [pxi-6527]
+ * Author: David A. Schleef <ds@schleef.org>
+ * Updated: Sat, 25 Jan 2003 13:24:40 -0800
+ * Status: works
+ *
+ * Configuration Options: not applicable, uses PCI auto config
*/
-#define DEBUG 1
-#define DEBUG_FLAGS
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -43,39 +35,41 @@ Updated: Sat, 25 Jan 2003 13:24:40 -0800
#include "../comedidev.h"
#include "comedi_fc.h"
-#include "mite.h"
-
-#define DRIVER_NAME "ni_6527"
-
-#define NI6527_DIO_SIZE 4096
-#define NI6527_MITE_SIZE 4096
-
-#define Port_Register(x) (0x00+(x))
-#define ID_Register 0x06
-
-#define Clear_Register 0x07
-#define ClrEdge 0x08
-#define ClrOverflow 0x04
-#define ClrFilter 0x02
-#define ClrInterval 0x01
-#define Filter_Interval(x) (0x08+(x))
-#define Filter_Enable(x) (0x0c+(x))
-
-#define Change_Status 0x14
-#define MasterInterruptStatus 0x04
-#define Overflow 0x02
-#define EdgeStatus 0x01
-
-#define Master_Interrupt_Control 0x15
-#define FallingEdgeIntEnable 0x10
-#define RisingEdgeIntEnable 0x08
-#define MasterInterruptEnable 0x04
-#define OverflowIntEnable 0x02
-#define EdgeIntEnable 0x01
-
-#define Rising_Edge_Detection_Enable(x) (0x018+(x))
-#define Falling_Edge_Detection_Enable(x) (0x020+(x))
+/*
+ * PCI BAR1 - Register memory map
+ *
+ * Manuals (available from ftp://ftp.natinst.com/support/manuals)
+ * 370106b.pdf 6527 Register Level Programmer Manual
+ */
+#define NI6527_DI_REG(x) (0x00 + (x))
+#define NI6527_DO_REG(x) (0x03 + (x))
+#define NI6527_ID_REG 0x06
+#define NI6527_CLR_REG 0x07
+#define NI6527_CLR_EDGE (1 << 3)
+#define NI6527_CLR_OVERFLOW (1 << 2)
+#define NI6527_CLR_FILT (1 << 1)
+#define NI6527_CLR_INTERVAL (1 << 0)
+#define NI6527_CLR_IRQS (NI6527_CLR_EDGE | NI6527_CLR_OVERFLOW)
+#define NI6527_CLR_RESET_FILT (NI6527_CLR_FILT | NI6527_CLR_INTERVAL)
+#define NI6527_FILT_INTERVAL_REG(x) (0x08 + (x))
+#define NI6527_FILT_ENA_REG(x) (0x0c + (x))
+#define NI6527_STATUS_REG 0x14
+#define NI6527_STATUS_IRQ (1 << 2)
+#define NI6527_STATUS_OVERFLOW (1 << 1)
+#define NI6527_STATUS_EDGE (1 << 0)
+#define NI6527_CTRL_REG 0x15
+#define NI6527_CTRL_FALLING (1 << 4)
+#define NI6527_CTRL_RISING (1 << 3)
+#define NI6527_CTRL_IRQ (1 << 2)
+#define NI6527_CTRL_OVERFLOW (1 << 1)
+#define NI6527_CTRL_EDGE (1 << 0)
+#define NI6527_CTRL_DISABLE_IRQS 0
+#define NI6527_CTRL_ENABLE_IRQS (NI6527_CTRL_FALLING | \
+ NI6527_CTRL_RISING | \
+ NI6527_CTRL_IRQ | NI6527_CTRL_EDGE)
+#define NI6527_RISING_EDGE_REG(x) (0x18 + (x))
+#define NI6527_FALLING_EDGE_REG(x) (0x20 + (x))
enum ni6527_boardid {
BOARD_PCI6527,
@@ -96,96 +90,113 @@ static const struct ni6527_board ni6527_boards[] = {
};
struct ni6527_private {
- struct mite_struct *mite;
+ void __iomem *mmio_base;
unsigned int filter_interval;
unsigned int filter_enable;
};
+static void ni6527_set_filter_interval(struct comedi_device *dev,
+ unsigned int val)
+{
+ struct ni6527_private *devpriv = dev->private;
+ void __iomem *mmio = devpriv->mmio_base;
+
+ if (val != devpriv->filter_interval) {
+ writeb(val & 0xff, mmio + NI6527_FILT_INTERVAL_REG(0));
+ writeb((val >> 8) & 0xff, mmio + NI6527_FILT_INTERVAL_REG(1));
+ writeb((val >> 16) & 0x0f, mmio + NI6527_FILT_INTERVAL_REG(2));
+
+ writeb(NI6527_CLR_INTERVAL, mmio + NI6527_CLR_REG);
+
+ devpriv->filter_interval = val;
+ }
+}
+
+static void ni6527_set_filter_enable(struct comedi_device *dev,
+ unsigned int val)
+{
+ struct ni6527_private *devpriv = dev->private;
+ void __iomem *mmio = devpriv->mmio_base;
+
+ writeb(val & 0xff, mmio + NI6527_FILT_ENA_REG(0));
+ writeb((val >> 8) & 0xff, mmio + NI6527_FILT_ENA_REG(1));
+ writeb((val >> 16) & 0xff, mmio + NI6527_FILT_ENA_REG(2));
+}
+
static int ni6527_di_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni6527_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int interval;
- if (insn->n != 2)
- return -EINVAL;
-
- if (data[0] != INSN_CONFIG_FILTER)
- return -EINVAL;
-
- if (data[1]) {
+ switch (data[0]) {
+ case INSN_CONFIG_FILTER:
+ /*
+ * The deglitch filter interval is specified in nanoseconds.
+ * The hardware supports intervals in 200ns increments. Round
+ * the user value to the nearest increment and return the
+ * actual interval.
+ */
interval = (data[1] + 100) / 200;
data[1] = interval * 200;
- if (interval != devpriv->filter_interval) {
- writeb(interval & 0xff,
- devpriv->mite->daq_io_addr + Filter_Interval(0));
- writeb((interval >> 8) & 0xff,
- devpriv->mite->daq_io_addr + Filter_Interval(1));
- writeb((interval >> 16) & 0x0f,
- devpriv->mite->daq_io_addr + Filter_Interval(2));
-
- writeb(ClrInterval,
- devpriv->mite->daq_io_addr + Clear_Register);
-
- devpriv->filter_interval = interval;
+ if (interval) {
+ ni6527_set_filter_interval(dev, interval);
+ devpriv->filter_enable |= 1 << chan;
+ } else {
+ devpriv->filter_enable &= ~(1 << chan);
}
-
- devpriv->filter_enable |= 1 << chan;
- } else {
- devpriv->filter_enable &= ~(1 << chan);
+ ni6527_set_filter_enable(dev, devpriv->filter_enable);
+ break;
+ default:
+ return -EINVAL;
}
- writeb(devpriv->filter_enable,
- devpriv->mite->daq_io_addr + Filter_Enable(0));
- writeb(devpriv->filter_enable >> 8,
- devpriv->mite->daq_io_addr + Filter_Enable(1));
- writeb(devpriv->filter_enable >> 16,
- devpriv->mite->daq_io_addr + Filter_Enable(2));
-
- return 2;
+ return insn->n;
}
static int ni6527_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni6527_private *devpriv = dev->private;
+ void __iomem *mmio = devpriv->mmio_base;
+ unsigned int val;
- data[1] = readb(devpriv->mite->daq_io_addr + Port_Register(0));
- data[1] |= readb(devpriv->mite->daq_io_addr + Port_Register(1)) << 8;
- data[1] |= readb(devpriv->mite->daq_io_addr + Port_Register(2)) << 16;
+ val = readb(mmio + NI6527_DI_REG(0));
+ val |= (readb(mmio + NI6527_DI_REG(1)) << 8);
+ val |= (readb(mmio + NI6527_DI_REG(2)) << 16);
+
+ data[1] = val;
return insn->n;
}
static int ni6527_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni6527_private *devpriv = dev->private;
-
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
-
- /* The open relay state on the board cooresponds to 1,
- * but in Comedi, it is represented by 0. */
- if (data[0] & 0x0000ff) {
- writeb((s->state ^ 0xff),
- devpriv->mite->daq_io_addr + Port_Register(3));
- }
- if (data[0] & 0x00ff00) {
- writeb((s->state >> 8) ^ 0xff,
- devpriv->mite->daq_io_addr + Port_Register(4));
- }
- if (data[0] & 0xff0000) {
- writeb((s->state >> 16) ^ 0xff,
- devpriv->mite->daq_io_addr + Port_Register(5));
- }
+ void __iomem *mmio = devpriv->mmio_base;
+ unsigned int mask;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ /* Outputs are inverted */
+ unsigned int val = s->state ^ 0xffffff;
+
+ if (mask & 0x0000ff)
+ writeb(val & 0xff, mmio + NI6527_DO_REG(0));
+ if (mask & 0x00ff00)
+ writeb((val >> 8) & 0xff, mmio + NI6527_DO_REG(1));
+ if (mask & 0xff0000)
+ writeb((val >> 16) & 0xff, mmio + NI6527_DO_REG(2));
}
+
data[1] = s->state;
return insn->n;
@@ -195,21 +206,22 @@ static irqreturn_t ni6527_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct ni6527_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[2];
+ struct comedi_subdevice *s = dev->read_subdev;
+ void __iomem *mmio = devpriv->mmio_base;
unsigned int status;
- status = readb(devpriv->mite->daq_io_addr + Change_Status);
- if ((status & MasterInterruptStatus) == 0)
- return IRQ_NONE;
- if ((status & EdgeStatus) == 0)
+ status = readb(mmio + NI6527_STATUS_REG);
+ if (!(status & NI6527_STATUS_IRQ))
return IRQ_NONE;
- writeb(ClrEdge | ClrOverflow,
- devpriv->mite->daq_io_addr + Clear_Register);
+ if (status & NI6527_STATUS_EDGE) {
+ comedi_buf_put(s->async, 0);
+ s->async->events |= COMEDI_CB_EOS;
+ comedi_event(dev, s);
+ }
+
+ writeb(NI6527_CLR_IRQS, mmio + NI6527_CLR_REG);
- comedi_buf_put(s->async, 0);
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
return IRQ_HANDLED;
}
@@ -259,13 +271,10 @@ static int ni6527_intr_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni6527_private *devpriv = dev->private;
- /* struct comedi_cmd *cmd = &s->async->cmd; */
+ void __iomem *mmio = devpriv->mmio_base;
- writeb(ClrEdge | ClrOverflow,
- devpriv->mite->daq_io_addr + Clear_Register);
- writeb(FallingEdgeIntEnable | RisingEdgeIntEnable |
- MasterInterruptEnable | EdgeIntEnable,
- devpriv->mite->daq_io_addr + Master_Interrupt_Control);
+ writeb(NI6527_CLR_IRQS, mmio + NI6527_CLR_REG);
+ writeb(NI6527_CTRL_ENABLE_IRQS, mmio + NI6527_CTRL_REG);
return 0;
}
@@ -274,8 +283,9 @@ static int ni6527_intr_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni6527_private *devpriv = dev->private;
+ void __iomem *mmio = devpriv->mmio_base;
- writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control);
+ writeb(NI6527_CTRL_DISABLE_IRQS, mmio + NI6527_CTRL_REG);
return 0;
}
@@ -288,32 +298,54 @@ static int ni6527_intr_insn_bits(struct comedi_device *dev,
return insn->n;
}
+static void ni6527_set_edge_detection(struct comedi_device *dev,
+ unsigned int rising,
+ unsigned int falling)
+{
+ struct ni6527_private *devpriv = dev->private;
+ void __iomem *mmio = devpriv->mmio_base;
+
+ /* enable rising-edge detection channels */
+ writeb(rising & 0xff, mmio + NI6527_RISING_EDGE_REG(0));
+ writeb((rising >> 8) & 0xff, mmio + NI6527_RISING_EDGE_REG(1));
+ writeb((rising >> 16) & 0xff, mmio + NI6527_RISING_EDGE_REG(2));
+
+ /* enable falling-edge detection channels */
+ writeb(falling & 0xff, mmio + NI6527_FALLING_EDGE_REG(0));
+ writeb((falling >> 8) & 0xff, mmio + NI6527_FALLING_EDGE_REG(1));
+ writeb((falling >> 16) & 0xff, mmio + NI6527_FALLING_EDGE_REG(2));
+}
+
static int ni6527_intr_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ switch (data[0]) {
+ case INSN_CONFIG_CHANGE_NOTIFY:
+ /* check_insn_config_length() does not check this instruction */
+ if (insn->n != 3)
+ return -EINVAL;
+ ni6527_set_edge_detection(dev, data[1], data[2]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static void ni6527_reset(struct comedi_device *dev)
{
struct ni6527_private *devpriv = dev->private;
+ void __iomem *mmio = devpriv->mmio_base;
- if (insn->n < 1)
- return -EINVAL;
- if (data[0] != INSN_CONFIG_CHANGE_NOTIFY)
- return -EINVAL;
+ /* disable deglitch filters on all channels */
+ ni6527_set_filter_enable(dev, 0);
- writeb(data[1],
- devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(0));
- writeb(data[1] >> 8,
- devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(1));
- writeb(data[1] >> 16,
- devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(2));
-
- writeb(data[2],
- devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(0));
- writeb(data[2] >> 8,
- devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(1));
- writeb(data[2] >> 16,
- devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(2));
-
- return 2;
+ writeb(NI6527_CLR_IRQS | NI6527_CLR_RESET_FILT,
+ mmio + NI6527_CLR_REG);
+ writeb(NI6527_CTRL_DISABLE_IRQS, mmio + NI6527_CTRL_REG);
}
static int ni6527_auto_attach(struct comedi_device *dev,
@@ -332,75 +364,69 @@ static int ni6527_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
ret = comedi_pci_enable(dev);
if (ret)
return ret;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
+ devpriv->mmio_base = pci_ioremap_bar(pcidev, 1);
+ if (!devpriv->mmio_base)
return -ENOMEM;
- devpriv->mite = mite_alloc(pcidev);
- if (!devpriv->mite)
- return -ENOMEM;
+ /* make sure this is actually a 6527 device */
+ if (readb(devpriv->mmio_base + NI6527_ID_REG) != 0x27)
+ return -ENODEV;
- ret = mite_setup(devpriv->mite);
- if (ret < 0) {
- dev_err(dev->class_dev, "error setting up mite\n");
- return ret;
- }
+ ni6527_reset(dev);
- dev_info(dev->class_dev, "board: %s, ID=0x%02x\n", dev->board_name,
- readb(devpriv->mite->daq_io_addr + ID_Register));
+ ret = request_irq(pcidev->irq, ni6527_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
+ /* Digital Input subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 24;
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_config = ni6527_di_insn_config;
- s->insn_bits = ni6527_di_insn_bits;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = ni6527_di_insn_config;
+ s->insn_bits = ni6527_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->range_table = &range_unknown; /* FIXME: actually conductance */
- s->maxdata = 1;
- s->insn_bits = ni6527_do_insn_bits;
-
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ni6527_do_insn_bits;
+
+ /* Edge detection interrupt subdevice */
s = &dev->subdevices[2];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->range_table = &range_unknown;
- s->maxdata = 1;
- s->do_cmdtest = ni6527_intr_cmdtest;
- s->do_cmd = ni6527_intr_cmd;
- s->cancel = ni6527_intr_cancel;
- s->insn_bits = ni6527_intr_insn_bits;
- s->insn_config = ni6527_intr_insn_config;
-
- writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(0));
- writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(1));
- writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(2));
-
- writeb(ClrEdge | ClrOverflow | ClrFilter | ClrInterval,
- devpriv->mite->daq_io_addr + Clear_Register);
- writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control);
-
- ret = request_irq(mite_irq(devpriv->mite), ni6527_interrupt,
- IRQF_SHARED, DRIVER_NAME, dev);
- if (ret < 0)
- dev_warn(dev->class_dev, "irq not available\n");
- else
- dev->irq = mite_irq(devpriv->mite);
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = ni6527_intr_insn_config;
+ s->insn_bits = ni6527_intr_insn_bits;
+ s->do_cmdtest = ni6527_intr_cmdtest;
+ s->do_cmd = ni6527_intr_cmd;
+ s->cancel = ni6527_intr_cancel;
+ } else {
+ s->type = COMEDI_SUBD_UNUSED;
+ }
return 0;
}
@@ -409,23 +435,18 @@ static void ni6527_detach(struct comedi_device *dev)
{
struct ni6527_private *devpriv = dev->private;
- if (devpriv && devpriv->mite && devpriv->mite->daq_io_addr)
- writeb(0x00,
- devpriv->mite->daq_io_addr + Master_Interrupt_Control);
+ if (devpriv && devpriv->mmio_base)
+ ni6527_reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
- if (devpriv && devpriv->mite) {
- mite_unsetup(devpriv->mite);
- mite_free(devpriv->mite);
- }
comedi_pci_disable(dev);
}
static struct comedi_driver ni6527_driver = {
- .driver_name = DRIVER_NAME,
- .module = THIS_MODULE,
- .auto_attach = ni6527_auto_attach,
- .detach = ni6527_detach,
+ .driver_name = "ni_6527",
+ .module = THIS_MODULE,
+ .auto_attach = ni6527_auto_attach,
+ .detach = ni6527_detach,
};
static int ni6527_pci_probe(struct pci_dev *dev,
@@ -442,7 +463,7 @@ static DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = {
MODULE_DEVICE_TABLE(pci, ni6527_pci_table);
static struct pci_driver ni6527_pci_driver = {
- .name = DRIVER_NAME,
+ .name = "ni_6527",
.id_table = ni6527_pci_table,
.probe = ni6527_pci_probe,
.remove = comedi_pci_auto_unconfig,
@@ -450,5 +471,5 @@ static struct pci_driver ni6527_pci_driver = {
module_comedi_pci_driver(ni6527_driver, ni6527_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for National Instruments PCI-6527");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 3607336dafe..8a991dcab24 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1213,7 +1213,6 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = ni_660x_dio_insn_bits;
s->insn_config = ni_660x_dio_insn_config;
- s->io_bits = 0; /* all bits default to input */
/* we use the ioconfig registers to control dio direction, so zero
output enables in stc dio control reg */
ni_660x_write_register(dev, 0, 0, STCDIOControl);
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e2926ce3fb2..e4414cf110e 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -136,20 +136,15 @@ static int ni_670x_ao_rinsn(struct comedi_device *dev,
static int ni_670x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni_670x_private *devpriv = dev->private;
void __iomem *io_addr = devpriv->mite->daq_io_addr +
DIO_PORT0_DATA_OFFSET;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
writel(s->state, io_addr);
- }
data[1] = readl(io_addr);
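/*
 * Editor's note, not part of the patch: several hunks in this series replace
 * the open-coded mask/bits handling with comedi_dio_update_state().  For
 * reference, the helper in the comedi core of this era behaves roughly as
 * sketched below: it masks data[0] to the subdevice's channels, folds the
 * requested bits into the shadow s->state, and returns the mask so the
 * caller knows whether any output register needs to be rewritten.
 */
static unsigned int sketch_dio_update_state(struct comedi_subdevice *s,
					    unsigned int *data)
{
	unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
						 : 0xffffffff;
	unsigned int mask = data[0] & chanmask;
	unsigned int bits = data[1];

	if (mask) {
		s->state &= ~mask;
		s->state |= (bits & mask);
	}

	return mask;
}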
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 2512ce8dfca..63c847932eb 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -154,7 +154,7 @@ struct a2150_private {
volatile unsigned int count; /* number of data points left to be taken */
unsigned int dma; /* dma channel */
- s16 *dma_buffer; /* dma buffer */
+ uint16_t *dma_buffer; /* dma buffer */
unsigned int dma_transfer_size; /* size in bytes of dma transfers */
int irq_dma_bits; /* irq/dma register bits */
int config_bits; /* config register bits */
@@ -192,7 +192,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
struct comedi_async *async;
struct comedi_cmd *cmd;
unsigned int max_points, num_points, residue, leftover;
- short dpnt;
+ unsigned short dpnt;
static const int sample_size = sizeof(devpriv->dma_buffer[0]);
if (!dev->attached) {
@@ -684,13 +684,12 @@ static int a2150_set_chanlist(struct comedi_device *dev,
devpriv->config_bits |= CHANNEL_BITS(0x4 | start_channel);
break;
case 2:
- if (start_channel == 0) {
+ if (start_channel == 0)
devpriv->config_bits |= CHANNEL_BITS(0x2);
- } else if (start_channel == 2) {
+ else if (start_channel == 2)
devpriv->config_bits |= CHANNEL_BITS(0x3);
- } else {
+ else
return -1;
- }
break;
case 4:
devpriv->config_bits |= CHANNEL_BITS(0x1);
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index b9122fd835e..10e3e9475ee 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -1,247 +1,193 @@
/*
- comedi/drivers/ni_at_ao.c
- Driver for NI AT-AO-6/10 boards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_at_ao
-Description: National Instruments AT-AO-6/10
-Devices: [National Instruments] AT-AO-6 (at-ao-6), AT-AO-10 (at-ao-10)
-Status: should work
-Author: ds
-Updated: Sun Dec 26 12:26:28 EST 2004
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (unused)
- [2] - DMA (unused)
- [3] - analog output range, set by jumpers on hardware (0 for -10 to 10V
- bipolar, 1 for 0V to 10V unipolar)
-
-*/
+ * ni_at_ao.c
+ * Driver for NI AT-AO-6/10 boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
- * Register-level programming information can be found in NI
- * document 320379.pdf.
+ * Driver: ni_at_ao
+ * Description: National Instruments AT-AO-6/10
+ * Devices: (National Instruments) AT-AO-6 [at-ao-6]
+ * (National Instruments) AT-AO-10 [at-ao-10]
+ * Status: should work
+ * Author: David A. Schleef <ds@schleef.org>
+ * Updated: Sun Dec 26 12:26:28 EST 2004
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (unused)
+ * [2] - DMA (unused)
+ * [3] - analog output range, set by jumpers on hardware
+ * 0 for -10 to 10V bipolar
+ * 1 for 0V to 10V unipolar
*/
#include <linux/module.h>
-#include "../comedidev.h"
-/* board egisters */
-/* registers with _2_ are accessed when GRP2WR is set in CFG1 */
+#include "../comedidev.h"
-#define ATAO_SIZE 0x20
-
-#define ATAO_2_DMATCCLR 0x00 /* W 16 */
-#define ATAO_DIN 0x00 /* R 16 */
-#define ATAO_DOUT 0x00 /* W 16 */
-
-#define ATAO_CFG2 0x02 /* W 16 */
-#define CALLD1 0x8000
-#define CALLD0 0x4000
-#define FFRTEN 0x2000
-#define DAC2S8 0x1000
-#define DAC2S6 0x0800
-#define DAC2S4 0x0400
-#define DAC2S2 0x0200
-#define DAC2S0 0x0100
-#define LDAC8 0x0080
-#define LDAC6 0x0040
-#define LDAC4 0x0020
-#define LDAC2 0x0010
-#define LDAC0 0x0008
-#define PROMEN 0x0004
-#define SCLK 0x0002
-#define SDATA 0x0001
-
-#define ATAO_2_INT1CLR 0x02 /* W 16 */
-
-#define ATAO_CFG3 0x04 /* W 16 */
-#define DMAMODE 0x0040
-#define CLKOUT 0x0020
-#define RCLKEN 0x0010
-#define DOUTEN2 0x0008
-#define DOUTEN1 0x0004
-#define EN2_5V 0x0002
-#define SCANEN 0x0001
-
-#define ATAO_2_INT2CLR 0x04 /* W 16 */
-
-#define ATAO_82C53_BASE 0x06 /* RW 8 */
-
-#define ATAO_82C53_CNTR1 0x06 /* RW 8 */
-#define ATAO_82C53_CNTR2 0x07 /* RW 8 */
-#define ATAO_82C53_CNTR3 0x08 /* RW 8 */
-#define ATAO_82C53_CNTRCMD 0x09 /* W 8 */
-#define CNTRSEL1 0x80
-#define CNTRSEL0 0x40
-#define RWSEL1 0x20
-#define RWSEL0 0x10
-#define MODESEL2 0x08
-#define MODESEL1 0x04
-#define MODESEL0 0x02
-#define BCDSEL 0x01
- /* read-back command */
-#define COUNT 0x20
-#define STATUS 0x10
-#define CNTR3 0x08
-#define CNTR2 0x04
-#define CNTR1 0x02
- /* status */
-#define OUT 0x80
-#define _NULL 0x40
-#define RW1 0x20
-#define RW0 0x10
-#define MODE2 0x08
-#define MODE1 0x04
-#define MODE0 0x02
-#define BCD 0x01
-
-#define ATAO_2_RTSISHFT 0x06 /* W 8 */
-#define RSI 0x01
-
-#define ATAO_2_RTSISTRB 0x07 /* W 8 */
-
-#define ATAO_CFG1 0x0a /* W 16 */
-#define EXTINT2EN 0x8000
-#define EXTINT1EN 0x4000
-#define CNTINT2EN 0x2000
-#define CNTINT1EN 0x1000
-#define TCINTEN 0x0800
-#define CNT1SRC 0x0400
-#define CNT2SRC 0x0200
-#define FIFOEN 0x0100
-#define GRP2WR 0x0080
-#define EXTUPDEN 0x0040
-#define DMARQ 0x0020
-#define DMAEN 0x0010
-#define CH_mask 0x000f
-#define ATAO_STATUS 0x0a /* R 16 */
-#define FH 0x0040
-#define FE 0x0020
-#define FF 0x0010
-#define INT2 0x0008
-#define INT1 0x0004
-#define TCINT 0x0002
-#define PROMOUT 0x0001
-
-#define ATAO_FIFO_WRITE 0x0c /* W 16 */
-#define ATAO_FIFO_CLEAR 0x0c /* R 16 */
-#define ATAO_DACn(x) (0x0c + 2*(x)) /* W */
+#include "8253.h"
/*
- * Board descriptions for two imaginary boards. Describing the
- * boards in this way is optional, and completely driver-dependent.
- * Some drivers use arrays such as this, other do not.
+ * Register map
+ *
+ * Register-level programming information can be found in NI
+ * document 320379.pdf.
*/
+#define ATAO_DIO_REG 0x00
+#define ATAO_CFG2_REG 0x02
+#define ATAO_CFG2_CALLD_NOP (0 << 14)
+#define ATAO_CFG2_CALLD(x) ((((x) >> 3) + 1) << 14)
+#define ATAO_CFG2_FFRTEN (1 << 13)
+#define ATAO_CFG2_DACS(x) (1 << (((x) / 2) + 8))
+#define ATAO_CFG2_LDAC(x) (1 << (((x) / 2) + 3))
+#define ATAO_CFG2_PROMEN (1 << 2)
+#define ATAO_CFG2_SCLK (1 << 1)
+#define ATAO_CFG2_SDATA (1 << 0)
+#define ATAO_CFG3_REG 0x04
+#define ATAO_CFG3_DMAMODE (1 << 6)
+#define ATAO_CFG3_CLKOUT (1 << 5)
+#define ATAO_CFG3_RCLKEN (1 << 4)
+#define ATAO_CFG3_DOUTEN2 (1 << 3)
+#define ATAO_CFG3_DOUTEN1 (1 << 2)
+#define ATAO_CFG3_EN2_5V (1 << 1)
+#define ATAO_CFG3_SCANEN (1 << 0)
+#define ATAO_82C53_BASE 0x06
+#define ATAO_CFG1_REG 0x0a
+#define ATAO_CFG1_EXTINT2EN (1 << 15)
+#define ATAO_CFG1_EXTINT1EN (1 << 14)
+#define ATAO_CFG1_CNTINT2EN (1 << 13)
+#define ATAO_CFG1_CNTINT1EN (1 << 12)
+#define ATAO_CFG1_TCINTEN (1 << 11)
+#define ATAO_CFG1_CNT1SRC (1 << 10)
+#define ATAO_CFG1_CNT2SRC (1 << 9)
+#define ATAO_CFG1_FIFOEN (1 << 8)
+#define ATAO_CFG1_GRP2WR (1 << 7)
+#define ATAO_CFG1_EXTUPDEN (1 << 6)
+#define ATAO_CFG1_DMARQ (1 << 5)
+#define ATAO_CFG1_DMAEN (1 << 4)
+#define ATAO_CFG1_CH(x) (((x) & 0xf) << 0)
+#define ATAO_STATUS_REG 0x0a
+#define ATAO_STATUS_FH (1 << 6)
+#define ATAO_STATUS_FE (1 << 5)
+#define ATAO_STATUS_FF (1 << 4)
+#define ATAO_STATUS_INT2 (1 << 3)
+#define ATAO_STATUS_INT1 (1 << 2)
+#define ATAO_STATUS_TCINT (1 << 1)
+#define ATAO_STATUS_PROMOUT (1 << 0)
+#define ATAO_FIFO_WRITE_REG 0x0c
+#define ATAO_FIFO_CLEAR_REG 0x0c
+#define ATAO_AO_REG(x) (0x0c + ((x) * 2))
+
+/* registers with _2_ are accessed when GRP2WR is set in CFG1 */
+#define ATAO_2_DMATCCLR_REG 0x00
+#define ATAO_2_INT1CLR_REG 0x02
+#define ATAO_2_INT2CLR_REG 0x04
+#define ATAO_2_RTSISHFT_REG 0x06
+#define ATAO_2_RTSISHFT_RSI (1 << 0)
+#define ATAO_2_RTSISTRB_REG 0x07
+
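/*
 * Editor's note, not part of the patch: a worked example of the CFG2 helper
 * macros above.  Bits 15:14 of CFG2 address one of the three DAC8800
 * TrimDACs (0 = none, 1 = DAC0, 2 = DAC1, 3 = DAC2), so caldac channels
 * 0-7 select DAC0, 8-15 select DAC1 and 16-20 select DAC2:
 *
 *   ATAO_CFG2_CALLD(5)  == ((( 5 >> 3) + 1) << 14) == 0x4000   -> DAC0
 *   ATAO_CFG2_CALLD(12) == (((12 >> 3) + 1) << 14) == 0x8000   -> DAC1
 *   ATAO_CFG2_CALLD(20) == (((20 >> 3) + 1) << 14) == 0xc000   -> DAC2
 *
 * Likewise ATAO_CFG2_DACS(x) and ATAO_CFG2_LDAC(x) pick the select/load bit
 * for the AO DAC pair that serves channel x (channels 0-1 use the lowest
 * bit of each field, 2-3 the next, and so on).
 */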
struct atao_board {
const char *name;
int n_ao_chans;
};
-struct atao_private {
+static const struct atao_board atao_boards[] = {
+ {
+ .name = "at-ao-6",
+ .n_ao_chans = 6,
+ }, {
+ .name = "at-ao-10",
+ .n_ao_chans = 10,
+ },
+};
+struct atao_private {
unsigned short cfg1;
- unsigned short cfg2;
unsigned short cfg3;
/* Used for AO readback */
unsigned int ao_readback[10];
+
+ /* Used for caldac readback */
+ unsigned char caldac[21];
};
-static void atao_reset(struct comedi_device *dev)
+static void atao_select_reg_group(struct comedi_device *dev, int group)
{
struct atao_private *devpriv = dev->private;
- /* This is the reset sequence described in the manual */
-
- devpriv->cfg1 = 0;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
-
- outb(RWSEL0 | MODESEL2, dev->iobase + ATAO_82C53_CNTRCMD);
- outb(0x03, dev->iobase + ATAO_82C53_CNTR1);
- outb(CNTRSEL0 | RWSEL0 | MODESEL2, dev->iobase + ATAO_82C53_CNTRCMD);
-
- devpriv->cfg2 = 0;
- outw(devpriv->cfg2, dev->iobase + ATAO_CFG2);
-
- devpriv->cfg3 = 0;
- outw(devpriv->cfg3, dev->iobase + ATAO_CFG3);
-
- inw(dev->iobase + ATAO_FIFO_CLEAR);
-
- devpriv->cfg1 |= GRP2WR;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
-
- outw(0, dev->iobase + ATAO_2_INT1CLR);
- outw(0, dev->iobase + ATAO_2_INT2CLR);
- outw(0, dev->iobase + ATAO_2_DMATCCLR);
-
- devpriv->cfg1 &= ~GRP2WR;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
+ if (group)
+ devpriv->cfg1 |= ATAO_CFG1_GRP2WR;
+ else
+ devpriv->cfg1 &= ~ATAO_CFG1_GRP2WR;
+ outw(devpriv->cfg1, dev->iobase + ATAO_CFG1_REG);
}
-static int atao_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int atao_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct atao_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
int i;
- int chan = CR_CHAN(insn->chanspec);
- short bits;
+
+ if (chan == 0)
+ atao_select_reg_group(dev, 1);
for (i = 0; i < insn->n; i++) {
- bits = data[i] - 0x800;
- if (chan == 0) {
- devpriv->cfg1 |= GRP2WR;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
- }
- outw(bits, dev->iobase + ATAO_DACn(chan));
- if (chan == 0) {
- devpriv->cfg1 &= ~GRP2WR;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
- }
- devpriv->ao_readback[chan] = data[i];
+ val = data[i];
+ devpriv->ao_readback[chan] = val;
+
+ /* munge offset binary (unsigned) to two's complement */
+ val = comedi_offset_munge(s, val);
+ outw(val, dev->iobase + ATAO_AO_REG(chan));
}
- return i;
+ if (chan == 0)
+ atao_select_reg_group(dev, 0);
+
+ return insn->n;
}
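/*
 * Editor's note, not part of the patch: comedi_offset_munge() converts the
 * unsigned offset-binary value comedi hands to the driver into the two's
 * complement form the AT-AO DACs expect.  In the comedi core of this era it
 * amounts to flipping the most significant data bit, i.e.
 *
 *   munged = val ^ s->maxdata ^ (s->maxdata >> 1);
 *
 * so with maxdata = 0x0fff:  0x000 -> 0x800 (full negative),
 * 0x800 -> 0x000 (mid scale), 0xfff -> 0x7ff (full positive).  The removed
 * code achieved the same result by subtracting 0x800 and letting the
 * 12-bit value wrap.
 */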
-static int atao_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int atao_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct atao_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- int chan = CR_CHAN(insn->chanspec);
for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- return i;
+ return insn->n;
}
static int atao_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- outw(s->state, dev->iobase + ATAO_DOUT);
- }
+ if (comedi_dio_update_state(s, data))
+ outw(s->state, dev->iobase + ATAO_DIO_REG);
- data[1] = inw(dev->iobase + ATAO_DIN);
+ data[1] = inw(dev->iobase + ATAO_DIO_REG);
return insn->n;
}
@@ -266,57 +212,128 @@ static int atao_dio_insn_config(struct comedi_device *dev,
return ret;
if (s->io_bits & 0x0f)
- devpriv->cfg3 |= DOUTEN1;
+ devpriv->cfg3 |= ATAO_CFG3_DOUTEN1;
else
- devpriv->cfg3 &= ~DOUTEN1;
+ devpriv->cfg3 &= ~ATAO_CFG3_DOUTEN1;
if (s->io_bits & 0xf0)
- devpriv->cfg3 |= DOUTEN2;
+ devpriv->cfg3 |= ATAO_CFG3_DOUTEN2;
else
- devpriv->cfg3 &= ~DOUTEN2;
+ devpriv->cfg3 &= ~ATAO_CFG3_DOUTEN2;
- outw(devpriv->cfg3, dev->iobase + ATAO_CFG3);
+ outw(devpriv->cfg3, dev->iobase + ATAO_CFG3_REG);
return insn->n;
}
/*
- * Figure 2-1 in the manual shows 3 chips labeled DAC8800, which
- * are 8-channel 8-bit DACs. These are most likely the calibration
- * DACs. It is not explicitly stated in the manual how to access
- * the caldacs, but we can guess.
+ * There are three DAC8800 TrimDACs on the board. These are 8-channel,
+ * 8-bit DACs that are used to calibrate the Analog Output channels.
+ * The factory default calibration values are stored in the EEPROM.
+ * The TrimDACs, and EEPROM addresses, are mapped as:
+ *
+ * Channel EEPROM Description
+ * ----------------- ------ -----------------------------------
+ * 0 - DAC0 Chan 0 0x30 AO Channel 0 Offset
+ * 1 - DAC0 Chan 1 0x31 AO Channel 0 Gain
+ * 2 - DAC0 Chan 2 0x32 AO Channel 1 Offset
+ * 3 - DAC0 Chan 3 0x33 AO Channel 1 Gain
+ * 4 - DAC0 Chan 4 0x34 AO Channel 2 Offset
+ * 5 - DAC0 Chan 5 0x35 AO Channel 2 Gain
+ * 6 - DAC0 Chan 6 0x36 AO Channel 3 Offset
+ * 7 - DAC0 Chan 7 0x37 AO Channel 3 Gain
+ * 8 - DAC1 Chan 0 0x38 AO Channel 4 Offset
+ * 9 - DAC1 Chan 1 0x39 AO Channel 4 Gain
+ * 10 - DAC1 Chan 2 0x3a AO Channel 5 Offset
+ * 11 - DAC1 Chan 3 0x3b AO Channel 5 Gain
+ * 12 - DAC1 Chan 4 0x3c 2.5V Offset
+ * 13 - DAC1 Chan 5 0x3d AO Channel 6 Offset (at-ao-10 only)
+ * 14 - DAC1 Chan 6 0x3e AO Channel 6 Gain (at-ao-10 only)
+ * 15 - DAC1 Chan 7 0x3f AO Channel 7 Offset (at-ao-10 only)
+ * 16 - DAC2 Chan 0 0x40 AO Channel 7 Gain (at-ao-10 only)
+ * 17 - DAC2 Chan 1 0x41 AO Channel 8 Offset (at-ao-10 only)
+ * 18 - DAC2 Chan 2 0x42 AO Channel 8 Gain (at-ao-10 only)
+ * 19 - DAC2 Chan 3 0x43 AO Channel 9 Offset (at-ao-10 only)
+ * 20 - DAC2 Chan 4 0x44 AO Channel 9 Gain (at-ao-10 only)
+ * DAC2 Chan 5 0x45 Reserved
+ * DAC2 Chan 6 0x46 Reserved
+ * DAC2 Chan 7 0x47 Reserved
*/
+static int atao_calib_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct atao_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int bitstring;
+ unsigned int val;
+ int bit;
+
+ if (insn->n == 0)
+ return 0;
+
+ devpriv->caldac[chan] = data[insn->n - 1] & s->maxdata;
+
+ /* write the channel and last data value to the caldac */
+ bitstring = ((chan & 0x7) << 8) | devpriv->caldac[chan];
+
+ /* clock the bitstring to the caldac; MSB -> LSB */
+ for (bit = 1 << 10; bit; bit >>= 1) {
+ val = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
+
+ outw(val, dev->iobase + ATAO_CFG2_REG);
+ outw(val | ATAO_CFG2_SCLK, dev->iobase + ATAO_CFG2_REG);
+ }
+
+ /* strobe the caldac to load the value */
+ outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG);
+ outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
+
+ return insn->n;
+}
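/*
 * Editor's note, not part of the patch: a worked example of the serial word
 * clocked out above.  The 11-bit frame is the channel-within-DAC in bits
 * 10:8 followed by the 8-bit value, sent MSB first on SDATA with one SCLK
 * pulse per bit, then latched by strobing CALLD for the addressed chip.
 * Writing 0x80 to caldac channel 12 ("2.5V Offset", DAC1 channel 4) gives:
 *
 *   bitstring = ((12 & 0x7) << 8) | 0x80 = 0x480  ->  100 1000 0000
 *   strobe    = ATAO_CFG2_CALLD(12)      = 0x8000 (DAC1), then CALLD_NOP
 */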
+
static int atao_calib_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct atao_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
+
for (i = 0; i < insn->n; i++)
- data[i] = 0; /* XXX */
+ data[i] = devpriv->caldac[chan];
+
return insn->n;
}
-static int atao_calib_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void atao_reset(struct comedi_device *dev)
{
struct atao_private *devpriv = dev->private;
- unsigned int bitstring, bit;
- unsigned int chan = CR_CHAN(insn->chanspec);
- bitstring = ((chan & 0x7) << 8) | (data[insn->n - 1] & 0xff);
+ /* This is the reset sequence described in the manual */
- for (bit = 1 << (11 - 1); bit; bit >>= 1) {
- outw(devpriv->cfg2 | ((bit & bitstring) ? SDATA : 0),
- dev->iobase + ATAO_CFG2);
- outw(devpriv->cfg2 | SCLK | ((bit & bitstring) ? SDATA : 0),
- dev->iobase + ATAO_CFG2);
- }
- /* strobe the appropriate caldac */
- outw(devpriv->cfg2 | (((chan >> 3) + 1) << 14),
- dev->iobase + ATAO_CFG2);
- outw(devpriv->cfg2, dev->iobase + ATAO_CFG2);
+ devpriv->cfg1 = 0;
+ outw(devpriv->cfg1, dev->iobase + ATAO_CFG1_REG);
- return insn->n;
+ /* Put outputs of counter 1 and counter 2 in a high state */
+ i8254_load(dev->iobase + ATAO_82C53_BASE, 0,
+ 0, 0x0003, I8254_MODE4 | I8254_BINARY);
+ i8254_set_mode(dev->iobase + ATAO_82C53_BASE, 0,
+ 1, I8254_MODE4 | I8254_BINARY);
+
+ outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
+
+ devpriv->cfg3 = 0;
+ outw(devpriv->cfg3, dev->iobase + ATAO_CFG3_REG);
+
+ inw(dev->iobase + ATAO_FIFO_CLEAR_REG);
+
+ atao_select_reg_group(dev, 1);
+ outw(0, dev->iobase + ATAO_2_INT1CLR_REG);
+ outw(0, dev->iobase + ATAO_2_INT2CLR_REG);
+ outw(0, dev->iobase + ATAO_2_DMATCCLR_REG);
+ atao_select_reg_group(dev, 0);
}
static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -324,12 +341,9 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct atao_board *board = comedi_board(dev);
struct atao_private *devpriv;
struct comedi_subdevice *s;
- int ao_unipolar;
int ret;
- ao_unipolar = it->options[3];
-
- ret = comedi_request_region(dev, it->options[0], ATAO_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x20);
if (ret)
return ret;
@@ -341,60 +355,44 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
+ /* Analog Output subdevice */
s = &dev->subdevices[0];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_ao_chans;
- s->maxdata = (1 << 12) - 1;
- if (ao_unipolar)
- s->range_table = &range_unipolar10;
- else
- s->range_table = &range_bipolar10;
- s->insn_write = &atao_ao_winsn;
- s->insn_read = &atao_ao_rinsn;
-
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = board->n_ao_chans;
+ s->maxdata = 0x0fff;
+ s->range_table = it->options[3] ? &range_unipolar10 : &range_bipolar10;
+ s->insn_write = atao_ao_insn_write;
+ s->insn_read = atao_ao_insn_read;
+
+ /* Digital I/O subdevice */
s = &dev->subdevices[1];
- /* digital i/o subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = atao_dio_insn_bits;
- s->insn_config = atao_dio_insn_config;
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = atao_dio_insn_bits;
+ s->insn_config = atao_dio_insn_config;
- s = &dev->subdevices[2];
/* caldac subdevice */
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 21;
- s->maxdata = 0xff;
- s->insn_read = atao_calib_insn_read;
- s->insn_write = atao_calib_insn_write;
-
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_CALIB;
+ s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
+ s->n_chan = (board->n_ao_chans * 2) + 1;
+ s->maxdata = 0xff;
+ s->insn_read = atao_calib_insn_read;
+ s->insn_write = atao_calib_insn_write;
+
+ /* EEPROM subdevice */
s = &dev->subdevices[3];
- /* eeprom subdevice */
- /* s->type=COMEDI_SUBD_EEPROM; */
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
atao_reset(dev);
- printk(KERN_INFO "\n");
-
return 0;
}
-static const struct atao_board atao_boards[] = {
- {
- .name = "ai-ao-6",
- .n_ao_chans = 6,
- }, {
- .name = "ai-ao-10",
- .n_ao_chans = 10,
- },
-};
-
static struct comedi_driver ni_at_ao_driver = {
.driver_name = "ni_at_ao",
.module = THIS_MODULE,
@@ -407,5 +405,5 @@ static struct comedi_driver ni_at_ao_driver = {
module_comedi_driver(ni_at_ao_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for NI AT-AO-6/10 boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index bb3491f5ad2..a9f7d40d6db 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -558,13 +558,12 @@ static int atmio16d_ao_insn_write(struct comedi_device *dev,
static int atmio16d_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] | data[1]);
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + MIO_16_DIG_OUT_REG);
- }
+
data[1] = inw(dev->iobase + MIO_16_DIG_IN_REG);
return insn->n;
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 404f83de276..e4cdca34915 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -72,18 +72,22 @@ Manuals: Register level: http://www.ni.com/pdf/manuals/340698.pdf
static int daq700_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ unsigned int mask;
+ unsigned int val;
- if (data[0] & 0xff)
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ if (mask & 0xff)
outb(s->state & 0xff, dev->iobase + DIO_W);
}
- data[1] = s->state & 0xff;
- data[1] |= inb(dev->iobase + DIO_R) << 8;
+ val = s->state & 0xff;
+ val |= inb(dev->iobase + DIO_R) << 8;
+
+ data[1] = val;
return insn->n;
}
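/*
 * Editor's note, not part of the patch: on this board the low eight DIO
 * channels are outputs and the high eight are inputs (the attach hunk below
 * keeps s->io_bits = 0x00ff), so the readback mixes the two sources: the
 * output byte comes from the shadow s->state and the input byte from DIO_R.
 * For example, with s->state = 0x5a and DIO_R reading 0xc3 the insn
 * returns data[1] = 0xc35a.
 */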
@@ -212,7 +216,6 @@ static int daq700_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->insn_bits = daq700_dio_insn_bits;
s->insn_config = daq700_dio_insn_config;
- s->state = 0;
s->io_bits = 0x00ff;
/* DAQCard-700 ai */
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 1add114dc0b..0512445df08 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -73,7 +73,6 @@
#include "ni_labpc_isadma.h"
#define LABPC_SIZE 0x20 /* size of ISA io region */
-#define LABPC_TIMER_BASE 500 /* 2 MHz master clock */
#define LABPC_ADC_TIMEOUT 1000
enum scan_mode {
@@ -201,12 +200,6 @@ static int labpc_counter_set_mode(struct comedi_device *dev,
return i8254_set_mode(base_address, 0, counter_number, mode);
}
-static bool labpc_range_is_unipolar(struct comedi_subdevice *s,
- unsigned int range)
-{
- return s->range_table->range[range].min >= 0;
-}
-
static int labpc_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct labpc_private *devpriv = dev->private;
@@ -272,7 +265,7 @@ static void labpc_setup_cmd6_reg(struct comedi_device *dev,
devpriv->cmd6 &= ~CMD6_NRSE;
/* bipolar or unipolar range? */
- if (labpc_range_is_unipolar(s, range))
+ if (comedi_range_is_unipolar(s, range))
devpriv->cmd6 |= CMD6_ADCUNI;
else
devpriv->cmd6 &= ~CMD6_ADCUNI;
@@ -465,13 +458,13 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
* clock speed on convert and scan counters)
*/
devpriv->divisor_b0 = (scan_period - 1) /
- (LABPC_TIMER_BASE * max_counter_value) + 1;
+ (I8254_OSC_BASE_2MHZ * max_counter_value) + 1;
if (devpriv->divisor_b0 < min_counter_value)
devpriv->divisor_b0 = min_counter_value;
if (devpriv->divisor_b0 > max_counter_value)
devpriv->divisor_b0 = max_counter_value;
- base_period = LABPC_TIMER_BASE * devpriv->divisor_b0;
+ base_period = I8254_OSC_BASE_2MHZ * devpriv->divisor_b0;
/* set a0 for conversion frequency and b1 for scan frequency */
switch (cmd->flags & TRIG_ROUND_MASK) {
@@ -516,22 +509,20 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
* calculate cascaded counter values
* that give desired scan timing
*/
- i8253_cascade_ns_to_timer_2div(LABPC_TIMER_BASE,
- &(devpriv->divisor_b1),
- &(devpriv->divisor_b0),
- &scan_period,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
+ &devpriv->divisor_b1,
+ &devpriv->divisor_b0,
+ &scan_period, cmd->flags);
labpc_set_ai_scan_period(cmd, mode, scan_period);
} else if (convert_period) {
/*
* calculate cascaded counter values
* that give desired conversion timing
*/
- i8253_cascade_ns_to_timer_2div(LABPC_TIMER_BASE,
- &(devpriv->divisor_a0),
- &(devpriv->divisor_b0),
- &convert_period,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
+ &devpriv->divisor_a0,
+ &devpriv->divisor_b0,
+ &convert_period, cmd->flags);
labpc_set_ai_convert_period(cmd, mode, convert_period);
}
}
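/*
 * Editor's note, not part of the patch: a minimal sketch of the cascaded
 * timer arithmetic used above, reusing devpriv and cmd from the function
 * it follows.  I8254_OSC_BASE_2MHZ is the master clock period in
 * nanoseconds (500 ns), so the achievable scan period is
 * divisor_b1 * divisor_b0 * 500 ns; the helper picks the divisor pair,
 * honouring the cmd->flags rounding mode, and rewrites the requested
 * period to the value actually programmed:
 */
unsigned int scan_period = 1000000;	/* ask for 1 ms per scan */

i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
			  &devpriv->divisor_b1, &devpriv->divisor_b0,
			  &scan_period, cmd->flags);
/* divisor_b1 * divisor_b0 == 2000 here (for instance 50 * 40), and
 * scan_period comes back as 2000 * 500 ns = 1000000 ns. */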
@@ -902,7 +893,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int labpc_drain_fifo(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
- short data;
+ unsigned short data;
struct comedi_async *async = dev->read_subdev->async;
const int timeout = 10000;
unsigned int i;
@@ -1046,7 +1037,7 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
/* set range */
if (board->is_labpc1200) {
range = CR_RANGE(insn->chanspec);
- if (labpc_range_is_unipolar(s, range))
+ if (comedi_range_is_unipolar(s, range))
devpriv->cmd6 |= CMD6_DACUNI(channel);
else
devpriv->cmd6 &= ~CMD6_DACUNI(channel);
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 4e02770e834..5113397bfec 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1292,7 +1292,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
struct comedi_cmd *cmd = &async->cmd;
int chan;
int i;
- short d;
+ unsigned short d;
u32 packed_data;
int range;
int err = 1;
@@ -1403,7 +1403,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
int i;
if (board->reg_type == ni_reg_611x) {
- short data[2];
+ unsigned short data[2];
u32 dl;
for (i = 0; i < n / 2; i++) {
@@ -1420,7 +1420,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
cfc_write_to_buffer(s, data[0]);
}
} else if (board->reg_type == ni_reg_6143) {
- short data[2];
+ unsigned short data[2];
u32 dl;
/* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */
@@ -1511,9 +1511,9 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV];
- short data[2];
+ unsigned short data[2];
u32 dl;
- short fifo_empty;
+ unsigned short fifo_empty;
int i;
if (board->reg_type == ni_reg_611x) {
@@ -1577,7 +1577,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv __maybe_unused = dev->private;
struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV];
- short data;
+ unsigned short data;
u32 dl;
if (board->reg_type != ni_reg_611x)
@@ -1596,7 +1596,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv __maybe_unused = dev->private;
struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV];
- short data;
+ unsigned short data;
u32 dl;
if (board->reg_type != ni_reg_6143)
@@ -1621,7 +1621,7 @@ static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_async *async = s->async;
unsigned int i;
unsigned int length = num_bytes / bytes_per_sample(s);
- short *array = data;
+ unsigned short *array = data;
unsigned int *larray = data;
for (i = 0; i < length; i++) {
@@ -2873,7 +2873,7 @@ static void ni_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int i;
unsigned int offset;
unsigned int length = num_bytes / sizeof(short);
- short *array = data;
+ unsigned short *array = data;
offset = 1 << (board->aobits - 1);
for (i = 0; i < length; i++) {
@@ -3547,28 +3547,22 @@ static int ni_dio_insn_config(struct comedi_device *dev,
static int ni_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni_private *devpriv = dev->private;
-#ifdef DEBUG_DIO
- printk("ni_dio_insn_bits() mask=0x%x bits=0x%x\n", data[0], data[1]);
-#endif
-
- if (data[0]) {
- /* Perform check to make sure we're not using the
- serial part of the dio */
- if ((data[0] & (DIO_SDIN | DIO_SDOUT))
- && devpriv->serial_interval_ns)
- return -EBUSY;
+ /* Make sure we're not using the serial part of the dio */
+ if ((data[0] & (DIO_SDIN | DIO_SDOUT)) && devpriv->serial_interval_ns)
+ return -EBUSY;
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data)) {
devpriv->dio_output &= ~DIO_Parallel_Data_Mask;
devpriv->dio_output |= DIO_Parallel_Data_Out(s->state);
devpriv->stc_writew(dev, devpriv->dio_output,
DIO_Output_Register);
}
+
data[1] = devpriv->stc_readw(dev, DIO_Parallel_Input_Register);
return insn->n;
@@ -3598,16 +3592,9 @@ static int ni_m_series_dio_insn_bits(struct comedi_device *dev,
{
struct ni_private *devpriv __maybe_unused = dev->private;
-#ifdef DEBUG_DIO
- printk("ni_m_series_dio_insn_bits() mask=0x%x bits=0x%x\n", data[0],
- data[1]);
-#endif
-
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data))
ni_writel(s->state, M_Offset_Static_Digital_Output);
- }
+
data[1] = ni_readl(M_Offset_Static_Digital_Input);
return insn->n;
@@ -5355,20 +5342,20 @@ static int ni_config_filter(struct comedi_device *dev, unsigned pfi_channel,
static int ni_pfi_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv __maybe_unused = dev->private;
- if ((board->reg_type & ni_reg_m_series_mask) == 0) {
+ if (!(board->reg_type & ni_reg_m_series_mask))
return -ENOTSUPP;
- }
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+
+ if (comedi_dio_update_state(s, data))
ni_writew(s->state, M_Offset_PFI_DO);
- }
+
data[1] = ni_readw(M_Offset_PFI_DI);
+
return insn->n;
}
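/*
 * Editor's note, not part of the patch: the short -> unsigned short changes
 * in this file (and in ni_at_a2150.c, ni_pcidio.c and ni_stc.h) all cover
 * raw sample storage.  Comedi samples are unsigned, offset-binary codes in
 * the range 0..s->maxdata, so holding them in a plain 'short' turns codes
 * above 0x7fff negative and makes comparisons and promotions sign-sensitive:
 */
short signed_sample = 0x8000;		/* implementation-defined: -32768 */
unsigned short raw_sample = 0x8000;	/* 32768, the intended sample code */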
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index fad81bc97b6..e3a8fa96d9b 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -406,9 +406,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
struct mite_struct *mite = devpriv->mite;
/* int i, j; */
- long int AuxData = 0;
- short data1 = 0;
- short data2 = 0;
+ unsigned int auxdata = 0;
+ unsigned short data1 = 0;
+ unsigned short data2 = 0;
int flags;
int status;
int work = 0;
@@ -481,11 +481,11 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
);
goto out;
}
- AuxData =
+ auxdata =
readl(devpriv->mite->daq_io_addr +
Group_1_FIFO);
- data1 = AuxData & 0xffff;
- data2 = (AuxData & 0xffff0000) >> 16;
+ data1 = auxdata & 0xffff;
+ data2 = (auxdata & 0xffff0000) >> 16;
comedi_buf_put(async, data1);
comedi_buf_put(async, data2);
/* DPRINTK("read:%d, %d\n",data1,data2); */
@@ -657,15 +657,14 @@ static int ni_pcidio_insn_config(struct comedi_device *dev,
static int ni_pcidio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct nidio96_private *devpriv = dev->private;
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ if (comedi_dio_update_state(s, data))
writel(s->state, devpriv->mite->daq_io_addr + Port_IO(0));
- }
+
data[1] = readl(devpriv->mite->daq_io_addr + Port_IO(0));
return insn->n;
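/*
 * Editor's note, not part of the patch: Group_1_FIFO returns two 16-bit
 * samples packed into one 32-bit word, which is what the data1/data2 split
 * above preserves; e.g. auxdata = 0xaaaabbbb gives data1 = 0xbbbb (queued
 * first) and data2 = 0xaaaa.
 */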
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 11bf0aab82e..f0630b7897b 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1491,7 +1491,7 @@ struct ni_board_struct {
unsigned short pwm_up_count; \
unsigned short pwm_down_count; \
\
- short ai_fifo_buffer[0x2000]; \
+ unsigned short ai_fifo_buffer[0x2000]; \
uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE]; \
uint32_t serial_number; \
\
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index e859f85a8e1..f0fc123ef56 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -1,258 +1,295 @@
/*
- comedi/drivers/pcl711.c
- hardware driver for PC-LabCard PCL-711 and AdSys ACL-8112
- and compatibles
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- Janne Jalkanen <jalkanen@cs.hut.fi>
- Eric Bunn <ebu@cs.hut.fi>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * pcl711.c
+ * Comedi driver for PC-LabCard PCL-711 and AdSys ACL-8112 and compatibles
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ * Janne Jalkanen <jalkanen@cs.hut.fi>
+ * Eric Bunn <ebu@cs.hut.fi>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-/*
-Driver: pcl711
-Description: Advantech PCL-711 and 711b, ADLink ACL-8112
-Author: ds, Janne Jalkanen <jalkanen@cs.hut.fi>, Eric Bunn <ebu@cs.hut.fi>
-Status: mostly complete
-Devices: [Advantech] PCL-711 (pcl711), PCL-711B (pcl711b),
- [AdLink] ACL-8112HG (acl8112hg), ACL-8112DG (acl8112dg)
-
-Since these boards do not have DMA or FIFOs, only immediate mode is
-supported.
-
-*/
/*
- Dave Andruczyk <dave@tech.buffalostate.edu> also wrote a
- driver for the PCL-711. I used a few ideas from his driver
- here. His driver also has more comments, if you are
- interested in understanding how this driver works.
- http://tech.buffalostate.edu/~dave/driver/
-
- The ACL-8112 driver was hacked from the sources of the PCL-711
- driver (the 744 chip used on the 8112 is almost the same as
- the 711b chip, but it has more I/O channels) by
- Janne Jalkanen (jalkanen@cs.hut.fi) and
- Erik Bunn (ebu@cs.hut.fi). Remerged with the PCL-711 driver
- by ds.
-
- [acl-8112]
- This driver supports both TRIGNOW and TRIGCLK,
- but does not yet support DMA transfers. It also supports
- both high (HG) and low (DG) versions of the card, though
- the HG version has been untested.
-
+ * Driver: pcl711
+ * Description: Advantech PCL-711 and 711b, ADLink ACL-8112
+ * Devices: (Advantech) PCL-711 [pcl711]
+ * (Advantech) PCL-711B [pcl711b]
+ * (AdLink) ACL-8112HG [acl8112hg]
+ * (AdLink) ACL-8112DG [acl8112dg]
+ * Author: David A. Schleef <ds@schleef.org>
+ * Janne Jalkanen <jalkanen@cs.hut.fi>
+ * Eric Bunn <ebu@cs.hut.fi>
+ * Updated:
+ * Status: mostly complete
+ *
+ * Configuration Options:
+ * [0] - I/O port base
+ * [1] - IRQ, optional
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-#include <linux/delay.h>
+#include "../comedidev.h"
#include "comedi_fc.h"
#include "8253.h"
-#define PCL711_SIZE 16
-
-#define PCL711_CTR0 0
-#define PCL711_CTR1 1
-#define PCL711_CTR2 2
-#define PCL711_CTRCTL 3
-#define PCL711_AD_LO 4
-#define PCL711_DA0_LO 4
-#define PCL711_AD_HI 5
-#define PCL711_DA0_HI 5
-#define PCL711_DI_LO 6
-#define PCL711_DA1_LO 6
-#define PCL711_DI_HI 7
-#define PCL711_DA1_HI 7
-#define PCL711_CLRINTR 8
-#define PCL711_GAIN 9
-#define PCL711_MUX 10
-#define PCL711_MODE 11
-#define PCL711_SOFTTRIG 12
-#define PCL711_DO_LO 13
-#define PCL711_DO_HI 14
-
-static const struct comedi_lrange range_pcl711b_ai = { 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
+/*
+ * I/O port register map
+ */
+#define PCL711_TIMER_BASE 0x00
+#define PCL711_AI_LSB_REG 0x04
+#define PCL711_AI_MSB_REG 0x05
+#define PCL711_AI_MSB_DRDY (1 << 4)
+#define PCL711_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define PCL711_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define PCL711_DI_LSB_REG 0x06
+#define PCL711_DI_MSB_REG 0x07
+#define PCL711_INT_STAT_REG 0x08
+#define PCL711_INT_STAT_CLR (0 << 0) /* any value will work */
+#define PCL711_AI_GAIN_REG 0x09
+#define PCL711_AI_GAIN(x) (((x) & 0xf) << 0)
+#define PCL711_MUX_REG 0x0a
+#define PCL711_MUX_CHAN(x) (((x) & 0xf) << 0)
+#define PCL711_MUX_CS0 (1 << 4)
+#define PCL711_MUX_CS1 (1 << 5)
+#define PCL711_MUX_DIFF (PCL711_MUX_CS0 | PCL711_MUX_CS1)
+#define PCL711_MODE_REG 0x0b
+#define PCL711_MODE_DEFAULT (0 << 0)
+#define PCL711_MODE_SOFTTRIG (1 << 0)
+#define PCL711_MODE_EXT (2 << 0)
+#define PCL711_MODE_EXT_IRQ (3 << 0)
+#define PCL711_MODE_PACER (4 << 0)
+#define PCL711_MODE_PACER_IRQ (6 << 0)
+#define PCL711_MODE_IRQ(x) (((x) & 0x7) << 4)
+#define PCL711_SOFTTRIG_REG 0x0c
+#define PCL711_SOFTTRIG (0 << 0) /* any value will work */
+#define PCL711_DO_LSB_REG 0x0d
+#define PCL711_DO_MSB_REG 0x0e
+
+static const struct comedi_lrange range_pcl711b_ai = {
+ 5, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_acl8112hg_ai = { 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01)
- }
+static const struct comedi_lrange range_acl8112hg_ai = {
+ 12, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_acl8112dg_ai = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange range_acl8112dg_ai = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
-/*
- * flags
- */
-
-#define PCL711_TIMEOUT 100
-#define PCL711_DRDY 0x10
-
-static const int i8253_osc_base = 500; /* 2 Mhz */
-
struct pcl711_board {
-
const char *name;
- int is_pcl711b;
- int is_8112;
- int is_dg;
- int n_ranges;
int n_aichan;
int n_aochan;
int maxirq;
const struct comedi_lrange *ai_range_type;
};
-struct pcl711_private {
+static const struct pcl711_board boardtypes[] = {
+ {
+ .name = "pcl711",
+ .n_aichan = 8,
+ .n_aochan = 1,
+ .ai_range_type = &range_bipolar5,
+ }, {
+ .name = "pcl711b",
+ .n_aichan = 8,
+ .n_aochan = 1,
+ .maxirq = 7,
+ .ai_range_type = &range_pcl711b_ai,
+ }, {
+ .name = "acl8112hg",
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .maxirq = 15,
+ .ai_range_type = &range_acl8112hg_ai,
+ }, {
+ .name = "acl8112dg",
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .maxirq = 15,
+ .ai_range_type = &range_acl8112dg_ai,
+ },
+};
- int board;
- int adchan;
- int ntrig;
- int aip[8];
- int mode;
+struct pcl711_private {
+ unsigned int ntrig;
unsigned int ao_readback[2];
unsigned int divisor1;
unsigned int divisor2;
};
+static void pcl711_ai_set_mode(struct comedi_device *dev, unsigned int mode)
+{
+ /*
+ * The pcl711b board uses bits in the mode register to select the
+ * interrupt. The other boards supported by this driver all use
+ * jumpers on the board.
+ *
+ * Enable the interrupt when needed on the pcl711b board. These
+ * bits do nothing on the other boards.
+ */
+ if (mode == PCL711_MODE_EXT_IRQ || mode == PCL711_MODE_PACER_IRQ)
+ mode |= PCL711_MODE_IRQ(dev->irq);
+
+ outb(mode, dev->iobase + PCL711_MODE_REG);
+}
+
+static unsigned int pcl711_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + PCL711_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL711_AI_LSB_REG);
+
+ return val & s->maxdata;
+}
+
+static int pcl711_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
+ pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
+ return 0;
+}
+
static irqreturn_t pcl711_interrupt(int irq, void *d)
{
- int lo, hi;
- int data;
struct comedi_device *dev = d;
- const struct pcl711_board *board = comedi_board(dev);
struct pcl711_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int data;
if (!dev->attached) {
comedi_error(dev, "spurious interrupt");
return IRQ_HANDLED;
}
- hi = inb(dev->iobase + PCL711_AD_HI);
- lo = inb(dev->iobase + PCL711_AD_LO);
- outb(0, dev->iobase + PCL711_CLRINTR);
-
- data = (hi << 8) | lo;
+ data = pcl711_ai_get_sample(dev, s);
- /* FIXME! Nothing else sets ntrig! */
- if (!(--devpriv->ntrig)) {
- if (board->is_8112)
- outb(1, dev->iobase + PCL711_MODE);
- else
- outb(0, dev->iobase + PCL711_MODE);
+ outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
- s->async->events |= COMEDI_CB_EOA;
+ if (comedi_buf_put(s->async, data) == 0) {
+ s->async->events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
+ } else {
+ s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
+ if (s->async->cmd.stop_src == TRIG_COUNT &&
+ !(--devpriv->ntrig)) {
+ pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
+ s->async->events |= COMEDI_CB_EOA;
+ }
}
comedi_event(dev, s);
return IRQ_HANDLED;
}
-static void pcl711_set_changain(struct comedi_device *dev, int chan)
+static void pcl711_set_changain(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec)
{
- const struct pcl711_board *board = comedi_board(dev);
- int chan_register;
-
- outb(CR_RANGE(chan), dev->iobase + PCL711_GAIN);
-
- chan_register = CR_CHAN(chan);
-
- if (board->is_8112) {
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+ unsigned int mux = 0;
+
+ outb(PCL711_AI_GAIN(range), dev->iobase + PCL711_AI_GAIN_REG);
+
+ if (s->n_chan > 8) {
+ /* Select the correct MPC508A chip */
+ if (aref == AREF_DIFF) {
+ chan &= 0x7;
+ mux |= PCL711_MUX_DIFF;
+ } else {
+ if (chan < 8)
+ mux |= PCL711_MUX_CS0;
+ else
+ mux |= PCL711_MUX_CS1;
+ }
+ }
+ outb(mux | PCL711_MUX_CHAN(chan), dev->iobase + PCL711_MUX_REG);
+}
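/*
 * Editor's note, not part of the patch: a worked example of the mux value
 * written above for the 16-channel ACL-8112 boards.  Single-ended channel
 * 10 selects the second MPC508A:
 *
 *   mux = PCL711_MUX_CS1 | PCL711_MUX_CHAN(10) = 0x20 | 0x0a = 0x2a
 *
 * while differential channel 2 asserts both chip selects with the channel
 * number masked to 0-7:
 *
 *   mux = PCL711_MUX_DIFF | PCL711_MUX_CHAN(2) = 0x30 | 0x02 = 0x32
 */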
- /*
- * Set the correct channel. The two channel banks are switched
- * using the mask value.
- * NB: To use differential channels, you should use
- * mask = 0x30, but I haven't written the support for this
- * yet. /JJ
- */
+static int pcl711_ai_wait_for_eoc(struct comedi_device *dev,
+ unsigned int timeout)
+{
+ unsigned int msb;
- if (chan_register >= 8)
- chan_register = 0x20 | (chan_register & 0x7);
- else
- chan_register |= 0x10;
- } else {
- outb(chan_register, dev->iobase + PCL711_MUX);
+ while (timeout--) {
+ msb = inb(dev->iobase + PCL711_AI_MSB_REG);
+ if ((msb & PCL711_AI_MSB_DRDY) == 0)
+ return 0;
+ udelay(1);
}
+ return -ETIME;
}
-static int pcl711_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcl711_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct pcl711_board *board = comedi_board(dev);
- int i, n;
- int hi, lo;
-
- pcl711_set_changain(dev, insn->chanspec);
-
- for (n = 0; n < insn->n; n++) {
- /*
- * Write the correct mode (software polling) and start polling
- * by writing to the trigger register
- */
- outb(1, dev->iobase + PCL711_MODE);
-
- if (!board->is_8112)
- outb(0, dev->iobase + PCL711_SOFTTRIG);
-
- i = PCL711_TIMEOUT;
- while (--i) {
- hi = inb(dev->iobase + PCL711_AD_HI);
- if (!(hi & PCL711_DRDY))
- goto ok;
- udelay(1);
- }
- printk(KERN_ERR "comedi%d: pcl711: A/D timeout\n", dev->minor);
- return -ETIME;
+ int ret;
+ int i;
+
+ pcl711_set_changain(dev, s, insn->chanspec);
-ok:
- lo = inb(dev->iobase + PCL711_AD_LO);
+ pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
- data[n] = ((hi & 0xf) << 8) | lo;
+ for (i = 0; i < insn->n; i++) {
+ outb(PCL711_SOFTTRIG, dev->iobase + PCL711_SOFTTRIG_REG);
+
+ ret = pcl711_ai_wait_for_eoc(dev, 100);
+ if (ret)
+ return ret;
+
+ data[i] = pcl711_ai_get_sample(dev, s);
}
- return n;
+ return insn->n;
}
static int pcl711_ai_cmdtest(struct comedi_device *dev,
@@ -292,7 +329,6 @@ static int pcl711_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
} else {
#define MAX_SPEED 1000
-#define TIMER_BASE 100
err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
MAX_SPEED);
}
@@ -313,11 +349,11 @@ static int pcl711_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer_2div(TIMER_BASE,
- &devpriv->divisor1,
- &devpriv->divisor2,
- &cmd->scan_begin_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->scan_begin_arg,
+ cmd->flags);
if (tmp != cmd->scan_begin_arg)
err++;
}
@@ -331,110 +367,106 @@ static int pcl711_ai_cmdtest(struct comedi_device *dev,
static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl711_private *devpriv = dev->private;
- int timer1, timer2;
struct comedi_cmd *cmd = &s->async->cmd;
- pcl711_set_changain(dev, cmd->chanlist[0]);
+ pcl711_set_changain(dev, s, cmd->chanlist[0]);
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ if (cmd->stop_arg == 0) {
+ /* an empty acquisition */
+ s->async->events |= COMEDI_CB_EOA;
+ comedi_event(dev, s);
+ return 0;
+ }
+ devpriv->ntrig = cmd->stop_arg;
+ }
if (cmd->scan_begin_src == TRIG_TIMER) {
- /*
- * Set timers
- * timer chip is an 8253, with timers 1 and 2
- * cascaded
- * 0x74 = Select Counter 1 | LSB/MSB | Mode=2 | Binary
- * Mode 2 = Rate generator
- *
- * 0xb4 = Select Counter 2 | LSB/MSB | Mode=2 | Binary
- */
-
- timer1 = timer2 = 0;
- i8253_cascade_ns_to_timer(i8253_osc_base, &timer1, &timer2,
- &cmd->scan_begin_arg,
- TRIG_ROUND_NEAREST);
-
- outb(0x74, dev->iobase + PCL711_CTRCTL);
- outb(timer1 & 0xff, dev->iobase + PCL711_CTR1);
- outb((timer1 >> 8) & 0xff, dev->iobase + PCL711_CTR1);
- outb(0xb4, dev->iobase + PCL711_CTRCTL);
- outb(timer2 & 0xff, dev->iobase + PCL711_CTR2);
- outb((timer2 >> 8) & 0xff, dev->iobase + PCL711_CTR2);
-
- /* clear pending interrupts (just in case) */
- outb(0, dev->iobase + PCL711_CLRINTR);
-
- /*
- * Set mode to IRQ transfer
- */
- outb(devpriv->mode | 6, dev->iobase + PCL711_MODE);
+ i8254_load(dev->iobase + PCL711_TIMER_BASE, 0,
+ 1, devpriv->divisor1, I8254_MODE2 | I8254_BINARY);
+ i8254_load(dev->iobase + PCL711_TIMER_BASE, 0,
+ 2, devpriv->divisor2, I8254_MODE2 | I8254_BINARY);
+
+ outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
+
+ pcl711_ai_set_mode(dev, PCL711_MODE_PACER_IRQ);
} else {
- /* external trigger */
- outb(devpriv->mode | 3, dev->iobase + PCL711_MODE);
+ pcl711_ai_set_mode(dev, PCL711_MODE_EXT_IRQ);
}
return 0;
}
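/*
 * Editor's note, not part of the patch: the i8254_load() calls above
 * program the same control words the removed code wrote by hand.  The 8254
 * control byte is (counter << 6) | (access << 4) | (mode << 1) | bcd, and
 * the helper loads LSB then MSB (access = 0x3), so mode 2, binary gives
 *
 *   counter 1: (1 << 6) | (3 << 4) | (2 << 1) | 0 = 0x74
 *   counter 2: (2 << 6) | (3 << 4) | (2 << 1) | 0 = 0xb4
 *
 * matching the removed "0x74 = Select Counter 1 | LSB/MSB | Mode=2 | Binary"
 * comment; the divisors themselves were computed in cmdtest by
 * i8253_cascade_ns_to_timer().
 */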
-/*
- analog output
-*/
-static int pcl711_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl711_ao_write(struct comedi_device *dev,
+ unsigned int chan, unsigned int val)
{
- struct pcl711_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
+ outb(val & 0xff, dev->iobase + PCL711_AO_LSB_REG(chan));
+ outb((val >> 8) & 0xff, dev->iobase + PCL711_AO_MSB_REG(chan));
+}
- for (n = 0; n < insn->n; n++) {
- outb((data[n] & 0xff),
- dev->iobase + (chan ? PCL711_DA1_LO : PCL711_DA0_LO));
- outb((data[n] >> 8),
- dev->iobase + (chan ? PCL711_DA1_HI : PCL711_DA0_HI));
+static int pcl711_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl711_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ int i;
- devpriv->ao_readback[chan] = data[n];
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+ pcl711_ao_write(dev, chan, val);
}
+ devpriv->ao_readback[chan] = val;
- return n;
+ return insn->n;
}
static int pcl711_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcl711_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- return n;
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+ return insn->n;
}
-/* Digital port read - Untested on 8112 */
static int pcl711_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- data[1] = inb(dev->iobase + PCL711_DI_LO) |
- (inb(dev->iobase + PCL711_DI_HI) << 8);
+ unsigned int val;
+
+ val = inb(dev->iobase + PCL711_DI_LSB_REG);
+ val |= (inb(dev->iobase + PCL711_DI_MSB_REG) << 8);
+
+ data[1] = val;
return insn->n;
}
-/* Digital port write - Untested on 8112 */
static int pcl711_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
+ unsigned int mask;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ if (mask & 0x00ff)
+ outb(s->state & 0xff, dev->iobase + PCL711_DO_LSB_REG);
+ if (mask & 0xff00)
+ outb((s->state >> 8), dev->iobase + PCL711_DO_MSB_REG);
}
- if (data[0] & 0x00ff)
- outb(s->state & 0xff, dev->iobase + PCL711_DO_LO);
- if (data[0] & 0xff00)
- outb((s->state >> 8), dev->iobase + PCL711_DO_HI);
data[1] = s->state;
@@ -445,112 +477,82 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl711_board *board = comedi_board(dev);
struct pcl711_private *devpriv;
- int ret;
- unsigned int irq;
struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
- ret = comedi_request_region(dev, it->options[0], PCL711_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
- /* grab our IRQ */
- irq = it->options[1];
- if (irq > board->maxirq) {
- printk(KERN_ERR "irq out of range\n");
- return -EINVAL;
- }
- if (irq) {
- if (request_irq(irq, pcl711_interrupt, 0, dev->board_name,
- dev)) {
- printk(KERN_ERR "unable to allocate irq %u\n", irq);
- return -EINVAL;
- } else {
- printk(KERN_INFO "( irq = %u )\n", irq);
- }
+ if (it->options[1] && it->options[1] <= board->maxirq) {
+ ret = request_irq(it->options[1], pcl711_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* AI subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = board->n_aichan;
- s->maxdata = 0xfff;
- s->len_chanlist = 1;
- s->range_table = board->ai_range_type;
- s->insn_read = pcl711_ai_insn;
- if (irq) {
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ if (board->n_aichan > 8)
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = board->n_aichan;
+ s->maxdata = 0xfff;
+ s->range_table = board->ai_range_type;
+ s->insn_read = pcl711_ai_insn_read;
+ if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmdtest = pcl711_ai_cmdtest;
- s->do_cmd = pcl711_ai_cmd;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmdtest = pcl711_ai_cmdtest;
+ s->do_cmd = pcl711_ai_cmd;
+ s->cancel = pcl711_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* AO subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_aochan;
- s->maxdata = 0xfff;
- s->len_chanlist = 1;
- s->range_table = &range_bipolar5;
- s->insn_write = pcl711_ao_insn;
- s->insn_read = pcl711_ao_insn_read;
-
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = board->n_aochan;
+ s->maxdata = 0xfff;
+ s->range_table = &range_bipolar5;
+ s->insn_write = pcl711_ao_insn_write;
+ s->insn_read = pcl711_ao_insn_read;
+
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- /* 16-bit digital input */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->len_chanlist = 16;
- s->range_table = &range_digital;
- s->insn_bits = pcl711_di_insn_bits;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl711_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- /* 16-bit digital out */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->len_chanlist = 16;
- s->range_table = &range_digital;
- s->state = 0;
- s->insn_bits = pcl711_do_insn_bits;
-
- /*
- this is the "base value" for the mode register, which is
- used for the irq on the PCL711
- */
- if (board->is_pcl711b)
- devpriv->mode = (dev->irq << 4);
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl711_do_insn_bits;
/* clear DAC */
- outb(0, dev->iobase + PCL711_DA0_LO);
- outb(0, dev->iobase + PCL711_DA0_HI);
- outb(0, dev->iobase + PCL711_DA1_LO);
- outb(0, dev->iobase + PCL711_DA1_HI);
-
- printk(KERN_INFO "\n");
+ pcl711_ao_write(dev, 0, 0x0);
+ pcl711_ao_write(dev, 1, 0x0);
return 0;
}
-static const struct pcl711_board boardtypes[] = {
- { "pcl711", 0, 0, 0, 5, 8, 1, 0, &range_bipolar5 },
- { "pcl711b", 1, 0, 0, 5, 8, 1, 7, &range_pcl711b_ai },
- { "acl8112hg", 0, 1, 0, 12, 16, 2, 15, &range_acl8112hg_ai },
- { "acl8112dg", 0, 1, 1, 9, 16, 2, 15, &range_acl8112dg_ai },
-};
-
static struct comedi_driver pcl711_driver = {
.driver_name = "pcl711",
.module = THIS_MODULE,
@@ -563,5 +565,5 @@ static struct comedi_driver pcl711_driver = {
module_comedi_driver(pcl711_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for PCL-711 compatible boards");
MODULE_LICENSE("GPL");
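Several of the do_insn_bits() conversions in this series (pcl711 above; pcl726, pcl730, pcl812, pcl818, rti800, s526 and others below) replace open-coded mask/state handling with comedi_dio_update_state(). As a reference, a minimal userspace sketch of the pattern that helper factors out, based on the open-coded logic removed in these hunks; the struct and function names here are stand-ins, not the comedi API:

#include <stdio.h>

/* Stand-in for the comedi_subdevice field the pattern touches. */
struct subdev {
        unsigned int state;             /* current output state */
};

/*
 * Sketch of what comedi_dio_update_state() factors out of each driver:
 * data[0] is the mask of channels to update, data[1] holds the new bits.
 * The mask is returned so the caller knows whether to touch the hardware.
 */
static unsigned int dio_update_state(struct subdev *s, unsigned int *data)
{
        unsigned int mask = data[0];

        if (mask) {
                s->state &= ~mask;
                s->state |= data[1] & mask;
        }
        return mask;
}

int main(void)
{
        struct subdev s = { .state = 0x00ff };
        unsigned int data[2] = { 0x0f00, 0x0a00 };      /* mask, bits */

        if (dio_update_state(&s, data))
                printf("write 0x%04x to the DO registers\n", s.state);
        data[1] = s.state;      /* the insn_bits handlers also return the state */
        return 0;
}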
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index a4d0bcc31e5..cf9568ee46e 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -1,230 +1,356 @@
/*
- comedi/drivers/pcl726.c
-
- hardware driver for Advantech cards:
- card: PCL-726, PCL-727, PCL-728
- driver: pcl726, pcl727, pcl728
- and for ADLink cards:
- card: ACL-6126, ACL-6128
- driver: acl6126, acl6128
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: pcl726
-Description: Advantech PCL-726 & compatibles
-Author: ds
-Status: untested
-Devices: [Advantech] PCL-726 (pcl726), PCL-727 (pcl727), PCL-728 (pcl728),
- [ADLink] ACL-6126 (acl6126), ACL-6128 (acl6128)
-
-Interrupts are not supported.
-
- Options for PCL-726:
- [0] - IO Base
- [2]...[7] - D/A output range for channel 1-6:
- 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V,
- 4: 4-20mA, 5: unknown (external reference)
-
- Options for PCL-727:
- [0] - IO Base
- [2]...[13] - D/A output range for channel 1-12:
- 0: 0-5V, 1: 0-10V, 2: +/-5V,
- 3: 4-20mA
-
- Options for PCL-728 and ACL-6128:
- [0] - IO Base
- [2], [3] - D/A output range for channel 1 and 2:
- 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V,
- 4: 4-20mA, 5: 0-20mA
-
- Options for ACL-6126:
- [0] - IO Base
- [1] - IRQ (0=disable, 3, 5, 6, 7, 9, 10, 11, 12, 15) (currently ignored)
- [2]...[7] - D/A output range for channel 1-6:
- 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V,
- 4: 4-20mA
-*/
+ * pcl726.c
+ * Comedi driver for 6/12-Channel D/A Output and DIO cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- Thanks to Circuit Specialists for having programming info (!) on
- their web page. (http://www.cir.com/)
-*/
+ * Driver: pcl726
+ * Description: Advantech PCL-726 & compatibles
+ * Author: David A. Schleef <ds@schleef.org>
+ * Status: untested
+ * Devices: (Advantech) PCL-726 [pcl726]
+ * (Advantech) PCL-727 [pcl727]
+ * (Advantech) PCL-728 [pcl728]
+ * (ADLink) ACL-6126 [acl6126]
+ * (ADLink) ACL-6128 [acl6128]
+ *
+ * Configuration Options:
+ * [0] - IO Base
+ * [1] - IRQ (ACL-6126 only)
+ * [2] - D/A output range for channel 0
+ * [3] - D/A output range for channel 1
+ *
+ * Boards with > 2 analog output channels:
+ * [4] - D/A output range for channel 2
+ * [5] - D/A output range for channel 3
+ * [6] - D/A output range for channel 4
+ * [7] - D/A output range for channel 5
+ *
+ * Boards with > 6 analog output channels:
+ * [8] - D/A output range for channel 6
+ * [9] - D/A output range for channel 7
+ * [10] - D/A output range for channel 8
+ * [11] - D/A output range for channel 9
+ * [12] - D/A output range for channel 10
+ * [13] - D/A output range for channel 11
+ *
+ * For PCL-726 the D/A output ranges are:
+ * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V, 4: 4-20mA, 5: unknown
+ *
+ * For PCL-727:
+ * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: 4-20mA
+ *
+ * For PCL-728 and ACL-6128:
+ * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V, 4: 4-20mA, 5: 0-20mA
+ *
+ * For ACL-6126:
+ * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V, 4: 4-20mA
+ */
#include <linux/module.h>
-#include "../comedidev.h"
-
-#undef ACL6126_IRQ /* no interrupt support (yet) */
+#include <linux/interrupt.h>
-#define PCL726_SIZE 16
-#define PCL727_SIZE 32
-#define PCL728_SIZE 8
+#include "../comedidev.h"
-#define PCL726_DAC0_HI 0
-#define PCL726_DAC0_LO 1
+#include "comedi_fc.h"
-#define PCL726_DO_HI 12
-#define PCL726_DO_LO 13
-#define PCL726_DI_HI 14
-#define PCL726_DI_LO 15
+#define PCL726_AO_MSB_REG(x) (0x00 + ((x) * 2))
+#define PCL726_AO_LSB_REG(x) (0x01 + ((x) * 2))
+#define PCL726_DO_MSB_REG 0x0c
+#define PCL726_DO_LSB_REG 0x0d
+#define PCL726_DI_MSB_REG 0x0e
+#define PCL726_DI_LSB_REG 0x0f
-#define PCL727_DO_HI 24
-#define PCL727_DO_LO 25
-#define PCL727_DI_HI 0
-#define PCL727_DI_LO 1
+#define PCL727_DI_MSB_REG 0x00
+#define PCL727_DI_LSB_REG 0x01
+#define PCL727_DO_MSB_REG 0x18
+#define PCL727_DO_LSB_REG 0x19
static const struct comedi_lrange *const rangelist_726[] = {
- &range_unipolar5, &range_unipolar10,
- &range_bipolar5, &range_bipolar10,
- &range_4_20mA, &range_unknown
+ &range_unipolar5,
+ &range_unipolar10,
+ &range_bipolar5,
+ &range_bipolar10,
+ &range_4_20mA,
+ &range_unknown
};
static const struct comedi_lrange *const rangelist_727[] = {
- &range_unipolar5, &range_unipolar10,
+ &range_unipolar5,
+ &range_unipolar10,
&range_bipolar5,
&range_4_20mA
};
static const struct comedi_lrange *const rangelist_728[] = {
- &range_unipolar5, &range_unipolar10,
- &range_bipolar5, &range_bipolar10,
- &range_4_20mA, &range_0_20mA
+ &range_unipolar5,
+ &range_unipolar10,
+ &range_bipolar5,
+ &range_bipolar10,
+ &range_4_20mA,
+ &range_0_20mA
};
struct pcl726_board {
-
- const char *name; /* driver name */
- int n_aochan; /* num of D/A chans */
- int num_of_ranges; /* num of ranges */
- unsigned int IRQbits; /* allowed interrupts */
- unsigned int io_range; /* len of IO space */
- char have_dio; /* 1=card have DI/DO ports */
- int di_hi; /* ports for DI/DO operations */
- int di_lo;
- int do_hi;
- int do_lo;
- const struct comedi_lrange *const *range_type_list;
- /* list of supported ranges */
+ const char *name;
+ unsigned long io_len;
+ unsigned int irq_mask;
+ const struct comedi_lrange *const *ao_ranges;
+ int ao_num_ranges;
+ int ao_nchan;
+ unsigned int have_dio:1;
+ unsigned int is_pcl727:1;
};
-static const struct pcl726_board boardtypes[] = {
- {"pcl726", 6, 6, 0x0000, PCL726_SIZE, 1,
- PCL726_DI_HI, PCL726_DI_LO, PCL726_DO_HI, PCL726_DO_LO,
- &rangelist_726[0],},
- {"pcl727", 12, 4, 0x0000, PCL727_SIZE, 1,
- PCL727_DI_HI, PCL727_DI_LO, PCL727_DO_HI, PCL727_DO_LO,
- &rangelist_727[0],},
- {"pcl728", 2, 6, 0x0000, PCL728_SIZE, 0,
- 0, 0, 0, 0,
- &rangelist_728[0],},
- {"acl6126", 6, 5, 0x96e8, PCL726_SIZE, 1,
- PCL726_DI_HI, PCL726_DI_LO, PCL726_DO_HI, PCL726_DO_LO,
- &rangelist_726[0],},
- {"acl6128", 2, 6, 0x0000, PCL728_SIZE, 0,
- 0, 0, 0, 0,
- &rangelist_728[0],},
+static const struct pcl726_board pcl726_boards[] = {
+ {
+ .name = "pcl726",
+ .io_len = 0x10,
+ .ao_ranges = &rangelist_726[0],
+ .ao_num_ranges = ARRAY_SIZE(rangelist_726),
+ .ao_nchan = 6,
+ .have_dio = 1,
+ }, {
+ .name = "pcl727",
+ .io_len = 0x20,
+ .ao_ranges = &rangelist_727[0],
+ .ao_num_ranges = ARRAY_SIZE(rangelist_727),
+ .ao_nchan = 12,
+ .have_dio = 1,
+ .is_pcl727 = 1,
+ }, {
+ .name = "pcl728",
+ .io_len = 0x08,
+ .ao_num_ranges = ARRAY_SIZE(rangelist_728),
+ .ao_ranges = &rangelist_728[0],
+ .ao_nchan = 2,
+ }, {
+ .name = "acl6126",
+ .io_len = 0x10,
+ .irq_mask = 0x96e8,
+ .ao_num_ranges = ARRAY_SIZE(rangelist_726),
+ .ao_ranges = &rangelist_726[0],
+ .ao_nchan = 6,
+ .have_dio = 1,
+ }, {
+ .name = "acl6128",
+ .io_len = 0x08,
+ .ao_num_ranges = ARRAY_SIZE(rangelist_728),
+ .ao_ranges = &rangelist_728[0],
+ .ao_nchan = 2,
+ },
};
struct pcl726_private {
-
- int bipolar[12];
const struct comedi_lrange *rangelist[12];
unsigned int ao_readback[12];
+ unsigned int cmd_running:1;
};
-static int pcl726_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcl726_intr_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = 0;
+ return insn->n;
+}
+
+static int pcl726_intr_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, 1);
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* step 4: ignored */
+
+ if (err)
+ return 4;
+
+ return 0;
+}
+
+static int pcl726_intr_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl726_private *devpriv = dev->private;
+
+ devpriv->cmd_running = 1;
+
+ return 0;
+}
+
+static int pcl726_intr_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl726_private *devpriv = dev->private;
- int hi, lo;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++) {
- lo = data[n] & 0xff;
- hi = (data[n] >> 8) & 0xf;
- if (devpriv->bipolar[chan])
- hi ^= 0x8;
- /*
- * the programming info did not say which order
- * to write bytes. switch the order of the next
- * two lines if you get glitches.
- */
- outb(hi, dev->iobase + PCL726_DAC0_HI + 2 * chan);
- outb(lo, dev->iobase + PCL726_DAC0_LO + 2 * chan);
- devpriv->ao_readback[chan] = data[n];
+
+ devpriv->cmd_running = 0;
+
+ return 0;
+}
+
+static irqreturn_t pcl726_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct pcl726_private *devpriv = dev->private;
+
+ if (devpriv->cmd_running) {
+ pcl726_intr_cancel(dev, s);
+
+ comedi_buf_put(s->async, 0);
+ s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
+ comedi_event(dev, s);
}
- return n;
+ return IRQ_HANDLED;
+}
+
+static int pcl726_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl726_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+ devpriv->ao_readback[chan] = val;
+
+ /* bipolar data to the DAC is two's complement */
+ if (comedi_chan_range_is_bipolar(s, chan, range))
+ val = comedi_offset_munge(s, val);
+
+ /* order is important, MSB then LSB */
+ outb((val >> 8) & 0xff, dev->iobase + PCL726_AO_MSB_REG(chan));
+ outb(val & 0xff, dev->iobase + PCL726_AO_LSB_REG(chan));
+ }
+
+ return insn->n;
}
static int pcl726_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcl726_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int n;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
- return n;
+ return insn->n;
}
static int pcl726_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct pcl726_board *board = comedi_board(dev);
+ unsigned int val;
- data[1] = inb(dev->iobase + board->di_lo) |
- (inb(dev->iobase + board->di_hi) << 8);
+ if (board->is_pcl727) {
+ val = inb(dev->iobase + PCL727_DI_LSB_REG);
+ val |= (inb(dev->iobase + PCL727_DI_MSB_REG) << 8);
+ } else {
+ val = inb(dev->iobase + PCL726_DI_LSB_REG);
+ val |= (inb(dev->iobase + PCL726_DI_MSB_REG) << 8);
+ }
+
+ data[1] = val;
return insn->n;
}
static int pcl726_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct pcl726_board *board = comedi_board(dev);
-
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
+ unsigned long io = dev->iobase;
+ unsigned int mask;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ if (board->is_pcl727) {
+ if (mask & 0x00ff)
+ outb(s->state & 0xff, io + PCL727_DO_LSB_REG);
+ if (mask & 0xff00)
+ outb((s->state >> 8), io + PCL727_DO_MSB_REG);
+ } else {
+ if (mask & 0x00ff)
+ outb(s->state & 0xff, io + PCL726_DO_LSB_REG);
+ if (mask & 0xff00)
+ outb((s->state >> 8), io + PCL726_DO_MSB_REG);
+ }
}
- if (data[1] & 0x00ff)
- outb(s->state & 0xff, dev->iobase + board->do_lo);
- if (data[1] & 0xff00)
- outb((s->state >> 8), dev->iobase + board->do_hi);
data[1] = s->state;
return insn->n;
}
-static int pcl726_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int pcl726_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
const struct pcl726_board *board = comedi_board(dev);
struct pcl726_private *devpriv;
struct comedi_subdevice *s;
- int ret, i;
-#ifdef ACL6126_IRQ
- unsigned int irq;
-#endif
+ int subdev;
+ int ret;
+ int i;
- ret = comedi_request_region(dev, it->options[0], board->io_range);
+ ret = comedi_request_region(dev, it->options[0], board->io_len);
if (ret)
return ret;
@@ -232,97 +358,81 @@ static int pcl726_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- for (i = 0; i < 12; i++) {
- devpriv->bipolar[i] = 0;
- devpriv->rangelist[i] = &range_unknown;
- }
-
-#ifdef ACL6126_IRQ
- irq = 0;
- if (boardtypes[board].IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- devpriv->first_chan = 2;
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & boardtypes[board].IRQbits) == 0) {
- printk(KERN_WARNING
- ", IRQ %d is out of allowed range,"
- " DISABLING IT", irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl818, 0,
- dev->board_name, dev)) {
- printk(KERN_WARNING
- ", unable to allocate IRQ %d,"
- " DISABLING IT", irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(", irq=%d", irq);
- }
- }
+ /*
+ * Hook up the external trigger source interrupt only if the
+ * user config option is valid and the board supports interrupts.
+ */
+ if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
+ ret = request_irq(it->options[1], pcl726_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ /* External trigger source is from Pin-17 of CN3 */
+ dev->irq = it->options[1];
}
}
- dev->irq = irq;
-#endif
+ /* setup the per-channel analog output range_table_list */
+ for (i = 0; i < 12; i++) {
+ unsigned int opt = it->options[2 + i];
- printk("\n");
+ if (opt < board->ao_num_ranges && i < board->ao_nchan)
+ devpriv->rangelist[i] = board->ao_ranges[opt];
+ else
+ devpriv->rangelist[i] = &range_unknown;
+ }
- ret = comedi_alloc_subdevices(dev, 3);
+ subdev = board->have_dio ? 3 : 1;
+ if (dev->irq)
+ subdev++;
+ ret = comedi_alloc_subdevices(dev, subdev);
if (ret)
return ret;
- s = &dev->subdevices[0];
- /* ao */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = 0xfff;
- s->len_chanlist = 1;
- s->insn_write = pcl726_ao_insn;
- s->insn_read = pcl726_ao_insn_read;
- s->range_table_list = devpriv->rangelist;
- for (i = 0; i < board->n_aochan; i++) {
- int j;
-
- j = it->options[2 + 1];
- if ((j < 0) || (j >= board->num_of_ranges)) {
- printk
- ("Invalid range for channel %d! Must be 0<=%d<%d\n",
- i, j, board->num_of_ranges - 1);
- j = 0;
- }
- devpriv->rangelist[i] = board->range_type_list[j];
- if (devpriv->rangelist[i]->range[0].min ==
- -devpriv->rangelist[i]->range[0].max)
- devpriv->bipolar[i] = 1; /* bipolar range */
- }
+ subdev = 0;
- s = &dev->subdevices[1];
- /* di */
- if (!board->have_dio) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 16;
- s->maxdata = 1;
- s->len_chanlist = 1;
- s->insn_bits = pcl726_di_insn_bits;
- s->range_table = &range_digital;
+ /* Analog Output subdevice */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = board->ao_nchan;
+ s->maxdata = 0x0fff;
+ s->range_table_list = devpriv->rangelist;
+ s->insn_write = pcl726_ao_insn_write;
+ s->insn_read = pcl726_ao_insn_read;
+
+ if (board->have_dio) {
+ /* Digital Input subdevice */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->insn_bits = pcl726_di_insn_bits;
+ s->range_table = &range_digital;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->insn_bits = pcl726_do_insn_bits;
+ s->range_table = &range_digital;
}
- s = &dev->subdevices[2];
- /* do */
- if (!board->have_dio) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = 16;
- s->maxdata = 1;
- s->len_chanlist = 1;
- s->insn_bits = pcl726_do_insn_bits;
- s->range_table = &range_digital;
+ if (dev->irq) {
+ /* Digital Input subdevice - Interrupt support */
+ s = &dev->subdevices[subdev++];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl726_intr_insn_bits;
+ s->do_cmdtest = pcl726_intr_cmdtest;
+ s->do_cmd = pcl726_intr_cmd;
+ s->cancel = pcl726_intr_cancel;
}
return 0;
@@ -333,12 +443,12 @@ static struct comedi_driver pcl726_driver = {
.module = THIS_MODULE,
.attach = pcl726_attach,
.detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
+ .board_name = &pcl726_boards[0].name,
+ .num_names = ARRAY_SIZE(pcl726_boards),
.offset = sizeof(struct pcl726_board),
};
module_comedi_driver(pcl726_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Advantech PCL-726 & compatibles");
MODULE_LICENSE("GPL");
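The bipolar analog paths in these drivers convert between comedi's unsigned offset-binary samples and the hardware's two's-complement values; the new code calls comedi_offset_munge(), and the pcmad.c hunk further down shows the operation spelled out as val ^= ((s->maxdata + 1) >> 1). A small standalone sketch of that arithmetic (mirroring the open-coded form, not necessarily the in-tree helper's exact expression) for the 12-bit converters used here:

#include <assert.h>

/*
 * Offset-binary <-> two's-complement munge: flipping the MSB maps the
 * unsigned range 0..maxdata onto the signed bit patterns and back.
 */
static unsigned int offset_munge(unsigned int maxdata, unsigned int val)
{
        return val ^ ((maxdata + 1) >> 1);      /* flip the MSB */
}

int main(void)
{
        unsigned int maxdata = 0x0fff;          /* 12-bit DAC/ADC */

        assert(offset_munge(maxdata, 0x000) == 0x800);  /* -full scale */
        assert(offset_munge(maxdata, 0x800) == 0x000);  /* mid scale */
        assert(offset_munge(maxdata, 0xfff) == 0x7ff);  /* +full scale */
        /* the munge is its own inverse */
        assert(offset_munge(maxdata, offset_munge(maxdata, 0x123)) == 0x123);
        return 0;
}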
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index 2a659f23ecd..d041b714db2 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -167,20 +167,17 @@ static int pcl730_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
unsigned long reg = (unsigned long)s->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
+ unsigned int mask;
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
if (mask & 0x00ff)
outb(s->state & 0xff, dev->iobase + reg);
if ((mask & 0xff00) && (s->n_chan > 8))
outb((s->state >> 8) & 0xff, dev->iobase + reg + 1);
if ((mask & 0xff0000) && (s->n_chan > 16))
outb((s->state >> 16) & 0xff, dev->iobase + reg + 2);
if ((mask & 0xff000000) && (s->n_chan > 24))
outb((s->state >> 24) & 0xff, dev->iobase + reg + 3);
}
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 03a098900d3..03315abcca1 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -355,7 +355,6 @@ struct pcl812_private {
unsigned int ai_n_chan; /* how many channels is measured */
unsigned int ai_flags; /* flaglist */
unsigned int ai_data_len; /* len of data buffer */
- short *ai_data; /* data buffer */
unsigned int ai_is16b; /* =1 we have 16 bit card */
unsigned long dmabuf[2]; /* PTR to DMA buf */
unsigned int dmapages[2]; /* how many pages we have allocated */
@@ -509,19 +508,16 @@ static int pcl812_di_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
-*/
static int pcl812_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
+ if (comedi_dio_update_state(s, data)) {
outb(s->state & 0xff, dev->iobase + PCL812_DO_LO);
outb((s->state >> 8), dev->iobase + PCL812_DO_HI);
}
+
data[1] = s->state;
return insn->n;
@@ -592,9 +588,9 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(board->i8254_osc_base, &divisor1,
- &divisor2, &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(board->i8254_osc_base,
+ &divisor1, &divisor2,
+ &cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ai_ns_min)
cmd->convert_arg = board->ai_ns_min;
if (tmp != cmd->convert_arg)
@@ -640,8 +636,7 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
cmd->convert_arg = board->ai_ns_min;
i8253_cascade_ns_to_timer(board->i8254_osc_base,
&divisor1, &divisor2,
- &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ &cmd->convert_arg, cmd->flags);
}
start_pacer(dev, -1, 0, 0); /* stop pacer */
@@ -665,7 +660,6 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_flags = cmd->flags;
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_data = s->async->prealloc_buf;
if (cmd->stop_src == TRIG_COUNT) {
devpriv->ai_scans = cmd->stop_arg;
devpriv->ai_neverending = 0;
@@ -835,7 +829,8 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
==============================================================================
*/
static void transfer_from_dma_buf(struct comedi_device *dev,
- struct comedi_subdevice *s, short *ptr,
+ struct comedi_subdevice *s,
+ unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
struct pcl812_private *devpriv = dev->private;
@@ -873,9 +868,9 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
struct comedi_subdevice *s = &dev->subdevices[0];
unsigned long dma_flags;
int len, bufptr;
- short *ptr;
+ unsigned short *ptr;
- ptr = (short *)devpriv->dmabuf[devpriv->next_dma_buf];
+ ptr = (unsigned short *)devpriv->dmabuf[devpriv->next_dma_buf];
len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
devpriv->ai_poll_ptr;
@@ -1443,40 +1438,40 @@ static void pcl812_detach(struct comedi_device *dev)
static const struct pcl812_board boardtypes[] = {
{"pcl812", boardPCL812, 16, 0, 2, 16, 16, 0x0fff,
- 33000, 500, &range_bipolar10, &range_unipolar5,
+ 33000, I8254_OSC_BASE_2MHZ, &range_bipolar10, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"pcl812pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
- 33000, 500, &range_pcl812pg_ai, &range_unipolar5,
+ 33000, I8254_OSC_BASE_2MHZ, &range_pcl812pg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"acl8112pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
- 10000, 500, &range_pcl812pg_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_pcl812pg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"acl8112dg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, 500, &range_acl8112dg_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
{"acl8112hg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, 500, &range_acl8112hg_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
{"a821pgl", boardA821, 16, 8, 1, 16, 16, 0x0fff,
- 10000, 500, &range_pcl813b_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b_ai, &range_unipolar5,
0x000c, 0x00, PCLx1x_IORANGE, 0},
{"a821pglnda", boardA821, 16, 8, 0, 0, 0, 0x0fff,
- 10000, 500, &range_pcl813b_ai, NULL,
+ 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b_ai, NULL,
0x000c, 0x00, PCLx1x_IORANGE, 0},
{"a821pgh", boardA821, 16, 8, 1, 16, 16, 0x0fff,
- 10000, 500, &range_a821pgh_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_a821pgh_ai, &range_unipolar5,
0x000c, 0x00, PCLx1x_IORANGE, 0},
{"a822pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, 500, &range_acl8112dg_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"a822pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, 500, &range_acl8112hg_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"a823pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 8000, 500, &range_acl8112dg_ai, &range_unipolar5,
+ 8000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"a823pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 8000, 500, &range_acl8112hg_ai, &range_unipolar5,
+ 8000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
{"pcl813", boardPCL813, 32, 0, 0, 0, 0, 0x0fff,
0, 0, &range_pcl813b_ai, NULL,
@@ -1491,10 +1486,10 @@ static const struct pcl812_board boardtypes[] = {
0, 0, &range_iso813_1_ai, NULL,
0x0000, 0x00, PCLx1x_IORANGE, 0},
{"acl8216", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
- 10000, 500, &range_pcl813b2_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b2_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
{"a826pg", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
- 10000, 500, &range_pcl813b2_ai, &range_unipolar5,
+ 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b2_ai, &range_unipolar5,
0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
};
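The boardtype table above (and the pcl816/pcl818 changes below) swaps the magic oscillator base periods 500 and 100 for the named I8254_OSC_BASE_* constants; the value is simply the timer input period in nanoseconds (10^9 ns / 2 MHz = 500 ns, 10^9 ns / 10 MHz = 100 ns). A deliberately simplified sketch of the divisor search that a cascaded-8254 helper such as i8253_cascade_ns_to_timer() performs with that base; it ignores the TRIG_ROUND_* flag handling the real helper applies, and all names here are hypothetical:

#include <stdio.h>

#define OSC_BASE_10MHZ  100     /* ns per timer tick at 10 MHz */
#define OSC_BASE_2MHZ   500     /* ns per timer tick at 2 MHz */

/*
 * Simplified sketch of cascading two 16-bit counters to approximate a
 * requested period: total period = base * div1 * div2.  Divisors of 1
 * are avoided (the drivers note the hardware misbehaves with them).
 */
static void cascade_ns_to_timer(unsigned int base, unsigned int *div1,
                                unsigned int *div2, unsigned int *ns)
{
        unsigned int ticks = *ns / base;
        unsigned int d1, best1 = 2, best2 = 2;
        long best_err = -1;

        for (d1 = 2; d1 <= 65536; d1++) {
                unsigned int d2 = ticks / d1;
                long err;

                if (d2 < 2 || d2 > 65536)
                        continue;
                err = (long)ticks - (long)(d1 * d2);
                if (best_err < 0 || err < best_err) {
                        best_err = err;
                        best1 = d1;
                        best2 = d2;
                }
        }
        *div1 = best1;
        *div2 = best2;
        *ns = base * best1 * best2;     /* actual achievable period */
}

int main(void)
{
        unsigned int div1, div2, ns = 100000;   /* ask for 100 us */

        cascade_ns_to_timer(OSC_BASE_2MHZ, &div1, &div2, &ns);
        printf("div1=%u div2=%u -> %u ns\n", div1, div2, ns);
        return 0;
}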
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index f0313496259..ab9d2bd26a2 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -229,7 +229,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
struct comedi_subdevice *s = &dev->subdevices[0];
- int low, hi;
+ unsigned char low, hi;
int timeout = 50; /* wait max 50us */
while (timeout--) {
@@ -281,7 +281,8 @@ static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
analog input dma mode 1 & 3, 816 cards
*/
static void transfer_from_dma_buf(struct comedi_device *dev,
- struct comedi_subdevice *s, short *ptr,
+ struct comedi_subdevice *s,
+ unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
struct pcl816_private *devpriv = dev->private;
@@ -324,7 +325,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
struct comedi_subdevice *s = &dev->subdevices[0];
int len, bufptr, this_dma_buf;
unsigned long dma_flags;
- short *ptr;
+ unsigned short *ptr;
disable_dma(devpriv->dma);
this_dma_buf = devpriv->next_dma_buf;
@@ -352,7 +353,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
devpriv->dma_runs_to_end--;
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- ptr = (short *)devpriv->dmabuf[this_dma_buf];
+ ptr = (unsigned short *)devpriv->dmabuf[this_dma_buf];
len = (devpriv->hwdmasize[0] >> 1) - devpriv->ai_poll_ptr;
bufptr = devpriv->ai_poll_ptr;
@@ -481,8 +482,7 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
tmp = cmd->convert_arg;
i8253_cascade_ns_to_timer(board->i8254_osc_base,
&divisor1, &divisor2,
- &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ &cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ai_ns_min)
cmd->convert_arg = board->ai_ns_min;
if (tmp != cmd->convert_arg)
@@ -528,9 +528,9 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->convert_arg < board->ai_ns_min)
cmd->convert_arg = board->ai_ns_min;
- i8253_cascade_ns_to_timer(board->i8254_osc_base, &divisor1,
- &divisor2, &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(board->i8254_osc_base,
+ &divisor1, &divisor2,
+ &cmd->convert_arg, cmd->flags);
/* PCL816 crash if any divisor is set to 1 */
if (divisor1 == 1) {
@@ -666,7 +666,8 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
}
transfer_from_dma_buf(dev, s,
- (short *)devpriv->dmabuf[devpriv->next_dma_buf],
+ (unsigned short *)devpriv->dmabuf[devpriv->
+ next_dma_buf],
devpriv->ai_poll_ptr, top2);
devpriv->ai_poll_ptr = top1; /* new buffer position */
@@ -1105,7 +1106,7 @@ static const struct pcl816_board boardtypes[] = {
0xffff, /* D/A maxdata */
1024,
1, /* ao chan list */
- 100},
+ I8254_OSC_BASE_10MHZ},
{"pcl814b", 8, 16, 10000, 1, 16, 16, &range_pcl816,
&range_pcl816, PCLx1x_RANGE,
0x00fc,
@@ -1114,7 +1115,7 @@ static const struct pcl816_board boardtypes[] = {
0x3fff,
1024,
1,
- 100},
+ I8254_OSC_BASE_10MHZ},
};
static struct comedi_driver pcl816_driver = {
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index a52ba82ff0e..9e4d7e86050 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -289,7 +289,6 @@ struct pcl818_private {
unsigned int *ai_chanlist; /* actaul chanlist */
unsigned int ai_flags; /* flaglist */
unsigned int ai_data_len; /* len of data buffer */
- short *ai_data; /* data buffer */
unsigned int ai_timer1; /* timers */
unsigned int ai_timer2;
struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */
@@ -418,21 +417,15 @@ static int pcl818_di_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/*
-==============================================================================
- DIGITAL OUTPUT MODE0, 818 cards
-
- only one sample per call is supported
-*/
static int pcl818_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
-
- outb(s->state & 0xff, dev->iobase + PCL818_DO_LO);
- outb((s->state >> 8), dev->iobase + PCL818_DO_HI);
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL818_DO_LO);
+ outb((s->state >> 8), dev->iobase + PCL818_DO_HI);
+ }
data[1] = s->state;
@@ -449,7 +442,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d)
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
struct comedi_subdevice *s = &dev->subdevices[0];
- int low;
+ unsigned char low;
int timeout = 50; /* wait max 50us */
while (timeout--) {
@@ -511,7 +504,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
struct comedi_subdevice *s = &dev->subdevices[0];
int i, len, bufptr;
unsigned long flags;
- short *ptr;
+ unsigned short *ptr;
disable_dma(devpriv->dma);
devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
@@ -534,7 +527,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
devpriv->dma_runs_to_end--;
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
- ptr = (short *)devpriv->dmabuf[1 - devpriv->next_dma_buf];
+ ptr = (unsigned short *)devpriv->dmabuf[1 - devpriv->next_dma_buf];
len = devpriv->hwdmasize[0] >> 1;
bufptr = 0;
@@ -588,7 +581,8 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
struct comedi_subdevice *s = &dev->subdevices[0];
- int i, len, lo;
+ int i, len;
+ unsigned char lo;
outb(0, dev->iobase + PCL818_FI_INTCLR); /* clear fifo int request */
@@ -806,8 +800,9 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
devpriv->neverending_ai = 1; /* well, user want neverending */
if (mode == 1) {
- i8253_cascade_ns_to_timer(devpriv->i8253_osc_base, &divisor1,
- &divisor2, &cmd->convert_arg,
+ i8253_cascade_ns_to_timer(devpriv->i8253_osc_base,
+ &divisor1, &divisor2,
+ &cmd->convert_arg,
TRIG_ROUND_NEAREST);
if (divisor1 == 1) { /* PCL718/818 crash if any divisor is set to 1 */
divisor1 = 2;
@@ -1040,9 +1035,9 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(devpriv->i8253_osc_base, &divisor1,
- &divisor2, &cmd->convert_arg,
- cmd->flags & TRIG_ROUND_MASK);
+ i8253_cascade_ns_to_timer(devpriv->i8253_osc_base,
+ &divisor1, &divisor2,
+ &cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ns_min)
cmd->convert_arg = board->ns_min;
if (tmp != cmd->convert_arg)
@@ -1077,7 +1072,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_data = s->async->prealloc_buf;
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
@@ -1438,9 +1432,9 @@ no_dma:
/* select 1/10MHz oscilator */
if ((it->options[3] == 0) || (it->options[3] == 10))
- devpriv->i8253_osc_base = 100;
+ devpriv->i8253_osc_base = I8254_OSC_BASE_10MHZ;
else
- devpriv->i8253_osc_base = 1000;
+ devpriv->i8253_osc_base = I8254_OSC_BASE_1MHZ;
/* max sampling speed */
devpriv->ns_min = board->ns_min;
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index 423f23676d2..fe482fdd512 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -75,12 +75,6 @@ static int pcmad_ai_wait_for_eoc(struct comedi_device *dev,
return -ETIME;
}
-static bool pcmad_range_is_bipolar(struct comedi_subdevice *s,
- unsigned int range)
-{
- return s->range_table->range[range].min < 0;
-}
-
static int pcmad_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -106,7 +100,7 @@ static int pcmad_ai_insn_read(struct comedi_device *dev,
if (s->maxdata == 0x0fff)
val >>= 4;
- if (pcmad_range_is_bipolar(s, range)) {
+ if (comedi_range_is_bipolar(s, range)) {
/* munge the two's complement value */
val ^= ((s->maxdata + 1) >> 1);
}
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 574443df42d..14cee3ac92c 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -553,12 +553,11 @@ static irqreturn_t interrupt_pcmmio(int irq, void *d)
val |= (1U << n);
}
/* Write the scan to the buffer. */
- if (comedi_buf_put(s->async, ((short *)&val)[0])
+ if (comedi_buf_put(s->async, val)
&&
comedi_buf_put
(s->async,
- ((short *)
- &val)[1])) {
+ val >> 16)) {
s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
} else {
/* Overflow! Stop acquisition!! */
@@ -846,7 +845,7 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
CR_RANGE(insn->chanspec), aref = CR_AREF(insn->chanspec);
unsigned char command_byte = 0;
unsigned iooffset = 0;
- short sample, adc_adjust = 0;
+ unsigned short sample, adc_adjust = 0;
if (chan > 7)
chan -= 8, iooffset = 4; /*
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 67e2bb1d66f..954fa96a50a 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -315,8 +315,8 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
}
/* Write the scan to the buffer. */
- if (comedi_buf_put(s->async, ((short *)&val)[0]) &&
- comedi_buf_put(s->async, ((short *)&val)[1])) {
+ if (comedi_buf_put(s->async, val) &&
+ comedi_buf_put(s->async, val >> 16)) {
s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
} else {
/* Overflow! Stop acquisition!! */
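The pcmmio and pcmuio hunks replace the ((short *)&val)[0] / [1] casts with explicit val and val >> 16, so the order of the two 16-bit samples written for each 32-bit DI scan no longer depends on host endianness (comedi_buf_put() stores one 16-bit sample at a time in this era of the API). A standalone sketch of that split, with a plain array standing in for the comedi async buffer:

#include <stdio.h>

/* Stand-in for the comedi async buffer: just collect 16-bit samples. */
static unsigned short buf[16];
static unsigned int buf_count;

static int buf_put(unsigned short sample)
{
        if (buf_count >= 16)
                return 0;       /* overflow */
        buf[buf_count++] = sample;
        return 1;
}

int main(void)
{
        unsigned int val = 0xdeadbeef;  /* one 32-bit DI scan */

        /* low word first, then high word, regardless of host endianness */
        if (buf_put(val) && buf_put(val >> 16))
                printf("stored 0x%04x 0x%04x\n", buf[0], buf[1]);
        return 0;
}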
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 9775d3622a6..96a46954b3c 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -208,8 +208,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
case buffer:
while (!((status = inb(dev->iobase + DAQP_STATUS))
& DAQP_STATUS_FIFO_EMPTY)) {
-
- short data;
+ unsigned short data;
if (status & DAQP_STATUS_DATA_LOST) {
s->async->events |=
@@ -690,18 +689,12 @@ static int daqp_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct daqp_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
if (devpriv->stop)
return -EIO;
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DAQP_DIGITAL_IO);
- }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 93c980c62a2..44c8712ed9e 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -394,11 +394,8 @@ struct rtd_private {
long ai_count; /* total transfer size (samples) */
int xfer_count; /* # to transfer data. 0->1/2FIFO */
int flags; /* flag event modes */
-
- unsigned char chan_is_bipolar[RTD_MAX_CHANLIST / 8]; /* bit array */
-
+ DECLARE_BITMAP(chan_is_bipolar, RTD_MAX_CHANLIST);
unsigned int ao_readback[2];
-
unsigned fifosz;
};
@@ -407,14 +404,6 @@ struct rtd_private {
#define DMA0_ACTIVE 0x02 /* DMA0 is active */
#define DMA1_ACTIVE 0x04 /* DMA1 is active */
-/* Macros for accessing channel list bit array */
-#define CHAN_ARRAY_TEST(array, index) \
- (((array)[(index)/8] >> ((index) & 0x7)) & 0x1)
-#define CHAN_ARRAY_SET(array, index) \
- (((array)[(index)/8] |= 1 << ((index) & 0x7)))
-#define CHAN_ARRAY_CLEAR(array, index) \
- (((array)[(index)/8] &= ~(1 << ((index) & 0x7))))
-
/*
Given a desired period and the clock period (both in ns),
return the proper counter value (divider-1).
@@ -478,17 +467,17 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
/* +-5 range */
r |= 0x000;
r |= (range & 0x7) << 4;
- CHAN_ARRAY_SET(devpriv->chan_is_bipolar, index);
+ __set_bit(index, devpriv->chan_is_bipolar);
} else if (range < board->range_uni10) {
/* +-10 range */
r |= 0x100;
r |= ((range - board->range_bip10) & 0x7) << 4;
- CHAN_ARRAY_SET(devpriv->chan_is_bipolar, index);
+ __set_bit(index, devpriv->chan_is_bipolar);
} else {
/* +10 range */
r |= 0x200;
r |= ((range - board->range_uni10) & 0x7) << 4;
- CHAN_ARRAY_CLEAR(devpriv->chan_is_bipolar, index);
+ __clear_bit(index, devpriv->chan_is_bipolar);
}
switch (aref) {
@@ -602,7 +591,7 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* convert n samples */
for (n = 0; n < insn->n; n++) {
- s16 d;
+ unsigned short d;
/* trigger conversion */
writew(0, devpriv->las0 + LAS0_ADC);
@@ -619,11 +608,10 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
/*printk ("rtd520: Got 0x%x after %d usec\n", d, ii+1); */
d = d >> 3; /* low 3 bits are marker lines */
- if (CHAN_ARRAY_TEST(devpriv->chan_is_bipolar, 0))
+ if (test_bit(0, devpriv->chan_is_bipolar))
/* convert to comedi unsigned data */
- data[n] = d + 2048;
- else
- data[n] = d;
+ d = comedi_offset_munge(s, d);
+ data[n] = d & s->maxdata;
}
/* return the number of samples read/written */
@@ -643,8 +631,7 @@ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
int ii;
for (ii = 0; ii < count; ii++) {
- short sample;
- s16 d;
+ unsigned short d;
if (0 == devpriv->ai_count) { /* done */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
@@ -653,14 +640,12 @@ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d = d >> 3; /* low 3 bits are marker lines */
- if (CHAN_ARRAY_TEST(devpriv->chan_is_bipolar,
- s->async->cur_chan)) {
+ if (test_bit(s->async->cur_chan, devpriv->chan_is_bipolar))
/* convert to comedi unsigned data */
- sample = d + 2048;
- } else
- sample = d;
+ d = comedi_offset_munge(s, d);
+ d &= s->maxdata;
- if (!comedi_buf_put(s->async, sample))
+ if (!comedi_buf_put(s->async, d))
return -1;
if (devpriv->ai_count > 0) /* < 0, means read forever */
@@ -677,22 +662,19 @@ static int ai_read_dregs(struct comedi_device *dev, struct comedi_subdevice *s)
struct rtd_private *devpriv = dev->private;
while (readl(devpriv->las0 + LAS0_ADC) & FS_ADC_NOT_EMPTY) {
- short sample;
- s16 d = readw(devpriv->las1 + LAS1_ADC_FIFO);
+ unsigned short d = readw(devpriv->las1 + LAS1_ADC_FIFO);
if (0 == devpriv->ai_count) { /* done */
continue; /* read rest */
}
d = d >> 3; /* low 3 bits are marker lines */
- if (CHAN_ARRAY_TEST(devpriv->chan_is_bipolar,
- s->async->cur_chan)) {
+ if (test_bit(s->async->cur_chan, devpriv->chan_is_bipolar))
/* convert to comedi unsigned data */
- sample = d + 2048;
- } else
- sample = d;
+ d = comedi_offset_munge(s, d);
+ d &= s->maxdata;
- if (!comedi_buf_put(s->async, sample))
+ if (!comedi_buf_put(s->async, d))
return -1;
if (devpriv->ai_count > 0) /* < 0, means read forever */
@@ -1217,15 +1199,9 @@ static int rtd_dio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
+ if (comedi_dio_update_state(s, data))
writew(s->state & 0xff, devpriv->las0 + LAS0_DIO0);
- }
data[1] = readw(devpriv->las0 + LAS0_DIO0) & 0xff;
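The rtd520 change drops the hand-rolled CHAN_ARRAY_* macros in favour of DECLARE_BITMAP()/__set_bit()/test_bit(). A userspace sketch of the byte-and-bit indexing those macros implemented (the kernel helpers operate on unsigned long words rather than bytes, but the semantics are equivalent; the array size here is a stand-in):

#include <assert.h>
#include <string.h>

#define MAX_CHANLIST    128

/* One bit per channel, same indexing as the removed CHAN_ARRAY_* macros. */
static unsigned char chan_is_bipolar[MAX_CHANLIST / 8];

static void chan_set(unsigned int index)
{
        chan_is_bipolar[index / 8] |= 1 << (index & 0x7);
}

static void chan_clear(unsigned int index)
{
        chan_is_bipolar[index / 8] &= ~(1 << (index & 0x7));
}

static int chan_test(unsigned int index)
{
        return (chan_is_bipolar[index / 8] >> (index & 0x7)) & 0x1;
}

int main(void)
{
        memset(chan_is_bipolar, 0, sizeof(chan_is_bipolar));
        chan_set(10);                   /* like __set_bit(10, map) */
        assert(chan_test(10));          /* like test_bit(10, map) */
        chan_clear(10);                 /* like __clear_bit(10, map) */
        assert(!chan_test(10));
        return 0;
}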
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index cbb4ba5b852..e1f3671ac05 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -267,13 +267,7 @@ static int rti800_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
+ if (comedi_dio_update_state(s, data)) {
/* Outputs are inverted... */
outb(s->state ^ 0xff, dev->iobase + RTI800_DO);
}
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index d629463b85a..9950f59b119 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -499,14 +499,11 @@ static int s526_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
static int s526_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
-
+ if (comedi_dio_update_state(s, data))
outw(s->state, dev->iobase + REG_DIO);
- }
data[1] = inw(dev->iobase + REG_DIO) & 0xff;
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index d22b95dcb9b..6815cfe2664 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -1,63 +1,63 @@
/*
- comedi/drivers/s626.c
- Sensoray s626 Comedi driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- Based on Sensoray Model 626 Linux driver Version 0.2
- Copyright (C) 2002-2004 Sensoray Co., Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/s626.c
+ * Sensoray s626 Comedi driver
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * Based on Sensoray Model 626 Linux driver Version 0.2
+ * Copyright (C) 2002-2004 Sensoray Co., Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: s626
-Description: Sensoray 626 driver
-Devices: [Sensoray] 626 (s626)
-Authors: Gianluca Palli <gpalli@deis.unibo.it>,
-Updated: Fri, 15 Feb 2008 10:28:42 +0000
-Status: experimental
-
-Configuration options: not applicable, uses PCI auto config
-
-INSN_CONFIG instructions:
- analog input:
- none
-
- analog output:
- none
-
- digital channel:
- s626 has 3 dio subdevices (2,3 and 4) each with 16 i/o channels
- supported configuration options:
- INSN_CONFIG_DIO_QUERY
- COMEDI_INPUT
- COMEDI_OUTPUT
-
- encoder:
- Every channel must be configured before reading.
-
- Example code
-
- insn.insn=INSN_CONFIG; //configuration instruction
- insn.n=1; //number of operation (must be 1)
- insn.data=&initialvalue; //initial value loaded into encoder
- //during configuration
- insn.subdev=5; //encoder subdevice
- insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); //encoder_channel
- //to configure
-
- comedi_do_insn(cf,&insn); //executing configuration
-*/
+ * Driver: s626
+ * Description: Sensoray 626 driver
+ * Devices: [Sensoray] 626 (s626)
+ * Authors: Gianluca Palli <gpalli@deis.unibo.it>,
+ * Updated: Fri, 15 Feb 2008 10:28:42 +0000
+ * Status: experimental
+ *
+ * Configuration options: not applicable, uses PCI auto config
+ *
+ * INSN_CONFIG instructions:
+ * analog input:
+ * none
+ *
+ * analog output:
+ * none
+ *
+ * digital channel:
+ * s626 has 3 dio subdevices (2,3 and 4) each with 16 i/o channels
+ * supported configuration options:
+ * INSN_CONFIG_DIO_QUERY
+ * COMEDI_INPUT
+ * COMEDI_OUTPUT
+ *
+ * encoder:
+ * Every channel must be configured before reading.
+ *
+ * Example code
+ *
+ * insn.insn=INSN_CONFIG; //configuration instruction
+ * insn.n=1; //number of operation (must be 1)
+ * insn.data=&initialvalue; //initial value loaded into encoder
+ * //during configuration
+ * insn.subdev=5; //encoder subdevice
+ * insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); //encoder_channel
+ * //to configure
+ *
+ * comedi_do_insn(cf,&insn); //executing configuration
+ */
#include <linux/module.h>
#include <linux/delay.h>
@@ -71,68 +71,91 @@ INSN_CONFIG instructions:
#include "comedi_fc.h"
#include "s626.h"
-#define PCI_VENDOR_ID_S626 0x1131
-#define PCI_DEVICE_ID_S626 0x7146
-#define PCI_SUBVENDOR_ID_S626 0x6000
-#define PCI_SUBDEVICE_ID_S626 0x0272
+struct s626_buffer_dma {
+ dma_addr_t physical_base;
+ void *logical_base;
+};
struct s626_private {
void __iomem *mmio;
- uint8_t ai_cmd_running; /* ai_cmd is running */
- uint8_t ai_continous; /* continous acquisition */
- int ai_sample_count; /* number of samples to acquire */
- unsigned int ai_sample_timer;
- /* time between samples in units of the timer */
- int ai_convert_count; /* conversion counter */
- unsigned int ai_convert_timer;
- /* time between conversion in units of the timer */
- uint16_t CounterIntEnabs;
- /* Counter interrupt enable mask for MISC2 register. */
- uint8_t AdcItems; /* Number of items in ADC poll list. */
- struct bufferDMA RPSBuf; /* DMA buffer used to hold ADC (RPS1) program. */
- struct bufferDMA ANABuf;
- /* DMA buffer used to receive ADC data and hold DAC data. */
- uint32_t *pDacWBuf;
- /* Pointer to logical adrs of DMA buffer used to hold DAC data. */
- uint16_t Dacpol; /* Image of DAC polarity register. */
- uint8_t TrimSetpoint[12]; /* Images of TrimDAC setpoints */
- /* Charge Enabled (0 or WRMISC2_CHARGE_ENABLE). */
- uint32_t I2CAdrs;
- /* I2C device address for onboard EEPROM (board rev dependent). */
- /* short I2Cards; */
+ uint8_t ai_cmd_running; /* ai_cmd is running */
+ uint8_t ai_continuous; /* continuous acquisition */
+ int ai_sample_count; /* number of samples to acquire */
+ unsigned int ai_sample_timer; /* time between samples in
+ * units of the timer */
+ int ai_convert_count; /* conversion counter */
+ unsigned int ai_convert_timer; /* time between conversion in
+ * units of the timer */
+ uint16_t counter_int_enabs; /* counter interrupt enable mask
+ * for MISC2 register */
+ uint8_t adc_items; /* number of items in ADC poll list */
+ struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1)
+ * program */
+ struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data
+ * and hold DAC data */
+ uint32_t *dac_wbuf; /* pointer to logical adrs of DMA buffer
+ * used to hold DAC data */
+ uint16_t dacpol; /* image of DAC polarity register */
+ uint8_t trim_setpoint[12]; /* images of TrimDAC setpoints */
+ uint32_t i2c_adrs; /* I2C device address for onboard EEPROM
+ * (board rev dependent) */
unsigned int ao_readback[S626_DAC_CHANNELS];
};
-/* COUNTER OBJECT ------------------------------------------------ */
-struct enc_private {
- /* Pointers to functions that differ for A and B counters: */
- uint16_t(*GetEnable) (struct comedi_device *dev, struct enc_private *); /* Return clock enable. */
- uint16_t(*GetIntSrc) (struct comedi_device *dev, struct enc_private *); /* Return interrupt source. */
- uint16_t(*GetLoadTrig) (struct comedi_device *dev, struct enc_private *); /* Return preload trigger source. */
- uint16_t(*GetMode) (struct comedi_device *dev, struct enc_private *); /* Return standardized operating mode. */
- void (*PulseIndex) (struct comedi_device *dev, struct enc_private *); /* Generate soft index strobe. */
- void (*SetEnable) (struct comedi_device *dev, struct enc_private *, uint16_t enab); /* Program clock enable. */
- void (*SetIntSrc) (struct comedi_device *dev, struct enc_private *, uint16_t IntSource); /* Program interrupt source. */
- void (*SetLoadTrig) (struct comedi_device *dev, struct enc_private *, uint16_t Trig); /* Program preload trigger source. */
- void (*SetMode) (struct comedi_device *dev, struct enc_private *, uint16_t Setup, uint16_t DisableIntSrc); /* Program standardized operating mode. */
- void (*ResetCapFlags) (struct comedi_device *dev, struct enc_private *); /* Reset event capture flags. */
-
- uint16_t MyCRA; /* Address of CRA register. */
- uint16_t MyCRB; /* Address of CRB register. */
- uint16_t MyLatchLsw; /* Address of Latch least-significant-word */
- /* register. */
- uint16_t MyEventBits[4]; /* Bit translations for IntSrc -->RDMISC2. */
+/* COUNTER OBJECT ------------------------------------------------ */
+struct s626_enc_info {
+ /* Pointers to functions that differ for A and B counters: */
+ /* Return clock enable. */
+ uint16_t(*get_enable)(struct comedi_device *dev,
+ const struct s626_enc_info *k);
+ /* Return interrupt source. */
+ uint16_t(*get_int_src)(struct comedi_device *dev,
+ const struct s626_enc_info *k);
+ /* Return preload trigger source. */
+ uint16_t(*get_load_trig)(struct comedi_device *dev,
+ const struct s626_enc_info *k);
+ /* Return standardized operating mode. */
+ uint16_t(*get_mode)(struct comedi_device *dev,
+ const struct s626_enc_info *k);
+ /* Generate soft index strobe. */
+ void (*pulse_index)(struct comedi_device *dev,
+ const struct s626_enc_info *k);
+ /* Program clock enable. */
+ void (*set_enable)(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t enab);
+ /* Program interrupt source. */
+ void (*set_int_src)(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t int_source);
+ /* Program preload trigger source. */
+ void (*set_load_trig)(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t trig);
+ /* Program standardized operating mode. */
+ void (*set_mode)(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t setup,
+ uint16_t disable_int_src);
+ /* Reset event capture flags. */
+ void (*reset_cap_flags)(struct comedi_device *dev,
+ const struct s626_enc_info *k);
+
+ uint16_t my_cra; /* address of CRA register */
+ uint16_t my_crb; /* address of CRB register */
+ uint16_t my_latch_lsw; /* address of Latch least-significant-word
+ * register */
+ uint16_t my_event_bits[4]; /* bit translations for IntSrc -->RDMISC2 */
};
-#define encpriv ((struct enc_private *)(dev->subdevices+5)->private)
-
-/* Counter overflow/index event flag masks for RDMISC2. */
-#define INDXMASK(C) (1 << (((C) > 2) ? ((C) * 2 - 1) : ((C) * 2 + 4)))
-#define OVERMASK(C) (1 << (((C) > 2) ? ((C) * 2 + 5) : ((C) * 2 + 10)))
-#define EVBITS(C) { 0, OVERMASK(C), INDXMASK(C), OVERMASK(C) | INDXMASK(C) }
+/* Counter overflow/index event flag masks for RDMISC2. */
+#define S626_INDXMASK(C) (1 << (((C) > 2) ? ((C) * 2 - 1) : ((C) * 2 + 4)))
+#define S626_OVERMASK(C) (1 << (((C) > 2) ? ((C) * 2 + 5) : ((C) * 2 + 10)))
+#define S626_EVBITS(C) { 0, S626_OVERMASK(C), S626_INDXMASK(C), \
+ S626_OVERMASK(C) | S626_INDXMASK(C) }
-/* Translation table to map IntSrc into equivalent RDMISC2 event flag bits. */
-/* static const uint16_t EventBits[][4] = { EVBITS(0), EVBITS(1), EVBITS(2), EVBITS(3), EVBITS(4), EVBITS(5) }; */
+/*
+ * Translation table to map IntSrc into equivalent RDMISC2 event flag bits.
+ * static const uint16_t s626_event_bits[][4] =
+ * { S626_EVBITS(0), S626_EVBITS(1), S626_EVBITS(2), S626_EVBITS(3),
+ * S626_EVBITS(4), S626_EVBITS(5) };
+ */
/*
* Enable/disable a function or test status bit(s) that are accessed
@@ -144,6 +167,7 @@ static void s626_mc_enable(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
unsigned int val = (cmd << 16) | cmd;
+ mmiowb();
writel(val, devpriv->mmio + reg);
}
@@ -153,6 +177,7 @@ static void s626_mc_disable(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
writel(cmd << 16, devpriv->mmio + reg);
+ mmiowb();
}
static bool s626_mc_test(struct comedi_device *dev,
@@ -166,15 +191,10 @@ static bool s626_mc_test(struct comedi_device *dev,
return (val & cmd) ? true : false;
}
-#define BUGFIX_STREG(REGADRS) (REGADRS - 4)
+#define S626_BUGFIX_STREG(REGADRS) ((REGADRS) - 4)
-/* Write a time slot control record to TSL2. */
-#define VECTPORT(VECTNUM) (P_TSL2 + ((VECTNUM) << 2))
-
-/* Code macros used for constructing I2C command bytes. */
-#define I2C_B2(ATTR, VAL) (((ATTR) << 6) | ((VAL) << 24))
-#define I2C_B1(ATTR, VAL) (((ATTR) << 4) | ((VAL) << 16))
-#define I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8))
+/* Write a time slot control record to TSL2. */
+#define S626_VECTPORT(VECTNUM) (S626_P_TSL2 + ((VECTNUM) << 2))
static const struct comedi_lrange s626_range_table = {
2, {
@@ -183,178 +203,182 @@ static const struct comedi_lrange s626_range_table = {
}
};
-/* Execute a DEBI transfer. This must be called from within a */
-/* critical section. */
-static void DEBItransfer(struct comedi_device *dev)
+/*
+ * Execute a DEBI transfer. This must be called from within a critical section.
+ */
+static void s626_debi_transfer(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
/* Initiate upload of shadow RAM to DEBI control register */
- s626_mc_enable(dev, MC2_UPLD_DEBI, P_MC2);
+ s626_mc_enable(dev, S626_MC2_UPLD_DEBI, S626_P_MC2);
/*
* Wait for completion of upload from shadow RAM to
* DEBI control register.
*/
- while (!s626_mc_test(dev, MC2_UPLD_DEBI, P_MC2))
+ while (!s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
;
/* Wait until DEBI transfer is done */
- while (readl(devpriv->mmio + P_PSR) & PSR_DEBI_S)
+ while (readl(devpriv->mmio + S626_P_PSR) & S626_PSR_DEBI_S)
;
}
-/* Initialize the DEBI interface for all transfers. */
-
-static uint16_t DEBIread(struct comedi_device *dev, uint16_t addr)
+/*
+ * Read a value from a gate array register.
+ */
+static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
{
struct s626_private *devpriv = dev->private;
/* Set up DEBI control register value in shadow RAM */
- writel(DEBI_CMD_RDWORD | addr, devpriv->mmio + P_DEBICMD);
+ writel(S626_DEBI_CMD_RDWORD | addr, devpriv->mmio + S626_P_DEBICMD);
/* Execute the DEBI transfer. */
- DEBItransfer(dev);
+ s626_debi_transfer(dev);
- return readl(devpriv->mmio + P_DEBIAD);
+ return readl(devpriv->mmio + S626_P_DEBIAD);
}
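/*
 * Minimal usage sketch: the interrupt code below reads the MISC2 status
 * word with exactly this helper, i.e.
 *
 *	uint16_t misc2 = s626_debi_read(dev, S626_LP_RDMISC2);
 *
 * (S626_LP_RDMISC2 is one of the gate array register addresses from the
 * driver header.)
 */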
-/* Write a value to a gate array register. */
-static void DEBIwrite(struct comedi_device *dev, uint16_t addr, uint16_t wdata)
+/*
+ * Write a value to a gate array register.
+ */
+static void s626_debi_write(struct comedi_device *dev, uint16_t addr,
+ uint16_t wdata)
{
struct s626_private *devpriv = dev->private;
/* Set up DEBI control register value in shadow RAM */
- writel(DEBI_CMD_WRWORD | addr, devpriv->mmio + P_DEBICMD);
- writel(wdata, devpriv->mmio + P_DEBIAD);
+ writel(S626_DEBI_CMD_WRWORD | addr, devpriv->mmio + S626_P_DEBICMD);
+ writel(wdata, devpriv->mmio + S626_P_DEBIAD);
/* Execute the DEBI transfer. */
- DEBItransfer(dev);
+ s626_debi_transfer(dev);
}
-/* Replace the specified bits in a gate array register. Imports: mask
+/*
+ * Replace the specified bits in a gate array register. Imports: mask
* specifies bits that are to be preserved, wdata is new value to be
* or'd with the masked original.
*/
-static void DEBIreplace(struct comedi_device *dev, unsigned int addr,
- unsigned int mask, unsigned int wdata)
+static void s626_debi_replace(struct comedi_device *dev, unsigned int addr,
+ unsigned int mask, unsigned int wdata)
{
struct s626_private *devpriv = dev->private;
unsigned int val;
addr &= 0xffff;
- writel(DEBI_CMD_RDWORD | addr, devpriv->mmio + P_DEBICMD);
- DEBItransfer(dev);
+ writel(S626_DEBI_CMD_RDWORD | addr, devpriv->mmio + S626_P_DEBICMD);
+ s626_debi_transfer(dev);
- writel(DEBI_CMD_WRWORD | addr, devpriv->mmio + P_DEBICMD);
- val = readl(devpriv->mmio + P_DEBIAD);
+ writel(S626_DEBI_CMD_WRWORD | addr, devpriv->mmio + S626_P_DEBICMD);
+ val = readl(devpriv->mmio + S626_P_DEBIAD);
val &= mask;
val |= wdata;
- writel(val & 0xffff, devpriv->mmio + P_DEBIAD);
- DEBItransfer(dev);
+ writel(val & 0xffff, devpriv->mmio + S626_P_DEBIAD);
+ s626_debi_transfer(dev);
}
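/*
 * In effect, s626_debi_replace(dev, addr, mask, wdata) leaves the register
 * holding
 *
 *	(s626_debi_read(dev, addr) & mask) | wdata
 *
 * which is why most callers below pass the complement of the field being
 * updated as the mask and a S626_SET_xxx() value as wdata.
 */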
/* ************** EEPROM ACCESS FUNCTIONS ************** */
-static uint32_t I2Chandshake(struct comedi_device *dev, uint32_t val)
+static uint32_t s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
{
struct s626_private *devpriv = dev->private;
unsigned int ctrl;
/* Write I2C command to I2C Transfer Control shadow register */
- writel(val, devpriv->mmio + P_I2CCTRL);
+ writel(val, devpriv->mmio + S626_P_I2CCTRL);
/*
* Upload I2C shadow registers into working registers and
* wait for upload confirmation.
*/
- s626_mc_enable(dev, MC2_UPLD_IIC, P_MC2);
- while (!s626_mc_test(dev, MC2_UPLD_IIC, P_MC2))
+ s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
+ while (!s626_mc_test(dev, S626_MC2_UPLD_IIC, S626_P_MC2))
;
/* Wait until I2C bus transfer is finished or an error occurs */
do {
- ctrl = readl(devpriv->mmio + P_I2CCTRL);
- } while ((ctrl & (I2C_BUSY | I2C_ERR)) == I2C_BUSY);
+ ctrl = readl(devpriv->mmio + S626_P_I2CCTRL);
+ } while ((ctrl & (S626_I2C_BUSY | S626_I2C_ERR)) == S626_I2C_BUSY);
/* Return non-zero if I2C error occurred */
- return ctrl & I2C_ERR;
+ return ctrl & S626_I2C_ERR;
}
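/*
 * The S626_I2C_B2/B1/B0 helpers used below (the unprefixed versions were
 * removed above and presumably now live in the driver header) each pack one
 * attribute/data pair into the I2C Transfer Control word:
 *
 *	B2: data in bits 31..24, attribute in bits 7..6
 *	B1: data in bits 23..16, attribute in bits 5..4
 *	B0: data in bits 15..8,  attribute in bits 3..2
 *
 * so a single s626_i2c_handshake() call can describe a three-byte bus
 * transaction.
 */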
-/* Read uint8_t from EEPROM. */
-static uint8_t I2Cread(struct comedi_device *dev, uint8_t addr)
+/* Read uint8_t from EEPROM. */
+static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
{
struct s626_private *devpriv = dev->private;
- /* Send EEPROM target address. */
- if (I2Chandshake(dev, I2C_B2(I2C_ATTRSTART, I2CW)
- /* Byte2 = I2C command: write to I2C EEPROM device. */
- | I2C_B1(I2C_ATTRSTOP, addr)
- /* Byte1 = EEPROM internal target address. */
- | I2C_B0(I2C_ATTRNOP, 0))) { /* Byte0 = Not sent. */
- /* Abort function and declare error if handshake failed. */
+ /*
+ * Send EEPROM target address:
+ * Byte2 = I2C command: write to I2C EEPROM device.
+ * Byte1 = EEPROM internal target address.
+ * Byte0 = Not sent.
+ */
+ if (s626_i2c_handshake(dev, S626_I2C_B2(S626_I2C_ATTRSTART,
+ devpriv->i2c_adrs) |
+ S626_I2C_B1(S626_I2C_ATTRSTOP, addr) |
+ S626_I2C_B0(S626_I2C_ATTRNOP, 0)))
+ /* Abort function and declare error if handshake failed. */
return 0;
- }
- /* Execute EEPROM read. */
- if (I2Chandshake(dev, I2C_B2(I2C_ATTRSTART, I2CR)
-
- /* Byte2 = I2C */
- /* command: read */
- /* from I2C EEPROM */
- /* device. */
- |I2C_B1(I2C_ATTRSTOP, 0)
- /* Byte1 receives */
- /* uint8_t from */
- /* EEPROM. */
- |I2C_B0(I2C_ATTRNOP, 0))) { /* Byte0 = Not sent. */
-
- /* Abort function and declare error if handshake failed. */
+ /*
+ * Execute EEPROM read:
+ * Byte2 = I2C command: read from I2C EEPROM device.
+ * Byte1 receives uint8_t from EEPROM.
+ * Byte0 = Not sent.
+ */
+ if (s626_i2c_handshake(dev, S626_I2C_B2(S626_I2C_ATTRSTART,
+ (devpriv->i2c_adrs | 1)) |
+ S626_I2C_B1(S626_I2C_ATTRSTOP, 0) |
+ S626_I2C_B0(S626_I2C_ATTRNOP, 0)))
+ /* Abort function and declare error if handshake failed. */
return 0;
- }
- return (readl(devpriv->mmio + P_I2CCTRL) >> 16) & 0xff;
+ return (readl(devpriv->mmio + S626_P_I2CCTRL) >> 16) & 0xff;
}
/* *********** DAC FUNCTIONS *********** */
-/* Slot 0 base settings. */
-#define VECT0 (XSD2 | RSD3 | SIB_A2)
-/* Slot 0 always shifts in 0xFF and store it to FB_BUFFER2. */
+/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
+static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
-/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
-static uint8_t trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
-
-/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
-static uint8_t trimadrs[] = { 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63 };
+/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
+static const uint8_t s626_trimadrs[] = {
+ 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
+};
-/* Private helper function: Transmit serial data to DAC via Audio
+/*
+ * Private helper function: Transmit serial data to DAC via Audio
* channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
- * Dacpol contains valid target image.
+ * dacpol contains valid target image.
*/
-static void SendDAC(struct comedi_device *dev, uint32_t val)
+static void s626_send_dac(struct comedi_device *dev, uint32_t val)
{
struct s626_private *devpriv = dev->private;
/* START THE SERIAL CLOCK RUNNING ------------- */
- /* Assert DAC polarity control and enable gating of DAC serial clock
+ /*
+ * Assert DAC polarity control and enable gating of DAC serial clock
* and audio bit stream signals. At this point in time we must be
* assured of being in time slot 0. If we are not in slot 0, the
* serial clock and audio stream signals will be disabled; this is
- * because the following DEBIwrite statement (which enables signals
- * to be passed through the gate array) would execute before the
- * trailing edge of WS1/WS3 (which turns off the signals), thus
+ * because the following s626_debi_write statement (which enables
+ * signals to be passed through the gate array) would execute before
+ * the trailing edge of WS1/WS3 (which turns off the signals), thus
* causing the signals to be inactive during the DAC write.
*/
- DEBIwrite(dev, LP_DACPOL, devpriv->Dacpol);
+ s626_debi_write(dev, S626_LP_DACPOL, devpriv->dacpol);
/* TRANSFER OUTPUT DWORD VALUE INTO A2'S OUTPUT FIFO ---------------- */
/* Copy DAC setpoint value to DAC's output DMA buffer. */
-
- /* writel(val, devpriv->mmio + (uint32_t)devpriv->pDacWBuf); */
- *devpriv->pDacWBuf = val;
+ /* writel(val, devpriv->mmio + (uint32_t)devpriv->dac_wbuf); */
+ *devpriv->dac_wbuf = val;
/*
* Enable the output DMA transfer. This will cause the DMAC to copy
@@ -362,56 +386,62 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* then immediately terminate because the protection address is
* reached upon transfer of the first DWORD value.
*/
- s626_mc_enable(dev, MC1_A2OUT, P_MC1);
+ s626_mc_enable(dev, S626_MC1_A2OUT, S626_P_MC1);
- /* While the DMA transfer is executing ... */
+ /* While the DMA transfer is executing ... */
/*
* Reset Audio2 output FIFO's underflow flag (along with any
* other FIFO underflow/overflow flags). When set, this flag
* will indicate that we have emerged from slot 0.
*/
- writel(ISR_AFOU, devpriv->mmio + P_ISR);
+ writel(S626_ISR_AFOU, devpriv->mmio + S626_P_ISR);
- /* Wait for the DMA transfer to finish so that there will be data
+ /*
+ * Wait for the DMA transfer to finish so that there will be data
* available in the FIFO when time slot 1 tries to transfer a DWORD
* from the FIFO to the output buffer register. We test for DMA
* Done by polling the DMAC enable flag; this flag is automatically
* cleared when the transfer has finished.
*/
- while (readl(devpriv->mmio + P_MC1) & MC1_A2OUT)
+ while (readl(devpriv->mmio + S626_P_MC1) & S626_MC1_A2OUT)
;
/* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */
- /* FIFO data is now available, so we enable execution of time slots
+ /*
+ * FIFO data is now available, so we enable execution of time slots
* 1 and higher by clearing the EOS flag in slot 0. Note that SD3
* will be shifted in and stored in FB_BUFFER2 for end-of-slot-list
* detection.
*/
- writel(XSD2 | RSD3 | SIB_A2, devpriv->mmio + VECTPORT(0));
+ writel(S626_XSD2 | S626_RSD3 | S626_SIB_A2,
+ devpriv->mmio + S626_VECTPORT(0));
- /* Wait for slot 1 to execute to ensure that the Packet will be
+ /*
+ * Wait for slot 1 to execute to ensure that the Packet will be
* transmitted. This is detected by polling the Audio2 output FIFO
* underflow flag, which will be set when slot 1 execution has
* finished transferring the DAC's data DWORD from the output FIFO
* to the output buffer register.
*/
- while (!(readl(devpriv->mmio + P_SSR) & SSR_AF2_OUT))
+ while (!(readl(devpriv->mmio + S626_P_SSR) & S626_SSR_AF2_OUT))
;
- /* Set up to trap execution at slot 0 when the TSL sequencer cycles
+ /*
+ * Set up to trap execution at slot 0 when the TSL sequencer cycles
* back to slot 0 after executing the EOS in slot 5. Also,
* simultaneously shift out and in the 0x00 that is ALWAYS the value
* stored in the last byte to be shifted out of the FIFO's DWORD
* buffer register.
*/
- writel(XSD2 | XFIFO_2 | RSD2 | SIB_A2 | EOS,
- devpriv->mmio + VECTPORT(0));
+ writel(S626_XSD2 | S626_XFIFO_2 | S626_RSD2 | S626_SIB_A2 | S626_EOS,
+ devpriv->mmio + S626_VECTPORT(0));
/* WAIT FOR THE TRANSACTION TO FINISH ----------------------- */
- /* Wait for the TSL to finish executing all time slots before
+ /*
+ * Wait for the TSL to finish executing all time slots before
* exiting this function. We must do this so that the next DAC
* write doesn't start, thereby enabling clock/chip select signals:
*
@@ -428,17 +458,19 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* we test for the FB_BUFFER2 MSB contents to be equal to 0xFF. If
* the TSL has not yet finished executing slot 5 ...
*/
- if (readl(devpriv->mmio + P_FB_BUFFER2) & 0xff000000) {
- /* The trap was set on time and we are still executing somewhere
+ if (readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000) {
+ /*
+ * The trap was set on time and we are still executing somewhere
* in slots 2-5, so we now wait for slot 0 to execute and trap
* TSL execution. This is detected when FB_BUFFER2 MSB changes
* from 0xFF to 0x00, which slot 0 causes to happen by shifting
* out/in on SD2 the 0x00 that is always referenced by slot 5.
*/
- while (readl(devpriv->mmio + P_FB_BUFFER2) & 0xff000000)
+ while (readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000)
;
}
- /* Either (1) we were too late setting the slot 0 trap; the TSL
+ /*
+ * Either (1) we were too late setting the slot 0 trap; the TSL
* sequencer restarted slot 0 before we could set the EOS trap flag,
* or (2) we were not late and execution is now trapped at slot 0.
* In either case, we must now change slot 0 so that it will store
@@ -446,37 +478,46 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* In order to do this, we reprogram slot 0 so that it will shift in
* SD3, which is driven only by a pull-up resistor.
*/
- writel(RSD3 | SIB_A2 | EOS, devpriv->mmio + VECTPORT(0));
+ writel(S626_RSD3 | S626_SIB_A2 | S626_EOS,
+ devpriv->mmio + S626_VECTPORT(0));
- /* Wait for slot 0 to execute, at which time the TSL is setup for
+ /*
+ * Wait for slot 0 to execute, at which time the TSL is setup for
* the next DAC write. This is detected when FB_BUFFER2 MSB changes
* from 0x00 to 0xFF.
*/
- while (!(readl(devpriv->mmio + P_FB_BUFFER2) & 0xff000000))
+ while (!(readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000))
;
}
-/* Private helper function: Write setpoint to an application DAC channel. */
-static void SetDAC(struct comedi_device *dev, uint16_t chan, short dacdata)
+/*
+ * Private helper function: Write setpoint to an application DAC channel.
+ */
+static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
+ int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
- register uint16_t signmask;
- register uint32_t WSImage;
+ uint16_t signmask;
+ uint32_t ws_image;
+ uint32_t val;
- /* Adjust DAC data polarity and set up Polarity Control Register */
- /* image. */
+ /*
+ * Adjust DAC data polarity and set up Polarity Control Register image.
+ */
signmask = 1 << chan;
if (dacdata < 0) {
dacdata = -dacdata;
- devpriv->Dacpol |= signmask;
- } else
- devpriv->Dacpol &= ~signmask;
+ devpriv->dacpol |= signmask;
+ } else {
+ devpriv->dacpol &= ~signmask;
+ }
- /* Limit DAC setpoint value to valid range. */
- if ((uint16_t) dacdata > 0x1FFF)
+ /* Limit DAC setpoint value to valid range. */
+ if ((uint16_t)dacdata > 0x1FFF)
dacdata = 0x1FFF;
- /* Set up TSL2 records (aka "vectors") for DAC update. Vectors V2
+ /*
+ * Set up TSL2 records (aka "vectors") for DAC update. Vectors V2
* and V3 transmit the setpoint to the target DAC. V4 and V5 send
* data to a non-existent TrimDac channel just to keep the clock
* running after sending data to the target DAC. This is necessary
@@ -487,140 +528,792 @@ static void SetDAC(struct comedi_device *dev, uint16_t chan, short dacdata)
*/
/* Choose DAC chip select to be asserted */
- WSImage = (chan & 2) ? WS1 : WS2;
+ ws_image = (chan & 2) ? S626_WS1 : S626_WS2;
/* Slot 2: Transmit high data byte to target DAC */
- writel(XSD2 | XFIFO_1 | WSImage, devpriv->mmio + VECTPORT(2));
+ writel(S626_XSD2 | S626_XFIFO_1 | ws_image,
+ devpriv->mmio + S626_VECTPORT(2));
/* Slot 3: Transmit low data byte to target DAC */
- writel(XSD2 | XFIFO_0 | WSImage, devpriv->mmio + VECTPORT(3));
+ writel(S626_XSD2 | S626_XFIFO_0 | ws_image,
+ devpriv->mmio + S626_VECTPORT(3));
/* Slot 4: Transmit to non-existent TrimDac channel to keep clock */
- writel(XSD2 | XFIFO_3 | WS3, devpriv->mmio + VECTPORT(4));
+ writel(S626_XSD2 | S626_XFIFO_3 | S626_WS3,
+ devpriv->mmio + S626_VECTPORT(4));
/* Slot 5: running after writing target DAC's low data byte */
- writel(XSD2 | XFIFO_2 | WS3 | EOS, devpriv->mmio + VECTPORT(5));
+ writel(S626_XSD2 | S626_XFIFO_2 | S626_WS3 | S626_EOS,
+ devpriv->mmio + S626_VECTPORT(5));
- /* Construct and transmit target DAC's serial packet:
- * ( A10D DDDD ),( DDDD DDDD ),( 0x0F ),( 0x00 ) where A is chan<0>,
+ /*
+ * Construct and transmit target DAC's serial packet:
+ * (A10D DDDD), (DDDD DDDD), (0x0F), (0x00) where A is chan<0>,
* and D<12:0> is the DAC setpoint. Append a WORD value (that writes
* to a non-existent TrimDac channel) that serves to keep the clock
* running after the packet has been sent to the target DAC.
*/
- SendDAC(dev, 0x0F000000
- /* Continue clock after target DAC data (write to non-existent trimdac). */
- | 0x00004000
- /* Address the two main dual-DAC devices (TSL's chip select enables
- * target device). */
- | ((uint32_t) (chan & 1) << 15)
- /* Address the DAC channel within the device. */
- | (uint32_t) dacdata); /* Include DAC setpoint data. */
-
+ val = 0x0F000000; /* Continue clock after target DAC data
+ * (write to non-existent trimdac). */
+ val |= 0x00004000; /* Address the two main dual-DAC devices
+ * (TSL's chip select enables target device). */
+ val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel
+ * within the device. */
+ val |= (uint32_t)dacdata; /* Include DAC setpoint data. */
+ s626_send_dac(dev, val);
}
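/*
 * Worked example of the packet assembled above for chan = 1,
 * dacdata = 0x0ABC:
 *
 *	val = 0x0F000000 | 0x00004000 | (1 << 15) | 0x0ABC = 0x0F00CABC
 *
 * i.e. the 0x0F trim-DAC filler byte, device select bit 14, channel select
 * bit 15 and the 13-bit setpoint in the low word.
 */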
-static void WriteTrimDAC(struct comedi_device *dev, uint8_t LogicalChan,
- uint8_t DacData)
+static void s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
+ uint8_t dac_data)
{
struct s626_private *devpriv = dev->private;
uint32_t chan;
- /* Save the new setpoint in case the application needs to read it back later. */
- devpriv->TrimSetpoint[LogicalChan] = (uint8_t) DacData;
+ /*
+ * Save the new setpoint in case the application needs to read it back
+ * later.
+ */
+ devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data;
- /* Map logical channel number to physical channel number. */
- chan = (uint32_t) trimchan[LogicalChan];
+ /* Map logical channel number to physical channel number. */
+ chan = s626_trimchan[logical_chan];
- /* Set up TSL2 records for TrimDac write operation. All slots shift
+ /*
+ * Set up TSL2 records for TrimDac write operation. All slots shift
* 0xFF in from pulled-up SD3 so that the end of the slot sequence
* can be detected.
*/
/* Slot 2: Send high uint8_t to target TrimDac */
- writel(XSD2 | XFIFO_1 | WS3, devpriv->mmio + VECTPORT(2));
+ writel(S626_XSD2 | S626_XFIFO_1 | S626_WS3,
+ devpriv->mmio + S626_VECTPORT(2));
/* Slot 3: Send low uint8_t to target TrimDac */
- writel(XSD2 | XFIFO_0 | WS3, devpriv->mmio + VECTPORT(3));
+ writel(S626_XSD2 | S626_XFIFO_0 | S626_WS3,
+ devpriv->mmio + S626_VECTPORT(3));
/* Slot 4: Send NOP high uint8_t to DAC0 to keep clock running */
- writel(XSD2 | XFIFO_3 | WS1, devpriv->mmio + VECTPORT(4));
+ writel(S626_XSD2 | S626_XFIFO_3 | S626_WS1,
+ devpriv->mmio + S626_VECTPORT(4));
/* Slot 5: Send NOP low uint8_t to DAC0 */
- writel(XSD2 | XFIFO_2 | WS1 | EOS, devpriv->mmio + VECTPORT(5));
+ writel(S626_XSD2 | S626_XFIFO_2 | S626_WS1 | S626_EOS,
+ devpriv->mmio + S626_VECTPORT(5));
- /* Construct and transmit target DAC's serial packet:
- * ( 0000 AAAA ), ( DDDD DDDD ),( 0x00 ),( 0x00 ) where A<3:0> is the
+ /*
+ * Construct and transmit target DAC's serial packet:
+ * (0000 AAAA), (DDDD DDDD), (0x00), (0x00) where A<3:0> is the
* DAC channel's address, and D<7:0> is the DAC setpoint. Append a
* WORD value (that writes a channel 0 NOP command to a non-existent
* main DAC channel) that serves to keep the clock running after the
* packet has been sent to the target DAC.
*/
- /* Address the DAC channel within the trimdac device. */
- SendDAC(dev, ((uint32_t) chan << 8)
- | (uint32_t) DacData); /* Include DAC setpoint data. */
+ /*
+ * Address the DAC channel within the trimdac device.
+ * Include DAC setpoint data.
+ */
+ s626_send_dac(dev, (chan << 8) | dac_data);
}
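/*
 * Example: logical TrimDac channel 0 maps to physical channel
 * s626_trimchan[0] == 10, so s626_write_trim_dac(dev, 0, 0x80) hands the
 * packet (10 << 8) | 0x80 == 0x0A80 to s626_send_dac().
 */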
-static void LoadTrimDACs(struct comedi_device *dev)
+static void s626_load_trim_dacs(struct comedi_device *dev)
{
- register uint8_t i;
+ uint8_t i;
- /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
- for (i = 0; i < ARRAY_SIZE(trimchan); i++)
- WriteTrimDAC(dev, i, I2Cread(dev, trimadrs[i]));
+ /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
+ for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++)
+ s626_write_trim_dac(dev, i,
+ s626_i2c_read(dev, s626_trimadrs[i]));
}
/* ****** COUNTER FUNCTIONS ******* */
-/* All counter functions address a specific counter by means of the
+
+/*
+ * All counter functions address a specific counter by means of the
* "Counter" argument, which is a logical counter number. The Counter
* argument may have any of the following legal values: 0=0A, 1=1A,
* 2=2A, 3=0B, 4=1B, 5=2B.
*/
-/* Read a counter's output latch. */
-static uint32_t ReadLatch(struct comedi_device *dev, struct enc_private *k)
+/*
+ * Read a counter's output latch.
+ */
+static uint32_t s626_read_latch(struct comedi_device *dev,
+ const struct s626_enc_info *k)
{
- register uint32_t value;
+ uint32_t value;
- /* Latch counts and fetch LSW of latched counts value. */
- value = (uint32_t) DEBIread(dev, k->MyLatchLsw);
+ /* Latch counts and fetch LSW of latched counts value. */
+ value = s626_debi_read(dev, k->my_latch_lsw);
- /* Fetch MSW of latched counts and combine with LSW. */
- value |= ((uint32_t) DEBIread(dev, k->MyLatchLsw + 2) << 16);
+ /* Fetch MSW of latched counts and combine with LSW. */
+ value |= ((uint32_t)s626_debi_read(dev, k->my_latch_lsw + 2) << 16);
- /* Return latched counts. */
+ /* Return latched counts. */
return value;
}
-/* Return/set a counter pair's latch trigger source. 0: On read
+/*
+ * Return/set a counter pair's latch trigger source. 0: On read
* access, 1: A index latches A, 2: B index latches B, 3: A overflow
* latches B.
*/
-static void SetLatchSource(struct comedi_device *dev, struct enc_private *k,
- uint16_t value)
+static void s626_set_latch_source(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t value)
{
- DEBIreplace(dev, k->MyCRB,
- ~(CRBMSK_INTCTRL | CRBMSK_LATCHSRC),
- value << CRBBIT_LATCHSRC);
+ s626_debi_replace(dev, k->my_crb,
+ ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
+ S626_SET_CRB_LATCHSRC(value));
}
-/* Write value into counter preload register. */
-static void Preload(struct comedi_device *dev, struct enc_private *k,
- uint32_t value)
+/*
+ * Write value into counter preload register.
+ */
+static void s626_preload(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint32_t value)
{
- DEBIwrite(dev, (uint16_t) (k->MyLatchLsw), (uint16_t) value);
- DEBIwrite(dev, (uint16_t) (k->MyLatchLsw + 2),
- (uint16_t) (value >> 16));
+ s626_debi_write(dev, k->my_latch_lsw, value);
+ s626_debi_write(dev, k->my_latch_lsw + 2, value >> 16);
}
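/*
 * Example: s626_preload(dev, k, 0x123456) writes 0x3456 to k->my_latch_lsw
 * and 0x0012 to k->my_latch_lsw + 2, splitting the preload value into its
 * least and most significant 16-bit words.
 */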
-static unsigned int s626_ai_reg_to_uint(int data)
+/* ****** PRIVATE COUNTER FUNCTIONS ****** */
+
+/*
+ * Reset a counter's index and overflow event capture flags.
+ */
+static void s626_reset_cap_flags_a(struct comedi_device *dev,
+ const struct s626_enc_info *k)
{
- unsigned int tempdata;
+ s626_debi_replace(dev, k->my_crb, ~S626_CRBMSK_INTCTRL,
+ (S626_SET_CRB_INTRESETCMD(1) |
+ S626_SET_CRB_INTRESET_A(1)));
+}
- tempdata = (data >> 18);
- if (tempdata & 0x2000)
- tempdata &= 0x1fff;
- else
- tempdata += (1 << 13);
+static void s626_reset_cap_flags_b(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ s626_debi_replace(dev, k->my_crb, ~S626_CRBMSK_INTCTRL,
+ (S626_SET_CRB_INTRESETCMD(1) |
+ S626_SET_CRB_INTRESET_B(1)));
+}
+
+/*
+ * Return counter setup in a format (COUNTER_SETUP) that is consistent
+ * for both A and B counters.
+ */
+static uint16_t s626_get_mode_a(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ uint16_t cra;
+ uint16_t crb;
+ uint16_t setup;
+ unsigned cntsrc, clkmult, clkpol, encmode;
+
+ /* Fetch CRA and CRB register images. */
+ cra = s626_debi_read(dev, k->my_cra);
+ crb = s626_debi_read(dev, k->my_crb);
+
+ /*
+ * Populate the standardized counter setup bit fields.
+ */
+ setup =
+ /* LoadSrc = LoadSrcA. */
+ S626_SET_STD_LOADSRC(S626_GET_CRA_LOADSRC_A(cra)) |
+ /* LatchSrc = LatchSrcA. */
+ S626_SET_STD_LATCHSRC(S626_GET_CRB_LATCHSRC(crb)) |
+ /* IntSrc = IntSrcA. */
+ S626_SET_STD_INTSRC(S626_GET_CRA_INTSRC_A(cra)) |
+ /* IndxSrc = IndxSrcA. */
+ S626_SET_STD_INDXSRC(S626_GET_CRA_INDXSRC_A(cra)) |
+ /* IndxPol = IndxPolA. */
+ S626_SET_STD_INDXPOL(S626_GET_CRA_INDXPOL_A(cra)) |
+ /* ClkEnab = ClkEnabA. */
+ S626_SET_STD_CLKENAB(S626_GET_CRB_CLKENAB_A(crb));
+
+ /* Adjust mode-dependent parameters. */
+ cntsrc = S626_GET_CRA_CNTSRC_A(cra);
+ if (cntsrc & S626_CNTSRC_SYSCLK) {
+ /* Timer mode (CntSrcA<1> == 1): */
+ encmode = S626_ENCMODE_TIMER;
+ /* Set ClkPol to indicate count direction (CntSrcA<0>). */
+ clkpol = cntsrc & 1;
+ /* ClkMult must be 1x in Timer mode. */
+ clkmult = S626_CLKMULT_1X;
+ } else {
+ /* Counter mode (CntSrcA<1> == 0): */
+ encmode = S626_ENCMODE_COUNTER;
+ /* Pass through ClkPol. */
+ clkpol = S626_GET_CRA_CLKPOL_A(cra);
+ /* Force ClkMult to 1x if not legal, else pass through. */
+ clkmult = S626_GET_CRA_CLKMULT_A(cra);
+ if (clkmult == S626_CLKMULT_SPECIAL)
+ clkmult = S626_CLKMULT_1X;
+ }
+ setup |= S626_SET_STD_ENCMODE(encmode) | S626_SET_STD_CLKMULT(clkmult) |
+ S626_SET_STD_CLKPOL(clkpol);
+
+ /* Return adjusted counter setup. */
+ return setup;
+}
+
+static uint16_t s626_get_mode_b(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ uint16_t cra;
+ uint16_t crb;
+ uint16_t setup;
+ unsigned cntsrc, clkmult, clkpol, encmode;
- return tempdata;
+ /* Fetch CRA and CRB register images. */
+ cra = s626_debi_read(dev, k->my_cra);
+ crb = s626_debi_read(dev, k->my_crb);
+
+ /*
+ * Populate the standardized counter setup bit fields.
+ */
+ setup =
+ /* IntSrc = IntSrcB. */
+ S626_SET_STD_INTSRC(S626_GET_CRB_INTSRC_B(crb)) |
+ /* LatchSrc = LatchSrcB. */
+ S626_SET_STD_LATCHSRC(S626_GET_CRB_LATCHSRC(crb)) |
+ /* LoadSrc = LoadSrcB. */
+ S626_SET_STD_LOADSRC(S626_GET_CRB_LOADSRC_B(crb)) |
+ /* IndxPol = IndxPolB. */
+ S626_SET_STD_INDXPOL(S626_GET_CRB_INDXPOL_B(crb)) |
+ /* ClkEnab = ClkEnabB. */
+ S626_SET_STD_CLKENAB(S626_GET_CRB_CLKENAB_B(crb)) |
+ /* IndxSrc = IndxSrcB. */
+ S626_SET_STD_INDXSRC(S626_GET_CRA_INDXSRC_B(cra));
+
+ /* Adjust mode-dependent parameters. */
+ cntsrc = S626_GET_CRA_CNTSRC_B(cra);
+ clkmult = S626_GET_CRB_CLKMULT_B(crb);
+ if (clkmult == S626_CLKMULT_SPECIAL) {
+ /* Extender mode (ClkMultB == S626_CLKMULT_SPECIAL): */
+ encmode = S626_ENCMODE_EXTENDER;
+ /* Indicate multiplier is 1x. */
+ clkmult = S626_CLKMULT_1X;
+ /* Set ClkPol equal to Timer count direction (CntSrcB<0>). */
+ clkpol = cntsrc & 1;
+ } else if (cntsrc & S626_CNTSRC_SYSCLK) {
+ /* Timer mode (CntSrcB<1> == 1): */
+ encmode = S626_ENCMODE_TIMER;
+ /* Indicate multiplier is 1x. */
+ clkmult = S626_CLKMULT_1X;
+ /* Set ClkPol equal to Timer count direction (CntSrcB<0>). */
+ clkpol = cntsrc & 1;
+ } else {
+ /* If Counter mode (CntSrcB<1> == 0): */
+ encmode = S626_ENCMODE_COUNTER;
+ /* Clock multiplier is passed through. */
+ /* Clock polarity is passed through. */
+ clkpol = S626_GET_CRB_CLKPOL_B(crb);
+ }
+ setup |= S626_SET_STD_ENCMODE(encmode) | S626_SET_STD_CLKMULT(clkmult) |
+ S626_SET_STD_CLKPOL(clkpol);
+
+ /* Return adjusted counter setup. */
+ return setup;
}
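/*
 * Since s626_get_mode_a/b return the same standardized layout that
 * s626_set_mode_a/b consume, a single field can be changed with a
 * read-modify-write, e.g. (sketch, using S626_STDMSK_CLKPOL from the
 * driver header):
 *
 *	k->set_mode(dev, k, (k->get_mode(dev, k) & ~S626_STDMSK_CLKPOL) |
 *		    S626_SET_STD_CLKPOL(1), false);
 *
 * which is the pattern followed by the helpers in the "#ifdef unused"
 * block further down.
 */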
-/* static unsigned int s626_uint_to_reg(struct comedi_subdevice *s, int data){ */
-/* return 0; */
-/* } */
+/*
+ * Set the operating mode for the specified counter. The setup
+ * parameter is treated as a COUNTER_SETUP data type. The following
+ * parameters are programmable (all other parms are ignored): ClkMult,
+ * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
+ */
+static void s626_set_mode_a(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t setup,
+ uint16_t disable_int_src)
+{
+ struct s626_private *devpriv = dev->private;
+ uint16_t cra;
+ uint16_t crb;
+ unsigned cntsrc, clkmult, clkpol;
+
+ /* Initialize CRA and CRB images. */
+ /* Preload trigger is passed through. */
+ cra = S626_SET_CRA_LOADSRC_A(S626_GET_STD_LOADSRC(setup));
+ /* IndexSrc is passed through. */
+ cra |= S626_SET_CRA_INDXSRC_A(S626_GET_STD_INDXSRC(setup));
+
+ /* Reset any pending CounterA event captures. */
+ crb = S626_SET_CRB_INTRESETCMD(1) | S626_SET_CRB_INTRESET_A(1);
+ /* Clock enable is passed through. */
+ crb |= S626_SET_CRB_CLKENAB_A(S626_GET_STD_CLKENAB(setup));
+
+ /* Force IntSrc to Disabled if disable_int_src is asserted. */
+ if (!disable_int_src)
+ cra |= S626_SET_CRA_INTSRC_A(S626_GET_STD_INTSRC(setup));
+
+ /* Populate all mode-dependent attributes of CRA & CRB images. */
+ clkpol = S626_GET_STD_CLKPOL(setup);
+ switch (S626_GET_STD_ENCMODE(setup)) {
+ case S626_ENCMODE_EXTENDER: /* Extender Mode: */
+ /* Force to Timer mode (Extender valid only for B counters). */
+ /* Fall through to case S626_ENCMODE_TIMER: */
+ case S626_ENCMODE_TIMER: /* Timer Mode: */
+ /* CntSrcA<1> selects system clock */
+ cntsrc = S626_CNTSRC_SYSCLK;
+ /* Count direction (CntSrcA<0>) obtained from ClkPol. */
+ cntsrc |= clkpol;
+ /* ClkPolA behaves as always-on clock enable. */
+ clkpol = 1;
+ /* ClkMult must be 1x. */
+ clkmult = S626_CLKMULT_1X;
+ break;
+ default: /* Counter Mode: */
+ /* Select ENC_C and ENC_D as clock/direction inputs. */
+ cntsrc = S626_CNTSRC_ENCODER;
+ /* Clock polarity is passed through. */
+ /* Force multiplier to x1 if not legal, else pass through. */
+ clkmult = S626_GET_STD_CLKMULT(setup);
+ if (clkmult == S626_CLKMULT_SPECIAL)
+ clkmult = S626_CLKMULT_1X;
+ break;
+ }
+ cra |= S626_SET_CRA_CNTSRC_A(cntsrc) | S626_SET_CRA_CLKPOL_A(clkpol) |
+ S626_SET_CRA_CLKMULT_A(clkmult);
+
+ /*
+ * Force positive index polarity if IndxSrc is software-driven only,
+ * otherwise pass it through.
+ */
+ if (S626_GET_STD_INDXSRC(setup) != S626_INDXSRC_SOFT)
+ cra |= S626_SET_CRA_INDXPOL_A(S626_GET_STD_INDXPOL(setup));
+
+ /*
+ * If IntSrc has been forced to Disabled, update the MISC2 interrupt
+ * enable mask to indicate the counter interrupt is disabled.
+ */
+ if (disable_int_src)
+ devpriv->counter_int_enabs &= ~k->my_event_bits[3];
+
+ /*
+ * While retaining CounterB and LatchSrc configurations, program the
+ * new counter operating mode.
+ */
+ s626_debi_replace(dev, k->my_cra,
+ S626_CRAMSK_INDXSRC_B | S626_CRAMSK_CNTSRC_B, cra);
+ s626_debi_replace(dev, k->my_crb,
+ ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_CLKENAB_A), crb);
+}
+
+static void s626_set_mode_b(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t setup,
+ uint16_t disable_int_src)
+{
+ struct s626_private *devpriv = dev->private;
+ uint16_t cra;
+ uint16_t crb;
+ unsigned cntsrc, clkmult, clkpol;
+
+ /* Initialize CRA and CRB images. */
+ /* IndexSrc is passed through. */
+ cra = S626_SET_CRA_INDXSRC_B(S626_GET_STD_INDXSRC(setup));
+
+ /* Reset event captures and disable interrupts. */
+ crb = S626_SET_CRB_INTRESETCMD(1) | S626_SET_CRB_INTRESET_B(1);
+ /* Clock enable is passed through. */
+ crb |= S626_SET_CRB_CLKENAB_B(S626_GET_STD_CLKENAB(setup));
+ /* Preload trigger source is passed through. */
+ crb |= S626_SET_CRB_LOADSRC_B(S626_GET_STD_LOADSRC(setup));
+
+ /* Force IntSrc to Disabled if disable_int_src is asserted. */
+ if (!disable_int_src)
+ crb |= S626_SET_CRB_INTSRC_B(S626_GET_STD_INTSRC(setup));
+
+ /* Populate all mode-dependent attributes of CRA & CRB images. */
+ clkpol = S626_GET_STD_CLKPOL(setup);
+ switch (S626_GET_STD_ENCMODE(setup)) {
+ case S626_ENCMODE_TIMER: /* Timer Mode: */
+ /* CntSrcB<1> selects system clock */
+ cntsrc = S626_CNTSRC_SYSCLK;
+ /* with direction (CntSrcB<0>) obtained from ClkPol. */
+ cntsrc |= clkpol;
+ /* ClkPolB behaves as always-on clock enable. */
+ clkpol = 1;
+ /* ClkMultB must be 1x. */
+ clkmult = S626_CLKMULT_1X;
+ break;
+ case S626_ENCMODE_EXTENDER: /* Extender Mode: */
+ /* CntSrcB source is OverflowA (same as "timer") */
+ cntsrc = S626_CNTSRC_SYSCLK;
+ /* with direction obtained from ClkPol. */
+ cntsrc |= clkpol;
+ /* ClkPolB controls IndexB -- always set to active. */
+ clkpol = 1;
+ /* ClkMultB selects OverflowA as the clock source. */
+ clkmult = S626_CLKMULT_SPECIAL;
+ break;
+ default: /* Counter Mode: */
+ /* Select ENC_C and ENC_D as clock/direction inputs. */
+ cntsrc = S626_CNTSRC_ENCODER;
+ /* ClkPol is passed through. */
+ /* Force ClkMult to x1 if not legal, otherwise pass through. */
+ clkmult = S626_GET_STD_CLKMULT(setup);
+ if (clkmult == S626_CLKMULT_SPECIAL)
+ clkmult = S626_CLKMULT_1X;
+ break;
+ }
+ cra |= S626_SET_CRA_CNTSRC_B(cntsrc);
+ crb |= S626_SET_CRB_CLKPOL_B(clkpol) | S626_SET_CRB_CLKMULT_B(clkmult);
+
+ /*
+ * Force positive index polarity if IndxSrc is software-driven only,
+ * otherwise pass it through.
+ */
+ if (S626_GET_STD_INDXSRC(setup) != S626_INDXSRC_SOFT)
+ crb |= S626_SET_CRB_INDXPOL_B(S626_GET_STD_INDXPOL(setup));
+
+ /*
+ * If IntSrc has been forced to Disabled, update the MISC2 interrupt
+ * enable mask to indicate the counter interrupt is disabled.
+ */
+ if (disable_int_src)
+ devpriv->counter_int_enabs &= ~k->my_event_bits[3];
+
+ /*
+ * While retaining CounterA and LatchSrc configurations, program the
+ * new counter operating mode.
+ */
+ s626_debi_replace(dev, k->my_cra,
+ ~(S626_CRAMSK_INDXSRC_B | S626_CRAMSK_CNTSRC_B), cra);
+ s626_debi_replace(dev, k->my_crb,
+ S626_CRBMSK_CLKENAB_A | S626_CRBMSK_LATCHSRC, crb);
+}
+
+/*
+ * Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index.
+ */
+static void s626_set_enable_a(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t enab)
+{
+ s626_debi_replace(dev, k->my_crb,
+ ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_CLKENAB_A),
+ S626_SET_CRB_CLKENAB_A(enab));
+}
+
+static void s626_set_enable_b(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t enab)
+{
+ s626_debi_replace(dev, k->my_crb,
+ ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_CLKENAB_B),
+ S626_SET_CRB_CLKENAB_B(enab));
+}
+
+static uint16_t s626_get_enable_a(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRB_CLKENAB_A(s626_debi_read(dev, k->my_crb));
+}
+
+static uint16_t s626_get_enable_b(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRB_CLKENAB_B(s626_debi_read(dev, k->my_crb));
+}
+
+#ifdef unused
+static uint16_t s626_get_latch_source(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRB_LATCHSRC(s626_debi_read(dev, k->my_crb));
+}
+#endif
+
+/*
+ * Return/set the event that will trigger transfer of the preload
+ * register into the counter. 0=ThisCntr_Index, 1=ThisCntr_Overflow,
+ * 2=OverflowA (B counters only), 3=disabled.
+ */
+static void s626_set_load_trig_a(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t trig)
+{
+ s626_debi_replace(dev, k->my_cra, ~S626_CRAMSK_LOADSRC_A,
+ S626_SET_CRA_LOADSRC_A(trig));
+}
+
+static void s626_set_load_trig_b(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t trig)
+{
+ s626_debi_replace(dev, k->my_crb,
+ ~(S626_CRBMSK_LOADSRC_B | S626_CRBMSK_INTCTRL),
+ S626_SET_CRB_LOADSRC_B(trig));
+}
+
+static uint16_t s626_get_load_trig_a(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRA_LOADSRC_A(s626_debi_read(dev, k->my_cra));
+}
+
+static uint16_t s626_get_load_trig_b(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRB_LOADSRC_B(s626_debi_read(dev, k->my_crb));
+}
+
+/*
+ * Return/set counter interrupt source and clear any captured
+ * index/overflow events. int_source: 0=Disabled, 1=OverflowOnly,
+ * 2=IndexOnly, 3=IndexAndOverflow.
+ */
+static void s626_set_int_src_a(struct comedi_device *dev,
+ const struct s626_enc_info *k,
+ uint16_t int_source)
+{
+ struct s626_private *devpriv = dev->private;
+
+ /* Reset any pending counter overflow or index captures. */
+ s626_debi_replace(dev, k->my_crb, ~S626_CRBMSK_INTCTRL,
+ (S626_SET_CRB_INTRESETCMD(1) |
+ S626_SET_CRB_INTRESET_A(1)));
+
+ /* Program counter interrupt source. */
+ s626_debi_replace(dev, k->my_cra, ~S626_CRAMSK_INTSRC_A,
+ S626_SET_CRA_INTSRC_A(int_source));
+
+ /* Update MISC2 interrupt enable mask. */
+ devpriv->counter_int_enabs =
+ (devpriv->counter_int_enabs & ~k->my_event_bits[3]) |
+ k->my_event_bits[int_source];
+}
+
+static void s626_set_int_src_b(struct comedi_device *dev,
+ const struct s626_enc_info *k,
+ uint16_t int_source)
+{
+ struct s626_private *devpriv = dev->private;
+ uint16_t crb;
+
+ /* Cache writeable CRB register image. */
+ crb = s626_debi_read(dev, k->my_crb) & ~S626_CRBMSK_INTCTRL;
+
+ /* Reset any pending counter overflow or index captures. */
+ s626_debi_write(dev, k->my_crb, (crb | S626_SET_CRB_INTRESETCMD(1) |
+ S626_SET_CRB_INTRESET_B(1)));
+
+ /* Program counter interrupt source. */
+ s626_debi_write(dev, k->my_crb, ((crb & ~S626_CRBMSK_INTSRC_B) |
+ S626_SET_CRB_INTSRC_B(int_source)));
+
+ /* Update MISC2 interrupt enable mask. */
+ devpriv->counter_int_enabs =
+ (devpriv->counter_int_enabs & ~k->my_event_bits[3]) |
+ k->my_event_bits[int_source];
+}
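/*
 * Both s626_set_int_src_a() and s626_set_int_src_b() update the MISC2
 * enable mask the same way: k->my_event_bits[3] is that counter's full
 * overflow|index mask, so the assignment amounts to
 *
 *	enabs = (enabs & ~all_events_of_this_counter) | events[int_source];
 *
 * For counter 0A with int_source == 2 (IndexOnly), for instance, only the
 * 0x0010 bit from S626_EVBITS(0) remains set for that counter.
 */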
+
+static uint16_t s626_get_int_src_a(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRA_INTSRC_A(s626_debi_read(dev, k->my_cra));
+}
+
+static uint16_t s626_get_int_src_b(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_CRB_INTSRC_B(s626_debi_read(dev, k->my_crb));
+}
+
+#ifdef unused
+/*
+ * Return/set the clock multiplier.
+ */
+static void s626_set_clk_mult(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t value)
+{
+ k->set_mode(dev, k, ((k->get_mode(dev, k) & ~S626_STDMSK_CLKMULT) |
+ S626_SET_STD_CLKMULT(value)), false);
+}
+
+static uint16_t s626_get_clk_mult(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_STD_CLKMULT(k->get_mode(dev, k));
+}
+
+/*
+ * Return/set the clock polarity.
+ */
+static void s626_set_clk_pol(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t value)
+{
+ k->set_mode(dev, k, ((k->get_mode(dev, k) & ~S626_STDMSK_CLKPOL) |
+ S626_SET_STD_CLKPOL(value)), false);
+}
+
+static uint16_t s626_get_clk_pol(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_STD_CLKPOL(k->get_mode(dev, k));
+}
+
+/*
+ * Return/set the encoder mode.
+ */
+static void s626_set_enc_mode(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t value)
+{
+ k->set_mode(dev, k, ((k->get_mode(dev, k) & ~S626_STDMSK_ENCMODE) |
+ S626_SET_STD_ENCMODE(value)), false);
+}
+
+static uint16_t s626_get_enc_mode(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_STD_ENCMODE(k->get_mode(dev, k));
+}
+
+/*
+ * Return/set the index polarity.
+ */
+static void s626_set_index_pol(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t value)
+{
+ k->set_mode(dev, k, ((k->get_mode(dev, k) & ~S626_STDMSK_INDXPOL) |
+ S626_SET_STD_INDXPOL(value != 0)), false);
+}
+
+static uint16_t s626_get_index_pol(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_STD_INDXPOL(k->get_mode(dev, k));
+}
+
+/*
+ * Return/set the index source.
+ */
+static void s626_set_index_src(struct comedi_device *dev,
+ const struct s626_enc_info *k, uint16_t value)
+{
+ k->set_mode(dev, k, ((k->get_mode(dev, k) & ~S626_STDMSK_INDXSRC) |
+ S626_SET_STD_INDXSRC(value != 0)), false);
+}
+
+static uint16_t s626_get_index_src(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ return S626_GET_STD_INDXSRC(k->get_mode(dev, k));
+}
+#endif
+
+/*
+ * Generate an index pulse.
+ */
+static void s626_pulse_index_a(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ uint16_t cra;
+
+ cra = s626_debi_read(dev, k->my_cra);
+ /* Pulse index. */
+ s626_debi_write(dev, k->my_cra, (cra ^ S626_CRAMSK_INDXPOL_A));
+ s626_debi_write(dev, k->my_cra, cra);
+}
+
+static void s626_pulse_index_b(struct comedi_device *dev,
+ const struct s626_enc_info *k)
+{
+ uint16_t crb;
+
+ crb = s626_debi_read(dev, k->my_crb) & ~S626_CRBMSK_INTCTRL;
+ /* Pulse index. */
+ s626_debi_write(dev, k->my_crb, (crb ^ S626_CRBMSK_INDXPOL_B));
+ s626_debi_write(dev, k->my_crb, crb);
+}
+
+static const struct s626_enc_info s626_enc_chan_info[] = {
+ {
+ .get_enable = s626_get_enable_a,
+ .get_int_src = s626_get_int_src_a,
+ .get_load_trig = s626_get_load_trig_a,
+ .get_mode = s626_get_mode_a,
+ .pulse_index = s626_pulse_index_a,
+ .set_enable = s626_set_enable_a,
+ .set_int_src = s626_set_int_src_a,
+ .set_load_trig = s626_set_load_trig_a,
+ .set_mode = s626_set_mode_a,
+ .reset_cap_flags = s626_reset_cap_flags_a,
+ .my_cra = S626_LP_CR0A,
+ .my_crb = S626_LP_CR0B,
+ .my_latch_lsw = S626_LP_CNTR0ALSW,
+ .my_event_bits = S626_EVBITS(0),
+ }, {
+ .get_enable = s626_get_enable_a,
+ .get_int_src = s626_get_int_src_a,
+ .get_load_trig = s626_get_load_trig_a,
+ .get_mode = s626_get_mode_a,
+ .pulse_index = s626_pulse_index_a,
+ .set_enable = s626_set_enable_a,
+ .set_int_src = s626_set_int_src_a,
+ .set_load_trig = s626_set_load_trig_a,
+ .set_mode = s626_set_mode_a,
+ .reset_cap_flags = s626_reset_cap_flags_a,
+ .my_cra = S626_LP_CR1A,
+ .my_crb = S626_LP_CR1B,
+ .my_latch_lsw = S626_LP_CNTR1ALSW,
+ .my_event_bits = S626_EVBITS(1),
+ }, {
+ .get_enable = s626_get_enable_a,
+ .get_int_src = s626_get_int_src_a,
+ .get_load_trig = s626_get_load_trig_a,
+ .get_mode = s626_get_mode_a,
+ .pulse_index = s626_pulse_index_a,
+ .set_enable = s626_set_enable_a,
+ .set_int_src = s626_set_int_src_a,
+ .set_load_trig = s626_set_load_trig_a,
+ .set_mode = s626_set_mode_a,
+ .reset_cap_flags = s626_reset_cap_flags_a,
+ .my_cra = S626_LP_CR2A,
+ .my_crb = S626_LP_CR2B,
+ .my_latch_lsw = S626_LP_CNTR2ALSW,
+ .my_event_bits = S626_EVBITS(2),
+ }, {
+ .get_enable = s626_get_enable_b,
+ .get_int_src = s626_get_int_src_b,
+ .get_load_trig = s626_get_load_trig_b,
+ .get_mode = s626_get_mode_b,
+ .pulse_index = s626_pulse_index_b,
+ .set_enable = s626_set_enable_b,
+ .set_int_src = s626_set_int_src_b,
+ .set_load_trig = s626_set_load_trig_b,
+ .set_mode = s626_set_mode_b,
+ .reset_cap_flags = s626_reset_cap_flags_b,
+ .my_cra = S626_LP_CR0A,
+ .my_crb = S626_LP_CR0B,
+ .my_latch_lsw = S626_LP_CNTR0BLSW,
+ .my_event_bits = S626_EVBITS(3),
+ }, {
+ .get_enable = s626_get_enable_b,
+ .get_int_src = s626_get_int_src_b,
+ .get_load_trig = s626_get_load_trig_b,
+ .get_mode = s626_get_mode_b,
+ .pulse_index = s626_pulse_index_b,
+ .set_enable = s626_set_enable_b,
+ .set_int_src = s626_set_int_src_b,
+ .set_load_trig = s626_set_load_trig_b,
+ .set_mode = s626_set_mode_b,
+ .reset_cap_flags = s626_reset_cap_flags_b,
+ .my_cra = S626_LP_CR1A,
+ .my_crb = S626_LP_CR1B,
+ .my_latch_lsw = S626_LP_CNTR1BLSW,
+ .my_event_bits = S626_EVBITS(4),
+ }, {
+ .get_enable = s626_get_enable_b,
+ .get_int_src = s626_get_int_src_b,
+ .get_load_trig = s626_get_load_trig_b,
+ .get_mode = s626_get_mode_b,
+ .pulse_index = s626_pulse_index_b,
+ .set_enable = s626_set_enable_b,
+ .set_int_src = s626_set_int_src_b,
+ .set_load_trig = s626_set_load_trig_b,
+ .set_mode = s626_set_mode_b,
+ .reset_cap_flags = s626_reset_cap_flags_b,
+ .my_cra = S626_LP_CR2A,
+ .my_crb = S626_LP_CR2B,
+ .my_latch_lsw = S626_LP_CNTR2BLSW,
+ .my_event_bits = S626_EVBITS(5),
+ },
+};
+
+static unsigned int s626_ai_reg_to_uint(unsigned int data)
+{
+ return ((data >> 18) & 0x3fff) ^ 0x2000;
+}
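/*
 * The expression above is an offset-binary conversion of the 14-bit ADC
 * result held in bits 31..18 of the raw register value: XOR-ing with
 * 0x2000 flips the sign bit, so raw 0x2000 (most negative) maps to 0x0000,
 * raw 0x0000 maps to mid-scale 0x2000 and raw 0x1fff (most positive) maps
 * to 0x3fff, matching what the replaced shift/add version produced.
 */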
static int s626_dio_set_irq(struct comedi_device *dev, unsigned int chan)
{
@@ -629,19 +1322,19 @@ static int s626_dio_set_irq(struct comedi_device *dev, unsigned int chan)
unsigned int status;
/* set channel to capture positive edge */
- status = DEBIread(dev, LP_RDEDGSEL(group));
- DEBIwrite(dev, LP_WREDGSEL(group), mask | status);
+ status = s626_debi_read(dev, S626_LP_RDEDGSEL(group));
+ s626_debi_write(dev, S626_LP_WREDGSEL(group), mask | status);
/* enable interrupt on selected channel */
- status = DEBIread(dev, LP_RDINTSEL(group));
- DEBIwrite(dev, LP_WRINTSEL(group), mask | status);
+ status = s626_debi_read(dev, S626_LP_RDINTSEL(group));
+ s626_debi_write(dev, S626_LP_WRINTSEL(group), mask | status);
/* enable edge capture write command */
- DEBIwrite(dev, LP_MISC1, MISC1_EDCAP);
+ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_EDCAP);
/* enable edge capture on selected channel */
- status = DEBIread(dev, LP_RDCAPSEL(group));
- DEBIwrite(dev, LP_WRCAPSEL(group), mask | status);
+ status = s626_debi_read(dev, S626_LP_RDCAPSEL(group));
+ s626_debi_write(dev, S626_LP_WRCAPSEL(group), mask | status);
return 0;
}
@@ -650,10 +1343,10 @@ static int s626_dio_reset_irq(struct comedi_device *dev, unsigned int group,
unsigned int mask)
{
/* disable edge capture write command */
- DEBIwrite(dev, LP_MISC1, MISC1_NOEDCAP);
+ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
/* enable edge capture on selected channel */
- DEBIwrite(dev, LP_WRCAPSEL(group), mask);
+ s626_debi_write(dev, S626_LP_WRCAPSEL(group), mask);
return 0;
}
@@ -663,17 +1356,17 @@ static int s626_dio_clear_irq(struct comedi_device *dev)
unsigned int group;
/* disable edge capture write command */
- DEBIwrite(dev, LP_MISC1, MISC1_NOEDCAP);
+ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
/* clear all dio pending events and interrupt */
for (group = 0; group < S626_DIO_BANKS; group++)
- DEBIwrite(dev, LP_WRCAPSEL(group), 0xffff);
+ s626_debi_write(dev, S626_LP_WRCAPSEL(group), 0xffff);
return 0;
}
-static void handle_dio_interrupt(struct comedi_device *dev,
- uint16_t irqbit, uint8_t group)
+static void s626_handle_dio_interrupt(struct comedi_device *dev,
+ uint16_t irqbit, uint8_t group)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -686,7 +1379,7 @@ static void handle_dio_interrupt(struct comedi_device *dev,
if ((irqbit >> (cmd->start_arg - (16 * group))) == 1 &&
cmd->start_src == TRIG_EXT) {
/* Start executing the RPS program */
- s626_mc_enable(dev, MC1_ERPS1, P_MC1);
+ s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
if (cmd->scan_begin_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
@@ -694,7 +1387,7 @@ static void handle_dio_interrupt(struct comedi_device *dev,
if ((irqbit >> (cmd->scan_begin_arg - (16 * group))) == 1 &&
cmd->scan_begin_src == TRIG_EXT) {
/* Trigger ADC scan loop start */
- s626_mc_enable(dev, MC2_ADC_RPS, P_MC2);
+ s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
if (cmd->convert_src == TRIG_EXT) {
devpriv->ai_convert_count = cmd->chanlist_len;
@@ -703,16 +1396,17 @@ static void handle_dio_interrupt(struct comedi_device *dev,
}
if (cmd->convert_src == TRIG_TIMER) {
- struct enc_private *k = &encpriv[5];
+ const struct s626_enc_info *k =
+ &s626_enc_chan_info[5];
devpriv->ai_convert_count = cmd->chanlist_len;
- k->SetEnable(dev, k, CLKENAB_ALWAYS);
+ k->set_enable(dev, k, S626_CLKENAB_ALWAYS);
}
}
if ((irqbit >> (cmd->convert_arg - (16 * group))) == 1 &&
cmd->convert_src == TRIG_EXT) {
/* Trigger ADC scan loop start */
- s626_mc_enable(dev, MC2_ADC_RPS, P_MC2);
+ s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
devpriv->ai_convert_count--;
if (devpriv->ai_convert_count > 0)
@@ -721,7 +1415,7 @@ static void handle_dio_interrupt(struct comedi_device *dev,
}
}
-static void check_dio_interrupts(struct comedi_device *dev)
+static void s626_check_dio_interrupts(struct comedi_device *dev)
{
uint16_t irqbit;
uint8_t group;
@@ -729,90 +1423,91 @@ static void check_dio_interrupts(struct comedi_device *dev)
for (group = 0; group < S626_DIO_BANKS; group++) {
irqbit = 0;
/* read interrupt type */
- irqbit = DEBIread(dev, LP_RDCAPFLG(group));
+ irqbit = s626_debi_read(dev, S626_LP_RDCAPFLG(group));
/* check if interrupt is generated from dio channels */
if (irqbit) {
- handle_dio_interrupt(dev, irqbit, group);
+ s626_handle_dio_interrupt(dev, irqbit, group);
return;
}
}
}
-static void check_counter_interrupts(struct comedi_device *dev)
+static void s626_check_counter_interrupts(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- struct enc_private *k;
+ const struct s626_enc_info *k;
uint16_t irqbit;
/* read interrupt type */
- irqbit = DEBIread(dev, LP_RDMISC2);
+ irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
/* check interrupt on counters */
- if (irqbit & IRQ_COINT1A) {
- k = &encpriv[0];
+ if (irqbit & S626_IRQ_COINT1A) {
+ k = &s626_enc_chan_info[0];
/* clear interrupt capture flag */
- k->ResetCapFlags(dev, k);
+ k->reset_cap_flags(dev, k);
}
- if (irqbit & IRQ_COINT2A) {
- k = &encpriv[1];
+ if (irqbit & S626_IRQ_COINT2A) {
+ k = &s626_enc_chan_info[1];
/* clear interrupt capture flag */
- k->ResetCapFlags(dev, k);
+ k->reset_cap_flags(dev, k);
}
- if (irqbit & IRQ_COINT3A) {
- k = &encpriv[2];
+ if (irqbit & S626_IRQ_COINT3A) {
+ k = &s626_enc_chan_info[2];
/* clear interrupt capture flag */
- k->ResetCapFlags(dev, k);
+ k->reset_cap_flags(dev, k);
}
- if (irqbit & IRQ_COINT1B) {
- k = &encpriv[3];
+ if (irqbit & S626_IRQ_COINT1B) {
+ k = &s626_enc_chan_info[3];
/* clear interrupt capture flag */
- k->ResetCapFlags(dev, k);
+ k->reset_cap_flags(dev, k);
}
- if (irqbit & IRQ_COINT2B) {
- k = &encpriv[4];
+ if (irqbit & S626_IRQ_COINT2B) {
+ k = &s626_enc_chan_info[4];
/* clear interrupt capture flag */
- k->ResetCapFlags(dev, k);
+ k->reset_cap_flags(dev, k);
if (devpriv->ai_convert_count > 0) {
devpriv->ai_convert_count--;
if (devpriv->ai_convert_count == 0)
- k->SetEnable(dev, k, CLKENAB_INDEX);
+ k->set_enable(dev, k, S626_CLKENAB_INDEX);
if (cmd->convert_src == TRIG_TIMER) {
/* Trigger ADC scan loop start */
- s626_mc_enable(dev, MC2_ADC_RPS, P_MC2);
+ s626_mc_enable(dev, S626_MC2_ADC_RPS,
+ S626_P_MC2);
}
}
}
- if (irqbit & IRQ_COINT3B) {
- k = &encpriv[5];
+ if (irqbit & S626_IRQ_COINT3B) {
+ k = &s626_enc_chan_info[5];
/* clear interrupt capture flag */
- k->ResetCapFlags(dev, k);
+ k->reset_cap_flags(dev, k);
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Trigger ADC scan loop start */
- s626_mc_enable(dev, MC2_ADC_RPS, P_MC2);
+ s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
}
if (cmd->convert_src == TRIG_TIMER) {
- k = &encpriv[4];
+ k = &s626_enc_chan_info[4];
devpriv->ai_convert_count = cmd->chanlist_len;
- k->SetEnable(dev, k, CLKENAB_ALWAYS);
+ k->set_enable(dev, k, S626_CLKENAB_ALWAYS);
}
}
}
-static bool handle_eos_interrupt(struct comedi_device *dev)
+static bool s626_handle_eos_interrupt(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -823,19 +1518,19 @@ static bool handle_eos_interrupt(struct comedi_device *dev)
* first uint16_t in the buffer because it contains junk data
* from the final ADC of the previous poll list scan.
*/
- int32_t *readaddr = (int32_t *)devpriv->ANABuf.LogicalBase + 1;
+ uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
bool finished = false;
int i;
/* get the data and hand it over to comedi */
for (i = 0; i < cmd->chanlist_len; i++) {
- short tempdata;
+ unsigned short tempdata;
/*
* Convert ADC data to 16-bit integer values and copy
* to application buffer.
*/
- tempdata = s626_ai_reg_to_uint((int)*readaddr);
+ tempdata = s626_ai_reg_to_uint(*readaddr);
readaddr++;
/* put data into read buffer */
@@ -846,13 +1541,13 @@ static bool handle_eos_interrupt(struct comedi_device *dev)
/* end of scan occurs */
async->events |= COMEDI_CB_EOS;
- if (!devpriv->ai_continous)
+ if (!devpriv->ai_continuous)
devpriv->ai_sample_count--;
if (devpriv->ai_sample_count <= 0) {
devpriv->ai_cmd_running = 0;
/* Stop RPS program */
- s626_mc_disable(dev, MC1_ERPS1, P_MC1);
+ s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
/* send end of acquisition */
async->events |= COMEDI_CB_EOA;
@@ -879,229 +1574,238 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
if (!dev->attached)
return IRQ_NONE;
- /* lock to avoid race with comedi_poll */
+ /* lock to avoid race with comedi_poll */
spin_lock_irqsave(&dev->spinlock, flags);
/* save interrupt enable register state */
- irqstatus = readl(devpriv->mmio + P_IER);
+ irqstatus = readl(devpriv->mmio + S626_P_IER);
/* read interrupt type */
- irqtype = readl(devpriv->mmio + P_ISR);
+ irqtype = readl(devpriv->mmio + S626_P_ISR);
/* disable master interrupt */
- writel(0, devpriv->mmio + P_IER);
+ writel(0, devpriv->mmio + S626_P_IER);
/* clear interrupt */
- writel(irqtype, devpriv->mmio + P_ISR);
+ writel(irqtype, devpriv->mmio + S626_P_ISR);
switch (irqtype) {
- case IRQ_RPS1: /* end_of_scan occurs */
- if (handle_eos_interrupt(dev))
+ case S626_IRQ_RPS1: /* end_of_scan occurs */
+ if (s626_handle_eos_interrupt(dev))
irqstatus = 0;
break;
- case IRQ_GPIO3: /* check dio and conter interrupt */
+ case S626_IRQ_GPIO3: /* check dio and counter interrupt */
/* s626_dio_clear_irq(dev); */
- check_dio_interrupts(dev);
- check_counter_interrupts(dev);
+ s626_check_dio_interrupts(dev);
+ s626_check_counter_interrupts(dev);
break;
}
/* enable interrupt */
- writel(irqstatus, devpriv->mmio + P_IER);
+ writel(irqstatus, devpriv->mmio + S626_P_IER);
spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
}
/*
- * this functions build the RPS program for hardware driven acquistion
+ * This function builds the RPS program for hardware driven acquisition.
*/
-static void ResetADC(struct comedi_device *dev, uint8_t *ppl)
+static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
{
struct s626_private *devpriv = dev->private;
- register uint32_t *pRPS;
- uint32_t JmpAdrs;
+ uint32_t *rps;
+ uint32_t jmp_adrs;
uint16_t i;
uint16_t n;
- uint32_t LocalPPL;
- struct comedi_cmd *cmd = &(dev->subdevices->async->cmd);
+ uint32_t local_ppl;
+ struct comedi_cmd *cmd = &dev->subdevices->async->cmd;
/* Stop RPS program in case it is currently running */
- s626_mc_disable(dev, MC1_ERPS1, P_MC1);
+ s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
- /* Set starting logical address to write RPS commands. */
- pRPS = (uint32_t *) devpriv->RPSBuf.LogicalBase;
+ /* Set starting logical address to write RPS commands. */
+ rps = (uint32_t *)devpriv->rps_buf.logical_base;
/* Initialize RPS instruction pointer */
- writel((uint32_t)devpriv->RPSBuf.PhysicalBase,
- devpriv->mmio + P_RPSADDR1);
-
- /* Construct RPS program in RPSBuf DMA buffer */
+ writel((uint32_t)devpriv->rps_buf.physical_base,
+ devpriv->mmio + S626_P_RPSADDR1);
+ /* Construct RPS program in rps_buf DMA buffer */
if (cmd != NULL && cmd->scan_begin_src != TRIG_FOLLOW) {
- /* Wait for Start trigger. */
- *pRPS++ = RPS_PAUSE | RPS_SIGADC;
- *pRPS++ = RPS_CLRSIGNAL | RPS_SIGADC;
+ /* Wait for Start trigger. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_SIGADC;
+ *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_SIGADC;
}
- /* SAA7146 BUG WORKAROUND Do a dummy DEBI Write. This is necessary
+ /*
+ * SAA7146 BUG WORKAROUND: Do a dummy DEBI Write. This is necessary
* because the first RPS DEBI Write following a non-RPS DEBI write
* seems to always fail. If we don't do this dummy write, the ADC
* gain might not be set to the value required for the first slot in
* the poll list; the ADC gain would instead remain unchanged from
* the previously programmed value.
*/
- *pRPS++ = RPS_LDREG | (P_DEBICMD >> 2);
/* Write DEBI Write command and address to shadow RAM. */
+ *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
+ *rps++ = S626_DEBI_CMD_WRWORD | S626_LP_GSEL;
+ *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
+ /* Write DEBI immediate data to shadow RAM: */
+ *rps++ = S626_GSEL_BIPOLAR5V; /* arbitrary immediate data value. */
+ *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
+ /* Reset "shadow RAM uploaded" flag. */
+ /* Invoke shadow RAM upload. */
+ *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
+ /* Wait for shadow upload to finish. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
- *pRPS++ = DEBI_CMD_WRWORD | LP_GSEL;
- *pRPS++ = RPS_LDREG | (P_DEBIAD >> 2);
- /* Write DEBI immediate data to shadow RAM: */
-
- *pRPS++ = GSEL_BIPOLAR5V;
- /* arbitrary immediate data value. */
-
- *pRPS++ = RPS_CLRSIGNAL | RPS_DEBI;
- /* Reset "shadow RAM uploaded" flag. */
- *pRPS++ = RPS_UPLOAD | RPS_DEBI; /* Invoke shadow RAM upload. */
- *pRPS++ = RPS_PAUSE | RPS_DEBI; /* Wait for shadow upload to finish. */
-
- /* Digitize all slots in the poll list. This is implemented as a
+ /*
+ * Digitize all slots in the poll list. This is implemented as a
* for loop to limit the slot count to 16 in case the application
- * forgot to set the EOPL flag in the final slot.
+ * forgot to set the S626_EOPL flag in the final slot.
*/
- for (devpriv->AdcItems = 0; devpriv->AdcItems < 16; devpriv->AdcItems++) {
- /* Convert application's poll list item to private board class
+ for (devpriv->adc_items = 0; devpriv->adc_items < 16;
+ devpriv->adc_items++) {
+ /*
+ * Convert application's poll list item to private board class
* format. Each app poll list item is an uint8_t with form
* (EOPL,x,x,RANGE,CHAN<3:0>), where RANGE code indicates 0 =
* +-10V, 1 = +-5V, and EOPL = End of Poll List marker.
*/
- LocalPPL =
- (*ppl << 8) | (*ppl & 0x10 ? GSEL_BIPOLAR5V :
- GSEL_BIPOLAR10V);
-
- /* Switch ADC analog gain. */
- *pRPS++ = RPS_LDREG | (P_DEBICMD >> 2); /* Write DEBI command */
- /* and address to */
- /* shadow RAM. */
- *pRPS++ = DEBI_CMD_WRWORD | LP_GSEL;
- *pRPS++ = RPS_LDREG | (P_DEBIAD >> 2); /* Write DEBI */
- /* immediate data to */
- /* shadow RAM. */
- *pRPS++ = LocalPPL;
- *pRPS++ = RPS_CLRSIGNAL | RPS_DEBI; /* Reset "shadow RAM uploaded" */
- /* flag. */
- *pRPS++ = RPS_UPLOAD | RPS_DEBI; /* Invoke shadow RAM upload. */
- *pRPS++ = RPS_PAUSE | RPS_DEBI; /* Wait for shadow upload to */
- /* finish. */
-
- /* Select ADC analog input channel. */
- *pRPS++ = RPS_LDREG | (P_DEBICMD >> 2);
- /* Write DEBI command and address to shadow RAM. */
- *pRPS++ = DEBI_CMD_WRWORD | LP_ISEL;
- *pRPS++ = RPS_LDREG | (P_DEBIAD >> 2);
- /* Write DEBI immediate data to shadow RAM. */
- *pRPS++ = LocalPPL;
- *pRPS++ = RPS_CLRSIGNAL | RPS_DEBI;
- /* Reset "shadow RAM uploaded" flag. */
-
- *pRPS++ = RPS_UPLOAD | RPS_DEBI;
- /* Invoke shadow RAM upload. */
-
- *pRPS++ = RPS_PAUSE | RPS_DEBI;
- /* Wait for shadow upload to finish. */
-
- /* Delay at least 10 microseconds for analog input settling.
- * Instead of padding with NOPs, we use RPS_JUMP instructions
- * here; this allows us to produce a longer delay than is
- * possible with NOPs because each RPS_JUMP flushes the RPS'
- * instruction prefetch pipeline.
+ local_ppl = (*ppl << 8) | (*ppl & 0x10 ? S626_GSEL_BIPOLAR5V :
+ S626_GSEL_BIPOLAR10V);
+
+ /* Switch ADC analog gain. */
+ /* Write DEBI command and address to shadow RAM. */
+ *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
+ *rps++ = S626_DEBI_CMD_WRWORD | S626_LP_GSEL;
+ /* Write DEBI immediate data to shadow RAM. */
+ *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
+ *rps++ = local_ppl;
+ /* Reset "shadow RAM uploaded" flag. */
+ *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
+ /* Invoke shadow RAM upload. */
+ *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
+ /* Wait for shadow upload to finish. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
+ /* Select ADC analog input channel. */
+ *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
+ /* Write DEBI command and address to shadow RAM. */
+ *rps++ = S626_DEBI_CMD_WRWORD | S626_LP_ISEL;
+ *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
+ /* Write DEBI immediate data to shadow RAM. */
+ *rps++ = local_ppl;
+ /* Reset "shadow RAM uploaded" flag. */
+ *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
+ /* Invoke shadow RAM upload. */
+ *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
+ /* Wait for shadow upload to finish. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
+
+ /*
+ * Delay at least 10 microseconds for analog input settling.
+ * Instead of padding with NOPs, we use S626_RPS_JUMP
+ * instructions here; this allows us to produce a longer delay
+ * than is possible with NOPs because each S626_RPS_JUMP
+ * flushes the RPS' instruction prefetch pipeline.
*/
- JmpAdrs =
- (uint32_t) devpriv->RPSBuf.PhysicalBase +
- (uint32_t) ((unsigned long)pRPS -
- (unsigned long)devpriv->RPSBuf.LogicalBase);
- for (i = 0; i < (10 * RPSCLK_PER_US / 2); i++) {
- JmpAdrs += 8; /* Repeat to implement time delay: */
- *pRPS++ = RPS_JUMP; /* Jump to next RPS instruction. */
- *pRPS++ = JmpAdrs;
+ jmp_adrs =
+ (uint32_t)devpriv->rps_buf.physical_base +
+ (uint32_t)((unsigned long)rps -
+ (unsigned long)devpriv->
+ rps_buf.logical_base);
+ for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
+ jmp_adrs += 8; /* Repeat to implement time delay: */
+ /* Jump to next RPS instruction. */
+ *rps++ = S626_RPS_JUMP;
+ *rps++ = jmp_adrs;
}
if (cmd != NULL && cmd->convert_src != TRIG_NOW) {
- /* Wait for Start trigger. */
- *pRPS++ = RPS_PAUSE | RPS_SIGADC;
- *pRPS++ = RPS_CLRSIGNAL | RPS_SIGADC;
+ /* Wait for Start trigger. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_SIGADC;
+ *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_SIGADC;
}
- /* Start ADC by pulsing GPIO1. */
- *pRPS++ = RPS_LDREG | (P_GPIO >> 2); /* Begin ADC Start pulse. */
- *pRPS++ = GPIO_BASE | GPIO1_LO;
- *pRPS++ = RPS_NOP;
- /* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
- *pRPS++ = RPS_LDREG | (P_GPIO >> 2); /* End ADC Start pulse. */
- *pRPS++ = GPIO_BASE | GPIO1_HI;
-
- /* Wait for ADC to complete (GPIO2 is asserted high when ADC not
+ /* Start ADC by pulsing GPIO1. */
+ /* Begin ADC Start pulse. */
+ *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
+ *rps++ = S626_GPIO_BASE | S626_GPIO1_LO;
+ *rps++ = S626_RPS_NOP;
+ /* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
+ /* End ADC Start pulse. */
+ *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
+ *rps++ = S626_GPIO_BASE | S626_GPIO1_HI;
+ /*
+ * Wait for ADC to complete (GPIO2 is asserted high when ADC not
* busy) and for data from previous conversion to shift into FB
* BUFFER 1 register.
*/
- *pRPS++ = RPS_PAUSE | RPS_GPIO2; /* Wait for ADC done. */
-
- /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
- *pRPS++ = RPS_STREG | (BUGFIX_STREG(P_FB_BUFFER1) >> 2);
- *pRPS++ =
- (uint32_t) devpriv->ANABuf.PhysicalBase +
- (devpriv->AdcItems << 2);
-
- /* If this slot's EndOfPollList flag is set, all channels have */
- /* now been processed. */
- if (*ppl++ & EOPL) {
- devpriv->AdcItems++; /* Adjust poll list item count. */
- break; /* Exit poll list processing loop. */
+ /* Wait for ADC done. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_GPIO2;
+
+ /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
+ *rps++ = S626_RPS_STREG |
+ (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
+ *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+ (devpriv->adc_items << 2);
+
+ /*
+ * If this slot's EndOfPollList flag is set, all channels have
+ * now been processed.
+ */
+ if (*ppl++ & S626_EOPL) {
+ devpriv->adc_items++; /* Adjust poll list item count. */
+ break; /* Exit poll list processing loop. */
}
}
- /* VERSION 2.01 CHANGE: DELAY CHANGED FROM 250NS to 2US. Allow the
+ /*
+ * VERSION 2.01 CHANGE: DELAY CHANGED FROM 250NS to 2US. Allow the
* ADC to stabilize for 2 microseconds before starting the final
* (dummy) conversion. This delay is necessary to allow sufficient
* time between last conversion finished and the start of the dummy
* conversion. Without this delay, the last conversion's data value
* is sometimes set to the previous conversion's data value.
*/
- for (n = 0; n < (2 * RPSCLK_PER_US); n++)
- *pRPS++ = RPS_NOP;
+ for (n = 0; n < (2 * S626_RPSCLK_PER_US); n++)
+ *rps++ = S626_RPS_NOP;
- /* Start a dummy conversion to cause the data from the last
+ /*
+ * Start a dummy conversion to cause the data from the last
* conversion of interest to be shifted in.
*/
- *pRPS++ = RPS_LDREG | (P_GPIO >> 2); /* Begin ADC Start pulse. */
- *pRPS++ = GPIO_BASE | GPIO1_LO;
- *pRPS++ = RPS_NOP;
+ /* Begin ADC Start pulse. */
+ *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
+ *rps++ = S626_GPIO_BASE | S626_GPIO1_LO;
+ *rps++ = S626_RPS_NOP;
/* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
- *pRPS++ = RPS_LDREG | (P_GPIO >> 2); /* End ADC Start pulse. */
- *pRPS++ = GPIO_BASE | GPIO1_HI;
+ *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2); /* End ADC Start pulse. */
+ *rps++ = S626_GPIO_BASE | S626_GPIO1_HI;
- /* Wait for the data from the last conversion of interest to arrive
+ /*
+ * Wait for the data from the last conversion of interest to arrive
* in FB BUFFER 1 register.
*/
- *pRPS++ = RPS_PAUSE | RPS_GPIO2; /* Wait for ADC done. */
+ *rps++ = S626_RPS_PAUSE | S626_RPS_GPIO2; /* Wait for ADC done. */
- /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
- *pRPS++ = RPS_STREG | (BUGFIX_STREG(P_FB_BUFFER1) >> 2); /* */
- *pRPS++ =
- (uint32_t) devpriv->ANABuf.PhysicalBase + (devpriv->AdcItems << 2);
+ /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
+ *rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
+ *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+ (devpriv->adc_items << 2);
- /* Indicate ADC scan loop is finished. */
- /* *pRPS++= RPS_CLRSIGNAL | RPS_SIGADC ; // Signal ReadADC() that scan is done. */
+ /* Indicate ADC scan loop is finished. */
+ /* Signal ReadADC() that scan is done. */
+ /* *rps++= S626_RPS_CLRSIGNAL | S626_RPS_SIGADC; */
/* invoke interrupt */
- if (devpriv->ai_cmd_running == 1) {
- *pRPS++ = RPS_IRQ;
- }
- /* Restart RPS program at its beginning. */
- *pRPS++ = RPS_JUMP; /* Branch to start of RPS program. */
- *pRPS++ = (uint32_t) devpriv->RPSBuf.PhysicalBase;
+ if (devpriv->ai_cmd_running == 1)
+ *rps++ = S626_RPS_IRQ;
+
+ /* Restart RPS program at its beginning. */
+ *rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */
+ *rps++ = (uint32_t)devpriv->rps_buf.physical_base;
- /* End of RPS program build */
+ /* End of RPS program build */
}
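
The per-slot programming above repeats one five-step DEBI shadow-RAM transaction: load the DEBI command, load the immediate data, clear the "uploaded" flag, start the upload, and wait for it to finish. Purely as an illustrative sketch (not part of this patch; the helper name is hypothetical), that transaction could be emitted by a small routine built from the same S626_* constants:

static uint32_t *s626_rps_emit_debi_write(uint32_t *rps, uint32_t wr_cmd,
                                          uint32_t value)
{
        /* Write DEBI command and address to shadow RAM. */
        *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
        *rps++ = wr_cmd;        /* e.g. S626_DEBI_CMD_WRWORD | S626_LP_GSEL */
        /* Write DEBI immediate data to shadow RAM. */
        *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
        *rps++ = value;
        /* Reset "shadow RAM uploaded" flag, invoke the upload, then wait. */
        *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
        *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
        *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
        return rps;
}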
#ifdef unused_code
@@ -1111,14 +1815,14 @@ static int s626_ai_rinsn(struct comedi_device *dev,
unsigned int *data)
{
struct s626_private *devpriv = dev->private;
- register uint8_t i;
- register int32_t *readaddr;
+ uint8_t i;
+ int32_t *readaddr;
/* Trigger ADC scan loop start */
- s626_mc_enable(dev, MC2_ADC_RPS, P_MC2);
+ s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
/* Wait until ADC scan loop is finished (RPS Signal 0 reset) */
- while (s626_mc_test(dev, MC2_ADC_RPS, P_MC2))
+ while (s626_mc_test(dev, S626_MC2_ADC_RPS, S626_P_MC2))
;
/*
@@ -1126,13 +1830,13 @@ static int s626_ai_rinsn(struct comedi_device *dev,
* first uint16_t in the buffer because it contains junk data from
* the final ADC of the previous poll list scan.
*/
- readaddr = (uint32_t *)devpriv->ANABuf.LogicalBase + 1;
+ readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
/*
* Convert ADC data to 16-bit integer values and
* copy to application buffer.
*/
- for (i = 0; i < devpriv->AdcItems; i++) {
+ for (i = 0; i < devpriv->adc_items; i++) {
*data = s626_ai_reg_to_uint(*readaddr++);
data++;
}
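
The per-sample conversion in this loop is done by s626_ai_reg_to_uint(), defined earlier in the file and not shown in this hunk. As a rough sketch only (the driver's actual helper may differ in detail), the raw 32-bit FB BUFFER word is assumed to carry a 14-bit two's-complement ADC result in its upper bits, which gets re-centred into an unsigned comedi sample:

static unsigned int example_adc_reg_to_uint(uint32_t reg)  /* hypothetical */
{
        unsigned int v = reg >> 18;     /* assumed: result in bits 31:18 */

        if (v & 0x2000)                 /* negative two's-complement value */
                v &= 0x1fff;
        else                            /* positive value: add the offset */
                v += 1 << 13;
        return v;
}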
@@ -1148,55 +1852,61 @@ static int s626_ai_insn_read(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
uint16_t chan = CR_CHAN(insn->chanspec);
uint16_t range = CR_RANGE(insn->chanspec);
- uint16_t AdcSpec = 0;
- uint32_t GpioImage;
- int tmp;
+ uint16_t adc_spec = 0;
+ uint32_t gpio_image;
+ uint32_t tmp;
int n;
- /* Convert application's ADC specification into form
+ /*
+ * Convert application's ADC specification into form
* appropriate for register programming.
*/
if (range == 0)
- AdcSpec = (chan << 8) | (GSEL_BIPOLAR5V);
+ adc_spec = (chan << 8) | (S626_GSEL_BIPOLAR5V);
else
- AdcSpec = (chan << 8) | (GSEL_BIPOLAR10V);
+ adc_spec = (chan << 8) | (S626_GSEL_BIPOLAR10V);
- /* Switch ADC analog gain. */
- DEBIwrite(dev, LP_GSEL, AdcSpec); /* Set gain. */
+ /* Switch ADC analog gain. */
+ s626_debi_write(dev, S626_LP_GSEL, adc_spec); /* Set gain. */
- /* Select ADC analog input channel. */
- DEBIwrite(dev, LP_ISEL, AdcSpec); /* Select channel. */
+ /* Select ADC analog input channel. */
+ s626_debi_write(dev, S626_LP_ISEL, adc_spec); /* Select channel. */
for (n = 0; n < insn->n; n++) {
-
- /* Delay 10 microseconds for analog input settling. */
+ /* Delay 10 microseconds for analog input settling. */
udelay(10);
/* Start ADC by pulsing GPIO1 low */
- GpioImage = readl(devpriv->mmio + P_GPIO);
+ gpio_image = readl(devpriv->mmio + S626_P_GPIO);
/* Assert ADC Start command */
- writel(GpioImage & ~GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(gpio_image & ~S626_GPIO1_HI,
+ devpriv->mmio + S626_P_GPIO);
/* and stretch it out */
- writel(GpioImage & ~GPIO1_HI, devpriv->mmio + P_GPIO);
- writel(GpioImage & ~GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(gpio_image & ~S626_GPIO1_HI,
+ devpriv->mmio + S626_P_GPIO);
+ writel(gpio_image & ~S626_GPIO1_HI,
+ devpriv->mmio + S626_P_GPIO);
/* Negate ADC Start command */
- writel(GpioImage | GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(gpio_image | S626_GPIO1_HI, devpriv->mmio + S626_P_GPIO);
- /* Wait for ADC to complete (GPIO2 is asserted high when */
- /* ADC not busy) and for data from previous conversion to */
- /* shift into FB BUFFER 1 register. */
+ /*
+ * Wait for ADC to complete (GPIO2 is asserted high when
+ * ADC not busy) and for data from previous conversion to
+ * shift into FB BUFFER 1 register.
+ */
/* Wait for ADC done */
- while (!(readl(devpriv->mmio + P_PSR) & PSR_GPIO2))
+ while (!(readl(devpriv->mmio + S626_P_PSR) & S626_PSR_GPIO2))
;
/* Fetch ADC data */
if (n != 0) {
- tmp = readl(devpriv->mmio + P_FB_BUFFER1);
+ tmp = readl(devpriv->mmio + S626_P_FB_BUFFER1);
data[n - 1] = s626_ai_reg_to_uint(tmp);
}
- /* Allow the ADC to stabilize for 4 microseconds before
+ /*
+ * Allow the ADC to stabilize for 4 microseconds before
* starting the next (final) conversion. This delay is
* necessary to allow sufficient time between last
* conversion finished and the start of the next
@@ -1207,28 +1917,30 @@ static int s626_ai_insn_read(struct comedi_device *dev,
udelay(4);
}
- /* Start a dummy conversion to cause the data from the
- * previous conversion to be shifted in. */
- GpioImage = readl(devpriv->mmio + P_GPIO);
+ /*
+ * Start a dummy conversion to cause the data from the
+ * previous conversion to be shifted in.
+ */
+ gpio_image = readl(devpriv->mmio + S626_P_GPIO);
/* Assert ADC Start command */
- writel(GpioImage & ~GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(gpio_image & ~S626_GPIO1_HI, devpriv->mmio + S626_P_GPIO);
/* and stretch it out */
- writel(GpioImage & ~GPIO1_HI, devpriv->mmio + P_GPIO);
- writel(GpioImage & ~GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(gpio_image & ~S626_GPIO1_HI, devpriv->mmio + S626_P_GPIO);
+ writel(gpio_image & ~S626_GPIO1_HI, devpriv->mmio + S626_P_GPIO);
/* Negate ADC Start command */
- writel(GpioImage | GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(gpio_image | S626_GPIO1_HI, devpriv->mmio + S626_P_GPIO);
- /* Wait for the data to arrive in FB BUFFER 1 register. */
+ /* Wait for the data to arrive in FB BUFFER 1 register. */
/* Wait for ADC done */
- while (!(readl(devpriv->mmio + P_PSR) & PSR_GPIO2))
+ while (!(readl(devpriv->mmio + S626_P_PSR) & S626_PSR_GPIO2))
;
- /* Fetch ADC data from audio interface's input shift register. */
+ /* Fetch ADC data from audio interface's input shift register. */
/* Fetch ADC data */
if (n != 0) {
- tmp = readl(devpriv->mmio + P_FB_BUFFER1);
+ tmp = readl(devpriv->mmio + S626_P_FB_BUFFER1);
data[n - 1] = s626_ai_reg_to_uint(tmp);
}
@@ -1237,17 +1949,16 @@ static int s626_ai_insn_read(struct comedi_device *dev,
static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd)
{
-
int n;
for (n = 0; n < cmd->chanlist_len; n++) {
- if (CR_RANGE((cmd->chanlist)[n]) == 0)
- ppl[n] = (CR_CHAN((cmd->chanlist)[n])) | (RANGE_5V);
+ if (CR_RANGE(cmd->chanlist[n]) == 0)
+ ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_5V;
else
- ppl[n] = (CR_CHAN((cmd->chanlist)[n])) | (RANGE_10V);
+ ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_10V;
}
if (n != 0)
- ppl[n - 1] |= EOPL;
+ ppl[n - 1] |= S626_EOPL;
return n;
}
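
As a concrete illustration of the mapping above (example values only), a two-entry chanlist selecting channel 2 on range 0 and channel 5 on range 1 would be loaded as:

        uint8_t ppl[16];
        int n = s626_ai_load_polllist(ppl, cmd);

        /*
         * n == 2
         * ppl[0] == (2 | S626_RANGE_5V)
         * ppl[1] == (5 | S626_RANGE_10V | S626_EOPL)  - last slot ends the list
         */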
@@ -1259,18 +1970,20 @@ static int s626_ai_inttrig(struct comedi_device *dev,
return -EINVAL;
/* Start executing the RPS program */
- s626_mc_enable(dev, MC1_ERPS1, P_MC1);
+ s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
s->async->inttrig = NULL;
return 1;
}
-/* This function doesn't require a particular form, this is just what
+/*
+ * This function doesn't require a particular form, this is just what
* happens to be used in some of the drivers. It should convert ns
* nanoseconds to a counter value suitable for programming the device.
 * Also, it should adjust ns so that it corresponds to the actual time
- * that the device will use. */
+ * that the device will use.
+ */
static int s626_ns_to_timer(int *nanosec, int round_mode)
{
int divider, base;
@@ -1294,68 +2007,75 @@ static int s626_ns_to_timer(int *nanosec, int round_mode)
return divider - 1;
}
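
The body of s626_ns_to_timer() is elided by this hunk. A minimal sketch of the usual comedi pattern it follows, assuming a 500 ns (2 MHz) timebase that may differ from the driver's actual base value, is:

        int divider, base = 500;        /* assumed 2 MHz internal clock */

        switch (round_mode) {
        case TRIG_ROUND_NEAREST:
        default:
                divider = (*nanosec + base / 2) / base;
                break;
        case TRIG_ROUND_DOWN:
                divider = *nanosec / base;
                break;
        case TRIG_ROUND_UP:
                divider = (*nanosec + base - 1) / base;
                break;
        }
        *nanosec = base * divider;      /* report the period actually used */
        return divider - 1;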
-static void s626_timer_load(struct comedi_device *dev, struct enc_private *k,
- int tick)
+static void s626_timer_load(struct comedi_device *dev,
+ const struct s626_enc_info *k, int tick)
{
- uint16_t Setup = (LOADSRC_INDX << BF_LOADSRC) | /* Preload upon */
- /* index. */
- (INDXSRC_SOFT << BF_INDXSRC) | /* Disable hardware index. */
- (CLKSRC_TIMER << BF_CLKSRC) | /* Operating mode is Timer. */
- (CLKPOL_POS << BF_CLKPOL) | /* Active high clock. */
- (CNTDIR_DOWN << BF_CLKPOL) | /* Count direction is Down. */
- (CLKMULT_1X << BF_CLKMULT) | /* Clock multiplier is 1x. */
- (CLKENAB_INDEX << BF_CLKENAB);
- uint16_t valueSrclatch = LATCHSRC_A_INDXA;
- /* uint16_t enab=CLKENAB_ALWAYS; */
+ uint16_t setup =
+ /* Preload upon index. */
+ S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
+ /* Disable hardware index. */
+ S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
+ /* Operating mode is Timer. */
+ S626_SET_STD_ENCMODE(S626_ENCMODE_TIMER) |
+ /* Count direction is Down. */
+ S626_SET_STD_CLKPOL(S626_CNTDIR_DOWN) |
+ /* Clock multiplier is 1x. */
+ S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
+ /* Enabled by index */
+ S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
+ uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA;
+ /* uint16_t enab = S626_CLKENAB_ALWAYS; */
- k->SetMode(dev, k, Setup, FALSE);
+ k->set_mode(dev, k, setup, false);
- /* Set the preload register */
- Preload(dev, k, tick);
+ /* Set the preload register */
+ s626_preload(dev, k, tick);
- /* Software index pulse forces the preload register to load */
- /* into the counter */
- k->SetLoadTrig(dev, k, 0);
- k->PulseIndex(dev, k);
+ /*
+ * Software index pulse forces the preload register to load
+ * into the counter
+ */
+ k->set_load_trig(dev, k, 0);
+ k->pulse_index(dev, k);
/* set reload on counter overflow */
- k->SetLoadTrig(dev, k, 1);
+ k->set_load_trig(dev, k, 1);
/* set interrupt on overflow */
- k->SetIntSrc(dev, k, INTSRC_OVER);
+ k->set_int_src(dev, k, S626_INTSRC_OVER);
- SetLatchSource(dev, k, valueSrclatch);
- /* k->SetEnable(dev,k,(uint16_t)(enab != 0)); */
+ s626_set_latch_source(dev, k, value_latchsrc);
+ /* k->set_enable(dev, k, (uint16_t)(enab != 0)); */
}
-/* TO COMPLETE */
+/* TO COMPLETE */
static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct s626_private *devpriv = dev->private;
uint8_t ppl[16];
struct comedi_cmd *cmd = &s->async->cmd;
- struct enc_private *k;
+ const struct s626_enc_info *k;
int tick;
if (devpriv->ai_cmd_running) {
- printk(KERN_ERR "s626_ai_cmd: Another ai_cmd is running %d\n",
- dev->minor);
+ dev_err(dev->class_dev,
+ "s626_ai_cmd: Another ai_cmd is running\n");
return -EBUSY;
}
/* disable interrupt */
- writel(0, devpriv->mmio + P_IER);
+ writel(0, devpriv->mmio + S626_P_IER);
/* clear interrupt request */
- writel(IRQ_RPS1 | IRQ_GPIO3, devpriv->mmio + P_ISR);
+ writel(S626_IRQ_RPS1 | S626_IRQ_GPIO3, devpriv->mmio + S626_P_ISR);
/* clear any pending interrupt */
s626_dio_clear_irq(dev);
- /* s626_enc_clear_irq(dev); */
+ /* s626_enc_clear_irq(dev); */
/* reset ai_cmd_running flag */
devpriv->ai_cmd_running = 0;
- /* test if cmd is valid */
+ /* test if cmd is valid */
if (cmd == NULL)
return -EINVAL;
@@ -1373,17 +2093,20 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
case TRIG_FOLLOW:
break;
case TRIG_TIMER:
- /* set a conter to generate adc trigger at scan_begin_arg interval */
- k = &encpriv[5];
+ /*
+ * set a counter to generate adc trigger at scan_begin_arg
+ * interval
+ */
+ k = &s626_enc_chan_info[5];
tick = s626_ns_to_timer((int *)&cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
/* load timer value and enable interrupt */
s626_timer_load(dev, k, tick);
- k->SetEnable(dev, k, CLKENAB_ALWAYS);
+ k->set_enable(dev, k, S626_CLKENAB_ALWAYS);
break;
case TRIG_EXT:
- /* set the digital line and interrupt for scan trigger */
+ /* set the digital line and interrupt for scan trigger */
if (cmd->start_src != TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
break;
@@ -1393,52 +2116,53 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
case TRIG_NOW:
break;
case TRIG_TIMER:
- /* set a conter to generate adc trigger at convert_arg interval */
- k = &encpriv[4];
+ /*
+ * set a counter to generate adc trigger at convert_arg
+ * interval
+ */
+ k = &s626_enc_chan_info[4];
tick = s626_ns_to_timer((int *)&cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
/* load timer value and enable interrupt */
s626_timer_load(dev, k, tick);
- k->SetEnable(dev, k, CLKENAB_INDEX);
+ k->set_enable(dev, k, S626_CLKENAB_INDEX);
break;
case TRIG_EXT:
- /* set the digital line and interrupt for convert trigger */
- if (cmd->scan_begin_src != TRIG_EXT
- && cmd->start_src == TRIG_EXT)
+ /* set the digital line and interrupt for convert trigger */
+ if (cmd->scan_begin_src != TRIG_EXT &&
+ cmd->start_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->convert_arg);
break;
}
switch (cmd->stop_src) {
case TRIG_COUNT:
- /* data arrives as one packet */
+ /* data arrives as one packet */
devpriv->ai_sample_count = cmd->stop_arg;
- devpriv->ai_continous = 0;
+ devpriv->ai_continuous = 0;
break;
case TRIG_NONE:
- /* continous acquisition */
- devpriv->ai_continous = 1;
+ /* continuous acquisition */
+ devpriv->ai_continuous = 1;
devpriv->ai_sample_count = 1;
break;
}
- ResetADC(dev, ppl);
+ s626_reset_adc(dev, ppl);
switch (cmd->start_src) {
case TRIG_NOW:
/* Trigger ADC scan loop start */
- /* s626_mc_enable(dev, MC2_ADC_RPS, P_MC2); */
+ /* s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2); */
/* Start executing the RPS program */
- s626_mc_enable(dev, MC1_ERPS1, P_MC1);
-
+ s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
s->async->inttrig = NULL;
break;
case TRIG_EXT:
/* configure DIO channel for acquisition trigger */
s626_dio_set_irq(dev, cmd->start_arg);
-
s->async->inttrig = NULL;
break;
case TRIG_INT:
@@ -1447,7 +2171,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
/* enable interrupt */
- writel(IRQ_GPIO3 | IRQ_RPS1, devpriv->mmio + P_IER);
+ writel(S626_IRQ_GPIO3 | S626_IRQ_RPS1, devpriv->mmio + S626_P_IER);
return 0;
}
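
For reference, an illustrative command (not driver-supplied; values are arbitrary examples for a two-channel chanlist) that exercises the timer-paced paths above would be:

        struct comedi_cmd example_cmd = {
                .start_src      = TRIG_NOW,
                .scan_begin_src = TRIG_TIMER,
                .scan_begin_arg = 1000000,      /* 1 ms per scan */
                .convert_src    = TRIG_TIMER,
                .convert_arg    = 200000,       /* 200 us per conversion */
                .scan_end_src   = TRIG_COUNT,
                .scan_end_arg   = 2,            /* == chanlist_len */
                .stop_src       = TRIG_COUNT,
                .stop_arg       = 100,          /* acquire 100 scans */
        };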
@@ -1461,11 +2185,11 @@ static int s626_ai_cmdtest(struct comedi_device *dev,
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src,
- TRIG_NOW | TRIG_INT | TRIG_EXT);
+ TRIG_NOW | TRIG_INT | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+ TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
err |= cfc_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+ TRIG_TIMER | TRIG_EXT | TRIG_NOW);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -1490,34 +2214,34 @@ static int s626_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->start_src == TRIG_EXT)
err |= cfc_check_trigger_arg_max(&cmd->start_arg, 39);
-
if (cmd->scan_begin_src == TRIG_EXT)
err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 39);
-
if (cmd->convert_src == TRIG_EXT)
err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 39);
-#define MAX_SPEED 200000 /* in nanoseconds */
-#define MIN_SPEED 2000000000 /* in nanoseconds */
+#define S626_MAX_SPEED 200000 /* in nanoseconds */
+#define S626_MIN_SPEED 2000000000 /* in nanoseconds */
if (cmd->scan_begin_src == TRIG_TIMER) {
err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED);
+ S626_MAX_SPEED);
err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
- MIN_SPEED);
+ S626_MIN_SPEED);
} else {
/* external trigger */
/* should be level/edge, hi/lo specification here */
/* should specify multiple external triggers */
-/* err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9); */
+ /* err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9); */
}
if (cmd->convert_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg, MAX_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg, MIN_SPEED);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
+ S626_MAX_SPEED);
+ err |= cfc_check_trigger_arg_max(&cmd->convert_arg,
+ S626_MIN_SPEED);
} else {
/* external trigger */
/* see above */
-/* err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9); */
+ /* err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9); */
}
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
@@ -1546,10 +2270,10 @@ static int s626_ai_cmdtest(struct comedi_device *dev,
if (tmp != cmd->convert_arg)
err++;
if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->scan_begin_arg <
- cmd->convert_arg * cmd->scan_end_arg) {
- cmd->scan_begin_arg =
- cmd->convert_arg * cmd->scan_end_arg;
+ cmd->scan_begin_arg < cmd->convert_arg *
+ cmd->scan_end_arg) {
+ cmd->scan_begin_arg = cmd->convert_arg *
+ cmd->scan_end_arg;
err++;
}
}
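
A worked example of the step 4 consistency check above, with illustrative numbers: convert_arg = 200000 ns and scan_end_arg = 16 channels need at least 16 * 200000 = 3200000 ns per scan, so any smaller scan_begin_arg is raised to 3200000 and err is bumped, making cmdtest report a step 4 adjustment.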
@@ -1565,10 +2289,10 @@ static int s626_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
struct s626_private *devpriv = dev->private;
/* Stop RPS program in case it is currently running */
- s626_mc_disable(dev, MC1_ERPS1, P_MC1);
+ s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
/* disable master interrupt */
- writel(0, devpriv->mmio + P_IER);
+ writel(0, devpriv->mmio + S626_P_IER);
devpriv->ai_cmd_running = 0;
@@ -1588,7 +2312,7 @@ static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->ao_readback[CR_CHAN(insn->chanspec)] = data[i];
dacdata -= (0x1fff);
- SetDAC(dev, chan, dacdata);
+ s626_set_dac(dev, chan, dacdata);
}
return i;
@@ -1606,7 +2330,9 @@ static int s626_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
return i;
}
-/* *************** DIGITAL I/O FUNCTIONS ***************
+/* *************** DIGITAL I/O FUNCTIONS *************** */
+
+/*
* All DIO functions address a group of DIO channels by means of
* "group" argument. group may be 0, 1 or 2, which correspond to DIO
* ports A, B and C, respectively.
@@ -1616,19 +2342,19 @@ static void s626_dio_init(struct comedi_device *dev)
{
uint16_t group;
- /* Prepare to treat writes to WRCapSel as capture disables. */
- DEBIwrite(dev, LP_MISC1, MISC1_NOEDCAP);
+ /* Prepare to treat writes to WRCapSel as capture disables. */
+ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
- /* For each group of sixteen channels ... */
+ /* For each group of sixteen channels ... */
for (group = 0; group < S626_DIO_BANKS; group++) {
/* Disable all interrupts */
- DEBIwrite(dev, LP_WRINTSEL(group), 0);
+ s626_debi_write(dev, S626_LP_WRINTSEL(group), 0);
/* Disable all event captures */
- DEBIwrite(dev, LP_WRCAPSEL(group), 0xffff);
+ s626_debi_write(dev, S626_LP_WRCAPSEL(group), 0xffff);
/* Init all DIOs to default edge polarity */
- DEBIwrite(dev, LP_WREDGSEL(group), 0);
+ s626_debi_write(dev, S626_LP_WREDGSEL(group), 0);
/* Program all outputs to inactive state */
- DEBIwrite(dev, LP_WRDOUT(group), 0);
+ s626_debi_write(dev, S626_LP_WRDOUT(group), 0);
}
}
@@ -1638,20 +2364,11 @@ static int s626_dio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
unsigned long group = (unsigned long)s->private;
- unsigned long mask = data[0];
- unsigned long bits = data[1];
- if (mask) {
- /* Check if requested channels are configured for output */
- if ((s->io_bits & mask) != mask)
- return -EIO;
+ if (comedi_dio_update_state(s, data))
+ s626_debi_write(dev, S626_LP_WRDOUT(group), s->state);
- s->state &= ~mask;
- s->state |= (bits & mask);
-
- DEBIwrite(dev, LP_WRDOUT(group), s->state);
- }
- data[1] = DEBIread(dev, LP_RDDIN(group));
+ data[1] = s626_debi_read(dev, S626_LP_RDDIN(group));
return insn->n;
}
@@ -1668,42 +2385,51 @@ static int s626_dio_insn_config(struct comedi_device *dev,
if (ret)
return ret;
- DEBIwrite(dev, LP_WRDOUT(group), s->io_bits);
+ s626_debi_write(dev, S626_LP_WRDOUT(group), s->io_bits);
return insn->n;
}
-/* Now this function initializes the value of the counter (data[0])
- and set the subdevice. To complete with trigger and interrupt
- configuration */
-/* FIXME: data[0] is supposed to be an INSN_CONFIG_xxx constant indicating
+/*
+ * Now this function initializes the value of the counter (data[0])
+ * and set the subdevice. To complete with trigger and interrupt
+ * configuration.
+ *
+ * FIXME: data[0] is supposed to be an INSN_CONFIG_xxx constant indicating
* what is being configured, but this function appears to be using data[0]
- * as a variable. */
+ * as a variable.
+ */
static int s626_enc_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- uint16_t Setup = (LOADSRC_INDX << BF_LOADSRC) | /* Preload upon */
- /* index. */
- (INDXSRC_SOFT << BF_INDXSRC) | /* Disable hardware index. */
- (CLKSRC_COUNTER << BF_CLKSRC) | /* Operating mode is Counter. */
- (CLKPOL_POS << BF_CLKPOL) | /* Active high clock. */
- /* ( CNTDIR_UP << BF_CLKPOL ) | // Count direction is Down. */
- (CLKMULT_1X << BF_CLKMULT) | /* Clock multiplier is 1x. */
- (CLKENAB_INDEX << BF_CLKENAB);
- /* uint16_t DisableIntSrc=TRUE; */
- /* uint32_t Preloadvalue; //Counter initial value */
- uint16_t valueSrclatch = LATCHSRC_AB_READ;
- uint16_t enab = CLKENAB_ALWAYS;
- struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];
-
- /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
-
- k->SetMode(dev, k, Setup, TRUE);
- Preload(dev, k, data[0]);
- k->PulseIndex(dev, k);
- SetLatchSource(dev, k, valueSrclatch);
- k->SetEnable(dev, k, (uint16_t) (enab != 0));
+ uint16_t setup =
+ /* Preload upon index. */
+ S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
+ /* Disable hardware index. */
+ S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
+ /* Operating mode is Counter. */
+ S626_SET_STD_ENCMODE(S626_ENCMODE_COUNTER) |
+ /* Active high clock. */
+ S626_SET_STD_CLKPOL(S626_CLKPOL_POS) |
+ /* Clock multiplier is 1x. */
+ S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
+ /* Enabled by index */
+ S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
+ /* uint16_t disable_int_src = true; */
+ /* uint32_t Preloadvalue; //Counter initial value */
+ uint16_t value_latchsrc = S626_LATCHSRC_AB_READ;
+ uint16_t enab = S626_CLKENAB_ALWAYS;
+ const struct s626_enc_info *k =
+ &s626_enc_chan_info[CR_CHAN(insn->chanspec)];
+
+ /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
+
+ k->set_mode(dev, k, setup, true);
+ s626_preload(dev, k, data[0]);
+ k->pulse_index(dev, k);
+ s626_set_latch_source(dev, k, value_latchsrc);
+ k->set_enable(dev, k, (enab != 0));
return insn->n;
}
@@ -1712,12 +2438,12 @@ static int s626_enc_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
-
int n;
- struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];
+ const struct s626_enc_info *k =
+ &s626_enc_chan_info[CR_CHAN(insn->chanspec)];
for (n = 0; n < insn->n; n++)
- data[n] = ReadLatch(dev, k);
+ data[n] = s626_read_latch(dev, k);
return n;
}
@@ -1726,31 +2452,32 @@ static int s626_enc_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
+ const struct s626_enc_info *k =
+ &s626_enc_chan_info[CR_CHAN(insn->chanspec)];
- struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];
+ /* Set the preload register */
+ s626_preload(dev, k, data[0]);
- /* Set the preload register */
- Preload(dev, k, data[0]);
-
- /* Software index pulse forces the preload register to load */
- /* into the counter */
- k->SetLoadTrig(dev, k, 0);
- k->PulseIndex(dev, k);
- k->SetLoadTrig(dev, k, 2);
+ /*
+ * Software index pulse forces the preload register to load
+ * into the counter
+ */
+ k->set_load_trig(dev, k, 0);
+ k->pulse_index(dev, k);
+ k->set_load_trig(dev, k, 2);
return 1;
}
-static void WriteMISC2(struct comedi_device *dev, uint16_t NewImage)
+static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
{
- DEBIwrite(dev, LP_MISC1, MISC1_WENABLE); /* enab writes to */
- /* MISC2 register. */
- DEBIwrite(dev, LP_WRMISC2, NewImage); /* Write new image to MISC2. */
- DEBIwrite(dev, LP_MISC1, MISC1_WDISABLE); /* Disable writes to MISC2. */
+ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
+ s626_debi_write(dev, S626_LP_WRMISC2, new_image);
+ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WDISABLE);
}
-static void CloseDMAB(struct comedi_device *dev, struct bufferDMA *pdma,
- size_t bsize)
+static void s626_close_dma_b(struct comedi_device *dev,
+ struct s626_buffer_dma *pdma, size_t bsize)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
void *vbptr;
@@ -1758,554 +2485,44 @@ static void CloseDMAB(struct comedi_device *dev, struct bufferDMA *pdma,
if (pdma == NULL)
return;
- /* find the matching allocation from the board struct */
- vbptr = pdma->LogicalBase;
- vpptr = pdma->PhysicalBase;
+ /* find the matching allocation from the board struct */
+ vbptr = pdma->logical_base;
+ vpptr = pdma->physical_base;
if (vbptr) {
pci_free_consistent(pcidev, bsize, vbptr, vpptr);
- pdma->LogicalBase = NULL;
- pdma->PhysicalBase = 0;
- }
-}
-
-/* ****** PRIVATE COUNTER FUNCTIONS ****** */
-
-/* Reset a counter's index and overflow event capture flags. */
-
-static void ResetCapFlags_A(struct comedi_device *dev, struct enc_private *k)
-{
- DEBIreplace(dev, k->MyCRB, ~CRBMSK_INTCTRL,
- CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A);
-}
-
-static void ResetCapFlags_B(struct comedi_device *dev, struct enc_private *k)
-{
- DEBIreplace(dev, k->MyCRB, ~CRBMSK_INTCTRL,
- CRBMSK_INTRESETCMD | CRBMSK_INTRESET_B);
-}
-
-/* Return counter setup in a format (COUNTER_SETUP) that is consistent */
-/* for both A and B counters. */
-
-static uint16_t GetMode_A(struct comedi_device *dev, struct enc_private *k)
-{
- register uint16_t cra;
- register uint16_t crb;
- register uint16_t setup;
-
- /* Fetch CRA and CRB register images. */
- cra = DEBIread(dev, k->MyCRA);
- crb = DEBIread(dev, k->MyCRB);
-
- /* Populate the standardized counter setup bit fields. Note: */
- /* IndexSrc is restricted to ENC_X or IndxPol. */
- setup = ((cra & STDMSK_LOADSRC) /* LoadSrc = LoadSrcA. */
- |((crb << (STDBIT_LATCHSRC - CRBBIT_LATCHSRC)) & STDMSK_LATCHSRC) /* LatchSrc = LatchSrcA. */
- |((cra << (STDBIT_INTSRC - CRABIT_INTSRC_A)) & STDMSK_INTSRC) /* IntSrc = IntSrcA. */
- |((cra << (STDBIT_INDXSRC - (CRABIT_INDXSRC_A + 1))) & STDMSK_INDXSRC) /* IndxSrc = IndxSrcA<1>. */
- |((cra >> (CRABIT_INDXPOL_A - STDBIT_INDXPOL)) & STDMSK_INDXPOL) /* IndxPol = IndxPolA. */
- |((crb >> (CRBBIT_CLKENAB_A - STDBIT_CLKENAB)) & STDMSK_CLKENAB)); /* ClkEnab = ClkEnabA. */
-
- /* Adjust mode-dependent parameters. */
- if (cra & (2 << CRABIT_CLKSRC_A)) /* If Timer mode (ClkSrcA<1> == 1): */
- setup |= ((CLKSRC_TIMER << STDBIT_CLKSRC) /* Indicate Timer mode. */
- |((cra << (STDBIT_CLKPOL - CRABIT_CLKSRC_A)) & STDMSK_CLKPOL) /* Set ClkPol to indicate count direction (ClkSrcA<0>). */
- |(MULT_X1 << STDBIT_CLKMULT)); /* ClkMult must be 1x in Timer mode. */
-
- else /* If Counter mode (ClkSrcA<1> == 0): */
- setup |= ((CLKSRC_COUNTER << STDBIT_CLKSRC) /* Indicate Counter mode. */
- |((cra >> (CRABIT_CLKPOL_A - STDBIT_CLKPOL)) & STDMSK_CLKPOL) /* Pass through ClkPol. */
- |(((cra & CRAMSK_CLKMULT_A) == (MULT_X0 << CRABIT_CLKMULT_A)) ? /* Force ClkMult to 1x if not legal, else pass through. */
- (MULT_X1 << STDBIT_CLKMULT) :
- ((cra >> (CRABIT_CLKMULT_A -
- STDBIT_CLKMULT)) & STDMSK_CLKMULT)));
-
- /* Return adjusted counter setup. */
- return setup;
-}
-
-static uint16_t GetMode_B(struct comedi_device *dev, struct enc_private *k)
-{
- register uint16_t cra;
- register uint16_t crb;
- register uint16_t setup;
-
- /* Fetch CRA and CRB register images. */
- cra = DEBIread(dev, k->MyCRA);
- crb = DEBIread(dev, k->MyCRB);
-
- /* Populate the standardized counter setup bit fields. Note: */
- /* IndexSrc is restricted to ENC_X or IndxPol. */
- setup = (((crb << (STDBIT_INTSRC - CRBBIT_INTSRC_B)) & STDMSK_INTSRC) /* IntSrc = IntSrcB. */
- |((crb << (STDBIT_LATCHSRC - CRBBIT_LATCHSRC)) & STDMSK_LATCHSRC) /* LatchSrc = LatchSrcB. */
- |((crb << (STDBIT_LOADSRC - CRBBIT_LOADSRC_B)) & STDMSK_LOADSRC) /* LoadSrc = LoadSrcB. */
- |((crb << (STDBIT_INDXPOL - CRBBIT_INDXPOL_B)) & STDMSK_INDXPOL) /* IndxPol = IndxPolB. */
- |((crb >> (CRBBIT_CLKENAB_B - STDBIT_CLKENAB)) & STDMSK_CLKENAB) /* ClkEnab = ClkEnabB. */
- |((cra >> ((CRABIT_INDXSRC_B + 1) - STDBIT_INDXSRC)) & STDMSK_INDXSRC)); /* IndxSrc = IndxSrcB<1>. */
-
- /* Adjust mode-dependent parameters. */
- if ((crb & CRBMSK_CLKMULT_B) == (MULT_X0 << CRBBIT_CLKMULT_B)) /* If Extender mode (ClkMultB == MULT_X0): */
- setup |= ((CLKSRC_EXTENDER << STDBIT_CLKSRC) /* Indicate Extender mode. */
- |(MULT_X1 << STDBIT_CLKMULT) /* Indicate multiplier is 1x. */
- |((cra >> (CRABIT_CLKSRC_B - STDBIT_CLKPOL)) & STDMSK_CLKPOL)); /* Set ClkPol equal to Timer count direction (ClkSrcB<0>). */
-
- else if (cra & (2 << CRABIT_CLKSRC_B)) /* If Timer mode (ClkSrcB<1> == 1): */
- setup |= ((CLKSRC_TIMER << STDBIT_CLKSRC) /* Indicate Timer mode. */
- |(MULT_X1 << STDBIT_CLKMULT) /* Indicate multiplier is 1x. */
- |((cra >> (CRABIT_CLKSRC_B - STDBIT_CLKPOL)) & STDMSK_CLKPOL)); /* Set ClkPol equal to Timer count direction (ClkSrcB<0>). */
-
- else /* If Counter mode (ClkSrcB<1> == 0): */
- setup |= ((CLKSRC_COUNTER << STDBIT_CLKSRC) /* Indicate Timer mode. */
- |((crb >> (CRBBIT_CLKMULT_B - STDBIT_CLKMULT)) & STDMSK_CLKMULT) /* Clock multiplier is passed through. */
- |((crb << (STDBIT_CLKPOL - CRBBIT_CLKPOL_B)) & STDMSK_CLKPOL)); /* Clock polarity is passed through. */
-
- /* Return adjusted counter setup. */
- return setup;
-}
-
-/*
- * Set the operating mode for the specified counter. The setup
- * parameter is treated as a COUNTER_SETUP data type. The following
- * parameters are programmable (all other parms are ignored): ClkMult,
- * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
- */
-
-static void SetMode_A(struct comedi_device *dev, struct enc_private *k,
- uint16_t Setup, uint16_t DisableIntSrc)
-{
- struct s626_private *devpriv = dev->private;
- register uint16_t cra;
- register uint16_t crb;
- register uint16_t setup = Setup; /* Cache the Standard Setup. */
-
- /* Initialize CRA and CRB images. */
- cra = ((setup & CRAMSK_LOADSRC_A) /* Preload trigger is passed through. */
- |((setup & STDMSK_INDXSRC) >> (STDBIT_INDXSRC - (CRABIT_INDXSRC_A + 1)))); /* IndexSrc is restricted to ENC_X or IndxPol. */
-
- crb = (CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A /* Reset any pending CounterA event captures. */
- | ((setup & STDMSK_CLKENAB) << (CRBBIT_CLKENAB_A - STDBIT_CLKENAB))); /* Clock enable is passed through. */
-
- /* Force IntSrc to Disabled if DisableIntSrc is asserted. */
- if (!DisableIntSrc)
- cra |= ((setup & STDMSK_INTSRC) >> (STDBIT_INTSRC -
- CRABIT_INTSRC_A));
-
- /* Populate all mode-dependent attributes of CRA & CRB images. */
- switch ((setup & STDMSK_CLKSRC) >> STDBIT_CLKSRC) {
- case CLKSRC_EXTENDER: /* Extender Mode: Force to Timer mode */
- /* (Extender valid only for B counters). */
-
- case CLKSRC_TIMER: /* Timer Mode: */
- cra |= ((2 << CRABIT_CLKSRC_A) /* ClkSrcA<1> selects system clock */
- |((setup & STDMSK_CLKPOL) >> (STDBIT_CLKPOL - CRABIT_CLKSRC_A)) /* with count direction (ClkSrcA<0>) obtained from ClkPol. */
- |(1 << CRABIT_CLKPOL_A) /* ClkPolA behaves as always-on clock enable. */
- |(MULT_X1 << CRABIT_CLKMULT_A)); /* ClkMult must be 1x. */
- break;
-
- default: /* Counter Mode: */
- cra |= (CLKSRC_COUNTER /* Select ENC_C and ENC_D as clock/direction inputs. */
- | ((setup & STDMSK_CLKPOL) << (CRABIT_CLKPOL_A - STDBIT_CLKPOL)) /* Clock polarity is passed through. */
- |(((setup & STDMSK_CLKMULT) == (MULT_X0 << STDBIT_CLKMULT)) ? /* Force multiplier to x1 if not legal, otherwise pass through. */
- (MULT_X1 << CRABIT_CLKMULT_A) :
- ((setup & STDMSK_CLKMULT) << (CRABIT_CLKMULT_A -
- STDBIT_CLKMULT))));
+ pdma->logical_base = NULL;
+ pdma->physical_base = 0;
}
-
- /* Force positive index polarity if IndxSrc is software-driven only, */
- /* otherwise pass it through. */
- if (~setup & STDMSK_INDXSRC)
- cra |= ((setup & STDMSK_INDXPOL) << (CRABIT_INDXPOL_A -
- STDBIT_INDXPOL));
-
- /* If IntSrc has been forced to Disabled, update the MISC2 interrupt */
- /* enable mask to indicate the counter interrupt is disabled. */
- if (DisableIntSrc)
- devpriv->CounterIntEnabs &= ~k->MyEventBits[3];
-
- /* While retaining CounterB and LatchSrc configurations, program the */
- /* new counter operating mode. */
- DEBIreplace(dev, k->MyCRA, CRAMSK_INDXSRC_B | CRAMSK_CLKSRC_B, cra);
- DEBIreplace(dev, k->MyCRB, ~(CRBMSK_INTCTRL | CRBMSK_CLKENAB_A), crb);
-}
-
-static void SetMode_B(struct comedi_device *dev, struct enc_private *k,
- uint16_t Setup, uint16_t DisableIntSrc)
-{
- struct s626_private *devpriv = dev->private;
- register uint16_t cra;
- register uint16_t crb;
- register uint16_t setup = Setup; /* Cache the Standard Setup. */
-
- /* Initialize CRA and CRB images. */
- cra = ((setup & STDMSK_INDXSRC) << ((CRABIT_INDXSRC_B + 1) - STDBIT_INDXSRC)); /* IndexSrc field is restricted to ENC_X or IndxPol. */
-
- crb = (CRBMSK_INTRESETCMD | CRBMSK_INTRESET_B /* Reset event captures and disable interrupts. */
- | ((setup & STDMSK_CLKENAB) << (CRBBIT_CLKENAB_B - STDBIT_CLKENAB)) /* Clock enable is passed through. */
- |((setup & STDMSK_LOADSRC) >> (STDBIT_LOADSRC - CRBBIT_LOADSRC_B))); /* Preload trigger source is passed through. */
-
- /* Force IntSrc to Disabled if DisableIntSrc is asserted. */
- if (!DisableIntSrc)
- crb |= ((setup & STDMSK_INTSRC) >> (STDBIT_INTSRC -
- CRBBIT_INTSRC_B));
-
- /* Populate all mode-dependent attributes of CRA & CRB images. */
- switch ((setup & STDMSK_CLKSRC) >> STDBIT_CLKSRC) {
- case CLKSRC_TIMER: /* Timer Mode: */
- cra |= ((2 << CRABIT_CLKSRC_B) /* ClkSrcB<1> selects system clock */
- |((setup & STDMSK_CLKPOL) << (CRABIT_CLKSRC_B - STDBIT_CLKPOL))); /* with direction (ClkSrcB<0>) obtained from ClkPol. */
- crb |= ((1 << CRBBIT_CLKPOL_B) /* ClkPolB behaves as always-on clock enable. */
- |(MULT_X1 << CRBBIT_CLKMULT_B)); /* ClkMultB must be 1x. */
- break;
-
- case CLKSRC_EXTENDER: /* Extender Mode: */
- cra |= ((2 << CRABIT_CLKSRC_B) /* ClkSrcB source is OverflowA (same as "timer") */
- |((setup & STDMSK_CLKPOL) << (CRABIT_CLKSRC_B - STDBIT_CLKPOL))); /* with direction obtained from ClkPol. */
- crb |= ((1 << CRBBIT_CLKPOL_B) /* ClkPolB controls IndexB -- always set to active. */
- |(MULT_X0 << CRBBIT_CLKMULT_B)); /* ClkMultB selects OverflowA as the clock source. */
- break;
-
- default: /* Counter Mode: */
- cra |= (CLKSRC_COUNTER << CRABIT_CLKSRC_B); /* Select ENC_C and ENC_D as clock/direction inputs. */
- crb |= (((setup & STDMSK_CLKPOL) >> (STDBIT_CLKPOL - CRBBIT_CLKPOL_B)) /* ClkPol is passed through. */
- |(((setup & STDMSK_CLKMULT) == (MULT_X0 << STDBIT_CLKMULT)) ? /* Force ClkMult to x1 if not legal, otherwise pass through. */
- (MULT_X1 << CRBBIT_CLKMULT_B) :
- ((setup & STDMSK_CLKMULT) << (CRBBIT_CLKMULT_B -
- STDBIT_CLKMULT))));
- }
-
- /* Force positive index polarity if IndxSrc is software-driven only, */
- /* otherwise pass it through. */
- if (~setup & STDMSK_INDXSRC)
- crb |= ((setup & STDMSK_INDXPOL) >> (STDBIT_INDXPOL -
- CRBBIT_INDXPOL_B));
-
- /* If IntSrc has been forced to Disabled, update the MISC2 interrupt */
- /* enable mask to indicate the counter interrupt is disabled. */
- if (DisableIntSrc)
- devpriv->CounterIntEnabs &= ~k->MyEventBits[3];
-
- /* While retaining CounterA and LatchSrc configurations, program the */
- /* new counter operating mode. */
- DEBIreplace(dev, k->MyCRA, ~(CRAMSK_INDXSRC_B | CRAMSK_CLKSRC_B), cra);
- DEBIreplace(dev, k->MyCRB, CRBMSK_CLKENAB_A | CRBMSK_LATCHSRC, crb);
-}
-
-/* Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index. */
-
-static void SetEnable_A(struct comedi_device *dev, struct enc_private *k,
- uint16_t enab)
-{
- DEBIreplace(dev, k->MyCRB, ~(CRBMSK_INTCTRL | CRBMSK_CLKENAB_A),
- enab << CRBBIT_CLKENAB_A);
-}
-
-static void SetEnable_B(struct comedi_device *dev, struct enc_private *k,
- uint16_t enab)
-{
- DEBIreplace(dev, k->MyCRB, ~(CRBMSK_INTCTRL | CRBMSK_CLKENAB_B),
- enab << CRBBIT_CLKENAB_B);
-}
-
-static uint16_t GetEnable_A(struct comedi_device *dev, struct enc_private *k)
-{
- return (DEBIread(dev, k->MyCRB) >> CRBBIT_CLKENAB_A) & 1;
-}
-
-static uint16_t GetEnable_B(struct comedi_device *dev, struct enc_private *k)
-{
- return (DEBIread(dev, k->MyCRB) >> CRBBIT_CLKENAB_B) & 1;
-}
-
-/*
- * static uint16_t GetLatchSource(struct comedi_device *dev, struct enc_private *k )
- * {
- * return ( DEBIread( dev, k->MyCRB) >> CRBBIT_LATCHSRC ) & 3;
- * }
- */
-
-/*
- * Return/set the event that will trigger transfer of the preload
- * register into the counter. 0=ThisCntr_Index, 1=ThisCntr_Overflow,
- * 2=OverflowA (B counters only), 3=disabled.
- */
-
-static void SetLoadTrig_A(struct comedi_device *dev, struct enc_private *k,
- uint16_t Trig)
-{
- DEBIreplace(dev, k->MyCRA, ~CRAMSK_LOADSRC_A,
- Trig << CRABIT_LOADSRC_A);
-}
-
-static void SetLoadTrig_B(struct comedi_device *dev, struct enc_private *k,
- uint16_t Trig)
-{
- DEBIreplace(dev, k->MyCRB, ~(CRBMSK_LOADSRC_B | CRBMSK_INTCTRL),
- Trig << CRBBIT_LOADSRC_B);
-}
-
-static uint16_t GetLoadTrig_A(struct comedi_device *dev, struct enc_private *k)
-{
- return (DEBIread(dev, k->MyCRA) >> CRABIT_LOADSRC_A) & 3;
-}
-
-static uint16_t GetLoadTrig_B(struct comedi_device *dev, struct enc_private *k)
-{
- return (DEBIread(dev, k->MyCRB) >> CRBBIT_LOADSRC_B) & 3;
-}
-
-/* Return/set counter interrupt source and clear any captured
- * index/overflow events. IntSource: 0=Disabled, 1=OverflowOnly,
- * 2=IndexOnly, 3=IndexAndOverflow.
- */
-
-static void SetIntSrc_A(struct comedi_device *dev, struct enc_private *k,
- uint16_t IntSource)
-{
- struct s626_private *devpriv = dev->private;
-
- /* Reset any pending counter overflow or index captures. */
- DEBIreplace(dev, k->MyCRB, ~CRBMSK_INTCTRL,
- CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A);
-
- /* Program counter interrupt source. */
- DEBIreplace(dev, k->MyCRA, ~CRAMSK_INTSRC_A,
- IntSource << CRABIT_INTSRC_A);
-
- /* Update MISC2 interrupt enable mask. */
- devpriv->CounterIntEnabs =
- (devpriv->CounterIntEnabs & ~k->
- MyEventBits[3]) | k->MyEventBits[IntSource];
-}
-
-static void SetIntSrc_B(struct comedi_device *dev, struct enc_private *k,
- uint16_t IntSource)
-{
- struct s626_private *devpriv = dev->private;
- uint16_t crb;
-
- /* Cache writeable CRB register image. */
- crb = DEBIread(dev, k->MyCRB) & ~CRBMSK_INTCTRL;
-
- /* Reset any pending counter overflow or index captures. */
- DEBIwrite(dev, k->MyCRB,
- (uint16_t) (crb | CRBMSK_INTRESETCMD | CRBMSK_INTRESET_B));
-
- /* Program counter interrupt source. */
- DEBIwrite(dev, k->MyCRB,
- (uint16_t) ((crb & ~CRBMSK_INTSRC_B) | (IntSource <<
- CRBBIT_INTSRC_B)));
-
- /* Update MISC2 interrupt enable mask. */
- devpriv->CounterIntEnabs =
- (devpriv->CounterIntEnabs & ~k->
- MyEventBits[3]) | k->MyEventBits[IntSource];
-}
-
-static uint16_t GetIntSrc_A(struct comedi_device *dev, struct enc_private *k)
-{
- return (DEBIread(dev, k->MyCRA) >> CRABIT_INTSRC_A) & 3;
-}
-
-static uint16_t GetIntSrc_B(struct comedi_device *dev, struct enc_private *k)
-{
- return (DEBIread(dev, k->MyCRB) >> CRBBIT_INTSRC_B) & 3;
-}
-
-/* Return/set the clock multiplier. */
-
-/* static void SetClkMult(struct comedi_device *dev, struct enc_private *k, uint16_t value ) */
-/* { */
-/* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_CLKMULT ) | ( value << STDBIT_CLKMULT ) ), FALSE ); */
-/* } */
-
-/* static uint16_t GetClkMult(struct comedi_device *dev, struct enc_private *k ) */
-/* { */
-/* return ( k->GetMode(dev, k ) >> STDBIT_CLKMULT ) & 3; */
-/* } */
-
-/* Return/set the clock polarity. */
-
-/* static void SetClkPol( struct comedi_device *dev,struct enc_private *k, uint16_t value ) */
-/* { */
-/* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_CLKPOL ) | ( value << STDBIT_CLKPOL ) ), FALSE ); */
-/* } */
-
-/* static uint16_t GetClkPol(struct comedi_device *dev, struct enc_private *k ) */
-/* { */
-/* return ( k->GetMode(dev, k ) >> STDBIT_CLKPOL ) & 1; */
-/* } */
-
-/* Return/set the clock source. */
-
-/* static void SetClkSrc( struct comedi_device *dev,struct enc_private *k, uint16_t value ) */
-/* { */
-/* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_CLKSRC ) | ( value << STDBIT_CLKSRC ) ), FALSE ); */
-/* } */
-
-/* static uint16_t GetClkSrc( struct comedi_device *dev,struct enc_private *k ) */
-/* { */
-/* return ( k->GetMode(dev, k ) >> STDBIT_CLKSRC ) & 3; */
-/* } */
-
-/* Return/set the index polarity. */
-
-/* static void SetIndexPol(struct comedi_device *dev, struct enc_private *k, uint16_t value ) */
-/* { */
-/* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_INDXPOL ) | ( (value != 0) << STDBIT_INDXPOL ) ), FALSE ); */
-/* } */
-
-/* static uint16_t GetIndexPol(struct comedi_device *dev, struct enc_private *k ) */
-/* { */
-/* return ( k->GetMode(dev, k ) >> STDBIT_INDXPOL ) & 1; */
-/* } */
-
-/* Return/set the index source. */
-
-/* static void SetIndexSrc(struct comedi_device *dev, struct enc_private *k, uint16_t value ) */
-/* { */
-/* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_INDXSRC ) | ( (value != 0) << STDBIT_INDXSRC ) ), FALSE ); */
-/* } */
-
-/* static uint16_t GetIndexSrc(struct comedi_device *dev, struct enc_private *k ) */
-/* { */
-/* return ( k->GetMode(dev, k ) >> STDBIT_INDXSRC ) & 1; */
-/* } */
-
-/* Generate an index pulse. */
-
-static void PulseIndex_A(struct comedi_device *dev, struct enc_private *k)
-{
- register uint16_t cra;
-
- cra = DEBIread(dev, k->MyCRA); /* Pulse index. */
- DEBIwrite(dev, k->MyCRA, (uint16_t) (cra ^ CRAMSK_INDXPOL_A));
- DEBIwrite(dev, k->MyCRA, cra);
-}
-
-static void PulseIndex_B(struct comedi_device *dev, struct enc_private *k)
-{
- register uint16_t crb;
-
- crb = DEBIread(dev, k->MyCRB) & ~CRBMSK_INTCTRL; /* Pulse index. */
- DEBIwrite(dev, k->MyCRB, (uint16_t) (crb ^ CRBMSK_INDXPOL_B));
- DEBIwrite(dev, k->MyCRB, crb);
}
-static struct enc_private enc_private_data[] = {
- {
- .GetEnable = GetEnable_A,
- .GetIntSrc = GetIntSrc_A,
- .GetLoadTrig = GetLoadTrig_A,
- .GetMode = GetMode_A,
- .PulseIndex = PulseIndex_A,
- .SetEnable = SetEnable_A,
- .SetIntSrc = SetIntSrc_A,
- .SetLoadTrig = SetLoadTrig_A,
- .SetMode = SetMode_A,
- .ResetCapFlags = ResetCapFlags_A,
- .MyCRA = LP_CR0A,
- .MyCRB = LP_CR0B,
- .MyLatchLsw = LP_CNTR0ALSW,
- .MyEventBits = EVBITS(0),
- }, {
- .GetEnable = GetEnable_A,
- .GetIntSrc = GetIntSrc_A,
- .GetLoadTrig = GetLoadTrig_A,
- .GetMode = GetMode_A,
- .PulseIndex = PulseIndex_A,
- .SetEnable = SetEnable_A,
- .SetIntSrc = SetIntSrc_A,
- .SetLoadTrig = SetLoadTrig_A,
- .SetMode = SetMode_A,
- .ResetCapFlags = ResetCapFlags_A,
- .MyCRA = LP_CR1A,
- .MyCRB = LP_CR1B,
- .MyLatchLsw = LP_CNTR1ALSW,
- .MyEventBits = EVBITS(1),
- }, {
- .GetEnable = GetEnable_A,
- .GetIntSrc = GetIntSrc_A,
- .GetLoadTrig = GetLoadTrig_A,
- .GetMode = GetMode_A,
- .PulseIndex = PulseIndex_A,
- .SetEnable = SetEnable_A,
- .SetIntSrc = SetIntSrc_A,
- .SetLoadTrig = SetLoadTrig_A,
- .SetMode = SetMode_A,
- .ResetCapFlags = ResetCapFlags_A,
- .MyCRA = LP_CR2A,
- .MyCRB = LP_CR2B,
- .MyLatchLsw = LP_CNTR2ALSW,
- .MyEventBits = EVBITS(2),
- }, {
- .GetEnable = GetEnable_B,
- .GetIntSrc = GetIntSrc_B,
- .GetLoadTrig = GetLoadTrig_B,
- .GetMode = GetMode_B,
- .PulseIndex = PulseIndex_B,
- .SetEnable = SetEnable_B,
- .SetIntSrc = SetIntSrc_B,
- .SetLoadTrig = SetLoadTrig_B,
- .SetMode = SetMode_B,
- .ResetCapFlags = ResetCapFlags_B,
- .MyCRA = LP_CR0A,
- .MyCRB = LP_CR0B,
- .MyLatchLsw = LP_CNTR0BLSW,
- .MyEventBits = EVBITS(3),
- }, {
- .GetEnable = GetEnable_B,
- .GetIntSrc = GetIntSrc_B,
- .GetLoadTrig = GetLoadTrig_B,
- .GetMode = GetMode_B,
- .PulseIndex = PulseIndex_B,
- .SetEnable = SetEnable_B,
- .SetIntSrc = SetIntSrc_B,
- .SetLoadTrig = SetLoadTrig_B,
- .SetMode = SetMode_B,
- .ResetCapFlags = ResetCapFlags_B,
- .MyCRA = LP_CR1A,
- .MyCRB = LP_CR1B,
- .MyLatchLsw = LP_CNTR1BLSW,
- .MyEventBits = EVBITS(4),
- }, {
- .GetEnable = GetEnable_B,
- .GetIntSrc = GetIntSrc_B,
- .GetLoadTrig = GetLoadTrig_B,
- .GetMode = GetMode_B,
- .PulseIndex = PulseIndex_B,
- .SetEnable = SetEnable_B,
- .SetIntSrc = SetIntSrc_B,
- .SetLoadTrig = SetLoadTrig_B,
- .SetMode = SetMode_B,
- .ResetCapFlags = ResetCapFlags_B,
- .MyCRA = LP_CR2A,
- .MyCRB = LP_CR2B,
- .MyLatchLsw = LP_CNTR2BLSW,
- .MyEventBits = EVBITS(5),
- },
-};
-
-static void CountersInit(struct comedi_device *dev)
+static void s626_counters_init(struct comedi_device *dev)
{
int chan;
- struct enc_private *k;
- uint16_t Setup = (LOADSRC_INDX << BF_LOADSRC) | /* Preload upon */
- /* index. */
- (INDXSRC_SOFT << BF_INDXSRC) | /* Disable hardware index. */
- (CLKSRC_COUNTER << BF_CLKSRC) | /* Operating mode is counter. */
- (CLKPOL_POS << BF_CLKPOL) | /* Active high clock. */
- (CNTDIR_UP << BF_CLKPOL) | /* Count direction is up. */
- (CLKMULT_1X << BF_CLKMULT) | /* Clock multiplier is 1x. */
- (CLKENAB_INDEX << BF_CLKENAB); /* Enabled by index */
-
- /* Disable all counter interrupts and clear any captured counter events. */
+ const struct s626_enc_info *k;
+ uint16_t setup =
+ /* Preload upon index. */
+ S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
+ /* Disable hardware index. */
+ S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
+ /* Operating mode is counter. */
+ S626_SET_STD_ENCMODE(S626_ENCMODE_COUNTER) |
+ /* Active high clock. */
+ S626_SET_STD_CLKPOL(S626_CLKPOL_POS) |
+ /* Clock multiplier is 1x. */
+ S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
+ /* Enabled by index */
+ S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
+
+ /*
+ * Disable all counter interrupts and clear any captured counter events.
+ */
for (chan = 0; chan < S626_ENCODER_CHANNELS; chan++) {
- k = &encpriv[chan];
- k->SetMode(dev, k, Setup, TRUE);
- k->SetIntSrc(dev, k, 0);
- k->ResetCapFlags(dev, k);
- k->SetEnable(dev, k, CLKENAB_ALWAYS);
+ k = &s626_enc_chan_info[chan];
+ k->set_mode(dev, k, setup, true);
+ k->set_int_src(dev, k, 0);
+ k->reset_cap_flags(dev, k);
+ k->set_enable(dev, k, S626_CLKENAB_ALWAYS);
}
}
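
The enc_private_data[] table removed above is replaced elsewhere in this patch by the const s626_enc_chan_info[] array referenced here. Its per-channel shape, sketched with presumed lower-case function and field names that are not visible in this hunk (only the callback member names are confirmed by the calls above), is roughly:

        static const struct s626_enc_info s626_enc_chan_info[] = {
                {
                        .get_enable      = s626_get_enable_a,   /* presumed */
                        .get_int_src     = s626_get_int_src_a,
                        .get_load_trig   = s626_get_load_trig_a,
                        .get_mode        = s626_get_mode_a,
                        .pulse_index     = s626_pulse_index_a,
                        .set_enable      = s626_set_enable_a,
                        .set_int_src     = s626_set_int_src_a,
                        .set_load_trig   = s626_set_load_trig_a,
                        .set_mode        = s626_set_mode_a,
                        .reset_cap_flags = s626_reset_cap_flags_a,
                        .my_cra          = S626_LP_CR0A,   /* presumed renames */
                        .my_crb          = S626_LP_CR0B,
                        .my_latch_lsw    = S626_LP_CNTR0ALSW,
                        .my_event_bits   = S626_EVBITS(0),
                },
                /* ... five more entries for counters 1A, 2A, 0B, 1B and 2B ... */
        };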
@@ -2316,17 +2533,17 @@ static int s626_allocate_dma_buffers(struct comedi_device *dev)
void *addr;
dma_addr_t appdma;
- addr = pci_alloc_consistent(pcidev, DMABUF_SIZE, &appdma);
+ addr = pci_alloc_consistent(pcidev, S626_DMABUF_SIZE, &appdma);
if (!addr)
return -ENOMEM;
- devpriv->ANABuf.LogicalBase = addr;
- devpriv->ANABuf.PhysicalBase = appdma;
+ devpriv->ana_buf.logical_base = addr;
+ devpriv->ana_buf.physical_base = appdma;
- addr = pci_alloc_consistent(pcidev, DMABUF_SIZE, &appdma);
+ addr = pci_alloc_consistent(pcidev, S626_DMABUF_SIZE, &appdma);
if (!addr)
return -ENOMEM;
- devpriv->RPSBuf.LogicalBase = addr;
- devpriv->RPSBuf.PhysicalBase = appdma;
+ devpriv->rps_buf.logical_base = addr;
+ devpriv->rps_buf.physical_base = appdma;
return 0;
}
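
These are the same two buffers released by s626_close_dma_b() earlier in this diff; a matching teardown (as the detach path would presumably perform it) is simply:

        s626_close_dma_b(dev, &devpriv->rps_buf, S626_DMABUF_SIZE);
        s626_close_dma_b(dev, &devpriv->ana_buf, S626_DMABUF_SIZE);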
@@ -2334,42 +2551,43 @@ static int s626_allocate_dma_buffers(struct comedi_device *dev)
static void s626_initialize(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
- dma_addr_t pPhysBuf;
+ dma_addr_t phys_buf;
uint16_t chan;
int i;
/* Enable DEBI and audio pins, enable I2C interface */
- s626_mc_enable(dev, MC1_DEBI | MC1_AUDIO | MC1_I2C, P_MC1);
+ s626_mc_enable(dev, S626_MC1_DEBI | S626_MC1_AUDIO | S626_MC1_I2C,
+ S626_P_MC1);
/*
- * Configure DEBI operating mode
+ * Configure DEBI operating mode
*
- * Local bus is 16 bits wide
- * Declare DEBI transfer timeout interval
- * Set up byte lane steering
- * Intel-compatible local bus (DEBI never times out)
+ * Local bus is 16 bits wide
+ * Declare DEBI transfer timeout interval
+ * Set up byte lane steering
+ * Intel-compatible local bus (DEBI never times out)
*/
- writel(DEBI_CFG_SLAVE16 |
- (DEBI_TOUT << DEBI_CFG_TOUT_BIT) |
- DEBI_SWAP | DEBI_CFG_INTEL,
- devpriv->mmio + P_DEBICFG);
+ writel(S626_DEBI_CFG_SLAVE16 |
+ (S626_DEBI_TOUT << S626_DEBI_CFG_TOUT_BIT) | S626_DEBI_SWAP |
+ S626_DEBI_CFG_INTEL, devpriv->mmio + S626_P_DEBICFG);
/* Disable MMU paging */
- writel(DEBI_PAGE_DISABLE, devpriv->mmio + P_DEBIPAGE);
+ writel(S626_DEBI_PAGE_DISABLE, devpriv->mmio + S626_P_DEBIPAGE);
/* Init GPIO so that ADC Start* is negated */
- writel(GPIO_BASE | GPIO1_HI, devpriv->mmio + P_GPIO);
+ writel(S626_GPIO_BASE | S626_GPIO1_HI, devpriv->mmio + S626_P_GPIO);
/* I2C device address for onboard eeprom (revb) */
- devpriv->I2CAdrs = 0xA0;
+ devpriv->i2c_adrs = 0xA0;
/*
* Issue an I2C ABORT command to halt any I2C
* operation in progress and reset BUSY flag.
*/
- writel(I2C_CLKSEL | I2C_ABORT, devpriv->mmio + P_I2CSTAT);
- s626_mc_enable(dev, MC2_UPLD_IIC, P_MC2);
- while (!(readl(devpriv->mmio + P_MC2) & MC2_UPLD_IIC))
+ writel(S626_I2C_CLKSEL | S626_I2C_ABORT,
+ devpriv->mmio + S626_P_I2CSTAT);
+ s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
+ while (!(readl(devpriv->mmio + S626_P_MC2) & S626_MC2_UPLD_IIC))
;
/*
@@ -2377,9 +2595,9 @@ static void s626_initialize(struct comedi_device *dev)
* reg twice to reset all I2C error flags.
*/
for (i = 0; i < 2; i++) {
- writel(I2C_CLKSEL, devpriv->mmio + P_I2CSTAT);
- s626_mc_enable(dev, MC2_UPLD_IIC, P_MC2);
- while (!s626_mc_test(dev, MC2_UPLD_IIC, P_MC2))
+ writel(S626_I2C_CLKSEL, devpriv->mmio + S626_P_I2CSTAT);
+ s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
+ while (!s626_mc_test(dev, S626_MC2_UPLD_IIC, S626_P_MC2))
;
}
@@ -2389,31 +2607,32 @@ static void s626_initialize(struct comedi_device *dev)
* DAC data setup times are satisfied, enable DAC serial
* clock out.
*/
- writel(ACON2_INIT, devpriv->mmio + P_ACON2);
+ writel(S626_ACON2_INIT, devpriv->mmio + S626_P_ACON2);
/*
* Set up TSL1 slot list, which is used to control the
- * accumulation of ADC data: RSD1 = shift data in on SD1.
- * SIB_A1 = store data uint8_t at next available location
+ * accumulation of ADC data: S626_RSD1 = shift data in on SD1.
+ * S626_SIB_A1 = store data uint8_t at next available location
* in FB BUFFER1 register.
*/
- writel(RSD1 | SIB_A1, devpriv->mmio + P_TSL1);
- writel(RSD1 | SIB_A1 | EOS, devpriv->mmio + P_TSL1 + 4);
+ writel(S626_RSD1 | S626_SIB_A1, devpriv->mmio + S626_P_TSL1);
+ writel(S626_RSD1 | S626_SIB_A1 | S626_EOS,
+ devpriv->mmio + S626_P_TSL1 + 4);
/* Enable TSL1 slot list so that it executes all the time */
- writel(ACON1_ADCSTART, devpriv->mmio + P_ACON1);
+ writel(S626_ACON1_ADCSTART, devpriv->mmio + S626_P_ACON1);
/*
* Initialize RPS registers used for ADC
*/
/* Physical start of RPS program */
- writel((uint32_t)devpriv->RPSBuf.PhysicalBase,
- devpriv->mmio + P_RPSADDR1);
+ writel((uint32_t)devpriv->rps_buf.physical_base,
+ devpriv->mmio + S626_P_RPSADDR1);
/* RPS program performs no explicit mem writes */
- writel(0, devpriv->mmio + P_RPSPAGE1);
+ writel(0, devpriv->mmio + S626_P_RPSPAGE1);
/* Disable RPS timeouts */
- writel(0, devpriv->mmio + P_RPS1_TOUT);
+ writel(0, devpriv->mmio + S626_P_RPS1_TOUT);
#if 0
/*
@@ -2425,38 +2644,37 @@ static void s626_initialize(struct comedi_device *dev)
* because the SAA7146 ADC interface does not start up in
* a defined state after a PCI reset.
*/
-
{
- uint8_t PollList;
- uint16_t AdcData;
- uint16_t StartVal;
- uint16_t index;
- unsigned int data[16];
+ uint8_t poll_list;
+ uint16_t adc_data;
+ uint16_t start_val;
+ uint16_t index;
+ unsigned int data[16];
- /* Create a simple polling list for analog input channel 0 */
- PollList = EOPL;
- ResetADC(dev, &PollList);
+ /* Create a simple polling list for analog input channel 0 */
+ poll_list = S626_EOPL;
+ s626_reset_adc(dev, &poll_list);
- /* Get initial ADC value */
- s626_ai_rinsn(dev, dev->subdevices, NULL, data);
- StartVal = data[0];
-
- /*
- * VERSION 2.01 CHANGE: TIMEOUT ADDED TO PREVENT HANGED EXECUTION.
- *
- * Invoke ADCs until the new ADC value differs from the initial
- * value or a timeout occurs. The timeout protects against the
- * possibility that the driver is restarting and the ADC data is a
- * fixed value resulting from the applied ADC analog input being
- * unusually quiet or at the rail.
- */
- for (index = 0; index < 500; index++) {
+ /* Get initial ADC value */
s626_ai_rinsn(dev, dev->subdevices, NULL, data);
- AdcData = data[0];
- if (AdcData != StartVal)
- break;
- }
+ start_val = data[0];
+ /*
+ * VERSION 2.01 CHANGE: TIMEOUT ADDED TO PREVENT HANGED
+ * EXECUTION.
+ *
+ * Invoke ADCs until the new ADC value differs from the initial
+ * value or a timeout occurs. The timeout protects against the
+ * possibility that the driver is restarting and the ADC data is
+ * a fixed value resulting from the applied ADC analog input
+ * being unusually quiet or at the rail.
+ */
+ for (index = 0; index < 500; index++) {
+ s626_ai_rinsn(dev, dev->subdevices, NULL, data);
+ adc_data = data[0];
+ if (adc_data != start_val)
+ break;
+ }
}
#endif /* SAA7146 BUG WORKAROUND */
@@ -2469,7 +2687,7 @@ static void s626_initialize(struct comedi_device *dev)
* burst length = 1 DWORD
* threshold = 1 DWORD.
*/
- writel(0, devpriv->mmio + P_PCI_BT_A);
+ writel(0, devpriv->mmio + S626_P_PCI_BT_A);
/*
* Init Audio2's output DMA physical addresses. The protection
@@ -2477,18 +2695,18 @@ static void s626_initialize(struct comedi_device *dev)
* single DWORD will be transferred each time a DMA transfer is
* enabled.
*/
- pPhysBuf = devpriv->ANABuf.PhysicalBase +
- (DAC_WDMABUF_OS * sizeof(uint32_t));
- writel((uint32_t)pPhysBuf, devpriv->mmio + P_BASEA2_OUT);
- writel((uint32_t)(pPhysBuf + sizeof(uint32_t)),
- devpriv->mmio + P_PROTA2_OUT);
+ phys_buf = devpriv->ana_buf.physical_base +
+ (S626_DAC_WDMABUF_OS * sizeof(uint32_t));
+ writel((uint32_t)phys_buf, devpriv->mmio + S626_P_BASEA2_OUT);
+ writel((uint32_t)(phys_buf + sizeof(uint32_t)),
+ devpriv->mmio + S626_P_PROTA2_OUT);
/*
* Cache Audio2's output DMA buffer logical address. This is
* where DAC data is buffered for A2 output DMA transfers.
*/
- devpriv->pDacWBuf = (uint32_t *)devpriv->ANABuf.LogicalBase +
- DAC_WDMABUF_OS;
+ devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base +
+ S626_DAC_WDMABUF_OS;
/*
* Audio2's output channels does not use paging. The
@@ -2496,7 +2714,7 @@ static void s626_initialize(struct comedi_device *dev)
* DMAC will automatically halt and its PCI address pointer
* will be reset when the protection address is reached.
*/
- writel(8, devpriv->mmio + P_PAGEA2_OUT);
+ writel(8, devpriv->mmio + S626_P_PAGEA2_OUT);
/*
* Initialize time slot list 2 (TSL2), which is used to control
@@ -2511,7 +2729,8 @@ static void s626_initialize(struct comedi_device *dev)
*/
/* Slot 0: Trap TSL execution, shift 0xFF into FB_BUFFER2 */
- writel(XSD2 | RSD3 | SIB_A2 | EOS, devpriv->mmio + VECTPORT(0));
+ writel(S626_XSD2 | S626_RSD3 | S626_SIB_A2 | S626_EOS,
+ devpriv->mmio + S626_VECTPORT(0));
/*
* Initialize slot 1, which is constant. Slot 1 causes a
@@ -2523,18 +2742,18 @@ static void s626_initialize(struct comedi_device *dev)
*/
/* Slot 1: Fetch DWORD from Audio2's output FIFO */
- writel(LF_A2, devpriv->mmio + VECTPORT(1));
+ writel(S626_LF_A2, devpriv->mmio + S626_VECTPORT(1));
/* Start DAC's audio interface (TSL2) running */
- writel(ACON1_DACSTART, devpriv->mmio + P_ACON1);
+ writel(S626_ACON1_DACSTART, devpriv->mmio + S626_P_ACON1);
/*
* Init Trim DACs to calibrated values. Do it twice because the
* SAA7146 audio channel does not always reset properly and
* sometimes causes the first few TrimDAC writes to malfunction.
*/
- LoadTrimDACs(dev);
- LoadTrimDACs(dev);
+ s626_load_trim_dacs(dev);
+ s626_load_trim_dacs(dev);
/*
* Manually init all gate array hardware in case this is a soft
@@ -2549,10 +2768,10 @@ static void s626_initialize(struct comedi_device *dev)
* polarity images.
*/
for (chan = 0; chan < S626_DAC_CHANNELS; chan++)
- SetDAC(dev, chan, 0);
+ s626_set_dac(dev, chan, 0);
/* Init counters */
- CountersInit(dev);
+ s626_counters_init(dev);
/*
* Without modifying the state of the Battery Backup enab, disable
@@ -2560,8 +2779,8 @@ static void s626_initialize(struct comedi_device *dev)
* standard DIO (vs. counter overflow) mode, disable the battery
* charger, and reset the watchdog interval selector to zero.
*/
- WriteMISC2(dev, (uint16_t)(DEBIread(dev, LP_RDMISC2) &
- MISC2_BATT_ENABLE));
+ s626_write_misc2(dev, (s626_debi_read(dev, S626_LP_RDMISC2) &
+ S626_MISC2_BATT_ENABLE));
/* Initialize the digital I/O subsystem */
s626_dio_init(dev);
@@ -2588,10 +2807,10 @@ static int s626_auto_attach(struct comedi_device *dev,
return -ENOMEM;
/* disable master interrupt */
- writel(0, devpriv->mmio + P_IER);
+ writel(0, devpriv->mmio + S626_P_IER);
/* soft reset */
- writel(MC1_SOFT_RESET, devpriv->mmio + P_MC1);
+ writel(S626_MC1_SOFT_RESET, devpriv->mmio + S626_P_MC1);
/* DMA FIXME DMA// */
@@ -2670,7 +2889,7 @@ static int s626_auto_attach(struct comedi_device *dev,
s->io_bits = 0xffff;
s->private = (void *)2; /* DIO group 2 */
s->range_table = &range_digital;
- s->insn_config = s626_dio_insn_config;
+ s->insn_config = s626_dio_insn_config;
s->insn_bits = s626_dio_insn_bits;
s = &dev->subdevices[5];
@@ -2679,7 +2898,6 @@ static int s626_auto_attach(struct comedi_device *dev,
s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL;
s->n_chan = S626_ENCODER_CHANNELS;
s->maxdata = 0xffffff;
- s->private = enc_private_data;
s->range_table = &range_unknown;
s->insn_config = s626_enc_insn_config;
s->insn_read = s626_enc_insn_read;
@@ -2703,20 +2921,22 @@ static void s626_detach(struct comedi_device *dev)
if (devpriv->mmio) {
/* interrupt mask */
/* Disable master interrupt */
- writel(0, devpriv->mmio + P_IER);
+ writel(0, devpriv->mmio + S626_P_IER);
/* Clear board's IRQ status flag */
- writel(IRQ_GPIO3 | IRQ_RPS1,
- devpriv->mmio + P_ISR);
+ writel(S626_IRQ_GPIO3 | S626_IRQ_RPS1,
+ devpriv->mmio + S626_P_ISR);
- /* Disable the watchdog timer and battery charger. */
- WriteMISC2(dev, 0);
+ /* Disable the watchdog timer and battery charger. */
+ s626_write_misc2(dev, 0);
/* Close all interfaces on 7146 device */
- writel(MC1_SHUTDOWN, devpriv->mmio + P_MC1);
- writel(ACON1_BASE, devpriv->mmio + P_ACON1);
+ writel(S626_MC1_SHUTDOWN, devpriv->mmio + S626_P_MC1);
+ writel(S626_ACON1_BASE, devpriv->mmio + S626_P_ACON1);
- CloseDMAB(dev, &devpriv->RPSBuf, DMABUF_SIZE);
- CloseDMAB(dev, &devpriv->ANABuf, DMABUF_SIZE);
+ s626_close_dma_b(dev, &devpriv->rps_buf,
+ S626_DMABUF_SIZE);
+ s626_close_dma_b(dev, &devpriv->ana_buf,
+ S626_DMABUF_SIZE);
}
if (dev->irq)
@@ -2746,8 +2966,8 @@ static int s626_pci_probe(struct pci_dev *dev,
* Philips SAA7146 media/dvb based cards.
*/
static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = {
- { PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626,
- PCI_SUBVENDOR_ID_S626, PCI_SUBDEVICE_ID_S626, 0, 0, 0 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146,
+ 0x6000, 0x0272) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, s626_pci_table);
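The replacement table entry matches the generic Philips SAA7146 IDs plus the board's subsystem IDs instead of driver-private ID macros. PCI_DEVICE_SUB() from <linux/pci.h> expands to designated initializers for the four ID fields, so the new entry amounts to roughly the sketch below (the 0x6000/0x0272 values are presumably those previously hidden behind PCI_SUBVENDOR_ID_S626/PCI_SUBDEVICE_ID_S626):

#include <linux/pci.h>

/* Illustrative expansion of the new table entry, not part of the patch. */
static const struct pci_device_id s626_pci_table_sketch[] = {
	{
		.vendor    = PCI_VENDOR_ID_PHILIPS,
		.device    = PCI_DEVICE_ID_PHILIPS_SAA7146,
		.subvendor = 0x6000,	/* board subsystem vendor ID */
		.subdevice = 0x0272,	/* board subsystem device ID */
		/* class, class_mask and driver_data default to 0 */
	},
	{ 0 }
};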
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index a85e6bdcad0..33b72739c1c 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -1,690 +1,774 @@
/*
- comedi/drivers/s626.h
- Sensoray s626 Comedi driver, header file
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- Based on Sensoray Model 626 Linux driver Version 0.2
- Copyright (C) 2002-2004 Sensoray Co., Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- Driver: s626.o (s626.ko)
- Description: Sensoray 626 driver
- Devices: Sensoray s626
- Authors: Gianluca Palli <gpalli@deis.unibo.it>,
- Updated: Thu, 12 Jul 2005
- Status: experimental
-
- Configuration Options:
- analog input:
- none
-
- analog output:
- none
-
- digital channel:
- s626 has 3 dio subdevices (2,3 and 4) each with 16 i/o channels
- supported configuration options:
- INSN_CONFIG_DIO_QUERY
- COMEDI_INPUT
- COMEDI_OUTPUT
-
- encoder:
- Every channel must be configured before reading.
-
- Example code
-
- insn.insn=INSN_CONFIG; // configuration instruction
- insn.n=1; // number of operation (must be 1)
- insn.data=&initialvalue; // initial value loaded into encoder
- // during configuration
- insn.subdev=5; // encoder subdevice
- insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); // encoder_channel
- // to configure
-
- comedi_do_insn(cf,&insn); // executing configuration
-*/
-
-#if !defined(TRUE)
-#define TRUE (1)
-#endif
-
-#if !defined(FALSE)
-#define FALSE (0)
-#endif
-
-#define S626_SIZE 0x0200
-#define DMABUF_SIZE 4096 /* 4k pages */
+ * comedi/drivers/s626.h
+ * Sensoray s626 Comedi driver, header file
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * Based on Sensoray Model 626 Linux driver Version 0.2
+ * Copyright (C) 2002-2004 Sensoray Co., Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef S626_H_INCLUDED
+#define S626_H_INCLUDED
+
+#define S626_DMABUF_SIZE 4096 /* 4k pages */
#define S626_ADC_CHANNELS 16
#define S626_DAC_CHANNELS 4
#define S626_ENCODER_CHANNELS 6
#define S626_DIO_CHANNELS 48
-#define S626_DIO_BANKS 3 /* Number of DIO groups. */
-#define S626_DIO_EXTCHANS 40 /* Number of */
- /* extended-capability */
- /* DIO channels. */
-
-#define NUM_TRIMDACS 12 /* Number of valid TrimDAC channels. */
-
-/* PCI bus interface types. */
-#define INTEL 1 /* Intel bus type. */
-#define MOTOROLA 2 /* Motorola bus type. */
+#define S626_DIO_BANKS 3 /* Number of DIO groups. */
+#define S626_DIO_EXTCHANS 40 /* Number of extended-capability
+ * DIO channels. */
-#define PLATFORM INTEL /* *** SELECT PLATFORM TYPE *** */
+#define S626_NUM_TRIMDACS 12 /* Number of valid TrimDAC channels. */
-#define RANGE_5V 0x10 /* +/-5V range */
-#define RANGE_10V 0x00 /* +/-10V range */
+/* PCI bus interface types. */
+#define S626_INTEL 1 /* Intel bus type. */
+#define S626_MOTOROLA 2 /* Motorola bus type. */
-#define EOPL 0x80 /* End of ADC poll list marker. */
-#define GSEL_BIPOLAR5V 0x00F0 /* LP_GSEL setting for 5V bipolar range. */
-#define GSEL_BIPOLAR10V 0x00A0 /* LP_GSEL setting for 10V bipolar range. */
+#define S626_PLATFORM S626_INTEL /* *** SELECT PLATFORM TYPE *** */
-/* Error codes that must be visible to this base class. */
-#define ERR_ILLEGAL_PARM 0x00010000 /* Illegal function parameter value was specified. */
-#define ERR_I2C 0x00020000 /* I2C error. */
-#define ERR_COUNTERSETUP 0x00200000 /* Illegal setup specified for counter channel. */
-#define ERR_DEBI_TIMEOUT 0x00400000 /* DEBI transfer timed out. */
+#define S626_RANGE_5V 0x10 /* +/-5V range */
+#define S626_RANGE_10V 0x00 /* +/-10V range */
-/* Organization (physical order) and size (in DWORDs) of logical DMA buffers contained by ANA_DMABUF. */
-#define ADC_DMABUF_DWORDS 40 /* ADC DMA buffer must hold 16 samples, plus pre/post garbage samples. */
-#define DAC_WDMABUF_DWORDS 1 /* DAC output DMA buffer holds a single sample. */
+#define S626_EOPL 0x80 /* End of ADC poll list marker. */
+#define S626_GSEL_BIPOLAR5V 0x00F0 /* S626_LP_GSEL setting 5V bipolar. */
+#define S626_GSEL_BIPOLAR10V 0x00A0 /* S626_LP_GSEL setting 10V bipolar. */
-/* All remaining space in 4KB DMA buffer is available for the RPS1 program. */
+/* Error codes that must be visible to this base class. */
+#define S626_ERR_ILLEGAL_PARM 0x00010000 /* Illegal function parameter
+ * value was specified. */
+#define S626_ERR_I2C 0x00020000 /* I2C error. */
+#define S626_ERR_COUNTERSETUP 0x00200000 /* Illegal setup specified for
+ * counter channel. */
+#define S626_ERR_DEBI_TIMEOUT 0x00400000 /* DEBI transfer timed out. */
-/* Address offsets, in DWORDS, from base of DMA buffer. */
-#define DAC_WDMABUF_OS ADC_DMABUF_DWORDS
-
-/* Interrupt enab bit in ISR and IER. */
-#define IRQ_GPIO3 0x00000040 /* IRQ enable for GPIO3. */
-#define IRQ_RPS1 0x10000000
-#define ISR_AFOU 0x00000800
+/*
+ * Organization (physical order) and size (in DWORDs) of logical DMA buffers
+ * contained by ANA_DMABUF.
+ */
+#define S626_ADC_DMABUF_DWORDS 40 /* ADC DMA buffer must hold 16 samples,
+ * plus pre/post garbage samples. */
+#define S626_DAC_WDMABUF_DWORDS 1 /* DAC output DMA buffer holds a single
+ * sample. */
+
+/* All remaining space in 4KB DMA buffer is available for the RPS1 program. */
+
+/* Address offsets, in DWORDS, from base of DMA buffer. */
+#define S626_DAC_WDMABUF_OS S626_ADC_DMABUF_DWORDS
+
+/* Interrupt enable bit in ISR and IER. */
+#define S626_IRQ_GPIO3 0x00000040 /* IRQ enable for GPIO3. */
+#define S626_IRQ_RPS1 0x10000000
+#define S626_ISR_AFOU 0x00000800
/* Audio fifo under/overflow detected. */
-#define IRQ_COINT1A 0x0400 /* conter 1A overflow interrupt mask */
-#define IRQ_COINT1B 0x0800 /* conter 1B overflow interrupt mask */
-#define IRQ_COINT2A 0x1000 /* conter 2A overflow interrupt mask */
-#define IRQ_COINT2B 0x2000 /* conter 2B overflow interrupt mask */
-#define IRQ_COINT3A 0x4000 /* conter 3A overflow interrupt mask */
-#define IRQ_COINT3B 0x8000 /* conter 3B overflow interrupt mask */
-
-/* RPS command codes. */
-#define RPS_CLRSIGNAL 0x00000000 /* CLEAR SIGNAL */
-#define RPS_SETSIGNAL 0x10000000 /* SET SIGNAL */
-#define RPS_NOP 0x00000000 /* NOP */
-#define RPS_PAUSE 0x20000000 /* PAUSE */
-#define RPS_UPLOAD 0x40000000 /* UPLOAD */
-#define RPS_JUMP 0x80000000 /* JUMP */
-#define RPS_LDREG 0x90000100 /* LDREG (1 uint32_t only) */
-#define RPS_STREG 0xA0000100 /* STREG (1 uint32_t only) */
-#define RPS_STOP 0x50000000 /* STOP */
-#define RPS_IRQ 0x60000000 /* IRQ */
-
-#define RPS_LOGICAL_OR 0x08000000 /* Logical OR conditionals. */
-#define RPS_INVERT 0x04000000 /* Test for negated semaphores. */
-#define RPS_DEBI 0x00000002 /* DEBI done */
-
-#define RPS_SIG0 0x00200000 /* RPS semaphore 0 (used by ADC). */
-#define RPS_SIG1 0x00400000 /* RPS semaphore 1 (used by DAC). */
-#define RPS_SIG2 0x00800000 /* RPS semaphore 2 (not used). */
-#define RPS_GPIO2 0x00080000 /* RPS GPIO2 */
-#define RPS_GPIO3 0x00100000 /* RPS GPIO3 */
-
-#define RPS_SIGADC RPS_SIG0 /* Trigger/status for ADC's RPS program. */
-#define RPS_SIGDAC RPS_SIG1 /* Trigger/status for DAC's RPS program. */
-
-/* RPS clock parameters. */
-#define RPSCLK_SCALAR 8 /* This is apparent ratio of PCI/RPS clks (undocumented!!). */
-#define RPSCLK_PER_US (33 / RPSCLK_SCALAR) /* Number of RPS clocks in one microsecond. */
-
-/* Event counter source addresses. */
-#define SBA_RPS_A0 0x27 /* Time of RPS0 busy, in PCI clocks. */
-
-/* GPIO constants. */
-#define GPIO_BASE 0x10004000 /* GPIO 0,2,3 = inputs, GPIO3 = IRQ; GPIO1 = out. */
-#define GPIO1_LO 0x00000000 /* GPIO1 set to LOW. */
-#define GPIO1_HI 0x00001000 /* GPIO1 set to HIGH. */
-
-/* Primary Status Register (PSR) constants. */
-#define PSR_DEBI_E 0x00040000 /* DEBI event flag. */
-#define PSR_DEBI_S 0x00080000 /* DEBI status flag. */
-#define PSR_A2_IN 0x00008000 /* Audio output DMA2 protection address reached. */
-#define PSR_AFOU 0x00000800 /* Audio FIFO under/overflow detected. */
-#define PSR_GPIO2 0x00000020 /* GPIO2 input pin: 0=AdcBusy, 1=AdcIdle. */
-#define PSR_EC0S 0x00000001 /* Event counter 0 threshold reached. */
-
-/* Secondary Status Register (SSR) constants. */
-#define SSR_AF2_OUT 0x00000200 /* Audio 2 output FIFO under/overflow detected. */
-
-/* Master Control Register 1 (MC1) constants. */
-#define MC1_SOFT_RESET 0x80000000 /* Invoke 7146 soft reset. */
-#define MC1_SHUTDOWN 0x3FFF0000 /* Shut down all MC1-controlled enables. */
-
-#define MC1_ERPS1 0x2000 /* enab/disable RPS task 1. */
-#define MC1_ERPS0 0x1000 /* enab/disable RPS task 0. */
-#define MC1_DEBI 0x0800 /* enab/disable DEBI pins. */
-#define MC1_AUDIO 0x0200 /* enab/disable audio port pins. */
-#define MC1_I2C 0x0100 /* enab/disable I2C interface. */
-#define MC1_A2OUT 0x0008 /* enab/disable transfer on A2 out. */
-#define MC1_A2IN 0x0004 /* enab/disable transfer on A2 in. */
-#define MC1_A1IN 0x0001 /* enab/disable transfer on A1 in. */
-
-/* Master Control Register 2 (MC2) constants. */
-#define MC2_UPLD_DEBIq 0x00020002 /* Upload DEBI registers. */
-#define MC2_UPLD_IICq 0x00010001 /* Upload I2C registers. */
-#define MC2_RPSSIG2_ONq 0x20002000 /* Assert RPS_SIG2. */
-#define MC2_RPSSIG1_ONq 0x10001000 /* Assert RPS_SIG1. */
-#define MC2_RPSSIG0_ONq 0x08000800 /* Assert RPS_SIG0. */
-#define MC2_UPLD_DEBI_MASKq 0x00000002 /* Upload DEBI mask. */
-#define MC2_UPLD_IIC_MASKq 0x00000001 /* Upload I2C mask. */
-#define MC2_RPSSIG2_MASKq 0x00002000 /* RPS_SIG2 bit mask. */
-#define MC2_RPSSIG1_MASKq 0x00001000 /* RPS_SIG1 bit mask. */
-#define MC2_RPSSIG0_MASKq 0x00000800 /* RPS_SIG0 bit mask. */
-
-#define MC2_DELAYTRIG_4USq MC2_RPSSIG1_ON
-#define MC2_DELAYBUSY_4USq MC2_RPSSIG1_MASK
-
-#define MC2_DELAYTRIG_6USq MC2_RPSSIG2_ON
-#define MC2_DELAYBUSY_6USq MC2_RPSSIG2_MASK
-
-#define MC2_UPLD_DEBI 0x0002 /* Upload DEBI. */
-#define MC2_UPLD_IIC 0x0001 /* Upload I2C. */
-#define MC2_RPSSIG2 0x2000 /* RPS signal 2 (not used). */
-#define MC2_RPSSIG1 0x1000 /* RPS signal 1 (DAC RPS busy). */
-#define MC2_RPSSIG0 0x0800 /* RPS signal 0 (ADC RPS busy). */
-
-#define MC2_ADC_RPS MC2_RPSSIG0 /* ADC RPS busy. */
-#define MC2_DAC_RPS MC2_RPSSIG1 /* DAC RPS busy. */
-
-/* ***** oldies ***** */
-#define MC2_UPLD_DEBIQ 0x00020002 /* Upload DEBI registers. */
-#define MC2_UPLD_IICQ 0x00010001 /* Upload I2C registers. */
-
-/* PCI BUS (SAA7146) REGISTER ADDRESS OFFSETS */
-#define P_PCI_BT_A 0x004C /* Audio DMA burst/threshold control. */
-#define P_DEBICFG 0x007C /* DEBI configuration. */
-#define P_DEBICMD 0x0080 /* DEBI command. */
-#define P_DEBIPAGE 0x0084 /* DEBI page. */
-#define P_DEBIAD 0x0088 /* DEBI target address. */
-#define P_I2CCTRL 0x008C /* I2C control. */
-#define P_I2CSTAT 0x0090 /* I2C status. */
-#define P_BASEA2_IN 0x00AC /* Audio input 2 base physical DMAbuf
+#define S626_IRQ_COINT1A 0x0400 /* counter 1A overflow interrupt mask */
+#define S626_IRQ_COINT1B 0x0800 /* counter 1B overflow interrupt mask */
+#define S626_IRQ_COINT2A 0x1000 /* counter 2A overflow interrupt mask */
+#define S626_IRQ_COINT2B 0x2000 /* counter 2B overflow interrupt mask */
+#define S626_IRQ_COINT3A 0x4000 /* counter 3A overflow interrupt mask */
+#define S626_IRQ_COINT3B 0x8000 /* counter 3B overflow interrupt mask */
+
+/* RPS command codes. */
+#define S626_RPS_CLRSIGNAL 0x00000000 /* CLEAR SIGNAL */
+#define S626_RPS_SETSIGNAL 0x10000000 /* SET SIGNAL */
+#define S626_RPS_NOP 0x00000000 /* NOP */
+#define S626_RPS_PAUSE 0x20000000 /* PAUSE */
+#define S626_RPS_UPLOAD 0x40000000 /* UPLOAD */
+#define S626_RPS_JUMP 0x80000000 /* JUMP */
+#define S626_RPS_LDREG 0x90000100 /* LDREG (1 uint32_t only) */
+#define S626_RPS_STREG 0xA0000100 /* STREG (1 uint32_t only) */
+#define S626_RPS_STOP 0x50000000 /* STOP */
+#define S626_RPS_IRQ 0x60000000 /* IRQ */
+
+#define S626_RPS_LOGICAL_OR 0x08000000 /* Logical OR conditionals. */
+#define S626_RPS_INVERT 0x04000000 /* Test for negated
+ * semaphores. */
+#define S626_RPS_DEBI 0x00000002 /* DEBI done */
+
+#define S626_RPS_SIG0 0x00200000 /* RPS semaphore 0
+ * (used by ADC). */
+#define S626_RPS_SIG1 0x00400000 /* RPS semaphore 1
+ * (used by DAC). */
+#define S626_RPS_SIG2 0x00800000 /* RPS semaphore 2
+ * (not used). */
+#define S626_RPS_GPIO2 0x00080000 /* RPS GPIO2 */
+#define S626_RPS_GPIO3 0x00100000 /* RPS GPIO3 */
+
+#define S626_RPS_SIGADC S626_RPS_SIG0 /* Trigger/status for
+ * ADC's RPS program. */
+#define S626_RPS_SIGDAC S626_RPS_SIG1 /* Trigger/status for
+ * DAC's RPS program. */
+
+/* RPS clock parameters. */
+#define S626_RPSCLK_SCALAR 8 /* This is apparent ratio of
+ * PCI/RPS clks (undocumented!!). */
+#define S626_RPSCLK_PER_US (33 / S626_RPSCLK_SCALAR)
+ /* Number of RPS clocks in one
+ * microsecond. */
+
+/* Event counter source addresses. */
+#define S626_SBA_RPS_A0 0x27 /* Time of RPS0 busy, in PCI clocks. */
+
+/* GPIO constants. */
+#define S626_GPIO_BASE 0x10004000 /* GPIO 0,2,3 = inputs,
+ * GPIO3 = IRQ; GPIO1 = out. */
+#define S626_GPIO1_LO 0x00000000 /* GPIO1 set to LOW. */
+#define S626_GPIO1_HI 0x00001000 /* GPIO1 set to HIGH. */
+
+/* Primary Status Register (PSR) constants. */
+#define S626_PSR_DEBI_E 0x00040000 /* DEBI event flag. */
+#define S626_PSR_DEBI_S 0x00080000 /* DEBI status flag. */
+#define S626_PSR_A2_IN 0x00008000 /* Audio output DMA2 protection
+ * address reached. */
+#define S626_PSR_AFOU 0x00000800 /* Audio FIFO under/overflow
+ * detected. */
+#define S626_PSR_GPIO2 0x00000020 /* GPIO2 input pin: 0=AdcBusy,
+ * 1=AdcIdle. */
+#define S626_PSR_EC0S 0x00000001 /* Event counter 0 threshold
+ * reached. */
+
+/* Secondary Status Register (SSR) constants. */
+#define S626_SSR_AF2_OUT 0x00000200 /* Audio 2 output FIFO
+ * under/overflow detected. */
+
+/* Master Control Register 1 (MC1) constants. */
+#define S626_MC1_SOFT_RESET 0x80000000 /* Invoke 7146 soft reset. */
+#define S626_MC1_SHUTDOWN 0x3FFF0000 /* Shut down all MC1-controlled
+ * enables. */
+
+#define S626_MC1_ERPS1 0x2000 /* Enab/disable RPS task 1. */
+#define S626_MC1_ERPS0 0x1000 /* Enab/disable RPS task 0. */
+#define S626_MC1_DEBI 0x0800 /* Enab/disable DEBI pins. */
+#define S626_MC1_AUDIO 0x0200 /* Enab/disable audio port pins. */
+#define S626_MC1_I2C 0x0100 /* Enab/disable I2C interface. */
+#define S626_MC1_A2OUT 0x0008 /* Enab/disable transfer on A2 out. */
+#define S626_MC1_A2IN 0x0004 /* Enab/disable transfer on A2 in. */
+#define S626_MC1_A1IN 0x0001 /* Enab/disable transfer on A1 in. */
+
+/* Master Control Register 2 (MC2) constants. */
+#define S626_MC2_UPLD_DEBI 0x0002 /* Upload DEBI. */
+#define S626_MC2_UPLD_IIC 0x0001 /* Upload I2C. */
+#define S626_MC2_RPSSIG2 0x2000 /* RPS signal 2 (not used). */
+#define S626_MC2_RPSSIG1 0x1000 /* RPS signal 1 (DAC RPS busy). */
+#define S626_MC2_RPSSIG0 0x0800 /* RPS signal 0 (ADC RPS busy). */
+
+#define S626_MC2_ADC_RPS S626_MC2_RPSSIG0 /* ADC RPS busy. */
+#define S626_MC2_DAC_RPS S626_MC2_RPSSIG1 /* DAC RPS busy. */
+
+/* PCI BUS (SAA7146) REGISTER ADDRESS OFFSETS */
+#define S626_P_PCI_BT_A 0x004C /* Audio DMA burst/threshold control. */
+#define S626_P_DEBICFG 0x007C /* DEBI configuration. */
+#define S626_P_DEBICMD 0x0080 /* DEBI command. */
+#define S626_P_DEBIPAGE 0x0084 /* DEBI page. */
+#define S626_P_DEBIAD 0x0088 /* DEBI target address. */
+#define S626_P_I2CCTRL 0x008C /* I2C control. */
+#define S626_P_I2CSTAT 0x0090 /* I2C status. */
+#define S626_P_BASEA2_IN 0x00AC /* Audio input 2 base physical DMAbuf
* address. */
-#define P_PROTA2_IN 0x00B0 /* Audio input 2 physical DMAbuf
+#define S626_P_PROTA2_IN 0x00B0 /* Audio input 2 physical DMAbuf
* protection address. */
-#define P_PAGEA2_IN 0x00B4 /* Audio input 2 paging attributes. */
-#define P_BASEA2_OUT 0x00B8 /* Audio output 2 base physical DMAbuf
+#define S626_P_PAGEA2_IN 0x00B4 /* Audio input 2 paging attributes. */
+#define S626_P_BASEA2_OUT 0x00B8 /* Audio output 2 base physical DMAbuf
* address. */
-#define P_PROTA2_OUT 0x00BC /* Audio output 2 physical DMAbuf
+#define S626_P_PROTA2_OUT 0x00BC /* Audio output 2 physical DMAbuf
* protection address. */
-#define P_PAGEA2_OUT 0x00C0 /* Audio output 2 paging attributes. */
-#define P_RPSPAGE0 0x00C4 /* RPS0 page. */
-#define P_RPSPAGE1 0x00C8 /* RPS1 page. */
-#define P_RPS0_TOUT 0x00D4 /* RPS0 time-out. */
-#define P_RPS1_TOUT 0x00D8 /* RPS1 time-out. */
-#define P_IER 0x00DC /* Interrupt enable. */
-#define P_GPIO 0x00E0 /* General-purpose I/O. */
-#define P_EC1SSR 0x00E4 /* Event counter set 1 source select. */
-#define P_ECT1R 0x00EC /* Event counter threshold set 1. */
-#define P_ACON1 0x00F4 /* Audio control 1. */
-#define P_ACON2 0x00F8 /* Audio control 2. */
-#define P_MC1 0x00FC /* Master control 1. */
-#define P_MC2 0x0100 /* Master control 2. */
-#define P_RPSADDR0 0x0104 /* RPS0 instruction pointer. */
-#define P_RPSADDR1 0x0108 /* RPS1 instruction pointer. */
-#define P_ISR 0x010C /* Interrupt status. */
-#define P_PSR 0x0110 /* Primary status. */
-#define P_SSR 0x0114 /* Secondary status. */
-#define P_EC1R 0x0118 /* Event counter set 1. */
-#define P_ADP4 0x0138 /* Logical audio DMA pointer of audio
+#define S626_P_PAGEA2_OUT 0x00C0 /* Audio output 2 paging attributes. */
+#define S626_P_RPSPAGE0 0x00C4 /* RPS0 page. */
+#define S626_P_RPSPAGE1 0x00C8 /* RPS1 page. */
+#define S626_P_RPS0_TOUT 0x00D4 /* RPS0 time-out. */
+#define S626_P_RPS1_TOUT 0x00D8 /* RPS1 time-out. */
+#define S626_P_IER 0x00DC /* Interrupt enable. */
+#define S626_P_GPIO 0x00E0 /* General-purpose I/O. */
+#define S626_P_EC1SSR 0x00E4 /* Event counter set 1 source select. */
+#define S626_P_ECT1R 0x00EC /* Event counter threshold set 1. */
+#define S626_P_ACON1 0x00F4 /* Audio control 1. */
+#define S626_P_ACON2 0x00F8 /* Audio control 2. */
+#define S626_P_MC1 0x00FC /* Master control 1. */
+#define S626_P_MC2 0x0100 /* Master control 2. */
+#define S626_P_RPSADDR0 0x0104 /* RPS0 instruction pointer. */
+#define S626_P_RPSADDR1 0x0108 /* RPS1 instruction pointer. */
+#define S626_P_ISR 0x010C /* Interrupt status. */
+#define S626_P_PSR 0x0110 /* Primary status. */
+#define S626_P_SSR 0x0114 /* Secondary status. */
+#define S626_P_EC1R 0x0118 /* Event counter set 1. */
+#define S626_P_ADP4 0x0138 /* Logical audio DMA pointer of audio
* input FIFO A2_IN. */
-#define P_FB_BUFFER1 0x0144 /* Audio feedback buffer 1. */
-#define P_FB_BUFFER2 0x0148 /* Audio feedback buffer 2. */
-#define P_TSL1 0x0180 /* Audio time slot list 1. */
-#define P_TSL2 0x01C0 /* Audio time slot list 2. */
+#define S626_P_FB_BUFFER1 0x0144 /* Audio feedback buffer 1. */
+#define S626_P_FB_BUFFER2 0x0148 /* Audio feedback buffer 2. */
+#define S626_P_TSL1 0x0180 /* Audio time slot list 1. */
+#define S626_P_TSL2 0x01C0 /* Audio time slot list 2. */
-/* LOCAL BUS (GATE ARRAY) REGISTER ADDRESS OFFSETS */
-/* Analog I/O registers: */
-#define LP_DACPOL 0x0082 /* Write DAC polarity. */
-#define LP_GSEL 0x0084 /* Write ADC gain. */
-#define LP_ISEL 0x0086 /* Write ADC channel select. */
+/* LOCAL BUS (GATE ARRAY) REGISTER ADDRESS OFFSETS */
+/* Analog I/O registers: */
+#define S626_LP_DACPOL 0x0082 /* Write DAC polarity. */
+#define S626_LP_GSEL 0x0084 /* Write ADC gain. */
+#define S626_LP_ISEL 0x0086 /* Write ADC channel select. */
/* Digital I/O registers */
-#define LP_RDDIN(x) (0x0040 + (x) * 0x10) /* R: digital input */
-#define LP_WRINTSEL(x) (0x0042 + (x) * 0x10) /* W: int enable */
-#define LP_WREDGSEL(x) (0x0044 + (x) * 0x10) /* W: edge selection */
-#define LP_WRCAPSEL(x) (0x0046 + (x) * 0x10) /* W: capture enable */
-#define LP_RDCAPFLG(x) (0x0048 + (x) * 0x10) /* R: edges captured */
-#define LP_WRDOUT(x) (0x0048 + (x) * 0x10) /* W: digital output */
-#define LP_RDINTSEL(x) (0x004a + (x) * 0x10) /* R: int enable */
-#define LP_RDEDGSEL(x) (0x004c + (x) * 0x10) /* R: edge selection */
-#define LP_RDCAPSEL(x) (0x004e + (x) * 0x10) /* R: capture enable */
-
-/* Counter Registers (read/write): */
-#define LP_CR0A 0x0000 /* 0A setup register. */
-#define LP_CR0B 0x0002 /* 0B setup register. */
-#define LP_CR1A 0x0004 /* 1A setup register. */
-#define LP_CR1B 0x0006 /* 1B setup register. */
-#define LP_CR2A 0x0008 /* 2A setup register. */
-#define LP_CR2B 0x000A /* 2B setup register. */
-
-/* Counter PreLoad (write) and Latch (read) Registers: */
-#define LP_CNTR0ALSW 0x000C /* 0A lsw. */
-#define LP_CNTR0AMSW 0x000E /* 0A msw. */
-#define LP_CNTR0BLSW 0x0010 /* 0B lsw. */
-#define LP_CNTR0BMSW 0x0012 /* 0B msw. */
-#define LP_CNTR1ALSW 0x0014 /* 1A lsw. */
-#define LP_CNTR1AMSW 0x0016 /* 1A msw. */
-#define LP_CNTR1BLSW 0x0018 /* 1B lsw. */
-#define LP_CNTR1BMSW 0x001A /* 1B msw. */
-#define LP_CNTR2ALSW 0x001C /* 2A lsw. */
-#define LP_CNTR2AMSW 0x001E /* 2A msw. */
-#define LP_CNTR2BLSW 0x0020 /* 2B lsw. */
-#define LP_CNTR2BMSW 0x0022 /* 2B msw. */
-
-/* Miscellaneous Registers (read/write): */
-#define LP_MISC1 0x0088 /* Read/write Misc1. */
-#define LP_WRMISC2 0x0090 /* Write Misc2. */
-#define LP_RDMISC2 0x0082 /* Read Misc2. */
-
-/* Bit masks for MISC1 register that are the same for reads and writes. */
-#define MISC1_WENABLE 0x8000 /* enab writes to MISC2 (except Clear
+#define S626_LP_RDDIN(x) (0x0040 + (x) * 0x10) /* R: digital input */
+#define S626_LP_WRINTSEL(x) (0x0042 + (x) * 0x10) /* W: int enable */
+#define S626_LP_WREDGSEL(x) (0x0044 + (x) * 0x10) /* W: edge selection */
+#define S626_LP_WRCAPSEL(x) (0x0046 + (x) * 0x10) /* W: capture enable */
+#define S626_LP_RDCAPFLG(x) (0x0048 + (x) * 0x10) /* R: edges captured */
+#define S626_LP_WRDOUT(x) (0x0048 + (x) * 0x10) /* W: digital output */
+#define S626_LP_RDINTSEL(x) (0x004a + (x) * 0x10) /* R: int enable */
+#define S626_LP_RDEDGSEL(x) (0x004c + (x) * 0x10) /* R: edge selection */
+#define S626_LP_RDCAPSEL(x) (0x004e + (x) * 0x10) /* R: capture enable */
+
+/* Counter Registers (read/write): */
+#define S626_LP_CR0A 0x0000 /* 0A setup register. */
+#define S626_LP_CR0B 0x0002 /* 0B setup register. */
+#define S626_LP_CR1A 0x0004 /* 1A setup register. */
+#define S626_LP_CR1B 0x0006 /* 1B setup register. */
+#define S626_LP_CR2A 0x0008 /* 2A setup register. */
+#define S626_LP_CR2B 0x000A /* 2B setup register. */
+
+/* Counter PreLoad (write) and Latch (read) Registers: */
+#define S626_LP_CNTR0ALSW 0x000C /* 0A lsw. */
+#define S626_LP_CNTR0AMSW 0x000E /* 0A msw. */
+#define S626_LP_CNTR0BLSW 0x0010 /* 0B lsw. */
+#define S626_LP_CNTR0BMSW 0x0012 /* 0B msw. */
+#define S626_LP_CNTR1ALSW 0x0014 /* 1A lsw. */
+#define S626_LP_CNTR1AMSW 0x0016 /* 1A msw. */
+#define S626_LP_CNTR1BLSW 0x0018 /* 1B lsw. */
+#define S626_LP_CNTR1BMSW 0x001A /* 1B msw. */
+#define S626_LP_CNTR2ALSW 0x001C /* 2A lsw. */
+#define S626_LP_CNTR2AMSW 0x001E /* 2A msw. */
+#define S626_LP_CNTR2BLSW 0x0020 /* 2B lsw. */
+#define S626_LP_CNTR2BMSW 0x0022 /* 2B msw. */
+
+/* Miscellaneous Registers (read/write): */
+#define S626_LP_MISC1 0x0088 /* Read/write Misc1. */
+#define S626_LP_WRMISC2 0x0090 /* Write Misc2. */
+#define S626_LP_RDMISC2 0x0082 /* Read Misc2. */
+
+/* Bit masks for MISC1 register that are the same for reads and writes. */
+#define S626_MISC1_WENABLE 0x8000 /* enab writes to MISC2 (except Clear
* Watchdog bit). */
-#define MISC1_WDISABLE 0x0000 /* Disable writes to MISC2. */
-#define MISC1_EDCAP 0x1000 /* enab edge capture on DIO chans
- * specified by LP_WRCAPSELx. */
-#define MISC1_NOEDCAP 0x0000 /* Disable edge capture on specified
+#define S626_MISC1_WDISABLE 0x0000 /* Disable writes to MISC2. */
+#define S626_MISC1_EDCAP 0x1000 /* Enable edge capture on DIO chans
+ * specified by S626_LP_WRCAPSELx. */
+#define S626_MISC1_NOEDCAP 0x0000 /* Disable edge capture on specified
* DIO chans. */
-/* Bit masks for MISC1 register reads. */
-#define RDMISC1_WDTIMEOUT 0x4000 /* Watchdog timer timed out. */
-
-/* Bit masks for MISC2 register writes. */
-#define WRMISC2_WDCLEAR 0x8000 /* Reset watchdog timer to zero. */
-#define WRMISC2_CHARGE_ENABLE 0x4000 /* enab battery trickle charging. */
-
-/* Bit masks for MISC2 register that are the same for reads and writes. */
-#define MISC2_BATT_ENABLE 0x0008 /* Backup battery enable. */
-#define MISC2_WDENABLE 0x0004 /* Watchdog timer enable. */
-#define MISC2_WDPERIOD_MASK 0x0003 /* Watchdog interval */
- /* select mask. */
-
-/* Bit masks for ACON1 register. */
-#define A2_RUN 0x40000000 /* Run A2 based on TSL2. */
-#define A1_RUN 0x20000000 /* Run A1 based on TSL1. */
-#define A1_SWAP 0x00200000 /* Use big-endian for A1. */
-#define A2_SWAP 0x00100000 /* Use big-endian for A2. */
-#define WS_MODES 0x00019999 /* WS0 = TSL1 trigger */
- /* input, WS1-WS4 = */
- /* CS* outputs. */
-
-#if PLATFORM == INTEL /* Base ACON1 config: always run A1 based
- * on TSL1. */
-#define ACON1_BASE (WS_MODES | A1_RUN)
-#elif PLATFORM == MOTOROLA
-#define ACON1_BASE (WS_MODES | A1_RUN | A1_SWAP | A2_SWAP)
+/* Bit masks for MISC1 register reads. */
+#define S626_RDMISC1_WDTIMEOUT 0x4000 /* Watchdog timer timed out. */
+
+/* Bit masks for MISC2 register writes. */
+#define S626_WRMISC2_WDCLEAR 0x8000 /* Reset watchdog timer to zero. */
+#define S626_WRMISC2_CHARGE_ENABLE 0x4000 /* Enable battery trickle charging. */
+
+/* Bit masks for MISC2 register that are the same for reads and writes. */
+#define S626_MISC2_BATT_ENABLE 0x0008 /* Backup battery enable. */
+#define S626_MISC2_WDENABLE 0x0004 /* Watchdog timer enable. */
+#define S626_MISC2_WDPERIOD_MASK 0x0003 /* Watchdog interval select mask. */
+
+/* Bit masks for ACON1 register. */
+#define S626_A2_RUN 0x40000000 /* Run A2 based on TSL2. */
+#define S626_A1_RUN 0x20000000 /* Run A1 based on TSL1. */
+#define S626_A1_SWAP 0x00200000 /* Use big-endian for A1. */
+#define S626_A2_SWAP 0x00100000 /* Use big-endian for A2. */
+#define S626_WS_MODES 0x00019999 /* WS0 = TSL1 trigger input,
+ * WS1-WS4 = CS* outputs. */
+
+#if S626_PLATFORM == S626_INTEL /* Base ACON1 config: always run
+ * A1 based on TSL1. */
+#define S626_ACON1_BASE (S626_WS_MODES | S626_A1_RUN)
+#elif S626_PLATFORM == S626_MOTOROLA
+#define S626_ACON1_BASE \
+ (S626_WS_MODES | S626_A1_RUN | S626_A1_SWAP | S626_A2_SWAP)
#endif
-#define ACON1_ADCSTART ACON1_BASE /* Start ADC: run A1
- * based on TSL1. */
-#define ACON1_DACSTART (ACON1_BASE | A2_RUN)
+#define S626_ACON1_ADCSTART S626_ACON1_BASE /* Start ADC: run A1
+ * based on TSL1. */
+#define S626_ACON1_DACSTART (S626_ACON1_BASE | S626_A2_RUN)
/* Start transmit to DAC: run A2 based on TSL2. */
-#define ACON1_DACSTOP ACON1_BASE /* Halt A2. */
-
-/* Bit masks for ACON2 register. */
-#define A1_CLKSRC_BCLK1 0x00000000 /* A1 bit rate = BCLK1 (ADC). */
-#define A2_CLKSRC_X1 0x00800000 /* A2 bit rate = ACLK/1 (DACs). */
-#define A2_CLKSRC_X2 0x00C00000 /* A2 bit rate = ACLK/2 (DACs). */
-#define A2_CLKSRC_X4 0x01400000 /* A2 bit rate = ACLK/4 (DACs). */
-#define INVERT_BCLK2 0x00100000 /* Invert BCLK2 (DACs). */
-#define BCLK2_OE 0x00040000 /* enab BCLK2 (DACs). */
-#define ACON2_XORMASK 0x000C0000 /* XOR mask for ACON2 */
- /* active-low bits. */
-
-#define ACON2_INIT (ACON2_XORMASK ^ (A1_CLKSRC_BCLK1 | A2_CLKSRC_X2 | INVERT_BCLK2 | BCLK2_OE))
-
-/* Bit masks for timeslot records. */
-#define WS1 0x40000000 /* WS output to assert. */
-#define WS2 0x20000000
-#define WS3 0x10000000
-#define WS4 0x08000000
-#define RSD1 0x01000000 /* Shift A1 data in on SD1. */
-#define SDW_A1 0x00800000 /* Store rcv'd char at next
- * char slot of DWORD1 buffer. */
-#define SIB_A1 0x00400000 /* Store rcv'd char at next
+#define S626_ACON1_DACSTOP S626_ACON1_BASE /* Halt A2. */
+
+/* Bit masks for ACON2 register. */
+#define S626_A1_CLKSRC_BCLK1 0x00000000 /* A1 bit rate = BCLK1 (ADC). */
+#define S626_A2_CLKSRC_X1 0x00800000 /* A2 bit rate = ACLK/1
+ * (DACs). */
+#define S626_A2_CLKSRC_X2 0x00C00000 /* A2 bit rate = ACLK/2
+ * (DACs). */
+#define S626_A2_CLKSRC_X4 0x01400000 /* A2 bit rate = ACLK/4
+ * (DACs). */
+#define S626_INVERT_BCLK2 0x00100000 /* Invert BCLK2 (DACs). */
+#define S626_BCLK2_OE 0x00040000 /* Enable BCLK2 (DACs). */
+#define S626_ACON2_XORMASK 0x000C0000 /* XOR mask for ACON2
+ * active-low bits. */
+
+#define S626_ACON2_INIT (S626_ACON2_XORMASK ^ \
+ (S626_A1_CLKSRC_BCLK1 | S626_A2_CLKSRC_X2 | \
+ S626_INVERT_BCLK2 | S626_BCLK2_OE))
+
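For reference, the XOR mask above covers the register's active-low bits (0x000C0000, bits 19:18), so the init value works out to 0x000C0000 ^ (0 | 0x00C00000 | 0x00100000 | 0x00040000) = 0x00D80000; in particular S626_BCLK2_OE ends up written as a 0 bit, which is what asserts it on the active-low line.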
+/* Bit masks for timeslot records. */
+#define S626_WS1 0x40000000 /* WS output to assert. */
+#define S626_WS2 0x20000000
+#define S626_WS3 0x10000000
+#define S626_WS4 0x08000000
+#define S626_RSD1 0x01000000 /* Shift A1 data in on SD1. */
+#define S626_SDW_A1 0x00800000 /* Store rcv'd char at next char
+ * slot of DWORD1 buffer. */
+#define S626_SIB_A1 0x00400000 /* Store rcv'd char at next
* char slot of FB1 buffer. */
-#define SF_A1 0x00200000 /* Write unsigned long
+#define S626_SF_A1 0x00200000 /* Write unsigned long
* buffer to input FIFO. */
/* Select parallel-to-serial converter's data source: */
-#define XFIFO_0 0x00000000 /* Data fifo byte 0. */
-#define XFIFO_1 0x00000010 /* Data fifo byte 1. */
-#define XFIFO_2 0x00000020 /* Data fifo byte 2. */
-#define XFIFO_3 0x00000030 /* Data fifo byte 3. */
-#define XFB0 0x00000040 /* FB_BUFFER byte 0. */
-#define XFB1 0x00000050 /* FB_BUFFER byte 1. */
-#define XFB2 0x00000060 /* FB_BUFFER byte 2. */
-#define XFB3 0x00000070 /* FB_BUFFER byte 3. */
-#define SIB_A2 0x00000200 /* Store next dword from A2's
- * input shifter to FB2 buffer. */
-#define SF_A2 0x00000100 /* Store next dword from A2's
+#define S626_XFIFO_0 0x00000000 /* Data fifo byte 0. */
+#define S626_XFIFO_1 0x00000010 /* Data fifo byte 1. */
+#define S626_XFIFO_2 0x00000020 /* Data fifo byte 2. */
+#define S626_XFIFO_3 0x00000030 /* Data fifo byte 3. */
+#define S626_XFB0 0x00000040 /* FB_BUFFER byte 0. */
+#define S626_XFB1 0x00000050 /* FB_BUFFER byte 1. */
+#define S626_XFB2 0x00000060 /* FB_BUFFER byte 2. */
+#define S626_XFB3 0x00000070 /* FB_BUFFER byte 3. */
+#define S626_SIB_A2 0x00000200 /* Store next dword from A2's
+ * input shifter to FB2
+ * buffer. */
+#define S626_SF_A2 0x00000100 /* Store next dword from A2's
* input shifter to its input
* fifo. */
-#define LF_A2 0x00000080 /* Load next dword from A2's
+#define S626_LF_A2 0x00000080 /* Load next dword from A2's
* output fifo into its
* output dword buffer. */
-#define XSD2 0x00000008 /* Shift data out on SD2. */
-#define RSD3 0x00001800 /* Shift data in on SD3. */
-#define RSD2 0x00001000 /* Shift data in on SD2. */
-#define LOW_A2 0x00000002 /* Drive last SD low */
- /* for 7 clks, then */
- /* tri-state. */
-#define EOS 0x00000001 /* End of superframe. */
-
-/* I2C configuration constants. */
-#define I2C_CLKSEL 0x0400
-/* I2C bit rate = PCIclk/480 = 68.75 KHz. */
-
-#define I2C_BITRATE 68.75
-/* I2C bus data bit rate (determined by I2C_CLKSEL) in KHz. */
-
-#define I2C_WRTIME 15.0
-/* Worst case time, in msec, for EEPROM internal write op. */
-
-/* I2C manifest constants. */
-
-/* Max retries to wait for EEPROM write. */
-#define I2C_RETRIES (I2C_WRTIME * I2C_BITRATE / 9.0)
-#define I2C_ERR 0x0002 /* I2C control/status */
- /* flag ERROR. */
-#define I2C_BUSY 0x0001 /* I2C control/status */
- /* flag BUSY. */
-#define I2C_ABORT 0x0080 /* I2C status flag ABORT. */
-#define I2C_ATTRSTART 0x3 /* I2C attribute START. */
-#define I2C_ATTRCONT 0x2 /* I2C attribute CONT. */
-#define I2C_ATTRSTOP 0x1 /* I2C attribute STOP. */
-#define I2C_ATTRNOP 0x0 /* I2C attribute NOP. */
-
-/* I2C read command | EEPROM address. */
-#define I2CR (devpriv->I2CAdrs | 1)
-
-/* I2C write command | EEPROM address. */
-#define I2CW (devpriv->I2CAdrs)
-
-/* Code macros used for constructing I2C command bytes. */
-#define I2C_B2(ATTR, VAL) (((ATTR) << 6) | ((VAL) << 24))
-#define I2C_B1(ATTR, VAL) (((ATTR) << 4) | ((VAL) << 16))
-#define I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8))
-
-/* oldest */
-#define P_DEBICFGq 0x007C /* DEBI configuration. */
-#define P_DEBICMDq 0x0080 /* DEBI command. */
-#define P_DEBIPAGEq 0x0084 /* DEBI page. */
-#define P_DEBIADq 0x0088 /* DEBI target address. */
-
-#define DEBI_CFG_TOQ 0x03C00000 /* timeout (15 PCI cycles) */
-#define DEBI_CFG_FASTQ 0x10000000 /* fast mode enable */
-#define DEBI_CFG_16Q 0x00080000 /* 16-bit access enable */
-#define DEBI_CFG_INCQ 0x00040000 /* enable address increment */
-#define DEBI_CFG_TIMEROFFQ 0x00010000 /* disable timer */
-#define DEBI_CMD_RDQ 0x00050000 /* read immediate 2 bytes */
-#define DEBI_CMD_WRQ 0x00040000 /* write immediate 2 bytes */
-#define DEBI_PAGE_DISABLEQ 0x00000000 /* paging disable */
-
-/* DEBI command constants. */
-#define DEBI_CMD_SIZE16 (2 << 17) /* Transfer size is */
- /* always 2 bytes. */
-#define DEBI_CMD_READ 0x00010000 /* Read operation. */
-#define DEBI_CMD_WRITE 0x00000000 /* Write operation. */
-
-/* Read immediate 2 bytes. */
-#define DEBI_CMD_RDWORD (DEBI_CMD_READ | DEBI_CMD_SIZE16)
-
-/* Write immediate 2 bytes. */
-#define DEBI_CMD_WRWORD (DEBI_CMD_WRITE | DEBI_CMD_SIZE16)
-
-/* DEBI configuration constants. */
-#define DEBI_CFG_XIRQ_EN 0x80000000 /* enab external */
- /* interrupt on GPIO3. */
-#define DEBI_CFG_XRESUME 0x40000000 /* Resume block */
- /* transfer when XIRQ */
- /* deasserted. */
-#define DEBI_CFG_FAST 0x10000000 /* Fast mode enable. */
-
-/* 4-bit field that specifies DEBI timeout value in PCI clock cycles: */
-#define DEBI_CFG_TOUT_BIT 22 /* Finish DEBI cycle after */
- /* this many clocks. */
-
-/* 2-bit field that specifies Endian byte lane steering: */
-#define DEBI_CFG_SWAP_NONE 0x00000000 /* Straight - don't */
- /* swap any bytes */
- /* (Intel). */
-#define DEBI_CFG_SWAP_2 0x00100000 /* 2-byte swap (Motorola). */
-#define DEBI_CFG_SWAP_4 0x00200000 /* 4-byte swap. */
-#define DEBI_CFG_16 0x00080000 /* Slave is able to */
- /* serve 16-bit */
- /* cycles. */
-
-#define DEBI_CFG_SLAVE16 0x00080000 /* Slave is able to */
- /* serve 16-bit */
- /* cycles. */
-#define DEBI_CFG_INC 0x00040000 /* enab address */
- /* increment for block */
- /* transfers. */
-#define DEBI_CFG_INTEL 0x00020000 /* Intel style local bus. */
-#define DEBI_CFG_TIMEROFF 0x00010000 /* Disable timer. */
-
-#if PLATFORM == INTEL
-
-#define DEBI_TOUT 7 /* Wait 7 PCI clocks */
- /* (212 ns) before */
- /* polling RDY. */
-
-/* Intel byte lane steering (pass through all byte lanes). */
-#define DEBI_SWAP DEBI_CFG_SWAP_NONE
-
-#elif PLATFORM == MOTOROLA
-
-#define DEBI_TOUT 15 /* Wait 15 PCI clocks (454 ns) */
- /* maximum before timing out. */
-#define DEBI_SWAP DEBI_CFG_SWAP_2 /* Motorola byte lane steering. */
+#define S626_XSD2 0x00000008 /* Shift data out on SD2. */
+#define S626_RSD3 0x00001800 /* Shift data in on SD3. */
+#define S626_RSD2 0x00001000 /* Shift data in on SD2. */
+#define S626_LOW_A2 0x00000002 /* Drive last SD low for 7 clks,
+ * then tri-state. */
+#define S626_EOS 0x00000001 /* End of superframe. */
+
+/* I2C configuration constants. */
+#define S626_I2C_CLKSEL 0x0400 /* I2C bit rate =
+ * PCIclk/480 = 68.75 KHz. */
+#define S626_I2C_BITRATE 68.75 /* I2C bus data bit rate
+ * (determined by
+ * S626_I2C_CLKSEL) in KHz. */
+#define S626_I2C_WRTIME 15.0 /* Worst case time, in msec,
+ * for EEPROM internal write
+ * op. */
+
+/* I2C manifest constants. */
+
+/* Max retries to wait for EEPROM write. */
+#define S626_I2C_RETRIES (S626_I2C_WRTIME * S626_I2C_BITRATE / 9.0)
+#define S626_I2C_ERR 0x0002 /* I2C control/status flag ERROR. */
+#define S626_I2C_BUSY 0x0001 /* I2C control/status flag BUSY. */
+#define S626_I2C_ABORT 0x0080 /* I2C status flag ABORT. */
+#define S626_I2C_ATTRSTART 0x3 /* I2C attribute START. */
+#define S626_I2C_ATTRCONT 0x2 /* I2C attribute CONT. */
+#define S626_I2C_ATTRSTOP 0x1 /* I2C attribute STOP. */
+#define S626_I2C_ATTRNOP 0x0 /* I2C attribute NOP. */
+
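For orientation, S626_I2C_RETRIES above is simply the worst-case EEPROM write time converted into I2C frame times: 15.0 ms * 68.75 kbit/s / 9 = approximately 114.6, i.e. roughly 115 polls before giving up (the 9.0 divisor presumably stands for eight data bits plus the ACK bit per byte).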
+/* Code macros used for constructing I2C command bytes. */
+#define S626_I2C_B2(ATTR, VAL) (((ATTR) << 6) | ((VAL) << 24))
+#define S626_I2C_B1(ATTR, VAL) (((ATTR) << 4) | ((VAL) << 16))
+#define S626_I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8))
+
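These three macros pack a 32-bit I2C control word: each byte value lands in bits 31:24, 23:16 or 15:8, and its 2-bit attribute in bits 7:6, 5:4 or 3:2. A hypothetical composition, purely to show the packing (the byte values and attributes below are made up, not lifted from the driver):

/* Hypothetical example: address byte with START, sub-address with CONT,
 * data byte with STOP, packed into one control word. */
uint32_t cmd = S626_I2C_B2(S626_I2C_ATTRSTART, 0xA0) |
	       S626_I2C_B1(S626_I2C_ATTRCONT, 0x00) |
	       S626_I2C_B0(S626_I2C_ATTRSTOP, 0x55);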
+/* DEBI command constants. */
+#define S626_DEBI_CMD_SIZE16 (2 << 17) /* Transfer size is always
+ * 2 bytes. */
+#define S626_DEBI_CMD_READ 0x00010000 /* Read operation. */
+#define S626_DEBI_CMD_WRITE 0x00000000 /* Write operation. */
+
+/* Read immediate 2 bytes. */
+#define S626_DEBI_CMD_RDWORD (S626_DEBI_CMD_READ | S626_DEBI_CMD_SIZE16)
+
+/* Write immediate 2 bytes. */
+#define S626_DEBI_CMD_WRWORD (S626_DEBI_CMD_WRITE | S626_DEBI_CMD_SIZE16)
+
+/* DEBI configuration constants. */
+#define S626_DEBI_CFG_XIRQ_EN 0x80000000 /* Enable external interrupt
+ * on GPIO3. */
+#define S626_DEBI_CFG_XRESUME 0x40000000 /* Resume block
+ * transfer when XIRQ
+ * deasserted. */
+#define S626_DEBI_CFG_TOQ 0x03C00000 /* Timeout (15 PCI cycles). */
+#define S626_DEBI_CFG_FAST 0x10000000 /* Fast mode enable. */
+
+/* 4-bit field that specifies DEBI timeout value in PCI clock cycles: */
+#define S626_DEBI_CFG_TOUT_BIT 22 /* Finish DEBI cycle after this many
+ * clocks. */
+
+/* 2-bit field that specifies Endian byte lane steering: */
+#define S626_DEBI_CFG_SWAP_NONE 0x00000000 /* Straight - don't swap any
+ * bytes (Intel). */
+#define S626_DEBI_CFG_SWAP_2 0x00100000 /* 2-byte swap (Motorola). */
+#define S626_DEBI_CFG_SWAP_4 0x00200000 /* 4-byte swap. */
+#define S626_DEBI_CFG_SLAVE16 0x00080000 /* Slave is able to serve
+ * 16-bit cycles. */
+#define S626_DEBI_CFG_INC 0x00040000 /* Enable address increment
+ * for block transfers. */
+#define S626_DEBI_CFG_INTEL 0x00020000 /* Intel style local bus. */
+#define S626_DEBI_CFG_TIMEROFF 0x00010000 /* Disable timer. */
+
+#if S626_PLATFORM == S626_INTEL
+
+#define S626_DEBI_TOUT 7 /* Wait 7 PCI clocks (212 ns) before
+ * polling RDY. */
+
+/* Intel byte lane steering (pass through all byte lanes). */
+#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_NONE
+
+#elif S626_PLATFORM == S626_MOTOROLA
+
+#define S626_DEBI_TOUT 15 /* Wait 15 PCI clocks (454 ns) maximum
+ * before timing out. */
+
+/* Motorola byte lane steering. */
+#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_2
#endif
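With the Intel platform selected, these constants feed the DEBI setup write in s626_initialize() shown earlier: S626_DEBI_CFG_SLAVE16 | (S626_DEBI_TOUT << S626_DEBI_CFG_TOUT_BIT) | S626_DEBI_SWAP | S626_DEBI_CFG_INTEL = 0x00080000 | (7 << 22) | 0 | 0x00020000 = 0x01CA0000.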
-/* DEBI page table constants. */
-#define DEBI_PAGE_DISABLE 0x00000000 /* Paging disable. */
-
-/* ******* EXTRA FROM OTHER SANSORAY * .h ******* */
-
-/* LoadSrc values: */
-#define LOADSRC_INDX 0 /* Preload core in response to */
- /* Index. */
-#define LOADSRC_OVER 1 /* Preload core in response to */
- /* Overflow. */
-#define LOADSRCB_OVERA 2 /* Preload B core in response */
- /* to A Overflow. */
-#define LOADSRC_NONE 3 /* Never preload core. */
-
-/* IntSrc values: */
-#define INTSRC_NONE 0 /* Interrupts disabled. */
-#define INTSRC_OVER 1 /* Interrupt on Overflow. */
-#define INTSRC_INDX 2 /* Interrupt on Index. */
-#define INTSRC_BOTH 3 /* Interrupt on Index or Overflow. */
-
-/* LatchSrc values: */
-#define LATCHSRC_AB_READ 0 /* Latch on read. */
-#define LATCHSRC_A_INDXA 1 /* Latch A on A Index. */
-#define LATCHSRC_B_INDXB 2 /* Latch B on B Index. */
-#define LATCHSRC_B_OVERA 3 /* Latch B on A Overflow. */
-
-/* IndxSrc values: */
-#define INDXSRC_HARD 0 /* Hardware or software index. */
-#define INDXSRC_SOFT 1 /* Software index only. */
-
-/* IndxPol values: */
-#define INDXPOL_POS 0 /* Index input is active high. */
-#define INDXPOL_NEG 1 /* Index input is active low. */
-
-/* ClkSrc values: */
-#define CLKSRC_COUNTER 0 /* Counter mode. */
-#define CLKSRC_TIMER 2 /* Timer mode. */
-#define CLKSRC_EXTENDER 3 /* Extender mode. */
-
-/* ClkPol values: */
-#define CLKPOL_POS 0 /* Counter/Extender clock is */
- /* active high. */
-#define CLKPOL_NEG 1 /* Counter/Extender clock is */
- /* active low. */
-#define CNTDIR_UP 0 /* Timer counts up. */
-#define CNTDIR_DOWN 1 /* Timer counts down. */
-
-/* ClkEnab values: */
-#define CLKENAB_ALWAYS 0 /* Clock always enabled. */
-#define CLKENAB_INDEX 1 /* Clock is enabled by index. */
-
-/* ClkMult values: */
-#define CLKMULT_4X 0 /* 4x clock multiplier. */
-#define CLKMULT_2X 1 /* 2x clock multiplier. */
-#define CLKMULT_1X 2 /* 1x clock multiplier. */
-
-/* Bit Field positions in COUNTER_SETUP structure: */
-#define BF_LOADSRC 9 /* Preload trigger. */
-#define BF_INDXSRC 7 /* Index source. */
-#define BF_INDXPOL 6 /* Index polarity. */
-#define BF_CLKSRC 4 /* Clock source. */
-#define BF_CLKPOL 3 /* Clock polarity/count direction. */
-#define BF_CLKMULT 1 /* Clock multiplier. */
-#define BF_CLKENAB 0 /* Clock enable. */
-
-/* Enumerated counter operating modes specified by ClkSrc bit field in */
-/* a COUNTER_SETUP. */
-
-#define CLKSRC_COUNTER 0 /* Counter: ENC_C clock, ENC_D */
- /* direction. */
-#define CLKSRC_TIMER 2 /* Timer: SYS_C clock, */
- /* direction specified by */
- /* ClkPol. */
-#define CLKSRC_EXTENDER 3 /* Extender: OVR_A clock, */
- /* ENC_D direction. */
-
-/* Enumerated counter clock multipliers. */
-
-#define MULT_X0 0x0003 /* Supports no multipliers; */
- /* fixed physical multiplier = */
- /* 3. */
-#define MULT_X1 0x0002 /* Supports multiplier x1; */
- /* fixed physical multiplier = */
- /* 2. */
-#define MULT_X2 0x0001 /* Supports multipliers x1, */
- /* x2; physical multipliers = */
- /* 1 or 2. */
-#define MULT_X4 0x0000 /* Supports multipliers x1, */
- /* x2, x4; physical */
- /* multipliers = 0, 1 or 2. */
-
-/* Sanity-check limits for parameters. */
-
-#define NUM_COUNTERS 6 /* Maximum valid counter */
- /* logical channel number. */
-#define NUM_INTSOURCES 4
-#define NUM_LATCHSOURCES 4
-#define NUM_CLKMULTS 4
-#define NUM_CLKSOURCES 4
-#define NUM_CLKPOLS 2
-#define NUM_INDEXPOLS 2
-#define NUM_INDEXSOURCES 2
-#define NUM_LOADTRIGS 4
-
-/* Bit field positions in CRA and CRB counter control registers. */
-
-/* Bit field positions in CRA: */
-#define CRABIT_INDXSRC_B 14 /* B index source. */
-#define CRABIT_CLKSRC_B 12 /* B clock source. */
-#define CRABIT_INDXPOL_A 11 /* A index polarity. */
-#define CRABIT_LOADSRC_A 9 /* A preload trigger. */
-#define CRABIT_CLKMULT_A 7 /* A clock multiplier. */
-#define CRABIT_INTSRC_A 5 /* A interrupt source. */
-#define CRABIT_CLKPOL_A 4 /* A clock polarity. */
-#define CRABIT_INDXSRC_A 2 /* A index source. */
-#define CRABIT_CLKSRC_A 0 /* A clock source. */
-
-/* Bit field positions in CRB: */
-#define CRBBIT_INTRESETCMD 15 /* Interrupt reset command. */
-#define CRBBIT_INTRESET_B 14 /* B interrupt reset enable. */
-#define CRBBIT_INTRESET_A 13 /* A interrupt reset enable. */
-#define CRBBIT_CLKENAB_A 12 /* A clock enable. */
-#define CRBBIT_INTSRC_B 10 /* B interrupt source. */
-#define CRBBIT_LATCHSRC 8 /* A/B latch source. */
-#define CRBBIT_LOADSRC_B 6 /* B preload trigger. */
-#define CRBBIT_CLKMULT_B 3 /* B clock multiplier. */
-#define CRBBIT_CLKENAB_B 2 /* B clock enable. */
-#define CRBBIT_INDXPOL_B 1 /* B index polarity. */
-#define CRBBIT_CLKPOL_B 0 /* B clock polarity. */
-
-/* Bit field masks for CRA and CRB. */
-
-#define CRAMSK_INDXSRC_B (3 << CRABIT_INDXSRC_B)
-#define CRAMSK_CLKSRC_B (3 << CRABIT_CLKSRC_B)
-#define CRAMSK_INDXPOL_A (1 << CRABIT_INDXPOL_A)
-#define CRAMSK_LOADSRC_A (3 << CRABIT_LOADSRC_A)
-#define CRAMSK_CLKMULT_A (3 << CRABIT_CLKMULT_A)
-#define CRAMSK_INTSRC_A (3 << CRABIT_INTSRC_A)
-#define CRAMSK_CLKPOL_A (3 << CRABIT_CLKPOL_A)
-#define CRAMSK_INDXSRC_A (3 << CRABIT_INDXSRC_A)
-#define CRAMSK_CLKSRC_A (3 << CRABIT_CLKSRC_A)
-
-#define CRBMSK_INTRESETCMD (1 << CRBBIT_INTRESETCMD)
-#define CRBMSK_INTRESET_B (1 << CRBBIT_INTRESET_B)
-#define CRBMSK_INTRESET_A (1 << CRBBIT_INTRESET_A)
-#define CRBMSK_CLKENAB_A (1 << CRBBIT_CLKENAB_A)
-#define CRBMSK_INTSRC_B (3 << CRBBIT_INTSRC_B)
-#define CRBMSK_LATCHSRC (3 << CRBBIT_LATCHSRC)
-#define CRBMSK_LOADSRC_B (3 << CRBBIT_LOADSRC_B)
-#define CRBMSK_CLKMULT_B (3 << CRBBIT_CLKMULT_B)
-#define CRBMSK_CLKENAB_B (1 << CRBBIT_CLKENAB_B)
-#define CRBMSK_INDXPOL_B (1 << CRBBIT_INDXPOL_B)
-#define CRBMSK_CLKPOL_B (1 << CRBBIT_CLKPOL_B)
-
-#define CRBMSK_INTCTRL (CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A | CRBMSK_INTRESET_B) /* Interrupt reset control bits. */
-
-/* Bit field positions for standardized SETUP structure. */
-
-#define STDBIT_INTSRC 13
-#define STDBIT_LATCHSRC 11
-#define STDBIT_LOADSRC 9
-#define STDBIT_INDXSRC 7
-#define STDBIT_INDXPOL 6
-#define STDBIT_CLKSRC 4
-#define STDBIT_CLKPOL 3
-#define STDBIT_CLKMULT 1
-#define STDBIT_CLKENAB 0
-
-/* Bit field masks for standardized SETUP structure. */
-
-#define STDMSK_INTSRC (3 << STDBIT_INTSRC)
-#define STDMSK_LATCHSRC (3 << STDBIT_LATCHSRC)
-#define STDMSK_LOADSRC (3 << STDBIT_LOADSRC)
-#define STDMSK_INDXSRC (1 << STDBIT_INDXSRC)
-#define STDMSK_INDXPOL (1 << STDBIT_INDXPOL)
-#define STDMSK_CLKSRC (3 << STDBIT_CLKSRC)
-#define STDMSK_CLKPOL (1 << STDBIT_CLKPOL)
-#define STDMSK_CLKMULT (3 << STDBIT_CLKMULT)
-#define STDMSK_CLKENAB (1 << STDBIT_CLKENAB)
-
-struct bufferDMA {
- dma_addr_t PhysicalBase;
- void *LogicalBase;
- uint32_t DMAHandle;
-};
+/* DEBI page table constants. */
+#define S626_DEBI_PAGE_DISABLE 0x00000000 /* Paging disable. */
+
+/* ******* EXTRA FROM OTHER SENSORAY * .h ******* */
+
+/* LoadSrc values: */
+#define S626_LOADSRC_INDX 0 /* Preload core in response to Index. */
+#define S626_LOADSRC_OVER 1 /* Preload core in response to
+ * Overflow. */
+#define S626_LOADSRCB_OVERA 2 /* Preload B core in response to
+ * A Overflow. */
+#define S626_LOADSRC_NONE 3 /* Never preload core. */
+
+/* IntSrc values: */
+#define S626_INTSRC_NONE 0 /* Interrupts disabled. */
+#define S626_INTSRC_OVER 1 /* Interrupt on Overflow. */
+#define S626_INTSRC_INDX 2 /* Interrupt on Index. */
+#define S626_INTSRC_BOTH 3 /* Interrupt on Index or Overflow. */
+
+/* LatchSrc values: */
+#define S626_LATCHSRC_AB_READ 0 /* Latch on read. */
+#define S626_LATCHSRC_A_INDXA 1 /* Latch A on A Index. */
+#define S626_LATCHSRC_B_INDXB 2 /* Latch B on B Index. */
+#define S626_LATCHSRC_B_OVERA 3 /* Latch B on A Overflow. */
+
+/* IndxSrc values: */
+#define S626_INDXSRC_ENCODER 0 /* Encoder. */
+#define S626_INDXSRC_DIGIN 1 /* Digital inputs. */
+#define S626_INDXSRC_SOFT 2 /* S/w controlled by IndxPol bit. */
+#define S626_INDXSRC_DISABLED 3 /* Index disabled. */
+
+/* IndxPol values: */
+#define S626_INDXPOL_POS 0 /* Index input is active high. */
+#define S626_INDXPOL_NEG 1 /* Index input is active low. */
+
+/* Logical encoder mode values: */
+#define S626_ENCMODE_COUNTER 0 /* Counter mode. */
+#define S626_ENCMODE_TIMER 2 /* Timer mode. */
+#define S626_ENCMODE_EXTENDER 3 /* Extender mode. */
+
+/* Physical CntSrc values (for Counter A source and Counter B source): */
+#define S626_CNTSRC_ENCODER 0 /* Encoder */
+#define S626_CNTSRC_DIGIN 1 /* Digital inputs */
+#define S626_CNTSRC_SYSCLK 2 /* System clock up */
+#define S626_CNTSRC_SYSCLK_DOWN 3 /* System clock down */
+
+/* ClkPol values: */
+#define S626_CLKPOL_POS 0 /* Counter/Extender clock is
+ * active high. */
+#define S626_CLKPOL_NEG 1 /* Counter/Extender clock is
+ * active low. */
+#define S626_CNTDIR_UP 0 /* Timer counts up. */
+#define S626_CNTDIR_DOWN 1 /* Timer counts down. */
+
+/* ClkEnab values: */
+#define S626_CLKENAB_ALWAYS 0 /* Clock always enabled. */
+#define S626_CLKENAB_INDEX 1 /* Clock is enabled by index. */
+
+/* ClkMult values: */
+#define S626_CLKMULT_4X 0 /* 4x clock multiplier. */
+#define S626_CLKMULT_2X 1 /* 2x clock multiplier. */
+#define S626_CLKMULT_1X 2 /* 1x clock multiplier. */
+#define S626_CLKMULT_SPECIAL 3 /* Special clock multiplier value. */
+
+/* Sanity-check limits for parameters. */
+
+#define S626_NUM_COUNTERS 6 /* Maximum valid counter
+ * logical channel number. */
+#define S626_NUM_INTSOURCES 4
+#define S626_NUM_LATCHSOURCES 4
+#define S626_NUM_CLKMULTS 4
+#define S626_NUM_CLKSOURCES 4
+#define S626_NUM_CLKPOLS 2
+#define S626_NUM_INDEXPOLS 2
+#define S626_NUM_INDEXSOURCES 2
+#define S626_NUM_LOADTRIGS 4
+
+/* General macros for manipulating bitfields: */
+#define S626_MAKE(x, w, p) (((x) & ((1 << (w)) - 1)) << (p))
+#define S626_UNMAKE(v, w, p) (((v) >> (p)) & ((1 << (w)) - 1))
+
+/* Bit field positions in CRA: */
+#define S626_CRABIT_INDXSRC_B 14 /* B index source. */
+#define S626_CRABIT_CNTSRC_B 12 /* B counter source. */
+#define S626_CRABIT_INDXPOL_A 11 /* A index polarity. */
+#define S626_CRABIT_LOADSRC_A 9 /* A preload trigger. */
+#define S626_CRABIT_CLKMULT_A 7 /* A clock multiplier. */
+#define S626_CRABIT_INTSRC_A 5 /* A interrupt source. */
+#define S626_CRABIT_CLKPOL_A 4 /* A clock polarity. */
+#define S626_CRABIT_INDXSRC_A 2 /* A index source. */
+#define S626_CRABIT_CNTSRC_A 0 /* A counter source. */
+
+/* Bit field widths in CRA: */
+#define S626_CRAWID_INDXSRC_B 2
+#define S626_CRAWID_CNTSRC_B 2
+#define S626_CRAWID_INDXPOL_A 1
+#define S626_CRAWID_LOADSRC_A 2
+#define S626_CRAWID_CLKMULT_A 2
+#define S626_CRAWID_INTSRC_A 2
+#define S626_CRAWID_CLKPOL_A 1
+#define S626_CRAWID_INDXSRC_A 2
+#define S626_CRAWID_CNTSRC_A 2
+
+/* Bit field masks for CRA: */
+#define S626_CRAMSK_INDXSRC_B S626_SET_CRA_INDXSRC_B(~0)
+#define S626_CRAMSK_CNTSRC_B S626_SET_CRA_CNTSRC_B(~0)
+#define S626_CRAMSK_INDXPOL_A S626_SET_CRA_INDXPOL_A(~0)
+#define S626_CRAMSK_LOADSRC_A S626_SET_CRA_LOADSRC_A(~0)
+#define S626_CRAMSK_CLKMULT_A S626_SET_CRA_CLKMULT_A(~0)
+#define S626_CRAMSK_INTSRC_A S626_SET_CRA_INTSRC_A(~0)
+#define S626_CRAMSK_CLKPOL_A S626_SET_CRA_CLKPOL_A(~0)
+#define S626_CRAMSK_INDXSRC_A S626_SET_CRA_INDXSRC_A(~0)
+#define S626_CRAMSK_CNTSRC_A S626_SET_CRA_CNTSRC_A(~0)
+
+/* Construct parts of the CRA value: */
+#define S626_SET_CRA_INDXSRC_B(x) \
+ S626_MAKE((x), S626_CRAWID_INDXSRC_B, S626_CRABIT_INDXSRC_B)
+#define S626_SET_CRA_CNTSRC_B(x) \
+ S626_MAKE((x), S626_CRAWID_CNTSRC_B, S626_CRABIT_CNTSRC_B)
+#define S626_SET_CRA_INDXPOL_A(x) \
+ S626_MAKE((x), S626_CRAWID_INDXPOL_A, S626_CRABIT_INDXPOL_A)
+#define S626_SET_CRA_LOADSRC_A(x) \
+ S626_MAKE((x), S626_CRAWID_LOADSRC_A, S626_CRABIT_LOADSRC_A)
+#define S626_SET_CRA_CLKMULT_A(x) \
+ S626_MAKE((x), S626_CRAWID_CLKMULT_A, S626_CRABIT_CLKMULT_A)
+#define S626_SET_CRA_INTSRC_A(x) \
+ S626_MAKE((x), S626_CRAWID_INTSRC_A, S626_CRABIT_INTSRC_A)
+#define S626_SET_CRA_CLKPOL_A(x) \
+ S626_MAKE((x), S626_CRAWID_CLKPOL_A, S626_CRABIT_CLKPOL_A)
+#define S626_SET_CRA_INDXSRC_A(x) \
+ S626_MAKE((x), S626_CRAWID_INDXSRC_A, S626_CRABIT_INDXSRC_A)
+#define S626_SET_CRA_CNTSRC_A(x) \
+ S626_MAKE((x), S626_CRAWID_CNTSRC_A, S626_CRABIT_CNTSRC_A)
+
+/* Extract parts of the CRA value: */
+#define S626_GET_CRA_INDXSRC_B(v) \
+ S626_UNMAKE((v), S626_CRAWID_INDXSRC_B, S626_CRABIT_INDXSRC_B)
+#define S626_GET_CRA_CNTSRC_B(v) \
+ S626_UNMAKE((v), S626_CRAWID_CNTSRC_B, S626_CRABIT_CNTSRC_B)
+#define S626_GET_CRA_INDXPOL_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_INDXPOL_A, S626_CRABIT_INDXPOL_A)
+#define S626_GET_CRA_LOADSRC_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_LOADSRC_A, S626_CRABIT_LOADSRC_A)
+#define S626_GET_CRA_CLKMULT_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_CLKMULT_A, S626_CRABIT_CLKMULT_A)
+#define S626_GET_CRA_INTSRC_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_INTSRC_A, S626_CRABIT_INTSRC_A)
+#define S626_GET_CRA_CLKPOL_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_CLKPOL_A, S626_CRABIT_CLKPOL_A)
+#define S626_GET_CRA_INDXSRC_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_INDXSRC_A, S626_CRABIT_INDXSRC_A)
+#define S626_GET_CRA_CNTSRC_A(v) \
+ S626_UNMAKE((v), S626_CRAWID_CNTSRC_A, S626_CRABIT_CNTSRC_A)
+
+/* Bit field positions in CRB: */
+#define S626_CRBBIT_INTRESETCMD 15 /* (w) Interrupt reset command. */
+#define S626_CRBBIT_CNTDIR_B 15 /* (r) B counter direction. */
+#define S626_CRBBIT_INTRESET_B 14 /* (w) B interrupt reset enable. */
+#define S626_CRBBIT_OVERDO_A 14 /* (r) A overflow routed to dig. out. */
+#define S626_CRBBIT_INTRESET_A 13 /* (w) A interrupt reset enable. */
+#define S626_CRBBIT_OVERDO_B 13 /* (r) B overflow routed to dig. out. */
+#define S626_CRBBIT_CLKENAB_A 12 /* A clock enable. */
+#define S626_CRBBIT_INTSRC_B 10 /* B interrupt source. */
+#define S626_CRBBIT_LATCHSRC 8 /* A/B latch source. */
+#define S626_CRBBIT_LOADSRC_B 6 /* B preload trigger. */
+#define S626_CRBBIT_CLEAR_B 7 /* B cleared when A overflows. */
+#define S626_CRBBIT_CLKMULT_B 3 /* B clock multiplier. */
+#define S626_CRBBIT_CLKENAB_B 2 /* B clock enable. */
+#define S626_CRBBIT_INDXPOL_B 1 /* B index polarity. */
+#define S626_CRBBIT_CLKPOL_B 0 /* B clock polarity. */
+
+/* Bit field widths in CRB: */
+#define S626_CRBWID_INTRESETCMD 1
+#define S626_CRBWID_CNTDIR_B 1
+#define S626_CRBWID_INTRESET_B 1
+#define S626_CRBWID_OVERDO_A 1
+#define S626_CRBWID_INTRESET_A 1
+#define S626_CRBWID_OVERDO_B 1
+#define S626_CRBWID_CLKENAB_A 1
+#define S626_CRBWID_INTSRC_B 2
+#define S626_CRBWID_LATCHSRC 2
+#define S626_CRBWID_LOADSRC_B 2
+#define S626_CRBWID_CLEAR_B 1
+#define S626_CRBWID_CLKMULT_B 2
+#define S626_CRBWID_CLKENAB_B 1
+#define S626_CRBWID_INDXPOL_B 1
+#define S626_CRBWID_CLKPOL_B 1
+
+/* Bit field masks for CRB: */
+#define S626_CRBMSK_INTRESETCMD S626_SET_CRB_INTRESETCMD(~0) /* (w) */
+#define S626_CRBMSK_CNTDIR_B S626_CRBMSK_INTRESETCMD /* (r) */
+#define S626_CRBMSK_INTRESET_B S626_SET_CRB_INTRESET_B(~0) /* (w) */
+#define S626_CRBMSK_OVERDO_A S626_CRBMSK_INTRESET_B /* (r) */
+#define S626_CRBMSK_INTRESET_A S626_SET_CRB_INTRESET_A(~0) /* (w) */
+#define S626_CRBMSK_OVERDO_B S626_CRBMSK_INTRESET_A /* (r) */
+#define S626_CRBMSK_CLKENAB_A S626_SET_CRB_CLKENAB_A(~0)
+#define S626_CRBMSK_INTSRC_B S626_SET_CRB_INTSRC_B(~0)
+#define S626_CRBMSK_LATCHSRC S626_SET_CRB_LATCHSRC(~0)
+#define S626_CRBMSK_LOADSRC_B S626_SET_CRB_LOADSRC_B(~0)
+#define S626_CRBMSK_CLEAR_B S626_SET_CRB_CLEAR_B(~0)
+#define S626_CRBMSK_CLKMULT_B S626_SET_CRB_CLKMULT_B(~0)
+#define S626_CRBMSK_CLKENAB_B S626_SET_CRB_CLKENAB_B(~0)
+#define S626_CRBMSK_INDXPOL_B S626_SET_CRB_INDXPOL_B(~0)
+#define S626_CRBMSK_CLKPOL_B S626_SET_CRB_CLKPOL_B(~0)
+
+/* Interrupt reset control bits. */
+#define S626_CRBMSK_INTCTRL (S626_CRBMSK_INTRESETCMD | \
+ S626_CRBMSK_INTRESET_A | \
+ S626_CRBMSK_INTRESET_B)
+
+/* Construct parts of the CRB value: */
+#define S626_SET_CRB_INTRESETCMD(x) \
+ S626_MAKE((x), S626_CRBWID_INTRESETCMD, S626_CRBBIT_INTRESETCMD)
+#define S626_SET_CRB_INTRESET_B(x) \
+ S626_MAKE((x), S626_CRBWID_INTRESET_B, S626_CRBBIT_INTRESET_B)
+#define S626_SET_CRB_INTRESET_A(x) \
+ S626_MAKE((x), S626_CRBWID_INTRESET_A, S626_CRBBIT_INTRESET_A)
+#define S626_SET_CRB_CLKENAB_A(x) \
+ S626_MAKE((x), S626_CRBWID_CLKENAB_A, S626_CRBBIT_CLKENAB_A)
+#define S626_SET_CRB_INTSRC_B(x) \
+ S626_MAKE((x), S626_CRBWID_INTSRC_B, S626_CRBBIT_INTSRC_B)
+#define S626_SET_CRB_LATCHSRC(x) \
+ S626_MAKE((x), S626_CRBWID_LATCHSRC, S626_CRBBIT_LATCHSRC)
+#define S626_SET_CRB_LOADSRC_B(x) \
+ S626_MAKE((x), S626_CRBWID_LOADSRC_B, S626_CRBBIT_LOADSRC_B)
+#define S626_SET_CRB_CLEAR_B(x) \
+ S626_MAKE((x), S626_CRBWID_CLEAR_B, S626_CRBBIT_CLEAR_B)
+#define S626_SET_CRB_CLKMULT_B(x) \
+ S626_MAKE((x), S626_CRBWID_CLKMULT_B, S626_CRBBIT_CLKMULT_B)
+#define S626_SET_CRB_CLKENAB_B(x) \
+ S626_MAKE((x), S626_CRBWID_CLKENAB_B, S626_CRBBIT_CLKENAB_B)
+#define S626_SET_CRB_INDXPOL_B(x) \
+ S626_MAKE((x), S626_CRBWID_INDXPOL_B, S626_CRBBIT_INDXPOL_B)
+#define S626_SET_CRB_CLKPOL_B(x) \
+ S626_MAKE((x), S626_CRBWID_CLKPOL_B, S626_CRBBIT_CLKPOL_B)
+
+/* Extract parts of the CRB value: */
+#define S626_GET_CRB_CNTDIR_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_CNTDIR_B, S626_CRBBIT_CNTDIR_B)
+#define S626_GET_CRB_OVERDO_A(v) \
+ S626_UNMAKE((v), S626_CRBWID_OVERDO_A, S626_CRBBIT_OVERDO_A)
+#define S626_GET_CRB_OVERDO_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_OVERDO_B, S626_CRBBIT_OVERDO_B)
+#define S626_GET_CRB_CLKENAB_A(v) \
+ S626_UNMAKE((v), S626_CRBWID_CLKENAB_A, S626_CRBBIT_CLKENAB_A)
+#define S626_GET_CRB_INTSRC_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_INTSRC_B, S626_CRBBIT_INTSRC_B)
+#define S626_GET_CRB_LATCHSRC(v) \
+ S626_UNMAKE((v), S626_CRBWID_LATCHSRC, S626_CRBBIT_LATCHSRC)
+#define S626_GET_CRB_LOADSRC_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_LOADSRC_B, S626_CRBBIT_LOADSRC_B)
+#define S626_GET_CRB_CLEAR_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_CLEAR_B, S626_CRBBIT_CLEAR_B)
+#define S626_GET_CRB_CLKMULT_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_CLKMULT_B, S626_CRBBIT_CLKMULT_B)
+#define S626_GET_CRB_CLKENAB_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_CLKENAB_B, S626_CRBBIT_CLKENAB_B)
+#define S626_GET_CRB_INDXPOL_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_INDXPOL_B, S626_CRBBIT_INDXPOL_B)
+#define S626_GET_CRB_CLKPOL_B(v) \
+ S626_UNMAKE((v), S626_CRBWID_CLKPOL_B, S626_CRBBIT_CLKPOL_B)
+
+/* Bit field positions for standardized SETUP structure: */
+#define S626_STDBIT_INTSRC 13
+#define S626_STDBIT_LATCHSRC 11
+#define S626_STDBIT_LOADSRC 9
+#define S626_STDBIT_INDXSRC 7
+#define S626_STDBIT_INDXPOL 6
+#define S626_STDBIT_ENCMODE 4
+#define S626_STDBIT_CLKPOL 3
+#define S626_STDBIT_CLKMULT 1
+#define S626_STDBIT_CLKENAB 0
+
+/* Bit field widths for standardized SETUP structure: */
+#define S626_STDWID_INTSRC 2
+#define S626_STDWID_LATCHSRC 2
+#define S626_STDWID_LOADSRC 2
+#define S626_STDWID_INDXSRC 2
+#define S626_STDWID_INDXPOL 1
+#define S626_STDWID_ENCMODE 2
+#define S626_STDWID_CLKPOL 1
+#define S626_STDWID_CLKMULT 2
+#define S626_STDWID_CLKENAB 1
+
+/* Bit field masks for standardized SETUP structure: */
+#define S626_STDMSK_INTSRC S626_SET_STD_INTSRC(~0)
+#define S626_STDMSK_LATCHSRC S626_SET_STD_LATCHSRC(~0)
+#define S626_STDMSK_LOADSRC S626_SET_STD_LOADSRC(~0)
+#define S626_STDMSK_INDXSRC S626_SET_STD_INDXSRC(~0)
+#define S626_STDMSK_INDXPOL S626_SET_STD_INDXPOL(~0)
+#define S626_STDMSK_ENCMODE S626_SET_STD_ENCMODE(~0)
+#define S626_STDMSK_CLKPOL S626_SET_STD_CLKPOL(~0)
+#define S626_STDMSK_CLKMULT S626_SET_STD_CLKMULT(~0)
+#define S626_STDMSK_CLKENAB S626_SET_STD_CLKENAB(~0)
+
+/* Construct parts of standardized SETUP structure: */
+#define S626_SET_STD_INTSRC(x) \
+ S626_MAKE((x), S626_STDWID_INTSRC, S626_STDBIT_INTSRC)
+#define S626_SET_STD_LATCHSRC(x) \
+ S626_MAKE((x), S626_STDWID_LATCHSRC, S626_STDBIT_LATCHSRC)
+#define S626_SET_STD_LOADSRC(x) \
+ S626_MAKE((x), S626_STDWID_LOADSRC, S626_STDBIT_LOADSRC)
+#define S626_SET_STD_INDXSRC(x) \
+ S626_MAKE((x), S626_STDWID_INDXSRC, S626_STDBIT_INDXSRC)
+#define S626_SET_STD_INDXPOL(x) \
+ S626_MAKE((x), S626_STDWID_INDXPOL, S626_STDBIT_INDXPOL)
+#define S626_SET_STD_ENCMODE(x) \
+ S626_MAKE((x), S626_STDWID_ENCMODE, S626_STDBIT_ENCMODE)
+#define S626_SET_STD_CLKPOL(x) \
+ S626_MAKE((x), S626_STDWID_CLKPOL, S626_STDBIT_CLKPOL)
+#define S626_SET_STD_CLKMULT(x) \
+ S626_MAKE((x), S626_STDWID_CLKMULT, S626_STDBIT_CLKMULT)
+#define S626_SET_STD_CLKENAB(x) \
+ S626_MAKE((x), S626_STDWID_CLKENAB, S626_STDBIT_CLKENAB)
+
+/* Extract parts of standardized SETUP structure: */
+#define S626_GET_STD_INTSRC(v) \
+ S626_UNMAKE((v), S626_STDWID_INTSRC, S626_STDBIT_INTSRC)
+#define S626_GET_STD_LATCHSRC(v) \
+ S626_UNMAKE((v), S626_STDWID_LATCHSRC, S626_STDBIT_LATCHSRC)
+#define S626_GET_STD_LOADSRC(v) \
+ S626_UNMAKE((v), S626_STDWID_LOADSRC, S626_STDBIT_LOADSRC)
+#define S626_GET_STD_INDXSRC(v) \
+ S626_UNMAKE((v), S626_STDWID_INDXSRC, S626_STDBIT_INDXSRC)
+#define S626_GET_STD_INDXPOL(v) \
+ S626_UNMAKE((v), S626_STDWID_INDXPOL, S626_STDBIT_INDXPOL)
+#define S626_GET_STD_ENCMODE(v) \
+ S626_UNMAKE((v), S626_STDWID_ENCMODE, S626_STDBIT_ENCMODE)
+#define S626_GET_STD_CLKPOL(v) \
+ S626_UNMAKE((v), S626_STDWID_CLKPOL, S626_STDBIT_CLKPOL)
+#define S626_GET_STD_CLKMULT(v) \
+ S626_UNMAKE((v), S626_STDWID_CLKMULT, S626_STDBIT_CLKMULT)
+#define S626_GET_STD_CLKENAB(v) \
+ S626_UNMAKE((v), S626_STDWID_CLKENAB, S626_STDBIT_CLKENAB)
+
+#endif
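
To make the renamed helpers easier to follow, here is a short illustrative sketch (not part of the patch, field values arbitrary) of how the S626_SET_*/S626_GET_* accessors and S626_*MSK_* masks, all built on S626_MAKE()/S626_UNMAKE(), combine into a CRA word:

static unsigned int s626_example_cra(void)
{
	/* Compose a CRA word from logical field values. */
	unsigned int cra = S626_SET_CRA_CLKMULT_A(S626_CLKMULT_1X) |
			   S626_SET_CRA_INTSRC_A(S626_INTSRC_OVER) |
			   S626_SET_CRA_INDXSRC_A(S626_INDXSRC_SOFT);

	/* Update one field via its mask (read-modify-write). */
	cra &= ~S626_CRAMSK_CLKMULT_A;
	cra |= S626_SET_CRA_CLKMULT_A(S626_CLKMULT_4X);

	/* S626_GET_CRA_CLKMULT_A(cra) now yields S626_CLKMULT_4X. */
	return cra;
}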
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index 9e964950a56..daee2f42bde 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -332,30 +332,44 @@ static int skel_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
return i;
}
-/* DIO devices are slightly special. Although it is possible to
+/*
+ * DIO devices are slightly special. Although it is possible to
* implement the insn_read/insn_write interface, it is much more
* useful to applications if you implement the insn_bits interface.
- * This allows packed reading/writing of the DIO channels. The
- * comedi core can convert between insn_bits and insn_read/write */
+ * This allows packed reading/writing of the DIO channels. The
+ * comedi core can convert between insn_bits and insn_read/write.
+ */
static int skel_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
+ /*
+ * The insn data is a mask in data[0] and the new data
+ * in data[1], each channel corresponding to a bit.
+ *
+ * The core provided comedi_dio_update_state() function can
+ * be used to handle the internal state update to DIO subdevices
+ * with <= 32 channels. This function returns '0' if the state
+ * does not change, or the mask of the channels that need to be
+ * updated.
+ */
+ if (comedi_dio_update_state(s, data)) {
/* Write out the new digital output lines */
- /* outw(s->state,dev->iobase + SKEL_DIO); */
+ /* outw(s->state, dev->iobase + SKEL_DIO); */
}
- /* on return, data[1] contains the value of the digital
- * input and output lines. */
- /* data[1]=inw(dev->iobase + SKEL_DIO); */
- /* or we could just return the software copy of the output values if
- * it was a purely digital output subdevice */
- /* data[1]=s->state; */
+ /*
+ * On return, data[1] contains the value of the digital
+ * input and output lines.
+ */
+ /* data[1] = inw(dev->iobase + SKEL_DIO); */
+
+ /*
+ * Or we could just return the software copy of the output
+ * values if it was a purely digital output subdevice.
+ */
+ /* data[1] = s->state; */
return insn->n;
}
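
For readers unfamiliar with the new core helper, a rough sketch of the behaviour described in the comment above (an approximation for a <= 32 channel subdevice, not the comedi core source itself):

static unsigned int dio_update_state_sketch(struct comedi_subdevice *s,
					    unsigned int *data)
{
	unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
						 : 0xffffffff;
	unsigned int mask = data[0] & chanmask;	/* channels to modify */

	if (mask) {
		s->state &= ~mask;
		s->state |= data[1] & mask;	/* apply the new bit values */
	}
	return mask;	/* 0 if nothing needs to be written out */
}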
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index 11758a515c1..df22a78d2b7 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -46,51 +46,43 @@ Status: unknown
#define PCMR 0xa3 /* Port C Mode Register */
#define PCDR 0xa7 /* Port C Data Register */
-/* ------------------------------------------------------------------------- */
-/* The insn_bits interface allows packed reading/writing of DIO channels. */
-/* The comedi core can convert between insn_bits and insn_read/write, so you */
-/* are able to use these instructions as well. */
-/* ------------------------------------------------------------------------- */
-
static int dnp_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* The insn data is a mask in data[0] and the new data in data[1], */
- /* each channel cooresponding to a bit. */
-
- /* Ports A and B are straight forward: each bit corresponds to an */
- /* output pin with the same order. Port C is different: bits 0...3 */
- /* correspond to bits 4...7 of the output register (PCDR). */
+ unsigned int mask;
+ unsigned int val;
- if (data[0]) {
+ /*
+ * Ports A and B are straightforward: each bit corresponds to an
+ * output pin with the same order. Port C is different: bits 0...3
+ * correspond to bits 4...7 of the output register (PCDR).
+ */
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
outb(PADR, CSCIR);
- outb((inb(CSCDR)
- & ~(u8) (data[0] & 0x0000FF))
- | (u8) (data[1] & 0x0000FF), CSCDR);
+ outb(s->state & 0xff, CSCDR);
outb(PBDR, CSCIR);
- outb((inb(CSCDR)
- & ~(u8) ((data[0] & 0x00FF00) >> 8))
- | (u8) ((data[1] & 0x00FF00) >> 8), CSCDR);
+ outb((s->state >> 8) & 0xff, CSCDR);
outb(PCDR, CSCIR);
- outb((inb(CSCDR)
- & ~(u8) ((data[0] & 0x0F0000) >> 12))
- | (u8) ((data[1] & 0x0F0000) >> 12), CSCDR);
+ val = inb(CSCDR) & 0x0f;
+ outb(((s->state >> 12) & 0xf0) | val, CSCDR);
}
- /* on return, data[1] contains the value of the digital input lines. */
outb(PADR, CSCIR);
- data[0] = inb(CSCDR);
+ val = inb(CSCDR);
outb(PBDR, CSCIR);
- data[0] += inb(CSCDR) << 8;
+ val |= (inb(CSCDR) << 8);
outb(PCDR, CSCIR);
- data[0] += ((inb(CSCDR) & 0xF0) << 12);
+ val |= ((inb(CSCDR) & 0xf0) << 12);
- return insn->n;
+ data[1] = val;
+ return insn->n;
}
static int dnp_dio_insn_config(struct comedi_device *dev,
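
The only non-obvious part of the rewrite above is the Port C packing: DIO state bits 16..19 must land in PCDR bits 4..7 while the register's low nibble is preserved. A small worked example (values invented for illustration):

/* Map state bits 16..19 (Port C) into PCDR bits 4..7, keeping bits 0..3. */
static unsigned char dnp_pack_port_c(unsigned int state, unsigned char pcdr)
{
	return ((state >> 12) & 0xf0) | (pcdr & 0x0f);
}

/* Example: state = 1 << 16 (channel 16 high), pcdr = 0x03
 *   -> (0x10000 >> 12) & 0xf0 = 0x10, result = 0x13 (PCDR bit 4 set).
 */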
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 701ad1a6939..da1d501d9e4 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -122,7 +122,7 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#define PWM_DEFAULT_PERIOD ((long)(1E9/100))
/* Size of one A/D value */
-#define SIZEADIN ((sizeof(int16_t)))
+#define SIZEADIN ((sizeof(uint16_t)))
/*
* Size of the input-buffer IN BYTES
@@ -134,7 +134,7 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#define SIZEINSNBUF 16
/* size of one value for the D/A converter: channel and value */
-#define SIZEDAOUT ((sizeof(int8_t)+sizeof(int16_t)))
+#define SIZEDAOUT ((sizeof(uint8_t)+sizeof(uint16_t)))
/*
* Size of the output-buffer in bytes
@@ -195,15 +195,15 @@ struct usbdux_private {
/* PWM period */
unsigned int pwm_period;
/* PWM internal delay for the GPIF in the FX2 */
- int8_t pwm_delay;
+ uint8_t pwm_delay;
/* size of the PWM buffer which holds the bit pattern */
int pwm_buf_sz;
/* input buffer for the ISO-transfer */
- int16_t *in_buf;
+ uint16_t *in_buf;
/* input buffer for single insn */
- int16_t *insn_buf;
+ uint16_t *insn_buf;
- int8_t ao_chanlist[USBDUX_NUM_AO_CHAN];
+ uint8_t ao_chanlist[USBDUX_NUM_AO_CHAN];
unsigned int ao_readback[USBDUX_NUM_AO_CHAN];
unsigned int high_speed:1;
@@ -225,7 +225,7 @@ struct usbdux_private {
/* interval in frames/uframes */
unsigned int ai_interval;
/* commands */
- int8_t *dux_commands;
+ uint8_t *dux_commands;
struct semaphore sem;
};
@@ -367,7 +367,7 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
n = s->async->cmd.chanlist_len;
for (i = 0; i < n; i++) {
unsigned int range = CR_RANGE(s->async->cmd.chanlist[i]);
- int16_t val = le16_to_cpu(devpriv->in_buf[i]);
+ uint16_t val = le16_to_cpu(devpriv->in_buf[i]);
/* bipolar data is two's-complement */
if (comedi_range_is_bipolar(s, range))
@@ -415,7 +415,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
struct comedi_device *dev = urb->context;
struct comedi_subdevice *s = dev->write_subdev;
struct usbdux_private *devpriv = dev->private;
- int8_t *datap;
+ uint8_t *datap;
int len;
int ret;
int i;
@@ -483,7 +483,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
*datap++ = len;
for (i = 0; i < s->async->cmd.chanlist_len; i++) {
unsigned int chan = devpriv->ao_chanlist[i];
- short val;
+ unsigned short val;
ret = comedi_buf_get(s->async, &val);
if (ret < 0) {
@@ -649,14 +649,15 @@ static int usbdux_ai_cmdtest(struct comedi_device *dev,
* creates the ADC command for the MAX1271
* range is the range value from comedi
*/
-static int8_t create_adc_command(unsigned int chan, int range)
+static uint8_t create_adc_command(unsigned int chan, unsigned int range)
{
- int8_t p = (range <= 1);
- int8_t r = ((range % 2) == 0);
+ uint8_t p = (range <= 1);
+ uint8_t r = ((range % 2) == 0);
+
return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
}
-static int send_dux_commands(struct comedi_device *dev, int cmd_type)
+static int send_dux_commands(struct comedi_device *dev, unsigned int cmd_type)
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct usbdux_private *devpriv = dev->private;
@@ -669,7 +670,7 @@ static int send_dux_commands(struct comedi_device *dev, int cmd_type)
&nsent, BULK_TIMEOUT);
}
-static int receive_dux_commands(struct comedi_device *dev, int command)
+static int receive_dux_commands(struct comedi_device *dev, unsigned int command)
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct usbdux_private *devpriv = dev->private;
@@ -879,7 +880,7 @@ static int usbdux_ao_insn_write(struct comedi_device *dev,
struct usbdux_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val = devpriv->ao_readback[chan];
- int16_t *p = (int16_t *)&devpriv->dux_commands[2];
+ uint16_t *p = (uint16_t *)&devpriv->dux_commands[2];
int ret = -EBUSY;
int i;
@@ -1133,15 +1134,13 @@ static int usbdux_dio_insn_bits(struct comedi_device *dev,
{
struct usbdux_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
int ret;
down(&devpriv->sem);
- s->state &= ~mask;
- s->state |= (bits & mask);
+ comedi_dio_update_state(s, data);
+ /* Always update the hardware. See the (*insn_config). */
devpriv->dux_commands[1] = s->io_bits;
devpriv->dux_commands[2] = s->state;
@@ -1200,7 +1199,7 @@ static int usbdux_counter_write(struct comedi_device *dev,
{
struct usbdux_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- int16_t *p = (int16_t *)&devpriv->dux_commands[2];
+ uint16_t *p = (uint16_t *)&devpriv->dux_commands[2];
int ret = 0;
int i;
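
The create_adc_command() change above only adjusts the types; the command layout stays (chan << 4) | (p << 2) | (r << 3). Two worked examples with arbitrary channel/range values:

/* create_adc_command(2, 1): p = (1 <= 1) = 1, r = ((1 % 2) == 0) = 0
 *   -> (2 << 4) | (1 << 2) | (0 << 3) = 0x24
 * create_adc_command(0, 2): p = 0, r = 1
 *   -> (0 << 4) | (0 << 2) | (1 << 3) = 0x08
 */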
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index c47f4087568..a5363ded366 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -78,7 +78,7 @@
#define USBDUXSIGMA_NUM_AO_CHAN 4
/* Size of one A/D value */
-#define SIZEADIN ((sizeof(int32_t)))
+#define SIZEADIN ((sizeof(uint32_t)))
/*
* Size of the async input-buffer IN BYTES, the DIO state is transmitted
@@ -93,7 +93,7 @@
#define NUMOUTCHANNELS 8
/* size of one value for the D/A converter: channel and value */
-#define SIZEDAOUT ((sizeof(uint8_t)+sizeof(int16_t)))
+#define SIZEDAOUT ((sizeof(uint8_t)+sizeof(uint16_t)))
/*
* Size of the output-buffer in bytes
@@ -157,11 +157,11 @@ struct usbduxsigma_private {
/* size of the PWM buffer which holds the bit pattern */
int pwm_buf_sz;
/* input buffer for the ISO-transfer */
- int32_t *in_buf;
+ uint32_t *in_buf;
/* input buffer for single insn */
- int8_t *insn_buf;
+ uint8_t *insn_buf;
- int8_t ao_chanlist[USBDUXSIGMA_NUM_AO_CHAN];
+ uint8_t ao_chanlist[USBDUXSIGMA_NUM_AO_CHAN];
unsigned int ao_readback[USBDUXSIGMA_NUM_AO_CHAN];
unsigned high_speed:1;
@@ -224,7 +224,7 @@ static void usbduxsigma_ai_urb_complete(struct urb *urb)
struct usbduxsigma_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned int dio_state;
- int32_t val;
+ uint32_t val;
int ret;
int i;
@@ -421,7 +421,7 @@ static void usbduxsigma_ao_urb_complete(struct urb *urb)
*datap++ = len;
for (i = 0; i < len; i++) {
unsigned int chan = devpriv->ao_chanlist[i];
- short val;
+ unsigned short val;
ret = comedi_buf_get(s->async, &val);
if (ret < 0) {
@@ -784,7 +784,7 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
for (i = 0; i < insn->n; i++) {
- int32_t val;
+ uint32_t val;
ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
if (ret < 0) {
@@ -793,7 +793,7 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((int32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1059,15 +1059,13 @@ static int usbduxsigma_dio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct usbduxsigma_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
int ret;
down(&devpriv->sem);
- s->state &= ~mask;
- s->state |= (bits & mask);
+ comedi_dio_update_state(s, data);
+ /* Always update the hardware. See the (*insn_config). */
devpriv->dux_commands[1] = s->io_bits & 0xff;
devpriv->dux_commands[4] = s->state & 0xff;
devpriv->dux_commands[2] = (s->io_bits >> 8) & 0xff;
@@ -1360,7 +1358,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((int32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
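
The val &= 0x00ffffff; val ^= 0x00800000; sequence above is the usual 24-bit two's-complement to offset-binary conversion: flipping the sign bit equals adding half of full scale modulo 2^24. A minimal sketch with example values:

/* 24-bit two's-complement sample (in the low bits) -> unsigned offset code. */
static uint32_t s24_to_offset_binary(uint32_t raw)
{
	uint32_t val = raw & 0x00ffffff;	/* strip the status byte */
	return val ^ 0x00800000;		/* flip the sign bit */
}

/* 0x000000 (zero)  -> 0x800000 (mid-scale)
 * 0x7fffff (max +) -> 0xffffff (full scale)
 * 0xffffff (-1)    -> 0x7fffff (just below mid-scale)
 */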
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 06efa16b9af..933b01a0f03 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -462,9 +462,10 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
struct vmk80xx_private *devpriv = dev->private;
- unsigned char *rx_buf, *tx_buf;
+ unsigned char *rx_buf = devpriv->usb_rx_buf;
+ unsigned char *tx_buf = devpriv->usb_tx_buf;
int reg, cmd;
- int retval;
+ int ret;
if (devpriv->model == VMK8061_MODEL) {
reg = VMK8061_DO_REG;
@@ -476,37 +477,27 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
down(&devpriv->limit_sem);
- rx_buf = devpriv->usb_rx_buf;
- tx_buf = devpriv->usb_tx_buf;
-
- if (data[0]) {
- tx_buf[reg] &= ~data[0];
- tx_buf[reg] |= (data[0] & data[1]);
-
- retval = vmk80xx_write_packet(dev, cmd);
-
- if (retval)
+ if (comedi_dio_update_state(s, data)) {
+ tx_buf[reg] = s->state;
+ ret = vmk80xx_write_packet(dev, cmd);
+ if (ret)
goto out;
}
if (devpriv->model == VMK8061_MODEL) {
tx_buf[0] = VMK8061_CMD_RD_DO;
-
- retval = vmk80xx_read_packet(dev);
-
- if (!retval) {
- data[1] = rx_buf[reg];
- retval = 2;
- }
+ ret = vmk80xx_read_packet(dev);
+ if (ret)
+ goto out;
+ data[1] = rx_buf[reg];
} else {
- data[1] = tx_buf[reg];
- retval = 2;
+ data[1] = s->state;
}
out:
up(&devpriv->limit_sem);
- return retval;
+ return ret ? ret : insn->n;
}
static int vmk80xx_cnt_insn_read(struct comedi_device *dev,
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
index 42a5f5c8d3d..ca4c2c67dd8 100644
--- a/drivers/staging/cptm1217/clearpad_tm1217.c
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -457,8 +457,6 @@ static int cp_tm1217_probe(struct i2c_client *client,
for (i = 0; i < TOUCH_SUPPORTED; i++) {
input_dev = input_allocate_device();
if (input_dev == NULL) {
- dev_err(ts->dev,
- "cp_tm1217:Input Device Struct alloc failed\n");
retval = -ENOMEM;
goto fail;
}
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 5845e899ee8..043bd49843f 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -1517,7 +1517,7 @@ static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
uint32_t i, list_avail = 0;
enum BC_STATUS comp_sts = BC_STS_NO_DATA;
uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
- bool ret = 0;
+ bool ret = false;
if (!hw) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1852,7 +1852,7 @@ bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
{
uint32_t intr_sts = 0;
uint32_t deco_intr = 0;
- bool rc = 0;
+ bool rc = false;
if (!adp || !hw->dev_started)
return rc;
@@ -1865,7 +1865,7 @@ bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
if (intr_sts) {
/* let system know we processed interrupt..*/
- rc = 1;
+ rc = true;
hw->stats.dev_interrupts++;
}
@@ -1886,7 +1886,7 @@ bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
/* FIXME: jarod: No udelay? might this be
the real reason mini pci-e cards were stalling out? */
bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
- rc = 1;
+ rc = true;
}
/* Rx interrupts */
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index b17fbf8181c..190b9b92436 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -75,8 +75,9 @@ static int chd_dec_disable_int(struct crystalhd_adp *adp)
return 0;
}
-struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp,
- bool isr)
+static struct
+crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp,
+ bool isr)
{
unsigned long flags = 0;
struct crystalhd_ioctl_data *temp;
@@ -96,8 +97,8 @@ struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp,
return temp;
}
-void chd_dec_free_iodata(struct crystalhd_adp *adp,
- struct crystalhd_ioctl_data *iodata, bool isr)
+static void chd_dec_free_iodata(struct crystalhd_adp *adp,
+ struct crystalhd_ioctl_data *iodata, bool isr)
{
unsigned long flags = 0;
@@ -156,7 +157,7 @@ static int chd_dec_fetch_cdata(struct crystalhd_adp *adp,
if (rc) {
BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n",
io->add_cdata_sz, (unsigned int)ua_off);
- kfree(io->add_cdata);
+ vfree(io->add_cdata);
io->add_cdata = NULL;
return -ENODATA;
}
@@ -627,7 +628,7 @@ err:
}
#ifdef CONFIG_PM
-int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct crystalhd_adp *adp;
struct crystalhd_ioctl_data *temp;
@@ -661,7 +662,7 @@ int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-int chd_dec_pci_resume(struct pci_dev *pdev)
+static int chd_dec_pci_resume(struct pci_dev *pdev)
{
struct crystalhd_adp *adp;
enum BC_STATUS sts = BC_STS_SUCCESS;
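
The kfree() -> vfree() switch above implies io->add_cdata is obtained with vmalloc() elsewhere in the driver (the allocation site is outside this hunk); allocation and free must always come from the same allocator family. A minimal sketch of the pairing:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *cdata_alloc(size_t sz)
{
	return vmalloc(sz);	/* large, virtually contiguous buffer */
}

static void cdata_free(void *p)
{
	vfree(p);		/* must pair with vmalloc(), never kfree() */
}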
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index d71aea54181..46a0d92173e 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -145,10 +145,8 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* Enable 8 out of 10 validation */
/* t1RBOC enable(BOC:BitOriented Code) */
pci_write_32((u_int32_t *) &comet->t1_rboc_ena, 0x00);
- if (isT1mode)
- {
-
- /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
+ if (isT1mode) {
+ /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
/* 6 bit down, 5 bit up (assert) */
pci_write_32((u_int32_t *) &comet->ibcd_cfg, 0x04);
/* line loopback activate pattern */
@@ -353,7 +351,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* RLPS Configuration Status */
pci_write_32((u_int32_t *) &comet->rlps_cfgsts, 0x11);
if (isT1mode)
- /* ? */
+ /* ? */
pci_write_32((u_int32_t *) &comet->rlps_alos_thresh, 0x55);
else
/* ? */
@@ -452,7 +450,7 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
volatile u_int32_t value;
for (ramaddr = 0; ramaddr < 256; ramaddr++) {
- /*** the following lines are per Errata 7, 2.5 ***/
+ /*** the following lines are per Errata 7, 2.5 ***/
{
/* Set up for a read operation */
pci_write_32((u_int32_t *) &comet->rlps_eq_rwsel, 0x80);
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
index e06da4a6f6f..03b9bb77a80 100644
--- a/drivers/staging/cxt1e1/comet.h
+++ b/drivers/staging/cxt1e1/comet.h
@@ -338,7 +338,7 @@ typedef struct s_comet_reg comet_t;
#ifdef __KERNEL__
extern void
-init_comet (void *, comet_t *, u_int32_t, int, u_int8_t);
+init_comet(void *, comet_t *, u_int32_t, int, u_int8_t);
#endif
#endif /* _INC_COMET_H_ */
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 53e923701ae..02b4f8f1aca 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -157,7 +157,7 @@ prep_hdw_info (void)
hi->pci_slot = 0xff;
hi->pci_pin[0] = 0;
hi->pci_pin[1] = 0;
- hi->ndev = 0;
+ hi->ndev = NULL;
hi->addr[0] = 0L;
hi->addr[1] = 0L;
hi->addr_mapped[0] = 0L;
@@ -309,7 +309,7 @@ c4hw_attach_all (void)
if (!found)
{
pr_warning("No boards found\n");
- return ENODEV;
+ return -ENODEV;
}
/* sanity check for consistent hardware found */
for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
@@ -318,7 +318,7 @@ c4hw_attach_all (void)
{
pr_warning("%s: something very wrong with pci_get_device\n",
hi->devname);
- return EIO;
+ return -EIO;
}
}
/* bring board's memory regions on/line */
@@ -328,12 +328,12 @@ c4hw_attach_all (void)
break;
for (j = 0; j < 2; j++)
{
- if (request_mem_region (hi->addr[j], hi->len[j], hi->devname) == 0)
+ if (!request_mem_region (hi->addr[j], hi->len[j], hi->devname))
{
pr_warning("%s: memory in use, addr=0x%lx, len=0x%lx ?\n",
hi->devname, hi->addr[j], hi->len[j]);
cleanup_ioremap ();
- return ENOMEM;
+ return -ENOMEM;
}
hi->addr_mapped[j] = (unsigned long) ioremap (hi->addr[j], hi->len[j]);
if (!hi->addr_mapped[j])
@@ -341,7 +341,7 @@ c4hw_attach_all (void)
pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
hi->devname, hi->addr[j], hi->len[j]);
cleanup_ioremap ();
- return ENOMEM;
+ return -ENOMEM;
}
#ifdef SBE_MAP_DEBUG
pr_warning("%s: io remapped from phys %x to virt %x\n",
@@ -365,7 +365,7 @@ c4hw_attach_all (void)
hi->devname, i, hi->pci_slot);
cleanup_devs ();
cleanup_ioremap ();
- return EIO;
+ return -EIO;
}
pci_set_master (hi->pdev[0]);
pci_set_master (hi->pdev[1]);
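
These hunks move c4hw_attach_all() from returning positive ENODEV/EIO/ENOMEM values to the conventional negative errno codes, so callers that test ret < 0 (or simply propagate ret) see a real error. A minimal sketch of the convention:

#include <linux/errno.h>

static int example_attach(int boards_found)
{
	if (!boards_found)
		return -ENODEV;	/* negative errno signals failure */
	return 0;		/* zero signals success */
}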
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 142691c8d8d..9b483739881 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -133,7 +133,7 @@ getuserbychan (int channum)
mch_t *ch;
ch = c4_find_chan (channum);
- return ch ? ch->user : 0;
+ return ch ? ch->user : NULL;
}
@@ -230,7 +230,7 @@ c4_wq_port_init (mpi_t *pi)
__func__, name, pi->portnum); /* RLD DEBUG */
#endif
if (!(pi->wq_port = create_singlethread_workqueue (name)))
- return ENOMEM;
+ return -ENOMEM;
return 0; /* success */
}
@@ -245,7 +245,7 @@ c4_wq_port_cleanup (mpi_t *pi)
{
destroy_workqueue (pi->wq_port); /* this also calls
* flush_workqueue() */
- pi->wq_port = 0;
+ pi->wq_port = NULL;
}
}
@@ -420,7 +420,7 @@ create_chan (struct net_device *ndev, ci_t *ci,
int ret;
if (c4_find_chan (cp->channum))
- return 0; /* channel already exists */
+ return NULL; /* channel already exists */
{
struct c4_priv *priv;
@@ -430,14 +430,14 @@ create_chan (struct net_device *ndev, ci_t *ci,
if (!priv)
{
pr_warning("%s: no memory for net_device !\n", ci->devname);
- return 0;
+ return NULL;
}
dev = alloc_hdlcdev (priv);
if (!dev)
{
pr_warning("%s: no memory for hdlc_device !\n", ci->devname);
OS_kfree (priv);
- return 0;
+ return NULL;
}
priv->ci = ci;
priv->channum = cp->channum;
@@ -496,7 +496,7 @@ create_chan (struct net_device *ndev, ci_t *ci,
pr_info("%s: create_chan[%d] registration error = %d.\n",
ci->devname, cp->channum, ret);
free_netdev (dev); /* cleanup */
- return 0; /* failed to register */
+ return NULL; /* failed to register */
}
return dev;
}
@@ -744,7 +744,7 @@ do_deluser (struct net_device *ndev, int lockit)
ch = c4_find_chan (channum);
if (ch == NULL)
return -ENOENT;
- ch->user = 0; /* will be freed, below */
+ ch->user = NULL; /* will be freed, below */
}
if (lockit)
@@ -959,7 +959,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
{
pr_warning("%s: no memory for struct net_device !\n", hi->devname);
error_flag = ENOMEM;
- return 0;
+ return NULL;
}
ci = (ci_t *)(netdev_priv(ndev));
ndev->irq = irq0;
@@ -970,7 +970,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
c4_list = ci;
ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
- if (CI == 0)
+ if (!CI)
CI = ci; /* DEBUG, only board 0 usage */
strcpy (ci->devname, hi->devname);
@@ -996,7 +996,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
OS_kfree (netdev_priv(ndev));
OS_kfree (ndev);
error_flag = ENODEV;
- return 0;
+ return NULL;
}
/*************************************************************
* int request_irq(unsigned int irq,
@@ -1022,7 +1022,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
OS_kfree (netdev_priv(ndev));
OS_kfree (ndev);
error_flag = EIO;
- return 0;
+ return NULL;
}
#ifdef CONFIG_SBE_PMCC4_NCOMM
if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
@@ -1033,7 +1033,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
OS_kfree (netdev_priv(ndev));
OS_kfree (ndev);
error_flag = EIO;
- return 0;
+ return NULL;
}
#endif
@@ -1091,7 +1091,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
free_irq (irq0, ndev);
OS_kfree (netdev_priv(ndev));
OS_kfree (ndev);
- return 0; /* failure, error_flag is set */
+ return NULL; /* failure, error_flag is set */
}
return ndev;
}
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 52b6d7f5fd4..0ba8c3ae673 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -745,8 +745,8 @@ musycc_init(ci_t *ci)
#define INT_QUEUE_BOUNDARY 4
regaddr = OS_kmalloc((INT_QUEUE_SIZE + 1) * sizeof(u_int32_t));
- if (regaddr == 0)
- return ENOMEM;
+ if (!regaddr)
+ return -ENOMEM;
ci->iqd_p_saved = regaddr; /* save orig value for free's usage */
ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) &
(~(INT_QUEUE_BOUNDARY - 1))); /* this calculates
@@ -766,13 +766,13 @@ musycc_init(ci_t *ci)
#define GROUP_BOUNDARY 0x800
regaddr = OS_kmalloc(sizeof(struct musycc_groupr) + GROUP_BOUNDARY);
- if (regaddr == 0) {
+ if (!regaddr) {
for (gchan = 0; gchan < i; gchan++) {
pi = &ci->port[gchan];
OS_kfree(pi->reg);
- pi->reg = 0;
+ pi->reg = NULL;
}
- return ENOMEM;
+ return -ENOMEM;
}
pi->regram_saved = regaddr; /* save orig value for free's usage */
pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) &
@@ -839,12 +839,12 @@ musycc_bh_tx_eom(mpi_t *pi, int gchan)
volatile u_int32_t status;
ch = pi->chan[gchan];
- if (ch == 0 || ch->state != UP) {
+ if (!ch || ch->state != UP) {
if (cxt1e1_log_level >= LOG_ERROR)
pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
pi->up->devname, gchan);
}
- if (ch == 0 || ch->mdt == 0)
+ if (!ch || !ch->mdt)
return; /* note: mdt==0 implies a malloc()
* failure w/in chan_up() routine */
@@ -907,7 +907,7 @@ musycc_bh_tx_eom(mpi_t *pi, int gchan)
ch->txd_irq_srv = md->snext;
md->data = 0;
- if (md->mem_token != 0) {
+ if (md->mem_token) {
/* upcount channel */
atomic_sub(OS_mem_token_tlen(md->mem_token), &ch->tx_pending);
/* upcount card */
@@ -931,7 +931,7 @@ musycc_bh_tx_eom(mpi_t *pi, int gchan)
#endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/
OS_mem_token_free_irq(md->mem_token);
- md->mem_token = 0;
+ md->mem_token = NULL;
}
md->status = 0;
#ifdef RLD_TXFULL_DEBUG
@@ -1012,13 +1012,13 @@ musycc_bh_rx_eom(mpi_t *pi, int gchan)
u_int32_t error;
ch = pi->chan[gchan];
- if (ch == 0 || ch->state != UP) {
+ if (!ch || ch->state != UP) {
if (cxt1e1_log_level > LOG_ERROR)
pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
pi->up->devname, gchan);
return;
}
- if (ch->mdr == 0)
+ if (!ch->mdr)
return; /* can this happen ? */
for (;;) {
@@ -1566,18 +1566,18 @@ musycc_chan_down(ci_t *dummy, int channum)
pi->regram->rmp[gchan] = 0;
FLUSH_MEM_WRITE();
for (i = 0; i < ch->txd_num; i++)
- if (ch->mdt[i].mem_token != 0)
+ if (ch->mdt[i].mem_token)
OS_mem_token_free(ch->mdt[i].mem_token);
for (i = 0; i < ch->rxd_num; i++)
- if (ch->mdr[i].mem_token != 0)
+ if (ch->mdr[i].mem_token)
OS_mem_token_free(ch->mdr[i].mem_token);
OS_kfree(ch->mdr);
- ch->mdr = 0;
+ ch->mdr = NULL;
ch->rxd_num = 0;
OS_kfree(ch->mdt);
- ch->mdt = 0;
+ ch->mdt = NULL;
ch->txd_num = 0;
musycc_update_timeslots(pi);
@@ -1746,7 +1746,7 @@ musycc_start_xmit(ci_t *ci, int channum, void *mem_token)
#endif
u |= (PADFILL_ENABLE | (ch->p.pad_fill_count << EXTRA_FLAGS));
}
- md->mem_token = len ? 0 : mem_token; /* Fill in mds on last
+ md->mem_token = len ? NULL : mem_token; /* Fill in mds on last
* segment, others set ZERO
* so that entire token is
* removed ONLY when ALL
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 2383c609bf3..4028ea11c44 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -70,7 +70,7 @@ extern void *memset (void *s, int c, size_t n);
#endif
int drvr_state = SBE_DRVR_INIT;
-ci_t *c4_list = 0;
+ci_t *c4_list = NULL;
ci_t *CI; /* dummy pointer to board ZEROE's data -
* DEBUG USAGE */
@@ -119,7 +119,7 @@ c4_find_chan (int channum)
return ch;
}
}
- return 0;
+ return NULL;
}
@@ -145,7 +145,7 @@ c4_new (void *hi)
pr_warning("failed CI malloc, size %u.\n",
(unsigned int) sizeof (ci_t));
- if (CI == 0)
+ if (!CI)
CI = ci; /* DEBUG, only board 0 usage */
return ci;
}
@@ -831,7 +831,7 @@ c4_musycc_rw (ci_t *ci, struct c4_musycc_param *mcp)
{
mpi_t *pi;
volatile u_int32_t *dph; /* hardware implemented register */
- u_int32_t *dpr = 0; /* RAM image of registers for group command
+ u_int32_t *dpr = NULL; /* RAM image of registers for group command
* usage */
int offset = mcp->offset % 0x800; /* group relative address
* offset, mcp->portnum is
@@ -1060,7 +1060,7 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
}
/* save off interface assignments which bound a board */
- if (ci->first_if == 0) /* first channel registered is assumed to
+ if (!ci->first_if) /* first channel registered is assumed to
* be the lowest channel */
{
ci->first_if = ci->last_if = user;
@@ -1392,7 +1392,7 @@ c4_chan_up (ci_t *ci, int channum)
md->status = HOST_TX_OWNED; /* Host owns TX descriptor ** CODING
* NOTE: HOST_TX_OWNED = 0 so no need to
* byteSwap */
- md->mem_token = 0;
+ md->mem_token = NULL;
md->data = 0;
if (i == (txnum - 1))
{
@@ -1448,10 +1448,10 @@ errfree:
OS_mem_token_free (ch->mdr[i].mem_token);
}
OS_kfree (ch->mdt);
- ch->mdt = 0;
+ ch->mdt = NULL;
ch->txd_num = 0;
OS_kfree (ch->mdr);
- ch->mdr = 0;
+ ch->mdr = NULL;
ch->rxd_num = 0;
ch->state = DOWN;
return ENOBUFS;
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 3c6d1c0fc6d..ba3ff3efe06 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -73,7 +73,7 @@ OS_mem_token_alloc (size_t size)
if (!skb)
{
//pr_warning("no mem in OS_mem_token_alloc !\n");
- return 0;
+ return NULL;
}
return skb;
}
@@ -103,7 +103,7 @@ OS_mem_token_data (void *token)
static inline void *
OS_mem_token_next (void *token)
{
- return 0;
+ return NULL;
}
diff --git a/drivers/staging/cxt1e1/sbecrc.c b/drivers/staging/cxt1e1/sbecrc.c
index 87512a53f72..81fa8a3a462 100644
--- a/drivers/staging/cxt1e1/sbecrc.c
+++ b/drivers/staging/cxt1e1/sbecrc.c
@@ -88,7 +88,7 @@ sbeCrc(u_int8_t *buffer, /* data buffer to crc */
u_int32_t initialCrc, /* starting CRC */
u_int32_t *result)
{
- u_int32_t *tbl = 0;
+ u_int32_t *tbl = NULL;
u_int32_t temp1, temp2, crc;
/*
@@ -102,7 +102,7 @@ sbeCrc(u_int8_t *buffer, /* data buffer to crc */
genCrcTable(tbl);
#else
tbl = (u_int32_t *) OS_kmalloc(CRC_TABLE_ENTRIES * sizeof(u_int32_t));
- if (tbl == 0) {
+ if (!tbl) {
*result = 0; /* dummy up return value due to malloc
* failure */
return;
diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c
index 791993fec96..6ec51bccceb 100644
--- a/drivers/staging/cxt1e1/sbeid.c
+++ b/drivers/staging/cxt1e1/sbeid.c
@@ -22,7 +22,7 @@
char *
sbeid_get_bdname (ci_t *ci)
{
- char *np = 0;
+ char *np = NULL;
switch (ci->brd_id)
{
diff --git a/drivers/staging/cxt1e1/sbeproc.c b/drivers/staging/cxt1e1/sbeproc.c
index 9361dd8ce12..353c001d3fb 100644
--- a/drivers/staging/cxt1e1/sbeproc.c
+++ b/drivers/staging/cxt1e1/sbeproc.c
@@ -44,7 +44,7 @@ void sbecom_proc_brd_cleanup(ci_t *ci)
static void sbecom_proc_get_brdinfo(ci_t *ci, struct sbe_brd_info *bip)
{
hdw_info_t *hi = &hdw_info[ci->brdno];
- u_int8_t *bsn = 0;
+ u_int8_t *bsn = NULL;
switch (hi->promfmt)
{
diff --git a/drivers/staging/cxt1e1/sbew_ioc.h b/drivers/staging/cxt1e1/sbew_ioc.h
index ce9b15c7189..e1e5bfc9ad3 100644
--- a/drivers/staging/cxt1e1/sbew_ioc.h
+++ b/drivers/staging/cxt1e1/sbew_ioc.h
@@ -39,21 +39,21 @@
*/
#define SBE_IOC_LOGLEVEL _IOW(SBE_IOC_MAGIC, 0x00, int)
-#define SBE_IOC_CHAN_NEW _IOW(SBE_IOC_MAGIC, 0x01,int) /* unused */
-#define SBE_IOC_CHAN_UP _IOW(SBE_IOC_MAGIC, 0x02,int) /* unused */
-#define SBE_IOC_CHAN_DOWN _IOW(SBE_IOC_MAGIC, 0x03,int) /* unused */
-#define SBE_IOC_CHAN_GET _IOWR(SBE_IOC_MAGIC,0x04, struct sbecom_chan_param)
+#define SBE_IOC_CHAN_NEW _IOW(SBE_IOC_MAGIC, 0x01, int) /* unused */
+#define SBE_IOC_CHAN_UP _IOW(SBE_IOC_MAGIC, 0x02, int) /* unused */
+#define SBE_IOC_CHAN_DOWN _IOW(SBE_IOC_MAGIC, 0x03, int) /* unused */
+#define SBE_IOC_CHAN_GET _IOWR(SBE_IOC_MAGIC, 0x04, struct sbecom_chan_param)
#define SBE_IOC_CHAN_SET _IOW(SBE_IOC_MAGIC, 0x05, struct sbecom_chan_param)
-#define SBE_IOC_CHAN_GET_STAT _IOWR(SBE_IOC_MAGIC,0x06, struct sbecom_chan_stats)
+#define SBE_IOC_CHAN_GET_STAT _IOWR(SBE_IOC_MAGIC, 0x06, struct sbecom_chan_stats)
#define SBE_IOC_CHAN_DEL_STAT _IOW(SBE_IOC_MAGIC, 0x07, int)
#define SBE_IOC_PORTS_ENABLE _IOW(SBE_IOC_MAGIC, 0x0A, int)
-#define SBE_IOC_PORT_GET _IOWR(SBE_IOC_MAGIC,0x0C, struct sbecom_port_param)
+#define SBE_IOC_PORT_GET _IOWR(SBE_IOC_MAGIC, 0x0C, struct sbecom_port_param)
#define SBE_IOC_PORT_SET _IOW(SBE_IOC_MAGIC, 0x0D, struct sbecom_port_param)
-#define SBE_IOC_READ_VEC _IOWR(SBE_IOC_MAGIC,0x10, struct sbecom_wrt_vec)
-#define SBE_IOC_WRITE_VEC _IOWR(SBE_IOC_MAGIC,0x11, struct sbecom_wrt_vec)
+#define SBE_IOC_READ_VEC _IOWR(SBE_IOC_MAGIC, 0x10, struct sbecom_wrt_vec)
+#define SBE_IOC_WRITE_VEC _IOWR(SBE_IOC_MAGIC, 0x11, struct sbecom_wrt_vec)
#define SBE_IOC_GET_SN _IOR(SBE_IOC_MAGIC, 0x12, u_int32_t)
#define SBE_IOC_RESET_DEV _IOW(SBE_IOC_MAGIC, 0x13, int)
-#define SBE_IOC_FRAMER_GET _IOWR(SBE_IOC_MAGIC,0x14, struct sbecom_framer_param)
+#define SBE_IOC_FRAMER_GET _IOWR(SBE_IOC_MAGIC, 0x14, struct sbecom_framer_param)
#define SBE_IOC_FRAMER_SET _IOW(SBE_IOC_MAGIC, 0x15, struct sbecom_framer_param)
#define SBE_IOC_CARD_GET _IOR(SBE_IOC_MAGIC, 0x20, struct sbecom_card_param)
#define SBE_IOC_CARD_SET _IOW(SBE_IOC_MAGIC, 0x21, struct sbecom_card_param)
@@ -61,13 +61,13 @@
#define SBE_IOC_CARD_DEL_STAT _IO(SBE_IOC_MAGIC, 0x23)
#define SBE_IOC_CARD_CHAN_STAT _IOR(SBE_IOC_MAGIC, 0x24, struct sbecom_chan_stats)
#define SBE_IOC_CARD_BLINK _IOW(SBE_IOC_MAGIC, 0x30, int)
-#define SBE_IOC_DRVINFO_GET _IOWR(SBE_IOC_MAGIC,0x31, struct sbe_drv_info)
+#define SBE_IOC_DRVINFO_GET _IOWR(SBE_IOC_MAGIC, 0x31, struct sbe_drv_info)
#define SBE_IOC_BRDINFO_GET _IOR(SBE_IOC_MAGIC, 0x32, struct sbe_brd_info)
-#define SBE_IOC_IID_GET _IOWR(SBE_IOC_MAGIC,0x33, struct sbe_iid_info)
+#define SBE_IOC_IID_GET _IOWR(SBE_IOC_MAGIC, 0x33, struct sbe_iid_info)
#define SBE_IOC_BRDADDR_GET _IOWR(SBE_IOC_MAGIC, 0x34, struct sbe_brd_addr)
#ifdef NOT_YET_COMMON
-#define SBE_IOC_TSIOC_GET _IOWR(SBE_IOC_MAGIC,0x16, struct wanc1t3_ts_param)
+#define SBE_IOC_TSIOC_GET _IOWR(SBE_IOC_MAGIC, 0x16, struct wanc1t3_ts_param)
#define SBE_IOC_TSIOC_SET _IOW(SBE_IOC_MAGIC, 0x17, struct wanc1t3_ts_param)
#endif
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
index 9f1fce157c7..3abe8d2bb74 100644
--- a/drivers/staging/dgap/Makefile
+++ b/drivers/staging/dgap/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -DDG_NAME=\"dgap-1.3-16\" -DDG_PART=\"40002347_C\"
-
obj-$(CONFIG_DGAP) += dgap.o
diff --git a/drivers/staging/dgap/dgap_downld.h b/drivers/staging/dgap/dgap_downld.h
index f79e65cd1d5..271ac19257f 100644
--- a/drivers/staging/dgap/dgap_downld.h
+++ b/drivers/staging/dgap/dgap_downld.h
@@ -35,7 +35,7 @@
struct fepimg {
int type; /* board type */
int len; /* length of image */
- char fepimage[1]; /* begining of image */
+ char fepimage[1]; /* beginning of image */
};
struct downldio {
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
index 40ef785a042..4c1515ee56e 100644
--- a/drivers/staging/dgap/dgap_driver.c
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -32,16 +32,12 @@
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h> /* For udelay */
#include <linux/slab.h>
#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
#include <linux/sched.h>
-#endif
#include "dgap_driver.h"
#include "dgap_pci.h"
@@ -420,8 +416,7 @@ void dgap_cleanup_module(void)
unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
}
- if (dgap_config_buf)
- kfree(dgap_config_buf);
+ kfree(dgap_config_buf);
for (i = 0; i < dgap_NumBoards; ++i) {
dgap_remove_ports_sysfiles(dgap_Board[i]);
@@ -488,10 +483,8 @@ static void dgap_cleanup_board(struct board_t *brd)
}
}
- if (brd->flipbuf)
- kfree(brd->flipbuf);
- if (brd->flipflagbuf)
- kfree(brd->flipflagbuf);
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
dgap_Board[brd->boardnum] = NULL;
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
index b1cf489a729..7d631e80c00 100644
--- a/drivers/staging/dgap/dgap_driver.h
+++ b/drivers/staging/dgap/dgap_driver.h
@@ -46,13 +46,16 @@
/*
* Driver identification, error and debugging statments
*
- * In theory, you can change all occurances of "digi" in the next
+ * In theory, you can change all occurrences of "digi" in the next
* three lines, and the driver printk's will all automagically change.
*
* APR((fmt, args, ...)); Always prints message
* DPR((fmt, args, ...)); Only prints if DGAP_TRACER is defined at
* compile time and dgap_debug!=0
*/
+#define DG_NAME "dgap-1.3-16"
+#define DG_PART "40002347_C"
+
#define PROCSTR "dgap" /* /proc entries */
#define DEVSTR "/dev/dg/dgap" /* /dev entries */
#define DRVSTR "dgap" /* Driver name string
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
index 4464f02c957..794cf9db8b8 100644
--- a/drivers/staging/dgap/dgap_fep5.c
+++ b/drivers/staging/dgap/dgap_fep5.c
@@ -134,7 +134,7 @@ int dgap_after_config_loaded(void)
dgap_Board[i]->flipflagbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
}
- return (rc);
+ return rc;
}
@@ -150,14 +150,14 @@ static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *fro
int n = U2BSIZE;
if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return(-EFAULT);
+ return -EFAULT;
while (len) {
if (n > len)
n = len;
if (copy_from_user((char *) &buf, from_addr, n) == -1 ) {
- return(-EFAULT);
+ return -EFAULT;
}
/* Copy data from buffer to card memory */
@@ -169,7 +169,7 @@ static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *fro
from_addr += n;
n = U2BSIZE;
}
- return(0);
+ return 0;
}
@@ -1155,20 +1155,20 @@ uint dgap_get_custom_baud(struct channel_t *ch)
uint value = 0;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- return (0);
+ return 0;
}
if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) {
- return (0);
+ return 0;
}
if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return (0);
+ return 0;
vaddr = ch->ch_bd->re_map_membase;
if (!vaddr)
- return (0);
+ return 0;
/*
* Go get from fep mem, what the fep
@@ -1178,7 +1178,7 @@ uint dgap_get_custom_baud(struct channel_t *ch)
(ch->ch_portnum * 0x28) + LINE_SPEED));
value = readw(vaddr + offset);
- return (value);
+ return value;
}
@@ -1229,29 +1229,24 @@ int dgap_param(struct tty_struct *tty)
uchar mval;
uchar hflow;
- if (!tty || tty->magic != TTY_MAGIC) {
- return (-ENXIO);
- }
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -ENXIO;
un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC) {
- return (-ENXIO);
- }
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENXIO;
ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- return (-ENXIO);
- }
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC) {
- return (-ENXIO);
- }
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
bs = ch->ch_bs;
- if (bs == 0) {
- return (-ENXIO);
- }
+ if (!bs)
+ return -ENXIO;
DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
@@ -1558,7 +1553,7 @@ int dgap_param(struct tty_struct *tty)
DPR_PARAM(("param finish\n"));
- return (0);
+ return 0;
}
@@ -1675,7 +1670,7 @@ static int dgap_event(struct board_t *bd)
int b1;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-ENXIO);
+ return -ENXIO;
DGAP_LOCK(bd->bd_lock, lock_flags);
@@ -1683,7 +1678,7 @@ static int dgap_event(struct board_t *bd)
if (!vaddr) {
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return (-ENXIO);
+ return -ENXIO;
}
eaddr = (struct ev_t *) (vaddr + EVBUF);
@@ -1701,7 +1696,7 @@ static int dgap_event(struct board_t *bd)
DPR_EVENT(("should be calling xxfail %d\n", __LINE__));
/* Let go of board lock */
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return (-ENXIO);
+ return -ENXIO;
}
/*
@@ -1949,5 +1944,5 @@ next:
writew(tail, &(eaddr->ev_tail));
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return (0);
+ return 0;
}
diff --git a/drivers/staging/dgap/dgap_fep5.h b/drivers/staging/dgap/dgap_fep5.h
index 3a12ba5e3c2..c9abc406a1e 100644
--- a/drivers/staging/dgap/dgap_fep5.h
+++ b/drivers/staging/dgap/dgap_fep5.h
@@ -211,7 +211,7 @@ struct bs_t {
#define SIFLAG 0xea /* Set UNIX iflags */
#define SFLOWC 0xeb /* Set flow control characters */
#define STLOW 0xec /* Set transmit low water mark */
-#define RPAUSE 0xee /* Pause recieve */
+#define RPAUSE 0xee /* Pause receive */
#define RRESUME 0xef /* Resume receive */
#define CHRESET 0xf0 /* Reset Channel */
#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
diff --git a/drivers/staging/dgap/dgap_kcompat.h b/drivers/staging/dgap/dgap_kcompat.h
index 8ebf4b7373b..0dc2404922f 100644
--- a/drivers/staging/dgap/dgap_kcompat.h
+++ b/drivers/staging/dgap/dgap_kcompat.h
@@ -28,11 +28,6 @@
#ifndef __DGAP_KCOMPAT_H
#define __DGAP_KCOMPAT_H
-# ifndef KERNEL_VERSION
-# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-# endif
-
-
#if !defined(TTY_FLIPBUF_SIZE)
# define TTY_FLIPBUF_SIZE 512
#endif
@@ -66,28 +61,4 @@
module_param(VAR, long, PERM); \
MODULE_PARM_DESC(VAR, DESC);
-
-
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
-
-
-
-
-/* NOTHING YET */
-
-
-
-
-# else
-
-
-
-# error "this driver does not support anything below the 2.6.27 kernel series."
-
-
-
-# endif
-
#endif /* ! __DGAP_KCOMPAT_H */
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
index 5497e6de060..ff9d19449b4 100644
--- a/drivers/staging/dgap/dgap_parse.c
+++ b/drivers/staging/dgap/dgap_parse.c
@@ -904,7 +904,7 @@ int dgap_parsefile(char **in, int Remove)
/*
* dgap_sindex: much like index(), but it looks for a match of any character in
* the group, and returns that position. If the first character is a ^, then
- * this will match the first occurence not in that group.
+ * this will match the first occurrence not in that group.
*/
static char *dgap_sindex (char *string, char *group)
{
@@ -1013,8 +1013,10 @@ static void dgap_err(char *s)
static struct cnode *dgap_newnode(int t)
{
struct cnode *n;
- if ( (n = (struct cnode *) kmalloc(sizeof(struct cnode ), GFP_ATOMIC) ) != NULL) {
- memset( (char *)n, 0, sizeof(struct cnode ) );
+
+ n = kmalloc(sizeof(struct cnode), GFP_ATOMIC);
+ if (n != NULL) {
+ memset((char *)n, 0, sizeof(struct cnode));
n->type = t;
}
return(n);
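
An equivalent, arguably tidier shape for dgap_newnode() (not what this patch does) would let kzalloc() do the zeroing:

static struct cnode *dgap_newnode(int t)
{
	struct cnode *n;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (n)
		n->type = t;
	return n;
}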
@@ -1150,7 +1152,7 @@ uint dgap_config_get_altpin(struct board_t *bd)
/*
* Given a specific type of board, if found, detached link and
- * returns the first occurance in the list.
+ * returns the first occurrence in the list.
*/
struct cnode *dgap_find_config(int type, int bus, int slot)
{
diff --git a/drivers/staging/dgap/dgap_sysfs.c b/drivers/staging/dgap/dgap_sysfs.c
index 94da06fcf7e..7f4ec9a1829 100644
--- a/drivers/staging/dgap/dgap_sysfs.c
+++ b/drivers/staging/dgap/dgap_sysfs.c
@@ -395,7 +395,7 @@ static ssize_t dgap_tty_state_show(struct device *d, struct device_attribute *at
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -420,7 +420,7 @@ static ssize_t dgap_tty_baud_show(struct device *d, struct device_attribute *att
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -445,7 +445,7 @@ static ssize_t dgap_tty_msignals_show(struct device *d, struct device_attribute
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -479,7 +479,7 @@ static ssize_t dgap_tty_iflag_show(struct device *d, struct device_attribute *at
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -504,7 +504,7 @@ static ssize_t dgap_tty_cflag_show(struct device *d, struct device_attribute *at
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -529,7 +529,7 @@ static ssize_t dgap_tty_oflag_show(struct device *d, struct device_attribute *at
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -554,7 +554,7 @@ static ssize_t dgap_tty_lflag_show(struct device *d, struct device_attribute *at
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -579,7 +579,7 @@ static ssize_t dgap_tty_digi_flag_show(struct device *d, struct device_attribute
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -604,7 +604,7 @@ static ssize_t dgap_tty_rxcount_show(struct device *d, struct device_attribute *
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -629,7 +629,7 @@ static ssize_t dgap_tty_txcount_show(struct device *d, struct device_attribute *
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
@@ -661,7 +661,7 @@ static ssize_t dgap_tty_name_show(struct device *d, struct device_attribute *att
if (!d)
return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return (0);
ch = un->un_ch;
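
Every dgap_sysfs.c hunk above makes the same change: dev_get_drvdata() returns void *, and C converts void * to any object pointer implicitly, so the explicit cast adds nothing. A small kernel-context sketch with an invented unit type:

#include <linux/device.h>

struct demo_unit {
	int magic;
};

/* before: redundant cast of the void * result */
static struct demo_unit *demo_get_unit_old(struct device *d)
{
	return (struct demo_unit *) dev_get_drvdata(d);
}

/* after: the implicit conversion is enough */
static struct demo_unit *demo_get_unit_new(struct device *d)
{
	return dev_get_drvdata(d);
}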
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
index b906db30b61..2a7a37298da 100644
--- a/drivers/staging/dgap/dgap_tty.c
+++ b/drivers/staging/dgap/dgap_tty.c
@@ -249,7 +249,7 @@ int dgap_tty_register(struct board_t *brd)
/*
* If we're doing transparent print, we have to do all of the above
- * again, seperately so we don't get the LD confused about what major
+ * again, separately so we don't get the LD confused about what major
* we are when we get into the dgap_tty_open() routine.
*/
brd->PrintDriver = alloc_tty_driver(MAXPORTS);
@@ -497,10 +497,8 @@ int dgap_tty_init(struct board_t *brd)
*/
void dgap_tty_post_uninit(void)
{
- if (dgap_TmpWriteBuf) {
- kfree(dgap_TmpWriteBuf);
- dgap_TmpWriteBuf = NULL;
- }
+ kfree(dgap_TmpWriteBuf);
+ dgap_TmpWriteBuf = NULL;
}
@@ -522,10 +520,8 @@ void dgap_tty_uninit(struct board_t *brd)
tty_unregister_device(brd->SerialDriver, i);
}
tty_unregister_driver(brd->SerialDriver);
- if (brd->SerialDriver->ttys) {
- kfree(brd->SerialDriver->ttys);
- brd->SerialDriver->ttys = NULL;
- }
+ kfree(brd->SerialDriver->ttys);
+ brd->SerialDriver->ttys = NULL;
put_tty_driver(brd->SerialDriver);
brd->dgap_Major_Serial_Registered = FALSE;
}
@@ -538,10 +534,8 @@ void dgap_tty_uninit(struct board_t *brd)
tty_unregister_device(brd->PrintDriver, i);
}
tty_unregister_driver(brd->PrintDriver);
- if (brd->PrintDriver->ttys) {
- kfree(brd->PrintDriver->ttys);
- brd->PrintDriver->ttys = NULL;
- }
+ kfree(brd->PrintDriver->ttys);
+ brd->PrintDriver->ttys = NULL;
put_tty_driver(brd->PrintDriver);
brd->dgap_Major_TransparentPrint_Registered = FALSE;
}
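
The dgap_tty uninit hunks rely on kfree(NULL) being a documented no-op, so the guarding if around each kfree() is redundant and only the free plus the pointer reset remain. A sketch with an invented structure:

#include <linux/slab.h>

struct demo_driver {
	void *ttys;
};

/* before: the NULL check duplicates what kfree() already does */
static void demo_release_old(struct demo_driver *drv)
{
	if (drv->ttys) {
		kfree(drv->ttys);
		drv->ttys = NULL;
	}
}

/* after: kfree(NULL) is a no-op, so the check is dropped */
static void demo_release_new(struct demo_driver *drv)
{
	kfree(drv->ttys);
	drv->ttys = NULL;
}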
@@ -601,7 +595,7 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b
/*
* Loop while data remains.
*/
- while (nbuf > 0 && ch->ch_sniff_buf != 0) {
+ while (nbuf > 0 && ch->ch_sniff_buf) {
/*
* Determine the amount of available space left in the
* buffer. If there's none, wait until some appears.
@@ -1069,7 +1063,7 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file)
DGAP_LOCK(brd->bd_lock, lock_flags);
- /* The wait above should guarentee this cannot happen */
+ /* The wait above should guarantee this cannot happen */
if (brd->state != BOARD_READY) {
DGAP_UNLOCK(brd->bd_lock, lock_flags);
return -ENXIO;
@@ -1113,9 +1107,10 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file)
MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), un, brd->name));
/*
- * Error if channel info pointer is 0.
+ * Error if channel info pointer is NULL.
*/
- if ((bs = ch->ch_bs) == 0) {
+ bs = ch->ch_bs;
+ if (!bs) {
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(brd->bd_lock, lock_flags);
DPR_OPEN(("%d BS is 0!\n", __LINE__));
@@ -3513,10 +3508,6 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return(-EINVAL);
}
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(-ENOIOCTLCMD);
-
case DIGI_GETA:
/* get information for ditty */
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
@@ -3586,12 +3577,4 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return(-ENOIOCTLCMD);
}
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
- dgap_ioctl_name(cmd), cmd, arg));
-
- return(0);
}
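
The deletions in dgap_tty_ioctl() appear to drop statements left stranded by earlier returns: a second unlock-and-return inside a case, and a tail after the big switch, assuming (as the deletion implies) that every path out of the switch already returns. A minimal kernel-context sketch of that dead-code shape, with an invented command number:

#include <linux/errno.h>

static int demo_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case 1:
		return -EINVAL;
		/* anything placed here (unlock + return) is unreachable */
	default:
		return -ENOIOCTLCMD;
	}
	/* code after the switch is unreachable too: every case returns */
}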
diff --git a/drivers/staging/dgap/digi.h b/drivers/staging/dgap/digi.h
index 651e2e5e93c..bcea4f734a3 100644
--- a/drivers/staging/dgap/digi.h
+++ b/drivers/staging/dgap/digi.h
@@ -203,9 +203,9 @@ struct shrink_buf_struct {
unsigned long shrink_buf_vaddr; /* Virtual address of board */
unsigned long shrink_buf_phys; /* Physical address of board */
unsigned long shrink_buf_bseg; /* Amount of board memory */
- unsigned long shrink_buf_hseg; /* '186 Begining of Dual-Port */
+ unsigned long shrink_buf_hseg; /* '186 Beginning of Dual-Port */
- unsigned long shrink_buf_lseg; /* '186 Begining of freed memory */
+ unsigned long shrink_buf_lseg; /* '186 Beginning of freed memory */
unsigned long shrink_buf_mseg; /* Linear address from start of
dual-port were freed memory
begins, host viewpoint. */
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
index 57dfd6bafcf..638c5da43c8 100644
--- a/drivers/staging/dgap/downld.c
+++ b/drivers/staging/dgap/downld.c
@@ -52,7 +52,7 @@ char *pgm;
void myperror();
/*
-** This structure is used to keep track of the diferent images available
+** This structure is used to keep track of the different images available
** to give to the driver. It is arranged so that the things that are
** constants or that have defaults are first inthe strucutre to simplify
** the table of initializers.
@@ -789,7 +789,7 @@ int main(int argc, char **argv)
/*
** myperror()
**
-** Same as normal perror(), but places the program name at the begining
+** Same as normal perror(), but places the program name at the beginning
** of the message.
*/
void myperror(char *s)
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 117e1580824..fdc1aabc7fd 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -33,7 +33,7 @@
#include <linux/sched.h> /* For jiffies, task states */
#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
#include <linux/delay.h> /* For udelay */
-#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/io.h> /* For read[bwl]/write[bwl] */
#include <linux/serial.h> /* For struct async_serial */
#include <linux/serial_reg.h> /* For the various UART offsets */
#include <linux/pci.h>
@@ -43,7 +43,7 @@
#include "dgnc_tty.h"
#include "dgnc_trace.h"
-static inline void cls_parse_isr(struct board_t *brd, uint port);
+static inline void cls_parse_isr(struct dgnc_board *brd, uint port);
static inline void cls_clear_break(struct channel_t *ch, int force);
static inline void cls_set_cts_flow_control(struct channel_t *ch);
static inline void cls_set_rts_flow_control(struct channel_t *ch);
@@ -53,7 +53,7 @@ static inline void cls_set_no_output_flow_control(struct channel_t *ch);
static inline void cls_set_no_input_flow_control(struct channel_t *ch);
static void cls_parse_modem(struct channel_t *ch, uchar signals);
static void cls_tasklet(unsigned long data);
-static void cls_vpd(struct board_t *brd);
+static void cls_vpd(struct dgnc_board *brd);
static void cls_uart_init(struct channel_t *ch);
static void cls_uart_off(struct channel_t *ch);
static int cls_drain(struct tty_struct *tty, uint seconds);
@@ -393,7 +393,7 @@ static inline void cls_clear_break(struct channel_t *ch, int force)
/* Parse the ISR register for the specific port */
-static inline void cls_parse_isr(struct board_t *brd, uint port)
+static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
{
struct channel_t *ch;
uchar isr = 0;
@@ -417,9 +417,8 @@ static inline void cls_parse_isr(struct board_t *brd, uint port)
isr = readb(&ch->ch_cls_uart->isr_fcr);
/* Bail if no pending interrupt on port */
- if (isr & UART_IIR_NO_INT) {
+ if (isr & UART_IIR_NO_INT)
break;
- }
DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, port, isr));
@@ -444,9 +443,8 @@ static inline void cls_parse_isr(struct board_t *brd, uint port)
}
/* Received Xoff signal/Special character */
- if (isr & UART_IIR_XOFF) {
+ if (isr & UART_IIR_XOFF)
/* Empty */
- }
/* CTS/RTS change of state */
if (isr & UART_IIR_CTSRTS) {
@@ -477,28 +475,24 @@ static void cls_param(struct tty_struct *tty)
uchar uart_ier = 0;
uint baud = 9600;
int quot = 0;
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
- if (!tty || tty->magic != TTY_MAGIC) {
+ if (!tty || tty->magic != TTY_MAGIC)
return;
- }
un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
return;
- }
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- }
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- }
DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
@@ -725,7 +719,7 @@ static void cls_param(struct tty_struct *tty)
*/
static void cls_tasklet(unsigned long data)
{
- struct board_t *bd = (struct board_t *) data;
+ struct dgnc_board *bd = (struct dgnc_board *) data;
struct channel_t *ch;
ulong lock_flags;
int i;
@@ -802,7 +796,7 @@ static void cls_tasklet(unsigned long data)
*/
static irqreturn_t cls_intr(int irq, void *voidbrd)
{
- struct board_t *brd = (struct board_t *) voidbrd;
+ struct dgnc_board *brd = (struct dgnc_board *) voidbrd;
uint i = 0;
uchar poll_reg;
unsigned long lock_flags;
@@ -976,17 +970,17 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
int rc = 0;
if (!tty || tty->magic != TTY_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
un = (struct un_t *) tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -1002,7 +996,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
if (rc)
DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
- return (rc);
+ return rc;
}
@@ -1305,9 +1299,8 @@ static uint cls_get_uart_bytes_left(struct channel_t *ch)
/* Determine whether the Transmitter is empty or not */
if (!(lsr & UART_LSR_TEMT)) {
- if (ch->ch_flags & CH_TX_FIFO_EMPTY) {
+ if (ch->ch_flags & CH_TX_FIFO_EMPTY)
tasklet_schedule(&ch->ch_bd->helper_tasklet);
- }
left = 1;
}
else {
@@ -1378,7 +1371,7 @@ static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
writeb(c, &ch->ch_cls_uart->txrx);
}
-static void cls_vpd(struct board_t *brd)
+static void cls_vpd(struct dgnc_board *brd)
{
ulong vpdbase; /* Start of io base of the card */
u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
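
dgnc_cls.c begins the series-wide rename of struct board_t to struct dgnc_board, presumably to give the type a name that is unambiguous next to the sibling dgap driver's own board structure; the patch itself is purely mechanical. A sketch of the rename on a forward declaration and a prototype (the #if 0 arm keeps the old spelling only for comparison):

struct board_t;
struct dgnc_board;

#if 0	/* before the rename */
void cls_vpd(struct board_t *brd);
#else	/* after the rename */
void cls_vpd(struct dgnc_board *brd);
#endif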
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 71d2b83cc3a..c204266cb69 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -31,15 +31,10 @@
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
#include <linux/sched.h>
-#endif
-
#include "dgnc_driver.h"
#include "dgnc_pci.h"
#include "dpacompat.h"
@@ -71,16 +66,16 @@ PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
*
*/
static int dgnc_start(void);
-static int dgnc_finalize_board_init(struct board_t *brd);
+static int dgnc_finalize_board_init(struct dgnc_board *brd);
static void dgnc_init_globals(void);
static int dgnc_found_board(struct pci_dev *pdev, int id);
-static void dgnc_cleanup_board(struct board_t *brd);
+static void dgnc_cleanup_board(struct dgnc_board *brd);
static void dgnc_poll_handler(ulong dummy);
static int dgnc_init_pci(void);
static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void dgnc_remove_one(struct pci_dev *dev);
static int dgnc_probe1(struct pci_dev *pdev, int card_type);
-static void dgnc_do_remap(struct board_t *brd);
+static void dgnc_do_remap(struct dgnc_board *brd);
/* Driver load/unload functions */
int dgnc_init_module(void);
@@ -106,7 +101,7 @@ static struct file_operations dgnc_BoardFops =
* Globals
*/
uint dgnc_NumBoards;
-struct board_t *dgnc_Board[MAXBOARDS];
+struct dgnc_board *dgnc_Board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
int dgnc_driver_state = DRIVER_INITIALIZED;
ulong dgnc_poll_counter;
@@ -225,7 +220,7 @@ int dgnc_init_module(void)
rc = dgnc_start();
if (rc < 0) {
- return(rc);
+ return rc;
}
/*
@@ -250,7 +245,7 @@ int dgnc_init_module(void)
}
DPR_INIT(("Finished init_module. Returning %d\n", rc));
- return (rc);
+ return rc;
}
@@ -286,21 +281,14 @@ static int dgnc_start(void)
if (rc <= 0) {
APR(("Can't register dgnc driver device (%d)\n", rc));
rc = -ENXIO;
- return(rc);
+ return rc;
}
dgnc_Major = rc;
dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- device_create_drvdata(dgnc_class, NULL,
- MKDEV(dgnc_Major, 0),
- NULL, "dgnc_mgmt");
-#else
device_create(dgnc_class, NULL,
MKDEV(dgnc_Major, 0),
NULL, "dgnc_mgmt");
-#endif
-
dgnc_Major_Control_Registered = TRUE;
}
@@ -311,7 +299,7 @@ static int dgnc_start(void)
if (rc < 0) {
APR(("tty preinit - not enough memory (%d)\n", rc));
- return(rc);
+ return rc;
}
/* Start the poller */
@@ -328,7 +316,7 @@ static int dgnc_start(void)
dgnc_driver_state = DRIVER_READY;
}
- return(rc);
+ return rc;
}
/*
@@ -418,7 +406,7 @@ void dgnc_cleanup_module(void)
*
* Free all the memory associated with a board
*/
-static void dgnc_cleanup_board(struct board_t *brd)
+static void dgnc_cleanup_board(struct dgnc_board *brd)
{
int i = 0;
@@ -491,7 +479,7 @@ static void dgnc_cleanup_board(struct board_t *brd)
*/
static int dgnc_found_board(struct pci_dev *pdev, int id)
{
- struct board_t *brd;
+ struct dgnc_board *brd;
unsigned int pci_irq;
int i = 0;
int rc = 0;
@@ -499,19 +487,16 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
/* get the board structure and prep it */
brd = dgnc_Board[dgnc_NumBoards] =
- (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd) {
- APR(("memory allocation for board structure failed\n"));
- return(-ENOMEM);
- }
+ kzalloc(sizeof(*brd), GFP_KERNEL);
+ if (!brd)
+ return -ENOMEM;
/* make a temporary message buffer for the boot messages */
brd->msgbuf = brd->msgbuf_head =
- (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
+ kzalloc(sizeof(u8) * 8192, GFP_KERNEL);
if (!brd->msgbuf) {
kfree(brd);
- APR(("memory allocation for board msgbuf failed\n"));
- return(-ENOMEM);
+ return -ENOMEM;
}
/* store the info for the board we've found */
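
The dgnc_found_board() hunk above carries two common cleanups: kzalloc(sizeof(*brd), ...) keeps the size tied to the pointer so it cannot go stale if the type ever changes, and the out-of-memory APR() messages are dropped because the allocator already warns on failure. A hedged sketch with an invented board type and buffer size:

#include <linux/slab.h>

struct demo_board {
	int boardnum;
	char *msgbuf;
};

static struct demo_board *demo_alloc_board(void)
{
	struct demo_board *brd;

	/* sizeof(*brd) follows the pointee type automatically */
	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		return NULL;	/* no extra printk: the allocator already warns */

	brd->msgbuf = kzalloc(8192, GFP_KERNEL);
	if (!brd->msgbuf) {
		kfree(brd);
		return NULL;
	}
	return brd;
}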
@@ -663,7 +648,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
default:
APR(("Did not find any compatible Neo or Classic PCI boards in system.\n"));
- return (-ENXIO);
+ return -ENXIO;
}
@@ -725,22 +710,22 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
wake_up_interruptible(&brd->state_wait);
- return(0);
+ return 0;
failed:
- return (-ENXIO);
+ return -ENXIO;
}
-static int dgnc_finalize_board_init(struct board_t *brd) {
+static int dgnc_finalize_board_init(struct dgnc_board *brd) {
int rc = 0;
DPR_INIT(("dgnc_finalize_board_init() - start\n"));
if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return(-ENODEV);
+ return -ENODEV;
DPR_INIT(("dgnc_finalize_board_init() - start #2\n"));
@@ -756,13 +741,13 @@ static int dgnc_finalize_board_init(struct board_t *brd) {
DPR_INIT(("Requested and received usage of IRQ %d\n", brd->irq));
}
}
- return(rc);
+ return rc;
}
/*
* Remap PCI memory.
*/
-static void dgnc_do_remap(struct board_t *brd)
+static void dgnc_do_remap(struct dgnc_board *brd)
{
if (!brd || brd->magic != DGNC_BOARD_MAGIC)
@@ -802,7 +787,7 @@ static void dgnc_do_remap(struct board_t *brd)
static void dgnc_poll_handler(ulong dummy)
{
- struct board_t *brd;
+ struct dgnc_board *brd;
unsigned long lock_flags;
int i;
unsigned long new_time;
@@ -900,7 +885,7 @@ int dgnc_ms_sleep(ulong ms)
{
current->state = TASK_INTERRUPTIBLE;
schedule_timeout((ms * HZ) / 1000);
- return (signal_pending(current));
+ return signal_pending(current);
}
@@ -912,47 +897,47 @@ char *dgnc_ioctl_name(int cmd)
{
switch(cmd) {
- case TCGETA: return("TCGETA");
- case TCGETS: return("TCGETS");
- case TCSETA: return("TCSETA");
- case TCSETS: return("TCSETS");
- case TCSETAW: return("TCSETAW");
- case TCSETSW: return("TCSETSW");
- case TCSETAF: return("TCSETAF");
- case TCSETSF: return("TCSETSF");
- case TCSBRK: return("TCSBRK");
- case TCXONC: return("TCXONC");
- case TCFLSH: return("TCFLSH");
- case TIOCGSID: return("TIOCGSID");
-
- case TIOCGETD: return("TIOCGETD");
- case TIOCSETD: return("TIOCSETD");
- case TIOCGWINSZ: return("TIOCGWINSZ");
- case TIOCSWINSZ: return("TIOCSWINSZ");
-
- case TIOCMGET: return("TIOCMGET");
- case TIOCMSET: return("TIOCMSET");
- case TIOCMBIS: return("TIOCMBIS");
- case TIOCMBIC: return("TIOCMBIC");
+ case TCGETA: return "TCGETA";
+ case TCGETS: return "TCGETS";
+ case TCSETA: return "TCSETA";
+ case TCSETS: return "TCSETS";
+ case TCSETAW: return "TCSETAW";
+ case TCSETSW: return "TCSETSW";
+ case TCSETAF: return "TCSETAF";
+ case TCSETSF: return "TCSETSF";
+ case TCSBRK: return "TCSBRK";
+ case TCXONC: return "TCXONC";
+ case TCFLSH: return "TCFLSH";
+ case TIOCGSID: return "TIOCGSID";
+
+ case TIOCGETD: return "TIOCGETD";
+ case TIOCSETD: return "TIOCSETD";
+ case TIOCGWINSZ: return "TIOCGWINSZ";
+ case TIOCSWINSZ: return "TIOCSWINSZ";
+
+ case TIOCMGET: return "TIOCMGET";
+ case TIOCMSET: return "TIOCMSET";
+ case TIOCMBIS: return "TIOCMBIS";
+ case TIOCMBIC: return "TIOCMBIC";
/* from digi.h */
- case DIGI_SETA: return("DIGI_SETA");
- case DIGI_SETAW: return("DIGI_SETAW");
- case DIGI_SETAF: return("DIGI_SETAF");
- case DIGI_SETFLOW: return("DIGI_SETFLOW");
- case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
- case DIGI_GETFLOW: return("DIGI_GETFLOW");
- case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
- case DIGI_GETA: return("DIGI_GETA");
- case DIGI_GEDELAY: return("DIGI_GEDELAY");
- case DIGI_SEDELAY: return("DIGI_SEDELAY");
- case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
- case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
- case TIOCMODG: return("TIOCMODG");
- case TIOCMODS: return("TIOCMODS");
- case TIOCSDTR: return("TIOCSDTR");
- case TIOCCDTR: return("TIOCCDTR");
-
- default: return("unknown");
+ case DIGI_SETA: return "DIGI_SETA";
+ case DIGI_SETAW: return "DIGI_SETAW";
+ case DIGI_SETAF: return "DIGI_SETAF";
+ case DIGI_SETFLOW: return "DIGI_SETFLOW";
+ case DIGI_SETAFLOW: return "DIGI_SETAFLOW";
+ case DIGI_GETFLOW: return "DIGI_GETFLOW";
+ case DIGI_GETAFLOW: return "DIGI_GETAFLOW";
+ case DIGI_GETA: return "DIGI_GETA";
+ case DIGI_GEDELAY: return "DIGI_GEDELAY";
+ case DIGI_SEDELAY: return "DIGI_SEDELAY";
+ case DIGI_GETCUSTOMBAUD: return "DIGI_GETCUSTOMBAUD";
+ case DIGI_SETCUSTOMBAUD: return "DIGI_SETCUSTOMBAUD";
+ case TIOCMODG: return "TIOCMODG";
+ case TIOCMODS: return "TIOCMODS";
+ case TIOCSDTR: return "TIOCSDTR";
+ case TIOCCDTR: return "TIOCCDTR";
+
+ default: return "unknown";
}
}
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 218b15dccb7..3519b803e75 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -45,7 +45,7 @@
/*
* Driver identification, error and debugging statments
*
- * In theory, you can change all occurances of "digi" in the next
+ * In theory, you can change all occurrences of "digi" in the next
* three lines, and the driver printk's will all automagically change.
*
* APR((fmt, args, ...)); Always prints message
@@ -246,7 +246,7 @@ enum {
*
*************************************************************************/
-struct board_t;
+struct dgnc_board;
struct channel_t;
/************************************************************************
@@ -259,7 +259,7 @@ struct board_ops {
void (*uart_off) (struct channel_t *ch);
int (*drain) (struct tty_struct *tty, uint seconds);
void (*param) (struct tty_struct *tty);
- void (*vpd) (struct board_t *brd);
+ void (*vpd) (struct dgnc_board *brd);
void (*assert_modem_signals) (struct channel_t *ch);
void (*flush_uart_write) (struct channel_t *ch);
void (*flush_uart_read) (struct channel_t *ch);
@@ -282,7 +282,7 @@ struct board_ops {
/*
* Per-board information
*/
-struct board_t {
+struct dgnc_board {
int magic; /* Board Magic number. */
int boardnum; /* Board number: 0-32 */
@@ -449,7 +449,7 @@ struct un_t {
************************************************************************/
struct channel_t {
int magic; /* Channel Magic Number */
- struct board_t *ch_bd; /* Board structure pointer */
+ struct dgnc_board *ch_bd; /* Board structure pointer */
struct digi_t ch_digi; /* Transparent Print structure */
struct un_t ch_tun; /* Terminal unit info */
struct un_t ch_pun; /* Printer unit info */
@@ -555,7 +555,7 @@ extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern int dgnc_trcbuf_size; /* Size of the ringbuffer */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern uint dgnc_NumBoards; /* Total number of boards */
-extern struct board_t *dgnc_Board[MAXBOARDS]; /* Array of board structs */
+extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board structs */
extern ulong dgnc_poll_counter; /* Times the poller has run */
extern char *dgnc_state_text[]; /* Array of state text */
extern char *dgnc_driver_state_text[];/* Array of driver state text */
diff --git a/drivers/staging/dgnc/dgnc_kcompat.h b/drivers/staging/dgnc/dgnc_kcompat.h
index 00f589a13ab..eaec7e6a28e 100644
--- a/drivers/staging/dgnc/dgnc_kcompat.h
+++ b/drivers/staging/dgnc/dgnc_kcompat.h
@@ -28,13 +28,6 @@
#ifndef __DGNC_KCOMPAT_H
#define __DGNC_KCOMPAT_H
-#include <linux/version.h>
-
-# ifndef KERNEL_VERSION
-# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-# endif
-
-
#if !defined(TTY_FLIPBUF_SIZE)
# define TTY_FLIPBUF_SIZE 512
#endif
@@ -68,26 +61,4 @@
module_param(VAR, long, PERM); \
MODULE_PARM_DESC(VAR, DESC);
-
-
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
-
-
-
-/* NOTHING YET */
-
-
-
-# else
-
-
-
-# error "this driver does not support anything below the 2.6.27 kernel series."
-
-
-
-# endif
-
#endif /* ! __DGNC_KCOMPAT_H */
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index c4629d7c80b..1c5ab3d007b 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -74,13 +74,13 @@ int dgnc_mgmt_open(struct inode *inode, struct file *file)
/* Only allow 1 open at a time on mgmt device */
if (dgnc_mgmt_in_use[minor]) {
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
- return (-EBUSY);
+ return -EBUSY;
}
dgnc_mgmt_in_use[minor]++;
}
else {
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
- return (-ENXIO);
+ return -ENXIO;
}
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
@@ -154,7 +154,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ddi.dinfo_nboards, ddi.dinfo_version));
if (copy_to_user(uarg, &ddi, sizeof (ddi)))
- return(-EFAULT);
+ return -EFAULT;
break;
}
@@ -166,13 +166,13 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct digi_info di;
if (copy_from_user(&brd, uarg, sizeof(int))) {
- return(-EFAULT);
+ return -EFAULT;
}
DPR_MGMT(("DIGI_GETBD asking about board: %d\n", brd));
if ((brd < 0) || (brd > dgnc_NumBoards) || (dgnc_NumBoards == 0))
- return (-ENODEV);
+ return -ENODEV;
memset(&di, 0, sizeof(di));
@@ -196,7 +196,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
di.info_bdtype, di.info_bdstate, di.info_nports, di.info_physsize));
if (copy_to_user(uarg, &di, sizeof (di)))
- return (-EFAULT);
+ return -EFAULT;
break;
}
@@ -209,8 +209,8 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
uint board = 0;
uint channel = 0;
- if (copy_from_user(&ni, uarg, sizeof(struct ni_info))) {
- return(-EFAULT);
+ if (copy_from_user(&ni, uarg, sizeof(ni))) {
+ return -EFAULT;
}
DPR_MGMT(("DIGI_GETBD asking about board: %d channel: %d\n",
@@ -220,17 +220,17 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
channel = ni.channel;
/* Verify boundaries on board */
- if ((board < 0) || (board > dgnc_NumBoards) || (dgnc_NumBoards == 0))
- return (-ENODEV);
+ if ((board > dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ return -ENODEV;
/* Verify boundaries on channel */
if ((channel < 0) || (channel > dgnc_Board[board]->nasync))
- return (-ENODEV);
+ return -ENODEV;
ch = dgnc_Board[board]->channels[channel];
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (-ENODEV);
+ return -ENODEV;
memset(&ni, 0, sizeof(ni));
ni.board = board;
@@ -291,7 +291,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DGNC_UNLOCK(ch->ch_lock, lock_flags);
if (copy_to_user(uarg, &ni, sizeof(ni)))
- return (-EFAULT);
+ return -EFAULT;
break;
}
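
In the ni_info hunk of dgnc_mgmt_ioctl() the copy_from_user() length becomes sizeof(ni), tying it to the local variable rather than to a type name, and the board < 0 test goes away because board is a uint, so the comparison is always false. A kernel-context sketch with invented names that mirrors the surviving checks:

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_ni_info {
	int board;
	int channel;
};

static long demo_getchan(void __user *uarg, unsigned int nboards)
{
	struct demo_ni_info ni;
	unsigned int board;

	/* sizeof(ni) tracks the variable, not a possibly stale type name */
	if (copy_from_user(&ni, uarg, sizeof(ni)))
		return -EFAULT;

	board = ni.board;

	/* board is unsigned, so a "board < 0" test could never fire */
	if (board > nboards || nboards == 0)
		return -ENODEV;

	return 0;
}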
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 8b9e09a83f7..dc5a138d8d4 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -43,8 +43,8 @@
#include "dgnc_tty.h"
#include "dgnc_trace.h"
-static inline void neo_parse_lsr(struct board_t *brd, uint port);
-static inline void neo_parse_isr(struct board_t *brd, uint port);
+static inline void neo_parse_lsr(struct dgnc_board *brd, uint port);
+static inline void neo_parse_isr(struct dgnc_board *brd, uint port);
static void neo_copy_data_from_uart_to_queue(struct channel_t *ch);
static inline void neo_clear_break(struct channel_t *ch, int force);
static inline void neo_set_cts_flow_control(struct channel_t *ch);
@@ -56,7 +56,7 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch);
static inline void neo_set_new_start_stop_chars(struct channel_t *ch);
static void neo_parse_modem(struct channel_t *ch, uchar signals);
static void neo_tasklet(unsigned long data);
-static void neo_vpd(struct board_t *brd);
+static void neo_vpd(struct dgnc_board *brd);
static void neo_uart_init(struct channel_t *ch);
static void neo_uart_off(struct channel_t *ch);
static int neo_drain(struct tty_struct *tty, uint seconds);
@@ -107,7 +107,7 @@ static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0
* In this case, we are reading the DVID (Read-only Device Identification)
* value of the Neo card.
*/
-static inline void neo_pci_posting_flush(struct board_t *bd)
+static inline void neo_pci_posting_flush(struct dgnc_board *bd)
{
readb(bd->re_map_membase + 0x8D);
}
@@ -411,7 +411,7 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
/*
* Parse the ISR register.
*/
-static inline void neo_parse_isr(struct board_t *brd, uint port)
+static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
{
struct channel_t *ch;
uchar isr;
@@ -538,7 +538,7 @@ static inline void neo_parse_isr(struct board_t *brd, uint port)
}
-static inline void neo_parse_lsr(struct board_t *brd, uint port)
+static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
{
struct channel_t *ch;
int linestatus;
@@ -650,7 +650,7 @@ static void neo_param(struct tty_struct *tty)
uchar uart_ier = 0;
uint baud = 9600;
int quot = 0;
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
@@ -911,7 +911,7 @@ static void neo_param(struct tty_struct *tty)
*/
static void neo_tasklet(unsigned long data)
{
- struct board_t *bd = (struct board_t *) data;
+ struct dgnc_board *bd = (struct dgnc_board *) data;
struct channel_t *ch;
ulong lock_flags;
int i;
@@ -994,7 +994,7 @@ static void neo_tasklet(unsigned long data)
*/
static irqreturn_t neo_intr(int irq, void *voidbrd)
{
- struct board_t *brd = (struct board_t *) voidbrd;
+ struct dgnc_board *brd = (struct dgnc_board *) voidbrd;
struct channel_t *ch;
int port = 0;
int type = 0;
@@ -1111,7 +1111,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* Why would I check EVERY possibility of type of
* interrupt, when we know its TXRDY???
* Becuz for some reason, even tho we got triggered for TXRDY,
- * it seems to be occassionally wrong. Instead of TX, which
+ * it seems to be occasionally wrong. Instead of TX, which
* it should be, I was getting things like RXDY too. Weird.
*/
neo_parse_isr(brd, port);
@@ -1404,17 +1404,17 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
int rc = 0;
if (!tty || tty->magic != TTY_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
un = (struct un_t *) tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
DPR_IOCTL(("%d Drain wait started.\n", __LINE__));
@@ -1439,7 +1439,7 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
DPR_IOCTL(("%d Drain wait finished.\n", __LINE__));
}
- return (rc);
+ return rc;
}
@@ -1939,7 +1939,7 @@ static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int ad
}
-static void neo_vpd(struct board_t *brd)
+static void neo_vpd(struct dgnc_board *brd)
{
unsigned int i = 0;
unsigned int a;
@@ -1965,7 +1965,7 @@ static void neo_vpd(struct board_t *brd)
}
else {
/* Search for the serial number */
- for (i = 0; i < NEO_VPD_IMAGESIZE * 2; i++) {
+ for (i = 0; i < NEO_VPD_IMAGEBYTES - 3; i++) {
if (brd->vpd[i] == 'S' && brd->vpd[i + 1] == 'N') {
strncpy(brd->serial_num, &(brd->vpd[i + 3]), 9);
}
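
The last dgnc_neo.c hunk is a bounds fix: the serial-number scan peeks at vpd[i + 1] and copies from vpd[i + 3], so iterating up to NEO_VPD_IMAGESIZE * 2 could read past the image, while the new NEO_VPD_IMAGEBYTES - 3 bound stops short of the end (assuming NEO_VPD_IMAGEBYTES is the size of the vpd[] buffer). A standalone, runnable sketch of the same idea that also clamps the copy length; the sizes, tag layout, and names are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

#define VPD_BYTES 128	/* illustrative image size */

/*
 * Scan a VPD image for an "SN" tag and copy the serial number that
 * follows the tag and its length byte, never reading past the image.
 */
static void demo_find_serial(const unsigned char *vpd, char *serial,
			     size_t serial_len)
{
	size_t i;

	serial[0] = '\0';
	/* stop early enough that vpd[i + 1] is always in range */
	for (i = 0; i + 3 < VPD_BYTES; i++) {
		if (vpd[i] == 'S' && vpd[i + 1] == 'N') {
			size_t avail = VPD_BYTES - (i + 3);
			size_t n = serial_len - 1 < avail ? serial_len - 1 : avail;

			memcpy(serial, &vpd[i + 3], n);
			serial[n] = '\0';
			return;
		}
	}
}

int main(void)
{
	unsigned char vpd[VPD_BYTES] = "..SN\011123456789";
	char serial[10];

	demo_find_serial(vpd, serial, sizeof(serial));
	printf("serial: %s\n", serial);
	return 0;
}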
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index 7ec5710a434..1a4abb12869 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -47,7 +47,7 @@ struct neo_uart_struct {
u8 fctr; /* WR FCTR - Feature Control Reg */
u8 efr; /* WR EFR - Enhanced Function Reg */
u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
- u8 rfifo; /* WR RXCNT/RXTRG - Recieve FIFO Reg */
+ u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 0ea6c800280..946230c2348 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -152,19 +152,19 @@ void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
#define DGNC_VERIFY_BOARD(p, bd) \
if (!p) \
- return (0); \
+ return 0; \
\
bd = dev_get_drvdata(p); \
if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
- return (0); \
+ return 0; \
if (bd->state != BOARD_READY) \
- return (0); \
+ return 0; \
static ssize_t dgnc_vpd_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -184,7 +184,7 @@ static DEVICE_ATTR(vpd, S_IRUSR, dgnc_vpd_show, NULL);
static ssize_t dgnc_serial_number_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
DGNC_VERIFY_BOARD(p, bd);
@@ -201,7 +201,7 @@ static DEVICE_ATTR(serial_number, S_IRUSR, dgnc_serial_number_show, NULL);
static ssize_t dgnc_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -219,7 +219,7 @@ static DEVICE_ATTR(ports_state, S_IRUSR, dgnc_ports_state_show, NULL);
static ssize_t dgnc_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -236,7 +236,7 @@ static DEVICE_ATTR(ports_baud, S_IRUSR, dgnc_ports_baud_show, NULL);
static ssize_t dgnc_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -264,7 +264,7 @@ static DEVICE_ATTR(ports_msignals, S_IRUSR, dgnc_ports_msignals_show, NULL);
static ssize_t dgnc_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -281,7 +281,7 @@ static DEVICE_ATTR(ports_iflag, S_IRUSR, dgnc_ports_iflag_show, NULL);
static ssize_t dgnc_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -298,7 +298,7 @@ static DEVICE_ATTR(ports_cflag, S_IRUSR, dgnc_ports_cflag_show, NULL);
static ssize_t dgnc_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -315,7 +315,7 @@ static DEVICE_ATTR(ports_oflag, S_IRUSR, dgnc_ports_oflag_show, NULL);
static ssize_t dgnc_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -332,7 +332,7 @@ static DEVICE_ATTR(ports_lflag, S_IRUSR, dgnc_ports_lflag_show, NULL);
static ssize_t dgnc_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -349,7 +349,7 @@ static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgnc_ports_digi_flag_show, NULL);
static ssize_t dgnc_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -366,7 +366,7 @@ static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgnc_ports_rxcount_show, NULL);
static ssize_t dgnc_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int count = 0;
int i = 0;
@@ -384,7 +384,7 @@ static DEVICE_ATTR(ports_txcount, S_IRUSR, dgnc_ports_txcount_show, NULL);
/* this function creates the sys files that will export each signal status
* to sysfs each value will be put in a separate filename
*/
-void dgnc_create_ports_sysfiles(struct board_t *bd)
+void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
{
int rc = 0;
@@ -408,7 +408,7 @@ void dgnc_create_ports_sysfiles(struct board_t *bd)
/* removes all the sys files created for that port */
-void dgnc_remove_ports_sysfiles(struct board_t *bd)
+void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
{
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
@@ -427,23 +427,23 @@ void dgnc_remove_ports_sysfiles(struct board_t *bd)
static ssize_t dgnc_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
}
@@ -452,23 +452,23 @@ static DEVICE_ATTR(state, S_IRUSR, dgnc_tty_state_show, NULL);
static ssize_t dgnc_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
}
@@ -477,23 +477,23 @@ static DEVICE_ATTR(baud, S_IRUSR, dgnc_tty_baud_show, NULL);
static ssize_t dgnc_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
if (ch->ch_open_count) {
return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
@@ -511,23 +511,23 @@ static DEVICE_ATTR(msignals, S_IRUSR, dgnc_tty_msignals_show, NULL);
static ssize_t dgnc_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
}
@@ -536,23 +536,23 @@ static DEVICE_ATTR(iflag, S_IRUSR, dgnc_tty_iflag_show, NULL);
static ssize_t dgnc_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
}
@@ -561,23 +561,23 @@ static DEVICE_ATTR(cflag, S_IRUSR, dgnc_tty_cflag_show, NULL);
static ssize_t dgnc_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
}
@@ -586,23 +586,23 @@ static DEVICE_ATTR(oflag, S_IRUSR, dgnc_tty_oflag_show, NULL);
static ssize_t dgnc_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
}
@@ -611,23 +611,23 @@ static DEVICE_ATTR(lflag, S_IRUSR, dgnc_tty_lflag_show, NULL);
static ssize_t dgnc_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
}
@@ -636,23 +636,23 @@ static DEVICE_ATTR(digi_flag, S_IRUSR, dgnc_tty_digi_flag_show, NULL);
static ssize_t dgnc_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
}
@@ -661,23 +661,23 @@ static DEVICE_ATTR(rxcount, S_IRUSR, dgnc_tty_rxcount_show, NULL);
static ssize_t dgnc_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
}
@@ -686,23 +686,23 @@ static DEVICE_ATTR(txcount, S_IRUSR, dgnc_tty_txcount_show, NULL);
static ssize_t dgnc_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
- return (0);
- un = (struct un_t *) dev_get_drvdata(d);
+ return 0;
+ un = dev_get_drvdata(d);
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (0);
+ return 0;
if (bd->state != BOARD_READY)
- return (0);
+ return 0;
return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
(un->un_type == DGNC_PRINT) ? "pr" : "tty",
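
All of the dgnc_sysfs.c show callbacks above follow one template: walk the drvdata/unit/channel/board chain, bail out with 0 (an empty read) if any link or magic number is wrong, and only then format into the PAGE_SIZE sysfs buffer. A kernel-context sketch of that template with an invented unit type and magic value:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define DEMO_UNIT_MAGIC 0x4d479755	/* illustrative magic */

struct demo_unit {
	int magic;
	unsigned long count;
};

static ssize_t demo_count_show(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct demo_unit *un;

	if (!d)
		return 0;
	un = dev_get_drvdata(d);
	if (!un || un->magic != DEMO_UNIT_MAGIC)
		return 0;

	return snprintf(buf, PAGE_SIZE, "%lu\n", un->count);
}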
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
index 4b87ce1cc7a..68c0de5898a 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ b/drivers/staging/dgnc/dgnc_sysfs.h
@@ -26,14 +26,14 @@
#include <linux/device.h>
-struct board_t;
+struct dgnc_board;
struct channel_t;
struct un_t;
struct pci_driver;
struct class_device;
-extern void dgnc_create_ports_sysfiles(struct board_t *bd);
-extern void dgnc_remove_ports_sysfiles(struct board_t *bd);
+extern void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
+extern void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
extern void dgnc_create_driver_sysfiles(struct pci_driver *);
extern void dgnc_remove_driver_sysfiles(struct pci_driver *);
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index a7bb6bceb9e..a6c6aba82d7 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -38,7 +38,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/sched.h> /* For jiffies, task states */
#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
#include <linux/module.h>
@@ -60,16 +59,14 @@
#include "dpacompat.h"
#include "dgnc_sysfs.h"
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
#define init_MUTEX(sem) sema_init(sem, 1)
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
-#endif
/*
* internal variables
*/
-static struct board_t *dgnc_BoardsByMajor[256];
+static struct dgnc_board *dgnc_BoardsByMajor[256];
static uchar *dgnc_TmpWriteBuf = NULL;
static DECLARE_MUTEX(dgnc_TmpWriteSem);
@@ -126,13 +123,8 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty);
static void dgnc_tty_hangup(struct tty_struct *tty);
static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
static int dgnc_tty_tiocmget(struct tty_struct *tty);
static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
-#else
-static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file);
-static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
-#endif
static int dgnc_tty_send_break(struct tty_struct *tty, int msec);
static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout);
static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
@@ -190,10 +182,10 @@ int dgnc_tty_preinit(void)
if (!dgnc_TmpWriteBuf) {
DPR_INIT(("unable to allocate tmp write buf"));
- return (-ENOMEM);
+ return -ENOMEM;
}
- return(0);
+ return 0;
}
@@ -202,14 +194,14 @@ int dgnc_tty_preinit(void)
*
* Init the tty subsystem for this board.
*/
-int dgnc_tty_register(struct board_t *brd)
+int dgnc_tty_register(struct dgnc_board *brd)
{
int rc = 0;
DPR_INIT(("tty_register start\n"));
- memset(&brd->SerialDriver, 0, sizeof(struct tty_driver));
- memset(&brd->PrintDriver, 0, sizeof(struct tty_driver));
+ memset(&brd->SerialDriver, 0, sizeof(brd->SerialDriver));
+ memset(&brd->PrintDriver, 0, sizeof(brd->PrintDriver));
brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
@@ -230,25 +222,15 @@ int dgnc_tty_register(struct board_t *brd)
* The kernel wants space to store pointers to
* tty_struct's and termios's.
*/
- brd->SerialDriver.ttys = kzalloc(brd->maxports * sizeof(struct tty_struct *), GFP_KERNEL);
+ brd->SerialDriver.ttys = kzalloc(brd->maxports * sizeof(*brd->SerialDriver.ttys), GFP_KERNEL);
if (!brd->SerialDriver.ttys)
- return(-ENOMEM);
+ return -ENOMEM;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- brd->SerialDriver.refcount = brd->TtyRefCnt;
-#else
kref_init(&brd->SerialDriver.kref);
-#endif
-
- brd->SerialDriver.termios = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ brd->SerialDriver.termios = kzalloc(brd->maxports * sizeof(*brd->SerialDriver.termios), GFP_KERNEL);
if (!brd->SerialDriver.termios)
- return(-ENOMEM);
+ return -ENOMEM;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
- brd->SerialDriver.termios_locked = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
- if (!brd->SerialDriver.termios_locked)
- return(-ENOMEM);
-#endif
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
@@ -260,14 +242,14 @@ int dgnc_tty_register(struct board_t *brd)
rc = tty_register_driver(&brd->SerialDriver);
if (rc < 0) {
APR(("Can't register tty device (%d)\n", rc));
- return(rc);
+ return rc;
}
brd->dgnc_Major_Serial_Registered = TRUE;
}
/*
* If we're doing transparent print, we have to do all of the above
- * again, seperately so we don't get the LD confused about what major
+ * again, separately so we don't get the LD confused about what major
* we are when we get into the dgnc_tty_open() routine.
*/
brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
@@ -286,28 +268,16 @@ int dgnc_tty_register(struct board_t *brd)
/*
* The kernel wants space to store pointers to
- * tty_struct's and termios's. Must be seperate from
+ * tty_struct's and termios's. Must be separated from
* the Serial Driver so we don't get confused
*/
- brd->PrintDriver.ttys = kzalloc(brd->maxports * sizeof(struct tty_struct *), GFP_KERNEL);
+ brd->PrintDriver.ttys = kzalloc(brd->maxports * sizeof(*brd->PrintDriver.ttys), GFP_KERNEL);
if (!brd->PrintDriver.ttys)
- return(-ENOMEM);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- brd->PrintDriver.refcount = brd->TtyRefCnt;
-#else
+ return -ENOMEM;
kref_init(&brd->PrintDriver.kref);
-#endif
-
- brd->PrintDriver.termios = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ brd->PrintDriver.termios = kzalloc(brd->maxports * sizeof(*brd->PrintDriver.termios), GFP_KERNEL);
if (!brd->PrintDriver.termios)
- return(-ENOMEM);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
- brd->PrintDriver.termios_locked = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
- if (!brd->PrintDriver.termios_locked)
- return(-ENOMEM);
-#endif
+ return -ENOMEM;
/*
* Entry points for driver. Called by the kernel from
@@ -320,7 +290,7 @@ int dgnc_tty_register(struct board_t *brd)
rc = tty_register_driver(&brd->PrintDriver);
if (rc < 0) {
APR(("Can't register Transparent Print device (%d)\n", rc));
- return(rc);
+ return rc;
}
brd->dgnc_Major_TransparentPrint_Registered = TRUE;
}
@@ -331,7 +301,7 @@ int dgnc_tty_register(struct board_t *brd)
DPR_INIT(("DGNC REGISTER TTY: MAJOR: %d\n", brd->SerialDriver.major));
- return (rc);
+ return rc;
}
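
dgnc_tty_register() picks up the same sizing idioms seen earlier, memset() and kzalloc() are sized from the object or pointer being filled rather than from a spelled-out type, and the pre-2.6.28/3.0 refcount and termios_locked branches go away with the version gates. A short sketch of the sizing idioms with invented types:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_tty_driver {
	int magic;
	void **ttys;
};

struct demo_board {
	struct demo_tty_driver serial;
	unsigned int maxports;
};

static int demo_register(struct demo_board *brd)
{
	/* sizeof(object): stays correct if the member's type changes */
	memset(&brd->serial, 0, sizeof(brd->serial));

	/* sizeof(*ptr): the element size follows the pointer's type */
	brd->serial.ttys = kzalloc(brd->maxports * sizeof(*brd->serial.ttys),
				   GFP_KERNEL);
	if (!brd->serial.ttys)
		return -ENOMEM;

	return 0;
}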
@@ -341,14 +311,14 @@ int dgnc_tty_register(struct board_t *brd)
* Init the tty subsystem. Called once per board after board has been
* downloaded and init'ed.
*/
-int dgnc_tty_init(struct board_t *brd)
+int dgnc_tty_init(struct dgnc_board *brd)
{
int i;
void __iomem *vaddr;
struct channel_t *ch;
if (!brd)
- return (-ENXIO);
+ return -ENXIO;
DPR_INIT(("dgnc_tty_init start\n"));
@@ -371,7 +341,7 @@ int dgnc_tty_init(struct board_t *brd)
* Okay to malloc with GFP_KERNEL, we are not at
* interrupt context, and there are no locks held.
*/
- brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_KERNEL);
+ brd->channels[i] = kzalloc(sizeof(*brd->channels[i]), GFP_KERNEL);
if (!brd->channels[i]) {
DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
__FILE__, __LINE__));
@@ -436,7 +406,7 @@ int dgnc_tty_init(struct board_t *brd)
DPR_INIT(("dgnc_tty_init finish\n"));
- return (0);
+ return 0;
}
@@ -460,7 +430,7 @@ void dgnc_tty_post_uninit(void)
* Uninitialize the TTY portion of this driver. Free all memory and
* resources.
*/
-void dgnc_tty_uninit(struct board_t *brd)
+void dgnc_tty_uninit(struct dgnc_board *brd)
{
int i = 0;
@@ -550,7 +520,7 @@ void dgnc_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int
/*
* Loop while data remains.
*/
- while (nbuf > 0 && ch->ch_sniff_buf != 0) {
+ while (nbuf > 0 && ch->ch_sniff_buf) {
/*
* Determine the amount of available space left in the
* buffer. If there's none, wait until some appears.
@@ -671,7 +641,7 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
*=======================================================================*/
void dgnc_input(struct channel_t *ch)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct tty_struct *tp;
struct tty_ldisc *ld;
uint rmask;
@@ -867,7 +837,7 @@ void dgnc_input(struct channel_t *ch)
************************************************************************/
void dgnc_carrier(struct channel_t *ch)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
int virt_carrier = 0;
int phys_carrier = 0;
@@ -1158,7 +1128,6 @@ void dgnc_wakeup_writes(struct channel_t *ch)
}
if (ch->ch_tun.un_flags & UN_ISOPEN) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
{
@@ -1166,15 +1135,6 @@ void dgnc_wakeup_writes(struct channel_t *ch)
(ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
DGNC_LOCK(ch->ch_lock, lock_flags);
}
-#else
- if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
- {
- DGNC_UNLOCK(ch->ch_lock, lock_flags);
- (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
- DGNC_LOCK(ch->ch_lock, lock_flags);
- }
-#endif
wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
@@ -1210,7 +1170,6 @@ void dgnc_wakeup_writes(struct channel_t *ch)
}
if (ch->ch_pun.un_flags & UN_ISOPEN) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
{
@@ -1218,15 +1177,6 @@ void dgnc_wakeup_writes(struct channel_t *ch)
(ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
DGNC_LOCK(ch->ch_lock, lock_flags);
}
-#else
- if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
- {
- DGNC_UNLOCK(ch->ch_lock, lock_flags);
- (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
- DGNC_LOCK(ch->ch_lock, lock_flags);
- }
-#endif
wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
@@ -1260,7 +1210,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
*/
static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
{
- struct board_t *brd;
+ struct dgnc_board *brd;
struct channel_t *ch;
struct un_t *un;
uint major = 0;
@@ -1473,7 +1423,7 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
DGNC_UNLOCK(ch->ch_lock, lock_flags);
DPR_OPEN(("dgnc_tty_open finished\n"));
- return (rc);
+ return rc;
}
@@ -1491,12 +1441,12 @@ static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struc
int sleep_on_un_flags = 0;
if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGNC_CHANNEL_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC) {
- return (-ENXIO);
+ return -ENXIO;
}
DPR_OPEN(("dgnc_block_til_ready - before block.\n"));
@@ -1624,12 +1574,12 @@ static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struc
if (retval) {
DPR_OPEN(("dgnc_block_til_ready - done. error. retval: %x\n", retval));
- return(retval);
+ return retval;
}
DPR_OPEN(("dgnc_block_til_ready - done no error. jiffies: %lu\n", jiffies));
- return(0);
+ return 0;
}
@@ -1667,7 +1617,7 @@ static void dgnc_tty_hangup(struct tty_struct *tty)
static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
{
struct ktermios *ts;
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
@@ -1843,15 +1793,15 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
ulong lock_flags = 0;
if (tty == NULL)
- return(0);
+ return 0;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -1873,7 +1823,7 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
DPR_WRITE(("dgnc_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d)\n",
ch->ch_portnum, chars, thead, ttail));
- return(chars);
+ return chars;
}
@@ -1891,22 +1841,22 @@ static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
struct un_t *un = NULL;
if (!tty)
- return (bytes_available);
+ return bytes_available;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (bytes_available);
+ return bytes_available;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (bytes_available);
+ return bytes_available;
/*
* If its not the Transparent print device, return
* the full data amount.
*/
if (un->un_type != DGNC_PRINT)
- return (bytes_available);
+ return bytes_available;
if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0 ) {
int cps_limit = 0;
@@ -1931,7 +1881,7 @@ static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
bytes_available = min(cps_limit, bytes_available);
}
- return (bytes_available);
+ return bytes_available;
}
@@ -1951,15 +1901,15 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
ulong lock_flags = 0;
if (tty == NULL || dgnc_TmpWriteBuf == NULL)
- return(0);
+ return 0;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (0);
+ return 0;
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -1994,7 +1944,7 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
DPR_WRITE(("dgnc_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
- return(ret);
+ return ret;
}
@@ -2037,18 +1987,18 @@ static int dgnc_tty_write(struct tty_struct *tty,
int from_user = 0;
if (tty == NULL || dgnc_TmpWriteBuf == NULL)
- return(0);
+ return 0;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return(0);
+ return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return(0);
+ return 0;
if (!count)
- return(0);
+ return 0;
DPR_WRITE(("dgnc_tty_write: Port: %x tty=%p user=%d len=%d\n",
ch->ch_portnum, tty, from_user, count));
@@ -2090,7 +2040,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
if (count <= 0) {
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
+ return 0;
}
/*
@@ -2120,7 +2070,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
if (count <= 0) {
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
+ return 0;
}
if (from_user) {
@@ -2136,7 +2086,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
/* we're allowed to block if it's from_user */
if (down_interruptible(&dgnc_TmpWriteSem)) {
- return (-EINTR);
+ return -EINTR;
}
/*
@@ -2147,7 +2097,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
if (!count) {
up(&dgnc_TmpWriteSem);
- return(-EFAULT);
+ return -EFAULT;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -2229,18 +2179,15 @@ static int dgnc_tty_write(struct tty_struct *tty,
ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch);
}
- return (count);
+ return count;
}
/*
* Return modem signals to ld.
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+
static int dgnc_tty_tiocmget(struct tty_struct *tty)
-#else
-static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file)
-#endif
{
struct channel_t *ch;
struct un_t *un;
@@ -2293,15 +2240,11 @@ static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file)
*
* Set modem signals, called by ld.
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+
static int dgnc_tty_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
-#else
-static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-#endif
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
int ret = -EIO;
@@ -2349,7 +2292,7 @@ static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file,
DPR_IOCTL(("dgnc_tty_tiocmset finish\n"));
- return (0);
+ return 0;
}
@@ -2360,7 +2303,7 @@ static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file,
*/
static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
int ret = -EIO;
@@ -2402,7 +2345,7 @@ static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
DPR_IOCTL(("dgnc_tty_send_break finish\n"));
- return (0);
+ return 0;
}
@@ -2414,7 +2357,7 @@ static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
*/
static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
int rc;
@@ -2450,7 +2393,7 @@ static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
*/
static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
@@ -2497,7 +2440,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
DPR_IOCTL(("dgnc_getmstat start\n"));
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return(-ENXIO);
+ return -ENXIO;
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -2522,7 +2465,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
DPR_IOCTL(("dgnc_getmstat finish\n"));
- return(result);
+ return result;
}
@@ -2538,17 +2481,17 @@ static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value
DPR_IOCTL(("dgnc_get_modem_info start\n"));
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return(-ENXIO);
+ return -ENXIO;
result = dgnc_get_mstat(ch);
if (result < 0)
- return (-ENXIO);
+ return -ENXIO;
rc = put_user(result, value);
DPR_IOCTL(("dgnc_get_modem_info finish\n"));
- return(rc);
+ return rc;
}
@@ -2559,7 +2502,7 @@ static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value
*/
static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
int ret = -ENXIO;
@@ -2587,7 +2530,7 @@ static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, uns
ret = get_user(arg, value);
if (ret)
- return(ret);
+ return ret;
switch (command) {
case TIOCMBIS:
@@ -2631,7 +2574,7 @@ static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, uns
break;
default:
- return(-EINVAL);
+ return -EINVAL;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -2642,7 +2585,7 @@ static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, uns
DPR_IOCTL(("dgnc_set_modem_info finish\n"));
- return (0);
+ return 0;
}
@@ -2662,18 +2605,18 @@ static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retin
ulong lock_flags;
if (!retinfo)
- return (-EFAULT);
+ return -EFAULT;
if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
@@ -2682,9 +2625,9 @@ static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retin
DGNC_UNLOCK(ch->ch_lock, lock_flags);
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
+ return -EFAULT;
- return (0);
+ return 0;
}
@@ -2698,7 +2641,7 @@ static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retin
*/
static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
struct digi_t new_digi;
@@ -2707,23 +2650,23 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
DPR_IOCTL(("DIGI_SETA start\n"));
if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (-EFAULT);
+ return -EFAULT;
- if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
+ if (copy_from_user(&new_digi, new_info, sizeof(new_digi))) {
DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
- return(-EFAULT);
+ return -EFAULT;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -2744,7 +2687,7 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE))
ch->ch_mostat |= (UART_MCR_DTR);
- memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+ memcpy(&ch->ch_digi, &new_digi, sizeof(new_digi));
if (ch->ch_digi.digi_maxcps < 1)
ch->ch_digi.digi_maxcps = 1;
@@ -2773,7 +2716,7 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
DPR_IOCTL(("DIGI_SETA finish\n"));
- return(0);
+ return 0;
}
@@ -2782,7 +2725,7 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
*/
static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
unsigned long lock_flags;
@@ -2878,7 +2821,7 @@ static void dgnc_tty_unthrottle(struct tty_struct *tty)
static void dgnc_tty_start(struct tty_struct *tty)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
@@ -2912,7 +2855,7 @@ static void dgnc_tty_start(struct tty_struct *tty)
static void dgnc_tty_stop(struct tty_struct *tty)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
@@ -2959,7 +2902,7 @@ static void dgnc_tty_stop(struct tty_struct *tty)
*/
static void dgnc_tty_flush_chars(struct tty_struct *tty)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
@@ -3056,7 +2999,7 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct board_t *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
int rc;
@@ -3064,19 +3007,19 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
void __user *uarg = (void __user *) arg;
if (!tty || tty->magic != TTY_MAGIC)
- return (-ENODEV);
+ return -ENODEV;
un = tty->driver_data;
if (!un || un->magic != DGNC_UNIT_MAGIC)
- return (-ENODEV);
+ return -ENODEV;
ch = un->un_ch;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return (-ENODEV);
+ return -ENODEV;
bd = ch->ch_bd;
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return (-ENODEV);
+ return -ENODEV;
DPR_IOCTL(("dgnc_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
@@ -3086,7 +3029,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (un->un_open_count <= 0) {
DPR_BASIC(("dgnc_tty_ioctl - unit not open.\n"));
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(-EIO);
+ return -EIO;
}
switch (cmd) {
@@ -3105,14 +3048,14 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
rc = tty_check_change(tty);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
if (rc) {
- return(rc);
+ return rc;
}
rc = ch->ch_bd->bd_ops->drain(tty, 0);
if (rc) {
DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
+ return -EINTR;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -3126,7 +3069,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
- return(0);
+ return 0;
case TCSBRKP:
@@ -3138,13 +3081,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
rc = tty_check_change(tty);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
if (rc) {
- return(rc);
+ return rc;
}
rc = ch->ch_bd->bd_ops->drain(tty, 0);
if (rc) {
DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
+ return -EINTR;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -3156,19 +3099,19 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
- return(0);
+ return 0;
case TIOCSBRK:
rc = tty_check_change(tty);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
if (rc) {
- return(rc);
+ return rc;
}
rc = ch->ch_bd->bd_ops->drain(tty, 0);
if (rc) {
DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
+ return -EINTR;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -3180,7 +3123,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
- return(0);
+ return 0;
case TIOCCBRK:
/* Do Nothing */
@@ -3192,31 +3135,31 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
- return(rc);
+ return rc;
case TIOCSSOFTCAR:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = get_user(arg, (unsigned long __user *) arg);
if (rc)
- return(rc);
+ return rc;
DGNC_LOCK(ch->ch_lock, lock_flags);
tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
ch->ch_bd->bd_ops->param(tty);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
+ return 0;
case TIOCMGET:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(dgnc_get_modem_info(ch, uarg));
+ return dgnc_get_modem_info(ch, uarg);
case TIOCMBIS:
case TIOCMBIC:
case TIOCMSET:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(dgnc_set_modem_info(tty, cmd, uarg));
+ return dgnc_set_modem_info(tty, cmd, uarg);
/*
* Here are any additional ioctl's that we want to implement
@@ -3235,7 +3178,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
rc = tty_check_change(tty);
if (rc) {
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(rc);
+ return rc;
}
if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
@@ -3265,7 +3208,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* pretend we didn't recognize this IOCTL */
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(-ENOIOCTLCMD);
+ return -ENOIOCTLCMD;
case TCSETSF:
case TCSETSW:
/*
@@ -3291,14 +3234,14 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
rc = ch->ch_bd->bd_ops->drain(tty, 0);
if (rc) {
DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d\n", rc));
- return(-EINTR);
+ return -EINTR;
}
DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
/* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
+ return -ENOIOCTLCMD;
case TCSETAW:
@@ -3306,21 +3249,21 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
rc = ch->ch_bd->bd_ops->drain(tty, 0);
if (rc) {
DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
+ return -EINTR;
}
/* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
+ return -ENOIOCTLCMD;
case TCXONC:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
/* Make the ld do it */
- return(-ENOIOCTLCMD);
+ return -ENOIOCTLCMD;
case DIGI_GETA:
/* get information for ditty */
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(dgnc_tty_digigeta(tty, uarg));
+ return dgnc_tty_digigeta(tty, uarg);
case DIGI_SETAW:
case DIGI_SETAF:
@@ -3332,7 +3275,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
rc = ch->ch_bd->bd_ops->drain(tty, 0);
if (rc) {
DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
+ return -EINTR;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
}
@@ -3343,7 +3286,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_SETA:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(dgnc_tty_digiseta(tty, uarg));
+ return dgnc_tty_digiseta(tty, uarg);
case DIGI_LOOPBACK:
{
@@ -3352,7 +3295,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = get_user(loopback, (unsigned int __user *) arg);
if (rc)
- return(rc);
+ return rc;
DGNC_LOCK(ch->ch_lock, lock_flags);
/* Enable/disable internal loopback for this port */
@@ -3363,13 +3306,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
ch->ch_bd->bd_ops->param(tty);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
+ return 0;
}
case DIGI_GETCUSTOMBAUD:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = put_user(ch->ch_custom_speed, (unsigned int __user *) arg);
- return(rc);
+ return rc;
case DIGI_SETCUSTOMBAUD:
{
@@ -3378,12 +3321,12 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = get_user(new_rate, (unsigned int __user *) arg);
if (rc)
- return(rc);
+ return rc;
DGNC_LOCK(ch->ch_lock, lock_flags);
dgnc_set_custom_speed(ch, new_rate);
ch->ch_bd->bd_ops->param(tty);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
+ return 0;
}
/*
@@ -3399,11 +3342,11 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = get_user(c, (unsigned char __user *) arg);
if (rc)
- return(rc);
+ return rc;
DGNC_LOCK(ch->ch_lock, lock_flags);
ch->ch_bd->bd_ops->send_immediate_char(ch, c);
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
+ return 0;
}
/*
@@ -3426,10 +3369,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- if (copy_to_user(uarg, &buf, sizeof(struct digi_getcounter))) {
- return (-EFAULT);
+ if (copy_to_user(uarg, &buf, sizeof(buf))) {
+ return -EFAULT;
}
- return(0);
+ return 0;
}
/*
@@ -3454,7 +3397,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
rc = put_user(events, (unsigned int __user *) arg);
- return(rc);
+ return rc;
}
/*
@@ -3474,8 +3417,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* Get data from user first.
*/
- if (copy_from_user(&buf, uarg, sizeof(struct digi_getbuffer))) {
- return (-EFAULT);
+ if (copy_from_user(&buf, uarg, sizeof(buf))) {
+ return -EFAULT;
}
DGNC_LOCK(ch->ch_lock, lock_flags);
@@ -3520,10 +3463,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- if (copy_to_user(uarg, &buf, sizeof(struct digi_getbuffer))) {
- return (-EFAULT);
+ if (copy_to_user(uarg, &buf, sizeof(buf))) {
+ return -EFAULT;
}
- return(0);
+ return 0;
}
default:
DGNC_UNLOCK(ch->ch_lock, lock_flags);
@@ -3532,7 +3475,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DPR_IOCTL(("dgnc_tty_ioctl end - cmd %s (%x), arg %lx\n",
dgnc_ioctl_name(cmd), cmd, arg));
- return(-ENOIOCTLCMD);
+ return -ENOIOCTLCMD;
}
DGNC_UNLOCK(ch->ch_lock, lock_flags);
@@ -3540,5 +3483,5 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DPR_IOCTL(("dgnc_tty_ioctl end - cmd %s (%x), arg %lx\n",
dgnc_ioctl_name(cmd), cmd, arg));
- return(0);
+ return 0;
}
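
Editorial note: the dgnc_tty.c hunks above drop the pre-2.6.39 variants of tiocmget/tiocmset, which took an extra struct file * argument. A minimal sketch of how a tty driver wires the modern callbacks into tty_operations follows; the foo_* names and the returned TIOCM bits are illustrative, only the prototypes are taken from the diff.

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Modern prototypes: no struct file * argument since 2.6.39 */
static int foo_tiocmget(struct tty_struct *tty)
{
	/* Return a TIOCM_* bitmask describing the current modem lines */
	return TIOCM_DTR | TIOCM_RTS;
}

static int foo_tiocmset(struct tty_struct *tty,
			unsigned int set, unsigned int clear)
{
	/* Raise the lines in 'set', drop the lines in 'clear' */
	return 0;
}

static const struct tty_operations foo_ops = {
	.tiocmget = foo_tiocmget,
	.tiocmset = foo_tiocmset,
};
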
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index deb388d2f4c..9d1c2847bd9 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -24,13 +24,13 @@
#include "dgnc_driver.h"
-int dgnc_tty_register(struct board_t *brd);
+int dgnc_tty_register(struct dgnc_board *brd);
int dgnc_tty_preinit(void);
-int dgnc_tty_init(struct board_t *);
+int dgnc_tty_init(struct dgnc_board *);
void dgnc_tty_post_uninit(void);
-void dgnc_tty_uninit(struct board_t *);
+void dgnc_tty_uninit(struct dgnc_board *);
void dgnc_input(struct channel_t *ch);
void dgnc_carrier(struct channel_t *ch);
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index eb6e3712572..6a9adf6591e 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -201,9 +201,9 @@ struct shrink_buf_struct {
unsigned int shrink_buf_vaddr; /* Virtual address of board */
unsigned int shrink_buf_phys; /* Physical address of board */
unsigned int shrink_buf_bseg; /* Amount of board memory */
- unsigned int shrink_buf_hseg; /* '186 Begining of Dual-Port */
+ unsigned int shrink_buf_hseg; /* '186 Beginning of Dual-Port */
- unsigned int shrink_buf_lseg; /* '186 Begining of freed memory */
+ unsigned int shrink_buf_lseg; /* '186 Beginning of freed memory */
unsigned int shrink_buf_mseg; /* Linear address from start of
dual-port were freed memory
begins, host viewpoint. */
diff --git a/drivers/staging/dgrp/dgrp_sysfs.c b/drivers/staging/dgrp/dgrp_sysfs.c
index 8cee9c8bc38..9a18a2c9e73 100644
--- a/drivers/staging/dgrp/dgrp_sysfs.c
+++ b/drivers/staging/dgrp/dgrp_sysfs.c
@@ -157,7 +157,7 @@ static ssize_t dgrp_node_state_show(struct device *c,
if (!c)
return 0;
- nd = (struct nd_struct *) dev_get_drvdata(c);
+ nd = dev_get_drvdata(c);
if (!nd)
return 0;
@@ -174,7 +174,7 @@ static ssize_t dgrp_node_description_show(struct device *c,
if (!c)
return 0;
- nd = (struct nd_struct *) dev_get_drvdata(c);
+ nd = dev_get_drvdata(c);
if (!nd)
return 0;
@@ -192,7 +192,7 @@ static ssize_t dgrp_node_hw_version_show(struct device *c,
if (!c)
return 0;
- nd = (struct nd_struct *) dev_get_drvdata(c);
+ nd = dev_get_drvdata(c);
if (!nd)
return 0;
@@ -212,7 +212,7 @@ static ssize_t dgrp_node_hw_id_show(struct device *c,
if (!c)
return 0;
- nd = (struct nd_struct *) dev_get_drvdata(c);
+ nd = dev_get_drvdata(c);
if (!nd)
return 0;
@@ -232,7 +232,7 @@ static ssize_t dgrp_node_sw_version_show(struct device *c,
if (!c)
return 0;
- nd = (struct nd_struct *) dev_get_drvdata(c);
+ nd = dev_get_drvdata(c);
if (!nd)
return 0;
@@ -311,7 +311,7 @@ static ssize_t dgrp_tty_state_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
@@ -328,7 +328,7 @@ static ssize_t dgrp_tty_baud_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -348,7 +348,7 @@ static ssize_t dgrp_tty_msignals_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -377,7 +377,7 @@ static ssize_t dgrp_tty_iflag_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -396,7 +396,7 @@ static ssize_t dgrp_tty_cflag_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -415,7 +415,7 @@ static ssize_t dgrp_tty_oflag_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -434,7 +434,7 @@ static ssize_t dgrp_tty_digi_flag_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -453,7 +453,7 @@ static ssize_t dgrp_tty_rxcount_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -472,7 +472,7 @@ static ssize_t dgrp_tty_txcount_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
@@ -493,7 +493,7 @@ static ssize_t dgrp_tty_name_show(struct device *d,
if (!d)
return 0;
- un = (struct un_struct *) dev_get_drvdata(d);
+ un = dev_get_drvdata(d);
if (!un)
return 0;
ch = un->un_ch;
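
Editorial note: the dgrp_sysfs.c hunks above only drop explicit casts on dev_get_drvdata(); since it returns void *, the assignment converts implicitly in C. A minimal sketch of the idiom, with struct foo_priv and the show callback invented for illustration:

#include <linux/device.h>
#include <linux/kernel.h>

struct foo_priv {
	int state;
};

static ssize_t foo_state_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* void * from dev_get_drvdata() needs no cast */
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (!priv)
		return 0;
	return sprintf(buf, "%d\n", priv->state);
}
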
diff --git a/drivers/staging/dwc2/TODO b/drivers/staging/dwc2/TODO
new file mode 100644
index 00000000000..282470d5531
--- /dev/null
+++ b/drivers/staging/dwc2/TODO
@@ -0,0 +1,33 @@
+TODO:
+ - Dan Carpenter would like to see some cleanups to the microframe
+ scheduler code:
+ http://www.mail-archive.com/linux-usb@vger.kernel.org/msg26650.html
+
+ - Should merge the NAK holdoff patch from Raspberry Pi
+ (http://marc.info/?l=linux-usb&m=137625067103833). But as it stands
+ that patch is incomplete, it needs more investigation to see if it
+ can be made to work for non-Raspberry Pi platforms that lack the
+ special FIQ interrupt that the Pi has. Without this patch, the driver
+ has a high interrupt rate (8K/sec).
+
+ - The Raspberry Pi platform needs to have support for its FIQ interrupt
+ added, to get the same level of functionality as the downstream
+ driver. The raspberrypi.org developers have indicated they are
+ willing to help with that.
+
+ - Some of the default driver parameters (see 'struct dwc2_core_params'
+ in core.h) won't work for many platforms. So DT attributes will need
+ to be added for some of these. But that can be done as-needed as new
+ platforms are added.
+
+ - Eventually the driver should be merged with the s3c-hsotg peripheral
+ mode driver, so that both modes of operation can be supported with a
+ single driver. But I think that can wait till after the driver has
+ been moved to mainline.
+
+ - After that, OTG support can be added. I'm not sure how much demand
+ there is for that, though, so I have that as a low priority.
+
+Please send any patches for this driver to Paul Zimmerman <paulz@synopsys.com>
+and Greg Kroah-Hartman <gregkh@linuxfoundation.org>. And please CC linux-usb
+<linux-usb@vger.kernel.org> too.
diff --git a/drivers/staging/dwc2/core.c b/drivers/staging/dwc2/core.c
index 06dae67a9d6..6d001b52f65 100644
--- a/drivers/staging/dwc2/core.c
+++ b/drivers/staging/dwc2/core.c
@@ -564,7 +564,7 @@ void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
/*
* This bit allows dynamic reloading of the HFIR register during
- * runtime. This bit needs to be programmed during inital configuration
+ * runtime. This bit needs to be programmed during initial configuration
* and its value must not be changed during runtime.
*/
if (hsotg->core_params->reload_ctl > 0) {
@@ -2205,7 +2205,7 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
int valid = 0;
- u32 hs_phy_type, fs_phy_type;
+ u32 hs_phy_type, fs_phy_type;
#endif
int retval = 0;
@@ -2553,7 +2553,7 @@ int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
hsotg->core_params->ahbcfg = val;
else
hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
- GAHBCFG_HBSTLEN_SHIFT;
+ GAHBCFG_HBSTLEN_SHIFT;
return 0;
}
@@ -2736,6 +2736,26 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
return 0;
}
+int dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+ int retval = 0;
+
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter uframe_sched\n",
+ val);
+ dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+ }
+ val = 1;
+ dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+ retval = -EINVAL;
+ }
+
+ hsotg->core_params->uframe_sched = val;
+ return retval;
+}
+
/*
* This function is called during module initialization to pass module parameters
* for the DWC_otg core. It returns non-0 if any parameters are invalid.
@@ -2782,6 +2802,7 @@ int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+ retval |= dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
return retval;
}
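
Editorial note: dwc2_set_param_uframe_sched() above follows the driver's general parameter convention: -1 means "left unset, take the default" (see the pci.c hunk further down), out-of-range values are reported and replaced, and the sanitized value is stored in core_params. The sketch below shows that general convention rather than this function's exact behaviour; the helper name, range and default are assumptions for illustration.

#include <linux/errno.h>
#include <linux/printk.h>

/* Illustrative only: "-1 means use default" handling for a boolean parameter */
static int set_bool_param(int val, int def, int *out)
{
	int retval = 0;

	if (val < 0) {			/* -1: parameter not set, take default */
		val = def;
	} else if (val > 1) {		/* out of range: warn and fall back */
		pr_err("'%d' invalid, must be 0 or 1\n", val);
		val = def;
		retval = -EINVAL;
	}

	*out = val;
	return retval;
}
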
diff --git a/drivers/staging/dwc2/core.h b/drivers/staging/dwc2/core.h
index 9102f66d011..fab718d9b32 100644
--- a/drivers/staging/dwc2/core.h
+++ b/drivers/staging/dwc2/core.h
@@ -188,6 +188,7 @@ enum dwc2_lx_state {
* bits defined by GAHBCFG_CTRL_MASK are controlled
* by the driver and are ignored in this
* configuration value.
+ * @uframe_sched: True to enable the microframe scheduler
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -224,6 +225,7 @@ struct dwc2_core_params {
int ts_dline;
int reload_ctl;
int ahbcfg;
+ int uframe_sched;
};
/**
@@ -292,7 +294,7 @@ struct dwc2_hw_params {
unsigned dev_token_q_depth:5;
unsigned max_transfer_size:26;
unsigned max_packet_count:11;
- unsigned host_channels:4;
+ unsigned host_channels:5;
unsigned hs_phy_type:2;
unsigned fs_phy_type:2;
unsigned i2c_enable:1;
@@ -370,6 +372,7 @@ struct dwc2_hw_params {
* This value is in microseconds per (micro)frame. The
* assumption is that all periodic transfers may occur in
* the same (micro)frame.
+ * @frame_usecs: Internal variable used by the microframe scheduler
* @frame_number: Frame number read from the core at SOF. The value ranges
* from 0 to HFNUM_MAX_FRNUM.
* @periodic_qh_count: Count of periodic QHs, if using several eps. Used for
@@ -382,6 +385,8 @@ struct dwc2_hw_params {
* host channel is available for non-periodic transactions.
* @non_periodic_channels: Number of host channels assigned to non-periodic
* transfers
+ * @available_host_channels: Number of host channels available for the microframe
+ * scheduler to use
* @hc_ptr_array: Array of pointers to the host channel descriptors.
* Allows accessing a host channel descriptor given the
* host channel number. This is useful in interrupt
@@ -436,6 +441,7 @@ struct dwc2_hsotg {
struct list_head periodic_sched_assigned;
struct list_head periodic_sched_queued;
u16 periodic_usecs;
+ u16 frame_usecs[8];
u16 frame_number;
u16 periodic_qh_count;
@@ -451,6 +457,7 @@ struct dwc2_hsotg {
struct list_head free_hc_list;
int periodic_channels;
int non_periodic_channels;
+ int available_host_channels;
struct dwc2_host_chan *hc_ptr_array[MAX_EPS_CHANNELS];
u8 *status_buf;
dma_addr_t status_buf_dma;
diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c
index da0d35cc33c..3cfd2d5152c 100644
--- a/drivers/staging/dwc2/hcd.c
+++ b/drivers/staging/dwc2/hcd.c
@@ -537,10 +537,15 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
int i;
hsotg->flags.d32 = 0;
-
hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
- hsotg->non_periodic_channels = 0;
- hsotg->periodic_channels = 0;
+
+ if (hsotg->core_params->uframe_sched > 0) {
+ hsotg->available_host_channels =
+ hsotg->core_params->host_channels;
+ } else {
+ hsotg->non_periodic_channels = 0;
+ hsotg->periodic_channels = 0;
+ }
/*
* Put all channels in the free channel list and clean up channel
@@ -716,8 +721,7 @@ static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
* @qh: Transactions from the first QTD for this QH are selected and assigned
* to a free host channel
*/
-static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
- struct dwc2_qh *qh)
+static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
struct dwc2_host_chan *chan;
struct dwc2_hcd_urb *urb;
@@ -729,18 +733,18 @@ static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
if (list_empty(&qh->qtd_list)) {
dev_dbg(hsotg->dev, "No QTDs in QH list\n");
- return;
+ return -ENOMEM;
}
if (list_empty(&hsotg->free_hc_list)) {
dev_dbg(hsotg->dev, "No free channel to assign\n");
- return;
+ return -ENOMEM;
}
chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
hc_list_entry);
- /* Remove the host channel from the free list */
+ /* Remove host channel from free list */
list_del_init(&chan->hc_list_entry);
qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
@@ -780,6 +784,10 @@ static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
chan->data_pid_start = qh->data_toggle;
chan->multi_count = 1;
+ if (urb->actual_length > urb->length &&
+ !dwc2_hcd_is_pipe_in(&urb->pipe_info))
+ urb->actual_length = urb->length;
+
if (hsotg->core_params->dma_enable > 0) {
chan->xfer_dma = urb->dma + urb->actual_length;
@@ -817,7 +825,7 @@ static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
&hsotg->free_hc_list);
qtd->in_process = 0;
qh->channel = NULL;
- return;
+ return -ENOMEM;
}
} else {
chan->align_buf = 0;
@@ -836,6 +844,8 @@ static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
dwc2_hc_init(hsotg, chan);
chan->qh = qh;
+
+ return 0;
}
/**
@@ -864,8 +874,14 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
while (qh_ptr != &hsotg->periodic_sched_ready) {
if (list_empty(&hsotg->free_hc_list))
break;
+ if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->available_host_channels <= 1)
+ break;
+ hsotg->available_host_channels--;
+ }
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- dwc2_assign_and_init_hc(hsotg, qh);
+ if (dwc2_assign_and_init_hc(hsotg, qh))
+ break;
/*
* Move the QH from the periodic ready schedule to the
@@ -884,13 +900,21 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
num_channels = hsotg->core_params->host_channels;
qh_ptr = hsotg->non_periodic_sched_inactive.next;
while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
- if (hsotg->non_periodic_channels >= num_channels -
+ if (hsotg->core_params->uframe_sched <= 0 &&
+ hsotg->non_periodic_channels >= num_channels -
hsotg->periodic_channels)
break;
if (list_empty(&hsotg->free_hc_list))
break;
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- dwc2_assign_and_init_hc(hsotg, qh);
+ if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->available_host_channels < 1)
+ break;
+ hsotg->available_host_channels--;
+ }
+
+ if (dwc2_assign_and_init_hc(hsotg, qh))
+ break;
/*
* Move the QH from the non-periodic inactive schedule to the
@@ -905,7 +929,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
else
ret_val = DWC2_TRANSACTION_ALL;
- hsotg->non_periodic_channels++;
+ if (hsotg->core_params->uframe_sched <= 0)
+ hsotg->non_periodic_channels++;
}
return ret_val;
@@ -2848,6 +2873,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
hsotg->hc_ptr_array[i] = channel;
}
+ if (hsotg->core_params->uframe_sched > 0)
+ dwc2_hcd_init_usecs(hsotg);
+
/* Initialize hsotg start work */
INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
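
Editorial note: with uframe_sched enabled, the hcd.c hunks above stop counting periodic and non-periodic channels separately and keep a single available_host_channels pool: it is reset to host_channels in dwc2_hcd_reinit(), decremented when a QH is assigned a channel, and incremented again when the channel is released (see the hcd_intr.c and hcd_ddma.c hunks below). A minimal sketch of that accounting with simplified, invented names:

#include <linux/errno.h>

/* Illustrative pool accounting, mirroring the uframe_sched branches above */
struct chan_pool {
	int total;	/* core_params->host_channels     */
	int available;	/* hsotg->available_host_channels */
};

static void pool_reset(struct chan_pool *p)
{
	p->available = p->total;	/* as in dwc2_hcd_reinit() */
}

static int pool_take(struct chan_pool *p, int reserve)
{
	/* the periodic path keeps one channel in reserve (the "<= 1" check) */
	if (p->available <= reserve)
		return -ENOMEM;
	p->available--;			/* as in select_transactions */
	return 0;
}

static void pool_give_back(struct chan_pool *p)
{
	p->available++;			/* as when a channel is released */
}
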
diff --git a/drivers/staging/dwc2/hcd.h b/drivers/staging/dwc2/hcd.h
index cc0a1170831..89a5484f5b7 100644
--- a/drivers/staging/dwc2/hcd.h
+++ b/drivers/staging/dwc2/hcd.h
@@ -238,6 +238,7 @@ enum dwc2_transaction_type {
* @interval: Interval between transfers in (micro)frames
* @sched_frame: (Micro)frame to initialize a periodic transfer.
* The transfer executes in the following (micro)frame.
+ * @frame_usecs: Internal variable used by the microframe scheduler
* @start_split_frame: (Micro)frame at which last start split was initialized
* @ntd: Actual number of transfer descriptors in a list
* @dw_align_buf: Used instead of original buffer if its physical address
@@ -271,6 +272,7 @@ struct dwc2_qh {
u16 usecs;
u16 interval;
u16 sched_frame;
+ u16 frame_usecs[8];
u16 start_split_frame;
u16 ntd;
u8 *dw_align_buf;
@@ -463,6 +465,7 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
+extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
diff --git a/drivers/staging/dwc2/hcd_ddma.c b/drivers/staging/dwc2/hcd_ddma.c
index 69070f4442a..c7d43451977 100644
--- a/drivers/staging/dwc2/hcd_ddma.c
+++ b/drivers/staging/dwc2/hcd_ddma.c
@@ -271,10 +271,14 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
{
struct dwc2_host_chan *chan = qh->channel;
- if (dwc2_qh_is_non_per(qh))
- hsotg->non_periodic_channels--;
- else
+ if (dwc2_qh_is_non_per(qh)) {
+ if (hsotg->core_params->uframe_sched > 0)
+ hsotg->available_host_channels++;
+ else
+ hsotg->non_periodic_channels--;
+ } else {
dwc2_update_frame_list(hsotg, qh, 0);
+ }
/*
* The condition is added to prevent double cleanup try in case of
@@ -370,7 +374,8 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) &&
- !hsotg->periodic_channels && hsotg->frame_list) {
+ (hsotg->core_params->uframe_sched > 0 ||
+ !hsotg->periodic_channels) && hsotg->frame_list) {
dwc2_per_sched_disable(hsotg);
dwc2_frame_list_free(hsotg);
}
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/staging/dwc2/hcd_intr.c
index e143f69939f..dda18540f5a 100644
--- a/drivers/staging/dwc2/hcd_intr.c
+++ b/drivers/staging/dwc2/hcd_intr.c
@@ -748,18 +748,23 @@ cleanup:
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
- switch (chan->ep_type) {
- case USB_ENDPOINT_XFER_CONTROL:
- case USB_ENDPOINT_XFER_BULK:
- hsotg->non_periodic_channels--;
- break;
- default:
- /*
- * Don't release reservations for periodic channels here.
- * That's done when a periodic transfer is descheduled (i.e.
- * when the QH is removed from the periodic schedule).
- */
- break;
+ if (hsotg->core_params->uframe_sched > 0) {
+ hsotg->available_host_channels++;
+ } else {
+ switch (chan->ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ hsotg->non_periodic_channels--;
+ break;
+ default:
+ /*
+ * Don't release reservations for periodic channels
+ * here. That's done when a periodic transfer is
+ * descheduled (i.e. when the QH is removed from the
+ * periodic schedule).
+ */
+ break;
+ }
}
haintmsk = readl(hsotg->regs + HAINTMSK);
@@ -1927,23 +1932,22 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
chan = hsotg->hc_ptr_array[chnum];
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
- chnum);
-
hcint = readl(hsotg->regs + HCINT(chnum));
hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev,
- " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
- hcint, hcintmsk, hcint & hcintmsk);
-
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
writel(hcint, hsotg->regs + HCINT(chnum));
return;
}
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
+ chnum);
+ dev_vdbg(hsotg->dev,
+ " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+ hcint, hcintmsk, hcint & hcintmsk);
+ }
+
writel(hcint, hsotg->regs + HCINT(chnum));
chan->hcint = hcint;
hcint &= hcintmsk;
diff --git a/drivers/staging/dwc2/hcd_queue.c b/drivers/staging/dwc2/hcd_queue.c
index b1980ef28fa..f200f1f6e1c 100644
--- a/drivers/staging/dwc2/hcd_queue.c
+++ b/drivers/staging/dwc2/hcd_queue.c
@@ -251,12 +251,12 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*
* @hsotg: The HCD state structure for the DWC OTG controller
*
- * Return: 0 if successful, negative error code otherise
+ * Return: 0 if successful, negative error code otherwise
*/
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
/*
- * Currently assuming that there is a dedicated host channnel for
+ * Currently assuming that there is a dedicated host channel for
* each periodic transaction plus at least one host channel for
* non-periodic transactions
*/
@@ -324,6 +324,146 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
}
/**
+ * Microframe scheduler
+ * track the total use in hsotg->frame_usecs
+ * keep each qh use in qh->frame_usecs
+ * when surrendering the qh then donate the time back
+ */
+static const unsigned short max_uframe_usecs[] = {
+ 100, 100, 100, 100, 100, 100, 30, 0
+};
+
+void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ hsotg->frame_usecs[i] = max_uframe_usecs[i];
+}
+
+static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ unsigned short utime = qh->usecs;
+ int done = 0;
+ int i = 0;
+ int ret = -1;
+
+ while (!done) {
+ /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
+ if (utime <= hsotg->frame_usecs[i]) {
+ hsotg->frame_usecs[i] -= utime;
+ qh->frame_usecs[i] += utime;
+ ret = i;
+ done = 1;
+ } else {
+ i++;
+ if (i == 8)
+ done = 1;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * use this for FS apps that can span multiple uframes
+ */
+static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ unsigned short utime = qh->usecs;
+ unsigned short xtime;
+ int t_left = utime;
+ int done = 0;
+ int i = 0;
+ int j;
+ int ret = -1;
+
+ while (!done) {
+ if (hsotg->frame_usecs[i] <= 0) {
+ i++;
+ if (i == 8) {
+ ret = -1;
+ done = 1;
+ }
+ continue;
+ }
+
+ /*
+ * we need n consecutive slots so use j as a start slot
+ * j plus j+1 must be enough time (for now)
+ */
+ xtime = hsotg->frame_usecs[i];
+ for (j = i + 1; j < 8; j++) {
+ /*
+ * if we add this frame remaining time to xtime we may
+ * be OK, if not we need to test j for a complete frame
+ */
+ if (xtime + hsotg->frame_usecs[j] < utime) {
+ if (hsotg->frame_usecs[j] <
+ max_uframe_usecs[j]) {
+ ret = -1;
+ break;
+ }
+ }
+ if (xtime >= utime) {
+ ret = i;
+ break;
+ }
+ /* add the frame time to x time */
+ xtime += hsotg->frame_usecs[j];
+ /* we must have a fully available next frame or break */
+ if (xtime < utime &&
+ hsotg->frame_usecs[j] == max_uframe_usecs[j]) {
+ ret = -1;
+ break;
+ }
+ }
+ if (ret >= 0) {
+ t_left = utime;
+ for (j = i; t_left > 0 && j < 8; j++) {
+ t_left -= hsotg->frame_usecs[j];
+ if (t_left <= 0) {
+ qh->frame_usecs[j] +=
+ hsotg->frame_usecs[j] + t_left;
+ hsotg->frame_usecs[j] = -t_left;
+ ret = i;
+ done = 1;
+ } else {
+ qh->frame_usecs[j] +=
+ hsotg->frame_usecs[j];
+ hsotg->frame_usecs[j] = 0;
+ }
+ }
+ } else {
+ i++;
+ if (i == 8) {
+ ret = -1;
+ done = 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int ret;
+
+ if (qh->dev_speed == USB_SPEED_HIGH) {
+ /* if this is a hs transaction we need a full frame */
+ ret = dwc2_find_single_uframe(hsotg, qh);
+ } else {
+ /*
+ * if this is a fs transaction we may need a sequence
+ * of frames
+ */
+ ret = dwc2_find_multi_uframe(hsotg, qh);
+ }
+ return ret;
+}
+
+/**
* dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
* host channel is large enough to handle the maximum data transfer in a single
* (micro)frame for a periodic transfer
@@ -367,15 +507,35 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
- status = dwc2_periodic_channel_available(hsotg);
- if (status) {
- dev_dbg(hsotg->dev,
- "%s: No host channel available for periodic transfer\n",
- __func__);
- return status;
+ if (hsotg->core_params->uframe_sched > 0) {
+ int frame = -1;
+
+ status = dwc2_find_uframe(hsotg, qh);
+ if (status == 0)
+ frame = 7;
+ else if (status > 0)
+ frame = status - 1;
+
+ /* Set the new frame up */
+ if (frame > -1) {
+ qh->sched_frame &= ~0x7;
+ qh->sched_frame |= (frame & 7);
+ }
+
+ if (status != -1)
+ status = 0;
+ } else {
+ status = dwc2_periodic_channel_available(hsotg);
+ if (status) {
+ dev_info(hsotg->dev,
+ "%s: No host channel available for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ status = dwc2_check_periodic_bandwidth(hsotg, qh);
}
- status = dwc2_check_periodic_bandwidth(hsotg, qh);
if (status) {
dev_dbg(hsotg->dev,
"%s: Insufficient periodic bandwidth for periodic transfer\n",
@@ -399,8 +559,9 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
list_add_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_inactive);
- /* Reserve periodic channel */
- hsotg->periodic_channels++;
+ if (hsotg->core_params->uframe_sched <= 0)
+ /* Reserve periodic channel */
+ hsotg->periodic_channels++;
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs += qh->usecs;
@@ -418,13 +579,22 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
- list_del_init(&qh->qh_list_entry);
+ int i;
- /* Release periodic channel reservation */
- hsotg->periodic_channels--;
+ list_del_init(&qh->qh_list_entry);
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs -= qh->usecs;
+
+ if (hsotg->core_params->uframe_sched > 0) {
+ for (i = 0; i < 8; i++) {
+ hsotg->frame_usecs[i] += qh->frame_usecs[i];
+ qh->frame_usecs[i] = 0;
+ }
+ } else {
+ /* Release periodic channel reservation */
+ hsotg->periodic_channels--;
+ }
}
/**
@@ -581,7 +751,10 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
* Remove from periodic_sched_queued and move to
* appropriate queue
*/
- if (qh->sched_frame == frame_number)
+ if ((hsotg->core_params->uframe_sched > 0 &&
+ dwc2_frame_num_le(qh->sched_frame, frame_number))
+ || (hsotg->core_params->uframe_sched <= 0 &&
+ qh->sched_frame == frame_number))
list_move(&qh->qh_list_entry,
&hsotg->periodic_sched_ready);
else
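
Editorial note: the new scheduler comment in hcd_queue.c summarizes the bookkeeping: hsotg->frame_usecs[] holds the time still free in each of the 8 microframes, each QH records what it claimed in qh->frame_usecs[], and dwc2_deschedule_periodic() donates the time back. Below is a stripped-down sketch of the single-microframe case (high-speed endpoints) using the same per-microframe budget table as the patch; the struct names are invented and the loop is simplified.

/* Free time per microframe, as in the patch (the last two are reduced) */
static const unsigned short max_uframe_usecs[8] = {
	100, 100, 100, 100, 100, 100, 30, 0
};

struct sched_state {
	unsigned short frame_usecs[8];	/* time still free per microframe */
};

struct sched_qh {
	unsigned short usecs;		/* time this endpoint needs */
	unsigned short frame_usecs[8];	/* time it has claimed      */
};

/* Claim one microframe that still has room; return its index or -1 */
static int find_single_uframe(struct sched_state *s, struct sched_qh *qh)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (qh->usecs <= s->frame_usecs[i]) {
			s->frame_usecs[i] -= qh->usecs;
			qh->frame_usecs[i] += qh->usecs;
			return i;
		}
	}
	return -1;
}

/* Deschedule: donate the claimed time back, as dwc2_deschedule_periodic() does */
static void give_back(struct sched_state *s, struct sched_qh *qh)
{
	int i;

	for (i = 0; i < 8; i++) {
		s->frame_usecs[i] += qh->frame_usecs[i];
		qh->frame_usecs[i] = 0;
	}
}
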
diff --git a/drivers/staging/dwc2/pci.c b/drivers/staging/dwc2/pci.c
index 9020260d5df..3d14c8870fc 100644
--- a/drivers/staging/dwc2/pci.c
+++ b/drivers/staging/dwc2/pci.c
@@ -84,6 +84,7 @@ static const struct dwc2_core_params dwc2_module_params = {
.ts_dline = -1,
.reload_ctl = -1,
.ahbcfg = -1,
+ .uframe_sched = -1,
};
/**
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c
index 44cce2fa636..83ca1053bb1 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/staging/dwc2/platform.c
@@ -100,13 +100,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
*/
if (!dev->dev.dma_mask)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- if (!dev->dev.coherent_dma_mask)
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ return retval;
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "missing IRQ resource\n");
- return -EINVAL;
+ return irq;
}
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
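
Editorial note: the platform.c hunk above replaces a direct assignment of coherent_dma_mask with dma_set_coherent_mask(), which lets the DMA API validate the mask, and propagates the real error code from platform_get_irq(). A minimal probe-time sketch of the same pattern; the foo_probe() name is illustrative.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq, ret;

	/* Let the DMA API validate the 32-bit coherent mask */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Propagate the error code from platform_get_irq() as-is */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "missing IRQ resource\n");
		return irq;
	}

	return 0;
}
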
diff --git a/drivers/staging/et131x/Module.symvers b/drivers/staging/et131x/Module.symvers
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/drivers/staging/et131x/Module.symvers
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 9272a24ae61..8da96a6d2c9 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -11,7 +11,12 @@ TODO:
- Look at reducing the number of spinlocks
- Simplify code in nic_rx_pkts(), when determining multicast_pkts_rcvd
- Implement NAPI support
- - in et131x_tx(), don't return NETDEV_TX_BUSY, just drop the packet with kfree_skb().
+ - In et131x_tx(), don't return NETDEV_TX_BUSY, just drop the packet with kfree_skb().
+ - Reduce the number of split lines by careful consideration of variable names etc.
+ - Do this in et131x.c:
+ struct fbr_lookup *fbr;
+ fbr = rx_local->fbr[id];
+ Then replace all the instances of "rx_local->fbr[id]" with fbr.
Please send patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f73e58f5ef8..ab8b29d2cb2 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -493,11 +493,8 @@ struct et131x_adapter {
spinlock_t send_hw_lock;
spinlock_t rcv_lock;
- spinlock_t rcv_pend_lock;
spinlock_t fbr_lock;
- spinlock_t phy_lock;
-
/* Packet Filter and look ahead size */
u32 packet_filter;
@@ -2777,10 +2774,9 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->net_stats.rx_packets++;
/* Set the status on the packet, either resources or success */
- if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
- dev_warn(&adapter->pdev->dev,
- "RFD's are running out\n");
- }
+ if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK)
+ dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
+
count++;
}
@@ -2906,8 +2902,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* number of fragments. If needed, we can call this function,
* although it is less efficient.
*/
- if (nr_frags > 23)
- return -EIO;
+
+ /* nr_frags should be no more than 18. */
+ BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
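
Editorial note: the hunk above trades a per-packet runtime check on nr_frags for a compile-time one. BUILD_BUG_ON() breaks the build if its condition can be true, so the fragment-count assumption is verified once instead of on every transmit; the value 23 is carried over from the original runtime check. A minimal sketch of the mechanism, with an illustrative wrapper function:

#include <linux/bug.h>
#include <linux/skbuff.h>

static void check_frag_budget(void)
{
	/*
	 * Compiles to nothing while the condition is false; fails the
	 * build if MAX_SKB_FRAGS ever grows past the descriptor budget.
	 */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
}
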
@@ -3100,11 +3097,10 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
shbufva = (u16 *) skb->data;
if ((shbufva[0] == 0xffff) &&
- (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
+ (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff))
tcb->flags |= FMP_DEST_BROAD;
- } else if ((shbufva[0] & 0x3) == 0x0001) {
+ else if ((shbufva[0] & 0x3) == 0x0001)
tcb->flags |= FMP_DEST_MULTI;
- }
}
tcb->next = NULL;
@@ -3605,17 +3601,10 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
goto err_out;
}
- /* Let's set up the PORT LOGIC Register. First we need to know what
- * the max_payload_size is
- */
- if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
- dev_err(&pdev->dev,
- "Could not read PCI config space for Max Payload Size\n");
- goto err_out;
- }
+ /* Let's set up the PORT LOGIC Register. */
/* Program the Ack/Nak latency and replay timers */
- max_payload &= 0x07;
+ max_payload = pdev->pcie_mpss;
if (max_payload < 2) {
static const u16 acknak[2] = { 0x76, 0xD0 };
@@ -3645,8 +3634,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
}
/* Change the max read size to 2k */
- if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
+ if (pcie_set_readrq(pdev, 2048)) {
dev_err(&pdev->dev,
"Couldn't change PCI config space for Max read size\n");
goto err_out;
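
Editorial note: instead of open-coding PCI Express config-space accesses, the hunk above uses helpers the PCI core already provides: the cached pdev->pcie_mpss field for the supported max payload size and pcie_set_readrq() to program the max read request size. A minimal usage sketch; the function name and the dev_dbg message are illustrative, the 2048-byte value matches the hunk.

#include <linux/pci.h>

static int foo_tune_pcie(struct pci_dev *pdev)
{
	/* Encoded max payload size supported: 128 << pcie_mpss bytes */
	u8 mpss = pdev->pcie_mpss;

	if (mpss < 2)
		dev_dbg(&pdev->dev, "small max payload (encoding %u)\n", mpss);

	/* Raise the max read request size to 2 KB */
	return pcie_set_readrq(pdev, 2048);
}
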
@@ -3926,9 +3914,7 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
spin_lock_init(&adapter->tcb_ready_qlock);
spin_lock_init(&adapter->send_hw_lock);
spin_lock_init(&adapter->rcv_lock);
- spin_lock_init(&adapter->rcv_pend_lock);
spin_lock_init(&adapter->fbr_lock);
- spin_lock_init(&adapter->phy_lock);
adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
@@ -4797,21 +4783,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 64 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 32 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
rc = -EIO;
goto err_release_res;
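
Editorial note: the last et131x.c hunk collapses the separate streaming/coherent mask calls into dma_set_mask_and_coherent(), which sets both masks at once; the "try 64-bit, fall back to 32-bit" order is unchanged. A minimal sketch of that probe-time pattern, with an illustrative function name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int foo_setup_dma(struct pci_dev *pdev)
{
	/* Prefer 64-bit DMA, fall back to 32-bit, otherwise give up */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;

	dev_err(&pdev->dev, "No usable DMA addressing method\n");
	return -EIO;
}
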
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
index 1fc4ac12e24..9dce54eae1c 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
@@ -27,7 +27,7 @@
#define _BOOTH_
// Official bootloader
-unsigned char bootimage [] = {
+static unsigned char bootimage [] = {
0x00,0x00,0x01,0x5E,0x00,0x00
,0x00,0x00,0x00,0x00,0x02,0xD7
,0x00,0x00,0x01,0x5E,0x46,0xB3
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index 6311b2ff581..d44e8583ad1 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -304,7 +304,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
struct dsp_file_hdr *pFileHdr5;
struct dsp_image_info *pDspImageInfoV6 = NULL;
long requested_version;
- bool bGoodVersion = 0;
+ bool bGoodVersion = false;
struct drv_msg *pMailBoxData;
u16 *pUsData = NULL;
u16 *pUsFile = NULL;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index b2330f1df7e..88f6f9ce304 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -32,14 +32,14 @@
#define seq_putx(m, message, size, var) \
seq_printf(m, message); \
- for(i = 0; i < (size - 1); i++) { \
+ for (i = 0; i < (size - 1); i++) { \
seq_printf(m, "%02x:", var[i]); \
} \
seq_printf(m, "%02x\n", var[i])
#define seq_putd(m, message, size, var) \
seq_printf(m, message); \
- for(i = 0; i < (size - 1); i++) { \
+ for (i = 0; i < (size - 1); i++) { \
seq_printf(m, "%d.", var[i]); \
} \
seq_printf(m, "%d\n", var[i])
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 5190c8ac4e0..68ded17c0f5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -1,12 +1,8 @@
-//=====================================================
-// CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-//
-//
-// This file is part of Express Card USB Driver
-//
-// $Id:
-//====================================================
-// 20090926; aelias; removed compiler warnings; ubuntu 9.04; 2.6.28-15-generic
+/*
+* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
+*
+* This file is part of Express Card USB Driver
+*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -111,23 +107,12 @@ struct dsp_image_info {
};
-//---------------------------------------------------------------------------
-// Function: check_usb_db
-//
-// Parameters: struct ft1000_usb - device structure
-//
-// Returns: 0 - success
-//
-// Description: This function checks if the doorbell register is cleared
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static u32 check_usb_db (struct ft1000_usb *ft1000dev)
+/* checks if the doorbell register is cleared */
+static int check_usb_db(struct ft1000_usb *ft1000dev)
{
int loopcnt;
u16 temp;
- u32 status;
+ int status;
loopcnt = 0;
@@ -169,25 +154,12 @@ static u32 check_usb_db (struct ft1000_usb *ft1000dev)
return HANDSHAKE_MAG_TIMEOUT_VALUE;
}
-//---------------------------------------------------------------------------
-// Function: get_handshake
-//
-// Parameters: struct ft1000_usb - device structure
-// u16 expected_value - the handshake value expected
-//
-// Returns: handshakevalue - success
-// HANDSHAKE_TIMEOUT_VALUE - failure
-//
-// Description: This function gets the handshake and compare with the expected value
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* gets the handshake and compares it with the expected value */
static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
{
u16 handshake;
int loopcnt;
- u32 status = 0;
+ int status = 0;
loopcnt = 0;
@@ -229,25 +201,12 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
return HANDSHAKE_TIMEOUT_VALUE;
}
-//---------------------------------------------------------------------------
-// Function: put_handshake
-//
-// Parameters: struct ft1000_usb - device structure
-// u16 handshake_value - handshake to be written
-//
-// Returns: none
-//
-// Description: This function write the handshake value to the handshake location
-// in DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* write the handshake value to the handshake location */
static void put_handshake(struct ft1000_usb *ft1000dev,u16 handshake_value)
{
u32 tempx;
u16 tempword;
- u32 status;
+ int status;
tempx = (u32)handshake_value;
tempx = ntohl(tempx);
@@ -267,7 +226,7 @@ static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
u16 handshake;
int loopcnt;
u16 temp;
- u32 status = 0;
+ int status = 0;
loopcnt = 0;
handshake = 0;
@@ -316,22 +275,10 @@ static void put_handshake_usb(struct ft1000_usb *ft1000dev,u16 handshake_value)
for (i=0; i<1000; i++);
}
-//---------------------------------------------------------------------------
-// Function: get_request_type
-//
-// Parameters: struct ft1000_usb - device structure
-//
-// Returns: request type - success
-//
-// Description: This function returns the request type
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
static u16 get_request_type(struct ft1000_usb *ft1000dev)
{
u16 request_type;
- u32 status;
+ int status;
u16 tempword;
u32 tempx;
@@ -354,7 +301,7 @@ static u16 get_request_type(struct ft1000_usb *ft1000dev)
static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
{
u16 request_type;
- u32 status;
+ int status;
u16 tempword;
u32 tempx;
@@ -380,23 +327,11 @@ static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
return request_type;
}
-//---------------------------------------------------------------------------
-// Function: get_request_value
-//
-// Parameters: struct ft1000_usb - device structure
-//
-// Returns: request value - success
-//
-// Description: This function returns the request value
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
static long get_request_value(struct ft1000_usb *ft1000dev)
{
u32 value;
u16 tempword;
- u32 status;
+ int status;
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
@@ -416,23 +351,11 @@ static long get_request_value(struct ft1000_usb *ft1000dev)
}
-//---------------------------------------------------------------------------
-// Function: put_request_value
-//
-// Parameters: struct ft1000_usb - device structure
-// long lvalue - value to be put into DPRAM location DWNLD_MAG1_SIZE_LOC
-//
-// Returns: none
-//
-// Description: This function writes a value to DWNLD_MAG1_SIZE_LOC
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* writes a value to DWNLD_MAG1_SIZE_LOC */
static void put_request_value(struct ft1000_usb *ft1000dev, long lvalue)
{
u32 tempx;
- u32 status;
+ int status;
tempx = ntohl(lvalue);
status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC,
@@ -441,18 +364,7 @@ static void put_request_value(struct ft1000_usb *ft1000dev, long lvalue)
-//---------------------------------------------------------------------------
-// Function: hdr_checksum
-//
-// Parameters: struct pseudo_hdr *pHdr - Pseudo header pointer
-//
-// Returns: checksum - success
-//
-// Description: This function returns the checksum of the pseudo header
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* returns the checksum of the pseudo header */
static u16 hdr_checksum(struct pseudo_hdr *pHdr)
{
u16 *usPtr = (u16 *)pHdr;
@@ -471,144 +383,130 @@ static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset)
for (i = 0; i < len; i++) {
if (buff_w[i] != buff_r[i + offset])
- return -1;
+ return -EREMOTEIO;
}
return 0;
}
-//---------------------------------------------------------------------------
-// Function: write_blk
-//
-// Parameters: struct ft1000_usb - device structure
-// u16 **pUsFile - DSP image file pointer in u16
-// u8 **pUcFile - DSP image file pointer in u8
-// long word_length - length of the buffer to be written
-// to DPRAM
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes a block of DSP image to DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static u32 write_blk (struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
+static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
+ u16 tempbuffer[], u16 dpram)
{
- u32 Status = STATUS_SUCCESS;
- u16 dpram;
- int loopcnt, i, j;
- u16 tempword;
- u16 tempbuffer[64];
- u16 resultbuffer[64];
-
- //DEBUG("FT1000:download:start word_length = %d\n",(int)word_length);
- dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
- tempword = *(*pUsFile);
- (*pUsFile)++;
- Status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0);
- tempword = *(*pUsFile);
- (*pUsFile)++;
- Status = ft1000_write_dpram16(ft1000dev, dpram++, tempword, 1);
-
- *pUcFile = *pUcFile + 4;
- word_length--;
- tempword = (u16)word_length;
- word_length = (word_length / 16) + 1;
- for (; word_length > 0; word_length--) /* In words */
- {
- loopcnt = 0;
-
- for (i=0; i<32; i++)
- {
- if (tempword != 0)
- {
- tempbuffer[i++] = *(*pUsFile);
- (*pUsFile)++;
- tempbuffer[i] = *(*pUsFile);
- (*pUsFile)++;
- *pUcFile = *pUcFile + 4;
- loopcnt++;
- tempword--;
- }
- else
- {
- tempbuffer[i++] = 0;
- tempbuffer[i] = 0;
- }
- }
-
- //DEBUG("write_blk: loopcnt is %d\n", loopcnt);
- //DEBUG("write_blk: bootmode = %d\n", bootmode);
- //DEBUG("write_blk: dpram = %x\n", dpram);
- if (ft1000dev->bootmode == 0)
- {
- if (dpram >= 0x3F4)
- Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 8);
- else
- Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64);
- }
- else
- {
- for (j=0; j<10; j++)
- {
- Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64);
- if (Status == STATUS_SUCCESS)
- {
- // Work around for ASIC bit stuffing problem.
- if ( (tempbuffer[31] & 0xfe00) == 0xfe00)
- {
- Status = ft1000_write_dpram32(ft1000dev, dpram+12, (u8 *)&tempbuffer[24], 64);
- }
- // Let's check the data written
- Status = ft1000_read_dpram32 (ft1000dev, dpram, (u8 *)&resultbuffer[0], 64);
- if ( (tempbuffer[31] & 0xfe00) == 0xfe00)
- {
- if (check_buffers(tempbuffer, resultbuffer, 28, 0)) {
+ int status;
+ u16 resultbuffer[64];
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ status = ft1000_write_dpram32(ft1000dev, dpram,
+ (u8 *)&tempbuffer[0], 64);
+ if (status == 0) {
+ /* Work around for ASIC bit stuffing problem. */
+ if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
+ status = ft1000_write_dpram32(ft1000dev,
+ dpram+12, (u8 *)&tempbuffer[24],
+ 64);
+ }
+ /* Let's check the data written */
+ status = ft1000_read_dpram32(ft1000dev, dpram,
+ (u8 *)&resultbuffer[0], 64);
+ if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
+ if (check_buffers(tempbuffer, resultbuffer, 28,
+ 0)) {
DEBUG("FT1000:download:DPRAM write failed 1 during bootloading\n");
- msleep(10);
- Status = STATUS_FAILURE;
+ usleep_range(9000, 11000);
break;
}
- Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (u8 *)&resultbuffer[0], 64);
+ status = ft1000_read_dpram32(ft1000dev,
+ dpram+12,
+ (u8 *)&resultbuffer[0], 64);
- if (check_buffers(tempbuffer, resultbuffer, 16, 24)) {
+ if (check_buffers(tempbuffer, resultbuffer, 16,
+ 24)) {
DEBUG("FT1000:download:DPRAM write failed 2 during bootloading\n");
- msleep(10);
- Status = STATUS_FAILURE;
+ usleep_range(9000, 11000);
break;
}
-
- }
- else
- {
- if (check_buffers(tempbuffer, resultbuffer, 32, 0)) {
+ } else {
+ if (check_buffers(tempbuffer, resultbuffer, 32,
+ 0)) {
DEBUG("FT1000:download:DPRAM write failed 3 during bootloading\n");
- msleep(10);
- Status = STATUS_FAILURE;
+ usleep_range(9000, 11000);
break;
}
-
}
-
- if (Status == STATUS_SUCCESS)
- break;
-
- }
+ if (status == 0)
+ break;
}
+ }
+ return status;
+}
- if (Status != STATUS_SUCCESS)
- {
- DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]);
- break;
+/* writes a block of DSP image to DPRAM
+ * Parameters: struct ft1000_usb - device structure
+ * u16 **pUsFile - DSP image file pointer in u16
+ * u8 **pUcFile - DSP image file pointer in u8
+ * long word_length - length of the buffer to be written to DPRAM
+ */
+static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
+ long word_length)
+{
+ int status = STATUS_SUCCESS;
+ u16 dpram;
+ int loopcnt, i;
+ u16 tempword;
+ u16 tempbuffer[64];
+
+ /*DEBUG("FT1000:download:start word_length = %d\n",(int)word_length); */
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
+ tempword = *(*pUsFile);
+ (*pUsFile)++;
+ status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0);
+ tempword = *(*pUsFile);
+ (*pUsFile)++;
+ status = ft1000_write_dpram16(ft1000dev, dpram++, tempword, 1);
+
+ *pUcFile = *pUcFile + 4;
+ word_length--;
+ tempword = (u16)word_length;
+ word_length = (word_length / 16) + 1;
+ for (; word_length > 0; word_length--) { /* In words */
+ loopcnt = 0;
+ for (i = 0; i < 32; i++) {
+ if (tempword != 0) {
+ tempbuffer[i++] = *(*pUsFile);
+ (*pUsFile)++;
+ tempbuffer[i] = *(*pUsFile);
+ (*pUsFile)++;
+ *pUcFile = *pUcFile + 4;
+ loopcnt++;
+ tempword--;
+ } else {
+ tempbuffer[i++] = 0;
+ tempbuffer[i] = 0;
+ }
}
- }
- dpram = dpram + loopcnt;
- }
-
- return Status;
+ /*DEBUG("write_blk: loopcnt is %d\n", loopcnt); */
+ /*DEBUG("write_blk: bootmode = %d\n", bootmode); */
+ /*DEBUG("write_blk: dpram = %x\n", dpram); */
+ if (ft1000dev->bootmode == 0) {
+ if (dpram >= 0x3F4)
+ status = ft1000_write_dpram32(ft1000dev, dpram,
+ (u8 *)&tempbuffer[0], 8);
+ else
+ status = ft1000_write_dpram32(ft1000dev, dpram,
+ (u8 *)&tempbuffer[0], 64);
+ } else {
+ status = write_dpram32_and_check(ft1000dev, tempbuffer,
+ dpram);
+ if (status != STATUS_SUCCESS) {
+ DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]);
+ break;
+ }
+ }
+ dpram = dpram + loopcnt;
+ }
+ return status;
}
static void usb_dnld_complete (struct urb *urb)
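The hunk above factors the boot-mode retry logic out of write_blk() into the new write_dpram32_and_check() helper: write the 64-byte block, re-write the tail when the ASIC bit-stuffing workaround applies, then read the block back and compare, for at most ten attempts. It also replaces msleep(10) with usleep_range(9000, 11000), the interface recommended for delays in the roughly 10 us to 20 ms range. Below is a minimal, hypothetical sketch of that write-verify-retry shape only; it is not the driver's DPRAM API, and dev_write_block()/dev_read_block() are invented stand-ins.

/* Hypothetical sketch of a bounded write-verify-retry loop.
 * Assumes <linux/kernel.h>, <linux/string.h>, <linux/errno.h>;
 * dev_write_block()/dev_read_block() are invented stand-ins.
 */
#define WRITE_RETRIES	10

static int write_and_verify(void *dev, const u16 *buf, size_t words)
{
	u16 readback[64];
	int i, ret = -EREMOTEIO;

	if (words > ARRAY_SIZE(readback))
		return -EINVAL;

	for (i = 0; i < WRITE_RETRIES; i++) {
		ret = dev_write_block(dev, buf, words);
		if (ret)
			continue;	/* write failed, try again */
		ret = dev_read_block(dev, readback, words);
		if (ret)
			continue;	/* read-back failed, try again */
		if (!memcmp(buf, readback, words * sizeof(*buf)))
			return 0;	/* verified */
		ret = -EREMOTEIO;	/* mismatch, try again */
	}
	return ret;
}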
@@ -616,27 +514,16 @@ static void usb_dnld_complete (struct urb *urb)
//DEBUG("****** usb_dnld_complete\n");
}
-//---------------------------------------------------------------------------
-// Function: write_blk_fifo
-//
-// Parameters: struct ft1000_usb - device structure
-// u16 **pUsFile - DSP image file pointer in u16
-// u8 **pUcFile - DSP image file pointer in u8
-// long word_length - length of the buffer to be written
-// to DPRAM
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes a block of DSP image to DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static u32 write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
+/* writes a block of DSP image to DPRAM
+ * Parameters: struct ft1000_usb - device structure
+ * u16 **pUsFile - DSP image file pointer in u16
+ * u8 **pUcFile - DSP image file pointer in u8
+ * long word_length - length of the buffer to be written to DPRAM
+ */
+static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
u8 **pUcFile, long word_length)
{
- u32 Status = STATUS_SUCCESS;
+ int Status = STATUS_SUCCESS;
int byte_length;
byte_length = word_length * 4;
@@ -664,22 +551,71 @@ static u32 write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
return Status;
}
-//---------------------------------------------------------------------------
-//
-// Function: scram_dnldr
-//
-// Synopsis: Scramble downloader for Harley based ASIC via USB interface
-//
-// Arguments: pFileStart - pointer to start of file
-// FileLength - file length
-//
-// Returns: status - return code
-//---------------------------------------------------------------------------
-
-u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
+static int scram_start_dwnld(struct ft1000_usb *ft1000dev, u16 *hshake,
+ u32 *state)
+{
+ int status = 0;
+
+ DEBUG("FT1000:STATE_START_DWNLD\n");
+ if (ft1000dev->usbboot)
+ *hshake = get_handshake_usb(ft1000dev, HANDSHAKE_DSP_BL_READY);
+ else
+ *hshake = get_handshake(ft1000dev, HANDSHAKE_DSP_BL_READY);
+ if (*hshake == HANDSHAKE_DSP_BL_READY) {
+ DEBUG("scram_dnldr: handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
+ put_handshake(ft1000dev, HANDSHAKE_DRIVER_READY);
+ } else if (*hshake == HANDSHAKE_TIMEOUT_VALUE) {
+ status = -ETIMEDOUT;
+ } else {
+ DEBUG("FT1000:download:Download error: Handshake failed\n");
+ status = -ENETRESET;
+ }
+ *state = STATE_BOOT_DWNLD;
+ return status;
+}
+
+static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
+ u8 **c_file, const u8 *endpoint, bool boot_case)
+{
+ long word_length;
+ int status;
+
+ /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/
+ word_length = get_request_value(ft1000dev);
+ /*DEBUG("FT1000:word_length = 0x%x\n", (int)word_length); */
+ /*NdisMSleep (100); */
+ if (word_length > MAX_LENGTH) {
+ DEBUG("FT1000:download:Download error: Max length exceeded\n");
+ return STATUS_FAILURE;
+ }
+ if ((word_length * 2 + (long)c_file) > (long)endpoint) {
+ /* Error, beyond boot code range.*/
+ DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length);
+ return STATUS_FAILURE;
+ }
+ if (word_length & 0x1)
+ word_length++;
+ word_length = word_length / 2;
+
+ if (boot_case) {
+ status = write_blk(ft1000dev, s_file, c_file, word_length);
+ /*DEBUG("write_blk returned %d\n", status); */
+ } else {
+ write_blk_fifo(ft1000dev, s_file, c_file, word_length);
+ if (ft1000dev->usbboot == 0)
+ ft1000dev->usbboot++;
+ if (ft1000dev->usbboot == 1)
+ ft1000_write_dpram16(ft1000dev,
+ DWNLD_MAG1_PS_HDR_LOC, 0, 0);
+ }
+ return status;
+}
+
+/* Scramble downloader for Harley based ASIC via USB interface */
+int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
u32 FileLength)
{
- u16 status = STATUS_SUCCESS;
+ int status = STATUS_SUCCESS;
u32 state;
u16 handshake;
struct pseudo_hdr *pseudo_header;
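Across this patch the u16/u32 status variables become plain int so the download path can return standard negative errno values (-ETIMEDOUT when the handshake times out, -ENETRESET when it fails outright, -EREMOTEIO on a read-back mismatch) instead of the driver's private STATUS_FAILURE constant. A hedged sketch of that convention, using a hypothetical wait_for_ready() helper built on this file's get_handshake():

/* Hypothetical helper: 0 on success, negative errno on failure.
 * Assumes <linux/errno.h> and the HANDSHAKE_* constants from this file.
 */
static int wait_for_ready(struct ft1000_usb *dev)
{
	u16 hshake = get_handshake(dev, HANDSHAKE_DSP_BL_READY);

	if (hshake == HANDSHAKE_TIMEOUT_VALUE)
		return -ETIMEDOUT;	/* device never answered */
	if (hshake != HANDSHAKE_DSP_BL_READY)
		return -ENETRESET;	/* unexpected handshake value */
	return 0;
}

Callers then simply propagate the value ("ret = wait_for_ready(dev); if (ret) return ret;") rather than comparing against a driver-specific success code.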
@@ -687,7 +623,6 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
long word_length;
u16 request;
u16 temp;
- u16 tempword;
struct dsp_file_hdr *file_hdr;
struct dsp_image_info *dsp_img_info = NULL;
@@ -733,34 +668,13 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
loader_code_address = file_hdr->loader_code_address;
loader_code_size = file_hdr->loader_code_size;
- correct_version = FALSE;
+ correct_version = false;
while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE)) {
switch (state) {
case STATE_START_DWNLD:
- DEBUG("FT1000:STATE_START_DWNLD\n");
- if (ft1000dev->usbboot)
- handshake =
- get_handshake_usb(ft1000dev,
- HANDSHAKE_DSP_BL_READY);
- else
- handshake =
- get_handshake(ft1000dev,
- HANDSHAKE_DSP_BL_READY);
-
- if (handshake == HANDSHAKE_DSP_BL_READY) {
- DEBUG
- ("scram_dnldr: handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
- put_handshake(ft1000dev,
- HANDSHAKE_DRIVER_READY);
- } else {
- DEBUG
- ("FT1000:download:Download error: Handshake failed\n");
- status = STATUS_FAILURE;
- }
-
- state = STATE_BOOT_DWNLD;
-
+ status = scram_start_dwnld(ft1000dev, &handshake,
+ &state);
break;
case STATE_BOOT_DWNLD:
@@ -794,41 +708,11 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
ft1000dev->fcodeldr = 1;
break;
case REQUEST_CODE_SEGMENT:
- //DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");
- word_length =
- get_request_value(ft1000dev);
- //DEBUG("FT1000:word_length = 0x%x\n", (int)word_length);
- //NdisMSleep (100);
- if (word_length > MAX_LENGTH) {
- DEBUG
- ("FT1000:download:Download error: Max length exceeded\n");
- status = STATUS_FAILURE;
- break;
- }
- if ((word_length * 2 + c_file) >
- boot_end) {
- /*
- * Error, beyond boot code range.
- */
- DEBUG
- ("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n",
- (int)word_length);
- status = STATUS_FAILURE;
- break;
- }
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
- if (word_length & 0x1)
- word_length++;
- word_length = word_length / 2;
-
- status =
- write_blk(ft1000dev, &s_file,
- &c_file, word_length);
- //DEBUG("write_blk returned %d\n", status);
- break;
+ status = request_code_segment(ft1000dev,
+ &s_file, &c_file,
+ (const u8 *)boot_end,
+ true);
+ break;
default:
DEBUG
("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",
@@ -929,45 +813,10 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
}
- word_length =
- get_request_value(ft1000dev);
- //DEBUG("FT1000:download:word_length = %d\n", (int)word_length);
- if (word_length > MAX_LENGTH) {
- DEBUG
- ("FT1000:download:Download error: Max length exceeded\n");
- status = STATUS_FAILURE;
- break;
- }
- if ((word_length * 2 + c_file) >
- code_end) {
- /*
- * Error, beyond boot code range.
- */
- DEBUG
- ("FT1000:download:Download error: Requested len=%d exceeds DSP code boundary.\n",
- (int)word_length);
- status = STATUS_FAILURE;
- break;
- }
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
- if (word_length & 0x1)
- word_length++;
- word_length = word_length / 2;
-
- write_blk_fifo(ft1000dev, &s_file,
- &c_file, word_length);
- if (ft1000dev->usbboot == 0)
- ft1000dev->usbboot++;
- if (ft1000dev->usbboot == 1) {
- tempword = 0;
- ft1000_write_dpram16(ft1000dev,
- DWNLD_MAG1_PS_HDR_LOC,
- tempword,
- 0);
- }
+ status = request_code_segment(ft1000dev,
+ &s_file, &c_file,
+ (const u8 *)code_end,
+ false);
break;
@@ -1036,7 +885,7 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
status =
fix_ft1000_write_dpram32
(ft1000dev, dpram++,
- (u8 *) & templong);
+ (u8 *) &templong);
}
break;
@@ -1044,7 +893,7 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
case REQUEST_CODE_BY_VERSION:
DEBUG
("FT1000:download:REQUEST_CODE_BY_VERSION\n");
- correct_version = FALSE;
+ correct_version = false;
requested_version =
get_request_value(ft1000dev);
@@ -1061,7 +910,7 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
if (dsp_img_info->version ==
requested_version) {
- correct_version = TRUE;
+ correct_version = true;
DEBUG
("FT1000:download: correct_version is TRUE\n");
s_file =
@@ -1137,13 +986,13 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
if (pseudo_header->checksum ==
hdr_checksum(pseudo_header)) {
if (pseudo_header->portdest !=
- 0x80 /* Dsp OAM */ ) {
+ 0x80 /* Dsp OAM */) {
state = STATE_DONE_PROV;
break;
}
pseudo_header_len = ntohs(pseudo_header->length); /* Byte length for PROV records */
- // Get buffer for provisioning data
+ /* Get buffer for provisioning data */
pbuffer =
kmalloc((pseudo_header_len +
sizeof(struct pseudo_hdr)),
@@ -1201,9 +1050,8 @@ u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
} /* End Switch */
- if (status != STATUS_SUCCESS) {
+ if (status != STATUS_SUCCESS)
break;
- }
/****
// Check if Card is present
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 9b8fed7b405..0d4931b2c2e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -29,12 +29,7 @@
//#define JDEBUG
-static int ft1000_reset(void *ft1000dev);
static int ft1000_submit_rx_urb(struct ft1000_info *info);
-static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int ft1000_open (struct net_device *dev);
-static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev);
-static int ft1000_chkcard (struct ft1000_usb *dev);
static u8 tempbuffer[1600];
@@ -526,7 +521,7 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
//-----------------------------------------------------------------------
int dsp_reload(struct ft1000_usb *ft1000dev)
{
- u16 status;
+ int status;
u16 tempword;
u32 templong;
@@ -633,9 +628,9 @@ static int ft1000_reset_card(struct net_device *dev)
DEBUG("ft1000_hw:ft1000_reset_card called.....\n");
- ft1000dev->fCondResetPend = 1;
+ ft1000dev->fCondResetPend = true;
info->CardReady = 0;
- ft1000dev->fProvComplete = 0;
+ ft1000dev->fProvComplete = false;
/* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
@@ -666,11 +661,216 @@ static int ft1000_reset_card(struct net_device *dev)
info->CardReady = 1;
- ft1000dev->fCondResetPend = 0;
+ ft1000dev->fCondResetPend = false;
return TRUE;
}
+//---------------------------------------------------------------------------
+// Function: ft1000_usb_transmit_complete
+//
+// Parameters: urb - transmitted usb urb
+//
+//
+// Returns: none
+//
+// Description: This is the callback function when a urb is transmitted
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+static void ft1000_usb_transmit_complete(struct urb *urb)
+{
+
+ struct ft1000_usb *ft1000dev = urb->context;
+
+ if (urb->status)
+ pr_err("%s: TX status %d\n", ft1000dev->net->name, urb->status);
+
+ netif_wake_queue(ft1000dev->net);
+}
+
+//---------------------------------------------------------------------------
+//
+// Function: ft1000_copy_down_pkt
+// Description: This function will take an ethernet packet and convert it to
+// a Flarion packet prior to sending it to the ASIC Downlink
+// FIFO.
+// Input:
+// dev - device structure
+// packet - address of ethernet packet
+// len - length of IP packet
+// Output:
+// status - FAILURE
+// SUCCESS
+//
+//---------------------------------------------------------------------------
+static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
+{
+ struct ft1000_info *pInfo = netdev_priv(netdev);
+ struct ft1000_usb *pFt1000Dev = pInfo->priv;
+
+ int count, ret;
+ u8 *t;
+ struct pseudo_hdr hdr;
+
+ if (!pInfo->CardReady) {
+ DEBUG("ft1000_copy_down_pkt::Card Not Ready\n");
+ return -ENODEV;
+ }
+
+ count = sizeof(struct pseudo_hdr) + len;
+ if (count > MAX_BUF_SIZE) {
+ DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n");
+ DEBUG("size = %d\n", count);
+ return -EINVAL;
+ }
+
+ if (count % 4)
+ count = count + (4 - (count % 4));
+
+ memset(&hdr, 0, sizeof(struct pseudo_hdr));
+
+ hdr.length = ntohs(count);
+ hdr.source = 0x10;
+ hdr.destination = 0x20;
+ hdr.portdest = 0x20;
+ hdr.portsrc = 0x10;
+ hdr.sh_str_id = 0x91;
+ hdr.control = 0x00;
+
+ hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
+ hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
+
+ memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
+ memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len);
+
+ netif_stop_queue(netdev);
+
+ usb_fill_bulk_urb(pFt1000Dev->tx_urb,
+ pFt1000Dev->dev,
+ usb_sndbulkpipe(pFt1000Dev->dev,
+ pFt1000Dev->bulk_out_endpointAddr),
+ pFt1000Dev->tx_buf, count,
+ ft1000_usb_transmit_complete, (void *)pFt1000Dev);
+
+ t = (u8 *) pFt1000Dev->tx_urb->transfer_buffer;
+
+ ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
+
+ if (ret) {
+ DEBUG("ft1000 failed tx_urb %d\n", ret);
+ return ret;
+ } else {
+ pInfo->stats.tx_packets++;
+ pInfo->stats.tx_bytes += (len + 14);
+ }
+
+ return 0;
+}
+
+//---------------------------------------------------------------------------
+// Function: ft1000_start_xmit
+//
+// Parameters: skb - socket buffer to be sent
+// dev - network device
+//
+//
+// Returns: none
+//
+// Description: transmit an Ethernet packet
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ft1000_info *pInfo = netdev_priv(dev);
+ struct ft1000_usb *pFt1000Dev = pInfo->priv;
+ u8 *pdata;
+ int maxlen, pipe;
+
+ if (skb == NULL) {
+ DEBUG("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n");
+ return NETDEV_TX_OK;
+ }
+
+ if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
+ DEBUG("network driver is closed, return\n");
+ goto err;
+ }
+
+ pipe =
+ usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr);
+ maxlen = usb_maxpacket(pFt1000Dev->dev, pipe, usb_pipeout(pipe));
+
+ pdata = (u8 *) skb->data;
+
+ if (pInfo->mediastate == 0) {
+ /* Drop packet if mediastate is down */
+ DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n");
+ goto err;
+ }
+
+ if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
+ /* Drop packet which has invalid size */
+ DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n");
+ goto err;
+ }
+
+ ft1000_copy_down_pkt(dev, (pdata + ENET_HEADER_SIZE - 2),
+ skb->len - ENET_HEADER_SIZE + 2);
+
+err:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+//---------------------------------------------------------------------------
+// Function: ft1000_open
+//
+// Parameters:
+// dev - network device
+//
+//
+// Returns: none
+//
+// Description: open the network driver
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+static int ft1000_open(struct net_device *dev)
+{
+ struct ft1000_info *pInfo = netdev_priv(dev);
+ struct ft1000_usb *pFt1000Dev = pInfo->priv;
+ struct timeval tv;
+
+ DEBUG("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
+
+ pInfo->stats.rx_bytes = 0;
+ pInfo->stats.tx_bytes = 0;
+ pInfo->stats.rx_packets = 0;
+ pInfo->stats.tx_packets = 0;
+ do_gettimeofday(&tv);
+ pInfo->ConTm = tv.tv_sec;
+ pInfo->ProgConStat = 0;
+
+ netif_start_queue(dev);
+
+ netif_carrier_on(dev);
+
+ return ft1000_submit_rx_urb(pInfo);
+}
+
+static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
+{
+ struct ft1000_info *info = netdev_priv(dev);
+
+ return &(info->stats);
+}
+
static const struct net_device_ops ftnet_ops =
{
.ndo_open = &ft1000_open,
@@ -679,7 +879,6 @@ static const struct net_device_ops ftnet_ops =
.ndo_get_stats = &ft1000_netdev_stats,
};
-
//---------------------------------------------------------------------------
// Function: init_ft1000_netdev
//
@@ -694,6 +893,13 @@ static const struct net_device_ops ftnet_ops =
// Notes:
//
//---------------------------------------------------------------------------
+
+static int ft1000_reset(void *dev)
+{
+ ft1000_reset_card(dev);
+ return 0;
+}
+
int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
{
struct net_device *netdev;
@@ -752,8 +958,8 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
pInfo->DSP_TIME[1] = 0;
pInfo->DSP_TIME[2] = 0;
pInfo->DSP_TIME[3] = 0;
- ft1000dev->fAppMsgPend = 0;
- ft1000dev->fCondResetPend = 0;
+ ft1000dev->fAppMsgPend = false;
+ ft1000dev->fCondResetPend = false;
ft1000dev->usbboot = 0;
ft1000dev->dspalive = 0;
memset(&ft1000dev->tempbuf[0], 0, sizeof(ft1000dev->tempbuf));
@@ -854,175 +1060,6 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
return 0;
}
-int ft1000_reset(void *dev)
-{
- ft1000_reset_card(dev);
- return 0;
-}
-
-//---------------------------------------------------------------------------
-// Function: ft1000_usb_transmit_complete
-//
-// Parameters: urb - transmitted usb urb
-//
-//
-// Returns: none
-//
-// Description: This is the callback function when a urb is transmitted
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static void ft1000_usb_transmit_complete(struct urb *urb)
-{
-
- struct ft1000_usb *ft1000dev = urb->context;
-
- if (urb->status)
- pr_err("%s: TX status %d\n", ft1000dev->net->name, urb->status);
-
- netif_wake_queue(ft1000dev->net);
-}
-
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_down_pkt
-// Description: This function will take an ethernet packet and convert it to
-// a Flarion packet prior to sending it to the ASIC Downlink
-// FIFO.
-// Input:
-// dev - device structure
-// packet - address of ethernet packet
-// len - length of IP packet
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
-static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
-{
- struct ft1000_info *pInfo = netdev_priv(netdev);
- struct ft1000_usb *pFt1000Dev = pInfo->priv;
-
- int count, ret;
- u8 *t;
- struct pseudo_hdr hdr;
-
- if (!pInfo->CardReady) {
- DEBUG("ft1000_copy_down_pkt::Card Not Ready\n");
- return -ENODEV;
- }
-
- count = sizeof(struct pseudo_hdr) + len;
- if (count > MAX_BUF_SIZE) {
- DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n");
- DEBUG("size = %d\n", count);
- return -EINVAL;
- }
-
- if (count % 4)
- count = count + (4 - (count % 4));
-
- memset(&hdr, 0, sizeof(struct pseudo_hdr));
-
- hdr.length = ntohs(count);
- hdr.source = 0x10;
- hdr.destination = 0x20;
- hdr.portdest = 0x20;
- hdr.portsrc = 0x10;
- hdr.sh_str_id = 0x91;
- hdr.control = 0x00;
-
- hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
- hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
-
- memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
- memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len);
-
- netif_stop_queue(netdev);
-
- usb_fill_bulk_urb(pFt1000Dev->tx_urb,
- pFt1000Dev->dev,
- usb_sndbulkpipe(pFt1000Dev->dev,
- pFt1000Dev->bulk_out_endpointAddr),
- pFt1000Dev->tx_buf, count,
- ft1000_usb_transmit_complete, (void *)pFt1000Dev);
-
- t = (u8 *) pFt1000Dev->tx_urb->transfer_buffer;
-
- ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
-
- if (ret) {
- DEBUG("ft1000 failed tx_urb %d\n", ret);
- return ret;
- } else {
- pInfo->stats.tx_packets++;
- pInfo->stats.tx_bytes += (len + 14);
- }
-
- return 0;
-}
-
-
-//---------------------------------------------------------------------------
-// Function: ft1000_start_xmit
-//
-// Parameters: skb - socket buffer to be sent
-// dev - network device
-//
-//
-// Returns: none
-//
-// Description: transmit an Ethernet packet
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ft1000_info *pInfo = netdev_priv(dev);
- struct ft1000_usb *pFt1000Dev = pInfo->priv;
- u8 *pdata;
- int maxlen, pipe;
-
- if (skb == NULL) {
- DEBUG("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n");
- return NETDEV_TX_OK;
- }
-
- if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
- goto err;
- }
-
- pipe =
- usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr);
- maxlen = usb_maxpacket(pFt1000Dev->dev, pipe, usb_pipeout(pipe));
-
- pdata = (u8 *) skb->data;
-
- if (pInfo->mediastate == 0) {
- /* Drop packet if mediastate is down */
- DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n");
- goto err;
- }
-
- if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
- /* Drop packet which has invalid size */
- DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n");
- goto err;
- }
-
- ft1000_copy_down_pkt(dev, (pdata + ENET_HEADER_SIZE - 2),
- skb->len - ENET_HEADER_SIZE + 2);
-
-err:
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-
//---------------------------------------------------------------------------
//
// Function: ft1000_copy_up_pkt
@@ -1159,44 +1196,6 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
return 0;
}
-
-//---------------------------------------------------------------------------
-// Function: ft1000_open
-//
-// Parameters:
-// dev - network device
-//
-//
-// Returns: none
-//
-// Description: open the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int ft1000_open(struct net_device *dev)
-{
- struct ft1000_info *pInfo = netdev_priv(dev);
- struct ft1000_usb *pFt1000Dev = pInfo->priv;
- struct timeval tv;
-
- DEBUG("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
-
- pInfo->stats.rx_bytes = 0;
- pInfo->stats.tx_bytes = 0;
- pInfo->stats.rx_packets = 0;
- pInfo->stats.tx_packets = 0;
- do_gettimeofday(&tv);
- pInfo->ConTm = tv.tv_sec;
- pInfo->ProgConStat = 0;
-
- netif_start_queue(dev);
-
- netif_carrier_on(dev);
-
- return ft1000_submit_rx_urb(pInfo);
-}
-
//---------------------------------------------------------------------------
// Function: ft1000_close
//
@@ -1228,14 +1227,6 @@ int ft1000_close(struct net_device *net)
return 0;
}
-static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
-
- return &(info->stats);
-}
-
-
//---------------------------------------------------------------------------
//
// Function: ft1000_chkcard
@@ -1441,7 +1432,7 @@ static int ft1000_dsp_prov(void *arg)
msleep(100);
- dev->fProvComplete = 1;
+ dev->fProvComplete = true;
info->CardReady = 1;
return STATUS_SUCCESS;
@@ -1567,12 +1558,12 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
* Send provisioning data to DSP
*/
if (list_empty(&info->prov_list) == 0) {
- dev->fProvComplete = 0;
+ dev->fProvComplete = false;
status = ft1000_dsp_prov(dev);
if (status != STATUS_SUCCESS)
goto out;
} else {
- dev->fProvComplete = 1;
+ dev->fProvComplete = true;
status =
ft1000_write_register(dev, FT1000_DB_HB,
FT1000_REG_DOORBELL);
@@ -1921,7 +1912,7 @@ int ft1000_poll(void* dev_id)
else if (tempword & FT1000_DB_COND_RESET) {
DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
- if (dev->fAppMsgPend == 0) {
+ if (!dev->fAppMsgPend) {
// Reset ASIC and DSP
status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
@@ -1934,8 +1925,8 @@ int ft1000_poll(void* dev_id)
info->ft1000_reset(dev->net);
}
else {
- dev->fProvComplete = 0;
- dev->fCondResetPend = 1;
+ dev->fProvComplete = false;
+ dev->fCondResetPend = true;
}
ft1000_write_register(dev, FT1000_DB_COND_RESET, FT1000_REG_DOORBELL);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 29a7cd23845..a8dd1e54878 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -36,7 +36,7 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
-static bool gPollingfailed = FALSE;
+static bool gPollingfailed = false;
static int ft1000_poll_thread(void *arg)
{
int ret;
@@ -47,7 +47,7 @@ static int ft1000_poll_thread(void *arg)
ret = ft1000_poll(arg);
if (ret != STATUS_SUCCESS) {
DEBUG("ft1000_poll_thread: polling failed\n");
- gPollingfailed = TRUE;
+ gPollingfailed = true;
}
}
}
@@ -171,7 +171,7 @@ static int ft1000_probe(struct usb_interface *interface,
goto err_load;
}
- gPollingfailed = FALSE;
+ gPollingfailed = false;
ft1000dev->pPollThread =
kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index bd1da1f19cd..e8d00a930dc 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -125,7 +125,7 @@ extern size_t FileLength;
extern int numofmsgbuf;
int ft1000_close(struct net_device *dev);
-u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
+int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
u32 FileLength);
extern struct list_head freercvpool;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index ff92f34e474..62df009e5ac 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -2394,7 +2394,8 @@ static int fwserial_create(struct fw_unit *unit)
list_del_rcu(&serial->list);
if (create_loop_dev)
- tty_unregister_device(fwloop_driver, loop_idx(serial->ports[j]));
+ tty_unregister_device(fwloop_driver,
+ loop_idx(serial->ports[j]));
unregister_ttys:
for (--j; j >= 0; --j)
tty_unregister_device(fwtty_driver, serial->ports[j]->index);
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index bc0d510fb0a..c57a6ba5d01 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -110,7 +110,7 @@ static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
return 0;
}
-int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
+static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
{
struct nic *nic = netdev_priv(skb_in->dev);
struct sk_buff *skb_out;
@@ -186,7 +186,7 @@ int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
return 0;
}
-int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
+static int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
{
unsigned short *w = ptr;
int sum = 0;
@@ -226,7 +226,7 @@ int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
return sum;
}
-int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
+static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
{
struct nic *nic = netdev_priv(skb_in->dev);
struct sk_buff *skb_out;
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 5b1ef4000d0..62163673976 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -26,7 +26,7 @@
#include "gdm_mux.h"
-struct workqueue_struct *mux_rx_wq;
+static struct workqueue_struct *mux_rx_wq;
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
@@ -51,7 +51,7 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
-int packet_type_to_index(u16 packetType)
+static int packet_type_to_index(u16 packetType)
{
int i;
@@ -96,12 +96,12 @@ static struct mux_rx *alloc_mux_rx(void)
{
struct mux_rx *r = NULL;
- r = kzalloc(sizeof(struct mux_rx), GFP_ATOMIC);
+ r = kzalloc(sizeof(struct mux_rx), GFP_KERNEL);
if (!r)
return NULL;
- r->urb = usb_alloc_urb(0, GFP_ATOMIC);
- r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_ATOMIC);
+ r->urb = usb_alloc_urb(0, GFP_KERNEL);
+ r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
if (!r->urb || !r->buf) {
usb_free_urb(r->urb);
kfree(r->buf);
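The alloc_mux_rx() hunk above relaxes the allocation flags from GFP_ATOMIC to GFP_KERNEL, presumably because this path runs in process context (probe/initialisation) and is therefore allowed to sleep while the allocator reclaims memory; GFP_ATOMIC should be reserved for code that genuinely cannot sleep. A minimal, hypothetical illustration of the same idea, not the gdm_mux API:

/* Hypothetical sketch: prefer GFP_KERNEL when the caller may sleep.
 * Assumes <linux/slab.h>.
 */
struct rx_buf {
	void *data;
	size_t len;
};

static struct rx_buf *rx_buf_alloc(size_t len)
{
	struct rx_buf *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);	/* process context: may sleep */
	if (!r)
		return NULL;

	r->data = kmalloc(len, GFP_KERNEL);
	if (!r->data) {
		kfree(r);
		return NULL;
	}
	r->len = len;
	return r;
}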
@@ -541,7 +541,7 @@ static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id
ret = init_usb(mux_dev);
if (ret)
- goto err_free_tty;
+ goto err_free_usb;
tty_dev->priv_dev = (void *)mux_dev;
tty_dev->send_func = gdm_mux_send;
@@ -565,8 +565,8 @@ static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id
err_unregister_tty:
unregister_lte_tty_device(tty_dev);
+err_free_usb:
release_usb(mux_dev);
-err_free_tty:
kfree(tty_dev);
err_free_mux:
kfree(mux_dev);
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index 0247a2055e8..c0f7cd75116 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -171,7 +171,8 @@ static void gdm_tty_send_complete(void *arg)
tty_port_tty_wakeup(&gdm->port);
}
-static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
+static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
{
struct gdm *gdm = tty->driver_data;
int remain = len;
@@ -185,7 +186,8 @@ static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf, int l
return 0;
while (1) {
- sending_len = remain > MUX_TX_MAX_SIZE ? MUX_TX_MAX_SIZE : remain;
+ sending_len = remain > MUX_TX_MAX_SIZE ? MUX_TX_MAX_SIZE :
+ remain;
gdm_tty_send(gdm,
(void *)(buf+sent_len),
sending_len,
@@ -247,7 +249,8 @@ int register_lte_tty_device(struct tty_dev *tty_dev, struct device *device)
gdm->minor = j;
gdm->tty_dev = tty_dev;
- tty_port_register_device(&gdm->port, gdm_driver[i], gdm->minor, device);
+ tty_port_register_device(&gdm->port, gdm_driver[i],
+ gdm->minor, device);
}
for (i = 0; i < MAX_ISSUE_NUM; i++)
@@ -309,7 +312,8 @@ int register_lte_tty_driver(void)
tty_driver->major = GDM_TTY_MAJOR;
tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
tty_driver->subtype = SERIAL_TYPE_NORMAL;
- tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_driver->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
tty_driver->init_termios = tty_std_termios;
tty_driver->init_termios.c_cflag = B9600 | CS8 | HUPCL | CLOCAL;
tty_driver->init_termios.c_lflag = ISIG | ICANON | IEXTEN;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index bdc96370e43..781134af69d 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -88,12 +88,11 @@ static struct usb_tx *alloc_tx_struct(int len)
struct usb_tx *t = NULL;
int ret = 0;
- t = kmalloc(sizeof(struct usb_tx), GFP_ATOMIC);
+ t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
if (!t) {
ret = -ENOMEM;
goto out;
}
- memset(t, 0, sizeof(struct usb_tx));
t->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!(len % 512))
@@ -124,12 +123,11 @@ static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
int ret = 0;
- t_sdu = kmalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
+ t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
if (!t_sdu) {
ret = -ENOMEM;
goto out;
}
- memset(t_sdu, 0, sizeof(struct usb_tx_sdu));
t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
if (!t_sdu->buf) {
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index cf32ae099cd..35154d60faf 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -502,7 +502,7 @@ inline int find_type_by_name(const char *name, const char *type)
inline int _write_sysfs_int(char *filename, char *basedir, int val, int verify)
{
- int ret;
+ int ret = 0;
FILE *sysfsfp;
int test;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 04c23262f8e..c22a0edd152 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -13,6 +13,17 @@ Would be nice
3) Expand device set. Lots of other maxim adc's have very
similar interfaces.
+MXS LRADC driver:
+This is a classic MFD device as it combines the following subdevices
+ - touchscreen controller (input subsystem related device)
+ - general purpose ADC channels
+ - battery voltage monitor (power subsystem related device)
+ - die temperature monitor (thermal management)
+
+At least the battery voltage and die temperature features are required in-kernel
+by a driver of the SoC's battery charging unit to avoid any damage to the
+silicon and the battery.
+
TSL2561
Would be nice
1) Open question of userspace vs kernel space balance when
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 5c289614357..4c9364b63c7 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -102,7 +102,6 @@ static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
int addr)
{
struct adis16220_state *st = iio_priv(indio_dev);
- struct spi_message msg;
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -147,10 +146,7 @@ static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
}
xfers[1].len = count;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->adis.spi, &msg);
+ ret = spi_sync_transfer(st->adis.spi, xfers, ARRAY_SIZE(xfers));
if (ret) {
mutex_unlock(&st->buf_lock);
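The adis16220 hunk drops the manual spi_message_init()/spi_message_add_tail()/spi_sync() sequence in favour of spi_sync_transfer(), which builds the message and runs the synchronous transfer for an array of spi_transfer entries. A hedged sketch of a typical write-then-read using that helper (register layout and buffer handling are illustrative only; real drivers normally keep DMA-safe buffers in their state structure rather than on the stack):

/* Hypothetical sketch of spi_sync_transfer() for a write-then-read. */
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int example_read_reg(struct spi_device *spi, u8 addr, u8 *val)
{
	u8 tx = addr;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = &tx,
			.len = 1,
		}, {
			.rx_buf = val,
			.len = 1,
		},
	};

	/* Wraps spi_message setup and spi_sync() in one call. */
	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}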
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index bb852dc9c98..735c0a34fa9 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -190,15 +190,26 @@ static u8 lis3l02dq_axis_map[3][3] = {
};
static int lis3l02dq_read_thresh(struct iio_dev *indio_dev,
- u64 e,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
- return lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
+ int ret;
+
+ ret = lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
}
static int lis3l02dq_write_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
u16 value = val;
return lis3l02dq_spi_write_reg_s16(indio_dev,
@@ -503,9 +514,19 @@ static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
-#define LIS3L02DQ_EVENT_MASK \
- (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
+static const struct iio_event_spec lis3l02dq_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+ }
+};
#define LIS3L02DQ_CHAN(index, mod) \
{ \
@@ -523,7 +544,8 @@ static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
.realbits = 12, \
.storagebits = 16, \
}, \
- .event_mask = LIS3L02DQ_EVENT_MASK, \
+ .event_spec = lis3l02dq_event, \
+ .num_event_specs = ARRAY_SIZE(lis3l02dq_event), \
}
static const struct iio_chan_spec lis3l02dq_channels[] = {
@@ -535,14 +557,14 @@ static const struct iio_chan_spec lis3l02dq_channels[] = {
static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
u8 val;
int ret;
- u8 mask = (1 << (IIO_EVENT_CODE_EXTRACT_MODIFIER(event_code)*2 +
- (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)));
+ u8 mask = (1 << (chan->channel2*2 + (dir == IIO_EV_DIR_RISING)));
ret = lis3l02dq_spi_read_reg_8(indio_dev,
LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
&val);
@@ -587,16 +609,16 @@ error_ret:
}
static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
- u64 event_code,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
int state)
{
int ret = 0;
u8 val, control;
u8 currentlyset;
bool changed = false;
- u8 mask = (1 << (IIO_EVENT_CODE_EXTRACT_MODIFIER(event_code)*2 +
- (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)));
+ u8 mask = (1 << (chan->channel2*2 + (dir == IIO_EV_DIR_RISING)));
mutex_lock(&indio_dev->mlock);
/* read current control */
@@ -654,10 +676,10 @@ static const struct attribute_group lis3l02dq_attribute_group = {
static const struct iio_info lis3l02dq_info = {
.read_raw = &lis3l02dq_read_raw,
.write_raw = &lis3l02dq_write_raw,
- .read_event_value = &lis3l02dq_read_thresh,
- .write_event_value = &lis3l02dq_write_thresh,
- .write_event_config = &lis3l02dq_write_event_config,
- .read_event_config = &lis3l02dq_read_event_config,
+ .read_event_value_new = &lis3l02dq_read_thresh,
+ .write_event_value_new = &lis3l02dq_write_thresh,
+ .write_event_config_new = &lis3l02dq_write_event_config,
+ .read_event_config_new = &lis3l02dq_read_event_config,
.driver_module = THIS_MODULE,
.attrs = &lis3l02dq_attribute_group,
};
@@ -694,7 +716,7 @@ static int lis3l02dq_probe(struct spi_device *spi)
lis3l02dq_channels,
ARRAY_SIZE(lis3l02dq_channels));
if (ret) {
- printk(KERN_ERR "failed to initialize the buffer\n");
+ dev_err(&spi->dev, "failed to initialize the buffer\n");
goto error_unreg_buffer_funcs;
}
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 5b8f0f6c993..79cefe0a516 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -111,7 +111,7 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
u8 *buf)
{
int ret, i;
- u8 *rx_array ;
+ u8 *rx_array;
s16 *data = (s16 *)buf;
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength);
@@ -146,11 +146,7 @@ static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
len = lis3l02dq_get_buffer_element(indio_dev, data);
- /* Guaranteed to be aligned with 8 byte boundary */
- if (indio_dev->scan_timestamp)
- *(s64 *)((u8 *)data + ALIGN(len, sizeof(s64)))
- = pf->timestamp;
- iio_push_to_buffers(indio_dev, (u8 *)data);
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
kfree(data);
done:
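The lis3l02dq trigger handler above now calls iio_push_to_buffers_with_timestamp(), which stores the timestamp at the aligned tail of the supplied scan buffer (when timestamps are enabled) before pushing the sample to the attached buffers, replacing the open-coded alignment arithmetic. A minimal, hypothetical trigger handler using it; the device-specific read is elided:

/* Hypothetical sketch; assumes a scan buffer with room for the
 * enabled channels plus an 8-byte aligned s64 timestamp.
 */
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	s16 scan[8] __aligned(8) = { 0 };

	/* ... fill scan[] from the device here ... */

	iio_push_to_buffers_with_timestamp(indio_dev, scan, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}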
@@ -264,8 +260,7 @@ static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
else
break;
if (i == 5)
- printk(KERN_INFO
- "Failed to clear the interrupt for lis3l02dq\n");
+ pr_info("Failed to clear the interrupt for lis3l02dq\n");
/* irq reenabled so success! */
return 0;
@@ -387,7 +382,6 @@ error_ret:
}
static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
- .preenable = &iio_sw_buffer_preenable,
.postenable = &lis3l02dq_buffer_postenable,
.predisable = &lis3l02dq_buffer_predisable,
};
@@ -401,7 +395,7 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
if (!buffer)
return -ENOMEM;
- indio_dev->buffer = buffer;
+ iio_device_attach_buffer(indio_dev, buffer);
buffer->scan_timestamp = true;
indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 48a25ba290f..c49e6ef9d05 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -419,8 +419,11 @@ static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
-#define SCA3000_EVENT_MASK \
- (IIO_EV_BIT(IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING))
+static const struct iio_event_spec sca3000_event = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
#define SCA3000_CHAN(index, mod) \
{ \
@@ -437,7 +440,8 @@ static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
.storagebits = 16, \
.shift = 5, \
}, \
- .event_mask = SCA3000_EVENT_MASK, \
+ .event_spec = &sca3000_event, \
+ .num_event_specs = 1, \
}
static const struct iio_chan_spec sca3000_channels[] = {
@@ -624,9 +628,9 @@ static ssize_t sca3000_set_frequency(struct device *dev,
struct sca3000_state *st = iio_priv(indio_dev);
int ret, base_freq = 0;
int ctrlval;
- long val;
+ int val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
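sca3000_set_frequency() switches from the deprecated strict_strtol() to kstrtoint(), which parses straight into the type the caller actually needs and returns 0 or a negative errno. A small hedged sketch of a sysfs store callback using it (the attribute and hardware update are illustrative only):

/* Hypothetical sysfs store using kstrtoint(). */
#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_freq_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	int val;
	int ret;

	ret = kstrtoint(buf, 10, &val);	/* -EINVAL/-ERANGE on bad input */
	if (ret)
		return ret;

	/* ... apply val to the hardware here ... */

	return len;
}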
@@ -703,12 +707,15 @@ static IIO_CONST_ATTR_TEMP_OFFSET("-214.6");
* sca3000_read_thresh() - query of a threshold
**/
static int sca3000_read_thresh(struct iio_dev *indio_dev,
- u64 e,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
int ret, i;
struct sca3000_state *st = iio_priv(indio_dev);
- int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
+ int num = chan->channel2;
mutex_lock(&st->lock);
ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
mutex_unlock(&st->lock);
@@ -724,18 +731,21 @@ static int sca3000_read_thresh(struct iio_dev *indio_dev,
ARRAY_SIZE(st->info->mot_det_mult_xz))
*val += st->info->mot_det_mult_xz[i];
- return 0;
+ return IIO_VAL_INT;
}
/**
* sca3000_write_thresh() control of threshold
**/
static int sca3000_write_thresh(struct iio_dev *indio_dev,
- u64 e,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
struct sca3000_state *st = iio_priv(indio_dev);
- int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
+ int num = chan->channel2;
int ret;
int i;
u8 nonlinear = 0;
@@ -866,12 +876,14 @@ done:
* sca3000_read_event_config() what events are enabled
**/
static int sca3000_read_event_config(struct iio_dev *indio_dev,
- u64 e)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct sca3000_state *st = iio_priv(indio_dev);
int ret;
u8 protect_mask = 0x03;
- int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
+ int num = chan->channel2;
/* read current value of mode register */
mutex_lock(&st->lock);
@@ -931,12 +943,12 @@ static ssize_t sca3000_set_free_fall_mode(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct sca3000_state *st = iio_priv(indio_dev);
- long val;
+ u8 val;
int ret;
u8 protect_mask = SCA3000_FREE_FALL_DETECT;
mutex_lock(&st->lock);
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
@@ -969,13 +981,15 @@ error_ret:
* this mode is disabled. Currently normal mode is assumed.
**/
static int sca3000_write_event_config(struct iio_dev *indio_dev,
- u64 e,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
int state)
{
struct sca3000_state *st = iio_priv(indio_dev);
int ret, ctrlval;
u8 protect_mask = 0x03;
- int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
+ int num = chan->channel2;
mutex_lock(&st->lock);
/* First read the motion detector config to find out if
@@ -1112,20 +1126,20 @@ static const struct iio_info sca3000_info = {
.attrs = &sca3000_attribute_group,
.read_raw = &sca3000_read_raw,
.event_attrs = &sca3000_event_attribute_group,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
+ .read_event_value_new = &sca3000_read_thresh,
+ .write_event_value_new = &sca3000_write_thresh,
+ .read_event_config_new = &sca3000_read_event_config,
+ .write_event_config_new = &sca3000_write_event_config,
.driver_module = THIS_MODULE,
};
static const struct iio_info sca3000_info_with_temp = {
.attrs = &sca3000_attribute_group_with_temp,
.read_raw = &sca3000_read_raw,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
+ .read_event_value_new = &sca3000_read_thresh,
+ .write_event_value_new = &sca3000_write_thresh,
+ .read_event_config_new = &sca3000_read_event_config,
+ .write_event_config_new = &sca3000_write_event_config,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 3e5e860aa38..ea0af6d81d2 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -177,11 +177,11 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct sca3000_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long val;
+ u8 val;
int ret;
mutex_lock(&st->lock);
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
@@ -252,7 +252,7 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
struct iio_buffer *buf;
struct iio_hw_buffer *ring;
- ring = kzalloc(sizeof *ring, GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return NULL;
@@ -265,7 +265,7 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
return buf;
}
-static inline void sca3000_rb_free(struct iio_buffer *r)
+static void sca3000_ring_release(struct iio_buffer *r)
{
kfree(iio_to_hw_buf(r));
}
@@ -274,23 +274,28 @@ static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
.read_first_n = &sca3000_read_first_n_hw_rb,
.get_length = &sca3000_ring_get_length,
.get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum,
+ .release = sca3000_ring_release,
};
int sca3000_configure_ring(struct iio_dev *indio_dev)
{
- indio_dev->buffer = sca3000_rb_allocate(indio_dev);
- if (indio_dev->buffer == NULL)
+ struct iio_buffer *buffer;
+
+ buffer = sca3000_rb_allocate(indio_dev);
+ if (buffer == NULL)
return -ENOMEM;
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
indio_dev->buffer->access = &sca3000_ring_access_funcs;
+ iio_device_attach_buffer(indio_dev, buffer);
+
return 0;
}
void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
- sca3000_rb_free(indio_dev->buffer);
+ iio_buffer_put(indio_dev->buffer);
}
static inline
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index cabc7a367db..e3d64300195 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -102,7 +102,7 @@ config AD7280
config LPC32XX_ADC
tristate "NXP LPC32XX ADC"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
help
Say yes here to build support for the integrated ADC inside the
LPC32XX SoC. Note that this feature uses the same hardware as the
@@ -113,7 +113,9 @@ config LPC32XX_ADC
config MXS_LRADC
tristate "Freescale i.MX23/i.MX28 LRADC"
- depends on ARCH_MXS
+ depends on ARCH_MXS || COMPILE_TEST
+ depends on INPUT
+ select STMP_DEVICE
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -125,7 +127,7 @@ config MXS_LRADC
config SPEAR_ADC
tristate "ST SPEAr ADC"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
help
Say yes here to build support for the integrated ADC inside the
ST SPEAr SoC. Provides direct access via sysfs.
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 3283e282953..83bb44b3815 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -623,17 +623,17 @@ static int ad7192_probe(struct spi_device *spi)
return -ENODEV;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
voltage_uv = regulator_get_voltage(st->reg);
}
@@ -677,11 +677,6 @@ error_remove_trigger:
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return ret;
}
@@ -694,10 +689,8 @@ static int ad7192_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
return 0;
}
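The ad7192 probe/remove pair moves to device-managed resources: devm_iio_device_alloc() and devm_regulator_get() tie the IIO device and the regulator handle to the SPI device's lifetime, so the error path no longer needs iio_device_free()/regulator_put() and remove() only undoes what devm does not cover (the regulator_disable()). A hedged sketch of the probe shape this produces; the names and private state are illustrative, and the regulator is treated as required here even though the real driver allows it to be absent:

/* Hypothetical probe sketch using device-managed allocations. */
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

struct example_state {
	struct regulator *reg;
};

static int example_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct example_state *st;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;		/* nothing to free: devm owns it */
	st = iio_priv(indio_dev);

	st->reg = devm_regulator_get(&spi->dev, "vcc");
	if (IS_ERR(st->reg))
		return PTR_ERR(st->reg); /* handle released automatically */

	ret = regulator_enable(st->reg);
	if (ret)
		return ret;

	/* ... chip setup and iio_device_register() would follow ... */
	return 0;
}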
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index c19618bc37c..8209fa542a8 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -783,7 +783,6 @@ static int ad7280_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad7280_state *st = iio_priv(indio_dev);
- unsigned int scale_uv;
int ret;
switch (m) {
@@ -804,13 +803,12 @@ static int ad7280_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
- scale_uv = (4000 * 1000) >> AD7280A_BITS;
+ *val = 4000;
else
- scale_uv = (5000 * 1000) >> AD7280A_BITS;
+ *val = 5000;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val2 = AD7280A_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -835,8 +833,9 @@ static int ad7280_probe(struct spi_device *spi)
int ret;
const unsigned short tACQ_ns[4] = {465, 1010, 1460, 1890};
const unsigned short nAVG[4] = {1, 2, 4, 8};
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -860,7 +859,7 @@ static int ad7280_probe(struct spi_device *spi)
ret = ad7280_chain_setup(st);
if (ret < 0)
- goto error_free_device;
+ return ret;
st->slave_num = ret;
st->scan_cnt = (st->slave_num + 1) * AD7280A_NUM_CH;
@@ -891,7 +890,7 @@ static int ad7280_probe(struct spi_device *spi)
ret = ad7280_channel_init(st);
if (ret < 0)
- goto error_free_device;
+ return ret;
indio_dev->num_channels = ret;
indio_dev->channels = st->channels;
@@ -940,9 +939,6 @@ error_free_attr:
error_free_channels:
kfree(st->channels);
-error_free_device:
- iio_device_free(indio_dev);
-
return ret;
}
@@ -960,7 +956,6 @@ static int ad7280_remove(struct spi_device *spi)
kfree(st->channels);
kfree(st->iio_attr);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index a2e61c2fc8d..d13f8aeeb62 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -164,97 +164,14 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
-static inline ssize_t ad7291_show_hyst(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7291_chip_info *chip = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u16 data;
- int ret;
-
- ret = ad7291_i2c_read(chip, this_attr->address, &data);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", data & AD7291_VALUE_MASK);
-}
-
-static inline ssize_t ad7291_set_hyst(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7291_chip_info *chip = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u16 data;
- int ret;
-
- ret = kstrtou16(buf, 10, &data);
-
- if (ret < 0)
- return ret;
- if (data > AD7291_VALUE_MASK)
- return -EINVAL;
-
- ret = ad7291_i2c_write(chip, this_attr->address, data);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(in_temp0_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst,
- AD7291_HYST(8));
-static IIO_DEVICE_ATTR(in_voltage0_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(0));
-static IIO_DEVICE_ATTR(in_voltage1_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(1));
-static IIO_DEVICE_ATTR(in_voltage2_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(2));
-static IIO_DEVICE_ATTR(in_voltage3_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(3));
-static IIO_DEVICE_ATTR(in_voltage4_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(4));
-static IIO_DEVICE_ATTR(in_voltage5_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(5));
-static IIO_DEVICE_ATTR(in_voltage6_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(6));
-static IIO_DEVICE_ATTR(in_voltage7_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(7));
-
-static struct attribute *ad7291_event_attributes[] = {
- &iio_dev_attr_in_temp0_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage0_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage1_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage2_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage3_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage4_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage5_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage6_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage7_thresh_both_hyst_raw.dev_attr.attr,
- NULL,
-};
-
-static unsigned int ad7291_threshold_reg(u64 event_code)
+static unsigned int ad7291_threshold_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir, enum iio_event_info info)
{
unsigned int offset;
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ switch (chan->type) {
case IIO_VOLTAGE:
- offset = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
+ offset = chan->channel;
break;
case IIO_TEMP:
offset = 8;
@@ -263,69 +180,78 @@ static unsigned int ad7291_threshold_reg(u64 event_code)
return 0;
}
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
- return AD7291_DATA_LOW(offset);
- else
- return AD7291_DATA_HIGH(offset);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (dir == IIO_EV_DIR_FALLING)
+ return AD7291_DATA_HIGH(offset);
+ else
+ return AD7291_DATA_LOW(offset);
+ case IIO_EV_INFO_HYSTERESIS:
+ return AD7291_HYST(offset);
+ default:
+ break;
+ }
+ return 0;
}
static int ad7291_read_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
int ret;
u16 uval;
- ret = ad7291_i2c_read(chip, ad7291_threshold_reg(event_code), &uval);
+ ret = ad7291_i2c_read(chip, ad7291_threshold_reg(chan, dir, info),
+ &uval);
if (ret < 0)
return ret;
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
- case IIO_VOLTAGE:
+ if (info == IIO_EV_INFO_HYSTERESIS || chan->type == IIO_VOLTAGE)
*val = uval & AD7291_VALUE_MASK;
- return 0;
- case IIO_TEMP:
+
+ else
*val = sign_extend32(uval, 11);
- return 0;
- default:
- return -EINVAL;
- };
+
+ return IIO_VAL_INT;
}
static int ad7291_write_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
- case IIO_VOLTAGE:
+ if (info == IIO_EV_INFO_HYSTERESIS || chan->type == IIO_VOLTAGE) {
if (val > AD7291_VALUE_MASK || val < 0)
return -EINVAL;
- break;
- case IIO_TEMP:
+ } else {
if (val > 2047 || val < -2048)
return -EINVAL;
- break;
- default:
- return -EINVAL;
}
- return ad7291_i2c_write(chip, ad7291_threshold_reg(event_code), val);
+ return ad7291_i2c_write(chip, ad7291_threshold_reg(chan, dir, info),
+ val);
}
static int ad7291_read_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
/* To be enabled the channel must simply be on. If any are enabled
we are in continuous sampling mode */
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ switch (chan->type) {
case IIO_VOLTAGE:
- if (chip->c_mask &
- (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))
+ if (chip->c_mask & (1 << (15 - chan->channel)))
return 1;
else
return 0;
@@ -339,11 +265,14 @@ static int ad7291_read_event_config(struct iio_dev *indio_dev,
}
static int ad7291_write_event_config(struct iio_dev *indio_dev,
- u64 event_code,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
int state)
{
int ret = 0;
struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ unsigned int mask;
u16 regval;
mutex_lock(&chip->state_lock);
@@ -354,16 +283,14 @@ static int ad7291_write_event_config(struct iio_dev *indio_dev,
* Possible to disable temp as well but that makes single read tricky.
*/
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ mask = BIT(15 - chan->channel);
+
+ switch (chan->type) {
case IIO_VOLTAGE:
- if ((!state) && (chip->c_mask & (1 << (15 -
- IIO_EVENT_CODE_EXTRACT_CHAN(event_code)))))
- chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
- (event_code)));
- else if (state && (!(chip->c_mask & (1 << (15 -
- IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))))
- chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
- (event_code)));
+ if ((!state) && (chip->c_mask & mask))
+ chip->c_mask &= ~mask;
+ else if (state && (!(chip->c_mask & mask)))
+ chip->c_mask |= mask;
else
break;
@@ -473,6 +400,24 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
}
}
+static const struct iio_event_spec ad7291_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
#define AD7291_VOLTAGE_CHAN(_chan) \
{ \
.type = IIO_VOLTAGE, \
@@ -480,8 +425,8 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.indexed = 1, \
.channel = _chan, \
- .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|\
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) \
+ .event_spec = ad7291_events, \
+ .num_event_specs = ARRAY_SIZE(ad7291_events), \
}
static const struct iio_chan_spec ad7291_channels[] = {
@@ -500,23 +445,17 @@ static const struct iio_chan_spec ad7291_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.indexed = 1,
.channel = 0,
- .event_mask =
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING)
+ .event_spec = ad7291_events,
+ .num_event_specs = ARRAY_SIZE(ad7291_events),
}
};
-static struct attribute_group ad7291_event_attribute_group = {
- .attrs = ad7291_event_attributes,
-};
-
static const struct iio_info ad7291_info = {
.read_raw = &ad7291_read_raw,
- .read_event_config = &ad7291_read_event_config,
- .write_event_config = &ad7291_write_event_config,
- .read_event_value = &ad7291_read_event_value,
- .write_event_value = &ad7291_write_event_value,
- .event_attrs = &ad7291_event_attribute_group,
+ .read_event_config_new = &ad7291_read_event_config,
+ .write_event_config_new = &ad7291_write_event_config,
+ .read_event_value_new = &ad7291_read_event_value,
+ .write_event_value_new = &ad7291_write_event_value,
.driver_module = THIS_MODULE,
};
@@ -528,21 +467,19 @@ static int ad7291_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret = 0;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
if (pdata && pdata->use_external_ref) {
- chip->reg = regulator_get(&client->dev, "vref");
+ chip->reg = devm_regulator_get(&client->dev, "vref");
if (IS_ERR(chip->reg))
- goto error_free;
+ return ret;
ret = regulator_enable(chip->reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
mutex_init(&chip->state_lock);
@@ -601,12 +538,7 @@ error_unreg_irq:
error_disable_reg:
if (chip->reg)
regulator_disable(chip->reg);
-error_put_reg:
- if (chip->reg)
- regulator_put(chip->reg);
-error_free:
- iio_device_free(indio_dev);
-error_ret:
+
return ret;
}
@@ -620,12 +552,8 @@ static int ad7291_remove(struct i2c_client *client)
if (client->irq)
free_irq(client->irq, indio_dev);
- if (chip->reg) {
+ if (chip->reg)
regulator_disable(chip->reg);
- regulator_put(chip->reg);
- }
-
- iio_device_free(indio_dev);
return 0;
}
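
With a per-channel iio_event_spec table and the (chan, type, dir, info) callbacks above, the hand-rolled in_*_thresh_both_hyst_raw attributes disappear and the IIO core creates the event sysfs files itself. A rough userspace illustration of reading one of the generated attributes, assuming the standard IIO event naming and an example device path (neither is spelled out in this patch):

    #include <stdio.h>

    int main(void)
    {
        /* assumed path and attribute name, for illustration only */
        const char *path =
            "/sys/bus/iio/devices/iio:device0/events/"
            "in_voltage0_thresh_rising_value";
        char buf[32];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("rising threshold (raw): %s", buf);
        fclose(f);
        return 0;
    }

The hysteresis that used to live in the *_thresh_both_hyst_raw files is expected to surface the same way, via the IIO_EV_DIR_EITHER entry of the event spec.
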
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 9221a74efd1..93c7299e835 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -42,7 +42,7 @@ struct ad7606_platform_data {
/**
* struct ad7606_chip_info - chip specifc information
- * @name: indentification string for chip
+ * @name: identification string for chip
* @int_vref_mv: the internal reference voltage
* @channels: channel specification
* @num_channels: number of channels
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 72868ceda36..2083673a79c 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -85,7 +85,6 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
{
int ret;
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned int scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -101,11 +100,9 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
*val = (short) ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->range * 1000 * 2)
- >> st->chip_info->channels[0].scan_type.realbits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->range * 2;
+ *val2 = st->chip_info->channels[0].scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -425,8 +422,7 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
struct ad7606_state *st = iio_priv(indio_dev);
if (iio_buffer_enabled(indio_dev)) {
- if (!work_pending(&st->poll_work))
- schedule_work(&st->poll_work);
+ schedule_work(&st->poll_work);
} else {
st->done = true;
wake_up_interruptible(&st->wq_data_avail);
@@ -466,12 +462,11 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
struct ad7606_platform_data *pdata = dev->platform_data;
struct ad7606_state *st;
int ret;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return ERR_PTR(-ENOMEM);
st = iio_priv(indio_dev);
@@ -489,11 +484,11 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
st->oversampling = pdata->default_os;
}
- st->reg = regulator_get(dev, "vcc");
+ st->reg = devm_regulator_get(dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ERR_PTR(ret);
}
st->pdata = pdata;
@@ -554,11 +549,6 @@ error_free_gpios:
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- iio_device_free(indio_dev);
-error_ret:
return ERR_PTR(ret);
}
@@ -570,13 +560,10 @@ int ad7606_remove(struct iio_dev *indio_dev, int irq)
ad7606_ring_cleanup(indio_dev);
free_irq(irq, indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
ad7606_free_gpios(st);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index 2b25cb07fe4..3bf174cb19b 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -46,7 +46,6 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
struct ad7606_state *st = container_of(work_s, struct ad7606_state,
poll_work);
struct iio_dev *indio_dev = iio_priv_to_dev(st);
- s64 time_ns;
__u8 *buf;
int ret;
@@ -78,12 +77,7 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
goto done;
}
- time_ns = iio_get_time_ns();
-
- if (indio_dev->scan_timestamp)
- *((s64 *)(buf + indio_dev->scan_bytes - sizeof(s64))) = time_ns;
-
- iio_push_to_buffers(indio_dev, buf);
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
done:
gpio_set_value(st->pdata->gpio_convst, 0);
iio_trigger_notify_done(indio_dev->trig);
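
iio_push_to_buffers_with_timestamp() folds the open-coded timestamp handling into one call: when timestamps are enabled it writes the 64-bit timestamp into the last naturally aligned slot of the scan and then pushes the whole scan. A plain-C paraphrase of that behaviour (not a copy of the kernel helper):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* scan_bytes already includes room for the aligned 64-bit timestamp */
    static void push_with_timestamp(int64_t *scan, size_t scan_bytes,
                                    int has_timestamp, int64_t timestamp)
    {
        if (has_timestamp)
            scan[scan_bytes / sizeof(int64_t) - 1] = timestamp;
        /* ...the real helper then hands the scan to iio_push_to_buffers() */
    }

    int main(void)
    {
        int64_t scan[2] = { 0 };    /* e.g. 8 bytes of samples + timestamp */

        push_with_timestamp(scan, sizeof(scan), 1, 123456789LL);
        printf("timestamp slot: %lld\n", (long long)scan[1]);
        return 0;
    }
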
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index e1f88603d7e..273add3ed63 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -90,17 +90,14 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad7780_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
return ad_sigma_delta_single_conversion(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->int_vref_mv * 100000 * st->gain)
- >> (chan->scan_type.realbits - 1);
- *val = scale_uv / 100000;
- *val2 = (scale_uv % 100000) * 10;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->int_vref_mv * st->gain;
+ *val2 = chan->scan_type.realbits - 1;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
*val -= (1 << (chan->scan_type.realbits - 1));
return IIO_VAL_INT;
@@ -171,7 +168,7 @@ static int ad7780_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int ret, voltage_uv = 0;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -180,11 +177,11 @@ static int ad7780_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
voltage_uv = regulator_get_voltage(st->reg);
}
@@ -210,8 +207,8 @@ static int ad7780_probe(struct spi_device *spi)
if (pdata && gpio_is_valid(pdata->gpio_pdrst)) {
- ret = gpio_request_one(pdata->gpio_pdrst, GPIOF_OUT_INIT_LOW,
- "AD7780 /PDRST");
+ ret = devm_gpio_request_one(&spi->dev, pdata->gpio_pdrst,
+ GPIOF_OUT_INIT_LOW, "AD7780 /PDRST");
if (ret) {
dev_err(&spi->dev, "failed to request GPIO PDRST\n");
goto error_disable_reg;
@@ -223,7 +220,7 @@ static int ad7780_probe(struct spi_device *spi)
ret = ad_sd_setup_buffer_and_trigger(indio_dev);
if (ret)
- goto error_free_gpio;
+ goto error_disable_reg;
ret = iio_device_register(indio_dev);
if (ret)
@@ -233,17 +230,9 @@ static int ad7780_probe(struct spi_device *spi)
error_cleanup_buffer_and_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_free_gpio:
- if (pdata && gpio_is_valid(pdata->gpio_pdrst))
- gpio_free(pdata->gpio_pdrst);
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return ret;
}
@@ -256,14 +245,8 @@ static int ad7780_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (gpio_is_valid(st->powerdown_gpio))
- gpio_free(st->powerdown_gpio);
-
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 8470036a337..9f48e5c74ee 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -356,11 +356,9 @@ static int ad7816_probe(struct spi_device *spi_dev)
return -EINVAL;
}
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi_dev->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
dev_set_drvdata(&spi_dev->dev, indio_dev);
@@ -372,25 +370,28 @@ static int ad7816_probe(struct spi_device *spi_dev)
chip->convert_pin = pins[1];
chip->busy_pin = pins[2];
- ret = gpio_request(chip->rdwr_pin, spi_get_device_id(spi_dev)->name);
+ ret = devm_gpio_request(&spi_dev->dev, chip->rdwr_pin,
+ spi_get_device_id(spi_dev)->name);
if (ret) {
dev_err(&spi_dev->dev, "Fail to request rdwr gpio PIN %d.\n",
chip->rdwr_pin);
- goto error_free_device;
+ return ret;
}
gpio_direction_input(chip->rdwr_pin);
- ret = gpio_request(chip->convert_pin, spi_get_device_id(spi_dev)->name);
+ ret = devm_gpio_request(&spi_dev->dev, chip->convert_pin,
+ spi_get_device_id(spi_dev)->name);
if (ret) {
dev_err(&spi_dev->dev, "Fail to request convert gpio PIN %d.\n",
chip->convert_pin);
- goto error_free_gpio_rdwr;
+ return ret;
}
gpio_direction_input(chip->convert_pin);
- ret = gpio_request(chip->busy_pin, spi_get_device_id(spi_dev)->name);
+ ret = devm_gpio_request(&spi_dev->dev, chip->busy_pin,
+ spi_get_device_id(spi_dev)->name);
if (ret) {
dev_err(&spi_dev->dev, "Fail to request busy gpio PIN %d.\n",
chip->busy_pin);
- goto error_free_gpio_convert;
+ return ret;
}
gpio_direction_input(chip->busy_pin);
@@ -401,51 +402,31 @@ static int ad7816_probe(struct spi_device *spi_dev)
if (spi_dev->irq) {
/* Only low trigger is supported in ad7816/7/8 */
- ret = request_threaded_irq(spi_dev->irq,
- NULL,
- &ad7816_event_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- indio_dev->name,
- indio_dev);
+ ret = devm_request_threaded_irq(&spi_dev->dev, spi_dev->irq,
+ NULL,
+ &ad7816_event_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ indio_dev->name,
+ indio_dev);
if (ret)
- goto error_free_gpio;
+ return ret;
}
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_irq;
+ return ret;
dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
indio_dev->name);
return 0;
-error_free_irq:
- free_irq(spi_dev->irq, indio_dev);
-error_free_gpio:
- gpio_free(chip->busy_pin);
-error_free_gpio_convert:
- gpio_free(chip->convert_pin);
-error_free_gpio_rdwr:
- gpio_free(chip->rdwr_pin);
-error_free_device:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7816_remove(struct spi_device *spi_dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
- struct ad7816_chip_info *chip = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- dev_set_drvdata(&spi_dev->dev, NULL);
- if (spi_dev->irq)
- free_irq(spi_dev->irq, indio_dev);
- gpio_free(chip->busy_pin);
- gpio_free(chip->convert_pin);
- gpio_free(chip->rdwr_pin);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index b51680c1c33..a591aa6feae 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -36,18 +36,10 @@
#define AD7998_ALERT_STAT_REG 0x1
#define AD7998_CONF_REG 0x2
#define AD7998_CYCLE_TMR_REG 0x3
-#define AD7998_DATALOW_CH1_REG 0x4
-#define AD7998_DATAHIGH_CH1_REG 0x5
-#define AD7998_HYST_CH1_REG 0x6
-#define AD7998_DATALOW_CH2_REG 0x7
-#define AD7998_DATAHIGH_CH2_REG 0x8
-#define AD7998_HYST_CH2_REG 0x9
-#define AD7998_DATALOW_CH3_REG 0xA
-#define AD7998_DATAHIGH_CH3_REG 0xB
-#define AD7998_HYST_CH3_REG 0xC
-#define AD7998_DATALOW_CH4_REG 0xD
-#define AD7998_DATAHIGH_CH4_REG 0xE
-#define AD7998_HYST_CH4_REG 0xF
+
+#define AD7998_DATALOW_REG(x) ((x) * 3 + 0x4)
+#define AD7998_DATAHIGH_REG(x) ((x) * 3 + 0x5)
+#define AD7998_HYST_REG(x) ((x) * 3 + 0x6)
#define AD7998_CYC_MASK 0x7
#define AD7998_CYC_DIS 0x0
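
The three generated macros encode the AD7998 limit-register layout directly: each channel owns a block of three registers (data low, data high, hysteresis) starting at 0x4, so channel x maps to 0x4 + 3x, 0x5 + 3x and 0x6 + 3x. A quick stand-alone check against the per-channel constants that were removed:

    #include <assert.h>
    #include <stdio.h>

    #define AD7998_DATALOW_REG(x)   ((x) * 3 + 0x4)
    #define AD7998_DATAHIGH_REG(x)  ((x) * 3 + 0x5)
    #define AD7998_HYST_REG(x)      ((x) * 3 + 0x6)

    int main(void)
    {
        /* the removed constants, channels 1..4 */
        static const int old[4][3] = {
            { 0x4, 0x5, 0x6 }, { 0x7, 0x8, 0x9 },
            { 0xA, 0xB, 0xC }, { 0xD, 0xE, 0xF },
        };
        int ch;

        for (ch = 0; ch < 4; ch++) {
            assert(AD7998_DATALOW_REG(ch)  == old[ch][0]);
            assert(AD7998_DATAHIGH_REG(ch) == old[ch][1]);
            assert(AD7998_HYST_REG(ch)     == old[ch][2]);
        }
        printf("register layout matches the removed defines\n");
        return 0;
    }
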
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 2b2049c8bc6..9428be82b65 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -163,7 +163,6 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
{
int ret;
struct ad799x_state *st = iio_priv(indio_dev);
- unsigned int scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -180,10 +179,9 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = (st->int_vref_mv * 1000) >> chan->scan_type.realbits;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->int_vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
@@ -254,98 +252,70 @@ error_ret_mutex:
}
static int ad799x_read_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
return 1;
}
-static const u8 ad799x_threshold_addresses[][2] = {
- { AD7998_DATALOW_CH1_REG, AD7998_DATAHIGH_CH1_REG },
- { AD7998_DATALOW_CH2_REG, AD7998_DATAHIGH_CH2_REG },
- { AD7998_DATALOW_CH3_REG, AD7998_DATAHIGH_CH3_REG },
- { AD7998_DATALOW_CH4_REG, AD7998_DATAHIGH_CH4_REG },
-};
+static unsigned int ad799x_threshold_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir,
+ enum iio_event_info info)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (dir == IIO_EV_DIR_FALLING)
+ return AD7998_DATALOW_REG(chan->channel);
+ else
+ return AD7998_DATAHIGH_REG(chan->channel);
+ case IIO_EV_INFO_HYSTERESIS:
+ return AD7998_HYST_REG(chan->channel);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
static int ad799x_write_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
int ret;
struct ad799x_state *st = iio_priv(indio_dev);
- int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_FALLING);
- int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
- ret = ad799x_i2c_write16(st,
- ad799x_threshold_addresses[number][direction],
- val);
+ ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info),
+ val);
mutex_unlock(&indio_dev->mlock);
return ret;
}
static int ad799x_read_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
int ret;
struct ad799x_state *st = iio_priv(indio_dev);
- int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_FALLING);
- int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
u16 valin;
mutex_lock(&indio_dev->mlock);
- ret = ad799x_i2c_read16(st,
- ad799x_threshold_addresses[number][direction],
- &valin);
+ ret = ad799x_i2c_read16(st, ad799x_threshold_reg(chan, dir, info),
+ &valin);
mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
*val = valin;
- return 0;
-}
-
-static ssize_t ad799x_read_channel_config(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad799x_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- int ret;
- u16 val;
- ret = ad799x_i2c_read16(st, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t ad799x_write_channel_config(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad799x_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- long val;
- int ret;
-
- ret = kstrtol(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
- ret = ad799x_i2c_write16(st, this_attr->address, val);
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
+ return IIO_VAL_INT;
}
static irqreturn_t ad799x_event_handler(int irq, void *private)
@@ -383,60 +353,19 @@ done:
return IRQ_HANDLED;
}
-static IIO_DEVICE_ATTR(in_voltage0_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_HYST_CH1_REG);
-
-static IIO_DEVICE_ATTR(in_voltage1_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_HYST_CH2_REG);
-
-static IIO_DEVICE_ATTR(in_voltage2_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_HYST_CH3_REG);
-
-static IIO_DEVICE_ATTR(in_voltage3_thresh_both_hyst_raw,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_HYST_CH4_REG);
-
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ad799x_read_frequency,
ad799x_write_frequency);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("15625 7812 3906 1953 976 488 244 0");
-static struct attribute *ad7993_4_7_8_event_attributes[] = {
- &iio_dev_attr_in_voltage0_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage1_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage2_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage3_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group ad7993_4_7_8_event_attrs_group = {
- .attrs = ad7993_4_7_8_event_attributes,
- .name = "events",
-};
-
-static struct attribute *ad7992_event_attributes[] = {
- &iio_dev_attr_in_voltage0_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in_voltage1_thresh_both_hyst_raw.dev_attr.attr,
+static struct attribute *ad799x_event_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL,
};
-static struct attribute_group ad7992_event_attrs_group = {
- .attrs = ad7992_event_attributes,
+static struct attribute_group ad799x_event_attrs_group = {
+ .attrs = ad799x_event_attributes,
.name = "events",
};
@@ -445,29 +374,35 @@ static const struct iio_info ad7991_info = {
.driver_module = THIS_MODULE,
};
-static const struct iio_info ad7992_info = {
- .read_raw = &ad799x_read_raw,
- .event_attrs = &ad7992_event_attrs_group,
- .read_event_config = &ad799x_read_event_config,
- .read_event_value = &ad799x_read_event_value,
- .write_event_value = &ad799x_write_event_value,
- .driver_module = THIS_MODULE,
-};
-
static const struct iio_info ad7993_4_7_8_info = {
.read_raw = &ad799x_read_raw,
- .event_attrs = &ad7993_4_7_8_event_attrs_group,
- .read_event_config = &ad799x_read_event_config,
- .read_event_value = &ad799x_read_event_value,
- .write_event_value = &ad799x_write_event_value,
+ .event_attrs = &ad799x_event_attrs_group,
+ .read_event_config_new = &ad799x_read_event_config,
+ .read_event_value_new = &ad799x_read_event_value,
+ .write_event_value_new = &ad799x_write_event_value,
.driver_module = THIS_MODULE,
.update_scan_mode = ad7997_8_update_scan_mode,
};
-#define AD799X_EV_MASK (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
+static const struct iio_event_spec ad799x_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
-#define AD799X_CHANNEL(_index, _realbits, _evmask) { \
+#define _AD799X_CHANNEL(_index, _realbits, _ev_spec, _num_ev_spec) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = (_index), \
@@ -475,16 +410,24 @@ static const struct iio_info ad7993_4_7_8_info = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
.scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
- .event_mask = (_evmask), \
+ .event_spec = _ev_spec, \
+ .num_event_specs = _num_ev_spec, \
}
+#define AD799X_CHANNEL(_index, _realbits) \
+ _AD799X_CHANNEL(_index, _realbits, NULL, 0)
+
+#define AD799X_CHANNEL_WITH_EVENTS(_index, _realbits) \
+ _AD799X_CHANNEL(_index, _realbits, ad799x_events, \
+ ARRAY_SIZE(ad799x_events))
+
static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
[ad7991] = {
.channel = {
- AD799X_CHANNEL(0, 12, 0),
- AD799X_CHANNEL(1, 12, 0),
- AD799X_CHANNEL(2, 12, 0),
- AD799X_CHANNEL(3, 12, 0),
+ AD799X_CHANNEL(0, 12),
+ AD799X_CHANNEL(1, 12),
+ AD799X_CHANNEL(2, 12),
+ AD799X_CHANNEL(3, 12),
IIO_CHAN_SOFT_TIMESTAMP(4),
},
.num_channels = 5,
@@ -492,10 +435,10 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7995] = {
.channel = {
- AD799X_CHANNEL(0, 10, 0),
- AD799X_CHANNEL(1, 10, 0),
- AD799X_CHANNEL(2, 10, 0),
- AD799X_CHANNEL(3, 10, 0),
+ AD799X_CHANNEL(0, 10),
+ AD799X_CHANNEL(1, 10),
+ AD799X_CHANNEL(2, 10),
+ AD799X_CHANNEL(3, 10),
IIO_CHAN_SOFT_TIMESTAMP(4),
},
.num_channels = 5,
@@ -503,10 +446,10 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7999] = {
.channel = {
- AD799X_CHANNEL(0, 8, 0),
- AD799X_CHANNEL(1, 8, 0),
- AD799X_CHANNEL(2, 8, 0),
- AD799X_CHANNEL(3, 8, 0),
+ AD799X_CHANNEL(0, 8),
+ AD799X_CHANNEL(1, 8),
+ AD799X_CHANNEL(2, 8),
+ AD799X_CHANNEL(3, 8),
IIO_CHAN_SOFT_TIMESTAMP(4),
},
.num_channels = 5,
@@ -514,20 +457,20 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7992] = {
.channel = {
- AD799X_CHANNEL(0, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(1, 12, AD799X_EV_MASK),
+ AD799X_CHANNEL_WITH_EVENTS(0, 12),
+ AD799X_CHANNEL_WITH_EVENTS(1, 12),
IIO_CHAN_SOFT_TIMESTAMP(3),
},
.num_channels = 3,
.default_config = AD7998_ALERT_EN,
- .info = &ad7992_info,
+ .info = &ad7993_4_7_8_info,
},
[ad7993] = {
.channel = {
- AD799X_CHANNEL(0, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(1, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(2, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(3, 10, AD799X_EV_MASK),
+ AD799X_CHANNEL_WITH_EVENTS(0, 10),
+ AD799X_CHANNEL_WITH_EVENTS(1, 10),
+ AD799X_CHANNEL_WITH_EVENTS(2, 10),
+ AD799X_CHANNEL_WITH_EVENTS(3, 10),
IIO_CHAN_SOFT_TIMESTAMP(4),
},
.num_channels = 5,
@@ -536,10 +479,10 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7994] = {
.channel = {
- AD799X_CHANNEL(0, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(1, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(2, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(3, 12, AD799X_EV_MASK),
+ AD799X_CHANNEL_WITH_EVENTS(0, 12),
+ AD799X_CHANNEL_WITH_EVENTS(1, 12),
+ AD799X_CHANNEL_WITH_EVENTS(2, 12),
+ AD799X_CHANNEL_WITH_EVENTS(3, 12),
IIO_CHAN_SOFT_TIMESTAMP(4),
},
.num_channels = 5,
@@ -548,14 +491,14 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7997] = {
.channel = {
- AD799X_CHANNEL(0, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(1, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(2, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(3, 10, AD799X_EV_MASK),
- AD799X_CHANNEL(4, 10, 0),
- AD799X_CHANNEL(5, 10, 0),
- AD799X_CHANNEL(6, 10, 0),
- AD799X_CHANNEL(7, 10, 0),
+ AD799X_CHANNEL_WITH_EVENTS(0, 10),
+ AD799X_CHANNEL_WITH_EVENTS(1, 10),
+ AD799X_CHANNEL_WITH_EVENTS(2, 10),
+ AD799X_CHANNEL_WITH_EVENTS(3, 10),
+ AD799X_CHANNEL(4, 10),
+ AD799X_CHANNEL(5, 10),
+ AD799X_CHANNEL(6, 10),
+ AD799X_CHANNEL(7, 10),
IIO_CHAN_SOFT_TIMESTAMP(8),
},
.num_channels = 9,
@@ -564,14 +507,14 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7998] = {
.channel = {
- AD799X_CHANNEL(0, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(1, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(2, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(3, 12, AD799X_EV_MASK),
- AD799X_CHANNEL(4, 12, 0),
- AD799X_CHANNEL(5, 12, 0),
- AD799X_CHANNEL(6, 12, 0),
- AD799X_CHANNEL(7, 12, 0),
+ AD799X_CHANNEL_WITH_EVENTS(0, 12),
+ AD799X_CHANNEL_WITH_EVENTS(1, 12),
+ AD799X_CHANNEL_WITH_EVENTS(2, 12),
+ AD799X_CHANNEL_WITH_EVENTS(3, 12),
+ AD799X_CHANNEL(4, 12),
+ AD799X_CHANNEL(5, 12),
+ AD799X_CHANNEL(6, 12),
+ AD799X_CHANNEL(7, 12),
IIO_CHAN_SOFT_TIMESTAMP(8),
},
.num_channels = 9,
@@ -586,8 +529,9 @@ static int ad799x_probe(struct i2c_client *client,
int ret;
struct ad799x_platform_data *pdata = client->dev.platform_data;
struct ad799x_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -606,11 +550,11 @@ static int ad799x_probe(struct i2c_client *client,
st->int_vref_mv = pdata->vref_mv;
- st->reg = regulator_get(&client->dev, "vcc");
+ st->reg = devm_regulator_get(&client->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
st->client = client;
@@ -650,10 +594,6 @@ error_cleanup_ring:
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- iio_device_free(indio_dev);
return ret;
}
@@ -668,12 +608,9 @@ static int ad799x_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
ad799x_ring_cleanup(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
kfree(st->rx_buf);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index c2ebae12ee1..0ff6c03a483 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -35,7 +35,6 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad799x_state *st = iio_priv(indio_dev);
- s64 time_ns;
int b_sent;
u8 cmd;
@@ -65,13 +64,8 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
if (b_sent < 0)
goto out;
- time_ns = iio_get_time_ns();
-
- if (indio_dev->scan_timestamp)
- memcpy(st->rx_buf + indio_dev->scan_bytes - sizeof(s64),
- &time_ns, sizeof(time_ns));
-
- iio_push_to_buffers(indio_dev, st->rx_buf);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
+ iio_get_time_ns());
out:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index 9a4bb0999b5..ef0a21d8ce1 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -137,43 +137,39 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get platform I/O memory\n");
- retval = -EBUSY;
- goto errout1;
+ return -EBUSY;
}
- iodev = iio_device_alloc(sizeof(struct lpc32xx_adc_info));
- if (!iodev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
- retval = -ENOMEM;
- goto errout1;
- }
+ iodev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
+ if (!iodev)
+ return -ENOMEM;
info = iio_priv(iodev);
- info->adc_base = ioremap(res->start, resource_size(res));
+ info->adc_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!info->adc_base) {
dev_err(&pdev->dev, "failed mapping memory\n");
- retval = -EBUSY;
- goto errout2;
+ return -EBUSY;
}
- info->clk = clk_get(&pdev->dev, NULL);
+ info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed getting clock\n");
- goto errout3;
+ return PTR_ERR(info->clk);
}
irq = platform_get_irq(pdev, 0);
- if ((irq < 0) || (irq >= NR_IRQS)) {
+ if (irq <= 0) {
dev_err(&pdev->dev, "failed getting interrupt resource\n");
- retval = -EINVAL;
- goto errout4;
+ return -EINVAL;
}
- retval = request_irq(irq, lpc32xx_adc_isr, 0, MOD_NAME, info);
+ retval = devm_request_irq(&pdev->dev, irq, lpc32xx_adc_isr, 0,
+ MOD_NAME, info);
if (retval < 0) {
dev_err(&pdev->dev, "failed requesting interrupt\n");
- goto errout4;
+ return retval;
}
platform_set_drvdata(pdev, iodev);
@@ -189,35 +185,18 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
retval = iio_device_register(iodev);
if (retval)
- goto errout5;
+ return retval;
dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
return 0;
-
-errout5:
- free_irq(irq, info);
-errout4:
- clk_put(info->clk);
-errout3:
- iounmap(info->adc_base);
-errout2:
- iio_device_free(iodev);
-errout1:
- return retval;
}
static int lpc32xx_adc_remove(struct platform_device *pdev)
{
struct iio_dev *iodev = platform_get_drvdata(pdev);
- struct lpc32xx_adc_info *info = iio_priv(iodev);
- int irq = platform_get_irq(pdev, 0);
iio_device_unregister(iodev);
- free_irq(irq, info);
- clk_put(info->clk);
- iounmap(info->adc_base);
- iio_device_free(iodev);
return 0;
}
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index a08c1736458..e2dd7830b32 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -35,6 +35,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/input.h>
+#include <linux/clk.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -129,11 +130,24 @@ enum mxs_lradc_ts {
MXS_LRADC_TOUCHSCREEN_5WIRE,
};
+/*
+ * Touchscreen handling
+ */
+enum lradc_ts_plate {
+ LRADC_TOUCH = 0,
+ LRADC_SAMPLE_X,
+ LRADC_SAMPLE_Y,
+ LRADC_SAMPLE_PRESSURE,
+ LRADC_SAMPLE_VALID,
+};
+
struct mxs_lradc {
struct device *dev;
void __iomem *base;
int irq[13];
+ struct clk *clk;
+
uint32_t *buffer;
struct iio_trigger *trig;
@@ -169,32 +183,63 @@ struct mxs_lradc {
#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2)
#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2)
enum mxs_lradc_ts use_touchscreen;
- bool stop_touchscreen;
bool use_touchbutton;
struct input_dev *ts_input;
- struct work_struct ts_work;
+
+ enum mxs_lradc_id soc;
+ enum lradc_ts_plate cur_plate; /* statemachine */
+ bool ts_valid;
+ unsigned ts_x_pos;
+ unsigned ts_y_pos;
+ unsigned ts_pressure;
+
+ /* handle touchscreen's physical behaviour */
+ /* samples per coordinate */
+ unsigned over_sample_cnt;
+ /* time clocks between samples */
+ unsigned over_sample_delay;
+ /* time in clocks to wait after the plates where switched */
+ unsigned settling_delay;
};
#define LRADC_CTRL0 0x00
-#define LRADC_CTRL0_TOUCH_DETECT_ENABLE (1 << 23)
-#define LRADC_CTRL0_TOUCH_SCREEN_TYPE (1 << 22)
-#define LRADC_CTRL0_YNNSW /* YM */ (1 << 21)
-#define LRADC_CTRL0_YPNSW /* YP */ (1 << 20)
-#define LRADC_CTRL0_YPPSW /* YP */ (1 << 19)
-#define LRADC_CTRL0_XNNSW /* XM */ (1 << 18)
-#define LRADC_CTRL0_XNPSW /* XM */ (1 << 17)
-#define LRADC_CTRL0_XPPSW /* XP */ (1 << 16)
-#define LRADC_CTRL0_PLATE_MASK (0x3f << 16)
+# define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE (1 << 23)
+# define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE (1 << 22)
+# define LRADC_CTRL0_MX28_YNNSW /* YM */ (1 << 21)
+# define LRADC_CTRL0_MX28_YPNSW /* YP */ (1 << 20)
+# define LRADC_CTRL0_MX28_YPPSW /* YP */ (1 << 19)
+# define LRADC_CTRL0_MX28_XNNSW /* XM */ (1 << 18)
+# define LRADC_CTRL0_MX28_XNPSW /* XM */ (1 << 17)
+# define LRADC_CTRL0_MX28_XPPSW /* XP */ (1 << 16)
+
+# define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE (1 << 20)
+# define LRADC_CTRL0_MX23_YM (1 << 19)
+# define LRADC_CTRL0_MX23_XM (1 << 18)
+# define LRADC_CTRL0_MX23_YP (1 << 17)
+# define LRADC_CTRL0_MX23_XP (1 << 16)
+
+# define LRADC_CTRL0_MX28_PLATE_MASK \
+ (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \
+ LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW | \
+ LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW)
+
+# define LRADC_CTRL0_MX23_PLATE_MASK \
+ (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \
+ LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP)
#define LRADC_CTRL1 0x10
#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN (1 << 24)
#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16))
-#define LRADC_CTRL1_LRADC_IRQ_EN_MASK (0x1fff << 16)
+#define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16)
+#define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16)
#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16
#define LRADC_CTRL1_TOUCH_DETECT_IRQ (1 << 8)
#define LRADC_CTRL1_LRADC_IRQ(n) (1 << (n))
-#define LRADC_CTRL1_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff
#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
#define LRADC_CTRL2 0x20
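
The CTRL0 switch bits are split into MX23 and MX28 variants above because the two SoCs place the touchscreen plate switches at different positions, and the composed *_PLATE_MASK values now also include the touch-detect enable bit that the old single 0x3f << 16 mask left out. A small stand-alone computation of the two masks from the individual bits:

    #include <stdio.h>

    /* MX28 CTRL0 switch bits */
    #define MX28_TOUCH_DETECT_ENABLE (1 << 23)
    #define MX28_YNNSW (1 << 21)
    #define MX28_YPNSW (1 << 20)
    #define MX28_YPPSW (1 << 19)
    #define MX28_XNNSW (1 << 18)
    #define MX28_XNPSW (1 << 17)
    #define MX28_XPPSW (1 << 16)

    /* MX23 CTRL0 switch bits */
    #define MX23_TOUCH_DETECT_ENABLE (1 << 20)
    #define MX23_YM (1 << 19)
    #define MX23_XM (1 << 18)
    #define MX23_YP (1 << 17)
    #define MX23_XP (1 << 16)

    int main(void)
    {
        unsigned int mx28 = MX28_TOUCH_DETECT_ENABLE |
            MX28_YNNSW | MX28_YPNSW | MX28_YPPSW |
            MX28_XNNSW | MX28_XNPSW | MX28_XPPSW;
        unsigned int mx23 = MX23_TOUCH_DETECT_ENABLE |
            MX23_YM | MX23_XM | MX23_YP | MX23_XP;

        printf("MX28 plate mask: 0x%08x\n", mx28);   /* 0x00bf0000 */
        printf("MX23 plate mask: 0x%08x\n", mx23);   /* 0x001f0000 */
        return 0;
    }
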
@@ -207,19 +252,33 @@ struct mxs_lradc {
#define LRADC_CH_ACCUMULATE (1 << 29)
#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24)
#define LRADC_CH_NUM_SAMPLES_OFFSET 24
+#define LRADC_CH_NUM_SAMPLES(x) \
+ ((x) << LRADC_CH_NUM_SAMPLES_OFFSET)
#define LRADC_CH_VALUE_MASK 0x3ffff
#define LRADC_CH_VALUE_OFFSET 0
#define LRADC_DELAY(n) (0xd0 + (0x10 * (n)))
#define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xff << 24)
#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24
+#define LRADC_DELAY_TRIGGER(x) \
+ (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_LRADCS_MASK)
#define LRADC_DELAY_KICK (1 << 20)
#define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16)
#define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16
+#define LRADC_DELAY_TRIGGER_DELAYS(x) \
+ (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_DELAYS_MASK)
#define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11)
#define LRADC_DELAY_LOOP_COUNT_OFFSET 11
+#define LRADC_DELAY_LOOP(x) \
+ (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \
+ LRADC_DELAY_LOOP_COUNT_MASK)
#define LRADC_DELAY_DELAY_MASK 0x7ff
#define LRADC_DELAY_DELAY_OFFSET 0
+#define LRADC_DELAY_DELAY(x) \
+ (((x) << LRADC_DELAY_DELAY_OFFSET) & \
+ LRADC_DELAY_DELAY_MASK)
#define LRADC_CTRL4 0x140
#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
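
The new LRADC_DELAY_*(x) wrappers just shift a value into its field and clip it with the existing mask, so a whole delay-channel word can be composed in one expression the way mxs_lradc_setup_ts_channel() does further down. A stand-alone composition with illustrative settings (the 4-sample / 2-clock values are assumptions, not figures from this patch):

    #include <stdio.h>

    #define TRIGGER_LRADCS_OFFSET   24
    #define TRIGGER_LRADCS_MASK     (0xffu << 24)
    #define TRIGGER_DELAYS_OFFSET   16
    #define TRIGGER_DELAYS_MASK     (0xfu << 16)
    #define LOOP_COUNT_OFFSET       11
    #define LOOP_COUNT_MASK         (0x1fu << 11)
    #define DELAY_MASK              0x7ffu

    #define DELAY_TRIGGER(x)        (((x) << TRIGGER_LRADCS_OFFSET) & TRIGGER_LRADCS_MASK)
    #define DELAY_TRIGGER_DELAYS(x) (((x) << TRIGGER_DELAYS_OFFSET) & TRIGGER_DELAYS_MASK)
    #define DELAY_LOOP(x)           (((x) << LOOP_COUNT_OFFSET) & LOOP_COUNT_MASK)
    #define DELAY_DELAY(x)          ((x) & DELAY_MASK)

    int main(void)
    {
        /* illustrative values: 4 samples per coordinate, 2 clocks apart */
        unsigned int over_sample_cnt = 4, over_sample_delay = 2, ch = 3;

        unsigned int word = DELAY_TRIGGER(1u << ch) |
            DELAY_TRIGGER_DELAYS(0) |
            DELAY_LOOP(over_sample_cnt - 1) |
            DELAY_DELAY(over_sample_delay - 1);

        printf("LRADC_DELAY(3) word: 0x%08x\n", word);   /* 0x08001801 */
        return 0;
    }
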
@@ -228,6 +287,475 @@ struct mxs_lradc {
#define LRADC_RESOLUTION 12
#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+static void mxs_lradc_reg_set(struct mxs_lradc *lradc, u32 val, u32 reg)
+{
+ writel(val, lradc->base + reg + STMP_OFFSET_REG_SET);
+}
+
+static void mxs_lradc_reg_clear(struct mxs_lradc *lradc, u32 val, u32 reg)
+{
+ writel(val, lradc->base + reg + STMP_OFFSET_REG_CLR);
+}
+
+static void mxs_lradc_reg_wrt(struct mxs_lradc *lradc, u32 val, u32 reg)
+{
+ writel(val, lradc->base + reg);
+}
+
+static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_PLATE_MASK;
+ else
+ return LRADC_CTRL0_MX28_PLATE_MASK;
+}
+
+static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK;
+ else
+ return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK;
+}
+
+static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL1_MX23_LRADC_IRQ_MASK;
+ else
+ return LRADC_CTRL1_MX28_LRADC_IRQ_MASK;
+}
+
+static u32 mxs_lradc_touch_detect_bit(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE;
+ else
+ return LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE;
+}
+
+static u32 mxs_lradc_drive_x_plate(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_XP | LRADC_CTRL0_MX23_XM;
+ else
+ return LRADC_CTRL0_MX28_XPPSW | LRADC_CTRL0_MX28_XNNSW;
+}
+
+static u32 mxs_lradc_drive_y_plate(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_YM;
+ else
+ return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_YNNSW;
+}
+
+static u32 mxs_lradc_drive_pressure(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XM;
+ else
+ return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW;
+}
+
+static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
+{
+ return !!(readl(lradc->base + LRADC_STATUS) &
+ LRADC_STATUS_TOUCH_DETECT_RAW);
+}
+
+static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
+{
+ /*
+ * prepare for oversampling conversion
+ *
+ * from the datasheet:
+ * "The ACCUMULATE bit in the appropriate channel register
+	 * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater than 0;
+ * otherwise, the IRQs will not fire."
+ */
+ mxs_lradc_reg_wrt(lradc, LRADC_CH_ACCUMULATE |
+ LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1),
+ LRADC_CH(ch));
+
+ /* from the datasheet:
+ * "Software must clear this register in preparation for a
+	 * multi-cycle accumulation."
+ */
+ mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch));
+
+ /* prepare the delay/loop unit according to the oversampling count */
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch) |
+ LRADC_DELAY_TRIGGER_DELAYS(0) |
+ LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
+ LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
+ LRADC_DELAY(3));
+
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
+ LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
+ LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+
+ /* wake us again, when the complete conversion is done */
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
+ /*
+ * after changing the touchscreen plates setting
+ * the signals need some initial time to settle. Start the
+ * SoC's delay unit and start the conversion later
+ * and automatically.
+ */
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
+ LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
+ LRADC_DELAY_KICK |
+ LRADC_DELAY_DELAY(lradc->settling_delay),
+ LRADC_DELAY(2));
+}
+
+/*
+ * Pressure detection is special:
+ * We want to do both required measurements for the pressure detection in
+ * one turn. Use the hardware features to chain both conversions and let the
+ * hardware report one interrupt if both conversions are done
+ */
+static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
+ unsigned ch2)
+{
+ u32 reg;
+
+ /*
+ * prepare for oversampling conversion
+ *
+ * from the datasheet:
+ * "The ACCUMULATE bit in the appropriate channel register
+	 * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater than 0;
+ * otherwise, the IRQs will not fire."
+ */
+ reg = LRADC_CH_ACCUMULATE |
+ LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1);
+ mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1));
+ mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2));
+
+ /* from the datasheet:
+ * "Software must clear this register in preparation for a
+	 * multi-cycle accumulation."
+ */
+ mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch1));
+ mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch2));
+
+ /* prepare the delay/loop unit according to the oversampling count */
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch1) |
+ LRADC_DELAY_TRIGGER(1 << ch2) | /* start both channels */
+ LRADC_DELAY_TRIGGER_DELAYS(0) |
+ LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
+ LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
+ LRADC_DELAY(3));
+
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
+ LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
+ LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+
+ /* wake us again, when the conversions are done */
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
+ /*
+ * after changing the touchscreen plates setting
+ * the signals need some initial time to settle. Start the
+ * SoC's delay unit and start the conversion later
+ * and automatically.
+ */
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
+ LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
+ LRADC_DELAY_KICK |
+ LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2));
+}
+
+static unsigned mxs_lradc_read_raw_channel(struct mxs_lradc *lradc,
+ unsigned channel)
+{
+ u32 reg;
+ unsigned num_samples, val;
+
+ reg = readl(lradc->base + LRADC_CH(channel));
+ if (reg & LRADC_CH_ACCUMULATE)
+ num_samples = lradc->over_sample_cnt;
+ else
+ num_samples = 1;
+
+ val = (reg & LRADC_CH_VALUE_MASK) >> LRADC_CH_VALUE_OFFSET;
+ return val / num_samples;
+}
+
+static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
+ unsigned ch1, unsigned ch2)
+{
+ u32 reg, mask;
+ unsigned pressure, m1, m2;
+
+ mask = LRADC_CTRL1_LRADC_IRQ(ch1) | LRADC_CTRL1_LRADC_IRQ(ch2);
+ reg = readl(lradc->base + LRADC_CTRL1) & mask;
+
+ while (reg != mask) {
+ reg = readl(lradc->base + LRADC_CTRL1) & mask;
+ dev_dbg(lradc->dev, "One channel is still busy: %X\n", reg);
+ }
+
+ m1 = mxs_lradc_read_raw_channel(lradc, ch1);
+ m2 = mxs_lradc_read_raw_channel(lradc, ch2);
+
+ if (m2 == 0) {
+ dev_warn(lradc->dev, "Cannot calculate pressure\n");
+ return 1 << (LRADC_RESOLUTION - 1);
+ }
+
+ /* simply scale the value from 0 ... max ADC resolution */
+ pressure = m1;
+ pressure *= (1 << LRADC_RESOLUTION);
+ pressure /= m2;
+
+ dev_dbg(lradc->dev, "Pressure = %u\n", pressure);
+ return pressure;
+}
+
+#define TS_CH_XP 2
+#define TS_CH_YP 3
+#define TS_CH_XM 4
+#define TS_CH_YM 5
+
+static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
+{
+ u32 reg;
+ int val;
+
+ reg = readl(lradc->base + LRADC_CTRL1);
+
+ /* only channels 3 to 5 are of interest here */
+ if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
+ LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
+ val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
+ } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
+ LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
+ val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
+ } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
+ LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
+ val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
+ } else {
+ return -EIO;
+ }
+
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
+
+ return val;
+}
+
+/*
+ * YP(open)--+-------------+
+ * | |--+
+ * | | |
+ * YM(-)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(weak+) XM(open)
+ *
+ * "weak+" means 200k Ohm VDDIO
+ * (-) means GND
+ */
+static void mxs_lradc_setup_touch_detection(struct mxs_lradc *lradc)
+{
+ /*
+ * In order to detect a touch event the 'touch detect enable' bit
+ * enables:
+ * - a weak pullup to the X+ connector
+ * - a strong ground at the Y- connector
+ */
+ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+ mxs_lradc_reg_set(lradc, mxs_lradc_touch_detect_bit(lradc),
+ LRADC_CTRL0);
+}
+
+/*
+ * YP(meas)--+-------------+
+ * | |--+
+ * | | |
+ * YM(open)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(+) XM(-)
+ *
+ * (+) means here 1.85 V
+ * (-) means here GND
+ */
+static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
+{
+ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+ mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
+
+ lradc->cur_plate = LRADC_SAMPLE_X;
+ mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
+}
+
+/*
+ * YP(+)--+-------------+
+ * | |--+
+ * | | |
+ * YM(-)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(open) XM(meas)
+ *
+ * (+) means here 1.85 V
+ * (-) means here GND
+ */
+static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
+{
+ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+ mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
+
+ lradc->cur_plate = LRADC_SAMPLE_Y;
+ mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
+}
+
+/*
+ * YP(+)--+-------------+
+ * | |--+
+ * | | |
+ * YM(meas)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(meas) XM(-)
+ *
+ * (+) means here 1.85 V
+ * (-) means here GND
+ */
+static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
+{
+ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+ mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
+
+ lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
+ mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
+}
+
+static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
+{
+ mxs_lradc_setup_touch_detection(lradc);
+
+ lradc->cur_plate = LRADC_TOUCH;
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
+ LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+}
+
+static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
+{
+ input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
+ input_report_abs(lradc->ts_input, ABS_Y, lradc->ts_y_pos);
+ input_report_abs(lradc->ts_input, ABS_PRESSURE, lradc->ts_pressure);
+ input_report_key(lradc->ts_input, BTN_TOUCH, 1);
+ input_sync(lradc->ts_input);
+}
+
+static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
+{
+ mxs_lradc_setup_touch_detection(lradc);
+ lradc->cur_plate = LRADC_SAMPLE_VALID;
+ /*
+ * start a dummy conversion to burn time to settle the signals
+ * note: we are not interested in the conversion's value
+ */
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
+ LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
+ LRADC_DELAY(2));
+}
+
+/*
+ * in order to avoid false measurements, report only samples where
+ * the surface is still touched after the position measurement
+ */
+static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
+{
+ /* if it is still touched, report the sample */
+ if (valid && mxs_lradc_check_touch_event(lradc)) {
+ lradc->ts_valid = true;
+ mxs_lradc_report_ts_event(lradc);
+ }
+
+ /* if it is even still touched, continue with the next measurement */
+ if (mxs_lradc_check_touch_event(lradc)) {
+ mxs_lradc_prepare_y_pos(lradc);
+ return;
+ }
+
+ if (lradc->ts_valid) {
+ /* signal the release */
+ lradc->ts_valid = false;
+ input_report_key(lradc->ts_input, BTN_TOUCH, 0);
+ input_sync(lradc->ts_input);
+ }
+
+ /* if it is released, wait for the next touch via IRQ */
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+}
+
+/* touchscreen's state machine */
+static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
+{
+ int val;
+
+ switch (lradc->cur_plate) {
+ case LRADC_TOUCH:
+ /*
+ * start with the Y-pos, because it uses nearly the same plate
+		 * settings as the touch detection
+ */
+ if (mxs_lradc_check_touch_event(lradc)) {
+ mxs_lradc_reg_clear(lradc,
+ LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+ LRADC_CTRL1);
+ mxs_lradc_prepare_y_pos(lradc);
+ }
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
+ LRADC_CTRL1);
+ return;
+
+ case LRADC_SAMPLE_Y:
+ val = mxs_lradc_read_ts_channel(lradc);
+ if (val < 0) {
+ mxs_lradc_enable_touch_detection(lradc); /* re-start */
+ return;
+ }
+ lradc->ts_y_pos = val;
+ mxs_lradc_prepare_x_pos(lradc);
+ return;
+
+ case LRADC_SAMPLE_X:
+ val = mxs_lradc_read_ts_channel(lradc);
+ if (val < 0) {
+ mxs_lradc_enable_touch_detection(lradc); /* re-start */
+ return;
+ }
+ lradc->ts_x_pos = val;
+ mxs_lradc_prepare_pressure(lradc);
+ return;
+
+ case LRADC_SAMPLE_PRESSURE:
+ lradc->ts_pressure =
+ mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
+ mxs_lradc_complete_touch_event(lradc);
+ return;
+
+ case LRADC_SAMPLE_VALID:
+ val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
+ mxs_lradc_finish_touch_event(lradc, 1);
+ break;
+ }
+}
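To summarize the interrupt-driven flow the state machine above implements (a reading of the code in this patch, not a statement about the hardware):

    touch IRQ (LRADC_TOUCH)
      -> sample Y position  (LRADC_SAMPLE_Y, measured on XM)
      -> sample X position  (LRADC_SAMPLE_X)
      -> sample pressure    (LRADC_SAMPLE_PRESSURE, measured on XP/YM)
      -> dummy settling conversion (LRADC_SAMPLE_VALID)
      -> report the sample if the screen is still touched, then either
         loop back to the Y sample or re-arm touch detection and wait
         for the next touch IRQ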
+
/*
* Raw I/O operations
*/
@@ -255,28 +783,27 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
if (!ret)
return -EBUSY;
- INIT_COMPLETION(lradc->completion);
+ reinit_completion(&lradc->completion);
/*
* No buffered operation in progress, map the channel and trigger it.
* Virtual channel 0 is always used here, as the others are never
* used when doing raw sampling.
*/
- writel(LRADC_CTRL1_LRADC_IRQ_EN_MASK,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
- writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+ if (lradc->soc == IMX28_LRADC)
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+ LRADC_CTRL1);
+ mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
/* Clean the slot's previous content, then set new one. */
- writel(LRADC_CTRL4_LRADCSELECT_MASK(0),
- lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
- writel(chan->channel, lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_SET);
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4);
+ mxs_lradc_reg_set(lradc, chan->channel, LRADC_CTRL4);
- writel(0, lradc->base + LRADC_CH(0));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));
/* Enable the IRQ and start sampling the channel. */
- writel(LRADC_CTRL1_LRADC_IRQ_EN(0),
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
- writel(1 << 0, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1);
+ mxs_lradc_reg_set(lradc, 1 << 0, LRADC_CTRL0);
/* Wait for completion on the channel, 1 second max. */
ret = wait_for_completion_killable_timeout(&lradc->completion, HZ);
@@ -290,8 +817,7 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
ret = IIO_VAL_INT;
err:
- writel(LRADC_CTRL1_LRADC_IRQ_EN(0),
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1);
mutex_unlock(&lradc->lock);
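For readability of the conversions in this hunk: the mxs_lradc_reg_set/clear/wrt helpers are introduced earlier in the patch, outside this excerpt. Judging by the writel() calls they replace, they are presumably thin wrappers around the stmp-style SET/CLR register banks, roughly along these lines (a sketch, not the patch's exact definitions):

	static void mxs_lradc_reg_set(struct mxs_lradc *lradc, u32 val, u32 reg)
	{
		writel(val, lradc->base + reg + STMP_OFFSET_REG_SET);
	}

	static void mxs_lradc_reg_clear(struct mxs_lradc *lradc, u32 val, u32 reg)
	{
		writel(val, lradc->base + reg + STMP_OFFSET_REG_CLR);
	}

	static void mxs_lradc_reg_wrt(struct mxs_lradc *lradc, u32 val, u32 reg)
	{
		writel(val, lradc->base + reg);
	}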
@@ -303,220 +829,33 @@ static const struct iio_info mxs_lradc_iio_info = {
.read_raw = mxs_lradc_read_raw,
};
-/*
- * Touchscreen handling
- */
-enum lradc_ts_plate {
- LRADC_SAMPLE_X,
- LRADC_SAMPLE_Y,
- LRADC_SAMPLE_PRESSURE,
-};
-
-static int mxs_lradc_ts_touched(struct mxs_lradc *lradc)
-{
- uint32_t reg;
-
- /* Enable touch detection. */
- writel(LRADC_CTRL0_PLATE_MASK,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
-
- msleep(LRADC_TS_SAMPLE_DELAY_MS);
-
- reg = readl(lradc->base + LRADC_STATUS);
-
- return reg & LRADC_STATUS_TOUCH_DETECT_RAW;
-}
-
-static int32_t mxs_lradc_ts_sample(struct mxs_lradc *lradc,
- enum lradc_ts_plate plate, int change)
-{
- unsigned long delay, jiff;
- uint32_t reg, ctrl0 = 0, chan = 0;
- /* The touchscreen always uses CTRL4 slot #7. */
- const uint8_t slot = 7;
- uint32_t val;
-
- /*
- * There are three correct configurations of the controller sampling
- * the touchscreen, each of these configuration provides different
- * information from the touchscreen.
- *
- * The following table describes the sampling configurations:
- * +-------------+-------+-------+-------+
- * | Wire \ Axis | X | Y | Z |
- * +---------------------+-------+-------+
- * | X+ (CH2) | HI | TS | TS |
- * +-------------+-------+-------+-------+
- * | X- (CH4) | LO | SH | HI |
- * +-------------+-------+-------+-------+
- * | Y+ (CH3) | SH | HI | HI |
- * +-------------+-------+-------+-------+
- * | Y- (CH5) | TS | LO | SH |
- * +-------------+-------+-------+-------+
- *
- * HI ... strong '1' ; LO ... strong '0'
- * SH ... sample here ; TS ... tri-state
- *
- * There are a few other ways of obtaining the Z coordinate
- * (aka. pressure), but the one in the table seems to be the
- * most reliable one.
- */
- switch (plate) {
- case LRADC_SAMPLE_X:
- ctrl0 = LRADC_CTRL0_XPPSW | LRADC_CTRL0_XNNSW;
- chan = 3;
- break;
- case LRADC_SAMPLE_Y:
- ctrl0 = LRADC_CTRL0_YPPSW | LRADC_CTRL0_YNNSW;
- chan = 4;
- break;
- case LRADC_SAMPLE_PRESSURE:
- ctrl0 = LRADC_CTRL0_YPPSW | LRADC_CTRL0_XNNSW;
- chan = 5;
- break;
- }
-
- if (change) {
- writel(LRADC_CTRL0_PLATE_MASK,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(ctrl0, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
-
- writel(LRADC_CTRL4_LRADCSELECT_MASK(slot),
- lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
- writel(chan << LRADC_CTRL4_LRADCSELECT_OFFSET(slot),
- lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_SET);
- }
-
- writel(0xffffffff, lradc->base + LRADC_CH(slot) + STMP_OFFSET_REG_CLR);
- writel(1 << slot, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
-
- delay = jiffies + msecs_to_jiffies(LRADC_TS_SAMPLE_DELAY_MS);
- do {
- jiff = jiffies;
- reg = readl_relaxed(lradc->base + LRADC_CTRL1);
- if (reg & LRADC_CTRL1_LRADC_IRQ(slot))
- break;
- } while (time_before(jiff, delay));
-
- writel(LRADC_CTRL1_LRADC_IRQ(slot),
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
-
- if (time_after_eq(jiff, delay))
- return -ETIMEDOUT;
-
- val = readl(lradc->base + LRADC_CH(slot));
- val &= LRADC_CH_VALUE_MASK;
-
- return val;
-}
-
-static int32_t mxs_lradc_ts_sample_filter(struct mxs_lradc *lradc,
- enum lradc_ts_plate plate)
-{
- int32_t val, tot = 0;
- int i;
-
- val = mxs_lradc_ts_sample(lradc, plate, 1);
-
- /* Delay a bit so the touchscreen is stable. */
- mdelay(2);
-
- for (i = 0; i < LRADC_TS_SAMPLE_AMOUNT; i++) {
- val = mxs_lradc_ts_sample(lradc, plate, 0);
- tot += val;
- }
-
- return tot / LRADC_TS_SAMPLE_AMOUNT;
-}
-
-static void mxs_lradc_ts_work(struct work_struct *ts_work)
-{
- struct mxs_lradc *lradc = container_of(ts_work,
- struct mxs_lradc, ts_work);
- int val_x, val_y, val_p;
- bool valid = false;
-
- while (mxs_lradc_ts_touched(lradc)) {
- /* Disable touch detector so we can sample the touchscreen. */
- writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
-
- if (likely(valid)) {
- input_report_abs(lradc->ts_input, ABS_X, val_x);
- input_report_abs(lradc->ts_input, ABS_Y, val_y);
- input_report_abs(lradc->ts_input, ABS_PRESSURE, val_p);
- input_report_key(lradc->ts_input, BTN_TOUCH, 1);
- input_sync(lradc->ts_input);
- }
-
- valid = false;
-
- val_x = mxs_lradc_ts_sample_filter(lradc, LRADC_SAMPLE_X);
- if (val_x < 0)
- continue;
- val_y = mxs_lradc_ts_sample_filter(lradc, LRADC_SAMPLE_Y);
- if (val_y < 0)
- continue;
- val_p = mxs_lradc_ts_sample_filter(lradc, LRADC_SAMPLE_PRESSURE);
- if (val_p < 0)
- continue;
-
- valid = true;
- }
-
- input_report_abs(lradc->ts_input, ABS_PRESSURE, 0);
- input_report_key(lradc->ts_input, BTN_TOUCH, 0);
- input_sync(lradc->ts_input);
-
- /* Do not restart the TS IRQ if the driver is shutting down. */
- if (lradc->stop_touchscreen)
- return;
-
- /* Restart the touchscreen interrupts. */
- writel(LRADC_CTRL1_TOUCH_DETECT_IRQ,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
- writel(LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
-}
-
static int mxs_lradc_ts_open(struct input_dev *dev)
{
struct mxs_lradc *lradc = input_get_drvdata(dev);
- /* The touchscreen is starting. */
- lradc->stop_touchscreen = false;
-
/* Enable the touch-detect circuitry. */
- writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
-
- /* Enable the touch-detect IRQ. */
- writel(LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
+ mxs_lradc_enable_touch_detection(lradc);
return 0;
}
-static void mxs_lradc_ts_close(struct input_dev *dev)
+static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
{
- struct mxs_lradc *lradc = input_get_drvdata(dev);
+ /* stop all interrupts from firing */
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
+ LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
+ LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
+ LRADC_CTRL1);
- /* Indicate the touchscreen is stopping. */
- lradc->stop_touchscreen = true;
- mb();
-
- /* Wait until touchscreen thread finishes any possible remnants. */
- cancel_work_sync(&lradc->ts_work);
+ /* Power-down touchscreen touch-detect circuitry. */
+ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+}
- /* Disable touchscreen touch-detect IRQ. */
- writel(LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+static void mxs_lradc_ts_close(struct input_dev *dev)
+{
+ struct mxs_lradc *lradc = input_get_drvdata(dev);
- /* Power-down touchscreen touch-detect circuitry. */
- writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+ mxs_lradc_disable_ts(lradc);
}
static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
@@ -529,10 +868,8 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
return 0;
input = input_allocate_device();
- if (!input) {
- dev_err(dev, "Failed to allocate TS device!\n");
+ if (!input)
return -ENOMEM;
- }
input->name = DRIVER_NAME;
input->id.bustype = BUS_HOST;
@@ -562,8 +899,7 @@ static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
if (!lradc->use_touchscreen)
return;
- cancel_work_sync(&lradc->ts_work);
-
+ mxs_lradc_disable_ts(lradc);
input_unregister_device(lradc->ts_input);
}
@@ -576,31 +912,24 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
struct mxs_lradc *lradc = iio_priv(iio);
unsigned long reg = readl(lradc->base + LRADC_CTRL1);
const uint32_t ts_irq_mask =
- LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
- LRADC_CTRL1_TOUCH_DETECT_IRQ;
+ LRADC_CTRL1_TOUCH_DETECT_IRQ |
+ LRADC_CTRL1_LRADC_IRQ(2) |
+ LRADC_CTRL1_LRADC_IRQ(3) |
+ LRADC_CTRL1_LRADC_IRQ(4) |
+ LRADC_CTRL1_LRADC_IRQ(5);
- if (!(reg & LRADC_CTRL1_LRADC_IRQ_MASK))
+ if (!(reg & mxs_lradc_irq_mask(lradc)))
return IRQ_NONE;
- /*
- * Touchscreen IRQ handling code has priority and therefore
- * is placed here. In case touchscreen IRQ arrives, disable
- * it ASAP
- */
- if (reg & LRADC_CTRL1_TOUCH_DETECT_IRQ) {
- writel(ts_irq_mask,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
- if (!lradc->stop_touchscreen)
- schedule_work(&lradc->ts_work);
- }
+ if (lradc->use_touchscreen && (reg & ts_irq_mask))
+ mxs_lradc_handle_touch(lradc);
if (iio_buffer_enabled(iio))
iio_trigger_poll(iio->trig, iio_get_time_ns());
else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
complete(&lradc->completion);
- writel(reg & LRADC_CTRL1_LRADC_IRQ_MASK,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+ mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), LRADC_CTRL1);
return IRQ_HANDLED;
}
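Note the structural change in the handler above: the old code only acknowledged the touch IRQ and punted all sampling to a workqueue (which then polled with msleep/mdelay), whereas the reworked driver runs the touchscreen state machine straight from the interrupt handler. Each step only writes a few registers and arms the next conversion, so no sleeping work item is needed and the stop_touchscreen flag and ts_work item can be dropped.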
@@ -619,19 +948,13 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
for_each_set_bit(i, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
- writel(chan_value, lradc->base + LRADC_CH(j));
+ mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(j));
lradc->buffer[j] &= LRADC_CH_VALUE_MASK;
lradc->buffer[j] /= LRADC_DELAY_TIMER_LOOP;
j++;
}
- if (iio->scan_timestamp) {
- s64 *timestamp = (s64 *)((u8 *)lradc->buffer +
- ALIGN(j, sizeof(s64)));
- *timestamp = pf->timestamp;
- }
-
- iio_push_to_buffers(iio, (u8 *)lradc->buffer);
+ iio_push_to_buffers_with_timestamp(iio, lradc->buffer, pf->timestamp);
iio_trigger_notify_done(iio->trig);
@@ -644,7 +967,7 @@ static int mxs_lradc_configure_trigger(struct iio_trigger *trig, bool state)
struct mxs_lradc *lradc = iio_priv(iio);
const uint32_t st = state ? STMP_OFFSET_REG_SET : STMP_OFFSET_REG_CLR;
- writel(LRADC_DELAY_KICK, lradc->base + LRADC_DELAY(0) + st);
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_KICK, LRADC_DELAY(0) + st);
return 0;
}
@@ -716,38 +1039,30 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
goto err_mem;
}
- ret = iio_sw_buffer_preenable(iio);
- if (ret < 0)
- goto err_buf;
-
- writel(LRADC_CTRL1_LRADC_IRQ_EN_MASK,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
- writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+ if (lradc->soc == IMX28_LRADC)
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+ LRADC_CTRL1);
+ mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
- writel(chan_value, lradc->base + LRADC_CH(ofs));
+ mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(ofs));
bitmap_set(&enable, ofs, 1);
ofs++;
}
- writel(LRADC_DELAY_TRIGGER_LRADCS_MASK | LRADC_DELAY_KICK,
- lradc->base + LRADC_DELAY(0) + STMP_OFFSET_REG_CLR);
-
- writel(ctrl4_clr, lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
- writel(ctrl4_set, lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_SET);
-
- writel(ctrl1_irq, lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
-
- writel(enable << LRADC_DELAY_TRIGGER_LRADCS_OFFSET,
- lradc->base + LRADC_DELAY(0) + STMP_OFFSET_REG_SET);
+ mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
+ LRADC_DELAY_KICK, LRADC_DELAY(0));
+ mxs_lradc_reg_clear(lradc, ctrl4_clr, LRADC_CTRL4);
+ mxs_lradc_reg_set(lradc, ctrl4_set, LRADC_CTRL4);
+ mxs_lradc_reg_set(lradc, ctrl1_irq, LRADC_CTRL1);
+ mxs_lradc_reg_set(lradc, enable << LRADC_DELAY_TRIGGER_LRADCS_OFFSET,
+ LRADC_DELAY(0));
return 0;
-err_buf:
- kfree(lradc->buffer);
err_mem:
mutex_unlock(&lradc->lock);
return ret;
@@ -757,12 +1072,13 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
{
struct mxs_lradc *lradc = iio_priv(iio);
- writel(LRADC_DELAY_TRIGGER_LRADCS_MASK | LRADC_DELAY_KICK,
- lradc->base + LRADC_DELAY(0) + STMP_OFFSET_REG_CLR);
+ mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
+ LRADC_DELAY_KICK, LRADC_DELAY(0));
- writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(LRADC_CTRL1_LRADC_IRQ_EN_MASK,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+ mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+ if (lradc->soc == IMX28_LRADC)
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+ LRADC_CTRL1);
kfree(lradc->buffer);
mutex_unlock(&lradc->lock);
@@ -857,24 +1173,25 @@ static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
return ret;
/* Configure DELAY CHANNEL 0 for generic ADC sampling. */
- writel(adc_cfg, lradc->base + LRADC_DELAY(0));
+ mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0));
/* Disable remaining DELAY CHANNELs */
- writel(0, lradc->base + LRADC_DELAY(1));
- writel(0, lradc->base + LRADC_DELAY(2));
- writel(0, lradc->base + LRADC_DELAY(3));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(1));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
/* Configure the touchscreen type */
- writel(LRADC_CTRL0_TOUCH_SCREEN_TYPE,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+ if (lradc->soc == IMX28_LRADC) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
+ LRADC_CTRL0);
- if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE) {
- writel(LRADC_CTRL0_TOUCH_SCREEN_TYPE,
- lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
+ mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
+ LRADC_CTRL0);
}
/* Start internal temperature sensing. */
- writel(0, lradc->base + LRADC_CTRL2);
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2);
return 0;
}
@@ -883,11 +1200,10 @@ static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
{
int i;
- writel(LRADC_CTRL1_LRADC_IRQ_EN_MASK,
- lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+ mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1);
for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
- writel(0, lradc->base + LRADC_DELAY(i));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i));
}
static const struct of_device_id mxs_lradc_dt_ids[] = {
@@ -897,6 +1213,52 @@ static const struct of_device_id mxs_lradc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_lradc_dt_ids);
+static int mxs_lradc_probe_touchscreen(struct mxs_lradc *lradc,
+ struct device_node *lradc_node)
+{
+ int ret;
+ u32 ts_wires = 0, adapt;
+
+ ret = of_property_read_u32(lradc_node, "fsl,lradc-touchscreen-wires",
+ &ts_wires);
+ if (ret)
+ return -ENODEV; /* touchscreen feature disabled */
+
+ switch (ts_wires) {
+ case 4:
+ lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_4WIRE;
+ break;
+ case 5:
+ if (lradc->soc == IMX28_LRADC) {
+ lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_5WIRE;
+ break;
+ }
+ /* fall through to the error message for i.MX23 */
+ default:
+ dev_err(lradc->dev,
+ "Unsupported number of touchscreen wires (%d)\n",
+ ts_wires);
+ return -EINVAL;
+ }
+
+ lradc->over_sample_cnt = 4;
+ ret = of_property_read_u32(lradc_node, "fsl,ave-ctrl", &adapt);
+ if (ret == 0)
+ lradc->over_sample_cnt = adapt;
+
+ lradc->over_sample_delay = 2;
+ ret = of_property_read_u32(lradc_node, "fsl,ave-delay", &adapt);
+ if (ret == 0)
+ lradc->over_sample_delay = adapt;
+
+ lradc->settling_delay = 10;
+ ret = of_property_read_u32(lradc_node, "fsl,settling", &adapt);
+ if (ret == 0)
+ lradc->settling_delay = adapt;
+
+ return 0;
+}
+
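For reference, the helper above keys the touchscreen entirely off devicetree: a missing "fsl,lradc-touchscreen-wires" property simply disables the feature (-ENODEV), 4 wires is accepted on both SoCs while 5 wires is i.MX28-only, and the optional "fsl,ave-ctrl", "fsl,ave-delay" and "fsl,settling" properties override the built-in defaults of 4 samples, 2 and 10 (the latter two presumably in delay-unit ticks).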
static int mxs_lradc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -908,8 +1270,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
struct mxs_lradc *lradc;
struct iio_dev *iio;
struct resource *iores;
- uint32_t ts_wires = 0;
- int ret = 0;
+ int ret = 0, touch_ret;
int i;
/* Allocate the IIO device. */
@@ -920,6 +1281,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
}
lradc = iio_priv(iio);
+ lradc->soc = (enum mxs_lradc_id)of_id->data;
/* Grab the memory area */
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -928,20 +1290,18 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (IS_ERR(lradc->base))
return PTR_ERR(lradc->base);
- INIT_WORK(&lradc->ts_work, mxs_lradc_ts_work);
+ lradc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lradc->clk)) {
+ dev_err(dev, "Failed to get the delay unit clock\n");
+ return PTR_ERR(lradc->clk);
+ }
+ ret = clk_prepare_enable(lradc->clk);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable the delay unit clock\n");
+ return ret;
+ }
- /* Check if touchscreen is enabled in DT. */
- ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires",
- &ts_wires);
- if (ret)
- dev_info(dev, "Touchscreen not enabled.\n");
- else if (ts_wires == 4)
- lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_4WIRE;
- else if (ts_wires == 5)
- lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_5WIRE;
- else
- dev_warn(dev, "Unsupported number of touchscreen wires (%d)\n",
- ts_wires);
+ touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
/* Grab all IRQ sources */
for (i = 0; i < of_cfg->irq_count; i++) {
@@ -985,9 +1345,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
goto err_dev;
/* Register the touchscreen input device. */
- ret = mxs_lradc_ts_register(lradc);
- if (ret)
- goto err_dev;
+ if (touch_ret == 0) {
+ ret = mxs_lradc_ts_register(lradc);
+ if (ret)
+ goto err_ts_register;
+ }
/* Register IIO device. */
ret = iio_device_register(iio);
@@ -1000,6 +1362,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
err_ts:
mxs_lradc_ts_unregister(lradc);
+err_ts_register:
+ mxs_lradc_hw_stop(lradc);
err_dev:
mxs_lradc_trigger_remove(iio);
err_trig:
@@ -1012,14 +1376,13 @@ static int mxs_lradc_remove(struct platform_device *pdev)
struct iio_dev *iio = platform_get_drvdata(pdev);
struct mxs_lradc *lradc = iio_priv(iio);
+ iio_device_unregister(iio);
mxs_lradc_ts_unregister(lradc);
-
mxs_lradc_hw_stop(lradc);
-
- iio_device_unregister(iio);
- iio_triggered_buffer_cleanup(iio);
mxs_lradc_trigger_remove(iio);
+ iio_triggered_buffer_cleanup(iio);
+ clk_disable_unprepare(lradc->clk);
return 0;
}
@@ -1038,3 +1401,4 @@ module_platform_driver(mxs_lradc_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale i.MX28 LRADC driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index 20f2d555e7c..970d9edc73b 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -146,7 +146,6 @@ static int spear_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct spear_adc_info *info = iio_priv(indio_dev);
- u32 scale_mv;
u32 status;
switch (mask) {
@@ -168,10 +167,9 @@ static int spear_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_mv = (info->vref_external * 1000) >> DATA_BITS;
- *val = scale_mv / 1000;
- *val2 = (scale_mv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = info->vref_external;
+ *val2 = DATA_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
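The scale change above leans on the IIO core: with IIO_VAL_FRACTIONAL_LOG2 the reported value is val / 2^val2, i.e. vref_external / 2^DATA_BITS millivolts per LSB, so the division is done exactly instead of being pre-rounded through an intermediate scale_mv. For a hypothetical 2500 mV reference and 10 data bits that would be 2500 / 1024, roughly 2.44 mV per LSB.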
@@ -320,7 +318,7 @@ static int spear_adc_probe(struct platform_device *pdev)
return -ENOMEM;
}
info->adc_base_spear3xx =
- (struct adc_regs_spear3xx *)info->adc_base_spear6xx;
+ (struct adc_regs_spear3xx __iomem *)info->adc_base_spear6xx;
info->clk = clk_get(dev, NULL);
if (IS_ERR(info->clk)) {
@@ -335,7 +333,7 @@ static int spear_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if ((irq < 0) || (irq >= NR_IRQS)) {
+ if (irq <= 0) {
dev_err(dev, "failed getting interrupt resource\n");
ret = -EINVAL;
goto errout3;
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index ce7d91cb331..0feea5541d0 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -138,6 +138,5 @@ static struct i2c_driver adt7316_driver = {
module_i2c_driver(adt7316_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/9 and"
- "ADT7516/7/8 digital temperature sensor, ADC and DAC");
+MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/9 and ADT7516/7/8 digital temperature sensor, ADC and DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 0db8ef5835a..7f4f0a8245b 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -146,6 +146,5 @@ static struct spi_driver adt7316_driver = {
module_spi_driver(adt7316_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and"
- "ADT7516/7/9 digital temperature sensor, ADC and DAC");
+MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and ADT7516/7/9 digital temperature sensor, ADC and DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 1e1356825d6..80266e801d5 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -412,13 +412,13 @@ static ssize_t adt7316_store_ad_channel(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config2;
- unsigned long data = 0;
+ u8 data;
int ret;
if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
return -EPERM;
- ret = strict_strtoul(buf, 10, &data);
+ ret = kstrtou8(buf, 10, &data);
if (ret)
return -EINVAL;
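The kstrto* conversions in this file are more than a rename: kstrtou8() parses straight into a u8 and fails on overflow, whereas the old strict_strtoul() parsed into an unsigned long whose range then had to be checked (or got truncated) by hand. A minimal usage sketch of the pattern adopted here:

	u8 data;
	int ret;

	ret = kstrtou8(buf, 10, &data);	/* rejects values > 255 and malformed input */
	if (ret)
		return -EINVAL;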
@@ -823,10 +823,10 @@ static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
- unsigned long data = 0;
+ u8 data;
int ret;
- ret = strict_strtoul(buf, 16, &data);
+ ret = kstrtou8(buf, 16, &data);
if (ret || data > ADT7316_DA_2VREF_CH_MASK)
return -EINVAL;
@@ -878,13 +878,13 @@ static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
- unsigned long data;
+ u8 data;
int ret;
if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
return -EPERM;
- ret = strict_strtoul(buf, 10, &data);
+ ret = kstrtou8(buf, 10, &data);
if (ret || data > ADT7316_DA_EN_MODE_MASK)
return -EINVAL;
@@ -933,7 +933,7 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 ldac_config;
- unsigned long data;
+ u8 data;
int ret;
if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) {
@@ -941,7 +941,7 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
ADT7316_DA_EN_MODE_LDAC)
return -EPERM;
- ret = strict_strtoul(buf, 16, &data);
+ ret = kstrtou8(buf, 16, &data);
if (ret || data > ADT7316_LDAC_EN_DA_MASK)
return -EINVAL;
@@ -1079,11 +1079,11 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 ldac_config;
- unsigned long data;
+ u8 data;
int ret;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
- ret = strict_strtoul(buf, 16, &data);
+ ret = kstrtou8(buf, 16, &data);
if (ret || data > 3)
return -EINVAL;
@@ -1093,7 +1093,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
else if (data & 0x2)
ldac_config |= ADT7516_DAC_CD_IN_VREF;
} else {
- ret = strict_strtoul(buf, 16, &data);
+ ret = kstrtou8(buf, 16, &data);
if (ret)
return -EINVAL;
@@ -1281,11 +1281,11 @@ static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip,
static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip,
int offset_addr, const char *buf, size_t len)
{
- long data;
+ int data;
u8 val;
int ret;
- ret = strict_strtol(buf, 10, &data);
+ ret = kstrtoint(buf, 10, &data);
if (ret || data > 127 || data < -128)
return -EINVAL;
@@ -1442,7 +1442,7 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
int channel, const char *buf, size_t len)
{
u8 msb, lsb, offset;
- unsigned long data;
+ u16 data;
int ret;
if (channel >= ADT7316_DA_MSB_DATA_REGS ||
@@ -1454,7 +1454,7 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
offset = chip->dac_bits - 8;
- ret = strict_strtoul(buf, 10, &data);
+ ret = kstrtou16(buf, 10, &data);
if (ret || data >= (1 << chip->dac_bits))
return -EINVAL;
@@ -1830,11 +1830,11 @@ static ssize_t adt7316_set_int_mask(struct device *dev,
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
+ u16 data;
int ret;
u8 mask;
- ret = strict_strtoul(buf, 16, &data);
+ ret = kstrtou16(buf, 16, &data);
if (ret || data >= ADT7316_VDD_INT_MASK + 1)
return -EINVAL;
@@ -1901,7 +1901,7 @@ static inline ssize_t adt7316_set_ad_bound(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- long data;
+ int data;
u8 val;
int ret;
@@ -1909,7 +1909,7 @@ static inline ssize_t adt7316_set_ad_bound(struct device *dev,
this_attr->address > ADT7316_EX_TEMP_LOW)
return -EPERM;
- ret = strict_strtol(buf, 10, &data);
+ ret = kstrtoint(buf, 10, &data);
if (ret)
return -EINVAL;
@@ -2106,11 +2106,9 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
unsigned short *adt7316_platform_data = dev->platform_data;
int ret = 0;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
dev_set_drvdata(dev, indio_dev);
@@ -2146,58 +2144,44 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
if (adt7316_platform_data[0])
chip->bus.irq_flags = adt7316_platform_data[0];
- ret = request_threaded_irq(chip->bus.irq,
- NULL,
- &adt7316_event_handler,
- chip->bus.irq_flags | IRQF_ONESHOT,
- indio_dev->name,
- indio_dev);
+ ret = devm_request_threaded_irq(dev, chip->bus.irq,
+ NULL,
+ &adt7316_event_handler,
+ chip->bus.irq_flags |
+ IRQF_ONESHOT,
+ indio_dev->name,
+ indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH)
chip->config1 |= ADT7316_INT_POLARITY;
}
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1);
- if (ret) {
- ret = -EIO;
- goto error_unreg_irq;
- }
+ if (ret)
+ return -EIO;
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, chip->config3);
- if (ret) {
- ret = -EIO;
- goto error_unreg_irq;
- }
+ if (ret)
+ return -EIO;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_unreg_irq;
+ return ret;
dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n",
indio_dev->name);
return 0;
-
-error_unreg_irq:
- free_irq(chip->bus.irq, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
EXPORT_SYMBOL(adt7316_probe);
int adt7316_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (chip->bus.irq)
- free_irq(chip->bus.irq, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
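The adt7316 conversion above shows the pattern that recurs through the rest of this series: devm_iio_device_alloc() and devm_request_threaded_irq() tie the allocations to the underlying struct device, so the probe() error ladder (error_unreg_irq/error_free_dev/error_ret) collapses into plain returns and remove() shrinks to iio_device_unregister(). The unregister call is the one step that still has to stay explicit, since it must happen before the devm-managed resources are released.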
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index f4a0341cc70..7e7f9890a64 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -123,14 +123,14 @@ static int ad7150_read_raw(struct iio_dev *indio_dev,
}
}
-static int ad7150_read_event_config(struct iio_dev *indio_dev, u64 event_code)
+static int ad7150_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
{
int ret;
u8 threshtype;
bool adaptive;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING);
ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
if (ret < 0)
@@ -139,42 +139,47 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev, u64 event_code)
threshtype = (ret >> 5) & 0x03;
adaptive = !!(ret & 0x80);
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
- if (rising)
+ if (dir == IIO_EV_DIR_RISING)
return adaptive && (threshtype == 0x1);
else
return adaptive && (threshtype == 0x0);
case IIO_EV_TYPE_THRESH_ADAPTIVE:
- if (rising)
+ if (dir == IIO_EV_DIR_RISING)
return adaptive && (threshtype == 0x3);
else
return adaptive && (threshtype == 0x2);
case IIO_EV_TYPE_THRESH:
- if (rising)
+ if (dir == IIO_EV_DIR_RISING)
return !adaptive && (threshtype == 0x1);
else
return !adaptive && (threshtype == 0x0);
+ default:
+ break;
}
return -EINVAL;
}
/* lock should be held */
-static int ad7150_write_event_params(struct iio_dev *indio_dev, u64 event_code)
+static int ad7150_write_event_params(struct iio_dev *indio_dev,
+ unsigned int chan, enum iio_event_type type,
+ enum iio_event_direction dir)
{
int ret;
u16 value;
u8 sens, timeout;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING);
+ int rising = (dir == IIO_EV_DIR_RISING);
+ u64 event_code;
+
+ event_code = IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE, chan, type, dir);
if (event_code != chip->current_event)
return 0;
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ switch (type) {
/* Note completely different from the adaptive versions */
case IIO_EV_TYPE_THRESH:
value = chip->threshold[rising][chan];
@@ -211,18 +216,20 @@ static int ad7150_write_event_params(struct iio_dev *indio_dev, u64 event_code)
}
static int ad7150_write_event_config(struct iio_dev *indio_dev,
- u64 event_code, int state)
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
{
u8 thresh_type, cfg, adaptive;
int ret;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING);
+ int rising = (dir == IIO_EV_DIR_RISING);
+ u64 event_code;
/* Something must always be turned on */
if (state == 0)
return -EINVAL;
+ event_code = IIO_UNMOD_EVENT_CODE(chan->type, chan->channel, type, dir);
if (event_code == chip->current_event)
return 0;
mutex_lock(&chip->state_lock);
@@ -232,7 +239,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
cfg = ret & ~((0x03 << 5) | (0x1 << 7));
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
adaptive = 1;
if (rising)
@@ -268,7 +275,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
chip->current_event = event_code;
/* update control attributes */
- ret = ad7150_write_event_params(indio_dev, event_code);
+ ret = ad7150_write_event_params(indio_dev, chan->channel, type, dir);
error_ret:
mutex_unlock(&chip->state_lock);
@@ -276,53 +283,52 @@ error_ret:
}
static int ad7150_read_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
- int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING);
+ int rising = (dir == IIO_EV_DIR_RISING);
/* Complex register sharing going on here */
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
- *val = chip->mag_sensitivity[rising][chan];
- return 0;
-
+ *val = chip->mag_sensitivity[rising][chan->channel];
+ return IIO_VAL_INT;
case IIO_EV_TYPE_THRESH_ADAPTIVE:
- *val = chip->thresh_sensitivity[rising][chan];
- return 0;
-
+ *val = chip->thresh_sensitivity[rising][chan->channel];
+ return IIO_VAL_INT;
case IIO_EV_TYPE_THRESH:
- *val = chip->threshold[rising][chan];
- return 0;
-
+ *val = chip->threshold[rising][chan->channel];
+ return IIO_VAL_INT;
default:
return -EINVAL;
- };
+ }
}
static int ad7150_write_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
int ret;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING);
+ int rising = (dir == IIO_EV_DIR_RISING);
mutex_lock(&chip->state_lock);
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
- chip->mag_sensitivity[rising][chan] = val;
+ chip->mag_sensitivity[rising][chan->channel] = val;
break;
case IIO_EV_TYPE_THRESH_ADAPTIVE:
- chip->thresh_sensitivity[rising][chan] = val;
+ chip->thresh_sensitivity[rising][chan->channel] = val;
break;
case IIO_EV_TYPE_THRESH:
- chip->threshold[rising][chan] = val;
+ chip->threshold[rising][chan->channel] = val;
break;
default:
ret = -EINVAL;
@@ -330,7 +336,7 @@ static int ad7150_write_event_value(struct iio_dev *indio_dev,
}
/* write back if active */
- ret = ad7150_write_event_params(indio_dev, event_code);
+ ret = ad7150_write_event_params(indio_dev, chan->channel, type, dir);
error_ret:
mutex_unlock(&chip->state_lock);
@@ -374,17 +380,22 @@ static ssize_t ad7150_store_timeout(struct device *dev,
struct ad7150_chip_info *chip = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address) ==
- IIO_EV_DIR_RISING);
+ enum iio_event_direction dir;
+ enum iio_event_type type;
+ int rising;
u8 data;
int ret;
+ type = IIO_EVENT_CODE_EXTRACT_TYPE(this_attr->address);
+ dir = IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address);
+ rising = (dir == IIO_EV_DIR_RISING);
+
ret = kstrtou8(buf, 10, &data);
if (ret < 0)
return ret;
mutex_lock(&chip->state_lock);
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(this_attr->address)) {
+ switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
chip->mag_timeout[rising][chan] = data;
break;
@@ -396,7 +407,7 @@ static ssize_t ad7150_store_timeout(struct device *dev,
goto error_ret;
}
- ret = ad7150_write_event_params(indio_dev, this_attr->address);
+ ret = ad7150_write_event_params(indio_dev, chan, type, dir);
error_ret:
mutex_unlock(&chip->state_lock);
@@ -424,6 +435,40 @@ static AD7150_TIMEOUT(0, thresh_adaptive, falling, THRESH_ADAPTIVE, FALLING);
static AD7150_TIMEOUT(1, thresh_adaptive, rising, THRESH_ADAPTIVE, RISING);
static AD7150_TIMEOUT(1, thresh_adaptive, falling, THRESH_ADAPTIVE, FALLING);
+static const struct iio_event_spec ad7150_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH_ADAPTIVE,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH_ADAPTIVE,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_chan_spec ad7150_channels[] = {
{
.type = IIO_CAPACITANCE,
@@ -431,26 +476,16 @@ static const struct iio_chan_spec ad7150_channels[] = {
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_AVERAGE_RAW),
- .event_mask =
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_FALLING) |
- IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_FALLING)
+ .event_spec = ad7150_events,
+ .num_event_specs = ARRAY_SIZE(ad7150_events),
}, {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_AVERAGE_RAW),
- .event_mask =
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_FALLING) |
- IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_FALLING)
+ .event_spec = ad7150_events,
+ .num_event_specs = ARRAY_SIZE(ad7150_events),
},
};
@@ -541,10 +576,10 @@ static const struct iio_info ad7150_info = {
.event_attrs = &ad7150_event_attribute_group,
.driver_module = THIS_MODULE,
.read_raw = &ad7150_read_raw,
- .read_event_config = &ad7150_read_event_config,
- .write_event_config = &ad7150_write_event_config,
- .read_event_value = &ad7150_read_event_value,
- .write_event_value = &ad7150_write_event_value,
+ .read_event_config_new = &ad7150_read_event_config,
+ .write_event_config_new = &ad7150_write_event_config,
+ .read_event_value_new = &ad7150_read_event_value,
+ .write_event_value_new = &ad7150_write_event_value,
};
/*
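A note on the *_new callback names wired up above: they appear to be transitional. During this cycle the IIO event interface moved from packed u64 event codes to explicit (chan, type, dir) arguments, and converted drivers registered their handlers under read/write_event_{config,value}_new until every driver had switched, after which the suffix could be dropped again.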
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 75a533bce02..862d68d9963 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -656,20 +656,21 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
- *val2 = 488;
*val = 0;
+ *val2 = 488;
+ ret = IIO_VAL_INT_PLUS_NANO;
break;
case IIO_VOLTAGE:
/* 1170mV / 2^23 */
- *val2 = 139475;
- *val = 0;
+ *val = 1170;
+ *val2 = 23;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
break;
default:
- ret = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ break;
}
- ret = IIO_VAL_INT_PLUS_NANO;
break;
default:
ret = -EINVAL;
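A quick check of the two scale cases above: for capacitance, 8.192 pF / 2^24 is about 488e-9 pF per LSB, which is exactly what *val = 0, *val2 = 488 expresses with IIO_VAL_INT_PLUS_NANO; for voltage the driver now hands the core 1170 and 23 with IIO_VAL_FRACTIONAL_LOG2, i.e. 1170 mV / 2^23 = 0.000139475 mV per LSB, the same number the old code hard-coded as 139475 nano.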
diff --git a/drivers/staging/iio/frequency/ad5930.c b/drivers/staging/iio/frequency/ad5930.c
index 69e90e9e60e..a4aeee6ffdf 100644
--- a/drivers/staging/iio/frequency/ad5930.c
+++ b/drivers/staging/iio/frequency/ad5930.c
@@ -94,11 +94,9 @@ static int ad5930_probe(struct spi_device *spi)
struct iio_dev *idev;
int ret = 0;
- idev = iio_device_alloc(sizeof(*st));
- if (idev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ idev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!idev)
+ return -ENOMEM;
spi_set_drvdata(spi, idev);
st = iio_priv(idev);
@@ -110,24 +108,18 @@ static int ad5930_probe(struct spi_device *spi)
ret = iio_device_register(idev);
if (ret)
- goto error_free_dev;
+ return ret;
spi->max_speed_hz = 2000000;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 16;
spi_setup(spi);
return 0;
-
-error_free_dev:
- iio_device_free(idev);
-error_ret:
- return ret;
}
static int ad5930_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 4e18380c514..c7d0307c8e7 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -81,9 +81,9 @@ static ssize_t ad9832_write(struct device *dev,
struct ad9832_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ unsigned long val;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
goto error_ret;
@@ -214,14 +214,14 @@ static int ad9832_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = regulator_get(&spi->dev, "vcc");
+ reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(reg)) {
ret = regulator_enable(reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_disable_reg;
@@ -279,47 +279,42 @@ static int ad9832_probe(struct spi_device *spi)
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_free_device;
+ goto error_disable_reg;
}
ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
return 0;
-error_free_device:
- iio_device_free(indio_dev);
error_disable_reg:
if (!IS_ERR(reg))
regulator_disable(reg);
-error_put_reg:
- if (!IS_ERR(reg))
- regulator_put(reg);
return ret;
}
@@ -330,11 +325,8 @@ static int ad9832_remove(struct spi_device *spi)
struct ad9832_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 5cba3c01f41..86cda617609 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -70,9 +70,9 @@ static ssize_t ad9834_write(struct device *dev,
struct ad9834_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ unsigned long val;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
goto error_ret;
@@ -327,14 +327,14 @@ static int ad9834_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = regulator_get(&spi->dev, "vcc");
+ reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(reg)) {
ret = regulator_enable(reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_disable_reg;
@@ -388,39 +388,35 @@ static int ad9834_probe(struct spi_device *spi)
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_free_device;
+ goto error_disable_reg;
}
ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
return 0;
-error_free_device:
- iio_device_free(indio_dev);
error_disable_reg:
if (!IS_ERR(reg))
regulator_disable(reg);
-error_put_reg:
- if (!IS_ERR(reg))
- regulator_put(reg);
+
return ret;
}
@@ -430,11 +426,8 @@ static int ad9834_remove(struct spi_device *spi)
struct ad9834_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/frequency/ad9850.c b/drivers/staging/iio/frequency/ad9850.c
index 01a8a93031f..af877ff680e 100644
--- a/drivers/staging/iio/frequency/ad9850.c
+++ b/drivers/staging/iio/frequency/ad9850.c
@@ -80,11 +80,9 @@ static int ad9850_probe(struct spi_device *spi)
struct iio_dev *idev;
int ret = 0;
- idev = iio_device_alloc(sizeof(*st));
- if (idev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ idev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!idev)
+ return -ENOMEM;
spi_set_drvdata(spi, idev);
st = iio_priv(idev);
mutex_init(&st->lock);
@@ -96,24 +94,18 @@ static int ad9850_probe(struct spi_device *spi)
ret = iio_device_register(idev);
if (ret)
- goto error_free_dev;
+ return ret;
spi->max_speed_hz = 2000000;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 16;
spi_setup(spi);
return 0;
-
-error_free_dev:
- iio_device_free(idev);
-error_ret:
- return ret;
}
static int ad9850_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/frequency/ad9852.c b/drivers/staging/iio/frequency/ad9852.c
index 1344031232b..11e4367375d 100644
--- a/drivers/staging/iio/frequency/ad9852.c
+++ b/drivers/staging/iio/frequency/ad9852.c
@@ -67,7 +67,6 @@ static ssize_t ad9852_set_parameter(struct device *dev,
const char *buf,
size_t len)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
struct ad9852_config *config = (struct ad9852_config *)buf;
@@ -78,99 +77,77 @@ static ssize_t ad9852_set_parameter(struct device *dev,
xfer.tx_buf = &config->phajst0[0];
mutex_lock(&st->lock);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 3;
xfer.tx_buf = &config->phajst1[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 6;
xfer.tx_buf = &config->fretun1[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 6;
xfer.tx_buf = &config->fretun2[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 6;
xfer.tx_buf = &config->dltafre[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->updtclk[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 4;
xfer.tx_buf = &config->ramprat[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->control[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 3;
xfer.tx_buf = &config->outpskm[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 2;
xfer.tx_buf = &config->outpskr[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 3;
xfer.tx_buf = &config->daccntl[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
error_ret:
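The ad9852, ad9910 and ad9951 hunks all make the same substitution: the hand-rolled one-element spi_message is replaced by spi_sync_transfer(). For the single-transfer case used throughout, that helper boils down to roughly the following (a sketch of the equivalence, not the kernel's exact inline):

	static int one_xfer_sync(struct spi_device *spi, struct spi_transfer *xfer)
	{
		struct spi_message msg;

		spi_message_init(&msg);
		spi_message_add_tail(xfer, &msg);
		return spi_sync(spi, &msg);
	}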
@@ -229,11 +206,9 @@ static int ad9852_probe(struct spi_device *spi)
struct iio_dev *idev;
int ret = 0;
- idev = iio_device_alloc(sizeof(*st));
- if (idev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ idev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!idev)
+ return -ENOMEM;
st = iio_priv(idev);
spi_set_drvdata(spi, idev);
mutex_init(&st->lock);
@@ -245,7 +220,7 @@ static int ad9852_probe(struct spi_device *spi)
ret = iio_device_register(idev);
if (ret)
- goto error_free_dev;
+ return ret;
spi->max_speed_hz = 2000000;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
@@ -253,18 +228,11 @@ static int ad9852_probe(struct spi_device *spi)
ad9852_init(st);
return 0;
-
-error_free_dev:
- iio_device_free(idev);
-
-error_ret:
- return ret;
}
static int ad9852_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/frequency/ad9910.c b/drivers/staging/iio/frequency/ad9910.c
index e48f874c1fc..755e0482681 100644
--- a/drivers/staging/iio/frequency/ad9910.c
+++ b/drivers/staging/iio/frequency/ad9910.c
@@ -119,7 +119,6 @@ static ssize_t ad9910_set_parameter(struct device *dev,
const char *buf,
size_t len)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
struct ad9910_config *config = (struct ad9910_config *)buf;
@@ -130,152 +129,118 @@ static ssize_t ad9910_set_parameter(struct device *dev,
xfer.tx_buf = &config->auxdac[0];
mutex_lock(&st->lock);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->ioupd[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->ftw[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 3;
xfer.tx_buf = &config->pow[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->asf[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->multc[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->dig_rampl[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->dig_ramps[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->dig_rampr[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep0[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep1[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep2[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep3[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep4[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep5[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep6[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 9;
xfer.tx_buf = &config->sin_tonep7[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
error_ret:
@@ -288,7 +253,6 @@ static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9910_set_parameter, 0);
static void ad9910_init(struct ad9910_state *st)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
u8 cfr[5];
@@ -304,9 +268,7 @@ static void ad9910_init(struct ad9910_state *st)
xfer.len = 5;
xfer.tx_buf = &cfr;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
@@ -319,9 +281,7 @@ static void ad9910_init(struct ad9910_state *st)
xfer.len = 5;
xfer.tx_buf = &cfr;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
@@ -334,9 +294,7 @@ static void ad9910_init(struct ad9910_state *st)
xfer.len = 5;
xfer.tx_buf = &cfr;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
@@ -367,11 +325,9 @@ static int ad9910_probe(struct spi_device *spi)
struct iio_dev *idev;
int ret = 0;
- idev = iio_device_alloc(sizeof(*st));
- if (idev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ idev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!idev)
+ return -ENOMEM;
spi_set_drvdata(spi, idev);
st = iio_priv(idev);
mutex_init(&st->lock);
@@ -383,24 +339,18 @@ static int ad9910_probe(struct spi_device *spi)
ret = iio_device_register(idev);
if (ret)
- goto error_free_dev;
+ return ret;
spi->max_speed_hz = 2000000;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
ad9910_init(st);
return 0;
-
-error_free_dev:
- iio_device_free(idev);
-error_ret:
- return ret;
}
static int ad9910_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
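
The conversion above collapses every hand-built one-transfer SPI message into a single spi_sync_transfer() call. A minimal sketch of the idiom, using a hypothetical write_block() helper rather than anything taken from the driver itself:

#include <linux/spi/spi.h>

/* One-transfer register write: sketch of the spi_sync_transfer() idiom. */
static int write_block(struct spi_device *spi, const u8 *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
	};

	/* replaces spi_message_init() + spi_message_add_tail() + spi_sync() */
	return spi_sync_transfer(spi, &xfer, 1);
}

spi_sync_transfer() builds the spi_message internally, which is why the per-function spi_message local can be deleted throughout these drivers.
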
diff --git a/drivers/staging/iio/frequency/ad9951.c b/drivers/staging/iio/frequency/ad9951.c
index 8234e3c915c..5e8990a0210 100644
--- a/drivers/staging/iio/frequency/ad9951.c
+++ b/drivers/staging/iio/frequency/ad9951.c
@@ -60,7 +60,6 @@ static ssize_t ad9951_set_parameter(struct device *dev,
const char *buf,
size_t len)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
struct ad9951_config *config = (struct ad9951_config *)buf;
@@ -71,36 +70,28 @@ static ssize_t ad9951_set_parameter(struct device *dev,
xfer.tx_buf = &config->asf[0];
mutex_lock(&st->lock);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 2;
xfer.tx_buf = &config->arr[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 5;
xfer.tx_buf = &config->ftw0[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
xfer.len = 3;
xfer.tx_buf = &config->ftw1[0];
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
error_ret:
@@ -113,7 +104,6 @@ static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9951_set_parameter, 0);
static void ad9951_init(struct ad9951_state *st)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
u8 cfr[5];
@@ -129,9 +119,7 @@ static void ad9951_init(struct ad9951_state *st)
xfer.len = 5;
xfer.tx_buf = &cfr;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
@@ -143,9 +131,7 @@ static void ad9951_init(struct ad9951_state *st)
xfer.len = 4;
xfer.tx_buf = &cfr;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
@@ -176,11 +162,9 @@ static int ad9951_probe(struct spi_device *spi)
struct iio_dev *idev;
int ret = 0;
- idev = iio_device_alloc(sizeof(*st));
- if (idev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ idev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!idev)
+ return -ENOMEM;
spi_set_drvdata(spi, idev);
st = iio_priv(idev);
mutex_init(&st->lock);
@@ -193,25 +177,18 @@ static int ad9951_probe(struct spi_device *spi)
ret = iio_device_register(idev);
if (ret)
- goto error_free_dev;
+ return ret;
spi->max_speed_hz = 2000000;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
ad9951_init(st);
return 0;
-
-error_free_dev:
- iio_device_free(idev);
-
-error_ret:
- return ret;
}
static int ad9951_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
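
Both DDS drivers also move from iio_device_alloc()/iio_device_free() to the device-managed allocator, which is what lets the error labels and the explicit free in remove() go away. A stripped-down sketch of the resulting probe()/remove() shape, with illustrative foo_* names that are not taken from either driver:

#include <linux/spi/spi.h>
#include <linux/iio/iio.h>

struct foo_state {
	struct spi_device *sdev;
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_state *st;
	struct iio_dev *idev;

	/* device-managed: freed automatically when the SPI device is unbound */
	idev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!idev)
		return -ENOMEM;

	spi_set_drvdata(spi, idev);
	st = iio_priv(idev);
	st->sdev = spi;

	/* idev->name, ->info and ->channels would be filled in here */

	return iio_device_register(idev);
}

static int foo_remove(struct spi_device *spi)
{
	iio_device_unregister(spi_get_drvdata(spi));
	return 0;	/* no iio_device_free() and no error unwinding needed */
}
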
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index 0e8e02a3cf5..1fac9894b18 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -57,6 +57,20 @@ static const struct iio_dummy_accel_calibscale dummy_scales[] = {
{ 733, 13, 0x9 }, /* 733.000013 */
};
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+
+/*
+ * simple event - triggered when value rises above
+ * a threshold
+ */
+static const struct iio_event_spec iio_dummy_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
+
+#endif
+
/*
* iio_dummy_channels - Description of available channels
*
@@ -90,6 +104,11 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
* when converting to standard units (microvolts)
*/
BIT(IIO_CHAN_INFO_SCALE),
+ /*
+ * sampling_frequency
+ * The frequency in Hz at which the channels are sampled
+ */
+ .info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
/* The ordering of elements in the buffer via an enum */
.scan_index = voltage0,
.scan_type = { /* Description of storage in buffer */
@@ -99,12 +118,8 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
.shift = 0, /* zero shift */
},
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- /*
- * simple event - triggered when value rises above
- * a threshold
- */
- .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
+ .event_spec = &iio_dummy_event,
+ .num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
},
/* Differential ADC channel in_voltage1-voltage2_raw etc*/
@@ -130,6 +145,10 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
* input channels of type IIO_VOLTAGE.
*/
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ /*
+ * sampling_frequency
+ * The frequency in Hz at which the channels are sampled
+ */
.scan_index = diffvoltage1m2,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -147,6 +166,7 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
.channel2 = 4,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = diffvoltage3m4,
.scan_type = {
.sign = 's',
@@ -173,6 +193,7 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
*/
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = accelx,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -272,6 +293,11 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
*val2 = st->accel_calibscale->val2;
ret = IIO_VAL_INT_PLUS_MICRO;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = 3;
+ *val2 = 33;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
default:
break;
}
@@ -344,10 +370,10 @@ static const struct iio_info iio_dummy_info = {
.read_raw = &iio_dummy_read_raw,
.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .read_event_config = &iio_simple_dummy_read_event_config,
- .write_event_config = &iio_simple_dummy_write_event_config,
- .read_event_value = &iio_simple_dummy_read_event_value,
- .write_event_value = &iio_simple_dummy_write_event_value,
+ .read_event_config_new = &iio_simple_dummy_read_event_config,
+ .write_event_config_new = &iio_simple_dummy_write_event_config,
+ .read_event_value_new = &iio_simple_dummy_read_event_value,
+ .write_event_value_new = &iio_simple_dummy_write_event_value,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
};
@@ -454,7 +480,8 @@ static int iio_dummy_probe(int index)
* buffer, but avoid the output channel being registered by reducing the
* number of channels by 1.
*/
- ret = iio_simple_dummy_configure_buffer(indio_dev, iio_dummy_channels, 5);
+ ret = iio_simple_dummy_configure_buffer(indio_dev,
+ iio_dummy_channels, 5);
if (ret < 0)
goto error_unregister_events;
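
The dummy driver also demonstrates the new event description: the per-channel event_mask bitmap is replaced by an array of struct iio_event_spec referenced from the channel. A minimal sketch of that style for a single rising-threshold event, with illustrative foo_* identifiers:

#include <linux/bitops.h>
#include <linux/iio/iio.h>

static const struct iio_event_spec foo_thresh_event = {
	.type = IIO_EV_TYPE_THRESH,
	.dir = IIO_EV_DIR_RISING,
	/* value and enable attributes exposed per channel */
	.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};

static const struct iio_chan_spec foo_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.event_spec = &foo_thresh_event,
		.num_event_specs = 1,
	},
};
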
diff --git a/drivers/staging/iio/iio_simple_dummy.h b/drivers/staging/iio/iio_simple_dummy.h
index c9e8702caca..b126196cdf3 100644
--- a/drivers/staging/iio/iio_simple_dummy.h
+++ b/drivers/staging/iio/iio_simple_dummy.h
@@ -45,19 +45,29 @@ struct iio_dummy_state {
struct iio_dev;
int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
- u64 event_code);
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir);
int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
- u64 event_code,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
int state);
int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int *val);
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val,
+ int *val2);
int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int val);
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val,
+ int val2);
int iio_simple_dummy_events_register(struct iio_dev *indio_dev);
int iio_simple_dummy_events_unregister(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index 72f400c3cbc..46c134b2a5d 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -82,11 +82,8 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
len += 2;
}
}
- /* Store the timestamp at an 8 byte aligned offset */
- if (indio_dev->scan_timestamp)
- *(s64 *)((u8 *)data + ALIGN(len, sizeof(s64)))
- = iio_get_time_ns();
- iio_push_to_buffers(indio_dev, (u8 *)data);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
kfree(data);
@@ -102,14 +99,6 @@ done:
static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
/*
- * iio_sw_buffer_preenable:
- * Generic function for equal sized ring elements + 64 bit timestamp
- * Assumes that any combination of channels can be enabled.
- * Typically replaced to implement restrictions on what combinations
- * can be captured (hardware scan modes).
- */
- .preenable = &iio_sw_buffer_preenable,
- /*
* iio_triggered_buffer_postenable:
* Generic function that simply attaches the pollfunc to the trigger.
* Replace this to mess with hardware state before we attach the
@@ -138,7 +127,7 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
goto error_ret;
}
- indio_dev->buffer = buffer;
+ iio_device_attach_buffer(indio_dev, buffer);
/* Enable timestamps by default */
buffer->scan_timestamp = true;
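
iio_push_to_buffers_with_timestamp() now handles placing the 64-bit timestamp at its 8-byte-aligned offset, so the handler no longer computes that offset by hand. A hedged sketch of a triggered-buffer handler built around the helper; the fixed data[] layout is an assumption for illustration, the real drivers size it from the enabled scan mask:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	/* room for the enabled channels plus the 8-byte aligned s64 timestamp */
	u16 data[8] __aligned(8) = { 0 };

	/* read the enabled channels into data[] here ... */

	/* appends the timestamp at the aligned offset and pushes the scan */
	iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
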
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index 317b77465db..812ebd05a7f 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -23,13 +23,17 @@
/**
* iio_simple_dummy_read_event_config() - is event enabled?
* @indio_dev: the device instance data
- * @event_code: event code of the event being queried
+ * @chan: channel for the event whose state is being queried
+ * @type: type of the event whose state is being queried
+ * @dir: direction of the event whose state is being queried
*
* This function would normally query the relevant registers or a cache to
* discover if the event generation is enabled on the device.
*/
int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
@@ -39,7 +43,9 @@ int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
/**
* iio_simple_dummy_write_event_config() - set whether event is enabled
* @indio_dev: the device instance data
- * @event_code: event code of event being enabled/disabled
+ * @chan: channel for the event whose state is being set
+ * @type: type of the event whose state is being set
+ * @dir: direction of the event whose state is being set
* @state: whether to enable or disable the device.
*
* This function would normally set the relevant registers on the devices
@@ -47,7 +53,9 @@ int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
* value.
*/
int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
- u64 event_code,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
int state)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
@@ -56,12 +64,11 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
* Deliberately over the top code splitting to illustrate
* how this is done when multiple events exist.
*/
- switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ switch (chan->type) {
case IIO_VOLTAGE:
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ switch (type) {
case IIO_EV_TYPE_THRESH:
- if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)
+ if (dir == IIO_EV_DIR_RISING)
st->event_en = state;
else
return -EINVAL;
@@ -79,7 +86,10 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
/**
* iio_simple_dummy_read_event_value() - get value associated with event
* @indio_dev: device instance specific data
- * @event_code: event code for the event whose value is being queried
+ * @chan: channel for the event whose value is being read
+ * @type: type of the event whose value is being read
+ * @dir: direction of the event whose value is being read
+ * @info: info type of the event whose value is being read
* @val: value for the event code.
*
* Many devices provide a large set of events of which only a subset may
@@ -89,25 +99,34 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
* the enabled event is changed.
*/
int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
*val = st->event_val;
- return 0;
+ return IIO_VAL_INT;
}
/**
* iio_simple_dummy_write_event_value() - set value associate with event
* @indio_dev: device instance specific data
- * @event_code: event code for the event whose value is being set
+ * @chan: channel for the event whose value is being set
+ * @type: type of the event whose value is being set
+ * @dir: direction of the event whose value is being set
+ * @info: info type of the event whose value is being set
* @val: the value to be set.
*/
int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
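
Under the new interface the core decodes the event code, so the callbacks receive the channel, type and direction directly, and the value callback returns an IIO_VAL_* code rather than 0. A sketch of the read-side pair for a driver that caches a single rising threshold (the foo_state layout is illustrative); during the transition these are wired up through the read_event_config_new/read_event_value_new fields of struct iio_info, as in the hunks above:

#include <linux/iio/iio.h>

struct foo_state {
	bool event_en;	/* is the rising threshold event enabled? */
	int event_val;	/* cached threshold value */
};

static int foo_read_event_config(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir)
{
	struct foo_state *st = iio_priv(indio_dev);

	return st->event_en;
}

static int foo_read_event_value(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info,
				int *val, int *val2)
{
	struct foo_state *st = iio_priv(indio_dev);

	*val = st->event_val;
	return IIO_VAL_INT;	/* integer value: only *val is used */
}
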
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 6330af656a0..0a4298b744e 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -323,10 +323,10 @@ static ssize_t ad5933_store_frequency(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long val;
+ unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -400,12 +400,12 @@ static ssize_t ad5933_store(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long val;
+ u16 val;
int i, ret = 0;
unsigned short dat;
if (this_attr->address != AD5933_IN_PGA_GAIN) {
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
return ret;
}
@@ -434,7 +434,7 @@ static ssize_t ad5933_store(struct device *dev,
ret = ad5933_cmd(st, 0);
break;
case AD5933_OUT_SETTLING_CYCLES:
- val = clamp(val, 0L, 0x7FFL);
+ val = clamp(val, (u16)0, (u16)0x7FF);
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
@@ -448,7 +448,7 @@ static ssize_t ad5933_store(struct device *dev,
AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
break;
case AD5933_FREQ_POINTS:
- val = clamp(val, 0L, 511L);
+ val = clamp(val, (u16)0, (u16)511);
st->freq_points = val;
dat = cpu_to_be16(val);
@@ -574,10 +574,6 @@ static int ad5933_ring_preenable(struct iio_dev *indio_dev)
if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- ret = iio_sw_buffer_preenable(indio_dev);
- if (ret < 0)
- return ret;
-
ret = ad5933_reset(st);
if (ret < 0)
return ret;
@@ -630,10 +626,14 @@ static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
- indio_dev->buffer = iio_kfifo_allocate(indio_dev);
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer;
+
+ buffer = iio_kfifo_allocate(indio_dev);
+	if (!buffer)
return -ENOMEM;
+ iio_device_attach_buffer(indio_dev, buffer);
+
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &ad5933_ring_setup_ops;
@@ -676,7 +676,7 @@ static void ad5933_work(struct work_struct *work)
} else {
buf[0] = be16_to_cpu(buf[0]);
}
- iio_push_to_buffers(indio_dev, (u8 *)buf);
+ iio_push_to_buffers(indio_dev, buf);
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
@@ -703,7 +703,9 @@ static int ad5933_probe(struct i2c_client *client,
int ret, voltage_uv = 0;
struct ad5933_platform_data *pdata = client->dev.platform_data;
struct ad5933_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -716,11 +718,11 @@ static int ad5933_probe(struct i2c_client *client,
else
st->pdata = pdata;
- st->reg = regulator_get(&client->dev, "vcc");
+ st->reg = devm_regulator_get(&client->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
voltage_uv = regulator_get_voltage(st->reg);
}
@@ -778,11 +780,6 @@ error_unreg_ring:
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return ret;
}
@@ -795,11 +792,8 @@ static int ad5933_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_buffer_unregister(indio_dev);
iio_kfifo_free(indio_dev->buffer);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
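
The regulator handling moves in the same managed-resource direction: devm_regulator_get() removes the regulator_put() calls from both the probe error path and remove(), leaving only enable/disable to track. A minimal sketch, assuming a required "vcc" supply; the driver above instead treats a failed get as an optional supply and simply skips enabling it:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Acquire and enable the "vcc" supply; nothing to release on the way out. */
static int foo_enable_supply(struct device *dev, struct regulator **reg)
{
	*reg = devm_regulator_get(dev, "vcc");	/* no regulator_put() needed */
	if (IS_ERR(*reg))
		return PTR_ERR(*reg);

	return regulator_enable(*reg);
}
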
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index e4998e4d443..488e690388c 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -240,7 +240,7 @@ static ssize_t store_range(struct device *dev,
unsigned long lval;
unsigned int new_range;
- if (strict_strtoul(buf, 10, &lval))
+ if (kstrtoul(buf, 10, &lval))
return -EINVAL;
if (!(lval == 1000UL || lval == 4000UL ||
@@ -279,18 +279,18 @@ static ssize_t store_resolution(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
int status;
- unsigned long lval;
+ unsigned int val;
unsigned int new_adc_bit;
- if (strict_strtoul(buf, 10, &lval))
+ if (kstrtouint(buf, 10, &val))
return -EINVAL;
- if (!(lval == 4 || lval == 8 || lval == 12 || lval == 16)) {
+ if (!(val == 4 || val == 8 || val == 12 || val == 16)) {
dev_err(dev, "The resolution is not supported\n");
return -EINVAL;
}
mutex_lock(&chip->lock);
- status = isl29018_set_resolution(chip, lval, &new_adc_bit);
+ status = isl29018_set_resolution(chip, val, &new_adc_bit);
if (status < 0) {
mutex_unlock(&chip->lock);
dev_err(dev, "Error in setting resolution\n");
@@ -319,11 +319,11 @@ static ssize_t store_prox_infrared_suppression(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- unsigned long lval;
+ int val;
- if (strict_strtoul(buf, 10, &lval))
+ if (kstrtoint(buf, 10, &val))
return -EINVAL;
- if (!(lval == 0UL || lval == 1UL)) {
+ if (!(val == 0 || val == 1)) {
dev_err(dev, "The mode is not supported\n");
return -EINVAL;
}
@@ -331,7 +331,7 @@ static ssize_t store_prox_infrared_suppression(struct device *dev,
/* get the "proximity scheme" i.e. if the chip does on chip
infrared suppression (1 means perform on chip suppression) */
mutex_lock(&chip->lock);
- chip->prox_scheme = (int)lval;
+ chip->prox_scheme = val;
mutex_unlock(&chip->lock);
return count;
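
The strict_strtoul()/strict_strtol() calls are replaced by the kstrto*() helpers, which parse directly into a variable of the final type and reject out-of-range input, so the unsigned long temporaries and casts disappear. A short sketch of the sysfs store pattern, with an illustrative foo_store_flag() name:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t foo_store_flag(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned int val;
	int ret;

	/* parses straight into the target type and range-checks it */
	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* apply val to the device here ... */

	return count;
}
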
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index b377dd3b76a..f8c659568c3 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -493,9 +493,9 @@ static ssize_t taos_power_state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- unsigned long value;
+ int value;
- if (strict_strtoul(buf, 0, &value))
+ if (kstrtoint(buf, 0, &value))
return -EINVAL;
if (value == 0)
@@ -536,9 +536,9 @@ static ssize_t taos_gain_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
- unsigned long value;
+ int value;
- if (strict_strtoul(buf, 0, &value))
+ if (kstrtoint(buf, 0, &value))
return -EINVAL;
switch (value) {
@@ -582,9 +582,9 @@ static ssize_t taos_als_time_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
- unsigned long value;
+ int value;
- if (strict_strtoul(buf, 0, &value))
+ if (kstrtoint(buf, 0, &value))
return -EINVAL;
if ((value < 50) || (value > 650))
@@ -619,9 +619,9 @@ static ssize_t taos_als_trim_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
- unsigned long value;
+ int value;
- if (strict_strtoul(buf, 0, &value))
+ if (kstrtoint(buf, 0, &value))
return -EINVAL;
if (value)
@@ -644,9 +644,9 @@ static ssize_t taos_als_cal_target_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
- unsigned long value;
+ int value;
- if (strict_strtoul(buf, 0, &value))
+ if (kstrtoint(buf, 0, &value))
return -EINVAL;
if (value)
@@ -671,9 +671,9 @@ static ssize_t taos_do_calibrate(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- unsigned long value;
+ int value;
- if (strict_strtoul(buf, 0, &value))
+ if (kstrtoint(buf, 0, &value))
return -EINVAL;
if (value == 1)
@@ -815,12 +815,9 @@ static int taos_probe(struct i2c_client *clientp,
return -EOPNOTSUPP;
}
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- dev_err(&clientp->dev, "iio allocation failed\n");
- goto fail1;
- }
+ indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
chip->client = clientp;
i2c_set_clientdata(clientp, indio_dev);
@@ -835,14 +832,14 @@ static int taos_probe(struct i2c_client *clientp,
if (ret < 0) {
dev_err(&clientp->dev, "i2c_smbus_write_bytes() to cmd "
"reg failed in taos_probe(), err = %d\n", ret);
- goto fail2;
+ return ret;
}
ret = i2c_smbus_read_byte(clientp);
if (ret < 0) {
dev_err(&clientp->dev, "i2c_smbus_read_byte from "
"reg failed in taos_probe(), err = %d\n", ret);
- goto fail2;
+ return ret;
}
buf[i] = ret;
}
@@ -850,14 +847,14 @@ static int taos_probe(struct i2c_client *clientp,
if (!taos_tsl258x_device(buf)) {
dev_info(&clientp->dev, "i2c device found but does not match "
"expected id in taos_probe()\n");
- goto fail2;
+ return -EINVAL;
}
ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
if (ret < 0) {
dev_err(&clientp->dev, "i2c_smbus_write_byte() to cmd reg "
"failed in taos_probe(), err = %d\n", ret);
- goto fail2;
+ return ret;
}
indio_dev->info = &tsl2583_info;
@@ -867,7 +864,7 @@ static int taos_probe(struct i2c_client *clientp,
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&clientp->dev, "iio registration failed\n");
- goto fail2;
+ return ret;
}
/* Load up the V2 defaults (these are hard coded defaults for now) */
@@ -878,10 +875,6 @@ static int taos_probe(struct i2c_client *clientp,
dev_info(&clientp->dev, "Light sensor found.\n");
return 0;
-fail1:
- iio_device_free(indio_dev);
-fail2:
- return ret;
}
#ifdef CONFIG_PM_SLEEP
@@ -926,7 +919,6 @@ static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
static int taos_remove(struct i2c_client *client)
{
iio_device_unregister(i2c_get_clientdata(client));
- iio_device_free(i2c_get_clientdata(client));
return 0;
}
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index c99f890cc6c..18805029d2a 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -124,11 +124,6 @@
#define TSL2X7X_mA13 0xD0
#define TSL2X7X_MAX_TIMER_CNT (0xFF)
-/*Common device IIO EventMask */
-#define TSL2X7X_EVENT_MASK \
- (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING)),
-
#define TSL2X7X_MIN_ITIME 3
/* TAOS txx2x7x Device family members */
@@ -550,7 +545,7 @@ prox_poll_err:
static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
/* If Operational settings defined elsewhere.. */
- if (chip->pdata && chip->pdata->platform_default_settings != 0)
+ if (chip->pdata && chip->pdata->platform_default_settings)
memcpy(&(chip->tsl2x7x_settings),
chip->pdata->platform_default_settings,
sizeof(tsl2x7x_default_settings));
@@ -951,7 +946,6 @@ static ssize_t tsl2x7x_gain_available_show(struct device *dev,
case tsl2771:
case tmd2771:
return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 128");
- break;
}
return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 120");
@@ -1223,12 +1217,14 @@ static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
}
static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
int ret;
- if (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code) == IIO_INTENSITY)
+ if (chan->type == IIO_INTENSITY)
ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x10);
else
ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x20);
@@ -1237,12 +1233,14 @@ static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
}
static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
- u64 event_code,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
int val)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- if (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code) == IIO_INTENSITY) {
+ if (chan->type == IIO_INTENSITY) {
if (val)
chip->tsl2x7x_settings.interrupts_en |= 0x10;
else
@@ -1260,13 +1258,16 @@ static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
}
static int tsl2x7x_write_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- if (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code) == IIO_INTENSITY) {
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ if (chan->type == IIO_INTENSITY) {
+ switch (dir) {
case IIO_EV_DIR_RISING:
chip->tsl2x7x_settings.als_thresh_high = val;
break;
@@ -1277,7 +1278,7 @@ static int tsl2x7x_write_thresh(struct iio_dev *indio_dev,
return -EINVAL;
}
} else {
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ switch (dir) {
case IIO_EV_DIR_RISING:
chip->tsl2x7x_settings.prox_thres_high = val;
break;
@@ -1295,13 +1296,16 @@ static int tsl2x7x_write_thresh(struct iio_dev *indio_dev,
}
static int tsl2x7x_read_thresh(struct iio_dev *indio_dev,
- u64 event_code,
- int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- if (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code) == IIO_INTENSITY) {
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ if (chan->type == IIO_INTENSITY) {
+ switch (dir) {
case IIO_EV_DIR_RISING:
*val = chip->tsl2x7x_settings.als_thresh_high;
break;
@@ -1312,7 +1316,7 @@ static int tsl2x7x_read_thresh(struct iio_dev *indio_dev,
return -EINVAL;
}
} else {
- switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ switch (dir) {
case IIO_EV_DIR_RISING:
*val = chip->tsl2x7x_settings.prox_thres_high;
break;
@@ -1324,7 +1328,7 @@ static int tsl2x7x_read_thresh(struct iio_dev *indio_dev,
}
}
- return 0;
+ return IIO_VAL_INT;
}
static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
@@ -1346,7 +1350,6 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
break;
default:
return -EINVAL;
- break;
}
break;
case IIO_CHAN_INFO_RAW:
@@ -1366,7 +1369,6 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
break;
default:
return -EINVAL;
- break;
}
break;
case IIO_CHAN_INFO_CALIBSCALE:
@@ -1419,7 +1421,6 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
case tsl2772:
case tmd2772:
return -EINVAL;
- break;
}
chip->tsl2x7x_settings.als_gain = 3;
break;
@@ -1431,7 +1432,6 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
case tsl2771:
case tmd2771:
return -EINVAL;
- break;
}
chip->tsl2x7x_settings.als_gain = 3;
break;
@@ -1508,18 +1508,15 @@ static int tsl2x7x_device_id(unsigned char *id, int target)
case tsl2671:
case tsl2771:
return ((*id & 0xf0) == TRITON_ID);
- break;
case tmd2671:
case tmd2771:
return ((*id & 0xf0) == HALIBUT_ID);
- break;
case tsl2572:
case tsl2672:
case tmd2672:
case tsl2772:
case tmd2772:
return ((*id & 0xf0) == SWORDFISH_ID);
- break;
}
return -EINVAL;
@@ -1675,10 +1672,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
+ .read_event_value_new = &tsl2x7x_read_thresh,
+ .write_event_value_new = &tsl2x7x_write_thresh,
+ .read_event_config_new = &tsl2x7x_read_interrupt_config,
+ .write_event_config_new = &tsl2x7x_write_interrupt_config,
},
[PRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
@@ -1686,10 +1683,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
+ .read_event_value_new = &tsl2x7x_read_thresh,
+ .write_event_value_new = &tsl2x7x_write_thresh,
+ .read_event_config_new = &tsl2x7x_read_interrupt_config,
+ .write_event_config_new = &tsl2x7x_write_interrupt_config,
},
[ALSPRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
@@ -1697,10 +1694,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
+ .read_event_value_new = &tsl2x7x_read_thresh,
+ .write_event_value_new = &tsl2x7x_write_thresh,
+ .read_event_config_new = &tsl2x7x_read_interrupt_config,
+ .write_event_config_new = &tsl2x7x_write_interrupt_config,
},
[PRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
@@ -1708,10 +1705,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
+ .read_event_value_new = &tsl2x7x_read_thresh,
+ .write_event_value_new = &tsl2x7x_write_thresh,
+ .read_event_config_new = &tsl2x7x_read_interrupt_config,
+ .write_event_config_new = &tsl2x7x_write_interrupt_config,
},
[ALSPRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
@@ -1719,10 +1716,24 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
+ .read_event_value_new = &tsl2x7x_read_thresh,
+ .write_event_value_new = &tsl2x7x_write_thresh,
+ .read_event_config_new = &tsl2x7x_read_interrupt_config,
+ .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ },
+};
+
+static const struct iio_event_spec tsl2x7x_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
},
};
@@ -1741,7 +1752,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1758,7 +1770,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
},
},
.chan_table_elements = 1,
@@ -1778,7 +1791,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1789,7 +1803,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
},
},
.chan_table_elements = 4,
@@ -1803,7 +1818,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
},
},
.chan_table_elements = 1,
@@ -1823,7 +1839,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1835,7 +1852,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_mask = TSL2X7X_EVENT_MASK
+ .event_spec = tsl2x7x_events,
+ .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
},
},
.chan_table_elements = 4,
@@ -1851,7 +1869,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
struct iio_dev *indio_dev;
struct tsl2X7X_chip *chip;
- indio_dev = iio_device_alloc(sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
@@ -1862,22 +1880,21 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
ret = tsl2x7x_i2c_read(chip->client,
TSL2X7X_CHIPID, &device_id);
if (ret < 0)
- goto fail1;
+ return ret;
if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
(tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
dev_info(&chip->client->dev,
"%s: i2c device found does not match expected id\n",
__func__);
- ret = -EINVAL;
- goto fail1;
+ return -EINVAL;
}
ret = i2c_smbus_write_byte(clientp, (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&clientp->dev, "%s: write to cmd reg failed. err = %d\n",
__func__, ret);
- goto fail1;
+ return ret;
}
/* ALS and PROX functions can be invoked via user space poll
@@ -1899,16 +1916,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
indio_dev->num_channels = chip->chip_info->chan_table_elements;
if (clientp->irq) {
- ret = request_threaded_irq(clientp->irq,
- NULL,
- &tsl2x7x_event_handler,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "TSL2X7X_event",
- indio_dev);
+ ret = devm_request_threaded_irq(&clientp->dev, clientp->irq,
+ NULL,
+ &tsl2x7x_event_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ "TSL2X7X_event",
+ indio_dev);
if (ret) {
dev_err(&clientp->dev,
"%s: irq request failed", __func__);
- goto fail1;
+ return ret;
}
}
@@ -1921,20 +1939,12 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
if (ret) {
dev_err(&clientp->dev,
"%s: iio registration failed\n", __func__);
- goto fail2;
+ return ret;
}
dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
return 0;
-
-fail2:
- if (clientp->irq)
- free_irq(clientp->irq, indio_dev);
-fail1:
- iio_device_free(indio_dev);
-
- return ret;
}
static int tsl2x7x_suspend(struct device *dev)
@@ -1980,10 +1990,6 @@ static int tsl2x7x_remove(struct i2c_client *client)
tsl2x7x_chip_off(indio_dev);
iio_device_unregister(indio_dev);
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- iio_device_free(indio_dev);
return 0;
}
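
The interrupt request becomes device-managed as well, which is why free_irq() drops out of both the probe failure path and remove(). A minimal sketch of the devm variant for the threaded, rising-edge interrupt requested above, with an illustrative foo_request_irq() wrapper:

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/iio/iio.h>

static int foo_request_irq(struct i2c_client *client, struct iio_dev *indio_dev,
			   irq_handler_t thread_fn)
{
	if (!client->irq)
		return 0;

	/* released automatically when the I2C client is unbound */
	return devm_request_threaded_irq(&client->dev, client->irq,
					 NULL, thread_fn,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 "foo_event", indio_dev);
}
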
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index c3f3f539e78..99421f90d18 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -23,23 +23,17 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/delay.h>
#define HMC5843_CONFIG_REG_A 0x00
#define HMC5843_CONFIG_REG_B 0x01
#define HMC5843_MODE_REG 0x02
-#define HMC5843_DATA_OUT_X_MSB_REG 0x03
-#define HMC5843_DATA_OUT_X_LSB_REG 0x04
-#define HMC5843_DATA_OUT_Y_MSB_REG 0x05
-#define HMC5843_DATA_OUT_Y_LSB_REG 0x06
-#define HMC5843_DATA_OUT_Z_MSB_REG 0x07
-#define HMC5843_DATA_OUT_Z_LSB_REG 0x08
-/* Beware: Y and Z are exchanged on HMC5883 */
-#define HMC5883_DATA_OUT_Z_MSB_REG 0x05
-#define HMC5883_DATA_OUT_Z_LSB_REG 0x06
-#define HMC5883_DATA_OUT_Y_MSB_REG 0x07
-#define HMC5883_DATA_OUT_Y_LSB_REG 0x08
+#define HMC5843_DATA_OUT_MSB_REGS 0x03
#define HMC5843_STATUS_REG 0x09
+#define HMC5843_ID_REG 0x0a
enum hmc5843_ids {
HMC5843_ID,
@@ -54,19 +48,13 @@ enum hmc5843_ids {
*/
#define HMC5843_RANGE_GAIN_OFFSET 0x05
#define HMC5843_RANGE_GAIN_DEFAULT 0x01
-#define HMC5843_RANGE_GAIN_MAX 0x07
+#define HMC5843_RANGE_GAINS 8
-/*
- * Device status
- */
+/* Device status */
#define HMC5843_DATA_READY 0x01
#define HMC5843_DATA_OUTPUT_LOCK 0x02
-/* Does not exist on HMC5883, not used */
-#define HMC5843_VOLTAGE_REGULATOR_ENABLED 0x04
-/*
- * Mode register configuration
- */
+/* Mode register configuration */
#define HMC5843_MODE_CONVERSION_CONTINUOUS 0x00
#define HMC5843_MODE_CONVERSION_SINGLE 0x01
#define HMC5843_MODE_IDLE 0x02
@@ -78,80 +66,29 @@ enum hmc5843_ids {
* HMC5883: Typical data output rate
*/
#define HMC5843_RATE_OFFSET 0x02
-#define HMC5843_RATE_BITMASK 0x1C
-#define HMC5843_RATE_NOT_USED 0x07
+#define HMC5843_RATE_DEFAULT 0x04
+#define HMC5843_RATES 7
-/*
- * Device measurement configuration
- */
+/* Device measurement configuration */
#define HMC5843_MEAS_CONF_NORMAL 0x00
#define HMC5843_MEAS_CONF_POSITIVE_BIAS 0x01
#define HMC5843_MEAS_CONF_NEGATIVE_BIAS 0x02
-#define HMC5843_MEAS_CONF_NOT_USED 0x03
#define HMC5843_MEAS_CONF_MASK 0x03
-/*
- * Scaling factors: 10000000/Gain
- */
-static const int hmc5843_regval_to_nanoscale[] = {
+/* Scaling factors: 10000000/Gain */
+static const int hmc5843_regval_to_nanoscale[HMC5843_RANGE_GAINS] = {
6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
};
-static const int hmc5883_regval_to_nanoscale[] = {
+static const int hmc5883_regval_to_nanoscale[HMC5843_RANGE_GAINS] = {
7812, 9766, 13021, 16287, 24096, 27701, 32573, 45662
};
-static const int hmc5883l_regval_to_nanoscale[] = {
+static const int hmc5883l_regval_to_nanoscale[HMC5843_RANGE_GAINS] = {
7299, 9174, 12195, 15152, 22727, 25641, 30303, 43478
};
/*
- * From the HMC5843 datasheet:
- * Value | Sensor input field range (Ga) | Gain (counts/milli-Gauss)
- * 0 | (+-)0.7 | 1620
- * 1 | (+-)1.0 | 1300
- * 2 | (+-)1.5 | 970
- * 3 | (+-)2.0 | 780
- * 4 | (+-)3.2 | 530
- * 5 | (+-)3.8 | 460
- * 6 | (+-)4.5 | 390
- * 7 | (+-)6.5 | 280
- *
- * From the HMC5883 datasheet:
- * Value | Recommended sensor field range (Ga) | Gain (counts/Gauss)
- * 0 | (+-)0.9 | 1280
- * 1 | (+-)1.2 | 1024
- * 2 | (+-)1.9 | 768
- * 3 | (+-)2.5 | 614
- * 4 | (+-)4.0 | 415
- * 5 | (+-)4.6 | 361
- * 6 | (+-)5.5 | 307
- * 7 | (+-)7.9 | 219
- *
- * From the HMC5883L datasheet:
- * Value | Recommended sensor field range (Ga) | Gain (LSB/Gauss)
- * 0 | (+-)0.88 | 1370
- * 1 | (+-)1.3 | 1090
- * 2 | (+-)1.9 | 820
- * 3 | (+-)2.5 | 660
- * 4 | (+-)4.0 | 440
- * 5 | (+-)4.7 | 390
- * 6 | (+-)5.6 | 330
- * 7 | (+-)8.1 | 230
- */
-static const int hmc5843_regval_to_input_field_mga[] = {
- 700, 1000, 1500, 2000, 3200, 3800, 4500, 6500
-};
-
-static const int hmc5883_regval_to_input_field_mga[] = {
- 900, 1200, 1900, 2500, 4000, 4600, 5500, 7900
-};
-
-static const int hmc5883l_regval_to_input_field_mga[] = {
- 880, 1300, 1900, 2500, 4000, 4700, 5600, 8100
-};
-
-/*
* From the datasheet:
* Value | HMC5843 | HMC5883/HMC5883L
* | Data output rate (Hz) | Data output rate (Hz)
@@ -164,141 +101,94 @@ static const int hmc5883l_regval_to_input_field_mga[] = {
* 6 | 50 | 75
* 7 | Not used | Not used
*/
-static const char * const hmc5843_regval_to_sample_freq[] = {
- "0.5", "1", "2", "5", "10", "20", "50",
+static const int hmc5843_regval_to_samp_freq[7][2] = {
+ {0, 500000}, {1, 0}, {2, 0}, {5, 0}, {10, 0}, {20, 0}, {50, 0}
};
-static const char * const hmc5883_regval_to_sample_freq[] = {
- "0.75", "1.5", "3", "7.5", "15", "30", "75",
+static const int hmc5883_regval_to_samp_freq[7][2] = {
+ {0, 750000}, {1, 500000}, {3, 0}, {7, 500000}, {15, 0}, {30, 0},
+ {75, 0}
};
/* Describe chip variants */
struct hmc5843_chip_info {
const struct iio_chan_spec *channels;
- const char * const *regval_to_sample_freq;
- const int *regval_to_input_field_mga;
+ const int (*regval_to_samp_freq)[2];
const int *regval_to_nanoscale;
};
/* Each client has this additional data */
struct hmc5843_data {
+ struct i2c_client *client;
struct mutex lock;
u8 rate;
u8 meas_conf;
u8 operating_mode;
u8 range;
const struct hmc5843_chip_info *variant;
+ __be16 buffer[8]; /* 3x 16-bit channels + padding + 64-bit timestamp */
};
/* The lower two bits contain the current conversion mode */
-static s32 hmc5843_configure(struct i2c_client *client,
- u8 operating_mode)
+static s32 hmc5843_set_mode(struct hmc5843_data *data, u8 operating_mode)
{
- return i2c_smbus_write_byte_data(client,
- HMC5843_MODE_REG,
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client, HMC5843_MODE_REG,
operating_mode & HMC5843_MODE_MASK);
+ if (ret >= 0)
+ data->operating_mode = operating_mode;
+ mutex_unlock(&data->lock);
+
+ return ret;
}
-/* Return the measurement value from the specified channel */
-static int hmc5843_read_measurement(struct iio_dev *indio_dev,
- int address,
- int *val)
+static int hmc5843_wait_measurement(struct hmc5843_data *data)
{
- struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct hmc5843_data *data = iio_priv(indio_dev);
s32 result;
int tries = 150;
- mutex_lock(&data->lock);
while (tries-- > 0) {
- result = i2c_smbus_read_byte_data(client,
+ result = i2c_smbus_read_byte_data(data->client,
HMC5843_STATUS_REG);
+ if (result < 0)
+ return result;
if (result & HMC5843_DATA_READY)
break;
msleep(20);
}
if (tries < 0) {
- dev_err(&client->dev, "data not ready\n");
- mutex_unlock(&data->lock);
+ dev_err(&data->client->dev, "data not ready\n");
return -EIO;
}
- result = i2c_smbus_read_word_swapped(client, address);
- mutex_unlock(&data->lock);
- if (result < 0)
- return -EINVAL;
-
- *val = sign_extend32(result, 15);
- return IIO_VAL_INT;
-}
-
-/*
- * From the datasheet:
- * 0 - Continuous-Conversion Mode: In continuous-conversion mode, the
- * device continuously performs conversions and places the result in
- * the data register.
- *
- * 1 - Single-Conversion Mode : Device performs a single measurement,
- * sets RDY high and returns to sleep mode.
- *
- * 2 - Idle Mode : Device is placed in idle mode.
- *
- * 3 - Sleep Mode : Device is placed in sleep mode.
- *
- */
-static ssize_t hmc5843_show_operating_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct hmc5843_data *data = iio_priv(indio_dev);
- return sprintf(buf, "%d\n", data->operating_mode);
+ return 0;
}
-static ssize_t hmc5843_set_operating_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+/* Return the measurement value from the specified channel */
+static int hmc5843_read_measurement(struct hmc5843_data *data,
+ int idx, int *val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct hmc5843_data *data = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long operating_mode = 0;
- s32 status;
- int error;
+ s32 result;
+ __be16 values[3];
mutex_lock(&data->lock);
- error = kstrtoul(buf, 10, &operating_mode);
- if (error) {
- count = error;
- goto exit;
- }
- dev_dbg(dev, "set conversion mode to %lu\n", operating_mode);
- if (operating_mode > HMC5843_MODE_SLEEP) {
- count = -EINVAL;
- goto exit;
- }
-
- status = i2c_smbus_write_byte_data(client, this_attr->address,
- operating_mode);
- if (status) {
- count = -EINVAL;
- goto exit;
+ result = hmc5843_wait_measurement(data);
+ if (result < 0) {
+ mutex_unlock(&data->lock);
+ return result;
}
- data->operating_mode = operating_mode;
-
-exit:
+ result = i2c_smbus_read_i2c_block_data(data->client,
+ HMC5843_DATA_OUT_MSB_REGS, sizeof(values), (u8 *) values);
mutex_unlock(&data->lock);
- return count;
-}
+ if (result < 0)
+ return -EINVAL;
-static IIO_DEVICE_ATTR(operating_mode,
- S_IWUSR | S_IRUGO,
- hmc5843_show_operating_mode,
- hmc5843_set_operating_mode,
- HMC5843_MODE_REG);
+ *val = sign_extend32(be16_to_cpu(values[idx]), 15);
+ return IIO_VAL_INT;
+}
/*
* API for setting the measurement configuration to
@@ -318,23 +208,26 @@ static IIO_DEVICE_ATTR(operating_mode,
* and BN.
*
*/
-static s32 hmc5843_set_meas_conf(struct i2c_client *client,
- u8 meas_conf)
+static s32 hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct hmc5843_data *data = iio_priv(indio_dev);
- u8 reg_val;
- reg_val = (meas_conf & HMC5843_MEAS_CONF_MASK) |
- (data->rate << HMC5843_RATE_OFFSET);
- return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client, HMC5843_CONFIG_REG_A,
+ (meas_conf & HMC5843_MEAS_CONF_MASK) |
+ (data->rate << HMC5843_RATE_OFFSET));
+ if (ret >= 0)
+ data->meas_conf = meas_conf;
+ mutex_unlock(&data->lock);
+
+ return ret;
}
static ssize_t hmc5843_show_measurement_configuration(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct hmc5843_data *data = iio_priv(indio_dev);
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
return sprintf(buf, "%d\n", data->meas_conf);
}
@@ -343,29 +236,19 @@ static ssize_t hmc5843_set_measurement_configuration(struct device *dev,
const char *buf,
size_t count)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct hmc5843_data *data = iio_priv(indio_dev);
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
unsigned long meas_conf = 0;
- int error;
+ int ret;
- error = kstrtoul(buf, 10, &meas_conf);
- if (error)
- return error;
- if (meas_conf >= HMC5843_MEAS_CONF_NOT_USED)
+ ret = kstrtoul(buf, 10, &meas_conf);
+ if (ret)
+ return ret;
+ if (meas_conf >= HMC5843_MEAS_CONF_MASK)
return -EINVAL;
- mutex_lock(&data->lock);
- dev_dbg(dev, "set measurement configuration to %lu\n", meas_conf);
- if (hmc5843_set_meas_conf(client, meas_conf)) {
- count = -EINVAL;
- goto exit;
- }
- data->meas_conf = meas_conf;
+ ret = hmc5843_set_meas_conf(data, meas_conf);
-exit:
- mutex_unlock(&data->lock);
- return count;
+ return (ret < 0) ? ret : count;
}
static IIO_DEVICE_ATTR(meas_conf,
@@ -374,211 +257,221 @@ static IIO_DEVICE_ATTR(meas_conf,
hmc5843_set_measurement_configuration,
0);
-static ssize_t hmc5843_show_sampling_frequencies_available(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t hmc5843_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct hmc5843_data *data = iio_priv(indio_dev);
- ssize_t total_n = 0;
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+ size_t len = 0;
int i;
- for (i = 0; i < HMC5843_RATE_NOT_USED; i++) {
- ssize_t n = sprintf(buf, "%s ", data->variant->regval_to_sample_freq[i]);
- buf += n;
- total_n += n;
- }
+ for (i = 0; i < HMC5843_RATES; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d.%d ", data->variant->regval_to_samp_freq[i][0],
+ data->variant->regval_to_samp_freq[i][1]);
+
/* replace trailing space by newline */
- buf[-1] = '\n';
+ buf[len - 1] = '\n';
- return total_n;
+ return len;
}
-static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hmc5843_show_sampling_frequencies_available);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hmc5843_show_samp_freq_avail);
-static s32 hmc5843_set_rate(struct i2c_client *client,
- u8 rate)
+static int hmc5843_set_samp_freq(struct hmc5843_data *data, u8 rate)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct hmc5843_data *data = iio_priv(indio_dev);
- u8 reg_val;
+ int ret;
- if (rate >= HMC5843_RATE_NOT_USED) {
- dev_err(&client->dev,
- "data output rate is not supported\n");
- return -EINVAL;
- }
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client, HMC5843_CONFIG_REG_A,
+ data->meas_conf | (rate << HMC5843_RATE_OFFSET));
+ if (ret >= 0)
+ data->rate = rate;
+ mutex_unlock(&data->lock);
- reg_val = data->meas_conf | (rate << HMC5843_RATE_OFFSET);
- return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
+ return ret;
}
-static int hmc5843_check_sampling_frequency(struct hmc5843_data *data,
- const char *buf)
+static int hmc5843_get_samp_freq_index(struct hmc5843_data *data,
+ int val, int val2)
{
- const char * const *samp_freq = data->variant->regval_to_sample_freq;
int i;
- for (i = 0; i < HMC5843_RATE_NOT_USED; i++) {
- if (sysfs_streq(buf, samp_freq[i]))
+ for (i = 0; i < HMC5843_RATES; i++)
+ if (val == data->variant->regval_to_samp_freq[i][0] &&
+ val2 == data->variant->regval_to_samp_freq[i][1])
return i;
- }
return -EINVAL;
}
-static ssize_t hmc5843_set_sampling_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int hmc5843_set_range_gain(struct hmc5843_data *data, u8 range)
{
-
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct hmc5843_data *data = iio_priv(indio_dev);
- int rate;
-
- rate = hmc5843_check_sampling_frequency(data, buf);
- if (rate < 0) {
- dev_err(&client->dev,
- "sampling frequency is not supported\n");
- return rate;
- }
+ int ret;
mutex_lock(&data->lock);
- dev_dbg(dev, "set rate to %d\n", rate);
- if (hmc5843_set_rate(client, rate)) {
- count = -EINVAL;
- goto exit;
- }
- data->rate = rate;
-
-exit:
+ ret = i2c_smbus_write_byte_data(data->client, HMC5843_CONFIG_REG_B,
+ range << HMC5843_RANGE_GAIN_OFFSET);
+ if (ret >= 0)
+ data->range = range;
mutex_unlock(&data->lock);
- return count;
+
+ return ret;
}
-static ssize_t hmc5843_show_sampling_frequency(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hmc5843_show_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct hmc5843_data *data = iio_priv(indio_dev);
- s32 rate;
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
- rate = i2c_smbus_read_byte_data(client, this_attr->address);
- if (rate < 0)
- return rate;
- rate = (rate & HMC5843_RATE_BITMASK) >> HMC5843_RATE_OFFSET;
- return sprintf(buf, "%s\n", data->variant->regval_to_sample_freq[rate]);
-}
+ size_t len = 0;
+ int i;
-static IIO_DEVICE_ATTR(sampling_frequency,
- S_IWUSR | S_IRUGO,
- hmc5843_show_sampling_frequency,
- hmc5843_set_sampling_frequency,
- HMC5843_CONFIG_REG_A);
+ for (i = 0; i < HMC5843_RANGE_GAINS; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "0.%09d ", data->variant->regval_to_nanoscale[i]);
-static ssize_t hmc5843_show_range_gain(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 range;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct hmc5843_data *data = iio_priv(indio_dev);
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
- range = data->range;
- return sprintf(buf, "%d\n", data->variant->regval_to_input_field_mga[range]);
+ return len;
}
-static ssize_t hmc5843_set_range_gain(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct hmc5843_data *data = iio_priv(indio_dev);
- unsigned long range = 0;
- int error;
+static IIO_DEVICE_ATTR(scale_available, S_IRUGO,
+ hmc5843_show_scale_avail, NULL, 0);
- mutex_lock(&data->lock);
- error = kstrtoul(buf, 10, &range);
- if (error) {
- count = error;
- goto exit;
- }
- dev_dbg(dev, "set range to %lu\n", range);
+static int hmc5843_get_scale_index(struct hmc5843_data *data, int val, int val2)
+{
+ int i;
- if (range > HMC5843_RANGE_GAIN_MAX) {
- count = -EINVAL;
- goto exit;
- }
+ if (val != 0)
+ return -EINVAL;
- data->range = range;
- range = range << HMC5843_RANGE_GAIN_OFFSET;
- if (i2c_smbus_write_byte_data(client, this_attr->address, range))
- count = -EINVAL;
+ for (i = 0; i < HMC5843_RANGE_GAINS; i++)
+ if (val2 == data->variant->regval_to_nanoscale[i])
+ return i;
-exit:
- mutex_unlock(&data->lock);
- return count;
+ return -EINVAL;
}
-static IIO_DEVICE_ATTR(in_magn_range,
- S_IWUSR | S_IRUGO,
- hmc5843_show_range_gain,
- hmc5843_set_range_gain,
- HMC5843_CONFIG_REG_B);
-
static int hmc5843_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
+ int *val, int *val2, long mask)
{
struct hmc5843_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return hmc5843_read_measurement(indio_dev,
- chan->address,
- val);
+ return hmc5843_read_measurement(data, chan->scan_index, val);
case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = data->variant->regval_to_nanoscale[data->range];
return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = data->variant->regval_to_samp_freq[data->rate][0];
+ *val2 = data->variant->regval_to_samp_freq[data->rate][1];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
-#define HMC5843_CHANNEL(axis, addr) \
+static int hmc5843_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hmc5843_data *data = iio_priv(indio_dev);
+ int rate, range;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ rate = hmc5843_get_samp_freq_index(data, val, val2);
+ if (rate < 0)
+ return -EINVAL;
+
+ return hmc5843_set_samp_freq(data, rate);
+ case IIO_CHAN_INFO_SCALE:
+ range = hmc5843_get_scale_index(data, val, val2);
+ if (range < 0)
+ return -EINVAL;
+
+ return hmc5843_set_range_gain(data, range);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hmc5843_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hmc5843_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = hmc5843_wait_measurement(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ HMC5843_DATA_OUT_MSB_REGS, 3 * sizeof(__be16),
+ (u8 *) data->buffer);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
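The handler above reads the three axis registers in one I2C block transfer into data->buffer and lets iio_push_to_buffers_with_timestamp() append the timestamp. A minimal sketch of the scan layout this relies on (the struct name and the padding comment are illustrative, not taken from the patch; the available_scan_masks = {0x7, 0} added later keeps all three axes enabled together, so the layout never shrinks):

struct hmc5843_scan {			/* hypothetical, for illustration only */
	__be16 chans[3];		/* X, Y, Z (Y and Z swapped on HMC5883) */
					/* 2 bytes of implicit padding */
	s64 timestamp __aligned(8);	/* filled in by the push helper */
};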
+#define HMC5843_CHANNEL(axis, idx) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .address = addr \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = idx, \
+ .scan_type = IIO_ST('s', 16, 16, IIO_BE), \
}
static const struct iio_chan_spec hmc5843_channels[] = {
- HMC5843_CHANNEL(X, HMC5843_DATA_OUT_X_MSB_REG),
- HMC5843_CHANNEL(Y, HMC5843_DATA_OUT_Y_MSB_REG),
- HMC5843_CHANNEL(Z, HMC5843_DATA_OUT_Z_MSB_REG),
+ HMC5843_CHANNEL(X, 0),
+ HMC5843_CHANNEL(Y, 1),
+ HMC5843_CHANNEL(Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
+/* Beware: Y and Z are exchanged on HMC5883 */
static const struct iio_chan_spec hmc5883_channels[] = {
- HMC5843_CHANNEL(X, HMC5843_DATA_OUT_X_MSB_REG),
- HMC5843_CHANNEL(Y, HMC5883_DATA_OUT_Y_MSB_REG),
- HMC5843_CHANNEL(Z, HMC5883_DATA_OUT_Z_MSB_REG),
+ HMC5843_CHANNEL(X, 0),
+ HMC5843_CHANNEL(Z, 1),
+ HMC5843_CHANNEL(Y, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static struct attribute *hmc5843_attributes[] = {
&iio_dev_attr_meas_conf.dev_attr.attr,
- &iio_dev_attr_operating_mode.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_magn_range.dev_attr.attr,
+ &iio_dev_attr_scale_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@@ -590,89 +483,101 @@ static const struct attribute_group hmc5843_group = {
static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
[HMC5843_ID] = {
.channels = hmc5843_channels,
- .regval_to_sample_freq = hmc5843_regval_to_sample_freq,
- .regval_to_input_field_mga =
- hmc5843_regval_to_input_field_mga,
+ .regval_to_samp_freq = hmc5843_regval_to_samp_freq,
.regval_to_nanoscale = hmc5843_regval_to_nanoscale,
},
[HMC5883_ID] = {
.channels = hmc5883_channels,
- .regval_to_sample_freq = hmc5883_regval_to_sample_freq,
- .regval_to_input_field_mga =
- hmc5883_regval_to_input_field_mga,
+ .regval_to_samp_freq = hmc5883_regval_to_samp_freq,
.regval_to_nanoscale = hmc5883_regval_to_nanoscale,
},
[HMC5883L_ID] = {
.channels = hmc5883_channels,
- .regval_to_sample_freq = hmc5883_regval_to_sample_freq,
- .regval_to_input_field_mga =
- hmc5883l_regval_to_input_field_mga,
+ .regval_to_samp_freq = hmc5883_regval_to_samp_freq,
.regval_to_nanoscale = hmc5883l_regval_to_nanoscale,
},
};
-/* Called when we have found a new HMC58X3 */
-static void hmc5843_init_client(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hmc5843_init(struct hmc5843_data *data)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct hmc5843_data *data = iio_priv(indio_dev);
-
- data->variant = &hmc5843_chip_info_tbl[id->driver_data];
- indio_dev->channels = data->variant->channels;
- indio_dev->num_channels = 3;
- hmc5843_set_meas_conf(client, data->meas_conf);
- hmc5843_set_rate(client, data->rate);
- hmc5843_configure(client, data->operating_mode);
- i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_B, data->range);
- mutex_init(&data->lock);
+ int ret;
+ u8 id[3];
+
+ ret = i2c_smbus_read_i2c_block_data(data->client, HMC5843_ID_REG,
+ sizeof(id), id);
+ if (ret < 0)
+ return ret;
+ if (id[0] != 'H' || id[1] != '4' || id[2] != '3') {
+ dev_err(&data->client->dev, "no HMC5843/5883/5883L sensor\n");
+ return -ENODEV;
+ }
- pr_info("%s initialized\n", id->name);
+ ret = hmc5843_set_meas_conf(data, HMC5843_MEAS_CONF_NORMAL);
+ if (ret < 0)
+ return ret;
+ ret = hmc5843_set_samp_freq(data, HMC5843_RATE_DEFAULT);
+ if (ret < 0)
+ return ret;
+ ret = hmc5843_set_range_gain(data, HMC5843_RANGE_GAIN_DEFAULT);
+ if (ret < 0)
+ return ret;
+ return hmc5843_set_mode(data, HMC5843_MODE_CONVERSION_CONTINUOUS);
}
static const struct iio_info hmc5843_info = {
.attrs = &hmc5843_group,
.read_raw = &hmc5843_read_raw,
+ .write_raw = &hmc5843_write_raw,
+ .write_raw_get_fmt = &hmc5843_write_raw_get_fmt,
.driver_module = THIS_MODULE,
};
+static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
+
static int hmc5843_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct hmc5843_data *data;
struct iio_dev *indio_dev;
- int err = 0;
+ int ret;
- indio_dev = iio_device_alloc(sizeof(*data));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto exit;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
/* default settings at probe */
data = iio_priv(indio_dev);
- data->meas_conf = HMC5843_MEAS_CONF_NORMAL;
- data->range = HMC5843_RANGE_GAIN_DEFAULT;
- data->operating_mode = HMC5843_MODE_CONVERSION_CONTINUOUS;
+ data->client = client;
+ data->variant = &hmc5843_chip_info_tbl[id->driver_data];
+ mutex_init(&data->lock);
i2c_set_clientdata(client, indio_dev);
- hmc5843_init_client(client, id);
-
indio_dev->info = &hmc5843_info;
indio_dev->name = id->name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = data->variant->channels;
+ indio_dev->num_channels = 4;
+ indio_dev->available_scan_masks = hmc5843_scan_masks;
+
+ ret = hmc5843_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ hmc5843_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
- err = iio_device_register(indio_dev);
- if (err)
- goto exit_free2;
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
return 0;
-exit_free2:
- iio_device_free(indio_dev);
-exit:
- return err;
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
}
static int hmc5843_remove(struct i2c_client *client)
@@ -680,9 +585,10 @@ static int hmc5843_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- /* sleep mode to save power */
- hmc5843_configure(client, HMC5843_MODE_SLEEP);
- iio_device_free(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ /* sleep mode to save power */
+ hmc5843_set_mode(iio_priv(indio_dev), HMC5843_MODE_SLEEP);
return 0;
}
@@ -690,19 +596,18 @@ static int hmc5843_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int hmc5843_suspend(struct device *dev)
{
- hmc5843_configure(to_i2c_client(dev), HMC5843_MODE_SLEEP);
- return 0;
+ struct hmc5843_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return hmc5843_set_mode(data, HMC5843_MODE_SLEEP);
}
static int hmc5843_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct hmc5843_data *data = iio_priv(indio_dev);
+ struct hmc5843_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
- hmc5843_configure(client, data->operating_mode);
-
- return 0;
+ return hmc5843_set_mode(data, HMC5843_MODE_CONVERSION_CONTINUOUS);
}
static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_suspend, hmc5843_resume);
@@ -730,6 +635,6 @@ static struct i2c_driver hmc5843_driver = {
};
module_i2c_driver(hmc5843_driver);
-MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com");
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com>");
MODULE_DESCRIPTION("HMC5843/5883/5883L driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 74025fbae67..00492cad7c5 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -86,7 +86,7 @@ static int ade7753_spi_read_reg_16(struct device *dev,
struct ade7753_state *st = iio_priv(indio_dev);
ssize_t ret;
- ret = spi_w8r16(st->us, ADE7753_READ_REG(reg_address));
+ ret = spi_w8r16be(st->us, ADE7753_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -94,7 +94,6 @@ static int ade7753_spi_read_reg_16(struct device *dev,
}
*val = ret;
- *val = be16_to_cpup(val);
return 0;
}
@@ -186,9 +185,9 @@ static ssize_t ade7753_write_8bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u8 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7753_spi_write_reg_8(dev, this_attr->address, val);
@@ -204,9 +203,9 @@ static ssize_t ade7753_write_16bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7753_spi_write_reg_16(dev, this_attr->address, val);
@@ -399,11 +398,11 @@ static ssize_t ade7753_write_frequency(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7753_state *st = iio_priv(indio_dev);
- unsigned long val;
+ u16 val;
int ret;
u16 reg, t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
return ret;
if (val == 0)
@@ -497,11 +496,9 @@ static int ade7753_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -517,19 +514,13 @@ static int ade7753_probe(struct spi_device *spi)
/* Get the device into a sane initial state */
ret = ade7753_initial_setup(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
- return ret;
}
/* fixme, confirm ordering in this function */
@@ -539,7 +530,6 @@ static int ade7753_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ade7753_stop_device(&indio_dev->dev);
- iio_device_free(indio_dev);
return 0;
}
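The spi_w8r16() to spi_w8r16be() switch is what makes the dropped be16_to_cpup() unnecessary: the *be variant performs the big-endian-to-CPU conversion itself. A hedged before/after sketch of the read path (error handling omitted):

	/* old: result is the raw big-endian word from the wire */
	ret = spi_w8r16(st->us, ADE7753_READ_REG(reg_address));
	*val = be16_to_cpu((__be16)ret);

	/* new: spi_w8r16be() already returns the value in CPU byte order */
	ret = spi_w8r16be(st->us, ADE7753_READ_REG(reg_address));
	*val = ret;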
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f649ebe55a0..e0aa13ab365 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -86,7 +86,7 @@ static int ade7754_spi_read_reg_16(struct device *dev,
struct ade7754_state *st = iio_priv(indio_dev);
int ret;
- ret = spi_w8r16(st->us, ADE7754_READ_REG(reg_address));
+ ret = spi_w8r16be(st->us, ADE7754_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -94,7 +94,6 @@ static int ade7754_spi_read_reg_16(struct device *dev,
}
*val = ret;
- *val = be16_to_cpup(val);
return 0;
}
@@ -186,9 +185,9 @@ static ssize_t ade7754_write_8bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u8 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7754_spi_write_reg_8(dev, this_attr->address, val);
@@ -204,9 +203,9 @@ static ssize_t ade7754_write_16bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7754_spi_write_reg_16(dev, this_attr->address, val);
@@ -419,11 +418,11 @@ static ssize_t ade7754_write_frequency(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
- unsigned long val;
+ u16 val;
int ret;
u8 reg, t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
return ret;
if (val == 0)
@@ -520,11 +519,9 @@ static int ade7754_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -540,18 +537,12 @@ static int ade7754_probe(struct spi_device *spi)
/* Get the device into a sane initial state */
ret = ade7754_initial_setup(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
- return ret;
}
/* fixme, confirm ordering in this function */
@@ -561,7 +552,6 @@ static int ade7754_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ade7754_stop_device(&indio_dev->dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 6005d4aab0c..cba183e2483 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -269,9 +269,9 @@ static ssize_t ade7758_write_8bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u8 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7758_spi_write_reg_8(dev, this_attr->address, val);
@@ -287,9 +287,9 @@ static ssize_t ade7758_write_16bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7758_spi_write_reg_16(dev, this_attr->address, val);
@@ -502,11 +502,11 @@ static ssize_t ade7758_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- unsigned long val;
+ u16 val;
int ret;
u8 reg, t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
return ret;
@@ -849,12 +849,11 @@ static int ade7758_probe(struct spi_device *spi)
{
int ret;
struct ade7758_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
@@ -862,10 +861,8 @@ static int ade7758_probe(struct spi_device *spi)
/* Allocate the comms buffers */
st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_dev;
- }
+ if (!st->rx)
+ return -ENOMEM;
st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL);
if (st->tx == NULL) {
ret = -ENOMEM;
@@ -920,9 +917,6 @@ error_free_tx:
kfree(st->tx);
error_free_rx:
kfree(st->rx);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -939,8 +933,6 @@ static int ade7758_remove(struct spi_device *spi)
kfree(st->tx);
kfree(st->rx);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 7d5db717557..c0accf8cce9 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -69,11 +69,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
if (ade7758_spi_read_burst(indio_dev) >= 0)
*dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;
- /* Guaranteed to be aligned with 8 byte boundary */
- if (indio_dev->scan_timestamp)
- dat64[1] = pf->timestamp;
-
- iio_push_to_buffers(indio_dev, (u8 *)dat64);
+ iio_push_to_buffers_with_timestamp(indio_dev, dat64, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
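iio_push_to_buffers_with_timestamp() is a small helper that does what the removed lines did by hand; roughly the following, assuming the scan buffer ends in an 8-byte aligned slot (sketch, not quoted from this patch):

	if (indio_dev->scan_timestamp)
		((s64 *)dat64)[indio_dev->scan_bytes / sizeof(s64) - 1] =
			pf->timestamp;
	iio_push_to_buffers(indio_dev, (u8 *)dat64);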
@@ -91,15 +87,10 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
struct ade7758_state *st = iio_priv(indio_dev);
unsigned channel;
- int ret;
if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- ret = iio_sw_buffer_preenable(indio_dev);
- if (ret < 0)
- return ret;
-
channel = find_first_bit(indio_dev->active_scan_mask,
indio_dev->masklength);
@@ -125,14 +116,17 @@ void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
int ade7758_configure_ring(struct iio_dev *indio_dev)
{
struct ade7758_state *st = iio_priv(indio_dev);
+ struct iio_buffer *buffer;
int ret = 0;
- indio_dev->buffer = iio_kfifo_allocate(indio_dev);
- if (!indio_dev->buffer) {
+ buffer = iio_kfifo_allocate(indio_dev);
+ if (!buffer) {
ret = -ENOMEM;
return ret;
}
+ iio_device_attach_buffer(indio_dev, buffer);
+
indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index d214ac4932c..ea0c9debf8b 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -86,7 +86,7 @@ static int ade7759_spi_read_reg_16(struct device *dev,
struct ade7759_state *st = iio_priv(indio_dev);
int ret;
- ret = spi_w8r16(st->us, ADE7759_READ_REG(reg_address));
+ ret = spi_w8r16be(st->us, ADE7759_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -94,7 +94,6 @@ static int ade7759_spi_read_reg_16(struct device *dev,
}
*val = ret;
- *val = be16_to_cpup(val);
return 0;
}
@@ -185,9 +184,9 @@ static ssize_t ade7759_write_8bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u8 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7759_spi_write_reg_8(dev, this_attr->address, val);
@@ -203,9 +202,9 @@ static ssize_t ade7759_write_16bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = ade7759_spi_write_reg_16(dev, this_attr->address, val);
@@ -360,11 +359,11 @@ static ssize_t ade7759_write_frequency(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7759_state *st = iio_priv(indio_dev);
- unsigned long val;
+ u16 val;
int ret;
u16 reg, t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
return ret;
if (val == 0)
@@ -444,11 +443,9 @@ static int ade7759_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -463,18 +460,13 @@ static int ade7759_probe(struct spi_device *spi)
/* Get the device into a sane initial state */
ret = ade7759_initial_setup(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
/* fixme, confirm ordering in this function */
@@ -484,7 +476,6 @@ static int ade7759_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ade7759_stop_device(&indio_dev->dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index db9ef6c86c1..5b33c7f1aa9 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -208,7 +208,7 @@ static int ade7854_i2c_probe(struct i2c_client *client,
struct ade7854_state *st;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
@@ -225,8 +225,6 @@ static int ade7854_i2c_probe(struct i2c_client *client,
st->irq = client->irq;
ret = ade7854_probe(indio_dev, &client->dev);
- if (ret)
- iio_device_free(indio_dev);
return ret;
}
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index 4c6d2041260..94f73bbbc0f 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -278,7 +278,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
struct ade7854_state *st;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
@@ -296,8 +296,6 @@ static int ade7854_spi_probe(struct spi_device *spi)
ret = ade7854_probe(indio_dev, &spi->dev);
- if (ret)
- iio_device_free(indio_dev);
return ret;
}
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index e8379c0f117..d620bbd603a 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -100,9 +100,9 @@ static ssize_t ade7854_write_8bit(struct device *dev,
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
- long val;
+ u8 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
ret = st->write_reg_8(dev, this_attr->address, val);
@@ -121,9 +121,9 @@ static ssize_t ade7854_write_16bit(struct device *dev,
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = st->write_reg_16(dev, this_attr->address, val);
@@ -142,9 +142,9 @@ static ssize_t ade7854_write_24bit(struct device *dev,
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
- long val;
+ u32 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou32(buf, 10, &val);
if (ret)
goto error_ret;
ret = st->write_reg_24(dev, this_attr->address, val);
@@ -163,9 +163,9 @@ static ssize_t ade7854_write_32bit(struct device *dev,
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
- long val;
+ u32 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou32(buf, 10, &val);
if (ret)
goto error_ret;
ret = st->write_reg_32(dev, this_attr->address, val);
@@ -550,7 +550,7 @@ int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = ade7854_initial_setup(indio_dev);
@@ -561,9 +561,6 @@ int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
error_unreg_dev:
iio_device_unregister(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-
return ret;
}
EXPORT_SYMBOL(ade7854_probe);
@@ -571,7 +568,6 @@ EXPORT_SYMBOL(ade7854_probe);
int ade7854_remove(struct iio_dev *indio_dev)
{
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
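The same conversion repeats across these meter drivers: giving the temporary the exact register width means kstrtou8()/kstrtou16()/kstrtou32() reject out-of-range or negative input while parsing, instead of the old strict_strtol() path silently truncating it when the value reaches the register write. Illustrative calls (the literal strings are examples only):

	u8 val;
	int ret;

	ret = kstrtou8("42", 10, &val);		/* 0: val = 42 */
	ret = kstrtou8("300", 10, &val);	/* -ERANGE: does not fit in a u8 */
	ret = kstrtou8("-1", 10, &val);		/* error: negative input rejected */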
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 71221161aa6..62d30179301 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -107,16 +107,16 @@ static int ad2s1200_probe(struct spi_device *spi)
unsigned short *pins = spi->dev.platform_data;
for (pn = 0; pn < AD2S1200_PN; pn++)
- if (gpio_request_one(pins[pn], GPIOF_DIR_OUT, DRV_NAME)) {
- pr_err("%s: request gpio pin %d failed\n",
- DRV_NAME, pins[pn]);
- goto error_ret;
+ ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
+ DRV_NAME);
+ if (ret) {
+ dev_err(&spi->dev, "request gpio pin %d failed\n",
+ pins[pn]);
+ return ret;
}
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
mutex_init(&st->lock);
@@ -133,26 +133,18 @@ static int ad2s1200_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
spi->max_speed_hz = AD2S1200_HZ;
spi->mode = SPI_MODE_3;
spi_setup(spi);
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- for (--pn; pn >= 0; pn--)
- gpio_free(pins[pn]);
- return ret;
}
static int ad2s1200_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index dcdadbbcf7e..6966d5f7664 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -206,10 +206,10 @@ static ssize_t ad2s1210_store_fclkin(struct device *dev,
size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned long fclkin;
+ unsigned int fclkin;
int ret;
- ret = strict_strtoul(buf, 10, &fclkin);
+ ret = kstrtouint(buf, 10, &fclkin);
if (ret)
return ret;
if (fclkin < AD2S1210_MIN_CLKIN || fclkin > AD2S1210_MAX_CLKIN) {
@@ -243,10 +243,10 @@ static ssize_t ad2s1210_store_fexcit(struct device *dev,
const char *buf, size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned long fexcit;
+ unsigned int fexcit;
int ret;
- ret = strict_strtoul(buf, 10, &fexcit);
+ ret = kstrtouint(buf, 10, &fexcit);
if (ret < 0)
return ret;
if (fexcit < AD2S1210_MIN_EXCIT || fexcit > AD2S1210_MAX_EXCIT) {
@@ -282,11 +282,11 @@ static ssize_t ad2s1210_store_control(struct device *dev,
const char *buf, size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned long udata;
+ unsigned char udata;
unsigned char data;
int ret;
- ret = strict_strtoul(buf, 16, &udata);
+ ret = kstrtou8(buf, 16, &udata);
if (ret)
return -EINVAL;
@@ -337,10 +337,10 @@ static ssize_t ad2s1210_store_resolution(struct device *dev,
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned char data;
- unsigned long udata;
+ unsigned char udata;
int ret;
- ret = strict_strtoul(buf, 10, &udata);
+ ret = kstrtou8(buf, 10, &udata);
if (ret || udata < 10 || udata > 16) {
pr_err("ad2s1210: resolution out of range\n");
return -EINVAL;
@@ -438,11 +438,11 @@ static ssize_t ad2s1210_store_reg(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned long data;
+ unsigned char data;
int ret;
struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
- ret = strict_strtoul(buf, 10, &data);
+ ret = kstrtou8(buf, 10, &data);
if (ret)
return -EINVAL;
mutex_lock(&st->lock);
@@ -669,16 +669,14 @@ static int ad2s1210_probe(struct spi_device *spi)
if (spi->dev.platform_data == NULL)
return -EINVAL;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->pdata = spi->dev.platform_data;
ret = ad2s1210_setup_gpios(st);
if (ret < 0)
- goto error_free_dev;
+ return ret;
spi_set_drvdata(spi, indio_dev);
@@ -709,9 +707,6 @@ static int ad2s1210_probe(struct spi_device *spi)
error_free_gpios:
ad2s1210_free_gpios(st);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -721,7 +716,6 @@ static int ad2s1210_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad2s1210_free_gpios(iio_priv(indio_dev));
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 40b825286d4..e24c5890652 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -64,11 +64,9 @@ static int ad2s90_probe(struct spi_device *spi)
struct ad2s90_state *st;
int ret = 0;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
@@ -83,7 +81,7 @@ static int ad2s90_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
/* need 600ns between CS and the first falling edge of SCLK */
spi->max_speed_hz = 830000;
@@ -91,17 +89,11 @@ static int ad2s90_probe(struct spi_device *spi)
spi_setup(spi);
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad2s90_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 38a158b77b1..26e1ca0b780 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -83,32 +83,28 @@ static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
- unsigned long val;
+ unsigned int val;
bool enabled;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
- goto error_ret;
+ return ret;
- if (val > 100000) {
- ret = -EINVAL;
- goto error_ret;
- }
+ if (val > 100000)
+ return -EINVAL;
enabled = get_enabled_gptimers() & st->t->bit;
if (enabled)
disable_gptimers(st->t->bit);
- if (!val)
- goto error_ret;
+ if (val == 0)
+ return count;
val = get_sclk() / val;
- if (val <= 4 || val <= st->duty) {
- ret = -EINVAL;
- goto error_ret;
- }
+ if (val <= 4 || val <= st->duty)
+ return -EINVAL;
set_gptimer_period(st->t->id, val);
set_gptimer_pwidth(st->t->id, val - st->duty);
@@ -116,8 +112,7 @@ static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
if (enabled)
enable_gptimers(st->t->bit);
-error_ret:
- return ret ? ret : count;
+ return count;
}
static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 79695974b1d..48a6afa8408 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -53,10 +53,10 @@ static ssize_t iio_trig_periodic_write_freq(struct device *dev,
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
- unsigned long val;
+ int val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoint(buf, 10, &val);
if (ret)
goto error_ret;
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f7d6b..5032ff7c225 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index bfaf69378ac..2c3a9e178fb 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
-obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o
+obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o ipuv3-plane.o
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
index 9cfa2a7efdc..6a9da94c957 100644
--- a/drivers/staging/imx-drm/TODO
+++ b/drivers/staging/imx-drm/TODO
@@ -9,7 +9,6 @@ TODO:
Missing features (not necessarily for moving out of staging):
-- Add KMS plane support for CRTC driver
- Add i.MX6 HDMI support
- Add support for IC (Image converter)
- Add support for CSI (CMOS Sensor interface)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index a2e52a0c53c..51aa9772f95 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -68,6 +68,11 @@ struct imx_drm_connector {
struct module *owner;
};
+int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
+{
+ return crtc->pipe;
+}
+
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
@@ -110,18 +115,12 @@ int imx_drm_crtc_panel_format_pins(struct drm_crtc *crtc, u32 encoder_type,
struct imx_drm_crtc *imx_crtc;
struct imx_drm_crtc_helper_funcs *helper;
- mutex_lock(&imxdrm->mutex);
-
list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list)
if (imx_crtc->crtc == crtc)
goto found;
- mutex_unlock(&imxdrm->mutex);
-
return -EINVAL;
found:
- mutex_unlock(&imxdrm->mutex);
-
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
return helper->set_interface_pix_fmt(crtc,
@@ -191,6 +190,18 @@ static void imx_drm_disable_vblank(struct drm_device *drm, int crtc)
imx_drm_crtc->imx_drm_helper_funcs.disable_vblank(imx_drm_crtc->crtc);
}
+static void imx_drm_driver_preclose(struct drm_device *drm,
+ struct drm_file *file)
+{
+ int i;
+
+ if (!file->is_master)
+ return;
+
+ for (i = 0; i < 4; i++)
+		imx_drm_disable_vblank(drm, i);
+}
+
static const struct file_operations imx_drm_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -396,14 +407,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
/*
* enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
+ * - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we don't use the drm irq handler but
* a driver-specific one instead, because the drm
* framework supports only one irq handler and the
* drivers can take care of their own interrupts.
*/
- drm->irq_enabled = 1;
+ drm->irq_enabled = true;
drm_mode_config_init(drm);
imx_drm_mode_config_init(drm);
@@ -423,11 +434,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
goto err_init;
/*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = 1;
+ imxdrm->drm->vblank_disable_allowed = true;
if (!imx_drm_device_get())
ret = -EINVAL;
@@ -647,20 +658,14 @@ int imx_drm_encoder_get_mux_id(struct imx_drm_encoder *imx_drm_encoder,
struct imx_drm_crtc *imx_crtc;
int i = 0;
- mutex_lock(&imxdrm->mutex);
-
list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list) {
if (imx_crtc->crtc == crtc)
goto found;
i++;
}
- mutex_unlock(&imxdrm->mutex);
-
return -EINVAL;
found:
- mutex_unlock(&imxdrm->mutex);
-
return i;
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
@@ -774,16 +779,26 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
};
static struct drm_driver imx_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
+ .preclose = imx_drm_driver_preclose,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = imx_drm_enable_vblank,
.disable_vblank = imx_drm_disable_vblank,
@@ -800,6 +815,12 @@ static struct drm_driver imx_drm_driver = {
static int imx_drm_platform_probe(struct platform_device *pdev)
{
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
imx_drm_device->dev = &pdev->dev;
return drm_platform_init(&imx_drm_driver, pdev);
@@ -837,13 +858,11 @@ static int __init imx_drm_init(void)
INIT_LIST_HEAD(&imx_drm_device->encoder_list);
imx_drm_pdev = platform_device_register_simple("imx-drm", -1, NULL, 0);
- if (!imx_drm_pdev) {
- ret = -EINVAL;
+ if (IS_ERR(imx_drm_pdev)) {
+ ret = PTR_ERR(imx_drm_pdev);
goto err_pdev;
}
- imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
-
ret = platform_driver_register(&imx_drm_pdrv);
if (ret)
goto err_pdrv;
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/staging/imx-drm/imx-drm.h
index f2aac91ddf5..ae90c9c1531 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/staging/imx-drm/imx-drm.h
@@ -14,6 +14,8 @@ struct drm_fbdev_cma;
struct drm_framebuffer;
struct platform_device;
+int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
+
struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index af733ea4856..654bf03e05f 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -359,10 +359,8 @@ static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
sprintf(clkname, "di%d_pll", chno);
ldb->clk_pll[chno] = devm_clk_get(ldb->dev, clkname);
- if (IS_ERR(ldb->clk_pll[chno]))
- return PTR_ERR(ldb->clk_pll[chno]);
- return 0;
+ return PTR_ERR_OR_ZERO(ldb->clk_pll[chno]);
}
static int imx_ldb_register(struct imx_ldb_channel *imx_ldb_ch)
@@ -421,7 +419,7 @@ static const char *imx_ldb_bit_mappings[] = {
[LVDS_BIT_MAP_JEIDA] = "jeida",
};
-const int of_get_data_mapping(struct device_node *np)
+static int of_get_data_mapping(struct device_node *np)
{
const char *bm;
int ret, i;
@@ -466,8 +464,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
- of_match_device(of_match_ptr(imx_ldb_dt_ids),
- &pdev->dev);
+ of_match_device(imx_ldb_dt_ids, &pdev->dev);
struct device_node *child;
const u8 *edidp;
struct imx_ldb *imx_ldb;
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 33d6525cf99..680f4c8fa08 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -151,7 +151,7 @@ static void tve_enable(struct imx_tve *tve)
spin_lock_irqsave(&tve->enable_lock, flags);
if (!tve->enabled) {
- tve->enabled = 1;
+ tve->enabled = true;
clk_prepare_enable(tve->clk);
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
TVE_IPU_CLK_EN | TVE_EN,
@@ -180,7 +180,7 @@ static void tve_disable(struct imx_tve *tve)
spin_lock_irqsave(&tve->enable_lock, flags);
if (tve->enabled) {
- tve->enabled = 0;
+ tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
TVE_IPU_CLK_EN | TVE_EN, 0);
clk_disable_unprepare(tve->clk);
@@ -696,7 +696,7 @@ static int imx_tve_probe(struct platform_device *pdev)
if (val != 0x00100000) {
dev_err(&pdev->dev, "configuration register default value indicates this is not a TVEv2\n");
return -ENODEV;
- };
+ }
/* disable cable detection for VGA mode */
ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0);
diff --git a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
index 74c022e2a53..4826b5c0249 100644
--- a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
+++ b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
@@ -97,6 +97,7 @@ void ipu_idmac_put(struct ipuv3_channel *);
int ipu_idmac_enable_channel(struct ipuv3_channel *channel);
int ipu_idmac_disable_channel(struct ipuv3_channel *channel);
+int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms);
void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
bool doublebuffer);
@@ -283,7 +284,7 @@ int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
int width);
int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *,
- struct ipu_rgb *rgb);
+ const struct ipu_rgb *rgb);
static inline void ipu_cpmem_interlaced_scan(struct ipu_ch_param *p,
int stride)
@@ -303,6 +304,7 @@ int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat);
int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
struct ipu_image *image);
+enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
static inline void ipu_cpmem_set_burstsize(struct ipu_ch_param __iomem *p,
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index ba464e5d9f1..7a22ce619ed 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -30,6 +30,8 @@
#include <linux/irqdomain.h>
#include <linux/of_device.h>
+#include <drm/drm_fourcc.h>
+
#include "imx-ipu-v3.h"
#include "ipu-prv.h"
@@ -139,7 +141,7 @@ u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs)
EXPORT_SYMBOL_GPL(ipu_ch_param_read_field);
int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *p,
- struct ipu_rgb *rgb)
+ const struct ipu_rgb *rgb)
{
int bpp = 0, npb = 0, ro, go, bo, to;
@@ -282,7 +284,7 @@ void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
-static struct ipu_rgb def_rgb_32 = {
+static const struct ipu_rgb def_rgb_32 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
.blue = { .offset = 0, .length = 8, },
@@ -290,31 +292,31 @@ static struct ipu_rgb def_rgb_32 = {
.bits_per_pixel = 32,
};
-static struct ipu_rgb def_bgr_32 = {
- .red = { .offset = 16, .length = 8, },
+static const struct ipu_rgb def_bgr_32 = {
+ .red = { .offset = 0, .length = 8, },
.green = { .offset = 8, .length = 8, },
- .blue = { .offset = 0, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
.transp = { .offset = 24, .length = 8, },
.bits_per_pixel = 32,
};
-static struct ipu_rgb def_rgb_24 = {
- .red = { .offset = 0, .length = 8, },
+static const struct ipu_rgb def_rgb_24 = {
+ .red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
- .blue = { .offset = 16, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
.transp = { .offset = 0, .length = 0, },
.bits_per_pixel = 24,
};
-static struct ipu_rgb def_bgr_24 = {
- .red = { .offset = 16, .length = 8, },
+static const struct ipu_rgb def_bgr_24 = {
+ .red = { .offset = 0, .length = 8, },
.green = { .offset = 8, .length = 8, },
- .blue = { .offset = 0, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
.transp = { .offset = 0, .length = 0, },
.bits_per_pixel = 24,
};
-static struct ipu_rgb def_rgb_16 = {
+static const struct ipu_rgb def_rgb_16 = {
.red = { .offset = 11, .length = 5, },
.green = { .offset = 5, .length = 6, },
.blue = { .offset = 0, .length = 5, },
@@ -322,6 +324,14 @@ static struct ipu_rgb def_rgb_16 = {
.bits_per_pixel = 16,
};
+static const struct ipu_rgb def_bgr_16 = {
+ .red = { .offset = 0, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 11, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * (y) / 4) + (x) / 2)
@@ -329,17 +339,17 @@ static struct ipu_rgb def_rgb_16 = {
(pix->width * pix->height / 4) + \
(pix->width * (y) / 4) + (x) / 2)
-int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat)
+int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 drm_fourcc)
{
- switch (pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
+ switch (drm_fourcc) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
/* pix format */
ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 2);
/* burst size */
ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 63);
break;
- case V4L2_PIX_FMT_UYVY:
+ case DRM_FORMAT_UYVY:
/* bits/pixel */
ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
/* pix format */
@@ -347,7 +357,7 @@ int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat)
/* burst size */
ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
break;
- case V4L2_PIX_FMT_YUYV:
+ case DRM_FORMAT_YUYV:
/* bits/pixel */
ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
/* pix format */
@@ -355,20 +365,25 @@ int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat)
/* burst size */
ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
break;
- case V4L2_PIX_FMT_RGB32:
- ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
break;
- case V4L2_PIX_FMT_RGB565:
- ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
break;
- case V4L2_PIX_FMT_BGR32:
- ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
+ case DRM_FORMAT_BGR888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
break;
- case V4L2_PIX_FMT_RGB24:
+ case DRM_FORMAT_RGB888:
ipu_cpmem_set_format_rgb(cpmem, &def_rgb_24);
break;
- case V4L2_PIX_FMT_BGR24:
- ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
+ case DRM_FORMAT_RGB565:
+ ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
+ break;
+ case DRM_FORMAT_BGR565:
+ ipu_cpmem_set_format_rgb(cpmem, &def_bgr_16);
break;
default:
return -EINVAL;
@@ -378,6 +393,79 @@ int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
+/*
+ * The V4L2 spec defines packed RGB formats in memory byte order, which from
+ * point of view of the IPU corresponds to little-endian words with the first
+ * component in the least significant bits.
+ * The DRM pixel formats and IPU internal representation are ordered the other
+ * way around, with the first named component ordered at the most significant
+ * bits. Further, V4L2 formats are not well defined:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
+ * We choose the interpretation which matches GStreamer behavior.
+ */
+static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_RGB565:
+ /*
+ * Here we choose the 'corrected' interpretation of RGBP, a
+ * little-endian 16-bit word with the red component at the most
+ * significant bits:
+ * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
+ */
+ return DRM_FORMAT_RGB565;
+ case V4L2_PIX_FMT_BGR24:
+ /* B G R <=> [24:0] R:G:B */
+ return DRM_FORMAT_RGB888;
+ case V4L2_PIX_FMT_RGB24:
+ /* R G B <=> [24:0] B:G:R */
+ return DRM_FORMAT_BGR888;
+ case V4L2_PIX_FMT_BGR32:
+ /* B G R A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_RGB32:
+ /* R G B A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_UYVY:
+ return DRM_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return DRM_FORMAT_YUYV;
+ case V4L2_PIX_FMT_YUV420:
+ return DRM_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YVU420:
+ return DRM_FORMAT_YVU420;
+ }
+
+ return -EINVAL;
+}
+
+enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return IPUV3_COLORSPACE_RGB;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return IPUV3_COLORSPACE_YUV;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
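A worked example of the byte-order argument in the comment above (r, g and b are placeholder byte values, not from the patch): a V4L2_PIX_FMT_RGB24 pixel is stored as the bytes R, G, B at increasing addresses, and reading those bytes as a little-endian word puts the first byte in the least significant bits:

	u8 mem[3] = { r, g, b };		/* V4L2_PIX_FMT_RGB24 in memory */
	u32 word = mem[0] | (mem[1] << 8) | (mem[2] << 16);
	/* word is [23:0] B:G:R little-endian, i.e. DRM_FORMAT_BGR888 above */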
+
int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
struct ipu_image *image)
{
@@ -392,7 +480,7 @@ int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
image->rect.height);
ipu_cpmem_set_stride(cpmem, pix->bytesperline);
- ipu_cpmem_set_fmt(cpmem, pix->pixelformat);
+ ipu_cpmem_set_fmt(cpmem, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
@@ -476,7 +564,7 @@ struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
goto out;
}
- channel->busy = 1;
+ channel->busy = true;
channel->num = num;
out:
@@ -494,7 +582,7 @@ void ipu_idmac_put(struct ipuv3_channel *channel)
mutex_lock(&ipu->channel_lock);
- channel->busy = 0;
+ channel->busy = false;
mutex_unlock(&ipu->channel_lock);
}
@@ -610,24 +698,29 @@ int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
-int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
+int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
struct ipu_soc *ipu = channel->ipu;
- u32 val;
- unsigned long flags;
unsigned long timeout;
- timeout = jiffies + msecs_to_jiffies(50);
+ timeout = jiffies + msecs_to_jiffies(ms);
while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
idma_mask(channel->num)) {
- if (time_after(jiffies, timeout)) {
- dev_warn(ipu->dev, "disabling busy idmac channel %d\n",
- channel->num);
- break;
- }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
cpu_relax();
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
+
+int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ u32 val;
+ unsigned long flags;
+
spin_lock_irqsave(&ipu->lock, flags);
/* Disable DMA channel(s) */
@@ -888,7 +981,7 @@ static const struct ipu_platform_reg client_reg[] = {
.dc = 5,
.dp = IPU_DP_FLOW_SYNC_BG,
.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
- .dma[1] = -EINVAL,
+ .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
},
.name = "imx-ipuv3-crtc",
}, {
@@ -913,7 +1006,7 @@ static int ipu_add_subdevice_pdata(struct device *dev,
pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
&reg->pdata, sizeof(struct ipu_platform_reg));
- return pdev ? 0 : -EINVAL;
+ return PTR_ERR_OR_ZERO(pdev);
}
static int ipu_add_client_devices(struct ipu_soc *ipu)
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
index 21bf1c80652..d0e3bc3c53e 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
@@ -91,6 +91,7 @@ enum ipu_dc_map {
IPU_DC_MAP_RGB565,
IPU_DC_MAP_GBR24, /* TVEv2 */
IPU_DC_MAP_BGR666,
+ IPU_DC_MAP_BGR24,
};
struct ipu_dc {
@@ -152,6 +153,8 @@ static int ipu_pixfmt_to_map(u32 fmt)
return IPU_DC_MAP_GBR24;
case V4L2_PIX_FMT_BGR666:
return IPU_DC_MAP_BGR666;
+ case V4L2_PIX_FMT_BGR24:
+ return IPU_DC_MAP_BGR24;
default:
return -EINVAL;
}
@@ -313,7 +316,7 @@ struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel)
return ERR_PTR(-EBUSY);
}
- dc->in_use = 1;
+ dc->in_use = true;
mutex_unlock(&priv->mutex);
@@ -326,7 +329,7 @@ void ipu_dc_put(struct ipu_dc *dc)
struct ipu_dc_priv *priv = dc->priv;
mutex_lock(&priv->mutex);
- dc->in_use = 0;
+ dc->in_use = false;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(ipu_dc_put);
@@ -395,6 +398,12 @@ int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
+ /* bgr24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
+
return 0;
}
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
index 2e97c33b81e..98070dd8c92 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
@@ -307,13 +307,13 @@ int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
goto out;
}
- /* Always allocate at least 128*4 bytes (2 slots) */
- if (slots < 2)
- slots = 2;
-
/* For the MEM_BG channel, first try to allocate twice the slots */
if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
segment = dmfc_find_slots(priv, slots * 2);
+ else if (slots < 2)
+ /* Always allocate at least 128*4 bytes (2 slots) */
+ slots = 2;
+
if (segment >= 0)
slots *= 2;
else
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c b/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
index 231afd6c60f..58f87c8d7c0 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
@@ -325,7 +325,7 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
mutex_init(&priv->mutex);
for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
- priv->flow[i].foreground.foreground = 1;
+ priv->flow[i].foreground.foreground = true;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
}
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 6fd37a7453e..ce6ba987ec9 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -25,29 +25,25 @@
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include <linux/clk.h>
+#include <linux/errno.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "ipu-v3/imx-ipu-v3.h"
#include "imx-drm.h"
+#include "ipuv3-plane.h"
#define DRIVER_DESC "i.MX IPUv3 Graphics"
-struct ipu_framebuffer {
- struct drm_framebuffer base;
- void *virt;
- dma_addr_t phys;
- size_t len;
-};
-
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
struct imx_drm_crtc *imx_crtc;
- struct ipuv3_channel *ipu_ch;
+
+ /* plane[0] is the full plane, plane[1] is the partial plane */
+ struct ipu_plane *plane[2];
+
struct ipu_dc *dc;
- struct ipu_dp *dp;
- struct dmfc_channel *dmfc;
struct ipu_di *di;
int enabled;
struct drm_pending_vblank_event *page_flip_event;
@@ -61,35 +57,14 @@ struct ipu_crtc {
#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
-static int calc_vref(struct drm_display_mode *mode)
-{
- unsigned long htotal, vtotal;
-
- htotal = mode->htotal;
- vtotal = mode->vtotal;
-
- if (!htotal || !vtotal)
- return 60;
-
- return mode->clock * 1000 / vtotal / htotal;
-}
-
-static int calc_bandwidth(struct drm_display_mode *mode, unsigned int vref)
-{
- return mode->hdisplay * mode->vdisplay * vref;
-}
-
static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
{
if (ipu_crtc->enabled)
return;
ipu_di_enable(ipu_crtc->di);
- ipu_dmfc_enable_channel(ipu_crtc->dmfc);
- ipu_idmac_enable_channel(ipu_crtc->ipu_ch);
ipu_dc_enable_channel(ipu_crtc->dc);
- if (ipu_crtc->dp)
- ipu_dp_enable_channel(ipu_crtc->dp);
+ ipu_plane_enable(ipu_crtc->plane[0]);
ipu_crtc->enabled = 1;
}
@@ -99,11 +74,8 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
if (!ipu_crtc->enabled)
return;
- if (ipu_crtc->dp)
- ipu_dp_disable_channel(ipu_crtc->dp);
+ ipu_plane_disable(ipu_crtc->plane[0]);
ipu_dc_disable_channel(ipu_crtc->dc);
- ipu_idmac_disable_channel(ipu_crtc->ipu_ch);
- ipu_dmfc_disable_channel(ipu_crtc->dmfc);
ipu_di_disable(ipu_crtc->di);
ipu_crtc->enabled = 0;
@@ -159,33 +131,6 @@ static const struct drm_crtc_funcs ipu_crtc_funcs = {
.page_flip = ipu_page_flip,
};
-static int ipu_drm_set_base(struct drm_crtc *crtc, int x, int y)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct drm_gem_cma_object *cma_obj;
- struct drm_framebuffer *fb = crtc->fb;
- unsigned long phys;
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_LOG_KMS("entry is null.\n");
- return -EFAULT;
- }
-
- phys = cma_obj->paddr;
- phys += x * (fb->bits_per_pixel >> 3);
- phys += y * fb->pitches[0];
-
- dev_dbg(ipu_crtc->dev, "%s: phys: 0x%lx\n", __func__, phys);
- dev_dbg(ipu_crtc->dev, "%s: xy: %dx%d\n", __func__, x, y);
-
- ipu_cpmem_set_stride(ipu_get_cpmem(ipu_crtc->ipu_ch), fb->pitches[0]);
- ipu_cpmem_set_buffer(ipu_get_cpmem(ipu_crtc->ipu_ch),
- 0, phys);
-
- return 0;
-}
-
static int ipu_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode,
@@ -193,41 +138,15 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct drm_framebuffer *fb = ipu_crtc->base.fb;
int ret;
struct ipu_di_signal_cfg sig_cfg = {};
u32 out_pixel_fmt;
- struct ipu_ch_param __iomem *cpmem = ipu_get_cpmem(ipu_crtc->ipu_ch);
- int bpp;
- u32 v4l2_fmt;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
mode->hdisplay);
dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
mode->vdisplay);
- ipu_ch_param_zero(cpmem);
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- v4l2_fmt = V4L2_PIX_FMT_RGB32;
- bpp = 32;
- break;
- case DRM_FORMAT_RGB565:
- v4l2_fmt = V4L2_PIX_FMT_RGB565;
- bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- v4l2_fmt = V4L2_PIX_FMT_RGB24;
- bpp = 24;
- break;
- default:
- dev_err(ipu_crtc->dev, "unsupported pixel format 0x%08x\n",
- fb->pixel_format);
- return -EINVAL;
- }
-
out_pixel_fmt = ipu_crtc->interface_pix_fmt;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -238,7 +157,7 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
sig_cfg.Vsync_pol = 1;
sig_cfg.enable_pol = 1;
- sig_cfg.clk_pol = 0;
+ sig_cfg.clk_pol = 1;
sig_cfg.width = mode->hdisplay;
sig_cfg.height = mode->vdisplay;
sig_cfg.pixel_fmt = out_pixel_fmt;
@@ -257,18 +176,6 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
- if (ipu_crtc->dp) {
- ret = ipu_dp_setup_channel(ipu_crtc->dp, IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing display processor failed with %d\n",
- ret);
- return ret;
- }
- ipu_dp_set_global_alpha(ipu_crtc->dp, 1, 0, 1);
- }
-
ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, sig_cfg.interlaced,
out_pixel_fmt, mode->hdisplay);
if (ret) {
@@ -285,30 +192,9 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
- ipu_cpmem_set_resolution(cpmem, mode->hdisplay, mode->vdisplay);
- ipu_cpmem_set_fmt(cpmem, v4l2_fmt);
- ipu_cpmem_set_high_priority(ipu_crtc->ipu_ch);
-
- ret = ipu_dmfc_init_channel(ipu_crtc->dmfc, mode->hdisplay);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing dmfc channel failed with %d\n",
- ret);
- return ret;
- }
-
- ret = ipu_dmfc_alloc_bandwidth(ipu_crtc->dmfc,
- calc_bandwidth(mode, calc_vref(mode)), 64);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "allocating dmfc bandwidth failed with %d\n",
- ret);
- return ret;
- }
-
- ipu_drm_set_base(crtc, x, y);
-
- return 0;
+ return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay);
}
static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
@@ -332,7 +218,7 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
if (ipu_crtc->newfb) {
ipu_crtc->newfb = NULL;
- ipu_drm_set_base(&ipu_crtc->base, 0, 0);
+ ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb, 0, 0);
ipu_crtc_handle_pageflip(ipu_crtc);
}
@@ -370,10 +256,6 @@ static struct drm_crtc_helper_funcs ipu_helper_funcs = {
static int ipu_enable_vblank(struct drm_crtc *crtc)
{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- enable_irq(ipu_crtc->irq);
-
return 0;
}
@@ -381,7 +263,8 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- disable_irq(ipu_crtc->irq);
+ ipu_crtc->page_flip_event = NULL;
+ ipu_crtc->newfb = NULL;
}
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type,
@@ -418,12 +301,8 @@ static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
{
- if (!IS_ERR_OR_NULL(ipu_crtc->ipu_ch))
- ipu_idmac_put(ipu_crtc->ipu_ch);
- if (!IS_ERR_OR_NULL(ipu_crtc->dmfc))
- ipu_dmfc_put(ipu_crtc->dmfc);
- if (!IS_ERR_OR_NULL(ipu_crtc->dp))
- ipu_dp_put(ipu_crtc->dp);
+ if (!IS_ERR_OR_NULL(ipu_crtc->dc))
+ ipu_dc_put(ipu_crtc->dc);
if (!IS_ERR_OR_NULL(ipu_crtc->di))
ipu_di_put(ipu_crtc->di);
}
@@ -434,32 +313,12 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int ret;
- ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]);
- if (IS_ERR(ipu_crtc->ipu_ch)) {
- ret = PTR_ERR(ipu_crtc->ipu_ch);
- goto err_out;
- }
-
ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
if (IS_ERR(ipu_crtc->dc)) {
ret = PTR_ERR(ipu_crtc->dc);
goto err_out;
}
- ipu_crtc->dmfc = ipu_dmfc_get(ipu, pdata->dma[0]);
- if (IS_ERR(ipu_crtc->dmfc)) {
- ret = PTR_ERR(ipu_crtc->dmfc);
- goto err_out;
- }
-
- if (pdata->dp >= 0) {
- ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp);
- if (IS_ERR(ipu_crtc->dp)) {
- ret = PTR_ERR(ipu_crtc->dp);
- goto err_out;
- }
- }
-
ipu_crtc->di = ipu_di_get(ipu, pdata->di);
if (IS_ERR(ipu_crtc->di)) {
ret = PTR_ERR(ipu_crtc->di);
@@ -477,7 +336,9 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
struct ipu_client_platformdata *pdata)
{
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
+ int dp = -EINVAL;
int ret;
+ int id;
ret = ipu_get_resources(ipu_crtc, pdata);
if (ret) {
@@ -495,19 +356,42 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
goto err_put_resources;
}
- ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch,
- IPU_IRQ_EOF);
+ if (pdata->dp >= 0)
+ dp = IPU_DP_FLOW_SYNC_BG;
+ id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
+ ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
+ pdata->dma[0], dp, BIT(id), true);
+ ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
+ if (ret) {
+ dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
+ ret);
+ goto err_remove_crtc;
+ }
+
+ /* If this crtc is using the DP, add an overlay plane */
+ if (pdata->dp >= 0 && pdata->dma[1] > 0) {
+ ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
+ pdata->dma[1],
+ IPU_DP_FLOW_SYNC_FG,
+ BIT(id), false);
+ if (IS_ERR(ipu_crtc->plane[1]))
+ ipu_crtc->plane[1] = NULL;
+ }
+
+ ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_resources;
+ goto err_put_plane_res;
}
- disable_irq(ipu_crtc->irq);
-
return 0;
+err_put_plane_res:
+ ipu_plane_put_resources(ipu_crtc->plane[0]);
+err_remove_crtc:
+ imx_drm_remove_crtc(ipu_crtc->imx_crtc);
err_put_resources:
ipu_put_resources(ipu_crtc);
@@ -523,7 +407,9 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
@@ -546,6 +432,7 @@ static int ipu_drm_remove(struct platform_device *pdev)
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
+ ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
return 0;
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
new file mode 100644
index 00000000000..d97454a0dff
--- /dev/null
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -0,0 +1,375 @@
+/*
+ * i.MX IPUv3 DP Overlay Planes
+ *
+ * Copyright (C) 2013 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "ipu-v3/imx-ipu-v3.h"
+#include "ipuv3-plane.h"
+
+#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
+
+static const uint32_t ipu_plane_formats[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
+int ipu_plane_irq(struct ipu_plane *ipu_plane)
+{
+ return ipu_idmac_channel_irq(ipu_plane->ipu, ipu_plane->ipu_ch,
+ IPU_IRQ_EOF);
+}
+
+static int calc_vref(struct drm_display_mode *mode)
+{
+ unsigned long htotal, vtotal;
+
+ htotal = mode->htotal;
+ vtotal = mode->vtotal;
+
+ if (!htotal || !vtotal)
+ return 60;
+
+ return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
+}
+
+static inline int calc_bandwidth(int width, int height, unsigned int vref)
+{
+ return width * height * vref;
+}
+
+int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct ipu_ch_param __iomem *cpmem;
+ struct drm_gem_cma_object *cma_obj;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_LOG_KMS("entry is null.\n");
+ return -EFAULT;
+ }
+
+ dev_dbg(ipu_plane->base.dev->dev, "phys = 0x%x, x = %d, y = %d",
+ cma_obj->paddr, x, y);
+
+ cpmem = ipu_get_cpmem(ipu_plane->ipu_ch);
+ ipu_cpmem_set_stride(cpmem, fb->pitches[0]);
+ ipu_cpmem_set_buffer(cpmem, 0, cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * y + x);
+
+ return 0;
+}
+
+int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct ipu_ch_param __iomem *cpmem;
+ struct device *dev = ipu_plane->base.dev->dev;
+ int ret;
+
+ /* no scaling */
+ if (src_w != crtc_w || src_h != crtc_h)
+ return -EINVAL;
+
+ /* clip to crtc bounds */
+ if (crtc_x < 0) {
+ if (-crtc_x > crtc_w)
+ return -EINVAL;
+ src_x += -crtc_x;
+ src_w -= -crtc_x;
+ crtc_w -= -crtc_x;
+ crtc_x = 0;
+ }
+ if (crtc_y < 0) {
+ if (-crtc_y > crtc_h)
+ return -EINVAL;
+ src_y += -crtc_y;
+ src_h -= -crtc_y;
+ crtc_h -= -crtc_y;
+ crtc_y = 0;
+ }
+ if (crtc_x + crtc_w > mode->hdisplay) {
+ if (crtc_x > mode->hdisplay)
+ return -EINVAL;
+ crtc_w = mode->hdisplay - crtc_x;
+ src_w = crtc_w;
+ }
+ if (crtc_y + crtc_h > mode->vdisplay) {
+ if (crtc_y > mode->vdisplay)
+ return -EINVAL;
+ crtc_h = mode->vdisplay - crtc_y;
+ src_h = crtc_h;
+ }
+ /* full plane minimum width is 13 pixels */
+ if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
+ return -EINVAL;
+ if (crtc_h < 2)
+ return -EINVAL;
+
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ ret = ipu_dp_setup_channel(ipu_plane->dp,
+ IPUV3_COLORSPACE_RGB,
+ IPUV3_COLORSPACE_RGB);
+ if (ret) {
+ dev_err(dev,
+ "initializing display processor failed with %d\n",
+ ret);
+ return ret;
+ }
+ ipu_dp_set_global_alpha(ipu_plane->dp, 1, 0, 1);
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ ipu_dp_setup_channel(ipu_plane->dp,
+ ipu_drm_fourcc_to_colorspace(fb->pixel_format),
+ IPUV3_COLORSPACE_UNKNOWN);
+ ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
+ break;
+ }
+
+ ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
+ if (ret) {
+ dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
+ calc_bandwidth(crtc_w, crtc_h,
+ calc_vref(mode)), 64);
+ if (ret) {
+ dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
+ return ret;
+ }
+
+ cpmem = ipu_get_cpmem(ipu_plane->ipu_ch);
+ ipu_ch_param_zero(cpmem);
+ ipu_cpmem_set_resolution(cpmem, src_w, src_h);
+ ret = ipu_cpmem_set_fmt(cpmem, fb->pixel_format);
+ if (ret < 0) {
+ dev_err(dev, "unsupported pixel format 0x%08x\n",
+ fb->pixel_format);
+ return ret;
+ }
+ ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+
+ ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
+{
+ if (!IS_ERR_OR_NULL(ipu_plane->dp))
+ ipu_dp_put(ipu_plane->dp);
+ if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
+ ipu_dmfc_put(ipu_plane->dmfc);
+ if (!IS_ERR_OR_NULL(ipu_plane->ipu_ch))
+ ipu_idmac_put(ipu_plane->ipu_ch);
+}
+
+int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
+{
+ int ret;
+
+ ipu_plane->ipu_ch = ipu_idmac_get(ipu_plane->ipu, ipu_plane->dma);
+ if (IS_ERR(ipu_plane->ipu_ch)) {
+ ret = PTR_ERR(ipu_plane->ipu_ch);
+ DRM_ERROR("failed to get idmac channel: %d\n", ret);
+ return ret;
+ }
+
+ ipu_plane->dmfc = ipu_dmfc_get(ipu_plane->ipu, ipu_plane->dma);
+ if (IS_ERR(ipu_plane->dmfc)) {
+ ret = PTR_ERR(ipu_plane->dmfc);
+ DRM_ERROR("failed to get dmfc: ret %d\n", ret);
+ goto err_out;
+ }
+
+ if (ipu_plane->dp_flow >= 0) {
+ ipu_plane->dp = ipu_dp_get(ipu_plane->ipu, ipu_plane->dp_flow);
+ if (IS_ERR(ipu_plane->dp)) {
+ ret = PTR_ERR(ipu_plane->dp);
+ DRM_ERROR("failed to get dp flow: %d\n", ret);
+ goto err_out;
+ }
+ }
+
+ return 0;
+err_out:
+ ipu_plane_put_resources(ipu_plane);
+
+ return ret;
+}
+
+void ipu_plane_enable(struct ipu_plane *ipu_plane)
+{
+ ipu_dmfc_enable_channel(ipu_plane->dmfc);
+ ipu_idmac_enable_channel(ipu_plane->ipu_ch);
+ if (ipu_plane->dp)
+ ipu_dp_enable_channel(ipu_plane->dp);
+
+ ipu_plane->enabled = true;
+}
+
+void ipu_plane_disable(struct ipu_plane *ipu_plane)
+{
+ ipu_plane->enabled = false;
+
+ ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+
+ if (ipu_plane->dp)
+ ipu_dp_disable_channel(ipu_plane->dp);
+ ipu_idmac_disable_channel(ipu_plane->ipu_ch);
+ ipu_dmfc_disable_channel(ipu_plane->dmfc);
+}
+
+static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
+{
+ bool enable;
+
+ DRM_DEBUG_KMS("mode = %d", mode);
+
+ enable = (mode == DRM_MODE_DPMS_ON);
+
+ if (enable == ipu_plane->enabled)
+ return;
+
+ if (enable) {
+ ipu_plane_enable(ipu_plane);
+ } else {
+ ipu_plane_disable(ipu_plane);
+
+ ipu_idmac_put(ipu_plane->ipu_ch);
+ ipu_dmfc_put(ipu_plane->dmfc);
+ ipu_dp_put(ipu_plane->dp);
+ }
+}
+
+/*
+ * drm_plane API
+ */
+
+static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ int ret = 0;
+
+ DRM_DEBUG_KMS("plane - %p\n", plane);
+
+ if (!ipu_plane->enabled)
+ ret = ipu_plane_get_resources(ipu_plane);
+ if (ret < 0)
+ return ret;
+
+ ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16);
+ if (ret < 0) {
+ ipu_plane_put_resources(ipu_plane);
+ return ret;
+ }
+
+ if (crtc != plane->crtc)
+ dev_info(plane->dev->dev, "crtc change: %p -> %p\n",
+ plane->crtc, crtc);
+ plane->crtc = crtc;
+
+ ipu_plane_dpms(ipu_plane, DRM_MODE_DPMS_ON);
+
+ return 0;
+}
+
+static int ipu_disable_plane(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ ipu_plane_dpms(ipu_plane, DRM_MODE_DPMS_OFF);
+
+ ipu_plane_put_resources(ipu_plane);
+
+ return 0;
+}
+
+static void ipu_plane_destroy(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ ipu_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(ipu_plane);
+}
+
+static struct drm_plane_funcs ipu_plane_funcs = {
+ .update_plane = ipu_update_plane,
+ .disable_plane = ipu_disable_plane,
+ .destroy = ipu_plane_destroy,
+};
+
+struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
+ int dma, int dp, unsigned int possible_crtcs,
+ bool priv)
+{
+ struct ipu_plane *ipu_plane;
+ int ret;
+
+ DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
+ dma, dp, possible_crtcs);
+
+ ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL);
+ if (!ipu_plane) {
+ DRM_ERROR("failed to allocate plane\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ipu_plane->ipu = ipu;
+ ipu_plane->dma = dma;
+ ipu_plane->dp_flow = dp;
+
+ ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
+ &ipu_plane_funcs, ipu_plane_formats,
+ ARRAY_SIZE(ipu_plane_formats),
+ priv);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ kfree(ipu_plane);
+ return ERR_PTR(ret);
+ }
+
+ return ipu_plane;
+}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.h b/drivers/staging/imx-drm/ipuv3-plane.h
new file mode 100644
index 00000000000..c0aae5bcb5d
--- /dev/null
+++ b/drivers/staging/imx-drm/ipuv3-plane.h
@@ -0,0 +1,55 @@
+#ifndef __IPUV3_PLANE_H__
+#define __IPUV3_PLANE_H__
+
+#include <drm/drm_crtc.h> /* drm_plane */
+
+struct drm_plane;
+struct drm_device;
+struct ipu_soc;
+struct drm_crtc;
+struct drm_framebuffer;
+
+struct ipuv3_channel;
+struct dmfc_channel;
+struct ipu_dp;
+
+struct ipu_plane {
+ struct drm_plane base;
+
+ struct ipu_soc *ipu;
+ struct ipuv3_channel *ipu_ch;
+ struct dmfc_channel *dmfc;
+ struct ipu_dp *dp;
+
+ int dma;
+ int dp_flow;
+
+ int x;
+ int y;
+
+ bool enabled;
+};
+
+struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
+ int dma, int dp, unsigned int possible_crtcs,
+ bool priv);
+
+/* Init IDMAC, DMFC, DP */
+int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w,
+ uint32_t src_h);
+
+void ipu_plane_enable(struct ipu_plane *plane);
+void ipu_plane_disable(struct ipu_plane *plane);
+int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
+ int x, int y);
+
+int ipu_plane_get_resources(struct ipu_plane *plane);
+void ipu_plane_put_resources(struct ipu_plane *plane);
+
+int ipu_plane_irq(struct ipu_plane *plane);
+
+#endif
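For orientation — a minimal sketch (not part of the patch) of how a CRTC driver is expected to drive the new plane helpers declared above, modelled on the ipuv3-crtc.c changes earlier in this series; the platform-data fields, possible_crtcs mask and error handling are simplified placeholders:

#include <linux/err.h>
#include <linux/errno.h>

#include "ipu-v3/imx-ipu-v3.h"
#include "ipuv3-plane.h"

/* Hypothetical caller: create the primary plane for one CRTC and scan out
 * the CRTC framebuffer 1:1 (ipu_plane_mode_set() rejects any scaling). */
static int example_crtc_bind_primary_plane(struct drm_device *drm,
					   struct ipu_soc *ipu,
					   struct ipu_client_platformdata *pdata,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode)
{
	struct ipu_plane *plane;
	int dp = pdata->dp >= 0 ? IPU_DP_FLOW_SYNC_BG : -EINVAL;
	int ret;

	/* "true" requests a private plane (not exposed to userspace). */
	plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 1 << 0, true);
	if (IS_ERR(plane))
		return PTR_ERR(plane);

	/* Acquire the IDMAC channel, DMFC slice and, if dp >= 0, the DP flow. */
	ret = ipu_plane_get_resources(plane);
	if (ret)
		return ret;

	/* Program CPMEM/DMFC/DP for a full-screen, unscaled framebuffer. */
	ret = ipu_plane_mode_set(plane, crtc, mode, crtc->fb,
				 0, 0, mode->hdisplay, mode->vdisplay,
				 0, 0, mode->hdisplay, mode->vdisplay);
	if (ret) {
		ipu_plane_put_resources(plane);
		return ret;
	}

	ipu_plane_enable(plane);
	return 0;
}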
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index ddd2e7390b4..a84ee630336 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -604,9 +604,7 @@ static int eucr_probe(struct usb_interface *intf,
if (!(MiscReg03 & 0x02)) {
result = -ENODEV;
quiesce_and_remove_host(us);
- pr_info("keucr: The driver only supports SM/MS card. "
- "To use SD card, "
- "please build driver/usb/storage/ums-eneub6250.ko\n");
+ pr_info("keucr: The driver only supports SM/MS card. To use SD card, please build driver/usb/storage/ums-eneub6250.ko\n");
goto BadDevice;
}
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 471c10c116e..cc5d62d2b01 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -205,7 +205,7 @@ static int line6_send_raw_message_async_part(struct message *msg,
__func__, retval);
usb_free_urb(urb);
kfree(msg);
- return -EINVAL;
+ return retval;
}
return 0;
@@ -340,7 +340,7 @@ static void line6_data_received(struct urb *urb)
line6->message_length = done;
line6_midi_receive(line6, line6->buffer_message, done);
- switch (line6->usbdev->descriptor.idProduct) {
+ switch (le16_to_cpu(line6->usbdev->descriptor.idProduct)) {
case LINE6_DEVID_BASSPODXT:
case LINE6_DEVID_BASSPODXTLIVE:
case LINE6_DEVID_BASSPODXTPRO:
@@ -1010,7 +1010,7 @@ static void line6_disconnect(struct usb_interface *interface)
dev_err(line6->ifcdev,
"driver bug: inconsistent usb device\n");
- switch (line6->usbdev->descriptor.idProduct) {
+ switch (le16_to_cpu(line6->usbdev->descriptor.idProduct)) {
case LINE6_DEVID_BASSPODXT:
case LINE6_DEVID_BASSPODXTLIVE:
case LINE6_DEVID_BASSPODXTPRO:
@@ -1114,7 +1114,7 @@ static int line6_reset_resume(struct usb_interface *interface)
{
struct usb_line6 *line6 = usb_get_intfdata(interface);
- switch (line6->usbdev->descriptor.idProduct) {
+ switch (le16_to_cpu(line6->usbdev->descriptor.idProduct)) {
case LINE6_DEVID_PODSTUDIO_GX:
case LINE6_DEVID_PODSTUDIO_UX1:
case LINE6_DEVID_PODSTUDIO_UX2:
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index e3f9a53dbd9..3f6d78c585f 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -144,7 +144,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
if (retval < 0) {
dev_err(line6->ifcdev, "usb_submit_urb failed\n");
usb_free_urb(urb);
- return -EINVAL;
+ return retval;
}
++line6->line6midi->num_active_send_urbs;
@@ -205,7 +205,7 @@ static void line6_midi_input_trigger(struct snd_rawmidi_substream *substream,
if (up)
line6->line6midi->substream_receive = substream;
else
- line6->line6midi->substream_receive = 0;
+ line6->line6midi->substream_receive = NULL;
}
static struct snd_rawmidi_ops line6_midi_output_ops = {
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index f9135c7cb19..41869caf19a 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -242,13 +242,14 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (line6pcm->flags & LINE6_BITS_PCM_IMPULSE) {
create_impulse_test_signal(line6pcm, urb_out,
bytes_per_frame);
- if (line6pcm->flags & LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
+ if (line6pcm->flags &
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
line6_capture_copy(line6pcm,
urb_out->transfer_buffer,
urb_out->
transfer_buffer_length);
line6_capture_check_period(line6pcm,
- urb_out->transfer_buffer_length);
+ urb_out->transfer_buffer_length);
}
} else {
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 776d3632dc7..af2e7e50c13 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -307,6 +307,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
int ticks;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
+ u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
/* sync time on device with host: */
ticks = (int)get_seconds();
@@ -316,7 +317,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
toneport_send_cmd(usbdev, 0x0301, 0x0000);
/* initialize source select: */
- switch (usbdev->descriptor.idProduct) {
+ switch (le16_to_cpu(usbdev->descriptor.idProduct)) {
case LINE6_DEVID_TONEPORT_UX1:
case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
@@ -326,7 +327,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
0x0000);
}
- if (toneport_has_led(usbdev->descriptor.idProduct))
+ if (toneport_has_led(idProduct))
toneport_update_led(&usbdev->dev);
}
@@ -339,6 +340,7 @@ static int toneport_try_init(struct usb_interface *interface,
int err;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
+ u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
if ((interface == NULL) || (toneport == NULL))
return -ENODEV;
@@ -361,7 +363,7 @@ static int toneport_try_init(struct usb_interface *interface,
return err;
/* register source select control: */
- switch (usbdev->descriptor.idProduct) {
+ switch (le16_to_cpu(usbdev->descriptor.idProduct)) {
case LINE6_DEVID_TONEPORT_UX1:
case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
@@ -382,7 +384,7 @@ static int toneport_try_init(struct usb_interface *interface,
line6_read_serial_number(line6, &toneport->serial_number);
line6_read_data(line6, 0x80c2, &toneport->firmware_version, 1);
- if (toneport_has_led(usbdev->descriptor.idProduct)) {
+ if (toneport_has_led(idProduct)) {
CHECK_RETURN(device_create_file
(&interface->dev, &dev_attr_led_red));
CHECK_RETURN(device_create_file
@@ -428,14 +430,16 @@ void line6_toneport_reset_resume(struct usb_line6_toneport *toneport)
void line6_toneport_disconnect(struct usb_interface *interface)
{
struct usb_line6_toneport *toneport;
+ u16 idProduct;
if (interface == NULL)
return;
toneport = usb_get_intfdata(interface);
del_timer_sync(&toneport->timer);
+ idProduct = le16_to_cpu(toneport->line6.usbdev->descriptor.idProduct);
- if (toneport_has_led(toneport->line6.usbdev->descriptor.idProduct)) {
+ if (toneport_has_led(idProduct)) {
device_remove_file(&interface->dev, &dev_attr_led_red);
device_remove_file(&interface->dev, &dev_attr_led_green);
}
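A note on the le16_to_cpu() conversions in the line6 hunks above: USB descriptor fields such as idProduct are stored little-endian (__le16) as read from the wire, so comparing them directly against host-order LINE6_DEVID_* constants is only correct on little-endian hosts. A minimal illustration of the corrected pattern (the device id below is made up for the example):

#include <linux/usb.h>

#define EXAMPLE_DEVID_TONEPORT 0x4750	/* illustrative value, not a real LINE6_DEVID_* */

static bool example_is_toneport(struct usb_device *usbdev)
{
	/* idProduct is __le16 in struct usb_device_descriptor; convert once,
	 * then compare or switch in CPU byte order. */
	u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);

	return idProduct == EXAMPLE_DEVID_TONEPORT;
}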
diff --git a/drivers/staging/lustre/include/linux/libcfs/bitmap.h b/drivers/staging/lustre/include/linux/libcfs/bitmap.h
index f3d4a896a75..8b137891791 100644
--- a/drivers/staging/lustre/include/linux/libcfs/bitmap.h
+++ b/drivers/staging/lustre/include/linux/libcfs/bitmap.h
@@ -1,111 +1 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#ifndef _LIBCFS_BITMAP_H_
-#define _LIBCFS_BITMAP_H_
-
-typedef struct {
- int size;
- unsigned long data[0];
-} cfs_bitmap_t;
-
-#define CFS_BITMAP_SIZE(nbits) \
- (((nbits/BITS_PER_LONG)+1)*sizeof(long)+sizeof(cfs_bitmap_t))
-
-static inline
-cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
-{
- cfs_bitmap_t *ptr;
-
- OBD_ALLOC(ptr, CFS_BITMAP_SIZE(size));
- if (ptr == NULL)
- return ptr;
-
- ptr->size = size;
-
- return ptr;
-}
-
-#define CFS_FREE_BITMAP(ptr) OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
-
-static inline
-void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
-{
- set_bit(nbit, bitmap->data);
-}
-
-static inline
-void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
-{
- test_and_clear_bit(nbit, bitmap->data);
-}
-
-static inline
-int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
-{
- return test_bit(nbit, bitmap->data);
-}
-
-static inline
-int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
-{
- return test_and_clear_bit(nbit, bitmap->data);
-}
-
-/* return 0 is bitmap has none set bits */
-static inline
-int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
-{
- return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
-}
-
-static inline
-void cfs_bitmap_copy(cfs_bitmap_t *new, cfs_bitmap_t *old)
-{
- int newsize;
-
- LASSERT(new->size >= old->size);
- newsize = new->size;
- memcpy(new, old, CFS_BITMAP_SIZE(old->size));
- new->size = newsize;
-}
-
-#define cfs_foreach_bit(bitmap, pos) \
- for ((pos) = find_first_bit((bitmap)->data, bitmap->size); \
- (pos) < (bitmap)->size; \
- (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos) + 1))
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index e6439d19f3e..40282b70bd1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -165,11 +165,11 @@ struct ptldebug_header {
#define CDEBUG_DEFAULT_MAX_DELAY (cfs_time_seconds(600)) /* jiffies */
#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
#define CDEBUG_DEFAULT_BACKOFF 2
-typedef struct {
+struct cfs_debug_limit_state {
cfs_time_t cdls_next;
unsigned int cdls_delay;
int cdls_count;
-} cfs_debug_limit_state_t;
+};
struct libcfs_debug_msg_data {
const char *msg_file;
@@ -177,7 +177,7 @@ struct libcfs_debug_msg_data {
int msg_subsys;
int msg_line;
int msg_mask;
- cfs_debug_limit_state_t *msg_cdls;
+ struct cfs_debug_limit_state *msg_cdls;
};
#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
@@ -226,7 +226,7 @@ do { \
#define CDEBUG_LIMIT(mask, format, ...) \
do { \
- static cfs_debug_limit_state_t cdls; \
+ static struct cfs_debug_limit_state cdls; \
\
__CDEBUG(&cdls, mask, format, ## __VA_ARGS__);\
} while (0)
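The cfs_debug_limit_state change above and the libcfs_hash.h changes below apply the kernel coding-style rule against typedef'ing structs and unions: drop the *_t alias and spell out the struct tag at every use. A generic before/after sketch with illustrative names (not taken from the patch):

/* Before: the typedef hides the aggregate type behind an opaque _t name. */
typedef struct {
	int	count;
} example_state_t;

/* After: a plain tagged struct, written out as "struct example_state" at
 * every declaration and parameter. */
struct example_state {
	int	count;
};

static void example_state_reset(struct example_state *st)
{
	st->count = 0;
}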
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 98f5be243c8..9d5ee1a69c0 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -81,10 +81,10 @@ struct cfs_hash_ops;
struct cfs_hash_lock_ops;
struct cfs_hash_hlist_ops;
-typedef union {
+union cfs_hash_lock {
rwlock_t rw; /**< rwlock */
spinlock_t spin; /**< spinlock */
-} cfs_hash_lock_t;
+};
/**
* cfs_hash_bucket is a container of:
@@ -97,22 +97,22 @@ typedef union {
* which depends on requirement of user
* - some extra bytes (caller can require it while creating hash)
*/
-typedef struct cfs_hash_bucket {
- cfs_hash_lock_t hsb_lock; /**< bucket lock */
+struct cfs_hash_bucket {
+ union cfs_hash_lock hsb_lock; /**< bucket lock */
__u32 hsb_count; /**< current entries */
__u32 hsb_version; /**< change version */
unsigned int hsb_index; /**< index of bucket */
int hsb_depmax; /**< max depth on bucket */
long hsb_head[0]; /**< hash-head array */
-} cfs_hash_bucket_t;
+};
/**
* cfs_hash bucket descriptor, it's normally in stack of caller
*/
-typedef struct cfs_hash_bd {
- cfs_hash_bucket_t *bd_bucket; /**< address of bucket */
+struct cfs_hash_bd {
+ struct cfs_hash_bucket *bd_bucket; /**< address of bucket */
unsigned int bd_offset; /**< offset in bucket */
-} cfs_hash_bd_t;
+};
#define CFS_HASH_NAME_LEN 16 /**< default name length */
#define CFS_HASH_BIGNAME_LEN 64 /**< bigname for param tree */
@@ -210,10 +210,10 @@ enum cfs_hash_tag {
* locations; additions must take care to only insert into the new bucket.
*/
-typedef struct cfs_hash {
+struct cfs_hash {
/** serialize with rehash, or serialize all operations if
* the hash-table has CFS_HASH_NO_BKTLOCK */
- cfs_hash_lock_t hs_lock;
+ union cfs_hash_lock hs_lock;
/** hash operations */
struct cfs_hash_ops *hs_ops;
/** hash lock operations */
@@ -221,7 +221,7 @@ typedef struct cfs_hash {
/** hash list operations */
struct cfs_hash_hlist_ops *hs_hops;
/** hash buckets-table */
- cfs_hash_bucket_t **hs_buckets;
+ struct cfs_hash_bucket **hs_buckets;
/** total number of items on this hash-table */
atomic_t hs_count;
/** hash flags, see cfs_hash_tag for detail */
@@ -255,7 +255,7 @@ typedef struct cfs_hash {
/** refcount on this hash table */
atomic_t hs_refcount;
/** rehash buckets-table */
- cfs_hash_bucket_t **hs_rehash_buckets;
+ struct cfs_hash_bucket **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** serialize debug members */
spinlock_t hs_dep_lock;
@@ -272,35 +272,35 @@ typedef struct cfs_hash {
#endif
/** name of htable */
char hs_name[0];
-} cfs_hash_t;
+};
typedef struct cfs_hash_lock_ops {
/** lock the hash table */
- void (*hs_lock)(cfs_hash_lock_t *lock, int exclusive);
+ void (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
/** unlock the hash table */
- void (*hs_unlock)(cfs_hash_lock_t *lock, int exclusive);
+ void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
/** lock the hash bucket */
- void (*hs_bkt_lock)(cfs_hash_lock_t *lock, int exclusive);
+ void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
/** unlock the hash bucket */
- void (*hs_bkt_unlock)(cfs_hash_lock_t *lock, int exclusive);
+ void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
} cfs_hash_lock_ops_t;
typedef struct cfs_hash_hlist_ops {
/** return hlist_head of hash-head of @bd */
- struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd);
+ struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd);
/** return hash-head size */
- int (*hop_hhead_size)(cfs_hash_t *hs);
+ int (*hop_hhead_size)(struct cfs_hash *hs);
/** add @hnode to hash-head of @bd */
- int (*hop_hnode_add)(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, struct hlist_node *hnode);
+ int (*hop_hnode_add)(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, struct hlist_node *hnode);
/** remove @hnode from hash-head of @bd */
- int (*hop_hnode_del)(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, struct hlist_node *hnode);
+ int (*hop_hnode_del)(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, struct hlist_node *hnode);
} cfs_hash_hlist_ops_t;
typedef struct cfs_hash_ops {
/** return hashed value from @key */
- unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
+ unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
/** return key address of @hnode */
void * (*hs_key)(struct hlist_node *hnode);
/** copy key from @hnode to @key */
@@ -313,13 +313,13 @@ typedef struct cfs_hash_ops {
/** return object address of @hnode, i.e: container_of(...hnode) */
void * (*hs_object)(struct hlist_node *hnode);
/** get refcount of item, always called with holding bucket-lock */
- void (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
+ void (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
/** release refcount of item */
- void (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
+ void (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
/** release refcount of item, always called with holding bucket-lock */
- void (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
+ void (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode);
/** it's called before removing of @hnode */
- void (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
+ void (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
} cfs_hash_ops_t;
/** total number of buckets in @hs */
@@ -340,41 +340,41 @@ typedef struct cfs_hash_ops {
#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits)
static inline int
-cfs_hash_with_no_lock(cfs_hash_t *hs)
+cfs_hash_with_no_lock(struct cfs_hash *hs)
{
/* caller will serialize all operations for this hash-table */
return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
}
static inline int
-cfs_hash_with_no_bktlock(cfs_hash_t *hs)
+cfs_hash_with_no_bktlock(struct cfs_hash *hs)
{
/* no bucket lock, one single lock to protect the hash-table */
return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
}
static inline int
-cfs_hash_with_rw_bktlock(cfs_hash_t *hs)
+cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
{
/* rwlock to protect hash bucket */
return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
}
static inline int
-cfs_hash_with_spin_bktlock(cfs_hash_t *hs)
+cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
{
/* spinlock to protect hash bucket */
return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
}
static inline int
-cfs_hash_with_add_tail(cfs_hash_t *hs)
+cfs_hash_with_add_tail(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
}
static inline int
-cfs_hash_with_no_itemref(cfs_hash_t *hs)
+cfs_hash_with_no_itemref(struct cfs_hash *hs)
{
/* hash-table doesn't keep refcount on item,
* item can't be removed from hash unless it's
@@ -383,75 +383,75 @@ cfs_hash_with_no_itemref(cfs_hash_t *hs)
}
static inline int
-cfs_hash_with_bigname(cfs_hash_t *hs)
+cfs_hash_with_bigname(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
}
static inline int
-cfs_hash_with_counter(cfs_hash_t *hs)
+cfs_hash_with_counter(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
}
static inline int
-cfs_hash_with_rehash(cfs_hash_t *hs)
+cfs_hash_with_rehash(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_REHASH) != 0;
}
static inline int
-cfs_hash_with_rehash_key(cfs_hash_t *hs)
+cfs_hash_with_rehash_key(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
}
static inline int
-cfs_hash_with_shrink(cfs_hash_t *hs)
+cfs_hash_with_shrink(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
}
static inline int
-cfs_hash_with_assert_empty(cfs_hash_t *hs)
+cfs_hash_with_assert_empty(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
}
static inline int
-cfs_hash_with_depth(cfs_hash_t *hs)
+cfs_hash_with_depth(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
}
static inline int
-cfs_hash_with_nblk_change(cfs_hash_t *hs)
+cfs_hash_with_nblk_change(struct cfs_hash *hs)
{
return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
}
static inline int
-cfs_hash_is_exiting(cfs_hash_t *hs)
+cfs_hash_is_exiting(struct cfs_hash *hs)
{ /* cfs_hash_destroy is called */
return hs->hs_exiting;
}
static inline int
-cfs_hash_is_rehashing(cfs_hash_t *hs)
+cfs_hash_is_rehashing(struct cfs_hash *hs)
{ /* rehash is launched */
return hs->hs_rehash_bits != 0;
}
static inline int
-cfs_hash_is_iterating(cfs_hash_t *hs)
+cfs_hash_is_iterating(struct cfs_hash *hs)
{ /* someone is calling cfs_hash_for_each_* */
return hs->hs_iterating || hs->hs_iterators != 0;
}
static inline int
-cfs_hash_bkt_size(cfs_hash_t *hs)
+cfs_hash_bkt_size(struct cfs_hash *hs)
{
- return offsetof(cfs_hash_bucket_t, hsb_head[0]) +
+ return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
hs->hs_extra_bytes;
}
@@ -459,19 +459,19 @@ cfs_hash_bkt_size(cfs_hash_t *hs)
#define CFS_HOP(hs, op) (hs)->hs_ops->hs_ ## op
static inline unsigned
-cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
{
return CFS_HOP(hs, hash)(hs, key, mask);
}
static inline void *
-cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
{
return CFS_HOP(hs, key)(hnode);
}
static inline void
-cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
+cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
{
if (CFS_HOP(hs, keycpy) != NULL)
CFS_HOP(hs, keycpy)(hnode, key);
@@ -481,25 +481,25 @@ cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
* Returns 1 on a match,
*/
static inline int
-cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
return CFS_HOP(hs, keycmp)(key, hnode);
}
static inline void *
-cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
{
return CFS_HOP(hs, object)(hnode);
}
static inline void
-cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
return CFS_HOP(hs, get)(hs, hnode);
}
static inline void
-cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
LASSERT(CFS_HOP(hs, put_locked) != NULL);
@@ -507,7 +507,7 @@ cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
}
static inline void
-cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
LASSERT(CFS_HOP(hs, put) != NULL);
@@ -515,37 +515,37 @@ cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
}
static inline void
-cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
if (CFS_HOP(hs, exit))
CFS_HOP(hs, exit)(hs, hnode);
}
-static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
+static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
{
hs->hs_lops->hs_lock(&hs->hs_lock, excl);
}
-static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
+static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
{
hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
}
-static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
+static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
atomic_t *condition)
{
LASSERT(cfs_hash_with_no_bktlock(hs));
return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
}
-static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, int excl)
+static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, int excl)
{
hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
}
-static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, int excl)
+static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, int excl)
{
hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
}
@@ -554,56 +554,56 @@ static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
* operations on cfs_hash bucket (bd: bucket descriptor),
* they are normally for hash-table without rehash
*/
-void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd);
+void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd);
-static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
- cfs_hash_bd_t *bd, int excl)
+static inline void cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
+ struct cfs_hash_bd *bd, int excl)
{
cfs_hash_bd_get(hs, key, bd);
cfs_hash_bd_lock(hs, bd, excl);
}
-static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+static inline unsigned cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
}
-static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
- unsigned index, cfs_hash_bd_t *bd)
+static inline void cfs_hash_bd_index_set(struct cfs_hash *hs,
+ unsigned index, struct cfs_hash_bd *bd)
{
bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
}
static inline void *
-cfs_hash_bd_extra_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
return (void *)bd->bd_bucket +
cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
}
static inline __u32
-cfs_hash_bd_version_get(cfs_hash_bd_t *bd)
+cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
{
/* need hold cfs_hash_bd_lock */
return bd->bd_bucket->hsb_version;
}
static inline __u32
-cfs_hash_bd_count_get(cfs_hash_bd_t *bd)
+cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
{
/* need hold cfs_hash_bd_lock */
return bd->bd_bucket->hsb_count;
}
static inline int
-cfs_hash_bd_depmax_get(cfs_hash_bd_t *bd)
+cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
{
return bd->bd_bucket->hsb_depmax;
}
static inline int
-cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;
@@ -614,14 +614,14 @@ cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
return 0;
}
-void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode);
-void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode);
-void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
- cfs_hash_bd_t *bd_new, struct hlist_node *hnode);
+void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
+ struct cfs_hash_bd *bd_new, struct hlist_node *hnode);
-static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static inline int cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
atomic_t *condition)
{
LASSERT(cfs_hash_with_spin_bktlock(hs));
@@ -629,109 +629,109 @@ static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
&bd->bd_bucket->hsb_lock.spin);
}
-static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
- cfs_hash_bd_t *bd)
+static inline struct hlist_head *cfs_hash_bd_hhead(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd)
{
return hs->hs_hops->hop_hhead(hs, bd);
}
-struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key);
-struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key);
-struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key,
+struct hlist_node *cfs_hash_bd_lookup_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, const void *key);
+struct hlist_node *cfs_hash_bd_peek_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, const void *key);
+struct hlist_node *cfs_hash_bd_findadd_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, const void *key,
struct hlist_node *hnode,
int insist_add);
-struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key,
+struct hlist_node *cfs_hash_bd_finddel_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd, const void *key,
struct hlist_node *hnode);
/**
* operations on cfs_hash bucket (bd: bucket descriptor),
* they are safe for hash-table with rehash
*/
-void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds);
-void cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
-void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
+void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds);
+void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
+void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
-static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
- cfs_hash_bd_t *bds, int excl)
+static inline void cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
+ struct cfs_hash_bd *bds, int excl)
{
cfs_hash_dual_bd_get(hs, key, bds);
cfs_hash_dual_bd_lock(hs, bds, excl);
}
-struct hlist_node *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds,
+struct hlist_node *cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bds,
const void *key);
-struct hlist_node *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds,
+struct hlist_node *cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bds,
const void *key,
struct hlist_node *hnode,
int insist_add);
-struct hlist_node *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds,
+struct hlist_node *cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bds,
const void *key,
struct hlist_node *hnode);
/* Hash init/cleanup functions */
-cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
+struct cfs_hash *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
unsigned bkt_bits, unsigned extra_bytes,
unsigned min_theta, unsigned max_theta,
cfs_hash_ops_t *ops, unsigned flags);
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
-void cfs_hash_putref(cfs_hash_t *hs);
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
+void cfs_hash_putref(struct cfs_hash *hs);
/* Hash addition functions */
-void cfs_hash_add(cfs_hash_t *hs, const void *key,
+void cfs_hash_add(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode);
-int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
+int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode);
-void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode);
/* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
-void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
+void *cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode);
+void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);
/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG 1024
-typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *node, void *data);
-void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
-void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_for_each_nolock(cfs_hash_t *hs,
+void *cfs_hash_lookup(struct cfs_hash *hs, const void *key);
+void cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+int cfs_hash_for_each_nolock(struct cfs_hash *hs,
cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_for_each_empty(cfs_hash_t *hs,
+int cfs_hash_for_each_empty(struct cfs_hash *hs,
cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
-void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
+void cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
-void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+void cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_is_empty(cfs_hash_t *hs);
-__u64 cfs_hash_size_get(cfs_hash_t *hs);
+int cfs_hash_is_empty(struct cfs_hash *hs);
+__u64 cfs_hash_size_get(struct cfs_hash *hs);
/*
* Rehash - Theta is calculated to be the average chained
* hash depth assuming a perfectly uniform hash function.
*/
-void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
-void cfs_hash_rehash_cancel(cfs_hash_t *hs);
-int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
+void cfs_hash_rehash_cancel(struct cfs_hash *hs);
+int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
void *new_key, struct hlist_node *hnode);
#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode)
{
LASSERT(cfs_hash_keycmp(hs, key, hnode));
@@ -739,10 +739,10 @@ cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
/* Validate hnode is in the correct bucket */
static inline void
-cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
- cfs_hash_bd_t bds[2];
+ struct cfs_hash_bd bds[2];
cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
@@ -752,11 +752,11 @@ cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode) {}
static inline void
-cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode) {}
#endif /* CFS_HASH_DEBUG_LEVEL */
@@ -778,13 +778,13 @@ static inline int __cfs_hash_theta_frac(int theta)
(__cfs_hash_theta_int(theta) * 1000);
}
-static inline int __cfs_hash_theta(cfs_hash_t *hs)
+static inline int __cfs_hash_theta(struct cfs_hash *hs)
{
return (atomic_read(&hs->hs_count) <<
CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
-static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
+static inline void __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
{
LASSERT(min < max);
hs->hs_min_theta = (__u16)min;
@@ -794,7 +794,7 @@ static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
/* Generic debug formatting routines mainly for proc handler */
struct seq_file;
int cfs_hash_debug_header(struct seq_file *m);
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m);
+int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
/*
* Generic djb2 hash algorithm for character arrays.
@@ -830,7 +830,7 @@ cfs_hash_u64_hash(const __u64 key, unsigned mask)
return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}
-/** iterate over all buckets in @bds (array of cfs_hash_bd_t) */
+/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
#define cfs_hash_for_each_bd(bds, n, i) \
for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 59bff0bea81..bf301048c7a 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -79,20 +79,20 @@ extern lnet_t the_lnet; /* THE network */
/** exclusive lock */
#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
-static inline int lnet_is_wire_handle_none (lnet_handle_wire_t *wh)
+static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh)
{
return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
}
-static inline int lnet_md_exhausted (lnet_libmd_t *md)
+static inline int lnet_md_exhausted(lnet_libmd_t *md)
{
return (md->md_threshold == 0 ||
((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
md->md_offset + md->md_max_size > md->md_length));
}
-static inline int lnet_md_unlinkable (lnet_libmd_t *md)
+static inline int lnet_md_unlinkable(lnet_libmd_t *md)
{
/* Should unlink md when its refcount is 0 and either:
* - md has been flagged for deletion (by auto unlink or LNetM[DE]Unlink,
@@ -193,31 +193,31 @@ int lnet_freelist_init(lnet_freelist_t *fl, int n, int size);
void lnet_freelist_fini(lnet_freelist_t *fl);
static inline void *
-lnet_freelist_alloc (lnet_freelist_t *fl)
+lnet_freelist_alloc(lnet_freelist_t *fl)
{
/* ALWAYS called with liblock held */
lnet_freeobj_t *o;
- if (list_empty (&fl->fl_list))
- return (NULL);
+ if (list_empty(&fl->fl_list))
+ return NULL;
- o = list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
- list_del (&o->fo_list);
- return ((void *)&o->fo_contents);
+ o = list_entry(fl->fl_list.next, lnet_freeobj_t, fo_list);
+ list_del(&o->fo_list);
+ return (void *)&o->fo_contents;
}
static inline void
-lnet_freelist_free (lnet_freelist_t *fl, void *obj)
+lnet_freelist_free(lnet_freelist_t *fl, void *obj)
{
/* ALWAYS called with liblock held */
- lnet_freeobj_t *o = list_entry (obj, lnet_freeobj_t, fo_contents);
+ lnet_freeobj_t *o = list_entry(obj, lnet_freeobj_t, fo_contents);
- list_add (&o->fo_list, &fl->fl_list);
+ list_add(&o->fo_list, &fl->fl_list);
}
static inline lnet_eq_t *
-lnet_eq_alloc (void)
+lnet_eq_alloc(void)
{
/* NEVER called with resource lock held */
struct lnet_res_container *rec = &the_lnet.ln_eq_container;
@@ -251,7 +251,7 @@ lnet_eq_free(lnet_eq_t *eq)
}
static inline lnet_libmd_t *
-lnet_md_alloc (lnet_md_t *umd)
+lnet_md_alloc(lnet_md_t *umd)
{
/* NEVER called with resource lock held */
struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
@@ -322,7 +322,7 @@ lnet_me_free(lnet_me_t *me)
}
static inline lnet_msg_t *
-lnet_msg_alloc (void)
+lnet_msg_alloc(void)
{
/* NEVER called with network lock held */
struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
@@ -353,7 +353,7 @@ lnet_msg_free_locked(lnet_msg_t *msg)
}
static inline void
-lnet_msg_free (lnet_msg_t *msg)
+lnet_msg_free(lnet_msg_t *msg)
{
lnet_net_lock(0);
lnet_msg_free_locked(msg);
@@ -363,13 +363,13 @@ lnet_msg_free (lnet_msg_t *msg)
#else /* !LNET_USE_LIB_FREELIST */
static inline lnet_eq_t *
-lnet_eq_alloc (void)
+lnet_eq_alloc(void)
{
/* NEVER called with liblock held */
lnet_eq_t *eq;
LIBCFS_ALLOC(eq, sizeof(*eq));
- return (eq);
+ return eq;
}
static inline void
@@ -380,7 +380,7 @@ lnet_eq_free(lnet_eq_t *eq)
}
static inline lnet_libmd_t *
-lnet_md_alloc (lnet_md_t *umd)
+lnet_md_alloc(lnet_md_t *umd)
{
/* NEVER called with liblock held */
lnet_libmd_t *md;
@@ -405,7 +405,7 @@ lnet_md_alloc (lnet_md_t *umd)
INIT_LIST_HEAD(&md->md_list);
}
- return (md);
+ return md;
}
static inline void
@@ -423,13 +423,13 @@ lnet_md_free(lnet_libmd_t *md)
}
static inline lnet_me_t *
-lnet_me_alloc (void)
+lnet_me_alloc(void)
{
/* NEVER called with liblock held */
lnet_me_t *me;
LIBCFS_ALLOC(me, sizeof(*me));
- return (me);
+ return me;
}
static inline void
@@ -448,7 +448,7 @@ lnet_msg_alloc(void)
LIBCFS_ALLOC(msg, sizeof(*msg));
/* no need to zero, LIBCFS_ALLOC does for us */
- return (msg);
+ return msg;
}
static inline void
@@ -479,7 +479,7 @@ lnet_res_lh_invalidate(lnet_libhandle_t *lh)
}
static inline void
-lnet_eq2handle (lnet_handle_eq_t *handle, lnet_eq_t *eq)
+lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
{
if (eq == NULL) {
LNetInvalidateHandle(handle);
@@ -503,7 +503,7 @@ lnet_handle2eq(lnet_handle_eq_t *handle)
}
static inline void
-lnet_md2handle (lnet_handle_md_t *handle, lnet_libmd_t *md)
+lnet_md2handle(lnet_handle_md_t *handle, lnet_libmd_t *md)
{
handle->cookie = md->md_lh.lh_cookie;
}
@@ -544,7 +544,7 @@ lnet_wire_handle2md(lnet_handle_wire_t *wh)
}
static inline void
-lnet_me2handle (lnet_handle_me_t *handle, lnet_me_t *me)
+lnet_me2handle(lnet_handle_me_t *handle, lnet_me_t *me)
{
handle->cookie = me->me_lh.lh_cookie;
}
@@ -568,7 +568,7 @@ lnet_handle2me(lnet_handle_me_t *handle)
static inline void
lnet_peer_addref_locked(lnet_peer_t *lp)
{
- LASSERT (lp->lp_refcount > 0);
+ LASSERT(lp->lp_refcount > 0);
lp->lp_refcount++;
}
@@ -577,7 +577,7 @@ extern void lnet_destroy_peer_locked(lnet_peer_t *lp);
static inline void
lnet_peer_decref_locked(lnet_peer_t *lp)
{
- LASSERT (lp->lp_refcount > 0);
+ LASSERT(lp->lp_refcount > 0);
lp->lp_refcount--;
if (lp->lp_refcount == 0)
lnet_destroy_peer_locked(lp);
@@ -660,7 +660,7 @@ void lnet_proc_init(void);
void lnet_proc_fini(void);
int lnet_rtrpools_alloc(int im_a_router);
void lnet_rtrpools_free(void);
-lnet_remotenet_t *lnet_find_net_locked (__u32 net);
+lnet_remotenet_t *lnet_find_net_locked(__u32 net);
int lnet_islocalnid(lnet_nid_t nid);
int lnet_islocalnet(__u32 net);
@@ -733,11 +733,11 @@ int lnet_portals_create(void);
void lnet_portals_destroy(void);
/* message functions */
-int lnet_parse (lnet_ni_t *ni, lnet_hdr_t *hdr,
+int lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr,
lnet_nid_t fromnid, void *private, int rdma_req);
void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen);
-lnet_msg_t *lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *get_msg);
+lnet_msg_t *lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *get_msg);
void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
@@ -748,36 +748,36 @@ void lnet_msg_container_cleanup(struct lnet_msg_container *container);
void lnet_msg_containers_destroy(void);
int lnet_msg_containers_create(void);
-char *lnet_msgtyp2str (int type);
-void lnet_print_hdr (lnet_hdr_t * hdr);
+char *lnet_msgtyp2str(int type);
+void lnet_print_hdr(lnet_hdr_t *hdr);
int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
void lnet_counters_get(lnet_counters_t *counters);
void lnet_counters_reset(void);
-unsigned int lnet_iov_nob (unsigned int niov, struct iovec *iov);
-int lnet_extract_iov (int dst_niov, struct iovec *dst,
+unsigned int lnet_iov_nob(unsigned int niov, struct iovec *iov);
+int lnet_extract_iov(int dst_niov, struct iovec *dst,
int src_niov, struct iovec *src,
unsigned int offset, unsigned int len);
-unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov);
-int lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
+unsigned int lnet_kiov_nob(unsigned int niov, lnet_kiov_t *iov);
+int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
int src_niov, lnet_kiov_t *src,
unsigned int offset, unsigned int len);
-void lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov,
+void lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov,
unsigned int doffset,
unsigned int nsiov, struct iovec *siov,
unsigned int soffset, unsigned int nob);
-void lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov,
+void lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov,
unsigned int iovoffset,
unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int kiovoffset, unsigned int nob);
-void lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov,
+void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int kiovoffset,
unsigned int niov, struct iovec *iov,
unsigned int iovoffset, unsigned int nob);
-void lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov,
+void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
unsigned int doffset,
unsigned int nskiov, lnet_kiov_t *skiov,
unsigned int soffset, unsigned int nob);
@@ -829,7 +829,7 @@ void lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd);
void lnet_register_lnd(lnd_t *lnd);
void lnet_unregister_lnd(lnd_t *lnd);
-int lnet_set_ip_niaddr (lnet_ni_t *ni);
+int lnet_set_ip_niaddr(lnet_ni_t *ni);
int lnet_connect(socket_t **sockp, lnet_nid_t peer_nid,
__u32 local_ip, __u32 peer_ip, int peer_port);
@@ -858,9 +858,9 @@ void lnet_ping_target_fini(void);
int lnet_ping(lnet_process_id_t id, int timeout_ms,
lnet_process_id_t *ids, int n_ids);
-int lnet_parse_ip2nets (char **networksp, char *ip2nets);
-int lnet_parse_routes (char *route_str, int *im_a_router);
-int lnet_parse_networks (struct list_head *nilist, char *networks);
+int lnet_parse_ip2nets(char **networksp, char *ip2nets);
+int lnet_parse_routes(char *route_str, int *im_a_router);
+int lnet_parse_networks(struct list_head *nilist, char *networks);
int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
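The freelist helpers declared above (lnet_freelist_alloc()/lnet_freelist_free()) pop and push objects on an intrusive list while the caller already holds the lock, recovering the enclosing object from its fo_contents payload. A minimal user-space sketch of that pattern, using a hypothetical struct free_obj and the standard offsetof() container-of idiom rather than the kernel's list_entry(), is shown below; it is an illustration of the idea, not the LNet implementation.

        #include <stddef.h>

        struct list_node {
                struct list_node *next;
        };

        struct free_obj {
                struct list_node link;          /* intrusive link, like fo_list */
                char contents[64];              /* payload area, like fo_contents */
        };

        /* Pop one payload pointer, or NULL when the pool is empty (lock held by caller). */
        static void *freelist_alloc(struct list_node *head)
        {
                struct list_node *n = head->next;
                struct free_obj *o;

                if (n == NULL)
                        return NULL;

                head->next = n->next;
                o = (struct free_obj *)((char *)n - offsetof(struct free_obj, link));
                return o->contents;
        }

        /* Push an object back by recovering the enclosing free_obj from its payload. */
        static void freelist_free(struct list_node *head, void *obj)
        {
                struct free_obj *o =
                        (struct free_obj *)((char *)obj - offsetof(struct free_obj, contents));

                o->link.next = head->next;
                head->next = &o->link;
        }

        int main(void)
        {
                struct list_node head = { NULL };
                struct free_obj pool[2] = { { { NULL }, "" }, { { NULL }, "" } };
                void *a;

                freelist_free(&head, pool[0].contents);         /* seed the pool */
                freelist_free(&head, pool[1].contents);

                a = freelist_alloc(&head);                      /* pops the last freed object */
                freelist_free(&head, a);
                return 0;
        }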
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 6825b452e5f..2ddc3aadb8d 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1768,7 +1768,10 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
return (0);
- return (count == 0 ? -ENOENT : 0);
+ if (count == 0)
+ return -ENOENT;
+ else
+ return 0;
}
void
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index bb15bde0704..92c60a75664 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -53,6 +53,7 @@ lnet_acceptor_port(void)
{
return accept_port;
}
+EXPORT_SYMBOL(lnet_acceptor_port);
static inline int
lnet_accept_magic(__u32 magic, __u32 constant)
@@ -61,9 +62,6 @@ lnet_accept_magic(__u32 magic, __u32 constant)
magic == __swab32(constant));
}
-
-EXPORT_SYMBOL(lnet_acceptor_port);
-
static char *accept = "secure";
CFS_MODULE_PARM(accept, "s", charp, 0444,
@@ -75,7 +73,7 @@ CFS_MODULE_PARM(accept_backlog, "i", int, 0444,
CFS_MODULE_PARM(accept_timeout, "i", int, 0644,
"Acceptor's timeout (seconds)");
-static char *accept_type = NULL;
+static char *accept_type;
int
lnet_acceptor_get_tunables(void)
@@ -95,57 +93,45 @@ lnet_acceptor_timeout(void)
EXPORT_SYMBOL(lnet_acceptor_timeout);
void
-lnet_connect_console_error (int rc, lnet_nid_t peer_nid,
+lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
__u32 peer_ip, int peer_port)
{
switch (rc) {
/* "normal" errors */
case -ECONNREFUSED:
- CNETERR("Connection to %s at host %pI4h on port %d was "
- "refused: check that Lustre is running on that node.\n",
+ CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
libcfs_nid2str(peer_nid),
&peer_ip, peer_port);
break;
case -EHOSTUNREACH:
case -ENETUNREACH:
- CNETERR("Connection to %s at host %pI4h "
- "was unreachable: the network or that node may "
- "be down, or Lustre may be misconfigured.\n",
+ CNETERR("Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
libcfs_nid2str(peer_nid), &peer_ip);
break;
case -ETIMEDOUT:
- CNETERR("Connection to %s at host %pI4h on "
- "port %d took too long: that node may be hung "
- "or experiencing high load.\n",
+ CNETERR("Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
libcfs_nid2str(peer_nid),
&peer_ip, peer_port);
break;
case -ECONNRESET:
- LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h"
- " on port %d was reset: "
- "is it running a compatible version of "
- "Lustre and is %s one of its NIDs?\n",
+ LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
libcfs_nid2str(peer_nid),
&peer_ip, peer_port,
libcfs_nid2str(peer_nid));
break;
case -EPROTO:
- LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at "
- "host %pI4h on port %d: is it running "
- "a compatible version of Lustre?\n",
+ LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
libcfs_nid2str(peer_nid),
&peer_ip, peer_port);
break;
case -EADDRINUSE:
- LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to "
- "connect to %s at host %pI4h on port "
- "%d\n", libcfs_nid2str(peer_nid),
+ LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to connect to %s at host %pI4h on port %d\n",
+ libcfs_nid2str(peer_nid),
&peer_ip, peer_port);
break;
default:
- LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s"
- " at host %pI4h on port %d\n", rc,
- libcfs_nid2str(peer_nid),
+ LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
+ rc, libcfs_nid2str(peer_nid),
&peer_ip, peer_port);
break;
}
@@ -162,7 +148,7 @@ lnet_connect(socket_t **sockp, lnet_nid_t peer_nid,
int port;
int fatal;
- CLASSERT (sizeof(cr) <= 16); /* not too big to be on the stack */
+ CLASSERT(sizeof(cr) <= 16); /* not too big to be on the stack */
for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
@@ -178,7 +164,7 @@ lnet_connect(socket_t **sockp, lnet_nid_t peer_nid,
continue;
}
- CLASSERT (LNET_PROTO_ACCEPTOR_VERSION == 1);
+ CLASSERT(LNET_PROTO_ACCEPTOR_VERSION == 1);
cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
@@ -232,10 +218,10 @@ lnet_accept(socket_t *sock, __u32 magic)
lnet_ni_t *ni;
char *str;
- LASSERT (sizeof(cr) <= 16); /* not too big for the stack */
+ LASSERT(sizeof(cr) <= 16); /* not too big for the stack */
rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT (rc == 0); /* we succeeded before */
+ LASSERT(rc == 0); /* we succeeded before */
if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
@@ -245,15 +231,14 @@ lnet_accept(socket_t *sock, __u32 magic)
* thing sent will be a version query. I send back
* LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
- memset (&cr, 0, sizeof(cr));
+ memset(&cr, 0, sizeof(cr));
cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
rc = libcfs_sock_write(sock, &cr, sizeof(cr),
accept_timeout);
if (rc != 0)
- CERROR("Error sending magic+version in response"
- "to LNET magic from %pI4h: %d\n",
+ CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
&peer_ip, rc);
return -EPROTO;
}
@@ -265,8 +250,7 @@ lnet_accept(socket_t *sock, __u32 magic)
else
str = "unrecognised";
- LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h"
- " magic %08x: %s acceptor protocol\n",
+ LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
&peer_ip, magic, str);
return -EPROTO;
}
@@ -277,8 +261,8 @@ lnet_accept(socket_t *sock, __u32 magic)
sizeof(cr.acr_version),
accept_timeout);
if (rc != 0) {
- CERROR("Error %d reading connection request version from "
- "%pI4h\n", rc, &peer_ip);
+ CERROR("Error %d reading connection request version from %pI4h\n",
+ rc, &peer_ip);
return -EIO;
}
@@ -292,7 +276,7 @@ lnet_accept(socket_t *sock, __u32 magic)
* "old". */
int peer_version = cr.acr_version;
- memset (&cr, 0, sizeof(cr));
+ memset(&cr, 0, sizeof(cr));
cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
@@ -300,8 +284,7 @@ lnet_accept(socket_t *sock, __u32 magic)
accept_timeout);
if (rc != 0)
- CERROR("Error sending magic+version in response"
- "to version %d from %pI4h: %d\n",
+ CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
peer_version, &peer_ip, rc);
return -EPROTO;
}
@@ -311,8 +294,8 @@ lnet_accept(socket_t *sock, __u32 magic)
offsetof(lnet_acceptor_connreq_t, acr_nid),
accept_timeout);
if (rc != 0) {
- CERROR("Error %d reading connection request from "
- "%pI4h\n", rc, &peer_ip);
+ CERROR("Error %d reading connection request from %pI4h\n",
+ rc, &peer_ip);
return -EIO;
}
@@ -324,8 +307,7 @@ lnet_accept(socket_t *sock, __u32 magic)
ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
if (ni != NULL)
lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h"
- " for %s: No matching NI\n",
+ LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
&peer_ip, libcfs_nid2str(cr.acr_nid));
return -EPERM;
}
@@ -333,8 +315,7 @@ lnet_accept(socket_t *sock, __u32 magic)
if (ni->ni_lnd->lnd_accept == NULL) {
/* This catches a request for the loopback LND */
lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h"
- " for %s: NI doesn not accept IP connections\n",
+ LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
&peer_ip, libcfs_nid2str(cr.acr_nid));
return -EPERM;
}
@@ -358,7 +339,7 @@ lnet_acceptor(void *arg)
int peer_port;
int secure = (int)((long_ptr_t)arg);
- LASSERT (lnet_acceptor_state.pta_sock == NULL);
+ LASSERT(lnet_acceptor_state.pta_sock == NULL);
cfs_block_allsigs();
@@ -366,12 +347,10 @@ lnet_acceptor(void *arg)
0, accept_port, accept_backlog);
if (rc != 0) {
if (rc == -EADDRINUSE)
- LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port"
- " %d: port already in use\n",
+ LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
accept_port);
else
- LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port "
- "%d: unexpected error %d\n",
+ LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n",
accept_port, rc);
lnet_acceptor_state.pta_sock = NULL;
@@ -410,8 +389,7 @@ lnet_acceptor(void *arg)
}
if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- CERROR("Refusing connection from %pI4h: "
- "insecure port %d\n",
+ CERROR("Refusing connection from %pI4h: insecure port %d\n",
&peer_ip, peer_port);
goto failed;
}
@@ -419,8 +397,8 @@ lnet_acceptor(void *arg)
rc = libcfs_sock_read(newsock, &magic, sizeof(magic),
accept_timeout);
if (rc != 0) {
- CERROR("Error %d reading connection request from "
- "%pI4h\n", rc, &peer_ip);
+ CERROR("Error %d reading connection request from %pI4h\n",
+ rc, &peer_ip);
goto failed;
}
@@ -430,7 +408,7 @@ lnet_acceptor(void *arg)
continue;
- failed:
+failed:
libcfs_sock_release(newsock);
}
@@ -469,7 +447,7 @@ lnet_acceptor_start(void)
long rc2;
long secure;
- LASSERT (lnet_acceptor_state.pta_sock == NULL);
+ LASSERT(lnet_acceptor_state.pta_sock == NULL);
rc = lnet_acceptor_get_tunables();
if (rc != 0)
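The acceptor hunks above hinge on lnet_accept_magic(), which recognises a connection request whose magic matches in either byte order, so a peer of the opposite endianness is still identified before the version exchange. A small stand-alone sketch of that check, using a hand-rolled swap32() in place of the kernel's __swab32() (names here are illustrative only):

        #include <stdint.h>

        static uint32_t swap32(uint32_t v)
        {
                return ((v & 0x000000ffu) << 24) |
                       ((v & 0x0000ff00u) <<  8) |
                       ((v & 0x00ff0000u) >>  8) |
                       ((v & 0xff000000u) >> 24);
        }

        /* Match the expected magic in native or byte-swapped form, as lnet_accept_magic() does. */
        static int accept_magic(uint32_t magic, uint32_t expected)
        {
                return magic == expected || magic == swap32(expected);
        }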
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 28711e6e8b0..de323f779db 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -43,7 +43,7 @@ typedef struct { /* tmp struct for parsing routes */
char ltb_text[0]; /* text buffer */
} lnet_text_buf_t;
-static int lnet_tbnob = 0; /* track text buf allocation */
+static int lnet_tbnob; /* track text buf allocation */
#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */
#define LNET_SINGLE_TEXTBUF_NOB (4<<10)
@@ -65,7 +65,7 @@ lnet_syntax(char *name, char *str, int offset, int width)
}
int
-lnet_issep (char c)
+lnet_issep(char c)
{
switch (c) {
case '\n':
@@ -83,7 +83,7 @@ lnet_net_unique(__u32 net, struct list_head *nilist)
struct list_head *tmp;
lnet_ni_t *ni;
- list_for_each (tmp, nilist) {
+ list_for_each(tmp, nilist) {
ni = list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net)
@@ -188,8 +188,8 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
/* _WAY_ conservative */
- LCONSOLE_ERROR_MSG(0x112, "Can't parse networks: string too "
- "long\n");
+ LCONSOLE_ERROR_MSG(0x112,
+ "Can't parse networks: string too long\n");
return -EINVAL;
}
@@ -201,7 +201,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
the_lnet.ln_network_tokens = tokens;
the_lnet.ln_network_tokens_nob = tokensize;
- memcpy (tokens, networks, tokensize);
+ memcpy(tokens, networks, tokensize);
str = tmp = tokens;
/* Add in the loopback network */
@@ -255,8 +255,8 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
net = libcfs_str2net(cfs_trimwhite(str));
if (net == LNET_NIDNET(LNET_NID_ANY)) {
- LCONSOLE_ERROR_MSG(0x113, "Unrecognised network"
- " type\n");
+ LCONSOLE_ERROR_MSG(0x113,
+ "Unrecognised network type\n");
tmp = str;
goto failed_syntax;
}
@@ -313,8 +313,8 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
}
if (niface == LNET_MAX_INTERFACES) {
- LCONSOLE_ERROR_MSG(0x115, "Too many interfaces "
- "for net %s\n",
+ LCONSOLE_ERROR_MSG(0x115,
+ "Too many interfaces for net %s\n",
libcfs_net2str(net));
goto failed;
}
@@ -366,7 +366,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
}
lnet_text_buf_t *
-lnet_new_text_buf (int str_len)
+lnet_new_text_buf(int str_len)
{
lnet_text_buf_t *ltb;
int nob;
@@ -395,7 +395,7 @@ lnet_new_text_buf (int str_len)
}
void
-lnet_free_text_buf (lnet_text_buf_t *ltb)
+lnet_free_text_buf(lnet_text_buf_t *ltb)
{
lnet_tbnob -= ltb->ltb_size;
LIBCFS_FREE(ltb, ltb->ltb_size);
@@ -420,7 +420,7 @@ lnet_print_text_bufs(struct list_head *tbs)
struct list_head *tmp;
lnet_text_buf_t *ltb;
- list_for_each (tmp, tbs) {
+ list_for_each(tmp, tbs) {
ltb = list_entry(tmp, lnet_text_buf_t, ltb_list);
CDEBUG(D_WARNING, "%s\n", ltb->ltb_text);
@@ -430,7 +430,7 @@ lnet_print_text_bufs(struct list_head *tbs)
}
int
-lnet_str2tbs_sep (struct list_head *tbs, char *str)
+lnet_str2tbs_sep(struct list_head *tbs, char *str)
{
struct list_head pending;
char *sep;
@@ -488,7 +488,7 @@ lnet_str2tbs_sep (struct list_head *tbs, char *str)
}
int
-lnet_expand1tb (struct list_head *list,
+lnet_expand1tb(struct list_head *list,
char *str, char *sep1, char *sep2,
char *item, int itemlen)
{
@@ -496,8 +496,8 @@ lnet_expand1tb (struct list_head *list,
int len2 = strlen(sep2 + 1);
lnet_text_buf_t *ltb;
- LASSERT (*sep1 == '[');
- LASSERT (*sep2 == ']');
+ LASSERT(*sep1 == '[');
+ LASSERT(*sep2 == ']');
ltb = lnet_new_text_buf(len1 + itemlen + len2);
if (ltb == NULL)
@@ -513,7 +513,7 @@ lnet_expand1tb (struct list_head *list,
}
int
-lnet_str2tbs_expand (struct list_head *tbs, char *str)
+lnet_str2tbs_expand(struct list_head *tbs, char *str)
{
char num[16];
struct list_head pending;
@@ -593,7 +593,7 @@ lnet_str2tbs_expand (struct list_head *tbs, char *str)
}
int
-lnet_parse_hops (char *str, unsigned int *hops)
+lnet_parse_hops(char *str, unsigned int *hops)
{
int len = strlen(str);
int nob = len;
@@ -605,7 +605,7 @@ lnet_parse_hops (char *str, unsigned int *hops)
int
-lnet_parse_route (char *str, int *im_a_router)
+lnet_parse_route(char *str, int *im_a_router)
{
/* static scratch buffer OK (single threaded) */
static char cmd[LNET_SINGLE_TEXTBUF_NOB];
@@ -702,28 +702,27 @@ lnet_parse_route (char *str, int *im_a_router)
if (!got_hops)
hops = 1;
- LASSERT (!list_empty(&nets));
- LASSERT (!list_empty(&gateways));
+ LASSERT(!list_empty(&nets));
+ LASSERT(!list_empty(&gateways));
- list_for_each (tmp1, &nets) {
+ list_for_each(tmp1, &nets) {
ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
net = libcfs_str2net(ltb->ltb_text);
- LASSERT (net != LNET_NIDNET(LNET_NID_ANY));
+ LASSERT(net != LNET_NIDNET(LNET_NID_ANY));
- list_for_each (tmp2, &gateways) {
+ list_for_each(tmp2, &gateways) {
ltb = list_entry(tmp2, lnet_text_buf_t, ltb_list);
nid = libcfs_str2nid(ltb->ltb_text);
- LASSERT (nid != LNET_NID_ANY);
+ LASSERT(nid != LNET_NID_ANY);
if (lnet_islocalnid(nid)) {
*im_a_router = 1;
continue;
}
- rc = lnet_add_route (net, hops, nid);
+ rc = lnet_add_route(net, hops, nid);
if (rc != 0) {
- CERROR("Can't create route "
- "to %s via %s\n",
+ CERROR("Can't create route to %s via %s\n",
libcfs_net2str(net),
libcfs_nid2str(nid));
goto out;
@@ -763,7 +762,7 @@ lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
}
int
-lnet_parse_routes (char *routes, int *im_a_router)
+lnet_parse_routes(char *routes, int *im_a_router)
{
struct list_head tbs;
int rc = 0;
@@ -779,14 +778,14 @@ lnet_parse_routes (char *routes, int *im_a_router)
rc = lnet_parse_route_tbs(&tbs, im_a_router);
}
- LASSERT (lnet_tbnob == 0);
+ LASSERT(lnet_tbnob == 0);
return rc;
}
int
lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
{
- LIST_HEAD (list);
+ LIST_HEAD(list);
int rc;
int i;
@@ -815,7 +814,7 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
char *token;
int rc;
- LASSERT (strlen(net_entry) < sizeof(tokens));
+ LASSERT(strlen(net_entry) < sizeof(tokens));
/* work on a copy of the string */
strcpy(tokens, net_entry);
@@ -889,8 +888,8 @@ lnet_splitnets(char *source, struct list_head *nets)
char *bracket;
__u32 net;
- LASSERT (!list_empty(nets));
- LASSERT (nets->next == nets->prev); /* single entry */
+ LASSERT(!list_empty(nets));
+ LASSERT(nets->next == nets->prev); /* single entry */
tb = list_entry(nets->next, lnet_text_buf_t, ltb_list);
@@ -957,7 +956,7 @@ lnet_splitnets(char *source, struct list_head *nets)
}
int
-lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
+lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
{
static char networks[LNET_SINGLE_TEXTBUF_NOB];
static char source[LNET_SINGLE_TEXTBUF_NOB];
@@ -979,7 +978,7 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
INIT_LIST_HEAD(&raw_entries);
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
- LASSERT (lnet_tbnob == 0);
+ LASSERT(lnet_tbnob == 0);
return -EINVAL;
}
@@ -1017,16 +1016,16 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
break;
dup = 0;
- list_for_each (t, &current_nets) {
+ list_for_each(t, &current_nets) {
tb = list_entry(t, lnet_text_buf_t, ltb_list);
net1 = lnet_netspec2net(tb->ltb_text);
- LASSERT (net1 != LNET_NIDNET(LNET_NID_ANY));
+ LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));
list_for_each(t2, &matched_nets) {
tb2 = list_entry(t2, lnet_text_buf_t,
ltb_list);
net2 = lnet_netspec2net(tb2->ltb_text);
- LASSERT (net2 != LNET_NIDNET(LNET_NID_ANY));
+ LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));
if (net1 == net2) {
dup = 1;
@@ -1067,7 +1066,7 @@ lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
lnet_free_text_bufs(&raw_entries);
lnet_free_text_bufs(&matched_nets);
lnet_free_text_bufs(&current_nets);
- LASSERT (lnet_tbnob == 0);
+ LASSERT(lnet_tbnob == 0);
if (rc < 0)
return rc;
@@ -1083,7 +1082,7 @@ lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
}
int
-lnet_ipaddr_enumerate (__u32 **ipaddrsp)
+lnet_ipaddr_enumerate(__u32 **ipaddrsp)
{
int up;
__u32 netmask;
@@ -1149,21 +1148,22 @@ lnet_ipaddr_enumerate (__u32 **ipaddrsp)
}
int
-lnet_parse_ip2nets (char **networksp, char *ip2nets)
+lnet_parse_ip2nets(char **networksp, char *ip2nets)
{
__u32 *ipaddrs;
int nip = lnet_ipaddr_enumerate(&ipaddrs);
int rc;
if (nip < 0) {
- LCONSOLE_ERROR_MSG(0x117, "Error %d enumerating local IP "
- "interfaces for ip2nets to match\n", nip);
+ LCONSOLE_ERROR_MSG(0x117,
+ "Error %d enumerating local IP interfaces for ip2nets to match\n",
+ nip);
return nip;
}
if (nip == 0) {
- LCONSOLE_ERROR_MSG(0x118, "No local IP interfaces "
- "for ip2nets to match\n");
+ LCONSOLE_ERROR_MSG(0x118,
+ "No local IP interfaces for ip2nets to match\n");
return -ENOENT;
}
@@ -1176,8 +1176,8 @@ lnet_parse_ip2nets (char **networksp, char *ip2nets)
}
if (rc == 0) {
- LCONSOLE_ERROR_MSG(0x11a, "ip2nets does not match "
- "any local IP interfaces\n");
+ LCONSOLE_ERROR_MSG(0x11a,
+ "ip2nets does not match any local IP interfaces\n");
return -ENOENT;
}
@@ -1185,7 +1185,7 @@ lnet_parse_ip2nets (char **networksp, char *ip2nets)
}
int
-lnet_set_ip_niaddr (lnet_ni_t *ni)
+lnet_set_ip_niaddr(lnet_ni_t *ni)
{
__u32 net = LNET_NIDNET(ni->ni_nid);
char **names;
@@ -1201,7 +1201,7 @@ lnet_set_ip_niaddr (lnet_ni_t *ni)
if (ni->ni_interfaces[0] != NULL) {
- CLASSERT (LNET_MAX_INTERFACES > 1);
+ CLASSERT(LNET_MAX_INTERFACES > 1);
if (ni->ni_interfaces[1] != NULL) {
CERROR("Net %s doesn't support multiple interfaces\n",
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 49b0f1287a6..b6f8ad38628 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -47,14 +47,14 @@ CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
"Reserved");
int
-lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
+lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
lnet_test_peer_t *tp;
struct list_head *el;
struct list_head *next;
struct list_head cull;
- LASSERT (the_lnet.ln_init);
+ LASSERT(the_lnet.ln_init);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
@@ -77,31 +77,30 @@ lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
lnet_net_lock(0);
- list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = list_entry (el, lnet_test_peer_t, tp_list);
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
if (tp->tp_threshold == 0 || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
- tp->tp_nid == nid) /* matched this one */
- {
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ tp->tp_nid == nid) { /* matched this one */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
}
}
lnet_net_unlock(0);
- while (!list_empty (&cull)) {
- tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
- list_del (&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
+ list_del(&tp->tp_list);
+ LIBCFS_FREE(tp, sizeof(*tp));
}
return 0;
}
static int
-fail_peer (lnet_nid_t nid, int outgoing)
+fail_peer(lnet_nid_t nid, int outgoing)
{
lnet_test_peer_t *tp;
struct list_head *el;
@@ -109,13 +108,13 @@ fail_peer (lnet_nid_t nid, int outgoing)
struct list_head cull;
int fail = 0;
- INIT_LIST_HEAD (&cull);
+ INIT_LIST_HEAD(&cull);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
- list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = list_entry (el, lnet_test_peer_t, tp_list);
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
if (tp->tp_threshold == 0) {
/* zombie entry */
@@ -123,8 +122,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
/* only cull zombies on outgoing tests,
* since we may be at interrupt priority on
* incoming messages. */
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
}
continue;
}
@@ -138,8 +137,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
if (outgoing &&
tp->tp_threshold == 0) {
/* see above */
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
}
}
break;
@@ -148,30 +147,30 @@ fail_peer (lnet_nid_t nid, int outgoing)
lnet_net_unlock(0);
- while (!list_empty (&cull)) {
- tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
- list_del (&tp->tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
+ list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
+ LIBCFS_FREE(tp, sizeof(*tp));
}
- return (fail);
+ return fail;
}
unsigned int
-lnet_iov_nob (unsigned int niov, struct iovec *iov)
+lnet_iov_nob(unsigned int niov, struct iovec *iov)
{
unsigned int nob = 0;
while (niov-- > 0)
nob += (iov++)->iov_len;
- return (nob);
+ return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);
void
-lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
+lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
unsigned int nsiov, struct iovec *siov, unsigned int soffset,
unsigned int nob)
{
@@ -182,31 +181,31 @@ lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
return;
/* skip complete frags before 'doffset' */
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
while (doffset >= diov->iov_len) {
doffset -= diov->iov_len;
diov++;
ndiov--;
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
}
/* skip complete frags before 'soffset' */
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
while (soffset >= siov->iov_len) {
soffset -= siov->iov_len;
siov++;
nsiov--;
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
}
do {
- LASSERT (ndiov > 0);
- LASSERT (nsiov > 0);
+ LASSERT(ndiov > 0);
+ LASSERT(nsiov > 0);
this_nob = MIN(diov->iov_len - doffset,
siov->iov_len - soffset);
this_nob = MIN(this_nob, nob);
- memcpy ((char *)diov->iov_base + doffset,
+ memcpy((char *)diov->iov_base + doffset,
(char *)siov->iov_base + soffset, this_nob);
nob -= this_nob;
@@ -230,7 +229,7 @@ lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
EXPORT_SYMBOL(lnet_copy_iov2iov);
int
-lnet_extract_iov (int dst_niov, struct iovec *dst,
+lnet_extract_iov(int dst_niov, struct iovec *dst,
int src_niov, struct iovec *src,
unsigned int offset, unsigned int len)
{
@@ -241,27 +240,27 @@ lnet_extract_iov (int dst_niov, struct iovec *dst,
unsigned int niov;
if (len == 0) /* no data => */
- return (0); /* no frags */
+ return 0; /* no frags */
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
while (offset >= src->iov_len) { /* skip initial frags */
offset -= src->iov_len;
src_niov--;
src++;
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
}
niov = 1;
for (;;) {
- LASSERT (src_niov > 0);
- LASSERT ((int)niov <= dst_niov);
+ LASSERT(src_niov > 0);
+ LASSERT((int)niov <= dst_niov);
frag_len = src->iov_len - offset;
dst->iov_base = ((char *)src->iov_base) + offset;
if (len <= frag_len) {
dst->iov_len = len;
- return (niov);
+ return niov;
}
dst->iov_len = frag_len;
@@ -278,21 +277,21 @@ EXPORT_SYMBOL(lnet_extract_iov);
unsigned int
-lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
+lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
unsigned int nob = 0;
while (niov-- > 0)
nob += (kiov++)->kiov_len;
- return (nob);
+ return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);
void
-lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
- unsigned int nob)
+lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
+ unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
+ unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
unsigned int this_nob;
@@ -302,27 +301,27 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
while (doffset >= diov->kiov_len) {
doffset -= diov->kiov_len;
diov++;
ndiov--;
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
}
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
while (soffset >= siov->kiov_len) {
soffset -= siov->kiov_len;
siov++;
nsiov--;
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
}
do {
- LASSERT (ndiov > 0);
- LASSERT (nsiov > 0);
+ LASSERT(ndiov > 0);
+ LASSERT(nsiov > 0);
this_nob = MIN(diov->kiov_len - doffset,
siov->kiov_len - soffset);
this_nob = MIN(this_nob, nob);
@@ -338,7 +337,7 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
* However in practice at least one of the kiovs will be mapped
* kernel pages and the map/unmap will be NOOPs */
- memcpy (daddr, saddr, this_nob);
+ memcpy(daddr, saddr, this_nob);
nob -= this_nob;
if (diov->kiov_len > doffset + this_nob) {
@@ -372,9 +371,9 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
-lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
- unsigned int nob)
+lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
+ unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int kiovoffset, unsigned int nob)
{
/* NB iov, kiov are READ-ONLY */
unsigned int this_nob;
@@ -383,27 +382,27 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
while (iovoffset >= iov->iov_len) {
iovoffset -= iov->iov_len;
iov++;
niov--;
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
}
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
while (kiovoffset >= kiov->kiov_len) {
kiovoffset -= kiov->kiov_len;
kiov++;
nkiov--;
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
}
do {
- LASSERT (niov > 0);
- LASSERT (nkiov > 0);
+ LASSERT(niov > 0);
+ LASSERT(nkiov > 0);
this_nob = MIN(iov->iov_len - iovoffset,
kiov->kiov_len - kiovoffset);
this_nob = MIN(this_nob, nob);
@@ -412,7 +411,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
- memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
+ memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
nob -= this_nob;
if (iov->iov_len > iovoffset + this_nob) {
@@ -442,9 +441,10 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
EXPORT_SYMBOL(lnet_copy_kiov2iov);
void
-lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
- unsigned int niov, struct iovec *iov, unsigned int iovoffset,
- unsigned int nob)
+lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int kiovoffset, unsigned int niov,
+ struct iovec *iov, unsigned int iovoffset,
+ unsigned int nob)
{
/* NB kiov, iov are READ-ONLY */
unsigned int this_nob;
@@ -453,27 +453,27 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
while (kiovoffset >= kiov->kiov_len) {
kiovoffset -= kiov->kiov_len;
kiov++;
nkiov--;
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
}
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
while (iovoffset >= iov->iov_len) {
iovoffset -= iov->iov_len;
iov++;
niov--;
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
}
do {
- LASSERT (nkiov > 0);
- LASSERT (niov > 0);
+ LASSERT(nkiov > 0);
+ LASSERT(niov > 0);
this_nob = MIN(kiov->kiov_len - kiovoffset,
iov->iov_len - iovoffset);
this_nob = MIN(this_nob, nob);
@@ -482,7 +482,7 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
- memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
nob -= this_nob;
if (kiov->kiov_len > kiovoffset + this_nob) {
@@ -511,7 +511,7 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
EXPORT_SYMBOL(lnet_copy_iov2kiov);
int
-lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
+lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
int src_niov, lnet_kiov_t *src,
unsigned int offset, unsigned int len)
{
@@ -522,20 +522,20 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
unsigned int niov;
if (len == 0) /* no data => */
- return (0); /* no frags */
+ return 0; /* no frags */
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
while (offset >= src->kiov_len) { /* skip initial frags */
offset -= src->kiov_len;
src_niov--;
src++;
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
}
niov = 1;
for (;;) {
- LASSERT (src_niov > 0);
- LASSERT ((int)niov <= dst_niov);
+ LASSERT(src_niov > 0);
+ LASSERT((int)niov <= dst_niov);
frag_len = src->kiov_len - offset;
dst->kiov_page = src->kiov_page;
@@ -543,12 +543,13 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
if (len <= frag_len) {
dst->kiov_len = len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
- return (niov);
+ LASSERT(dst->kiov_offset + dst->kiov_len
+ <= PAGE_CACHE_SIZE);
+ return niov;
}
dst->kiov_len = frag_len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
len -= frag_len;
dst++;
@@ -569,8 +570,8 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
lnet_kiov_t *kiov = NULL;
int rc;
- LASSERT (!in_interrupt ());
- LASSERT (mlen == 0 || msg != NULL);
+ LASSERT(!in_interrupt());
+ LASSERT(mlen == 0 || msg != NULL);
if (msg != NULL) {
LASSERT(msg->msg_receiving);
@@ -587,8 +588,8 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
iov = msg->msg_iov;
kiov = msg->msg_kiov;
- LASSERT (niov > 0);
- LASSERT ((iov == NULL) != (kiov == NULL));
+ LASSERT(niov > 0);
+ LASSERT((iov == NULL) != (kiov == NULL));
}
}
@@ -603,12 +604,12 @@ lnet_setpayloadbuffer(lnet_msg_t *msg)
{
lnet_libmd_t *md = msg->msg_md;
- LASSERT (msg->msg_len > 0);
- LASSERT (!msg->msg_routing);
- LASSERT (md != NULL);
- LASSERT (msg->msg_niov == 0);
- LASSERT (msg->msg_iov == NULL);
- LASSERT (msg->msg_kiov == NULL);
+ LASSERT(msg->msg_len > 0);
+ LASSERT(!msg->msg_routing);
+ LASSERT(md != NULL);
+ LASSERT(msg->msg_niov == 0);
+ LASSERT(msg->msg_iov == NULL);
+ LASSERT(msg->msg_kiov == NULL);
msg->msg_niov = md->md_niov;
if ((md->md_options & LNET_MD_KIOV) != 0)
@@ -629,7 +630,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
if (len != 0)
lnet_setpayloadbuffer(msg);
- memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
+ memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
msg->msg_hdr.type = cpu_to_le32(type);
msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
@@ -644,8 +645,8 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
void *priv = msg->msg_private;
int rc;
- LASSERT (!in_interrupt ());
- LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
+ LASSERT(!in_interrupt());
+ LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
(msg->msg_txcredit && msg->msg_peertxcredit));
rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
@@ -698,12 +699,12 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
/* NB: always called with lnet_net_lock held */
static inline int
-lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
+lnet_peer_is_alive(lnet_peer_t *lp, cfs_time_t now)
{
int alive;
cfs_time_t deadline;
- LASSERT (lnet_peer_aliveness_enabled(lp));
+ LASSERT(lnet_peer_aliveness_enabled(lp));
/* Trust lnet_notify() if it has more recent aliveness news, but
* ignore the initial assumed death (see lnet_peers_start_down()).
@@ -731,7 +732,7 @@ lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
int
-lnet_peer_alive_locked (lnet_peer_t *lp)
+lnet_peer_alive_locked(lnet_peer_t *lp)
{
cfs_time_t now = cfs_time_current();
@@ -809,7 +810,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
}
if (!msg->msg_peertxcredit) {
- LASSERT ((lp->lp_txcredits < 0) ==
+ LASSERT((lp->lp_txcredits < 0) ==
!list_empty(&lp->lp_txq));
msg->msg_peertxcredit = 1;
@@ -873,7 +874,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
}
int
-lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
+lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
/* lnet_parse is going to lnet_net_unlock immediately after this, so it
* sets do_recv FALSE and I don't do the unlock/send/lock bit. I
@@ -882,18 +883,18 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
lnet_rtrbufpool_t *rbp;
lnet_rtrbuf_t *rb;
- LASSERT (msg->msg_iov == NULL);
- LASSERT (msg->msg_kiov == NULL);
- LASSERT (msg->msg_niov == 0);
- LASSERT (msg->msg_routing);
- LASSERT (msg->msg_receiving);
- LASSERT (!msg->msg_sending);
+ LASSERT(msg->msg_iov == NULL);
+ LASSERT(msg->msg_kiov == NULL);
+ LASSERT(msg->msg_niov == 0);
+ LASSERT(msg->msg_routing);
+ LASSERT(msg->msg_receiving);
+ LASSERT(!msg->msg_sending);
/* non-lnet_parse callers only receive delayed messages */
LASSERT(!do_recv || msg->msg_rx_delayed);
if (!msg->msg_peerrtrcredit) {
- LASSERT ((lp->lp_rtrcredits < 0) ==
+ LASSERT((lp->lp_rtrcredits < 0) ==
!list_empty(&lp->lp_rtrq));
msg->msg_peerrtrcredit = 1;
@@ -913,7 +914,7 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
rbp = lnet_msg2bufpool(msg);
if (!msg->msg_rtrcredit) {
- LASSERT ((rbp->rbp_credits < 0) ==
+ LASSERT((rbp->rbp_credits < 0) ==
!list_empty(&rbp->rbp_msgs));
msg->msg_rtrcredit = 1;
@@ -930,7 +931,7 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
}
}
- LASSERT (!list_empty(&rbp->rbp_bufs));
+ LASSERT(!list_empty(&rbp->rbp_bufs));
rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
list_del(&rb->rb_list);
@@ -985,7 +986,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
!list_empty(&txpeer->lp_txq));
txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
- LASSERT (txpeer->lp_txqnob >= 0);
+ LASSERT(txpeer->lp_txqnob >= 0);
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
@@ -1020,11 +1021,11 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg)
/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
* there until it gets one allocated, or aborts the wait
* itself */
- LASSERT (msg->msg_kiov != NULL);
+ LASSERT(msg->msg_kiov != NULL);
rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
rbp = rb->rb_pool;
- LASSERT (rbp == lnet_msg2bufpool(msg));
+ LASSERT(rbp == lnet_msg2bufpool(msg));
msg->msg_kiov = NULL;
msg->msg_rtrcredit = 0;
@@ -1172,10 +1173,10 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
* but we might want to use pre-determined router for ACK/REPLY
* in the future */
/* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
- LASSERT (msg->msg_txpeer == NULL);
- LASSERT (!msg->msg_sending);
- LASSERT (!msg->msg_target_is_router);
- LASSERT (!msg->msg_receiving);
+ LASSERT(msg->msg_txpeer == NULL);
+ LASSERT(!msg->msg_sending);
+ LASSERT(!msg->msg_target_is_router);
+ LASSERT(!msg->msg_receiving);
msg->msg_sending = 1;
@@ -1200,7 +1201,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
libcfs_nid2str(src_nid));
return -EINVAL;
}
- LASSERT (!msg->msg_routing);
+ LASSERT(!msg->msg_routing);
}
/* Is this for someone on a local network? */
@@ -1249,7 +1250,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
/* ENOMEM or shutting down */
return rc;
}
- LASSERT (lp->lp_ni == src_ni);
+ LASSERT(lp->lp_ni == src_ni);
} else {
/* sending to a remote network */
lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
@@ -1290,7 +1291,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
src_ni = lp->lp_ni;
src_nid = src_ni->ni_nid;
} else {
- LASSERT (src_ni == lp->lp_ni);
+ LASSERT(src_ni == lp->lp_ni);
lnet_ni_decref_locked(src_ni, cpt);
}
@@ -1311,9 +1312,9 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
/* 'lp' is our best choice of peer */
- LASSERT (!msg->msg_peertxcredit);
- LASSERT (!msg->msg_txcredit);
- LASSERT (msg->msg_txpeer == NULL);
+ LASSERT(!msg->msg_peertxcredit);
+ LASSERT(!msg->msg_txcredit);
+ LASSERT(msg->msg_txpeer == NULL);
msg->msg_txpeer = lp; /* msg takes my ref on lp */
@@ -1509,7 +1510,7 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
return ENOENT; /* +ve: OK but no match */
}
- LASSERT (md->md_offset == 0);
+ LASSERT(md->md_offset == 0);
rlength = hdr->payload_length;
mlength = MIN(rlength, (int)md->md_length);
@@ -1614,31 +1615,31 @@ lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
}
char *
-lnet_msgtyp2str (int type)
+lnet_msgtyp2str(int type)
{
switch (type) {
case LNET_MSG_ACK:
- return ("ACK");
+ return "ACK";
case LNET_MSG_PUT:
- return ("PUT");
+ return "PUT";
case LNET_MSG_GET:
- return ("GET");
+ return "GET";
case LNET_MSG_REPLY:
- return ("REPLY");
+ return "REPLY";
case LNET_MSG_HELLO:
- return ("HELLO");
+ return "HELLO";
default:
- return ("<UNKNOWN>");
+ return "<UNKNOWN>";
}
}
EXPORT_SYMBOL(lnet_msgtyp2str);
void
-lnet_print_hdr(lnet_hdr_t * hdr)
+lnet_print_hdr(lnet_hdr_t *hdr)
{
lnet_process_id_t src = {0};
lnet_process_id_t dst = {0};
- char *type_str = lnet_msgtyp2str (hdr->type);
+ char *type_str = lnet_msgtyp2str(hdr->type);
src.nid = hdr->src_nid;
src.pid = hdr->src_pid;
@@ -1709,7 +1710,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
__u32 payload_length;
__u32 type;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
type = le32_to_cpu(hdr->type);
src_nid = le64_to_cpu(hdr->src_nid);
@@ -1734,7 +1735,8 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
case LNET_MSG_PUT:
case LNET_MSG_REPLY:
- if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
+ if (payload_length >
+ (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
CERROR("%s, src %s: bad %s payload %d "
"(%d max expected)\n",
libcfs_nid2str(from_nid),
@@ -1772,7 +1774,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (!for_me) {
if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
/* should have gone direct */
- CERROR ("%s, src %s: Bad dest nid %s "
+ CERROR("%s, src %s: Bad dest nid %s "
"(should have been sent direct)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1783,7 +1785,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (lnet_islocalnid(dest_nid)) {
/* dest is another local NI; sender should have used
* this node's NID on its own network */
- CERROR ("%s, src %s: Bad dest nid %s "
+ CERROR("%s, src %s: Bad dest nid %s "
"(it's my nid but on a different network)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1792,7 +1794,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
}
if (rdma_req && type == LNET_MSG_GET) {
- CERROR ("%s, src %s: Bad optimized GET for %s "
+ CERROR("%s, src %s: Bad optimized GET for %s "
"(final destination must be me)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1801,7 +1803,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
}
if (!the_lnet.ln_routing) {
- CERROR ("%s, src %s: Dropping message for %s "
+ CERROR("%s, src %s: Dropping message for %s "
"(routing not enabled)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1813,9 +1815,8 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
/* Message looks OK; we're not going to return an error, so we MUST
* call back lnd_recv() come what may... */
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (src_nid, 0)) /* shall we now? */
- {
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(src_nid, 0)) { /* shall we now? */
CERROR("%s, src %s: Dropping %s to simulate failure\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
lnet_msgtyp2str(type));
@@ -1830,7 +1831,9 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
goto drop;
}
- /* msg zeroed in lnet_msg_alloc; i.e. flags all clear, pointers NULL etc */
+ /* msg zeroed in lnet_msg_alloc;
+ * i.e. flags all clear, pointers NULL etc
+ */
msg->msg_type = type;
msg->msg_private = private;
@@ -1906,7 +1909,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (rc == 0)
return 0;
- LASSERT (rc == ENOENT);
+ LASSERT(rc == ENOENT);
free_drop:
LASSERT(msg->msg_md == NULL);
@@ -2047,12 +2050,11 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
- {
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) { /* shall we now? */
CERROR("Dropping PUT to %s: simulated failure\n",
libcfs_id2str(target));
return -EIO;
@@ -2113,9 +2115,9 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
rc = lnet_send(self, msg, LNET_NID_ANY);
if (rc != 0) {
- CNETERR( "Error sending PUT to %s: %d\n",
+ CNETERR("Error sending PUT to %s: %d\n",
libcfs_id2str(target), rc);
- lnet_finalize (NULL, msg, rc);
+ lnet_finalize(NULL, msg, rc);
}
/* completion will be signalled by an event */
@@ -2124,7 +2126,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
EXPORT_SYMBOL(LNetPut);
lnet_msg_t *
-lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
+lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
/* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
* returns a msg for the LND to pass to lnet_finalize() when the sink
@@ -2144,16 +2146,16 @@ lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
lnet_res_lock(cpt);
- LASSERT (getmd->md_refcount > 0);
+ LASSERT(getmd->md_refcount > 0);
if (msg == NULL) {
- CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
+ CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
goto drop;
}
if (getmd->md_threshold == 0) {
- CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
+ CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
getmd);
lnet_res_unlock(cpt);
@@ -2205,13 +2207,13 @@ lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
/* Set the REPLY length, now the RDMA that elides the REPLY message has
* completed and I know it. */
- LASSERT (reply != NULL);
- LASSERT (reply->msg_type == LNET_MSG_GET);
- LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);
+ LASSERT(reply != NULL);
+ LASSERT(reply->msg_type == LNET_MSG_GET);
+ LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
/* NB I trusted my peer to RDMA. If she tells me she's written beyond
* the end of my buffer, I might as well be dead. */
- LASSERT (len <= reply->msg_ev.mlength);
+ LASSERT(len <= reply->msg_ev.mlength);
reply->msg_ev.mlength = len;
}
@@ -2229,7 +2231,8 @@ EXPORT_SYMBOL(lnet_set_reply_msg_len);
*
* \param self,target,portal,match_bits,offset See the discussion in LNetPut().
* \param mdh A handle for the MD that describes the memory into which the
- * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
+ * requested data will be received. The MD must be "free floating"
+ * (See LNetMDBind()).
*
* \retval 0 Success, and only in this case events will be generated
* and logged to EQ (if it exists) of the MD.
@@ -2247,12 +2250,11 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
- {
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) { /* shall we now? */
CERROR("Dropping GET to %s: simulated failure\n",
libcfs_id2str(target));
return -EIO;
@@ -2307,9 +2309,9 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
rc = lnet_send(self, msg, LNET_NID_ANY);
if (rc < 0) {
- CNETERR( "Error sending GET to %s: %d\n",
+ CNETERR("Error sending GET to %s: %d\n",
libcfs_id2str(target), rc);
- lnet_finalize (NULL, msg, rc);
+ lnet_finalize(NULL, msg, rc);
}
/* completion will be signalled by an event */
@@ -2348,12 +2350,12 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
* keep order 0 free for 0@lo and order 1 free for a local NID
* match */
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
- list_for_each (e, &the_lnet.ln_nis) {
+ list_for_each(e, &the_lnet.ln_nis) {
ni = list_entry(e, lnet_ni_t, ni_list);
if (ni->ni_nid == dstnid) {
@@ -2390,7 +2392,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
lnet_route_t *route;
lnet_route_t *shortest = NULL;
- LASSERT (!list_empty(&rnet->lrn_routes));
+ LASSERT(!list_empty(&rnet->lrn_routes));
list_for_each_entry(route, &rnet->lrn_routes,
lr_list) {
@@ -2399,7 +2401,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
shortest = route;
}
- LASSERT (shortest != NULL);
+ LASSERT(shortest != NULL);
hops = shortest->lr_hops;
if (srcnidp != NULL)
*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
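The copy helpers touched above all follow the same shape: skip whole fragments until the destination and source offsets fall inside a fragment, then copy MIN(remaining-in-dst, remaining-in-src, nob) per iteration until nob is exhausted. Below is a condensed user-space sketch of that loop over standard struct iovec arrays from <sys/uio.h>; it mirrors the structure of lnet_copy_iov2iov() but is not the kernel code, and the LASSERTs are reduced to a plain early return.

        #include <string.h>
        #include <sys/uio.h>

        #define MIN(a, b) ((a) < (b) ? (a) : (b))

        /* Copy nob bytes from (siov, soffset) into (diov, doffset), fragment by fragment. */
        static void copy_iov2iov(unsigned int ndiov, struct iovec *diov, size_t doffset,
                                 unsigned int nsiov, struct iovec *siov, size_t soffset,
                                 size_t nob)
        {
                while (nob > 0) {
                        size_t this_nob;

                        /* skip complete fragments before each offset */
                        while (ndiov > 0 && doffset >= diov->iov_len) {
                                doffset -= diov->iov_len;
                                diov++;
                                ndiov--;
                        }
                        while (nsiov > 0 && soffset >= siov->iov_len) {
                                soffset -= siov->iov_len;
                                siov++;
                                nsiov--;
                        }
                        if (ndiov == 0 || nsiov == 0)
                                return;         /* ran out of fragments: nothing left to copy */

                        this_nob = MIN(diov->iov_len - doffset, siov->iov_len - soffset);
                        this_nob = MIN(this_nob, nob);

                        memcpy((char *)diov->iov_base + doffset,
                               (char *)siov->iov_base + soffset, this_nob);

                        doffset += this_nob;
                        soffset += this_nob;
                        nob -= this_nob;
                }
        }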
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 670dae34107..efc798e0193 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -36,16 +36,16 @@
#include <linux/lnet/lib-lnet.h>
int
-lolnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- LASSERT (!lntmsg->msg_routing);
- LASSERT (!lntmsg->msg_target_is_router);
+ LASSERT(!lntmsg->msg_routing);
+ LASSERT(!lntmsg->msg_target_is_router);
return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
}
int
-lolnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
struct iovec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
@@ -89,20 +89,20 @@ static int lolnd_instanced;
void
lolnd_shutdown(lnet_ni_t *ni)
{
- CDEBUG (D_NET, "shutdown\n");
- LASSERT (lolnd_instanced);
+ CDEBUG(D_NET, "shutdown\n");
+ LASSERT(lolnd_instanced);
lolnd_instanced = 0;
}
int
-lolnd_startup (lnet_ni_t *ni)
+lolnd_startup(lnet_ni_t *ni)
{
- LASSERT (ni->ni_lnd == &the_lolnd);
- LASSERT (!lolnd_instanced);
+ LASSERT(ni->ni_lnd == &the_lolnd);
+ LASSERT(!lolnd_instanced);
lolnd_instanced = 1;
- return (0);
+ return 0;
}
lnd_t the_lolnd = {
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index afb81755cba..6db8774ff7b 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -37,14 +37,14 @@
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/lnet/lib-lnet.h>
-static int config_on_load = 0;
+static int config_on_load;
CFS_MODULE_PARM(config_on_load, "i", int, 0444,
"configure network at module load");
static struct mutex lnet_config_mutex;
int
-lnet_configure (void *arg)
+lnet_configure(void *arg)
{
/* 'arg' only there so I can be passed to cfs_create_thread() */
int rc = 0;
@@ -64,7 +64,7 @@ lnet_configure (void *arg)
}
int
-lnet_unconfigure (void)
+lnet_unconfigure(void)
{
int refcount;
@@ -124,7 +124,7 @@ init_lnet(void)
}
rc = libcfs_register_ioctl(&lnet_ioctl_handler);
- LASSERT (rc == 0);
+ LASSERT(rc == 0);
if (config_on_load) {
/* Have to schedule a separate thread to avoid deadlocking
@@ -141,7 +141,7 @@ fini_lnet(void)
int rc;
rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
- LASSERT (rc == 0);
+ LASSERT(rc == 0);
LNetFini();
}
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 931f6ca25dc..5e47de36c18 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -30,7 +30,7 @@
/* This is really lnet_proc.c. You might need to update sanity test 215
* if any file format is changed. */
-static ctl_table_header_t *lnet_table_header = NULL;
+static ctl_table_header_t *lnet_table_header;
#define CTL_LNET (0x100)
enum {
@@ -158,7 +158,7 @@ int LL_PROC_PROTO(proc_lnet_routes)
off = LNET_PROC_HOFF_GET(*ppos);
ver = LNET_PROC_VER_GET(*ppos);
- LASSERT (!write);
+ LASSERT(!write);
if (*lenp == 0)
return 0;
@@ -172,11 +172,11 @@ int LL_PROC_PROTO(proc_lnet_routes)
if (*ppos == 0) {
s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
the_lnet.ln_routing ? "enabled" : "disabled");
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %7s %s\n",
"net", "hops", "state", "router");
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
lnet_net_lock(0);
ver = (unsigned int)the_lnet.ln_remote_nets_version;
@@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_lnet_routers)
off = LNET_PROC_HOFF_GET(*ppos);
ver = LNET_PROC_VER_GET(*ppos);
- LASSERT (!write);
+ LASSERT(!write);
if (*lenp == 0)
return 0;
@@ -375,7 +375,7 @@ int LL_PROC_PROTO(proc_lnet_routers)
pingsent,
cfs_duration_sec(cfs_time_sub(deadline, now)),
down_ni, libcfs_nid2str(nid));
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
}
lnet_net_unlock(0);
@@ -437,7 +437,7 @@ int LL_PROC_PROTO(proc_lnet_peers)
"%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
"nid", "refs", "state", "last", "max",
"rtr", "min", "tx", "min", "queue");
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
hoff++;
} else {
@@ -534,7 +534,7 @@ int LL_PROC_PROTO(proc_lnet_peers)
libcfs_nid2str(nid), nrefs, aliveness,
lastalive, maxcr, rtrcr, minrtrcr, txcr,
mintxcr, txqnob);
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
} else { /* peer is NULL */
lnet_net_unlock(cpt);
@@ -592,7 +592,7 @@ static int __proc_lnet_buffers(void *data, int write,
s += snprintf(s, tmpstr + tmpsiz - s,
"%5s %5s %7s %7s\n",
"pages", "count", "credits", "min");
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
if (the_lnet.ln_rtrpools == NULL)
goto out; /* I'm not a router */
@@ -638,7 +638,7 @@ int LL_PROC_PROTO(proc_lnet_nis)
DECLARE_LL_PROC_PPOS_DECL;
- LASSERT (!write);
+ LASSERT(!write);
if (*lenp == 0)
return 0;
@@ -654,7 +654,7 @@ int LL_PROC_PROTO(proc_lnet_nis)
"%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
"nid", "status", "alive", "refs", "peer",
"rtr", "max", "tx", "min");
- LASSERT (tmpstr + tmpsiz - s > 0);
+ LASSERT(tmpstr + tmpsiz - s > 0);
} else {
struct list_head *n;
lnet_ni_t *ni = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index ef5064e0055..b7613c828e7 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -48,16 +48,17 @@ CFS_MODULE_PARM(brw_inject_errors, "i", int, 0644,
"# data errors to inject randomly, zero by default");
static void
-brw_client_fini (sfw_test_instance_t *tsi)
+brw_client_fini(sfw_test_instance_t *tsi)
{
srpc_bulk_t *bulk;
sfw_test_unit_t *tsu;
- LASSERT (tsi->tsi_is_client);
+ LASSERT(tsi->tsi_is_client);
- list_for_each_entry (tsu, &tsi->tsi_units, tsu_list) {
+ list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
bulk = tsu->tsu_private;
- if (bulk == NULL) continue;
+ if (bulk == NULL)
+ continue;
srpc_free_bulk(bulk);
tsu->tsu_private = NULL;
@@ -65,7 +66,7 @@ brw_client_fini (sfw_test_instance_t *tsi)
}
int
-brw_client_init (sfw_test_instance_t *tsi)
+brw_client_init(sfw_test_instance_t *tsi)
{
sfw_session_t *sn = tsi->tsi_batch->bat_session;
int flags;
@@ -130,28 +131,31 @@ brw_client_init (sfw_test_instance_t *tsi)
#define BRW_MSIZE sizeof(__u64)
int
-brw_inject_one_error (void)
+brw_inject_one_error(void)
{
struct timeval tv;
- if (brw_inject_errors <= 0) return 0;
+ if (brw_inject_errors <= 0)
+ return 0;
do_gettimeofday(&tv);
- if ((tv.tv_usec & 1) == 0) return 0;
+ if ((tv.tv_usec & 1) == 0)
+ return 0;
return brw_inject_errors--;
}
void
-brw_fill_page (struct page *pg, int pattern, __u64 magic)
+brw_fill_page(struct page *pg, int pattern, __u64 magic)
{
char *addr = page_address(pg);
int i;
- LASSERT (addr != NULL);
+ LASSERT(addr != NULL);
- if (pattern == LST_BRW_CHECK_NONE) return;
+ if (pattern == LST_BRW_CHECK_NONE)
+ return;
if (magic == BRW_MAGIC)
magic += brw_inject_one_error();
@@ -169,29 +173,31 @@ brw_fill_page (struct page *pg, int pattern, __u64 magic)
return;
}
- LBUG ();
+ LBUG();
return;
}
int
-brw_check_page (struct page *pg, int pattern, __u64 magic)
+brw_check_page(struct page *pg, int pattern, __u64 magic)
{
char *addr = page_address(pg);
__u64 data = 0; /* make compiler happy */
int i;
- LASSERT (addr != NULL);
+ LASSERT(addr != NULL);
if (pattern == LST_BRW_CHECK_NONE)
return 0;
if (pattern == LST_BRW_CHECK_SIMPLE) {
data = *((__u64 *) addr);
- if (data != magic) goto bad_data;
+ if (data != magic)
+ goto bad_data;
addr += PAGE_CACHE_SIZE - BRW_MSIZE;
data = *((__u64 *) addr);
- if (data != magic) goto bad_data;
+ if (data != magic)
+ goto bad_data;
return 0;
}
@@ -199,22 +205,23 @@ brw_check_page (struct page *pg, int pattern, __u64 magic)
if (pattern == LST_BRW_CHECK_FULL) {
for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
data = *(((__u64 *) addr) + i);
- if (data != magic) goto bad_data;
+ if (data != magic)
+ goto bad_data;
}
return 0;
}
- LBUG ();
+ LBUG();
bad_data:
- CERROR ("Bad data in page %p: "LPX64", "LPX64" expected\n",
+ CERROR("Bad data in page %p: "LPX64", "LPX64" expected\n",
pg, data, magic);
return 1;
}
void
-brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -226,7 +233,7 @@ brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
}
int
-brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -234,7 +241,7 @@ brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
for (i = 0; i < bk->bk_niov; i++) {
pg = bk->bk_iovs[i].kiov_page;
if (brw_check_page(pg, pattern, magic) != 0) {
- CERROR ("Bulk page %p (%d/%d) is corrupted!\n",
+ CERROR("Bulk page %p (%d/%d) is corrupted!\n",
pg, i, bk->bk_niov);
return 1;
}
@@ -244,7 +251,7 @@ brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_client_prep_rpc (sfw_test_unit_t *tsu,
+brw_client_prep_rpc(sfw_test_unit_t *tsu,
lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
{
srpc_bulk_t *bulk = tsu->tsu_private;
@@ -302,7 +309,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu,
}
static void
-brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
{
__u64 magic = BRW_MAGIC;
sfw_test_instance_t *tsi = tsu->tsu_instance;
@@ -311,10 +318,10 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
- LASSERT (sn != NULL);
+ LASSERT(sn != NULL);
if (rpc->crpc_status != 0) {
- CERROR ("BRW RPC to %s failed with %d\n",
+ CERROR("BRW RPC to %s failed with %d\n",
libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_brw_errors);
@@ -326,7 +333,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
__swab32s(&reply->brw_status);
}
- CDEBUG (reply->brw_status ? D_WARNING : D_NET,
+ CDEBUG(reply->brw_status ? D_WARNING : D_NET,
"BRW RPC to %s finished with brw_status: %d\n",
libcfs_id2str(rpc->crpc_dest), reply->brw_status);
@@ -336,10 +343,11 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
goto out;
}
- if (reqst->brw_rw == LST_BRW_WRITE) goto out;
+ if (reqst->brw_rw == LST_BRW_WRITE)
+ goto out;
if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
- CERROR ("Bulk data from %s is corrupted!\n",
+ CERROR("Bulk data from %s is corrupted!\n",
libcfs_id2str(rpc->crpc_dest));
atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -EBADMSG;
@@ -350,18 +358,19 @@ out:
}
void
-brw_server_rpc_done (srpc_server_rpc_t *rpc)
+brw_server_rpc_done(srpc_server_rpc_t *rpc)
{
srpc_bulk_t *blk = rpc->srpc_bulk;
- if (blk == NULL) return;
+ if (blk == NULL)
+ return;
if (rpc->srpc_status != 0)
- CERROR ("Bulk transfer %s %s has failed: %d\n",
+ CERROR("Bulk transfer %s %s has failed: %d\n",
blk->bk_sink ? "from" : "to",
libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
else
- CDEBUG (D_NET, "Transferred %d pages bulk data %s %s\n",
+ CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
blk->bk_niov, blk->bk_sink ? "from" : "to",
libcfs_id2str(rpc->srpc_peer));
@@ -369,21 +378,21 @@ brw_server_rpc_done (srpc_server_rpc_t *rpc)
}
int
-brw_bulk_ready (srpc_server_rpc_t *rpc, int status)
+brw_bulk_ready(srpc_server_rpc_t *rpc, int status)
{
__u64 magic = BRW_MAGIC;
srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
srpc_brw_reqst_t *reqst;
srpc_msg_t *reqstmsg;
- LASSERT (rpc->srpc_bulk != NULL);
- LASSERT (rpc->srpc_reqstbuf != NULL);
+ LASSERT(rpc->srpc_bulk != NULL);
+ LASSERT(rpc->srpc_reqstbuf != NULL);
reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
reqst = &reqstmsg->msg_body.brw_reqst;
if (status != 0) {
- CERROR ("BRW bulk %s failed for RPC from %s: %d\n",
+ CERROR("BRW bulk %s failed for RPC from %s: %d\n",
reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
libcfs_id2str(rpc->srpc_peer), status);
return -EIO;
@@ -396,7 +405,7 @@ brw_bulk_ready (srpc_server_rpc_t *rpc, int status)
__swab64s(&magic);
if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
- CERROR ("Bulk data from %s is corrupted!\n",
+ CERROR("Bulk data from %s is corrupted!\n",
libcfs_id2str(rpc->srpc_peer));
reply->brw_status = EBADMSG;
}
@@ -415,10 +424,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
int npg;
int rc;
- LASSERT (sv->sv_id == SRPC_SERVICE_BRW);
+ LASSERT(sv->sv_id == SRPC_SERVICE_BRW);
if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
- LASSERT (reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
+ LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
__swab32s(&reqst->brw_rw);
__swab32s(&reqst->brw_len);
@@ -426,7 +435,7 @@ brw_server_handle(struct srpc_server_rpc *rpc)
__swab64s(&reqst->brw_rpyid);
__swab64s(&reqst->brw_bulkid);
}
- LASSERT (reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
+ LASSERT(reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
reply->brw_status = 0;
rpc->srpc_done = brw_server_rpc_done;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index cbce662b987..9a52f25b72e 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -75,7 +75,7 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
if (crpc->crp_stamp == 0) {
/* not aborted */
- LASSERT (crpc->crp_status == 0);
+ LASSERT(crpc->crp_status == 0);
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = rpc->crpc_status;
@@ -153,7 +153,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
int i;
- LASSERT (list_empty(&crpc->crp_link));
+ LASSERT(list_empty(&crpc->crp_link));
for (i = 0; i < bulk->bk_niov; i++) {
if (bulk->bk_iovs[i].kiov_page == NULL)
@@ -187,7 +187,7 @@ lstcon_rpc_post(lstcon_rpc_t *crpc)
{
lstcon_rpc_trans_t *trans = crpc->crp_trans;
- LASSERT (trans != NULL);
+ LASSERT(trans != NULL);
atomic_inc(&trans->tas_remaining);
crpc->crp_posted = 1;
@@ -289,7 +289,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
lstcon_rpc_t *crpc;
lstcon_node_t *nd;
- list_for_each_entry (crpc, &trans->tas_rpcs_list, crp_link) {
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
@@ -330,7 +330,7 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
!list_empty(&trans->tas_olink)) /* Not an end session RPC */
return 1;
- return (atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
+ return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
}
int
@@ -349,8 +349,8 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
lstcon_rpc_trans_name(trans->tas_opc));
/* post all requests */
- list_for_each_entry (crpc, &trans->tas_rpcs_list, crp_link) {
- LASSERT (!crpc->crp_posted);
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ LASSERT(!crpc->crp_posted);
lstcon_rpc_post(crpc);
}
@@ -390,8 +390,8 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
srpc_client_rpc_t *rpc = crpc->crp_rpc;
srpc_generic_reply_t *rep;
- LASSERT (nd != NULL && rpc != NULL);
- LASSERT (crpc->crp_stamp != 0);
+ LASSERT(nd != NULL && rpc != NULL);
+ LASSERT(crpc->crp_stamp != 0);
if (crpc->crp_status != 0) {
*msgpp = NULL;
@@ -427,14 +427,14 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
srpc_msg_t *rep;
int error;
- LASSERT (stat != NULL);
+ LASSERT(stat != NULL);
memset(stat, 0, sizeof(*stat));
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
lstcon_rpc_stat_total(stat, 1);
- LASSERT (crpc->crp_stamp != 0);
+ LASSERT(crpc->crp_stamp != 0);
error = lstcon_rpc_get_reply(crpc, &rep);
if (error != 0) {
@@ -455,8 +455,7 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
lstcon_session_feats_check(trans->tas_features);
}
- CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, "
- "RPC error(%d), Framework error(%d)\n",
+ CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
lstcon_rpc_trans_name(trans->tas_opc),
lstcon_rpc_stat_success(stat, 0),
lstcon_rpc_stat_failure(stat, 0),
@@ -482,7 +481,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
struct timeval tv;
int error;
- LASSERT (head_up != NULL);
+ LASSERT(head_up != NULL);
next = head_up;
@@ -498,7 +497,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
- LASSERT (crpc->crp_stamp != 0);
+ LASSERT(crpc->crp_stamp != 0);
error = lstcon_rpc_get_reply(crpc, &msg);
@@ -532,7 +531,9 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
if (readent == NULL)
continue;
- if ((error = readent(trans->tas_opc, msg, ent)) != 0)
+ error = readent(trans->tas_opc, msg, ent);
+
+ if (error != 0)
return error;
}
@@ -568,19 +569,19 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
* user wait for them, just abandon them, they will be recycled
* in callback */
- LASSERT (crpc->crp_status != 0);
+ LASSERT(crpc->crp_status != 0);
crpc->crp_node = NULL;
crpc->crp_trans = NULL;
list_del_init(&crpc->crp_link);
- count ++;
+ count++;
spin_unlock(&rpc->crpc_lock);
atomic_dec(&trans->tas_remaining);
}
- LASSERT (atomic_read(&trans->tas_remaining) == 0);
+ LASSERT(atomic_read(&trans->tas_remaining) == 0);
list_del(&trans->tas_link);
if (!list_empty(&trans->tas_olink))
@@ -669,14 +670,14 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
brq->bar_bid = tsb->tsb_id;
brq->bar_testidx = tsb->tsb_index;
brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
- (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP:
+ (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
SRPC_BATCH_OPC_QUERY);
if (transop != LST_TRANS_TSBRUN &&
transop != LST_TRANS_TSBSTOP)
return 0;
- LASSERT (tsb->tsb_index == 0);
+ LASSERT(tsb->tsb_index == 0);
batch = (lstcon_batch_t *)tsb;
brq->bar_arg = batch->bat_arg;
@@ -710,7 +711,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
i = idx / SFW_ID_PER_PAGE;
- LASSERT (i < nkiov);
+ LASSERT(i < nkiov);
pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
@@ -728,9 +729,9 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
int end;
int i = 0;
- LASSERT (dist >= 1);
- LASSERT (span >= 1);
- LASSERT (grp->grp_nnode >= 1);
+ LASSERT(dist >= 1);
+ LASSERT(span >= 1);
+ LASSERT(grp->grp_nnode >= 1);
if (span > grp->grp_nnode)
return -EINVAL;
@@ -741,11 +742,11 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
nd = ndl->ndl_node;
if (i < start) {
- i ++;
+ i++;
continue;
}
- if (i > (end >= start ? end: grp->grp_nnode))
+ if (i > (end >= start ? end : grp->grp_nnode))
break;
pid = lstcon_next_id((i - start), nkiov, kiov);
@@ -866,7 +867,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
bulk->bk_sink = 0;
- LASSERT (transop == LST_TRANS_TSBCLIADD);
+ LASSERT(transop == LST_TRANS_TSBCLIADD);
rc = lstcon_dstnodes_prep(test->tes_dst_grp,
test->tes_cliidx++,
@@ -942,8 +943,7 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
}
if (reply->msg_ses_feats != trans->tas_features) {
- CNETERR("Framework features %x from %s is different with "
- "features on this transaction: %x\n",
+ CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
trans->tas_features);
status = mksn_rep->mksn_status = EPROTO;
@@ -1107,8 +1107,8 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
continue;
if (rc < 0) {
- CDEBUG(D_NET, "Condition error while creating RPC "
- " for transaction %d: %d\n", transop, rc);
+ CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
+ transop, rc);
break;
}
@@ -1193,7 +1193,7 @@ lstcon_rpc_pinger(void *arg)
trans = console_session.ses_ping;
- LASSERT (trans != NULL);
+ LASSERT(trans != NULL);
list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
nd = ndl->ndl_node;
@@ -1219,8 +1219,8 @@ lstcon_rpc_pinger(void *arg)
crpc = &nd->nd_ping;
if (crpc->crp_rpc != NULL) {
- LASSERT (crpc->crp_trans == trans);
- LASSERT (!list_empty(&crpc->crp_link));
+ LASSERT(crpc->crp_trans == trans);
+ LASSERT(!list_empty(&crpc->crp_link));
spin_lock(&crpc->crp_rpc->crpc_lock);
@@ -1264,7 +1264,7 @@ lstcon_rpc_pinger(void *arg)
lstcon_rpc_trans_addreq(trans, crpc);
lstcon_rpc_post(crpc);
- count ++;
+ count++;
}
if (console_session.ses_expired) {
@@ -1286,8 +1286,8 @@ lstcon_rpc_pinger_start(void)
stt_timer_t *ptimer;
int rc;
- LASSERT (list_empty(&console_session.ses_rpc_freelist));
- LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT(list_empty(&console_session.ses_rpc_freelist));
+ LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
&console_session.ses_ping);
@@ -1307,7 +1307,7 @@ lstcon_rpc_pinger_start(void)
void
lstcon_rpc_pinger_stop(void)
{
- LASSERT (console_session.ses_shutdown);
+ LASSERT(console_session.ses_shutdown);
stt_del_timer(&console_session.ses_ping_timer);
@@ -1330,7 +1330,7 @@ lstcon_rpc_cleanup_wait(void)
/* Called with hold of global mutex */
- LASSERT (console_session.ses_shutdown);
+ LASSERT(console_session.ses_shutdown);
while (!list_empty(&console_session.ses_trans_list)) {
list_for_each(pacer, &console_session.ses_trans_list) {
@@ -1345,8 +1345,7 @@ lstcon_rpc_cleanup_wait(void)
mutex_unlock(&console_session.ses_mutex);
- CWARN("Session is shutting down, "
- "waiting for termination of transactions\n");
+ CWARN("Session is shutting down, waiting for termination of transactions\n");
cfs_pause(cfs_time_seconds(1));
mutex_lock(&console_session.ses_mutex);
@@ -1356,8 +1355,7 @@ lstcon_rpc_cleanup_wait(void)
lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
console_session.ses_rpc_lock,
- "Network is not accessible or target is down, "
- "waiting for %d console RPCs to being recycled\n",
+ "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n",
atomic_read(&console_session.ses_rpc_counter));
list_add(&zlist, &console_session.ses_rpc_freelist);
@@ -1392,6 +1390,6 @@ lstcon_rpc_module_init(void)
void
lstcon_rpc_module_fini(void)
{
- LASSERT (list_empty(&console_session.ses_rpc_freelist));
- LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT(list_empty(&console_session.ses_rpc_freelist));
+ LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
}
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 09e4700af64..f1152e4fbcc 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -797,7 +797,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
return rc;
}
- if (dents_up != 0) {
+ if (dents_up) {
/* verbose query */
rc = lstcon_nodes_getent(&grp->grp_ndl_list,
index_p, count_p, dents_up);
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 3bf4afb42ff..82fd363679c 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -74,14 +74,14 @@ stt_add_timer(stt_timer_t *timer)
spin_lock(&stt_data.stt_lock);
- LASSERT (stt_data.stt_nthreads > 0);
- LASSERT (!stt_data.stt_shuttingdown);
- LASSERT (timer->stt_func != NULL);
- LASSERT (list_empty(&timer->stt_list));
- LASSERT (cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
+ LASSERT(stt_data.stt_nthreads > 0);
+ LASSERT(!stt_data.stt_shuttingdown);
+ LASSERT(timer->stt_func != NULL);
+ LASSERT(list_empty(&timer->stt_list));
+ LASSERT(cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
/* a simple insertion sort */
- list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
+ list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
if (cfs_time_aftereq(timer->stt_expires, old->stt_expires))
@@ -102,14 +102,14 @@ stt_add_timer(stt_timer_t *timer)
* another CPU.
*/
int
-stt_del_timer (stt_timer_t *timer)
+stt_del_timer(stt_timer_t *timer)
{
int ret = 0;
spin_lock(&stt_data.stt_lock);
- LASSERT (stt_data.stt_nthreads > 0);
- LASSERT (!stt_data.stt_shuttingdown);
+ LASSERT(stt_data.stt_nthreads > 0);
+ LASSERT(!stt_data.stt_shuttingdown);
if (!list_empty(&timer->stt_list)) {
ret = 1;
@@ -122,7 +122,7 @@ stt_del_timer (stt_timer_t *timer)
/* called with stt_data.stt_lock held */
int
-stt_expire_list (struct list_head *slot, cfs_time_t now)
+stt_expire_list(struct list_head *slot, cfs_time_t now)
{
int expired = 0;
stt_timer_t *timer;
@@ -146,7 +146,7 @@ stt_expire_list (struct list_head *slot, cfs_time_t now)
}
int
-stt_check_timers (cfs_time_t *last)
+stt_check_timers(cfs_time_t *last)
{
int expired = 0;
cfs_time_t now;
@@ -169,7 +169,7 @@ stt_check_timers (cfs_time_t *last)
int
-stt_timer_main (void *arg)
+stt_timer_main(void *arg)
{
int rc = 0;
UNUSED(arg);
@@ -193,7 +193,7 @@ stt_timer_main (void *arg)
}
int
-stt_start_timer_thread (void)
+stt_start_timer_thread(void)
{
struct task_struct *task;
@@ -211,7 +211,7 @@ stt_start_timer_thread (void)
int
-stt_startup (void)
+stt_startup(void)
{
int rc = 0;
int i;
@@ -227,20 +227,20 @@ stt_startup (void)
init_waitqueue_head(&stt_data.stt_waitq);
rc = stt_start_timer_thread();
if (rc != 0)
- CERROR ("Can't spawn timer thread: %d\n", rc);
+ CERROR("Can't spawn timer thread: %d\n", rc);
return rc;
}
void
-stt_shutdown (void)
+stt_shutdown(void)
{
int i;
spin_lock(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
- LASSERT (list_empty(&stt_data.stt_hash[i]));
+ LASSERT(list_empty(&stt_data.stt_hash[i]));
stt_data.stt_shuttingdown = 1;
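
The stt_add_timer() hunk above keeps each timer slot sorted by expiry and inserts new timers by walking the slot list backwards (the source's own "a simple insertion sort" comment). A minimal standalone sketch of that backwards-scan insert on a plain circular doubly linked list follows; the demo_* types and helpers are illustrative stand-ins, not the libcfs list API.

/* Illustrative sketch only: plain C, demo_* types, not the kernel list API. */
#include <stdio.h>

struct demo_timer {
	unsigned long expires;
	struct demo_timer *prev, *next;
};

struct demo_slot {
	struct demo_timer head;		/* circular list head */
};

static void demo_slot_init(struct demo_slot *s)
{
	s->head.prev = &s->head;
	s->head.next = &s->head;
}

static void demo_add_timer(struct demo_slot *s, struct demo_timer *t)
{
	struct demo_timer *pos;

	/* a simple insertion sort, scanning from the latest expiry backwards */
	for (pos = s->head.prev; pos != &s->head; pos = pos->prev)
		if (t->expires >= pos->expires)
			break;

	/* link the new timer right after the entry that expires no later */
	t->prev = pos;
	t->next = pos->next;
	pos->next->prev = t;
	pos->next = t;
}

int main(void)
{
	struct demo_slot slot;
	struct demo_timer a = { 30 }, b = { 10 }, c = { 20 }, *it;

	demo_slot_init(&slot);
	demo_add_timer(&slot, &a);
	demo_add_timer(&slot, &b);
	demo_add_timer(&slot, &c);

	for (it = slot.head.next; it != &slot.head; it = it->next)
		printf("%lu ", it->expires);	/* prints: 10 20 30 */
	printf("\n");
	return 0;
}
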
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 2156a44d074..93d59b6a60d 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -16,7 +16,7 @@ config LUSTRE_FS
this file system support as a module, choose M here: the module will
be called lustre.
- To mount Lustre file systems , you also need to install the user space
+ To mount Lustre file systems, you also need to install the user space
mount.lustre and other user space commands which can be found in the
lustre-client package, available from
http://downloads.whamcloud.com/public/lustre/
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 66007b57018..79fc2fe131a 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -548,9 +548,7 @@ static int __init fid_mod_init(void)
seq_type_proc_dir = lprocfs_register(LUSTRE_SEQ_NAME,
proc_lustre_root,
NULL, NULL);
- if (IS_ERR(seq_type_proc_dir))
- return PTR_ERR(seq_type_proc_dir);
- return 0;
+ return PTR_ERR_OR_ZERO(seq_type_proc_dir);
}
static void __exit fid_mod_exit(void)
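
The fid_mod_init() hunk above folds the open-coded IS_ERR()/PTR_ERR() pair into a single PTR_ERR_OR_ZERO() call. A small userspace approximation of why the two forms return the same value follows; the demo_* helpers and the error-pointer encoding are a simplified re-implementation, not the kernel's err.h.

/* Simplified re-implementation for illustration; not the kernel's err.h. */
#include <stdio.h>

#define DEMO_MAX_ERRNO	4095UL

static int demo_is_err(const void *ptr)
{
	/* error pointers live in the top DEMO_MAX_ERRNO addresses */
	return (unsigned long)ptr >= (unsigned long)-DEMO_MAX_ERRNO;
}

static long demo_ptr_err(const void *ptr)
{
	return (long)ptr;
}

static int demo_ptr_err_or_zero(const void *ptr)
{
	/* the pattern the patch collapses: IS_ERR() ? PTR_ERR() : 0 */
	if (demo_is_err(ptr))
		return demo_ptr_err(ptr);
	return 0;
}

int main(void)
{
	int ok_storage = 0;
	void *ok = &ok_storage;			/* an ordinary pointer */
	void *err = (void *)(long)-12;		/* encodes -ENOMEM */

	printf("ok:  %d\n", demo_ptr_err_or_zero(ok));	/* 0 */
	printf("err: %d\n", demo_ptr_err_or_zero(err));	/* -12 */
	return 0;
}
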
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 25099cbe37e..45315101848 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -267,7 +267,7 @@ void fld_cache_punch_hole(struct fld_cache *cache,
const seqno_t new_end = range->lsr_end;
struct fld_cache_entry *fldt;
- OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC);
+ OBD_ALLOC_GFP(fldt, sizeof(*fldt), GFP_ATOMIC);
if (!fldt) {
OBD_FREE_PTR(f_new);
/* overlap is not allowed, so dont mess up list. */
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 078e98bda68..e47fd50b2a2 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -59,8 +59,6 @@
#include <lustre_mdc.h>
#include "fld_internal.h"
-struct lu_context_key fld_thread_key;
-
/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
* It should be common thing. The same about mdc RPC lock */
static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
@@ -509,14 +507,11 @@ static int __init fld_mod_init(void)
if (IS_ERR(fld_type_proc_dir))
return PTR_ERR(fld_type_proc_dir);
- LU_CONTEXT_KEY_INIT(&fld_thread_key);
- lu_context_key_register(&fld_thread_key);
return 0;
}
static void __exit fld_mod_exit(void)
{
- lu_context_key_degister(&fld_thread_key);
if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) {
lprocfs_remove(&fld_type_proc_dir);
fld_type_proc_dir = NULL;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index edb40afe66f..c485206fc6c 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -3096,13 +3096,13 @@ struct cl_io *cl_io_top(struct cl_io *io);
void cl_io_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_io *io);
-#define CL_IO_SLICE_CLEAN(foo_io, base) \
-do { \
- typeof(foo_io) __foo_io = (foo_io); \
+#define CL_IO_SLICE_CLEAN(foo_io, base) \
+do { \
+ typeof(foo_io) __foo_io = (foo_io); \
\
- CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
- memset(&__foo_io->base + 1, 0, \
- (sizeof *__foo_io) - sizeof __foo_io->base); \
+ CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
+ memset(&__foo_io->base + 1, 0, \
+ sizeof(*__foo_io) - sizeof(__foo_io->base)); \
} while (0)
/** @} cl_io */
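
The reworked CL_IO_SLICE_CLEAN() macro above asserts that the embedded slice is the first member and then zeroes everything after it with one memset(). A minimal userspace sketch of that "clear everything past the first member" idiom follows; the demo_* types are hypothetical, not the real cl_io structures.

/* Hypothetical demo types; only the idiom itself mirrors the macro above. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_base {
	int b;
};

struct demo_io {
	struct demo_base base;	/* must stay the first member */
	int state;
	char name[8];
};

int main(void)
{
	struct demo_io io = { { 1 }, 42, "dirty" };

	/* compile-time guarantee that base really sits at offset 0 */
	static_assert(offsetof(struct demo_io, base) == 0,
		      "base must be the first member");

	/* wipe every field that follows the embedded base, in place */
	memset(&io.base + 1, 0, sizeof(io) - sizeof(io.base));

	printf("base=%d state=%d name='%s'\n", io.base.b, io.state, io.name);
	return 0;
}
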
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index 9d4011f2908..27316f7b7a7 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -388,8 +388,8 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);
-# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
+# define CLOBINVRNT(env, clob, expr) \
+ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 9243dfab43d..eefdb8d061b 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -105,8 +105,8 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry)
#define ll_vfs_mknod(dir,entry,mnt,mode,dev) vfs_mknod(dir,entry,mode,dev)
#define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
-#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
- vfs_rename(old,old_dir,new,new_dir)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1,delegated_inode) \
+ vfs_rename(old,old_dir,new,new_dir,delegated_inode)
#define cfs_bio_io_error(a,b) bio_io_error((a))
#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
@@ -232,9 +232,6 @@ static inline int ll_namei_to_lookup_intent_flag(int flag)
return flag;
}
-# define ll_mrf_ret void
-# define LL_MRF_RETURN(rc)
-
#include <linux/fs.h>
# define ll_umode_t umode_t
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index fa31be886ef..d5b8225ef1a 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -622,7 +622,7 @@ struct lu_site {
/**
* objects hash table
*/
- cfs_hash_t *ls_obj_hash;
+ struct cfs_hash *ls_obj_hash;
/**
* index of bucket on hash table while purging
*/
@@ -659,7 +659,7 @@ struct lu_site {
static inline struct lu_site_bkt_data *
lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
{
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
index 2870487dd28..35aefa2cdad 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
@@ -165,7 +165,7 @@
#define LUSTRE_EHOSTUNREACH 113 /* No route to host */
#define LUSTRE_EALREADY 114 /* Operation already in progress */
#define LUSTRE_EINPROGRESS 115 /* Operation now in progress */
-#define LUSTRE_ESTALE 116 /* Stale NFS file handle */
+#define LUSTRE_ESTALE 116 /* Stale file handle */
#define LUSTRE_EUCLEAN 117 /* Structure needs cleaning */
#define LUSTRE_ENOTNAM 118 /* Not a XENIX named type file */
#define LUSTRE_ENAVAIL 119 /* No XENIX semaphores available */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 984235ccd3a..5ca18d01601 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -831,9 +831,10 @@ static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
/* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
+ CLASSERT(sizeof(*src) ==
+ sizeof(fid_seq(src)) +
+ sizeof(fid_oid(src)) +
+ sizeof(fid_ver(src)));
dst->f_seq = cpu_to_le64(fid_seq(src));
dst->f_oid = cpu_to_le32(fid_oid(src));
dst->f_ver = cpu_to_le32(fid_ver(src));
@@ -842,9 +843,10 @@ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
/* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
+ CLASSERT(sizeof(*src) ==
+ sizeof(fid_seq(src)) +
+ sizeof(fid_oid(src)) +
+ sizeof(fid_ver(src)));
dst->f_seq = le64_to_cpu(fid_seq(src));
dst->f_oid = le32_to_cpu(fid_oid(src));
dst->f_ver = le32_to_cpu(fid_ver(src));
@@ -853,9 +855,10 @@ static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
/* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
+ CLASSERT(sizeof(*src) ==
+ sizeof(fid_seq(src)) +
+ sizeof(fid_oid(src)) +
+ sizeof(fid_ver(src)));
dst->f_seq = cpu_to_be64(fid_seq(src));
dst->f_oid = cpu_to_be32(fid_oid(src));
dst->f_ver = cpu_to_be32(fid_ver(src));
@@ -864,9 +867,10 @@ static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
/* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
+ CLASSERT(sizeof(*src) ==
+ sizeof(fid_seq(src)) +
+ sizeof(fid_oid(src)) +
+ sizeof(fid_ver(src)));
dst->f_seq = be64_to_cpu(fid_seq(src));
dst->f_oid = be32_to_cpu(fid_oid(src));
dst->f_ver = be32_to_cpu(fid_ver(src));
@@ -891,9 +895,11 @@ extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
/* Check that there is no alignment padding. */
- CLASSERT(sizeof *f0 ==
- sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver);
- return memcmp(f0, f1, sizeof *f0) == 0;
+ CLASSERT(sizeof(*f0) ==
+ sizeof(f0->f_seq) +
+ sizeof(f0->f_oid) +
+ sizeof(f0->f_ver));
+ return memcmp(f0, f1, sizeof(*f0)) == 0;
}
#define __diff_normalize(val0, val1) \
@@ -1638,8 +1644,10 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
-#define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
-#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
+#define MAX_MD_SIZE \
+ (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
+#define MIN_MD_SIZE \
+ (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
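
The fid byte-swap helpers and lu_fid_eq() above lean on a compile-time size check: if the structure ever gains a member, or alignment padding, that the conversion code does not handle, the CLASSERT() breaks the build. A standalone sketch of the same idiom with C11 static_assert follows; struct demo_fid is a stand-in, not the real struct lu_fid.

/* Stand-in type for illustration; the check mirrors the CLASSERTs above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_fid {
	uint64_t f_seq;
	uint32_t f_oid;
	uint32_t f_ver;
};

/* every field accounted for, and no alignment padding in between */
static_assert(sizeof(struct demo_fid) ==
	      sizeof(uint64_t) + sizeof(uint32_t) + sizeof(uint32_t),
	      "demo_fid has padding or an unconverted field");

int main(void)
{
	printf("sizeof(struct demo_fid) = %zu\n", sizeof(struct demo_fid));
	return 0;
}
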
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 7020d9cd9eb..bc2b82ffae9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -375,7 +375,7 @@ struct ldlm_namespace {
ldlm_side_t ns_client;
/** Resource hash table for namespace. */
- cfs_hash_t *ns_rs_hash;
+ struct cfs_hash *ns_rs_hash;
/** serialize */
spinlock_t ns_lock;
@@ -1083,7 +1083,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
* Rate-limited version of lock printing function.
*/
#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
- static cfs_debug_limit_state_t _ldlm_cdls; \
+ static struct cfs_debug_limit_state _ldlm_cdls; \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls); \
ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
} while (0)
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index d61c020a464..2feb38b51af 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -197,12 +197,12 @@ struct obd_export {
/** Connection count value from last succesful reconnect rpc */
__u32 exp_conn_cnt;
/** Hash list of all ldlm locks granted on this export */
- cfs_hash_t *exp_lock_hash;
+ struct cfs_hash *exp_lock_hash;
/**
* Hash list for Posix lock deadlock detection, added with
* ldlm_lock::l_exp_flock_hash.
*/
- cfs_hash_t *exp_flock_hash;
+ struct cfs_hash *exp_flock_hash;
struct list_head exp_outstanding_replies;
struct list_head exp_uncommitted_replies;
spinlock_t exp_uncommitted_replies_lock;
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index d9d5814e318..ff119532daf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -590,7 +590,7 @@ fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
static inline void ostid_build_res_name(struct ost_id *oi,
struct ldlm_res_id *name)
{
- memset(name, 0, sizeof *name);
+ memset(name, 0, sizeof(*name));
if (fid_seq_is_mdt0(ostid_seq(oi))) {
name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
index 25f8bfaccef..beccb5e4065 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lite.h
@@ -139,7 +139,11 @@ static inline unsigned long hash_x_index(__u64 hash, int hash64)
{
if (BITS_PER_LONG == 32 && hash64)
hash >>= 32;
- return ~0UL - hash;
+ /* save hash 0 as index 0 because otherwise we'll save it at
+ * page index end (~0UL) and it causes truncate_inode_pages_range()
+ * to loop forever.
+ */
+ return ~0UL - (hash + !hash);
}
/** @} lite */
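
The hash_x_index() hunk above explains why a directory hash of 0 must not map to ~0UL: that is the end-of-range page index, and, per the new comment, parking a page there makes truncate_inode_pages_range() loop forever. A userspace sketch of the before/after mapping follows; the helper names are hypothetical and the 32-bit hash64 shift is omitted.

/* Hypothetical helper names; only the arithmetic mirrors the hunk above. */
#include <stdio.h>

static unsigned long old_hash_x_index(unsigned long long hash)
{
	return ~0UL - (unsigned long)hash;		/* hash 0 -> ~0UL */
}

static unsigned long new_hash_x_index(unsigned long long hash)
{
	/* "+ !hash" steers hash 0 away from the ~0UL sentinel */
	return ~0UL - ((unsigned long)hash + !hash);
}

int main(void)
{
	printf("old: hash 0  -> %#lx (the ~0UL end sentinel)\n",
	       old_hash_x_index(0));
	printf("new: hash 0  -> %#lx\n", new_hash_x_index(0));
	printf("new: hash 42 -> %#lx\n", new_hash_x_index(42));
	return 0;
}
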
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index e947002fae0..72edf01b58a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1427,7 +1427,7 @@ struct nrs_fifo_req {
struct nrs_crrn_net {
struct ptlrpc_nrs_resource cn_res;
cfs_binheap_t *cn_binheap;
- cfs_hash_t *cn_cli_hash;
+ struct cfs_hash *cn_cli_hash;
/**
* Used when a new scheduling round commences, in order to synchronize
* all clients with the new round number.
@@ -1568,7 +1568,7 @@ struct nrs_orr_key {
struct nrs_orr_data {
struct ptlrpc_nrs_resource od_res;
cfs_binheap_t *od_binheap;
- cfs_hash_t *od_obj_hash;
+ struct cfs_hash *od_obj_hash;
struct kmem_cache *od_cache;
/**
* Used when a new scheduling round commences, in order to synchronize
@@ -2206,7 +2206,7 @@ do { \
#define DEBUG_REQ(level, req, fmt, args...) \
do { \
if ((level) & (D_ERROR | D_WARNING)) { \
- static cfs_debug_limit_state_t cdls; \
+ static struct cfs_debug_limit_state cdls; \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
} else { \
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index a6122559d55..d0aea15b7c3 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -177,7 +177,7 @@ static inline int lov_stripe_md_cmp(struct lov_stripe_md *m1,
* ->lsm_wire contains padding, but it should be zeroed out during
* allocation.
*/
- return memcmp(&m1->lsm_wire, &m2->lsm_wire, sizeof m1->lsm_wire);
+ return memcmp(&m1->lsm_wire, &m2->lsm_wire, sizeof(m1->lsm_wire));
}
static inline int lov_lum_lsm_cmp(struct lov_user_md *lum,
@@ -429,7 +429,7 @@ struct client_obd {
/* ptlrpc work for writeback in ptlrpcd context */
void *cl_writeback_work;
/* hash tables for osc_quota_info */
- cfs_hash_t *cl_quota_hash[MAXQUOTAS];
+ struct cfs_hash *cl_quota_hash[MAXQUOTAS];
};
#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
@@ -556,7 +556,7 @@ struct lov_obd {
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
int lov_pool_count;
- cfs_hash_t *lov_pools_hash_body; /* used for key access */
+ struct cfs_hash *lov_pools_hash_body; /* used for key access */
struct list_head lov_pool_list; /* used for sequential access */
struct proc_dir_entry *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
@@ -855,11 +855,11 @@ struct obd_device {
* protection of other bits using _bh lock */
unsigned long obd_recovery_expired:1;
/* uuid-export hash body */
- cfs_hash_t *obd_uuid_hash;
+ struct cfs_hash *obd_uuid_hash;
/* nid-export hash body */
- cfs_hash_t *obd_nid_hash;
+ struct cfs_hash *obd_nid_hash;
/* nid stats body */
- cfs_hash_t *obd_nid_stats_hash;
+ struct cfs_hash *obd_nid_stats_hash;
struct list_head obd_nid_stats;
atomic_t obd_refcount;
wait_queue_head_t obd_refcount_waitq;
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 03e6133ef50..9697e7faff2 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -633,8 +633,8 @@ do { \
#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, __GFP_IO)
#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_IOFS)
-#define OBD_ALLOC_PTR(ptr) OBD_ALLOC(ptr, sizeof *(ptr))
-#define OBD_ALLOC_PTR_WAIT(ptr) OBD_ALLOC_WAIT(ptr, sizeof *(ptr))
+#define OBD_ALLOC_PTR(ptr) OBD_ALLOC(ptr, sizeof(*(ptr)))
+#define OBD_ALLOC_PTR_WAIT(ptr) OBD_ALLOC_WAIT(ptr, sizeof(*(ptr)))
#define OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, gfp_mask) \
__OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, gfp_mask)
@@ -643,7 +643,7 @@ do { \
OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
#define OBD_CPT_ALLOC_PTR(ptr, cptab, cpt) \
- OBD_CPT_ALLOC(ptr, cptab, cpt, sizeof *(ptr))
+ OBD_CPT_ALLOC(ptr, cptab, cpt, sizeof(*(ptr)))
# define __OBD_VMALLOC_VEROBSE(ptr, cptab, cpt, size) \
do { \
@@ -773,7 +773,7 @@ do { \
#define OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, flags) \
__OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, flags)
-#define OBD_FREE_PTR(ptr) OBD_FREE(ptr, sizeof *(ptr))
+#define OBD_FREE_PTR(ptr) OBD_FREE(ptr, sizeof(*(ptr)))
#define OBD_SLAB_FREE(ptr, slab, size) \
do { \
@@ -789,19 +789,19 @@ do { \
OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, __GFP_IO)
#define OBD_SLAB_ALLOC_PTR(ptr, slab) \
- OBD_SLAB_ALLOC(ptr, slab, sizeof *(ptr))
+ OBD_SLAB_ALLOC(ptr, slab, sizeof(*(ptr)))
#define OBD_SLAB_CPT_ALLOC_PTR(ptr, slab, cptab, cpt) \
- OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, sizeof *(ptr))
+ OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, sizeof(*(ptr)))
#define OBD_SLAB_ALLOC_PTR_GFP(ptr, slab, flags) \
- OBD_SLAB_ALLOC_GFP(ptr, slab, sizeof *(ptr), flags)
+ OBD_SLAB_ALLOC_GFP(ptr, slab, sizeof(*(ptr)), flags)
#define OBD_SLAB_CPT_ALLOC_PTR_GFP(ptr, slab, cptab, cpt, flags) \
- OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, sizeof *(ptr), flags)
+ OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, sizeof(*(ptr)), flags)
#define OBD_SLAB_FREE_PTR(ptr, slab) \
- OBD_SLAB_FREE((ptr), (slab), sizeof *(ptr))
+ OBD_SLAB_FREE((ptr), (slab), sizeof(*(ptr)))
#define KEY_IS(str) \
(keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 8ff38c64b7a..e60c04d5393 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -701,7 +701,7 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
- memset(&cio->cui_link, 0, sizeof cio->cui_link);
+ memset(&cio->cui_link, 0, sizeof(cio->cui_link));
if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
descr->cld_mode = CLM_GROUP;
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index c65b13c800f..1de1d8eb9b4 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -125,11 +125,11 @@ static inline __u64 min_u64(__u64 x, __u64 y)
#define interval_for_each(node, root) \
for (node = interval_first(root); node != NULL; \
- node = interval_next(node))
+ node = interval_next(node))
#define interval_for_each_reverse(node, root) \
for (node = interval_last(root); node != NULL; \
- node = interval_prev(node))
+ node = interval_prev(node))
static struct interval_node *interval_first(struct interval_node *node)
{
@@ -239,7 +239,7 @@ static void __rotate_change_maxhigh(struct interval_node *node,
left_max = node->in_left ? node->in_left->in_max_high : 0;
right_max = node->in_right ? node->in_right->in_max_high : 0;
node->in_max_high = max_u64(interval_high(node),
- max_u64(left_max,right_max));
+ max_u64(left_max, right_max));
}
/* The left rotation "pivots" around the link from node to node->right, and
@@ -427,8 +427,9 @@ static void interval_erase_color(struct interval_node *node,
} else {
if (node_is_black_or_0(tmp->in_right)) {
struct interval_node *o_left;
- if ((o_left = tmp->in_left))
- o_left->in_color = INTERVAL_BLACK;
+ o_left = tmp->in_left;
+ if (o_left)
+ o_left->in_color = INTERVAL_BLACK;
tmp->in_color = INTERVAL_RED;
__rotate_right(tmp, root);
tmp = parent->in_right;
@@ -436,7 +437,7 @@ static void interval_erase_color(struct interval_node *node,
tmp->in_color = parent->in_color;
parent->in_color = INTERVAL_BLACK;
if (tmp->in_right)
- tmp->in_right->in_color = INTERVAL_BLACK;
+ tmp->in_right->in_color = INTERVAL_BLACK;
__rotate_left(parent, root);
node = *root;
break;
@@ -457,8 +458,9 @@ static void interval_erase_color(struct interval_node *node,
} else {
if (node_is_black_or_0(tmp->in_left)) {
struct interval_node *o_right;
- if ((o_right = tmp->in_right))
- o_right->in_color = INTERVAL_BLACK;
+ o_right = tmp->in_right;
+ if (o_right)
+ o_right->in_color = INTERVAL_BLACK;
tmp->in_color = INTERVAL_RED;
__rotate_left(tmp, root);
tmp = parent->in_left;
@@ -545,7 +547,7 @@ void interval_erase(struct interval_node *node,
update_maxhigh(child ? : parent, node->in_max_high);
update_maxhigh(node, old->in_max_high);
if (parent == old)
- parent = node;
+ parent = node;
goto color;
}
parent = node->in_parent;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 7e316637369..ac5d66aa7f0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -144,7 +144,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
l->l_tree_node = NULL;
list_del_init(&l->l_sl_policy);
- return (list_empty(&n->li_group) ? n : NULL);
+ return list_empty(&n->li_group) ? n : NULL;
}
static inline int lock_mode_to_index(ldlm_mode_t mode)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index c68ed276633..39fcdacc51e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -745,7 +745,7 @@ void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
* Export handle<->flock hash operations.
*/
static unsigned
-ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_u64_hash(*(__u64 *)key, mask);
}
@@ -772,7 +772,7 @@ ldlm_export_flock_object(struct hlist_node *hnode)
}
static void
-ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
struct ldlm_flock *flock;
@@ -787,7 +787,7 @@ ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
struct ldlm_flock *flock;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 6133b3f3471..3900a69742f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -529,7 +529,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
lock_res_nested(oldres, LRT_NEW);
}
LASSERT(memcmp(new_resid, &oldres->lr_name,
- sizeof oldres->lr_name) != 0);
+ sizeof(oldres->lr_name)) != 0);
lock->l_resource = newres;
unlock_res(oldres);
unlock_res_and_lock(lock);
@@ -1891,7 +1891,7 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
return LDLM_ITER_CONTINUE;
}
-static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2040,7 +2040,7 @@ struct export_cl_data {
* Iterator function for ldlm_cancel_locks_for_export.
* Cancels passed locks.
*/
-int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index a100a0b9638..fde9bcd1d48 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -937,7 +937,7 @@ EXPORT_SYMBOL(ldlm_put_ref);
* Export handle<->lock hash operations.
*/
static unsigned
-ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
}
@@ -973,7 +973,7 @@ ldlm_export_lock_object(struct hlist_node *hnode)
}
static void
-ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
@@ -982,7 +982,7 @@ ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 21cb523ac4a..dcc27840313 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -1652,7 +1652,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *ca
LDLM_LOCK_GET(lock);
spin_unlock(&ns->ns_lock);
- lu_ref_add(&lock->l_reference, __FUNCTION__, current);
+ lu_ref_add(&lock->l_reference, __func__, current);
/* Pass the lock through the policy filter and see if it
* should stay in LRU.
@@ -1670,7 +1670,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *ca
result = pf(ns, lock, unused, added, count);
if (result == LDLM_POLICY_KEEP_LOCK) {
lu_ref_del(&lock->l_reference,
- __FUNCTION__, current);
+ __func__, current);
LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
break;
@@ -1693,7 +1693,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *ca
* by itself, or the lock is no longer unused. */
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference,
- __FUNCTION__, current);
+ __func__, current);
LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
continue;
@@ -1724,7 +1724,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *ca
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __FUNCTION__, current);
+ lu_ref_del(&lock->l_reference, __func__, current);
spin_lock(&ns->ns_lock);
added++;
unused--;
@@ -1925,7 +1925,7 @@ struct ldlm_cli_cancel_arg {
void *lc_opaque;
};
-static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2023,7 +2023,7 @@ static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
return helper->iter(lock, helper->closure);
}
-static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 208751a154b..77e022bf8bc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -159,7 +159,7 @@ static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
{
struct ldlm_namespace *ns = m->private;
__u64 res = 0;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
int i;
/* result is not strictly consistant */
@@ -389,7 +389,7 @@ int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
#endif /* LPROCFS */
-static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
+static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
const void *key, unsigned mask)
{
const struct ldlm_res_id *id = key;
@@ -401,7 +401,7 @@ static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
return val & mask;
}
-static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
+static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
const void *key, unsigned mask)
{
const struct ldlm_res_id *id = key;
@@ -453,7 +453,7 @@ static void *ldlm_res_hop_object(struct hlist_node *hnode)
return hlist_entry(hnode, struct ldlm_resource, lr_hash);
}
-static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -461,7 +461,7 @@ static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
ldlm_resource_getref(res);
}
-static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -470,7 +470,7 @@ static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
ldlm_resource_putref_locked(res);
}
-static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -564,7 +564,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
struct ldlm_namespace *ns = NULL;
struct ldlm_ns_bucket *nsb;
ldlm_ns_hash_def_t *nsd;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
int idx;
int rc;
@@ -743,7 +743,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
} while (1);
}
-static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -756,7 +756,7 @@ static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
return 0;
}
-static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -1060,7 +1060,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
{
struct hlist_node *hnode;
struct ldlm_resource *res;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
__u64 version;
int ns_refcount = 0;
@@ -1115,7 +1115,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
lu_ref_fini(&res->lr_reference);
/* We have taken lr_lvb_mutex. Drop it. */
mutex_unlock(&res->lr_lvb_mutex);
- OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+ OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* Synchronize with regard to resource creation. */
@@ -1183,7 +1183,7 @@ struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
return res;
}
-static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
+static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
struct ldlm_resource *res)
{
struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
@@ -1214,7 +1214,7 @@ static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
int ldlm_resource_putref(struct ldlm_resource *res)
{
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "putref res: %p count: %d\n",
@@ -1226,7 +1226,7 @@ int ldlm_resource_putref(struct ldlm_resource *res)
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
ns->ns_lvbo->lvbo_free(res);
- OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+ OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
return 1;
}
return 0;
@@ -1243,7 +1243,7 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
res, atomic_read(&res->lr_refcount) - 1);
if (atomic_dec_and_test(&res->lr_refcount)) {
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
&res->lr_name, &bd);
@@ -1256,7 +1256,7 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
*/
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
ns->ns_lvbo->lvbo_free(res);
- OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+ OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
return 1;
@@ -1352,7 +1352,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
}
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
-static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 0dd12c8c91b..e3e0578b27f 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -119,25 +119,25 @@ CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
struct cfs_wi_sched *cfs_sched_rehash;
static inline void
-cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
+cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
static inline void
-cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
+cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
static inline void
-cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
+cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
{
spin_lock(&lock->spin);
}
static inline void
-cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
+cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
{
spin_unlock(&lock->spin);
}
static inline void
-cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
+cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
{
if (!exclusive)
read_lock(&lock->rw);
@@ -146,7 +146,7 @@ cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
}
static inline void
-cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
+cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
{
if (!exclusive)
read_unlock(&lock->rw);
@@ -209,7 +209,7 @@ static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
};
static void
-cfs_hash_lock_setup(cfs_hash_t *hs)
+cfs_hash_lock_setup(struct cfs_hash *hs)
{
if (cfs_hash_with_no_lock(hs)) {
hs->hs_lops = &cfs_hash_nl_lops;
@@ -246,13 +246,13 @@ typedef struct {
} cfs_hash_head_t;
static int
-cfs_hash_hh_hhead_size(cfs_hash_t *hs)
+cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
return sizeof(cfs_hash_head_t);
}
static struct hlist_head *
-cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
@@ -260,7 +260,7 @@ cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
}
static int
-cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
@@ -268,7 +268,7 @@ cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
}
static int
-cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
hlist_del_init(hnode);
@@ -285,13 +285,13 @@ typedef struct {
} cfs_hash_head_dep_t;
static int
-cfs_hash_hd_hhead_size(cfs_hash_t *hs)
+cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
return sizeof(cfs_hash_head_dep_t);
}
static struct hlist_head *
-cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
cfs_hash_head_dep_t *head;
@@ -300,7 +300,7 @@ cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
}
static int
-cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
@@ -310,7 +310,7 @@ cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
}
static int
-cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
@@ -329,13 +329,13 @@ typedef struct {
} cfs_hash_dhead_t;
static int
-cfs_hash_dh_hhead_size(cfs_hash_t *hs)
+cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
return sizeof(cfs_hash_dhead_t);
}
static struct hlist_head *
-cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
cfs_hash_dhead_t *head;
@@ -344,7 +344,7 @@ cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
}
static int
-cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
@@ -359,7 +359,7 @@ cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
}
static int
-cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnd)
{
cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
@@ -384,13 +384,13 @@ typedef struct {
} cfs_hash_dhead_dep_t;
static int
-cfs_hash_dd_hhead_size(cfs_hash_t *hs)
+cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
return sizeof(cfs_hash_dhead_dep_t);
}
static struct hlist_head *
-cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
cfs_hash_dhead_dep_t *head;
@@ -399,7 +399,7 @@ cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
}
static int
-cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
@@ -414,7 +414,7 @@ cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
}
static int
-cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnd)
{
cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
@@ -457,7 +457,7 @@ static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
};
static void
-cfs_hash_hlist_setup(cfs_hash_t *hs)
+cfs_hash_hlist_setup(struct cfs_hash *hs)
{
if (cfs_hash_with_add_tail(hs)) {
hs->hs_hops = cfs_hash_with_depth(hs) ?
@@ -469,8 +469,8 @@ cfs_hash_hlist_setup(cfs_hash_t *hs)
}
static void
-cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
- unsigned int bits, const void *key, cfs_hash_bd_t *bd)
+cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
+ unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
@@ -481,7 +481,7 @@ cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
}
void
-cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
+cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
if (likely(hs->hs_rehash_buckets == NULL)) {
@@ -496,7 +496,7 @@ cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
EXPORT_SYMBOL(cfs_hash_bd_get);
static inline void
-cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
+cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
return;
@@ -519,7 +519,7 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
}
void
-cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
int rc;
@@ -539,7 +539,7 @@ cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
EXPORT_SYMBOL(cfs_hash_bd_add_locked);
void
-cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode)
{
hs->hs_hops->hop_hnode_del(hs, bd, hnode);
@@ -560,11 +560,11 @@ cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
EXPORT_SYMBOL(cfs_hash_bd_del_locked);
void
-cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
- cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
+cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
+ struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
- cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
- cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
+ struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
+ struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
int rc;
if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
@@ -617,7 +617,7 @@ typedef enum cfs_hash_lookup_intent {
} cfs_hash_lookup_intent_t;
static struct hlist_node *
-cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
const void *key, struct hlist_node *hnode,
cfs_hash_lookup_intent_t intent)
@@ -658,7 +658,7 @@ cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
}
struct hlist_node *
-cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
{
return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
CFS_HS_LOOKUP_IT_FIND);
@@ -666,7 +666,7 @@ cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
struct hlist_node *
-cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
{
return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
CFS_HS_LOOKUP_IT_PEEK);
@@ -674,7 +674,7 @@ cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
struct hlist_node *
-cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
const void *key, struct hlist_node *hnode,
int noref)
{
@@ -685,7 +685,7 @@ cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
struct hlist_node *
-cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
const void *key, struct hlist_node *hnode)
{
/* hnode can be NULL, we find the first item with @key */
@@ -695,10 +695,10 @@ cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
static void
-cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
unsigned n, int excl)
{
- cfs_hash_bucket_t *prev = NULL;
+ struct cfs_hash_bucket *prev = NULL;
int i;
/**
@@ -718,10 +718,10 @@ cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
}
static void
-cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
unsigned n, int excl)
{
- cfs_hash_bucket_t *prev = NULL;
+ struct cfs_hash_bucket *prev = NULL;
int i;
cfs_hash_for_each_bd(bds, n, i) {
@@ -733,7 +733,7 @@ cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
}
static struct hlist_node *
-cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
unsigned n, const void *key)
{
struct hlist_node *ehnode;
@@ -749,8 +749,8 @@ cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
}
static struct hlist_node *
-cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds, unsigned n, const void *key,
+cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
+ struct cfs_hash_bd *bds, unsigned n, const void *key,
struct hlist_node *hnode, int noref)
{
struct hlist_node *ehnode;
@@ -770,7 +770,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
if (i == 1) { /* only one bucket */
cfs_hash_bd_add_locked(hs, &bds[0], hnode);
} else {
- cfs_hash_bd_t mybd;
+ struct cfs_hash_bd mybd;
cfs_hash_bd_get(hs, key, &mybd);
cfs_hash_bd_add_locked(hs, &mybd, hnode);
@@ -780,7 +780,7 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
}
static struct hlist_node *
-cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
unsigned n, const void *key,
struct hlist_node *hnode)
{
@@ -797,7 +797,7 @@ cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
}
static void
-cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
int rc;
@@ -815,7 +815,7 @@ cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
bd2->bd_bucket = NULL;
} else if (rc > 0) { /* swab bd1 and bd2 */
- cfs_hash_bd_t tmp;
+ struct cfs_hash_bd tmp;
tmp = *bd2;
*bd2 = *bd1;
@@ -824,7 +824,7 @@ cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
}
void
-cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
+cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
{
/* NB: caller should hold hs_lock.rw if REHASH is set */
cfs_hash_bd_from_key(hs, hs->hs_buckets,
@@ -844,21 +844,21 @@ cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
EXPORT_SYMBOL(cfs_hash_dual_bd_get);
void
-cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
void
-cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
const void *key)
{
return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
@@ -866,7 +866,7 @@ cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
const void *key, struct hlist_node *hnode,
int noref)
{
@@ -876,7 +876,7 @@ cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
const void *key, struct hlist_node *hnode)
{
return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
@@ -884,7 +884,7 @@ cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
static void
-cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
+cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
int bkt_size, int prev_size, int size)
{
int i;
@@ -902,11 +902,11 @@ cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
* needed, the newly allocated buckets if allocation was needed and
* successful, and NULL on error.
*/
-static cfs_hash_bucket_t **
-cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
+static struct cfs_hash_bucket **
+cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
unsigned int old_size, unsigned int new_size)
{
- cfs_hash_bucket_t **new_bkts;
+ struct cfs_hash_bucket **new_bkts;
int i;
LASSERT(old_size == 0 || old_bkts != NULL);
@@ -925,7 +925,7 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
for (i = old_size; i < new_size; i++) {
struct hlist_head *hhead;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
if (new_bkts[i] == NULL) {
@@ -969,7 +969,7 @@ static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
- cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+ struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
int dep;
int bkt;
int off;
@@ -990,13 +990,13 @@ static int cfs_hash_dep_print(cfs_workitem_t *wi)
return 0;
}
-static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
spin_lock_init(&hs->hs_dep_lock);
cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}
-static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
return;
@@ -1012,18 +1012,18 @@ static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
-static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
-static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
+static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
+static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
-cfs_hash_t *
+struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
unsigned bkt_bits, unsigned extra_bytes,
unsigned min_theta, unsigned max_theta,
cfs_hash_ops_t *ops, unsigned flags)
{
- cfs_hash_t *hs;
+ struct cfs_hash *hs;
int len;
CLASSERT(CFS_HASH_THETA_BITS < 15);
@@ -1051,7 +1051,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
len = (flags & CFS_HASH_BIGNAME) == 0 ?
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
- LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
+ LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
if (hs == NULL)
return NULL;
@@ -1084,7 +1084,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
if (hs->hs_buckets != NULL)
return hs;
- LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
+ LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
@@ -1093,11 +1093,11 @@ EXPORT_SYMBOL(cfs_hash_create);
* Cleanup libcfs hash @hs.
*/
static void
-cfs_hash_destroy(cfs_hash_t *hs)
+cfs_hash_destroy(struct cfs_hash *hs)
{
struct hlist_node *hnode;
struct hlist_node *pos;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
int i;
LASSERT(hs != NULL);
@@ -1148,10 +1148,10 @@ cfs_hash_destroy(cfs_hash_t *hs)
0, CFS_HASH_NBKT(hs));
i = cfs_hash_with_bigname(hs) ?
CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
- LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+ LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
}
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
if (atomic_inc_not_zero(&hs->hs_refcount))
return hs;
@@ -1159,7 +1159,7 @@ cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
}
EXPORT_SYMBOL(cfs_hash_getref);
-void cfs_hash_putref(cfs_hash_t *hs)
+void cfs_hash_putref(struct cfs_hash *hs)
{
if (atomic_dec_and_test(&hs->hs_refcount))
cfs_hash_destroy(hs);
@@ -1167,7 +1167,7 @@ void cfs_hash_putref(cfs_hash_t *hs)
EXPORT_SYMBOL(cfs_hash_putref);
static inline int
-cfs_hash_rehash_bits(cfs_hash_t *hs)
+cfs_hash_rehash_bits(struct cfs_hash *hs)
{
if (cfs_hash_with_no_lock(hs) ||
!cfs_hash_with_rehash(hs))
@@ -1204,7 +1204,7 @@ cfs_hash_rehash_bits(cfs_hash_t *hs)
* - too many elements
*/
static inline int
-cfs_hash_rehash_inline(cfs_hash_t *hs)
+cfs_hash_rehash_inline(struct cfs_hash *hs)
{
return !cfs_hash_with_nblk_change(hs) &&
atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
@@ -1215,9 +1215,9 @@ cfs_hash_rehash_inline(cfs_hash_t *hs)
* ops->hs_get function will be called when the item is added.
*/
void
-cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
int bits;
LASSERT(hlist_unhashed(hnode));
@@ -1238,11 +1238,11 @@ cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
EXPORT_SYMBOL(cfs_hash_add);
static struct hlist_node *
-cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
+cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode, int noref)
{
struct hlist_node *ehnode;
- cfs_hash_bd_t bds[2];
+ struct cfs_hash_bd bds[2];
int bits = 0;
LASSERT(hlist_unhashed(hnode));
@@ -1270,7 +1270,7 @@ cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
* Returns 0 on success or -EALREADY on key collisions.
*/
int
-cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
-EALREADY : 0;
@@ -1284,7 +1284,7 @@ EXPORT_SYMBOL(cfs_hash_add_unique);
* Otherwise ops->hs_get is called on the item which was added.
*/
void *
-cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
struct hlist_node *hnode)
{
hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
@@ -1301,11 +1301,11 @@ EXPORT_SYMBOL(cfs_hash_findadd_unique);
* on the removed object.
*/
void *
-cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
void *obj = NULL;
int bits = 0;
- cfs_hash_bd_t bds[2];
+ struct cfs_hash_bd bds[2];
cfs_hash_lock(hs, 0);
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1341,7 +1341,7 @@ EXPORT_SYMBOL(cfs_hash_del);
* will be returned and ops->hs_put is called on the removed object.
*/
void *
-cfs_hash_del_key(cfs_hash_t *hs, const void *key)
+cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
return cfs_hash_del(hs, key, NULL);
}
@@ -1356,11 +1356,11 @@ EXPORT_SYMBOL(cfs_hash_del_key);
* in the hash @hs NULL is returned.
*/
void *
-cfs_hash_lookup(cfs_hash_t *hs, const void *key)
+cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
void *obj = NULL;
struct hlist_node *hnode;
- cfs_hash_bd_t bds[2];
+ struct cfs_hash_bd bds[2];
cfs_hash_lock(hs, 0);
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
@@ -1377,7 +1377,7 @@ cfs_hash_lookup(cfs_hash_t *hs, const void *key)
EXPORT_SYMBOL(cfs_hash_lookup);
static void
-cfs_hash_for_each_enter(cfs_hash_t *hs)
+cfs_hash_for_each_enter(struct cfs_hash *hs)
{
LASSERT(!cfs_hash_is_exiting(hs));
@@ -1403,7 +1403,7 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
}
static void
-cfs_hash_for_each_exit(cfs_hash_t *hs)
+cfs_hash_for_each_exit(struct cfs_hash *hs)
{
int remained;
int bits;
@@ -1434,12 +1434,12 @@ cfs_hash_for_each_exit(cfs_hash_t *hs)
* cfs_hash_bd_del_locked
*/
static __u64
-cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
+cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
void *data, int remove_safe)
{
struct hlist_node *hnode;
struct hlist_node *pos;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
__u64 count = 0;
int excl = !!remove_safe;
int loop = 0;
@@ -1492,7 +1492,7 @@ typedef struct {
} cfs_hash_cond_arg_t;
static int
-cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
cfs_hash_cond_arg_t *cond = data;
@@ -1508,7 +1508,7 @@ cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
* any object be reference.
*/
void
-cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
cfs_hash_cond_arg_t arg = {
.func = func,
@@ -1520,7 +1520,7 @@ cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
EXPORT_SYMBOL(cfs_hash_cond_del);
void
-cfs_hash_for_each(cfs_hash_t *hs,
+cfs_hash_for_each(struct cfs_hash *hs,
cfs_hash_for_each_cb_t func, void *data)
{
cfs_hash_for_each_tight(hs, func, data, 0);
@@ -1528,7 +1528,7 @@ cfs_hash_for_each(cfs_hash_t *hs,
EXPORT_SYMBOL(cfs_hash_for_each);
void
-cfs_hash_for_each_safe(cfs_hash_t *hs,
+cfs_hash_for_each_safe(struct cfs_hash *hs,
cfs_hash_for_each_cb_t func, void *data)
{
cfs_hash_for_each_tight(hs, func, data, 1);
@@ -1536,7 +1536,7 @@ cfs_hash_for_each_safe(cfs_hash_t *hs,
EXPORT_SYMBOL(cfs_hash_for_each_safe);
static int
-cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
*(int *)data = 0;
@@ -1544,7 +1544,7 @@ cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
}
int
-cfs_hash_is_empty(cfs_hash_t *hs)
+cfs_hash_is_empty(struct cfs_hash *hs)
{
int empty = 1;
@@ -1554,7 +1554,7 @@ cfs_hash_is_empty(cfs_hash_t *hs)
EXPORT_SYMBOL(cfs_hash_is_empty);
__u64
-cfs_hash_size_get(cfs_hash_t *hs)
+cfs_hash_size_get(struct cfs_hash *hs)
{
return cfs_hash_with_counter(hs) ?
atomic_read(&hs->hs_count) :
@@ -1578,11 +1578,11 @@ EXPORT_SYMBOL(cfs_hash_size_get);
* two cases, so iteration has to be stopped on change.
*/
static int
-cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
+cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
{
struct hlist_node *hnode;
struct hlist_node *tmp;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
__u32 version;
int count = 0;
int stop_on_change;
@@ -1639,7 +1639,7 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
}
int
-cfs_hash_for_each_nolock(cfs_hash_t *hs,
+cfs_hash_for_each_nolock(struct cfs_hash *hs,
cfs_hash_for_each_cb_t func, void *data)
{
if (cfs_hash_with_no_lock(hs) ||
@@ -1672,7 +1672,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_nolock);
* the required locking is in place to prevent concurrent insertions.
*/
int
-cfs_hash_for_each_empty(cfs_hash_t *hs,
+cfs_hash_for_each_empty(struct cfs_hash *hs,
cfs_hash_for_each_cb_t func, void *data)
{
unsigned i = 0;
@@ -1696,12 +1696,12 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
EXPORT_SYMBOL(cfs_hash_for_each_empty);
void
-cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
cfs_hash_for_each_cb_t func, void *data)
{
struct hlist_head *hhead;
struct hlist_node *hnode;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
cfs_hash_for_each_enter(hs);
cfs_hash_lock(hs, 0);
@@ -1731,11 +1731,11 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
* is held so the callback must never sleep.
*/
void
-cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
cfs_hash_for_each_cb_t func, void *data)
{
struct hlist_node *hnode;
- cfs_hash_bd_t bds[2];
+ struct cfs_hash_bd bds[2];
unsigned i;
cfs_hash_lock(hs, 0);
@@ -1772,7 +1772,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
* theta thresholds for @hs are tunable via cfs_hash_set_theta().
*/
void
-cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
+cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
{
int i;
@@ -1801,7 +1801,7 @@ cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
void
-cfs_hash_rehash_cancel(cfs_hash_t *hs)
+cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
cfs_hash_lock(hs, 1);
cfs_hash_rehash_cancel_locked(hs);
@@ -1810,7 +1810,7 @@ cfs_hash_rehash_cancel(cfs_hash_t *hs)
EXPORT_SYMBOL(cfs_hash_rehash_cancel);
int
-cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
+cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
int rc;
@@ -1840,9 +1840,9 @@ cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
EXPORT_SYMBOL(cfs_hash_rehash);
static int
-cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
+cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
- cfs_hash_bd_t new;
+ struct cfs_hash_bd new;
struct hlist_head *hhead;
struct hlist_node *hnode;
struct hlist_node *pos;
@@ -1873,9 +1873,9 @@ cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
static int
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
- cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
- cfs_hash_bucket_t **bkts;
- cfs_hash_bd_t bd;
+ struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
+ struct cfs_hash_bucket **bkts;
+ struct cfs_hash_bd bd;
unsigned int old_size;
unsigned int new_size;
int bsize;
@@ -1965,7 +1965,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
if (bkts != NULL)
cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
if (rc != 0)
- CDEBUG(D_INFO, "early quit of of rehashing: %d\n", rc);
+ CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
/* return 1 only if cfs_wi_exit is called */
return rc == -ESRCH;
}
@@ -1980,12 +1980,12 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
* the registered cfs_hash_get() and cfs_hash_put() functions will
* not be called.
*/
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
void *new_key, struct hlist_node *hnode)
{
- cfs_hash_bd_t bds[3];
- cfs_hash_bd_t old_bds[2];
- cfs_hash_bd_t new_bd;
+ struct cfs_hash_bd bds[3];
+ struct cfs_hash_bd old_bds[2];
+ struct cfs_hash_bd new_bd;
LASSERT(!hlist_unhashed(hnode));
@@ -2028,8 +2028,8 @@ int cfs_hash_debug_header(struct seq_file *m)
}
EXPORT_SYMBOL(cfs_hash_debug_header);
-static cfs_hash_bucket_t **
-cfs_hash_full_bkts(cfs_hash_t *hs)
+static struct cfs_hash_bucket **
+cfs_hash_full_bkts(struct cfs_hash *hs)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
if (hs->hs_rehash_buckets == NULL)
@@ -2041,7 +2041,7 @@ cfs_hash_full_bkts(cfs_hash_t *hs)
}
static unsigned int
-cfs_hash_full_nbkt(cfs_hash_t *hs)
+cfs_hash_full_nbkt(struct cfs_hash *hs)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
if (hs->hs_rehash_buckets == NULL)
@@ -2052,7 +2052,7 @@ cfs_hash_full_nbkt(cfs_hash_t *hs)
CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
+int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
int dist[8] = { 0, };
int maxdep = -1;
@@ -2089,7 +2089,7 @@ int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
* Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
*/
for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
cfs_hash_bd_lock(hs, &bd, 0);
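Most of the hash.c hunks above are the same mechanical typedef removal (cfs_hash_t, cfs_hash_bd_t, cfs_hash_bucket_t and cfs_hash_lock_t become their struct/union spellings). The part worth noting is the lock-wrapper family being retyped: a single union holds either a spinlock or an rwlock, and every wrapper takes an exclusive flag so callers never care which lock flavor the hash was created with. Below is a minimal userspace sketch of that pattern using pthreads instead of libcfs; the demo_* names are invented for the example.

/*
 * Userspace sketch (pthreads, not libcfs) of the locking wrappers retyped
 * in the hash.c hunks above: one lock union plus an "exclusive" flag, so
 * a caller can ask for a shared or exclusive section without knowing
 * whether the hash uses a spinlock or an rwlock underneath.
 */
#include <pthread.h>

union demo_hash_lock {
	pthread_mutex_t  spin;	/* stands in for spinlock_t */
	pthread_rwlock_t rw;
};

static void demo_rw_lock(union demo_hash_lock *lock, int exclusive)
{
	if (!exclusive)
		pthread_rwlock_rdlock(&lock->rw);
	else
		pthread_rwlock_wrlock(&lock->rw);
}

static void demo_rw_unlock(union demo_hash_lock *lock, int exclusive)
{
	/* pthreads uses one unlock call for readers and writers */
	(void)exclusive;
	pthread_rwlock_unlock(&lock->rw);
}

int main(void)
{
	union demo_hash_lock l;

	pthread_rwlock_init(&l.rw, NULL);
	demo_rw_lock(&l, 0);	/* shared section */
	demo_rw_unlock(&l, 0);
	demo_rw_lock(&l, 1);	/* exclusive section */
	demo_rw_unlock(&l, 1);
	pthread_rwlock_destroy(&l.rw);
	return 0;
}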
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index ea9e9490031..0bf8e5d87f1 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -167,7 +167,7 @@ static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
return 0;
down_read(&mm->mmap_sem);
- /* ignore errors, just check how much was sucessfully transfered */
+ /* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, rc, offset;
void *maddr;
@@ -227,8 +227,10 @@ int cfs_get_environ(const char *key, char *value, int *val_len)
* which is already holding mmap_sem for writes. If some other
* thread gets the write lock in the meantime, this thread will
* block, but at least it won't deadlock on itself. LU-1735 */
- if (down_read_trylock(&mm->mmap_sem) == 0)
+ if (down_read_trylock(&mm->mmap_sem) == 0) {
+ kfree(buffer);
return -EDEADLK;
+ }
up_read(&mm->mmap_sem);
addr = mm->env_start;
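The cfs_get_environ() hunk above fixes a real leak rather than style: buffer is allocated before the down_read_trylock() call, so the -EDEADLK early return has to free it. Here is a userspace sketch of the same rule, with malloc/free standing in for the kernel allocators and try_lock() invented to play the role of the failed trylock.

/*
 * Userspace sketch of the leak plugged in cfs_get_environ() above: once a
 * buffer is allocated, every early-return path must release it.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool try_lock(void)
{
	return false;		/* pretend the trylock failed, as in LU-1735 */
}

static int get_environ_demo(void)
{
	char *buffer = malloc(4096);

	if (!buffer)
		return -ENOMEM;

	if (!try_lock()) {
		free(buffer);	/* the fix: release the buffer on this path too */
		return -EDEADLK;
	}

	/* ... the real code would copy the environment value here ... */
	free(buffer);
	return 0;
}

int main(void)
{
	return get_environ_demo() == -EDEADLK ? 0 : 1;
}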
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
index ab1e7316847..cc565b1fb99 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
@@ -137,7 +137,7 @@ void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
char *argv[6];
char buf[32];
- snprintf (buf, sizeof buf, "%d", msgdata->msg_line);
+ snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
argv[1] = "LBUG";
argv[2] = (char *)msgdata->msg_file;
diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lustre/libcfs/prng.c
index 69224d84bc4..f87e9e51654 100644
--- a/drivers/staging/lustre/lustre/libcfs/prng.c
+++ b/drivers/staging/lustre/lustre/libcfs/prng.c
@@ -70,7 +70,7 @@ static unsigned int seed_y = 362436069;
* cfs_rand - creates new seeds
*
* First it creates new seeds from the previous seeds. Then it generates a
- * new psuedo random number for use.
+ * new pseudo random number for use.
*
* Returns a pseudo-random 32-bit integer
*/
@@ -84,7 +84,7 @@ unsigned int cfs_rand(void)
EXPORT_SYMBOL(cfs_rand);
/**
- * cfs_srand - sets the inital seed
+ * cfs_srand - sets the initial seed
* @seed1 : (seed_x) should have the most entropy in the low bits of the word
* @seed2 : (seed_y) should have the most entropy in the high bits of the word
*
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 357f40079ae..f71a3cc63ad 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -276,7 +276,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
int remain;
int mask = msgdata->msg_mask;
const char *file = kbasename(msgdata->msg_file);
- cfs_debug_limit_state_t *cdls = msgdata->msg_cdls;
+ struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
tcd = cfs_trace_get_tcd();
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 09844be5eec..1f079034bd8 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -743,7 +743,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
/* In the following we use the fact that LOV_USER_MAGIC_V1 and
LOV_USER_MAGIC_V3 have the same initial fields so we do not
- need the make the distiction between the 2 versions */
+ need to make the distinction between the 2 versions */
if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
char *param = NULL;
char *buf;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index bc534db1243..fb85a58db05 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -364,7 +364,7 @@ static int ll_intent_file_open(struct file *file, void *lmm,
that case that lock is also ok */
/* We can also get here if there was cached open handle in revalidate_it
* but it disappeared while we were getting from there to ll_file_open.
- * But this means this file was closed and immediatelly opened which
+ * But this means this file was closed and immediately opened which
* makes a good candidate for using OPEN lock */
/* If lmmsize & lmm are not 0, we are just setting stripe info
* parameters. No need for the open lock */
@@ -2456,8 +2456,8 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
if (rc == -ENOENT) {
clear_nlink(inode);
/* This path cannot be hit for regular files unless in
- * case of obscure races, so no need to to validate
- * size. */
+ * case of obscure races, so no need to validate size.
+ */
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return 0;
} else if (rc != 0) {
@@ -3011,7 +3011,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
/* set layout to file. Unlikely this will fail as old layout was
* surely eliminated */
- memset(&conf, 0, sizeof conf);
+ memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = inode;
conf.coc_lock = lock;
@@ -3034,7 +3034,7 @@ out:
ll_get_fsname(inode->i_sb, NULL, 0),
inode, PFID(&lli->lli_fid));
- memset(&conf, 0, sizeof conf);
+ memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_WAIT;
conf.coc_inode = inode;
rc = ll_layout_conf(inode, &conf);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 1f5825c87a7..e2996c46b2c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -159,7 +159,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
}
if (flags & LLIF_DONE_WRITING) {
/* Some pages are still dirty, it is early to send
- * DONE_WRITE. Wait untill all pages will be flushed
+ * DONE_WRITE. Wait until all pages will be flushed
* and try DONE_WRITE again later. */
LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
lli->lli_flags |= LLIF_DONE_WRITING;
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b868c2bd58d..fd584ff7e2d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1973,10 +1973,10 @@ void ll_umount_begin(struct super_block *sb)
OBD_ALLOC_PTR(ioc_data);
if (ioc_data) {
obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
- sizeof *ioc_data, ioc_data, NULL);
+ sizeof(*ioc_data), ioc_data, NULL);
obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
- sizeof *ioc_data, ioc_data, NULL);
+ sizeof(*ioc_data), ioc_data, NULL);
OBD_FREE_PTR(ioc_data);
}
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 2340458b8a0..e2421ea6135 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -337,8 +337,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
return count;
}
-static ll_mrf_ret
-loop_make_request(struct request_queue *q, struct bio *old_bio)
+static void loop_make_request(struct request_queue *q, struct bio *old_bio)
{
struct lloop_device *lo = q->queuedata;
int rw = bio_rw(old_bio);
@@ -366,10 +365,9 @@ loop_make_request(struct request_queue *q, struct bio *old_bio)
goto err;
}
loop_add_bio(lo, old_bio);
- LL_MRF_RETURN(0);
+ return;
err:
cfs_bio_io_error(old_bio, old_bio->bi_size);
- LL_MRF_RETURN(0);
}
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index d4d3c17547c..4bf09c4a0c9 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1063,7 +1063,7 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
return 0;
}
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
+ now.tv_sec, (unsigned long)now.tv_usec);
seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
"extents", "calls", "%", "cum%",
@@ -1127,7 +1127,7 @@ static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
return 0;
}
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
+ now.tv_sec, (unsigned long)now.tv_usec);
seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
@@ -1293,7 +1293,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
spin_lock(&sbi->ll_process_lock);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
+ now.tv_sec, (unsigned long)now.tv_usec);
seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
"R/W", "PID", "RANGE START", "RANGE END",
"SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 34815b550e7..90bbdae824a 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -83,7 +83,7 @@ int ll_unlock(__u32 mode, struct lustre_handle *lockh)
}
-/* called from iget5_locked->find_inode() under inode_lock spinlock */
+/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */
static int ll_test_inode(struct inode *inode, void *opaque)
{
struct ll_inode_info *lli = ll_i2info(inode);
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index ae0dc441d1d..e9ba38a553c 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -717,7 +717,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
lli = ll_i2info(inode);
clob = lli->lli_clob;
- memset(ria, 0, sizeof *ria);
+ memset(ria, 0, sizeof(*ria));
cl_object_attr_lock(clob);
ret = cl_object_attr_get(env, clob, attr);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 96c29ad2fc8..7e3e0967993 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -202,11 +202,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
if (*pages) {
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
- *max_pages, (rw == READ), 0, *pages,
- NULL);
- up_read(&current->mm->mmap_sem);
+ result = get_user_pages_fast(user_addr, *max_pages,
+ (rw == READ), *pages);
if (unlikely(result <= 0))
OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
}
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 8eaa38e91b9..f6b5f4b95f3 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -647,7 +647,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
*/
LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
- /* XXX: No fid in reply, this is probaly cross-ref case.
+ /* XXX: No fid in reply, this is probably cross-ref case.
* SA can't handle it yet. */
if (body->valid & OBD_MD_MDS)
GOTO(out, rc = -EAGAIN);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index be125b98b7f..c4d1580b7be 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -297,7 +297,7 @@ static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}
-static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
struct vvp_pgcache_id *id = data;
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 33d9ce68fed..4276124d92e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -564,50 +564,50 @@ extern struct kmem_cache *lovsub_req_kmem;
extern struct kmem_cache *lov_lock_link_kmem;
-int lov_object_init (const struct lu_env *env, struct lu_object *obj,
+int lov_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf);
-int lovsub_object_init (const struct lu_env *env, struct lu_object *obj,
+int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf);
-int lov_lock_init (const struct lu_env *env, struct cl_object *obj,
+int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init (const struct lu_env *env, struct cl_object *obj,
+int lov_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
-int lovsub_lock_init (const struct lu_env *env, struct cl_object *obj,
+int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
-int lov_lock_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
-int lov_lock_init_empty (const struct lu_env *env, struct cl_object *obj,
+int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
-int lov_io_init_empty (const struct lu_env *env, struct cl_object *obj,
+int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
-void lov_lock_unlink (const struct lu_env *env, struct lov_lock_link *link,
+void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link,
struct lovsub_lock *sub);
struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
int stripe);
-void lov_sub_put (struct lov_io_sub *sub);
-int lov_sublock_modify (const struct lu_env *env, struct lov_lock *lov,
+void lov_sub_put(struct lov_io_sub *sub);
+int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
struct lovsub_lock *sublock,
const struct cl_lock_descr *d, int idx);
-int lov_page_init (const struct lu_env *env, struct cl_object *ob,
+int lov_page_init(const struct lu_env *env, struct cl_object *ob,
struct cl_page *page, struct page *vmpage);
-int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
+int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
struct cl_page *page, struct page *vmpage);
-int lov_page_init_empty (const struct lu_env *env,
+int lov_page_init_empty(const struct lu_env *env,
struct cl_object *obj,
struct cl_page *page, struct page *vmpage);
-int lov_page_init_raid0 (const struct lu_env *env,
+int lov_page_init_raid0(const struct lu_env *env,
struct cl_object *obj,
struct cl_page *page, struct page *vmpage);
-struct lu_object *lov_object_alloc (const struct lu_env *env,
+struct lu_object *lov_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
@@ -617,7 +617,7 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lov_lock *lck,
struct lovsub_lock *sub);
-struct lov_io_sub *lov_page_subio (const struct lu_env *env,
+struct lov_io_sub *lov_page_subio(const struct lu_env *env,
struct lov_io *lio,
const struct cl_page_slice *slice);
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index a4006ef46ad..1f33b04b0c3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -44,6 +44,8 @@
#include <obd_class.h>
#include "lov_cl_internal.h"
+#include "lov_internal.h"
+
struct kmem_cache *lov_lock_kmem;
struct kmem_cache *lov_object_kmem;
@@ -64,47 +66,47 @@ struct lu_kmem_descr lov_caches[] = {
{
.ckd_cache = &lov_lock_kmem,
.ckd_name = "lov_lock_kmem",
- .ckd_size = sizeof (struct lov_lock)
+ .ckd_size = sizeof(struct lov_lock)
},
{
.ckd_cache = &lov_object_kmem,
.ckd_name = "lov_object_kmem",
- .ckd_size = sizeof (struct lov_object)
+ .ckd_size = sizeof(struct lov_object)
},
{
.ckd_cache = &lov_thread_kmem,
.ckd_name = "lov_thread_kmem",
- .ckd_size = sizeof (struct lov_thread_info)
+ .ckd_size = sizeof(struct lov_thread_info)
},
{
.ckd_cache = &lov_session_kmem,
.ckd_name = "lov_session_kmem",
- .ckd_size = sizeof (struct lov_session)
+ .ckd_size = sizeof(struct lov_session)
},
{
.ckd_cache = &lov_req_kmem,
.ckd_name = "lov_req_kmem",
- .ckd_size = sizeof (struct lov_req)
+ .ckd_size = sizeof(struct lov_req)
},
{
.ckd_cache = &lovsub_lock_kmem,
.ckd_name = "lovsub_lock_kmem",
- .ckd_size = sizeof (struct lovsub_lock)
+ .ckd_size = sizeof(struct lovsub_lock)
},
{
.ckd_cache = &lovsub_object_kmem,
.ckd_name = "lovsub_object_kmem",
- .ckd_size = sizeof (struct lovsub_object)
+ .ckd_size = sizeof(struct lovsub_object)
},
{
.ckd_cache = &lovsub_req_kmem,
.ckd_name = "lovsub_req_kmem",
- .ckd_size = sizeof (struct lovsub_req)
+ .ckd_size = sizeof(struct lovsub_req)
},
{
.ckd_cache = &lov_lock_link_kmem,
.ckd_name = "lov_lock_link_kmem",
- .ckd_size = sizeof (struct lov_lock_link)
+ .ckd_size = sizeof(struct lov_lock_link)
},
{
.ckd_cache = NULL
@@ -286,7 +288,7 @@ static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
OBD_FREE_PTR(em);
}
}
- OBD_FREE(emrg, nr * sizeof emrg[0]);
+ OBD_FREE(emrg, nr * sizeof(emrg[0]));
}
static struct lu_device *lov_device_free(const struct lu_env *env,
@@ -297,7 +299,7 @@ static struct lu_device *lov_device_free(const struct lu_env *env,
cl_device_fini(lu2cl_dev(d));
if (ld->ld_target != NULL)
- OBD_FREE(ld->ld_target, nr * sizeof ld->ld_target[0]);
+ OBD_FREE(ld->ld_target, nr * sizeof(ld->ld_target[0]));
if (ld->ld_emrg != NULL)
lov_emerg_free(ld->ld_emrg, nr);
OBD_FREE_PTR(ld);
@@ -321,7 +323,7 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
int i;
int result;
- OBD_ALLOC(emerg, nr * sizeof emerg[0]);
+ OBD_ALLOC(emerg, nr * sizeof(emerg[0]));
if (emerg == NULL)
return ERR_PTR(-ENOMEM);
for (result = i = 0; i < nr && result == 0; i++) {
@@ -361,7 +363,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
if (sub_size < tgt_size) {
struct lovsub_device **newd;
struct lov_device_emerg **emerg;
- const size_t sz = sizeof newd[0];
+ const size_t sz = sizeof(newd[0]);
emerg = lov_emerg_alloc(tgt_size);
if (IS_ERR(emerg))
@@ -446,7 +448,7 @@ static int lov_process_config(const struct lu_env *env,
cmd = cfg->lcfg_command;
rc = lov_process_config_base(d->ld_obd, cfg, &index, &gen);
if (rc == 0) {
- switch(cmd) {
+ switch (cmd) {
case LCFG_LOV_ADD_OBD:
case LCFG_LOV_ADD_INA:
rc = lov_cl_add_target(env, d, index);
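Besides the whitespace and sizeof() fixes, the lov_dev.c hunks above leave the lu_kmem_descr table pattern intact: an array of cache descriptors, each sized with sizeof(struct ...) and terminated by a NULL entry. A stripped-down userspace sketch of that table shape is below; struct demo_cache_descr and its contents are invented, and the real descriptor also carries the kmem_cache pointer.

/*
 * Userspace sketch of the descriptor-table pattern used by lov_caches[]:
 * { name, object size } records terminated by a NULL name.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_lock   { int a; };
struct demo_object { long b[4]; };

struct demo_cache_descr {
	const char *ckd_name;
	size_t      ckd_size;
};

static const struct demo_cache_descr demo_caches[] = {
	{ .ckd_name = "demo_lock_kmem",   .ckd_size = sizeof(struct demo_lock)   },
	{ .ckd_name = "demo_object_kmem", .ckd_size = sizeof(struct demo_object) },
	{ .ckd_name = NULL }
};

int main(void)
{
	const struct demo_cache_descr *d;

	for (d = demo_caches; d->ckd_name != NULL; d++)
		printf("%s: %zu bytes\n", d->ckd_name, d->ckd_size);
	return 0;
}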
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 16770d14ee0..796da893087 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -89,6 +89,8 @@ struct lov_request_set {
extern struct kmem_cache *lov_oinfo_slab;
+extern struct lu_kmem_descr lov_caches[];
+
void lov_finish_set(struct lov_request_set *set);
static inline void lov_get_reqset(struct lov_request_set *set)
@@ -124,7 +126,7 @@ static inline void lov_llh_put(struct lov_lock_handles *llh)
if (atomic_read(&llh->llh_refcount))
return;
- OBD_FREE_RCU(llh, sizeof *llh +
+ OBD_FREE_RCU(llh, sizeof(*llh) +
sizeof(*llh->llh_handles) * llh->llh_stripe_count,
&llh->llh_handle);
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index b611aa4e9dc..2792fa5c4be 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -86,7 +86,7 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
struct cl_io *parent = lio->lis_cl.cis_io;
- switch(io->ci_type) {
+ switch (io->ci_type) {
case CIT_SETATTR: {
io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
@@ -282,7 +282,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
* when writing a page. -jay
*/
OBD_ALLOC_LARGE(lio->lis_subs,
- lsm->lsm_stripe_count * sizeof lio->lis_subs[0]);
+ lsm->lsm_stripe_count * sizeof(lio->lis_subs[0]));
if (lio->lis_subs != NULL) {
lio->lis_nr_subios = lio->lis_stripe_count;
lio->lis_single_subio_index = -1;
@@ -356,7 +356,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
for (i = 0; i < lio->lis_nr_subios; i++)
lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
OBD_FREE_LARGE(lio->lis_subs,
- lio->lis_nr_subios * sizeof lio->lis_subs[0]);
+ lio->lis_nr_subios * sizeof(lio->lis_subs[0]));
lio->lis_nr_subios = 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index ec297e87c2a..26bc719b6dc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -88,7 +88,7 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
subenv->lse_io = sub->sub_io;
subenv->lse_sub = sub;
} else {
- subenv = (void*)sub;
+ subenv = (void *)sub;
}
}
return subenv;
@@ -167,7 +167,7 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
lov_sublock_env_put(subenv);
} else {
/* error occurs. */
- sublock = (void*)subenv;
+ sublock = (void *)subenv;
}
if (!IS_ERR(sublock))
@@ -313,7 +313,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
nr++;
}
LASSERT(nr > 0);
- OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
+ OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof(lck->lls_sub[0]));
if (lck->lls_sub == NULL)
return -ENOMEM;
@@ -474,7 +474,7 @@ static void lov_lock_fini(const struct lu_env *env,
*/
LASSERT(lck->lls_sub[i].sub_lock == NULL);
OBD_FREE_LARGE(lck->lls_sub,
- lck->lls_nr * sizeof lck->lls_sub[0]);
+ lck->lls_nr * sizeof(lck->lls_sub[0]));
}
OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
@@ -742,7 +742,7 @@ static void lov_lock_cancel(const struct lu_env *env,
continue;
}
- switch(sublock->cll_state) {
+ switch (sublock->cll_state) {
case CLS_HELD:
rc = cl_unuse_try(subenv->lse_env, sublock);
lov_sublock_release(env, lck, i, 0, 0);
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 0b47aba1332..4783450774c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -554,7 +554,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
struct lov_tgt_desc **newtgts, **old = NULL;
__u32 newsize, oldsize = 0;
- newsize = max(lov->lov_tgt_size, (__u32)2);
+ newsize = max_t(__u32, lov->lov_tgt_size, 2);
while (newsize < index + 1)
newsize = newsize << 1;
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
@@ -923,7 +923,7 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
int cmd;
int rc = 0;
- switch(cmd = lcfg->lcfg_command) {
+ switch (cmd = lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD:
case LCFG_LOV_ADD_INA:
case LCFG_LOV_DEL_OBD: {
@@ -1090,7 +1090,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
if (rc)
GOTO(out, rc);
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
if (oa->o_valid & OBD_MD_FLCOOKIE)
@@ -1141,7 +1141,7 @@ static int lov_getattr(const struct lu_env *env, struct obd_export *exp,
if (rc)
return rc;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
@@ -1227,7 +1227,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
if (!list_empty(&rqset->set_requests)) {
LASSERT(rc == 0);
- LASSERT (rqset->set_interpret == NULL);
+ LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_getattr_interpret;
rqset->set_arg = (void *)lovset;
return rc;
@@ -1267,7 +1267,7 @@ static int lov_setattr(const struct lu_env *env, struct obd_export *exp,
if (rc)
return rc;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
rc = obd_setattr(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1408,7 +1408,7 @@ static int lov_punch(const struct lu_env *env, struct obd_export *exp,
if (rc)
return rc;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
rc = obd_punch(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1472,7 +1472,7 @@ static int lov_sync(const struct lu_env *env, struct obd_export *exp,
CDEBUG(D_INFO, "fsync objid "DOSTID" ["LPX64", "LPX64"]\n",
POSTID(&set->set_oi->oi_oa->o_oi), start, end);
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
rc = obd_sync(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1557,7 +1557,7 @@ static int lov_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
if (rc)
return rc;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
struct obd_export *sub_exp;
struct brw_page *sub_pga;
req = list_entry(pos, struct lov_request, rq_link);
@@ -1612,7 +1612,7 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
if (rc)
return rc;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
rc = obd_enqueue(lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1828,7 +1828,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
if (rc)
return rc;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, max_age, rqset);
@@ -1909,7 +1909,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
(int) sizeof(struct obd_uuid))))
return -EFAULT;
- flags = uarg ? *(__u32*)uarg : 0;
+ flags = uarg ? *(__u32 *)uarg : 0;
/* got statfs data */
rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
@@ -2495,7 +2495,7 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
GOTO(out, rc);
} else if (KEY_IS(KEY_CONNECT_FLAG)) {
struct lov_tgt_desc *tgt;
- __u64 ost_idx = *((__u64*)val);
+ __u64 ost_idx = *((__u64 *)val);
LASSERT(*vallen == sizeof(__u64));
LASSERT(ost_idx < lov->desc.ld_tgt_count);
@@ -2564,7 +2564,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
for (i = 0; i < count; i++, val = (char *)val + incr) {
if (next_id) {
- tgt = lov->lov_tgts[((struct obd_id_info*)val)->idx];
+ tgt = lov->lov_tgts[((struct obd_id_info *)val)->idx];
} else {
tgt = lov->lov_tgts[i];
}
@@ -2593,9 +2593,9 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
} else if (next_id) {
err = obd_set_info_async(env, tgt->ltd_exp,
keylen, key, vallen,
- ((struct obd_id_info*)val)->data, set);
+ ((struct obd_id_info *)val)->data, set);
} else if (capa) {
- struct mds_capa_info *info = (struct mds_capa_info*)val;
+ struct mds_capa_info *info = (struct mds_capa_info *)val;
LASSERT(vallen == sizeof(*info));
@@ -2781,7 +2781,7 @@ struct obd_ops lov_obd_ops = {
.o_setup = lov_setup,
.o_precleanup = lov_precleanup,
.o_cleanup = lov_cleanup,
- //.o_process_config = lov_process_config,
+ /*.o_process_config = lov_process_config,*/
.o_connect = lov_connect,
.o_disconnect = lov_disconnect,
.o_statfs = lov_statfs,
@@ -2823,8 +2823,6 @@ struct obd_ops lov_obd_ops = {
struct kmem_cache *lov_oinfo_slab;
-extern struct lu_kmem_descr lov_caches[];
-
int __init lov_init(void)
{
struct lprocfs_static_vars lvars = { 0 };
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 84e55ce3ccd..cf2fa8abfb1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -214,7 +214,7 @@ static int lov_init_raid0(const struct lu_env *env,
r0->lo_nr = lsm->lsm_stripe_count;
LASSERT(r0->lo_nr <= lov_targets_nr(dev));
- OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
+ OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof(r0->lo_sub[0]));
if (r0->lo_sub != NULL) {
result = 0;
subconf->coc_inode = conf->coc_inode;
@@ -368,7 +368,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
struct lov_layout_raid0 *r0 = &state->raid0;
if (r0->lo_sub != NULL) {
- OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
+ OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof(r0->lo_sub[0]));
r0->lo_sub = NULL;
}
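The OBD_ALLOC_LARGE/OBD_FREE_LARGE hunks above only change "sizeof expr" to the parenthesized "sizeof(expr)" form that checkpatch expects. A minimal standalone sketch of the underlying idiom, sizing an allocation by the element the pointer refers to; the names here are illustrative, not Lustre's:

#include <stdlib.h>
#include <string.h>

struct stripe {
        int idx;
};

int main(void)
{
        size_t nr = 8;
        struct stripe *sub;

        /* size by the pointed-to element, with parentheses around sizeof's operand */
        sub = malloc(nr * sizeof(sub[0]));
        if (!sub)
                return 1;
        memset(sub, 0, nr * sizeof(sub[0]));
        free(sub);
        return 0;
}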
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 55ec26778f8..ec6f6e0572a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -121,7 +121,7 @@ void lov_dump_lmm(int level, void *lmm)
do { \
if (!(test)) lov_dump_lmm(D_ERROR, lmm); \
LASSERT(test); /* so we know what assertion failed */ \
-} while(0)
+} while (0)
/* Pack LOV object metadata for disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
@@ -630,22 +630,22 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
/* FIXME: Bug 1185 - copy fields properly when structs change */
/* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
- CLASSERT(sizeof lum.lmm_objects[0] == sizeof lmmk->lmm_objects[0]);
+ CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
(lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
lustre_swab_lov_mds_md(lmmk);
lustre_swab_lov_user_md_objects(
- (struct lov_user_ost_data*)lmmk->lmm_objects,
+ (struct lov_user_ost_data *)lmmk->lmm_objects,
lmmk->lmm_stripe_count);
}
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User request for v1, we need skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove((char*)(&lmmk->lmm_stripe_count) +
+ memmove((char *)(&lmmk->lmm_stripe_count) +
sizeof(lmmk->lmm_stripe_count),
- ((struct lov_mds_md_v3*)lmmk)->lmm_objects,
+ ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
lmmk->lmm_stripe_count *
sizeof(struct lov_ost_data_v1));
lmm_size -= LOV_MAXPOOLNAME;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index dd3c07d5c4d..a1701dfe408 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -86,7 +86,7 @@ void lov_pool_putref_locked(struct pool_desc *pool)
* Chapter 6.4.
* Addison Wesley, 1973
*/
-static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
{
int i;
__u32 result;
@@ -125,7 +125,7 @@ static void *pool_hashobject(struct hlist_node *hnode)
return hlist_entry(hnode, struct pool_desc, pool_hash);
}
-static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct pool_desc *pool;
@@ -133,7 +133,7 @@ static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
lov_pool_getref(pool);
}
-static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
struct hlist_node *hnode)
{
struct pool_desc *pool;
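The cfs_hash_t to struct cfs_hash conversions above (and throughout the obdclass hunks below) follow the kernel coding-style preference for spelled-out struct tags over _t typedefs. A tiny illustrative sketch with hypothetical names, not the libcfs API:

/* Callers only need an opaque forward declaration ... */
struct obj_cache;

/* ... and the struct tag documents the type at every call site. */
int obj_cache_insert(struct obj_cache *cache, unsigned long key);

/*
 * The discouraged form hides the same thing behind a typedef:
 *     typedef struct obj_cache obj_cache_t;
 *     int obj_cache_insert(obj_cache_t *cache, unsigned long key);
 */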
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 61e6d0b46c9..bf324ae2946 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -245,7 +245,7 @@ int lov_update_enqueue_set(struct lov_request *req, __u32 mode, int rc)
osc_update_enqueue(lov_lockhp, loi, oi->oi_flags,
&req->rq_oi.oi_md->lsm_oinfo[0]->loi_lvb, mode, rc);
if (rc == ELDLM_LOCK_ABORTED && (oi->oi_flags & LDLM_FL_HAS_INTENT))
- memset(lov_lockhp, 0, sizeof *lov_lockhp);
+ memset(lov_lockhp, 0, sizeof(*lov_lockhp));
rc = lov_update_enqueue_lov(set->set_exp, lov_lockhp, loi, oi->oi_flags,
req->rq_idx, &oi->oi_md->lsm_oi, rc);
lov_stripe_unlock(oi->oi_md);
@@ -343,7 +343,7 @@ static struct lov_lock_handles *lov_llh_new(struct lov_stripe_md *lsm)
{
struct lov_lock_handles *llh;
- OBD_ALLOC(llh, sizeof *llh +
+ OBD_ALLOC(llh, sizeof(*llh) +
sizeof(*llh->llh_handles) * lsm->lsm_stripe_count);
if (llh == NULL)
return NULL;
@@ -630,7 +630,7 @@ static int common_attr_done(struct lov_request_set *set)
if (tmp_oa == NULL)
GOTO(out, rc = -ENOMEM);
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
if (!req->rq_complete || req->rq_rc)
@@ -669,7 +669,7 @@ static int brw_done(struct lov_request_set *set)
struct list_head *pos;
struct lov_request *req;
- list_for_each (pos, &set->set_list) {
+ list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
if (!req->rq_complete || req->rq_rc)
@@ -1315,7 +1315,7 @@ out_set:
(tot) = LOV_U64_MAX; \
else \
(tot) += (add); \
- } while(0)
+ } while (0)
int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
{
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt.c b/drivers/staging/lustre/lustre/lvfs/fsfilt.c
index e86df7356cb..0d6ed69ddb2 100644
--- a/drivers/staging/lustre/lustre/lvfs/fsfilt.c
+++ b/drivers/staging/lustre/lustre/lvfs/fsfilt.c
@@ -50,9 +50,8 @@ static struct fsfilt_operations *fsfilt_search_type(const char *type)
list_for_each(p, &fsfilt_types) {
found = list_entry(p, struct fsfilt_operations, fs_list);
- if (!strcmp(found->fs_type, type)) {
+ if (!strcmp(found->fs_type, type))
return found;
- }
}
return NULL;
}
@@ -62,7 +61,8 @@ int fsfilt_register_ops(struct fsfilt_operations *fs_ops)
struct fsfilt_operations *found;
/* lock fsfilt_types list */
- if ((found = fsfilt_search_type(fs_ops->fs_type))) {
+ found = fsfilt_search_type(fs_ops->fs_type);
+ if (found) {
if (found != fs_ops) {
CERROR("different operations for type %s\n",
fs_ops->fs_type);
@@ -103,14 +103,16 @@ struct fsfilt_operations *fsfilt_get_ops(const char *type)
struct fsfilt_operations *fs_ops;
/* lock fsfilt_types list */
- if (!(fs_ops = fsfilt_search_type(type))) {
+ fs_ops = fsfilt_search_type(type);
+ if (!fs_ops) {
char name[32];
int rc;
snprintf(name, sizeof(name) - 1, "fsfilt_%s", type);
name[sizeof(name) - 1] = '\0';
- if (!(rc = request_module("%s", name))) {
+ rc = request_module("%s", name);
+ if (!rc) {
fs_ops = fsfilt_search_type(type);
CDEBUG(D_INFO, "Loaded module '%s'\n", name);
if (!fs_ops)
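The fsfilt.c hunks above pull assignments out of if() conditions, another checkpatch rule. A self-contained sketch of the before/after shape, with made-up names:

#include <stdio.h>
#include <string.h>

static const char *lookup(const char *type)
{
        return strcmp(type, "ext4") == 0 ? "fsfilt_ext4" : NULL;
}

int main(void)
{
        const char *ops;

        /* was: if ((ops = lookup("ext4"))) ... */
        ops = lookup("ext4");
        if (ops)
                printf("found %s\n", ops);
        return 0;
}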
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
index 97a8be2300d..b21e40cdaca 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
@@ -154,11 +154,10 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
spin_lock(&stats->ls_lock);
if (stats->ls_biggest_alloc_num <= cpuid)
stats->ls_biggest_alloc_num = cpuid + 1;
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
spin_unlock_irqrestore(&stats->ls_lock, flags);
- } else {
+ else
spin_unlock(&stats->ls_lock);
- }
}
/* initialize the ls_percpu[cpuid] non-zero counter */
for (i = 0; i < stats->ls_num; ++i) {
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index 18e1b47a1d6..09474e7553d 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -65,9 +65,9 @@ EXPORT_SYMBOL(obd_memory);
msg)
# define ASSERT_KERNEL_CTXT(msg) LASSERTF(segment_eq(get_fs(), get_ds()), msg)
#else
-# define ASSERT_CTXT_MAGIC(magic) do {} while(0)
-# define ASSERT_NOT_KERNEL_CTXT(msg) do {} while(0)
-# define ASSERT_KERNEL_CTXT(msg) do {} while(0)
+# define ASSERT_CTXT_MAGIC(magic) do {} while (0)
+# define ASSERT_NOT_KERNEL_CTXT(msg) do {} while (0)
+# define ASSERT_KERNEL_CTXT(msg) do {} while (0)
#endif
static void push_group_info(struct lvfs_run_ctxt *save,
@@ -80,7 +80,8 @@ static void push_group_info(struct lvfs_run_ctxt *save,
struct cred *cred;
task_lock(current);
save->group_info = current_cred()->group_info;
- if ((cred = prepare_creds())) {
+ cred = prepare_creds();
+ if (cred) {
cred->group_info = ginfo;
commit_creds(cred);
}
@@ -96,7 +97,8 @@ static void pop_group_info(struct lvfs_run_ctxt *save,
} else {
struct cred *cred;
task_lock(current);
- if ((cred = prepare_creds())) {
+ cred = prepare_creds();
+ if (cred) {
cred->group_info = save->group_info;
commit_creds(cred);
}
@@ -112,7 +114,7 @@ void push_ctxt(struct lvfs_run_ctxt *save, struct lvfs_run_ctxt *new_ctx,
if (new_ctx->dt != NULL)
return;
- //ASSERT_NOT_KERNEL_CTXT("already in kernel context!\n");
+ /* ASSERT_NOT_KERNEL_CTXT("already in kernel context!\n"); */
ASSERT_CTXT_MAGIC(new_ctx->magic);
OBD_SET_CTXT_MAGIC(save);
@@ -137,7 +139,8 @@ void push_ctxt(struct lvfs_run_ctxt *save, struct lvfs_run_ctxt *new_ctx,
save->luc.luc_fsgid = current_fsgid();
save->luc.luc_cap = current_cap();
- if ((cred = prepare_creds())) {
+ cred = prepare_creds();
+ if (cred) {
cred->uid = uc->luc_uid;
cred->gid = uc->luc_gid;
cred->fsuid = uc->luc_fsuid;
@@ -180,7 +183,8 @@ void pop_ctxt(struct lvfs_run_ctxt *saved, struct lvfs_run_ctxt *new_ctx,
current->fs->umask = saved->luc.luc_umask;
if (uc) {
struct cred *cred;
- if ((cred = prepare_creds())) {
+ cred = prepare_creds();
+ if (cred) {
cred->uid = saved->luc.luc_uid;
cred->gid = saved->luc.luc_gid;
cred->fsuid = saved->luc.luc_fsuid;
@@ -220,7 +224,7 @@ int lustre_rename(struct dentry *dir, struct vfsmount *mnt,
GOTO(put_old, err = PTR_ERR(dchild_new));
err = ll_vfs_rename(dir->d_inode, dchild_old, mnt,
- dir->d_inode, dchild_new, mnt);
+ dir->d_inode, dchild_new, mnt, NULL);
dput(dchild_new);
put_old:
@@ -253,32 +257,32 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
return 0;
switch (field) {
- case LPROCFS_FIELDS_FLAGS_CONFIG:
- ret = header->lc_config;
- break;
- case LPROCFS_FIELDS_FLAGS_SUM:
- ret = lc->lc_sum;
- if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- ret += lc->lc_sum_irq;
- break;
- case LPROCFS_FIELDS_FLAGS_MIN:
- ret = lc->lc_min;
- break;
- case LPROCFS_FIELDS_FLAGS_MAX:
- ret = lc->lc_max;
- break;
- case LPROCFS_FIELDS_FLAGS_AVG:
- ret = (lc->lc_max - lc->lc_min) / 2;
- break;
- case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
- ret = lc->lc_sumsquare;
- break;
- case LPROCFS_FIELDS_FLAGS_COUNT:
- ret = lc->lc_count;
- break;
- default:
- break;
- };
+ case LPROCFS_FIELDS_FLAGS_CONFIG:
+ ret = header->lc_config;
+ break;
+ case LPROCFS_FIELDS_FLAGS_SUM:
+ ret = lc->lc_sum;
+ if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ ret += lc->lc_sum_irq;
+ break;
+ case LPROCFS_FIELDS_FLAGS_MIN:
+ ret = lc->lc_min;
+ break;
+ case LPROCFS_FIELDS_FLAGS_MAX:
+ ret = lc->lc_max;
+ break;
+ case LPROCFS_FIELDS_FLAGS_AVG:
+ ret = (lc->lc_max - lc->lc_min) / 2;
+ break;
+ case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
+ ret = lc->lc_sumsquare;
+ break;
+ case LPROCFS_FIELDS_FLAGS_COUNT:
+ ret = lc->lc_count;
+ break;
+ default:
+ break;
+ }
return ret;
}
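The lprocfs_read_helper hunk above re-indents the switch so the case labels sit at the same level as the switch keyword, and drops the stray semicolon after the closing brace. A compact compilable sketch of that layout, with illustrative names:

#include <stdio.h>

enum field { FIELD_MIN, FIELD_MAX, FIELD_COUNT };

static long read_field(enum field f, long min, long max, long count)
{
        long ret = 0;

        switch (f) {
        case FIELD_MIN:
                ret = min;
                break;
        case FIELD_MAX:
                ret = max;
                break;
        case FIELD_COUNT:
                ret = count;
                break;
        default:
                break;
        }               /* no ';' after the closing brace */
        return ret;
}

int main(void)
{
        printf("%ld\n", read_field(FIELD_MAX, 1, 9, 3));
        return 0;
}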
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 42697934155..e048500edd6 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -1387,7 +1387,7 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
cl_object_put(env, obj);
}
}
- OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
+ OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof(req->crq_o[0]));
}
OBD_FREE_PTR(req);
}
@@ -1452,7 +1452,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
if (req != NULL) {
int result;
- OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
+ OBD_ALLOC(req->crq_o, nr_objects * sizeof(req->crq_o[0]));
if (req->crq_o != NULL) {
req->crq_nrobjs = nr_objects;
req->crq_type = crt;
@@ -1642,7 +1642,7 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
cpu_relax();
}
- POISON(anchor, 0x5a, sizeof *anchor);
+ POISON(anchor, 0x5a, sizeof(*anchor));
return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 7b0e9d26b6c..1a926036724 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -577,9 +577,9 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
* The implementation of using hash table to connect cl_env and thread
*/
-static cfs_hash_t *cl_env_hash;
+static struct cfs_hash *cl_env_hash;
-static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+static unsigned cl_env_hops_hash(struct cfs_hash *lh,
const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
@@ -604,7 +604,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
return (key == cle->ce_owner);
}
-static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
+static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
{
struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
LASSERT(cle->ce_magic == &cl_env_init0);
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index b1024a6d37d..4afd962cdb6 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -178,7 +178,7 @@ EXPORT_SYMBOL(obd_alloc_fail);
static inline void obd_data2conn(struct lustre_handle *conn,
struct obd_ioctl_data *data)
{
- memset(conn, 0, sizeof *conn);
+ memset(conn, 0, sizeof(*conn));
conn->cookie = data->ioc_cookie;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 68fe71c8a2a..f6fae16fc7f 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -816,7 +816,7 @@ struct obd_export *class_new_export(struct obd_device *obd,
struct obd_uuid *cluuid)
{
struct obd_export *export;
- cfs_hash_t *hash = NULL;
+ struct cfs_hash *hash = NULL;
int rc = 0;
OBD_ALLOC_PTR(export);
@@ -1384,7 +1384,7 @@ EXPORT_SYMBOL(obd_export_nid2str);
int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
{
- cfs_hash_t *nid_hash;
+ struct cfs_hash *nid_hash;
struct obd_export *doomed_exp = NULL;
int exports_evicted = 0;
@@ -1432,7 +1432,7 @@ EXPORT_SYMBOL(obd_export_evict_by_nid);
int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
{
- cfs_hash_t *uuid_hash;
+ struct cfs_hash *uuid_hash;
struct obd_export *doomed_exp = NULL;
struct obd_uuid doomed_uuid;
int exports_evicted = 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_test.c b/drivers/staging/lustre/lustre/obdclass/llog_test.c
index d9e6d12215f..178f89eccab 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_test.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_test.c
@@ -242,7 +242,7 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
hdr.lrh_len = 8;
hdr.lrh_type = OBD_CFG_REC;
- memset(buf, 0, sizeof buf);
+ memset(buf, 0, sizeof(buf));
rc = llog_write(env, llh, &hdr, NULL, 0, buf, -1);
if (rc < 0) {
CERROR("3b: write 10 records failed at #%d: %d\n",
@@ -277,8 +277,8 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
char buf_even[24];
char buf_odd[32];
- memset(buf_odd, 0, sizeof buf_odd);
- memset(buf_even, 0, sizeof buf_even);
+ memset(buf_odd, 0, sizeof(buf_odd));
+ memset(buf_even, 0, sizeof(buf_even));
if ((i % 2) == 0) {
hdr.lrh_len = 24;
hdr.lrh_type = OBD_CFG_REC;
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index a95f60a4f90..02d76f8dbcb 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -898,7 +898,7 @@ static void lprocfs_free_client_stats(struct nid_stat *client_stat)
void lprocfs_free_per_client_stats(struct obd_device *obd)
{
- cfs_hash_t *hash = obd->obd_nid_stats_hash;
+ struct cfs_hash *hash = obd->obd_nid_stats_hash;
struct nid_stat *stat;
/* we need extra list - because hash_exit called to early */
@@ -1069,7 +1069,7 @@ static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
struct timeval now;
do_gettimeofday(&now);
rc = seq_printf(p, "%-25s %lu.%lu secs.usecs\n",
- "snapshot_time", now.tv_sec, now.tv_usec);
+ "snapshot_time", now.tv_sec, (unsigned long)now.tv_usec);
if (rc < 0)
return rc;
}
@@ -1422,7 +1422,7 @@ void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats)
}
EXPORT_SYMBOL(lprocfs_init_ldlm_stats);
-int lprocfs_exp_print_uuid(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+int lprocfs_exp_print_uuid(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
@@ -1453,7 +1453,7 @@ struct exp_hash_cb_data {
bool first;
};
-int lprocfs_exp_print_hash(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+int lprocfs_exp_print_hash(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *cb_data)
{
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 3a3d5bc5a62..212823ab937 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -71,7 +71,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
struct lu_object_header *top;
struct lu_site *site;
struct lu_object *orig;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
const struct lu_fid *fid;
top = o->lo_header;
@@ -175,8 +175,8 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
top = o->lo_header;
set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
- cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
- cfs_hash_bd_t bd;
+ struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
+ struct cfs_hash_bd bd;
cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
list_del_init(&top->loh_lru);
@@ -306,8 +306,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
struct lu_object_header *h;
struct lu_object_header *temp;
struct lu_site_bkt_data *bkt;
- cfs_hash_bd_t bd;
- cfs_hash_bd_t bd2;
+ struct cfs_hash_bd bd;
+ struct cfs_hash_bd bd2;
struct list_head dispose;
int did_sth;
int start;
@@ -526,7 +526,7 @@ int lu_object_invariant(const struct lu_object *o)
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
- cfs_hash_bd_t *bd,
+ struct cfs_hash_bd *bd,
const struct lu_fid *f,
wait_queue_t *waiter,
__u64 *version)
@@ -589,8 +589,8 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
const struct lu_object_conf *conf)
{
struct lu_object *o;
- cfs_hash_t *hs;
- cfs_hash_bd_t bd;
+ struct cfs_hash *hs;
+ struct cfs_hash_bd bd;
struct lu_site_bkt_data *bkt;
o = lu_object_alloc(env, dev, f, conf);
@@ -618,8 +618,8 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
struct lu_object *o;
struct lu_object *shadow;
struct lu_site *s;
- cfs_hash_t *hs;
- cfs_hash_bd_t bd;
+ struct cfs_hash *hs;
+ struct cfs_hash_bd bd;
__u64 version = 0;
/*
@@ -788,7 +788,7 @@ struct lu_site_print_arg {
};
static int
-lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
@@ -874,7 +874,7 @@ static int lu_htable_order(void)
return bits;
}
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
const void *key, unsigned mask)
{
struct lu_fid *fid = (struct lu_fid *)key;
@@ -914,14 +914,14 @@ static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}
-static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct lu_object_header *h;
h = hlist_entry(hnode, struct lu_object_header, loh_hash);
if (atomic_add_return(1, &h->loh_ref) == 1) {
struct lu_site_bkt_data *bkt;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
cfs_hash_bd_get(hs, &h->loh_fid, &bd);
bkt = cfs_hash_bd_extra_get(hs, &bd);
@@ -929,7 +929,7 @@ static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
}
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
LBUG(); /* we should never called it */
}
@@ -975,12 +975,12 @@ EXPORT_SYMBOL(lu_dev_del_linkage);
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
struct lu_site_bkt_data *bkt;
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
char name[16];
int bits;
int i;
- memset(s, 0, sizeof *s);
+ memset(s, 0, sizeof(*s));
bits = lu_htable_order();
snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
@@ -1110,7 +1110,7 @@ int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
t->ldt_ops->ldto_start(t);
- memset(d, 0, sizeof *d);
+ memset(d, 0, sizeof(*d));
atomic_set(&d->ld_ref, 0);
d->ld_type = t;
lu_ref_init(&d->ld_reference);
@@ -1206,7 +1206,7 @@ EXPORT_SYMBOL(lu_object_add);
*/
int lu_object_header_init(struct lu_object_header *h)
{
- memset(h, 0, sizeof *h);
+ memset(h, 0, sizeof(*h));
atomic_set(&h->loh_ref, 1);
INIT_HLIST_NODE(&h->loh_hash);
INIT_LIST_HEAD(&h->loh_lru);
@@ -1525,7 +1525,7 @@ static void keys_fini(struct lu_context *ctx)
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
key_fini(ctx, i);
- OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
+ OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof(ctx->lc_value[0]));
ctx->lc_value = NULL;
}
@@ -1574,7 +1574,8 @@ static int keys_fill(struct lu_context *ctx)
static int keys_init(struct lu_context *ctx)
{
- OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
+ OBD_ALLOC(ctx->lc_value,
+ ARRAY_SIZE(lu_keys) * sizeof(ctx->lc_value[0]));
if (likely(ctx->lc_value != NULL))
return keys_fill(ctx);
@@ -1588,7 +1589,7 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
{
int rc;
- memset(ctx, 0, sizeof *ctx);
+ memset(ctx, 0, sizeof(*ctx));
ctx->lc_state = LCS_INITIALIZED;
ctx->lc_tags = tags;
if (tags & LCT_REMEMBER) {
@@ -1787,10 +1788,10 @@ typedef struct lu_site_stats{
unsigned lss_busy;
} lu_site_stats_t;
-static void lu_site_stats_get(cfs_hash_t *hs,
+static void lu_site_stats_get(struct cfs_hash *hs,
lu_site_stats_t *stats, int populated)
{
- cfs_hash_bd_t bd;
+ struct cfs_hash_bd bd;
int i;
cfs_hash_for_each_bucket(hs, &bd, i) {
@@ -2071,8 +2072,8 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
struct lu_site_bkt_data *bkt;
struct lu_object *shadow;
wait_queue_t waiter;
- cfs_hash_t *hs;
- cfs_hash_bd_t bd;
+ struct cfs_hash *hs;
+ struct cfs_hash_bd bd;
__u64 version = 0;
LASSERT(fid_is_zero(old));
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index d0a64ff5358..362ae541b20 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -417,7 +417,7 @@ int class_attach(struct lustre_cfg *lcfg)
/* do the attach */
if (OBP(obd, attach)) {
- rc = OBP(obd,attach)(obd, sizeof *lcfg, lcfg);
+ rc = OBP(obd, attach)(obd, sizeof(*lcfg), lcfg);
if (rc)
GOTO(out, rc = -EINVAL);
}
@@ -900,7 +900,7 @@ void class_del_profile(const char *prof)
OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
if (lprof->lp_md)
OBD_FREE(lprof->lp_md, strlen(lprof->lp_md) + 1);
- OBD_FREE(lprof, sizeof *lprof);
+ OBD_FREE(lprof, sizeof(*lprof));
}
}
EXPORT_SYMBOL(class_del_profile);
@@ -916,7 +916,7 @@ void class_del_profiles(void)
OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
if (lprof->lp_md)
OBD_FREE(lprof->lp_md, strlen(lprof->lp_md) + 1);
- OBD_FREE(lprof, sizeof *lprof);
+ OBD_FREE(lprof, sizeof(*lprof));
}
}
EXPORT_SYMBOL(class_del_profiles);
@@ -1692,7 +1692,7 @@ EXPORT_SYMBOL(class_manual_cleanup);
*/
static unsigned
-uuid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
sizeof(((struct obd_uuid *)key)->uuid), mask);
@@ -1731,7 +1731,7 @@ uuid_export_object(struct hlist_node *hnode)
}
static void
-uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
@@ -1740,7 +1740,7 @@ uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
@@ -1763,7 +1763,7 @@ static cfs_hash_ops_t uuid_hash_ops = {
*/
static unsigned
-nid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+nid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
}
@@ -1801,7 +1801,7 @@ nid_export_object(struct hlist_node *hnode)
}
static void
-nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
@@ -1810,7 +1810,7 @@ nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct obd_export *exp;
@@ -1855,7 +1855,7 @@ nidstats_object(struct hlist_node *hnode)
}
static void
-nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct nid_stat *ns;
@@ -1864,7 +1864,7 @@ nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct nid_stat *ns;
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index af5f27f82bc..e87a1990077 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -48,7 +48,7 @@ static inline __u32 consume(int nob, __u8 **ptr)
{
__u32 value;
- LASSERT(nob <= sizeof value);
+ LASSERT(nob <= sizeof(value));
for (value = 0; nob > 0; --nob)
value = (value << 8) | *((*ptr)++);
@@ -61,7 +61,7 @@ static void uuid_unpack(class_uuid_t in, __u16 *uu, int nr)
{
__u8 *ptr = in;
- LASSERT(nr * sizeof *uu == sizeof(class_uuid_t));
+ LASSERT(nr * sizeof(*uu) == sizeof(class_uuid_t));
while (nr-- > 0)
CONSUME(uu[nr], &ptr);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index c8b43442dc7..1fb0ac4e920 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1089,7 +1089,7 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
} else {
struct lustre_md *md;
md = &info->eti_md;
- memset(md, 0, sizeof *md);
+ memset(md, 0, sizeof(*md));
md->lsm = lsm;
conf->eoc_cl.u.coc_md = md;
}
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 90d24d8dea2..ef10e2af787 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -571,7 +571,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
client_obd_list_lock(&cli->cl_loi_list_lock);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
+ now.tv_sec, (unsigned long)now.tv_usec);
seq_printf(seq, "read RPCs in flight: %d\n",
cli->cl_r_in_flight);
seq_printf(seq, "write RPCs in flight: %d\n",
@@ -683,7 +683,7 @@ static int osc_stats_seq_show(struct seq_file *seq, void *v)
do_gettimeofday(&now);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
- now.tv_sec, now.tv_usec);
+ now.tv_sec, (unsigned long)now.tv_usec);
seq_printf(seq, "lockless_write_bytes\t\t"LPU64"\n",
stats->os_lockless_writes);
seq_printf(seq, "lockless_read_bytes\t\t"LPU64"\n",
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 3aeaf845cf2..681d60a7d5e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -105,7 +105,7 @@ static int osc_io_submit(const struct lu_env *env,
struct osc_object *osc = NULL; /* to keep gcc happy */
struct osc_page *opg;
struct cl_io *io;
- LIST_HEAD (list);
+ LIST_HEAD(list);
struct cl_page_list *qin = &queue->c2_qin;
struct cl_page_list *qout = &queue->c2_qout;
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 5d7bdbfc871..c90abfbb1d7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -862,7 +862,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
cap = &req->rq_pill;
req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
- sizeof *lvb);
+ sizeof(*lvb));
result = req_capsule_server_pack(cap);
if (result == 0) {
lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index d272322b29b..6c20b8ecfb8 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -245,7 +245,7 @@ static int osc_page_cache_add(const struct lu_env *env,
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
pgoff_t start, pgoff_t end)
{
- memset(policy, 0, sizeof *policy);
+ memset(policy, 0, sizeof(*policy));
policy->l_extent.start = cl_offset(obj, start);
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 9720c0e865c..6045a78a2ba 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -139,7 +139,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
* Hash operations for uid/gid <-> osc_quota_info
*/
static unsigned
-oqi_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_u32_hash(*((__u32*)key), mask);
}
@@ -172,17 +172,17 @@ oqi_object(struct hlist_node *hnode)
}
static void
-oqi_get(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
}
static void
-oqi_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
}
static void
-oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct osc_quota_info *oqi;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index ee6707a5ea9..cb197782d9a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -2554,7 +2554,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
}
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- sizeof *lvb);
+ sizeof(*lvb));
ptlrpc_request_set_replen(req);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 810a458caed..c2ab0c8c4d4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -817,7 +817,7 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
struct ptlrpc_request_set *set;
- OBD_ALLOC(set, sizeof *set);
+ OBD_ALLOC(set, sizeof(*set));
if (!set)
return NULL;
atomic_set(&set->set_refcount, 1);
@@ -2690,7 +2690,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- memset(aa, 0, sizeof *aa);
+ memset(aa, 0, sizeof(*aa));
/* Prepare request to be resent with ptlrpcd */
aa->praa_old_state = req->rq_send_state;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 17ca8420887..6756356faac 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -41,7 +41,7 @@
#include "ptlrpc_internal.h"
-static cfs_hash_t *conn_hash = NULL;
+static struct cfs_hash *conn_hash = NULL;
static cfs_hash_ops_t conn_hash_ops;
struct ptlrpc_connection *
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(ptlrpc_connection_fini);
* Hash operations for net_peer<->connection
*/
static unsigned
-conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
}
@@ -195,7 +195,7 @@ conn_object(struct hlist_node *hnode)
}
static void
-conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ptlrpc_connection *conn;
@@ -204,7 +204,7 @@ conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ptlrpc_connection *conn;
@@ -213,7 +213,7 @@ conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
}
static void
-conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ptlrpc_connection *conn;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 5ca69aec72e..7b96a0e88cd 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -682,7 +682,7 @@ int ptlrpc_connect_import(struct obd_import *imp)
CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
aa = ptlrpc_req_async_args(request);
- memset(aa, 0, sizeof *aa);
+ memset(aa, 0, sizeof(*aa));
aa->pcaa_peer_committed = committed_before_reconnect;
aa->pcaa_initial_connect = initial_connect;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 2f55ce26ccb..d0a6e568922 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -1669,7 +1669,7 @@ void req_capsule_init(struct req_capsule *pill,
if (req != NULL && pill == &req->rq_pill && req->rq_pill_init)
return;
- memset(pill, 0, sizeof *pill);
+ memset(pill, 0, sizeof(*pill));
pill->rc_req = req;
pill->rc_loc = location;
req_capsule_init_area(pill);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 6547f46a772..316103ab7c3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -207,7 +207,7 @@ static void enc_pools_release_free_pages(long npages)
p_idx++;
g_idx = 0;
}
- };
+ }
/* free unused pools */
while (p_idx_max1 < p_idx_max2) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index acf75f3873d..21de868da52 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -263,7 +263,7 @@ static struct ptlrpc_hr_service ptlrpc_hr;
*/
static void rs_batch_init(struct rs_batch *b)
{
- memset(b, 0, sizeof *b);
+ memset(b, 0, sizeof(*b));
INIT_LIST_HEAD(&b->rsb_replies);
}
@@ -1306,12 +1306,12 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
}
newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
- OBD_ALLOC(reqcopy, sizeof *reqcopy);
+ OBD_ALLOC(reqcopy, sizeof(*reqcopy));
if (reqcopy == NULL)
return -ENOMEM;
OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
if (!reqmsg) {
- OBD_FREE(reqcopy, sizeof *reqcopy);
+ OBD_FREE(reqcopy, sizeof(*reqcopy));
return -ENOMEM;
}
@@ -1370,7 +1370,7 @@ out_put:
out:
sptlrpc_svc_ctx_decref(reqcopy);
OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
- OBD_FREE(reqcopy, sizeof *reqcopy);
+ OBD_FREE(reqcopy, sizeof(*reqcopy));
return rc;
}
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 90d6ac46935..081407be33a 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -901,10 +901,7 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int err;
struct dt3155_priv *pd;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return -ENODEV;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
return -ENODEV;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
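The dt3155 hunk above collapses the dma_set_mask()/dma_set_coherent_mask() pair into dma_set_mask_and_coherent(), a helper introduced for 3.13. A hedged sketch of how a PCI probe path typically uses it; the function and names here are illustrative, not the dt3155 driver itself:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /* one call now sets both the streaming and the coherent DMA mask */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                return -ENODEV;

        return 0;
}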
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 46ed8324503..58684da45e6 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -34,10 +34,9 @@
static unsigned int assume_endura;
module_param(assume_endura, int, 0644);
-MODULE_PARM_DESC(assume_endura, "when probing fails, "
- "hardware is a Pelco Endura");
+MODULE_PARM_DESC(assume_endura,
+ "when probing fails, hardware is a Pelco Endura");
-/* #define GO7007_USB_DEBUG */
/* #define GO7007_I2C_DEBUG */ /* for debugging the EZ-USB I2C adapter */
#define HPI_STATUS_ADDR 0xFFF4
@@ -662,9 +661,7 @@ static int go7007_usb_interface_reset(struct go7007 *go)
if (usb->board->flags & GO7007_USB_EZUSB) {
/* Reset buffer in EZ-USB */
-#ifdef GO7007_USB_DEBUG
- printk(KERN_DEBUG "go7007-usb: resetting EZ-USB buffers\n");
-#endif
+ dev_dbg(go->dev, "resetting EZ-USB buffers\n");
if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 ||
go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0)
return -1;
@@ -678,8 +675,7 @@ static int go7007_usb_interface_reset(struct go7007 *go)
/* Wait for an interrupt to indicate successful hardware reset */
if (go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
(intr_val & ~0x1) != 0x55aa) {
- printk(KERN_ERR
- "go7007-usb: unable to reset the USB interface\n");
+ dev_err(go->dev, "unable to reset the USB interface\n");
return -1;
}
return 0;
@@ -693,10 +689,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
u16 status_reg = 0;
int timeout = 500;
-#ifdef GO7007_USB_DEBUG
- printk(KERN_DEBUG
- "go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
-#endif
+ dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data);
for (i = 0; i < 100; ++i) {
r = usb_control_msg(usb->usbdev,
@@ -714,9 +707,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
if (r < 0)
goto write_int_error;
if (i == 100) {
- printk(KERN_ERR
- "go7007-usb: device is hung, status reg = 0x%04x\n",
- status_reg);
+ dev_err(go->dev, "device is hung, status reg = 0x%04x\n", status_reg);
return -1;
}
r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), 0x12,
@@ -732,7 +723,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
return 0;
write_int_error:
- printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r);
+ dev_err(go->dev, "error in WriteInterrupt: %d\n", r);
return r;
}
@@ -743,10 +734,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
int r;
int timeout = 500;
-#ifdef GO7007_USB_DEBUG
- printk(KERN_DEBUG
- "go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
-#endif
+ dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data);
go->usb_buf[0] = data & 0xff;
go->usb_buf[1] = data >> 8;
@@ -757,7 +745,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0x55aa,
0xf0f0, go->usb_buf, 8, timeout);
if (r < 0) {
- printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r);
+ dev_err(go->dev, "error in WriteInterrupt: %d\n", r);
return r;
}
return 0;
@@ -772,23 +760,19 @@ static void go7007_usb_readinterrupt_complete(struct urb *urb)
if (status) {
if (status != -ESHUTDOWN &&
go->status != STATUS_SHUTDOWN) {
- printk(KERN_ERR
- "go7007-usb: error in read interrupt: %d\n",
- urb->status);
+ dev_err(go->dev, "error in read interrupt: %d\n", urb->status);
} else {
wake_up(&go->interrupt_waitq);
return;
}
} else if (urb->actual_length != urb->transfer_buffer_length) {
- printk(KERN_ERR "go7007-usb: short read in interrupt pipe!\n");
+ dev_err(go->dev, "short read in interrupt pipe!\n");
} else {
go->interrupt_available = 1;
go->interrupt_data = __le16_to_cpu(regs[0]);
go->interrupt_value = __le16_to_cpu(regs[1]);
-#ifdef GO7007_USB_DEBUG
- printk(KERN_DEBUG "go7007-usb: ReadInterrupt: %04x %04x\n",
+ dev_dbg(go->dev, "ReadInterrupt: %04x %04x\n",
go->interrupt_value, go->interrupt_data);
-#endif
}
wake_up(&go->interrupt_waitq);
@@ -801,8 +785,7 @@ static int go7007_usb_read_interrupt(struct go7007 *go)
r = usb_submit_urb(usb->intr_urb, GFP_KERNEL);
if (r < 0) {
- printk(KERN_ERR
- "go7007-usb: unable to submit interrupt urb: %d\n", r);
+ dev_err(go->dev, "unable to submit interrupt urb: %d\n", r);
return r;
}
return 0;
@@ -818,18 +801,17 @@ static void go7007_usb_read_video_pipe_complete(struct urb *urb)
return;
}
if (status) {
- printk(KERN_ERR "go7007-usb: error in video pipe: %d\n",
- status);
+ dev_err(go->dev, "error in video pipe: %d\n", status);
return;
}
if (urb->actual_length != urb->transfer_buffer_length) {
- printk(KERN_ERR "go7007-usb: short read in video pipe!\n");
+ dev_err(go->dev, "short read in video pipe!\n");
return;
}
go7007_parse_video_stream(go, urb->transfer_buffer, urb->actual_length);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r < 0)
- printk(KERN_ERR "go7007-usb: error in video pipe: %d\n", r);
+ dev_err(go->dev, "error in video pipe: %d\n", r);
}
static void go7007_usb_read_audio_pipe_complete(struct urb *urb)
@@ -840,19 +822,19 @@ static void go7007_usb_read_audio_pipe_complete(struct urb *urb)
if (!vb2_is_streaming(&go->vidq))
return;
if (status) {
- printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n",
+ dev_err(go->dev, "error in audio pipe: %d\n",
status);
return;
}
if (urb->actual_length != urb->transfer_buffer_length) {
- printk(KERN_ERR "go7007-usb: short read in audio pipe!\n");
+ dev_err(go->dev, "short read in audio pipe!\n");
return;
}
if (go->audio_deliver != NULL)
go->audio_deliver(go, urb->transfer_buffer, urb->actual_length);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r < 0)
- printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n", r);
+ dev_err(go->dev, "error in audio pipe: %d\n", r);
}
static int go7007_usb_stream_start(struct go7007 *go)
@@ -863,8 +845,7 @@ static int go7007_usb_stream_start(struct go7007 *go)
for (i = 0; i < 8; ++i) {
r = usb_submit_urb(usb->video_urbs[i], GFP_KERNEL);
if (r < 0) {
- printk(KERN_ERR "go7007-usb: error submitting video "
- "urb %d: %d\n", i, r);
+ dev_err(go->dev, "error submitting video urb %d: %d\n", i, r);
goto video_submit_failed;
}
}
@@ -874,8 +855,7 @@ static int go7007_usb_stream_start(struct go7007 *go)
for (i = 0; i < 8; ++i) {
r = usb_submit_urb(usb->audio_urbs[i], GFP_KERNEL);
if (r < 0) {
- printk(KERN_ERR "go7007-usb: error submitting audio "
- "urb %d: %d\n", i, r);
+ dev_err(go->dev, "error submitting audio urb %d: %d\n", i, r);
goto audio_submit_failed;
}
}
@@ -911,9 +891,7 @@ static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len)
int transferred, pipe;
int timeout = 500;
-#ifdef GO7007_USB_DEBUG
- printk(KERN_DEBUG "go7007-usb: DownloadBuffer sending %d bytes\n", len);
-#endif
+ dev_dbg(go->dev, "DownloadBuffer sending %d bytes\n", len);
if (usb->board->flags & GO7007_USB_EZUSB)
pipe = usb_sndbulkpipe(usb->usbdev, 2);
@@ -999,9 +977,8 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
!(msgs[i].flags & I2C_M_RD) &&
(msgs[i + 1].flags & I2C_M_RD)) {
#ifdef GO7007_I2C_DEBUG
- printk(KERN_DEBUG "go7007-usb: i2c write/read %d/%d "
- "bytes on %02x\n", msgs[i].len,
- msgs[i + 1].len, msgs[i].addr);
+ dev_dbg(go->dev, "i2c write/read %d/%d bytes on %02x\n",
+ msgs[i].len, msgs[i + 1].len, msgs[i].addr);
#endif
buf[0] = 0x01;
buf[1] = msgs[i].len + 1;
@@ -1011,9 +988,8 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
buf[buf_len++] = msgs[++i].len;
} else if (msgs[i].flags & I2C_M_RD) {
#ifdef GO7007_I2C_DEBUG
- printk(KERN_DEBUG "go7007-usb: i2c read %d "
- "bytes on %02x\n", msgs[i].len,
- msgs[i].addr);
+ dev_dbg(go->dev, "i2c read %d bytes on %02x\n",
+ msgs[i].len, msgs[i].addr);
#endif
buf[0] = 0x01;
buf[1] = 1;
@@ -1022,9 +998,8 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
buf_len = 4;
} else {
#ifdef GO7007_I2C_DEBUG
- printk(KERN_DEBUG "go7007-usb: i2c write %d "
- "bytes on %02x\n", msgs[i].len,
- msgs[i].addr);
+ dev_dbg(go->dev, "i2c write %d bytes on %02x\n",
+ msgs[i].len, msgs[i].addr);
#endif
buf[0] = 0x00;
buf[1] = msgs[i].len + 1;
@@ -1082,7 +1057,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
char *name;
int video_pipe, i, v_urb_len;
- printk(KERN_DEBUG "go7007-usb: probing new GO7007 USB board\n");
+ dev_dbg(go->dev, "probing new GO7007 USB board\n");
switch (id->driver_info) {
case GO7007_BOARDID_MATRIX_II:
@@ -1122,14 +1097,13 @@ static int go7007_usb_probe(struct usb_interface *intf,
board = &board_px_tv402u;
break;
case GO7007_BOARDID_LIFEVIEW_LR192:
- printk(KERN_ERR "go7007-usb: The Lifeview TV Walker Ultra "
- "is not supported. Sorry!\n");
+ dev_err(go->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n");
return -ENODEV;
name = "Lifeview TV Walker Ultra";
board = &board_lifeview_lr192;
break;
case GO7007_BOARDID_SENSORAY_2250:
- printk(KERN_INFO "Sensoray 2250 found\n");
+ dev_info(go->dev, "Sensoray 2250 found\n");
name = "Sensoray 2250/2251";
board = &board_sensoray_2250;
break;
@@ -1138,7 +1112,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
board = &board_ads_usbav_709;
break;
default:
- printk(KERN_ERR "go7007-usb: unknown board ID %d!\n",
+ dev_err(go->dev, "unknown board ID %d!\n",
(unsigned int)id->driver_info);
return -ENODEV;
}
@@ -1197,8 +1171,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
go->i2c_adapter.dev.parent = go->dev;
i2c_set_adapdata(&go->i2c_adapter, go);
if (i2c_add_adapter(&go->i2c_adapter) < 0) {
- printk(KERN_ERR
- "go7007-usb: error: i2c_add_adapter failed\n");
+ dev_err(go->dev, "error: i2c_add_adapter failed\n");
goto allocfail;
}
go->i2c_adapter_online = 1;
@@ -1248,8 +1221,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
/* Probe the tuner model on the TV402U */
if (go->board_id == GO7007_BOARDID_PX_TV402U) {
/* Board strapping indicates tuner model */
- if (go7007_usb_vendor_request(go, 0x41, 0, 0, go->usb_buf, 3, 1) < 0) {
- printk(KERN_ERR "go7007-usb: GPIO read failed!\n");
+ if (go7007_usb_vendor_request(go, 0x41, 0, 0, go->usb_buf, 3,
+ 1) < 0) {
+ dev_err(go->dev, "GPIO read failed!\n");
goto allocfail;
}
switch (go->usb_buf[0] >> 6) {
@@ -1273,15 +1247,14 @@ static int go7007_usb_probe(struct usb_interface *intf,
sizeof(go->name));
break;
default:
- printk(KERN_DEBUG "go7007-usb: unable to detect "
- "tuner type!\n");
+ dev_dbg(go->dev, "unable to detect tuner type!\n");
break;
}
/* Configure tuner mode selection inputs connected
* to the EZ-USB GPIO output pins */
if (go7007_usb_vendor_request(go, 0x40, 0x7f02, 0,
NULL, 0, 0) < 0) {
- printk(KERN_ERR "go7007-usb: GPIO write failed!\n");
+ dev_err(go->dev, "GPIO write failed!\n");
goto allocfail;
}
}
@@ -1290,11 +1263,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
* a USB1.1 port. There will be silent corruption of the stream. */
if ((board->flags & GO7007_USB_EZUSB) &&
usbdev->speed != USB_SPEED_HIGH)
- printk(KERN_ERR "go7007-usb: *** WARNING *** This device "
- "must be connected to a USB 2.0 port! "
- "Attempting to capture video through a USB 1.1 "
- "port will result in stream corruption, even "
- "at low bitrates!\n");
+ dev_err(go->dev, "*** WARNING *** This device must be connected to a USB 2.0 port! Attempting to capture video through a USB 1.1 port will result in stream corruption, even at low bitrates!\n");
/* Allocate the URBs and buffers for receiving the video stream */
if (board->flags & GO7007_USB_EZUSB) {
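The go7007-usb hunks above replace printk(KERN_ERR "go7007-usb: ...") calls with dev_err()/dev_dbg(), which prefix messages with the device name automatically and let debug output be enabled via dynamic debug instead of a local #ifdef. A minimal hedged sketch; "struct demo" and demo_reset() are hypothetical, not the go7007 driver state:

#include <linux/device.h>
#include <linux/errno.h>

struct demo {
        struct device *dev;
};

static int demo_reset(struct demo *d, int status)
{
        dev_dbg(d->dev, "resetting buffers\n");         /* no #ifdef DEBUG needed */

        if (status < 0) {
                dev_err(d->dev, "unable to reset the USB interface\n");
                return -EIO;
        }
        return 0;
}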
diff --git a/drivers/staging/media/lirc/TODO b/drivers/staging/media/lirc/TODO
index b6cb593f55c..cbea5d84fed 100644
--- a/drivers/staging/media/lirc/TODO
+++ b/drivers/staging/media/lirc/TODO
@@ -2,6 +2,11 @@
(see drivers/media/IR/mceusb.c vs. lirc_mceusb.c in lirc cvs for an
example of a previously completed port).
+- lirc_bt829 uses registers on a Mach64 VT, which has a separate kernel
+ framebuffer driver (atyfb) and userland X driver (mach64). It can't
+ simply be converted to a normal PCI driver, but ideally it should be
+ coordinated with the other drivers.
+
Please send patches to:
Jarod Wilson <jarod@wilsonet.com>
Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index fa31ee7dd6a..30edc740ac2 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -63,7 +63,7 @@ static bool debug;
} while (0)
static int atir_minor;
-static unsigned long pci_addr_phys;
+static phys_addr_t pci_addr_phys;
static unsigned char *pci_addr_lin;
static struct lirc_driver atir_driver;
@@ -78,11 +78,11 @@ static struct pci_dev *do_pci_probe(void)
pci_addr_phys = 0;
if (my_dev->resource[0].flags & IORESOURCE_MEM) {
pci_addr_phys = my_dev->resource[0].start;
- pr_info("memory at 0x%08X\n",
- (unsigned int)pci_addr_phys);
+ pr_info("memory at %pa\n", &pci_addr_phys);
}
if (pci_addr_phys == 0) {
pr_err("no memory resource ?\n");
+ pci_dev_put(my_dev);
return NULL;
}
} else {
@@ -120,19 +120,26 @@ static void atir_set_use_dec(void *data)
int init_module(void)
{
struct pci_dev *pdev;
+ int rc;
pdev = do_pci_probe();
if (pdev == NULL)
return -ENODEV;
- if (!atir_init_start())
- return -ENODEV;
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_put_dev;
+
+ if (!atir_init_start()) {
+ rc = -ENODEV;
+ goto err_disable;
+ }
strcpy(atir_driver.name, "ATIR");
atir_driver.minor = -1;
atir_driver.code_length = 8;
atir_driver.sample_rate = 10;
- atir_driver.data = 0;
+ atir_driver.data = NULL;
atir_driver.add_to_buf = atir_add_to_buf;
atir_driver.set_use_inc = atir_set_use_inc;
atir_driver.set_use_dec = atir_set_use_dec;
@@ -142,24 +149,38 @@ int init_module(void)
atir_minor = lirc_register_driver(&atir_driver);
if (atir_minor < 0) {
pr_err("failed to register driver!\n");
- return atir_minor;
+ rc = atir_minor;
+ goto err_unmap;
}
dprintk("driver is registered on minor %d\n", atir_minor);
return 0;
+
+err_unmap:
+ iounmap(pci_addr_lin);
+err_disable:
+ pci_disable_device(pdev);
+err_put_dev:
+ pci_dev_put(pdev);
+ return rc;
}
void cleanup_module(void)
{
+ struct pci_dev *pdev = to_pci_dev(atir_driver.dev);
+
lirc_unregister_driver(atir_minor);
+ iounmap(pci_addr_lin);
+ pci_disable_device(pdev);
+ pci_dev_put(pdev);
}
static int atir_init_start(void)
{
pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400);
- if (pci_addr_lin == 0) {
+ if (!pci_addr_lin) {
pr_info("pci mem must be mapped\n");
return 0;
}
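The lirc_bt829 hunk above switches the BAR address to phys_addr_t and prints it with the %pa specifier, which sizes itself correctly on both 32- and 64-bit kernels and takes a pointer to the value. A short hedged sketch, not the lirc_bt829 probe code:

#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/types.h>

static void report_bar0(struct pci_dev *pdev)
{
        phys_addr_t bar = pci_resource_start(pdev, 0);

        /* %pa prints a phys_addr_t; note it is passed by address */
        pr_info("BAR0 at %pa\n", &bar);
}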
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 4afa7da11f3..ab2ae115b52 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -625,7 +625,7 @@ static void imon_incoming_packet(struct imon_context *context,
}
if (debug) {
- printk(KERN_INFO "raw packet: ");
+ dev_info(dev, "raw packet: ");
for (i = 0; i < len; ++i)
printk("%02x ", buf[i]);
printk("\n");
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index af08e677b60..2e3a98575d4 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -67,7 +67,7 @@
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/platform_device.h>
-
+#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/fcntl.h>
@@ -321,7 +321,7 @@ static void on(void)
* status LED and ground
*/
if (type == LIRC_NSLU2) {
- gpio_line_set(NSLU2_LED_GRN, IXP4XX_GPIO_LOW);
+ gpio_set_value(NSLU2_LED_GRN, 0);
return;
}
#endif
@@ -335,7 +335,7 @@ static void off(void)
{
#ifdef CONFIG_LIRC_SERIAL_NSLU2
if (type == LIRC_NSLU2) {
- gpio_line_set(NSLU2_LED_GRN, IXP4XX_GPIO_HIGH);
+ gpio_set_value(NSLU2_LED_GRN, 1);
return;
}
#endif
@@ -707,7 +707,8 @@ static irqreturn_t irq_handler(int i, void *blah)
pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n",
dcd, sense,
tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ (unsigned long)tv.tv_usec,
+ (unsigned long)lasttv.tv_usec);
continue;
}
@@ -719,7 +720,8 @@ static irqreturn_t irq_handler(int i, void *blah)
pr_warn("%d %d %lx %lx %lx %lx\n",
dcd, sense,
tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ (unsigned long)tv.tv_usec,
+ (unsigned long)lasttv.tv_usec);
data = PULSE_MASK;
} else if (deltv > 15) {
data = PULSE_MASK; /* really long time */
@@ -728,7 +730,8 @@ static irqreturn_t irq_handler(int i, void *blah)
pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n",
dcd, sense,
tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ (unsigned long)tv.tv_usec,
+ (unsigned long)lasttv.tv_usec);
/*
* detecting pulse while this
* MUST be a space!
@@ -839,6 +842,16 @@ static int lirc_serial_probe(struct platform_device *dev)
{
int i, nlow, nhigh, result;
+#ifdef CONFIG_LIRC_SERIAL_NSLU2
+ /* This GPIO is used for a LED on the NSLU2 */
+ result = devm_gpio_request(dev, NSLU2_LED_GRN, "lirc-serial");
+ if (result)
+ return result;
+ result = gpio_direction_output(NSLU2_LED_GRN, 0);
+ if (result)
+ return result;
+#endif
+
result = request_irq(irq, irq_handler,
(share_irq ? IRQF_SHARED : 0),
LIRC_DRIVER_NAME, (void *)&hardware);
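The lirc_serial hunks above cast tv_usec to unsigned long before handing it to a %lx format; tv_usec is not a long on every architecture, so the explicit cast keeps the format string portable. A small sketch of the same logging pattern, under the assumption of a 3.13-era timeval API; names are illustrative:

#include <linux/printk.h>
#include <linux/time.h>

static void log_stamp(void)
{
        struct timeval tv;

        do_gettimeofday(&tv);
        /* cast so the %lu formats match on architectures where
         * tv_sec/tv_usec are not plain long */
        pr_info("stamp %lu.%06lu\n",
                (unsigned long)tv.tv_sec, (unsigned long)tv.tv_usec);
}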
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 11d5338b4f2..0feeaadf29d 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -61,6 +61,9 @@
#include <media/lirc_dev.h>
#include <media/lirc.h>
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct IR;
struct IR_rx {
@@ -941,7 +944,14 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
schedule();
set_current_state(TASK_INTERRUPTIBLE);
} else {
- unsigned char buf[rbuf->chunk_size];
+ unsigned char buf[MAX_XFER_SIZE];
+
+ if (rbuf->chunk_size > sizeof(buf)) {
+ zilog_error("chunk_size is too big (%d)!\n",
+ rbuf->chunk_size);
+ ret = -EINVAL;
+ break;
+ }
m = lirc_buffer_read(rbuf, buf);
if (m == rbuf->chunk_size) {
ret = copy_to_user((void *)outbuf+written, buf,
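The lirc_zilog hunk above replaces a variable-length array on the kernel stack (buf[rbuf->chunk_size]) with a fixed MAX_XFER_SIZE buffer plus an explicit size check, so an unexpectedly large chunk_size can no longer overflow the stack. A standalone sketch of that bound-then-copy shape; the ring structure and reader are hypothetical:

#include <stdio.h>
#include <string.h>

#define MAX_XFER_SIZE 64

struct ring {
        size_t chunk_size;
};

static int read_chunk(const struct ring *r, unsigned char *dst)
{
        unsigned char buf[MAX_XFER_SIZE];       /* fixed size, not buf[r->chunk_size] */

        if (r->chunk_size > sizeof(buf)) {
                fprintf(stderr, "chunk_size too big (%zu)\n", r->chunk_size);
                return -1;
        }
        memset(buf, 0xab, r->chunk_size);       /* stands in for the real chunk read */
        memcpy(dst, buf, r->chunk_size);
        return 0;
}

int main(void)
{
        struct ring r = { .chunk_size = 16 };
        unsigned char out[MAX_XFER_SIZE];

        return read_chunk(&r, out);
}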
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
index 76d5bbd4d93..0c349c8595e 100644
--- a/drivers/staging/media/msi3101/Kconfig
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -1,4 +1,5 @@
config USB_MSI3101
tristate "Mirics MSi3101 SDR Dongle"
depends on USB && VIDEO_DEV && VIDEO_V4L2
- select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_VMALLOC
diff --git a/drivers/staging/media/solo6x10/solo6x10-disp.c b/drivers/staging/media/solo6x10/solo6x10-disp.c
index 32d9953bc36..145295a5db7 100644
--- a/drivers/staging/media/solo6x10/solo6x10-disp.c
+++ b/drivers/staging/media/solo6x10/solo6x10-disp.c
@@ -176,18 +176,27 @@ static void solo_vout_config(struct solo_dev *solo_dev)
static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off,
u16 val, int reg_size)
{
- u16 buf[64];
- int i;
- int ret = 0;
+ u16 *buf;
+ const int n = 64, size = n * sizeof(*buf);
+ int i, ret = 0;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- for (i = 0; i < sizeof(buf) >> 1; i++)
+ for (i = 0; i < n; i++)
buf[i] = cpu_to_le16(val);
- for (i = 0; i < reg_size; i += sizeof(buf))
- ret |= solo_p2m_dma(solo_dev, 1, buf,
- SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
- sizeof(buf), 0, 0);
+ for (i = 0; i < reg_size; i += size) {
+ ret = solo_p2m_dma(solo_dev, 1, buf,
+ SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
+ size, 0, 0);
+
+ if (ret)
+ break;
+ }
+ kfree(buf);
return ret;
}
diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
index 333594189b8..7f2f2472655 100644
--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
@@ -87,7 +87,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
if (mutex_lock_interruptible(&p2m_dev->mutex))
return -EINTR;
- INIT_COMPLETION(p2m_dev->completion);
+ reinit_completion(&p2m_dev->completion);
p2m_dev->error = 0;
if (desc_cnt > 1 && solo_dev->type != SOLO_DEV_6110 && desc_mode) {
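
The one-liner above tracks the 3.13 rename of the INIT_COMPLETION() macro to the reinit_completion() helper; the completion still has to be initialised once and can then be re-armed before every transfer. A minimal sketch of that life cycle, with an assumed driver-private structure:

#include <linux/completion.h>

struct demo_dev {
	struct completion xfer_done;	/* signalled from the IRQ handler */
};

static void demo_setup(struct demo_dev *dev)
{
	init_completion(&dev->xfer_done);	/* one-time initialisation */
}

static void demo_start_and_wait(struct demo_dev *dev)
{
	reinit_completion(&dev->xfer_done);	/* was INIT_COMPLETION(dev->xfer_done) */
	/* ... start the hardware; the IRQ handler calls complete(&dev->xfer_done) ... */
	wait_for_completion(&dev->xfer_done);
}
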
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index a4c589604b0..d582c5b84c1 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -95,38 +95,11 @@ static unsigned char vop_6110_pal_cif[] = {
0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00,
};
-struct vop_header {
- /* VE_STATUS0 */
- u32 mpeg_size:20, sad_motion_flag:1, video_motion_flag:1, vop_type:2,
- channel:5, source_fl:1, interlace:1, progressive:1;
-
- /* VE_STATUS1 */
- u32 vsize:8, hsize:8, last_queue:4, nop0:8, scale:4;
-
- /* VE_STATUS2 */
- u32 mpeg_off;
-
- /* VE_STATUS3 */
- u32 jpeg_off;
-
- /* VE_STATUS4 */
- u32 jpeg_size:20, interval:10, nop1:2;
-
- /* VE_STATUS5/6 */
- u32 sec, usec;
-
- /* VE_STATUS7/8/9 */
- u32 nop2[3];
-
- /* VE_STATUS10 */
- u32 mpeg_size_alt:20, nop3:12;
-
- u32 end_nops[5];
-} __packed;
+typedef __le32 vop_header[16];
struct solo_enc_buf {
enum solo_enc_types type;
- struct vop_header *vh;
+ const vop_header *vh;
int motion;
};
@@ -346,7 +319,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma,
/* Build a descriptor queue out of an SG list and send it to the P2M for
* processing. */
static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
- struct vb2_dma_sg_desc *vbuf, int off, int size,
+ struct sg_table *vbuf, int off, int size,
unsigned int base, unsigned int base_size)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
@@ -359,7 +332,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
solo_enc->desc_count = 1;
- for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) {
+ for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
struct solo_p2m_desc *desc;
dma_addr_t dma;
int len;
@@ -430,84 +403,145 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
solo_enc->desc_count - 1);
}
+/* Extract values from VOP header - VE_STATUSxx */
+static inline int vop_interlaced(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[0]) >> 30) & 1;
+}
+
+static inline u8 vop_channel(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[0]) >> 24) & 0x1F;
+}
+
+static inline u8 vop_type(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[0]) >> 22) & 3;
+}
+
+static inline u32 vop_mpeg_size(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[0]) & 0xFFFFF;
+}
+
+static inline u8 vop_hsize(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[1]) >> 8) & 0xFF;
+}
+
+static inline u8 vop_vsize(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[1]) & 0xFF;
+}
+
+static inline u32 vop_mpeg_offset(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[2]);
+}
+
+static inline u32 vop_jpeg_offset(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[3]);
+}
+
+static inline u32 vop_jpeg_size(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[4]) & 0xFFFFF;
+}
+
+static inline u32 vop_sec(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[5]);
+}
+
+static inline u32 vop_usec(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[6]);
+}
+
static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
- struct vb2_buffer *vb, struct vop_header *vh)
+ struct vb2_buffer *vb, const vop_header *vh)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
int frame_size;
int ret;
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len)
+ if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
return -EIO;
- sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
- solo_enc->jpeg_header,
- solo_enc->jpeg_len);
-
- frame_size = (vh->jpeg_size + solo_enc->jpeg_len + (DMA_ALIGN - 1))
+ frame_size = (vop_jpeg_size(vh) + solo_enc->jpeg_len + (DMA_ALIGN - 1))
& ~(DMA_ALIGN - 1);
- vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len);
+ vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
- dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ /* may discard all previous data in vbuf->sgl */
+ dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
- ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off,
- frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
- SOLO_JPEG_EXT_SIZE(solo_dev));
- dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
+ vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
+ frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
+ SOLO_JPEG_EXT_SIZE(solo_dev));
+ dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
+
+ /* add the header only after dma_unmap_sg() */
+ sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ solo_enc->jpeg_header, solo_enc->jpeg_len);
+
return ret;
}
static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
- struct vb2_buffer *vb, struct vop_header *vh)
+ struct vb2_buffer *vb, const vop_header *vh)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
int frame_off, frame_size;
int skip = 0;
int ret;
- if (vb2_plane_size(vb, 0) < vh->mpeg_size)
+ if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
return -EIO;
/* If this is a key frame, add extra header */
- if (!vh->vop_type) {
- sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
- solo_enc->vop,
- solo_enc->vop_len);
-
+ vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME);
+ if (!vop_type(vh)) {
skip = solo_enc->vop_len;
-
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- vb2_set_plane_payload(vb, 0, vh->mpeg_size + solo_enc->vop_len);
+ vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + solo_enc->vop_len);
} else {
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
- vb2_set_plane_payload(vb, 0, vh->mpeg_size);
+ vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
}
/* Now get the actual mpeg payload */
- frame_off = (vh->mpeg_off + sizeof(*vh))
+ frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) + sizeof(*vh))
% SOLO_MP4E_EXT_SIZE(solo_dev);
- frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1))
+ frame_size = (vop_mpeg_size(vh) + skip + (DMA_ALIGN - 1))
& ~(DMA_ALIGN - 1);
- dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ /* may discard all previous data in vbuf->sgl */
+ dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
SOLO_MP4E_EXT_ADDR(solo_dev),
SOLO_MP4E_EXT_SIZE(solo_dev));
- dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
+
+ /* add the header only after dma_unmap_sg() */
+ if (!vop_type(vh))
+ sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ solo_enc->vop, solo_enc->vop_len);
return ret;
}
static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, struct solo_enc_buf *enc_buf)
{
- struct vop_header *vh = enc_buf->vh;
+ const vop_header *vh = enc_buf->vh;
int ret;
/* Check for motion flags */
@@ -531,8 +565,8 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
if (!ret) {
vb->v4l2_buf.sequence = solo_enc->sequence++;
- vb->v4l2_buf.timestamp.tv_sec = vh->sec;
- vb->v4l2_buf.timestamp.tv_usec = vh->usec;
+ vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh);
+ vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh);
}
vb2_buffer_done(vb, ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
@@ -605,15 +639,13 @@ static void solo_handle_ring(struct solo_dev *solo_dev)
/* FAIL... */
if (enc_get_mpeg_dma(solo_dev, solo_dev->vh_dma, off,
- sizeof(struct vop_header)))
+ sizeof(vop_header)))
continue;
- enc_buf.vh = (struct vop_header *)solo_dev->vh_buf;
- enc_buf.vh->mpeg_off -= SOLO_MP4E_EXT_ADDR(solo_dev);
- enc_buf.vh->jpeg_off -= SOLO_JPEG_EXT_ADDR(solo_dev);
+ enc_buf.vh = solo_dev->vh_buf;
/* Sanity check */
- if (enc_buf.vh->mpeg_off != off)
+ if (vop_mpeg_offset(enc_buf.vh) != SOLO_MP4E_EXT_ADDR(solo_dev) + off)
continue;
if (solo_motion_detected(solo_enc))
@@ -1329,7 +1361,7 @@ int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
init_waitqueue_head(&solo_dev->ring_thread_wait);
- solo_dev->vh_size = sizeof(struct vop_header);
+ solo_dev->vh_size = sizeof(vop_header);
solo_dev->vh_buf = pci_alloc_consistent(solo_dev->pdev,
solo_dev->vh_size,
&solo_dev->vh_dma);
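
The encoder rework above drops the __packed bitfield struct and treats the VOP header as sixteen little-endian 32-bit words read through small accessors, so every field extraction is an explicit __le32_to_cpu() plus shift and mask instead of compiler-defined bitfield layout. A short illustrative use of those accessors (the function itself is not part of the driver):

static void demo_dump_vop(const vop_header *vh)
{
	u32 size = vop_mpeg_size(vh);	/* bits 19:0 of word 0 */
	u8 ch = vop_channel(vh);	/* bits 28:24 of word 0 */
	u32 sec = vop_sec(vh);		/* word 5: timestamp seconds */
	u32 usec = vop_usec(vh);	/* word 6: timestamp microseconds */

	pr_debug("vop: channel %u, %u bytes, ts %u.%06u\n", ch, size, sec, usec);
}
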
diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
index 6f91d2e34b2..f1bbb8cb74e 100644
--- a/drivers/staging/media/solo6x10/solo6x10.h
+++ b/drivers/staging/media/solo6x10/solo6x10.h
@@ -94,7 +94,6 @@
#define SOLO_ENC_MODE_HD1 1
#define SOLO_ENC_MODE_D1 9
-#define SOLO_DEFAULT_GOP 30
#define SOLO_DEFAULT_QP 3
#ifndef V4L2_BUF_FLAG_MOTION_ON
diff --git a/drivers/staging/mt29f_spinand/Kconfig b/drivers/staging/mt29f_spinand/Kconfig
new file mode 100644
index 00000000000..403174817be
--- /dev/null
+++ b/drivers/staging/mt29f_spinand/Kconfig
@@ -0,0 +1,16 @@
+config MTD_SPINAND_MT29F
+ tristate "SPINAND Device Support for Micron"
+ depends on MTD_NAND && SPI
+ help
+ This enables support for accessing Micron SPI NAND flash
+ devices.
+	  If you have a Micron SPI NAND chip, say yes.
+
+ If unsure, say no here.
+
+config MTD_SPINAND_ONDIEECC
+ bool "Use SPINAND internal ECC"
+ depends on MTD_SPINAND_MT29F
+ help
+	 Internal ECC.
+	 Enables hardware ECC support for Micron SPI NAND.
diff --git a/drivers/staging/mt29f_spinand/Makefile b/drivers/staging/mt29f_spinand/Makefile
new file mode 100644
index 00000000000..e47af0f7fda
--- /dev/null
+++ b/drivers/staging/mt29f_spinand/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand.o
diff --git a/drivers/staging/mt29f_spinand/TODO b/drivers/staging/mt29f_spinand/TODO
new file mode 100644
index 00000000000..a2209b72d37
--- /dev/null
+++ b/drivers/staging/mt29f_spinand/TODO
@@ -0,0 +1,13 @@
+TODO:
+ - Tested on XLP platform, needs to be tested on other platforms.
+ - Checkpatch.pl cleanups
+	- Sparse fixes.
+ - Clean up coding style to meet kernel standard.
+
+Please send patches
+To:
+Kamlakant Patel <kamlakant.patel@broadcom.com>
+Cc:
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Mona Anonuevo <manonuevo@micron.com>
+linux-mtd@lists.infradead.org
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
new file mode 100644
index 00000000000..51dbc13e757
--- /dev/null
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -0,0 +1,947 @@
+/*
+ * Copyright (c) 2003-2013 Broadcom Corporation
+ *
+ * Copyright (c) 2009-2010 Micron Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+
+#include "mt29f_spinand.h"
+
+#define BUFSIZE (10 * 64 * 2048)
+#define CACHE_BUF 2112
+/*
+ * OOB area specification layout: Total 32 available free bytes.
+ */
+
+static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+ struct spinand_state *state = (struct spinand_state *)info->priv;
+
+ return state;
+}
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+static int enable_hw_ecc;
+static int enable_read_hw_ecc;
+
+static struct nand_ecclayout spinand_oob_64 = {
+ .eccbytes = 24,
+ .eccpos = {
+ 1, 2, 3, 4, 5, 6,
+ 17, 18, 19, 20, 21, 22,
+ 33, 34, 35, 36, 37, 38,
+ 49, 50, 51, 52, 53, 54, },
+ .oobavail = 32,
+ .oobfree = {
+ {.offset = 8,
+ .length = 8},
+ {.offset = 24,
+ .length = 8},
+ {.offset = 40,
+ .length = 8},
+ {.offset = 56,
+ .length = 8},
+ }
+};
+#endif
+
+/*
+ * spinand_cmd - to process a command to send to the SPI Nand
+ * Description:
+ * Set up the command buffer to send to the SPI controller.
+ *    The command buffer has to be initialized to 0.
+ */
+
+static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
+{
+ struct spi_message message;
+ struct spi_transfer x[4];
+ u8 dummy = 0xff;
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof(x));
+
+ x[0].len = 1;
+ x[0].tx_buf = &cmd->cmd;
+ spi_message_add_tail(&x[0], &message);
+
+ if (cmd->n_addr) {
+ x[1].len = cmd->n_addr;
+ x[1].tx_buf = cmd->addr;
+ spi_message_add_tail(&x[1], &message);
+ }
+
+ if (cmd->n_dummy) {
+ x[2].len = cmd->n_dummy;
+ x[2].tx_buf = &dummy;
+ spi_message_add_tail(&x[2], &message);
+ }
+
+ if (cmd->n_tx) {
+ x[3].len = cmd->n_tx;
+ x[3].tx_buf = cmd->tx_buf;
+ spi_message_add_tail(&x[3], &message);
+ }
+
+ if (cmd->n_rx) {
+ x[3].len = cmd->n_rx;
+ x[3].rx_buf = cmd->rx_buf;
+ spi_message_add_tail(&x[3], &message);
+ }
+
+ return spi_sync(spi, &message);
+}
+
+/*
+ * spinand_read_id- Read SPI Nand ID
+ * Description:
+ * Read ID: read two ID bytes from the SPI Nand device
+ */
+static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
+{
+ int retval;
+ u8 nand_id[3];
+ struct spinand_cmd cmd = {0};
+
+ cmd.cmd = CMD_READ_ID;
+ cmd.n_rx = 3;
+ cmd.rx_buf = &nand_id[0];
+
+ retval = spinand_cmd(spi_nand, &cmd);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev, "error %d reading id\n", retval);
+ return retval;
+ }
+ id[0] = nand_id[1];
+ id[1] = nand_id[2];
+ return retval;
+}
+
+/*
+ * spinand_read_status- send command 0xf to the SPI Nand status register
+ * Description:
+ * After read, write, or erase, the Nand device is expected to set the
+ * busy status.
+ * This function is to allow reading the status of the command: read,
+ * write, and erase.
+ *    Once the status turns ready, the other status bits are also
+ *    valid status bits.
+ */
+static int spinand_read_status(struct spi_device *spi_nand, uint8_t *status)
+{
+ struct spinand_cmd cmd = {0};
+ int ret;
+
+ cmd.cmd = CMD_READ_REG;
+ cmd.n_addr = 1;
+ cmd.addr[0] = REG_STATUS;
+ cmd.n_rx = 1;
+ cmd.rx_buf = status;
+
+ ret = spinand_cmd(spi_nand, &cmd);
+ if (ret < 0)
+ dev_err(&spi_nand->dev, "err: %d read status register\n", ret);
+
+ return ret;
+}
+
+#define MAX_WAIT_JIFFIES (40 * HZ)
+static int wait_till_ready(struct spi_device *spi_nand)
+{
+ unsigned long deadline;
+ int retval;
+ u8 stat = 0;
+
+ deadline = jiffies + MAX_WAIT_JIFFIES;
+ do {
+ retval = spinand_read_status(spi_nand, &stat);
+ if (retval < 0)
+ return -1;
+ else if (!(stat & 0x1))
+ break;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ if ((stat & 0x1) == 0)
+ return 0;
+
+ return -1;
+}
+/**
+ * spinand_get_otp- send command 0xf to read the SPI Nand OTP register
+ * Description:
+ *   There is one bit (bit 0x10) to set or to clear the internal ECC:
+ *   to enable the chip's internal ECC, set the bit to 1;
+ *   to disable it, clear the bit to 0.
+ */
+static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
+{
+ struct spinand_cmd cmd = {0};
+ int retval;
+
+ cmd.cmd = CMD_READ_REG;
+ cmd.n_addr = 1;
+ cmd.addr[0] = REG_OTP;
+ cmd.n_rx = 1;
+ cmd.rx_buf = otp;
+
+ retval = spinand_cmd(spi_nand, &cmd);
+ if (retval < 0)
+ dev_err(&spi_nand->dev, "error %d get otp\n", retval);
+ return retval;
+}
+
+/**
+ * spinand_set_otp- send command 0x1f to write the SPI Nand OTP register
+ * Description:
+ *   There is one bit (bit 0x10) to set or to clear the internal ECC:
+ *   to enable the chip's internal ECC, set the bit to 1;
+ *   to disable it, clear the bit to 0.
+ */
+static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
+{
+ int retval;
+ struct spinand_cmd cmd = {0};
+
+	cmd.cmd = CMD_WRITE_REG;
+	cmd.n_addr = 1;
+	cmd.addr[0] = REG_OTP;
+	cmd.n_tx = 1;
+	cmd.tx_buf = otp;
+
+ retval = spinand_cmd(spi_nand, &cmd);
+ if (retval < 0)
+ dev_err(&spi_nand->dev, "error %d set otp\n", retval);
+
+ return retval;
+}
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+/**
+ * spinand_enable_ecc- send command 0x1f to write the SPI Nand OTP register
+ * Description:
+ *   There is one bit (bit 0x10) to set or to clear the internal ECC:
+ *   to enable the chip's internal ECC, set the bit to 1;
+ *   to disable it, clear the bit to 0.
+ */
+static int spinand_enable_ecc(struct spi_device *spi_nand)
+{
+ int retval;
+ u8 otp = 0;
+
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0)
+ return retval;
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ return 0;
+ } else {
+ otp |= OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0)
+ return retval;
+ return spinand_get_otp(spi_nand, &otp);
+ }
+}
+#endif
+
+static int spinand_disable_ecc(struct spi_device *spi_nand)
+{
+ int retval;
+ u8 otp = 0;
+
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0)
+ return retval;
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ otp &= ~OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0)
+ return retval;
+ return spinand_get_otp(spi_nand, &otp);
+ } else
+ return 0;
+}
+
+/**
+ * spinand_write_enable- send command 0x06 to enable write or erase the
+ * Nand cells
+ * Description:
+ *   Before writing or erasing the Nand cells, the write enable has to be set.
+ *   After the write or erase, the write enable bit is automatically
+ *   cleared (status register bit 2).
+ *   Setting bit 2 of the status register has the same effect.
+ */
+static int spinand_write_enable(struct spi_device *spi_nand)
+{
+ struct spinand_cmd cmd = {0};
+
+ cmd.cmd = CMD_WR_ENABLE;
+ return spinand_cmd(spi_nand, &cmd);
+}
+
+static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
+{
+ struct spinand_cmd cmd = {0};
+ u16 row;
+
+ row = page_id;
+ cmd.cmd = CMD_READ;
+ cmd.n_addr = 3;
+ cmd.addr[1] = (u8)((row & 0xff00) >> 8);
+ cmd.addr[2] = (u8)(row & 0x00ff);
+
+ return spinand_cmd(spi_nand, &cmd);
+}
+
+/*
+ * spinand_read_from_cache- send command 0x03 to read out the data from the
+ * cache register (2112 bytes max)
+ * Description:
+ *   The read can specify 1 to 2112 bytes of data to be read from the
+ *   corresponding locations.
+ *   No tRD delay.
+ */
+static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
+ u16 byte_id, u16 len, u8 *rbuf)
+{
+ struct spinand_cmd cmd = {0};
+ u16 column;
+
+ column = byte_id;
+ cmd.cmd = CMD_READ_RDM;
+ cmd.n_addr = 3;
+ cmd.addr[0] = (u8)((column & 0xff00) >> 8);
+ cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
+ cmd.addr[1] = (u8)(column & 0x00ff);
+ cmd.addr[2] = (u8)(0xff);
+ cmd.n_dummy = 0;
+ cmd.n_rx = len;
+ cmd.rx_buf = rbuf;
+
+ return spinand_cmd(spi_nand, &cmd);
+}
+
+/*
+ * spinand_read_page-to read a page with:
+ * @page_id: the physical page number
+ * @offset: the location from 0 to 2111
+ * @len: number of bytes to read
+ * @rbuf: read buffer to hold @len bytes
+ *
+ * Description:
+ * The read includes two commands to the Nand: 0x13 and 0x03 commands
+ * Poll to read status to wait for tRD time.
+ */
+static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
+ u16 offset, u16 len, u8 *rbuf)
+{
+ int ret;
+ u8 status = 0;
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ if (enable_read_hw_ecc) {
+ if (spinand_enable_ecc(spi_nand) < 0)
+ dev_err(&spi_nand->dev, "enable HW ECC failed!");
+ }
+#endif
+ ret = spinand_read_page_to_cache(spi_nand, page_id);
+ if (ret < 0)
+ return ret;
+
+ if (wait_till_ready(spi_nand))
+		dev_err(&spi_nand->dev, "WAIT timed out!!!\n");
+
+ while (1) {
+ ret = spinand_read_status(spi_nand, &status);
+ if (ret < 0) {
+ dev_err(&spi_nand->dev,
+ "err %d read status register\n", ret);
+ return ret;
+ }
+
+ if ((status & STATUS_OIP_MASK) == STATUS_READY) {
+ if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
+ dev_err(&spi_nand->dev, "ecc error, page=%d\n",
+ page_id);
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ret = spinand_read_from_cache(spi_nand, page_id, offset, len, rbuf);
+ if (ret < 0) {
+ dev_err(&spi_nand->dev, "read from cache failed!!\n");
+ return ret;
+ }
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ if (enable_read_hw_ecc) {
+ ret = spinand_disable_ecc(spi_nand);
+ if (ret < 0) {
+ dev_err(&spi_nand->dev, "disable ecc failed!!\n");
+ return ret;
+ }
+ enable_read_hw_ecc = 0;
+ }
+#endif
+ return ret;
+}
+
+/*
+ * spinand_program_data_to_cache--to write a page to cache with:
+ * @byte_id: the location to write to the cache
+ * @len: number of bytes to write
+ * @wbuf: write buffer holding the @len bytes to write
+ *
+ * Description:
+ * The write command used here is 0x84--indicating that the cache is
+ * not cleared first.
+ * Since it is writing the data to cache, there is no tPROG time.
+ */
+static int spinand_program_data_to_cache(struct spi_device *spi_nand,
+ u16 page_id, u16 byte_id, u16 len, u8 *wbuf)
+{
+ struct spinand_cmd cmd = {0};
+ u16 column;
+
+ column = byte_id;
+ cmd.cmd = CMD_PROG_PAGE_CLRCACHE;
+ cmd.n_addr = 2;
+ cmd.addr[0] = (u8)((column & 0xff00) >> 8);
+ cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
+ cmd.addr[1] = (u8)(column & 0x00ff);
+ cmd.n_tx = len;
+ cmd.tx_buf = wbuf;
+
+ return spinand_cmd(spi_nand, &cmd);
+}
+
+/**
+ * spinand_program_execute--to write a page from cache to the Nand array with
+ * @page_id: the physical page location to write the page.
+ *
+ * Description:
+ * The write command used here is 0x10--indicating the cache is writing to
+ * the Nand array.
+ * Need to wait for tPROG time to finish the transaction.
+ */
+static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
+{
+ struct spinand_cmd cmd = {0};
+ u16 row;
+
+ row = page_id;
+ cmd.cmd = CMD_PROG_PAGE_EXC;
+ cmd.n_addr = 3;
+ cmd.addr[1] = (u8)((row & 0xff00) >> 8);
+ cmd.addr[2] = (u8)(row & 0x00ff);
+
+ return spinand_cmd(spi_nand, &cmd);
+}
+
+/**
+ * spinand_program_page--to write a page with:
+ * @page_id: the physical page location to write the page.
+ * @offset: the location from the cache starting from 0 to 2111
+ * @len: the number of bytes to write
+ * @wbuf: the buffer to hold the number of bytes
+ *
+ * Description:
+ * The commands used here are 0x06, 0x84, and 0x10--indicating that
+ * the write enable is first sent, the write cache command, and the
+ * write execute command.
+ * Poll to wait for the tPROG time to finish the transaction.
+ */
+static int spinand_program_page(struct spi_device *spi_nand,
+ u16 page_id, u16 offset, u16 len, u8 *buf)
+{
+ int retval;
+ u8 status = 0;
+ uint8_t *wbuf;
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ unsigned int i, j;
+
+ enable_read_hw_ecc = 0;
+ wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
+ spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
+
+ for (i = offset, j = 0; i < len; i++, j++)
+ wbuf[i] &= buf[j];
+
+ if (enable_hw_ecc) {
+ retval = spinand_enable_ecc(spi_nand);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev, "enable ecc failed!!\n");
+ return retval;
+ }
+ }
+#else
+ wbuf = buf;
+#endif
+ retval = spinand_write_enable(spi_nand);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev, "write enable failed!!\n");
+ return retval;
+ }
+ if (wait_till_ready(spi_nand))
+		dev_err(&spi_nand->dev, "wait timed out!!!\n");
+
+ retval = spinand_program_data_to_cache(spi_nand, page_id,
+ offset, len, wbuf);
+ if (retval < 0)
+ return retval;
+ retval = spinand_program_execute(spi_nand, page_id);
+ if (retval < 0)
+ return retval;
+ while (1) {
+ retval = spinand_read_status(spi_nand, &status);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev,
+ "error %d reading status register\n",
+ retval);
+ return retval;
+ }
+
+ if ((status & STATUS_OIP_MASK) == STATUS_READY) {
+ if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
+ dev_err(&spi_nand->dev,
+ "program error, page %d\n", page_id);
+ return -1;
+ } else
+ break;
+ }
+ }
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ if (enable_hw_ecc) {
+ retval = spinand_disable_ecc(spi_nand);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev, "disable ecc failed!!\n");
+ return retval;
+ }
+ enable_hw_ecc = 0;
+ }
+#endif
+
+ return 0;
+}
+
+/**
+ * spinand_erase_block_erase--to erase a block with:
+ * @block_id: the physical block location to erase.
+ *
+ * Description:
+ * The command used here is 0xd8--indicating an erase command to erase
+ * one block--64 pages
+ * Need to wait for tERS.
+ */
+static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
+{
+ struct spinand_cmd cmd = {0};
+ u16 row;
+
+ row = block_id;
+ cmd.cmd = CMD_ERASE_BLK;
+ cmd.n_addr = 3;
+ cmd.addr[1] = (u8)((row & 0xff00) >> 8);
+ cmd.addr[2] = (u8)(row & 0x00ff);
+
+ return spinand_cmd(spi_nand, &cmd);
+}
+
+/**
+ * spinand_erase_block--to erase a block with:
+ * @block_id: the physical block location to erase.
+ *
+ * Description:
+ *   The commands used here are 0x06 and 0xd8--indicating an erase
+ *   command to erase one block--64 pages.
+ *   It will first enable the write enable bit (0x06 command),
+ *   and then send the 0xd8 erase command.
+ *   Poll to wait for the tERS time to complete the transaction.
+ */
+static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
+{
+ int retval;
+ u8 status = 0;
+
+ retval = spinand_write_enable(spi_nand);
+ if (wait_till_ready(spi_nand))
+		dev_err(&spi_nand->dev, "wait timed out!!!\n");
+
+ retval = spinand_erase_block_erase(spi_nand, block_id);
+ while (1) {
+ retval = spinand_read_status(spi_nand, &status);
+ if (retval < 0) {
+ dev_err(&spi_nand->dev,
+ "error %d reading status register\n",
+ (int) retval);
+ return retval;
+ }
+
+ if ((status & STATUS_OIP_MASK) == STATUS_READY) {
+ if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
+ dev_err(&spi_nand->dev,
+ "erase error, block %d\n", block_id);
+ return -1;
+ } else
+ break;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+static int spinand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ const uint8_t *p = buf;
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+
+ enable_hw_ecc = 1;
+ chip->write_buf(mtd, p, eccsize * eccsteps);
+ return 0;
+}
+
+static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ u8 retval, status;
+ uint8_t *p = buf;
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+
+ enable_read_hw_ecc = 1;
+
+ chip->read_buf(mtd, p, eccsize * eccsteps);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ while (1) {
+ retval = spinand_read_status(info->spi, &status);
+ if ((status & STATUS_OIP_MASK) == STATUS_READY) {
+ if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
+ pr_info("spinand: ECC error\n");
+ mtd->ecc_stats.failed++;
+ } else if ((status & STATUS_ECC_MASK) ==
+ STATUS_ECC_1BIT_CORRECTED)
+ mtd->ecc_stats.corrected++;
+ break;
+ }
+ }
+ return 0;
+
+}
+#endif
+
+static void spinand_select_chip(struct mtd_info *mtd, int dev)
+{
+}
+
+static uint8_t spinand_read_byte(struct mtd_info *mtd)
+{
+ struct spinand_state *state = mtd_to_state(mtd);
+ u8 data;
+
+ data = state->buf[state->buf_ptr];
+ state->buf_ptr++;
+ return data;
+}
+
+
+static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+
+ unsigned long timeo = jiffies;
+ int retval, state = chip->state;
+ u8 status;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ while (time_before(jiffies, timeo)) {
+ retval = spinand_read_status(info->spi, &status);
+ if ((status & STATUS_OIP_MASK) == STATUS_READY)
+ return 0;
+
+ cond_resched();
+ }
+ return 0;
+}
+
+static void spinand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+
+ struct spinand_state *state = mtd_to_state(mtd);
+ memcpy(state->buf + state->buf_ptr, buf, len);
+ state->buf_ptr += len;
+}
+
+static void spinand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct spinand_state *state = mtd_to_state(mtd);
+ memcpy(buf, state->buf + state->buf_ptr, len);
+ state->buf_ptr += len;
+}
+
+/*
+ * spinand_reset- send RESET command "0xff" to the Nand device.
+ */
+static void spinand_reset(struct spi_device *spi_nand)
+{
+ struct spinand_cmd cmd = {0};
+
+ cmd.cmd = CMD_RESET;
+
+ if (spinand_cmd(spi_nand, &cmd) < 0)
+ pr_info("spinand reset failed!\n");
+
+ /* elapse 1ms before issuing any other command */
+ udelay(1000);
+
+ if (wait_till_ready(spi_nand))
+		dev_err(&spi_nand->dev, "wait timed out!\n");
+}
+
+static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+ struct spinand_state *state = (struct spinand_state *)info->priv;
+
+ switch (command) {
+ /*
+ * READ0 - read in first 0x800 bytes
+ */
+ case NAND_CMD_READ1:
+ case NAND_CMD_READ0:
+ state->buf_ptr = 0;
+ spinand_read_page(info->spi, page, 0x0, 0x840, state->buf);
+ break;
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ state->buf_ptr = 0;
+ spinand_read_page(info->spi, page, 0x800, 0x40, state->buf);
+ break;
+ case NAND_CMD_RNDOUT:
+ state->buf_ptr = column;
+ break;
+ case NAND_CMD_READID:
+ state->buf_ptr = 0;
+ spinand_read_id(info->spi, (u8 *)state->buf);
+ break;
+ case NAND_CMD_PARAM:
+ state->buf_ptr = 0;
+ break;
+	/* ERASE1 performs the block erase using the page address */
+ case NAND_CMD_ERASE1:
+ spinand_erase_block(info->spi, page);
+ break;
+	/* ERASE2 is a no-op; the erase was already done in ERASE1 */
+ case NAND_CMD_ERASE2:
+ break;
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN:
+ state->col = column;
+ state->row = page;
+ state->buf_ptr = 0;
+ break;
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG:
+ spinand_program_page(info->spi, state->row, state->col,
+ state->buf_ptr, state->buf);
+ break;
+ case NAND_CMD_STATUS:
+ spinand_get_otp(info->spi, state->buf);
+ if (!(state->buf[0] & 0x80))
+ state->buf[0] = 0x80;
+ state->buf_ptr = 0;
+ break;
+ /* RESET command */
+ case NAND_CMD_RESET:
+ if (wait_till_ready(info->spi))
+			dev_err(&info->spi->dev, "WAIT timed out!!!\n");
+		/* a minimum of 250us must elapse before issuing RESET cmd */
+ udelay(250);
+ spinand_reset(info->spi);
+ break;
+ default:
+ dev_err(&mtd->dev, "Unknown CMD: 0x%x\n", command);
+ }
+}
+
+/**
+ * spinand_lock_block- send write register 0x1f command to the Nand device
+ *
+ * Description:
+ *    After power up, all the Nand blocks are locked.  This function allows
+ *    one to unlock the blocks so that they can be written or erased.
+ */
+static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
+{
+ struct spinand_cmd cmd = {0};
+ int ret;
+ u8 otp = 0;
+
+ ret = spinand_get_otp(spi_nand, &otp);
+
+ cmd.cmd = CMD_WRITE_REG;
+ cmd.n_addr = 1;
+ cmd.addr[0] = REG_BLOCK_LOCK;
+ cmd.n_tx = 1;
+ cmd.tx_buf = &lock;
+
+ ret = spinand_cmd(spi_nand, &cmd);
+ if (ret < 0)
+ dev_err(&spi_nand->dev, "error %d lock block\n", ret);
+
+ return ret;
+}
+/*
+ * spinand_probe - [spinand Interface]
+ * @spi_nand: registered device driver.
+ *
+ * Description:
+ * To set up the device driver parameters to make the device available.
+ */
+static int spinand_probe(struct spi_device *spi_nand)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct spinand_info *info;
+ struct spinand_state *state;
+ struct mtd_part_parser_data ppdata;
+
+ info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->spi = spi_nand;
+
+ spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);
+
+ state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
+ GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ info->priv = state;
+ state->buf_ptr = 0;
+ state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
+ if (!state->buf)
+ return -ENOMEM;
+
+ chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 0x200;
+ chip->ecc.bytes = 0x6;
+ chip->ecc.steps = 0x4;
+
+ chip->ecc.strength = 1;
+ chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+ chip->ecc.layout = &spinand_oob_64;
+ chip->ecc.read_page = spinand_read_page_hwecc;
+ chip->ecc.write_page = spinand_write_page_hwecc;
+#else
+ chip->ecc.mode = NAND_ECC_SOFT;
+ if (spinand_disable_ecc(spi_nand) < 0)
+ pr_info("%s: disable ecc failed!\n", __func__);
+#endif
+
+ chip->priv = info;
+ chip->read_buf = spinand_read_buf;
+ chip->write_buf = spinand_write_buf;
+ chip->read_byte = spinand_read_byte;
+ chip->cmdfunc = spinand_cmdfunc;
+ chip->waitfunc = spinand_wait;
+ chip->options |= NAND_CACHEPRG;
+ chip->select_chip = spinand_select_chip;
+
+ mtd = devm_kzalloc(&spi_nand->dev, sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi_nand->dev, mtd);
+
+ mtd->priv = chip;
+ mtd->name = dev_name(&spi_nand->dev);
+ mtd->owner = THIS_MODULE;
+ mtd->oobsize = 64;
+
+ if (nand_scan(mtd, 1))
+ return -ENXIO;
+
+ ppdata.of_node = spi_nand->dev.of_node;
+ return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+}
+
+/*
+ * spinand_remove: Remove the device driver
+ * @spi: the spi device.
+ *
+ * Description:
+ *   To remove the device driver parameters and free up allocated memory.
+ */
+static int spinand_remove(struct spi_device *spi)
+{
+ mtd_device_unregister(dev_get_drvdata(&spi->dev));
+
+ return 0;
+}
+
+static const struct of_device_id spinand_dt[] = {
+	{ .compatible = "spinand,mt29f", },
+	{},
+};
+
+/*
+ * Device name structure description
+ */
+static struct spi_driver spinand_driver = {
+ .driver = {
+ .name = "mt29f",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = spinand_dt,
+ },
+ .probe = spinand_probe,
+ .remove = spinand_remove,
+};
+
+module_spi_driver(spinand_driver);
+
+MODULE_DESCRIPTION("SPI NAND driver for Micron");
+MODULE_AUTHOR("Henry Pan <hspan@micron.com>, Kamlakant Patel <kamlakant.patel@broadcom.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
new file mode 100644
index 00000000000..7f2c24dc51b
--- /dev/null
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.h
@@ -0,0 +1,107 @@
+/*-
+ * Copyright 2013 Broadcom Corporation
+ *
+ * Copyright (c) 2009-2010 Micron Technology, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Henry Pan <hspan@micron.com>
+ *
+ * based on nand.h
+ */
+#ifndef __LINUX_MTD_SPI_NAND_H
+#define __LINUX_MTD_SPI_NAND_H
+
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/mtd/mtd.h>
+
+/* cmd */
+#define CMD_READ 0x13
+#define CMD_READ_RDM 0x03
+#define CMD_PROG_PAGE_CLRCACHE 0x02
+#define CMD_PROG_PAGE 0x84
+#define CMD_PROG_PAGE_EXC 0x10
+#define CMD_ERASE_BLK 0xd8
+#define CMD_WR_ENABLE 0x06
+#define CMD_WR_DISABLE 0x04
+#define CMD_READ_ID 0x9f
+#define CMD_RESET 0xff
+#define CMD_READ_REG 0x0f
+#define CMD_WRITE_REG 0x1f
+
+/* feature/ status reg */
+#define REG_BLOCK_LOCK 0xa0
+#define REG_OTP 0xb0
+#define REG_STATUS		0xc0 /* timing */
+
+/* status */
+#define STATUS_OIP_MASK 0x01
+#define STATUS_READY (0 << 0)
+#define STATUS_BUSY (1 << 0)
+
+#define STATUS_E_FAIL_MASK 0x04
+#define STATUS_E_FAIL (1 << 2)
+
+#define STATUS_P_FAIL_MASK 0x08
+#define STATUS_P_FAIL (1 << 3)
+
+#define STATUS_ECC_MASK 0x30
+#define STATUS_ECC_1BIT_CORRECTED (1 << 4)
+#define STATUS_ECC_ERROR (2 << 4)
+#define STATUS_ECC_RESERVED (3 << 4)
+
+/*ECC enable defines*/
+#define OTP_ECC_MASK 0x10
+#define OTP_ECC_OFF 0
+#define OTP_ECC_ON 1
+
+#define ECC_DISABLED
+#define ECC_IN_NAND
+#define ECC_SOFT
+
+/* block lock */
+#define BL_ALL_LOCKED 0x38
+#define BL_1_2_LOCKED 0x30
+#define BL_1_4_LOCKED 0x28
+#define BL_1_8_LOCKED 0x20
+#define BL_1_16_LOCKED 0x18
+#define BL_1_32_LOCKED 0x10
+#define BL_1_64_LOCKED 0x08
+#define BL_ALL_UNLOCKED 0
+
+struct spinand_info {
+ struct nand_ecclayout *ecclayout;
+ struct spi_device *spi;
+ void *priv;
+};
+
+struct spinand_state {
+ uint32_t col;
+ uint32_t row;
+ int buf_ptr;
+ u8 *buf;
+};
+
+struct spinand_cmd {
+ u8 cmd;
+	u32 n_addr;	/* Number of address bytes */
+ u8 addr[3]; /* Reg Offset */
+ u32 n_dummy; /* Dummy use */
+ u32 n_tx; /* Number of tx bytes */
+ u8 *tx_buf; /* Tx buf */
+ u32 n_rx; /* Number of rx bytes */
+ u8 *rx_buf; /* Rx buf */
+};
+
+extern int spinand_mtd(struct mtd_info *mtd);
+extern void spinand_mtd_release(struct mtd_info *mtd);
+
+#endif /* __LINUX_MTD_SPI_NAND_H */
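
The status-register masks above can be combined into a small decoder; the helper below is only a sketch built from those definitions and is not part of the driver:

#include <linux/types.h>
#include <linux/printk.h>
#include "mt29f_spinand.h"

static void demo_decode_status(u8 status)
{
	if ((status & STATUS_OIP_MASK) == STATUS_BUSY)
		pr_info("spinand: operation in progress\n");
	if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL)
		pr_info("spinand: program failed\n");
	if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL)
		pr_info("spinand: erase failed\n");
	if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR)
		pr_info("spinand: uncorrectable ECC error\n");
	else if ((status & STATUS_ECC_MASK) == STATUS_ECC_1BIT_CORRECTED)
		pr_info("spinand: corrected a 1-bit ECC error\n");
}
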
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 46eabd0e426..235d2b1ec59 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -44,8 +44,8 @@
#include <linux/platform_device.h>
#include <asm/mipsregs.h>
-
-/* fmn.h - For FMN credit configuration and registering fmn_handler.
+/*
+ * fmn.h - For FMN credit configuration and registering fmn_handler.
 * FMN is a communication mechanism that allows processing agents within
 * XLR/XLS to communicate with each other.
*/
@@ -90,7 +90,8 @@ static inline struct sk_buff *mac_get_skb_back_ptr(void *addr)
{
struct sk_buff **back_ptr;
- /* this function should be used only for newly allocated packets.
+ /*
+ * this function should be used only for newly allocated packets.
* It assumes the first cacheline is for the back pointer related
* book keeping info.
*/
@@ -102,7 +103,8 @@ static inline void mac_put_skb_back_ptr(struct sk_buff *skb)
{
struct sk_buff **back_ptr = (struct sk_buff **)skb->data;
- /* this function should be used only for newly allocated packets.
+ /*
+ * this function should be used only for newly allocated packets.
* It assumes the first cacheline is for the back pointer related
* book keeping info.
*/
@@ -500,8 +502,10 @@ static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
sizeof(u64));
}
-/* Configure PDE to Round-Robin distribution of packets to the
- * available cpu */
+/*
+ * Configure PDE to Round-Robin distribution of packets to the
+ * available cpu
+ */
static void xlr_config_pde(struct xlr_net_priv *priv)
{
int i = 0;
@@ -528,8 +532,10 @@ static void xlr_config_pde(struct xlr_net_priv *priv)
((bkt_map >> 32) & 0xffffffff));
}
-/* Setup the Message ring credits, bucket size and other
- * common configuration */
+/*
+ * Setup the Message ring credits, bucket size and other
+ * common configuration
+ */
static void xlr_config_common(struct xlr_net_priv *priv)
{
struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
@@ -545,8 +551,10 @@ static void xlr_config_common(struct xlr_net_priv *priv)
bucket_size[i]);
}
- /* Setting non-core Credit counter register
- * Distributing Gmac's credit to CPU's*/
+ /*
+ * Setting non-core Credit counter register
+ * Distributing Gmac's credit to CPU's
+ */
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
xlr_nae_wreg(priv->base_addr,
@@ -593,7 +601,8 @@ static void xlr_config_translate_table(struct xlr_net_priv *priv)
c1 = 3;
c2 = 0;
for (i = 0; i < 64; i++) {
- /* On use_bkt set the b0, b1 are used, else
+ /*
+ * On use_bkt set the b0, b1 are used, else
* the 4 classes are used, here implemented
* a logic to distribute the packets to the
* buckets equally or based on the class
@@ -736,7 +745,8 @@ static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
return ret;
}
-/* XLR ports are RGMII. XLS ports are SGMII mostly except the port0,
+/*
+ * XLR ports are RGMII. XLS ports are SGMII mostly except the port0,
 * which can be configured either SGMII or RGMII, considered SGMII
 * by default; if the board is set up as RGMII, the port_type needs to be set
 * accordingly. Serdes and PCS layers need to be configured for SGMII
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index f91d27e9d7c..cea79663371 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -1096,4 +1096,4 @@ struct xlr_net_priv {
u64 *class_3_spill;
};
-extern void xlr_set_gmac_speed(struct xlr_net_priv *priv);
+void xlr_set_gmac_speed(struct xlr_net_priv *priv);
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 7e61adacd15..9475e20c3d6 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -10,7 +10,7 @@ config KEYBOARD_NVEC
tristate "Keyboard on nVidia compliant EC"
depends on MFD_NVEC && INPUT
help
- Say Y here to enable support for a keyboard connected to
+ Say Y here to enable support for a keyboard connected to
a nVidia compliant embedded controller.
config SERIO_NVEC_PS2
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 5a5c6397e74..3066ee2e753 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -802,7 +802,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
- if(!pdev->dev.of_node) {
+ if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "must be instantiated using device tree\n");
return -ENODEV;
}
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
index 89df1ad8be3..5588be395f2 100644
--- a/drivers/staging/octeon-usb/Makefile
+++ b/drivers/staging/octeon-usb/Makefile
@@ -1,3 +1 @@
-obj-${CONFIG_OCTEON_USB} := octeon-usb.o
-octeon-usb-y := octeon-hcd.o
-octeon-usb-y += cvmx-usb.o
+obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
deleted file mode 100644
index 45dfe94199a..00000000000
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ /dev/null
@@ -1,3158 +0,0 @@
-/***********************license start***************
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
-
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
-
- * This Software, including technical data, may be subject to U.S. export control
- * laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
-
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
- * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- ***********************license end**************************************/
-
-
-/**
- * @file
- *
- * "cvmx-usb.c" defines a set of low level USB functions to help
- * developers create Octeon USB drivers for various operating
- * systems. These functions provide a generic API to the Octeon
- * USB blocks, hiding the internal hardware specific
- * operations.
- */
-#include <linux/delay.h>
-#include <asm/octeon/cvmx.h>
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-sysinfo.h>
-#include "cvmx-usbnx-defs.h"
-#include "cvmx-usbcx-defs.h"
-#include "cvmx-usb.h"
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-helper-board.h>
-
-#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
-#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
-// a normal prefetch
-#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
-// normal prefetches that use the pref instruction
-#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
-#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
-#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
-
-#define MAX_RETRIES 3 /* Maximum number of times to retry failed transactions */
-#define MAX_PIPES 32 /* Maximum number of pipes that can be open at once */
-#define MAX_TRANSACTIONS 256 /* Maximum number of outstanding transactions across all pipes */
-#define MAX_CHANNELS 8 /* Maximum number of hardware channels supported by the USB block */
-#define MAX_USB_ADDRESS 127 /* The highest valid USB device address */
-#define MAX_USB_ENDPOINT 15 /* The highest valid USB endpoint number */
-#define MAX_USB_HUB_PORT 15 /* The highest valid port number on a hub */
-#define MAX_TRANSFER_BYTES ((1<<19)-1) /* The low level hardware can transfer a maximum of this number of bytes in each transfer. The field is 19 bits wide */
-#define MAX_TRANSFER_PACKETS ((1<<10)-1) /* The low level hardware can transfer a maximum of this number of packets in each transfer. The field is 10 bits wide */
-
-/*
- * These defines disable the normal read and write csr. This is so I can add
- * extra debug stuff to the usb specific version and I won't use the normal
- * version by mistake
- */
-#define cvmx_read_csr use_cvmx_usb_read_csr64_instead_of_cvmx_read_csr
-#define cvmx_write_csr use_cvmx_usb_write_csr64_instead_of_cvmx_write_csr
-
-enum cvmx_usb_transaction_flags {
- __CVMX_USB_TRANSACTION_FLAGS_IN_USE = 1<<16,
-};
-
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
-/**
- * Logical transactions may take numerous low level
- * transactions, especially when splits are concerned. This
- * enum represents all of the possible stages a transaction can
- * be in. Note that split completes are always even. This is so
- * the NAK handler can backup to the previous low level
- * transaction with a simple clearing of bit 0.
- */
-enum cvmx_usb_stage {
- CVMX_USB_STAGE_NON_CONTROL,
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
- CVMX_USB_STAGE_SETUP,
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
- CVMX_USB_STAGE_DATA,
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
- CVMX_USB_STAGE_STATUS,
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
-};
-
-/**
- * struct cvmx_usb_transaction - describes each pending USB transaction
- * regardless of type. These are linked together
- * to form a list of pending requests for a pipe.
- *
- * @prev: Transaction before this one in the pipe.
- * @next: Transaction after this one in the pipe.
- * @type: Type of transaction, duplicated of the pipe.
- * @flags: State flags for this transaction.
- * @buffer: User's physical buffer address to read/write.
- * @buffer_length: Size of the user's buffer in bytes.
- * @control_header: For control transactions, physical address of the 8
- * byte standard header.
- * @iso_start_frame: For ISO transactions, the starting frame number.
- * @iso_number_packets: For ISO transactions, the number of packets in the
- * request.
- * @iso_packets: For ISO transactions, the sub packets in the request.
- * @actual_bytes: Actual bytes transfer for this transaction.
- * @stage: For control transactions, the current stage.
- * @callback: User's callback function when complete.
- * @callback_data: User's data.
- */
-struct cvmx_usb_transaction {
- struct cvmx_usb_transaction *prev;
- struct cvmx_usb_transaction *next;
- enum cvmx_usb_transfer type;
- enum cvmx_usb_transaction_flags flags;
- uint64_t buffer;
- int buffer_length;
- uint64_t control_header;
- int iso_start_frame;
- int iso_number_packets;
- struct cvmx_usb_iso_packet *iso_packets;
- int xfersize;
- int pktcnt;
- int retries;
- int actual_bytes;
- enum cvmx_usb_stage stage;
- cvmx_usb_callback_func_t callback;
- void *callback_data;
-};
-
-/**
- * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
- * and some USB device. It contains a list of pending
- * request to the device.
- *
- * @prev: Pipe before this one in the list
- * @next: Pipe after this one in the list
- * @head: The first pending transaction
- * @tail: The last pending transaction
- * @interval: For periodic pipes, the interval between packets in
- * frames
- * @next_tx_frame: The next frame this pipe is allowed to transmit on
- * @flags: State flags for this pipe
- * @device_speed: Speed of device connected to this pipe
- * @transfer_type: Type of transaction supported by this pipe
- * @transfer_dir: IN or OUT. Ignored for Control
- * @multi_count: Max packet in a row for the device
- * @max_packet: The device's maximum packet size in bytes
- * @device_addr: USB device address at other end of pipe
- * @endpoint_num: USB endpoint number at other end of pipe
- * @hub_device_addr: Hub address this device is connected to
- * @hub_port: Hub port this device is connected to
- * @pid_toggle: This toggles between 0/1 on every packet send to track
- * the data pid needed
- * @channel: Hardware DMA channel for this pipe
- * @split_sc_frame: The low order bits of the frame number the split
- * complete should be sent on
- */
-struct cvmx_usb_pipe {
- struct cvmx_usb_pipe *prev;
- struct cvmx_usb_pipe *next;
- struct cvmx_usb_transaction *head;
- struct cvmx_usb_transaction *tail;
- uint64_t interval;
- uint64_t next_tx_frame;
- enum cvmx_usb_pipe_flags flags;
- enum cvmx_usb_speed device_speed;
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_direction transfer_dir;
- int multi_count;
- uint16_t max_packet;
- uint8_t device_addr;
- uint8_t endpoint_num;
- uint8_t hub_device_addr;
- uint8_t hub_port;
- uint8_t pid_toggle;
- uint8_t channel;
- int8_t split_sc_frame;
-};
-
-/**
- * struct cvmx_usb_pipe_list
- *
- * @head: Head of the list, or NULL if empty.
- * @tail: Tail if the list, or NULL if empty.
- */
-struct cvmx_usb_pipe_list {
- struct cvmx_usb_pipe *head;
- struct cvmx_usb_pipe *tail;
-};
-
-struct cvmx_usb_tx_fifo {
- struct {
- int channel;
- int size;
- uint64_t address;
- } entry[MAX_CHANNELS+1];
- int head;
- int tail;
-};
-
-/**
- * struct cvmx_usb_internal_state - the state of the USB block
- *
- * init_flags: Flags passed to initialize.
- * index: Which USB block this is for.
- * idle_hardware_channels: Bit set for every idle hardware channel.
- * usbcx_hprt: Stored port status so we don't need to read a CSR to
- * determine splits.
- * pipe_for_channel: Map channels to pipes.
- * free_transaction_head: List of free transactions head.
- * free_transaction_tail: List of free transactions tail.
- * pipe: Storage for pipes.
- * transaction: Storage for transactions.
- * callback: User global callbacks.
- * callback_data: User data for each callback.
- * indent: Used by debug output to indent functions.
- * port_status: Last port status used for change notification.
- * free_pipes: List of all pipes that are currently closed.
- * idle_pipes: List of open pipes that have no transactions.
- * active_pipes: Active pipes indexed by transfer type.
- * frame_number: Increments every SOF interrupt for time keeping.
- * active_split: Points to the current active split, or NULL.
- */
-struct cvmx_usb_internal_state {
- int init_flags;
- int index;
- int idle_hardware_channels;
- union cvmx_usbcx_hprt usbcx_hprt;
- struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
- struct cvmx_usb_transaction *free_transaction_head;
- struct cvmx_usb_transaction *free_transaction_tail;
- struct cvmx_usb_pipe pipe[MAX_PIPES];
- struct cvmx_usb_transaction transaction[MAX_TRANSACTIONS];
- cvmx_usb_callback_func_t callback[__CVMX_USB_CALLBACK_END];
- void *callback_data[__CVMX_USB_CALLBACK_END];
- int indent;
- struct cvmx_usb_port_status port_status;
- struct cvmx_usb_pipe_list free_pipes;
- struct cvmx_usb_pipe_list idle_pipes;
- struct cvmx_usb_pipe_list active_pipes[4];
- uint64_t frame_number;
- struct cvmx_usb_transaction *active_split;
- struct cvmx_usb_tx_fifo periodic;
- struct cvmx_usb_tx_fifo nonperiodic;
-};
-
-/* This macro spins on a field waiting for it to reach a value */
-#define CVMX_WAIT_FOR_FIELD32(address, type, field, op, value, timeout_usec)\
- ({int result; \
- do { \
- uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
- octeon_get_clock_rate() / 1000000; \
- type c; \
- while (1) { \
- c.u32 = __cvmx_usb_read_csr32(usb, address); \
- if (c.s.field op (value)) { \
- result = 0; \
- break; \
- } else if (cvmx_get_cycle() > done) { \
- result = -1; \
- break; \
- } else \
- cvmx_wait(100); \
- } \
- } while (0); \
- result; })
-
-/*
- * This macro logically sets a single field in a CSR. It does the sequence
- * read, modify, and write
- */
-#define USB_SET_FIELD32(address, type, field, value) \
- do { \
- type c; \
- c.u32 = __cvmx_usb_read_csr32(usb, address); \
- c.s.field = value; \
- __cvmx_usb_write_csr32(usb, address, c.u32); \
- } while (0)
-
-/* Returns the IO address to push/pop stuff data from the FIFOs */
-#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-
-static int octeon_usb_get_clock_type(void)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
-}
-
-/**
- * Read a USB 32bit CSR. It performs the necessary address swizzle
- * for 32bit CSRs and logs the value in a readable format if
- * debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to read
- *
- * Returns: Result of the read
- */
-static inline uint32_t __cvmx_usb_read_csr32(struct cvmx_usb_internal_state *usb,
- uint64_t address)
-{
- uint32_t result = cvmx_read64_uint32(address ^ 4);
- return result;
-}
-
-
-/**
- * Write a USB 32bit CSR. It performs the necessary address
- * swizzle for 32bit CSRs and logs the value in a readable format
- * if debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to write
- * @value: Value to write
- */
-static inline void __cvmx_usb_write_csr32(struct cvmx_usb_internal_state *usb,
- uint64_t address, uint32_t value)
-{
- cvmx_write64_uint32(address ^ 4, value);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-}
-
-
-/**
- * Read a USB 64bit CSR. It logs the value in a readable format if
- * debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to read
- *
- * Returns: Result of the read
- */
-static inline uint64_t __cvmx_usb_read_csr64(struct cvmx_usb_internal_state *usb,
- uint64_t address)
-{
- uint64_t result = cvmx_read64_uint64(address);
- return result;
-}
-
-
-/**
- * Write a USB 64bit CSR. It logs the value in a readable format
- * if debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to write
- * @value: Value to write
- */
-static inline void __cvmx_usb_write_csr64(struct cvmx_usb_internal_state *usb,
- uint64_t address, uint64_t value)
-{
- cvmx_write64_uint64(address, value);
-}
-
-/**
- * Return non zero if this pipe connects to a non HIGH speed
- * device through a high speed hub.
- *
- * @usb: USB block this access is for
- * @pipe: Pipe to check
- *
- * Returns: Non zero if we need to do split transactions
- */
-static inline int __cvmx_usb_pipe_needs_split(struct cvmx_usb_internal_state *usb, struct cvmx_usb_pipe *pipe)
-{
- return ((pipe->device_speed != CVMX_USB_SPEED_HIGH) && (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH));
-}
-
-
-/**
- * Trivial utility function to return the correct PID for a pipe
- *
- * @pipe: pipe to check
- *
- * Returns: PID for pipe
- */
-static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
-{
- if (pipe->pid_toggle)
- return 2; /* Data1 */
- else
- return 0; /* Data0 */
-}
-
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, zero is returned. Most Octeon chips support
- * one USB port, but some support two. cvmx_usb_initialize()
- * must be called with an independent struct cvmx_usb_state
- * for each port.
- *
- * Returns: Number of ports, zero if USB isn't supported
- */
-int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
-
-/**
- * Allocate a usb transaction for use
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: Transaction or NULL
- */
-static inline struct cvmx_usb_transaction *__cvmx_usb_alloc_transaction(struct cvmx_usb_internal_state *usb)
-{
- struct cvmx_usb_transaction *t;
- t = usb->free_transaction_head;
- if (t) {
- usb->free_transaction_head = t->next;
- if (!usb->free_transaction_head)
- usb->free_transaction_tail = NULL;
- }
- if (t) {
- memset(t, 0, sizeof(*t));
- t->flags = __CVMX_USB_TRANSACTION_FLAGS_IN_USE;
- }
- return t;
-}
-
-
-/**
- * Free a usb transaction
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @transaction:
- * Transaction to free
- */
-static inline void __cvmx_usb_free_transaction(struct cvmx_usb_internal_state *usb,
- struct cvmx_usb_transaction *transaction)
-{
- transaction->flags = 0;
- transaction->prev = NULL;
- transaction->next = NULL;
- if (usb->free_transaction_tail)
- usb->free_transaction_tail->next = transaction;
- else
- usb->free_transaction_head = transaction;
- usb->free_transaction_tail = transaction;
-}
-
-
-/**
- * Add a pipe to the tail of a list
- * @list: List to add pipe to
- * @pipe: Pipe to add
- */
-static inline void __cvmx_usb_append_pipe(struct cvmx_usb_pipe_list *list, struct cvmx_usb_pipe *pipe)
-{
- pipe->next = NULL;
- pipe->prev = list->tail;
- if (list->tail)
- list->tail->next = pipe;
- else
- list->head = pipe;
- list->tail = pipe;
-}
-
-
-/**
- * Remove a pipe from a list
- * @list: List to remove pipe from
- * @pipe: Pipe to remove
- */
-static inline void __cvmx_usb_remove_pipe(struct cvmx_usb_pipe_list *list, struct cvmx_usb_pipe *pipe)
-{
- if (list->head == pipe) {
- list->head = pipe->next;
- pipe->next = NULL;
- if (list->head)
- list->head->prev = NULL;
- else
- list->tail = NULL;
- } else if (list->tail == pipe) {
- list->tail = pipe->prev;
- list->tail->next = NULL;
- pipe->prev = NULL;
- } else {
- pipe->prev->next = pipe->next;
- pipe->next->prev = pipe->prev;
- pipe->prev = NULL;
- pipe->next = NULL;
- }
-}
-
-
-/**
- * Initialize a USB port for use. This must be called before any
- * other access to the Octeon USB port is made. The port starts
- * off in the disabled state.
- *
- * @state: Pointer to an empty struct cvmx_usb_state
- * that will be populated by the initialize call.
- * This structure is then passed to all other USB
- * functions.
- * @usb_port_number:
- * Which Octeon USB port to initialize.
- * @flags: Flags to control hardware initialization. See
- * enum cvmx_usb_initialize_flags for the flag
- * definitions. Some flags are mandatory.
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
- enum cvmx_usb_initialize_flags flags)
-{
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
- union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- usb->init_flags = flags;
-
- /* Make sure that state is large enough to store the internal state */
- if (sizeof(*state) < sizeof(*usb))
- return -EINVAL;
- /* At first allow 0-1 for the usb port number */
- if ((usb_port_number < 0) || (usb_port_number > 1))
- return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if ((flags & (CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI |
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0) {
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
-			flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; /* Only 12 MHz crystals are supported */
- else
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
- }
-
- if (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /* Check for auto ref clock frequency */
- if (!(flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
-
- memset(usb, 0, sizeof(*usb));
- usb->init_flags = flags;
-
- /* Initialize the USB state structure */
- {
- int i;
- usb->index = usb_port_number;
-
- /* Initialize the transaction double linked list */
- usb->free_transaction_head = NULL;
- usb->free_transaction_tail = NULL;
- for (i = 0; i < MAX_TRANSACTIONS; i++)
- __cvmx_usb_free_transaction(usb, usb->transaction + i);
- for (i = 0; i < MAX_PIPES; i++)
- __cvmx_usb_append_pipe(&usb->free_pipes, usb->pipe + i);
- }
-
- /*
- * Power On Reset and PHY Initialization
- *
- * 1. Wait for DCOK to assert (nothing to do)
- *
- * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
- * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
- */
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hrst = 0;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hclk_rst = 0;
- usbn_clk_ctl.s.enable = 0;
- /*
- * 2b. Select the USB reference clock/crystal parameters by writing
- * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /*
- * The USB port uses 12/24/48MHz 2.5V board clock
- * source at USB_XO. USB_XI should be tied to GND.
- * Most Octeon evaluation boards require this setting
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.cn31xx.p_xenbn = 0;
- } else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
- usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
- else
- usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */
-
- switch (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
- usbn_clk_ctl.s.p_c_sel = 0;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
- usbn_clk_ctl.s.p_c_sel = 1;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
- usbn_clk_ctl.s.p_c_sel = 2;
- break;
- }
- } else {
- /*
- * The USB port uses a 12MHz crystal as clock source
- * at USB_XO and USB_XI
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.cn31xx.p_xenbn = 1;
- } else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
- usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
- else
- usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */
-
- usbn_clk_ctl.s.p_c_sel = 0;
- }
- /*
- * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
- * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
-	 * such that USB is as close as possible to 125 MHz
- */
- {
- int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
- if (divisor < 4) /* Lower than 4 doesn't seem to work properly */
- divisor = 4;
- usbn_clk_ctl.s.divide = divisor;
- usbn_clk_ctl.s.divide2 = 0;
- }
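-	/*
-	 * For example, a hypothetical 600 MHz core clock yields a divisor of
-	 * (600 + 124) / 125 = 5, i.e. an HCLK of 120 MHz; rates of 500 MHz
-	 * and below all end up using the minimum divisor of 4.
-	 */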
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
- usbn_clk_ctl.s.hclk_rst = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
- cvmx_wait(64);
- /*
- * 3. Program the power-on reset field in the USBN clock-control
- * register:
- * USBN_CLK_CTL[POR] = 0
- */
- usbn_clk_ctl.s.por = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 4. Wait 1 ms for PHY clock to start */
- mdelay(1);
- /*
- * 5. Program the Reset input from automatic test equipment field in the
- * USBP control and status register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
- */
- usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
- usbn_usbp_ctl_status.s.ate_reset = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 6. Wait 10 cycles */
- cvmx_wait(10);
- /*
- * 7. Clear ATE_RESET field in the USBN clock-control register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
- */
- usbn_usbp_ctl_status.s.ate_reset = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /*
- * 8. Program the PHY reset field in the USBN clock-control register:
- * USBN_CLK_CTL[PRST] = 1
- */
- usbn_clk_ctl.s.prst = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /*
- * 9. Program the USBP control and status register to select host or
- * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
- * device
- */
- usbn_usbp_ctl_status.s.hst_mode = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 10. Wait 1 us */
- udelay(1);
- /*
- * 11. Program the hreset_n field in the USBN clock-control register:
- * USBN_CLK_CTL[HRST] = 1
- */
- usbn_clk_ctl.s.hrst = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 12. Proceed to USB core initialization */
- usbn_clk_ctl.s.enable = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- udelay(1);
-
- /*
- * USB Core Initialization
- *
- * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
- * determine USB core configuration parameters.
- *
- * Nothing needed
- *
- * 2. Program the following fields in the global AHB configuration
- * register (USBC_GAHBCFG)
- * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
- * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
- * Nonperiodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[NPTXFEMPLVL]
- * Periodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[PTXFEMPLVL]
- * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
- */
- {
- union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
- /* Due to an errata, CN31XX doesn't support DMA */
- if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
- usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usb->idle_hardware_channels = 0x1; /* Only use one channel with non DMA */
- else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
- usb->idle_hardware_channels = 0xf7; /* CN5XXX have an errata with channel 3 */
- else
- usb->idle_hardware_channels = 0xff;
- usbcx_gahbcfg.s.hbstlen = 0;
- usbcx_gahbcfg.s.nptxfemplvl = 1;
- usbcx_gahbcfg.s.ptxfemplvl = 1;
- usbcx_gahbcfg.s.glblintrmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
- usbcx_gahbcfg.u32);
- }
- /*
- * 3. Program the following fields in USBC_GUSBCFG register.
- * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
- * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
- * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
- * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
- */
- {
- union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
- usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
- usbcx_gusbcfg.s.toutcal = 0;
- usbcx_gusbcfg.s.ddrsel = 0;
- usbcx_gusbcfg.s.usbtrdtim = 0x5;
- usbcx_gusbcfg.s.phylpwrclksel = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
- usbcx_gusbcfg.u32);
- }
- /*
- * 4. The software must unmask the following bits in the USBC_GINTMSK
- * register.
- * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
- * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
- */
- {
- union cvmx_usbcx_gintmsk usbcx_gintmsk;
- int channel;
-
- usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
- usbcx_gintmsk.s.otgintmsk = 1;
- usbcx_gintmsk.s.modemismsk = 1;
- usbcx_gintmsk.s.hchintmsk = 1;
- usbcx_gintmsk.s.sofmsk = 0;
- /* We need RX FIFO interrupts if we don't have DMA */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usbcx_gintmsk.s.rxflvlmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
- usbcx_gintmsk.u32);
-
- /* Disable all channel interrupts. We'll enable them per channel later */
- for (channel = 0; channel < 8; channel++)
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- }
-
- {
- /*
- * Host Port Initialization
- *
- * 1. Program the host-port interrupt-mask field to unmask,
- * USBC_GINTMSK[PRTINT] = 1
- */
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
- prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
- disconnintmsk, 1);
- /*
- * 2. Program the USBC_HCFG register to select full-speed host
- * or high-speed host.
- */
- {
- union cvmx_usbcx_hcfg usbcx_hcfg;
- usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
- usbcx_hcfg.s.fslssupp = 0;
- usbcx_hcfg.s.fslspclksel = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
- }
- /*
- * 3. Program the port power bit to drive VBUS on the USB,
- * USBC_HPRT[PRTPWR] = 1
- */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtpwr, 1);
-
- /*
- * Steps 4-15 from the manual are done later in the port enable
- */
- }
-
- return 0;
-}
-
-
-/**
- * Shutdown a USB port after a call to cvmx_usb_initialize().
- * The port should be disabled with all pipes closed when this
- * function is called.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_shutdown(struct cvmx_usb_state *state)
-{
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- /* Make sure all pipes are closed */
- if (usb->idle_pipes.head ||
- usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS].head ||
- usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT].head ||
- usb->active_pipes[CVMX_USB_TRANSFER_CONTROL].head ||
- usb->active_pipes[CVMX_USB_TRANSFER_BULK].head)
- return -EBUSY;
-
- /* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.enable = 1;
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hclk_rst = 1;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hrst = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- return 0;
-}
-
-
-/**
- * Enable a USB port. After this call succeeds, the USB port is
- * online and servicing requests.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_enable(struct cvmx_usb_state *state)
-{
- union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
-
- /*
-	 * If the port is already enabled, just return. We don't need to
-	 * do anything.
- */
- if (usb->usbcx_hprt.s.prtena)
- return 0;
-
- /* If there is nothing plugged into the port then fail immediately */
- if (!usb->usbcx_hprt.s.prtconnsts) {
- return -ETIMEDOUT;
- }
-
- /* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 1);
-
- /*
- * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
- * process to complete.
- */
- mdelay(50);
-
- /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 0);
-
- /* Wait for the USBC_HPRT[PRTENA]. */
- if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
- prtena, ==, 1, 100000))
- return -ETIMEDOUT;
-
- /* Read the port speed field to get the enumerated speed, USBC_HPRT[PRTSPD]. */
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
-
- /*
- * 13. Program the USBC_GRXFSIZ register to select the size of the
- * receive FIFO (25%).
- */
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), union cvmx_usbcx_grxfsiz,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
- /*
- * 14. Program the USBC_GNPTXFSIZ register to select the size and the
-	 * start address of the nonperiodic transmit FIFO for nonperiodic
- * transactions (50%).
- */
- {
- union cvmx_usbcx_gnptxfsiz siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
- siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
- siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
- }
- /*
- * 15. Program the USBC_HPTXFSIZ register to select the size and start
- * address of the periodic transmit FIFO for periodic transactions
- * (25%).
- */
- {
- union cvmx_usbcx_hptxfsiz siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
- siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
- siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
- }
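-	/*
-	 * As a concrete illustration (using a hypothetical FIFO depth of
-	 * 4096 words): the RX FIFO gets words 0-1023, the nonperiodic TX
-	 * FIFO gets words 1024-3071, and the periodic TX FIFO gets words
-	 * 3072-4095, i.e. a 25%/50%/25% split of USBC_GHWCFG3[DFIFODEPTH].
-	 */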
- /* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
- txfflsh, ==, 0, 100);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, rxfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
- rxfflsh, ==, 0, 100);
-
- return 0;
-}
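-
-/*
- * A rough bring-up sketch for these entry points (illustrative only; the
- * flags value and error handling are simplified):
- *
- *	struct cvmx_usb_state state;
- *
- *	if (cvmx_usb_get_num_ports() > 0 &&
- *	    cvmx_usb_initialize(&state, 0, 0) == 0)
- *		cvmx_usb_enable(&state);
- */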
-
-
-/**
- * Disable a USB port. After this call the USB port will not
- * generate data transfers and will not generate events.
- * Transactions in process will fail and call their
- * associated callbacks.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_disable(struct cvmx_usb_state *state)
-{
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- /* Disable the port */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtena, 1);
- return 0;
-}
-
-
-/**
- * Get the current state of the USB port. Use this call to
- * determine if the usb port has anything connected, is enabled,
- * or has some sort of error condition. The return value of this
- * call has "changed" bits that signal whether the value of some
- * fields has changed between calls. These "changed" fields are based
- * on the last call to cvmx_usb_set_status(). In order to clear
- * them, you must update the status through cvmx_usb_set_status().
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: Port status information
- */
-struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *state)
-{
- union cvmx_usbcx_hprt usbc_hprt;
- struct cvmx_usb_port_status result;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- memset(&result, 0, sizeof(result));
-
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- result.port_enabled = usbc_hprt.s.prtena;
- result.port_over_current = usbc_hprt.s.prtovrcurract;
- result.port_powered = usbc_hprt.s.prtpwr;
- result.port_speed = usbc_hprt.s.prtspd;
- result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change = (result.connected != usb->port_status.connected);
-
- return result;
-}
-
-
-/**
- * Set the current state of the USB port. The status is used as
- * a reference for the "changed" bits returned by
- * cvmx_usb_get_status(). Other than serving as a reference, the
- * status passed to this function is not used. No fields can be
- * changed through this call.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @port_status:
- *		Port status to set, most likely returned by cvmx_usb_get_status()
- */
-void cvmx_usb_set_status(struct cvmx_usb_state *state, struct cvmx_usb_port_status port_status)
-{
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- usb->port_status = port_status;
- return;
-}
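-
-/*
- * A small sketch of the intended change-detection pattern (illustrative;
- * "state" is assumed to come from cvmx_usb_initialize()):
- *
- *	struct cvmx_usb_port_status ps = cvmx_usb_get_status(state);
- *
- *	if (ps.connect_change) {
- *		... handle the connect or disconnect ...
- *		cvmx_usb_set_status(state, ps);	 update the reference status
- *	}
- */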
-
-
-/**
- * Convert a USB transaction into a handle
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @transaction:
- * Transaction to get handle for
- *
- * Returns: Handle
- */
-static inline int __cvmx_usb_get_submit_handle(struct cvmx_usb_internal_state *usb,
- struct cvmx_usb_transaction *transaction)
-{
- return ((unsigned long)transaction - (unsigned long)usb->transaction) /
- sizeof(*transaction);
-}
-
-
-/**
- * Convert a USB pipe into a handle
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe: Pipe to get handle for
- *
- * Returns: Handle
- */
-static inline int __cvmx_usb_get_pipe_handle(struct cvmx_usb_internal_state *usb,
- struct cvmx_usb_pipe *pipe)
-{
- return ((unsigned long)pipe - (unsigned long)usb->pipe) / sizeof(*pipe);
-}
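-
-/*
- * Both handle helpers above simply return array indexes, so (as the
- * submit path below relies on) the inverse mappings are just:
- *
- *	pipe = usb->pipe + pipe_handle;
- *	transaction = usb->transaction + submit_handle;
- */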
-
-
-/**
- * Open a virtual pipe between the host and a USB device. A pipe
- * must be opened before data can be transferred between a device
- * and Octeon.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @flags: Optional pipe flags defined in
- * enum cvmx_usb_pipe_flags.
- * @device_addr:
- * USB device address to open the pipe to
- * (0-127).
- * @endpoint_num:
- * USB endpoint number to open the pipe to
- * (0-15).
- * @device_speed:
- * The speed of the device the pipe is going
- * to. This must match the device's speed,
- * which may be different than the port speed.
- * @max_packet: The maximum packet length the device can
- * transmit/receive (low speed=0-8, full
- * speed=0-1023, high speed=0-1024). This value
- * comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <10:0>.
- * @transfer_type:
- * The type of transfer this pipe is for.
- * @transfer_dir:
- * The direction the pipe is in. This is not
- * used for control pipes.
- * @interval: For ISOCHRONOUS and INTERRUPT transfers,
- *		this is how often the transfer is
- *		scheduled. All other transfers should specify
- * zero. The units are in frames (8000/sec at
- * high speed, 1000/sec for full speed).
- * @multi_count:
- * For high speed devices, this is the maximum
- *		allowed number of packets per microframe.
- * Specify zero for non high speed devices. This
- * value comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <12:11>.
- * @hub_device_addr:
- * Hub device address this device is connected
- * to. Devices connected directly to Octeon
- * use zero. This is only used when the device
- * is full/low speed behind a high speed hub.
- * The address will be of the high speed hub,
- *		not any full speed hubs after it.
- * @hub_port: Which port on the hub the device is
- * connected. Use zero for devices connected
- * directly to Octeon. Like hub_device_addr,
- * this is only used for full/low speed
- * devices behind a high speed hub.
- *
- * Returns: A non negative value is a pipe handle. Negative
- * values are error codes.
- */
-int cvmx_usb_open_pipe(struct cvmx_usb_state *state, enum cvmx_usb_pipe_flags flags,
- int device_addr, int endpoint_num,
- enum cvmx_usb_speed device_speed, int max_packet,
- enum cvmx_usb_transfer transfer_type,
- enum cvmx_usb_direction transfer_dir, int interval,
- int multi_count, int hub_device_addr, int hub_port)
-{
- struct cvmx_usb_pipe *pipe;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- if (unlikely((device_addr < 0) || (device_addr > MAX_USB_ADDRESS)))
- return -EINVAL;
- if (unlikely((endpoint_num < 0) || (endpoint_num > MAX_USB_ENDPOINT)))
- return -EINVAL;
- if (unlikely(device_speed > CVMX_USB_SPEED_LOW))
- return -EINVAL;
- if (unlikely((max_packet <= 0) || (max_packet > 1024)))
- return -EINVAL;
- if (unlikely(transfer_type > CVMX_USB_TRANSFER_INTERRUPT))
- return -EINVAL;
- if (unlikely((transfer_dir != CVMX_USB_DIRECTION_OUT) &&
- (transfer_dir != CVMX_USB_DIRECTION_IN)))
- return -EINVAL;
- if (unlikely(interval < 0))
- return -EINVAL;
- if (unlikely((transfer_type == CVMX_USB_TRANSFER_CONTROL) && interval))
- return -EINVAL;
- if (unlikely(multi_count < 0))
- return -EINVAL;
- if (unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
- (multi_count != 0)))
- return -EINVAL;
- if (unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
- return -EINVAL;
- if (unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
- return -EINVAL;
-
- /* Find a free pipe */
- pipe = usb->free_pipes.head;
- if (!pipe)
- return -ENOMEM;
- __cvmx_usb_remove_pipe(&usb->free_pipes, pipe);
- pipe->flags = flags | __CVMX_USB_PIPE_FLAGS_OPEN;
- if ((device_speed == CVMX_USB_SPEED_HIGH) &&
- (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (transfer_type == CVMX_USB_TRANSFER_BULK))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
- pipe->device_addr = device_addr;
- pipe->endpoint_num = endpoint_num;
- pipe->device_speed = device_speed;
- pipe->max_packet = max_packet;
- pipe->transfer_type = transfer_type;
- pipe->transfer_dir = transfer_dir;
- /*
- * All pipes use interval to rate limit NAK processing. Force an
- * interval if one wasn't supplied
- */
- if (!interval)
- interval = 1;
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- pipe->interval = interval*8;
-		/* Force start splits to be scheduled on uFrame 0 */
- pipe->next_tx_frame = ((usb->frame_number+7)&~7) + pipe->interval;
- } else {
- pipe->interval = interval;
- pipe->next_tx_frame = usb->frame_number + pipe->interval;
- }
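-	/*
-	 * For example, with an interval of one frame and a current
-	 * frame_number of 13, a split pipe gets interval = 8 uFrames and
-	 * next_tx_frame = ((13 + 7) & ~7) + 8 = 24, i.e. uFrame 0 of a
-	 * later frame; a non-split pipe would simply get 13 + 1 = 14.
-	 */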
- pipe->multi_count = multi_count;
- pipe->hub_device_addr = hub_device_addr;
- pipe->hub_port = hub_port;
- pipe->pid_toggle = 0;
- pipe->split_sc_frame = -1;
- __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
-
- /*
- * We don't need to tell the hardware about this pipe yet since
- * it doesn't have any submitted requests
- */
-
- return __cvmx_usb_get_pipe_handle(usb, pipe);
-}
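-
-/*
- * An illustrative call opening the default control pipe of a high speed
- * device at address 2 (all values here are example choices, not
- * requirements):
- *
- *	int pipe_handle = cvmx_usb_open_pipe(state, 0, 2, 0,
- *					     CVMX_USB_SPEED_HIGH, 64,
- *					     CVMX_USB_TRANSFER_CONTROL,
- *					     CVMX_USB_DIRECTION_OUT,
- *					     0, 0, 0, 0);
- */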
-
-
-/**
- * Poll the RX FIFOs and remove data as needed. This function is only used
- * in non DMA mode. It is very important that this function be called quickly
- * enough to prevent FIFO overflow.
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- */
-static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_internal_state *usb)
-{
- union cvmx_usbcx_grxstsph rx_status;
- int channel;
- int bytes;
- uint64_t address;
- uint32_t *ptr;
-
- rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
- /* Only read data if IN data is there */
- if (rx_status.s.pktsts != 2)
- return;
- /* Check if no data is available */
- if (!rx_status.s.bcnt)
- return;
-
- channel = rx_status.s.chnum;
- bytes = rx_status.s.bcnt;
- if (!bytes)
- return;
-
- /* Get where the DMA engine would have written this data */
- address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8);
- ptr = cvmx_phys_to_ptr(address);
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, address + bytes);
-
- /* Loop writing the FIFO data for this packet into memory */
- while (bytes > 0) {
- *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
- bytes -= 4;
- }
- CVMX_SYNCW;
-
- return;
-}
-
-
-/**
- * Fill the TX hardware fifo with data out of the software
- * fifos
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @fifo: Software fifo to use
- * @available: Amount of space in the hardware fifo
- *
- * Returns: Non zero if the hardware fifo was too small and needs
- * to be serviced again.
- */
-static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_internal_state *usb, struct cvmx_usb_tx_fifo *fifo, int available)
-{
- /*
-	 * We're done either when there isn't any more space or the
-	 * software FIFO is empty
- */
- while (available && (fifo->head != fifo->tail)) {
- int i = fifo->tail;
- const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
- int words = available;
-
-		/* Limit the amount of data to what the SW fifo has */
- if (fifo->entry[i].size <= available) {
- words = fifo->entry[i].size;
- fifo->tail++;
- if (fifo->tail > MAX_CHANNELS)
- fifo->tail = 0;
- }
-
- /* Update the next locations and counts */
- available -= words;
- fifo->entry[i].address += words * 4;
- fifo->entry[i].size -= words;
-
- /*
- * Write the HW fifo data. The read every three writes is due
- * to an errata on CN3XXX chips
- */
- while (words > 3) {
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- words -= 3;
- }
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words) {
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words)
- cvmx_write64_uint32(csr_address, *ptr++);
- }
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- }
- return fifo->head != fifo->tail;
-}
-
-
-/**
- * Check the hardware FIFOs and fill them as needed
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- */
-static void __cvmx_usb_poll_tx_fifo(struct cvmx_usb_internal_state *usb)
-{
- if (usb->periodic.head != usb->periodic.tail) {
- union cvmx_usbcx_hptxsts tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 0);
- }
-
- if (usb->nonperiodic.head != usb->nonperiodic.tail) {
- union cvmx_usbcx_gnptxsts tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 0);
- }
-
- return;
-}
-
-
-/**
- * Fill the TX FIFO with an outgoing packet
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @channel: Channel number to get packet from
- */
-static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_internal_state *usb, int channel)
-{
- union cvmx_usbcx_hccharx hcchar;
- union cvmx_usbcx_hcspltx usbc_hcsplt;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- struct cvmx_usb_tx_fifo *fifo;
-
- /* We only need to fill data on outbound channels */
- hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
- if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
- return;
-
-	/* OUT splits only have data on the start split, not the complete split */
- usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
- if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
- return;
-
- /* Find out how many bytes we need to fill and convert it into 32bit words */
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
- if (!usbc_hctsiz.s.xfersize)
- return;
-
- if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
- (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
- fifo = &usb->periodic;
- else
- fifo = &usb->nonperiodic;
-
- fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
- fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
- fifo->head++;
- if (fifo->head > MAX_CHANNELS)
- fifo->head = 0;
-
- __cvmx_usb_poll_tx_fifo(usb);
-
- return;
-}
-
-/**
- * Perform channel specific setup for Control transactions. All
- * the generic stuff will already have been done in
- * __cvmx_usb_start_channel()
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe for control transaction
- */
-static void __cvmx_usb_start_channel_control(struct cvmx_usb_internal_state *usb,
- int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction = pipe->head;
- union cvmx_usb_control_header *header =
- cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
- int packets_to_transfer;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
-
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- cvmx_dprintf("%s: ERROR - Non control stage\n", __FUNCTION__);
- break;
- case CVMX_USB_STAGE_SETUP:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = sizeof(*header);
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
- /*
- * Setup send the control header instead of the buffer data. The
- * buffer data will be used in the next stage
- */
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, transaction->control_header);
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = 0;
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_DATA:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (header->s.request_type & 0x80)
- bytes_to_transfer = 0;
- else if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
- }
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- union cvmx_usbcx_hccharx, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- if (!(header->s.request_type & 0x80))
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- union cvmx_usbcx_hccharx, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_STATUS:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the hardware.
- * Further bytes will be sent as continued transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
-		/* Round MAX_TRANSFER_BYTES down to a multiple of the packet size */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is zero
- * we still need to transfer one packet
- */
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
- if (packets_to_transfer == 0)
- packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must be
- * restarted between every packet for IN transactions, so there
- * is no reason to do multiple packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
-		 * Limit the number of packets and data transferred to what the
- * hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
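-	/*
-	 * For instance, with a hypothetical 64 byte max_packet and 100 bytes
-	 * left to send, this works out to two packets; if the port is running
-	 * without DMA it is clamped back to a single 64 byte packet and the
-	 * remainder goes out in a later pass.
-	 */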
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
- return;
-}
-
-
-/**
- * Start a channel to perform the pipe's head transaction
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe to start
- */
-static void __cvmx_usb_start_channel(struct cvmx_usb_internal_state *usb,
- int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction = pipe->head;
-
- /* Make sure all writes to the DMA region get flushed */
- CVMX_SYNCW;
-
- /* Attach the channel to the pipe */
- usb->pipe_for_channel[channel] = pipe;
- pipe->channel = channel;
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /* Mark this channel as in use */
- usb->idle_hardware_channels &= ~(1<<channel);
-
- /* Enable the channel interrupt bits */
- {
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hcintmskx usbc_hcintmsk;
- union cvmx_usbcx_haintmsk usbc_haintmsk;
-
- /* Clear all channel status bits */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
-
- usbc_hcintmsk.u32 = 0;
- usbc_hcintmsk.s.chhltdmsk = 1;
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /* Channels need these extra interrupts when we aren't in DMA mode */
- usbc_hcintmsk.s.datatglerrmsk = 1;
- usbc_hcintmsk.s.frmovrunmsk = 1;
- usbc_hcintmsk.s.bblerrmsk = 1;
- usbc_hcintmsk.s.xacterrmsk = 1;
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /* Splits don't generate xfercompl, so we need ACK and NYET */
- usbc_hcintmsk.s.nyetmsk = 1;
- usbc_hcintmsk.s.ackmsk = 1;
- }
- usbc_hcintmsk.s.nakmsk = 1;
- usbc_hcintmsk.s.stallmsk = 1;
- usbc_hcintmsk.s.xfercomplmsk = 1;
- }
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
-
- /* Enable the channel interrupt to propagate */
- usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index));
- usbc_haintmsk.s.haintmsk |= 1<<channel;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32);
- }
-
- /* Setup the locations the DMA engines use */
- {
- uint64_t dma_address = transaction->buffer + transaction->actual_bytes;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- dma_address = transaction->buffer + transaction->iso_packets[0].offset + transaction->actual_bytes;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, dma_address);
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, dma_address);
- }
-
- /* Setup both the size of the transfer and the SPLIT characteristics */
- {
- union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
- int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
-
- /*
- * ISOCHRONOUS transactions store each individual transfer size
- * in the packet structure, not the global buffer_length
- */
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
-
- /*
- * We need to do split transactions when we are talking to non
- * high speed devices that are behind a high speed hub
- */
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * On the start split phase (stage is even) record the
- * frame number we will need to send the split complete.
-			 * We only store the low bits of the frame number
-			 * since the time ahead can only be two frames
- */
- if ((transaction->stage&1) == 0) {
- if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
- else
- pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
- } else
- pipe->split_sc_frame = -1;
-
- usbc_hcsplt.s.spltena = 1;
- usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
- usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
-
- /*
- * SPLIT transactions can only ever transmit one data
- * packet so limit the transfer size to the max packet
- * size
- */
- if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
-
- /*
- * ISOCHRONOUS OUT splits are unique in that they limit
- * data transfers to 188 byte chunks representing the
- * begin/middle/end of the data or all
-			 * begin/middle/end of the data or all of it
- if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /*
- * Clear the split complete frame number as
- * there isn't going to be a split complete
- */
- pipe->split_sc_frame = -1;
- /*
- * See if we've started this transfer and sent
- * data
- */
- if (transaction->actual_bytes == 0) {
- /*
- * Nothing sent yet, this is either a
- * begin or the entire payload
- */
- if (bytes_to_transfer <= 188)
- usbc_hcsplt.s.xactpos = 3; /* Entire payload in one go */
- else
- usbc_hcsplt.s.xactpos = 2; /* First part of payload */
- } else {
- /*
- * Continuing the previous data, we must
- * either be in the middle or at the end
- */
- if (bytes_to_transfer <= 188)
- usbc_hcsplt.s.xactpos = 1; /* End of payload */
- else
- usbc_hcsplt.s.xactpos = 0; /* Middle of payload */
- }
- /*
- * Again, the transfer size is limited to 188
- * bytes
- */
- if (bytes_to_transfer > 188)
- bytes_to_transfer = 188;
- }
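-			/*
-			 * For example, a 400 byte ISOCHRONOUS OUT split goes
-			 * out as 188 bytes (xactpos = 2, begin), 188 bytes
-			 * (xactpos = 0, middle) and 24 bytes (xactpos = 1,
-			 * end); a payload of 188 bytes or less goes out in a
-			 * single "all" transaction (xactpos = 3).
-			 */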
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the
- * hardware. Further bytes will be sent as continued
- * transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /*
-			 * Round MAX_TRANSFER_BYTES down to a multiple of the
-			 * packet size
- */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is
- * zero we still need to transfer one packet
- */
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
- if (packets_to_transfer == 0)
- packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must
- * be restarted between every packet for IN
- * transactions, so there is no reason to do multiple
- * packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
-			 * Limit the number of packets and data transferred to
- * what the hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- /* Update the DATA0/DATA1 toggle */
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- /*
- * High speed pipes may need a hardware ping before they start
- */
- if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
- usbc_hctsiz.s.dopng = 1;
-
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
- }
-
- /* Setup the Host Channel Characteristics Register */
- {
- union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
-
- /*
- * Set the startframe odd/even properly. This is only used for
- * periodic
- */
- usbc_hcchar.s.oddfrm = usb->frame_number&1;
-
- /*
- * Set the number of back to back packets allowed by this
-		 * endpoint. Split transactions interpret "ec" as the number of
-		 * immediate retries on failure. These retries happen too
-		 * quickly, so we disable them entirely for splits
- */
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count < 1)
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count > 3)
- usbc_hcchar.s.ec = 3;
- else
- usbc_hcchar.s.ec = pipe->multi_count;
-
- /* Set the rest of the endpoint specific settings */
- usbc_hcchar.s.devaddr = pipe->device_addr;
- usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
- usbc_hcchar.s.epdir = pipe->transfer_dir;
- usbc_hcchar.s.epnum = pipe->endpoint_num;
- usbc_hcchar.s.mps = pipe->max_packet;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
- }
-
- /* Do transaction type specific fixups as needed */
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- __cvmx_usb_start_channel_control(usb, channel, pipe);
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISO transactions require different PIDs depending on
- * direction and how many packets are needed
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 0);
- else /* Need MDATA */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 3);
- }
- }
- break;
- }
- {
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
- transaction->xfersize = usbc_hctsiz.s.xfersize;
- transaction->pktcnt = usbc_hctsiz.s.pktcnt;
- }
-	/* Remember when we start a split transaction */
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, chena, 1);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- __cvmx_usb_fill_tx_fifo(usb, channel);
- return;
-}
-
-
-/**
- * Find a pipe that is ready to be scheduled to hardware.
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @list: Pipe list to search
- * @current_frame:
- * Frame counter to use as a time reference.
- *
- * Returns: Pipe or NULL if none are ready
- */
-static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(struct cvmx_usb_internal_state *usb, struct cvmx_usb_pipe_list *list, uint64_t current_frame)
-{
- struct cvmx_usb_pipe *pipe = list->head;
- while (pipe) {
- if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && pipe->head &&
- (pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
- (!usb->active_split || (usb->active_split == pipe->head))) {
- CVMX_PREFETCH(pipe, 128);
- CVMX_PREFETCH(pipe->head, 0);
- return pipe;
- }
- pipe = pipe->next;
- }
- return NULL;
-}
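-
-/*
- * The split_sc_frame test above compares frame numbers modulo 128 so it
- * keeps working across wrap. For example, with split_sc_frame = 126
- * (stored modulo 128) and a current frame whose low 7 bits are 1, the
- * difference (1 - 126) & 0x7f = 3 is below 0x40, so the pipe is
- * considered ready for its complete split.
- */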
-
-
-/**
- * Called whenever a pipe might need to be scheduled to the
- * hardware.
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @is_sof: True if this schedule was called on a SOF interrupt.
- */
-static void __cvmx_usb_schedule(struct cvmx_usb_internal_state *usb, int is_sof)
-{
- int channel;
- struct cvmx_usb_pipe *pipe;
- int need_sof;
- enum cvmx_usb_transfer ttype;
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /* Without DMA we need to be careful to not schedule something at the end of a frame and cause an overrun */
- union cvmx_usbcx_hfnum hfnum = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index))};
- union cvmx_usbcx_hfir hfir = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFIR(usb->index))};
- if (hfnum.s.frrem < hfir.s.frint/4)
- goto done;
- }
-
- while (usb->idle_hardware_channels) {
- /* Find an idle channel */
- CVMX_CLZ(channel, usb->idle_hardware_channels);
- channel = 31 - channel;
- if (unlikely(channel > 7))
- break;
-
- /* Find a pipe needing service */
- pipe = NULL;
- if (is_sof) {
- /*
- * Only process periodic pipes on SOF interrupts. This
- * way we are sure that the periodic data is sent in the
- * beginning of the frame
- */
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
- if (likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
- }
- if (likely(!pipe)) {
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
- if (likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
- }
- if (!pipe)
- break;
-
- __cvmx_usb_start_channel(usb, channel, pipe);
- }
-
-done:
- /*
- * Only enable SOF interrupts when we have transactions pending in the
- * future that might need to be scheduled
- */
- need_sof = 0;
- for (ttype = CVMX_USB_TRANSFER_CONTROL; ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
- pipe = usb->active_pipes[ttype].head;
- while (pipe) {
- if (pipe->next_tx_frame > usb->frame_number) {
- need_sof = 1;
- break;
- }
- pipe = pipe->next;
- }
- }
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, sofmsk, need_sof);
- return;
-}
-
-
-/**
- * Call a user's callback for a specific reason.
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe: Pipe the callback is for or NULL
- * @transaction:
- * Transaction the callback is for or NULL
- * @reason: Reason this callback is being called
- * @complete_code:
- * Completion code for the transaction, if any
- */
-static void __cvmx_usb_perform_callback(struct cvmx_usb_internal_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_callback reason,
- enum cvmx_usb_complete complete_code)
-{
- cvmx_usb_callback_func_t callback = usb->callback[reason];
- void *user_data = usb->callback_data[reason];
- int submit_handle = -1;
- int pipe_handle = -1;
- int bytes_transferred = 0;
-
- if (pipe)
- pipe_handle = __cvmx_usb_get_pipe_handle(usb, pipe);
-
- if (transaction) {
- submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
- bytes_transferred = transaction->actual_bytes;
- /* Transactions are allowed to override the default callback */
- if ((reason == CVMX_USB_CALLBACK_TRANSFER_COMPLETE) && transaction->callback) {
- callback = transaction->callback;
- user_data = transaction->callback_data;
- }
- }
-
- if (!callback)
- return;
-
- callback((struct cvmx_usb_state *)usb, reason, complete_code, pipe_handle, submit_handle,
- bytes_transferred, user_data);
-}
-
-
-/**
- * Signal the completion of a transaction and free it. The
- * transaction will be removed from the pipe transaction list.
- *
- * @usb: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe: Pipe the transaction is on
- * @transaction:
- * Transaction that completed
- * @complete_code:
- * Completion code
- */
-static void __cvmx_usb_perform_complete(struct cvmx_usb_internal_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_complete complete_code)
-{
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
-
- /*
- * Isochronous transactions need extra processing as they might not be
- * done after a single data transfer
- */
- if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /* Update the number of bytes transferred in this ISO packet */
- transaction->iso_packets[0].length = transaction->actual_bytes;
- transaction->iso_packets[0].status = complete_code;
-
- /*
- * If there are more ISOs pending and we succeeded, schedule the
- * next one
- */
- if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
- transaction->actual_bytes = 0; /* No bytes transferred for this packet as of yet */
- transaction->iso_number_packets--; /* One less ISO waiting to transfer */
- transaction->iso_packets++; /* Increment to the next location in our packet array */
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- goto done;
- }
- }
-
- /* Remove the transaction from the pipe list */
- if (transaction->next)
- transaction->next->prev = transaction->prev;
- else
- pipe->tail = transaction->prev;
- if (transaction->prev)
- transaction->prev->next = transaction->next;
- else
- pipe->head = transaction->next;
- if (!pipe->head) {
- __cvmx_usb_remove_pipe(usb->active_pipes + pipe->transfer_type, pipe);
- __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
-
- }
- __cvmx_usb_perform_callback(usb, pipe, transaction,
- CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
- complete_code);
- __cvmx_usb_free_transaction(usb, transaction);
-done:
- return;
-}
-
-
-/**
- * Submit a usb transaction to a pipe. Called for all types
- * of transactions.
- *
- * @usb:
- * @pipe_handle:
- * Which pipe to submit to. Will be validated in this function.
- * @type: Transaction type
- * @flags: Flags for the transaction
- * @buffer: User buffer for the transaction
- * @buffer_length:
- * User buffer's length in bytes
- * @control_header:
- * For control transactions, the 8 byte standard header
- * @iso_start_frame:
- * For ISO transactions, the start frame
- * @iso_number_packets:
- *		For ISO, the number of packets in the transaction.
- * @iso_packets:
- * A description of each ISO packet
- * @callback: User callback to call when the transaction completes
- * @user_data: User's data for the callback
- *
- * Returns: Submit handle or negative on failure. Matches the result
- * in the external API.
- */
-static int __cvmx_usb_submit_transaction(struct cvmx_usb_internal_state *usb,
- int pipe_handle,
- enum cvmx_usb_transfer type,
- int flags,
- uint64_t buffer,
- int buffer_length,
- uint64_t control_header,
- int iso_start_frame,
- int iso_number_packets,
- struct cvmx_usb_iso_packet *iso_packets,
- cvmx_usb_callback_func_t callback,
- void *user_data)
-{
- int submit_handle;
- struct cvmx_usb_transaction *transaction;
- struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
-
- if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- return -EINVAL;
- /* Fail if the pipe isn't open */
- if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- return -EINVAL;
- if (unlikely(pipe->transfer_type != type))
- return -EINVAL;
-
- transaction = __cvmx_usb_alloc_transaction(usb);
- if (unlikely(!transaction))
- return -ENOMEM;
-
- transaction->type = type;
- transaction->flags |= flags;
- transaction->buffer = buffer;
- transaction->buffer_length = buffer_length;
- transaction->control_header = control_header;
-	transaction->iso_start_frame = iso_start_frame; /* FIXME: This is not used, implement it */
- transaction->iso_number_packets = iso_number_packets;
- transaction->iso_packets = iso_packets;
- transaction->callback = callback;
- transaction->callback_data = user_data;
- if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
- transaction->stage = CVMX_USB_STAGE_SETUP;
- else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-
- transaction->next = NULL;
- if (pipe->tail) {
- transaction->prev = pipe->tail;
- transaction->prev->next = transaction;
- } else {
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
- transaction->prev = NULL;
- pipe->head = transaction;
- __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
- __cvmx_usb_append_pipe(usb->active_pipes + pipe->transfer_type, pipe);
- }
- pipe->tail = transaction;
-
- submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
-
- /* We may need to schedule the pipe if this was the head of the pipe */
- if (!transaction->prev)
- __cvmx_usb_schedule(usb, 0);
-
- return submit_handle;
-}
-
-
-/**
- * Call to submit a USB Bulk transfer to a pipe.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Handle to the pipe for the transfer.
- * @buffer: Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @buffer_length:
- * Length of buffer in bytes.
- * @callback: Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @user_data: User supplied data returned when the
- * callback is called. This is only used if
- *              callback is not NULL.
- *
- * Returns: A submitted transaction handle or negative on
- * failure. Negative values are error codes.
- */
-int cvmx_usb_submit_bulk(struct cvmx_usb_state *state, int pipe_handle,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
-{
- int submit_handle;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- /* Pipe handle checking is done later in a common place */
- if (unlikely(!buffer))
- return -EINVAL;
- if (unlikely(buffer_length < 0))
- return -EINVAL;
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_BULK,
- 0, /* flags */
- buffer,
- buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- callback,
- user_data);
- return submit_handle;
-}
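For orientation only, here is a minimal caller-side sketch of a bulk submission; it is not part of the original file. The helper cvmx_ptr_to_phys(), the callback my_bulk_done() and the wrapper my_queue_bulk() are assumed names used purely for illustration.

static void my_bulk_done(struct cvmx_usb_state *state,
			 enum cvmx_usb_callback reason,
			 enum cvmx_usb_complete status,
			 int pipe_handle, int submit_handle,
			 int bytes_transferred, void *user_data)
{
	/* "status" and "bytes_transferred" describe how the transfer ended */
	if (status != CVMX_USB_COMPLETE_SUCCESS)
		cvmx_dprintf("bulk transfer failed, status %d\n", status);
}

static int my_queue_bulk(struct cvmx_usb_state *state, int pipe_handle,
			 void *data, int len)
{
	/* The API takes a 64-bit physical address, not a pointer */
	uint64_t phys = cvmx_ptr_to_phys(data);	/* assumed helper */

	/* Returns a submit handle (>= 0) or a negative error code */
	return cvmx_usb_submit_bulk(state, pipe_handle, phys, len,
				    my_bulk_done, NULL);
}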
-
-
-/**
- * Call to submit a USB Interrupt transfer to a pipe.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Handle to the pipe for the transfer.
- * @buffer: Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @buffer_length:
- * Length of buffer in bytes.
- * @callback: Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @user_data: User supplied data returned when the
- * callback is called. This is only used if
- *              callback is not NULL.
- *
- * Returns: A submitted transaction handle or negative on
- * failure. Negative values are error codes.
- */
-int cvmx_usb_submit_interrupt(struct cvmx_usb_state *state, int pipe_handle,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
-{
- int submit_handle;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- /* Pipe handle checking is done later in a common place */
- if (unlikely(!buffer))
- return -EINVAL;
- if (unlikely(buffer_length < 0))
- return -EINVAL;
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_INTERRUPT,
- 0, /* flags */
- buffer,
- buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- callback,
- user_data);
- return submit_handle;
-}
-
-
-/**
- * Call to submit a USB Control transfer to a pipe.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Handle to the pipe for the transfer.
- * @control_header:
- * USB 8 byte control header physical address.
- * Note that this is NOT A POINTER, but the
- * full 64bit physical address of the buffer.
- * @buffer: Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @buffer_length:
- * Length of buffer in bytes.
- * @callback: Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @user_data: User supplied data returned when the
- * callback is called. This is only used if
- *              callback is not NULL.
- *
- * Returns: A submitted transaction handle or negative on
- * failure. Negative values are error codes.
- */
-int cvmx_usb_submit_control(struct cvmx_usb_state *state, int pipe_handle,
- uint64_t control_header,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
-{
- int submit_handle;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- union cvmx_usb_control_header *header =
- cvmx_phys_to_ptr(control_header);
-
- /* Pipe handle checking is done later in a common place */
- if (unlikely(!control_header))
- return -EINVAL;
- /* Some drivers send a buffer with a zero length. God only knows why */
- if (unlikely(buffer && (buffer_length < 0)))
- return -EINVAL;
- if (unlikely(!buffer && (buffer_length != 0)))
- return -EINVAL;
- if ((header->s.request_type & 0x80) == 0)
- buffer_length = le16_to_cpu(header->s.length);
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_CONTROL,
- 0, /* flags */
- buffer,
- buffer_length,
- control_header,
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- callback,
- user_data);
- return submit_handle;
-}
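As a hedged illustration (again not from the original source), the sketch below builds the 8-byte header with union cvmx_usb_control_header and issues a GET_DESCRIPTOR request. cvmx_ptr_to_phys(), the static header storage and the my_* names are assumptions; cpu_to_le16() is assumed available since le16_to_cpu() is already used above.

static union cvmx_usb_control_header my_hdr; /* must stay valid until completion */

static int my_get_device_descriptor(struct cvmx_usb_state *state,
				    int pipe_handle, void *buf, int len,
				    cvmx_usb_callback_func_t my_ctl_done)
{
	my_hdr.u64 = 0;
	my_hdr.s.request_type = 0x80;		/* device-to-host, standard, device */
	my_hdr.s.request = 6;			/* GET_DESCRIPTOR */
	my_hdr.s.value = cpu_to_le16(1 << 8);	/* DEVICE descriptor, index 0 */
	my_hdr.s.index = 0;
	my_hdr.s.length = cpu_to_le16(len);	/* header fields are little endian */

	/* Both the header and the data buffer are passed as physical addresses */
	return cvmx_usb_submit_control(state, pipe_handle,
				       cvmx_ptr_to_phys(&my_hdr),
				       cvmx_ptr_to_phys(buf), len,
				       my_ctl_done, NULL);
}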
-
-
-/**
- * Call to submit a USB Isochronous transfer to a pipe.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Handle to the pipe for the transfer.
- * @start_frame:
- * Number of frames into the future to schedule
- * this transaction.
- * @flags: Flags to control the transfer. See
- * enum cvmx_usb_isochronous_flags for the flag
- * definitions.
- * @number_packets:
- * Number of sequential packets to transfer.
- * "packets" is a pointer to an array of this
- * many packet structures.
- * @packets: Description of each transfer packet as
- * defined by struct cvmx_usb_iso_packet. The array
- * pointed to here must stay valid until the
- * complete callback is called.
- * @buffer: Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @buffer_length:
- * Length of buffer in bytes.
- * @callback: Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @user_data: User supplied data returned when the
- * callback is called. This is only used if
- *              callback is not NULL.
- *
- * Returns: A submitted transaction handle or negative on
- * failure. Negative values are error codes.
- */
-int cvmx_usb_submit_isochronous(struct cvmx_usb_state *state, int pipe_handle,
- int start_frame, int flags,
- int number_packets,
- struct cvmx_usb_iso_packet packets[],
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
-{
- int submit_handle;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- /* Pipe handle checking is done later in a common place */
- if (unlikely(start_frame < 0))
- return -EINVAL;
- if (unlikely(flags & ~(CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT | CVMX_USB_ISOCHRONOUS_FLAGS_ASAP)))
- return -EINVAL;
- if (unlikely(number_packets < 1))
- return -EINVAL;
- if (unlikely(!packets))
- return -EINVAL;
- if (unlikely(!buffer))
- return -EINVAL;
- if (unlikely(buffer_length < 0))
- return -EINVAL;
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- flags,
- buffer,
- buffer_length,
- 0, /* control_header */
- start_frame,
- number_packets,
- packets,
- callback,
- user_data);
- return submit_handle;
-}
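A hedged sketch of an isochronous submission follows, illustrative only. The two-entry packet array, my_iso_done and cvmx_ptr_to_phys() are assumptions; as the comment above notes, the array must remain valid until the completion callback runs.

static struct cvmx_usb_iso_packet my_packets[2]; /* valid until completion */

static int my_queue_iso(struct cvmx_usb_state *state, int pipe_handle,
			void *buf, int packet_size,
			cvmx_usb_callback_func_t my_iso_done)
{
	/* Describe where each packet lives inside the single data buffer */
	my_packets[0].offset = 0;
	my_packets[0].length = packet_size;
	my_packets[1].offset = packet_size;
	my_packets[1].length = packet_size;

	return cvmx_usb_submit_isochronous(state, pipe_handle,
					   0, /* start_frame, unused with ASAP */
					   CVMX_USB_ISOCHRONOUS_FLAGS_ASAP |
					   CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT,
					   2, my_packets,
					   cvmx_ptr_to_phys(buf),
					   2 * packet_size,
					   my_iso_done, NULL);
}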
-
-
-/**
- * Cancel one outstanding request in a pipe. Canceling a request
- * can fail if the transaction has already completed before cancel
- * is called. Even after a successful cancel call, it may take
- * a frame or two for the cvmx_usb_poll() function to call the
- * associated callback.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Pipe handle to cancel requests in.
- * @submit_handle:
- * Handle to transaction to cancel, returned by the submit function.
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_cancel(struct cvmx_usb_state *state, int pipe_handle, int submit_handle)
-{
- struct cvmx_usb_transaction *transaction;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
-
- if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- return -EINVAL;
- if (unlikely((submit_handle < 0) || (submit_handle >= MAX_TRANSACTIONS)))
- return -EINVAL;
-
- /* Fail if the pipe isn't open */
- if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- return -EINVAL;
-
- transaction = usb->transaction + submit_handle;
-
- /* Fail if this transaction already completed */
- if (unlikely((transaction->flags & __CVMX_USB_TRANSACTION_FLAGS_IN_USE) == 0))
- return -EINVAL;
-
- /*
-	 * If the transaction is the HEAD of the queue and is scheduled, we need
-	 * to treat it specially
- */
- if ((pipe->head == transaction) &&
- (pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
- union cvmx_usbcx_hccharx usbc_hcchar;
-
- usb->pipe_for_channel[pipe->channel] = NULL;
- pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- CVMX_SYNCW;
-
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
- /* If the channel isn't enabled then the transaction already completed */
- if (usbc_hcchar.s.chena) {
- usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
- }
- }
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
- return 0;
-}
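To illustrate the cancellation semantics described above, a minimal caller-side sketch (my_cancel_transfer is an assumed name): the completion callback registered at submit time still runs later, typically with CVMX_USB_COMPLETE_CANCEL.

static void my_cancel_transfer(struct cvmx_usb_state *state,
			       int pipe_handle, int submit_handle)
{
	int result = cvmx_usb_cancel(state, pipe_handle, submit_handle);

	if (result)
		/* The transfer may already have completed */
		cvmx_dprintf("cancel failed with %d\n", result);
	/*
	 * On success, do not reuse the buffer yet: the completion
	 * callback is delivered later from cvmx_usb_poll().
	 */
}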
-
-
-/**
- * Cancel all outstanding requests in a pipe. Logically all this
- * does is call cvmx_usb_cancel() in a loop.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Pipe handle to cancel requests in.
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_cancel_all(struct cvmx_usb_state *state, int pipe_handle)
-{
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
-
- if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- return -EINVAL;
-
- /* Fail if the pipe isn't open */
- if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- return -EINVAL;
-
- /* Simply loop through and attempt to cancel each transaction */
- while (pipe->head) {
- int result = cvmx_usb_cancel(state, pipe_handle,
- __cvmx_usb_get_submit_handle(usb, pipe->head));
- if (unlikely(result != 0))
- return result;
- }
- return 0;
-}
-
-
-/**
- * Close a pipe created with cvmx_usb_open_pipe().
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @pipe_handle:
- * Pipe handle to close.
- *
- * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
- * outstanding transfers.
- */
-int cvmx_usb_close_pipe(struct cvmx_usb_state *state, int pipe_handle)
-{
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
-
- if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- return -EINVAL;
-
- /* Fail if the pipe isn't open */
- if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- return -EINVAL;
-
- /* Fail if the pipe has pending transactions */
- if (unlikely(pipe->head))
- return -EBUSY;
-
- pipe->flags = 0;
- __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
- __cvmx_usb_append_pipe(&usb->free_pipes, pipe);
-
- return 0;
-}
-
-
-/**
- * Register a function to be called when various USB events occur.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- * @reason: Which event to register for.
- * @callback: Function to call when the event occurs.
- * @user_data: User data parameter to the function.
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_register_callback(struct cvmx_usb_state *state,
- enum cvmx_usb_callback reason,
- cvmx_usb_callback_func_t callback,
- void *user_data)
-{
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- if (unlikely(reason >= __CVMX_USB_CALLBACK_END))
- return -EINVAL;
- if (unlikely(!callback))
- return -EINVAL;
-
- usb->callback[reason] = callback;
- usb->callback_data[reason] = user_data;
-
- return 0;
-}
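For illustration (not part of the original file), a host driver might register for port change events as sketched below; my_port_changed(), my_register_port_callback() and the hcd pointer are assumed names.

static void my_port_changed(struct cvmx_usb_state *state,
			    enum cvmx_usb_callback reason,
			    enum cvmx_usb_complete status,
			    int pipe_handle, int submit_handle,
			    int bytes_transferred, void *user_data)
{
	/* Typically: ask the operating system to re-poll the root hub status */
}

static int my_register_port_callback(struct cvmx_usb_state *state, void *hcd)
{
	return cvmx_usb_register_callback(state,
					  CVMX_USB_CALLBACK_PORT_CHANGED,
					  my_port_changed, hcd);
}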
-
-
-/**
- * Get the current USB protocol level frame number. The frame
- * number is always in the range of 0-0x7ff.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: USB frame number
- */
-int cvmx_usb_get_frame_number(struct cvmx_usb_state *state)
-{
- int frame_number;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- union cvmx_usbcx_hfnum usbc_hfnum;
-
- usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- frame_number = usbc_hfnum.s.frnum;
-
- return frame_number;
-}
-
-
-/**
- * Poll a channel for status
- *
- * @usb: USB device
- * @channel: Channel to poll
- *
- * Returns: Zero on success
- */
-static int __cvmx_usb_poll_channel(struct cvmx_usb_internal_state *usb, int channel)
-{
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- union cvmx_usbcx_hccharx usbc_hcchar;
- struct cvmx_usb_pipe *pipe;
- struct cvmx_usb_transaction *transaction;
- int bytes_this_transfer;
- int bytes_in_last_packet;
- int packets_processed;
- int buffer_space_left;
-
- /* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
-
- if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
- /*
- * There seems to be a bug in CN31XX which can cause
- * interrupt IN transfers to get stuck until we do a
- * write of HCCHARX without changing things
- */
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
- return 0;
- }
-
- /*
- * In non DMA mode the channels don't halt themselves. We need
- * to manually disable channels that are left running
- */
- if (!usbc_hcint.s.chhltd) {
- if (usbc_hcchar.s.chena) {
- union cvmx_usbcx_hcintmskx hcintmsk;
- /* Disable all interrupts except CHHLTD */
- hcintmsk.u32 = 0;
- hcintmsk.s.chhltdmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
- usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
- return 0;
- } else if (usbc_hcint.s.xfercompl) {
- /* Successful IN/OUT with transfer complete. Channel halt isn't needed */
- } else {
- cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
- return 0;
- }
- }
- } else {
- /*
-		 * There are no interrupts that we need to process when the
- * channel is still running
- */
- if (!usbc_hcint.s.chhltd)
- return 0;
- }
-
- /* Disable the channel interrupts now that it is done */
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- usb->idle_hardware_channels |= (1<<channel);
-
- /* Make sure this channel is tied to a valid pipe */
- pipe = usb->pipe_for_channel[channel];
- CVMX_PREFETCH(pipe, 0);
- CVMX_PREFETCH(pipe, 128);
- if (!pipe)
- return 0;
- transaction = pipe->head;
- CVMX_PREFETCH0(transaction);
-
- /*
- * Disconnect this pipe from the HW channel. Later the schedule
- * function will figure out which pipe needs to go
- */
- usb->pipe_for_channel[channel] = NULL;
- pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /*
- * Read the channel config info so we can figure out how much data
-	 * transferred
- */
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- /*
- * Calculating the number of bytes successfully transferred is dependent
- * on the transfer direction
- */
- packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
- if (usbc_hcchar.s.epdir) {
- /*
- * IN transactions are easy. For every byte received the
- * hardware decrements xfersize. All we need to do is subtract
- * the current value of xfersize from its starting value and we
- * know how many bytes were written to the buffer
- */
- bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
- } else {
- /*
-		 * OUT transactions don't decrement xfersize. Instead pktcnt is
- * decremented on every successful packet send. The hardware
- * does this when it receives an ACK, or NYET. If it doesn't
- * receive one of these responses pktcnt doesn't change
- */
- bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
- /*
- * The last packet may not be a full transfer if we didn't have
- * enough data
- */
- if (bytes_this_transfer > transaction->xfersize)
- bytes_this_transfer = transaction->xfersize;
- }
- /* Figure out how many bytes were in the last packet of the transfer */
- if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
- else
- bytes_in_last_packet = bytes_this_transfer;
-
- /*
- * As a special case, setup transactions output the setup header, not
- * the user's data. For this reason we don't count setup data as bytes
- * transferred
- */
- if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
- (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
- bytes_this_transfer = 0;
-
- /*
- * Add the bytes transferred to the running total. It is important that
- * bytes_this_transfer doesn't count any data that needs to be
- * retransmitted
- */
- transaction->actual_bytes += bytes_this_transfer;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
- else
- buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
-
- /*
- * We need to remember the PID toggle state for the next transaction.
- * The hardware already updated it for the next transaction
- */
- pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
-
- /*
- * For high speed bulk out, assume the next transaction will need to do
- * a ping before proceeding. If this isn't true the ACK processing below
- * will clear this flag
- */
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- if (usbc_hcint.s.stall) {
- /*
- * STALL as a response means this transaction cannot be
- * completed because the device can't process transactions. Tell
-		 * the user. Any data that was transferred will be counted in
- * the actual bytes transferred
- */
- pipe->pid_toggle = 0;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
- } else if (usbc_hcint.s.xacterr) {
- /*
-		 * We know at least one packet worked if we get an ACK or NAK.
- * Reset the retry counter
- */
- if (usbc_hcint.s.nak || usbc_hcint.s.ack)
- transaction->retries = 0;
- transaction->retries++;
- if (transaction->retries > MAX_RETRIES) {
- /*
- * XactErr as a response means the device signaled
- * something wrong with the transfer. For example, PID
- * toggle errors cause these
- */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
- } else {
- /*
- * If this was a split then clear our split in progress
- * marker
- */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /*
- * Rewind to the beginning of the transaction by anding
- * off the split complete bit
- */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
- }
- } else if (usbc_hcint.s.bblerr) {
- /* Babble Error (BblErr) */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
- } else if (usbc_hcint.s.datatglerr) {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- } else if (usbc_hcint.s.nyet) {
- /*
- * NYET as a response is only allowed in three cases: as a
- * response to a ping, as a response to a split transaction, and
- * as a response to a bulk out. The ping case is handled by
- * hardware, so we only have splits and bulk out
- */
- if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->retries = 0;
- /*
- * If there is more data to go then we need to try
- * again. Otherwise this transaction is complete
- */
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- } else {
- /*
- * Split transactions retry the split complete 4 times
- * then rewind to the start split and do the entire
-			 * transaction again
- */
- transaction->retries++;
- if ((transaction->retries & 0x3) == 0) {
- /*
- * Rewind to the beginning of the transaction by
- * anding off the split complete bit
- */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- }
- }
- } else if (usbc_hcint.s.ack) {
- transaction->retries = 0;
- /*
- * The ACK bit can only be checked after the other error bits.
- * This is because a multi packet transfer may succeed in a
- * number of packets and then get a different response on the
- * last packet. In this case both ACK and the last response bit
- * will be set. If none of the other response bits is set, then
- * the last packet must have been an ACK
- *
- * Since we got an ACK, we know we don't need to do a ping on
- * this pipe
- */
- pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- /* This should be impossible */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
- break;
- case CVMX_USB_STAGE_SETUP:
- pipe->pid_toggle = 1;
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
- else {
- union cvmx_usb_control_header *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->s.length)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- {
- union cvmx_usb_control_header *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->s.length)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA:
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
- /*
- * For setup OUT data that are splits,
- * the hardware doesn't appear to count
- * transferred data. Here we manually
- * update the data transferred
- */
- if (!usbc_hcchar.s.epdir) {
- if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes += buffer_space_left;
- else
- transaction->actual_bytes += pipe->max_packet;
- }
- } else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- } else {
- transaction->stage = CVMX_USB_STAGE_DATA;
- }
- break;
- case CVMX_USB_STAGE_STATUS:
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
- else
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- break;
- }
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- /*
- * The only time a bulk transfer isn't complete when it
- * finishes with an ACK is during a split transaction.
- * For splits we need to continue the transfer if more
- * data is needed
- */
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- else {
- if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- else {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- } else {
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (usbc_hcint.s.nak))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISOCHRONOUS OUT splits don't require a
- * complete split stage. Instead they use a
- * sequence of begin OUT splits to transfer the
- * data 188 bytes at a time. Once the transfer
- * is complete, the pipe sleeps until the next
- * schedule interval
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- /*
- * If no space left or this wasn't a max
- * size packet then this transfer is
- * complete. Otherwise start it again to
- * send the next 188 bytes
- */
- if (!buffer_space_left || (bytes_this_transfer < 188)) {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- } else {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
- /*
- * We are in the incoming data
- * phase. Keep getting data
- * until we run out of space or
- * get a small packet
- */
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- } else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- }
- } else {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- break;
- }
- } else if (usbc_hcint.s.nak) {
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /*
- * NAK as a response means the device couldn't accept the
- * transaction, but it should be retried in the future. Rewind
- * to the beginning of the transaction by anding off the split
- * complete bit. Retry in the next interval
- */
- transaction->retries = 0;
- transaction->stage &= ~1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
- } else {
- struct cvmx_usb_port_status port;
- port = cvmx_usb_get_status((struct cvmx_usb_state *)usb);
- if (port.port_enabled) {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- } else {
- /*
- * We get channel halted interrupts with no result bits
-			 * set when the cable is unplugged
- */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
- }
- }
- return 0;
-}
-
-
-/**
- * Poll the USB block for status and call all needed callback
- * handlers. This function is meant to be called in the interrupt
- * handler for the USB controller. It can also be called
- * periodically in a loop for non-interrupt based operation.
- *
- * @state: USB device state populated by
- * cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-int cvmx_usb_poll(struct cvmx_usb_state *state)
-{
- union cvmx_usbcx_hfnum usbc_hfnum;
- union cvmx_usbcx_gintsts usbc_gintsts;
- struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
-
- CVMX_PREFETCH(usb, 0);
- CVMX_PREFETCH(usb, 1*128);
- CVMX_PREFETCH(usb, 2*128);
- CVMX_PREFETCH(usb, 3*128);
- CVMX_PREFETCH(usb, 4*128);
-
- /* Update the frame counter */
- usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
- usb->frame_number += 0x4000;
- usb->frame_number &= ~0x3fffull;
- usb->frame_number |= usbc_hfnum.s.frnum;
-
- /* Read the pending interrupts */
- usbc_gintsts.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
-
- /* Clear the interrupts now that we know about them */
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
-
- if (usbc_gintsts.s.rxflvl) {
- /*
- * RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be
- * read from the RxFIFO.
- *
- * In DMA mode this is handled by hardware
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- __cvmx_usb_poll_rx_fifo(usb);
- }
- if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
- /* Fill the Tx FIFOs when not in DMA mode */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- __cvmx_usb_poll_tx_fifo(usb);
- }
- if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
- union cvmx_usbcx_hprt usbc_hprt;
- /*
- * Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- *
- * Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application
- * must read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- *
- * Call the user's port callback
- */
- __cvmx_usb_perform_callback(usb, NULL, NULL,
- CVMX_USB_CALLBACK_PORT_CHANGED,
- CVMX_USB_COMPLETE_SUCCESS);
- /* Clear the port change bits */
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbc_hprt.s.prtena = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
- }
- if (usbc_gintsts.s.hchint) {
- /*
- * Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is
- * pending on one of the channels of the core (in Host mode).
- * The application must read the Host All Channels Interrupt
- * (HAINT) register to determine the exact number of the channel
- * on which the interrupt occurred, and then read the
- * corresponding Host Channel-n Interrupt (HCINTn) register to
- * determine the exact cause of the interrupt. The application
- * must clear the appropriate status bit in the HCINTn register
- * to clear this bit.
- */
- union cvmx_usbcx_haint usbc_haint;
- usbc_haint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
- while (usbc_haint.u32) {
- int channel;
- CVMX_CLZ(channel, usbc_haint.u32);
- channel = 31 - channel;
- __cvmx_usb_poll_channel(usb, channel);
- usbc_haint.u32 ^= 1<<channel;
- }
- }
-
- __cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
-
- return 0;
-}
diff --git a/drivers/staging/octeon-usb/cvmx-usb.h b/drivers/staging/octeon-usb/cvmx-usb.h
deleted file mode 100644
index 8bf36966ef1..00000000000
--- a/drivers/staging/octeon-usb/cvmx-usb.h
+++ /dev/null
@@ -1,542 +0,0 @@
-/***********************license start***************
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
-
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
-
- * This Software, including technical data, may be subject to U.S. export control
- * laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
-
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
- * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- ***********************license end**************************************/
-
-
-/**
- * "cvmx-usb.h" defines a set of low level USB functions to help
- * developers create Octeon USB drivers for various operating
- * systems. These functions provide a generic API to the Octeon
- * USB blocks, hiding the internal hardware specific
- * operations.
- *
- * At a high level the device driver needs to:
- *
- * - Call cvmx_usb_get_num_ports() to get the number of
- * supported ports.
- * - Call cvmx_usb_initialize() for each Octeon USB port.
- * - Enable the port using cvmx_usb_enable().
- * - Either periodically, or in an interrupt handler, call
- * cvmx_usb_poll() to service USB events.
- * - Manage pipes using cvmx_usb_open_pipe() and
- * cvmx_usb_close_pipe().
- * - Manage transfers using cvmx_usb_submit_*() and
- * cvmx_usb_cancel*().
- * - Shutdown USB on unload using cvmx_usb_shutdown().
- *
- * To monitor USB status changes, the device driver must use
- * cvmx_usb_register_callback() to register for events that it
- * is interested in. Below are a few hints on successfully
- * implementing a driver on top of this API.
- *
- * == Initialization ==
- *
- * When a driver is first loaded, it is normally not necessary
- * to bring up the USB port completely. Most operating systems
- * expect to initialize and enable the port in two independent
- * steps. Normally an operating system will probe hardware,
- * initialize anything found, and then enable the hardware.
- *
- * In the probe phase you should:
- * - Use cvmx_usb_get_num_ports() to determine the number of
- *    USB ports to be supported.
- * - Allocate space for a struct cvmx_usb_state for each
- * port.
- * - Tell the operating system about each port
- *
- * In the initialization phase you should:
- * - Use cvmx_usb_initialize() on each port.
- * - Do not call cvmx_usb_enable(). This leaves the USB port in
- * the disabled state until the operating system is ready.
- *
- * Finally, in the enable phase you should:
- * - Call cvmx_usb_enable() on the appropriate port.
- *   - Note that some operating systems use a RESET instead of an
- * enable call. To implement RESET, you should call
- * cvmx_usb_disable() followed by cvmx_usb_enable().
- *
- * == Locking ==
- *
- * All of the functions in the cvmx-usb API assume exclusive
- * access to the USB hardware and internal data structures. This
- * means that the driver must provide locking as necessary.
- *
- * In the single CPU case it is normally enough to disable
- * interrupts before every call to cvmx_usb*() and enable them
- * again after the call is complete. Keep in mind that it is
- * very common for the callback handlers to make additional
- * calls into cvmx-usb, so the disable/enable must be protected
- * against recursion. As an example, the Linux kernel
- * local_irq_save() and local_irq_restore() are perfect for this
- * in the non SMP case.
- *
- * In the SMP case, locking is more complicated. For SMP you not
- * only need to disable interrupts on the local core, but also
- * take a lock to make sure that another core cannot call
- * cvmx-usb.
- *
- * == Port callback ==
- *
- * The port callback prototype needs to look as follows:
- *
- * void port_callback(struct cvmx_usb_state *usb,
- * enum cvmx_usb_callback reason,
- * enum cvmx_usb_complete status,
- * int pipe_handle,
- * int submit_handle,
- * int bytes_transferred,
- * void *user_data);
- * - "usb" is the struct cvmx_usb_state for the port.
- * - "reason" will always be CVMX_USB_CALLBACK_PORT_CHANGED.
- * - "status" will always be CVMX_USB_COMPLETE_SUCCESS.
- * - "pipe_handle" will always be -1.
- * - "submit_handle" will always be -1.
- * - "bytes_transferred" will always be 0.
- * - "user_data" is the void pointer originally passed along
- * with the callback. Use this for any state information you
- * need.
- *
- * The port callback will be called whenever the user plugs /
- * unplugs a device from the port. It will not be called when a
- * device is plugged / unplugged from a hub connected to the
- * root port. Normally all the callback needs to do is tell the
- * operating system to poll the root hub for status. Under
- * Linux, this is performed by calling usb_hcd_poll_rh_status().
- * In the Linux driver we use "user_data" to pass around the
- * Linux "hcd" structure. Once the port callback completes,
- * Linux automatically calls octeon_usb_hub_status_data() which
- * uses cvmx_usb_get_status() to determine the root port status.
- *
- * == Complete callback ==
- *
- * The completion callback prototype needs to look as follows:
- *
- * void complete_callback(struct cvmx_usb_state *usb,
- * enum cvmx_usb_callback reason,
- * enum cvmx_usb_complete status,
- * int pipe_handle,
- * int submit_handle,
- * int bytes_transferred,
- * void *user_data);
- * - "usb" is the struct cvmx_usb_state for the port.
- * - "reason" will always be CVMX_USB_CALLBACK_TRANSFER_COMPLETE.
- * - "status" will be one of the cvmx_usb_complete enumerations.
- * - "pipe_handle" is the handle to the pipe the transaction
- * was originally submitted on.
- * - "submit_handle" is the handle returned by the original
- * cvmx_usb_submit_* call.
- * - "bytes_transferred" is the number of bytes successfully
- * transferred in the transaction. This will be zero on most
- * error conditions.
- * - "user_data" is the void pointer originally passed along
- * with the callback. Use this for any state information you
- * need. For example, the Linux "urb" is stored in here in the
- * Linux driver.
- *
- * In general your callback handler should use "status" and
- * "bytes_transferred" to tell the operating system how the
- * transaction completed. Normally the pipe is not changed in
- * this callback.
- *
- * == Canceling transactions ==
- *
- * When a transaction is cancelled using cvmx_usb_cancel*(), the
- * actual length of time until the complete callback is called
- * can vary greatly. It may be called before cvmx_usb_cancel*()
- * returns, or it may be called a number of usb frames in the
- * future once the hardware frees the transaction. In either of
- * these cases, the complete handler will receive
- * CVMX_USB_COMPLETE_CANCEL.
- *
- * == Handling pipes ==
- *
- * USB "pipes" are a software construct created by this API to
- * enable the ordering of USB transactions to a device endpoint.
- * Octeon's underlying hardware doesn't have any concept
- * equivalent to "pipes". The hardware instead has eight
- * channels that can be used simultaneously to have up to eight
- * transaction in process at the same time. In order to maintain
- * transactions in progress at the same time. In order to maintain
- * active in one hardware channel at a time. From an API user's
- * perspective, this doesn't matter but it can be helpful to
- * keep this in mind when you are probing hardware while
- * debugging.
- *
- * Also keep in mind that usb transactions contain state
- * information about the previous transaction to the same
- * endpoint. Each transaction has a PID toggle that changes 0/1
- * between each sub packet. This is maintained in the pipe data
- * structures. For this reason, you generally cannot create and
- * destroy a pipe for every transaction. A sequence of
- * transactions to the same endpoint must use the same pipe.
- *
- * == Root Hub ==
- *
- * Some operating systems view the usb root port as a normal usb
- * hub. These systems attempt to control the root hub with
- * messages similar to the usb 2.0 spec for hub control and
- * status. For these systems it may be necessary to write
- * a function to decode standard USB control messages into
- * equivalent cvmx-usb API calls.
- *
- * == Interrupts ==
- *
- * If you plan on using usb interrupts, cvmx_usb_poll() must be
- * called on every usb interrupt. It will read the usb state,
- * call any needed callbacks, and schedule transactions as
- * needed. Your device driver needs only to hookup an interrupt
- * handler and call cvmx_usb_poll(). Octeon's usb port 0 causes
- * CIU bit CIU_INT*_SUM0[USB] to be set (bit 56). For port 1,
- * CIU bit CIU_INT_SUM1[USB1] is set (bit 17). How these bits
- * are turned into interrupt numbers is operating system
- * specific. For Linux, there are the convenient defines
- * OCTEON_IRQ_USB0 and OCTEON_IRQ_USB1 for the IRQ numbers.
- *
- * If you aren't using interrupts, simply call cvmx_usb_poll()
- * in your main processing loop. A minimal bring-up sketch follows
- * this comment.
- */
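To tie together the probe/initialize/enable phases and the interrupt handling described in the comment above, here is a minimal, hedged bring-up sketch for port 0. my_usb, my_irq_handler() and my_bring_up_port0() are assumed names, and the locking that a real driver needs around every cvmx_usb_*() call is omitted for brevity.

static struct cvmx_usb_state my_usb;	/* one per port, allocated at probe time */

static void my_irq_handler(void)
{
	/* Called from the USB interrupt; assumes interrupts are masked here */
	cvmx_usb_poll(&my_usb);
}

static int my_bring_up_port0(void)
{
	int result;

	if (cvmx_usb_get_num_ports() < 1)
		return -1;		/* no USB ports on this chip */

	/* Initialization phase: bring up the port but leave it disabled */
	result = cvmx_usb_initialize(&my_usb, 0,
				     CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO);
	if (result)
		return result;

	/* Enable phase: only once the operating system is ready */
	return cvmx_usb_enable(&my_usb);
}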
-
-#ifndef __CVMX_USB_H__
-#define __CVMX_USB_H__
-
-/**
- * enum cvmx_usb_speed - the possible USB device speeds
- *
- * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
- * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
- * @CVMX_USB_SPEED_LOW:  Device is operating at 1.5Mbps
- */
-enum cvmx_usb_speed {
- CVMX_USB_SPEED_HIGH = 0,
- CVMX_USB_SPEED_FULL = 1,
- CVMX_USB_SPEED_LOW = 2,
-};
-
-/**
- * enum cvmx_usb_transfer - the possible USB transfer types
- *
- * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
- * transfers
- * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
- * priority periodic transfers
- * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
- * transfers
- * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
- * periodic transfers
- */
-enum cvmx_usb_transfer {
- CVMX_USB_TRANSFER_CONTROL = 0,
- CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
- CVMX_USB_TRANSFER_BULK = 2,
- CVMX_USB_TRANSFER_INTERRUPT = 3,
-};
-
-/**
- * enum cvmx_usb_direction - the transfer directions
- *
- * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
- * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
- */
-enum cvmx_usb_direction {
- CVMX_USB_DIRECTION_OUT,
- CVMX_USB_DIRECTION_IN,
-};
-
-/**
- * enum cvmx_usb_complete - possible callback function status codes
- *
- * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without
- * any errors
- * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented
- * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight by
- * a user call to cvmx_usb_cancel
- * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected
- * error status
- * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response
- * from the device
- * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the
- * device even after a number of retries
- * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
- * error even after a number of retries
- * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error
- * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error
- * even after a number of retries
- */
-enum cvmx_usb_complete {
- CVMX_USB_COMPLETE_SUCCESS,
- CVMX_USB_COMPLETE_SHORT,
- CVMX_USB_COMPLETE_CANCEL,
- CVMX_USB_COMPLETE_ERROR,
- CVMX_USB_COMPLETE_STALL,
- CVMX_USB_COMPLETE_XACTERR,
- CVMX_USB_COMPLETE_DATATGLERR,
- CVMX_USB_COMPLETE_BABBLEERR,
- CVMX_USB_COMPLETE_FRAMEERR,
-};
-
-/**
- * struct cvmx_usb_port_status - the USB port status information
- *
- * @port_enabled:      1 = USB port is enabled, 0 = disabled
- * @port_over_current: 1 = Over current detected, 0 = Over current not
- * detected. Octeon doesn't support over current detection.
- * @port_powered: 1 = Port power is being supplied to the device, 0 =
- * power is off. Octeon doesn't support turning port power
- * off.
- * @port_speed: Current port speed.
- * @connected: 1 = A device is connected to the port, 0 = No device is
- * connected.
- * @connect_change: 1 = Device connected state changed since the last set
- * status call.
- */
-struct cvmx_usb_port_status {
- uint32_t reserved : 25;
- uint32_t port_enabled : 1;
- uint32_t port_over_current : 1;
- uint32_t port_powered : 1;
- enum cvmx_usb_speed port_speed : 2;
- uint32_t connected : 1;
- uint32_t connect_change : 1;
-};
-
-/**
- * union cvmx_usb_control_header - the structure of a Control packet header
- *
- * @s.request_type: Bit 7 tells the direction: 1=IN, 0=OUT
- * @s.request:      The standard USB request to make
- * @s.value:        Value parameter for the request in little endian format
- * @s.index:        Index for the request in little endian format
- * @s.length:       Length of the data associated with this request in
- *                  little endian format
- */
-union cvmx_usb_control_header {
- uint64_t u64;
- struct {
- uint64_t request_type : 8;
- uint64_t request : 8;
- uint64_t value : 16;
- uint64_t index : 16;
- uint64_t length : 16;
- } s;
-};
-
-/**
- * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
- *
- * @offset: This is the offset in bytes into the main buffer where this data
- * is stored.
- * @length: This is the length in bytes of the data.
- * @status: This is the status of this individual packet transfer.
- */
-struct cvmx_usb_iso_packet {
- int offset;
- int length;
- enum cvmx_usb_complete status;
-};
-
-/**
- * enum cvmx_usb_callback - possible callback reasons for the USB API
- *
- * @CVMX_USB_CALLBACK_TRANSFER_COMPLETE: A callback of this type is called when
- * a submitted transfer completes. The
- * completion callback will be called even
- * if the transfer fails or is canceled.
- * The status parameter will contain
- *                                      details of why the callback was called.
- * @CVMX_USB_CALLBACK_PORT_CHANGED: The status of the port changed. For
- * example, someone may have plugged a
- * device in. The status parameter
- * contains CVMX_USB_COMPLETE_SUCCESS. Use
- * cvmx_usb_get_status() to get the new
- * port status.
- * @__CVMX_USB_CALLBACK_END: Do not use. Used internally for array
- * bounds.
- */
-enum cvmx_usb_callback {
- CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
- CVMX_USB_CALLBACK_PORT_CHANGED,
- __CVMX_USB_CALLBACK_END
-};
-
-/**
- * USB state internal data. The contents of this structure
- * may change in future SDKs. No data in it should be referenced
- * by users of this API.
- */
-struct cvmx_usb_state {
- char data[65536];
-};
-
-/**
- * USB callback functions are always of the following type.
- * The parameters are as follows:
- * - state = USB device state populated by
- * cvmx_usb_initialize().
- * - reason = The enum cvmx_usb_callback used to register
- * the callback.
- * - status = The enum cvmx_usb_complete representing the
- * status code of a transaction.
- * - pipe_handle = The Pipe that caused this callback, or
- * -1 if this callback wasn't associated with a pipe.
- * - submit_handle = Transfer submit handle causing this
- * callback, or -1 if this callback wasn't associated
- * with a transfer.
- * - bytes_transferred = Actual number of bytes transferred.
- * - user_data = The user pointer supplied to the
- * function cvmx_usb_submit() or
- * cvmx_usb_register_callback() */
-typedef void (*cvmx_usb_callback_func_t)(struct cvmx_usb_state *state,
- enum cvmx_usb_callback reason,
- enum cvmx_usb_complete status,
- int pipe_handle, int submit_handle,
- int bytes_transferred, void *user_data);
-
-/**
- * enum cvmx_usb_initialize_flags - flags to pass the initialization function
- *
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
- * as clock source at USB_XO and
- * USB_XI.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
- * board clock source at USB_XO.
- * USB_XI should be tied to GND.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO: Automatically determine clock type
- * based on function in
- * cvmx-helper-board.c.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
- * crystal
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled IO for
- *                                    data transfers on the USB port
- */
-enum cvmx_usb_initialize_flags {
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO = 0,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
- /* Bits 3-4 used to encode the clock frequency */
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
-};
-
-/**
- * enum cvmx_usb_pipe_flags - flags for passing when a pipe is created.
- * Currently no flags need to be passed.
- *
- * @__CVMX_USB_PIPE_FLAGS_OPEN: Used internally to determine if a pipe is
- * open. Do not use.
- * @__CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
- * actively using hardware. Do not use.
- * @__CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high
- * speed pipe is in the ping state. Do not
- * use.
- */
-enum cvmx_usb_pipe_flags {
- __CVMX_USB_PIPE_FLAGS_OPEN = 1 << 16,
- __CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
- __CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
-};
-
-extern int cvmx_usb_get_num_ports(void);
-extern int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
- enum cvmx_usb_initialize_flags flags);
-extern int cvmx_usb_shutdown(struct cvmx_usb_state *state);
-extern int cvmx_usb_enable(struct cvmx_usb_state *state);
-extern int cvmx_usb_disable(struct cvmx_usb_state *state);
-extern struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *state);
-extern void cvmx_usb_set_status(struct cvmx_usb_state *state, struct cvmx_usb_port_status port_status);
-extern int cvmx_usb_open_pipe(struct cvmx_usb_state *state,
- enum cvmx_usb_pipe_flags flags,
- int device_addr, int endpoint_num,
- enum cvmx_usb_speed device_speed, int max_packet,
- enum cvmx_usb_transfer transfer_type,
- enum cvmx_usb_direction transfer_dir, int interval,
- int multi_count, int hub_device_addr,
- int hub_port);
-extern int cvmx_usb_submit_bulk(struct cvmx_usb_state *state, int pipe_handle,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data);
-extern int cvmx_usb_submit_interrupt(struct cvmx_usb_state *state, int pipe_handle,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data);
-extern int cvmx_usb_submit_control(struct cvmx_usb_state *state, int pipe_handle,
- uint64_t control_header,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data);
-
-/**
- * enum cvmx_usb_isochronous_flags - flags to pass the
- * cvmx_usb_submit_isochronous() function.
- *
- * @CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT: Do not return an error if a transfer
- * is less than the maximum packet size
- * of the device.
- * @CVMX_USB_ISOCHRONOUS_FLAGS_ASAP: Schedule the transaction as soon as
- * possible.
- */
-enum cvmx_usb_isochronous_flags {
- CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT = 1 << 0,
- CVMX_USB_ISOCHRONOUS_FLAGS_ASAP = 1 << 1,
-};
-
-extern int cvmx_usb_submit_isochronous(struct cvmx_usb_state *state, int pipe_handle,
- int start_frame, int flags,
- int number_packets,
- struct cvmx_usb_iso_packet packets[],
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data);
-extern int cvmx_usb_cancel(struct cvmx_usb_state *state, int pipe_handle,
- int submit_handle);
-extern int cvmx_usb_cancel_all(struct cvmx_usb_state *state, int pipe_handle);
-extern int cvmx_usb_close_pipe(struct cvmx_usb_state *state, int pipe_handle);
-extern int cvmx_usb_register_callback(struct cvmx_usb_state *state,
- enum cvmx_usb_callback reason,
- cvmx_usb_callback_func_t callback,
- void *user_data);
-extern int cvmx_usb_get_frame_number(struct cvmx_usb_state *state);
-extern int cvmx_usb_poll(struct cvmx_usb_state *state);
-
-#endif /* __CVMX_USB_H__ */
diff --git a/drivers/staging/octeon-usb/cvmx-usbnx-defs.h b/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
deleted file mode 100644
index e06aafa5726..00000000000
--- a/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
+++ /dev/null
@@ -1,885 +0,0 @@
-/***********************license start***************
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
-
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
-
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
-
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- ***********************license end**************************************/
-
-
-/**
- * cvmx-usbnx-defs.h
- *
- * Configuration and status register (CSR) type definitions for
- * Octeon usbnx.
- *
- */
-#ifndef __CVMX_USBNX_TYPEDEFS_H__
-#define __CVMX_USBNX_TYPEDEFS_H__
-
-#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
-#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
-
-#define CVMX_USBNXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
-#define CVMX_USBNXREG2(reg, bid) \
- (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
-
-#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
-#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
-#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
-#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
-
-/**
- * cvmx_usbn#_clk_ctl
- *
- * USBN_CLK_CTL = USBN's Clock Control
- *
- * This register is used to control the frequency of the hclk and the
- * hreset and phy_rst signals.
- */
-union cvmx_usbnx_clk_ctl {
- uint64_t u64;
- /**
- * struct cvmx_usbnx_clk_ctl_s
- * @divide2: The 'hclk' used by the USB subsystem is derived
- * from the eclk.
- * Also see the field DIVIDE. DIVIDE2<1> must currently
- * be zero because it is not implemented, so the maximum
- * ratio of eclk/hclk is currently 16.
- * The actual divide number for hclk is:
- * (DIVIDE2 + 1) * (DIVIDE + 1)
- * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
- * generate the hclk in the USB Subsystem is held
- * in reset. This bit must be set to '0' before
- * changing the value of DIVIDE in this register.
- * The reset to the HCLK_DIVIDER is also asserted
- * when core reset is asserted.
- * @p_x_on: Force USB-PHY on during suspend.
- * '1' USB-PHY XO block is powered-down during
- * suspend.
- * '0' USB-PHY XO block is powered-up during
- * suspend.
- * The value of this field must be set while POR is
- * active.
- * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
- * remain powered in Suspend Mode.
- * '1' The USB-PHY XO Bias, Bandgap and PLL are
- * powered down in suspend mode.
- * The value of this field must be set while POR is
- * active.
- * @p_c_sel: Phy clock speed select.
- * Selects the reference clock / crystal frequency.
- * '11': Reserved
- * '10': 48 MHz (reserved when a crystal is used)
- * '01': 24 MHz (reserved when a crystal is used)
- * '00': 12 MHz
- * The value of this field must be set while POR is
- * active.
- * NOTE: if a crystal is used as a reference clock,
- * this field must be set to 12 MHz.
- * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
- * @sd_mode: Scaledown mode for the USBC. Control timing events
- * in the USBC, for normal operation this must be '0'.
- * @s_bist: Starts bist on the hclk memories, during the '0'
- * to '1' transition.
- * @por: Power On Reset for the PHY.
- * Resets all the PHYS registers and state machines.
- * @enable: When '1' allows the generation of the hclk. When
- * '0' the hclk will not be generated. SEE DIVIDE
- * field of this register.
- * @prst: When this field is '0' the reset associated with
- * the phy_clk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until the time it takes 6 clocks (hclk or phy_clk,
- * whichever is slower) has passed. Under normal
- * operation once this bit is set to '1' it should not
- * be set to '0'.
- * @hrst: When this field is '0' the reset associated with
- * the hclk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until 12ms after phy_clk is stable. Under normal
- * operation, once this bit is set to '1' it should
- * not be set to '0'.
- * @divide: The frequency of 'hclk' used by the USB subsystem
- * is the eclk frequency divided by the value of
- * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
- * DIVIDE2 of this register.
- * The hclk frequency should be less than 125Mhz.
- * After writing a value to this field the SW should
- * read the field for the value written.
- * The ENABLE field of this register should not be set
- * until AFTER this field is set and then read.
- */
- struct cvmx_usbnx_clk_ctl_s {
- uint64_t reserved_20_63 : 44;
- uint64_t divide2 : 2;
- uint64_t hclk_rst : 1;
- uint64_t p_x_on : 1;
- uint64_t reserved_14_15 : 2;
- uint64_t p_com_on : 1;
- uint64_t p_c_sel : 2;
- uint64_t cdiv_byp : 1;
- uint64_t sd_mode : 2;
- uint64_t s_bist : 1;
- uint64_t por : 1;
- uint64_t enable : 1;
- uint64_t prst : 1;
- uint64_t hrst : 1;
- uint64_t divide : 3;
- } s;
- /**
- * struct cvmx_usbnx_clk_ctl_cn30xx
- * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
- * generate the hclk in the USB Subsystem is held
- * in reset. This bit must be set to '0' before
- * changing the value of DIVIDE in this register.
- * The reset to the HCLK_DIVIDER is also asserted
- * when core reset is asserted.
- * @p_x_on: Force USB-PHY on during suspend.
- * '1' USB-PHY XO block is powered-down during
- * suspend.
- * '0' USB-PHY XO block is powered-up during
- * suspend.
- * The value of this field must be set while POR is
- * active.
- * @p_rclk: Phy reference clock enable.
- * '1' The PHY PLL uses the XO block output as a
- * reference.
- * '0' Reserved.
- * @p_xenbn: Phy external clock enable.
- * '1' The XO block uses the clock from a crystal.
- * '0' The XO block uses an external clock supplied
- * on the XO pin. USB_XI should be tied to
- * ground for this usage.
- * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
- * remain powered in Suspend Mode.
- * '1' The USB-PHY XO Bias, Bandgap and PLL are
- * powered down in suspend mode.
- * The value of this field must be set while POR is
- * active.
- * @p_c_sel: Phy clock speed select.
- * Selects the reference clock / crystal frequency.
- * '11': Reserved
- * '10': 48 MHz
- * '01': 24 MHz
- * '00': 12 MHz
- * The value of this field must be set while POR is
- * active.
- * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
- * @sd_mode: Scaledown mode for the USBC. Control timing events
- * in the USBC, for normal operation this must be '0'.
- * @s_bist: Starts bist on the hclk memories, during the '0'
- * to '1' transition.
- * @por: Power On Reset for the PHY.
- * Resets all the PHYS registers and state machines.
- * @enable: When '1' allows the generation of the hclk. When
- * '0' the hclk will not be generated.
- * @prst: When this field is '0' the reset associated with
- * the phy_clk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until the time it takes 6 clocks (hclk or phy_clk,
- * whichever is slower) has passed. Under normal
- * operation once this bit is set to '1' it should not
- * be set to '0'.
- * @hrst: When this field is '0' the reset associated with
- * the hclk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until 12ms after phy_clk is stable. Under normal
- * operation, once this bit is set to '1' it should
- * not be set to '0'.
- * @divide: The 'hclk' used by the USB subsystem is derived
- * from the eclk. The eclk will be divided by the
- * value of this field +1 to determine the hclk
- * frequency. (Also see HRST of this register).
- * The hclk frequency must be less than 125 MHz.
- */
- struct cvmx_usbnx_clk_ctl_cn30xx {
- uint64_t reserved_18_63 : 46;
- uint64_t hclk_rst : 1;
- uint64_t p_x_on : 1;
- uint64_t p_rclk : 1;
- uint64_t p_xenbn : 1;
- uint64_t p_com_on : 1;
- uint64_t p_c_sel : 2;
- uint64_t cdiv_byp : 1;
- uint64_t sd_mode : 2;
- uint64_t s_bist : 1;
- uint64_t por : 1;
- uint64_t enable : 1;
- uint64_t prst : 1;
- uint64_t hrst : 1;
- uint64_t divide : 3;
- } cn30xx;
- struct cvmx_usbnx_clk_ctl_cn30xx cn31xx;
- /**
- * struct cvmx_usbnx_clk_ctl_cn50xx
- * @divide2: The 'hclk' used by the USB subsystem is derived
- * from the eclk.
- * Also see the field DIVIDE. DIVIDE2<1> must currently
- * be zero because it is not implemented, so the maximum
- * ratio of eclk/hclk is currently 16.
- * The actual divide number for hclk is:
- * (DIVIDE2 + 1) * (DIVIDE + 1)
- * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
- * generate the hclk in the USB Subsystem is held
- * in reset. This bit must be set to '0' before
- * changing the value of DIVIDE in this register.
- * The reset to the HCLK_DIVIDER is also asserted
- * when core reset is asserted.
- * @p_rtype: PHY reference clock type
- * '0' The USB-PHY uses a 12MHz crystal as a clock
- * source at the USB_XO and USB_XI pins
- * '1' Reserved
- * '2' The USB_PHY uses 12/24/48MHz 2.5V board clock
- * at the USB_XO pin. USB_XI should be tied to
- * ground in this case.
- * '3' Reserved
- * (bit 14 was P_XENBN on 3xxx)
- * (bit 15 was P_RCLK on 3xxx)
- * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
- * remain powered in Suspend Mode.
- * '1' The USB-PHY XO Bias, Bandgap and PLL are
- * powered down in suspend mode.
- * The value of this field must be set while POR is
- * active.
- * @p_c_sel: Phy clock speed select.
- * Selects the reference clock / crystal frequency.
- * '11': Reserved
- * '10': 48 MHz (reserved when a crystal is used)
- * '01': 24 MHz (reserved when a crystal is used)
- * '00': 12 MHz
- * The value of this field must be set while POR is
- * active.
- * NOTE: if a crystal is used as a reference clock,
- * this field must be set to 12 MHz.
- * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
- * @sd_mode: Scaledown mode for the USBC. Control timing events
- * in the USBC, for normal operation this must be '0'.
- * @s_bist: Starts bist on the hclk memories, during the '0'
- * to '1' transition.
- * @por: Power On Reset for the PHY.
- * Resets all the PHYS registers and state machines.
- * @enable: When '1' allows the generation of the hclk. When
- * '0' the hclk will not be generated. SEE DIVIDE
- * field of this register.
- * @prst: When this field is '0' the reset associated with
- * the phy_clk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until the time it takes 6 clocks (hclk or phy_clk,
- * whichever is slower) has passed. Under normal
- * operation once this bit is set to '1' it should not
- * be set to '0'.
- * @hrst: When this field is '0' the reset associated with
- * the hclk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until 12ms after phy_clk is stable. Under normal
- * operation, once this bit is set to '1' it should
- * not be set to '0'.
- * @divide: The frequency of 'hclk' used by the USB subsystem
- * is the eclk frequency divided by the value of
- * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
- * DIVIDE2 of this register.
- * The hclk frequency should be less than 125Mhz.
- * After writing a value to this field the SW should
- * read the field for the value written.
- * The ENABLE field of this register should not be set
- * until AFTER this field is set and then read.
- */
- struct cvmx_usbnx_clk_ctl_cn50xx {
- uint64_t reserved_20_63 : 44;
- uint64_t divide2 : 2;
- uint64_t hclk_rst : 1;
- uint64_t reserved_16_16 : 1;
- uint64_t p_rtype : 2;
- uint64_t p_com_on : 1;
- uint64_t p_c_sel : 2;
- uint64_t cdiv_byp : 1;
- uint64_t sd_mode : 2;
- uint64_t s_bist : 1;
- uint64_t por : 1;
- uint64_t enable : 1;
- uint64_t prst : 1;
- uint64_t hrst : 1;
- uint64_t divide : 3;
- } cn50xx;
- struct cvmx_usbnx_clk_ctl_cn50xx cn52xx;
- struct cvmx_usbnx_clk_ctl_cn50xx cn56xx;
-};
-
-/**
- * cvmx_usbn#_usbp_ctl_status
- *
- * USBN_USBP_CTL_STATUS = USBP Control And Status Register
- *
- * Contains general control and status information for the USBN block.
- */
-union cvmx_usbnx_usbp_ctl_status {
- uint64_t u64;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_s
- * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
- * @txvreftune: HS DC Voltage Level Adjustment
- * @txfslstune: FS/LS Source Impedance Adjustment
- * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
- * @sqrxtune: Squelch Threshold Adjustment
- * @compdistune: Disconnect Threshold Adjustment
- * @otgtune: VBUS Valid Threshold Adjustment
- * @otgdisable: OTG Block Disable
- * @portreset: Per_Port Reset
- * @drvvbus: Drive VBUS
- * @lsbist: Low-Speed BIST Enable.
- * @fsbist: Full-Speed BIST Enable.
- * @hsbist: High-Speed BIST Enable.
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
- * Normally should be set to zero.
- * When customers have no intent to use USB PHY
- * interface, they should:
- * - still provide 3.3V to USB_VDD33, and
- * - tie USB_REXT to 3.3V supply, and
- * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
- * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Bigendian input to the USB Core. This should be
- * set to '0' for operation.
- * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP.
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1' pull down-resistance is connected
- * to D+/ '0' pull down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1' pull down-resistance is connected
- * to D-. '0' pull down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tuning: Transmitter Tuning for High-Speed Operation.
- * Tunes the current supply and rise/fall output
- * times for high-speed operation.
- * [20:19] == 11: Current supply increased
- * approximately 9%
- * [20:19] == 10: Current supply increased
- * approximately 4.5%
- * [20:19] == 01: Design default.
- * [20:19] == 00: Current supply decreased
- * approximately 4.5%
- * [22:21] == 11: Rise and fall times are increased.
- * [22:21] == 10: Design default.
- * [22:21] == 01: Rise and fall times are decreased.
- * [22:21] == 00: Rise and fall times are decreased
- * further as compared to the 01 setting.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receive is
- * enabled.
- * '0': During data transmission the receive is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. "0": The phy_clock and free_clk outputs
- * are available within a specific period after the
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_s {
- uint64_t txrisetune : 1;
- uint64_t txvreftune : 4;
- uint64_t txfslstune : 4;
- uint64_t txhsxvtune : 2;
- uint64_t sqrxtune : 3;
- uint64_t compdistune : 3;
- uint64_t otgtune : 3;
- uint64_t otgdisable : 1;
- uint64_t portreset : 1;
- uint64_t drvvbus : 1;
- uint64_t lsbist : 1;
- uint64_t fsbist : 1;
- uint64_t hsbist : 1;
- uint64_t bist_done : 1;
- uint64_t bist_err : 1;
- uint64_t tdata_out : 4;
- uint64_t siddq : 1;
- uint64_t txpreemphasistune : 1;
- uint64_t dma_bmode : 1;
- uint64_t usbc_end : 1;
- uint64_t usbp_bist : 1;
- uint64_t tclk : 1;
- uint64_t dp_pulld : 1;
- uint64_t dm_pulld : 1;
- uint64_t hst_mode : 1;
- uint64_t tuning : 4;
- uint64_t tx_bs_enh : 1;
- uint64_t tx_bs_en : 1;
- uint64_t loop_enb : 1;
- uint64_t vtest_enb : 1;
- uint64_t bist_enb : 1;
- uint64_t tdata_sel : 1;
- uint64_t taddr_in : 4;
- uint64_t tdata_in : 8;
- uint64_t ate_reset : 1;
- } s;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_cn30xx
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Bigendian input to the USB Core. This should be
- * set to '0' for operation.
- * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP.
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1' pull down-resistance is connected
- * to D+/ '0' pull down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1' pull down-resistance is connected
- * to D-. '0' pull down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tuning: Transmitter Tuning for High-Speed Operation.
- * Tunes the current supply and rise/fall output
- * times for high-speed operation.
- * [20:19] == 11: Current supply increased
- * approximately 9%
- * [20:19] == 10: Current supply increased
- * approximately 4.5%
- * [20:19] == 01: Design default.
- * [20:19] == 00: Current supply decreased
- * approximately 4.5%
- * [22:21] == 11: Rise and fall times are increased.
- * [22:21] == 10: Design default.
- * [22:21] == 01: Rise and fall times are decreased.
- * [22:21] == 00: Rise and fall times are decreased
- * further as compared to the 01 setting.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receive is
- * enabled.
- * '0': During data transmission the receive is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. "0": The phy_clock and free_clk outputs
- * are available within a specific period after the
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_cn30xx {
- uint64_t reserved_38_63 : 26;
- uint64_t bist_done : 1;
- uint64_t bist_err : 1;
- uint64_t tdata_out : 4;
- uint64_t reserved_30_31 : 2;
- uint64_t dma_bmode : 1;
- uint64_t usbc_end : 1;
- uint64_t usbp_bist : 1;
- uint64_t tclk : 1;
- uint64_t dp_pulld : 1;
- uint64_t dm_pulld : 1;
- uint64_t hst_mode : 1;
- uint64_t tuning : 4;
- uint64_t tx_bs_enh : 1;
- uint64_t tx_bs_en : 1;
- uint64_t loop_enb : 1;
- uint64_t vtest_enb : 1;
- uint64_t bist_enb : 1;
- uint64_t tdata_sel : 1;
- uint64_t taddr_in : 4;
- uint64_t tdata_in : 8;
- uint64_t ate_reset : 1;
- } cn30xx;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_cn50xx
- * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
- * @txvreftune: HS DC Voltage Level Adjustment
- * @txfslstune: FS/LS Source Impedance Adjustment
- * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
- * @sqrxtune: Squelch Threshold Adjustment
- * @compdistune: Disconnect Threshold Adjustment
- * @otgtune: VBUS Valid Threshold Adjustment
- * @otgdisable: OTG Block Disable
- * @portreset: Per_Port Reset
- * @drvvbus: Drive VBUS
- * @lsbist: Low-Speed BIST Enable.
- * @fsbist: Full-Speed BIST Enable.
- * @hsbist: High-Speed BIST Enable.
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Bigendian input to the USB Core. This should be
- * set to '0' for operation.
- * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP.
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1' pull down-resistance is connected
- * to D+/ '0' pull down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1' pull down-resistance is connected
- * to D-. '0' pull down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receive is
- * enabled.
- * '0': During data transmission the receive is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. "0": The phy_clock and free_clk outputs
- * are available within a specific period after the
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_cn50xx {
- uint64_t txrisetune : 1;
- uint64_t txvreftune : 4;
- uint64_t txfslstune : 4;
- uint64_t txhsxvtune : 2;
- uint64_t sqrxtune : 3;
- uint64_t compdistune : 3;
- uint64_t otgtune : 3;
- uint64_t otgdisable : 1;
- uint64_t portreset : 1;
- uint64_t drvvbus : 1;
- uint64_t lsbist : 1;
- uint64_t fsbist : 1;
- uint64_t hsbist : 1;
- uint64_t bist_done : 1;
- uint64_t bist_err : 1;
- uint64_t tdata_out : 4;
- uint64_t reserved_31_31 : 1;
- uint64_t txpreemphasistune : 1;
- uint64_t dma_bmode : 1;
- uint64_t usbc_end : 1;
- uint64_t usbp_bist : 1;
- uint64_t tclk : 1;
- uint64_t dp_pulld : 1;
- uint64_t dm_pulld : 1;
- uint64_t hst_mode : 1;
- uint64_t reserved_19_22 : 4;
- uint64_t tx_bs_enh : 1;
- uint64_t tx_bs_en : 1;
- uint64_t loop_enb : 1;
- uint64_t vtest_enb : 1;
- uint64_t bist_enb : 1;
- uint64_t tdata_sel : 1;
- uint64_t taddr_in : 4;
- uint64_t tdata_in : 8;
- uint64_t ate_reset : 1;
- } cn50xx;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_cn52xx
- * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
- * @txvreftune: HS DC Voltage Level Adjustment
- * @txfslstune: FS/LS Source Impedance Adjustment
- * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
- * @sqrxtune: Squelch Threshold Adjustment
- * @compdistune: Disconnect Threshold Adjustment
- * @otgtune: VBUS Valid Threshold Adjustment
- * @otgdisable: OTG Block Disable
- * @portreset: Per_Port Reset
- * @drvvbus: Drive VBUS
- * @lsbist: Low-Speed BIST Enable.
- * @fsbist: Full-Speed BIST Enable.
- * @hsbist: High-Speed BIST Enable.
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
- * Normally should be set to zero.
- * When customers have no intent to use USB PHY
- * interface, they should:
- * - still provide 3.3V to USB_VDD33, and
- * - tie USB_REXT to 3.3V supply, and
- * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
- * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Bigendian input to the USB Core. This should be
- * set to '0' for operation.
- * @usbp_bist: PHY, This is cleared '0' to run BIST on the USBP.
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1' pull down-resistance is connected
- * to D+/ '0' pull down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1' pull down-resistance is connected
- * to D-. '0' pull down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receive is
- * enabled.
- * '0': During data transmission the receive is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. "0": The phy_clock and free_clk outputs
- * are available within a specific period after the
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_cn52xx {
- uint64_t txrisetune : 1;
- uint64_t txvreftune : 4;
- uint64_t txfslstune : 4;
- uint64_t txhsxvtune : 2;
- uint64_t sqrxtune : 3;
- uint64_t compdistune : 3;
- uint64_t otgtune : 3;
- uint64_t otgdisable : 1;
- uint64_t portreset : 1;
- uint64_t drvvbus : 1;
- uint64_t lsbist : 1;
- uint64_t fsbist : 1;
- uint64_t hsbist : 1;
- uint64_t bist_done : 1;
- uint64_t bist_err : 1;
- uint64_t tdata_out : 4;
- uint64_t siddq : 1;
- uint64_t txpreemphasistune : 1;
- uint64_t dma_bmode : 1;
- uint64_t usbc_end : 1;
- uint64_t usbp_bist : 1;
- uint64_t tclk : 1;
- uint64_t dp_pulld : 1;
- uint64_t dm_pulld : 1;
- uint64_t hst_mode : 1;
- uint64_t reserved_19_22 : 4;
- uint64_t tx_bs_enh : 1;
- uint64_t tx_bs_en : 1;
- uint64_t loop_enb : 1;
- uint64_t vtest_enb : 1;
- uint64_t bist_enb : 1;
- uint64_t tdata_sel : 1;
- uint64_t taddr_in : 4;
- uint64_t tdata_in : 8;
- uint64_t ate_reset : 1;
- } cn52xx;
-};
-
-#endif
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 5dbbd14ec61..d118952c0a7 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -4,6 +4,44 @@
* for more details.
*
* Copyright (C) 2008 Cavium Networks
+ *
+ * Some parts of the code were originally released under BSD license:
+ *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
+ * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,13 +55,379 @@
#include <linux/delay.h>
#include <asm/octeon/cvmx.h>
-#include "cvmx-usb.h"
#include <asm/octeon/cvmx-iob-defs.h>
#include <linux/usb/hcd.h>
#include <linux/err.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include "octeon-hcd.h"
+
+/**
+ * enum cvmx_usb_speed - the possible USB device speeds
+ *
+ * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
+ * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
+ * @CVMX_USB_SPEED_LOW: Device is operating at 1.5Mbps
+ */
+enum cvmx_usb_speed {
+ CVMX_USB_SPEED_HIGH = 0,
+ CVMX_USB_SPEED_FULL = 1,
+ CVMX_USB_SPEED_LOW = 2,
+};
+
+/**
+ * enum cvmx_usb_transfer - the possible USB transfer types
+ *
+ * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
+ * transfers
+ * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
+ * priority periodic transfers
+ * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
+ * transfers
+ * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
+ * periodic transfers
+ */
+enum cvmx_usb_transfer {
+ CVMX_USB_TRANSFER_CONTROL = 0,
+ CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
+ CVMX_USB_TRANSFER_BULK = 2,
+ CVMX_USB_TRANSFER_INTERRUPT = 3,
+};
+
+/**
+ * enum cvmx_usb_direction - the transfer directions
+ *
+ * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
+ * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
+ */
+enum cvmx_usb_direction {
+ CVMX_USB_DIRECTION_OUT,
+ CVMX_USB_DIRECTION_IN,
+};
+
+/**
+ * enum cvmx_usb_complete - possible callback function status codes
+ *
+ * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without
+ * any errors
+ * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented
+ * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight
+ * by a user call to cvmx_usb_cancel
+ * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected
+ * error status
+ * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response
+ * from the device
+ * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the
+ * device even after a number of retries
+ * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
+ * error even after a number of retries
+ * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error
+ * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error
+ * even after a number of retries
+ */
+enum cvmx_usb_complete {
+ CVMX_USB_COMPLETE_SUCCESS,
+ CVMX_USB_COMPLETE_SHORT,
+ CVMX_USB_COMPLETE_CANCEL,
+ CVMX_USB_COMPLETE_ERROR,
+ CVMX_USB_COMPLETE_STALL,
+ CVMX_USB_COMPLETE_XACTERR,
+ CVMX_USB_COMPLETE_DATATGLERR,
+ CVMX_USB_COMPLETE_BABBLEERR,
+ CVMX_USB_COMPLETE_FRAMEERR,
+};
+
+/**
+ * struct cvmx_usb_port_status - the USB port status information
+ *
+ * @port_enabled: 1 = USB port is enabled, 0 = disabled
+ * @port_over_current: 1 = Over current detected, 0 = Over current not
+ * detected. Octeon doesn't support over current detection.
+ * @port_powered: 1 = Port power is being supplied to the device, 0 =
+ * power is off. Octeon doesn't support turning port power
+ * off.
+ * @port_speed: Current port speed.
+ * @connected: 1 = A device is connected to the port, 0 = No device is
+ * connected.
+ * @connect_change: 1 = Device connected state changed since the last set
+ * status call.
+ */
+struct cvmx_usb_port_status {
+ uint32_t reserved : 25;
+ uint32_t port_enabled : 1;
+ uint32_t port_over_current : 1;
+ uint32_t port_powered : 1;
+ enum cvmx_usb_speed port_speed : 2;
+ uint32_t connected : 1;
+ uint32_t connect_change : 1;
+};
+
+/**
+ * union cvmx_usb_control_header - the structure of a Control packet header
+ *
+ * @s.request_type: Bit 7 tells the direction: 1=IN, 0=OUT
+ * @s.request: The standard USB request to make
+ * @s.value: Value parameter for the request in little endian format
+ * @s.index: Index for the request in little endian format
+ * @s.length: Length of the data associated with this request in
+ * little endian format
+ */
+union cvmx_usb_control_header {
+ uint64_t u64;
+ struct {
+ uint64_t request_type : 8;
+ uint64_t request : 8;
+ uint64_t value : 16;
+ uint64_t index : 16;
+ uint64_t length : 16;
+ } s;
+};
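
For illustration only (this snippet is not part of the commit): a standard GET_DESCRIPTOR(DEVICE) setup packet expressed through the union above, with the little-endian handling the field descriptions call for. USB_REQ_GET_DESCRIPTOR and USB_DT_DEVICE come from <linux/usb/ch9.h>; in the driver itself the header normally arrives pre-built via the URB's setup packet.

	union cvmx_usb_control_header header;

	header.u64 = 0;
	header.s.request_type = 0x80;	/* device-to-host, standard, device */
	header.s.request = USB_REQ_GET_DESCRIPTOR;
	header.s.value = cpu_to_le16(USB_DT_DEVICE << 8);
	header.s.index = cpu_to_le16(0);
	header.s.length = cpu_to_le16(18); /* sizeof(struct usb_device_descriptor) */
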
+
+/**
+ * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
+ *
+ * @offset: This is the offset in bytes into the main buffer where this data
+ * is stored.
+ * @length: This is the length in bytes of the data.
+ * @status: This is the status of this individual packet transfer.
+ */
+struct cvmx_usb_iso_packet {
+ int offset;
+ int length;
+ enum cvmx_usb_complete status;
+};
+
+/**
+ * enum cvmx_usb_initialize_flags - flags used by the initialization function
+ *
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
+ * as clock source at USB_XO and
+ * USB_XI.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
+ * board clock source at USB_XO.
+ * USB_XI should be tied to GND.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
+ * crystal
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled I/O for
+ * data transfers on the USB port
+ */
+enum cvmx_usb_initialize_flags {
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
+ /* Bits 3-4 used to encode the clock frequency */
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
+};
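
A minimal sketch (not part of the driver) of mapping a board's reference-clock frequency in MHz onto the flags above; bits 3-4 carry the encoded speed, as the mask documents:

	static enum cvmx_usb_initialize_flags example_clock_flag(int mhz)
	{
		switch (mhz) {
		case 12:
			return CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
		case 24:
			return CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
		case 48:
			return CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
		default:
			return 0;	/* unsupported reference frequency */
		}
	}
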
+
+/**
+ * enum cvmx_usb_pipe_flags - internal flags for a pipe.
+ *
+ * @__CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
+ * actively using hardware. Do not use.
+ * @__CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high
+ * speed pipe is in the ping state. Do not
+ * use.
+ */
+enum cvmx_usb_pipe_flags {
+ __CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
+ __CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
+};
+
+/* Normal prefetch that uses the pref instruction. */
+#define CVMX_PREFETCH(address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (0))
+
+/* Maximum number of times to retry failed transactions */
+#define MAX_RETRIES 3
+
+/* Maximum number of hardware channels supported by the USB block */
+#define MAX_CHANNELS 8
+
+/* The highest valid USB device address */
+#define MAX_USB_ADDRESS 127
+
+/* The highest valid USB endpoint number */
+#define MAX_USB_ENDPOINT 15
+
+/* The highest valid port number on a hub */
+#define MAX_USB_HUB_PORT 15
+
+/*
+ * The low level hardware can transfer a maximum of this number of bytes in each
+ * transfer. The field is 19 bits wide
+ */
+#define MAX_TRANSFER_BYTES ((1<<19)-1)
+
+/*
+ * The low level hardware can transfer a maximum of this number of packets in
+ * each transfer. The field is 10 bits wide
+ */
+#define MAX_TRANSFER_PACKETS ((1<<10)-1)
+
+enum {
+ USB_CLOCK_TYPE_REF_12,
+ USB_CLOCK_TYPE_REF_24,
+ USB_CLOCK_TYPE_REF_48,
+ USB_CLOCK_TYPE_CRYSTAL_12,
+};
+
+/**
+ * Logical transactions may take numerous low level
+ * transactions, especially when splits are concerned. This
+ * enum represents all of the possible stages a transaction can
+ * be in. Note that split completes always have odd values, so
+ * the NAK handler can back up to the previous low level
+ * transaction with a simple clearing of bit 0.
+ */
+enum cvmx_usb_stage {
+ CVMX_USB_STAGE_NON_CONTROL,
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_SETUP,
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_DATA,
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_STATUS,
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
+};
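
To illustrate the bit-0 trick described above (a sketch only, assuming a struct cvmx_usb_transaction pointer named transaction): every *_SPLIT_COMPLETE value is odd, so clearing bit 0 returns to the stage that issued the split.

	if (transaction->stage & 1)		/* in a *_SPLIT_COMPLETE stage */
		transaction->stage &= ~1;	/* back up to the issuing stage */
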
+
+/**
+ * struct cvmx_usb_transaction - describes each pending USB transaction
+ * regardless of type. These are linked together
+ * to form a list of pending requests for a pipe.
+ *
+ * @node: List node for transactions in the pipe.
+ * @type: Type of transaction, duplicated from the pipe.
+ * @flags: State flags for this transaction.
+ * @buffer: User's physical buffer address to read/write.
+ * @buffer_length: Size of the user's buffer in bytes.
+ * @control_header: For control transactions, physical address of the 8
+ * byte standard header.
+ * @iso_start_frame: For ISO transactions, the starting frame number.
+ * @iso_number_packets: For ISO transactions, the number of packets in the
+ * request.
+ * @iso_packets: For ISO transactions, the sub packets in the request.
+ * @actual_bytes: Actual bytes transferred for this transaction.
+ * @stage: For control transactions, the current stage.
+ * @urb: URB.
+ */
+struct cvmx_usb_transaction {
+ struct list_head node;
+ enum cvmx_usb_transfer type;
+ uint64_t buffer;
+ int buffer_length;
+ uint64_t control_header;
+ int iso_start_frame;
+ int iso_number_packets;
+ struct cvmx_usb_iso_packet *iso_packets;
+ int xfersize;
+ int pktcnt;
+ int retries;
+ int actual_bytes;
+ enum cvmx_usb_stage stage;
+ struct urb *urb;
+};
+
+/**
+ * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
+ * and some USB device. It contains a list of pending
+ * requests to the device.
+ *
+ * @node: List node for pipe list
+ * @next: Pipe after this one in the list
+ * @transactions: List of pending transactions
+ * @interval: For periodic pipes, the interval between packets in
+ * frames
+ * @next_tx_frame: The next frame this pipe is allowed to transmit on
+ * @flags: State flags for this pipe
+ * @device_speed: Speed of device connected to this pipe
+ * @transfer_type: Type of transaction supported by this pipe
+ * @transfer_dir: IN or OUT. Ignored for Control
+ * @multi_count: Max packet in a row for the device
+ * @max_packet: The device's maximum packet size in bytes
+ * @device_addr: USB device address at other end of pipe
+ * @endpoint_num: USB endpoint number at other end of pipe
+ * @hub_device_addr: Hub address this device is connected to
+ * @hub_port: Hub port this device is connected to
+ * @pid_toggle: This toggles between 0/1 on every packet sent to track
+ * the data PID needed
+ * @channel: Hardware DMA channel for this pipe
+ * @split_sc_frame: The low order bits of the frame number the split
+ * complete should be sent on
+ */
+struct cvmx_usb_pipe {
+ struct list_head node;
+ struct list_head transactions;
+ uint64_t interval;
+ uint64_t next_tx_frame;
+ enum cvmx_usb_pipe_flags flags;
+ enum cvmx_usb_speed device_speed;
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_direction transfer_dir;
+ int multi_count;
+ uint16_t max_packet;
+ uint8_t device_addr;
+ uint8_t endpoint_num;
+ uint8_t hub_device_addr;
+ uint8_t hub_port;
+ uint8_t pid_toggle;
+ uint8_t channel;
+ int8_t split_sc_frame;
+};
+
+struct cvmx_usb_tx_fifo {
+ struct {
+ int channel;
+ int size;
+ uint64_t address;
+ } entry[MAX_CHANNELS+1];
+ int head;
+ int tail;
+};
+
+/**
+ * struct cvmx_usb_state - the state of the USB block
+ *
+ * @init_flags: Flags passed to initialize.
+ * @index: Which USB block this is for.
+ * @idle_hardware_channels: Bit set for every idle hardware channel.
+ * @usbcx_hprt: Stored port status so we don't need to read a CSR to
+ * determine splits.
+ * @pipe_for_channel: Map channels to pipes.
+ * @pipe: Storage for pipes.
+ * @indent: Used by debug output to indent functions.
+ * @port_status: Last port status used for change notification.
+ * @idle_pipes: List of open pipes that have no transactions.
+ * @active_pipes: Active pipes indexed by transfer type.
+ * @frame_number: Increments every SOF interrupt for time keeping.
+ * @active_split: Points to the current active split, or NULL.
+ */
+struct cvmx_usb_state {
+ int init_flags;
+ int index;
+ int idle_hardware_channels;
+ union cvmx_usbcx_hprt usbcx_hprt;
+ struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
+ int indent;
+ struct cvmx_usb_port_status port_status;
+ struct list_head idle_pipes;
+ struct list_head active_pipes[4];
+ uint64_t frame_number;
+ struct cvmx_usb_transaction *active_split;
+ struct cvmx_usb_tx_fifo periodic;
+ struct cvmx_usb_tx_fifo nonperiodic;
+};
+
struct octeon_hcd {
spinlock_t lock;
struct cvmx_usb_state usb;
@@ -31,107 +435,1565 @@ struct octeon_hcd {
struct list_head dequeue_list;
};
-/* convert between an HCD pointer and the corresponding struct octeon_hcd */
-static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
+/* This macro spins on a field waiting for it to reach a value */
+#define CVMX_WAIT_FOR_FIELD32(address, type, field, op, value, timeout_usec)\
+ ({int result; \
+ do { \
+ uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+ octeon_get_clock_rate() / 1000000; \
+ type c; \
+ while (1) { \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ if (c.s.field op (value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_get_cycle() > done) { \
+ result = -1; \
+ break; \
+ } else \
+ cvmx_wait(100); \
+ } \
+ } while (0); \
+ result; })
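
A hedged usage sketch of CVMX_WAIT_FOR_FIELD32 (the register macro, union type and field name below are placeholders, not real CSR definitions; the macro body also assumes a struct cvmx_usb_state pointer named usb is in scope):

	/* Wait up to 100us for a hypothetical `busy' bit to clear. */
	if (CVMX_WAIT_FOR_FIELD32(EXAMPLE_CSR_ADDRESS(usb->index),
				  union example_csr, busy, ==, 0, 100))
		return -ETIMEDOUT;
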
+
+/*
+ * This macro logically sets a single field in a CSR. It does the sequence
+ * read, modify, and write
+ */
+#define USB_SET_FIELD32(address, type, field, value) \
+ do { \
+ type c; \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ c.s.field = value; \
+ __cvmx_usb_write_csr32(usb, address, c.u32); \
+ } while (0)
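
Similarly, a sketch of USB_SET_FIELD32 with the same placeholder register and field names; it performs the read-modify-write described above without disturbing the other fields:

	/* Set a single hypothetical `enable' field to 1. */
	USB_SET_FIELD32(EXAMPLE_CSR_ADDRESS(usb->index),
			union example_csr, enable, 1);
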
+
+/* Returns the I/O address used to push/pop data to/from the FIFOs */
+#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
+
+static int octeon_usb_get_clock_type(void)
{
- return (struct octeon_hcd *)(hcd->hcd_priv);
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+ case CVMX_BOARD_TYPE_UBNT_E100:
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ }
+ return USB_CLOCK_TYPE_REF_48;
}
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
+/**
+ * Read a USB 32bit CSR. It performs the necessary address swizzle
+ * for 32bit CSRs.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
+ *
+ * Returns: Result of the read
+ */
+static inline uint32_t __cvmx_usb_read_csr32(struct cvmx_usb_state *usb,
+ uint64_t address)
{
- return container_of((void *)p, struct usb_hcd, hcd_priv);
+ uint32_t result = cvmx_read64_uint32(address ^ 4);
+ return result;
}
-static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
+
+/**
+ * Write a USB 32bit CSR. It performs the necessary address
+ * swizzle for 32bit CSRs.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
+ */
+static inline void __cvmx_usb_write_csr32(struct cvmx_usb_state *usb,
+ uint64_t address, uint32_t value)
{
- return container_of(p, struct octeon_hcd, usb);
+ cvmx_write64_uint32(address ^ 4, value);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
}
-static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
+
+/**
+ * Read a USB 64bit CSR.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
+ *
+ * Returns: Result of the read
+ */
+static inline uint64_t __cvmx_usb_read_csr64(struct cvmx_usb_state *usb,
+ uint64_t address)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- unsigned long flags;
+ uint64_t result = cvmx_read64_uint64(address);
+ return result;
+}
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_poll(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
+
+/**
+ * Write a USB 64bit CSR.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
+ */
+static inline void __cvmx_usb_write_csr64(struct cvmx_usb_state *usb,
+ uint64_t address, uint64_t value)
+{
+ cvmx_write64_uint64(address, value);
}
-static void octeon_usb_port_callback(struct cvmx_usb_state *usb,
- enum cvmx_usb_callback reason,
- enum cvmx_usb_complete status,
- int pipe_handle,
- int submit_handle,
- int bytes_transferred,
- void *user_data)
+/**
+ * Return non-zero if this pipe connects to a non-high-speed
+ * device through a high-speed hub.
+ *
+ * @usb: USB block this access is for
+ * @pipe: Pipe to check
+ *
+ * Returns: Non zero if we need to do split transactions
+ */
+static inline int __cvmx_usb_pipe_needs_split(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe)
{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+ return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
+ usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
+}
- spin_unlock(&priv->lock);
- usb_hcd_poll_rh_status(octeon_to_hcd(priv));
- spin_lock(&priv->lock);
+
+/**
+ * Trivial utility function to return the correct PID for a pipe
+ *
+ * @pipe: pipe to check
+ *
+ * Returns: PID for pipe
+ */
+static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
+{
+ if (pipe->pid_toggle)
+ return 2; /* Data1 */
+ else
+ return 0; /* Data0 */
}
-static int octeon_usb_start(struct usb_hcd *hcd)
+
+/**
+ * Return the number of USB ports supported by this Octeon
+ * chip. If the chip doesn't support USB, or is not supported
+ * by this API, a zero will be returned. Most Octeon chips
+ * support one USB port, but some support two ports.
+ * cvmx_usb_initialize() must be called on each independent
+ * struct cvmx_usb_state.
+ *
+ * Returns: Number of ports, zero if USB isn't supported
+ */
+static int cvmx_usb_get_num_ports(void)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- unsigned long flags;
+ int arch_ports = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ arch_ports = 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ arch_ports = 1;
+ else
+ arch_ports = 0;
+
+ return arch_ports;
+}
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @usb: Pointer to an empty struct cvmx_usb_state
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @usb_port_number:
+ * Which Octeon USB port to initialize.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
+ int usb_port_number)
+{
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+ union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
+ enum cvmx_usb_initialize_flags flags = 0;
+ int i;
+
+ /* At first allow 0-1 for the usb port number */
+ if ((usb_port_number < 0) || (usb_port_number > 1))
+ return -EINVAL;
+ /* For all chips except 52XX there is only one port */
+ if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
+ return -EINVAL;
+ /* Try to determine clock type automatically */
+ if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
+ /* Only 12 MHz crystals are supported */
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ } else {
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ switch (octeon_usb_get_clock_type()) {
+ case USB_CLOCK_TYPE_REF_12:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_24:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_48:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ }
+
+ memset(usb, 0, sizeof(*usb));
+ usb->init_flags = flags;
+
+ /* Initialize the USB state structure */
+ usb->index = usb_port_number;
+ INIT_LIST_HEAD(&usb->idle_pipes);
+ for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
+ INIT_LIST_HEAD(&usb->active_pipes[i]);
+
+ /*
+ * Power On Reset and PHY Initialization
+ *
+ * 1. Wait for DCOK to assert (nothing to do)
+ *
+ * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
+ * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
+ */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hrst = 0;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hclk_rst = 0;
+ usbn_clk_ctl.s.enable = 0;
+ /*
+ * 2b. Select the USB reference clock/crystal parameters by writing
+ * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
+ /*
+ * The USB port uses 12/24/48MHz 2.5V board clock
+ * source at USB_XO. USB_XI should be tied to GND.
+ * Most Octeon evaluation boards require this setting
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX))
+ /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
+ usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
+ else
+ /* From CN52XX manual */
+ usbn_clk_ctl.s.p_rtype = 1;
+
+ switch (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
+ usbn_clk_ctl.s.p_c_sel = 0;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
+ usbn_clk_ctl.s.p_c_sel = 1;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
+ usbn_clk_ctl.s.p_c_sel = 2;
+ break;
+ }
+ } else {
+ /*
+ * The USB port uses a 12MHz crystal as clock source
+ * at USB_XO and USB_XI
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
+ else
+ /* From CN56XX,CN52XX,CN50XX manuals. */
+ usbn_clk_ctl.s.p_rtype = 0;
+
+ usbn_clk_ctl.s.p_c_sel = 0;
+ }
+ /*
+ * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
+ * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
+ * such that USB is as close as possible to 125 MHz
+ */
+ {
+ int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
+ /* Lower than 4 doesn't seem to work properly */
+ if (divisor < 4)
+ divisor = 4;
+ usbn_clk_ctl.s.divide = divisor;
+ usbn_clk_ctl.s.divide2 = 0;
+ }
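+ /*
+ * Worked example (hypothetical clock rates): a 500 MHz core clock
+ * gives (500000000 + 124999999) / 125000000 = 4, i.e. HCLK = 125 MHz;
+ * an 800 MHz core clock gives 7, i.e. HCLK of roughly 114 MHz.
+ */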
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
+ usbn_clk_ctl.s.hclk_rst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
+ cvmx_wait(64);
+ /*
+ * 3. Program the power-on reset field in the USBN clock-control
+ * register:
+ * USBN_CLK_CTL[POR] = 0
+ */
+ usbn_clk_ctl.s.por = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 4. Wait 1 ms for PHY clock to start */
+ mdelay(1);
+ /*
+ * 5. Program the Reset input from automatic test equipment field in the
+ * USBP control and status register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
+ */
+ usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.s.ate_reset = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 6. Wait 10 cycles */
+ cvmx_wait(10);
+ /*
+ * 7. Clear ATE_RESET field in the USBN clock-control register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
+ */
+ usbn_usbp_ctl_status.s.ate_reset = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /*
+ * 8. Program the PHY reset field in the USBN clock-control register:
+ * USBN_CLK_CTL[PRST] = 1
+ */
+ usbn_clk_ctl.s.prst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /*
+ * 9. Program the USBP control and status register to select host or
+ * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
+ * device
+ */
+ usbn_usbp_ctl_status.s.hst_mode = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 10. Wait 1 us */
+ udelay(1);
+ /*
+ * 11. Program the hreset_n field in the USBN clock-control register:
+ * USBN_CLK_CTL[HRST] = 1
+ */
+ usbn_clk_ctl.s.hrst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 12. Proceed to USB core initialization */
+ usbn_clk_ctl.s.enable = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ udelay(1);
+
+ /*
+ * USB Core Initialization
+ *
+ * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
+ * determine USB core configuration parameters.
+ *
+ * Nothing needed
+ *
+ * 2. Program the following fields in the global AHB configuration
+ * register (USBC_GAHBCFG)
+ * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
+ * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
+ * Nonperiodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[NPTXFEMPLVL]
+ * Periodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[PTXFEMPLVL]
+ * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
+ */
+ {
+ union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
+ /* Due to an erratum, CN31XX doesn't support DMA */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+ usbcx_gahbcfg.u32 = 0;
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ /* Only use one channel with non DMA */
+ usb->idle_hardware_channels = 0x1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ /* CN5XXX chips have an erratum with channel 3 */
+ usb->idle_hardware_channels = 0xf7;
+ else
+ usb->idle_hardware_channels = 0xff;
+ usbcx_gahbcfg.s.hbstlen = 0;
+ usbcx_gahbcfg.s.nptxfemplvl = 1;
+ usbcx_gahbcfg.s.ptxfemplvl = 1;
+ usbcx_gahbcfg.s.glblintrmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
+ usbcx_gahbcfg.u32);
+ }
+ /*
+ * 3. Program the following fields in USBC_GUSBCFG register.
+ * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
+ * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
+ * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
+ * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
+ */
+ {
+ union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
+ usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.s.toutcal = 0;
+ usbcx_gusbcfg.s.ddrsel = 0;
+ usbcx_gusbcfg.s.usbtrdtim = 0x5;
+ usbcx_gusbcfg.s.phylpwrclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
+ usbcx_gusbcfg.u32);
+ }
+ /*
+ * 4. The software must unmask the following bits in the USBC_GINTMSK
+ * register.
+ * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
+ * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
+ */
+ {
+ union cvmx_usbcx_gintmsk usbcx_gintmsk;
+ int channel;
+
+ usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.s.otgintmsk = 1;
+ usbcx_gintmsk.s.modemismsk = 1;
+ usbcx_gintmsk.s.hchintmsk = 1;
+ usbcx_gintmsk.s.sofmsk = 0;
+ /* We need RX FIFO interrupts if we don't have DMA */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usbcx_gintmsk.s.rxflvlmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
+ usbcx_gintmsk.u32);
+
+ /*
+ * Disable all channel interrupts. We'll enable them per channel
+ * later.
+ */
+ for (channel = 0; channel < 8; channel++)
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ }
+
+ {
+ /*
+ * Host Port Initialization
+ *
+ * 1. Program the host-port interrupt-mask field to unmask,
+ * USBC_GINTMSK[PRTINT] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
+ prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
+ disconnintmsk, 1);
+ /*
+ * 2. Program the USBC_HCFG register to select full-speed host
+ * or high-speed host.
+ */
+ {
+ union cvmx_usbcx_hcfg usbcx_hcfg;
+ usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.s.fslssupp = 0;
+ usbcx_hcfg.s.fslspclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+ }
+ /*
+ * 3. Program the port power bit to drive VBUS on the USB,
+ * USBC_HPRT[PRTPWR] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtpwr, 1);
+
+ /*
+ * Steps 4-15 from the manual are done later in the port enable
+ */
+ }
- hcd->state = HC_STATE_RUNNING;
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_register_callback(&priv->usb, CVMX_USB_CALLBACK_PORT_CHANGED,
- octeon_usb_port_callback, NULL);
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
-static void octeon_usb_stop(struct usb_hcd *hcd)
+
+/**
+ * Shutdown a USB port after a call to cvmx_usb_initialize().
+ * The port should be disabled with all pipes closed when this
+ * function is called.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- unsigned long flags;
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+
+ /* Make sure all pipes are closed */
+ if (!list_empty(&usb->idle_pipes) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
+ return -EBUSY;
+
+ /* Disable the clocks and put them in power on reset */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.enable = 1;
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hclk_rst = 1;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hrst = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ return 0;
+}
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_register_callback(&priv->usb, CVMX_USB_CALLBACK_PORT_CHANGED,
- NULL, NULL);
- spin_unlock_irqrestore(&priv->lock, flags);
- hcd->state = HC_STATE_HALT;
+
+/**
+ * Enable a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_enable(struct cvmx_usb_state *usb)
+{
+ union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
+
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+
+ /*
+ * If the port is already enabled then just return. We don't need to do
+ * anything.
+ */
+ if (usb->usbcx_hprt.s.prtena)
+ return 0;
+
+ /* If there is nothing plugged into the port then fail immediately */
+ if (!usb->usbcx_hprt.s.prtconnsts) {
+ return -ETIMEDOUT;
+ }
+
+ /* Program the port reset bit to start the reset process */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 1);
+
+ /*
+ * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
+ * process to complete.
+ */
+ mdelay(50);
+
+ /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 0);
+
+ /* Wait for the USBC_HPRT[PRTENA]. */
+ if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtena, ==, 1, 100000))
+ return -ETIMEDOUT;
+
+ /*
+ * Read the port speed field to get the enumerated speed,
+ * USBC_HPRT[PRTSPD].
+ */
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+
+ /*
+ * 13. Program the USBC_GRXFSIZ register to select the size of the
+ * receive FIFO (25%).
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), union cvmx_usbcx_grxfsiz,
+ rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+ /*
+ * 14. Program the USBC_GNPTXFSIZ register to select the size and the
+ * start address of the non-periodic transmit FIFO for nonperiodic
+ * transactions (50%).
+ */
+ {
+ union cvmx_usbcx_gnptxfsiz siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
+ siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
+ }
+ /*
+ * 15. Program the USBC_HPTXFSIZ register to select the size and start
+ * address of the periodic transmit FIFO for periodic transactions
+ * (25%).
+ */
+ {
+ union cvmx_usbcx_hptxfsiz siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
+ siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
+ }
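+ /*
+ * Worked example of the FIFO carve-up above, assuming a hypothetical
+ * dfifodepth of 4096 words: RX FIFO words 0-1023 (25%), nonperiodic
+ * TX words 1024-3071 (50%), periodic TX words 3072-4095 (25%).
+ */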
+ /* Flush all FIFOs */
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ txfflsh, ==, 0, 100);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, rxfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ rxfflsh, ==, 0, 100);
+
+ return 0;
}
-static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ * Transactions in process will fail and call their
+ * associated callbacks.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_disable(struct cvmx_usb_state *usb)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ /* Disable the port */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtena, 1);
+ return 0;
+}
- return cvmx_usb_get_frame_number(&priv->usb);
+
+/**
+ * Get the current state of the USB port. Use this call to
+ * determine if the usb port has anything connected, is enabled,
+ * or has some sort of error condition. The return value of this
+ * call has "changed" bits to signal if the value of some fields
+ * has changed between calls.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: Port status information
+ */
+static struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *usb)
+{
+ union cvmx_usbcx_hprt usbc_hprt;
+ struct cvmx_usb_port_status result;
+
+ memset(&result, 0, sizeof(result));
+
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ result.port_enabled = usbc_hprt.s.prtena;
+ result.port_over_current = usbc_hprt.s.prtovrcurract;
+ result.port_powered = usbc_hprt.s.prtpwr;
+ result.port_speed = usbc_hprt.s.prtspd;
+ result.connected = usbc_hprt.s.prtconnsts;
+ result.connect_change = (result.connected != usb->port_status.connected);
+
+ return result;
+}
+
+/**
+ * Open a virtual pipe between the host and a USB device. A pipe
+ * must be opened before data can be transferred between a device
+ * and Octeon.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @device_addr:
+ * USB device address to open the pipe to
+ * (0-127).
+ * @endpoint_num:
+ * USB endpoint number to open the pipe to
+ * (0-15).
+ * @device_speed:
+ * The speed of the device the pipe is going
+ * to. This must match the device's speed,
+ * which may be different than the port speed.
+ * @max_packet: The maximum packet length the device can
+ * transmit/receive (low speed=0-8, full
+ * speed=0-1023, high speed=0-1024). This value
+ * comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <10:0>.
+ * @transfer_type:
+ * The type of transfer this pipe is for.
+ * @transfer_dir:
+ * The direction the pipe is in. This is not
+ * used for control pipes.
+ * @interval: For ISOCHRONOUS and INTERRUPT transfers,
+ * this is how often the transfer is scheduled
+ * for. All other transfers should specify
+ * zero. The units are in frames (8000/sec at
+ * high speed, 1000/sec for full speed).
+ * @multi_count:
+ * For high speed devices, this is the maximum
+ * allowed number of packets per microframe.
+ * Specify zero for non high speed devices. This
+ * value comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <12:11>.
+ * @hub_device_addr:
+ * Hub device address this device is connected
+ * to. Devices connected directly to Octeon
+ * use zero. This is only used when the device
+ * is full/low speed behind a high speed hub.
+ * The address will be of the high speed hub,
+ * not any full speed hubs after it.
+ * @hub_port: Which port on the hub the device is
+ * connected to. Use zero for devices connected
+ * directly to Octeon. Like hub_device_addr,
+ * this is only used for full/low speed
+ * devices behind a high speed hub.
+ *
+ * Returns: A non-NULL value is a pipe. NULL means an error.
+ */
+static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
+ int device_addr, int
+ endpoint_num,
+ enum cvmx_usb_speed
+ device_speed,
+ int max_packet,
+ enum cvmx_usb_transfer
+ transfer_type,
+ enum cvmx_usb_direction
+ transfer_dir,
+ int interval, int multi_count,
+ int hub_device_addr,
+ int hub_port)
+{
+ struct cvmx_usb_pipe *pipe;
+
+ if (unlikely((device_addr < 0) || (device_addr > MAX_USB_ADDRESS)))
+ return NULL;
+ if (unlikely((endpoint_num < 0) || (endpoint_num > MAX_USB_ENDPOINT)))
+ return NULL;
+ if (unlikely(device_speed > CVMX_USB_SPEED_LOW))
+ return NULL;
+ if (unlikely((max_packet <= 0) || (max_packet > 1024)))
+ return NULL;
+ if (unlikely(transfer_type > CVMX_USB_TRANSFER_INTERRUPT))
+ return NULL;
+ if (unlikely((transfer_dir != CVMX_USB_DIRECTION_OUT) &&
+ (transfer_dir != CVMX_USB_DIRECTION_IN)))
+ return NULL;
+ if (unlikely(interval < 0))
+ return NULL;
+ if (unlikely((transfer_type == CVMX_USB_TRANSFER_CONTROL) && interval))
+ return NULL;
+ if (unlikely(multi_count < 0))
+ return NULL;
+ if (unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
+ (multi_count != 0)))
+ return NULL;
+ if (unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
+ return NULL;
+ if (unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
+ return NULL;
+
+ pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
+ if (!pipe)
+ return NULL;
+ if ((device_speed == CVMX_USB_SPEED_HIGH) &&
+ (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (transfer_type == CVMX_USB_TRANSFER_BULK))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ pipe->device_addr = device_addr;
+ pipe->endpoint_num = endpoint_num;
+ pipe->device_speed = device_speed;
+ pipe->max_packet = max_packet;
+ pipe->transfer_type = transfer_type;
+ pipe->transfer_dir = transfer_dir;
+ INIT_LIST_HEAD(&pipe->transactions);
+
+ /*
+ * All pipes use interval to rate limit NAK processing. Force an
+ * interval if one wasn't supplied
+ */
+ if (!interval)
+ interval = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ pipe->interval = interval*8;
+ /* Force start splits to be scheduled on uFrame 0 */
+ pipe->next_tx_frame = ((usb->frame_number+7)&~7) + pipe->interval;
+ } else {
+ pipe->interval = interval;
+ pipe->next_tx_frame = usb->frame_number + pipe->interval;
+ }
+ pipe->multi_count = multi_count;
+ pipe->hub_device_addr = hub_device_addr;
+ pipe->hub_port = hub_port;
+ pipe->pid_toggle = 0;
+ pipe->split_sc_frame = -1;
+ list_add_tail(&pipe->node, &usb->idle_pipes);
+
+ /*
+ * We don't need to tell the hardware about this pipe yet since
+ * it doesn't have any submitted requests
+ */
+
+ return pipe;
+}
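+
+/*
+ * Usage sketch for cvmx_usb_open_pipe() with made-up values: open a high
+ * speed bulk OUT pipe to device address 3, endpoint 2, 512 byte max packets,
+ * attached directly to Octeon (so no hub address/port and no interval or
+ * multi count):
+ *
+ *	pipe = cvmx_usb_open_pipe(usb, 3, 2, CVMX_USB_SPEED_HIGH, 512,
+ *				  CVMX_USB_TRANSFER_BULK,
+ *				  CVMX_USB_DIRECTION_OUT, 0, 0, 0, 0);
+ */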
+
+
+/**
+ * Poll the RX FIFOs and remove data as needed. This function is only used
+ * in non DMA mode. It is very important that this function be called quickly
+ * enough to prevent FIFO overflow.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ */
+static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
+{
+ union cvmx_usbcx_grxstsph rx_status;
+ int channel;
+ int bytes;
+ uint64_t address;
+ uint32_t *ptr;
+
+ rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
+ /* Only read data if IN data is there */
+ if (rx_status.s.pktsts != 2)
+ return;
+ /* Check if no data is available */
+ if (!rx_status.s.bcnt)
+ return;
+
+ channel = rx_status.s.chnum;
+ bytes = rx_status.s.bcnt;
+ if (!bytes)
+ return;
+
+ /* Get where the DMA engine would have written this data */
+ address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8);
+ ptr = cvmx_phys_to_ptr(address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, address + bytes);
+
+ /* Loop writing the FIFO data for this packet into memory */
+ while (bytes > 0) {
+ *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
+ bytes -= 4;
+ }
+ CVMX_SYNCW;
+
+ return;
+}
+
+
+/**
+ * Fill the TX hardware fifo with data out of the software
+ * fifos
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @fifo: Software fifo to use
+ * @available: Amount of space in the hardware fifo
+ *
+ * Returns: Non zero if the hardware fifo was too small and needs
+ * to be serviced again.
+ */
+static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
+ struct cvmx_usb_tx_fifo *fifo, int available)
+{
+ /*
+ * We're done either when there isn't any more space or the software FIFO
+ * is empty
+ */
+ while (available && (fifo->head != fifo->tail)) {
+ int i = fifo->tail;
+ const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+ uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
+ int words = available;
+
+ /* Limit the amount of data to what the SW fifo has */
+ if (fifo->entry[i].size <= available) {
+ words = fifo->entry[i].size;
+ fifo->tail++;
+ if (fifo->tail > MAX_CHANNELS)
+ fifo->tail = 0;
+ }
+
+ /* Update the next locations and counts */
+ available -= words;
+ fifo->entry[i].address += words * 4;
+ fifo->entry[i].size -= words;
+
+ /*
+ * Write the HW fifo data. The read after every three writes is
+ * due to an erratum on CN3XXX chips
+ */
+ while (words > 3) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ words -= 3;
+ }
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words)
+ cvmx_write64_uint32(csr_address, *ptr++);
+ }
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ }
+ return fifo->head != fifo->tail;
+}
+
+
+/**
+ * Check the hardware FIFOs and fill them as needed
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ */
+static void __cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
+{
+ if (usb->periodic.head != usb->periodic.tail) {
+ union cvmx_usbcx_hptxsts tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 0);
+ }
+
+ if (usb->nonperiodic.head != usb->nonperiodic.tail) {
+ union cvmx_usbcx_gnptxsts tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 0);
+ }
+
+ return;
+}
+
+
+/**
+ * Fill the TX FIFO with an outgoing packet
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @channel: Channel number to get packet from
+ */
+static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
+{
+ union cvmx_usbcx_hccharx hcchar;
+ union cvmx_usbcx_hcspltx usbc_hcsplt;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ struct cvmx_usb_tx_fifo *fifo;
+
+ /* We only need to fill data on outbound channels */
+ hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
+ return;
+
+ /* OUT Splits only have data on the start and not the complete */
+ usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
+ if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
+ return;
+
+ /*
+ * Find out how many bytes we need to fill and convert it into 32bit
+ * words.
+ */
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ if (!usbc_hctsiz.s.xfersize)
+ return;
+
+ if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
+ (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ fifo = &usb->periodic;
+ else
+ fifo = &usb->nonperiodic;
+
+ fifo->entry[fifo->head].channel = channel;
+ fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
+ fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
+ fifo->head++;
+ if (fifo->head > MAX_CHANNELS)
+ fifo->head = 0;
+
+ __cvmx_usb_poll_tx_fifo(usb);
+
+ return;
+}
+
+/**
+ * Perform channel specific setup for Control transactions. All
+ * the generic stuff will already have been done in
+ * __cvmx_usb_start_channel()
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe for control transaction
+ */
+static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction =
+ list_first_entry(&pipe->transactions, typeof(*transaction),
+ node);
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int packets_to_transfer;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ cvmx_dprintf("%s: ERROR - Non control stage\n", __FUNCTION__);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = sizeof(*header);
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
+ /*
+ * Setup sends the control header instead of the buffer data. The
+ * buffer data will be used in the next stage
+ */
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, transaction->control_header);
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = 0;
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_DATA:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (header->s.request_type & 0x80)
+ bytes_to_transfer = 0;
+ else if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+ }
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (!(header->s.request_type & 0x80))
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the hardware.
+ * Further bytes will be sent as continued transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+ /* Round MAX_TRANSFER_BYTES down to a multiple of the packet size */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is zero
+ * we still need to transfer one packet
+ */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must be
+ * restarted between every packet for IN transactions, so there
+ * is no reason to do multiple packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+ * Limit the number of packets and data transferred to what the
+ * hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
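+ /*
+ * For a non-split transfer in DMA mode, e.g. a hypothetical 4096 byte
+ * DATA stage on a 64 byte endpoint, this yields xfersize = 4096 and
+ * pktcnt = 64 (assuming both stay within the MAX_TRANSFER_* limits);
+ * with CVMX_USB_INITIALIZE_FLAGS_NO_DMA it is cut down to a single
+ * 64 byte packet per channel start.
+ */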
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ return;
+}
+
+
+/**
+ * Start a channel to perform the pipe's head transaction
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe to start
+ */
+static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction =
+ list_first_entry(&pipe->transactions, typeof(*transaction),
+ node);
+
+ /* Make sure all writes to the DMA region get flushed */
+ CVMX_SYNCW;
+
+ /* Attach the channel to the pipe */
+ usb->pipe_for_channel[channel] = pipe;
+ pipe->channel = channel;
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /* Mark this channel as in use */
+ usb->idle_hardware_channels &= ~(1<<channel);
+
+ /* Enable the channel interrupt bits */
+ {
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hcintmskx usbc_hcintmsk;
+ union cvmx_usbcx_haintmsk usbc_haintmsk;
+
+ /* Clear all channel status bits */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
+
+ usbc_hcintmsk.u32 = 0;
+ usbc_hcintmsk.s.chhltdmsk = 1;
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /*
+ * Channels need these extra interrupts when we aren't
+ * in DMA mode.
+ */
+ usbc_hcintmsk.s.datatglerrmsk = 1;
+ usbc_hcintmsk.s.frmovrunmsk = 1;
+ usbc_hcintmsk.s.bblerrmsk = 1;
+ usbc_hcintmsk.s.xacterrmsk = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * Splits don't generate xfercompl, so we need
+ * ACK and NYET.
+ */
+ usbc_hcintmsk.s.nyetmsk = 1;
+ usbc_hcintmsk.s.ackmsk = 1;
+ }
+ usbc_hcintmsk.s.nakmsk = 1;
+ usbc_hcintmsk.s.stallmsk = 1;
+ usbc_hcintmsk.s.xfercomplmsk = 1;
+ }
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
+
+ /* Enable the channel interrupt to propagate */
+ usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index));
+ usbc_haintmsk.s.haintmsk |= 1<<channel;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32);
+ }
+
+ /* Setup the locations the DMA engines use */
+ {
+ uint64_t dma_address = transaction->buffer + transaction->actual_bytes;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ dma_address = transaction->buffer + transaction->iso_packets[0].offset + transaction->actual_bytes;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, dma_address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, dma_address);
+ }
+
+ /* Setup both the size of the transfer and the SPLIT characteristics */
+ {
+ union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
+ int packets_to_transfer;
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+
+ /*
+ * ISOCHRONOUS transactions store each individual transfer size
+ * in the packet structure, not the global buffer_length
+ */
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
+
+ /*
+ * We need to do split transactions when we are talking to non
+ * high speed devices that are behind a high speed hub
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * On the start split phase (stage is even) record the
+ * frame number we will need to send the split complete.
+ * We only store the lower seven bits, since frame numbers
+ * are compared modulo 128 and the complete split is at most
+ * two frames ahead
+ */
+ if ((transaction->stage&1) == 0) {
+ if (transaction->type == CVMX_USB_TRANSFER_BULK)
+ pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
+ else
+ pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
+ } else
+ pipe->split_sc_frame = -1;
+
+ usbc_hcsplt.s.spltena = 1;
+ usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
+ usbc_hcsplt.s.prtaddr = pipe->hub_port;
+ usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+
+ /*
+ * SPLIT transactions can only ever transmit one data
+ * packet so limit the transfer size to the max packet
+ * size
+ */
+ if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+
+ /*
+ * ISOCHRONOUS OUT splits are unique in that they limit
+ * data transfers to 188 byte chunks representing the
+ * begin/middle/end of the data, or all of it
+ */
+ if (!usbc_hcsplt.s.compsplt &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /*
+ * Clear the split complete frame number as
+ * there isn't going to be a split complete
+ */
+ pipe->split_sc_frame = -1;
+ /*
+ * See if we've started this transfer and sent
+ * data
+ */
+ if (transaction->actual_bytes == 0) {
+ /*
+ * Nothing sent yet, this is either a
+ * begin or the entire payload
+ */
+ if (bytes_to_transfer <= 188)
+ /* Entire payload in one go */
+ usbc_hcsplt.s.xactpos = 3;
+ else
+ /* First part of payload */
+ usbc_hcsplt.s.xactpos = 2;
+ } else {
+ /*
+ * Continuing the previous data, we must
+ * either be in the middle or at the end
+ */
+ if (bytes_to_transfer <= 188)
+ /* End of payload */
+ usbc_hcsplt.s.xactpos = 1;
+ else
+ /* Middle of payload */
+ usbc_hcsplt.s.xactpos = 0;
+ }
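+ /*
+ * For example, a 400 byte isochronous OUT split (on a pipe
+ * whose max_packet is larger than 188) goes out as 188 bytes
+ * (xactpos = 2, begin), then 188 bytes (xactpos = 0, middle),
+ * then 24 bytes (xactpos = 1, end).
+ */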
+ /*
+ * Again, the transfer size is limited to 188
+ * bytes
+ */
+ if (bytes_to_transfer > 188)
+ bytes_to_transfer = 188;
+ }
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the
+ * hardware. Further bytes will be sent as continued
+ * transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+ /*
+ * Round MAX_TRANSFER_BYTES down to a multiple of the packet
+ * size
+ */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is
+ * zero we still need to transfer one packet
+ */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must
+ * be restarted between every packet for IN
+ * transactions, so there is no reason to do multiple
+ * packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+ * Limit the number of packets and data transferred to
+ * what the hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ /* Update the DATA0/DATA1 toggle */
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ /*
+ * High speed pipes may need a hardware ping before they start
+ */
+ if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
+ usbc_hctsiz.s.dopng = 1;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ }
+
+ /* Setup the Host Channel Characteristics Register */
+ {
+ union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
+
+ /*
+ * Set the startframe odd/even properly. This is only used for
+ * periodic transfers
+ */
+ usbc_hcchar.s.oddfrm = usb->frame_number&1;
+
+ /*
+ * Set the number of back to back packets allowed by this
+ * endpoint. Split transactions interpret "ec" as the number of
+ * immediate retries on failure. These retries happen too
+ * quickly, so we disable these entirely for splits
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count < 1)
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count > 3)
+ usbc_hcchar.s.ec = 3;
+ else
+ usbc_hcchar.s.ec = pipe->multi_count;
+
+ /* Set the rest of the endpoint specific settings */
+ usbc_hcchar.s.devaddr = pipe->device_addr;
+ usbc_hcchar.s.eptype = transaction->type;
+ usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.epdir = pipe->transfer_dir;
+ usbc_hcchar.s.epnum = pipe->endpoint_num;
+ usbc_hcchar.s.mps = pipe->max_packet;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ }
+
+ /* Do transaction type specific fixups as needed */
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ __cvmx_usb_start_channel_control(usb, channel, pipe);
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISO transactions require different PIDs depending on
+ * direction and how many packets are needed
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ if (pipe->multi_count < 2) /* Need DATA0 */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 0);
+ else /* Need MDATA */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 3);
+ }
+ }
+ break;
+ }
+ {
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ transaction->xfersize = usbc_hctsiz.s.xfersize;
+ transaction->pktcnt = usbc_hctsiz.s.pktcnt;
+ }
+ /* Remember when we start a split transaction */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usb->active_split = transaction;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, chena, 1);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_fill_tx_fifo(usb, channel);
+ return;
+}
+
+
+/**
+ * Find a pipe that is ready to be scheduled to hardware.
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @list: Pipe list to search
+ * @current_frame:
+ * Frame counter to use as a time reference.
+ *
+ * Returns: Pipe or NULL if none are ready
+ */
+static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(struct cvmx_usb_state *usb, struct list_head *list, uint64_t current_frame)
+{
+ struct cvmx_usb_pipe *pipe;
+
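+ /*
+ * A pipe is ready when it is not already on a hardware channel, its
+ * next_tx_frame has been reached, any pending complete split falls
+ * within 64 frames (modulo 128) of its recorded start split, and no
+ * other split transaction is currently active.
+ */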
+ list_for_each_entry(pipe, list, node) {
+ struct cvmx_usb_transaction *t =
+ list_first_entry(&pipe->transactions, typeof(*t), node);
+ if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
+ (pipe->next_tx_frame <= current_frame) &&
+ ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
+ (!usb->active_split || (usb->active_split == t))) {
+ CVMX_PREFETCH(pipe, 128);
+ CVMX_PREFETCH(t, 0);
+ return pipe;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * Called whenever a pipe might need to be scheduled to the
+ * hardware.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @is_sof: True if this schedule was called on a SOF interrupt.
+ */
+static void __cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
+{
+ int channel;
+ struct cvmx_usb_pipe *pipe;
+ int need_sof;
+ enum cvmx_usb_transfer ttype;
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /*
+ * Without DMA we need to be careful to not schedule something
+ * at the end of a frame and cause an overrun.
+ */
+ union cvmx_usbcx_hfnum hfnum = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index))};
+ union cvmx_usbcx_hfir hfir = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFIR(usb->index))};
+ if (hfnum.s.frrem < hfir.s.frint/4)
+ goto done;
+ }
+
+ while (usb->idle_hardware_channels) {
+ /* Find an idle channel */
+ channel = __fls(usb->idle_hardware_channels);
+ if (unlikely(channel > 7))
+ break;
+
+ /* Find a pipe needing service */
+ pipe = NULL;
+ if (is_sof) {
+ /*
+ * Only process periodic pipes on SOF interrupts. This
+ * way we are sure that the periodic data is sent at the
+ * beginning of the frame
+ */
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
+ if (likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
+ }
+ if (likely(!pipe)) {
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
+ if (likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
+ }
+ if (!pipe)
+ break;
+
+ __cvmx_usb_start_channel(usb, channel, pipe);
+ }
+
+done:
+ /*
+ * Only enable SOF interrupts when we have transactions pending in the
+ * future that might need to be scheduled
+ */
+ need_sof = 0;
+ for (ttype = CVMX_USB_TRANSFER_CONTROL; ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
+ if (pipe->next_tx_frame > usb->frame_number) {
+ need_sof = 1;
+ break;
+ }
+ }
+ }
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, sofmsk, need_sof);
+ return;
+}
+
+static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
+{
+ return container_of(p, struct octeon_hcd, usb);
+}
+
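+/*
+ * The driver's private octeon_hcd lives in the hcd_priv area at the end of
+ * struct usb_hcd, so container_of() on that member recovers the usb_hcd.
+ */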
+static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
+{
+ return container_of((void *)p, struct usb_hcd, hcd_priv);
}
static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
- enum cvmx_usb_callback reason,
enum cvmx_usb_complete status,
- int pipe_handle,
- int submit_handle,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction
+ *transaction,
int bytes_transferred,
- void *user_data)
+ struct urb *urb)
{
struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
struct usb_hcd *hcd = octeon_to_hcd(priv);
struct device *dev = hcd->self.controller;
- struct urb *urb = user_data;
urb->actual_length = bytes_transferred;
urb->hcpriv = NULL;
- if (!list_empty(&urb->urb_list)) {
+ if (!list_empty(&urb->urb_list))
/*
* It is on the dequeue_list, but we are going to call
* usb_hcd_giveback_urb(), so we must clear it from
* the list. We got to it before the
* octeon_usb_urb_dequeue_work() tasklet did.
*/
- list_del(&urb->urb_list);
- /* No longer on the dequeue_list. */
- INIT_LIST_HEAD(&urb->urb_list);
- }
+ list_del_init(&urb->urb_list);
/* For Isochronous transactions we need to update the URB packet status
list from data in our private copy */
@@ -151,10 +2013,10 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
urb->iso_frame_desc[i].actual_length = iso_packet[i].length;
urb->actual_length += urb->iso_frame_desc[i].actual_length;
} else {
- dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%d submit=%d size=%d\n",
+ dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
i, urb->number_of_packets,
- iso_packet[i].status, pipe_handle,
- submit_handle, iso_packet[i].length);
+ iso_packet[i].status, pipe,
+ transaction, iso_packet[i].length);
urb->iso_frame_desc[i].status = -EREMOTEIO;
}
}
@@ -172,26 +2034,26 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
urb->status = -ENOENT;
break;
case CVMX_USB_COMPLETE_STALL:
- dev_dbg(dev, "status=stall pipe=%d submit=%d size=%d\n",
- pipe_handle, submit_handle, bytes_transferred);
+ dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
+ pipe, transaction, bytes_transferred);
urb->status = -EPIPE;
break;
case CVMX_USB_COMPLETE_BABBLEERR:
- dev_dbg(dev, "status=babble pipe=%d submit=%d size=%d\n",
- pipe_handle, submit_handle, bytes_transferred);
+ dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
+ pipe, transaction, bytes_transferred);
urb->status = -EPIPE;
break;
case CVMX_USB_COMPLETE_SHORT:
- dev_dbg(dev, "status=short pipe=%d submit=%d size=%d\n",
- pipe_handle, submit_handle, bytes_transferred);
+ dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
+ pipe, transaction, bytes_transferred);
urb->status = -EREMOTEIO;
break;
case CVMX_USB_COMPLETE_ERROR:
case CVMX_USB_COMPLETE_XACTERR:
case CVMX_USB_COMPLETE_DATATGLERR:
case CVMX_USB_COMPLETE_FRAMEERR:
- dev_dbg(dev, "status=%d pipe=%d submit=%d size=%d\n",
- status, pipe_handle, submit_handle, bytes_transferred);
+ dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
+ status, pipe, transaction, bytes_transferred);
urb->status = -EPROTO;
break;
}
@@ -200,14 +2062,952 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
spin_lock(&priv->lock);
}
+/**
+ * Signal the completion of a transaction and free it. The
+ * transaction will be removed from the pipe transaction list.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe the transaction is on
+ * @transaction:
+ * Transaction that completed
+ * @complete_code:
+ * Completion code
+ */
+static void __cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_complete complete_code)
+{
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+
+ /*
+ * Isochronous transactions need extra processing as they might not be
+ * done after a single data transfer
+ */
+ if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /* Update the number of bytes transferred in this ISO packet */
+ transaction->iso_packets[0].length = transaction->actual_bytes;
+ transaction->iso_packets[0].status = complete_code;
+
+ /*
+ * If there are more ISOs pending and we succeeded, schedule the
+ * next one
+ */
+ if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+ /* No bytes transferred for this packet as of yet */
+ transaction->actual_bytes = 0;
+ /* One less ISO waiting to transfer */
+ transaction->iso_number_packets--;
+ /* Increment to the next location in our packet array */
+ transaction->iso_packets++;
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ goto done;
+ }
+ }
+
+ /* Remove the transaction from the pipe list */
+ list_del(&transaction->node);
+ if (list_empty(&pipe->transactions))
+ list_move_tail(&pipe->node, &usb->idle_pipes);
+ octeon_usb_urb_complete_callback(usb, complete_code, pipe,
+ transaction,
+ transaction->actual_bytes,
+ transaction->urb);
+ kfree(transaction);
+done:
+ return;
+}
+
+
+/**
+ * Submit a usb transaction to a pipe. Called for all types
+ * of transactions.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Which pipe to submit to.
+ * @type: Transaction type
+ * @buffer: User buffer for the transaction
+ * @buffer_length:
+ * User buffer's length in bytes
+ * @control_header:
+ * For control transactions, the 8 byte standard header
+ * @iso_start_frame:
+ * For ISO transactions, the start frame
+ * @iso_number_packets:
+ * For ISO, the number of packets in the transaction.
+ * @iso_packets:
+ * A description of each ISO packet
+ * @urb: URB for the callback
+ *
+ * Returns: Transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ enum cvmx_usb_transfer type,
+ uint64_t buffer,
+ int buffer_length,
+ uint64_t control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ struct cvmx_usb_iso_packet *iso_packets,
+ struct urb *urb)
+{
+ struct cvmx_usb_transaction *transaction;
+
+ if (unlikely(pipe->transfer_type != type))
+ return NULL;
+
+ transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
+ if (unlikely(!transaction))
+ return NULL;
+
+ transaction->type = type;
+ transaction->buffer = buffer;
+ transaction->buffer_length = buffer_length;
+ transaction->control_header = control_header;
+ /* FIXME: This is not used, implement it. */
+ transaction->iso_start_frame = iso_start_frame;
+ transaction->iso_number_packets = iso_number_packets;
+ transaction->iso_packets = iso_packets;
+ transaction->urb = urb;
+ if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_SETUP;
+ else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+
+ if (!list_empty(&pipe->transactions)) {
+ list_add_tail(&transaction->node, &pipe->transactions);
+ } else {
+ list_add_tail(&transaction->node, &pipe->transactions);
+ list_move_tail(&pipe->node,
+ &usb->active_pipes[pipe->transfer_type]);
+
+ /*
+ * We may need to schedule the pipe if this was the head of the
+ * pipe.
+ */
+ __cvmx_usb_schedule(usb, 0);
+ }
+
+ return transaction;
+}
+
+
+/**
+ * Call to submit a USB Bulk transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ return __cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ urb);
+}
+
+
+/**
+ * Call to submit a USB Interrupt transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB returned when the callback is called.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ return __cvmx_usb_submit_transaction(usb, pipe,
+ CVMX_USB_TRANSFER_INTERRUPT,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ urb);
+}
+
+
+/**
+ * Call to submit a USB Control transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_control(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ int buffer_length = urb->transfer_buffer_length;
+ uint64_t control_header = urb->setup_dma;
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(control_header);
+
+ if ((header->s.request_type & 0x80) == 0)
+ buffer_length = le16_to_cpu(header->s.length);
+
+ return __cvmx_usb_submit_transaction(usb, pipe,
+ CVMX_USB_TRANSFER_CONTROL,
+ urb->transfer_dma, buffer_length,
+ control_header,
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ urb);
+}
+
+
+/**
+ * Call to submit a USB Isochronous transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB returned when the callback is called.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ struct cvmx_usb_iso_packet *packets;
+
+ packets = (struct cvmx_usb_iso_packet *) urb->setup_packet;
+ return __cvmx_usb_submit_transaction(usb, pipe,
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ 0, /* control_header */
+ urb->start_frame,
+ urb->number_of_packets,
+ packets, urb);
+}
+
+
+/**
+ * Cancel one outstanding request in a pipe. Canceling a request
+ * can fail if the transaction has already completed before cancel
+ * is called. Even after a successful cancel call, it may take
+ * a frame or two for the cvmx_usb_poll() function to call the
+ * associated callback.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to cancel requests in.
+ * @transaction: Transaction to cancel, returned by the submit function.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction)
+{
+ /*
+ * If the transaction is the HEAD of the queue and scheduled, we need to
+ * treat it specially
+ */
+ if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
+ transaction && (pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
+ union cvmx_usbcx_hccharx usbc_hcchar;
+
+ usb->pipe_for_channel[pipe->channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ CVMX_SYNCW;
+
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ /*
+ * If the channel isn't enabled then the transaction already
+ * completed.
+ */
+ if (usbc_hcchar.s.chena) {
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
+ }
+ }
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
+ return 0;
+}
+
+
+/**
+ * Cancel all outstanding requests in a pipe. Logically all this
+ * does is call cvmx_usb_cancel() in a loop.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to cancel requests in.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction, *next;
+
+ /* Simply loop through and attempt to cancel each transaction */
+ list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
+ int result = cvmx_usb_cancel(usb, pipe, transaction);
+ if (unlikely(result != 0))
+ return result;
+ }
+ return 0;
+}
+
+
+/**
+ * Close a pipe created with cvmx_usb_open_pipe().
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to close.
+ *
+ * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
+ * outstanding transfers.
+ */
+static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe)
+{
+ /* Fail if the pipe has pending transactions */
+ if (!list_empty(&pipe->transactions))
+ return -EBUSY;
+
+ list_del(&pipe->node);
+ kfree(pipe);
+
+ return 0;
+}
+
+/**
+ * Get the current USB protocol level frame number. The frame
+ * number is always in the range of 0-0x7ff.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: USB frame number
+ */
+static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
+{
+ int frame_number;
+ union cvmx_usbcx_hfnum usbc_hfnum;
+
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ frame_number = usbc_hfnum.s.frnum;
+
+ return frame_number;
+}
+
+
+/**
+ * Poll a channel for status
+ *
+ * @usb: USB device
+ * @channel: Channel to poll
+ *
+ * Returns: Zero on success
+ */
+static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
+{
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ union cvmx_usbcx_hccharx usbc_hcchar;
+ struct cvmx_usb_pipe *pipe;
+ struct cvmx_usb_transaction *transaction;
+ int bytes_this_transfer;
+ int bytes_in_last_packet;
+ int packets_processed;
+ int buffer_space_left;
+
+ /* Read the interrupt status bits for the channel */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+
+ if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
+ /*
+ * There seems to be a bug in CN31XX which can cause
+ * interrupt IN transfers to get stuck until we do a
+ * write of HCCHARX without changing things
+ */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ return 0;
+ }
+
+ /*
+ * In non-DMA mode the channels don't halt themselves. We need
+ * to manually disable channels that are left running
+ */
+ if (!usbc_hcint.s.chhltd) {
+ if (usbc_hcchar.s.chena) {
+ union cvmx_usbcx_hcintmskx hcintmsk;
+ /* Disable all interrupts except CHHLTD */
+ hcintmsk.u32 = 0;
+ hcintmsk.s.chhltdmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ return 0;
+ } else if (usbc_hcint.s.xfercompl) {
+ /*
+ * Successful IN/OUT with transfer complete.
+ * Channel halt isn't needed.
+ */
+ } else {
+ cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
+ return 0;
+ }
+ }
+ } else {
+ /*
+ * There are no interrupts that we need to process while the
+ * channel is still running
+ */
+ if (!usbc_hcint.s.chhltd)
+ return 0;
+ }
+
+ /* Disable the channel interrupts now that it is done */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ usb->idle_hardware_channels |= (1<<channel);
+
+ /* Make sure this channel is tied to a valid pipe */
+ pipe = usb->pipe_for_channel[channel];
+ CVMX_PREFETCH(pipe, 0);
+ CVMX_PREFETCH(pipe, 128);
+ if (!pipe)
+ return 0;
+ transaction = list_first_entry(&pipe->transactions, typeof(*transaction),
+ node);
+ CVMX_PREFETCH(transaction, 0);
+
+ /*
+ * Disconnect this pipe from the HW channel. Later the schedule
+ * function will figure out which pipe needs to go
+ */
+ usb->pipe_for_channel[channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /*
+ * Read the channel config info so we can figure out how much data
+ * was transferred
+ */
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ /*
+ * Calculating the number of bytes successfully transferred is dependent
+ * on the transfer direction
+ */
+ packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
+ if (usbc_hcchar.s.epdir) {
+ /*
+ * IN transactions are easy. For every byte received the
+ * hardware decrements xfersize. All we need to do is subtract
+ * the current value of xfersize from its starting value and we
+ * know how many bytes were written to the buffer
+ */
+ bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
+ } else {
+ /*
+ * OUT transactions don't decrement xfersize. Instead pktcnt is
+ * decremented on every successfully sent packet. The hardware
+ * does this when it receives an ACK or NYET. If it doesn't
+ * receive one of these responses, pktcnt doesn't change
+ */
+ bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
+ /*
+ * The last packet may not be a full transfer if we didn't have
+ * enough data
+ */
+ if (bytes_this_transfer > transaction->xfersize)
+ bytes_this_transfer = transaction->xfersize;
+ }
+ /* Figure out how many bytes were in the last packet of the transfer */
+ if (packets_processed)
+ bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
+ else
+ bytes_in_last_packet = bytes_this_transfer;
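+ /*
+ * For example, with usbc_hcchar.s.mps == 64, packets_processed == 3
+ * and bytes_this_transfer == 150, the calculation above yields
+ * 150 - 2 * 64 = 22 bytes in the last packet.
+ */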
+
+ /*
+ * As a special case, setup transactions output the setup header, not
+ * the user's data. For this reason we don't count setup data as bytes
+ * transferred
+ */
+ if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
+ (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+ bytes_this_transfer = 0;
+
+ /*
+ * Add the bytes transferred to the running total. It is important that
+ * bytes_this_transfer doesn't count any data that needs to be
+ * retransmitted
+ */
+ transaction->actual_bytes += bytes_this_transfer;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
+ else
+ buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
+
+ /*
+ * We need to remember the PID toggle state for the next transaction.
+ * The hardware already updated it for the next transaction
+ */
+ pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
+
+ /*
+ * For high speed bulk out, assume the next transaction will need to do
+ * a ping before proceeding. If this isn't true the ACK processing below
+ * will clear this flag
+ */
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ if (usbc_hcint.s.stall) {
+ /*
+ * STALL as a response means this transaction cannot be
+ * completed because the device can't process transactions. Tell
+ * the user. Any data that was transferred will be counted in
+ * the actual bytes transferred
+ */
+ pipe->pid_toggle = 0;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
+ } else if (usbc_hcint.s.xacterr) {
+ /*
+ * We know at least one packet worked if we get an ACK or NAK.
+ * Reset the retry counter
+ */
+ if (usbc_hcint.s.nak || usbc_hcint.s.ack)
+ transaction->retries = 0;
+ transaction->retries++;
+ if (transaction->retries > MAX_RETRIES) {
+ /*
+ * XactErr as a response means the device signaled
+ * something wrong with the transfer. For example, PID
+ * toggle errors cause these
+ */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
+ } else {
+ /*
+ * If this was a split then clear our split in progress
+ * marker
+ */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * Rewind to the beginning of the transaction by anding
+ * off the split complete bit
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ }
+ } else if (usbc_hcint.s.bblerr) {
+ /* Babble Error (BblErr) */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
+ } else if (usbc_hcint.s.datatglerr) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else if (usbc_hcint.s.nyet) {
+ /*
+ * NYET as a response is only allowed in three cases: as a
+ * response to a ping, as a response to a split transaction, and
+ * as a response to a bulk out. The ping case is handled by
+ * hardware, so we only have splits and bulk out
+ */
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->retries = 0;
+ /*
+ * If there is more data to go then we need to try
+ * again. Otherwise this transaction is complete
+ */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ } else {
+ /*
+ * Split transactions retry the split complete 4 times
+ * then rewind to the start split and do the entire
+ * transaction again
+ */
+ transaction->retries++;
+ if ((transaction->retries & 0x3) == 0) {
+ /*
+ * Rewind to the beginning of the transaction by
+ * anding off the split complete bit
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ }
+ }
+ } else if (usbc_hcint.s.ack) {
+ transaction->retries = 0;
+ /*
+ * The ACK bit can only be checked after the other error bits.
+ * This is because a multi packet transfer may succeed in a
+ * number of packets and then get a different response on the
+ * last packet. In this case both ACK and the last response bit
+ * will be set. If none of the other response bits is set, then
+ * the last packet must have been an ACK
+ *
+ * Since we got an ACK, we know we don't need to do a ping on
+ * this pipe
+ */
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ /* This should be impossible */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ pipe->pid_toggle = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ else {
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ {
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ /*
+ * For setup OUT data that are splits,
+ * the hardware doesn't appear to count
+ * transferred data. Here we manually
+ * update the data transferred
+ */
+ if (!usbc_hcchar.s.epdir) {
+ if (buffer_space_left < pipe->max_packet)
+ transaction->actual_bytes += buffer_space_left;
+ else
+ transaction->actual_bytes += pipe->max_packet;
+ }
+ } else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ } else {
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ }
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ else
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ }
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ /*
+ * The only time a bulk transfer isn't complete when it
+ * finishes with an ACK is during a split transaction.
+ * For splits we need to continue the transfer if more
+ * data is needed
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ else {
+ if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ else {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ } else {
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (usbc_hcint.s.nak))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISOCHRONOUS OUT splits don't require a
+ * complete split stage. Instead they use a
+ * sequence of begin OUT splits to transfer the
+ * data 188 bytes at a time. Once the transfer
+ * is complete, the pipe sleeps until the next
+ * schedule interval
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ /*
+ * If no space left or this wasn't a max
+ * size packet then this transfer is
+ * complete. Otherwise start it again to
+ * send the next 188 bytes
+ */
+ if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ } else {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ /*
+ * We are in the incoming data
+ * phase. Keep getting data
+ * until we run out of space or
+ * get a small packet
+ */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ } else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ }
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ break;
+ }
+ } else if (usbc_hcint.s.nak) {
+ /*
+ * If this was a split then clear our split in progress marker.
+ */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * NAK as a response means the device couldn't accept the
+ * transaction, but it should be retried in the future. Rewind
+ * to the beginning of the transaction by anding off the split
+ * complete bit. Retry in the next interval
+ */
+ transaction->retries = 0;
+ transaction->stage &= ~1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ } else {
+ struct cvmx_usb_port_status port;
+ port = cvmx_usb_get_status(usb);
+ if (port.port_enabled) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else {
+ /*
+ * We get channel halted interrupts with no result bits
+ * set when the cable is unplugged
+ */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ }
+ }
+ return 0;
+}
+
+static void octeon_usb_port_callback(struct cvmx_usb_state *usb)
+{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+
+ spin_unlock(&priv->lock);
+ usb_hcd_poll_rh_status(octeon_to_hcd(priv));
+ spin_lock(&priv->lock);
+}
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt based operation.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_poll(struct cvmx_usb_state *usb)
+{
+ union cvmx_usbcx_hfnum usbc_hfnum;
+ union cvmx_usbcx_gintsts usbc_gintsts;
+
+ CVMX_PREFETCH(usb, 0);
+ CVMX_PREFETCH(usb, 1*128);
+ CVMX_PREFETCH(usb, 2*128);
+ CVMX_PREFETCH(usb, 3*128);
+ CVMX_PREFETCH(usb, 4*128);
+
+ /* Update the frame counter */
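+ /*
+ * The low 14 bits of the software frame counter shadow the hardware
+ * value; if they are ahead of the current hardware reading, the
+ * hardware counter has wrapped, so advance the software counter by
+ * 0x4000 before merging in the new reading.
+ */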
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
+ usb->frame_number += 0x4000;
+ usb->frame_number &= ~0x3fffull;
+ usb->frame_number |= usbc_hfnum.s.frnum;
+
+ /* Read the pending interrupts */
+ usbc_gintsts.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
+
+ /* Clear the interrupts now that we know about them */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
+
+ if (usbc_gintsts.s.rxflvl) {
+ /*
+ * RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be
+ * read from the RxFIFO.
+ *
+ * In DMA mode this is handled by hardware
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_rx_fifo(usb);
+ }
+ if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
+ /* Fill the Tx FIFOs when not in DMA mode */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_tx_fifo(usb);
+ }
+ if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
+ union cvmx_usbcx_hprt usbc_hprt;
+ /*
+ * Disconnect Detected Interrupt (DisconnInt)
+ * Asserted when a device disconnect is detected.
+ *
+ * Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application
+ * must read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ *
+ * Call the user's port callback
+ */
+ octeon_usb_port_callback(usb);
+ /* Clear the port change bits */
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.s.prtena = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
+ }
+ if (usbc_gintsts.s.hchint) {
+ /*
+ * Host Channels Interrupt (HChInt)
+ * The core sets this bit to indicate that an interrupt is
+ * pending on one of the channels of the core (in Host mode).
+ * The application must read the Host All Channels Interrupt
+ * (HAINT) register to determine the exact number of the channel
+ * on which the interrupt occurred, and then read the
+ * corresponding Host Channel-n Interrupt (HCINTn) register to
+ * determine the exact cause of the interrupt. The application
+ * must clear the appropriate status bit in the HCINTn register
+ * to clear this bit.
+ */
+ union cvmx_usbcx_haint usbc_haint;
+ usbc_haint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
+ while (usbc_haint.u32) {
+ int channel;
+
+ channel = __fls(usbc_haint.u32);
+ __cvmx_usb_poll_channel(usb, channel);
+ usbc_haint.u32 ^= 1<<channel;
+ }
+ }
+
+ __cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
+
+ return 0;
+}
+
+/* convert between an HCD pointer and the corresponding struct octeon_hcd */
+static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
+{
+ return (struct octeon_hcd *)(hcd->hcd_priv);
+}
+
+static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_poll(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int octeon_usb_start(struct usb_hcd *hcd)
+{
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+
+static void octeon_usb_stop(struct usb_hcd *hcd)
+{
+ hcd->state = HC_STATE_HALT;
+}
+
+static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+
+ return cvmx_usb_get_frame_number(&priv->usb);
+}
+
static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct octeon_hcd *priv = hcd_to_octeon(hcd);
struct device *dev = hcd->self.controller;
- int submit_handle = -1;
- int pipe_handle;
+ struct cvmx_usb_transaction *transaction = NULL;
+ struct cvmx_usb_pipe *pipe;
unsigned long flags;
struct cvmx_usb_iso_packet *iso_packet;
struct usb_host_endpoint *ep = urb->ep;
@@ -276,26 +3076,24 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
dev = dev->parent;
}
}
- pipe_handle = cvmx_usb_open_pipe(&priv->usb,
- 0,
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe),
- speed,
- le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7ff,
- transfer_type,
- usb_pipein(urb->pipe) ? CVMX_USB_DIRECTION_IN : CVMX_USB_DIRECTION_OUT,
- urb->interval,
- (le16_to_cpu(ep->desc.wMaxPacketSize) >> 11) & 0x3,
- split_device,
- split_port);
- if (pipe_handle < 0) {
+ pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe), speed,
+ le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7ff,
+ transfer_type,
+ usb_pipein(urb->pipe) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT,
+ urb->interval,
+ (le16_to_cpu(ep->desc.wMaxPacketSize) >> 11) & 0x3,
+ split_device, split_port);
+ if (!pipe) {
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(dev, "Failed to create pipe\n");
return -ENOMEM;
}
- ep->hcpriv = (void *)(0x10000L + pipe_handle);
+ ep->hcpriv = pipe;
} else {
- pipe_handle = 0xffff & (long)ep->hcpriv;
+ pipe = ep->hcpriv;
}
switch (usb_pipetype(urb->pipe)) {
@@ -323,20 +3121,13 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
* this saves us a bunch of logic.
*/
urb->setup_packet = (char *)iso_packet;
- submit_handle = cvmx_usb_submit_isochronous(&priv->usb, pipe_handle,
- urb->start_frame,
- 0 /* flags */ ,
- urb->number_of_packets,
- iso_packet,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- octeon_usb_urb_complete_callback,
- urb);
+ transaction = cvmx_usb_submit_isochronous(&priv->usb,
+ pipe, urb);
/*
* If submit failed we need to free our private packet
* list.
*/
- if (submit_handle < 0) {
+ if (!transaction) {
urb->setup_packet = NULL;
kfree(iso_packet);
}
@@ -345,59 +3136,41 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
case PIPE_INTERRUPT:
dev_dbg(dev, "Submit interrupt to %d.%d\n",
usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
- submit_handle = cvmx_usb_submit_interrupt(&priv->usb, pipe_handle,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- octeon_usb_urb_complete_callback,
- urb);
+ transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb);
break;
case PIPE_CONTROL:
dev_dbg(dev, "Submit control to %d.%d\n",
usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
- submit_handle = cvmx_usb_submit_control(&priv->usb, pipe_handle,
- urb->setup_dma,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- octeon_usb_urb_complete_callback,
- urb);
+ transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb);
break;
case PIPE_BULK:
dev_dbg(dev, "Submit bulk to %d.%d\n",
usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
- submit_handle = cvmx_usb_submit_bulk(&priv->usb, pipe_handle,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- octeon_usb_urb_complete_callback,
- urb);
+ transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb);
break;
}
- if (submit_handle < 0) {
+ if (!transaction) {
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(dev, "Failed to submit\n");
return -ENOMEM;
}
- urb->hcpriv = (void *)(long)(((submit_handle & 0xffff) << 16) | pipe_handle);
+ urb->hcpriv = transaction;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static void octeon_usb_urb_dequeue_work(unsigned long arg)
{
+ struct urb *urb;
+ struct urb *next;
unsigned long flags;
struct octeon_hcd *priv = (struct octeon_hcd *)arg;
spin_lock_irqsave(&priv->lock, flags);
- while (!list_empty(&priv->dequeue_list)) {
- int pipe_handle;
- int submit_handle;
- struct urb *urb = container_of(priv->dequeue_list.next, struct urb, urb_list);
- list_del(&urb->urb_list);
- /* not enqueued on dequeue_list */
- INIT_LIST_HEAD(&urb->urb_list);
- pipe_handle = 0xffff & (long)urb->hcpriv;
- submit_handle = ((long)urb->hcpriv) >> 16;
- cvmx_usb_cancel(&priv->usb, pipe_handle, submit_handle);
+ list_for_each_entry_safe(urb, next, &priv->dequeue_list, urb_list) {
+ list_del_init(&urb->urb_list);
+ cvmx_usb_cancel(&priv->usb, urb->ep->hcpriv, urb->hcpriv);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -429,12 +3202,12 @@ static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, struct usb_host_end
if (ep->hcpriv) {
struct octeon_hcd *priv = hcd_to_octeon(hcd);
- int pipe_handle = 0xffff & (long)ep->hcpriv;
+ struct cvmx_usb_pipe *pipe = ep->hcpriv;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_cancel_all(&priv->usb, pipe_handle);
- if (cvmx_usb_close_pipe(&priv->usb, pipe_handle))
- dev_dbg(dev, "Closing pipe %d failed\n", pipe_handle);
+ cvmx_usb_cancel_all(&priv->usb, pipe);
+ if (cvmx_usb_close_pipe(&priv->usb, pipe))
+ dev_dbg(dev, "Closing pipe %p failed\n", pipe);
spin_unlock_irqrestore(&priv->lock, flags);
ep->hcpriv = NULL;
}
@@ -506,7 +3279,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " C_CONNECTION\n");
/* Clears drivers internal connect status change flag */
spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_RESET:
@@ -515,7 +3288,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Clears the driver's internal Port Reset Change flag.
*/
spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_ENABLE:
@@ -525,7 +3298,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Change flag.
*/
spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_SUSPEND:
@@ -540,7 +3313,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " C_OVER_CURRENT\n");
/* Clears the driver's overcurrent Change flag */
spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
default:
@@ -705,7 +3478,7 @@ static int octeon_usb_driver_probe(struct device *dev)
tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num, CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO);
+ status = cvmx_usb_initialize(&priv->usb, usb_num);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
diff --git a/drivers/staging/octeon-usb/cvmx-usbcx-defs.h b/drivers/staging/octeon-usb/octeon-hcd.h
index d349d77bc35..42fe4fec7d5 100644
--- a/drivers/staging/octeon-usb/cvmx-usbcx-defs.h
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -1,7 +1,14 @@
-/***********************license start***************
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
+/*
+ * Octeon HCD hardware register definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
+ * Some parts of the code were originally released under BSD license:
+ *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -14,17 +21,17 @@
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
-
+ *
* * Neither the name of Cavium Networks nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
-
+ *
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
+ * regulations, and may be subject to export or import regulations in other
* countries.
-
+ *
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
@@ -33,20 +40,12 @@
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- ***********************license end**************************************/
-
-
-/**
- * cvmx-usbcx-defs.h
- *
- * Configuration and status register (CSR) type definitions for
- * Octeon usbcx.
- *
*/
-#ifndef __CVMX_USBCX_TYPEDEFS_H__
-#define __CVMX_USBCX_TYPEDEFS_H__
+
+#ifndef __OCTEON_HCD_H__
+#define __OCTEON_HCD_H__
#define CVMX_USBCXBASE 0x00016F0010000000ull
#define CVMX_USBCXREG1(reg, bid) \
@@ -81,6 +80,19 @@
#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
+#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
+#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
+
+#define CVMX_USBNXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
+#define CVMX_USBNXREG2(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
+
+#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
+#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
+#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
+#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
+
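+/*
+ * For example, CVMX_USBNX_CLK_CTL(0) expands to
+ * CVMX_ADD_IO_SEG(0x0001180068000000ull | 0x10) + CVMX_USBNXBID1(0),
+ * i.e. CVMX_ADD_IO_SEG(0x0001180068000010ull) for USB block 0.
+ */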
/**
* cvmx_usbc#_gahbcfg
*
@@ -1525,4 +1537,283 @@ union cvmx_usbcx_hptxsts {
} s;
};
-#endif
+/**
+ * cvmx_usbn#_clk_ctl
+ *
+ * USBN_CLK_CTL = USBN's Clock Control
+ *
+ * This register is used to control the frequency of the hclk and the
+ * hreset and phy_rst signals.
+ */
+union cvmx_usbnx_clk_ctl {
+ uint64_t u64;
+ /**
+ * struct cvmx_usbnx_clk_ctl_s
+ * @divide2: The 'hclk' used by the USB subsystem is derived
+ * from the eclk.
+ * Also see the field DIVIDE. DIVIDE2<1> must currently
+ * be zero because it is not implemented, so the maximum
+ * ratio of eclk/hclk is currently 16.
+ * The actual divide number for hclk is:
+ * (DIVIDE2 + 1) * (DIVIDE + 1)
+ * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
+ * generate the hclk in the USB Subsystem is held
+ * in reset. This bit must be set to '0' before
+ * changing the value of DIVIDE in this register.
+ * The reset to the HCLK_DIVIDER is also asserted
+ * when core reset is asserted.
+ * @p_x_on: Force USB-PHY on during suspend.
+ * '1' USB-PHY XO block is powered-down during
+ * suspend.
+ * '0' USB-PHY XO block is powered-up during
+ * suspend.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_rtype: PHY reference clock type
+ * On CN50XX/CN52XX/CN56XX the values are:
+ * '0' The USB-PHY uses a 12MHz crystal as a clock source
+ * at the USB_XO and USB_XI pins.
+ * '1' Reserved.
+ * '2' The USB-PHY uses a 12/24/48 MHz 2.5V board clock at the
+ * USB_XO pin. USB_XI should be tied to ground in this
+ * case.
+ * '3' Reserved.
+ * On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
+ * '0' Reserved.
+ * '1' Reserved.
+ * '2' The PHY PLL uses the XO block output as a reference.
+ * The XO block uses an external clock supplied on the
+ * XO pin. USB_XI should be tied to ground for this
+ * usage.
+ * '3' The XO block uses the clock from a crystal.
+ * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ * remain powered in Suspend Mode.
+ * '1' The USB-PHY XO Bias, Bandgap and PLL are
+ * powered down in suspend mode.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_c_sel: Phy clock speed select.
+ * Selects the reference clock / crystal frequency.
+ * '11': Reserved
+ * '10': 48 MHz (reserved when a crystal is used)
+ * '01': 24 MHz (reserved when a crystal is used)
+ * '00': 12 MHz
+ * The value of this field must be set while POR is
+ * active.
+ * NOTE: if a crystal is used as a reference clock,
+ * this field must be set to 12 MHz.
+ * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+ * @sd_mode: Scaledown mode for the USBC. Controls timing events
+ * in the USBC; for normal operation this must be '0'.
+ * @s_bist: Starts BIST on the hclk memories during the '0'
+ * to '1' transition.
+ * @por: Power On Reset for the PHY.
+ * Resets all the PHY's registers and state machines.
+ * @enable: When '1' allows the generation of the hclk. When
+ * '0' the hclk will not be generated. SEE DIVIDE
+ * field of this register.
+ * @prst: When this field is '0' the reset associated with
+ * the phy_clk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until the time it takes 6 clocks (hclk or phy_clk,
+ * whichever is slower) has passed. Under normal
+ * operation once this bit is set to '1' it should not
+ * be set to '0'.
+ * @hrst: When this field is '0' the reset associated with
+ * the hclk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 12ms after phy_clk is stable. Under normal
+ * operation, once this bit is set to '1' it should
+ * not be set to '0'.
+ * @divide: The frequency of 'hclk' used by the USB subsystem
+ * is the eclk frequency divided by the value of
+ * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ * DIVIDE2 of this register.
+ * The hclk frequency should be less than 125 MHz.
+ * After writing a value to this field the SW should
+ * read the field for the value written.
+ * The ENABLE field of this register should not be set
+ * until AFTER this field is set and then read.
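+ * For example (illustrative numbers only), a 600 MHz eclk with
+ * DIVIDE == 5 and DIVIDE2 == 0 yields an hclk of
+ * 600 / ((0 + 1) * (5 + 1)) = 100 MHz, below the 125 MHz limit.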
+ */
+ struct cvmx_usbnx_clk_ctl_s {
+ uint64_t reserved_20_63 : 44;
+ uint64_t divide2 : 2;
+ uint64_t hclk_rst : 1;
+ uint64_t p_x_on : 1;
+ uint64_t p_rtype : 2;
+ uint64_t p_com_on : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t sd_mode : 2;
+ uint64_t s_bist : 1;
+ uint64_t por : 1;
+ uint64_t enable : 1;
+ uint64_t prst : 1;
+ uint64_t hrst : 1;
+ uint64_t divide : 3;
+ } s;
+};
+
+/**
+ * cvmx_usbn#_usbp_ctl_status
+ *
+ * USBN_USBP_CTL_STATUS = USBP Control And Status Register
+ *
+ * Contains general control and status information for the USBN block.
+ */
+union cvmx_usbnx_usbp_ctl_status {
+ uint64_t u64;
+ /**
+ * struct cvmx_usbnx_usbp_ctl_status_s
+ * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
+ * @txvreftune: HS DC Voltage Level Adjustment
+ * @txfslstune: FS/LS Source Impedance Adjustment
+ * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
+ * @sqrxtune: Squelch Threshold Adjustment
+ * @compdistune: Disconnect Threshold Adjustment
+ * @otgtune: VBUS Valid Threshold Adjustment
+ * @otgdisable: OTG Block Disable
+ * @portreset: Per_Port Reset
+ * @drvvbus: Drive VBUS
+ * @lsbist: Low-Speed BIST Enable.
+ * @fsbist: Full-Speed BIST Enable.
+ * @hsbist: High-Speed BIST Enable.
+ * @bist_done: PHY Bist Done.
+ * Asserted at the end of the PHY BIST sequence.
+ * @bist_err: PHY Bist Error.
+ * Indicates an internal error was detected during
+ * the BIST sequence.
+ * @tdata_out: PHY Test Data Out.
+ * Presents either internally generated signals or
+ * test register contents, based upon the value of
+ * test_data_out_sel.
+ * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
+ * Normally should be set to zero.
+ * When customers have no intent to use the USB PHY
+ * interface, they should:
+ * - still provide 3.3V to USB_VDD33, and
+ * - tie USB_REXT to 3.3V supply, and
+ * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
+ * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
+ * @dma_bmode: When set to 1 the L2C DMA address will be updated
+ * with byte-counts between packets. When set to 0
+ * the L2C DMA address is incremented to the next
+ * 4-byte aligned address after adding byte-count.
+ * @usbc_end: Big-endian input to the USB Core. This should be
+ * set to '0' for operation.
+ * @usbp_bist: PHY BIST. This is cleared to '0' to run BIST on the USBP.
+ * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
+ * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D+ line. '1' pull-down resistance is connected
+ * to D+. '0' pull-down resistance is not connected
+ * to D+. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D- line. '1' pull-down resistance is connected
+ * to D-. '0' pull-down resistance is not connected
+ * to D-. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @hst_mode: When '0' the USB is acting as HOST, when '1'
+ * USB is acting as device. This field needs to be
+ * set while the USB is in reset.
+ * @tuning: Transmitter Tuning for High-Speed Operation.
+ * Tunes the current supply and rise/fall output
+ * times for high-speed operation.
+ * [20:19] == 11: Current supply increased
+ * approximately 9%
+ * [20:19] == 10: Current supply increased
+ * approximately 4.5%
+ * [20:19] == 01: Design default.
+ * [20:19] == 00: Current supply decreased
+ * approximately 4.5%
+ * [22:21] == 11: Rise and fall times are increased.
+ * [22:21] == 10: Design default.
+ * [22:21] == 01: Rise and fall times are decreased.
+ * [22:21] == 00: Rise and fall times are decreased
+ * further as compared to the 01 setting.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ * Enables or disables bit stuffing on data[15:8]
+ * when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ * Enables or disables bit stuffing on data[7:0]
+ * when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ * '1': During data transmission the receive is
+ * enabled.
+ * '0': During data transmission the receive is
+ * disabled.
+ * Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ * '1' The PHY's analog_test pin is enabled for the
+ * input and output of applicable analog test signals.
+ * '0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ * Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ * '1' test_data_out[3:0] (PHY) register contents
+ * are output. '0' internally generated signals are
+ * output.
+ * @taddr_in: Mode Address for Test Interface.
+ * Specifies the register address for writing to or
+ * reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ * This is a test bus. Data is present on [3:0],
+ * and its corresponding select (enable) is present
+ * on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ * This is a test signal. When the USB Core is
+ * powered up (not in Suspend Mode), an automatic
+ * tester can use this to disable phy_clock and
+ * free_clk, then re-enable them with an aligned
+ * phase.
+ * '1': The phy_clk and free_clk outputs are
+ * disabled. '0': The phy_clock and free_clk outputs
+ * are available within a specific period after the
+ * de-assertion.
+ */
+ struct cvmx_usbnx_usbp_ctl_status_s {
+ uint64_t txrisetune : 1;
+ uint64_t txvreftune : 4;
+ uint64_t txfslstune : 4;
+ uint64_t txhsxvtune : 2;
+ uint64_t sqrxtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t otgtune : 3;
+ uint64_t otgdisable : 1;
+ uint64_t portreset : 1;
+ uint64_t drvvbus : 1;
+ uint64_t lsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t hsbist : 1;
+ uint64_t bist_done : 1;
+ uint64_t bist_err : 1;
+ uint64_t tdata_out : 4;
+ uint64_t siddq : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t usbc_end : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t tclk : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t hst_mode : 1;
+ uint64_t tuning : 4;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t loop_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t bist_enb : 1;
+ uint64_t tdata_sel : 1;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_in : 8;
+ uint64_t ate_reset : 1;
+ } s;
+};
+
+#endif /* __OCTEON_HCD_H__ */
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index e14a1bb0436..0315f60497b 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -72,7 +72,7 @@ struct cvm_oct_core_state {
int baseline_cores;
/*
* The number of additional cores that could be processing
- * input packtes.
+ * input packets.
*/
atomic_t available_cores;
cpumask_t cpu_state;
@@ -80,6 +80,8 @@ struct cvm_oct_core_state {
static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
+static int cvm_irq_cpu;
+
static void cvm_oct_enable_napi(void *_)
{
int cpu = smp_processor_id();
@@ -112,11 +114,7 @@ static void cvm_oct_no_more_work(void)
{
int cpu = smp_processor_id();
- /*
- * CPU zero is special. It always has the irq enabled when
- * waiting for incoming packets.
- */
- if (cpu == 0) {
+ if (cpu == cvm_irq_cpu) {
enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
return;
}
@@ -135,6 +133,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ cvm_irq_cpu = smp_processor_id();
cvm_oct_enable_napi(NULL);
return IRQ_HANDLED;
@@ -514,7 +513,7 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
+ if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
atomic_set(&core_state.available_cores, max_rx_cpus);
else
atomic_set(&core_state.available_cores, num_online_cpus());
@@ -526,7 +525,7 @@ void cvm_oct_rx_initialize(void)
cvm_oct_napi_poll, rx_napi_weight);
napi_enable(&cvm_oct_napi[i].napi);
}
- /* Register an IRQ hander for to receive POW interrupts */
+ /* Register an IRQ handler to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index af8d62818f1..5108bc0bb57 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -64,31 +64,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
if (spx_int_reg.s.spf)
pr_err("SPI1: SRX Spi4 interface down\n");
if (spx_int_reg.s.calerr)
- pr_err("SPI1: SRX Spi4 Calendar table "
- "parity error\n");
+ pr_err("SPI1: SRX Spi4 Calendar table parity error\n");
if (spx_int_reg.s.syncerr)
- pr_err("SPI1: SRX Consecutive Spi4 DIP4 "
- "errors have exceeded "
- "SPX_ERR_CTL[ERRCNT]\n");
+ pr_err("SPI1: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
if (spx_int_reg.s.diperr)
pr_err("SPI1: SRX Spi4 DIP4 error\n");
if (spx_int_reg.s.tpaovr)
- pr_err("SPI1: SRX Selected port has hit "
- "TPA overflow\n");
+ pr_err("SPI1: SRX Selected port has hit TPA overflow\n");
if (spx_int_reg.s.rsverr)
- pr_err("SPI1: SRX Spi4 reserved control "
- "word detected\n");
+ pr_err("SPI1: SRX Spi4 reserved control word detected\n");
if (spx_int_reg.s.drwnng)
- pr_err("SPI1: SRX Spi4 receive FIFO "
- "drowning/overflow\n");
+ pr_err("SPI1: SRX Spi4 receive FIFO drowning/overflow\n");
if (spx_int_reg.s.clserr)
- pr_err("SPI1: SRX Spi4 packet closed on "
- "non-16B alignment without EOP\n");
+ pr_err("SPI1: SRX Spi4 packet closed on non-16B alignment without EOP\n");
if (spx_int_reg.s.spiovr)
pr_err("SPI1: SRX Spi4 async FIFO overflow\n");
if (spx_int_reg.s.abnorm)
- pr_err("SPI1: SRX Abnormal packet "
- "termination (ERR bit)\n");
+ pr_err("SPI1: SRX Abnormal packet termination (ERR bit)\n");
if (spx_int_reg.s.prtnxa)
pr_err("SPI1: SRX Port out of range\n");
}
@@ -99,31 +91,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1));
if (stx_int_reg.s.syncerr)
- pr_err("SPI1: STX Interface encountered a "
- "fatal error\n");
+ pr_err("SPI1: STX Interface encountered a fatal error\n");
if (stx_int_reg.s.frmerr)
- pr_err("SPI1: STX FRMCNT has exceeded "
- "STX_DIP_CNT[MAXFRM]\n");
+ pr_err("SPI1: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
if (stx_int_reg.s.unxfrm)
- pr_err("SPI1: STX Unexpected framing "
- "sequence\n");
+ pr_err("SPI1: STX Unexpected framing sequence\n");
if (stx_int_reg.s.nosync)
- pr_err("SPI1: STX ERRCNT has exceeded "
- "STX_DIP_CNT[MAXDIP]\n");
+ pr_err("SPI1: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
if (stx_int_reg.s.diperr)
- pr_err("SPI1: STX DIP2 error on the Spi4 "
- "Status channel\n");
+ pr_err("SPI1: STX DIP2 error on the Spi4 Status channel\n");
if (stx_int_reg.s.datovr)
pr_err("SPI1: STX Spi4 FIFO overflow error\n");
if (stx_int_reg.s.ovrbst)
- pr_err("SPI1: STX Transmit packet burst "
- "too big\n");
+ pr_err("SPI1: STX Transmit packet burst too big\n");
if (stx_int_reg.s.calpar1)
- pr_err("SPI1: STX Calendar Table Parity "
- "Error Bank1\n");
+ pr_err("SPI1: STX Calendar Table Parity Error Bank1\n");
if (stx_int_reg.s.calpar0)
- pr_err("SPI1: STX Calendar Table Parity "
- "Error Bank0\n");
+ pr_err("SPI1: STX Calendar Table Parity Error Bank0\n");
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0);
@@ -144,31 +128,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
if (spx_int_reg.s.spf)
pr_err("SPI0: SRX Spi4 interface down\n");
if (spx_int_reg.s.calerr)
- pr_err("SPI0: SRX Spi4 Calendar table "
- "parity error\n");
+ pr_err("SPI0: SRX Spi4 Calendar table parity error\n");
if (spx_int_reg.s.syncerr)
- pr_err("SPI0: SRX Consecutive Spi4 DIP4 "
- "errors have exceeded "
- "SPX_ERR_CTL[ERRCNT]\n");
+ pr_err("SPI0: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
if (spx_int_reg.s.diperr)
pr_err("SPI0: SRX Spi4 DIP4 error\n");
if (spx_int_reg.s.tpaovr)
- pr_err("SPI0: SRX Selected port has hit "
- "TPA overflow\n");
+ pr_err("SPI0: SRX Selected port has hit TPA overflow\n");
if (spx_int_reg.s.rsverr)
- pr_err("SPI0: SRX Spi4 reserved control "
- "word detected\n");
+ pr_err("SPI0: SRX Spi4 reserved control word detected\n");
if (spx_int_reg.s.drwnng)
- pr_err("SPI0: SRX Spi4 receive FIFO "
- "drowning/overflow\n");
+ pr_err("SPI0: SRX Spi4 receive FIFO drowning/overflow\n");
if (spx_int_reg.s.clserr)
- pr_err("SPI0: SRX Spi4 packet closed on "
- "non-16B alignment without EOP\n");
+ pr_err("SPI0: SRX Spi4 packet closed on non-16B alignment without EOP\n");
if (spx_int_reg.s.spiovr)
pr_err("SPI0: SRX Spi4 async FIFO overflow\n");
if (spx_int_reg.s.abnorm)
- pr_err("SPI0: SRX Abnormal packet "
- "termination (ERR bit)\n");
+ pr_err("SPI0: SRX Abnormal packet termination (ERR bit)\n");
if (spx_int_reg.s.prtnxa)
pr_err("SPI0: SRX Port out of range\n");
}
@@ -179,31 +155,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0));
if (stx_int_reg.s.syncerr)
- pr_err("SPI0: STX Interface encountered a "
- "fatal error\n");
+ pr_err("SPI0: STX Interface encountered a fatal error\n");
if (stx_int_reg.s.frmerr)
- pr_err("SPI0: STX FRMCNT has exceeded "
- "STX_DIP_CNT[MAXFRM]\n");
+ pr_err("SPI0: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
if (stx_int_reg.s.unxfrm)
- pr_err("SPI0: STX Unexpected framing "
- "sequence\n");
+ pr_err("SPI0: STX Unexpected framing sequence\n");
if (stx_int_reg.s.nosync)
- pr_err("SPI0: STX ERRCNT has exceeded "
- "STX_DIP_CNT[MAXDIP]\n");
+ pr_err("SPI0: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
if (stx_int_reg.s.diperr)
- pr_err("SPI0: STX DIP2 error on the Spi4 "
- "Status channel\n");
+ pr_err("SPI0: STX DIP2 error on the Spi4 Status channel\n");
if (stx_int_reg.s.datovr)
pr_err("SPI0: STX Spi4 FIFO overflow error\n");
if (stx_int_reg.s.ovrbst)
- pr_err("SPI0: STX Transmit packet burst "
- "too big\n");
+ pr_err("SPI0: STX Transmit packet burst too big\n");
if (stx_int_reg.s.calpar1)
- pr_err("SPI0: STX Calendar Table Parity "
- "Error Bank1\n");
+ pr_err("SPI0: STX Calendar Table Parity Error Bank1\n");
if (stx_int_reg.s.calpar0)
- pr_err("SPI0: STX Calendar Table Parity "
- "Error Bank0\n");
+ pr_err("SPI0: STX Calendar Table Parity Error Bank0\n");
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0);
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 5631dd9f820..9b4d0b546b8 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -78,10 +78,12 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
- undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
+ MAX_SKB_TO_FREE;
if (undo > 0)
cvmx_fau_atomic_add32(fau, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
+ -skb_to_free;
return skb_to_free;
}
@@ -108,8 +110,10 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
for (qos = 0; qos < queues_per_port; qos++) {
if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+ MAX_SKB_TO_FREE);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau+qos*4);
total_freed += skb_to_free;
@@ -117,12 +121,14 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
struct sk_buff *to_free_list = NULL;
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
- struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+ struct sk_buff *t;
+ t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
}
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
@@ -211,15 +217,23 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(__skb_linearize(skb))) {
queue_type = QUEUE_DROP;
if (USE_ASYNC_IOBDMA) {
- /* Get the number of skbuffs in use by the hardware */
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
CVMX_SYNCIOBDMA;
- skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free =
+ cvmx_scratch_read64(CVMX_SCR_SCRATCH);
} else {
- /* Get the number of skbuffs in use by the hardware */
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
+ skb_to_free = cvmx_fau_fetch_and_add32(
+ priv->fau + qos * 4, MAX_SKB_TO_FREE);
}
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -276,7 +290,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
+ hw_buffer.s.addr = XKPHYS_TO_PHYS(
+ (u64)(page_address(fs->page.p) +
+ fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
@@ -358,7 +374,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
*/
pko_command.s.dontfree = 0;
- hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
+ hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
+ ((unsigned long)fpa_head >> 7);
+
*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
/*
@@ -422,17 +440,22 @@ dont_put_skbuff_in_hw:
queue_type = QUEUE_HW;
}
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(
+ CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
/* Drop this packet if we have too many already queued to the HW */
- if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
+ MAX_OUT_QUEUE_DEPTH)) {
+
if (dev->tx_queue_len != 0) {
/* Drop the lock when notifying the core. */
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
netif_stop_queue(dev);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock,
+ flags);
} else {
/* If not using normal queueing. */
queue_type = QUEUE_DROP;
@@ -448,7 +471,8 @@ dont_put_skbuff_in_hw:
priv->queue + qos,
pko_command, hw_buffer,
CVMX_PKO_LOCK_NONE))) {
- printk_ratelimited("%s: Failed to send the packet\n", dev->name);
+ printk_ratelimited("%s: Failed to send the packet\n",
+ dev->name);
queue_type = QUEUE_DROP;
}
skip_xmit:
@@ -493,7 +517,8 @@ skip_xmit:
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean = cvmx_fau_fetch_and_add32(
+ FAU_TOTAL_TX_TO_CLEAN, 1);
}
if (total_to_clean & 0x3ff) {
@@ -527,8 +552,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(work == NULL)) {
- printk_ratelimited("%s: Failed to allocate a work "
- "queue entry\n", dev->name);
+ printk_ratelimited("%s: Failed to allocate a work queue entry\n",
+ dev->name);
priv->stats.tx_dropped++;
dev_kfree_skb(skb);
return 0;
@@ -709,7 +734,7 @@ void cvm_oct_tx_initialize(void)
/* Disable the interrupt. */
cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
+ /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
i = request_irq(OCTEON_IRQ_TIMER1,
cvm_oct_tx_cleanup_watchdog, 0,
"Ethernet", cvm_oct_device);
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index c3a90e7012a..bd6ca716404 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -163,11 +163,13 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
if (priv->poll)
priv->poll(cvm_oct_device[priv->port]);
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
+ cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
+ cvm_oct_device[priv->port]);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
- }
+ queue_delayed_work(cvm_oct_poll_queue,
+ &priv->port_periodic_work, HZ);
+}
static void cvm_oct_configure_common_hw(void)
{
@@ -453,7 +455,7 @@ int cvm_oct_common_init(struct net_device *dev)
if (priv->of_node)
mac = of_get_mac_address(priv->of_node);
- if (mac && is_valid_ether_addr(mac))
+ if (mac)
memcpy(dev->dev_addr, mac, ETH_ALEN);
else
eth_hw_addr_random(dev);
@@ -584,8 +586,8 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
extern void octeon_mdiobus_force_mod_depencency(void);
-static struct device_node *cvm_oct_of_get_child(const struct device_node *parent,
- int reg_val)
+static struct device_node *cvm_oct_of_get_child(
+ const struct device_node *parent, int reg_val)
{
struct device_node *node = NULL;
int size;
@@ -603,7 +605,7 @@ static struct device_node *cvm_oct_of_get_child(const struct device_node *parent
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
+ int interface, int port)
{
struct device_node *ni, *np;
@@ -713,7 +715,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
int port;
int port_index;
- for (port_index = 0, port = cvmx_helper_get_ipd_port(interface, 0);
+ for (port_index = 0,
+ port = cvmx_helper_get_ipd_port(interface, 0);
port < cvmx_helper_get_ipd_port(interface, num_ports);
port_index++, port++) {
struct octeon_ethernet *priv;
@@ -726,7 +729,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
/* Initialize the device private structure. */
priv = netdev_priv(dev);
- priv->of_node = cvm_oct_node_for_port(pip, interface, port_index);
+ priv->of_node = cvm_oct_node_for_port(pip, interface,
+ port_index);
INIT_DELAYED_WORK(&priv->port_periodic_work,
cvm_oct_periodic_worker);
@@ -793,7 +797,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
cvmx_pko_get_num_queues(priv->port) *
sizeof(uint32_t);
queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ &priv->port_periodic_work, HZ);
}
}
}
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index 2ff015d8450..d277f048789 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -1,7 +1,8 @@
config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
- select I2C
+ depends on I2C
+ depends on (GPIO_CS5535 || GPIO_CS5535=n)
select BACKLIGHT_CLASS_DEVICE
---help---
In order to support very low power operation, the XO laptop uses a
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 198595e8d74..92b02891704 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -383,7 +383,7 @@ static void dcon_set_source(struct dcon_priv *dcon, int arg)
dcon->pending_src = arg;
- if ((dcon->curr_src != arg) && !work_pending(&dcon->switch_source))
+ if (dcon->curr_src != arg)
schedule_work(&dcon->switch_source);
}
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index 4247d60c918..9f6ebdb2374 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -390,10 +390,6 @@ static int __init quickstart_init(void)
{
int ret;
- /* ACPI Check */
- if (acpi_disabled)
- return -ENODEV;
-
/* ACPI driver register */
ret = acpi_bus_register_driver(&quickstart_acpi_driver);
if (ret)
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index 10b22100dd3..30457909656 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -858,7 +858,7 @@ static inline void ieee80211_extract_country_ie(
}
-int
+static int
ieee80211_TranslateToDbm(
unsigned char SignalStrengthIndex // 0-100 index.
)
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index b65db542e1a..029070603f6 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -887,7 +887,8 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
return skb;
}
-struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
+static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
+ u8 *dest)
{
struct sk_buff *skb;
u8* tag;
@@ -940,7 +941,8 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
return skb;
}
-struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
+static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
+ int status, u8 *dest)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -2942,14 +2944,9 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
goto out;
}
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL){
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- ret = -EFAULT;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ ret = PTR_ERR(param);
goto out;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index b3466530cf9..f5a5219fe14 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -32,7 +32,6 @@
******************************************************************************/
#include <linux/compiler.h>
-//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
@@ -186,9 +185,12 @@ int ieee80211_encrypt_fragment(
struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
int res;
- /*added to care about null crypt condition, to solve that system hangs when shared keys error*/
- if (!crypt || !crypt->ops)
- return -1;
+ /*
+ * added to handle the NULL crypt condition, to fix the system hang
+ * seen on shared key errors
+ */
+ if (!crypt || !crypt->ops)
+ return -1;
#ifdef CONFIG_IEEE80211_CRYPT_TKIP
struct ieee80211_hdr_4addr *header;
@@ -197,19 +199,22 @@ int ieee80211_encrypt_fragment(
crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
header = (struct ieee80211_hdr_4addr *)frag->data;
if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "TX packet to %pM\n",
- ieee->dev->name, header->addr1);
+ netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
+ "TX packet to %pM\n", header->addr1);
}
return -1;
}
#endif
- /* To encrypt, frame format is:
- * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
-
- // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
- /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
- * call both MSDU and MPDU encryption functions from here. */
+ /*
+ * To encrypt, frame format is:
+ * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
+ *
+ * PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU
+ * encryption.
+ *
+ * Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
+ * call both MSDU and MPDU encryption functions from here.
+ */
atomic_inc(&crypt->refcnt);
res = 0;
if (crypt->ops->encrypt_msdu)
@@ -219,8 +224,7 @@ int ieee80211_encrypt_fragment(
atomic_dec(&crypt->refcnt);
if (res < 0) {
- printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
- ieee->dev->name, frag->len);
+ netdev_info(ieee->dev, "Encryption failed: len=%d.\n", frag->len);
ieee->ieee_stats.tx_discards++;
return -1;
}
@@ -229,7 +233,8 @@ int ieee80211_encrypt_fragment(
}
-void ieee80211_txb_free(struct ieee80211_txb *txb) {
+void ieee80211_txb_free(struct ieee80211_txb *txb)
+{
int i;
if (unlikely(!txb))
return;
@@ -239,13 +244,13 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) {
kfree(txb);
}
-struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
- int gfp_mask)
+static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
+ gfp_t gfp_mask)
{
struct ieee80211_txb *txb;
int i;
txb = kmalloc(
- sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
+ sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
gfp_mask);
if (!txb)
return NULL;
@@ -270,42 +275,43 @@ struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
return txb;
}
-// Classify the to-be send data packet
-// Need to acquire the sent queue index.
+/*
+ * Classify the to-be-sent data packet
+ * Need to acquire the send queue index.
+ */
static int
ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
{
- struct ether_header *eh = (struct ether_header*)skb->data;
- unsigned int wme_UP = 0;
+ struct ether_header *eh = (struct ether_header *)skb->data;
+ unsigned int wme_UP = 0;
- if(!network->QoS_Enable) {
- skb->priority = 0;
- return(wme_UP);
- }
+ if (!network->QoS_Enable) {
+ skb->priority = 0;
+ return wme_UP;
+ }
- if(eh->ether_type == __constant_htons(ETHERTYPE_IP)) {
- const struct iphdr *ih = (struct iphdr*)(skb->data + \
+ if (eh->ether_type == __constant_htons(ETHERTYPE_IP)) {
+ const struct iphdr *ih = (struct iphdr *)(skb->data +
sizeof(struct ether_header));
- wme_UP = (ih->tos >> 5)&0x07;
- } else if (vlan_tx_tag_present(skb)) {//vtag packet
+ wme_UP = (ih->tos >> 5)&0x07;
+ } else if (vlan_tx_tag_present(skb)) { /* vtag packet */
#ifndef VLAN_PRI_SHIFT
#define VLAN_PRI_SHIFT 13 /* Shift to find VLAN user priority */
#define VLAN_PRI_MASK 7 /* Mask for user priority bits in VLAN */
#endif
- u32 tag = vlan_tx_tag_get(skb);
- wme_UP = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
- } else if(ETH_P_PAE == ntohs(((struct ethhdr *)skb->data)->h_proto)) {
- //printk(KERN_WARNING "type = normal packet\n");
- wme_UP = 7;
- }
-
- skb->priority = wme_UP;
- return(wme_UP);
+ u32 tag = vlan_tx_tag_get(skb);
+ wme_UP = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+ } else if (ETH_P_PAE == ntohs(((struct ethhdr *)skb->data)->h_proto)) {
+ wme_UP = 7;
+ }
+
+ skb->priority = wme_UP;
+ return wme_UP;
}
/* SKBs are added to the ieee->tx_queue. */
int ieee80211_rtl_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
@@ -325,24 +331,25 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
struct ieee80211_crypt_data* crypt;
- //printk(KERN_WARNING "upper layer packet!\n");
spin_lock_irqsave(&ieee->lock, flags);
- /* If there is no driver handler to take the TXB, don't bother
- * creating it... */
- if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
- ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
- printk(KERN_WARNING "%s: No xmit handler.\n",
- ieee->dev->name);
+ /*
+ * If there is no driver handler to take the TXB, don't bother
+ * creating it...
+ */
+ if ((!ieee->hard_start_xmit &&
+ !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
+ ((!ieee->softmac_data_hard_start_xmit &&
+ (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
+ netdev_warn(ieee->dev, "No xmit handler.\n");
goto success;
}
ieee80211_classify(skb,&ieee->current_network);
- if(likely(ieee->raw_tx == 0)){
+ if (likely(ieee->raw_tx == 0)) {
if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
- printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, skb->len);
+ netdev_warn(ieee->dev, "skb too small (%d).\n", skb->len);
goto success;
}
@@ -378,7 +385,7 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
/* Determine total amount of storage required for TXB packets */
bytes = skb->len + SNAP_SIZE + sizeof(u16);
- if(ieee->current_network.QoS_Enable) {
+ if (ieee->current_network.QoS_Enable) {
if (encrypt)
fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA |
IEEE80211_FCTL_WEP;
@@ -395,31 +402,31 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
if (ieee->iw_mode == IW_MODE_INFRA) {
fc |= IEEE80211_FCTL_TODS;
- /* To DS: Addr1 = BSSID, Addr2 = SA,
- Addr3 = DA */
+ /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
memcpy(&header.addr2, &src, ETH_ALEN);
memcpy(&header.addr3, &dest, ETH_ALEN);
} else if (ieee->iw_mode == IW_MODE_ADHOC) {
- /* not From/To DS: Addr1 = DA, Addr2 = SA,
- Addr3 = BSSID */
+ /*
+ * not From/To DS: Addr1 = DA, Addr2 = SA,
+ * Addr3 = BSSID
+ */
memcpy(&header.addr1, dest, ETH_ALEN);
memcpy(&header.addr2, src, ETH_ALEN);
memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
}
- // printk(KERN_WARNING "essid MAC address is %pM", &header.addr1);
header.frame_ctl = cpu_to_le16(fc);
- //hdr_len = IEEE80211_3ADDR_LEN;
- /* Determine fragmentation size based on destination (multicast
- * and broadcast are not fragmented) */
+ /*
+ * Determine fragmentation size based on destination (multicast
+ * and broadcast are not fragmented)
+ */
if (is_multicast_ether_addr(header.addr1)) {
frag_size = MAX_FRAG_THRESHOLD;
qos_ctl = QOS_CTL_NOTCONTAIN_ACK;
- }
- else {
- //printk(KERN_WARNING "&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&frag_size = %d\n", frag_size);
- frag_size = ieee->fts;//default:392
+ } else {
+ /* default:392 */
+ frag_size = ieee->fts;
qos_ctl = 0;
}
@@ -432,11 +439,12 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
hdr_len = IEEE80211_3ADDR_LEN;
}
- /* Determine amount of payload per fragment. Regardless of if
- * this stack is providing the full 802.11 header, one will
- * eventually be affixed to this fragment -- so we must account for
- * it when determining the amount of payload space. */
- //bytes_per_frag = frag_size - (IEEE80211_3ADDR_LEN + (ieee->current_network->QoS_Enable ? 2:0));
+ /*
+ * Determine amount of payload per fragment. Regardless of whether
+ * this stack is providing the full 802.11 header, one will
+ * eventually be affixed to this fragment -- so we must account
+ * for it when determining the amount of payload space.
+ */
bytes_per_frag = frag_size - hdr_len;
if (ieee->config &
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
@@ -447,8 +455,10 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
bytes_per_frag -= crypt->ops->extra_prefix_len +
crypt->ops->extra_postfix_len;
- /* Number of fragments is the total bytes_per_frag /
- * payload_per_fragment */
+ /*
+ * Number of fragments is the total bytes_per_frag /
+ * payload_per_fragment
+ */
nr_frags = bytes / bytes_per_frag;
bytes_last_frag = bytes % bytes_per_frag;
if (bytes_last_frag)
@@ -456,13 +466,14 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
else
bytes_last_frag = bytes_per_frag;
- /* When we allocate the TXB we allocate enough space for the reserve
- * and full fragment bytes (bytes_per_frag doesn't include prefix,
- * postfix, header, FCS, etc.) */
+ /*
+ * When we allocate the TXB we allocate enough space for the
+ * reserve and full fragment bytes (bytes_per_frag doesn't
+ * include prefix, postfix, header, FCS, etc.)
+ */
txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
if (unlikely(!txb)) {
- printk(KERN_WARNING "%s: Could not allocate TXB\n",
- ieee->dev->name);
+ netdev_warn(ieee->dev, "Could not allocate TXB\n");
goto failed;
}
txb->encrypted = encrypt;
@@ -474,11 +485,14 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
if (encrypt)
skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
- frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
+ frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(
+ skb_frag, hdr_len);
memcpy(frag_hdr, &header, hdr_len);
- /* If this is not the last fragment, then add the MOREFRAGS
- * bit to the frame control */
+ /*
+ * If this is not the last fragment, then add the MOREFRAGS
+ * bit to the frame control
+ */
if (i != nr_frags - 1) {
frag_hdr->frame_ctl = cpu_to_le16(
fc | IEEE80211_FCTL_MOREFRAGS);
@@ -488,16 +502,17 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
/* The last fragment takes the remaining length */
bytes = bytes_last_frag;
}
- if(ieee->current_network.QoS_Enable) {
- // add 1 only indicate to corresponding seq number control 2006/7/12
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
- //printk(KERN_WARNING "skb->priority = %d,", skb->priority);
- //printk(KERN_WARNING "type:%d: seq = %d\n",UP2AC(skb->priority),ieee->seq_ctrl[UP2AC(skb->priority)+1]);
+ if (ieee->current_network.QoS_Enable) {
+ /*
+ * add 1 only indicate to corresponding seq
+ * number control 2006/7/12
+ */
+ frag_hdr->seq_ctl = cpu_to_le16(
+ ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
} else {
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
+ frag_hdr->seq_ctl = cpu_to_le16(
+ ieee->seq_ctrl[0]<<4 | i);
}
- //frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl<<4 | i);
- //
/* Put a SNAP header on the first fragment */
if (i == 0) {
@@ -512,54 +527,53 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
/* Advance the SKB... */
skb_pull(skb, bytes);
- /* Encryption routine will move the header forward in order
- * to insert the IV between the header and the payload */
+ /*
+ * Encryption routine will move the header forward in
+ * order to insert the IV between the header and the
+ * payload
+ */
if (encrypt)
ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
if (ieee->config &
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
skb_put(skb_frag, 4);
}
- // Advance sequence number in data frame.
- //printk(KERN_WARNING "QoS Enalbed? %s\n", ieee->current_network.QoS_Enable?"Y":"N");
+ /* Advance sequence number in data frame. */
if (ieee->current_network.QoS_Enable) {
- if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
- ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
- else
- ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
+ if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
+ else
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
} else {
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
}
- //---
- }else{
+ } else {
if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
- printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, skb->len);
+ netdev_warn(ieee->dev, "skb too small (%d).\n", skb->len);
goto success;
}
txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
- if(!txb){
- printk(KERN_WARNING "%s: Could not allocate TXB\n",
- ieee->dev->name);
+ if (!txb) {
+ netdev_warn(ieee->dev, "Could not allocate TXB\n");
goto failed;
}
txb->encrypted = 0;
txb->payload_size = skb->len;
- memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
+ memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len);
}
success:
spin_unlock_irqrestore(&ieee->lock, flags);
dev_kfree_skb_any(skb);
if (txb) {
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
+ if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
ieee80211_softmac_xmit(txb, ieee);
- }else{
+ } else {
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
stats->tx_bytes += txb->payload_size;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index e014f7e7439..24d39ccc133 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -145,7 +145,8 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add quality statistics */
/* TODO: Fix these values... */
if (network->stats.signal == 0 || network->stats.rssi == 0)
- printk("========>signal:%d, rssi:%d\n", network->stats.signal, network->stats.rssi);
+ printk("========>signal:%d, rssi:%d\n", network->stats.signal,
+ network->stats.rssi);
iwe.cmd = IWEVQUAL;
// printk("SIGNAL: %d,RSSI: %d,NOISE: %d\n",network->stats.signal,network->stats.rssi,network->stats.noise);
iwe.u.qual.qual = network->stats.signalstrength;
@@ -622,7 +623,7 @@ done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- if (ieee->reset_on_keychange &&
+ if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 5947a6f8e16..76a67386b92 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -160,34 +160,34 @@ static struct pci_driver rtl8180_pci_driver = {
u8 read_nic_byte(struct net_device *dev, int x)
{
- return 0xff&readb((u8 *)dev->mem_start + x);
+ return 0xff&readb((u8 __iomem *)dev->mem_start + x);
}
u32 read_nic_dword(struct net_device *dev, int x)
{
- return readl((u8 *)dev->mem_start + x);
+ return readl((u8 __iomem *)dev->mem_start + x);
}
u16 read_nic_word(struct net_device *dev, int x)
{
- return readw((u8 *)dev->mem_start + x);
+ return readw((u8 __iomem *)dev->mem_start + x);
}
void write_nic_byte(struct net_device *dev, int x, u8 y)
{
- writeb(y, (u8 *)dev->mem_start + x);
+ writeb(y, (u8 __iomem *)dev->mem_start + x);
udelay(20);
}
void write_nic_dword(struct net_device *dev, int x, u32 y)
{
- writel(y, (u8 *)dev->mem_start + x);
+ writel(y, (u8 __iomem *)dev->mem_start + x);
udelay(20);
}
void write_nic_word(struct net_device *dev, int x, u16 y)
{
- writew(y, (u8 *)dev->mem_start + x);
+ writew(y, (u8 __iomem *)dev->mem_start + x);
udelay(20);
}
@@ -275,18 +275,18 @@ static int proc_get_stats_tx(struct seq_file *m, void *v)
return 0;
}
-void rtl8180_proc_module_init(void)
+static void rtl8180_proc_module_init(void)
{
DMESG("Initializing proc filesystem");
rtl8180_proc = proc_mkdir(RTL8180_MODULE_NAME, init_net.proc_net);
}
-void rtl8180_proc_module_remove(void)
+static void rtl8180_proc_module_remove(void)
{
remove_proc_entry(RTL8180_MODULE_NAME, init_net.proc_net);
}
-void rtl8180_proc_remove_one(struct net_device *dev)
+static void rtl8180_proc_remove_one(struct net_device *dev)
{
remove_proc_subtree(dev->name, rtl8180_proc);
}
@@ -325,7 +325,7 @@ static const struct rtl8180_proc_file rtl8180_proc_files[] = {
{ "" }
};
-void rtl8180_proc_init_one(struct net_device *dev)
+static void rtl8180_proc_init_one(struct net_device *dev)
{
const struct rtl8180_proc_file *f;
struct proc_dir_entry *dir;
@@ -351,8 +351,8 @@ void rtl8180_proc_init_one(struct net_device *dev)
data type+functions in kernel
*/
-short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
- struct buffer **bufferhead)
+static short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
+ struct buffer **bufferhead)
{
struct buffer *tmp;
@@ -463,7 +463,7 @@ int get_curr_tx_free_desc(struct net_device *dev, int priority)
return ret;
}
-short check_nic_enought_desc(struct net_device *dev, int priority)
+static short check_nic_enought_desc(struct net_device *dev, int priority)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = netdev_priv(dev);
@@ -589,7 +589,7 @@ void fix_rx_fifo(struct net_device *dev)
set_nic_rxring(dev);
}
-void rtl8180_irq_disable(struct net_device *dev)
+static void rtl8180_irq_disable(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -705,8 +705,8 @@ void rtl8180_rtx_disable(struct net_device *dev)
dev_kfree_skb_any(priv->rx_skb);
}
-short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
- int addr)
+static short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
+ int addr)
{
int i;
u32 *desc;
@@ -830,7 +830,7 @@ short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
return 0;
}
-void free_tx_desc_rings(struct net_device *dev)
+static void free_tx_desc_rings(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct pci_dev *pdev = priv->pdev;
@@ -866,7 +866,7 @@ void free_tx_desc_rings(struct net_device *dev)
buffer_free(dev, &(priv->txbeaconbufs), priv->txbuffsize, 1);
}
-void free_rx_desc_ring(struct net_device *dev)
+static void free_rx_desc_ring(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct pci_dev *pdev = priv->pdev;
@@ -878,7 +878,7 @@ void free_rx_desc_ring(struct net_device *dev)
buffer_free(dev, &(priv->rxbuffer), priv->rxbuffersize, 0);
}
-short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
+static short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
{
int i;
u32 *desc;
@@ -1092,7 +1092,7 @@ u16 N_DBPSOfRate(u16 DataRate)
/*
* For Netgear case, they want good-looking signal strength.
*/
-long NetgearSignalStrengthTranslate(long LastSS, long CurrSS)
+static long NetgearSignalStrengthTranslate(long LastSS, long CurrSS)
{
long RetSS;
@@ -1128,7 +1128,7 @@ long NetgearSignalStrengthTranslate(long LastSS, long CurrSS)
/*
* Translate 0-100 signal strength index into dBm.
*/
-long TranslateToDbm8185(u8 SignalStrengthIndex)
+static long TranslateToDbm8185(u8 SignalStrengthIndex)
{
long SignalPower;
@@ -1145,8 +1145,8 @@ long TranslateToDbm8185(u8 SignalStrengthIndex)
* No dramatic adjustion is apply because dynamic mechanism need some degree
* of correctness. Ported from 8187B.
*/
-void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
- bool bCckRate)
+static void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
+ bool bCckRate)
{
/* Determin the current packet is CCK rate. */
priv->bCurCCKPkt = bCckRate;
@@ -1170,7 +1170,7 @@ void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
/*
* This is rough RX isr handling routine
*/
-void rtl8180_rx(struct net_device *dev)
+static void rtl8180_rx(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct sk_buff *tmp_skb;
@@ -1496,7 +1496,7 @@ drop: /* this is used when we have not enough mem */
}
-void rtl8180_dma_kick(struct net_device *dev, int priority)
+static void rtl8180_dma_kick(struct net_device *dev, int priority)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -1508,7 +1508,7 @@ void rtl8180_dma_kick(struct net_device *dev, int priority)
force_pci_posting(dev);
}
-void rtl8180_data_hard_stop(struct net_device *dev)
+static void rtl8180_data_hard_stop(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -1518,7 +1518,7 @@ void rtl8180_data_hard_stop(struct net_device *dev)
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
-void rtl8180_data_hard_resume(struct net_device *dev)
+static void rtl8180_data_hard_resume(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -1532,8 +1532,9 @@ void rtl8180_data_hard_resume(struct net_device *dev)
* This function TX data frames when the ieee80211 stack requires this.
* It checks also if we need to stop the ieee tx queue, eventually do it
*/
-void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int
-rate) {
+static void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+ int rate)
+{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
int mode;
struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
@@ -1584,7 +1585,7 @@ rate) {
* might use a different lock than tx_lock (for example mgmt_tx_lock)
*/
/* these function may loop if invoked with 0 descriptors or 0 len buffer */
-int rtl8180_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int rtl8180_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
unsigned long flags;
@@ -1660,7 +1661,7 @@ u16 rtl8180_len2duration(u32 len, short rate, short *ext)
return duration;
}
-void rtl8180_prepare_beacon(struct net_device *dev)
+static void rtl8180_prepare_beacon(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct sk_buff *skb;
@@ -1704,7 +1705,7 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
u16 RtsDur = 0;
u16 ThisFrameTime = 0;
u16 TxDescDuration = 0;
- u8 ownbit_flag = false;
+ bool ownbit_flag = false;
switch (priority) {
case MANAGE_PRIORITY:
@@ -1953,7 +1954,7 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
void rtl8180_irq_rx_tasklet(struct r8180_priv *priv);
-void rtl8180_link_change(struct net_device *dev)
+static void rtl8180_link_change(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 beacon_interval;
@@ -1976,7 +1977,7 @@ void rtl8180_link_change(struct net_device *dev)
rtl8180_set_chan(dev, priv->chan);
}
-void rtl8180_rq_tx_ack(struct net_device *dev)
+static void rtl8180_rq_tx_ack(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1985,7 +1986,7 @@ void rtl8180_rq_tx_ack(struct net_device *dev)
priv->ack_tx_to_ieee = 1;
}
-short rtl8180_is_tx_queue_empty(struct net_device *dev)
+static short rtl8180_is_tx_queue_empty(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -2023,7 +2024,7 @@ short rtl8180_is_tx_queue_empty(struct net_device *dev)
return 1;
}
-void rtl8180_hw_wakeup(struct net_device *dev)
+static void rtl8180_hw_wakeup(struct net_device *dev)
{
unsigned long flags;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -2035,7 +2036,7 @@ void rtl8180_hw_wakeup(struct net_device *dev)
spin_unlock_irqrestore(&priv->ps_lock, flags);
}
-void rtl8180_hw_sleep_down(struct net_device *dev)
+static void rtl8180_hw_sleep_down(struct net_device *dev)
{
unsigned long flags;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -2046,7 +2047,7 @@ void rtl8180_hw_sleep_down(struct net_device *dev)
spin_unlock_irqrestore(&priv->ps_lock, flags);
}
-void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
+static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u32 rb = jiffies;
@@ -2093,7 +2094,7 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
spin_unlock_irqrestore(&priv->ps_lock, flags);
}
-void rtl8180_wmm_param_update(struct work_struct *work)
+static void rtl8180_wmm_param_update(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wmm_param_update_wq);
struct net_device *dev = ieee->dev;
@@ -2195,7 +2196,7 @@ void rtl8180_hw_sleep_wq(struct work_struct *work);
void rtl8180_sw_antenna_wq(struct work_struct *work);
void rtl8180_watch_dog(struct net_device *dev);
-void watch_dog_adaptive(unsigned long data)
+static void watch_dog_adaptive(unsigned long data)
{
struct r8180_priv *priv = ieee80211_priv((struct net_device *)data);
@@ -2213,7 +2214,7 @@ void watch_dog_adaptive(unsigned long data)
TxPwrTracking87SE((struct net_device *)data);
/* Perform DIG immediately. */
- if (CheckDig((struct net_device *)data) == true)
+ if (CheckDig((struct net_device *)data))
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->hw_dig_wq);
rtl8180_watch_dog((struct net_device *)data);
@@ -2271,7 +2272,7 @@ static void rtl8180_set_channel_map(u8 channel_plan, struct ieee80211_device *ie
}
case COUNTRY_CODE_GLOBAL_DOMAIN:
{
- GET_DOT11D_INFO(ieee)->bEnabled = 0;
+ GET_DOT11D_INFO(ieee)->bEnabled = false;
Dot11d_Reset(ieee);
ieee->bGlobalDomain = true;
break;
@@ -2429,7 +2430,7 @@ short rtl8180_init(struct net_device *dev)
init_timer(&priv->SwAntennaDiversityTimer);
priv->SwAntennaDiversityTimer.data = (unsigned long)dev;
priv->SwAntennaDiversityTimer.function = (void *)SwAntennaDiversityTimerCallback;
- priv->bDigMechanism = 1;
+ priv->bDigMechanism = true;
priv->InitialGain = 6;
priv->bXtalCalibration = false;
priv->XtalCal_Xin = 0;
@@ -2548,7 +2549,7 @@ short rtl8180_init(struct net_device *dev)
(priv->EarlyRxThreshold == 7 ?
RCR_ONLYERLPKT : 0);
- priv->IntrMask = IMR_TMGDOK | IMR_TBDER | IMR_THPDER |
+ priv->IntrMask = IMR_TMGDOK | IMR_TBDER |
IMR_THPDER | IMR_THPDOK |
IMR_TVODER | IMR_TVODOK |
IMR_TVIDER | IMR_TVIDOK |
@@ -2757,7 +2758,7 @@ void rtl8185_tx_antenna(struct net_device *dev, u8 ant)
mdelay(1);
}
-void rtl8185_write_phy(struct net_device *dev, u8 adr, u32 data)
+static void rtl8185_write_phy(struct net_device *dev, u8 adr, u32 data)
{
u32 phyw;
@@ -2969,7 +2970,7 @@ void rtl8180_watch_dog(struct net_device *dev)
priv->ieee80211->NumRxBcnInPeriod = 0;
}
-int _rtl8180_up(struct net_device *dev)
+static int _rtl8180_up(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -2991,7 +2992,7 @@ int _rtl8180_up(struct net_device *dev)
return 0;
}
-int rtl8180_open(struct net_device *dev)
+static int rtl8180_open(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret;
@@ -3012,7 +3013,7 @@ int rtl8180_up(struct net_device *dev)
return _rtl8180_up(dev);
}
-int rtl8180_close(struct net_device *dev)
+static int rtl8180_close(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret;
@@ -3065,7 +3066,7 @@ void rtl8180_restart_wq(struct work_struct *work)
up(&priv->wx_sem);
}
-void rtl8180_restart(struct net_device *dev)
+static void rtl8180_restart(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -3106,7 +3107,7 @@ static void r8180_set_multicast(struct net_device *dev)
priv->promisc = promisc;
}
-int r8180_set_mac_adr(struct net_device *dev, void *mac)
+static int r8180_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
@@ -3129,7 +3130,7 @@ int r8180_set_mac_adr(struct net_device *dev, void *mac)
}
/* based on ipw2200 driver */
-int rtl8180_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int rtl8180_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *) rq;
@@ -3251,7 +3252,7 @@ static int rtl8180_pci_probe(struct pci_dev *pdev,
return 0;
fail1:
if (dev->mem_start != (unsigned long)NULL) {
- iounmap((void *)dev->mem_start);
+ iounmap((void __iomem *)dev->mem_start);
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
}
@@ -3268,7 +3269,6 @@ fail_free:
pci_disable_device(pdev);
DMESG("wlan driver load failed\n");
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -3298,7 +3298,7 @@ static void rtl8180_pci_remove(struct pci_dev *pdev)
free_tx_desc_rings(dev);
if (dev->mem_start != (unsigned long)NULL) {
- iounmap((void *)dev->mem_start);
+ iounmap((void __iomem *)dev->mem_start);
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
}
@@ -3369,7 +3369,7 @@ static void __exit rtl8180_pci_module_exit(void)
DMESG("Exiting");
}
-void rtl8180_try_wake_queue(struct net_device *dev, int pri)
+static void rtl8180_try_wake_queue(struct net_device *dev, int pri)
{
unsigned long flags;
short enough_desc;
@@ -3383,7 +3383,7 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri)
ieee80211_rtl_wake_queue(priv->ieee80211);
}
-void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
+static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u32 *tail; /* tail virtual addr */
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
index b8f2ba010a0..2ccd2cb70fa 100644
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ b/drivers/staging/rtl8187se/r8180_dm.c
@@ -10,10 +10,10 @@ bool CheckHighPower(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
- if(!priv->bRegHighPowerMechanism)
+ if (!priv->bRegHighPowerMechanism)
return false;
- if(ieee->state == IEEE80211_LINKED_SCANNING)
+ if (ieee->state == IEEE80211_LINKED_SCANNING)
return false;
return true;
@@ -30,7 +30,7 @@ bool CheckHighPower(struct net_device *dev)
* and they are related to OFDM and MAC registers.
* So, we don't want to update it so frequently in per-Rx packet base.
*/
-void DoTxHighPower(struct net_device *dev)
+static void DoTxHighPower(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 HiPwrUpperTh = 0;
@@ -57,15 +57,15 @@ void DoTxHighPower(struct net_device *dev)
/* Stevenl suggested that degrade 8dbm in high power sate. 2007-12-04 Isaiah */
priv->bToUpdateTxPwr = true;
- u1bTmp= read_nic_byte(dev, CCK_TXAGC);
+ u1bTmp = read_nic_byte(dev, CCK_TXAGC);
/* If it never enter High Power. */
if (CckTxPwrIdx == u1bTmp) {
- u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; /* 8dbm */
+ u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8dbm */
write_nic_byte(dev, CCK_TXAGC, u1bTmp);
- u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
- u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; /* 8dbm */
+ u1bTmp = read_nic_byte(dev, OFDM_TXAGC);
+ u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8dbm */
write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
}
@@ -74,12 +74,12 @@ void DoTxHighPower(struct net_device *dev)
if (priv->bToUpdateTxPwr) {
priv->bToUpdateTxPwr = false;
/* SD3 required. */
- u1bTmp= read_nic_byte(dev, CCK_TXAGC);
+ u1bTmp = read_nic_byte(dev, CCK_TXAGC);
if (u1bTmp < CckTxPwrIdx) {
write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
}
- u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
+ u1bTmp = read_nic_byte(dev, OFDM_TXAGC);
if (u1bTmp < OfdmTxPwrIdx) {
write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
}
@@ -97,7 +97,7 @@ void DoTxHighPower(struct net_device *dev)
void rtl8180_tx_pw_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,tx_pw_wq);
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, tx_pw_wq);
struct net_device *dev = ieee->dev;
DoTxHighPower(dev);
@@ -125,7 +125,7 @@ bool CheckDig(struct net_device *dev)
/*
* Implementation of DIG for Zebra and Zebra2.
*/
-void DIG_Zebra(struct net_device *dev)
+static void DIG_Zebra(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 CCKFalseAlarm, OFDMFalseAlarm;
@@ -149,11 +149,11 @@ void DIG_Zebra(struct net_device *dev)
#if 1 /* lzm reserved 080826 */
AwakePeriodIn2Sec = (2000 - priv->DozePeriodInPast2Sec);
- priv ->DozePeriodInPast2Sec = 0;
+ priv->DozePeriodInPast2Sec = 0;
if (AwakePeriodIn2Sec) {
- OfdmFA1 = (u16)((OfdmFA1 * AwakePeriodIn2Sec) / 2000) ;
- OfdmFA2 = (u16)((OfdmFA2 * AwakePeriodIn2Sec) / 2000) ;
+ OfdmFA1 = (u16)((OfdmFA1 * AwakePeriodIn2Sec) / 2000);
+ OfdmFA2 = (u16)((OfdmFA2 * AwakePeriodIn2Sec) / 2000);
} else {
;
}
@@ -202,7 +202,7 @@ void DIG_Zebra(struct net_device *dev)
/*
* Dispatch DIG implementation according to RF.
*/
-void DynamicInitGain(struct net_device *dev)
+static void DynamicInitGain(struct net_device *dev)
{
DIG_Zebra(dev);
}
@@ -210,7 +210,7 @@ void DynamicInitGain(struct net_device *dev)
void rtl8180_hw_dig_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_dig_wq);
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_dig_wq);
struct net_device *dev = ieee->dev;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -223,7 +223,7 @@ void rtl8180_hw_dig_wq(struct work_struct *work)
}
-int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
+static int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
{
u8 rate_len;
u8 rate_ex_len;
@@ -234,7 +234,7 @@ int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
rate_len = priv->ieee80211->current_network.rates_len;
rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
- for (idx=0; idx < rate_len; idx++) {
+ for (idx = 0; idx < rate_len; idx++) {
if ((priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate) {
Found = 1;
goto found_rate;
@@ -247,7 +247,7 @@ int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
}
}
return Found;
- found_rate:
+found_rate:
return Found;
}
@@ -255,7 +255,7 @@ int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
* Get the Tx rate one degree up form the input rate in the supported rates.
* Return the upgrade rate if it is successed, otherwise return the input rate.
*/
-u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
+static u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 UpRate;
@@ -315,7 +315,7 @@ u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
* Return the degrade rate if it is successed, otherwise return the input rate.
*/
-u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
+static u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 DownRate;
@@ -375,7 +375,7 @@ u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
* CCK rate.
*/
-bool MgntIsCckRate(u16 rate)
+static bool MgntIsCckRate(u16 rate)
{
bool bReturn = false;
@@ -397,7 +397,7 @@ void TxPwrTracking87SE(struct net_device *dev)
tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL);
CurrentThermal = (tmpu1Byte & 0xf0) >> 4; /*[ 7:4]: thermal meter indication. */
- CurrentThermal = (CurrentThermal > 0x0c) ? 0x0c:CurrentThermal;/* lzm add 080826 */
+ CurrentThermal = (CurrentThermal > 0x0c) ? 0x0c : CurrentThermal;/* lzm add 080826 */
if (CurrentThermal != priv->ThermalMeter) {
/* Update Tx Power level on each channel. */
@@ -435,7 +435,7 @@ void TxPwrTracking87SE(struct net_device *dev)
}
priv->ThermalMeter = CurrentThermal;
}
-void StaRateAdaptive87SE(struct net_device *dev)
+static void StaRateAdaptive87SE(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
unsigned long CurrTxokCnt;
@@ -513,7 +513,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
*/
/*
- * 11Mbps or 36Mbps
+ * 11Mbps or 36Mbps
* Check more times in these rate(key rates).
*/
if (priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
@@ -542,7 +542,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
}
} else if (CurrSignalStrength > -47 && (CurrRetryRate < 50)) {
/*
- * 2For High Power
+ * 2For High Power
*
* Return to highest data rate, if signal strength is good enough.
* SignalStrength threshold(-50dbm) is for RTL8186.
@@ -577,8 +577,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
if (bTryDown && (CurrSignalStrength < -75)) /* cable link */
priv->TryDownCountLowData += TryDownTh;
- }
- else if (priv->CurrentOperaRate == 96) {
+ } else if (priv->CurrentOperaRate == 96) {
/* 2For 48Mbps */
/* Air Link */
if (((CurrRetryRate > 48) && (priv->LastRetryRate > 47))) {
@@ -593,7 +592,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
bTryUp = true;
}
- if (bTryDown && (CurrSignalStrength < -75)){
+ if (bTryDown && (CurrSignalStrength < -75)) {
priv->TryDownCountLowData += TryDownTh;
}
} else if (priv->CurrentOperaRate == 72) {
@@ -618,7 +617,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
bTryDown = true;
} else if (((CurrRetryRate > 33) && (priv->LastRetryRate > 32)) && (CurrSignalStrength > -82)) { /* Cable Link */
bTryDown = true;
- } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2 )) {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
} else if ((CurrRetryRate < 20) && (priv->LastRetryRate < 21)) { /* TO DO: need to consider (RSSI) */
@@ -641,8 +640,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
/* 2For 11Mbps */
if (CurrRetryRate > 95) {
bTryDown = true;
- }
- else if ((CurrRetryRate < 29) && (priv->LastRetryRate < 30)) { /*TO DO: need to consider (RSSI) */
+ } else if ((CurrRetryRate < 29) && (priv->LastRetryRate < 30)) { /*TO DO: need to consider (RSSI) */
bTryUp = true;
}
} else if (priv->CurrentOperaRate == 11) {
@@ -667,12 +665,12 @@ void StaRateAdaptive87SE(struct net_device *dev)
}
if (bTryUp && bTryDown)
- printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
+ printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
/* 1 Test Upgrading Tx Rate
* Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
* To test if the upper rate may cause lower retry rate, this mechanism randomly occurs to test upgrading tx rate.
- */
+ */
if (!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
&& priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2) {
if (jiffies % (CurrRetryRate + 101) == 0) {
@@ -702,7 +700,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
if (priv->CurrentOperaRate == 22)
bUpdateInitialGain = true;
- /*
+ /*
* The difference in throughput between 48Mbps and 36Mbps is 8M.
* So, we must be careful in this rate scale. Isaiah 2008-02-15.
*/
@@ -718,7 +716,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
if (priv->CurrentOperaRate == 36) {
priv->bUpdateARFR = true;
write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
- } else if(priv->bUpdateARFR) {
+ } else if (priv->bUpdateARFR) {
priv->bUpdateARFR = false;
write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
}
@@ -732,7 +730,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
}
} else {
if (priv->TryupingCount > 0)
- priv->TryupingCount --;
+ priv->TryupingCount--;
}
if (bTryDown) {
@@ -757,7 +755,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
priv->CurrentOperaRate = GetDegradeTxRate(dev, priv->CurrentOperaRate);
/* Reduce chariot training time at weak signal strength situation. SD3 ED demand. */
- if ((CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72 )) {
+ if ((CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72)) {
priv->CurrentOperaRate = 72;
}
@@ -781,8 +779,8 @@ void StaRateAdaptive87SE(struct net_device *dev)
priv->TryDownCountLowData--;
}
- /*
- * Keep the Tx fail rate count to equal to 0x15 at most.
+ /*
+ * Keep the Tx fail rate count to equal to 0x15 at most.
* Reduce the fail count at least to 10 sec if tx rate is tending stable.
*/
if (priv->FailTxRateCount >= 0x15 ||
@@ -803,14 +801,14 @@ void StaRateAdaptive87SE(struct net_device *dev)
if (u1bCck == CckTxPwrIdx) {
if (u1bOfdm != (OfdmTxPwrIdx + 2)) {
priv->bEnhanceTxPwr = true;
- u1bOfdm = ((u1bOfdm + 2) > 35) ? 35: (u1bOfdm + 2);
+ u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
}
} else if (u1bCck < CckTxPwrIdx) {
/* case 2: enter high power */
if (!priv->bEnhanceTxPwr) {
priv->bEnhanceTxPwr = true;
- u1bOfdm = ((u1bOfdm + 2) > 35) ? 35: (u1bOfdm + 2);
+ u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
}
}
@@ -826,7 +824,7 @@ void StaRateAdaptive87SE(struct net_device *dev)
/* case 2: enter high power */
else if (u1bCck < CckTxPwrIdx) {
priv->bEnhanceTxPwr = false;
- u1bOfdm = ((u1bOfdm - 2) > 0) ? (u1bOfdm - 2): 0;
+ u1bOfdm = ((u1bOfdm - 2) > 0) ? (u1bOfdm - 2) : 0;
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
}
}
@@ -851,7 +849,7 @@ SetInitialGain:
else
priv->InitialGain--;
- printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
+ printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n", priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
} else { /* OFDM */
@@ -859,7 +857,7 @@ SetInitialGain:
priv->InitialGainBackUp = priv->InitialGain;
priv->InitialGain++;
- printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
+ printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n", priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
}
@@ -904,7 +902,7 @@ void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength)
} else { /* Initialization case. */
priv->AdRxSignalStrength = SignalStrength;
}
-
+
if (priv->LastRxPktAntenna) /* Main antenna. */
priv->AdMainAntennaRxOkCnt++;
else /* Aux antenna. */
@@ -943,7 +941,7 @@ bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex)
break;
}
- if(bAntennaSwitched)
+ if (bAntennaSwitched)
priv->CurrAntennaIndex = u1bAntennaIndex;
return bAntennaSwitched;
@@ -1000,8 +998,8 @@ void SwAntennaDiversity(struct net_device *dev)
priv->AdRxSsThreshold = (priv->AdRxSignalStrength + priv->AdRxSsBeforeSwitched) / 2;
priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
- priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;
- if(priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched) {
+ priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;
+ if (priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched) {
/* Rx signal strength is not improved after we swtiched antenna. => Swich back. */
/* Increase Antenna Diversity checking period due to bad decision. */
priv->AdCheckPeriod *= 2;
@@ -1083,7 +1081,7 @@ void SwAntennaDiversity(struct net_device *dev)
priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
- priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;/* +by amy 080312 */
+ priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;/* +by amy 080312 */
}
/* Reduce Antenna Diversity checking period if possible. */
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index 9ae96b7852f..7c9a8bfe6d8 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -136,7 +136,7 @@ static const u16 rtl8225z2_rxgain[] = {
};
-void rtl8225z2_set_gain(struct net_device *dev, short gain)
+static void rtl8225z2_set_gain(struct net_device *dev, short gain)
{
const u8 *rtl8225_gain;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -279,8 +279,8 @@ void rtl8225z2_rf_close(struct net_device *dev)
* Map dBm into Tx power index according to current HW model, for example,
* RF and PA, and current wireless mode.
*/
-s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
- s32 PowerInDbm)
+static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
+ s32 PowerInDbm)
{
bool bUseDefault = true;
s8 TxPwrIdx = 0;
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index dab787542c4..4e01653e098 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -50,8 +50,9 @@ static int r8180_wx_get_freq(struct net_device *dev,
}
-int r8180_wx_set_key(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *key)
+static int r8180_wx_set_key(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct iw_point *erq = &(wrqu->encoding);
@@ -1146,12 +1147,12 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
if (priv->ieee80211->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ down(&priv->wx_sem);
#if 1
- ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length);
+ ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length);
#endif
- up(&priv->wx_sem);
- return ret;
+ up(&priv->wx_sem);
+ return ret;
}
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index 978dc5f4f92..dc52a3e584d 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -30,10 +30,13 @@
#define TC_3W_POLL_MAX_TRY_CNT 5
static u8 MAC_REG_TABLE[][2] = {
- /*PAGA 0: */
- /* 0x34(BRSR), 0xBE(RATE_FALLBACK_CTL), 0x1E0(ARFR) would set in HwConfigureRTL8185() */
- /* 0x272(RFSW_CTRL), 0x1CE(AESMSK_QC) set in InitializeAdapter8185(). */
- /* 0x1F0~0x1F8 set in MacConfig_85BASIC() */
+ /*
+ * PAGE 0:
+ * 0x34(BRSR), 0xBE(RATE_FALLBACK_CTL), 0x1E0(ARFR) are set in
+ * HwConfigureRTL8185()
+ * 0x272(RFSW_CTRL), 0x1CE(AESMSK_QC) are set in InitializeAdapter8185().
+ * 0x1F0~0x1F8 are set in MacConfig_85BASIC()
+ */
{0x08, 0xae}, {0x0a, 0x72}, {0x5b, 0x42},
{0x84, 0x88}, {0x85, 0x24}, {0x88, 0x54}, {0x8b, 0xb8}, {0x8c, 0x03},
{0x8d, 0x40}, {0x8e, 0x00}, {0x8f, 0x00}, {0x5b, 0x18}, {0x91, 0x03},
@@ -44,15 +47,20 @@ static u8 MAC_REG_TABLE[][2] = {
{0xfa, 0x00}, {0xfb, 0x00}, {0xfc, 0x96}, {0xfd, 0xa4}, {0xfe, 0x00},
{0xff, 0x00},
- /*PAGE 1: */
- /* For Flextronics system Logo PCIHCT failure: */
- /* 0x1C4~0x1CD set no-zero value to avoid PCI configuration space 0x45[7]=1 */
+ /*
+ * PAGE 1:
+ * For Flextronics system Logo PCIHCT failure:
+ * 0x1C4~0x1CD are set to a non-zero value to avoid PCI configuration
+ * space 0x45[7]=1
+ */
{0x5e, 0x01},
{0x58, 0x00}, {0x59, 0x00}, {0x5a, 0x04}, {0x5b, 0x00}, {0x60, 0x24},
{0x61, 0x97}, {0x62, 0xF0}, {0x63, 0x09}, {0x80, 0x0F}, {0x81, 0xFF},
{0x82, 0xFF}, {0x83, 0x03},
- {0xC4, 0x22}, {0xC5, 0x22}, {0xC6, 0x22}, {0xC7, 0x22}, {0xC8, 0x22}, /* lzm add 080826 */
- {0xC9, 0x22}, {0xCA, 0x22}, {0xCB, 0x22}, {0xCC, 0x22}, {0xCD, 0x22}, /* lzm add 080826 */
+ /* lzm add 080826 */
+ {0xC4, 0x22}, {0xC5, 0x22}, {0xC6, 0x22}, {0xC7, 0x22}, {0xC8, 0x22},
+ /* lzm add 080826 */
+ {0xC9, 0x22}, {0xCA, 0x22}, {0xCB, 0x22}, {0xCC, 0x22}, {0xCD, 0x22},
{0xe2, 0x00},
@@ -66,21 +74,24 @@ static u8 MAC_REG_TABLE[][2] = {
{0x8f, 0x3f}, {0xc4, 0xff}, {0xc5, 0xff}, {0xc6, 0xff}, {0xc7, 0xff},
{0xc8, 0x00}, {0xc9, 0x00}, {0xca, 0x80}, {0xcb, 0x00},
- /* PAGA 0: */
+ /* PAGE 0: */
{0x5e, 0x00}, {0x9f, 0x03}
};
static u8 ZEBRA_AGC[] = {
0,
- 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
- 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62,
- 0x48, 0x47, 0x46, 0x45, 0x44, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
- 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16,
- 0x17, 0x17, 0x18, 0x18, 0x19, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
- 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21, 0x21, 0x21, 0x22, 0x22, 0x22, 0x23, 0x23, 0x24,
- 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
+ 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78, 0x77, 0x76,
+ 0x75, 0x74, 0x73, 0x72, 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A,
+ 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x48, 0x47, 0x46, 0x45,
+ 0x44, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
+ 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16, 0x17, 0x17, 0x18, 0x18,
+ 0x19, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
+ 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21, 0x21, 0x21, 0x22, 0x22,
+ 0x22, 0x23, 0x23, 0x24, 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
+ 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
};
static u32 ZEBRA_RF_RX_GAIN_TABLE[] = {
@@ -123,19 +134,27 @@ static u8 PlatformIORead1Byte(struct net_device *dev, u32 offset)
static void PlatformIOWrite1Byte(struct net_device *dev, u32 offset, u8 data)
{
write_nic_byte(dev, offset, data);
- read_nic_byte(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */
+ /*
+ * To make sure write operation is completed,
+ * 2005.11.09, by rcnjko.
+ */
+ read_nic_byte(dev, offset);
}
static void PlatformIOWrite2Byte(struct net_device *dev, u32 offset, u16 data)
{
write_nic_word(dev, offset, data);
- read_nic_word(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */
+ /*
+ * To make sure write operation is completed,
+ * 2005.11.09, by rcnjko.
+ */
+ read_nic_word(dev, offset);
}
static void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
{
if (offset == PhyAddr) {
- /* For Base Band configuration. */
+ /* For Base Band configuration. */
unsigned char cmdByte;
unsigned long dataBytes;
unsigned char idx;
@@ -146,16 +165,17 @@ static void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
/*
* 071010, rcnjko:
- * The critical section is only BB read/write race condition.
- * Assumption:
- * 1. We assume NO one will access BB at DIRQL, otherwise, system will crash for
+ * The critical section is only BB read/write race
+ * condition. Assumption:
+ * 1. We assume NO one will access BB at DIRQL, otherwise,
+ * the system will crash while
* acquiring the spinlock in such context.
* 2. PlatformIOWrite4Byte() MUST NOT be recursive.
*/
/* NdisAcquireSpinLock( &(pDevice->IoSpinLock) ); */
for (idx = 0; idx < 30; idx++) {
- /* Make sure command bit is clear before access it. */
+ /* Make sure the command bit is clear before accessing it. */
u1bTmp = PlatformIORead1Byte(dev, PhyAddr);
if ((u1bTmp & BIT7) == 0)
break;
@@ -164,14 +184,19 @@ static void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
}
for (idx = 0; idx < 3; idx++)
- PlatformIOWrite1Byte(dev, offset+1+idx, ((u8 *)&dataBytes)[idx]);
+ PlatformIOWrite1Byte(dev, offset+1+idx,
+ ((u8 *)&dataBytes)[idx]);
write_nic_byte(dev, offset, cmdByte);
/* NdisReleaseSpinLock( &(pDevice->IoSpinLock) ); */
} else {
write_nic_dword(dev, offset, data);
- read_nic_dword(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */
+ /*
+ * To make sure write operation is completed, 2005.11.09,
+ * by rcnjko.
+ */
+ read_nic_dword(dev, offset);
}
}
@@ -284,9 +309,13 @@ bool SetAntennaConfig87SE(struct net_device *dev,
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bAntennaSwitched = true;
- u8 ant_diversity_offset = 0x00; /* 0x00 = disabled, 0x80 = enabled */
+ /* 0x00 = disabled, 0x80 = enabled */
+ u8 ant_diversity_offset = 0x00;
- /* printk("SetAntennaConfig87SE(): DefaultAnt(%d), bAntDiversity(%d)\n", DefaultAnt, bAntDiversity); */
+ /*
+ * printk("SetAntennaConfig87SE(): DefaultAnt(%d), bAntDiversity(%d)\n",
+ * DefaultAnt, bAntDiversity);
+ */
/* Threshold for antenna diversity. */
write_phy_cck(dev, 0x0c, 0x09); /* Reg0c : 09 */
@@ -300,22 +329,27 @@ bool SetAntennaConfig87SE(struct net_device *dev,
/* Config CCK RX antenna. */
write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */
- write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset); /* Reg01 : 47 | ant_diversity_offset */
+
+ /* Reg01 : 47 | ant_diversity_offset */
+ write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset);
/* Config OFDM RX antenna. */
write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */
- write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset); /* Reg18 : 32 */
+ /* Reg18 : 32 */
+ write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset);
} else { /* main Antenna */
/* Mac register, main antenna */
write_nic_byte(dev, ANTSEL, 0x03);
/* Config CCK RX antenna. */
write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */
- write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset); /* Reg01 : 47 */
+ /* Reg01 : 47 */
+ write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset);
/* Config OFDM RX antenna. */
write_phy_ofdm(dev, 0x0D, 0x5c); /* Reg0d : 5c */
- write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset); /*Reg18 : 32 */
+ /* Reg18 : 32 */
+ write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset);
}
priv->CurrAntennaIndex = DefaultAnt; /* Update default settings. */
return bAntennaSwitched;
@@ -382,18 +416,23 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
RF_WriteReg(dev, 0x05, 0x059b); mdelay(1);
RF_WriteReg(dev, 0x06, 0x0081); mdelay(1);
RF_WriteReg(dev, 0x07, 0x01A0); mdelay(1);
-/* Don't write RF23/RF24 to make a difference between 87S C cut and D cut. asked by SD3 stevenl. */
+/*
+ * Don't write RF23/RF24 to make a difference between 87S C cut and D cut.
+ * Asked by SD3 stevenl.
+ */
RF_WriteReg(dev, 0x0a, 0x0001); mdelay(1);
RF_WriteReg(dev, 0x0b, 0x0418); mdelay(1);
if (d_cut) {
RF_WriteReg(dev, 0x0c, 0x0fbe); mdelay(1);
RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1);
- RF_WriteReg(dev, 0x0e, 0x0807); mdelay(1); /* RX LO buffer */
+ /* RX LO buffer */
+ RF_WriteReg(dev, 0x0e, 0x0807); mdelay(1);
} else {
RF_WriteReg(dev, 0x0c, 0x0fbe); mdelay(1);
RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1);
- RF_WriteReg(dev, 0x0e, 0x0806); mdelay(1); /* RX LO buffer */
+ /* RX LO buffer */
+ RF_WriteReg(dev, 0x0e, 0x0806); mdelay(1);
}
RF_WriteReg(dev, 0x0f, 0x0acc); mdelay(1);
@@ -408,19 +447,24 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
RF_WriteReg(dev, 0x05, 0x0203); mdelay(1); /* 203, 343 */
RF_WriteReg(dev, 0x06, 0x0200); mdelay(1); /* 400 */
- RF_WriteReg(dev, 0x00, 0x0137); mdelay(1); /* switch to reg16-reg30, and HSSI disable 137 */
+ /* switch to reg16-reg30, and HSSI disable 137 */
+ RF_WriteReg(dev, 0x00, 0x0137); mdelay(1);
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
- RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1); /* Z4 synthesizer loop filter setting, 392 */
+ /* Z4 synthesizer loop filter setting, 392 */
+ RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1);
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
- RF_WriteReg(dev, 0x00, 0x0037); mdelay(1); /* switch to reg0-reg15, and HSSI disable */
+ /* switch to reg0-reg15, and HSSI disable */
+ RF_WriteReg(dev, 0x00, 0x0037); mdelay(1);
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
- RF_WriteReg(dev, 0x04, 0x0160); mdelay(1); /* CBC on, Tx Rx disable, High gain */
+ /* CBC on, Tx Rx disable, High gain */
+ RF_WriteReg(dev, 0x04, 0x0160); mdelay(1);
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
- RF_WriteReg(dev, 0x07, 0x0080); mdelay(1); /* Z4 setted channel 1 */
+ /* Z4 set channel 1 */
+ RF_WriteReg(dev, 0x07, 0x0080); mdelay(1);
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
RF_WriteReg(dev, 0x02, 0x088D); mdelay(1); /* LC calibration */
@@ -428,7 +472,8 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
- RF_WriteReg(dev, 0x00, 0x0137); mdelay(1); /* switch to reg16-reg30 137, and HSSI disable 137 */
+ /* switch to reg16-reg30 137, and HSSI disable 137 */
+ RF_WriteReg(dev, 0x00, 0x0137); mdelay(1);
mdelay(10); /* Delay 10 ms. */ /* 0xfd */
RF_WriteReg(dev, 0x07, 0x0000); mdelay(1);
@@ -442,46 +487,58 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
/* For crystal calibration, added by Roger, 2007.12.11. */
if (priv->bXtalCalibration) { /* reg 30. */
/*
- * enable crystal calibration.
- * RF Reg[30], (1)Xin:[12:9], Xout:[8:5], addr[4:0].
- * (2)PA Pwr delay timer[15:14], default: 2.4us, set BIT15=0
- * (3)RF signal on/off when calibration[13], default: on, set BIT13=0.
- * So we should minus 4 BITs offset.
+ * enable crystal calibration.
+ * RF Reg[30], (1)Xin:[12:9], Xout:[8:5], addr[4:0].
+ * (2)PA Pwr delay timer[15:14], default: 2.4us,
+ * set BIT15=0
+ * (3)RF signal on/off when calibration[13], default: on,
+ * set BIT13=0.
+ * So we should subtract a 4-bit offset.
*/
- RF_WriteReg(dev, 0x0f, (priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) | BIT11 | BIT9); mdelay(1);
+ RF_WriteReg(dev, 0x0f, (priv->XtalCal_Xin<<5) |
+ (priv->XtalCal_Xout<<1) | BIT11 | BIT9); mdelay(1);
printk("ZEBRA_Config_85BASIC_HardCode(): (%02x)\n",
- (priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) | BIT11 | BIT9);
+ (priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) |
+ BIT11 | BIT9);
} else {
/* using default value. Xin=6, Xout=6. */
RF_WriteReg(dev, 0x0f, 0x0acc); mdelay(1);
}
-
- RF_WriteReg(dev, 0x00, 0x00bf); mdelay(1); /* switch to reg0-reg15, and HSSI enable */
- RF_WriteReg(dev, 0x0d, 0x08df); mdelay(1); /* Rx BB start calibration, 00c//+edward */
- RF_WriteReg(dev, 0x02, 0x004d); mdelay(1); /* temperature meter off */
+ /* switch to reg0-reg15, and HSSI enable */
+ RF_WriteReg(dev, 0x00, 0x00bf); mdelay(1);
+ /* Rx BB start calibration, 00c//+edward */
+ RF_WriteReg(dev, 0x0d, 0x08df); mdelay(1);
+ /* temperature meter off */
+ RF_WriteReg(dev, 0x02, 0x004d); mdelay(1);
RF_WriteReg(dev, 0x04, 0x0975); mdelay(1); /* Rx mode */
mdelay(10); /* Delay 10 ms. */ /* 0xfe */
mdelay(10); /* Delay 10 ms. */ /* 0xfe */
mdelay(10); /* Delay 10 ms. */ /* 0xfe */
- RF_WriteReg(dev, 0x00, 0x0197); mdelay(1); /* Rx mode*/ /*+edward */
- RF_WriteReg(dev, 0x05, 0x05ab); mdelay(1); /* Rx mode*/ /*+edward */
- RF_WriteReg(dev, 0x00, 0x009f); mdelay(1); /* Rx mode*/ /*+edward */
- RF_WriteReg(dev, 0x01, 0x0000); mdelay(1); /* Rx mode*/ /*+edward */
- RF_WriteReg(dev, 0x02, 0x0000); mdelay(1); /* Rx mode*/ /*+edward */
+ /* Rx mode*/ /*+edward */
+ RF_WriteReg(dev, 0x00, 0x0197); mdelay(1);
+ /* Rx mode*/ /*+edward */
+ RF_WriteReg(dev, 0x05, 0x05ab); mdelay(1);
+ /* Rx mode*/ /*+edward */
+ RF_WriteReg(dev, 0x00, 0x009f); mdelay(1);
+ /* Rx mode*/ /*+edward */
+ RF_WriteReg(dev, 0x01, 0x0000); mdelay(1);
+ /* Rx mode*/ /*+edward */
+ RF_WriteReg(dev, 0x02, 0x0000); mdelay(1);
/* power save parameters. */
u1b24E = read_nic_byte(dev, 0x24E);
write_nic_byte(dev, 0x24E, (u1b24E & (~(BIT5|BIT6))));
- /*=============================================================================
+ /*======================================================================
*
- *===========================================================================
+ *======================================================================
* CCKCONF.TXT
- *===========================================================================
+ *======================================================================
*
* [POWER SAVE] Power Saving Parameters by jong. 2007-11-27
* CCK reg0x00[7]=1'b1 :power saving for TX (default)
* CCK reg0x00[6]=1'b1: power saving for RX (default)
- * CCK reg0x06[4]=1'b1: turn off channel estimation related circuits if not doing channel estimation.
+ * CCK reg0x06[4]=1'b1: turn off channel estimation related
+ * circuits if not doing channel estimation.
* CCK reg0x06[3]=1'b1: turn off unused circuits before cca = 1
* CCK reg0x06[2]=1'b1: turn off cck's circuit if macrst =0
*/
@@ -501,9 +558,9 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
/*
- *===========================================================================
+ *======================================================================
* AGC.txt
- *===========================================================================
+ *======================================================================
*/
write_phy_ofdm(dev, 0x00, 0x12);
@@ -526,11 +583,11 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
PlatformIOWrite4Byte(dev, PhyAddr, 0x00001080); /* Annie, 2006-05-05 */
/*
- *===========================================================================
+ *======================================================================
*
- *===========================================================================
+ *======================================================================
* OFDMCONF.TXT
- *===========================================================================
+ *======================================================================
*/
for (i = 0; i < 60; i++) {
@@ -544,12 +601,16 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
}
/*
- *===========================================================================
+ *======================================================================
* by amy for antenna
- *===========================================================================
+ *======================================================================
*/
- /* Config Sw/Hw Combinational Antenna Diversity. Added by Roger, 2008.02.26. */
- SetAntennaConfig87SE(dev, priv->bDefaultAntenna1, priv->bSwAntennaDiverity);
+ /*
+ * Config Sw/Hw Combinational Antenna Diversity. Added by Roger,
+ * 2008.02.26.
+ */
+ SetAntennaConfig87SE(dev, priv->bDefaultAntenna1,
+ priv->bSwAntennaDiverity);
}
@@ -560,7 +621,8 @@ void UpdateInitialGain(struct net_device *dev)
/* lzm add 080826 */
if (priv->eRFPowerState != eRfOn) {
/* Don't access BB/RF under disable PLL situation.
- * RT_TRACE(COMP_DIG, DBG_LOUD, ("UpdateInitialGain - pHalData->eRFPowerState!=eRfOn\n"));
+ * RT_TRACE(COMP_DIG, DBG_LOUD, ("UpdateInitialGain -
+ * pHalData->eRFPowerState!=eRfOn\n"));
* Back to the original state
*/
priv->InitialGain = priv->InitialGainBackUp;
@@ -635,7 +697,7 @@ static void InitTxPwrTracking87SE(struct net_device *dev)
u4bRfReg = RF_ReadReg(dev, 0x02);
/* Enable Thermal meter indication. */
- RF_WriteReg(dev, 0x02, u4bRfReg|PWR_METER_EN); mdelay(1);
+ RF_WriteReg(dev, 0x02, u4bRfReg|PWR_METER_EN); mdelay(1);
}
static void PhyConfig8185(struct net_device *dev)
@@ -645,16 +707,18 @@ static void PhyConfig8185(struct net_device *dev)
priv->RFProgType = read_nic_byte(dev, CONFIG4) & 0x03;
/* RF config */
ZEBRA_Config_85BASIC_HardCode(dev);
- /* Set default initial gain state to 4, approved by SD3 DZ, by Bruce, 2007-06-06. */
+ /* Set default initial gain state to 4, approved by SD3 DZ, by Bruce,
+ * 2007-06-06.
+ */
if (priv->bDigMechanism) {
if (priv->InitialGain == 0)
priv->InitialGain = 4;
}
/*
- * Enable thermal meter indication to implement TxPower tracking on 87SE.
- * We initialize thermal meter here to avoid unsuccessful configuration.
- * Added by Roger, 2007.12.11.
+ * Enable thermal meter indication to implement TxPower tracking
+ * on 87SE. We initialize thermal meter here to avoid unsuccessful
+ * configuration. Added by Roger, 2007.12.11.
*/
if (priv->bTxPowerTrack)
InitTxPwrTracking87SE(dev);
@@ -667,7 +731,10 @@ static void PhyConfig8185(struct net_device *dev)
static void HwConfigureRTL8185(struct net_device *dev)
{
- /* RTL8185_TODO: Determine Retrylimit, TxAGC, AutoRateFallback control. */
+ /*
+ * RTL8185_TODO: Determine Retrylimit, TxAGC,
+ * AutoRateFallback control.
+ */
u8 bUNIVERSAL_CONTROL_RL = 0;
u8 bUNIVERSAL_CONTROL_AGC = 1;
u8 bUNIVERSAL_CONTROL_ANT = 1;
@@ -691,7 +758,7 @@ static void HwConfigureRTL8185(struct net_device *dev)
write_nic_byte(dev, OFDM_TXAGC, 128);
val8 = val8 & 0xfe;
} else {
- val8 = val8 | 0x01 ;
+ val8 = val8 | 0x01;
}
@@ -715,7 +782,9 @@ static void HwConfigureRTL8185(struct net_device *dev)
if (bAUTO_RATE_FALLBACK_CTL) {
val8 |= RATE_FALLBACK_CTL_ENABLE | RATE_FALLBACK_CTL_AUTO_STEP1;
- /* <RJ_TODO_8185B> We shall set up the ARFR according to user's setting. */
+ /* <RJ_TODO_8185B> We shall set up the ARFR according
+ * to user's setting.
+ */
PlatformIOWrite2Byte(dev, ARFR, 0x0fff); /* set 1M ~ 54Mbps. */
}
write_nic_byte(dev, RATE_FALLBACK, val8);
@@ -724,9 +793,9 @@ static void HwConfigureRTL8185(struct net_device *dev)
static void MacConfig_85BASIC_HardCode(struct net_device *dev)
{
/*
- *==========================================================================
+ *======================================================================
* MACREG.TXT
- *==========================================================================
+ *======================================================================
*/
int nLinesRead = 0;
u32 u4bRegOffset, u4bRegValue, u4bPageIndex = 0;
@@ -745,7 +814,7 @@ static void MacConfig_85BASIC_HardCode(struct net_device *dev)
write_nic_byte(dev, u4bRegOffset, (u8)u4bRegValue);
}
- /* ============================================================================ */
+ /* ================================================================= */
}
static void MacConfig_85BASIC(struct net_device *dev)
@@ -754,12 +823,14 @@ static void MacConfig_85BASIC(struct net_device *dev)
u8 u1DA;
MacConfig_85BASIC_HardCode(dev);
- /* ============================================================================ */
+ /* ================================================================= */
/* Follow TID_AC_MAP of WMac. */
write_nic_word(dev, TID_AC_MAP, 0xfa50);
- /* Interrupt Migration, Jong suggested we use set 0x0000 first, 2005.12.14, by rcnjko. */
+ /* Interrupt Migration, Jong suggested we set 0x0000 first,
+ * 2005.12.14, by rcnjko.
+ */
write_nic_word(dev, IntMig, 0x0000);
/* Prevent TPC to cause CRC error. Added by Annie, 2006-06-10. */
@@ -768,7 +839,11 @@ static void MacConfig_85BASIC(struct net_device *dev)
PlatformIOWrite1Byte(dev, 0x1F8, 0x00);
/* Asked for by SD3 CM Lin, 2006.06.27, by rcnjko. */
- /* power save parameter based on "87SE power save parameters 20071127.doc", as follow. */
+
+ /*
+ * power save parameters based on
+ * "87SE power save parameters 20071127.doc", as follows.
+ */
/* Enable DA10 TX power saving */
u1DA = read_nic_byte(dev, PHYPR);
@@ -803,31 +878,45 @@ static void ActUpdateChannelAccessSetting(struct net_device *dev,
/*
* <RJ_TODO_8185B>
- * TODO: We still don't know how to set up these registers, just follow WMAC to
- * verify 8185B FPAG.
+ * TODO: We still don't know how to set up these registers,
+ * just follow WMAC to verify 8185B FPGA.
*
* <RJ_TODO_8185B>
* Jong said CWmin/CWmax register are not functional in 8185B,
- * so we shall fill channel access realted register into AC parameter registers,
+ * so we shall fill channel access related register into AC
+ * parameter registers,
* even in nQBss.
*/
- ChnlAccessSetting->SIFS_Timer = 0x22; /* Suggested by Jong, 2005.12.08. */
+
+ /* Suggested by Jong, 2005.12.08. */
+ ChnlAccessSetting->SIFS_Timer = 0x22;
ChnlAccessSetting->DIFS_Timer = 0x1C; /* 2006.06.02, by rcnjko. */
ChnlAccessSetting->SlotTimeTimer = 9; /* 2006.06.02, by rcnjko. */
- ChnlAccessSetting->EIFS_Timer = 0x5B; /* Suggested by wcchu, it is the default value of EIFS register, 2005.12.08. */
+ /*
+ * Suggested by wcchu, it is the default value of EIFS register,
+ * 2005.12.08.
+ */
+ ChnlAccessSetting->EIFS_Timer = 0x5B;
ChnlAccessSetting->CWminIndex = 3; /* 2006.06.02, by rcnjko. */
ChnlAccessSetting->CWmaxIndex = 7; /* 2006.06.02, by rcnjko. */
write_nic_byte(dev, SIFS, ChnlAccessSetting->SIFS_Timer);
- write_nic_byte(dev, SLOT, ChnlAccessSetting->SlotTimeTimer); /* Rewrited from directly use PlatformEFIOWrite1Byte(), by Annie, 2006-03-29. */
+ /*
+ * Rewritten from directly using PlatformEFIOWrite1Byte(),
+ * by Annie, 2006-03-29.
+ */
+ write_nic_byte(dev, SLOT, ChnlAccessSetting->SlotTimeTimer);
write_nic_byte(dev, EIFS, ChnlAccessSetting->EIFS_Timer);
- write_nic_byte(dev, AckTimeOutReg, 0x5B); /* <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS register, 2005.12.08. */
+ /*
+ * <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS
+ * register, 2005.12.08.
+ */
+ write_nic_byte(dev, AckTimeOutReg, 0x5B);
- for (eACI = 0; eACI < AC_MAX; eACI++) {
+ for (eACI = 0; eACI < AC_MAX; eACI++)
write_nic_byte(dev, ACM_CONTROL, 0);
- }
}
static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
@@ -837,7 +926,10 @@ static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
u8 btSupportedWirelessMode = GetSupportedWirelessMode8185(dev);
if ((btWirelessMode & btSupportedWirelessMode) == 0) {
- /* Don't switch to unsupported wireless mode, 2006.02.15, by rcnjko. */
+ /*
+ * Don't switch to unsupported wireless mode, 2006.02.15,
+ * by rcnjko.
+ */
DMESGW("ActSetWirelessMode8185(): WirelessMode(%d) is not supported (%d)!\n",
btWirelessMode, btSupportedWirelessMode);
return;
@@ -859,11 +951,11 @@ static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
}
}
- /*
- * 2. Swtich band: RF or BB specific actions,
- * for example, refresh tables in omc8255, or change initial gain if necessary.
- * Nothing to do for Zebra to switch band.
- * Update current wireless mode if we switch to specified band successfully.
+ /*
+ * 2. Switch band: RF or BB specific actions,
+ * for example, refresh tables in omc8255, or change initial gain if
+ * necessary. Nothing to do for Zebra to switch band. Update current
+ * wireless mode if we switch to specified band successfully.
*/
ieee->mode = (WIRELESS_MODE)btWirelessMode;
@@ -876,7 +968,8 @@ static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
else if (ieee->mode == WIRELESS_MODE_G)
DMESG("WIRELESS_MODE_G\n");
- ActUpdateChannelAccessSetting( dev, ieee->mode, &priv->ChannelAccessSetting);
+ ActUpdateChannelAccessSetting(dev, ieee->mode,
+ &priv->ChannelAccessSetting);
}
void rtl8185b_irq_enable(struct net_device *dev)
@@ -892,7 +985,7 @@ static void MgntDisconnectIBSS(struct net_device *dev)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u8 i;
- for (i = 0; i < 6 ; i++)
+ for (i = 0; i < 6; i++)
priv->ieee80211->current_network.bssid[i] = 0x55;
@@ -901,11 +994,12 @@ static void MgntDisconnectIBSS(struct net_device *dev)
/*
* Stop Beacon.
*
- * Vista add a Adhoc profile, HW radio off until OID_DOT11_RESET_REQUEST
- * Driver would set MSR=NO_LINK, then HW Radio ON, MgntQueue Stuck.
- * Because Bcn DMA isn't complete, mgnt queue would stuck until Bcn packet send.
+ * Vista adds an Adhoc profile, HW radio off until
+ * OID_DOT11_RESET_REQUEST. Driver would set MSR=NO_LINK,
+ * then HW Radio ON, MgntQueue stuck. Because Bcn DMA isn't
+ * complete, mgnt queue would be stuck until Bcn packet is sent.
*
- * Disable Beacon Queue Own bit, suggested by jong
+ * Disable Beacon Queue Own bit, suggested by jong
*/
ieee80211_stop_send_beacons(priv->ieee80211);
@@ -938,12 +1032,14 @@ static void MgntDisconnectAP(struct net_device *dev, u8 asRsn)
* Commented out by rcnjko, 2005.01.27:
* I move SecClearAllKeys() to MgntActSet_802_11_DISASSOCIATE().
*
- * 2004/09/15, kcwu, the key should be cleared, or the new handshaking will not success
+ * 2004/09/15, kcwu, the key should be cleared, or the new
+ * handshaking will not succeed
*
- * In WPA WPA2 need to Clear all key ... because new key will set after new handshaking.
- * 2004.10.11, by rcnjko.
+ * In WPA/WPA2 we need to clear all keys ... because a new key
+ * will be set after the new handshaking. 2004.10.11, by rcnjko.
*/
- MlmeDisassociateRequest(dev, priv->ieee80211->current_network.bssid, asRsn);
+ MlmeDisassociateRequest(dev, priv->ieee80211->current_network.bssid,
+ asRsn);
priv->ieee80211->state = IEEE80211_NOLINK;
}
@@ -964,11 +1060,13 @@ static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
if (priv->ieee80211->iw_mode == IW_MODE_INFRA) {
/*
- * We clear key here instead of MgntDisconnectAP() because that
- * MgntActSet_802_11_DISASSOCIATE() is an interface called by OS,
- * e.g. OID_802_11_DISASSOCIATE in Windows while as MgntDisconnectAP() is
- * used to handle disassociation related things to AP, e.g. send Disassoc
- * frame to AP. 2005.01.27, by rcnjko.
+			 * We clear the key here instead of in MgntDisconnectAP()
+			 * because MgntActSet_802_11_DISASSOCIATE()
+			 * is an interface called by the OS, e.g.
+			 * OID_802_11_DISASSOCIATE in Windows, whereas
+			 * MgntDisconnectAP() is used to handle
+			 * disassociation related things to the AP, e.g. send a
+			 * Disassoc frame to the AP. 2005.01.27, by rcnjko.
*/
MgntDisconnectAP(dev, asRsn);
}
@@ -979,12 +1077,14 @@ static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
/*
* Description:
* Change RF Power State.
- * Note that, only MgntActSet_RF_State() is allowed to set HW_VAR_RF_STATE.
+ * Note that, only MgntActSet_RF_State() is allowed to set
+ * HW_VAR_RF_STATE.
*
* Assumption:
* PASSIVE LEVEL.
*/
-static bool SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState)
+static bool SetRFPowerState(struct net_device *dev,
+ RT_RF_POWER_STATE eRFPowerState)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult = false;
@@ -997,7 +1097,8 @@ static bool SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerSt
return bResult;
}
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u32 ChangeSource)
+bool MgntActSet_RF_State(struct net_device *dev,
+ RT_RF_POWER_STATE StateToSet, u32 ChangeSource)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bActionAllowed = false;
@@ -1006,8 +1107,9 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u
u16 RFWaitCounter = 0;
unsigned long flag;
/*
- * Prevent the race condition of RF state change. By Bruce, 2007-11-28.
- * Only one thread can change the RF state at one time, and others should wait to be executed.
+ * Prevent the race condition of RF state change. By Bruce,
+ * 2007-11-28. Only one thread can change the RF state at one time,
+ * and others should wait to be executed.
*/
while (true) {
spin_lock_irqsave(&priv->rf_ps_lock, flag);
@@ -1018,7 +1120,10 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u
RFWaitCounter++;
udelay(1000); /* 1 ms */
- /* Wait too long, return FALSE to avoid to be stuck here. */
+ /*
+ * Wait too long, return FALSE to avoid
+ * to be stuck here.
+ */
if (RFWaitCounter > 1000) { /* 1sec */
printk("MgntActSet_RF_State(): Wait too long to set RF\n");
/* TODO: Reset RF state? */
@@ -1036,8 +1141,10 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u
switch (StateToSet) {
case eRfOn:
/*
- * Turn On RF no matter the IPS setting because we need to update the RF state to Ndis under Vista, or
- * the Windows does not allow the driver to perform site survey any more. By Bruce, 2007-10-02.
+ * Turn On RF no matter the IPS setting because we need to
+ * update the RF state to Ndis under Vista, or Windows
+ * does not allow the driver to perform site survey any
+ * more. By Bruce, 2007-10-02.
*/
priv->RfOffReason &= (~ChangeSource);
@@ -1045,7 +1152,8 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u
priv->RfOffReason = 0;
bActionAllowed = true;
- if (rtState == eRfOff && ChangeSource >= RF_CHANGE_BY_HW)
+ if (rtState == eRfOff &&
+ ChangeSource >= RF_CHANGE_BY_HW)
bConnectBySSID = true;
}
break;
@@ -1056,13 +1164,18 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u
if (priv->RfOffReason > RF_CHANGE_BY_IPS) {
/*
* 060808, Annie:
- * Disconnect to current BSS when radio off. Asked by QuanTa.
+ * Disconnect from current BSS when radio off.
+ * Asked by QuanTa.
*
- * Calling MgntDisconnect() instead of MgntActSet_802_11_DISASSOCIATE(),
- * because we do NOT need to set ssid to dummy ones.
+ * Calling MgntDisconnect() instead of
+ * MgntActSet_802_11_DISASSOCIATE(), because
+ * we do NOT need to set ssid to dummy ones.
*/
MgntDisconnect(dev, disas_lv_ss);
- /* Clear content of bssDesc[] and bssDesc4Query[] to avoid reporting old bss to UI. */
+ /*
+ * Clear content of bssDesc[] and bssDesc4Query[]
+ * to avoid reporting old bss to UI.
+ */
}
priv->RfOffReason |= ChangeSource;
@@ -1092,18 +1205,21 @@ static void InactivePowerSave(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
/*
- * This flag "bSwRfProcessing", indicates the status of IPS procedure, should be set if the IPS workitem
- * is really scheduled.
- * The old code, sets this flag before scheduling the IPS workitem and however, at the same time the
- * previous IPS workitem did not end yet, fails to schedule the current workitem. Thus, bSwRfProcessing
- * blocks the IPS procedure of switching RF.
+ * This flag "bSwRfProcessing", indicates the status of IPS
+ * procedure, should be set if the IPS workitem is really
+ * scheduled. The old code, sets this flag before scheduling the
+ * IPS workitem and however, at the same time the previous IPS
+ * workitem did not end yet, fails to schedule the current
+ * workitem. Thus, bSwRfProcessing blocks the IPS procedure of
+ * switching RF.
*/
priv->bSwRfProcessing = true;
MgntActSet_RF_State(dev, priv->eInactivePowerState, RF_CHANGE_BY_IPS);
/*
- * To solve CAM values miss in RF OFF, rewrite CAM values after RF ON. By Bruce, 2007-09-20.
+	 * To avoid CAM values being lost while RF is OFF, rewrite the
+	 * CAM values after RF ON. By Bruce, 2007-09-20.
*/
priv->bSwRfProcessing = false;
@@ -1122,10 +1238,10 @@ void IPSEnter(struct net_device *dev)
/*
* Do not enter IPS in the following conditions:
- * (1) RF is already OFF or Sleep
- * (2) bSwRfProcessing (indicates the IPS is still under going)
- * (3) Connected (only disconnected can trigger IPS)
- * (4) IBSS (send Beacon)
+	 * (1) RF is already OFF or Sleep
+	 * (2) bSwRfProcessing (indicates the IPS is still under going)
+	 * (3) Connected (only disconnected can trigger IPS)
+	 * (4) IBSS (send Beacon)
* (5) AP mode (send Beacon)
*/
if (rtState == eRfOn && !priv->bSwRfProcessing
@@ -1141,7 +1257,9 @@ void IPSLeave(struct net_device *dev)
RT_RF_POWER_STATE rtState;
if (priv->bInactivePs) {
rtState = priv->eRFPowerState;
- if ((rtState == eRfOff || rtState == eRfSleep) && (!priv->bSwRfProcessing) && priv->RfOffReason <= RF_CHANGE_BY_IPS) {
+ if ((rtState == eRfOff || rtState == eRfSleep) &&
+		    !priv->bSwRfProcessing &&
+		    priv->RfOffReason <= RF_CHANGE_BY_IPS) {
priv->eInactivePowerState = eRfOn;
InactivePowerSave(dev);
}
@@ -1170,27 +1288,32 @@ void rtl8185b_adapter_start(struct net_device *dev)
HwConfigureRTL8185(dev);
write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
write_nic_word(dev, MAC4, ((u32 *)dev->dev_addr)[1] & 0xffff);
- write_nic_byte(dev, MSR, read_nic_byte(dev, MSR) & 0xf3); /* default network type to 'No Link' */
+ /* default network type to 'No Link' */
+ write_nic_byte(dev, MSR, read_nic_byte(dev, MSR) & 0xf3);
write_nic_word(dev, BcnItv, 100);
write_nic_word(dev, AtimWnd, 2);
PlatformIOWrite2Byte(dev, FEMR, 0xFFFF);
write_nic_byte(dev, WPA_CONFIG, 0);
MacConfig_85BASIC(dev);
- /* Override the RFSW_CTRL (MAC offset 0x272-0x273), 2006.06.07, by rcnjko. */
+ /* Override the RFSW_CTRL (MAC offset 0x272-0x273), 2006.06.07,
+ * by rcnjko.
+ */
/* BT_DEMO_BOARD type */
PlatformIOWrite2Byte(dev, RFSW_CTRL, 0x569a);
/*
- *---------------------------------------------------------------------------
+ *---------------------------------------------------------------------
* Set up PHY related.
- *---------------------------------------------------------------------------
+ *---------------------------------------------------------------------
*/
/* Enable Config3.PARAM_En to revise AnaaParm. */
write_nic_byte(dev, CR9346, 0xc0); /* enable config register write */
tmpu8 = read_nic_byte(dev, CONFIG3);
write_nic_byte(dev, CONFIG3, (tmpu8 | CONFIG3_PARM_En));
/* Turn on Analog power. */
- /* Asked for by William, otherwise, MAC 3-wire can't work, 2006.06.27, by rcnjko. */
+ /* Asked for by William, otherwise, MAC 3-wire can't work,
+ * 2006.06.27, by rcnjko.
+ */
write_nic_dword(dev, ANAPARAM2, ANAPARM2_ASIC_ON);
write_nic_dword(dev, ANAPARAM, ANAPARM_ASIC_ON);
write_nic_word(dev, ANAPARAM3, 0x0010);
@@ -1225,7 +1348,8 @@ void rtl8185b_adapter_start(struct net_device *dev)
/*
* We assume RegWirelessMode has already been initialized before,
* however, we have to validate the wireless mode here and provide a
- * reasonable initialized value if necessary. 2005.01.13, by rcnjko.
+ * reasonable initialized value if necessary. 2005.01.13,
+ * by rcnjko.
*/
SupportedWirelessMode = GetSupportedWirelessMode8185(dev);
if ((ieee->mode != WIRELESS_MODE_B) &&
@@ -1272,14 +1396,15 @@ void rtl8185b_adapter_start(struct net_device *dev)
MgntActSet_RF_State(dev, eRfOn, 0);
}
/*
- * If inactive power mode is enabled, disable rf while in disconnected state.
+ * If inactive power mode is enabled, disable rf while in
+ * disconnected state.
*/
if (priv->bInactivePs)
MgntActSet_RF_State(dev , eRfOff, RF_CHANGE_BY_IPS);
ActSetWirelessMode8185(dev, (u8)(InitWirelessMode));
- /* ----------------------------------------------------------------------------- */
+ /* ----------------------------------------------------------------- */
rtl8185b_irq_enable(dev);
@@ -1296,14 +1421,15 @@ void rtl8185b_rx_enable(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
DMESG("NIC in promisc mode");
- if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || \
- dev->flags & IFF_PROMISC) {
+	if (priv->ieee80211->iw_mode == IW_MODE_MONITOR ||
+	    dev->flags & IFF_PROMISC) {
priv->ReceiveConfig = priv->ReceiveConfig & (~RCR_APM);
priv->ReceiveConfig = priv->ReceiveConfig | RCR_AAP;
}
if (priv->ieee80211->iw_mode == IW_MODE_MONITOR)
- priv->ReceiveConfig = priv->ReceiveConfig | RCR_ACF | RCR_APWRMGT | RCR_AICV;
+ priv->ReceiveConfig = priv->ReceiveConfig | RCR_ACF |
+ RCR_APWRMGT | RCR_AICV;
if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
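The PlatformIOWrite4Byte() hunks above reflow the baseband (BB) write path, which polls the PhyAddr command bit (BIT7) until it clears before issuing a new command. Below is a minimal sketch of that bounded-poll pattern; the helper name and the 30-attempt/10 us budget are assumptions for illustration, and only read_nic_byte(), PhyAddr and BIT7 come from the driver itself (the driver's existing headers are assumed to be in scope).

/*
 * Hedged sketch: wait for the BB command bit to clear before issuing a
 * new command, giving up after a bounded number of attempts instead of
 * spinning forever.
 */
static bool rtl8185_bb_ready(struct net_device *dev)
{
	u8 status;
	int tries;

	for (tries = 0; tries < 30; tries++) {
		status = read_nic_byte(dev, PhyAddr);
		if (!(status & BIT7))
			return true;	/* command bit cleared, BB is idle */
		udelay(10);		/* short pause before polling again */
	}
	return false;			/* still busy, caller should bail out */
}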
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 1639a45da94..0a617b42cc9 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -I$(src)/include
-
r8188eu-y := \
core/rtw_ap.o \
core/rtw_br_ext.o \
@@ -30,7 +28,6 @@ r8188eu-y := \
hal/HalPhyRf.o \
hal/HalPhyRf_8188e.o \
hal/HalPwrSeqCmd.o \
- hal/Hal8188EFWImg_CE.o \
hal/Hal8188EPwrSeq.o \
hal/Hal8188ERateAdaptive.o\
hal/hal_intf.o \
@@ -67,4 +64,4 @@ r8188eu-y := \
obj-$(CONFIG_R8188EU) := r8188eu.o
-ccflags-y += -D__CHECK_ENDIAN__
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
index e50aa50bdb4..f7f389c40e7 100644
--- a/drivers/staging/rtl8188eu/TODO
+++ b/drivers/staging/rtl8188eu/TODO
@@ -2,7 +2,6 @@ TODO:
- find and remove remaining code valid only for 5 GHz. Most of the obvious
ones have been removed, but things like channel > 14 still exist.
- find and remove any code for other chips that is left over
-- convert to external firmware
- convert any remaining unusual variable types
- find codes that can use %pM and %Nph formatting
- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 2c73823d224..2c678f40957 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -348,7 +348,7 @@ void expire_timeout_chk(struct adapter *padapter)
if (psta->state & WIFI_SLEEP_STATE) {
if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
- /* to check if alive by another methods if staion is at ps mode. */
+ /* to check if alive by other methods if station is in ps mode. */
psta->expire_to = pstapriv->expire_to;
psta->state |= WIFI_STA_ALIVE_CHK_STATE;
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
index fbca394cf4f..9f40742ee5c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -527,7 +527,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
case NAT25_CHECK:
return -1;
case NAT25_INSERT:
- /* some muticast with source IP is all zero, maybe other case is illegal */
+ /* some multicast with source IP is all zero, maybe other case is illegal */
/* in class A, B, C, host address is all zero or all one is illegal */
if (iph->saddr == 0)
return 0;
@@ -677,9 +677,8 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
switch (method) {
case NAT25_CHECK:
if (!memcmp(skb->data+ETH_ALEN, ipx->ipx_source.node, ETH_ALEN))
- DEBUG_INFO("NAT25: Check IPX skb_copy\n");
+ DEBUG_INFO("NAT25: Check IPX skb_copy\n");
return 0;
- return -1;
case NAT25_INSERT:
DEBUG_INFO("NAT25: Insert IPX, Dest =%08x,%02x%02x%02x%02x%02x%02x,%04x Source =%08x,%02x%02x%02x%02x%02x%02x,%04x\n",
ipx->ipx_dest.net,
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9632ef48fbc..f45f4eddb74 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -218,7 +218,7 @@ _func_enter_;
_func_exit_;
}
-int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
@@ -1162,7 +1162,7 @@ _func_enter_;
else
memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
- /* jeff: set this becasue at least sw key is ready */
+ /* jeff: set this because at least sw key is ready */
padapter->securitypriv.busetkipkey = true;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
@@ -1667,7 +1667,7 @@ static void traffic_status_watchdog(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
}
-void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
{
struct mlme_priv *pmlmepriv;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 869434c4cf6..806f56f1c43 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -159,7 +159,7 @@ Efuse_CalculateWordCnts(u8 word_en)
/* */
/* Description: */
/* Execute E-Fuse read byte operation. */
-/* Refered from SD1 Richard. */
+/* Referred from SD1 Richard. */
/* */
/* Assumption: */
/* 1. Boot from E-Fuse and successfully auto-load. */
@@ -214,7 +214,7 @@ ReadEFuseByte(
/* Description: */
/* 1. Execute E-Fuse read byte operation according as map offset and */
/* save to E-Fuse table. */
-/* 2. Refered from SD1 Richard. */
+/* 2. Referred from SD1 Richard. */
/* */
/* Assumption: */
/* 1. Boot from E-Fuse and successfully auto-load. */
@@ -542,7 +542,7 @@ u8 rtw_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
{
u8 offset, word_en;
u8 *map;
- u8 newdata[PGPKT_DATA_SIZE];
+ u8 newdata[PGPKT_DATA_SIZE + 1];
s32 i, idx;
u8 ret = _SUCCESS;
u16 mapLen = 0;
@@ -564,7 +564,7 @@ u8 rtw_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
offset = (addr >> 3);
word_en = 0xF;
- _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE + 1);
i = addr & 0x7; /* index of one package */
idx = 0; /* data index */
@@ -634,7 +634,7 @@ u8 rtw_BT_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data
{
u8 offset, word_en;
u8 *map;
- u8 newdata[PGPKT_DATA_SIZE];
+ u8 newdata[PGPKT_DATA_SIZE + 1];
s32 i, idx;
u8 ret = _SUCCESS;
u16 mapLen = 0;
@@ -656,7 +656,7 @@ u8 rtw_BT_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data
offset = (addr >> 3);
word_en = 0xF;
- _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE + 1);
i = addr & 0x7; /* index of one package */
idx = 0; /* data index */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 6fc77428e83..e6f98fb8f11 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -1129,7 +1129,7 @@ void rtw_macaddr_cfg(u8 *mac_addr)
mac[3] = 0x87;
mac[4] = 0x00;
mac[5] = 0x00;
- /* use default mac addresss */
+ /* use default mac address */
memcpy(mac_addr, mac, ETH_ALEN);
DBG_88E("MAC Address from efuse error, assign default one !!!\n");
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ea6607196d8..ac3535d33a4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -557,7 +557,7 @@ _func_enter_;
sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
rssi_final = (src->Rssi+dst->Rssi*4)/5;
} else {
- /* bss info not receving from the right channel, use the original RX signal infos */
+ /* bss info not receiving from the right channel, use the original RX signal infos */
ss_final = dst->PhyInfo.SignalStrength;
sq_final = dst->PhyInfo.SignalQuality;
rssi_final = dst->Rssi;
@@ -636,7 +636,7 @@ _func_enter_;
pnetwork->aid = 0;
pnetwork->join_res = 0;
- /* bss info not receving from the right channel */
+ /* bss info not receiving from the right channel */
if (pnetwork->network.PhyInfo.SignalQuality == 101)
pnetwork->network.PhyInfo.SignalQuality = 0;
} else {
@@ -656,7 +656,7 @@ _func_enter_;
pnetwork->last_scanned = rtw_get_current_time();
- /* bss info not receving from the right channel */
+ /* bss info not receiving from the right channel */
if (pnetwork->network.PhyInfo.SignalQuality == 101)
pnetwork->network.PhyInfo.SignalQuality = 0;
rtw_list_insert_tail(&(pnetwork->list), &(queue->queue));
@@ -670,7 +670,7 @@ _func_enter_;
pnetwork->last_scanned = rtw_get_current_time();
- /* target.Reserved[0]== 1, means that scaned network is a bcn frame. */
+ /* target.Reserved[0]== 1, means that scanned network is a bcn frame. */
if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
update_ie = false;
@@ -1130,7 +1130,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
padapter->securitypriv.wps_ie_len = 0;
}
/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
- /* if A-MPDU Rx is enabled, reseting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
+ /* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
/* todo: check if AP can send A-MPDU packets */
for (i = 0; i < 16; i++) {
/* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
@@ -1210,7 +1210,7 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength);
}
-/* Notes: the fucntion could be > passive_level (the same context as Rx tasklet) */
+/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
/* pnetwork: returns from rtw_joinbss_event_callback */
/* ptarget_wlan: found from scanned_queue */
/* if join_res > 0, for (fw_state == WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */
@@ -2177,7 +2177,7 @@ _func_enter_;
_func_exit_;
}
-/* the fucntion is at passive_level */
+/* the function is at passive_level */
void rtw_joinbss_reset(struct adapter *padapter)
{
u8 threshold;
@@ -2205,7 +2205,7 @@ void rtw_joinbss_reset(struct adapter *padapter)
}
}
-/* the fucntion is >= passive_level */
+/* the function is >= passive_level */
unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len)
{
u32 ielen, out_len;
@@ -2273,7 +2273,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
return phtpriv->ht_option;
}
-/* the fucntion is > passive_level (in critical_section) */
+/* the function is > passive_level (in critical_section) */
void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
{
u8 *p, max_ampdu_sz;
@@ -2332,7 +2332,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
else
pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
}
- /* switch to the 40M Hz mode accoring to the AP */
+ /* switch to the 40M Hz mode according to the AP */
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
case HT_EXTCHNL_OFFSET_UPPER:
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 4b2eb8e9b56..7ab5ff039c8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1852,7 +1852,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -2199,7 +2199,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -2349,7 +2349,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
/* Commented by Albert 2011/03/08 */
/* According to the P2P specification */
- /* if the sending device will be client, the P2P Capability should be reserved of group negotation response frame */
+ /* if the sending device will be client, the P2P Capability should be reserved of group negotiation response frame */
p2pie[p2pielen++] = 0;
} else {
/* Be group owner or meet the error case */
@@ -2561,7 +2561,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -2729,7 +2729,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -2981,7 +2981,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -3175,7 +3175,7 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -3283,7 +3283,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
unsigned char *mac;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -3534,7 +3534,7 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
unsigned char *mac;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -4484,7 +4484,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
#if defined(CONFIG_88EU_AP_MODE)
@@ -4713,7 +4713,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
#if defined (CONFIG_88EU_AP_MODE)
@@ -4876,7 +4876,7 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
unsigned char *mac;
unsigned char bssrate[NumRates];
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
@@ -5013,7 +5013,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
unsigned int val32;
u16 val16;
#ifdef CONFIG_88EU_AP_MODE
@@ -5153,7 +5153,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
struct pkt_attrib *pattrib;
unsigned char *pbuf, *pframe;
unsigned short val;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -5290,7 +5290,7 @@ void issue_assocreq(struct adapter *padapter)
struct pkt_attrib *pattrib;
unsigned char *pframe, *p;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
__le16 le_tmp;
unsigned int i, j, ie_len, index = 0;
unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
@@ -5625,7 +5625,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv;
struct mlme_ext_priv *pmlmeext;
struct mlme_ext_info *pmlmeinfo;
@@ -5740,7 +5740,8 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl, *qc;
+ __le16 *fctrl;
+ unsigned short *qc;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -5860,7 +5861,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -5972,7 +5973,7 @@ void issue_action_spct_ch_switch (struct adapter *padapter, u8 *ra, u8 new_ch, u
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -6040,7 +6041,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
struct pkt_attrib *pattrib;
u8 *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- u16 *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -6162,7 +6163,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct wlan_network *pnetwork = NULL;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -6698,7 +6699,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
}
}
- /* mark bss info receving from nearby channel as SignalQuality 101 */
+ /* mark bss info receiving from nearby channel as SignalQuality 101 */
if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
bssid->PhyInfo.SignalQuality = 101;
return _SUCCESS;
@@ -8110,7 +8111,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
Save_DM_Func_Flag(padapter);
Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
- /* config the initial gain under scaning, need to write the BB registers */
+ /* config the initial gain under scanning, need to write the BB registers */
#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
initialgain = 0x1E;
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
index 8cf915f4cf9..f46cab14a54 100644
--- a/drivers/staging/rtl8188eu/core/rtw_p2p.c
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -135,7 +135,7 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -192,7 +192,7 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -272,7 +272,7 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -342,7 +342,7 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
- unsigned short *fctrl;
+ __le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
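The recurring change above from unsigned short *fctrl to __le16 *fctrl matters because the 802.11 frame-control field is little-endian on the wire; typing the pointer as __le16 lets sparse (run with the __CHECK_ENDIAN__ flag the Makefile keeps) flag any store that skips a byte-order conversion. A small illustrative sketch, not the driver's own API, using the generic kernel helpers:

#include <linux/ieee80211.h>

/*
 * Illustrative only: frame_control is declared __le16 in struct
 * ieee80211_hdr, so stores go through cpu_to_le16() and reads through
 * le16_to_cpu(), which is what the __le16 annotation lets sparse enforce.
 */
static void demo_set_frame_control(struct ieee80211_hdr *hdr, u16 fc)
{
	hdr->frame_control = cpu_to_le16(fc);	/* store in wire order */
}

static bool demo_more_frags(const struct ieee80211_hdr *hdr)
{
	/* convert back to host order before testing bits */
	return le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_MOREFRAGS;
}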
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 58a1661f5a8..b45461fe20f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -193,7 +193,7 @@ void rtw_ps_processor(struct adapter *padapter)
if (pwrpriv->ips_mode_req == IPS_NONE)
goto exit;
- if (rtw_pwr_unassociated_idle(padapter) == false)
+ if (!rtw_pwr_unassociated_idle(padapter))
goto exit;
if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts%4) == 0)) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 20116578736..9f0f30f7069 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -204,11 +204,14 @@ void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpri
int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
{
unsigned long irqL;
- struct adapter *padapter = precvframe->u.hdr.adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
_func_enter_;
-
+ if (!precvframe)
+ return _FAIL;
+ padapter = precvframe->u.hdr.adapter;
+ precvpriv = &padapter->recvpriv;
if (precvframe->u.hdr.pkt) {
dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
precvframe->u.hdr.pkt = NULL;
@@ -1583,7 +1586,7 @@ _func_enter_;
pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = get_next(plist);
- };
+ }
/* free the defrag_q queue and return the prframe */
rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
@@ -1798,16 +1801,14 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
}
- /* Indicat the packets to upper layer */
- if (sub_skb) {
- /* Insert NAT2.5 RX here! */
- sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
- sub_skb->dev = padapter->pnetdev;
+ /* Indicate the packets to upper layer */
+ /* Insert NAT2.5 RX here! */
+ sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
+ sub_skb->dev = padapter->pnetdev;
- sub_skb->ip_summed = CHECKSUM_NONE;
+ sub_skb->ip_summed = CHECKSUM_NONE;
- netif_rx(sub_skb);
- }
+ netif_rx(sub_skb);
}
exit:
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 0f076d0cb5f..e0884572977 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -916,7 +916,7 @@ _func_enter_;
add1b[i] = 0x00;
}
- swap_halfs[0] = in[2]; /* Swap halfs */
+ swap_halfs[0] = in[2]; /* Swap halves */
swap_halfs[1] = in[3];
swap_halfs[2] = in[0];
swap_halfs[3] = in[1];
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index c2977be92fb..cd3c9a7c304 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -267,9 +267,8 @@ _func_enter_;
rtw_mfree_sta_priv_lock(pstapriv);
- if (pstapriv->pallocated_stainfo_buf) {
+ if (pstapriv->pallocated_stainfo_buf)
rtw_vmfree(pstapriv->pallocated_stainfo_buf, sizeof(struct sta_info)*NUM_STA+4);
- }
}
_func_exit_;
@@ -315,7 +314,7 @@ _func_enter_;
rtw_list_insert_tail(&psta->hash_list, phash_list);
- pstapriv->asoc_sta_count++ ;
+ pstapriv->asoc_sta_count++;
_exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
@@ -419,7 +418,7 @@ _func_enter_;
_cancel_timer_ex(&psta->addba_retry_timer);
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
- for (i = 0; i < 16 ; i++) {
+ for (i = 0; i < 16; i++) {
unsigned long irql;
struct list_head *phead, *plist;
union recv_frame *prframe;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 8018edd3d42..153ec61493a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -80,7 +80,7 @@ int cckratesonly_included(unsigned char *rate, int ratelen)
for (i = 0; i < ratelen; i++) {
if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
(((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
- return false;
+ return false;
}
return true;
@@ -766,7 +766,7 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
for (i = 0; i < (pIE->Length); i++) {
if (i != 2) {
- /* Got the endian issue here. */
+ /* Got the endian issue here. */
pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
} else {
/* modify from fw by Thomas 2010/11/17 */
@@ -1096,13 +1096,13 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
kfree(bssid);
+ _func_exit_;
return _SUCCESS;
_mismatch:
kfree(bssid);
- return _FAIL;
-
_func_exit_;
+ return _FAIL;
}
void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta)
@@ -1186,7 +1186,7 @@ unsigned int should_forbid_n_rate(struct adapter *padapter)
case _RSN_IE_2_:
if ((_rtw_memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
(_rtw_memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
- return false;
+ return false;
default:
break;
}
@@ -1368,21 +1368,21 @@ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- /* Added by Albert 2011/03/22 */
- /* In the P2P mode, the driver should not support the b mode. */
- /* So, the Tx packet shouldn't use the CCK rate */
+ /* Added by Albert 2011/03/22 */
+ /* In the P2P mode, the driver should not support the b mode. */
+ /* So, the Tx packet shouldn't use the CCK rate */
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return;
#endif /* CONFIG_88EU_P2P */
_rtw_memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
- if ((wirelessmode & WIRELESS_11B) && (wirelessmode == WIRELESS_11B)) {
+ if ((wirelessmode & WIRELESS_11B) && (wirelessmode == WIRELESS_11B))
memcpy(supported_rates, rtw_basic_rate_cck, 4);
- } else if (wirelessmode & WIRELESS_11B) {
+ else if (wirelessmode & WIRELESS_11B)
memcpy(supported_rates, rtw_basic_rate_mix, 7);
- } else {
+ else
memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
- }
+
if (wirelessmode & WIRELESS_11B)
update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
@@ -1435,7 +1435,7 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
DBG_88E("link to Airgo Cap\n");
return HT_IOT_PEER_AIRGO;
} else if (_rtw_memcmp(pIE->data, EPIGRAM_OUI, 3)) {
- epigram_vendor_flag = 1;
+ epigram_vendor_flag = 1;
if (ralink_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
return HT_IOT_PEER_TENDA;
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index bb5cd95c564..a594e51d2e1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -1556,7 +1556,7 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, str
xmitframe_phead = get_list_head(pframe_queue);
xmitframe_plist = get_next(xmitframe_phead);
- while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ if (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
xmitframe_plist = get_next(xmitframe_plist);
@@ -1564,12 +1564,7 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, str
rtw_list_delete(&pxmitframe->list);
ptxservq->qcnt--;
-
- break;
-
- pxmitframe = NULL;
}
-
return pxmitframe;
}
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c b/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c
deleted file mode 100644
index 95759bed541..00000000000
--- a/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c
+++ /dev/null
@@ -1,1761 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
-******************************************************************************/
-#include "odm_precomp.h"
-
-const u8 Rtl8188EFwImgArray[Rtl8188EFWImgArrayLength] = {
- 0xE1, 0x88, 0x10, 0x00, 0x0B, 0x00, 0x01, 0x00,
- 0x01, 0x21, 0x11, 0x27, 0x30, 0x36, 0x00, 0x00,
- 0x2D, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x45, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xC1, 0x6F, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xA1, 0xE6, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x02, 0x56, 0xF7, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC2, 0xAF, 0x80, 0xFE, 0x32, 0x12, 0x42, 0x04,
- 0x85, 0xD0, 0x0B, 0x75, 0xD0, 0x08, 0xAA, 0xE0,
- 0xC2, 0x8C, 0xE5, 0x8A, 0x24, 0x67, 0xF5, 0x8A,
- 0xE5, 0x8C, 0x34, 0x79, 0xF5, 0x8C, 0xD2, 0x8C,
- 0xEC, 0x24, 0x89, 0xF8, 0xE6, 0xBC, 0x03, 0x02,
- 0x74, 0xFF, 0xC3, 0x95, 0x81, 0xB4, 0x40, 0x00,
- 0x40, 0xCE, 0x79, 0x04, 0x78, 0x80, 0x16, 0xE6,
- 0x08, 0x70, 0x0B, 0xC2, 0xAF, 0xE6, 0x30, 0xE1,
- 0x03, 0x44, 0x18, 0xF6, 0xD2, 0xAF, 0x08, 0xD9,
- 0xED, 0xEA, 0x8B, 0xD0, 0x22, 0xE5, 0x0C, 0xFF,
- 0x23, 0x24, 0x81, 0xF8, 0x0F, 0x08, 0x08, 0xBF,
- 0x04, 0x04, 0x7F, 0x00, 0x78, 0x81, 0xE6, 0x30,
- 0xE4, 0xF2, 0x00, 0xE5, 0x0C, 0xC3, 0x9F, 0x50,
- 0x20, 0x05, 0x0C, 0x74, 0x88, 0x25, 0x0C, 0xF8,
- 0xE6, 0xFD, 0xA6, 0x81, 0x08, 0xE6, 0xAE, 0x0C,
- 0xBE, 0x03, 0x02, 0x74, 0xFF, 0xCD, 0xF8, 0xE8,
- 0x6D, 0x60, 0xE0, 0x08, 0xE6, 0xC0, 0xE0, 0x80,
- 0xF6, 0xE5, 0x0C, 0xD3, 0x9F, 0x40, 0x27, 0xE5,
- 0x0C, 0x24, 0x89, 0xF8, 0xE6, 0xAE, 0x0C, 0xBE,
- 0x03, 0x02, 0x74, 0xFF, 0xFD, 0x18, 0xE6, 0xCD,
- 0xF8, 0xE5, 0x81, 0x6D, 0x60, 0x06, 0xD0, 0xE0,
- 0xF6, 0x18, 0x80, 0xF5, 0xE5, 0x0C, 0x24, 0x88,
- 0xC8, 0xF6, 0x15, 0x0C, 0x80, 0xD3, 0xE5, 0x0C,
- 0x23, 0x24, 0x81, 0xF8, 0x7F, 0x04, 0xC2, 0xAF,
- 0xE6, 0x30, 0xE0, 0x03, 0x10, 0xE2, 0x0C, 0x7F,
- 0x00, 0x30, 0xE1, 0x07, 0x30, 0xE3, 0x04, 0x7F,
- 0x08, 0x54, 0xF4, 0x54, 0x7C, 0xC6, 0xD2, 0xAF,
- 0x54, 0x80, 0x42, 0x07, 0x22, 0x78, 0x88, 0xA6,
- 0x81, 0x74, 0x03, 0x60, 0x06, 0xFF, 0x08, 0x76,
- 0xFF, 0xDF, 0xFB, 0x7F, 0x04, 0xE4, 0x78, 0x80,
- 0xF6, 0x08, 0xF6, 0x08, 0xDF, 0xFA, 0x78, 0x81,
- 0x76, 0x30, 0x90, 0x45, 0xDE, 0x74, 0x01, 0x93,
- 0xC0, 0xE0, 0xE4, 0x93, 0xC0, 0xE0, 0x43, 0x89,
- 0x01, 0x75, 0x8A, 0x60, 0x75, 0x8C, 0x79, 0xD2,
- 0x8C, 0xD2, 0xAF, 0x22, 0x03, 0xEF, 0xD3, 0x94,
- 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22, 0x74, 0x81,
- 0x2F, 0x2F, 0xF8, 0xE6, 0x20, 0xE5, 0xF4, 0xC2,
- 0xAF, 0xE6, 0x44, 0x30, 0xF6, 0xD2, 0xAF, 0xAE,
- 0x0C, 0xEE, 0xC3, 0x9F, 0x50, 0x21, 0x0E, 0x74,
- 0x88, 0x2E, 0xF8, 0xE6, 0xF9, 0x08, 0xE6, 0x18,
- 0xBE, 0x03, 0x02, 0x74, 0xFF, 0xFD, 0xED, 0x69,
- 0x60, 0x09, 0x09, 0xE7, 0x19, 0x19, 0xF7, 0x09,
- 0x09, 0x80, 0xF3, 0x16, 0x16, 0x80, 0xDA, 0xEE,
- 0xD3, 0x9F, 0x40, 0x04, 0x05, 0x81, 0x05, 0x81,
- 0xEE, 0xD3, 0x9F, 0x40, 0x22, 0x74, 0x88, 0x2E,
- 0xF8, 0x08, 0xE6, 0xF9, 0xEE, 0xB5, 0x0C, 0x02,
- 0xA9, 0x81, 0x18, 0x06, 0x06, 0xE6, 0xFD, 0xED,
- 0x69, 0x60, 0x09, 0x19, 0x19, 0xE7, 0x09, 0x09,
- 0xF7, 0x19, 0x80, 0xF3, 0x1E, 0x80, 0xD9, 0xEF,
- 0x24, 0x88, 0xF8, 0xE6, 0x04, 0xF8, 0xEF, 0x2F,
- 0x04, 0x90, 0x45, 0xDE, 0x93, 0xF6, 0x08, 0xEF,
- 0x2F, 0x93, 0xF6, 0x7F, 0x00, 0x22, 0xEF, 0xD3,
- 0x94, 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22, 0xEF,
- 0x23, 0x24, 0x81, 0xF8, 0xE6, 0x30, 0xE5, 0xF4,
- 0xC2, 0xAF, 0xE6, 0x54, 0x8C, 0xF6, 0xD2, 0xAF,
- 0xE5, 0x0C, 0xB5, 0x07, 0x0A, 0x74, 0x88, 0x2F,
- 0xF8, 0xE6, 0xF5, 0x81, 0x02, 0x42, 0x4D, 0x50,
- 0x2E, 0x74, 0x89, 0x2F, 0xF8, 0xE6, 0xBF, 0x03,
- 0x02, 0x74, 0xFF, 0xFD, 0x18, 0xE6, 0xF9, 0x74,
- 0x88, 0x2F, 0xF8, 0xFB, 0xE6, 0xFC, 0xE9, 0x6C,
- 0x60, 0x08, 0xA8, 0x05, 0xE7, 0xF6, 0x1D, 0x19,
- 0x80, 0xF4, 0xA8, 0x03, 0xA6, 0x05, 0x1F, 0xE5,
- 0x0C, 0xB5, 0x07, 0xE3, 0x7F, 0x00, 0x22, 0x74,
- 0x89, 0x2F, 0xF8, 0xE6, 0xFD, 0x18, 0x86, 0x01,
- 0x0F, 0x74, 0x88, 0x2F, 0xF8, 0xA6, 0x01, 0x08,
- 0x86, 0x04, 0xE5, 0x0C, 0xB5, 0x07, 0x02, 0xAC,
- 0x81, 0xED, 0x6C, 0x60, 0x08, 0x0D, 0x09, 0xA8,
- 0x05, 0xE6, 0xF7, 0x80, 0xF4, 0xE5, 0x0C, 0xB5,
- 0x07, 0xDE, 0x89, 0x81, 0x7F, 0x00, 0x22, 0xEF,
- 0xD3, 0x94, 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22,
- 0xEF, 0x23, 0x24, 0x81, 0xF8, 0xC2, 0xAF, 0xE6,
- 0x30, 0xE5, 0x05, 0x30, 0xE0, 0x02, 0xD2, 0xE4,
- 0xD2, 0xE2, 0xC6, 0xD2, 0xAF, 0x7F, 0x00, 0x30,
- 0xE2, 0x01, 0x0F, 0x02, 0x42, 0x4C, 0x8F, 0xF0,
- 0xE4, 0xFF, 0xFE, 0xE5, 0x0C, 0x23, 0x24, 0x80,
- 0xF8, 0xC2, 0xA9, 0x30, 0xF7, 0x0D, 0x7F, 0x08,
- 0xE6, 0x60, 0x0B, 0x2D, 0xF6, 0x60, 0x30, 0x50,
- 0x2E, 0x80, 0x07, 0x30, 0xF1, 0x06, 0xED, 0xF6,
- 0x60, 0x25, 0x7E, 0x02, 0x08, 0x30, 0xF0, 0x10,
- 0xC2, 0xAF, 0xE6, 0x10, 0xE7, 0x23, 0x0E, 0x30,
- 0xE2, 0x0C, 0xD2, 0xAF, 0x7F, 0x04, 0x80, 0x12,
- 0xC2, 0xAF, 0xE6, 0x10, 0xE7, 0x13, 0x54, 0xEC,
- 0x4E, 0xF6, 0xD2, 0xAF, 0x02, 0x42, 0x4D, 0x7F,
- 0x08, 0x08, 0xEF, 0x44, 0x83, 0xF4, 0xC2, 0xAF,
- 0x56, 0xC6, 0xD2, 0xAF, 0x54, 0x80, 0x4F, 0xFF,
- 0x22, 0xC5, 0xF0, 0xF8, 0xA3, 0xE0, 0x28, 0xF0,
- 0xC5, 0xF0, 0xF8, 0xE5, 0x82, 0x15, 0x82, 0x70,
- 0x02, 0x15, 0x83, 0xE0, 0x38, 0xF0, 0x22, 0xEF,
- 0x5B, 0xFF, 0xEE, 0x5A, 0xFE, 0xED, 0x59, 0xFD,
- 0xEC, 0x58, 0xFC, 0x22, 0xEF, 0x4B, 0xFF, 0xEE,
- 0x4A, 0xFE, 0xED, 0x49, 0xFD, 0xEC, 0x48, 0xFC,
- 0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xFD, 0xA3, 0xE0,
- 0xFE, 0xA3, 0xE0, 0xFF, 0x22, 0xE2, 0xFC, 0x08,
- 0xE2, 0xFD, 0x08, 0xE2, 0xFE, 0x08, 0xE2, 0xFF,
- 0x22, 0xE2, 0xFB, 0x08, 0xE2, 0xF9, 0x08, 0xE2,
- 0xFA, 0x08, 0xE2, 0xCB, 0xF8, 0x22, 0xEC, 0xF2,
- 0x08, 0xED, 0xF2, 0x08, 0xEE, 0xF2, 0x08, 0xEF,
- 0xF2, 0x22, 0xA4, 0x25, 0x82, 0xF5, 0x82, 0xE5,
- 0xF0, 0x35, 0x83, 0xF5, 0x83, 0x22, 0xE0, 0xFB,
- 0xA3, 0xE0, 0xFA, 0xA3, 0xE0, 0xF9, 0x22, 0xEB,
- 0xF0, 0xA3, 0xEA, 0xF0, 0xA3, 0xE9, 0xF0, 0x22,
- 0xD0, 0x83, 0xD0, 0x82, 0xF8, 0xE4, 0x93, 0x70,
- 0x12, 0x74, 0x01, 0x93, 0x70, 0x0D, 0xA3, 0xA3,
- 0x93, 0xF8, 0x74, 0x01, 0x93, 0xF5, 0x82, 0x88,
- 0x83, 0xE4, 0x73, 0x74, 0x02, 0x93, 0x68, 0x60,
- 0xEF, 0xA3, 0xA3, 0xA3, 0x80, 0xDF, 0x02, 0x45,
- 0x8C, 0x02, 0x42, 0xDD, 0xE4, 0x93, 0xA3, 0xF8,
- 0xE4, 0x93, 0xA3, 0x40, 0x03, 0xF6, 0x80, 0x01,
- 0xF2, 0x08, 0xDF, 0xF4, 0x80, 0x29, 0xE4, 0x93,
- 0xA3, 0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3,
- 0x33, 0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83,
- 0x40, 0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6,
- 0xDF, 0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08,
- 0x10, 0x20, 0x40, 0x80, 0x90, 0x45, 0xD1, 0xE4,
- 0x7E, 0x01, 0x93, 0x60, 0xBC, 0xA3, 0xFF, 0x54,
- 0x3F, 0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4,
- 0x93, 0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0,
- 0x25, 0xE0, 0x60, 0xA8, 0x40, 0xB8, 0xE4, 0x93,
- 0xA3, 0xFA, 0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93,
- 0xA3, 0xC8, 0xC5, 0x82, 0xC8, 0xCA, 0xC5, 0x83,
- 0xCA, 0xF0, 0xA3, 0xC8, 0xC5, 0x82, 0xC8, 0xCA,
- 0xC5, 0x83, 0xCA, 0xDF, 0xE9, 0xDE, 0xE7, 0x80,
- 0xBE, 0x00, 0x41, 0x82, 0x09, 0x00, 0x41, 0x82,
- 0x0A, 0x00, 0x41, 0x82, 0x17, 0x00, 0x59, 0xE2,
- 0x5C, 0x24, 0x5E, 0x5D, 0x5F, 0xA1, 0xC0, 0xE0,
- 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0, 0xD0,
- 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01, 0xC0,
- 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
- 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74, 0xE6,
- 0xF0, 0x74, 0x45, 0xA3, 0xF0, 0xD1, 0x35, 0x74,
- 0xE6, 0x04, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x45,
- 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06, 0xD0, 0x05,
- 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01,
- 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82, 0xD0, 0x83,
- 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90, 0x00, 0x54,
- 0xE0, 0x55, 0x35, 0xF5, 0x39, 0xA3, 0xE0, 0x55,
- 0x36, 0xF5, 0x3A, 0xA3, 0xE0, 0x55, 0x37, 0xF5,
- 0x3B, 0xA3, 0xE0, 0x55, 0x38, 0xF5, 0x3C, 0xAD,
- 0x39, 0x7F, 0x54, 0x12, 0x32, 0x1E, 0xAD, 0x3A,
- 0x7F, 0x55, 0x12, 0x32, 0x1E, 0xAD, 0x3B, 0x7F,
- 0x56, 0x12, 0x32, 0x1E, 0xAD, 0x3C, 0x7F, 0x57,
- 0x12, 0x32, 0x1E, 0x53, 0x91, 0xEF, 0x22, 0xC0,
- 0xE0, 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0,
- 0xD0, 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01,
- 0xC0, 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05,
- 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74,
- 0x6F, 0xF0, 0x74, 0x46, 0xA3, 0xF0, 0x12, 0x6C,
- 0x78, 0xE5, 0x41, 0x30, 0xE4, 0x04, 0x7F, 0x02,
- 0x91, 0x27, 0xE5, 0x41, 0x30, 0xE6, 0x03, 0x12,
- 0x6C, 0xD5, 0xE5, 0x43, 0x30, 0xE0, 0x03, 0x12,
- 0x51, 0xC2, 0xE5, 0x43, 0x30, 0xE1, 0x03, 0x12,
- 0x4D, 0x0C, 0xE5, 0x43, 0x30, 0xE2, 0x03, 0x12,
- 0x4C, 0xC1, 0xE5, 0x43, 0x30, 0xE3, 0x03, 0x12,
- 0x6C, 0xE2, 0xE5, 0x43, 0x30, 0xE4, 0x03, 0x12,
- 0x6D, 0x04, 0xE5, 0x43, 0x30, 0xE5, 0x03, 0x12,
- 0x6D, 0x33, 0xE5, 0x43, 0x30, 0xE6, 0x02, 0xF1,
- 0x0F, 0xE5, 0x44, 0x30, 0xE1, 0x03, 0x12, 0x51,
- 0x7F, 0x74, 0x6F, 0x04, 0x90, 0x01, 0xC4, 0xF0,
- 0x74, 0x46, 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06,
- 0xD0, 0x05, 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02,
- 0xD0, 0x01, 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82,
- 0xD0, 0x83, 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90,
- 0x80, 0xDE, 0xE0, 0xB4, 0x01, 0x13, 0x90, 0x81,
- 0x27, 0xE0, 0x60, 0x0D, 0x90, 0x81, 0x2B, 0xE0,
- 0x54, 0xFE, 0xF0, 0x54, 0x07, 0x70, 0x02, 0xF1,
- 0x2A, 0x22, 0x90, 0x81, 0x1F, 0xE0, 0x90, 0x81,
- 0x29, 0x30, 0xE0, 0x05, 0xE0, 0xFF, 0x02, 0x74,
- 0x8F, 0xE0, 0xFF, 0x7D, 0x01, 0xD3, 0x10, 0xAF,
- 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x82, 0x13, 0xED,
- 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0x90, 0x82, 0x14,
- 0xF0, 0x90, 0x81, 0x24, 0xE0, 0xFE, 0xC4, 0x13,
- 0x13, 0x54, 0x03, 0x30, 0xE0, 0x03, 0x02, 0x48,
- 0xA0, 0xEE, 0xC4, 0x13, 0x13, 0x13, 0x54, 0x01,
- 0x30, 0xE0, 0x03, 0x02, 0x48, 0xA0, 0x90, 0x82,
- 0x14, 0xE0, 0xFE, 0x6F, 0x70, 0x03, 0x02, 0x48,
- 0xA0, 0xEF, 0x70, 0x03, 0x02, 0x48, 0x17, 0x24,
- 0xFE, 0x70, 0x03, 0x02, 0x48, 0x50, 0x24, 0xFE,
- 0x60, 0x51, 0x24, 0xFC, 0x70, 0x03, 0x02, 0x48,
- 0x8B, 0x24, 0xFC, 0x60, 0x03, 0x02, 0x48, 0xA0,
- 0xEE, 0xB4, 0x0E, 0x03, 0x12, 0x49, 0x5E, 0x90,
- 0x82, 0x14, 0xE0, 0x70, 0x05, 0x7F, 0x01, 0x12,
- 0x49, 0x93, 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x06,
- 0x03, 0x12, 0x49, 0x34, 0x90, 0x82, 0x14, 0xE0,
- 0xB4, 0x04, 0x0F, 0x90, 0x82, 0x13, 0xE0, 0xFF,
- 0x60, 0x05, 0x12, 0x73, 0x75, 0x80, 0x03, 0x12,
- 0x66, 0x26, 0x90, 0x82, 0x14, 0xE0, 0x64, 0x08,
- 0x60, 0x03, 0x02, 0x48, 0xA0, 0x12, 0x73, 0xD3,
- 0x02, 0x48, 0xA0, 0x90, 0x82, 0x14, 0xE0, 0x70,
- 0x05, 0x7F, 0x01, 0x12, 0x49, 0x93, 0x90, 0x82,
- 0x14, 0xE0, 0xB4, 0x06, 0x03, 0x12, 0x49, 0x34,
- 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x09, 0x12,
- 0x48, 0xA5, 0xBF, 0x01, 0x03, 0x12, 0x49, 0x5E,
- 0x90, 0x82, 0x14, 0xE0, 0x64, 0x0C, 0x60, 0x02,
- 0x01, 0xA0, 0x11, 0xA5, 0xEF, 0x64, 0x01, 0x60,
- 0x02, 0x01, 0xA0, 0x11, 0xFA, 0x01, 0xA0, 0x90,
- 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x07, 0x11, 0xA5,
- 0xBF, 0x01, 0x02, 0x31, 0x5E, 0x90, 0x82, 0x14,
- 0xE0, 0xB4, 0x06, 0x02, 0x31, 0x34, 0x90, 0x82,
- 0x14, 0xE0, 0xB4, 0x0C, 0x07, 0x11, 0xA5, 0xBF,
- 0x01, 0x02, 0x11, 0xFA, 0x90, 0x82, 0x14, 0xE0,
- 0x64, 0x04, 0x70, 0x5C, 0x12, 0x72, 0xF5, 0xEF,
- 0x64, 0x01, 0x70, 0x54, 0x31, 0xBE, 0x80, 0x50,
- 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x07, 0x11,
- 0xA5, 0xBF, 0x01, 0x02, 0x31, 0x5E, 0x90, 0x82,
- 0x14, 0xE0, 0xB4, 0x06, 0x02, 0x31, 0x34, 0x90,
- 0x82, 0x14, 0xE0, 0xB4, 0x0C, 0x07, 0x11, 0xA5,
- 0xBF, 0x01, 0x02, 0x11, 0xFA, 0x90, 0x82, 0x14,
- 0xE0, 0x70, 0x04, 0x7F, 0x01, 0x31, 0x93, 0x90,
- 0x82, 0x14, 0xE0, 0xB4, 0x04, 0x1A, 0x12, 0x73,
- 0xBB, 0x80, 0x15, 0x90, 0x82, 0x14, 0xE0, 0xB4,
- 0x0C, 0x0E, 0x90, 0x81, 0x25, 0xE0, 0xFF, 0x13,
- 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x02, 0x31, 0xB1,
- 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD1, 0xAB, 0xEF,
- 0x64, 0x01, 0x60, 0x08, 0x90, 0x01, 0xB8, 0x74,
- 0x01, 0xF0, 0x80, 0x3D, 0x90, 0x81, 0x24, 0xE0,
- 0xFF, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
- 0x08, 0x90, 0x01, 0xB8, 0x74, 0x02, 0xF0, 0x80,
- 0x28, 0xEF, 0xC4, 0x54, 0x0F, 0x30, 0xE0, 0x08,
- 0x90, 0x01, 0xB8, 0x74, 0x04, 0xF0, 0x80, 0x19,
- 0x90, 0x81, 0x29, 0xE0, 0xD3, 0x94, 0x04, 0x40,
- 0x08, 0x90, 0x01, 0xB8, 0x74, 0x08, 0xF0, 0x80,
- 0x08, 0x90, 0x01, 0xB8, 0xE4, 0xF0, 0x7F, 0x01,
- 0x22, 0x90, 0x01, 0xB9, 0x74, 0x02, 0xF0, 0x7F,
- 0x00, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01,
- 0x70, 0x31, 0x90, 0x81, 0x25, 0xE0, 0x54, 0xFD,
- 0xF0, 0x90, 0x05, 0x22, 0x74, 0x6F, 0xF0, 0x7F,
- 0x01, 0xF1, 0x0D, 0xBF, 0x01, 0x12, 0x90, 0x81,
- 0x24, 0xE0, 0x44, 0x80, 0xF0, 0x90, 0x81, 0x2A,
- 0x74, 0x0E, 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22,
- 0x90, 0x01, 0xB9, 0x74, 0x01, 0xF0, 0x90, 0x01,
- 0xB8, 0x04, 0xF0, 0x22, 0x90, 0x81, 0x25, 0xE0,
- 0x90, 0x06, 0x04, 0x20, 0xE0, 0x0C, 0xE0, 0x44,
- 0x40, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x04, 0xF0,
- 0x80, 0x0E, 0xE0, 0x54, 0x7F, 0xF0, 0x90, 0x81,
- 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x23, 0xF0,
- 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x22, 0x90, 0x81,
- 0x25, 0xE0, 0xC3, 0x13, 0x20, 0xE0, 0x08, 0x90,
- 0x81, 0x2A, 0x74, 0x0C, 0xF0, 0x80, 0x1E, 0x90,
- 0x06, 0x04, 0xE0, 0x44, 0x40, 0xF0, 0xE0, 0x44,
- 0x80, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x04, 0xF0,
- 0x90, 0x05, 0x27, 0xE0, 0x44, 0x80, 0xF0, 0x90,
- 0x81, 0x23, 0x74, 0x04, 0xF0, 0x90, 0x05, 0x22,
- 0xE4, 0xF0, 0x22, 0x90, 0x82, 0x15, 0xEF, 0xF0,
- 0x12, 0x54, 0x65, 0x90, 0x82, 0x15, 0xE0, 0x60,
- 0x05, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90, 0x81,
- 0x2A, 0x74, 0x04, 0xF0, 0x90, 0x81, 0x23, 0xF0,
- 0x22, 0x31, 0xE3, 0x90, 0x81, 0x2A, 0x74, 0x08,
- 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x05,
- 0x22, 0x74, 0xFF, 0xF0, 0xF1, 0x3A, 0x90, 0x01,
- 0x37, 0x74, 0x02, 0xF0, 0xFD, 0x7F, 0x03, 0x51,
- 0x57, 0x31, 0xE3, 0xE4, 0x90, 0x81, 0x2A, 0xF0,
- 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x05, 0x22,
- 0x74, 0xFF, 0xF0, 0xF1, 0x3A, 0x90, 0x85, 0xBB,
- 0x12, 0x20, 0xDA, 0xCC, 0xF0, 0x00, 0xC0, 0x7F,
- 0x8C, 0x7E, 0x08, 0x12, 0x2E, 0xA2, 0x90, 0x85,
- 0xBB, 0x12, 0x20, 0xDA, 0x00, 0x00, 0x00, 0x14,
- 0x7F, 0x70, 0x7E, 0x0E, 0x12, 0x2E, 0xA2, 0x90,
- 0x81, 0xF9, 0x12, 0x20, 0xDA, 0x00, 0x00, 0x00,
- 0x00, 0xE4, 0xFD, 0xFF, 0x12, 0x55, 0x1C, 0x7F,
- 0x7C, 0x7E, 0x08, 0x12, 0x2D, 0x5C, 0xEC, 0x44,
- 0x80, 0xFC, 0x90, 0x82, 0x05, 0x12, 0x20, 0xCE,
- 0x90, 0x82, 0x05, 0x12, 0x44, 0xD9, 0x90, 0x85,
- 0xBB, 0x12, 0x20, 0xCE, 0x7F, 0x7C, 0x7E, 0x08,
- 0x12, 0x2E, 0xA2, 0x90, 0x01, 0x00, 0x74, 0x3F,
- 0xF0, 0xA3, 0xE0, 0x54, 0xFD, 0xF0, 0x90, 0x05,
- 0x53, 0xE0, 0x44, 0x20, 0xF0, 0x22, 0x90, 0x01,
- 0x34, 0x74, 0x40, 0xF0, 0xFD, 0xE4, 0xFF, 0x74,
- 0x3D, 0x2F, 0xF8, 0xE6, 0x4D, 0xFE, 0xF6, 0x74,
- 0x30, 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5,
- 0x83, 0xEE, 0xF0, 0x22, 0xD3, 0x10, 0xAF, 0x01,
- 0xC3, 0xC0, 0xD0, 0xE4, 0x90, 0x81, 0xCB, 0xF0,
- 0x12, 0x1F, 0xA4, 0xFF, 0x54, 0x01, 0xFE, 0x90,
- 0x81, 0x1F, 0xE0, 0x54, 0xFE, 0x4E, 0xFE, 0xF0,
- 0xEF, 0x54, 0x02, 0xFF, 0xEE, 0x54, 0xFD, 0x4F,
- 0xFF, 0xF0, 0x12, 0x1F, 0xA4, 0xFE, 0x54, 0x04,
- 0xFD, 0xEF, 0x54, 0xFB, 0x4D, 0xFF, 0x90, 0x81,
- 0x1F, 0xF0, 0xEE, 0x54, 0x08, 0xFE, 0xEF, 0x54,
- 0xF7, 0x4E, 0xFF, 0xF0, 0x12, 0x1F, 0xA4, 0xFE,
- 0x54, 0x10, 0xFD, 0xEF, 0x54, 0xEF, 0x4D, 0xFF,
- 0x90, 0x81, 0x1F, 0xF0, 0xEE, 0x54, 0x20, 0xFE,
- 0xEF, 0x54, 0xDF, 0x4E, 0xF0, 0x12, 0x1F, 0xA4,
- 0xC3, 0x13, 0x20, 0xE0, 0x02, 0x61, 0x5E, 0x90,
- 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x6D, 0x90,
- 0x81, 0xCB, 0x74, 0x21, 0xF0, 0xEF, 0x13, 0x13,
- 0x54, 0x3F, 0x30, 0xE0, 0x0B, 0x51, 0x4E, 0x90,
- 0x81, 0xCB, 0xE0, 0x44, 0x08, 0xF0, 0x80, 0x0C,
- 0xE4, 0x90, 0x81, 0x20, 0xF0, 0xA3, 0xF0, 0x7D,
- 0x40, 0xFF, 0x91, 0x26, 0x90, 0x81, 0x1F, 0xE0,
- 0xFD, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
- 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x12, 0xF0,
- 0xED, 0xC4, 0x54, 0x0F, 0x30, 0xE0, 0x07, 0x90,
- 0x81, 0xCB, 0xE0, 0x44, 0x14, 0xF0, 0x90, 0x81,
- 0x1F, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0x30, 0xE0,
- 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x80, 0xF0,
- 0x90, 0x81, 0xCB, 0xE0, 0x90, 0x05, 0x27, 0xF0,
- 0x90, 0x81, 0x22, 0xE0, 0x60, 0x02, 0x81, 0x17,
- 0x7F, 0x01, 0x80, 0x15, 0x90, 0x81, 0xCB, 0x74,
- 0x01, 0xF0, 0x90, 0x05, 0x27, 0xF0, 0x90, 0x81,
- 0x22, 0xE0, 0x64, 0x04, 0x60, 0x02, 0x81, 0x17,
- 0xFF, 0x12, 0x53, 0x0E, 0x81, 0x17, 0x90, 0x81,
- 0x1F, 0xE0, 0xFF, 0x20, 0xE0, 0x02, 0x61, 0xE7,
- 0x90, 0x81, 0xCB, 0x74, 0x31, 0xF0, 0xEF, 0x13,
- 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x0B, 0x51, 0x4E,
- 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x08, 0xF0, 0x80,
- 0x06, 0x7D, 0x40, 0xE4, 0xFF, 0x91, 0x26, 0x90,
- 0x81, 0x1F, 0xE0, 0xFD, 0x13, 0x13, 0x13, 0x54,
- 0x1F, 0x30, 0xE0, 0x07, 0x90, 0x81, 0xCB, 0xE0,
- 0x44, 0x02, 0xF0, 0xED, 0xC4, 0x54, 0x0F, 0x30,
- 0xE0, 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x04,
- 0xF0, 0x90, 0x81, 0xCB, 0xE0, 0x90, 0x05, 0x27,
- 0xF0, 0x90, 0x81, 0x23, 0xE0, 0x64, 0x02, 0x70,
- 0x1D, 0xFD, 0x7F, 0x04, 0x12, 0x47, 0x3D, 0x12,
- 0x51, 0x73, 0xBF, 0x01, 0x09, 0x90, 0x81, 0x29,
- 0xE0, 0xFF, 0x7D, 0x01, 0x80, 0x03, 0xE4, 0xFD,
- 0xFF, 0x12, 0x47, 0x3D, 0x80, 0x41, 0x90, 0x81,
- 0x2A, 0xE0, 0x90, 0x81, 0x23, 0xF0, 0x90, 0x05,
- 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x80, 0x30, 0x90,
- 0x81, 0xCB, 0x74, 0x01, 0xF0, 0x90, 0x05, 0x27,
- 0xF0, 0x90, 0x81, 0x23, 0xE0, 0xB4, 0x02, 0x06,
- 0x7D, 0x01, 0x7F, 0x04, 0x80, 0x0B, 0x90, 0x81,
- 0x23, 0xE0, 0xB4, 0x08, 0x07, 0x7D, 0x01, 0x7F,
- 0x0C, 0x12, 0x47, 0x3D, 0xD1, 0x34, 0x90, 0x81,
- 0x29, 0x12, 0x47, 0x39, 0x12, 0x5A, 0xA7, 0xD0,
- 0xD0, 0x92, 0xAF, 0x22, 0x7D, 0x02, 0x7F, 0x02,
- 0x91, 0x26, 0x7D, 0x01, 0x7F, 0x02, 0x74, 0x3D,
- 0x2F, 0xF8, 0xE6, 0xFE, 0xED, 0xF4, 0x5E, 0xFE,
- 0xF6, 0x74, 0x30, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
- 0x01, 0xF5, 0x83, 0xEE, 0xF0, 0x22, 0xEF, 0x70,
- 0x37, 0x7D, 0x78, 0x7F, 0x02, 0x91, 0x26, 0x7D,
- 0x02, 0x7F, 0x03, 0x91, 0x26, 0x7D, 0xC8, 0x7F,
- 0x02, 0x12, 0x71, 0x8F, 0x90, 0x01, 0x57, 0xE4,
- 0xF0, 0x90, 0x01, 0x3C, 0x74, 0x02, 0xF0, 0x7D,
- 0x01, 0x7F, 0x0C, 0x12, 0x47, 0x3D, 0x90, 0x81,
- 0x24, 0xE0, 0x54, 0xF7, 0xF0, 0x54, 0xEF, 0xF0,
- 0x90, 0x06, 0x0A, 0xE0, 0x54, 0xF8, 0xF0, 0x22,
- 0x90, 0x01, 0x36, 0x74, 0x78, 0xF0, 0xA3, 0x74,
- 0x02, 0xF0, 0x7D, 0x78, 0xFF, 0x51, 0x57, 0x7D,
- 0x02, 0x7F, 0x03, 0x51, 0x57, 0x90, 0x06, 0x0A,
- 0xE0, 0x44, 0x07, 0xF0, 0x90, 0x81, 0x32, 0xA3,
- 0xE0, 0x90, 0x05, 0x58, 0xF0, 0x90, 0x80, 0xDE,
- 0xE0, 0xB4, 0x01, 0x15, 0x90, 0x81, 0x25, 0xE0,
- 0x54, 0xFB, 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0x20,
- 0xE2, 0x0E, 0x7D, 0x01, 0x7F, 0x04, 0x02, 0x47,
- 0x3D, 0x90, 0x81, 0x25, 0xE0, 0x44, 0x04, 0xF0,
- 0x22, 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0,
- 0x08, 0x90, 0x81, 0x23, 0xE0, 0x64, 0x02, 0x60,
- 0x3A, 0x90, 0x81, 0x27, 0xE0, 0x70, 0x04, 0xEF,
- 0x30, 0xE0, 0x0A, 0x90, 0x81, 0x2A, 0xE0, 0x64,
- 0x02, 0x60, 0x28, 0xB1, 0x83, 0x90, 0x81, 0x25,
- 0xE0, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
- 0x14, 0x90, 0x81, 0x2D, 0xE0, 0xFF, 0xA3, 0xE0,
- 0x6F, 0x70, 0x0A, 0xF1, 0xCD, 0x91, 0x1C, 0x90,
- 0x81, 0x2E, 0xE0, 0x14, 0xF0, 0x90, 0x01, 0xE6,
- 0xE0, 0x04, 0xF0, 0x22, 0x90, 0x81, 0x1F, 0xE0,
- 0x30, 0xE0, 0x06, 0x90, 0x81, 0x21, 0x74, 0x01,
- 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x45, 0x90,
- 0x81, 0x25, 0xE0, 0xFF, 0x13, 0x13, 0x13, 0x54,
- 0x1F, 0x30, 0xE0, 0x12, 0x90, 0x01, 0x3B, 0xE0,
- 0x30, 0xE4, 0x0B, 0x91, 0x1C, 0x90, 0x81, 0x2D,
- 0xE0, 0x14, 0x90, 0x05, 0x73, 0xF0, 0x90, 0x82,
- 0x0B, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9,
- 0xC3, 0x90, 0x82, 0x0C, 0xE0, 0x94, 0x80, 0x90,
- 0x82, 0x0B, 0xE0, 0x64, 0x80, 0x94, 0x80, 0x40,
- 0x0B, 0x90, 0x01, 0x98, 0xE0, 0x54, 0xFE, 0xF0,
- 0xE0, 0x44, 0x01, 0xF0, 0x12, 0x75, 0xF8, 0xD1,
- 0xD6, 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0, 0x0C,
- 0xE4, 0xF5, 0x1D, 0xA3, 0xF1, 0xFB, 0x90, 0x01,
- 0x57, 0x74, 0x05, 0xF0, 0x90, 0x01, 0xBE, 0xE0,
- 0x04, 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64,
- 0x01, 0x60, 0x02, 0xC1, 0x23, 0x90, 0x81, 0x27,
- 0xE0, 0x70, 0x02, 0xC1, 0x23, 0x90, 0x81, 0x26,
- 0xE0, 0xC4, 0x54, 0x0F, 0x64, 0x01, 0x70, 0x22,
- 0x90, 0x06, 0xAB, 0xE0, 0x90, 0x81, 0x2E, 0xF0,
- 0x90, 0x06, 0xAA, 0xE0, 0x90, 0x81, 0x2D, 0xF0,
- 0xA3, 0xE0, 0xFF, 0x70, 0x08, 0x90, 0x81, 0x2D,
- 0xE0, 0xFE, 0xFF, 0x80, 0x00, 0x90, 0x81, 0x2E,
- 0xEF, 0xF0, 0x90, 0x81, 0x25, 0xE0, 0x44, 0x04,
- 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90, 0x81,
- 0x32, 0xA3, 0xE0, 0x90, 0x05, 0x58, 0xF0, 0x90,
- 0x01, 0x57, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
- 0x02, 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD,
- 0xF0, 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x26, 0xE0,
- 0xFF, 0xC4, 0x54, 0x0F, 0x24, 0xFD, 0x50, 0x02,
- 0x80, 0x0F, 0x90, 0x81, 0x1F, 0xE0, 0x30, 0xE0,
- 0x05, 0x12, 0x6D, 0xF2, 0x80, 0x03, 0x12, 0x6E,
- 0xC9, 0x90, 0x81, 0x25, 0xE0, 0x13, 0x13, 0x13,
- 0x54, 0x1F, 0x30, 0xE0, 0x0E, 0x90, 0x81, 0x2D,
- 0xE0, 0xFF, 0xA3, 0xE0, 0xB5, 0x07, 0x04, 0xF1,
- 0xCD, 0x91, 0x22, 0x90, 0x81, 0x1F, 0xE0, 0xC3,
- 0x13, 0x20, 0xE0, 0x07, 0x90, 0x81, 0x25, 0xE0,
- 0x44, 0x04, 0xF0, 0x22, 0xD1, 0xAB, 0xEF, 0x70,
- 0x02, 0xD1, 0x3C, 0x22, 0x90, 0x81, 0x27, 0xE0,
- 0x64, 0x01, 0x70, 0x66, 0x90, 0x81, 0x26, 0xE0,
- 0x54, 0x0F, 0x60, 0x51, 0x90, 0x81, 0x2A, 0xE0,
- 0x70, 0x03, 0xFF, 0x31, 0x93, 0x90, 0x81, 0x2A,
- 0xE0, 0x64, 0x0C, 0x60, 0x03, 0x12, 0x66, 0x26,
- 0x90, 0x01, 0x5B, 0xE4, 0xF0, 0x90, 0x01, 0x3C,
- 0x74, 0x04, 0xF0, 0xD1, 0xAB, 0xEF, 0x64, 0x01,
- 0x60, 0x38, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
- 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
- 0xFB, 0xFD, 0x7F, 0x58, 0x7E, 0x01, 0x12, 0x50,
- 0x05, 0x90, 0x01, 0x5B, 0x74, 0x05, 0xF0, 0x90,
- 0x06, 0x92, 0x74, 0x01, 0xF0, 0x90, 0x81, 0x24,
- 0xE0, 0x44, 0x08, 0xF0, 0x22, 0x90, 0x81, 0x2A,
- 0xE0, 0x70, 0x07, 0x7D, 0x01, 0x7F, 0x04, 0x12,
- 0x47, 0x3D, 0x22, 0x90, 0x04, 0x1A, 0xE0, 0xF4,
- 0x60, 0x03, 0x7F, 0x00, 0x22, 0x90, 0x04, 0x1B,
- 0xE0, 0x54, 0x07, 0x64, 0x07, 0x7F, 0x01, 0x60,
- 0x02, 0x7F, 0x00, 0x22, 0x12, 0x50, 0x60, 0x90,
- 0x81, 0x2D, 0xE0, 0x14, 0x90, 0x05, 0x73, 0xF0,
- 0x7D, 0x02, 0x7F, 0x02, 0x51, 0x57, 0x90, 0x81,
- 0x42, 0xE0, 0x30, 0xE0, 0x2D, 0x90, 0x80, 0xDE,
- 0xE0, 0xB4, 0x01, 0x26, 0x90, 0x82, 0x17, 0xE0,
- 0x04, 0xF0, 0xE0, 0xB4, 0x0A, 0x0B, 0x90, 0x81,
- 0x44, 0xE0, 0x04, 0xF0, 0xE4, 0x90, 0x82, 0x17,
- 0xF0, 0x90, 0x81, 0x44, 0xE0, 0xFF, 0x90, 0x81,
- 0x43, 0xE0, 0xB5, 0x07, 0x05, 0xE4, 0xA3, 0xF0,
- 0xF1, 0x0B, 0x22, 0xE4, 0xFF, 0x8F, 0x53, 0x90,
- 0x04, 0x1D, 0xE0, 0x60, 0x19, 0x90, 0x05, 0x22,
- 0xE0, 0xF5, 0x56, 0x74, 0xFF, 0xF0, 0xF1, 0x3A,
- 0xBF, 0x01, 0x03, 0x12, 0x74, 0xFB, 0x90, 0x05,
- 0x22, 0xE5, 0x56, 0xF0, 0x80, 0x03, 0x12, 0x74,
- 0xFB, 0x90, 0x04, 0x1F, 0x74, 0x20, 0xF0, 0x7F,
- 0x01, 0x22, 0xE4, 0x90, 0x82, 0x0F, 0xF0, 0xA3,
- 0xF0, 0x90, 0x05, 0xF8, 0xE0, 0x70, 0x0F, 0xA3,
- 0xE0, 0x70, 0x0B, 0xA3, 0xE0, 0x70, 0x07, 0xA3,
- 0xE0, 0x70, 0x03, 0x7F, 0x01, 0x22, 0xD3, 0x90,
- 0x82, 0x10, 0xE0, 0x94, 0xE8, 0x90, 0x82, 0x0F,
- 0xE0, 0x94, 0x03, 0x40, 0x0A, 0x90, 0x01, 0xC0,
- 0xE0, 0x44, 0x20, 0xF0, 0x7F, 0x00, 0x22, 0x7F,
- 0x32, 0x7E, 0x00, 0x12, 0x32, 0xAA, 0x90, 0x82,
- 0x0F, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9,
- 0x80, 0xBF, 0x74, 0x1F, 0x2D, 0xF5, 0x82, 0xE4,
- 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x54, 0x3F, 0xF0,
- 0xEF, 0x60, 0x1D, 0x74, 0x21, 0x2D, 0xF5, 0x82,
- 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x10,
- 0xF0, 0x74, 0x1F, 0x2D, 0xF5, 0x82, 0xE4, 0x34,
- 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x22,
- 0x74, 0x21, 0x2D, 0xF5, 0x82, 0xE4, 0x34, 0xFC,
- 0xF5, 0x83, 0xE0, 0x54, 0xEF, 0xF0, 0x74, 0x1F,
- 0x2D, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83,
- 0xE0, 0x44, 0x40, 0xF0, 0x22, 0xEF, 0x14, 0x90,
- 0x05, 0x73, 0xF0, 0x90, 0x01, 0x3F, 0x74, 0x10,
- 0xF0, 0xFD, 0x7F, 0x03, 0x74, 0x45, 0x2F, 0xF8,
- 0xE6, 0x4D, 0xFE, 0xF6, 0x74, 0x38, 0x2F, 0xF5,
- 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0xEE, 0xF0,
- 0x22, 0xE0, 0x44, 0x02, 0xF0, 0xE4, 0xF5, 0x1D,
- 0x90, 0x81, 0x39, 0xE0, 0xF5, 0x1E, 0xE4, 0xFB,
- 0xFD, 0x7F, 0x54, 0x7E, 0x01, 0x8E, 0x19, 0x8F,
- 0x1A, 0xE5, 0x1E, 0x54, 0x07, 0xC4, 0x33, 0x54,
- 0xE0, 0x85, 0x19, 0x83, 0x85, 0x1A, 0x82, 0xF0,
- 0xE5, 0x1D, 0x54, 0x07, 0xC4, 0x33, 0x54, 0xE0,
- 0xFF, 0xE5, 0x1E, 0x13, 0x13, 0x13, 0x54, 0x1F,
- 0x4F, 0xA3, 0xF0, 0xEB, 0x54, 0x07, 0xC4, 0x33,
- 0x54, 0xE0, 0xFF, 0xE5, 0x1D, 0x13, 0x13, 0x13,
- 0x54, 0x1F, 0x4F, 0x85, 0x1A, 0x82, 0x85, 0x19,
- 0x83, 0xA3, 0xA3, 0xF0, 0xBD, 0x01, 0x0C, 0x85,
- 0x1A, 0x82, 0x8E, 0x83, 0xA3, 0xA3, 0xA3, 0x74,
- 0x03, 0xF0, 0x22, 0x85, 0x1A, 0x82, 0x85, 0x19,
- 0x83, 0xA3, 0xA3, 0xA3, 0x74, 0x01, 0xF0, 0x22,
- 0xE4, 0x90, 0x81, 0x4D, 0xF0, 0x90, 0x81, 0x27,
- 0xE0, 0x60, 0x58, 0x90, 0x80, 0xDE, 0xE0, 0x64,
- 0x01, 0x70, 0x50, 0x90, 0x81, 0x4D, 0x04, 0xF0,
- 0xE4, 0x90, 0x81, 0x2E, 0xF0, 0x90, 0x81, 0x1F,
- 0xE0, 0x30, 0xE0, 0x15, 0x90, 0x81, 0x23, 0xE0,
- 0xB4, 0x02, 0x05, 0xE4, 0x90, 0x81, 0x4D, 0xF0,
- 0x31, 0x73, 0xEF, 0x70, 0x04, 0x90, 0x81, 0x4D,
- 0xF0, 0x90, 0x81, 0x4D, 0xE0, 0x60, 0x24, 0x90,
- 0x81, 0x2B, 0xE0, 0x44, 0x10, 0xF0, 0xE4, 0xF5,
- 0x1D, 0x90, 0x81, 0x2F, 0x12, 0x4F, 0xFB, 0x90,
- 0x01, 0x57, 0x74, 0x05, 0xF0, 0x90, 0x81, 0x2A,
- 0xE0, 0x20, 0xE2, 0x07, 0x7D, 0x01, 0x7F, 0x04,
- 0x12, 0x47, 0x3D, 0x22, 0xE4, 0x90, 0x81, 0x4C,
- 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x70, 0x02, 0x21,
- 0x72, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01, 0x60,
- 0x02, 0x21, 0x72, 0x90, 0x81, 0x26, 0xE0, 0xFF,
- 0xC4, 0x54, 0x0F, 0x60, 0x22, 0x24, 0xFE, 0x60,
- 0x03, 0x04, 0x70, 0x21, 0x90, 0x81, 0x2E, 0xE0,
- 0x14, 0xF0, 0xE0, 0xFF, 0x60, 0x06, 0x90, 0x81,
- 0x30, 0xE0, 0x60, 0x11, 0xEF, 0x70, 0x08, 0x90,
- 0x81, 0x2D, 0xE0, 0xA3, 0xF0, 0x80, 0x00, 0x90,
- 0x81, 0x4C, 0x74, 0x01, 0xF0, 0x90, 0x81, 0x1F,
- 0xE0, 0x30, 0xE0, 0x15, 0x90, 0x81, 0x23, 0xE0,
- 0xB4, 0x02, 0x05, 0xE4, 0x90, 0x81, 0x4C, 0xF0,
- 0x31, 0x73, 0xEF, 0x70, 0x04, 0x90, 0x81, 0x4C,
- 0xF0, 0x90, 0x81, 0x4C, 0xE0, 0x60, 0x43, 0x90,
- 0x81, 0x2B, 0xE0, 0x44, 0x10, 0xF0, 0x90, 0x81,
- 0x30, 0xE0, 0x60, 0x03, 0xB4, 0x01, 0x09, 0xE4,
- 0xF5, 0x1D, 0x90, 0x81, 0x30, 0xE0, 0x80, 0x0D,
- 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x30, 0xE0, 0x75,
- 0xF0, 0x03, 0xA4, 0x24, 0xFE, 0xFF, 0x90, 0x81,
- 0x2F, 0xE0, 0x2F, 0x12, 0x4F, 0xFC, 0x90, 0x01,
- 0x57, 0x74, 0x05, 0xF0, 0x90, 0x81, 0x2A, 0xE0,
- 0x20, 0xE2, 0x07, 0x7D, 0x01, 0x7F, 0x04, 0x12,
- 0x47, 0x3D, 0x22, 0x90, 0x05, 0x43, 0xE0, 0x7F,
- 0x00, 0x30, 0xE7, 0x02, 0x7F, 0x01, 0x22, 0x90,
- 0x81, 0x27, 0xE0, 0x70, 0x07, 0x90, 0x81, 0x1F,
- 0xE0, 0x30, 0xE0, 0x11, 0x90, 0x81, 0x1F, 0xE0,
- 0x30, 0xE0, 0x07, 0x31, 0x73, 0xBF, 0x01, 0x05,
- 0x41, 0x5B, 0x12, 0x4E, 0x3C, 0x22, 0xD3, 0x10,
- 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0x1E,
- 0xE0, 0xB4, 0x01, 0x04, 0x7F, 0x04, 0x80, 0x0B,
- 0x31, 0x73, 0xBF, 0x01, 0x04, 0x7F, 0x01, 0x80,
- 0x02, 0x7F, 0x02, 0x71, 0x0E, 0xD0, 0xD0, 0x92,
- 0xAF, 0x22, 0x90, 0x81, 0x4B, 0xE0, 0x60, 0x0F,
- 0xE4, 0xF0, 0x90, 0x05, 0x53, 0xE0, 0x44, 0x02,
- 0xF0, 0x90, 0x05, 0xFC, 0xE0, 0x04, 0xF0, 0x90,
- 0x81, 0x1F, 0xE0, 0x30, 0xE0, 0x10, 0xA3, 0x74,
- 0x01, 0xF0, 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0xC3,
- 0x13, 0x30, 0xE0, 0x02, 0x31, 0x9E, 0x11, 0xC4,
- 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0, 0x07, 0x91,
- 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x22, 0x90,
- 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x3D, 0x90,
- 0x81, 0x23, 0xE0, 0x7E, 0x00, 0xB4, 0x02, 0x02,
- 0x7E, 0x01, 0x90, 0x81, 0x22, 0xE0, 0x7D, 0x00,
- 0xB4, 0x04, 0x02, 0x7D, 0x01, 0xED, 0x4E, 0x70,
- 0x23, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x02, 0x21,
- 0x9E, 0x51, 0x45, 0x90, 0x81, 0x23, 0xE0, 0xB4,
- 0x08, 0x06, 0xE4, 0xFD, 0x7F, 0x0C, 0x80, 0x09,
- 0x90, 0x81, 0x23, 0xE0, 0x70, 0x06, 0xFD, 0x7F,
- 0x04, 0x12, 0x47, 0x3D, 0x22, 0x90, 0x81, 0x1E,
- 0xE0, 0xB4, 0x01, 0x0F, 0x90, 0x81, 0x23, 0xE0,
- 0x64, 0x02, 0x60, 0x07, 0x7D, 0x01, 0x7F, 0x02,
- 0x12, 0x47, 0x3D, 0x90, 0x81, 0x27, 0xE0, 0x64,
- 0x02, 0x60, 0x14, 0x90, 0x81, 0x26, 0xE0, 0x54,
- 0x0F, 0x60, 0x0C, 0x12, 0x4E, 0xAB, 0xEF, 0x70,
- 0x06, 0xFD, 0x7F, 0x0C, 0x12, 0x47, 0x3D, 0x22,
- 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x3F,
- 0x90, 0x81, 0x23, 0xE0, 0x7E, 0x00, 0xB4, 0x02,
- 0x02, 0x7E, 0x01, 0x90, 0x81, 0x22, 0xE0, 0x7D,
- 0x00, 0xB4, 0x04, 0x02, 0x7D, 0x01, 0xED, 0x4E,
- 0x70, 0x25, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x02,
- 0x21, 0x9E, 0x12, 0x74, 0xAC, 0x90, 0x81, 0x23,
- 0xE0, 0xB4, 0x0C, 0x06, 0xE4, 0xFD, 0x7F, 0x08,
- 0x80, 0x0A, 0x90, 0x81, 0x23, 0xE0, 0xB4, 0x04,
- 0x06, 0xE4, 0xFD, 0xFF, 0x12, 0x47, 0x3D, 0x22,
- 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90,
- 0x81, 0xCB, 0x12, 0x45, 0x1F, 0x12, 0x1F, 0xA4,
- 0xFF, 0x90, 0x81, 0x1E, 0xF0, 0xBF, 0x01, 0x12,
- 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16, 0x90, 0x00,
- 0x01, 0x12, 0x1F, 0xBD, 0x64, 0x01, 0x60, 0x21,
- 0x80, 0x1D, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16,
- 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x64, 0x01,
- 0x60, 0x0F, 0x90, 0x81, 0x1F, 0xE0, 0x20, 0xE0,
- 0x06, 0xE4, 0xFF, 0x71, 0x0E, 0x80, 0x02, 0x31,
- 0x9E, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
- 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0x22,
- 0xE0, 0x90, 0x82, 0x16, 0xF0, 0x6F, 0x70, 0x02,
- 0x81, 0x04, 0xEF, 0x14, 0x60, 0x3E, 0x14, 0x60,
- 0x62, 0x14, 0x70, 0x02, 0x61, 0xB8, 0x14, 0x70,
- 0x02, 0x61, 0xDF, 0x24, 0x04, 0x60, 0x02, 0x81,
- 0x04, 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x04,
- 0x04, 0x91, 0x41, 0x81, 0x04, 0xEF, 0xB4, 0x02,
- 0x04, 0x91, 0x50, 0x81, 0x04, 0x90, 0x82, 0x16,
- 0xE0, 0xFF, 0xB4, 0x03, 0x04, 0x91, 0x54, 0x81,
- 0x04, 0xEF, 0x64, 0x01, 0x60, 0x02, 0x81, 0x04,
- 0x91, 0x43, 0x81, 0x04, 0x90, 0x82, 0x16, 0xE0,
- 0xFF, 0xB4, 0x04, 0x04, 0x91, 0xF3, 0x81, 0x04,
- 0xEF, 0xB4, 0x02, 0x04, 0x91, 0x58, 0x81, 0x04,
- 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x03, 0x04,
- 0x91, 0xE8, 0x81, 0x04, 0xEF, 0x70, 0x7D, 0x91,
- 0x2B, 0x80, 0x79, 0x90, 0x82, 0x16, 0xE0, 0xB4,
- 0x04, 0x05, 0x12, 0x74, 0x60, 0x80, 0x6D, 0x90,
- 0x82, 0x16, 0xE0, 0xB4, 0x01, 0x04, 0x91, 0x21,
- 0x80, 0x62, 0x90, 0x82, 0x16, 0xE0, 0xB4, 0x03,
- 0x05, 0x12, 0x74, 0x71, 0x80, 0x56, 0x90, 0x82,
- 0x16, 0xE0, 0x70, 0x50, 0x91, 0x1F, 0x80, 0x4C,
- 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x04, 0x05,
- 0x12, 0x74, 0x4C, 0x80, 0x3F, 0xEF, 0xB4, 0x01,
- 0x04, 0x91, 0x34, 0x80, 0x37, 0xEF, 0xB4, 0x02,
- 0x04, 0x91, 0xDF, 0x80, 0x2F, 0x90, 0x82, 0x16,
- 0xE0, 0x70, 0x29, 0x91, 0x32, 0x80, 0x25, 0x90,
- 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x03, 0x05, 0x12,
- 0x74, 0x7B, 0x80, 0x18, 0xEF, 0xB4, 0x01, 0x04,
- 0x91, 0x0B, 0x80, 0x10, 0xEF, 0xB4, 0x02, 0x04,
- 0xB1, 0x06, 0x80, 0x08, 0x90, 0x82, 0x16, 0xE0,
- 0x70, 0x02, 0x91, 0x09, 0xD0, 0xD0, 0x92, 0xAF,
- 0x22, 0x91, 0x2B, 0x90, 0x05, 0x22, 0x74, 0x6F,
- 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
- 0x90, 0x81, 0x22, 0x74, 0x04, 0xF0, 0x22, 0x91,
- 0x2B, 0x12, 0x49, 0xDD, 0x90, 0x81, 0x22, 0x74,
- 0x02, 0xF0, 0x22, 0x90, 0x81, 0x22, 0x74, 0x01,
- 0xF0, 0x22, 0x91, 0x2B, 0x90, 0x05, 0x22, 0x74,
- 0xFF, 0xF0, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0,
- 0x22, 0x91, 0xF3, 0x90, 0x05, 0x27, 0xE0, 0x54,
- 0xBF, 0xF0, 0xE4, 0x90, 0x81, 0x22, 0xF0, 0x22,
- 0x91, 0x58, 0x80, 0xEF, 0x91, 0xE8, 0x80, 0xEB,
- 0x91, 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90,
- 0x81, 0x22, 0x04, 0xF0, 0x22, 0xD3, 0x10, 0xAF,
- 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x01, 0x01, 0xE0,
- 0x44, 0x02, 0xF0, 0x90, 0x01, 0x00, 0x74, 0xFF,
- 0xF0, 0x90, 0x06, 0xB7, 0x74, 0x09, 0xF0, 0x90,
- 0x06, 0xB4, 0x74, 0x86, 0xF0, 0x7F, 0x7C, 0x7E,
- 0x08, 0x12, 0x2D, 0x5C, 0xEC, 0x54, 0x7F, 0xFC,
- 0x90, 0x82, 0x01, 0x12, 0x20, 0xCE, 0x90, 0x82,
- 0x01, 0x12, 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12,
- 0x20, 0xCE, 0x7F, 0x7C, 0x7E, 0x08, 0x12, 0x2E,
- 0xA2, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xDA, 0xCC,
- 0xC0, 0x00, 0xC0, 0x7F, 0x8C, 0x7E, 0x08, 0x12,
- 0x2E, 0xA2, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xDA,
- 0x00, 0xC0, 0x00, 0x14, 0x7F, 0x70, 0x7E, 0x0E,
- 0x12, 0x2E, 0xA2, 0x90, 0x81, 0xF9, 0x12, 0x20,
- 0xDA, 0x00, 0x03, 0x3E, 0x60, 0xE4, 0xFD, 0xFF,
- 0xB1, 0x1C, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x91,
- 0x65, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0, 0x22,
- 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90, 0x81, 0x22,
- 0x04, 0xF0, 0x22, 0x90, 0x05, 0x22, 0xE4, 0xF0,
- 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x90,
- 0x81, 0x22, 0x74, 0x01, 0xF0, 0x22, 0x91, 0x65,
- 0x90, 0x05, 0x22, 0x74, 0x6F, 0xF0, 0x90, 0x05,
- 0x27, 0xE0, 0x54, 0xBF, 0xF0, 0x90, 0x81, 0x22,
- 0x74, 0x04, 0xF0, 0x22, 0xD3, 0x10, 0xAF, 0x01,
- 0xC3, 0xC0, 0xD0, 0xC0, 0x07, 0xC0, 0x05, 0x90,
- 0x81, 0xF9, 0x12, 0x44, 0xD9, 0x90, 0x81, 0xE5,
- 0x12, 0x20, 0xCE, 0xD0, 0x05, 0xD0, 0x07, 0x12,
- 0x60, 0xF5, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x90,
- 0x81, 0xC8, 0x12, 0x45, 0x1F, 0xEF, 0x12, 0x45,
- 0x28, 0x55, 0x71, 0x00, 0x55, 0x7A, 0x01, 0x55,
- 0x83, 0x02, 0x55, 0x8B, 0x03, 0x55, 0x94, 0x04,
- 0x55, 0x9C, 0x20, 0x55, 0xA4, 0x21, 0x55, 0xAD,
- 0x23, 0x55, 0xB5, 0x24, 0x55, 0xBE, 0x25, 0x55,
- 0xC7, 0x26, 0x55, 0xCF, 0xC0, 0x00, 0x00, 0x55,
- 0xD8, 0x90, 0x81, 0xC8, 0x12, 0x45, 0x16, 0x02,
- 0x6A, 0xB0, 0x90, 0x81, 0xC8, 0x12, 0x45, 0x16,
- 0x02, 0x65, 0x81, 0x90, 0x81, 0xC8, 0x12, 0x45,
- 0x16, 0x41, 0xC0, 0x90, 0x81, 0xC8, 0x12, 0x45,
- 0x16, 0x02, 0x75, 0xD8, 0x90, 0x81, 0xC8, 0x12,
- 0x45, 0x16, 0x80, 0x44, 0x90, 0x81, 0xC8, 0x12,
- 0x45, 0x16, 0xC1, 0x4B, 0x90, 0x81, 0xC8, 0x12,
- 0x45, 0x16, 0x02, 0x6A, 0xF8, 0x90, 0x81, 0xC8,
- 0x12, 0x45, 0x16, 0xE1, 0xE1, 0x90, 0x81, 0xC8,
- 0x12, 0x45, 0x16, 0x02, 0x4A, 0x6C, 0x90, 0x81,
- 0xC8, 0x12, 0x45, 0x16, 0x02, 0x6B, 0x3E, 0x90,
- 0x81, 0xC8, 0x12, 0x45, 0x16, 0x80, 0x3E, 0x90,
- 0x81, 0xC8, 0x12, 0x45, 0x16, 0x02, 0x6B, 0x4E,
- 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x01, 0xF0, 0x22,
- 0x12, 0x5A, 0x4B, 0x12, 0x1F, 0xA4, 0xFF, 0x54,
- 0x01, 0xFE, 0x90, 0x81, 0x45, 0xE0, 0x54, 0xFE,
- 0x4E, 0xF0, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x14,
- 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x90, 0x81,
- 0x46, 0xF0, 0x90, 0x00, 0x02, 0x12, 0x1F, 0xBD,
- 0x90, 0x81, 0x47, 0xF0, 0x22, 0x12, 0x1F, 0xA4,
- 0xFF, 0x54, 0x01, 0xFE, 0x90, 0x81, 0x3F, 0xE0,
- 0x54, 0xFE, 0x4E, 0xF0, 0x90, 0x00, 0x01, 0x12,
- 0x1F, 0xBD, 0xFE, 0x90, 0x05, 0x54, 0xE0, 0xC3,
- 0x9E, 0x90, 0x81, 0x40, 0xF0, 0xEF, 0x20, 0xE0,
- 0x07, 0x91, 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0,
- 0x90, 0x81, 0x3F, 0xE0, 0x54, 0x01, 0x90, 0x01,
- 0xBC, 0xF0, 0x90, 0x81, 0x40, 0xE0, 0x90, 0x01,
- 0xBD, 0xF0, 0x22, 0x12, 0x1F, 0xA4, 0xFF, 0x54,
- 0x7F, 0x90, 0x81, 0x27, 0xF0, 0xEF, 0xC4, 0x13,
- 0x13, 0x13, 0x54, 0x01, 0xA3, 0xF0, 0x90, 0x00,
- 0x01, 0x12, 0x1F, 0xBD, 0xFF, 0x54, 0xF0, 0xC4,
- 0x54, 0x0F, 0xFE, 0x90, 0x81, 0x26, 0xE0, 0x54,
- 0xF0, 0x4E, 0xF0, 0x90, 0x00, 0x03, 0x12, 0x1F,
- 0xBD, 0x54, 0x01, 0x25, 0xE0, 0xFE, 0x90, 0x81,
- 0x24, 0xE0, 0x54, 0xFD, 0x4E, 0xF0, 0xEF, 0x54,
- 0x0F, 0xC4, 0x54, 0xF0, 0xFF, 0x90, 0x81, 0x26,
- 0xE0, 0x54, 0x0F, 0x4F, 0xF0, 0x90, 0x00, 0x04,
- 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x29, 0xF0, 0xD1,
- 0xC6, 0x90, 0x01, 0xB9, 0x74, 0x01, 0xF0, 0x90,
- 0x01, 0xB8, 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x90,
- 0x01, 0xBA, 0xF0, 0x90, 0x81, 0x29, 0xE0, 0x90,
- 0x01, 0xBB, 0xF0, 0x90, 0x81, 0x26, 0xE0, 0x54,
- 0x0F, 0x90, 0x01, 0xBE, 0xF0, 0x22, 0x90, 0x81,
- 0xCB, 0x12, 0x45, 0x1F, 0x12, 0x72, 0xB3, 0x90,
- 0x81, 0x27, 0xE0, 0xFF, 0x12, 0x4C, 0x3E, 0x90,
- 0x81, 0x27, 0xE0, 0x60, 0x19, 0x90, 0x81, 0xCB,
- 0x12, 0x45, 0x16, 0x90, 0x00, 0x01, 0x12, 0x1F,
- 0xBD, 0x54, 0x0F, 0xFF, 0x90, 0x00, 0x02, 0x12,
- 0x1F, 0xBD, 0xFD, 0x12, 0x72, 0xC4, 0x22, 0xC0,
- 0xE0, 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0,
- 0xD0, 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01,
- 0xC0, 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05,
- 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74,
- 0xF7, 0xF0, 0x74, 0x56, 0xA3, 0xF0, 0x12, 0x6C,
- 0xA5, 0xE5, 0x49, 0x30, 0xE1, 0x03, 0x12, 0x6F,
- 0x79, 0xE5, 0x49, 0x30, 0xE2, 0x02, 0xF1, 0xA5,
- 0xE5, 0x49, 0x30, 0xE3, 0x03, 0x12, 0x6F, 0x8D,
- 0xE5, 0x4A, 0x30, 0xE0, 0x03, 0x12, 0x6F, 0xC9,
- 0xE5, 0x4A, 0x30, 0xE4, 0x03, 0x12, 0x70, 0x22,
- 0xE5, 0x4B, 0x30, 0xE1, 0x02, 0x51, 0x78, 0xE5,
- 0x4B, 0x30, 0xE0, 0x02, 0x31, 0xFF, 0xE5, 0x4B,
- 0x30, 0xE3, 0x02, 0xF1, 0xE0, 0xE5, 0x4C, 0x30,
- 0xE1, 0x05, 0x7F, 0x03, 0x12, 0x44, 0x27, 0xE5,
- 0x4C, 0x30, 0xE4, 0x03, 0x12, 0x4E, 0xC4, 0xE5,
- 0x4C, 0x30, 0xE5, 0x03, 0x12, 0x70, 0x38, 0xE5,
- 0x4C, 0x30, 0xE6, 0x03, 0x12, 0x70, 0xCE, 0x74,
- 0xF7, 0x04, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x56,
- 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06, 0xD0, 0x05,
- 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01,
- 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82, 0xD0, 0x83,
- 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90, 0x81, 0x27,
- 0xE0, 0x60, 0x34, 0x90, 0x06, 0x92, 0xE0, 0x30,
- 0xE0, 0x23, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
- 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
- 0xFB, 0xFD, 0x7F, 0x58, 0x7E, 0x01, 0x11, 0x05,
- 0x90, 0x01, 0x5B, 0x74, 0x05, 0xF0, 0x90, 0x06,
- 0x92, 0x74, 0x01, 0xF0, 0x22, 0x90, 0x81, 0x24,
- 0xE0, 0x54, 0xF7, 0xF0, 0x12, 0x47, 0x2A, 0x22,
- 0x22, 0x12, 0x1F, 0xA4, 0x90, 0x81, 0x31, 0xF0,
- 0x22, 0x90, 0x01, 0xC8, 0xE4, 0xF0, 0xA3, 0xF0,
- 0xA3, 0xF0, 0x7B, 0x01, 0x7A, 0x81, 0x79, 0x51,
- 0x7F, 0xFF, 0xFE, 0x12, 0x2B, 0x27, 0xBF, 0x01,
- 0x09, 0x90, 0x81, 0x51, 0xE0, 0x64, 0x03, 0x60,
- 0x03, 0x22, 0x01, 0xAB, 0xE4, 0x90, 0x81, 0x56,
- 0xF0, 0x90, 0x81, 0x56, 0xE0, 0xFF, 0xC3, 0x94,
- 0x02, 0x40, 0x02, 0x01, 0xE6, 0xC3, 0x74, 0xFE,
- 0x9F, 0xFF, 0xE4, 0x94, 0x00, 0xFE, 0x7B, 0x01,
- 0x7A, 0x81, 0x79, 0x52, 0x12, 0x2B, 0x27, 0xEF,
- 0x64, 0x01, 0x70, 0x77, 0x90, 0x81, 0x52, 0xE0,
- 0xFF, 0x54, 0xC0, 0xFE, 0x60, 0x05, 0xEF, 0x54,
- 0x0C, 0x70, 0x16, 0x90, 0x81, 0x52, 0xE0, 0xFF,
- 0x54, 0x30, 0x60, 0x67, 0xEF, 0x54, 0x03, 0x60,
- 0x62, 0x90, 0x81, 0x53, 0x74, 0x01, 0xF0, 0x80,
- 0x05, 0xE4, 0x90, 0x81, 0x53, 0xF0, 0x90, 0x81,
- 0x53, 0xE0, 0x90, 0x81, 0x52, 0x70, 0x16, 0xE0,
- 0xFF, 0xEE, 0x13, 0x13, 0x54, 0x3F, 0x90, 0x81,
- 0x54, 0xF0, 0xEF, 0x54, 0x0C, 0x13, 0x13, 0x54,
- 0x3F, 0xA3, 0xF0, 0x80, 0x0D, 0xE0, 0xFE, 0x54,
- 0x30, 0x90, 0x81, 0x54, 0xF0, 0xEE, 0x54, 0x03,
- 0xA3, 0xF0, 0x90, 0x81, 0x54, 0xE0, 0x64, 0x30,
- 0x70, 0x54, 0xA3, 0xE0, 0x64, 0x02, 0x70, 0x4E,
- 0x90, 0x00, 0xF5, 0xE0, 0x54, 0x40, 0x90, 0x81,
- 0x57, 0xF0, 0xE0, 0x70, 0x41, 0xA3, 0x74, 0x02,
- 0xF0, 0x80, 0x10, 0x90, 0x81, 0x58, 0x74, 0x01,
- 0xF0, 0x80, 0x08, 0x90, 0x81, 0x56, 0xE0, 0x04,
- 0xF0, 0x01, 0x11, 0x90, 0x01, 0xC4, 0x74, 0xE9,
- 0xF0, 0x74, 0x57, 0xA3, 0xF0, 0x90, 0x81, 0x58,
- 0xE0, 0x90, 0x01, 0xC8, 0xF0, 0x90, 0x81, 0x52,
- 0xE0, 0x90, 0x01, 0xC9, 0xF0, 0x90, 0x81, 0x53,
- 0xE0, 0x90, 0x01, 0xCA, 0xF0, 0xE4, 0xFD, 0x7F,
- 0x1F, 0x12, 0x32, 0x1E, 0x80, 0xD5, 0x22, 0x90,
- 0x00, 0xF7, 0xE0, 0x20, 0xE7, 0x09, 0xE0, 0x7F,
- 0x01, 0x20, 0xE6, 0x0C, 0x7F, 0x02, 0x22, 0x90,
- 0x00, 0xF7, 0xE0, 0x30, 0xE6, 0x02, 0x7F, 0x03,
- 0x22, 0x11, 0xE7, 0x90, 0x80, 0x3C, 0xEF, 0xF0,
- 0x31, 0x13, 0x90, 0x01, 0x64, 0x74, 0x01, 0xF0,
- 0x02, 0x2D, 0xA7, 0x31, 0x81, 0x31, 0xB1, 0x31,
- 0x40, 0x31, 0x5F, 0xE4, 0xF5, 0x35, 0xF5, 0x36,
- 0xF5, 0x37, 0xF5, 0x38, 0xAD, 0x35, 0x7F, 0x50,
- 0x12, 0x32, 0x1E, 0xAD, 0x36, 0x7F, 0x51, 0x12,
- 0x32, 0x1E, 0xAD, 0x37, 0x7F, 0x52, 0x12, 0x32,
- 0x1E, 0xAD, 0x38, 0x7F, 0x53, 0x02, 0x32, 0x1E,
- 0x75, 0x3D, 0x10, 0xE4, 0xF5, 0x3E, 0x75, 0x3F,
- 0x07, 0x75, 0x40, 0x02, 0x90, 0x01, 0x30, 0xE5,
- 0x3D, 0xF0, 0xA3, 0xE5, 0x3E, 0xF0, 0xA3, 0xE5,
- 0x3F, 0xF0, 0xA3, 0xE5, 0x40, 0xF0, 0x22, 0x75,
- 0x45, 0x0E, 0x75, 0x46, 0x01, 0x43, 0x46, 0x10,
- 0x75, 0x47, 0x03, 0x75, 0x48, 0x62, 0x90, 0x01,
- 0x38, 0xE5, 0x45, 0xF0, 0xA3, 0xE5, 0x46, 0xF0,
- 0xA3, 0xE5, 0x47, 0xF0, 0xA3, 0xE5, 0x48, 0xF0,
- 0x22, 0x90, 0x01, 0x30, 0xE4, 0xF0, 0xA3, 0xF0,
- 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x38, 0xF0,
- 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xFD, 0x7F,
- 0x50, 0x12, 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x51,
- 0x12, 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x52, 0x12,
- 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x53, 0x02, 0x32,
- 0x1E, 0x90, 0x01, 0x34, 0x74, 0xFF, 0xF0, 0xA3,
- 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x3C,
- 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xFD,
- 0x7F, 0x54, 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F,
- 0x55, 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F, 0x56,
- 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F, 0x57, 0x02,
- 0x32, 0x1E, 0x90, 0x00, 0x80, 0xE0, 0x44, 0x80,
- 0xFD, 0x7F, 0x80, 0x12, 0x32, 0x1E, 0x90, 0xFD,
- 0x00, 0xE0, 0x54, 0xBF, 0xF0, 0x12, 0x57, 0xE9,
- 0x51, 0x77, 0x12, 0x32, 0x77, 0x51, 0xC9, 0x51,
- 0x5E, 0x7F, 0x01, 0x12, 0x43, 0x15, 0x90, 0x81,
- 0x41, 0x74, 0x02, 0xF0, 0xFF, 0x12, 0x43, 0x15,
- 0x90, 0x81, 0x41, 0xE0, 0x04, 0xF0, 0x7F, 0x03,
- 0x12, 0x43, 0x15, 0x90, 0x81, 0x41, 0xE0, 0x04,
- 0xF0, 0x31, 0x01, 0x51, 0x3F, 0x90, 0x00, 0x80,
- 0xE0, 0x44, 0x40, 0xFD, 0x7F, 0x80, 0x12, 0x32,
- 0x1E, 0x75, 0x20, 0xFF, 0x51, 0x68, 0x51, 0xF9,
- 0x51, 0x7F, 0xE4, 0xFF, 0x02, 0x43, 0x9E, 0x51,
- 0x62, 0x51, 0x6F, 0x51, 0xA7, 0x71, 0x4F, 0x51,
- 0x8A, 0x51, 0x95, 0x90, 0x81, 0x45, 0xE0, 0x54,
- 0xFE, 0xF0, 0xA3, 0x74, 0x03, 0xF0, 0xA3, 0xF0,
- 0xE4, 0xA3, 0xF0, 0xA3, 0xF0, 0x22, 0xE4, 0xF5,
- 0x4D, 0x22, 0xE4, 0x90, 0x80, 0xDE, 0xF0, 0x22,
- 0x75, 0xE8, 0x03, 0x75, 0xA8, 0x84, 0x22, 0xE4,
- 0x90, 0x80, 0xD8, 0xF0, 0xA3, 0xF0, 0x22, 0x90,
- 0x01, 0x94, 0xE0, 0x44, 0x01, 0xF0, 0x22, 0x90,
- 0x01, 0xE4, 0x74, 0x0B, 0xF0, 0xA3, 0x74, 0x01,
- 0xF0, 0x22, 0x90, 0x81, 0x3F, 0xE0, 0x54, 0xFE,
- 0xF0, 0xE4, 0xA3, 0xF0, 0x22, 0x90, 0x81, 0x42,
- 0xE0, 0x54, 0xFE, 0xF0, 0x54, 0x7F, 0xF0, 0xA3,
- 0x74, 0x0A, 0xF0, 0xE4, 0xA3, 0xF0, 0x22, 0x90,
- 0x81, 0x1F, 0xE0, 0x54, 0xFE, 0xF0, 0x54, 0xFD,
- 0xF0, 0x54, 0xFB, 0xF0, 0x54, 0xF7, 0xF0, 0x54,
- 0xEF, 0xF0, 0x54, 0xDF, 0xF0, 0xE4, 0xA3, 0xF0,
- 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0x74, 0x0C, 0xF0,
- 0x22, 0x90, 0x01, 0x01, 0xE0, 0x44, 0x04, 0xF0,
- 0x90, 0x01, 0x9C, 0x74, 0x7E, 0xF0, 0xA3, 0x74,
- 0x92, 0xF0, 0xA3, 0x74, 0xA0, 0xF0, 0xA3, 0x74,
- 0x24, 0xF0, 0x90, 0x01, 0x9B, 0x74, 0x49, 0xF0,
- 0x90, 0x01, 0x9A, 0x74, 0xE0, 0xF0, 0x90, 0x01,
- 0x99, 0xE4, 0xF0, 0x90, 0x01, 0x98, 0x04, 0xF0,
- 0x22, 0xE4, 0x90, 0x81, 0x51, 0xF0, 0xA3, 0xF0,
- 0x90, 0x01, 0x98, 0xE0, 0x7F, 0x00, 0x30, 0xE4,
- 0x02, 0x7F, 0x01, 0xEF, 0x64, 0x01, 0x60, 0x3E,
- 0xC3, 0x90, 0x81, 0x52, 0xE0, 0x94, 0x88, 0x90,
- 0x81, 0x51, 0xE0, 0x94, 0x13, 0x40, 0x08, 0x90,
- 0x01, 0xC1, 0xE0, 0x44, 0x10, 0xF0, 0x22, 0x90,
- 0x81, 0x51, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44,
- 0xA9, 0x7F, 0x14, 0x7E, 0x00, 0x12, 0x32, 0xAA,
- 0xD3, 0x90, 0x81, 0x52, 0xE0, 0x94, 0x32, 0x90,
- 0x81, 0x51, 0xE0, 0x94, 0x00, 0x40, 0xB9, 0x90,
- 0x01, 0xC6, 0xE0, 0x30, 0xE3, 0xB2, 0x22, 0xE4,
- 0x90, 0x81, 0x27, 0xF0, 0xA3, 0xF0, 0x90, 0x81,
- 0x26, 0xE0, 0x54, 0x0F, 0xF0, 0x54, 0xF0, 0xF0,
- 0x90, 0x81, 0x24, 0xE0, 0x54, 0xFD, 0xF0, 0x54,
- 0xF7, 0xF0, 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x2D,
- 0x74, 0x01, 0xF0, 0xA3, 0xF0, 0x90, 0x81, 0x24,
- 0xE0, 0x54, 0xFB, 0xF0, 0xA3, 0xE0, 0x54, 0xFB,
- 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90, 0x81,
- 0x2F, 0x74, 0x07, 0xF0, 0x90, 0x81, 0x32, 0xE4,
- 0xF0, 0xA3, 0x74, 0x02, 0xF0, 0xE4, 0x90, 0x81,
- 0x2B, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x54, 0xFE,
- 0xF0, 0x90, 0x81, 0x29, 0x74, 0x0C, 0xF0, 0x90,
- 0x81, 0x24, 0xE0, 0x54, 0xDF, 0xF0, 0x90, 0x81,
- 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x24, 0xE0,
- 0x54, 0xBF, 0xF0, 0x54, 0x7F, 0xF0, 0xA3, 0xE0,
- 0x54, 0xFE, 0xF0, 0x54, 0xFD, 0xF0, 0x54, 0xF7,
- 0xF0, 0x90, 0x81, 0x34, 0x12, 0x20, 0xDA, 0x00,
- 0x00, 0x00, 0x00, 0x90, 0x80, 0x3C, 0xE0, 0xB4,
- 0x01, 0x08, 0x90, 0x81, 0x31, 0x74, 0x99, 0xF0,
- 0x80, 0x12, 0x90, 0x80, 0x3C, 0xE0, 0x90, 0x81,
- 0x31, 0xB4, 0x03, 0x05, 0x74, 0x90, 0xF0, 0x80,
- 0x03, 0x74, 0x40, 0xF0, 0x90, 0x81, 0x38, 0x74,
- 0x01, 0xF0, 0xA3, 0x74, 0x05, 0xF0, 0xA3, 0xE0,
- 0x54, 0x01, 0x44, 0x28, 0xF0, 0xA3, 0x74, 0x05,
- 0xF0, 0xE4, 0xA3, 0xF0, 0xA3, 0xE0, 0x54, 0xFD,
- 0xF0, 0x54, 0xFB, 0xF0, 0x54, 0xF7, 0xF0, 0x54,
- 0xEF, 0xF0, 0x54, 0xDF, 0xF0, 0x54, 0xBF, 0xF0,
- 0xE4, 0xA3, 0xF0, 0x22, 0xE4, 0x90, 0x81, 0x59,
- 0xF0, 0x90, 0x81, 0x59, 0xE0, 0x64, 0x01, 0xF0,
- 0x24, 0x24, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x5C,
- 0xA3, 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0xFF, 0x90,
- 0x81, 0x29, 0xE0, 0x6F, 0x60, 0x03, 0x12, 0x47,
- 0x2A, 0xD1, 0x08, 0xBF, 0x01, 0x02, 0x91, 0x5F,
- 0xB1, 0xF2, 0x12, 0x32, 0x9E, 0xBF, 0x01, 0x02,
- 0xB1, 0x67, 0x12, 0x42, 0x4D, 0x80, 0xCA, 0xD3,
- 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81,
- 0x24, 0xE0, 0x30, 0xE0, 0x24, 0x90, 0x81, 0x1F,
- 0xE0, 0xFF, 0x30, 0xE0, 0x1A, 0xC3, 0x13, 0x30,
- 0xE0, 0x07, 0xB1, 0xFB, 0xBF, 0x01, 0x12, 0x80,
- 0x0A, 0x90, 0x81, 0x23, 0xE0, 0xFF, 0x60, 0x03,
- 0xB4, 0x08, 0x06, 0x91, 0x96, 0x80, 0x02, 0x91,
- 0xA6, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
- 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0xB1, 0x22, 0x91,
- 0xBA, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x90, 0x81,
- 0x2A, 0xE0, 0x70, 0x0D, 0xD1, 0x2F, 0xBF, 0x01,
- 0x08, 0x91, 0x96, 0x90, 0x01, 0xE5, 0xE0, 0x04,
- 0xF0, 0x22, 0xB1, 0xF3, 0x90, 0x00, 0x08, 0xE0,
- 0x54, 0xEF, 0xFD, 0x7F, 0x08, 0x12, 0x32, 0x1E,
- 0xE4, 0xFF, 0x8F, 0x50, 0xE4, 0x90, 0x81, 0x5A,
- 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x09, 0xE0, 0x7F,
- 0x00, 0x30, 0xE7, 0x02, 0x7F, 0x01, 0xEF, 0x65,
- 0x50, 0x60, 0x3E, 0xC3, 0x90, 0x81, 0x5B, 0xE0,
- 0x94, 0x88, 0x90, 0x81, 0x5A, 0xE0, 0x94, 0x13,
- 0x40, 0x08, 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x10,
- 0xF0, 0x22, 0x90, 0x81, 0x5A, 0xE4, 0x75, 0xF0,
- 0x01, 0x12, 0x44, 0xA9, 0x7F, 0x14, 0x7E, 0x00,
- 0x12, 0x32, 0xAA, 0xD3, 0x90, 0x81, 0x5B, 0xE0,
- 0x94, 0x32, 0x90, 0x81, 0x5A, 0xE0, 0x94, 0x00,
- 0x40, 0xB9, 0x90, 0x01, 0xC6, 0xE0, 0x30, 0xE0,
- 0xB2, 0x22, 0x90, 0x81, 0x31, 0xE0, 0xFD, 0x7F,
- 0x93, 0x12, 0x32, 0x1E, 0x90, 0x81, 0x28, 0xE0,
- 0x60, 0x12, 0x90, 0x01, 0x2F, 0xE0, 0x30, 0xE7,
- 0x05, 0x74, 0x10, 0xF0, 0x80, 0x06, 0x90, 0x01,
- 0x2F, 0x74, 0x90, 0xF0, 0x90, 0x00, 0x08, 0xE0,
- 0x44, 0x10, 0xFD, 0x7F, 0x08, 0x12, 0x32, 0x1E,
- 0x7F, 0x01, 0x91, 0xCA, 0x90, 0x00, 0x90, 0xE0,
- 0x44, 0x01, 0xFD, 0x7F, 0x90, 0x12, 0x32, 0x1E,
- 0x7F, 0x14, 0x7E, 0x00, 0x02, 0x32, 0xAA, 0xD3,
- 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x12, 0x2D,
- 0xA7, 0xE4, 0xF5, 0x52, 0x12, 0x32, 0x9E, 0xEF,
- 0x60, 0x73, 0x63, 0x52, 0x01, 0xE5, 0x52, 0x24,
- 0x67, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x5D, 0xA3,
- 0xF0, 0x90, 0x00, 0x88, 0xE0, 0xF5, 0x50, 0xF5,
- 0x51, 0x54, 0x0F, 0x60, 0xDF, 0xE5, 0x50, 0x30,
- 0xE0, 0x0B, 0x20, 0xE4, 0x03, 0x12, 0x29, 0xC5,
- 0x53, 0x51, 0xEE, 0x80, 0x3F, 0xE5, 0x50, 0x30,
- 0xE1, 0x16, 0x20, 0xE5, 0x0E, 0x12, 0x11, 0xBD,
- 0xEF, 0x70, 0x03, 0x43, 0x51, 0x20, 0x90, 0x01,
- 0x06, 0xE4, 0xF0, 0x53, 0x51, 0xFD, 0x80, 0x24,
- 0xE5, 0x50, 0x30, 0xE2, 0x0B, 0x20, 0xE6, 0x03,
- 0x12, 0x67, 0x06, 0x53, 0x51, 0xFB, 0x80, 0x14,
- 0xE5, 0x50, 0x30, 0xE3, 0x0F, 0x20, 0xE7, 0x09,
- 0x12, 0x61, 0x6E, 0xEF, 0x70, 0x03, 0x43, 0x51,
- 0x80, 0x53, 0x51, 0xF7, 0xAD, 0x51, 0x7F, 0x88,
- 0x12, 0x32, 0x1E, 0x80, 0x87, 0xD0, 0xD0, 0x92,
- 0xAF, 0x22, 0x22, 0x90, 0x00, 0x90, 0xE0, 0x20,
- 0xE0, 0xF9, 0x22, 0x90, 0x81, 0x22, 0xE0, 0x64,
- 0x02, 0x7F, 0x01, 0x60, 0x02, 0x7F, 0x00, 0x22,
- 0x7F, 0x02, 0x90, 0x81, 0x41, 0xE0, 0xFE, 0xEF,
- 0xC3, 0x9E, 0x50, 0x18, 0xEF, 0x25, 0xE0, 0x24,
- 0x81, 0xF8, 0xE6, 0x30, 0xE4, 0x0B, 0x90, 0x01,
- 0xB8, 0x74, 0x08, 0xF0, 0xA3, 0xF0, 0x7F, 0x00,
- 0x22, 0x0F, 0x80, 0xDE, 0x7F, 0x01, 0x22, 0x90,
- 0x02, 0x87, 0xE0, 0x60, 0x08, 0x90, 0x01, 0xB8,
- 0x74, 0x01, 0xF0, 0x80, 0x17, 0x90, 0x02, 0x86,
- 0xE0, 0x20, 0xE1, 0x08, 0x90, 0x01, 0xB8, 0x74,
- 0x04, 0xF0, 0x80, 0x08, 0x90, 0x01, 0xB8, 0xE4,
- 0xF0, 0x7F, 0x01, 0x22, 0x90, 0x01, 0xB9, 0x74,
- 0x08, 0xF0, 0x7F, 0x00, 0x22, 0xE4, 0xFB, 0xFA,
- 0xFD, 0x7F, 0x01, 0x12, 0x44, 0x4E, 0x90, 0x81,
- 0xBD, 0xEF, 0xF0, 0x60, 0xF0, 0xD1, 0x71, 0x80,
- 0xEC, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
- 0x90, 0x01, 0xCC, 0xE0, 0x54, 0x0F, 0x90, 0x81,
- 0xBE, 0xF0, 0x90, 0x81, 0xBE, 0xE0, 0xFD, 0x70,
- 0x02, 0xE1, 0x9C, 0x90, 0x82, 0x09, 0xE0, 0xFF,
- 0x74, 0x01, 0x7E, 0x00, 0xA8, 0x07, 0x08, 0x80,
- 0x05, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
- 0xFF, 0xEF, 0x5D, 0x70, 0x02, 0xE1, 0x95, 0x90,
- 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01,
- 0xD0, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xBF,
- 0xF0, 0x75, 0x13, 0x01, 0x75, 0x14, 0x81, 0x75,
- 0x15, 0xBF, 0x75, 0x16, 0x01, 0x7B, 0x01, 0x7A,
- 0x81, 0x79, 0xC0, 0x12, 0x2B, 0xED, 0x90, 0x82,
- 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01, 0xD1,
- 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xC1, 0xF0,
- 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90,
- 0x01, 0xD2, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81,
- 0xC2, 0xF0, 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0,
- 0x04, 0x90, 0x01, 0xD3, 0x12, 0x45, 0x0A, 0xE0,
- 0x90, 0x81, 0xC3, 0xF0, 0x90, 0x82, 0x09, 0xE0,
- 0x75, 0xF0, 0x04, 0x90, 0x01, 0xF0, 0x12, 0x45,
- 0x0A, 0xE0, 0x90, 0x81, 0xC4, 0xF0, 0x90, 0x82,
- 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01, 0xF1,
- 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xC5, 0xF0,
- 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90,
- 0x01, 0xF2, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81,
- 0xC6, 0xF0, 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0,
- 0x04, 0x90, 0x01, 0xF3, 0x12, 0x45, 0x0A, 0xE0,
- 0x90, 0x81, 0xC7, 0xF0, 0x90, 0x81, 0xBE, 0xE0,
- 0xFF, 0x90, 0x82, 0x09, 0xE0, 0xFE, 0x74, 0x01,
- 0xA8, 0x06, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
- 0xFC, 0xF4, 0x5F, 0x90, 0x81, 0xBE, 0xF0, 0x90,
- 0x82, 0x09, 0xE0, 0xFF, 0x74, 0x01, 0xA8, 0x07,
- 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC, 0x90,
- 0x01, 0xCC, 0xF0, 0x90, 0x81, 0xC0, 0xE0, 0xFF,
- 0x7B, 0x01, 0x7A, 0x81, 0x79, 0xC1, 0x12, 0x55,
- 0x3F, 0x90, 0x82, 0x09, 0xE0, 0x04, 0xF0, 0xE0,
- 0x54, 0x03, 0xF0, 0xC1, 0x82, 0x90, 0x01, 0xC0,
- 0xE0, 0x44, 0x02, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
- 0x22, 0xE4, 0xFB, 0xFA, 0xFD, 0x7F, 0x01, 0x12,
- 0x44, 0x4E, 0x90, 0x81, 0xD0, 0xEF, 0xF0, 0x60,
- 0xF0, 0x12, 0x6C, 0x19, 0x80, 0xEB, 0x90, 0x81,
- 0xD4, 0xEF, 0xF0, 0xA3, 0xED, 0xF0, 0xA3, 0x12,
- 0x20, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x90,
- 0x81, 0xE2, 0xF0, 0x7F, 0x24, 0x7E, 0x08, 0x12,
- 0x2D, 0x5C, 0x90, 0x81, 0xDA, 0x12, 0x20, 0xCE,
- 0x90, 0x81, 0xD4, 0xE0, 0xFB, 0x70, 0x08, 0x90,
- 0x81, 0xDA, 0x12, 0x44, 0xD9, 0x80, 0x16, 0xEB,
- 0x75, 0xF0, 0x08, 0xA4, 0x24, 0x62, 0xF5, 0x82,
- 0xE4, 0x34, 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3,
- 0xE0, 0xFF, 0x12, 0x2D, 0x5C, 0x90, 0x81, 0xDE,
- 0x12, 0x20, 0xCE, 0x90, 0x81, 0xD5, 0xE0, 0xFF,
- 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x17, 0x12, 0x20,
- 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
- 0x07, 0x90, 0x81, 0xDE, 0x12, 0x44, 0xD9, 0xED,
- 0x54, 0x7F, 0xFD, 0xEC, 0x54, 0x80, 0xFC, 0x12,
- 0x44, 0xCC, 0xEC, 0x44, 0x80, 0xFC, 0x90, 0x81,
- 0xDE, 0x12, 0x20, 0xCE, 0x90, 0x81, 0xDA, 0x12,
- 0x44, 0xD9, 0xEC, 0x54, 0x7F, 0xFC, 0x90, 0x85,
- 0xBB, 0x12, 0x20, 0xCE, 0x7F, 0x24, 0x7E, 0x08,
- 0x12, 0x2E, 0xA2, 0x90, 0x81, 0xD4, 0xE0, 0x75,
- 0xF0, 0x08, 0xA4, 0x24, 0x62, 0xF5, 0x82, 0xE4,
- 0x34, 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0,
- 0xFF, 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x81, 0xDE,
- 0x12, 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12, 0x20,
- 0xCE, 0xD0, 0x07, 0xD0, 0x06, 0x12, 0x2E, 0xA2,
- 0x90, 0x81, 0xDA, 0x12, 0x44, 0xD9, 0xEC, 0x44,
- 0x80, 0xFC, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xCE,
- 0x7F, 0x24, 0x7E, 0x08, 0x12, 0x2E, 0xA2, 0x90,
- 0x81, 0xD4, 0xE0, 0x70, 0x04, 0x7F, 0x20, 0x80,
- 0x09, 0x90, 0x81, 0xD4, 0xE0, 0xB4, 0x01, 0x16,
- 0x7F, 0x28, 0x7E, 0x08, 0x12, 0x2D, 0x5C, 0x78,
- 0x08, 0x12, 0x20, 0xA8, 0xEF, 0x54, 0x01, 0xFF,
- 0xE4, 0x90, 0x81, 0xE2, 0xEF, 0xF0, 0x90, 0x81,
- 0xE2, 0xE0, 0x90, 0x81, 0xD4, 0x60, 0x0E, 0xE0,
- 0x75, 0xF0, 0x08, 0xA4, 0x24, 0x66, 0xF5, 0x82,
- 0xE4, 0x34, 0x87, 0x80, 0x0C, 0xE0, 0x75, 0xF0,
- 0x08, 0xA4, 0x24, 0x64, 0xF5, 0x82, 0xE4, 0x34,
- 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
- 0x12, 0x2D, 0x5C, 0xED, 0x54, 0x0F, 0xFD, 0xE4,
- 0xFC, 0x90, 0x81, 0xD6, 0x12, 0x20, 0xCE, 0x90,
- 0x81, 0xD6, 0x02, 0x44, 0xD9, 0x90, 0x81, 0xE3,
- 0xEF, 0xF0, 0xAB, 0x05, 0x90, 0x81, 0xE9, 0x12,
- 0x20, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xAF, 0x03,
- 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x14, 0x12, 0x20,
- 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
- 0x07, 0x90, 0x81, 0xE5, 0x12, 0x44, 0xD9, 0xED,
- 0x54, 0x0F, 0xFD, 0xE4, 0xFC, 0x12, 0x44, 0xCC,
- 0xEC, 0x54, 0x0F, 0xFC, 0x90, 0x81, 0xE9, 0x12,
- 0x20, 0xCE, 0x90, 0x81, 0xE3, 0xE0, 0x75, 0xF0,
- 0x08, 0xA4, 0x24, 0x60, 0xF5, 0x82, 0xE4, 0x34,
- 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
- 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x81, 0xE9, 0x12,
- 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xCE,
- 0xD0, 0x07, 0xD0, 0x06, 0x02, 0x2E, 0xA2, 0xD3,
- 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x12, 0x5F,
- 0xB6, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x78, 0x10,
- 0x74, 0x01, 0xF2, 0x90, 0x02, 0x09, 0xE0, 0x78,
- 0x00, 0xF2, 0x08, 0x74, 0x20, 0xF2, 0x18, 0xE2,
- 0xFF, 0x30, 0xE0, 0x05, 0x08, 0xE2, 0x24, 0x80,
- 0xF2, 0xEF, 0xC3, 0x13, 0x90, 0xFD, 0x10, 0xF0,
- 0x78, 0x01, 0xE2, 0x24, 0x00, 0xF5, 0x82, 0xE4,
- 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x78, 0x03, 0xF2,
- 0x64, 0x04, 0x60, 0x0D, 0xE2, 0xFF, 0x64, 0x08,
- 0x60, 0x07, 0xEF, 0x64, 0x0C, 0x60, 0x02, 0x61,
- 0xDE, 0xE4, 0x78, 0x02, 0xF2, 0x78, 0x03, 0xE2,
- 0xFF, 0x18, 0xE2, 0xC3, 0x9F, 0x50, 0x2D, 0xE2,
- 0xFD, 0x18, 0xE2, 0x2D, 0x90, 0x81, 0x5A, 0xF0,
- 0xE0, 0xFF, 0x24, 0x00, 0xF5, 0x82, 0xE4, 0x34,
- 0xFC, 0xF5, 0x83, 0xE0, 0xFE, 0x74, 0x04, 0x2D,
- 0xF8, 0xEE, 0xF2, 0xEF, 0xB4, 0xFF, 0x06, 0x90,
- 0xFD, 0x10, 0xE0, 0x04, 0xF0, 0x78, 0x02, 0xE2,
- 0x04, 0xF2, 0x80, 0xC9, 0x78, 0x04, 0xE2, 0x78,
- 0x12, 0xF2, 0xFF, 0x78, 0x05, 0xE2, 0x78, 0x11,
- 0xF2, 0x78, 0x06, 0xE2, 0x78, 0x13, 0xF2, 0x78,
- 0x07, 0xE2, 0x78, 0x14, 0xF2, 0x78, 0x08, 0xE2,
- 0x78, 0x33, 0xF2, 0x78, 0x09, 0xE2, 0x78, 0x34,
- 0xF2, 0x78, 0x0A, 0xE2, 0x78, 0x35, 0xF2, 0x78,
- 0x0B, 0xE2, 0x78, 0x36, 0xF2, 0x78, 0x0C, 0xE2,
- 0x78, 0x37, 0xF2, 0x78, 0x0D, 0xE2, 0x78, 0x38,
- 0xF2, 0x78, 0x0E, 0xE2, 0x78, 0x39, 0xF2, 0x78,
- 0x0F, 0xE2, 0x78, 0x3A, 0xF2, 0xE4, 0x78, 0x15,
- 0xF2, 0xEF, 0x24, 0xF8, 0x60, 0x75, 0x24, 0xFC,
- 0x60, 0x6C, 0x24, 0x08, 0x60, 0x02, 0x61, 0xC0,
- 0x78, 0x11, 0xE2, 0xB4, 0x01, 0x05, 0x12, 0x29,
- 0xC5, 0x61, 0xC5, 0x78, 0x11, 0xE2, 0xB4, 0x02,
- 0x05, 0x12, 0x11, 0xBD, 0x61, 0xC5, 0x78, 0x11,
- 0xE2, 0xB4, 0x03, 0x04, 0xF1, 0x06, 0x61, 0xC5,
- 0x78, 0x11, 0xE2, 0xB4, 0x10, 0x17, 0x78, 0x14,
- 0xE2, 0xFE, 0x18, 0xE2, 0xFD, 0xED, 0xFF, 0x78,
- 0x16, 0xEE, 0xF2, 0xFE, 0x08, 0xEF, 0xF2, 0xFF,
- 0x12, 0x32, 0xAA, 0x61, 0xC5, 0x78, 0x11, 0xE2,
- 0xB4, 0x11, 0x17, 0x78, 0x14, 0xE2, 0xFE, 0x18,
- 0xE2, 0xFD, 0xED, 0xFF, 0x78, 0x16, 0xEE, 0xF2,
- 0xFE, 0x08, 0xEF, 0xF2, 0xFF, 0x12, 0x32, 0x06,
- 0x61, 0xC5, 0x78, 0x11, 0xE2, 0xF4, 0x60, 0x02,
- 0x61, 0xC5, 0x18, 0xF2, 0x61, 0xC5, 0x78, 0x15,
- 0x74, 0x01, 0xF2, 0x78, 0x11, 0xE2, 0x64, 0x07,
- 0x60, 0x02, 0x61, 0xAA, 0x78, 0x34, 0xE2, 0xFF,
- 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x08, 0x12, 0x20,
- 0xBB, 0xC0, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
- 0x07, 0x78, 0x33, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD,
- 0xFE, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0xC0, 0x04,
- 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07, 0x78, 0x35,
- 0xE2, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x10,
- 0x12, 0x20, 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
- 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x78, 0x18,
- 0x12, 0x44, 0xFE, 0x78, 0x15, 0xE2, 0x70, 0x02,
- 0x61, 0x93, 0x18, 0xE2, 0xFF, 0x18, 0xE2, 0xFD,
- 0x31, 0x5F, 0x78, 0x1C, 0x12, 0x44, 0xFE, 0x78,
- 0x38, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78,
- 0x08, 0x12, 0x20, 0xBB, 0xC0, 0x04, 0xA9, 0x05,
- 0xAA, 0x06, 0xAB, 0x07, 0x78, 0x37, 0xE2, 0xFF,
- 0xE4, 0xFC, 0xFD, 0xFE, 0xD0, 0x00, 0x12, 0x44,
- 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0, 0x06, 0xC0,
- 0x07, 0x78, 0x39, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD,
- 0xFE, 0x78, 0x10, 0x12, 0x20, 0xBB, 0xD0, 0x03,
- 0xD0, 0x02, 0xD0, 0x01, 0xD0, 0x00, 0x12, 0x44,
- 0xCC, 0x78, 0x20, 0x12, 0x44, 0xFE, 0x78, 0x20,
- 0x12, 0x44, 0xE5, 0x12, 0x20, 0x9B, 0x78, 0x1C,
- 0x12, 0x44, 0xF1, 0x12, 0x44, 0xBF, 0xC0, 0x04,
- 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07, 0x78, 0x18,
- 0x12, 0x44, 0xE5, 0x78, 0x20, 0x12, 0x44, 0xF1,
- 0x12, 0x44, 0xBF, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
- 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x78, 0x18,
- 0x12, 0x44, 0xFE, 0x78, 0x18, 0x12, 0x44, 0xE5,
- 0x90, 0x81, 0xF9, 0x12, 0x20, 0xCE, 0x78, 0x13,
- 0xE2, 0xFD, 0x08, 0xE2, 0xFF, 0x12, 0x55, 0x1C,
- 0x80, 0x1B, 0x78, 0x13, 0xE2, 0xFF, 0x08, 0xE2,
- 0xFD, 0x78, 0x11, 0xE2, 0xFB, 0x78, 0x15, 0xE2,
- 0x90, 0x81, 0xBC, 0xF0, 0x71, 0xE1, 0x80, 0x05,
- 0x78, 0x10, 0x74, 0x02, 0xF2, 0x78, 0x10, 0xE2,
- 0xFF, 0xC3, 0x94, 0x02, 0x50, 0x10, 0xEF, 0x60,
- 0x0A, 0x78, 0x02, 0xE2, 0xFF, 0x18, 0xE2, 0x2F,
- 0xF2, 0x21, 0x90, 0x7F, 0x01, 0x22, 0x7F, 0x00,
- 0x22, 0xAC, 0x07, 0xED, 0xAD, 0x04, 0x78, 0x24,
- 0xF2, 0xED, 0x08, 0xF2, 0xEB, 0xB4, 0x04, 0x07,
- 0x78, 0x27, 0x74, 0x01, 0xF2, 0x80, 0x0E, 0xEB,
- 0x78, 0x27, 0xB4, 0x05, 0x05, 0x74, 0x02, 0xF2,
- 0x80, 0x03, 0x74, 0x04, 0xF2, 0xD3, 0x78, 0x25,
- 0xE2, 0x94, 0xFF, 0x18, 0xE2, 0x94, 0x00, 0x50,
- 0x63, 0xE4, 0x78, 0x26, 0xF2, 0x78, 0x27, 0xE2,
- 0xFF, 0x18, 0xE2, 0xFE, 0xC3, 0x9F, 0x40, 0x02,
- 0xA1, 0x7F, 0x74, 0x33, 0x2E, 0xF8, 0xE2, 0x78,
- 0x28, 0xF2, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x2D,
- 0x74, 0x37, 0x2E, 0xF8, 0xE2, 0x78, 0x32, 0xF2,
- 0xEE, 0xFF, 0x78, 0x25, 0xE2, 0x2F, 0xFF, 0x18,
- 0xE2, 0x34, 0x00, 0x8F, 0x82, 0xF5, 0x83, 0xE0,
- 0x78, 0x29, 0xF2, 0x78, 0x32, 0xE2, 0xFF, 0xF4,
- 0xFE, 0x78, 0x29, 0xE2, 0x5E, 0xFE, 0x18, 0xE2,
- 0xFD, 0xEF, 0x5D, 0x4E, 0xF2, 0x78, 0x24, 0x08,
- 0xE2, 0xFF, 0x08, 0xE2, 0x2F, 0xFF, 0x78, 0x28,
- 0xE2, 0xFD, 0x12, 0x32, 0x1E, 0x78, 0x26, 0xE2,
- 0x04, 0xF2, 0x80, 0xA1, 0xD3, 0x78, 0x25, 0xE2,
- 0x94, 0xFF, 0x18, 0xE2, 0x94, 0x07, 0x50, 0x69,
- 0xE4, 0x78, 0x26, 0xF2, 0x78, 0x27, 0xE2, 0xFF,
- 0x18, 0xE2, 0xFE, 0xC3, 0x9F, 0x40, 0x02, 0xA1,
- 0x7F, 0x74, 0x33, 0x2E, 0xF8, 0xE2, 0x78, 0x28,
- 0xF2, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x2D, 0x78,
- 0x26, 0xE2, 0xFF, 0xFD, 0x18, 0xE2, 0x2D, 0xFD,
- 0x18, 0xE2, 0x34, 0x00, 0x8D, 0x82, 0xF5, 0x83,
- 0xE0, 0x78, 0x29, 0xF2, 0x74, 0x37, 0x2F, 0xF8,
- 0xE2, 0x78, 0x32, 0xF2, 0xE2, 0xFF, 0xF4, 0xFE,
- 0x78, 0x29, 0xE2, 0x5E, 0xFE, 0x18, 0xE2, 0xFD,
- 0xEF, 0x5D, 0x4E, 0xF2, 0x78, 0x28, 0xE2, 0xFF,
- 0x78, 0x26, 0xE2, 0xFD, 0x18, 0xE2, 0x2D, 0xFD,
- 0x18, 0xE2, 0x34, 0x00, 0x8D, 0x82, 0xF5, 0x83,
- 0xEF, 0xF0, 0x78, 0x26, 0xE2, 0x04, 0xF2, 0x80,
- 0x9B, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x0F, 0x78,
- 0x24, 0xE2, 0xFE, 0x08, 0xE2, 0xFF, 0x12, 0x2D,
- 0x5C, 0x78, 0x2E, 0x12, 0x44, 0xFE, 0xE4, 0x78,
- 0x26, 0xF2, 0x78, 0x27, 0xE2, 0xFF, 0x18, 0xE2,
- 0xFE, 0xC3, 0x9F, 0x50, 0x5D, 0x74, 0x33, 0x2E,
- 0xF8, 0xE2, 0x78, 0x28, 0xF2, 0x90, 0x81, 0xBC,
- 0xE0, 0x60, 0x2B, 0x78, 0x2E, 0x12, 0x44, 0xE5,
- 0x78, 0x26, 0xE2, 0xFB, 0x75, 0xF0, 0x08, 0xA4,
- 0xF9, 0xF8, 0x12, 0x20, 0xA8, 0x78, 0x29, 0xEF,
- 0xF2, 0x74, 0x37, 0x2B, 0xF8, 0xE2, 0x78, 0x32,
- 0xF2, 0xE2, 0xFE, 0xF4, 0x5F, 0xFF, 0x78, 0x28,
- 0xE2, 0xFD, 0xEE, 0x5D, 0x4F, 0xF2, 0x78, 0x28,
- 0xE2, 0xFF, 0x78, 0x26, 0xE2, 0xFD, 0xC3, 0x74,
- 0x03, 0x9D, 0xFD, 0xE4, 0x94, 0x00, 0xFC, 0x7B,
- 0xFE, 0x74, 0x2A, 0x2D, 0xF9, 0x74, 0x80, 0x3C,
- 0xFA, 0xEF, 0x12, 0x1F, 0xEA, 0xE2, 0x04, 0xF2,
- 0x80, 0x98, 0x78, 0x2A, 0x12, 0x44, 0xE5, 0x90,
- 0x85, 0xBB, 0x12, 0x20, 0xCE, 0x78, 0x24, 0xE2,
- 0xFE, 0x08, 0xE2, 0xFF, 0x12, 0x2E, 0xA2, 0x22,
- 0x22, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x1F, 0x90,
- 0x00, 0x01, 0x12, 0x1F, 0xBD, 0xFF, 0xFE, 0x12,
- 0x1F, 0xA4, 0xFD, 0xC3, 0x13, 0x30, 0xE0, 0x12,
- 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16, 0x90, 0x00,
- 0x02, 0x12, 0x1F, 0xBD, 0x90, 0x81, 0xCF, 0xF0,
- 0x80, 0x05, 0x90, 0x81, 0xCF, 0xEF, 0xF0, 0x90,
- 0x81, 0xCE, 0xEE, 0xF0, 0x90, 0x81, 0xCF, 0xE0,
- 0xFE, 0x90, 0x81, 0xCE, 0xE0, 0xFF, 0xD3, 0x9E,
- 0x50, 0x38, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16,
- 0x12, 0x1F, 0xA4, 0x54, 0x01, 0xFE, 0x74, 0xDE,
- 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x80, 0xF5, 0x83,
- 0xEE, 0xF0, 0x74, 0xDE, 0x2F, 0xF5, 0x82, 0xE4,
- 0x34, 0x80, 0xF5, 0x83, 0xE0, 0x70, 0x04, 0xD1,
- 0x25, 0x80, 0x07, 0x90, 0x81, 0xCE, 0xE0, 0xFF,
- 0xB1, 0x80, 0x90, 0x81, 0xCE, 0xE0, 0x04, 0xF0,
- 0x80, 0xBA, 0x90, 0x80, 0xDE, 0xE0, 0x70, 0x24,
- 0x90, 0x81, 0x2A, 0xE0, 0x70, 0x04, 0xFF, 0x12,
- 0x49, 0x93, 0x90, 0x81, 0x2A, 0xE0, 0x64, 0x0C,
- 0x60, 0x02, 0xD1, 0x26, 0x90, 0x81, 0x24, 0xE0,
- 0x54, 0xF7, 0xF0, 0x54, 0xEF, 0xF0, 0x54, 0xBF,
- 0xF0, 0x54, 0x7F, 0xF0, 0x22, 0x22, 0x90, 0x06,
- 0x04, 0xE0, 0x54, 0x7F, 0xF0, 0x90, 0x05, 0x22,
- 0xE4, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x0C, 0xF0,
- 0x22, 0x90, 0x81, 0xED, 0xEF, 0xF0, 0xA3, 0xED,
- 0xF0, 0xAD, 0x03, 0xAC, 0x02, 0xE4, 0x90, 0x81,
- 0xF5, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0xC4, 0x74,
- 0x39, 0xF0, 0x74, 0x66, 0xA3, 0xF0, 0xEC, 0x54,
- 0x3F, 0xFC, 0x90, 0x01, 0x40, 0xED, 0xF0, 0xAE,
- 0x04, 0xEE, 0xA3, 0xF0, 0x90, 0x81, 0xED, 0xE0,
- 0x24, 0x81, 0x60, 0x34, 0x24, 0xDA, 0x60, 0x1C,
- 0x24, 0x3C, 0x70, 0x41, 0x90, 0x81, 0xEE, 0xE0,
- 0xC4, 0x33, 0x33, 0x33, 0x54, 0x80, 0x90, 0x81,
- 0xF2, 0xF0, 0xA3, 0x74, 0x69, 0xF0, 0xA3, 0x74,
- 0x80, 0xF0, 0x80, 0x2C, 0x90, 0x81, 0xEE, 0xE0,
- 0x54, 0x01, 0x90, 0x81, 0xF2, 0xF0, 0xA3, 0x74,
- 0xA5, 0xF0, 0xA3, 0x74, 0x01, 0xF0, 0x80, 0x18,
- 0x90, 0x81, 0xEE, 0xE0, 0xC4, 0x54, 0x10, 0x90,
- 0x81, 0xF2, 0xF0, 0xA3, 0x74, 0x7F, 0xF0, 0xA3,
- 0x74, 0x10, 0xF0, 0x80, 0x03, 0x7F, 0x00, 0x22,
- 0x90, 0x81, 0xF3, 0xE0, 0x90, 0x01, 0x06, 0xF0,
- 0x90, 0x81, 0xF2, 0xE0, 0x60, 0x0E, 0x90, 0x01,
- 0x42, 0xF0, 0x90, 0x81, 0xF1, 0xE0, 0x90, 0x01,
- 0x43, 0xF0, 0x80, 0x0D, 0x90, 0x01, 0x43, 0xE4,
- 0xF0, 0x90, 0x81, 0xF2, 0xE0, 0x90, 0x01, 0x42,
- 0xF0, 0x90, 0x81, 0xF4, 0xE0, 0xFF, 0x90, 0x01,
- 0x42, 0xE0, 0x5F, 0xFF, 0x90, 0x81, 0xF2, 0xE0,
- 0x6F, 0x60, 0xEE, 0x74, 0x39, 0x04, 0x90, 0x01,
- 0xC4, 0xF0, 0x74, 0x66, 0xA3, 0xF0, 0x90, 0x01,
- 0x43, 0xE4, 0xF0, 0x7F, 0x01, 0x22, 0xE4, 0x90,
- 0x81, 0x6A, 0xF0, 0x90, 0x87, 0x5F, 0xE0, 0x90,
- 0x81, 0x69, 0xF0, 0xE4, 0x90, 0x81, 0x76, 0xF0,
- 0x90, 0x81, 0x66, 0xF0, 0x90, 0x81, 0x66, 0xE0,
- 0xFF, 0xC3, 0x94, 0x40, 0x50, 0x15, 0x74, 0x79,
- 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
- 0x74, 0xFF, 0xF0, 0x90, 0x81, 0x66, 0xE0, 0x04,
- 0xF0, 0x80, 0xE1, 0xE4, 0x90, 0x81, 0x66, 0xF0,
- 0x90, 0x81, 0x69, 0xE0, 0xFF, 0x90, 0x81, 0x66,
- 0xE0, 0xFE, 0xC3, 0x9F, 0x40, 0x03, 0x02, 0x68,
- 0x12, 0x74, 0xDF, 0x2E, 0xF9, 0xE4, 0x34, 0x86,
- 0x75, 0x13, 0x01, 0xF5, 0x14, 0x89, 0x15, 0x75,
- 0x16, 0x0A, 0x7B, 0x01, 0x7A, 0x81, 0x79, 0x5B,
- 0x12, 0x2B, 0xED, 0x90, 0x81, 0x5C, 0xE0, 0xFF,
- 0x12, 0x2F, 0x27, 0xEF, 0x04, 0x90, 0x81, 0x76,
- 0xF0, 0x90, 0x81, 0x5B, 0xE0, 0xFF, 0xA3, 0xE0,
- 0xFD, 0x12, 0x31, 0xEA, 0xEF, 0x24, 0xC8, 0x90,
- 0x81, 0x78, 0xF0, 0x75, 0xF0, 0x08, 0xA4, 0xF0,
- 0x90, 0x81, 0x5C, 0xE0, 0x54, 0x0F, 0x90, 0x81,
- 0x77, 0xF0, 0xE4, 0x90, 0x81, 0x65, 0xF0, 0x90,
- 0x81, 0x67, 0xF0, 0x90, 0x81, 0x67, 0xE0, 0xFF,
- 0xC3, 0x94, 0x04, 0x50, 0x57, 0x90, 0x81, 0x77,
- 0xE0, 0xFE, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3,
- 0x13, 0xD8, 0xFC, 0x20, 0xE0, 0x3E, 0x90, 0x81,
- 0x67, 0xE0, 0x25, 0xE0, 0xFF, 0x90, 0x81, 0x78,
- 0xE0, 0x2F, 0x24, 0x79, 0xF9, 0xE4, 0x34, 0x81,
- 0xFA, 0x7B, 0x01, 0xC0, 0x03, 0xC0, 0x01, 0x90,
- 0x81, 0x65, 0xE0, 0x75, 0xF0, 0x02, 0xA4, 0x24,
- 0x5D, 0xF9, 0x74, 0x81, 0x35, 0xF0, 0x8B, 0x13,
- 0xF5, 0x14, 0x89, 0x15, 0x75, 0x16, 0x02, 0xD0,
- 0x01, 0xD0, 0x03, 0x12, 0x2B, 0xED, 0x90, 0x81,
- 0x65, 0xE0, 0x04, 0xF0, 0x90, 0x81, 0x67, 0xE0,
- 0x04, 0xF0, 0x80, 0x9F, 0x90, 0x81, 0x76, 0xE0,
- 0xFF, 0x90, 0x81, 0x66, 0xE0, 0x2F, 0xF0, 0x02,
- 0x67, 0x40, 0xE4, 0x90, 0x81, 0x6A, 0xF0, 0x90,
- 0x81, 0x6A, 0xE0, 0xC3, 0x94, 0x40, 0x40, 0x02,
- 0x41, 0xAF, 0xE0, 0xFF, 0x24, 0x79, 0xF5, 0x82,
- 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0x90, 0x81,
- 0x6C, 0xF0, 0xE0, 0xFE, 0x54, 0xF0, 0xC4, 0x54,
- 0x0F, 0xFD, 0x90, 0x81, 0x6B, 0xF0, 0xEE, 0x54,
- 0x0F, 0xFE, 0xA3, 0xF0, 0x74, 0x7A, 0x2F, 0xF5,
- 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0x90,
- 0x81, 0x6D, 0xF0, 0xFC, 0xEE, 0xFE, 0xEC, 0xFB,
- 0xEB, 0xFF, 0x90, 0x81, 0x72, 0xEE, 0xF0, 0xA3,
- 0xEF, 0xF0, 0xED, 0x12, 0x45, 0x28, 0x68, 0x8B,
- 0x00, 0x68, 0xC2, 0x01, 0x69, 0x73, 0x02, 0x6A,
- 0xA0, 0x03, 0x69, 0x8E, 0x04, 0x69, 0xAF, 0x05,
- 0x69, 0xAF, 0x06, 0x69, 0xAF, 0x07, 0x69, 0xAF,
- 0x08, 0x6A, 0x33, 0x09, 0x6A, 0x69, 0x0A, 0x00,
- 0x00, 0x6A, 0xAF, 0x90, 0x81, 0x6A, 0xE0, 0xFD,
- 0x24, 0x7C, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5,
- 0x83, 0xE0, 0xFE, 0x74, 0x7B, 0x2D, 0xF5, 0x82,
- 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFD, 0xED,
- 0xFF, 0x90, 0x81, 0x74, 0xEE, 0xF0, 0xFC, 0xA3,
- 0xEF, 0xF0, 0x90, 0x81, 0x6D, 0xE0, 0xFF, 0x12,
- 0x2F, 0x96, 0x90, 0x81, 0x68, 0x74, 0x02, 0xF0,
- 0x41, 0xA0, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7C,
- 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0,
- 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x08, 0x12,
- 0x20, 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06,
- 0xAB, 0x07, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B,
- 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0,
- 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x12, 0x44, 0xCC,
- 0xC0, 0x04, 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07,
- 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7D, 0xF5, 0x82,
- 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFF, 0xE4,
- 0xFC, 0xFD, 0xFE, 0x78, 0x10, 0x12, 0x20, 0xBB,
- 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01, 0xD0, 0x00,
- 0x12, 0x44, 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
- 0x06, 0xC0, 0x07, 0x90, 0x81, 0x6A, 0xE0, 0x24,
- 0x7E, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
- 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x18,
- 0x12, 0x20, 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
- 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x90, 0x81,
- 0x6E, 0x12, 0x20, 0xCE, 0x90, 0x81, 0x6E, 0x12,
- 0x44, 0xD9, 0x90, 0x85, 0x96, 0x12, 0x20, 0xCE,
- 0x90, 0x81, 0x72, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
- 0x12, 0x2E, 0xE4, 0x90, 0x81, 0x68, 0x74, 0x04,
- 0xF0, 0x41, 0xA0, 0x90, 0x81, 0x6D, 0xE0, 0xFD,
- 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF5, 0x82,
- 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFB, 0xE4,
- 0xFF, 0x12, 0x30, 0xC7, 0x80, 0x19, 0x90, 0x81,
- 0x6D, 0xE0, 0xFD, 0x90, 0x81, 0x6A, 0xE0, 0x24,
- 0x7B, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
- 0xE0, 0xFB, 0xE4, 0xFF, 0x12, 0x30, 0x6A, 0x90,
- 0x81, 0x68, 0x74, 0x01, 0xF0, 0x41, 0xA0, 0x90,
- 0x81, 0x68, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x6A,
- 0xE0, 0x24, 0x7C, 0xF5, 0x82, 0xE4, 0x34, 0x81,
- 0xF5, 0x83, 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE,
- 0x78, 0x08, 0x12, 0x20, 0xBB, 0xA8, 0x04, 0xA9,
- 0x05, 0xAA, 0x06, 0xAB, 0x07, 0x90, 0x81, 0x6A,
- 0xE0, 0x24, 0x7B, 0xF5, 0x82, 0xE4, 0x34, 0x81,
- 0xF5, 0x83, 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE,
- 0x12, 0x44, 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
- 0x06, 0xC0, 0x07, 0x90, 0x81, 0x6C, 0xE0, 0xFF,
- 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x10, 0x12, 0x20,
- 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01, 0xD0,
- 0x00, 0x12, 0x44, 0xCC, 0x90, 0x81, 0x6E, 0x12,
- 0x20, 0xCE, 0x90, 0x81, 0x6B, 0xE0, 0x24, 0xFB,
- 0xFF, 0xC0, 0x07, 0x90, 0x81, 0x6E, 0x12, 0x44,
- 0xD9, 0x90, 0x81, 0xF9, 0x12, 0x20, 0xCE, 0x90,
- 0x81, 0x6D, 0xE0, 0xFD, 0xD0, 0x07, 0x12, 0x55,
- 0x1C, 0x80, 0x6D, 0x90, 0x81, 0x68, 0x74, 0x01,
- 0xF0, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF9,
- 0xE4, 0x34, 0x81, 0x75, 0x13, 0x01, 0xF5, 0x14,
- 0x89, 0x15, 0x75, 0x16, 0x01, 0x7B, 0xFE, 0x7A,
- 0x80, 0x79, 0x33, 0x12, 0x2B, 0xED, 0x90, 0x81,
- 0x6D, 0xE0, 0xFF, 0x90, 0x81, 0x6C, 0xE0, 0xFD,
- 0xE4, 0x90, 0x81, 0xBC, 0xF0, 0x7B, 0x04, 0x80,
- 0x34, 0x90, 0x81, 0x68, 0x74, 0x04, 0xF0, 0x90,
- 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF9, 0xE4, 0x34,
- 0x81, 0x75, 0x13, 0x01, 0xF5, 0x14, 0x89, 0x15,
- 0x75, 0x16, 0x04, 0x7B, 0xFE, 0x7A, 0x80, 0x79,
- 0x33, 0x12, 0x2B, 0xED, 0x90, 0x81, 0x6D, 0xE0,
- 0xFF, 0x90, 0x81, 0x6C, 0xE0, 0xFD, 0xE4, 0x90,
- 0x81, 0xBC, 0xF0, 0x7B, 0x06, 0x12, 0x63, 0xE1,
- 0x90, 0x81, 0x68, 0xE0, 0x24, 0x02, 0xFF, 0x90,
- 0x81, 0x6A, 0xE0, 0x2F, 0xF0, 0x01, 0x17, 0x22,
- 0x90, 0x02, 0x09, 0xE0, 0xFD, 0x12, 0x1F, 0xA4,
- 0xFE, 0xAF, 0x05, 0xED, 0x2E, 0x90, 0x80, 0x3D,
- 0xF0, 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0xFF,
- 0xED, 0x2F, 0x90, 0x80, 0x3E, 0xF0, 0x90, 0x00,
- 0x02, 0x12, 0x1F, 0xBD, 0xFF, 0xED, 0x2F, 0x90,
- 0x80, 0x3F, 0xF0, 0x90, 0x00, 0x03, 0x12, 0x1F,
- 0xBD, 0xFF, 0xED, 0x2F, 0x90, 0x80, 0x40, 0xF0,
- 0x90, 0x00, 0x04, 0x12, 0x1F, 0xBD, 0xFF, 0xAE,
- 0x05, 0xED, 0x2F, 0x90, 0x80, 0x41, 0xF0, 0x22,
- 0x90, 0x00, 0x02, 0x12, 0x1F, 0xBD, 0xFF, 0x30,
- 0xE0, 0x26, 0x12, 0x1F, 0xA4, 0x90, 0x81, 0x38,
- 0xF0, 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x90,
- 0x81, 0x39, 0xF0, 0xEF, 0x54, 0xFE, 0xFF, 0xA3,
- 0xE0, 0x54, 0x01, 0x4F, 0xF0, 0x90, 0x00, 0x03,
- 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x3B, 0xF0, 0x22,
- 0x90, 0x81, 0x38, 0x74, 0x01, 0xF0, 0xA3, 0x74,
- 0x05, 0xF0, 0xA3, 0xE0, 0x54, 0x01, 0x44, 0x28,
- 0xF0, 0xA3, 0x74, 0x05, 0xF0, 0x22, 0x12, 0x1F,
- 0xA4, 0x90, 0x81, 0x3E, 0xF0, 0x90, 0x81, 0x3E,
- 0xE0, 0x90, 0x01, 0xE7, 0xF0, 0x22, 0x12, 0x1F,
- 0xA4, 0x90, 0x81, 0x4A, 0xF0, 0x90, 0x00, 0x01,
- 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x4B, 0xF0, 0x22,
- 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90,
- 0x81, 0xFD, 0xEE, 0xF0, 0xA3, 0xEF, 0xF0, 0xE4,
- 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x81, 0xFD, 0xE0,
- 0xFE, 0xA3, 0xE0, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
- 0x60, 0x2D, 0xC3, 0x90, 0x82, 0x00, 0xE0, 0x94,
- 0xE8, 0x90, 0x81, 0xFF, 0xE0, 0x94, 0x03, 0x40,
- 0x0B, 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x80, 0xF0,
- 0x7F, 0x00, 0x80, 0x15, 0x90, 0x81, 0xFF, 0xE4,
- 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9, 0x7F, 0x0A,
- 0x7E, 0x00, 0x12, 0x32, 0xAA, 0x80, 0xC5, 0x7F,
- 0x01, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
- 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0xD1,
- 0x12, 0x45, 0x1F, 0x90, 0x82, 0x0A, 0xE0, 0xFF,
- 0x04, 0xF0, 0x90, 0x00, 0x01, 0xEF, 0x12, 0x1F,
- 0xFC, 0x7F, 0xAF, 0x7E, 0x01, 0x71, 0x60, 0xEF,
- 0x60, 0x3A, 0x90, 0x81, 0xD1, 0x12, 0x45, 0x16,
- 0x8B, 0x13, 0x8A, 0x14, 0x89, 0x15, 0x90, 0x00,
- 0x0E, 0x12, 0x1F, 0xBD, 0x24, 0x02, 0xF5, 0x16,
- 0x7B, 0x01, 0x7A, 0x01, 0x79, 0xA0, 0x12, 0x2B,
- 0xED, 0x90, 0x81, 0xD1, 0x12, 0x45, 0x16, 0x90,
- 0x00, 0x0E, 0x12, 0x1F, 0xBD, 0x90, 0x01, 0xAE,
- 0xF0, 0xA3, 0x74, 0xFF, 0xF0, 0x90, 0x01, 0xCB,
- 0xE0, 0x64, 0x80, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
- 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
- 0xE4, 0xFF, 0x90, 0x80, 0xD9, 0xE0, 0xFE, 0x90,
- 0x80, 0xD8, 0xE0, 0xFD, 0xB5, 0x06, 0x04, 0x7E,
- 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x64, 0x01,
- 0x60, 0x32, 0x90, 0x01, 0xAF, 0xE0, 0x70, 0x13,
- 0xED, 0x75, 0xF0, 0x0F, 0xA4, 0x24, 0x42, 0xF9,
- 0x74, 0x80, 0x35, 0xF0, 0xFA, 0x7B, 0x01, 0x71,
- 0xB6, 0x7F, 0x01, 0xEF, 0x60, 0x16, 0x90, 0x80,
- 0xD8, 0xE0, 0x04, 0xF0, 0xE0, 0x7F, 0x00, 0xB4,
- 0x0A, 0x02, 0x7F, 0x01, 0xEF, 0x60, 0x05, 0xE4,
- 0x90, 0x80, 0xD8, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
- 0x22, 0x8F, 0x0D, 0x22, 0x8F, 0x0E, 0x22, 0x22,
- 0x90, 0x01, 0x34, 0xE0, 0x55, 0x3D, 0xF5, 0x41,
- 0xA3, 0xE0, 0x55, 0x3E, 0xF5, 0x42, 0xA3, 0xE0,
- 0x55, 0x3F, 0xF5, 0x43, 0xA3, 0xE0, 0x55, 0x40,
- 0xF5, 0x44, 0x90, 0x01, 0x34, 0xE5, 0x41, 0xF0,
- 0xA3, 0xE5, 0x42, 0xF0, 0xA3, 0xE5, 0x43, 0xF0,
- 0xA3, 0xE5, 0x44, 0xF0, 0x22, 0x90, 0x01, 0x3C,
- 0xE0, 0x55, 0x45, 0xF5, 0x49, 0xA3, 0xE0, 0x55,
- 0x46, 0xF5, 0x4A, 0xA3, 0xE0, 0x55, 0x47, 0xF5,
- 0x4B, 0xA3, 0xE0, 0x55, 0x48, 0xF5, 0x4C, 0x90,
- 0x01, 0x3C, 0xE5, 0x49, 0xF0, 0xA3, 0xE5, 0x4A,
- 0xF0, 0xA3, 0xE5, 0x4B, 0xF0, 0xA3, 0xE5, 0x4C,
- 0xF0, 0x53, 0x91, 0xDF, 0x22, 0x90, 0x81, 0x1F,
- 0xE0, 0x30, 0xE0, 0x05, 0xE4, 0xA3, 0xF0, 0xA3,
- 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01,
- 0x70, 0x19, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x13,
- 0x90, 0x01, 0x57, 0xE4, 0xF0, 0x90, 0x01, 0x3C,
- 0x74, 0x02, 0x12, 0x4F, 0xF4, 0x90, 0x01, 0x57,
- 0x74, 0x05, 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0,
- 0x64, 0x01, 0x70, 0x26, 0x90, 0x81, 0x27, 0xE0,
- 0x60, 0x20, 0x90, 0x01, 0x57, 0xE4, 0xF0, 0x90,
- 0x01, 0x3C, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x24,
- 0xE0, 0x54, 0xFB, 0xF0, 0x90, 0x81, 0x2B, 0xE0,
- 0x54, 0xFD, 0xF0, 0x54, 0x07, 0x70, 0x03, 0x12,
- 0x47, 0x2A, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0xB4,
- 0x01, 0x14, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x0E,
- 0x90, 0x81, 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02,
- 0x60, 0x02, 0x80, 0x03, 0xD1, 0x7F, 0x22, 0x90,
- 0x04, 0x1D, 0xE0, 0x70, 0x13, 0x90, 0x80, 0x3E,
- 0xE0, 0xFF, 0xE4, 0xFD, 0xB1, 0x69, 0x8E, 0x4E,
- 0x8F, 0x4F, 0x90, 0x04, 0x1F, 0x74, 0x20, 0xF0,
- 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
- 0x90, 0x82, 0x0E, 0xED, 0xF0, 0x90, 0x82, 0x0D,
- 0xEF, 0xF0, 0xE4, 0xFD, 0xFC, 0xF1, 0x37, 0x7C,
- 0x00, 0xAD, 0x07, 0x90, 0x82, 0x0D, 0xE0, 0x90,
- 0x04, 0x25, 0xF0, 0x90, 0x82, 0x0E, 0xE0, 0x60,
- 0x0E, 0x74, 0x0F, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
- 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0xAF,
- 0x05, 0x74, 0x08, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
- 0xFC, 0xF5, 0x83, 0xE4, 0xF0, 0x74, 0x09, 0x2F,
- 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
- 0x54, 0xF0, 0xF0, 0x74, 0x21, 0x2D, 0xF5, 0x82,
- 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x54, 0xF7,
- 0xF0, 0xAE, 0x04, 0xAF, 0x05, 0xD0, 0xD0, 0x92,
- 0xAF, 0x22, 0x8F, 0x4E, 0xF1, 0x4B, 0xBF, 0x01,
- 0x18, 0x90, 0x80, 0x40, 0xE0, 0xFF, 0x7D, 0x01,
- 0xB1, 0x69, 0xAD, 0x07, 0xAC, 0x06, 0xAF, 0x4E,
- 0x12, 0x4F, 0x82, 0x90, 0x04, 0x1F, 0x74, 0x20,
- 0xF0, 0x22, 0x90, 0x06, 0xA9, 0xE0, 0x90, 0x81,
- 0x4C, 0xF0, 0xE0, 0xFD, 0x54, 0xC0, 0x70, 0x09,
- 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x80,
- 0x72, 0xED, 0x30, 0xE6, 0x4B, 0x90, 0x81, 0x27,
- 0xE0, 0x64, 0x02, 0x70, 0x2A, 0x90, 0x81, 0x24,
- 0xE0, 0xFF, 0xC3, 0x13, 0x20, 0xE0, 0x09, 0x90,
- 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x80, 0x28,
- 0x90, 0x81, 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x01,
- 0x70, 0x2D, 0x90, 0x81, 0x2B, 0xE0, 0x44, 0x04,
- 0xF0, 0x7F, 0x01, 0xB1, 0xD2, 0x80, 0x20, 0x90,
- 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x90, 0x81,
- 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02, 0x60, 0x04,
- 0xB1, 0x4F, 0x80, 0x0B, 0xD1, 0x7F, 0x80, 0x07,
- 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
- 0x81, 0x4C, 0xE0, 0x90, 0x81, 0x2B, 0x30, 0xE7,
- 0x11, 0x12, 0x4F, 0xF1, 0x90, 0x01, 0x57, 0x74,
- 0x05, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x44, 0x04,
- 0xF0, 0x22, 0xE0, 0x54, 0xFD, 0xF0, 0x22, 0x90,
- 0x01, 0x5F, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
- 0x08, 0xF0, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
- 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
- 0xFB, 0xFD, 0x7F, 0x5C, 0x7E, 0x01, 0x12, 0x50,
- 0x05, 0x90, 0x01, 0x5F, 0x74, 0x05, 0xF0, 0x90,
- 0x06, 0x92, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x24,
- 0xE0, 0x44, 0x10, 0xF0, 0x90, 0x81, 0x2A, 0xE0,
- 0x64, 0x0C, 0x60, 0x0C, 0xE4, 0xFD, 0x7F, 0x0C,
- 0x12, 0x47, 0x3D, 0xE4, 0xFF, 0x12, 0x4F, 0x0D,
- 0x22, 0xE4, 0x90, 0x81, 0x4C, 0xF0, 0x90, 0x06,
- 0xA9, 0xE0, 0x90, 0x81, 0x4C, 0xF0, 0xE0, 0x54,
- 0xC0, 0x70, 0x0D, 0x90, 0x81, 0x2B, 0xE0, 0x54,
- 0xFE, 0xF0, 0x54, 0xFD, 0xF0, 0x02, 0x47, 0x2A,
- 0x90, 0x81, 0x4C, 0xE0, 0x30, 0xE6, 0x21, 0x90,
- 0x81, 0x27, 0xE0, 0x64, 0x01, 0x70, 0x20, 0x90,
- 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x90, 0x81,
- 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02, 0x60, 0x04,
- 0xB1, 0x4F, 0x80, 0x0B, 0xD1, 0x7F, 0x80, 0x07,
- 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
- 0x81, 0x4C, 0xE0, 0x90, 0x81, 0x2B, 0x30, 0xE7,
- 0x11, 0x12, 0x4F, 0xF1, 0x90, 0x01, 0x57, 0x74,
- 0x05, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x44, 0x04,
- 0xF0, 0x22, 0xE0, 0x54, 0xFD, 0xF0, 0x22, 0xE4,
- 0xFE, 0xEF, 0xC3, 0x13, 0xFD, 0xEF, 0x30, 0xE0,
- 0x02, 0x7E, 0x80, 0x90, 0xFD, 0x10, 0xED, 0xF0,
- 0xAF, 0x06, 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3,
- 0xC0, 0xD0, 0x90, 0x04, 0x1D, 0xE0, 0x60, 0x1A,
- 0x90, 0x05, 0x22, 0xE0, 0x54, 0x90, 0x60, 0x07,
- 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x08, 0xF0, 0x90,
- 0x01, 0xC6, 0xE0, 0x30, 0xE1, 0xE4, 0x7F, 0x00,
- 0x80, 0x02, 0x7F, 0x01, 0xD0, 0xD0, 0x92, 0xAF,
- 0x22, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x03, 0x12,
- 0x73, 0xE1, 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0,
- 0x03, 0x12, 0x49, 0xDD, 0x22, 0x90, 0x81, 0x27,
- 0xE0, 0x60, 0x35, 0x90, 0x06, 0x92, 0xE0, 0x30,
- 0xE1, 0x24, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
- 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
- 0xFB, 0xFD, 0x7F, 0x5C, 0x7E, 0x01, 0x12, 0x50,
- 0x05, 0x90, 0x01, 0x5F, 0x74, 0x05, 0xF0, 0x90,
- 0x06, 0x92, 0x74, 0x02, 0xF0, 0x22, 0x90, 0x81,
- 0x24, 0xE0, 0x54, 0xEF, 0xF0, 0x12, 0x47, 0x2A,
- 0x22, 0x12, 0x71, 0x48, 0x90, 0x81, 0x4D, 0xEF,
- 0xF0, 0x90, 0x81, 0x24, 0x30, 0xE0, 0x06, 0xE0,
- 0x44, 0x01, 0xF0, 0x80, 0x04, 0xE0, 0x54, 0xFE,
- 0xF0, 0x90, 0x81, 0x4D, 0xE0, 0x30, 0xE6, 0x11,
- 0x90, 0x01, 0x2F, 0xE0, 0x30, 0xE7, 0x04, 0xE4,
- 0xF0, 0x80, 0x06, 0x90, 0x01, 0x2F, 0x74, 0x80,
- 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x30, 0xE0, 0x1A,
- 0x90, 0x81, 0x32, 0xE4, 0xF0, 0xA3, 0x74, 0x07,
- 0xF0, 0x90, 0x81, 0x32, 0xA3, 0xE0, 0x90, 0x05,
- 0x58, 0xF0, 0x90, 0x04, 0xEC, 0xE0, 0x54, 0xDD,
- 0xF0, 0x22, 0x90, 0x04, 0xEC, 0xE0, 0x44, 0x22,
- 0xF0, 0x22, 0x90, 0x81, 0x4A, 0xE0, 0x60, 0x0F,
- 0xE4, 0xF0, 0x90, 0x05, 0x53, 0xE0, 0x44, 0x01,
- 0xF0, 0x90, 0x05, 0xFD, 0xE0, 0x04, 0xF0, 0x22,
- 0x90, 0x81, 0x24, 0xE0, 0xFF, 0xC4, 0x13, 0x13,
- 0x54, 0x03, 0x30, 0xE0, 0x27, 0xEF, 0x54, 0xBF,
- 0xF0, 0x90, 0x04, 0xE0, 0xE0, 0x90, 0x81, 0x25,
- 0x30, 0xE0, 0x06, 0xE0, 0x44, 0x01, 0xF0, 0x80,
- 0x10, 0xE0, 0x54, 0xFE, 0xF0, 0x90, 0x01, 0xB9,
- 0x74, 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x74, 0x04,
- 0xF0, 0x12, 0x47, 0x2A, 0xE4, 0xFF, 0x90, 0x81,
- 0x45, 0xE0, 0x30, 0xE0, 0x48, 0x90, 0x81, 0x49,
- 0xE0, 0xFD, 0x60, 0x41, 0x74, 0x01, 0x7E, 0x00,
- 0xA8, 0x07, 0x08, 0x80, 0x05, 0xC3, 0x33, 0xCE,
- 0x33, 0xCE, 0xD8, 0xF9, 0xFF, 0x90, 0x04, 0xE0,
- 0xE0, 0xFB, 0xEF, 0x5B, 0x60, 0x06, 0xE4, 0x90,
- 0x81, 0x49, 0xF0, 0x22, 0x90, 0x81, 0x47, 0xE0,
- 0xD3, 0x9D, 0x50, 0x10, 0x90, 0x01, 0xC7, 0x74,
- 0x10, 0xF0, 0x11, 0xBE, 0x90, 0x81, 0x45, 0xE0,
- 0x54, 0xFE, 0xF0, 0x22, 0x12, 0x4F, 0x0B, 0x90,
- 0x81, 0x49, 0xE0, 0x04, 0xF0, 0x22, 0x90, 0x80,
- 0x3C, 0xE0, 0x64, 0x02, 0x60, 0x07, 0x90, 0x06,
- 0x90, 0xE0, 0x44, 0x01, 0xF0, 0x22, 0x90, 0x81,
- 0x24, 0xE0, 0xFF, 0xC4, 0x13, 0x13, 0x13, 0x54,
- 0x01, 0x30, 0xE0, 0x2C, 0xEF, 0x54, 0x7F, 0xF0,
- 0x90, 0x04, 0xE0, 0xE0, 0x90, 0x81, 0x25, 0x30,
- 0xE1, 0x06, 0xE0, 0x44, 0x02, 0xF0, 0x80, 0x0F,
- 0xE0, 0x54, 0xFD, 0xF0, 0x90, 0x01, 0xB9, 0x74,
- 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x04, 0xF0, 0x90,
- 0x81, 0x27, 0xE0, 0x60, 0x03, 0x12, 0x47, 0x2A,
- 0x7F, 0x01, 0x01, 0x6E, 0xC3, 0xEE, 0x94, 0x01,
- 0x40, 0x0A, 0x0D, 0xED, 0x13, 0x90, 0xFD, 0x10,
- 0xF0, 0xE4, 0x2F, 0xFF, 0x22, 0xC3, 0xEE, 0x94,
- 0x01, 0x40, 0x24, 0x90, 0xFD, 0x11, 0xE0, 0x6D,
- 0x70, 0x1A, 0x90, 0x01, 0x17, 0xE0, 0xB5, 0x05,
- 0x0D, 0x90, 0x01, 0xE4, 0x74, 0x77, 0xF0, 0x90,
- 0xFD, 0x11, 0xE4, 0xF0, 0x80, 0x06, 0xED, 0x04,
- 0x90, 0xFD, 0x11, 0xF0, 0xE4, 0x2F, 0xFF, 0x22,
- 0xE4, 0x90, 0x81, 0x4E, 0xF0, 0xA3, 0xF0, 0xA3,
- 0xF0, 0x90, 0x00, 0x83, 0xE0, 0x90, 0x81, 0x4E,
- 0xF0, 0x90, 0x00, 0x83, 0xE0, 0xFE, 0x90, 0x81,
- 0x4E, 0xE0, 0xFF, 0xB5, 0x06, 0x01, 0x22, 0xC3,
- 0x90, 0x81, 0x50, 0xE0, 0x94, 0x64, 0x90, 0x81,
- 0x4F, 0xE0, 0x94, 0x00, 0x40, 0x0D, 0x90, 0x01,
- 0xC0, 0xE0, 0x44, 0x40, 0xF0, 0x90, 0x81, 0x4E,
- 0xE0, 0xFF, 0x22, 0x90, 0x81, 0x4F, 0xE4, 0x75,
- 0xF0, 0x01, 0x12, 0x44, 0xA9, 0x80, 0xC2, 0x74,
- 0x45, 0x2F, 0xF8, 0xE6, 0xFE, 0xED, 0xF4, 0x5E,
- 0xFE, 0xF6, 0x74, 0x38, 0x2F, 0xF5, 0x82, 0xE4,
- 0x34, 0x01, 0xF5, 0x83, 0xEE, 0xF0, 0x22, 0xD3,
- 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x82,
- 0x12, 0xED, 0xF0, 0x90, 0x82, 0x11, 0xEF, 0xF0,
- 0xD3, 0x94, 0x07, 0x50, 0x70, 0xE0, 0xFF, 0x74,
- 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33,
- 0xD8, 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x47, 0xE0,
- 0x5F, 0xFD, 0x7F, 0x47, 0x12, 0x32, 0x1E, 0x90,
- 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01, 0xA8, 0x07,
- 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC, 0xFF,
- 0x90, 0x00, 0x46, 0xE0, 0x4F, 0xFD, 0x7F, 0x46,
- 0x12, 0x32, 0x1E, 0x90, 0x82, 0x12, 0xE0, 0x60,
- 0x18, 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01,
- 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
- 0xFC, 0xFF, 0x90, 0x00, 0x45, 0xE0, 0x4F, 0x80,
- 0x17, 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01,
- 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
- 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x45, 0xE0, 0x5F,
- 0xFD, 0x7F, 0x45, 0x80, 0x7E, 0x90, 0x82, 0x11,
- 0xE0, 0x24, 0xF8, 0xF0, 0xE0, 0x24, 0x04, 0xFF,
- 0x74, 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3,
- 0x33, 0xD8, 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x43,
- 0xE0, 0x5F, 0xFD, 0x7F, 0x43, 0x12, 0x32, 0x1E,
- 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01, 0xA8,
- 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC,
- 0xFF, 0x90, 0x00, 0x43, 0xE0, 0x4F, 0xFD, 0x7F,
- 0x43, 0x12, 0x32, 0x1E, 0x90, 0x82, 0x12, 0xE0,
- 0x60, 0x1D, 0x90, 0x82, 0x11, 0xE0, 0x24, 0x04,
- 0xFF, 0x74, 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02,
- 0xC3, 0x33, 0xD8, 0xFC, 0xFF, 0x90, 0x00, 0x42,
- 0xE0, 0x4F, 0xFD, 0x7F, 0x42, 0x80, 0x1C, 0x90,
- 0x82, 0x11, 0xE0, 0x24, 0x04, 0xFF, 0x74, 0x01,
- 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
- 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x42, 0xE0, 0x5F,
- 0xFD, 0x7F, 0x42, 0x12, 0x32, 0x1E, 0xD0, 0xD0,
- 0x92, 0xAF, 0x22, 0x90, 0x81, 0x24, 0xE0, 0x54,
- 0xFB, 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90,
- 0x81, 0x2B, 0xF0, 0x22, 0xEF, 0x24, 0xFE, 0x60,
- 0x0C, 0x04, 0x70, 0x28, 0x90, 0x81, 0x2D, 0x74,
- 0x01, 0xF0, 0xA3, 0xF0, 0x22, 0xED, 0x70, 0x0A,
- 0x90, 0x81, 0x3B, 0xE0, 0x90, 0x81, 0x2D, 0xF0,
- 0x80, 0x05, 0x90, 0x81, 0x2D, 0xED, 0xF0, 0x90,
- 0x81, 0x2D, 0xE0, 0xA3, 0xF0, 0x90, 0x81, 0x25,
- 0xE0, 0x44, 0x08, 0xF0, 0x22, 0x12, 0x4E, 0xAB,
- 0xEF, 0x64, 0x01, 0x60, 0x08, 0x90, 0x01, 0xB8,
- 0x74, 0x01, 0xF0, 0x80, 0x67, 0x90, 0x81, 0x2B,
- 0xE0, 0xFF, 0x54, 0x03, 0x60, 0x08, 0x90, 0x01,
- 0xB8, 0x74, 0x02, 0xF0, 0x80, 0x56, 0x90, 0x81,
- 0x29, 0xE0, 0xFE, 0xE4, 0xC3, 0x9E, 0x50, 0x08,
- 0x90, 0x01, 0xB8, 0x74, 0x04, 0xF0, 0x80, 0x44,
- 0xEF, 0x30, 0xE2, 0x08, 0x90, 0x01, 0xB8, 0x74,
- 0x08, 0xF0, 0x80, 0x38, 0x90, 0x81, 0x2B, 0xE0,
- 0x30, 0xE4, 0x08, 0x90, 0x01, 0xB8, 0x74, 0x10,
- 0xF0, 0x80, 0x29, 0x90, 0x81, 0x25, 0xE0, 0x13,
- 0x13, 0x54, 0x3F, 0x20, 0xE0, 0x08, 0x90, 0x01,
- 0xB8, 0x74, 0x20, 0xF0, 0x80, 0x16, 0x90, 0x81,
- 0x3E, 0xE0, 0x60, 0x08, 0x90, 0x01, 0xB8, 0x74,
- 0x80, 0xF0, 0x80, 0x08, 0x90, 0x01, 0xB8, 0xE4,
- 0xF0, 0x7F, 0x01, 0x22, 0x90, 0x01, 0xB9, 0x74,
- 0x04, 0xF0, 0x7F, 0x00, 0x22, 0xEF, 0x60, 0x42,
- 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01, 0x70, 0x3A,
- 0x90, 0x81, 0x25, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
- 0x05, 0x22, 0x74, 0x0F, 0xF0, 0x90, 0x06, 0x04,
- 0xE0, 0x54, 0xBF, 0xF0, 0xE4, 0xFF, 0x12, 0x4F,
- 0x0D, 0xBF, 0x01, 0x12, 0x90, 0x81, 0x24, 0xE0,
- 0x44, 0x40, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x06,
- 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x01,
- 0xB9, 0x74, 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x74,
- 0x08, 0xF0, 0x22, 0x90, 0x05, 0x22, 0x74, 0x6F,
- 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
- 0x90, 0x81, 0x2A, 0x74, 0x02, 0xF0, 0x90, 0x81,
- 0x23, 0xF0, 0x22, 0x12, 0x54, 0x65, 0x90, 0x81,
- 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x23, 0xF0,
- 0x22, 0x90, 0x81, 0x24, 0xE0, 0xFF, 0x13, 0x13,
- 0x54, 0x3F, 0x30, 0xE0, 0x11, 0xEF, 0x54, 0xFB,
- 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD, 0xF0,
- 0x54, 0x07, 0x70, 0x42, 0x80, 0x3D, 0x90, 0x81,
- 0x30, 0xE0, 0x04, 0xF0, 0x90, 0x81, 0x2B, 0xE0,
- 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x30, 0xE0, 0xFF,
- 0xB4, 0x01, 0x02, 0x80, 0x04, 0xEF, 0xB4, 0x02,
- 0x06, 0x90, 0x05, 0x58, 0xE0, 0x04, 0xF0, 0x90,
- 0x81, 0x38, 0xE0, 0xFF, 0x90, 0x81, 0x30, 0xE0,
- 0xD3, 0x9F, 0x40, 0x0F, 0x90, 0x80, 0xDE, 0xE0,
- 0xB4, 0x01, 0x0B, 0x90, 0x81, 0x25, 0xE0, 0x54,
- 0xFB, 0xF0, 0x22, 0x12, 0x47, 0x2A, 0x22, 0x22,
- 0x90, 0x05, 0x2B, 0xE0, 0x7F, 0x00, 0x30, 0xE7,
- 0x02, 0x7F, 0x01, 0x22, 0x90, 0x05, 0x22, 0x74,
- 0xFF, 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40,
- 0xF0, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0, 0x22,
- 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x12,
- 0x49, 0xDD, 0x90, 0x81, 0x22, 0x74, 0x02, 0xF0,
- 0x22, 0x12, 0x49, 0xE3, 0x90, 0x81, 0x22, 0x74,
- 0x02, 0xF0, 0x22, 0x90, 0x05, 0x22, 0x74, 0x6F,
- 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
- 0x90, 0x81, 0x22, 0x74, 0x04, 0xF0, 0x22, 0xAE,
- 0x07, 0x12, 0x51, 0x73, 0xBF, 0x01, 0x12, 0x90,
- 0x81, 0x23, 0xE0, 0x64, 0x02, 0x60, 0x0A, 0xAF,
- 0x06, 0x7D, 0x01, 0x12, 0x47, 0x3D, 0x7F, 0x01,
- 0x22, 0x7F, 0x00, 0x22, 0x90, 0x01, 0x57, 0xE0,
- 0x60, 0x48, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
- 0x02, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0xFF, 0x13,
- 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x0C, 0xEF, 0x54,
- 0xFB, 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD,
- 0xF0, 0x22, 0x90, 0x81, 0x30, 0xE0, 0x04, 0xF0,
- 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xEF, 0xF0, 0x90,
- 0x81, 0x38, 0xE0, 0xFF, 0x90, 0x81, 0x30, 0xE0,
- 0xD3, 0x9F, 0x40, 0x0E, 0x90, 0x80, 0xDE, 0xE0,
- 0xB4, 0x01, 0x07, 0x90, 0x81, 0x25, 0xE0, 0x54,
- 0xFB, 0xF0, 0x22, 0x90, 0x80, 0x3F, 0xE0, 0xFF,
- 0x7D, 0x01, 0x12, 0x6D, 0x69, 0x8E, 0x54, 0x8F,
- 0x55, 0xAD, 0x55, 0xAC, 0x54, 0xAF, 0x53, 0x12,
- 0x4F, 0x82, 0xAF, 0x55, 0xAE, 0x54, 0x90, 0x04,
- 0x80, 0xE0, 0x54, 0x0F, 0xFD, 0xAC, 0x07, 0x74,
- 0x11, 0x2C, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5,
- 0x83, 0xE0, 0x44, 0x01, 0xF0, 0x74, 0x11, 0x2C,
- 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
- 0x54, 0xFB, 0xF0, 0xAC, 0x07, 0x74, 0x16, 0x2C,
- 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
- 0x44, 0xFA, 0xF0, 0x74, 0x15, 0x2C, 0xF5, 0x82,
- 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x1F,
- 0xF0, 0xAC, 0x07, 0x74, 0x06, 0x2C, 0xF5, 0x82,
- 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x0F,
- 0xF0, 0x90, 0x04, 0x53, 0xE4, 0xF0, 0x90, 0x04,
- 0x52, 0xF0, 0x90, 0x04, 0x51, 0x74, 0xFF, 0xF0,
- 0x90, 0x04, 0x50, 0x74, 0xFD, 0xF0, 0x74, 0x14,
- 0x2C, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83,
- 0xE0, 0x54, 0xC0, 0x4D, 0xFD, 0x74, 0x14, 0x2F,
- 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xED,
- 0xF0, 0x22, 0xAB, 0x07, 0xAA, 0x06, 0xED, 0x2B,
- 0xFB, 0xE4, 0x3A, 0xFA, 0xC3, 0x90, 0x80, 0xDB,
- 0xE0, 0x9B, 0x90, 0x80, 0xDA, 0xE0, 0x9A, 0x50,
- 0x13, 0xA3, 0xE0, 0x24, 0x01, 0xFF, 0x90, 0x80,
- 0xDA, 0xE0, 0x34, 0x00, 0xFE, 0xC3, 0xEB, 0x9F,
- 0xFB, 0xEA, 0x9E, 0xFA, 0xEA, 0x90, 0xFD, 0x11,
- 0xF0, 0xAF, 0x03, 0x74, 0x00, 0x2F, 0xF5, 0x82,
- 0xE4, 0x34, 0xFB, 0xF5, 0x83, 0xE0, 0xFF, 0x22,
- 0x12, 0x1F, 0xA4, 0xFF, 0x54, 0x01, 0xFE, 0x90,
- 0x81, 0x42, 0xE0, 0x54, 0xFE, 0x4E, 0xF0, 0xEF,
- 0xC3, 0x13, 0x30, 0xE0, 0x0A, 0x90, 0x00, 0x01,
- 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x43, 0xF0, 0x22,
- 0x90, 0x81, 0x45, 0xE0, 0x30, 0xE0, 0x2D, 0x90,
- 0x81, 0x48, 0xE0, 0x04, 0xF0, 0xE0, 0xFF, 0x90,
- 0x81, 0x46, 0xE0, 0xB5, 0x07, 0x1E, 0x90, 0x06,
- 0x92, 0xE0, 0x54, 0x1C, 0x70, 0x0B, 0x12, 0x4F,
- 0x0B, 0x90, 0x81, 0x49, 0xE0, 0x04, 0xF0, 0x80,
- 0x06, 0x90, 0x06, 0x92, 0x74, 0x1C, 0xF0, 0xE4,
- 0x90, 0x81, 0x48, 0xF0, 0x22, 0x00, 0xBB, 0x8E,
-};
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
index e4f20da91b4..8a7947d8de7 100644
--- a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -819,7 +819,7 @@ void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup
struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- if (ODM_CheckPowerStatus(adapt) == false)
+ if (!ODM_CheckPowerStatus(adapt))
return;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
@@ -888,7 +888,7 @@ _PHY_PathADDAOn(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n"));
pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
+ if (!is2t) {
pathOn = 0x0bdb25a0;
ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
} else {
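
The two hunks above are pure style cleanups: explicit comparisons against false are replaced with the logical-not form that checkpatch.pl and the kernel coding style generally prefer for booleans. A minimal standalone illustration (not taken from the driver):

    #include <stdbool.h>

    /* Equivalent tests; the second form is the one preferred for
     * boolean values in kernel code.
     */
    static int check_power(bool powered_on)
    {
            if (powered_on == false)        /* old style, flagged by checkpatch */
                    return -1;

            if (!powered_on)                /* preferred style, same behaviour */
                    return -1;

            return 0;
    }
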
@@ -1276,407 +1276,6 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
}
}
-/* Analog Pre-distortion calibration */
-#define APK_BB_REG_NUM 8
-#define APK_CURVE_REG_NUM 4
-#define PATH_NUM 2
-
-static void phy_APCalibrate_8188E(struct adapter *adapt, s8 delta, bool is2t)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- u32 regD[PATH_NUM];
- u32 tmpreg, index, offset, apkbound;
- u8 path, i, pathbound = PATH_NUM;
- u32 BB_backup[APK_BB_REG_NUM];
- u32 BB_REG[APK_BB_REG_NUM] = {
- rFPGA1_TxBlock, rOFDM0_TRxPathEnable,
- rFPGA0_RFMOD, rOFDM0_TRMuxPar,
- rFPGA0_XCD_RFInterfaceSW, rFPGA0_XAB_RFInterfaceSW,
- rFPGA0_XA_RFInterfaceOE, rFPGA0_XB_RFInterfaceOE };
- u32 BB_AP_MODE[APK_BB_REG_NUM] = {
- 0x00000020, 0x00a05430, 0x02040000,
- 0x000800e4, 0x00204000 };
- u32 BB_normal_AP_MODE[APK_BB_REG_NUM] = {
- 0x00000020, 0x00a05430, 0x02040000,
- 0x000800e4, 0x22204000 };
-
- u32 AFE_backup[IQK_ADDA_REG_NUM];
- u32 AFE_REG[IQK_ADDA_REG_NUM] = {
- rFPGA0_XCD_SwitchControl, rBlue_Tooth,
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN };
-
- u32 MAC_backup[IQK_MAC_REG_NUM];
- u32 MAC_REG[IQK_MAC_REG_NUM] = {
- REG_TXPAUSE, REG_BCN_CTRL,
- REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
-
- u32 APK_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = {
- {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
- {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
- };
-
- u32 APK_normal_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = {
- {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, /* path settings equal to path b settings */
- {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
- };
-
- u32 APK_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = {
- {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
- {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
- };
-
- u32 APK_normal_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = {
- {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, /* path settings equal to path b settings */
- {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
- };
-
- u32 AFE_on_off[PATH_NUM] = {
- 0x04db25a4, 0x0b1b25a4}; /* path A on path B off / path A off path B on */
-
- u32 APK_offset[PATH_NUM] = {
- rConfig_AntA, rConfig_AntB};
-
- u32 APK_normal_offset[PATH_NUM] = {
- rConfig_Pmpd_AntA, rConfig_Pmpd_AntB};
-
- u32 APK_value[PATH_NUM] = {
- 0x92fc0000, 0x12fc0000};
-
- u32 APK_normal_value[PATH_NUM] = {
- 0x92680000, 0x12680000};
-
- s8 APK_delta_mapping[APK_BB_REG_NUM][13] = {
- {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
- {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
- };
-
- u32 APK_normal_setting_value_1[13] = {
- 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
- 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
- 0x12680000, 0x00880000, 0x00880000
- };
-
- u32 APK_normal_setting_value_2[16] = {
- 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
- 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
- 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
- 0x00050006
- };
-
- u32 APK_result[PATH_NUM][APK_BB_REG_NUM]; /* val_1_1a, val_1_2a, val_2a, val_3a, val_4a */
- s32 BB_offset, delta_V, delta_offset;
-
- if (*(dm_odm->mp_mode) == 1) {
- struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
- pMptCtx->APK_bound[0] = 45;
- pMptCtx->APK_bound[1] = 52;
- }
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("==>phy_APCalibrate_8188E() delta %d\n", delta));
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("AP Calibration for %s\n", (is2t ? "2T2R" : "1T1R")));
- if (!is2t)
- pathbound = 1;
-
- /* 2 FOR NORMAL CHIP SETTINGS */
-
-/* Temporarily do not allow normal driver to do the following settings
- * because these offset and value will cause RF internal PA to be
- * unpredictably disabled by HW, such that RF Tx signal will disappear
- * after disable/enable card many times on 88CU. RF SD and DD have not
- * find the root cause, so we remove these actions temporarily.
- */
- if (*(dm_odm->mp_mode) != 1)
- return;
- /* settings adjust for normal chip */
- for (index = 0; index < PATH_NUM; index++) {
- APK_offset[index] = APK_normal_offset[index];
- APK_value[index] = APK_normal_value[index];
- AFE_on_off[index] = 0x6fdb25a4;
- }
-
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- for (path = 0; path < pathbound; path++) {
- APK_RF_init_value[path][index] = APK_normal_RF_init_value[path][index];
- APK_RF_value_0[path][index] = APK_normal_RF_value_0[path][index];
- }
- BB_AP_MODE[index] = BB_normal_AP_MODE[index];
- }
-
- apkbound = 6;
-
- /* save BB default value */
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index == 0) /* skip */
- continue;
- BB_backup[index] = ODM_GetBBReg(dm_odm, BB_REG[index], bMaskDWord);
- }
-
- /* save MAC default value */
- _PHY_SaveMACRegisters(adapt, MAC_REG, MAC_backup);
-
- /* save AFE default value */
- _PHY_SaveADDARegisters(adapt, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM);
-
- for (path = 0; path < pathbound; path++) {
- if (path == RF_PATH_A) {
- /* path A APK */
- /* load APK setting */
- /* path-A */
- offset = rPdp_AntA;
- for (index = 0; index < 11; index++) {
- ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
- offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
- offset += 0x04;
- }
-
- ODM_SetBBReg(dm_odm, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000);
-
- offset = rConfig_AntA;
- for (; index < 13; index++) {
- ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
- offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
- offset += 0x04;
- }
-
- /* page-B1 */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x40000000);
-
- /* path A */
- offset = rPdp_AntA;
- for (index = 0; index < 16; index++) {
- ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_2[index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
- offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
-
- offset += 0x04;
- }
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- } else if (path == RF_PATH_B) {
- /* path B APK */
- /* load APK setting */
- /* path-B */
- offset = rPdp_AntB;
- for (index = 0; index < 10; index++) {
- ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
- offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
-
- offset += 0x04;
- }
- ODM_SetBBReg(dm_odm, rConfig_Pmpd_AntA, bMaskDWord, 0x12680000);
- PHY_SetBBReg(adapt, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000);
-
- offset = rConfig_AntA;
- index = 11;
- for (; index < 13; index++) { /* offset 0xb68, 0xb6c */
- ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
- offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
- offset += 0x04;
- }
-
- /* page-B1 */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x40000000);
-
- /* path B */
- offset = 0xb60;
- for (index = 0; index < 16; index++) {
- ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_2[index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
- offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
-
- offset += 0x04;
- }
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
- }
-
- /* save RF default value */
- regD[path] = PHY_QueryRFReg(adapt, path, RF_TXBIAS_A, bMaskDWord);
-
- /* Path A AFE all on, path B AFE All off or vise versa */
- for (index = 0; index < IQK_ADDA_REG_NUM; index++)
- ODM_SetBBReg(dm_odm, AFE_REG[index], bMaskDWord, AFE_on_off[path]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0xe70 %x\n",
- ODM_GetBBReg(dm_odm, rRx_Wait_CCA, bMaskDWord)));
-
- /* BB to AP mode */
- if (path == 0) {
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index == 0) /* skip */
- continue;
- else if (index < 5)
- ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_AP_MODE[index]);
- else if (BB_REG[index] == 0x870)
- ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_backup[index]|BIT10|BIT26);
- else
- ODM_SetBBReg(dm_odm, BB_REG[index], BIT10, 0x0);
- }
-
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- } else {
- /* path B */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_B, bMaskDWord, 0x01008c00);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_B, bMaskDWord, 0x01008c00);
- }
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() offset 0x800 %x\n",
- ODM_GetBBReg(dm_odm, 0x800, bMaskDWord)));
-
- /* MAC settings */
- _PHY_MACSettingCalibration(adapt, MAC_REG, MAC_backup);
-
- if (path == RF_PATH_A) {
- /* Path B to standby mode */
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMaskDWord, 0x10000);
- } else {
- /* Path A to standby mode */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMaskDWord, 0x10000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20103);
- }
-
- delta_offset = ((delta+14)/2);
- if (delta_offset < 0)
- delta_offset = 0;
- else if (delta_offset > 12)
- delta_offset = 12;
-
- /* AP calibration */
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index != 1) /* only DO PA11+PAD01001, AP RF setting */
- continue;
-
- tmpreg = APK_RF_init_value[path][index];
- if (!dm_odm->RFCalibrateInfo.bAPKThermalMeterIgnore) {
- BB_offset = (tmpreg & 0xF0000) >> 16;
-
- if (!(tmpreg & BIT15)) /* sign bit 0 */
- BB_offset = -BB_offset;
-
- delta_V = APK_delta_mapping[index][delta_offset];
-
- BB_offset += delta_V;
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("phy_APCalibrate_8188E() APK index %d tmpreg 0x%x delta_V %d delta_offset %d\n",
- index, tmpreg, delta_V, delta_offset));
-
- if (BB_offset < 0) {
- tmpreg = tmpreg & (~BIT15);
- BB_offset = -BB_offset;
- } else {
- tmpreg = tmpreg | BIT15;
- }
- tmpreg = (tmpreg & 0xFFF0FFFF) | (BB_offset << 16);
- }
-
- ODM_SetRFReg(dm_odm, path, RF_IPA_A, bMaskDWord, 0x8992e);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xc %x\n", PHY_QueryRFReg(adapt, path, RF_IPA_A, bMaskDWord)));
- ODM_SetRFReg(dm_odm, path, RF_AC, bMaskDWord, APK_RF_value_0[path][index]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x0 %x\n", PHY_QueryRFReg(adapt, path, RF_AC, bMaskDWord)));
- ODM_SetRFReg(dm_odm, path, RF_TXBIAS_A, bMaskDWord, tmpreg);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xd %x\n", PHY_QueryRFReg(adapt, path, RF_TXBIAS_A, bMaskDWord)));
- /* PA11+PAD01111, one shot */
- i = 0;
- do {
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80000000);
- ODM_SetBBReg(dm_odm, APK_offset[path], bMaskDWord, APK_value[0]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(dm_odm, APK_offset[path], bMaskDWord)));
- ODM_delay_ms(3);
- ODM_SetBBReg(dm_odm, APK_offset[path], bMaskDWord, APK_value[1]);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(dm_odm, APK_offset[path], bMaskDWord)));
-
- ODM_delay_ms(20);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
-
- if (path == RF_PATH_A)
- tmpreg = ODM_GetBBReg(dm_odm, rAPK, 0x03E00000);
- else
- tmpreg = ODM_GetBBReg(dm_odm, rAPK, 0xF8000000);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xbd8[25:21] %x\n", tmpreg));
-
- i++;
- } while (tmpreg > apkbound && i < 4);
-
- APK_result[path][index] = tmpreg;
- }
- }
-
- /* reload MAC default value */
- _PHY_ReloadMACRegisters(adapt, MAC_REG, MAC_backup);
-
- /* reload BB default value */
- for (index = 0; index < APK_BB_REG_NUM; index++) {
- if (index == 0) /* skip */
- continue;
- ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_backup[index]);
- }
-
- /* reload AFE default value */
- reload_adda_reg(adapt, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM);
-
- /* reload RF path default value */
- for (path = 0; path < pathbound; path++) {
- ODM_SetRFReg(dm_odm, path, 0xd, bMaskDWord, regD[path]);
- if (path == RF_PATH_B) {
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20101);
- }
-
- /* note no index == 0 */
- if (APK_result[path][1] > 6)
- APK_result[path][1] = 6;
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("apk path %d result %d 0x%x \t", path, 1, APK_result[path][1]));
- }
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\n"));
-
- for (path = 0; path < pathbound; path++) {
- ODM_SetRFReg(dm_odm, path, 0x3, bMaskDWord,
- ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (APK_result[path][1] << 5) | APK_result[path][1]));
- if (path == RF_PATH_A)
- ODM_SetRFReg(dm_odm, path, 0x4, bMaskDWord,
- ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x00 << 5) | 0x05));
- else
- ODM_SetRFReg(dm_odm, path, 0x4, bMaskDWord,
- ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x02 << 5) | 0x05));
- ODM_SetRFReg(dm_odm, path, RF_BS_PA_APSET_G9_G11, bMaskDWord,
- ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | 0x08));
- }
-
- dm_odm->RFCalibrateInfo.bAPKdone = true;
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("<==phy_APCalibrate_8188E()\n"));
-}
-
-#define DP_BB_REG_NUM 7
-#define DP_RF_REG_NUM 1
-#define DP_RETRY_LIMIT 10
-#define DP_PATH_NUM 2
-#define DP_DPK_NUM 3
-#define DP_DPK_VALUE_NUM 2
-
void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
@@ -1697,7 +1296,7 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
bool is2t;
is2t = (dm_odm->RFType == ODM_2T2R) ? true : false;
- if (ODM_CheckPowerStatus(adapt) == false)
+ if (!ODM_CheckPowerStatus(adapt))
return;
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
@@ -1867,28 +1466,6 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
("LCK:Finish!!!interface %d\n", dm_odm->InterfaceIndex));
}
-void PHY_APCalibrate_8188E(struct adapter *adapt, s8 delta)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- return;
- if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
- return;
-
-#if FOR_BRAZIL_PRETEST != 1
- if (dm_odm->RFCalibrateInfo.bAPKdone)
-#endif
- return;
-
- if (dm_odm->RFType == ODM_2T2R) {
- phy_APCalibrate_8188E(adapt, delta, true);
- } else {
- /* For 88C 1T1R */
- phy_APCalibrate_8188E(adapt, delta, false);
- }
-}
-
static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2t)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
diff --git a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
index e913a22a642..5700dbce5b8 100644
--- a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
@@ -85,7 +85,7 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
- /* Write the value back to sytem register */
+ /* Write the value back to system register */
rtw_write8(padapter, offset, value);
break;
case PWR_CMD_POLLING:
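
The PWR_CMD_WRITE branch shown above is a classic read-modify-write: only the bits selected by the command's mask change before the value is written back. A minimal sketch of that pattern, with the hardware access abstracted to a plain byte pointer so it stays self-contained (the real code goes through rtw_read8()/rtw_write8()):

    #include <stdint.h>

    /* Update only the bits covered by 'mask'; all other bits keep
     * their current value.
     */
    static void masked_write8(uint8_t *reg, uint8_t mask, uint8_t new_bits)
    {
            uint8_t value = *reg;

            value &= ~mask;                 /* clear the field */
            value |= (new_bits & mask);     /* set the requested bits */
            *reg = value;
    }
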
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 8c858775451..8be2ad7217d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -273,7 +273,7 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
struct rtw_ieee80211_hdr *pwlanhdr;
- u16 *fctrl;
+ __le16 *fctrl;
u32 rate_len, pktlen;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
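
The fctrl type change above (and the matching ones in the following hunks) annotates the 802.11 frame-control field as explicitly little-endian, which is its on-air byte order; the usual motivation is that sparse's endianness checking can then catch stores that bypass cpu_to_le16(). A small sketch of the idea, assuming only the standard kernel byte-order helpers (the struct here is illustrative, not the driver's header layout):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_hdr {
            __le16 frame_ctl;       /* on-air fields are little-endian */
            __le16 duration;
    };

    static void init_hdr(struct example_hdr *hdr, u16 fc)
    {
            /* Convert from CPU byte order to the wire format; with a
             * plain u16 member, sparse could not flag a missing
             * conversion on big-endian builds.
             */
            hdr->frame_ctl = cpu_to_le16(fc);
            hdr->duration = cpu_to_le16(0);
    }
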
@@ -360,7 +360,7 @@ static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
struct rtw_ieee80211_hdr *pwlanhdr;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u16 *fctrl;
+ __le16 *fctrl;
pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
@@ -391,7 +391,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
u8 bForcePowerSave)
{
struct rtw_ieee80211_hdr *pwlanhdr;
- u16 *fctrl;
+ __le16 *fctrl;
u32 pktlen;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
struct wlan_network *cur_network = &pmlmepriv->cur_network;
@@ -450,7 +450,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
{
struct rtw_ieee80211_hdr *pwlanhdr;
- u16 *fctrl;
+ __le16 *fctrl;
u8 *mac, *bssid;
u32 pktlen;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
@@ -484,7 +484,7 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
*pLength = pktlen;
}
-/* To check if reserved page content is destroyed by beacon beacuse beacon is too large. */
+/* To check if reserved page content is destroyed by beacon because beacon is too large. */
/* 2010.06.23. Added by tynli. */
void CheckFwRsvdPageContent(struct adapter *Adapter)
{
@@ -496,9 +496,9 @@ void CheckFwRsvdPageContent(struct adapter *Adapter)
/* (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp. */
/* Input: */
/* bDLFinished - false: At the first time we will send all the packets as a large packet to Hw, */
-/* so we need to set the packet length to total lengh. */
+/* so we need to set the packet length to total length. */
/* true: At the second time, we should send the first packet (default:beacon) */
-/* to Hw again and set the lengh in descriptor to the real beacon lengh. */
+/* to Hw again and set the length in descriptor to the real beacon length. */
/* 2009.10.15 by tynli. */
static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
{
@@ -671,7 +671,7 @@ _func_enter_;
DBG_88E("%s: 1 Download RSVD success! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
/* */
/* We just can send the reserved page twice during the time that Tx thread is stopped (e.g. pnpsetpower) */
- /* becuase we need to free the Tx BCN Desc which is used by the first reserved page packet. */
+ /* because we need to free the Tx BCN Desc which is used by the first reserved page packet. */
/* At run time, we cannot get the Tx Desc until it is released in TxHandleInterrupt() so we will return */
/* the beacon TCB in the following code. 2011.11.23. by tynli. */
/* */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 292ba62d722..52b3fba0fae 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -19,6 +19,7 @@
******************************************************************************/
#define _HAL_INIT_C_
+#include <linux/firmware.h>
#include <drv_types.h>
#include <rtw_efuse.h>
@@ -588,13 +589,15 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
u8 writeFW_retry = 0;
u32 fwdl_start_time;
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- u8 *FwImage;
- u32 FwImageLen;
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct device *device = dvobj_to_dev(dvobj);
struct rt_firmware *pFirmware = NULL;
+ const struct firmware *fw;
struct rt_firmware_hdr *pFwHdr = NULL;
u8 *pFirmwareBuf;
- u32 FirmwareLen;
+ u32 FirmwareLen;
+ char fw_name[] = "rtlwifi/rtl8188eufw.bin";
+ static int log_version;
RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
pFirmware = (struct rt_firmware *)rtw_zmalloc(sizeof(struct rt_firmware));
@@ -603,27 +606,32 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
goto Exit;
}
- FwImage = (u8 *)Rtl8188E_FwImageArray;
- FwImageLen = Rtl8188E_FWImgArrayLength;
-
- pFirmware->eFWSource = FW_SOURCE_HEADER_FILE;
-
- switch (pFirmware->eFWSource) {
- case FW_SOURCE_IMG_FILE:
- break;
- case FW_SOURCE_HEADER_FILE:
- if (FwImageLen > FW_8188E_SIZE) {
- rtStatus = _FAIL;
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("Firmware size exceed 0x%X. Check it.\n", FW_8188E_SIZE));
- goto Exit;
- }
+ if (request_firmware(&fw, fw_name, device)) {
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+ if (!fw) {
+ pr_err("Firmware %s not available\n", fw_name);
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+ if (fw->size > FW_8188E_SIZE) {
+ rtStatus = _FAIL;
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("Firmware size exceed 0x%X. Check it.\n", FW_8188E_SIZE));
+ goto Exit;
+ }
- pFirmware->szFwBuffer = FwImage;
- pFirmware->ulFwLength = FwImageLen;
- break;
+ pFirmware->szFwBuffer = kzalloc(FW_8188E_SIZE, GFP_KERNEL);
+ if (!pFirmware->szFwBuffer) {
+ rtStatus = _FAIL;
+ goto Exit;
}
+ memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
+ pFirmware->ulFwLength = fw->size;
pFirmwareBuf = pFirmware->szFwBuffer;
FirmwareLen = pFirmware->ulFwLength;
+ release_firmware(fw);
+
DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__, FirmwareLen);
/* To Check Fw header. Added by tynli. 2009.12.04. */
@@ -633,8 +641,10 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
pHalData->FirmwareSubVersion = pFwHdr->Subversion;
pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
- DBG_88E("%s: fw_ver =%d fw_subver =%d sig = 0x%x\n",
- __func__, pHalData->FirmwareVersion, pHalData->FirmwareSubVersion, pHalData->FirmwareSignature);
+ if (!log_version++)
+ pr_info("%sFirmware Version %d, SubVersion %d, Signature 0x%x\n",
+ DRIVER_PREFIX, pHalData->FirmwareVersion,
+ pHalData->FirmwareSubVersion, pHalData->FirmwareSignature);
if (IS_FW_HEADER_EXIST(pFwHdr)) {
/* Shift 32 bytes for FW header */
@@ -677,7 +687,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
goto Exit;
}
RT_TRACE(_module_hal_init_c_, _drv_info_, ("Firmware is ready to run!\n"));
-
+ kfree(pFirmware->szFwBuffer);
Exit:
kfree(pFirmware);
@@ -1479,7 +1489,6 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
{
- bool bRet = false;
u16 efuse_addr = *pAddr;
u8 badworden = 0;
u32 PgWriteSuccess = 0;
@@ -1497,7 +1506,6 @@ static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u
else
return true;
}
- return bRet;
}
static bool
@@ -1653,7 +1661,7 @@ hal_EfusePgCheckAvailableAddr(
{
u16 efuse_max_available_len = 0;
- /* Change to check TYPE_EFUSE_MAP_LEN , beacuse 8188E raw 256, logic map over 256. */
+ /* Change to check TYPE_EFUSE_MAP_LEN , because 8188E raw 256, logic map over 256. */
EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
if (Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
@@ -2100,7 +2108,7 @@ static u8 Hal_GetChnlGroup88E(u8 chnl, u8 *pGroup)
if (chnl <= 14) {
bIn24G = true;
- if (chnl < 3) /* Chanel 1-2 */
+ if (chnl < 3) /* Channel 1-2 */
*pGroup = 0;
else if (chnl < 6) /* Channel 3-5 */
*pGroup = 1;
@@ -2182,7 +2190,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
pHalData->bTXPowerDataReadFromEEPORM = true;
for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
- for (ch = 0; ch <= CHANNEL_MAX_NUMBER; ch++) {
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
bIn24G = Hal_GetChnlGroup88E(ch, &group);
if (bIn24G) {
pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
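The hal_init hunks above drop the built-in firmware array in favour of the kernel firmware loader (the Hal8188EFWImg_CE.h blob header is deleted further down). A minimal, hedged sketch of that request_firmware() pattern, with hypothetical helper and parameter names, looks like this:

        #include <linux/firmware.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        /* Sketch of the pattern adopted above; helper/parameter names are
         * hypothetical, the firmware path is the one the patch requests. */
        static int load_fw_blob(struct device *dev, u8 **buf, u32 *len, u32 max_len)
        {
                const struct firmware *fw;
                int err;

                err = request_firmware(&fw, "rtlwifi/rtl8188eufw.bin", dev);
                if (err)
                        return err;
                if (fw->size > max_len) {
                        release_firmware(fw);
                        return -EFBIG;
                }
                *buf = kmemdup(fw->data, fw->size, GFP_KERNEL);
                if (!*buf) {
                        release_firmware(fw);
                        return -ENOMEM;
                }
                *len = fw->size;
                release_firmware(fw);   /* driver copy made, loader buffer no longer needed */
                return 0;
        }

Note that the hunk above also kfree()s the driver's copy (pFirmware->szFwBuffer) once the download has finished, matching the kzalloc() it introduces.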
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
index ff468a68e32..68bb96d83c8 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
@@ -559,7 +559,7 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
/* */
/* 1. Read PHY_REG.TXT BB INIT!! */
- /* We will seperate as 88C / 92C according to chip version */
+ /* We will separate as 88C / 92C according to chip version */
/* */
if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
rtStatus = _FAIL;
@@ -685,7 +685,7 @@ static u8 phy_DbmToTxPwrIdx(struct adapter *Adapter, enum wireless_mode Wireless
/* */
/* Tested by MP, we found that CCK Index 0 equals to 8dbm, OFDM legacy equals to */
- /* 3dbm, and OFDM HT equals to 0dbm repectively. */
+ /* 3dbm, and OFDM HT equals to 0dbm respectively. */
/* Note: */
/* The mapping may be different by different NICs. Do not use this formula for what needs accurate result. */
/* By Bruce, 2008-01-29. */
@@ -1006,12 +1006,12 @@ _PHY_SetBWMode92C(
switch (pHalData->CurrentChannelBW) {
case HT_CHANNEL_WIDTH_20:
regBwOpMode |= BW_OPMODE_20MHZ;
- /* 2007/02/07 Mark by Emily becasue we have not verify whether this register works */
+ /* 2007/02/07 Mark by Emily because we have not verify whether this register works */
rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
break;
case HT_CHANNEL_WIDTH_40:
regBwOpMode &= ~BW_OPMODE_20MHZ;
- /* 2007/02/07 Mark by Emily becasue we have not verify whether this register works */
+ /* 2007/02/07 Mark by Emily because we have not verify whether this register works */
rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
regRRSR_RSC = (regRRSR_RSC&0x90) | (pHalData->nCur40MhzPrimeSC<<5);
rtw_write8(Adapter, REG_RRSR+2, regRRSR_RSC);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
index bfdf9b3ce77..299e03e3daf 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
@@ -181,7 +181,7 @@ i * Currently, we cannot fully disable driver dynamic
* tx power mechanism because it is referenced by BT
* coexist mechanism.
* In the future, two mechanism shall be separated from
- * each other and maintained independantly. */
+ * each other and maintained independently. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
TxAGC[RF_PATH_A] = 0x10101010;
TxAGC[RF_PATH_B] = 0x10101010;
@@ -216,11 +216,11 @@ i * Currently, we cannot fully disable driver dynamic
ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 1, &direction, &pwrtrac_value);
if (direction == 1) {
- /* Increase TX pwoer */
+ /* Increase TX power */
TxAGC[0] += pwrtrac_value;
TxAGC[1] += pwrtrac_value;
} else if (direction == 2) {
- /* Decrease TX pwoer */
+ /* Decrease TX power */
TxAGC[0] -= pwrtrac_value;
TxAGC[1] -= pwrtrac_value;
}
@@ -292,7 +292,7 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
if (pHalData->pwrGroupCnt == 1)
chnlGroup = 0;
if (pHalData->pwrGroupCnt >= pHalData->PGMaxGroup) {
- if (Channel < 3) /* Chanel 1-2 */
+ if (Channel < 3) /* Channel 1-2 */
chnlGroup = 0;
else if (Channel < 6) /* Channel 3-5 */
chnlGroup = 1;
@@ -349,7 +349,7 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
}
/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, two mechanism shall be separated from each other and maintained independantly. Thanks for Lanhsin's reminder. */
+/* In the future, two mechanism shall be separated from each other and maintained independently. Thanks for Lanhsin's reminder. */
/* 92d do not need this */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
writeVal = 0x14141414;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index bd8a9ae5d07..8f43f4966f2 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -332,7 +332,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
/* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
/* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
- /* mgnt frame should be controled by Hw because Fw will also send null data */
+ /* mgnt frame should be controlled by Hw because Fw will also send null data */
/* which we cannot control when Fw LPS enable. */
/* --> default enable non-Qos data sequence number. 2010.06.23. by tynli. */
/* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 5e656ce4540..cca973211b2 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -464,7 +464,7 @@ static void _InitRetryFunction(struct adapter *Adapter)
/*-----------------------------------------------------------------------------
* Function: usb_AggSettingTxUpdate()
*
- * Overview: Seperate TX/RX parameters update independent for TP detection and
+ * Overview: Separate TX/RX parameters update independent for TP detection and
* dynamic TX/RX aggregation parameters update.
*
* Input: struct adapter *
@@ -473,7 +473,7 @@ static void _InitRetryFunction(struct adapter *Adapter)
*
* Revised History:
* When Who Remark
- * 12/10/2010 MHC Seperate to smaller function.
+ * 12/10/2010 MHC Separate to smaller function.
*
*---------------------------------------------------------------------------*/
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
@@ -496,7 +496,7 @@ static void usb_AggSettingTxUpdate(struct adapter *Adapter)
/*-----------------------------------------------------------------------------
* Function: usb_AggSettingRxUpdate()
*
- * Overview: Seperate TX/RX parameters update independent for TP detection and
+ * Overview: Separate TX/RX parameters update independent for TP detection and
* dynamic TX/RX aggregation parameters update.
*
* Input: struct adapter *
@@ -505,7 +505,7 @@ static void usb_AggSettingTxUpdate(struct adapter *Adapter)
*
* Revised History:
* When Who Remark
- * 12/10/2010 MHC Seperate to smaller function.
+ * 12/10/2010 MHC Separate to smaller function.
*
*---------------------------------------------------------------------------*/
static void
@@ -847,7 +847,7 @@ _func_enter_;
/* */
/* Init CR MACTXEN, MACRXEN after setting RxFF boundary REG_TRXFF_BNDY to patch */
- /* Hw bug which Hw initials RxFF boundry size to a value which is larger than the real Rx buffer size in 88E. */
+ /* Hw bug which Hw initials RxFF boundary size to a value which is larger than the real Rx buffer size in 88E. */
/* */
/* Enable MACTXEN/MACRXEN block */
value16 = rtw_read16(Adapter, REG_CR);
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
index bc564169b2f..787763ef74c 100644
--- a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -547,6 +547,8 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
DBG_88E("###=> usb_read_port_complete => urb status(%d)\n", purb->status);
+ skb_put(precvbuf->pskb, purb->actual_length);
+ precvbuf->pskb = NULL;
if (rtw_inc_and_chk_continual_urb_error(adapter_to_dvobj(adapt)))
adapt->bSurpriseRemoved = true;
@@ -605,68 +607,68 @@ _func_enter_;
return _FAIL;
}
+ if (!precvbuf) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:precvbuf==NULL\n"));
+ return _FAIL;
+ }
+
if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
if (NULL != precvbuf->pskb)
precvbuf->reuse = true;
}
- if (precvbuf != NULL) {
- rtl8188eu_init_recvbuf(adapter, precvbuf);
-
- /* re-assign for linux based on skb */
- if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
- precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (precvbuf->pskb == NULL) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
- DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
- return _FAIL;
- }
-
- tmpaddr = (size_t)precvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
-
- precvbuf->phead = precvbuf->pskb->head;
- precvbuf->pdata = precvbuf->pskb->data;
- precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
- precvbuf->pend = skb_end_pointer(precvbuf->pskb);
- precvbuf->pbuf = precvbuf->pskb->data;
- } else { /* reuse skb */
- precvbuf->phead = precvbuf->pskb->head;
- precvbuf->pdata = precvbuf->pskb->data;
- precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
- precvbuf->pend = skb_end_pointer(precvbuf->pskb);
- precvbuf->pbuf = precvbuf->pskb->data;
+ rtl8188eu_init_recvbuf(adapter, precvbuf);
- precvbuf->reuse = false;
+ /* re-assign for linux based on skb */
+ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ if (precvbuf->pskb == NULL) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
+ DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
+ return _FAIL;
}
- precvpriv->rx_pending_cnt++;
+ tmpaddr = (size_t)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+ } else { /* reuse skb */
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+
+ precvbuf->reuse = false;
+ }
- purb = precvbuf->purb;
+ precvpriv->rx_pending_cnt++;
- /* translate DMA FIFO addr to pipehandle */
- pipe = ffaddr2pipehdl(pdvobj, addr);
+ purb = precvbuf->purb;
- usb_fill_bulk_urb(purb, pusbd, pipe,
- precvbuf->pbuf,
- MAX_RECVBUF_SZ,
- usb_read_port_complete,
- precvbuf);/* context is precvbuf */
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl(pdvobj, addr);
- err = usb_submit_urb(purb, GFP_ATOMIC);
- if ((err) && (err != (-EPERM))) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x",
- err, purb->status));
- DBG_88E("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n",
- err, purb->status);
- ret = _FAIL;
- }
- } else {
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ precvbuf->pbuf,
+ MAX_RECVBUF_SZ,
+ usb_read_port_complete,
+ precvbuf);/* context is precvbuf */
+
+ err = usb_submit_urb(purb, GFP_ATOMIC);
+ if ((err) && (err != (-EPERM))) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("usb_read_port:precvbuf ==NULL\n"));
+ ("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x",
+ err, purb->status));
+ DBG_88E("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n",
+ err, purb->status);
ret = _FAIL;
}
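The usb_read_port() rewrite above is mostly a de-indentation (the NULL check on precvbuf now happens up front), but the receive-buffer idiom it keeps is worth spelling out: over-allocate the skb by the alignment size, then skb_reserve() the remainder so skb->data is aligned for the bulk-in transfer. A hedged sketch using the driver's own constants (the helper name is hypothetical):

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        /* MAX_RECVBUF_SZ and RECVBUFF_ALIGN_SZ are the driver's existing
         * constants; RECVBUFF_ALIGN_SZ must be a power of two. */
        static struct sk_buff *alloc_aligned_rx_skb(struct net_device *ndev)
        {
                struct sk_buff *skb;
                size_t misalign;

                skb = netdev_alloc_skb(ndev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
                if (!skb)
                        return NULL;
                misalign = (size_t)skb->data & (RECVBUFF_ALIGN_SZ - 1);
                skb_reserve(skb, RECVBUFF_ALIGN_SZ - misalign); /* align skb->data */
                return skb;
        }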
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h b/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h
deleted file mode 100644
index 949c33b9ed6..00000000000
--- a/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
-******************************************************************************/
-#ifndef __INC_HAL8188E_FW_IMG_H
-#define __INC_HAL8188E_FW_IMG_H
-
-/* V10(1641) */
-#define Rtl8188EFWImgArrayLength 13904
-
-extern const u8 Rtl8188EFwImgArray[Rtl8188EFWImgArrayLength];
-
-#endif /* __INC_HAL8188E_FW_IMG_H */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index c4769e20a5c..25cae8147e7 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -75,7 +75,7 @@ enum rf_radio_path {
#define MAX_PG_GROUP 13
-#define RF_PATH_MAX 2
+#define RF_PATH_MAX 3
#define MAX_RF_PATH RF_PATH_MAX
#define MAX_TX_COUNT 4 /* path numbers */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 0e06d29b2d2..9f2969bf835 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -26,7 +26,7 @@
/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
/* 3. RF register 0x00-2E */
/* 4. Bit Mask for BB/RF register */
-/* 5. Other defintion for BB/RF R/W */
+/* 5. Other definition for BB/RF R/W */
/* */
diff --git a/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h b/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
index fa583f24832..287e9f9eae4 100644
--- a/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
+++ b/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
@@ -45,8 +45,6 @@ void PHY_IQCalibrate_8188E(struct adapter *Adapter, bool ReCovery);
void PHY_LCCalibrate_8188E(struct adapter *pAdapter);
/* AP calibrate */
-void PHY_APCalibrate_8188E(struct adapter *pAdapter, s8 delta);
-
void PHY_DigitalPredistortion_8188E(struct adapter *pAdapter);
void _PHY_SaveADDARegisters(struct adapter *pAdapter, u32 *ADDAReg,
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index cd37ea4df4c..c4d38d14abf 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -306,8 +306,8 @@ struct ieee_ibss_seq {
};
struct rtw_ieee80211_hdr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
@@ -316,8 +316,8 @@ struct rtw_ieee80211_hdr {
} __packed;
struct rtw_ieee80211_hdr_3addr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
@@ -325,8 +325,8 @@ struct rtw_ieee80211_hdr_3addr {
} __packed;
struct rtw_ieee80211_hdr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
@@ -336,8 +336,8 @@ struct rtw_ieee80211_hdr_qos {
} __packed;
struct rtw_ieee80211_hdr_3addr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
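The header changes above annotate the on-air fields as __le16: the 802.11 frame control and duration fields are little-endian on the wire, and the annotation lets sparse flag any access that skips the byte-order conversion. A minimal sketch of the intended usage (the accessor name is hypothetical; 0x0100 is IEEE80211_FCTL_TODS):

        #include <linux/types.h>
        #include <asm/byteorder.h>

        /* Convert with le16_to_cpu() at the point of use, compute in CPU order. */
        static inline bool hdr_is_to_ds(const struct rtw_ieee80211_hdr *hdr)
        {
                return le16_to_cpu(hdr->frame_ctl) & 0x0100;
        }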
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 4787bacdcad..eaa4bc1b225 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -283,8 +283,6 @@ struct odm_rate_adapt {
/* Declare for common info */
-#define MAX_PATH_NUM_92CS 2
-
struct odm_phy_status_info {
u8 RxPWDBAll;
u8 SignalQuality; /* in 0-100 index. */
@@ -950,7 +948,7 @@ struct odm_dm_struct {
struct timer_list FastAntTrainingTimer;
}; /* DM_Dynamic_Mechanism_Structure */
-#define ODM_RF_PATH_MAX 2
+#define ODM_RF_PATH_MAX 3
enum ODM_RF_RADIO_PATH {
ODM_RF_PATH_A = 0, /* Radio Path A */
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index 63779f5b2a3..df5272221ba 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -69,7 +69,7 @@ struct phy_rx_agc_info {
};
struct phy_status_rpt {
- struct phy_rx_agc_info path_agc[2];
+ struct phy_rx_agc_info path_agc[3];
u8 ch_corr[2];
u8 cck_sig_qual_ofdm_pwdb_all;
u8 cck_agc_rpt_ofdm_cfosho_a;
@@ -79,7 +79,7 @@ struct phy_status_rpt {
u8 path_cfotail[2];
u8 pcts_mask[2];
s8 stream_rxevm[2];
- u8 path_rxsnr[2];
+ u8 path_rxsnr[3];
u8 noise_power_db_lsb;
u8 rsvd_2[3];
u8 stream_csi[2];
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index a9ba6df26b9..622f4c1418b 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -27,7 +27,7 @@
/* Define the debug levels */
/* */
/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
-/* They can help SW engineer to develope or trace states changed */
+/* They can help SW engineer to develop or trace states changed */
/* and also help HW engineer to trace every operation to and from HW, */
/* e.g. IO, Tx, Rx. */
/* */
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 520cbbaac35..d1d95f4b87a 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -31,11 +31,6 @@
#include <drv_types.h>
#include <hal_intf.h>
-/* 2 Hardware Parameter Files */
-
-#include "Hal8188EFWImg_CE.h"
-
-
/* 2 OutSrc Header Files */
#include "odm.h"
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 44f24fa31a3..36523edf6a7 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -430,11 +430,6 @@ int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i);
int ATOMIC_INC_RETURN(ATOMIC_T *v);
int ATOMIC_DEC_RETURN(ATOMIC_T *v);
-/* File operation APIs, just for linux now */
-int rtw_is_file_readable(char *path);
-int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz);
-int rtw_store_to_file(char *path, u8 __user *buf, u32 sz);
-
struct rtw_netdev_priv_indicator {
void *priv;
u32 sizeof_priv;
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 819285b9a78..8cafd7adfdc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -745,7 +745,7 @@ struct TDLSoption_param
Result:
0x00: success
-0x01: sucess, and check Response.
+0x01: success, and check Response.
0x02: cmd ignored due to duplicated sequence number
0x03: cmd dropped due to invalid cmd code
0x04: reserved.
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index 2e618043d35..d0da4fd40d1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -163,14 +163,14 @@ enum LED_STRATEGY_871x {
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
struct led_priv{
- /* add for led controll */
+ /* add for led control */
struct LED_871x SwLed0;
struct LED_871x SwLed1;
enum LED_STRATEGY_871x LedStrategy;
u8 bRegUseLed;
void (*LedControlHandler)(struct adapter *padapter,
enum LED_CTL_MODE LedAction);
- /* add for led controll */
+ /* add for led control */
};
#define rtw_led_control(adapt, action) \
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 22538e61695..4a7143e0eed 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -53,11 +53,11 @@
#define WIFI_SITE_MONITOR 0x00000800 /* to indicate the station is under site surveying */
#define WIFI_MP_STATE 0x00010000
-#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continous tx background */
-#define WIFI_MP_CTX_ST 0x00040000 /* in continous tx with single-tone */
-#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continous tx background due to out of skb */
-#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continous tx */
-#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continous tx with carrier suppression */
+#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
+#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single-tone */
+#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continuous tx background due to out of skb */
+#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
+#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
#define WIFI_MP_LPBK_STATE 0x00400000
#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
@@ -239,7 +239,7 @@ struct wifidirect_info {
u8 profileindex; /* Used to point to the index of profileinfo array */
u8 peer_operating_ch;
u8 find_phase_state_exchange_cnt;
- /* The device password ID for group negotation */
+ /* The device password ID for group negotiation */
u16 device_password_id_for_nego;
u8 negotiation_dialog_token;
/* SSID information for group negotiation */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 853ab80a2b8..b1bfa2e30fd 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -107,7 +107,7 @@ extern unsigned char WMM_PARA_OUI[];
/* Note: */
/* We just add new channel plan when the new channel plan is different
* from any of the following channel plan. */
-/* If you just wnat to customize the acitions(scan period or join actions)
+/* If you just want to customize the actions(scan period or join actions)
* about one of the channel plan, */
/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
enum RT_CHANNEL_DOMAIN {
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 3ad22076de3..30fd17f23bf 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -56,7 +56,7 @@
/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
/* 3. RF register 0x00-2E */
/* 4. Bit Mask for BB/RF register */
-/* 5. Other defintion for BB/RF R/W */
+/* 5. Other definition for BB/RF R/W */
/* */
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index bae8885c57f..be9c30c5741 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -83,7 +83,7 @@ struct signal_stat {
u32 total_num; /* num of valid elements */
u32 total_val; /* sum of valid elements */
};
-#define MAX_PATH_NUM_92CS 2
+#define MAX_PATH_NUM_92CS 3
struct phy_info {
u8 RxPWDBAll;
u8 SignalQuality; /* in 0-100 index. */
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 089ecee6c1f..2df88370de5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -119,7 +119,7 @@ enum ht_channel_width {
};
/* */
-/* Represent Extention Channel Offset in HT Capabilities */
+/* Represent Extension Channel Offset in HT Capabilities */
/* This is available only in 40Mhz mode. */
/* */
enum ht_extchnl_offset {
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index 3ed2a39741a..3e909db1d41 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -338,7 +338,7 @@ struct sta_priv {
*/
struct sta_info *sta_aid[NUM_STA];
- u16 sta_dz_bitmap;/* only support 15 stations, staion aid bitmap
+ u16 sta_dz_bitmap;/* only support 15 stations, station aid bitmap
* for sleeping sta. */
u16 tim_bitmap; /* only support 15 stations, aid=0~15 mapping
* bit0~bit15 */
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index a615659f947..84e51997419 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -984,7 +984,7 @@ enum ht_cap_ampdu_factor {
#define P2P_PROVISION_TIMEOUT 5000
/* 3 seconds timeout for sending the prov disc request concurrent mode */
#define P2P_CONCURRENT_PROVISION_TIME 3000
-/* 5 seconds timeout for receiving the group negotation response */
+/* 5 seconds timeout for receiving the group negotiation response */
#define P2P_GO_NEGO_TIMEOUT 5000
/* 3 seconds timeout for sending the negotiation request under concurrent mode */
#define P2P_CONCURRENT_GO_NEGO_TIME 3000
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 95953ebc027..ae545877023 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -938,7 +938,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
if (pPMK->cmd == IW_PMKSA_ADD) {
DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
- if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN) == true)
+ if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
return ret;
else
ret = true;
@@ -1039,7 +1039,7 @@ static int rtw_wx_get_range(struct net_device *dev,
range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
- range->avg_qual.level = 20 + -98;
+ range->avg_qual.level = 178; /* -78 dBm */
range->avg_qual.noise = 0;
range->avg_qual.updated = 7; /* Updated all three */
@@ -1074,7 +1074,7 @@ static int rtw_wx_get_range(struct net_device *dev,
/* The following code will provide the security capability to network manager. */
/* If the driver doesn't provide this capability to network manager, */
-/* the WPA/WPA2 routers can't be choosen in the network manager. */
+/* the WPA/WPA2 routers can't be chosen in the network manager. */
/*
#define IW_SCAN_CAPA_NONE 0x00
@@ -1373,7 +1373,7 @@ _func_enter_;
}
}
- /* it has still some scan paramater to parse, we only do this now... */
+ /* it has still some scan parameter to parse, we only do this now... */
_status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT);
} else {
_status = rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
@@ -2626,7 +2626,7 @@ static int rtw_get_ap_info(struct net_device *dev,
return -EINVAL;
}
- if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN) == true) {
+ if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN)) {
/* BSSID match, then check if supporting wpa/wpa2 */
DBG_88E("BSSID:%pM\n", (bssid));
@@ -2961,7 +2961,7 @@ static int rtw_p2p_get_status(struct net_device *dev,
/* Commented by Albert 20110520 */
/* This function will return the config method description */
-/* This config method description will show us which config method the remote P2P device is intented to use */
+/* This config method description will show us which config method the remote P2P device is intended to use */
/* by sending the provisioning discovery request frame. */
static int rtw_p2p_get_req_cm(struct net_device *dev,
@@ -3413,7 +3413,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
/* +8 is for the str "InvProc =", we have to clear it at wrqu->data.pointer */
/* Commented by Ouden 20121226 */
- /* The application wants to know P2P initation procedure is support or not. */
+ /* The application wants to know P2P initiation procedure is supported or not. */
/* Format: iwpriv wlanx p2p_get2 InvProc = 00:E0:4C:00:00:05 */
DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
@@ -4040,7 +4040,7 @@ static int rtw_rereg_nd_name(struct net_device *dev,
if (0 != ret)
goto exit;
- if (!memcmp(rereg_priv->old_ifname, "disable%d", 9) == true) {
+ if (!memcmp(rereg_priv->old_ifname, "disable%d", 9)) {
padapter->ledpriv.bRegUseLed = rereg_priv->old_bRegUseLed;
rtw_hal_sw_led_init(padapter);
rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode);
@@ -4049,7 +4049,7 @@ static int rtw_rereg_nd_name(struct net_device *dev,
strncpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
- if (!memcmp(new_ifname, "disable%d", 9) == true) {
+ if (!memcmp(new_ifname, "disable%d", 9)) {
DBG_88E("%s disable\n", __func__);
/* free network queue for Android's timing issue */
rtw_free_network_queue(padapter, true);
@@ -4884,7 +4884,6 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
case _TKIP_:
case _TKIP_WTMIC_:
case _AES_:
- keylen = 16;
default:
keylen = 16;
}
@@ -6146,7 +6145,7 @@ static int rtw_mp_efuse_set(struct net_device *dev,
for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
- /* Change to check TYPE_EFUSE_MAP_LEN, beacuse 8188E raw 256, logic map over 256. */
+ /* Change to check TYPE_EFUSE_MAP_LEN, because 8188E raw 256, logic map over 256. */
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
if ((addr+cnts) > max_available_size) {
DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
@@ -6221,7 +6220,7 @@ static int rtw_mp_efuse_set(struct net_device *dev,
for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
- /* Change to check TYPE_EFUSE_MAP_LEN, beacuse 8188E raw 256, logic map over 256. */
+ /* Change to check TYPE_EFUSE_MAP_LEN, because 8188E raw 256, logic map over 256. */
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
if ((addr+cnts) > max_available_size) {
DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 63bc913eba6..17659bb04be 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -85,7 +85,7 @@ static int rtw_uapsd_acvi_en;
static int rtw_uapsd_acvo_en;
int rtw_ht_enable = 1;
-int rtw_cbw40_enable = 3; /* 0 :diable, bit(0): enable 2.4g, bit(1): enable 5g */
+int rtw_cbw40_enable = 3; /* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
static int rtw_rx_stbc = 1;/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable 2.4GHZ for IOT issue with Buffalo's AP at 5GHZ */
static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
@@ -707,6 +707,10 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
return 0;
}
+static const struct device_type wlan_type = {
+ .name = "wlan",
+};
+
struct net_device *rtw_init_netdev(struct adapter *old_padapter)
{
struct adapter *padapter;
@@ -722,6 +726,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
if (!pnetdev)
return NULL;
+ pnetdev->dev.type = &wlan_type;
padapter = rtw_netdev_priv(pnetdev);
padapter->pnetdev = pnetdev;
DBG_88E("register rtw_netdev_ops to netdev_ops\n");
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 4e0bfb7e153..a1ae72772c5 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -356,214 +356,6 @@ inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
return atomic_dec_return(v);
}
-/* Open a file with the specific @param path, @param flag, @param mode
- * @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
- * @param path the path of the file to open
- * @param flag file operation flags, please refer to linux document
- * @param mode please refer to linux document
- * @return Linux specific error code
- */
-static int openfile(struct file **fpp, char *path, int flag, int mode)
-{
- struct file *fp;
-
- fp = filp_open(path, flag, mode);
- if (IS_ERR(fp)) {
- *fpp = NULL;
- return PTR_ERR(fp);
- } else {
- *fpp = fp;
- return 0;
- }
-}
-
-/* Close the file with the specific @param fp
- * @param fp the pointer of struct file to close
- * @return always 0
- */
-static int closefile(struct file *fp)
-{
- filp_close(fp, NULL);
- return 0;
-}
-
-static int readfile(struct file *fp, char __user *buf, int len)
-{
- int rlen = 0, sum = 0;
-
- if (!fp->f_op || !fp->f_op->read)
- return -EPERM;
-
- while (sum < len) {
- rlen = fp->f_op->read(fp, buf+sum, len-sum, &fp->f_pos);
- if (rlen > 0)
- sum += rlen;
- else if (0 != rlen)
- return rlen;
- else
- break;
- }
- return sum;
-}
-
-static int writefile(struct file *fp, char __user *buf, int len)
-{
- int wlen = 0, sum = 0;
-
- if (!fp->f_op || !fp->f_op->write)
- return -EPERM;
-
- while (sum < len) {
- wlen = fp->f_op->write(fp, buf+sum, len-sum, &fp->f_pos);
- if (wlen > 0)
- sum += wlen;
- else if (0 != wlen)
- return wlen;
- else
- break;
- }
- return sum;
-}
-
-/* Test if the specifi @param path is a file and readable
- * @param path the path of the file to test
- * @return Linux specific error code
- */
-static int isfilereadable(char *path)
-{
- struct file *fp;
- int ret = 0;
- mm_segment_t oldfs;
- char __user buf;
-
- fp = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- ret = PTR_ERR(fp);
- } else {
- oldfs = get_fs(); set_fs(get_ds());
-
- if (1 != readfile(fp, &buf, 1))
- ret = PTR_ERR(fp);
-
- set_fs(oldfs);
- filp_close(fp, NULL);
- }
- return ret;
-}
-
-/* Open the file with @param path and retrive the file content into
- * memory starting from @param buf for @param sz at most
- * @param path the path of the file to open and read
- * @param buf the starting address of the buffer to store file content
- * @param sz how many bytes to read at most
- * @return the byte we've read, or Linux specific error code
- */
-static int retrievefromfile(char *path, u8 __user *buf, u32 sz)
-{
- int ret = -1;
- mm_segment_t oldfs;
- struct file *fp;
-
- if (path && buf) {
- ret = openfile(&fp, path, O_RDONLY, 0);
- if (0 == ret) {
- DBG_88E("%s openfile path:%s fp =%p\n", __func__,
- path, fp);
-
- oldfs = get_fs(); set_fs(get_ds());
- ret = readfile(fp, buf, sz);
- set_fs(oldfs);
- closefile(fp);
-
- DBG_88E("%s readfile, ret:%d\n", __func__, ret);
-
- } else {
- DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__,
- path, ret);
- }
- } else {
- DBG_88E("%s NULL pointer\n", __func__);
- ret = -EINVAL;
- }
- return ret;
-}
-
-/*
-* Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file
-* @param path the path of the file to open and write
-* @param buf the starting address of the data to write into file
-* @param sz how many bytes to write at most
-* @return the byte we've written, or Linux specific error code
-*/
-static int storetofile(char *path, u8 __user *buf, u32 sz)
-{
- int ret = 0;
- mm_segment_t oldfs;
- struct file *fp;
-
- if (path && buf) {
- ret = openfile(&fp, path, O_CREAT|O_WRONLY, 0666);
- if (0 == ret) {
- DBG_88E("%s openfile path:%s fp =%p\n", __func__, path, fp);
-
- oldfs = get_fs(); set_fs(get_ds());
- ret = writefile(fp, buf, sz);
- set_fs(oldfs);
- closefile(fp);
-
- DBG_88E("%s writefile, ret:%d\n", __func__, ret);
-
- } else {
- DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__, path, ret);
- }
- } else {
- DBG_88E("%s NULL pointer\n", __func__);
- ret = -EINVAL;
- }
- return ret;
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return true or false
-*/
-int rtw_is_file_readable(char *path)
-{
- if (isfilereadable(path) == 0)
- return true;
- else
- return false;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read
-*/
-int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz)
-{
- int ret = retrievefromfile(path, buf, sz);
-
- return ret >= 0 ? ret : 0;
-}
-
-/*
- * Open the file with @param path and wirte @param sz byte of data
- * starting from @param buf into the file
- * @param path the path of the file to open and write
- * @param buf the starting address of the data to write into file
- * @param sz how many bytes to write at most
- * @return the byte we've written
- */
-int rtw_store_to_file(char *path, u8 __user *buf, u32 sz)
-{
- int ret = storetofile(path, buf, sz);
- return ret >= 0 ? ret : 0;
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
void *old_priv)
{
@@ -627,13 +419,14 @@ RETURN:
int rtw_change_ifname(struct adapter *padapter, const char *ifname)
{
struct net_device *pnetdev;
- struct net_device *cur_pnetdev = padapter->pnetdev;
+ struct net_device *cur_pnetdev;
struct rereg_nd_name_data *rereg_priv;
int ret;
if (!padapter)
goto error;
+ cur_pnetdev = padapter->pnetdev;
rereg_priv = &padapter->rereg_nd_name_priv;
/* free the old_pnetdev */
@@ -794,7 +587,7 @@ void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
}
/**
- * rtw_cbuf_alloc - allocte a rtw_cbuf with given size and do initialization
+ * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization
* @size: size of pointer
*
* Returns: pointer to struct rtw_cbuf, NULL for allocation failure
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index e2f4e7d7717..3852ff43810 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -77,8 +77,7 @@ int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
int rtw_os_recvbuf_resource_free(struct adapter *padapter,
struct recv_buf *precvbuf)
{
- if (precvbuf->purb)
- usb_free_urb(precvbuf->purb);
+ usb_free_urb(precvbuf->purb);
return _SUCCESS;
}
@@ -224,8 +223,7 @@ _func_exit_;
_recv_indicatepkt_drop:
/* enqueue back to free_recv_queue */
- if (precv_frame)
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
_func_exit_;
return _FAIL;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 9ca3180ebaa..7d14779310d 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -737,7 +737,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
status = _SUCCESS;
free_hal_data:
- if (status != _SUCCESS && padapter->HalData)
+ if (status != _SUCCESS)
kfree(padapter->HalData);
handle_dualmac:
if (status != _SUCCESS)
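The recv_linux.c and usb_intf.c hunks above rely on usb_free_urb() and kfree() being defined no-ops when passed NULL, so the surrounding guards were redundant. In sketch form (function and parameter names hypothetical):

        #include <linux/slab.h>
        #include <linux/usb.h>

        static void free_rx_resources(struct urb *urb, void *buf)
        {
                usb_free_urb(urb);      /* safe when urb == NULL */
                kfree(buf);             /* safe when buf == NULL */
        }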
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index f7b14f8b7b8..1260f10944e 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -151,7 +151,7 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
MaxChnlNum = pTriple->FirstChnl + j;
}
- pTriple = (struct chnl_txpow_triple *)((u8*)pTriple + 3);
+ pTriple = (struct chnl_txpow_triple *)((u8 *)pTriple + 3);
}
UPDATE_CIE_SRC(dev, pTaddr);
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 71f4549a378..fb7683fa5ff 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -38,7 +38,7 @@ enum dot11d_state {
/**
* struct rt_dot11d_info * @CountryIeLen: value greater than 0 if @CountryIeBuf contains
* valid country information element.
- * @chanell_map: holds channel values
+ * @channel_map: holds channel values
* 0 - invalid,
* 1 - valid (active scan),
* 2 - valid (passive scan)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 0da56c80f08..5af1c19142d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -140,7 +140,7 @@ bool phy_RF8256_Config_ParaFile(struct net_device *dev)
rtStatus = rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF,
(enum rf90_radio_path)eRFPath);
- if (rtStatus != true) {
+ if (!rtStatus) {
RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check "
"Radio[%d] Fail!!\n", eRFPath);
goto phy_RF8256_Config_ParaFile_Fail;
@@ -245,7 +245,7 @@ void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel)
struct r8192_priv *priv = rtllib_priv(dev);
TxAGC = powerlevel;
- if (priv->bDynamicTxLowPower == true) {
+ if (priv->bDynamicTxLowPower) {
if (priv->CustomerID == RT_CID_819x_Netcore)
TxAGC = 0x22;
else
@@ -294,7 +294,7 @@ void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel)
priv->Pwr_Track = writeVal_tmp;
}
- if (priv->bDynamicTxHighPower == true)
+ if (priv->bDynamicTxHighPower)
writeVal = 0x03030303;
else
writeVal = (byte3 << 24) | (byte2 << 16) |
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 74fbd70d583..2cace9a4525 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -720,7 +720,7 @@ start:
}
priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
- if (priv->RegRfOff == true)
+ if (priv->RegRfOff)
priv->rtllib->eRFPowerState = eRfOff;
ulRegRead = read_nic_dword(dev, CPU_GEN);
@@ -745,7 +745,7 @@ start:
}
RT_TRACE(COMP_INIT, "BB Config Start!\n");
rtStatus = rtl8192_BBConfig(dev);
- if (rtStatus != true) {
+ if (!rtStatus) {
RT_TRACE(COMP_ERR, "BB Config failed\n");
return rtStatus;
}
@@ -856,7 +856,7 @@ start:
if (priv->ResetProgress == RESET_TYPE_NORESET) {
RT_TRACE(COMP_INIT, "RF Config Started!\n");
rtStatus = rtl8192_phy_RFConfig(dev);
- if (rtStatus != true) {
+ if (!rtStatus) {
RT_TRACE(COMP_ERR, "RF Config failed\n");
return rtStatus;
}
@@ -869,7 +869,7 @@ start:
write_nic_byte(dev, 0x87, 0x0);
- if (priv->RegRfOff == true) {
+ if (priv->RegRfOff) {
RT_TRACE((COMP_INIT | COMP_RF | COMP_POWER),
"%s(): Turn off RF for RegRfOff ----------\n",
__func__);
@@ -1184,7 +1184,7 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
cb_desc);
if (pci_dma_mapping_error(priv->pdev, mapping))
- RT_TRACE(COMP_ERR, "DMA Mapping error\n");;
+ RT_TRACE(COMP_ERR, "DMA Mapping error\n");
if (cb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1283,7 +1283,7 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping))
- RT_TRACE(COMP_ERR, "DMA Mapping error\n");;
+ RT_TRACE(COMP_ERR, "DMA Mapping error\n");
memset(entry, 0, 12);
entry->LINIP = cb_desc->bLastIniPkt;
entry->FirstSeg = 1;
@@ -1866,15 +1866,15 @@ static void rtl8192_TranslateRxSignalStuff(struct net_device *dev,
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- bpacket_match_bssid = ((RTLLIB_FTYPE_CTL != type) &&
- (!compare_ether_addr(priv->rtllib->
- current_network.bssid,
- (fc & RTLLIB_FCTL_TODS) ? hdr->addr1 :
- (fc & RTLLIB_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
- && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
- bpacket_toself = bpacket_match_bssid && /* check this */
- (!compare_ether_addr(praddr,
- priv->rtllib->dev->dev_addr));
+ bpacket_match_bssid =
+ ((RTLLIB_FTYPE_CTL != type) &&
+ ether_addr_equal(priv->rtllib->current_network.bssid,
+ (fc & RTLLIB_FCTL_TODS) ? hdr->addr1 :
+ (fc & RTLLIB_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3) &&
+ (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
+ bpacket_toself = bpacket_match_bssid && /* check this */
+ ether_addr_equal(praddr, priv->rtllib->dev->dev_addr);
if (WLAN_FC_GET_FRAMETYPE(fc) == RTLLIB_STYPE_BEACON)
bPacketBeacon = true;
if (bpacket_match_bssid)
@@ -2213,7 +2213,7 @@ rtl8192_InitializeVariables(struct net_device *dev)
priv->MidHighPwrTHR_L2 = 0x40;
priv->PwrDomainProtect = false;
- priv->bfirst_after_down = 0;
+ priv->bfirst_after_down = false;
}
void rtl8192_EnableInterrupt(struct net_device *dev)
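The bpacket_match_bssid rework above also swaps compare_ether_addr() for ether_addr_equal(). The two have opposite senses: compare_ether_addr() follows memcmp() semantics (0 means the ETH_ALEN addresses match), while ether_addr_equal() is a plain boolean, so every "!compare_ether_addr(a, b)" becomes "ether_addr_equal(a, b)". A small hedged sketch (helper name hypothetical):

        #include <linux/etherdevice.h>

        /* ether_addr_equal() (and the is_zero_ether_addr() used in the later
         * rtllib_rx.c hunks) read as plain predicates rather than memcmp-style
         * return codes. */
        static bool frame_is_from_our_bss(const u8 *bssid, const u8 *cur_bssid)
        {
                return ether_addr_equal(bssid, cur_bssid);
        }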
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index dd2a96bfcc0..abcd22f8fdd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -329,7 +329,7 @@ bool init_firmware(struct net_device *dev)
}
rt_status = fw_download_code(dev, mapped_file, file_length);
- if (rt_status != true) {
+ if (!rt_status) {
goto download_firmware_fail;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 9676c591c85..21e6ddde68a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -567,7 +567,7 @@ static bool rtl8192_BB_Config_ParaFile(struct net_device *dev)
rtStatus = rtl8192_phy_checkBBAndRF(dev,
(enum hw90_block)eCheckItem,
(enum rf90_radio_path)0);
- if (rtStatus != true) {
+ if (!rtStatus) {
RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():"
"Check PHY%d Fail!!\n", eCheckItem-1);
return rtStatus;
@@ -1425,7 +1425,7 @@ static bool SetRFPowerState8190(struct net_device *dev,
u8 i = 0, QueueID = 0;
struct rtl8192_tx_ring *ring = NULL;
- if (priv->SetRFPowerStateInProgress == true)
+ if (priv->SetRFPowerStateInProgress)
return false;
RT_TRACE(COMP_PS, "===========> SetRFPowerState8190()!\n");
priv->SetRFPowerStateInProgress = true;
@@ -1443,10 +1443,9 @@ static bool SetRFPowerState8190(struct net_device *dev,
InitilizeCount--;
priv->RegRfOff = false;
rtstatus = NicIFEnableNIC(dev);
- } while ((rtstatus != true) &&
- (InitilizeCount > 0));
+ } while (!rtstatus && (InitilizeCount > 0));
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(COMP_ERR, "%s():Initialize Ada"
"pter fail,return\n",
__func__);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index e0684435555..d93caca9657 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -370,8 +370,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
case eRfOn:
priv->rtllib->RfOffReason &= (~ChangeSource);
- if ((ChangeSource == RF_CHANGE_BY_HW) &&
- (priv->bHwRadioOff == true))
+ if ((ChangeSource == RF_CHANGE_BY_HW) && priv->bHwRadioOff)
priv->bHwRadioOff = false;
if (!priv->rtllib->RfOffReason) {
@@ -405,8 +404,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
disas_lv_ss);
}
}
- if ((ChangeSource == RF_CHANGE_BY_HW) &&
- (priv->bHwRadioOff == false))
+ if ((ChangeSource == RF_CHANGE_BY_HW) && !priv->bHwRadioOff)
priv->bHwRadioOff = true;
priv->rtllib->RfOffReason |= ChangeSource;
bActionAllowed = true;
@@ -428,7 +426,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
PHY_SetRFPowerState(dev, StateToSet);
if (StateToSet == eRfOn) {
- if (bConnectBySSID && (priv->blinked_ingpio == true)) {
+ if (bConnectBySSID && priv->blinked_ingpio) {
queue_delayed_work_rsl(ieee->wq,
&ieee->associate_procedure_wq, 0);
priv->blinked_ingpio = false;
@@ -955,7 +953,7 @@ static int _rtl8192_sta_up(struct net_device *dev, bool is_silent_reset)
RT_TRACE(COMP_INIT, "Bringing up iface");
priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
- if (init_status != true) {
+ if (!init_status) {
RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization is failed!\n",
__func__);
priv->bfirst_init = false;
@@ -1000,7 +998,7 @@ static int rtl8192_sta_down(struct net_device *dev, bool shutdownrf)
priv->bDriverIsGoingToUnload = true;
priv->up = 0;
priv->rtllib->ieee_up = 0;
- priv->bfirst_after_down = 1;
+ priv->bfirst_after_down = true;
RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
@@ -1119,8 +1117,8 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
priv->rtllib->hwscan_sem_up = 1;
priv->rtllib->status = 0;
priv->H2CTxCmdSeq = 0;
- priv->bDisableFrameBursting = 0;
- priv->bDMInitialGainEnable = 1;
+ priv->bDisableFrameBursting = false;
+ priv->bDMInitialGainEnable = true;
priv->polling_timer_on = 0;
priv->up_first_time = 1;
priv->blinked_ingpio = false;
@@ -1162,7 +1160,7 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
priv->CckPwEnl = 6;
priv->ScanDelay = 50;
priv->ResetProgress = RESET_TYPE_NORESET;
- priv->bForcedSilentReset = 0;
+ priv->bForcedSilentReset = false;
priv->bDisableNormalResetCheck = false;
priv->force_reset = false;
memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
@@ -1171,7 +1169,7 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
priv->RxCounter = 0;
priv->rtllib->wx_set_enc = 0;
priv->bHwRadioOff = false;
- priv->RegRfOff = 0;
+ priv->RegRfOff = false;
priv->isRFOff = false;
priv->bInPowerSaveMode = false;
priv->rtllib->RfOffReason = 0;
@@ -1647,7 +1645,7 @@ void rtl819x_watchdog_wqcallback(void *data)
bool bHigherBusyRxTraffic = false;
bool bEnterPS = false;
- if (IS_NIC_DOWN(priv) || (priv->bHwRadioOff == true))
+ if (IS_NIC_DOWN(priv) || priv->bHwRadioOff)
return;
if (priv->rtllib->state >= RTLLIB_LINKED) {
@@ -1888,9 +1886,8 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb_push(skb, priv->rtllib->tx_headroom);
ret = rtl8192_tx(dev, skb);
- if (ret != 0) {
+ if (ret != 0)
kfree_skb(skb);
- };
if (queue_index != MGNT_QUEUE) {
priv->rtllib->stats.tx_bytes += (skb->len -
@@ -1898,7 +1895,6 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
priv->rtllib->stats.tx_packets++;
}
-
return;
}
@@ -1930,15 +1926,11 @@ int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->rtllib->tx_headroom);
ret = rtl8192_tx(dev, skb);
- if (ret != 0) {
+ if (ret != 0)
kfree_skb(skb);
- };
}
-
-
return ret;
-
}
static void rtl8192_tx_isr(struct net_device *dev, int prio)
@@ -2601,14 +2593,9 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
goto out;
}
- ipw = kmalloc(p->length, GFP_KERNEL);
- if (ipw == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(ipw, p->pointer, p->length)) {
- kfree(ipw);
- ret = -EFAULT;
+ ipw = memdup_user(p->pointer, p->length);
+ if (IS_ERR(ipw)) {
+ ret = PTR_ERR(ipw);
goto out;
}
@@ -2982,7 +2969,6 @@ err_rel_rtllib:
free_rtllib(dev);
DMESG("wlan driver load failed\n");
- pci_set_drvdata(pdev, NULL);
err_pci_disable:
pci_disable_device(pdev);
return err;
@@ -3052,7 +3038,7 @@ bool NicIFEnableNIC(struct net_device *dev)
RT_TRACE(COMP_PS, "===========>%s()\n", __func__);
priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
- if (init_status != true) {
+ if (!init_status) {
RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization is failed!\n",
__func__);
priv->bdisable_nic = false;
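The rtl8192_ioctl() hunk above collapses the kmalloc() + copy_from_user() pair into memdup_user(), which allocates, copies and maps failures to an ERR_PTR (-ENOMEM or -EFAULT) in one call, so the caller only needs IS_ERR()/PTR_ERR(). A hedged sketch of the resulting shape, with hypothetical names:

        #include <linux/err.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        static int handle_user_blob(const void __user *uptr, size_t len)
        {
                void *buf = memdup_user(uptr, len);     /* kmalloc + copy_from_user */

                if (IS_ERR(buf))
                        return PTR_ERR(buf);
                /* ... consume buf ... */
                kfree(buf);
                return 0;
        }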
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 1853665764a..2297fc20fd4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -535,7 +535,7 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
}
}
- if (viviflag == true) {
+ if (viviflag) {
write_nic_byte(dev, Pw_Track_Flag, 0);
viviflag = false;
RT_TRACE(COMP_POWER_TRACKING, "we filted this data\n");
@@ -2265,7 +2265,7 @@ void dm_CheckRfCtrlGPIO(void *data)
return;
if (priv->bfirst_after_down) {
- priv->bfirst_after_down = 1;
+ priv->bfirst_after_down = true;
return;
}
@@ -2273,12 +2273,12 @@ void dm_CheckRfCtrlGPIO(void *data)
eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
- if ((priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn)) {
+ if (priv->bHwRadioOff && (eRfPowerStateToSet == eRfOn)) {
RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
printk(KERN_INFO "gpiochangeRF - HW Radio ON\n");
priv->bHwRadioOff = false;
bActuallySet = true;
- } else if ((priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff)) {
+ } else if (!priv->bHwRadioOff && (eRfPowerStateToSet == eRfOff)) {
RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
printk(KERN_INFO "gpiochangeRF - HW Radio OFF\n");
priv->bHwRadioOff = true;
@@ -2289,7 +2289,7 @@ void dm_CheckRfCtrlGPIO(void *data)
mdelay(1000);
priv->bHwRfOffAction = 1;
MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW, true);
- if (priv->bHwRadioOff == true)
+ if (priv->bHwRadioOff)
argv[1] = "RFOFF";
else
argv[1] = "RFON";
@@ -2312,9 +2312,9 @@ void dm_rf_pathcheck_workitemcallback(void *data)
for (i = 0; i < RF90_PATH_MAX; i++) {
if (rfpath & (0x01<<i))
- priv->brfpath_rxenable[i] = 1;
+ priv->brfpath_rxenable[i] = true;
else
- priv->brfpath_rxenable[i] = 0;
+ priv->brfpath_rxenable[i] = false;
}
if (!DM_RxPathSelTable.Enable)
return;
@@ -2946,8 +2946,7 @@ static void dm_dynamic_txpower(struct net_device *dev)
priv->bDynamicTxLowPower = false;
} else {
if (priv->undecorated_smoothed_pwdb <
- txlowpower_threshold &&
- priv->bDynamicTxHighPower == true)
+ txlowpower_threshold && priv->bDynamicTxHighPower)
priv->bDynamicTxHighPower = false;
if (priv->undecorated_smoothed_pwdb < 35)
priv->bDynamicTxLowPower = true;
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 658e875232a..29608e5488a 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -264,7 +264,7 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
psearch_list = &ieee->Rx_TS_Admit_List;
for (dir = 0; dir <= DIR_BI_DIR; dir++) {
- if (search_dir[dir] == false)
+ if (!search_dir[dir])
continue;
list_for_each_entry(pRet, psearch_list, List) {
if (memcmp(pRet->Addr, Addr, 6) == 0)
@@ -348,7 +348,7 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
if (*ppTS != NULL) {
return true;
} else {
- if (bAddNewTs == false) {
+ if (!bAddNewTs) {
RTLLIB_DEBUG(RTLLIB_DL_TS, "add new TS failed"
"(tid:%d)\n", UP);
return false;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 8aeaed5a987..1a011b9b9da 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -873,11 +873,11 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
if (net_ratelimit())
printk(KERN_INFO "%s: find HTCControl!\n", __func__);
hdrlen += 4;
- rx_stats->bContainHTC = 1;
+ rx_stats->bContainHTC = true;
}
if (RTLLIB_QOS_HAS_SEQ(fc))
- rx_stats->bIsQosData = 1;
+ rx_stats->bIsQosData = true;
return hdrlen;
}
@@ -957,16 +957,15 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
u8 *dst, u8 *src, u8 *bssid, u8 *addr2)
{
- u8 zero_addr[ETH_ALEN] = {0};
u8 type, stype;
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
/* Filter frames from different BSS */
- if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS)
- && (compare_ether_addr(ieee->current_network.bssid, bssid) != 0)
- && memcmp(ieee->current_network.bssid, zero_addr, ETH_ALEN)) {
+ if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS) &&
+ !ether_addr_equal(ieee->current_network.bssid, bssid) &&
+ !is_zero_ether_addr(ieee->current_network.bssid)) {
return -1;
}
@@ -974,8 +973,8 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn &&
ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame) {
if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) &&
- (compare_ether_addr(dst, ieee->current_network.bssid) != 0) &&
- (compare_ether_addr(bssid, ieee->current_network.bssid) == 0)) {
+ !ether_addr_equal(dst, ieee->current_network.bssid) &&
+ ether_addr_equal(bssid, ieee->current_network.bssid)) {
return -1;
}
}
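
The filter hunk above swaps memcmp()/compare_ether_addr() for the <linux/etherdevice.h> helpers, which return bool and read more directly. A minimal sketch of the helpers in use, assuming kernel context; the wrapper function below is illustrative, not part of the driver:

#include <linux/etherdevice.h>

/* True if the frame came from a BSS other than ours; a still-zero BSSID
 * means we are not associated yet, so nothing is filtered. */
static bool frame_is_foreign_bss(const u8 *our_bssid, const u8 *frame_bssid)
{
	if (is_zero_ether_addr(our_bssid))
		return false;
	return !ether_addr_equal(our_bssid, frame_bssid);
}
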
@@ -1275,7 +1274,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/*Filter pkt not to me*/
multicast = is_multicast_ether_addr(hdr->addr1);
unicast = !multicast;
- if (unicast && (compare_ether_addr(dev->dev_addr, hdr->addr1) != 0)) {
+ if (unicast && !ether_addr_equal(dev->dev_addr, hdr->addr1)) {
if (ieee->bNetPromiscuousMode)
bToOtherSTA = true;
else
@@ -1730,7 +1729,7 @@ static inline void rtllib_extract_country_ie(
network->CountryIeLen = info_element->len;
if (!IS_COUNTRY_IE_VALID(ieee)) {
- if ((rtllib_act_scanning(ieee, false) == true) && (ieee->FirstIe_InScan == 1))
+ if (rtllib_act_scanning(ieee, false) && ieee->FirstIe_InScan)
printk(KERN_INFO "Received beacon ContryIE, SSID: <%s>\n", network->ssid);
Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 0cbf6f5593a..933bd6deaca 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -604,7 +604,7 @@ static void rtllib_softmac_scan_wq(void *data)
if (!ieee->ieee_up)
return;
- if (rtllib_act_scanning(ieee, true) == true)
+ if (rtllib_act_scanning(ieee, true))
return;
down(&ieee->scan_sem);
@@ -705,7 +705,7 @@ static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
ieee->scan_watch_dog = 0;
if (ieee->scanning_continue == 1) {
ieee->scanning_continue = 0;
- ieee->actscanning = 0;
+ ieee->actscanning = false;
cancel_delayed_work(&ieee->softmac_scan_wq);
}
@@ -1202,7 +1202,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
if ((ieee->rtllib_ap_sec_type &&
(ieee->rtllib_ap_sec_type(ieee) & SEC_ALG_TKIP)) ||
- (ieee->bForcedBgMode == true)) {
+ ieee->bForcedBgMode) {
ieee->pHTInfo->bEnableHT = 0;
ieee->mode = WIRELESS_MODE_G;
}
@@ -1535,7 +1535,7 @@ static void rtllib_associate_complete_wq(void *data)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
(&(ieee->PowerSaveControl));
printk(KERN_INFO "Associated successfully\n");
- if (ieee->is_silent_reset == 0) {
+ if (!ieee->is_silent_reset) {
printk(KERN_INFO "normal associate\n");
notify_wx_assoc_event(ieee);
}
@@ -1572,9 +1572,9 @@ static void rtllib_associate_complete_wq(void *data)
pPSC->LpsIdleCount = 0;
ieee->link_change(ieee->dev);
- if (ieee->is_silent_reset == 1) {
+ if (ieee->is_silent_reset) {
printk(KERN_INFO "silent reset associate\n");
- ieee->is_silent_reset = 0;
+ ieee->is_silent_reset = false;
}
if (ieee->data_hard_resume)
@@ -2005,7 +2005,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
return 0;
if (time) {
- if (ieee->bAwakePktSent == true) {
+ if (ieee->bAwakePktSent) {
pPSC->LPSAwakeIntvl = 1;
} else {
u8 MaxPeriod = 1;
@@ -2338,8 +2338,7 @@ inline int rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
}
if (ieee->current_network.mode ==
- IEEE_N_24G &&
- bHalfSupportNmode == true) {
+ IEEE_N_24G && bHalfSupportNmode) {
printk(KERN_INFO "======>enter "
"half N mode\n");
ieee->bHalfWirelessN24GMode =
@@ -3098,7 +3097,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->sta_edca_param[2] = 0x005E4342;
ieee->sta_edca_param[3] = 0x002F3262;
ieee->aggregation = true;
- ieee->enable_rx_imm_BA = 1;
+ ieee->enable_rx_imm_BA = true;
ieee->tx_pending.txb = NULL;
_setup_timer(&ieee->associate_timer,
@@ -3591,14 +3590,9 @@ int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee, struct iw_point *p,
goto out;
}
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- ret = -EFAULT;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ ret = PTR_ERR(param);
goto out;
}
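
The hunk above collapses the kmalloc() + copy_from_user() + kfree-on-failure sequence into memdup_user(), which allocates and copies in one step and reports failure through an ERR_PTR() value. A minimal sketch of the pattern, assuming kernel context and an illustrative helper name:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int dup_ioctl_param(void __user *uptr, size_t len, void **out)
{
	void *param;

	/* memdup_user() never returns NULL: the result is either a valid
	 * kernel copy of the user buffer or an ERR_PTR()-encoded errno. */
	param = memdup_user(uptr, len);
	if (IS_ERR(param))
		return PTR_ERR(param);

	*out = param;	/* caller kfree()s this when done */
	return 0;
}
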
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 1cc6a9d5e8a..3183627823f 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -908,7 +908,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
ieee->rate, ieee->HTCurrentOperaRate);
- if (bdhcp == true) {
+ if (bdhcp) {
if (ieee->pHTInfo->IOTAction &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index c7e8d4d8ec2..13af43b90fc 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -753,7 +753,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
/* leave break out intentionly */
case IW_MLME_DISASSOC:
- if (deauth == true)
+ if (deauth)
printk(KERN_INFO "disauth packet !\n");
else
printk(KERN_INFO "dis associate packet!\n");
diff --git a/drivers/staging/rtl8192u/dot11d.h b/drivers/staging/rtl8192u/dot11d.h
deleted file mode 100644
index 92e7a00f3ee..00000000000
--- a/drivers/staging/rtl8192u/dot11d.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __INC_DOT11D_H
-#define __INC_DOT11D_H
-
-#include "ieee80211/ieee80211.h"
-
-
-typedef struct _CHNL_TXPOWER_TRIPLE {
- u8 FirstChnl;
- u8 NumChnls;
- u8 MaxTxPowerInDbm;
-} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
-
-typedef enum _DOT11D_STATE {
- DOT11D_STATE_NONE = 0,
- DOT11D_STATE_LEARNED,
- DOT11D_STATE_DONE,
-} DOT11D_STATE;
-
-typedef struct _RT_DOT11D_INFO {
- /* DECLARE_RT_OBJECT(RT_DOT11D_INFO); */
-
- bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
-
- u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
- u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
- u8 CountryIeWatchdog;
-
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* !Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
- u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
-
- DOT11D_STATE State;
-} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a, b) (((a)[0] == (b)[0] && \
- (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && \
- (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
-#define cpMacAddr(des, src) ((des)[0] = (src)[0], \
- (des)[1] = (src)[1], (des)[2] = (src)[2], \
- (des)[3] = (src)[3], (des)[4] = (src)[4], \
- (des)[5] = (src)[5])
-#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
-
-#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
-#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
-
-#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-
-#define IS_COUNTRY_IE_CHANGED(__pIeeeDev, __Ie) \
- (((__Ie).Length == 0 || (__Ie).Length != GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen) ? \
- FALSE : \
- (!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, (__Ie).Octet, (__Ie).Length)))
-
-#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
-#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
-#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
-
-#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-
-
-void
-Dot11d_Init(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_Reset(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 *pTaddr,
- u16 CoutryIeLen,
- u8 *pCoutryIe
- );
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- );
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device *dev
- );
-
-int IsLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-);
-
-int ToLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-);
-#endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
index 6aa8c15eba3..bd75e29adc2 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.h
@@ -4,42 +4,43 @@
#include "ieee80211.h"
-//#define DOT11D_MAX_CHNL_NUM 83
-
typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
u8 NumChnls;
u8 MaxTxPowerInDbm;
-}CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
+} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
typedef enum _DOT11D_STATE {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
-}DOT11D_STATE;
+} DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
- //DECLARE_RT_OBJECT(RT_DOT11D_INFO);
+ /* DECLARE_RT_OBJECT(RT_DOT11D_INFO); */
- bool bEnabled; // dot11MultiDomainCapabilityEnabled
+ bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
- u16 CountryIeLen; // > 0 if CountryIeBuf[] contains valid country information element.
+ u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; // Source AP of the country IE.
+ u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
u8 CountryIeWatchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; //!!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan)
- //u8 ChnlListLen; // #Bytes valid in ChnlList[].
- //u8 ChnlList[DOT11D_MAX_CHNL_NUM];
+ u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* !Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
-}RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
-#define cpMacAddr(des,src) ((des)[0]=(src)[0],(des)[1]=(src)[1],(des)[2]=(src)[2],(des)[3]=(src)[3],(des)[4]=(src)[4],(des)[5]=(src)[5])
+} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && \
+ (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && \
+ (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
+#define cpMacAddr(des, src) ((des)[0] = (src)[0], \
+ (des)[1] = (src)[1], (des)[2] = (src)[2], \
+ (des)[3] = (src)[3], (des)[4] = (src)[4], \
+ (des)[5] = (src)[5])
#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
-#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
+#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
@@ -51,9 +52,9 @@ typedef struct _RT_DOT11D_INFO {
(!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, (__Ie).Octet, (__Ie).Length)))
#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog
+#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
-#define UPDATE_CIE_WATCHDOG(__pIeeeDev) ++GET_CIE_WATCHDOG(__pIeeeDev)
+#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
@@ -72,7 +73,7 @@ void
Dot11d_UpdateCountryIe(
struct ieee80211_device *dev,
u8 *pTaddr,
- u16 CoutryIeLen,
+ u16 CoutryIeLen,
u8 *pCoutryIe
);
@@ -96,4 +97,4 @@ int ToLegalChannel(
struct ieee80211_device *dev,
u8 channel
);
-#endif // #ifndef __INC_DOT11D_H
+#endif /* #ifndef __INC_DOT11D_H */
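
Besides the comment-style conversion, this header change wraps macro bodies such as IS_DOT11D_ENABLE() and UPDATE_CIE_WATCHDOG() in parentheses, following the usual kernel-style rule (checkpatch warns when a macro expands to an unparenthesized complex value). A small standalone illustration of why the rule exists; the macro names below are made up:

#include <stdio.h>

/* Body not parenthesized: the expansion merges with surrounding operators. */
#define WATCHDOG_NEXT_BAD(w)	(w) + 1
/* Body parenthesized: the macro always behaves as a single expression. */
#define WATCHDOG_NEXT_GOOD(w)	((w) + 1)

int main(void)
{
	int w = 1;

	printf("%d\n", 2 * WATCHDOG_NEXT_BAD(w));	/* 2*1 + 1 = 3 */
	printf("%d\n", 2 * WATCHDOG_NEXT_GOOD(w));	/* 2*(1+1) = 4 */
	return 0;
}
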
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 434c4312718..4b036a8db5a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -241,7 +241,7 @@ static int debug = \
//IEEE80211_DL_DATA |
IEEE80211_DL_ERR //awayls open this flags to show error out
;
-struct proc_dir_entry *ieee80211_proc;
+static struct proc_dir_entry *ieee80211_proc;
static int show_debug_level(struct seq_file *m, void *v)
{
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 59900bfa1c1..e730ed64c0f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -2166,7 +2166,8 @@ static inline u8 ieee80211_SignalStrengthTranslate(
return RetSS;
}
-long ieee80211_translate_todbm(u8 signal_strength_index )// 0-100 index.
+/* 0-100 index */
+static long ieee80211_translate_todbm(u8 signal_strength_index)
{
long signal_power; // in dBm.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 5fd696926ee..662c7e41cd5 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -3150,14 +3150,9 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
goto out;
}
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL){
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- ret = -EFAULT;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ ret = PTR_ERR(param);
goto out;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index a7bcc64ff22..157b2d7466e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -237,8 +237,8 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) {
kfree(txb);
}
-struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
- int gfp_mask)
+static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
+ gfp_t gfp_mask)
{
struct ieee80211_txb *txb;
int i;
@@ -303,7 +303,8 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
#define SN_LESS(a, b) (((a-b)&0x800)!=0)
-void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee, struct sk_buff *skb, cb_desc *tcb_desc)
+static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
+ struct sk_buff *skb, cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PTX_TS_RECORD pTxTs = NULL;
@@ -412,7 +413,8 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
tcb_desc->bUseShortGI = true;
}
-void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
+static void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee,
+ cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -432,7 +434,9 @@ void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee, cb_desc *tcb_d
return;
}
-void ieee80211_query_protectionmode(struct ieee80211_device *ieee, cb_desc *tcb_desc, struct sk_buff *skb)
+static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
+ cb_desc *tcb_desc,
+ struct sk_buff *skb)
{
// Common Settings
tcb_desc->bRTSSTBC = false;
@@ -543,7 +547,8 @@ NO_PROTECTION:
}
-void ieee80211_txrate_selectmode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
+static void ieee80211_txrate_selectmode(struct ieee80211_device *ieee,
+ cb_desc *tcb_desc)
{
#ifdef TO_DO_LIST
if(!IsDataFrame(pFrame))
@@ -573,7 +578,8 @@ void ieee80211_txrate_selectmode(struct ieee80211_device *ieee, cb_desc *tcb_des
}
}
-void ieee80211_query_seqnum(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *dst)
+static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
+ struct sk_buff *skb, u8 *dst)
{
if (is_multicast_ether_addr(dst))
return;
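
The ieee80211_tx.c hunks above give helpers that are only called from this file internal linkage (static) and type the allocation-flags parameter as gfp_t rather than int, so sparse can check it. A minimal sketch of the same two changes, using an illustrative helper rather than the driver's own function:

#include <linux/skbuff.h>
#include <linux/slab.h>

/* static: no external symbol is exported for a file-local helper.
 * gfp_t: allocation flags carry a sparse-checked type, so passing a plain
 * int or flags from an unrelated API is diagnosed. */
static struct sk_buff *alloc_frag_skb(unsigned int size, gfp_t gfp_mask)
{
	return __dev_alloc_skb(size, gfp_mask);
}

Callers would pass GFP_ATOMIC from the transmit path and GFP_KERNEL from process context, exactly as the converted ieee80211_alloc_txb() callers do.
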
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index db0db934748..3684da340bd 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -251,7 +251,8 @@ static struct sk_buff *ieee80211_DELBA(
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA)
+static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee,
+ u8 *dst, PBA_RECORD pBA)
{
struct sk_buff *skb = NULL;
skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); //construct ACT_ADDBAREQ frames so set statuscode zero.
@@ -278,7 +279,8 @@ void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, u16 StatusCode)
+static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
+ PBA_RECORD pBA, u16 StatusCode)
{
struct sk_buff *skb = NULL;
skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames
diff --git a/drivers/staging/rtl8192u/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211_crypt.h
deleted file mode 100644
index 0b4ea431982..00000000000
--- a/drivers/staging/rtl8192u/ieee80211_crypt.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- *
- * Copyright (c) 2004, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-/*
- * This file defines the interface to the ieee80211 crypto module.
- */
-#ifndef IEEE80211_CRYPT_H
-#define IEEE80211_CRYPT_H
-
-#include <linux/skbuff.h>
-
-struct ieee80211_crypto_ops {
- const char *name;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void * (*init)(int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv);
-
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- char * (*print_stats)(char *p, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the buffer and correct
- * length must be returned */
- int extra_prefix_len, extra_postfix_len;
-
- struct module *owner;
-};
-
-struct ieee80211_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct ieee80211_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
-int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
-void ieee80211_crypt_deinit_handler(unsigned long);
-void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
- struct ieee80211_crypt_data **crypt);
-
-#endif
diff --git a/drivers/staging/rtl8192u/r8180_pm.c b/drivers/staging/rtl8192u/r8180_pm.c
deleted file mode 100644
index 999968d4172..00000000000
--- a/drivers/staging/rtl8192u/r8180_pm.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- Power management interface routines.
- Written by Mariusz Matuszek.
- This code is currently just a placeholder for later work and
- does not do anything useful.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public Licence)
-*/
-
-#ifdef CONFIG_RTL8180_PM
-
-
-#include "r8180_hw.h"
-#include "r8180_pm.h"
-
-int rtl8180_save_state (struct pci_dev *dev, u32 state)
-{
- printk(KERN_NOTICE "r8180 save state call (state %u).\n", state);
- return(-EAGAIN);
-}
-
-
-int rtl8180_suspend (struct pci_dev *dev, u32 state)
-{
- printk(KERN_NOTICE "r8180 suspend call (state %u).\n", state);
- return(-EAGAIN);
-}
-
-
-int rtl8180_resume (struct pci_dev *dev)
-{
- printk(KERN_NOTICE "r8180 resume call.\n");
- return(-EAGAIN);
-}
-
-
-int rtl8180_enable_wake (struct pci_dev *dev, u32 state, int enable)
-{
- printk(KERN_NOTICE "r8180 enable wake call (state %u, enable %d).\n",
- state, enable);
- return(-EAGAIN);
-}
-
-
-
-#endif //CONFIG_RTL8180_PM
diff --git a/drivers/staging/rtl8192u/r8180_pm.h b/drivers/staging/rtl8192u/r8180_pm.h
deleted file mode 100644
index 4be63da0b78..00000000000
--- a/drivers/staging/rtl8192u/r8180_pm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- Power management interface routines.
- Written by Mariusz Matuszek.
- This code is currently just a placeholder for later work and
- does not do anything useful.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public Licence)
-
-*/
-
-#ifdef CONFIG_RTL8180_PM
-
-#ifndef R8180_PM_H
-#define R8180_PM_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-
-int rtl8180_save_state (struct pci_dev *dev, u32 state);
-int rtl8180_suspend (struct pci_dev *dev, u32 state);
-int rtl8180_resume (struct pci_dev *dev);
-int rtl8180_enable_wake (struct pci_dev *dev, u32 state, int enable);
-
-#endif //R8180_PM_H
-
-#endif // CONFIG_RTL8180_PM
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
index 592e7807fa4..fa6dd37d85e 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.h
@@ -13,11 +13,7 @@
#ifndef RTL8225H
#define RTL8225H
-#ifdef RTL8190P
-#define RTL819X_TOTAL_RF_PATH 4 //for 90P
-#else
#define RTL819X_TOTAL_RF_PATH 2 //for 8192U
-#endif
extern void PHY_SetRF8256Bandwidth(struct net_device *dev , HT_CHANNEL_WIDTH Bandwidth);
extern void PHY_RF8256_Config(struct net_device *dev);
extern void phy_RF8256_Config_ParaFile(struct net_device *dev);
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index b484ee128c1..ad3bc567d35 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -409,15 +409,11 @@ typedef struct rx_drvinfo_819x_usb {
#define USB_HWDESC_HEADER_LEN sizeof(tx_desc_819x_usb)
#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(tx_fwinfo_819x_usb))
#define MAX_FRAGMENT_COUNT 8
-#ifdef RTL8192U
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
#define MAX_TRANSMIT_BUFFER_SIZE 32000
#else
#define MAX_TRANSMIT_BUFFER_SIZE 8000
#endif
-#else
-#define MAX_TRANSMIT_BUFFER_SIZE (1600+(MAX_802_11_HEADER_LENGTH+ENCRYPTION_MAX_OVERHEAD)*MAX_FRAGMENT_COUNT)
-#endif
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
#define TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES (sizeof(tx_desc_819x_usb_aggr_subframe) + sizeof(tx_fwinfo_819x_usb))
#endif
@@ -1158,14 +1154,6 @@ typedef enum {
NIC_8192E = 3,
} nic_t;
-
-#ifdef JOHN_HWSEC
-struct ssid_thread {
- struct net_device *dev;
- u8 name[IW_ESSID_MAX_SIZE + 1];
-};
-#endif
-
bool init_firmware(struct net_device *dev);
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb);
short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index cd0946db025..c2bcbe230ed 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -392,7 +392,7 @@ int read_nic_word(struct net_device *dev, int indx, u16 *data)
return 0;
}
-int read_nic_word_E(struct net_device *dev, int indx, u16 *data)
+static int read_nic_word_E(struct net_device *dev, int indx, u16 *data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -585,7 +585,7 @@ static int proc_get_stats_rx(struct seq_file *m, void *v)
return 0;
}
-void rtl8192_proc_module_init(void)
+static void rtl8192_proc_module_init(void)
{
RT_TRACE(COMP_INIT, "Initializing proc filesystem");
rtl8192_proc = proc_mkdir(RTL819xU_MODULE_NAME, init_net.proc_net);
@@ -631,7 +631,7 @@ static const struct rtl8192_proc_file rtl8192_proc_files[] = {
{ "" }
};
-void rtl8192_proc_init_one(struct net_device *dev)
+static void rtl8192_proc_init_one(struct net_device *dev)
{
const struct rtl8192_proc_file *f;
struct proc_dir_entry *dir;
@@ -656,7 +656,7 @@ void rtl8192_proc_init_one(struct net_device *dev)
}
}
-void rtl8192_proc_remove_one(struct net_device *dev)
+static void rtl8192_proc_remove_one(struct net_device *dev)
{
remove_proc_subtree(dev->name, rtl8192_proc);
}
@@ -755,7 +755,7 @@ void rtl8192_set_chan(struct net_device *dev, short ch)
static void rtl8192_rx_isr(struct urb *urb);
-u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
+static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
#ifdef USB_RX_AGGREGATION_SUPPORT
@@ -998,8 +998,8 @@ static void rtl8192_rx_isr(struct urb *urb)
netdev_err(dev, "can not submit rxurb, err is %x, URB status is %x\n", err, urb->status);
}
-u32 rtl819xusb_rx_command_packet(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
u32 status;
@@ -1609,13 +1609,6 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
#else
idx_pipe = 0x04;
#endif
-#ifdef JOHN_DUMP_TXDESC
- int i;
- printk("<Tx descriptor>--rate %x---", rate);
- for (i = 0; i < 8; i++)
- printk("%8x ", tx[i]);
- printk("\n");
-#endif
usb_fill_bulk_urb(tx_urb, priv->udev, usb_sndbulkpipe(priv->udev, idx_pipe),
skb->data, skb->len, rtl8192_tx_isr, skb);
@@ -1636,7 +1629,7 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
*
* \param QUEUEID Software Queue
*/
-u8 MapHwQueueToFirmwareQueue(u8 QueueID)
+static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
{
u8 QueueSelect = 0x0; //defualt set to
@@ -1723,7 +1716,7 @@ u8 MRateToHwRate8190Pci(u8 rate)
}
-u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc)
+static u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc)
{
u8 tmp_Short;
@@ -1934,7 +1927,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
}
-short rtl8192_usb_initendpoints(struct net_device *dev)
+static short rtl8192_usb_initendpoints(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1992,7 +1985,7 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
}
#ifdef THOMAS_BEACON
-void rtl8192_usb_deleteendpoints(struct net_device *dev)
+static void rtl8192_usb_deleteendpoints(struct net_device *dev)
{
int i;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2009,7 +2002,7 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
priv->oldaddr = NULL;
if (priv->pp_rxskb) {
kfree(priv->pp_rxskb);
- priv->pp_rxskb = 0;
+ priv->pp_rxskb = NULL;
}
}
#else
@@ -2292,7 +2285,7 @@ void rtl8192_update_ratr_table(struct net_device *dev)
static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
-bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
+static bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
@@ -3167,7 +3160,7 @@ static struct net_device_stats *rtl8192_stats(struct net_device *dev)
return &priv->ieee80211->stats;
}
-bool HalTxCheckStuck819xUsb(struct net_device *dev)
+static bool HalTxCheckStuck819xUsb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 RegTxCounter;
@@ -3217,7 +3210,7 @@ RESET_TYPE TxCheckStuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-bool HalRxCheckStuck819xUsb(struct net_device *dev)
+static bool HalRxCheckStuck819xUsb(struct net_device *dev)
{
u16 RegRxCounter;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3259,7 +3252,7 @@ bool HalRxCheckStuck819xUsb(struct net_device *dev)
return bStuck;
}
-RESET_TYPE RxCheckStuck(struct net_device *dev)
+static RESET_TYPE RxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
bool bRxCheck = FALSE;
@@ -3796,14 +3789,9 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
goto out;
}
- ipw = kmalloc(p->length, GFP_KERNEL);
- if (ipw == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(ipw, p->pointer, p->length)) {
- kfree(ipw);
- ret = -EFAULT;
+ ipw = memdup_user(p->pointer, p->length);
+ if (IS_ERR(ipw)) {
+ ret = PTR_ERR(ipw);
goto out;
}
@@ -3859,15 +3847,6 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
}
-#ifdef JOHN_HWSEC_DEBUG
- //john's test 0711
- printk("@@ wrq->u pointer = ");
- for (i = 0; i < wrq->u.data.length; i++) {
- if (i%10 == 0) printk("\n");
- printk("%8x|", ((u32 *)wrq->u.data.pointer)[i]);
- }
- printk("\n");
-#endif /*JOHN_HWSEC_DEBUG*/
ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
break;
@@ -3952,7 +3931,8 @@ u8 HwRateToMRate90(bool bIsHT, u8 rate)
* Return:
* None
*/
-void UpdateRxPktTimeStamp8190(struct net_device *dev, struct ieee80211_rx_stats *stats)
+static void UpdateRxPktTimeStamp8190(struct net_device *dev,
+ struct ieee80211_rx_stats *stats)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -4209,7 +4189,7 @@ static u8 rtl819x_evm_dbtopercentage(char value)
// We want good-looking for signal strength/quality
// 2007/7/19 01:09, by cosa.
//
-long rtl819x_signal_scale_mapping(long currsig)
+static long rtl819x_signal_scale_mapping(long currsig)
{
long retsig;
@@ -4478,9 +4458,9 @@ void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
}
-void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
- struct ieee80211_rx_stats *pstats,
- rx_drvinfo_819x_usb *pdrvinfo)
+static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
+ struct ieee80211_rx_stats *pstats,
+ rx_drvinfo_819x_usb *pdrvinfo)
{
// TODO: We must only check packet for current MAC address. Not finish
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
@@ -4549,8 +4529,9 @@ void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
* Return:
* None
*/
-void UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
- struct ieee80211_rx_stats *stats)
+static void
+UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
+ struct ieee80211_rx_stats *stats)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
u32 rcvType = 1; //0: Total, 1:OK, 2:CRC, 3:ICV
@@ -4614,7 +4595,9 @@ void UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
}
-void query_rxdesc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats, bool bIsRxAggrSubframe)
+static void query_rxdesc_status(struct sk_buff *skb,
+ struct ieee80211_rx_stats *stats,
+ bool bIsRxAggrSubframe)
{
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
@@ -4930,7 +4913,8 @@ void rtl819xusb_process_received_packet(struct net_device *dev,
}
-void query_rx_cmdpkt_desc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats)
+static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
+ struct ieee80211_rx_stats *stats)
{
rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index a6e4c37d9c7..41fb67b7337 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -100,14 +100,6 @@ static void dm_check_txpower_tracking(struct net_device *dev);
//static void dm_txpower_reset_recovery(struct net_device *dev);
-// DM --> BB init gain restore
-#ifndef RTL8192U
-static void dm_bb_initialgain_restore(struct net_device *dev);
-
-
-// DM --> BB init gain backup
-static void dm_bb_initialgain_backup(struct net_device *dev);
-#endif
// DM --> Dynamic Init Gain by RSSI
static void dm_dig_init(struct net_device *dev);
static void dm_ctrl_initgain_byrssi(struct net_device *dev);
@@ -122,12 +114,7 @@ static void dm_init_ctstoself(struct net_device *dev);
// DM --> EDCA turbo mode control
static void dm_check_edca_turbo(struct net_device *dev);
-// DM --> HW RF control
-static void dm_check_rfctrl_gpio(struct net_device *dev);
-
-#ifndef RTL8190P
//static void dm_gpio_change_rf(struct net_device *dev);
-#endif
// DM --> Check PBC
static void dm_check_pbc_gpio(struct net_device *dev);
@@ -269,7 +256,6 @@ extern void hal_dm_watchdog(struct net_device *dev)
dm_ctrl_initgain_byrssi(dev);
dm_check_edca_turbo(dev);
dm_bandwidth_autoswitch(dev);
- dm_check_rfctrl_gpio(dev);
dm_check_rx_path_selection(dev);
dm_check_fsync(dev);
@@ -620,16 +606,11 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
tx_cmd.Length = 4;
tx_cmd.Value = Value;
-#ifdef RTL8192U
rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12);
if (rtStatus == RT_STATUS_FAILURE)
{
RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
}
-#else
- cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
- DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
-#endif
mdelay(1);
//DbgPrint("hi, vivi, strange\n");
for(i = 0;i <= 30; i++)
@@ -641,11 +622,7 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
mdelay(1);
continue;
}
-#ifdef RTL8190P
- read_nic_word(dev, 0x1bc, &Avg_TSSI_Meas);
-#else
read_nic_word(dev, 0x13c, &Avg_TSSI_Meas);
-#endif
if(Avg_TSSI_Meas == 0)
{
write_nic_byte(dev, 0x1ba, 0);
@@ -654,14 +631,10 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
for(k = 0;k < 5; k++)
{
-#ifdef RTL8190P
- read_nic_byte(dev, 0x1d8+k, &tmp_report[k]);
-#else
if(k !=4)
read_nic_byte(dev, 0x134+k, &tmp_report[k]);
else
read_nic_byte(dev, 0x13e, &tmp_report[k]);
-#endif
RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
}
@@ -708,10 +681,6 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
-#ifdef RTL8190P
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex = %d\n", priv->rfc_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex_real = %d\n", priv->rfc_txpowertrackingindex_real);
-#endif
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
return;
@@ -720,11 +689,7 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
{
if(Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
{
- if((priv->rfa_txpowertrackingindex > 0)
-#ifdef RTL8190P
- &&(priv->rfc_txpowertrackingindex > 0)
-#endif
- )
+ if (priv->rfa_txpowertrackingindex > 0)
{
priv->rfa_txpowertrackingindex--;
if(priv->rfa_txpowertrackingindex_real > 4)
@@ -732,33 +697,16 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
priv->rfa_txpowertrackingindex_real--;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
}
-#ifdef RTL8190P
- priv->rfc_txpowertrackingindex--;
- if(priv->rfc_txpowertrackingindex_real > 4)
- {
- priv->rfc_txpowertrackingindex_real--;
- rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
- }
-#endif
}
}
else
{
- if((priv->rfa_txpowertrackingindex < 36)
-#ifdef RTL8190P
- &&(priv->rfc_txpowertrackingindex < 36)
-#endif
- )
+ if (priv->rfa_txpowertrackingindex < 36)
{
priv->rfa_txpowertrackingindex++;
priv->rfa_txpowertrackingindex_real++;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
-#ifdef RTL8190P
- priv->rfc_txpowertrackingindex++;
- priv->rfc_txpowertrackingindex_real++;
- rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
-#endif
}
}
priv->cck_present_attentuation_difference
@@ -788,10 +736,6 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
}
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
-#ifdef RTL8190P
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex = %d\n", priv->rfc_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex_real = %d\n", priv->rfc_txpowertrackingindex_real);
-#endif
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
@@ -937,14 +881,10 @@ extern void dm_txpower_trackingcallback(struct work_struct *work)
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
struct net_device *dev = priv->ieee80211->dev;
-#ifdef RTL8190P
- dm_TXPowerTrackingCallback_TSSI(dev);
-#else
if(priv->bDcut == TRUE)
dm_TXPowerTrackingCallback_TSSI(dev);
else
dm_TXPowerTrackingCallback_ThermalMeter(dev);
-#endif
}
@@ -1472,14 +1412,10 @@ static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
void dm_initialize_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef RTL8190P
- dm_InitializeTXPowerTracking_TSSI(dev);
-#else
if(priv->bDcut == TRUE)
dm_InitializeTXPowerTracking_TSSI(dev);
else
dm_InitializeTXPowerTracking_ThermalMeter(dev);
-#endif
}// dm_InitializeTXPowerTracking
@@ -1677,14 +1613,10 @@ extern void dm_cck_txpower_adjust(
{ // dm_CCKTxPowerAdjust
struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef RTL8190P
- dm_CCKTxPowerAdjust_TSSI(dev, binch14);
-#else
if(priv->bDcut == TRUE)
dm_CCKTxPowerAdjust_TSSI(dev, binch14);
else
dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
-#endif
}
@@ -2194,11 +2126,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(pAdapter, rOFDM0_RxDetector1, 0x40);
*/
@@ -2265,11 +2193,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
/*
else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
@@ -2342,11 +2266,7 @@ static void dm_ctrl_initgain_byrssi_highpwr(
// 3.1 Higher PD_TH for OFDM for high power state.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
@@ -2370,11 +2290,7 @@ static void dm_ctrl_initgain_byrssi_highpwr(
// 3.2 Recover PD_TH for OFDM for normal power region.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
@@ -2516,11 +2432,7 @@ static void dm_pd_th(
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
*/
@@ -2535,11 +2447,7 @@ static void dm_pd_th(
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
@@ -2552,11 +2460,7 @@ static void dm_pd_th(
// Higher PD_TH for OFDM for high power state.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
- #else
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
- #endif
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
*/
@@ -2823,44 +2727,6 @@ static void dm_ctstoself(struct net_device *dev)
}
}
-
-/*-----------------------------------------------------------------------------
- * Function: dm_check_rfctrl_gpio()
- *
- * Overview: Copy 8187B template for 9xseries.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/28/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-static void dm_check_rfctrl_gpio(struct net_device *dev)
-{
- //struct r8192_priv *priv = ieee80211_priv(dev);
-
- // Work around for DTM test, we will not enable HW - radio on/off because r/w
- // page 1 register before extra bus is enabled causing system failures when resuming
- // from S4. 20080218, Emily
-
- // Stop to execute workitem to prevent S3/S4 bug.
-#ifdef RTL8190P
- return;
-#endif
-#ifdef RTL8192U
- return;
-#endif
-#ifdef RTL8192E
- queue_delayed_work(priv->priv_wq,&priv->gpio_change_rf_wq,0);
-#endif
-
-} /* dm_CheckRfCtrlGPIO */
-
/*-----------------------------------------------------------------------------
* Function: dm_check_pbc_gpio()
*
@@ -2879,7 +2745,6 @@ static void dm_check_rfctrl_gpio(struct net_device *dev)
*---------------------------------------------------------------------------*/
static void dm_check_pbc_gpio(struct net_device *dev)
{
-#ifdef RTL8192U
struct r8192_priv *priv = ieee80211_priv(dev);
u8 tmp1byte;
@@ -2895,83 +2760,9 @@ static void dm_check_pbc_gpio(struct net_device *dev)
RT_TRACE(COMP_IO, "CheckPbcGPIO - PBC is pressed\n");
priv->bpbc_pressed = true;
}
-#endif
}
-#ifdef RTL8192E
-
-/*-----------------------------------------------------------------------------
- * Function: dm_GPIOChangeRF
- * Overview: PCI will not support workitem call back HW radio on-off control.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 02/21/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-extern void dm_gpio_change_rf_callback(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,gpio_change_rf_wq);
- struct net_device *dev = priv->ieee80211->dev;
- u8 tmp1byte;
- RT_RF_POWER_STATE eRfPowerStateToSet;
- bool bActuallySet = false;
-
- do{
- bActuallySet=false;
-
- if(!priv->up)
- {
- RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),"dm_gpio_change_rf_callback(): Callback function breaks out!!\n");
- }
- else
- {
- // 0x108 GPIO input register is read only
- //set 0x108 B1= 1: RF-ON; 0: RF-OFF.
- read_nic_byte(dev, GPI, &tmp1byte);
-
- eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
-
- if((priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn))
- {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
-
- priv->bHwRadioOff = false;
- bActuallySet = true;
- }
- else if ((priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff))
- {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
- priv->bHwRadioOff = true;
- bActuallySet = true;
- }
-
- if(bActuallySet)
- {
- #ifdef TO_DO
- MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
- //DrvIFIndicateCurrentPhyStatus(pAdapter);
- #endif
- }
- else
- {
- msleep(2000);
- }
-
- }
- }while(TRUE)
-
-} /* dm_GPIOChangeRF */
-
-#endif
/*-----------------------------------------------------------------------------
* Function: DM_RFPathCheckWorkItemCallBack()
*
@@ -3329,11 +3120,7 @@ static void dm_init_fsync (struct net_device *dev)
priv->ieee80211->fsync_time_interval = 500;
priv->ieee80211->fsync_rate_bitmap = 0x0f000800;
priv->ieee80211->fsync_rssi_threshold = 30;
-#ifdef RTL8190P
- priv->ieee80211->bfsync_enable = true;
-#else
priv->ieee80211->bfsync_enable = false;
-#endif
priv->ieee80211->fsync_multiple_timeinterval = 3;
priv->ieee80211->fsync_firstdiff_ratethreshold= 100;
priv->ieee80211->fsync_seconddiff_ratethreshold= 200;
@@ -3416,20 +3203,12 @@ extern void dm_fsync_timer_callback(unsigned long data)
priv->bswitch_fsync = !priv->bswitch_fsync;
if(priv->bswitch_fsync)
{
- #ifdef RTL8190P
- write_nic_byte(dev, 0xC36, 0x00);
- #else
write_nic_byte(dev,0xC36, 0x1c);
- #endif
write_nic_byte(dev, 0xC3e, 0x90);
}
else
{
- #ifdef RTL8190P
- write_nic_byte(dev, 0xC36, 0x40);
- #else
write_nic_byte(dev, 0xC36, 0x5c);
- #endif
write_nic_byte(dev, 0xC3e, 0x96);
}
}
@@ -3438,11 +3217,7 @@ extern void dm_fsync_timer_callback(unsigned long data)
if(priv->bswitch_fsync)
{
priv->bswitch_fsync = false;
- #ifdef RTL8190P
- write_nic_byte(dev, 0xC36, 0x40);
- #else
write_nic_byte(dev, 0xC36, 0x5c);
- #endif
write_nic_byte(dev, 0xC3e, 0x96);
}
}
@@ -3465,19 +3240,11 @@ extern void dm_fsync_timer_callback(unsigned long data)
if(priv->bswitch_fsync)
{
priv->bswitch_fsync = false;
- #ifdef RTL8190P
- write_nic_byte(dev, 0xC36, 0x40);
- #else
write_nic_byte(dev, 0xC36, 0x5c);
- #endif
write_nic_byte(dev, 0xC3e, 0x96);
}
priv->ContinueDiffCount = 0;
- #ifdef RTL8190P
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x164052cd);
- #else
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
- #endif
}
RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
@@ -3502,19 +3269,13 @@ static void dm_EndSWFsync(struct net_device *dev)
{
priv->bswitch_fsync = false;
- #ifdef RTL8190P
- write_nic_byte(dev, 0xC36, 0x40);
- #else
- write_nic_byte(dev, 0xC36, 0x5c);
- #endif
+ write_nic_byte(dev, 0xC36, 0x5c);
write_nic_byte(dev, 0xC3e, 0x96);
}
priv->ContinueDiffCount = 0;
-#ifndef RTL8190P
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
-#endif
}
@@ -3553,9 +3314,7 @@ static void dm_StartSWFsync(struct net_device *dev)
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
-#ifndef RTL8190P
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
-#endif
}
@@ -3624,11 +3383,7 @@ void dm_check_fsync(struct net_device *dev)
{
if(reg_c38_State != RegC38_Fsync_AP_BCM)
{ //For broadcom AP we write different default value
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x15);
- #else
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
- #endif
+ write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
reg_c38_State = RegC38_Fsync_AP_BCM;
}
@@ -3659,11 +3414,7 @@ void dm_check_fsync(struct net_device *dev)
{
if(reg_c38_State != RegC38_NonFsync_Other_AP)
{
- #ifdef RTL8190P
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x10);
- #else
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x90);
- #endif
+ write_nic_byte(dev, rOFDM0_RxDetector3, 0x90);
reg_c38_State = RegC38_NonFsync_Other_AP;
}
@@ -3845,10 +3596,8 @@ static void dm_dynamic_txpower(struct net_device *dev)
SetTxPowerLevel8190(Adapter,pHalData->CurrentChannel);
#endif
-#ifdef RTL8192U
rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel);
//pHalData->bStartTxCtrlByTPCNFR = FALSE; //Clear th flag of Set TX Power from Sitesurvey
-#endif
}
priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
@@ -3885,9 +3634,6 @@ static void dm_send_rssi_tofw(struct net_device *dev)
tx_cmd.Op = TXCMD_SET_RX_RSSI;
tx_cmd.Length = 4;
tx_cmd.Value = priv->undecorated_smoothed_pwdb;
-
- cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
- DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
}
/*---------------------------Define function prototype------------------------*/
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 61f6620213e..c70af014a31 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -127,156 +127,6 @@ static int r8192_wx_get_power(struct net_device *dev,
return ieee80211_wx_get_power(priv->ieee80211,info,wrqu,extra);
}
-#ifdef JOHN_IOCTL
-u16 read_rtl8225(struct net_device *dev, u8 addr);
-void write_rtl8225(struct net_device *dev, u8 adr, u16 data);
-u32 john_read_rtl8225(struct net_device *dev, u8 adr);
-void _write_rtl8225(struct net_device *dev, u8 adr, u16 data);
-
-static int r8192_wx_read_regs(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 addr;
- u16 data1;
-
- down(&priv->wx_sem);
-
-
- get_user(addr,(u8 *)wrqu->data.pointer);
- data1 = read_rtl8225(dev, addr);
- wrqu->data.length = data1;
-
- up(&priv->wx_sem);
- return 0;
-
-}
-
-static int r8192_wx_write_regs(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 addr;
-
- down(&priv->wx_sem);
-
- get_user(addr, (u8 *)wrqu->data.pointer);
- write_rtl8225(dev, addr, wrqu->data.length);
-
- up(&priv->wx_sem);
- return 0;
-
-}
-
-void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
-u8 rtl8187_read_phy(struct net_device *dev,u8 adr, u32 data);
-
-static int r8192_wx_read_bb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 databb;
-
- down(&priv->wx_sem);
-
- databb = rtl8187_read_phy(dev, (u8)wrqu->data.length, 0x00000000);
- wrqu->data.length = databb;
-
- up(&priv->wx_sem);
- return 0;
-}
-
-void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
-static int r8192_wx_write_bb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 databb;
-
- down(&priv->wx_sem);
-
- get_user(databb, (u8 *)wrqu->data.pointer);
- rtl8187_write_phy(dev, wrqu->data.length, databb);
-
- up(&priv->wx_sem);
- return 0;
-
-}
-
-
-static int r8192_wx_write_nicb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 addr;
-
- down(&priv->wx_sem);
-
- get_user(addr, (u32 *)wrqu->data.pointer);
- write_nic_byte(dev, addr, wrqu->data.length);
-
- up(&priv->wx_sem);
- return 0;
-
-}
-static int r8192_wx_read_nicb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 addr;
- u16 data1;
-
- down(&priv->wx_sem);
-
- get_user(addr,(u32 *)wrqu->data.pointer);
- read_nic_byte(dev, addr, &data1);
- wrqu->data.length = data1;
-
- up(&priv->wx_sem);
- return 0;
-}
-
-static int r8192_wx_get_ap_status(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *target;
- int name_len;
-
- down(&priv->wx_sem);
-
- //count the length of input ssid
- for(name_len=0 ; ((char *)wrqu->data.pointer)[name_len]!='\0' ; name_len++);
-
- //search for the corresponding info which is received
- list_for_each_entry(target, &ieee->network_list, list) {
- if ( (target->ssid_len == name_len) &&
- (strncmp(target->ssid, (char *)wrqu->data.pointer, name_len)==0)){
- if(target->wpa_ie_len>0 || target->rsn_ie_len>0 )
- //set flags=1 to indicate this ap is WPA
- wrqu->data.flags = 1;
- else wrqu->data.flags = 0;
-
-
- break;
- }
- }
-
- up(&priv->wx_sem);
- return 0;
-}
-
-
-
-#endif
static int r8192_wx_force_reset(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -1106,46 +956,7 @@ static const struct iw_priv_args r8192_private_args[] = {
{
SIOCIWFIRSTPRIV + 0x2,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
- }
-#ifdef JOHN_IOCTL
- ,
- {
- SIOCIWFIRSTPRIV + 0x3,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "readRF"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x4,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "writeRF"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x5,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "readBB"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x6,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "writeBB"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x7,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "readnicb"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x8,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "writenicb"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x9,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
- }
-
-#endif
- ,
+ },
{
SIOCIWFIRSTPRIV + 0x3,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
@@ -1163,15 +974,6 @@ static iw_handler r8192_private_handler[] = {
// r8192_wx_set_monitor_type,
r8192_wx_set_scan_type,
r8192_wx_set_rawtx,
-#ifdef JOHN_IOCTL
- r8192_wx_read_regs,
- r8192_wx_write_regs,
- r8192_wx_read_bb,
- r8192_wx_write_bb,
- r8192_wx_read_nicb,
- r8192_wx_write_nicb,
- r8192_wx_get_ap_status,
-#endif
//r8192_wx_null,
r8192_wx_force_reset,
};
diff --git a/drivers/staging/rtl8192u/r819xU_HTType.h b/drivers/staging/rtl8192u/r819xU_HTType.h
index 19a7bdd1973..2cbb8e6584f 100644
--- a/drivers/staging/rtl8192u/r819xU_HTType.h
+++ b/drivers/staging/rtl8192u/r819xU_HTType.h
@@ -2,41 +2,36 @@
#define _R819XU_HTTYPE_H_
-//------------------------------------------------------------
-// The HT Capability element is present in beacons, association request,
-// reassociation request and probe response frames
-//------------------------------------------------------------
-
-//
-// Operation mode value
-//
+/*----------------------------------------------------------------------
+ * The HT Capability element is present in beacons, association request,
+ * reassociation request and probe response frames
+ *----------------------------------------------------------------------*/
+
+/* Operation mode value */
#define HT_OPMODE_NO_PROTECT 0
#define HT_OPMODE_OPTIONAL 1
-#define HT_OPMODE_40MHZ_PROTECT 2
+#define HT_OPMODE_40MHZ_PROTECT 2
#define HT_OPMODE_MIXED 3
-//
-// MIMO Power Save Settings
-//
-#define MIMO_PS_STATIC 0
+/* MIMO Power Save Settings */
+#define MIMO_PS_STATIC 0
#define MIMO_PS_DYNAMIC 1
#define MIMO_PS_NOLIMIT 3
-//
-// There should be 128 bits to cover all of the MCS rates. However, since
-// 8190 does not support too much rates, one integer is quite enough.
-//
+/* There should be 128 bits to cover all of the MCS rates. However, since
+ * 8190 does not support that many rates, one integer is quite enough. */
-#define sHTCLng 4
+#define sHTCLng 4
-#define HT_SUPPORTED_MCS_1SS_BITMAP 0x000000ff
-#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
-#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP HT_MCS_1SS_BITMAP|HT_MCS_1SS_2SS_BITMAP
+#define HT_SUPPORTED_MCS_1SS_BITMAP 0x000000ff
+#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
+#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP \
+ (HT_MCS_1SS_BITMAP | HT_MCS_1SS_2SS_BITMAP)
-typedef enum _HT_MCS_RATE{
+typedef enum _HT_MCS_RATE {
HT_MCS0 = 0x00000001,
HT_MCS1 = 0x00000002,
HT_MCS2 = 0x00000004,
@@ -47,71 +42,67 @@ typedef enum _HT_MCS_RATE{
HT_MCS7 = 0x00000080,
HT_MCS8 = 0x00000100,
HT_MCS9 = 0x00000200,
- HT_MCS10 = 0x00000400,
- HT_MCS11 = 0x00000800,
- HT_MCS12 = 0x00001000,
- HT_MCS13 = 0x00002000,
- HT_MCS14 = 0x00004000,
- HT_MCS15 = 0x00008000,
- // Do not define MCS32 here although 8190 support MCS32
-}HT_MCS_RATE,*PHT_MCS_RATE;
-
-//
-// Represent Channel Width in HT Capabilities
-//
-typedef enum _HT_CHANNEL_WIDTH{
- HT_CHANNEL_WIDTH_20 = 0,
+ HT_MCS10 = 0x00000400,
+ HT_MCS11 = 0x00000800,
+ HT_MCS12 = 0x00001000,
+ HT_MCS13 = 0x00002000,
+ HT_MCS14 = 0x00004000,
+ HT_MCS15 = 0x00008000,
+ /* Do not define MCS32 here although 8190 support MCS32 */
+} HT_MCS_RATE, *PHT_MCS_RATE;
+
+/* Represent Channel Width in HT Capabilities */
+typedef enum _HT_CHANNEL_WIDTH {
+ HT_CHANNEL_WIDTH_20 = 0,
HT_CHANNEL_WIDTH_20_40 = 1,
-}HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;
+} HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;
-//
-// Represent Extension Channel Offset in HT Capabilities
-// This is available only in 40Mhz mode.
-//
-typedef enum _HT_EXTCHNL_OFFSET{
+/* Represent Extension Channel Offset in HT Capabilities
+ * This is available only in 40Mhz mode. */
+typedef enum _HT_EXTCHNL_OFFSET {
HT_EXTCHNL_OFFSET_NO_EXT = 0,
- HT_EXTCHNL_OFFSET_UPPER = 1,
+ HT_EXTCHNL_OFFSET_UPPER = 1,
HT_EXTCHNL_OFFSET_NO_DEF = 2,
- HT_EXTCHNL_OFFSET_LOWER = 3,
-}HT_EXTCHNL_OFFSET, *PHT_EXTCHNL_OFFSET;
-
-typedef enum _CHNLOP{
- CHNLOP_NONE = 0, // No Action now
- CHNLOP_SCAN = 1, // Scan in progress
- CHNLOP_SWBW = 2, // Bandwidth switching in progress
- CHNLOP_SWCHNL = 3, // Software Channel switching in progress
+ HT_EXTCHNL_OFFSET_LOWER = 3,
+} HT_EXTCHNL_OFFSET, *PHT_EXTCHNL_OFFSET;
+
+typedef enum _CHNLOP {
+ CHNLOP_NONE = 0, /* No Action now */
+ CHNLOP_SCAN = 1, /* Scan in progress */
+ CHNLOP_SWBW = 2, /* Bandwidth switching in progress */
+ CHNLOP_SWCHNL = 3, /* Software Channel switching in progress */
} CHNLOP, *PCHNLOP;
-// Determine if the Channel Operation is in progress
+/* Determine if the Channel Operation is in progress */
#define CHHLOP_IN_PROGRESS(_pHTInfo) \
- ((_pHTInfo)->ChnlOp > CHNLOP_NONE) ? TRUE : FALSE
+ (((_pHTInfo)->ChnlOp > CHNLOP_NONE) ? TRUE : FALSE)
-typedef enum _HT_ACTION{
+typedef enum _HT_ACTION {
ACT_RECOMMAND_WIDTH = 0,
ACT_MIMO_PWR_SAVE = 1,
- ACT_PSMP = 2,
+ ACT_PSMP = 2,
ACT_SET_PCO_PHASE = 3,
- ACT_MIMO_CHL_MEASURE = 4,
- ACT_RECIPROCITY_CORRECT = 5,
+ ACT_MIMO_CHL_MEASURE = 4,
+ ACT_RECIPROCITY_CORRECT = 5,
ACT_MIMO_CSI_MATRICS = 6,
- ACT_MIMO_NOCOMPR_STEER = 7,
+ ACT_MIMO_NOCOMPR_STEER = 7,
ACT_MIMO_COMPR_STEER = 8,
ACT_ANTENNA_SELECT = 9,
} HT_ACTION, *PHT_ACTION;
-/* 2007/06/07 MH Define sub-carrier mode for 40MHZ. */
-typedef enum _HT_Bandwidth_40MHZ_Sub_Carrier{
+/* Define sub-carrier mode for 40MHZ. */
+typedef enum _HT_Bandwidth_40MHZ_Sub_Carrier {
SC_MODE_DUPLICATE = 0,
- SC_MODE_LOWER = 1,
- SC_MODE_UPPER = 2,
+ SC_MODE_LOWER = 1,
+ SC_MODE_UPPER = 2,
SC_MODE_FULL40MHZ = 3,
-}HT_BW40_SC_E;
+} HT_BW40_SC_E;
-typedef struct _HT_CAPABILITY_ELE{
+typedef struct _HT_CAPABILITY_ELE {
- //HT capability info
+ /* HT capability info */
u8 AdvCoding:1;
u8 ChlWidth:1;
u8 MimoPwrSave:2;
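
The parentheses added around the CHHLOP_IN_PROGRESS() expansion in this hunk guard against operator-precedence surprises once the macro is dropped into a larger expression. A minimal stand-alone sketch of the failure mode, using hypothetical macro names and values rather than anything from this driver:

#include <stdio.h>

/* Unparenthesized expansion: the surrounding expression can capture
 * part of the '?:' operator. */
#define IN_PROGRESS_BAD(op)	((op) > 0) ? 1 : 0
/* Fully parenthesized expansion: always one self-contained expression. */
#define IN_PROGRESS_GOOD(op)	(((op) > 0) ? 1 : 0)

int main(void)
{
	int op = 1;	/* pretend a channel operation is in progress */

	/* '==' binds tighter than '?:', so this parses as
	 * (op > 0) ? 1 : (0 == 0) and prints 1 ... */
	printf("bad:  %d\n", IN_PROGRESS_BAD(op) == 0);
	/* ... while the intended "not in progress?" test prints 0. */
	printf("good: %d\n", IN_PROGRESS_GOOD(op) == 0);
	return 0;
}
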
@@ -127,32 +118,32 @@ typedef struct _HT_CAPABILITY_ELE{
u8 Rsvd1:1;
u8 LSigTxopProtect:1;
- //MAC HT parameters info
+ /* MAC HT parameters info */
u8 MaxRxAMPDUFactor:2;
u8 MPDUDensity:3;
u8 Rsvd2:3;
- //Supported MCS set
+ /* Supported MCS set */
u8 MCS[16];
- //Extended HT Capability Info
+ /* Extended HT Capability Info */
u16 ExtHTCapInfo;
- //TXBF Capabilities
+ /* TXBF Capabilities */
u8 TxBFCap[4];
- //Antenna Selection Capabilities
+ /* Antenna Selection Capabilities */
u8 ASCap;
-}__attribute__((packed)) HT_CAPABILITY_ELE, *PHT_CAPABILITY_ELE;
+} __packed HT_CAPABILITY_ELE, *PHT_CAPABILITY_ELE;
-//------------------------------------------------------------
-// The HT Information element is present in beacons
-// Only AP is required to include this element
-//------------------------------------------------------------
+/*------------------------------------------------------------
+ * The HT Information element is present in beacons
+ * Only AP is required to include this element
+ *------------------------------------------------------------*/
-typedef struct _HT_INFORMATION_ELE{
+typedef struct _HT_INFORMATION_ELE {
u8 ControlChl;
u8 ExtChlOffset:2;
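
__packed is the kernel's shorthand for __attribute__((packed)), and it matters for these structures because they are overlaid directly on frame bytes. A small sketch under that assumption, using a hypothetical four-byte element header rather than the real HT elements:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical element header, for illustration only; the real layouts
 * are HT_CAPABILITY_ELE / HT_INFORMATION_ELE above. */
struct demo_ie_hdr {
	u8	eid;
	u8	len;
	u16	cap_info;	/* stored little-endian on the air */
} __packed;

static u8 demo_read_eid(const u8 *frame)
{
	const struct demo_ie_hdr *hdr = (const struct demo_ie_hdr *)frame;

	/* Without __packed most ABIs pad this struct to 6 bytes and the
	 * cast above would mis-read the frame; packed it stays at 4. */
	BUILD_BUG_ON(sizeof(struct demo_ie_hdr) != 4);
	return hdr->eid;
}
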
@@ -177,146 +168,146 @@ typedef struct _HT_INFORMATION_ELE{
u8 Rsvd4:4;
u8 BasicMSC[16];
-}__attribute__((packed)) HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
+} __packed HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
-//
-// MIMO Power Save control field.
-// This is appear in MIMO Power Save Action Frame
-//
-typedef struct _MIMOPS_CTRL{
+/* MIMO Power Save control field.
+ * This appears in the MIMO Power Save Action Frame. */
+typedef struct _MIMOPS_CTRL {
u8 MimoPsEnable:1;
u8 MimoPsMode:1;
u8 Reserved:6;
} MIMOPS_CTRL, *PMIMOPS_CTRL;
-typedef enum _HT_SPEC_VER{
+typedef enum _HT_SPEC_VER {
HT_SPEC_VER_IEEE = 0,
HT_SPEC_VER_EWC = 1,
-}HT_SPEC_VER, *PHT_SPEC_VER;
+} HT_SPEC_VER, *PHT_SPEC_VER;
-typedef enum _HT_AGGRE_MODE_E{
+typedef enum _HT_AGGRE_MODE_E {
HT_AGG_AUTO = 0,
HT_AGG_FORCE_ENABLE = 1,
HT_AGG_FORCE_DISABLE = 2,
-}HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
-
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variables when card is
-// configured as non-AP STA mode. **Note** Current_xxx should be set
-// to default value in HTInitializeHTInfo()
-//------------------------------------------------------------
-
-typedef struct _RT_HIGH_THROUGHPUT{
-// DECLARE_RT_OBJECT(_RT_HIGH_THROUGHPUT);
- u8 bEnableHT;
- u8 bCurrentHTSupport;
-
- u8 bRegBW40MHz; // Tx 40MHz channel capability
- u8 bCurBW40MHz; // Tx 40MHz channel capability
-
- u8 bRegShortGI40MHz; // Tx Short GI for 40Mhz
- u8 bCurShortGI40MHz; // Tx Short GI for 40MHz
-
- u8 bRegShortGI20MHz; // Tx Short GI for 20MHz
- u8 bCurShortGI20MHz; // Tx Short GI for 20MHz
-
- u8 bRegSuppCCK; // Tx CCK rate capability
- u8 bCurSuppCCK; // Tx CCK rate capability
-
- // 802.11n spec version for "peer"
- HT_SPEC_VER ePeerHTSpecVer;
-
+} HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
- // HT related information for "Self"
- HT_CAPABILITY_ELE SelfHTCap; // This is HT cap element sent to peer STA, which also indicate HT Rx capabilities.
- HT_INFORMATION_ELE SelfHTInfo; // This is HT info element sent to peer STA, which also indicate HT Rx capabilities.
+/*----------------------------------------------------------------------------
+ * The Data structure is used to keep HT related variables when card is
+ * configured as non-AP STA mode.
+ * **Note** Current_xxx should be set to default value in HTInitializeHTInfo()
+ *----------------------------------------------------------------------------*/
- // HT related information for "Peer"
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
-
-
- // A-MSDU related
- u8 bAMSDU_Support; // This indicates Tx A-MSDU capability
- u16 nAMSDU_MaxSize; // This indicates Tx A-MSDU capability
- u8 bCurrent_AMSDU_Support; // This indicates Tx A-MSDU capability
- u16 nCurrent_AMSDU_MaxSize; // This indicates Tx A-MSDU capability
-
-
- // AMPDU related <2006.08.10 Emily>
- u8 bAMPDUEnable; // This indicate Tx A-MPDU capability
- u8 bCurrentAMPDUEnable; // This indicate Tx A-MPDU capability
- u8 AMPDU_Factor; // This indicate Tx A-MPDU capability
- u8 CurrentAMPDUFactor; // This indicate Tx A-MPDU capability
- u8 MPDU_Density; // This indicate Tx A-MPDU capability
- u8 CurrentMPDUDensity; // This indicate Tx A-MPDU capability
+typedef struct _RT_HIGH_THROUGHPUT {
+ u8 bEnableHT;
+ u8 bCurrentHTSupport;
+ /* Tx 40MHz channel capability */
+ u8 bRegBW40MHz;
+ u8 bCurBW40MHz;
+ /* Tx Short GI for 40Mhz */
+ u8 bRegShortGI40MHz;
+ u8 bCurShortGI40MHz;
+ /* Tx Short GI for 20MHz */
+ u8 bRegShortGI20MHz;
+ u8 bCurShortGI20MHz;
+ /* Tx CCK rate capability */
+ u8 bRegSuppCCK;
+ u8 bCurSuppCCK;
+
+ /* 802.11n spec version for "peer" */
+ HT_SPEC_VER ePeerHTSpecVer;
+
+
+ /* HT related information for "Self" */
+	/* This is the HT cap element sent to the peer STA, which also indicates
+ * HT Rx capabilities. */
+ HT_CAPABILITY_ELE SelfHTCap;
+ HT_INFORMATION_ELE SelfHTInfo;
+
+ /* HT related information for "Peer" */
+ u8 PeerHTCapBuf[32];
+ u8 PeerHTInfoBuf[32];
+
+
+ /* A-MSDU related */
+ /* This indicates Tx A-MSDU capability */
+ u8 bAMSDU_Support;
+ u16 nAMSDU_MaxSize;
+ u8 bCurrent_AMSDU_Support;
+ u16 nCurrent_AMSDU_MaxSize;
+
+
+ /* A-MPDU related */
+	/* This indicates Tx A-MPDU capability */
+ u8 bAMPDUEnable;
+ u8 bCurrentAMPDUEnable;
+ u8 AMPDU_Factor;
+ u8 CurrentAMPDUFactor;
+ u8 MPDU_Density;
+ u8 CurrentMPDUDensity;
- // Forced A-MPDU enable
- HT_AGGRE_MODE_E ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
+ /* Forced A-MPDU enable */
+ HT_AGGRE_MODE_E ForcedAMPDUMode;
+ u8 ForcedAMPDUFactor;
+ u8 ForcedMPDUDensity;
- // Forced A-MSDU enable
- HT_AGGRE_MODE_E ForcedAMSDUMode;
- u16 ForcedAMSDUMaxSize;
+ /* Forced A-MSDU enable */
+ HT_AGGRE_MODE_E ForcedAMSDUMode;
+ u16 ForcedAMSDUMaxSize;
- u8 bForcedShortGI;
+ u8 bForcedShortGI;
- u8 CurrentOpMode;
+ u8 CurrentOpMode;
- // MIMO PS related
- u8 SelfMimoPs;
- u8 PeerMimoPs;
+ /* MIMO PS related */
+ u8 SelfMimoPs;
+ u8 PeerMimoPs;
- // 40MHz Channel Offset settings.
+ /* 40MHz Channel Offset settings. */
HT_EXTCHNL_OFFSET CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz; // If we use 40 MHz to Tx
- u8 PeerBandwidth;
-
- // For Bandwidth Switching
- u8 bSwBwInProgress;
- CHNLOP ChnlOp; // software switching channel in progress. By Bruce, 2008-02-15.
- u8 SwBwStep;
- //RT_TIMER SwBwTimer;
- struct timer_list SwBwTimer;
-
- // For Realtek proprietary A-MPDU factor for aggregation
- u8 bRegRT2RTAggregation;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- // Rx Reorder control
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
+ u8 bCurTxBW40MHz; /* If we use 40 MHz to Tx */
+ u8 PeerBandwidth;
+
+ /* For Bandwidth Switching */
+ u8 bSwBwInProgress;
+ CHNLOP ChnlOp; /* sw switching channel in progress. */
+ u8 SwBwStep;
+ struct timer_list SwBwTimer;
+
+ /* For Realtek proprietary A-MPDU factor for aggregation */
+ u8 bRegRT2RTAggregation;
+ u8 bCurrentRT2RTAggregation;
+ u8 bCurrentRT2RTLongSlotTime;
+ u8 szRT2RTAggBuffer[10];
+
+ /* Rx Reorder control */
+ u8 bRegRxReorderEnable;
+ u8 bCurRxReorderEnable;
+ u8 RxReorderWinSize;
+ u8 RxReorderPendingTime;
+ u16 RxReorderDropCounter;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- u8 UsbTxAggrNum;
+ u8 UsbTxAggrNum;
#endif
#ifdef USB_RX_AGGREGATION_SUPPORT
- u8 UsbRxFwAggrEn;
- u8 UsbRxFwAggrPageNum;
- u8 UsbRxFwAggrPacketNum;
- u8 UsbRxFwAggrTimeout;
+ u8 UsbRxFwAggrEn;
+ u8 UsbRxFwAggrPageNum;
+ u8 UsbRxFwAggrPacketNum;
+ u8 UsbRxFwAggrTimeout;
#endif
- // Add for Broadcom(Linksys) IOT. Joseph
- u8 bIsPeerBcm;
+	/* Added for Broadcom (Linksys) IOT. */
+ u8 bIsPeerBcm;
- // For IOT issue.
- u32 IOTAction;
-}RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
+ /* For IOT issue. */
+ u32 IOTAction;
+} RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variable for "each Sta"
-// when card is configured as "AP mode"
-//------------------------------------------------------------
+/*----------------------------------------------------------------------
+ * The Data structure is used to keep HT related variable for "each Sta"
+ * when card is configured as "AP mode"
+ *----------------------------------------------------------------------*/
-typedef struct _RT_HTINFO_STA_ENTRY{
+typedef struct _RT_HTINFO_STA_ENTRY {
u8 bEnableHT;
u8 bSupportCck;
@@ -335,56 +326,54 @@ typedef struct _RT_HTINFO_STA_ENTRY{
u8 McsRateSet[16];
-}RT_HTINFO_STA_ENTRY, *PRT_HTINFO_STA_ENTRY;
+} RT_HTINFO_STA_ENTRY, *PRT_HTINFO_STA_ENTRY;
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variable for "each AP"
-// when card is configured as "STA mode"
-//------------------------------------------------------------
+/*---------------------------------------------------------------------
+ * The Data structure is used to keep HT related variable for "each AP"
+ * when card is configured as "STA mode"
+ *---------------------------------------------------------------------*/
-typedef struct _BSS_HT{
+typedef struct _BSS_HT {
u8 bdSupportHT;
- // HT related elements
- u8 bdHTCapBuf[32];
- u16 bdHTCapLen;
- u8 bdHTInfoBuf[32];
- u16 bdHTInfoLen;
+ /* HT related elements */
+ u8 bdHTCapBuf[32];
+ u16 bdHTCapLen;
+ u8 bdHTInfoBuf[32];
+ u16 bdHTInfoLen;
- HT_SPEC_VER bdHTSpecVer;
- //HT_CAPABILITY_ELE bdHTCapEle;
- //HT_INFORMATION_ELE bdHTInfoEle;
+ HT_SPEC_VER bdHTSpecVer;
- u8 bdRT2RTAggregation;
- u8 bdRT2RTLongSlotTime;
-}BSS_HT, *PBSS_HT;
+ u8 bdRT2RTAggregation;
+ u8 bdRT2RTLongSlotTime;
+} BSS_HT, *PBSS_HT;
-typedef struct _MIMO_RSSI{
+typedef struct _MIMO_RSSI {
u32 EnableAntenna;
u32 AntennaA;
u32 AntennaB;
u32 AntennaC;
u32 AntennaD;
u32 Average;
-}MIMO_RSSI, *PMIMO_RSSI;
+} MIMO_RSSI, *PMIMO_RSSI;
-typedef struct _MIMO_EVM{
+typedef struct _MIMO_EVM {
u32 EVM1;
- u32 EVM2;
-}MIMO_EVM, *PMIMO_EVM;
+ u32 EVM2;
+} MIMO_EVM, *PMIMO_EVM;
-typedef struct _FALSE_ALARM_STATISTICS{
+typedef struct _FALSE_ALARM_STATISTICS {
u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
+ u32 Cnt_Rate_Illegal;
u32 Cnt_Crc8_fail;
u32 Cnt_all;
-}FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
+} FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
-#endif //__INC_HTTYPE_H
+#endif
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 56144014b7c..7bdcbd39a3b 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -62,105 +62,6 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
}
/*-----------------------------------------------------------------------------
- * Function: cmpk_message_handle_tx()
- *
- * Overview: Driver internal module can call the API to send message to
- * firmware side. For example, you can send a debug command packet.
- * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
- * Otherwise, you can change MAC/PHT/RF register by firmware at
- * run time. We do not support message more than one segment now.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/06/2008 amy porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-extern rt_status cmpk_message_handle_tx(struct net_device *dev,
- u8 *codevirtualaddress,
- u32 packettype, u32 buffer_len)
-{
-
- bool rt_status = true;
-#ifdef RTL8192U
- return rt_status;
-#else
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 frag_threshold;
- u16 frag_length, frag_offset = 0;
-
- rt_firmware *pfirmware = priv->pFirmware;
- struct sk_buff *skb;
- unsigned char *seg_ptr;
- cb_desc *tcb_desc;
- u8 bLastIniPkt;
-
- firmware_init_param(dev);
- /* Fragmentation might be required */
- frag_threshold = pfirmware->cmdpacket_frag_thresold;
- do {
- if ((buffer_len - frag_offset) > frag_threshold) {
- frag_length = frag_threshold;
- bLastIniPkt = 0;
-
- } else {
- frag_length = buffer_len - frag_offset;
- bLastIniPkt = 1;
-
- }
-
- /* Allocate skb buffer to contain firmware info and tx
- descriptor info add 4 to avoid packet appending overflow. */
-#ifdef RTL8192U
- skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
-#else
- skb = dev_alloc_skb(frag_length + 4);
-#endif
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
- tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = packettype;
- tcb_desc->bLastIniPkt = bLastIniPkt;
-
-#ifdef RTL8192U
- skb_reserve(skb, USB_HWDESC_HEADER_LEN);
-#endif
-
- seg_ptr = skb_put(skb, buffer_len);
- /*
- * Transform from little endian to big endian
- * and pending zero
- */
- memcpy(seg_ptr, codevirtualaddress, buffer_len);
- tcb_desc->txbuf_size = (u16)buffer_len;
-
-
- if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
- (priv->ieee80211->queue_stop)) {
- RT_TRACE(COMP_FIRMWARE, "======> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb, dev);
- }
-
- codevirtualaddress += frag_length;
- frag_offset += frag_length;
-
- } while (frag_offset < buffer_len);
-
- return rt_status;
-
-
-#endif
-}
-
-/*-----------------------------------------------------------------------------
* Function: cmpk_counttxstatistic()
*
* Overview:
@@ -593,8 +494,8 @@ static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
* 05/06/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern u32 cmpk_message_handle_rx(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
int total_length;
u8 cmd_length, exe_cnt = 0;
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index ebe403270a5..52cd437ef7b 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -1,17 +1,17 @@
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
/* Different command packet have dedicated message length and definition. */
-#define CMPK_RX_TX_FB_SIZE sizeof(cmpk_txfb_t) //20
-#define CMPK_TX_SET_CONFIG_SIZE sizeof(cmpk_set_cfg_t) //16
-#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(cmpk_set_cfg_t) //16
-#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)//
-#define CMPK_RX_DBG_MSG_SIZE sizeof(cmpk_rx_dbginfo_t)//
-#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
+#define CMPK_RX_TX_FB_SIZE sizeof(cmpk_txfb_t) /* 20 */
+#define CMPK_TX_SET_CONFIG_SIZE sizeof(cmpk_set_cfg_t) /* 16 */
+#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(cmpk_set_cfg_t) /* 16 */
+#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)
+#define CMPK_RX_DBG_MSG_SIZE sizeof(cmpk_rx_dbginfo_t)
+#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
/* 2008/05/08 amy For USB constant. */
-#define ISR_TxBcnOk BIT27 // Transmit Beacon OK
-#define ISR_TxBcnErr BIT26 // Transmit Beacon Error
-#define ISR_BcnTimerIntr BIT13 // Beacon Timer Interrupt
+#define ISR_TxBcnOk BIT27 /* Transmit Beacon OK */
+#define ISR_TxBcnErr BIT26 /* Transmit Beacon Error */
+#define ISR_BcnTimerIntr BIT13 /* Beacon Timer Interrupt */
/* Define element ID of command packet. */
@@ -20,182 +20,172 @@
/* Define different command packet structure. */
/* 1. RX side: TX feedback packet. */
typedef struct tag_cmd_pkt_tx_feedback {
- // DWORD 0
+ /* DWORD 0 */
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
- /* 2007/07/05 MH Change tx feedback info field. */
+ /* Change tx feedback info field. */
/*------TX Feedback Info Field */
- u8 TID:4; /* */
- u8 fail_reason:3; /* */
+ u8 TID:4;
+ u8 fail_reason:3;
u8 tok:1; /* Transmit ok. */
- u8 reserve1:4; /* */
- u8 pkt_type:2; /* */
- u8 bandwidth:1; /* */
- u8 qos_pkt:1; /* */
+ u8 reserve1:4;
+ u8 pkt_type:2;
+ u8 bandwidth:1;
+ u8 qos_pkt:1;
- // DWORD 1
- u8 reserve2; /* */
+ /* DWORD 1 */
+ u8 reserve2;
/*------TX Feedback Info Field */
- u8 retry_cnt; /* */
- u16 pkt_id; /* */
+ u8 retry_cnt;
+ u16 pkt_id;
- // DWORD 3
- u16 seq_num; /* */
+ /* DWORD 3 */
+ u16 seq_num;
u8 s_rate; /* Start rate. */
u8 f_rate; /* Final rate. */
- // DWORD 4
- u8 s_rts_rate; /* */
- u8 f_rts_rate; /* */
- u16 pkt_length; /* */
+ /* DWORD 4 */
+ u8 s_rts_rate;
+ u8 f_rts_rate;
+ u16 pkt_length;
- // DWORD 5
- u16 reserve3; /* */
- u16 duration; /* */
-}cmpk_txfb_t;
+ /* DWORD 5 */
+ u16 reserve3;
+ u16 duration;
+} cmpk_txfb_t;
/* 2. RX side: Interrupt status packet. It includes Beacon State,
- Beacon Timer Interrupt and other useful informations in MAC ISR Reg. */
+ * Beacon Timer Interrupt and other useful information in MAC ISR Reg. */
typedef struct tag_cmd_pkt_interrupt_status {
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
u16 reserve;
- u32 interrupt_status; /* Interrupt Status. */
-}cmpk_intr_sta_t;
+ u32 interrupt_status; /* Interrupt Status. */
+} cmpk_intr_sta_t;
/* 3. TX side: Set configuration packet. */
typedef struct tag_cmd_pkt_set_configuration {
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
- u16 reserve1; /* */
+ u16 reserve1;
+ /* Configuration info. */
u8 cfg_reserve1:3;
- u8 cfg_size:2; /* Configuration info. */
- u8 cfg_type:2; /* Configuration info. */
- u8 cfg_action:1; /* Configuration info. */
- u8 cfg_reserve2; /* Configuration info. */
- u8 cfg_page:4; /* Configuration info. */
- u8 cfg_reserve3:4; /* Configuration info. */
- u8 cfg_offset; /* Configuration info. */
- u32 value; /* */
- u32 mask; /* */
-}cmpk_set_cfg_t;
+ u8 cfg_size:2;
+ u8 cfg_type:2;
+ u8 cfg_action:1;
+ u8 cfg_reserve2;
+ u8 cfg_page:4;
+ u8 cfg_reserve3:4;
+ u8 cfg_offset;
+ u32 value;
+ u32 mask;
+} cmpk_set_cfg_t;
 /* 4. Both sides: TX/RX query configuration packet. The query structure is the
same as set configuration. */
#define cmpk_query_cfg_t cmpk_set_cfg_t
/* 5. Multi packet feedback status. */
-typedef struct tag_tx_stats_feedback { // PJ quick rxcmd 09042007
- // For endian transfer --> Driver will not the same as firmware structure.
- // DW 0
+typedef struct tag_tx_stats_feedback {
+	/* For endian transfer --> Driver will not be the same as the
+	 * firmware structure. */
+ /* DW 0 */
u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
+ u8 length; /* Command packet length */
+ u8 element_id; /* Command packet type */
- // DW 1
- u16 txfail; // Tx Fail count
- u16 txok; // Tx ok count
+ /* DW 1 */
+ u16 txfail; /* Tx fail count */
+ u16 txok; /* Tx ok count */
- // DW 2
- u16 txmcok; // tx multicast
- u16 txretry; // Tx Retry count
+ /* DW 2 */
+ u16 txmcok; /* Tx multicast */
+ u16 txretry; /* Tx retry count */
- // DW 3
- u16 txucok; // tx unicast
- u16 txbcok; // tx broadcast
+ /* DW 3 */
+ u16 txucok; /* Tx unicast */
+ u16 txbcok; /* Tx broadcast */
- // DW 4
- u16 txbcfail; //
- u16 txmcfail; //
+ /* DW 4 */
+ u16 txbcfail;
+ u16 txmcfail;
- // DW 5
- u16 reserve2; //
- u16 txucfail; //
+ /* DW 5 */
+ u16 reserve2;
+ u16 txucfail;
- // DW 6-8
+ /* DW 6-8 */
u32 txmclength;
u32 txbclength;
u32 txuclength;
- // DW 9
+ /* DW 9 */
u16 reserve3_23;
u8 reserve3_1;
u8 rate;
-}__attribute__((packed)) cmpk_tx_status_t;
+} __packed cmpk_tx_status_t;
/* 6. Debug feedback message. */
-/* 2007/10/23 MH Define RX debug message */
+/* Define RX debug message */
typedef struct tag_rx_debug_message_feedback {
- // For endian transfer --> for driver
- // DW 0
+ /* For endian transfer --> for driver */
+ /* DW 0 */
u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
+ u8 length; /* Command packet length */
+ u8 element_id; /* Command packet type */
- // DW 1-??
- // Variable debug message.
+ /* DW 1-?? */
+ /* Variable debug message. */
-}cmpk_rx_dbginfo_t;
+} cmpk_rx_dbginfo_t;
-/* 2008/03/20 MH Define transmit rate history. For big endian format. */
+/* Define transmit rate history. For big endian format. */
typedef struct tag_tx_rate_history {
- // For endian transfer --> for driver
- // DW 0
- u8 element_id; // Command packet type
- u8 length; // Command packet length
+ /* For endian transfer --> for driver */
+ /* DW 0 */
+ u8 element_id; /* Command packet type */
+ u8 length; /* Command packet length */
u16 reserved1;
- // DW 1-2 CCK rate counter
+ /* DW 1-2 CCK rate counter */
u16 cck[4];
- // DW 3-6
+ /* DW 3-6 */
u16 ofdm[8];
- // DW 7-14
- //UINT16 MCS_BW0_SG0[16];
-
- // DW 15-22
- //UINT16 MCS_BW1_SG0[16];
-
- // DW 23-30
- //UINT16 MCS_BW0_SG1[16];
-
- // DW 31-38
- //UINT16 MCS_BW1_SG1[16];
-
- // DW 7-14 BW=0 SG=0
- // DW 15-22 BW=1 SG=0
- // DW 23-30 BW=0 SG=1
- // DW 31-38 BW=1 SG=1
+ /* DW 7-14 BW=0 SG=0
+ * DW 15-22 BW=1 SG=0
+ * DW 23-30 BW=0 SG=1
+ * DW 31-38 BW=1 SG=1
+ */
u16 ht_mcs[4][16];
-}__attribute__((packed)) cmpk_tx_rahis_t;
-
-typedef enum tag_command_packet_directories
-{
- RX_TX_FEEDBACK = 0,
- RX_INTERRUPT_STATUS = 1,
- TX_SET_CONFIG = 2,
- BOTH_QUERY_CONFIG = 3,
- RX_TX_STATUS = 4,
- RX_DBGINFO_FEEDBACK = 5,
- RX_TX_PER_PKT_FEEDBACK = 6,
- RX_TX_RATE_HISTORY = 7,
- RX_CMD_ELE_MAX
-}cmpk_element_e;
-
-typedef enum _rt_status{
+} __packed cmpk_tx_rahis_t;
+
+typedef enum tag_command_packet_directories {
+ RX_TX_FEEDBACK = 0,
+ RX_INTERRUPT_STATUS = 1,
+ TX_SET_CONFIG = 2,
+ BOTH_QUERY_CONFIG = 3,
+ RX_TX_STATUS = 4,
+ RX_DBGINFO_FEEDBACK = 5,
+ RX_TX_PER_PKT_FEEDBACK = 6,
+ RX_TX_RATE_HISTORY = 7,
+ RX_CMD_ELE_MAX
+} cmpk_element_e;
+
+typedef enum _rt_status {
RT_STATUS_SUCCESS,
RT_STATUS_FAILURE,
RT_STATUS_PENDING,
RT_STATUS_RESOURCE
-}rt_status,*prt_status;
-
-extern rt_status cmpk_message_handle_tx(struct net_device *dev, u8 *codevirtualaddress, u32 packettype, u32 buffer_len);
+} rt_status, *prt_status;
-extern u32 cmpk_message_handle_rx(struct net_device *dev, struct ieee80211_rx_stats *pstats);
-extern rt_status SendTxCommandPacket( struct net_device *dev, void *pData, u32 DataLen);
+extern u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats);
+extern rt_status SendTxCommandPacket(struct net_device *dev,
+ void *pData, u32 DataLen);
#endif
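
Several definitions in this series (cmpk_message_handle_rx() above, and InitialGainOperateWorkItemCallBack() and cpld_set_clock() further down) lose their extern keyword while the header keeps the prototype. A hedged sketch of that convention, with hypothetical names:

#include <linux/types.h>

/* In the header (hypothetical demo.h): the prototype, where extern is
 * allowed but redundant, since functions have external linkage anyway:
 *
 *	u32 demo_handle_rx(const u8 *buf, u32 len);
 */

/* In the .c file: the definition carries no storage-class specifier;
 * kernel style treats "extern" on a definition as noise to be dropped. */
u32 demo_handle_rx(const u8 *buf, u32 len)
{
	return buf ? len : 0;
}
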
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index bb924ac97e4..d6a6de3a64f 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -61,20 +61,16 @@ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buff
/* Allocate skb buffer to contain firmware info and tx descriptor info
* add 4 to avoid packet appending overflow.
* */
- #ifdef RTL8192U
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
- #else
- skb = dev_alloc_skb(frag_length + 4);
- #endif
+ if (!skb)
+ return false;
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
tcb_desc->bLastIniPkt = bLastIniPkt;
- #ifdef RTL8192U
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- #endif
seg_ptr = skb->data;
/*
* Transform from little endian to big endian
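
The hunk above also adds a NULL check after dev_alloc_skb(), which can fail under memory pressure. A minimal sketch of the allocate/check/reserve/copy pattern, with hypothetical sizes standing in for USB_HWDESC_HEADER_LEN and frag_length:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical sizes, for illustration only. */
#define DEMO_HDR_LEN	32
#define DEMO_FRAG_LEN	64

static struct sk_buff *demo_build_cmd_skb(const u8 *payload)
{
	struct sk_buff *skb;

	/* dev_alloc_skb() returns NULL on failure, so the result must be
	 * checked before skb->cb is touched or headroom is reserved. */
	skb = dev_alloc_skb(DEMO_HDR_LEN + DEMO_FRAG_LEN + 4);
	if (!skb)
		return NULL;

	skb_reserve(skb, DEMO_HDR_LEN);	/* room for the tx descriptor */
	memcpy(skb_put(skb, DEMO_FRAG_LEN), payload, DEMO_FRAG_LEN);
	return skb;
}
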
@@ -299,16 +295,10 @@ bool init_firmware(struct net_device *dev)
mapped_file = pfirmware->firmware_buf;
file_length = fw_entry->size;
} else {
-#ifdef RTL8190P
- memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size);
- mapped_file = pfirmware->firmware_buf;
- file_length = fw_entry->size;
-#else
memset(pfirmware->firmware_buf,0,128);
memcpy(&pfirmware->firmware_buf[128],fw_entry->data,fw_entry->size);
mapped_file = pfirmware->firmware_buf;
file_length = fw_entry->size + 128;
-#endif
}
pfirmware->firmware_buf_size = file_length;
}else if (rst_opt == OPT_FIRMWARE_RESET ) {
@@ -340,15 +330,6 @@ bool init_firmware(struct net_device *dev)
* will set polling bit when firmware code is also configured
*/
pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
-#ifdef RTL8190P
- // To initialize IMEM, CPU move code from 0x80000080, hence, we send 0x80 byte packet
- rt_status = fwSendNullPacket(dev, RTL8190_CPU_START_OFFSET);
- if (rt_status != true)
- {
- RT_TRACE(COMP_INIT, "fwSendNullPacket() fail ! \n");
- goto download_firmware_fail;
- }
-#endif
//mdelay(1000);
/*
* To initialize IMEM, CPU move code from 0x80000080,
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index a6fac081e42..39cd426bc5e 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1713,7 +1713,7 @@ void InitialGain819xUsb(struct net_device *dev, u8 Operation)
queue_delayed_work(priv->priv_wq, &priv->initialgain_operate_wq, 0);
}
-extern void InitialGainOperateWorkItemCallBack(struct work_struct *work)
+void InitialGainOperateWorkItemCallBack(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
work);
@@ -1799,12 +1799,6 @@ extern void InitialGainOperateWorkItemCallBack(struct work_struct *work)
RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",
priv->initgain_backup.cca);
-#ifdef RTL8190P
- SetTxPowerLevel8190(Adapter, priv->CurrentChannel);
-#endif
-#ifdef RTL8192E
- SetTxPowerLevel8190(Adapter, priv->CurrentChannel);
-#endif
rtl8192_phy_setTxPower(dev, priv->ieee80211->current_network.channel);
if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
index f3c352a10fe..66cbe3f9caf 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ b/drivers/staging/rtl8192u/r819xU_phy.h
@@ -23,7 +23,7 @@ typedef struct _SwChnlCmd {
u32 Para1;
u32 Para2;
u32 msDelay;
-} __attribute__ ((packed)) SwChnlCmd;
+} __packed SwChnlCmd;
extern u32 rtl819XMACPHY_Array_PG[];
extern u32 rtl819XPHY_REG_1T2RArray[];
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 6e81ba0eaf1..82a77b45fb5 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -141,7 +141,7 @@ static uint loadparam(struct _adapter *padapter, struct net_device *pnetdev)
registry_par->ssid.SsidLength = 3;
registry_par->channel = (u8)channel;
registry_par->wireless_mode = (u8)wireless_mode;
- registry_par->vrtl_carrier_sense = (u8)vrtl_carrier_sense ;
+ registry_par->vrtl_carrier_sense = (u8)vrtl_carrier_sense;
registry_par->vcs_type = (u8)vcs_type;
registry_par->frag_thresh = (u16)frag_thresh;
registry_par->preamble = (u8)preamble;
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 088647cdca9..5b6a96e3bd7 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -62,7 +62,7 @@ static void check_hw_pbc(struct _adapter *padapter)
r8712_write8(padapter, GPIO_IO_SEL, tmp1byte);
tmp1byte = r8712_read8(padapter, GPIO_CTRL);
if (tmp1byte == 0xff)
- return ;
+ return;
if (tmp1byte&HAL_8192S_HW_GPIO_WPS_BIT) {
/* Here we only set bPbcPressed to true
* After trigger PBC, the variable will be set to false */
@@ -381,7 +381,7 @@ _next:
*pcmdbuf = cpu_to_le32((cmdsz & 0x0000ffff) |
(pcmd->cmdcode << 16) |
(pcmdpriv->cmd_seq << 24));
- pcmdbuf += 2 ; /* 8 bytes alignment */
+ pcmdbuf += 2; /* 8 bytes alignment */
memcpy((u8 *)pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
while (check_cmd_fifo(padapter, wr_sz) == _FAIL) {
if ((padapter->bDriverStopped == true) ||
@@ -471,11 +471,9 @@ void r8712_event_handle(struct _adapter *padapter, uint *peventbuf)
if (pevt_priv->event_seq > 127)
pevt_priv->event_seq = 0;
peventbuf = peventbuf + 2; /* move to event content, 8 bytes alignment */
- if (peventbuf) {
- event_callback = wlanevents[evt_code].event_callback;
- if (event_callback)
- event_callback(padapter, (u8 *)peventbuf);
- }
+ event_callback = wlanevents[evt_code].event_callback;
+ if (event_callback)
+ event_callback(padapter, (u8 *)peventbuf);
pevt_priv->evt_done_cnt++;
_abort_event_:
return;
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 377fca90580..c9eeb4270ab 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -231,7 +231,7 @@ u16 r8712_efuse_get_current_size(struct _adapter *padapter)
/* read next header */
efuse_addr = efuse_addr + (word_cnts * 2) + 1;
} else
- bContinual = false ;
+ bContinual = false;
}
return efuse_addr;
}
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index d59a74aa304..ea965370d1a 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -108,7 +108,7 @@ void r8712_free_recv_priv(struct recv_priv *precvpriv)
struct _adapter *padapter = precvpriv->adapter;
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
- for (i = 0; i < NR_RECVBUFF ; i++) {
+ for (i = 0; i < NR_RECVBUFF; i++) {
r8712_os_recvbuf_resource_free(padapter, precvbuf);
precvbuf++;
}
@@ -268,7 +268,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
u8 *psta_addr;
struct recv_frame_hdr *pfhdr;
struct sta_info *psta;
- struct sta_priv *pstapriv ;
+ struct sta_priv *pstapriv;
struct list_head *phead;
union recv_frame *prtnframe = NULL;
struct __queue *pfree_recv_queue, *pdefrag_q;
@@ -849,7 +849,7 @@ static void query_rx_phy_status(struct _adapter *padapter,
} else {
/* (1)Get RSSI for HT rate */
for (i = 0; i < ((padapter->registrypriv.rf_config) &
- 0x0f) ; i++) {
+ 0x0f); i++) {
rf_rx_num++;
rx_pwr[i] = ((pphy_head[PHY_STAT_GAIN_TRSW_SHT + i]
& 0x3F) * 2) - 110;
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index f16307f5d82..7e324315e6a 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -965,7 +965,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
psta = r8712_alloc_stainfo(&padapter->stapriv,
pnetwork->MacAddress);
if (psta == NULL)
- goto createbss_cmd_fail ;
+ goto createbss_cmd_fail;
}
r8712_indicate_connect(padapter);
} else {
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index d58aa7e3b15..9fec6eda873 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -820,7 +820,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
intReturn = true;
blInserted = false;
/* overwrite PMKID */
- for (j = 0 ; j < NUM_PMKID_CACHE; j++) {
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].Bssid,
strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite
@@ -845,7 +845,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
bUsed = true;
- psecuritypriv->PMKIDIndex++ ;
+ psecuritypriv->PMKIDIndex++;
if (psecuritypriv->PMKIDIndex == NUM_PMKID_CACHE)
psecuritypriv->PMKIDIndex = 0;
}
@@ -1598,7 +1598,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
wep.Length = wep.KeyLength +
FIELD_OFFSET(struct NDIS_802_11_WEP, KeyMaterial);
} else {
- wep.KeyLength = 0 ;
+ wep.KeyLength = 0;
if (keyindex_provided == 1) { /* set key_id only, no given
* KeyMaterial(erq->length==0).*/
padapter->securitypriv.PrivacyKeyIndex = key;
@@ -1880,7 +1880,7 @@ static int r8711_wx_write32(struct net_device *dev,
u32 data32;
get_user(addr, (u32 __user *)wrqu->data.pointer);
- data32 = ((u32)wrqu->data.length<<16) | (u32)wrqu->data.flags ;
+ data32 = ((u32)wrqu->data.length<<16) | (u32)wrqu->data.flags;
r8712_write32(padapter, addr, data32);
return 0;
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index 5d6d55e7b38..ac0baff7f09 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -153,7 +153,7 @@ uint oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv)
padapter->recvpriv.rx_icv_err;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return RNDIS_STATUS_INVALID_LENGTH ;
+ return RNDIS_STATUS_INVALID_LENGTH;
return RNDIS_STATUS_SUCCESS;
}
@@ -169,7 +169,7 @@ uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *padapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- u32 preamblemode = 0 ;
+ u32 preamblemode = 0;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -522,7 +522,7 @@ uint oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv)
else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)
ulInfo = ADHOCMODE;
else
- ulInfo = NOTASSOCIATED ;
+ ulInfo = NOTASSOCIATED;
*(u32 *)poid_par_priv->information_buf = ulInfo;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
return RNDIS_STATUS_SUCCESS;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 659615481f6..8fa0f9d49a8 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -1641,7 +1641,7 @@ void r8712_update_registrypriv_dev_network(struct _adapter *adapter)
struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
pdev_network->Privacy = cpu_to_le32(psecuritypriv->PrivacyAlgrthm
- > 0 ? 1 : 0) ; /* adhoc no 802.1x */
+ > 0 ? 1 : 0); /* adhoc no 802.1x */
pdev_network->Rssi = 0;
switch (pregistrypriv->wireless_mode) {
case WIRELESS_11B:
@@ -1786,7 +1786,7 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
psta = r8712_get_stainfo(&padapter->stapriv,
pcur_network->network.MacAddress);
if (psta) {
- for (i = 0; i < 16 ; i++) {
+ for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
preorder_ctrl->indicate_seq = 0xffff;
preorder_ctrl->wend_b = 0xffff;
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 5638d5e065f..0563318a19f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -110,7 +110,7 @@ static u32 fw_iocmd_read(struct _adapter *pAdapter, struct IOCMD_STRUCT iocmd)
u16 iocmd_value = iocmd.value;
u8 iocmd_idx = iocmd.index;
- cmd32 = (iocmd_class << 24) | (iocmd_value << 8) | iocmd_idx ;
+ cmd32 = (iocmd_class << 24) | (iocmd_value << 8) | iocmd_idx;
if (r8712_fw_cmd(pAdapter, cmd32))
r8712_fw_cmd_data(pAdapter, &val32, 1);
else
@@ -128,7 +128,7 @@ static u8 fw_iocmd_write(struct _adapter *pAdapter,
r8712_fw_cmd_data(pAdapter, &value, 0);
msleep(100);
- cmd32 = (iocmd_class << 24) | (iocmd_value << 8) | iocmd_idx ;
+ cmd32 = (iocmd_class << 24) | (iocmd_value << 8) | iocmd_idx;
return r8712_fw_cmd(pAdapter, cmd32);
}
@@ -189,8 +189,8 @@ u32 r8712_rf_reg_read(struct _adapter *pAdapter, u8 path, u8 offset)
u32 rf_data;
struct IOCMD_STRUCT iocmd;
- iocmd.cmdclass = IOCMD_CLASS_BB_RF ;
- iocmd.value = rf_addr ;
+ iocmd.cmdclass = IOCMD_CLASS_BB_RF;
+ iocmd.value = rf_addr;
iocmd.index = IOCMD_RF_READ_IDX;
rf_data = fw_iocmd_read(pAdapter, iocmd);
return rf_data;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index e33fd6db246..5349669707c 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -835,7 +835,7 @@ static void mix_column(u8 *in, u8 *out)
u8 temp[4];
u8 tempb[4];
- for (i = 0 ; i < 4; i++) {
+ for (i = 0; i < 4; i++) {
if ((in[i] & 0x80) == 0x80)
add1b[i] = 0x1b;
else
@@ -1187,7 +1187,7 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
length = pxmitpriv->frag_len -
pattrib->hdrlen -
pattrib->iv_len -
- pattrib->icv_len ;
+ pattrib->icv_len;
aes_cipher(prwskey, pattrib->
hdrlen, pframe, length);
pframe += pxmitpriv->frag_len;
@@ -1315,7 +1315,7 @@ static sint aes_decipher(u8 *key, uint hdrlen,
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
}
- for (j = 0 ; j < 8; j++)
+ for (j = 0; j < 8; j++)
mic[j] = aes_out[j];
/* Insert MIC into payload */
for (j = 0; j < 8; j++)
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 1247b3d9719..8db6849d4b2 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -138,7 +138,7 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
}
phash_list = &(pstapriv->sta_hash[index]);
list_insert_tail(&psta->hash_list, phash_list);
- pstapriv->asoc_sta_count++ ;
+ pstapriv->asoc_sta_count++;
/* For the SMC router, the sequence number of first packet of WPS handshake
* will be 0. In this case, this packet will be dropped by recv_decache function
@@ -149,7 +149,7 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
&wRxSeqInitialValue, 2);
/* for A-MPDU Rx reordering buffer control */
- for (i = 0; i < 16 ; i++) {
+ for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
preorder_ctrl->padapter = pstapriv->padapter;
preorder_ctrl->indicate_seq = 0xffff;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index c812d6c7dc3..dbefa43e4c2 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -353,11 +353,6 @@ static void disable_ht_for_spec_devid(const struct usb_device_id *pdid,
}
}
-static u8 key_2char2num(u8 hch, u8 lch)
-{
- return (hex_to_bin(hch) << 4) | hex_to_bin(lch);
-}
-
/*
* drv_init() - a device potentially for us
*
@@ -465,16 +460,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
r8712_efuse_pg_packet_read(padapter, offset,
&pdata[i]);
- if (r8712_initmac) {
- /* Users specify the mac address */
- int jj, kk;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN;
- jj++, kk += 3)
- mac[jj] =
- key_2char2num(r8712_initmac[kk],
- r8712_initmac[kk + 1]);
- } else {
+ if (!r8712_initmac || !mac_pton(r8712_initmac, mac)) {
/* Use the mac address stored in the Efuse
* offset = 0x12 for usb in efuse
*/
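
The replacement above swaps a hand-rolled hex walk (key_2char2num() plus the jj/kk loop) for mac_pton(), which both parses and validates the colon-separated address. A short sketch of the resulting control flow, with a hypothetical parameter name standing in for r8712_initmac:

#include <linux/kernel.h>	/* mac_pton() lives here in this era */
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Hypothetical module parameter standing in for r8712_initmac. */
static char *demo_initmac;

static void demo_pick_mac(u8 *mac, const u8 *efuse_mac)
{
	/* mac_pton() parses "aa:bb:cc:dd:ee:ff" into a 6-byte array and
	 * returns false on malformed input, so one test covers both the
	 * "no parameter given" and "bad parameter" cases. */
	if (!demo_initmac || !mac_pton(demo_initmac, mac))
		memcpy(mac, efuse_mac, ETH_ALEN);
}
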
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 4d22bb7008f..0ac9130faf6 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -51,7 +51,7 @@ void _r8712_open_pktfile(_pkt *pktptr, struct pkt_file *pfile)
pfile->pkt = pktptr;
pfile->cur_addr = pfile->buf_start = pktptr->data;
pfile->pkt_len = pfile->buf_len = pktptr->len;
- pfile->cur_buffer = pfile->buf_start ;
+ pfile->cur_buffer = pfile->buf_start;
}
uint _r8712_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index 61087054640..a474eede70a 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -973,7 +973,7 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
rts51x_pp_status(chip, lun, status, 32);
- buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(status));
rts51x_set_xfer_buf(status, buf_len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
@@ -988,7 +988,7 @@ static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
rts51x_read_status(chip, lun, rts51x_status, 16);
- buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status));
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(rts51x_status));
rts51x_set_xfer_buf(rts51x_status, buf_len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
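
min_t() is used above because scsi_bufflen() returns unsigned int while sizeof() yields size_t, and the kernel's min() rejects mixed-type comparisons; the old code papered over that with an inline cast. A tiny sketch of the difference, with hypothetical arguments:

#include <linux/kernel.h>

/* Hypothetical lengths mirroring scsi_bufflen() (unsigned int) and
 * sizeof(status) (size_t) in the hunk above. */
static unsigned int demo_copy_len(unsigned int buf_len, size_t status_size)
{
	/*
	 * Plain min() insists both operands have the same type; min_t()
	 * names the comparison type once and casts both sides to it,
	 * which reads better than a cast buried in the argument list.
	 */
	return min_t(unsigned int, buf_len, status_size);
}
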
diff --git a/drivers/staging/sb105x/sb_mp_register.h b/drivers/staging/sb105x/sb_mp_register.h
index 304e1bcd1e3..16de497415e 100644
--- a/drivers/staging/sb105x/sb_mp_register.h
+++ b/drivers/staging/sb105x/sb_mp_register.h
@@ -15,8 +15,8 @@
#ifndef UART_SB105X_H
#define UART_SB105X_H
-/*
- * option register
+/*
+ * option register
*/
/* Device Information Register */
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index a10cdb17038..5cd3efff97d 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -543,14 +543,14 @@ static int mp_startup(struct sb_uart_state *state, int init_hw)
if (init_hw) {
mp_change_speed(state, NULL);
- if (info->tty->termios.c_cflag & CBAUD)
+ if (info->tty && (info->tty->termios.c_cflag & CBAUD))
uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
}
info->flags |= UIF_INITIALIZED;
-
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
}
if (retval && capable(CAP_SYS_ADMIN))
@@ -1216,7 +1216,7 @@ static int mp_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
return (inb(mp_devs[arg].option_reg_addr+MP_OPTR_IIR0+(state->port->line/8)));
}
case TIOCGGETPORTTYPE:
- ret = get_device_type(arg);;
+ ret = get_device_type(arg);
return ret;
case TIOCSMULTIECHO: /* set to multi-drop mode(RS422) or echo mode(RS485)*/
outb( ( inb(info->interface_config_addr) & ~0x03 ) | 0x01 ,
@@ -1808,10 +1808,7 @@ void mp_unregister_driver(struct uart_driver *drv)
drv->tty_driver = NULL;
- if (drv->state)
- {
- kfree(drv->state);
- }
+ kfree(drv->state);
}
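
The guard removed above is redundant because kfree(NULL) is defined to be a no-op. A one-function sketch with a hypothetical structure mirroring drv->state:

#include <linux/slab.h>

struct demo_driver {
	void *state;	/* hypothetical, mirrors drv->state above */
};

static void demo_unregister(struct demo_driver *drv)
{
	/* kfree(NULL) does nothing, so the pointer does not need to be
	 * tested before freeing; the removed guard only added noise. */
	kfree(drv->state);
	drv->state = NULL;
}
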
diff --git a/drivers/staging/sbe-2t3e3/cpld.c b/drivers/staging/sbe-2t3e3/cpld.c
index 27365f9bc0b..c6370d3d637 100644
--- a/drivers/staging/sbe-2t3e3/cpld.c
+++ b/drivers/staging/sbe-2t3e3/cpld.c
@@ -240,7 +240,7 @@ void cpld_select_panel(struct channel *sc, u32 panel)
}
-extern void cpld_set_clock(struct channel *sc, u32 mode)
+void cpld_set_clock(struct channel *sc, u32 mode)
{
if (sc->p.clock_source == mode)
return;
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
index 490a31e0fd4..b9262a78dd6 100644
--- a/drivers/staging/sep/sep_crypto.c
+++ b/drivers/staging/sep/sep_crypto.c
@@ -1134,7 +1134,7 @@ static int sep_crypto_block_data(struct ablkcipher_request *req)
if (int_error < 0) {
dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
- return -ENOMEM;
+ return int_error;
} else if (int_error == 1) {
ta_ctx->src_sg = new_sg;
ta_ctx->src_sg_hold = new_sg;
@@ -1149,7 +1149,7 @@ static int sep_crypto_block_data(struct ablkcipher_request *req)
if (int_error < 0) {
dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
int_error);
- return -ENOMEM;
+ return int_error;
} else if (int_error == 1) {
ta_ctx->dst_sg = new_sg;
ta_ctx->dst_sg_hold = new_sg;
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index 6a98a208bbf..1e80a4013b8 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -493,8 +493,7 @@ int sep_free_dma_table_data_handler(struct sep_device *sep,
* then we skip this step altogether as restricted
* memory is not available to the o/s at all.
*/
- if (((*dma_ctx)->secure_dma == false) &&
- (dma->out_map_array)) {
+ if (!(*dma_ctx)->secure_dma && dma->out_map_array) {
for (count = 0; count < dma->out_num_pages; count++) {
dma_unmap_page(&sep->pdev->dev,
@@ -515,8 +514,7 @@ int sep_free_dma_table_data_handler(struct sep_device *sep,
}
/* Again, we do this only for non secure dma */
- if (((*dma_ctx)->secure_dma == false) &&
- (dma->out_page_array)) {
+ if (!(*dma_ctx)->secure_dma && dma->out_page_array) {
for (count = 0; count < dma->out_num_pages; count++) {
if (!PageReserved(dma->out_page_array[count]))
@@ -1263,13 +1261,8 @@ static int sep_lock_user_pages(struct sep_device *sep,
}
/* Convert the application virtual address into a set of physical */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr,
- num_pages,
- ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
- 0, page_array, NULL);
-
- up_read(&current->mm->mmap_sem);
+ result = get_user_pages_fast(app_virt_addr, num_pages,
+ ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);
/* Check the number of pages locked - if not all then exit with error */
if (result != num_pages) {
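
get_user_pages_fast() replaces the mmap_sem-wrapped get_user_pages() call above; it handles any locking it needs internally and can avoid it entirely on its fast path. A hedged sketch of the calling convention as it existed in this kernel generation, with a hypothetical helper name (the write flag mirrors the IN/OUT distinction above):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages,
				bool writable, struct page ***pagesp)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* No down_read(&current->mm->mmap_sem) around the call; the fast
	 * variant takes care of the locking itself. */
	pinned = get_user_pages_fast(uaddr, nr_pages, writable ? 1 : 0, pages);
	if (pinned != nr_pages) {
		/* Release whatever was pinned before reporting failure. */
		while (pinned > 0)
			page_cache_release(pages[--pinned]);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	return 0;
}
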
@@ -1952,7 +1945,7 @@ static int sep_prepare_input_dma_table(struct sep_device *sep,
}
/* Check if the pages are in Kernel Virtual Address layout */
- if (is_kva == true)
+ if (is_kva)
error = sep_lock_kernel_pages(sep, app_virt_addr,
data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
dma_ctx);
@@ -2446,7 +2439,7 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
/* Lock the pages of the buffer and translate them to pages */
- if (is_kva == true) {
+ if (is_kva) {
dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
current->pid);
error = sep_lock_kernel_pages(sep, app_virt_in_addr,
@@ -2490,7 +2483,7 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
goto end_function;
}
- if (dma_ctx->secure_dma == true) {
+ if (dma_ctx->secure_dma) {
/* secure_dma requires use of non accessible memory */
dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
current->pid);
@@ -2727,11 +2720,11 @@ int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
dcb_table_ptr->tail_data_size = 0;
dcb_table_ptr->out_vr_tail_pt = 0;
- if (isapplet == true) {
+ if (isapplet) {
/* Check if there is enough data for DMA operation */
if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
- if (is_kva == true) {
+ if (is_kva) {
error = -ENODEV;
goto end_function_error;
} else {
@@ -2772,7 +2765,7 @@ int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
if (tail_size) {
if (tail_size > sizeof(dcb_table_ptr->tail_data))
return -EINVAL;
- if (is_kva == true) {
+ if (is_kva) {
error = -ENODEV;
goto end_function_error;
} else {
@@ -2883,7 +2876,7 @@ static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
return 0;
- if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
+ if (!(*dma_ctx)->secure_dma && isapplet) {
dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
current->pid);
@@ -2902,7 +2895,7 @@ static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
pt_hold = (unsigned long)dcb_table_ptr->
out_vr_tail_pt;
tail_pt = (void *)pt_hold;
- if (is_kva == true) {
+ if (is_kva) {
error = -ENODEV;
break;
} else {
@@ -4080,6 +4073,7 @@ static int sep_register_driver_with_fs(struct sep_device *sep)
if (ret_val) {
dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
ret_val);
+ misc_deregister(&sep->miscdev_sep);
return ret_val;
}
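
The added misc_deregister() closes a leak: the function had already registered the misc device, so bailing out on the sysfs failure without unwinding left the /dev node behind. A minimal sketch of the register/unwind-on-error pattern, with hypothetical device and attribute names:

#include <linux/device.h>
#include <linux/miscdevice.h>

static struct miscdevice demo_miscdev;		/* hypothetical */
static struct device_attribute demo_attr;	/* hypothetical */

static int demo_register(void)
{
	int ret;

	ret = misc_register(&demo_miscdev);
	if (ret)
		return ret;

	ret = device_create_file(demo_miscdev.this_device, &demo_attr);
	if (ret) {
		/* Undo the earlier step; otherwise the misc device (and
		 * its /dev node) is leaked on this error path. */
		misc_deregister(&demo_miscdev);
		return ret;
	}
	return 0;
}
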
diff --git a/drivers/staging/silicom/bp_mod.h b/drivers/staging/silicom/bp_mod.h
index cfa1f43fa4a..8154a7bf050 100644
--- a/drivers/staging/silicom/bp_mod.h
+++ b/drivers/staging/silicom/bp_mod.h
@@ -22,7 +22,7 @@ do { \
int i; \
if (1) { \
for (i = 0; i < 1000; i++) { \
- udelay(x) ; \
+ udelay(x); \
} \
} else { \
msleep(x); \
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 495272d0134..39dc92a271a 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -1,11 +1,11 @@
/******************************************************************************/
/* */
-/* Bypass Control utility, Copyright (c) 2005-20011 Silicom */
+/* Bypass Control utility, Copyright (c) 2005-2011 Silicom */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, located in the file LICENSE. */
-/* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. */
+/* Copyright(c) 2007 - 2009, 2013 Intel Corporation. All rights reserved. */
/* */
/* */
/******************************************************************************/
@@ -124,80 +124,60 @@ int bp_proc_create(void);
int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
int get_dev_idx_bsf(int bus, int slot, int func);
-static unsigned long str_to_hex(char *p);
+static int bp_get_dev_idx_bsf(struct net_device *dev, int *index)
+{
+ struct ethtool_drvinfo drvinfo = {0};
+ char *buf;
+ int bus, slot, func;
+
+ if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+ dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+ else
+ return -EOPNOTSUPP;
+
+ if (!drvinfo.bus_info)
+ return -ENODATA;
+ if (!strcmp(drvinfo.bus_info, "N/A"))
+ return -ENODATA;
+
+ buf = strchr(drvinfo.bus_info, ':');
+ if (!buf)
+ return -EINVAL;
+ buf++;
+ if (sscanf(buf, "%x:%x.%x", &bus, &slot, &func) != 3)
+ return -EINVAL;
+
+ *index = get_dev_idx_bsf(bus, slot, func);
+ return 0;
+}
+
static int bp_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
static struct bpctl_dev *pbpctl_dev, *pbpctl_dev_m;
int dev_num = 0, ret = 0, ret_d = 0, time_left = 0;
+
/* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */
/* return NOTIFY_DONE; */
if (!dev)
return NOTIFY_DONE;
- if (event == NETDEV_REGISTER) {
- {
- struct ethtool_drvinfo drvinfo;
- char cbuf[32];
- char *buf = NULL;
- char res[10];
- int i = 0, ifindex, idx_dev = 0;
- int bus = 0, slot = 0, func = 0;
- ifindex = dev->ifindex;
-
- memset(res, 0, 10);
- memset(&drvinfo, 0, sizeof(struct ethtool_drvinfo));
-
- if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
- } else
- return NOTIFY_DONE;
- if (!drvinfo.bus_info)
- return NOTIFY_DONE;
- if (!strcmp(drvinfo.bus_info, "N/A"))
- return NOTIFY_DONE;
- memcpy(&cbuf, drvinfo.bus_info, 32);
- buf = &cbuf[0];
- while (*buf++ != ':')
- ;
- for (i = 0; i < 10; i++, buf++) {
- if (*buf == ':')
- break;
- res[i] = *buf;
-
- }
- buf++;
- bus = str_to_hex(res);
- memset(res, 0, 10);
-
- for (i = 0; i < 10; i++, buf++) {
- if (*buf == '.')
- break;
- res[i] = *buf;
-
- }
- buf++;
- slot = str_to_hex(res);
- func = str_to_hex(buf);
- idx_dev = get_dev_idx_bsf(bus, slot, func);
-
- if (idx_dev != -1) {
+ if (event == NETDEV_REGISTER) {
+ int idx_dev;
- bpctl_dev_arr[idx_dev].ifindex = ifindex;
- bpctl_dev_arr[idx_dev].ndev = dev;
+ if (bp_get_dev_idx_bsf(dev, &idx_dev))
+ return NOTIFY_DONE;
- bypass_proc_remove_dev_sd(&bpctl_dev_arr
- [idx_dev]);
- bypass_proc_create_dev_sd(&bpctl_dev_arr
- [idx_dev]);
+ if (idx_dev == -1)
+ return NOTIFY_DONE;
- }
+ bpctl_dev_arr[idx_dev].ifindex = dev->ifindex;
+ bpctl_dev_arr[idx_dev].ndev = dev;
- }
+ bypass_proc_remove_dev_sd(&bpctl_dev_arr[idx_dev]);
+ bypass_proc_create_dev_sd(&bpctl_dev_arr[idx_dev]);
return NOTIFY_DONE;
-
}
if (event == NETDEV_UNREGISTER) {
int idx_dev = 0;
@@ -5269,36 +5249,6 @@ int get_dev_idx_bsf(int bus, int slot, int func)
return -1;
}
-static void str_low(char *str)
-{
- int i;
-
- for (i = 0; i < strlen(str); i++)
- if ((str[i] >= 65) && (str[i] <= 90))
- str[i] += 32;
-}
-
-static unsigned long str_to_hex(char *p)
-{
- unsigned long hex = 0;
- unsigned long length = strlen(p), shift = 0;
- unsigned char dig = 0;
-
- str_low(p);
- length = strlen(p);
-
- if (length == 0)
- return 0;
-
- do {
- dig = p[--length];
- dig = dig < 'a' ? (dig - '0') : (dig - 'a' + 0xa);
- hex |= (dig << shift);
- shift += 4;
- } while (length);
- return hex;
-}
-
static int get_dev_idx(int ifindex)
{
int idx_dev = 0;
@@ -5329,70 +5279,26 @@ static struct bpctl_dev *get_dev_idx_p(int ifindex)
static void if_scan_init(void)
{
- int idx_dev = 0;
struct net_device *dev;
- int ifindex;
+
/* rcu_read_lock(); */
/* rtnl_lock(); */
/* rcu_read_lock(); */
for_each_netdev(&init_net, dev) {
+ int idx_dev;
- struct ethtool_drvinfo drvinfo;
- char cbuf[32];
- char *buf = NULL;
- char res[10];
- int i = 0;
- int bus = 0, slot = 0, func = 0;
- ifindex = dev->ifindex;
-
- memset(res, 0, 10);
- memset(&drvinfo, 0, sizeof(struct ethtool_drvinfo));
-
- if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
- } else
+ if (bp_get_dev_idx_bsf(dev, &idx_dev))
continue;
- if (!strcmp(drvinfo.bus_info, "N/A"))
- continue;
- memcpy(&cbuf, drvinfo.bus_info, 32);
- buf = &cbuf[0];
- while (*buf++ != ':')
- ;
- for (i = 0; i < 10; i++, buf++) {
- if (*buf == ':')
- break;
- res[i] = *buf;
-
- }
- buf++;
- bus = str_to_hex(res);
- memset(res, 0, 10);
-
- for (i = 0; i < 10; i++, buf++) {
- if (*buf == '.')
- break;
- res[i] = *buf;
-
- }
- buf++;
- slot = str_to_hex(res);
- func = str_to_hex(buf);
- idx_dev = get_dev_idx_bsf(bus, slot, func);
-
- if (idx_dev != -1) {
-
- bpctl_dev_arr[idx_dev].ifindex = ifindex;
- bpctl_dev_arr[idx_dev].ndev = dev;
-
- }
+ if (idx_dev == -1)
+ continue;
+ bpctl_dev_arr[idx_dev].ifindex = dev->ifindex;
+ bpctl_dev_arr[idx_dev].ndev = dev;
}
/* rtnl_unlock(); */
/* rcu_read_unlock(); */
-
}
static long device_ioctl(struct file *file, /* see include/linux/fs.h */
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 869dcd3b385..652272b96a5 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -62,6 +62,7 @@
#define SLIC_OFFLOAD_IP_CHECKSUM 1
#define STATS_TIMER_INTERVAL 2
#define PING_TIMER_INTERVAL 1
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/string.h>
@@ -791,8 +792,8 @@ static bool slic_mac_filter(struct adapter *adapter,
struct mcast_address *mcaddr = adapter->mcastaddrs;
while (mcaddr) {
- if (!compare_ether_addr(mcaddr->address,
- ether_frame->ether_dhost)) {
+ if (ether_addr_equal(mcaddr->address,
+ ether_frame->ether_dhost)) {
adapter->rcv_multicasts++;
netdev->stats.multicast++;
return true;
@@ -1834,7 +1835,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
#endif
seq_printf(seq, "driver_version : %s\n", slic_proc_version);
- seq_printf(seq, "Microcode versions: \n");
+ seq_puts(seq, "Microcode versions: \n");
seq_printf(seq, " Gigabit (gb) : %s %s\n",
MOJAVE_UCODE_VERS_STRING, MOJAVE_UCODE_VERS_DATE);
seq_printf(seq, " Gigabit Receiver : %s %s\n",
@@ -1865,8 +1866,8 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
config->macinfo[i].macaddrA[4],
config->macinfo[i].macaddrA[5]);
}
- seq_printf(seq, " IF Init State Duplex/Speed irq\n");
- seq_printf(seq, " -------------------------------\n");
+ seq_puts(seq, " IF Init State Duplex/Speed irq\n");
+ seq_puts(seq, " -------------------------------\n");
for (i = 0; i < card->adapters_allocated; i++) {
struct adapter *adapter;
@@ -1909,7 +1910,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
switch (config->FruFormat) {
case ATK_FRU_FORMAT:
{
- seq_printf(seq,
+ seq_puts(seq,
"Vendor : Alacritech, Inc.\n");
seq_printf(seq,
"Assembly # : %c%c%c%c%c%c\n",
@@ -1942,9 +1943,9 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
default:
{
- seq_printf(seq,
+ seq_puts(seq,
"Vendor : Alacritech, Inc.\n");
- seq_printf(seq,
+ seq_puts(seq,
"Serial # : Empty FRU\n");
break;
}
@@ -1953,7 +1954,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
switch (config->OEMFruFormat) {
case VENDOR1_FRU_FORMAT:
{
- seq_printf(seq, "FRU Information:\n");
+ seq_puts(seq, "FRU Information:\n");
seq_printf(seq, " Commodity # : %c\n",
oemfru[0]);
seq_printf(seq,
@@ -1976,7 +1977,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
case VENDOR2_FRU_FORMAT:
{
- seq_printf(seq, "FRU Information:\n");
+ seq_puts(seq, "FRU Information:\n");
seq_printf(seq,
" Part # : "
"%c%c%c%c%c%c%c%c\n",
@@ -1999,12 +2000,12 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
case VENDOR3_FRU_FORMAT:
{
- seq_printf(seq, "FRU Information:\n");
+ seq_puts(seq, "FRU Information:\n");
}
case VENDOR4_FRU_FORMAT:
{
- seq_printf(seq, "FRU Information:\n");
+ seq_puts(seq, "FRU Information:\n");
seq_printf(seq,
" FRU Number : "
"%c%c%c%c%c%c%c%c\n",
@@ -2231,14 +2232,8 @@ static void slic_debug_card_destroy(struct sliccard *card)
if (adapter)
slic_debug_adapter_destroy(adapter);
}
- if (card->debugfs_cardinfo) {
- debugfs_remove(card->debugfs_cardinfo);
- card->debugfs_cardinfo = NULL;
- }
- if (card->debugfs_dir) {
- debugfs_remove(card->debugfs_dir);
- card->debugfs_dir = NULL;
- }
+ debugfs_remove(card->debugfs_cardinfo);
+ debugfs_remove(card->debugfs_dir);
}
static void slic_debug_init(void)
@@ -2256,10 +2251,7 @@ static void slic_debug_init(void)
static void slic_debug_cleanup(void)
{
- if (slic_debugfs) {
- debugfs_remove(slic_debugfs);
- slic_debugfs = NULL;
- }
+ debugfs_remove(slic_debugfs);
}
/*
@@ -2333,7 +2325,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
/* Check to see if it already exists */
mlist = adapter->mcastaddrs;
while (mlist) {
- if (!compare_ether_addr(mlist->address, address))
+ if (ether_addr_equal(mlist->address, address))
return 0;
mlist = mlist->next;
}
@@ -2627,6 +2619,67 @@ static void slic_xmit_complete(struct adapter *adapter)
adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
}
+static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
+ struct net_device *dev)
+{
+ if (isr & ~ISR_IO) {
+ if (isr & ISR_ERR) {
+ adapter->error_interrupts++;
+ if (isr & ISR_RMISS) {
+ int count;
+ int pre_count;
+ int errors;
+
+ struct slic_rcvqueue *rcvq =
+ &adapter->rcvqueue;
+
+ adapter->error_rmiss_interrupts++;
+
+ if (!rcvq->errors)
+ rcv_count = rcvq->count;
+ pre_count = rcvq->count;
+ errors = rcvq->errors;
+
+ while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
+ count = slic_rcvqueue_fill(adapter);
+ if (!count)
+ break;
+ }
+ } else if (isr & ISR_XDROP) {
+ dev_err(&dev->dev,
+ "isr & ISR_ERR [%x] "
+ "ISR_XDROP \n", isr);
+ } else {
+ dev_err(&dev->dev,
+ "isr & ISR_ERR [%x]\n",
+ isr);
+ }
+ }
+
+ if (isr & ISR_LEVENT) {
+ adapter->linkevent_interrupts++;
+ slic_link_event_handler(adapter);
+ }
+
+ if ((isr & ISR_UPC) || (isr & ISR_UPCERR) ||
+ (isr & ISR_UPCBSY)) {
+ adapter->upr_interrupts++;
+ slic_upr_request_complete(adapter, isr);
+ }
+ }
+
+ if (isr & ISR_RCV) {
+ adapter->rcv_interrupts++;
+ slic_rcv_handler(adapter);
+ }
+
+ if (isr & ISR_CMD) {
+ adapter->xmit_interrupts++;
+ slic_xmit_complete(adapter);
+ }
+}
+
+
static irqreturn_t slic_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
@@ -2641,64 +2694,7 @@ static irqreturn_t slic_interrupt(int irq, void *dev_id)
adapter->num_isrs++;
switch (adapter->card->state) {
case CARD_UP:
- if (isr & ~ISR_IO) {
- if (isr & ISR_ERR) {
- adapter->error_interrupts++;
- if (isr & ISR_RMISS) {
- int count;
- int pre_count;
- int errors;
-
- struct slic_rcvqueue *rcvq =
- &adapter->rcvqueue;
-
- adapter->
- error_rmiss_interrupts++;
- if (!rcvq->errors)
- rcv_count = rcvq->count;
- pre_count = rcvq->count;
- errors = rcvq->errors;
-
- while (rcvq->count <
- SLIC_RCVQ_FILLTHRESH) {
- count =
- slic_rcvqueue_fill
- (adapter);
- if (!count)
- break;
- }
- } else if (isr & ISR_XDROP) {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x] "
- "ISR_XDROP \n", isr);
- } else {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x]\n",
- isr);
- }
- }
-
- if (isr & ISR_LEVENT) {
- adapter->linkevent_interrupts++;
- slic_link_event_handler(adapter);
- }
-
- if ((isr & ISR_UPC) ||
- (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- }
-
- if (isr & ISR_RCV) {
- adapter->rcv_interrupts++;
- slic_rcv_handler(adapter);
- }
-
- if (isr & ISR_CMD) {
- adapter->xmit_interrupts++;
- slic_xmit_complete(adapter);
- }
+ slic_interrupt_card_up(isr, adapter, dev);
break;
case CARD_DOWN:
@@ -3645,16 +3641,15 @@ static int slic_entry_probe(struct pci_dev *pcidev,
return err;
if (slic_debug > 0 && did_version++ == 0) {
- printk(KERN_DEBUG "%s\n", slic_banner);
- printk(KERN_DEBUG "%s\n", slic_proc_version);
+ dev_dbg(&pcidev->dev, "%s\n", slic_banner);
+ dev_dbg(&pcidev->dev, "%s\n", slic_proc_version);
}
if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for "
- "consistent allocations\n");
+ dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n");
goto err_out_disable_pci;
}
} else {
@@ -3776,8 +3771,7 @@ static int __init slic_module_init(void)
slic_init_driver();
if (debug >= 0 && slic_debug != debug)
- printk(KERN_DEBUG KBUILD_MODNAME ": debug level is %d.\n",
- debug);
+ pr_debug("debug level is %d.\n", debug);
if (debug >= 0)
slic_debug = debug;
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
index 8add64b1cb0..ba199ffff17 100644
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ b/drivers/staging/sm7xxfb/sm7xxfb.c
@@ -130,7 +130,8 @@ static int __init sm7xx_vga_setup(char *options)
for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) {
if (strstr(options, vesa_mode_table[i].index)) {
smtc_scr_info.lfb_width = vesa_mode_table[i].lfb_width;
- smtc_scr_info.lfb_height = vesa_mode_table[i].lfb_height;
+ smtc_scr_info.lfb_height =
+ vesa_mode_table[i].lfb_height;
smtc_scr_info.lfb_depth = vesa_mode_table[i].lfb_depth;
return 0;
}
@@ -259,8 +260,7 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
if (sfb->fb.var.bits_per_pixel == 16) {
u32 *pal = sfb->fb.pseudo_palette;
val = chan_to_field(red, &sfb->fb.var.red);
- val |= chan_to_field(green, \
- &sfb->fb.var.green);
+ val |= chan_to_field(green, &sfb->fb.var.green);
val |= chan_to_field(blue, &sfb->fb.var.blue);
#ifdef __BIG_ENDIAN
pal[regno] =
@@ -274,8 +274,7 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
} else {
u32 *pal = sfb->fb.pseudo_palette;
val = chan_to_field(red, &sfb->fb.var.red);
- val |= chan_to_field(green, \
- &sfb->fb.var.green);
+ val |= chan_to_field(green, &sfb->fb.var.green);
val |= chan_to_field(blue, &sfb->fb.var.blue);
#ifdef __BIG_ENDIAN
val =
@@ -508,9 +507,9 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
/* init SEQ register SR30 - SR75 */
for (i = 0; i < SIZE_SR30_SR75; i++)
- if (((i + 0x30) != 0x62) \
- && ((i + 0x30) != 0x6a) \
- && ((i + 0x30) != 0x6b))
+ if ((i + 0x30) != 0x62 &&
+ (i + 0x30) != 0x6a &&
+ (i + 0x30) != 0x6b)
smtc_seqw(i + 0x30,
VGAMode[j].Init_SR30_SR75[i]);
@@ -933,7 +932,6 @@ static void smtcfb_pci_remove(struct pci_dev *pdev)
struct smtcfb_info *sfb;
sfb = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, NULL);
smtc_unmap_smem(sfb);
smtc_unmap_mmio(sfb);
unregister_framebuffer(&sfb->fb);
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index 8c3e7a60a9b..efd6f4560d3 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -51,6 +51,7 @@ config SPEAKUP_SYNTH_ACNTSA
config SPEAKUP_SYNTH_ACNTPC
tristate "Accent PC synthesizer support"
+ depends on ISA || COMPILE_TEST
---help---
This is the Speakup driver for the accent pc
synthesizer. You can say y to build it into the kernel,
@@ -102,6 +103,7 @@ config SPEAKUP_SYNTH_DECEXT
config SPEAKUP_SYNTH_DECPC
depends on m
+ depends on ISA || COMPILE_TEST
tristate "DECtalk PC (big ISA card) synthesizer support"
---help---
@@ -124,6 +126,7 @@ config SPEAKUP_SYNTH_DECPC
config SPEAKUP_SYNTH_DTLK
tristate "DoubleTalk PC synthesizer support"
+ depends on ISA || COMPILE_TEST
---help---
This is the Speakup driver for the internal DoubleTalk
@@ -134,6 +137,7 @@ config SPEAKUP_SYNTH_DTLK
config SPEAKUP_SYNTH_KEYPC
tristate "Keynote Gold PC synthesizer support"
+ depends on ISA || COMPILE_TEST
---help---
This is the Speakup driver for the Keynote Gold
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 51bdea3a5be..e2f597ee626 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -586,6 +586,25 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
EXPORT_SYMBOL_GPL(spk_var_show);
/*
+ * Used to reset either default_pitch or default_vol.
+ */
+static inline void spk_reset_default_value(char *header_name,
+ int *synth_default_value, int idx)
+{
+ struct st_var_header *param;
+
+ if (synth && synth_default_value) {
+ param = spk_var_header_by_name(header_name);
+ if (param) {
+ spk_set_num_var(synth_default_value[idx],
+ param, E_NEW_DEFAULT);
+ spk_set_num_var(0, param, E_DEFAULT);
+ pr_info("%s reset to default value\n", param->name);
+ }
+ }
+}
+
+/*
* This function is called when a user echos a value to one of the
* variable parameters.
*/
@@ -597,7 +616,7 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
int len;
char *cp;
struct var_t *var_data;
- int value;
+ long value;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
@@ -619,61 +638,54 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
len = E_INC;
else
len = E_SET;
- value = simple_strtol(cp, NULL, 10);
- ret = spk_set_num_var(value, param, len);
+ if (kstrtol(cp, 10, &value) == 0)
+ ret = spk_set_num_var(value, param, len);
+ else
+ pr_warn("overflow or parsing error has occured");
if (ret == -ERANGE) {
var_data = param->data;
pr_warn("value for %s out of range, expect %d to %d\n",
- attr->attr.name,
+ param->name,
var_data->u.n.low, var_data->u.n.high);
}
+
+ /*
+ * If voice was just changed, we might need to reset our default
+ * pitch and volume.
+ */
+ if (param->var_id == VOICE && synth &&
+ (ret == 0 || ret == -ERESTART)) {
+ var_data = param->data;
+ value = var_data->u.n.value;
+ spk_reset_default_value("pitch", synth->default_pitch,
+ value);
+ spk_reset_default_value("vol", synth->default_vol,
+ value);
+ }
break;
case VAR_STRING:
- len = strlen(buf);
- if ((len >= 1) && (buf[len - 1] == '\n'))
+ len = strlen(cp);
+ if ((len >= 1) && (cp[len - 1] == '\n'))
--len;
- if ((len >= 2) && (buf[0] == '"') && (buf[len - 1] == '"')) {
- ++buf;
+ if ((len >= 2) && (cp[0] == '"') && (cp[len - 1] == '"')) {
+ ++cp;
len -= 2;
}
- cp = (char *) buf;
cp[len] = '\0';
- ret = spk_set_string_var(buf, param, len);
+ ret = spk_set_string_var(cp, param, len);
if (ret == -E2BIG)
pr_warn("value too long for %s\n",
- attr->attr.name);
+ param->name);
break;
default:
pr_warn("%s unknown type %d\n",
param->name, (int)param->var_type);
break;
}
- /*
- * If voice was just changed, we might need to reset our default
- * pitch and volume.
- */
- if (strcmp(attr->attr.name, "voice") == 0) {
- if (synth && synth->default_pitch) {
- param = spk_var_header_by_name("pitch");
- if (param) {
- spk_set_num_var(synth->default_pitch[value],
- param, E_NEW_DEFAULT);
- spk_set_num_var(0, param, E_DEFAULT);
- }
- }
- if (synth && synth->default_vol) {
- param = spk_var_header_by_name("vol");
- if (param) {
- spk_set_num_var(synth->default_vol[value],
- param, E_NEW_DEFAULT);
- spk_set_num_var(0, param, E_DEFAULT);
- }
- }
- }
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ret == -ERESTART)
- pr_info("%s reset to default value\n", attr->attr.name);
+ pr_info("%s reset to default value\n", param->name);
return count;
}
EXPORT_SYMBOL_GPL(spk_var_store);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 14079c4949a..47502fa5f3f 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -90,7 +90,7 @@ const struct st_bits_data spk_punc_info[] = {
{"repeats", "()", CH_RPT},
{"extended numeric", "", B_EXNUM},
{"symbols", "", B_SYM},
- {0, 0}
+ {NULL, NULL}
};
static char mark_cut_flag;
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index 80141aca712..1c8a7f4a0ef 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -223,7 +223,7 @@ static void do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = PROCSPEECH;
outb_p(ch, speakup_info.port_tts);
- if (jiffies >= jiff_max && ch == SPACE) {
+ if (time_after_eq(jiffies, jiff_max) && ch == SPACE) {
timeout = SPK_XMITR_TIMEOUT;
while (synth_writable()) {
if (!--timeout)
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 95d3132f0a3..70cf1591676 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -179,7 +179,7 @@ static void do_catch_up(struct spk_synth *synth)
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
- if ((jiffies >= jiff_max) && (ch == SPACE)) {
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 3508aee98ab..61a3ceeb0d3 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -39,7 +39,7 @@ static struct var_t vars[] = {
{ RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } },
{ PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } },
{ VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } },
- { TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, 0 } },
+ { TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, NULL } },
{ PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 9aa2a78cd71..445a3fda380 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -46,7 +46,7 @@ static struct st_var_header var_headers[] = {
{ "direct", DIRECT, VAR_NUM, NULL, NULL },
};
-static struct st_var_header *var_ptrs[MAXVARS] = { 0, 0, 0 };
+static struct st_var_header *var_ptrs[MAXVARS] = { NULL, NULL, NULL };
static struct punc_var_t punc_vars[] = {
{ PUNC_SOME, 1 },
@@ -280,7 +280,7 @@ int spk_set_mask_bits(const char *input, const int which, const int how)
if (!cp)
cp = spk_punc_info[which].value;
else {
- for ( ; *cp; cp++) {
+ for (; *cp; cp++) {
if (*cp < SPACE)
break;
if (mask < PUNC) {
@@ -294,11 +294,11 @@ int spk_set_mask_bits(const char *input, const int which, const int how)
cp = (u_char *)input;
}
if (how&2) {
- for ( ; *cp; cp++)
+ for (; *cp; cp++)
if (*cp > SPACE)
spk_chartab[*cp] |= mask;
} else {
- for ( ; *cp; cp++)
+ for (; *cp; cp++)
if (*cp > SPACE)
spk_chartab[*cp] &= ~mask;
}
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 386362c9964..28b39307102 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -911,8 +911,6 @@ static int synaptics_rmi4_probe
rmi4_data->input_dev = input_allocate_device();
if (rmi4_data->input_dev == NULL) {
- dev_err(&client->dev, "%s:input device alloc failed\n",
- __func__);
retval = -ENOMEM;
goto err_input;
}
diff --git a/drivers/staging/tidspbridge/core/sync.c b/drivers/staging/tidspbridge/core/sync.c
index 7bb550acaf4..743ff09d82d 100644
--- a/drivers/staging/tidspbridge/core/sync.c
+++ b/drivers/staging/tidspbridge/core/sync.c
@@ -72,7 +72,7 @@ int sync_wait_on_multiple_events(struct sync_object **events,
spin_lock_bh(&sync_lock);
for (i = 0; i < count; i++) {
if (completion_done(&events[i]->comp)) {
- INIT_COMPLETION(events[i]->comp);
+ reinit_completion(&events[i]->comp);
*index = i;
spin_unlock_bh(&sync_lock);
status = 0;
@@ -92,7 +92,7 @@ int sync_wait_on_multiple_events(struct sync_object **events,
spin_lock_bh(&sync_lock);
for (i = 0; i < count; i++) {
if (completion_done(&events[i]->comp)) {
- INIT_COMPLETION(events[i]->comp);
+ reinit_completion(&events[i]->comp);
*index = i;
status = 0;
}
diff --git a/drivers/staging/tidspbridge/include/dspbridge/sync.h b/drivers/staging/tidspbridge/include/dspbridge/sync.h
index 58a0d5c5543..fc19b970708 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/sync.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/sync.h
@@ -59,7 +59,7 @@ static inline void sync_init_event(struct sync_object *event)
static inline void sync_reset_event(struct sync_object *event)
{
- INIT_COMPLETION(event->comp);
+ reinit_completion(&event->comp);
event->multi_comp = NULL;
}
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 6d04eb48bfb..1aa4a3fd0f1 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -332,7 +332,7 @@ static void bridge_recover(struct work_struct *work)
struct dev_object *dev;
struct cfg_devnode *dev_node;
if (atomic_read(&bridge_cref)) {
- INIT_COMPLETION(bridge_comp);
+ reinit_completion(&bridge_comp);
while (!wait_for_completion_timeout(&bridge_comp,
msecs_to_jiffies(REC_TIMEOUT)))
pr_info("%s:%d handle(s) still opened\n",
@@ -348,7 +348,7 @@ static void bridge_recover(struct work_struct *work)
void bridge_recover_schedule(void)
{
- INIT_COMPLETION(bridge_open_comp);
+ reinit_completion(&bridge_open_comp);
recover = true;
queue_work(bridge_rec_queue, &bridge_recovery_work);
}
@@ -389,7 +389,7 @@ static int omap3_bridge_startup(struct platform_device *pdev)
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
bridge_rec_queue = create_workqueue("bridge_rec_queue");
INIT_WORK(&bridge_recovery_work, bridge_recover);
- INIT_COMPLETION(bridge_comp);
+ reinit_completion(&bridge_comp);
#endif
#ifdef CONFIG_PM
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c
index d460f5823c6..012e4a38d2d 100644
--- a/drivers/staging/tidspbridge/rmgr/dspdrv.c
+++ b/drivers/staging/tidspbridge/rmgr/dspdrv.c
@@ -36,7 +36,7 @@
/*
* ======== dsp_init ========
- * Allocates bridge resources. Loads a base image onto DSP, if specified.
+ * Allocates bridge resources. Loads a base image onto DSP, if specified.
*/
u32 dsp_init(u32 *init_status)
{
@@ -106,7 +106,7 @@ func_cont:
/*
* ======== dsp_deinit ========
- * Frees the resources allocated for bridge.
+ * Frees the resources allocated for bridge.
*/
bool dsp_deinit(u32 device_context)
{
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index d8957a55662..76a1ff0e627 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -357,8 +357,9 @@ static int stub_probe(struct usb_interface *interface,
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
(busid_priv->status == STUB_BUSID_OTHER)) {
- dev_info(&interface->dev, "%s is not in match_busid table... "
- "skip!\n", udev_busid);
+ dev_info(&interface->dev,
+ "%s is not in match_busid table... skip!\n",
+ udev_busid);
/*
* Return value should be ENODEV or ENOXIO to continue trying
@@ -375,8 +376,10 @@ static int stub_probe(struct usb_interface *interface,
}
if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
- dev_dbg(&udev->dev, "%s is attached on vhci_hcd... skip!\n",
- udev_busid);
+ dev_dbg(&udev->dev,
+ "%s is attached on vhci_hcd... skip!\n",
+ udev_busid);
+
return -ENODEV;
}
@@ -386,10 +389,10 @@ static int stub_probe(struct usb_interface *interface,
return -ENODEV;
busid_priv->interf_count++;
- dev_info(&interface->dev, "usbip-host: register new interface "
- "(bus %u dev %u ifn %u)\n",
- udev->bus->busnum, udev->devnum,
- interface->cur_altsetting->desc.bInterfaceNumber);
+ dev_info(&interface->dev,
+ "usbip-host: register new interface (bus %u dev %u ifn %u)\n",
+ udev->bus->busnum, udev->devnum,
+ interface->cur_altsetting->desc.bInterfaceNumber);
/* set private data to usb_interface */
usb_set_intfdata(interface, sdev);
@@ -412,9 +415,10 @@ static int stub_probe(struct usb_interface *interface,
if (!sdev)
return -ENOMEM;
- dev_info(&interface->dev, "usbip-host: register new device "
- "(bus %u dev %u ifn %u)\n", udev->bus->busnum, udev->devnum,
- interface->cur_altsetting->desc.bInterfaceNumber);
+ dev_info(&interface->dev,
+ "usbip-host: register new device (bus %u dev %u ifn %u)\n",
+ udev->bus->busnum, udev->devnum,
+ interface->cur_altsetting->desc.bInterfaceNumber);
busid_priv->interf_count = 0;
busid_priv->shutdown_busid = 0;
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index 33027cce670..baf857f7cc8 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -255,14 +255,14 @@ static int __init usbip_host_init(void)
}
ret = usb_register(&stub_driver);
- if (ret < 0) {
+ if (ret) {
pr_err("usb_register failed %d\n", ret);
goto err_usb_register;
}
ret = driver_create_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
- if (ret < 0) {
+ if (ret) {
pr_err("driver_create_file failed\n");
goto err_create_file;
}
diff --git a/drivers/staging/usbip/userspace/configure.ac b/drivers/staging/usbip/userspace/configure.ac
index 2be4060f903..0ee5d926399 100644
--- a/drivers/staging/usbip/userspace/configure.ac
+++ b/drivers/staging/usbip/userspace/configure.ac
@@ -70,7 +70,6 @@ AC_ARG_WITH([tcp-wrappers],
[AC_MSG_RESULT([not found]); exit 1])
else
AC_MSG_RESULT([no]);
- LIBS="$saved_LIBS"
fi],
dnl [ACTION-IF-NOT-GIVEN]
[AC_MSG_RESULT([(default)])
diff --git a/drivers/staging/usbip/userspace/doc/usbip.8 b/drivers/staging/usbip/userspace/doc/usbip.8
index ccdadc87c47..a6097be25d2 100644
--- a/drivers/staging/usbip/userspace/doc/usbip.8
+++ b/drivers/staging/usbip/userspace/doc/usbip.8
@@ -3,7 +3,7 @@
usbip \- manage USB/IP devices
.SH SYNOPSIS
.B usbip
-[\foptions\R] <\fIcommand\fR> <\fIargs\fR>
+[\fIoptions\fR] <\fIcommand\fR> <\fIargs\fR>
.SH DESCRIPTION
On a USB/IP server, devices can be listed, bound, and unbound using
@@ -23,6 +23,12 @@ Print debugging information.
Log to syslog.
.PP
+.HP
+\fB\-\-tcp-port PORT\fR
+.IP
+Connect to PORT on remote host (used for attach and list --remote).
+.PP
+
.SH COMMANDS
.HP
\fBversion\fR
diff --git a/drivers/staging/usbip/userspace/doc/usbipd.8 b/drivers/staging/usbip/userspace/doc/usbipd.8
index d896936f178..ac4635db3f0 100644
--- a/drivers/staging/usbip/userspace/doc/usbipd.8
+++ b/drivers/staging/usbip/userspace/doc/usbipd.8
@@ -14,10 +14,22 @@ Devices have to explicitly be exported using
before usbipd makes them available to other hosts.
The daemon accepts connections from USB/IP clients
-on TCP port 3240.
+on TCP port 3240 by default.
.SH OPTIONS
.HP
+\fB\-4\fR, \fB\-\-ipv4\fR
+.IP
+Bind to IPv4. Default is both.
+.PP
+
+.HP
+\fB\-6\fR, \fB\-\-ipv6\fR
+.IP
+Bind to IPv6. Default is both.
+.PP
+
+.HP
\fB\-D\fR, \fB\-\-daemon\fR
.IP
Run as a daemon process.
@@ -29,6 +41,19 @@ Run as a daemon process.
Print debugging information.
.PP
+.HP
+\fB\-PFILE\fR, \fB\-\-pid FILE\fR
+.IP
+Write process id to FILE.
+.br
+If no FILE is specified, use /var/run/usbipd.pid.
+.PP
+
+\fB\-tPORT\fR, \fB\-\-tcp\-port PORT\fR
+.IP
+Listen on TCP/IP port PORT.
+.PP
+
\fB\-h\fR, \fB\-\-help\fR
.IP
Print the program help message and exit.
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.c b/drivers/staging/usbip/userspace/src/usbip_network.c
index c39a07f1d38..b4c37e76a6e 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.c
+++ b/drivers/staging/usbip/userspace/src/usbip_network.c
@@ -25,6 +25,10 @@
#include <netinet/tcp.h>
#include <unistd.h>
+#ifdef HAVE_LIBWRAP
+#include <tcpd.h>
+#endif
+
#include "usbip_common.h"
#include "usbip_network.h"
@@ -239,6 +243,18 @@ int usbip_net_set_keepalive(int sockfd)
return ret;
}
+int usbip_net_set_v6only(int sockfd)
+{
+ const int val = 1;
+ int ret;
+
+ ret = setsockopt(sockfd, IPPROTO_IPV6, IPV6_V6ONLY, &val, sizeof(val));
+ if (ret < 0)
+ dbg("setsockopt: IPV6_V6ONLY");
+
+ return ret;
+}
+
/*
* IPv6 Ready
*/
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h
index 2d0e4277b62..f19ae19799b 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.h
+++ b/drivers/staging/usbip/userspace/src/usbip_network.h
@@ -180,6 +180,7 @@ int usbip_net_recv_op_common(int sockfd, uint16_t *code);
int usbip_net_set_reuseaddr(int sockfd);
int usbip_net_set_nodelay(int sockfd);
int usbip_net_set_keepalive(int sockfd);
+int usbip_net_set_v6only(int sockfd);
int usbip_net_tcp_connect(char *hostname, char *port);
#endif /* __USBIP_NETWORK_H */
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index 1c76cfd274d..7980f8b5517 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -56,6 +56,13 @@ static const char usbip_version_string[] = PACKAGE_STRING;
static const char usbipd_help_string[] =
"usage: usbipd [options]\n"
+ "\n"
+ " -4, --ipv4\n"
+ " Bind to IPv4. Default is both.\n"
+ "\n"
+ " -6, --ipv6\n"
+ " Bind to IPv6. Default is both.\n"
+ "\n"
" -D, --daemon\n"
" Run as a daemon process.\n"
"\n"
@@ -354,14 +361,15 @@ static void addrinfo_to_text(struct addrinfo *ai, char buf[],
snprintf(buf, buf_size, "%s:%s", hbuf, sbuf);
}
-static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
+static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[],
+ int maxsockfd)
{
struct addrinfo *ai;
int ret, nsockfd = 0;
const size_t ai_buf_size = NI_MAXHOST + NI_MAXSERV + 2;
char ai_buf[ai_buf_size];
- for (ai = ai_head; ai && nsockfd < MAXSOCKFD; ai = ai->ai_next) {
+ for (ai = ai_head; ai && nsockfd < maxsockfd; ai = ai->ai_next) {
int sock;
addrinfo_to_text(ai, ai_buf, ai_buf_size);
dbg("opening %s", ai_buf);
@@ -374,6 +382,9 @@ static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
usbip_net_set_reuseaddr(sock);
usbip_net_set_nodelay(sock);
+ /* We use separate sockets for IPv4 and IPv6
+ * (see do_standalone_mode()) */
+ usbip_net_set_v6only(sock);
if (sock >= FD_SETSIZE) {
err("FD_SETSIZE: %s: sock=%d, max=%d",
@@ -402,11 +413,6 @@ static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
sockfdlist[nsockfd++] = sock;
}
- if (nsockfd == 0)
- return -1;
-
- dbg("listening on %d address%s", nsockfd, (nsockfd == 1) ? "" : "es");
-
return nsockfd;
}
@@ -473,11 +479,11 @@ static void remove_pid_file()
}
}
-static int do_standalone_mode(int daemonize)
+static int do_standalone_mode(int daemonize, int ipv4, int ipv6)
{
struct addrinfo *ai_head;
int sockfdlist[MAXSOCKFD];
- int nsockfd;
+ int nsockfd, family;
int i, terminate;
struct pollfd *fds;
struct timespec timeout;
@@ -501,21 +507,36 @@ static int do_standalone_mode(int daemonize)
set_signal();
write_pid_file();
- ai_head = do_getaddrinfo(NULL, PF_UNSPEC);
+ info("starting " PROGNAME " (%s)", usbip_version_string);
+
+ /*
+ * To suppress warnings on systems with bindv6only disabled
+ * (default), we use separate sockets for IPv6 and IPv4 and set
+ * IPV6_V6ONLY on the IPv6 sockets.
+ */
+ if (ipv4 && ipv6)
+ family = AF_UNSPEC;
+ else if (ipv4)
+ family = AF_INET;
+ else
+ family = AF_INET6;
+
+ ai_head = do_getaddrinfo(NULL, family);
if (!ai_head) {
usbip_host_driver_close();
return -1;
}
-
- info("starting " PROGNAME " (%s)", usbip_version_string);
-
- nsockfd = listen_all_addrinfo(ai_head, sockfdlist);
+ nsockfd = listen_all_addrinfo(ai_head, sockfdlist,
+ sizeof(sockfdlist) / sizeof(*sockfdlist));
+ freeaddrinfo(ai_head);
if (nsockfd <= 0) {
err("failed to open a listening socket");
- freeaddrinfo(ai_head);
usbip_host_driver_close();
return -1;
}
+
+ dbg("listening on %d address%s", nsockfd, (nsockfd == 1) ? "" : "es");
+
fds = calloc(nsockfd, sizeof(struct pollfd));
for (i = 0; i < nsockfd; i++) {
fds[i].fd = sockfdlist[i];
@@ -551,7 +572,6 @@ static int do_standalone_mode(int daemonize)
info("shutting down " PROGNAME);
free(fds);
- freeaddrinfo(ai_head);
usbip_host_driver_close();
return 0;
@@ -560,6 +580,8 @@ static int do_standalone_mode(int daemonize)
int main(int argc, char *argv[])
{
static const struct option longopts[] = {
+ { "ipv4", no_argument, NULL, '4' },
+ { "ipv6", no_argument, NULL, '6' },
+ { "daemon", no_argument, NULL, 'D' },
{ "daemon", no_argument, NULL, 'D' },
{ "debug", no_argument, NULL, 'd' },
{ "pid", optional_argument, NULL, 'P' },
@@ -576,6 +599,7 @@ int main(int argc, char *argv[])
} cmd;
int daemonize = 0;
+ int ipv4 = 0, ipv6 = 0;
int opt, rc = -1;
pid_file = NULL;
@@ -587,12 +611,18 @@ int main(int argc, char *argv[])
cmd = cmd_standalone_mode;
for (;;) {
- opt = getopt_long(argc, argv, "DdP::t:hv", longopts, NULL);
+ opt = getopt_long(argc, argv, "46DdP::t:hv", longopts, NULL);
if (opt == -1)
break;
switch (opt) {
+ case '4':
+ ipv4 = 1;
+ break;
+ case '6':
+ ipv6 = 1;
+ break;
case 'D':
daemonize = 1;
break;
@@ -618,9 +648,12 @@ int main(int argc, char *argv[])
}
}
+ if (!ipv4 && !ipv6)
+ ipv4 = ipv6 = 1;
+
switch (cmd) {
case cmd_standalone_mode:
- rc = do_standalone_mode(daemonize);
+ rc = do_standalone_mode(daemonize, ipv4, ipv6);
remove_pid_file();
break;
case cmd_version:
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index d7974cb2cc6..e810ad53e2a 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -999,12 +999,6 @@ static int vhci_hcd_probe(struct platform_device *pdev)
usbip_dbg_vhci_hc("name %s id %d\n", pdev->name, pdev->id);
- /* will be removed */
- if (pdev->dev.dma_mask) {
- dev_info(&pdev->dev, "vhci_hcd DMA not supported\n");
- return -EINVAL;
- }
-
/*
* Allocate and initialize hcd.
* Our private data is also allocated automatically.
@@ -1146,11 +1140,11 @@ static int __init vhci_hcd_init(void)
return -ENODEV;
ret = platform_driver_register(&vhci_driver);
- if (ret < 0)
+ if (ret)
goto err_driver_register;
ret = platform_device_register(&the_pdev);
- if (ret < 0)
+ if (ret)
goto err_platform_device_register;
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
index 76c8490b073..7949d58ad7d 100644
--- a/drivers/staging/vt6655/80211mgr.c
+++ b/drivers/staging/vt6655/80211mgr.c
@@ -165,9 +165,8 @@ vMgrDecodeBeacon(
break;
case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
+ if (pFrame->pRSN == NULL)
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
@@ -382,9 +381,8 @@ vMgrDecodeAssocRequest(
break;
case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
+ if (pFrame->pRSN == NULL)
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
@@ -556,9 +554,8 @@ vMgrDecodeReassocRequest(
break;
case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
+ if (pFrame->pRSN == NULL)
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
@@ -742,9 +739,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
+ if (pFrame->pRSN == NULL)
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
@@ -858,9 +854,9 @@ vMgrDecodeAuthen(
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_CHALLENGE);
- if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE)) {
+ if (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len) &&
+ pItem->byElementID == WLAN_EID_CHALLENGE)
pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
- }
return;
}
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
index fc056fc6199..82b0bd1c056 100644
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ b/drivers/staging/vt6655/aes_ccmp.c
@@ -46,7 +46,7 @@
* SBOX Table
*/
-unsigned char sbox_table[256] =
+static unsigned char sbox_table[256] =
{
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
@@ -66,7 +66,7 @@ unsigned char sbox_table[256] =
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
-unsigned char dot2_table[256] = {
+static unsigned char dot2_table[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
@@ -85,7 +85,7 @@ unsigned char dot2_table[256] = {
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
-unsigned char dot3_table[256] = {
+static unsigned char dot3_table[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
@@ -110,7 +110,7 @@ unsigned char dot3_table[256] = {
/*--------------------- Export Functions --------------------------*/
-void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
+static void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
{
unsigned long *dwPtrA = (unsigned long *)a;
unsigned long *dwPtrB = (unsigned long *)b;
@@ -122,7 +122,7 @@ void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
-void xor_32(unsigned char *a, unsigned char *b, unsigned char *out)
+static void xor_32(unsigned char *a, unsigned char *b, unsigned char *out)
{
unsigned long *dwPtrA = (unsigned long *)a;
unsigned long *dwPtrB = (unsigned long *)b;
@@ -131,7 +131,7 @@ void xor_32(unsigned char *a, unsigned char *b, unsigned char *out)
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
-void AddRoundKey(unsigned char *key, int round)
+static void AddRoundKey(unsigned char *key, int round)
{
unsigned char sbox_key[4];
unsigned char rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
@@ -149,7 +149,7 @@ void AddRoundKey(unsigned char *key, int round)
xor_32(&key[12], &key[8], &key[12]);
}
-void SubBytes(unsigned char *in, unsigned char *out)
+static void SubBytes(unsigned char *in, unsigned char *out)
{
int i;
@@ -158,7 +158,7 @@ void SubBytes(unsigned char *in, unsigned char *out)
}
}
-void ShiftRows(unsigned char *in, unsigned char *out)
+static void ShiftRows(unsigned char *in, unsigned char *out)
{
out[0] = in[0];
out[1] = in[5];
@@ -178,7 +178,7 @@ void ShiftRows(unsigned char *in, unsigned char *out)
out[15] = in[11];
}
-void MixColumns(unsigned char *in, unsigned char *out)
+static void MixColumns(unsigned char *in, unsigned char *out)
{
out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
out[1] = in[0] ^ dot2_table[in[1]] ^ dot3_table[in[2]] ^ in[3];
@@ -186,7 +186,7 @@ void MixColumns(unsigned char *in, unsigned char *out)
out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
}
-void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciphertext)
+static void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciphertext)
{
int i;
int round;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index c26418d806f..959568a1eb6 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -2434,13 +2434,12 @@ void BBvSetVGAGainOffset(PSDevice pDevice, unsigned char byData)
BBbReadEmbedded(pDevice->PortOffset, 0x0A, &byBBRxConf);//CR10
// patch for 3253B0 Baseband with Cardbus module
- if (byData == pDevice->abyBBVGA[0]) {
+ if (byData == pDevice->abyBBVGA[0])
byBBRxConf |= 0x20;//0010 0000
- } else if (pDevice->bShortSlotTime) {
+ else if (pDevice->bShortSlotTime)
byBBRxConf &= 0xDF;//1101 1111
- } else {
+ else
byBBRxConf |= 0x20;//0010 0000
- }
pDevice->byBBVGACurrent = byData;
BBbWriteEmbedded(pDevice->PortOffset, 0x0A, byBBRxConf);//CR10
}
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index f983915168b..a23b591eeac 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -148,7 +148,8 @@ BSSpSearchBSSList(
if (pDevice->bLinkPass == false) pCurrBSS->bSelected = false;
if ((pCurrBSS->bActive) &&
(pCurrBSS->bSelected == false)) {
- if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) {
+ if (ether_addr_equal(pCurrBSS->abyBSSID,
+ pbyBSSID)) {
if (pSSID != NULL) {
// compare ssid
if (!memcmp(pSSID->abySSID,
@@ -275,7 +276,8 @@ BSSvClearBSSList(
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (bKeepCurrBSSID) {
if (pMgmt->sBSSList[ii].bActive &&
- !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyCurrBSSID)) {
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyCurrBSSID)) {
// bKeepCurrBSSID = false;
continue;
}
@@ -318,7 +320,7 @@ BSSpAddrIsInBSSList(
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSSList = &(pMgmt->sBSSList[ii]);
if (pBSSList->bActive) {
- if (!compare_ether_addr(pBSSList->abyBSSID, abyBSSID)) {
+ if (ether_addr_equal(pBSSList->abyBSSID, abyBSSID)) {
if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len) {
if (memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
@@ -733,7 +735,8 @@ BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
// Index = 0 reserved for AP Node
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- if (!compare_ether_addr(abyDstAddr, pMgmt->sNodeDBTable[ii].abyMACAddr)) {
+ if (ether_addr_equal(abyDstAddr,
+ pMgmt->sNodeDBTable[ii].abyMACAddr)) {
*puNodeIndex = ii;
return true;
}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 7f36a7103c3..e93fdc88d84 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1153,10 +1153,6 @@ static void device_free_info(PSDevice pDevice) {
pci_release_regions(pDevice->pcid);
if (dev)
free_netdev(dev);
-
- if (pDevice->pcid) {
- pci_set_drvdata(pDevice->pcid, NULL);
- }
}
static bool device_init_rings(PSDevice pDevice) {
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index a9533f3f252..0ff51cb4a20 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -172,9 +172,9 @@ s_vProcessRxMACHeader(PSDevice pDevice, unsigned char *pbyRxBufferAddr,
};
pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
- if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) {
+ if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_Bridgetunnel)) {
cbHeaderSize += 6;
- } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
+ } else if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_RFC1042)) {
cbHeaderSize += 6;
pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
if ((*pwType != TYPE_PKT_IPX) && (*pwType != cpu_to_le16(0xF380))) {
@@ -420,7 +420,8 @@ device_receive_frame(
s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader);
// filter packet send from myself
- if (!compare_ether_addr((unsigned char *)&(pDevice->sRxEthHeader.abySrcAddr[0]), pDevice->abyCurrentNetAddr))
+ if (ether_addr_equal(pDevice->sRxEthHeader.abySrcAddr,
+ pDevice->abyCurrentNetAddr))
return false;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 8acff44a9e7..aab0012bba9 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -720,7 +720,6 @@ static int hostap_get_encryption(PSDevice pDevice,
* Return Value:
*
*/
-
int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_hostapd_param *param;
@@ -731,7 +730,7 @@ int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
@@ -755,8 +754,8 @@ int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
break;
case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR \n");
- return -EOPNOTSUPP;
- break;
+ ret = -EOPNOTSUPP;
+ goto out;
case VIAWGET_HOSTAPD_FLUSH:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_FLUSH \n");
spin_lock_irq(&pDevice->lock);
@@ -790,40 +789,36 @@ int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_FLAGS_STA \n");
ret = hostap_set_flags_sta(pDevice, param);
break;
-
case VIAWGET_HOSTAPD_MLME:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_MLME \n");
- return -EOPNOTSUPP;
-
+ ret = -EOPNOTSUPP;
+ goto out;
case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT \n");
ret = hostap_set_generic_element(pDevice, param);
break;
-
case VIAWGET_HOSTAPD_SCAN_REQ:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SCAN_REQ \n");
- return -EOPNOTSUPP;
-
+ ret = -EOPNOTSUPP;
+ goto out;
case VIAWGET_HOSTAPD_STA_CLEAR_STATS:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_STA_CLEAR_STATS \n");
- return -EOPNOTSUPP;
-
+ ret = -EOPNOTSUPP;
+ goto out;
default:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6655_hostap_ioctl: unknown cmd=%d\n",
(int)param->cmd);
- return -EOPNOTSUPP;
- break;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if ((ret == 0) && ap_ioctl) {
if (copy_to_user(p->pointer, param, p->length)) {
ret = -EFAULT;
- goto out;
}
}
out:
kfree(param);
-
return ret;
}
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 9de698ef25f..4bff8aa96be 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -663,7 +663,8 @@ int iwctl_siwap(struct net_device *dev,
unsigned int ii, uSameBssidNum = 0;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyDesireBSSID)) {
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyDesireBSSID)) {
uSameBssidNum++;
}
}
@@ -840,7 +841,8 @@ int iwctl_siwessid(struct net_device *dev,
// by means of judging if there are two same BSSID exist in list ?
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pCurr->abyBSSID)) {
uSameBssidNum++;
}
}
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 92b84b5ea11..04c1304d16e 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -141,7 +141,7 @@ bool KeybGetKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
@@ -208,7 +208,7 @@ bool KeybSetKey(
j = i;
}
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
@@ -385,7 +385,7 @@ bool KeybRemoveKey(
for (i = 0; i < MAX_KEY_TABLE; i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
s_vCheckKeyTableValid(pTable, dwIoBase);
@@ -429,7 +429,7 @@ bool KeybRemoveAllKey(
for (i = 0; i < MAX_KEY_TABLE; i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
for (u = 0; u < MAX_GROUP_KEY; u++) {
pTable->KeyTable[i].GroupKey[u].bKeyValid = false;
@@ -512,7 +512,7 @@ bool KeybGetTransmitKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
index 0828d18ad52..387d20623aa 100644
--- a/drivers/staging/vt6655/michael.h
+++ b/drivers/staging/vt6655/michael.h
@@ -39,18 +39,18 @@ void MIC_vInit(unsigned long dwK0, unsigned long dwK1);
void MIC_vUnInit(void);
-// Append bytes to the message to be MICed
+/* Append bytes to the message to be MICed */
void MIC_vAppend(unsigned char *src, unsigned int nBytes);
-// Get the MIC result. Destination should accept 8 bytes of result.
-// This also resets the message to empty.
+/* Get the MIC result. Destination should accept 8 bytes of result. */
+/* This also resets the message to empty. */
void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR);
/*--------------------- Export Macros ------------------------------*/
-// Rotation functions on 32 bit values
+/* Rotation functions on 32 bit values */
#define ROL32(A, n) \
(((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
#define ROR32(A, n) ROL32((A), 32-(n))
-#endif //__MICHAEL_H__
+#endif /* __MICHAEL_H__ */
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 6948984a25a..ce173cc16c1 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -55,7 +55,7 @@
/*--------------------- Static Variables --------------------------*/
-const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
+static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
@@ -73,7 +73,7 @@ const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};
-const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
+static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -90,7 +90,7 @@ const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M
};
-const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
+static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -107,7 +107,7 @@ const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M
};
-unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
+static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
@@ -177,7 +177,7 @@ unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
//{{ RobertYu:20050104
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
-const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
+static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 451FE2
@@ -200,7 +200,7 @@ const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11a: 12BACF
};
-const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
+static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
@@ -219,7 +219,7 @@ const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11b/g
};
-const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
+static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -285,7 +285,7 @@ const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
};
-const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
+static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -349,7 +349,7 @@ const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
};
-const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
+static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -428,7 +428,7 @@ const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
* Return Value: true if succeeded; false if failed.
*
*/
-bool s_bAL7230Init(unsigned long dwIoBase)
+static bool s_bAL7230Init(unsigned long dwIoBase)
{
int ii;
bool bResult;
@@ -471,7 +471,7 @@ bool s_bAL7230Init(unsigned long dwIoBase)
}
// Need to Pull PLLON low when writing channel registers through 3-wire interface
-bool s_bAL7230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
+static bool s_bAL7230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
{
bool bResult;
@@ -631,7 +631,7 @@ bool IFRFbWriteEmbedded(unsigned long dwIoBase, unsigned long dwData)
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbAL2230Init(unsigned long dwIoBase)
+static bool RFbAL2230Init(unsigned long dwIoBase)
{
int ii;
bool bResult;
@@ -678,7 +678,7 @@ bool RFbAL2230Init(unsigned long dwIoBase)
return bResult;
}
-bool RFbAL2230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
+static bool RFbAL2230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
{
bool bResult;
@@ -776,36 +776,6 @@ bool RFbInit(
}
/*
- * Description: RF ShutDown function
- *
- * Parameters:
- * In:
- * byBBType
- * byRFType
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-bool RFbShutDown(
- PSDevice pDevice
-)
-{
- bool bResult = true;
-
- switch (pDevice->byRFType) {
- case RF_AIROHA7230:
- bResult = IFRFbWriteEmbedded(pDevice->PortOffset, 0x1ABAEF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW);
- break;
- default:
- bResult = true;
- break;
- }
- return bResult;
-}
-
-/*
* Description: Select channel
*
* Parameters:
diff --git a/drivers/staging/vt6655/tkip.c b/drivers/staging/vt6655/tkip.c
index b3e087e1903..e7c17c6f7ea 100644
--- a/drivers/staging/vt6655/tkip.c
+++ b/drivers/staging/vt6655/tkip.c
@@ -55,7 +55,7 @@
/* The 2nd table is the same as the 1st but with the upper and lower */
/* bytes swapped. To allow an endian tolerant implementation, the byte */
/* halves have been expressed independently here. */
-const unsigned char TKIP_Sbox_Lower[256] = {
+static const unsigned char TKIP_Sbox_Lower[256] = {
0xA5, 0x84, 0x99, 0x8D, 0x0D, 0xBD, 0xB1, 0x54,
0x50, 0x03, 0xA9, 0x7D, 0x19, 0x62, 0xE6, 0x9A,
0x45, 0x9D, 0x40, 0x87, 0x15, 0xEB, 0xC9, 0x0B,
@@ -90,7 +90,7 @@ const unsigned char TKIP_Sbox_Lower[256] = {
0xC3, 0xB0, 0x77, 0x11, 0xCB, 0xFC, 0xD6, 0x3A
};
-const unsigned char TKIP_Sbox_Upper[256] = {
+static const unsigned char TKIP_Sbox_Upper[256] = {
0xC6, 0xF8, 0xEE, 0xF6, 0xFF, 0xD6, 0xDE, 0x91,
0x60, 0x02, 0xCE, 0x56, 0xE7, 0xB5, 0x4D, 0xEC,
0x8F, 0x1F, 0x89, 0xFA, 0xEF, 0xB2, 0x8E, 0xFB,
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
index d8f4f8e7d05..d2bdb71fe62 100644
--- a/drivers/staging/vt6655/vntwifi.c
+++ b/drivers/staging/vt6655/vntwifi.c
@@ -752,25 +752,3 @@ VNTWIFIbChannelSwitch(
//spin_unlock_irq(&pDevice->lock);
return true;
}
-
-/*
- bool
- VNTWIFIbRadarPresent(
- void *pMgmtObject,
- unsigned char byChannel
-) {
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (byChannel == (unsigned char) pMgmt->uCurrChannel) &&
- (pMgmt->bSwitchChannel != true) &&
- (pMgmt->b11hEnable == true)) {
- if (!compare_ether_addr(pMgmt->abyIBSSDFSOwner, CARDpGetCurrentAddress(pMgmt->pAdapter))) {
- pMgmt->byNewChannel = CARDbyAutoChannelSelect(pMgmt->pAdapter,(unsigned char) pMgmt->uCurrChannel);
- pMgmt->bSwitchChannel = true;
- }
- BEACONbSendBeacon(pMgmt);
- CARDbChannelSwitch(pMgmt->pAdapter, 0, pMgmt->byNewChannel, 10);
- }
- return true;
- }
-*/
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
index d551653537b..9c57eefe78f 100644
--- a/drivers/staging/vt6655/wcmd.c
+++ b/drivers/staging/vt6655/wcmd.c
@@ -233,7 +233,7 @@ s_vProbeChannel(
*
*
* Return Value:
- * A ptr to Tx frame or NULL on allocation failue
+ * A ptr to Tx frame or NULL on allocation failure
*
-*/
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
index 9eb81b4eee8..f05f9f55398 100644
--- a/drivers/staging/vt6655/wctl.c
+++ b/drivers/staging/vt6655/wctl.c
@@ -75,8 +75,8 @@ bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader)
for (ii = 0; ii < DUPLICATE_RX_CACHE_LENGTH; ii++) {
pCacheEntry = &(pCache->asCacheEntry[uIndex]);
if ((pCacheEntry->wFmSequence == pMACHeader->wSeqCtl) &&
- (!compare_ether_addr(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
-) {
+ ether_addr_equal(pCacheEntry->abyAddr2,
+ pMACHeader->abyAddr2)) {
/* Duplicate match */
return true;
}
@@ -111,8 +111,8 @@ unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
if ((pDevice->sRxDFCB[ii].bInUse == true) &&
- (!compare_ether_addr(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
-) {
+ ether_addr_equal(pDevice->sRxDFCB[ii].abyAddr2,
+ pMACHeader->abyAddr2)) {
//
return ii;
}
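The wctl.c hunks above and the wmgr.c, bssdb.c, key.c and dpc.c hunks that follow all turn on one detail: compare_ether_addr() follows memcmp() semantics (0 means "addresses match"), while its replacement ether_addr_equal() returns true on a match. That is why every !compare_ether_addr(a, b) becomes a plain ether_addr_equal(a, b), and the one bare compare_ether_addr() test in dpc.c further down becomes !ether_addr_equal(). The real helpers live in <linux/etherdevice.h>; the stand-ins below only illustrate the two return conventions:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* old convention: memcmp-style, zero when the two addresses are identical */
static int compare_addr(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, ETH_ALEN);
}

/* new convention: boolean, true when the two addresses are identical */
static bool addr_equal(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, ETH_ALEN) == 0;
}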
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index 9938813f997..ed4b32b6d9c 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -1680,7 +1680,8 @@ s_vMgrRxDeauthentication(
vMgrDecodeDeauthen(&sFrame);
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP deauthed me, reason=%d.\n", cpu_to_le16((*(sFrame.pwReason))));
// TODO: update BSS list for specific BSSID if pre-authentication case
- if (!compare_ether_addr(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID)) {
+ if (ether_addr_equal(sFrame.pHdr->sA3.abyAddr3,
+ pMgmt->abyCurrBSSID)) {
if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
index c5293bbcdab..b697fa6c3b1 100644
--- a/drivers/staging/vt6655/wpa.c
+++ b/drivers/staging/vt6655/wpa.c
@@ -45,12 +45,12 @@
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
-const unsigned char abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
-const unsigned char abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
-const unsigned char abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
-const unsigned char abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
-const unsigned char abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
-const unsigned char abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
+static const unsigned char abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
+static const unsigned char abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
+static const unsigned char abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
+static const unsigned char abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
+static const unsigned char abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
+static const unsigned char abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
/*+
*
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index e8d9ecd2913..044368a46c5 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -394,7 +394,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
} else {
// Key Table Full
- if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
+ if (ether_addr_equal(param->addr, pDevice->abyBSSID)) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
//spin_unlock_irq(&pDevice->lock);
return -EINVAL;
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
index b61328fbee8..85302c5e2ba 100644
--- a/drivers/staging/vt6655/wroute.c
+++ b/drivers/staging/vt6655/wroute.c
@@ -179,7 +179,7 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
pHeadTD = pHeadTD->next;
}
- pLastTD->pTDInfo->skb = 0;
+ pLastTD->pTDInfo->skb = NULL;
pLastTD->pTDInfo->byFlags = 0;
pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
diff --git a/drivers/staging/vt6655/wroute.h b/drivers/staging/vt6655/wroute.h
index 5ecc190ae77..3abc1d36f89 100644
--- a/drivers/staging/vt6655/wroute.h
+++ b/drivers/staging/vt6655/wroute.h
@@ -41,4 +41,4 @@
bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex);
-#endif // __WROUTE_H__
+#endif /* __WROUTE_H__ */
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index 28a4c4c3041..6c7693911cd 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -96,9 +96,9 @@ u8 dot3_table[256] = {
static void xor_128(u8 *a, u8 *b, u8 *out)
{
- u32 * dwPtrA = (u32 *) a;
- u32 * dwPtrB = (u32 *) b;
- u32 * dwPtrOut = (u32 *) out;
+ u32 *dwPtrA = (u32 *) a;
+ u32 *dwPtrB = (u32 *) b;
+ u32 *dwPtrOut = (u32 *) out;
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
@@ -108,9 +108,9 @@ static void xor_128(u8 *a, u8 *b, u8 *out)
static void xor_32(u8 *a, u8 *b, u8 *out)
{
- u32 * dwPtrA = (u32 *) a;
- u32 * dwPtrB = (u32 *) b;
- u32 * dwPtrOut = (u32 *) out;
+ u32 *dwPtrA = (u32 *) a;
+ u32 *dwPtrB = (u32 *) b;
+ u32 *dwPtrOut = (u32 *) out;
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
@@ -218,7 +218,7 @@ void AESv128(u8 *key, u8 *data, u8 *ciphertext)
*
*/
-bool AESbGenCCMP(u8 * pbyRxKey, u8 * pbyFrame, u16 wFrameSize)
+bool AESbGenCCMP(u8 *pbyRxKey, u8 *pbyFrame, u16 wFrameSize)
{
u8 abyNonce[13];
u8 MIC_IV[16];
@@ -231,8 +231,8 @@ bool AESbGenCCMP(u8 * pbyRxKey, u8 * pbyFrame, u16 wFrameSize)
u8 abyLastCipher[16];
struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *) pbyFrame;
- u8 * pbyIV;
- u8 * pbyPayload;
+ u8 *pbyIV;
+ u8 *pbyPayload;
u16 wHLen = 22;
/* 8 is IV, 8 is MIC, 4 is CRC */
u16 wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index ee79bbdf1a0..dad3f8c78e2 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -57,6 +57,7 @@
#include "control.h"
#include "rndis.h"
#include "iowpa.h"
+#include "power.h"
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
@@ -126,7 +127,7 @@ PKnownBSS BSSpSearchBSSList(struct vnt_private *pDevice,
if ((pCurrBSS->bActive) &&
(pCurrBSS->bSelected == false)) {
- if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) {
+ if (ether_addr_equal(pCurrBSS->abyBSSID, pbyBSSID)) {
if (pSSID != NULL) {
// compare ssid
if ( !memcmp(pSSID->abySSID,
@@ -242,8 +243,8 @@ void BSSvClearBSSList(struct vnt_private *pDevice, int bKeepCurrBSSID)
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (bKeepCurrBSSID) {
if (pMgmt->sBSSList[ii].bActive &&
- !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyCurrBSSID)) {
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyCurrBSSID)) {
//mike mark: there are two BSSID's in list. If that AP is in hidden ssid mode, one SSID is null,
// but other's might not be obvious, so if it associate's with your STA,
// you must keep the two of them!!
@@ -277,7 +278,7 @@ PKnownBSS BSSpAddrIsInBSSList(struct vnt_private *pDevice,
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSSList = &(pMgmt->sBSSList[ii]);
if (pBSSList->bActive) {
- if (!compare_ether_addr(pBSSList->abyBSSID, abyBSSID)) {
+ if (ether_addr_equal(pBSSList->abyBSSID, abyBSSID)) {
if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){
if (memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
@@ -623,8 +624,8 @@ int BSSbIsSTAInNodeDB(struct vnt_private *pDevice,
// Index = 0 reserved for AP Node
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- if (!compare_ether_addr(abyDstAddr,
- pMgmt->sNodeDBTable[ii].abyMACAddr)) {
+ if (ether_addr_equal(abyDstAddr,
+ pMgmt->sNodeDBTable[ii].abyMACAddr)) {
*puNodeIndex = ii;
return true;
}
@@ -813,8 +814,10 @@ void BSSvAddMulticastNode(struct vnt_private *pDevice)
*
-*/
-void BSSvSecondCallBack(struct vnt_private *pDevice)
+void BSSvSecondCallBack(struct work_struct *work)
{
+ struct vnt_private *pDevice = container_of(work,
+ struct vnt_private, second_callback_work.work);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int ii;
PWLAN_IE_SSID pItemSSID, pCurrSSID;
@@ -822,6 +825,9 @@ void BSSvSecondCallBack(struct vnt_private *pDevice)
u32 uNonShortSlotSTACnt = 0;
u32 uLongPreambleSTACnt = 0;
+ if (pDevice->Flags & fMP_DISCONNECTED)
+ return;
+
spin_lock_irq(&pDevice->lock);
pDevice->uAssocCount = 0;
@@ -1119,15 +1125,26 @@ else {
}
}
- if (pDevice->bLinkPass == true) {
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
- }
+ if (pDevice->bLinkPass == true) {
+ if (pMgmt->eAuthenMode < WMAC_AUTH_WPA ||
+ pDevice->fWPA_Authened == true) {
+ if (++pDevice->tx_data_time_out > 40) {
+ pDevice->tx_trigger = true;
+
+ PSbSendNullPacket(pDevice);
+
+ pDevice->tx_trigger = false;
+ pDevice->tx_data_time_out = 0;
+ }
+ }
+
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+ }
spin_unlock_irq(&pDevice->lock);
- pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
- add_timer(&pMgmt->sTimerSecondCallback);
+ schedule_delayed_work(&pDevice->second_callback_work, HZ);
}
/*+
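BSSvSecondCallBack stops being a struct timer_list callback that receives the device pointer directly; as a delayed_work handler it recovers the device with container_of() and re-arms itself with schedule_delayed_work(..., HZ), which is what lets device_close() use cancel_delayed_work_sync() instead of del_timer(). A condensed sketch of that pattern, keeping the field and flag names from the hunk (vnt_private, second_callback_work, fMP_DISCONNECTED) but trimming the body to a bare skeleton:

#include <linux/workqueue.h>

static void second_callback(struct work_struct *work)
{
        struct vnt_private *priv = container_of(work, struct vnt_private,
                                                second_callback_work.work);

        if (priv->Flags & fMP_DISCONNECTED)     /* device already gone */
                return;

        /* ... once-per-second housekeeping under priv->lock ... */

        schedule_delayed_work(&priv->second_callback_work, HZ); /* re-arm */
}

/* probe: INIT_DELAYED_WORK(&priv->second_callback_work, second_callback); */
/* open:  schedule_delayed_work(&priv->second_callback_work, HZ);          */
/* close: cancel_delayed_work_sync(&priv->second_callback_work);           */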
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index bce3b465416..fc418555bc4 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -262,7 +262,7 @@ void BSSvCreateOneNode(struct vnt_private *, u32 *puNodeIndex);
void BSSvUpdateAPNode(struct vnt_private *, u16 *pwCapInfo,
PWLAN_IE_SUPP_RATES pItemRates, PWLAN_IE_SUPP_RATES pExtSuppRates);
-void BSSvSecondCallBack(struct vnt_private *);
+void BSSvSecondCallBack(struct work_struct *work);
void BSSvUpdateNodeTxCounter(struct vnt_private *, PSStatCounter pStatistic,
u8 byTSR, u8 byPktNO);
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index 5158ff4b346..e430b35463b 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -403,7 +403,7 @@ exit:
void CHvInitChannelTable(struct vnt_private *pDevice)
{
- int bMultiBand = false;
+ bool bMultiBand = false;
int ii;
for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
diff --git a/drivers/staging/vt6656/datarate.c b/drivers/staging/vt6656/datarate.c
index 17fbc35ebcb..af9eab0c00a 100644
--- a/drivers/staging/vt6656/datarate.c
+++ b/drivers/staging/vt6656/datarate.c
@@ -44,9 +44,9 @@
#include "rf.h"
/* static int msglevel = MSG_LEVEL_DEBUG; */
-static int msglevel =MSG_LEVEL_INFO;
-const u8 acbyIERate[MAX_RATE] =
-{0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+static int msglevel = MSG_LEVEL_INFO;
+const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18,
+ 0x24, 0x30, 0x48, 0x60, 0x6C};
#define AUTORATE_TXOK_CNT 0x0400
#define AUTORATE_TXFAIL_CNT 0x0064
@@ -56,13 +56,13 @@ void s_vResetCounter(PKnownNodeDB psNodeDBTable);
void s_vResetCounter(PKnownNodeDB psNodeDBTable)
{
- u8 ii;
+ u8 ii;
- /* clear statistics counter for auto_rate */
- for (ii = 0; ii <= MAX_RATE; ii++) {
- psNodeDBTable->uTxOk[ii] = 0;
- psNodeDBTable->uTxFail[ii] = 0;
- }
+ /* clear statistics counter for auto_rate */
+ for (ii = 0; ii <= MAX_RATE; ii++) {
+ psNodeDBTable->uTxOk[ii] = 0;
+ psNodeDBTable->uTxFail[ii] = 0;
+ }
}
/*+
@@ -97,21 +97,18 @@ void s_vResetCounter(PKnownNodeDB psNodeDBTable)
* Return Value: RateIdx
*
-*/
-u16
-RATEwGetRateIdx(
- u8 byRate
- )
+u16 RATEwGetRateIdx(u8 byRate)
{
- u16 ii;
+ u16 ii;
- /* erase BasicRate flag */
- byRate = byRate & 0x7F;
+ /* erase BasicRate flag */
+ byRate = byRate & 0x7F;
- for (ii = 0; ii < MAX_RATE; ii ++) {
- if (acbyIERate[ii] == byRate)
- return ii;
- }
- return 0;
+ for (ii = 0; ii < MAX_RATE; ii++) {
+ if (acbyIERate[ii] == byRate)
+ return ii;
+ }
+ return 0;
}
/*+
@@ -139,7 +136,7 @@ void RATEvParseMaxRate(struct vnt_private *pDevice,
int bUpdateBasicRate, u16 *pwMaxBasicRate, u16 *pwMaxSuppRate,
u16 *pwSuppRate, u8 *pbyTopCCKRate, u8 *pbyTopOFDMRate)
{
- int ii;
+ int ii;
u8 byHighSuppRate = 0, byRate = 0;
u16 wOldBasicRate = pDevice->wBasicRate;
u32 uRateLen;
@@ -147,83 +144,88 @@ void RATEvParseMaxRate(struct vnt_private *pDevice,
if (pItemRates == NULL)
return;
- *pwSuppRate = 0;
- uRateLen = pItemRates->len;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen);
- if (pDevice->byBBType != BB_TYPE_11B) {
- if (uRateLen > WLAN_RATES_MAXLEN)
- uRateLen = WLAN_RATES_MAXLEN;
- } else {
- if (uRateLen > WLAN_RATES_MAXLEN_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
- }
-
- for (ii = 0; ii < uRateLen; ii++) {
- byRate = (u8)(pItemRates->abyRates[ii]);
- if (WLAN_MGMT_IS_BASICRATE(byRate) &&
- (bUpdateBasicRate == true)) {
- /*
- * add to basic rate set, update pDevice->byTopCCKBasicRate and
- * pDevice->byTopOFDMBasicRate
- */
- CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
- }
- byRate = (u8)(pItemRates->abyRates[ii]&0x7F);
- if (byHighSuppRate == 0)
- byHighSuppRate = byRate;
- if (byRate > byHighSuppRate)
- byHighSuppRate = byRate;
- *pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
- }
- if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
- (pDevice->byBBType != BB_TYPE_11B)) {
-
- unsigned int uExtRateLen = pItemExtRates->len;
-
- if (uExtRateLen > WLAN_RATES_MAXLEN)
- uExtRateLen = WLAN_RATES_MAXLEN;
-
- for (ii = 0; ii < uExtRateLen ; ii++) {
- byRate = (u8)(pItemExtRates->abyRates[ii]);
- /* select highest basic rate */
- if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
- /*
- * add to basic rate set, update pDevice->byTopCCKBasicRate and
- * pDevice->byTopOFDMBasicRate
- */
- CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
- }
- byRate = (u8)(pItemExtRates->abyRates[ii]&0x7F);
- if (byHighSuppRate == 0)
- byHighSuppRate = byRate;
- if (byRate > byHighSuppRate)
- byHighSuppRate = byRate;
- *pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
-
- /* DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n",
- RATEwGetRateIdx(byRate), byRate)); */
- }
- }
-
- if ((pDevice->byPacketType == PK_TYPE_11GB)
- && CARDbIsOFDMinBasicRate((void *)pDevice)) {
- pDevice->byPacketType = PK_TYPE_11GA;
- }
-
- *pbyTopCCKRate = pDevice->byTopCCKBasicRate;
- *pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
- *pwMaxSuppRate = RATEwGetRateIdx(byHighSuppRate);
- if ((pDevice->byPacketType==PK_TYPE_11B) || (pDevice->byPacketType==PK_TYPE_11GB))
- *pwMaxBasicRate = pDevice->byTopCCKBasicRate;
- else
- *pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
- if (wOldBasicRate != pDevice->wBasicRate)
- CARDvSetRSPINF((void *)pDevice, pDevice->byBBType);
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
+ *pwSuppRate = 0;
+ uRateLen = pItemRates->len;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen);
+ if (pDevice->byBBType != BB_TYPE_11B) {
+ if (uRateLen > WLAN_RATES_MAXLEN)
+ uRateLen = WLAN_RATES_MAXLEN;
+ } else {
+ if (uRateLen > WLAN_RATES_MAXLEN_11B)
+ uRateLen = WLAN_RATES_MAXLEN_11B;
+ }
+
+ for (ii = 0; ii < uRateLen; ii++) {
+ byRate = (u8)(pItemRates->abyRates[ii]);
+ if (WLAN_MGMT_IS_BASICRATE(byRate) &&
+ (bUpdateBasicRate == true)) {
+ /*
+ * add to basic rate set, update pDevice->byTopCCKBasicRate and
+ * pDevice->byTopOFDMBasicRate
+ */
+ CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"ParseMaxRate AddBasicRate: %d\n",
+ RATEwGetRateIdx(byRate));
+ }
+ byRate = (u8)(pItemRates->abyRates[ii]&0x7F);
+ if (byHighSuppRate == 0)
+ byHighSuppRate = byRate;
+ if (byRate > byHighSuppRate)
+ byHighSuppRate = byRate;
+ *pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
+ }
+ if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
+ (pDevice->byBBType != BB_TYPE_11B)) {
+
+ unsigned int uExtRateLen = pItemExtRates->len;
+
+ if (uExtRateLen > WLAN_RATES_MAXLEN)
+ uExtRateLen = WLAN_RATES_MAXLEN;
+
+ for (ii = 0; ii < uExtRateLen; ii++) {
+ byRate = (u8)(pItemExtRates->abyRates[ii]);
+ /* select highest basic rate */
+ if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
+ /*
+ * add to basic rate set, update pDevice->byTopCCKBasicRate and
+ * pDevice->byTopOFDMBasicRate
+ */
+ CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"ParseMaxRate AddBasicRate: %d\n",
+ RATEwGetRateIdx(byRate));
+ }
+ byRate = (u8)(pItemExtRates->abyRates[ii]&0x7F);
+ if (byHighSuppRate == 0)
+ byHighSuppRate = byRate;
+ if (byRate > byHighSuppRate)
+ byHighSuppRate = byRate;
+ *pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
+
+ /* DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n",
+ * RATEwGetRateIdx(byRate), byRate));
+ */
+ }
+ }
+
+ if ((pDevice->byPacketType == PK_TYPE_11GB)
+ && CARDbIsOFDMinBasicRate((void *)pDevice)) {
+ pDevice->byPacketType = PK_TYPE_11GA;
+ }
+
+ *pbyTopCCKRate = pDevice->byTopCCKBasicRate;
+ *pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
+ *pwMaxSuppRate = RATEwGetRateIdx(byHighSuppRate);
+ if ((pDevice->byPacketType == PK_TYPE_11B) || (pDevice->byPacketType == PK_TYPE_11GB))
+ *pwMaxBasicRate = pDevice->byTopCCKBasicRate;
+ else
+ *pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
+ if (wOldBasicRate != pDevice->wBasicRate)
+ CARDvSetRSPINF((void *)pDevice, pDevice->byBBType);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
}
/*+
@@ -263,71 +265,68 @@ void RATEvTxRateFallBack(struct vnt_private *pDevice,
psNodeDBTable->uTimeCount++;
- if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
- dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
-
- if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
- (dwTxDiff < AUTORATE_TXFAIL_CNT) &&
- (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
- return;
- }
-
- if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT) {
- psNodeDBTable->uTimeCount = 0;
- }
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii] == true) {
- wIdxUpRate = (u16) ii;
- }
- } else {
- bAutoRate[ii] = false;
- }
- }
-
- for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
- if ( (psNodeDBTable->uTxOk[ii] != 0) ||
- (psNodeDBTable->uTxFail[ii] != 0) ) {
- dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
- if (ii < RATE_11M) {
- psNodeDBTable->uTxFail[ii] *= 4;
- }
- dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n",
- ii, (int)psNodeDBTable->uTxOk[ii], (int)psNodeDBTable->uTxFail[ii], (int)dwThroughputTbl[ii]);
- }
- dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
-
- wIdxDownRate = psNodeDBTable->wTxDataRate;
- for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
- ii--;
- if ( (dwThroughputTbl[ii] > dwThroughput) &&
- (bAutoRate[ii]==true) ) {
- dwThroughput = dwThroughputTbl[ii];
- wIdxDownRate = (u16) ii;
- }
- }
- psNodeDBTable->wTxDataRate = wIdxDownRate;
- if (psNodeDBTable->uTxOk[MAX_RATE]) {
- if (psNodeDBTable->uTxOk[MAX_RATE] >
- (psNodeDBTable->uTxFail[MAX_RATE] * 4) ) {
- psNodeDBTable->wTxDataRate = wIdxUpRate;
- }
- } else { /* adhoc, if uTxOk(total) == 0 & uTxFail(total) == 0 */
- if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
- psNodeDBTable->wTxDataRate = wIdxUpRate;
- }
-
- if (pDevice->byBBType == BB_TYPE_11A) {
- if (psNodeDBTable->wTxDataRate <= RATE_11M)
- psNodeDBTable->wTxDataRate = RATE_6M;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uTxOk[MAX_RATE] %d, uTxFail[MAX_RATE]:%d\n",(int)psNodeDBTable->uTxOk[MAX_RATE], (int)psNodeDBTable->uTxFail[MAX_RATE]);
- s_vResetCounter(psNodeDBTable);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", (int)psNodeDBTable->wTxDataRate, (int)wIdxUpRate, (int)wIdxDownRate);
- return;
+ if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
+ dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
+
+ if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
+ (dwTxDiff < AUTORATE_TXFAIL_CNT) &&
+ (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
+ return;
+ }
+
+ if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT)
+ psNodeDBTable->uTimeCount = 0;
+
+ for (ii = 0; ii < MAX_RATE; ii++) {
+ if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
+ if (bAutoRate[ii] == true)
+ wIdxUpRate = (u16) ii;
+ } else {
+ bAutoRate[ii] = false;
+ }
+ }
+
+ for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
+ if ((psNodeDBTable->uTxOk[ii] != 0) ||
+ (psNodeDBTable->uTxFail[ii] != 0)) {
+ dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
+ if (ii < RATE_11M)
+ psNodeDBTable->uTxFail[ii] *= 4;
+ dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n",
+ ii, (int)psNodeDBTable->uTxOk[ii], (int)psNodeDBTable->uTxFail[ii], (int)dwThroughputTbl[ii]);
+ }
+ dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
+
+ wIdxDownRate = psNodeDBTable->wTxDataRate;
+ for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
+ ii--;
+ if ((dwThroughputTbl[ii] > dwThroughput) &&
+ (bAutoRate[ii] == true)) {
+ dwThroughput = dwThroughputTbl[ii];
+ wIdxDownRate = (u16) ii;
+ }
+ }
+ psNodeDBTable->wTxDataRate = wIdxDownRate;
+ if (psNodeDBTable->uTxOk[MAX_RATE]) {
+ if (psNodeDBTable->uTxOk[MAX_RATE] >
+ (psNodeDBTable->uTxFail[MAX_RATE] * 4)) {
+ psNodeDBTable->wTxDataRate = wIdxUpRate;
+ }
+ } else { /* adhoc, if uTxOk(total) == 0 & uTxFail(total) == 0 */
+ if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
+ psNodeDBTable->wTxDataRate = wIdxUpRate;
+ }
+
+ if (pDevice->byBBType == BB_TYPE_11A) {
+ if (psNodeDBTable->wTxDataRate <= RATE_11M)
+ psNodeDBTable->wTxDataRate = RATE_6M;
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uTxOk[MAX_RATE] %d, uTxFail[MAX_RATE]:%d\n", (int)psNodeDBTable->uTxOk[MAX_RATE], (int)psNodeDBTable->uTxFail[MAX_RATE]);
+ s_vResetCounter(psNodeDBTable);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", (int)psNodeDBTable->wTxDataRate, (int)wIdxUpRate, (int)wIdxDownRate);
+ return;
}
/*+
@@ -343,29 +342,24 @@ void RATEvTxRateFallBack(struct vnt_private *pDevice,
* Return Value: None
*
-*/
-u8
-RATEuSetIE (
- PWLAN_IE_SUPP_RATES pSrcRates,
- PWLAN_IE_SUPP_RATES pDstRates,
- unsigned int uRateLen
- )
+u8 RATEuSetIE(PWLAN_IE_SUPP_RATES pSrcRates, PWLAN_IE_SUPP_RATES pDstRates,
+ unsigned int uRateLen)
{
- unsigned int ii, uu, uRateCnt = 0;
-
- if ((pSrcRates == NULL) || (pDstRates == NULL))
- return 0;
-
- if (pSrcRates->len == 0)
- return 0;
-
- for (ii = 0; ii < uRateLen; ii++) {
- for (uu = 0; uu < pSrcRates->len; uu++) {
- if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
- pDstRates->abyRates[uRateCnt ++] = pSrcRates->abyRates[uu];
- break;
- }
- }
- }
- return (u8)uRateCnt;
+ unsigned int ii, uu, uRateCnt = 0;
+
+ if ((pSrcRates == NULL) || (pDstRates == NULL))
+ return 0;
+
+ if (pSrcRates->len == 0)
+ return 0;
+
+ for (ii = 0; ii < uRateLen; ii++) {
+ for (uu = 0; uu < pSrcRates->len; uu++) {
+ if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
+ pDstRates->abyRates[uRateCnt++] = pSrcRates->abyRates[uu];
+ break;
+ }
+ }
+ }
+ return (u8)uRateCnt;
}
-
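The reindented RATEwGetRateIdx() and acbyIERate[] above lean on how 802.11 encodes its Supported Rates element: each byte is the rate in 500 kb/s units, with bit 0x80 flagging a BSS basic rate, so 0x02/0x04/0x0B/0x16 are the 1/2/5.5/11 Mb/s CCK rates and 0x0C..0x6C the 6..54 Mb/s OFDM rates. A small standalone sketch of the same lookup; rate_index() mirrors RATEwGetRateIdx and returns 0 for an unknown byte:

#include <stddef.h>

/* Supported Rates IE bytes, in 500 kb/s units (0x80 marks a basic rate) */
static const unsigned char ie_rates[] = {
        0x02, 0x04, 0x0B, 0x16,                         /* 1, 2, 5.5, 11 Mb/s */
        0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C  /* 6 .. 54 Mb/s       */
};

static size_t rate_index(unsigned char ie_byte)
{
        size_t i;

        ie_byte &= 0x7F;                /* strip the basic-rate flag */
        for (i = 0; i < sizeof(ie_rates); i++)
                if (ie_rates[i] == ie_byte)
                        return i;
        return 0;                       /* unknown rate falls back to index 0 */
}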
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 4675135aa25..afe7074c303 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -146,15 +146,6 @@
/*
* TX FIFO header
*/
-typedef struct tagSTxBufHead {
- u32 adwTxKey[4];
- u16 wFIFOCtl;
- u16 wTimeStamp;
- u16 wFragCtl;
- u16 wReserved;
-} __attribute__ ((__packed__))
-STxBufHead, *PSTxBufHead;
-typedef const STxBufHead *PCSTxBufHead;
typedef struct tagSTxShortBufHead {
u16 wFIFOCtl;
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 8e396341c5e..62b7de19b37 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -384,8 +384,8 @@ struct vnt_private {
struct tasklet_struct CmdWorkItem;
struct tasklet_struct EventWorkItem;
- struct tasklet_struct ReadWorkItem;
- struct tasklet_struct RxMngWorkItem;
+ struct work_struct read_work_item;
+ struct work_struct rx_mng_work_item;
u32 rx_buf_sz;
int multicast_limit;
@@ -579,6 +579,9 @@ struct vnt_private {
u8 abyOFDMAPwrTbl[42];
u16 wCurrentRate;
+ u16 tx_rate_fb0;
+ u16 tx_rate_fb1;
+
u16 wRTSThreshold;
u16 wFragmentationThreshold;
u8 byShortRetryLimit;
@@ -707,13 +710,12 @@ struct vnt_private {
u8 byBBCR09;
/* command timer */
- struct timer_list sTimerCommand;
-
- struct timer_list sTimerTxData;
- unsigned long nTxDataTimeCout;
- int fTxDataInSleep;
- int IsTxDataTrigger;
+ struct delayed_work run_command_work;
+ /* One second callback */
+ struct delayed_work second_callback_work;
+ u8 tx_data_time_out;
+ bool tx_trigger;
int fWPA_Authened; /*is WPA/WPA-PSK or WPA2/WPA2-PSK authen?? */
u8 byReAssocCount;
u8 byLinkWaitCount;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index ea7d443b11d..75dc92d6405 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -136,9 +136,9 @@ static void s_vProcessRxMACHeader(struct vnt_private *pDevice,
};
pbyRxBuffer = (u8 *) (pbyRxBufferAddr + cbHeaderSize);
- if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) {
+ if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_Bridgetunnel)) {
cbHeaderSize += 6;
- } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
+ } else if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_RFC1042)) {
cbHeaderSize += 6;
pwType = (u16 *) (pbyRxBufferAddr + cbHeaderSize);
if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
@@ -361,7 +361,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
if (pMgmt->sNodeDBTable[0].bActive) {
- if (!compare_ether_addr(pMgmt->abyCurrBSSID, pMACHeader->addr2)) {
+ if (ether_addr_equal(pMgmt->abyCurrBSSID, pMACHeader->addr2)) {
if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
}
@@ -374,8 +374,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
return false;
}
- if (compare_ether_addr(pDevice->abyCurrentNetAddr,
- pMACHeader->addr1)) {
+ if (!ether_addr_equal(pDevice->abyCurrentNetAddr, pMACHeader->addr1)) {
return false;
}
}
@@ -383,8 +382,8 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
// Use for TKIP MIC
s_vGetDASA(pbyFrame, &cbHeaderSize, &pDevice->sRxEthHeader);
- if (!compare_ether_addr((u8 *)&(pDevice->sRxEthHeader.h_source[0]),
- pDevice->abyCurrentNetAddr))
+ if (ether_addr_equal((u8 *)pDevice->sRxEthHeader.h_source,
+ pDevice->abyCurrentNetAddr))
return false;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
@@ -560,7 +559,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
}
if (pDevice->bIsRxMngWorkItemQueued == false) {
pDevice->bIsRxMngWorkItemQueued = true;
- tasklet_schedule(&pDevice->RxMngWorkItem);
+ schedule_work(&pDevice->rx_mng_work_item);
}
}
@@ -1333,11 +1332,16 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
return true;
}
-void RXvWorkItem(struct vnt_private *pDevice)
+void RXvWorkItem(struct work_struct *work)
{
+ struct vnt_private *pDevice =
+ container_of(work, struct vnt_private, read_work_item);
int ntStatus;
struct vnt_rcb *pRCB = NULL;
+ if (pDevice->Flags & fMP_DISCONNECTED)
+ return;
+
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
spin_lock_irq(&pDevice->lock);
@@ -1384,17 +1388,22 @@ void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb)
(pDevice->bIsRxWorkItemQueued == false) ) {
pDevice->bIsRxWorkItemQueued = true;
- tasklet_schedule(&pDevice->ReadWorkItem);
+ schedule_work(&pDevice->read_work_item);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList);
}
-void RXvMngWorkItem(struct vnt_private *pDevice)
+void RXvMngWorkItem(struct work_struct *work)
{
+ struct vnt_private *pDevice =
+ container_of(work, struct vnt_private, rx_mng_work_item);
struct vnt_rcb *pRCB = NULL;
struct vnt_rx_mgmt *pRxPacket;
int bReAllocSkb = false;
+ if (pDevice->Flags & fMP_DISCONNECTED)
+ return;
+
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Mng Thread\n");
spin_lock_irq(&pDevice->lock);
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index 95388dc03ee..8d524345dfd 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -32,9 +32,9 @@
#include "device.h"
#include "wcmd.h"
-void RXvWorkItem(void *Context);
+void RXvWorkItem(struct work_struct *work);
-void RXvMngWorkItem(void *Context);
+void RXvMngWorkItem(struct work_struct *work);
void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb);
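The RX handlers change signature along with the bssdb.c timer: instead of tasklets invoked with a void *Context in softirq context, RXvWorkItem/RXvMngWorkItem become work items that run in process context (where the USB work they kick off may sleep), take a struct work_struct *, and are queued with schedule_work() rather than tasklet_schedule(). A bare-bones sketch of the handoff, reusing the driver's bIsRxWorkItemQueued guard but with the rest reduced to comments:

#include <linux/workqueue.h>

static void rx_work(struct work_struct *work);  /* was: void RXvWorkItem(void *Context) */

/* completion side: kick the reader only when it is not already pending */
static void rcb_freed(struct vnt_private *priv)
{
        if (!priv->bIsRxWorkItemQueued) {
                priv->bIsRxWorkItemQueued = true;
                schedule_work(&priv->read_work_item);
        }
}

/* probe: INIT_WORK(&priv->read_work_item, rx_work);                        */
/* open:  schedule_work(&priv->read_work_item);                             */
/* close: cancel_work_sync(&priv->read_work_item);  replaces tasklet_kill() */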
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index a1dc3a4cfd9..cd2ea76c8b1 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -35,8 +35,8 @@
#include "control.h"
#include "rndis.h"
-static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
+static int msglevel = MSG_LEVEL_INFO;
+/* static int msglevel = MSG_LEVEL_DEBUG; */
#define FIRMWARE_VERSION 0x133 /* version 1.51 */
#define FIRMWARE_NAME "vntwusb.fw"
@@ -72,18 +72,17 @@ int FIRMWAREbDownload(struct vnt_private *pDevice)
memcpy(pBuffer, fw->data + ii, wLength);
NdisStatus = CONTROLnsRequestOutAsyn(pDevice,
- 0,
- 0x1200+ii,
- 0x0000,
- wLength,
- pBuffer
- );
+ 0,
+ 0x1200+ii,
+ 0x0000,
+ wLength,
+ pBuffer);
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO"Download firmware...%d %zu\n", ii, fw->size);
if (NdisStatus != STATUS_SUCCESS)
goto free_fw;
- }
+ }
result = true;
free_fw:
@@ -101,48 +100,47 @@ int FIRMWAREbBrach2Sram(struct vnt_private *pDevice)
{
int NdisStatus;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Branch to Sram\n");
-
- NdisStatus = CONTROLnsRequestOut(pDevice,
- 1,
- 0x1200,
- 0x0000,
- 0,
- NULL
- );
-
- if (NdisStatus != STATUS_SUCCESS) {
- return (false);
- } else {
- return (true);
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Branch to Sram\n");
+
+ NdisStatus = CONTROLnsRequestOut(pDevice,
+ 1,
+ 0x1200,
+ 0x0000,
+ 0,
+ NULL);
+ if (NdisStatus != STATUS_SUCCESS)
+ return false;
+ else
+ return true;
}
int FIRMWAREbCheckVersion(struct vnt_private *pDevice)
{
int ntStatus;
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- 0,
- MESSAGE_REQUEST_VERSION,
- 2,
- (u8 *) &(pDevice->wFirmwareVersion));
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
- if (ntStatus != STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Invalid.\n");
- return false;
- }
- if (pDevice->wFirmwareVersion == 0xFFFF) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"In Loader.\n");
- return false;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
- if (pDevice->wFirmwareVersion < FIRMWARE_VERSION) {
- // branch to loader for download new firmware
- FIRMWAREbBrach2Sram(pDevice);
- return false;
- }
- return true;
+ ntStatus = CONTROLnsRequestIn(pDevice,
+ MESSAGE_TYPE_READ,
+ 0,
+ MESSAGE_REQUEST_VERSION,
+ 2,
+ (u8 *) &(pDevice->wFirmwareVersion));
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n",
+ pDevice->wFirmwareVersion);
+ if (ntStatus != STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Invalid.\n");
+ return false;
+ }
+ if (pDevice->wFirmwareVersion == 0xFFFF) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"In Loader.\n");
+ return false;
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n",
+ pDevice->wFirmwareVersion);
+ if (pDevice->wFirmwareVersion < FIRMWARE_VERSION) {
+ /* branch to loader for download new firmware */
+ FIRMWAREbBrach2Sram(pDevice);
+ return false;
+ }
+ return true;
}
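The reflowed download loop keeps its original shape: the blob handed back by request_firmware() is copied into a bounce buffer and pushed to device memory at 0x1200+offset one fixed-size piece at a time, and FIRMWAREbBrach2Sram() then issues a zero-length request at 0x1200 so the MCU branches into the loaded image. A rough sketch of that chunked shape; send_chunk() stands in for the driver's CONTROLnsRequestOutAsyn wrapper and CHUNK for its chunk size, which is not shown in this hunk:

#include <linux/firmware.h>
#include <linux/kernel.h>

#define CHUNK 0x400     /* placeholder; the driver defines its own chunk size */

/* hypothetical wrapper around the vendor control-out request */
static int send_chunk(struct vnt_private *priv, u16 addr, const u8 *buf, u16 len);

static int download_fw(struct vnt_private *priv, struct device *dev)
{
        const struct firmware *fw;
        size_t off;
        int ret;

        ret = request_firmware(&fw, "vntwusb.fw", dev);
        if (ret)
                return ret;

        for (off = 0; off < fw->size; off += CHUNK) {
                u16 len = min_t(size_t, CHUNK, fw->size - off);

                /* the real code copies into a kmalloc'd buffer first */
                ret = send_chunk(priv, 0x1200 + off, fw->data + off, len);
                if (ret)
                        break;
        }

        release_firmware(fw);
        return ret;
}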
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index c699a3058b3..ae1676d190c 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -414,7 +414,7 @@ static int hostap_set_encryption(struct vnt_private *pDevice,
int ret = 0;
s32 iNodeIndex = -1;
int ii;
- int bKeyTableFull = false;
+ bool bKeyTableFull = false;
u16 wKeyCtl = 0;
param->u.crypt.err = 0;
@@ -685,7 +685,7 @@ int vt6656_hostap_ioctl(struct vnt_private *pDevice, struct iw_point *p)
p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 8872e0f84f4..63917abbbd0 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -60,7 +60,7 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.status = pDevice->eOPMode;
if (pDevice->scStatistic.LinkQuality > 100)
pDevice->scStatistic.LinkQuality = 100;
- pDevice->wstats.qual.qual =(u8)pDevice->scStatistic.LinkQuality;
+ pDevice->wstats.qual.qual = (u8)pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
pDevice->wstats.qual.noise = 0;
@@ -190,7 +190,7 @@ int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info,
return -EAGAIN;
}
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
+ for (ii = 0, jj = 0; jj < MAX_BSS_NUM; jj++) {
if (current_ev >= end_buf)
break;
pBSS = &(pMgmt->sBSSList[jj]);
@@ -225,7 +225,7 @@ int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info,
iwe.u.freq.m = pBSS->uChannel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
- current_ev = iwe_stream_add_event(info, current_ev,end_buf, &iwe, IW_EV_FREQ_LEN);
+ current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
{
int f = (int)pBSS->uChannel - 1;
if (f < 0)
@@ -400,7 +400,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc\n");
break;
case IW_MODE_AUTO:
case IW_MODE_INFRA:
@@ -409,7 +409,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure\n");
break;
case IW_MODE_MASTER:
@@ -422,7 +422,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point\n");
break;
case IW_MODE_REPEAT:
@@ -657,8 +657,8 @@ int iwctl_siwap(struct net_device *dev, struct iw_request_info *info,
unsigned uSameBssidNum = 0;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyDesireBSSID)) {
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyDesireBSSID)) {
uSameBssidNum++;
}
}
@@ -786,8 +786,8 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
if (wrq->flags == 0) {
// Just send an empty SSID list
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memset(pMgmt->abyDesireBSSID, 0xFF,6);
- PRINT_K("set essid to 'any' \n");
+ memset(pMgmt->abyDesireBSSID, 0xFF, 6);
+ PRINT_K("set essid to 'any'\n");
// Unknown desired AP, so here need not associate??
return 0;
} else {
@@ -798,15 +798,15 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
memcpy(pItemSSID->abySSID, extra, wrq->length);
if (pItemSSID->abySSID[wrq->length] == '\0') {
- if (wrq->length>0)
+ if (wrq->length > 0)
pItemSSID->len = wrq->length;
} else {
pItemSSID->len = wrq->length;
}
- PRINT_K("set essid to %s \n", pItemSSID->abySSID);
+ PRINT_K("set essid to %s\n", pItemSSID->abySSID);
// mike: need clear desiredBSSID
- if (pItemSSID->len==0) {
+ if (pItemSSID->len == 0) {
memset(pMgmt->abyDesireBSSID, 0xFF, 6);
return 0;
}
@@ -840,8 +840,8 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
// are two same BSSID exist in list ?
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
- pCurr->abyBSSID)) {
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pCurr->abyBSSID)) {
uSameBssidNum++;
}
}
@@ -860,7 +860,7 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
return 0;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set essid = %s \n", pItemSSID->abySSID);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set essid = %s\n", pItemSSID->abySSID);
}
if (pDevice->flags & DEVICE_FLAGS_OPENED)
@@ -893,7 +893,7 @@ int iwctl_giwessid(struct net_device *dev, struct iw_request_info *info,
memcpy(extra, pItemSSID->abySSID, pItemSSID->len);
extra[pItemSSID->len] = '\0';
- wrq->length = pItemSSID->len;
+ wrq->length = pItemSSID->len;
wrq->flags = 1; // active
return 0;
@@ -915,7 +915,7 @@ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
0x60, 0x6C, 0x90
};
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRATE\n");
if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
rc = -EINVAL;
return rc;
@@ -953,7 +953,7 @@ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
}
// Check that it is valid
// brate is index of abySupportedRates[]
- if (brate > 13 ) {
+ if (brate > 13) {
rc = -EINVAL;
return rc;
}
@@ -967,7 +967,7 @@ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
pDevice->uConnectionRate = 3;
} else {
pDevice->uConnectionRate = brate;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fixed to Rate %d \n", pDevice->uConnectionRate);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fixed to Rate %d\n", pDevice->uConnectionRate);
}
} else {
pDevice->bFixRate = false;
@@ -1017,7 +1017,7 @@ int iwctl_giwrate(struct net_device *dev, struct iw_request_info *info,
if (pDevice->byBBType == BB_TYPE_11A)
brate = 0x6C;
}
- if (pDevice->uConnectionRate == 13)
+ if (pDevice->uConnectionRate == 13)
brate = abySupportedRates[pDevice->wCurrentRate];
wrq->value = brate * 500000;
// If more than one rate, set auto
@@ -1286,7 +1286,7 @@ int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info,
if (index < 1) { // get default key
if (pDevice->byKeyIndex < WLAN_WEP_NKEYS)
index = pDevice->byKeyIndex;
- else
+ else
index = 0;
} else {
index--;
@@ -1366,14 +1366,14 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R\n");
rc = -EINVAL;
break;
case IW_POWER_ALL_R:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ALL_R \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ALL_R\n");
rc = -EINVAL;
case IW_POWER_ON:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ON \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ON\n");
break;
default:
rc = -EINVAL;
@@ -1465,7 +1465,7 @@ int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info,
case IW_AUTH_CIPHER_PAIRWISE:
pairwise = wrq->value;
PRINT_K("iwctl_siwauth:set pairwise=%d\n", pairwise);
- if (pairwise == IW_AUTH_CIPHER_CCMP){
+ if (pairwise == IW_AUTH_CIPHER_CCMP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
} else if (pairwise == IW_AUTH_CIPHER_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
@@ -1490,13 +1490,13 @@ int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info,
}
break;
case IW_AUTH_KEY_MGMT:
- PRINT_K("iwctl_siwauth(wpa_version=%d):set KEY_MGMT=%d\n", wpa_version,wrq->value);
- if (wpa_version == IW_AUTH_WPA_VERSION_WPA2){
+ PRINT_K("iwctl_siwauth(wpa_version=%d):set KEY_MGMT=%d\n", wpa_version, wrq->value);
+ if (wpa_version == IW_AUTH_WPA_VERSION_WPA2) {
if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
else pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
} else if (wpa_version == IW_AUTH_WPA_VERSION_WPA) {
- if (wrq->value == 0){
+ if (wrq->value == 0) {
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
} else if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
@@ -1558,17 +1558,17 @@ int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info,
if (pMgmt == NULL)
return -EFAULT;
- if (wrq->length){
+ if (wrq->length) {
if ((wrq->length < 2) || (extra[1] + 2 != wrq->length)) {
ret = -EINVAL;
goto out;
}
- if (wrq->length > MAX_WPA_IE_LEN){
+ if (wrq->length > MAX_WPA_IE_LEN) {
ret = -ENOMEM;
goto out;
}
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)){
+ if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)) {
ret = -EFAULT;
goto out;
}
@@ -1615,7 +1615,7 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext*)extra;
- struct viawget_wpa_param *param=NULL;
+ struct viawget_wpa_param *param = NULL;
// original member
wpa_alg alg_name;
u8 addr[6];
@@ -1658,8 +1658,8 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
alg_name = WPA_ALG_CCMP;
break;
default:
- PRINT_K("Unknown alg = %d\n",ext->alg);
- ret= -ENOMEM;
+ PRINT_K("Unknown alg = %d\n", ext->alg);
+ ret = -ENOMEM;
goto error;
}
// recover addr
@@ -1671,7 +1671,7 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
set_tx = 1;
// recover seq,seq_len
if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- seq_len=IW_ENCODE_SEQ_MAX_SIZE;
+ seq_len = IW_ENCODE_SEQ_MAX_SIZE;
memcpy(seq, ext->rx_seq, seq_len);
}
// recover key,key_len
@@ -1702,7 +1702,7 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
/****set if current action is Network Manager count?? */
/****this method is so foolish,but there is no other way??? */
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- if (param->u.wpa_key.key_index ==0) {
+ if (param->u.wpa_key.key_index == 0) {
pDevice->bwextstep0 = true;
}
if ((pDevice->bwextstep0 == true) && (param->u.wpa_key.key_index == 1)) {
@@ -1761,7 +1761,7 @@ int iwctl_siwmlme(struct net_device *dev, struct iw_request_info *info,
ret = -EINVAL;
return ret;
}
- switch (mlme->cmd){
+ switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
if (pDevice->bLinkPass == true) {
@@ -1815,7 +1815,6 @@ static const iw_handler iwctl_handler[] = {
IW_HANDLER(SIOCGIWPOWER, iwctl_giwpower),
IW_HANDLER(SIOCSIWGENIE, iwctl_siwgenie),
IW_HANDLER(SIOCGIWGENIE, iwctl_giwgenie),
- IW_HANDLER(SIOCSIWMLME, iwctl_siwmlme),
IW_HANDLER(SIOCSIWAUTH, iwctl_siwauth),
IW_HANDLER(SIOCGIWAUTH, iwctl_giwauth),
IW_HANDLER(SIOCSIWENCODEEXT, iwctl_siwencodeext),
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 205590b0e9c..be92c048a12 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -151,7 +151,7 @@ int KeybGetKey(PSKeyManagement pTable, u8 *pbyBSSID, u32 dwKeyIndex,
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
@@ -213,7 +213,7 @@ int KeybSetKey(struct vnt_private *pDevice, PSKeyManagement pTable,
j = i;
}
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
@@ -395,7 +395,7 @@ int KeybRemoveKey(struct vnt_private *pDevice, PSKeyManagement pTable,
} else {
for (i=0;i<MAX_KEY_TABLE;i++) {
if ( (pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
@@ -445,7 +445,7 @@ int KeybRemoveAllKey(struct vnt_private *pDevice, PSKeyManagement pTable,
for (i=0;i<MAX_KEY_TABLE;i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
for (u = 0; u < MAX_GROUP_KEY; u++)
pTable->KeyTable[i].GroupKey[u].bKeyValid = false;
@@ -480,7 +480,7 @@ int KeybGetTransmitKey(PSKeyManagement pTable, u8 *pbyBSSID, u32 dwKeyType,
for (i = 0; i < MAX_KEY_TABLE; i++) {
if ((pTable->KeyTable[i].bInUse == true) &&
- !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 6f9d2818244..aae228c533e 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -702,6 +702,16 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
device_set_options(pDevice);
spin_lock_init(&pDevice->lock);
+ INIT_DELAYED_WORK(&pDevice->run_command_work, vRunCommand);
+ INIT_DELAYED_WORK(&pDevice->second_callback_work, BSSvSecondCallBack);
+ INIT_WORK(&pDevice->read_work_item, RXvWorkItem);
+ INIT_WORK(&pDevice->rx_mng_work_item, RXvMngWorkItem);
+
+ pDevice->pControlURB = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!pDevice->pControlURB) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR"Failed to alloc control urb\n");
+ goto err_netdev;
+ }
pDevice->tx_80211 = device_dma0_tx_80211;
pDevice->vnt_mgmt.pAdapter = (void *) pDevice;
@@ -713,14 +723,15 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, pDevice);
SET_NETDEV_DEV(netdev, &intf->dev);
memcpy(pDevice->dev->dev_addr, fake_mac, ETH_ALEN);
+
+ usb_device_reset(pDevice);
+
rc = register_netdev(netdev);
if (rc) {
printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
goto err_netdev;
}
- usb_device_reset(pDevice);
-
return 0;
err_netdev:
@@ -849,23 +860,15 @@ static bool device_alloc_bufs(struct vnt_private *pDevice)
pRCB++;
}
- pDevice->pControlURB = usb_alloc_urb(0, GFP_ATOMIC);
- if (pDevice->pControlURB == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc control urb\n");
- goto free_rx_tx;
- }
-
pDevice->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC);
if (pDevice->pInterruptURB == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int urb\n");
- usb_free_urb(pDevice->pControlURB);
goto free_rx_tx;
}
pDevice->intBuf.pDataBuf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
if (pDevice->intBuf.pDataBuf == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int buf\n");
- usb_free_urb(pDevice->pControlURB);
usb_free_urb(pDevice->pInterruptURB);
goto free_rx_tx;
}
@@ -981,10 +984,11 @@ static int device_open(struct net_device *dev)
}
vMgrObjectInit(pDevice);
- tasklet_init(&pDevice->RxMngWorkItem, (void *)RXvMngWorkItem, (unsigned long)pDevice);
- tasklet_init(&pDevice->ReadWorkItem, (void *)RXvWorkItem, (unsigned long)pDevice);
+
tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice);
- add_timer(&pDevice->vnt_mgmt.sTimerSecondCallback);
+
+ schedule_delayed_work(&pDevice->second_callback_work, HZ);
+
pDevice->int_interval = 100; /* max 100 microframes */
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
@@ -1000,7 +1004,7 @@ static int device_open(struct net_device *dev)
pDevice->bWPASuppWextEnabled = false;
pDevice->byReAssocCount = 0;
- RXvWorkItem(pDevice);
+ schedule_work(&pDevice->read_work_item);
INTvWorkItem(pDevice);
/* if WEP key already set by iwconfig but device not yet open */
@@ -1035,9 +1039,7 @@ free_rx_tx:
device_free_rx_bufs(pDevice);
device_free_tx_bufs(pDevice);
device_free_int_bufs(pDevice);
- usb_kill_urb(pDevice->pControlURB);
usb_kill_urb(pDevice->pInterruptURB);
- usb_free_urb(pDevice->pControlURB);
usb_free_urb(pDevice->pInterruptURB);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open fail.. \n");
@@ -1076,18 +1078,19 @@ static int device_close(struct net_device *dev)
MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES);
MP_CLEAR_FLAG(pDevice, fMP_POST_READS);
pDevice->fKillEventPollingThread = true;
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- del_timer(&pDevice->sTimerTxData);
+ cancel_delayed_work_sync(&pDevice->run_command_work);
+ cancel_delayed_work_sync(&pDevice->second_callback_work);
if (pDevice->bDiversityRegCtlON) {
del_timer(&pDevice->TimerSQ3Tmax1);
del_timer(&pDevice->TimerSQ3Tmax2);
del_timer(&pDevice->TimerSQ3Tmax3);
}
- tasklet_kill(&pDevice->RxMngWorkItem);
- tasklet_kill(&pDevice->ReadWorkItem);
+
+ cancel_work_sync(&pDevice->rx_mng_work_item);
+ cancel_work_sync(&pDevice->read_work_item);
+
tasklet_kill(&pDevice->EventWorkItem);
pDevice->bRoaming = false;
@@ -1105,9 +1108,7 @@ static int device_close(struct net_device *dev)
device_free_int_bufs(pDevice);
device_free_frag_bufs(pDevice);
- usb_kill_urb(pDevice->pControlURB);
usb_kill_urb(pDevice->pInterruptURB);
- usb_free_urb(pDevice->pControlURB);
usb_free_urb(pDevice->pInterruptURB);
BSSvClearNodeDBTable(pDevice, 0);
@@ -1131,9 +1132,12 @@ static void vt6656_disconnect(struct usb_interface *intf)
if (device->dev) {
unregister_netdev(device->dev);
+
+ usb_kill_urb(device->pControlURB);
+ usb_free_urb(device->pControlURB);
+
free_netdev(device->dev);
}
-
}
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
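The main_usb.c hunks move the long-lived resources to the edges of the device lifetime: the work items and the control URB are set up in probe() before register_netdev(), the USB reset happens before the netdev becomes visible, and the control URB is killed and freed in disconnect() right after unregister_netdev() rather than in the per-open buffer alloc/free paths. A condensed sketch of that allocate-early/release-late pairing (error labels, the GFP flag and the function names are stand-ins, not the driver's exact code):

#include <linux/usb.h>
#include <linux/netdevice.h>

static int example_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        struct vnt_private *priv;
        struct net_device *netdev;
        int ret = -ENOMEM;

        /* ... allocate netdev/priv, init locks and work items ... */

        priv->pControlURB = usb_alloc_urb(0, GFP_KERNEL);
        if (!priv->pControlURB)
                goto err_free_netdev;

        /* reset the device while nothing else can reach it yet */

        ret = register_netdev(netdev);  /* open()/ioctl() possible from here on */
        if (ret)
                goto err_free_urb;
        return 0;

err_free_urb:
        usb_free_urb(priv->pControlURB);
err_free_netdev:
        free_netdev(netdev);
        return ret;
}

static void example_disconnect(struct usb_interface *intf)
{
        struct vnt_private *priv = usb_get_intfdata(intf);

        unregister_netdev(priv->dev);   /* no new open() after this */
        usb_kill_urb(priv->pControlURB);
        usb_free_urb(priv->pControlURB);
        free_netdev(priv->dev);
}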
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index edc8975b2e2..e7d5487d104 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -233,9 +233,8 @@ void PSvSendPSPOLL(struct vnt_private *pDevice)
pTxPacket->cbPayloadLen = 0;
/* log failure if sending failed */
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
+ if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
- }
}
/*
@@ -257,10 +256,8 @@ int PSbSendNullPacket(struct vnt_private *pDevice)
if (pDevice->bLinkPass == false)
return false;
- if ((pDevice->bEnablePSMode == false) &&
- (pDevice->fTxDataInSleep == false)) {
- return false;
- }
+ if (pDevice->bEnablePSMode == false && pDevice->tx_trigger == false)
+ return false;
memset(pMgmt->pbyPSPacketPool, 0, sizeof(struct vnt_tx_mgmt)
+ WLAN_NULLDATA_FR_MAXLEN);
@@ -269,7 +266,7 @@ int PSbSendNullPacket(struct vnt_private *pDevice)
+ sizeof(struct vnt_tx_mgmt));
flags = WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL);
+ WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL);
if (pDevice->bEnablePSMode)
flags |= WLAN_SET_FC_PWRMGT(1);
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 14f3e852215..35a3ddb41a6 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -98,21 +98,18 @@ static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
static void *s_vGetFreeContext(struct vnt_private *pDevice);
-static void s_vGenerateTxParameter(struct vnt_private *pDevice,
- u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
- void *rts_cts, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
- struct ethhdr *psEthHeader, bool need_rts);
-
-static u32 s_uFillDataHead(struct vnt_private *pDevice,
- u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
- u32 uDMAIdx, int bNeedAck, u8 byFBOption);
+static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
+ u8 byPktType, u16 wCurrentRate, struct vnt_tx_buffer *tx_buffer,
+ struct vnt_mic_hdr **mic_hdr, u32 need_mic, u32 cbFrameSize,
+ int bNeedACK, u32 uDMAIdx, struct ethhdr *psEthHeader, bool need_rts);
static void s_vGenerateMACHeader(struct vnt_private *pDevice,
u8 *pbyBufferAddr, u16 wDuration, struct ethhdr *psEthHeader,
int bNeedEncrypt, u16 wFragType, u32 uDMAIdx, u32 uFragIdx);
-static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
- u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf, u16 wPayloadLen,
+static void s_vFillTxKey(struct vnt_private *pDevice,
+ struct vnt_tx_fifo_head *fifo_head, u8 *pbyIVHead,
+ PSKeyItem pTransmitKey, u8 *pbyHdrBuf, u16 wPayloadLen,
struct vnt_mic_hdr *mic_hdr);
static void s_vSWencryption(struct vnt_private *pDevice,
@@ -124,11 +121,11 @@ static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
u8 byPktType, u32 cbFrameLength, u16 wCurrentRate);
-static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
+static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
int bNeedAck, u16 wCurrentRate, u8 byFBOption);
-static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
+static u16 s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption);
@@ -183,10 +180,12 @@ static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
ETH_ALEN);
}
-static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
- u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf,
- u16 wPayloadLen, struct vnt_mic_hdr *mic_hdr)
+static void s_vFillTxKey(struct vnt_private *pDevice,
+ struct vnt_tx_fifo_head *fifo_head, u8 *pbyIVHead,
+ PSKeyItem pTransmitKey, u8 *pbyHdrBuf, u16 wPayloadLen,
+ struct vnt_mic_hdr *mic_hdr)
{
+ u8 *pbyBuf = (u8 *)&fifo_head->adwTxKey[0];
u32 *pdwIV = (u32 *)pbyIVHead;
u32 *pdwExtIV = (u32 *)((u8 *)pbyIVHead + 4);
struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *)pbyHdrBuf;
@@ -431,185 +430,114 @@ static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
{
u32 uCTSTime = 0, uDurTime = 0;
- switch (byDurType) {
+ switch (byDurType) {
+ case RTSDUR_BB:
+ case RTSDUR_BA:
+ case RTSDUR_BA_F0:
+ case RTSDUR_BA_F1:
+ uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType,
+ 14, pDevice->byTopCCKBasicRate);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS +
+ s_uGetTxRsvTime(pDevice, byPktType,
+ cbFrameLength, wRate, bNeedAck);
+ break;
- case RTSDUR_BB: //RTSDuration_bb
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- break;
+ case RTSDUR_AA:
+ case RTSDUR_AA_F0:
+ case RTSDUR_AA_F1:
+ uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType,
+ 14, pDevice->byTopOFDMBasicRate);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS +
+ s_uGetTxRsvTime(pDevice, byPktType,
+ cbFrameLength, wRate, bNeedAck);
+ break;
- case RTSDUR_BA: //RTSDuration_ba
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- break;
+ case CTSDUR_BA:
+ case CTSDUR_BA_F0:
+ case CTSDUR_BA_F1:
+ uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice,
+ byPktType, cbFrameLength, wRate, bNeedAck);
+ break;
- case RTSDUR_AA: //RTSDuration_aa
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- break;
+ default:
+ break;
+ }
- case CTSDUR_BA: //CTSDuration_ba
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- break;
+ return cpu_to_le16((u16)uDurTime);
+}
- case RTSDUR_BA_F0: //RTSDuration_ba_f0
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- break;
-
- case RTSDUR_AA_F0: //RTSDuration_aa_f0
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- break;
+static u16 vnt_rxtx_datahead_g(struct vnt_private *priv, u8 pkt_type, u16 rate,
+ struct vnt_tx_datahead_g *buf, u32 frame_len, int need_ack)
+{
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->a);
+ BBvCalculateParameter(priv, frame_len, priv->byTopCCKBasicRate,
+ PK_TYPE_11B, &buf->b);
- case RTSDUR_BA_F1: //RTSDuration_ba_f1
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- break;
-
- case RTSDUR_AA_F1: //RTSDuration_aa_f1
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- break;
+ /* Get Duration and TimeStamp */
+ buf->wDuration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->wDuration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
- case CTSDUR_BA_F0: //CTSDuration_ba_f0
- if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- break;
+ buf->wTimeStampOff_a = vnt_time_stamp_off(priv, rate);
+ buf->wTimeStampOff_b = vnt_time_stamp_off(priv,
+ priv->byTopCCKBasicRate);
- case CTSDUR_BA_F1: //CTSDuration_ba_f1
- if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- break;
+ return buf->wDuration_a;
+}
- default:
- break;
- }
+static u16 vnt_rxtx_datahead_g_fb(struct vnt_private *priv, u8 pkt_type,
+ u16 rate, struct vnt_tx_datahead_g_fb *buf,
+ u32 frame_len, int need_ack)
+{
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->a);
- return cpu_to_le16((u16)uDurTime);
+ BBvCalculateParameter(priv, frame_len, priv->byTopCCKBasicRate,
+ PK_TYPE_11B, &buf->b);
+
+ /* Get Duration and TimeStamp */
+ buf->wDuration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->wDuration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
+
+ buf->wDuration_a_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->wDuration_a_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
+
+ buf->wTimeStampOff_a = vnt_time_stamp_off(priv, rate);
+ buf->wTimeStampOff_b = vnt_time_stamp_off(priv,
+ priv->byTopCCKBasicRate);
+
+ return buf->wDuration_a;
}
-static u32 s_uFillDataHead(struct vnt_private *pDevice,
- u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
- u32 uDMAIdx, int bNeedAck, u8 byFBOption)
+static u16 vnt_rxtx_datahead_a_fb(struct vnt_private *priv, u8 pkt_type,
+ u16 rate, struct vnt_tx_datahead_a_fb *buf,
+ u32 frame_len, int need_ack)
{
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->a);
+ /* Get Duration and TimeStampOff */
+ buf->wDuration = s_uGetDataDuration(priv, pkt_type, need_ack);
- if (pTxDataHead == NULL) {
- return 0;
- }
+ buf->wDuration_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->wDuration_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (byFBOption == AUTO_FB_NONE) {
- struct vnt_tx_datahead_g *pBuf =
- (struct vnt_tx_datahead_g *)pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
- byPktType, &pBuf->a);
- BBvCalculateParameter(pDevice, cbFrameLength,
- pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
- //Get Duration and TimeStamp
- pBuf->wDuration_a = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wDuration_b = s_uGetDataDuration(pDevice,
- PK_TYPE_11B, bNeedAck);
-
- pBuf->wTimeStampOff_a = vnt_time_stamp_off(pDevice,
- wCurrentRate);
- pBuf->wTimeStampOff_b = vnt_time_stamp_off(pDevice,
- pDevice->byTopCCKBasicRate);
- return (pBuf->wDuration_a);
- } else {
- // Auto Fallback
- struct vnt_tx_datahead_g_fb *pBuf =
- (struct vnt_tx_datahead_g_fb *)pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
- byPktType, &pBuf->a);
- BBvCalculateParameter(pDevice, cbFrameLength,
- pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
- //Get Duration and TimeStamp
- pBuf->wDuration_a = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wDuration_b = s_uGetDataDuration(pDevice,
- PK_TYPE_11B, bNeedAck);
- pBuf->wDuration_a_f0 = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wDuration_a_f1 = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wTimeStampOff_a = vnt_time_stamp_off(pDevice,
- wCurrentRate);
- pBuf->wTimeStampOff_b = vnt_time_stamp_off(pDevice,
- pDevice->byTopCCKBasicRate);
- return (pBuf->wDuration_a);
- } //if (byFBOption == AUTO_FB_NONE)
- }
- else if (byPktType == PK_TYPE_11A) {
- if (byFBOption != AUTO_FB_NONE) {
- struct vnt_tx_datahead_a_fb *pBuf =
- (struct vnt_tx_datahead_a_fb *)pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
- byPktType, &pBuf->a);
- //Get Duration and TimeStampOff
- pBuf->wDuration = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wDuration_f0 = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wDuration_f1 = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
- wCurrentRate);
- return (pBuf->wDuration);
- } else {
- struct vnt_tx_datahead_ab *pBuf =
- (struct vnt_tx_datahead_ab *)pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
- byPktType, &pBuf->ab);
- //Get Duration and TimeStampOff
- pBuf->wDuration = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
- wCurrentRate);
- return (pBuf->wDuration);
- }
- }
- else if (byPktType == PK_TYPE_11B) {
- struct vnt_tx_datahead_ab *pBuf =
- (struct vnt_tx_datahead_ab *)pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
- byPktType, &pBuf->ab);
- //Get Duration and TimeStampOff
- pBuf->wDuration = s_uGetDataDuration(pDevice,
- byPktType, bNeedAck);
- pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
- wCurrentRate);
- return (pBuf->wDuration);
- }
- return 0;
+ buf->wTimeStampOff = vnt_time_stamp_off(priv, rate);
+
+ return buf->wDuration;
+}
+
+static u16 vnt_rxtx_datahead_ab(struct vnt_private *priv, u8 pkt_type,
+ u16 rate, struct vnt_tx_datahead_ab *buf,
+ u32 frame_len, int need_ack)
+{
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->ab);
+ /* Get Duration and TimeStampOff */
+ buf->wDuration = s_uGetDataDuration(priv, pkt_type, need_ack);
+
+ buf->wTimeStampOff = vnt_time_stamp_off(priv, rate);
+
+ return buf->wDuration;
}
static int vnt_fill_ieee80211_rts(struct vnt_private *priv,
@@ -632,7 +560,7 @@ static int vnt_fill_ieee80211_rts(struct vnt_private *priv,
return 0;
}
-static int vnt_rxtx_rts_g_head(struct vnt_private *priv,
+static u16 vnt_rxtx_rts_g_head(struct vnt_private *priv,
struct vnt_rts_g *buf, struct ethhdr *eth_hdr,
u8 pkt_type, u32 frame_len, int need_ack,
u16 current_rate, u8 fb_option)
@@ -653,10 +581,11 @@ static int vnt_rxtx_rts_g_head(struct vnt_private *priv,
vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
- return 0;
+ return vnt_rxtx_datahead_g(priv, pkt_type, current_rate,
+ &buf->data_head, frame_len, need_ack);
}
-static int vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
+static u16 vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
struct vnt_rts_g_fb *buf, struct ethhdr *eth_hdr,
u8 pkt_type, u32 frame_len, int need_ack,
u16 current_rate, u8 fb_option)
@@ -678,20 +607,21 @@ static int vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
buf->wRTSDuration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
- frame_len, pkt_type, current_rate, need_ack, fb_option);
+ frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
buf->wRTSDuration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
- frame_len, pkt_type, current_rate, need_ack, fb_option);
+ frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
buf->wRTSDuration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
- frame_len, pkt_type, current_rate, need_ack, fb_option);
+ frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
buf->wRTSDuration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
- frame_len, pkt_type, current_rate, need_ack, fb_option);
+ frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
- return 0;
+ return vnt_rxtx_datahead_g_fb(priv, pkt_type, current_rate,
+ &buf->data_head, frame_len, need_ack);
}
-static int vnt_rxtx_rts_ab_head(struct vnt_private *priv,
+static u16 vnt_rxtx_rts_ab_head(struct vnt_private *priv,
struct vnt_rts_ab *buf, struct ethhdr *eth_hdr,
u8 pkt_type, u32 frame_len, int need_ack,
u16 current_rate, u8 fb_option)
@@ -706,10 +636,11 @@ static int vnt_rxtx_rts_ab_head(struct vnt_private *priv,
vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
- return 0;
+ return vnt_rxtx_datahead_ab(priv, pkt_type, current_rate,
+ &buf->data_head, frame_len, need_ack);
}
-static int vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
+static u16 vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
struct vnt_rts_a_fb *buf, struct ethhdr *eth_hdr,
u8 pkt_type, u32 frame_len, int need_ack,
u16 current_rate, u8 fb_option)
@@ -723,23 +654,24 @@ static int vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
pkt_type, current_rate, need_ack, fb_option);
buf->wRTSDuration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
- frame_len, pkt_type, current_rate, need_ack, fb_option);
+ frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
buf->wRTSDuration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
- frame_len, pkt_type, current_rate, need_ack, fb_option);
+ frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
- return 0;
+ return vnt_rxtx_datahead_a_fb(priv, pkt_type, current_rate,
+ &buf->data_head, frame_len, need_ack);
}
-static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
+static u16 s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption)
{
if (!head)
- return;
+ return 0;
/* Note: So far RTSHead doesn't appear in ATIM
* & Beacon DMA, so we don't need to take them
@@ -750,36 +682,38 @@ static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
case PK_TYPE_11GB:
case PK_TYPE_11GA:
if (byFBOption == AUTO_FB_NONE)
- vnt_rxtx_rts_g_head(pDevice, &head->rts_g,
+ return vnt_rxtx_rts_g_head(pDevice, &head->rts_g,
psEthHeader, byPktType, cbFrameLength,
bNeedAck, wCurrentRate, byFBOption);
else
- vnt_rxtx_rts_g_fb_head(pDevice, &head->rts_g_fb,
+ return vnt_rxtx_rts_g_fb_head(pDevice, &head->rts_g_fb,
psEthHeader, byPktType, cbFrameLength,
bNeedAck, wCurrentRate, byFBOption);
break;
case PK_TYPE_11A:
if (byFBOption) {
- vnt_rxtx_rts_a_fb_head(pDevice, &head->rts_a_fb,
+ return vnt_rxtx_rts_a_fb_head(pDevice, &head->rts_a_fb,
psEthHeader, byPktType, cbFrameLength,
bNeedAck, wCurrentRate, byFBOption);
break;
}
case PK_TYPE_11B:
- vnt_rxtx_rts_ab_head(pDevice, &head->rts_ab,
+ return vnt_rxtx_rts_ab_head(pDevice, &head->rts_ab,
psEthHeader, byPktType, cbFrameLength,
bNeedAck, wCurrentRate, byFBOption);
}
+
+ return 0;
}
-static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
+static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
int bNeedAck, u16 wCurrentRate, u8 byFBOption)
{
u32 uCTSFrameLen = 14;
if (!head)
- return;
+ return 0;
if (byFBOption != AUTO_FB_NONE) {
/* Auto Fall back */
@@ -792,16 +726,19 @@ static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
wCurrentRate, bNeedAck, byFBOption);
/* Get CTSDuration_ba_f0 */
pBuf->wCTSDuration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
- CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate,
- bNeedAck, byFBOption);
+ CTSDUR_BA_F0, cbFrameLength, byPktType,
+ pDevice->tx_rate_fb0, bNeedAck, byFBOption);
/* Get CTSDuration_ba_f1 */
pBuf->wCTSDuration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
- CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate,
- bNeedAck, byFBOption);
+ CTSDUR_BA_F1, cbFrameLength, byPktType,
+ pDevice->tx_rate_fb1, bNeedAck, byFBOption);
/* Get CTS Frame body */
pBuf->data.duration = pBuf->wDuration_ba;
pBuf->data.frame_control = TYPE_CTL_CTS;
memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+
+ return vnt_rxtx_datahead_g_fb(pDevice, byPktType, wCurrentRate,
+ &pBuf->data_head, cbFrameLength, bNeedAck);
} else {
struct vnt_cts *pBuf = &head->cts_g;
/* Get SignalField,ServiceField,Length */
@@ -815,7 +752,12 @@ static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
pBuf->data.duration = pBuf->wDuration_ba;
pBuf->data.frame_control = TYPE_CTL_CTS;
memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+
+ return vnt_rxtx_datahead_g(pDevice, byPktType, wCurrentRate,
+ &pBuf->data_head, cbFrameLength, bNeedAck);
}
+
+ return 0;
}
/*+
@@ -841,112 +783,160 @@ static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
*
-*/
-static void s_vGenerateTxParameter(struct vnt_private *pDevice,
- u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
- void *rts_cts, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
- struct ethhdr *psEthHeader, bool need_rts)
+static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
+ u8 byPktType, u16 wCurrentRate, struct vnt_tx_buffer *tx_buffer,
+ struct vnt_mic_hdr **mic_hdr, u32 need_mic, u32 cbFrameSize,
+ int bNeedACK, u32 uDMAIdx, struct ethhdr *psEthHeader, bool need_rts)
{
- union vnt_tx_data_head *head = rts_cts;
+ struct vnt_tx_fifo_head *pFifoHead = &tx_buffer->fifo_head;
+ union vnt_tx_data_head *head = NULL;
u32 cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
u16 wFifoCtl;
u8 byFBOption = AUTO_FB_NONE;
- //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
- PSTxBufHead pFifoHead = (PSTxBufHead)pTxBufHead;
- pFifoHead->wReserved = wCurrentRate;
- wFifoCtl = pFifoHead->wFIFOCtl;
+ pFifoHead->wReserved = wCurrentRate;
+ wFifoCtl = pFifoHead->wFIFOCtl;
- if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
- byFBOption = AUTO_FB_0;
- }
- else if (wFifoCtl & FIFOCTL_AUTO_FB_1) {
- byFBOption = AUTO_FB_1;
- }
+ if (wFifoCtl & FIFOCTL_AUTO_FB_0)
+ byFBOption = AUTO_FB_0;
+ else if (wFifoCtl & FIFOCTL_AUTO_FB_1)
+ byFBOption = AUTO_FB_1;
- if (!pvRrvTime)
- return;
+ if (!pFifoHead)
+ return 0;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
+ if (pDevice->bLongHeader)
+ cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (need_rts) {
- //Fill RsvTime
- struct vnt_rrv_time_rts *pBuf =
- (struct vnt_rrv_time_rts *)pvRrvTime;
- pBuf->wRTSTxRrvTime_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
- byPktType, cbFrameSize, wCurrentRate);
- pBuf->wRTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
- byPktType, cbFrameSize, wCurrentRate);
- pBuf->wRTSTxRrvTime_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
- byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
- byPktType, cbFrameSize, wCurrentRate, bNeedACK);
- pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
- PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate,
- bNeedACK);
- /* Fill RTS */
- s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
- bNeedACK, psEthHeader, wCurrentRate, byFBOption);
- }
- else {//RTS_needless, PCF mode
- //Fill RsvTime
- struct vnt_rrv_time_cts *pBuf =
- (struct vnt_rrv_time_cts *)pvRrvTime;
- pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice, byPktType,
- cbFrameSize, wCurrentRate, bNeedACK);
- pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
- PK_TYPE_11B, cbFrameSize,
- pDevice->byTopCCKBasicRate, bNeedACK);
- pBuf->wCTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
+ if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
+ if (need_rts) {
+ struct vnt_rrv_time_rts *pBuf =
+ &tx_buffer->tx_head.tx_rts.rts;
+
+ pBuf->wRTSTxRrvTime_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wRTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wRTSTxRrvTime_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
byPktType, cbFrameSize, wCurrentRate);
- /* Fill CTS */
- s_vFillCTSHead(pDevice, uDMAIdx, byPktType, head,
- cbFrameSize, bNeedACK, wCurrentRate, byFBOption);
- }
- }
- else if (byPktType == PK_TYPE_11A) {
- if (need_rts) {
- //Fill RsvTime
- struct vnt_rrv_time_ab *pBuf =
- (struct vnt_rrv_time_ab *)pvRrvTime;
- pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 2,
+
+ pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ byPktType, cbFrameSize, wCurrentRate, bNeedACK);
+ pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize,
+ pDevice->byTopCCKBasicRate, bNeedACK);
+
+ if (need_mic) {
+ *mic_hdr = &tx_buffer->
+ tx_head.tx_rts.tx.mic.hdr;
+ head = &tx_buffer->tx_head.tx_rts.tx.mic.head;
+ } else {
+ head = &tx_buffer->tx_head.tx_rts.tx.head;
+ }
+
+ /* Fill RTS */
+ return s_vFillRTSHead(pDevice, byPktType, head,
+ cbFrameSize, bNeedACK, psEthHeader,
+ wCurrentRate, byFBOption);
+
+ } else {
+ struct vnt_rrv_time_cts *pBuf = &tx_buffer->
+ tx_head.tx_cts.cts;
+
+ pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ byPktType, cbFrameSize, wCurrentRate, bNeedACK);
+ pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize,
+ pDevice->byTopCCKBasicRate, bNeedACK);
+
+ pBuf->wCTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
+ byPktType, cbFrameSize, wCurrentRate);
+
+ if (need_mic) {
+ *mic_hdr = &tx_buffer->
+ tx_head.tx_cts.tx.mic.hdr;
+ head = &tx_buffer->tx_head.tx_cts.tx.mic.head;
+ } else {
+ head = &tx_buffer->tx_head.tx_cts.tx.head;
+ }
+
+ /* Fill CTS */
+ return s_vFillCTSHead(pDevice, uDMAIdx, byPktType,
+ head, cbFrameSize, bNeedACK, wCurrentRate,
+ byFBOption);
+ }
+ } else if (byPktType == PK_TYPE_11A) {
+ if (need_mic) {
+ *mic_hdr = &tx_buffer->tx_head.tx_ab.tx.mic.hdr;
+ head = &tx_buffer->tx_head.tx_ab.tx.mic.head;
+ } else {
+ head = &tx_buffer->tx_head.tx_ab.tx.head;
+ }
+
+ if (need_rts) {
+ struct vnt_rrv_time_ab *pBuf = &tx_buffer->
+ tx_head.tx_ab.ab;
+
+ pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 2,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, byPktType,
- cbFrameSize, wCurrentRate, bNeedACK);
- /* Fill RTS */
- s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
- bNeedACK, psEthHeader, wCurrentRate, byFBOption);
- } else {
- //Fill RsvTime
- struct vnt_rrv_time_ab *pBuf =
- (struct vnt_rrv_time_ab *)pvRrvTime;
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11A,
- cbFrameSize, wCurrentRate, bNeedACK);
- }
- }
- else if (byPktType == PK_TYPE_11B) {
- if (need_rts) {
- //Fill RsvTime
- struct vnt_rrv_time_ab *pBuf =
- (struct vnt_rrv_time_ab *)pvRrvTime;
- pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 0,
+
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ byPktType, cbFrameSize, wCurrentRate, bNeedACK);
+
+ /* Fill RTS */
+ return s_vFillRTSHead(pDevice, byPktType, head,
+ cbFrameSize, bNeedACK, psEthHeader,
+ wCurrentRate, byFBOption);
+ } else {
+ struct vnt_rrv_time_ab *pBuf = &tx_buffer->
+ tx_head.tx_ab.ab;
+
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11A, cbFrameSize,
+ wCurrentRate, bNeedACK);
+
+ return vnt_rxtx_datahead_a_fb(pDevice, byPktType,
+ wCurrentRate, &head->data_head_a_fb,
+ cbFrameSize, bNeedACK);
+ }
+ } else if (byPktType == PK_TYPE_11B) {
+ if (need_mic) {
+ *mic_hdr = &tx_buffer->tx_head.tx_ab.tx.mic.hdr;
+ head = &tx_buffer->tx_head.tx_ab.tx.mic.head;
+ } else {
+ head = &tx_buffer->tx_head.tx_ab.tx.head;
+ }
+
+ if (need_rts) {
+ struct vnt_rrv_time_ab *pBuf = &tx_buffer->
+ tx_head.tx_ab.ab;
+
+ pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 0,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B,
- cbFrameSize, wCurrentRate, bNeedACK);
- /* Fill RTS */
- s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize, wCurrentRate,
+ bNeedACK);
+
+ /* Fill RTS */
+ return s_vFillRTSHead(pDevice, byPktType, head,
+ cbFrameSize,
bNeedACK, psEthHeader, wCurrentRate, byFBOption);
- }
- else { //RTS_needless, non PCF mode
- //Fill RsvTime
- struct vnt_rrv_time_ab *pBuf =
- (struct vnt_rrv_time_ab *)pvRrvTime;
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B,
- cbFrameSize, wCurrentRate, bNeedACK);
- }
- }
- //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter END.\n");
+ } else {
+ struct vnt_rrv_time_ab *pBuf = &tx_buffer->
+ tx_head.tx_ab.ab;
+
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize,
+ wCurrentRate, bNeedACK);
+
+ return vnt_rxtx_datahead_ab(pDevice, byPktType,
+ wCurrentRate, &head->data_head_ab,
+ cbFrameSize, bNeedACK);
+ }
+ }
+
+ return 0;
}
/*
u8 * pbyBuffer,//point to pTxBufHead
@@ -955,11 +945,12 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
*/
static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
- struct vnt_tx_buffer *pTxBufHead, int bNeedEncryption,
+ struct vnt_tx_buffer *tx_buffer, int bNeedEncryption,
u32 uSkbPacketLen, u32 uDMAIdx, struct ethhdr *psEthHeader,
u8 *pPacket, PSKeyItem pTransmitKey, u32 uNodeIndex, u16 wCurrentRate,
u32 *pcbHeaderLen, u32 *pcbTotalLen)
{
+ struct vnt_tx_fifo_head *pTxBufHead = &tx_buffer->fifo_head;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u32 cbFrameSize, cbFrameBodySize;
u32 cb802_1_H_len;
@@ -973,17 +964,14 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
= {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
u32 uDuration;
u32 cbHeaderLength = 0, uPadding = 0;
- void *pvRrvTime;
struct vnt_mic_hdr *pMICHDR;
- void *rts_cts = NULL;
- void *pvTxDataHd;
u8 byFBOption = AUTO_FB_NONE, byFragType;
u16 wTxBufSize;
u32 dwMICKey0, dwMICKey1, dwMIC_Priority;
u32 *pdwMIC_L, *pdwMIC_R;
int bSoftWEP = false;
- pvRrvTime = pMICHDR = pvTxDataHd = NULL;
+ pMICHDR = NULL;
if (bNeedEncryption && pTransmitKey->pvKeyTable) {
if (((PSKeyTable)pTransmitKey->pvKeyTable)->bSoftWEP == true)
@@ -1047,16 +1035,27 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
- //Set Auto Fallback Ctl
- if (wCurrentRate >= RATE_18M) {
- if (pDevice->byAutoFBCtrl == AUTO_FB_0) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0;
- byFBOption = AUTO_FB_0;
- } else if (pDevice->byAutoFBCtrl == AUTO_FB_1) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1;
- byFBOption = AUTO_FB_1;
- }
- }
+ /* Set Auto Fallback Ctl */
+ if (wCurrentRate >= RATE_18M) {
+ if (pDevice->byAutoFBCtrl == AUTO_FB_0) {
+ pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0;
+
+ pDevice->tx_rate_fb0 =
+ wFB_Opt0[FB_RATE0][wCurrentRate - RATE_18M];
+ pDevice->tx_rate_fb1 =
+ wFB_Opt0[FB_RATE1][wCurrentRate - RATE_18M];
+
+ byFBOption = AUTO_FB_0;
+ } else if (pDevice->byAutoFBCtrl == AUTO_FB_1) {
+ pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1;
+ pDevice->tx_rate_fb0 =
+ wFB_Opt1[FB_RATE0][wCurrentRate - RATE_18M];
+ pDevice->tx_rate_fb1 =
+ wFB_Opt1[FB_RATE1][wCurrentRate - RATE_18M];
+
+ byFBOption = AUTO_FB_1;
+ }
+ }
if (bSoftWEP != true) {
if ((bNeedEncryption) && (pTransmitKey != NULL)) { //WEP enabled
@@ -1105,118 +1104,47 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
}
pbyTxBufferAddr = (u8 *) &(pTxBufHead->adwTxKey[0]);
- wTxBufSize = sizeof(STxBufHead);
+ wTxBufSize = sizeof(struct vnt_tx_fifo_head);
+
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
- pvRrvTime = (struct vnt_rrv_time_rts *)
- (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_rts));
- rts_cts = (struct vnt_rts_g *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_g *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
- cbMICHDR + sizeof(struct vnt_rts_g));
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
- cbMICHDR + sizeof(struct vnt_rts_g) +
- sizeof(struct vnt_tx_datahead_g);
+ cbMICHDR + sizeof(struct vnt_rts_g);
}
else { //RTS_needless
- pvRrvTime = (struct vnt_rrv_time_cts *)
- (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_g *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- cbMICHDR + sizeof(struct vnt_cts));
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- cbMICHDR + sizeof(struct vnt_cts) +
- sizeof(struct vnt_tx_datahead_g);
+ cbMICHDR + sizeof(struct vnt_cts);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
- pvRrvTime = (struct vnt_rrv_time_rts *)(pbyTxBufferAddr +
- wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_rts));
- rts_cts = (struct vnt_rts_g_fb *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_g_fb *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
- cbMICHDR + sizeof(struct vnt_rts_g_fb));
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
- cbMICHDR + sizeof(struct vnt_rts_g_fb) +
- sizeof(struct vnt_tx_datahead_g_fb);
+ cbMICHDR + sizeof(struct vnt_rts_g_fb);
}
else if (bRTS == false) { //RTS_needless
- pvRrvTime = (struct vnt_rrv_time_cts *)
- (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- rts_cts = (struct vnt_cts_fb *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_g_fb *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- cbMICHDR + sizeof(struct vnt_cts_fb));
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- cbMICHDR + sizeof(struct vnt_cts_fb) +
- sizeof(struct vnt_tx_datahead_g_fb);
+ cbMICHDR + sizeof(struct vnt_cts_fb);
}
} // Auto Fall Back
}
else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
- pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr +
- wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- rts_cts = (struct vnt_rts_ab *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
- sizeof(struct vnt_rts_ab));
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
- cbMICHDR + sizeof(struct vnt_rts_ab) +
- sizeof(struct vnt_tx_datahead_ab);
+ cbMICHDR + sizeof(struct vnt_rts_ab);
}
else if (bRTS == false) { //RTS_needless, no MICHDR
- pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
- wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
cbMICHDR + sizeof(struct vnt_tx_datahead_ab);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
- pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
- wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- rts_cts = (struct vnt_rts_a_fb *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_a_fb *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
- sizeof(struct vnt_rts_a_fb));
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
- cbMICHDR + sizeof(struct vnt_rts_a_fb) +
- sizeof(struct vnt_tx_datahead_a_fb);
+ cbMICHDR + sizeof(struct vnt_rts_a_fb);
}
else if (bRTS == false) { //RTS_needless
- pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
- wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- pvTxDataHd = (struct vnt_tx_datahead_a_fb *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
cbMICHDR + sizeof(struct vnt_tx_datahead_a_fb);
}
@@ -1235,20 +1163,18 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
//uDMAIdx = TYPE_AC0DMA;
//pTxBufHead = (PSTxBufHead) &(pTxBufHead->adwTxKey[0]);
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
- (void *)pbyTxBufferAddr, pvRrvTime, rts_cts,
- cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, bRTS);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
- byFBOption);
+ /* Fill FIFO, RrvTime, RTS and CTS */
+ uDuration = s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ tx_buffer, &pMICHDR, cbMICHDR,
+ cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, bRTS);
+
// Generate TX MAC Header
s_vGenerateMACHeader(pDevice, pbyMacHdr, (u16)uDuration, psEthHeader, bNeedEncryption,
byFragType, uDMAIdx, 0);
if (bNeedEncryption == true) {
//Fill TXKEY
- s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
+ s_vFillTxKey(pDevice, pTxBufHead, pbyIVHead, pTransmitKey,
pbyMacHdr, (u16)cbFrameBodySize, pMICHDR);
if (pDevice->bEnableHostWEP) {
@@ -1469,13 +1395,12 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct vnt_tx_buffer *pTX_Buffer;
- PSTxBufHead pTxBufHead;
struct vnt_usb_send_context *pContext;
+ struct vnt_tx_fifo_head *pTxBufHead;
struct ieee80211_hdr *pMACHeader;
struct ethhdr sEthHeader;
u8 byPktType, *pbyTxBufferAddr;
- void *rts_cts = NULL;
- void *pvTxDataHd, *pvRrvTime, *pMICHDR;
+ struct vnt_mic_hdr *pMICHDR = NULL;
u32 uDuration, cbReqCount, cbHeaderSize, cbFrameBodySize, cbFrameSize;
int bNeedACK, bIsPSPOLL = false;
u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbFCSlen = 4;
@@ -1492,10 +1417,10 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
}
pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
- pbyTxBufferAddr = (u8 *)&(pTX_Buffer->adwTxKey[0]);
cbFrameBodySize = pPacket->cbPayloadLen;
- pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxBufHead);
+ pTxBufHead = &pTX_Buffer->fifo_head;
+ pbyTxBufferAddr = (u8 *)&pTxBufHead->adwTxKey[0];
+ wTxBufSize = sizeof(struct vnt_tx_fifo_head);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
@@ -1607,21 +1532,10 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
//Set RrvTime/RTS/CTS Buffer
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
-
- pvRrvTime = (struct vnt_rrv_time_cts *) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = NULL;
- rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- pvTxDataHd = (struct vnt_tx_datahead_g *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + sizeof(struct vnt_cts));
cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
+ sizeof(struct vnt_cts);
}
else { // 802.11a/b packet
- pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = NULL;
- pvTxDataHd = (struct vnt_tx_datahead_ab *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab));
cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
sizeof(struct vnt_tx_datahead_ab);
}
@@ -1638,14 +1552,10 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pTxBufHead->wFragCtl |= (u16)FRAGCTL_NONFRAG;
/* Fill FIFO,RrvTime,RTS,and CTS */
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
- pbyTxBufferAddr, pvRrvTime, rts_cts,
+ uDuration = s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ pTX_Buffer, &pMICHDR, 0,
cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, false);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- AUTO_FB_NONE);
-
pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize;
@@ -1684,7 +1594,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
}
} while(false);
//Fill TXKEY
- s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
+ s_vFillTxKey(pDevice, pTxBufHead, pbyIVHead, pTransmitKey,
(u8 *)pMACHeader, (u16)cbFrameBodySize, NULL);
memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen);
@@ -1708,12 +1618,16 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
// in the same place of other packet's Duration-field).
// And it will cause Cisco-AP to issue Disassociation-packet
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_a =
+ struct vnt_tx_datahead_g *data_head = &pTX_Buffer->tx_head.
+ tx_cts.tx.head.cts_g.data_head;
+ data_head->wDuration_a =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_b =
+ data_head->wDuration_b =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
} else {
- ((struct vnt_tx_datahead_ab *)pvTxDataHd)->wDuration =
+ struct vnt_tx_datahead_ab *data_head = &pTX_Buffer->tx_head.
+ tx_ab.tx.head.data_head_ab;
+ data_head->wDuration =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
}
}
@@ -1727,10 +1641,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
if (WLAN_GET_FC_TODS(pMACHeader->frame_control) == 0) {
- s_vSaveTxPktInfo(pDevice, (u8) (pTX_Buffer->byPKTNO & 0x0F), &(pMACHeader->addr1[0]), (u16)cbFrameSize, pTX_Buffer->wFIFOCtl);
+ s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
+ &pMACHeader->addr1[0], (u16)cbFrameSize,
+ pTxBufHead->wFIFOCtl);
}
else {
- s_vSaveTxPktInfo(pDevice, (u8) (pTX_Buffer->byPKTNO & 0x0F), &(pMACHeader->addr3[0]), (u16)cbFrameSize, pTX_Buffer->wFIFOCtl);
+ s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
+ &pMACHeader->addr3[0], (u16)cbFrameSize,
+ pTxBufHead->wFIFOCtl);
}
PIPEnsSendBulkOut(pDevice,pContext);
@@ -1825,15 +1743,13 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct vnt_tx_buffer *pTX_Buffer;
+ struct vnt_tx_fifo_head *pTxBufHead;
u8 byPktType;
u8 *pbyTxBufferAddr;
- void *rts_cts = NULL;
- void *pvTxDataHd;
u32 uDuration, cbReqCount;
struct ieee80211_hdr *pMACHeader;
u32 cbHeaderSize, cbFrameBodySize;
int bNeedACK, bIsPSPOLL = false;
- PSTxBufHead pTxBufHead;
u32 cbFrameSize;
u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbFCSlen = 4;
u32 uPadding = 0;
@@ -1844,7 +1760,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
u16 wTxBufSize;
u32 cbMacHdLen;
struct ethhdr sEthHeader;
- void *pvRrvTime, *pMICHDR;
+ struct vnt_mic_hdr *pMICHDR;
u32 wCurrentRate = RATE_1M;
PUWLAN_80211HDR p80211Header;
u32 uNodeIndex = 0;
@@ -1855,7 +1771,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
u32 cbExtSuppRate = 0;
struct vnt_usb_send_context *pContext;
- pvRrvTime = pMICHDR = pvTxDataHd = NULL;
+ pMICHDR = NULL;
if(skb->len <= WLAN_HDR_ADDR3_LEN) {
cbFrameBodySize = 0;
@@ -1874,9 +1790,9 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
- pbyTxBufferAddr = (u8 *)(&pTX_Buffer->adwTxKey[0]);
- pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxBufHead);
+ pTxBufHead = &pTX_Buffer->fifo_head;
+ pbyTxBufferAddr = (u8 *)&pTxBufHead->adwTxKey[0];
+ wTxBufSize = sizeof(struct vnt_tx_fifo_head);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
@@ -2014,25 +1930,11 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
- pvRrvTime = (struct vnt_rrv_time_cts *) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
- pvTxDataHd = (struct vnt_tx_datahead_g *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR +
- sizeof(struct vnt_cts));
cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR +
- sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
+ sizeof(struct vnt_cts);
}
else {//802.11a/b packet
-
- pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
sizeof(struct vnt_tx_datahead_ab);
}
@@ -2048,15 +1950,11 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
pTxBufHead->wFragCtl |= (u16)FRAGCTL_NONFRAG;
/* Fill FIFO,RrvTime,RTS,and CTS */
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
- pbyTxBufferAddr, pvRrvTime, rts_cts,
+ uDuration = s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ pTX_Buffer, &pMICHDR, cbMICHDR,
cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, false);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- AUTO_FB_NONE);
-
- pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
+ pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate;
@@ -2139,7 +2037,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
- s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
+ s_vFillTxKey(pDevice, pTxBufHead, pbyIVHead, pTransmitKey,
pbyMacHdr, (u16)cbFrameBodySize, pMICHDR);
if (pDevice->bEnableHostWEP) {
@@ -2164,12 +2062,16 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
// in the same place of other packet's Duration-field).
// And it will cause Cisco-AP to issue Disassociation-packet
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_a =
+ struct vnt_tx_datahead_g *data_head = &pTX_Buffer->tx_head.
+ tx_cts.tx.head.cts_g.data_head;
+ data_head->wDuration_a =
cpu_to_le16(p80211Header->sA2.wDurationID);
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_b =
+ data_head->wDuration_b =
cpu_to_le16(p80211Header->sA2.wDurationID);
} else {
- ((struct vnt_tx_datahead_ab *)pvTxDataHd)->wDuration =
+ struct vnt_tx_datahead_ab *data_head = &pTX_Buffer->tx_head.
+ tx_ab.tx.head.data_head_ab;
+ data_head->wDuration =
cpu_to_le16(p80211Header->sA2.wDurationID);
}
}
@@ -2183,10 +2085,14 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
if (WLAN_GET_FC_TODS(pMACHeader->frame_control) == 0) {
- s_vSaveTxPktInfo(pDevice, (u8) (pTX_Buffer->byPKTNO & 0x0F), &(pMACHeader->addr1[0]), (u16)cbFrameSize, pTX_Buffer->wFIFOCtl);
+ s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
+ &pMACHeader->addr1[0], (u16)cbFrameSize,
+ pTxBufHead->wFIFOCtl);
}
else {
- s_vSaveTxPktInfo(pDevice, (u8) (pTX_Buffer->byPKTNO & 0x0F), &(pMACHeader->addr3[0]), (u16)cbFrameSize, pTX_Buffer->wFIFOCtl);
+ s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
+ &pMACHeader->addr3[0], (u16)cbFrameSize,
+ pTxBufHead->wFIFOCtl);
}
PIPEnsSendBulkOut(pDevice,pContext);
return ;
@@ -2568,7 +2474,10 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
pContext->Type = CONTEXT_DATA_PACKET;
pContext->uBufLen = (u16)BytesToWrite + 4 ; //USB header
- s_vSaveTxPktInfo(pDevice, (u8) (pTX_Buffer->byPKTNO & 0x0F), &(pContext->sEthHeader.h_dest[0]), (u16) (BytesToWrite-uHeaderLen), pTX_Buffer->wFIFOCtl);
+ s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
+ &pContext->sEthHeader.h_dest[0],
+ (u16)(BytesToWrite-uHeaderLen),
+ pTX_Buffer->fifo_head.wFIFOCtl);
status = PIPEnsSendBulkOut(pDevice,pContext);
@@ -2719,7 +2628,10 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
pContext->Type = CONTEXT_DATA_PACKET;
pContext->uBufLen = (u16)BytesToWrite + 4 ; //USB header
- s_vSaveTxPktInfo(pDevice, (u8) (pTX_Buffer->byPKTNO & 0x0F), &(pContext->sEthHeader.h_dest[0]), (u16) (BytesToWrite-uHeaderLen), pTX_Buffer->wFIFOCtl);
+ s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
+ &pContext->sEthHeader.h_dest[0],
+ (u16)(BytesToWrite - uHeaderLen),
+ pTX_Buffer->fifo_head.wFIFOCtl);
status = PIPEnsSendBulkOut(pDevice,pContext);
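
For illustration only (not part of the patch): with the fallback rates now cached in tx_rate_fb0/tx_rate_fb1 by s_bPacketToWirelessUsb(), the RTSDUR_*_F0/_F1 and CTSDUR_BA_F0/_F1 durations above are derived from the fallback rate rather than the primary rate, and the table lookup happens once per packet. A minimal userspace C sketch of that bounds-checked lookup; the rate values, table contents and the names fb_table/cache_fallback_rates are invented here, only the indexing scheme mirrors the driver:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical rate indices and fallback table -- the real values live in the driver. */
    enum { RATE_18M = 6, RATE_54M = 11 };
    enum { FB_RATE0, FB_RATE1 };

    static const uint16_t fb_table[2][RATE_54M - RATE_18M + 1] = {
        [FB_RATE0] = { 4, 5, 6, 7, 8, 9 },  /* first fallback step  */
        [FB_RATE1] = { 3, 4, 5, 6, 7, 8 },  /* second fallback step */
    };

    static void cache_fallback_rates(uint16_t rate, uint16_t *fb0, uint16_t *fb1)
    {
        /* Only rates in [18M, 54M] have fallback entries; keep the old rate otherwise. */
        if (rate < RATE_18M || rate > RATE_54M) {
            *fb0 = *fb1 = rate;
            return;
        }

        *fb0 = fb_table[FB_RATE0][rate - RATE_18M];
        *fb1 = fb_table[FB_RATE1][rate - RATE_18M];
    }

    int main(void)
    {
        uint16_t fb0, fb1;

        cache_fallback_rates(RATE_18M + 2, &fb0, &fb1);
        printf("fb0=%d fb1=%d\n", fb0, fb1);
        return 0;
    }
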
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 4bbee1c2fca..eecbe890027 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -117,6 +117,7 @@ struct vnt_rts_g {
u16 wDuration_bb;
u16 wReserved;
struct ieee80211_rts data;
+ struct vnt_tx_datahead_g data_head;
} __packed;
struct vnt_rts_g_fb {
@@ -131,6 +132,7 @@ struct vnt_rts_g_fb {
u16 wRTSDuration_ba_f1;
u16 wRTSDuration_aa_f1;
struct ieee80211_rts data;
+ struct vnt_tx_datahead_g_fb data_head;
} __packed;
struct vnt_rts_ab {
@@ -138,6 +140,7 @@ struct vnt_rts_ab {
u16 wDuration;
u16 wReserved;
struct ieee80211_rts data;
+ struct vnt_tx_datahead_ab data_head;
} __packed;
struct vnt_rts_a_fb {
@@ -147,6 +150,7 @@ struct vnt_rts_a_fb {
u16 wRTSDuration_f0;
u16 wRTSDuration_f1;
struct ieee80211_rts data;
+ struct vnt_tx_datahead_a_fb data_head;
} __packed;
/* CTS buffer header */
@@ -156,6 +160,7 @@ struct vnt_cts {
u16 wReserved;
struct ieee80211_cts data;
u16 reserved2;
+ struct vnt_tx_datahead_g data_head;
} __packed;
struct vnt_cts_fb {
@@ -166,6 +171,7 @@ struct vnt_cts_fb {
u16 wCTSDuration_ba_f1;
struct ieee80211_cts data;
u16 reserved2;
+ struct vnt_tx_datahead_g_fb data_head;
} __packed;
union vnt_tx_data_head {
@@ -178,12 +184,37 @@ union vnt_tx_data_head {
/* cts g */
struct vnt_cts cts_g;
struct vnt_cts_fb cts_g_fb;
+ /* no rts/cts */
+ struct vnt_tx_datahead_a_fb data_head_a_fb;
+ struct vnt_tx_datahead_ab data_head_ab;
};
-struct vnt_tx_buffer {
- u8 byType;
- u8 byPKTNO;
- u16 wTxByteCount;
+struct vnt_tx_mic_hdr {
+ struct vnt_mic_hdr hdr;
+ union vnt_tx_data_head head;
+} __packed;
+
+union vnt_tx {
+ struct vnt_tx_mic_hdr mic;
+ union vnt_tx_data_head head;
+};
+
+union vnt_tx_head {
+ struct {
+ struct vnt_rrv_time_rts rts;
+ union vnt_tx tx;
+ } __packed tx_rts;
+ struct {
+ struct vnt_rrv_time_cts cts;
+ union vnt_tx tx;
+ } __packed tx_cts;
+ struct {
+ struct vnt_rrv_time_ab ab;
+ union vnt_tx tx;
+ } __packed tx_ab;
+};
+
+struct vnt_tx_fifo_head {
u32 adwTxKey[4];
u16 wFIFOCtl;
u16 wTimeStamp;
@@ -191,6 +222,14 @@ struct vnt_tx_buffer {
u16 wReserved;
} __packed;
+struct vnt_tx_buffer {
+ u8 byType;
+ u8 byPKTNO;
+ u16 wTxByteCount;
+ struct vnt_tx_fifo_head fifo_head;
+ union vnt_tx_head tx_head;
+} __packed;
+
struct vnt_beacon_buffer {
u8 byType;
u8 byPKTNO;
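
For illustration only (not part of this patch): appending the data heads to the RTS/CTS structures and wrapping everything in vnt_tx_head/vnt_tx_buffer lets the tx path reach each block by member access instead of the old pbyTxBufferAddr + accumulated sizeof() arithmetic. A small self-contained C sketch with simplified stand-in structs (rrv_time_cts, cts_head, datahead_g, cts_g, tx_cts and their field sizes are invented; the real layouts are the ones above) shows that the typed offset and the hand-computed one agree:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdint.h>

    struct rrv_time_cts { uint16_t tx_rrv_a, tx_rrv_b, cts_rrv_ba; } __attribute__((packed));
    struct cts_head     { uint16_t duration_ba; uint8_t cts_frame[10]; uint16_t reserved; } __attribute__((packed));
    struct datahead_g   { uint16_t duration_a, duration_b; } __attribute__((packed));

    struct cts_g {
        struct cts_head cts;
        struct datahead_g data_head;    /* appended, as with struct vnt_cts above */
    } __attribute__((packed));

    struct tx_cts {
        struct rrv_time_cts rrv;
        struct cts_g head;
    } __attribute__((packed));

    int main(void)
    {
        /* Old style: base pointer plus accumulated sizeofs.  New style: plain member access. */
        size_t by_hand = sizeof(struct rrv_time_cts) + sizeof(struct cts_head);
        size_t by_type = offsetof(struct tx_cts, head.data_head);

        printf("manual offset %zu, offsetof %zu\n", by_hand, by_type);
        return by_hand == by_type ? 0 : 1;
    }
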
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 3a03f1d5b68..5fc18ad822d 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -118,6 +118,9 @@ int PIPEnsControlOut(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
if (pDevice->Flags & fMP_CONTROL_READS)
return STATUS_FAILURE;
+ if (pDevice->pControlURB->hcpriv)
+ return STATUS_FAILURE;
+
MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
pDevice->sUsbCtlRequest.bRequestType = 0x40;
@@ -177,6 +180,9 @@ int PIPEnsControlIn(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
if (pDevice->Flags & fMP_CONTROL_WRITES)
return STATUS_FAILURE;
+ if (pDevice->pControlURB->hcpriv)
+ return STATUS_FAILURE;
+
MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
pDevice->sUsbCtlRequest.bRequestType = 0xC0;
@@ -656,8 +662,6 @@ static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
pDevice->ulBulkOutBytesWrite += ulBufLen;
pDevice->ulBulkOutContCRCError = 0;
- pDevice->nTxDataTimeCout = 0;
-
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
pDevice->ulBulkOutError++;
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 0013cb73d83..2f8e2a87533 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -268,20 +268,14 @@ struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
void vCommandTimerWait(struct vnt_private *pDevice, unsigned long MSecond)
{
-
- init_timer(&pDevice->sTimerCommand);
-
- pDevice->sTimerCommand.data = (unsigned long)pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
- pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000);
-
- add_timer(&pDevice->sTimerCommand);
-
- return;
+ schedule_delayed_work(&pDevice->run_command_work,
+ msecs_to_jiffies(MSecond));
}
-void vRunCommand(struct vnt_private *pDevice)
+void vRunCommand(struct work_struct *work)
{
+ struct vnt_private *pDevice =
+ container_of(work, struct vnt_private, run_command_work.work);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PWLAN_IE_SSID pItemSSID;
PWLAN_IE_SSID pItemSSIDCurr;
@@ -292,6 +286,9 @@ void vRunCommand(struct vnt_private *pDevice)
u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
u8 byData;
+ if (pDevice->Flags & fMP_DISCONNECTED)
+ return;
+
if (pDevice->dwDiagRefCount != 0)
return;
if (pDevice->bCmdRunning != true)
@@ -660,22 +657,6 @@ void vRunCommand(struct vnt_private *pDevice)
netif_wake_queue(pDevice->dev);
}
- if(pDevice->IsTxDataTrigger != false) { //TxDataTimer is not triggered at the first time
- // printk("Re-initial TxDataTimer****\n");
- del_timer(&pDevice->sTimerTxData);
- init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (unsigned long) pDevice;
- pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = false;
- pDevice->nTxDataTimeCout = 0;
- }
- else {
- // printk("mike:-->First time trigger TimerTxData InSleep\n");
- }
- pDevice->IsTxDataTrigger = true;
- add_timer(&pDevice->sTimerTxData);
-
}
else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
@@ -687,7 +668,6 @@ void vRunCommand(struct vnt_private *pDevice)
vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
return;
}
- pDevice->byLinkWaitCount = 0;
s_bCommandComplete(pDevice);
break;
@@ -696,7 +676,7 @@ void vRunCommand(struct vnt_private *pDevice)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- del_timer(&pMgmt->sTimerSecondCallback);
+ cancel_delayed_work_sync(&pDevice->second_callback_work);
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pDevice->bLinkPass = false;
@@ -724,7 +704,7 @@ void vRunCommand(struct vnt_private *pDevice)
}
pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- add_timer(&pMgmt->sTimerSecondCallback);
+ schedule_delayed_work(&pDevice->second_callback_work, HZ);
}
s_bCommandComplete(pDevice);
break;
@@ -1156,14 +1136,8 @@ static int s_bClearBSSID_SCAN(struct vnt_private *pDevice)
//mike add:reset command timer
void vResetCommandTimer(struct vnt_private *pDevice)
{
+ cancel_delayed_work_sync(&pDevice->run_command_work);
- //delete timer
- del_timer(&pDevice->sTimerCommand);
- //init timer
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long)pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
- pDevice->sTimerCommand.expires = RUN_AT(HZ);
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
@@ -1171,33 +1145,3 @@ void vResetCommandTimer(struct vnt_private *pDevice)
pDevice->bCmdRunning = false;
pDevice->bCmdClear = false;
}
-
-void BSSvSecondTxData(struct vnt_private *pDevice)
-{
- struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
-
- pDevice->nTxDataTimeCout++;
-
- if (pDevice->nTxDataTimeCout < 4) { //don't tx data if timer less than 40s
- // printk("mike:%s-->no data Tx not exceed the desired Time as %d\n",__FUNCTION__,
- // (int)pDevice->nTxDataTimeCout);
- pDevice->sTimerTxData.expires = RUN_AT(10 * HZ); //10s callback
- add_timer(&pDevice->sTimerTxData);
- return;
- }
-
- spin_lock_irq(&pDevice->lock);
- //is wap_supplicant running successful OR only open && sharekey mode!
- if (((pDevice->bLinkPass == true) &&
- (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
- (pDevice->fWPA_Authened == true)) { //wpa linking
- // printk("mike:%s-->InSleep Tx Data Procedure\n",__FUNCTION__);
- pDevice->fTxDataInSleep = true;
- PSbSendNullPacket(pDevice); //send null packet
- pDevice->fTxDataInSleep = false;
- }
- spin_unlock_irq(&pDevice->lock);
-
- pDevice->sTimerTxData.expires = RUN_AT(10 * HZ); //10s callback
- add_timer(&pDevice->sTimerTxData);
-}
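
For reference (hypothetical struct and field names, not the driver's own): the timer-to-workqueue conversion above follows the standard delayed_work pattern -- INIT_DELAYED_WORK() once at setup, schedule_delayed_work() where add_timer() used to be, container_of() in the handler, and cancel_delayed_work_sync() on the paths that used to del_timer():

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_priv {
        struct delayed_work run_command_work;
        /* ... driver state ... */
    };

    static void my_run_command(struct work_struct *work)
    {
        struct my_priv *priv =
            container_of(work, struct my_priv, run_command_work.work);

        /* state machine body runs here, in process context */
    }

    static void my_init(struct my_priv *priv)
    {
        INIT_DELAYED_WORK(&priv->run_command_work, my_run_command);
    }

    static void my_arm(struct my_priv *priv, unsigned long msecs)
    {
        /* replaces init_timer()/add_timer(); queuing an already-pending work is a no-op */
        schedule_delayed_work(&priv->run_command_work, msecs_to_jiffies(msecs));
    }

    static void my_teardown(struct my_priv *priv)
    {
        /* replaces del_timer(); also waits for a running handler to finish */
        cancel_delayed_work_sync(&priv->run_command_work);
    }
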
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index db8b4cf7fd6..caf2684ce91 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -105,15 +105,6 @@ void vResetCommandTimer(struct vnt_private *);
int bScheduleCommand(struct vnt_private *, CMD_CODE eCommand, u8 *pbyItem0);
-void vRunCommand(struct vnt_private *);
-
-/*
-void
-WCMDvCommandThread(
- void * Context
- );
-*/
-
-void BSSvSecondTxData(struct vnt_private *);
+void vRunCommand(struct work_struct *work);
#endif /* __WCMD_H__ */
diff --git a/drivers/staging/vt6656/wctl.c b/drivers/staging/vt6656/wctl.c
index 47a655db51e..814342cd948 100644
--- a/drivers/staging/vt6656/wctl.c
+++ b/drivers/staging/vt6656/wctl.c
@@ -69,8 +69,7 @@ bool WCTLbIsDuplicate (PSCache pCache, struct ieee80211_hdr *pMACHeader)
for (ii = 0; ii < DUPLICATE_RX_CACHE_LENGTH; ii++) {
pCacheEntry = &(pCache->asCacheEntry[uIndex]);
if ((pCacheEntry->wFmSequence == pMACHeader->seq_ctrl) &&
- (!compare_ether_addr(&(pCacheEntry->abyAddr2[0]),
- &(pMACHeader->addr2[0]))) &&
+ ether_addr_equal(pCacheEntry->abyAddr2, pMACHeader->addr2) &&
(LOBYTE(pCacheEntry->wFrameCtl) == LOBYTE(pMACHeader->frame_control))
) {
/* Duplicate match */
@@ -110,8 +109,8 @@ unsigned int WCTLuSearchDFCB(struct vnt_private *pDevice,
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
if ((pDevice->sRxDFCB[ii].bInUse == true) &&
- (!compare_ether_addr(&(pDevice->sRxDFCB[ii].abyAddr2[0]),
- &(pMACHeader->addr2[0])))) {
+ ether_addr_equal(pDevice->sRxDFCB[ii].abyAddr2,
+ pMACHeader->addr2)) {
return ii;
}
}
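
One subtlety in these conversions (here and in wmgr.c/wpactl.c below): compare_ether_addr() returned 0 on a match, so each !compare_ether_addr(a, b) maps to a plain ether_addr_equal(a, b); dropping the negation by accident would invert the test. A userspace sketch with memcmp-based stand-ins (compare_ether_addr_stub/ether_addr_equal_stub are invented names, not kernel APIs) makes the two truth tables explicit:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    static int compare_ether_addr_stub(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, ETH_ALEN) != 0;   /* 0 means "equal", like the old helper */
    }

    static int ether_addr_equal_stub(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, ETH_ALEN) == 0;   /* non-zero means "equal" */
    }

    int main(void)
    {
        unsigned char a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /*           old test                          new test           */
        printf("%d %d\n", !compare_ether_addr_stub(a, b), ether_addr_equal_stub(a, b));
        return 0;
    }
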
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index b6cbd138a2b..e26c41519b1 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -81,7 +81,7 @@
#include "control.h"
#include "rndis.h"
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
static int ChannelExceedZoneType(struct vnt_private *, u8 byCurrChannel);
@@ -213,24 +213,6 @@ void vMgrObjectInit(struct vnt_private *pDevice)
pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
BSSvClearBSSList((void *) pDevice, false);
- init_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->sTimerSecondCallback.data = (unsigned long)pDevice;
- pMgmt->sTimerSecondCallback.function = (TimerFunction)BSSvSecondCallBack;
- pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long)pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
- pDevice->sTimerCommand.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (unsigned long)pDevice;
- pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = false;
- pDevice->IsTxDataTrigger = false;
- pDevice->nTxDataTimeCout = 0;
-
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
@@ -844,8 +826,8 @@ static void s_vMgrRxAssocResponse(struct vnt_private *pDevice,
pDevice->bwextstep3 = false;
pDevice->bWPASuppWextEnabled = false;
-if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
- timer_expire(pDevice->sTimerCommand, 0);
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC)
+ schedule_delayed_work(&pDevice->run_command_work, 0);
return;
}
@@ -1127,7 +1109,7 @@ static void s_vMgrRxAuthenSequence_2(struct vnt_private *pDevice,
if ( cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS ){
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (OPEN) Successful.\n");
pMgmt->eCurrState = WMAC_STATE_AUTH;
- timer_expire(pDevice->sTimerCommand, 0);
+ schedule_delayed_work(&pDevice->run_command_work, 0);
}
else {
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (OPEN) Failed.\n");
@@ -1302,7 +1284,7 @@ static void s_vMgrRxAuthenSequence_4(struct vnt_private *pDevice,
if ( cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS ){
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (SHAREDKEY) Successful.\n");
pMgmt->eCurrState = WMAC_STATE_AUTH;
- timer_expire(pDevice->sTimerCommand, 0);
+ schedule_delayed_work(&pDevice->run_command_work, 0);
}
else{
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (SHAREDKEY) Failed.\n");
@@ -1422,8 +1404,8 @@ static void s_vMgrRxDeauthentication(struct vnt_private *pDevice,
pDevice->fWPA_Authened = false;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP deauthed me, reason=%d.\n", cpu_to_le16((*(sFrame.pwReason))));
// TODO: update BSS list for specific BSSID if pre-authentication case
- if (!compare_ether_addr(sFrame.pHdr->sA3.abyAddr3,
- pMgmt->abyCurrBSSID)) {
+ if (ether_addr_equal(sFrame.pHdr->sA3.abyAddr3,
+ pMgmt->abyCurrBSSID)) {
if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
@@ -3095,7 +3077,7 @@ struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
*
*
* Return Value:
- * A ptr to frame or NULL on allocation failue
+ * A ptr to frame or NULL on allocation failure
*
-*/
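
The timer removals above pair with a move to the workqueue API: vRunCommand() now takes a struct work_struct * (see the wcmd.h hunk earlier) and is kicked with schedule_delayed_work() instead of forcing a timer to expire. A sketch of the usual pattern, assuming the run_command_work field named in these hunks and the driver's private structure; the real initialization sits elsewhere in the series:

#include <linux/workqueue.h>

/* Assumed init-time wiring (the actual call is outside these hunks). */
static void vnt_init_command_work(struct vnt_private *pDevice)
{
        INIT_DELAYED_WORK(&pDevice->run_command_work, vRunCommand);
}

void vRunCommand(struct work_struct *work)
{
        /* Recover the private structure from the embedded work item. */
        struct vnt_private *pDevice =
                container_of(work, struct vnt_private, run_command_work.work);

        /* ... command state machine ...
         * Queue immediately (0 jiffies), as the hunks above do: */
        schedule_delayed_work(&pDevice->run_command_work, 0);
}
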
diff --git a/drivers/staging/vt6656/wmgr.h b/drivers/staging/vt6656/wmgr.h
index 5424c7f820a..26ba47da467 100644
--- a/drivers/staging/vt6656/wmgr.h
+++ b/drivers/staging/vt6656/wmgr.h
@@ -310,9 +310,6 @@ struct vnt_manager {
u8 byMgmtPacketPool[sizeof(struct vnt_tx_mgmt)
+ WLAN_A3FR_MAXLEN];
- /* One second callback timer */
- struct timer_list sTimerSecondCallback;
-
/* Temporarily Rx Mgmt Packet Descriptor */
struct vnt_rx_mgmt sRxPacket;
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 9f1b413ce86..003bd7c614e 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -227,7 +227,7 @@ int wpa_set_keys(struct vnt_private *pDevice, void *ctx)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
// Key Table Full
- if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
+ if (ether_addr_equal(param->addr, pDevice->abyBSSID)) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
return -EINVAL;
} else {
diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h
index 6160b2fab83..fc0ef24fad3 100644
--- a/drivers/staging/winbond/core.h
+++ b/drivers/staging/winbond/core.h
@@ -18,8 +18,8 @@
struct mlme_frame {
s8 *pMMPDU;
u16 len;
- u8 DataType;
- u8 IsInUsed;
+ u8 data_type;
+ u8 is_in_used;
u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
u8 TxMMPDUInUse[(MAX_NUM_TX_MMPDU + 3) & ~0x03];
@@ -52,13 +52,9 @@ struct wbsoft_priv {
struct hw_data sHwData; /*For HAL */
struct wb35_mds Mds;
- atomic_t ThreadCount;
-
u32 RxByteCount;
u32 TxByteCount;
- u8 LinkName[WB_MAX_LINK_NAME_LEN];
-
bool enabled;
};
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index fcc3d2165ba..cac7720bef2 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -412,7 +412,7 @@ static void MLME_GetNextPacket(struct wbsoft_priv *adapter,
desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
desc->buffer_total_size += adapter->sMlmeFrame.len;
desc->buffer_number++;
- desc->Type = adapter->sMlmeFrame.DataType;
+ desc->Type = adapter->sMlmeFrame.data_type;
}
static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
@@ -440,7 +440,7 @@ static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
/* Return resource */
- adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
+ adapter->sMlmeFrame.is_in_used = PACKET_FREE_TO_USE;
}
void
diff --git a/drivers/staging/winbond/mto.c b/drivers/staging/winbond/mto.c
index 560c0ab617d..b031ecd4f3c 100644
--- a/drivers/staging/winbond/mto.c
+++ b/drivers/staging/winbond/mto.c
@@ -21,6 +21,7 @@
#include "wbhal.h"
#include "wb35reg_f.h"
#include "core.h"
+#include "mto.h"
/* Declare SQ3 to rate and fragmentation threshold table */
/* Declare fragmentation threshold table */
@@ -45,12 +46,6 @@ static int retryrate_rec[MTO_MAX_DATA_RATE_LEVELS];
static u8 boSparseTxTraffic;
-void MTO_Init(struct wbsoft_priv *adapter);
-void TxRateReductionCtrl(struct wbsoft_priv *adapter);
-void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 t0, u8 index);
-void MTO_TxFailed(struct wbsoft_priv *adapter);
-void hal_get_dto_para(struct wbsoft_priv *adapter, char *buffer);
-
/*
* ===========================================================================
* MTO_Init --
diff --git a/drivers/staging/winbond/mto.h b/drivers/staging/winbond/mto.h
index a0f659cf99f..8d41eeda45b 100644
--- a/drivers/staging/winbond/mto.h
+++ b/drivers/staging/winbond/mto.h
@@ -127,12 +127,8 @@ extern u16 MTO_Frag_Th_Tbl[];
#define MTO_DATA_RATE() MTO_Data_Rate_Tbl[MTO_RATE_LEVEL()]
#define MTO_FRAG_TH() MTO_Frag_Th_Tbl[MTO_FRAG_TH_LEVEL()]
-extern void MTO_Init(struct wbsoft_priv *);
-extern void MTO_PeriodicTimerExpired(struct wbsoft_priv *);
-extern void MTO_SetDTORateRange(struct wbsoft_priv *, u8 *, u8);
-extern u8 MTO_GetTxRate(struct wbsoft_priv *adapter, u32 fpdu_len);
-extern u8 MTO_GetTxFallbackRate(struct wbsoft_priv *adapter);
-extern void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 t0, u8 index);
+void MTO_Init(struct wbsoft_priv *);
+void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 t0, u8 index);
#endif /* __MTO_H__ */
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index cfbfbbb5386..8aecced62dd 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -27,10 +27,12 @@
#define DEG2RAD(X) (0.017453 * (X))
static const s32 Angles[] = {
- FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)), FIXED(DEG2RAD(14.0362)),
- FIXED(DEG2RAD(7.12502)), FIXED(DEG2RAD(3.57633)), FIXED(DEG2RAD(1.78991)),
- FIXED(DEG2RAD(0.895174)), FIXED(DEG2RAD(0.447614)), FIXED(DEG2RAD(0.223811)),
- FIXED(DEG2RAD(0.111906)), FIXED(DEG2RAD(0.055953)), FIXED(DEG2RAD(0.027977))
+ FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)),
+ FIXED(DEG2RAD(14.0362)), FIXED(DEG2RAD(7.12502)),
+ FIXED(DEG2RAD(3.57633)), FIXED(DEG2RAD(1.78991)),
+ FIXED(DEG2RAD(0.895174)), FIXED(DEG2RAD(0.447614)),
+ FIXED(DEG2RAD(0.223811)), FIXED(DEG2RAD(0.111906)),
+ FIXED(DEG2RAD(0.055953)), FIXED(DEG2RAD(0.027977))
};
/****************** LOCAL FUNCTION DECLARATION SECTION **********************/
@@ -42,7 +44,7 @@ static const s32 Angles[] = {
/****************** FUNCTION DEFINITION SECTION *****************************/
-s32 _s13_to_s32(u32 data)
+static s32 _s13_to_s32(u32 data)
{
u32 val;
@@ -54,22 +56,8 @@ s32 _s13_to_s32(u32 data)
return (s32) val;
}
-u32 _s32_to_s13(s32 data)
-{
- u32 val;
-
- if (data > 4095)
- data = 4095;
- else if (data < -4096)
- data = -4096;
-
- val = data & 0x1FFF;
-
- return val;
-}
-
/****************************************************************************/
-s32 _s4_to_s32(u32 data)
+static s32 _s4_to_s32(u32 data)
{
s32 val;
@@ -81,7 +69,7 @@ s32 _s4_to_s32(u32 data)
return val;
}
-u32 _s32_to_s4(s32 data)
+static u32 _s32_to_s4(s32 data)
{
u32 val;
@@ -96,7 +84,7 @@ u32 _s32_to_s4(s32 data)
}
/****************************************************************************/
-s32 _s5_to_s32(u32 data)
+static s32 _s5_to_s32(u32 data)
{
s32 val;
@@ -108,7 +96,7 @@ s32 _s5_to_s32(u32 data)
return val;
}
-u32 _s32_to_s5(s32 data)
+static u32 _s32_to_s5(s32 data)
{
u32 val;
@@ -123,7 +111,7 @@ u32 _s32_to_s5(s32 data)
}
/****************************************************************************/
-s32 _s6_to_s32(u32 data)
+static s32 _s6_to_s32(u32 data)
{
s32 val;
@@ -135,7 +123,7 @@ s32 _s6_to_s32(u32 data)
return val;
}
-u32 _s32_to_s6(s32 data)
+static u32 _s32_to_s6(s32 data)
{
u32 val;
@@ -150,34 +138,7 @@ u32 _s32_to_s6(s32 data)
}
/****************************************************************************/
-s32 _s9_to_s32(u32 data)
-{
- s32 val;
-
- val = data & 0x00FF;
-
- if ((data & BIT(8)) != 0)
- val |= 0xFFFFFF00;
-
- return val;
-}
-
-u32 _s32_to_s9(s32 data)
-{
- u32 val;
-
- if (data > 255)
- data = 255;
- else if (data < -256)
- data = -256;
-
- val = data & 0x01FF;
-
- return val;
-}
-
-/****************************************************************************/
-s32 _floor(s32 n)
+static s32 _floor(s32 n)
{
if (n > 0)
n += 5;
@@ -193,7 +154,7 @@ s32 _floor(s32 n)
* sqsum is the input and the output is sq_rt;
* The maximum of sqsum = 2^27 -1;
*/
-u32 _sqrt(u32 sqsum)
+static u32 _sqrt(u32 sqsum)
{
u32 sq_rt;
@@ -261,7 +222,7 @@ u32 _sqrt(u32 sqsum)
}
/****************************************************************************/
-void _sin_cos(s32 angle, s32 *sin, s32 *cos)
+static void _sin_cos(s32 angle, s32 *sin, s32 *cos)
{
s32 X, Y, TargetAngle, CurrAngle;
unsigned Step;
@@ -296,7 +257,8 @@ void _sin_cos(s32 angle, s32 *sin, s32 *cos)
}
}
-static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 *pValue)
+static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number,
+ u32 *pValue)
{
if (number < 0x1000)
number += 0x1000;
@@ -304,7 +266,8 @@ static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 *p
}
#define hw_get_dxx_reg(_A, _B, _C) hal_get_dxx_reg(_A, _B, (u32 *)_C)
-static unsigned char hal_set_dxx_reg(struct hw_data *pHwData, u16 number, u32 value)
+static unsigned char hal_set_dxx_reg(struct hw_data *pHwData, u16 number,
+ u32 value)
{
unsigned char ret;
@@ -316,7 +279,7 @@ static unsigned char hal_set_dxx_reg(struct hw_data *pHwData, u16 number, u32 va
#define hw_set_dxx_reg(_A, _B, _C) hal_set_dxx_reg(_A, _B, (u32)_C)
-void _reset_rx_cal(struct hw_data *phw_data)
+static void _reset_rx_cal(struct hw_data *phw_data)
{
u32 val;
@@ -336,7 +299,7 @@ void _reset_rx_cal(struct hw_data *phw_data)
/**********************************************/
-void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequency)
+static void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequency)
{
u32 reg_agc_ctrl3;
u32 reg_a_acq_ctrl;
@@ -407,7 +370,8 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
PHY_DEBUG(("[CAL] ** adc_dc_cal_i = %d (0x%04X)\n",
_s9_to_s32(val&0x000001FF), val&0x000001FF));
PHY_DEBUG(("[CAL] ** adc_dc_cal_q = %d (0x%04X)\n",
- _s9_to_s32((val&0x0003FE00)>>9), (val&0x0003FE00)>>9));
+ _s9_to_s32((val&0x0003FE00)>>9),
+ (val&0x0003FE00)>>9));
#endif
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val);
@@ -430,249 +394,8 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);
}
-/****************************************************************/
-void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
-{
- u32 reg_agc_ctrl3;
- u32 reg_mode_ctrl;
- u32 reg_dc_cancel;
- s32 iqcal_image_i;
- s32 iqcal_image_q;
- u32 sqsum;
- s32 mag_0;
- s32 mag_1;
- s32 fix_cancel_dc_i = 0;
- u32 val;
- int loop;
-
- PHY_DEBUG(("[CAL] -> [2]_txidac_dc_offset_cancellation()\n"));
-
- /* a. Set to "TX calibration mode" */
-
- /* 0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
- phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
- /* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
- phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
- /* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
- phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
- /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
- phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
- /* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
- phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
-
- hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */
-
- /* a. Disable AGC */
- hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
- reg_agc_ctrl3 &= ~BIT(2);
- reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
- hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);
-
- hw_get_dxx_reg(phw_data, REG_AGC_CTRL5, &val);
- val |= MASK_AGC_FIX_GAIN;
- hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);
-
- /* b. set iqcal_mode[1:0] to 0x2 and set iqcal_tone[3:2] to 0 */
- hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
-
- PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
- reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
-
- /* mode=2, tone=0 */
- /* reg_mode_ctrl |= (MASK_CALIB_START|2); */
-
- /* mode=2, tone=1 */
- /* reg_mode_ctrl |= (MASK_CALIB_START|2|(1<<2)); */
-
- /* mode=2, tone=2 */
- reg_mode_ctrl |= (MASK_CALIB_START|2|(2<<2));
- hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
- PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
-
- hw_get_dxx_reg(phw_data, 0x5C, &reg_dc_cancel);
- PHY_DEBUG(("[CAL] DC_CANCEL (read) = 0x%08X\n", reg_dc_cancel));
-
- for (loop = 0; loop < LOOP_TIMES; loop++) {
- PHY_DEBUG(("[CAL] [%d.] ==================================\n", loop));
-
- /* c. reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel */
- reg_dc_cancel &= ~(0x03FF);
- PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
- hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
-
- hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
- PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
-
- iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
- iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
- sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
- mag_0 = (s32) _sqrt(sqsum);
- PHY_DEBUG(("[CAL] mag_0=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
- mag_0, iqcal_image_i, iqcal_image_q));
-
- /* d. */
- reg_dc_cancel |= (1 << CANCEL_DC_I_SHIFT);
- PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
- hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
-
- hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
- PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
-
- iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
- iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
- sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
- mag_1 = (s32) _sqrt(sqsum);
- PHY_DEBUG(("[CAL] mag_1=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
- mag_1, iqcal_image_i, iqcal_image_q));
-
- /* e. Calculate the correct DC offset cancellation value for I */
- if (mag_0 != mag_1)
- fix_cancel_dc_i = (mag_0*10000) / (mag_0*10000 - mag_1*10000);
- else {
- if (mag_0 == mag_1)
- PHY_DEBUG(("[CAL] ***** mag_0 = mag_1 !!\n"));
- fix_cancel_dc_i = 0;
- }
-
- PHY_DEBUG(("[CAL] ** fix_cancel_dc_i = %d (0x%04X)\n",
- fix_cancel_dc_i, _s32_to_s5(fix_cancel_dc_i)));
-
- if ((abs(mag_1-mag_0)*6) > mag_0)
- break;
- }
-
- if (loop >= 19)
- fix_cancel_dc_i = 0;
-
- reg_dc_cancel &= ~(0x03FF);
- reg_dc_cancel |= (_s32_to_s5(fix_cancel_dc_i) << CANCEL_DC_I_SHIFT);
- hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
- PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
-
- /* g. */
- reg_mode_ctrl &= ~MASK_CALIB_START;
- hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
- PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
-}
-
-/*****************************************************/
-void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
-{
- u32 reg_agc_ctrl3;
- u32 reg_mode_ctrl;
- u32 reg_dc_cancel;
- s32 iqcal_image_i;
- s32 iqcal_image_q;
- u32 sqsum;
- s32 mag_0;
- s32 mag_1;
- s32 fix_cancel_dc_q = 0;
- u32 val;
- int loop;
-
- PHY_DEBUG(("[CAL] -> [3]_txqdac_dc_offset_cacellation()\n"));
- /*0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
- phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
- /* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
- phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
- /* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
- phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
- /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
- phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
- /* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
- phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
-
- hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */
-
- /* a. Disable AGC */
- hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
- reg_agc_ctrl3 &= ~BIT(2);
- reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
- hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);
-
- hw_get_dxx_reg(phw_data, REG_AGC_CTRL5, &val);
- val |= MASK_AGC_FIX_GAIN;
- hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);
-
- /* a. set iqcal_mode[1:0] to 0x3 and set iqcal_tone[3:2] to 0 */
- hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
- PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
-
- /* reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE); */
- reg_mode_ctrl &= ~(MASK_IQCAL_MODE);
- reg_mode_ctrl |= (MASK_CALIB_START|3);
- hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
- PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
-
- hw_get_dxx_reg(phw_data, 0x5C, &reg_dc_cancel);
- PHY_DEBUG(("[CAL] DC_CANCEL (read) = 0x%08X\n", reg_dc_cancel));
-
- for (loop = 0; loop < LOOP_TIMES; loop++) {
- PHY_DEBUG(("[CAL] [%d.] ==================================\n", loop));
-
- /* b. reset cancel_dc_q[4:0] in register DC_Cancel */
- reg_dc_cancel &= ~(0x001F);
- PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
- hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
-
- hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
- PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
-
- iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
- iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
- sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
- mag_0 = _sqrt(sqsum);
- PHY_DEBUG(("[CAL] mag_0=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
- mag_0, iqcal_image_i, iqcal_image_q));
-
- /* c. */
- reg_dc_cancel |= (1 << CANCEL_DC_Q_SHIFT);
- PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
- hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
-
- hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
- PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
-
- iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
- iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
- sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
- mag_1 = _sqrt(sqsum);
- PHY_DEBUG(("[CAL] mag_1=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
- mag_1, iqcal_image_i, iqcal_image_q));
-
- /* d. Calculate the correct DC offset cancellation value for I */
- if (mag_0 != mag_1)
- fix_cancel_dc_q = (mag_0*10000) / (mag_0*10000 - mag_1*10000);
- else {
- if (mag_0 == mag_1)
- PHY_DEBUG(("[CAL] ***** mag_0 = mag_1 !!\n"));
- fix_cancel_dc_q = 0;
- }
-
- PHY_DEBUG(("[CAL] ** fix_cancel_dc_q = %d (0x%04X)\n",
- fix_cancel_dc_q, _s32_to_s5(fix_cancel_dc_q)));
-
- if ((abs(mag_1-mag_0)*6) > mag_0)
- break;
- }
-
- if (loop >= 19)
- fix_cancel_dc_q = 0;
-
- reg_dc_cancel &= ~(0x001F);
- reg_dc_cancel |= (_s32_to_s5(fix_cancel_dc_q) << CANCEL_DC_Q_SHIFT);
- hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
- PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
-
-
- /* f. */
- reg_mode_ctrl &= ~MASK_CALIB_START;
- hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
- PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
-}
-
/* 20060612.1.a 20060718.1 Modify */
-u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
+static u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
s32 a_2_threshold,
s32 b_2_threshold)
{
@@ -711,7 +434,8 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
loop = LOOP_TIMES;
while (loop > 0) {
- PHY_DEBUG(("[CAL] [%d.] <_tx_iq_calibration_loop>\n", (LOOP_TIMES-loop+1)));
+ PHY_DEBUG(("[CAL] [%d.] <_tx_iq_calibration_loop>\n",
+ (LOOP_TIMES-loop+1)));
iqcal_tone_i_avg = 0;
iqcal_tone_q_avg = 0;
@@ -719,8 +443,8 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
return 0;
for (capture_time = 0; capture_time < 10; capture_time++) {
/*
- * a. Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to
- * enable "IQ calibration Mode II"
+ * a. Set iqcal_mode[1:0] to 0x2 and set "calib_start"
+ * to 0x1 to enable "IQ calibration Mode II"
*/
reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
reg_mode_ctrl &= ~MASK_IQCAL_MODE;
@@ -749,8 +473,8 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
/*
- * d. Set iqcal_mode[1:0] to 0x3 and set "calib_start" to 0x1 to
- * enable "IQ calibration Mode II"
+ * d. Set iqcal_mode[1:0] to 0x3 and set "calib_start"
+ * to 0x1 to enable "IQ calibration Mode II"
*/
/* hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val); */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
@@ -766,7 +490,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
iqcal_tone_i = _s13_to_s32(val & 0x00001FFF);
iqcal_tone_q = _s13_to_s32((val & 0x03FFE000) >> 13);
PHY_DEBUG(("[CAL] ** iqcal_tone_i = %d, iqcal_tone_q = %d\n",
- iqcal_tone_i, iqcal_tone_q));
+ iqcal_tone_i, iqcal_tone_q));
if (capture_time == 0)
continue;
else {
@@ -955,7 +679,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
return 1;
}
-void _tx_iq_calibration_winbond(struct hw_data *phw_data)
+static void _tx_iq_calibration_winbond(struct hw_data *phw_data)
{
u32 reg_agc_ctrl3;
#ifdef _DEBUG
@@ -1101,7 +825,7 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
}
/*****************************************************/
-u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 frequency)
+static u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 frequency)
{
u32 reg_mode_ctrl;
s32 iqcal_tone_i;
@@ -1146,7 +870,8 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
/* for (loop = 0; loop < LOOP_TIMES; loop++) */
loop = LOOP_TIMES;
while (loop > 0) {
- PHY_DEBUG(("[CAL] [%d.] <_rx_iq_calibration_loop>\n", (LOOP_TIMES-loop+1)));
+ PHY_DEBUG(("[CAL] [%d.] <_rx_iq_calibration_loop>\n",
+ (LOOP_TIMES-loop+1)));
iqcal_tone_i_avg = 0;
iqcal_tone_q_avg = 0;
iqcal_image_i_avg = 0;
@@ -1199,13 +924,13 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
/* d. */
rot_tone_i_b = (iqcal_tone_i * iqcal_tone_i +
- iqcal_tone_q * iqcal_tone_q) / 1024;
+ iqcal_tone_q * iqcal_tone_q) / 1024;
rot_tone_q_b = (iqcal_tone_i * iqcal_tone_q * (-1) +
- iqcal_tone_q * iqcal_tone_i) / 1024;
+ iqcal_tone_q * iqcal_tone_i) / 1024;
rot_image_i_b = (iqcal_image_i * iqcal_tone_i -
- iqcal_image_q * iqcal_tone_q) / 1024;
+ iqcal_image_q * iqcal_tone_q) / 1024;
rot_image_q_b = (iqcal_image_i * iqcal_tone_q +
- iqcal_image_q * iqcal_tone_i) / 1024;
+ iqcal_image_q * iqcal_tone_i) / 1024;
PHY_DEBUG(("[CAL] ** rot_tone_i_b = %d\n", rot_tone_i_b));
PHY_DEBUG(("[CAL] ** rot_tone_q_b = %d\n", rot_tone_q_b));
@@ -1225,8 +950,10 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
b_2 = (rot_image_q_b * 32768) / rot_tone_i_b -
phw_data->iq_rsdl_phase_tx_d2;
- PHY_DEBUG(("[CAL] ** iq_rsdl_gain_tx_d2 = %d\n", phw_data->iq_rsdl_gain_tx_d2));
- PHY_DEBUG(("[CAL] ** iq_rsdl_phase_tx_d2= %d\n", phw_data->iq_rsdl_phase_tx_d2));
+ PHY_DEBUG(("[CAL] ** iq_rsdl_gain_tx_d2 = %d\n",
+ phw_data->iq_rsdl_gain_tx_d2));
+ PHY_DEBUG(("[CAL] ** iq_rsdl_phase_tx_d2= %d\n",
+ phw_data->iq_rsdl_phase_tx_d2));
PHY_DEBUG(("[CAL] ***** EPSILON/2 = %d\n", a_2));
PHY_DEBUG(("[CAL] ***** THETA/2 = %d\n", b_2));
@@ -1272,7 +999,8 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
/* e. */
pwr_tone = (iqcal_tone_i*iqcal_tone_i + iqcal_tone_q*iqcal_tone_q);
- pwr_image = (iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q)*factor;
+ pwr_image = (iqcal_image_i*iqcal_image_i +
+ iqcal_image_q*iqcal_image_q)*factor;
PHY_DEBUG(("[CAL] ** pwr_tone = %d\n", pwr_tone));
PHY_DEBUG(("[CAL] ** pwr_image = %d\n", pwr_image));
@@ -1371,7 +1099,7 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
/*************************************************/
/***************************************************************/
-void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
+static void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
{
/* figo 20050523 marked this flag for can't compile for release */
#ifdef _DEBUG
@@ -1569,7 +1297,8 @@ unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *phw_data)
sqsum = iqcal_tone_i0*iqcal_tone_i0 + iqcal_tone_q0*iqcal_tone_q0;
iq_mag_0_tx = (s32) _sqrt(sqsum);
- PHY_DEBUG(("[CAL] ** auto_adjust_txvga_for_iq_mag_0_tx=%d\n", iq_mag_0_tx));
+ PHY_DEBUG(("[CAL] ** auto_adjust_txvga_for_iq_mag_0_tx=%d\n",
+ iq_mag_0_tx));
if (iq_mag_0_tx >= 700 && iq_mag_0_tx <= 1750)
break;
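
The *_to_s32() converters made static above (and the unused s9 pair that was dropped) all implement the same fixed-width sign-extension idiom. A generic sketch of that idiom, purely illustrative; the kernel's own sign_extend32() from <linux/bitops.h> does the same job:

#include <linux/types.h>

/* Sign-extend the low 'bits' bits of 'data' (valid for bits < 32). */
static s32 sign_extend_field(u32 data, unsigned int bits)
{
        u32 sign = 1U << (bits - 1);

        data &= (1U << bits) - 1;           /* keep only the field    */
        return (s32)((data ^ sign) - sign); /* propagate the sign bit */
}
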
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index 75b775252af..5fd4c4a72ee 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -43,7 +43,7 @@
*/
/* MAX2825 (pure b/g) */
-u32 max2825_rf_data[] = {
+static u32 max2825_rf_data[] = {
(0x00<<18) | 0x000a2,
(0x01<<18) | 0x21cc0,
(0x02<<18) | 0x13806,
@@ -59,7 +59,7 @@ u32 max2825_rf_data[] = {
(0x0C<<18) | 0x0c100 /* 11a: 0x0c300, 11g: 0x0c100 */
};
-u32 max2825_channel_data_24[][3] = {
+static u32 max2825_channel_data_24[][3] = {
{(0x03 << 18) | 0x30142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 01 */
{(0x03 << 18) | 0x32141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 02 */
{(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 03 */
@@ -76,11 +76,11 @@ u32 max2825_channel_data_24[][3] = {
{(0x03 << 18) | 0x32941, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6} /* channel 14 (2484MHz) */
};
-u32 max2825_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+static u32 max2825_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
/* ========================================== */
/* MAX2827 (a/b/g) */
-u32 max2827_rf_data[] = {
+static u32 max2827_rf_data[] = {
(0x00 << 18) | 0x000a2,
(0x01 << 18) | 0x21cc0,
(0x02 << 18) | 0x13806,
@@ -96,7 +96,7 @@ u32 max2827_rf_data[] = {
(0x0C << 18) | 0x0c100 /* 11a: 0x0c300, 11g: 0x0c100 */
};
-u32 max2827_channel_data_24[][3] = {
+static u32 max2827_channel_data_24[][3] = {
{(0x03 << 18) | 0x30142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channe1 01 */
{(0x03 << 18) | 0x32141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channe1 02 */
{(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channe1 03 */
@@ -113,7 +113,7 @@ u32 max2827_channel_data_24[][3] = {
{(0x03 << 18) | 0x32941, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6} /* channel 14 (2484MHz) */
};
-u32 max2827_channel_data_50[][3] = {
+static u32 max2827_channel_data_50[][3] = {
{(0x03 << 18) | 0x33cc3, (0x04 << 18) | 0x08ccc, (0x05 << 18) | 0x2A9A6}, /* channel 36 */
{(0x03 << 18) | 0x302c0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2A9A6}, /* channel 40 */
{(0x03 << 18) | 0x302c2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2A9A6}, /* channel 44 */
@@ -124,12 +124,12 @@ u32 max2827_channel_data_50[][3] = {
{(0x03 << 18) | 0x30ac2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2A9A6} /* channel 64 */
};
-u32 max2827_power_data_24[] = {(0x0C << 18) | 0x0C000, (0x0C << 18) | 0x0D600, (0x0C << 18) | 0x0C100};
-u32 max2827_power_data_50[] = {(0x0C << 18) | 0x0C400, (0x0C << 18) | 0x0D500, (0x0C << 18) | 0x0C300};
+static u32 max2827_power_data_24[] = {(0x0C << 18) | 0x0C000, (0x0C << 18) | 0x0D600, (0x0C << 18) | 0x0C100};
+static u32 max2827_power_data_50[] = {(0x0C << 18) | 0x0C400, (0x0C << 18) | 0x0D500, (0x0C << 18) | 0x0C300};
/* ======================================================= */
/* MAX2828 (a/b/g) */
-u32 max2828_rf_data[] = {
+static u32 max2828_rf_data[] = {
(0x00 << 18) | 0x000a2,
(0x01 << 18) | 0x21cc0,
(0x02 << 18) | 0x13806,
@@ -145,7 +145,7 @@ u32 max2828_rf_data[] = {
(0x0C << 18) | 0x0c100 /* 11a: 0x0c300, 11g: 0x0c100 */
};
-u32 max2828_channel_data_24[][3] = {
+static u32 max2828_channel_data_24[][3] = {
{(0x03 << 18) | 0x30142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channe1 01 */
{(0x03 << 18) | 0x32141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channe1 02 */
{(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channe1 03 */
@@ -162,7 +162,7 @@ u32 max2828_channel_data_24[][3] = {
{(0x03 << 18) | 0x32941, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6} /* channel 14 (2484MHz) */
};
-u32 max2828_channel_data_50[][3] = {
+static u32 max2828_channel_data_50[][3] = {
{(0x03 << 18) | 0x33cc3, (0x04 << 18) | 0x08ccc, (0x05 << 18) | 0x289A6}, /* channel 36 */
{(0x03 << 18) | 0x302c0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x289A6}, /* channel 40 */
{(0x03 << 18) | 0x302c2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 44 */
@@ -173,12 +173,12 @@ u32 max2828_channel_data_50[][3] = {
{(0x03 << 18) | 0x30ac2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6} /* channel 64 */
};
-u32 max2828_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
-u32 max2828_power_data_50[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+static u32 max2828_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+static u32 max2828_power_data_50[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
/* ========================================================== */
/* MAX2829 (a/b/g) */
-u32 max2829_rf_data[] = {
+static u32 max2829_rf_data[] = {
(0x00 << 18) | 0x000a2,
(0x01 << 18) | 0x23520,
(0x02 << 18) | 0x13802,
@@ -194,7 +194,7 @@ u32 max2829_rf_data[] = {
(0x0C << 18) | 0x0F300 /* TXVGA=51, (MAX-6 dB) */
};
-u32 max2829_channel_data_24[][3] = {
+static u32 max2829_channel_data_24[][3] = {
{(3 << 18) | 0x30142, (4 << 18) | 0x0b333, (5 << 18) | 0x289C6}, /* 01 (2412MHz) */
{(3 << 18) | 0x32141, (4 << 18) | 0x08444, (5 << 18) | 0x289C6}, /* 02 (2417MHz) */
{(3 << 18) | 0x32143, (4 << 18) | 0x0aeee, (5 << 18) | 0x289C6}, /* 03 (2422MHz) */
@@ -211,7 +211,7 @@ u32 max2829_channel_data_24[][3] = {
{(3 << 18) | 0x32941, (4 << 18) | 0x09999, (5 << 18) | 0x289C6}, /* 14 (2484MHz) */
};
-u32 max2829_channel_data_50[][4] = {
+static u32 max2829_channel_data_50[][4] = {
{36, (3 << 18) | 0x33cc3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A946}, /* 36 (5.180GHz) */
{40, (3 << 18) | 0x302c0, (4 << 18) | 0x08000, (5 << 18) | 0x2A946}, /* 40 (5.200GHz) */
{44, (3 << 18) | 0x302c2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A946}, /* 44 (5.220GHz) */
@@ -296,51 +296,6 @@ u32 max2829_channel_data_50[][4] = {
* 0x0c 0x0c000
* ====================================================================
*/
-u32 maxim_317_rf_data[] = {
- (0x00 << 18) | 0x000a2,
- (0x01 << 18) | 0x214c0,
- (0x02 << 18) | 0x13802,
- (0x03 << 18) | 0x30143,
- (0x04 << 18) | 0x0accc,
- (0x05 << 18) | 0x28986,
- (0x06 << 18) | 0x18008,
- (0x07 << 18) | 0x38400,
- (0x08 << 18) | 0x05108,
- (0x09 << 18) | 0x27ff8,
- (0x0A << 18) | 0x14000,
- (0x0B << 18) | 0x37f99,
- (0x0C << 18) | 0x0c000
-};
-
-u32 maxim_317_channel_data_24[][3] = {
- {(0x03 << 18) | 0x30143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channe1 01 */
- {(0x03 << 18) | 0x32140, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channe1 02 */
- {(0x03 << 18) | 0x32142, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channe1 03 */
- {(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channe1 04 */
- {(0x03 << 18) | 0x31140, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channe1 05 */
- {(0x03 << 18) | 0x31142, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channe1 06 */
- {(0x03 << 18) | 0x31143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channe1 07 */
- {(0x03 << 18) | 0x33140, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channe1 08 */
- {(0x03 << 18) | 0x33142, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channe1 09 */
- {(0x03 << 18) | 0x33143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channe1 10 */
- {(0x03 << 18) | 0x30940, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channe1 11 */
- {(0x03 << 18) | 0x30942, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channe1 12 */
- {(0x03 << 18) | 0x30943, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986} /* channe1 13 */
-};
-
-u32 maxim_317_channel_data_50[][3] = {
- {(0x03 << 18) | 0x33cc0, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2a986}, /* channel 36 */
- {(0x03 << 18) | 0x302c0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2a986}, /* channel 40 */
- {(0x03 << 18) | 0x302c3, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x2a986}, /* channel 44 */
- {(0x03 << 18) | 0x322c1, (0x04 << 18) | 0x09666, (0x05 << 18) | 0x2a986}, /* channel 48 */
- {(0x03 << 18) | 0x312c2, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x2a986}, /* channel 52 */
- {(0x03 << 18) | 0x332c0, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2a99e}, /* channel 56 */
- {(0x03 << 18) | 0x30ac0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2a99e}, /* channel 60 */
- {(0x03 << 18) | 0x30ac3, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x2a99e} /* channel 64 */
-};
-
-u32 maxim_317_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
-u32 maxim_317_power_data_50[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
/*
* ===================================================================
@@ -388,7 +343,7 @@ u32 maxim_317_power_data_50[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100}
* 0x0f 0xf00a0 ; Restore Initial Setting
* ==================================================================
*/
-u32 al2230_rf_data[] = {
+static u32 al2230_rf_data[] = {
(0x00 << 20) | 0x09EFC,
(0x01 << 20) | 0x8CCCC,
(0x02 << 20) | 0x40058,
@@ -406,7 +361,7 @@ u32 al2230_rf_data[] = {
(0x0F << 20) | 0xF01A0
};
-u32 al2230s_rf_data[] = {
+static u32 al2230s_rf_data[] = {
(0x00 << 20) | 0x09EFC,
(0x01 << 20) | 0x8CCCC,
(0x02 << 20) | 0x40058,
@@ -424,7 +379,7 @@ u32 al2230s_rf_data[] = {
(0x0F << 20) | 0xF01A0
};
-u32 al2230_channel_data_24[][2] = {
+static u32 al2230_channel_data_24[][2] = {
{(0x00 << 20) | 0x09EFC, (0x01 << 20) | 0x8CCCC}, /* channe1 01 */
{(0x00 << 20) | 0x09EFC, (0x01 << 20) | 0x8CCCD}, /* channe1 02 */
{(0x00 << 20) | 0x09E7C, (0x01 << 20) | 0x8CCCC}, /* channe1 03 */
@@ -446,7 +401,7 @@ u32 al2230_channel_data_24[][2] = {
#define AIROHA_TXVGA_MIDDLE_INDEX 12 /* Index for 0x96602 */
#define AIROHA_TXVGA_HIGH_INDEX 8 /* Index for 0x97602 1.0.24.0 1.0.28.0 */
-u32 al2230_txvga_data[][2] = {
+static u32 al2230_txvga_data[][2] = {
/* value , index */
{0x090202, 0},
{0x094202, 2},
@@ -497,7 +452,7 @@ u32 al2230_txvga_data[][2] = {
*/
/* channel independent registers: */
-u32 al7230_rf_data_24[] = {
+static u32 al7230_rf_data_24[] = {
(0x00 << 24) | 0x003790,
(0x01 << 24) | 0x133331,
(0x02 << 24) | 0x841FF2,
@@ -516,7 +471,7 @@ u32 al7230_rf_data_24[] = {
(0x0F << 24) | 0x1ABA8F
};
-u32 al7230_channel_data_24[][2] = {
+static u32 al7230_channel_data_24[][2] = {
{(0x00 << 24) | 0x003790, (0x01 << 24) | 0x133331}, /* channe1 01 */
{(0x00 << 24) | 0x003790, (0x01 << 24) | 0x1B3331}, /* channe1 02 */
{(0x00 << 24) | 0x003790, (0x01 << 24) | 0x033331}, /* channe1 03 */
@@ -534,7 +489,7 @@ u32 al7230_channel_data_24[][2] = {
};
/* channel independent registers: */
-u32 al7230_rf_data_50[] = {
+static u32 al7230_rf_data_50[] = {
(0x00 << 24) | 0x0FF520,
(0x01 << 24) | 0x000001,
(0x02 << 24) | 0x451FE2,
@@ -553,7 +508,7 @@ u32 al7230_rf_data_50[] = {
(0x0F << 24) | 0x12BACF /* 5Ghz default state */
};
-u32 al7230_channel_data_5[][4] = {
+static u32 al7230_channel_data_5[][4] = {
/* channel dependent registers: 0x00, 0x01 and 0x04 */
/* 11J =========== */
{184, (0x00 << 24) | 0x0FF520, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 184 */
@@ -603,7 +558,7 @@ u32 al7230_channel_data_5[][4] = {
*/
/* TXVGA Mapping Table <=== Register 0x0B */
-u32 al7230_txvga_data[][2] = {
+static u32 al7230_txvga_data[][2] = {
{0x08040B, 0}, /* TXVGA = 0; */
{0x08041B, 1}, /* TXVGA = 1; */
{0x08042B, 2}, /* TXVGA = 2; */
@@ -675,7 +630,7 @@ u32 al7230_txvga_data[][2] = {
* W89RF242 RFIC SPI programming initial data
* Winbond WLAN 11g RFIC BB-SPI register -- version FA5976A rev 1.3b
*/
-u32 w89rf242_rf_data[] = {
+static u32 w89rf242_rf_data[] = {
(0x00 << 24) | 0xF86100, /* 3E184; MODA (0x00) -- Normal mode ; calibration off */
(0x01 << 24) | 0xEFFFC2, /* 3BFFF; MODB (0x01) -- turn off RSSI, and other circuits are turned on */
(0x02 << 24) | 0x102504, /* 04094; FSET (0x02) -- default 20MHz crystal ; Icmp=1.5mA */
@@ -696,7 +651,7 @@ u32 w89rf242_rf_data[] = {
(0x12 << 24) | 0x000024 /* TMODC (0x12) -- Turn OFF Temperature sensor */
};
-u32 w89rf242_channel_data_24[][2] = {
+static u32 w89rf242_channel_data_24[][2] = {
{(0x03 << 24) | 0x025B06, (0x04 << 24) | 0x080408}, /* channe1 01 */
{(0x03 << 24) | 0x025C46, (0x04 << 24) | 0x080408}, /* channe1 02 */
{(0x03 << 24) | 0x025D86, (0x04 << 24) | 0x080408}, /* channe1 03 */
@@ -713,9 +668,7 @@ u32 w89rf242_channel_data_24[][2] = {
{(0x03 << 24) | 0x026D06, (0x04 << 24) | 0x080408} /* channe1 14 */
};
-u32 w89rf242_power_data_24[] = {(0x05 << 24) | 0x24C48A, (0x05 << 24) | 0x24C48A, (0x05 << 24) | 0x24C48A};
-
-u32 w89rf242_txvga_old_mapping[][2] = {
+static u32 w89rf242_txvga_old_mapping[][2] = {
{0, 0} , /* New <-> Old */
{1, 1} ,
{2, 2} ,
@@ -738,7 +691,7 @@ u32 w89rf242_txvga_old_mapping[][2] = {
{34, 19},
};
-u32 w89rf242_txvga_data[][5] = {
+static u32 w89rf242_txvga_data[][5] = {
/* low gain mode */
{(0x05 << 24) | 0x24C00A, 0, 0x00292315, 0x0800FEFF, 0x52523131}, /* min gain */
{(0x05 << 24) | 0x24C80A, 1, 0x00292315, 0x0800FEFF, 0x52523131},
@@ -920,7 +873,7 @@ void Uxx_power_on_procedure(struct hw_data *pHwData)
Wb35Reg_WriteSync(pHwData, 0x03f8, 0x7ff);
}
-static void Set_ChanIndep_RfData_al7230_24(struct hw_data *pHwData, u32 *pltmp,
+static void Set_ChanIndep_RfData_al7230_24(struct hw_data *pHwData, u32 *pltmp,
char number)
{
u8 i;
@@ -930,7 +883,7 @@ static void Set_ChanIndep_RfData_al7230_24(struct hw_data *pHwData, u32 *pltmp,
}
}
-static void Set_ChanIndep_RfData_al7230_50(struct hw_data *pHwData, u32 *pltmp,
+static void Set_ChanIndep_RfData_al7230_50(struct hw_data *pHwData, u32 *pltmp,
char number)
{
u8 i;
@@ -1088,7 +1041,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
msleep(5);
ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse((0x0F << 20) | 0xF01A0, 20);
- Wb35Reg_WriteSync(pHwData, 0x0864, ltmp) ;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
Wb35Reg_WriteSync(pHwData, 0x105c, pHwData->reg.BB5C);
pHwData->reg.BB50 &= ~0x13; /* (MASK_IQCAL_MODE|MASK_CALIB_START); */
@@ -1620,13 +1573,13 @@ void BBProcessor_initial(struct hw_data *pHwData)
reg->SQ3_filter[i] = 0x2f; /* half of Bit 0 ~ 6 */
}
-static inline void set_tx_power_per_channel_max2829(struct hw_data *pHwData,
+static inline void set_tx_power_per_channel_max2829(struct hw_data *pHwData,
struct chan_info Channel)
{
RFSynthesizer_SetPowerIndex(pHwData, 100);
}
-static void set_tx_power_per_channel_al2230(struct hw_data *pHwData,
+static void set_tx_power_per_channel_al2230(struct hw_data *pHwData,
struct chan_info Channel)
{
u8 index = 100;
@@ -1636,7 +1589,7 @@ static void set_tx_power_per_channel_al2230(struct hw_data *pHwData,
RFSynthesizer_SetPowerIndex(pHwData, index);
}
-static void set_tx_power_per_channel_al7230(struct hw_data *pHwData,
+static void set_tx_power_per_channel_al7230(struct hw_data *pHwData,
struct chan_info Channel)
{
u8 i, index = 100;
@@ -1660,7 +1613,7 @@ static void set_tx_power_per_channel_al7230(struct hw_data *pHwData,
RFSynthesizer_SetPowerIndex(pHwData, index);
}
-static void set_tx_power_per_channel_wb242(struct hw_data *pHwData,
+static void set_tx_power_per_channel_wb242(struct hw_data *pHwData,
struct chan_info Channel)
{
u8 index = 100;
@@ -2096,7 +2049,7 @@ void Mxx_initial(struct hw_data *pHwData)
pltmp[5] = reg->M38_MacControl;
/* M3C */
- tmp = (DEFAULT_PIFST << 26) | (DEFAULT_EIFST << 16) | (DEFAULT_DIFST << 8) | (DEFAULT_SIFST << 4) | DEFAULT_OSIFST ;
+ tmp = (DEFAULT_PIFST << 26) | (DEFAULT_EIFST << 16) | (DEFAULT_DIFST << 8) | (DEFAULT_SIFST << 4) | DEFAULT_OSIFST;
reg->M3C_MacControl = tmp;
pltmp[6] = tmp;
diff --git a/drivers/staging/winbond/wb35tx.c b/drivers/staging/winbond/wb35tx.c
index 30a77ccfe48..708c5b05f86 100644
--- a/drivers/staging/winbond/wb35tx.c
+++ b/drivers/staging/winbond/wb35tx.c
@@ -180,7 +180,7 @@ void Wb35Tx_CurrentTime(struct wbsoft_priv *adapter, u32 TimeCount)
{
struct hw_data *pHwData = &adapter->sHwData;
struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx;
- unsigned char Trigger = false;
+ bool Trigger = false;
if (pWb35Tx->TxTimer > TimeCount)
Trigger = true;
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3fa1ae4d3d7..07891a3e316 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -122,16 +122,16 @@ static void wbsoft_tx(struct ieee80211_hw *dev,
{
struct wbsoft_priv *priv = dev->priv;
- if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) {
+ if (priv->sMlmeFrame.is_in_used != PACKET_FREE_TO_USE) {
priv->sMlmeFrame.wNumTxMMPDUDiscarded++;
kfree_skb(skb);
return;
}
- priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
+ priv->sMlmeFrame.is_in_used = PACKET_COME_FROM_MLME;
priv->sMlmeFrame.pMMPDU = skb->data;
- priv->sMlmeFrame.DataType = FRAME_TYPE_802_11_MANAGEMENT;
+ priv->sMlmeFrame.data_type = FRAME_TYPE_802_11_MANAGEMENT;
priv->sMlmeFrame.len = skb->len;
priv->sMlmeFrame.wNumTxMMPDU++;
diff --git a/drivers/staging/wlags49_h2/hcf.h b/drivers/staging/wlags49_h2/hcf.h
index 2abeaa11d8c..71b44653690 100644
--- a/drivers/staging/wlags49_h2/hcf.h
+++ b/drivers/staging/wlags49_h2/hcf.h
@@ -372,22 +372,22 @@ typedef IFB_STRCT* IFBP;
/********************** W C I F U N C T I O N S P R O T O T Y P E S ******************************/
/***********************************************************************************************************/
-EXTERN_C int hcf_action (IFBP ifbp, hcf_16 cmd );
-EXTERN_C int hcf_connect (IFBP ifbp, hcf_io io_base );
-EXTERN_C int hcf_get_info (IFBP ifbp, LTVP ltvp );
-EXTERN_C int hcf_service_nic (IFBP ifbp, wci_bufp bufp, unsigned int len );
-EXTERN_C int hcf_cntl (IFBP ifbp, hcf_16 cmd );
-EXTERN_C int hcf_put_info (IFBP ifbp, LTVP ltvp );
-EXTERN_C int hcf_rcv_msg (IFBP ifbp, DESC_STRCT *descp, unsigned int offset );
-EXTERN_C int hcf_send_msg (IFBP ifbp, DESC_STRCT *dp, hcf_16 tx_cntl );
+EXTERN_C int hcf_action(IFBP ifbp, hcf_16 cmd);
+EXTERN_C int hcf_connect(IFBP ifbp, hcf_io io_base);
+EXTERN_C int hcf_get_info(IFBP ifbp, LTVP ltvp);
+EXTERN_C int hcf_service_nic(IFBP ifbp, wci_bufp bufp, unsigned int len);
+EXTERN_C int hcf_cntl(IFBP ifbp, hcf_16 cmd);
+EXTERN_C int hcf_put_info(IFBP ifbp, LTVP ltvp);
+EXTERN_C int hcf_rcv_msg(IFBP ifbp, DESC_STRCT *descp, unsigned int offset);
+EXTERN_C int hcf_send_msg(IFBP ifbp, DESC_STRCT *dp, hcf_16 tx_cntl);
#if HCF_DMA
-EXTERN_C void hcf_dma_tx_put (IFBP ifbp, DESC_STRCT *d, hcf_16 tx_cntl );
+EXTERN_C void hcf_dma_tx_put(IFBP ifbp, DESC_STRCT *d, hcf_16 tx_cntl);
EXTERN_C DESC_STRCT* hcf_dma_tx_get (IFBP ifbp );
EXTERN_C DESC_STRCT* hcf_dma_rx_get (IFBP ifbp );
-EXTERN_C void hcf_dma_rx_put (IFBP ifbp, DESC_STRCT *d );
+EXTERN_C void hcf_dma_rx_put(IFBP ifbp, DESC_STRCT *d);
#endif // HCF_DMA
#if (HCF_ASSERT) & HCF_ASSERT_LNK_MSF_RTN
-EXTERN_C void msf_assert (unsigned int line_number, hcf_16 trace, hcf_32 qual );
+EXTERN_C void msf_assert(unsigned int line_number, hcf_16 trace, hcf_32 qual);
#endif // HCF_ASSERT_LNK_MSF_RTN
#endif // HCF_H
diff --git a/drivers/staging/wlags49_h2/sta_h2.c b/drivers/staging/wlags49_h2/sta_h2.c
index 19bed819df1..25ac76f7d36 100644
--- a/drivers/staging/wlags49_h2/sta_h2.c
+++ b/drivers/staging/wlags49_h2/sta_h2.c
@@ -4472,8 +4472,8 @@ memimage fw_image = {
"FUPU7D37dhfwci\001C", /* signature, <format number>, C/Bin type */
(CFG_PROG_STRCT *) fw_image_code,
0x000F368E,
- 00000000, /* (dummy) pdaplug */
- 00000000, /* (dummy) priplug */
+ NULL, /* (dummy) pdaplug */
+ NULL, /* (dummy) priplug */
(CFG_RANGE20_STRCT *) fw_image_infocompat,
(CFG_IDENTITY_STRCT *) fw_image_infoidentity,
};
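
The two dummy members above are pointer-typed, so the 00000000 literals were plain integers used as null pointers, which sparse flags; NULL states the intent without changing the value. A tiny illustration with a hypothetical structure:

/* Hypothetical struct, only to show the sparse-visible difference. */
struct plug_refs {
        void *pdaplug;
        void *priplug;
};

static struct plug_refs clean = { NULL, NULL }; /* quiet under sparse   */
static struct plug_refs noisy = { 0, 0 };       /* works, but warns     */
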
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index f1bce18ea82..a4fd5c4717a 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -98,10 +98,10 @@ static int prism2_domibset_pstr32(wlandevice_t *wlandev,
/* The interface functions, called by the cfg80211 layer */
-int prism2_change_virtual_intf(struct wiphy *wiphy,
- struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
+static int prism2_change_virtual_intf(struct wiphy *wiphy,
+ struct net_device *dev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
{
wlandevice_t *wlandev = dev->ml_priv;
u32 data;
@@ -122,7 +122,7 @@ int prism2_change_virtual_intf(struct wiphy *wiphy,
data = 1;
break;
default:
- printk(KERN_WARNING "Operation mode: %d not support\n", type);
+ netdev_warn(dev, "Operation mode: %d not support\n", type);
return -EOPNOTSUPP;
}
@@ -140,9 +140,9 @@ exit:
return err;
}
-int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
{
wlandevice_t *wlandev = dev->ml_priv;
u32 did;
@@ -199,9 +199,10 @@ exit:
return err;
}
-int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
- void (*callback)(void *cookie, struct key_params*))
+static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback)(void *cookie, struct key_params*))
{
wlandevice_t *wlandev = dev->ml_priv;
struct key_params params;
@@ -228,8 +229,8 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr)
{
wlandevice_t *wlandev = dev->ml_priv;
u32 did;
@@ -274,8 +275,8 @@ exit:
return err;
}
-int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool unicast, bool multicast)
+static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool unicast, bool multicast)
{
wlandevice_t *wlandev = dev->ml_priv;
@@ -293,8 +294,8 @@ int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
}
-int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
{
wlandevice_t *wlandev = dev->ml_priv;
struct p80211msg_lnxreq_commsquality quality;
@@ -327,7 +328,7 @@ int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
return result;
}
-int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct net_device *dev;
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
@@ -352,7 +353,7 @@ int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EBUSY;
if (wlandev->macmode == WLAN_MACMODE_ESS_AP) {
- printk(KERN_ERR "Can't scan in AP mode\n");
+ netdev_err(dev, "Can't scan in AP mode\n");
return -EOPNOTSUPP;
}
@@ -436,7 +437,7 @@ exit:
return err;
}
-int prism2_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int prism2_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
@@ -478,8 +479,8 @@ exit:
return err;
}
-int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme)
+static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
{
wlandevice_t *wlandev = dev->ml_priv;
struct ieee80211_channel *channel = sme->channel;
@@ -510,7 +511,7 @@ int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && is_wep))
msg_join.authtype.data = P80211ENUM_authalg_sharedkey;
else
- printk(KERN_WARNING
+ netdev_warn(dev,
"Unhandled authorisation type for connect (%d)\n",
sme->auth_type);
@@ -602,8 +603,8 @@ exit:
return err;
}
-int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
+static int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
{
wlandevice_t *wlandev = dev->ml_priv;
struct p80211msg_lnxreq_autojoin msg_join;
@@ -626,20 +627,20 @@ int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev,
}
-int prism2_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params)
+static int prism2_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
{
return -EOPNOTSUPP;
}
-int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+static int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
return -EOPNOTSUPP;
}
-int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
+static int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
@@ -665,8 +666,8 @@ exit:
return err;
}
-int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+static int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
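
All of these cfg80211 callbacks are reached only through the ops table registered with the wiphy, which is why they can become static with no other callers to fix up. Roughly how they are presumably wired up later in this file (table name assumed; not shown in these hunks):

static const struct cfg80211_ops prism2_usb_cfg_ops = {
        .change_virtual_intf    = prism2_change_virtual_intf,
        .add_key                = prism2_add_key,
        .get_key                = prism2_get_key,
        .del_key                = prism2_del_key,
        .set_default_key        = prism2_set_default_key,
        .get_station            = prism2_get_station,
        .scan                   = prism2_scan,
        .set_wiphy_params       = prism2_set_wiphy_params,
        .connect                = prism2_connect,
        .disconnect             = prism2_disconnect,
        .join_ibss              = prism2_join_ibss,
        .leave_ibss             = prism2_leave_ibss,
        .set_tx_power           = prism2_set_tx_power,
        .get_tx_power           = prism2_get_tx_power,
};
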
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index c1a8cb62515..5b8b094c872 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -355,7 +355,7 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
/* Check whether we need to reset the RX pipe */
if (result == -EPIPE) {
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"%s rx pipe stalled: requesting reset\n",
hw->wlandev->netdev->name);
if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))
@@ -405,7 +405,7 @@ static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t memflags)
/* Test whether we need to reset the TX pipe */
if (result == -EPIPE) {
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"%s tx pipe stalled: requesting reset\n",
netdev->name);
set_bit(WORK_TX_HALT, &hw->usb_flags);
@@ -454,11 +454,11 @@ static void hfa384x_usb_defer(struct work_struct *data)
ret = usb_clear_halt(hw->usb, hw->endp_in);
if (ret != 0) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Failed to clear rx pipe for %s: err=%d\n",
netdev->name, ret);
} else {
- printk(KERN_INFO "%s rx pipe reset complete.\n",
+ netdev_info(hw->wlandev->netdev, "%s rx pipe reset complete.\n",
netdev->name);
clear_bit(WORK_RX_HALT, &hw->usb_flags);
set_bit(WORK_RX_RESUME, &hw->usb_flags);
@@ -471,7 +471,7 @@ static void hfa384x_usb_defer(struct work_struct *data)
ret = submit_rx_urb(hw, GFP_KERNEL);
if (ret != 0) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Failed to resume %s rx pipe.\n", netdev->name);
} else {
clear_bit(WORK_RX_RESUME, &hw->usb_flags);
@@ -485,11 +485,11 @@ static void hfa384x_usb_defer(struct work_struct *data)
usb_kill_urb(&hw->tx_urb);
ret = usb_clear_halt(hw->usb, hw->endp_out);
if (ret != 0) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Failed to clear tx pipe for %s: err=%d\n",
netdev->name, ret);
} else {
- printk(KERN_INFO "%s tx pipe reset complete.\n",
+ netdev_info(hw->wlandev->netdev, "%s tx pipe reset complete.\n",
netdev->name);
clear_bit(WORK_TX_HALT, &hw->usb_flags);
set_bit(WORK_TX_RESUME, &hw->usb_flags);
@@ -1211,7 +1211,7 @@ int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis)
result = usb_reset_device(hw->usb);
if (result < 0) {
- printk(KERN_ERR "usb_reset_device() failed, result=%d.\n",
+ netdev_err(hw->wlandev->netdev, "usb_reset_device() failed, result=%d.\n",
result);
}
@@ -1311,7 +1311,7 @@ cleanup:
if (ctlx->state == CTLX_COMPLETE) {
result = completor->complete(completor);
} else {
- printk(KERN_WARNING "CTLX[%d] error: state(%s)\n",
+ netdev_warn(hw->wlandev->netdev, "CTLX[%d] error: state(%s)\n",
le16_to_cpu(ctlx->outbuf.type),
ctlxstr(ctlx->state));
result = -EIO;
@@ -2018,7 +2018,7 @@ int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
if (hw->dlstate != HFA384x_DLSTATE_FLASHENABLED)
return -EINVAL;
- printk(KERN_INFO "Download %d bytes to flash @0x%06x\n", len, daddr);
+ netdev_info(hw->wlandev->netdev, "Download %d bytes to flash @0x%06x\n", len, daddr);
/* Convert to flat address for arithmetic */
/* NOTE: dlbuffer RID stores the address in AUX format */
@@ -2028,7 +2028,7 @@ int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
hw->bufinfo.page, hw->bufinfo.offset, dlbufaddr);
#if 0
- printk(KERN_WARNING "dlbuf@0x%06lx len=%d to=%d\n", dlbufaddr,
+ netdev_warn(hw->wlandev->netdev, "dlbuf@0x%06lx len=%d to=%d\n", dlbufaddr,
hw->bufinfo.len, hw->dltimeout);
#endif
/* Calculations to determine how many fills of the dlbuffer to do
@@ -2055,14 +2055,14 @@ int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
burnlo = HFA384x_ADDR_CMD_MKOFF(burndaddr);
burnhi = HFA384x_ADDR_CMD_MKPAGE(burndaddr);
- printk(KERN_INFO "Writing %d bytes to flash @0x%06x\n",
+ netdev_info(hw->wlandev->netdev, "Writing %d bytes to flash @0x%06x\n",
burnlen, burndaddr);
/* Set the download mode */
result = hfa384x_cmd_download(hw, HFA384x_PROGMODE_NV,
burnlo, burnhi, burnlen);
if (result) {
- printk(KERN_ERR "download(NV,lo=%x,hi=%x,len=%x) "
+ netdev_err(hw->wlandev->netdev, "download(NV,lo=%x,hi=%x,len=%x) "
"cmd failed, result=%d. Aborting d/l\n",
burnlo, burnhi, burnlen, result);
goto exit_proc;
@@ -2094,7 +2094,7 @@ int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
HFA384x_PROGMODE_NVWRITE,
0, 0, 0);
if (result) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"download(NVWRITE,lo=%x,hi=%x,len=%x) "
"cmd failed, result=%d. Aborting d/l\n",
burnlo, burnhi, burnlen, result);
@@ -2279,7 +2279,7 @@ int hfa384x_drvr_ramdl_enable(hfa384x_t *hw, u32 exeaddr)
/* Check that a port isn't active */
for (i = 0; i < HFA384x_PORTID_MAX; i++) {
if (hw->port_enabled[i]) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Can't download with a macport enabled.\n");
return -EINVAL;
}
@@ -2287,7 +2287,7 @@ int hfa384x_drvr_ramdl_enable(hfa384x_t *hw, u32 exeaddr)
/* Check that we're not already in a download state */
if (hw->dlstate != HFA384x_DLSTATE_DISABLED) {
- printk(KERN_ERR "Download state not disabled.\n");
+ netdev_err(hw->wlandev->netdev, "Download state not disabled.\n");
return -EINVAL;
}
@@ -2352,7 +2352,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
if (hw->dlstate != HFA384x_DLSTATE_RAMENABLED)
return -EINVAL;
- printk(KERN_INFO "Writing %d bytes to ram @0x%06x\n", len, daddr);
+ netdev_info(hw->wlandev->netdev, "Writing %d bytes to ram @0x%06x\n", len, daddr);
/* How many dowmem calls? */
nwrites = len / HFA384x_USB_RWMEM_MAXLEN;
@@ -2449,7 +2449,7 @@ int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len)
len);
if (result) {
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"Read from index %zd failed, continuing\n", i);
continue;
}
@@ -2462,13 +2462,13 @@ int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len)
pdrcode = le16_to_cpu(pda[currpdr + 1]);
/* Test the record length */
if (pdrlen > HFA384x_PDR_LEN_MAX || pdrlen == 0) {
- printk(KERN_ERR "pdrlen invalid=%d\n", pdrlen);
+ netdev_err(hw->wlandev->netdev, "pdrlen invalid=%d\n", pdrlen);
pdaok = 0;
break;
}
/* Test the code */
if (!hfa384x_isgood_pdrcode(pdrcode)) {
- printk(KERN_ERR "pdrcode invalid=%d\n",
+ netdev_err(hw->wlandev->netdev, "pdrcode invalid=%d\n",
pdrcode);
pdaok = 0;
break;
@@ -2484,7 +2484,7 @@ int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len)
}
}
if (pdaok) {
- printk(KERN_INFO
+ netdev_info(hw->wlandev->netdev,
"PDA Read from 0x%08x in %s space.\n",
pdaloc[i].cardaddr,
pdaloc[i].auxctl == 0 ? "EXTDS" :
@@ -2564,20 +2564,20 @@ int hfa384x_drvr_start(hfa384x_t *hw)
result =
usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status);
if (result < 0) {
- printk(KERN_ERR "Cannot get bulk in endpoint status.\n");
+ netdev_err(hw->wlandev->netdev, "Cannot get bulk in endpoint status.\n");
goto done;
}
if ((status == 1) && usb_clear_halt(hw->usb, hw->endp_in))
- printk(KERN_ERR "Failed to reset bulk in endpoint.\n");
+ netdev_err(hw->wlandev->netdev, "Failed to reset bulk in endpoint.\n");
result =
usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status);
if (result < 0) {
- printk(KERN_ERR "Cannot get bulk out endpoint status.\n");
+ netdev_err(hw->wlandev->netdev, "Cannot get bulk out endpoint status.\n");
goto done;
}
if ((status == 1) && usb_clear_halt(hw->usb, hw->endp_out))
- printk(KERN_ERR "Failed to reset bulk out endpoint.\n");
+ netdev_err(hw->wlandev->netdev, "Failed to reset bulk out endpoint.\n");
/* Synchronous unlink, in case we're trying to restart the driver */
usb_kill_urb(&hw->rx_urb);
@@ -2585,7 +2585,7 @@ int hfa384x_drvr_start(hfa384x_t *hw)
/* Post the IN urb */
result = submit_rx_urb(hw, GFP_KERNEL);
if (result != 0) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Fatal, failed to submit RX URB, result=%d\n", result);
goto done;
}
@@ -2605,7 +2605,7 @@ int hfa384x_drvr_start(hfa384x_t *hw)
result = result2 = hfa384x_cmd_initialize(hw);
if (result1 != 0) {
if (result2 != 0) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"cmd_initialize() failed on two attempts, results %d and %d\n",
result1, result2);
usb_kill_urb(&hw->rx_urb);
@@ -2616,9 +2616,9 @@ int hfa384x_drvr_start(hfa384x_t *hw)
pr_debug("but second attempt succeeded. All should be ok\n");
}
} else if (result2 != 0) {
- printk(KERN_WARNING "First cmd_initialize() succeeded, but second attempt failed (result=%d)\n",
+ netdev_warn(hw->wlandev->netdev, "First cmd_initialize() succeeded, but second attempt failed (result=%d)\n",
result2);
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"Most likely the card will be functional\n");
goto done;
}
@@ -2709,7 +2709,7 @@ int hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
char *ptr;
if (hw->tx_urb.status == -EINPROGRESS) {
- printk(KERN_WARNING "TX URB already in use\n");
+ netdev_warn(hw->wlandev->netdev, "TX URB already in use\n");
result = 3;
goto exit;
}
@@ -2784,7 +2784,7 @@ int hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
result = 1;
ret = submit_tx_urb(hw, &hw->tx_urb, GFP_ATOMIC);
if (ret != 0) {
- printk(KERN_ERR "submit_tx_urb() failed, error=%d\n", ret);
+ netdev_err(hw->wlandev->netdev, "submit_tx_urb() failed, error=%d\n", ret);
result = 3;
}
@@ -3009,7 +3009,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx)
break;
default:
- printk(KERN_ERR "CTLX[%d] not in a terminating state(%s)\n",
+ netdev_err(hw->wlandev->netdev, "CTLX[%d] not in a terminating state(%s)\n",
le16_to_cpu(ctlx->outbuf.type), ctlxstr(ctlx->state));
break;
} /* switch */
@@ -3091,7 +3091,7 @@ static void hfa384x_usbctlxq_run(hfa384x_t *hw)
* this CTLX back in the "pending" queue
* and schedule a reset ...
*/
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"%s tx pipe stalled: requesting reset\n",
hw->wlandev->netdev->name);
list_move(&head->list, &hw->ctlxq.pending);
@@ -3101,12 +3101,12 @@ static void hfa384x_usbctlxq_run(hfa384x_t *hw)
}
if (result == -ESHUTDOWN) {
- printk(KERN_WARNING "%s urb shutdown!\n",
+ netdev_warn(hw->wlandev->netdev, "%s urb shutdown!\n",
hw->wlandev->netdev->name);
break;
}
- printk(KERN_ERR "Failed to submit CTLX[%d]: error=%d\n",
+ netdev_err(hw->wlandev->netdev, "Failed to submit CTLX[%d]: error=%d\n",
le16_to_cpu(head->outbuf.type), result);
unlocked_usbctlx_complete(hw, head);
} /* while */
@@ -3173,7 +3173,7 @@ static void hfa384x_usbin_callback(struct urb *urb)
break;
case -EPIPE:
- printk(KERN_WARNING "%s rx pipe stalled: requesting reset\n",
+ netdev_warn(hw->wlandev->netdev, "%s rx pipe stalled: requesting reset\n",
wlandev->netdev->name);
if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))
schedule_work(&hw->usb_work);
@@ -3224,7 +3224,7 @@ static void hfa384x_usbin_callback(struct urb *urb)
result = submit_rx_urb(hw, GFP_ATOMIC);
if (result != 0) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Fatal, failed to resubmit rx_urb. error=%d\n",
result);
}
@@ -3360,7 +3360,7 @@ retry:
* Check that our message is what we're expecting ...
*/
if (ctlx->outbuf.type != intype) {
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"Expected IN[%d], received IN[%d] - ignored.\n",
le16_to_cpu(ctlx->outbuf.type),
le16_to_cpu(intype));
@@ -3396,7 +3396,7 @@ retry:
/*
* Throw this CTLX away ...
*/
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Matched IN URB, CTLX[%d] in invalid state(%s)."
" Discarded.\n",
le16_to_cpu(ctlx->outbuf.type),
@@ -3534,7 +3534,7 @@ static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb)
break;
default:
- printk(KERN_WARNING "Received frame on unsupported port=%d\n",
+ netdev_warn(hw->wlandev->netdev, "Received frame on unsupported port=%d\n",
HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status));
goto done;
break;
@@ -3596,7 +3596,7 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
skb = dev_alloc_skb(skblen);
if (skb == NULL) {
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"alloc_skb failed trying to allocate %d bytes\n",
skblen);
return;
@@ -3714,7 +3714,7 @@ static void hfa384x_usbout_callback(struct urb *urb)
case -EPIPE:
{
hfa384x_t *hw = wlandev->priv;
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"%s tx pipe stalled: requesting reset\n",
wlandev->netdev->name);
if (!test_and_set_bit
@@ -3747,7 +3747,7 @@ static void hfa384x_usbout_callback(struct urb *urb)
break;
default:
- printk(KERN_INFO "unknown urb->status=%d\n",
+ netdev_info(wlandev->netdev, "unknown urb->status=%d\n",
urb->status);
++(wlandev->linux_stats.tx_errors);
break;
@@ -3841,7 +3841,7 @@ retry:
default:
/* This is NOT a valid CTLX "success" state! */
- printk(KERN_ERR
+ netdev_err(hw->wlandev->netdev,
"Illegal CTLX[%d] success state(%s, %d) in OUT URB\n",
le16_to_cpu(ctlx->outbuf.type),
ctlxstr(ctlx->state), urb->status);
@@ -3851,7 +3851,7 @@ retry:
/* If the pipe has stalled then we need to reset it */
if ((urb->status == -EPIPE) &&
!test_and_set_bit(WORK_TX_HALT, &hw->usb_flags)) {
- printk(KERN_WARNING
+ netdev_warn(hw->wlandev->netdev,
"%s tx pipe stalled: requesting reset\n",
hw->wlandev->netdev->name);
schedule_work(&hw->usb_work);
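The hunks above convert the prism2 USB code from raw printk(KERN_*) calls to the netdev_* helpers, which prefix each message with the driver and interface name. A minimal sketch of that calling convention, using a hypothetical helper rather than code from this patch:

    #include <linux/netdevice.h>

    /* Hypothetical helper: report a failed command against the net_device
     * that owns this hardware state, so the log line carries "wlan0:" etc.
     */
    static void report_cmd_failure(struct net_device *netdev, int result)
    {
            /* old style: printk(KERN_ERR "cmd failed, result=%d\n", result); */
            netdev_err(netdev, "cmd failed, result=%d\n", result);
    }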
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 2fecca2302f..2e0bd24f997 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -138,7 +138,7 @@ typedef struct p80211_frmrx_t {
} p80211_frmrx_t;
/* called by /proc/net/wireless */
-struct iw_statistics *p80211wext_get_wireless_stats(netdevice_t * dev);
+struct iw_statistics *p80211wext_get_wireless_stats(netdevice_t *dev);
/* wireless extensions' ioctls */
extern struct iw_handler_def p80211wext_handler_def;
int p80211wext_event_associated(struct wlandevice *wlandev, int assoc);
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 77e50a4aa7e..c4fabadf5d7 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -134,7 +134,7 @@ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen)
return -1;
#ifdef WEP_DEBUG
- printk(KERN_DEBUG "WEP key %d len %d = %*phC\n", keynum, keylen,
+ pr_debug("WEP key %d len %d = %*phC\n", keynum, keylen,
8, key);
#endif
@@ -182,7 +182,7 @@ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
keylen += 3; /* add in IV bytes */
#ifdef WEP_DEBUG
- printk(KERN_DEBUG "D %d: %*ph (%d %d) %*phC\n", len, 3, key,
+ pr_debug("D %d: %*ph (%d %d) %*phC\n", len, 3, key,
keyidx, keylen, 5, key + 3);
#endif
@@ -259,7 +259,7 @@ int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
keylen += 3; /* add in IV bytes */
#ifdef WEP_DEBUG
- printk(KERN_DEBUG "E %d (%d/%d %d) %*ph %*phC\n", len,
+ pr_debug("E %d (%d/%d %d) %*ph %*phC\n", len,
iv[3], keynum, keylen, 3, key, 5, key + 3);
#endif
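The printk(KERN_DEBUG ...) calls guarded by #ifdef WEP_DEBUG become pr_debug(), which is compiled out unless DEBUG is defined at build time or the call site is enabled through CONFIG_DYNAMIC_DEBUG. A minimal sketch with a hypothetical key-dump helper (not from this driver):

    #define DEBUG                   /* or rely on dynamic debug at run time */
    #include <linux/printk.h>
    #include <linux/types.h>

    static void dump_wep_key(const u8 *key, int keylen)
    {
            /* Emitted only when DEBUG or dynamic debug enables this site. */
            pr_debug("WEP key len %d = %*phC\n", keylen, keylen, key);
    }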
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 3b3e17d3f6f..b3ff603e622 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -2070,7 +2070,6 @@ static void xgifb_remove(struct pci_dev *pdev)
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
pci_disable_device(pdev);
framebuffer_release(fb_info);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver xgifb_driver = {
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 46dea3f1088..400c726753f 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -845,11 +845,10 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
VCLKIndex = TVCLKBASE_315_25 + HiTVVCLK;
if (pVBInfo->SetFlag & TVSimuMode) {
- if (modeflag & Charx8Dot) {
+ if (modeflag & Charx8Dot)
VCLKIndex = TVCLKBASE_315_25 + HiTVSimuVCLK;
- } else {
+ else
VCLKIndex = TVCLKBASE_315_25 + HiTVTextVCLK;
- }
}
/* 301lv */
@@ -5274,9 +5273,8 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
outb(0x00, pVBInfo->P3c8);
- for (i = 0; i < 256 * 3; i++) {
+ for (i = 0; i < 256 * 3; i++)
outb(0x0F, (pVBInfo->P3c8 + 1)); /* DAC_TEST_PARMS */
- }
mdelay(1);
@@ -5291,9 +5289,8 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
/* avoid display something, set BLACK DAC if not restore DAC */
outb(0x00, pVBInfo->P3c8);
- for (i = 0; i < 256 * 3; i++) {
+ for (i = 0; i < 256 * 3; i++)
outb(0, (pVBInfo->P3c8 + 1));
- }
xgifb_reg_set(pVBInfo->P3c4, 0x01, SR01);
xgifb_reg_set(pVBInfo->P3d4, 0x63, CR63);
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 7168eedbd96..f17e5b9bd33 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1284,7 +1284,7 @@ static const struct SiS_LVDSData XGI_LVDS1024x768Des_1[] = {
{0, 1048, 0, 771}, /* 04 (640x480x60Hz) */
{0, 1048, 0, 771}, /* 05 (800x600x60Hz) */
{0, 1048, 805, 770} /* 06 (1024x768x60Hz) */
-} ;
+};
static const struct SiS_LVDSData XGI_LVDS1024x768Des_2[] = {
{1142, 856, 622, 587}, /* 00 (320x200,320x400,640x200,640x400) */
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
index 8a4181f846a..b15f778b4c6 100644
--- a/drivers/staging/xillybus/Kconfig
+++ b/drivers/staging/xillybus/Kconfig
@@ -16,14 +16,14 @@ if XILLYBUS
config XILLYBUS_PCIE
tristate "Xillybus over PCIe"
- depends on XILLYBUS && PCI
+ depends on PCI
help
Set to M if you want Xillybus to use PCI Express for communicating
with the FPGA.
config XILLYBUS_OF
tristate "Xillybus over Device Tree"
- depends on XILLYBUS && OF_ADDRESS && OF_IRQ
+ depends on OF_ADDRESS && OF_IRQ
help
Set to M if you want Xillybus to find its resources from the
Open Firmware Flattened Device Tree. If the target is an embedded
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
index 7db6f03a005..2ebaf166038 100644
--- a/drivers/staging/xillybus/xillybus_core.c
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -101,7 +101,7 @@ static struct workqueue_struct *xillybus_wq;
* wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
*/
-static void malformed_message(u32 *buf)
+static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{
int opcode;
int msg_channel, msg_bufno, msg_data, msg_dir;
@@ -112,9 +112,9 @@ static void malformed_message(u32 *buf)
msg_bufno = (buf[0] >> 12) & 0x3ff;
msg_data = buf[1] & 0xfffffff;
- pr_warn("xillybus: Malformed message (skipping): "
- "opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
- opcode, msg_channel, msg_dir, msg_bufno, msg_data);
+ dev_warn(endpoint->dev,
+ "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
+ opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}
/*
@@ -152,16 +152,16 @@ irqreturn_t xillybus_isr(int irq, void *data)
for (i = 0; i < buf_size; i += 2)
if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
- malformed_message(&buf[i]);
- pr_warn("xillybus: Sending a NACK on "
- "counter %x (instead of %x) on entry %d\n",
+ malformed_message(ep, &buf[i]);
+ dev_warn(ep->dev,
+ "Sending a NACK on counter %x (instead of %x) on entry %d\n",
((buf[i+1] >> 28) & 0xf),
ep->msg_counter,
i/2);
if (++ep->failed_messages > 10)
- pr_err("xillybus: Lost sync with "
- "interrupt messages. Stopping.\n");
+ dev_err(ep->dev,
+ "Lost sync with interrupt messages. Stopping.\n");
else {
ep->ephw->hw_sync_sgl_for_device(
ep,
@@ -177,7 +177,7 @@ irqreturn_t xillybus_isr(int irq, void *data)
break;
if (i >= buf_size) {
- pr_err("xillybus: Bad interrupt message. Stopping.\n");
+ dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
return IRQ_HANDLED;
}
@@ -196,7 +196,7 @@ irqreturn_t xillybus_isr(int irq, void *data)
if ((msg_channel > ep->num_channels) ||
(msg_channel == 0)) {
- malformed_message(&buf[i]);
+ malformed_message(ep, &buf[i]);
break;
}
@@ -204,7 +204,7 @@ irqreturn_t xillybus_isr(int irq, void *data)
if (msg_dir) { /* Write channel */
if (msg_bufno >= channel->num_wr_buffers) {
- malformed_message(&buf[i]);
+ malformed_message(ep, &buf[i]);
break;
}
spin_lock(&channel->wr_spinlock);
@@ -221,7 +221,7 @@ irqreturn_t xillybus_isr(int irq, void *data)
/* Read channel */
if (msg_bufno >= channel->num_rd_buffers) {
- malformed_message(&buf[i]);
+ malformed_message(ep, &buf[i]);
break;
}
@@ -243,14 +243,14 @@ irqreturn_t xillybus_isr(int irq, void *data)
if ((msg_channel > ep->num_channels) ||
(msg_channel == 0) || (!msg_dir) ||
!ep->channels[msg_channel]->wr_supports_nonempty) {
- malformed_message(&buf[i]);
+ malformed_message(ep, &buf[i]);
break;
}
channel = ep->channels[msg_channel];
if (msg_bufno >= channel->num_wr_buffers) {
- malformed_message(&buf[i]);
+ malformed_message(ep, &buf[i]);
break;
}
spin_lock(&channel->wr_spinlock);
@@ -283,16 +283,11 @@ irqreturn_t xillybus_isr(int irq, void *data)
case XILLYMSG_OPCODE_FATAL_ERROR:
ep->fatal_error = 1;
wake_up_interruptible(&ep->ep_wait); /* For select() */
- pr_err("xillybus: FPGA reported a fatal "
- "error. This means that the low-level "
- "communication with the device has failed. "
- "This hardware problem is most likely "
- "unrelated to xillybus (neither kernel "
- "module nor FPGA core), but reports are "
- "still welcome. All I/O is aborted.\n");
+ dev_err(ep->dev,
+ "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
break;
default:
- malformed_message(&buf[i]);
+ malformed_message(ep, &buf[i]);
break;
}
}
@@ -486,8 +481,8 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
if ((channelnum > ep->num_channels) ||
((channelnum == 0) && !is_writebuf)) {
- pr_err("xillybus: IDT requests channel out "
- "of range. Aborting.\n");
+ dev_err(ep->dev,
+ "IDT requests channel out of range. Aborting.\n");
return -ENODEV;
}
@@ -565,9 +560,8 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
*/
if ((left_of_wr_salami < bytebufsize) &&
(left_of_wr_salami > 0)) {
- pr_err("xillybus: "
- "Corrupt buffer allocation "
- "in IDT. Aborting.\n");
+ dev_err(ep->dev,
+ "Corrupt buffer allocation in IDT. Aborting.\n");
return -ENODEV;
}
@@ -644,9 +638,8 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
*/
if ((left_of_rd_salami < bytebufsize) &&
(left_of_rd_salami > 0)) {
- pr_err("xillybus: "
- "Corrupt buffer allocation "
- "in IDT. Aborting.\n");
+ dev_err(ep->dev,
+ "Corrupt buffer allocation in IDT. Aborting.\n");
return -ENODEV;
}
@@ -706,19 +699,19 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
}
if (!msg_buf_done) {
- pr_err("xillybus: Corrupt IDT: No message buffer. "
- "Aborting.\n");
+ dev_err(ep->dev,
+ "Corrupt IDT: No message buffer. Aborting.\n");
return -ENODEV;
}
return 0;
memfail:
- pr_err("xillybus: Failed to allocate write buffer memory. "
- "Aborting.\n");
+ dev_err(ep->dev,
+ "Failed to allocate write buffer memory. Aborting.\n");
return -ENOMEM;
dmafail:
- pr_err("xillybus: Failed to map DMA memory!. Aborting.\n");
+ dev_err(ep->dev, "Failed to map DMA memory!. Aborting.\n");
return -ENOMEM;
}
@@ -745,8 +738,8 @@ static void xilly_scan_idt(struct xilly_endpoint *endpoint,
scan++;
if (scan > end_of_idt) {
- pr_err("xillybus: IDT device name list overflow. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "IDT device name list overflow. Aborting.\n");
idt_handle->chandesc = NULL;
return;
} else
@@ -757,8 +750,8 @@ static void xilly_scan_idt(struct xilly_endpoint *endpoint,
if (len & 0x03) {
idt_handle->chandesc = NULL;
- pr_err("xillybus: Corrupt IDT device name list. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "Corrupt IDT device name list. Aborting.\n");
}
idt_handle->entries = len >> 2;
@@ -787,7 +780,7 @@ static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
XILLY_TIMEOUT);
if (channel->wr_sleepy) {
- pr_err("xillybus: Failed to obtain IDT. Aborting.\n");
+ dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");
if (endpoint->fatal_error)
return -EIO;
@@ -803,8 +796,8 @@ static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
DMA_FROM_DEVICE);
if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
- pr_err("xillybus: IDT length mismatch (%d != %d). "
- "Aborting.\n",
+ dev_err(endpoint->dev,
+ "IDT length mismatch (%d != %d). Aborting.\n",
channel->wr_buffers[0]->end_offset, endpoint->idtlen);
rc = -ENODEV;
return rc;
@@ -812,7 +805,7 @@ static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
if (crc32_le(~0, channel->wr_buffers[0]->addr,
endpoint->idtlen+1) != 0) {
- pr_err("xillybus: IDT failed CRC check. Aborting.\n");
+ dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
rc = -ENODEV;
return rc;
}
@@ -821,9 +814,8 @@ static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
/* Check version number. Accept anything below 0x82 for now. */
if (*version > 0x82) {
- pr_err("xillybus: No support for IDT version 0x%02x. "
- "Maybe the xillybus driver needs an upgarde. "
- "Aborting.\n",
+ dev_err(endpoint->dev,
+			  "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
(int) *version);
rc = -ENODEV;
return rc;
@@ -1312,9 +1304,8 @@ static int xillybus_myflush(struct xilly_channel *channel, long timeout)
channel->rd_wait,
(!channel->rd_full),
timeout) == 0) {
- pr_warn("xillybus: "
- "Timed out while flushing. "
- "Output data may be lost.\n");
+ dev_warn(channel->endpoint->dev,
+ "Timed out while flushing. Output data may be lost.\n");
rc = -ETIMEDOUT;
break;
@@ -1354,12 +1345,11 @@ static void xillybus_autoflush(struct work_struct *work)
rc = xillybus_myflush(channel, -1);
if (rc == -EINTR)
- pr_warn("xillybus: Autoflush failed because "
- "work queue thread got a signal.\n");
+ dev_warn(channel->endpoint->dev,
+ "Autoflush failed because work queue thread got a signal.\n");
else if (rc)
- pr_err("xillybus: Autoflush failed under "
- "weird circumstances.\n");
-
+ dev_err(channel->endpoint->dev,
+ "Autoflush failed under weird circumstances.\n");
}
static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
@@ -1615,8 +1605,8 @@ static int xillybus_open(struct inode *inode, struct file *filp)
mutex_unlock(&ep_list_lock);
if (!endpoint) {
- pr_err("xillybus: open() failed to find a device "
- "for major=%d and minor=%d\n", major, minor);
+ pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
+ major, minor);
return -ENODEV;
}
@@ -1642,15 +1632,15 @@ static int xillybus_open(struct inode *inode, struct file *filp)
if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
(channel->wr_synchronous || !channel->wr_allow_partial ||
!channel->wr_supports_nonempty)) {
- pr_err("xillybus: open() failed: "
- "O_NONBLOCK not allowed for read on this device\n");
+ dev_err(endpoint->dev,
+ "open() failed: O_NONBLOCK not allowed for read on this device\n");
return -ENODEV;
}
if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
(channel->rd_synchronous || !channel->rd_allow_partial)) {
- pr_err("xillybus: open() failed: "
- "O_NONBLOCK not allowed for write on this device\n");
+ dev_err(endpoint->dev,
+ "open() failed: O_NONBLOCK not allowed for write on this device\n");
return -ENODEV;
}
@@ -1765,8 +1755,8 @@ static int xillybus_release(struct inode *inode, struct file *filp)
rc = mutex_lock_interruptible(&channel->rd_mutex);
if (rc) {
- pr_warn("xillybus: Failed to close file. "
- "Hardware left in messy state.\n");
+ dev_warn(channel->endpoint->dev,
+ "Failed to close file. Hardware left in messy state.\n");
return rc;
}
@@ -1791,8 +1781,8 @@ static int xillybus_release(struct inode *inode, struct file *filp)
if (filp->f_mode & FMODE_READ) {
rc = mutex_lock_interruptible(&channel->wr_mutex);
if (rc) {
- pr_warn("xillybus: Failed to close file. "
- "Hardware left in messy state.\n");
+ dev_warn(channel->endpoint->dev,
+ "Failed to close file. Hardware left in messy state.\n");
return rc;
}
@@ -1853,10 +1843,8 @@ static int xillybus_release(struct inode *inode, struct file *filp)
if (channel->wr_sleepy) {
mutex_unlock(&channel->wr_mutex);
- pr_warn("xillybus: Hardware failed to "
- "respond to close command, "
- "therefore left in "
- "messy state.\n");
+ dev_warn(channel->endpoint->dev,
+ "Hardware failed to respond to close command, therefore left in messy state.\n");
return -EINTR;
}
}
@@ -2022,7 +2010,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
xillyname);
if (rc) {
- pr_warn("xillybus: Failed to obtain major/minors");
+ dev_warn(endpoint->dev, "Failed to obtain major/minors");
goto error1;
}
@@ -2034,7 +2022,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
endpoint->num_channels);
if (rc) {
- pr_warn("xillybus: Failed to add cdev. Aborting.\n");
+ dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
goto error2;
}
@@ -2057,14 +2045,15 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
"%s", devname);
if (IS_ERR(device)) {
- pr_warn("xillybus: Failed to create %s "
- "device. Aborting.\n", devname);
+ dev_warn(endpoint->dev,
+ "Failed to create %s device. Aborting.\n",
+ devname);
goto error3;
}
}
- pr_info("xillybus: Created %d device files.\n",
- endpoint->num_channels);
+ dev_info(endpoint->dev, "Created %d device files.\n",
+ endpoint->num_channels);
return 0; /* succeed */
error3:
@@ -2093,8 +2082,8 @@ static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
endpoint->lowest_minor),
endpoint->num_channels);
- pr_info("xillybus: Removed %d device files.\n",
- endpoint->num_channels);
+ dev_info(endpoint->dev, "Removed %d device files.\n",
+ endpoint->num_channels);
}
@@ -2107,7 +2096,7 @@ struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
if (!endpoint) {
- pr_err("xillybus: Failed to allocate memory. Aborting.\n");
+ dev_err(dev, "Failed to allocate memory. Aborting.\n");
return NULL;
}
@@ -2141,8 +2130,8 @@ static int xilly_quiesce(struct xilly_endpoint *endpoint)
XILLY_TIMEOUT);
if (endpoint->idtlen < 0) {
- pr_err("xillybus: Failed to quiesce the device on "
- "exit. Quitting while leaving a mess.\n");
+ dev_err(endpoint->dev,
+ "Failed to quiesce the device on exit. Quitting while leaving a mess.\n");
return -ENODEV;
}
return 0; /* Success */
@@ -2209,7 +2198,7 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
XILLY_TIMEOUT);
if (endpoint->idtlen < 0) {
- pr_err("xillybus: No response from FPGA. Aborting.\n");
+ dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
rc = -ENODEV;
goto failed_quiesce;
}
@@ -2323,7 +2312,7 @@ static int __init xillybus_init(void)
xillybus_class = class_create(THIS_MODULE, xillyname);
if (IS_ERR(xillybus_class)) {
rc = PTR_ERR(xillybus_class);
- pr_warn("xillybus: Failed to register class xillybus\n");
+ pr_warn("Failed to register class xillybus\n");
return rc;
}
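Throughout xillybus_core.c the pr_* calls with a hard-coded "xillybus:" prefix become dev_* calls, which is why helpers such as malformed_message() now take the endpoint so they can reach its struct device. A sketch of that pattern with hypothetical names, not the Xillybus structures:

    #include <linux/device.h>

    /* Hypothetical endpoint-like context carrying the struct device used
     * for logging, so every message is tagged with the bound device name.
     */
    struct ep_ctx {
            struct device *dev;
    };

    static void report_bad_opcode(struct ep_ctx *ep, int opcode)
    {
            dev_warn(ep->dev, "Malformed message (skipping): opcode=%d\n",
                     opcode);
    }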
diff --git a/drivers/staging/xillybus/xillybus_of.c b/drivers/staging/xillybus/xillybus_of.c
index 92c2931f434..394bfea1af6 100644
--- a/drivers/staging/xillybus/xillybus_of.c
+++ b/drivers/staging/xillybus/xillybus_of.c
@@ -117,14 +117,15 @@ static int xilly_drv_probe(struct platform_device *op)
rc = of_address_to_resource(dev->of_node, 0, &endpoint->res);
if (rc) {
- pr_warn("xillybus: Failed to obtain device tree "
- "resource\n");
+ dev_warn(endpoint->dev,
+ "Failed to obtain device tree resource\n");
goto failed_request_regions;
}
if (!request_mem_region(endpoint->res.start,
resource_size(&endpoint->res), xillyname)) {
- pr_err("xillybus: request_mem_region failed. Aborting.\n");
+ dev_err(endpoint->dev,
+ "request_mem_region failed. Aborting.\n");
rc = -EBUSY;
goto failed_request_regions;
}
@@ -132,7 +133,8 @@ static int xilly_drv_probe(struct platform_device *op)
endpoint->registers = of_iomap(dev->of_node, 0);
if (!endpoint->registers) {
- pr_err("xillybus: Failed to map I/O memory. Aborting.\n");
+ dev_err(endpoint->dev,
+ "Failed to map I/O memory. Aborting.\n");
goto failed_iomap0;
}
@@ -141,8 +143,8 @@ static int xilly_drv_probe(struct platform_device *op)
rc = request_irq(irq, xillybus_isr, 0, xillyname, endpoint);
if (rc) {
- pr_err("xillybus: Failed to register IRQ handler. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "Failed to register IRQ handler. Aborting.\n");
rc = -ENODEV;
goto failed_register_irq;
}
@@ -198,15 +200,4 @@ static struct platform_driver xillybus_platform_driver = {
},
};
-static int __init xillybus_of_init(void)
-{
- return platform_driver_register(&xillybus_platform_driver);
-}
-
-static void __exit xillybus_of_exit(void)
-{
- platform_driver_unregister(&xillybus_platform_driver);
-}
-
-module_init(xillybus_of_init);
-module_exit(xillybus_of_exit);
+module_platform_driver(xillybus_platform_driver);
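module_platform_driver() generates exactly the register/unregister module_init()/module_exit() boilerplate this hunk deletes. A minimal sketch of a platform driver using it (all names hypothetical):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe  = demo_probe,
            .remove = demo_remove,
            .driver = {
                    .name = "demo",
            },
    };

    /* Expands to module_init()/module_exit() wrappers that call
     * platform_driver_register()/platform_driver_unregister().
     */
    module_platform_driver(demo_driver);

    MODULE_LICENSE("GPL");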
diff --git a/drivers/staging/xillybus/xillybus_pcie.c b/drivers/staging/xillybus/xillybus_pcie.c
index 67013652358..1811aa76421 100644
--- a/drivers/staging/xillybus/xillybus_pcie.c
+++ b/drivers/staging/xillybus/xillybus_pcie.c
@@ -134,7 +134,7 @@ static int xilly_probe(struct pci_dev *pdev,
struct xilly_endpoint *endpoint;
int rc = 0;
- endpoint = xillybus_init_endpoint(pdev, NULL, &pci_hw);
+ endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);
if (!endpoint)
return -ENOMEM;
@@ -148,29 +148,29 @@ static int xilly_probe(struct pci_dev *pdev,
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
if (rc) {
- pr_err("xillybus: pci_enable_device() failed. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "pci_enable_device() failed. Aborting.\n");
goto no_enable;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- pr_err("xillybus: Incorrect BAR configuration. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "Incorrect BAR configuration. Aborting.\n");
rc = -ENODEV;
goto bad_bar;
}
rc = pci_request_regions(pdev, xillyname);
if (rc) {
- pr_err("xillybus: pci_request_regions() failed. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "pci_request_regions() failed. Aborting.\n");
goto failed_request_regions;
}
endpoint->registers = pci_iomap(pdev, 0, 128);
if (!endpoint->registers) {
- pr_err("xillybus: Failed to map BAR 0. Aborting.\n");
+ dev_err(endpoint->dev, "Failed to map BAR 0. Aborting.\n");
goto failed_iomap0;
}
@@ -178,16 +178,16 @@ static int xilly_probe(struct pci_dev *pdev,
/* Set up a single MSI interrupt */
if (pci_enable_msi(pdev)) {
- pr_err("xillybus: Failed to enable MSI interrupts. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "Failed to enable MSI interrupts. Aborting.\n");
rc = -ENODEV;
goto failed_enable_msi;
}
rc = request_irq(pdev->irq, xillybus_isr, 0, xillyname, endpoint);
if (rc) {
- pr_err("xillybus: Failed to register MSI handler. "
- "Aborting.\n");
+ dev_err(endpoint->dev,
+ "Failed to register MSI handler. Aborting.\n");
rc = -ENODEV;
goto failed_register_msi;
}
@@ -202,8 +202,7 @@ static int xilly_probe(struct pci_dev *pdev,
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
endpoint->dma_using_dac = 0;
else {
- pr_err("xillybus: Failed to set DMA mask. "
- "Aborting.\n");
+ dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
rc = -ENODEV;
goto failed_dmamask;
}
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2c4ed52ca84..79ce363b2ea 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -648,6 +648,9 @@ static ssize_t reset_store(struct device *dev,
zram = dev_to_zram(dev);
bdev = bdget_disk(zram->disk, 0);
+ if (!bdev)
+ return -ENOMEM;
+
/* Do not reset an active device! */
if (bdev->bd_holders)
return -EBUSY;
@@ -660,8 +663,7 @@ static ssize_t reset_store(struct device *dev,
return -EINVAL;
/* Make sure all pending I/O is finished */
- if (bdev)
- fsync_bdev(bdev);
+ fsync_bdev(bdev);
zram_reset_device(zram, true);
return len;
@@ -896,13 +898,10 @@ static void destroy_device(struct zram *zram)
sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
- if (zram->disk) {
- del_gendisk(zram->disk);
- put_disk(zram->disk);
- }
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
- if (zram->queue)
- blk_cleanup_queue(zram->queue);
+ blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
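bdget_disk() can fail, so reset_store() now bails out before touching bdev->bd_holders, which in turn lets the later "if (bdev)" guard around fsync_bdev() go away. The general shape of that validate-once-up-front pattern, as a standalone sketch unrelated to the zram code:

    #include <stdio.h>
    #include <stdlib.h>

    struct bdev { int holders; };

    /* Stand-in for bdget_disk(): may return NULL on failure. */
    static struct bdev *get_bdev(void) { return calloc(1, sizeof(struct bdev)); }

    static int reset_device(void)
    {
            struct bdev *bdev = get_bdev();

            if (!bdev)                     /* validate once, up front */
                    return -1;
            if (bdev->holders) {           /* safe: bdev is known non-NULL */
                    free(bdev);
                    return -2;
            }
            /* ... flush and reset; no further NULL checks needed ... */
            free(bdev);
            return 0;
    }

    int main(void) { printf("%d\n", reset_device()); return 0; }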
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
index 7fab032298f..0ae13cd0908 100644
--- a/drivers/staging/zsmalloc/Kconfig
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -1,5 +1,6 @@
config ZSMALLOC
bool "Memory allocator for compressed pages"
+ depends on MMU
default n
help
zsmalloc is a slab-based memory allocator designed to store
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 38e44b9abf0..d70e9119e90 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -805,14 +805,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
int iscsi_task_attr;
int sam_task_attr;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->cmd_pdus++;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->num_cmds++;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->cmd_pdus);
hdr = (struct iscsi_scsi_req *) buf;
payload_length = ntoh24(hdr->dlength);
@@ -1254,20 +1247,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
int rc;
if (!payload_length) {
- pr_err("DataOUT payload is ZERO, protocol error.\n");
- return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
- buf);
+ pr_warn("DataOUT payload is ZERO, ignoring.\n");
+ return 0;
}
/* iSCSI write */
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rx_data_octets += payload_length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(payload_length, &conn->sess->rx_data_octets);
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("DataSegmentLength: %u is greater than"
@@ -1486,7 +1471,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload);
static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
{
- struct iscsi_cmd *cmd;
+ struct iscsi_cmd *cmd = NULL;
struct iscsi_data *hdr = (struct iscsi_data *)buf;
int rc;
bool data_crc_failed = false;
@@ -1954,6 +1939,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
(unsigned char *)hdr);
}
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+ (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+ pr_err("Multi sequence text commands currently not supported\n");
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+ (unsigned char *)hdr);
+ }
+
pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
hdr->exp_statsn, payload_length);
@@ -2630,14 +2622,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return -1;
}
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->tx_data_octets += datain.length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(datain.length, &conn->sess->tx_data_octets);
/*
* Special case for successfully execution w/ both DATAIN
* and Sense Data.
@@ -3162,9 +3147,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
if (inc_stat_sn)
cmd->stat_sn = conn->stat_sn++;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rsp_pdus++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->rsp_pdus);
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
@@ -3374,6 +3357,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_tiqn *tiqn;
struct iscsi_tpg_np *tpg_np;
int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+ int target_name_printed;
unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
@@ -3411,19 +3395,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
continue;
}
- len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
- len += 1;
-
- if ((len + payload_len) > buffer_len) {
- end_of_buf = 1;
- goto eob;
- }
- memcpy(payload + payload_len, buf, len);
- payload_len += len;
+ target_name_printed = 0;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+ /* If demo_mode_discovery=0 and generate_node_acls=0
+ * (demo mode dislabed) do not return
+	 * (demo mode disabled) do not return
+ * TargetName+TargetAddress unless a NodeACL exists.
+ */
+
+ if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+ (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+ (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
+ cmd->conn->sess->sess_ops->InitiatorName))) {
+ continue;
+ }
+
spin_lock(&tpg->tpg_state_lock);
if ((tpg->tpg_state == TPG_STATE_FREE) ||
(tpg->tpg_state == TPG_STATE_INACTIVE)) {
@@ -3438,6 +3426,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np);
+ if (!target_name_printed) {
+ len = sprintf(buf, "TargetName=%s",
+ tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy(payload + payload_len, buf, len);
+ payload_len += len;
+ target_name_printed = 1;
+ }
+
len = sprintf(buf, "TargetAddress="
"%s:%hu,%hu",
(inaddr_any == false) ?
@@ -4092,9 +4096,7 @@ restart:
* hit default in the switch below.
*/
memset(buffer, 0xff, ISCSI_HDR_LEN);
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->conn_digest_errors++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->conn_digest_errors);
} else {
pr_debug("Got HeaderDigest CRC32C"
" 0x%08x\n", checksum);
@@ -4381,7 +4383,7 @@ int iscsit_close_connection(
int iscsit_close_session(struct iscsi_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (atomic_read(&sess->nconn)) {
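The SendTargets rework defers emitting "TargetName=" until the first portal group that actually passes the discovery filter, tracked by the target_name_printed flag, so a target whose TPGs are all hidden produces no output at all. The control-flow pattern in isolation (standalone sketch, not the iSCSI code):

    #include <stdio.h>

    struct tpg { int visible; int addr; };

    static void emit_target(const char *name, const struct tpg *tpgs, int n)
    {
            int name_printed = 0;

            for (int i = 0; i < n; i++) {
                    if (!tpgs[i].visible)   /* filtered out: no header yet */
                            continue;
                    if (!name_printed) {    /* header once, only if needed */
                            printf("TargetName=%s\n", name);
                            name_printed = 1;
                    }
                    printf("TargetAddress=10.0.0.%d\n", tpgs[i].addr);
            }
    }

    int main(void)
    {
            struct tpg tpgs[] = { { 0, 1 }, { 1, 2 }, { 1, 3 } };
            emit_target("iqn.2013-11.example:demo", tpgs, 3);
            return 0;
    }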
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 7505fddca15..de77d9aa22c 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open(
/*
* Set Identifier.
*/
- chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ chap->id = conn->tpg->tpg_chap_id++;
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
*aic_len += 1;
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
@@ -146,6 +146,7 @@ static int chap_server_compute_md5(
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
@@ -184,7 +185,9 @@ static int chap_server_compute_md5(
goto out;
}
- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ /* Include the terminating NULL in the compare */
+ compare_len = strlen(auth->userid) + 1;
+ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
pr_err("CHAP_N values do not match!\n");
goto out;
}
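The CHAP_N fix matters because memcmp() over strlen(userid) bytes accepts any initiator name that merely starts with the configured userid; including the terminating NUL in a strncmp() forces an exact match. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    static int name_matches(const char *sent, const char *configured)
    {
            size_t compare_len = strlen(configured) + 1;  /* include the '\0' */

            return strncmp(sent, configured, compare_len) == 0;
    }

    int main(void)
    {
            /* A prefix no longer passes: "adminX" vs "admin" differ at the NUL. */
            printf("%d %d\n", name_matches("admin", "admin"),
                   name_matches("adminX", "admin"));      /* prints: 1 0 */
            return 0;
    }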
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index fd145259361..e3318edb233 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
\
- return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+ return sprintf(page, "%u\n", nacl->node_attrib.name); \
} \
\
static ssize_t iscsi_nacl_attrib_store_##name( \
@@ -897,7 +897,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
if (!se_nacl_new)
return ERR_PTR(-ENOMEM);
- cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
/*
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
* when converting a NdoeACL from demo mode -> explict
@@ -920,9 +920,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
return ERR_PTR(-ENOMEM);
}
- stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+ stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
stats_cg->default_groups[1] = NULL;
- config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
return se_nacl;
@@ -967,7 +967,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
@@ -1041,6 +1041,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(prod_mode_write_protect);
TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_discovery,
+ */
+DEF_TPG_ATTRIB(demo_mode_discovery);
+TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_erl
+ */
+DEF_TPG_ATTRIB(default_erl);
+TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -1051,6 +1061,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_cache_dynamic_acls.attr,
&iscsi_tpg_attrib_demo_mode_write_protect.attr,
&iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ &iscsi_tpg_attrib_demo_mode_discovery.attr,
+ &iscsi_tpg_attrib_default_erl.attr,
NULL,
};
@@ -1514,21 +1526,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
return ERR_PTR(-ENOMEM);
}
- stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
- stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
- stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
- stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
- stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+ stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
+ stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
+ stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
+ stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
+ stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
stats_cg->default_groups[5] = NULL;
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
"iscsi_logout_stats", &iscsi_stat_logout_cit);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
@@ -1784,6 +1796,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_STATUS;
+
+ if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+ }
cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
return 0;
@@ -1815,21 +1832,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ return tpg->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+ return tpg->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+ return tpg->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
@@ -1837,7 +1854,7 @@ static int lio_tpg_check_demo_mode_write_protect(
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+ return tpg->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
@@ -1845,7 +1862,7 @@ static int lio_tpg_check_prod_mode_write_protect(
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+ return tpg->tpg_attrib.prod_mode_write_protect;
}
static void lio_tpg_release_fabric_acl(
@@ -1908,9 +1925,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
se_node_acl);
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
- ISCSI_NODE_ATTRIB(acl)->nacl = acl;
- iscsit_set_default_node_attribues(acl);
+ acl->node_attrib.nacl = acl;
+ iscsit_set_default_node_attribues(acl, tpg);
}
static int lio_check_stop_free(struct se_cmd *se_cmd)
@@ -1995,17 +2015,17 @@ int iscsi_target_register_configfs(void)
* Setup default attribute lists for various fabric->tf_cit_tmpl
* sturct config_item_type's
*/
- TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+ fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
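The ISCSI_TPG_ATTRIB()/NODE_STAT_GRPS()/TF_CIT_TMPL() wrappers removed here only hid a plain member access, so the patch spells the access out at each site. The same transformation in miniature, with hypothetical types:

    #include <stdio.h>

    struct tpg_attrib { unsigned int authentication; };
    struct portal_group { struct tpg_attrib tpg_attrib; };

    /* Before: a macro that merely obscures where the field lives. */
    #define TPG_ATTRIB(t) (&(t)->tpg_attrib)

    static unsigned int auth_required(struct portal_group *tpg)
    {
            /* was: return TPG_ATTRIB(tpg)->authentication; */
            return tpg->tpg_attrib.authentication;
    }

    int main(void)
    {
            struct portal_group tpg = { .tpg_attrib = { .authentication = 1 } };
            printf("%u\n", auth_required(&tpg));
            return 0;
    }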
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 9a5721b8ff9..48f7b3bf4e8 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -37,9 +37,6 @@
#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
#define NA_RANDOM_R2T_OFFSETS 0
-#define NA_DEFAULT_ERL 0
-#define NA_DEFAULT_ERL_MAX 2
-#define NA_DEFAULT_ERL_MIN 0
/* struct iscsi_tpg_attrib sanity values */
#define TA_AUTHENTICATION 1
@@ -58,6 +55,8 @@
#define TA_DEMO_MODE_WRITE_PROTECT 1
/* Disabled by default in production mode w/ explict ACLs */
#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_DEMO_MODE_DISCOVERY 1
+#define TA_DEFAULT_ERL 0
#define TA_CACHE_CORE_NPS 0
@@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table {
CMDSN_NORMAL_OPERATION = 0,
CMDSN_LOWER_THAN_EXP = 1,
CMDSN_HIGHER_THAN_EXP = 2,
+ CMDSN_MAXCMDSN_OVERRUN = 3,
};
/* Used for iscsi_handle_immediate_data() return values */
@@ -650,14 +650,13 @@ struct iscsi_session {
/* Used for session reference counting */
int session_usage_count;
int session_waiting_on_uc;
- u32 cmd_pdus;
- u32 rsp_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
- u32 conn_digest_errors;
- u32 conn_timeout_errors;
+ atomic_long_t cmd_pdus;
+ atomic_long_t rsp_pdus;
+ atomic_long_t tx_data_octets;
+ atomic_long_t rx_data_octets;
+ atomic_long_t conn_digest_errors;
+ atomic_long_t conn_timeout_errors;
u64 creation_time;
- spinlock_t session_stats_lock;
/* Number of active connections */
atomic_t nconn;
atomic_t session_continuation;
@@ -755,11 +754,6 @@ struct iscsi_node_acl {
struct se_node_acl se_node_acl;
};
-#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
-
-#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
-#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
-
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
@@ -769,6 +763,8 @@ struct iscsi_tpg_attrib {
u32 default_cmdsn_depth;
u32 demo_mode_write_protect;
u32 prod_mode_write_protect;
+ u32 demo_mode_discovery;
+ u32 default_erl;
struct iscsi_portal_group *tpg;
};
@@ -835,12 +831,6 @@ struct iscsi_portal_group {
struct list_head tpg_list;
} ____cacheline_aligned;
-#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
-#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
-#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
-#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
-#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
-
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
@@ -871,8 +861,6 @@ struct iscsi_tiqn {
struct iscsi_logout_stats logout_stats;
} ____cacheline_aligned;
-#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
-
struct iscsit_global {
/* In core shutdown */
u32 in_shutdown;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 6c7a5104a4c..7087c736daa 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
cmd->maxcmdsn_inc = 1;
- if (!mutex_trylock(&sess->cmdsn_mutex)) {
- sess->max_cmd_sn += 1;
- pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
- return;
- }
+ mutex_lock(&sess->cmdsn_mutex);
sess->max_cmd_sn += 1;
pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
mutex_unlock(&sess->cmdsn_mutex);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 41052e512d9..0d1e6ee3e99 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -757,7 +757,7 @@ int iscsit_check_post_dataout(
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
struct iscsi_session *sess = (struct iscsi_session *) data;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
@@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
- sess->conn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
spin_unlock(&tiqn->sess_err_stats.lock);
}
}
@@ -801,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
* Only start Time2Retain timer when the associated TPG is still in
* an ACTIVE (eg: not disabled or shutdown) state.
*/
- spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
- tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
- spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ spin_lock(&sess->tpg->tpg_state_lock);
+ tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&sess->tpg->tpg_state_lock);
if (!tpg_active)
return;
@@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
*/
int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1794c753954..4eb93b2b647 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1(
}
sess->creation_time = get_jiffies_64();
- spin_lock_init(&sess->session_stats_lock);
/*
* The FFP CmdSN window values will be allocated from the TPG's
* Initiator Node's ACL once the login has been successfully completed.
@@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2(
* Assign a new TPG Session Handle. Note this is protected with
* struct iscsi_portal_group->np_login_sem from iscsit_access_np().
*/
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
if (!sess->tsih)
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
/*
* Create the default params from user defined values..
*/
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ conn->tpg->param_list, 1) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
@@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2(
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2(
iscsi_login_set_conn_values(sess, conn, pdu->cid);
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
@@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2(
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -691,7 +690,7 @@ int iscsi_post_login_handler(
int stop_timer = 0;
struct iscsi_session *sess = conn->sess;
struct se_session *se_sess = sess->se_sess;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct iscsi_thread_set *ts;
@@ -1154,7 +1153,7 @@ old_sess_out:
spin_lock_bh(&conn->sess->conn_lock);
if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
struct se_portal_group *se_tpg =
- &ISCSI_TPG_C(conn)->tpg_se_tpg;
+ &conn->tpg->tpg_se_tpg;
atomic_set(&conn->sess->session_continuation, 0);
spin_unlock_bh(&conn->sess->conn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index ef6d836a4d0..83c965c6538 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -88,7 +88,7 @@ int extract_param(
if (len < 0)
return -1;
- if (len > max_length) {
+ if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
return -1;
@@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication(
iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
se_node_acl);
- auth = ISCSI_NODE_AUTH(iscsi_nacl);
+ auth = &iscsi_nacl->node_auth;
}
} else {
/*
@@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero(
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ if (conn->tpg->tpg_attrib.authentication &&
!strncmp(param->value, NONE, 4)) {
pr_err("Initiator sent AuthMethod=None but"
" Target is enforcing iSCSI Authentication,"
@@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero(
return -1;
}
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ if (conn->tpg->tpg_attrib.authentication &&
!login->auth_complete)
return 0;
@@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
}
if (!login->auth_complete &&
- ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ conn->tpg->tpg_attrib.authentication) {
pr_err("Initiator is requesting CSG: 1, has not been"
" successfully authenticated, and the Target is"
" enforcing iSCSI Authentication, login failed.\n");
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 93bdc475eb0..16454a922e2 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
}
void iscsit_set_default_node_attribues(
- struct iscsi_node_acl *acl)
+ struct iscsi_node_acl *acl,
+ struct iscsi_portal_group *tpg)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
@@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues(
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
- a->default_erl = NA_DEFAULT_ERL;
+ a->default_erl = tpg->tpg_attrib.default_erl;
}
int iscsit_na_dataout_timeout(
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef2..0c69a46a62e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,8 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
-extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
+ struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f788e8b5e85..10339551030 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->cmd_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rsp_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->tx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->tx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->rx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_digest_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_digest_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_timeout_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_timeout_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
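
The hunks above convert the per-session statistics from plain integers formatted with %u/%llu to atomic_long_t counters read via atomic_long_read() and printed with %lu. The same lock-free pattern can be sketched in userspace with C11 atomics; the counter names below are illustrative only, not the kernel fields.

#include <stdatomic.h>
#include <stdio.h>

/* Lock-free counters, analogous to the atomic_long_t session stats. */
static atomic_long cmd_pdus;
static atomic_long rx_data_octets;

static void account_data_in(long octets)
{
	atomic_fetch_add(&cmd_pdus, 1);			/* ~ atomic_long_inc() */
	atomic_fetch_add(&rx_data_octets, octets);	/* ~ atomic_long_add() */
}

int main(void)
{
	account_data_in(512);
	account_data_in(4096);
	printf("cmd_pdus:       %ld\n", atomic_load(&cmd_pdus));	/* ~ atomic_long_read() */
	printf("rx_data_octets: %ld\n", atomic_load(&rx_data_octets));
	return 0;
}
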
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4faeb47fa5e..39761837608 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+ a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
+ a->default_erl = TA_DEFAULT_ERL;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -237,7 +239,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto err_out;
- ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+ tpg->tpg_attrib.tpg = tpg;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
@@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
return -EINVAL;
}
- if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+ if (tpg->tpg_attrib.authentication) {
if (!strcmp(param->value, NONE)) {
ret = iscsi_update_param_value(param, CHAP);
if (ret)
@@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect(
return 0;
}
+
+int iscsit_ta_demo_mode_discovery(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_discovery = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
+ " %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
+ "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_default_erl(
+ struct iscsi_portal_group *tpg,
+ u32 default_erl)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
+ pr_err("Illegal value for default_erl: %u\n", default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
+
+ return 0;
+}
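
iscsit_ta_demo_mode_discovery() and iscsit_ta_default_erl() validate the new per-TPG attributes (0/1 and 0/1/2 respectively) before storing them. Once exported through configfs they are driven from userspace by writing the attribute file; a hedged sketch follows, where the mount point, IQN and TPG tag in the path are placeholders for whatever target is actually being configured.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a single value into a configfs attribute file. */
static int write_attr(const char *path, const char *value)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, value, strlen(value)) < 0) {
		perror("write");
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Placeholder path: substitute the real IQN and TPG tag. */
	return write_attr("/sys/kernel/config/target/iscsi/"
			  "iqn.2003-01.org.example:t0/tpgt_1/attrib/default_erl",
			  "2") ? 1 : 0;
}
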
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index b77693e2c20..213c0fc7fdc 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b0cac0c342e..0819e688a39 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
*/
if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
- " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+ " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
sess->max_cmd_sn);
- ret = CMDSN_ERROR_CANNOT_RECOVER;
+ ret = CMDSN_MAXCMDSN_OVERRUN;
} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;
@@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
+ case CMDSN_MAXCMDSN_OVERRUN:
+ default:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
- ret = cmdsn_ret;
- break;
- default:
- reason = ISCSI_REASON_PROTOCOL_ERROR;
- reject = true;
- ret = cmdsn_ret;
+ /*
+ * Existing callers for iscsit_sequence_cmd() will silently
+ * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
+ * return for CMDSN_MAXCMDSN_OVERRUN as well..
+ */
+ ret = CMDSN_LOWER_THAN_EXP;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
@@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
- conn->sess->conn_timeout_errors++;
+ atomic_long_inc(&conn->sess->conn_timeout_errors);
spin_unlock_bh(&tiqn->sess_err_stats.lock);
}
}
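
The CmdSN handling above leans on iscsi_sna_gt() to decide whether a received CmdSN has run past MaxCmdSN. Serial-number comparison over a 32-bit space (RFC 1982 style, 2^31 window) is commonly written as a signed difference; the helper below is a standalone illustration of that technique, not the kernel's exact helper, and it assumes two's-complement conversion.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* "s1 greater than s2" over a 2^31 window; stays correct across wraparound. */
static bool sna_gt(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
	assert(sna_gt(11, 10));			/* ordinary case            */
	assert(sna_gt(5, 0xfffffff0u));		/* s1 wrapped past 2^32 - 1 */
	assert(!sna_gt(0xfffffff0u, 5));	/* and the reverse          */
	return 0;
}
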
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0f6d69dabca..1b41e677615 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth(
return sdev->queue_depth;
}
+static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag);
+
+ if (tag)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag = 0;
+
+ return tag;
+}
+
/*
* Locate the SAM Task Attr from struct scsi_cmnd *
*/
@@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
-
+ if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+ goto out_done;
+ }
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
@@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
}
tl_cmd->sc = sc;
+ tl_cmd->sc_cmd_tag = sc->tag;
INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
queue_work(tcm_loop_workqueue, &tl_cmd->work);
return 0;
@@ -242,41 +261,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
-static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+ struct tcm_loop_nexus *tl_nexus,
+ int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
- struct se_portal_group *se_tpg;
struct se_session *se_sess;
+ struct se_portal_group *se_tpg;
struct tcm_loop_cmd *tl_cmd = NULL;
- struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tmr *tl_tmr = NULL;
- struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED, rc;
- /*
- * Locate the tcm_loop_hba_t pointer
- */
- tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
- se_sess = tl_nexus->se_sess;
- /*
- * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
- */
- tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- se_tpg = &tl_tpg->tl_se_tpg;
+ int ret = TMR_FUNCTION_FAILED, rc;
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
- return FAILED;
+ return ret;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
@@ -287,6 +286,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
se_cmd = &tl_cmd->tl_se_cmd;
+ se_tpg = &tl_tpg->tl_se_tpg;
+ se_sess = tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
@@ -294,17 +295,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
- rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
+ rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
if (rc < 0)
goto release;
+
+ if (tmr == TMR_ABORT_TASK)
+ se_cmd->se_tmr_req->ref_task_tag = task;
+
/*
- * Locate the underlying TCM struct se_lun from sc->device->lun
+ * Locate the underlying TCM struct se_lun
*/
- if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
+ if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
+ ret = TMR_LUN_DOES_NOT_EXIST;
goto release;
+ }
/*
- * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
- * to wake us up.
+ * Queue the TMR to TCM Core and sleep waiting for
+ * tcm_loop_queue_tm_rsp() to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
@@ -312,8 +319,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
- ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
- SUCCESS : FAILED;
+ ret = se_cmd->se_tmr_req->response;
release:
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
@@ -323,6 +329,94 @@ release:
return ret;
}
+static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+
+ /*
+ * Locate the tcm_loop_hba_t pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ sc->tag, TMR_ABORT_TASK);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+
+ /*
+ * Locate the tcm_loop_hba_t pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ 0, TMR_LUN_RESET);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+static int tcm_loop_target_reset(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+
+ /*
+ * Locate the tcm_loop_hba_t pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ if (!tl_hba) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ if (tl_tpg) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return SUCCESS;
+ }
+ return FAILED;
+}
+
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
@@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
+ if (sd->tagged_supported) {
+ scsi_activate_tcq(sd, sd->queue_depth);
+ scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
+ sd->host->cmd_per_lun);
+ } else {
+ scsi_adjust_queue_depth(sd, 0,
+ sd->host->cmd_per_lun);
+ }
+
return 0;
}
@@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = {
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
+ .change_queue_type = tcm_loop_change_queue_type,
+ .eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
+ .eh_target_reset_handler = tcm_loop_target_reset,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,
@@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
- return 1;
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ return tl_cmd->sc_cmd_tag;
}
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
@@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus(
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba = tpg->tl_hba;
- tl_nexus = tpg->tl_hba->tl_nexus;
+ if (!tl_hba)
+ return -ENODEV;
+
+ tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -1061,8 +1173,56 @@ check_newline:
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
+static ssize_t tcm_loop_tpg_show_transport_status(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ const char *status = NULL;
+ ssize_t ret = -EINVAL;
+
+ switch (tl_tpg->tl_transport_status) {
+ case TCM_TRANSPORT_ONLINE:
+ status = "online";
+ break;
+ case TCM_TRANSPORT_OFFLINE:
+ status = "offline";
+ break;
+ default:
+ break;
+ }
+
+ if (status)
+ ret = snprintf(page, PAGE_SIZE, "%s\n", status);
+
+ return ret;
+}
+
+static ssize_t tcm_loop_tpg_store_transport_status(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+
+ if (!strncmp(page, "online", 6)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return count;
+ }
+ if (!strncmp(page, "offline", 7)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+ return count;
+ }
+ return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_nexus.attr,
+ &tcm_loop_tpg_transport_status.attr,
NULL,
};
@@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
/*
* Once fabric->tf_ops has been setup, now register the fabric for
* use within TCM
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index dd7a84ee78e..54c59d0b660 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -10,6 +10,8 @@
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
+ /* Tagged command queueing */
+ u32 sc_cmd_tag;
/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */
@@ -40,8 +42,12 @@ struct tcm_loop_nacl {
struct se_node_acl se_node_acl;
};
+#define TCM_TRANSPORT_ONLINE 0
+#define TCM_TRANSPORT_OFFLINE 1
+
struct tcm_loop_tpg {
unsigned short tl_tpgt;
+ unsigned short tl_transport_status;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
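
The new tl_transport_status field and TCM_TRANSPORT_ONLINE/OFFLINE values above back the transport_status configfs attribute added in tcm_loop.c, which lets a test harness force DID_TRANSPORT_DISRUPTED on queued commands until a target reset flips the port back online. The store handler accepts only the two literal strings; a standalone sketch of that parsing, with names mirroring but not copied from the driver:

#include <assert.h>
#include <string.h>

enum { TCM_TRANSPORT_ONLINE = 0, TCM_TRANSPORT_OFFLINE = 1 };

/* Map the value written to transport_status onto a state; -1 rejects it. */
static int parse_transport_status(const char *page)
{
	if (!strncmp(page, "online", 6))
		return TCM_TRANSPORT_ONLINE;
	if (!strncmp(page, "offline", 7))
		return TCM_TRANSPORT_OFFLINE;
	return -1;
}

int main(void)
{
	assert(parse_transport_status("online\n") == TCM_TRANSPORT_ONLINE);
	assert(parse_transport_status("offline\n") == TCM_TRANSPORT_OFFLINE);
	assert(parse_transport_status("unknown") == -1);
	return 0;
}
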
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e51b09a04d5..24884cac19c 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
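
This hunk, like the tcm_loop one earlier and the target_core_configfs ones later, replaces the TF_CIT_TMPL() accessor with a direct reference to the embedded tf_cit_tmpl member. Assuming the old macro simply returned a pointer to that member (the definitions below are reconstructions with illustrative names, not quoted from the tree), the two spellings touch exactly the same storage, which is why the conversion is purely mechanical:

#include <assert.h>

struct cit          { const void *ct_attrs; };
struct cit_template { struct cit tfc_wwn_cit; };
struct fabric       { struct cit_template tf_cit_tmpl; };

/* Reconstructed accessor: a pointer to the embedded template struct. */
#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)

int main(void)
{
	static int wwn_attrs;			/* stand-in for the attribute array */
	struct fabric f = { { { 0 } } };

	TF_CIT_TMPL(&f)->tfc_wwn_cit.ct_attrs = &wwn_attrs;	   /* old spelling */
	assert(f.tf_cit_tmpl.tfc_wwn_cit.ct_attrs == &wwn_attrs); /* new spelling */
	return 0;
}
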
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47244102281..fdcee326bfb 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -44,7 +44,7 @@
static sense_reason_t core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port, int explict, int offline);
+ struct se_port *port, int explicit, int offline);
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
- buf[off] = 0x80; /* T_SUP */
- buf[off] |= 0x40; /* O_SUP */
- buf[off] |= 0x8; /* U_SUP */
- buf[off] |= 0x4; /* S_SUP */
- buf[off] |= 0x2; /* AN_SUP */
- buf[off++] |= 0x1; /* AO_SUP */
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
/*
* TARGET PORT GROUP
*/
@@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
if (ext_hdr != 0) {
buf[4] = 0x10;
/*
- * Set the implict transition time (in seconds) for the application
+ * Set the implicit transition time (in seconds) for the application
* client to use as a base for its transition timeout value.
*
* Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
@@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (tg_pt_gp)
- buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
+ buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
}
@@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
/*
- * SET_TARGET_PORT_GROUPS for explict ALUA operation.
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
*
* See spc4r17 section 6.35
*/
@@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
- * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
@@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
- " while TPGS_EXPLICT_ALUA is disabled\n");
+ " while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
@@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
/*
- * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
+ * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
@@ -487,7 +482,7 @@ static inline int core_alua_state_transition(
u8 *alua_ascq)
{
/*
- * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {
@@ -515,9 +510,9 @@ static inline int core_alua_state_transition(
}
/*
- * return 1: Is used to signal LUN not accecsable, and check condition/not ready
+ * return 1: Is used to signal LUN not accessible, and check condition/not ready
* return 0: Used to signal success
- * reutrn -1: Used to signal failure, and invalid cdb field
+ * return -1: Used to signal failure, and invalid cdb field
*/
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
@@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd)
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
/*
- * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP..
*/
- if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
return 0;
switch (out_alua_state) {
@@ -620,13 +615,13 @@ out:
}
/*
- * Check implict and explict ALUA state change request.
+ * Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
core_alua_check_transition(int state, int *primary)
{
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
case ALUA_ACCESS_STATE_STANDBY:
case ALUA_ACCESS_STATE_UNAVAILABLE:
@@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary)
static char *core_alua_dump_state(int state)
{
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
@@ -676,10 +671,10 @@ char *core_alua_dump_status(int status)
switch (status) {
case ALUA_STATUS_NONE:
return "None";
- case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
- return "Altered by Explict STPG";
- case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
- return "Altered by Implict ALUA";
+ case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
+ return "Altered by Explicit STPG";
+ case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
+ return "Altered by Implicit ALUA";
default:
return "Unknown";
}
@@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt(
struct se_node_acl *nacl,
unsigned char *md_buf,
int new_state,
- int explict)
+ int explicit)
{
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
@@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt(
old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
- tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
/*
* Check for the optional ALUA primary state transition delay
*/
@@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt(
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
- * ACCESS STATE CHAGED.
+ * ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention
@@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt(
lacl = se_deve->se_lun_acl;
/*
* se_deve->se_lun_acl pointer may be NULL for a
- * entry created without explict Node+MappedLUN ACLs
+ * entry created without explicit Node+MappedLUN ACLs
*/
if (!lacl)
continue;
- if (explict &&
+ if (explicit &&
(nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
(l_port != NULL) && (l_port == port))
continue;
@@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt(
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " from primary access state %s to %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ " from primary access state %s to %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
core_alua_dump_state(new_state));
@@ -880,7 +875,7 @@ int core_alua_do_port_transition(
struct se_port *l_port,
struct se_node_acl *l_nacl,
int new_state,
- int explict)
+ int explicit)
{
struct se_device *dev;
struct se_port *port;
@@ -917,7 +912,7 @@ int core_alua_do_port_transition(
* success.
*/
core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
- md_buf, new_state, explict);
+ md_buf, new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
@@ -946,7 +941,7 @@ int core_alua_do_port_transition(
continue;
/*
* If the target behavior port asymmetric access state
- * is changed for any target port group accessiable via
+ * is changed for any target port group accessible via
* a logical unit within a LU group, the target port
* behavior group asymmetric access states for the same
* target port group accessible via other logical units
@@ -970,7 +965,7 @@ int core_alua_do_port_transition(
* success.
*/
core_alua_do_transition_tg_pt(tg_pt_gp, port,
- nacl, md_buf, new_state, explict);
+ nacl, md_buf, new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
@@ -987,7 +982,7 @@ int core_alua_do_port_transition(
pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
- l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
+ l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
core_alua_dump_state(new_state));
atomic_dec(&lu_gp->lu_gp_ref_cnt);
@@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata(
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
- int explict,
+ int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state(
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
- port->sep_tg_pt_secondary_stat = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ port->sep_tg_pt_secondary_stat = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " to secondary access state: %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ " to secondary access state: %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
- * we want to re-assocate a given lu_gp_mem with default_lu_gp.
+ * we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != default_lu_gp)
@@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+ ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*
- * Enable both explict and implict ALUA support by default
+ * Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+ TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
- tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
+
+ /*
+ * Enable all supported states
+ */
+ tg_pt_gp->tg_pt_gp_alua_supported_states =
+ ALUA_T_SUP | ALUA_O_SUP |
+ ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
if (def_group) {
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp(
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
- * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
+ * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp(
* core_alua_free_tg_pt_gp_mem().
*
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
- * assume we want to re-assocate a given tg_pt_gp_mem with
+ * assume we want to re-associate a given tg_pt_gp_mem with
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
- (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
- return sprintf(page, "Implict and Explict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
- return sprintf(page, "Implict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
- return sprintf(page, "Explict\n");
+ if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
+ (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
+ return sprintf(page, "Implicit and Explicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
+ return sprintf(page, "Implicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
+ return sprintf(page, "Explicit\n");
else
return sprintf(page, "None\n");
}
@@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type(
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+ TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
else if (tmp == 2)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
else if (tmp == 1)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;
@@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs(
return count;
}
-ssize_t core_alua_show_implict_trans_secs(
+ssize_t core_alua_show_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}
-ssize_t core_alua_store_implict_trans_secs(
+ssize_t core_alua_store_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
@@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs(
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
- pr_err("Unable to extract implict_trans_secs\n");
+ pr_err("Unable to extract implicit_trans_secs\n");
return ret;
}
- if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
- pr_err("Passed implict_trans_secs: %lu, exceeds"
- " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
- ALUA_MAX_IMPLICT_TRANS_SECS);
+ if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
+ pr_err("Passed implicit_trans_secs: %lu, exceeds"
+ " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
+ ALUA_MAX_IMPLICIT_TRANS_SECS);
return -EINVAL;
}
- tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
return count;
}
@@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status(
return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
- (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index e539c3e7f4a..88e2e835f14 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -7,15 +7,15 @@
* from spc4r17 section 6.4.2 Table 135
*/
#define TPGS_NO_ALUA 0x00
-#define TPGS_IMPLICT_ALUA 0x10
-#define TPGS_EXPLICT_ALUA 0x20
+#define TPGS_IMPLICIT_ALUA 0x10
+#define TPGS_EXPLICIT_ALUA 0x20
/*
* ASYMMETRIC ACCESS STATE field
*
* from spc4r17 section 6.27 Table 245
*/
-#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
+#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
@@ -23,13 +23,24 @@
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
+ * from spc4r36j section 6.37 Table 306
+ */
+#define ALUA_T_SUP 0x80
+#define ALUA_O_SUP 0x40
+#define ALUA_LBD_SUP 0x10
+#define ALUA_U_SUP 0x08
+#define ALUA_S_SUP 0x04
+#define ALUA_AN_SUP 0x02
+#define ALUA_AO_SUP 0x01
+
+/*
* REPORT_TARGET_PORT_GROUP STATUS CODE
*
* from spc4r17 section 6.27 Table 246
*/
#define ALUA_STATUS_NONE 0x00
-#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
-#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
+#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
@@ -46,17 +57,17 @@
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
- * Used for implict and explict ALUA transitional delay, that is disabled
+ * Used for implicit and explicit ALUA transitional delay, that is disabled
* by default, and is intended to be used for debugging client side ALUA code.
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
- * Used for the recommended application client implict transition timeout
+ * Used for the recommended application client implicit transition timeout
* in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
*/
-#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0
-#define ALUA_MAX_IMPLICT_TRANS_SECS 255
+#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
+#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
/*
* Used by core_alua_update_tpg_primary_metadata() and
* core_alua_update_tpg_secondary_metadata()
@@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
-extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
char *);
-extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
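
The ALUA_*_SUP defines added above are the individual bits of the SUPPORTED ASYMMETRIC ACCESS STATES byte that REPORT TARGET PORT GROUPS now builds from tg_pt_gp_alua_supported_states, and the allocation hunk in target_core_alua.c ORs in every bit except LBA-dependent by default. A small standalone sketch of composing and querying that byte, with the values copied from this header:

#include <stdio.h>

#define ALUA_T_SUP	0x80	/* Transitioning        */
#define ALUA_O_SUP	0x40	/* Offline              */
#define ALUA_LBD_SUP	0x10	/* LBA dependent        */
#define ALUA_U_SUP	0x08	/* Unavailable          */
#define ALUA_S_SUP	0x04	/* Standby              */
#define ALUA_AN_SUP	0x02	/* Active/NonOptimized  */
#define ALUA_AO_SUP	0x01	/* Active/Optimized     */

int main(void)
{
	/* Default mask set up in core_alua_allocate_tg_pt_gp(): all but LBD. */
	unsigned int supported = ALUA_T_SUP | ALUA_O_SUP | ALUA_U_SUP |
				 ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	printf("supported states byte: 0x%02x\n", supported);	/* prints 0xcf */
	printf("standby supported:     %d\n", !!(supported & ALUA_S_SUP));
	printf("lba dependent:         %d\n", !!(supported & ALUA_LBD_SUP));
	return 0;
}
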
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 82e81c542e4..272755d03e5 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
tf->tf_group.default_groups[0] = &tf->tf_disc_group;
tf->tf_group.default_groups[1] = NULL;
config_group_init_type_name(&tf->tf_group, name,
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
- &TF_CIT_TMPL(tf)->tfc_discovery_cit);
+ &tf->tf_cit_tmpl.tfc_discovery_cit);
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
@@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do implict ALUA on non valid"
+ pr_err("Unable to do implicit ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
@@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
}
new_state = (int)tmp;
- if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- pr_err("Unable to process implict configfs ALUA"
- " transition while TPGS_IMPLICT_ALUA is disabled\n");
+ if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
+ pr_err("Unable to process implicit configfs ALUA"
+ " transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
@@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
new_status = (int)tmp;
if ((new_status != ALUA_STATUS_NONE) &&
- (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
@@ -2131,6 +2131,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
/*
+ * alua_supported_states
+ */
+
+#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
+ struct t10_alua_tg_pt_gp *t, char *p) \
+{ \
+ return sprintf(p, "%d\n", !!(t->_var & _bit)); \
+}
+
+#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
+ struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
+{ \
+ unsigned long tmp; \
+ int ret; \
+ \
+ if (!t->tg_pt_gp_valid_id) { \
+ pr_err("Unable to do set ##_name ALUA state on non" \
+ " valid tg_pt_gp ID: %hu\n", \
+ t->tg_pt_gp_valid_id); \
+ return -EINVAL; \
+ } \
+ \
+ ret = kstrtoul(p, 0, &tmp); \
+ if (ret < 0) { \
+ pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
+ return -EINVAL; \
+ } \
+ if (tmp > 1) { \
+ pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
+ return -EINVAL; \
+ } \
+ if (tmp) \
+ t->_var |= _bit; \
+ else \
+ t->_var &= ~_bit; \
+ \
+ return c; \
+}
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
+ tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
+ tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
+ tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
+ tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
+ tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
+ tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
+ tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
+ tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
+ tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
+ tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
+ tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
+ tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
+ tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
+ tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
+
+/*
* alua_write_metadata
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
@@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
/*
- * implict_trans_secs
+ * implicit_trans_secs
*/
-static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
+static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- return core_alua_show_implict_trans_secs(tg_pt_gp, page);
+ return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
+static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
- return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
+ return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
}
-SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
+SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
/*
* preferred
@@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_core_alua_tg_pt_gp_alua_access_state.attr,
&target_core_alua_tg_pt_gp_alua_access_status.attr,
&target_core_alua_tg_pt_gp_alua_access_type.attr,
+ &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
+ &target_core_alua_tg_pt_gp_alua_support_offline.attr,
+ &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
+ &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
+ &target_core_alua_tg_pt_gp_alua_support_standby.attr,
+ &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
+ &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
- &target_core_alua_tg_pt_gp_implict_trans_secs.attr,
+ &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
&target_core_alua_tg_pt_gp_preferred.attr,
&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
&target_core_alua_tg_pt_gp_members.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d90dbb0f1a6..207b340498a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
+ percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
+ percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
}
/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
- /* TODO: get rid of this and use atomics for stats */
dev = se_lun->lun_se_dev;
- spin_lock_irqsave(&dev->stats_lock, flags);
- dev->num_cmds++;
+ atomic_long_inc(&dev->num_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- dev->write_bytes += se_cmd->data_length;
+ atomic_long_add(se_cmd->data_length, &dev->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- dev->read_bytes += se_cmd->data_length;
- spin_unlock_irqrestore(&dev->stats_lock, flags);
-
- spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
- spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+ atomic_long_add(se_cmd->data_length, &dev->read_bytes);
return 0;
}
@@ -314,14 +313,14 @@ int core_enable_device_list_for_node(
deve = nacl->device_list[mapped_lun];
/*
- * Check if the call is handling demo mode -> explict LUN ACL
+ * Check if the call is handling demo mode -> explicit LUN ACL
* transition. This transition must be for the same struct se_lun
* + mapped_lun that was setup in demo mode..
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explict"
+ " already set for demo mode -> explicit"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
@@ -329,7 +328,7 @@ int core_enable_device_list_for_node(
if (deve->se_lun != lun) {
pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
- " -> explict LUN ACL transition\n");
+ " -> explicit LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
}
@@ -1407,6 +1406,7 @@ static void scsi_dump_inquiry(struct se_device *dev)
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
+ struct se_lun *xcopy_lun;
dev = hba->transport->alloc_device(hba, name);
if (!dev)
@@ -1423,7 +1423,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
INIT_LIST_HEAD(&dev->g_dev_node);
- spin_lock_init(&dev->stats_lock);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
@@ -1469,6 +1468,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+ xcopy_lun = &dev->xcopy_lun;
+ xcopy_lun->lun_se_dev = dev;
+ init_completion(&xcopy_lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
+ spin_lock_init(&xcopy_lun->lun_acl_lock);
+ spin_lock_init(&xcopy_lun->lun_sep_lock);
+ init_completion(&xcopy_lun->lun_ref_comp);
+
return dev;
}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 3503996d7d1..dae2ad6a669 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun(
}
config_group_init_type_name(&lacl->se_lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
+ "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
@@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(
nacl_cg->default_groups[4] = NULL;
config_group_init_type_name(&se_nacl->acl_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
"fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
return &se_nacl->acl_group;
}
@@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np(
se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
@@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun(
}
config_group_init_type_name(&lun->lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_port_cit);
config_group_init_type_name(&lun->port_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
+ "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
@@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg(
se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
- &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
- &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_np_cit);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_param_cit);
return &se_tpg->tpg_group;
}
@@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(
wwn->wwn_group.default_groups[1] = NULL;
config_group_init_type_name(&wwn->wwn_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_cit);
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
return &wwn->wwn_group;
}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b662f89deda..0e34cda3271 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
} else {
ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
- * Perform implict vfs_fsync_range() for fd_do_writev() ops
+ * Perform implicit vfs_fsync_range() for fd_do_writev() ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b9a3394fe47..c87959f1276 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int ret;
+
+ ret = bdev_alignment_offset(bd);
+ if (ret == -1)
+ return 0;
+
+ /* convert offset-bytes to offset-lbas */
+ return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+ return ilog2(logs_per_phys);
+}
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+
+ return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+
+ return bdev_io_opt(bd);
+}
+
static struct sbc_ops iblock_sbc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
@@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
+ .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+ .get_lbppbe = iblock_get_lbppbe,
+ .get_io_min = iblock_get_io_min,
+ .get_io_opt = iblock_get_io_opt,
.get_write_cache = iblock_get_write_cache,
};
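
iblock_get_lbppbe() above derives the LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT as log2 of the physical-to-logical block size ratio, so a 512e disk (4096-byte physical, 512-byte logical sectors) reports 3 and a native 512-byte device reports 0. The arithmetic in isolation, with a plain integer-log2 helper standing in for the kernel's ilog2():

#include <assert.h>

/* Integer log2 for powers of two; stand-in for the kernel's ilog2(). */
static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int lbppbe(unsigned int phys_bytes, unsigned int logical_bytes)
{
	return ilog2u(phys_bytes / logical_bytes);
}

int main(void)
{
	assert(lbppbe(4096, 512) == 3);	/* eight logical blocks per physical block */
	assert(lbppbe(512, 512) == 0);	/* nothing to report                       */
	return 0;
}
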
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 579128abe3f..47b63b094cd 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
-struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
- unsigned char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
@@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
-int transport_clear_lun_from_sessions(struct se_lun *);
+int transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d1ae4c5c3ff..2f5d77932c8 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder(
* statement.
*/
if (!ret && !other_cdb) {
- pr_debug("Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explicit CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
*/
if (!registered_nexus) {
- pr_debug("Allowing implict CDB: 0x%02x"
+ pr_debug("Allowing implicit CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/
- pr_debug("Allowing implict CDB: 0x%02x for %s"
+ pr_debug("Allowing implicit CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
alua_port_list) {
/*
* This pointer will be NULL for demo mode MappedLUNs
- * that have not been make explict via a ConfigFS
+ * that have not been made explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
if (!deve_tmp->se_lun_acl)
@@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
smp_mb__after_atomic_dec();
}
-static int core_scsi3_check_implict_release(
+static int core_scsi3_check_implicit_release(
struct se_device *dev,
struct t10_pr_registration *pr_reg)
{
@@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release(
}
if (pr_res_holder == pr_reg) {
/*
- * Perform an implict RELEASE if the registration that
+ * Perform an implicit RELEASE if the registration that
* is being released is holding the reservation.
*
* From spc4r17, section 5.7.11.1:
@@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release(
* For 'All Registrants' reservation types, all existing
* registrations are still processed as reservation holders
* in core_scsi3_pr_seq_non_holder() after the initial
- * reservation holder is implictly released here.
+ * reservation holder is implicitly released here.
*/
} else if (pr_reg->pr_reg_all_tg_pt &&
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
- pr_holder = core_scsi3_check_implict_release(
+ pr_holder = core_scsi3_check_implicit_release(
cmd->se_dev, pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
@@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
- int explict)
+ int explicit)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
@@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release(
pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explict) ? "explict" : "implict",
+ tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
@@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt(
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
- * Do an implict RELEASE of the existing reservation.
+ * Do an implicit RELEASE of the existing reservation.
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
@@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* 5.7.11.4 Preempting, Table 52 and Figure 7.
*
* For a ZERO SA Reservation key, release
- * all other registrations and do an implict
+ * all other registrations and do an implicit
* release of active persistent reservation.
*
* For a non-ZERO SA Reservation key, only
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 131327ac7f5..4ffe5f2ec0e 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -27,7 +27,6 @@
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
-#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index d9b92b2c524..52ae54e6010 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->dev_attrib.block_size & 0xff;
+
+ if (dev->transport->get_lbppbe)
+ buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+ if (dev->transport->get_alignment_offset_lbas) {
+ u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+ buf[14] = (lalba >> 8) & 0x3f;
+ buf[15] = lalba & 0xff;
+ }
+
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
- buf[14] = 0x80;
+ buf[14] |= 0x80;
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
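
The READ CAPACITY (16) hunk above packs the new geometry hints into bytes 13-15, which is why the thin-provisioning bit now has to be OR'd into byte 14 instead of overwriting it. A self-contained C sketch of that byte layout, following the sbc3r22 wording used in the comment; the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t buf[32] = { 0 };
        unsigned int lbppbe = 3;        /* 4096-byte physical / 512-byte logical */
        uint16_t lalba = 1;             /* lowest aligned LBA */
        int tpe = 1;                    /* thin provisioning enabled */

        buf[13] = lbppbe & 0x0f;        /* low nibble of byte 13 */
        buf[14] = (lalba >> 8) & 0x3f;  /* high 6 bits of LALBA */
        buf[15] = lalba & 0xff;
        if (tpe)
                buf[14] |= 0x80;        /* TPE shares byte 14 with LALBA */

        printf("byte13=%02x byte14=%02x byte15=%02x\n",
               buf[13], buf[14], buf[15]);
        return 0;
}
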
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 074539558a5..021c3f4a4f0 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
buf[5] = 0x80;
/*
- * Set TPGS field for explict and/or implict ALUA access type
+ * Set TPGS field for explicit and/or implicit ALUA access type
* and operation.
*
* See spc4r17 section 6.4.2 Table 135
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
int have_tp = 0;
+ int opt, min;
/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
- put_unaligned_be16(1, &buf[6]);
+ if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+ put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+ else
+ put_unaligned_be16(1, &buf[6]);
/*
* Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+ if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+ put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+ else
+ put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
@@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[3] << 8) + cdb[4];
/*
- * Do implict HEAD_OF_QUEUE processing for INQUIRY.
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = MSG_HEAD_TAG;
@@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_report_luns;
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
- * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = MSG_HEAD_TAG;
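
In the Block Limits VPD (B0h) hunk above, OPTIMAL TRANSFER LENGTH GRANULARITY (bytes 6-7) and OPTIMAL TRANSFER LENGTH (bytes 12-15) are now derived from the backend's io_min/io_opt hints, converted from bytes to logical blocks. A small standalone C sketch with illustrative RAID-style numbers, not taken from the patch; the driver itself falls back to its old defaults when a hint is zero.

#include <stdio.h>

int main(void)
{
        unsigned int block_size = 512;
        unsigned int io_min = 4096;             /* e.g. stripe chunk size */
        unsigned int io_opt = 1048576;          /* e.g. full stripe width */

        unsigned int granularity = io_min / block_size;  /* -> 8 blocks */
        unsigned int optimal_len = io_opt / block_size;  /* -> 2048 blocks */

        /* granularity -> VPD B0h bytes 6-7, optimal_len -> bytes 12-15 */
        printf("OPTIMAL TRANSFER LENGTH GRANULARITY = %u blocks\n", granularity);
        printf("OPTIMAL TRANSFER LENGTH             = %u blocks\n", optimal_len);
        return 0;
}
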
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 9c642e02cba..03538994d2f 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -32,7 +32,6 @@
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/blkdev.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
struct se_device *dev =
container_of(sgrps, struct se_device, dev_stat_grps);
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
@@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)dev->num_cmds);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_cmds));
}
DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
@@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->read_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
@@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->write_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
@@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuInResets */
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_LU_ATTR_RO(resets);
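
The statistics hunks above replace spinlock-protected counters with atomic_long_t, so the show routines switch to %lu and atomic_long_read(). A minimal sketch of that pattern, using placeholder structures rather than the real se_device layout:

#include <linux/atomic.h>
#include <linux/kernel.h>

struct sketch_dev {
        atomic_long_t num_resets;
        atomic_long_t read_bytes;
};

static void sketch_account_reset(struct sketch_dev *dev)
{
        /* No lock needed; atomic_long_inc() is safe from any context. */
        atomic_long_inc(&dev->num_resets);
}

static long sketch_read_mbytes(struct sketch_dev *dev)
{
        /* Readers take a snapshot; %lu in the show routines matches the
         * long returned by atomic_long_read().
         */
        return atomic_long_read(&dev->read_bytes) >> 20;
}
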
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 250009909d4..70c638f730a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -386,9 +386,7 @@ int core_tmr_lun_reset(
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock_irq(&dev->stats_lock);
- dev->num_resets++;
- spin_unlock_irq(&dev->stats_lock);
+ atomic_long_inc(&dev->num_resets);
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index b9a6ec0aa5f..f697f8baec5 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
return acl;
}
+EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
/* core_tpg_add_node_to_devs():
*
@@ -633,6 +634,13 @@ int core_tpg_set_initiator_node_tag(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+{
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+ complete(&lun->lun_ref_comp);
+}
+
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -646,15 +654,20 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
- ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;
+ ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ if (ret < 0) {
+ percpu_ref_cancel_init(&lun->lun_ref);
+ return ret;
+ }
+
return 0;
}
@@ -691,10 +704,9 @@ int core_tpg_register(
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
}
se_tpg->se_tpg_type = se_tpg_type;
@@ -815,10 +827,16 @@ int core_tpg_post_addlun(
{
int ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;
+ ret = core_dev_export(lun_ptr, tpg, lun);
+ if (ret < 0) {
+ percpu_ref_cancel_init(&lun->lun_ref);
+ return ret;
+ }
+
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
@@ -827,14 +845,6 @@ int core_tpg_post_addlun(
return 0;
}
-static void core_tpg_shutdown_lun(
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- core_clear_lun_from_tpg(lun, tpg);
- transport_clear_lun_from_sessions(lun);
-}
-
struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
u32 unpacked_lun)
@@ -869,7 +879,8 @@ int core_tpg_post_dellun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
- core_tpg_shutdown_lun(tpg, lun);
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_ref(lun);
core_dev_unexport(lun->lun_se_dev, tpg, lun);
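
The LUN changes above swap the per-LUN command list for a percpu_ref whose release callback completes lun_ref_comp. A condensed sketch of the setup and error path follows, written against the 3.13-era API (percpu_ref_init() without a gfp argument, percpu_ref_cancel_init()); the names are illustrative and the export callback stands in for core_dev_export().

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct sketch_lun {
        struct percpu_ref ref;
        struct completion ref_comp;
};

static void sketch_lun_ref_release(struct percpu_ref *ref)
{
        struct sketch_lun *lun = container_of(ref, struct sketch_lun, ref);

        /* Last reference dropped after percpu_ref_kill(): wake the waiter. */
        complete(&lun->ref_comp);
}

static int sketch_lun_setup(struct sketch_lun *lun, int (*export)(void))
{
        int ret;

        init_completion(&lun->ref_comp);
        ret = percpu_ref_init(&lun->ref, sketch_lun_ref_release);
        if (ret < 0)
                return ret;

        ret = export();                 /* e.g. core_dev_export() */
        if (ret < 0) {
                percpu_ref_cancel_init(&lun->ref);
                return ret;
        }
        return 0;
}
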
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 81e945eefbb..91953da0f62 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -28,7 +28,6 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
@@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess)
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
- * If last kref is dropping now for an explict NodeACL, awake sleeping
+ * If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context.
*/
@@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
if (write_pending)
cmd->t_state = TRANSPORT_WRITE_PENDING;
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (remove_from_lists)
- target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- complete(&cmd->transport_lun_stop_comp);
- return 1;
- }
-
if (remove_from_lists) {
target_remove_from_state_list(cmd);
@@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- unsigned long flags;
- if (!lun)
+ if (!lun || !cmd->lun_ref_active)
return;
- spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (!list_empty(&cmd->se_lun_node))
- list_del_init(&cmd->se_lun_node);
- spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+ percpu_ref_put(&lun->lun_ref);
}
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
cmd->transport_state |= CMD_T_FAILED;
/*
- * Check for case where an explict ABORT_TASK has been received
+ * Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
*/
if (cmd->transport_state & CMD_T_ABORTED &&
@@ -1092,13 +1070,10 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
- INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
- init_completion(&cmd->transport_lun_fe_stop_comp);
- init_completion(&cmd->transport_lun_stop_comp);
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
init_completion(&cmd->task_stop_comp);
@@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd)
/*
* If the received CDB has already been aborted, stop processing it here.
*/
- if (transport_check_aborted_status(cmd, 1)) {
- complete(&cmd->transport_lun_stop_comp);
+ if (transport_check_aborted_status(cmd, 1))
return;
- }
/*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->transport_lun_stop_comp);
- return;
- }
- /*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
+ spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
__func__, __LINE__,
@@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
-/* transport_lun_wait_for_tasks():
- *
- * Called from ConfigFS context to stop the passed struct se_cmd to allow
- * an struct se_lun to be successfully shutdown.
- */
-static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
-{
- unsigned long flags;
- int ret = 0;
-
- /*
- * If the frontend has already requested this struct se_cmd to
- * be stopped, we can safely ignore this struct se_cmd.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_STOP) {
- cmd->transport_state &= ~CMD_T_LUN_STOP;
-
- pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
- cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_cmd_check_stop(cmd, false, false);
- return -EPERM;
- }
- cmd->transport_state |= CMD_T_LUN_FE_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- // XXX: audit task_flags checks.
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if ((cmd->transport_state & CMD_T_BUSY) &&
- (cmd->transport_state & CMD_T_SENT)) {
- if (!target_stop_cmd(cmd, &flags))
- ret++;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- pr_debug("ConfigFS: cmd: %p stop tasks ret:"
- " %d\n", cmd, ret);
- if (!ret) {
- pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- wait_for_completion(&cmd->transport_lun_stop_comp);
- pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- }
-
- return 0;
-}
-
-static void __transport_clear_lun_from_sessions(struct se_lun *lun)
-{
- struct se_cmd *cmd = NULL;
- unsigned long lun_flags, cmd_flags;
- /*
- * Do exception processing and return CHECK_CONDITION status to the
- * Initiator Port.
- */
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty(&lun->lun_cmd_list)) {
- cmd = list_first_entry(&lun->lun_cmd_list,
- struct se_cmd, se_lun_node);
- list_del_init(&cmd->se_lun_node);
-
- spin_lock(&cmd->t_state_lock);
- pr_debug("SE_LUN[%d] - Setting cmd->transport"
- "_lun_stop for ITT: 0x%08x\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
- cmd->transport_state |= CMD_T_LUN_STOP;
- spin_unlock(&cmd->t_state_lock);
-
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-
- if (!cmd->se_lun) {
- pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- /*
- * If the Storage engine still owns the iscsi_cmd_t, determine
- * and/or stop its context.
- */
- pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
-
- if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
-
- pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
- "_wait_for_tasks(): SUCCESS\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- goto check_cond;
- }
- cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
- target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-
- /*
- * The Storage engine stopped this struct se_cmd before it was
- * send to the fabric frontend for delivery back to the
- * Initiator Node. Return this SCSI CDB back with an
- * CHECK_CONDITION status.
- */
-check_cond:
- transport_send_check_condition_and_sense(cmd,
- TCM_NON_EXISTENT_LUN, 0);
- /*
- * If the fabric frontend is waiting for this iscsi_cmd_t to
- * be released, notify the waiting thread now that LU has
- * finished accessing it.
- */
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
- pr_debug("SE_LUN[%d] - Detected FE stop for"
- " struct se_cmd: %p ITT: 0x%08x\n",
- lun->unpacked_lun,
- cmd, cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock,
- cmd_flags);
- transport_cmd_check_stop(cmd, false, false);
- complete(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
- pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- }
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-}
-
-static int transport_clear_lun_thread(void *p)
+static int transport_clear_lun_ref_thread(void *p)
{
struct se_lun *lun = p;
- __transport_clear_lun_from_sessions(lun);
+ percpu_ref_kill(&lun->lun_ref);
+
+ wait_for_completion(&lun->lun_ref_comp);
complete(&lun->lun_shutdown_comp);
return 0;
}
-int transport_clear_lun_from_sessions(struct se_lun *lun)
+int transport_clear_lun_ref(struct se_lun *lun)
{
struct task_struct *kt;
- kt = kthread_run(transport_clear_lun_thread, lun,
+ kt = kthread_run(transport_clear_lun_ref_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
pr_err("Unable to start clear_lun thread\n");
@@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
- /*
- * If we are already stopped due to an external event (ie: LUN shutdown)
- * sleep until the connection can have the passed struct se_cmd back.
- * The cmd->transport_lun_stopped_sem will be upped by
- * transport_clear_lun_from_sessions() once the ConfigFS context caller
- * has completed its operation on the struct se_cmd.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("wait_for_tasks: Stopping"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe"
- "_stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
- /*
- * There is a special case for WRITES where a FE exception +
- * LUN shutdown means ConfigFS context is still sleeping on
- * transport_lun_stop_comp in transport_lun_wait_for_tasks().
- * We go ahead and up transport_lun_stop_comp just to be sure
- * here.
- */
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->transport_lun_stop_comp);
- wait_for_completion(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- target_remove_from_state_list(cmd);
- /*
- * At this point, the frontend who was the originator of this
- * struct se_cmd, now owns the structure and can be released through
- * normal means below.
- */
- pr_debug("wait_for_tasks: Stopped"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
- "stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_LUN_STOP;
- }
if (!(cmd->transport_state & CMD_T_ACTIVE)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
@@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
smp_mb__after_atomic_inc();
+ return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
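
With the conversion above, LUN shutdown no longer walks a command list: each in-flight command holds a reference, and transport_clear_lun_ref() kills the ref and waits for the release callback. A sketch of that get/put/kill-and-wait shape; it is not the exact lookup-path code, and it assumes the 3.13-era percpu_ref_tryget() semantics (refusing new references once the ref has been killed).

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

/* I/O path: pin the LUN while a command references it ... */
static bool sketch_get_lun(struct percpu_ref *ref)
{
        return percpu_ref_tryget(ref);  /* refused once the ref is killed */
}

/* ... and drop the pin when the command is done (transport_lun_remove_cmd). */
static void sketch_put_lun(struct percpu_ref *ref)
{
        percpu_ref_put(ref);
}

/* ConfigFS shutdown: fence new I/O, then wait for the last put. */
static void sketch_clear_lun(struct percpu_ref *ref, struct completion *done)
{
        percpu_ref_kill(ref);           /* no new references succeed */
        wait_for_completion(done);      /* release callback fires on last put */
}
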
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 0204952fe4d..be912b36daa 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -19,7 +19,7 @@
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
-#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 474cd44fac1..6b88a9958f6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
- if (xpt_cmd->remote_port)
- kfree(se_cmd->se_lun);
-
kfree(xpt_cmd);
}
@@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun(
return 0;
}
- pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
- if (!pt_cmd->se_lun) {
- pr_err("Unable to allocate pt_cmd->se_lun\n");
- return -ENOMEM;
- }
- init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
- INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
- spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
- spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
- spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
-
+ pt_cmd->se_lun = &se_dev->xcopy_lun;
pt_cmd->se_dev = se_dev;
pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
- pt_cmd->se_lun->lun_se_dev = se_dev;
pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd(
return 0;
out:
- if (remote_port == true)
- kfree(cmd->se_lun);
return ret;
}
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 0dd54a44abc..752863acecb 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -22,6 +22,7 @@
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
+#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
struct ft_transport_id {
__u8 format;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 0e5a1caed17..479ec5621a4 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,6 +28,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
+#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct fc_lport *lport;
+ struct se_session *se_sess;
if (!cmd)
return;
+ se_sess = cmd->sess->se_sess;
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
lport->tt.seq_release(fr_seq(fp));
fc_frame_free(fp);
+ percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(cmd->sess); /* undo get from lookup at recv */
- kfree(cmd);
}
void ft_release_cmd(struct se_cmd *se_cmd)
@@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
+ struct se_session *se_sess = sess->se_sess;
+ int tag;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ if (tag < 0)
goto busy;
+
+ cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct ft_cmd));
+
+ cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
cmd->seq = lport->tt.seq_assign(lport, fp);
if (!cmd->seq) {
- kfree(cmd);
+ percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
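
The tcm_fc hunks above switch to the tag-pool pattern: commands live in a preallocated per-session array (sess_cmd_map) and a percpu_ida hands out indexes into it, replacing kzalloc/kfree in the I/O path. A sketch of the allocate/free pair, using the same percpu_ida calls as the patch but placeholder structure names:

#include <linux/percpu_ida.h>
#include <linux/string.h>
#include <linux/gfp.h>

struct sketch_cmd {
        int map_tag;
        /* ... per-command state ... */
};

struct sketch_session {
        struct percpu_ida tag_pool;
        struct sketch_cmd *cmd_map;     /* TCM_FC_DEFAULT_TAGS entries */
};

static struct sketch_cmd *sketch_get_cmd(struct sketch_session *sess)
{
        int tag = percpu_ida_alloc(&sess->tag_pool, GFP_ATOMIC);

        if (tag < 0)
                return NULL;            /* pool exhausted: report busy */

        /* Reuse the preallocated slot instead of allocating a new command. */
        memset(&sess->cmd_map[tag], 0, sizeof(struct sketch_cmd));
        sess->cmd_map[tag].map_tag = tag;
        return &sess->cmd_map[tag];
}

static void sketch_put_cmd(struct sketch_session *sess, struct sketch_cmd *cmd)
{
        percpu_ida_free(&sess->tag_pool, cmd->map_tag);
}
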
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4e0050840a7..c6932fb53a8 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -571,16 +571,16 @@ int ft_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
ft_nacl_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* register the fabric for use within TCM
*/
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4859505ae2e..ae52c08dad0 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
if (!sess)
return NULL;
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
+ sizeof(struct ft_cmd));
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index dbfc390330a..f35a1f75b15 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -56,7 +56,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
select THERMAL_GOV_USER_SPACE
help
Select this if you want to let the user space manage the
- lpatform thermals.
+ platform thermals.
endchoice
@@ -69,6 +69,7 @@ config THERMAL_GOV_STEP_WISE
bool "Step_wise thermal governor"
help
Enable this to manage platform thermals using a simple linear
+ governor.
config THERMAL_GOV_USER_SPACE
bool "User_space thermal governor"
@@ -78,7 +79,6 @@ config THERMAL_GOV_USER_SPACE
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
- select CPU_FREQ_TABLE
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
@@ -117,14 +117,14 @@ config SPEAR_THERMAL
depends on OF
help
Enable this to plug the SPEAr thermal sensor driver into the Linux
- thermal framework
+ thermal framework.
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE
help
Enable this to plug the R-Car thermal sensor driver into the Linux
- thermal framework
+ thermal framework.
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index d17902886c3..02a46f23d14 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -469,10 +469,10 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
&cpufreq_cooling_ops);
- if (!cool_dev) {
+ if (IS_ERR(cool_dev)) {
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
- return ERR_PTR(-EINVAL);
+ return cool_dev;
}
cpufreq_dev->cool_dev = cool_dev;
cpufreq_dev->cpufreq_state = 0;
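
The fix above matters because thermal_cooling_device_register() reports failure as an ERR_PTR-encoded pointer, never NULL, so callers must test with IS_ERR() and forward the encoded error instead of inventing -EINVAL. A minimal sketch of the correct caller-side check; the sketch_ names and the empty ops table are placeholders, not real driver code.

#include <linux/thermal.h>
#include <linux/err.h>

static const struct thermal_cooling_device_ops sketch_cooling_ops; /* placeholder */

static int sketch_register_cooling(struct thermal_cooling_device **out)
{
        struct thermal_cooling_device *cdev;

        cdev = thermal_cooling_device_register("sketch", NULL,
                                               &sketch_cooling_ops);
        if (IS_ERR(cdev))
                return PTR_ERR(cdev);   /* failure is ERR_PTR, never NULL */

        *out = cdev;
        return 0;
}
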
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index b40b37cd25e..8f181b3f842 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -675,6 +675,11 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2e},
{ X86_VENDOR_INTEL, 6, 0x2f},
{ X86_VENDOR_INTEL, 6, 0x3a},
+ { X86_VENDOR_INTEL, 6, 0x3c},
+ { X86_VENDOR_INTEL, 6, 0x3e},
+ { X86_VENDOR_INTEL, 6, 0x3f},
+ { X86_VENDOR_INTEL, 6, 0x45},
+ { X86_VENDOR_INTEL, 6, 0x46},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -758,21 +763,39 @@ static int powerclamp_init(void)
/* probe cpu features and ids here */
retval = powerclamp_probe();
if (retval)
- return retval;
+ goto exit_free;
+
/* set default limit, maybe adjusted during runtime based on feedback */
window_size = 2;
register_hotcpu_notifier(&powerclamp_cpu_notifier);
+
powerclamp_thread = alloc_percpu(struct task_struct *);
+ if (!powerclamp_thread) {
+ retval = -ENOMEM;
+ goto exit_unregister;
+ }
+
cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
&powerclamp_cooling_ops);
- if (IS_ERR(cooling_dev))
- return -ENODEV;
+ if (IS_ERR(cooling_dev)) {
+ retval = -ENODEV;
+ goto exit_free_thread;
+ }
if (!duration)
duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+
powerclamp_create_debug_files();
return 0;
+
+exit_free_thread:
+ free_percpu(powerclamp_thread);
+exit_unregister:
+ unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+exit_free:
+ kfree(cpu_clamping_mask);
+ return retval;
}
module_init(powerclamp_init);
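
powerclamp_init() above gains the usual init-time unwind ladder: each acquired resource has a matching label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A generic, self-contained sketch of the shape; the sketch_ helpers are stand-ins for the probe/alloc/register steps, not the driver's real functions.

#include <linux/errno.h>

static int sketch_acquire_a(void) { return 0; }         /* e.g. probe */
static int sketch_acquire_b(void) { return 0; }         /* e.g. alloc_percpu */
static int sketch_acquire_c(void) { return -ENOMEM; }   /* e.g. register cdev */
static void sketch_release_a(void) { }
static void sketch_release_b(void) { }

static int sketch_init(void)
{
        int ret;

        ret = sketch_acquire_a();
        if (ret)
                return ret;             /* nothing acquired yet */

        ret = sketch_acquire_b();
        if (ret)
                goto err_a;

        ret = sketch_acquire_c();
        if (ret)
                goto err_b;

        return 0;

err_b:
        sketch_release_b();
err_a:
        sketch_release_a();
        return ret;
}
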
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4962a6aaf29..f1d511a9475 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -247,10 +247,11 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
if (!pos->tzp && !pos->ops->bind)
continue;
- if (!pos->tzp && pos->ops->bind) {
+ if (pos->ops->bind) {
ret = pos->ops->bind(pos, cdev);
if (ret)
print_bind_err_msg(pos, cdev, ret);
+ continue;
}
tzp = pos->tzp;
@@ -282,8 +283,8 @@ static void bind_tz(struct thermal_zone_device *tz)
mutex_lock(&thermal_list_lock);
- /* If there is no platform data, try to use ops->bind */
- if (!tzp && tz->ops->bind) {
+ /* If there is ops->bind, try to use ops->bind */
+ if (tz->ops->bind) {
list_for_each_entry(pos, &thermal_cdev_list, node) {
ret = tz->ops->bind(tz, pos);
if (ret)
@@ -1038,7 +1039,8 @@ static void thermal_release(struct device *dev)
sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
kfree(tz);
- } else {
+ } else if (!strncmp(dev_name(dev), "cooling_device",
+ sizeof("cooling_device") - 1)) {
cdev = to_cooling_device(dev);
kfree(cdev);
}
@@ -1606,15 +1608,17 @@ exit:
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
#ifdef CONFIG_NET
+static const struct genl_multicast_group thermal_event_mcgrps[] = {
+ { .name = THERMAL_GENL_MCAST_GROUP_NAME, },
+};
+
static struct genl_family thermal_event_genl_family = {
.id = GENL_ID_GENERATE,
.name = THERMAL_GENL_FAMILY_NAME,
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
- .name = THERMAL_GENL_MCAST_GROUP_NAME,
+ .mcgrps = thermal_event_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
};
int thermal_generate_netlink_event(struct thermal_zone_device *tz,
@@ -1675,7 +1679,8 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz,
return result;
}
- result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
+ result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
+ 0, GFP_ATOMIC);
if (result)
dev_err(&tz->device, "Failed to send netlink event:%d", result);
@@ -1685,17 +1690,7 @@ EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
static int genetlink_init(void)
{
- int result;
-
- result = genl_register_family(&thermal_event_genl_family);
- if (result)
- return result;
-
- result = genl_register_mc_group(&thermal_event_genl_family,
- &thermal_event_mcgrp);
- if (result)
- genl_unregister_family(&thermal_event_genl_family);
- return result;
+ return genl_register_family(&thermal_event_genl_family);
}
static void genetlink_exit(void)
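
The netlink hunk above moves to the consolidated genetlink API of this kernel generation: multicast groups live in a const array referenced from the family, a single genl_register_family() call registers both, and genlmsg_multicast() addresses the group by its index into that array. A compact sketch mirroring the field usage in the hunk, with an illustrative "sketch" family rather than the thermal one:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/genetlink.h>

static const struct genl_multicast_group sketch_mcgrps[] = {
        { .name = "sketch_events", },
};

static struct genl_family sketch_family = {
        .id = GENL_ID_GENERATE,
        .name = "sketch",
        .version = 1,
        .maxattr = 0,
        .mcgrps = sketch_mcgrps,
        .n_mcgrps = ARRAY_SIZE(sketch_mcgrps),
};

static int sketch_notify(struct sk_buff *skb)
{
        /* Group 0 is sketch_mcgrps[0]; no separate mc-group registration. */
        return genlmsg_multicast(&sketch_family, skb, 0, 0, GFP_ATOMIC);
}

static int __init sketch_genl_init(void)
{
        return genl_register_family(&sketch_family);
}
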
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index a93a424873f..8096fcbe2dc 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -349,7 +349,7 @@ bfin_jc_early_write(struct console *co, const char *buf, unsigned int count)
bfin_jc_straight_buffer_write(buf, count);
}
-static struct __initdata console bfin_jc_early_console = {
+static struct console bfin_jc_early_console __initdata = {
.name = "early_BFJC",
.write = bfin_jc_early_write,
.flags = CON_ANYTIME | CON_PRINTBUFFER,
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 9bffcec5ad8..0419b69e270 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -32,6 +32,7 @@
#include <linux/poll.h>
#include <asm/epapr_hcalls.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 44fbebab507..3502a7bbb69 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -86,6 +86,21 @@ static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
return i;
}
+static bool hvc_dcc_check(void)
+{
+ unsigned long time = jiffies + (HZ / 10);
+
+ /* Write a test character to check if it is handled */
+ __dcc_putchar('\n');
+
+ while (time_is_after_jiffies(time)) {
+ if (!(__dcc_getstatus() & DCC_STATUS_TX))
+ return true;
+ }
+
+ return false;
+}
+
static const struct hv_ops hvc_dcc_get_put_ops = {
.get_chars = hvc_dcc_get_chars,
.put_chars = hvc_dcc_put_chars,
@@ -93,6 +108,9 @@ static const struct hv_ops hvc_dcc_get_put_ops = {
static int __init hvc_dcc_console_init(void)
{
+ if (!hvc_dcc_check())
+ return -ENODEV;
+
hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);
return 0;
}
@@ -100,6 +118,9 @@ console_initcall(hvc_dcc_console_init);
static int __init hvc_dcc_init(void)
{
+ if (!hvc_dcc_check())
+ return -ENODEV;
+
hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
return 0;
}
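
hvc_dcc_check() above is a probe-with-deadline: write one test character, then poll the TX-busy status until roughly HZ/10 jiffies have passed, and only register the console if the write was consumed. The generic idiom, with the completion test left as a placeholder callback:

#include <linux/jiffies.h>

static bool sketch_probe_with_timeout(bool (*done)(void))
{
        unsigned long deadline = jiffies + HZ / 10;     /* ~100 ms */

        while (time_is_after_jiffies(deadline)) {
                if (done())
                        return true;    /* hardware drained the test write */
        }
        return false;                   /* nobody listening: skip the driver */
}
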
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index fd17a9b804b..db19a38c8c6 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1354,8 +1354,7 @@ out_error_memory:
mempool_destroy(hvc_iucv_mempool);
kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
- if (hvc_iucv_filter)
- kfree(hvc_iucv_filter);
+ kfree(hvc_iucv_filter);
hvc_iucv_devices = 0; /* ensure that we do not provide any device */
return rc;
}
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index cd69b48f6df..6496872e2e4 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -329,7 +329,7 @@ static void udbg_init_opal_common(void)
void __init hvc_opal_init_early(void)
{
struct device_node *stdout_node = NULL;
- const u32 *termno;
+ const __be32 *termno;
const char *name = NULL;
const struct hv_ops *ops;
u32 index;
@@ -371,7 +371,7 @@ void __init hvc_opal_init_early(void)
if (!stdout_node)
return;
termno = of_get_property(stdout_node, "reg", NULL);
- index = termno ? *termno : 0;
+ index = termno ? be32_to_cpup(termno) : 0;
if (index >= MAX_NR_HVC_CONSOLES)
return;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index c791b18cdd0..b594abfbf21 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -48,6 +48,7 @@
#include <asm/prom.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
+#include <asm/machdep.h>
#include "hvc_console.h"
@@ -457,7 +458,9 @@ void __init hvc_vio_init_early(void)
if (hvterm_priv0.proto == HV_PROTOCOL_HVSI)
goto out;
#endif
- add_preferred_console("hvc", 0, NULL);
+ /* Check whether the user has requested a different console. */
+ if (!strstr(cmd_line, "console="))
+ add_preferred_console("hvc", 0, NULL);
hvc_instantiate(0, 0, ops);
out:
of_node_put(stdout_node);
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index c193af6a628..636c9baad7a 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -183,7 +183,7 @@ static int dom0_write_console(uint32_t vtermno, const char *str, int len)
{
int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
if (rc < 0)
- return 0;
+ return rc;
return len;
}
@@ -642,7 +642,22 @@ struct console xenboot_console = {
void xen_raw_console_write(const char *str)
{
- dom0_write_console(0, str, strlen(str));
+ ssize_t len = strlen(str);
+ int rc = 0;
+
+ if (xen_domain()) {
+ rc = dom0_write_console(0, str, len);
+#ifdef CONFIG_X86
+ if (rc == -ENOSYS && xen_hvm_domain())
+ goto outb_print;
+
+ } else if (xen_cpuid_base()) {
+ int i;
+outb_print:
+ for (i = 0; i < len; i++)
+ outb(str[i], 0xe9);
+#endif
+ }
}
void xen_raw_printk(const char *fmt, ...)
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index ac2767100df..347050ea414 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -9,7 +9,7 @@
static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
{
- packet->seqno = atomic_inc_return(&pv->seqno);
+ packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
/* Assumes that always succeeds, works in practice */
return pv->put_chars(pv->termno, (char *)packet, packet->len);
@@ -28,7 +28,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
/* Send version query */
q.hdr.type = VS_QUERY_PACKET_HEADER;
q.hdr.len = sizeof(struct hvsi_query);
- q.verb = VSV_SEND_VERSION_NUMBER;
+ q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
hvsi_send_packet(pv, &q.hdr);
}
@@ -40,7 +40,7 @@ static int hvsi_send_close(struct hvsi_priv *pv)
ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
ctrl.hdr.len = sizeof(struct hvsi_control);
- ctrl.verb = VSV_CLOSE_PROTOCOL;
+ ctrl.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
return hvsi_send_packet(pv, &ctrl.hdr);
}
@@ -69,14 +69,14 @@ static void hvsi_got_control(struct hvsi_priv *pv)
{
struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
- switch (pkt->verb) {
+ switch (be16_to_cpu(pkt->verb)) {
case VSV_CLOSE_PROTOCOL:
/* We restart the handshaking */
hvsi_start_handshake(pv);
break;
case VSV_MODEM_CTL_UPDATE:
/* Transition of carrier detect */
- hvsi_cd_change(pv, pkt->word & HVSI_TSCD);
+ hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD);
break;
}
}
@@ -87,7 +87,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
struct hvsi_query_response r;
/* We only handle version queries */
- if (pkt->verb != VSV_SEND_VERSION_NUMBER)
+ if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER)
return;
pr_devel("HVSI@%x: Got version query, sending response...\n",
@@ -96,7 +96,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
/* Send version response */
r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
r.hdr.len = sizeof(struct hvsi_query_response);
- r.verb = VSV_SEND_VERSION_NUMBER;
+ r.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
r.u.version = HVSI_VERSION;
r.query_seqno = pkt->hdr.seqno;
hvsi_send_packet(pv, &r.hdr);
@@ -112,7 +112,7 @@ static void hvsi_got_response(struct hvsi_priv *pv)
switch(r->verb) {
case VSV_SEND_MODEM_CTL_STATUS:
- hvsi_cd_change(pv, r->u.mctrl_word & HVSI_TSCD);
+ hvsi_cd_change(pv, be32_to_cpu(r->u.mctrl_word) & HVSI_TSCD);
pv->mctrl_update = 1;
break;
}
@@ -265,8 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
pv->mctrl_update = 0;
q.hdr.type = VS_QUERY_PACKET_HEADER;
q.hdr.len = sizeof(struct hvsi_query);
- q.hdr.seqno = atomic_inc_return(&pv->seqno);
- q.verb = VSV_SEND_MODEM_CTL_STATUS;
+ q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
rc = hvsi_send_packet(pv, &q.hdr);
if (rc <= 0) {
pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc);
@@ -304,9 +303,9 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
ctrl.hdr.len = sizeof(struct hvsi_control);
- ctrl.verb = VSV_SET_MODEM_CTL;
- ctrl.mask = HVSI_TSDTR;
- ctrl.word = dtr ? HVSI_TSDTR : 0;
+ ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+ ctrl.mask = cpu_to_be32(HVSI_TSDTR);
+ ctrl.word = cpu_to_be32(dtr ? HVSI_TSDTR : 0);
return hvsi_send_packet(pv, &ctrl.hdr);
}
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 0e888621f48..7332e2ca461 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -495,7 +495,7 @@ static int dashtty_write(struct tty_struct *tty, const unsigned char *buf,
count = dport->xmit_cnt;
/* xmit buffer no longer empty? */
if (count)
- INIT_COMPLETION(dport->xmit_empty);
+ reinit_completion(&dport->xmit_empty);
mutex_unlock(&dport->xmit_lock);
if (total) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 7a744b69c3d..7cdd1eb9406 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -767,7 +767,7 @@ static size_t __process_echoes(struct tty_struct *tty)
* of echo overrun before the next commit), then discard enough
* data at the tail to prevent a subsequent overrun */
while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
- if (echo_buf(ldata, tail == ECHO_OP_START)) {
+ if (echo_buf(ldata, tail) == ECHO_OP_START) {
if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB)
tail += 3;
else
@@ -1752,20 +1752,14 @@ int is_ignored(int sig)
static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
- int canon_change = 1;
- if (old)
- canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
- if (canon_change) {
+ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->canon_head = ldata->read_tail;
ldata->erasing = 0;
ldata->lnext = 0;
}
- if (canon_change && !L_ICANON(tty) && read_cnt(ldata))
- wake_up_interruptible(&tty->read_wait);
-
ldata->icanon = (L_ICANON(tty) != 0);
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1820,9 +1814,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty has
* been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
start_tty(tty);
- }
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index d6080c3831e..cd042936955 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -959,7 +959,7 @@ static int receive_flow_control(struct nozomi *dc)
dev_err(&dc->pdev->dev,
"ERROR: flow control received for non-existing port\n");
return 0;
- };
+ }
DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
*((u16 *)&ctrl_dl));
@@ -1025,7 +1025,7 @@ static enum ctrl_port_type port2ctrl(enum port_type port,
dev_err(&dc->pdev->dev,
"ERROR: send flow control " \
"received for non-existing port\n");
- };
+ }
return CTRL_ERROR;
}
@@ -1805,7 +1805,7 @@ static int ntty_ioctl(struct tty_struct *tty,
default:
DBG1("ERR: 0x%08X, %d", cmd, cmd);
break;
- };
+ }
return rval;
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 570df9d2a5d..e33d38cb170 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2322,7 +2322,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
fcr = uart_config[port->type].fcr;
- if (baud < 2400 || fifo_bug) {
+ if ((baud < 2400 && !up->dma) || fifo_bug) {
fcr &= ~UART_FCR_TRIGGER_MASK;
fcr |= UART_FCR_TRIGGER_1;
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index daf710f5c3f..4658e3e0ec4 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -56,11 +56,11 @@
struct dw8250_data {
- int last_lcr;
- int last_mcr;
- int line;
- struct clk *clk;
- u8 usr_reg;
+ u8 usr_reg;
+ int last_mcr;
+ int line;
+ struct clk *clk;
+ struct uart_8250_dma dma;
};
static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
@@ -76,17 +76,33 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
return value;
}
+static void dw8250_force_idle(struct uart_port *p)
+{
+ serial8250_clear_and_reinit_fifos(container_of
+ (p, struct uart_8250_port, port));
+ (void)p->serial_in(p, UART_RX);
+}
+
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
- if (offset == UART_LCR)
- d->last_lcr = value;
-
if (offset == UART_MCR)
d->last_mcr = value;
writeb(value, p->membase + (offset << p->regshift));
+
+ /* Make sure LCR write wasn't ignored */
+ if (offset == UART_LCR) {
+ int tries = 1000;
+ while (tries--) {
+ if (value == p->serial_in(p, UART_LCR))
+ return;
+ dw8250_force_idle(p);
+ writeb(value, p->membase + (UART_LCR << p->regshift));
+ }
+ dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ }
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -107,13 +123,22 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
- if (offset == UART_LCR)
- d->last_lcr = value;
-
if (offset == UART_MCR)
d->last_mcr = value;
writel(value, p->membase + (offset << p->regshift));
+
+ /* Make sure LCR write wasn't ignored */
+ if (offset == UART_LCR) {
+ int tries = 1000;
+ while (tries--) {
+ if (value == p->serial_in(p, UART_LCR))
+ return;
+ dw8250_force_idle(p);
+ writel(value, p->membase + (UART_LCR << p->regshift));
+ }
+ dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ }
}
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
@@ -131,9 +156,8 @@ static int dw8250_handle_irq(struct uart_port *p)
if (serial8250_handle_irq(p, iir)) {
return 1;
} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
- /* Clear the USR and write the LCR again. */
+ /* Clear the USR */
(void)p->serial_in(p, d->usr_reg);
- p->serial_out(p, UART_LCR, d->last_lcr);
return 1;
}
@@ -153,6 +177,14 @@ dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
pm_runtime_put_sync_suspend(port->dev);
}
+static bool dw8250_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw8250_data *data = param;
+
+ return chan->chan_id == data->dma.tx_chan_id ||
+ chan->chan_id == data->dma.rx_chan_id;
+}
+
static void dw8250_setup_port(struct uart_8250_port *up)
{
struct uart_port *p = &up->port;
@@ -241,7 +273,8 @@ static int dw8250_probe_of(struct uart_port *p,
}
#ifdef CONFIG_ACPI
-static int dw8250_probe_acpi(struct uart_8250_port *up)
+static int dw8250_probe_acpi(struct uart_8250_port *up,
+ struct dw8250_data *data)
{
const struct acpi_device_id *id;
struct uart_port *p = &up->port;
@@ -260,9 +293,7 @@ static int dw8250_probe_acpi(struct uart_8250_port *up)
if (!p->uartclk)
p->uartclk = (unsigned int)id->driver_data;
- up->dma = devm_kzalloc(p->dev, sizeof(*up->dma), GFP_KERNEL);
- if (!up->dma)
- return -ENOMEM;
+ up->dma = &data->dma;
up->dma->rxconf.src_maxburst = p->fifosize / 4;
up->dma->txconf.dst_maxburst = p->fifosize / 4;
@@ -270,7 +301,8 @@ static int dw8250_probe_acpi(struct uart_8250_port *up)
return 0;
}
#else
-static inline int dw8250_probe_acpi(struct uart_8250_port *up)
+static inline int dw8250_probe_acpi(struct uart_8250_port *up,
+ struct dw8250_data *data)
{
return -ENODEV;
}
@@ -314,6 +346,12 @@ static int dw8250_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk);
}
+ data->dma.rx_chan_id = -1;
+ data->dma.tx_chan_id = -1;
+ data->dma.rx_param = data;
+ data->dma.tx_param = data;
+ data->dma.fn = dw8250_dma_filter;
+
uart.port.iotype = UPIO_MEM;
uart.port.serial_in = dw8250_serial_in;
uart.port.serial_out = dw8250_serial_out;
@@ -324,7 +362,7 @@ static int dw8250_probe(struct platform_device *pdev)
if (err)
return err;
} else if (ACPI_HANDLE(&pdev->dev)) {
- err = dw8250_probe_acpi(&uart);
+ err = dw8250_probe_acpi(&uart, data);
if (err)
return err;
} else {
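
The dw8250 change above replaces the old "replay the last LCR on the busy interrupt" workaround: the Synopsys UART can silently ignore an LCR write while it is busy, so the driver now reads the register back and, if the value did not stick, idles the port and rewrites it a bounded number of times. A condensed sketch of that loop; raw_write and force_idle are placeholders for the width-specific writers and the serial8250_clear_and_reinit_fifos() path.

#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/device.h>

static void sketch_write_lcr(struct uart_port *p, int value,
                             void (*raw_write)(struct uart_port *, int, int),
                             void (*force_idle)(struct uart_port *))
{
        int tries = 1000;

        raw_write(p, UART_LCR, value);
        while (tries--) {
                if (p->serial_in(p, UART_LCR) == value)
                        return;         /* the write took effect */
                force_idle(p);          /* clear FIFOs, drain RX */
                raw_write(p, UART_LCR, value);
        }
        dev_err(p->dev, "Couldn't set LCR to %d\n", value);
}
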
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 5f3bba12c15..d1a9078003b 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -122,7 +122,7 @@ static int serial8250_em_probe(struct platform_device *pdev)
up.port.dev = &pdev->dev;
up.port.private_data = priv;
- clk_enable(priv->sclk);
+ clk_prepare_enable(priv->sclk);
up.port.uartclk = clk_get_rate(priv->sclk);
up.port.iotype = UPIO_MEM32;
@@ -134,7 +134,7 @@ static int serial8250_em_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
dev_err(&pdev->dev, "unable to register 8250 port\n");
- clk_disable(priv->sclk);
+ clk_disable_unprepare(priv->sclk);
return ret;
}
@@ -148,7 +148,7 @@ static int serial8250_em_remove(struct platform_device *pdev)
struct serial8250_em_priv *priv = platform_get_drvdata(pdev);
serial8250_unregister_port(priv->line);
- clk_disable(priv->sclk);
+ clk_disable_unprepare(priv->sclk);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index c810da7c7a8..4697a514b80 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -9,6 +9,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
+#undef DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -27,8 +28,6 @@
#include "8250.h"
-#undef SERIAL_DEBUG_PCI
-
/*
* init function returns:
* > 0 - number of ports
@@ -63,7 +62,7 @@ static int pci_default_setup(struct serial_private*,
static void moan_device(const char *str, struct pci_dev *dev)
{
- printk(KERN_WARNING
+ dev_err(&dev->dev,
"%s: %s\n"
"Please send the output of lspci -vv, this\n"
"message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
@@ -233,7 +232,7 @@ static int pci_inteli960ni_init(struct pci_dev *dev)
/* is firmware started? */
pci_read_config_dword(dev, 0x44, (void *)&oldval);
if (oldval == 0x00001000L) { /* RESET value */
- printk(KERN_DEBUG "Local i960 firmware missing");
+ dev_dbg(&dev->dev, "Local i960 firmware missing\n");
return -ENODEV;
}
return 0;
@@ -827,7 +826,7 @@ static int pci_netmos_9900_numports(struct pci_dev *dev)
if (sub_serports > 0) {
return sub_serports;
} else {
- printk(KERN_NOTICE "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
+ dev_err(&dev->dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
return 0;
}
}
@@ -931,7 +930,7 @@ static int pci_ite887x_init(struct pci_dev *dev)
}
if (!inta_addr[i]) {
- printk(KERN_ERR "ite887x: could not find iobase\n");
+ dev_err(&dev->dev, "ite887x: could not find iobase\n");
return -ENODEV;
}
@@ -1024,9 +1023,9 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
/* Tornado device */
if (deviceID == 0x07000200) {
number_uarts = ioread8(p + 4);
- printk(KERN_DEBUG
+ dev_dbg(&dev->dev,
"%d ports detected on Oxford PCI Express device\n",
- number_uarts);
+ number_uarts);
}
pci_iounmap(dev, p);
return number_uarts;
@@ -1308,6 +1307,29 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
+static int pci_pericom_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset, maxnr;
+
+ bar = FL_GET_BASE(board->flags);
+ if (board->flags & FL_BASE_BARS)
+ bar += idx;
+ else
+ offset += idx * board->uart_offset;
+
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+ (board->reg_shift + 3);
+
+ if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+ return 1;
+
+ port->port.uartclk = 14745600;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
static int
ce4100_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1324,6 +1346,120 @@ ce4100_serial_setup(struct serial_private *priv,
return ret;
}
+#define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a
+#define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c
+
+#define BYT_PRV_CLK 0x800
+#define BYT_PRV_CLK_EN (1 << 0)
+#define BYT_PRV_CLK_M_VAL_SHIFT 1
+#define BYT_PRV_CLK_N_VAL_SHIFT 16
+#define BYT_PRV_CLK_UPDATE (1 << 31)
+
+#define BYT_GENERAL_REG 0x808
+#define BYT_GENERAL_DIS_RTS_N_OVERRIDE (1 << 3)
+
+#define BYT_TX_OVF_INT 0x820
+#define BYT_TX_OVF_INT_MASK (1 << 1)
+
+static void
+byt_set_termios(struct uart_port *p, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = tty_termios_baud_rate(termios);
+ unsigned int m = 6912;
+ unsigned int n = 15625;
+ u32 reg;
+
+ /* For baud rates 1M, 2M, 3M and 4M the dividers must be adjusted. */
+ if (baud == 1000000 || baud == 2000000 || baud == 4000000) {
+ m = 64;
+ n = 100;
+
+ p->uartclk = 64000000;
+ } else if (baud == 3000000) {
+ m = 48;
+ n = 100;
+
+ p->uartclk = 48000000;
+ } else {
+ p->uartclk = 44236800;
+ }
+
+ /* Reset the clock */
+ reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT);
+ writel(reg, p->membase + BYT_PRV_CLK);
+ reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE;
+ writel(reg, p->membase + BYT_PRV_CLK);
+
+ /*
+ * If auto-handshake mechanism is not enabled,
+ * disable rts_n override
+ */
+ reg = readl(p->membase + BYT_GENERAL_REG);
+ reg &= ~BYT_GENERAL_DIS_RTS_N_OVERRIDE;
+ if (termios->c_cflag & CRTSCTS)
+ reg |= BYT_GENERAL_DIS_RTS_N_OVERRIDE;
+ writel(reg, p->membase + BYT_GENERAL_REG);
+
+ serial8250_do_set_termios(p, termios, old);
+}
+
+static bool byt_dma_filter(struct dma_chan *chan, void *param)
+{
+ return chan->chan_id == *(int *)param;
+}
+
+static int
+byt_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct uart_8250_dma *dma;
+ int ret;
+
+ dma = devm_kzalloc(port->port.dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ switch (priv->dev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT_UART1:
+ dma->rx_chan_id = 3;
+ dma->tx_chan_id = 2;
+ break;
+ case PCI_DEVICE_ID_INTEL_BYT_UART2:
+ dma->rx_chan_id = 5;
+ dma->tx_chan_id = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dma->rxconf.slave_id = dma->rx_chan_id;
+ dma->rxconf.src_maxburst = 16;
+
+ dma->txconf.slave_id = dma->tx_chan_id;
+ dma->txconf.dst_maxburst = 16;
+
+ dma->fn = byt_dma_filter;
+ dma->rx_param = &dma->rx_chan_id;
+ dma->tx_param = &dma->tx_chan_id;
+
+ ret = pci_default_setup(priv, board, port, idx);
+ port->port.iotype = UPIO_MEM;
+ port->port.type = PORT_16550A;
+ port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+ port->port.set_termios = byt_set_termios;
+ port->port.fifosize = 64;
+ port->tx_loadsz = 64;
+ port->dma = dma;
+ port->capabilities = UART_CAP_FIFO | UART_CAP_AFE;
+
+ /* Disable Tx counter interrupts */
+ writel(BYT_TX_OVF_INT_MASK, port->port.membase + BYT_TX_OVF_INT);
+
+ return ret;
+}
+
static int
pci_omegapci_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1344,17 +1480,80 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
return ret;
}
+static int pci_fintek_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct pci_dev *pdev = priv->dev;
+ unsigned long base;
+ unsigned long iobase;
+ unsigned long ciobase = 0;
+ u8 config_base;
+
+ /*
+ * We are supposed to be able to read these from the PCI config space,
+ * but the values there don't seem to match what we need to use, so
+ * just use these hard-coded values for now, as they are correct.
+ */
+ switch (idx) {
+ case 0: iobase = 0xe000; config_base = 0x40; break;
+ case 1: iobase = 0xe008; config_base = 0x48; break;
+ case 2: iobase = 0xe010; config_base = 0x50; break;
+ case 3: iobase = 0xe018; config_base = 0x58; break;
+ case 4: iobase = 0xe020; config_base = 0x60; break;
+ case 5: iobase = 0xe028; config_base = 0x68; break;
+ case 6: iobase = 0xe030; config_base = 0x70; break;
+ case 7: iobase = 0xe038; config_base = 0x78; break;
+ case 8: iobase = 0xe040; config_base = 0x80; break;
+ case 9: iobase = 0xe048; config_base = 0x88; break;
+ case 10: iobase = 0xe050; config_base = 0x90; break;
+ case 11: iobase = 0xe058; config_base = 0x98; break;
+ default:
+ /* Unknown number of ports, get out of here */
+ return -EINVAL;
+ }
+
+ if (idx < 4) {
+ base = pci_resource_start(priv->dev, 3);
+ ciobase = (int)(base + (0x8 * idx));
+ }
+
+ dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%lx ciobase=0x%lx config_base=0x%2x\n",
+ __func__, idx, iobase, ciobase, config_base);
+
+ /* Enable UART I/O port */
+ pci_write_config_byte(pdev, config_base + 0x00, 0x01);
+
+ /* Select 128-byte FIFO and 8x FIFO threshold */
+ pci_write_config_byte(pdev, config_base + 0x01, 0x33);
+
+ /* LSB UART */
+ pci_write_config_byte(pdev, config_base + 0x04, (u8)(iobase & 0xff));
+
+ /* MSB UART */
+ pci_write_config_byte(pdev, config_base + 0x05, (u8)((iobase & 0xff00) >> 8));
+
+ /* irq number, this usually fails, but the spec says to do it anyway. */
+ pci_write_config_byte(pdev, config_base + 0x06, pdev->irq);
+
+ port->port.iotype = UPIO_PORT;
+ port->port.iobase = iobase;
+ port->port.mapbase = 0;
+ port->port.membase = NULL;
+ port->port.regshift = 0;
+
+ return 0;
+}
+
static int skip_tx_en_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
port->port.flags |= UPF_NO_TXEN_TEST;
- printk(KERN_DEBUG "serial8250: skipping TxEn test for device "
- "[%04x:%04x] subsystem [%04x:%04x]\n",
- priv->dev->vendor,
- priv->dev->device,
- priv->dev->subsystem_vendor,
- priv->dev->subsystem_device);
+ dev_dbg(&priv->dev->dev,
+ "serial8250: skipping TxEn test for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ priv->dev->vendor, priv->dev->device,
+ priv->dev->subsystem_vendor, priv->dev->subsystem_device);
return pci_default_setup(priv, board, port, idx);
}
@@ -1662,6 +1861,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = kt_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_UART1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_UART2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
/*
* ITE
*/
@@ -1826,6 +2039,31 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.exit = pci_plx9050_exit,
},
/*
+ * Pericom
+ */
+ {
+ .vendor = 0x12d8,
+ .device = 0x7952,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = 0x12d8,
+ .device = 0x7954,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = 0x12d8,
+ .device = 0x7958,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+
+ /*
* PLX
*/
{
@@ -2255,6 +2493,27 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_brcm_trumanage_setup,
},
+ {
+ .vendor = 0x1c29,
+ .device = 0x1104,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_setup,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1108,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_setup,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1112,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_setup,
+ },
/*
* Default "match everything" terminator entry
@@ -2449,9 +2708,13 @@ enum pci_board_num_t {
pbn_ADDIDATA_PCIe_4_3906250,
pbn_ADDIDATA_PCIe_8_3906250,
pbn_ce4100_1_115200,
+ pbn_byt,
pbn_omegapci,
pbn_NETMOS9900_2s_115200,
pbn_brcm_trumanage,
+ pbn_fintek_4,
+ pbn_fintek_8,
+ pbn_fintek_12,
};
/*
@@ -3185,6 +3448,13 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.reg_shift = 2,
},
+ [pbn_byt] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 2764800,
+ .uart_offset = 0x80,
+ .reg_shift = 2,
+ },
[pbn_omegapci] = {
.flags = FL_BASE0,
.num_ports = 8,
@@ -3202,6 +3472,24 @@ static struct pciserial_board pci_boards[] = {
.reg_shift = 2,
.base_baud = 115200,
},
+ [pbn_fintek_4] = {
+ .num_ports = 4,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
+ [pbn_fintek_8] = {
+ .num_ports = 8,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
+ [pbn_fintek_12] = {
+ .num_ports = 12,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -3362,14 +3650,15 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
if (quirk->setup(priv, board, &uart, i))
break;
-#ifdef SERIAL_DEBUG_PCI
- printk(KERN_DEBUG "Setup PCI port: port %lx, irq %d, type %d\n",
- uart.port.iobase, uart.port.irq, uart.port.iotype);
-#endif
+ dev_dbg(&dev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
+ uart.port.iobase, uart.port.irq, uart.port.iotype);
priv->line[i] = serial8250_register_8250_port(&uart);
if (priv->line[i] < 0) {
- printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), priv->line[i]);
+ dev_err(&dev->dev,
+ "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
+ uart.port.iobase, uart.port.irq,
+ uart.port.iotype, priv->line[i]);
break;
}
}
@@ -3462,7 +3751,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
}
if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
- printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
+ dev_err(&dev->dev, "invalid driver_data: %ld\n",
ent->driver_data);
return -EINVAL;
}
@@ -3520,8 +3809,6 @@ static void pciserial_remove_one(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
-
pciserial_remove_ports(priv);
pci_disable_device(dev);
@@ -3555,7 +3842,7 @@ static int pciserial_resume_one(struct pci_dev *dev)
err = pci_enable_device(dev);
/* FIXME: We cannot simply error out here */
if (err)
- printk(KERN_ERR "pciserial: Unable to re-enable ports, trying to continue.\n");
+ dev_err(&dev->dev, "Unable to re-enable ports, trying to continue.\n");
pciserial_resume_ports(priv);
}
return 0;
@@ -4848,6 +5135,15 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ce4100_1_115200 },
+ /* Intel BayTrail */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT_UART1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT_UART2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
/*
* Cronyx Omega PCI
@@ -4918,6 +5214,11 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
0, pbn_exar_XR17V358 },
+ /* Fintek PCI serial cards */
+ { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
+ { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
+ { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
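byt_serial_setup()/byt_set_termios() above program the BayTrail LPSS fractional clock divider: the UART input clock is scaled by M/N before the usual 8250 baud divisor is applied. With the default m = 6912 and n = 15625, and assuming a 100 MHz input clock for illustration, the UART clock is 100 MHz * 6912 / 15625 = 44.2368 MHz, which matches the 44236800 written to p->uartclk and is 16 times the 2764800 base_baud of the new pbn_byt board entry. A small sketch of that arithmetic:

/* Sketch only: BayTrail-style fractional M/N clock arithmetic. */
#include <stdio.h>

int main(void)
{
	const unsigned long long input_hz = 100000000ULL;	/* assumed input clock */
	const unsigned int m = 6912, n = 15625;			/* defaults from the hunk above */
	unsigned long long uartclk = input_hz * m / n;		/* 44236800 Hz */

	printf("uartclk   = %llu Hz\n", uartclk);
	printf("base_baud = %llu (uartclk / 16)\n", uartclk / 16);
	return 0;
}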
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index febd45cd502..a3817ab8602 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -709,7 +709,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE)
+ depends on HAVE_CLK && (SUPERH || ARM || COMPILE_TEST)
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
@@ -1512,6 +1512,7 @@ config SERIAL_FSL_LPUART_CONSOLE
config SERIAL_ST_ASC
tristate "ST ASC serial port support"
select SERIAL_CORE
+ depends on ARM || COMPILE_TEST
help
This driver is for the on-chip Asynchronous Serial Controller on
STMicroelectronics STi SoCs.
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 8b90f0b6dfd..33bd8606be6 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -728,7 +728,6 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
amba_set_drvdata(dev, uap);
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
- amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
clk_put(uap->clk);
unmap:
@@ -745,8 +744,6 @@ static int pl010_remove(struct amba_device *dev)
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
- amba_set_drvdata(dev, NULL);
-
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index aaa22867e65..7203864992a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2147,7 +2147,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_set_drvdata(dev, uap);
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
- amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
pl011_dma_remove(uap);
}
@@ -2160,8 +2159,6 @@ static int pl011_remove(struct amba_device *dev)
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
- amba_set_drvdata(dev, NULL);
-
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 569872f4c9b..c9f5c9dcc15 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -533,7 +533,7 @@ arc_uart_init_one(struct platform_device *pdev, int dev_id)
unsigned long *plat_data;
struct arc_uart_port *uart = &arc_uart_ports[dev_id];
- plat_data = (unsigned long *)dev_get_platdata(&pdev->dev);
+ plat_data = dev_get_platdata(&pdev->dev);
if (!plat_data)
return -ENODEV;
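The arc_uart hunk above, like the later ifx6x60, mpsc, samsung and Blackfin ones, drops the explicit cast on dev_get_platdata(): the helper returns void *, which C converts implicitly to any object pointer type, so the cast is redundant noise. A short illustration with hypothetical names:

/* Sketch only: void * converts implicitly, no cast needed. */
struct my_platform_data { int id; };	/* hypothetical */

void *my_get_platdata(void);		/* stands in for dev_get_platdata() */

static void example(void)
{
	struct my_platform_data *pd = my_get_platdata();	/* no cast required */
	(void)pd;
}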
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 6b0f75eac8a..c7d99af46a9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -99,6 +99,7 @@ static void atmel_stop_rx(struct uart_port *port);
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
#define UART_GET_IP_NAME(port) __raw_readl((port)->membase + ATMEL_US_NAME)
+#define UART_GET_IP_VERSION(port) __raw_readl((port)->membase + ATMEL_US_VERSION)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -1503,6 +1504,7 @@ static void atmel_get_ip_name(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int name = UART_GET_IP_NAME(port);
+ u32 version;
int usart, uart;
/* usart and uart ascii */
usart = 0x55534152;
@@ -1517,7 +1519,22 @@ static void atmel_get_ip_name(struct uart_port *port)
dev_dbg(port->dev, "This is uart\n");
atmel_port->is_usart = false;
} else {
- dev_err(port->dev, "Not supported ip name, set to uart\n");
+ /* fallback for older SoCs: use version field */
+ version = UART_GET_IP_VERSION(port);
+ switch (version) {
+ case 0x302:
+ case 0x10213:
+ dev_dbg(port->dev, "This version is usart\n");
+ atmel_port->is_usart = true;
+ break;
+ case 0x203:
+ case 0x10202:
+ dev_dbg(port->dev, "This version is uart\n");
+ atmel_port->is_usart = false;
+ break;
+ default:
+ dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
+ }
}
}
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index 87636cc61a2..4f229703328 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -766,9 +766,8 @@ static int sport_uart_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev),
- DRV_NAME);
+ ret = peripheral_request_list(dev_get_platdata(&pdev->dev),
+ DRV_NAME);
if (ret) {
dev_err(&pdev->dev,
"Fail to request SPORT peripherals\n");
@@ -844,8 +843,7 @@ static int sport_uart_probe(struct platform_device *pdev)
out_error_unmap:
iounmap(sport->port.membase);
out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
out_error_free_mem:
kfree(sport);
bfin_sport_uart_ports[pdev->id] = NULL;
@@ -864,8 +862,7 @@ static int sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
iounmap(sport->port.membase);
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
kfree(sport);
bfin_sport_uart_ports[pdev->id] = NULL;
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 3c75e8e0402..869ceba2ec5 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -680,7 +680,7 @@ static int bfin_serial_startup(struct uart_port *port)
default:
uart_dma_ch_rx = uart_dma_ch_tx = 0;
break;
- };
+ }
if (uart_dma_ch_rx &&
request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
@@ -726,7 +726,7 @@ static int bfin_serial_startup(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0) {
if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
- IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ 0, "BFIN_UART_MODEM_STATUS", uart)) {
uart->cts_pin = -1;
dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
}
@@ -765,7 +765,7 @@ static void bfin_serial_shutdown(struct uart_port *port)
break;
default:
break;
- };
+ }
#endif
free_irq(uart->rx_irq, uart);
free_irq(uart->tx_irq, uart);
@@ -1240,7 +1240,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
*/
#endif
ret = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev),
+ dev_get_platdata(&pdev->dev),
DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
@@ -1358,8 +1358,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
out_error_unmap:
iounmap(uart->port.membase);
out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
out_error_free_mem:
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
@@ -1377,8 +1376,7 @@ static int bfin_serial_remove(struct platform_device *pdev)
if (uart) {
uart_remove_one_port(&bfin_serial_reg, &uart->port);
iounmap(uart->port.membase);
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
}
@@ -1432,8 +1430,8 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev), DRIVER_NAME);
+ ret = peripheral_request_list(dev_get_platdata(&pdev->dev),
+ DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
"fail to request bfin serial peripherals\n");
@@ -1463,8 +1461,7 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
return 0;
out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
return ret;
}
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 7e4e4088471..8d0b994357c 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -459,7 +459,6 @@ static int uart_clps711x_probe(struct platform_device *pdev)
ret = uart_register_driver(&s->uart);
if (ret) {
dev_err(&pdev->dev, "Registering UART driver failed\n");
- devm_clk_put(&pdev->dev, s->uart_clk);
return ret;
}
@@ -487,7 +486,6 @@ static int uart_clps711x_remove(struct platform_device *pdev)
for (i = 0; i < UART_CLPS711X_NR; i++)
uart_remove_one_port(&s->uart, &s->port[i]);
- devm_clk_put(&pdev->dev, s->uart_clk);
uart_unregister_driver(&s->uart);
return 0;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 1a535f70dc4..7d76214612c 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -41,6 +41,8 @@
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/fs_uart_pd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
@@ -1207,7 +1209,7 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
spin_lock_init(&pinfo->port.lock);
- pinfo->port.irq = of_irq_to_resource(np, 0, NULL);
+ pinfo->port.irq = irq_of_parse_and_map(np, 0);
if (pinfo->port.irq == NO_IRQ) {
ret = -EINVAL;
goto out_pram;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 18f79575894..527a969b095 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -45,6 +45,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include "cpm_uart.h"
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index af286e6713e..59039097099 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1008,7 +1008,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
return -ENODEV;
}
- pl_data = (struct ifx_modem_platform_data *)dev_get_platdata(&spi->dev);
+ pl_data = dev_get_platdata(&spi->dev);
if (!pl_data) {
dev_err(&spi->dev, "missing platform data!");
return -ENODEV;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 042aa077b5b..b2cfdb66194 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -223,8 +223,7 @@ struct imx_port {
struct dma_chan *dma_chan_rx, *dma_chan_tx;
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
- unsigned int rx_bytes, tx_bytes;
- struct work_struct tsk_dma_rx, tsk_dma_tx;
+ unsigned int tx_bytes;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
};
@@ -505,34 +504,25 @@ static void dma_tx_callback(void *data)
dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
return;
}
-
- schedule_work(&sport->tsk_dma_tx);
}
-static void dma_tx_work(struct work_struct *w)
+static void imx_dma_tx(struct imx_port *sport)
{
- struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
enum dma_status status;
- unsigned long flags;
int ret;
- status = chan->device->device_tx_status(chan, (dma_cookie_t)0, NULL);
+ status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
if (DMA_IN_PROGRESS == status)
return;
- spin_lock_irqsave(&sport->port.lock, flags);
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (sport->tx_bytes == 0) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
- return;
- }
- if (xmit->tail > xmit->head) {
+ if (xmit->tail > xmit->head && xmit->head > 0) {
sport->dma_tx_nents = 2;
sg_init_table(sgl, 2);
sg_set_buf(sgl, xmit->buf + xmit->tail,
@@ -542,7 +532,6 @@ static void dma_tx_work(struct work_struct *w)
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
if (ret == 0) {
@@ -609,11 +598,7 @@ static void imx_start_tx(struct uart_port *port)
}
if (sport->dma_is_enabled) {
- /*
- * We may in the interrupt context, so arise a work_struct to
- * do the real job.
- */
- schedule_work(&sport->tsk_dma_tx);
+ imx_dma_tx(sport);
return;
}
@@ -732,6 +717,7 @@ out:
return IRQ_HANDLED;
}
+static int start_rx_dma(struct imx_port *sport);
/*
* If the RXFIFO is filled with some data, start a
* DMA operation to receive it.
@@ -750,7 +736,7 @@ static void imx_dma_rxint(struct imx_port *sport)
writel(temp, sport->port.membase + UCR1);
/* tell the DMA to receive the data. */
- schedule_work(&sport->tsk_dma_rx);
+ start_rx_dma(sport);
}
}
@@ -795,8 +781,15 @@ static irqreturn_t imx_int(int irq, void *dev_id)
static unsigned int imx_tx_empty(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
+ unsigned int ret;
+
+ ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
- return (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
+ /* If the TX DMA is working, return 0. */
+ if (sport->dma_is_enabled && sport->dma_is_txing)
+ ret = 0;
+
+ return ret;
}
/*
@@ -865,22 +858,6 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
}
#define RX_BUF_SIZE (PAGE_SIZE)
-static int start_rx_dma(struct imx_port *sport);
-static void dma_rx_work(struct work_struct *w)
-{
- struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_rx);
- struct tty_port *port = &sport->port.state->port;
-
- if (sport->rx_bytes) {
- tty_insert_flip_string(port, sport->rx_buf, sport->rx_bytes);
- tty_flip_buffer_push(port);
- sport->rx_bytes = 0;
- }
-
- if (sport->dma_is_rxing)
- start_rx_dma(sport);
-}
-
static void imx_rx_dma_done(struct imx_port *sport)
{
unsigned long temp;
@@ -912,6 +889,7 @@ static void dma_rx_callback(void *data)
struct imx_port *sport = data;
struct dma_chan *chan = sport->dma_chan_rx;
struct scatterlist *sgl = &sport->rx_sgl;
+ struct tty_port *port = &sport->port.state->port;
struct dma_tx_state state;
enum dma_status status;
unsigned int count;
@@ -919,13 +897,15 @@ static void dma_rx_callback(void *data)
/* unmap it first */
dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
- status = chan->device->device_tx_status(chan, (dma_cookie_t)0, &state);
+ status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
count = RX_BUF_SIZE - state.residue;
dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
if (count) {
- sport->rx_bytes = count;
- schedule_work(&sport->tsk_dma_rx);
+ tty_insert_flip_string(port, sport->rx_buf, count);
+ tty_flip_buffer_push(port);
+
+ start_rx_dma(sport);
} else
imx_rx_dma_done(sport);
}
@@ -1007,7 +987,6 @@ static int imx_uart_dma_init(struct imx_port *sport)
ret = -ENOMEM;
goto err;
}
- sport->rx_bytes = 0;
/* Prepare for TX : */
sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
@@ -1038,11 +1017,7 @@ err:
static void imx_enable_dma(struct imx_port *sport)
{
unsigned long temp;
- struct tty_port *port = &sport->port.state->port;
- port->low_latency = 1;
- INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
- INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
init_waitqueue_head(&sport->dma_wait);
/* set UCR1 */
@@ -1063,7 +1038,6 @@ static void imx_enable_dma(struct imx_port *sport)
static void imx_disable_dma(struct imx_port *sport)
{
unsigned long temp;
- struct tty_port *port = &sport->port.state->port;
/* clear UCR1 */
temp = readl(sport->port.membase + UCR1);
@@ -1081,7 +1055,6 @@ static void imx_disable_dma(struct imx_port *sport)
writel(temp, sport->port.membase + UCR4);
sport->dma_is_enabled = 0;
- port->low_latency = 0;
}
/* half the RX buffer size */
@@ -1303,6 +1276,16 @@ static void imx_shutdown(struct uart_port *port)
clk_disable_unprepare(sport->clk_ipg);
}
+static void imx_flush_buffer(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ if (sport->dma_is_enabled) {
+ sport->tx_bytes = 0;
+ dmaengine_terminate_all(sport->dma_chan_tx);
+ }
+}
+
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -1539,7 +1522,7 @@ imx_verify_port(struct uart_port *port, struct serial_struct *ser)
ret = -EINVAL;
if (sport->port.uartclk / 16 != ser->baud_base)
ret = -EINVAL;
- if ((void *)sport->port.mapbase != ser->iomem_base)
+ if (sport->port.mapbase != (unsigned long)ser->iomem_base)
ret = -EINVAL;
if (sport->port.iobase != ser->port)
ret = -EINVAL;
@@ -1623,6 +1606,7 @@ static struct uart_ops imx_pops = {
.break_ctl = imx_break_ctl,
.startup = imx_startup,
.shutdown = imx_shutdown,
+ .flush_buffer = imx_flush_buffer,
.set_termios = imx_set_termios,
.type = imx_type,
.release_port = imx_release_port,
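The imx hunks above remove the tsk_dma_rx/tsk_dma_tx work items: RX and TX DMA are now (re)started directly from the interrupt path and the dmaengine completion callbacks, and the open-coded chan->device->device_tx_status() calls become the dmaengine_tx_status() wrapper, with the received byte count derived from the reported residue. A minimal sketch of that status/residue pattern, with example_bytes_received() and BUF_SIZE as placeholder names:

/* Sketch only: querying a dmaengine channel for transfer residue. */
#include <linux/dmaengine.h>

#define BUF_SIZE 4096	/* placeholder RX buffer size */

static unsigned int example_bytes_received(struct dma_chan *chan)
{
	struct dma_tx_state state;

	/* cookie 0 queries the channel's most recent descriptor */
	dmaengine_tx_status(chan, 0, &state);

	/* state.residue is how many bytes the DMA still has to transfer */
	return BUF_SIZE - state.residue;
}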
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index cb3c81eb099..1d9420548e1 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -832,7 +832,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
up->curregs[5] |= Tx8;
up->parity_mask = 0xff;
break;
- };
+ }
up->curregs[4] &= ~0x0c;
if (cflag & CSTOPB)
up->curregs[4] |= SB2;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index b2e707aa603..8d71e4047bb 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -690,7 +690,7 @@ static void max310x_handle_tx(struct uart_port *port)
max310x_port_write(port, MAX310X_THR_REG,
xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- };
+ }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index d3db042f649..52c930fac21 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -293,7 +293,7 @@ static void serial_hsu_enable_ms(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
}
-void hsu_dma_tx(struct uart_hsu_port *up)
+static void hsu_dma_tx(struct uart_hsu_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
struct hsu_dma_buffer *dbuf = &up->txbuf;
@@ -340,7 +340,8 @@ void hsu_dma_tx(struct uart_hsu_port *up)
}
/* The buffer is already cache coherent */
-void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
+static void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc,
+ struct hsu_dma_buffer *dbuf)
{
dbuf->ofs = 0;
@@ -386,7 +387,8 @@ static void serial_hsu_stop_tx(struct uart_port *port)
/* This is always called in spinlock protected mode, so
* modify timeout timer is safe here */
-void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts, unsigned long *flags)
+static void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts,
+ unsigned long *flags)
{
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
@@ -1183,7 +1185,7 @@ static struct console serial_hsu_console = {
#define SERIAL_HSU_CONSOLE NULL
#endif
-struct uart_ops serial_hsu_pops = {
+static struct uart_ops serial_hsu_pops = {
.tx_empty = serial_hsu_tx_empty,
.set_mctrl = serial_hsu_set_mctrl,
.get_mctrl = serial_hsu_get_mctrl,
@@ -1451,7 +1453,6 @@ static void serial_hsu_remove(struct pci_dev *pdev)
uart_remove_one_port(&serial_hsu_reg, &up->port);
}
- pci_set_drvdata(pdev, NULL);
free_irq(pdev->irq, priv);
pci_disable_device(pdev);
}
@@ -1504,4 +1505,4 @@ module_init(hsu_pci_init);
module_exit(hsu_pci_exit);
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:medfield-hsu");
+MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 5be1df39f9f..ec06505e3ae 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1301,7 +1301,6 @@ static struct uart_ops mpc52xx_uart_ops = {
.shutdown = mpc52xx_uart_shutdown,
.set_termios = mpc52xx_uart_set_termios,
/* .pm = mpc52xx_uart_pm, Not supported yet */
-/* .set_wake = mpc52xx_uart_set_wake, Not supported yet */
.type = mpc52xx_uart_type,
.release_port = mpc52xx_uart_release_port,
.request_port = mpc52xx_uart_request_port,
@@ -1766,7 +1765,7 @@ mpc52xx_uart_of_remove(struct platform_device *op)
static int
mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
{
- struct uart_port *port = (struct uart_port *) platform_get_drvdata(op);
+ struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_suspend_port(&mpc52xx_uart_driver, port);
@@ -1777,7 +1776,7 @@ mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
static int
mpc52xx_uart_of_resume(struct platform_device *op)
{
- struct uart_port *port = (struct uart_port *) platform_get_drvdata(op);
+ struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_resume_port(&mpc52xx_uart_driver, port);
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 8d702677acc..e30a3ca3cea 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -2030,7 +2030,7 @@ static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
{
struct mpsc_pdata *pdata;
- pdata = (struct mpsc_pdata *)dev_get_platdata(&pd->dev);
+ pdata = dev_get_platdata(&pd->dev);
pi->port.uartclk = pdata->brg_clk_freq;
pi->port.iotype = UPIO_MEM;
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index a67e7081f00..db0448ae59d 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -43,6 +43,7 @@
#include <linux/kthread.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "mrst_max3110.h"
@@ -61,6 +62,7 @@ struct uart_max3110 {
struct task_struct *main_thread;
struct task_struct *read_thread;
struct mutex thread_mutex;
+ struct mutex io_mutex;
u32 baud;
u16 cur_conf;
@@ -90,6 +92,7 @@ static int max3110_write_then_read(struct uart_max3110 *max,
struct spi_transfer x;
int ret;
+ mutex_lock(&max->io_mutex);
spi_message_init(&message);
memset(&x, 0, sizeof x);
x.len = len;
@@ -104,6 +107,7 @@ static int max3110_write_then_read(struct uart_max3110 *max,
/* Do the i/o */
ret = spi_sync(spi, &message);
+ mutex_unlock(&max->io_mutex);
return ret;
}
@@ -491,19 +495,9 @@ static int serial_m3110_startup(struct uart_port *port)
port->state->port.low_latency = 1;
if (max->irq) {
- max->read_thread = NULL;
- ret = request_irq(max->irq, serial_m3110_irq,
- IRQ_TYPE_EDGE_FALLING, "max3110", max);
- if (ret) {
- max->irq = 0;
- pr_err(PR_FMT "unable to allocate IRQ, polling\n");
- } else {
- /* Enable RX IRQ only */
- config |= WC_RXA_IRQ_ENABLE;
- }
- }
-
- if (max->irq == 0) {
+ /* Enable RX IRQ only */
+ config |= WC_RXA_IRQ_ENABLE;
+ } else {
/* If IRQ is disabled, start a read thread for input data */
max->read_thread =
kthread_run(max3110_read_thread, max, "max3110_read");
@@ -517,8 +511,6 @@ static int serial_m3110_startup(struct uart_port *port)
ret = max3110_out(max, config);
if (ret) {
- if (max->irq)
- free_irq(max->irq, max);
if (max->read_thread)
kthread_stop(max->read_thread);
max->read_thread = NULL;
@@ -540,9 +532,6 @@ static void serial_m3110_shutdown(struct uart_port *port)
max->read_thread = NULL;
}
- if (max->irq)
- free_irq(max->irq, max);
-
/* Disable interrupts from this port */
config = WC_TAG | WC_SW_SHDI;
max3110_out(max, config);
@@ -749,7 +738,8 @@ static int serial_m3110_suspend(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct uart_max3110 *max = spi_get_drvdata(spi);
- disable_irq(max->irq);
+ if (max->irq > 0)
+ disable_irq(max->irq);
uart_suspend_port(&serial_m3110_reg, &max->port);
max3110_out(max, max->cur_conf | WC_SW_SHDI);
return 0;
@@ -762,7 +752,8 @@ static int serial_m3110_resume(struct device *dev)
max3110_out(max, max->cur_conf);
uart_resume_port(&serial_m3110_reg, &max->port);
- enable_irq(max->irq);
+ if (max->irq > 0)
+ enable_irq(max->irq);
return 0;
}
@@ -803,6 +794,7 @@ static int serial_m3110_probe(struct spi_device *spi)
max->irq = (u16)spi->irq;
mutex_init(&max->thread_mutex);
+ mutex_init(&max->io_mutex);
max->word_7bits = 0;
max->parity = 0;
@@ -840,6 +832,16 @@ static int serial_m3110_probe(struct spi_device *spi)
goto err_kthread;
}
+ if (max->irq) {
+ ret = request_irq(max->irq, serial_m3110_irq,
+ IRQ_TYPE_EDGE_FALLING, "max3110", max);
+ if (ret) {
+ max->irq = 0;
+ dev_warn(&spi->dev,
+ "unable to allocate IRQ, will use polling method\n");
+ }
+ }
+
spi_set_drvdata(spi, max);
pmax = max;
@@ -867,6 +869,9 @@ static int serial_m3110_remove(struct spi_device *dev)
free_page((unsigned long)max->con_xmit.buf);
+ if (max->irq)
+ free_irq(max->irq, max);
+
if (max->main_thread)
kthread_stop(max->main_thread);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 10e9d70b5c4..d8b6fee77a0 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -39,6 +39,7 @@
#include <asm/cacheflush.h>
#define MXS_AUART_PORTS 5
+#define MXS_AUART_FIFO_SIZE 16
#define AUART_CTRL0 0x00000000
#define AUART_CTRL0_SET 0x00000004
@@ -548,6 +549,9 @@ static int mxs_auart_dma_init(struct mxs_auart_port *s)
s->flags |= MXS_AUART_DMA_ENABLED;
dev_dbg(s->dev, "enabled the DMA support.");
+ /* The DMA buffer is now the FIFO the TTY subsystem can use */
+ s->port.fifosize = UART_XMIT_SIZE;
+
return 0;
err_out:
@@ -741,6 +745,9 @@ static int mxs_auart_startup(struct uart_port *u)
writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
u->membase + AUART_INTR);
+ /* Reset FIFO size (it could have changed if DMA was enabled) */
+ u->fifosize = MXS_AUART_FIFO_SIZE;
+
/*
* Enable fifo so all four bytes of a DMA word are written to
* output (otherwise, only the LSB is written, ie. 1 in 4 bytes)
@@ -1056,7 +1063,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.membase = ioremap(r->start, resource_size(r));
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
- s->port.fifosize = 16;
+ s->port.fifosize = MXS_AUART_FIFO_SIZE;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
s->port.dev = s->dev = &pdev->dev;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 816d1a23f9d..fa511ebab67 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -39,6 +39,7 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/serial-omap.h>
@@ -134,6 +135,7 @@ struct uart_omap_port {
struct uart_port port;
struct uart_omap_dma uart_dma;
struct device *dev;
+ int wakeirq;
unsigned char ier;
unsigned char lcr;
@@ -175,7 +177,7 @@ struct uart_omap_port {
bool is_suspending;
};
-#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
+#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
@@ -214,10 +216,23 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
return pdata->get_context_loss_count(up->dev);
}
+static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
+ bool enable)
+{
+ if (!up->wakeirq)
+ return;
+
+ if (enable)
+ enable_irq(up->wakeirq);
+ else
+ disable_irq(up->wakeirq);
+}
+
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
+ serial_omap_enable_wakeirq(up, enable);
if (!pdata || !pdata->enable_wakeup)
return;
@@ -242,12 +257,12 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
unsigned int n16 = port->uartclk / (16 * baud);
int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
- if(baudAbsDiff13 < 0)
+ if (baudAbsDiff13 < 0)
baudAbsDiff13 = -baudAbsDiff13;
- if(baudAbsDiff16 < 0)
+ if (baudAbsDiff16 < 0)
baudAbsDiff16 = -baudAbsDiff16;
- return (baudAbsDiff13 > baudAbsDiff16);
+ return (baudAbsDiff13 >= baudAbsDiff16);
}
/*
@@ -258,13 +273,13 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
- unsigned int divisor;
+ unsigned int mode;
if (!serial_omap_baud_is_mode16(port, baud))
- divisor = 13;
+ mode = 13;
else
- divisor = 16;
- return port->uartclk/(baud * divisor);
+ mode = 16;
+ return port->uartclk/(mode * baud);
}
static void serial_omap_enable_ms(struct uart_port *port)
@@ -283,28 +298,40 @@ static void serial_omap_enable_ms(struct uart_port *port)
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- struct circ_buf *xmit = &up->port.state->xmit;
int res;
pm_runtime_get_sync(up->dev);
- /* handle rs485 */
+ /* Handle RS-485 */
if (up->rs485.flags & SER_RS485_ENABLED) {
- /* do nothing if current tx not yet completed */
- res = serial_in(up, UART_LSR) & UART_LSR_TEMT;
- if (!res)
- return;
-
- /* if there's no more data to send, turn off rts */
- if (uart_circ_empty(xmit)) {
- /* if rts not already disabled */
+ if (up->scr & OMAP_UART_SCR_TX_EMPTY) {
+ /* THR interrupt is fired when both TX FIFO and TX
+ * shift register are empty. This means there's nothing
+ * left to transmit now, so make sure the THR interrupt
+ * is fired when TX FIFO is below the trigger level,
+ * disable THR interrupts and toggle the RS-485 GPIO
+ * data direction pin if needed.
+ */
+ up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
res = (up->rs485.flags & SER_RS485_RTS_AFTER_SEND) ? 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
- if (up->rs485.delay_rts_after_send > 0) {
+ if (up->rs485.delay_rts_after_send > 0)
mdelay(up->rs485.delay_rts_after_send);
- }
gpio_set_value(up->rts_gpio, res);
}
+ } else {
+ /* We're asked to stop, but there's still stuff in the
+ * UART FIFO, so make sure the THR interrupt is fired
+ * when both TX FIFO and TX shift register are empty.
+ * The next THR interrupt (if no transmission is started
+ * in the meantime) will indicate the end of a
+ * transmission. Therefore we _don't_ disable THR
+ * interrupts in this situation.
+ */
+ up->scr |= OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ return;
}
}
@@ -384,15 +411,18 @@ static void serial_omap_start_tx(struct uart_port *port)
pm_runtime_get_sync(up->dev);
- /* handle rs485 */
+ /* Handle RS-485 */
if (up->rs485.flags & SER_RS485_ENABLED) {
+ /* Fire THR interrupts when FIFO is below trigger level */
+ up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
+
/* if rts not already enabled */
res = (up->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
gpio_set_value(up->rts_gpio, res);
- if (up->rs485.delay_rts_before_send > 0) {
+ if (up->rs485.delay_rts_before_send > 0)
mdelay(up->rs485.delay_rts_before_send);
- }
}
}
@@ -699,6 +729,20 @@ static int serial_omap_startup(struct uart_port *port)
if (retval)
return retval;
+ /* Optional wake-up IRQ */
+ if (up->wakeirq) {
+ retval = request_irq(up->wakeirq, serial_omap_irq,
+ up->port.irqflags, up->name, up);
+ if (retval) {
+ free_irq(up->port.irq, up);
+ return retval;
+ }
+ disable_irq(up->wakeirq);
+ } else {
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
+ }
+
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
@@ -787,6 +831,8 @@ static void serial_omap_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
free_irq(up->port.irq, up);
+ if (up->wakeirq)
+ free_irq(up->wakeirq, up);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
@@ -938,7 +984,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
*/
/* Set receive FIFO threshold to 16 characters and
- * transmit FIFO threshold to 16 spaces
+ * transmit FIFO threshold to 32 spaces
*/
up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK;
@@ -1060,15 +1106,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
-static int serial_omap_set_wake(struct uart_port *port, unsigned int state)
-{
- struct uart_omap_port *up = to_uart_omap_port(port);
-
- serial_omap_enable_wakeup(up, state);
-
- return 0;
-}
-
static void
serial_omap_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
@@ -1353,6 +1390,15 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
up->ier = mode;
serial_out(up, UART_IER, up->ier);
+ /* If RS-485 is disabled, make sure the THR interrupt is fired when
+ * TX FIFO is below the trigger level.
+ */
+ if (!(up->rs485.flags & SER_RS485_ENABLED) &&
+ (up->scr & OMAP_UART_SCR_TX_EMPTY)) {
+ up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ }
+
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
@@ -1401,7 +1447,6 @@ static struct uart_ops serial_omap_pops = {
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
- .set_wake = serial_omap_set_wake,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
@@ -1582,11 +1627,23 @@ static int serial_omap_probe(struct platform_device *pdev)
struct uart_omap_port *up;
struct resource *mem, *irq;
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
- int ret;
+ int ret, uartirq = 0, wakeirq = 0;
+ /* The optional wakeirq may be specified in the board dts file */
if (pdev->dev.of_node) {
+ uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!uartirq)
+ return -EPROBE_DEFER;
+ wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
omap_up_info = of_get_uart_port_info(&pdev->dev);
pdev->dev.platform_data = omap_up_info;
+ } else {
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -ENODEV;
+ }
+ uartirq = irq->start;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1595,12 +1652,6 @@ static int serial_omap_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no irq resource?\n");
- return -ENODEV;
- }
-
if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
pdev->dev.driver->name)) {
dev_err(&pdev->dev, "memory region already claimed\n");
@@ -1634,7 +1685,8 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
- up->port.irq = irq->start;
+ up->port.irq = uartirq;
+ up->wakeirq = wakeirq;
up->port.regshift = 2;
up->port.fifosize = 64;
@@ -1670,8 +1722,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.uartclk = omap_up_info->uartclk;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
- dev_warn(&pdev->dev, "No clock speed specified: using default:"
- "%d\n", DEFAULT_CLK_SPEED);
+ dev_warn(&pdev->dev,
+ "No clock speed specified: using default: %d\n",
+ DEFAULT_CLK_SPEED);
}
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
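serial_omap_get_divisor() above now treats 13 and 16 as oversampling modes rather than divisors: serial_omap_baud_is_mode16() picks whichever mode gives the smaller baud error (preferring 16x on a tie, hence the >= change), and the divisor is uartclk / (mode * baud). With a typical 48 MHz functional clock at 921600 baud, 16x oversampling can only reach 48 MHz / (16 * 3) = 1 Mbaud (about 78 kbaud of error) while 13x reaches 48 MHz / (13 * 4), roughly 923.1 kbaud (about 1.5 kbaud of error), so mode 13 wins. A small sketch mirroring that comparison:

/* Sketch only: choose 13x or 16x oversampling by baud error, then divide. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int divisor_for(unsigned int uartclk, unsigned int baud)
{
	/* assumes baud is low enough that both counts are nonzero */
	unsigned int n13 = uartclk / (13 * baud);
	unsigned int n16 = uartclk / (16 * baud);
	int err13 = abs((int)baud - (int)(uartclk / (13 * n13)));
	int err16 = abs((int)baud - (int)(uartclk / (16 * n16)));
	unsigned int mode = (err13 >= err16) ? 16 : 13;	/* ties go to 16x */

	return uartclk / (mode * baud);
}

int main(void)
{
	printf("divisor = %u\n", divisor_for(48000000, 921600));	/* 4, via mode 13 */
	return 0;
}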
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 44077c0b767..0aa2b528ef3 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1614,7 +1614,6 @@ static struct uart_ops pch_uart_ops = {
.shutdown = pch_uart_shutdown,
.set_termios = pch_uart_set_termios,
/* .pm = pch_uart_pm, Not supported yet */
-/* .set_wake = pch_uart_set_wake, Not supported yet */
.type = pch_uart_type,
.release_port = pch_uart_release_port,
.request_port = pch_uart_request_port,
@@ -1996,6 +1995,8 @@ module_exit(pch_uart_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel EG20T PCH UART PCI Driver");
+MODULE_DEVICE_TABLE(pci, pch_uart_pci_id);
+
module_param(default_baud, uint, S_IRUGO);
MODULE_PARM_DESC(default_baud,
"Default BAUD for initial driver state and console (default 9600)");
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index f87f1a0c8c6..481b781b26e 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -57,6 +57,8 @@
#include <linux/bitops.h>
#include <linux/sysrq.h>
#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -1072,7 +1074,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
uap->curregs[5] |= Tx8;
uap->parity_mask = 0xff;
break;
- };
+ }
uap->curregs[4] &= ~(SB_MASK);
if (cflag & CSTOPB)
uap->curregs[4] |= SB2;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index ba25722a713..753d4525b36 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -647,7 +647,10 @@ void sa1100_register_uart_fns(struct sa1100_port_fns *fns)
sa1100_pops.set_mctrl = fns->set_mctrl;
sa1100_pops.pm = fns->pm;
- sa1100_pops.set_wake = fns->set_wake;
+ /*
+ * FIXME: fns->set_wake is unused - this should be called from
+ * the suspend() callback if device_may_wakeup(dev) is set.
+ */
}
void __init sa1100_register_uart(int idx, int port)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f3dfa19a1cb..c1af04d4668 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -407,7 +407,14 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* todo - possibly remove AFC and do manual CTS */
+ unsigned int umcon = rd_regl(port, S3C2410_UMCON);
+
+ if (mctrl & TIOCM_RTS)
+ umcon |= S3C2410_UMCOM_RTS_LOW;
+ else
+ umcon &= ~S3C2410_UMCOM_RTS_LOW;
+
+ wr_regl(port, S3C2410_UMCON, umcon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
@@ -774,8 +781,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (termios->c_cflag & CSTOPB)
ulcon |= S3C2410_LCON_STOPB;
- umcon = (termios->c_cflag & CRTSCTS) ? S3C2410_UMCOM_AFC : 0;
-
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
ulcon |= S3C2410_LCON_PODD;
@@ -792,6 +797,15 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
+
+ umcon = rd_regl(port, S3C2410_UMCON);
+ if (termios->c_cflag & CRTSCTS) {
+ umcon |= S3C2410_UMCOM_AFC;
+ /* Disable RTS when RX FIFO contains 63 bytes */
+ umcon &= ~S3C2412_UMCON_AFC_8;
+ } else {
+ umcon &= ~S3C2410_UMCOM_AFC;
+ }
wr_regl(port, S3C2410_UMCON, umcon);
if (ourport->info->has_divslot)
@@ -1254,7 +1268,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->baudclk = ERR_PTR(-EINVAL);
ourport->info = ourport->drv_data->info;
ourport->cfg = (dev_get_platdata(&pdev->dev)) ?
- (struct s3c2410_uartcfg *)dev_get_platdata(&pdev->dev) :
+ dev_get_platdata(&pdev->dev) :
ourport->drv_data->def_cfg;
ourport->port.fifosize = (ourport->info->fifosize) ?
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index aaa617a6c49..8827e5424ce 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -63,7 +63,7 @@ struct s3c24xx_uart_port {
/* conversion functions */
-#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
+#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
/* register access controls */
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 49e9bbfe6ca..a447f71538e 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -986,6 +986,7 @@ static int sccnxp_probe(struct platform_device *pdev)
return 0;
}
+ uart_unregister_driver(&s->uart);
err_out:
if (!IS_ERR(s->regulator))
return regulator_disable(s->regulator);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 0489a2bdcdf..dfe79ccc4fb 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1018,7 +1018,7 @@ static int tegra_uart_startup(struct uart_port *u)
goto fail_hw_init;
}
- ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+ ret = request_irq(u->irq, tegra_uart_isr, 0,
dev_name(u->dev), tup);
if (ret < 0) {
dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 440a962412d..90a080b1f9e 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1220,8 +1220,6 @@ static void pciserial_txx9_remove_one(struct pci_dev *dev)
{
struct uart_txx9_port *up = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
-
if (up) {
serial_txx9_unregister_port(up->port.line);
pci_disable_device(dev);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 537750261aa..7d8103cd3e2 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work)
desc = s->desc_rx[new];
if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
/* Handle incomplete DMA receive */
struct dma_chan *chan = s->chan_rx;
struct shdma_desc *sh_desc = container_of(desc,
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 61c1ad03db5..f186a8fb888 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -529,7 +529,7 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
while (sirfport->rx_completed != sirfport->rx_issued) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
- sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
@@ -706,12 +706,19 @@ static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long flags;
spin_lock_irqsave(&sirfport->rx_lock, flags);
while (sirfport->rx_completed != sirfport->rx_issued) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
- sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ uint_en->sirfsoc_rx_timeout_en)
+ sirfsoc_rx_submit_one_dma_desc(port,
+ sirfport->rx_completed++);
+ else
+ sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index fb8d0a00260..b7d679c0881 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -368,15 +368,6 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFSOC_UART_NR 6
#define SIRFSOC_PORT_TYPE 0xa5
-/* Baud Rate Calculation */
-#define SIRF_MIN_SAMPLE_DIV 0xf
-#define SIRF_MAX_SAMPLE_DIV 0x3f
-#define SIRF_IOCLK_DIV_MAX 0xffff
-#define SIRF_SAMPLE_DIV_SHIFT 16
-#define SIRF_IOCLK_DIV_MASK 0xffff
-#define SIRF_SAMPLE_DIV_MASK 0x3f0000
-#define SIRF_BAUD_RATE_SUPPORT_NR 18
-
/* Uart Common Use Macro*/
#define SIRFSOC_RX_DMA_BUF_SIZE 256
#define BYTES_TO_ALIGN(dma_addr) ((unsigned long)(dma_addr) & 0x3)
@@ -453,9 +444,6 @@ struct sirfsoc_uart_port {
int rx_issued;
};
-/* Hardware Flow Control */
-#define SIRFUART_AFC_CTRL_RX_THD 0x70
-
/* Register Access Control */
#define portaddr(port, reg) ((port)->membase + (reg))
#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5d6136b2a04..380fb5355cb 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -894,7 +894,7 @@ static int sunsab_console_setup(struct console *con, char *options)
case B115200: baud = 115200; break;
case B230400: baud = 230400; break;
case B460800: baud = 460800; break;
- };
+ }
/*
* Temporary fix.
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 699cc1b5f6a..db79b76f5c8 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -522,7 +522,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break)
serio_interrupt(&up->serio, ch, 0);
#endif
break;
- };
+ }
}
} while (serial_in(up, UART_LSR) & UART_LSR_DR);
}
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 135a1520353..45a8c6aa583 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -319,7 +319,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
serio_interrupt(&up->serio, ch, 0);
#endif
break;
- };
+ }
}
}
@@ -897,7 +897,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
up->curregs[R5] |= Tx8;
up->parity_mask = 0xff;
break;
- };
+ }
up->curregs[R4] &= ~0x0c;
if (cflag & CSTOPB)
up->curregs[R4] |= SB2;
@@ -1239,7 +1239,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
default: case B9600: baud = 9600; break;
case B19200: baud = 19200; break;
case B38400: baud = 38400; break;
- };
+ }
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 88317482b81..d569ca58bab 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -25,6 +25,8 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
@@ -269,7 +271,7 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
return 1;
bdp++;
- };
+ }
}
/*
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 7e4150aa69c..e46e9f3f19b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1,7 +1,7 @@
/*
* Xilinx PS UART driver
*
- * 2011 (c) Xilinx Inc.
+ * 2011 - 2013 (C) Xilinx Inc.
*
* This program is free software; you can redistribute it
* and/or modify it under the terms of the GNU General Public
@@ -11,13 +11,17 @@
*
*/
+#if defined(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/platform_device.h>
#include <linux/serial.h>
+#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/console.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
@@ -29,12 +33,22 @@
#define XUARTPS_MAJOR 0 /* use dynamic node allocation */
#define XUARTPS_MINOR 0 /* works best with devtmpfs */
#define XUARTPS_NR_PORTS 2
-#define XUARTPS_FIFO_SIZE 16 /* FIFO size */
+#define XUARTPS_FIFO_SIZE 64 /* FIFO size */
#define XUARTPS_REGISTER_SPACE 0xFFF
#define xuartps_readl(offset) ioread32(port->membase + offset)
#define xuartps_writel(val, offset) iowrite32(val, port->membase + offset)
+/* Rx Trigger level */
+static unsigned int rx_trigger_level = 56;
+module_param(rx_trigger_level, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
+
+/* Rx Timeout */
+static unsigned int rx_timeout = 10;
+module_param(rx_timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
+
/********************************Register Map********************************/
/** UART
*
@@ -128,6 +142,9 @@
#define XUARTPS_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
#define XUARTPS_IXR_MASK 0x00001FFF /* Valid bit mask */
+/* Goes in read_status_mask for break detection as the HW doesn't do it */
+#define XUARTPS_IXR_BRK 0x80000000
+
/** Channel Status Register
*
* The channel status register (CSR) is provided to enable the control logic
@@ -139,15 +156,27 @@
#define XUARTPS_SR_TXFULL 0x00000010 /* TX FIFO full */
#define XUARTPS_SR_RXTRIG 0x00000001 /* Rx Trigger */
+/* baud dividers min/max values */
+#define XUARTPS_BDIV_MIN 4
+#define XUARTPS_BDIV_MAX 255
+#define XUARTPS_CD_MAX 65535
+
/**
* struct xuartps - device data
- * @refclk Reference clock
- * @aperclk APB clock
+ * @port: Pointer to the UART port
+ * @refclk: Reference clock
+ * @aperclk: APB clock
+ * @baud: Current baud rate
+ * @clk_rate_change_nb: Notifier block for clock changes
*/
struct xuartps {
+ struct uart_port *port;
struct clk *refclk;
struct clk *aperclk;
+ unsigned int baud;
+ struct notifier_block clk_rate_change_nb;
};
+#define to_xuartps(_nb) container_of(_nb, struct xuartps, clk_rate_change_nb)
/**
* xuartps_isr - Interrupt handler
@@ -171,6 +200,23 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
*/
isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);
+ /*
+ * There is no hardware break detection, so we interpret framing
+ * error with all-zeros data as a break sequence. Most of the time,
+ * there's another non-zero byte at the end of the sequence.
+ */
+
+ if (isrstatus & XUARTPS_IXR_FRAMING) {
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) &
+ XUARTPS_SR_RXEMPTY)) {
+ if (!xuartps_readl(XUARTPS_FIFO_OFFSET)) {
+ port->read_status_mask |= XUARTPS_IXR_BRK;
+ isrstatus &= ~XUARTPS_IXR_FRAMING;
+ }
+ }
+ xuartps_writel(XUARTPS_IXR_FRAMING, XUARTPS_ISR_OFFSET);
+ }
+
/* drop byte with parity error if IGNPAR specified */
if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);
@@ -184,6 +230,30 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
while ((xuartps_readl(XUARTPS_SR_OFFSET) &
XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
data = xuartps_readl(XUARTPS_FIFO_OFFSET);
+
+ /* Non-zero byte after BREAK is garbage (99%) */
+ if (data && (port->read_status_mask &
+ XUARTPS_IXR_BRK)) {
+ port->read_status_mask &= ~XUARTPS_IXR_BRK;
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ /*
+ * uart_handle_sysrq_char() doesn't work if
+ * spinlocked, for some reason
+ */
+ if (port->sysrq) {
+ spin_unlock(&port->lock);
+ if (uart_handle_sysrq_char(port,
+ (unsigned char)data)) {
+ spin_lock(&port->lock);
+ continue;
+ }
+ spin_lock(&port->lock);
+ }
+
port->icount.rx++;
if (isrstatus & XUARTPS_IXR_PARITY) {
@@ -247,63 +317,196 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
}
/**
- * xuartps_set_baud_rate - Calculate and set the baud rate
- * @port: Handle to the uart port structure
- * @baud: Baud rate to set
- *
+ * xuartps_calc_baud_divs - Calculate baud rate divisors
+ * @clk: UART module input clock
+ * @baud: Desired baud rate
+ * @rbdiv: BDIV value (return value)
+ * @rcd: CD value (return value)
+ * @div8: Value for clk_sel bit in mod (return value)
* Returns baud rate, requested baud when possible, or actual baud when there
- * was too much error
- **/
-static unsigned int xuartps_set_baud_rate(struct uart_port *port,
- unsigned int baud)
+ * was too much error, or zero if no valid divisors are found.
+ *
+ * Formula to obtain baud rate is
+ * baud_tx/rx rate = clk / (CD * (BDIV + 1))
+ * input_clk = (Uart User Defined Clock or Apb Clock)
+ * depends on UCLKEN in MR Reg
+ * clk = input_clk or input_clk/8;
+ * depends on CLKS in MR reg
+ * CD and BDIV depend on the values in the
+ * baud rate generator register and the
+ * baud rate clock divisor register
+ */
+static unsigned int xuartps_calc_baud_divs(unsigned int clk, unsigned int baud,
+ u32 *rbdiv, u32 *rcd, int *div8)
{
- unsigned int sel_clk;
- unsigned int calc_baud = 0;
- unsigned int brgr_val, brdiv_val;
+ u32 cd, bdiv;
+ unsigned int calc_baud;
+ unsigned int bestbaud = 0;
unsigned int bauderror;
+ unsigned int besterror = ~0;
- /* Formula to obtain baud rate is
- * baud_tx/rx rate = sel_clk/CD * (BDIV + 1)
- * input_clk = (Uart User Defined Clock or Apb Clock)
- * depends on UCLKEN in MR Reg
- * sel_clk = input_clk or input_clk/8;
- * depends on CLKS in MR reg
- * CD and BDIV depends on values in
- * baud rate generate register
- * baud rate clock divisor register
- */
- sel_clk = port->uartclk;
- if (xuartps_readl(XUARTPS_MR_OFFSET) & XUARTPS_MR_CLKSEL)
- sel_clk = sel_clk / 8;
-
- /* Find the best values for baud generation */
- for (brdiv_val = 4; brdiv_val < 255; brdiv_val++) {
+ if (baud < clk / ((XUARTPS_BDIV_MAX + 1) * XUARTPS_CD_MAX)) {
+ *div8 = 1;
+ clk /= 8;
+ } else {
+ *div8 = 0;
+ }
- brgr_val = sel_clk / (baud * (brdiv_val + 1));
- if (brgr_val < 2 || brgr_val > 65535)
+ for (bdiv = XUARTPS_BDIV_MIN; bdiv <= XUARTPS_BDIV_MAX; bdiv++) {
+ cd = DIV_ROUND_CLOSEST(clk, baud * (bdiv + 1));
+ if (cd < 1 || cd > XUARTPS_CD_MAX)
continue;
- calc_baud = sel_clk / (brgr_val * (brdiv_val + 1));
+ calc_baud = clk / (cd * (bdiv + 1));
if (baud > calc_baud)
bauderror = baud - calc_baud;
else
bauderror = calc_baud - baud;
- /* use the values when percent error is acceptable */
- if (((bauderror * 100) / baud) < 3) {
- calc_baud = baud;
- break;
+ if (besterror > bauderror) {
+ *rbdiv = bdiv;
+ *rcd = cd;
+ bestbaud = calc_baud;
+ besterror = bauderror;
}
}
+ /* use the values when percent error is acceptable */
+ if (((besterror * 100) / baud) < 3)
+ bestbaud = baud;
+
+ return bestbaud;
+}
- /* Set the values for the new baud rate */
- xuartps_writel(brgr_val, XUARTPS_BAUDGEN_OFFSET);
- xuartps_writel(brdiv_val, XUARTPS_BAUDDIV_OFFSET);
+/**
+ * xuartps_set_baud_rate - Calculate and set the baud rate
+ * @port: Handle to the uart port structure
+ * @baud: Baud rate to set
+ * Returns baud rate, requested baud when possible, or actual baud when there
+ * was too much error, or zero if no valid divisors are found.
+ */
+static unsigned int xuartps_set_baud_rate(struct uart_port *port,
+ unsigned int baud)
+{
+ unsigned int calc_baud;
+ u32 cd = 0, bdiv = 0;
+ u32 mreg;
+ int div8;
+ struct xuartps *xuartps = port->private_data;
+
+ calc_baud = xuartps_calc_baud_divs(port->uartclk, baud, &bdiv, &cd,
+ &div8);
+
+ /* Write new divisors to hardware */
+ mreg = xuartps_readl(XUARTPS_MR_OFFSET);
+ if (div8)
+ mreg |= XUARTPS_MR_CLKSEL;
+ else
+ mreg &= ~XUARTPS_MR_CLKSEL;
+ xuartps_writel(mreg, XUARTPS_MR_OFFSET);
+ xuartps_writel(cd, XUARTPS_BAUDGEN_OFFSET);
+ xuartps_writel(bdiv, XUARTPS_BAUDDIV_OFFSET);
+ xuartps->baud = baud;
return calc_baud;
}
+#ifdef CONFIG_COMMON_CLK
+/**
+ * xuartps_clk_notifier_cb - Clock notifier callback
+ * @nb: Notifier block
+ * @event: Notify event
+ * @data: Notifier data
+ * Returns NOTIFY_OK on success, NOTIFY_BAD on error.
+ */
+static int xuartps_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ u32 ctrl_reg;
+ struct uart_port *port;
+ int locked = 0;
+ struct clk_notifier_data *ndata = data;
+ unsigned long flags = 0;
+ struct xuartps *xuartps = to_xuartps(nb);
+
+ port = xuartps->port;
+ if (port->suspended)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ {
+ u32 bdiv;
+ u32 cd;
+ int div8;
+
+ /*
+ * Find out if current baud-rate can be achieved with new clock
+ * frequency.
+ */
+ if (!xuartps_calc_baud_divs(ndata->new_rate, xuartps->baud,
+ &bdiv, &cd, &div8))
+ return NOTIFY_BAD;
+
+ spin_lock_irqsave(&xuartps->port->lock, flags);
+
+ /* Disable the TX and RX to set baud rate */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS),
+ XUARTPS_CR_OFFSET);
+
+ spin_unlock_irqrestore(&xuartps->port->lock, flags);
+
+ return NOTIFY_OK;
+ }
+ case POST_RATE_CHANGE:
+ /*
+ * Set clk dividers to generate correct baud with new clock
+ * frequency.
+ */
+
+ spin_lock_irqsave(&xuartps->port->lock, flags);
+
+ locked = 1;
+ port->uartclk = ndata->new_rate;
+
+ xuartps->baud = xuartps_set_baud_rate(xuartps->port,
+ xuartps->baud);
+ /* fall through */
+ case ABORT_RATE_CHANGE:
+ if (!locked)
+ spin_lock_irqsave(&xuartps->port->lock, flags);
+
+ /* Set TX/RX Reset */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST),
+ XUARTPS_CR_OFFSET);
+
+ while (xuartps_readl(XUARTPS_CR_OFFSET) &
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST))
+ cpu_relax();
+
+ /*
+ * Clear the RX disable and TX disable bits and then set the TX
+ * enable bit and RX enable bit to enable the transmitter and
+ * receiver.
+ */
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
+ ctrl_reg = xuartps_readl(XUARTPS_CR_OFFSET);
+ xuartps_writel(
+ (ctrl_reg & ~(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS)) |
+ (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
+ XUARTPS_CR_OFFSET);
+
+ spin_unlock_irqrestore(&xuartps->port->lock, flags);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+#endif
+
/*----------------------Uart Operations---------------------------*/
/**
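
The divisor search added above follows directly from baud = clk / (CD * (BDIV + 1)): try every BDIV, round CD to the nearest integer, and keep the pair with the smallest error, falling back to the /8 prescaler only for very low rates. A minimal user-space sketch of that search under the XUARTPS_BDIV_MIN/MAX and XUARTPS_CD_MAX limits quoted in the hunk (all names here are illustrative, not the driver's):

#include <stdio.h>

#define BDIV_MIN 4
#define BDIV_MAX 255
#define CD_MAX   65535

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Return the achievable baud rate and the chosen divisors, 0 on failure. */
static unsigned int calc_baud_divs(unsigned int clk, unsigned int baud,
                                   unsigned int *rbdiv, unsigned int *rcd,
                                   int *div8)
{
        unsigned int bdiv, cd, calc, err, best = 0, besterr = ~0u;

        /* Very low rates need the extra /8 prescaler (CLKSEL). */
        if (baud < clk / ((BDIV_MAX + 1) * CD_MAX)) {
                *div8 = 1;
                clk /= 8;
        } else {
                *div8 = 0;
        }

        for (bdiv = BDIV_MIN; bdiv <= BDIV_MAX; bdiv++) {
                cd = DIV_ROUND_CLOSEST(clk, baud * (bdiv + 1));
                if (cd < 1 || cd > CD_MAX)
                        continue;
                calc = clk / (cd * (bdiv + 1));
                err = calc > baud ? calc - baud : baud - calc;
                if (err < besterr) {
                        *rbdiv = bdiv;
                        *rcd = cd;
                        best = calc;
                        besterr = err;
                }
        }
        /* Report the requested rate when the error stays under 3%. */
        if (best && (besterr * 100) / baud < 3)
                best = baud;
        return best;
}

int main(void)
{
        unsigned int bdiv = 0, cd = 0;
        int div8 = 0;
        unsigned int got = calc_baud_divs(100000000, 115200, &bdiv, &cd, &div8);

        printf("baud=%u cd=%u bdiv=%u div8=%d\n", got, cd, bdiv, div8);
        return 0;
}
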
@@ -346,7 +549,7 @@ static void xuartps_start_tx(struct uart_port *port)
port->state->xmit.tail = (port->state->xmit.tail + 1) &
(UART_XMIT_SIZE - 1);
}
-
+ xuartps_writel(XUARTPS_IXR_TXEMPTY, XUARTPS_ISR_OFFSET);
/* Enable the TX Empty interrupt */
xuartps_writel(XUARTPS_IXR_TXEMPTY, XUARTPS_IER_OFFSET);
@@ -437,7 +640,7 @@ static void xuartps_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
unsigned int cval = 0;
- unsigned int baud;
+ unsigned int baud, minbaud, maxbaud;
unsigned long flags;
unsigned int ctrl_reg, mode_reg;
@@ -454,8 +657,14 @@ static void xuartps_set_termios(struct uart_port *port,
(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS),
XUARTPS_CR_OFFSET);
- /* Min baud rate = 6bps and Max Baud Rate is 10Mbps for 100Mhz clk */
- baud = uart_get_baud_rate(port, termios, old, 0, 10000000);
+ /*
+ * Min baud rate = 6 bps and max baud rate is 10 Mbps for a 100 MHz clk.
+ * The min and max baud are calculated here based on port->uartclk;
+ * this way we get a valid baud and can safely call set_baud().
+ */
+ minbaud = port->uartclk / ((XUARTPS_BDIV_MAX + 1) * XUARTPS_CD_MAX * 8);
+ maxbaud = port->uartclk / (XUARTPS_BDIV_MIN + 1);
+ baud = uart_get_baud_rate(port, termios, old, minbaud, maxbaud);
baud = xuartps_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -480,7 +689,7 @@ static void xuartps_set_termios(struct uart_port *port,
| (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
XUARTPS_CR_OFFSET);
- xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
port->read_status_mask = XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_RXTRIG |
XUARTPS_IXR_OVERRUN | XUARTPS_IXR_TOUT;
@@ -531,13 +740,17 @@ static void xuartps_set_termios(struct uart_port *port,
cval |= XUARTPS_MR_PARITY_MARK;
else
cval |= XUARTPS_MR_PARITY_SPACE;
- } else if (termios->c_cflag & PARODD)
+ } else {
+ if (termios->c_cflag & PARODD)
cval |= XUARTPS_MR_PARITY_ODD;
else
cval |= XUARTPS_MR_PARITY_EVEN;
- } else
+ }
+ } else {
cval |= XUARTPS_MR_PARITY_NONE;
- xuartps_writel(cval , XUARTPS_MR_OFFSET);
+ }
+ cval |= mode_reg & 1;
+ xuartps_writel(cval, XUARTPS_MR_OFFSET);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -583,11 +796,17 @@ static int xuartps_startup(struct uart_port *port)
| XUARTPS_MR_PARITY_NONE | XUARTPS_MR_CHARLEN_8_BIT,
XUARTPS_MR_OFFSET);
- /* Set the RX FIFO Trigger level to 14 assuming FIFO size as 16 */
- xuartps_writel(14, XUARTPS_RXWM_OFFSET);
+ /*
+ * Set the RX FIFO Trigger level to use most of the FIFO, but it
+ * can be tuned with a module parameter
+ */
+ xuartps_writel(rx_trigger_level, XUARTPS_RXWM_OFFSET);
- /* Receive Timeout register is enabled with value of 10 */
- xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
+ /*
+ * Receive Timeout register is enabled but it
+ * can be tuned with a module parameter
+ */
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
/* Clear out any pending interrupts before enabling them */
xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET);
@@ -727,6 +946,54 @@ static void xuartps_enable_ms(struct uart_port *port)
/* N/A */
}
+#ifdef CONFIG_CONSOLE_POLL
+static int xuartps_poll_get_char(struct uart_port *port)
+{
+ u32 imr;
+ int c;
+
+ /* Disable all interrupts */
+ imr = xuartps_readl(XUARTPS_IMR_OFFSET);
+ xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+
+ /* Check if FIFO is empty */
+ if (xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_RXEMPTY)
+ c = NO_POLL_CHAR;
+ else /* Read a character */
+ c = (unsigned char) xuartps_readl(XUARTPS_FIFO_OFFSET);
+
+ /* Enable interrupts */
+ xuartps_writel(imr, XUARTPS_IER_OFFSET);
+
+ return c;
+}
+
+static void xuartps_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ u32 imr;
+
+ /* Disable all interrupts */
+ imr = xuartps_readl(XUARTPS_IMR_OFFSET);
+ xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+
+ /* Wait until FIFO is empty */
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_TXEMPTY))
+ cpu_relax();
+
+ /* Write a character */
+ xuartps_writel(c, XUARTPS_FIFO_OFFSET);
+
+ /* Wait until FIFO is empty */
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_TXEMPTY))
+ cpu_relax();
+
+ /* Enable interrupts */
+ xuartps_writel(imr, XUARTPS_IER_OFFSET);
+
+ return;
+}
+#endif
+
/** The UART operations structure
*/
static struct uart_ops xuartps_ops = {
@@ -759,6 +1026,10 @@ static struct uart_ops xuartps_ops = {
.config_port = xuartps_config_port, /* Configure when driver
* adds a xuartps port
*/
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = xuartps_poll_get_char,
+ .poll_put_char = xuartps_poll_put_char,
+#endif
};
static struct uart_port xuartps_port[2];
@@ -837,7 +1108,7 @@ static void xuartps_console_write(struct console *co, const char *s,
{
struct uart_port *port = &xuartps_port[co->index];
unsigned long flags;
- unsigned int imr;
+ unsigned int imr, ctrl;
int locked = 1;
if (oops_in_progress)
@@ -849,9 +1120,19 @@ static void xuartps_console_write(struct console *co, const char *s,
imr = xuartps_readl(XUARTPS_IMR_OFFSET);
xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+ /*
+ * Make sure that the tx part is enabled. Set the TX enable bit and
+ * clear the TX disable bit to enable the transmitter.
+ */
+ ctrl = xuartps_readl(XUARTPS_CR_OFFSET);
+ xuartps_writel((ctrl & ~XUARTPS_CR_TX_DIS) | XUARTPS_CR_TX_EN,
+ XUARTPS_CR_OFFSET);
+
uart_console_write(port, s, count, xuartps_console_putchar);
xuartps_console_wait_tx(port);
+ xuartps_writel(ctrl, XUARTPS_CR_OFFSET);
+
/* restore interrupt state, it seems like there may be a h/w bug
* in that the interrupt enable register should not need to be
* written based on the data sheet
@@ -933,6 +1214,119 @@ static struct uart_driver xuartps_uart_driver = {
#endif
};
+#ifdef CONFIG_PM_SLEEP
+/**
+ * xuartps_suspend - suspend event
+ * @device: Pointer to the device structure
+ *
+ * Returns 0
+ */
+static int xuartps_suspend(struct device *device)
+{
+ struct uart_port *port = dev_get_drvdata(device);
+ struct tty_struct *tty;
+ struct device *tty_dev;
+ int may_wake = 0;
+
+ /* Get the tty which could be NULL so don't assume it's valid */
+ tty = tty_port_tty_get(&port->state->port);
+ if (tty) {
+ tty_dev = tty->dev;
+ may_wake = device_may_wakeup(tty_dev);
+ tty_kref_put(tty);
+ }
+
+ /*
+ * Call the API provided in serial_core.c file which handles
+ * the suspend.
+ */
+ uart_suspend_port(&xuartps_uart_driver, port);
+ if (console_suspend_enabled && !may_wake) {
+ struct xuartps *xuartps = port->private_data;
+
+ clk_disable(xuartps->refclk);
+ clk_disable(xuartps->aperclk);
+ } else {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* Empty the receive FIFO before making changes */
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_RXEMPTY))
+ xuartps_readl(XUARTPS_FIFO_OFFSET);
+ /* set RX trigger level to 1 */
+ xuartps_writel(1, XUARTPS_RXWM_OFFSET);
+ /* disable RX timeout interrupts */
+ xuartps_writel(XUARTPS_IXR_TOUT, XUARTPS_IDR_OFFSET);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * xuartps_resume - Resume after a previous suspend
+ * @device: Pointer to the device structure
+ *
+ * Returns 0
+ */
+static int xuartps_resume(struct device *device)
+{
+ struct uart_port *port = dev_get_drvdata(device);
+ unsigned long flags = 0;
+ u32 ctrl_reg;
+ struct tty_struct *tty;
+ struct device *tty_dev;
+ int may_wake = 0;
+
+ /* Get the tty which could be NULL so don't assume it's valid */
+ tty = tty_port_tty_get(&port->state->port);
+ if (tty) {
+ tty_dev = tty->dev;
+ may_wake = device_may_wakeup(tty_dev);
+ tty_kref_put(tty);
+ }
+
+ if (console_suspend_enabled && !may_wake) {
+ struct xuartps *xuartps = port->private_data;
+
+ clk_enable(xuartps->aperclk);
+ clk_enable(xuartps->refclk);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Set TX/RX Reset */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST),
+ XUARTPS_CR_OFFSET);
+ while (xuartps_readl(XUARTPS_CR_OFFSET) &
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST))
+ cpu_relax();
+
+ /* restore rx timeout value */
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
+ /* Enable Tx/Rx */
+ ctrl_reg = xuartps_readl(XUARTPS_CR_OFFSET);
+ xuartps_writel(
+ (ctrl_reg & ~(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS)) |
+ (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
+ XUARTPS_CR_OFFSET);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ } else {
+ spin_lock_irqsave(&port->lock, flags);
+ /* restore original rx trigger level */
+ xuartps_writel(rx_trigger_level, XUARTPS_RXWM_OFFSET);
+ /* enable RX timeout interrupt */
+ xuartps_writel(XUARTPS_IXR_TOUT, XUARTPS_IER_OFFSET);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return uart_resume_port(&xuartps_uart_driver, port);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(xuartps_dev_pm_ops, xuartps_suspend, xuartps_resume);
+
/* ---------------------------------------------------------------------
* Platform bus binding
*/
@@ -949,27 +1343,26 @@ static int xuartps_probe(struct platform_device *pdev)
struct resource *res, *res2;
struct xuartps *xuartps_data;
- xuartps_data = kzalloc(sizeof(*xuartps_data), GFP_KERNEL);
+ xuartps_data = devm_kzalloc(&pdev->dev, sizeof(*xuartps_data),
+ GFP_KERNEL);
if (!xuartps_data)
return -ENOMEM;
- xuartps_data->aperclk = clk_get(&pdev->dev, "aper_clk");
+ xuartps_data->aperclk = devm_clk_get(&pdev->dev, "aper_clk");
if (IS_ERR(xuartps_data->aperclk)) {
dev_err(&pdev->dev, "aper_clk clock not found.\n");
- rc = PTR_ERR(xuartps_data->aperclk);
- goto err_out_free;
+ return PTR_ERR(xuartps_data->aperclk);
}
- xuartps_data->refclk = clk_get(&pdev->dev, "ref_clk");
+ xuartps_data->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xuartps_data->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
- rc = PTR_ERR(xuartps_data->refclk);
- goto err_out_clk_put_aper;
+ return PTR_ERR(xuartps_data->refclk);
}
rc = clk_prepare_enable(xuartps_data->aperclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable APER clock.\n");
- goto err_out_clk_put;
+ return rc;
}
rc = clk_prepare_enable(xuartps_data->refclk);
if (rc) {
@@ -989,13 +1382,21 @@ static int xuartps_probe(struct platform_device *pdev)
goto err_out_clk_disable;
}
+#ifdef CONFIG_COMMON_CLK
+ xuartps_data->clk_rate_change_nb.notifier_call =
+ xuartps_clk_notifier_cb;
+ if (clk_notifier_register(xuartps_data->refclk,
+ &xuartps_data->clk_rate_change_nb))
+ dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
+#endif
+
/* Initialize the port structure */
port = xuartps_get_port();
if (!port) {
dev_err(&pdev->dev, "Cannot get uart_port structure\n");
rc = -ENODEV;
- goto err_out_clk_disable;
+ goto err_out_notif_unreg;
} else {
/* Register the port.
* This function also registers this device with the tty layer
@@ -1006,26 +1407,26 @@ static int xuartps_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(xuartps_data->refclk);
port->private_data = xuartps_data;
+ xuartps_data->port = port;
platform_set_drvdata(pdev, port);
rc = uart_add_one_port(&xuartps_uart_driver, port);
if (rc) {
dev_err(&pdev->dev,
"uart_add_one_port() failed; err=%i\n", rc);
- goto err_out_clk_disable;
+ goto err_out_notif_unreg;
}
return 0;
}
+err_out_notif_unreg:
+#ifdef CONFIG_COMMON_CLK
+ clk_notifier_unregister(xuartps_data->refclk,
+ &xuartps_data->clk_rate_change_nb);
+#endif
err_out_clk_disable:
clk_disable_unprepare(xuartps_data->refclk);
err_out_clk_dis_aper:
clk_disable_unprepare(xuartps_data->aperclk);
-err_out_clk_put:
- clk_put(xuartps_data->refclk);
-err_out_clk_put_aper:
- clk_put(xuartps_data->aperclk);
-err_out_free:
- kfree(xuartps_data);
return rc;
}
@@ -1043,13 +1444,14 @@ static int xuartps_remove(struct platform_device *pdev)
int rc;
/* Remove the xuartps port from the serial core */
+#ifdef CONFIG_COMMON_CLK
+ clk_notifier_unregister(xuartps_data->refclk,
+ &xuartps_data->clk_rate_change_nb);
+#endif
rc = uart_remove_one_port(&xuartps_uart_driver, port);
port->mapbase = 0;
clk_disable_unprepare(xuartps_data->refclk);
clk_disable_unprepare(xuartps_data->aperclk);
- clk_put(xuartps_data->refclk);
- clk_put(xuartps_data->aperclk);
- kfree(xuartps_data);
return rc;
}
@@ -1067,6 +1469,7 @@ static struct platform_driver xuartps_platform_driver = {
.owner = THIS_MODULE,
.name = XUARTPS_NAME, /* Driver name */
.of_match_table = xuartps_of_match,
+ .pm = &xuartps_dev_pm_ops,
},
};
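
The notifier registered in probe() above follows the common clk PRE_RATE_CHANGE / POST_RATE_CHANGE / ABORT_RATE_CHANGE protocol: veto the change when the current baud rate cannot be met at the new rate, then reprogram the dividers once the rate has actually changed (or restore them on abort). A stripped-down sketch of that pattern; the clk notifier API (clk_notifier_register(), struct clk_notifier_data, the NOTIFY_* codes) is real, everything prefixed my_ is hypothetical:

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/types.h>

struct my_dev {
        struct clk *clk;
        struct notifier_block nb;
};

/* Placeholder: report whether valid dividers exist for the new rate. */
static bool my_dividers_ok(struct my_dev *d, unsigned long rate)
{
        return rate != 0;
}

/* Placeholder: write the dividers for the given clock rate. */
static void my_apply_dividers(struct my_dev *d, unsigned long rate)
{
}

static int my_clk_notifier_cb(struct notifier_block *nb,
                              unsigned long event, void *data)
{
        struct clk_notifier_data *ndata = data;
        struct my_dev *d = container_of(nb, struct my_dev, nb);

        switch (event) {
        case PRE_RATE_CHANGE:
                /* Refuse the rate change if it cannot work for us. */
                return my_dividers_ok(d, ndata->new_rate) ?
                        NOTIFY_OK : NOTIFY_BAD;
        case POST_RATE_CHANGE:
                /* The clock now runs at new_rate: reprogram for it. */
                my_apply_dividers(d, ndata->new_rate);
                return NOTIFY_OK;
        case ABORT_RATE_CHANGE:
                /* The change was rolled back: keep the old settings. */
                my_apply_dividers(d, ndata->old_rate);
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static int my_register_notifier(struct my_dev *d)
{
        d->nb.notifier_call = my_clk_notifier_cb;
        return clk_notifier_register(d->clk, &d->nb);
}
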
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 40a9fe9d3b1..ce396ecdf41 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -51,7 +51,7 @@
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
+static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
static bool __read_mostly sysrq_always_enabled;
unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index f597e88a705..c94d2349dd0 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -140,6 +140,10 @@ EXPORT_SYMBOL(tty_port_destroy);
static void tty_port_destructor(struct kref *kref)
{
struct tty_port *port = container_of(kref, struct tty_port, kref);
+
+ /* check if last port ref was dropped before tty release */
+ if (WARN_ON(port->itty))
+ return;
if (port->xmit_buf)
free_page((unsigned long)port->xmit_buf);
tty_port_destroy(port);
@@ -480,8 +484,6 @@ int tty_port_close_start(struct tty_port *port,
if (port->count) {
spin_unlock_irqrestore(&port->lock, flags);
- if (port->ops->drop)
- port->ops->drop(port);
return 0;
}
set_bit(ASYNCB_CLOSING, &port->flags);
@@ -500,9 +502,7 @@ int tty_port_close_start(struct tty_port *port,
/* Flush the ldisc buffering */
tty_ldisc_flush(tty);
- /* Don't call port->drop for the last reference. Callers will want
- to drop the last active reference in ->shutdown() or the tty
- shutdown path */
+ /* Report to caller this is the last port reference */
return 1;
}
EXPORT_SYMBOL(tty_port_close_start);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 9a8e8c5a0c7..61b1137d7e5 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1300,21 +1300,30 @@ static void csi_m(struct vc_data *vc)
case 27:
vc->vc_reverse = 0;
break;
- case 38: /* ANSI X3.64-1979 (SCO-ish?)
- * Enables underscore, white foreground
- * with white underscore (Linux - use
- * default foreground).
+ case 38:
+ case 48: /* ITU T.416
+ * Higher colour modes.
+ * They break the usual properties of SGR codes
+ * and thus need to be detected and ignored by
+ * hand. Strictly speaking, that standard also
+ * wants : rather than ; as separators, contrary
+ * to ECMA-48, but no one produces such codes
+ * and almost no one accepts them.
*/
- vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
- vc->vc_underline = 1;
+ i++;
+ if (i > vc->vc_npar)
+ break;
+ if (vc->vc_par[i] == 5) /* 256 colours */
+ i++; /* ubiquitous */
+ else if (vc->vc_par[i] == 2) /* 24 bit colours */
+ i += 3; /* extremely rare */
+ /* Subcommands 3 (CMY) and 4 (CMYK) are so insane
+ * that detecting them is not worth the few extra
+ * bytes of kernel's size.
+ */
break;
- case 39: /* ANSI X3.64-1979 (SCO-ish?)
- * Disable underline option.
- * Reset colour to default? It did this
- * before...
- */
+ case 39:
vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
- vc->vc_underline = 0;
break;
case 49:
vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
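
The 38/48 branch above only has to skip the colour sub-parameters so that any later codes in the same SGR sequence are still honoured. A small user-space sketch of just that skip rule, using a par[]/npar layout that mirrors vc_par[]/vc_npar (the colour values themselves are deliberately ignored, as in the patch):

#include <stdio.h>

/* npar is the index of the last valid parameter, as in csi_m(). */
static void handle_sgr(const unsigned int *par, unsigned int npar)
{
        unsigned int i;

        for (i = 0; i <= npar; i++) {
                switch (par[i]) {
                case 38:
                case 48:
                        if (++i > npar)
                                break;
                        if (par[i] == 5)        /* 256 colours: one extra arg */
                                i++;
                        else if (par[i] == 2)   /* 24-bit: three extra args */
                                i += 3;
                        break;
                default:
                        printf("attribute %u\n", par[i]);
                        break;
                }
        }
}

int main(void)
{
        /* "bold; 256-colour foreground 196; underline" */
        const unsigned int par[] = { 1, 38, 5, 196, 4 };

        handle_sgr(par, 4);
        return 0;
}
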
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 0e808cf91d9..67beb844493 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -288,13 +288,13 @@ static int uio_dev_add_attributes(struct uio_device *idev)
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
- goto err_map;
+ goto err_map_kobj;
kobject_init(&map->kobj, &map_attr_type);
map->mem = mem;
mem->map = map;
ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
if (ret)
- goto err_map;
+ goto err_map_kobj;
ret = kobject_uevent(&map->kobj, KOBJ_ADD);
if (ret)
goto err_map;
@@ -313,14 +313,14 @@ static int uio_dev_add_attributes(struct uio_device *idev)
}
portio = kzalloc(sizeof(*portio), GFP_KERNEL);
if (!portio)
- goto err_portio;
+ goto err_portio_kobj;
kobject_init(&portio->kobj, &portio_attr_type);
portio->port = port;
port->portio = portio;
ret = kobject_add(&portio->kobj, idev->portio_dir,
"port%d", pi);
if (ret)
- goto err_portio;
+ goto err_portio_kobj;
ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
if (ret)
goto err_portio;
@@ -329,14 +329,18 @@ static int uio_dev_add_attributes(struct uio_device *idev)
return 0;
err_portio:
- for (pi--; pi >= 0; pi--) {
+ pi--;
+err_portio_kobj:
+ for (; pi >= 0; pi--) {
port = &idev->info->port[pi];
portio = port->portio;
kobject_put(&portio->kobj);
}
kobject_put(idev->portio_dir);
err_map:
- for (mi--; mi>=0; mi--) {
+ mi--;
+err_map_kobj:
+ for (; mi >= 0; mi--) {
mem = &idev->info->mem[mi];
map = mem->map;
kobject_put(&map->kobj);
@@ -601,6 +605,7 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct uio_device *idev = vma->vm_private_data;
struct page *page;
unsigned long offset;
+ void *addr;
int mi = uio_find_mem_index(vma);
if (mi < 0)
@@ -612,10 +617,11 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
offset = (vmf->pgoff - mi) << PAGE_SHIFT;
+ addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
- page = virt_to_page(idev->info->mem[mi].addr + offset);
+ page = virt_to_page(addr);
else
- page = vmalloc_to_page((void *)(unsigned long)idev->info->mem[mi].addr + offset);
+ page = vmalloc_to_page(addr);
get_page(page);
vmf->page = page;
return 0;
@@ -809,10 +815,9 @@ int __uio_register_device(struct module *owner,
info->uio_dev = NULL;
- idev = kzalloc(sizeof(*idev), GFP_KERNEL);
+ idev = devm_kzalloc(parent, sizeof(*idev), GFP_KERNEL);
if (!idev) {
- ret = -ENOMEM;
- goto err_kzalloc;
+ return -ENOMEM;
}
idev->owner = owner;
@@ -822,7 +827,7 @@ int __uio_register_device(struct module *owner,
ret = uio_get_minor(idev);
if (ret)
- goto err_get_minor;
+ return ret;
idev->dev = device_create(&uio_class, parent,
MKDEV(uio_major, idev->minor), idev,
@@ -840,7 +845,7 @@ int __uio_register_device(struct module *owner,
info->uio_dev = idev;
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
- ret = request_irq(info->irq, uio_interrupt,
+ ret = devm_request_irq(parent, info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
if (ret)
goto err_request_irq;
@@ -854,9 +859,6 @@ err_uio_dev_add_attributes:
device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
err_device_create:
uio_free_minor(idev);
-err_get_minor:
- kfree(idev);
-err_kzalloc:
return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
@@ -877,13 +879,9 @@ void uio_unregister_device(struct uio_info *info)
uio_free_minor(idev);
- if (info->irq && (info->irq != UIO_IRQ_CUSTOM))
- free_irq(info->irq, idev);
-
uio_dev_del_attributes(idev);
device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
- kfree(idev);
return;
}
diff --git a/drivers/uio/uio_aec.c b/drivers/uio/uio_aec.c
index f3611c2d83b..1549fab633c 100644
--- a/drivers/uio/uio_aec.c
+++ b/drivers/uio/uio_aec.c
@@ -147,7 +147,6 @@ static void remove(struct pci_dev *pdev)
uio_unregister_device(info);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
iounmap(info->priv);
kfree(info);
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index 22cdf385ab3..30f533ce375 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -106,7 +106,6 @@ static void hilscher_pci_remove(struct pci_dev *dev)
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
iounmap(info->mem[0].internal_addr);
kfree (info);
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index a1768b2f449..f764adbfe03 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -42,7 +42,7 @@
enum mf624_interrupt_source {ADC, CTR4, ALL};
-void mf624_disable_interrupt(enum mf624_interrupt_source source,
+static void mf624_disable_interrupt(enum mf624_interrupt_source source,
struct uio_info *info)
{
void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
@@ -70,7 +70,7 @@ void mf624_disable_interrupt(enum mf624_interrupt_source source,
}
}
-void mf624_enable_interrupt(enum mf624_interrupt_source source,
+static void mf624_enable_interrupt(enum mf624_interrupt_source source,
struct uio_info *info)
{
void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
@@ -220,7 +220,6 @@ static void mf624_pci_remove(struct pci_dev *dev)
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
iounmap(info->mem[0].internal_addr);
iounmap(info->mem[1].internal_addr);
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 28a766b9e19..4c345db8b01 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -127,7 +127,6 @@ static void netx_pci_remove(struct pci_dev *dev)
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
iounmap(info->mem[0].internal_addr);
kfree(info);
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 90ff17a0202..76669313e9a 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -112,11 +112,11 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
/* alloc uioinfo for one device */
- uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
+ uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
+ GFP_KERNEL);
if (!uioinfo) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "unable to kmalloc\n");
- return ret;
+ return -ENOMEM;
}
uioinfo->name = pdev->dev.of_node->name;
uioinfo->version = "devicetree";
@@ -125,20 +125,19 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
dev_err(&pdev->dev, "missing platform_data\n");
- goto bad0;
+ return ret;
}
if (uioinfo->handler || uioinfo->irqcontrol ||
uioinfo->irq_flags & IRQF_SHARED) {
dev_err(&pdev->dev, "interrupt configuration error\n");
- goto bad0;
+ return ret;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "unable to kmalloc\n");
- goto bad0;
+ return -ENOMEM;
}
priv->uioinfo = uioinfo;
@@ -153,7 +152,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
uioinfo->irq = UIO_IRQ_NONE;
else if (ret < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
- goto bad1;
+ return ret;
}
}
@@ -209,20 +208,12 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
ret = uio_register_device(&pdev->dev, priv->uioinfo);
if (ret) {
dev_err(&pdev->dev, "unable to register uio device\n");
- goto bad2;
+ pm_runtime_disable(&pdev->dev);
+ return ret;
}
platform_set_drvdata(pdev, priv);
return 0;
- bad2:
- pm_runtime_disable(&pdev->dev);
- bad1:
- kfree(priv);
- bad0:
- /* kfree uioinfo for OF */
- if (pdev->dev.of_node)
- kfree(uioinfo);
- return ret;
}
static int uio_pdrv_genirq_remove(struct platform_device *pdev)
@@ -235,11 +226,6 @@ static int uio_pdrv_genirq_remove(struct platform_device *pdev)
priv->uioinfo->handler = NULL;
priv->uioinfo->irqcontrol = NULL;
- /* kfree uioinfo for OF */
- if (pdev->dev.of_node)
- kfree(priv->uioinfo);
-
- kfree(priv);
return 0;
}
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index f519da9034b..96c4a19b191 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -158,14 +158,12 @@ static int pruss_probe(struct platform_device *dev)
if (pdata->sram_pool) {
gdev->sram_pool = pdata->sram_pool;
gdev->sram_vaddr =
- gen_pool_alloc(gdev->sram_pool, sram_pool_sz);
+ (unsigned long)gen_pool_dma_alloc(gdev->sram_pool,
+ sram_pool_sz, &gdev->sram_paddr);
if (!gdev->sram_vaddr) {
dev_err(&dev->dev, "Could not allocate SRAM pool\n");
goto out_free;
}
- gdev->sram_paddr =
- gen_pool_virt_to_phys(gdev->sram_pool,
- gdev->sram_vaddr);
}
gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
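
gen_pool_dma_alloc() hands back the DMA address together with the virtual address, which is what lets the patch drop the separate gen_pool_virt_to_phys() step. A short hedged sketch of the call; the genalloc API is real, the my_* names are made up for illustration:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/genalloc.h>

struct my_sram {
        struct gen_pool *pool;
        void *vaddr;
        dma_addr_t paddr;
};

static int my_sram_alloc(struct my_sram *s, size_t size)
{
        /* One call returns both the CPU address and the DMA address. */
        s->vaddr = gen_pool_dma_alloc(s->pool, size, &s->paddr);
        if (!s->vaddr)
                return -ENOMEM;
        return 0;
}

static void my_sram_free(struct my_sram *s, size_t size)
{
        gen_pool_free(s->pool, (unsigned long)s->vaddr, size);
}
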
diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c
index 54198321708..9cfdfcafa26 100644
--- a/drivers/uio/uio_sercos3.c
+++ b/drivers/uio/uio_sercos3.c
@@ -188,7 +188,6 @@ static void sercos3_pci_remove(struct pci_dev *dev)
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
for (i = 0; i < 5; i++) {
if (info->mem[i].internal_addr)
iounmap(info->mem[i].internal_addr);
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index 5651231a743..f3eecd967a8 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -34,6 +34,7 @@
#include <linux/stringify.h>
#include <linux/usb.h>
#include <linux/mutex.h>
+#include <linux/ratelimit.h>
/*
#define VERBOSE_DEBUG
@@ -59,13 +60,12 @@
atm_printk(KERN_INFO, instance , format , ## arg)
#define atm_warn(instance, format, arg...) \
atm_printk(KERN_WARNING, instance , format , ## arg)
-#define atm_dbg(instance, format, arg...) \
- dynamic_pr_debug("ATM dev %d: " format , \
- (instance)->atm_dev->number , ## arg)
-#define atm_rldbg(instance, format, arg...) \
- if (printk_ratelimit()) \
- atm_dbg(instance , format , ## arg)
-
+#define atm_dbg(instance, format, ...) \
+ pr_debug("ATM dev %d: " format, \
+ (instance)->atm_dev->number, ##__VA_ARGS__)
+#define atm_rldbg(instance, format, ...) \
+ pr_debug_ratelimited("ATM dev %d: " format, \
+ (instance)->atm_dev->number, ##__VA_ARGS__)
/* flags, set by mini-driver in bind() */
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index aa491627a45..892cc96466e 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -344,7 +344,7 @@ void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
/* it could happen that we reinitialize this completion, while
* somebody was waiting for that completion. The timeout and
* while loop handle such cases, but this might be improved */
- INIT_COMPLETION(c67x00->endpoint_disable);
+ reinit_completion(&c67x00->endpoint_disable);
c67x00_sched_kick(c67x00);
wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index 464584c6cca..a8571316568 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -48,6 +48,7 @@
#define PORTSC_SUSP BIT(7)
#define PORTSC_HSP BIT(9)
#define PORTSC_PTC (0x0FUL << 16)
+#define PORTSC_PHCD(d) ((d) ? BIT(22) : BIT(23))
/* PTS and PTW for non lpm version only */
#define PORTSC_PTS(d) \
(u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index be822a2c177..bb5d976e5b8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -108,30 +108,23 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
}
data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
- if (!IS_ERR(data->phy)) {
- ret = usb_phy_init(data->phy);
- if (ret) {
- dev_err(&pdev->dev, "unable to init phy: %d\n", ret);
- goto err_clk;
- }
- } else if (PTR_ERR(data->phy) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ if (IS_ERR(data->phy)) {
+ ret = PTR_ERR(data->phy);
goto err_clk;
}
pdata.phy = data->phy;
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_clk;
if (data->usbmisc_data) {
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
ret);
- goto err_phy;
+ goto err_clk;
}
}
@@ -143,7 +136,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register ci_hdrc platform device, err=%d\n",
ret);
- goto err_phy;
+ goto err_clk;
}
if (data->usbmisc_data) {
@@ -164,9 +157,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
-err_phy:
- if (data->phy)
- usb_phy_shutdown(data->phy);
err_clk:
clk_disable_unprepare(data->clk);
return ret;
@@ -178,10 +168,6 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
ci_hdrc_remove_device(data->ci_pdev);
-
- if (data->phy)
- usb_phy_shutdown(data->phy);
-
clk_disable_unprepare(data->clk);
return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 23763dcec06..5d8981c5235 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -172,6 +172,27 @@ u8 hw_port_test_get(struct ci_hdrc *ci)
return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
}
+/* The PHY enters/leaves low power mode */
+static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+{
+ enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
+ bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
+
+ if (enable && !lpm) {
+ hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
+ PORTSC_PHCD(ci->hw_bank.lpm));
+ } else if (!enable && lpm) {
+ hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
+ 0);
+ /*
+ * The controller needs at least 1ms to reflect the
+ * PHY's status; the PHY also needs some time (less
+ * than 1ms) to leave low power mode.
+ */
+ usleep_range(1500, 2000);
+ }
+}
+
static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
{
u32 reg;
@@ -199,6 +220,8 @@ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
if (ci->hw_ep_max > ENDPT_MAX)
return -ENODEV;
+ ci_hdrc_enter_lpm(ci, false);
+
/* Disable all interrupts bits */
hw_write(ci, OP_USBINTR, 0xffffffff, 0);
@@ -369,16 +392,28 @@ static irqreturn_t ci_irq(int irq, void *data)
static int ci_get_platdata(struct device *dev,
struct ci_hdrc_platform_data *platdata)
{
- /* Get the vbus regulator */
- platdata->reg_vbus = devm_regulator_get(dev, "vbus");
- if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
- platdata->reg_vbus = NULL; /* no vbus regualator is needed */
- } else if (IS_ERR(platdata->reg_vbus)) {
- dev_err(dev, "Getting regulator error: %ld\n",
- PTR_ERR(platdata->reg_vbus));
- return PTR_ERR(platdata->reg_vbus);
+ if (!platdata->phy_mode)
+ platdata->phy_mode = of_usb_get_phy_mode(dev->of_node);
+
+ if (!platdata->dr_mode)
+ platdata->dr_mode = of_usb_get_dr_mode(dev->of_node);
+
+ if (platdata->dr_mode == USB_DR_MODE_UNKNOWN)
+ platdata->dr_mode = USB_DR_MODE_OTG;
+
+ if (platdata->dr_mode != USB_DR_MODE_PERIPHERAL) {
+ /* Get the vbus regulator */
+ platdata->reg_vbus = devm_regulator_get(dev, "vbus");
+ if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
+ /* no vbus regulator is needed */
+ platdata->reg_vbus = NULL;
+ } else if (IS_ERR(platdata->reg_vbus)) {
+ dev_err(dev, "Getting regulator error: %ld\n",
+ PTR_ERR(platdata->reg_vbus));
+ return PTR_ERR(platdata->reg_vbus);
+ }
}
return 0;
@@ -465,6 +500,33 @@ static void ci_get_otg_capable(struct ci_hdrc *ci)
}
}
+static int ci_usb_phy_init(struct ci_hdrc *ci)
+{
+ if (ci->platdata->phy) {
+ ci->transceiver = ci->platdata->phy;
+ return usb_phy_init(ci->transceiver);
+ } else {
+ ci->global_phy = true;
+ ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR(ci->transceiver))
+ ci->transceiver = NULL;
+
+ return 0;
+ }
+}
+
+static void ci_usb_phy_destroy(struct ci_hdrc *ci)
+{
+ if (!ci->transceiver)
+ return;
+
+ otg_set_peripheral(ci->transceiver->otg, NULL);
+ if (ci->global_phy)
+ usb_put_phy(ci->transceiver);
+ else
+ usb_phy_shutdown(ci->transceiver);
+}
+
static int ci_hdrc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -473,7 +535,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
void __iomem *base;
int ret;
enum usb_dr_mode dr_mode;
- struct device_node *of_node = dev->of_node ?: dev->parent->of_node;
if (!dev->platform_data) {
dev_err(dev, "platform data missing\n");
@@ -493,10 +554,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci->dev = dev;
ci->platdata = dev->platform_data;
- if (ci->platdata->phy)
- ci->transceiver = ci->platdata->phy;
- else
- ci->global_phy = true;
ret = hw_device_init(ci, base);
if (ret < 0) {
@@ -504,27 +561,25 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ ret = ci_usb_phy_init(ci);
+ if (ret) {
+ dev_err(dev, "unable to init phy: %d\n", ret);
+ return ret;
+ }
+
ci->hw_bank.phys = res->start;
ci->irq = platform_get_irq(pdev, 0);
if (ci->irq < 0) {
dev_err(dev, "missing IRQ\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto destroy_phy;
}
ci_get_otg_capable(ci);
- if (!ci->platdata->phy_mode)
- ci->platdata->phy_mode = of_usb_get_phy_mode(of_node);
-
hw_phymode_configure(ci);
- if (!ci->platdata->dr_mode)
- ci->platdata->dr_mode = of_usb_get_dr_mode(of_node);
-
- if (ci->platdata->dr_mode == USB_DR_MODE_UNKNOWN)
- ci->platdata->dr_mode = USB_DR_MODE_OTG;
-
dr_mode = ci->platdata->dr_mode;
/* initialize role(s) before the interrupt is requested */
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
@@ -537,11 +592,23 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_hdrc_gadget_init(ci);
if (ret)
dev_info(dev, "doesn't support gadget\n");
+ if (!ret && ci->transceiver) {
+ ret = otg_set_peripheral(ci->transceiver->otg,
+ &ci->gadget);
+ /*
+ * If all USB functions are implemented by chipidea drivers,
+ * there is no need to call the above API; likewise, if only
+ * the gadget function is used, calling the above API is pointless.
+ */
+ if (ret && ret != -ENOTSUPP)
+ goto destroy_phy;
+ }
}
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto destroy_phy;
}
if (ci->is_otg) {
@@ -594,6 +661,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
free_irq(ci->irq, ci);
stop:
ci_role_destroy(ci);
+destroy_phy:
+ ci_usb_phy_destroy(ci);
return ret;
}
@@ -605,6 +674,8 @@ static int ci_hdrc_remove(struct platform_device *pdev)
dbg_remove_files(ci);
free_irq(ci->irq, ci);
ci_role_destroy(ci);
+ ci_hdrc_enter_lpm(ci, true);
+ ci_usb_phy_destroy(ci);
kfree(ci->hw_bank.regmap);
return 0;
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 64d7a6d9a1a..59e6020ea75 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -103,15 +103,15 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
+ if (ci->platdata->reg_vbus)
+ regulator_disable(ci->platdata->reg_vbus);
}
- if (ci->platdata->reg_vbus)
- regulator_disable(ci->platdata->reg_vbus);
}
void ci_hdrc_host_destroy(struct ci_hdrc *ci)
{
- if (ci->role == CI_ROLE_HOST)
+ if (ci->role == CI_ROLE_HOST && ci->hcd)
host_stop(ci);
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9333083dd11..b34c81969cb 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -20,7 +20,6 @@
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
#include <linux/usb/chipidea.h>
#include "ci.h"
@@ -686,9 +685,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
usb_ep_fifo_flush(&ci->ep0out->ep);
usb_ep_fifo_flush(&ci->ep0in->ep);
- if (ci->driver)
- ci->driver->disconnect(gadget);
-
/* make sure to disable all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_disable(ep);
@@ -718,6 +714,11 @@ __acquires(ci->lock)
int retval;
spin_unlock(&ci->lock);
+ if (ci->gadget.speed != USB_SPEED_UNKNOWN) {
+ if (ci->driver)
+ ci->driver->disconnect(&ci->gadget);
+ }
+
retval = _gadget_stop_activity(&ci->gadget);
if (retval)
goto done;
@@ -1461,6 +1462,8 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
hw_device_state(ci, ci->ep0out->qh.dma);
dev_dbg(ci->dev, "Connected to host\n");
} else {
+ if (ci->driver)
+ ci->driver->disconnect(&ci->gadget);
hw_device_state(ci, 0);
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
@@ -1633,23 +1636,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
retval = usb_ep_enable(&ci->ep0in->ep);
if (retval)
return retval;
- spin_lock_irqsave(&ci->lock, flags);
ci->driver = driver;
pm_runtime_get_sync(&ci->gadget.dev);
if (ci->vbus_active) {
+ spin_lock_irqsave(&ci->lock, flags);
hw_device_reset(ci, USBMODE_CM_DC);
} else {
pm_runtime_put_sync(&ci->gadget.dev);
- goto done;
+ return retval;
}
retval = hw_device_state(ci, ci->ep0out->qh.dma);
+ spin_unlock_irqrestore(&ci->lock, flags);
if (retval)
pm_runtime_put_sync(&ci->gadget.dev);
- done:
- spin_unlock_irqrestore(&ci->lock, flags);
return retval;
}
@@ -1786,34 +1788,9 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.ep0 = &ci->ep0in->ep;
- if (ci->global_phy) {
- ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR(ci->transceiver))
- ci->transceiver = NULL;
- }
-
- if (ci->platdata->flags & CI_HDRC_REQUIRE_TRANSCEIVER) {
- if (ci->transceiver == NULL) {
- retval = -ENODEV;
- goto destroy_eps;
- }
- }
-
- if (ci->transceiver) {
- retval = otg_set_peripheral(ci->transceiver->otg,
- &ci->gadget);
- /*
- * If we implement all USB functions using chipidea drivers,
- * it doesn't need to call above API, meanwhile, if we only
- * use gadget function, calling above API is useless.
- */
- if (retval && retval != -ENOTSUPP)
- goto put_transceiver;
- }
-
retval = usb_add_gadget_udc(dev, &ci->gadget);
if (retval)
- goto remove_trans;
+ goto destroy_eps;
pm_runtime_no_callbacks(&ci->gadget.dev);
pm_runtime_enable(&ci->gadget.dev);
@@ -1823,17 +1800,6 @@ static int udc_start(struct ci_hdrc *ci)
return retval;
-remove_trans:
- if (ci->transceiver) {
- otg_set_peripheral(ci->transceiver->otg, NULL);
- if (ci->global_phy)
- usb_put_phy(ci->transceiver);
- }
-
- dev_err(dev, "error = %i\n", retval);
-put_transceiver:
- if (ci->transceiver && ci->global_phy)
- usb_put_phy(ci->transceiver);
destroy_eps:
destroy_eps(ci);
free_pools:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index d3318a0df8e..4d387596f3f 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -101,6 +101,7 @@ struct wdm_device {
struct work_struct rxwork;
int werr;
int rerr;
+ int resp_count;
struct list_head device_list;
int (*manage_power)(struct usb_interface *, int);
@@ -253,6 +254,10 @@ static void wdm_int_callback(struct urb *urb)
"NOTIFY_NETWORK_CONNECTION %s network",
dr->wValue ? "connected to" : "disconnected from");
goto exit;
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
+ dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)",
+ urb->actual_length);
+ goto exit;
default:
clear_bit(WDM_POLL_RUNNING, &desc->flags);
dev_err(&desc->intf->dev,
@@ -262,9 +267,9 @@ static void wdm_int_callback(struct urb *urb)
}
spin_lock(&desc->iuspin);
- clear_bit(WDM_READ, &desc->flags);
responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
- if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
+ if (!desc->resp_count++ && !responding
+ && !test_bit(WDM_DISCONNECTING, &desc->flags)
&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
rv = usb_submit_urb(desc->response, GFP_ATOMIC);
dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
@@ -521,10 +526,36 @@ retry:
desc->length -= cntr;
/* in case we had outstanding data */
- if (!desc->length)
+ if (!desc->length) {
clear_bit(WDM_READ, &desc->flags);
- spin_unlock_irq(&desc->iuspin);
+ if (--desc->resp_count) {
+ set_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ if (rv) {
+ dev_err(&desc->intf->dev,
+ "%s: usb_submit_urb failed with result %d\n",
+ __func__, rv);
+ spin_lock_irq(&desc->iuspin);
+ clear_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+
+ if (rv == -ENOMEM) {
+ rv = schedule_work(&desc->rxwork);
+ if (rv)
+ dev_err(&desc->intf->dev, "Cannot schedule work\n");
+ } else {
+ spin_lock_irq(&desc->iuspin);
+ desc->resp_count = 0;
+ spin_unlock_irq(&desc->iuspin);
+ }
+ }
+ } else
+ spin_unlock_irq(&desc->iuspin);
+ } else
+ spin_unlock_irq(&desc->iuspin);
rv = cntr;
@@ -635,6 +666,9 @@ static int wdm_release(struct inode *inode, struct file *file)
if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
kill_urbs(desc);
+ spin_lock_irq(&desc->iuspin);
+ desc->resp_count = 0;
+ spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
} else {
/* must avoid dev_printk here as desc->intf is invalid */
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index db535b0aa17..fed7f68d025 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@ config USB_DEFAULT_PERSIST
bool "Enable USB persist by default"
default y
help
- Say N here if you don't want USB power session persistance
+ Say N here if you don't want USB power session persistence
enabled by default. If you say N it will make suspended USB
devices that lose power get reenumerated as if they had been
unplugged, causing any mounted filesystems to be lost. The
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 71dc5d768fa..967152a63bd 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -914,10 +914,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
- ctrl.bRequestType, ctrl.bRequest,
- __le16_to_cpup(&ctrl.wValue),
- __le16_to_cpup(&ctrl.wIndex),
- __le16_to_cpup(&ctrl.wLength));
+ ctrl.bRequestType, ctrl.bRequest, ctrl.wValue,
+ ctrl.wIndex, ctrl.wLength);
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
@@ -1636,32 +1634,32 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
static int proc_control_compat(struct dev_state *ps,
struct usbdevfs_ctrltransfer32 __user *p32)
{
- struct usbdevfs_ctrltransfer __user *p;
- __u32 udata;
- p = compat_alloc_user_space(sizeof(*p));
- if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
- get_user(udata, &p32->data) ||
+ struct usbdevfs_ctrltransfer __user *p;
+ __u32 udata;
+ p = compat_alloc_user_space(sizeof(*p));
+ if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
+ get_user(udata, &p32->data) ||
put_user(compat_ptr(udata), &p->data))
return -EFAULT;
- return proc_control(ps, p);
+ return proc_control(ps, p);
}
static int proc_bulk_compat(struct dev_state *ps,
struct usbdevfs_bulktransfer32 __user *p32)
{
- struct usbdevfs_bulktransfer __user *p;
- compat_uint_t n;
- compat_caddr_t addr;
+ struct usbdevfs_bulktransfer __user *p;
+ compat_uint_t n;
+ compat_caddr_t addr;
- p = compat_alloc_user_space(sizeof(*p));
+ p = compat_alloc_user_space(sizeof(*p));
- if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
- get_user(n, &p32->len) || put_user(n, &p->len) ||
- get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
- get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
- return -EFAULT;
+ if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
+ get_user(n, &p32->len) || put_user(n, &p->len) ||
+ get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
+ get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
+ return -EFAULT;
- return proc_bulk(ps, p);
+ return proc_bulk(ps, p);
}
static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg)
{
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f7841d44fed..47aade2a5e7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1179,8 +1179,8 @@ static int usb_resume_interface(struct usb_device *udev,
"reset_resume", status);
} else {
intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "reset_resume", driver->name);
+ dev_dbg(&intf->dev, "no reset_resume for driver %s?\n",
+ driver->name);
}
} else {
status = driver->resume(intf);
@@ -1790,6 +1790,9 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret = -EPERM;
+ if (enable && !udev->usb2_hw_lpm_allowed)
+ return 0;
+
if (hcd->driver->set_usb2_hw_lpm) {
ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
if (!ret)
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 7421888087a..ea337a718cc 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -8,7 +8,7 @@
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2001 (kernel hotplug, usb_device_id,
- more docs, etc)
+ * more docs, etc)
* (C) Copyright Yggdrasil Computing, Inc. 2000
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
@@ -27,29 +27,21 @@
static const struct file_operations *usb_minors[MAX_USB_MINORS];
static DECLARE_RWSEM(minor_rwsem);
-static int usb_open(struct inode * inode, struct file * file)
+static int usb_open(struct inode *inode, struct file *file)
{
- int minor = iminor(inode);
- const struct file_operations *c;
int err = -ENODEV;
- const struct file_operations *old_fops, *new_fops = NULL;
+ const struct file_operations *new_fops;
down_read(&minor_rwsem);
- c = usb_minors[minor];
+ new_fops = fops_get(usb_minors[iminor(inode)]);
- if (!c || !(new_fops = fops_get(c)))
+ if (!new_fops)
goto done;
- old_fops = file->f_op;
- file->f_op = new_fops;
+ replace_fops(file, new_fops);
/* Curiouser and curiouser... NULL ->open() as "no device" ? */
if (file->f_op->open)
- err = file->f_op->open(inode,file);
- if (err) {
- fops_put(file->f_op);
- file->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
+ err = file->f_op->open(inode, file);
done:
up_read(&minor_rwsem);
return err;
@@ -166,7 +158,7 @@ int usb_register_dev(struct usb_interface *intf,
char *temp;
#ifdef CONFIG_USB_DYNAMIC_MINORS
- /*
+ /*
* We don't care what the device tries to start at, we want to start
* at zero to pack the devices into the smallest available space with
* no holes in the minor range.
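
The usb_open() rewrite in the hunk above swaps the manual f_op juggling for fops_get() plus replace_fops(). As a reading aid only, here is a minimal sketch (not part of this patch) of that demultiplex-open pattern with hypothetical names ("demo_minors", "demo_rwsem"); replace_fops() itself drops the reference on the previous file->f_op before installing the new one, which is what removes the old fops_put()/restore dance.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/rwsem.h>

/* Hypothetical per-minor table; the real table in file.c is usb_minors[]. */
static const struct file_operations *demo_minors[256];
static DECLARE_RWSEM(demo_rwsem);

static int demo_open(struct inode *inode, struct file *file)
{
	const struct file_operations *new_fops;
	int err = -ENODEV;

	down_read(&demo_rwsem);
	new_fops = fops_get(demo_minors[iminor(inode)]);  /* takes a module ref */
	if (!new_fops)
		goto done;

	replace_fops(file, new_fops);  /* puts the old f_op, installs the new one */
	if (file->f_op->open)
		err = file->f_op->open(inode, file);
done:
	up_read(&demo_rwsem);
	return err;
}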
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b9d3c43e385..dfe9d0f2297 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -215,6 +215,9 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto disable_pci;
}
+ hcd->amd_resume_bug = (usb_hcd_amd_remote_wakeup_quirk(dev) &&
+ driver->flags & (HCD_USB11 | HCD_USB3)) ? 1 : 0;
+
if (driver->flags & HCD_MEMORY) {
/* EHCI, OHCI */
hcd->rsrc_start = pci_resource_start(dev, 0);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index d6a8d23f047..6bffb8c87bc 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -6,7 +6,7 @@
* (C) Copyright Deti Fliegl 1999
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2002
- *
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -40,6 +40,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -92,10 +93,7 @@ EXPORT_SYMBOL_GPL (usb_bus_list);
/* used when allocating bus numbers */
#define USB_MAXBUS 64
-struct usb_busmap {
- unsigned long busmap [USB_MAXBUS / (8*sizeof (unsigned long))];
-};
-static struct usb_busmap busmap;
+static DECLARE_BITMAP(busmap, USB_MAXBUS);
/* used when updating list of hcds */
DEFINE_MUTEX(usb_bus_list_lock); /* exported only for usbfs */
@@ -171,7 +169,7 @@ static const u8 usb25_rh_dev_descriptor[18] = {
};
/* usb 2.0 root hub device descriptor */
-static const u8 usb2_rh_dev_descriptor [18] = {
+static const u8 usb2_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
0x01, /* __u8 bDescriptorType; Device */
0x00, 0x02, /* __le16 bcdUSB; v2.0 */
@@ -194,7 +192,7 @@ static const u8 usb2_rh_dev_descriptor [18] = {
/* no usb 2.0 root hub "device qualifier" descriptor: one speed only */
/* usb 1.1 root hub device descriptor */
-static const u8 usb11_rh_dev_descriptor [18] = {
+static const u8 usb11_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
0x01, /* __u8 bDescriptorType; Device */
0x10, 0x01, /* __le16 bcdUSB; v1.1 */
@@ -219,7 +217,7 @@ static const u8 usb11_rh_dev_descriptor [18] = {
/* Configuration descriptors for our root hubs */
-static const u8 fs_rh_config_descriptor [] = {
+static const u8 fs_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
@@ -228,13 +226,13 @@ static const u8 fs_rh_config_descriptor [] = {
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
0x00, /* __u8 iConfiguration; */
- 0xc0, /* __u8 bmAttributes;
+ 0xc0, /* __u8 bmAttributes;
Bit 7: must be set,
6: Self-powered,
5: Remote wakeup,
4..0: resvd */
0x00, /* __u8 MaxPower; */
-
+
/* USB 1.1:
* USB 2.0, single TT organization (mandatory):
* one interface, protocol 0
@@ -256,17 +254,17 @@ static const u8 fs_rh_config_descriptor [] = {
0x00, /* __u8 if_bInterfaceSubClass; */
0x00, /* __u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
0x00, /* __u8 if_iInterface; */
-
+
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
0x05, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
- 0x03, /* __u8 ep_bmAttributes; Interrupt */
- 0x02, 0x00, /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
+ 0x03, /* __u8 ep_bmAttributes; Interrupt */
+ 0x02, 0x00, /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
0xff /* __u8 ep_bInterval; (255ms -- usb 2.0 spec) */
};
-static const u8 hs_rh_config_descriptor [] = {
+static const u8 hs_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
@@ -275,13 +273,13 @@ static const u8 hs_rh_config_descriptor [] = {
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
0x00, /* __u8 iConfiguration; */
- 0xc0, /* __u8 bmAttributes;
+ 0xc0, /* __u8 bmAttributes;
Bit 7: must be set,
6: Self-powered,
5: Remote wakeup,
4..0: resvd */
0x00, /* __u8 MaxPower; */
-
+
/* USB 1.1:
* USB 2.0, single TT organization (mandatory):
* one interface, protocol 0
@@ -303,12 +301,12 @@ static const u8 hs_rh_config_descriptor [] = {
0x00, /* __u8 if_bInterfaceSubClass; */
0x00, /* __u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
0x00, /* __u8 if_iInterface; */
-
+
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
0x05, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
- 0x03, /* __u8 ep_bmAttributes; Interrupt */
+ 0x03, /* __u8 ep_bmAttributes; Interrupt */
/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
* see hub.c:hub_configure() for details. */
(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
@@ -428,7 +426,7 @@ rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
char const *s;
static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
- // language ids
+ /* language ids */
switch (id) {
case 0:
/* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
@@ -464,7 +462,7 @@ rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
{
struct usb_ctrlrequest *cmd;
- u16 typeReq, wValue, wIndex, wLength;
+ u16 typeReq, wValue, wIndex, wLength;
u8 *ubuf = urb->transfer_buffer;
unsigned len = 0;
int status;
@@ -526,10 +524,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
*/
case DeviceRequest | USB_REQ_GET_STATUS:
- tbuf [0] = (device_may_wakeup(&hcd->self.root_hub->dev)
+ tbuf[0] = (device_may_wakeup(&hcd->self.root_hub->dev)
<< USB_DEVICE_REMOTE_WAKEUP)
| (1 << USB_DEVICE_SELF_POWERED);
- tbuf [1] = 0;
+ tbuf[1] = 0;
len = 2;
break;
case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
@@ -546,7 +544,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
goto error;
break;
case DeviceRequest | USB_REQ_GET_CONFIGURATION:
- tbuf [0] = 1;
+ tbuf[0] = 1;
len = 1;
/* FALLTHROUGH */
case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
@@ -609,13 +607,13 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
}
break;
case DeviceRequest | USB_REQ_GET_INTERFACE:
- tbuf [0] = 0;
+ tbuf[0] = 0;
len = 1;
/* FALLTHROUGH */
case DeviceOutRequest | USB_REQ_SET_INTERFACE:
break;
case DeviceOutRequest | USB_REQ_SET_ADDRESS:
- // wValue == urb->dev->devaddr
+ /* wValue == urb->dev->devaddr */
dev_dbg (hcd->self.controller, "root hub device address %d\n",
wValue);
break;
@@ -625,9 +623,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
/* ENDPOINT REQUESTS */
case EndpointRequest | USB_REQ_GET_STATUS:
- // ENDPOINT_HALT flag
- tbuf [0] = 0;
- tbuf [1] = 0;
+ /* ENDPOINT_HALT flag */
+ tbuf[0] = 0;
+ tbuf[1] = 0;
len = 2;
/* FALLTHROUGH */
case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
@@ -683,7 +681,7 @@ error:
if (urb->transfer_buffer_length < len)
len = urb->transfer_buffer_length;
urb->actual_length = len;
- // always USB_DIR_IN, toward host
+ /* always USB_DIR_IN, toward host */
memcpy (ubuf, bufp, len);
/* report whether RH hardware supports remote wakeup */
@@ -877,11 +875,11 @@ static ssize_t authorized_default_store(struct device *dev,
usb_hcd = bus_to_hcd(usb_bus);
result = sscanf(buf, "%u\n", &val);
if (result == 1) {
- usb_hcd->authorized_default = val? 1 : 0;
+ usb_hcd->authorized_default = val ? 1 : 0;
result = size;
- }
- else
+ } else {
result = -EINVAL;
+ }
return result;
}
static DEVICE_ATTR_RW(authorized_default);
@@ -941,12 +939,12 @@ static int usb_register_bus(struct usb_bus *bus)
int busnum;
mutex_lock(&usb_bus_list_lock);
- busnum = find_next_zero_bit (busmap.busmap, USB_MAXBUS, 1);
+ busnum = find_next_zero_bit(busmap, USB_MAXBUS, 1);
if (busnum >= USB_MAXBUS) {
printk (KERN_ERR "%s: too many buses\n", usbcore_name);
goto error_find_busnum;
}
- set_bit (busnum, busmap.busmap);
+ set_bit(busnum, busmap);
bus->busnum = busnum;
/* Add it to the local list of buses */
@@ -987,7 +985,7 @@ static void usb_deregister_bus (struct usb_bus *bus)
usb_notify_remove_bus(bus);
- clear_bit (bus->busnum, busmap.busmap);
+ clear_bit(bus->busnum, busmap);
}
/**
@@ -1033,6 +1031,7 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return retval;
}
+ usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
}
retval = usb_new_device (usb_dev);
@@ -1120,21 +1119,21 @@ long usb_calc_bus_time (int speed, int is_input, int isoc, int bytecount)
case USB_SPEED_LOW: /* INTR only */
if (is_input) {
tmp = (67667L * (31L + 10L * BitTime (bytecount))) / 1000L;
- return (64060L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp);
+ return 64060L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp;
} else {
tmp = (66700L * (31L + 10L * BitTime (bytecount))) / 1000L;
- return (64107L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp);
+ return 64107L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp;
}
case USB_SPEED_FULL: /* ISOC or INTR */
if (isoc) {
tmp = (8354L * (31L + 10L * BitTime (bytecount))) / 1000L;
- return (((is_input) ? 7268L : 6265L) + BW_HOST_DELAY + tmp);
+ return ((is_input) ? 7268L : 6265L) + BW_HOST_DELAY + tmp;
} else {
tmp = (8354L * (31L + 10L * BitTime (bytecount))) / 1000L;
- return (9107L + BW_HOST_DELAY + tmp);
+ return 9107L + BW_HOST_DELAY + tmp;
}
case USB_SPEED_HIGH: /* ISOC or INTR */
- // FIXME adjust for input vs output
+ /* FIXME adjust for input vs output */
if (isoc)
tmp = HS_NSECS_ISO (bytecount);
else
@@ -1651,6 +1650,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
static void __usb_hcd_giveback_urb(struct urb *urb)
{
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
+ struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
unsigned long flags;
@@ -1662,6 +1662,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
unmap_urb_for_dma(hcd, urb);
usbmon_urb_complete(&hcd->self, urb, status);
+ usb_anchor_suspend_wakeups(anchor);
usb_unanchor_urb(urb);
/* pass ownership to the completion handler */
@@ -1681,6 +1682,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->complete(urb);
local_irq_restore(flags);
+ usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
if (unlikely(atomic_read(&urb->reject)))
wake_up(&usb_kill_urb_queue);
@@ -1703,7 +1705,9 @@ static void usb_giveback_urb_bh(unsigned long param)
urb = list_entry(local_list.next, struct urb, urb_list);
list_del_init(&urb->urb_list);
+ bh->completing_ep = urb->ep;
__usb_hcd_giveback_urb(urb);
+ bh->completing_ep = NULL;
}
/* check if there are new URBs to giveback */
@@ -1812,7 +1816,7 @@ rescan:
case USB_ENDPOINT_XFER_INT:
s = "-intr"; break;
default:
- s = "-iso"; break;
+ s = "-iso"; break;
};
s;
}));
@@ -2073,8 +2077,11 @@ EXPORT_SYMBOL_GPL(usb_alloc_streams);
*
* Reverts a group of bulk endpoints back to not using stream IDs.
* Can fail if we are given bad arguments, or HCD is broken.
+ *
+ * Return: On success, the number of allocated streams. On failure, a negative
+ * error code.
*/
-void usb_free_streams(struct usb_interface *interface,
+int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags)
{
@@ -2085,14 +2092,14 @@ void usb_free_streams(struct usb_interface *interface,
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (dev->speed != USB_SPEED_SUPER)
- return;
+ return -EINVAL;
/* Streams only apply to bulk endpoints. */
for (i = 0; i < num_eps; i++)
if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
- return;
+ return -EINVAL;
- hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+ return hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_free_streams);
@@ -2245,7 +2252,7 @@ static void hcd_resume_work(struct work_struct *work)
}
/**
- * usb_hcd_resume_root_hub - called by HCD to resume its root hub
+ * usb_hcd_resume_root_hub - called by HCD to resume its root hub
* @hcd: host controller for this root hub
*
* The USB host controller calls this function when its root hub is
@@ -2324,15 +2331,8 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum);
irqreturn_t usb_hcd_irq (int irq, void *__hcd)
{
struct usb_hcd *hcd = __hcd;
- unsigned long flags;
irqreturn_t rc;
- /* IRQF_DISABLED doesn't work correctly with shared IRQs
- * when the first handler doesn't use it. So let's just
- * assume it's never used.
- */
- local_irq_save(flags);
-
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
else if (hcd->driver->irq(hcd) == IRQ_NONE)
@@ -2340,7 +2340,6 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
else
rc = IRQ_HANDLED;
- local_irq_restore(flags);
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_irq);
@@ -2547,13 +2546,6 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd,
if (hcd->driver->irq) {
- /* IRQF_DISABLED doesn't work as advertised when used together
- * with IRQF_SHARED. As usb_hcd_irq() will always disable
- * interrupts we can remove it here.
- */
- if (irqflags & IRQF_SHARED)
- irqflags &= ~IRQF_DISABLED;
-
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
@@ -2600,7 +2592,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
/* Keep old behaviour if authorized_default is not in [0, 1]. */
if (authorized_default < 0 || authorized_default > 1)
- hcd->authorized_default = hcd->wireless? 0 : 1;
+ hcd->authorized_default = hcd->wireless ? 0 : 1;
else
hcd->authorized_default = authorized_default;
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -2743,7 +2735,7 @@ err_allocate_root_hub:
err_register_bus:
hcd_buffer_destroy(hcd);
return retval;
-}
+}
EXPORT_SYMBOL_GPL(usb_add_hcd);
/**
@@ -2818,7 +2810,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
EXPORT_SYMBOL_GPL(usb_remove_hcd);
void
-usb_hcd_platform_shutdown(struct platform_device* dev)
+usb_hcd_platform_shutdown(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -2840,7 +2832,7 @@ struct usb_mon_operations *mon_ops;
* Notice that the code is minimally error-proof. Because usbmon needs
* symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
*/
-
+
int usb_mon_register (struct usb_mon_operations *ops)
{
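
Among the hcd.c changes above, the wrapper struct usb_busmap is replaced by DECLARE_BITMAP(busmap, USB_MAXBUS), and the callers switch to plain bitmap operations. A minimal sketch of that allocator idiom, under illustrative names (demo_map, DEMO_MAX) and with locking elided, looks like this:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_MAX 64
static DECLARE_BITMAP(demo_map, DEMO_MAX);

static int demo_alloc_id(void)
{
	/* Skip bit 0, matching how bus numbers start at 1 above. */
	int id = find_next_zero_bit(demo_map, DEMO_MAX, 1);

	if (id >= DEMO_MAX)
		return -ENOSPC;
	set_bit(id, demo_map);
	return id;
}

static void demo_free_id(int id)
{
	clear_bit(id, demo_map);
}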
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e6b682c6c23..a7c04e24ca4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -120,7 +120,7 @@ static inline char *portspeed(struct usb_hub *hub, int portstatus)
if (hub_is_superspeed(hub->hdev))
return "5.0 Gb/s";
if (portstatus & USB_PORT_STAT_HIGH_SPEED)
- return "480 Mb/s";
+ return "480 Mb/s";
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
return "1.5 Mb/s";
else
@@ -135,7 +135,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -156,6 +156,11 @@ static int usb_device_supports_lpm(struct usb_device *udev)
"Power management will be impacted.\n");
return 0;
}
+
+ /* udev is root hub */
+ if (!udev->parent)
+ return 1;
+
if (udev->parent->lpm_capable)
return 1;
@@ -310,9 +315,9 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
return;
udev_u1_del = udev->bos->ss_cap->bU1devExitLat;
- udev_u2_del = udev->bos->ss_cap->bU2DevExitLat;
+ udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat);
hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat;
- hub_u2_del = udev->parent->bos->ss_cap->bU2DevExitLat;
+ hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat);
usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del);
@@ -433,7 +438,7 @@ static void set_port_led(
case HUB_LED_OFF: s = "off"; break;
case HUB_LED_AUTO: s = "auto"; break;
default: s = "??"; break;
- }; s; }),
+ } s; }),
status);
}
@@ -857,7 +862,7 @@ static int hub_hub_status(struct usb_hub *hub,
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
- *change = le16_to_cpu(hub->status->hub.wHubChange);
+ *change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
@@ -956,7 +961,7 @@ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
*/
set_bit(port1, hub->change_bits);
- kick_khubd(hub);
+ kick_khubd(hub);
}
/**
@@ -1107,16 +1112,13 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/*
* USB3 protocol ports will automatically transition
* to Enabled state when detect an USB3.0 device attach.
- * Do not disable USB3 protocol ports.
+ * Do not disable USB3 protocol ports, just pretend
+ * power was lost
*/
- if (!hub_is_superspeed(hdev)) {
+ portstatus &= ~USB_PORT_STAT_ENABLE;
+ if (!hub_is_superspeed(hdev))
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
- portstatus &= ~USB_PORT_STAT_ENABLE;
- } else {
- /* Pretend that power was lost for USB3 devs */
- portstatus &= ~USB_PORT_STAT_ENABLE;
- }
}
/* Clear status-change flags; we'll debounce later */
@@ -1130,6 +1132,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
+ if (portchange & USB_PORT_STAT_C_RESET) {
+ need_debounce_delay = true;
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_RESET);
+ }
if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
hub_is_superspeed(hub->hdev)) {
need_debounce_delay = true;
@@ -1361,7 +1368,7 @@ static int hub_configure(struct usb_hub *hub,
if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
!(hub_is_superspeed(hdev))) {
int i;
- char portstr [USB_MAXCHILDREN + 1];
+ char portstr[USB_MAXCHILDREN + 1];
for (i = 0; i < hdev->maxchild; i++)
portstr[i] = hub->descriptor->u.hs.DeviceRemovable
@@ -1429,32 +1436,32 @@ static int hub_configure(struct usb_hub *hub,
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
switch (wHubCharacteristics & HUB_CHAR_TTTT) {
- case HUB_TTTT_8_BITS:
- if (hdev->descriptor.bDeviceProtocol != 0) {
- hub->tt.think_time = 666;
- dev_dbg(hub_dev, "TT requires at most %d "
- "FS bit times (%d ns)\n",
- 8, hub->tt.think_time);
- }
- break;
- case HUB_TTTT_16_BITS:
- hub->tt.think_time = 666 * 2;
- dev_dbg(hub_dev, "TT requires at most %d "
- "FS bit times (%d ns)\n",
- 16, hub->tt.think_time);
- break;
- case HUB_TTTT_24_BITS:
- hub->tt.think_time = 666 * 3;
+ case HUB_TTTT_8_BITS:
+ if (hdev->descriptor.bDeviceProtocol != 0) {
+ hub->tt.think_time = 666;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
- 24, hub->tt.think_time);
- break;
- case HUB_TTTT_32_BITS:
- hub->tt.think_time = 666 * 4;
- dev_dbg(hub_dev, "TT requires at most %d "
- "FS bit times (%d ns)\n",
- 32, hub->tt.think_time);
- break;
+ 8, hub->tt.think_time);
+ }
+ break;
+ case HUB_TTTT_16_BITS:
+ hub->tt.think_time = 666 * 2;
+ dev_dbg(hub_dev, "TT requires at most %d "
+ "FS bit times (%d ns)\n",
+ 16, hub->tt.think_time);
+ break;
+ case HUB_TTTT_24_BITS:
+ hub->tt.think_time = 666 * 3;
+ dev_dbg(hub_dev, "TT requires at most %d "
+ "FS bit times (%d ns)\n",
+ 24, hub->tt.think_time);
+ break;
+ case HUB_TTTT_32_BITS:
+ hub->tt.think_time = 666 * 4;
+ dev_dbg(hub_dev, "TT requires at most %d "
+ "FS bit times (%d ns)\n",
+ 32, hub->tt.think_time);
+ break;
}
/* probe() zeroes hub->indicator[] */
@@ -1560,7 +1567,7 @@ static int hub_configure(struct usb_hub *hub,
/* maybe cycle the hub leds */
if (hub->has_indicators && blinkenlights)
- hub->indicator [0] = INDICATOR_CYCLE;
+ hub->indicator[0] = INDICATOR_CYCLE;
for (i = 0; i < hdev->maxchild; i++) {
ret = usb_hub_create_port_device(hub, i + 1);
@@ -1978,7 +1985,7 @@ static void choose_devnum(struct usb_device *udev)
if (devnum >= 128)
devnum = find_next_zero_bit(bus->devmap.devicemap,
128, 1);
- bus->devnum_next = ( devnum >= 127 ? 1 : devnum + 1);
+ bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1);
}
if (devnum < 128) {
set_bit(devnum, bus->devmap.devicemap);
@@ -2018,8 +2025,8 @@ static void hub_free_dev(struct usb_device *udev)
* Something got disconnected. Get rid of it and all of its children.
*
* If *pdev is a normal device then the parent hub must already be locked.
- * If *pdev is a root hub then this routine will acquire the
- * usb_bus_list_lock on behalf of the caller.
+ * If *pdev is a root hub then the caller must hold the usb_bus_list_lock,
+ * which protects the set of root hubs as well as the list of buses.
*
* Only hub drivers (including virtual root hub drivers for host
* controllers) should ever call this.
@@ -2232,8 +2239,7 @@ static int usb_enumerate_device(struct usb_device *udev)
udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- }
- else {
+ } else {
/* read the standard strings and cache them if present */
udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
udev->manufacturer = usb_cache_string(udev,
@@ -2489,7 +2495,7 @@ error_device_descriptor:
usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
- usb_unlock_device(usb_dev); // complements locktree
+ usb_unlock_device(usb_dev); /* complements locktree */
return result;
}
@@ -3108,8 +3114,8 @@ static int finish_port_resume(struct usb_device *udev)
retry_reset_resume:
status = usb_reset_and_verify_device(udev);
- /* 10.5.4.5 says be sure devices in the tree are still there.
- * For now let's assume the device didn't go crazy on resume,
+ /* 10.5.4.5 says be sure devices in the tree are still there.
+ * For now let's assume the device didn't go crazy on resume,
* and device drivers will know about any resume quirks.
*/
if (status == 0) {
@@ -3211,7 +3217,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
if (status == 0 && !port_is_suspended(hub, portstatus))
goto SuspendCleared;
- // dev_dbg(hub->intfdev, "resume port %d\n", port1);
+ /* dev_dbg(hub->intfdev, "resume port %d\n", port1); */
set_bit(port1, hub->busy_bits);
@@ -3855,7 +3861,7 @@ EXPORT_SYMBOL_GPL(usb_enable_ltm);
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
- *
+ *
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
@@ -3949,6 +3955,32 @@ static int hub_set_address(struct usb_device *udev, int devnum)
return retval;
}
+/*
+ * There are reports of USB 3.0 devices that say they support USB 2.0 Link PM
+ * when they're plugged into a USB 2.0 port, but they don't work when LPM is
+ * enabled.
+ *
+ * Only enable USB 2.0 Link PM if the port is internal (hardwired), or the
+ * device says it supports the new USB 2.0 Link PM errata by setting the BESL
+ * support bit in the BOS descriptor.
+ */
+static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
+{
+ int connect_type;
+
+ if (!udev->usb2_hw_lpm_capable)
+ return;
+
+ connect_type = usb_get_hub_port_connect_type(udev->parent,
+ udev->portnum);
+
+ if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) ||
+ connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
+ udev->usb2_hw_lpm_allowed = 1;
+ usb_set_usb2_hardware_lpm(udev, 1);
+ }
+}
+
/* Reset device, (re)assign address, get device descriptor.
* Device connection must be stable, no more debouncing needed.
* Returns device in USB_STATE_ADDRESS, except on error.
@@ -4055,7 +4087,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
udev->tt = &hub->tt;
udev->ttport = port1;
}
-
+
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
@@ -4130,11 +4162,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
#undef GET_DESCRIPTOR_BUFSIZE
}
- /*
- * If device is WUSB, we already assigned an
- * unauthorized address in the Connect Ack sequence;
- * authorization will assign the final address.
- */
+ /*
+ * If device is WUSB, we already assigned an
+ * unauthorized address in the Connect Ack sequence;
+ * authorization will assign the final address.
+ */
if (udev->wusb == 0) {
for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
retval = hub_set_address(udev, devnum);
@@ -4163,7 +4195,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
msleep(10);
if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
break;
- }
+ }
retval = usb_get_device_descriptor(udev, 8);
if (retval < 8) {
@@ -4219,7 +4251,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
-
+
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
if (retval != -ENODEV)
@@ -4242,6 +4274,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
/* notify HCD that we have a device connected and addressed */
if (hcd->driver->update_device)
hcd->driver->update_device(hcd, udev);
+ hub_set_initial_usb2_lpm_policy(udev);
fail:
if (retval) {
hub_port_disable(hub, port1, 0);
@@ -4316,7 +4349,7 @@ hub_power_remaining (struct usb_hub *hub)
}
if (remaining < 0) {
dev_warn(hub->intfdev, "%dmA over power budget!\n",
- - remaining);
+ -remaining);
remaining = 0;
}
return remaining;
@@ -4427,7 +4460,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (portstatus & USB_PORT_STAT_ENABLE)
- goto done;
+ goto done;
return;
}
if (hub_is_superspeed(hub->hdev))
@@ -4450,7 +4483,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
}
usb_set_device_state(udev, USB_STATE_POWERED);
- udev->bus_mA = hub->mA_per_port;
+ udev->bus_mA = hub->mA_per_port;
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
@@ -4504,7 +4537,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto loop_disable;
}
}
-
+
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
@@ -4564,7 +4597,7 @@ loop:
dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
port1);
}
-
+
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
@@ -4729,7 +4762,7 @@ static void hub_events(void)
* EM interference sometimes causes badly
* shielded USB devices to be shutdown by
* the hub, this hack enables them again.
- * Works at least with mouse driver.
+ * Works at least with mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change
@@ -4841,7 +4874,7 @@ static void hub_events(void)
dev_dbg(hub_dev, "over-current change\n");
clear_hub_feature(hdev, C_HUB_OVER_CURRENT);
msleep(500); /* Cool down */
- hub_power_on(hub, true);
+ hub_power_on(hub, true);
hub_hub_status(hub, &status, &unused);
if (status & HUB_STATUS_OVERCURRENT)
dev_err(hub_dev, "over-current "
@@ -4861,7 +4894,7 @@ static void hub_events(void)
usb_unlock_device(hdev);
kref_put(&hub->kref, hub_release);
- } /* end while (1) */
+ } /* end while (1) */
}
static int hub_thread(void *__unused)
@@ -4886,7 +4919,7 @@ static int hub_thread(void *__unused)
static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_INT_CLASS,
+ | USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
@@ -5086,6 +5119,12 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
parent_hub = usb_hub_to_struct_hub(parent_hdev);
+ /* Disable USB2 hardware LPM.
+ * It will be re-enabled by the enumeration process.
+ */
+ if (udev->usb2_hw_lpm_enabled == 1)
+ usb_set_usb2_hardware_lpm(udev, 0);
+
bos = udev->bos;
udev->bos = NULL;
@@ -5120,13 +5159,13 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (ret < 0)
goto re_enumerate;
-
+
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor, bos)) {
dev_info(&udev->dev, "device firmware changed\n");
udev->descriptor = descriptor; /* for disconnect() calls */
goto re_enumerate;
- }
+ }
/* Restore the device's previous configuration */
if (!udev->actconfig)
@@ -5151,7 +5190,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
udev->actconfig->desc.bConfigurationValue, ret);
mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
- }
+ }
mutex_unlock(hcd->bandwidth_mutex);
usb_set_device_state(udev, USB_STATE_CONFIGURED);
@@ -5193,12 +5232,13 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
+ usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
usb_release_bos_descriptor(udev);
udev->bos = bos;
return 0;
-
+
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
@@ -5461,6 +5501,6 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
if (!hub)
return NULL;
- return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
+ return ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
}
#endif
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 82927e1ed27..bb315970e47 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1182,8 +1182,12 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
put_device(&dev->actconfig->interface[i]->dev);
dev->actconfig->interface[i] = NULL;
}
+
+ if (dev->usb2_hw_lpm_enabled == 1)
+ usb_set_usb2_hardware_lpm(dev, 0);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);
+
dev->actconfig = NULL;
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 01fe36273f3..12924dbfdc2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -13,6 +13,7 @@
#include <linux/usb.h>
#include <linux/usb/quirks.h>
+#include <linux/usb/hcd.h>
#include "usb.h"
/* Lists of quirky USB devices, split in device quirks and interface quirks.
@@ -161,6 +162,21 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
{ } /* terminating entry must be last */
};
+static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ /* Lenovo Mouse with Pixart controller */
+ { USB_DEVICE(0x17ef, 0x602e), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Pixart Mouse */
+ { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Optical Mouse M90/M100 */
+ { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ { } /* terminating entry must be last */
+};
+
static bool usb_match_any_interface(struct usb_device *udev,
const struct usb_device_id *id)
{
@@ -187,6 +203,18 @@ static bool usb_match_any_interface(struct usb_device *udev,
return false;
}
+static int usb_amd_resume_quirk(struct usb_device *udev)
+{
+ struct usb_hcd *hcd;
+
+ hcd = bus_to_hcd(udev->bus);
+ /* The device should be attached directly to root hub */
+ if (udev->level == 1 && hcd->amd_resume_bug == 1)
+ return 1;
+
+ return 0;
+}
+
static u32 __usb_detect_quirks(struct usb_device *udev,
const struct usb_device_id *id)
{
@@ -212,6 +240,15 @@ static u32 __usb_detect_quirks(struct usb_device *udev,
void usb_detect_quirks(struct usb_device *udev)
{
udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
+
+ /*
+ * Pixart-based mice would trigger remote wakeup issue on AMD
+ * Yangtze chipset, so set them as RESET_RESUME flag.
+ */
+ if (usb_amd_resume_quirk(udev))
+ udev->quirks |= __usb_detect_quirks(udev,
+ usb_amd_resume_quirk_list);
+
if (udev->quirks)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 6d2c8edb1ff..52a97adf02a 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -23,14 +23,16 @@ static ssize_t field##_show(struct device *dev, \
{ \
struct usb_device *udev; \
struct usb_host_config *actconfig; \
+ ssize_t rc = 0; \
\
udev = to_usb_device(dev); \
+ usb_lock_device(udev); \
actconfig = udev->actconfig; \
if (actconfig) \
- return sprintf(buf, format_string, \
+ rc = sprintf(buf, format_string, \
actconfig->desc.field); \
- else \
- return 0; \
+ usb_unlock_device(udev); \
+ return rc; \
} \
#define usb_actconfig_attr(field, format_string) \
@@ -45,12 +47,15 @@ static ssize_t bMaxPower_show(struct device *dev,
{
struct usb_device *udev;
struct usb_host_config *actconfig;
+ ssize_t rc = 0;
udev = to_usb_device(dev);
+ usb_lock_device(udev);
actconfig = udev->actconfig;
- if (!actconfig)
- return 0;
- return sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
+ if (actconfig)
+ rc = sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
+ usb_unlock_device(udev);
+ return rc;
}
static DEVICE_ATTR_RO(bMaxPower);
@@ -59,12 +64,15 @@ static ssize_t configuration_show(struct device *dev,
{
struct usb_device *udev;
struct usb_host_config *actconfig;
+ ssize_t rc = 0;
udev = to_usb_device(dev);
+ usb_lock_device(udev);
actconfig = udev->actconfig;
- if ((!actconfig) || (!actconfig->string))
- return 0;
- return sprintf(buf, "%s\n", actconfig->string);
+ if (actconfig && actconfig->string)
+ rc = sprintf(buf, "%s\n", actconfig->string);
+ usb_unlock_device(udev);
+ return rc;
}
static DEVICE_ATTR_RO(configuration);
@@ -390,7 +398,8 @@ static DEVICE_ATTR_RW(autosuspend);
static const char on_string[] = "on";
static const char auto_string[] = "auto";
-static void warn_level(void) {
+static void warn_level(void)
+{
static int level_warned;
if (!level_warned) {
@@ -449,7 +458,7 @@ static ssize_t usb2_hardware_lpm_show(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
const char *p;
- if (udev->usb2_hw_lpm_enabled == 1)
+ if (udev->usb2_hw_lpm_allowed == 1)
p = "enabled";
else
p = "disabled";
@@ -469,8 +478,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
ret = strtobool(buf, &value);
- if (!ret)
+ if (!ret) {
+ udev->usb2_hw_lpm_allowed = value;
ret = usb_set_usb2_hardware_lpm(udev, value);
+ }
usb_unlock_device(udev);
@@ -644,7 +655,7 @@ static ssize_t authorized_store(struct device *dev,
result = usb_deauthorize_device(usb_dev);
else
result = usb_authorize_device(usb_dev);
- return result < 0? result : size;
+ return result < 0 ? result : size;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
authorized_show, authorized_store);
@@ -764,6 +775,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
*/
+ usb_lock_device(udev);
for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
nleft > 0; ++cfgno) {
if (cfgno < 0) {
@@ -784,6 +796,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
+ usb_unlock_device(udev);
return count - nleft;
}
@@ -870,9 +883,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *string;
intf = to_usb_interface(dev);
- string = intf->cur_altsetting->string;
- barrier(); /* The altsetting might change! */
-
+ string = ACCESS_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
@@ -888,7 +899,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
- alt = intf->cur_altsetting;
+ alt = ACCESS_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
@@ -909,23 +920,14 @@ static ssize_t supports_autosuspend_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct usb_interface *intf;
- struct usb_device *udev;
- int ret;
+ int s;
- intf = to_usb_interface(dev);
- udev = interface_to_usbdev(intf);
-
- usb_lock_device(udev);
+ device_lock(dev);
/* Devices will be autosuspended even when an interface isn't claimed */
- if (!intf->dev.driver ||
- to_usb_driver(intf->dev.driver)->supports_autosuspend)
- ret = sprintf(buf, "%u\n", 1);
- else
- ret = sprintf(buf, "%u\n", 0);
- usb_unlock_device(udev);
+ s = (!dev->driver || to_usb_driver(dev->driver)->supports_autosuspend);
+ device_unlock(dev);
- return ret;
+ return sprintf(buf, "%u\n", s);
}
static DEVICE_ATTR_RO(supports_autosuspend);
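
The sysfs.c hunks above make the configuration attributes take the USB device lock while they dereference udev->actconfig, caching the sprintf() result instead of returning under the lock-free race. A short sketch of that locked-show pattern, using a hypothetical attribute name ("demo"), is:

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct usb_device *udev = to_usb_device(dev);
	struct usb_host_config *actconfig;
	ssize_t rc = 0;

	usb_lock_device(udev);		/* keeps actconfig stable while we read it */
	actconfig = udev->actconfig;
	if (actconfig)
		rc = sprintf(buf, "%d\n", actconfig->desc.bConfigurationValue);
	usb_unlock_device(udev);
	return rc;
}
static DEVICE_ATTR_RO(demo);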
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index c12bc790a6a..e62208356c8 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -138,13 +138,19 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
+static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
+{
+ return atomic_read(&anchor->suspend_wakeups) == 0 &&
+ list_empty(&anchor->urb_list);
+}
+
/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
urb->anchor = NULL;
list_del(&urb->anchor_list);
usb_put_urb(urb);
- if (list_empty(&anchor->urb_list))
+ if (usb_anchor_check_wakeup(anchor))
wake_up(&anchor->wait);
}
@@ -846,6 +852,39 @@ void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
/**
+ * usb_anchor_suspend_wakeups
+ * @anchor: the anchor you want to suspend wakeups on
+ *
+ * Call this to stop the last urb being unanchored from waking up any
+ * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give-
+ * back path to delay waking up until after the completion handler has run.
+ */
+void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
+{
+ if (anchor)
+ atomic_inc(&anchor->suspend_wakeups);
+}
+EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);
+
+/**
+ * usb_anchor_resume_wakeups
+ * @anchor: the anchor you want to resume wakeups on
+ *
+ * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
+ * wake up any current waiters if the anchor is empty.
+ */
+void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
+{
+ if (!anchor)
+ return;
+
+ atomic_dec(&anchor->suspend_wakeups);
+ if (usb_anchor_check_wakeup(anchor))
+ wake_up(&anchor->wait);
+}
+EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);
+
+/**
* usb_wait_anchor_empty_timeout - wait for an anchor to be unused
* @anchor: the anchor you want to become unused
* @timeout: how long you are willing to wait in milliseconds
@@ -858,7 +897,8 @@ EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
unsigned int timeout)
{
- return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
+ return wait_event_timeout(anchor->wait,
+ usb_anchor_check_wakeup(anchor),
msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
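
The new usb_anchor_suspend_wakeups()/usb_anchor_resume_wakeups() pair, together with the hcd.c giveback changes earlier in this patch, delays waking anchor waiters until after the URB's completion handler has run. The waiter side that benefits is sketched below with hypothetical names (demo_priv, DEMO_NURBS) and trimmed error handling; only the anchor API calls are real.

#include <linux/usb.h>

#define DEMO_NURBS 4

struct demo_priv {
	struct urb *urbs[DEMO_NURBS];
	struct usb_anchor anchor;	/* init_usb_anchor() done at probe time */
};

static int demo_flush(struct demo_priv *priv)
{
	int i, ret = 0;

	for (i = 0; i < DEMO_NURBS; i++) {
		usb_anchor_urb(priv->urbs[i], &priv->anchor);
		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
		if (ret) {
			usb_unanchor_urb(priv->urbs[i]);
			break;
		}
	}

	/* Returns 0 on timeout; with this patch a non-zero return also means
	 * the completion handlers for the anchored URBs have finished. */
	if (!usb_wait_anchor_empty_timeout(&priv->anchor, 1000))
		usb_kill_anchored_urbs(&priv->anchor);

	return ret;
}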
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 255c14464bf..4e243c37f17 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
}
/* root hub's parent is the usb hcd. */
- parent_handle = DEVICE_ACPI_HANDLE(dev->parent);
+ parent_handle = ACPI_HANDLE(dev->parent);
*handle = acpi_get_child(parent_handle, udev->portnum);
if (!*handle)
return -ENODEV;
@@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
raw_port_num = usb_hcd_find_raw_port_number(hcd,
port_num);
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+ *handle = acpi_get_child(ACPI_HANDLE(&udev->dev),
raw_port_num);
if (!*handle)
return -ENODEV;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0a6ee2e70b2..4d1144990d4 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -497,7 +497,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->authorized = 1;
else {
dev->authorized = usb_hcd->authorized_default;
- dev->wusb = usb_bus_is_wusb(bus)? 1 : 0;
+ dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
}
return dev;
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 823857767a1..c49383669cd 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,6 +35,7 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
+extern int usb_device_supports_lpm(struct usb_device *udev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 474162e9d01..74f9cf02da0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -584,7 +584,7 @@ static int dwc3_remove(struct platform_device *pdev)
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
dwc3_debugfs_exit(dwc);
@@ -691,7 +691,6 @@ static int dwc3_resume(struct device *dev)
usb_phy_init(dwc->usb3_phy);
usb_phy_init(dwc->usb2_phy);
- msleep(100);
spin_lock_irqsave(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f2e88a3a11..8b20c70d91e 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -119,10 +119,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err1;
platform_set_drvdata(pdev, exynos);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2e252aae51c..31443aeedcd 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -165,7 +165,6 @@ static int dwc3_pci_probe(struct pci_dev *pci,
return 0;
err3:
- pci_set_drvdata(pci, NULL);
platform_device_put(dwc3);
err1:
pci_disable_device(pci);
@@ -180,7 +179,6 @@ static void dwc3_pci_remove(struct pci_dev *pci)
platform_device_unregister(glue->dwc3);
platform_device_unregister(glue->usb2_phy);
platform_device_unregister(glue->usb3_phy);
- pci_set_drvdata(pci, NULL);
pci_disable_device(pci);
}
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 7fa93f4bc50..95f7649c71a 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -352,7 +352,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
break;
default:
return -EINVAL;
- };
+ }
response_pkt = (__le16 *) dwc->setup_buf;
*response_pkt = cpu_to_le16(usb_status);
@@ -470,7 +470,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
default:
return -EINVAL;
- };
+ }
return 0;
}
@@ -709,7 +709,7 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
break;
- };
+ }
return ret;
}
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 5e29ddeb4d3..8cfc3191be5 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -568,10 +568,6 @@ try_again:
dbgp_printk("Could not find attached debug device\n");
goto err;
}
- if (ret < 0) {
- dbgp_printk("Attached device is not a debug device\n");
- goto err;
- }
dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 48cddf3cd6b..a91e6422f93 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -58,6 +58,20 @@ config USB_GADGET_DEBUG
trying to track down. Never enable these messages for a
production build.
+config USB_GADGET_VERBOSE
+ bool "Verbose debugging Messages (DEVELOPMENT)"
+ depends on USB_GADGET_DEBUG
+ help
+ Many controller and gadget drivers will print verbose debugging
+ messages if you use this option to ask for those messages.
+
+ Avoid enabling these messages, even if you're actively
+ debugging such a driver. Many drivers will emit so many
+ messages that the driver timings are affected, which will
+ either create new failure modes or remove the one you're
+ trying to track down. Never enable these messages for a
+ production build.
+
config USB_GADGET_DEBUG_FILES
boolean "Debugging information files (DEVELOPMENT)"
depends on PROC_FS
@@ -525,6 +539,9 @@ config USB_F_SUBSET
config USB_F_RNDIS
tristate
+config USB_F_MASS_STORAGE
+ tristate
+
choice
tristate "USB Gadget Drivers"
default USB_ETH
@@ -662,6 +679,16 @@ config USB_CONFIGFS_PHONET
help
The Phonet protocol implementation for USB device.
+config USB_CONFIGFS_MASS_STORAGE
+ boolean "Mass storage"
+ depends on USB_CONFIGFS
+ select USB_F_MASS_STORAGE
+ help
+ The Mass Storage Gadget acts as a USB Mass Storage disk drive.
+ As its storage repository it can use a regular file or a block
+ device (in much the same way as the "loop" device driver),
+ specified as a module parameter or sysfs option.
+
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -878,6 +905,7 @@ config USB_MASS_STORAGE
tristate "Mass Storage Gadget"
depends on BLOCK
select USB_LIBCOMPOSITE
+ select USB_F_MASS_STORAGE
help
The Mass Storage Gadget acts as a USB Mass Storage disk drive.
As its storage repository it can use a regular file or a block
@@ -1001,6 +1029,7 @@ config USB_G_ACM_MS
select USB_LIBCOMPOSITE
select USB_U_SERIAL
select USB_F_ACM
+ select USB_F_MASS_STORAGE
help
This driver provides two functions in one configuration:
a mass storage, and a CDC ACM (serial port) link.
@@ -1015,8 +1044,8 @@ config USB_G_MULTI
select USB_LIBCOMPOSITE
select USB_U_SERIAL
select USB_U_ETHER
- select USB_U_RNDIS
select USB_F_ACM
+ select USB_F_MASS_STORAGE
help
The Multifunction Composite Gadget provides Ethernet (RNDIS
and/or CDC Ethernet), mass storage and ACM serial link
@@ -1035,6 +1064,8 @@ config USB_G_MULTI
config USB_G_MULTI_RNDIS
bool "RNDIS + CDC Serial + Storage configuration"
depends on USB_G_MULTI
+ select USB_U_RNDIS
+ select USB_F_RNDIS
default y
help
This option enables a configuration with RNDIS, CDC Serial and
@@ -1048,6 +1079,7 @@ config USB_G_MULTI_CDC
bool "CDC Ethernet + CDC Serial + Storage configuration"
depends on USB_G_MULTI
default n
+ select USB_F_ECM
help
This option enables a configuration with CDC Ethernet (ECM), CDC
Serial and Mass Storage functions available in the Multifunction
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 386db9daf1d..f1af39603d4 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -1,7 +1,8 @@
#
# USB peripheral controller drivers
#
-ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_USB_GADGET_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_GADGET) += udc-core.o
obj-$(CONFIG_USB_LIBCOMPOSITE) += libcomposite.o
@@ -60,6 +61,8 @@ usb_f_ecm_subset-y := f_subset.o
obj-$(CONFIG_USB_F_SUBSET) += usb_f_ecm_subset.o
usb_f_rndis-y := f_rndis.o
obj-$(CONFIG_USB_F_RNDIS) += usb_f_rndis.o
+usb_f_mass_storage-y := f_mass_storage.o storage_common.o
+obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/acm_ms.c b/drivers/usb/gadget/acm_ms.c
index 4b947bb50f6..7bfa134fe0e 100644
--- a/drivers/usb/gadget/acm_ms.c
+++ b/drivers/usb/gadget/acm_ms.c
@@ -31,16 +31,7 @@
#define ACM_MS_VENDOR_NUM 0x1d6b /* Linux Foundation */
#define ACM_MS_PRODUCT_NUM 0x0106 /* Composite Gadget: ACM + MS*/
-/*-------------------------------------------------------------------------*/
-
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_mass_storage.c"
+#include "f_mass_storage.h"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
@@ -104,18 +95,35 @@ static struct usb_gadget_strings *dev_strings[] = {
/****************************** Configurations ******************************/
static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
-FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-static struct fsg_common fsg_common;
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for good buffering pipeline
+ */
+#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_DEBUG */
+
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
/*-------------------------------------------------------------------------*/
static struct usb_function *f_acm;
static struct usb_function_instance *f_acm_inst;
+
+static struct usb_function_instance *fi_msg;
+static struct usb_function *f_msg;
+
/*
* We _always_ have both ACM and mass storage functions.
*/
static int __init acm_ms_do_config(struct usb_configuration *c)
{
+ struct fsg_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -123,31 +131,37 @@ static int __init acm_ms_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- f_acm_inst = usb_get_function_instance("acm");
- if (IS_ERR(f_acm_inst))
- return PTR_ERR(f_acm_inst);
+ opts = fsg_opts_from_func_inst(fi_msg);
f_acm = usb_get_function(f_acm_inst);
- if (IS_ERR(f_acm)) {
- status = PTR_ERR(f_acm);
- goto err_func;
+ if (IS_ERR(f_acm))
+ return PTR_ERR(f_acm);
+
+ f_msg = usb_get_function(fi_msg);
+ if (IS_ERR(f_msg)) {
+ status = PTR_ERR(f_msg);
+ goto put_acm;
}
status = usb_add_function(c, f_acm);
if (status < 0)
- goto err_conf;
+ goto put_msg;
- status = fsg_bind_config(c->cdev, c, &fsg_common);
- if (status < 0)
- goto err_fsg;
+ status = fsg_common_run_thread(opts->common);
+ if (status)
+ goto remove_acm;
+
+ status = usb_add_function(c, f_msg);
+ if (status)
+ goto remove_acm;
return 0;
-err_fsg:
+remove_acm:
usb_remove_function(c, f_acm);
-err_conf:
+put_msg:
+ usb_put_function(f_msg);
+put_acm:
usb_put_function(f_acm);
-err_func:
- usb_put_function_instance(f_acm_inst);
return status;
}
@@ -163,45 +177,82 @@ static struct usb_configuration acm_ms_config_driver = {
static int __init acm_ms_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
+ struct fsg_opts *opts;
+ struct fsg_config config;
int status;
- void *retp;
- /* set up mass storage function */
- retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data);
- if (IS_ERR(retp)) {
- status = PTR_ERR(retp);
- return PTR_ERR(retp);
+ f_acm_inst = usb_get_function_instance("acm");
+ if (IS_ERR(f_acm_inst))
+ return PTR_ERR(f_acm_inst);
+
+ fi_msg = usb_get_function_instance("mass_storage");
+ if (IS_ERR(fi_msg)) {
+ status = PTR_ERR(fi_msg);
+ goto fail_get_msg;
}
+ /* set up mass storage function */
+ fsg_config_from_params(&config, &fsg_mod_data, fsg_num_buffers);
+ opts = fsg_opts_from_func_inst(fi_msg);
+
+ opts->no_configfs = true;
+ status = fsg_common_set_num_buffers(opts->common, fsg_num_buffers);
+ if (status)
+ goto fail;
+
+ status = fsg_common_set_nluns(opts->common, config.nluns);
+ if (status)
+ goto fail_set_nluns;
+
+ status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
+ if (status)
+ goto fail_set_cdev;
+
+ fsg_common_set_sysfs(opts->common, true);
+ status = fsg_common_create_luns(opts->common, &config);
+ if (status)
+ goto fail_set_cdev;
+
+ fsg_common_set_inquiry_string(opts->common, config.vendor_name,
+ config.product_name);
/*
* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
*/
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
- goto fail1;
+ goto fail_string_ids;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* register our configuration */
status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config);
if (status < 0)
- goto fail1;
+ goto fail_string_ids;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
DRIVER_DESC);
- fsg_common_put(&fsg_common);
return 0;
/* error recovery */
-fail1:
- fsg_common_put(&fsg_common);
+fail_string_ids:
+ fsg_common_remove_luns(opts->common);
+fail_set_cdev:
+ fsg_common_free_luns(opts->common);
+fail_set_nluns:
+ fsg_common_free_buffers(opts->common);
+fail:
+ usb_put_function_instance(fi_msg);
+fail_get_msg:
+ usb_put_function_instance(f_acm_inst);
return status;
}
static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
{
+ usb_put_function(f_msg);
+ usb_put_function_instance(fi_msg);
usb_put_function(f_acm);
usb_put_function_instance(f_acm_inst);
return 0;
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index a9a4346c83a..54a1e2954ce 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -3078,8 +3078,6 @@ static void udc_pci_remove(struct pci_dev *pdev)
if (dev->active)
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
udc_remove(dev);
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d4f0f330575..3e7ae707f69 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -354,7 +354,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
return DIV_ROUND_UP(val, 8);
default:
return DIV_ROUND_UP(val, 2);
- };
+ }
}
static int config_buf(struct usb_configuration *config,
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 8f0d6141e5e..25885112fa3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -557,7 +557,7 @@ static struct config_group *function_make(
fi = usb_get_function_instance(func_name);
if (IS_ERR(fi))
- return ERR_PTR(PTR_ERR(fi));
+ return ERR_CAST(fi);
ret = config_item_set_name(&fi->group.cg_item, name);
if (ret) {
@@ -991,6 +991,14 @@ static struct configfs_subsystem gadget_subsys = {
.su_mutex = __MUTEX_INITIALIZER(gadget_subsys.su_mutex),
};
+void unregister_gadget_item(struct config_item *item)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+
+ unregister_gadget(gi);
+}
+EXPORT_SYMBOL(unregister_gadget_item);
+
static int __init gadget_cfs_init(void)
{
int ret;
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
new file mode 100644
index 00000000000..a7b564a913d
--- /dev/null
+++ b/drivers/usb/gadget/configfs.h
@@ -0,0 +1,6 @@
+#ifndef USB__GADGET__CONFIGFS__H
+#define USB__GADGET__CONFIGFS__H
+
+void unregister_gadget_item(struct config_item *item);
+
+#endif /* USB__GADGET__CONFIGFS__H */
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index b8a2376971a..8f4dae31092 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -544,7 +544,7 @@ static int dummy_enable(struct usb_ep *_ep,
default:
val = "ctrl";
break;
- }; val; }),
+ } val; }),
max, ep->stream_en ? "enabled" : "disabled");
/* at this point real hardware should be NAKing transfers
@@ -2271,7 +2271,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
default:
s = "?";
break;
- }; s; }),
+ } s; }),
ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
@@ -2287,7 +2287,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
default: \
s = "-iso"; \
break; \
- }; s; }),
+ } s; }),
urb->actual_length, urb->transfer_buffer_length);
}
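
The three dummy_hcd.c hunks above (and the matching goku_udc.c hunk further down) drop a stray semicolon after a switch that sits inside a GCC statement expression. A statement expression ({ ...; expr; }) evaluates to its final expression, so the trailing s;/val; is what actually reaches the format string; the extra ; after the switch's closing brace was only an empty statement. A small userspace illustration of the construct (a GNU C extension, so gcc/clang only):

#include <stdio.h>

int main(void)
{
	int type = 2;

	/* The ({ ... }) block evaluates to its last expression, here "s". */
	printf("type: %s\n", ({
		const char *s;
		switch (type) {
		case 0:  s = "ctrl"; break;
		case 1:  s = "bulk"; break;
		default: s = "?";    break;
		}	/* no semicolon needed after the switch */
		s;
	}));

	return 0;
}
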
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 44cf775a862..774e8b89cdb 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -373,7 +373,7 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
if (req->buf == NULL)
req->buf = (void *)0xDEADBABE;
- INIT_COMPLETION(ffs->ep0req_completion);
+ reinit_completion(&ffs->ep0req_completion);
ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
if (unlikely(ret < 0))
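
The f_fs.c hunk above is part of the tree-wide switch from the old INIT_COMPLETION() macro to reinit_completion(), which simply resets the done count of an already-initialised completion before it is reused for the next request. A hedged sketch of the lifecycle; the waiter/completer split and the xfer_done name are illustrative:

#include <linux/completion.h>

static DECLARE_COMPLETION(xfer_done);

static void waiter(void)
{
	reinit_completion(&xfer_done);	/* re-arm before each reuse */
	/* ... submit the work that will eventually signal completion ... */
	wait_for_completion(&xfer_done);
}

static void completer(void)
{
	complete(&xfer_done);		/* wakes the waiter */
}
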
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a01d7d38c01..a03ba2c8358 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -213,12 +213,14 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
+#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include "gadget_chips.h"
+#include "configfs.h"
/*------------------------------------------------------------------------*/
@@ -228,26 +230,30 @@
static const char fsg_string_interface[] = "Mass Storage";
-#include "storage_common.c"
+#include "storage_common.h"
+#include "f_mass_storage.h"
+/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
+static struct usb_string fsg_strings[] = {
+ {FSG_STRING_INTERFACE, fsg_string_interface},
+ {}
+};
+
+static struct usb_gadget_strings fsg_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = fsg_strings,
+};
+
+static struct usb_gadget_strings *fsg_strings_array[] = {
+ &fsg_stringtab,
+ NULL,
+};
/*-------------------------------------------------------------------------*/
struct fsg_dev;
struct fsg_common;
-/* FSF callback functions */
-struct fsg_operations {
- /*
- * Callback function to call when thread exits. If no
- * callback is set or it returns value lower then zero MSF
- * will force eject all LUNs it operates on (including those
- * marked as non-removable or with prevent_medium_removal flag
- * set).
- */
- int (*thread_exits)(struct fsg_common *common);
-};
-
/* Data shared by all the FSG instances. */
struct fsg_common {
struct usb_gadget *gadget;
@@ -268,13 +274,14 @@ struct fsg_common {
struct fsg_buffhd *next_buffhd_to_fill;
struct fsg_buffhd *next_buffhd_to_drain;
struct fsg_buffhd *buffhds;
+ unsigned int fsg_num_buffers;
int cmnd_size;
u8 cmnd[MAX_COMMAND_SIZE];
unsigned int nluns;
unsigned int lun;
- struct fsg_lun *luns;
+ struct fsg_lun **luns;
struct fsg_lun *curlun;
unsigned int bulk_out_maxpacket;
@@ -294,6 +301,7 @@ struct fsg_common {
unsigned int short_packet_received:1;
unsigned int bad_lun_okay:1;
unsigned int running:1;
+ unsigned int sysfs:1;
int thread_wakeup_needed;
struct completion thread_notifier;
@@ -313,27 +321,6 @@ struct fsg_common {
struct kref ref;
};
-struct fsg_config {
- unsigned nluns;
- struct fsg_lun_config {
- const char *filename;
- char ro;
- char removable;
- char cdrom;
- char nofua;
- } luns[FSG_MAX_LUNS];
-
- /* Callback functions. */
- const struct fsg_operations *ops;
- /* Gadget's private data. */
- void *private_data;
-
- const char *vendor_name; /* 8 characters or less */
- const char *product_name; /* 16 characters or less */
-
- char can_stall;
-};
-
struct fsg_dev {
struct usb_function function;
struct usb_gadget *gadget; /* Copy of cdev->gadget */
@@ -2172,7 +2159,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
if (common->lun < common->nluns)
- common->curlun = &common->luns[common->lun];
+ common->curlun = common->luns[common->lun];
else
common->curlun = NULL;
common->tag = cbw->Tag;
@@ -2244,7 +2231,7 @@ reset:
if (common->fsg) {
fsg = common->fsg;
- for (i = 0; i < fsg_num_buffers; ++i) {
+ for (i = 0; i < common->fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
if (bh->inreq) {
@@ -2303,7 +2290,7 @@ reset:
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
/* Allocate the requests */
- for (i = 0; i < fsg_num_buffers; ++i) {
+ for (i = 0; i < common->fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
@@ -2320,7 +2307,9 @@ reset:
common->running = 1;
for (i = 0; i < common->nluns; ++i)
- common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+ if (common->luns[i])
+ common->luns[i]->unit_attention_data =
+ SS_RESET_OCCURRED;
return rc;
}
@@ -2372,7 +2361,7 @@ static void handle_exception(struct fsg_common *common)
/* Cancel all the pending transfers */
if (likely(common->fsg)) {
- for (i = 0; i < fsg_num_buffers; ++i) {
+ for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
if (bh->inreq_busy)
usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
@@ -2384,7 +2373,7 @@ static void handle_exception(struct fsg_common *common)
/* Wait until everything is idle */
for (;;) {
int num_active = 0;
- for (i = 0; i < fsg_num_buffers; ++i) {
+ for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
num_active += bh->inreq_busy + bh->outreq_busy;
}
@@ -2407,7 +2396,7 @@ static void handle_exception(struct fsg_common *common)
*/
spin_lock_irq(&common->lock);
- for (i = 0; i < fsg_num_buffers; ++i) {
+ for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
bh->state = BUF_STATE_EMPTY;
}
@@ -2420,7 +2409,9 @@ static void handle_exception(struct fsg_common *common)
common->state = FSG_STATE_STATUS_PHASE;
else {
for (i = 0; i < common->nluns; ++i) {
- curlun = &common->luns[i];
+ curlun = common->luns[i];
+ if (!curlun)
+ continue;
curlun->prevent_medium_removal = 0;
curlun->sense_data = SS_NO_SENSE;
curlun->unit_attention_data = SS_NO_SENSE;
@@ -2462,8 +2453,9 @@ static void handle_exception(struct fsg_common *common)
* CONFIG_CHANGE cases.
*/
/* for (i = 0; i < common->nluns; ++i) */
- /* common->luns[i].unit_attention_data = */
- /* SS_RESET_OCCURRED; */
+ /* if (common->luns[i]) */
+ /* common->luns[i]->unit_attention_data = */
+ /* SS_RESET_OCCURRED; */
break;
case FSG_STATE_CONFIG_CHANGE:
@@ -2559,12 +2551,13 @@ static int fsg_main_thread(void *common_)
if (!common->ops || !common->ops->thread_exits
|| common->ops->thread_exits(common) < 0) {
- struct fsg_lun *curlun = common->luns;
+ struct fsg_lun **curlun_it = common->luns;
unsigned i = common->nluns;
down_write(&common->filesem);
- for (; i--; ++curlun) {
- if (!fsg_lun_is_open(curlun))
+ for (; i--; ++curlun_it) {
+ struct fsg_lun *curlun = *curlun_it;
+ if (!curlun || !fsg_lun_is_open(curlun))
continue;
fsg_lun_close(curlun);
@@ -2580,6 +2573,56 @@ static int fsg_main_thread(void *common_)
/*************************** DEVICE ATTRIBUTES ***************************/
+static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+
+ return fsg_show_ro(curlun, buf);
+}
+
+static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+
+ return fsg_show_nofua(curlun, buf);
+}
+
+static ssize_t file_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+
+ return fsg_show_file(curlun, filesem, buf);
+}
+
+static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+
+ return fsg_store_ro(curlun, filesem, buf, count);
+}
+
+static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+
+ return fsg_store_nofua(curlun, buf, count);
+}
+
+static ssize_t file_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+
+ return fsg_store_file(curlun, filesem, buf, count);
+}
+
static DEVICE_ATTR_RW(ro);
static DEVICE_ATTR_RW(nofua);
static DEVICE_ATTR_RW(file);
@@ -2597,221 +2640,422 @@ static void fsg_lun_release(struct device *dev)
/* Nothing needs to be done */
}
-static inline void fsg_common_get(struct fsg_common *common)
+void fsg_common_get(struct fsg_common *common)
{
kref_get(&common->ref);
}
+EXPORT_SYMBOL_GPL(fsg_common_get);
-static inline void fsg_common_put(struct fsg_common *common)
+void fsg_common_put(struct fsg_common *common)
{
kref_put(&common->ref, fsg_common_release);
}
+EXPORT_SYMBOL_GPL(fsg_common_put);
-static struct fsg_common *fsg_common_init(struct fsg_common *common,
- struct usb_composite_dev *cdev,
- struct fsg_config *cfg)
+/* check if fsg_num_buffers is within a valid range */
+static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers)
{
- struct usb_gadget *gadget = cdev->gadget;
- struct fsg_buffhd *bh;
- struct fsg_lun *curlun;
- struct fsg_lun_config *lcfg;
- int nluns, i, rc;
- char *pathbuf;
-
- rc = fsg_num_buffers_validate();
- if (rc != 0)
- return ERR_PTR(rc);
-
- /* Find out how many LUNs there should be */
- nluns = cfg->nluns;
- if (nluns < 1 || nluns > FSG_MAX_LUNS) {
- dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
- return ERR_PTR(-EINVAL);
- }
+ if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4)
+ return 0;
+ pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
+ fsg_num_buffers, 2, 4);
+ return -EINVAL;
+}
- /* Allocate? */
+static struct fsg_common *fsg_common_setup(struct fsg_common *common)
+{
if (!common) {
- common = kzalloc(sizeof *common, GFP_KERNEL);
+ common = kzalloc(sizeof(*common), GFP_KERNEL);
if (!common)
return ERR_PTR(-ENOMEM);
common->free_storage_on_release = 1;
} else {
- memset(common, 0, sizeof *common);
common->free_storage_on_release = 0;
}
+ init_rwsem(&common->filesem);
+ spin_lock_init(&common->lock);
+ kref_init(&common->ref);
+ init_completion(&common->thread_notifier);
+ init_waitqueue_head(&common->fsg_wait);
+ common->state = FSG_STATE_TERMINATED;
- common->buffhds = kcalloc(fsg_num_buffers,
- sizeof *(common->buffhds), GFP_KERNEL);
- if (!common->buffhds) {
- if (common->free_storage_on_release)
- kfree(common);
- return ERR_PTR(-ENOMEM);
+ return common;
+}
+
+void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
+{
+ common->sysfs = sysfs;
+}
+EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
+
+static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
+{
+ if (buffhds) {
+ struct fsg_buffhd *bh = buffhds;
+ while (n--) {
+ kfree(bh->buf);
+ ++bh;
+ }
+ kfree(buffhds);
}
+}
- common->ops = cfg->ops;
- common->private_data = cfg->private_data;
+int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
+{
+ struct fsg_buffhd *bh, *buffhds;
+ int i, rc;
- common->gadget = gadget;
- common->ep0 = gadget->ep0;
- common->ep0req = cdev->req;
- common->cdev = cdev;
+ rc = fsg_num_buffers_validate(n);
+ if (rc != 0)
+ return rc;
+
+ buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
+ if (!buffhds)
+ return -ENOMEM;
- /* Maybe allocate device-global string IDs, and patch descriptors */
- if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
- rc = usb_string_id(cdev);
- if (unlikely(rc < 0))
+ /* Data buffers cyclic list */
+ bh = buffhds;
+ i = n;
+ goto buffhds_first_it;
+ do {
+ bh->next = bh + 1;
+ ++bh;
+buffhds_first_it:
+ bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
+ if (unlikely(!bh->buf))
goto error_release;
- fsg_strings[FSG_STRING_INTERFACE].id = rc;
- fsg_intf_desc.iInterface = rc;
- }
+ } while (--i);
+ bh->next = buffhds;
+ _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
+ common->fsg_num_buffers = n;
+ common->buffhds = buffhds;
+
+ return 0;
+
+error_release:
/*
- * Create the LUNs, open their backing files, and register the
- * LUN devices in sysfs.
+ * "buf"s pointed to by heads after n - i are NULL
+ * so releasing them won't hurt
*/
- curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
- if (unlikely(!curlun)) {
- rc = -ENOMEM;
- goto error_release;
+ _fsg_common_free_buffers(buffhds, n);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
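
fsg_common_set_num_buffers() above links the freshly allocated buffer heads into a circular singly linked list, jumping into the middle of a do/while so the "point previous at me" step is skipped on the first pass. The same ring can be written as a plain indexed loop; a standalone userspace sketch with a generic node type (not the driver's struct fsg_buffhd):

#include <stdlib.h>

struct node {
	struct node *next;
	void *buf;
};

/* Build a ring of n nodes, each owning a buffer; NULL on allocation failure. */
static struct node *make_ring(unsigned int n)
{
	struct node *ring = calloc(n, sizeof(*ring));
	unsigned int i;

	if (!ring)
		return NULL;

	for (i = 0; i < n; ++i) {
		ring[i].buf = malloc(16384);		/* stand-in for FSG_BUFLEN */
		if (!ring[i].buf)
			goto fail;
		ring[i].next = &ring[(i + 1) % n];	/* last entry wraps to the first */
	}
	return ring;

fail:
	while (i--)
		free(ring[i].buf);
	free(ring);
	return NULL;
}
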
+
+static inline void fsg_common_remove_sysfs(struct fsg_lun *lun)
+{
+ device_remove_file(&lun->dev, &dev_attr_nofua);
+ /*
+ * device_remove_file() =>
+ *
+ * here the attr (e.g. dev_attr_ro) is only used to be passed to:
+ *
+ * sysfs_remove_file() =>
+ *
+ * here e.g. both dev_attr_ro_cdrom and dev_attr_ro are in
+ * the same namespace and
+ * from here only attr->name is passed to:
+ *
+ * sysfs_hash_and_remove()
+ *
+ * attr->name is the same for dev_attr_ro_cdrom and
+ * dev_attr_ro
+ * attr->name is the same for dev_attr_file and
+ * dev_attr_file_nonremovable
+ *
+ * so we don't differentiate between removing e.g. dev_attr_ro_cdrom
+ * and dev_attr_ro
+ */
+ device_remove_file(&lun->dev, &dev_attr_ro);
+ device_remove_file(&lun->dev, &dev_attr_file);
+}
+
+void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs)
+{
+ if (sysfs) {
+ fsg_common_remove_sysfs(lun);
+ device_unregister(&lun->dev);
}
- common->luns = curlun;
+ fsg_lun_close(lun);
+ kfree(lun);
+}
+EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
- init_rwsem(&common->filesem);
+static void _fsg_common_remove_luns(struct fsg_common *common, int n)
+{
+ int i;
- for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
- curlun->cdrom = !!lcfg->cdrom;
- curlun->ro = lcfg->cdrom || lcfg->ro;
- curlun->initially_ro = curlun->ro;
- curlun->removable = lcfg->removable;
- curlun->dev.release = fsg_lun_release;
- curlun->dev.parent = &gadget->dev;
- /* curlun->dev.driver = &fsg_driver.driver; XXX */
- dev_set_drvdata(&curlun->dev, &common->filesem);
- dev_set_name(&curlun->dev, "lun%d", i);
-
- rc = device_register(&curlun->dev);
- if (rc) {
- INFO(common, "failed to register LUN%d: %d\n", i, rc);
- common->nluns = i;
- put_device(&curlun->dev);
- goto error_release;
+ for (i = 0; i < n; ++i)
+ if (common->luns[i]) {
+ fsg_common_remove_lun(common->luns[i], common->sysfs);
+ common->luns[i] = NULL;
}
+}
+EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
- rc = device_create_file(&curlun->dev,
- curlun->cdrom
- ? &dev_attr_ro_cdrom
- : &dev_attr_ro);
- if (rc)
- goto error_luns;
- rc = device_create_file(&curlun->dev,
- curlun->removable
- ? &dev_attr_file
- : &dev_attr_file_nonremovable);
- if (rc)
- goto error_luns;
- rc = device_create_file(&curlun->dev, &dev_attr_nofua);
- if (rc)
- goto error_luns;
+void fsg_common_remove_luns(struct fsg_common *common)
+{
+ _fsg_common_remove_luns(common, common->nluns);
+}
- if (lcfg->filename) {
- rc = fsg_lun_open(curlun, lcfg->filename);
- if (rc)
- goto error_luns;
- } else if (!curlun->removable) {
- ERROR(common, "no file given for LUN%d\n", i);
- rc = -EINVAL;
- goto error_luns;
- }
+void fsg_common_free_luns(struct fsg_common *common)
+{
+ fsg_common_remove_luns(common);
+ kfree(common->luns);
+ common->luns = NULL;
+}
+EXPORT_SYMBOL_GPL(fsg_common_free_luns);
+
+int fsg_common_set_nluns(struct fsg_common *common, int nluns)
+{
+ struct fsg_lun **curlun;
+
+ /* Find out how many LUNs there should be */
+ if (nluns < 1 || nluns > FSG_MAX_LUNS) {
+ pr_err("invalid number of LUNs: %u\n", nluns);
+ return -EINVAL;
}
+
+ curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
+ if (unlikely(!curlun))
+ return -ENOMEM;
+
+ if (common->luns)
+ fsg_common_free_luns(common);
+
+ common->luns = curlun;
common->nluns = nluns;
- /* Data buffers cyclic list */
- bh = common->buffhds;
- i = fsg_num_buffers;
- goto buffhds_first_it;
- do {
- bh->next = bh + 1;
- ++bh;
-buffhds_first_it:
- bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
- if (unlikely(!bh->buf)) {
- rc = -ENOMEM;
- goto error_release;
- }
- } while (--i);
- bh->next = common->buffhds;
+ pr_info("Number of LUNs=%d\n", common->nluns);
- /* Prepare inquiryString */
- i = get_default_bcdDevice();
- snprintf(common->inquiry_string, sizeof common->inquiry_string,
- "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
- /* Assume product name dependent on the first LUN */
- cfg->product_name ?: (common->luns->cdrom
- ? "File-CD Gadget"
- : "File-Stor Gadget"),
- i);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
+
+void fsg_common_set_ops(struct fsg_common *common,
+ const struct fsg_operations *ops)
+{
+ common->ops = ops;
+}
+EXPORT_SYMBOL_GPL(fsg_common_set_ops);
+
+void fsg_common_free_buffers(struct fsg_common *common)
+{
+ _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
+ common->buffhds = NULL;
+}
+EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
+
+int fsg_common_set_cdev(struct fsg_common *common,
+ struct usb_composite_dev *cdev, bool can_stall)
+{
+ struct usb_string *us;
+
+ common->gadget = cdev->gadget;
+ common->ep0 = cdev->gadget->ep0;
+ common->ep0req = cdev->req;
+ common->cdev = cdev;
+
+ us = usb_gstrings_attach(cdev, fsg_strings_array,
+ ARRAY_SIZE(fsg_strings));
+ if (IS_ERR(us))
+ return PTR_ERR(us);
+
+ fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;
/*
* Some peripheral controllers are known not to be able to
* halt bulk endpoints correctly. If one of them is present,
* disable stalls.
*/
- common->can_stall = cfg->can_stall &&
- !(gadget_is_at91(common->gadget));
+ common->can_stall = can_stall && !(gadget_is_at91(common->gadget));
- spin_lock_init(&common->lock);
- kref_init(&common->ref);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
- /* Tell the thread to start working */
- common->thread_task =
- kthread_create(fsg_main_thread, common, "file-storage");
- if (IS_ERR(common->thread_task)) {
- rc = PTR_ERR(common->thread_task);
- goto error_release;
+static inline int fsg_common_add_sysfs(struct fsg_common *common,
+ struct fsg_lun *lun)
+{
+ int rc;
+
+ rc = device_register(&lun->dev);
+ if (rc) {
+ put_device(&lun->dev);
+ return rc;
}
- init_completion(&common->thread_notifier);
- init_waitqueue_head(&common->fsg_wait);
- /* Information */
- INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
- INFO(common, "Number of LUNs=%d\n", common->nluns);
+ rc = device_create_file(&lun->dev,
+ lun->cdrom
+ ? &dev_attr_ro_cdrom
+ : &dev_attr_ro);
+ if (rc)
+ goto error;
+ rc = device_create_file(&lun->dev,
+ lun->removable
+ ? &dev_attr_file
+ : &dev_attr_file_nonremovable);
+ if (rc)
+ goto error;
+ rc = device_create_file(&lun->dev, &dev_attr_nofua);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ /* removing nonexistent files is a no-op */
+ fsg_common_remove_sysfs(lun);
+ device_unregister(&lun->dev);
+ return rc;
+}
+
+int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
+ unsigned int id, const char *name,
+ const char **name_pfx)
+{
+ struct fsg_lun *lun;
+ char *pathbuf, *p;
+ int rc = -ENOMEM;
+
+ if (!common->nluns || !common->luns)
+ return -ENODEV;
+
+ if (common->luns[id])
+ return -EBUSY;
+
+ if (!cfg->filename && !cfg->removable) {
+ pr_err("no file given for LUN%d\n", id);
+ return -EINVAL;
+ }
+
+ lun = kzalloc(sizeof(*lun), GFP_KERNEL);
+ if (!lun)
+ return -ENOMEM;
+
+ lun->name_pfx = name_pfx;
+
+ lun->cdrom = !!cfg->cdrom;
+ lun->ro = cfg->cdrom || cfg->ro;
+ lun->initially_ro = lun->ro;
+ lun->removable = !!cfg->removable;
+
+ if (!common->sysfs) {
+ /* we DON'T own the name!*/
+ lun->name = name;
+ } else {
+ lun->dev.release = fsg_lun_release;
+ lun->dev.parent = &common->gadget->dev;
+ dev_set_drvdata(&lun->dev, &common->filesem);
+ dev_set_name(&lun->dev, "%s", name);
+ lun->name = dev_name(&lun->dev);
+
+ rc = fsg_common_add_sysfs(common, lun);
+ if (rc) {
+ pr_info("failed to register LUN%d: %d\n", id, rc);
+ goto error_sysfs;
+ }
+ }
+
+ common->luns[id] = lun;
+
+ if (cfg->filename) {
+ rc = fsg_lun_open(lun, cfg->filename);
+ if (rc)
+ goto error_lun;
+ }
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
- for (i = 0, nluns = common->nluns, curlun = common->luns;
- i < nluns;
- ++curlun, ++i) {
- char *p = "(no medium)";
- if (fsg_lun_is_open(curlun)) {
- p = "(error)";
- if (pathbuf) {
- p = d_path(&curlun->filp->f_path,
- pathbuf, PATH_MAX);
- if (IS_ERR(p))
- p = "(error)";
- }
+ p = "(no medium)";
+ if (fsg_lun_is_open(lun)) {
+ p = "(error)";
+ if (pathbuf) {
+ p = d_path(&lun->filp->f_path, pathbuf, PATH_MAX);
+ if (IS_ERR(p))
+ p = "(error)";
}
- LINFO(curlun, "LUN: %s%s%sfile: %s\n",
- curlun->removable ? "removable " : "",
- curlun->ro ? "read only " : "",
- curlun->cdrom ? "CD-ROM " : "",
- p);
}
+ pr_info("LUN: %s%s%sfile: %s\n",
+ lun->removable ? "removable " : "",
+ lun->ro ? "read only " : "",
+ lun->cdrom ? "CD-ROM " : "",
+ p);
kfree(pathbuf);
+ return 0;
+
+error_lun:
+ if (common->sysfs) {
+ fsg_common_remove_sysfs(lun);
+ device_unregister(&lun->dev);
+ }
+ fsg_lun_close(lun);
+ common->luns[id] = NULL;
+error_sysfs:
+ kfree(lun);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fsg_common_create_lun);
+
+int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
+{
+ char buf[8]; /* enough for 100000000 different numbers, decimal */
+ int i, rc;
+
+ for (i = 0; i < common->nluns; ++i) {
+ snprintf(buf, sizeof(buf), "lun%d", i);
+ rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
+ if (rc)
+ goto fail;
+ }
+
+ pr_info("Number of LUNs=%d\n", common->nluns);
+
+ return 0;
+
+fail:
+ _fsg_common_remove_luns(common, i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fsg_common_create_luns);
+
+void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ const char *pn)
+{
+ int i;
+
+ /* Prepare inquiryString */
+ i = get_default_bcdDevice();
+ snprintf(common->inquiry_string, sizeof(common->inquiry_string),
+ "%-8s%-16s%04x", vn ?: "Linux",
+ /* Assume product name dependent on the first LUN */
+ pn ?: ((*common->luns)->cdrom
+ ? "File-CD Gadget"
+ : "File-Stor Gadget"),
+ i);
+}
+EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
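
fsg_common_set_inquiry_string() above packs a left-justified 8-character vendor field, a 16-character product field and a 4-digit hex revision into inquiry_string, matching the fixed-width SCSI INQUIRY layout. A quick userspace check of the format string; the 0x0310 revision is only an example value, whereas the driver derives it from the kernel version via get_default_bcdDevice():

#include <stdio.h>

int main(void)
{
	char inquiry_string[8 + 16 + 4 + 1];

	snprintf(inquiry_string, sizeof(inquiry_string),
		 "%-8s%-16s%04x", "Linux", "File-Stor Gadget", 0x0310);

	/* Prints: Linux   File-Stor Gadget0310 */
	printf("%s\n", inquiry_string);
	return 0;
}
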
+
+int fsg_common_run_thread(struct fsg_common *common)
+{
+ common->state = FSG_STATE_IDLE;
+ /* Tell the thread to start working */
+ common->thread_task =
+ kthread_create(fsg_main_thread, common, "file-storage");
+ if (IS_ERR(common->thread_task)) {
+ common->state = FSG_STATE_TERMINATED;
+ return PTR_ERR(common->thread_task);
+ }
+
DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
wake_up_process(common->thread_task);
- return common;
-
-error_luns:
- common->nluns = i + 1;
-error_release:
- common->state = FSG_STATE_TERMINATED; /* The thread is dead */
- /* Call fsg_common_release() directly, ref might be not initialised. */
- fsg_common_release(&common->ref);
- return ERR_PTR(rc);
+ return 0;
}
+EXPORT_SYMBOL_GPL(fsg_common_run_thread);
static void fsg_common_release(struct kref *ref)
{
@@ -2824,36 +3068,26 @@ static void fsg_common_release(struct kref *ref)
}
if (likely(common->luns)) {
- struct fsg_lun *lun = common->luns;
+ struct fsg_lun **lun_it = common->luns;
unsigned i = common->nluns;
/* In error recovery common->nluns may be zero. */
- for (; i; --i, ++lun) {
- device_remove_file(&lun->dev, &dev_attr_nofua);
- device_remove_file(&lun->dev,
- lun->cdrom
- ? &dev_attr_ro_cdrom
- : &dev_attr_ro);
- device_remove_file(&lun->dev,
- lun->removable
- ? &dev_attr_file
- : &dev_attr_file_nonremovable);
+ for (; i; --i, ++lun_it) {
+ struct fsg_lun *lun = *lun_it;
+ if (!lun)
+ continue;
+ if (common->sysfs)
+ fsg_common_remove_sysfs(lun);
fsg_lun_close(lun);
- device_unregister(&lun->dev);
+ if (common->sysfs)
+ device_unregister(&lun->dev);
+ kfree(lun);
}
kfree(common->luns);
}
- {
- struct fsg_buffhd *bh = common->buffhds;
- unsigned i = fsg_num_buffers;
- do {
- kfree(bh->buf);
- } while (++bh, --i);
- }
-
- kfree(common->buffhds);
+ _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
if (common->free_storage_on_release)
kfree(common);
}
@@ -2861,24 +3095,6 @@ static void fsg_common_release(struct kref *ref)
/*-------------------------------------------------------------------------*/
-static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
-{
- struct fsg_dev *fsg = fsg_from_func(f);
- struct fsg_common *common = fsg->common;
-
- DBG(fsg, "unbind\n");
- if (fsg->common->fsg == fsg) {
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
- /* FIXME: make interruptible or killable somehow? */
- wait_event(common->fsg_wait, common->fsg != fsg);
- }
-
- fsg_common_put(common);
- usb_free_all_descriptors(&fsg->function);
- kfree(fsg);
-}
-
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
@@ -2887,6 +3103,19 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
unsigned max_burst;
int ret;
+ struct fsg_opts *opts;
+
+ opts = fsg_opts_from_func_inst(f->fi);
+ if (!opts->no_configfs) {
+ ret = fsg_common_set_cdev(fsg->common, c->cdev,
+ fsg->common->can_stall);
+ if (ret)
+ return ret;
+ fsg_common_set_inquiry_string(fsg->common, 0, 0);
+ ret = fsg_common_run_thread(fsg->common);
+ if (ret)
+ return ret;
+ }
fsg->gadget = gadget;
@@ -2939,95 +3168,472 @@ autoconf_fail:
return -ENOTSUPP;
}
-/****************************** ADD FUNCTION ******************************/
+/****************************** ALLOCATE FUNCTION *************************/
-static struct usb_gadget_strings *fsg_strings_array[] = {
- &fsg_stringtab,
+static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct fsg_dev *fsg = fsg_from_func(f);
+ struct fsg_common *common = fsg->common;
+
+ DBG(fsg, "unbind\n");
+ if (fsg->common->fsg == fsg) {
+ fsg->common->new_fsg = NULL;
+ raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+ /* FIXME: make interruptible or killable somehow? */
+ wait_event(common->fsg_wait, common->fsg != fsg);
+ }
+
+ usb_free_all_descriptors(&fsg->function);
+}
+
+static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct fsg_lun_opts, group);
+}
+
+static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct fsg_opts,
+ func_inst.group);
+}
+
+CONFIGFS_ATTR_STRUCT(fsg_lun_opts);
+CONFIGFS_ATTR_OPS(fsg_lun_opts);
+
+static void fsg_lun_attr_release(struct config_item *item)
+{
+ struct fsg_lun_opts *lun_opts;
+
+ lun_opts = to_fsg_lun_opts(item);
+ kfree(lun_opts);
+}
+
+static struct configfs_item_operations fsg_lun_item_ops = {
+ .release = fsg_lun_attr_release,
+ .show_attribute = fsg_lun_opts_attr_show,
+ .store_attribute = fsg_lun_opts_attr_store,
+};
+
+static ssize_t fsg_lun_opts_file_show(struct fsg_lun_opts *opts, char *page)
+{
+ struct fsg_opts *fsg_opts;
+
+ fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
+
+ return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
+}
+
+static ssize_t fsg_lun_opts_file_store(struct fsg_lun_opts *opts,
+ const char *page, size_t len)
+{
+ struct fsg_opts *fsg_opts;
+
+ fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
+
+ return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
+}
+
+static struct fsg_lun_opts_attribute fsg_lun_opts_file =
+ __CONFIGFS_ATTR(file, S_IRUGO | S_IWUSR, fsg_lun_opts_file_show,
+ fsg_lun_opts_file_store);
+
+static ssize_t fsg_lun_opts_ro_show(struct fsg_lun_opts *opts, char *page)
+{
+ return fsg_show_ro(opts->lun, page);
+}
+
+static ssize_t fsg_lun_opts_ro_store(struct fsg_lun_opts *opts,
+ const char *page, size_t len)
+{
+ struct fsg_opts *fsg_opts;
+
+ fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
+
+ return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
+}
+
+static struct fsg_lun_opts_attribute fsg_lun_opts_ro =
+ __CONFIGFS_ATTR(ro, S_IRUGO | S_IWUSR, fsg_lun_opts_ro_show,
+ fsg_lun_opts_ro_store);
+
+static ssize_t fsg_lun_opts_removable_show(struct fsg_lun_opts *opts,
+ char *page)
+{
+ return fsg_show_removable(opts->lun, page);
+}
+
+static ssize_t fsg_lun_opts_removable_store(struct fsg_lun_opts *opts,
+ const char *page, size_t len)
+{
+ return fsg_store_removable(opts->lun, page, len);
+}
+
+static struct fsg_lun_opts_attribute fsg_lun_opts_removable =
+ __CONFIGFS_ATTR(removable, S_IRUGO | S_IWUSR,
+ fsg_lun_opts_removable_show,
+ fsg_lun_opts_removable_store);
+
+static ssize_t fsg_lun_opts_cdrom_show(struct fsg_lun_opts *opts, char *page)
+{
+ return fsg_show_cdrom(opts->lun, page);
+}
+
+static ssize_t fsg_lun_opts_cdrom_store(struct fsg_lun_opts *opts,
+ const char *page, size_t len)
+{
+ struct fsg_opts *fsg_opts;
+
+ fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
+
+ return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
+ len);
+}
+
+static struct fsg_lun_opts_attribute fsg_lun_opts_cdrom =
+ __CONFIGFS_ATTR(cdrom, S_IRUGO | S_IWUSR, fsg_lun_opts_cdrom_show,
+ fsg_lun_opts_cdrom_store);
+
+static ssize_t fsg_lun_opts_nofua_show(struct fsg_lun_opts *opts, char *page)
+{
+ return fsg_show_nofua(opts->lun, page);
+}
+
+static ssize_t fsg_lun_opts_nofua_store(struct fsg_lun_opts *opts,
+ const char *page, size_t len)
+{
+ return fsg_store_nofua(opts->lun, page, len);
+}
+
+static struct fsg_lun_opts_attribute fsg_lun_opts_nofua =
+ __CONFIGFS_ATTR(nofua, S_IRUGO | S_IWUSR, fsg_lun_opts_nofua_show,
+ fsg_lun_opts_nofua_store);
+
+static struct configfs_attribute *fsg_lun_attrs[] = {
+ &fsg_lun_opts_file.attr,
+ &fsg_lun_opts_ro.attr,
+ &fsg_lun_opts_removable.attr,
+ &fsg_lun_opts_cdrom.attr,
+ &fsg_lun_opts_nofua.attr,
NULL,
};
-static int fsg_bind_config(struct usb_composite_dev *cdev,
- struct usb_configuration *c,
- struct fsg_common *common)
+static struct config_item_type fsg_lun_type = {
+ .ct_item_ops = &fsg_lun_item_ops,
+ .ct_attrs = fsg_lun_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *fsg_lun_make(struct config_group *group,
+ const char *name)
{
- struct fsg_dev *fsg;
+ struct fsg_lun_opts *opts;
+ struct fsg_opts *fsg_opts;
+ struct fsg_lun_config config;
+ char *num_str;
+ u8 num;
+ int ret;
+
+ num_str = strchr(name, '.');
+ if (!num_str) {
+ pr_err("Unable to locate . in LUN.NUMBER\n");
+ return ERR_PTR(-EINVAL);
+ }
+ num_str++;
+
+ ret = kstrtou8(num_str, 0, &num);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fsg_opts = to_fsg_opts(&group->cg_item);
+ if (num >= FSG_MAX_LUNS)
+ return ERR_PTR(-ERANGE);
+
+ mutex_lock(&fsg_opts->lock);
+ if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.removable = true;
+
+ ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
+ (const char **)&group->cg_item.ci_name);
+ if (ret) {
+ kfree(opts);
+ goto out;
+ }
+ opts->lun = fsg_opts->common->luns[num];
+ opts->lun_id = num;
+ mutex_unlock(&fsg_opts->lock);
+
+ config_group_init_type_name(&opts->group, name, &fsg_lun_type);
+
+ return &opts->group;
+out:
+ mutex_unlock(&fsg_opts->lock);
+ return ERR_PTR(ret);
+}
+
+static void fsg_lun_drop(struct config_group *group, struct config_item *item)
+{
+ struct fsg_lun_opts *lun_opts;
+ struct fsg_opts *fsg_opts;
+
+ lun_opts = to_fsg_lun_opts(item);
+ fsg_opts = to_fsg_opts(&group->cg_item);
+
+ mutex_lock(&fsg_opts->lock);
+ if (fsg_opts->refcnt) {
+ struct config_item *gadget;
+
+ gadget = group->cg_item.ci_parent->ci_parent;
+ unregister_gadget_item(gadget);
+ }
+
+ fsg_common_remove_lun(lun_opts->lun, fsg_opts->common->sysfs);
+ fsg_opts->common->luns[lun_opts->lun_id] = NULL;
+ lun_opts->lun_id = 0;
+ mutex_unlock(&fsg_opts->lock);
+
+ config_item_put(item);
+}
+
+CONFIGFS_ATTR_STRUCT(fsg_opts);
+CONFIGFS_ATTR_OPS(fsg_opts);
+
+static void fsg_attr_release(struct config_item *item)
+{
+ struct fsg_opts *opts = to_fsg_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations fsg_item_ops = {
+ .release = fsg_attr_release,
+ .show_attribute = fsg_opts_attr_show,
+ .store_attribute = fsg_opts_attr_store,
+};
+
+static ssize_t fsg_opts_stall_show(struct fsg_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->common->can_stall);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t fsg_opts_stall_store(struct fsg_opts *opts, const char *page,
+ size_t len)
+{
+ int ret;
+ bool stall;
+
+ mutex_lock(&opts->lock);
+
+ if (opts->refcnt) {
+ mutex_unlock(&opts->lock);
+ return -EBUSY;
+ }
+
+ ret = strtobool(page, &stall);
+ if (!ret) {
+ opts->common->can_stall = stall;
+ ret = len;
+ }
+
+ mutex_unlock(&opts->lock);
+
+ return ret;
+}
+
+static struct fsg_opts_attribute fsg_opts_stall =
+ __CONFIGFS_ATTR(stall, S_IRUGO | S_IWUSR, fsg_opts_stall_show,
+ fsg_opts_stall_store);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+static ssize_t fsg_opts_num_buffers_show(struct fsg_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->common->fsg_num_buffers);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t fsg_opts_num_buffers_store(struct fsg_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u8 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ goto end;
+
+ ret = fsg_num_buffers_validate(num);
+ if (ret)
+ goto end;
+
+ fsg_common_set_num_buffers(opts->common, num);
+ ret = len;
+
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct fsg_opts_attribute fsg_opts_num_buffers =
+ __CONFIGFS_ATTR(num_buffers, S_IRUGO | S_IWUSR,
+ fsg_opts_num_buffers_show,
+ fsg_opts_num_buffers_store);
+
+#endif
+
+static struct configfs_attribute *fsg_attrs[] = {
+ &fsg_opts_stall.attr,
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ &fsg_opts_num_buffers.attr,
+#endif
+ NULL,
+};
+
+static struct configfs_group_operations fsg_group_ops = {
+ .make_group = fsg_lun_make,
+ .drop_item = fsg_lun_drop,
+};
+
+static struct config_item_type fsg_func_type = {
+ .ct_item_ops = &fsg_item_ops,
+ .ct_group_ops = &fsg_group_ops,
+ .ct_attrs = fsg_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void fsg_free_inst(struct usb_function_instance *fi)
+{
+ struct fsg_opts *opts;
+
+ opts = fsg_opts_from_func_inst(fi);
+ fsg_common_put(opts->common);
+ kfree(opts);
+}
+
+static struct usb_function_instance *fsg_alloc_inst(void)
+{
+ struct fsg_opts *opts;
+ struct fsg_lun_config config;
int rc;
- fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = fsg_free_inst;
+ opts->common = fsg_common_setup(opts->common);
+ if (IS_ERR(opts->common)) {
+ rc = PTR_ERR(opts->common);
+ goto release_opts;
+ }
+ rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS);
+ if (rc)
+ goto release_opts;
+
+ rc = fsg_common_set_num_buffers(opts->common,
+ CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
+ if (rc)
+ goto release_luns;
+
+ pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
+
+ memset(&config, 0, sizeof(config));
+ config.removable = true;
+ rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
+ (const char **)&opts->func_inst.group.cg_item.ci_name);
+ opts->lun0.lun = opts->common->luns[0];
+ opts->lun0.lun_id = 0;
+ config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
+ opts->default_groups[0] = &opts->lun0.group;
+ opts->func_inst.group.default_groups = opts->default_groups;
+
+ config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);
+
+ return &opts->func_inst;
+
+release_luns:
+ kfree(opts->common->luns);
+release_opts:
+ kfree(opts);
+ return ERR_PTR(rc);
+}
+
+static void fsg_free(struct usb_function *f)
+{
+ struct fsg_dev *fsg;
+ struct fsg_opts *opts;
+
+ fsg = container_of(f, struct fsg_dev, function);
+ opts = container_of(f->fi, struct fsg_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
+
+ kfree(fsg);
+}
+
+static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
+{
+ struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
+ struct fsg_common *common = opts->common;
+ struct fsg_dev *fsg;
+
+ fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
if (unlikely(!fsg))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- fsg->function.name = FSG_DRIVER_DESC;
- fsg->function.strings = fsg_strings_array;
- fsg->function.bind = fsg_bind;
- fsg->function.unbind = fsg_unbind;
- fsg->function.setup = fsg_setup;
- fsg->function.set_alt = fsg_set_alt;
- fsg->function.disable = fsg_disable;
+ mutex_lock(&opts->lock);
+ opts->refcnt++;
+ mutex_unlock(&opts->lock);
+ fsg->function.name = FSG_DRIVER_DESC;
+ fsg->function.bind = fsg_bind;
+ fsg->function.unbind = fsg_unbind;
+ fsg->function.setup = fsg_setup;
+ fsg->function.set_alt = fsg_set_alt;
+ fsg->function.disable = fsg_disable;
+ fsg->function.free_func = fsg_free;
fsg->common = common;
- /*
- * Our caller holds a reference to common structure so we
- * don't have to be worry about it being freed until we return
- * from this function. So instead of incrementing counter now
- * and decrement in error recovery we increment it only when
- * call to usb_add_function() was successful.
- */
- rc = usb_add_function(c, &fsg->function);
- if (unlikely(rc))
- kfree(fsg);
- else
- fsg_common_get(fsg->common);
- return rc;
+ return &fsg->function;
}
+DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Nazarewicz");
/************************* Module parameters *************************/
-struct fsg_module_parameters {
- char *file[FSG_MAX_LUNS];
- bool ro[FSG_MAX_LUNS];
- bool removable[FSG_MAX_LUNS];
- bool cdrom[FSG_MAX_LUNS];
- bool nofua[FSG_MAX_LUNS];
-
- unsigned int file_count, ro_count, removable_count, cdrom_count;
- unsigned int nofua_count;
- unsigned int luns; /* nluns */
- bool stall; /* can_stall */
-};
-#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
- module_param_array_named(prefix ## name, params.name, type, \
- &prefix ## params.name ## _count, \
- S_IRUGO); \
- MODULE_PARM_DESC(prefix ## name, desc)
-
-#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
- module_param_named(prefix ## name, params.name, type, \
- S_IRUGO); \
- MODULE_PARM_DESC(prefix ## name, desc)
-
-#define FSG_MODULE_PARAMETERS(prefix, params) \
- _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
- "names of backing files or devices"); \
- _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
- "true to force read-only"); \
- _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
- "true to simulate removable media"); \
- _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
- "true to simulate CD-ROM instead of disk"); \
- _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \
- "true to ignore SCSI WRITE(10,12) FUA bit"); \
- _FSG_MODULE_PARAM(prefix, params, luns, uint, \
- "number of LUNs"); \
- _FSG_MODULE_PARAM(prefix, params, stall, bool, \
- "false to prevent bulk stalls")
-
-static void
-fsg_config_from_params(struct fsg_config *cfg,
- const struct fsg_module_parameters *params)
+void fsg_config_from_params(struct fsg_config *cfg,
+ const struct fsg_module_parameters *params,
+ unsigned int fsg_num_buffers)
{
struct fsg_lun_config *lun;
unsigned i;
@@ -3055,19 +3661,7 @@ fsg_config_from_params(struct fsg_config *cfg,
/* Finalise */
cfg->can_stall = params->stall;
+ cfg->fsg_num_buffers = fsg_num_buffers;
}
+EXPORT_SYMBOL_GPL(fsg_config_from_params);
-static inline struct fsg_common *
-fsg_common_from_params(struct fsg_common *common,
- struct usb_composite_dev *cdev,
- const struct fsg_module_parameters *params)
- __attribute__((unused));
-static inline struct fsg_common *
-fsg_common_from_params(struct fsg_common *common,
- struct usb_composite_dev *cdev,
- const struct fsg_module_parameters *params)
-{
- struct fsg_config cfg;
- fsg_config_from_params(&cfg, params);
- return fsg_common_init(common, cdev, &cfg);
-}
diff --git a/drivers/usb/gadget/f_mass_storage.h b/drivers/usb/gadget/f_mass_storage.h
new file mode 100644
index 00000000000..b4866fcef30
--- /dev/null
+++ b/drivers/usb/gadget/f_mass_storage.h
@@ -0,0 +1,166 @@
+#ifndef USB_F_MASS_STORAGE_H
+#define USB_F_MASS_STORAGE_H
+
+#include <linux/usb/composite.h>
+#include "storage_common.h"
+
+struct fsg_module_parameters {
+ char *file[FSG_MAX_LUNS];
+ bool ro[FSG_MAX_LUNS];
+ bool removable[FSG_MAX_LUNS];
+ bool cdrom[FSG_MAX_LUNS];
+ bool nofua[FSG_MAX_LUNS];
+
+ unsigned int file_count, ro_count, removable_count, cdrom_count;
+ unsigned int nofua_count;
+ unsigned int luns; /* nluns */
+ bool stall; /* can_stall */
+};
+
+#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
+ module_param_array_named(prefix ## name, params.name, type, \
+ &prefix ## params.name ## _count, \
+ S_IRUGO); \
+ MODULE_PARM_DESC(prefix ## name, desc)
+
+#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
+ module_param_named(prefix ## name, params.name, type, \
+ S_IRUGO); \
+ MODULE_PARM_DESC(prefix ## name, desc)
+
+#define __FSG_MODULE_PARAMETERS(prefix, params) \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
+ "names of backing files or devices"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
+ "true to force read-only"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
+ "true to simulate removable media"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
+ "true to simulate CD-ROM instead of disk"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \
+ "true to ignore SCSI WRITE(10,12) FUA bit"); \
+ _FSG_MODULE_PARAM(prefix, params, luns, uint, \
+ "number of LUNs"); \
+ _FSG_MODULE_PARAM(prefix, params, stall, bool, \
+ "false to prevent bulk stalls")
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#define FSG_MODULE_PARAMETERS(prefix, params) \
+ __FSG_MODULE_PARAMETERS(prefix, params); \
+ module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO);\
+ MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers")
+#else
+
+#define FSG_MODULE_PARAMETERS(prefix, params) \
+ __FSG_MODULE_PARAMETERS(prefix, params)
+
+#endif
+
+struct fsg_common;
+
+/* FSF callback functions */
+struct fsg_operations {
+ /*
+ * Callback function to call when thread exits. If no
+ * callback is set or it returns a value lower than zero, MSF
+ * will force eject all LUNs it operates on (including those
+ * marked as non-removable or with prevent_medium_removal flag
+ * set).
+ */
+ int (*thread_exits)(struct fsg_common *common);
+};
+
+struct fsg_lun_opts {
+ struct config_group group;
+ struct fsg_lun *lun;
+ int lun_id;
+};
+
+struct fsg_opts {
+ struct fsg_common *common;
+ struct usb_function_instance func_inst;
+ struct fsg_lun_opts lun0;
+ struct config_group *default_groups[2];
+ bool no_configfs; /* for legacy gadgets */
+
+ /*
+ * Read/write access to configfs attributes is handled by configfs.
+ *
+ * This is to protect the data from concurrent access by read/write
+ * and create symlink/remove symlink.
+ */
+ struct mutex lock;
+ int refcnt;
+};
+
+struct fsg_lun_config {
+ const char *filename;
+ char ro;
+ char removable;
+ char cdrom;
+ char nofua;
+};
+
+struct fsg_config {
+ unsigned nluns;
+ struct fsg_lun_config luns[FSG_MAX_LUNS];
+
+ /* Callback functions. */
+ const struct fsg_operations *ops;
+ /* Gadget's private data. */
+ void *private_data;
+
+ const char *vendor_name; /* 8 characters or less */
+ const char *product_name; /* 16 characters or less */
+
+ char can_stall;
+ unsigned int fsg_num_buffers;
+};
+
+static inline struct fsg_opts *
+fsg_opts_from_func_inst(const struct usb_function_instance *fi)
+{
+ return container_of(fi, struct fsg_opts, func_inst);
+}
+
+void fsg_common_get(struct fsg_common *common);
+
+void fsg_common_put(struct fsg_common *common);
+
+void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs);
+
+int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n);
+
+void fsg_common_free_buffers(struct fsg_common *common);
+
+int fsg_common_set_cdev(struct fsg_common *common,
+ struct usb_composite_dev *cdev, bool can_stall);
+
+void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs);
+
+void fsg_common_remove_luns(struct fsg_common *common);
+
+void fsg_common_free_luns(struct fsg_common *common);
+
+int fsg_common_set_nluns(struct fsg_common *common, int nluns);
+
+void fsg_common_set_ops(struct fsg_common *common,
+ const struct fsg_operations *ops);
+
+int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
+ unsigned int id, const char *name,
+ const char **name_pfx);
+
+int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
+
+void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ const char *pn);
+
+int fsg_common_run_thread(struct fsg_common *common);
+
+void fsg_config_from_params(struct fsg_config *cfg,
+ const struct fsg_module_parameters *params,
+ unsigned int fsg_num_buffers);
+
+#endif /* USB_F_MASS_STORAGE_H */
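
FSG_MODULE_PARAMETERS() above token-pastes the optional prefix onto both the parameter name and the _count member used for array parameters. For a caller that passes an empty prefix and a struct named mod_data (as mass_storage.c does further down), the "file" and "stall" entries roughly hand-expand as follows; this is an illustration of the macro, not additional code in the patch:

#include <linux/module.h>
#include "f_mass_storage.h"	/* struct fsg_module_parameters */

static struct fsg_module_parameters mod_data = { .stall = 1 };

/* FSG_MODULE_PARAMETERS(<empty prefix>, mod_data), "file" entry: */
module_param_array_named(file, mod_data.file, charp,
			 &mod_data.file_count, S_IRUGO);
MODULE_PARM_DESC(file, "names of backing files or devices");

/* ...and the scalar "stall" entry: */
module_param_named(stall, mod_data.stall, bool, S_IRUGO);
MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
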
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index f3bb363f1d4..807127d56fa 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 5327c82472e..2344efe4f4c 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -76,7 +76,9 @@ struct gfs_ffs_obj {
USB_GADGET_COMPOSITE_OPTIONS();
+#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
USB_ETHERNET_MODULE_PARAMETERS();
+#endif
static struct usb_device_descriptor gfs_dev_desc = {
.bLength = sizeof gfs_dev_desc,
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index c64deb9e3d6..f8276801571 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1165,7 +1165,7 @@ static int udc_proc_read(struct seq_file *m, void *v)
s = "invalid"; break;
default:
s = "?"; break;
- }; s; }),
+ } s; }),
(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
@@ -1701,7 +1701,6 @@ static void goku_remove(struct pci_dev *pdev)
if (dev->enabled)
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
dev->regs = NULL;
INFO(dev, "unbind\n");
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 67128be1e1b..6a2a65aa005 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3078,7 +3078,9 @@ static int __init lpc32xx_udc_probe(struct platform_device *pdev)
udc->isp1301_i2c_client->addr);
pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto resource_fail;
udc->board = &lpc32xx_usbddata;
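
The lpc32xx_udc.c hunk above stops assigning coherent_dma_mask directly and goes through dma_set_coherent_mask(), which gives the platform a chance to reject a mask it cannot satisfy, hence the new error check. A minimal sketch of the probe-time pattern; example_probe() is illustrative, not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Request 32-bit coherent DMA; abort probe if the platform refuses. */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... remainder of probe ... */
	return 0;
}
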
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 080e577773d..8e27a8c9644 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -37,16 +37,16 @@
#define DRIVER_DESC "Mass Storage Gadget"
#define DRIVER_VERSION "2009/09/11"
-/*-------------------------------------------------------------------------*/
-
/*
- * kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ * Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with any other driver!! Ever!!
+ * Instead: allocate your own, using normal USB-IF procedures.
*/
-#include "f_mass_storage.c"
+#define FSG_VENDOR_ID 0x0525 /* NetChip */
+#define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
+
+#include "f_mass_storage.h"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
@@ -97,11 +97,28 @@ static struct usb_gadget_strings *dev_strings[] = {
NULL,
};
+static struct usb_function_instance *fi_msg;
+static struct usb_function *f_msg;
+
/****************************** Configurations ******************************/
static struct fsg_module_parameters mod_data = {
.stall = 1
};
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for good buffering pipeline
+ */
+#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static unsigned long msg_registered;
@@ -115,13 +132,7 @@ static int msg_thread_exits(struct fsg_common *common)
static int __init msg_do_config(struct usb_configuration *c)
{
- static const struct fsg_operations ops = {
- .thread_exits = msg_thread_exits,
- };
- static struct fsg_common common;
-
- struct fsg_common *retp;
- struct fsg_config config;
+ struct fsg_opts *opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -129,15 +140,24 @@ static int __init msg_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- fsg_config_from_params(&config, &mod_data);
- config.ops = &ops;
+ opts = fsg_opts_from_func_inst(fi_msg);
+
+ f_msg = usb_get_function(fi_msg);
+ if (IS_ERR(f_msg))
+ return PTR_ERR(f_msg);
+
+ ret = fsg_common_run_thread(opts->common);
+ if (ret)
+ goto put_func;
+
+ ret = usb_add_function(c, f_msg);
+ if (ret)
+ goto put_func;
- retp = fsg_common_init(&common, c->cdev, &config);
- if (IS_ERR(retp))
- return PTR_ERR(retp);
+ return 0;
- ret = fsg_bind_config(c->cdev, c, &common);
- fsg_common_put(&common);
+put_func:
+ usb_put_function(f_msg);
return ret;
}
@@ -152,23 +172,79 @@ static struct usb_configuration msg_config_driver = {
static int __init msg_bind(struct usb_composite_dev *cdev)
{
+ static const struct fsg_operations ops = {
+ .thread_exits = msg_thread_exits,
+ };
+ struct fsg_opts *opts;
+ struct fsg_config config;
int status;
+ fi_msg = usb_get_function_instance("mass_storage");
+ if (IS_ERR(fi_msg))
+ return PTR_ERR(fi_msg);
+
+ fsg_config_from_params(&config, &mod_data, fsg_num_buffers);
+ opts = fsg_opts_from_func_inst(fi_msg);
+
+ opts->no_configfs = true;
+ status = fsg_common_set_num_buffers(opts->common, fsg_num_buffers);
+ if (status)
+ goto fail;
+
+ status = fsg_common_set_nluns(opts->common, config.nluns);
+ if (status)
+ goto fail_set_nluns;
+
+ fsg_common_set_ops(opts->common, &ops);
+
+ status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
+ if (status)
+ goto fail_set_cdev;
+
+ fsg_common_set_sysfs(opts->common, true);
+ status = fsg_common_create_luns(opts->common, &config);
+ if (status)
+ goto fail_set_cdev;
+
+ fsg_common_set_inquiry_string(opts->common, config.vendor_name,
+ config.product_name);
+
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
- return status;
+ goto fail_string_ids;
msg_device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
status = usb_add_config(cdev, &msg_config_driver, msg_do_config);
if (status < 0)
- return status;
+ goto fail_string_ids;
+
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&cdev->gadget->dev,
DRIVER_DESC ", version: " DRIVER_VERSION "\n");
set_bit(0, &msg_registered);
return 0;
+
+fail_string_ids:
+ fsg_common_remove_luns(opts->common);
+fail_set_cdev:
+ fsg_common_free_luns(opts->common);
+fail_set_nluns:
+ fsg_common_free_buffers(opts->common);
+fail:
+ usb_put_function_instance(fi_msg);
+ return status;
}
+static int msg_unbind(struct usb_composite_dev *cdev)
+{
+ if (!IS_ERR(f_msg))
+ usb_put_function(f_msg);
+
+ if (!IS_ERR(fi_msg))
+ usb_put_function_instance(fi_msg);
+
+ return 0;
+}
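
Taken together, the msg_bind() and msg_do_config() hunks above spell out the calling convention legacy gadgets follow now that fsg_common_init() is gone: configure the shared fsg_common through the new setters at bind time, then start the worker thread from the configuration callback. A condensed, hedged restatement of that order, using the identifiers declared in the hunks above and omitting the error unwinding:

/* bind time (see msg_bind() above) */
fi_msg = usb_get_function_instance("mass_storage");
fsg_config_from_params(&config, &mod_data, fsg_num_buffers);
opts = fsg_opts_from_func_inst(fi_msg);
opts->no_configfs = true;		/* legacy gadget, not driven via configfs */

fsg_common_set_num_buffers(opts->common, fsg_num_buffers);
fsg_common_set_nluns(opts->common, config.nluns);
fsg_common_set_ops(opts->common, &ops);
fsg_common_set_cdev(opts->common, cdev, config.can_stall);
fsg_common_set_sysfs(opts->common, true);
fsg_common_create_luns(opts->common, &config);
fsg_common_set_inquiry_string(opts->common, config.vendor_name,
			      config.product_name);

/* configuration callback (see msg_do_config() above) */
f_msg = usb_get_function(fi_msg);
fsg_common_run_thread(opts->common);
usb_add_function(c, f_msg);
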
/****************************** Some noise ******************************/
@@ -179,6 +255,7 @@ static __refdata struct usb_composite_driver msg_driver = {
.needs_serial = 1,
.strings = dev_strings,
.bind = msg_bind,
+ .unbind = msg_unbind,
};
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 23393254a8a..4fdaa54a2a2 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/netdevice.h>
#include "u_serial.h"
#if defined USB_ETH_RNDIS
@@ -32,22 +33,11 @@ MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");
-/***************************** All the files... *****************************/
+#include "f_mass_storage.h"
-/*
- * kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_mass_storage.c"
-
-#define USBF_ECM_INCLUDED
-#include "f_ecm.c"
+#include "u_ecm.h"
#ifdef USB_ETH_RNDIS
-# define USB_FRNDIS_INCLUDED
-# include "f_rndis.c"
+# include "u_rndis.h"
# include "rndis.h"
#endif
#include "u_ether.h"
@@ -132,22 +122,36 @@ static struct usb_gadget_strings *dev_strings[] = {
/****************************** Configurations ******************************/
static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
-FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
-static struct fsg_common fsg_common;
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for good buffering pipeline
+ */
+#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
-static u8 host_mac[ETH_ALEN];
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
static struct usb_function_instance *fi_acm;
-static struct eth_dev *the_dev;
+static struct usb_function_instance *fi_msg;
/********** RNDIS **********/
#ifdef USB_ETH_RNDIS
+static struct usb_function_instance *fi_rndis;
static struct usb_function *f_acm_rndis;
+static struct usb_function *f_rndis;
+static struct usb_function *f_msg_rndis;
static __init int rndis_do_config(struct usb_configuration *c)
{
+ struct fsg_opts *fsg_opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -155,27 +159,50 @@ static __init int rndis_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- ret = rndis_bind_config(c, host_mac, the_dev);
+ f_rndis = usb_get_function(fi_rndis);
+ if (IS_ERR(f_rndis))
+ return PTR_ERR(f_rndis);
+
+ ret = usb_add_function(c, f_rndis);
if (ret < 0)
- return ret;
+ goto err_func_rndis;
f_acm_rndis = usb_get_function(fi_acm);
- if (IS_ERR(f_acm_rndis))
- return PTR_ERR(f_acm_rndis);
+ if (IS_ERR(f_acm_rndis)) {
+ ret = PTR_ERR(f_acm_rndis);
+ goto err_func_acm;
+ }
ret = usb_add_function(c, f_acm_rndis);
if (ret)
goto err_conf;
- ret = fsg_bind_config(c->cdev, c, &fsg_common);
- if (ret < 0)
+ f_msg_rndis = usb_get_function(fi_msg);
+ if (IS_ERR(f_msg_rndis)) {
+ ret = PTR_ERR(f_msg_rndis);
goto err_fsg;
+ }
+
+ fsg_opts = fsg_opts_from_func_inst(fi_msg);
+ ret = fsg_common_run_thread(fsg_opts->common);
+ if (ret)
+ goto err_run;
+
+ ret = usb_add_function(c, f_msg_rndis);
+ if (ret)
+ goto err_run;
return 0;
+err_run:
+ usb_put_function(f_msg_rndis);
err_fsg:
usb_remove_function(c, f_acm_rndis);
err_conf:
usb_put_function(f_acm_rndis);
+err_func_acm:
+ usb_remove_function(c, f_rndis);
+err_func_rndis:
+ usb_put_function(f_rndis);
return ret;
}
@@ -205,10 +232,14 @@ static __ref int rndis_config_register(struct usb_composite_dev *cdev)
/********** CDC ECM **********/
#ifdef CONFIG_USB_G_MULTI_CDC
+static struct usb_function_instance *fi_ecm;
static struct usb_function *f_acm_multi;
+static struct usb_function *f_ecm;
+static struct usb_function *f_msg_multi;
static __init int cdc_do_config(struct usb_configuration *c)
{
+ struct fsg_opts *fsg_opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -216,28 +247,51 @@ static __init int cdc_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- ret = ecm_bind_config(c, host_mac, the_dev);
+ f_ecm = usb_get_function(fi_ecm);
+ if (IS_ERR(f_ecm))
+ return PTR_ERR(f_ecm);
+
+ ret = usb_add_function(c, f_ecm);
if (ret < 0)
- return ret;
+ goto err_func_ecm;
/* implicit port_num is zero */
f_acm_multi = usb_get_function(fi_acm);
- if (IS_ERR(f_acm_multi))
- return PTR_ERR(f_acm_multi);
+ if (IS_ERR(f_acm_multi)) {
+ ret = PTR_ERR(f_acm_multi);
+ goto err_func_acm;
+ }
ret = usb_add_function(c, f_acm_multi);
if (ret)
goto err_conf;
- ret = fsg_bind_config(c->cdev, c, &fsg_common);
- if (ret < 0)
+ f_msg_multi = usb_get_function(fi_msg);
+ if (IS_ERR(f_msg_multi)) {
+ ret = PTR_ERR(f_msg_multi);
goto err_fsg;
+ }
+
+ fsg_opts = fsg_opts_from_func_inst(fi_msg);
+ ret = fsg_common_run_thread(fsg_opts->common);
+ if (ret)
+ goto err_run;
+
+ ret = usb_add_function(c, f_msg_multi);
+ if (ret)
+ goto err_run;
return 0;
+err_run:
+ usb_put_function(f_msg_multi);
err_fsg:
usb_remove_function(c, f_acm_multi);
err_conf:
usb_put_function(f_acm_multi);
+err_func_acm:
+ usb_remove_function(c, f_ecm);
+err_func_ecm:
+ usb_put_function(f_ecm);
return ret;
}
@@ -270,19 +324,67 @@ static __ref int cdc_config_register(struct usb_composite_dev *cdev)
static int __ref multi_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
+#ifdef CONFIG_USB_G_MULTI_CDC
+ struct f_ecm_opts *ecm_opts;
+#endif
+#ifdef USB_ETH_RNDIS
+ struct f_rndis_opts *rndis_opts;
+#endif
+ struct fsg_opts *fsg_opts;
+ struct fsg_config config;
int status;
if (!can_support_ecm(cdev->gadget)) {
dev_err(&gadget->dev, "controller '%s' not usable\n",
- gadget->name);
+ gadget->name);
return -EINVAL;
}
- /* set up network link layer */
- the_dev = gether_setup(cdev->gadget, dev_addr, host_addr, host_mac,
- qmult);
- if (IS_ERR(the_dev))
- return PTR_ERR(the_dev);
+#ifdef CONFIG_USB_G_MULTI_CDC
+ fi_ecm = usb_get_function_instance("ecm");
+ if (IS_ERR(fi_ecm))
+ return PTR_ERR(fi_ecm);
+
+ ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
+
+ gether_set_qmult(ecm_opts->net, qmult);
+ if (!gether_set_host_addr(ecm_opts->net, host_addr))
+ pr_info("using host ethernet address: %s\n", host_addr);
+ if (!gether_set_dev_addr(ecm_opts->net, dev_addr))
+ pr_info("using self ethernet address: %s\n", dev_addr);
+#endif
+
+#ifdef USB_ETH_RNDIS
+ fi_rndis = usb_get_function_instance("rndis");
+ if (IS_ERR(fi_rndis)) {
+ status = PTR_ERR(fi_rndis);
+ goto fail;
+ }
+
+ rndis_opts = container_of(fi_rndis, struct f_rndis_opts, func_inst);
+
+ gether_set_qmult(rndis_opts->net, qmult);
+ if (!gether_set_host_addr(rndis_opts->net, host_addr))
+ pr_info("using host ethernet address: %s\n", host_addr);
+ if (!gether_set_dev_addr(rndis_opts->net, dev_addr))
+ pr_info("using self ethernet address: %s\n", dev_addr);
+#endif
+
+#if (defined CONFIG_USB_G_MULTI_CDC && defined USB_ETH_RNDIS)
+ /*
+ * If both ecm and rndis are selected then:
+ * 1) rndis borrows the net interface from ecm
+ * 2) since the interface is shared it must not be bound
+ * twice - in ecm's _and_ rndis' binds, so do it here.
+ */
+ gether_set_gadget(ecm_opts->net, cdev->gadget);
+ status = gether_register_netdev(ecm_opts->net);
+ if (status)
+ goto fail0;
+
+ rndis_borrow_net(fi_rndis, ecm_opts->net);
+ ecm_opts->bound = true;
+#endif
/* set up serial link layer */
fi_acm = usb_get_function_instance("acm");
@@ -292,57 +394,102 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
}
/* set up mass storage function */
- {
- void *retp;
- retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data);
- if (IS_ERR(retp)) {
- status = PTR_ERR(retp);
- goto fail1;
- }
+ fi_msg = usb_get_function_instance("mass_storage");
+ if (IS_ERR(fi_msg)) {
+ status = PTR_ERR(fi_msg);
+ goto fail1;
}
+ fsg_config_from_params(&config, &fsg_mod_data, fsg_num_buffers);
+ fsg_opts = fsg_opts_from_func_inst(fi_msg);
+
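+ /* this legacy gadget configures the function itself rather than via configfs */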
+ fsg_opts->no_configfs = true;
+ status = fsg_common_set_num_buffers(fsg_opts->common, fsg_num_buffers);
+ if (status)
+ goto fail2;
+
+ status = fsg_common_set_nluns(fsg_opts->common, config.nluns);
+ if (status)
+ goto fail_set_nluns;
+
+ status = fsg_common_set_cdev(fsg_opts->common, cdev, config.can_stall);
+ if (status)
+ goto fail_set_cdev;
+
+ fsg_common_set_sysfs(fsg_opts->common, true);
+ status = fsg_common_create_luns(fsg_opts->common, &config);
+ if (status)
+ goto fail_set_cdev;
+
+ fsg_common_set_inquiry_string(fsg_opts->common, config.vendor_name,
+ config.product_name);
/* allocate string IDs */
status = usb_string_ids_tab(cdev, strings_dev);
if (unlikely(status < 0))
- goto fail2;
+ goto fail_string_ids;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* register configurations */
status = rndis_config_register(cdev);
if (unlikely(status < 0))
- goto fail2;
+ goto fail_string_ids;
status = cdc_config_register(cdev);
if (unlikely(status < 0))
- goto fail2;
+ goto fail_string_ids;
usb_composite_overwrite_options(cdev, &coverwrite);
/* we're done */
dev_info(&gadget->dev, DRIVER_DESC "\n");
- fsg_common_put(&fsg_common);
return 0;
/* error recovery */
+fail_string_ids:
+ fsg_common_remove_luns(fsg_opts->common);
+fail_set_cdev:
+ fsg_common_free_luns(fsg_opts->common);
+fail_set_nluns:
+ fsg_common_free_buffers(fsg_opts->common);
fail2:
- fsg_common_put(&fsg_common);
+ usb_put_function_instance(fi_msg);
fail1:
usb_put_function_instance(fi_acm);
fail0:
- gether_cleanup(the_dev);
+#ifdef USB_ETH_RNDIS
+ usb_put_function_instance(fi_rndis);
+fail:
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+ usb_put_function_instance(fi_ecm);
+#endif
return status;
}
static int __exit multi_unbind(struct usb_composite_dev *cdev)
{
#ifdef CONFIG_USB_G_MULTI_CDC
+ usb_put_function(f_msg_multi);
+#endif
+#ifdef USB_ETH_RNDIS
+ usb_put_function(f_msg_rndis);
+#endif
+ usb_put_function_instance(fi_msg);
+#ifdef CONFIG_USB_G_MULTI_CDC
usb_put_function(f_acm_multi);
#endif
#ifdef USB_ETH_RNDIS
usb_put_function(f_acm_rndis);
#endif
usb_put_function_instance(fi_acm);
- gether_cleanup(the_dev);
+#ifdef USB_ETH_RNDIS
+ usb_put_function(f_rndis);
+ usb_put_function_instance(fi_rndis);
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+ usb_put_function(f_ecm);
+ usb_put_function_instance(fi_ecm);
+#endif
return 0;
}
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index 561b30efb8e..234711eabea 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -310,6 +310,7 @@ static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
*/
trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
if (!trb_hw) {
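+ /* free the software TRB allocated above so it is not leaked */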
+ kfree(trb);
dev_err(u3d->dev,
"%s, dma_pool_alloc fail\n", __func__);
return NULL;
@@ -454,6 +455,7 @@ static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
if (!trb_hw) {
+ kfree(trb);
dev_err(u3d->dev,
"%s, trb_hw alloc fail\n", __func__);
return -ENOMEM;
@@ -1936,7 +1938,7 @@ static int mv_u3d_probe(struct platform_device *dev)
}
u3d->irq = r->start;
if (request_irq(u3d->irq, mv_u3d_irq,
- IRQF_DISABLED | IRQF_SHARED, driver_name, u3d)) {
+ IRQF_SHARED, driver_name, u3d)) {
u3d->irq = 0;
dev_err(&dev->dev, "Request irq %d for u3d failed\n",
u3d->irq);
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 0781bff7001..fc852177c08 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -129,7 +129,7 @@ static char *type_string (u8 bmAttributes)
case USB_ENDPOINT_XFER_BULK: return "bulk";
case USB_ENDPOINT_XFER_ISOC: return "iso";
case USB_ENDPOINT_XFER_INT: return "intr";
- };
+ }
return "control";
}
#endif
@@ -1630,7 +1630,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
val = "intr"; break;
default:
val = "iso"; break;
- }; val; }),
+ } val; }),
usb_endpoint_maxp (d) & 0x1fff,
ep->dma ? "dma" : "pio", ep->fifo_size
);
@@ -2680,7 +2680,6 @@ static void net2280_remove (struct pci_dev *pdev)
if (dev->enabled)
pci_disable_device (pdev);
device_remove_file (&pdev->dev, &dev_attr_registers);
- pci_set_drvdata (pdev, NULL);
INFO (dev, "unbind\n");
}
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 24174e1d156..32d5e923750 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -3080,7 +3080,6 @@ static void pch_udc_remove(struct pci_dev *pdev)
if (dev->active)
pci_disable_device(pdev);
kfree(dev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 9575085ded8..a3ad732bc81 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1068,7 +1068,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
s = "RNDIS_INITIALIZED"; break;
case RNDIS_DATA_INITIALIZED:
s = "RNDIS_DATA_INITIALIZED"; break;
- }; s; }),
+ } s; }),
param->medium,
(param->media_state) ? 0 : param->speed*100,
(param->media_state) ? "disconnected" : "connected",
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a8a99e4748d..9875d9c0823 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -83,9 +83,12 @@ struct s3c_hsotg_req;
* @dir_in: Set to true if this endpoint is of the IN direction, which
* means that it is sending data to the Host.
* @index: The index for the endpoint registers.
+ * @mc: Multi Count - number of transactions per microframe
+ * @interval: Interval for periodic endpoints
* @name: The name array passed to the USB core.
* @halted: Set if the endpoint has been halted.
* @periodic: Set if this is a periodic ep, such as Interrupt
+ * @isochronous: Set if this is an isochronous ep
* @sent_zlp: Set if we've sent a zero-length packet.
* @total_data: The total number of data bytes done.
* @fifo_size: The size of the FIFO (for periodic IN endpoints)
@@ -121,9 +124,12 @@ struct s3c_hsotg_ep {
unsigned char dir_in;
unsigned char index;
+ unsigned char mc;
+ unsigned char interval;
unsigned int halted:1;
unsigned int periodic:1;
+ unsigned int isochronous:1;
unsigned int sent_zlp:1;
char name[10];
@@ -468,6 +474,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
void *data;
int can_write;
int pkt_round;
+ int max_transfer;
to_write -= (buf_pos - hs_ep->last_load);
@@ -535,8 +542,10 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
can_write *= 4; /* fifo size is in 32bit quantities. */
}
- dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
- __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
+ max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
+
+ dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
+ __func__, gnptxsts, can_write, to_write, max_transfer);
/*
* limit to 512 bytes of data, it seems at least on the non-periodic
@@ -551,19 +560,21 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
* the transfer to return that it did not run out of fifo space
* doing it.
*/
- if (to_write > hs_ep->ep.maxpacket) {
- to_write = hs_ep->ep.maxpacket;
+ if (to_write > max_transfer) {
+ to_write = max_transfer;
- s3c_hsotg_en_gsint(hsotg,
- periodic ? GINTSTS_PTxFEmp :
- GINTSTS_NPTxFEmp);
+ /* the fifo-empty interrupt is only needed when we do not use dedicated fifos */
+ if (!hsotg->dedicated_fifos)
+ s3c_hsotg_en_gsint(hsotg,
+ periodic ? GINTSTS_PTxFEmp :
+ GINTSTS_NPTxFEmp);
}
/* see if we can write data */
if (to_write > can_write) {
to_write = can_write;
- pkt_round = to_write % hs_ep->ep.maxpacket;
+ pkt_round = to_write % max_transfer;
/*
* Round the write down to an
@@ -581,9 +592,11 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
* is more room left.
*/
- s3c_hsotg_en_gsint(hsotg,
- periodic ? GINTSTS_PTxFEmp :
- GINTSTS_NPTxFEmp);
+ /* the fifo-empty interrupt is only needed when we do not use dedicated fifos */
+ if (!hsotg->dedicated_fifos)
+ s3c_hsotg_en_gsint(hsotg,
+ periodic ? GINTSTS_PTxFEmp :
+ GINTSTS_NPTxFEmp);
}
dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
@@ -727,8 +740,16 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
else
packets = 1; /* send one packet if length is zero. */
+ if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
+ dev_err(hsotg->dev, "req length > maxpacket*mc\n");
+ return;
+ }
+
if (dir_in && index != 0)
- epsize = DxEPTSIZ_MC(1);
+ if (hs_ep->isochronous)
+ epsize = DxEPTSIZ_MC(packets);
+ else
+ epsize = DxEPTSIZ_MC(1);
else
epsize = 0;
@@ -820,6 +841,9 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
__func__, readl(hsotg->regs + epctrl_reg));
+
+ /* enable ep interrupts */
+ s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}
/**
@@ -1091,6 +1115,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
struct s3c_hsotg_ep *ep;
int ret;
+ bool halted;
dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
__func__, set ? "SET" : "CLEAR");
@@ -1105,6 +1130,8 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
switch (le16_to_cpu(ctrl->wValue)) {
case USB_ENDPOINT_HALT:
+ halted = ep->halted;
+
s3c_hsotg_ep_sethalt(&ep->ep, set);
ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
@@ -1114,7 +1141,12 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
return ret;
}
- if (!set) {
+ /*
+ * we have to complete all requests for the ep if it was
+ * halted and the halt has just been cleared by CLEAR_FEATURE
+ */
+
+ if (!set && halted) {
/*
* If we have request in progress,
* then complete it
@@ -1147,6 +1179,8 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
return 1;
}
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
+
/**
* s3c_hsotg_process_control - process a control request
* @hsotg: The device state
@@ -1246,11 +1280,15 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
* don't believe we need to do anything more to get the EP
* to reply with a STALL packet
*/
+
+ /*
+ * the completion handler won't be called, so enqueue
+ * the next setup request here
+ */
+ s3c_hsotg_enqueue_setup(hsotg);
}
}
-static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
-
/**
* s3c_hsotg_complete_setup - completion of a setup transfer
* @ep: The endpoint the request was on.
@@ -1698,6 +1736,7 @@ static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
void __iomem *regs = hsotg->regs;
u32 mpsval;
+ u32 mcval;
u32 reg;
if (ep == 0) {
@@ -1705,15 +1744,19 @@ static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
mpsval = s3c_hsotg_ep0_mps(mps);
if (mpsval > 3)
goto bad_mps;
+ hs_ep->ep.maxpacket = mps;
+ hs_ep->mc = 1;
} else {
- if (mps >= DxEPCTL_MPS_LIMIT+1)
+ mpsval = mps & DxEPCTL_MPS_MASK;
+ if (mpsval > 1024)
goto bad_mps;
-
- mpsval = mps;
+ mcval = ((mps >> 11) & 0x3) + 1;
+ hs_ep->mc = mcval;
+ if (mcval > 3)
+ goto bad_mps;
+ hs_ep->ep.maxpacket = mpsval;
}
- hs_ep->ep.maxpacket = mps;
-
/*
* update both the in and out endpoint controldir_ registers, even
* if one of the directions may not be in use.
@@ -1782,8 +1825,16 @@ static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
{
struct s3c_hsotg_req *hs_req = hs_ep->req;
- if (!hs_ep->dir_in || !hs_req)
+ if (!hs_ep->dir_in || !hs_req) {
+ /*
+ * if no request is enqueued, disable the endpoint
+ * interrupt, except for ep0
+ */
+ if (hs_ep->index != 0)
+ s3c_hsotg_ctrl_epint(hsotg, hs_ep->index,
+ hs_ep->dir_in, 0);
return 0;
+ }
if (hs_req->req.actual < hs_req->req.length) {
dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
@@ -1887,8 +1938,10 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
u32 ints;
+ u32 ctrl;
ints = readl(hsotg->regs + epint_reg);
+ ctrl = readl(hsotg->regs + epctl_reg);
/* Clear endpoint interrupts */
writel(ints, hsotg->regs + epint_reg);
@@ -1897,6 +1950,14 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
__func__, idx, dir_in ? "in" : "out", ints);
if (ints & DxEPINT_XferCompl) {
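+ /*
+ * toggle the even/odd (micro)frame in which the next
+ * isochronous packet will be sent, based on the frame
+ * this transfer completed in
+ */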
+ if (hs_ep->isochronous && hs_ep->interval == 1) {
+ if (ctrl & DxEPCTL_EOFrNum)
+ ctrl |= DxEPCTL_SetEvenFr;
+ else
+ ctrl |= DxEPCTL_SetOddFr;
+ writel(ctrl, hsotg->regs + epctl_reg);
+ }
+
dev_dbg(hsotg->dev,
"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
__func__, readl(hsotg->regs + epctl_reg),
@@ -1963,7 +2024,7 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
if (ints & DxEPINT_Back2BackSetup)
dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
- if (dir_in) {
+ if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
if (ints & DIEPMSK_INTknTXFEmpMsk) {
dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
@@ -2092,12 +2153,14 @@ static void kill_all_requests(struct s3c_hsotg *hsotg,
}
#define call_gadget(_hs, _entry) \
+do { \
if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
(_hs)->driver && (_hs)->driver->_entry) { \
spin_unlock(&_hs->lock); \
(_hs)->driver->_entry(&(_hs)->gadget); \
spin_lock(&_hs->lock); \
- }
+ } \
+} while (0)
/**
* s3c_hsotg_disconnect - disconnect service
@@ -2241,15 +2304,19 @@ static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
GAHBCFG_HBstLen_Incr4,
hsotg->regs + GAHBCFG);
else
- writel(GAHBCFG_GlblIntrEn, hsotg->regs + GAHBCFG);
+ writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NPTxFEmpLvl |
+ GAHBCFG_PTxFEmpLvl) : 0) |
+ GAHBCFG_GlblIntrEn,
+ hsotg->regs + GAHBCFG);
/*
- * Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
- * up being flooded with interrupts if the host is polling the
- * endpoint to try and read data.
+ * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
+ * when we have no data to transfer. Otherwise we get flooded with
+ * interrupts.
*/
- writel(((hsotg->dedicated_fifos) ? DIEPMSK_TxFIFOEmpty : 0) |
+ writel(((hsotg->dedicated_fifos) ? DIEPMSK_TxFIFOEmpty |
+ DIEPMSK_INTknTXFEmpMsk : 0) |
DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk |
DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
DIEPMSK_INTknEPMisMsk,
@@ -2378,10 +2445,14 @@ irq_retry:
if (gintsts & (GINTSTS_OEPInt | GINTSTS_IEPInt)) {
u32 daint = readl(hsotg->regs + DAINT);
- u32 daint_out = daint >> DAINT_OutEP_SHIFT;
- u32 daint_in = daint & ~(daint_out << DAINT_OutEP_SHIFT);
+ u32 daintmsk = readl(hsotg->regs + DAINTMSK);
+ u32 daint_out, daint_in;
int ep;
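+ /* only handle endpoint interrupts that are unmasked in DAINTMSK */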
+ daint &= daintmsk;
+ daint_out = daint >> DAINT_OutEP_SHIFT;
+ daint_in = daint & ~(daint_out << DAINT_OutEP_SHIFT);
+
dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
@@ -2577,16 +2648,25 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
epctrl |= DxEPCTL_SNAK;
/* update the endpoint state */
- hs_ep->ep.maxpacket = mps;
+ s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps);
/* default, set to non-periodic */
+ hs_ep->isochronous = 0;
hs_ep->periodic = 0;
+ hs_ep->halted = 0;
+ hs_ep->interval = desc->bInterval;
+
+ if (hs_ep->interval > 1 && hs_ep->mc > 1)
+ dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
- dev_err(hsotg->dev, "no current ISOC support\n");
- ret = -EINVAL;
- goto out;
+ epctrl |= DxEPCTL_EPType_Iso;
+ epctrl |= DxEPCTL_SetEvenFr;
+ hs_ep->isochronous = 1;
+ if (dir_in)
+ hs_ep->periodic = 1;
+ break;
case USB_ENDPOINT_XFER_BULK:
epctrl |= DxEPCTL_EPType_Bulk;
@@ -2634,7 +2714,6 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
/* enable the endpoint interrupt */
s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
-out:
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}
@@ -2776,6 +2855,8 @@ static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
writel(epctl, hs->regs + epreg);
+ hs_ep->halted = value;
+
return 0;
}
@@ -2903,7 +2984,7 @@ static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
int ret;
if (!hsotg) {
- printk(KERN_ERR "%s: called with no device\n", __func__);
+ pr_err("%s: called with no device\n", __func__);
return -ENODEV;
}
@@ -3066,7 +3147,7 @@ static void s3c_hsotg_initep(struct s3c_hsotg *hsotg,
hs_ep->parent = hsotg;
hs_ep->ep.name = hs_ep->name;
- hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
+ hs_ep->ep.maxpacket = epnum ? 1024 : EP0_MPS_LIMIT;
hs_ep->ep.ops = &s3c_hsotg_ep_ops;
/*
@@ -3200,7 +3281,7 @@ static int state_show(struct seq_file *seq, void *v)
readl(regs + GNPTXSTS),
readl(regs + GRXSTSR));
- seq_printf(seq, "\nEndpoint status:\n");
+ seq_puts(seq, "\nEndpoint status:\n");
for (idx = 0; idx < 15; idx++) {
u32 in, out;
@@ -3217,7 +3298,7 @@ static int state_show(struct seq_file *seq, void *v)
seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
in, out);
- seq_printf(seq, "\n");
+ seq_puts(seq, "\n");
}
return 0;
@@ -3251,7 +3332,7 @@ static int fifo_show(struct seq_file *seq, void *v)
u32 val;
int idx;
- seq_printf(seq, "Non-periodic FIFOs:\n");
+ seq_puts(seq, "Non-periodic FIFOs:\n");
seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));
val = readl(regs + GNPTXFSIZ);
@@ -3259,7 +3340,7 @@ static int fifo_show(struct seq_file *seq, void *v)
val >> GNPTXFSIZ_NPTxFDep_SHIFT,
val & GNPTXFSIZ_NPTxFStAddr_MASK);
- seq_printf(seq, "\nPeriodic TXFIFOs:\n");
+ seq_puts(seq, "\nPeriodic TXFIFOs:\n");
for (idx = 1; idx <= 15; idx++) {
val = readl(regs + DPTXFSIZn(idx));
@@ -3330,7 +3411,7 @@ static int ep_show(struct seq_file *seq, void *v)
readl(regs + DIEPTSIZ(index)),
readl(regs + DOEPTSIZ(index)));
- seq_printf(seq, "\n");
+ seq_puts(seq, "\n");
seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
seq_printf(seq, "total_data=%ld\n", ep->total_data);
@@ -3341,7 +3422,7 @@ static int ep_show(struct seq_file *seq, void *v)
list_for_each_entry(req, &ep->queue, queue) {
if (--show_limit < 0) {
- seq_printf(seq, "not showing more requests...\n");
+ seq_puts(seq, "not showing more requests...\n");
break;
}
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 08a1a3210a2..ec20a1f50c2 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -23,242 +23,17 @@
* The valid range of num_buffers is: num >= 2 && num <= 4.
*/
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/usb/composite.h>
-#include <linux/usb/storage.h>
-#include <scsi/scsi.h>
-#include <asm/unaligned.h>
-
-
-/*
- * Thanks to NetChip Technologies for donating this product ID.
- *
- * DO NOT REUSE THESE IDs with any other driver!! Ever!!
- * Instead: allocate your own, using normal USB-IF procedures.
- */
-#define FSG_VENDOR_ID 0x0525 /* NetChip */
-#define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
-
-
-/*-------------------------------------------------------------------------*/
-
-
-#ifndef DEBUG
-#undef VERBOSE_DEBUG
-#undef DUMP_MSGS
-#endif /* !DEBUG */
-
-#ifdef VERBOSE_DEBUG
-#define VLDBG LDBG
-#else
-#define VLDBG(lun, fmt, args...) do { } while (0)
-#endif /* VERBOSE_DEBUG */
-
-#define LDBG(lun, fmt, args...) dev_dbg (&(lun)->dev, fmt, ## args)
-#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
-#define LWARN(lun, fmt, args...) dev_warn(&(lun)->dev, fmt, ## args)
-#define LINFO(lun, fmt, args...) dev_info(&(lun)->dev, fmt, ## args)
-
-
-#ifdef DUMP_MSGS
-
-# define dump_msg(fsg, /* const char * */ label, \
- /* const u8 * */ buf, /* unsigned */ length) do { \
- if (length < 512) { \
- DBG(fsg, "%s, length %u:\n", label, length); \
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \
- 16, 1, buf, length, 0); \
- } \
-} while (0)
-
-# define dump_cdb(fsg) do { } while (0)
-
-#else
-
-# define dump_msg(fsg, /* const char * */ label, \
- /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
-
-# ifdef VERBOSE_DEBUG
-
-# define dump_cdb(fsg) \
- print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \
- 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \
-
-# else
-
-# define dump_cdb(fsg) do { } while (0)
-
-# endif /* VERBOSE_DEBUG */
-
-#endif /* DUMP_MSGS */
-
-/*-------------------------------------------------------------------------*/
-
-/* Length of a SCSI Command Data Block */
-#define MAX_COMMAND_SIZE 16
-
-/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
-#define SS_NO_SENSE 0
-#define SS_COMMUNICATION_FAILURE 0x040800
-#define SS_INVALID_COMMAND 0x052000
-#define SS_INVALID_FIELD_IN_CDB 0x052400
-#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
-#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
-#define SS_MEDIUM_NOT_PRESENT 0x023a00
-#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
-#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
-#define SS_RESET_OCCURRED 0x062900
-#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
-#define SS_UNRECOVERED_READ_ERROR 0x031100
-#define SS_WRITE_ERROR 0x030c02
-#define SS_WRITE_PROTECTED 0x072700
-
-#define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */
-#define ASC(x) ((u8) ((x) >> 8))
-#define ASCQ(x) ((u8) (x))
-
-
-/*-------------------------------------------------------------------------*/
-
-
-struct fsg_lun {
- struct file *filp;
- loff_t file_length;
- loff_t num_sectors;
-
- unsigned int initially_ro:1;
- unsigned int ro:1;
- unsigned int removable:1;
- unsigned int cdrom:1;
- unsigned int prevent_medium_removal:1;
- unsigned int registered:1;
- unsigned int info_valid:1;
- unsigned int nofua:1;
-
- u32 sense_data;
- u32 sense_data_info;
- u32 unit_attention_data;
-
- unsigned int blkbits; /* Bits of logical block size of bound block device */
- unsigned int blksize; /* logical block size of bound block device */
- struct device dev;
-};
-
-static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
-{
- return curlun->filp != NULL;
-}
-
-static inline struct fsg_lun *fsg_lun_from_dev(struct device *dev)
-{
- return container_of(dev, struct fsg_lun, dev);
-}
-
-
-/* Big enough to hold our biggest descriptor */
-#define EP0_BUFSIZE 256
-#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
-
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-
-static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
-module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO);
-MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers");
-
-#else
-
-/*
- * Number of buffers we will use.
- * 2 is usually enough for good buffering pipeline
- */
-#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
-
-#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-
-/* check if fsg_num_buffers is within a valid range */
-static inline int fsg_num_buffers_validate(void)
-{
- if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4)
- return 0;
- pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
- fsg_num_buffers, 2 ,4);
- return -EINVAL;
-}
-
-/* Default size of buffer length. */
-#define FSG_BUFLEN ((u32)16384)
-
-/* Maximal number of LUNs supported in mass storage function */
-#define FSG_MAX_LUNS 8
-
-enum fsg_buffer_state {
- BUF_STATE_EMPTY = 0,
- BUF_STATE_FULL,
- BUF_STATE_BUSY
-};
-
-struct fsg_buffhd {
- void *buf;
- enum fsg_buffer_state state;
- struct fsg_buffhd *next;
-
- /*
- * The NetChip 2280 is faster, and handles some protocol faults
- * better, if we don't submit any short bulk-out read requests.
- * So we will record the intended request length here.
- */
- unsigned int bulk_out_intended_length;
-
- struct usb_request *inreq;
- int inreq_busy;
- struct usb_request *outreq;
- int outreq_busy;
-};
-
-enum fsg_state {
- /* This one isn't used anywhere */
- FSG_STATE_COMMAND_PHASE = -10,
- FSG_STATE_DATA_PHASE,
- FSG_STATE_STATUS_PHASE,
-
- FSG_STATE_IDLE = 0,
- FSG_STATE_ABORT_BULK_OUT,
- FSG_STATE_RESET,
- FSG_STATE_INTERFACE_CHANGE,
- FSG_STATE_CONFIG_CHANGE,
- FSG_STATE_DISCONNECT,
- FSG_STATE_EXIT,
- FSG_STATE_TERMINATED
-};
-
-enum data_direction {
- DATA_DIR_UNKNOWN = 0,
- DATA_DIR_FROM_HOST,
- DATA_DIR_TO_HOST,
- DATA_DIR_NONE
-};
-
-
-/*-------------------------------------------------------------------------*/
-
-
-static inline u32 get_unaligned_be24(u8 *buf)
-{
- return 0xffffff & (u32) get_unaligned_be32(buf - 1);
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-
-enum {
- FSG_STRING_INTERFACE
-};
-
+#include "storage_common.h"
/* There is only one interface. */
-static struct usb_interface_descriptor
-fsg_intf_desc = {
+struct usb_interface_descriptor fsg_intf_desc = {
.bLength = sizeof fsg_intf_desc,
.bDescriptorType = USB_DT_INTERFACE,
@@ -268,14 +43,14 @@ fsg_intf_desc = {
.bInterfaceProtocol = USB_PR_BULK, /* Adjusted during fsg_bind() */
.iInterface = FSG_STRING_INTERFACE,
};
+EXPORT_SYMBOL(fsg_intf_desc);
/*
* Three full-speed endpoint descriptors: bulk-in, bulk-out, and
* interrupt-in.
*/
-static struct usb_endpoint_descriptor
-fsg_fs_bulk_in_desc = {
+struct usb_endpoint_descriptor fsg_fs_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -283,9 +58,9 @@ fsg_fs_bulk_in_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
/* wMaxPacketSize set by autoconfiguration */
};
+EXPORT_SYMBOL(fsg_fs_bulk_in_desc);
-static struct usb_endpoint_descriptor
-fsg_fs_bulk_out_desc = {
+struct usb_endpoint_descriptor fsg_fs_bulk_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -293,13 +68,15 @@ fsg_fs_bulk_out_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
/* wMaxPacketSize set by autoconfiguration */
};
+EXPORT_SYMBOL(fsg_fs_bulk_out_desc);
-static struct usb_descriptor_header *fsg_fs_function[] = {
+struct usb_descriptor_header *fsg_fs_function[] = {
(struct usb_descriptor_header *) &fsg_intf_desc,
(struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
(struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
NULL,
};
+EXPORT_SYMBOL(fsg_fs_function);
/*
@@ -310,8 +87,7 @@ static struct usb_descriptor_header *fsg_fs_function[] = {
* and a "device qualifier" ... plus more construction options
* for the configuration descriptor.
*/
-static struct usb_endpoint_descriptor
-fsg_hs_bulk_in_desc = {
+struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -319,9 +95,9 @@ fsg_hs_bulk_in_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
+EXPORT_SYMBOL(fsg_hs_bulk_in_desc);
-static struct usb_endpoint_descriptor
-fsg_hs_bulk_out_desc = {
+struct usb_endpoint_descriptor fsg_hs_bulk_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -330,17 +106,18 @@ fsg_hs_bulk_out_desc = {
.wMaxPacketSize = cpu_to_le16(512),
.bInterval = 1, /* NAK every 1 uframe */
};
+EXPORT_SYMBOL(fsg_hs_bulk_out_desc);
-static struct usb_descriptor_header *fsg_hs_function[] = {
+struct usb_descriptor_header *fsg_hs_function[] = {
(struct usb_descriptor_header *) &fsg_intf_desc,
(struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
(struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
NULL,
};
+EXPORT_SYMBOL(fsg_hs_function);
-static struct usb_endpoint_descriptor
-fsg_ss_bulk_in_desc = {
+struct usb_endpoint_descriptor fsg_ss_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -348,16 +125,17 @@ fsg_ss_bulk_in_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
+EXPORT_SYMBOL(fsg_ss_bulk_in_desc);
-static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = {
+struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = {
.bLength = sizeof(fsg_ss_bulk_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/*.bMaxBurst = DYNAMIC, */
};
+EXPORT_SYMBOL(fsg_ss_bulk_in_comp_desc);
-static struct usb_endpoint_descriptor
-fsg_ss_bulk_out_desc = {
+struct usb_endpoint_descriptor fsg_ss_bulk_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -365,15 +143,17 @@ fsg_ss_bulk_out_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
+EXPORT_SYMBOL(fsg_ss_bulk_out_desc);
-static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
+struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
.bLength = sizeof(fsg_ss_bulk_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/*.bMaxBurst = DYNAMIC, */
};
+EXPORT_SYMBOL(fsg_ss_bulk_out_comp_desc);
-static struct usb_descriptor_header *fsg_ss_function[] = {
+struct usb_descriptor_header *fsg_ss_function[] = {
(struct usb_descriptor_header *) &fsg_intf_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_in_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc,
@@ -381,17 +161,7 @@ static struct usb_descriptor_header *fsg_ss_function[] = {
(struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc,
NULL,
};
-
-/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
-static struct usb_string fsg_strings[] = {
- {FSG_STRING_INTERFACE, fsg_string_interface},
- {}
-};
-
-static struct usb_gadget_strings fsg_stringtab = {
- .language = 0x0409, /* en-us */
- .strings = fsg_strings,
-};
+EXPORT_SYMBOL(fsg_ss_function);
/*-------------------------------------------------------------------------*/
@@ -401,7 +171,7 @@ static struct usb_gadget_strings fsg_stringtab = {
* the caller must own fsg->filesem for writing.
*/
-static void fsg_lun_close(struct fsg_lun *curlun)
+void fsg_lun_close(struct fsg_lun *curlun)
{
if (curlun->filp) {
LDBG(curlun, "close backing file\n");
@@ -409,9 +179,9 @@ static void fsg_lun_close(struct fsg_lun *curlun)
curlun->filp = NULL;
}
}
+EXPORT_SYMBOL(fsg_lun_close);
-
-static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
+int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
{
int ro;
struct file *filp = NULL;
@@ -508,6 +278,7 @@ out:
fput(filp);
return rc;
}
+EXPORT_SYMBOL(fsg_lun_open);
/*-------------------------------------------------------------------------*/
@@ -516,7 +287,7 @@ out:
* Sync the file data, don't bother with the metadata.
* This code was copied from fs/buffer.c:sys_fdatasync().
*/
-static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
+int fsg_lun_fsync_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
@@ -524,8 +295,9 @@ static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
return 0;
return vfs_fsync(filp, 1);
}
+EXPORT_SYMBOL(fsg_lun_fsync_sub);
-static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
/* Convert to Minutes-Seconds-Frames */
@@ -542,34 +314,28 @@ static void store_cdrom_address(u8 *dest, int msf, u32 addr)
put_unaligned_be32(addr, dest);
}
}
-
+EXPORT_SYMBOL(store_cdrom_address);
/*-------------------------------------------------------------------------*/
-static ssize_t ro_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ssize_t fsg_show_ro(struct fsg_lun *curlun, char *buf)
{
- struct fsg_lun *curlun = fsg_lun_from_dev(dev);
-
return sprintf(buf, "%d\n", fsg_lun_is_open(curlun)
? curlun->ro
: curlun->initially_ro);
}
+EXPORT_SYMBOL(fsg_show_ro);
-static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ssize_t fsg_show_nofua(struct fsg_lun *curlun, char *buf)
{
- struct fsg_lun *curlun = fsg_lun_from_dev(dev);
-
return sprintf(buf, "%u\n", curlun->nofua);
}
+EXPORT_SYMBOL(fsg_show_nofua);
-static ssize_t file_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ char *buf)
{
- struct fsg_lun *curlun = fsg_lun_from_dev(dev);
- struct rw_semaphore *filesem = dev_get_drvdata(dev);
char *p;
ssize_t rc;
@@ -591,17 +357,44 @@ static ssize_t file_show(struct device *dev, struct device_attribute *attr,
up_read(filesem);
return rc;
}
+EXPORT_SYMBOL(fsg_show_file);
+ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf)
+{
+ return sprintf(buf, "%u\n", curlun->cdrom);
+}
+EXPORT_SYMBOL(fsg_show_cdrom);
-static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf)
+{
+ return sprintf(buf, "%u\n", curlun->removable);
+}
+EXPORT_SYMBOL(fsg_show_removable);
+
+/*
+ * The caller must hold fsg->filesem for reading when calling this function.
+ */
+static ssize_t _fsg_store_ro(struct fsg_lun *curlun, bool ro)
+{
+ if (fsg_lun_is_open(curlun)) {
+ LDBG(curlun, "read-only status change prevented\n");
+ return -EBUSY;
+ }
+
+ curlun->ro = ro;
+ curlun->initially_ro = ro;
+ LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+
+ return 0;
+}
+
+ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count)
{
ssize_t rc;
- struct fsg_lun *curlun = fsg_lun_from_dev(dev);
- struct rw_semaphore *filesem = dev_get_drvdata(dev);
- unsigned ro;
+ bool ro;
- rc = kstrtouint(buf, 2, &ro);
+ rc = strtobool(buf, &ro);
if (rc)
return rc;
@@ -610,27 +403,21 @@ static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
* backing file is closed.
*/
down_read(filesem);
- if (fsg_lun_is_open(curlun)) {
- LDBG(curlun, "read-only status change prevented\n");
- rc = -EBUSY;
- } else {
- curlun->ro = ro;
- curlun->initially_ro = ro;
- LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+ rc = _fsg_store_ro(curlun, ro);
+ if (!rc)
rc = count;
- }
up_read(filesem);
+
return rc;
}
+EXPORT_SYMBOL(fsg_store_ro);
-static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count)
{
- struct fsg_lun *curlun = fsg_lun_from_dev(dev);
- unsigned nofua;
+ bool nofua;
int ret;
- ret = kstrtouint(buf, 2, &nofua);
+ ret = strtobool(buf, &nofua);
if (ret)
return ret;
@@ -642,12 +429,11 @@ static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
return count;
}
+EXPORT_SYMBOL(fsg_store_nofua);
-static ssize_t file_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ssize_t fsg_store_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count)
{
- struct fsg_lun *curlun = fsg_lun_from_dev(dev);
- struct rw_semaphore *filesem = dev_get_drvdata(dev);
int rc = 0;
if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
@@ -674,3 +460,45 @@ static ssize_t file_store(struct device *dev, struct device_attribute *attr,
up_write(filesem);
return (rc < 0 ? rc : count);
}
+EXPORT_SYMBOL(fsg_store_file);
+
+ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count)
+{
+ bool cdrom;
+ int ret;
+
+ ret = strtobool(buf, &cdrom);
+ if (ret)
+ return ret;
+
+ down_read(filesem);
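+ /* a cdrom LUN must be read-only, so enabling cdrom also forces ro */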
+ ret = cdrom ? _fsg_store_ro(curlun, true) : 0;
+
+ if (!ret) {
+ curlun->cdrom = cdrom;
+ ret = count;
+ }
+ up_read(filesem);
+
+ return ret;
+}
+EXPORT_SYMBOL(fsg_store_cdrom);
+
+ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf,
+ size_t count)
+{
+ bool removable;
+ int ret;
+
+ ret = strtobool(buf, &removable);
+ if (ret)
+ return ret;
+
+ curlun->removable = removable;
+
+ return count;
+}
+EXPORT_SYMBOL(fsg_store_removable);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/storage_common.h b/drivers/usb/gadget/storage_common.h
new file mode 100644
index 00000000000..c74c2fdbd56
--- /dev/null
+++ b/drivers/usb/gadget/storage_common.h
@@ -0,0 +1,229 @@
+#ifndef USB_STORAGE_COMMON_H
+#define USB_STORAGE_COMMON_H
+
+#include <linux/device.h>
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+#ifndef DEBUG
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* !DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG LDBG
+#else
+#define VLDBG(lun, fmt, args...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
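+/*
+ * Prefix LUN messages with "<function name>/<lun name>" when a name
+ * prefix has been set, otherwise with the LUN name alone.
+ */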
+#define _LMSG(func, lun, fmt, args...) \
+ do { \
+ if ((lun)->name_pfx && *(lun)->name_pfx) \
+ func("%s/%s: " fmt, *(lun)->name_pfx, \
+ (lun)->name, ## args); \
+ else \
+ func("%s: " fmt, (lun)->name, ## args); \
+ } while (0)
+
+#define LDBG(lun, fmt, args...) _LMSG(pr_debug, lun, fmt, ## args)
+#define LERROR(lun, fmt, args...) _LMSG(pr_err, lun, fmt, ## args)
+#define LWARN(lun, fmt, args...) _LMSG(pr_warn, lun, fmt, ## args)
+#define LINFO(lun, fmt, args...) _LMSG(pr_info, lun, fmt, ## args)
+
+
+#ifdef DUMP_MSGS
+
+# define dump_msg(fsg, /* const char * */ label, \
+ /* const u8 * */ buf, /* unsigned */ length) \
+do { \
+ if (length < 512) { \
+ DBG(fsg, "%s, length %u:\n", label, length); \
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, buf, length, 0); \
+ } \
+} while (0)
+
+# define dump_cdb(fsg) do { } while (0)
+
+#else
+
+# define dump_msg(fsg, /* const char * */ label, \
+ /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
+
+# ifdef VERBOSE_DEBUG
+
+# define dump_cdb(fsg) \
+ print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \
+ 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \
+
+# else
+
+# define dump_cdb(fsg) do { } while (0)
+
+# endif /* VERBOSE_DEBUG */
+
+#endif /* DUMP_MSGS */
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE 16
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE 0
+#define SS_COMMUNICATION_FAILURE 0x040800
+#define SS_INVALID_COMMAND 0x052000
+#define SS_INVALID_FIELD_IN_CDB 0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
+#define SS_MEDIUM_NOT_PRESENT 0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
+#define SS_RESET_OCCURRED 0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
+#define SS_UNRECOVERED_READ_ERROR 0x031100
+#define SS_WRITE_ERROR 0x030c02
+#define SS_WRITE_PROTECTED 0x072700
+
+#define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */
+#define ASC(x) ((u8) ((x) >> 8))
+#define ASCQ(x) ((u8) (x))
+
+struct fsg_lun {
+ struct file *filp;
+ loff_t file_length;
+ loff_t num_sectors;
+
+ unsigned int initially_ro:1;
+ unsigned int ro:1;
+ unsigned int removable:1;
+ unsigned int cdrom:1;
+ unsigned int prevent_medium_removal:1;
+ unsigned int registered:1;
+ unsigned int info_valid:1;
+ unsigned int nofua:1;
+
+ u32 sense_data;
+ u32 sense_data_info;
+ u32 unit_attention_data;
+
+ unsigned int blkbits; /* Bits of logical block size
+ of bound block device */
+ unsigned int blksize; /* logical block size of bound block device */
+ struct device dev;
+ const char *name; /* "lun.name" */
+ const char **name_pfx; /* "function.name" */
+};
+
+static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
+{
+ return curlun->filp != NULL;
+}
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE 256
+#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
+
+/* Default size of buffer length. */
+#define FSG_BUFLEN ((u32)16384)
+
+/* Maximal number of LUNs supported in mass storage function */
+#define FSG_MAX_LUNS 8
+
+enum fsg_buffer_state {
+ BUF_STATE_EMPTY = 0,
+ BUF_STATE_FULL,
+ BUF_STATE_BUSY
+};
+
+struct fsg_buffhd {
+ void *buf;
+ enum fsg_buffer_state state;
+ struct fsg_buffhd *next;
+
+ /*
+ * The NetChip 2280 is faster, and handles some protocol faults
+ * better, if we don't submit any short bulk-out read requests.
+ * So we will record the intended request length here.
+ */
+ unsigned int bulk_out_intended_length;
+
+ struct usb_request *inreq;
+ int inreq_busy;
+ struct usb_request *outreq;
+ int outreq_busy;
+};
+
+enum fsg_state {
+ /* This one isn't used anywhere */
+ FSG_STATE_COMMAND_PHASE = -10,
+ FSG_STATE_DATA_PHASE,
+ FSG_STATE_STATUS_PHASE,
+
+ FSG_STATE_IDLE = 0,
+ FSG_STATE_ABORT_BULK_OUT,
+ FSG_STATE_RESET,
+ FSG_STATE_INTERFACE_CHANGE,
+ FSG_STATE_CONFIG_CHANGE,
+ FSG_STATE_DISCONNECT,
+ FSG_STATE_EXIT,
+ FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+ DATA_DIR_UNKNOWN = 0,
+ DATA_DIR_FROM_HOST,
+ DATA_DIR_TO_HOST,
+ DATA_DIR_NONE
+};
+
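+/*
+ * Fetch a 24-bit big-endian value by reading 32 bits starting one byte
+ * earlier and masking off the top byte.
+ */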
+static inline u32 get_unaligned_be24(u8 *buf)
+{
+ return 0xffffff & (u32) get_unaligned_be32(buf - 1);
+}
+
+static inline struct fsg_lun *fsg_lun_from_dev(struct device *dev)
+{
+ return container_of(dev, struct fsg_lun, dev);
+}
+
+enum {
+ FSG_STRING_INTERFACE
+};
+
+extern struct usb_interface_descriptor fsg_intf_desc;
+
+extern struct usb_endpoint_descriptor fsg_fs_bulk_in_desc;
+extern struct usb_endpoint_descriptor fsg_fs_bulk_out_desc;
+extern struct usb_descriptor_header *fsg_fs_function[];
+
+extern struct usb_endpoint_descriptor fsg_hs_bulk_in_desc;
+extern struct usb_endpoint_descriptor fsg_hs_bulk_out_desc;
+extern struct usb_descriptor_header *fsg_hs_function[];
+
+extern struct usb_endpoint_descriptor fsg_ss_bulk_in_desc;
+extern struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc;
+extern struct usb_endpoint_descriptor fsg_ss_bulk_out_desc;
+extern struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc;
+extern struct usb_descriptor_header *fsg_ss_function[];
+
+void fsg_lun_close(struct fsg_lun *curlun);
+int fsg_lun_open(struct fsg_lun *curlun, const char *filename);
+int fsg_lun_fsync_sub(struct fsg_lun *curlun);
+void store_cdrom_address(u8 *dest, int msf, u32 addr);
+ssize_t fsg_show_ro(struct fsg_lun *curlun, char *buf);
+ssize_t fsg_show_nofua(struct fsg_lun *curlun, char *buf);
+ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ char *buf);
+ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf);
+ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf);
+ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count);
+ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count);
+ssize_t fsg_store_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count);
+ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count);
+ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf,
+ size_t count);
+
+#endif /* USB_STORAGE_COMMON_H */
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 0ff33396eef..6c3d7950d2a 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -472,7 +472,7 @@ static int usbg_bot_setup(struct usb_function *f,
bot_enqueue_cmd_cbw(fu);
return 0;
break;
- };
+ }
return -ENOTSUPP;
}
@@ -617,7 +617,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
default:
BUG();
- };
+ }
return;
cleanup:
@@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void)
}
fabric->tf_ops = usbg_ops;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
printk(KERN_ERR "target_fabric_configfs_register() failed"
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 59891b1c48f..27768a7d986 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -356,7 +356,8 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
err1:
- dev_err(&udc->dev, "failed to start %s: %d\n",
+ if (ret != -EISNAM)
+ dev_err(&udc->dev, "failed to start %s: %d\n",
udc->driver->function, ret);
udc->driver = NULL;
udc->dev.driver = NULL;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 0deb9d6cde2..0dd07ae1555 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -95,6 +95,18 @@ unsigned autoresume = DEFAULT_AUTORESUME;
module_param(autoresume, uint, S_IRUGO);
MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
+/* Maximum autoresume time */
+unsigned max_autoresume;
+module_param(max_autoresume, uint, S_IRUGO);
+MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup");
+
+/* Interval between two remote wakeups */
+unsigned autoresume_interval_ms;
+module_param(autoresume_interval_ms, uint, S_IRUGO);
+MODULE_PARM_DESC(autoresume_interval_ms,
+ "milliseconds to increase successive wakeup delays");
+
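+/* current wakeup delay in ms; grows by autoresume_interval_ms after each suspend */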
+static unsigned autoresume_step_ms;
/*-------------------------------------------------------------------------*/
static struct usb_device_descriptor device_desc = {
@@ -183,8 +195,16 @@ static void zero_suspend(struct usb_composite_dev *cdev)
return;
if (autoresume) {
- mod_timer(&autoresume_timer, jiffies + (HZ * autoresume));
- DBG(cdev, "suspend, wakeup in %d seconds\n", autoresume);
+ if (max_autoresume &&
+ (autoresume_step_ms > max_autoresume * 1000))
+ autoresume_step_ms = autoresume * 1000;
+
+ mod_timer(&autoresume_timer, jiffies +
+ msecs_to_jiffies(autoresume_step_ms));
+ DBG(cdev, "suspend, wakeup in %d milliseconds\n",
+ autoresume_step_ms);
+
+ autoresume_step_ms += autoresume_interval_ms;
} else
DBG(cdev, "%s\n", __func__);
}
@@ -316,6 +336,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
if (autoresume) {
sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ autoresume_step_ms = autoresume * 1000;
}
/* support OTG systems */
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index b3f20d7f15d..a9707da7da0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -54,7 +54,7 @@ config USB_EHCI_HCD
config USB_EHCI_ROOT_HUB_TT
bool "Root Hub Transaction Translators"
- depends on USB_EHCI_HCD || USB_CHIPIDEA_HOST
+ depends on USB_EHCI_HCD
---help---
Some EHCI chips have vendor-specific extensions to integrate
transaction translators, so that no OHCI or UHCI companion
@@ -66,7 +66,7 @@ config USB_EHCI_ROOT_HUB_TT
config USB_EHCI_TT_NEWSCHED
bool "Improved Transaction Translator scheduling"
- depends on USB_EHCI_HCD || USB_CHIPIDEA_HOST
+ depends on USB_EHCI_HCD
default y
---help---
This changes the periodic scheduling code to fill more of the low
@@ -203,12 +203,11 @@ config USB_EHCI_SH
Enables support for the on-chip EHCI controller on the SuperH.
If you use the PCI EHCI controller, this option is not necessary.
-config USB_EHCI_S5P
+config USB_EHCI_EXYNOS
tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
depends on PLAT_S5P || ARCH_EXYNOS
help
- Enable support for the Samsung S5Pxxxx and Exynos3/4/5 SOC's
- on-chip EHCI controller.
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
bool "EHCI support for Marvell PXA/MMP USB controller"
@@ -224,7 +223,7 @@ config USB_EHCI_MV
on-chip EHCI USB controller" for those.
config USB_W90X900_EHCI
- bool "W90X900(W90P910) EHCI support"
+ tristate "W90X900(W90P910) EHCI support"
depends on ARCH_W90X900
---help---
Enables support for the W90X900 USB controller
@@ -367,14 +366,54 @@ config USB_OHCI_HCD
if USB_OHCI_HCD
config USB_OHCI_HCD_OMAP1
- bool "OHCI support for OMAP1/2 chips"
+ tristate "OHCI support for OMAP1/2 chips"
depends on ARCH_OMAP1
default y
---help---
Enables support for the OHCI controller on OMAP1/2 chips.
+config USB_OHCI_HCD_SPEAR
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
+
+config USB_OHCI_HCD_S3C2410
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
+
+config USB_OHCI_HCD_LPC32XX
+ tristate "Support for LPC on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_LPC32XX
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
+
+config USB_OHCI_HCD_PXA27X
+ tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && (PXA27x || PXA3xx)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ PXA27x/PXA3xx chips.
+
+config USB_OHCI_HCD_AT91
+ tristate "Support for Atmel on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
+
config USB_OHCI_HCD_OMAP3
- bool "OHCI support for OMAP3 and later chips"
+ tristate "OHCI support for OMAP3 and later chips"
depends on (ARCH_OMAP3 || ARCH_OMAP4)
default y
---help---
@@ -454,8 +493,8 @@ config USB_OHCI_SH
If you use the PCI OHCI controller, this option is not necessary.
config USB_OHCI_EXYNOS
- boolean "OHCI support for Samsung EXYNOS SoC Series"
- depends on ARCH_EXYNOS
+ tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on PLAT_S5P || ARCH_EXYNOS
help
Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 50b0041c09a..01e879ef365 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -34,10 +34,11 @@ obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o
obj-$(CONFIG_USB_EHCI_HCD_OMAP) += ehci-omap.o
obj-$(CONFIG_USB_EHCI_HCD_ORION) += ehci-orion.o
obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
-obj-$(CONFIG_USB_EHCI_S5P) += ehci-s5p.o
+obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+obj-$(CONFIG_USB_W90X900_EHCI) += ehci-w90x900.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
@@ -46,6 +47,14 @@ obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
obj-$(CONFIG_USB_OHCI_HCD_PCI) += ohci-pci.o
obj-$(CONFIG_USB_OHCI_HCD_PLATFORM) += ohci-platform.o
+obj-$(CONFIG_USB_OHCI_EXYNOS) += ohci-exynos.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP1) += ohci-omap.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP3) += ohci-omap3.o
+obj-$(CONFIG_USB_OHCI_HCD_SPEAR) += ohci-spear.o
+obj-$(CONFIG_USB_OHCI_HCD_AT91) += ohci-at91.o
+obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o
+obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o
+obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index df13d425e9c..205f4a33658 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -227,8 +227,7 @@ static int bcma_hcd_probe(struct bcma_device *dev)
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 3b645ff46f7..284f8417eae 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -30,13 +30,17 @@ static const char hcd_name[] = "ehci-atmel";
static struct hc_driver __read_mostly ehci_atmel_hc_driver;
/* interface and function clocks */
-static struct clk *iclk, *fclk;
+static struct clk *iclk, *fclk, *uclk;
static int clocked;
/*-------------------------------------------------------------------------*/
static void atmel_start_clock(void)
{
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
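+ /* the USB clock has to run at 48 MHz */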
+ clk_set_rate(uclk, 48000000);
+ clk_prepare_enable(uclk);
+ }
clk_prepare_enable(iclk);
clk_prepare_enable(fclk);
clocked = 1;
@@ -46,6 +50,8 @@ static void atmel_stop_clock(void)
{
clk_disable_unprepare(fclk);
clk_disable_unprepare(iclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(uclk);
clocked = 0;
}
@@ -90,10 +96,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail_create_hcd;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
@@ -130,6 +135,14 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
retval = -ENOENT;
goto fail_request_resource;
}
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ uclk = devm_clk_get(&pdev->dev, "usb_clk");
+ if (IS_ERR(uclk)) {
+ dev_err(&pdev->dev, "failed to get uclk\n");
+ retval = PTR_ERR(uclk);
+ goto fail_request_resource;
+ }
+ }
ehci = hcd_to_ehci(hcd);
/* registers start at offset 0x0 */
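A minimal sketch of the common-clock sequence the Atmel hunks above introduce for the CONFIG_COMMON_CLK case; the helper name example_enable_usb_clk() is hypothetical, while the "usb_clk" lookup name and the 48 MHz rate come from the patch itself.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative only: look up the transceiver clock, set it to 48 MHz,
 * then prepare and enable it; undo with clk_disable_unprepare(). */
static int example_enable_usb_clk(struct platform_device *pdev,
				  struct clk **uclk)
{
	int ret;

	*uclk = devm_clk_get(&pdev->dev, "usb_clk");
	if (IS_ERR(*uclk))
		return PTR_ERR(*uclk);

	ret = clk_set_rate(*uclk, 48000000);
	if (ret)
		return ret;

	return clk_prepare_enable(*uclk);
}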
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index aa5b603f393..4a9c2edbcb2 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -334,6 +334,7 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
+static int debug_bandwidth_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
@@ -347,6 +348,13 @@ static const struct file_operations debug_async_fops = {
.release = debug_close,
.llseek = default_llseek,
};
+static const struct file_operations debug_bandwidth_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_bandwidth_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
@@ -379,7 +387,7 @@ struct debug_buffer {
case QH_LOW_SPEED: tmp = 'l'; break; \
case QH_HIGH_SPEED: tmp = 'h'; break; \
default: tmp = '?'; break; \
- }; tmp; })
+ } tmp; })
static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
{
@@ -525,6 +533,89 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
return strlen(buf->output_buf);
}
+static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
+{
+ struct ehci_hcd *ehci;
+ struct ehci_tt *tt;
+ struct ehci_per_sched *ps;
+ unsigned temp, size;
+ char *next;
+ unsigned i;
+ u8 *bw;
+ u16 *bf;
+ u8 budget[EHCI_BANDWIDTH_SIZE];
+
+ ehci = hcd_to_ehci(bus_to_hcd(buf->bus));
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ spin_lock_irq(&ehci->lock);
+
+ /* Dump the HS bandwidth table */
+ temp = scnprintf(next, size,
+ "HS bandwidth allocation (us per microframe)\n");
+ size -= temp;
+ next += temp;
+ for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+ bw = &ehci->bandwidth[i];
+ temp = scnprintf(next, size,
+ "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+ i, bw[0], bw[1], bw[2], bw[3],
+ bw[4], bw[5], bw[6], bw[7]);
+ size -= temp;
+ next += temp;
+ }
+
+ /* Dump all the FS/LS tables */
+ list_for_each_entry(tt, &ehci->tt_list, tt_list) {
+ temp = scnprintf(next, size,
+ "\nTT %s port %d FS/LS bandwidth allocation (us per frame)\n",
+ dev_name(&tt->usb_tt->hub->dev),
+ tt->tt_port + !!tt->usb_tt->multi);
+ size -= temp;
+ next += temp;
+
+ bf = tt->bandwidth;
+ temp = scnprintf(next, size,
+ " %5u%5u%5u%5u%5u%5u%5u%5u\n",
+ bf[0], bf[1], bf[2], bf[3],
+ bf[4], bf[5], bf[6], bf[7]);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size,
+ "FS/LS budget (us per microframe)\n");
+ size -= temp;
+ next += temp;
+ compute_tt_budget(budget, tt);
+ for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+ bw = &budget[i];
+ temp = scnprintf(next, size,
+ "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+ i, bw[0], bw[1], bw[2], bw[3],
+ bw[4], bw[5], bw[6], bw[7]);
+ size -= temp;
+ next += temp;
+ }
+ list_for_each_entry(ps, &tt->ps_list, ps_list) {
+ temp = scnprintf(next, size,
+ "%s ep %02x: %4u @ %2u.%u+%u mask %04x\n",
+ dev_name(&ps->udev->dev),
+ ps->ep->desc.bEndpointAddress,
+ ps->tt_usecs,
+ ps->bw_phase, ps->phase_uf,
+ ps->bw_period, ps->cs_mask);
+ size -= temp;
+ next += temp;
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+
+ return next - buf->output_buf;
+}
+
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
@@ -571,7 +662,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_QH:
hw = p.qh->hw;
temp = scnprintf (next, size, " qh%d-%04x/%p",
- p.qh->period,
+ p.qh->ps.period,
hc32_to_cpup(ehci,
&hw->hw_info2)
/* uframe masks */
@@ -618,7 +709,8 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
speed_char (scratch),
scratch & 0x007f,
(scratch >> 8) & 0x000f, type,
- p.qh->usecs, p.qh->c_usecs,
+ p.qh->ps.usecs,
+ p.qh->ps.c_usecs,
temp,
0x7ff & (scratch >> 16));
@@ -645,7 +737,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_SITD:
temp = scnprintf (next, size,
" sitd%d-%04x/%p",
- p.sitd->stream->interval,
+ p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
p.sitd);
@@ -918,6 +1010,7 @@ static int debug_close(struct inode *inode, struct file *file)
return 0;
}
+
static int debug_async_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
@@ -925,6 +1018,14 @@ static int debug_async_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
+static int debug_bandwidth_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_bandwidth_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
static int debug_periodic_open(struct inode *inode, struct file *file)
{
struct debug_buffer *buf;
@@ -957,6 +1058,10 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
&debug_async_fops))
goto file_error;
+ if (!debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
+ &debug_bandwidth_fops))
+ goto file_error;
+
if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
&debug_periodic_fops))
goto file_error;
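The debugfs additions above expose the new per-microframe budgeting as a read-only "bandwidth" file alongside the existing "async" and "periodic" files. A hedged user-space sketch for inspecting it; the controller path is an assumption and must be replaced with the bus name of the EHCI instance on the target system.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is illustrative; substitute your controller's directory. */
	const char *path = "/sys/kernel/debug/usb/ehci/0000:00:1d.0/bandwidth";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}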
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-exynos.c
index 7c3de95c705..e97c198e052 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,5 +1,5 @@
/*
- * SAMSUNG S5P USB HOST EHCI Controller
+ * SAMSUNG EXYNOS USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/usb-ehci-s5p.h>
#include <linux/usb/phy.h>
#include <linux/usb/samsung_usb_phy.h>
#include <linux/usb.h>
@@ -29,7 +28,7 @@
#include "ehci.h"
-#define DRIVER_DESC "EHCI s5p driver"
+#define DRIVER_DESC "EHCI EXYNOS driver"
#define EHCI_INSNREG00(base) (base + 0x90)
#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
@@ -40,21 +39,18 @@
(EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
-static const char hcd_name[] = "ehci-s5p";
-static struct hc_driver __read_mostly s5p_ehci_hc_driver;
+static const char hcd_name[] = "ehci-exynos";
+static struct hc_driver __read_mostly exynos_ehci_hc_driver;
-struct s5p_ehci_hcd {
+struct exynos_ehci_hcd {
struct clk *clk;
struct usb_phy *phy;
struct usb_otg *otg;
- struct s5p_ehci_platdata *pdata;
};
-static struct s5p_ehci_platdata empty_platdata;
+#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
-#define to_s5p_ehci(hcd) (struct s5p_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
-
-static void s5p_setup_vbus_gpio(struct platform_device *pdev)
+static void exynos_setup_vbus_gpio(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int err;
@@ -73,10 +69,9 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev)
dev_err(dev, "can't request ehci vbus gpio %d", gpio);
}
-static int s5p_ehci_probe(struct platform_device *pdev)
+static int exynos_ehci_probe(struct platform_device *pdev)
{
- struct s5p_ehci_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s5p_ehci_hcd *s5p_ehci;
+ struct exynos_ehci_hcd *exynos_ehci;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource *res;
@@ -89,53 +84,45 @@ static int s5p_ehci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
	 * Once we move to full device tree support this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
- s5p_setup_vbus_gpio(pdev);
+ exynos_setup_vbus_gpio(pdev);
- hcd = usb_create_hcd(&s5p_ehci_hc_driver,
+ hcd = usb_create_hcd(&exynos_ehci_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
}
- s5p_ehci = to_s5p_ehci(hcd);
+ exynos_ehci = to_exynos_ehci(hcd);
if (of_device_is_compatible(pdev->dev.of_node,
- "samsung,exynos5440-ehci")) {
- s5p_ehci->pdata = &empty_platdata;
+ "samsung,exynos5440-ehci"))
goto skip_phy;
- }
phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
- /* Fallback to pdata */
- if (!pdata) {
- usb_put_hcd(hcd);
- dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- } else {
- s5p_ehci->pdata = pdata;
- }
+ usb_put_hcd(hcd);
+ dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
} else {
- s5p_ehci->phy = phy;
- s5p_ehci->otg = phy->otg;
+ exynos_ehci->phy = phy;
+ exynos_ehci->otg = phy->otg;
}
skip_phy:
- s5p_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
+ exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
- if (IS_ERR(s5p_ehci->clk)) {
+ if (IS_ERR(exynos_ehci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
- err = PTR_ERR(s5p_ehci->clk);
+ err = PTR_ERR(exynos_ehci->clk);
goto fail_clk;
}
- err = clk_prepare_enable(s5p_ehci->clk);
+ err = clk_prepare_enable(exynos_ehci->clk);
if (err)
goto fail_clk;
@@ -162,13 +149,11 @@ skip_phy:
goto fail_io;
}
- if (s5p_ehci->otg)
- s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self);
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (s5p_ehci->phy)
- usb_phy_init(s5p_ehci->phy);
- else if (s5p_ehci->pdata->phy_init)
- s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
+ if (exynos_ehci->phy)
+ usb_phy_init(exynos_ehci->phy);
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
@@ -187,33 +172,29 @@ skip_phy:
return 0;
fail_add_hcd:
- if (s5p_ehci->phy)
- usb_phy_shutdown(s5p_ehci->phy);
- else if (s5p_ehci->pdata->phy_exit)
- s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
+ if (exynos_ehci->phy)
+ usb_phy_shutdown(exynos_ehci->phy);
fail_io:
- clk_disable_unprepare(s5p_ehci->clk);
+ clk_disable_unprepare(exynos_ehci->clk);
fail_clk:
usb_put_hcd(hcd);
return err;
}
-static int s5p_ehci_remove(struct platform_device *pdev)
+static int exynos_ehci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
usb_remove_hcd(hcd);
- if (s5p_ehci->otg)
- s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self);
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (s5p_ehci->phy)
- usb_phy_shutdown(s5p_ehci->phy);
- else if (s5p_ehci->pdata->phy_exit)
- s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
+ if (exynos_ehci->phy)
+ usb_phy_shutdown(exynos_ehci->phy);
- clk_disable_unprepare(s5p_ehci->clk);
+ clk_disable_unprepare(exynos_ehci->clk);
usb_put_hcd(hcd);
@@ -221,45 +202,39 @@ static int s5p_ehci_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int s5p_ehci_suspend(struct device *dev)
+static int exynos_ehci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd);
- struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
bool do_wakeup = device_may_wakeup(dev);
int rc;
rc = ehci_suspend(hcd, do_wakeup);
- if (s5p_ehci->otg)
- s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self);
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (s5p_ehci->phy)
- usb_phy_shutdown(s5p_ehci->phy);
- else if (s5p_ehci->pdata->phy_exit)
- s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
+ if (exynos_ehci->phy)
+ usb_phy_shutdown(exynos_ehci->phy);
- clk_disable_unprepare(s5p_ehci->clk);
+ clk_disable_unprepare(exynos_ehci->clk);
return rc;
}
-static int s5p_ehci_resume(struct device *dev)
+static int exynos_ehci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd);
- struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
- clk_prepare_enable(s5p_ehci->clk);
+ clk_prepare_enable(exynos_ehci->clk);
- if (s5p_ehci->otg)
- s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self);
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
- if (s5p_ehci->phy)
- usb_phy_init(s5p_ehci->phy);
- else if (s5p_ehci->pdata->phy_init)
- s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
+ if (exynos_ehci->phy)
+ usb_phy_init(exynos_ehci->phy);
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
@@ -268,13 +243,13 @@ static int s5p_ehci_resume(struct device *dev)
return 0;
}
#else
-#define s5p_ehci_suspend NULL
-#define s5p_ehci_resume NULL
+#define exynos_ehci_suspend NULL
+#define exynos_ehci_resume NULL
#endif
-static const struct dev_pm_ops s5p_ehci_pm_ops = {
- .suspend = s5p_ehci_suspend,
- .resume = s5p_ehci_resume,
+static const struct dev_pm_ops exynos_ehci_pm_ops = {
+ .suspend = exynos_ehci_suspend,
+ .resume = exynos_ehci_resume,
};
#ifdef CONFIG_OF
@@ -286,40 +261,40 @@ static const struct of_device_id exynos_ehci_match[] = {
MODULE_DEVICE_TABLE(of, exynos_ehci_match);
#endif
-static struct platform_driver s5p_ehci_driver = {
- .probe = s5p_ehci_probe,
- .remove = s5p_ehci_remove,
+static struct platform_driver exynos_ehci_driver = {
+ .probe = exynos_ehci_probe,
+ .remove = exynos_ehci_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "s5p-ehci",
+ .name = "exynos-ehci",
.owner = THIS_MODULE,
- .pm = &s5p_ehci_pm_ops,
+ .pm = &exynos_ehci_pm_ops,
.of_match_table = of_match_ptr(exynos_ehci_match),
}
};
-static const struct ehci_driver_overrides s5p_overrides __initdata = {
- .extra_priv_size = sizeof(struct s5p_ehci_hcd),
+static const struct ehci_driver_overrides exynos_overrides __initdata = {
+ .extra_priv_size = sizeof(struct exynos_ehci_hcd),
};
-static int __init ehci_s5p_init(void)
+static int __init ehci_exynos_init(void)
{
if (usb_disabled())
return -ENODEV;
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- ehci_init_driver(&s5p_ehci_hc_driver, &s5p_overrides);
- return platform_driver_register(&s5p_ehci_driver);
+ ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
+ return platform_driver_register(&exynos_ehci_driver);
}
-module_init(ehci_s5p_init);
+module_init(ehci_exynos_init);
-static void __exit ehci_s5p_cleanup(void)
+static void __exit ehci_exynos_cleanup(void)
{
- platform_driver_unregister(&s5p_ehci_driver);
+ platform_driver_unregister(&exynos_ehci_driver);
}
-module_exit(ehci_s5p_cleanup);
+module_exit(ehci_exynos_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_ALIAS("platform:s5p-ehci");
+MODULE_ALIAS("platform:exynos-ehci");
MODULE_AUTHOR("Jingoo Han");
MODULE_AUTHOR("Joonyoung Shim");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index f2407b2e8a9..a06d5012201 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -57,7 +57,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
pr_debug("initializing FSL-SOC USB Controller\n");
/* Need platform data for setup */
- pdata = (struct fsl_usb2_platform_data *)dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev,
"No platform data for %s.\n", dev_name(&pdev->dev));
@@ -664,7 +664,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 83ab51af250..b52a66ce92e 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 86ab9fd9fe9..e8ba4c44223 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -110,6 +110,9 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
#include "ehci.h"
#include "pci-quirks.h"
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+ struct ehci_tt *tt);
+
/*
* The MosChip MCS9990 controller updates its microframe counter
* a little before the frame counter, and occasionally we will read
@@ -484,6 +487,7 @@ static int ehci_init(struct usb_hcd *hcd)
INIT_LIST_HEAD(&ehci->intr_qh_list);
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
+ INIT_LIST_HEAD(&ehci->tt_list);
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
@@ -956,6 +960,7 @@ rescan:
goto idle_timeout;
/* BUG_ON(!list_empty(&stream->free_list)); */
+ reserve_release_iso_bandwidth(ehci, stream, -1);
kfree(stream);
goto done;
}
@@ -982,6 +987,8 @@ idle_timeout:
if (qh->clearing_tt)
goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
+ if (qh->ps.bw_uperiod)
+ reserve_release_intr_bandwidth(ehci, qh, -1);
qh_destroy(ehci, qh);
break;
}
@@ -1022,7 +1029,6 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
* the toggle bit in the QH.
*/
if (qh) {
- usb_settoggle(qh->dev, epnum, is_out, 0);
if (!list_empty(&qh->qtd_list)) {
WARN_ONCE(1, "clear_halt for a busy endpoint\n");
} else {
@@ -1030,6 +1036,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
+ usb_settoggle(qh->ps.udev, epnum, is_out, 0);
qh->exception = 1;
if (eptype == USB_ENDPOINT_XFER_BULK)
start_unlink_async(ehci, qh);
@@ -1048,6 +1055,19 @@ static int ehci_get_frame (struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
+/* Device addition and removal */
+
+static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ spin_lock_irq(&ehci->lock);
+ drop_tt(udev);
+ spin_unlock_irq(&ehci->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_PM
/* suspend/resume, section 4.3 */
@@ -1075,6 +1095,14 @@ int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irq(&ehci->lock);
+ synchronize_irq(hcd->irq);
+
+ /* Check for race with a wakeup request */
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ ehci_resume(hcd, false);
+ return -EBUSY;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(ehci_suspend);
@@ -1158,7 +1186,7 @@ static const struct hc_driver ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -1191,6 +1219,11 @@ static const struct hc_driver ehci_hc_driver = {
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+
+ /*
+ * device support
+ */
+ .free_dev = ehci_remove_device,
};
void ehci_init_driver(struct hc_driver *drv,
@@ -1238,11 +1271,6 @@ MODULE_LICENSE ("GPL");
#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
#endif
-#ifdef CONFIG_USB_W90X900_EHCI
-#include "ehci-w90x900.c"
-#define PLATFORM_DRIVER ehci_hcd_w90x900_driver
-#endif
-
#ifdef CONFIG_USB_OCTEON_EHCI
#include "ehci-octeon.c"
#define PLATFORM_DRIVER ehci_octeon_driver
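The ehci_suspend() hunk above closes a race with a remote-wakeup request: once the interrupt line is quiesced, a pending wakeup aborts the suspend with -EBUSY so the PM core retries instead of sleeping through the event. A hedged sketch of that sequence in isolation; the helper name is hypothetical.

#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ehci.h"

/* Illustrative only: the tail of the suspend path, after the root hub has
 * been stopped and HCD_FLAG_HW_ACCESSIBLE cleared under ehci->lock. */
static int example_suspend_tail(struct usb_hcd *hcd, bool do_wakeup)
{
	/* No interrupt handler can still be running past this point. */
	synchronize_irq(hcd->irq);

	/* A wakeup raced with the suspend: undo it and let PM retry. */
	if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
		ehci_resume(hcd, false);
		return -EBUSY;
	}
	return 0;
}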
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 52a77734a22..c0fb6a8ae6a 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -224,11 +224,11 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
hw->hw_next = EHCI_LIST_END(ehci);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
hw->hw_alt_next = EHCI_LIST_END(ehci);
- hw->hw_token &= ~QTD_STS_ACTIVE;
ehci->dummy->hw = hw;
for (i = 0; i < ehci->periodic_size; i++)
- ehci->periodic[i] = ehci->dummy->qh_dma;
+ ehci->periodic[i] = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
} else {
for (i = 0; i < ehci->periodic_size; i++)
ehci->periodic[i] = EHCI_LIST_END(ehci);
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 0f717dc688b..f341651d6f6 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -42,7 +42,6 @@
static const char hcd_name[] = "ehci-msm";
static struct hc_driver __read_mostly msm_hc_driver;
-static struct usb_phy *phy;
static int ehci_msm_reset(struct usb_hcd *hcd)
{
@@ -70,6 +69,7 @@ static int ehci_msm_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct resource *res;
+ struct usb_phy *phy;
int ret;
	dev_dbg(&pdev->dev, "ehci_msm probe\n");
@@ -108,10 +108,14 @@ static int ehci_msm_probe(struct platform_device *pdev)
* powering up VBUS, mapping of registers address space and power
* management.
*/
- phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (pdev->dev.of_node)
+ phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ else
+ phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "unable to find transceiver\n");
- ret = -ENODEV;
+ ret = -EPROBE_DEFER;
goto put_hcd;
}
@@ -121,6 +125,7 @@ static int ehci_msm_probe(struct platform_device *pdev)
goto put_hcd;
}
+ hcd->phy = phy;
device_init_wakeup(&pdev->dev, 1);
/*
* OTG device parent of HCD takes care of putting
@@ -147,7 +152,7 @@ static int ehci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
- otg_set_host(phy->otg, NULL);
+ otg_set_host(hcd->phy->otg, NULL);
/* FIXME: need to call usb_remove_hcd() here? */
@@ -186,12 +191,19 @@ static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
.resume = ehci_msm_pm_resume,
};
+static struct of_device_id msm_ehci_dt_match[] = {
+ { .compatible = "qcom,ehci-host", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
+
static struct platform_driver ehci_msm_driver = {
.probe = ehci_msm_probe,
.remove = ehci_msm_remove,
.driver = {
.name = "msm_hsusb_host",
.pm = &ehci_msm_dev_pm_ops,
+ .of_match_table = msm_ehci_dt_match,
},
};
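The MSM changes above switch to per-instance PHY lookup and treat a missing transceiver as a deferral rather than a hard failure. A hedged sketch of that lookup policy; the helper name example_get_phy() is hypothetical, while the "usb-phy" phandle name and the USB_PHY_TYPE_USB2 fallback come from the hunk itself.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

/* Illustrative only: prefer a DT phandle, fall back to the registered
 * USB2 transceiver, and defer the probe until a PHY driver has bound. */
static struct usb_phy *example_get_phy(struct platform_device *pdev)
{
	struct usb_phy *phy;

	if (pdev->dev.of_node)
		phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
	else
		phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);

	if (IS_ERR(phy))
		return ERR_PTR(-EPROBE_DEFER);

	return phy;
}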
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 35cdbd88bbb..417c10da945 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index 45cc0015841..4c528b2c033 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -116,8 +116,10 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
* We can DMA from anywhere. But the descriptors must be in
* the lower 4GB.
*/
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 78b01fa475b..6fa82d6b766 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -104,7 +104,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
- int ret = -ENODEV;
+ int ret;
int irq;
int i;
struct omap_hcd *omap;
@@ -144,11 +144,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ ret = -ENODEV;
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index d1dfb9db5b4..2ba76730e65 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -180,10 +180,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err1;
if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 854c2ec7b69..3e86bf4371b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -58,8 +58,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- struct pci_dev *p_smbus;
- u8 rev;
u32 temp;
int retval;
@@ -175,22 +173,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* SB600 and old version of SB700 have a bug in EHCI controller,
* which causes usb devices lose response in some cases.
*/
- if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
- p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_SBX00_SMBUS,
- NULL);
- if (!p_smbus)
- break;
- rev = p_smbus->revision;
- if ((pdev->device == 0x4386) || (rev == 0x3a)
- || (rev == 0x3b)) {
- u8 tmp;
- ehci_info(ehci, "applying AMD SB600/SB700 USB "
- "freeze workaround\n");
- pci_read_config_byte(pdev, 0x53, &tmp);
- pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
- }
- pci_dev_put(p_smbus);
+ if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
+ usb_amd_hang_symptom_quirk()) {
+ u8 tmp;
+ ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
+ pci_read_config_byte(pdev, 0x53, &tmp);
+ pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
break;
case PCI_VENDOR_ID_NETMOS:
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f6b790ca8cf..7f30b7168d5 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -78,7 +78,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct resource *res_mem;
struct usb_ehci_pdata *pdata;
int irq;
- int err = -ENOMEM;
+ int err;
if (usb_disabled())
return -ENODEV;
@@ -89,10 +89,10 @@ static int ehci_platform_probe(struct platform_device *dev)
*/
if (!dev_get_platdata(&dev->dev))
dev->dev.platform_data = &ehci_platform_defaults;
- if (!dev->dev.dma_mask)
- dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- if (!dev->dev.coherent_dma_mask)
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
pdata = dev_get_platdata(&dev->dev);
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 601e208bd78..893b707f000 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
#else
.irq = ehci_irq,
#endif
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 932293fa32d..875d2fcc9e0 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -16,6 +16,8 @@
#include <linux/signal.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -28,7 +30,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index fd983771b02..8188542ba17 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index a7f776a13eb..db05bd8ee9d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -105,9 +105,9 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
is_out = qh->is_out;
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
- if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
+ if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
- usb_settoggle (qh->dev, epnum, is_out, 1);
+ usb_settoggle(qh->ps.udev, epnum, is_out, 1);
}
}
@@ -247,8 +247,6 @@ static int qtd_copy_status (
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
-__releases(ehci->lock)
-__acquires(ehci->lock)
{
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
/* ... update hc-wide periodic stats */
@@ -274,11 +272,8 @@ __acquires(ehci->lock)
urb->actual_length, urb->transfer_buffer_length);
#endif
- /* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
- spin_unlock (&ehci->lock);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
- spin_lock (&ehci->lock);
}
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
@@ -802,26 +797,35 @@ qh_make (
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
- qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ unsigned tmp;
+
+ qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
- qh->start = NO_FRAME;
+ qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
- qh->c_usecs = 0;
+ qh->ps.c_usecs = 0;
qh->gap_uf = 0;
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
+ if (urb->interval > 1 && urb->interval < 8) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
urb->interval = 1;
- } else if (qh->period > ehci->periodic_size) {
- qh->period = ehci->periodic_size;
- urb->interval = qh->period << 3;
+ } else if (urb->interval > ehci->periodic_size << 3) {
+ urb->interval = ehci->periodic_size << 3;
}
+ qh->ps.period = urb->interval >> 3;
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
} else {
int think_time;
@@ -831,27 +835,35 @@ qh_make (
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
- qh->c_usecs = qh->usecs + HS_USECS (0);
- qh->usecs = HS_USECS (1);
+ qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
+ qh->ps.usecs = HS_USECS(1);
} else { // SPLIT+DATA, gap, CSPLIT
- qh->usecs += HS_USECS (1);
- qh->c_usecs = HS_USECS (0);
+ qh->ps.usecs += HS_USECS(1);
+ qh->ps.c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
- qh->tt_usecs = NS_TO_US (think_time +
+ qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
- qh->period = urb->interval;
- if (qh->period > ehci->periodic_size) {
- qh->period = ehci->periodic_size;
- urb->interval = qh->period;
- }
+ if (urb->interval > ehci->periodic_size)
+ urb->interval = ehci->periodic_size;
+ qh->ps.period = urb->interval;
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+ urb->ep->desc.bInterval);
+ tmp = rounddown_pow_of_two(tmp);
+
+ /* Allow urb->interval to override */
+ qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_uperiod = qh->ps.bw_period << 3;
}
}
/* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
+ qh->ps.udev = urb->dev;
+ qh->ps.ep = urb->ep;
/* using TT? */
switch (urb->dev->speed) {
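The qh_make() changes above separate the wire period (qh->ps.period) from a power-of-two bandwidth-allocation period, capped by the budgeting window and by urb->interval. A hedged sketch of the high-speed arithmetic; the helper name is hypothetical and EHCI_BANDWIDTH_SIZE is assumed to be 64 microframes, as defined elsewhere in this series.

#include <linux/kernel.h>

/* Illustrative only: bandwidth-allocation period, in microframes, for a
 * high-speed interrupt endpoint.
 * Example: bInterval = 5, urb->interval = 8  ->  min(min(64, 16), 8) = 8. */
static unsigned int example_bw_uperiod(unsigned int bInterval,
				       unsigned int urb_interval)
{
	unsigned int tmp = min_t(unsigned int, 64 /* EHCI_BANDWIDTH_SIZE */,
				 1U << (bInterval - 1));

	/* urb->interval may request a more frequent schedule. */
	return min_t(unsigned int, tmp, urb_interval);
}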
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 85dd24ed97a..e113fd73aea 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -103,83 +103,210 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
*hw_p = *shadow_next_periodic(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
else
- *hw_p = ehci->dummy->qh_dma;
+ *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
-/* how many of the uframe's 125 usecs are allocated? */
-static unsigned short
-periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
+/*-------------------------------------------------------------------------*/
+
+/* Bandwidth and TT management */
+
+/* Find the TT data structure for this device; create it if necessary */
+static struct ehci_tt *find_tt(struct usb_device *udev)
{
- __hc32 *hw_p = &ehci->periodic [frame];
- union ehci_shadow *q = &ehci->pshadow [frame];
- unsigned usecs = 0;
- struct ehci_qh_hw *hw;
-
- while (q->ptr) {
- switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
- case Q_TYPE_QH:
- hw = q->qh->hw;
- /* is it in the S-mask? */
- if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
- usecs += q->qh->usecs;
- /* ... or C-mask? */
- if (hw->hw_info2 & cpu_to_hc32(ehci,
- 1 << (8 + uframe)))
- usecs += q->qh->c_usecs;
- hw_p = &hw->hw_next;
- q = &q->qh->qh_next;
- break;
- // case Q_TYPE_FSTN:
- default:
- /* for "save place" FSTNs, count the relevant INTR
- * bandwidth from the previous frame
- */
- if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
- ehci_dbg (ehci, "ignoring FSTN cost ...\n");
- }
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- break;
- case Q_TYPE_ITD:
- if (q->itd->hw_transaction[uframe])
- usecs += q->itd->stream->usecs;
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- break;
- case Q_TYPE_SITD:
- /* is it in the S-mask? (count SPLIT, DATA) */
- if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
- 1 << uframe)) {
- if (q->sitd->hw_fullspeed_ep &
- cpu_to_hc32(ehci, 1<<31))
- usecs += q->sitd->stream->usecs;
- else /* worst case for OUT start-split */
- usecs += HS_USECS_ISO (188);
- }
+ struct usb_tt *utt = udev->tt;
+ struct ehci_tt *tt, **tt_index, **ptt;
+ unsigned port;
+ bool allocated_index = false;
+
+ if (!utt)
+ return NULL; /* Not below a TT */
+
+ /*
+ * Find/create our data structure.
+ * For hubs with a single TT, we get it directly.
+ * For hubs with multiple TTs, there's an extra level of pointers.
+ */
+ tt_index = NULL;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ if (!tt_index) { /* Create the index array */
+ tt_index = kzalloc(utt->hub->maxchild *
+ sizeof(*tt_index), GFP_ATOMIC);
+ if (!tt_index)
+ return ERR_PTR(-ENOMEM);
+ utt->hcpriv = tt_index;
+ allocated_index = true;
+ }
+ port = udev->ttport - 1;
+ ptt = &tt_index[port];
+ } else {
+ port = 0;
+ ptt = (struct ehci_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt) { /* Create the ehci_tt */
+ struct ehci_hcd *ehci =
+ hcd_to_ehci(bus_to_hcd(udev->bus));
- /* ... C-mask? (count CSPLIT, DATA) */
- if (q->sitd->hw_uframe &
- cpu_to_hc32(ehci, 1 << (8 + uframe))) {
- /* worst case for IN complete-split */
- usecs += q->sitd->stream->c_usecs;
+ tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
+ if (!tt) {
+ if (allocated_index) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
}
+ return ERR_PTR(-ENOMEM);
+ }
+ list_add_tail(&tt->tt_list, &ehci->tt_list);
+ INIT_LIST_HEAD(&tt->ps_list);
+ tt->usb_tt = utt;
+ tt->tt_port = port;
+ *ptt = tt;
+ }
- hw_p = &q->sitd->hw_next;
- q = &q->sitd->sitd_next;
- break;
+ return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct ehci_tt *tt, **tt_index, **ptt;
+ int cnt, i;
+
+ if (!utt || !utt->hcpriv)
+ return; /* Not below a TT, or never allocated */
+
+ cnt = 0;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ ptt = &tt_index[udev->ttport - 1];
+
+ /* How many entries are left in tt_index? */
+ for (i = 0; i < utt->hub->maxchild; ++i)
+ cnt += !!tt_index[i];
+ } else {
+ tt_index = NULL;
+ ptt = (struct ehci_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt || !list_empty(&tt->ps_list))
+ return; /* never allocated, or still in use */
+
+ list_del(&tt->tt_list);
+ *ptt = NULL;
+ kfree(tt);
+ if (cnt == 1) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+}
+
+static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
+ struct ehci_per_sched *ps)
+{
+ dev_dbg(&ps->udev->dev,
+ "ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
+ ps->ep->desc.bEndpointAddress,
+ (sign >= 0 ? "reserve" : "release"), type,
+ (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
+ ps->phase, ps->phase_uf, ps->period,
+ ps->usecs, ps->c_usecs, ps->cs_mask);
+}
+
+static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
+ struct ehci_qh *qh, int sign)
+{
+ unsigned start_uf;
+ unsigned i, j, m;
+ int usecs = qh->ps.usecs;
+ int c_usecs = qh->ps.c_usecs;
+ int tt_usecs = qh->ps.tt_usecs;
+ struct ehci_tt *tt;
+
+ if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
+ return;
+ start_uf = qh->ps.bw_phase << 3;
+
+ bandwidth_dbg(ehci, sign, "intr", &qh->ps);
+
+ if (sign < 0) { /* Release bandwidth */
+ usecs = -usecs;
+ c_usecs = -c_usecs;
+ tt_usecs = -tt_usecs;
+ }
+
+ /* Entire transaction (high speed) or start-split (full/low speed) */
+ for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += qh->ps.bw_uperiod)
+ ehci->bandwidth[i] += usecs;
+
+ /* Complete-split (full/low speed) */
+ if (qh->ps.c_usecs) {
+ /* NOTE: adjustments needed for FSTN */
+ for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += qh->ps.bw_uperiod) {
+ for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
+ if (qh->ps.cs_mask & m)
+ ehci->bandwidth[i+j] += c_usecs;
+ }
}
}
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
- if (usecs > ehci->uframe_periodic_max)
- ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
- frame * 8 + uframe, usecs);
-#endif
- return usecs;
+
+ /* FS/LS bus bandwidth */
+ if (tt_usecs) {
+ tt = find_tt(qh->ps.udev);
+ if (sign > 0)
+ list_add_tail(&qh->ps.ps_list, &tt->ps_list);
+ else
+ list_del(&qh->ps.ps_list);
+
+ for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
+ i += qh->ps.bw_period)
+ tt->bandwidth[i] += tt_usecs;
+ }
}
/*-------------------------------------------------------------------------*/
-static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+ struct ehci_tt *tt)
+{
+ struct ehci_per_sched *ps;
+ unsigned uframe, uf, x;
+ u8 *budget_line;
+
+ if (!tt)
+ return;
+ memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
+
+ /* Add up the contributions from all the endpoints using this TT */
+ list_for_each_entry(ps, &tt->ps_list, ps_list) {
+ for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += ps->bw_uperiod) {
+ budget_line = &budget_table[uframe];
+ x = ps->tt_usecs;
+
+ /* propagate the time forward */
+ for (uf = ps->phase_uf; uf < 8; ++uf) {
+ x += budget_line[uf];
+
+ /* Each microframe lasts 125 us */
+ if (x <= 125) {
+ budget_line[uf] = x;
+ break;
+ } else {
+ budget_line[uf] = 125;
+ x -= 125;
+ }
+ }
+ }
+ }
+}
+
+static int __maybe_unused same_tt(struct usb_device *dev1,
+ struct usb_device *dev2)
{
if (!dev1->tt || !dev2->tt)
return 0;
@@ -227,68 +354,6 @@ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
}
}
-/* How many of the tt's periodic downstream 1000 usecs are allocated?
- *
- * While this measures the bandwidth in terms of usecs/uframe,
- * the low/fullspeed bus has no notion of uframes, so any particular
- * low/fullspeed transfer can "carry over" from one uframe to the next,
- * since the TT just performs downstream transfers in sequence.
- *
- * For example two separate 100 usec transfers can start in the same uframe,
- * and the second one would "carry over" 75 usecs into the next uframe.
- */
-static void
-periodic_tt_usecs (
- struct ehci_hcd *ehci,
- struct usb_device *dev,
- unsigned frame,
- unsigned short tt_usecs[8]
-)
-{
- __hc32 *hw_p = &ehci->periodic [frame];
- union ehci_shadow *q = &ehci->pshadow [frame];
- unsigned char uf;
-
- memset(tt_usecs, 0, 16);
-
- while (q->ptr) {
- switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
- case Q_TYPE_ITD:
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- continue;
- case Q_TYPE_QH:
- if (same_tt(dev, q->qh->dev)) {
- uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
- tt_usecs[uf] += q->qh->tt_usecs;
- }
- hw_p = &q->qh->hw->hw_next;
- q = &q->qh->qh_next;
- continue;
- case Q_TYPE_SITD:
- if (same_tt(dev, q->sitd->urb->dev)) {
- uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
- tt_usecs[uf] += q->sitd->stream->tt_usecs;
- }
- hw_p = &q->sitd->hw_next;
- q = &q->sitd->sitd_next;
- continue;
- // case Q_TYPE_FSTN:
- default:
- ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
- frame);
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- }
- }
-
- carryover_tt_bandwidth(tt_usecs);
-
- if (max_tt_usecs[7] < tt_usecs[7])
- ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
- frame, tt_usecs[7] - max_tt_usecs[7]);
-}
-
/*
* Return true if the device's tt's downstream bus is available for a
* periodic transfer of the specified length (usecs), starting at the
@@ -312,20 +377,29 @@ periodic_tt_usecs (
*/
static int tt_available (
struct ehci_hcd *ehci,
- unsigned period,
- struct usb_device *dev,
+ struct ehci_per_sched *ps,
+ struct ehci_tt *tt,
unsigned frame,
- unsigned uframe,
- u16 usecs
+ unsigned uframe
)
{
+ unsigned period = ps->bw_period;
+ unsigned usecs = ps->tt_usecs;
+
if ((period == 0) || (uframe >= 7)) /* error */
return 0;
- for (; frame < ehci->periodic_size; frame += period) {
- unsigned short tt_usecs[8];
+ for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
+ frame += period) {
+ unsigned i, uf;
+ unsigned short tt_usecs[8];
- periodic_tt_usecs (ehci, dev, frame, tt_usecs);
+ if (tt->bandwidth[frame] + usecs > 900)
+ return 0;
+
+ uf = frame << 3;
+ for (i = 0; i < 8; (++i, ++uf))
+ tt_usecs[i] = ehci->tt_budget[uf];
if (max_tt_usecs[uframe] <= tt_usecs[uframe])
return 0;
@@ -337,7 +411,7 @@ static int tt_available (
*/
if (125 < usecs) {
int ufs = (usecs / 125);
- int i;
+
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
if (0 < tt_usecs[i])
return 0;
@@ -391,7 +465,7 @@ static int tt_no_collision (
continue;
case Q_TYPE_QH:
hw = here.qh->hw;
- if (same_tt (dev, here.qh->dev)) {
+ if (same_tt(dev, here.qh->ps.udev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
@@ -471,19 +545,19 @@ static void disable_periodic(struct ehci_hcd *ehci)
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
- unsigned period = qh->period;
+ unsigned period = qh->ps.period;
- dev_dbg (&qh->dev->dev,
+ dev_dbg(&qh->ps.udev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
- for (i = qh->start; i < ehci->periodic_size; i += period) {
+ for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
union ehci_shadow *prev = &ehci->pshadow[i];
__hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
@@ -503,7 +577,7 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
- if (qh->period > here.qh->period)
+ if (qh->ps.period > here.qh->ps.period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw->hw_next;
@@ -523,10 +597,10 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->xacterrs = 0;
qh->exception = 0;
- /* update per-qh bandwidth for usbfs */
- ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
+ /* update per-qh bandwidth for debugfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
+ ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+ : (qh->ps.usecs * 8);
list_add(&qh->intr_node, &ehci->intr_qh_list);
@@ -556,22 +630,21 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
*/
/* high bandwidth, or otherwise part of every microframe */
- if ((period = qh->period) == 0)
- period = 1;
+ period = qh->ps.period ? : 1;
- for (i = qh->start; i < ehci->periodic_size; i += period)
+ for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
periodic_unlink (ehci, i, qh);
- /* update per-qh bandwidth for usbfs */
- ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
+ /* update per-qh bandwidth for debugfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
+ ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+ : (qh->ps.usecs * 8);
- dev_dbg (&qh->dev->dev,
+ dev_dbg(&qh->ps.udev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
- qh->period,
+ qh->ps.period,
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
@@ -694,11 +767,9 @@ static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
- unsigned period,
+ unsigned uperiod,
unsigned usecs
) {
- int claimed;
-
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
@@ -708,25 +779,10 @@ static int check_period (
/* convert "usecs we need" to "max already claimed" */
usecs = ehci->uframe_periodic_max - usecs;
- /* we "know" 2 and 4 uframe intervals were rejected; so
- * for period 0, check _every_ microframe in the schedule.
- */
- if (unlikely (period == 0)) {
- do {
- for (uframe = 0; uframe < 7; uframe++) {
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
- }
- } while ((frame += 1) < ehci->periodic_size);
-
- /* just check the specified uframe, at that period */
- } else {
- do {
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
- } while ((frame += period) < ehci->periodic_size);
+ for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += uperiod) {
+ if (ehci->bandwidth[uframe] > usecs)
+ return 0;
}
// success!
@@ -737,40 +793,40 @@ static int check_intr_schedule (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
- const struct ehci_qh *qh,
- __hc32 *c_maskp
+ struct ehci_qh *qh,
+ unsigned *c_maskp,
+ struct ehci_tt *tt
)
{
int retval = -ENOSPC;
u8 mask = 0;
- if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
- if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
+ if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
goto done;
- if (!qh->c_usecs) {
+ if (!qh->ps.c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
- if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
- qh->tt_usecs)) {
+ if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
unsigned i;
/* TODO : this may need FSTN for SSPLIT in uframe 5. */
- for (i=uframe+1; i<8 && i<uframe+4; i++)
- if (!check_period (ehci, frame, i,
- qh->period, qh->c_usecs))
+ for (i = uframe+2; i < 8 && i <= uframe+4; i++)
+ if (!check_period(ehci, frame, i,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
else
mask |= 1 << i;
retval = 0;
- *c_maskp = cpu_to_hc32(ehci, mask << 8);
+ *c_maskp = mask;
}
#else
/* Make sure this tt's buffer is also available for CSPLITs.
@@ -781,15 +837,15 @@ static int check_intr_schedule (
* one smart pass...
*/
mask = 0x03 << (uframe + qh->gap_uf);
- *c_maskp = cpu_to_hc32(ehci, mask << 8);
+ *c_maskp = mask;
mask |= 1 << uframe;
- if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
- if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
- qh->period, qh->c_usecs))
+ if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
+ if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
- if (!check_period (ehci, frame, uframe + qh->gap_uf,
- qh->period, qh->c_usecs))
+ if (!check_period(ehci, frame, uframe + qh->gap_uf,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
retval = 0;
}
@@ -803,62 +859,67 @@ done:
*/
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int status;
+ int status = 0;
unsigned uframe;
- __hc32 c_mask;
- unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ unsigned c_mask;
struct ehci_qh_hw *hw = qh->hw;
+ struct ehci_tt *tt;
hw->hw_next = EHCI_LIST_END(ehci);
- frame = qh->start;
/* reuse the previous schedule slots, if we can */
- if (frame < qh->period) {
- uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
- status = check_intr_schedule (ehci, frame, --uframe,
- qh, &c_mask);
- } else {
- uframe = 0;
- c_mask = 0;
- status = -ENOSPC;
+ if (qh->ps.phase != NO_FRAME) {
+ ehci_dbg(ehci, "reused qh %p schedule\n", qh);
+ return 0;
+ }
+
+ uframe = 0;
+ c_mask = 0;
+ tt = find_tt(qh->ps.udev);
+ if (IS_ERR(tt)) {
+ status = PTR_ERR(tt);
+ goto done;
}
+ compute_tt_budget(ehci->tt_budget, tt);
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
- if (status) {
- /* "normal" case, uframing flexible except with splits */
- if (qh->period) {
- int i;
-
- for (i = qh->period; status && i > 0; --i) {
- frame = ++ehci->random_frame % qh->period;
- for (uframe = 0; uframe < 8; uframe++) {
- status = check_intr_schedule (ehci,
- frame, uframe, qh,
- &c_mask);
- if (status == 0)
- break;
- }
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->ps.bw_period) {
+ int i;
+ unsigned frame;
+
+ for (i = qh->ps.bw_period; i > 0; --i) {
+ frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(ehci,
+ frame, uframe, qh, &c_mask, tt);
+ if (status == 0)
+ goto got_it;
}
-
- /* qh->period == 0 means every uframe */
- } else {
- frame = 0;
- status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
}
- if (status)
- goto done;
- qh->start = frame;
- /* reset S-frame and (maybe) C-frame masks */
- hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
- hw->hw_info2 |= qh->period
- ? cpu_to_hc32(ehci, 1 << uframe)
- : cpu_to_hc32(ehci, QH_SMASK);
- hw->hw_info2 |= c_mask;
- } else
- ehci_dbg (ehci, "reused qh %p schedule\n", qh);
+ /* qh->ps.bw_period == 0 means every uframe */
+ } else {
+ status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
+ }
+ if (status)
+ goto done;
+
+ got_it:
+ qh->ps.phase = (qh->ps.period ? ehci->random_frame &
+ (qh->ps.period - 1) : 0);
+ qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
+ qh->ps.phase_uf = uframe;
+ qh->ps.cs_mask = qh->ps.period ?
+ (c_mask << 8) | (1 << uframe) :
+ QH_SMASK;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
+ reserve_release_intr_bandwidth(ehci, qh, 1);
done:
return status;
@@ -969,7 +1030,8 @@ iso_stream_alloc (gfp_t mem_flags)
if (likely (stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
- stream->next_uframe = -1;
+ stream->next_uframe = NO_FRAME;
+ stream->ps.phase = NO_FRAME;
}
return stream;
}
@@ -978,25 +1040,24 @@ static void
iso_stream_init (
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
- struct usb_device *dev,
- int pipe,
- unsigned interval
+ struct urb *urb
)
{
static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+ struct usb_device *dev = urb->dev;
u32 buf1;
unsigned epnum, maxp;
int is_input;
- long bandwidth;
+ unsigned tmp;
/*
* this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacket field
*/
- epnum = usb_pipeendpoint (pipe);
- is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
- maxp = usb_maxpacket(dev, pipe, !is_input);
+ epnum = usb_pipeendpoint(urb->pipe);
+ is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
+ maxp = usb_endpoint_maxp(&urb->ep->desc);
if (is_input) {
buf1 = (1 << 11);
} else {
@@ -1020,9 +1081,19 @@ iso_stream_init (
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
*/
- stream->usecs = HS_USECS_ISO (maxp);
- bandwidth = stream->usecs * 8;
- bandwidth /= interval;
+ stream->ps.usecs = HS_USECS_ISO(maxp);
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+
+ stream->uperiod = urb->interval;
+ stream->ps.period = urb->interval >> 3;
+ stream->bandwidth = stream->ps.usecs * 8 /
+ stream->ps.bw_uperiod;
} else {
u32 addr;
@@ -1036,36 +1107,46 @@ iso_stream_init (
addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
- stream->usecs = HS_USECS_ISO (maxp);
+ stream->ps.usecs = HS_USECS_ISO(maxp);
think_time = dev->tt ? dev->tt->think_time : 0;
- stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
+ stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
hs_transfers = max (1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
addr |= 1 << 31;
- stream->c_usecs = stream->usecs;
- stream->usecs = HS_USECS_ISO (1);
- stream->raw_mask = 1;
+ stream->ps.c_usecs = stream->ps.usecs;
+ stream->ps.usecs = HS_USECS_ISO(1);
+ stream->ps.cs_mask = 1;
/* c-mask as specified in USB 2.0 11.18.4 3.c */
tmp = (1 << (hs_transfers + 2)) - 1;
- stream->raw_mask |= tmp << (8 + 2);
+ stream->ps.cs_mask |= tmp << (8 + 2);
} else
- stream->raw_mask = smask_out [hs_transfers - 1];
- bandwidth = stream->usecs + stream->c_usecs;
- bandwidth /= interval << 3;
+ stream->ps.cs_mask = smask_out[hs_transfers - 1];
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ stream->ps.bw_uperiod = stream->ps.bw_period << 3;
- /* stream->splits gets created from raw_mask later */
+ stream->ps.period = urb->interval;
+ stream->uperiod = urb->interval << 3;
+ stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
+ stream->ps.bw_period;
+
+ /* stream->splits gets created from cs_mask later */
stream->address = cpu_to_hc32(ehci, addr);
}
- stream->bandwidth = bandwidth;
- stream->udev = dev;
+ stream->ps.udev = dev;
+ stream->ps.ep = urb->ep;
stream->bEndpointAddress = is_input | epnum;
- stream->interval = interval;
stream->maxp = maxp;
}
@@ -1090,9 +1171,7 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely (stream != NULL)) {
ep->hcpriv = stream;
- stream->ep = ep;
- iso_stream_init(ehci, stream, urb->dev, urb->pipe,
- urb->interval);
+ iso_stream_init(ehci, stream, urb);
}
/* if dev->ep [epnum] is a QH, hw is set */
@@ -1137,7 +1216,7 @@ itd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
+ iso_sched->span = urb->number_of_packets * stream->uperiod;
/* figure out per-uframe itd fields that we'll need later
* when we fit new itds into the schedule.
@@ -1236,7 +1315,7 @@ itd_urb_transaction (
memset (itd, 0, sizeof *itd);
itd->itd_dma = itd_dma;
- itd->frame = 9999; /* an invalid value */
+ itd->frame = NO_FRAME;
list_add (&itd->itd_list, &sched->td_list);
}
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1249,49 +1328,106 @@ itd_urb_transaction (
/*-------------------------------------------------------------------------*/
+static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
+ struct ehci_iso_stream *stream, int sign)
+{
+ unsigned uframe;
+ unsigned i, j;
+ unsigned s_mask, c_mask, m;
+ int usecs = stream->ps.usecs;
+ int c_usecs = stream->ps.c_usecs;
+ int tt_usecs = stream->ps.tt_usecs;
+ struct ehci_tt *tt;
+
+ if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
+ return;
+ uframe = stream->ps.bw_phase << 3;
+
+ bandwidth_dbg(ehci, sign, "iso", &stream->ps);
+
+ if (sign < 0) { /* Release bandwidth */
+ usecs = -usecs;
+ c_usecs = -c_usecs;
+ tt_usecs = -tt_usecs;
+ }
+
+ if (!stream->splits) { /* High speed */
+ for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += stream->ps.bw_uperiod)
+ ehci->bandwidth[i] += usecs;
+
+ } else { /* Full speed */
+ s_mask = stream->ps.cs_mask;
+ c_mask = s_mask >> 8;
+
+ /* NOTE: adjustment needed for frame overflow */
+ for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
+ i += stream->ps.bw_uperiod) {
+ for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
+ (++j, m <<= 1)) {
+ if (s_mask & m)
+ ehci->bandwidth[i+j] += usecs;
+ else if (c_mask & m)
+ ehci->bandwidth[i+j] += c_usecs;
+ }
+ }
+
+ tt = find_tt(stream->ps.udev);
+ if (sign > 0)
+ list_add_tail(&stream->ps.ps_list, &tt->ps_list);
+ else
+ list_del(&stream->ps.ps_list);
+
+ for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
+ i += stream->ps.bw_period)
+ tt->bandwidth[i] += tt_usecs;
+ }
+}
+
static inline int
itd_slot_ok (
struct ehci_hcd *ehci,
- u32 mod,
- u32 uframe,
- u8 usecs,
- u32 period
+ struct ehci_iso_stream *stream,
+ unsigned uframe
)
{
- uframe %= period;
- do {
- /* can't commit more than uframe_periodic_max usec */
- if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
- > (ehci->uframe_periodic_max - usecs))
- return 0;
+ unsigned usecs;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = ehci->uframe_periodic_max - stream->ps.usecs;
- /* we know urb->interval is 2^N uframes */
- uframe += period;
- } while (uframe < mod);
+ for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += stream->ps.bw_uperiod) {
+ if (ehci->bandwidth[uframe] > usecs)
+ return 0;
+ }
return 1;
}
static inline int
sitd_slot_ok (
struct ehci_hcd *ehci,
- u32 mod,
struct ehci_iso_stream *stream,
- u32 uframe,
+ unsigned uframe,
struct ehci_iso_sched *sched,
- u32 period_uframes
+ struct ehci_tt *tt
)
{
- u32 mask, tmp;
- u32 frame, uf;
+ unsigned mask, tmp;
+ unsigned frame, uf;
+
+ mask = stream->ps.cs_mask << (uframe & 7);
- mask = stream->raw_mask << (uframe & 7);
+ /* for OUT, don't wrap SSPLIT into H-microframe 7 */
+ if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
+ return 0;
/* for IN, don't wrap CSPLIT into the next frame */
if (mask & ~0xffff)
return 0;
/* check bandwidth */
- uframe %= period_uframes;
+ uframe &= stream->ps.bw_uperiod - 1;
frame = uframe >> 3;
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
@@ -1299,54 +1435,48 @@ sitd_slot_ok (
* tt_available scheduling guarantees 10+% for control/bulk.
*/
uf = uframe & 7;
- if (!tt_available(ehci, period_uframes >> 3,
- stream->udev, frame, uf, stream->tt_usecs))
+ if (!tt_available(ehci, &stream->ps, tt, frame, uf))
return 0;
#else
/* tt must be idle for start(s), any gap, and csplit.
* assume scheduling slop leaves 10+% for control/bulk.
*/
- if (!tt_no_collision(ehci, period_uframes >> 3,
- stream->udev, frame, mask))
+ if (!tt_no_collision(ehci, stream->ps.bw_period,
+ stream->ps.udev, frame, mask))
return 0;
#endif
- /* this multi-pass logic is simple, but performance may
- * suffer when the schedule data isn't cached.
- */
do {
- u32 max_used;
-
- frame = uframe >> 3;
- uf = uframe & 7;
+ unsigned max_used;
+ unsigned i;
/* check starts (OUT uses more than one) */
- max_used = ehci->uframe_periodic_max - stream->usecs;
- for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
- if (periodic_usecs (ehci, frame, uf) > max_used)
+ uf = uframe;
+ max_used = ehci->uframe_periodic_max - stream->ps.usecs;
+ for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
+ if (ehci->bandwidth[uf] > max_used)
return 0;
}
/* for IN, check CSPLIT */
- if (stream->c_usecs) {
- uf = uframe & 7;
- max_used = ehci->uframe_periodic_max - stream->c_usecs;
- do {
- tmp = 1 << uf;
- tmp <<= 8;
- if ((stream->raw_mask & tmp) == 0)
+ if (stream->ps.c_usecs) {
+ max_used = ehci->uframe_periodic_max -
+ stream->ps.c_usecs;
+ uf = uframe & ~7;
+ tmp = 1 << (2+8);
+ for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
+ if ((stream->ps.cs_mask & tmp) == 0)
continue;
- if (periodic_usecs (ehci, frame, uf)
- > max_used)
+ if (ehci->bandwidth[uf+i] > max_used)
return 0;
- } while (++uf < 8);
+ }
}
- /* we know urb->interval is 2^N uframes */
- uframe += period_uframes;
- } while (uframe < mod);
+ uframe += stream->ps.bw_uperiod;
+ } while (uframe < EHCI_BANDWIDTH_SIZE);
- stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
+ stream->ps.cs_mask <<= uframe & 7;
+ stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
return 1;
}
@@ -1361,8 +1491,6 @@ sitd_slot_ok (
* given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
-#define SCHEDULING_DELAY 40 /* microframes */
-
static int
iso_stream_schedule (
struct ehci_hcd *ehci,
@@ -1370,134 +1498,184 @@ iso_stream_schedule (
struct ehci_iso_stream *stream
)
{
- u32 now, base, next, start, period, span;
- int status;
+ u32 now, base, next, start, period, span, now2;
+ u32 wrap = 0, skip = 0;
+ int status = 0;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
+ bool empty = list_empty(&stream->td_list);
+ bool new_stream = false;
- period = urb->interval;
+ period = stream->uperiod;
span = sched->span;
- if (!stream->highspeed) {
- period <<= 3;
+ if (!stream->highspeed)
span <<= 3;
- }
- now = ehci_read_frame_index(ehci) & (mod - 1);
+ /* Start a new isochronous stream? */
+ if (unlikely(empty && !hcd_periodic_completion_in_progress(
+ ehci_to_hcd(ehci), urb->ep))) {
- /* Typical case: reuse current schedule, stream is still active.
- * Hopefully there are no gaps from the host falling behind
- * (irq delays etc). If there are, the behavior depends on
- * whether URB_ISO_ASAP is set.
- */
- if (likely (!list_empty (&stream->td_list))) {
+ /* Schedule the endpoint */
+ if (stream->ps.phase == NO_FRAME) {
+ int done = 0;
+ struct ehci_tt *tt = find_tt(stream->ps.udev);
- /* Take the isochronous scheduling threshold into account */
- if (ehci->i_thresh)
- next = now + ehci->i_thresh; /* uframe cache */
- else
- next = (now + 2 + 7) & ~0x07; /* full frame cache */
-
- /*
- * Use ehci->last_iso_frame as the base. There can't be any
- * TDs scheduled for earlier than that.
- */
- base = ehci->last_iso_frame << 3;
- next = (next - base) & (mod - 1);
- start = (stream->next_uframe - base) & (mod - 1);
-
- /* Is the schedule already full? */
- if (unlikely(start < period)) {
- ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
- urb, stream->next_uframe, base,
- period, mod);
- status = -ENOSPC;
- goto fail;
- }
-
- /* Behind the scheduling threshold? */
- if (unlikely(start < next)) {
- unsigned now2 = (now - base) & (mod - 1);
+ if (IS_ERR(tt)) {
+ status = PTR_ERR(tt);
+ goto fail;
+ }
+ compute_tt_budget(ehci->tt_budget, tt);
- /* USB_ISO_ASAP: Round up to the first available slot */
- if (urb->transfer_flags & URB_ISO_ASAP)
- start += (next - start + period - 1) & -period;
+ start = ((-(++ehci->random_frame)) << 3) & (period - 1);
- /*
- * Not ASAP: Use the next slot in the stream,
- * no matter what.
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
*/
- else if (start + span - period < now2) {
- ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
- urb, start + base,
- span - period, now2 + base);
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (stream->highspeed) {
+ if (itd_slot_ok(ehci, stream, start))
+ done = 1;
+ } else {
+ if ((start % 8) >= 6)
+ continue;
+ if (sitd_slot_ok(ehci, stream, start,
+ sched, tt))
+ done = 1;
+ }
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ ehci_dbg(ehci, "iso sched full %p", urb);
+ status = -ENOSPC;
+ goto fail;
}
+ stream->ps.phase = (start >> 3) &
+ (stream->ps.period - 1);
+ stream->ps.bw_phase = stream->ps.phase &
+ (stream->ps.bw_period - 1);
+ stream->ps.phase_uf = start & 7;
+ reserve_release_iso_bandwidth(ehci, stream, 1);
+ }
+
+ /* New stream is already scheduled; use the upcoming slot */
+ else {
+ start = (stream->ps.phase << 3) + stream->ps.phase_uf;
}
- start += base;
+ stream->next_uframe = start;
+ new_stream = true;
}
- /* need to schedule; when's the next (u)frame we could start?
- * this is bigger than ehci->i_thresh allows; scheduling itself
- * isn't free, the delay should handle reasonably slow cpus. it
- * can also help high bandwidth if the dma and irq loads don't
- * jump until after the queue is primed.
+ now = ehci_read_frame_index(ehci) & (mod - 1);
+
+ /* Take the isochronous scheduling threshold into account */
+ if (ehci->i_thresh)
+ next = now + ehci->i_thresh; /* uframe cache */
+ else
+ next = (now + 2 + 7) & ~0x07; /* full frame cache */
+
+ /*
+ * Use ehci->last_iso_frame as the base. There can't be any
+ * TDs scheduled for earlier than that.
*/
- else {
- int done = 0;
+ base = ehci->last_iso_frame << 3;
+ next = (next - base) & (mod - 1);
+ start = (stream->next_uframe - base) & (mod - 1);
- base = now & ~0x07;
- start = base + SCHEDULING_DELAY;
+ if (unlikely(new_stream))
+ goto do_ASAP;
- /* find a uframe slot with enough bandwidth.
- * Early uframes are more precious because full-speed
- * iso IN transfers can't use late uframes,
- * and therefore they should be allocated last.
- */
- next = start;
- start += period;
- do {
- start--;
- /* check schedule: enough space? */
- if (stream->highspeed) {
- if (itd_slot_ok(ehci, mod, start,
- stream->usecs, period))
- done = 1;
- } else {
- if ((start % 8) >= 6)
- continue;
- if (sitd_slot_ok(ehci, mod, stream,
- start, sched, period))
- done = 1;
- }
- } while (start > next && !done);
+ /*
+ * Typical case: reuse current schedule, stream may still be active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc). If there are, the behavior depends on
+ * whether URB_ISO_ASAP is set.
+ */
+ now2 = (now - base) & (mod - 1);
+
+ /* Is the schedule already full? */
+ if (unlikely(!empty && start < period)) {
+ ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+ urb, stream->next_uframe, base, period, mod);
+ status = -ENOSPC;
+ goto fail;
+ }
+
+ /* Is the next packet scheduled after the base time? */
+ if (likely(!empty || start <= now2 + period)) {
+
+ /* URB_ISO_ASAP: make sure that start >= next */
+ if (unlikely(start < next &&
+ (urb->transfer_flags & URB_ISO_ASAP)))
+ goto do_ASAP;
+
+ /* Otherwise use start, if it's not in the past */
+ if (likely(start >= now2))
+ goto use_start;
- /* no room in the schedule */
- if (!done) {
- ehci_dbg(ehci, "iso sched full %p", urb);
- status = -ENOSPC;
- goto fail;
+ /* Otherwise we got an underrun while the queue was empty */
+ } else {
+ if (urb->transfer_flags & URB_ISO_ASAP)
+ goto do_ASAP;
+ wrap = mod;
+ now2 += mod;
+ }
+
+ /* How many uframes and packets do we need to skip? */
+ skip = (now2 - start + period - 1) & -period;
+ if (skip >= span) { /* Entirely in the past? */
+ ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
+ urb, start + base, span - period, now2 + base,
+ base);
+
+ /* Try to keep the last TD intact for scanning later */
+ skip = span - period;
+
+ /* Will it come before the current scan position? */
+ if (empty) {
+ skip = span; /* Skip the entire URB */
+ status = 1; /* and give it back immediately */
+ iso_sched_free(stream, sched);
+ sched = NULL;
}
}
+ urb->error_count = skip / period;
+ if (sched)
+ sched->first_packet = urb->error_count;
+ goto use_start;
+ do_ASAP:
+ /* Use the first slot after "next" */
+ start = next + ((start - next) & (period - 1));
+
+ use_start:
/* Tried to schedule too far into the future? */
- if (unlikely(start - base + span - period >= mod)) {
+ if (unlikely(start + span - period >= mod + wrap)) {
ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
- urb, start - base, span - period, mod);
+ urb, start, span - period, mod + wrap);
status = -EFBIG;
goto fail;
}
- stream->next_uframe = start & (mod - 1);
+ start += base;
+ stream->next_uframe = (start + skip) & (mod - 1);
/* report high speed start in uframes; full speed, in frames */
- urb->start_frame = stream->next_uframe;
+ urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
/* Make sure scan_isoc() sees these */
if (ehci->isoc_count == 0)
ehci->last_iso_frame = now >> 3;
- return 0;
+ return status;
fail:
iso_sched_free(stream, sched);
@@ -1610,7 +1788,8 @@ static void itd_link_urb(
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
- for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+ for (packet = iso_sched->first_packet, itd = NULL;
+ packet < urb->number_of_packets;) {
if (itd == NULL) {
/* ASSERT: we have all necessary itds */
// BUG_ON (list_empty (&iso_sched->td_list));
@@ -1630,7 +1809,7 @@ static void itd_link_urb(
itd_patch(ehci, itd, iso_sched, packet, uframe);
- next_uframe += stream->interval;
+ next_uframe += stream->uperiod;
next_uframe &= mod - 1;
packet++;
@@ -1770,9 +1949,9 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
- if (unlikely (urb->interval != stream->interval)) {
+ if (unlikely(urb->interval != stream->uperiod)) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
+ stream->uperiod, urb->interval);
goto done;
}
@@ -1804,10 +1983,14 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
- if (likely (status == 0))
+ if (likely(status == 0)) {
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
- else
+ } else if (status > 0) {
+ status = 0;
+ ehci_urb_done(ehci, urb, 0);
+ } else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+ }
done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
done:
@@ -1833,7 +2016,7 @@ sitd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many frames are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
+ iso_sched->span = urb->number_of_packets * stream->ps.period;
/* figure out per-frame sitd fields that we'll need later
* when we fit new sitds into the schedule.
@@ -1925,7 +2108,7 @@ sitd_urb_transaction (
memset (sitd, 0, sizeof *sitd);
sitd->sitd_dma = sitd_dma;
- sitd->frame = 9999; /* an invalid value */
+ sitd->frame = NO_FRAME;
list_add (&sitd->sitd_list, &iso_sched->td_list);
}
@@ -2008,7 +2191,7 @@ static void sitd_link_urb(
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
- for (packet = 0, sitd = NULL;
+ for (packet = sched->first_packet, sitd = NULL;
packet < urb->number_of_packets;
packet++) {
@@ -2027,7 +2210,7 @@ static void sitd_link_urb(
sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
- next_uframe += stream->interval << 3;
+ next_uframe += stream->uperiod;
}
stream->next_uframe = next_uframe & (mod - 1);
@@ -2146,9 +2329,9 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
- if (urb->interval != stream->interval) {
+ if (urb->interval != stream->ps.period) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
+ stream->ps.period, urb->interval);
goto done;
}
@@ -2178,10 +2361,14 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
- if (status == 0)
+ if (likely(status == 0)) {
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
- else
+ } else if (status > 0) {
+ status = 0;
+ ehci_urb_done(ehci, urb, 0);
+ } else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+ }
done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
done:
@@ -2259,7 +2446,8 @@ restart:
q.itd->hw_next != EHCI_LIST_END(ehci))
*hw_p = q.itd->hw_next;
else
- *hw_p = ehci->dummy->qh_dma;
+ *hw_p = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
wmb();
modified = itd_complete (ehci, q.itd);
@@ -2294,7 +2482,8 @@ restart:
q.sitd->hw_next != EHCI_LIST_END(ehci))
*hw_p = q.sitd->hw_next;
else
- *hw_p = ehci->dummy->qh_dma;
+ *hw_p = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
wmb();
modified = sitd_complete (ehci, q.sitd);
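
The ehci-sched.c changes above replace the old rescanning of the periodic schedule (periodic_usecs()) with a precomputed per-microframe table, ehci->bandwidth[], checked and charged at power-of-two strides: itd_slot_ok() reads it, and reserve_release_iso_bandwidth() updates it with a +/- sign. A minimal standalone sketch of that pattern follows; it is not part of the patch, and bw[], slot_ok() and reserve() are simplified stand-ins.

/*
 * Sketch (not part of the patch): standalone model of the table-based
 * bandwidth accounting above.  bw[], slot_ok() and reserve() stand in
 * for ehci->bandwidth[], itd_slot_ok() and
 * reserve_release_iso_bandwidth(); periods are powers of two no larger
 * than the table.
 */
#include <stdbool.h>
#include <stdio.h>

#define BW_SIZE	64		/* microframes tracked */
#define UF_MAX	100		/* usecs allowed per microframe */

static unsigned char bw[BW_SIZE];	/* usecs already claimed per uframe */

/* Would 'usecs' fit at 'phase' in every microframe the period touches? */
static bool slot_ok(unsigned phase, unsigned period, unsigned usecs)
{
	for (phase &= period - 1; phase < BW_SIZE; phase += period)
		if (bw[phase] + usecs > UF_MAX)
			return false;
	return true;
}

/* Charge (sign > 0) or release (sign < 0) that bandwidth. */
static void reserve(unsigned phase, unsigned period, int usecs, int sign)
{
	for (phase &= period - 1; phase < BW_SIZE; phase += period)
		bw[phase] += sign * usecs;
}

int main(void)
{
	if (slot_ok(3, 8, 25))
		reserve(3, 8, 25, 1);	/* claim 25 us every 8 uframes */
	printf("uframe 3 now carries %u us\n", (unsigned)bw[3]);
	reserve(3, 8, 25, -1);		/* and give it back */
	return 0;
}
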
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index b2de52d3961..8a734498079 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 93e59a13bc1..dc899eb2b86 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1cf0adba3fc..ee6f9ffaa0e 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -81,10 +81,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
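
The spear probe change above, like the tegra, at91 and exynos probes later in this series, folds the open-coded DMA mask setup into a single dma_coerce_mask_and_coherent() call whose return value is checked. A standalone sketch of the logic being replaced, using a hypothetical fake_dev type rather than struct device:

/*
 * Sketch (not from the patch): the open-coded logic these probe
 * functions used to carry.  fake_dev and coerce_mask_and_coherent()
 * are stand-ins; the real helper also validates the mask and can fail.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_dev {
	uint64_t *dma_mask;		/* may be NULL on platform devices */
	uint64_t coherent_dma_mask;
};

static int coerce_mask_and_coherent(struct fake_dev *dev, uint64_t mask)
{
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	*dev->dma_mask = mask;
	dev->coherent_dma_mask = mask;
	return 0;
}

int main(void)
{
	struct fake_dev d = { 0 };

	if (coerce_mask_and_coherent(&d, (1ULL << 32) - 1))
		return 1;
	printf("mask %#llx\n", (unsigned long long)*d.dma_mask);
	return 0;
}
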
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 14ced00ba22..f6459dfb6f5 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -97,8 +97,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
{
struct ehci_hcd *ehci;
unsigned uframe_periodic_max;
- unsigned frame, uframe;
- unsigned short allocated_max;
+ unsigned uframe;
unsigned long flags;
ssize_t ret;
@@ -122,16 +121,14 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
/*
* for request to decrease max periodic bandwidth, we have to check
- * every microframe in the schedule to see whether the decrease is
- * possible.
+ * to see whether the decrease is possible.
*/
if (uframe_periodic_max < ehci->uframe_periodic_max) {
- allocated_max = 0;
+ u8 allocated_max = 0;
- for (frame = 0; frame < ehci->periodic_size; ++frame)
- for (uframe = 0; uframe < 7; ++uframe)
- allocated_max = max(allocated_max,
- periodic_usecs (ehci, frame, uframe));
+ for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe)
+ allocated_max = max(allocated_max,
+ ehci->bandwidth[uframe]);
if (allocated_max > uframe_periodic_max) {
ehci_info(ehci,
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 78fa76da332..b9fd0396011 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -362,10 +362,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
@@ -388,7 +387,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
err = clk_prepare_enable(tegra->clk);
if (err)
- goto cleanup_clk_get;
+ goto cleanup_hcd_create;
tegra_periph_reset_assert(tegra->clk);
udelay(1);
@@ -465,8 +464,6 @@ cleanup_phy:
usb_phy_shutdown(hcd->phy);
cleanup_clk_en:
clk_disable_unprepare(tegra->clk);
-cleanup_clk_get:
- clk_put(tegra->clk);
cleanup_hcd_create:
usb_put_hcd(hcd);
return err;
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index cca4be90a86..67026ffbf9a 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
* Generic hardware linkage.
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* Basic lifecycle operations.
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 59e0e24c753..cdad8438c02 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -11,13 +11,28 @@
*
*/
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
/* enable phy0 and phy1 for w90p910 */
#define ENPHY (0x01<<8)
#define PHY0_CTR (0xA4)
#define PHY1_CTR (0xA8)
+#define DRIVER_DESC "EHCI w90x900 driver"
+
+static const char hcd_name[] = "ehci-w90x900 ";
+
+static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
+
static int usb_w90x900_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
@@ -90,8 +105,8 @@ err1:
return retval;
}
-static
-void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static void usb_w90x900_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
@@ -99,54 +114,6 @@ void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev)
usb_put_hcd(hcd);
}
-static const struct hc_driver ehci_w90x900_hc_driver = {
- .description = hcd_name,
- .product_desc = "Nuvoton w90x900 EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_USB2|HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_setup,
- .start = ehci_run,
-
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
-#endif
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
static int ehci_w90x900_probe(struct platform_device *pdev)
{
if (usb_disabled())
@@ -173,7 +140,25 @@ static struct platform_driver ehci_hcd_w90x900_driver = {
},
};
+static int __init ehci_w90X900_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_w90x900_hc_driver, NULL);
+ return platform_driver_register(&ehci_hcd_w90x900_driver);
+}
+module_init(ehci_w90X900_init);
+
+static void __exit ehci_w90X900_cleanup(void)
+{
+ platform_driver_unregister(&ehci_hcd_w90x900_driver);
+}
+module_exit(ehci_w90X900_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 usb ehci driver!");
-MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:w90p910-ehci");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index eba962e6ebf..95979f9f438 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 291db7d09f2..e8f41c5e771 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -54,6 +54,28 @@ struct ehci_stats {
unsigned long unlink;
};
+/*
+ * Scheduling and budgeting information for periodic transfers, for both
+ * high-speed devices and full/low-speed devices lying behind a TT.
+ */
+struct ehci_per_sched {
+ struct usb_device *udev; /* access to the TT */
+ struct usb_host_endpoint *ep;
+ struct list_head ps_list; /* node on ehci_tt's ps_list */
+ u16 tt_usecs; /* time on the FS/LS bus */
+ u16 cs_mask; /* C-mask and S-mask bytes */
+ u16 period; /* actual period in frames */
+ u16 phase; /* actual phase, frame part */
+ u8 bw_phase; /* same, for bandwidth
+ reservation */
+ u8 phase_uf; /* uframe part of the phase */
+ u8 usecs, c_usecs; /* times on the HS bus */
+ u8 bw_uperiod; /* period in microframes, for
+ bandwidth reservation */
+ u8 bw_period; /* same, in frames */
+};
+#define NO_FRAME 29999 /* frame not assigned yet */
+
/* ehci_hcd->lock guards shared data against other CPUs:
* ehci_hcd: async, unlink, periodic (and shadow), ...
* usb_host_endpoint: hcpriv
@@ -230,6 +252,15 @@ struct ehci_hcd { /* one per controller */
struct dentry *debug_dir;
#endif
+ /* bandwidth usage */
+#define EHCI_BANDWIDTH_SIZE 64
+#define EHCI_BANDWIDTH_FRAMES (EHCI_BANDWIDTH_SIZE >> 3)
+ u8 bandwidth[EHCI_BANDWIDTH_SIZE];
+ /* us allocated per uframe */
+ u8 tt_budget[EHCI_BANDWIDTH_SIZE];
+ /* us budgeted per uframe */
+ struct list_head tt_list;
+
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
};
@@ -385,6 +416,7 @@ struct ehci_qh {
struct list_head intr_node; /* list of intr QHs */
struct ehci_qtd *dummy;
struct list_head unlink_node;
+ struct ehci_per_sched ps; /* scheduling info */
unsigned unlink_cycle;
@@ -398,16 +430,8 @@ struct ehci_qh {
u8 xacterrs; /* XactErr retry counter */
#define QH_XACTERR_MAX 32 /* XactErr retry limit */
- /* periodic schedule info */
- u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
- u8 c_usecs; /* ... split completion bw */
- u16 tt_usecs; /* tt downstream bandwidth */
- unsigned short period; /* polling interval */
- unsigned short start; /* where polling starts */
-#define NO_FRAME ((unsigned short)~0) /* pick new start */
- struct usb_device *dev; /* access to TT */
unsigned is_out:1; /* bulk or intr OUT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
unsigned dequeue_during_giveback:1;
@@ -434,6 +458,7 @@ struct ehci_iso_packet {
struct ehci_iso_sched {
struct list_head td_list;
unsigned span;
+ unsigned first_packet;
struct ehci_iso_packet packet [0];
};
@@ -449,22 +474,17 @@ struct ehci_iso_stream {
u8 highspeed;
struct list_head td_list; /* queued itds/sitds */
struct list_head free_list; /* list of unused itds/sitds */
- struct usb_device *udev;
- struct usb_host_endpoint *ep;
/* output of (re)scheduling */
- int next_uframe;
+ struct ehci_per_sched ps; /* scheduling info */
+ unsigned next_uframe;
__hc32 splits;
/* the rest is derived from the endpoint descriptor,
- * trusting urb->interval == f(epdesc->bInterval) and
* including the extra info for hw_bufp[0..2]
*/
- u8 usecs, c_usecs;
- u16 interval;
- u16 tt_usecs;
+ u16 uperiod; /* period in uframes */
u16 maxp;
- u16 raw_mask;
unsigned bandwidth;
/* This is used to initialize iTD's hw_bufp fields */
@@ -579,6 +599,35 @@ struct ehci_fstn {
/*-------------------------------------------------------------------------*/
+/*
+ * USB-2.0 Specification Sections 11.14 and 11.18
+ * Scheduling and budgeting split transactions using TTs
+ *
+ * A hub can have a single TT for all its ports, or multiple TTs (one for each
+ * port). The bandwidth and budgeting information for the full/low-speed bus
+ * below each TT is self-contained and independent of the other TTs or the
+ * high-speed bus.
+ *
+ * "Bandwidth" refers to the number of microseconds on the FS/LS bus allocated
+ * to an interrupt or isochronous endpoint for each frame. "Budget" refers to
+ * the best-case estimate of the number of full-speed bytes allocated to an
+ * endpoint for each microframe within an allocated frame.
+ *
+ * Removal of an endpoint invalidates a TT's budget. Instead of trying to
+ * keep an up-to-date record, we recompute the budget when it is needed.
+ */
+
+struct ehci_tt {
+ u16 bandwidth[EHCI_BANDWIDTH_FRAMES];
+
+ struct list_head tt_list; /* List of all ehci_tt's */
+ struct list_head ps_list; /* Items using this TT */
+ struct usb_tt *usb_tt;
+ int tt_port; /* TT port number */
+};
+
+/*-------------------------------------------------------------------------*/
+
/* Prepare the PORTSC wakeup flags during controller suspend/resume */
#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \
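
The new struct ehci_per_sched above carries both the endpoint's real period and phase and a copy folded into the 64-microframe bandwidth window (bw_period, bw_uperiod, bw_phase), which is how one small table can describe endpoints with arbitrarily long intervals. A tiny standalone sketch of that folding, with trimmed-down hypothetical names; periods are assumed to be powers of two, as they are when derived from bInterval:

/*
 * Sketch (hypothetical, trimmed-down names): folding a real period and
 * phase into the 64-microframe bandwidth window, as the bw_* fields
 * above do.
 */
#include <stdio.h>

#define BW_UFRAMES	64
#define BW_FRAMES	(BW_UFRAMES >> 3)

struct per_sched {
	unsigned period;	/* real period, frames */
	unsigned phase;		/* real phase, frames */
	unsigned phase_uf;	/* uframe within that frame */
	unsigned bw_period;	/* folded period, frames */
	unsigned bw_phase;	/* folded phase, frames */
};

static void fold(struct per_sched *ps)
{
	ps->bw_period = ps->period < BW_FRAMES ? ps->period : BW_FRAMES;
	ps->bw_phase = ps->phase & (ps->bw_period - 1);
}

int main(void)
{
	struct per_sched ps = { .period = 32, .phase = 21, .phase_uf = 3 };

	fold(&ps);
	printf("first table slot: uframe %u, repeating every %u uframes\n",
	       (ps.bw_phase << 3) + ps.phase_uf, ps.bw_period << 3);
	return 0;
}
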
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0b46542591f..0551c0af0fd 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -26,6 +26,8 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index fce13bcc4a3..55486bd23cf 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -412,7 +412,7 @@ struct debug_buffer {
tmp = 'h'; break; \
default: \
tmp = '?'; break; \
- }; tmp; })
+ } tmp; })
static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
{
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
index 299253c826c..e1c6d850a7e 100644
--- a/drivers/usb/host/fusbh200-hcd.c
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -402,7 +402,7 @@ struct debug_buffer {
case QH_LOW_SPEED: tmp = 'l'; break; \
case QH_HIGH_SPEED: tmp = 'h'; break; \
default: tmp = '?'; break; \
- }; tmp; })
+ } tmp; })
static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token)
{
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 5b86ffb88f1..ada0a52797b 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -199,10 +199,14 @@ static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc);
- return -ENOSYS;
+ /*
+ * We cannot query the HWA for the WUSB time since that requires sending
+ * a synchronous URB and this function can be called in_interrupt.
+ * Instead, query the USB frame number for our parent and use that.
+ */
+ return usb_get_current_frame_number(wa->usb_dev);
}
static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
@@ -566,14 +570,10 @@ found:
goto error;
}
wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
- /* Make LE fields CPU order */
- wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
- wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
- wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
- if (wa_descr->bcdWAVersion > 0x0100)
+ if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
- wa_descr->bcdWAVersion & 0xff00 >> 8,
- wa_descr->bcdWAVersion & 0x00ff);
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00 >> 8,
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
result = 0;
error:
return result;
@@ -679,7 +679,8 @@ static void hwahc_security_release(struct hwahc *hwahc)
/* nothing to do here so far... */
}
-static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
+ kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
@@ -724,7 +725,7 @@ static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
goto error_wusbhc_create;
}
- result = wa_create(&hwahc->wa, iface);
+ result = wa_create(&hwahc->wa, iface, quirks);
if (result < 0)
goto error_wa_create;
return 0;
@@ -780,7 +781,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
- result = hwahc_create(hwahc, usb_iface);
+ result = hwahc_create(hwahc, usb_iface, id->driver_info);
if (result < 0) {
dev_err(dev, "Cannot initialize internals: %d\n", result);
goto error_hwahc_create;
@@ -824,6 +825,12 @@ static void hwahc_disconnect(struct usb_interface *usb_iface)
}
static struct usb_device_id hwahc_id_table[] = {
+ /* Alereon 5310 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
+ /* Alereon 5611 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
/* FIXME: use class labels for this */
{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
{},
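
The hwahc_id_table entries above attach per-device quirk bits through the driver_info field, and hwahc_create() now receives them from id->driver_info at probe time. A standalone sketch of that table-driven quirk lookup; the table, flag value and lookup helper are illustrative only, since in the driver the USB core does the matching:

/*
 * Sketch (illustrative only): quirk flags carried in the device-ID
 * table and handed to the create path at probe time.
 */
#include <stdio.h>

#define QUIRK_CONCAT_ISOC	0x01

struct id_entry {
	unsigned short vendor, product;
	unsigned long driver_info;	/* quirk bits for this device */
};

static const struct id_entry table[] = {
	{ 0x13dc, 0x5310, QUIRK_CONCAT_ISOC },
	{ 0x13dc, 0x5611, QUIRK_CONCAT_ISOC },
	{ 0, 0, 0 },			/* terminator, no quirks */
};

static unsigned long lookup_quirks(unsigned short v, unsigned short p)
{
	const struct id_entry *e;

	for (e = table; e->vendor; e++)
		if (e->vendor == v && e->product == p)
			return e->driver_info;
	return 0;
}

int main(void)
{
	printf("quirks for 13dc:5310 = %#lx\n", lookup_quirks(0x13dc, 0x5310));
	return 0;
}
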
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 6f29abad681..935a2dd367a 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2108,7 +2108,7 @@ static int isp1362_show(struct seq_file *s, void *unused)
default:
s = "?";
break;
- };
+ }
s;}), ep->maxpacket) ;
list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
seq_printf(s, " urb%p, %d/%d\n", urb,
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index caa3764a340..418444ebb1b 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -13,19 +13,24 @@
*/
#include <linux/clk.h>
-#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/atmel.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
#include <mach/cpu.h>
-#ifndef CONFIG_ARCH_AT91
-#error "CONFIG_ARCH_AT91 must be defined."
-#endif
+
+#include "ohci.h"
#define valid_port(index) ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
#define at91_for_each_port(index) \
@@ -33,7 +38,17 @@
/* interface, function and usb clocks; sometimes also an AHB clock */
static struct clk *iclk, *fclk, *uclk, *hclk;
+/* interface and function clocks; sometimes also an AHB clock */
+
+#define DRIVER_DESC "OHCI Atmel driver"
+
+static const char hcd_name[] = "ohci-atmel";
+
+static struct hc_driver __read_mostly ohci_at91_hc_driver;
static int clocked;
+static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength);
+static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
extern int usb_disabled(void);
@@ -117,6 +132,8 @@ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
+ struct at91_usbh_data *board;
+ struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd = NULL;
@@ -177,8 +194,10 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
}
}
+ board = hcd->self.controller->platform_data;
+ ohci = hcd_to_ohci(hcd);
+ ohci->num_ports = board->ports;
at91_start_hc(pdev);
- ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
if (retval == 0)
@@ -238,36 +257,6 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
}
/*-------------------------------------------------------------------------*/
-
-static int
-ohci_at91_reset (struct usb_hcd *hcd)
-{
- struct at91_usbh_data *board = dev_get_platdata(hcd->self.controller);
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- ohci->num_ports = board->ports;
- return 0;
-}
-
-static int
-ohci_at91_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start %s\n",
- hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
- return 0;
-}
-
static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
{
if (!valid_port(port))
@@ -297,8 +286,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
*/
static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
- int length = ohci_hub_status_data(hcd, buf);
+ struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ int length = orig_ohci_hub_status_data(hcd, buf);
int port;
at91_for_each_port(port) {
@@ -376,7 +365,8 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
}
- ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
+ ret = orig_ohci_hub_control(hcd, typeReq, wValue, wIndex + 1,
+ buf, wLength);
if (ret)
goto out;
@@ -430,51 +420,6 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/*-------------------------------------------------------------------------*/
-static const struct hc_driver ohci_at91_hc_driver = {
- .description = hcd_name,
- .product_desc = "AT91 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_at91_reset,
- .start = ohci_at91_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_at91_hub_status_data,
- .hub_control = ohci_at91_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
{
struct platform_device *pdev = data;
@@ -524,7 +469,7 @@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
static int ohci_at91_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int i, gpio;
+ int i, gpio, ret;
enum of_gpio_flags flags;
struct at91_usbh_data *pdata;
u32 ports;
@@ -536,10 +481,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -703,7 +647,11 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (at91_suspend_entering_slow_clock()) {
- ohci_usb_reset (ohci);
+ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ ohci->hc_control &= OHCI_CTRL_RWC;
+ ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
+
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
at91_stop_clock();
@@ -730,8 +678,6 @@ static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
#define ohci_hcd_at91_drv_resume NULL
#endif
-MODULE_ALIAS("platform:at91_ohci");
-
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
.remove = ohci_hcd_at91_drv_remove,
@@ -744,3 +690,40 @@ static struct platform_driver ohci_hcd_at91_driver = {
.of_match_table = of_match_ptr(at91_ohci_dt_ids),
},
};
+
+static int __init ohci_at91_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&ohci_at91_hc_driver, NULL);
+
+ /*
+ * The Atmel HW has some unusual quirks, which require Atmel-specific
+ * workarounds. We override certain hc_driver functions here to
+ * achieve that. We explicitly do not enhance ohci_driver_overrides to
+ * allow this more easily, since this is an unusual case, and we don't
+ * want to encourage others to override these functions by making it
+ * too easy.
+ */
+
+ orig_ohci_hub_control = ohci_at91_hc_driver.hub_control;
+ orig_ohci_hub_status_data = ohci_at91_hc_driver.hub_status_data;
+
+ ohci_at91_hc_driver.hub_status_data = ohci_at91_hub_status_data;
+ ohci_at91_hc_driver.hub_control = ohci_at91_hub_control;
+
+ return platform_driver_register(&ohci_hcd_at91_driver);
+}
+module_init(ohci_at91_init);
+
+static void __exit ohci_at91_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_at91_driver);
+}
+module_exit(ohci_at91_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_ohci");
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 31b81f9eacd..3fca52ec02a 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -17,7 +17,7 @@
case PIPE_BULK: temp = "bulk"; break; \
case PIPE_INTERRUPT: temp = "intr"; break; \
default: temp = "isoc"; break; \
- }; temp;})
+ } temp;})
#define pipestring(pipe) edstring(usb_pipetype(pipe))
/* debug| print the main components of an URB
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
deleted file mode 100644
index 84a20d5223b..00000000000
--- a/drivers/usb/host/ohci-ep93xx.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for ep93xx.
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- *
- * Modified for pxa27x from ohci-lh7a404.c
- * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
- *
- * Modified for ep93xx from ohci-pxa27x.c
- * by Lennert Buytenhek <buytenh@wantstofly.org> 28-2-2006
- * Based on an earlier driver by Ray Lehtiniemi
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/signal.h>
-#include <linux/platform_device.h>
-
-static struct clk *usb_host_clock;
-
-static int ohci_ep93xx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start %s\n",
- hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static struct hc_driver ohci_ep93xx_hc_driver = {
- .description = hcd_name,
- .product_desc = "EP93xx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
- .start = ohci_ep93xx_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
- .get_frame_number = ohci_get_frame,
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-static int ohci_hcd_ep93xx_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct resource *res;
- int irq;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- hcd = usb_create_hcd(&ohci_ep93xx_hc_driver, &pdev->dev, "ep93xx");
- if (!hcd)
- return -ENOMEM;
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
- goto err_put_hcd;
- }
-
- usb_host_clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(usb_host_clock)) {
- ret = PTR_ERR(usb_host_clock);
- goto err_put_hcd;
- }
-
- clk_enable(usb_host_clock);
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- ret = usb_add_hcd(hcd, irq, 0);
- if (ret)
- goto err_clk_disable;
-
- return 0;
-
-err_clk_disable:
- clk_disable(usb_host_clock);
-err_put_hcd:
- usb_put_hcd(hcd);
-
- return ret;
-}
-
-static int ohci_hcd_ep93xx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- clk_disable(usb_host_clock);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- clk_disable(usb_host_clock);
- return 0;
-}
-
-static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- clk_enable(usb_host_clock);
-
- ohci_resume(hcd, false);
- return 0;
-}
-#endif
-
-
-static struct platform_driver ohci_hcd_ep93xx_driver = {
- .probe = ohci_hcd_ep93xx_drv_probe,
- .remove = ohci_hcd_ep93xx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
-#ifdef CONFIG_PM
- .suspend = ohci_hcd_ep93xx_drv_suspend,
- .resume = ohci_hcd_ep93xx_drv_resume,
-#endif
- .driver = {
- .name = "ep93xx-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:ep93xx-ohci");
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index dc6ee9adacf..91ec9b2cd37 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -12,98 +12,55 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/usb-ohci-exynos.h>
#include <linux/usb/phy.h>
#include <linux/usb/samsung_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI EXYNOS driver"
+
+static const char hcd_name[] = "ohci-exynos";
+static struct hc_driver __read_mostly exynos_ohci_hc_driver;
+
+#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
struct exynos_ohci_hcd {
- struct device *dev;
- struct usb_hcd *hcd;
struct clk *clk;
struct usb_phy *phy;
struct usb_otg *otg;
- struct exynos4_ohci_platdata *pdata;
};
-static void exynos_ohci_phy_enable(struct exynos_ohci_hcd *exynos_ohci)
+static void exynos_ohci_phy_enable(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(exynos_ohci->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
if (exynos_ohci->phy)
usb_phy_init(exynos_ohci->phy);
- else if (exynos_ohci->pdata && exynos_ohci->pdata->phy_init)
- exynos_ohci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
}
-static void exynos_ohci_phy_disable(struct exynos_ohci_hcd *exynos_ohci)
+static void exynos_ohci_phy_disable(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(exynos_ohci->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
if (exynos_ohci->phy)
usb_phy_shutdown(exynos_ohci->phy);
- else if (exynos_ohci->pdata && exynos_ohci->pdata->phy_exit)
- exynos_ohci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
-}
-
-static int ohci_exynos_reset(struct usb_hcd *hcd)
-{
- return ohci_init(hcd_to_ohci(hcd));
-}
-
-static int ohci_exynos_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ohci_dbg(ohci, "ohci_exynos_start, ohci:%p", ohci);
-
- ret = ohci_run(ohci);
- if (ret < 0) {
- dev_err(hcd->self.controller, "can't start %s\n",
- hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
}
-static const struct hc_driver exynos_ohci_hc_driver = {
- .description = hcd_name,
- .product_desc = "EXYNOS OHCI Host Controller",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- .irq = ohci_irq,
- .flags = HCD_MEMORY|HCD_USB11,
-
- .reset = ohci_exynos_reset,
- .start = ohci_exynos_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- .get_frame_number = ohci_get_frame,
-
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
static int exynos_ohci_probe(struct platform_device *pdev)
{
- struct exynos4_ohci_platdata *pdata = dev_get_platdata(&pdev->dev);
struct exynos_ohci_hcd *exynos_ohci;
struct usb_hcd *hcd;
- struct ohci_hcd *ohci;
struct resource *res;
struct usb_phy *phy;
int irq;
@@ -114,15 +71,18 @@ static int exynos_ohci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
- exynos_ohci = devm_kzalloc(&pdev->dev, sizeof(struct exynos_ohci_hcd),
- GFP_KERNEL);
- if (!exynos_ohci)
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ hcd = usb_create_hcd(&exynos_ohci_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
+ }
+
+ exynos_ohci = to_exynos_ohci(hcd);
if (of_device_is_compatible(pdev->dev.of_node,
"samsung,exynos5440-ohci"))
@@ -130,30 +90,15 @@ static int exynos_ohci_probe(struct platform_device *pdev)
phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
- /* Fallback to pdata */
- if (!pdata) {
- dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- } else {
- exynos_ohci->pdata = pdata;
- }
+ usb_put_hcd(hcd);
+ dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
} else {
exynos_ohci->phy = phy;
exynos_ohci->otg = phy->otg;
}
skip_phy:
-
- exynos_ohci->dev = &pdev->dev;
-
- hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- dev_err(&pdev->dev, "Unable to create HCD\n");
- return -ENOMEM;
- }
-
- exynos_ohci->hcd = hcd;
exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ohci->clk)) {
@@ -190,26 +135,21 @@ skip_phy:
}
if (exynos_ohci->otg)
- exynos_ohci->otg->set_host(exynos_ohci->otg,
- &exynos_ohci->hcd->self);
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_enable(exynos_ohci);
+ platform_set_drvdata(pdev, hcd);
- ohci = hcd_to_ohci(hcd);
- ohci_hcd_init(ohci);
+ exynos_ohci_phy_enable(pdev);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
-
- platform_set_drvdata(pdev, exynos_ohci);
-
return 0;
fail_add_hcd:
- exynos_ohci_phy_disable(exynos_ohci);
+ exynos_ohci_phy_disable(pdev);
fail_io:
clk_disable_unprepare(exynos_ohci->clk);
fail_clk:
@@ -219,16 +159,15 @@ fail_clk:
static int exynos_ohci_remove(struct platform_device *pdev)
{
- struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
usb_remove_hcd(hcd);
if (exynos_ohci->otg)
- exynos_ohci->otg->set_host(exynos_ohci->otg,
- &exynos_ohci->hcd->self);
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_disable(exynos_ohci);
+ exynos_ohci_phy_disable(pdev);
clk_disable_unprepare(exynos_ohci->clk);
@@ -239,8 +178,7 @@ static int exynos_ohci_remove(struct platform_device *pdev)
static void exynos_ohci_shutdown(struct platform_device *pdev)
{
- struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
@@ -249,9 +187,10 @@ static void exynos_ohci_shutdown(struct platform_device *pdev)
#ifdef CONFIG_PM
static int exynos_ohci_suspend(struct device *dev)
{
- struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
- struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
unsigned long flags;
int rc = 0;
@@ -271,10 +210,9 @@ static int exynos_ohci_suspend(struct device *dev)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (exynos_ohci->otg)
- exynos_ohci->otg->set_host(exynos_ohci->otg,
- &exynos_ohci->hcd->self);
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_disable(exynos_ohci);
+ exynos_ohci_phy_disable(pdev);
clk_disable_unprepare(exynos_ohci->clk);
@@ -286,16 +224,16 @@ fail:
static int exynos_ohci_resume(struct device *dev)
{
- struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
- struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
clk_prepare_enable(exynos_ohci->clk);
if (exynos_ohci->otg)
- exynos_ohci->otg->set_host(exynos_ohci->otg,
- &exynos_ohci->hcd->self);
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
- exynos_ohci_phy_enable(exynos_ohci);
+ exynos_ohci_phy_enable(pdev);
ohci_resume(hcd, false);
@@ -306,6 +244,10 @@ static int exynos_ohci_resume(struct device *dev)
#define exynos_ohci_resume NULL
#endif
+static const struct ohci_driver_overrides exynos_overrides __initconst = {
+ .extra_priv_size = sizeof(struct exynos_ohci_hcd),
+};
+
static const struct dev_pm_ops exynos_ohci_pm_ops = {
.suspend = exynos_ohci_suspend,
.resume = exynos_ohci_resume,
@@ -331,6 +273,23 @@ static struct platform_driver exynos_ohci_driver = {
.of_match_table = of_match_ptr(exynos_ohci_match),
}
};
+static int __init ohci_exynos_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
+ return platform_driver_register(&exynos_ohci_driver);
+}
+module_init(ohci_exynos_init);
+
+static void __exit ohci_exynos_cleanup(void)
+{
+ platform_driver_unregister(&exynos_ohci_driver);
+}
+module_exit(ohci_exynos_cleanup);
MODULE_ALIAS("platform:exynos-ohci");
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 604cad1bcf9..8ada13f8dde 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1161,10 +1161,12 @@ void ohci_init_driver(struct hc_driver *drv,
/* Copy the generic table to drv and then apply the overrides */
*drv = ohci_hc_driver;
- drv->product_desc = over->product_desc;
- drv->hcd_priv_size += over->extra_priv_size;
- if (over->reset)
- drv->reset = over->reset;
+ if (over) {
+ drv->product_desc = over->product_desc;
+ drv->hcd_priv_size += over->extra_priv_size;
+ if (over->reset)
+ drv->reset = over->reset;
+ }
}
EXPORT_SYMBOL_GPL(ohci_init_driver);
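With ohci_init_driver() now tolerating a NULL overrides pointer, every glue module follows the same registration shape whether or not it has anything to override. The sketch below is a minimal illustration of that shape, modelled on the converted exynos and spear glue in this series; the "foo" names, the foo_ohci_priv structure and the foo_ohci_reset() hook are placeholders, not part of this patch.

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "ohci.h"

#define DRIVER_DESC "FOO OHCI driver (illustrative sketch)"

static const char hcd_name[] = "ohci-foo";
static struct hc_driver __read_mostly foo_ohci_hc_driver;

/* Hypothetical per-controller state; usb_create_hcd() allocates this much
 * extra room because of .extra_priv_size below. */
struct foo_ohci_priv {
	struct clk *clk;
};

static int foo_ohci_reset(struct usb_hcd *hcd)
{
	/* Platform-specific setup would go here, then hand over to the core. */
	return ohci_setup(hcd);
}

static const struct ohci_driver_overrides foo_overrides __initconst = {
	.product_desc	 = "FOO OHCI",
	.extra_priv_size = sizeof(struct foo_ohci_priv),
	.reset		 = foo_ohci_reset,
};

static int foo_ohci_probe(struct platform_device *pdev)
{
	/* usb_create_hcd()/usb_add_hcd() would go here, as in the glue above. */
	return -ENODEV;
}

static int foo_ohci_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_ohci_driver = {
	.probe	= foo_ohci_probe,
	.remove	= foo_ohci_remove,
	.driver	= {
		.name	= "foo-ohci",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_ohci_init(void)
{
	if (usb_disabled())
		return -ENODEV;

	pr_info("%s: " DRIVER_DESC "\n", hcd_name);

	/* Glue with nothing to override may now pass NULL instead of &foo_overrides. */
	ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);
	return platform_driver_register(&foo_ohci_driver);
}
module_init(foo_ohci_init);

static void __exit foo_ohci_cleanup(void)
{
	platform_driver_unregister(&foo_ohci_driver);
}
module_exit(foo_ohci_cleanup);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");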
@@ -1179,46 +1181,6 @@ MODULE_LICENSE ("GPL");
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
-#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX)
-#include "ohci-s3c2410.c"
-#define S3C2410_PLATFORM_DRIVER ohci_hcd_s3c2410_driver
-#endif
-
-#ifdef CONFIG_USB_OHCI_EXYNOS
-#include "ohci-exynos.c"
-#define EXYNOS_PLATFORM_DRIVER exynos_ohci_driver
-#endif
-
-#ifdef CONFIG_USB_OHCI_HCD_OMAP1
-#include "ohci-omap.c"
-#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
-#endif
-
-#ifdef CONFIG_USB_OHCI_HCD_OMAP3
-#include "ohci-omap3.c"
-#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
-#endif
-
-#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
-#include "ohci-pxa27x.c"
-#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
-#endif
-
-#ifdef CONFIG_ARCH_EP93XX
-#include "ohci-ep93xx.c"
-#define EP93XX_PLATFORM_DRIVER ohci_hcd_ep93xx_driver
-#endif
-
-#ifdef CONFIG_ARCH_AT91
-#include "ohci-at91.c"
-#define AT91_PLATFORM_DRIVER ohci_hcd_at91_driver
-#endif
-
-#ifdef CONFIG_ARCH_LPC32XX
-#include "ohci-nxp.c"
-#define NXP_PLATFORM_DRIVER usb_hcd_nxp_driver
-#endif
-
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#include "ohci-da8xx.c"
#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
@@ -1229,11 +1191,6 @@ MODULE_LICENSE ("GPL");
#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
#endif
-#ifdef CONFIG_PLAT_SPEAR
-#include "ohci-spear.c"
-#define SPEAR_PLATFORM_DRIVER spear_ohci_hcd_driver
-#endif
-
#ifdef CONFIG_PPC_PS3
#include "ohci-ps3.c"
#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
@@ -1296,18 +1253,6 @@ static int __init ohci_hcd_mod_init(void)
goto error_platform;
#endif
-#ifdef OMAP1_PLATFORM_DRIVER
- retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_omap1_platform;
-#endif
-
-#ifdef OMAP3_PLATFORM_DRIVER
- retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_omap3_platform;
-#endif
-
#ifdef OF_PLATFORM_DRIVER
retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
@@ -1332,79 +1277,19 @@ static int __init ohci_hcd_mod_init(void)
goto error_tmio;
#endif
-#ifdef S3C2410_PLATFORM_DRIVER
- retval = platform_driver_register(&S3C2410_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_s3c2410;
-#endif
-
-#ifdef EXYNOS_PLATFORM_DRIVER
- retval = platform_driver_register(&EXYNOS_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_exynos;
-#endif
-
-#ifdef EP93XX_PLATFORM_DRIVER
- retval = platform_driver_register(&EP93XX_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_ep93xx;
-#endif
-
-#ifdef AT91_PLATFORM_DRIVER
- retval = platform_driver_register(&AT91_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_at91;
-#endif
-
-#ifdef NXP_PLATFORM_DRIVER
- retval = platform_driver_register(&NXP_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_nxp;
-#endif
-
#ifdef DAVINCI_PLATFORM_DRIVER
retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER);
if (retval < 0)
goto error_davinci;
#endif
-#ifdef SPEAR_PLATFORM_DRIVER
- retval = platform_driver_register(&SPEAR_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_spear;
-#endif
-
return retval;
/* Error path */
-#ifdef SPEAR_PLATFORM_DRIVER
- platform_driver_unregister(&SPEAR_PLATFORM_DRIVER);
- error_spear:
-#endif
#ifdef DAVINCI_PLATFORM_DRIVER
platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
error_davinci:
#endif
-#ifdef NXP_PLATFORM_DRIVER
- platform_driver_unregister(&NXP_PLATFORM_DRIVER);
- error_nxp:
-#endif
-#ifdef AT91_PLATFORM_DRIVER
- platform_driver_unregister(&AT91_PLATFORM_DRIVER);
- error_at91:
-#endif
-#ifdef EP93XX_PLATFORM_DRIVER
- platform_driver_unregister(&EP93XX_PLATFORM_DRIVER);
- error_ep93xx:
-#endif
-#ifdef EXYNOS_PLATFORM_DRIVER
- platform_driver_unregister(&EXYNOS_PLATFORM_DRIVER);
- error_exynos:
-#endif
-#ifdef S3C2410_PLATFORM_DRIVER
- platform_driver_unregister(&S3C2410_PLATFORM_DRIVER);
- error_s3c2410:
-#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
error_tmio:
@@ -1421,14 +1306,6 @@ static int __init ohci_hcd_mod_init(void)
platform_driver_unregister(&OF_PLATFORM_DRIVER);
error_of_platform:
#endif
-#ifdef OMAP3_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
- error_omap3_platform:
-#endif
-#ifdef OMAP1_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
- error_omap1_platform:
-#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
error_platform:
@@ -1450,27 +1327,9 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
-#ifdef SPEAR_PLATFORM_DRIVER
- platform_driver_unregister(&SPEAR_PLATFORM_DRIVER);
-#endif
#ifdef DAVINCI_PLATFORM_DRIVER
platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
#endif
-#ifdef NXP_PLATFORM_DRIVER
- platform_driver_unregister(&NXP_PLATFORM_DRIVER);
-#endif
-#ifdef AT91_PLATFORM_DRIVER
- platform_driver_unregister(&AT91_PLATFORM_DRIVER);
-#endif
-#ifdef EP93XX_PLATFORM_DRIVER
- platform_driver_unregister(&EP93XX_PLATFORM_DRIVER);
-#endif
-#ifdef EXYNOS_PLATFORM_DRIVER
- platform_driver_unregister(&EXYNOS_PLATFORM_DRIVER);
-#endif
-#ifdef S3C2410_PLATFORM_DRIVER
- platform_driver_unregister(&S3C2410_PLATFORM_DRIVER);
-#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
@@ -1483,12 +1342,6 @@ static void __exit ohci_hcd_mod_exit(void)
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
-#ifdef OMAP3_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
-#endif
-#ifdef OMAP1_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
-#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 2347ab83f04..61705a760e7 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -212,10 +212,11 @@ __acquires(ohci->lock)
/* Sometimes PCI D3 suspend trashes frame timings ... */
periodic_reinit (ohci);
- /* the following code is executed with ohci->lock held and
- * irqs disabled if and only if autostopped is true
+ /*
+ * The following code is executed with ohci->lock held and
+ * irqs disabled if and only if autostopped is true. This
+ * will cause sparse to warn about a "context imbalance".
*/
-
skip_resume:
/* interrupts might have been disabled */
ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable);
@@ -531,7 +532,7 @@ ohci_hub_descriptor (
temp |= 0x0010;
else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? */
temp |= 0x0008;
- desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ohci, temp);
+ desc->wHubCharacteristics = cpu_to_le16(temp);
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
rh = roothub_b (ohci);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 7d7d507d54e..e99db8a6d55 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -19,10 +19,19 @@
* or implied.
*/
#include <linux/clk.h>
-#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/usb/isp1301.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -57,6 +66,11 @@
#define start_int_umask(irq)
#endif
+#define DRIVER_DESC "OHCI NXP driver"
+
+static const char hcd_name[] = "ohci-nxp";
+static struct hc_driver __read_mostly ohci_nxp_hc_driver;
+
static struct i2c_client *isp1301_i2c_client;
extern int usb_disabled(void);
@@ -132,14 +146,14 @@ static inline void isp1301_vbus_off(void)
OTG1_VBUS_DRV);
}
-static void nxp_start_hc(void)
+static void ohci_nxp_start_hc(void)
{
unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
__raw_writel(tmp, USB_OTG_STAT_CONTROL);
isp1301_vbus_on();
}
-static void nxp_stop_hc(void)
+static void ohci_nxp_stop_hc(void)
{
unsigned long tmp;
isp1301_vbus_off();
@@ -147,68 +161,9 @@ static void nxp_stop_hc(void)
__raw_writel(tmp, USB_OTG_STAT_CONTROL);
}
-static int ohci_nxp_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop(hcd);
- return ret;
- }
- return 0;
-}
-
-static const struct hc_driver ohci_nxp_hc_driver = {
- .description = hcd_name,
- .product_desc = "nxp OHCI",
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- .hcd_priv_size = sizeof(struct ohci_hcd),
- /*
- * basic lifecycle operations
- */
- .start = ohci_nxp_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-static int usb_hcd_nxp_probe(struct platform_device *pdev)
+static int ohci_hcd_nxp_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd = 0;
- struct ohci_hcd *ohci;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
@@ -226,8 +181,9 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_disable;
dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
if (usb_disabled()) {
@@ -313,17 +269,15 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
goto fail_resource;
}
- nxp_start_hc();
+ ohci_nxp_start_hc();
platform_set_drvdata(pdev, hcd);
- ohci = hcd_to_ohci(hcd);
- ohci_hcd_init(ohci);
dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0)
return ret;
- nxp_stop_hc();
+ ohci_nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
fail_hcd:
@@ -345,12 +299,12 @@ fail_disable:
return ret;
}
-static int usb_hcd_nxp_remove(struct platform_device *pdev)
+static int ohci_hcd_nxp_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
- nxp_stop_hc();
+ ohci_nxp_stop_hc();
usb_put_hcd(hcd);
clk_disable(usb_pll_clk);
clk_put(usb_pll_clk);
@@ -366,20 +320,40 @@ static int usb_hcd_nxp_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:usb-ohci");
#ifdef CONFIG_OF
-static const struct of_device_id usb_hcd_nxp_match[] = {
+static const struct of_device_id ohci_hcd_nxp_match[] = {
{ .compatible = "nxp,ohci-nxp" },
{},
};
-MODULE_DEVICE_TABLE(of, usb_hcd_nxp_match);
+MODULE_DEVICE_TABLE(of, ohci_hcd_nxp_match);
#endif
-static struct platform_driver usb_hcd_nxp_driver = {
+static struct platform_driver ohci_hcd_nxp_driver = {
.driver = {
.name = "usb-ohci",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(usb_hcd_nxp_match),
+ .of_match_table = of_match_ptr(ohci_hcd_nxp_match),
},
- .probe = usb_hcd_nxp_probe,
- .remove = usb_hcd_nxp_remove,
+ .probe = ohci_hcd_nxp_probe,
+ .remove = ohci_hcd_nxp_remove,
};
+static int __init ohci_nxp_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_nxp_hc_driver, NULL);
+ return platform_driver_register(&ohci_hcd_nxp_driver);
+}
+module_init(ohci_nxp_init);
+
+static void __exit ohci_nxp_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_nxp_driver);
+}
+module_exit(ohci_nxp_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index 342dc7e543b..6c16dcef15c 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -127,8 +127,9 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
}
/* Ohci is a 32-bit device. */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
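The same DMA-mask conversion recurs across these glue files: the two open-coded dma_mask/coherent_dma_mask assignments become one dma_coerce_mask_and_coherent() call whose return value is actually checked. A minimal sketch of the converted step; foo_set_dma_mask() is an illustrative name, not from this patch.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Illustrative: the shape of the converted DMA-mask setup in a probe path. */
static int foo_set_dma_mask(struct platform_device *pdev)
{
	/*
	 * dma_coerce_mask_and_coherent() points dev->dma_mask at
	 * dev->coherent_dma_mask and then applies the 32-bit mask to both,
	 * so a failure can no longer be silently ignored.
	 */
	return dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}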
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 31d3a12eb48..f253214741b 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -14,12 +14,21 @@
* This file is licenced under the GPL.
*/
-#include <linux/signal.h>
-#include <linux/jiffies.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
#include <asm/io.h>
#include <asm/mach-types.h>
@@ -42,10 +51,7 @@
#define OMAP1510_LB_MMU_RAM_H 0xfffec234
#define OMAP1510_LB_MMU_RAM_L 0xfffec238
-
-#ifndef CONFIG_ARCH_OMAP
-#error "This file is OMAP bus glue. CONFIG_OMAP must be defined."
-#endif
+#define DRIVER_DESC "OHCI OMAP driver"
#ifdef CONFIG_TPS65010
#include <linux/i2c/tps65010.h>
@@ -68,8 +74,9 @@ extern int ocpi_enable(void);
static struct clk *usb_host_ck;
static struct clk *usb_dc_ck;
-static int host_enabled;
-static int host_initialized;
+
+static const char hcd_name[] = "ohci-omap";
+static struct hc_driver __read_mostly ohci_omap_hc_driver;
static void omap_ohci_clock_power(int on)
{
@@ -188,7 +195,7 @@ static void start_hnp(struct ohci_hcd *ohci)
/*-------------------------------------------------------------------------*/
-static int ohci_omap_init(struct usb_hcd *hcd)
+static int ohci_omap_reset(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct omap_usb_config *config = dev_get_platdata(hcd->self.controller);
@@ -198,9 +205,9 @@ static int ohci_omap_init(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "starting USB Controller\n");
if (config->otg) {
- ohci_to_hcd(ohci)->self.otg_port = config->otg;
+ hcd->self.otg_port = config->otg;
/* default/minimum OTG power budget: 8 mA */
- ohci_to_hcd(ohci)->power_budget = 8;
+ hcd->power_budget = 8;
}
/* boards can use OTG transceivers in non-OTG modes */
@@ -238,9 +245,15 @@ static int ohci_omap_init(struct usb_hcd *hcd)
omap_1510_local_bus_init();
}
- if ((ret = ohci_init(ohci)) < 0)
+ ret = ohci_setup(hcd);
+ if (ret < 0)
return ret;
+ if (config->otg || config->rwc) {
+ ohci->hc_control = OHCI_CTRL_RWC;
+ writel(OHCI_CTRL_RWC, &ohci->regs->control);
+ }
+
/* board-specific power switching and overcurrent support */
if (machine_is_omap_osk() || machine_is_omap_innovator()) {
u32 rh = roothub_a (ohci);
@@ -281,14 +294,6 @@ static int ohci_omap_init(struct usb_hcd *hcd)
return 0;
}
-static void ohci_omap_stop(struct usb_hcd *hcd)
-{
- dev_dbg(hcd->self.controller, "stopping USB Controller\n");
- ohci_stop(hcd);
- omap_ohci_clock_power(0);
-}
-
-
/*-------------------------------------------------------------------------*/
/**
@@ -304,7 +309,6 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
{
int retval, irq;
struct usb_hcd *hcd = 0;
- struct ohci_hcd *ohci;
if (pdev->num_resources != 2) {
printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
@@ -354,12 +358,6 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
goto err2;
}
- ohci = hcd_to_ohci(hcd);
- ohci_hcd_init(ohci);
-
- host_initialized = 0;
- host_enabled = 1;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = -ENXIO;
@@ -369,11 +367,6 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
if (retval)
goto err3;
- host_initialized = 1;
-
- if (!host_enabled)
- omap_ohci_clock_power(0);
-
return 0;
err3:
iounmap(hcd->regs);
@@ -402,7 +395,9 @@ err0:
static inline void
usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
+ dev_dbg(hcd->self.controller, "stopping USB Controller\n");
usb_remove_hcd(hcd);
+ omap_ohci_clock_power(0);
if (!IS_ERR_OR_NULL(hcd->phy)) {
(void) otg_set_host(hcd->phy->otg, 0);
usb_put_phy(hcd->phy);
@@ -418,76 +413,6 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
/*-------------------------------------------------------------------------*/
-static int
-ohci_omap_start (struct usb_hcd *hcd)
-{
- struct omap_usb_config *config;
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- if (!host_enabled)
- return 0;
- config = dev_get_platdata(hcd->self.controller);
- if (config->otg || config->rwc) {
- ohci->hc_control = OHCI_CTRL_RWC;
- writel(OHCI_CTRL_RWC, &ohci->regs->control);
- }
-
- if ((ret = ohci_run (ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop (hcd);
- return ret;
- }
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_omap_hc_driver = {
- .description = hcd_name,
- .product_desc = "OMAP OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_omap_init,
- .start = ohci_omap_start,
- .stop = ohci_omap_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
static int ohci_hcd_omap_drv_probe(struct platform_device *dev)
{
return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev);
@@ -506,16 +431,23 @@ static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
-static int ohci_omap_suspend(struct platform_device *dev, pm_message_t message)
+static int ohci_omap_suspend(struct platform_device *pdev, pm_message_t message)
{
- struct ohci_hcd *ohci = hcd_to_ohci(platform_get_drvdata(dev));
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
omap_ohci_clock_power(0);
- return 0;
+ return ret;
}
static int ohci_omap_resume(struct platform_device *dev)
@@ -553,4 +485,29 @@ static struct platform_driver ohci_hcd_omap_driver = {
},
};
+static const struct ohci_driver_overrides omap_overrides __initconst = {
+ .product_desc = "OMAP OHCI",
+ .reset = ohci_omap_reset
+};
+
+static int __init ohci_omap_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_omap_hc_driver, &omap_overrides);
+ return platform_driver_register(&ohci_hcd_omap_driver);
+}
+module_init(ohci_omap_init);
+
+static void __exit ohci_omap_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_omap_driver);
+}
+module_exit(ohci_omap_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:ohci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index a09af26f69e..21457417a85 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -29,90 +29,22 @@
* - add kernel-doc
*/
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/usb/otg.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/dma-mapping.h>
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_omap3_init(struct usb_hcd *hcd)
-{
- dev_dbg(hcd->self.controller, "starting OHCI controller\n");
-
- return ohci_init(hcd_to_ohci(hcd));
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_omap3_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- /*
- * RemoteWakeupConnected has to be set explicitly before
- * calling ohci_run. The reset value of RWC is 0.
- */
- ohci->hc_control = OHCI_CTRL_RWC;
- writel(OHCI_CTRL_RWC, &ohci->regs->control);
-
- ret = ohci_run(ohci);
-
- if (ret < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop(hcd);
- }
-
- return ret;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_omap3_hc_driver = {
- .description = hcd_name,
- .product_desc = "OMAP3 OHCI Host Controller",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_omap3_init,
- .start = ohci_omap3_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
+#include "ohci.h"
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
+#define DRIVER_DESC "OHCI OMAP3 driver"
-/*-------------------------------------------------------------------------*/
+static const char hcd_name[] = "ohci-omap3";
+static struct hc_driver __read_mostly ohci_omap3_hc_driver;
/*
* configure so an HC device and id are always provided
@@ -129,10 +61,11 @@ static const struct hc_driver ohci_omap3_hc_driver = {
static int ohci_hcd_omap3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ohci_hcd *ohci;
struct usb_hcd *hcd = NULL;
void __iomem *regs = NULL;
struct resource *res;
- int ret = -ENODEV;
+ int ret;
int irq;
if (usb_disabled())
@@ -166,11 +99,11 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_io;
+ ret = -ENODEV;
hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
dev_name(dev));
if (!hcd) {
@@ -185,7 +118,12 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ohci_hcd_init(hcd_to_ohci(hcd));
+ ohci = hcd_to_ohci(hcd);
+ /*
+ * RemoteWakeupConnected has to be set explicitly before
+ * calling ohci_run. The reset value of RWC is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
ret = usb_add_hcd(hcd, irq, 0);
if (ret) {
@@ -248,5 +186,25 @@ static struct platform_driver ohci_hcd_omap3_driver = {
},
};
+static int __init ohci_omap3_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_omap3_hc_driver, NULL);
+ return platform_driver_register(&ohci_hcd_omap3_driver);
+}
+module_init(ohci_omap3_init);
+
+static void __exit ohci_omap3_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_omap3_driver);
+}
+module_exit(ohci_omap3_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:ohci-omap3");
MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ec337c2bd5e..90879e9ccbe 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -150,28 +150,16 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
static int ohci_quirk_amd700(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct pci_dev *amd_smbus_dev;
- u8 rev;
if (usb_amd_find_chipset_info())
ohci->flags |= OHCI_QUIRK_AMD_PLL;
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
- if (!amd_smbus_dev)
- return 0;
-
- rev = amd_smbus_dev->revision;
-
/* SB800 needs pre-fetch fix */
- if ((rev >= 0x40) && (rev <= 0x4f)) {
+ if (usb_amd_prefetch_quirk()) {
ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
}
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
-
return 0;
}
@@ -323,3 +311,4 @@ module_exit(ohci_pci_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: ehci_pci");
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index a4c6410f0ed..f351ff5b171 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -139,14 +139,21 @@ static int ohci_platform_remove(struct platform_device *dev)
static int ohci_platform_suspend(struct device *dev)
{
- struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ohci_pdata *pdata = dev->platform_data;
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
if (pdata->power_suspend)
pdata->power_suspend(pdev);
- return 0;
+ return ret;
}
static int ohci_platform_resume(struct device *dev)
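The ohci-platform, ohci-sm501, ohci-pxa27x and ohci-omap suspend paths all gain the same shape in this series: quiesce the controller through ohci_suspend(), honouring the device wakeup setting, before cutting clocks or power. A condensed sketch of that shape; foo_power_down() stands in for whatever platform-specific step follows and is not a real function.

#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "ohci.h"

static void foo_power_down(struct device *dev);	/* platform-specific, elided */

static int foo_ohci_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	bool do_wakeup = device_may_wakeup(dev);
	int ret;

	/* Let the OHCI core stop the controller and arm wakeup first ... */
	ret = ohci_suspend(hcd, do_wakeup);
	if (ret)
		return ret;

	/* ... and only then remove clocks/power, as the hunks above do. */
	foo_power_down(dev);
	return 0;
}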
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 75f5a1e2f01..81f3eba215c 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -14,6 +14,8 @@
*/
#include <linux/signal.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/prom.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 93371a235e8..e89ac4d4b87 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -19,15 +19,26 @@
* This file is licenced under the GPL.
*/
-#include <linux/device.h>
-#include <linux/signal.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
-#include <mach/hardware.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/usb-pxa3xx-ulpi.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include <mach/hardware.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI PXA27x/PXA3x driver"
/*
* UHC: USB Host Controller (OHCI-like) register definitions
@@ -101,16 +112,16 @@
#define PXA_UHC_MAX_PORTNUM 3
-struct pxa27x_ohci {
- /* must be 1st member here for hcd_to_ohci() to work */
- struct ohci_hcd ohci;
+static const char hcd_name[] = "ohci-pxa27x";
- struct device *dev;
+static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
+
+struct pxa27x_ohci {
struct clk *clk;
void __iomem *mmio_base;
};
-#define to_pxa27x_ohci(hcd) (struct pxa27x_ohci *)hcd_to_ohci(hcd)
+#define to_pxa27x_ohci(hcd) (struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv)
/*
PMM_NPS_MODE -- PMM Non-power switching mode
@@ -122,10 +133,10 @@ struct pxa27x_ohci {
PMM_PERPORT_MODE -- PMM per port switching mode
Ports are powered individually.
*/
-static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *ohci, int mode)
+static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
{
- uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
- uint32_t uhcrhdb = __raw_readl(ohci->mmio_base + UHCRHDB);
+ uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
+ uint32_t uhcrhdb = __raw_readl(pxa_ohci->mmio_base + UHCRHDB);
switch (mode) {
case PMM_NPS_MODE:
@@ -149,20 +160,18 @@ static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *ohci, int mode)
uhcrhda |= RH_A_NPS;
}
- __raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
- __raw_writel(uhcrhdb, ohci->mmio_base + UHCRHDB);
+ __raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
+ __raw_writel(uhcrhdb, pxa_ohci->mmio_base + UHCRHDB);
return 0;
}
-extern int usb_disabled(void);
-
/*-------------------------------------------------------------------------*/
-static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
+static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
struct pxaohci_platform_data *inf)
{
- uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
- uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
+ uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
+ uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
if (inf->flags & ENABLE_PORT1)
uhchr &= ~UHCHR_SSEP1;
@@ -194,17 +203,17 @@ static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
}
- __raw_writel(uhchr, ohci->mmio_base + UHCHR);
- __raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
+ __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+ __raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
}
-static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci)
+static inline void pxa27x_reset_hc(struct pxa27x_ohci *pxa_ohci)
{
- uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
+ uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
- __raw_writel(uhchr | UHCHR_FHR, ohci->mmio_base + UHCHR);
+ __raw_writel(uhchr | UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
udelay(11);
- __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
+ __raw_writel(uhchr & ~UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
}
#ifdef CONFIG_PXA27x
@@ -213,25 +222,26 @@ extern void pxa27x_clear_otgph(void);
#define pxa27x_clear_otgph() do {} while (0)
#endif
-static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
+static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
int retval = 0;
struct pxaohci_platform_data *inf;
uint32_t uhchr;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
inf = dev_get_platdata(dev);
- clk_prepare_enable(ohci->clk);
+ clk_prepare_enable(pxa_ohci->clk);
- pxa27x_reset_hc(ohci);
+ pxa27x_reset_hc(pxa_ohci);
- uhchr = __raw_readl(ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
- __raw_writel(uhchr, ohci->mmio_base + UHCHR);
+ uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
+ __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
- while (__raw_readl(ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
+ while (__raw_readl(pxa_ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
cpu_relax();
- pxa27x_setup_hc(ohci, inf);
+ pxa27x_setup_hc(pxa_ohci, inf);
if (inf->init)
retval = inf->init(dev);
@@ -240,38 +250,39 @@ static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
return retval;
if (cpu_is_pxa3xx())
- pxa3xx_u2d_start_hc(&ohci_to_hcd(&ohci->ohci)->self);
+ pxa3xx_u2d_start_hc(&hcd->self);
- uhchr = __raw_readl(ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
- __raw_writel(uhchr, ohci->mmio_base + UHCHR);
- __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, ohci->mmio_base + UHCHIE);
+ uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
+ __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+ __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);
/* Clear any OTG Pin Hold */
pxa27x_clear_otgph();
return 0;
}
-static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev)
+static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
struct pxaohci_platform_data *inf;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
uint32_t uhccoms;
inf = dev_get_platdata(dev);
if (cpu_is_pxa3xx())
- pxa3xx_u2d_stop_hc(&ohci_to_hcd(&ohci->ohci)->self);
+ pxa3xx_u2d_stop_hc(&hcd->self);
if (inf->exit)
inf->exit(dev);
- pxa27x_reset_hc(ohci);
+ pxa27x_reset_hc(pxa_ohci);
/* Host Controller Reset */
- uhccoms = __raw_readl(ohci->mmio_base + UHCCOMS) | 0x01;
- __raw_writel(uhccoms, ohci->mmio_base + UHCCOMS);
+ uhccoms = __raw_readl(pxa_ohci->mmio_base + UHCCOMS) | 0x01;
+ __raw_writel(uhccoms, pxa_ohci->mmio_base + UHCCOMS);
udelay(10);
- clk_disable_unprepare(ohci->clk);
+ clk_disable_unprepare(pxa_ohci->clk);
}
#ifdef CONFIG_OF
@@ -287,6 +298,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct pxaohci_platform_data *pdata;
u32 tmp;
+ int ret;
if (!np)
return 0;
@@ -295,10 +307,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -356,7 +367,8 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
int retval, irq;
struct usb_hcd *hcd;
struct pxaohci_platform_data *inf;
- struct pxa27x_ohci *ohci;
+ struct pxa27x_ohci *pxa_ohci;
+ struct ohci_hcd *ohci;
struct resource *r;
struct clk *usb_clk;
@@ -409,29 +421,31 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
}
/* initialize "struct pxa27x_ohci" */
- ohci = (struct pxa27x_ohci *)hcd_to_ohci(hcd);
- ohci->dev = &pdev->dev;
- ohci->clk = usb_clk;
- ohci->mmio_base = (void __iomem *)hcd->regs;
+ pxa_ohci = to_pxa27x_ohci(hcd);
+ pxa_ohci->clk = usb_clk;
+ pxa_ohci->mmio_base = (void __iomem *)hcd->regs;
- if ((retval = pxa27x_start_hc(ohci, &pdev->dev)) < 0) {
+ retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
+ if (retval < 0) {
pr_debug("pxa27x_start_hc failed");
goto err3;
}
/* Select Power Management Mode */
- pxa27x_ohci_select_pmm(ohci, inf->port_mode);
+ pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
if (inf->power_budget)
hcd->power_budget = inf->power_budget;
- ohci_hcd_init(hcd_to_ohci(hcd));
+ /* The value of NDP in roothub_a is incorrect on this hardware */
+ ohci = hcd_to_ohci(hcd);
+ ohci->num_ports = 3;
retval = usb_add_hcd(hcd, irq, 0);
if (retval == 0)
return retval;
- pxa27x_stop_hc(ohci, &pdev->dev);
+ pxa27x_stop_hc(pxa_ohci, &pdev->dev);
err3:
iounmap(hcd->regs);
err2:
@@ -459,88 +473,18 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
*/
void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
- struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
usb_remove_hcd(hcd);
- pxa27x_stop_hc(ohci, &pdev->dev);
+ pxa27x_stop_hc(pxa_ohci, &pdev->dev);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ clk_put(pxa_ohci->clk);
usb_put_hcd(hcd);
- clk_put(ohci->clk);
}
/*-------------------------------------------------------------------------*/
-static int
-ohci_pxa27x_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- ohci_dbg (ohci, "ohci_pxa27x_start, ohci:%p", ohci);
-
- /* The value of NDP in roothub_a is incorrect on this hardware */
- ohci->num_ports = 3;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run (ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start %s",
- hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_pxa27x_hc_driver = {
- .description = hcd_name,
- .product_desc = "PXA27x OHCI",
- .hcd_priv_size = sizeof(struct pxa27x_ohci),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_pxa27x_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
{
pr_debug ("In ohci_hcd_pxa27x_drv_probe");
@@ -563,32 +507,42 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
+
- if (time_before(jiffies, ohci->ohci.next_statechange))
+ if (time_before(jiffies, ohci->next_statechange))
msleep(5);
- ohci->ohci.next_statechange = jiffies;
+ ohci->next_statechange = jiffies;
- pxa27x_stop_hc(ohci, dev);
- return 0;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
+ pxa27x_stop_hc(pxa_ohci, dev);
+ return ret;
}
static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
struct pxaohci_platform_data *inf = dev_get_platdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int status;
- if (time_before(jiffies, ohci->ohci.next_statechange))
+ if (time_before(jiffies, ohci->next_statechange))
msleep(5);
- ohci->ohci.next_statechange = jiffies;
+ ohci->next_statechange = jiffies;
- if ((status = pxa27x_start_hc(ohci, dev)) < 0)
+ status = pxa27x_start_hc(pxa_ohci, dev);
+ if (status < 0)
return status;
/* Select Power Management Mode */
- pxa27x_ohci_select_pmm(ohci, inf->port_mode);
+ pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
ohci_resume(hcd, false);
return 0;
@@ -600,9 +554,6 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
};
#endif
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:pxa27x-ohci");
-
static struct platform_driver ohci_hcd_pxa27x_driver = {
.probe = ohci_hcd_pxa27x_drv_probe,
.remove = ohci_hcd_pxa27x_drv_remove,
@@ -617,3 +568,27 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
},
};
+static const struct ohci_driver_overrides pxa27x_overrides __initconst = {
+ .extra_priv_size = sizeof(struct pxa27x_ohci),
+};
+
+static int __init ohci_pxa27x_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
+ return platform_driver_register(&ohci_hcd_pxa27x_driver);
+}
+module_init(ohci_pxa27x_init);
+
+static void __exit ohci_pxa27x_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_pxa27x_driver);
+}
+module_exit(ohci_pxa27x_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa27x-ohci");
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 4919afa4125..f90101b9cdb 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -19,19 +19,36 @@
* This file is licenced under the GPL.
*/
-#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/usb-ohci-s3c2410.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
#define valid_port(idx) ((idx) == 1 || (idx) == 2)
/* clock device associated with the hcd */
+
+#define DRIVER_DESC "OHCI S3C2410 driver"
+
+static const char hcd_name[] = "ohci-s3c2410";
+
static struct clk *clk;
static struct clk *usb_clk;
/* forward definitions */
+static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength);
+static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
+
static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
/* conversion functions */
@@ -47,10 +64,10 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
- clk_enable(usb_clk);
+ clk_prepare_enable(usb_clk);
mdelay(2); /* let the bus clock stabilise */
- clk_enable(clk);
+ clk_prepare_enable(clk);
if (info != NULL) {
info->hcd = hcd;
@@ -75,8 +92,8 @@ static void s3c2410_stop_hc(struct platform_device *dev)
(info->enable_oc)(info, 0);
}
- clk_disable(clk);
- clk_disable(usb_clk);
+ clk_disable_unprepare(clk);
+ clk_disable_unprepare(usb_clk);
}
/* ohci_s3c2410_hub_status_data
@@ -93,7 +110,7 @@ ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
int orig;
int portno;
- orig = ohci_hub_status_data(hcd, buf);
+ orig = orig_ohci_hub_status_data(hcd, buf);
if (info == NULL)
return orig;
@@ -164,7 +181,7 @@ static int ohci_s3c2410_hub_control(
* process the request straight away and exit */
if (info == NULL) {
- ret = ohci_hub_control(hcd, typeReq, wValue,
+ ret = orig_ohci_hub_control(hcd, typeReq, wValue,
wIndex, buf, wLength);
goto out;
}
@@ -214,7 +231,7 @@ static int ohci_s3c2410_hub_control(
break;
}
- ret = ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ ret = orig_ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
if (ret)
goto out;
@@ -374,8 +391,6 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
s3c2410_start_hc(dev, hcd);
- ohci_hcd_init(hcd_to_ohci(hcd));
-
retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
if (retval != 0)
goto err_ioremap;
@@ -392,71 +407,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
/*-------------------------------------------------------------------------*/
-static int
-ohci_s3c2410_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ret = ohci_init(ohci);
- if (ret < 0)
- return ret;
-
- ret = ohci_run(ohci);
- if (ret < 0) {
- dev_err(hcd->self.controller, "can't start %s\n",
- hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-
-static const struct hc_driver ohci_s3c2410_hc_driver = {
- .description = hcd_name,
- .product_desc = "S3C24XX OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_s3c2410_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_s3c2410_hub_status_data,
- .hub_control = ohci_s3c2410_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/* device driver */
+static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
{
@@ -533,4 +484,39 @@ static struct platform_driver ohci_hcd_s3c2410_driver = {
},
};
+static int __init ohci_s3c2410_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&ohci_s3c2410_hc_driver, NULL);
+
+ /*
+ * The Samsung HW has some unusual quirks, which require
+ * Samsung-specific workarounds. We override certain hc_driver
+ * functions here to achieve that. We explicitly do not enhance
+ * ohci_driver_overrides to allow this more easily, since this
+ * is an unusual case, and we don't want to encourage others to
+ * override these functions by making it too easy.
+ */
+
+ orig_ohci_hub_control = ohci_s3c2410_hc_driver.hub_control;
+ orig_ohci_hub_status_data = ohci_s3c2410_hc_driver.hub_status_data;
+
+ ohci_s3c2410_hc_driver.hub_status_data = ohci_s3c2410_hub_status_data;
+ ohci_s3c2410_hc_driver.hub_control = ohci_s3c2410_hub_control;
+
+ return platform_driver_register(&ohci_hcd_s3c2410_driver);
+}
+module_init(ohci_s3c2410_init);
+
+static void __exit ohci_s3c2410_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_s3c2410_driver);
+}
+module_exit(ohci_s3c2410_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c2410-ohci");
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 17b2a7dad77..aa9e127bbe7 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -185,6 +185,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
if (usb_disabled())
return -ENODEV;
+ /*
+ * We don't call dma_set_mask_and_coherent() here because the
+ * DMA mask has already been appropriately set up by the core
+ * SA-1111 bus code (which includes bug workarounds).
+ */
+
hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index d479d5ddab8..2a5de5fecd8 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -216,14 +216,21 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
{
struct device *dev = &pdev->dev;
- struct ohci_hcd *ohci = hcd_to_ohci(platform_get_drvdata(pdev));
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
- return 0;
+ return ret;
}
static int ohci_sm501_resume(struct platform_device *pdev)
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index cc9dd9e4f05..6b02107d281 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -11,92 +11,37 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/signal.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+#define DRIVER_DESC "OHCI SPEAr driver"
+
+static const char hcd_name[] = "SPEAr-ohci";
struct spear_ohci {
- struct ohci_hcd ohci;
struct clk *clk;
};
-#define to_spear_ohci(hcd) (struct spear_ohci *)hcd_to_ohci(hcd)
-
-static void spear_start_ohci(struct spear_ohci *ohci)
-{
- clk_prepare_enable(ohci->clk);
-}
-
-static void spear_stop_ohci(struct spear_ohci *ohci)
-{
- clk_disable_unprepare(ohci->clk);
-}
-
-static int ohci_spear_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ret = ohci_init(ohci);
- if (ret < 0)
- return ret;
- ohci->regs = hcd->regs;
-
- ret = ohci_run(ohci);
- if (ret < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop(hcd);
- return ret;
- }
+#define to_spear_ohci(hcd) (struct spear_ohci *)(hcd_to_ohci(hcd)->priv)
- create_debug_files(ohci);
-
-#ifdef DEBUG
- ohci_dump(ohci, 1);
-#endif
- return 0;
-}
-
-static const struct hc_driver ohci_spear_hc_driver = {
- .description = hcd_name,
- .product_desc = "SPEAr OHCI",
- .hcd_priv_size = sizeof(struct spear_ohci),
-
- /* generic hardware linkage */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /* basic lifecycle operations */
- .start = ohci_spear_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
-
- /* managing i/o requests and associated device resources */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /* scheduling support */
- .get_frame_number = ohci_get_frame,
-
- /* root hub support */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-
- .start_port_reset = ohci_start_port_reset,
-};
+static struct hc_driver __read_mostly ohci_spear_hc_driver;
static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
{
const struct hc_driver *driver = &ohci_spear_hc_driver;
+ struct ohci_hcd *ohci;
struct usb_hcd *hcd = NULL;
struct clk *usbh_clk;
- struct spear_ohci *ohci_p;
+ struct spear_ohci *sohci_p;
struct resource *res;
int retval, irq;
@@ -111,10 +56,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
@@ -151,16 +95,18 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- ohci_p = (struct spear_ohci *)hcd_to_ohci(hcd);
- ohci_p->clk = usbh_clk;
- spear_start_ohci(ohci_p);
- ohci_hcd_init(hcd_to_ohci(hcd));
+ sohci_p = to_spear_ohci(hcd);
+ sohci_p->clk = usbh_clk;
+
+ clk_prepare_enable(sohci_p->clk);
+
+ ohci = hcd_to_ohci(hcd);
retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
if (retval == 0)
return retval;
- spear_stop_ohci(ohci_p);
+ clk_disable_unprepare(sohci_p->clk);
err_put_hcd:
usb_put_hcd(hcd);
fail:
@@ -172,11 +118,11 @@ fail:
static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct spear_ohci *ohci_p = to_spear_ohci(hcd);
+ struct spear_ohci *sohci_p = to_spear_ohci(hcd);
usb_remove_hcd(hcd);
- if (ohci_p->clk)
- spear_stop_ohci(ohci_p);
+ if (sohci_p->clk)
+ clk_disable_unprepare(sohci_p->clk);
usb_put_hcd(hcd);
return 0;
@@ -188,13 +134,14 @@ static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct spear_ohci *ohci_p = to_spear_ohci(hcd);
+ struct spear_ohci *sohci_p = to_spear_ohci(hcd);
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
- spear_stop_ohci(ohci_p);
+ clk_disable_unprepare(sohci_p->clk);
+
return 0;
}
@@ -202,13 +149,13 @@ static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct spear_ohci *ohci_p = to_spear_ohci(hcd);
+ struct spear_ohci *sohci_p = to_spear_ohci(hcd);
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
- spear_start_ohci(ohci_p);
+ clk_prepare_enable(sohci_p->clk);
ohci_resume(hcd, false);
return 0;
}
@@ -234,4 +181,28 @@ static struct platform_driver spear_ohci_hcd_driver = {
},
};
+static const struct ohci_driver_overrides spear_overrides __initconst = {
+ .extra_priv_size = sizeof(struct spear_ohci),
+};
+static int __init ohci_spear_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
+ return platform_driver_register(&spear_ohci_hcd_driver);
+}
+module_init(ohci_spear_init);
+
+static void __exit ohci_spear_cleanup(void)
+{
+ platform_driver_unregister(&spear_ohci_hcd_driver);
+}
+module_exit(ohci_spear_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Deepak Sikri");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spear-ohci");
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 08ef2829a7e..dfbdd3aefe9 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -79,11 +79,30 @@
#define USB_INTEL_USB3_PSSEN 0xD8
#define USB_INTEL_USB3PRM 0xDC
+/*
+ * amd_chipset_gen values represent AMD different chipset generations
+ */
+enum amd_chipset_gen {
+ NOT_AMD_CHIPSET = 0,
+ AMD_CHIPSET_SB600,
+ AMD_CHIPSET_SB700,
+ AMD_CHIPSET_SB800,
+ AMD_CHIPSET_HUDSON2,
+ AMD_CHIPSET_BOLTON,
+ AMD_CHIPSET_YANGTZE,
+ AMD_CHIPSET_UNKNOWN,
+};
+
+struct amd_chipset_type {
+ enum amd_chipset_gen gen;
+ u8 rev;
+};
+
static struct amd_chipset_info {
struct pci_dev *nb_dev;
struct pci_dev *smbus_dev;
int nb_type;
- int sb_type;
+ struct amd_chipset_type sb_type;
int isoc_reqs;
int probe_count;
int probe_result;
@@ -91,6 +110,51 @@ static struct amd_chipset_info {
static DEFINE_SPINLOCK(amd_lock);
+/*
+ * amd_chipset_sb_type_init - initialize AMD chipset southbridge type
+ *
+ * The AMD FCH/SB generation and revision are identified by the SMBus
+ * controller vendor, device and revision IDs.
+ *
+ * Returns: 1 if it is an AMD chipset, 0 otherwise.
+ */
+static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+{
+ u8 rev = 0;
+ pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
+
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x10 && rev <= 0x1f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB600;
+ else if (rev >= 0x30 && rev <= 0x3f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ else if (rev >= 0x40 && rev <= 0x4f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+
+ if (!pinfo->smbus_dev) {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
+
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ }
+
+ pinfo->sb_type.rev = rev;
+ return 1;
+}
+
void sb800_prefetch(struct device *dev, int on)
{
u16 misc;
@@ -106,7 +170,6 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
int usb_amd_find_chipset_info(void)
{
- u8 rev = 0;
unsigned long flags;
struct amd_chipset_info info;
int ret;
@@ -122,27 +185,17 @@ int usb_amd_find_chipset_info(void)
memset(&info, 0, sizeof(info));
spin_unlock_irqrestore(&amd_lock, flags);
- info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
- if (info.smbus_dev) {
- rev = info.smbus_dev->revision;
- if (rev >= 0x40)
- info.sb_type = 1;
- else if (rev >= 0x30 && rev <= 0x3b)
- info.sb_type = 3;
- } else {
- info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x780b, NULL);
- if (!info.smbus_dev) {
- ret = 0;
- goto commit;
- }
-
- rev = info.smbus_dev->revision;
- if (rev >= 0x11 && rev <= 0x18)
- info.sb_type = 2;
+ if (!amd_chipset_sb_type_init(&info)) {
+ ret = 0;
+ goto commit;
}
- if (info.sb_type == 0) {
+ /* Below chipset generations needn't enable AMD PLL quirk */
+ if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
+ info.sb_type.gen == AMD_CHIPSET_SB600 ||
+ info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+ (info.sb_type.gen == AMD_CHIPSET_SB700 &&
+ info.sb_type.rev > 0x3b)) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
@@ -197,6 +250,39 @@ commit:
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
+{
+ /* Make sure the AMD chipset type has already been initialized */
+ usb_amd_find_chipset_info();
+ if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
+ return 0;
+
+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+ return 1;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
+
+bool usb_amd_hang_symptom_quirk(void)
+{
+ u8 rev;
+
+ usb_amd_find_chipset_info();
+ rev = amd_chipset.sb_type.rev;
+ /* SB600 and old versions of SB700 have the hang symptom bug */
+ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
+ (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
+ rev >= 0x3a && rev <= 0x3b);
+}
+EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
+
+bool usb_amd_prefetch_quirk(void)
+{
+ usb_amd_find_chipset_info();
+ /* SB800 needs the pre-fetch fix */
+ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
+}
+EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
+
/*
* The hardware normally enables the A-link power management feature, which
* lets the system lower the power consumption in idle states.
@@ -229,7 +315,9 @@ static void usb_amd_quirk_pll(int disable)
}
}
- if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
outb_p(AB_REG_BAR_LOW, 0xcd6);
addr_low = inb_p(0xcd7);
outb_p(AB_REG_BAR_HIGH, 0xcd6);
@@ -240,7 +328,8 @@ static void usb_amd_quirk_pll(int disable)
outl_p(0x40, AB_DATA(addr));
outl_p(0x34, AB_INDX(addr));
val = inl_p(AB_DATA(addr));
- } else if (amd_chipset.sb_type == 3) {
+ } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
+ amd_chipset.sb_type.rev <= 0x3b) {
pci_read_config_dword(amd_chipset.smbus_dev,
AB_REG_BAR_SB700, &addr);
outl(AX_INDXC, AB_INDX(addr));
@@ -353,7 +442,7 @@ void usb_amd_dev_put(void)
amd_chipset.nb_dev = NULL;
amd_chipset.smbus_dev = NULL;
amd_chipset.nb_type = 0;
- amd_chipset.sb_type = 0;
+ memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
amd_chipset.isoc_reqs = 0;
amd_chipset.probe_result = 0;
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index ed6700d00fe..638e88f7a28 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -5,6 +5,8 @@
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
int usb_amd_find_chipset_info(void);
+bool usb_amd_hang_symptom_quirk(void);
+bool usb_amd_prefetch_quirk(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
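/*
 * Illustrative sketch only, not part of this patch: a host controller
 * driver could consume the new boolean helpers instead of comparing
 * amd_chipset.sb_type values directly. example_apply_amd_quirks() and
 * the quirk flag are hypothetical names; sb800_prefetch() is the
 * existing exported helper shown above.
 */
static void example_apply_amd_quirks(struct device *dev, unsigned int *flags)
{
	/* SB600 / early SB700 need the hang-symptom workaround */
	if (usb_amd_hang_symptom_quirk())
		*flags |= 0x1;		/* hypothetical "disable autostop" flag */

	/* SB800 needs the ISOC prefetch fix */
	if (usb_amd_prefetch_quirk())
		sb800_prefetch(dev, 1);
}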
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5477bf5df21..79620c39217 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1413,7 +1413,7 @@ static int sl811h_show(struct seq_file *s, void *unused)
case SL11H_CTL1MASK_SE0: s = " se0/reset"; break;
case SL11H_CTL1MASK_K: s = " k/resume"; break;
default: s = "j"; break;
- }; s; }),
+ } s; }),
(t & SL11H_CTL1MASK_LSPD) ? " lowspeed" : "",
(t & SL11H_CTL1MASK_SUSPEND) ? " suspend" : "");
@@ -1446,7 +1446,7 @@ static int sl811h_show(struct seq_file *s, void *unused)
case USB_PID_SETUP: s = "setup"; break;
case USB_PID_ACK: s = "status"; break;
default: s = "?"; break;
- }; s;}),
+ } s;}),
ep->maxpacket,
ep->nak_count, ep->error_count);
list_for_each_entry (urb, &ep->hep->urb_list, urb_list) {
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 74af2c6287d..0196f766df7 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -163,8 +163,7 @@ static int ssb_hcd_probe(struct ssb_device *dev,
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
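/*
 * For reference, a sketch (not part of the patch): dma_set_mask_and_coherent()
 * folds the two-step mask setup into a single call that fails if either
 * mask cannot be satisfied, so
 *
 *	if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
 *		return -EOPNOTSUPP;
 *
 * behaves like calling dma_set_mask() and then dma_set_coherent_mask()
 * with the same 32-bit mask, as the old code did.
 */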
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 45573754652..8e239cdd95d 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -310,14 +310,14 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
unsigned short portsc1, portsc2;
- usbcmd = uhci_readw(uhci, 0);
- usbstat = uhci_readw(uhci, 2);
- usbint = uhci_readw(uhci, 4);
- usbfrnum = uhci_readw(uhci, 6);
- flbaseadd = uhci_readl(uhci, 8);
- sof = uhci_readb(uhci, 12);
- portsc1 = uhci_readw(uhci, 16);
- portsc2 = uhci_readw(uhci, 18);
+ usbcmd = uhci_readw(uhci, USBCMD);
+ usbstat = uhci_readw(uhci, USBSTS);
+ usbint = uhci_readw(uhci, USBINTR);
+ usbfrnum = uhci_readw(uhci, USBFRNUM);
+ flbaseadd = uhci_readl(uhci, USBFLBASEADD);
+ sof = uhci_readb(uhci, USBSOF);
+ portsc1 = uhci_readw(uhci, USBPORTSC1);
+ portsc2 = uhci_readw(uhci, USBPORTSC2);
out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
usbcmd,
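/*
 * Reference sketch, not part of the patch: the symbolic names used above
 * stand for the standard UHCI I/O register offsets that the old code
 * hard-coded:
 *
 *	USBCMD       0x00    USBSTS       0x02
 *	USBINTR      0x04    USBFRNUM     0x06
 *	USBFLBASEADD 0x08    USBSOF       0x0c
 *	USBPORTSC1   0x10    USBPORTSC2   0x12
 *
 * The change is purely cosmetic; the same registers are read as before.
 */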
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 9189bc984c9..93e17b12fb3 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -75,8 +75,6 @@ static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf)
return !!*buf;
}
-#define OK(x) len = (x); break
-
#define CLR_RH_PORTSTAT(x) \
status = uhci_readw(uhci, port_addr); \
status &= ~(RWC_BITS|WZ_BITS); \
@@ -244,7 +242,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- int status, lstatus, retval = 0, len = 0;
+ int status, lstatus, retval = 0;
unsigned int port = wIndex - 1;
unsigned long port_addr = USBPORTSC1 + 2 * port;
u16 wPortChange, wPortStatus;
@@ -258,7 +256,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case GetHubStatus:
*(__le32 *)buf = cpu_to_le32(0);
- OK(4); /* hub power */
+ retval = 4; /* hub power */
+ break;
case GetPortStatus:
if (port >= uhci->rh_numports)
goto err;
@@ -311,13 +310,14 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
*(__le16 *)buf = cpu_to_le16(wPortStatus);
*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
- OK(4);
+ retval = 4;
+ break;
case SetHubFeature: /* We don't implement these */
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
- OK(0);
+ break;
default:
goto err;
}
@@ -329,7 +329,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
SET_RH_PORTSTAT(USBPORTSC_SUSP);
- OK(0);
+ break;
case USB_PORT_FEAT_RESET:
SET_RH_PORTSTAT(USBPORTSC_PR);
@@ -338,10 +338,10 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* USB v2.0 7.1.7.5 */
uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
- OK(0);
+ break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
- OK(0);
+ break;
default:
goto err;
}
@@ -356,10 +356,10 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Disable terminates Resume signalling */
uhci_finish_suspend(uhci, port, port_addr);
- OK(0);
+ break;
case USB_PORT_FEAT_C_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PEC);
- OK(0);
+ break;
case USB_PORT_FEAT_SUSPEND:
if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) {
@@ -382,32 +382,32 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
uhci->ports_timeout = jiffies +
msecs_to_jiffies(20);
}
- OK(0);
+ break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(port, &uhci->port_c_suspend);
- OK(0);
+ break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
goto err;
case USB_PORT_FEAT_C_CONNECTION:
CLR_RH_PORTSTAT(USBPORTSC_CSC);
- OK(0);
+ break;
case USB_PORT_FEAT_C_OVER_CURRENT:
CLR_RH_PORTSTAT(USBPORTSC_OCC);
- OK(0);
+ break;
case USB_PORT_FEAT_C_RESET:
/* this driver won't report these */
- OK(0);
+ break;
default:
goto err;
}
break;
case GetHubDescriptor:
- len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
- memcpy(buf, root_hub_hub_des, len);
- if (len > 2)
+ retval = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
+ memcpy(buf, root_hub_hub_des, retval);
+ if (retval > 2)
buf[2] = uhci->rh_numports;
- OK(len);
+ break;
default:
err:
retval = -EPIPE;
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 0f228c46eed..4cd79888804 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -162,6 +162,8 @@ static void uhci_shutdown(struct pci_dev *pdev)
#ifdef CONFIG_PM
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated);
+
static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
@@ -174,12 +176,6 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done_okay; /* Already suspended or dead */
- if (uhci->rh_state > UHCI_RH_SUSPENDED) {
- dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n");
- rc = -EBUSY;
- goto done;
- };
-
/* All PCI host controllers are required to disable IRQ generation
* at the source, so we must turn off PIRQ.
*/
@@ -195,8 +191,15 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
done_okay:
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-done:
spin_unlock_irq(&uhci->lock);
+
+ synchronize_irq(hcd->irq);
+
+ /* Check for race with a wakeup request */
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ uhci_pci_resume(hcd, false);
+ rc = -EBUSY;
+ }
return rc;
}
@@ -299,3 +302,5 @@ static struct pci_driver uhci_pci_driver = {
},
#endif
};
+
+MODULE_SOFTDEP("pre: ehci_pci");
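/*
 * Sketch of the suspend/wakeup race closed above, not part of the patch:
 * after HCD_FLAG_HW_ACCESSIBLE is cleared, a wakeup request may already
 * be in flight, so the code waits for any running interrupt handler with
 * synchronize_irq() and then checks HCD_WAKEUP_PENDING(). If a wakeup
 * sneaked in, the controller is resumed again and -EBUSY is returned so
 * that the suspend attempt is aborted rather than losing the wakeup.
 */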
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index d033a0ec7f0..3003fefaa96 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -75,10 +75,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
pdev->name);
@@ -105,8 +104,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
uhci->regs = hcd->regs;
- ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED |
- IRQF_SHARED);
+ ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
if (ret)
goto err_uhci;
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index ecc88db804e..1b0888f8da9 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -134,7 +134,7 @@ static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
default:
ret = asl_urb_enqueue(whc, urb, mem_flags);
break;
- };
+ }
return ret;
}
@@ -160,7 +160,7 @@ static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
default:
ret = asl_urb_dequeue(whc, urb, status);
break;
- };
+ }
return ret;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e8b4c56dcf6..805f2348eeb 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -296,7 +296,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
/* Wait for last stop endpoint command to finish */
timeleft = wait_for_completion_interruptible_timeout(
cmd->completion,
- USB_CTRL_SET_TIMEOUT);
+ XHCI_CMD_DEFAULT_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
timeleft == 0 ? "Timeout" : "Signal");
@@ -524,7 +524,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
* the compliance mode timer is deleted. A port won't enter
* compliance mode if it has previously entered U0.
*/
-void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
+static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
+ u16 wIndex)
{
u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 83bcd13622c..49b8bd063fa 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1693,9 +1693,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- struct dev_info *dev_info, *next;
struct xhci_cd *cur_cd, *next_cd;
- unsigned long flags;
int size;
int i, j, num_ports;
@@ -1756,13 +1754,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
scratchpad_free(xhci);
- spin_lock_irqsave(&xhci->lock, flags);
- list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
- list_del(&dev_info->list);
- kfree(dev_info);
- }
- spin_unlock_irqrestore(&xhci->lock, flags);
-
if (!xhci->rh_bw)
goto no_bw;
@@ -2231,7 +2222,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
u32 page_size, temp;
int i;
- INIT_LIST_HEAD(&xhci->lpm_failed_devs);
INIT_LIST_HEAD(&xhci->cancel_cmd_list);
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6bfbd80ec2b..1e2f3f49584 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -178,7 +178,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->type == TYPE_EVENT &&
last_trb_on_last_seg(xhci, ring,
ring->deq_seg, ring->dequeue)) {
- ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ ring->cycle_state ^= 1;
}
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
@@ -726,7 +726,7 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
- struct xhci_td *cur_td, int status, char *adjective)
+ struct xhci_td *cur_td, int status)
{
struct usb_hcd *hcd;
struct urb *urb;
@@ -765,10 +765,9 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
* 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
* bit cleared) so that the HW will skip over them.
*/
-static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, struct xhci_event_cmd *event)
{
- unsigned int slot_id;
unsigned int ep_index;
struct xhci_virt_device *virt_dev;
struct xhci_ring *ep_ring;
@@ -779,10 +778,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_dequeue_state deq_state;
- if (unlikely(TRB_TO_SUSPEND_PORT(
- le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
- slot_id = TRB_TO_SLOT_ID(
- le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
+ if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
virt_dev = xhci->devs[slot_id];
if (virt_dev)
handle_cmd_in_cmd_wait_list(xhci, virt_dev,
@@ -795,7 +791,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
}
memset(&deq_state, 0, sizeof(deq_state));
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -891,7 +886,7 @@ remove_finished_td:
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
- xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -1001,7 +996,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN, "killed");
+ -ESHUTDOWN);
}
while (!list_empty(&temp_ep->cancelled_td_list)) {
cur_td = list_first_entry(
@@ -1010,7 +1005,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN, "killed");
+ -ESHUTDOWN);
}
}
}
@@ -1077,11 +1072,9 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
* endpoint doorbell to restart the ring, but only if there aren't more
* cancellations pending.
*/
-static void handle_set_deq_completion(struct xhci_hcd *xhci,
- struct xhci_event_cmd *event,
- union xhci_trb *trb)
+static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 cmd_comp_code)
{
- unsigned int slot_id;
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
@@ -1089,7 +1082,6 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
@@ -1107,11 +1099,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
- if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
+ if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
- switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
+ switch (cmd_comp_code) {
case COMP_TRB_ERR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
"of stream ID configuration\n");
@@ -1134,7 +1126,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
default:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
"completion code of %u.\n",
- GET_COMP_CODE(le32_to_cpu(event->status)));
+ cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
@@ -1171,21 +1163,17 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
-static void handle_reset_ep_completion(struct xhci_hcd *xhci,
- struct xhci_event_cmd *event,
- union xhci_trb *trb)
+static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 cmd_comp_code)
{
- int slot_id;
unsigned int ep_index;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
- "Ignoring reset ep completion code of %u",
- GET_COMP_CODE(le32_to_cpu(event->status)));
+ "Ignoring reset ep completion code of %u", cmd_comp_code);
/* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here
@@ -1386,21 +1374,149 @@ static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
return cur_trb_is_good;
}
+static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
+ u32 cmd_comp_code)
+{
+ if (cmd_comp_code == COMP_SUCCESS)
+ xhci->slot_id = slot_id;
+ else
+ xhci->slot_id = 0;
+ complete(&xhci->addr_dev);
+}
+
+static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *virt_dev;
+
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return;
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+ xhci_free_virt_device(xhci, slot_id);
+}
+
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_event_cmd *event, u32 cmd_comp_code)
+{
+ struct xhci_virt_device *virt_dev;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ unsigned int ep_index;
+ unsigned int ep_state;
+ u32 add_flags, drop_flags;
+
+ virt_dev = xhci->devs[slot_id];
+ if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+ return;
+ /*
+ * Configure endpoint commands can come from the USB core
+ * configuration or alt setting changes, or because the HW
+ * needed an extra configure endpoint command after a reset
+ * endpoint command or streams were being configured.
+ * If the command was for a halted endpoint, the xHCI driver
+ * is not waiting on the configure endpoint command.
+ */
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "Could not get input context, bad type.\n");
+ return;
+ }
+
+ add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+ drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+ /* Input ctx add_flags are the endpoint index plus one */
+ ep_index = xhci_last_valid_endpoint(add_flags) - 1;
+
+ /* A usb_set_interface() call directly after clearing a halted
+ * condition may race on this quirky hardware. Not worth
+ * worrying about, since this is prototype hardware. Not sure
+ * if this will work for streams, but streams support was
+ * untested on this prototype.
+ */
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+ ep_index != (unsigned int) -1 &&
+ add_flags - SLOT_FLAG == drop_flags) {
+ ep_state = virt_dev->eps[ep_index].ep_state;
+ if (!(ep_state & EP_HALTED))
+ goto bandwidth_change;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Completed config ep cmd - "
+ "last ep index = %d, state = %d",
+ ep_index, ep_state);
+ /* Clear internal halted state and restart ring(s) */
+ virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ return;
+ }
+bandwidth_change:
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Completed config ep cmd");
+ virt_dev->cmd_status = cmd_comp_code;
+ complete(&virt_dev->cmd_completion);
+ return;
+}
+
+static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_event_cmd *event, u32 cmd_comp_code)
+{
+ struct xhci_virt_device *virt_dev;
+
+ virt_dev = xhci->devs[slot_id];
+ if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+ return;
+ virt_dev->cmd_status = cmd_comp_code;
+ complete(&virt_dev->cmd_completion);
+}
+
+static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
+ u32 cmd_comp_code)
+{
+ xhci->devs[slot_id]->cmd_status = cmd_comp_code;
+ complete(&xhci->addr_dev);
+}
+
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_event_cmd *event)
+{
+ struct xhci_virt_device *virt_dev;
+
+ xhci_dbg(xhci, "Completed reset device command.\n");
+ virt_dev = xhci->devs[slot_id];
+ if (virt_dev)
+ handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+ else
+ xhci_warn(xhci, "Reset device command completion "
+ "for disabled slot %u\n", slot_id);
+}
+
+static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+{
+ if (!(xhci->quirks & XHCI_NEC_HOST)) {
+ xhci->error_bitmask |= 1 << 6;
+ return;
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "NEC firmware version %2x.%02x",
+ NEC_FW_MAJOR(le32_to_cpu(event->status)),
+ NEC_FW_MINOR(le32_to_cpu(event->status)));
+}
+
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
- struct xhci_input_control_ctx *ctrl_ctx;
- struct xhci_virt_device *virt_dev;
- unsigned int ep_index;
- struct xhci_ring *ep_ring;
- unsigned int ep_state;
+ u32 cmd_comp_code;
+ union xhci_trb *cmd_trb;
+ u32 cmd_type;
cmd_dma = le64_to_cpu(event->cmd_trb);
+ cmd_trb = xhci->cmd_ring->dequeue;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue);
+ cmd_trb);
/* Is the command ring deq ptr out of sync with the deq seg ptr? */
if (cmd_dequeue_dma == 0) {
xhci->error_bitmask |= 1 << 4;
@@ -1412,19 +1528,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
return;
}
- trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
- (struct xhci_generic_trb *) event);
+ trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
- if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
- (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
+ cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+ if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
/* If the return value is 0, we think the trb pointed by
* command ring dequeue pointer is a good trb. The good
* trb means we don't want to cancel the trb, but it has
* been stopped by the host. So we should handle it normally.
* Otherwise, driver should invoke inc_deq() and return.
*/
- if (handle_stopped_cmd_ring(xhci,
- GET_COMP_CODE(le32_to_cpu(event->status)))) {
+ if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
inc_deq(xhci, xhci->cmd_ring);
return;
}
@@ -1436,117 +1550,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
return;
}
- switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
- & TRB_TYPE_BITMASK) {
- case TRB_TYPE(TRB_ENABLE_SLOT):
- if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
- xhci->slot_id = slot_id;
- else
- xhci->slot_id = 0;
- complete(&xhci->addr_dev);
+ cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+ switch (cmd_type) {
+ case TRB_ENABLE_SLOT:
+ xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
break;
- case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id]) {
- if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
- /* Delete default control endpoint resources */
- xhci_free_device_endpoint_resources(xhci,
- xhci->devs[slot_id], true);
- xhci_free_virt_device(xhci, slot_id);
- }
+ case TRB_DISABLE_SLOT:
+ xhci_handle_cmd_disable_slot(xhci, slot_id);
break;
- case TRB_TYPE(TRB_CONFIG_EP):
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- break;
- /*
- * Configure endpoint commands can come from the USB core
- * configuration or alt setting changes, or because the HW
- * needed an extra configure endpoint command after a reset
- * endpoint command or streams were being configured.
- * If the command was for a halted endpoint, the xHCI driver
- * is not waiting on the configure endpoint command.
- */
- ctrl_ctx = xhci_get_input_control_ctx(xhci,
- virt_dev->in_ctx);
- if (!ctrl_ctx) {
- xhci_warn(xhci, "Could not get input context, bad type.\n");
- break;
- }
- /* Input ctx add_flags are the endpoint index plus one */
- ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
- /* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware. Not worth
- * worrying about, since this is prototype hardware. Not sure
- * if this will work for streams, but streams support was
- * untested on this prototype.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_index != (unsigned int) -1 &&
- le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
- le32_to_cpu(ctrl_ctx->drop_flags)) {
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
- ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
- if (!(ep_state & EP_HALTED))
- goto bandwidth_change;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Completed config ep cmd - "
- "last ep index = %d, state = %d",
- ep_index, ep_state);
- /* Clear internal halted state and restart ring(s) */
- xhci->devs[slot_id]->eps[ep_index].ep_state &=
- ~EP_HALTED;
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- break;
- }
-bandwidth_change:
- xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
- "Completed config ep cmd");
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(le32_to_cpu(event->status));
- complete(&xhci->devs[slot_id]->cmd_completion);
+ case TRB_CONFIG_EP:
+ xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
break;
- case TRB_TYPE(TRB_EVAL_CONTEXT):
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- break;
- xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
- complete(&xhci->devs[slot_id]->cmd_completion);
+ case TRB_EVAL_CONTEXT:
+ xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
break;
- case TRB_TYPE(TRB_ADDR_DEV):
- xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
- complete(&xhci->addr_dev);
+ case TRB_ADDR_DEV:
+ xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
break;
- case TRB_TYPE(TRB_STOP_RING):
- handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
+ case TRB_STOP_RING:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
break;
- case TRB_TYPE(TRB_SET_DEQ):
- handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+ case TRB_SET_DEQ:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
- case TRB_TYPE(TRB_CMD_NOOP):
+ case TRB_CMD_NOOP:
break;
- case TRB_TYPE(TRB_RESET_EP):
- handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+ case TRB_RESET_EP:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
- case TRB_TYPE(TRB_RESET_DEV):
- xhci_dbg(xhci, "Completed reset device command.\n");
- slot_id = TRB_TO_SLOT_ID(
- le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
- else
- xhci_warn(xhci, "Reset device command completion "
- "for disabled slot %u\n", slot_id);
+ case TRB_RESET_DEV:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_reset_dev(xhci, slot_id, event);
break;
- case TRB_TYPE(TRB_NEC_GET_FW):
- if (!(xhci->quirks & XHCI_NEC_HOST)) {
- xhci->error_bitmask |= 1 << 6;
- break;
- }
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "NEC firmware version %2x.%02x",
- NEC_FW_MAJOR(le32_to_cpu(event->status)),
- NEC_FW_MINOR(le32_to_cpu(event->status)));
+ case TRB_NEC_GET_FW:
+ xhci_handle_cmd_nec_get_fw(xhci, event);
break;
default:
/* Skip over unknown commands on the event ring */
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6e0d886bcce..4265b48856f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3459,7 +3459,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Wait for the Reset Device command to finish */
timeleft = wait_for_completion_interruptible_timeout(
reset_device_cmd->completion,
- USB_CTRL_SET_TIMEOUT);
+ XHCI_CMD_DEFAULT_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for reset device command\n",
timeleft == 0 ? "Timeout" : "Signal");
@@ -3583,11 +3583,6 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
- if (udev->usb2_hw_lpm_enabled) {
- xhci_set_usb2_hardware_lpm(hcd, udev, 0);
- udev->usb2_hw_lpm_enabled = 0;
- }
-
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = xhci_readl(xhci, &xhci->op_regs->status);
@@ -3721,9 +3716,6 @@ disable_slot:
* the device).
* We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
* we should only issue and wait on one address command at the same time.
- *
- * We add one to the device address issued by the hardware because the USB core
- * uses address 1 for the root hubs (even though they're not really devices).
*/
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
@@ -3868,16 +3860,13 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
slot_ctx->dev_info >> 27);
- /* Use kernel assigned address for devices; store xHC assigned
- * address locally. */
- virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
- + 1;
/* Zero the input context control for later use */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
- "Internal device address = %d", virt_dev->address);
+ "Internal device address = %d",
+ le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
return 0;
}
@@ -4025,133 +4014,6 @@ static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
-static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
- struct usb_device *udev)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct dev_info *dev_info;
- __le32 __iomem **port_array;
- __le32 __iomem *addr, *pm_addr;
- u32 temp, dev_id;
- unsigned int port_num;
- unsigned long flags;
- int hird;
- int ret;
-
- if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
- !udev->lpm_capable)
- return -EINVAL;
-
- /* we only support lpm for non-hub device connected to root hub yet */
- if (!udev->parent || udev->parent->parent ||
- udev->descriptor.bDeviceClass == USB_CLASS_HUB)
- return -EINVAL;
-
- spin_lock_irqsave(&xhci->lock, flags);
-
- /* Look for devices in lpm_failed_devs list */
- dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
- le16_to_cpu(udev->descriptor.idProduct);
- list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
- if (dev_info->dev_id == dev_id) {
- ret = -EINVAL;
- goto finish;
- }
- }
-
- port_array = xhci->usb2_ports;
- port_num = udev->portnum - 1;
-
- if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
- ret = -EINVAL;
- goto finish;
- }
-
- /*
- * Test USB 2.0 software LPM.
- * FIXME: some xHCI 1.0 hosts may implement a new register to set up
- * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
- * in the June 2011 errata release.
- */
- xhci_dbg(xhci, "test port %d software LPM\n", port_num);
- /*
- * Set L1 Device Slot and HIRD/BESL.
- * Check device's USB 2.0 extension descriptor to determine whether
- * HIRD or BESL shoule be used. See USB2.0 LPM errata.
- */
- pm_addr = port_array[port_num] + PORTPMSC;
- hird = xhci_calculate_hird_besl(xhci, udev);
- temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
- xhci_writel(xhci, temp, pm_addr);
-
- /* Set port link state to U2(L1) */
- addr = port_array[port_num];
- xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
-
- /* wait for ACK */
- spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(10);
- spin_lock_irqsave(&xhci->lock, flags);
-
- /* Check L1 Status */
- ret = xhci_handshake(xhci, pm_addr,
- PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
- if (ret != -ETIMEDOUT) {
- /* enter L1 successfully */
- temp = xhci_readl(xhci, addr);
- xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
- port_num, temp);
- ret = 0;
- } else {
- temp = xhci_readl(xhci, pm_addr);
- xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
- port_num, temp & PORT_L1S_MASK);
- ret = -EINVAL;
- }
-
- /* Resume the port */
- xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
-
- spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(10);
- spin_lock_irqsave(&xhci->lock, flags);
-
- /* Clear PLC */
- xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
-
- /* Check PORTSC to make sure the device is in the right state */
- if (!ret) {
- temp = xhci_readl(xhci, addr);
- xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
- if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
- (temp & PORT_PLS_MASK) != XDEV_U0) {
- xhci_dbg(xhci, "port L1 resume fail\n");
- ret = -EINVAL;
- }
- }
-
- if (ret) {
- /* Insert dev to lpm_failed_devs list */
- xhci_warn(xhci, "device LPM test failed, may disconnect and "
- "re-enumerate\n");
- dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
- if (!dev_info) {
- ret = -ENOMEM;
- goto finish;
- }
- dev_info->dev_id = dev_id;
- INIT_LIST_HEAD(&dev_info->list);
- list_add(&dev_info->list, &xhci->lpm_failed_devs);
- } else {
- xhci_ring_device(xhci, udev->slot_id);
- }
-
-finish:
- spin_unlock_irqrestore(&xhci->lock, flags);
- return ret;
-}
-
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
@@ -4228,7 +4090,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
}
pm_val &= ~PORT_HIRD_MASK;
- pm_val |= PORT_HIRD(hird) | PORT_RWE;
+ pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
xhci_writel(xhci, pm_val, pm_addr);
pm_val = xhci_readl(xhci, pm_addr);
pm_val |= PORT_HLE;
@@ -4236,7 +4098,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
/* flush write */
xhci_readl(xhci, pm_addr);
} else {
- pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
+ pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
xhci_writel(xhci, pm_val, pm_addr);
/* flush write */
xhci_readl(xhci, pm_addr);
@@ -4279,24 +4141,26 @@ static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int ret;
int portnum = udev->portnum - 1;
- ret = xhci_usb2_software_lpm_test(hcd, udev);
- if (!ret) {
- xhci_dbg(xhci, "software LPM test succeed\n");
- if (xhci->hw_lpm_support == 1 &&
- xhci_check_usb2_port_capability(xhci, portnum, XHCI_HLC)) {
- udev->usb2_hw_lpm_capable = 1;
- udev->l1_params.timeout = XHCI_L1_TIMEOUT;
- udev->l1_params.besl = XHCI_DEFAULT_BESL;
- if (xhci_check_usb2_port_capability(xhci, portnum,
- XHCI_BLC))
- udev->usb2_hw_lpm_besl_capable = 1;
- ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
- if (!ret)
- udev->usb2_hw_lpm_enabled = 1;
- }
+ if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
+ !udev->lpm_capable)
+ return 0;
+
+ /* we only support lpm for non-hub device connected to root hub yet */
+ if (!udev->parent || udev->parent->parent ||
+ udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return 0;
+
+ if (xhci->hw_lpm_support == 1 &&
+ xhci_check_usb2_port_capability(
+ xhci, portnum, XHCI_HLC)) {
+ udev->usb2_hw_lpm_capable = 1;
+ udev->l1_params.timeout = XHCI_L1_TIMEOUT;
+ udev->l1_params.besl = XHCI_DEFAULT_BESL;
+ if (xhci_check_usb2_port_capability(xhci, portnum,
+ XHCI_BLC))
+ udev->usb2_hw_lpm_besl_capable = 1;
}
return 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 941d5f59e4d..03c74b7965f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -383,6 +383,7 @@ struct xhci_op_regs {
#define PORT_RWE (1 << 3)
#define PORT_HIRD(p) (((p) & 0xf) << 4)
#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS_MASK (0xff << 8)
#define PORT_L1DS(p) (((p) & 0xff) << 8)
#define PORT_HLE (1 << 16)
@@ -934,8 +935,6 @@ struct xhci_virt_device {
/* Rings saved to ensure old alt settings can be re-instated */
struct xhci_ring **ring_cache;
int num_rings_cached;
- /* Store xHC assigned device address */
- int address;
#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
struct completion cmd_completion;
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index aa28ac8c760..b4152820d65 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -120,7 +120,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
struct usb_host_endpoint *e;
e = alt->endpoint + ep;
- switch (e->desc.bmAttributes) {
+ switch (usb_endpoint_type(&e->desc)) {
case USB_ENDPOINT_XFER_BULK:
break;
case USB_ENDPOINT_XFER_ISOC:
@@ -437,7 +437,7 @@ alloc_sglist(int nents, int max, int vary)
if (max == 0)
return NULL;
- sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
+ sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, nents);
@@ -573,7 +573,7 @@ static int is_good_config(struct usbtest_dev *tdev, int len)
{
struct usb_config_descriptor *config;
- if (len < sizeof *config)
+ if (len < sizeof(*config))
return 0;
config = (struct usb_config_descriptor *) tdev->buf;
@@ -606,6 +606,76 @@ static int is_good_config(struct usbtest_dev *tdev, int len)
return 0;
}
+static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
+{
+ struct usb_ext_cap_descriptor *ext;
+ u32 attr;
+
+ ext = (struct usb_ext_cap_descriptor *) buf;
+
+ if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
+ ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
+ return 0;
+ }
+
+ attr = le32_to_cpu(ext->bmAttributes);
+ /* bits[1:4] are used and the others are reserved */
+ if (attr & ~0x1e) { /* reserved == 0 */
+ ERROR(tdev, "reserved bits set\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
+{
+ struct usb_ss_cap_descriptor *ss;
+
+ ss = (struct usb_ss_cap_descriptor *) buf;
+
+ if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
+ ERROR(tdev, "bogus superspeed device capability descriptor length\n");
+ return 0;
+ }
+
+ /*
+ * only bit[1] of bmAttributes is used for LTM and others are
+ * reserved
+ */
+ if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
+ ERROR(tdev, "reserved bits set in bmAttributes\n");
+ return 0;
+ }
+
+ /* bits[0:3] of wSpeedSupported are used and the others are reserved */
+ if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
+ ERROR(tdev, "reserved bits set in wSpeedSupported\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
+{
+ struct usb_ss_container_id_descriptor *con_id;
+
+ con_id = (struct usb_ss_container_id_descriptor *) buf;
+
+ if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
+ ERROR(tdev, "bogus container id descriptor length\n");
+ return 0;
+ }
+
+ if (con_id->bReserved) { /* reserved == 0 */
+ ERROR(tdev, "reserved bits set\n");
+ return 0;
+ }
+
+ return 1;
+}
+
/* sanity test for standard requests working with usb_control_mesg() and some
* of the utility functions which use it.
*
@@ -683,12 +753,96 @@ static int ch9_postconfig(struct usbtest_dev *dev)
/* there's always [9.4.3] a device descriptor [9.6.1] */
retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
- dev->buf, sizeof udev->descriptor);
- if (retval != sizeof udev->descriptor) {
+ dev->buf, sizeof(udev->descriptor));
+ if (retval != sizeof(udev->descriptor)) {
dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
+ /*
+ * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
+ * 3.0 spec
+ */
+ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0300) {
+ struct usb_bos_descriptor *bos = NULL;
+ struct usb_dev_cap_header *header = NULL;
+ unsigned total, num, length;
+ u8 *buf;
+
+ retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
+ sizeof(*udev->bos->desc));
+ if (retval != sizeof(*udev->bos->desc)) {
+ dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
+ return (retval < 0) ? retval : -EDOM;
+ }
+
+ bos = (struct usb_bos_descriptor *)dev->buf;
+ total = le16_to_cpu(bos->wTotalLength);
+ num = bos->bNumDeviceCaps;
+
+ if (total > TBUF_SIZE)
+ total = TBUF_SIZE;
+
+ /*
+ * get generic device-level capability descriptors [9.6.2]
+ * in USB 3.0 spec
+ */
+ retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
+ total);
+ if (retval != total) {
+ dev_err(&iface->dev, "bos descriptor set --> %d\n",
+ retval);
+ return (retval < 0) ? retval : -EDOM;
+ }
+
+ length = sizeof(*udev->bos->desc);
+ buf = dev->buf;
+ for (i = 0; i < num; i++) {
+ buf += length;
+ if (buf + sizeof(struct usb_dev_cap_header) >
+ dev->buf + total)
+ break;
+
+ header = (struct usb_dev_cap_header *)buf;
+ length = header->bLength;
+
+ if (header->bDescriptorType !=
+ USB_DT_DEVICE_CAPABILITY) {
+ dev_warn(&udev->dev, "not device capability descriptor, skip\n");
+ continue;
+ }
+
+ switch (header->bDevCapabilityType) {
+ case USB_CAP_TYPE_EXT:
+ if (buf + USB_DT_USB_EXT_CAP_SIZE >
+ dev->buf + total ||
+ !is_good_ext(dev, buf)) {
+ dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
+ return -EDOM;
+ }
+ break;
+ case USB_SS_CAP_TYPE:
+ if (buf + USB_DT_USB_SS_CAP_SIZE >
+ dev->buf + total ||
+ !is_good_ss_cap(dev, buf)) {
+ dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
+ return -EDOM;
+ }
+ break;
+ case CONTAINER_ID_TYPE:
+ if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
+ dev->buf + total ||
+ !is_good_con_id(dev, buf)) {
+ dev_err(&iface->dev, "bogus container id descriptor\n");
+ return -EDOM;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
/* there's always [9.4.3] at least one config descriptor [9.6.3] */
for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
@@ -954,7 +1108,7 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
* device, but some are chosen to trigger protocol stalls
* or short reads.
*/
- memset(&req, 0, sizeof req);
+ memset(&req, 0, sizeof(req));
req.bRequest = USB_REQ_GET_DESCRIPTOR;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
@@ -1074,7 +1228,7 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
if (!u)
goto cleanup;
- reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
+ reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
if (!reqp)
goto cleanup;
reqp->setup = req;
@@ -1667,13 +1821,13 @@ test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
if (param->sglen > 10)
return -EDOM;
- memset(&context, 0, sizeof context);
+ memset(&context, 0, sizeof(context));
context.count = param->iterations * param->sglen;
context.dev = dev;
init_completion(&context.done);
spin_lock_init(&context.lock);
- memset(urbs, 0, sizeof urbs);
+ memset(urbs, 0, sizeof(urbs));
udev = testdev_to_usbdev(dev);
dev_info(&dev->intf->dev,
"... iso period %d %sframes, wMaxPacket %04x\n",
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index c258a97ef1b..57dfc0cedb0 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -75,6 +75,7 @@ config USB_MUSB_TUSB6010
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
depends on ARCH_OMAP2PLUS
+ select GENERIC_PHY
config USB_MUSB_AM35X
tristate "AM35x"
@@ -90,7 +91,7 @@ config USB_MUSB_BLACKFIN
depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523)
config USB_MUSB_UX500
- tristate "U8500 and U5500"
+ tristate "Ux500 platforms"
endchoice
@@ -112,7 +113,7 @@ choice
allow using DMA on multiplatform kernels.
config USB_UX500_DMA
- bool 'ST Ericsson U8500 and U5500'
+ bool 'ST Ericsson Ux500'
depends on USB_MUSB_UX500
help
Enable DMA transfers on UX500 platforms.
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 5c310c66421..ca45b39db5b 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -89,7 +89,6 @@ struct am35x_glue {
struct clk *phy_clk;
struct clk *clk;
};
-#define glue_to_musb(g) platform_get_drvdata(g->musb)
/*
* am35x_musb_enable - enable interrupts
@@ -452,14 +451,18 @@ static const struct musb_platform_ops am35x_ops = {
.set_vbus = am35x_musb_set_vbus,
};
-static u64 am35x_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info am35x_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
static int am35x_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct am35x_glue *glue;
-
+ struct platform_device_info pinfo;
struct clk *phy_clk;
struct clk *clk;
@@ -471,12 +474,6 @@ static int am35x_probe(struct platform_device *pdev)
goto err0;
}
- musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
- if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
- goto err1;
- }
-
phy_clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(phy_clk)) {
dev_err(&pdev->dev, "failed to get PHY clock\n");
@@ -503,12 +500,7 @@ static int am35x_probe(struct platform_device *pdev)
goto err6;
}
- musb->dev.parent = &pdev->dev;
- musb->dev.dma_mask = &am35x_dmamask;
- musb->dev.coherent_dma_mask = am35x_dmamask;
-
glue->dev = &pdev->dev;
- glue->musb = musb;
glue->phy_clk = phy_clk;
glue->clk = clk;
@@ -516,22 +508,17 @@ static int am35x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, glue);
- ret = platform_device_add_resources(musb, pdev->resource,
- pdev->num_resources);
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto err7;
- }
-
- ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
- if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
- goto err7;
- }
-
- ret = platform_device_add(musb);
- if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ pinfo = am35x_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
goto err7;
}
@@ -550,9 +537,6 @@ err4:
clk_put(phy_clk);
err3:
- platform_device_put(musb);
-
-err1:
kfree(glue);
err0:
@@ -615,23 +599,16 @@ static int am35x_resume(struct device *dev)
return 0;
}
-
-static struct dev_pm_ops am35x_pm_ops = {
- .suspend = am35x_suspend,
- .resume = am35x_resume,
-};
-
-#define DEV_PM_OPS &am35x_pm_ops
-#else
-#define DEV_PM_OPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(am35x_pm_ops, am35x_suspend, am35x_resume);
+
static struct platform_driver am35x_driver = {
.probe = am35x_probe,
.remove = am35x_remove,
.driver = {
.name = "musb-am35x",
- .pm = DEV_PM_OPS,
+ .pm = &am35x_pm_ops,
},
};
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 72e2056b608..d9692f78e22 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -561,23 +561,16 @@ static int bfin_resume(struct device *dev)
return 0;
}
-
-static struct dev_pm_ops bfin_pm_ops = {
- .suspend = bfin_suspend,
- .resume = bfin_resume,
-};
-
-#define DEV_PM_OPS &bfin_pm_ops
-#else
-#define DEV_PM_OPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(bfin_pm_ops, bfin_suspend, bfin_resume);
+
static struct platform_driver bfin_driver = {
.probe = bfin_probe,
.remove = __exit_p(bfin_remove),
.driver = {
.name = "musb-blackfin",
- .pm = DEV_PM_OPS,
+ .pm = &bfin_pm_ops,
},
};
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index d9ddf4122f3..2f2c1cb3642 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -472,7 +472,11 @@ static const struct musb_platform_ops da8xx_ops = {
.set_vbus = da8xx_musb_set_vbus,
};
-static u64 da8xx_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info da8xx_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
static int da8xx_probe(struct platform_device *pdev)
{
@@ -480,7 +484,7 @@ static int da8xx_probe(struct platform_device *pdev)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct da8xx_glue *glue;
-
+ struct platform_device_info pinfo;
struct clk *clk;
int ret = -ENOMEM;
@@ -491,12 +495,6 @@ static int da8xx_probe(struct platform_device *pdev)
goto err0;
}
- musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
- if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
- goto err1;
- }
-
clk = clk_get(&pdev->dev, "usb20");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -510,12 +508,7 @@ static int da8xx_probe(struct platform_device *pdev)
goto err4;
}
- musb->dev.parent = &pdev->dev;
- musb->dev.dma_mask = &da8xx_dmamask;
- musb->dev.coherent_dma_mask = da8xx_dmamask;
-
glue->dev = &pdev->dev;
- glue->musb = musb;
glue->clk = clk;
pdata->platform_ops = &da8xx_ops;
@@ -535,22 +528,17 @@ static int da8xx_probe(struct platform_device *pdev)
musb_resources[1].end = pdev->resource[1].end;
musb_resources[1].flags = pdev->resource[1].flags;
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto err5;
- }
-
- ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
- if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
- goto err5;
- }
-
- ret = platform_device_add(musb);
- if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ pinfo = da8xx_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
goto err5;
}
@@ -563,9 +551,6 @@ err4:
clk_put(clk);
err3:
- platform_device_put(musb);
-
-err1:
kfree(glue);
err0:
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index ed0834e2b72..1121fd741bf 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -505,14 +505,19 @@ static const struct musb_platform_ops davinci_ops = {
.set_vbus = davinci_musb_set_vbus,
};
-static u64 davinci_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info davinci_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
static int davinci_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
+ struct resource musb_resources[3];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct davinci_glue *glue;
+ struct platform_device_info pinfo;
struct clk *clk;
int ret = -ENOMEM;
@@ -523,12 +528,6 @@ static int davinci_probe(struct platform_device *pdev)
goto err0;
}
- musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
- if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
- goto err1;
- }
-
clk = clk_get(&pdev->dev, "usb");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -542,12 +541,7 @@ static int davinci_probe(struct platform_device *pdev)
goto err4;
}
- musb->dev.parent = &pdev->dev;
- musb->dev.dma_mask = &davinci_dmamask;
- musb->dev.coherent_dma_mask = davinci_dmamask;
-
glue->dev = &pdev->dev;
- glue->musb = musb;
glue->clk = clk;
pdata->platform_ops = &davinci_ops;
@@ -567,22 +561,26 @@ static int davinci_probe(struct platform_device *pdev)
musb_resources[1].end = pdev->resource[1].end;
musb_resources[1].flags = pdev->resource[1].flags;
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto err5;
- }
-
- ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
- if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
- goto err5;
- }
-
- ret = platform_device_add(musb);
- if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ /*
+ * For DM6467, 3 resources are passed. A placeholder for the 3rd
+ * resource is always there, so it's safe to always copy it...
+ */
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
+ pinfo = davinci_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
goto err5;
}
@@ -595,9 +593,6 @@ err4:
clk_put(clk);
err3:
- platform_device_put(musb);
-
-err1:
kfree(glue);
err0:
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
index 41ac5b5b57c..8be9b02c3cc 100644
--- a/drivers/usb/musb/musb_am335x.c
+++ b/drivers/usb/musb/musb_am335x.c
@@ -46,7 +46,7 @@ static struct platform_driver am335x_child_driver = {
.remove = am335x_child_remove,
.driver = {
.name = "am335x-usb-childs",
- .of_match_table = of_match_ptr(am335x_child_of_match),
+ .of_match_table = am335x_child_of_match,
},
};
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index cd70cc88617..0a43329569d 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -617,7 +617,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
default:
s = "VALID"; break;
- }; s; }),
+ } s; }),
VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
musb->port1_status);
@@ -1809,8 +1809,7 @@ static void musb_free(struct musb *musb)
disable_irq_wake(musb->nIrq);
free_irq(musb->nIrq, musb);
}
- if (musb->dma_controller)
- dma_controller_destroy(musb->dma_controller);
+ cancel_work_sync(&musb->irq_work);
musb_host_free(musb);
}
@@ -1885,8 +1884,13 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
pm_runtime_get_sync(musb->controller);
- if (use_dma && dev->dma_mask)
+ if (use_dma && dev->dma_mask) {
musb->dma_controller = dma_controller_create(musb, musb->mregs);
+ if (IS_ERR(musb->dma_controller)) {
+ status = PTR_ERR(musb->dma_controller);
+ goto fail2_5;
+ }
+ }
/* be sure interrupts are disabled before connecting ISR */
musb_platform_disable(musb);
@@ -1946,6 +1950,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
status = musb_gadget_setup(musb);
+ if (status)
+ musb_host_cleanup(musb);
break;
default:
dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
@@ -1972,10 +1978,12 @@ fail5:
fail4:
musb_gadget_cleanup(musb);
+ musb_host_cleanup(musb);
fail3:
if (musb->dma_controller)
dma_controller_destroy(musb->dma_controller);
+fail2_5:
pm_runtime_put_sync(musb->controller);
fail2:
@@ -2032,6 +2040,9 @@ static int musb_remove(struct platform_device *pdev)
musb_exit_debugfs(musb);
musb_shutdown(pdev);
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
+
musb_free(musb);
device_init_wakeup(dev, 0);
return 0;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 1c5bf75ee8f..29f7cd7c796 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -46,6 +46,7 @@
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/musb.h>
+#include <linux/phy/phy.h>
struct musb;
struct musb_hw_ep;
@@ -341,6 +342,7 @@ struct musb {
u16 int_tx;
struct usb_phy *xceiv;
+ struct phy *phy;
int nIrq;
unsigned irq_wake:1;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ae959746f77..ff9d6de2b74 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -484,6 +484,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
if (ret)
goto err;
+ ret = -EINVAL;
if (port > MUSB_DMA_NUM_CHANNELS || !port)
goto err;
if (is_tx)
@@ -503,6 +504,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
dc = dma_request_slave_channel(dev, str);
if (!dc) {
dev_err(dev, "Falied to request %s.\n", str);
+ ret = -EPROBE_DEFER;
goto err;
}
cppi41_channel->dc = dc;
@@ -510,7 +512,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
return 0;
err:
cppi41_release_all_dma_chans(controller);
- return -EINVAL;
+ return ret;
}
void dma_controller_destroy(struct dma_controller *c)
@@ -526,7 +528,7 @@ struct dma_controller *dma_controller_create(struct musb *musb,
void __iomem *base)
{
struct cppi41_dma_controller *controller;
- int ret;
+ int ret = 0;
if (!musb->controller->of_node) {
dev_err(musb->controller, "Need DT for the DMA engine.\n");
@@ -553,5 +555,7 @@ struct dma_controller *dma_controller_create(struct musb *musb,
plat_get_fail:
kfree(controller);
kzalloc_fail:
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
return NULL;
}
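The cppi41 hunks above make dma_controller_create() report a not-yet-available DMA channel as -EPROBE_DEFER instead of a plain NULL. Reduced to its essentials (hypothetical foo_* names, only the error-propagation shape is the point), the pattern is roughly:

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo_ctrl {
	struct dma_chan *dc;
};

static struct foo_ctrl *foo_ctrl_create(struct device *dev)
{
	struct foo_ctrl *ctrl;
	int ret = 0;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return NULL;

	ctrl->dc = dma_request_slave_channel(dev, "tx");
	if (!ctrl->dc) {
		/* channel provider not probed yet: ask the caller to retry later */
		ret = -EPROBE_DEFER;
		goto err_free;
	}
	return ctrl;

err_free:
	kfree(ctrl);
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}

The caller then treats an IS_ERR() result as an error to propagate, while NULL still means "no DMA, fall back to PIO", matching the musb_init_controller() change earlier in this series.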
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index bd4138d80a4..1901f6fe580 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -121,6 +121,43 @@ struct dsps_glue {
unsigned long last_timer; /* last timer data for each instance */
};
+static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->state));
+ del_timer(&glue->timer);
+ glue->last_timer = jiffies;
+ return;
+ }
+ if (musb->port_mode != MUSB_PORT_MODE_DUAL_ROLE)
+ return;
+
+ if (!musb->g.dev.driver)
+ return;
+
+ if (time_after(glue->last_timer, timeout) &&
+ timer_pending(&glue->timer)) {
+ dev_dbg(musb->controller,
+ "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ glue->last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&glue->timer, timeout);
+}
+
/**
* dsps_musb_enable - enable interrupts
*/
@@ -143,6 +180,7 @@ static void dsps_musb_enable(struct musb *musb)
/* Force the DRVVBUS IRQ so we can start polling for ID change. */
dsps_writel(reg_base, wrp->coreintr_set,
(1 << wrp->drvvbus) << wrp->usb_shift);
+ dsps_musb_try_idle(musb, 0);
}
/**
@@ -171,6 +209,7 @@ static void otg_timer(unsigned long _musb)
const struct dsps_musb_wrapper *wrp = glue->wrp;
u8 devctl;
unsigned long flags;
+ int skip_session = 0;
/*
* We poll because DSPS IP's won't expose several OTG-critical
@@ -183,10 +222,12 @@ static void otg_timer(unsigned long _musb)
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_BCON:
- devctl &= ~MUSB_DEVCTL_SESSION;
- dsps_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ skip_session = 1;
+ /* fall through */
- devctl = dsps_readb(musb->mregs, MUSB_DEVCTL);
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_B_IDLE:
if (devctl & MUSB_DEVCTL_BDEVICE) {
musb->xceiv->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
@@ -194,60 +235,21 @@ static void otg_timer(unsigned long _musb)
musb->xceiv->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
+ if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
+ dsps_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
break;
case OTG_STATE_A_WAIT_VFALL:
musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
dsps_writel(musb->ctrl_base, wrp->coreintr_set,
MUSB_INTR_VBUSERROR << wrp->usb_shift);
break;
- case OTG_STATE_B_IDLE:
- devctl = dsps_readb(mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&glue->timer,
- jiffies + wrp->poll_seconds * HZ);
- else
- musb->xceiv->state = OTG_STATE_A_IDLE;
- break;
default:
break;
}
spin_unlock_irqrestore(&musb->lock, flags);
}
-static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
- struct device *dev = musb->controller;
- struct dsps_glue *glue = dev_get_drvdata(dev->parent);
-
- if (timeout == 0)
- timeout = jiffies + msecs_to_jiffies(3);
-
- /* Never idle if active, or when VBUS timeout is not set as host */
- if (musb->is_active || (musb->a_wait_bcon == 0 &&
- musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
- dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->state));
- del_timer(&glue->timer);
- glue->last_timer = jiffies;
- return;
- }
- if (musb->port_mode == MUSB_PORT_MODE_HOST)
- return;
-
- if (time_after(glue->last_timer, timeout) &&
- timer_pending(&glue->timer)) {
- dev_dbg(musb->controller,
- "Longer idle timer already pending, ignoring...\n");
- return;
- }
- glue->last_timer = timeout;
-
- dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
- usb_otg_state_string(musb->xceiv->state),
- jiffies_to_msecs(timeout - jiffies));
- mod_timer(&glue->timer, timeout);
-}
-
static irqreturn_t dsps_interrupt(int irq, void *hci)
{
struct musb *musb = hci;
@@ -443,7 +445,7 @@ static int get_musb_port_mode(struct device *dev)
case USB_DR_MODE_OTG:
default:
return MUSB_PORT_MODE_DUAL_ROLE;
- };
+ }
}
static int dsps_create_musb_pdev(struct dsps_glue *glue,
@@ -631,7 +633,7 @@ static struct platform_driver dsps_usbss_driver = {
.remove = dsps_remove,
.driver = {
.name = "musb-dsps",
- .of_match_table = of_match_ptr(musb_dsps_of_match),
+ .of_match_table = musb_dsps_of_match,
},
};
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 3671898a453..d2d3a173b31 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1121,7 +1121,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
case USB_ENDPOINT_XFER_INT: s = "int"; break;
default: s = "iso"; break;
- }; s; }),
+ } s; }),
musb_ep->is_in ? "IN" : "OUT",
musb_ep->dma ? "dma, " : "",
musb_ep->packet_sz);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 9a2b8c85f19..6582a20bec0 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -253,7 +253,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
default: s = "-intr"; break;
- }; s; }),
+ } s; }),
epnum, buf + offset, len);
/* Configure endpoint */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index d1d6b83aabc..9af6bba5eac 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -220,6 +220,23 @@ int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
return retval;
}
+static int musb_has_gadget(struct musb *musb)
+{
+ /*
+ * In host-only mode we start a connection right away. In OTG mode
+ * we have to wait until we loaded a gadget. We don't really need a
+ * gadget if we operate as a host but we should not start a session
+ * as a device without a gadget or else we explode.
+ */
+#ifdef CONFIG_USB_MUSB_HOST
+ return 1;
+#else
+ if (musb->port_mode == MUSB_PORT_MODE_HOST)
+ return 1;
+ return musb->g.dev.driver != NULL;
+#endif
+}
+
int musb_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
@@ -362,7 +379,7 @@ int musb_hub_control(
* initialization logic, e.g. for OTG, or change any
* logic relating to VBUS power-up.
*/
- if (!hcd->self.is_b_host)
+ if (!hcd->self.is_b_host && musb_has_gadget(musb))
musb_start(musb);
break;
case USB_PORT_FEAT_RESET:
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 59d2245db1c..2a408cdaf7b 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -38,6 +38,7 @@
#include <linux/delay.h>
#include <linux/usb/musb-omap.h>
#include <linux/usb/omap_control_usb.h>
+#include <linux/of_platform.h>
#include "musb_core.h"
#include "omap2430.h"
@@ -305,6 +306,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
default:
dev_dbg(dev, "ID float\n");
}
+
+ atomic_notifier_call_chain(&musb->xceiv->notifier,
+ musb->xceiv->last_event, NULL);
}
@@ -348,11 +352,21 @@ static int omap2430_musb_init(struct musb *musb)
* up through ULPI. TWL4030-family PMICs include one,
* which needs a driver, drivers aren't always needed.
*/
- if (dev->parent->of_node)
+ if (dev->parent->of_node) {
+ musb->phy = devm_phy_get(dev->parent, "usb2-phy");
+
+ /* We can't totally remove musb->xceiv as of now because
+ * musb core uses xceiv.state and xceiv.otg. Once we have
+ * a separate state machine to handle otg, these can be moved
+ * out of xceiv and then we can start using the generic PHY
+ * framework
+ */
musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent,
"usb-phy", 0);
- else
+ } else {
musb->xceiv = devm_usb_get_phy_dev(dev, 0);
+ musb->phy = devm_phy_get(dev, "usb");
+ }
if (IS_ERR(musb->xceiv)) {
status = PTR_ERR(musb->xceiv);
@@ -364,6 +378,10 @@ static int omap2430_musb_init(struct musb *musb)
return -EPROBE_DEFER;
}
+ if (IS_ERR(musb->phy)) {
+ pr_err("HS USB OTG: no PHY configured\n");
+ return PTR_ERR(musb->phy);
+ }
musb->isr = omap2430_musb_interrupt;
status = pm_runtime_get_sync(dev);
@@ -397,7 +415,7 @@ static int omap2430_musb_init(struct musb *musb)
if (glue->status != OMAP_MUSB_UNKNOWN)
omap_musb_set_mailbox(glue);
- usb_phy_init(musb->xceiv);
+ phy_init(musb->phy);
pm_runtime_put_noidle(musb->controller);
return 0;
@@ -460,6 +478,7 @@ static int omap2430_musb_exit(struct musb *musb)
del_timer_sync(&musb_idle_timer);
omap2430_low_level_exit(musb);
+ phy_exit(musb->phy);
return 0;
}
@@ -509,8 +528,12 @@ static int omap2430_probe(struct platform_device *pdev)
glue->dev = &pdev->dev;
glue->musb = musb;
glue->status = OMAP_MUSB_UNKNOWN;
+ glue->control_otghs = ERR_PTR(-ENODEV);
if (np) {
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev,
@@ -539,22 +562,20 @@ static int omap2430_probe(struct platform_device *pdev)
of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
of_property_read_u32(np, "power", (u32 *)&pdata->power);
config->multipoint = of_property_read_bool(np, "multipoint");
- pdata->has_mailbox = of_property_read_bool(np,
- "ti,has-mailbox");
pdata->board_data = data;
pdata->config = config;
- }
- if (pdata->has_mailbox) {
- glue->control_otghs = omap_get_control_dev();
- if (IS_ERR(glue->control_otghs)) {
- dev_vdbg(&pdev->dev, "Failed to get control device\n");
- ret = PTR_ERR(glue->control_otghs);
- goto err2;
+ control_node = of_parse_phandle(np, "ctrl-module", 0);
+ if (control_node) {
+ control_pdev = of_find_device_by_node(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ ret = -EINVAL;
+ goto err2;
+ }
+ glue->control_otghs = &control_pdev->dev;
}
- } else {
- glue->control_otghs = ERR_PTR(-ENODEV);
}
pdata->platform_ops = &omap2430_ops;
@@ -638,7 +659,7 @@ static int omap2430_runtime_suspend(struct device *dev)
OTG_INTERFSEL);
omap2430_low_level_exit(musb);
- usb_phy_set_suspend(musb->xceiv, 1);
+ phy_power_off(musb->phy);
}
return 0;
@@ -653,8 +674,7 @@ static int omap2430_runtime_resume(struct device *dev)
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
-
- usb_phy_set_suspend(musb->xceiv, 0);
+ phy_power_on(musb->phy);
}
return 0;
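Both the omap2430 glue above and the phy-omap-usb3/phy-omap-control changes further down replace the global omap_get_control_dev() with a DT phandle lookup. Stripped of driver specifics (hypothetical wrapper; node reference counting omitted, as in the patch), the lookup is just:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static struct device *foo_get_ctrl_dev(struct device_node *np)
{
	struct device_node *control_node;
	struct platform_device *control_pdev;

	/* "ctrl-module" is the phandle property name used by these bindings */
	control_node = of_parse_phandle(np, "ctrl-module", 0);
	if (!control_node)
		return ERR_PTR(-EINVAL);

	control_pdev = of_find_device_by_node(control_node);
	if (!control_pdev)
		return ERR_PTR(-EINVAL);

	return &control_pdev->dev;
}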
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index b3b3ed72388..4432314d70e 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1152,7 +1152,11 @@ static const struct musb_platform_ops tusb_ops = {
.set_vbus = tusb_musb_set_vbus,
};
-static u64 tusb_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info tusb_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
static int tusb_probe(struct platform_device *pdev)
{
@@ -1160,7 +1164,7 @@ static int tusb_probe(struct platform_device *pdev)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct tusb6010_glue *glue;
-
+ struct platform_device_info pinfo;
int ret = -ENOMEM;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
@@ -1169,18 +1173,7 @@ static int tusb_probe(struct platform_device *pdev)
goto err0;
}
- musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
- if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
- goto err1;
- }
-
- musb->dev.parent = &pdev->dev;
- musb->dev.dma_mask = &tusb_dmamask;
- musb->dev.coherent_dma_mask = tusb_dmamask;
-
glue->dev = &pdev->dev;
- glue->musb = musb;
pdata->platform_ops = &tusb_ops;
@@ -1204,31 +1197,23 @@ static int tusb_probe(struct platform_device *pdev)
musb_resources[2].end = pdev->resource[2].end;
musb_resources[2].flags = pdev->resource[2].flags;
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto err3;
- }
-
- ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
- if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
- goto err3;
- }
-
- ret = platform_device_add(musb);
- if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ pinfo = tusb_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
goto err3;
}
return 0;
err3:
- platform_device_put(musb);
-
-err1:
kfree(glue);
err0:
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 59256b12f74..122446bf166 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -259,7 +259,7 @@ static int ux500_probe(struct platform_device *pdev)
goto err1;
}
- clk = clk_get(&pdev->dev, "usb");
+ clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(clk);
@@ -376,17 +376,10 @@ static int ux500_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops ux500_pm_ops = {
- .suspend = ux500_suspend,
- .resume = ux500_resume,
-};
-
-#define DEV_PM_OPS (&ux500_pm_ops)
-#else
-#define DEV_PM_OPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(ux500_pm_ops, ux500_suspend, ux500_resume);
+
static const struct of_device_id ux500_match[] = {
{ .compatible = "stericsson,db8500-musb", },
{}
@@ -397,7 +390,7 @@ static struct platform_driver ux500_driver = {
.remove = ux500_remove,
.driver = {
.name = "musb-ux500",
- .pm = DEV_PM_OPS,
+ .pm = &ux500_pm_ops,
.of_match_table = ux500_match,
},
};
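The ux500 hunk above is the stock SIMPLE_DEV_PM_OPS() cleanup; in isolation, with a hypothetical foo driver, the idiom reads:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}
#endif

/* expands to an empty dev_pm_ops when CONFIG_PM_SLEEP is not set */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};

Since SIMPLE_DEV_PM_OPS() only wires the callbacks in when CONFIG_PM_SLEEP is set, the DEV_PM_OPS #define dance and its NULL fallback become unnecessary.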
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index d5589f9c60a..08e2f39027e 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -66,17 +66,6 @@ config OMAP_CONTROL_USB
power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
additional register to power on USB3 PHY.
-config OMAP_USB2
- tristate "OMAP USB2 PHY Driver"
- depends on ARCH_OMAP2PLUS
- select OMAP_CONTROL_USB
- select USB_PHY
- help
- Enable this to support the transceiver that is part of SOC. This
- driver takes care of all the PHY functionality apart from comparator.
- The USB OTG controller communicates with the comparator using this
- driver.
-
config OMAP_USB3
tristate "OMAP USB3 PHY Driver"
depends on ARCH_OMAP2PLUS || COMPILE_TEST
@@ -93,6 +82,7 @@ config AM335X_CONTROL_USB
config AM335X_PHY_USB
tristate "AM335x USB PHY Driver"
+ depends on ARM || COMPILE_TEST
select USB_PHY
select AM335X_CONTROL_USB
select NOP_USB_XCEIV
@@ -123,16 +113,6 @@ config SAMSUNG_USB3PHY
Enable this to support Samsung USB 3.0 (Super Speed) phy controller
for samsung SoCs.
-config TWL4030_USB
- tristate "TWL4030 USB Transceiver Driver"
- depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
- select USB_PHY
- help
- Enable this to support the USB OTG transceiver on TWL4030
- family chips (including the TWL5030 and TPS659x0 devices).
- This transceiver supports high and full speed devices plus,
- in host mode, low speed.
-
config TWL6030_USB
tristate "TWL6030 USB Transceiver Driver"
depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
@@ -214,6 +194,19 @@ config USB_RCAR_PHY
To compile this driver as a module, choose M here: the
module will be called phy-rcar-usb.
+config USB_RCAR_GEN2_PHY
+ tristate "Renesas R-Car Gen2 USB PHY support"
+ depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
+ select USB_PHY
+ help
+ Say Y here to add support for the Renesas R-Car Gen2 USB PHY driver.
+ It is typically used to control internal USB PHY for USBHS,
+ and to configure shared USB channels 0 and 2.
+ This driver supports R8A7790 and R8A7791.
+
+ To compile this driver as a module, choose M here: the
+ module will be called phy-rcar-gen2-usb.
+
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
depends on ARM
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 2135e85f46e..022c1da7fb7 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -15,12 +15,10 @@ obj-$(CONFIG_NOP_USB_XCEIV) += phy-generic.o
obj-$(CONFIG_OMAP_CONTROL_USB) += phy-omap-control.o
obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
obj-$(CONFIG_AM335X_PHY_USB) += phy-am335x.o
-obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
obj-$(CONFIG_OMAP_USB3) += phy-omap-usb3.o
obj-$(CONFIG_SAMSUNG_USBPHY) += phy-samsung-usb.o
obj-$(CONFIG_SAMSUNG_USB2PHY) += phy-samsung-usb2.o
obj-$(CONFIG_SAMSUNG_USB3PHY) += phy-samsung-usb3.o
-obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_TWL6030_USB) += phy-twl6030-usb.o
obj-$(CONFIG_USB_EHCI_TEGRA) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
@@ -29,5 +27,6 @@ obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o
+obj-$(CONFIG_USB_RCAR_GEN2_PHY) += phy-rcar-gen2-usb.o
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 22cf07d62e4..634f49acd20 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -26,6 +26,41 @@ struct am335x_control_usb {
#define USBPHY_OTGVDET_EN (1 << 19)
#define USBPHY_OTGSESSEND_EN (1 << 20)
+#define AM335X_PHY0_WK_EN (1 << 0)
+#define AM335X_PHY1_WK_EN (1 << 8)
+
+static void am335x_phy_wkup(struct phy_control *phy_ctrl, u32 id, bool on)
+{
+ struct am335x_control_usb *usb_ctrl;
+ u32 val;
+ u32 reg;
+
+ usb_ctrl = container_of(phy_ctrl, struct am335x_control_usb, phy_ctrl);
+
+ switch (id) {
+ case 0:
+ reg = AM335X_PHY0_WK_EN;
+ break;
+ case 1:
+ reg = AM335X_PHY1_WK_EN;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock(&usb_ctrl->lock);
+ val = readl(usb_ctrl->wkup);
+
+ if (on)
+ val |= reg;
+ else
+ val &= ~reg;
+
+ writel(val, usb_ctrl->wkup);
+ spin_unlock(&usb_ctrl->lock);
+}
+
static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
{
struct am335x_control_usb *usb_ctrl;
@@ -59,6 +94,7 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
static const struct phy_control ctrl_am335x = {
.phy_power = am335x_phy_power,
+ .phy_wkup = am335x_phy_wkup,
};
static const struct of_device_id omap_control_usb_id_table[] = {
@@ -117,6 +153,12 @@ static int am335x_control_usb_probe(struct platform_device *pdev)
ctrl_usb->phy_reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ctrl_usb->phy_reg))
return PTR_ERR(ctrl_usb->phy_reg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wakeup");
+ ctrl_usb->wkup = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctrl_usb->wkup))
+ return PTR_ERR(ctrl_usb->wkup);
+
spin_lock_init(&ctrl_usb->lock);
ctrl_usb->phy_ctrl = *phy_ctrl;
@@ -129,7 +171,7 @@ static struct platform_driver am335x_control_driver = {
.driver = {
.name = "am335x-control-usb",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(omap_control_usb_id_table),
+ .of_match_table = omap_control_usb_id_table,
},
};
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index c4d614d1f17..6370e50649d 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -53,21 +53,20 @@ static int am335x_phy_probe(struct platform_device *pdev)
}
ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen,
- USB_PHY_TYPE_USB2, 0, false, false);
+ USB_PHY_TYPE_USB2, 0, false);
if (ret)
return ret;
ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
if (ret)
- goto err_add;
+ return ret;
am_phy->usb_phy_gen.phy.init = am335x_init;
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
platform_set_drvdata(pdev, am_phy);
+
return 0;
-err_add:
- usb_phy_gen_cleanup_phy(&am_phy->usb_phy_gen);
return ret;
}
@@ -79,6 +78,40 @@ static int am335x_phy_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
+
+static int am335x_phy_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct am335x_phy *am_phy = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(dev))
+ phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, true);
+ phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false);
+ return 0;
+}
+
+static int am335x_phy_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct am335x_phy *am_phy = platform_get_drvdata(pdev);
+
+ phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, true);
+ if (device_may_wakeup(dev))
+ phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, false);
+ return 0;
+}
+
+static const struct dev_pm_ops am335x_pm_ops = {
+ SET_RUNTIME_PM_OPS(am335x_phy_runtime_suspend,
+ am335x_phy_runtime_resume, NULL)
+};
+
+#define DEV_PM_OPS (&am335x_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
static const struct of_device_id am335x_phy_ids[] = {
{ .compatible = "ti,am335x-usb-phy" },
{ }
@@ -91,7 +124,8 @@ static struct platform_driver am335x_phy_driver = {
.driver = {
.name = "am335x-phy-driver",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(am335x_phy_ids),
+ .pm = DEV_PM_OPS,
+ .of_match_table = am335x_phy_ids,
},
};
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index fa7c9f9628b..7f3c73b967c 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -134,7 +134,7 @@ int write_ulpi(u8 addr, u8 data)
/* Operations that will be called from OTG Finite State Machine */
/* Charge vbus for vbus pulsing in SRP */
-void fsl_otg_chrg_vbus(int on)
+void fsl_otg_chrg_vbus(struct otg_fsm *fsm, int on)
{
u32 tmp;
@@ -170,7 +170,7 @@ void fsl_otg_dischrg_vbus(int on)
}
/* A-device drives vbus, controlled through PP bit in PORTSC */
-void fsl_otg_drv_vbus(int on)
+void fsl_otg_drv_vbus(struct otg_fsm *fsm, int on)
{
u32 tmp;
@@ -188,7 +188,7 @@ void fsl_otg_drv_vbus(int on)
* Pull-up D+, signalling connect by peripheral. Also used in
* data-line pulsing in SRP
*/
-void fsl_otg_loc_conn(int on)
+void fsl_otg_loc_conn(struct otg_fsm *fsm, int on)
{
u32 tmp;
@@ -207,7 +207,7 @@ void fsl_otg_loc_conn(int on)
* port. In host mode, controller will automatically send SOF.
* Suspend will block the data on the port.
*/
-void fsl_otg_loc_sof(int on)
+void fsl_otg_loc_sof(struct otg_fsm *fsm, int on)
{
u32 tmp;
@@ -222,7 +222,7 @@ void fsl_otg_loc_sof(int on)
}
/* Start SRP pulsing by data-line pulsing, followed with v-bus pulsing. */
-void fsl_otg_start_pulse(void)
+void fsl_otg_start_pulse(struct otg_fsm *fsm)
{
u32 tmp;
@@ -235,7 +235,7 @@ void fsl_otg_start_pulse(void)
fsl_otg_loc_conn(1);
#endif
- fsl_otg_add_timer(b_data_pulse_tmr);
+ fsl_otg_add_timer(fsm, b_data_pulse_tmr);
}
void b_data_pulse_end(unsigned long foo)
@@ -252,14 +252,14 @@ void b_data_pulse_end(unsigned long foo)
void fsl_otg_pulse_vbus(void)
{
srp_wait_done = 0;
- fsl_otg_chrg_vbus(1);
+ fsl_otg_chrg_vbus(&fsl_otg_dev->fsm, 1);
/* start the timer to end vbus charge */
- fsl_otg_add_timer(b_vbus_pulse_tmr);
+ fsl_otg_add_timer(&fsl_otg_dev->fsm, b_vbus_pulse_tmr);
}
void b_vbus_pulse_end(unsigned long foo)
{
- fsl_otg_chrg_vbus(0);
+ fsl_otg_chrg_vbus(&fsl_otg_dev->fsm, 0);
/*
* As USB3300 using the same a_sess_vld and b_sess_vld voltage
@@ -267,7 +267,7 @@ void b_vbus_pulse_end(unsigned long foo)
* residual voltage of vbus pulsing and A device pull up
*/
fsl_otg_dischrg_vbus(1);
- fsl_otg_add_timer(b_srp_wait_tmr);
+ fsl_otg_add_timer(&fsl_otg_dev->fsm, b_srp_wait_tmr);
}
void b_srp_end(unsigned long foo)
@@ -289,7 +289,7 @@ void a_wait_enum(unsigned long foo)
{
VDBG("a_wait_enum timeout\n");
if (!fsl_otg_dev->phy.otg->host->b_hnp_enable)
- fsl_otg_add_timer(a_wait_enum_tmr);
+ fsl_otg_add_timer(&fsl_otg_dev->fsm, a_wait_enum_tmr);
else
otg_statemachine(&fsl_otg_dev->fsm);
}
@@ -375,8 +375,42 @@ void fsl_otg_uninit_timers(void)
kfree(b_vbus_pulse_tmr);
}
+static struct fsl_otg_timer *fsl_otg_get_timer(enum otg_fsm_timer t)
+{
+ struct fsl_otg_timer *timer;
+
+ /* REVISIT: use array of pointers to timers instead */
+ switch (t) {
+ case A_WAIT_VRISE:
+ timer = a_wait_vrise_tmr;
+ break;
+ case A_WAIT_BCON:
+ timer = a_wait_bcon_tmr;
+ break;
+ case A_AIDL_BDIS:
+ timer = a_aidl_bdis_tmr;
+ break;
+ case B_ASE0_BRST:
+ timer = b_ase0_brst_tmr;
+ break;
+ case B_SE0_SRP:
+ timer = b_se0_srp_tmr;
+ break;
+ case B_SRP_FAIL:
+ timer = b_srp_fail_tmr;
+ break;
+ case A_WAIT_ENUM:
+ timer = a_wait_enum_tmr;
+ break;
+ default:
+ timer = NULL;
+ }
+
+ return timer;
+}
+
/* Add timer to timer list */
-void fsl_otg_add_timer(void *gtimer)
+void fsl_otg_add_timer(struct otg_fsm *fsm, void *gtimer)
{
struct fsl_otg_timer *timer = gtimer;
struct fsl_otg_timer *tmp_timer;
@@ -394,8 +428,19 @@ void fsl_otg_add_timer(void *gtimer)
list_add_tail(&timer->list, &active_timers);
}
+static void fsl_otg_fsm_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
+{
+ struct fsl_otg_timer *timer;
+
+ timer = fsl_otg_get_timer(t);
+ if (!timer)
+ return;
+
+ fsl_otg_add_timer(fsm, timer);
+}
+
/* Remove timer from the timer list; clear timeout status */
-void fsl_otg_del_timer(void *gtimer)
+void fsl_otg_del_timer(struct otg_fsm *fsm, void *gtimer)
{
struct fsl_otg_timer *timer = gtimer;
struct fsl_otg_timer *tmp_timer, *del_tmp;
@@ -405,6 +450,17 @@ void fsl_otg_del_timer(void *gtimer)
list_del(&timer->list);
}
+static void fsl_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
+{
+ struct fsl_otg_timer *timer;
+
+ timer = fsl_otg_get_timer(t);
+ if (!timer)
+ return;
+
+ fsl_otg_del_timer(fsm, timer);
+}
+
/*
* Reduce timer count by 1, and find timeout conditions.
* Called by fsl_otg 1ms timer interrupt
@@ -468,7 +524,7 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
retval = dev->driver->pm->resume(dev);
if (fsm->id) {
/* default-b */
- fsl_otg_drv_vbus(1);
+ fsl_otg_drv_vbus(fsm, 1);
/*
* Workaround: b_host can't drive
* vbus, but PP in PORTSC needs to
@@ -493,7 +549,7 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
retval = dev->driver->pm->suspend(dev);
if (fsm->id)
/* default-b */
- fsl_otg_drv_vbus(0);
+ fsl_otg_drv_vbus(fsm, 0);
}
otg_dev->host_working = 0;
}
@@ -757,8 +813,8 @@ static struct otg_fsm_ops fsl_otg_ops = {
.loc_sof = fsl_otg_loc_sof,
.start_pulse = fsl_otg_start_pulse,
- .add_timer = fsl_otg_add_timer,
- .del_timer = fsl_otg_del_timer,
+ .add_timer = fsl_otg_fsm_add_timer,
+ .del_timer = fsl_otg_fsm_del_timer,
.start_host = fsl_otg_start_host,
.start_gadget = fsl_otg_start_gadget,
@@ -1011,7 +1067,7 @@ static int show_fsl_usb2_otg_state(struct device *dev,
"b_bus_suspend: %d\n"
"b_conn: %d\n"
"b_se0_srp: %d\n"
- "b_sess_end: %d\n"
+ "b_ssend_srp: %d\n"
"b_sess_vld: %d\n"
"id: %d\n",
fsm->a_bus_req,
@@ -1026,7 +1082,7 @@ static int show_fsl_usb2_otg_state(struct device *dev,
fsm->b_bus_suspend,
fsm->b_conn,
fsm->b_se0_srp,
- fsm->b_sess_end,
+ fsm->b_ssend_srp,
fsm->b_sess_vld,
fsm->id);
size -= t;
@@ -1057,7 +1113,7 @@ static long fsl_otg_ioctl(struct file *file, unsigned int cmd,
break;
case SET_A_SUSPEND_REQ:
- fsl_otg_dev->fsm.a_suspend_req = arg;
+ fsl_otg_dev->fsm.a_suspend_req_inf = arg;
break;
case SET_A_BUS_DROP:
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index e1859b8ef56..7365170a2f2 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -401,6 +401,6 @@ struct fsl_otg_config {
#define GET_A_BUS_REQ _IOR(OTG_IOCTL_MAGIC, 8, int)
#define GET_B_BUS_REQ _IOR(OTG_IOCTL_MAGIC, 9, int)
-void fsl_otg_add_timer(void *timer);
-void fsl_otg_del_timer(void *timer);
+void fsl_otg_add_timer(struct otg_fsm *fsm, void *timer);
+void fsl_otg_del_timer(struct otg_fsm *fsm, void *timer);
void fsl_otg_pulse_vbus(void);
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index 7f4596606e1..329c2d2f859 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -41,17 +41,17 @@ static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
fsm->protocol, protocol);
/* stop old protocol */
if (fsm->protocol == PROTO_HOST)
- ret = fsm->ops->start_host(fsm, 0);
+ ret = otg_start_host(fsm, 0);
else if (fsm->protocol == PROTO_GADGET)
- ret = fsm->ops->start_gadget(fsm, 0);
+ ret = otg_start_gadget(fsm, 0);
if (ret)
return ret;
/* start new protocol */
if (protocol == PROTO_HOST)
- ret = fsm->ops->start_host(fsm, 1);
+ ret = otg_start_host(fsm, 1);
else if (protocol == PROTO_GADGET)
- ret = fsm->ops->start_gadget(fsm, 1);
+ ret = otg_start_gadget(fsm, 1);
if (ret)
return ret;
@@ -69,42 +69,50 @@ void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
{
switch (old_state) {
case OTG_STATE_B_IDLE:
- otg_del_timer(fsm, b_se0_srp_tmr);
+ otg_del_timer(fsm, B_SE0_SRP);
fsm->b_se0_srp = 0;
+ fsm->adp_sns = 0;
+ fsm->adp_prb = 0;
break;
case OTG_STATE_B_SRP_INIT:
+ fsm->data_pulse = 0;
fsm->b_srp_done = 0;
break;
case OTG_STATE_B_PERIPHERAL:
break;
case OTG_STATE_B_WAIT_ACON:
- otg_del_timer(fsm, b_ase0_brst_tmr);
+ otg_del_timer(fsm, B_ASE0_BRST);
fsm->b_ase0_brst_tmout = 0;
break;
case OTG_STATE_B_HOST:
break;
case OTG_STATE_A_IDLE:
+ fsm->adp_prb = 0;
break;
case OTG_STATE_A_WAIT_VRISE:
- otg_del_timer(fsm, a_wait_vrise_tmr);
+ otg_del_timer(fsm, A_WAIT_VRISE);
fsm->a_wait_vrise_tmout = 0;
break;
case OTG_STATE_A_WAIT_BCON:
- otg_del_timer(fsm, a_wait_bcon_tmr);
+ otg_del_timer(fsm, A_WAIT_BCON);
fsm->a_wait_bcon_tmout = 0;
break;
case OTG_STATE_A_HOST:
- otg_del_timer(fsm, a_wait_enum_tmr);
+ otg_del_timer(fsm, A_WAIT_ENUM);
break;
case OTG_STATE_A_SUSPEND:
- otg_del_timer(fsm, a_aidl_bdis_tmr);
+ otg_del_timer(fsm, A_AIDL_BDIS);
fsm->a_aidl_bdis_tmout = 0;
- fsm->a_suspend_req = 0;
+ fsm->a_suspend_req_inf = 0;
break;
case OTG_STATE_A_PERIPHERAL:
+ otg_del_timer(fsm, A_BIDL_ADIS);
+ fsm->a_bidl_adis_tmout = 0;
break;
case OTG_STATE_A_WAIT_VFALL:
- otg_del_timer(fsm, a_wait_vrise_tmr);
+ otg_del_timer(fsm, A_WAIT_VFALL);
+ fsm->a_wait_vfall_tmout = 0;
+ otg_del_timer(fsm, A_WAIT_VRISE);
break;
case OTG_STATE_A_VBUS_ERR:
break;
@@ -127,14 +135,19 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
otg_chrg_vbus(fsm, 0);
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
+ /*
+ * Driver is responsible for starting ADP probing
+ * if ADP sensing times out.
+ */
+ otg_start_adp_sns(fsm);
otg_set_protocol(fsm, PROTO_UNDEF);
- otg_add_timer(fsm, b_se0_srp_tmr);
+ otg_add_timer(fsm, B_SE0_SRP);
break;
case OTG_STATE_B_SRP_INIT:
otg_start_pulse(fsm);
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_UNDEF);
- otg_add_timer(fsm, b_srp_fail_tmr);
+ otg_add_timer(fsm, B_SRP_FAIL);
break;
case OTG_STATE_B_PERIPHERAL:
otg_chrg_vbus(fsm, 0);
@@ -147,7 +160,7 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_HOST);
- otg_add_timer(fsm, b_ase0_brst_tmr);
+ otg_add_timer(fsm, B_ASE0_BRST);
fsm->a_bus_suspend = 0;
break;
case OTG_STATE_B_HOST:
@@ -163,6 +176,7 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
otg_chrg_vbus(fsm, 0);
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
+ otg_start_adp_prb(fsm);
otg_set_protocol(fsm, PROTO_HOST);
break;
case OTG_STATE_A_WAIT_VRISE:
@@ -170,14 +184,14 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_HOST);
- otg_add_timer(fsm, a_wait_vrise_tmr);
+ otg_add_timer(fsm, A_WAIT_VRISE);
break;
case OTG_STATE_A_WAIT_BCON:
otg_drv_vbus(fsm, 1);
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_HOST);
- otg_add_timer(fsm, a_wait_bcon_tmr);
+ otg_add_timer(fsm, A_WAIT_BCON);
break;
case OTG_STATE_A_HOST:
otg_drv_vbus(fsm, 1);
@@ -188,15 +202,15 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
* When HNP is triggered while a_bus_req = 0, a_host will
* suspend too fast to complete a_set_b_hnp_en
*/
- if (!fsm->a_bus_req || fsm->a_suspend_req)
- otg_add_timer(fsm, a_wait_enum_tmr);
+ if (!fsm->a_bus_req || fsm->a_suspend_req_inf)
+ otg_add_timer(fsm, A_WAIT_ENUM);
break;
case OTG_STATE_A_SUSPEND:
otg_drv_vbus(fsm, 1);
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_HOST);
- otg_add_timer(fsm, a_aidl_bdis_tmr);
+ otg_add_timer(fsm, A_AIDL_BDIS);
break;
case OTG_STATE_A_PERIPHERAL:
@@ -204,12 +218,14 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_GADGET);
otg_drv_vbus(fsm, 1);
+ otg_add_timer(fsm, A_BIDL_ADIS);
break;
case OTG_STATE_A_WAIT_VFALL:
otg_drv_vbus(fsm, 0);
otg_loc_conn(fsm, 0);
otg_loc_sof(fsm, 0);
otg_set_protocol(fsm, PROTO_HOST);
+ otg_add_timer(fsm, A_WAIT_VFALL);
break;
case OTG_STATE_A_VBUS_ERR:
otg_drv_vbus(fsm, 0);
@@ -250,7 +266,8 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_IDLE);
else if (fsm->b_sess_vld && fsm->otg->gadget)
otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
- else if (fsm->b_bus_req && fsm->b_sess_end && fsm->b_se0_srp)
+ else if ((fsm->b_bus_req || fsm->adp_change || fsm->power_up) &&
+ fsm->b_ssend_srp && fsm->b_se0_srp)
otg_set_state(fsm, OTG_STATE_B_SRP_INIT);
break;
case OTG_STATE_B_SRP_INIT:
@@ -277,13 +294,14 @@ int otg_statemachine(struct otg_fsm *fsm)
case OTG_STATE_B_HOST:
if (!fsm->id || !fsm->b_sess_vld)
otg_set_state(fsm, OTG_STATE_B_IDLE);
- else if (!fsm->b_bus_req || !fsm->a_conn)
+ else if (!fsm->b_bus_req || !fsm->a_conn || fsm->test_device)
otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
break;
case OTG_STATE_A_IDLE:
if (fsm->id)
otg_set_state(fsm, OTG_STATE_B_IDLE);
- else if (!fsm->a_bus_drop && (fsm->a_bus_req || fsm->a_srp_det))
+ else if (!fsm->a_bus_drop && (fsm->a_bus_req ||
+ fsm->a_srp_det || fsm->adp_change || fsm->power_up))
otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
break;
case OTG_STATE_A_WAIT_VRISE:
@@ -301,7 +319,7 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
break;
case OTG_STATE_A_HOST:
- if ((!fsm->a_bus_req || fsm->a_suspend_req) &&
+ if ((!fsm->a_bus_req || fsm->a_suspend_req_inf) &&
fsm->otg->host->b_hnp_enable)
otg_set_state(fsm, OTG_STATE_A_SUSPEND);
else if (fsm->id || !fsm->b_conn || fsm->a_bus_drop)
@@ -324,14 +342,14 @@ int otg_statemachine(struct otg_fsm *fsm)
case OTG_STATE_A_PERIPHERAL:
if (fsm->id || fsm->a_bus_drop)
otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
- else if (fsm->b_bus_suspend)
+ else if (fsm->a_bidl_adis_tmout || fsm->b_bus_suspend)
otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
else if (!fsm->a_vbus_vld)
otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
break;
case OTG_STATE_A_WAIT_VFALL:
- if (fsm->id || fsm->a_bus_req || (!fsm->a_sess_vld &&
- !fsm->b_conn))
+ if (fsm->a_wait_vfall_tmout || fsm->id || fsm->a_bus_req ||
+ (!fsm->a_sess_vld && !fsm->b_conn))
otg_set_state(fsm, OTG_STATE_A_IDLE);
break;
case OTG_STATE_A_VBUS_ERR:
diff --git a/drivers/usb/phy/phy-fsm-usb.h b/drivers/usb/phy/phy-fsm-usb.h
index fbe586206f3..7441b46a27f 100644
--- a/drivers/usb/phy/phy-fsm-usb.h
+++ b/drivers/usb/phy/phy-fsm-usb.h
@@ -34,45 +34,76 @@
#define PROTO_HOST (1)
#define PROTO_GADGET (2)
+enum otg_fsm_timer {
+ /* Standard OTG timers */
+ A_WAIT_VRISE,
+ A_WAIT_VFALL,
+ A_WAIT_BCON,
+ A_AIDL_BDIS,
+ B_ASE0_BRST,
+ A_BIDL_ADIS,
+
+ /* Auxiliary timers */
+ B_SE0_SRP,
+ B_SRP_FAIL,
+ A_WAIT_ENUM,
+
+ NUM_OTG_FSM_TIMERS,
+};
+
/* OTG state machine according to the OTG spec */
struct otg_fsm {
/* Input */
+ int id;
+ int adp_change;
+ int power_up;
+ int test_device;
+ int a_bus_drop;
+ int a_bus_req;
+ int a_srp_det;
+ int a_vbus_vld;
+ int b_conn;
int a_bus_resume;
int a_bus_suspend;
int a_conn;
+ int b_bus_req;
+ int b_se0_srp;
+ int b_ssend_srp;
+ int b_sess_vld;
+ /* Auxiliary inputs */
int a_sess_vld;
- int a_srp_det;
- int a_vbus_vld;
int b_bus_resume;
int b_bus_suspend;
- int b_conn;
- int b_se0_srp;
- int b_sess_end;
- int b_sess_vld;
- int id;
+
+ /* Output */
+ int data_pulse;
+ int drv_vbus;
+ int loc_conn;
+ int loc_sof;
+ int adp_prb;
+ int adp_sns;
/* Internal variables */
int a_set_b_hnp_en;
int b_srp_done;
int b_hnp_enable;
+ int a_clr_err;
+
+ /* Informative variables */
+ int a_bus_drop_inf;
+ int a_bus_req_inf;
+ int a_clr_err_inf;
+ int b_bus_req_inf;
+ /* Auxiliary informative variables */
+ int a_suspend_req_inf;
/* Timeout indicator for timers */
int a_wait_vrise_tmout;
+ int a_wait_vfall_tmout;
int a_wait_bcon_tmout;
int a_aidl_bdis_tmout;
int b_ase0_brst_tmout;
-
- /* Informative variables */
- int a_bus_drop;
- int a_bus_req;
- int a_clr_err;
- int a_suspend_req;
- int b_bus_req;
-
- /* Output */
- int drv_vbus;
- int loc_conn;
- int loc_sof;
+ int a_bidl_adis_tmout;
struct otg_fsm_ops *ops;
struct usb_otg *otg;
@@ -83,65 +114,123 @@ struct otg_fsm {
};
struct otg_fsm_ops {
- void (*chrg_vbus)(int on);
- void (*drv_vbus)(int on);
- void (*loc_conn)(int on);
- void (*loc_sof)(int on);
- void (*start_pulse)(void);
- void (*add_timer)(void *timer);
- void (*del_timer)(void *timer);
+ void (*chrg_vbus)(struct otg_fsm *fsm, int on);
+ void (*drv_vbus)(struct otg_fsm *fsm, int on);
+ void (*loc_conn)(struct otg_fsm *fsm, int on);
+ void (*loc_sof)(struct otg_fsm *fsm, int on);
+ void (*start_pulse)(struct otg_fsm *fsm);
+ void (*start_adp_prb)(struct otg_fsm *fsm);
+ void (*start_adp_sns)(struct otg_fsm *fsm);
+ void (*add_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer);
+ void (*del_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer);
int (*start_host)(struct otg_fsm *fsm, int on);
int (*start_gadget)(struct otg_fsm *fsm, int on);
};
-static inline void otg_chrg_vbus(struct otg_fsm *fsm, int on)
+static inline int otg_chrg_vbus(struct otg_fsm *fsm, int on)
{
- fsm->ops->chrg_vbus(on);
+ if (!fsm->ops->chrg_vbus)
+ return -EOPNOTSUPP;
+ fsm->ops->chrg_vbus(fsm, on);
+ return 0;
}
-static inline void otg_drv_vbus(struct otg_fsm *fsm, int on)
+static inline int otg_drv_vbus(struct otg_fsm *fsm, int on)
{
+ if (!fsm->ops->drv_vbus)
+ return -EOPNOTSUPP;
if (fsm->drv_vbus != on) {
fsm->drv_vbus = on;
- fsm->ops->drv_vbus(on);
+ fsm->ops->drv_vbus(fsm, on);
}
+ return 0;
}
-static inline void otg_loc_conn(struct otg_fsm *fsm, int on)
+static inline int otg_loc_conn(struct otg_fsm *fsm, int on)
{
+ if (!fsm->ops->loc_conn)
+ return -EOPNOTSUPP;
if (fsm->loc_conn != on) {
fsm->loc_conn = on;
- fsm->ops->loc_conn(on);
+ fsm->ops->loc_conn(fsm, on);
}
+ return 0;
}
-static inline void otg_loc_sof(struct otg_fsm *fsm, int on)
+static inline int otg_loc_sof(struct otg_fsm *fsm, int on)
{
+ if (!fsm->ops->loc_sof)
+ return -EOPNOTSUPP;
if (fsm->loc_sof != on) {
fsm->loc_sof = on;
- fsm->ops->loc_sof(on);
+ fsm->ops->loc_sof(fsm, on);
+ }
+ return 0;
+}
+
+static inline int otg_start_pulse(struct otg_fsm *fsm)
+{
+ if (!fsm->ops->start_pulse)
+ return -EOPNOTSUPP;
+ if (!fsm->data_pulse) {
+ fsm->data_pulse = 1;
+ fsm->ops->start_pulse(fsm);
}
+ return 0;
}
-static inline void otg_start_pulse(struct otg_fsm *fsm)
+static inline int otg_start_adp_prb(struct otg_fsm *fsm)
{
- fsm->ops->start_pulse();
+ if (!fsm->ops->start_adp_prb)
+ return -EOPNOTSUPP;
+ if (!fsm->adp_prb) {
+ fsm->adp_sns = 0;
+ fsm->adp_prb = 1;
+ fsm->ops->start_adp_prb(fsm);
+ }
+ return 0;
}
-static inline void otg_add_timer(struct otg_fsm *fsm, void *timer)
+static inline int otg_start_adp_sns(struct otg_fsm *fsm)
{
- fsm->ops->add_timer(timer);
+ if (!fsm->ops->start_adp_sns)
+ return -EOPNOTSUPP;
+ if (!fsm->adp_sns) {
+ fsm->adp_sns = 1;
+ fsm->ops->start_adp_sns(fsm);
+ }
+ return 0;
}
-static inline void otg_del_timer(struct otg_fsm *fsm, void *timer)
+static inline int otg_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer)
{
- fsm->ops->del_timer(timer);
+ if (!fsm->ops->add_timer)
+ return -EOPNOTSUPP;
+ fsm->ops->add_timer(fsm, timer);
+ return 0;
}
-int otg_statemachine(struct otg_fsm *fsm);
+static inline int otg_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer)
+{
+ if (!fsm->ops->del_timer)
+ return -EOPNOTSUPP;
+ fsm->ops->del_timer(fsm, timer);
+ return 0;
+}
-/* Defined by device specific driver, for different timer implementation */
-extern struct fsl_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr,
- *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_fail_tmr,
- *a_wait_enum_tmr;
+static inline int otg_start_host(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->start_host)
+ return -EOPNOTSUPP;
+ return fsm->ops->start_host(fsm, on);
+}
+
+static inline int otg_start_gadget(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->start_gadget)
+ return -EOPNOTSUPP;
+ return fsm->ops->start_gadget(fsm, on);
+}
+
+int otg_statemachine(struct otg_fsm *fsm);
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index efe59f3f7fd..fce3a9e9bb5 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -35,6 +35,9 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
#include "phy-generic.h"
@@ -64,6 +67,23 @@ static int nop_set_suspend(struct usb_phy *x, int suspend)
return 0;
}
+static void nop_reset_set(struct usb_phy_gen_xceiv *nop, int asserted)
+{
+ int value;
+
+ if (!gpio_is_valid(nop->gpio_reset))
+ return;
+
+ value = asserted;
+ if (nop->reset_active_low)
+ value = !value;
+
+ gpio_set_value_cansleep(nop->gpio_reset, value);
+
+ if (!asserted)
+ usleep_range(10000, 20000);
+}
+
int usb_gen_phy_init(struct usb_phy *phy)
{
struct usb_phy_gen_xceiv *nop = dev_get_drvdata(phy->dev);
@@ -74,13 +94,10 @@ int usb_gen_phy_init(struct usb_phy *phy)
}
if (!IS_ERR(nop->clk))
- clk_enable(nop->clk);
+ clk_prepare_enable(nop->clk);
- if (!IS_ERR(nop->reset)) {
- /* De-assert RESET */
- if (regulator_enable(nop->reset))
- dev_err(phy->dev, "Failed to de-assert reset\n");
- }
+ /* De-assert RESET */
+ nop_reset_set(nop, 0);
return 0;
}
@@ -90,14 +107,11 @@ void usb_gen_phy_shutdown(struct usb_phy *phy)
{
struct usb_phy_gen_xceiv *nop = dev_get_drvdata(phy->dev);
- if (!IS_ERR(nop->reset)) {
- /* Assert RESET */
- if (regulator_disable(nop->reset))
- dev_err(phy->dev, "Failed to assert reset\n");
- }
+ /* Assert RESET */
+ nop_reset_set(nop, 1);
if (!IS_ERR(nop->clk))
- clk_disable(nop->clk);
+ clk_disable_unprepare(nop->clk);
if (!IS_ERR(nop->vcc)) {
if (regulator_disable(nop->vcc))
@@ -136,8 +150,7 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
}
int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
- enum usb_phy_type type, u32 clk_rate, bool needs_vcc,
- bool needs_reset)
+ enum usb_phy_type type, u32 clk_rate, bool needs_vcc)
{
int err;
@@ -160,14 +173,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
}
}
- if (!IS_ERR(nop->clk)) {
- err = clk_prepare(nop->clk);
- if (err) {
- dev_err(dev, "Error preparing clock\n");
- return err;
- }
- }
-
nop->vcc = devm_regulator_get(dev, "vcc");
if (IS_ERR(nop->vcc)) {
dev_dbg(dev, "Error getting vcc regulator: %ld\n",
@@ -176,12 +181,22 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
return -EPROBE_DEFER;
}
- nop->reset = devm_regulator_get(dev, "reset");
- if (IS_ERR(nop->reset)) {
- dev_dbg(dev, "Error getting reset regulator: %ld\n",
- PTR_ERR(nop->reset));
- if (needs_reset)
- return -EPROBE_DEFER;
+ if (gpio_is_valid(nop->gpio_reset)) {
+ unsigned long gpio_flags;
+
+ /* Assert RESET */
+ if (nop->reset_active_low)
+ gpio_flags = GPIOF_OUT_INIT_LOW;
+ else
+ gpio_flags = GPIOF_OUT_INIT_HIGH;
+
+ err = devm_gpio_request_one(dev, nop->gpio_reset,
+ gpio_flags, dev_name(dev));
+ if (err) {
+ dev_err(dev, "Error requesting RESET GPIO %d\n",
+ nop->gpio_reset);
+ return err;
+ }
}
nop->dev = dev;
@@ -200,13 +215,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
}
EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
-void usb_phy_gen_cleanup_phy(struct usb_phy_gen_xceiv *nop)
-{
- if (!IS_ERR(nop->clk))
- clk_unprepare(nop->clk);
-}
-EXPORT_SYMBOL_GPL(usb_phy_gen_cleanup_phy);
-
static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -217,31 +225,36 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
int err;
u32 clk_rate = 0;
bool needs_vcc = false;
- bool needs_reset = false;
+
+ nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
+ if (!nop)
+ return -ENOMEM;
+
+ nop->reset_active_low = true; /* default behaviour */
if (dev->of_node) {
struct device_node *node = dev->of_node;
+ enum of_gpio_flags flags;
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
needs_vcc = of_property_read_bool(node, "vcc-supply");
- needs_reset = of_property_read_bool(node, "reset-supply");
+ nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
+ 0, &flags);
+ if (nop->gpio_reset == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
} else if (pdata) {
type = pdata->type;
clk_rate = pdata->clk_rate;
needs_vcc = pdata->needs_vcc;
- needs_reset = pdata->needs_reset;
+ nop->gpio_reset = pdata->gpio_reset;
}
- nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
- if (!nop)
- return -ENOMEM;
-
-
- err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc,
- needs_reset);
+ err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc);
if (err)
return err;
@@ -252,15 +265,13 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
err);
- goto err_add;
+ return err;
}
platform_set_drvdata(pdev, nop);
return 0;
-err_add:
- usb_phy_gen_cleanup_phy(nop);
return err;
}
@@ -268,7 +279,6 @@ static int usb_phy_gen_xceiv_remove(struct platform_device *pdev)
{
struct usb_phy_gen_xceiv *nop = platform_get_drvdata(pdev);
- usb_phy_gen_cleanup_phy(nop);
usb_remove_phy(&nop->phy);
return 0;
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index 61687d5a965..d2a220d8173 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -6,15 +6,14 @@ struct usb_phy_gen_xceiv {
struct device *dev;
struct clk *clk;
struct regulator *vcc;
- struct regulator *reset;
+ int gpio_reset;
+ bool reset_active_low;
};
int usb_gen_phy_init(struct usb_phy *phy);
void usb_gen_phy_shutdown(struct usb_phy *phy);
int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
- enum usb_phy_type type, u32 clk_rate, bool needs_vcc,
- bool needs_reset);
-void usb_phy_gen_cleanup_phy(struct usb_phy_gen_xceiv *nop);
+ enum usb_phy_type type, u32 clk_rate, bool needs_vcc);
#endif
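The phy-generic changes above swap the "reset" regulator for a DT reset GPIO. Condensed into one place (hypothetical foo_phy type, error paths trimmed), the added handling amounts to:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

struct foo_phy {
	int gpio_reset;
	bool reset_active_low;
};

static int foo_phy_parse_reset(struct device *dev, struct device_node *node,
			       struct foo_phy *phy)
{
	enum of_gpio_flags flags;
	unsigned long gpio_flags;

	phy->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios", 0, &flags);
	if (phy->gpio_reset == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (!gpio_is_valid(phy->gpio_reset))
		return 0;		/* the reset line is optional */

	phy->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
	/* request the line already asserted so the PHY stays in reset until init */
	gpio_flags = phy->reset_active_low ? GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
	return devm_gpio_request_one(dev, phy->gpio_reset, gpio_flags, dev_name(dev));
}

static void foo_phy_reset_set(struct foo_phy *phy, int asserted)
{
	int value = asserted;

	if (!gpio_is_valid(phy->gpio_reset))
		return;
	if (phy->reset_active_low)
		value = !value;
	gpio_set_value_cansleep(phy->gpio_reset, value);
	if (!asserted)
		usleep_range(10000, 20000);	/* give the PHY time to leave reset */
}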
diff --git a/drivers/usb/phy/phy-omap-control.c b/drivers/usb/phy/phy-omap-control.c
index a4dda8e1256..09c5ace1edd 100644
--- a/drivers/usb/phy/phy-omap-control.c
+++ b/drivers/usb/phy/phy-omap-control.c
@@ -20,87 +20,77 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/usb/omap_control_usb.h>
-static struct omap_control_usb *control_usb;
-
-/**
- * omap_get_control_dev - returns the device pointer for this control device
- *
- * This API should be called to get the device pointer for this control
- * module device. This device pointer should be used for called other
- * exported API's in this driver.
- *
- * To be used by PHY driver and glue driver.
- */
-struct device *omap_get_control_dev(void)
-{
- if (!control_usb)
- return ERR_PTR(-ENODEV);
-
- return control_usb->dev;
-}
-EXPORT_SYMBOL_GPL(omap_get_control_dev);
-
/**
- * omap_control_usb3_phy_power - power on/off the serializer using control
- * module
+ * omap_control_usb_phy_power - power on/off the phy using control module reg
* @dev: the control module device
- * @on: 0 to off and 1 to on based on powering on or off the PHY
- *
- * usb3 PHY driver should call this API to power on or off the PHY.
+ * @on: 0 or 1, based on powering on or off the PHY
*/
-void omap_control_usb3_phy_power(struct device *dev, bool on)
+void omap_control_usb_phy_power(struct device *dev, int on)
{
u32 val;
unsigned long rate;
- struct omap_control_usb *control_usb = dev_get_drvdata(dev);
+ struct omap_control_usb *control_usb;
- if (control_usb->type != OMAP_CTRL_DEV_TYPE2)
+ if (IS_ERR(dev) || !dev) {
+ pr_err("%s: invalid device\n", __func__);
return;
+ }
- rate = clk_get_rate(control_usb->sys_clk);
- rate = rate/1000000;
-
- val = readl(control_usb->phy_power);
-
- if (on) {
- val &= ~(OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK |
- OMAP_CTRL_USB_PWRCTL_CLK_FREQ_MASK);
- val |= OMAP_CTRL_USB3_PHY_TX_RX_POWERON <<
- OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
- val |= rate << OMAP_CTRL_USB_PWRCTL_CLK_FREQ_SHIFT;
- } else {
- val &= ~OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK;
- val |= OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF <<
- OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ control_usb = dev_get_drvdata(dev);
+ if (!control_usb) {
+ dev_err(dev, "%s: invalid control usb device\n", __func__);
+ return;
}
- writel(val, control_usb->phy_power);
-}
-EXPORT_SYMBOL_GPL(omap_control_usb3_phy_power);
+ if (control_usb->type == OMAP_CTRL_TYPE_OTGHS)
+ return;
-/**
- * omap_control_usb_phy_power - power on/off the phy using control module reg
- * @dev: the control module device
- * @on: 0 or 1, based on powering on or off the PHY
- */
-void omap_control_usb_phy_power(struct device *dev, int on)
-{
- u32 val;
- struct omap_control_usb *control_usb = dev_get_drvdata(dev);
+ val = readl(control_usb->power);
- val = readl(control_usb->dev_conf);
+ switch (control_usb->type) {
+ case OMAP_CTRL_TYPE_USB2:
+ if (on)
+ val &= ~OMAP_CTRL_DEV_PHY_PD;
+ else
+ val |= OMAP_CTRL_DEV_PHY_PD;
+ break;
- if (on)
- val &= ~OMAP_CTRL_DEV_PHY_PD;
- else
- val |= OMAP_CTRL_DEV_PHY_PD;
+ case OMAP_CTRL_TYPE_PIPE3:
+ rate = clk_get_rate(control_usb->sys_clk);
+ rate = rate/1000000;
+
+ if (on) {
+ val &= ~(OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK |
+ OMAP_CTRL_USB_PWRCTL_CLK_FREQ_MASK);
+ val |= OMAP_CTRL_USB3_PHY_TX_RX_POWERON <<
+ OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ val |= rate << OMAP_CTRL_USB_PWRCTL_CLK_FREQ_SHIFT;
+ } else {
+ val &= ~OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK;
+ val |= OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF <<
+ OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ }
+ break;
- writel(val, control_usb->dev_conf);
+ case OMAP_CTRL_TYPE_DRA7USB2:
+ if (on)
+ val &= ~OMAP_CTRL_USB2_PHY_PD;
+ else
+ val |= OMAP_CTRL_USB2_PHY_PD;
+ break;
+ default:
+ dev_err(dev, "%s: type %d not recognized\n",
+ __func__, control_usb->type);
+ break;
+ }
+
+ writel(val, control_usb->power);
}
EXPORT_SYMBOL_GPL(omap_control_usb_phy_power);
@@ -172,11 +162,19 @@ void omap_control_usb_set_mode(struct device *dev,
{
struct omap_control_usb *ctrl_usb;
- if (IS_ERR(dev) || control_usb->type != OMAP_CTRL_DEV_TYPE1)
+ if (IS_ERR(dev) || !dev)
return;
ctrl_usb = dev_get_drvdata(dev);
+ if (!ctrl_usb) {
+ dev_err(dev, "Invalid control usb device\n");
+ return;
+ }
+
+ if (ctrl_usb->type != OMAP_CTRL_TYPE_OTGHS)
+ return;
+
switch (mode) {
case USB_MODE_HOST:
omap_control_usb_host_mode(ctrl_usb);
@@ -193,12 +191,46 @@ void omap_control_usb_set_mode(struct device *dev,
}
EXPORT_SYMBOL_GPL(omap_control_usb_set_mode);
+#ifdef CONFIG_OF
+
+static const enum omap_control_usb_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
+static const enum omap_control_usb_type usb2_data = OMAP_CTRL_TYPE_USB2;
+static const enum omap_control_usb_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
+static const enum omap_control_usb_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
+
+static const struct of_device_id omap_control_usb_id_table[] = {
+ {
+ .compatible = "ti,control-phy-otghs",
+ .data = &otghs_data,
+ },
+ {
+ .compatible = "ti,control-phy-usb2",
+ .data = &usb2_data,
+ },
+ {
+ .compatible = "ti,control-phy-pipe3",
+ .data = &pipe3_data,
+ },
+ {
+ .compatible = "ti,control-phy-dra7usb2",
+ .data = &dra7usb2_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
+#endif
+
+
static int omap_control_usb_probe(struct platform_device *pdev)
{
struct resource *res;
- struct device_node *np = pdev->dev.of_node;
- struct omap_control_usb_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ const struct of_device_id *of_id;
+ struct omap_control_usb *control_usb;
+
+ of_id = of_match_device(of_match_ptr(omap_control_usb_id_table),
+ &pdev->dev);
+ if (!of_id)
+ return -EINVAL;
control_usb = devm_kzalloc(&pdev->dev, sizeof(*control_usb),
GFP_KERNEL);
@@ -207,40 +239,27 @@ static int omap_control_usb_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (np) {
- of_property_read_u32(np, "ti,type", &control_usb->type);
- } else if (pdata) {
- control_usb->type = pdata->type;
- } else {
- dev_err(&pdev->dev, "no pdata present\n");
- return -EINVAL;
- }
-
- control_usb->dev = &pdev->dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "control_dev_conf");
- control_usb->dev_conf = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(control_usb->dev_conf))
- return PTR_ERR(control_usb->dev_conf);
+ control_usb->dev = &pdev->dev;
+ control_usb->type = *(enum omap_control_usb_type *)of_id->data;
- if (control_usb->type == OMAP_CTRL_DEV_TYPE1) {
+ if (control_usb->type == OMAP_CTRL_TYPE_OTGHS) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"otghs_control");
control_usb->otghs_control = devm_ioremap_resource(
&pdev->dev, res);
if (IS_ERR(control_usb->otghs_control))
return PTR_ERR(control_usb->otghs_control);
- }
-
- if (control_usb->type == OMAP_CTRL_DEV_TYPE2) {
+ } else {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "phy_power_usb");
- control_usb->phy_power = devm_ioremap_resource(
- &pdev->dev, res);
- if (IS_ERR(control_usb->phy_power))
- return PTR_ERR(control_usb->phy_power);
+ "power");
+ control_usb->power = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(control_usb->power)) {
+ dev_err(&pdev->dev, "Couldn't get power register\n");
+ return PTR_ERR(control_usb->power);
+ }
+ }
+ if (control_usb->type == OMAP_CTRL_TYPE_PIPE3) {
control_usb->sys_clk = devm_clk_get(control_usb->dev,
"sys_clkin");
if (IS_ERR(control_usb->sys_clk)) {
@@ -249,20 +268,11 @@ static int omap_control_usb_probe(struct platform_device *pdev)
}
}
-
dev_set_drvdata(control_usb->dev, control_usb);
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id omap_control_usb_id_table[] = {
- { .compatible = "ti,omap-control-usb" },
- {}
-};
-MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
-#endif
-
static struct platform_driver omap_control_usb_driver = {
.probe = omap_control_usb_probe,
.driver = {
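
The PIPE3 branch of omap_control_usb_phy_power() above divides the sys_clk rate down to MHz and packs it, together with a power command, into the shared power register through the OMAP_CTRL_USB_PWRCTL_* masks and shifts. A minimal standalone sketch of that read-modify-write packing; the mask, shift and command values below are invented placeholders, not the real OMAP definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the OMAP_CTRL_USB_PWRCTL_* definitions. */
#define PWRCTL_CLK_CMD_MASK   (0xffu << 14)
#define PWRCTL_CLK_CMD_SHIFT  14
#define PWRCTL_CLK_FREQ_MASK  (0x3ffu << 22)
#define PWRCTL_CLK_FREQ_SHIFT 22
#define PHY_TX_RX_POWERON     0x3u
#define PHY_TX_RX_POWEROFF    0x0u

/* Pack a power command and a reference-clock frequency (MHz) into @val. */
static uint32_t pack_pwrctl(uint32_t val, int on, uint32_t rate_mhz)
{
	if (on) {
		val &= ~(PWRCTL_CLK_CMD_MASK | PWRCTL_CLK_FREQ_MASK);
		val |= PHY_TX_RX_POWERON << PWRCTL_CLK_CMD_SHIFT;
		val |= rate_mhz << PWRCTL_CLK_FREQ_SHIFT;
	} else {
		val &= ~PWRCTL_CLK_CMD_MASK;
		val |= PHY_TX_RX_POWEROFF << PWRCTL_CLK_CMD_SHIFT;
	}
	return val;
}

int main(void)
{
	uint32_t reg = 0;

	reg = pack_pwrctl(reg, 1, 19200000 / 1000000);	/* 19.2 MHz -> 19 */
	printf("power on:  0x%08x\n", (unsigned)reg);
	reg = pack_pwrctl(reg, 0, 0);
	printf("power off: 0x%08x\n", (unsigned)reg);
	return 0;
}
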
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index 4e8a0405f95..0c6ba29bddd 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/usb/omap_control_usb.h>
+#include <linux/of_platform.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
@@ -100,7 +101,7 @@ static int omap_usb3_suspend(struct usb_phy *x, int suspend)
udelay(1);
} while (--timeout);
- omap_control_usb3_phy_power(phy->control_dev, 0);
+ omap_control_usb_phy_power(phy->control_dev, 0);
phy->is_suspended = 1;
} else if (!suspend && phy->is_suspended) {
@@ -189,15 +190,21 @@ static int omap_usb3_init(struct usb_phy *x)
if (ret)
return ret;
- omap_control_usb3_phy_power(phy->control_dev, 1);
+ omap_control_usb_phy_power(phy->control_dev, 1);
return 0;
}
static int omap_usb3_probe(struct platform_device *pdev)
{
- struct omap_usb *phy;
- struct resource *res;
+ struct omap_usb *phy;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+
+ if (!node)
+ return -EINVAL;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
@@ -239,13 +246,20 @@ static int omap_usb3_probe(struct platform_device *pdev)
return -EINVAL;
}
- phy->control_dev = omap_get_control_dev();
- if (IS_ERR(phy->control_dev)) {
- dev_dbg(&pdev->dev, "Failed to get control device\n");
- return -ENODEV;
+ control_node = of_parse_phandle(node, "ctrl-module", 0);
+ if (!control_node) {
+ dev_err(&pdev->dev, "Failed to get control device phandle\n");
+ return -EINVAL;
}
+ control_pdev = of_find_device_by_node(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ return -EINVAL;
+ }
+
+ phy->control_dev = &control_pdev->dev;
- omap_control_usb3_phy_power(phy->control_dev, 0);
+ omap_control_usb_phy_power(phy->control_dev, 0);
usb_add_phy_dev(&phy->phy);
platform_set_drvdata(pdev, phy);
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
new file mode 100644
index 00000000000..a99a6953f11
--- /dev/null
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -0,0 +1,248 @@
+/*
+ * Renesas R-Car Gen2 USB phy driver
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/usb-rcar-gen2-phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+struct rcar_gen2_usb_phy_priv {
+ struct usb_phy phy;
+ void __iomem *base;
+ struct clk *clk;
+ spinlock_t lock;
+ int usecount;
+ u32 ugctrl2;
+};
+
+#define usb_phy_to_priv(p) container_of(p, struct rcar_gen2_usb_phy_priv, phy)
+
+/* Low Power Status register */
+#define USBHS_LPSTS_REG 0x02
+#define USBHS_LPSTS_SUSPM (1 << 14)
+
+/* USB General control register */
+#define USBHS_UGCTRL_REG 0x80
+#define USBHS_UGCTRL_CONNECT (1 << 2)
+#define USBHS_UGCTRL_PLLRESET (1 << 0)
+
+/* USB General control register 2 */
+#define USBHS_UGCTRL2_REG 0x84
+#define USBHS_UGCTRL2_USB0_PCI (1 << 4)
+#define USBHS_UGCTRL2_USB0_HS (3 << 4)
+#define USBHS_UGCTRL2_USB2_PCI (0 << 31)
+#define USBHS_UGCTRL2_USB2_SS (1 << 31)
+
+/* USB General status register */
+#define USBHS_UGSTS_REG 0x88
+#define USBHS_UGSTS_LOCK (3 << 8)
+
+/* Enable USBHS internal phy */
+static int __rcar_gen2_usbhs_phy_enable(void __iomem *base)
+{
+ u32 val;
+ int i;
+
+ /* USBHS PHY power on */
+ val = ioread32(base + USBHS_UGCTRL_REG);
+ val &= ~USBHS_UGCTRL_PLLRESET;
+ iowrite32(val, base + USBHS_UGCTRL_REG);
+
+ val = ioread16(base + USBHS_LPSTS_REG);
+ val |= USBHS_LPSTS_SUSPM;
+ iowrite16(val, base + USBHS_LPSTS_REG);
+
+ for (i = 0; i < 20; i++) {
+ val = ioread32(base + USBHS_UGSTS_REG);
+ if ((val & USBHS_UGSTS_LOCK) == USBHS_UGSTS_LOCK) {
+ val = ioread32(base + USBHS_UGCTRL_REG);
+ val |= USBHS_UGCTRL_CONNECT;
+ iowrite32(val, base + USBHS_UGCTRL_REG);
+ return 0;
+ }
+ udelay(1);
+ }
+
+ /* Timed out waiting for the PLL lock */
+ return -ETIMEDOUT;
+}
+
+/* Disable USBHS internal phy */
+static int __rcar_gen2_usbhs_phy_disable(void __iomem *base)
+{
+ u32 val;
+
+ /* USBHS PHY power off */
+ val = ioread32(base + USBHS_UGCTRL_REG);
+ val &= ~USBHS_UGCTRL_CONNECT;
+ iowrite32(val, base + USBHS_UGCTRL_REG);
+
+ val = ioread16(base + USBHS_LPSTS_REG);
+ val &= ~USBHS_LPSTS_SUSPM;
+ iowrite16(val, base + USBHS_LPSTS_REG);
+
+ val = ioread32(base + USBHS_UGCTRL_REG);
+ val |= USBHS_UGCTRL_PLLRESET;
+ iowrite32(val, base + USBHS_UGCTRL_REG);
+ return 0;
+}
+
+/* Setup USB channels */
+static void __rcar_gen2_usb_phy_init(struct rcar_gen2_usb_phy_priv *priv)
+{
+ u32 val;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Set USB channels in the USBHS UGCTRL2 register */
+ val = ioread32(priv->base);
+ val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS);
+ val |= priv->ugctrl2;
+ iowrite32(val, priv->base);
+}
+
+/* Shutdown USB channels */
+static void __rcar_gen2_usb_phy_shutdown(struct rcar_gen2_usb_phy_priv *priv)
+{
+ __rcar_gen2_usbhs_phy_disable(priv->base);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int rcar_gen2_usb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+ struct rcar_gen2_usb_phy_priv *priv = usb_phy_to_priv(phy);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ retval = suspend ? __rcar_gen2_usbhs_phy_disable(priv->base) :
+ __rcar_gen2_usbhs_phy_enable(priv->base);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return retval;
+}
+
+static int rcar_gen2_usb_phy_init(struct usb_phy *phy)
+{
+ struct rcar_gen2_usb_phy_priv *priv = usb_phy_to_priv(phy);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /*
+ * Enable the clock and setup USB channels
+ * if it's the first user
+ */
+ if (!priv->usecount++)
+ __rcar_gen2_usb_phy_init(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
+static void rcar_gen2_usb_phy_shutdown(struct usb_phy *phy)
+{
+ struct rcar_gen2_usb_phy_priv *priv = usb_phy_to_priv(phy);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!priv->usecount) {
+ dev_warn(phy->dev, "Trying to disable phy with 0 usecount\n");
+ goto out;
+ }
+
+ /* Disable everything if it's the last user */
+ if (!--priv->usecount)
+ __rcar_gen2_usb_phy_shutdown(priv);
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int rcar_gen2_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_gen2_phy_platform_data *pdata;
+ struct rcar_gen2_usb_phy_priv *priv;
+ struct resource *res;
+ void __iomem *base;
+ struct clk *clk;
+ int retval;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ clk = devm_clk_get(&pdev->dev, "usbhs");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Can't get the clock\n");
+ return PTR_ERR(clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&priv->lock);
+ priv->clk = clk;
+ priv->base = base;
+ priv->ugctrl2 = pdata->chan0_pci ?
+ USBHS_UGCTRL2_USB0_PCI : USBHS_UGCTRL2_USB0_HS;
+ priv->ugctrl2 |= pdata->chan2_pci ?
+ USBHS_UGCTRL2_USB2_PCI : USBHS_UGCTRL2_USB2_SS;
+ priv->phy.dev = dev;
+ priv->phy.label = dev_name(dev);
+ priv->phy.init = rcar_gen2_usb_phy_init;
+ priv->phy.shutdown = rcar_gen2_usb_phy_shutdown;
+ priv->phy.set_suspend = rcar_gen2_usb_phy_set_suspend;
+
+ retval = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
+ if (retval < 0) {
+ dev_err(dev, "Failed to add USB phy\n");
+ return retval;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return retval;
+}
+
+static int rcar_gen2_usb_phy_remove(struct platform_device *pdev)
+{
+ struct rcar_gen2_usb_phy_priv *priv = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&priv->phy);
+
+ return 0;
+}
+
+static struct platform_driver rcar_gen2_usb_phy_driver = {
+ .driver = {
+ .name = "usb_phy_rcar_gen2",
+ },
+ .probe = rcar_gen2_usb_phy_probe,
+ .remove = rcar_gen2_usb_phy_remove,
+};
+
+module_platform_driver(rcar_gen2_usb_phy_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen2 USB phy");
+MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
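
The new R-Car Gen2 PHY driver gates the hardware on a usecount: only the first caller of rcar_gen2_usb_phy_init() powers the PHY up and only the last caller of rcar_gen2_usb_phy_shutdown() powers it down, with a spinlock protecting the counter. A single-threaded userspace model of that first-user/last-user logic (locking elided, all names invented for illustration):

#include <stdio.h>

struct phy_model {
	int usecount;
	int powered;
};

static void phy_hw_enable(struct phy_model *p)  { p->powered = 1; }
static void phy_hw_disable(struct phy_model *p) { p->powered = 0; }

/* Power the PHY up only for the first user. */
static void phy_get(struct phy_model *p)
{
	if (!p->usecount++)
		phy_hw_enable(p);
}

/* Power the PHY down only when the last user goes away. */
static void phy_put(struct phy_model *p)
{
	if (!p->usecount) {
		fprintf(stderr, "put with zero usecount\n");
		return;
	}
	if (!--p->usecount)
		phy_hw_disable(p);
}

int main(void)
{
	struct phy_model p = { 0, 0 };

	phy_get(&p);		/* first user: hardware comes up */
	phy_get(&p);		/* second user: no hardware access */
	phy_put(&p);		/* still one user: stays powered */
	printf("usecount=%d powered=%d\n", p.usecount, p.powered);
	phy_put(&p);		/* last user: hardware goes down */
	printf("usecount=%d powered=%d\n", p.usecount, p.powered);
	return 0;
}
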
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c
index ff70e4b19b9..b3ba86627b7 100644
--- a/drivers/usb/phy/phy-samsung-usb2.c
+++ b/drivers/usb/phy/phy-samsung-usb2.c
@@ -411,6 +411,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
sphy->drv_data = drv_data;
sphy->phy.dev = sphy->dev;
sphy->phy.label = "samsung-usb2phy";
+ sphy->phy.type = USB_PHY_TYPE_USB2;
sphy->phy.init = samsung_usb2phy_init;
sphy->phy.shutdown = samsung_usb2phy_shutdown;
@@ -426,7 +427,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sphy);
- return usb_add_phy(&sphy->phy, USB_PHY_TYPE_USB2);
+ return usb_add_phy_dev(&sphy->phy);
}
static int samsung_usb2phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-samsung-usb3.c b/drivers/usb/phy/phy-samsung-usb3.c
index c6eb22213de..cc0819248ac 100644
--- a/drivers/usb/phy/phy-samsung-usb3.c
+++ b/drivers/usb/phy/phy-samsung-usb3.c
@@ -271,6 +271,7 @@ static int samsung_usb3phy_probe(struct platform_device *pdev)
sphy->clk = clk;
sphy->phy.dev = sphy->dev;
sphy->phy.label = "samsung-usb3phy";
+ sphy->phy.type = USB_PHY_TYPE_USB3;
sphy->phy.init = samsung_usb3phy_init;
sphy->phy.shutdown = samsung_usb3phy_shutdown;
sphy->drv_data = samsung_usbphy_get_driver_data(pdev);
@@ -283,7 +284,7 @@ static int samsung_usb3phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sphy);
- return usb_add_phy(&sphy->phy, USB_PHY_TYPE_USB3);
+ return usb_add_phy_dev(&sphy->phy);
}
static int samsung_usb3phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index e9cb1cb8abc..82232acf1ab 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -1090,7 +1090,7 @@ static struct platform_driver tegra_usb_phy_driver = {
.driver = {
.name = "tegra-phy",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(tegra_usb_phy_id_table),
+ .of_match_table = tegra_usb_phy_id_table,
},
};
module_platform_driver(tegra_usb_phy_driver);
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 16dbc938267..30e8a61552d 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/of.h>
/* usb register definitions */
#define USB_VENDOR_ID_LSB 0x00
diff --git a/drivers/usb/phy/phy-ulpi-viewport.c b/drivers/usb/phy/phy-ulpi-viewport.c
index 7c22a5390fc..18bb8264b5a 100644
--- a/drivers/usb/phy/phy-ulpi-viewport.c
+++ b/drivers/usb/phy/phy-ulpi-viewport.c
@@ -36,7 +36,7 @@ static int ulpi_viewport_wait(void __iomem *view, u32 mask)
return 0;
udelay(1);
- };
+ }
return -ETIMEDOUT;
}
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index a9984c700d2..1b74523e1fe 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -98,7 +98,7 @@ struct usb_phy *devm_usb_get_phy(struct device *dev, enum usb_phy_type type)
ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
phy = usb_get_phy(type);
if (!IS_ERR(phy)) {
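
The phy.c hunk makes devm_usb_get_phy() return ERR_PTR(-ENOMEM) instead of NULL on allocation failure, so callers that test the result with IS_ERR() see a consistent error. A compact userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR convention it relies on; the MAX_ERRNO encoding here is reproduced as an assumption rather than taken from any kernel header:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Errors live in the top MAX_ERRNO bytes of the address space. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Toy allocator that reports failure the way devm_usb_get_phy() now does. */
static void *get_phy(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *phy = get_phy(1);

	if (IS_ERR(phy))
		printf("got error %ld instead of a surprise NULL\n",
		       PTR_ERR(phy));
	phy = get_phy(0);
	if (!IS_ERR(phy)) {
		printf("got a real pointer %p\n", phy);
		free(phy);
	}
	return 0;
}
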
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 781426230d6..6e1b69d0f5f 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -279,7 +279,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
old_rdtodo = priv->rdtodo;
- if (old_rdtodo + size < old_rdtodo) {
+ if (old_rdtodo > SHRT_MAX - size) {
dev_dbg(dev, "To many bulk_in urbs to do.\n");
spin_unlock(&priv->lock);
goto resubmit;
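
The cyberjack change swaps a post-addition wraparound test (old_rdtodo + size < old_rdtodo) for a pre-addition bound check against SHRT_MAX, which is what actually protects a short counter. A small sketch comparing the two checks, using invented values for the counter and packet size:

#include <limits.h>
#include <stdio.h>

/* Returns 1 if adding @size to @todo would exceed what a short can hold. */
static int would_overflow_short(short todo, unsigned size)
{
	return size > (unsigned)(SHRT_MAX - todo);
}

int main(void)
{
	short rdtodo = SHRT_MAX - 10;
	unsigned size = 20;

	/* The old-style test only detects wraparound of the wider sum. */
	printf("old check flags it: %d\n", (int)(rdtodo + size) < rdtodo);
	/* The new test catches the short overflow before it happens. */
	printf("new check flags it: %d\n", would_overflow_short(rdtodo, size));
	return 0;
}
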
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b21d553c245..9ced8937a8f 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1967,8 +1967,16 @@ static int ftdi_process_packet(struct usb_serial_port *port,
port->icount.dsr++;
if (diff_status & FTDI_RS0_RI)
port->icount.rng++;
- if (diff_status & FTDI_RS0_RLSD)
+ if (diff_status & FTDI_RS0_RLSD) {
+ struct tty_struct *tty;
+
port->icount.dcd++;
+ tty = tty_port_tty_get(&port->port);
+ if (tty)
+ usb_serial_handle_dcd_change(port, tty,
+ status & FTDI_RS0_RLSD);
+ tty_kref_put(tty);
+ }
wake_up_interruptible(&port->port.delta_msr_wait);
priv->prev_status = status;
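
The ftdi_sio hunk forwards carrier-detect transitions to the tty layer: the driver XORs the new modem-status byte against the previous one and, when the RLSD bit differs, reports the current carrier state. A standalone sketch of that delta-and-dispatch step; the bit values and helper below are stand-ins, not the FTDI definitions:

#include <stdio.h>

/* Stand-ins for the FTDI_RS0_* modem status bits. */
#define RS0_CTS  0x10
#define RS0_DSR  0x20
#define RS0_RI   0x40
#define RS0_RLSD 0x80	/* carrier detect */

static void dcd_change(int active)
{
	printf("carrier %s\n", active ? "up" : "down");
}

/* Compare the new status with the previous one and report DCD transitions. */
static unsigned char process_status(unsigned char prev, unsigned char status)
{
	unsigned char diff = prev ^ status;

	if (diff & RS0_RLSD)
		dcd_change(!!(status & RS0_RLSD));
	return status;	/* becomes the new "previous" status */
}

int main(void)
{
	unsigned char prev = 0;

	prev = process_status(prev, RS0_CTS);		 /* no DCD change */
	prev = process_status(prev, RS0_CTS | RS0_RLSD); /* carrier up */
	prev = process_status(prev, RS0_CTS);		 /* carrier down */
	return 0;
}
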
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1f31e6b4c25..2b01ec8651c 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -7,7 +7,6 @@
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
@@ -37,7 +36,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
-/* All of the device info needed for the Generic Serial Converter */
struct usb_serial_driver usb_serial_generic_device = {
.driver = {
.owner = THIS_MODULE,
@@ -66,7 +64,6 @@ int usb_serial_generic_register(void)
generic_device_ids[0].match_flags =
USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
- /* register our generic driver with ourselves */
retval = usb_serial_register_drivers(serial_drivers,
"usbserial_generic", generic_device_ids);
#endif
@@ -76,7 +73,6 @@ int usb_serial_generic_register(void)
void usb_serial_generic_deregister(void)
{
#ifdef CONFIG_USB_SERIAL_GENERIC
- /* remove our generic driver */
usb_serial_deregister_drivers(serial_drivers);
#endif
}
@@ -86,13 +82,11 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
int result = 0;
unsigned long flags;
- /* clear the throttle flags */
spin_lock_irqsave(&port->lock, flags);
port->throttled = 0;
port->throttle_req = 0;
spin_unlock_irqrestore(&port->lock, flags);
- /* if we have a bulk endpoint, start reading from it */
if (port->bulk_in_size)
result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
@@ -127,12 +121,16 @@ int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
}
/**
- * usb_serial_generic_write_start - kick off an URB write
- * @port: Pointer to the &struct usb_serial_port data
+ * usb_serial_generic_write_start - start writing buffered data
+ * @port: usb-serial port
+ * @mem_flags: flags to use for memory allocations
+ *
+ * Serialised using USB_SERIAL_WRITE_BUSY flag.
*
- * Returns zero on success, or a negative errno value
+ * Return: Zero on success or if busy, otherwise a negative errno value.
*/
-static int usb_serial_generic_write_start(struct usb_serial_port *port)
+int usb_serial_generic_write_start(struct usb_serial_port *port,
+ gfp_t mem_flags)
{
struct urb *urb;
int count, result;
@@ -163,7 +161,7 @@ retry:
spin_unlock_irqrestore(&port->lock, flags);
clear_bit(i, &port->write_urbs_free);
- result = usb_submit_urb(urb, GFP_ATOMIC);
+ result = usb_submit_urb(urb, mem_flags);
if (result) {
dev_err_console(port, "%s - error submitting urb: %d\n",
__func__, result);
@@ -175,34 +173,34 @@ retry:
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
return result;
}
-
- /* Try sending off another urb, unless in irq context (in which case
- * there will be no free urb). */
- if (!in_irq())
+ /*
+ * Try sending off another urb, unless called from completion handler
+ * (in which case there will be no free urb or no data).
+ */
+ if (mem_flags != GFP_ATOMIC)
goto retry;
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
return 0;
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_write_start);
/**
- * usb_serial_generic_write - generic write function for serial USB devices
- * @tty: Pointer to &struct tty_struct for the device
- * @port: Pointer to the &usb_serial_port structure for the device
- * @buf: Pointer to the data to write
- * @count: Number of bytes to write
+ * usb_serial_generic_write - generic write function
+ * @tty: tty for the port
+ * @port: usb-serial port
+ * @buf: data to write
+ * @count: number of bytes to write
*
- * Returns the number of characters actually written, which may be anything
- * from zero to @count. If an error occurs, it returns the negative errno
- * value.
+ * Return: The number of characters buffered, which may be anything from
+ * zero to @count, or a negative errno value.
*/
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
int result;
- /* only do something if we have a bulk out endpoint */
if (!port->bulk_out_size)
return -ENODEV;
@@ -210,7 +208,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
return 0;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
- result = usb_serial_generic_write_start(port);
+ result = usb_serial_generic_write_start(port, GFP_KERNEL);
if (result)
return result;
@@ -337,10 +335,11 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
-
- /* The per character mucking around with sysrq path it too slow for
- stuff like 3G modems, so shortcircuit it in the 99.9999999% of cases
- where the USB serial is not a console anyway */
+ /*
+ * The per character mucking around with the sysrq path is too slow
+ * for stuff like 3G modems, so short-circuit it in the 99.9999999%
+ * of cases where the USB serial is not a console anyway.
+ */
if (!port->port.console || !port->sysrq)
tty_insert_flip_string(&port->port, ch, urb->actual_length);
else {
@@ -413,7 +412,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
} else {
- usb_serial_generic_write_start(port);
+ usb_serial_generic_write_start(port, GFP_ATOMIC);
}
usb_serial_port_softint(port);
@@ -425,8 +424,6 @@ void usb_serial_generic_throttle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
- /* Set the throttle request flag. It will be picked up
- * by usb_serial_generic_read_bulk_callback(). */
spin_lock_irqsave(&port->lock, flags);
port->throttle_req = 1;
spin_unlock_irqrestore(&port->lock, flags);
@@ -438,7 +435,6 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
int was_throttled;
- /* Clear the throttle flags */
spin_lock_irq(&port->lock);
was_throttled = port->throttled;
port->throttled = port->throttle_req = 0;
@@ -558,10 +554,10 @@ int usb_serial_handle_break(struct usb_serial_port *port)
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
/**
- * usb_serial_handle_dcd_change - handle a change of carrier detect state
- * @port: usb_serial_port structure for the open port
- * @tty: tty_struct structure for the port
- * @status: new carrier detect status, nonzero if active
+ * usb_serial_handle_dcd_change - handle a change of carrier detect state
+ * @port: usb-serial port
+ * @tty: tty for the port
+ * @status: new carrier detect status, nonzero if active
*/
void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
struct tty_struct *tty, unsigned int status)
@@ -570,6 +566,16 @@ void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
dev_dbg(&usb_port->dev, "%s - status %d\n", __func__, status);
+ if (tty) {
+ struct tty_ldisc *ld = tty_ldisc_ref(tty);
+
+ if (ld) {
+ if (ld->ops->dcd_change)
+ ld->ops->dcd_change(tty, status);
+ tty_ldisc_deref(ld);
+ }
+ }
+
if (status)
wake_up_interruptible(&port->open_wait);
else if (tty && !C_CLOCAL(tty))
@@ -595,7 +601,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
}
if (port->bulk_out_size) {
- r = usb_serial_generic_write_start(port);
+ r = usb_serial_generic_write_start(port, GFP_NOIO);
if (r < 0)
c++;
}
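
usb_serial_generic_write_start() is serialized with the USB_SERIAL_WRITE_BUSY bit: the caller that wins the test-and-set submits write URBs, and callers in completion context (GFP_ATOMIC) skip the retry loop. A rough userspace model of that claim/retry shape using a C11 atomic flag, with URB submission reduced to a counter stub:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag write_busy = ATOMIC_FLAG_INIT;
static int pending = 3;	/* pretend-fifo depth */

static void submit_one(void) { printf("submitted, %d left\n", --pending); }

/*
 * Start writing buffered data. Only one caller at a time gets past the
 * busy flag; atomic_ctx models being called from a completion handler,
 * where no retry is attempted.
 */
static int write_start(bool atomic_ctx)
{
	if (atomic_flag_test_and_set(&write_busy))
		return 0;	/* someone else is already writing */
retry:
	if (pending > 0)
		submit_one();

	if (!atomic_ctx && pending > 0)
		goto retry;

	atomic_flag_clear(&write_busy);
	return 0;
}

int main(void)
{
	write_start(true);	/* completion context: one submission */
	write_start(false);	/* process context: drains the rest */
	return 0;
}
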
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 84657e07dc5..439c951f261 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -455,7 +455,7 @@ static int parport_prologue(struct parport *pp)
return -1;
}
mos_parport->msg_pending = true; /* synch usb call pending */
- INIT_COMPLETION(mos_parport->syncmsg_compl);
+ reinit_completion(&mos_parport->syncmsg_compl);
spin_unlock(&release_lock);
mutex_lock(&mos_parport->serial->disc_mutex);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index fdf953539c6..e5bdd987b9e 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1532,7 +1532,11 @@ static int mos7840_tiocmget(struct tty_struct *tty)
return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
+ if (status != 1)
+ return -EIO;
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
+ if (status != 1)
+ return -EIO;
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
| ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index acaee066b99..c3d94853b4a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1376,6 +1376,23 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 7f78f300f8f..f06ed82e63d 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -208,9 +208,9 @@ static int cbaf_check(struct cbaf *cbaf)
ar_name = "ASSOCIATE";
ar_assoc = 1;
break;
- };
+ }
break;
- };
+ }
dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
"(%zu bytes): %s\n",
@@ -623,6 +623,8 @@ static int cbaf_probe(struct usb_interface *iface,
error_create_group:
error_check:
+ usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
kfree(cbaf->buffer);
error_kmalloc_buffer:
kfree(cbaf);
@@ -637,6 +639,7 @@ static void cbaf_disconnect(struct usb_interface *iface)
sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
usb_set_intfdata(iface, NULL);
usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
kfree(cbaf->buffer);
/* paranoia: clean up crypto keys */
kzfree(cbaf);
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 33a12788f9c..e538b72c4e3 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -973,7 +973,7 @@ int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
default:
WARN_ON(1);
result = NOTIFY_BAD;
- };
+ }
return result;
}
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index a09b65ebd9b..368360f9a93 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -33,7 +33,8 @@
* wa->usb_dev and wa->usb_iface initialized and refcounted,
* wa->wa_descr initialized.
*/
-int wa_create(struct wahc *wa, struct usb_interface *iface)
+int wa_create(struct wahc *wa, struct usb_interface *iface,
+ kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
@@ -41,14 +42,15 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
+ wa->quirks = quirks;
/* Fill up Data Transfer EP pointers */
wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
- wa->xfer_result_size = usb_endpoint_maxp(wa->dti_epd);
- wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
- if (wa->xfer_result == NULL) {
+ wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd);
+ wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL);
+ if (wa->dti_buf == NULL) {
result = -ENOMEM;
- goto error_xfer_result_alloc;
+ goto error_dti_buf_alloc;
}
result = wa_nep_create(wa, iface);
if (result < 0) {
@@ -59,8 +61,8 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
return 0;
error_nep_create:
- kfree(wa->xfer_result);
-error_xfer_result_alloc:
+ kfree(wa->dti_buf);
+error_dti_buf_alloc:
wa_rpipes_destroy(wa);
error_rpipes_create:
return result;
@@ -76,7 +78,7 @@ void __wa_destroy(struct wahc *wa)
usb_kill_urb(wa->buf_in_urb);
usb_put_urb(wa->buf_in_urb);
}
- kfree(wa->xfer_result);
+ kfree(wa->dti_buf);
wa_nep_destroy(wa);
wa_rpipes_destroy(wa);
}
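
wa_create() now takes a quirks argument so per-device flags from the USB id table can reach the transfer code through wa->quirks. A tiny sketch of that table-to-bitmask pattern; the vendor/product IDs and the quirk bit are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_CONCAT_ISOC 0x01	/* stand-in for the Alereon quirk bit */

struct id_entry {
	uint16_t vendor, product;
	unsigned long quirks;	/* driver_info-style per-device flags */
};

static const struct id_entry table[] = {
	{ 0x13dc, 0x5500, QUIRK_CONCAT_ISOC },	/* hypothetical IDs */
	{ 0x1234, 0x0001, 0 },
};

static unsigned long lookup_quirks(uint16_t vendor, uint16_t product)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].vendor == vendor && table[i].product == product)
			return table[i].quirks;
	return 0;
}

int main(void)
{
	unsigned long quirks = lookup_quirks(0x13dc, 0x5500);

	if (quirks & QUIRK_CONCAT_ISOC)
		printf("concatenate isoc frames into one transfer\n");
	else
		printf("one transfer per isoc frame\n");
	return 0;
}
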
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index cf250c21e94..e614f02f0cf 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -117,11 +117,25 @@ struct wa_rpipe {
struct wahc *wa;
spinlock_t seg_lock;
struct list_head seg_list;
+ struct list_head list_node;
atomic_t segs_available;
u8 buffer[1]; /* For reads/writes on USB */
};
+enum wa_dti_state {
+ WA_DTI_TRANSFER_RESULT_PENDING,
+ WA_DTI_ISOC_PACKET_STATUS_PENDING
+};
+
+enum wa_quirks {
+ /*
+ * The Alereon HWA expects the data frames in isochronous transfer
+ * requests to be concatenated and not sent as separate packets.
+ */
+ WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
+};
+
/**
* Instance of a HWA Host Controller
*
@@ -178,14 +192,26 @@ struct wahc {
u16 rpipes;
unsigned long *rpipe_bm; /* rpipe usage bitmap */
- spinlock_t rpipe_bm_lock; /* protect rpipe_bm */
+ struct list_head rpipe_delayed_list; /* delayed RPIPES. */
+ spinlock_t rpipe_lock; /* protect rpipe_bm and delayed list */
struct mutex rpipe_mutex; /* assigning resources to endpoints */
+ /*
+ * dti_state is used to track the state of the dti_urb. When dti_state
+ * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
+ * dti_isoc_xfer_seg identify which xfer the incoming isoc packet status
+ * refers to.
+ */
+ enum wa_dti_state dti_state;
+ u32 dti_isoc_xfer_in_progress;
+ u8 dti_isoc_xfer_seg;
struct urb *dti_urb; /* URB for reading xfer results */
struct urb *buf_in_urb; /* URB for reading data in */
struct edc dti_edc; /* DTI error density counter */
- struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
- size_t xfer_result_size;
+ void *dti_buf;
+ size_t dti_buf_size;
+
+ unsigned long dto_in_use; /* protect dto endpoint serialization. */
s32 status; /* For reading status */
@@ -200,10 +226,13 @@ struct wahc {
struct work_struct xfer_enqueue_work;
struct work_struct xfer_error_work;
atomic_t xfer_id_count;
+
+ kernel_ulong_t quirks;
};
-extern int wa_create(struct wahc *wa, struct usb_interface *iface);
+extern int wa_create(struct wahc *wa, struct usb_interface *iface,
+ kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
void wa_reset_all(struct wahc *wa);
@@ -239,7 +268,8 @@ static inline void wa_nep_disarm(struct wahc *wa)
/* RPipes */
static inline void wa_rpipe_init(struct wahc *wa)
{
- spin_lock_init(&wa->rpipe_bm_lock);
+ INIT_LIST_HEAD(&wa->rpipe_delayed_list);
+ spin_lock_init(&wa->rpipe_lock);
mutex_init(&wa->rpipe_mutex);
}
@@ -247,6 +277,7 @@ static inline void wa_init(struct wahc *wa)
{
edc_init(&wa->nep_edc);
atomic_set(&wa->notifs_queued, 0);
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
wa_rpipe_init(wa);
edc_init(&wa->dti_edc);
INIT_LIST_HEAD(&wa->xfer_list);
@@ -255,6 +286,7 @@ static inline void wa_init(struct wahc *wa)
spin_lock_init(&wa->xfer_list_lock);
INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
+ wa->dto_in_use = 0;
atomic_set(&wa->xfer_id_count, 1);
}
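
The new dto_in_use flag in struct wahc gives one transfer at a time exclusive use of the single DTO endpoint; wa-xfer.c below claims it with test_and_set_bit in __wa_dto_try_get() and parks losing RPIPEs on rpipe_delayed_list until __wa_dto_put() releases it. A minimal userspace model of just the try-get/put half, using a C11 atomic flag in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag dto_in_use = ATOMIC_FLAG_INIT;

/* Try to claim the single DTO endpoint; returns 1 on success. */
static int dto_try_get(void)
{
	return !atomic_flag_test_and_set(&dto_in_use);
}

/* Release the endpoint so a delayed RPIPE can be serviced. */
static void dto_put(void)
{
	atomic_flag_clear(&dto_in_use);
}

int main(void)
{
	printf("first claim:  %d\n", dto_try_get());	/* 1 */
	printf("second claim: %d\n", dto_try_get());	/* 0: must be delayed */
	dto_put();
	printf("after put:    %d\n", dto_try_get());	/* 1 again */
	return 0;
}
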
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index fd4f1ce6256..b48e74cc54d 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -143,17 +143,18 @@ static void rpipe_init(struct wa_rpipe *rpipe)
kref_init(&rpipe->refcnt);
spin_lock_init(&rpipe->seg_lock);
INIT_LIST_HEAD(&rpipe->seg_list);
+ INIT_LIST_HEAD(&rpipe->list_node);
}
static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
- spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
if (rpipe_idx < wa->rpipes)
set_bit(rpipe_idx, wa->rpipe_bm);
- spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
return rpipe_idx;
}
@@ -162,9 +163,9 @@ static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
- spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
clear_bit(rpipe_idx, wa->rpipe_bm);
- spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}
void rpipe_destroy(struct kref *_rpipe)
@@ -333,7 +334,10 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
/* FIXME: compute so seg_size > ep->maxpktsize */
rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
- rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
+ else
+ rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
epcd->bMaxBurst, 16U), 1U);
@@ -361,8 +365,10 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
epcd->bMaxSequence, 32U), 2U);
rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;
rpipe->descr.bInterval = ep->desc.bInterval;
- /* FIXME: bOverTheAirInterval */
- rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval;
+ else
+ rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
/* FIXME: xmit power & preamble blah blah */
rpipe->descr.bmAttribute = (ep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK);
@@ -477,7 +483,7 @@ error:
*/
int wa_rpipes_create(struct wahc *wa)
{
- wa->rpipes = wa->wa_descr->wNumRPipes;
+ wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
GFP_KERNEL);
if (wa->rpipe_bm == NULL)
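
rpipe_get_idx()/rpipe_put_idx() hand out RPIPE slots from a bitmap under the renamed rpipe_lock: find the next zero bit from a starting index, set it, and clear it again on release. A single-threaded sketch of the same bookkeeping over a hypothetical 64-slot pool, with plain shifts standing in for the kernel bitops and no locking:

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64

static uint64_t slot_bm;	/* bit set == slot in use */

/* Find the first free slot at or after @from; returns NSLOTS if none. */
static unsigned get_idx(unsigned from)
{
	for (unsigned i = from; i < NSLOTS; i++) {
		if (!(slot_bm & (1ULL << i))) {
			slot_bm |= 1ULL << i;
			return i;
		}
	}
	return NSLOTS;
}

static void put_idx(unsigned idx)
{
	slot_bm &= ~(1ULL << idx);
}

int main(void)
{
	unsigned a = get_idx(0);
	unsigned b = get_idx(0);

	printf("allocated %u and %u\n", a, b);	/* 0 and 1 */
	put_idx(a);
	printf("reallocated %u\n", get_idx(0));	/* 0 again */
	put_idx(b);
	return 0;
}
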
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 6ad02f57c36..ed5abe87b04 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -91,7 +91,8 @@
#include "wusbhc.h"
enum {
- WA_SEGS_MAX = 255,
+ /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
+ WA_SEGS_MAX = 128,
};
enum wa_seg_status {
@@ -107,6 +108,7 @@ enum wa_seg_status {
};
static void wa_xfer_delayed_run(struct wa_rpipe *);
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
/*
* Life cycle governed by 'struct urb' (the refcount of the struct is
@@ -114,24 +116,27 @@ static void wa_xfer_delayed_run(struct wa_rpipe *);
* struct).
*/
struct wa_seg {
- struct urb urb;
- struct urb *dto_urb; /* for data output? */
+ struct urb tr_urb; /* transfer request urb. */
+ struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
+ struct urb *dto_urb; /* for data output. */
struct list_head list_node; /* for rpipe->req_list */
struct wa_xfer *xfer; /* out xfer */
u8 index; /* which segment we are */
+ int isoc_frame_count; /* number of isoc frames in this segment. */
+ int isoc_frame_offset; /* starting frame offset in the xfer URB. */
+ int isoc_size; /* size of all isoc frames sent by this seg. */
enum wa_seg_status status;
ssize_t result; /* bytes xfered or error */
struct wa_xfer_hdr xfer_hdr;
- u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
};
static inline void wa_seg_init(struct wa_seg *seg)
{
- usb_init_urb(&seg->urb);
+ usb_init_urb(&seg->tr_urb);
/* set the remaining memory to 0. */
- memset(((void *)seg) + sizeof(seg->urb), 0,
- sizeof(*seg) - sizeof(seg->urb));
+ memset(((void *)seg) + sizeof(seg->tr_urb), 0,
+ sizeof(*seg) - sizeof(seg->tr_urb));
}
/*
@@ -153,12 +158,17 @@ struct wa_xfer {
unsigned is_dma:1;
size_t seg_size;
int result;
+ /* Isoc frame that the current transfer buffer corresponds to. */
+ int dto_isoc_frame_index;
gfp_t gfp; /* allocation mask */
struct wusb_dev *wusb_dev; /* for activity timestamps */
};
+static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame);
+
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
kref_init(&xfer->refcnt);
@@ -169,7 +179,7 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)
/*
* Destroy a transfer structure
*
- * Note that freeing xfer->seg[cnt]->urb will free the containing
+ * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
* xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
*/
static void wa_xfer_destroy(struct kref *_xfer)
@@ -178,9 +188,17 @@ static void wa_xfer_destroy(struct kref *_xfer)
if (xfer->seg) {
unsigned cnt;
for (cnt = 0; cnt < xfer->segs; cnt++) {
- usb_free_urb(xfer->seg[cnt]->dto_urb);
- usb_free_urb(&xfer->seg[cnt]->urb);
+ struct wa_seg *seg = xfer->seg[cnt];
+ if (seg) {
+ usb_free_urb(seg->isoc_pack_desc_urb);
+ if (seg->dto_urb) {
+ kfree(seg->dto_urb->sg);
+ usb_free_urb(seg->dto_urb);
+ }
+ usb_free_urb(&seg->tr_urb);
+ }
}
+ kfree(xfer->seg);
}
kfree(xfer);
}
@@ -196,6 +214,59 @@ static void wa_xfer_put(struct wa_xfer *xfer)
}
/*
+ * Try to get exclusive access to the DTO endpoint resource. Return true
+ * if successful.
+ */
+static inline int __wa_dto_try_get(struct wahc *wa)
+{
+ return (test_and_set_bit(0, &wa->dto_in_use) == 0);
+}
+
+/* Release the DTO endpoint resource. */
+static inline void __wa_dto_put(struct wahc *wa)
+{
+ clear_bit_unlock(0, &wa->dto_in_use);
+}
+
+/* Service RPIPEs that are waiting on the DTO resource. */
+static void wa_check_for_delayed_rpipes(struct wahc *wa)
+{
+ unsigned long flags;
+ int dto_waiting = 0;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
+ rpipe = list_first_entry(&wa->rpipe_delayed_list,
+ struct wa_rpipe, list_node);
+ __wa_xfer_delayed_run(rpipe, &dto_waiting);
+ /* remove this RPIPE from the list if it is not waiting. */
+ if (!dto_waiting) {
+ pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
+ __func__,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ list_del_init(&rpipe->list_node);
+ }
+ }
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/* add this RPIPE to the end of the delayed RPIPE list. */
+static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ /* add rpipe to the list if it is not already on it. */
+ if (list_empty(&rpipe->list_node)) {
+ pr_debug("%s: adding RPIPE %d to the delayed list.\n",
+ __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
+ list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
+ }
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/*
* xfer is referenced
*
* xfer->lock has to be unlocked
@@ -232,6 +303,31 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
}
/*
+ * Initialize a transfer's ID
+ *
+ * We need to use a sequential number; if we use the pointer or the
+ * hash of the pointer, it can repeat over sequential transfers and
+ * then it will confuse the HWA....wonder why in hell they put a 32
+ * bit handle in there then.
+ */
+static void wa_xfer_id_init(struct wa_xfer *xfer)
+{
+ xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+}
+
+/* Return the xfer's ID. */
+static inline u32 wa_xfer_id(struct wa_xfer *xfer)
+{
+ return xfer->id;
+}
+
+/* Return the xfer's ID in transport format (little endian). */
+static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
+{
+ return cpu_to_le32(xfer->id);
+}
+
+/*
* If transfer is done, wrap it up and return true
*
* xfer->lock has to be locked
@@ -253,33 +349,37 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
switch (seg->status) {
case WA_SEG_DONE:
if (found_short && seg->result > 0) {
- dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
- xfer, cnt, seg->result);
+ dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
+ xfer, wa_xfer_id(xfer), cnt,
+ seg->result);
urb->status = -EINVAL;
goto out;
}
urb->actual_length += seg->result;
- if (seg->result < xfer->seg_size
+ if (!(usb_pipeisoc(xfer->urb->pipe))
+ && seg->result < xfer->seg_size
&& cnt != xfer->segs-1)
found_short = 1;
- dev_dbg(dev, "xfer %p#%u: DONE short %d "
+ dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
"result %zu urb->actual_length %d\n",
- xfer, seg->index, found_short, seg->result,
- urb->actual_length);
+ xfer, wa_xfer_id(xfer), seg->index, found_short,
+ seg->result, urb->actual_length);
break;
case WA_SEG_ERROR:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
- xfer, seg->index, seg->result);
+ dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
goto out;
case WA_SEG_ABORTED:
- dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
- xfer, seg->index, urb->status);
+ dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
xfer->result = urb->status;
goto out;
default:
- dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
- xfer, cnt, seg->status);
+ dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
+ xfer, wa_xfer_id(xfer), cnt, seg->status);
xfer->result = -EINVAL;
goto out;
}
@@ -290,29 +390,6 @@ out:
}
/*
- * Initialize a transfer's ID
- *
- * We need to use a sequential number; if we use the pointer or the
- * hash of the pointer, it can repeat over sequential transfers and
- * then it will confuse the HWA....wonder why in hell they put a 32
- * bit handle in there then.
- */
-static void wa_xfer_id_init(struct wa_xfer *xfer)
-{
- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
-}
-
-/*
- * Return the xfer's ID associated with xfer
- *
- * Need to generate a
- */
-static u32 wa_xfer_id(struct wa_xfer *xfer)
-{
- return xfer->id;
-}
-
-/*
* Search for a transfer list ID on the HCD's URB list
*
* For 32 bit architectures, we use the pointer itself; for 64 bits, a
@@ -356,15 +433,11 @@ static void __wa_xfer_abort_cb(struct urb *urb)
*
* The callback (see above) does nothing but freeing up the data by
* putting the URB. Because the URB is allocated at the head of the
- * struct, the whole space we allocated is kfreed.
- *
- * We'll get an 'aborted transaction' xfer result on DTI, that'll
- * politely ignore because at this point the transaction has been
- * marked as aborted already.
+ * struct, the whole space we allocated is kfreed.
*/
-static void __wa_xfer_abort(struct wa_xfer *xfer)
+static int __wa_xfer_abort(struct wa_xfer *xfer)
{
- int result;
+ int result = -ENOMEM;
struct device *dev = &xfer->wa->usb_iface->dev;
struct wa_xfer_abort_buffer *b;
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
@@ -375,7 +448,7 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)
b->cmd.bLength = sizeof(b->cmd);
b->cmd.bRequestType = WA_XFER_ABORT;
b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
- b->cmd.dwTransferID = wa_xfer_id(xfer);
+ b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
usb_init_urb(&b->urb);
usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
@@ -385,7 +458,7 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)
result = usb_submit_urb(&b->urb, GFP_ATOMIC);
if (result < 0)
goto error_submit;
- return; /* callback frees! */
+ return result; /* callback frees! */
error_submit:
@@ -394,11 +467,52 @@ error_submit:
xfer, result);
kfree(b);
error_kmalloc:
- return;
+ return result;
}
/*
+ * Calculate the number of isoc frames starting from isoc_frame_offset
+ * that will fit a in transfer segment.
+ */
+static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
+ int isoc_frame_offset, int *total_size)
+{
+ int segment_size = 0, frame_count = 0;
+ int index = isoc_frame_offset;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+
+ while ((index < xfer->urb->number_of_packets)
+ && ((segment_size + iso_frame_desc[index].length)
+ <= xfer->seg_size)) {
+ /*
+ * For Alereon HWA devices, only include an isoc frame in a
+ * segment if it is physically contiguous with the previous
+ * frame. This is required because those devices expect
+ * the isoc frames to be sent as a single USB transaction as
+ * opposed to one transaction per frame with standard HWA.
+ */
+ if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ && (index > isoc_frame_offset)
+ && ((iso_frame_desc[index - 1].offset +
+ iso_frame_desc[index - 1].length) !=
+ iso_frame_desc[index].offset))
+ break;
+
+ /* this frame fits. count it. */
+ ++frame_count;
+ segment_size += iso_frame_desc[index].length;
+
+ /* move to the next isoc frame. */
+ ++index;
+ }
+
+ *total_size = segment_size;
+ return frame_count;
+}
+
+/*
*
* @returns < 0 on error, transfer segment request size if ok
*/
@@ -422,43 +536,92 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
result = sizeof(struct wa_xfer_bi);
break;
case USB_ENDPOINT_XFER_ISOC:
- dev_err(dev, "FIXME: ISOC not implemented\n");
- result = -ENOSYS;
- goto error;
+ if (usb_pipeout(urb->pipe)) {
+ *pxfer_type = WA_XFER_TYPE_ISO;
+ result = sizeof(struct wa_xfer_hwaiso);
+ } else {
+ dev_err(dev, "FIXME: ISOC IN not implemented\n");
+ result = -ENOSYS;
+ goto error;
+ }
+ break;
default:
/* never happens */
BUG();
result = -EINVAL; /* shut gcc up */
- };
+ }
xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
- xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
- * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
- /* Compute the segment size and make sure it is a multiple of
- * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
- * a check (FIXME) */
+
maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
- if (xfer->seg_size < maxpktsize) {
- dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
- "%zu\n", xfer->seg_size, maxpktsize);
- result = -EINVAL;
- goto error;
+ if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
+ int index = 0;
+
+ xfer->seg_size = maxpktsize;
+ xfer->segs = 0;
+ /*
+ * loop over urb->number_of_packets to determine how many
+ * xfer segments will be needed to send the isoc frames.
+ */
+ while (index < urb->number_of_packets) {
+ int seg_size; /* don't care. */
+ index += __wa_seg_calculate_isoc_frame_count(xfer,
+ index, &seg_size);
+ ++xfer->segs;
+ }
+ } else {
+ xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+ * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+ /* Compute the segment size and make sure it is a multiple of
+ * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+ * a check (FIXME) */
+ if (xfer->seg_size < maxpktsize) {
+ dev_err(dev,
+ "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
+ xfer->seg_size, maxpktsize);
+ result = -EINVAL;
+ goto error;
+ }
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+ xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
+ xfer->seg_size);
+ if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
+ xfer->segs = 1;
}
- xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
- xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
- if (xfer->segs >= WA_SEGS_MAX) {
- dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
- (int)(urb->transfer_buffer_length / xfer->seg_size),
+
+ if (xfer->segs > WA_SEGS_MAX) {
+ dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
+ (urb->transfer_buffer_length/xfer->seg_size),
WA_SEGS_MAX);
result = -EINVAL;
goto error;
}
- if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
- xfer->segs = 1;
error:
return result;
}
+static void __wa_setup_isoc_packet_descr(
+ struct wa_xfer_packet_info_hwaiso *packet_desc,
+ struct wa_xfer *xfer,
+ struct wa_seg *seg) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int frame_index;
+
+ /* populate isoc packet descriptor. */
+ packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
+ packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
+ (sizeof(packet_desc->PacketLength[0]) *
+ seg->isoc_frame_count));
+ for (frame_index = 0; frame_index < seg->isoc_frame_count;
+ ++frame_index) {
+ int offset_index = frame_index + seg->isoc_frame_offset;
+ packet_desc->PacketLength[frame_index] =
+ cpu_to_le16(iso_frame_desc[offset_index].length);
+ }
+}
+
+
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
struct wa_xfer_hdr *xfer_hdr0,
@@ -466,12 +629,13 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
size_t xfer_hdr_size)
{
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+ struct wa_seg *seg = xfer->seg[0];
- xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+ xfer_hdr0 = &seg->xfer_hdr;
xfer_hdr0->bLength = xfer_hdr_size;
xfer_hdr0->bRequestType = xfer_type;
xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
- xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
+ xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
xfer_hdr0->bTransferSegment = 0;
switch (xfer_type) {
case WA_XFER_TYPE_CTL: {
@@ -484,8 +648,18 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
}
case WA_XFER_TYPE_BI:
break;
- case WA_XFER_TYPE_ISO:
- printk(KERN_ERR "FIXME: ISOC not implemented\n");
+ case WA_XFER_TYPE_ISO: {
+ struct wa_xfer_hwaiso *xfer_iso =
+ container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
+ struct wa_xfer_packet_info_hwaiso *packet_desc =
+ ((void *)xfer_iso) + xfer_hdr_size;
+
+ /* populate the isoc section of the transfer request. */
+ xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
+ /* populate isoc packet descriptor. */
+ __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
+ break;
+ }
default:
BUG();
};
@@ -494,12 +668,12 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
/*
* Callback for the OUT data phase of the segment request
*
- * Check wa_seg_cb(); most comments also apply here because this
+ * Check wa_seg_tr_cb(); most comments also apply here because this
* function does almost the same thing and they work closely
* together.
*
* If the seg request has failed but this DTO phase has succeeded,
- * wa_seg_cb() has already failed the segment and moved the
+ * wa_seg_tr_cb() has already failed the segment and moved the
* status to WA_SEG_ERROR, so this will go through 'case 0' and
* effectively do nothing.
*/
@@ -512,6 +686,139 @@ static void wa_seg_dto_cb(struct urb *urb)
struct wa_rpipe *rpipe;
unsigned long flags;
unsigned rpipe_ready = 0;
+ int data_send_done = 1, release_dto = 0, holding_dto = 0;
+ u8 done = 0;
+ int result;
+
+ /* free the sg if it was used. */
+ kfree(urb->sg);
+ urb->sg = NULL;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /* Alereon HWA sends all isoc frames in a single transfer. */
+ if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ xfer->dto_isoc_frame_index += seg->isoc_frame_count;
+ else
+ xfer->dto_isoc_frame_index += 1;
+ if (xfer->dto_isoc_frame_index < seg->isoc_frame_count) {
+ data_send_done = 0;
+ holding_dto = 1; /* checked in error cases. */
+ /*
+ * if this is the last isoc frame of the segment, we
+ * can release DTO after sending this frame.
+ */
+ if ((xfer->dto_isoc_frame_index + 1) >=
+ seg->isoc_frame_count)
+ release_dto = 1;
+ }
+ dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
+ wa_xfer_id(xfer), seg->index,
+ xfer->dto_isoc_frame_index, holding_dto, release_dto);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ seg->result += urb->actual_length;
+ if (data_send_done) {
+ dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
+ wa_xfer_id(xfer), seg->index, seg->result);
+ if (seg->status < WA_SEG_PENDING)
+ seg->status = WA_SEG_PENDING;
+ } else {
+ /* should only hit this for isoc xfers. */
+ /*
+ * Populate the dto URB with the next isoc frame buffer,
+ * send the URB and release DTO if we no longer need it.
+ */
+ __wa_populate_dto_urb_isoc(xfer, seg,
+ seg->isoc_frame_offset +
+ xfer->dto_isoc_frame_index);
+
+ /* resubmit the URB with the next isoc frame. */
+ result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
+ wa_xfer_id(xfer), seg->index, result);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ goto error_dto_submit;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (release_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+ case -ENOENT: /* as it was done by the who unlinked us */
+ if (holding_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ break;
+ default: /* Other errors ... */
+ dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
+ wa_xfer_id(xfer), seg->index, urb->status);
+ goto error_default;
+ }
+
+ return;
+
+error_dto_submit:
+error_default:
+ spin_lock_irqsave(&xfer->lock, flags);
+ rpipe = xfer->ep->hcpriv;
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)){
+ dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (seg->status != WA_SEG_ERROR) {
+ seg->status = WA_SEG_ERROR;
+ seg->result = urb->status;
+ xfer->segs_done++;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (holding_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+
+}
+
+/*
+ * Callback for the isoc packet descriptor phase of the segment request
+ *
+ * Check wa_seg_tr_cb(); most comments also apply here because this
+ * function does almost the same thing and they work closely
+ * together.
+ *
+ * If the seg request has failed but this phase has succeeded,
+ * wa_seg_tr_cb() has already failed the segment and moved the
+ * status to WA_SEG_ERROR, so this will go through 'case 0' and
+ * effectively do nothing.
+ */
+static void wa_seg_iso_pack_desc_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready = 0;
u8 done = 0;
switch (urb->status) {
@@ -519,11 +826,10 @@ static void wa_seg_dto_cb(struct urb *urb)
spin_lock_irqsave(&xfer->lock, flags);
wa = xfer->wa;
dev = &wa->usb_iface->dev;
- dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
- xfer, seg->index, urb->actual_length);
- if (seg->status < WA_SEG_PENDING)
+ dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
+ wa_xfer_id(xfer), seg->index);
+ if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
seg->status = WA_SEG_PENDING;
- seg->result = urb->actual_length;
spin_unlock_irqrestore(&xfer->lock, flags);
break;
case -ECONNRESET: /* URB unlinked; no need to do anything */
@@ -534,15 +840,15 @@ static void wa_seg_dto_cb(struct urb *urb)
wa = xfer->wa;
dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
- dev_dbg(dev, "xfer %p#%u: data out error %d\n",
- xfer, seg->index, urb->status);
+ pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
+ wa_xfer_id(xfer), seg->index, urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
- dev_err(dev, "DTO: URB max acceptable errors "
- "exceeded, resetting device\n");
+ dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
+ usb_unlink_urb(seg->dto_urb);
seg->status = WA_SEG_ERROR;
seg->result = urb->status;
xfer->segs_done++;
@@ -572,11 +878,11 @@ static void wa_seg_dto_cb(struct urb *urb)
* We have to check before setting the status to WA_SEG_PENDING
* because sometimes the xfer result callback arrives before this
* callback (geeeeeeze), so it might happen that we are already in
- * another state. As well, we don't set it if the transfer is inbound,
+ * another state. As well, we don't set it if the transfer is not inbound,
* as in that case, wa_seg_dto_cb will do it when the OUT data phase
* finishes.
*/
-static void wa_seg_cb(struct urb *urb)
+static void wa_seg_tr_cb(struct urb *urb)
{
struct wa_seg *seg = urb->context;
struct wa_xfer *xfer = seg->xfer;
@@ -592,8 +898,11 @@ static void wa_seg_cb(struct urb *urb)
spin_lock_irqsave(&xfer->lock, flags);
wa = xfer->wa;
dev = &wa->usb_iface->dev;
- dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
- if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
+ xfer, wa_xfer_id(xfer), seg->index);
+ if (xfer->is_inbound &&
+ seg->status < WA_SEG_PENDING &&
+ !(usb_pipeisoc(xfer->urb->pipe)))
seg->status = WA_SEG_PENDING;
spin_unlock_irqrestore(&xfer->lock, flags);
break;
@@ -606,14 +915,16 @@ static void wa_seg_cb(struct urb *urb)
dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: request error %d\n",
- xfer, seg->index, urb->status);
+ dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
dev_err(dev, "DTO: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
}
+ usb_unlink_urb(seg->isoc_pack_desc_urb);
usb_unlink_urb(seg->dto_urb);
seg->status = WA_SEG_ERROR;
seg->result = urb->status;
@@ -629,9 +940,11 @@ static void wa_seg_cb(struct urb *urb)
}
}
-/* allocate an SG list to store bytes_to_transfer bytes and copy the
+/*
+ * Allocate an SG list to store bytes_to_transfer bytes and copy the
* subset of the in_sg that matches the buffer subset
- * we are about to transfer. */
+ * we are about to transfer.
+ */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
const unsigned int bytes_transferred,
const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
@@ -710,6 +1023,75 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
}
/*
+ * Populate DMA buffer info for the isoc dto urb.
+ */
+static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame)
+{
+ seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ /* dto urb buffer address pulled from iso_frame_desc. */
+ seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
+ xfer->urb->iso_frame_desc[curr_iso_frame].offset;
+ /* The Alereon HWA sends a single URB with all isoc segs. */
+ if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ seg->dto_urb->transfer_buffer_length = seg->isoc_size;
+ else
+ seg->dto_urb->transfer_buffer_length =
+ xfer->urb->iso_frame_desc[curr_iso_frame].length;
+}
+
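As the comment in the setup path below notes, only the first isoc frame of a segment is populated here; on non-Alereon hardware the remaining frames are filled in and resent from the DTO completion routine, which is not part of this hunk. A hedged sketch of that resend step, assuming the shape of wa_seg_dto_cb() and reusing the dto_isoc_frame_index and isoc_frame_offset fields introduced by this patch:

/* sketch only: per-frame resend from the DTO completion path. */
static void wa_dto_resend_next_isoc_frame_sketch(struct wa_xfer *xfer,
		struct wa_seg *seg)
{
	int result;

	if (++(xfer->dto_isoc_frame_index) < seg->isoc_frame_count) {
		/* point seg->dto_urb at the next frame's buffer and resubmit. */
		__wa_populate_dto_urb_isoc(xfer, seg,
			seg->isoc_frame_offset + xfer->dto_isoc_frame_index);
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0)
			pr_err("%s: DTO submit failed: %d\n", __func__, result);
	}
}
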
+/*
+ * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
+ */
+static int __wa_populate_dto_urb(struct wa_xfer *xfer,
+ struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
+{
+ int result = 0;
+
+ if (xfer->is_dma) {
+ seg->dto_urb->transfer_dma =
+ xfer->urb->transfer_dma + buf_itr_offset;
+ seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ seg->dto_urb->transfer_flags &=
+ ~URB_NO_TRANSFER_DMA_MAP;
+ /* this should always be 0 before a resubmit. */
+ seg->dto_urb->num_mapped_sgs = 0;
+
+ if (xfer->urb->transfer_buffer) {
+ seg->dto_urb->transfer_buffer =
+ xfer->urb->transfer_buffer +
+ buf_itr_offset;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ } else {
+ seg->dto_urb->transfer_buffer = NULL;
+
+ /*
+ * allocate an SG list to store seg_size bytes
+ * and copy the subset of the xfer->urb->sg that
+ * matches the buffer subset we are about to
+ * read.
+ */
+ seg->dto_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ buf_itr_offset, buf_itr_size,
+ &(seg->dto_urb->num_sgs));
+ if (!(seg->dto_urb->sg))
+ result = -ENOMEM;
+ }
+ }
+ seg->dto_urb->transfer_buffer_length = buf_itr_size;
+
+ return result;
+}
+
+/*
* Allocate the segs array and initialize each of them
*
* The segments are freed by wa_xfer_destroy() when the xfer use count
@@ -719,13 +1101,14 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
*/
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
- int result, cnt;
+ int result, cnt, iso_frame_offset;
size_t alloc_size = sizeof(*xfer->seg[0])
- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
struct usb_device *usb_dev = xfer->wa->usb_dev;
const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
struct wa_seg *seg;
size_t buf_itr, buf_size, buf_itr_size;
+ int xfer_isoc_frame_offset = 0;
result = -ENOMEM;
xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -733,18 +1116,35 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
goto error_segs_kzalloc;
buf_itr = 0;
buf_size = xfer->urb->transfer_buffer_length;
+ iso_frame_offset = 0;
for (cnt = 0; cnt < xfer->segs; cnt++) {
- seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
+ size_t iso_pkt_descr_size = 0;
+ int seg_isoc_frame_count = 0, seg_isoc_size = 0;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg_isoc_frame_count =
+ __wa_seg_calculate_isoc_frame_count(xfer,
+ xfer_isoc_frame_offset, &seg_isoc_size);
+
+ iso_pkt_descr_size =
+ sizeof(struct wa_xfer_packet_info_hwaiso) +
+ (seg_isoc_frame_count * sizeof(__le16));
+ }
+ seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
+ GFP_ATOMIC);
if (seg == NULL)
goto error_seg_kmalloc;
wa_seg_init(seg);
seg->xfer = xfer;
seg->index = cnt;
- usb_fill_bulk_urb(&seg->urb, usb_dev,
+ seg->isoc_frame_count = seg_isoc_frame_count;
+ seg->isoc_frame_offset = xfer_isoc_frame_offset;
+ seg->isoc_size = seg_isoc_size;
+ usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
usb_sndbulkpipe(usb_dev,
dto_epd->bEndpointAddress),
&seg->xfer_hdr, xfer_hdr_size,
- wa_seg_cb, seg);
+ wa_seg_tr_cb, seg);
buf_itr_size = min(buf_size, xfer->seg_size);
if (xfer->is_inbound == 0 && buf_size > 0) {
/* outbound data. */
@@ -756,69 +1156,64 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
usb_sndbulkpipe(usb_dev,
dto_epd->bEndpointAddress),
NULL, 0, wa_seg_dto_cb, seg);
- if (xfer->is_dma) {
- seg->dto_urb->transfer_dma =
- xfer->urb->transfer_dma + buf_itr;
- seg->dto_urb->transfer_flags |=
- URB_NO_TRANSFER_DMA_MAP;
- seg->dto_urb->transfer_buffer = NULL;
- seg->dto_urb->sg = NULL;
- seg->dto_urb->num_sgs = 0;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /* iso packet descriptor. */
+ seg->isoc_pack_desc_urb =
+ usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->isoc_pack_desc_urb == NULL)
+ goto error_iso_pack_desc_alloc;
+ /*
+			 * The buffer for the isoc packet descriptor is
+			 * located after the transfer request header in
+			 * the segment object memory buffer.
+ */
+ usb_fill_bulk_urb(
+ seg->isoc_pack_desc_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ (void *)(&seg->xfer_hdr) +
+ xfer_hdr_size,
+ iso_pkt_descr_size,
+ wa_seg_iso_pack_desc_cb, seg);
+
+ /*
+ * Fill in the xfer buffer information for the
+ * first isoc frame. Subsequent frames in this
+ * segment will be filled in and sent from the
+ * DTO completion routine, if needed.
+ */
+ __wa_populate_dto_urb_isoc(xfer, seg,
+ xfer_isoc_frame_offset);
+ /* adjust starting frame offset for next seg. */
+ xfer_isoc_frame_offset += seg_isoc_frame_count;
} else {
- /* do buffer or SG processing. */
- seg->dto_urb->transfer_flags &=
- ~URB_NO_TRANSFER_DMA_MAP;
- /* this should always be 0 before a resubmit. */
- seg->dto_urb->num_mapped_sgs = 0;
-
- if (xfer->urb->transfer_buffer) {
- seg->dto_urb->transfer_buffer =
- xfer->urb->transfer_buffer +
- buf_itr;
- seg->dto_urb->sg = NULL;
- seg->dto_urb->num_sgs = 0;
- } else {
- /* allocate an SG list to store seg_size
- bytes and copy the subset of the
- xfer->urb->sg that matches the
- buffer subset we are about to read.
- */
- seg->dto_urb->sg =
- wa_xfer_create_subset_sg(
- xfer->urb->sg,
- buf_itr, buf_itr_size,
- &(seg->dto_urb->num_sgs));
-
- if (!(seg->dto_urb->sg)) {
- seg->dto_urb->num_sgs = 0;
- goto error_sg_alloc;
- }
-
- seg->dto_urb->transfer_buffer = NULL;
- }
+ /* fill in the xfer buffer information. */
+ result = __wa_populate_dto_urb(xfer, seg,
+ buf_itr, buf_itr_size);
+ if (result < 0)
+ goto error_seg_outbound_populate;
+
+ buf_itr += buf_itr_size;
+ buf_size -= buf_itr_size;
}
- seg->dto_urb->transfer_buffer_length = buf_itr_size;
}
seg->status = WA_SEG_READY;
- buf_itr += buf_itr_size;
- buf_size -= buf_itr_size;
}
return 0;
-error_sg_alloc:
+ /*
+ * Free the memory for the current segment which failed to init.
+	 * Use the fact that cnt is left where it failed. The remaining
+ * segments will be cleaned up by wa_xfer_destroy.
+ */
+error_iso_pack_desc_alloc:
+error_seg_outbound_populate:
usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
kfree(xfer->seg[cnt]);
- cnt--;
+ xfer->seg[cnt] = NULL;
error_seg_kmalloc:
- /* use the fact that cnt is left at were it failed */
- for (; cnt >= 0; cnt--) {
- if (xfer->seg[cnt] && xfer->is_inbound == 0) {
- usb_free_urb(xfer->seg[cnt]->dto_urb);
- kfree(xfer->seg[cnt]->dto_urb->sg);
- }
- kfree(xfer->seg[cnt]);
- }
error_segs_kzalloc:
return result;
}
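Each segment is allocated as one contiguous block: the wa_seg structure (whose trailing xfer_hdr space is sized by xfer_hdr_size) followed immediately by the isoc packet descriptor when the endpoint is isochronous. A minimal sketch of the pointer arithmetic the packet-descriptor URB relies on; the helper name is made up for illustration:

/* sketch: the isoc packet descriptor sits right after the transfer
 * request header bytes inside one wa_seg allocation. */
static inline struct wa_xfer_packet_info_hwaiso *
wa_seg_iso_pack_desc_sketch(struct wa_seg *seg, size_t xfer_hdr_size)
{
	return (void *)(&seg->xfer_hdr) + xfer_hdr_size;
}
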
@@ -856,21 +1251,45 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
wa_xfer_id_init(xfer);
__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
- /* Fill remainig headers */
+ /* Fill remaining headers */
xfer_hdr = xfer_hdr0;
- transfer_size = urb->transfer_buffer_length;
- xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
- xfer->seg_size : transfer_size;
- transfer_size -= xfer->seg_size;
- for (cnt = 1; cnt < xfer->segs; cnt++) {
- xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
- memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
- xfer_hdr->bTransferSegment = cnt;
- xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
- cpu_to_le32(xfer->seg_size)
- : cpu_to_le32(transfer_size);
- xfer->seg[cnt]->status = WA_SEG_READY;
+ if (xfer_type == WA_XFER_TYPE_ISO) {
+ xfer_hdr0->dwTransferLength =
+ cpu_to_le32(xfer->seg[0]->isoc_size);
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ struct wa_xfer_packet_info_hwaiso *packet_desc;
+ struct wa_seg *seg = xfer->seg[cnt];
+
+ xfer_hdr = &seg->xfer_hdr;
+ packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
+ /*
+ * Copy values from the 0th header. Segment specific
+ * values are set below.
+ */
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength =
+ cpu_to_le32(seg->isoc_size);
+ __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
+ seg->status = WA_SEG_READY;
+ }
+ } else {
+ transfer_size = urb->transfer_buffer_length;
+ xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size) :
+ cpu_to_le32(transfer_size);
transfer_size -= xfer->seg_size;
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength =
+ transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size)
+ : cpu_to_le32(transfer_size);
+ xfer->seg[cnt]->status = WA_SEG_READY;
+ transfer_size -= xfer->seg_size;
+ }
}
xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
result = 0;
@@ -885,20 +1304,46 @@ error_setup_sizes:
* rpipe->seg_lock is held!
*/
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
- struct wa_seg *seg)
+ struct wa_seg *seg, int *dto_done)
{
int result;
- result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
+
+ /* default to done unless we encounter a multi-frame isoc segment. */
+ *dto_done = 1;
+
+ /* submit the transfer request. */
+ result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
if (result < 0) {
- printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
- xfer, seg->index, result);
+ pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
+ __func__, xfer, seg->index, result);
goto error_seg_submit;
}
+ /* submit the isoc packet descriptor if present. */
+ if (seg->isoc_pack_desc_urb) {
+ struct wahc *wa = xfer->wa;
+
+ result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
+ if (result < 0) {
+ pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ goto error_iso_pack_desc_submit;
+ }
+ xfer->dto_isoc_frame_index = 0;
+ /*
+ * If this segment contains more than one isoc frame, hold
+ * onto the dto resource until we send all frames.
+ * Only applies to non-Alereon devices.
+ */
+ if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
+ && (seg->isoc_frame_count > 1))
+ *dto_done = 0;
+ }
+ /* submit the out data if this is an out request. */
if (seg->dto_urb) {
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
- printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
- xfer, seg->index, result);
+ pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
+ __func__, xfer, seg->index, result);
goto error_dto_submit;
}
}
@@ -907,38 +1352,48 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
return 0;
error_dto_submit:
- usb_unlink_urb(&seg->urb);
+ usb_unlink_urb(seg->isoc_pack_desc_urb);
+error_iso_pack_desc_submit:
+ usb_unlink_urb(&seg->tr_urb);
error_seg_submit:
seg->status = WA_SEG_ERROR;
seg->result = result;
+ *dto_done = 1;
return result;
}
/*
- * Execute more queued request segments until the maximum concurrent allowed
+ * Execute more queued request segments until the maximum concurrent allowed.
+ * Return true if the DTO resource was acquired and released.
*
* The ugly unlock/lock sequence on the error path is needed as the
* xfer->lock normally nests the seg_lock and not viceversa.
- *
*/
-static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
- int result;
+ int result, dto_acquired = 0, dto_done = 0;
struct device *dev = &rpipe->wa->usb_iface->dev;
struct wa_seg *seg;
struct wa_xfer *xfer;
unsigned long flags;
+ *dto_waiting = 0;
+
spin_lock_irqsave(&rpipe->seg_lock, flags);
while (atomic_read(&rpipe->segs_available) > 0
- && !list_empty(&rpipe->seg_list)) {
+ && !list_empty(&rpipe->seg_list)
+ && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
list_node);
list_del(&seg->list_node);
xfer = seg->xfer;
- result = __wa_seg_submit(rpipe, xfer, seg);
- dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
- xfer, seg->index, atomic_read(&rpipe->segs_available), result);
+ result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
+ /* release the dto resource if this RPIPE is done with it. */
+ if (dto_done)
+ __wa_dto_put(rpipe->wa);
+ dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ atomic_read(&rpipe->segs_available), result);
if (unlikely(result < 0)) {
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
spin_lock_irqsave(&xfer->lock, flags);
@@ -948,7 +1403,37 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
spin_lock_irqsave(&rpipe->seg_lock, flags);
}
}
+ /*
+	 * Mark this RPIPE as waiting if the DTO was not acquired, there are
+	 * delayed segs, and there are no active transfers to wake us up later.
+ */
+ if (!dto_acquired && !list_empty(&rpipe->seg_list)
+ && (atomic_read(&rpipe->segs_available) ==
+ le16_to_cpu(rpipe->descr.wRequests)))
+ *dto_waiting = 1;
+
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+ return dto_done;
+}
+
+static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+{
+ int dto_waiting;
+ int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
+
+ /*
+ * If this RPIPE is waiting on the DTO resource, add it to the tail of
+ * the waiting list.
+ * Otherwise, if the WA DTO resource was acquired and released by
+ * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
+ * DTO and failed during that time. Check the delayed list and process
+ * any waiters. Start searching from the next RPIPE index.
+ */
+ if (dto_waiting)
+ wa_add_delayed_rpipe(rpipe->wa, rpipe);
+ else if (dto_done)
+ wa_check_for_delayed_rpipes(rpipe->wa);
}
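The __wa_dto_try_get()/__wa_dto_put() helpers and the delayed-RPIPE list manipulated above are added elsewhere in this commit and are not visible in this hunk. A minimal sketch of the intended semantics, assuming a dto_in_use bit in struct wahc (the field name is an assumption here):

/* sketch: serialize the single DTO endpoint between RPIPEs. */
static int __wa_dto_try_get_sketch(struct wahc *wa)
{
	/* 1 if we now own the DTO resource, 0 if another RPIPE holds it. */
	return !test_and_set_bit(0, &wa->dto_in_use);
}

static void __wa_dto_put_sketch(struct wahc *wa)
{
	clear_bit_unlock(0, &wa->dto_in_use);
}
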
/*
@@ -960,7 +1445,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
*/
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
- int result;
+ int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
struct wahc *wa = xfer->wa;
struct device *dev = &wa->usb_iface->dev;
unsigned cnt;
@@ -979,27 +1464,58 @@ static int __wa_xfer_submit(struct wa_xfer *xfer)
result = 0;
spin_lock_irqsave(&rpipe->seg_lock, flags);
for (cnt = 0; cnt < xfer->segs; cnt++) {
+ int delay_seg = 1;
+
available = atomic_read(&rpipe->segs_available);
empty = list_empty(&rpipe->seg_list);
seg = xfer->seg[cnt];
- dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
- xfer, cnt, available, empty,
- available == 0 || !empty ? "delayed" : "submitted");
- if (available == 0 || !empty) {
- dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
+ if (available && empty) {
+ /*
+ * Only attempt to acquire DTO if we have a segment
+ * to send.
+ */
+ dto_acquired = __wa_dto_try_get(rpipe->wa);
+ if (dto_acquired) {
+ delay_seg = 0;
+ result = __wa_seg_submit(rpipe, xfer, seg,
+ &dto_done);
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
+ xfer, wa_xfer_id(xfer), cnt, available,
+ empty);
+ if (dto_done)
+ __wa_dto_put(rpipe->wa);
+
+ if (result < 0) {
+ __wa_xfer_abort(xfer);
+ goto error_seg_submit;
+ }
+ }
+ }
+
+ if (delay_seg) {
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
+ xfer, wa_xfer_id(xfer), cnt, available, empty);
seg->status = WA_SEG_DELAYED;
list_add_tail(&seg->list_node, &rpipe->seg_list);
- } else {
- result = __wa_seg_submit(rpipe, xfer, seg);
- if (result < 0) {
- __wa_xfer_abort(xfer);
- goto error_seg_submit;
- }
}
xfer->segs_submitted++;
}
error_seg_submit:
+ /*
+	 * Mark this RPIPE as waiting if the DTO was not acquired, there are
+	 * delayed segs, and there are no active transfers to wake us up later.
+ */
+ if (!dto_acquired && !list_empty(&rpipe->seg_list)
+ && (atomic_read(&rpipe->segs_available) ==
+ le16_to_cpu(rpipe->descr.wRequests)))
+ dto_waiting = 1;
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+ if (dto_waiting)
+ wa_add_delayed_rpipe(rpipe->wa, rpipe);
+ else if (dto_done)
+ wa_check_for_delayed_rpipes(rpipe->wa);
+
return result;
}
@@ -1025,7 +1541,7 @@ error_seg_submit:
* result never kicks in, the xfer will timeout from the USB code and
* dequeue() will be called.
*/
-static void wa_urb_enqueue_b(struct wa_xfer *xfer)
+static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
int result;
unsigned long flags;
@@ -1036,18 +1552,22 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
unsigned done;
result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
- if (result < 0)
+ if (result < 0) {
+ pr_err("%s: error_rpipe_get\n", __func__);
goto error_rpipe_get;
+ }
result = -ENODEV;
/* FIXME: segmentation broken -- kills DWA */
mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
if (urb->dev == NULL) {
mutex_unlock(&wusbhc->mutex);
+ pr_err("%s: error usb dev gone\n", __func__);
goto error_dev_gone;
}
wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
if (wusb_dev == NULL) {
mutex_unlock(&wusbhc->mutex);
+ pr_err("%s: error wusb dev gone\n", __func__);
goto error_dev_gone;
}
mutex_unlock(&wusbhc->mutex);
@@ -1055,21 +1575,28 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
spin_lock_irqsave(&xfer->lock, flags);
xfer->wusb_dev = wusb_dev;
result = urb->status;
- if (urb->status != -EINPROGRESS)
+ if (urb->status != -EINPROGRESS) {
+ pr_err("%s: error_dequeued\n", __func__);
goto error_dequeued;
+ }
result = __wa_xfer_setup(xfer, urb);
- if (result < 0)
+ if (result < 0) {
+ pr_err("%s: error_xfer_setup\n", __func__);
goto error_xfer_setup;
+ }
result = __wa_xfer_submit(xfer);
- if (result < 0)
+ if (result < 0) {
+ pr_err("%s: error_xfer_submit\n", __func__);
goto error_xfer_submit;
+ }
spin_unlock_irqrestore(&xfer->lock, flags);
- return;
+ return 0;
- /* this is basically wa_xfer_completion() broken up wa_xfer_giveback()
- * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
- * upundo setup().
+ /*
+	 * this is basically wa_xfer_completion() broken up; wa_xfer_giveback()
+ * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
+ * setup().
*/
error_xfer_setup:
error_dequeued:
@@ -1081,8 +1608,7 @@ error_dev_gone:
rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
xfer->result = result;
- wa_xfer_giveback(xfer);
- return;
+ return result;
error_xfer_submit:
done = __wa_xfer_is_done(xfer);
@@ -1090,6 +1616,8 @@ error_xfer_submit:
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
+ /* return success since the completion routine will run. */
+ return 0;
}
/*
@@ -1123,7 +1651,8 @@ void wa_urb_enqueue_run(struct work_struct *ws)
list_del_init(&xfer->list_node);
urb = xfer->urb;
- wa_urb_enqueue_b(xfer);
+ if (wa_urb_enqueue_b(xfer) < 0)
+ wa_xfer_giveback(xfer);
usb_put_urb(urb); /* taken when queuing */
}
}
@@ -1229,7 +1758,19 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
queue_work(wusbd, &wa->xfer_enqueue_work);
} else {
- wa_urb_enqueue_b(xfer);
+ result = wa_urb_enqueue_b(xfer);
+ if (result < 0) {
+ /*
+ * URB submit/enqueue failed. Clean up, return an
+ * error and do not run the callback. This avoids
+ * an infinite submit/complete loop.
+ */
+ dev_err(dev, "%s: URB enqueue failed: %d\n",
+ __func__, result);
+ wa_put(xfer->wa);
+ wa_xfer_put(xfer);
+ return result;
+ }
}
return 0;
@@ -1264,7 +1805,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
struct wa_xfer *xfer;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
- unsigned cnt;
+ unsigned cnt, done = 0, xfer_abort_pending;
unsigned rpipe_ready = 0;
xfer = urb->hcpriv;
@@ -1278,6 +1819,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
goto out;
}
spin_lock_irqsave(&xfer->lock, flags);
+ pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
rpipe = xfer->ep->hcpriv;
if (rpipe == NULL) {
pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
@@ -1293,9 +1835,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
if (xfer->seg == NULL) /* still hasn't reached */
goto out_unlock; /* setup(), enqueue_b() completes */
/* Ok, the xfer is in flight already, it's been setup and submitted.*/
- __wa_xfer_abort(xfer);
+ xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
for (cnt = 0; cnt < xfer->segs; cnt++) {
seg = xfer->seg[cnt];
+ pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
+ __func__, wa_xfer_id(xfer), cnt, seg->status);
switch (seg->status) {
case WA_SEG_NOTREADY:
case WA_SEG_READY:
@@ -1304,42 +1848,50 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
WARN_ON(1);
break;
case WA_SEG_DELAYED:
+ /*
+ * delete from rpipe delayed list. If no segments on
+ * this xfer have been submitted, __wa_xfer_is_done will
+ * trigger a giveback below. Otherwise, the submitted
+ * segments will be completed in the DTI interrupt.
+ */
seg->status = WA_SEG_ABORTED;
spin_lock_irqsave(&rpipe->seg_lock, flags2);
list_del(&seg->list_node);
xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
break;
- case WA_SEG_SUBMITTED:
- seg->status = WA_SEG_ABORTED;
- usb_unlink_urb(&seg->urb);
- if (xfer->is_inbound == 0)
- usb_unlink_urb(seg->dto_urb);
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- break;
- case WA_SEG_PENDING:
- seg->status = WA_SEG_ABORTED;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- break;
- case WA_SEG_DTI_PENDING:
- usb_unlink_urb(wa->dti_urb);
- seg->status = WA_SEG_ABORTED;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- break;
case WA_SEG_DONE:
case WA_SEG_ERROR:
case WA_SEG_ABORTED:
break;
+ /*
+ * In the states below, the HWA device already knows
+ * about the transfer. If an abort request was sent,
+ * allow the HWA to process it and wait for the
+ * results. Otherwise, the DTI state and seg completed
+ * counts can get out of sync.
+ */
+ case WA_SEG_SUBMITTED:
+ case WA_SEG_PENDING:
+ case WA_SEG_DTI_PENDING:
+ /*
+ * Check if the abort was successfully sent. This could
+ * be false if the HWA has been removed but we haven't
+ * gotten the disconnect notification yet.
+ */
+ if (!xfer_abort_pending) {
+ seg->status = WA_SEG_ABORTED;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ xfer->segs_done++;
+ }
+ break;
}
}
xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
- __wa_xfer_is_done(xfer);
+ done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
- wa_xfer_completion(xfer);
+ if (done)
+ wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
return 0;
@@ -1410,13 +1962,56 @@ static int wa_xfer_status_to_errno(u8 status)
}
/*
+ * If a last segment flag and/or a transfer result error is encountered,
+ * no other segment transfer results will be returned from the device.
+ * Mark the remaining submitted or pending segs as completed so that
+ * the xfer will complete cleanly.
+ */
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ struct wa_seg *incoming_seg)
+{
+ int index;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
+ index++) {
+ struct wa_seg *current_seg = xfer->seg[index];
+
+ BUG_ON(current_seg == NULL);
+
+ switch (current_seg->status) {
+ case WA_SEG_SUBMITTED:
+ case WA_SEG_PENDING:
+ case WA_SEG_DTI_PENDING:
+ rpipe_avail_inc(rpipe);
+ /*
+ * do not increment RPIPE avail for the WA_SEG_DELAYED case
+ * since it has not been submitted to the RPIPE.
+ */
+ case WA_SEG_DELAYED:
+ xfer->segs_done++;
+ current_seg->status = incoming_seg->status;
+ break;
+ case WA_SEG_ABORTED:
+ break;
+ default:
+ WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
+ __func__, wa_xfer_id(xfer), index,
+ current_seg->status);
+ break;
+ }
+ }
+}
+
+/*
* Process a xfer result completion message
*
- * inbound transfers: need to schedule a DTI read
+ * inbound transfers: need to schedule a buf_in_urb read
*
* FIXME: this function needs to be broken up in parts
*/
-static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
+ struct wa_xfer_result *xfer_result)
{
int result;
struct device *dev = &wa->usb_iface->dev;
@@ -1424,8 +2019,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
u8 seg_idx;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
- struct wa_xfer_result *xfer_result = wa->xfer_result;
- u8 done = 0;
+ unsigned done = 0;
u8 usb_status;
unsigned rpipe_ready = 0;
@@ -1436,8 +2030,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
seg = xfer->seg[seg_idx];
rpipe = xfer->ep->hcpriv;
usb_status = xfer_result->bTransferStatus;
- dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
- xfer, seg_idx, usb_status, seg->status);
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
+ xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
if (seg->status == WA_SEG_ABORTED
|| seg->status == WA_SEG_ERROR) /* already handled */
goto segment_aborted;
@@ -1453,12 +2047,19 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
seg->result = wa_xfer_status_to_errno(usb_status);
dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
xfer, xfer->id, seg->index, usb_status);
+ seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
+ WA_SEG_ABORTED : WA_SEG_ERROR;
goto error_complete;
}
/* FIXME: we ignore warnings, tally them for stats */
if (usb_status & 0x40) /* Warning?... */
usb_status = 0; /* ... pass */
- if (xfer->is_inbound) { /* IN data phase: read to buffer */
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /* set up WA state to read the isoc packet status next. */
+ wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
+ wa->dti_isoc_xfer_seg = seg_idx;
+ wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
+ } else if (xfer->is_inbound) { /* IN data phase: read to buffer */
seg->status = WA_SEG_DTI_PENDING;
BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
/* this should always be 0 before a resubmit. */
@@ -1535,12 +2136,14 @@ error_submit_buf_in:
xfer, seg_idx, result);
seg->result = result;
kfree(wa->buf_in_urb->sg);
+ wa->buf_in_urb->sg = NULL;
error_sg_alloc:
__wa_xfer_abort(xfer);
-error_complete:
seg->status = WA_SEG_ERROR;
+error_complete:
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
+ wa_complete_remaining_xfer_segs(xfer, seg);
done = __wa_xfer_is_done(xfer);
/*
* queue work item to clear STALL for control endpoints.
@@ -1552,10 +2155,8 @@ error_complete:
dev_info(dev, "Control EP stall. Queue delayed work.\n");
spin_lock_irq(&wa->xfer_list_lock);
- /* remove xfer from xfer_list. */
- list_del(&xfer->list_node);
- /* add xfer to xfer_errored_list. */
- list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
+ /* move xfer from xfer_list to xfer_errored_list. */
+ list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
spin_unlock_irq(&wa->xfer_list_lock);
spin_unlock_irqrestore(&xfer->lock, flags);
queue_work(wusbd, &wa->xfer_error_work);
@@ -1587,6 +2188,90 @@ segment_aborted:
}
/*
+ * Process an isochronous packet status message
+ *
+ * inbound transfers: need to schedule a buf_in_urb read
+ */
+static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer_packet_status_hwaiso *packet_status;
+ struct wa_xfer_packet_status_len_hwaiso *status_array;
+ struct wa_xfer *xfer;
+ unsigned long flags;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ unsigned done = 0;
+ unsigned rpipe_ready = 0, seg_index;
+ int expected_size;
+
+ /* We have a xfer result buffer; check it */
+ dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
+ if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
+ dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
+ packet_status->bPacketType);
+ goto error_parse_buffer;
+ }
+ xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
+ if (xfer == NULL) {
+ dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
+ wa->dti_isoc_xfer_in_progress);
+ goto error_parse_buffer;
+ }
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
+ goto error_bad_seg;
+ seg = xfer->seg[wa->dti_isoc_xfer_seg];
+ rpipe = xfer->ep->hcpriv;
+ expected_size = sizeof(*packet_status) +
+ (sizeof(packet_status->PacketStatus[0]) *
+ seg->isoc_frame_count);
+ if (urb->actual_length != expected_size) {
+ dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
+ urb->actual_length, expected_size);
+ goto error_bad_seg;
+ }
+ if (le16_to_cpu(packet_status->wLength) != expected_size) {
+ dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
+ le16_to_cpu(packet_status->wLength));
+ goto error_bad_seg;
+ }
+	/* copy isoc packet status and lengths back to the xfer urb. */
+ status_array = packet_status->PacketStatus;
+ for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
+ xfer->urb->iso_frame_desc[seg->index].status =
+ wa_xfer_status_to_errno(
+ le16_to_cpu(status_array[seg_index].PacketStatus));
+ xfer->urb->iso_frame_desc[seg->index].actual_length =
+ le16_to_cpu(status_array[seg_index].PacketLength);
+ }
+
+ if (!xfer->is_inbound) {
+ /* OUT transfer, complete it -- */
+ seg->status = WA_SEG_DONE;
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ return;
+
+error_bad_seg:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
+error_parse_buffer:
+ return;
+}
+
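The message parsed above carries a header followed by one status/length pair per isoc frame of the segment. The field names below are copied from the code; the exact packing and member order come from the WUSB/WA headers rather than this diff, so treat this as an illustrative sketch only:

struct wa_xfer_packet_status_len_hwaiso_sketch {
	__le16 PacketLength;
	__le16 PacketStatus;
} __packed;

struct wa_xfer_packet_status_hwaiso_sketch {
	__le16 wLength;		/* total size, validated against expected_size */
	u8     bPacketType;	/* WA_XFER_ISO_PACKET_STATUS */
	struct wa_xfer_packet_status_len_hwaiso_sketch PacketStatus[];
} __packed;
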
+/*
* Callback for the IN data phase
*
* If successful transition state; otherwise, take a note of the
@@ -1687,56 +2372,61 @@ static void wa_buf_in_cb(struct urb *urb)
* We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
* errors) in the URBs.
*/
-static void wa_xfer_result_cb(struct urb *urb)
+static void wa_dti_cb(struct urb *urb)
{
int result;
struct wahc *wa = urb->context;
struct device *dev = &wa->usb_iface->dev;
- struct wa_xfer_result *xfer_result;
u32 xfer_id;
- struct wa_xfer *xfer;
u8 usb_status;
BUG_ON(wa->dti_urb != urb);
switch (wa->dti_urb->status) {
case 0:
- /* We have a xfer result buffer; check it */
- dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
- urb->actual_length, urb->transfer_buffer);
- if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
- dev_err(dev, "DTI Error: xfer result--bad size "
- "xfer result (%d bytes vs %zu needed)\n",
- urb->actual_length, sizeof(*xfer_result));
- break;
- }
- xfer_result = wa->xfer_result;
- if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
- dev_err(dev, "DTI Error: xfer result--"
- "bad header length %u\n",
- xfer_result->hdr.bLength);
- break;
- }
- if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
- dev_err(dev, "DTI Error: xfer result--"
- "bad header type 0x%02x\n",
- xfer_result->hdr.bNotifyType);
- break;
- }
- usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_NOT_FOUND)
- /* taken care of already */
- break;
- xfer_id = xfer_result->dwTransferID;
- xfer = wa_xfer_get_by_id(wa, xfer_id);
- if (xfer == NULL) {
- /* FIXME: transaction might have been cancelled */
- dev_err(dev, "DTI Error: xfer result--"
- "unknown xfer 0x%08x (status 0x%02x)\n",
- xfer_id, usb_status);
- break;
+ if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
+ struct wa_xfer_result *xfer_result;
+ struct wa_xfer *xfer;
+
+ /* We have a xfer result buffer; check it */
+ dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ if (urb->actual_length != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
+ urb->actual_length,
+ sizeof(*xfer_result));
+ break;
+ }
+ xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
+ if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
+ xfer_result->hdr.bLength);
+ break;
+ }
+ if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
+ dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
+ xfer_result->hdr.bNotifyType);
+ break;
+ }
+ usb_status = xfer_result->bTransferStatus & 0x3f;
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND)
+ /* taken care of already */
+ break;
+ xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+ xfer = wa_xfer_get_by_id(wa, xfer_id);
+ if (xfer == NULL) {
+ /* FIXME: transaction not found. */
+ dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
+ xfer_id, usb_status);
+ break;
+ }
+ wa_xfer_result_chew(wa, xfer, xfer_result);
+ wa_xfer_put(xfer);
+ } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
+ wa_process_iso_packet_status(wa, urb);
+ } else {
+ dev_err(dev, "DTI Error: unexpected EP state = %d\n",
+ wa->dti_state);
}
- wa_xfer_result_chew(wa, xfer);
- wa_xfer_put(xfer);
break;
case -ENOENT: /* (we killed the URB)...so, no broadcast */
case -ESHUTDOWN: /* going away! */
@@ -1777,7 +2467,7 @@ out:
* don't really set it up and start it until the first xfer complete
* notification arrives, which is what we do here.
*
- * Follow up in wa_xfer_result_cb(), as that's where the whole state
+ * Follow up in wa_dti_cb(), as that's where the whole state
* machine starts.
*
* So here we just initialize the DTI URB for reading transfer result
@@ -1813,8 +2503,8 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
usb_fill_bulk_urb(
wa->dti_urb, wa->usb_dev,
usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- wa->xfer_result, wa->xfer_result_size,
- wa_xfer_result_cb, wa);
+ wa->dti_buf, wa->dti_buf_size,
+ wa_dti_cb, wa);
wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (wa->buf_in_urb == NULL) {
@@ -1836,6 +2526,7 @@ out:
error_dti_urb_submit:
usb_put_urb(wa->buf_in_urb);
+ wa->buf_in_urb = NULL;
error_buf_in_urb_alloc:
usb_put_urb(wa->dti_urb);
wa->dti_urb = NULL;
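wa_dti_cb() now dispatches on wa->dti_state: a transfer result for an isoc segment puts the DTI endpoint into packet-status mode, and wa_process_iso_packet_status() switches it back once the per-frame statuses and lengths have been copied out. The enum values used here are presumably defined in the HWA host headers, which this section does not show; a sketch of the two states involved:

/* sketch of the DTI read state machine driven by wa_dti_cb(). */
enum wa_dti_state_sketch {
	WA_DTI_TRANSFER_RESULT_PENDING,		/* expect a xfer result */
	WA_DTI_ISOC_PACKET_STATUS_PENDING,	/* expect isoc packet status */
};
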
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
index 9209eafc75b..80079b8fed1 100644
--- a/drivers/uwb/lc-dev.c
+++ b/drivers/uwb/lc-dev.c
@@ -244,7 +244,7 @@ static ssize_t uwb_dev_RSSI_store(struct device *dev,
static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
-static struct attribute *dev_attrs[] = {
+static struct attribute *uwb_dev_attrs[] = {
&dev_attr_EUI_48.attr,
&dev_attr_DevAddr.attr,
&dev_attr_BPST.attr,
@@ -253,20 +253,10 @@ static struct attribute *dev_attrs[] = {
&dev_attr_RSSI.attr,
NULL,
};
-
-static struct attribute_group dev_attr_group = {
- .attrs = dev_attrs,
-};
-
-static const struct attribute_group *groups[] = {
- &dev_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(uwb_dev);
/**
* Device SYSFS registration
- *
- *
*/
static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
{
@@ -276,7 +266,7 @@ static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
/* Device sysfs files are only useful for neighbor devices not
local radio controllers. */
if (&uwb_dev->rc->uwb_dev != uwb_dev)
- dev->groups = groups;
+ dev->groups = uwb_dev_groups;
dev->parent = parent_dev;
dev_set_drvdata(dev, uwb_dev);
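ATTRIBUTE_GROUPS(uwb_dev) generates the attribute_group and groups-array boilerplate that the removed lines used to spell out by hand. Roughly, simplified from <linux/sysfs.h>, it expands to:

static const struct attribute_group uwb_dev_group = {
	.attrs = uwb_dev_attrs,
};

static const struct attribute_group *uwb_dev_groups[] = {
	&uwb_dev_group,
	NULL,
};
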
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
index 5c5b3fc9088..e3ed6ff6a48 100644
--- a/drivers/uwb/umc-bus.c
+++ b/drivers/uwb/umc-bus.c
@@ -201,6 +201,7 @@ static ssize_t capability_id_show(struct device *dev, struct device_attribute *a
return sprintf(buf, "0x%02x\n", umc->cap_id);
}
+static DEVICE_ATTR_RO(capability_id);
static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -208,12 +209,14 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c
return sprintf(buf, "0x%04x\n", umc->version);
}
+static DEVICE_ATTR_RO(version);
-static struct device_attribute umc_dev_attrs[] = {
- __ATTR_RO(capability_id),
- __ATTR_RO(version),
- __ATTR_NULL,
+static struct attribute *umc_dev_attrs[] = {
+ &dev_attr_capability_id.attr,
+ &dev_attr_version.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(umc_dev);
struct bus_type umc_bus_type = {
.name = "umc",
@@ -222,7 +225,7 @@ struct bus_type umc_bus_type = {
.remove = umc_device_remove,
.suspend = umc_device_suspend,
.resume = umc_device_resume,
- .dev_attrs = umc_dev_attrs,
+ .dev_groups = umc_dev_groups,
};
EXPORT_SYMBOL_GPL(umc_bus_type);
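The umc bus conversion swaps the deprecated per-bus dev_attrs array for dev_groups, with DEVICE_ATTR_RO() generating the read-only attribute structs. Roughly, simplified from <linux/device.h>:

/* DEVICE_ATTR_RO(version) is roughly equivalent to: */
static struct device_attribute dev_attr_version = {
	.attr = { .name = "version", .mode = S_IRUGO },
	.show = version_show,
};
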
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e663921eebb..f175629513e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2168,15 +2168,15 @@ static int tcm_vhost_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index fa44fbed397..552258c8f99 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -478,11 +478,10 @@ int __init mc68x328fb_init(void)
return -EINVAL;
}
- printk(KERN_INFO
- "fb%d: %s frame buffer device\n", fb_info.node, fb_info.fix.id);
- printk(KERN_INFO
- "fb%d: %dx%dx%d at 0x%08lx\n", fb_info.node,
- mc68x328fb_default.xres_virtual, mc68x328fb_default.yres_virtual,
+ fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
+ fb_info(&fb_info, "%dx%dx%d at 0x%08lx\n",
+ mc68x328fb_default.xres_virtual,
+ mc68x328fb_default.yres_virtual,
1 << mc68x328fb_default.bits_per_pixel, videomemory);
return 0;
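These framebuffer conversions rely on the fb_* message helpers, which prepend the "fb%d: " prefix that the open-coded printk()s used to build by hand. The fb_info() form is roughly the macro below (fb_err() and fb_dbg() follow the same pattern with pr_err() and pr_debug()); the _sketch suffix is only to avoid clashing with the real name in <linux/fb.h>:

#define fb_info_sketch(fb_info, fmt, ...) \
	pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
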
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 84b685f7ab6..4f2e1b35eb3 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -19,10 +19,10 @@ source "drivers/char/agp/Kconfig"
source "drivers/gpu/vga/Kconfig"
-source "drivers/gpu/drm/Kconfig"
-
source "drivers/gpu/host1x/Kconfig"
+source "drivers/gpu/drm/Kconfig"
+
config VGASTATE
tristate
default n
@@ -996,6 +996,8 @@ config FB_ATMEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS
+ select VIDEOMODE_HELPERS
help
This enables support for the AT91/AT32 LCD Controller.
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 7e8346ec9cd..a305caea58e 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -949,9 +949,7 @@ free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
* the page.
*/
page = virt_to_page(virtual_start);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(virtual_start);
+ __free_reserved_page(page);
virtual_start += PAGE_SIZE;
mb_freed += PAGE_SIZE / 1024;
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 0a2cce7285b..14d6b3793e0 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -10,6 +10,7 @@
*
* ARM PrimeCell PL110 Color LCD Controller
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -544,13 +545,17 @@ static int clcdfb_register(struct clcd_fb *fb)
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
- struct clcd_board *board = dev->dev.platform_data;
+ struct clcd_board *board = dev_get_platdata(&dev->dev);
struct clcd_fb *fb;
int ret;
if (!board)
return -EINVAL;
+ ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
ret = amba_request_regions(dev, NULL);
if (ret) {
printk(KERN_ERR "CLCD: unable to reserve regs region\n");
@@ -594,8 +599,6 @@ static int clcdfb_remove(struct amba_device *dev)
{
struct clcd_fb *fb = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
clcdfb_disable(fb);
unregister_framebuffer(&fb->fb);
if (fb->fb.cmap.len)
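dma_set_mask_and_coherent() combines the streaming and coherent DMA mask setup into one call. Roughly, per <linux/dma-mapping.h>, it behaves like this sketch:

static int dma_set_mask_and_coherent_sketch(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
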
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index a6780eecff0..0dac36ce09d 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -3742,13 +3742,12 @@ default_chipset:
if (err)
goto unset_drvdata;
- printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- info->node, info->fix.id, info->fix.smem_len>>10);
+ fb_info(info, "%s frame buffer device, using %dK of video memory\n",
+ info->fix.id, info->fix.smem_len>>10);
return 0;
unset_drvdata:
- dev_set_drvdata(&pdev->dev, NULL);
fb_dealloc_cmap(&info->cmap);
free_irq:
free_irq(IRQ_AMIGA_COPPER, info->par);
@@ -3768,7 +3767,6 @@ static int __exit amifb_remove(struct platform_device *pdev)
struct fb_info *info = dev_get_drvdata(&pdev->dev);
unregister_framebuffer(info);
- dev_set_drvdata(&pdev->dev, NULL);
fb_dealloc_cmap(&info->cmap);
free_irq(IRQ_AMIGA_COPPER, info->par);
custom.dmacon = DMAF_ALL | DMAF_MASTER;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index e43401afdd0..1b0b233b8b3 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -556,9 +556,8 @@ static int arcfb_probe(struct platform_device *dev)
goto err1;
}
}
- printk(KERN_INFO
- "fb%d: Arc frame buffer device, using %dK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
+ videomemorysize >> 10);
/* this inits the lcd but doesn't clear dirty pixels */
for (i = 0; i < num_cols * num_rows; i++) {
@@ -572,8 +571,7 @@ static int arcfb_probe(struct platform_device *dev)
/* if we were told to splash the screen, we just clear it */
if (!nosplash) {
for (i = 0; i < num_cols * num_rows; i++) {
- printk(KERN_INFO "fb%d: splashing lcd %d\n",
- info->node, i);
+ fb_info(info, "splashing lcd %d\n", i);
ks108_set_start_line(par, i, 0);
ks108_clear_lcd(par, i);
}
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 94a51f1ef90..a6b29bd4a12 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -137,8 +137,7 @@ static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
- printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
- "height %d, depth %d, length %d\n", info->node,
+ fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
map->width, map->height, map->depth, map->length);
return;
}
@@ -517,7 +516,7 @@ static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
if (rv < 0) {
- printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ fb_err(info, "cannot set requested pixclock, keeping old value\n");
return;
}
@@ -584,7 +583,7 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
rv = svga_match_format (arkfb_formats, var, NULL);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ fb_err(info, "unsupported mode requested\n");
return rv;
}
@@ -604,14 +603,15 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size)
{
- printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ fb_err(info, "not enough framebuffer memory (%d kB requested, %d kB available)\n",
+ mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
rv = svga_check_timings (&ark_timing_regs, var, info->node);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ fb_err(info, "invalid timings requested\n");
return rv;
}
@@ -693,7 +693,7 @@ static int arkfb_set_par(struct fb_info *info)
vga_wseq(par->state.vgabase, 0x18, regval);
/* Set the offset register */
- pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
+ fb_dbg(info, "offset register : %d\n", offset_value);
svga_wcrt_multi(par->state.vgabase, ark_offset_regs, offset_value);
/* fix for hi-res textmode */
@@ -716,7 +716,7 @@ static int arkfb_set_par(struct fb_info *info)
/* Set mode-specific register values */
switch (mode) {
case 0:
- pr_debug("fb%d: text mode\n", info->node);
+ fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
@@ -725,7 +725,7 @@ static int arkfb_set_par(struct fb_info *info)
break;
case 1:
- pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor\n");
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
@@ -733,44 +733,44 @@ static int arkfb_set_par(struct fb_info *info)
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 2:
- pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 3:
- pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ fb_dbg(info, "8 bit pseudocolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode */
if (info->var.pixclock > 20000) {
- pr_debug("fb%d: not using multiplex\n", info->node);
+ fb_dbg(info, "not using multiplex\n");
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
} else {
- pr_debug("fb%d: using multiplex\n", info->node);
+ fb_dbg(info, "using multiplex\n");
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_16);
hdiv = 2;
}
break;
case 4:
- pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+ fb_dbg(info, "5/5/5 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB1555_16);
break;
case 5:
- pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ fb_dbg(info, "5/6/5 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB0565_16);
break;
case 6:
- pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode ??? */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
@@ -779,7 +779,7 @@ static int arkfb_set_par(struct fb_info *info)
hdiv = 2;
break;
case 7:
- pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8/8 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x1E); /* 32bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
@@ -787,7 +787,7 @@ static int arkfb_set_par(struct fb_info *info)
hmul = 2;
break;
default:
- printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+ fb_err(info, "unsupported mode - bug\n");
return -EINVAL;
}
@@ -879,19 +879,19 @@ static int arkfb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- pr_debug("fb%d: unblank\n", info->node);
+ fb_dbg(info, "unblank\n");
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_NORMAL:
- pr_debug("fb%d: blank\n", info->node);
+ fb_dbg(info, "blank\n");
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_POWERDOWN:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
- pr_debug("fb%d: sync down\n", info->node);
+ fb_dbg(info, "sync down\n");
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
break;
@@ -1048,12 +1048,12 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(info->device, "cannot register framebugger\n");
+ dev_err(info->device, "cannot register framebuffer\n");
goto err_reg_fb;
}
- printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
- pci_name(dev), info->fix.smem_len >> 20);
+ fb_info(info, "%s on %s, %d MB RAM\n",
+ info->fix.id, pci_name(dev), info->fix.smem_len >> 20);
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -1108,7 +1108,6 @@ static void ark_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index d5a37d62847..d611f1a1ac5 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -527,8 +527,8 @@ static int init_asiliant(struct fb_info *p, unsigned long addr)
return err;
}
- printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n",
- p->node, p->fix.smem_len / 1024);
+ fb_info(p, "Asiliant 69000 frame buffer (%dK RAM detected)\n",
+ p->fix.smem_len / 1024);
writeb(0xff, mmio_base + 0x78c);
chips_hw_init(p);
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 64e41f5448c..e21d1f58554 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -3246,11 +3246,8 @@ int __init atafb_init(void)
return -EINVAL;
}
- // FIXME: mode needs setting!
- //printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- // fb_info.node, fb_info.mode->name, screen_len>>10);
- printk("fb%d: frame buffer device, using %dK of video memory\n",
- fb_info.node, screen_len >> 10);
+ fb_info(&fb_info, "frame buffer device, using %dK of video memory\n",
+ screen_len >> 10);
/* TODO: This driver cannot be unloaded yet */
return 0;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 088511a58a2..8521051cf94 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -20,12 +20,55 @@
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
#include <mach/cpu.h>
#include <asm/gpio.h>
#include <video/atmel_lcdc.h>
+struct atmel_lcdfb_config {
+ bool have_alt_pixclock;
+ bool have_hozval;
+ bool have_intensity_bit;
+};
+
+ /* LCD Controller info data structure, stored in device platform_data */
+struct atmel_lcdfb_info {
+ spinlock_t lock;
+ struct fb_info *info;
+ void __iomem *mmio;
+ int irq_base;
+ struct work_struct task;
+
+ unsigned int smem_len;
+ struct platform_device *pdev;
+ struct clk *bus_clk;
+ struct clk *lcdc_clk;
+
+ struct backlight_device *backlight;
+ u8 bl_power;
+ u8 saved_lcdcon;
+
+ u32 pseudo_palette[16];
+ bool have_intensity_bit;
+
+ struct atmel_lcdfb_pdata pdata;
+
+ struct atmel_lcdfb_config *config;
+};
+
+struct atmel_lcdfb_power_ctrl_gpio {
+ int gpio;
+ int active_low;
+
+ struct list_head list;
+};
+
#define lcdc_readl(sinfo, reg) __raw_readl((sinfo)->mmio+(reg))
#define lcdc_writel(sinfo, reg, val) __raw_writel((val), (sinfo)->mmio+(reg))
@@ -34,12 +77,6 @@
#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */
#define ATMEL_LCDC_FIFO_SIZE 512 /* words */
-struct atmel_lcdfb_config {
- bool have_alt_pixclock;
- bool have_hozval;
- bool have_intensity_bit;
-};
-
static struct atmel_lcdfb_config at91sam9261_config = {
.have_hozval = true,
.have_intensity_bit = true,
@@ -248,18 +285,27 @@ static void exit_backlight(struct atmel_lcdfb_info *sinfo)
static void init_contrast(struct atmel_lcdfb_info *sinfo)
{
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
/* contrast pwm can be 'inverted' */
- if (sinfo->lcdcon_pol_negative)
+ if (pdata->lcdcon_pol_negative)
contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
/* have some default contrast/backlight settings */
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
- if (sinfo->lcdcon_is_backlight)
+ if (pdata->lcdcon_is_backlight)
init_backlight(sinfo);
}
+static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int on)
+{
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+ if (pdata->atmel_lcdfb_power_control)
+ pdata->atmel_lcdfb_power_control(pdata, on);
+}
static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.type = FB_TYPE_PACKED_PIXELS,
@@ -299,9 +345,11 @@ static unsigned long compute_hozval(struct atmel_lcdfb_info *sinfo,
static void atmel_lcdfb_stop_nowait(struct atmel_lcdfb_info *sinfo)
{
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
/* Turn off the LCD controller and the DMA controller */
lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
- sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
+ pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
/* Wait for the LCDC core to become idle */
while (lcdc_readl(sinfo, ATMEL_LCDC_PWRCON) & ATMEL_LCDC_BUSY)
@@ -321,9 +369,11 @@ static void atmel_lcdfb_stop(struct atmel_lcdfb_info *sinfo)
static void atmel_lcdfb_start(struct atmel_lcdfb_info *sinfo)
{
- lcdc_writel(sinfo, ATMEL_LCDC_DMACON, sinfo->default_dmacon);
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+ lcdc_writel(sinfo, ATMEL_LCDC_DMACON, pdata->default_dmacon);
lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
- (sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
+ (pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
| ATMEL_LCDC_PWR);
}
@@ -424,6 +474,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
{
struct device *dev = info->device;
struct atmel_lcdfb_info *sinfo = info->par;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
unsigned long clk_value_khz;
clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
@@ -510,7 +561,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
else
var->green.length = 6;
- if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+ if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
/* RGB:5X5 mode */
var->red.offset = var->green.length + 5;
var->blue.offset = 0;
@@ -527,7 +578,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
var->transp.length = 8;
/* fall through */
case 24:
- if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+ if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
/* RGB:888 mode */
var->red.offset = 16;
var->blue.offset = 0;
@@ -576,6 +627,7 @@ static void atmel_lcdfb_reset(struct atmel_lcdfb_info *sinfo)
static int atmel_lcdfb_set_par(struct fb_info *info)
{
struct atmel_lcdfb_info *sinfo = info->par;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
unsigned long hozval_linesz;
unsigned long value;
unsigned long clk_value_khz;
@@ -637,7 +689,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
/* Initialize control register 2 */
- value = sinfo->default_lcdcon2;
+ value = pdata->default_lcdcon2;
if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
value |= ATMEL_LCDC_INVLINE_INVERTED;
@@ -741,6 +793,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
unsigned int transp, struct fb_info *info)
{
struct atmel_lcdfb_info *sinfo = info->par;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
unsigned int val;
u32 *pal;
int ret = 1;
@@ -777,8 +830,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
*/
} else {
/* new style BGR:565 / RGB:565 */
- if (sinfo->lcd_wiring_mode ==
- ATMEL_LCDC_WIRING_RGB) {
+ if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
val = ((blue >> 11) & 0x001f);
val |= ((red >> 0) & 0xf800);
} else {
@@ -912,16 +964,187 @@ static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
clk_disable_unprepare(sinfo->lcdc_clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_lcdfb_dt_ids[] = {
+ { .compatible = "atmel,at91sam9261-lcdc", .data = &at91sam9261_config, },
+ { .compatible = "atmel,at91sam9263-lcdc", .data = &at91sam9263_config, },
+ { .compatible = "atmel,at91sam9g10-lcdc", .data = &at91sam9g10_config, },
+ { .compatible = "atmel,at91sam9g45-lcdc", .data = &at91sam9g45_config, },
+ { .compatible = "atmel,at91sam9g45es-lcdc", .data = &at91sam9g45es_config, },
+ { .compatible = "atmel,at91sam9rl-lcdc", .data = &at91sam9rl_config, },
+ { .compatible = "atmel,at32ap-lcdc", .data = &at32ap_config, },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_lcdfb_dt_ids);
+
+static const char *atmel_lcdfb_wiring_modes[] = {
+ [ATMEL_LCDC_WIRING_BGR] = "BRG",
+ [ATMEL_LCDC_WIRING_RGB] = "RGB",
+};
+
+static int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
+{
+ const char *mode;
+ int err, i;
+
+ err = of_property_read_string(np, "atmel,lcd-wiring-mode", &mode);
+ if (err < 0)
+ return ATMEL_LCDC_WIRING_BGR;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_lcdfb_wiring_modes); i++)
+ if (!strcasecmp(mode, atmel_lcdfb_wiring_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+
+static void atmel_lcdfb_power_control_gpio(struct atmel_lcdfb_pdata *pdata, int on)
+{
+ struct atmel_lcdfb_power_ctrl_gpio *og;
+
+ list_for_each_entry(og, &pdata->pwr_gpios, list)
+ gpio_set_value(og->gpio, on);
+}
+
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+ struct fb_info *info = sinfo->info;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+ struct fb_var_screeninfo *var = &info->var;
+ struct device *dev = &sinfo->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *display_np;
+ struct device_node *timings_np;
+ struct display_timings *timings;
+ enum of_gpio_flags flags;
+ struct atmel_lcdfb_power_ctrl_gpio *og;
+ bool is_gpio_power = false;
+ int ret = -ENOENT;
+ int i, gpio;
+
+ sinfo->config = (struct atmel_lcdfb_config *)
+ of_match_device(atmel_lcdfb_dt_ids, dev)->data;
+
+ display_np = of_parse_phandle(np, "display", 0);
+ if (!display_np) {
+ dev_err(dev, "failed to find display phandle\n");
+ return -ENOENT;
+ }
+
+ ret = of_property_read_u32(display_np, "bits-per-pixel", &var->bits_per_pixel);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property bits-per-pixel\n");
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "atmel,guard-time", &pdata->guard_time);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property atmel,guard-time\n");
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "atmel,lcdcon2", &pdata->default_lcdcon2);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property atmel,lcdcon2\n");
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "atmel,dmacon", &pdata->default_dmacon);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property atmel,dmacon\n");
+ goto put_display_node;
+ }
+
+ ret = -ENOMEM;
+ for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
+ gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
+ i, &flags);
+ if (gpio < 0)
+ continue;
+
+ og = devm_kzalloc(dev, sizeof(*og), GFP_KERNEL);
+ if (!og)
+ goto put_display_node;
+
+ og->gpio = gpio;
+ og->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ is_gpio_power = true;
+ ret = devm_gpio_request(dev, gpio, "lcd-power-control-gpio");
+ if (ret) {
+ dev_err(dev, "request gpio %d failed\n", gpio);
+ goto put_display_node;
+ }
+
+ ret = gpio_direction_output(gpio, og->active_low);
+ if (ret) {
+ dev_err(dev, "set direction output gpio %d failed\n", gpio);
+ goto put_display_node;
+ }
+ }
+
+ if (is_gpio_power)
+ pdata->atmel_lcdfb_power_control = atmel_lcdfb_power_control_gpio;
+
+ ret = atmel_lcdfb_get_of_wiring_modes(display_np);
+ if (ret < 0) {
+ dev_err(dev, "invalid atmel,lcd-wiring-mode\n");
+ goto put_display_node;
+ }
+ pdata->lcd_wiring_mode = ret;
+
+ pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
+
+ timings = of_get_display_timings(display_np);
+ if (!timings) {
+ dev_err(dev, "failed to get display timings\n");
+ goto put_display_node;
+ }
+
+ timings_np = of_find_node_by_name(display_np, "display-timings");
+ if (!timings_np) {
+ dev_err(dev, "failed to find display-timings node\n");
+ goto put_display_node;
+ }
+
+ for (i = 0; i < of_get_child_count(timings_np); i++) {
+ struct videomode vm;
+ struct fb_videomode fb_vm;
+
+ ret = videomode_from_timings(timings, &vm, i);
+ if (ret < 0)
+ goto put_timings_node;
+ ret = fb_videomode_from_videomode(&vm, &fb_vm);
+ if (ret < 0)
+ goto put_timings_node;
+
+ fb_add_videomode(&fb_vm, &info->modelist);
+ }
+
+ return 0;
+
+put_timings_node:
+ of_node_put(timings_np);
+put_display_node:
+ of_node_put(display_np);
+ return ret;
+}
+#else
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+ return 0;
+}
+#endif
static int __init atmel_lcdfb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fb_info *info;
struct atmel_lcdfb_info *sinfo;
- struct atmel_lcdfb_info *pdata_sinfo;
- struct fb_videomode fbmode;
+ struct atmel_lcdfb_pdata *pdata = NULL;
struct resource *regs = NULL;
struct resource *map = NULL;
+ struct fb_modelist *modelist;
int ret;
dev_dbg(dev, "%s BEGIN\n", __func__);
@@ -934,26 +1157,35 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
}
sinfo = info->par;
+ sinfo->pdev = pdev;
+ sinfo->info = info;
+
+ INIT_LIST_HEAD(&info->modelist);
- if (dev->platform_data) {
- pdata_sinfo = (struct atmel_lcdfb_info *)dev->platform_data;
- sinfo->default_bpp = pdata_sinfo->default_bpp;
- sinfo->default_dmacon = pdata_sinfo->default_dmacon;
- sinfo->default_lcdcon2 = pdata_sinfo->default_lcdcon2;
- sinfo->default_monspecs = pdata_sinfo->default_monspecs;
- sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
- sinfo->guard_time = pdata_sinfo->guard_time;
- sinfo->smem_len = pdata_sinfo->smem_len;
- sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
- sinfo->lcdcon_pol_negative = pdata_sinfo->lcdcon_pol_negative;
- sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
+ if (pdev->dev.of_node) {
+ ret = atmel_lcdfb_of_init(sinfo);
+ if (ret)
+ goto free_info;
+ } else if (dev_get_platdata(dev)) {
+ struct fb_monspecs *monspecs;
+ int i;
+
+ pdata = dev_get_platdata(dev);
+ monspecs = pdata->default_monspecs;
+ sinfo->pdata = *pdata;
+
+ for (i = 0; i < monspecs->modedb_len; i++)
+ fb_add_videomode(&monspecs->modedb[i], &info->modelist);
+
+ sinfo->config = atmel_lcdfb_get_config(pdev);
+
+ info->var.bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+ memcpy(&info->monspecs, pdata->default_monspecs, sizeof(info->monspecs));
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
}
- sinfo->info = info;
- sinfo->pdev = pdev;
- sinfo->config = atmel_lcdfb_get_config(pdev);
+
if (!sinfo->config)
goto free_info;
@@ -962,7 +1194,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
info->pseudo_palette = sinfo->pseudo_palette;
info->fbops = &atmel_lcdfb_ops;
- memcpy(&info->monspecs, sinfo->default_monspecs, sizeof(info->monspecs));
info->fix = atmel_lcdfb_fix;
/* Enable LCDC Clocks */
@@ -978,14 +1209,11 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
}
atmel_lcdfb_start_clock(sinfo);
- ret = fb_find_mode(&info->var, info, NULL, info->monspecs.modedb,
- info->monspecs.modedb_len, info->monspecs.modedb,
- sinfo->default_bpp);
- if (!ret) {
- dev_err(dev, "no suitable video mode found\n");
- goto stop_clk;
- }
+ modelist = list_first_entry(&info->modelist,
+ struct fb_modelist, list);
+ fb_videomode_to_var(&info->var, &modelist->mode);
+ atmel_lcdfb_check_var(&info->var, info);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1069,18 +1297,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto unregister_irqs;
}
- /*
- * This makes sure that our colour bitfield
- * descriptors are correctly initialised.
- */
- atmel_lcdfb_check_var(&info->var, info);
-
- ret = fb_set_var(info, &info->var);
- if (ret) {
- dev_warn(dev, "unable to set display parameters\n");
- goto free_cmap;
- }
-
dev_set_drvdata(dev, info);
/*
@@ -1092,13 +1308,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto reset_drvdata;
}
- /* add selected videomode to modelist */
- fb_var_to_videomode(&fbmode, &info->var);
- fb_add_videomode(&fbmode, &info->modelist);
-
/* Power up the LCDC screen */
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(1);
+ atmel_lcdfb_power_control(sinfo, 1);
dev_info(dev, "fb%d: Atmel LCDC at 0x%08lx (mapped at %p), irq %d\n",
info->node, info->fix.mmio_start, sinfo->mmio, sinfo->irq_base);
@@ -1107,7 +1318,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
reset_drvdata:
dev_set_drvdata(dev, NULL);
-free_cmap:
fb_dealloc_cmap(&info->cmap);
unregister_irqs:
cancel_work_sync(&sinfo->task);
@@ -1143,15 +1353,16 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fb_info *info = dev_get_drvdata(dev);
struct atmel_lcdfb_info *sinfo;
+ struct atmel_lcdfb_pdata *pdata;
if (!info || !info->par)
return 0;
sinfo = info->par;
+ pdata = &sinfo->pdata;
cancel_work_sync(&sinfo->task);
exit_backlight(sinfo);
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(0);
+ atmel_lcdfb_power_control(sinfo, 0);
unregister_framebuffer(info);
atmel_lcdfb_stop_clock(sinfo);
clk_put(sinfo->lcdc_clk);
@@ -1167,7 +1378,6 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
atmel_lcdfb_free_video_memory(sinfo);
}
- dev_set_drvdata(dev, NULL);
framebuffer_release(info);
return 0;
@@ -1188,9 +1398,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(0);
-
+ atmel_lcdfb_power_control(sinfo, 0);
atmel_lcdfb_stop(sinfo);
atmel_lcdfb_stop_clock(sinfo);
@@ -1204,8 +1412,7 @@ static int atmel_lcdfb_resume(struct platform_device *pdev)
atmel_lcdfb_start_clock(sinfo);
atmel_lcdfb_start(sinfo);
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(1);
+ atmel_lcdfb_power_control(sinfo, 1);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, sinfo->saved_lcdcon);
/* Enable FIFO & DMA errors */
@@ -1228,6 +1435,7 @@ static struct platform_driver atmel_lcdfb_driver = {
.driver = {
.name = "atmel_lcdfb",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_lcdfb_dt_ids),
},
};
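The atmel_lcdfb conversion above moves the board parameters out of struct atmel_lcdfb_info and into a struct atmel_lcdfb_pdata that the driver copies into sinfo->pdata, so non-DT boards keep working through platform data while DT boards go through atmel_lcdfb_of_init(). A minimal sketch of board code supplying such pdata, using only fields referenced in the hunks above (the header path, the monspecs table and the callback body are assumptions):

	#include <video/atmel_lcdc.h>	/* assumed home of struct atmel_lcdfb_pdata */

	static void board_lcd_power(struct atmel_lcdfb_pdata *pdata, int on)
	{
		/* assumption: toggle a board-specific LCD power rail here */
	}

	static struct atmel_lcdfb_pdata board_lcdc_pdata = {
		.lcdcon_is_backlight		= true,
		.default_bpp			= 16,
		.default_dmacon			= ATMEL_LCDC_DMAEN,
		.default_lcdcon2		= 0,	/* board-specific LCDCON2 value */
		.guard_time			= 1,
		.lcd_wiring_mode		= ATMEL_LCDC_WIRING_RGB,
		.default_monspecs		= &board_monspecs,	/* assumed struct fb_monspecs */
		.atmel_lcdfb_power_control	= board_lcd_power,
	};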
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index a4dfe8cb0a0..12ca031877d 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -413,7 +413,6 @@ struct aty128fb_par {
int blitter_may_be_busy;
int fifo_slots; /* free slots in FIFO (64 max) */
- int pm_reg;
int crt_on, lcd_on;
struct pci_dev *pdev;
struct fb_info *next;
@@ -2016,7 +2015,6 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
aty128_init_engine(par);
- par->pm_reg = pdev->pm_cap;
par->pdev = pdev;
par->asleep = 0;
par->lock_blank = 0;
@@ -2029,8 +2027,8 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
if (register_framebuffer(info) < 0)
return 0;
- printk(KERN_INFO "fb%d: %s frame buffer device on %s\n",
- info->node, info->fix.id, video_card);
+ fb_info(info, "%s frame buffer device on %s\n",
+ info->fix.id, video_card);
return 1; /* success! */
}
@@ -2397,7 +2395,7 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
u32 pmgt;
struct pci_dev *pdev = par->pdev;
- if (!par->pm_reg)
+ if (!par->pdev->pm_cap)
return;
/* Set the chip into the appropriate suspend mode (we use D2,
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 9b0f12c5c28..28fafbf864a 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1848,7 +1848,6 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
return aty_waitforvblank(par, crtc);
}
- break;
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 1e30b2b3e79..26d80a4486f 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -819,11 +819,6 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
if (v.xres_virtual < v.xres)
v.xres = v.xres_virtual;
- if (v.xoffset < 0)
- v.xoffset = 0;
- if (v.yoffset < 0)
- v.yoffset = 0;
-
if (v.xoffset > v.xres_virtual - v.xres)
v.xoffset = v.xres_virtual - v.xres - 1;
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index f7091ece580..46a12f1a93c 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1427,6 +1427,8 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
mdelay( 15);
}
+#if defined(CONFIG_PM)
+#if defined(CONFIG_X86) || defined(CONFIG_PPC_PMAC)
static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
{
u32 tmp, tmp2;
@@ -1939,9 +1941,10 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
*/
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
+#endif
#ifdef CONFIG_PPC_OF
-
+#ifdef CONFIG_PPC_PMAC
static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
{
OUTREG(MC_CNTL, rinfo->save_regs[46]);
@@ -2202,6 +2205,8 @@ static void radeon_reinitialize_M9P(struct radeonfb_info *rinfo)
radeon_pm_restore_pixel_pll(rinfo);
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
+#endif
+#endif
#if 0 /* Not ready yet */
static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
@@ -2515,13 +2520,13 @@ static void radeonfb_whack_power_state(struct radeonfb_info *rinfo, pci_power_t
for (;;) {
pci_read_config_word(rinfo->pdev,
- rinfo->pm_reg+PCI_PM_CTRL,
+ rinfo->pdev->pm_cap + PCI_PM_CTRL,
&pwr_cmd);
- if (pwr_cmd & 2)
+ if (pwr_cmd & state)
break;
- pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2;
+ pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | state;
pci_write_config_word(rinfo->pdev,
- rinfo->pm_reg+PCI_PM_CTRL,
+ rinfo->pdev->pm_cap + PCI_PM_CTRL,
pwr_cmd);
msleep(500);
}
@@ -2532,7 +2537,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
{
u32 tmp;
- if (!rinfo->pm_reg)
+ if (!rinfo->pdev->pm_cap)
return;
/* Set the chip into appropriate suspend mode (we use D2,
@@ -2804,9 +2809,6 @@ static void radeonfb_early_resume(void *data)
void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep)
{
- /* Find PM registers in config space if any*/
- rinfo->pm_reg = rinfo->pdev->pm_cap;
-
/* Enable/Disable dynamic clocks: TODO add sysfs access */
if (rinfo->family == CHIP_FAMILY_RS480)
rinfo->dynclk = -1;
@@ -2830,7 +2832,7 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
* reason. --BenH
*/
if (machine_is(powermac) && rinfo->of_node) {
- if (rinfo->is_mobility && rinfo->pm_reg &&
+ if (rinfo->is_mobility && rinfo->pdev->pm_cap &&
rinfo->family <= CHIP_FAMILY_RV250)
rinfo->pm_mode |= radeon_pm_d2;
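The radeonfb and aty128fb hunks drop the privately cached pm_reg and use pdev->pm_cap, the offset of the PCI power-management capability that the PCI core already discovers at enumeration time. A sketch of the config-space access pattern used in radeonfb_whack_power_state() above (ordinarily pci_set_power_state() is the preferred interface):

	#include <linux/pci.h>

	/* Sketch: force the D-state bits in the PCI PM control/status register. */
	static void force_pci_power_state(struct pci_dev *pdev, u16 state)
	{
		u16 ctrl;

		if (!pdev->pm_cap)	/* device exposes no PM capability */
			return;

		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &ctrl);
		ctrl = (ctrl & ~PCI_PM_CTRL_STATE_MASK) | state;
		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, ctrl);
	}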
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7351e66c7f5..cb846044f57 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -342,7 +342,6 @@ struct radeonfb_info {
int mtrr_hdl;
- int pm_reg;
u32 save_regs[100];
int asleep;
int lock_blank;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 22ad85242e5..372d4aea9d1 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -564,7 +564,7 @@ int au1100fb_drv_remove(struct platform_device *dev)
if (!dev)
return -ENODEV;
- fbdev = (struct au1100fb_device *) platform_get_drvdata(dev);
+ fbdev = platform_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
@@ -636,19 +636,7 @@ static struct platform_driver au1100fb_driver = {
.suspend = au1100fb_drv_suspend,
.resume = au1100fb_drv_resume,
};
-
-static int __init au1100fb_load(void)
-{
- return platform_driver_register(&au1100fb_driver);
-}
-
-static void __exit au1100fb_unload(void)
-{
- platform_driver_unregister(&au1100fb_driver);
-}
-
-module_init(au1100fb_load);
-module_exit(au1100fb_unload);
+module_platform_driver(au1100fb_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
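The au1100fb and au1200fb hunks replace the hand-rolled module_init()/module_exit() pair with module_platform_driver(), which expands to roughly the boilerplate that was removed (sketch of the equivalence, not the exact macro definition):

	/* module_platform_driver(au1100fb_driver) is roughly equivalent to: */
	static int __init au1100fb_driver_init(void)
	{
		return platform_driver_register(&au1100fb_driver);
	}
	module_init(au1100fb_driver_init);

	static void __exit au1100fb_driver_exit(void)
	{
		platform_driver_unregister(&au1100fb_driver);
	}
	module_exit(au1100fb_driver_exit);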
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 1d02897d17f..4cfba78a145 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1853,21 +1853,7 @@ static struct platform_driver au1200fb_driver = {
.probe = au1200fb_drv_probe,
.remove = au1200fb_drv_remove,
};
-
-/*-------------------------------------------------------------------------*/
-
-static int __init au1200fb_init(void)
-{
- return platform_driver_register(&au1200fb_driver);
-}
-
-static void __exit au1200fb_cleanup(void)
-{
- platform_driver_unregister(&au1200fb_driver);
-}
-
-module_init(au1200fb_init);
-module_exit(au1200fb_cleanup);
+module_platform_driver(au1200fb_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 2cd63507ed7..7db5234462d 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -196,7 +196,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
static int pm860x_backlight_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_backlight_pdata *pdata = pdev->dev.platform_data;
+ struct pm860x_backlight_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm860x_backlight_data *data;
struct backlight_device *bl;
struct resource *res;
@@ -243,7 +243,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
- bl = backlight_device_register(name, &pdev->dev, data,
+ bl = devm_backlight_device_register(&pdev->dev, name, &pdev->dev, data,
&pm860x_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
@@ -256,21 +256,10 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
/* read current backlight */
ret = pm860x_backlight_get_brightness(bl);
if (ret < 0)
- goto out_brt;
+ return ret;
backlight_update_status(bl);
return 0;
-out_brt:
- backlight_device_unregister(bl);
- return ret;
-}
-
-static int pm860x_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
- return 0;
}
static struct platform_driver pm860x_backlight_driver = {
@@ -279,7 +268,6 @@ static struct platform_driver pm860x_backlight_driver = {
.owner = THIS_MODULE,
},
.probe = pm860x_backlight_probe,
- .remove = pm860x_backlight_remove,
};
module_platform_driver(pm860x_backlight_driver);
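Most of the backlight patches that follow repeat the pattern shown here: switching to the device-managed devm_backlight_device_register() makes the core unregister the backlight automatically when the driver unbinds, so the explicit error-path unwinding and the remove() callbacks disappear. A minimal probe sketch under those APIs (example_bl_ops and the brightness limit are placeholders):

	#include <linux/backlight.h>
	#include <linux/platform_device.h>

	static int example_bl_probe(struct platform_device *pdev)
	{
		struct backlight_properties props;
		struct backlight_device *bl;

		memset(&props, 0, sizeof(props));
		props.type = BACKLIGHT_RAW;
		props.max_brightness = 255;	/* placeholder limit */

		/* Unregistered automatically on driver detach; no remove() needed. */
		bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
						    &pdev->dev, NULL, &example_bl_ops,
						    &props);
		if (IS_ERR(bl))
			return PTR_ERR(bl);

		platform_set_drvdata(pdev, bl);
		return 0;
	}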
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index d4a7a351d67..5a3eb2ecb52 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -368,12 +368,12 @@ config BACKLIGHT_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
backlight driver.
-config BACKLIGHT_LM3630
- tristate "Backlight Driver for LM3630"
+config BACKLIGHT_LM3630A
+ tristate "Backlight Driver for LM3630A"
depends on BACKLIGHT_CLASS_DEVICE && I2C
select REGMAP_I2C
help
- This supports TI LM3630 Backlight Driver
+ This supports TI LM3630A Backlight Driver
config BACKLIGHT_LM3639
tristate "Backlight Driver for LM3639"
@@ -388,8 +388,8 @@ config BACKLIGHT_LP855X
tristate "Backlight driver for TI LP855X"
depends on BACKLIGHT_CLASS_DEVICE && I2C
help
- This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
- backlight driver.
+ This supports TI LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
+ LP8557 backlight driver.
config BACKLIGHT_LP8788
tristate "Backlight driver for TI LP8788 MFD"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 38e1babb194..bb820024f34 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -37,7 +37,7 @@ obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index c6fc668d623..ee0c0a982e4 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -127,7 +127,7 @@ static const struct backlight_ops aat2870_bl_ops = {
static int aat2870_bl_probe(struct platform_device *pdev)
{
- struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
+ struct aat2870_bl_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct aat2870_bl_driver_data *aat2870_bl;
struct backlight_device *bd;
struct backlight_properties props;
@@ -158,8 +158,9 @@ static int aat2870_bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
- bd = backlight_device_register("aat2870-backlight", &pdev->dev,
- aat2870_bl, &aat2870_bl_ops, &props);
+ bd = devm_backlight_device_register(&pdev->dev, "aat2870-backlight",
+ &pdev->dev, aat2870_bl, &aat2870_bl_ops,
+ &props);
if (IS_ERR(bd)) {
dev_err(&pdev->dev,
"Failed allocate memory for backlight device\n");
@@ -194,13 +195,11 @@ static int aat2870_bl_probe(struct platform_device *pdev)
ret = aat2870_bl_update_status(bd);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to initialize\n");
- goto out_bl_dev_unregister;
+ return ret;
}
return 0;
-out_bl_dev_unregister:
- backlight_device_unregister(bd);
out:
return ret;
}
@@ -214,8 +213,6 @@ static int aat2870_bl_remove(struct platform_device *pdev)
bd->props.brightness = 0;
backlight_update_status(bd);
- backlight_device_unregister(bd);
-
return 0;
}
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index c84701b7ca6..f37097a261a 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -297,7 +297,7 @@ static int adp5520_bl_probe(struct platform_device *pdev)
return -ENOMEM;
data->master = pdev->dev.parent;
- data->pdata = pdev->dev.platform_data;
+ data->pdata = dev_get_platdata(&pdev->dev);
if (data->pdata == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
@@ -312,8 +312,9 @@ static int adp5520_bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = ADP5020_MAX_BRIGHTNESS;
- bl = backlight_device_register(pdev->name, data->master, data,
- &adp5520_bl_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+ data->master, data, &adp5520_bl_ops,
+ &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -326,7 +327,7 @@ static int adp5520_bl_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to register sysfs\n");
- backlight_device_unregister(bl);
+ return ret;
}
platform_set_drvdata(pdev, bl);
@@ -347,8 +348,6 @@ static int adp5520_bl_remove(struct platform_device *pdev)
sysfs_remove_group(&bl->dev.kobj,
&adp5520_bl_attr_group);
- backlight_device_unregister(bl);
-
return 0;
}
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 75b10f87612..9d656717d0f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -216,7 +216,7 @@ static int adp8860_led_setup(struct adp8860_led *led)
static int adp8860_led_probe(struct i2c_client *client)
{
struct adp8860_backlight_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct adp8860_bl *data = i2c_get_clientdata(client);
struct adp8860_led *led, *led_dat;
struct led_info *cur_led;
@@ -300,7 +300,7 @@ static int adp8860_led_probe(struct i2c_client *client)
static int adp8860_led_remove(struct i2c_client *client)
{
struct adp8860_backlight_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct adp8860_bl *data = i2c_get_clientdata(client);
int i;
@@ -658,7 +658,7 @@ static int adp8860_probe(struct i2c_client *client,
struct backlight_device *bl;
struct adp8860_bl *data;
struct adp8860_backlight_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct backlight_properties props;
uint8_t reg_val;
int ret;
@@ -711,8 +711,9 @@ static int adp8860_probe(struct i2c_client *client,
mutex_init(&data->lock);
- bl = backlight_device_register(dev_driver_string(&client->dev),
- &client->dev, data, &adp8860_bl_ops, &props);
+ bl = devm_backlight_device_register(&client->dev,
+ dev_driver_string(&client->dev),
+ &client->dev, data, &adp8860_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -728,7 +729,7 @@ static int adp8860_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "failed to register sysfs\n");
- goto out1;
+ return ret;
}
ret = adp8860_bl_setup(bl);
@@ -751,8 +752,6 @@ out:
if (data->en_ambl_sens)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8860_bl_attr_group);
-out1:
- backlight_device_unregister(bl);
return ret;
}
@@ -770,8 +769,6 @@ static int adp8860_remove(struct i2c_client *client)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8860_bl_attr_group);
- backlight_device_unregister(data->bl);
-
return 0;
}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 90049d7b5c6..63707205326 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -238,7 +238,7 @@ static int adp8870_led_setup(struct adp8870_led *led)
static int adp8870_led_probe(struct i2c_client *client)
{
struct adp8870_backlight_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct adp8870_bl *data = i2c_get_clientdata(client);
struct adp8870_led *led, *led_dat;
struct led_info *cur_led;
@@ -325,7 +325,7 @@ static int adp8870_led_probe(struct i2c_client *client)
static int adp8870_led_remove(struct i2c_client *client)
{
struct adp8870_backlight_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct adp8870_bl *data = i2c_get_clientdata(client);
int i;
@@ -848,7 +848,7 @@ static int adp8870_probe(struct i2c_client *client,
struct backlight_device *bl;
struct adp8870_bl *data;
struct adp8870_backlight_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
uint8_t reg_val;
int ret;
@@ -888,8 +888,9 @@ static int adp8870_probe(struct i2c_client *client,
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
- bl = backlight_device_register(dev_driver_string(&client->dev),
- &client->dev, data, &adp8870_bl_ops, &props);
+ bl = devm_backlight_device_register(&client->dev,
+ dev_driver_string(&client->dev),
+ &client->dev, data, &adp8870_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -902,7 +903,7 @@ static int adp8870_probe(struct i2c_client *client,
&adp8870_bl_attr_group);
if (ret) {
dev_err(&client->dev, "failed to register sysfs\n");
- goto out1;
+ return ret;
}
}
@@ -925,8 +926,6 @@ out:
if (data->pdata->en_ambl_sens)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8870_bl_attr_group);
-out1:
- backlight_device_unregister(bl);
return ret;
}
@@ -944,8 +943,6 @@ static int adp8870_remove(struct i2c_client *client)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8870_bl_attr_group);
- backlight_device_unregister(data->bl);
-
return 0;
}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 319fef6cb42..d8952c4aa68 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -471,14 +471,14 @@ static int ams369fg06_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->dev = &spi->dev;
- lcd->lcd_pd = spi->dev.platform_data;
+ lcd->lcd_pd = dev_get_platdata(&spi->dev);
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
return -EINVAL;
}
- ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
- &ams369fg06_lcd_ops);
+ ld = devm_lcd_device_register(&spi->dev, "ams369fg06", &spi->dev, lcd,
+ &ams369fg06_lcd_ops);
if (IS_ERR(ld))
return PTR_ERR(ld);
@@ -488,12 +488,11 @@ static int ams369fg06_probe(struct spi_device *spi)
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
- bd = backlight_device_register("ams369fg06-bl", &spi->dev, lcd,
- &ams369fg06_backlight_ops, &props);
- if (IS_ERR(bd)) {
- ret = PTR_ERR(bd);
- goto out_lcd_unregister;
- }
+ bd = devm_backlight_device_register(&spi->dev, "ams369fg06-bl",
+ &spi->dev, lcd,
+ &ams369fg06_backlight_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
bd->props.brightness = DEFAULT_BRIGHTNESS;
lcd->bd = bd;
@@ -516,10 +515,6 @@ static int ams369fg06_probe(struct spi_device *spi)
dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n");
return 0;
-
-out_lcd_unregister:
- lcd_device_unregister(ld);
- return ret;
}
static int ams369fg06_remove(struct spi_device *spi)
@@ -527,9 +522,6 @@ static int ams369fg06_remove(struct spi_device *spi)
struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
- backlight_device_unregister(lcd->bd);
- lcd_device_unregister(lcd->ld);
-
return 0;
}
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 123887cd76b..bb1fc45b754 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -240,7 +240,8 @@ static int as3711_bl_register(struct platform_device *pdev,
/* max tuning I = 31uA for voltage- and 38250uA for current-feedback */
props.max_brightness = max_brightness;
- bl = backlight_device_register(su->type == AS3711_BL_SU1 ?
+ bl = devm_backlight_device_register(&pdev->dev,
+ su->type == AS3711_BL_SU1 ?
"as3711-su1" : "as3711-su2",
&pdev->dev, su,
&as3711_bl_ops, &props);
@@ -432,8 +433,7 @@ static int as3711_backlight_probe(struct platform_device *pdev)
case AS3711_SU2_LX_SD4:
break;
default:
- ret = -EINVAL;
- goto esu2;
+ return -EINVAL;
}
switch (pdata->su2_feedback) {
@@ -447,8 +447,7 @@ static int as3711_backlight_probe(struct platform_device *pdev)
max_brightness = min(pdata->su2_max_uA / 150, 255);
break;
default:
- ret = -EINVAL;
- goto esu2;
+ return -EINVAL;
}
ret = as3711_bl_init_su2(supply);
@@ -457,26 +456,12 @@ static int as3711_backlight_probe(struct platform_device *pdev)
ret = as3711_bl_register(pdev, max_brightness, su);
if (ret < 0)
- goto esu2;
+ return ret;
}
platform_set_drvdata(pdev, supply);
return 0;
-
-esu2:
- backlight_device_unregister(supply->su1.bl);
- return ret;
-}
-
-static int as3711_backlight_remove(struct platform_device *pdev)
-{
- struct as3711_bl_supply *supply = platform_get_drvdata(pdev);
-
- backlight_device_unregister(supply->su1.bl);
- backlight_device_unregister(supply->su2.bl);
-
- return 0;
}
static struct platform_driver as3711_backlight_driver = {
@@ -485,7 +470,6 @@ static struct platform_driver as3711_backlight_driver = {
.owner = THIS_MODULE,
},
.probe = as3711_backlight_probe,
- .remove = as3711_backlight_remove,
};
module_platform_driver(as3711_backlight_driver);
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 0393d827dd4..261b1a4ec3d 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
-#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
#include <linux/atmel_pwm.h>
@@ -27,6 +26,14 @@ struct atmel_pwm_bl {
int gpio_on;
};
+static void atmel_pwm_bl_set_gpio_on(struct atmel_pwm_bl *pwmbl, int on)
+{
+ if (!gpio_is_valid(pwmbl->gpio_on))
+ return;
+
+ gpio_set_value(pwmbl->gpio_on, on ^ pwmbl->pdata->on_active_low);
+}
+
static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
{
struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
@@ -49,19 +56,13 @@ static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
pwm_duty = pwmbl->pdata->pwm_duty_min;
if (!intensity) {
- if (pwmbl->gpio_on != -1) {
- gpio_set_value(pwmbl->gpio_on,
- 0 ^ pwmbl->pdata->on_active_low);
- }
+ atmel_pwm_bl_set_gpio_on(pwmbl, 0);
pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
pwm_channel_disable(&pwmbl->pwmc);
} else {
pwm_channel_enable(&pwmbl->pwmc);
pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
- if (pwmbl->gpio_on != -1) {
- gpio_set_value(pwmbl->gpio_on,
- 1 ^ pwmbl->pdata->on_active_low);
- }
+ atmel_pwm_bl_set_gpio_on(pwmbl, 1);
}
return 0;
@@ -70,17 +71,16 @@ static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
{
struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
- u8 intensity;
+ u32 cdty;
+ u32 intensity;
- if (pwmbl->pdata->pwm_active_low) {
- intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
- pwmbl->pdata->pwm_duty_min;
- } else {
- intensity = pwmbl->pdata->pwm_duty_max -
- pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
- }
+ cdty = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
+ if (pwmbl->pdata->pwm_active_low)
+ intensity = cdty - pwmbl->pdata->pwm_duty_min;
+ else
+ intensity = pwmbl->pdata->pwm_duty_max - cdty;
- return intensity;
+ return intensity & 0xffff;
}
static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
@@ -118,52 +118,46 @@ static const struct backlight_ops atmel_pwm_bl_ops = {
.update_status = atmel_pwm_bl_set_intensity,
};
-static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
+static int atmel_pwm_bl_probe(struct platform_device *pdev)
{
struct backlight_properties props;
const struct atmel_pwm_bl_platform_data *pdata;
struct backlight_device *bldev;
struct atmel_pwm_bl *pwmbl;
+ unsigned long flags;
int retval;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata)
+ return -ENODEV;
+
+ if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
+ pdata->pwm_duty_min > pdata->pwm_duty_max ||
+ pdata->pwm_frequency == 0)
+ return -EINVAL;
+
pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl),
GFP_KERNEL);
if (!pwmbl)
return -ENOMEM;
pwmbl->pdev = pdev;
-
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- retval = -ENODEV;
- goto err_free_mem;
- }
-
- if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
- pdata->pwm_duty_min > pdata->pwm_duty_max ||
- pdata->pwm_frequency == 0) {
- retval = -EINVAL;
- goto err_free_mem;
- }
-
pwmbl->pdata = pdata;
pwmbl->gpio_on = pdata->gpio_on;
retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
if (retval)
- goto err_free_mem;
-
- if (pwmbl->gpio_on != -1) {
- retval = devm_gpio_request(&pdev->dev, pwmbl->gpio_on,
- "gpio_atmel_pwm_bl");
- if (retval) {
- pwmbl->gpio_on = -1;
- goto err_free_pwm;
- }
+ return retval;
+ if (gpio_is_valid(pwmbl->gpio_on)) {
/* Turn display off by default. */
- retval = gpio_direction_output(pwmbl->gpio_on,
- 0 ^ pdata->on_active_low);
+ if (pdata->on_active_low)
+ flags = GPIOF_OUT_INIT_HIGH;
+ else
+ flags = GPIOF_OUT_INIT_LOW;
+
+ retval = devm_gpio_request_one(&pdev->dev, pwmbl->gpio_on,
+ flags, "gpio_atmel_pwm_bl");
if (retval)
goto err_free_pwm;
}
@@ -171,8 +165,9 @@ static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
- bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
- &atmel_pwm_bl_ops, &props);
+ bldev = devm_backlight_device_register(&pdev->dev, "atmel-pwm-bl",
+ &pdev->dev, pwmbl, &atmel_pwm_bl_ops,
+ &props);
if (IS_ERR(bldev)) {
retval = PTR_ERR(bldev);
goto err_free_pwm;
@@ -188,29 +183,25 @@ static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
retval = atmel_pwm_bl_init_pwm(pwmbl);
if (retval)
- goto err_free_bl_dev;
+ goto err_free_pwm;
atmel_pwm_bl_set_intensity(bldev);
return 0;
-err_free_bl_dev:
- backlight_device_unregister(bldev);
err_free_pwm:
pwm_channel_free(&pwmbl->pwmc);
-err_free_mem:
+
return retval;
}
-static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
+static int atmel_pwm_bl_remove(struct platform_device *pdev)
{
struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
- if (pwmbl->gpio_on != -1)
- gpio_set_value(pwmbl->gpio_on, 0);
+ atmel_pwm_bl_set_gpio_on(pwmbl, 0);
pwm_channel_disable(&pwmbl->pwmc);
pwm_channel_free(&pwmbl->pwmc);
- backlight_device_unregister(pwmbl->bldev);
return 0;
}
@@ -220,11 +211,13 @@ static struct platform_driver atmel_pwm_bl_driver = {
.name = "atmel-pwm-bl",
},
/* REVISIT add suspend() and resume() */
- .remove = __exit_p(atmel_pwm_bl_remove),
+ .probe = atmel_pwm_bl_probe,
+ .remove = atmel_pwm_bl_remove,
};
-module_platform_driver_probe(atmel_pwm_bl_driver, atmel_pwm_bl_probe);
+module_platform_driver(atmel_pwm_bl_driver);
MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
MODULE_DESCRIPTION("Atmel PWM backlight driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel-pwm-bl");
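The atmel-pwm-bl rework above wraps the optional enable GPIO behind gpio_is_valid() and requests it already driven to its inactive level via devm_gpio_request_one() with a GPIOF_OUT_INIT_* flag. A small sketch of that idiom (function and label names are placeholders):

	#include <linux/device.h>
	#include <linux/gpio.h>

	/* Sketch: claim an optional enable GPIO and park it at its inactive level. */
	static int claim_enable_gpio(struct device *dev, int gpio, bool active_low)
	{
		unsigned long flags = active_low ? GPIOF_OUT_INIT_HIGH
						 : GPIOF_OUT_INIT_LOW;

		if (!gpio_is_valid(gpio))
			return 0;	/* optional pin not provided */

		return devm_gpio_request_one(dev, gpio, flags, "backlight-enable");
	}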
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 94a403a9717..5d05555fe84 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -21,6 +21,9 @@
#include <asm/backlight.h>
#endif
+static struct list_head backlight_dev_list;
+static struct mutex backlight_dev_list_mutex;
+
static const char *const backlight_types[] = {
[BACKLIGHT_RAW] = "raw",
[BACKLIGHT_PLATFORM] = "platform",
@@ -349,10 +352,32 @@ struct backlight_device *backlight_device_register(const char *name,
mutex_unlock(&pmac_backlight_mutex);
#endif
+ mutex_lock(&backlight_dev_list_mutex);
+ list_add(&new_bd->entry, &backlight_dev_list);
+ mutex_unlock(&backlight_dev_list_mutex);
+
return new_bd;
}
EXPORT_SYMBOL(backlight_device_register);
+bool backlight_device_registered(enum backlight_type type)
+{
+ bool found = false;
+ struct backlight_device *bd;
+
+ mutex_lock(&backlight_dev_list_mutex);
+ list_for_each_entry(bd, &backlight_dev_list, entry) {
+ if (bd->props.type == type) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&backlight_dev_list_mutex);
+
+ return found;
+}
+EXPORT_SYMBOL(backlight_device_registered);
+
/**
* backlight_device_unregister - unregisters a backlight device object.
* @bd: the backlight device object to be unregistered and freed.
@@ -364,6 +389,10 @@ void backlight_device_unregister(struct backlight_device *bd)
if (!bd)
return;
+ mutex_lock(&backlight_dev_list_mutex);
+ list_del(&bd->entry);
+ mutex_unlock(&backlight_dev_list_mutex);
+
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight == bd)
@@ -499,6 +528,8 @@ static int __init backlight_class_init(void)
backlight_class->dev_groups = bl_device_groups;
backlight_class->pm = &backlight_class_dev_pm_ops;
+ INIT_LIST_HEAD(&backlight_dev_list);
+ mutex_init(&backlight_dev_list_mutex);
return 0;
}
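backlight_device_registered(), added above, lets other code ask whether a backlight of a given type already exists, for instance so a generic interface can be skipped when a raw hardware one is present. Hypothetical caller sketch:

	#include <linux/backlight.h>

	/* Hypothetical policy helper: only offer a vendor/firmware backlight
	 * interface when no raw (register-level) backlight is registered. */
	static bool want_vendor_backlight(void)
	{
		return !backlight_device_registered(BACKLIGHT_RAW);
	}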
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 15e3294b29f..16dd9bc625b 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -128,7 +128,7 @@ static const struct backlight_ops bd6107_backlight_ops = {
static int bd6107_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct bd6107_platform_data *pdata = client->dev.platform_data;
+ struct bd6107_platform_data *pdata = dev_get_platdata(&client->dev);
struct backlight_device *backlight;
struct backlight_properties props;
struct bd6107 *bd;
@@ -166,7 +166,8 @@ static int bd6107_probe(struct i2c_client *client,
props.brightness = clamp_t(unsigned int, pdata->def_value, 0,
props.max_brightness);
- backlight = backlight_device_register(dev_name(&client->dev),
+ backlight = devm_backlight_device_register(&client->dev,
+ dev_name(&client->dev),
&bd->client->dev, bd,
&bd6107_backlight_ops, &props);
if (IS_ERR(backlight)) {
@@ -186,7 +187,6 @@ static int bd6107_remove(struct i2c_client *client)
backlight->props.brightness = 0;
backlight_update_status(backlight);
- backlight_device_unregister(backlight);
return 0;
}
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index c97867a717a..db8db5fa658 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -533,7 +533,7 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
static int corgi_lcd_probe(struct spi_device *spi)
{
struct backlight_properties props;
- struct corgi_lcd_platform_data *pdata = spi->dev.platform_data;
+ struct corgi_lcd_platform_data *pdata = dev_get_platdata(&spi->dev);
struct corgi_lcd *lcd;
int ret = 0;
@@ -550,8 +550,8 @@ static int corgi_lcd_probe(struct spi_device *spi)
lcd->spi_dev = spi;
- lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
- lcd, &corgi_lcd_ops);
+ lcd->lcd_dev = devm_lcd_device_register(&spi->dev, "corgi_lcd",
+ &spi->dev, lcd, &corgi_lcd_ops);
if (IS_ERR(lcd->lcd_dev))
return PTR_ERR(lcd->lcd_dev);
@@ -561,18 +561,18 @@ static int corgi_lcd_probe(struct spi_device *spi)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = pdata->max_intensity;
- lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
- &corgi_bl_ops, &props);
- if (IS_ERR(lcd->bl_dev)) {
- ret = PTR_ERR(lcd->bl_dev);
- goto err_unregister_lcd;
- }
+ lcd->bl_dev = devm_backlight_device_register(&spi->dev, "corgi_bl",
+ &spi->dev, lcd, &corgi_bl_ops,
+ &props);
+ if (IS_ERR(lcd->bl_dev))
+ return PTR_ERR(lcd->bl_dev);
+
lcd->bl_dev->props.brightness = pdata->default_intensity;
lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
ret = setup_gpio_backlight(lcd, pdata);
if (ret)
- goto err_unregister_bl;
+ return ret;
lcd->kick_battery = pdata->kick_battery;
@@ -583,12 +583,6 @@ static int corgi_lcd_probe(struct spi_device *spi)
lcd->limit_mask = pdata->limit_mask;
the_corgi_lcd = lcd;
return 0;
-
-err_unregister_bl:
- backlight_device_unregister(lcd->bl_dev);
-err_unregister_lcd:
- lcd_device_unregister(lcd->lcd_dev);
- return ret;
}
static int corgi_lcd_remove(struct spi_device *spi)
@@ -598,11 +592,7 @@ static int corgi_lcd_remove(struct spi_device *spi)
lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
lcd->bl_dev->props.brightness = 0;
backlight_update_status(lcd->bl_dev);
- backlight_device_unregister(lcd->bl_dev);
-
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
- lcd_device_unregister(lcd->lcd_dev);
-
return 0;
}
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 37bae801e23..f3fed9ef745 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -195,16 +195,17 @@ static int cr_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
- bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
- &cr_backlight_ops, &props);
+ bdp = devm_backlight_device_register(&pdev->dev, "cr-backlight",
+ &pdev->dev, NULL, &cr_backlight_ops,
+ &props);
if (IS_ERR(bdp)) {
pci_dev_put(lpc_dev);
return PTR_ERR(bdp);
}
- ldp = lcd_device_register("cr-lcd", &pdev->dev, NULL, &cr_lcd_ops);
+ ldp = devm_lcd_device_register(&pdev->dev, "cr-lcd", &pdev->dev, NULL,
+ &cr_lcd_ops);
if (IS_ERR(ldp)) {
- backlight_device_unregister(bdp);
pci_dev_put(lpc_dev);
return PTR_ERR(ldp);
}
@@ -215,8 +216,6 @@ static int cr_backlight_probe(struct platform_device *pdev)
crp = devm_kzalloc(&pdev->dev, sizeof(*crp), GFP_KERNEL);
if (!crp) {
- lcd_device_unregister(ldp);
- backlight_device_unregister(bdp);
pci_dev_put(lpc_dev);
return -ENOMEM;
}
@@ -241,8 +240,6 @@ static int cr_backlight_remove(struct platform_device *pdev)
crp->cr_backlight_device->props.max_brightness = 0;
cr_backlight_set_intensity(crp->cr_backlight_device);
cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_POWERDOWN);
- backlight_device_unregister(crp->cr_backlight_device);
- lcd_device_unregister(crp->cr_lcd_device);
pci_dev_put(lpc_dev);
return 0;
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 67cadd30e27..12c5d840c59 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -109,7 +109,7 @@ static const struct backlight_ops da903x_backlight_ops = {
static int da903x_backlight_probe(struct platform_device *pdev)
{
- struct da9034_backlight_pdata *pdata = pdev->dev.platform_data;
+ struct da9034_backlight_pdata *pdata = dev_get_platdata(&pdev->dev);
struct da903x_backlight_data *data;
struct backlight_device *bl;
struct backlight_properties props;
@@ -144,8 +144,9 @@ static int da903x_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
- bl = backlight_device_register(pdev->name, data->da903x_dev, data,
- &da903x_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+ data->da903x_dev, data,
+ &da903x_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -158,21 +159,12 @@ static int da903x_backlight_probe(struct platform_device *pdev)
return 0;
}
-static int da903x_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
- return 0;
-}
-
static struct platform_driver da903x_backlight_driver = {
.driver = {
.name = "da903x-backlight",
.owner = THIS_MODULE,
},
.probe = da903x_backlight_probe,
- .remove = da903x_backlight_remove,
};
module_platform_driver(da903x_backlight_driver);
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 842da5a3ac4..20d55becaa7 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -125,8 +125,9 @@ static int da9052_backlight_probe(struct platform_device *pdev)
props.type = BACKLIGHT_RAW;
props.max_brightness = DA9052_MAX_BRIGHTNESS;
- bl = backlight_device_register(pdev->name, wleds->da9052->dev, wleds,
- &da9052_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+ wleds->da9052->dev, wleds,
+ &da9052_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "Failed to register backlight\n");
return PTR_ERR(bl);
@@ -147,7 +148,6 @@ static int da9052_backlight_remove(struct platform_device *pdev)
wleds->brightness = 0;
wleds->state = DA9052_WLEDS_OFF;
da9052_adjust_wled_brightness(wleds);
- backlight_device_unregister(bl);
return 0;
}
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 018368ba412..0d1f633c648 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -92,8 +92,8 @@ static int ep93xxbl_probe(struct platform_device *dev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = EP93XX_MAX_BRIGHT;
- bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
- &ep93xxbl_ops, &props);
+ bl = devm_backlight_device_register(&dev->dev, dev->name, &dev->dev,
+ ep93xxbl, &ep93xxbl_ops, &props);
if (IS_ERR(bl))
return PTR_ERR(bl);
@@ -106,14 +106,6 @@ static int ep93xxbl_probe(struct platform_device *dev)
return 0;
}
-static int ep93xxbl_remove(struct platform_device *dev)
-{
- struct backlight_device *bl = platform_get_drvdata(dev);
-
- backlight_device_unregister(bl);
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int ep93xxbl_suspend(struct device *dev)
{
@@ -140,7 +132,6 @@ static struct platform_driver ep93xxbl_driver = {
.pm = &ep93xxbl_pm_ops,
},
.probe = ep93xxbl_probe,
- .remove = ep93xxbl_remove,
};
module_platform_driver(ep93xxbl_driver);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 19e393b4143..5d8d65200db 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -79,7 +79,7 @@ static const struct backlight_ops genericbl_ops = {
static int genericbl_probe(struct platform_device *pdev)
{
struct backlight_properties props;
- struct generic_bl_info *machinfo = pdev->dev.platform_data;
+ struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev);
const char *name = "generic-bl";
struct backlight_device *bd;
@@ -93,8 +93,8 @@ static int genericbl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = machinfo->max_intensity;
- bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
- &props);
+ bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev,
+ NULL, &genericbl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -118,8 +118,6 @@ static int genericbl_remove(struct platform_device *pdev)
bd->props.brightness = 0;
backlight_update_status(bd);
- backlight_device_unregister(bd);
-
dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n");
return 0;
}
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 5fa217f9f44..81fb12770c2 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -62,7 +62,8 @@ static const struct backlight_ops gpio_backlight_ops = {
static int gpio_backlight_probe(struct platform_device *pdev)
{
- struct gpio_backlight_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_backlight_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct backlight_properties props;
struct backlight_device *bl;
struct gpio_backlight *gbl;
@@ -94,8 +95,9 @@ static int gpio_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = 1;
- bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, gbl,
- &gpio_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
+ &pdev->dev, gbl, &gpio_backlight_ops,
+ &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -108,21 +110,12 @@ static int gpio_backlight_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
- return 0;
-}
-
static struct platform_driver gpio_backlight_driver = {
.driver = {
.name = "gpio-backlight",
.owner = THIS_MODULE,
},
.probe = gpio_backlight_probe,
- .remove = gpio_backlight_remove,
};
module_platform_driver(gpio_backlight_driver);
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index c7af8c45ab8..985e854e244 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -648,7 +648,8 @@ static int hx8357_probe(struct spi_device *spi)
lcd->use_im_pins = 0;
}
- lcdev = lcd_device_register("mxsfb", &spi->dev, lcd, &hx8357_ops);
+ lcdev = devm_lcd_device_register(&spi->dev, "mxsfb", &spi->dev, lcd,
+ &hx8357_ops);
if (IS_ERR(lcdev)) {
ret = PTR_ERR(lcdev);
return ret;
@@ -660,32 +661,19 @@ static int hx8357_probe(struct spi_device *spi)
ret = ((int (*)(struct lcd_device *))match->data)(lcdev);
if (ret) {
dev_err(&spi->dev, "Couldn't initialize panel\n");
- goto init_error;
+ return ret;
}
dev_info(&spi->dev, "Panel probed\n");
return 0;
-
-init_error:
- lcd_device_unregister(lcdev);
- return ret;
-}
-
-static int hx8357_remove(struct spi_device *spi)
-{
- struct lcd_device *lcdev = spi_get_drvdata(spi);
-
- lcd_device_unregister(lcdev);
- return 0;
}
static struct spi_driver hx8357_driver = {
.probe = hx8357_probe,
- .remove = hx8357_remove,
.driver = {
.name = "hx8357",
- .of_match_table = of_match_ptr(hx8357_dt_ids),
+ .of_match_table = hx8357_dt_ids,
},
};
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index d9f65c2d9b0..73464e4b4c7 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -513,8 +513,8 @@ static int ili922x_probe(struct spi_device *spi)
ili->power = FB_BLANK_POWERDOWN;
- lcd = lcd_device_register("ili922xlcd", &spi->dev, ili,
- &ili922x_ops);
+ lcd = devm_lcd_device_register(&spi->dev, "ili922xlcd", &spi->dev, ili,
+ &ili922x_ops);
if (IS_ERR(lcd)) {
dev_err(&spi->dev, "cannot register LCD\n");
return PTR_ERR(lcd);
@@ -530,10 +530,7 @@ static int ili922x_probe(struct spi_device *spi)
static int ili922x_remove(struct spi_device *spi)
{
- struct ili922x *ili = spi_get_drvdata(spi);
-
ili922x_poweroff(spi);
- lcd_device_unregister(ili->ld);
return 0;
}
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index f8be90c5ded..e2b8b40a9bd 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -198,7 +198,7 @@ static void ili9320_setup_spi(struct ili9320 *ili,
int ili9320_probe_spi(struct spi_device *spi,
struct ili9320_client *client)
{
- struct ili9320_platdata *cfg = spi->dev.platform_data;
+ struct ili9320_platdata *cfg = dev_get_platdata(&spi->dev);
struct device *dev = &spi->dev;
struct ili9320 *ili;
struct lcd_device *lcd;
@@ -235,7 +235,8 @@ int ili9320_probe_spi(struct spi_device *spi,
ili9320_setup_spi(ili, spi);
- lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
+ lcd = devm_lcd_device_register(&spi->dev, "ili9320", dev, ili,
+ &ili9320_ops);
if (IS_ERR(lcd)) {
dev_err(dev, "failed to register lcd device\n");
return PTR_ERR(lcd);
@@ -248,24 +249,16 @@ int ili9320_probe_spi(struct spi_device *spi,
ret = ili9320_power(ili, FB_BLANK_UNBLANK);
if (ret != 0) {
dev_err(dev, "failed to set lcd power state\n");
- goto err_unregister;
+ return ret;
}
return 0;
-
- err_unregister:
- lcd_device_unregister(lcd);
-
- return ret;
}
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
int ili9320_remove(struct ili9320 *ili)
{
ili9320_power(ili, FB_BLANK_POWERDOWN);
-
- lcd_device_unregister(ili->lcd);
-
return 0;
}
EXPORT_SYMBOL_GPL(ili9320_remove);
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index bca6ccc74df..7592cc25c96 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -141,7 +141,7 @@ static const struct backlight_ops kb3886bl_ops = {
static int kb3886bl_probe(struct platform_device *pdev)
{
struct backlight_properties props;
- struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data;
+ struct kb3886bl_machinfo *machinfo = dev_get_platdata(&pdev->dev);
bl_machinfo = machinfo;
if (!machinfo->limit_mask)
@@ -150,10 +150,10 @@ static int kb3886bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = machinfo->max_intensity;
- kb3886_backlight_device = backlight_device_register("kb3886-bl",
- &pdev->dev, NULL,
- &kb3886bl_ops,
- &props);
+ kb3886_backlight_device = devm_backlight_device_register(&pdev->dev,
+ "kb3886-bl", &pdev->dev,
+ NULL, &kb3886bl_ops,
+ &props);
if (IS_ERR(kb3886_backlight_device))
return PTR_ERR(kb3886_backlight_device);
@@ -166,18 +166,8 @@ static int kb3886bl_probe(struct platform_device *pdev)
return 0;
}
-static int kb3886bl_remove(struct platform_device *pdev)
-{
- struct backlight_device *bd = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bd);
-
- return 0;
-}
-
static struct platform_driver kb3886bl_driver = {
.probe = kb3886bl_probe,
- .remove = kb3886bl_remove,
.driver = {
.name = "kb3886-bl",
.pm = &kb3886bl_pm_ops,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index a35a38c709c..b5fc13bc24e 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -48,7 +48,7 @@ static void l4f00242t03_reset(unsigned int gpio)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
- struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+ struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
int ret;
@@ -88,7 +88,7 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
- struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+ struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "Powering down LCD\n");
@@ -171,7 +171,7 @@ static struct lcd_ops l4f_ops = {
static int l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
- struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+ struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
int ret;
if (pdata == NULL) {
@@ -244,7 +244,6 @@ static int l4f00242t03_remove(struct spi_device *spi)
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 1e0a3093ce5..506a6c23603 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -702,7 +702,7 @@ static int ld9040_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->dev = &spi->dev;
- lcd->lcd_pd = spi->dev.platform_data;
+ lcd->lcd_pd = dev_get_platdata(&spi->dev);
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
return -EINVAL;
@@ -716,7 +716,8 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
+ ld = devm_lcd_device_register(&spi->dev, "ld9040", &spi->dev, lcd,
+ &ld9040_lcd_ops);
if (IS_ERR(ld))
return PTR_ERR(ld);
@@ -726,12 +727,10 @@ static int ld9040_probe(struct spi_device *spi)
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
- bd = backlight_device_register("ld9040-bl", &spi->dev,
- lcd, &ld9040_backlight_ops, &props);
- if (IS_ERR(bd)) {
- ret = PTR_ERR(bd);
- goto out_unregister_lcd;
- }
+ bd = devm_backlight_device_register(&spi->dev, "ld9040-bl", &spi->dev,
+ lcd, &ld9040_backlight_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
bd->props.brightness = MAX_BRIGHTNESS;
lcd->bd = bd;
@@ -757,11 +756,6 @@ static int ld9040_probe(struct spi_device *spi)
dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
return 0;
-
-out_unregister_lcd:
- lcd_device_unregister(lcd->ld);
-
- return ret;
}
static int ld9040_remove(struct spi_device *spi)
@@ -769,9 +763,6 @@ static int ld9040_remove(struct spi_device *spi)
struct ld9040 *lcd = spi_get_drvdata(spi);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
- backlight_device_unregister(lcd->bd);
- lcd_device_unregister(lcd->ld);
-
return 0;
}
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
index 038d9c86ec0..c5e586d9738 100644
--- a/drivers/video/backlight/ld9040_gamma.h
+++ b/drivers/video/backlight/ld9040_gamma.h
@@ -169,7 +169,9 @@ static const unsigned int ld9040_22_50[] = {
struct ld9040_gamma {
unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
-} gamma_table = {
+};
+
+static struct ld9040_gamma gamma_table = {
.gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
.gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
.gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index 1d1dbfb789e..187d1c283c1 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -284,7 +284,7 @@ static int lm3533_bl_probe(struct platform_device *pdev)
if (!lm3533)
return -EINVAL;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
@@ -313,8 +313,9 @@ static int lm3533_bl_probe(struct platform_device *pdev)
props.type = BACKLIGHT_RAW;
props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
props.brightness = pdata->default_brightness;
- bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
- &lm3533_bl_ops, &props);
+ bd = devm_backlight_device_register(&pdev->dev, pdata->name,
+ pdev->dev.parent, bl, &lm3533_bl_ops,
+ &props);
if (IS_ERR(bd)) {
dev_err(&pdev->dev, "failed to register backlight device\n");
return PTR_ERR(bd);
@@ -328,7 +329,7 @@ static int lm3533_bl_probe(struct platform_device *pdev)
ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
if (ret < 0) {
dev_err(&pdev->dev, "failed to create sysfs attributes\n");
- goto err_unregister;
+ return ret;
}
backlight_update_status(bd);
@@ -345,8 +346,6 @@ static int lm3533_bl_probe(struct platform_device *pdev)
err_sysfs_remove:
sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
-err_unregister:
- backlight_device_unregister(bd);
return ret;
}
@@ -363,7 +362,6 @@ static int lm3533_bl_remove(struct platform_device *pdev)
lm3533_ctrlbank_disable(&bl->cb);
sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
- backlight_device_unregister(bd);
return 0;
}
diff --git a/drivers/video/backlight/lm3630_bl.c b/drivers/video/backlight/lm3630_bl.c
deleted file mode 100644
index 76a62e978fc..00000000000
--- a/drivers/video/backlight/lm3630_bl.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
-* Simple driver for Texas Instruments LM3630 Backlight driver chip
-* Copyright (C) 2012 Texas Instruments
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 as
-* published by the Free Software Foundation.
-*
-*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/backlight.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/regmap.h>
-#include <linux/platform_data/lm3630_bl.h>
-
-#define REG_CTRL 0x00
-#define REG_CONFIG 0x01
-#define REG_BRT_A 0x03
-#define REG_BRT_B 0x04
-#define REG_INT_STATUS 0x09
-#define REG_INT_EN 0x0A
-#define REG_FAULT 0x0B
-#define REG_PWM_OUTLOW 0x12
-#define REG_PWM_OUTHIGH 0x13
-#define REG_MAX 0x1F
-
-#define INT_DEBOUNCE_MSEC 10
-
-enum lm3630_leds {
- BLED_ALL = 0,
- BLED_1,
- BLED_2
-};
-
-static const char * const bled_name[] = {
- [BLED_ALL] = "lm3630_bled", /*Bank1 controls all string */
- [BLED_1] = "lm3630_bled1", /*Bank1 controls bled1 */
- [BLED_2] = "lm3630_bled2", /*Bank1 or 2 controls bled2 */
-};
-
-struct lm3630_chip_data {
- struct device *dev;
- struct delayed_work work;
- int irq;
- struct workqueue_struct *irqthread;
- struct lm3630_platform_data *pdata;
- struct backlight_device *bled1;
- struct backlight_device *bled2;
- struct regmap *regmap;
-};
-
-/* initialize chip */
-static int lm3630_chip_init(struct lm3630_chip_data *pchip)
-{
- int ret;
- unsigned int reg_val;
- struct lm3630_platform_data *pdata = pchip->pdata;
-
- /*pwm control */
- reg_val = ((pdata->pwm_active & 0x01) << 2) | (pdata->pwm_ctrl & 0x03);
- ret = regmap_update_bits(pchip->regmap, REG_CONFIG, 0x07, reg_val);
- if (ret < 0)
- goto out;
-
- /* bank control */
- reg_val = ((pdata->bank_b_ctrl & 0x01) << 1) |
- (pdata->bank_a_ctrl & 0x07);
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x07, reg_val);
- if (ret < 0)
- goto out;
-
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
- if (ret < 0)
- goto out;
-
- /* set initial brightness */
- if (pdata->bank_a_ctrl != BANK_A_CTRL_DISABLE) {
- ret = regmap_write(pchip->regmap,
- REG_BRT_A, pdata->init_brt_led1);
- if (ret < 0)
- goto out;
- }
-
- if (pdata->bank_b_ctrl != BANK_B_CTRL_DISABLE) {
- ret = regmap_write(pchip->regmap,
- REG_BRT_B, pdata->init_brt_led2);
- if (ret < 0)
- goto out;
- }
- return ret;
-
-out:
- dev_err(pchip->dev, "i2c failed to access register\n");
- return ret;
-}
-
-/* interrupt handling */
-static void lm3630_delayed_func(struct work_struct *work)
-{
- int ret;
- unsigned int reg_val;
- struct lm3630_chip_data *pchip;
-
- pchip = container_of(work, struct lm3630_chip_data, work.work);
-
- ret = regmap_read(pchip->regmap, REG_INT_STATUS, &reg_val);
- if (ret < 0) {
- dev_err(pchip->dev,
- "i2c failed to access REG_INT_STATUS Register\n");
- return;
- }
-
- dev_info(pchip->dev, "REG_INT_STATUS Register is 0x%x\n", reg_val);
-}
-
-static irqreturn_t lm3630_isr_func(int irq, void *chip)
-{
- int ret;
- struct lm3630_chip_data *pchip = chip;
- unsigned long delay = msecs_to_jiffies(INT_DEBOUNCE_MSEC);
-
- queue_delayed_work(pchip->irqthread, &pchip->work, delay);
-
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
- if (ret < 0)
- goto out;
-
- return IRQ_HANDLED;
-out:
- dev_err(pchip->dev, "i2c failed to access register\n");
- return IRQ_HANDLED;
-}
-
-static int lm3630_intr_config(struct lm3630_chip_data *pchip)
-{
- INIT_DELAYED_WORK(&pchip->work, lm3630_delayed_func);
- pchip->irqthread = create_singlethread_workqueue("lm3630-irqthd");
- if (!pchip->irqthread) {
- dev_err(pchip->dev, "create irq thread fail...\n");
- return -1;
- }
- if (request_threaded_irq
- (pchip->irq, NULL, lm3630_isr_func,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lm3630_irq", pchip)) {
- dev_err(pchip->dev, "request threaded irq fail..\n");
- return -1;
- }
- return 0;
-}
-
-static bool
-set_intensity(struct backlight_device *bl, struct lm3630_chip_data *pchip)
-{
- if (!pchip->pdata->pwm_set_intensity)
- return false;
- pchip->pdata->pwm_set_intensity(bl->props.brightness - 1,
- pchip->pdata->pwm_period);
- return true;
-}
-
-/* update and get brightness */
-static int lm3630_bank_a_update_status(struct backlight_device *bl)
-{
- int ret;
- struct lm3630_chip_data *pchip = bl_get_data(bl);
- enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
- /* brightness 0 means disable */
- if (!bl->props.brightness) {
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x04, 0x00);
- if (ret < 0)
- goto out;
- return bl->props.brightness;
- }
-
- /* pwm control */
- if (pwm_ctrl == PWM_CTRL_BANK_A || pwm_ctrl == PWM_CTRL_BANK_ALL) {
- if (!set_intensity(bl, pchip))
- dev_err(pchip->dev, "No pwm control func. in plat-data\n");
- } else {
-
- /* i2c control */
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
- if (ret < 0)
- goto out;
- mdelay(1);
- ret = regmap_write(pchip->regmap,
- REG_BRT_A, bl->props.brightness - 1);
- if (ret < 0)
- goto out;
- }
- return bl->props.brightness;
-out:
- dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
- return bl->props.brightness;
-}
-
-static int lm3630_bank_a_get_brightness(struct backlight_device *bl)
-{
- unsigned int reg_val;
- int brightness, ret;
- struct lm3630_chip_data *pchip = bl_get_data(bl);
- enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
- if (pwm_ctrl == PWM_CTRL_BANK_A || pwm_ctrl == PWM_CTRL_BANK_ALL) {
- ret = regmap_read(pchip->regmap, REG_PWM_OUTHIGH, &reg_val);
- if (ret < 0)
- goto out;
- brightness = reg_val & 0x01;
- ret = regmap_read(pchip->regmap, REG_PWM_OUTLOW, &reg_val);
- if (ret < 0)
- goto out;
- brightness = ((brightness << 8) | reg_val) + 1;
- } else {
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
- if (ret < 0)
- goto out;
- mdelay(1);
- ret = regmap_read(pchip->regmap, REG_BRT_A, &reg_val);
- if (ret < 0)
- goto out;
- brightness = reg_val + 1;
- }
- bl->props.brightness = brightness;
- return bl->props.brightness;
-out:
- dev_err(pchip->dev, "i2c failed to access register\n");
- return 0;
-}
-
-static const struct backlight_ops lm3630_bank_a_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .update_status = lm3630_bank_a_update_status,
- .get_brightness = lm3630_bank_a_get_brightness,
-};
-
-static int lm3630_bank_b_update_status(struct backlight_device *bl)
-{
- int ret;
- struct lm3630_chip_data *pchip = bl_get_data(bl);
- enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
- if (pwm_ctrl == PWM_CTRL_BANK_B || pwm_ctrl == PWM_CTRL_BANK_ALL) {
- if (!set_intensity(bl, pchip))
- dev_err(pchip->dev,
- "no pwm control func. in plat-data\n");
- } else {
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
- if (ret < 0)
- goto out;
- mdelay(1);
- ret = regmap_write(pchip->regmap,
- REG_BRT_B, bl->props.brightness - 1);
- }
- return bl->props.brightness;
-out:
- dev_err(pchip->dev, "i2c failed to access register\n");
- return bl->props.brightness;
-}
-
-static int lm3630_bank_b_get_brightness(struct backlight_device *bl)
-{
- unsigned int reg_val;
- int brightness, ret;
- struct lm3630_chip_data *pchip = bl_get_data(bl);
- enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
- if (pwm_ctrl == PWM_CTRL_BANK_B || pwm_ctrl == PWM_CTRL_BANK_ALL) {
- ret = regmap_read(pchip->regmap, REG_PWM_OUTHIGH, &reg_val);
- if (ret < 0)
- goto out;
- brightness = reg_val & 0x01;
- ret = regmap_read(pchip->regmap, REG_PWM_OUTLOW, &reg_val);
- if (ret < 0)
- goto out;
- brightness = ((brightness << 8) | reg_val) + 1;
- } else {
- ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
- if (ret < 0)
- goto out;
- mdelay(1);
- ret = regmap_read(pchip->regmap, REG_BRT_B, &reg_val);
- if (ret < 0)
- goto out;
- brightness = reg_val + 1;
- }
- bl->props.brightness = brightness;
-
- return bl->props.brightness;
-out:
- dev_err(pchip->dev, "i2c failed to access register\n");
- return bl->props.brightness;
-}
-
-static const struct backlight_ops lm3630_bank_b_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .update_status = lm3630_bank_b_update_status,
- .get_brightness = lm3630_bank_b_get_brightness,
-};
-
-static int lm3630_backlight_register(struct lm3630_chip_data *pchip,
- enum lm3630_leds ledno)
-{
- const char *name = bled_name[ledno];
- struct backlight_properties props;
- struct lm3630_platform_data *pdata = pchip->pdata;
-
- props.type = BACKLIGHT_RAW;
- switch (ledno) {
- case BLED_1:
- case BLED_ALL:
- props.brightness = pdata->init_brt_led1;
- props.max_brightness = pdata->max_brt_led1;
- pchip->bled1 =
- backlight_device_register(name, pchip->dev, pchip,
- &lm3630_bank_a_ops, &props);
- if (IS_ERR(pchip->bled1))
- return PTR_ERR(pchip->bled1);
- break;
- case BLED_2:
- props.brightness = pdata->init_brt_led2;
- props.max_brightness = pdata->max_brt_led2;
- pchip->bled2 =
- backlight_device_register(name, pchip->dev, pchip,
- &lm3630_bank_b_ops, &props);
- if (IS_ERR(pchip->bled2))
- return PTR_ERR(pchip->bled2);
- break;
- }
- return 0;
-}
-
-static void lm3630_backlight_unregister(struct lm3630_chip_data *pchip)
-{
- if (pchip->bled1)
- backlight_device_unregister(pchip->bled1);
- if (pchip->bled2)
- backlight_device_unregister(pchip->bled2);
-}
-
-static const struct regmap_config lm3630_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = REG_MAX,
-};
-
-static int lm3630_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct lm3630_platform_data *pdata = client->dev.platform_data;
- struct lm3630_chip_data *pchip;
- int ret;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "fail : i2c functionality check...\n");
- return -EOPNOTSUPP;
- }
-
- if (pdata == NULL) {
- dev_err(&client->dev, "fail : no platform data.\n");
- return -ENODATA;
- }
-
- pchip = devm_kzalloc(&client->dev, sizeof(struct lm3630_chip_data),
- GFP_KERNEL);
- if (!pchip)
- return -ENOMEM;
- pchip->pdata = pdata;
- pchip->dev = &client->dev;
-
- pchip->regmap = devm_regmap_init_i2c(client, &lm3630_regmap);
- if (IS_ERR(pchip->regmap)) {
- ret = PTR_ERR(pchip->regmap);
- dev_err(&client->dev, "fail : allocate register map: %d\n",
- ret);
- return ret;
- }
- i2c_set_clientdata(client, pchip);
-
- /* chip initialize */
- ret = lm3630_chip_init(pchip);
- if (ret < 0) {
- dev_err(&client->dev, "fail : init chip\n");
- goto err_chip_init;
- }
-
- switch (pdata->bank_a_ctrl) {
- case BANK_A_CTRL_ALL:
- ret = lm3630_backlight_register(pchip, BLED_ALL);
- pdata->bank_b_ctrl = BANK_B_CTRL_DISABLE;
- break;
- case BANK_A_CTRL_LED1:
- ret = lm3630_backlight_register(pchip, BLED_1);
- break;
- case BANK_A_CTRL_LED2:
- ret = lm3630_backlight_register(pchip, BLED_2);
- pdata->bank_b_ctrl = BANK_B_CTRL_DISABLE;
- break;
- default:
- break;
- }
-
- if (ret < 0)
- goto err_bl_reg;
-
- if (pdata->bank_b_ctrl && pchip->bled2 == NULL) {
- ret = lm3630_backlight_register(pchip, BLED_2);
- if (ret < 0)
- goto err_bl_reg;
- }
-
- /* interrupt enable : irq 0 is not allowed for lm3630 */
- pchip->irq = client->irq;
- if (pchip->irq)
- lm3630_intr_config(pchip);
-
- dev_info(&client->dev, "LM3630 backlight register OK.\n");
- return 0;
-
-err_bl_reg:
- dev_err(&client->dev, "fail : backlight register.\n");
- lm3630_backlight_unregister(pchip);
-err_chip_init:
- return ret;
-}
-
-static int lm3630_remove(struct i2c_client *client)
-{
- int ret;
- struct lm3630_chip_data *pchip = i2c_get_clientdata(client);
-
- ret = regmap_write(pchip->regmap, REG_BRT_A, 0);
- if (ret < 0)
- dev_err(pchip->dev, "i2c failed to access register\n");
-
- ret = regmap_write(pchip->regmap, REG_BRT_B, 0);
- if (ret < 0)
- dev_err(pchip->dev, "i2c failed to access register\n");
-
- lm3630_backlight_unregister(pchip);
- if (pchip->irq) {
- free_irq(pchip->irq, pchip);
- flush_workqueue(pchip->irqthread);
- destroy_workqueue(pchip->irqthread);
- }
- return 0;
-}
-
-static const struct i2c_device_id lm3630_id[] = {
- {LM3630_NAME, 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, lm3630_id);
-
-static struct i2c_driver lm3630_i2c_driver = {
- .driver = {
- .name = LM3630_NAME,
- },
- .probe = lm3630_probe,
- .remove = lm3630_remove,
- .id_table = lm3630_id,
-};
-
-module_i2c_driver(lm3630_i2c_driver);
-
-MODULE_DESCRIPTION("Texas Instruments Backlight driver for LM3630");
-MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
-MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
new file mode 100644
index 00000000000..35fe4825a45
--- /dev/null
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -0,0 +1,483 @@
+/*
+* Simple driver for Texas Instruments LM3630A Backlight driver chip
+* Copyright (C) 2012 Texas Instruments
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/pwm.h>
+#include <linux/platform_data/lm3630a_bl.h>
+
+#define REG_CTRL 0x00
+#define REG_BOOST 0x02
+#define REG_CONFIG 0x01
+#define REG_BRT_A 0x03
+#define REG_BRT_B 0x04
+#define REG_I_A 0x05
+#define REG_I_B 0x06
+#define REG_INT_STATUS 0x09
+#define REG_INT_EN 0x0A
+#define REG_FAULT 0x0B
+#define REG_PWM_OUTLOW 0x12
+#define REG_PWM_OUTHIGH 0x13
+#define REG_MAX 0x1F
+
+#define INT_DEBOUNCE_MSEC 10
+struct lm3630a_chip {
+ struct device *dev;
+ struct delayed_work work;
+
+ int irq;
+ struct workqueue_struct *irqthread;
+ struct lm3630a_platform_data *pdata;
+ struct backlight_device *bleda;
+ struct backlight_device *bledb;
+ struct regmap *regmap;
+ struct pwm_device *pwmd;
+};
+
+/* i2c access */
+static int lm3630a_read(struct lm3630a_chip *pchip, unsigned int reg)
+{
+ int rval;
+ unsigned int reg_val;
+
+ rval = regmap_read(pchip->regmap, reg, &reg_val);
+ if (rval < 0)
+ return rval;
+ return reg_val & 0xFF;
+}
+
+static int lm3630a_write(struct lm3630a_chip *pchip,
+ unsigned int reg, unsigned int data)
+{
+ return regmap_write(pchip->regmap, reg, data);
+}
+
+static int lm3630a_update(struct lm3630a_chip *pchip,
+ unsigned int reg, unsigned int mask,
+ unsigned int data)
+{
+ return regmap_update_bits(pchip->regmap, reg, mask, data);
+}
+
+/* initialize chip */
+static int lm3630a_chip_init(struct lm3630a_chip *pchip)
+{
+ int rval;
+ struct lm3630a_platform_data *pdata = pchip->pdata;
+
+ usleep_range(1000, 2000);
+ /* set Filter Strength Register */
+ rval = lm3630a_write(pchip, 0x50, 0x03);
+ /* set Config. register */
+ rval |= lm3630a_update(pchip, REG_CONFIG, 0x07, pdata->pwm_ctrl);
+ /* set boost control */
+ rval |= lm3630a_write(pchip, REG_BOOST, 0x38);
+ /* set current A */
+ rval |= lm3630a_update(pchip, REG_I_A, 0x1F, 0x1F);
+ /* set current B */
+ rval |= lm3630a_write(pchip, REG_I_B, 0x1F);
+ /* set control */
+ rval |= lm3630a_update(pchip, REG_CTRL, 0x14, pdata->leda_ctrl);
+ rval |= lm3630a_update(pchip, REG_CTRL, 0x0B, pdata->ledb_ctrl);
+ usleep_range(1000, 2000);
+ /* set brightness A and B */
+ rval |= lm3630a_write(pchip, REG_BRT_A, pdata->leda_init_brt);
+ rval |= lm3630a_write(pchip, REG_BRT_B, pdata->ledb_init_brt);
+
+ if (rval < 0)
+ dev_err(pchip->dev, "i2c failed to access register\n");
+ return rval;
+}
+
+/* interrupt handling */
+static void lm3630a_delayed_func(struct work_struct *work)
+{
+ int rval;
+ struct lm3630a_chip *pchip;
+
+ pchip = container_of(work, struct lm3630a_chip, work.work);
+
+ rval = lm3630a_read(pchip, REG_INT_STATUS);
+ if (rval < 0) {
+ dev_err(pchip->dev,
+ "i2c failed to access REG_INT_STATUS Register\n");
+ return;
+ }
+
+ dev_info(pchip->dev, "REG_INT_STATUS Register is 0x%x\n", rval);
+}
+
+static irqreturn_t lm3630a_isr_func(int irq, void *chip)
+{
+ int rval;
+ struct lm3630a_chip *pchip = chip;
+ unsigned long delay = msecs_to_jiffies(INT_DEBOUNCE_MSEC);
+
+ queue_delayed_work(pchip->irqthread, &pchip->work, delay);
+
+ rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+ if (rval < 0) {
+ dev_err(pchip->dev, "i2c failed to access register\n");
+ return IRQ_NONE;
+ }
+ return IRQ_HANDLED;
+}
+
+static int lm3630a_intr_config(struct lm3630a_chip *pchip)
+{
+ int rval;
+
+ rval = lm3630a_write(pchip, REG_INT_EN, 0x87);
+ if (rval < 0)
+ return rval;
+
+ INIT_DELAYED_WORK(&pchip->work, lm3630a_delayed_func);
+ pchip->irqthread = create_singlethread_workqueue("lm3630a-irqthd");
+ if (!pchip->irqthread) {
+ dev_err(pchip->dev, "create irq thread fail\n");
+ return -ENOMEM;
+ }
+ if (request_threaded_irq
+ (pchip->irq, NULL, lm3630a_isr_func,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lm3630a_irq", pchip)) {
+ dev_err(pchip->dev, "request threaded irq fail\n");
+ destroy_workqueue(pchip->irqthread);
+ return -ENOMEM;
+ }
+ return rval;
+}
+
+static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
+{
+ unsigned int period = pwm_get_period(pchip->pwmd);
+ unsigned int duty = br * period / br_max;
+
+ pwm_config(pchip->pwmd, duty, period);
+ if (duty)
+ pwm_enable(pchip->pwmd);
+ else
+ pwm_disable(pchip->pwmd);
+}
+
+/* update and get brightness */
+static int lm3630a_bank_a_update_status(struct backlight_device *bl)
+{
+ int ret;
+ struct lm3630a_chip *pchip = bl_get_data(bl);
+ enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+ /* pwm control */
+ if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) {
+ lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ bl->props.max_brightness);
+ return bl->props.brightness;
+ }
+
+ /* disable sleep */
+ ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+ if (ret < 0)
+ goto out_i2c_err;
+ usleep_range(1000, 2000);
+ /* minimum brightness is 0x04 */
+ ret = lm3630a_write(pchip, REG_BRT_A, bl->props.brightness);
+ if (bl->props.brightness < 0x4)
+ ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, 0);
+ else
+ ret |= lm3630a_update(pchip, REG_CTRL,
+ LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
+ if (ret < 0)
+ goto out_i2c_err;
+ return bl->props.brightness;
+
+out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access\n");
+ return bl->props.brightness;
+}
+
+static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+{
+ int brightness, rval;
+ struct lm3630a_chip *pchip = bl_get_data(bl);
+ enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+ if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) {
+ rval = lm3630a_read(pchip, REG_PWM_OUTHIGH);
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness = (rval & 0x01) << 8;
+ rval = lm3630a_read(pchip, REG_PWM_OUTLOW);
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness |= rval;
+ goto out;
+ }
+
+ /* disable sleep */
+ rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+ if (rval < 0)
+ goto out_i2c_err;
+ usleep_range(1000, 2000);
+ rval = lm3630a_read(pchip, REG_BRT_A);
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness = rval;
+
+out:
+ bl->props.brightness = brightness;
+ return bl->props.brightness;
+out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access register\n");
+ return 0;
+}
+
+static const struct backlight_ops lm3630a_bank_a_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = lm3630a_bank_a_update_status,
+ .get_brightness = lm3630a_bank_a_get_brightness,
+};
+
+/* update and get brightness */
+static int lm3630a_bank_b_update_status(struct backlight_device *bl)
+{
+ int ret;
+ struct lm3630a_chip *pchip = bl_get_data(bl);
+ enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+ /* pwm control */
+ if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) {
+ lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ bl->props.max_brightness);
+ return bl->props.brightness;
+ }
+
+ /* disable sleep */
+ ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+ if (ret < 0)
+ goto out_i2c_err;
+ usleep_range(1000, 2000);
+ /* minimum brightness is 0x04 */
+ ret = lm3630a_write(pchip, REG_BRT_B, bl->props.brightness);
+ if (bl->props.brightness < 0x4)
+ ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, 0);
+ else
+ ret |= lm3630a_update(pchip, REG_CTRL,
+ LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
+ if (ret < 0)
+ goto out_i2c_err;
+ return bl->props.brightness;
+
+out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
+ return bl->props.brightness;
+}
+
+static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+{
+ int brightness, rval;
+ struct lm3630a_chip *pchip = bl_get_data(bl);
+ enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+ if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) {
+ rval = lm3630a_read(pchip, REG_PWM_OUTHIGH);
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness = (rval & 0x01) << 8;
+ rval = lm3630a_read(pchip, REG_PWM_OUTLOW);
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness |= rval;
+ goto out;
+ }
+
+ /* disable sleep */
+ rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+ if (rval < 0)
+ goto out_i2c_err;
+ usleep_range(1000, 2000);
+ rval = lm3630a_read(pchip, REG_BRT_B);
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness = rval;
+
+out:
+ bl->props.brightness = brightness;
+ return bl->props.brightness;
+out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access register\n");
+ return 0;
+}
+
+static const struct backlight_ops lm3630a_bank_b_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = lm3630a_bank_b_update_status,
+ .get_brightness = lm3630a_bank_b_get_brightness,
+};
+
+static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
+{
+ struct backlight_properties props;
+ struct lm3630a_platform_data *pdata = pchip->pdata;
+
+ props.type = BACKLIGHT_RAW;
+ if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
+ props.brightness = pdata->leda_init_brt;
+ props.max_brightness = pdata->leda_max_brt;
+ pchip->bleda =
+ devm_backlight_device_register(pchip->dev, "lm3630a_leda",
+ pchip->dev, pchip,
+ &lm3630a_bank_a_ops, &props);
+ if (IS_ERR(pchip->bleda))
+ return PTR_ERR(pchip->bleda);
+ }
+
+ if ((pdata->ledb_ctrl != LM3630A_LEDB_DISABLE) &&
+ (pdata->ledb_ctrl != LM3630A_LEDB_ON_A)) {
+ props.brightness = pdata->ledb_init_brt;
+ props.max_brightness = pdata->ledb_max_brt;
+ pchip->bledb =
+ devm_backlight_device_register(pchip->dev, "lm3630a_ledb",
+ pchip->dev, pchip,
+ &lm3630a_bank_b_ops, &props);
+ if (IS_ERR(pchip->bledb))
+ return PTR_ERR(pchip->bledb);
+ }
+ return 0;
+}
+
+static const struct regmap_config lm3630a_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = REG_MAX,
+};
+
+static int lm3630a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3630a_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct lm3630a_chip *pchip;
+ int rval;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "fail : i2c functionality check\n");
+ return -EOPNOTSUPP;
+ }
+
+ pchip = devm_kzalloc(&client->dev, sizeof(struct lm3630a_chip),
+ GFP_KERNEL);
+ if (!pchip)
+ return -ENOMEM;
+ pchip->dev = &client->dev;
+
+ pchip->regmap = devm_regmap_init_i2c(client, &lm3630a_regmap);
+ if (IS_ERR(pchip->regmap)) {
+ rval = PTR_ERR(pchip->regmap);
+ dev_err(&client->dev, "fail : allocate reg. map: %d\n", rval);
+ return rval;
+ }
+
+ i2c_set_clientdata(client, pchip);
+ if (pdata == NULL) {
+ pdata = devm_kzalloc(pchip->dev,
+ sizeof(struct lm3630a_platform_data),
+ GFP_KERNEL);
+ if (pdata == NULL)
+ return -ENOMEM;
+ /* default values */
+ pdata->leda_ctrl = LM3630A_LEDA_ENABLE;
+ pdata->ledb_ctrl = LM3630A_LEDB_ENABLE;
+ pdata->leda_max_brt = LM3630A_MAX_BRIGHTNESS;
+ pdata->ledb_max_brt = LM3630A_MAX_BRIGHTNESS;
+ pdata->leda_init_brt = LM3630A_MAX_BRIGHTNESS;
+ pdata->ledb_init_brt = LM3630A_MAX_BRIGHTNESS;
+ }
+ pchip->pdata = pdata;
+
+ /* chip initialize */
+ rval = lm3630a_chip_init(pchip);
+ if (rval < 0) {
+ dev_err(&client->dev, "fail : init chip\n");
+ return rval;
+ }
+ /* backlight register */
+ rval = lm3630a_backlight_register(pchip);
+ if (rval < 0) {
+ dev_err(&client->dev, "fail : backlight register.\n");
+ return rval;
+ }
+ /* pwm */
+ if (pdata->pwm_ctrl != LM3630A_PWM_DISABLE) {
+ pchip->pwmd = devm_pwm_get(pchip->dev, "lm3630a-pwm");
+ if (IS_ERR(pchip->pwmd)) {
+ dev_err(&client->dev, "fail : get pwm device\n");
+ return PTR_ERR(pchip->pwmd);
+ }
+ pchip->pwmd->period = pdata->pwm_period;
+ }
+
+ /* interrupt enable : irq 0 is not allowed */
+ pchip->irq = client->irq;
+ if (pchip->irq) {
+ rval = lm3630a_intr_config(pchip);
+ if (rval < 0)
+ return rval;
+ }
+ dev_info(&client->dev, "LM3630A backlight register OK.\n");
+ return 0;
+}
+
+static int lm3630a_remove(struct i2c_client *client)
+{
+ int rval;
+ struct lm3630a_chip *pchip = i2c_get_clientdata(client);
+
+ rval = lm3630a_write(pchip, REG_BRT_A, 0);
+ if (rval < 0)
+ dev_err(pchip->dev, "i2c failed to access register\n");
+
+ rval = lm3630a_write(pchip, REG_BRT_B, 0);
+ if (rval < 0)
+ dev_err(pchip->dev, "i2c failed to access register\n");
+
+ if (pchip->irq) {
+ free_irq(pchip->irq, pchip);
+ flush_workqueue(pchip->irqthread);
+ destroy_workqueue(pchip->irqthread);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id lm3630a_id[] = {
+ {LM3630A_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3630a_id);
+
+static struct i2c_driver lm3630a_i2c_driver = {
+ .driver = {
+ .name = LM3630A_NAME,
+ },
+ .probe = lm3630a_probe,
+ .remove = lm3630a_remove,
+ .id_table = lm3630a_id,
+};
+
+module_i2c_driver(lm3630a_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments Backlight driver for LM3630A");
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("LDD MLP <ldd-mlp@list.ti.com>");
+MODULE_LICENSE("GPL v2");
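The new lm3630a driver above falls back to built-in defaults when probe finds no platform data, so a board that needs different bank routing supplies a struct lm3630a_platform_data through i2c_board_info. The fragment below is a hypothetical board-file sketch: the field names and constants are taken from the driver code above, but the chosen values, the bus number, and the 0x38 slave address are assumptions for illustration only.

#include <linux/i2c.h>
#include <linux/platform_data/lm3630a_bl.h>

/* Bank A drives LEDA, LEDB is tied to bank A, no external PWM input. */
static struct lm3630a_platform_data board_lm3630a_pdata = {
	.leda_ctrl	= LM3630A_LEDA_ENABLE,
	.ledb_ctrl	= LM3630A_LEDB_ON_A,
	.leda_max_brt	= LM3630A_MAX_BRIGHTNESS,
	.leda_init_brt	= LM3630A_MAX_BRIGHTNESS / 2,
	.pwm_ctrl	= LM3630A_PWM_DISABLE,
};

static struct i2c_board_info board_i2c1_devs[] __initdata = {
	{
		/* 0x38 is an assumed address; check the board schematic. */
		I2C_BOARD_INFO(LM3630A_NAME, 0x38),
		.platform_data = &board_lm3630a_pdata,
	},
};

/* Typically registered from the board's init_machine() hook:
 *	i2c_register_board_info(1, board_i2c1_devs, ARRAY_SIZE(board_i2c1_devs));
 */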
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 053964da8dd..6fd60adf922 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -76,10 +76,13 @@ static int lm3639_chip_init(struct lm3639_chip_data *pchip)
goto out;
/* output pins config. */
- if (!pdata->init_brt_led)
- reg_val = pdata->fled_pins | pdata->bled_pins;
- else
- reg_val = pdata->fled_pins | pdata->bled_pins | 0x01;
+ if (!pdata->init_brt_led) {
+ reg_val = pdata->fled_pins;
+ reg_val |= pdata->bled_pins;
+ } else {
+ reg_val = pdata->fled_pins;
+ reg_val |= pdata->bled_pins | 0x01;
+ }
ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x79, reg_val);
if (ret < 0)
@@ -304,7 +307,7 @@ static int lm3639_probe(struct i2c_client *client,
{
int ret;
struct lm3639_chip_data *pchip;
- struct lm3639_platform_data *pdata = client->dev.platform_data;
+ struct lm3639_platform_data *pdata = dev_get_platdata(&client->dev);
struct backlight_properties props;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4eec47261cd..de8832504f6 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -128,7 +128,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power)
{
struct lms283gf05_state *st = lcd_get_data(ld);
struct spi_device *spi = st->spi;
- struct lms283gf05_pdata *pdata = spi->dev.platform_data;
+ struct lms283gf05_pdata *pdata = dev_get_platdata(&spi->dev);
if (power <= FB_BLANK_NORMAL) {
if (pdata)
@@ -153,7 +153,7 @@ static struct lcd_ops lms_ops = {
static int lms283gf05_probe(struct spi_device *spi)
{
struct lms283gf05_state *st;
- struct lms283gf05_pdata *pdata = spi->dev.platform_data;
+ struct lms283gf05_pdata *pdata = dev_get_platdata(&spi->dev);
struct lcd_device *ld;
int ret = 0;
@@ -173,7 +173,8 @@ static int lms283gf05_probe(struct spi_device *spi)
return -ENOMEM;
}
- ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
+ ld = devm_lcd_device_register(&spi->dev, "lms283gf05", &spi->dev, st,
+ &lms_ops);
if (IS_ERR(ld))
return PTR_ERR(ld);
@@ -190,22 +191,12 @@ static int lms283gf05_probe(struct spi_device *spi)
return 0;
}
-static int lms283gf05_remove(struct spi_device *spi)
-{
- struct lms283gf05_state *st = spi_get_drvdata(spi);
-
- lcd_device_unregister(st->ld);
-
- return 0;
-}
-
static struct spi_driver lms283gf05_driver = {
.driver = {
.name = "lms283gf05",
.owner = THIS_MODULE,
},
.probe = lms283gf05_probe,
- .remove = lms283gf05_remove,
};
module_spi_driver(lms283gf05_driver);
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index cf01b9ac813..77258b7b04b 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -344,14 +344,14 @@ static int lms501kf03_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->dev = &spi->dev;
- lcd->lcd_pd = spi->dev.platform_data;
+ lcd->lcd_pd = dev_get_platdata(&spi->dev);
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
return -EINVAL;
}
- ld = lcd_device_register("lms501kf03", &spi->dev, lcd,
- &lms501kf03_lcd_ops);
+ ld = devm_lcd_device_register(&spi->dev, "lms501kf03", &spi->dev, lcd,
+ &lms501kf03_lcd_ops);
if (IS_ERR(ld))
return PTR_ERR(ld);
@@ -382,8 +382,6 @@ static int lms501kf03_remove(struct spi_device *spi)
struct lms501kf03 *lcd = spi_get_drvdata(spi);
lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
- lcd_device_unregister(lcd->ld);
-
return 0;
}
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index c0b41f13bd4..cae80d555e8 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -26,13 +26,15 @@
#define LP8556_EPROM_START 0xA0
#define LP8556_EPROM_END 0xAF
-/* LP8557 Registers */
+/* LP8555/7 Registers */
#define LP8557_BL_CMD 0x00
#define LP8557_BL_MASK 0x01
#define LP8557_BL_ON 0x01
#define LP8557_BL_OFF 0x00
#define LP8557_BRIGHTNESS_CTRL 0x04
#define LP8557_CONFIG 0x10
+#define LP8555_EPROM_START 0x10
+#define LP8555_EPROM_END 0x7A
#define LP8557_EPROM_START 0x10
#define LP8557_EPROM_END 0x1E
@@ -111,6 +113,10 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
start = LP8556_EPROM_START;
end = LP8556_EPROM_END;
break;
+ case LP8555:
+ start = LP8555_EPROM_START;
+ end = LP8555_EPROM_END;
+ break;
case LP8557:
start = LP8557_EPROM_START;
end = LP8557_EPROM_END;
@@ -165,9 +171,14 @@ static int lp855x_configure(struct lp855x *lp)
struct lp855x_platform_data *pd = lp->pdata;
switch (lp->chip_id) {
- case LP8550 ... LP8556:
+ case LP8550:
+ case LP8551:
+ case LP8552:
+ case LP8553:
+ case LP8556:
lp->cfg = &lp855x_dev_cfg;
break;
+ case LP8555:
case LP8557:
lp->cfg = &lp8557_dev_cfg;
break;
@@ -289,7 +300,7 @@ static int lp855x_backlight_register(struct lp855x *lp)
props.brightness = pdata->initial_brightness;
- bl = backlight_device_register(name, lp->dev, lp,
+ bl = devm_backlight_device_register(lp->dev, name, lp->dev, lp,
&lp855x_bl_ops, &props);
if (IS_ERR(bl))
return PTR_ERR(bl);
@@ -299,12 +310,6 @@ static int lp855x_backlight_register(struct lp855x *lp)
return 0;
}
-static void lp855x_backlight_unregister(struct lp855x *lp)
-{
- if (lp->bl)
- backlight_device_unregister(lp->bl);
-}
-
static ssize_t lp855x_get_chip_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -394,7 +399,7 @@ static int lp855x_parse_dt(struct device *dev, struct device_node *node)
static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct lp855x *lp;
- struct lp855x_platform_data *pdata = cl->dev.platform_data;
+ struct lp855x_platform_data *pdata = dev_get_platdata(&cl->dev);
struct device_node *node = cl->dev.of_node;
int ret;
@@ -403,7 +408,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
if (ret < 0)
return ret;
- pdata = cl->dev.platform_data;
+ pdata = dev_get_platdata(&cl->dev);
}
if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
@@ -428,29 +433,24 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = lp855x_configure(lp);
if (ret) {
dev_err(lp->dev, "device config err: %d", ret);
- goto err_dev;
+ return ret;
}
ret = lp855x_backlight_register(lp);
if (ret) {
dev_err(lp->dev,
"failed to register backlight. err: %d\n", ret);
- goto err_dev;
+ return ret;
}
ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group);
if (ret) {
dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret);
- goto err_sysfs;
+ return ret;
}
backlight_update_status(lp->bl);
return 0;
-
-err_sysfs:
- lp855x_backlight_unregister(lp);
-err_dev:
- return ret;
}
static int lp855x_remove(struct i2c_client *cl)
@@ -460,7 +460,6 @@ static int lp855x_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
- lp855x_backlight_unregister(lp);
return 0;
}
@@ -470,6 +469,7 @@ static const struct of_device_id lp855x_dt_ids[] = {
{ .compatible = "ti,lp8551", },
{ .compatible = "ti,lp8552", },
{ .compatible = "ti,lp8553", },
+ { .compatible = "ti,lp8555", },
{ .compatible = "ti,lp8556", },
{ .compatible = "ti,lp8557", },
{ }
@@ -481,6 +481,7 @@ static const struct i2c_device_id lp855x_ids[] = {
{"lp8551", LP8551},
{"lp8552", LP8552},
{"lp8553", LP8553},
+ {"lp8555", LP8555},
{"lp8556", LP8556},
{"lp8557", LP8557},
{ }
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index 980855ec9bb..e49905d495d 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -52,7 +52,7 @@ struct lp8788_bl {
struct pwm_device *pwm;
};
-struct lp8788_bl_config default_bl_config = {
+static struct lp8788_bl_config default_bl_config = {
.bl_mode = LP8788_BL_REGISTER_ONLY,
.dim_mode = LP8788_DIM_EXPONENTIAL,
.full_scale = LP8788_FULLSCALE_1900uA,
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index ed1b3926813..383f550e165 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -242,7 +242,8 @@ static int ltv350qv_probe(struct spi_device *spi)
if (!lcd->buffer)
return -ENOMEM;
- ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
+ ld = devm_lcd_device_register(&spi->dev, "ltv350qv", &spi->dev, lcd,
+ &ltv_ops);
if (IS_ERR(ld))
return PTR_ERR(ld);
@@ -250,15 +251,11 @@ static int ltv350qv_probe(struct spi_device *spi)
ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
if (ret)
- goto out_unregister;
+ return ret;
spi_set_drvdata(spi, lcd);
return 0;
-
-out_unregister:
- lcd_device_unregister(ld);
- return ret;
}
static int ltv350qv_remove(struct spi_device *spi)
@@ -266,8 +263,6 @@ static int ltv350qv_remove(struct spi_device *spi)
struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
- lcd_device_unregister(lcd->ld);
-
return 0;
}
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 498fd73d59b..1802b2d1357 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -93,7 +93,7 @@ static const struct backlight_ops lv5207lp_backlight_ops = {
static int lv5207lp_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lv5207lp_platform_data *pdata = client->dev.platform_data;
+ struct lv5207lp_platform_data *pdata = dev_get_platdata(&client->dev);
struct backlight_device *backlight;
struct backlight_properties props;
struct lv5207lp *lv;
@@ -124,9 +124,9 @@ static int lv5207lp_probe(struct i2c_client *client,
props.brightness = clamp_t(unsigned int, pdata->def_value, 0,
props.max_brightness);
- backlight = backlight_device_register(dev_name(&client->dev),
- &lv->client->dev, lv,
- &lv5207lp_backlight_ops, &props);
+ backlight = devm_backlight_device_register(&client->dev,
+ dev_name(&client->dev), &lv->client->dev,
+ lv, &lv5207lp_backlight_ops, &props);
if (IS_ERR(backlight)) {
dev_err(&client->dev, "failed to register backlight\n");
return PTR_ERR(backlight);
@@ -144,7 +144,6 @@ static int lv5207lp_remove(struct i2c_client *client)
backlight->props.brightness = 0;
backlight_update_status(backlight);
- backlight_device_unregister(backlight);
return 0;
}
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 886e797f75f..66fa08c920d 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -163,7 +163,8 @@ static int max8925_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
- bl = backlight_device_register("max8925-backlight", &pdev->dev, data,
+ bl = devm_backlight_device_register(&pdev->dev, "max8925-backlight",
+ &pdev->dev, data,
&max8925_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
@@ -188,20 +189,9 @@ static int max8925_backlight_probe(struct platform_device *pdev)
}
ret = max8925_set_bits(chip->i2c, data->reg_mode_cntl, 0xfe, value);
if (ret < 0)
- goto out_brt;
+ return ret;
backlight_update_status(bl);
return 0;
-out_brt:
- backlight_device_unregister(bl);
- return ret;
-}
-
-static int max8925_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
- return 0;
}
static struct platform_driver max8925_backlight_driver = {
@@ -210,7 +200,6 @@ static struct platform_driver max8925_backlight_driver = {
.owner = THIS_MODULE,
},
.probe = max8925_backlight_probe,
- .remove = max8925_backlight_remove,
};
module_platform_driver(max8925_backlight_driver);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 812e22e35ca..ac11a4650c1 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -133,7 +133,7 @@ static int omapbl_probe(struct platform_device *pdev)
struct backlight_properties props;
struct backlight_device *dev;
struct omap_backlight *bl;
- struct omap_backlight_config *pdata = pdev->dev.platform_data;
+ struct omap_backlight_config *pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 633b0a22fd6..2098c5d6efb 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -120,8 +120,8 @@ static int pandora_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(props));
props.max_brightness = MAX_USER_VALUE;
props.type = BACKLIGHT_RAW;
- bl = backlight_device_register(pdev->name, &pdev->dev,
- NULL, &pandora_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, pdev->name, &pdev->dev,
+ NULL, &pandora_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -145,20 +145,12 @@ static int pandora_backlight_probe(struct platform_device *pdev)
return 0;
}
-static int pandora_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bl = platform_get_drvdata(pdev);
- backlight_device_unregister(bl);
- return 0;
-}
-
static struct platform_driver pandora_backlight_driver = {
.driver = {
.name = "pandora-backlight",
.owner = THIS_MODULE,
},
.probe = pandora_backlight_probe,
- .remove = pandora_backlight_remove,
};
module_platform_driver(pandora_backlight_driver);
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 6ed76be18f1..b95d3b0aaff 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -103,7 +103,7 @@ static int pcf50633_bl_probe(struct platform_device *pdev)
{
struct pcf50633_bl *pcf_bl;
struct device *parent = pdev->dev.parent;
- struct pcf50633_platform_data *pcf50633_data = parent->platform_data;
+ struct pcf50633_platform_data *pcf50633_data = dev_get_platdata(parent);
struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data;
struct backlight_properties bl_props;
@@ -126,7 +126,8 @@ static int pcf50633_bl_probe(struct platform_device *pdev)
pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent);
- pcf_bl->bl = backlight_device_register(pdev->name, &pdev->dev, pcf_bl,
+ pcf_bl->bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+ &pdev->dev, pcf_bl,
&pcf50633_bl_ops, &bl_props);
if (IS_ERR(pcf_bl->bl))
@@ -147,18 +148,8 @@ static int pcf50633_bl_probe(struct platform_device *pdev)
return 0;
}
-static int pcf50633_bl_remove(struct platform_device *pdev)
-{
- struct pcf50633_bl *pcf_bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(pcf_bl->bl);
-
- return 0;
-}
-
static struct platform_driver pcf50633_bl_driver = {
.probe = pcf50633_bl_probe,
- .remove = pcf50633_bl_remove,
.driver = {
.name = "pcf50633-backlight",
},
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 05683670670..d01884d4f1b 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -80,7 +80,7 @@ static int platform_lcd_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int err;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(dev, "no platform data supplied\n");
return -EINVAL;
@@ -101,30 +101,17 @@ static int platform_lcd_probe(struct platform_device *pdev)
plcd->us = dev;
plcd->pdata = pdata;
- plcd->lcd = lcd_device_register(dev_name(dev), dev,
- plcd, &platform_lcd_ops);
+ plcd->lcd = devm_lcd_device_register(&pdev->dev, dev_name(dev), dev,
+ plcd, &platform_lcd_ops);
if (IS_ERR(plcd->lcd)) {
dev_err(dev, "cannot register lcd device\n");
- err = PTR_ERR(plcd->lcd);
- goto err;
+ return PTR_ERR(plcd->lcd);
}
platform_set_drvdata(pdev, plcd);
platform_lcd_set_power(plcd->lcd, FB_BLANK_NORMAL);
return 0;
-
- err:
- return err;
-}
-
-static int platform_lcd_remove(struct platform_device *pdev)
-{
- struct platform_lcd *plcd = platform_get_drvdata(pdev);
-
- lcd_device_unregister(plcd->lcd);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -168,7 +155,6 @@ static struct platform_driver platform_lcd_driver = {
.of_match_table = of_match_ptr(platform_lcd_of_match),
},
.probe = platform_lcd_probe,
- .remove = platform_lcd_remove,
};
module_platform_driver(platform_lcd_driver);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 1fea627394d..fb80d68f4d3 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -19,6 +21,7 @@
#include <linux/err.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
struct pwm_bl_data {
@@ -27,6 +30,11 @@ struct pwm_bl_data {
unsigned int period;
unsigned int lth_brightness;
unsigned int *levels;
+ bool enabled;
+ struct regulator *power_supply;
+ int enable_gpio;
+ unsigned long enable_gpio_flags;
+ unsigned int scale;
int (*notify)(struct device *,
int brightness);
void (*notify_after)(struct device *,
@@ -35,11 +43,65 @@ struct pwm_bl_data {
void (*exit)(struct device *);
};
+static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
+{
+ int err;
+
+ if (pb->enabled)
+ return;
+
+ err = regulator_enable(pb->power_supply);
+ if (err < 0)
+ dev_err(pb->dev, "failed to enable power supply\n");
+
+ if (gpio_is_valid(pb->enable_gpio)) {
+ if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
+ gpio_set_value(pb->enable_gpio, 0);
+ else
+ gpio_set_value(pb->enable_gpio, 1);
+ }
+
+ pwm_enable(pb->pwm);
+ pb->enabled = true;
+}
+
+static void pwm_backlight_power_off(struct pwm_bl_data *pb)
+{
+ if (!pb->enabled)
+ return;
+
+ pwm_config(pb->pwm, 0, pb->period);
+ pwm_disable(pb->pwm);
+
+ if (gpio_is_valid(pb->enable_gpio)) {
+ if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
+ gpio_set_value(pb->enable_gpio, 1);
+ else
+ gpio_set_value(pb->enable_gpio, 0);
+ }
+
+ regulator_disable(pb->power_supply);
+ pb->enabled = false;
+}
+
+static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
+{
+ unsigned int lth = pb->lth_brightness;
+ int duty_cycle;
+
+ if (pb->levels)
+ duty_cycle = pb->levels[brightness];
+ else
+ duty_cycle = brightness;
+
+ return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+}
+
static int pwm_backlight_update_status(struct backlight_device *bl)
{
struct pwm_bl_data *pb = bl_get_data(bl);
int brightness = bl->props.brightness;
- int max = bl->props.max_brightness;
+ int duty_cycle;
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK ||
@@ -49,24 +111,12 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
if (pb->notify)
brightness = pb->notify(pb->dev, brightness);
- if (brightness == 0) {
- pwm_config(pb->pwm, 0, pb->period);
- pwm_disable(pb->pwm);
- } else {
- int duty_cycle;
-
- if (pb->levels) {
- duty_cycle = pb->levels[brightness];
- max = pb->levels[max];
- } else {
- duty_cycle = brightness;
- }
-
- duty_cycle = pb->lth_brightness +
- (duty_cycle * (pb->period - pb->lth_brightness) / max);
+ if (brightness > 0) {
+ duty_cycle = compute_duty_cycle(pb, brightness);
pwm_config(pb->pwm, duty_cycle, pb->period);
- pwm_enable(pb->pwm);
- }
+ pwm_backlight_power_on(pb, brightness);
+ } else
+ pwm_backlight_power_off(pb);
if (pb->notify_after)
pb->notify_after(pb->dev, brightness);
@@ -98,6 +148,7 @@ static int pwm_backlight_parse_dt(struct device *dev,
struct platform_pwm_backlight_data *data)
{
struct device_node *node = dev->of_node;
+ enum of_gpio_flags flags;
struct property *prop;
int length;
u32 value;
@@ -138,11 +189,13 @@ static int pwm_backlight_parse_dt(struct device *dev,
data->max_brightness--;
}
- /*
- * TODO: Most users of this driver use a number of GPIOs to control
- * backlight power. Support for specifying these needs to be
- * added.
- */
+ data->enable_gpio = of_get_named_gpio_flags(node, "enable-gpios", 0,
+ &flags);
+ if (data->enable_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (gpio_is_valid(data->enable_gpio) && (flags & OF_GPIO_ACTIVE_LOW))
+ data->enable_gpio_flags |= PWM_BACKLIGHT_GPIO_ACTIVE_LOW;
return 0;
}
@@ -163,12 +216,11 @@ static int pwm_backlight_parse_dt(struct device *dev,
static int pwm_backlight_probe(struct platform_device *pdev)
{
- struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
+ struct platform_pwm_backlight_data *data = dev_get_platdata(&pdev->dev);
struct platform_pwm_backlight_data defdata;
struct backlight_properties props;
struct backlight_device *bl;
struct pwm_bl_data *pb;
- unsigned int max;
int ret;
if (!data) {
@@ -195,16 +247,46 @@ static int pwm_backlight_probe(struct platform_device *pdev)
}
if (data->levels) {
- max = data->levels[data->max_brightness];
+ unsigned int i;
+
+ for (i = 0; i <= data->max_brightness; i++)
+ if (data->levels[i] > pb->scale)
+ pb->scale = data->levels[i];
+
pb->levels = data->levels;
} else
- max = data->max_brightness;
+ pb->scale = data->max_brightness;
+ pb->enable_gpio = data->enable_gpio;
+ pb->enable_gpio_flags = data->enable_gpio_flags;
pb->notify = data->notify;
pb->notify_after = data->notify_after;
pb->check_fb = data->check_fb;
pb->exit = data->exit;
pb->dev = &pdev->dev;
+ pb->enabled = false;
+
+ if (gpio_is_valid(pb->enable_gpio)) {
+ unsigned long flags;
+
+ if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
+ flags = GPIOF_OUT_INIT_HIGH;
+ else
+ flags = GPIOF_OUT_INIT_LOW;
+
+ ret = gpio_request_one(pb->enable_gpio, flags, "enable");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n",
+ pb->enable_gpio, ret);
+ goto err_alloc;
+ }
+ }
+
+ pb->power_supply = devm_regulator_get(&pdev->dev, "power");
+ if (IS_ERR(pb->power_supply)) {
+ ret = PTR_ERR(pb->power_supply);
+ goto err_gpio;
+ }
pb->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(pb->pwm)) {
@@ -214,7 +296,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (IS_ERR(pb->pwm)) {
dev_err(&pdev->dev, "unable to request legacy PWM\n");
ret = PTR_ERR(pb->pwm);
- goto err_alloc;
+ goto err_gpio;
}
}
@@ -229,7 +311,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pwm_set_period(pb->pwm, data->pwm_period_ns);
pb->period = pwm_get_period(pb->pwm);
- pb->lth_brightness = data->lth_brightness * (pb->period / max);
+ pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
@@ -239,7 +321,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
ret = PTR_ERR(bl);
- goto err_alloc;
+ goto err_gpio;
}
if (data->dft_brightness > data->max_brightness) {
@@ -255,6 +337,9 @@ static int pwm_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
return 0;
+err_gpio:
+ if (gpio_is_valid(pb->enable_gpio))
+ gpio_free(pb->enable_gpio);
err_alloc:
if (data->exit)
data->exit(&pdev->dev);
@@ -267,10 +352,11 @@ static int pwm_backlight_remove(struct platform_device *pdev)
struct pwm_bl_data *pb = bl_get_data(bl);
backlight_device_unregister(bl);
- pwm_config(pb->pwm, 0, pb->period);
- pwm_disable(pb->pwm);
+ pwm_backlight_power_off(pb);
+
if (pb->exit)
pb->exit(&pdev->dev);
+
return 0;
}
@@ -282,10 +368,12 @@ static int pwm_backlight_suspend(struct device *dev)
if (pb->notify)
pb->notify(pb->dev, 0);
- pwm_config(pb->pwm, 0, pb->period);
- pwm_disable(pb->pwm);
+
+ pwm_backlight_power_off(pb);
+
if (pb->notify_after)
pb->notify_after(pb->dev, 0);
+
return 0;
}
@@ -294,12 +382,19 @@ static int pwm_backlight_resume(struct device *dev)
struct backlight_device *bl = dev_get_drvdata(dev);
backlight_update_status(bl);
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
- pwm_backlight_resume);
+static const struct dev_pm_ops pwm_backlight_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = pwm_backlight_suspend,
+ .resume = pwm_backlight_resume,
+ .poweroff = pwm_backlight_suspend,
+ .restore = pwm_backlight_resume,
+#endif
+};
static struct platform_driver pwm_backlight_driver = {
.driver = {
@@ -317,4 +412,3 @@ module_platform_driver(pwm_backlight_driver);
MODULE_DESCRIPTION("PWM based Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pwm-backlight");
-
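The reworked pwm_bl.c maps a brightness level to a PWM duty cycle as duty = level * (period - lth) / scale + lth, where scale is now the largest entry of the levels[] table (or max_brightness when no table is given) rather than levels[max_brightness]. The snippet below is a small standalone arithmetic check of that formula in plain userspace C; the brightness table, period, and lth value are invented for illustration.

/* Standalone check of the duty-cycle formula introduced in pwm_bl.c above. */
#include <stdio.h>

static const unsigned int levels[] = { 0, 16, 64, 128, 255 };

int main(void)
{
	unsigned int period = 1000000;		/* PWM period in ns */
	unsigned int scale = 255;		/* largest entry in levels[] */
	unsigned int lth = 50000;		/* lth_brightness after scaling */
	unsigned int brightness = 3;		/* index into levels[] */

	unsigned int duty = (levels[brightness] * (period - lth) / scale) + lth;

	/* 128 * 950000 / 255 + 50000 = 526862 ns of a 1000000 ns period */
	printf("duty = %u ns\n", duty);
	return 0;
}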
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index b37bb1854bf..510a1bcf76f 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -735,13 +735,14 @@ static int s6e63m0_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->dev = &spi->dev;
- lcd->lcd_pd = spi->dev.platform_data;
+ lcd->lcd_pd = dev_get_platdata(&spi->dev);
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
return -EINVAL;
}
- ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
+ ld = devm_lcd_device_register(&spi->dev, "s6e63m0", &spi->dev, lcd,
+ &s6e63m0_lcd_ops);
if (IS_ERR(ld))
return PTR_ERR(ld);
@@ -751,12 +752,11 @@ static int s6e63m0_probe(struct spi_device *spi)
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
- bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd,
- &s6e63m0_backlight_ops, &props);
- if (IS_ERR(bd)) {
- ret = PTR_ERR(bd);
- goto out_lcd_unregister;
- }
+ bd = devm_backlight_device_register(&spi->dev, "s6e63m0bl-bl",
+ &spi->dev, lcd, &s6e63m0_backlight_ops,
+ &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
bd->props.brightness = MAX_BRIGHTNESS;
lcd->bd = bd;
@@ -798,10 +798,6 @@ static int s6e63m0_probe(struct spi_device *spi)
dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
return 0;
-
-out_lcd_unregister:
- lcd_device_unregister(ld);
- return ret;
}
static int s6e63m0_remove(struct spi_device *spi)
@@ -811,8 +807,6 @@ static int s6e63m0_remove(struct spi_device *spi)
s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
device_remove_file(&spi->dev, &dev_attr_gamma_table);
device_remove_file(&spi->dev, &dev_attr_gamma_mode);
- backlight_device_unregister(lcd->bd);
- lcd_device_unregister(lcd->ld);
return 0;
}
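
The s6e63m0 conversion above (and the tdo24m, tps65217 and wm831x ones below) relies on device-managed registration, which is why the error-path and remove-time unregister calls disappear. A minimal sketch of the pattern, using a hypothetical foo-bl platform driver rather than any of the drivers in this diff:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>

/* Hypothetical driver, for illustration only. */
static int foo_bl_update_status(struct backlight_device *bl)
{
	/* push bl->props.brightness to the hardware here */
	return 0;
}

static const struct backlight_ops foo_bl_ops = {
	.update_status	= foo_bl_update_status,
};

static int foo_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	struct backlight_device *bl;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 255;

	/* Device-managed: the backlight device is unregistered and freed
	 * automatically when the driver unbinds, so no remove() callback
	 * and no error-path backlight_device_unregister() are needed. */
	bl = devm_backlight_device_register(&pdev->dev, "foo-bl", &pdev->dev,
					    NULL, &foo_bl_ops, &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	platform_set_drvdata(pdev, bl);
	return 0;
}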
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 18cdf466d50..908016fc582 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -338,7 +338,7 @@ static int tdo24m_probe(struct spi_device *spi)
enum tdo24m_model model;
int err;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (pdata)
model = pdata->model;
else
@@ -385,21 +385,17 @@ static int tdo24m_probe(struct spi_device *spi)
return -EINVAL;
}
- lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
- lcd, &tdo24m_ops);
+ lcd->lcd_dev = devm_lcd_device_register(&spi->dev, "tdo24m", &spi->dev,
+ lcd, &tdo24m_ops);
if (IS_ERR(lcd->lcd_dev))
return PTR_ERR(lcd->lcd_dev);
spi_set_drvdata(spi, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
if (err)
- goto out_unregister;
+ return err;
return 0;
-
-out_unregister:
- lcd_device_unregister(lcd->lcd_dev);
- return err;
}
static int tdo24m_remove(struct spi_device *spi)
@@ -407,8 +403,6 @@ static int tdo24m_remove(struct spi_device *spi)
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
- lcd_device_unregister(lcd->lcd_dev);
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 9df66ac68b3..b8db9338cac 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -38,7 +38,7 @@ struct tosa_bl_data {
static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
{
- struct spi_device *spi = data->i2c->dev.platform_data;
+ struct spi_device *spi = dev_get_platdata(&data->i2c->dev);
i2c_smbus_write_byte_data(data->i2c, DAC_CH1, data->comadj);
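
dev_get_platdata(), used throughout these conversions in place of open-coded dev.platform_data dereferences, is only a thin typed accessor; in <linux/device.h> it is essentially:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}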
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index bf081573e5b..be5d636764b 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -198,7 +198,7 @@ static int tosa_lcd_probe(struct spi_device *spi)
ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
GPIOF_OUT_INIT_LOW, "tg #pwr");
if (ret < 0)
- goto err_gpio_tg;
+ return ret;
mdelay(60);
@@ -219,8 +219,6 @@ static int tosa_lcd_probe(struct spi_device *spi)
err_register:
tosa_lcd_tg_off(data);
-err_gpio_tg:
- spi_set_drvdata(spi, NULL);
return ret;
}
@@ -235,8 +233,6 @@ static int tosa_lcd_remove(struct spi_device *spi)
tosa_lcd_tg_off(data);
- spi_set_drvdata(spi, NULL);
-
return 0;
}
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 05782312aeb..cbba37e6836 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -287,12 +287,11 @@ static int tps65217_bl_probe(struct platform_device *pdev)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
} else {
- if (!pdev->dev.platform_data) {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
dev_err(&pdev->dev, "no platform data provided\n");
return -EINVAL;
}
-
- pdata = pdev->dev.platform_data;
}
tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
@@ -314,7 +313,7 @@ static int tps65217_bl_probe(struct platform_device *pdev)
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 100;
- tps65217_bl->bl = backlight_device_register(pdev->name,
+ tps65217_bl->bl = devm_backlight_device_register(&pdev->dev, pdev->name,
tps65217_bl->dev, tps65217_bl,
&tps65217_bl_ops, &bl_props);
if (IS_ERR(tps65217_bl->bl)) {
@@ -330,18 +329,8 @@ static int tps65217_bl_probe(struct platform_device *pdev)
return 0;
}
-static int tps65217_bl_remove(struct platform_device *pdev)
-{
- struct tps65217_bl *tps65217_bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(tps65217_bl->bl);
-
- return 0;
-}
-
static struct platform_driver tps65217_bl_driver = {
.probe = tps65217_bl_probe,
- .remove = tps65217_bl_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tps65217-bl",
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 9e5517a3a52..8b9455e9306 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -123,7 +123,7 @@ static const struct backlight_ops wm831x_backlight_ops = {
static int wm831x_backlight_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *wm831x_pdata;
+ struct wm831x_pdata *wm831x_pdata = dev_get_platdata(pdev->dev.parent);
struct wm831x_backlight_pdata *pdata;
struct wm831x_backlight_data *data;
struct backlight_device *bl;
@@ -131,12 +131,10 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
int ret, i, max_isel, isink_reg, dcdc_cfg;
/* We need platform data */
- if (pdev->dev.parent->platform_data) {
- wm831x_pdata = pdev->dev.parent->platform_data;
+ if (wm831x_pdata)
pdata = wm831x_pdata->backlight;
- } else {
+ else
pdata = NULL;
- }
if (!pdata) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -197,8 +195,8 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_isel;
- bl = backlight_device_register("wm831x", &pdev->dev, data,
- &wm831x_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, "wm831x", &pdev->dev,
+ data, &wm831x_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
return PTR_ERR(bl);
@@ -216,21 +214,12 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
return 0;
}
-static int wm831x_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
- return 0;
-}
-
static struct platform_driver wm831x_backlight_driver = {
.driver = {
.name = "wm831x-backlight",
.owner = THIS_MODULE,
},
.probe = wm831x_backlight_probe,
- .remove = wm831x_backlight_remove,
};
module_platform_driver(wm831x_backlight_driver);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 87f288bfc58..42b8f9d1101 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -761,19 +761,7 @@ static struct platform_driver bfin_bf54x_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init bfin_bf54x_driver_init(void)
-{
- return platform_driver_register(&bfin_bf54x_driver);
-}
-
-static void __exit bfin_bf54x_driver_cleanup(void)
-{
- platform_driver_unregister(&bfin_bf54x_driver);
-}
+module_platform_driver(bfin_bf54x_driver);
MODULE_DESCRIPTION("Blackfin BF54x TFT LCD Driver");
MODULE_LICENSE("GPL");
-
-module_init(bfin_bf54x_driver_init);
-module_exit(bfin_bf54x_driver_cleanup);
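
module_platform_driver(), substituted here and in several drivers below, generates exactly the kind of init/exit boilerplate being deleted; for a driver struct named bfin_bf54x_driver it expands to roughly:

static int __init bfin_bf54x_driver_init(void)
{
	return platform_driver_register(&bfin_bf54x_driver);
}
module_init(bfin_bf54x_driver_init);

static void __exit bfin_bf54x_driver_exit(void)
{
	platform_driver_unregister(&bfin_bf54x_driver);
}
module_exit(bfin_bf54x_driver_exit);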
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 48c0c4e38a6..b5cf1307a3d 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -664,19 +664,7 @@ static struct platform_driver bfin_t350mcqb_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init bfin_t350mcqb_driver_init(void)
-{
- return platform_driver_register(&bfin_t350mcqb_driver);
-}
-
-static void __exit bfin_t350mcqb_driver_cleanup(void)
-{
- platform_driver_unregister(&bfin_t350mcqb_driver);
-}
+module_platform_driver(bfin_t350mcqb_driver);
MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
MODULE_LICENSE("GPL");
-
-module_init(bfin_t350mcqb_driver_init);
-module_exit(bfin_t350mcqb_driver_cleanup);
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index b09701c7943..8556264b16b 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1167,9 +1167,8 @@ static int broadsheetfb_probe(struct platform_device *dev)
if (retval < 0)
goto err_unreg_fb;
- printk(KERN_INFO
- "fb%d: Broadsheet frame buffer, using %dK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Broadsheet frame buffer, using %dK of video memory\n",
+ videomemorysize >> 10);
return 0;
@@ -1217,19 +1216,7 @@ static struct platform_driver broadsheetfb_driver = {
.name = "broadsheetfb",
},
};
-
-static int __init broadsheetfb_init(void)
-{
- return platform_driver_register(&broadsheetfb_driver);
-}
-
-static void __exit broadsheetfb_exit(void)
-{
- platform_driver_unregister(&broadsheetfb_driver);
-}
-
-module_init(broadsheetfb_init);
-module_exit(broadsheetfb_exit);
+module_platform_driver(broadsheetfb_driver);
MODULE_DESCRIPTION("fbdev driver for Broadsheet controller");
MODULE_AUTHOR("Jaya Kumar");
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 60017fc634b..bc123d6947a 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -363,8 +363,6 @@ static int bw2_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index 153dd65b0ae..65f7c15f5fd 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -585,8 +585,7 @@ static int alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
if (ret < 0)
goto err_dealloc_cmap;
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
*rinfo = info;
return 0;
@@ -746,7 +745,6 @@ static void carminefb_remove(struct pci_dev *dev)
iounmap(hw->v_regs);
release_mem_region(fix.mmio_start, fix.mmio_len);
- pci_set_drvdata(dev, NULL);
pci_disable_device(dev);
kfree(hw);
}
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index baed57d3cff..a2bb276a8b2 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -181,7 +181,7 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
}
shift += bpp;
shift &= (32 - 1);
- if (!l) { l = 8; s++; };
+ if (!l) { l = 8; s++; }
}
/* write trailing bits */
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index ed3b8891e00..c79745b136b 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -330,7 +330,7 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
default:
ret = -ENOSYS;
break;
- };
+ }
if (!ret) {
sbus_writeb(cur_mode, &regs->mcr);
par->mode = mode;
@@ -343,7 +343,7 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
FBTYPE_MDICOLOR, 8,
info->fix.smem_len);
break;
- };
+ }
return ret;
}
@@ -583,8 +583,6 @@ static int cg14_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 9f63507ded3..64a89d5747e 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -446,8 +446,6 @@ static int cg3_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 3545decc748..70781fea092 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -624,7 +624,7 @@ static void cg6_init_fix(struct fb_info *info, int linebytes)
default:
cg6_cpu_name = "i386";
break;
- };
+ }
if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) {
if (info->fix.smem_len <= 0x100000)
cg6_card_name = "TGX";
@@ -839,8 +839,6 @@ static int cg6_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 97db3ba8f23..5aab9b9dc21 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -595,11 +595,6 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- if (var->xoffset < 0)
- var->xoffset = 0;
- if (var->yoffset < 0)
- var->yoffset = 0;
-
/* truncate xoffset and yoffset to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
@@ -2159,7 +2154,6 @@ static int cirrusfb_pci_register(struct pci_dev *pdev,
if (!ret)
return 0;
- pci_set_drvdata(pdev, NULL);
iounmap(info->screen_base);
err_release_legacy:
if (release_io_ports)
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index a9031498e10..d5533f4db1c 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -368,8 +368,7 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
lcd_clear(info);
- printk(KERN_INFO "fb%d: Cobalt server LCD frame buffer device\n",
- info->node);
+ fb_info(info, "Cobalt server LCD frame buffer device\n");
return 0;
}
@@ -395,19 +394,7 @@ static struct platform_driver cobalt_lcdfb_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init cobalt_lcdfb_init(void)
-{
- return platform_driver_register(&cobalt_lcdfb_driver);
-}
-
-static void __exit cobalt_lcdfb_exit(void)
-{
- platform_driver_unregister(&cobalt_lcdfb_driver);
-}
-
-module_init(cobalt_lcdfb_init);
-module_exit(cobalt_lcdfb_exit);
+module_platform_driver(cobalt_lcdfb_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoichi Yuasa");
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 35687fd5645..4ad24f2c647 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -3,7 +3,7 @@
* core code for console driver using HP's STI firmware
*
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2001-2003 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2001-2013 Helge Deller <deller@gmx.de>
* Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*
* TODO:
@@ -30,7 +30,7 @@
#include "../sticore.h"
-#define STI_DRIVERVERSION "Version 0.9a"
+#define STI_DRIVERVERSION "Version 0.9b"
static struct sti_struct *default_sti __read_mostly;
@@ -73,28 +73,34 @@ static const struct sti_init_flags default_init_flags = {
static int sti_init_graph(struct sti_struct *sti)
{
- struct sti_init_inptr_ext inptr_ext = { 0, };
- struct sti_init_inptr inptr = {
- .text_planes = 3, /* # of text planes (max 3 for STI) */
- .ext_ptr = STI_PTR(&inptr_ext)
- };
- struct sti_init_outptr outptr = { 0, };
+ struct sti_init_inptr *inptr = &sti->sti_data->init_inptr;
+ struct sti_init_inptr_ext *inptr_ext = &sti->sti_data->init_inptr_ext;
+ struct sti_init_outptr *outptr = &sti->sti_data->init_outptr;
unsigned long flags;
- int ret;
+ int ret, err;
spin_lock_irqsave(&sti->lock, flags);
- ret = STI_CALL(sti->init_graph, &default_init_flags, &inptr,
- &outptr, sti->glob_cfg);
+ memset(inptr, 0, sizeof(*inptr));
+ inptr->text_planes = 3; /* # of text planes (max 3 for STI) */
+ memset(inptr_ext, 0, sizeof(*inptr_ext));
+ inptr->ext_ptr = STI_PTR(inptr_ext);
+ outptr->errno = 0;
+
+ ret = sti_call(sti, sti->init_graph, &default_init_flags, inptr,
+ outptr, sti->glob_cfg);
+
+ if (ret >= 0)
+ sti->text_planes = outptr->text_planes;
+ err = outptr->errno;
spin_unlock_irqrestore(&sti->lock, flags);
if (ret < 0) {
- printk(KERN_ERR "STI init_graph failed (ret %d, errno %d)\n",ret,outptr.errno);
+ pr_err("STI init_graph failed (ret %d, errno %d)\n", ret, err);
return -1;
}
- sti->text_planes = outptr.text_planes;
return 0;
}
@@ -104,16 +110,18 @@ static const struct sti_conf_flags default_conf_flags = {
static void sti_inq_conf(struct sti_struct *sti)
{
- struct sti_conf_inptr inptr = { 0, };
+ struct sti_conf_inptr *inptr = &sti->sti_data->inq_inptr;
+ struct sti_conf_outptr *outptr = &sti->sti_data->inq_outptr;
unsigned long flags;
s32 ret;
- sti->outptr.ext_ptr = STI_PTR(&sti->outptr_ext);
+ outptr->ext_ptr = STI_PTR(&sti->sti_data->inq_outptr_ext);
do {
spin_lock_irqsave(&sti->lock, flags);
- ret = STI_CALL(sti->inq_conf, &default_conf_flags,
- &inptr, &sti->outptr, sti->glob_cfg);
+ memset(inptr, 0, sizeof(*inptr));
+ ret = sti_call(sti, sti->inq_conf, &default_conf_flags,
+ inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
} while (ret == 1);
}
@@ -126,7 +134,8 @@ static const struct sti_font_flags default_font_flags = {
void
sti_putc(struct sti_struct *sti, int c, int y, int x)
{
- struct sti_font_inptr inptr = {
+ struct sti_font_inptr *inptr = &sti->sti_data->font_inptr;
+ struct sti_font_inptr inptr_default = {
.font_start_addr= STI_PTR(sti->font->raw),
.index = c_index(sti, c),
.fg_color = c_fg(sti, c),
@@ -134,14 +143,15 @@ sti_putc(struct sti_struct *sti, int c, int y, int x)
.dest_x = x * sti->font_width,
.dest_y = y * sti->font_height,
};
- struct sti_font_outptr outptr = { 0, };
+ struct sti_font_outptr *outptr = &sti->sti_data->font_outptr;
s32 ret;
unsigned long flags;
do {
spin_lock_irqsave(&sti->lock, flags);
- ret = STI_CALL(sti->font_unpmv, &default_font_flags,
- &inptr, &outptr, sti->glob_cfg);
+ *inptr = inptr_default;
+ ret = sti_call(sti, sti->font_unpmv, &default_font_flags,
+ inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
} while (ret == 1);
}
@@ -156,7 +166,8 @@ void
sti_set(struct sti_struct *sti, int src_y, int src_x,
int height, int width, u8 color)
{
- struct sti_blkmv_inptr inptr = {
+ struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
+ struct sti_blkmv_inptr inptr_default = {
.fg_color = color,
.bg_color = color,
.src_x = src_x,
@@ -166,14 +177,15 @@ sti_set(struct sti_struct *sti, int src_y, int src_x,
.width = width,
.height = height,
};
- struct sti_blkmv_outptr outptr = { 0, };
+ struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
s32 ret;
unsigned long flags;
do {
spin_lock_irqsave(&sti->lock, flags);
- ret = STI_CALL(sti->block_move, &clear_blkmv_flags,
- &inptr, &outptr, sti->glob_cfg);
+ *inptr = inptr_default;
+ ret = sti_call(sti, sti->block_move, &clear_blkmv_flags,
+ inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
} while (ret == 1);
}
@@ -182,7 +194,8 @@ void
sti_clear(struct sti_struct *sti, int src_y, int src_x,
int height, int width, int c)
{
- struct sti_blkmv_inptr inptr = {
+ struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
+ struct sti_blkmv_inptr inptr_default = {
.fg_color = c_fg(sti, c),
.bg_color = c_bg(sti, c),
.src_x = src_x * sti->font_width,
@@ -192,14 +205,15 @@ sti_clear(struct sti_struct *sti, int src_y, int src_x,
.width = width * sti->font_width,
.height = height* sti->font_height,
};
- struct sti_blkmv_outptr outptr = { 0, };
+ struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
s32 ret;
unsigned long flags;
do {
spin_lock_irqsave(&sti->lock, flags);
- ret = STI_CALL(sti->block_move, &clear_blkmv_flags,
- &inptr, &outptr, sti->glob_cfg);
+ *inptr = inptr_default;
+ ret = sti_call(sti, sti->block_move, &clear_blkmv_flags,
+ inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
} while (ret == 1);
}
@@ -212,7 +226,8 @@ void
sti_bmove(struct sti_struct *sti, int src_y, int src_x,
int dst_y, int dst_x, int height, int width)
{
- struct sti_blkmv_inptr inptr = {
+ struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
+ struct sti_blkmv_inptr inptr_default = {
.src_x = src_x * sti->font_width,
.src_y = src_y * sti->font_height,
.dest_x = dst_x * sti->font_width,
@@ -220,14 +235,15 @@ sti_bmove(struct sti_struct *sti, int src_y, int src_x,
.width = width * sti->font_width,
.height = height* sti->font_height,
};
- struct sti_blkmv_outptr outptr = { 0, };
+ struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
s32 ret;
unsigned long flags;
do {
spin_lock_irqsave(&sti->lock, flags);
- ret = STI_CALL(sti->block_move, &default_blkmv_flags,
- &inptr, &outptr, sti->glob_cfg);
+ *inptr = inptr_default;
+ ret = sti_call(sti, sti->block_move, &default_blkmv_flags,
+ inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
} while (ret == 1);
}
@@ -284,7 +300,7 @@ __setup("sti=", sti_setup);
-static char *font_name[MAX_STI_ROMS] = { "VGA8x16", };
+static char *font_name[MAX_STI_ROMS];
static int font_index[MAX_STI_ROMS],
font_height[MAX_STI_ROMS],
font_width[MAX_STI_ROMS];
@@ -389,10 +405,10 @@ static void sti_dump_outptr(struct sti_struct *sti)
"%d used bits\n"
"%d planes\n"
"attributes %08x\n",
- sti->outptr.bits_per_pixel,
- sti->outptr.bits_used,
- sti->outptr.planes,
- sti->outptr.attributes));
+ sti->sti_data->inq_outptr.bits_per_pixel,
+ sti->sti_data->inq_outptr.bits_used,
+ sti->sti_data->inq_outptr.planes,
+ sti->sti_data->inq_outptr.attributes));
}
static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
@@ -402,24 +418,21 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
struct sti_glob_cfg_ext *glob_cfg_ext;
void *save_addr;
void *sti_mem_addr;
- const int save_addr_size = 1024; /* XXX */
- int i;
+ int i, size;
- if (!sti->sti_mem_request)
+ if (sti->sti_mem_request < 256)
sti->sti_mem_request = 256; /* STI default */
- glob_cfg = kzalloc(sizeof(*sti->glob_cfg), GFP_KERNEL);
- glob_cfg_ext = kzalloc(sizeof(*glob_cfg_ext), GFP_KERNEL);
- save_addr = kzalloc(save_addr_size, GFP_KERNEL);
- sti_mem_addr = kzalloc(sti->sti_mem_request, GFP_KERNEL);
+ size = sizeof(struct sti_all_data) + sti->sti_mem_request - 256;
- if (!(glob_cfg && glob_cfg_ext && save_addr && sti_mem_addr)) {
- kfree(glob_cfg);
- kfree(glob_cfg_ext);
- kfree(save_addr);
- kfree(sti_mem_addr);
+ sti->sti_data = kzalloc(size, STI_LOWMEM);
+ if (!sti->sti_data)
return -ENOMEM;
- }
+
+ glob_cfg = &sti->sti_data->glob_cfg;
+ glob_cfg_ext = &sti->sti_data->glob_cfg_ext;
+ save_addr = &sti->sti_data->save_addr;
+ sti_mem_addr = &sti->sti_data->sti_mem_addr;
glob_cfg->ext_ptr = STI_PTR(glob_cfg_ext);
glob_cfg->save_addr = STI_PTR(save_addr);
@@ -475,32 +488,31 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
return 0;
}
-#ifdef CONFIG_FB
+#ifdef CONFIG_FONTS
static struct sti_cooked_font *
sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
- const struct font_desc *fbfont;
+ const struct font_desc *fbfont = NULL;
unsigned int size, bpc;
void *dest;
struct sti_rom_font *nf;
struct sti_cooked_font *cooked_font;
- if (!fbfont_name || !strlen(fbfont_name))
- return NULL;
- fbfont = find_font(fbfont_name);
+ if (fbfont_name && strlen(fbfont_name))
+ fbfont = find_font(fbfont_name);
if (!fbfont)
fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0);
if (!fbfont)
return NULL;
- DPRINTK((KERN_DEBUG "selected %dx%d fb-font %s\n",
- fbfont->width, fbfont->height, fbfont->name));
+ pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
+ fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
size = bpc * 256;
size += sizeof(struct sti_rom_font);
- nf = kzalloc(size, GFP_KERNEL);
+ nf = kzalloc(size, STI_LOWMEM);
if (!nf)
return NULL;
@@ -637,7 +649,7 @@ static void *sti_bmode_font_raw(struct sti_cooked_font *f)
unsigned char *n, *p, *q;
int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font);
- n = kzalloc (4*size, GFP_KERNEL);
+ n = kzalloc(4*size, STI_LOWMEM);
if (!n)
return NULL;
p = n + 3;
@@ -673,7 +685,7 @@ static struct sti_rom *sti_get_bmode_rom (unsigned long address)
sti_bmode_rom_copy(address + BMODE_LAST_ADDR_OFFS, sizeof(size), &size);
size = (size+3) / 4;
- raw = kmalloc(size, GFP_KERNEL);
+ raw = kmalloc(size, STI_LOWMEM);
if (raw) {
sti_bmode_rom_copy(address, size, raw);
memmove (&raw->res004, &raw->type[0], 0x3c);
@@ -707,7 +719,7 @@ static struct sti_rom *sti_get_wmode_rom(unsigned long address)
/* read the ROM size directly from the struct in ROM */
size = gsc_readl(address + offsetof(struct sti_rom,last_addr));
- raw = kmalloc(size, GFP_KERNEL);
+ raw = kmalloc(size, STI_LOWMEM);
if (raw)
sti_rom_copy(address, size, raw);
@@ -743,6 +755,10 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
address = (unsigned long) STI_PTR(raw);
+ pr_info("STI ROM supports 32 %sbit firmware functions.\n",
+ raw->alt_code_type == ALT_CODE_TYPE_PA_RISC_64
+ ? "and 64 " : "");
+
sti->font_unpmv = address + (raw->font_unpmv & 0x03ffffff);
sti->block_move = address + (raw->block_move & 0x03ffffff);
sti->init_graph = address + (raw->init_graph & 0x03ffffff);
@@ -901,7 +917,8 @@ test_rom:
sti_dump_globcfg(sti->glob_cfg, sti->sti_mem_request);
sti_dump_outptr(sti);
- printk(KERN_INFO " graphics card name: %s\n", sti->outptr.dev_name );
+ pr_info(" graphics card name: %s\n",
+ sti->sti_data->inq_outptr.dev_name);
sti_roms[num_sti_roms] = sti;
num_sti_roms++;
@@ -1073,6 +1090,29 @@ struct sti_struct * sti_get_rom(unsigned int index)
}
EXPORT_SYMBOL(sti_get_rom);
+
+int sti_call(const struct sti_struct *sti, unsigned long func,
+ const void *flags, void *inptr, void *outptr,
+ struct sti_glob_cfg *glob_cfg)
+{
+ unsigned long _flags = STI_PTR(flags);
+ unsigned long _inptr = STI_PTR(inptr);
+ unsigned long _outptr = STI_PTR(outptr);
+ unsigned long _glob_cfg = STI_PTR(glob_cfg);
+ int ret;
+
+#ifdef CONFIG_64BIT
+ /* Check for overflow when using 32bit STI on 64bit kernel. */
+ if (WARN_ONCE(_flags>>32 || _inptr>>32 || _outptr>>32 || _glob_cfg>>32,
+ "Out of 32bit-range pointers!"))
+ return -1;
+#endif
+
+ ret = pdc_sti_call(func, _flags, _inptr, _outptr, _glob_cfg);
+
+ return ret;
+}
+
MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 67b77b40aa7..fdadef97923 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -471,8 +471,8 @@ try_again:
/* Register with fbdev layer */
if (register_framebuffer(&p->info) < 0)
return -ENXIO;
-
- printk(KERN_INFO "fb%d: control display adapter\n", p->info.node);
+
+ fb_info(&p->info, "control display adapter\n");
return 0;
}
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 57886787ead..b0a950f3697 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1641,67 +1641,6 @@ static void cyberpro_common_resume(struct cfb_info *cfb)
cyber2000fb_set_par(&cfb->fb);
}
-#ifdef CONFIG_ARCH_SHARK
-
-#include <mach/framebuffer.h>
-
-static int cyberpro_vl_probe(void)
-{
- struct cfb_info *cfb;
- int err = -ENOMEM;
-
- if (!request_mem_region(FB_START, FB_SIZE, "CyberPro2010"))
- return err;
-
- cfb = cyberpro_alloc_fb_info(ID_CYBERPRO_2010, "CyberPro2010");
- if (!cfb)
- goto failed_release;
-
- cfb->irq = -1;
- cfb->region = ioremap(FB_START, FB_SIZE);
- if (!cfb->region)
- goto failed_ioremap;
-
- cfb->regs = cfb->region + MMIO_OFFSET;
- cfb->fb.device = NULL;
- cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET;
- cfb->fb.fix.smem_start = FB_START;
-
- /*
- * Bring up the hardware. This is expected to enable access
- * to the linear memory region, and allow access to the memory
- * mapped registers. Also, mem_ctl1 and mem_ctl2 must be
- * initialised.
- */
- cyber2000fb_writeb(0x18, 0x46e8, cfb);
- cyber2000fb_writeb(0x01, 0x102, cfb);
- cyber2000fb_writeb(0x08, 0x46e8, cfb);
- cyber2000fb_writeb(EXT_BIU_MISC, 0x3ce, cfb);
- cyber2000fb_writeb(EXT_BIU_MISC_LIN_ENABLE, 0x3cf, cfb);
-
- cfb->mclk_mult = 0xdb;
- cfb->mclk_div = 0x54;
-
- err = cyberpro_common_probe(cfb);
- if (err)
- goto failed;
-
- if (int_cfb_info == NULL)
- int_cfb_info = cfb;
-
- return 0;
-
-failed:
- iounmap(cfb->region);
-failed_ioremap:
- cyberpro_free_fb_info(cfb);
-failed_release:
- release_mem_region(FB_START, FB_SIZE);
-
- return err;
-}
-#endif /* CONFIG_ARCH_SHARK */
-
/*
* PCI specific support.
*/
@@ -1871,11 +1810,6 @@ static void cyberpro_pci_remove(struct pci_dev *dev)
iounmap(cfb->region);
cyberpro_free_fb_info(cfb);
- /*
- * Ensure that the driver data is no longer
- * valid.
- */
- pci_set_drvdata(dev, NULL);
if (cfb == int_cfb_info)
int_cfb_info = NULL;
@@ -1948,28 +1882,19 @@ static int __init cyber2000fb_init(void)
cyber2000fb_setup(option);
#endif
-#ifdef CONFIG_ARCH_SHARK
- err = cyberpro_vl_probe();
- if (!err)
- ret = 0;
-#endif
-#ifdef CONFIG_PCI
err = pci_register_driver(&cyberpro_driver);
if (!err)
ret = 0;
-#endif
return ret ? err : 0;
}
module_init(cyber2000fb_init);
-#ifndef CONFIG_ARCH_SHARK
static void __exit cyberpro_exit(void)
{
pci_unregister_driver(&cyberpro_driver);
}
module_exit(cyberpro_exit);
-#endif
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index e030e17a83f..a1d74dd1198 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -129,7 +129,6 @@
#define LCD_NUM_BUFFERS 2
-#define WSI_TIMEOUT 50
#define PALETTE_SIZE 256
#define CLK_MIN_DIV 2
@@ -1314,7 +1313,7 @@ static struct fb_ops da8xx_fb_ops = {
static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
{
- struct da8xx_lcdc_platform_data *fb_pdata = dev->dev.platform_data;
+ struct da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&dev->dev);
struct fb_videomode *lcdc_info;
int i;
@@ -1336,7 +1335,7 @@ static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
static int fb_probe(struct platform_device *device)
{
struct da8xx_lcdc_platform_data *fb_pdata =
- device->dev.platform_data;
+ dev_get_platdata(&device->dev);
static struct resource *lcdc_regs;
struct lcd_ctrl_config *lcd_cfg;
struct fb_videomode *lcdc_info;
@@ -1548,7 +1547,7 @@ err_pm_runtime_disable:
}
#ifdef CONFIG_PM
-struct lcdc_context {
+static struct lcdc_context {
u32 clk_enable;
u32 ctrl;
u32 dma_ctrl;
@@ -1663,19 +1662,7 @@ static struct platform_driver da8xx_fb_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init da8xx_fb_init(void)
-{
- return platform_driver_register(&da8xx_fb_driver);
-}
-
-static void __exit da8xx_fb_cleanup(void)
-{
- platform_driver_unregister(&da8xx_fb_driver);
-}
-
-module_init(da8xx_fb_init);
-module_exit(da8xx_fb_cleanup);
+module_platform_driver(da8xx_fb_driver);
MODULE_DESCRIPTION("Framebuffer driver for TI da8xx/omap-l1xx");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 7f9ff75d0db..cd7c0df9f24 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -108,8 +108,8 @@ static int efifb_setup(char *options)
if (!*this_opt) continue;
for (i = 0; i < M_UNKNOWN; i++) {
- if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
- efifb_dmi_list[i].base != 0) {
+ if (efifb_dmi_list[i].base != 0 &&
+ !strcmp(this_opt, efifb_dmi_list[i].optname)) {
screen_info.lfb_base = efifb_dmi_list[i].base;
screen_info.lfb_linelength = efifb_dmi_list[i].stride;
screen_info.lfb_width = efifb_dmi_list[i].width;
@@ -322,8 +322,7 @@ static int efifb_probe(struct platform_device *dev)
printk(KERN_ERR "efifb: cannot register framebuffer\n");
goto err_fb_dealoc;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err_fb_dealoc:
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 28a837dfddd..35a0f533f1a 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -487,7 +487,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info)
static int ep93xxfb_probe(struct platform_device *pdev)
{
- struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
+ struct ep93xxfb_mach_info *mach_info = dev_get_platdata(&pdev->dev);
struct fb_info *info;
struct ep93xx_fbi *fbi;
struct resource *res;
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1b035b2eb6b..1129d0e9e64 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -16,6 +16,7 @@ if EXYNOS_VIDEO
config EXYNOS_MIPI_DSI
bool "EXYNOS MIPI DSI driver support."
depends on ARCH_S5PV210 || ARCH_EXYNOS
+ select GENERIC_PHY
help
This enables support for MIPI-DSI device.
@@ -29,7 +30,7 @@ config EXYNOS_LCD_S6E8AX0
config EXYNOS_DP
bool "EXYNOS DP driver support"
- depends on ARCH_EXYNOS
+ depends on OF && ARCH_EXYNOS
default n
help
This enables support for DP device.
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 12bbede3b09..5e1a7158005 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -19,8 +19,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of.h>
-
-#include <video/exynos_dp.h>
+#include <linux/phy/phy.h>
#include "exynos_dp_core.h"
@@ -894,26 +893,17 @@ static void exynos_dp_hotplug(struct work_struct *work)
dev_err(dp->dev, "unable to config video\n");
}
-#ifdef CONFIG_OF
-static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
+static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
{
struct device_node *dp_node = dev->of_node;
- struct exynos_dp_platdata *pd;
struct video_info *dp_video_config;
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- dev_err(dev, "memory allocation for pdata failed\n");
- return ERR_PTR(-ENOMEM);
- }
dp_video_config = devm_kzalloc(dev,
sizeof(*dp_video_config), GFP_KERNEL);
-
if (!dp_video_config) {
dev_err(dev, "memory allocation for video config failed\n");
return ERR_PTR(-ENOMEM);
}
- pd->video_info = dp_video_config;
dp_video_config->h_sync_polarity =
of_property_read_bool(dp_node, "hsync-active-high");
@@ -960,7 +950,7 @@ static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
return ERR_PTR(-EINVAL);
}
- return pd;
+ return dp_video_config;
}
static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
@@ -971,8 +961,11 @@ static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
if (!dp_phy_node) {
- dev_err(dp->dev, "could not find dptx-phy node\n");
- return -ENODEV;
+ dp->phy = devm_phy_get(dp->dev, "dp");
+ if (IS_ERR(dp->phy))
+ return PTR_ERR(dp->phy);
+ else
+ return 0;
}
if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
@@ -1003,48 +996,34 @@ err:
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
{
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg |= dp->enable_mask;
- __raw_writel(reg, dp->phy_addr);
-}
-
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg &= ~(dp->enable_mask);
- __raw_writel(reg, dp->phy_addr);
-}
-#else
-static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
-{
- return NULL;
-}
-
-static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
-{
- return -EINVAL;
-}
-
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
-{
- return;
+ if (dp->phy) {
+ phy_power_on(dp->phy);
+ } else if (dp->phy_addr) {
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg |= dp->enable_mask;
+ __raw_writel(reg, dp->phy_addr);
+ }
}
static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
{
- return;
+ if (dp->phy) {
+ phy_power_off(dp->phy);
+ } else if (dp->phy_addr) {
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg &= ~(dp->enable_mask);
+ __raw_writel(reg, dp->phy_addr);
+ }
}
-#endif /* CONFIG_OF */
static int exynos_dp_probe(struct platform_device *pdev)
{
struct resource *res;
struct exynos_dp_device *dp;
- struct exynos_dp_platdata *pdata;
int ret = 0;
@@ -1057,21 +1036,13 @@ static int exynos_dp_probe(struct platform_device *pdev)
dp->dev = &pdev->dev;
- if (pdev->dev.of_node) {
- pdata = exynos_dp_dt_parse_pdata(&pdev->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev);
+ if (IS_ERR(dp->video_info))
+ return PTR_ERR(dp->video_info);
- ret = exynos_dp_dt_parse_phydata(dp);
- if (ret)
- return ret;
- } else {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- return -EINVAL;
- }
- }
+ ret = exynos_dp_dt_parse_phydata(dp);
+ if (ret)
+ return ret;
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
@@ -1095,15 +1066,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
- dp->video_info = pdata->video_info;
-
- if (pdev->dev.of_node) {
- if (dp->phy_addr)
- exynos_dp_phy_init(dp);
- } else {
- if (pdata->phy_init)
- pdata->phy_init();
- }
+ exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
@@ -1121,18 +1084,11 @@ static int exynos_dp_probe(struct platform_device *pdev)
static int exynos_dp_remove(struct platform_device *pdev)
{
- struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
flush_work(&dp->hotplug_work);
- if (pdev->dev.of_node) {
- if (dp->phy_addr)
- exynos_dp_phy_exit(dp);
- } else {
- if (pdata->phy_exit)
- pdata->phy_exit();
- }
+ exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
@@ -1143,20 +1099,13 @@ static int exynos_dp_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
- struct exynos_dp_platdata *pdata = dev->platform_data;
struct exynos_dp_device *dp = dev_get_drvdata(dev);
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
- if (dev->of_node) {
- if (dp->phy_addr)
- exynos_dp_phy_exit(dp);
- } else {
- if (pdata->phy_exit)
- pdata->phy_exit();
- }
+ exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
@@ -1165,16 +1114,9 @@ static int exynos_dp_suspend(struct device *dev)
static int exynos_dp_resume(struct device *dev)
{
- struct exynos_dp_platdata *pdata = dev->platform_data;
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- if (dev->of_node) {
- if (dp->phy_addr)
- exynos_dp_phy_init(dp);
- } else {
- if (pdata->phy_init)
- pdata->phy_init();
- }
+ exynos_dp_phy_init(dp);
clk_prepare_enable(dp->clock);
@@ -1203,7 +1145,7 @@ static struct platform_driver exynos_dp_driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
.pm = &exynos_dp_pm_ops,
- .of_match_table = of_match_ptr(exynos_dp_match),
+ .of_match_table = exynos_dp_match,
},
};
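
The exynos_dp conversion above drops the platform-data phy_init/phy_exit callbacks in favour of the generic PHY framework; a minimal consumer-side sketch of that API (foo_enable_phy() is hypothetical, the "dp" name mirrors the devm_phy_get() call in the hunk):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int foo_enable_phy(struct device *dev)
{
	struct phy *phy;

	phy = devm_phy_get(dev, "dp");	/* looked up by name, e.g. from DT */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	return phy_power_on(phy);	/* balanced later by phy_power_off(phy) */
}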
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 6c567bbf2fb..607e36d0c14 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -13,6 +13,99 @@
#ifndef _EXYNOS_DP_CORE_H
#define _EXYNOS_DP_CORE_H
+#define DP_TIMEOUT_LOOP_COUNT 100
+#define MAX_CR_LOOP 5
+#define MAX_EQ_LOOP 5
+
+enum link_rate_type {
+ LINK_RATE_1_62GBPS = 0x06,
+ LINK_RATE_2_70GBPS = 0x0a
+};
+
+enum link_lane_count_type {
+ LANE_COUNT1 = 1,
+ LANE_COUNT2 = 2,
+ LANE_COUNT4 = 4
+};
+
+enum link_training_state {
+ START,
+ CLOCK_RECOVERY,
+ EQUALIZER_TRAINING,
+ FINISHED,
+ FAILED
+};
+
+enum voltage_swing_level {
+ VOLTAGE_LEVEL_0,
+ VOLTAGE_LEVEL_1,
+ VOLTAGE_LEVEL_2,
+ VOLTAGE_LEVEL_3,
+};
+
+enum pre_emphasis_level {
+ PRE_EMPHASIS_LEVEL_0,
+ PRE_EMPHASIS_LEVEL_1,
+ PRE_EMPHASIS_LEVEL_2,
+ PRE_EMPHASIS_LEVEL_3,
+};
+
+enum pattern_set {
+ PRBS7,
+ D10_2,
+ TRAINING_PTN1,
+ TRAINING_PTN2,
+ DP_NONE
+};
+
+enum color_space {
+ COLOR_RGB,
+ COLOR_YCBCR422,
+ COLOR_YCBCR444
+};
+
+enum color_depth {
+ COLOR_6,
+ COLOR_8,
+ COLOR_10,
+ COLOR_12
+};
+
+enum color_coefficient {
+ COLOR_YCBCR601,
+ COLOR_YCBCR709
+};
+
+enum dynamic_range {
+ VESA,
+ CEA
+};
+
+enum pll_status {
+ PLL_UNLOCKED,
+ PLL_LOCKED
+};
+
+enum clock_recovery_m_value_type {
+ CALCULATED_M,
+ REGISTER_M
+};
+
+enum video_timing_recognition_type {
+ VIDEO_TIMING_FROM_CAPTURE,
+ VIDEO_TIMING_FROM_REGISTER
+};
+
+enum analog_power_block {
+ AUX_BLOCK,
+ CH0_BLOCK,
+ CH1_BLOCK,
+ CH2_BLOCK,
+ CH3_BLOCK,
+ ANALOG_TOTAL,
+ POWER_ALL
+};
+
enum dp_irq_type {
DP_IRQ_TYPE_HP_CABLE_IN,
DP_IRQ_TYPE_HP_CABLE_OUT,
@@ -20,6 +113,22 @@ enum dp_irq_type {
DP_IRQ_TYPE_UNKNOWN,
};
+struct video_info {
+ char *name;
+
+ bool h_sync_polarity;
+ bool v_sync_polarity;
+ bool interlaced;
+
+ enum color_space color_space;
+ enum dynamic_range dynamic_range;
+ enum color_coefficient ycbcr_coeff;
+ enum color_depth color_depth;
+
+ enum link_rate_type link_rate;
+ enum link_lane_count_type lane_count;
+};
+
struct link_train {
int eq_loop;
int cr_loop[4];
@@ -42,6 +151,7 @@ struct exynos_dp_device {
struct video_info *video_info;
struct link_train link_train;
struct work_struct hotplug_work;
+ struct phy *phy;
};
/* exynos_dp_reg.c */
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 29d9d035c73..b70da5052ff 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -14,8 +14,6 @@
#include <linux/io.h>
#include <linux/delay.h>
-#include <video/exynos_dp.h>
-
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 32e540600f9..cee9602f9a7 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -30,6 +30,7 @@
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
+#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@@ -140,7 +141,6 @@ static int exynos_mipi_dsi_early_blank_mode(struct mipi_dsim_device *dsim,
static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power)
{
- struct platform_device *pdev = to_platform_device(dsim->dev);
struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -156,8 +156,7 @@ static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power)
exynos_mipi_regulator_enable(dsim);
/* enable MIPI-DSI PHY. */
- if (dsim->pd->phy_enable)
- dsim->pd->phy_enable(pdev, true);
+ phy_power_on(dsim->phy);
clk_enable(dsim->clock);
@@ -373,6 +372,10 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
return ret;
}
+ dsim->phy = devm_phy_get(&pdev->dev, "dsim");
+ if (IS_ERR(dsim->phy))
+ return PTR_ERR(dsim->phy);
+
dsim->clock = devm_clk_get(&pdev->dev, "dsim0");
if (IS_ERR(dsim->clock)) {
dev_err(&pdev->dev, "failed to get dsim clock source\n");
@@ -439,8 +442,7 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
exynos_mipi_regulator_enable(dsim);
/* enable MIPI-DSI PHY. */
- if (dsim->pd->phy_enable)
- dsim->pd->phy_enable(pdev, true);
+ phy_power_on(dsim->phy);
exynos_mipi_update_cfg(dsim);
@@ -504,9 +506,8 @@ static int exynos_mipi_dsi_suspend(struct device *dev)
if (client_drv && client_drv->suspend)
client_drv->suspend(client_dev);
- /* enable MIPI-DSI PHY. */
- if (dsim->pd->phy_enable)
- dsim->pd->phy_enable(pdev, false);
+ /* disable MIPI-DSI PHY. */
+ phy_power_off(dsim->phy);
clk_disable(dsim->clock);
@@ -536,8 +537,7 @@ static int exynos_mipi_dsi_resume(struct device *dev)
exynos_mipi_regulator_enable(dsim);
/* enable MIPI-DSI PHY. */
- if (dsim->pd->phy_enable)
- dsim->pd->phy_enable(pdev, true);
+ phy_power_on(dsim->phy);
clk_enable(dsim->clock);
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 520fc9bd887..85edabfdef5 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -220,7 +220,7 @@ int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
case MIPI_DSI_DCS_LONG_WRITE:
{
unsigned int size, payload = 0;
- INIT_COMPLETION(dsim_wr_comp);
+ reinit_completion(&dsim_wr_comp);
size = data_size * 4;
@@ -356,7 +356,7 @@ int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
msleep(20);
mutex_lock(&dsim->lock);
- INIT_COMPLETION(dsim_rd_comp);
+ reinit_completion(&dsim_rd_comp);
exynos_mipi_dsi_rd_tx_header(dsim,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, req_size);
@@ -376,6 +376,7 @@ int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
"data id %x is not supported current DSI spec.\n",
data_id);
+ mutex_unlock(&dsim->lock);
return -EINVAL;
}
@@ -667,7 +668,7 @@ int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim)
default:
dev_info(dsim->dev, "data lane is invalid.\n");
return -EINVAL;
- };
+ }
exynos_mipi_dsi_sw_reset(dsim);
exynos_mipi_dsi_func_reset(dsim);
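
INIT_COMPLETION() is replaced by reinit_completion() in the two hunks above; the surrounding rearm-and-wait pattern typically looks like this (hypothetical xfer_done completion, not code from this driver):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(xfer_done);

static int start_and_wait(void)
{
	/* Re-arm before kicking off a new transfer; the IRQ handler is
	 * expected to call complete(&xfer_done) when the hardware is done. */
	reinit_completion(&xfer_done);

	/* ... start the transfer here ... */

	if (!wait_for_completion_timeout(&xfer_done, msecs_to_jiffies(250)))
		return -ETIMEDOUT;
	return 0;
}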
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
index 27fc956166f..6db9ebd042a 100644
--- a/drivers/video/fb-puv3.c
+++ b/drivers/video/fb-puv3.c
@@ -713,9 +713,8 @@ static int unifb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
- printk(KERN_INFO
- "fb%d: Virtual frame buffer device, using %dM of video memory\n",
- info->node, UNIFB_MEMSIZE >> 20);
+ fb_info(info, "Virtual frame buffer device, using %dM of video memory\n",
+ UNIFB_MEMSIZE >> 20);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index dacaf74256a..010d19105eb 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1108,14 +1108,16 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
if (!ret && copy_to_user(argp, &var, sizeof(var)))
ret = -EFAULT;
break;
@@ -1144,12 +1146,14 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPAN_DISPLAY:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
ret = fb_pan_display(info, &var);
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
break;
@@ -1184,23 +1188,27 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
}
event.data = &con2fb;
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
break;
case FBIOBLANK:
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_blank(info, arg);
info->flags &= ~FBINFO_MISC_USEREVENT;
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
break;
default:
if (!lock_fb_info(info))
@@ -1660,12 +1668,15 @@ static int do_register_framebuffer(struct fb_info *fb_info)
registered_fb[i] = fb_info;
event.info = fb_info;
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
- console_unlock();
unlock_fb_info(fb_info);
+ console_unlock();
return 0;
}
@@ -1678,13 +1689,16 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
- console_unlock();
unlock_fb_info(fb_info);
+ console_unlock();
if (ret)
return -EINVAL;
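
The fbmem.c hunks above (and the fbsysfs.c ones that follow) all make the same change: console_lock() is now taken before lock_fb_info() and released after unlock_fb_info(), so the two locks are always acquired in one consistent order. The resulting shape at every call site is:

	console_lock();
	if (!lock_fb_info(info)) {
		console_unlock();
		return -ENODEV;
	}

	/* ... operate on the framebuffer with both locks held ... */

	unlock_fb_info(info);
	console_unlock();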
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index ef476b02fbe..53444ac19fe 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,9 +177,12 @@ static ssize_t store_modes(struct device *device,
if (i * sizeof(struct fb_videomode) != count)
return -EINVAL;
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
&fb_info->modelist);
@@ -189,8 +192,8 @@ static ssize_t store_modes(struct device *device,
} else
fb_destroy_modelist(&old_list);
- console_unlock();
unlock_fb_info(fb_info);
+ console_unlock();
return 0;
}
@@ -404,12 +407,16 @@ static ssize_t store_fbstate(struct device *device,
state = simple_strtoul(buf, &last, 0);
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
fb_set_suspend(fb_info, (int)state);
- console_unlock();
+
unlock_fb_info(fb_info);
+ console_unlock();
return count;
}
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 6d2744794dd..4c4ffa61ae2 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -1035,8 +1035,6 @@ static int ffb_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index c99c9671302..e69d47af993 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -289,7 +289,7 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
zorro_release_device(z);
return -EINVAL;
}
- printk("fb%d: %s frame buffer device\n", info->node, fb_fix.id);
+ fb_info(info, "%s frame buffer device\n", fb_fix.id);
return 0;
}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6dd72250111..e8758b9c3bc 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -31,6 +31,8 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <sysdev/fsl_soc.h>
#include <linux/fsl-diu-fb.h>
@@ -1102,7 +1104,7 @@ static int fsl_diu_cursor(struct fb_info *info, struct fb_cursor *cursor)
fsl_diu_load_cursor_image(info, image, bg, fg,
cursor->image.width, cursor->image.height);
- };
+ }
/*
* Show or hide the cursor. The cursor data is always stored in the
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index ceab37020ff..4c7cb368a9d 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1236,9 +1236,9 @@ static int gbefb_probe(struct platform_device *p_dev)
platform_set_drvdata(p_dev, info);
gbefb_create_sysfs(&p_dev->dev);
- printk(KERN_INFO "fb%d: %s rev %d @ 0x%08x using %dkB memory\n",
- info->node, info->fix.id, gbe_revision, (unsigned) GBE_BASE,
- gbe_mem_size >> 10);
+ fb_info(info, "%s rev %d @ 0x%08x using %dkB memory\n",
+ info->fix.id, gbe_revision, (unsigned)GBE_BASE,
+ gbe_mem_size >> 10);
return 0;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index ebbaada7b94..2794ba11f33 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -357,7 +357,7 @@ static int gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
pci_set_drvdata(pdev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
@@ -399,7 +399,6 @@ static void gx1fb_remove(struct pci_dev *pdev)
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
fb_dealloc_cmap(&info->cmap);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 19f0c1add74..1790f14bab1 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -423,7 +423,7 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
pci_set_drvdata(pdev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
@@ -471,7 +471,6 @@ static void gxfb_remove(struct pci_dev *pdev)
pci_release_region(pdev, 1);
fb_dealloc_cmap(&info->cmap);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 4dd7b556696..9e1d19d673a 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -555,8 +555,7 @@ static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
pci_set_drvdata(pdev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
@@ -606,7 +605,6 @@ static void lxfb_remove(struct pci_dev *pdev)
pci_release_region(pdev, 3);
fb_dealloc_cmap(&info->cmap);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index 861109e7de1..c078701f15f 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -496,7 +496,6 @@ static int grvga_probe(struct platform_device *dev)
return 0;
free_mem:
- dev_set_drvdata(&dev->dev, NULL);
if (grvga_fix_addr)
iounmap((void *)virtual_start);
else
@@ -530,7 +529,6 @@ static int grvga_remove(struct platform_device *device)
kfree((void *)info->screen_base);
framebuffer_release(info);
- dev_set_drvdata(&device->dev, NULL);
}
return 0;
@@ -557,19 +555,7 @@ static struct platform_driver grvga_driver = {
.remove = grvga_remove,
};
-
-static int __init grvga_init(void)
-{
- return platform_driver_register(&grvga_driver);
-}
-
-static void __exit grvga_exit(void)
-{
- platform_driver_unregister(&grvga_driver);
-}
-
-module_init(grvga_init);
-module_exit(grvga_exit);
+module_platform_driver(grvga_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aeroflex Gaisler");
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index c35663f6a54..135d78a0258 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -698,8 +698,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "gxt4500: cannot register framebuffer\n");
goto err_free_cmap;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 59d23181fdb..f64120ec919 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -261,9 +261,8 @@ static int hecubafb_probe(struct platform_device *dev)
goto err_fbreg;
platform_set_drvdata(dev, info);
- printk(KERN_INFO
- "fb%d: Hecuba frame buffer device, using %dK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Hecuba frame buffer device, using %dK of video memory\n",
+ videomemorysize >> 10);
/* this inits the dpy */
retval = par->board->init(par);
@@ -305,19 +304,7 @@ static struct platform_driver hecubafb_driver = {
.name = "hecubafb",
},
};
-
-static int __init hecubafb_init(void)
-{
- return platform_driver_register(&hecubafb_driver);
-}
-
-static void __exit hecubafb_exit(void)
-{
- platform_driver_unregister(&hecubafb_driver);
-}
-
-module_init(hecubafb_init);
-module_exit(hecubafb_exit);
+module_platform_driver(hecubafb_driver);
MODULE_DESCRIPTION("fbdev driver for Hecuba/Apollo controller");
MODULE_AUTHOR("Jaya Kumar");
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 1e9e2d819d1..5ff9fe2116a 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -586,8 +586,7 @@ static int hgafb_probe(struct platform_device *pdev)
return -EINVAL;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
platform_set_drvdata(pdev, info);
return 0;
}
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index c2414d6ab64..a648d5186c6 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -405,8 +405,7 @@ static int hitfb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index b802f93cef5..a1b7e5fa9b0 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -298,8 +298,7 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
if (ret < 0)
goto dealloc_cmap;
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
return 0;
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index 8ac99b87c07..130708f9643 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -575,6 +575,10 @@ static int hvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
+static int hvfb_blank(int blank, struct fb_info *info)
+{
+ return 1; /* get fb_blank to set the colormap to all black */
+}
static struct fb_ops hvfb_ops = {
.owner = THIS_MODULE,
@@ -584,6 +588,7 @@ static struct fb_ops hvfb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_blank = hvfb_blank,
};
@@ -795,12 +800,21 @@ static int hvfb_remove(struct hv_device *hdev)
}
+static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = {
+ {
+ .vendor = PCI_VENDOR_ID_MICROSOFT,
+ .device = PCI_DEVICE_ID_HYPERV_VIDEO,
+ },
+ { /* end of list */ }
+};
+
static const struct hv_vmbus_device_id id_table[] = {
/* Synthetic Video Device GUID */
{HV_SYNTHVID_GUID},
{}
};
+MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver hvfb_drv = {
@@ -810,14 +824,43 @@ static struct hv_driver hvfb_drv = {
.remove = hvfb_remove,
};
+static int hvfb_pci_stub_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return 0;
+}
+
+static void hvfb_pci_stub_remove(struct pci_dev *pdev)
+{
+}
+
+static struct pci_driver hvfb_pci_stub_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_stub_id_table,
+ .probe = hvfb_pci_stub_probe,
+ .remove = hvfb_pci_stub_remove,
+};
static int __init hvfb_drv_init(void)
{
- return vmbus_driver_register(&hvfb_drv);
+ int ret;
+
+ ret = vmbus_driver_register(&hvfb_drv);
+ if (ret != 0)
+ return ret;
+
+ ret = pci_register_driver(&hvfb_pci_stub_driver);
+ if (ret != 0) {
+ vmbus_driver_unregister(&hvfb_drv);
+ return ret;
+ }
+
+ return 0;
}
static void __exit hvfb_drv_exit(void)
{
+ pci_unregister_driver(&hvfb_pci_stub_driver);
vmbus_driver_unregister(&hvfb_drv);
}
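The hyperv_fb change above registers a do-nothing PCI stub alongside the vmbus driver, apparently so the module also claims the emulated PCI video device presented to Generation-1 guests instead of leaving it unbound. The DEFINE_PCI_DEVICE_TABLE() wrapper it uses is, at this point in the tree, little more than shorthand for a const pci_device_id array; a hedged equivalent using PCI_DEVICE() from <linux/pci.h>:

/* Roughly equivalent ID table without the DEFINE_PCI_DEVICE_TABLE() wrapper: */
static const struct pci_device_id pci_stub_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, PCI_DEVICE_ID_HYPERV_VIDEO) },
	{ }	/* terminating entry */
};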
diff --git a/drivers/video/i740fb.c b/drivers/video/i740fb.c
index 6c483881895..ca7c9df193b 100644
--- a/drivers/video/i740fb.c
+++ b/drivers/video/i740fb.c
@@ -203,8 +203,7 @@ static int i740fb_release(struct fb_info *info, int user)
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
- printk(KERN_ERR "fb%d: release called with zero refcount\n",
- info->node);
+ fb_err(info, "release called with zero refcount\n");
mutex_unlock(&(par->open_lock));
return -EINVAL;
}
@@ -1067,7 +1066,7 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
par->has_sgram = !((tmp & DRAM_RAS_TIMING) ||
(tmp & DRAM_RAS_PRECHARGE));
- printk(KERN_INFO "fb%d: Intel740 on %s, %ld KB %s\n", info->node,
+ fb_info(info, "Intel740 on %s, %ld KB %s\n",
pci_name(dev), info->screen_size >> 10,
par->has_sgram ? "SGRAM" : "SDRAM");
@@ -1143,8 +1142,7 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_reg_framebuffer;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info);
#ifdef CONFIG_MTRR
if (mtrr) {
@@ -1194,7 +1192,6 @@ static void i740fb_remove(struct pci_dev *dev)
pci_iounmap(dev, info->screen_base);
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 4ce3438ade6..038192ac736 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2129,7 +2129,6 @@ static void __exit i810fb_remove_pci(struct pci_dev *dev)
unregister_framebuffer(info);
i810fb_release_resource(info, par);
- pci_set_drvdata(dev, NULL);
printk("cleanup_module: unloaded i810 framebuffer device\n");
}
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 79cbfa7d1a9..486f1889741 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -360,9 +360,8 @@ static int __init iga_init(struct fb_info *info, struct iga_par *par)
if (register_framebuffer(info) < 0)
return 0;
- printk("fb%d: %s frame buffer device at 0x%08lx [%dMB VRAM]\n",
- info->node, info->fix.id,
- par->frame_buffer_phys, info->fix.smem_len >> 20);
+ fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
+ info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
iga_blank_border(par);
return 1;
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index d5220cc90e9..aae10ce74f1 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -1461,8 +1461,8 @@ static void init_imstt(struct fb_info *info)
}
tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
- printk("fb%u: %s frame buffer; %uMB vram; chip version %u\n",
- info->node, info->fix.id, info->fix.smem_len >> 20, tmp);
+ fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
+ info->fix.id, info->fix.smem_len >> 20, tmp);
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 38733ac2b69..44ee678481d 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -755,7 +755,7 @@ static int imxfb_resume(struct platform_device *dev)
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata = pdev->dev.platform_data;
+ struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct fb_info *info = dev_get_drvdata(&pdev->dev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
@@ -877,7 +877,7 @@ static int imxfb_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
if (!info)
@@ -1066,7 +1066,7 @@ static int imxfb_remove(struct platform_device *pdev)
#endif
unregister_framebuffer(info);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
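The pdev->dev.platform_data to dev_get_platdata() conversions here (and in mbxfb, mx3fb, nuc900fb and the omapfb drivers below) are mechanical: the helper is a trivial accessor in <linux/device.h>, roughly:

/* dev_get_platdata() is essentially this inline accessor: */
static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}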
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 8209e46c5d2..b847d530471 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -931,8 +931,6 @@ static void intelfb_pci_unregister(struct pci_dev *pdev)
return;
cleanup(dinfo);
-
- pci_set_drvdata(pdev, NULL);
}
/***************************************************************
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 2c49112fdd6..87790e9644d 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -99,9 +99,9 @@
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
-#define JZ_LCD_CMD_SOF_IRQ BIT(15)
-#define JZ_LCD_CMD_EOF_IRQ BIT(16)
-#define JZ_LCD_CMD_ENABLE_PAL BIT(12)
+#define JZ_LCD_CMD_SOF_IRQ BIT(31)
+#define JZ_LCD_CMD_EOF_IRQ BIT(30)
+#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
#define JZ_LCD_SYNC_MASK 0x3ff
@@ -471,7 +471,7 @@ static int jzfb_set_par(struct fb_info *info)
writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
if (!jzfb->is_enabled)
- clk_disable(jzfb->ldclk);
+ clk_disable_unprepare(jzfb->ldclk);
mutex_unlock(&jzfb->lock);
@@ -485,7 +485,7 @@ static void jzfb_enable(struct jzfb *jzfb)
{
uint32_t ctrl;
- clk_enable(jzfb->ldclk);
+ clk_prepare_enable(jzfb->ldclk);
jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
@@ -514,7 +514,7 @@ static void jzfb_disable(struct jzfb *jzfb)
jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
- clk_disable(jzfb->ldclk);
+ clk_disable_unprepare(jzfb->ldclk);
}
static int jzfb_blank(int blank_mode, struct fb_info *info)
@@ -693,7 +693,7 @@ static int jzfb_probe(struct platform_device *pdev)
fb_alloc_cmap(&fb->cmap, 256, 0);
- clk_enable(jzfb->ldclk);
+ clk_prepare_enable(jzfb->ldclk);
jzfb->is_enabled = 1;
writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
@@ -763,7 +763,7 @@ static int jzfb_suspend(struct device *dev)
static int jzfb_resume(struct device *dev)
{
struct jzfb *jzfb = dev_get_drvdata(dev);
- clk_enable(jzfb->ldclk);
+ clk_prepare_enable(jzfb->ldclk);
mutex_lock(&jzfb->lock);
if (jzfb->is_enabled)
@@ -798,18 +798,7 @@ static struct platform_driver jzfb_driver = {
.pm = JZFB_PM_OPS,
},
};
-
-static int __init jzfb_init(void)
-{
- return platform_driver_register(&jzfb_driver);
-}
-module_init(jzfb_init);
-
-static void __exit jzfb_exit(void)
-{
- platform_driver_unregister(&jzfb_driver);
-}
-module_exit(jzfb_exit);
+module_platform_driver(jzfb_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 6157f74ac60..50c857477e4 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -623,7 +623,6 @@ static int kyrofb_ioctl(struct fb_info *info,
"command instead.\n");
return -EINVAL;
}
- break;
case KYRO_IOCTL_UVSTRIDE:
if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
return -EFAULT;
@@ -736,10 +735,10 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (register_framebuffer(info) < 0)
goto out_unmap;
- printk("fb%d: %s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
- info->node, info->fix.id, info->var.xres,
- info->var.yres, info->var.bits_per_pixel, size >> 10,
- (unsigned long)info->fix.smem_len >> 10);
+ fb_info(info, "%s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
+ info->fix.id,
+ info->var.xres, info->var.yres, info->var.bits_per_pixel,
+ size >> 10, (unsigned long)info->fix.smem_len >> 10);
pci_set_drvdata(pdev, info);
@@ -779,7 +778,6 @@ static void kyrofb_remove(struct pci_dev *pdev)
#endif
unregister_framebuffer(info);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index b17f5009a43..2c7f7d479fe 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -469,7 +469,7 @@ static void leo_wid_put(struct fb_info *info, struct fb_wid_list *wl)
default:
continue;
- };
+ }
sbus_writel(0x5800 + j, &lx_krn->krn_type);
sbus_writel(wi->wi_values[0], &lx_krn->krn_value);
}
@@ -648,8 +648,6 @@ static int leo_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index fe01add3700..5bd2eb8d4f3 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -913,8 +913,7 @@ static int __init macfb_init(void)
if (err)
goto fail_dealloc;
- pr_info("fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
return 0;
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index 1717623aabc..a01147fdf27 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -494,7 +494,7 @@ static int m1064_compute(void* out, struct my_timming* m) {
if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
break;
udelay(10);
- };
+ }
CRITEND
@@ -639,7 +639,7 @@ static void MGAG100_progPixClock(const struct matrox_fb_info *minfo, int flags,
if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
break;
udelay(10);
- };
+ }
if (!clk)
printk(KERN_ERR "matroxfb: Pixel PLL%c not locked after usual time\n", (reg-M1064_XPIXPLLAM-2)/4 + 'A');
selClk = inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_SRC_MASK;
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 9a44cec394b..195ad7cac1b 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -473,7 +473,7 @@ static void ti3026_setMCLK(struct matrox_fb_info *minfo, int fout)
if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40)
break;
udelay(10);
- };
+ }
if (!tmout)
printk(KERN_ERR "matroxfb: Temporary pixel PLL not locked after 5 secs\n");
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 24565291165..87c64ff4546 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1893,14 +1893,12 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
if (register_framebuffer(&minfo->fbcon) < 0) {
goto failVideoIO;
}
- printk("fb%d: %s frame buffer device\n",
- minfo->fbcon.node, minfo->fbcon.fix.id);
+ fb_info(&minfo->fbcon, "%s frame buffer device\n", minfo->fbcon.fix.id);
/* there is no console on this fb... but we have to initialize hardware
* until someone tells me what is proper thing to do */
if (!minfo->initialized) {
- printk(KERN_INFO "fb%d: initializing hardware\n",
- minfo->fbcon.node);
+ fb_info(&minfo->fbcon, "initializing hardware\n");
/* We have to use FB_ACTIVATE_FORCE, as we had to put vesafb_defined to the fbcon.var
* already before, so register_framebuffer works correctly. */
vesafb_defined.activate |= FB_ACTIVATE_FORCE;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index fd289745569..ee41a0f276b 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1295,19 +1295,7 @@ static struct i2c_driver maven_driver={
.id_table = maven_id,
};
-static int __init matroxfb_maven_init(void)
-{
- return i2c_add_driver(&maven_driver);
-}
-
-static void __exit matroxfb_maven_exit(void)
-{
- i2c_del_driver(&maven_driver);
-}
-
+module_i2c_driver(maven_driver);
MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_DESCRIPTION("Matrox G200/G400 Matrox MGA-TVO driver");
MODULE_LICENSE("GPL");
-module_init(matroxfb_maven_init);
-module_exit(matroxfb_maven_exit);
-/* we do not have __setup() yet */
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 91c59c9fb08..0cd4c331851 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -781,7 +781,6 @@ rel_reg:
irqdisp:
irq_dispose_mapping(par->irq);
fbrel:
- dev_set_drvdata(dev, NULL);
framebuffer_release(info);
return ret;
}
@@ -814,7 +813,6 @@ static int of_platform_mb862xx_remove(struct platform_device *ofdev)
iounmap(par->mmio_base);
iounmap(par->fb_base);
- dev_set_drvdata(&ofdev->dev, NULL);
release_mem_region(par->res->start, res_size);
framebuffer_release(fbi);
return 0;
@@ -1157,7 +1155,6 @@ static void mb862xx_pci_remove(struct pci_dev *pdev)
device_remove_file(&pdev->dev, &dev_attr_dispregs);
- pci_set_drvdata(pdev, NULL);
unregister_framebuffer(fbi);
fb_dealloc_cmap(&fbi->cmap);
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 0c1a874ffd2..f0a5392f5fd 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -890,7 +890,7 @@ static int mbxfb_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "mbxfb_probe\n");
- pdata = dev->dev.platform_data;
+ pdata = dev_get_platdata(&dev->dev);
if (!pdata) {
dev_err(&dev->dev, "platform data is required\n");
return -EINVAL;
@@ -976,7 +976,7 @@ static int mbxfb_probe(struct platform_device *dev)
platform_set_drvdata(dev, fbi);
- printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node);
+ fb_info(fbi, "mbx frame buffer device\n");
if (mfbi->platform_probe)
mfbi->platform_probe(fbi);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index f30150d71be..195cc2db4c2 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -690,7 +690,8 @@ static int metronomefb_probe(struct platform_device *dev)
goto err_csum_table;
}
- if (board->setup_irq(info))
+ retval = board->setup_irq(info);
+ if (retval)
goto err_csum_table;
retval = metronome_init_regs(par);
@@ -769,23 +770,11 @@ static struct platform_driver metronomefb_driver = {
.name = "metronomefb",
},
};
-
-static int __init metronomefb_init(void)
-{
- return platform_driver_register(&metronomefb_driver);
-}
-
-static void __exit metronomefb_exit(void)
-{
- platform_driver_unregister(&metronomefb_driver);
-}
+module_platform_driver(metronomefb_driver);
module_param(user_wfm_size, uint, 0);
MODULE_PARM_DESC(user_wfm_size, "Set custom waveform size");
-module_init(metronomefb_init);
-module_exit(metronomefb_exit);
-
MODULE_DESCRIPTION("fbdev driver for Metronome controller");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/mmp/fb/mmpfb.c b/drivers/video/mmp/fb/mmpfb.c
index 4ab95b8daed..7ab31eb76a8 100644
--- a/drivers/video/mmp/fb/mmpfb.c
+++ b/drivers/video/mmp/fb/mmpfb.c
@@ -392,12 +392,29 @@ static int var_update(struct fb_info *info)
return 0;
}
+static void mmpfb_set_win(struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct mmp_win win;
+ u32 stride;
+
+ memset(&win, 0, sizeof(win));
+ win.xsrc = win.xdst = fbi->mode.xres;
+ win.ysrc = win.ydst = fbi->mode.yres;
+ win.pix_fmt = fbi->pix_fmt;
+ stride = pixfmt_to_stride(win.pix_fmt);
+ win.pitch[0] = var->xres_virtual * stride;
+ win.pitch[1] = win.pitch[2] =
+ (stride == 1) ? (var->xres_virtual >> 1) : 0;
+ mmp_overlay_set_win(fbi->overlay, &win);
+}
+
static int mmpfb_set_par(struct fb_info *info)
{
struct mmpfb_info *fbi = info->par;
struct fb_var_screeninfo *var = &info->var;
struct mmp_addr addr;
- struct mmp_win win;
struct mmp_mode mode;
int ret;
@@ -409,11 +426,8 @@ static int mmpfb_set_par(struct fb_info *info)
fbmode_to_mmpmode(&mode, &fbi->mode, fbi->output_fmt);
mmp_path_set_mode(fbi->path, &mode);
- memset(&win, 0, sizeof(win));
- win.xsrc = win.xdst = fbi->mode.xres;
- win.ysrc = win.ydst = fbi->mode.yres;
- win.pix_fmt = fbi->pix_fmt;
- mmp_overlay_set_win(fbi->overlay, &win);
+ /* set window related info */
+ mmpfb_set_win(info);
/* set address always */
memset(&addr, 0, sizeof(addr));
@@ -427,16 +441,12 @@ static int mmpfb_set_par(struct fb_info *info)
static void mmpfb_power(struct mmpfb_info *fbi, int power)
{
struct mmp_addr addr;
- struct mmp_win win;
struct fb_var_screeninfo *var = &fbi->fb_info->var;
/* for power on, always set address/window again */
if (power) {
- memset(&win, 0, sizeof(win));
- win.xsrc = win.xdst = fbi->mode.xres;
- win.ysrc = win.ydst = fbi->mode.yres;
- win.pix_fmt = fbi->pix_fmt;
- mmp_overlay_set_win(fbi->overlay, &win);
+ /* set window related info */
+ mmpfb_set_win(fbi->fb_info);
/* set address always */
memset(&addr, 0, sizeof(addr));
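The new mmpfb_set_win() above also fills in the window pitches that the hardware layer now programs per plane: pitch[0] covers the first (luma or packed) plane, while pitch[1]/pitch[2] get half the virtual width only when the per-pixel stride is 1, i.e. planar YUV with half-width chroma planes. A hypothetical worked example (the numbers are illustrative, and the assumption that pixfmt_to_stride() returns bytes per pixel for packed formats is mine, not the patch's):

/* Hypothetical 480-pixel-wide planar YUV420 mode (stride == 1): */
u32 xres_virtual = 480, stride = 1;
u32 pitch_y  = xres_virtual * stride;			/* 480 bytes, Y plane  */
u32 pitch_uv = (stride == 1) ? (xres_virtual >> 1) : 0;	/* 240 bytes, U and V  */
/* For a packed 32bpp mode (stride == 4): pitch_y = 1920, pitch_uv = 0. */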
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 6ac755270ab..8621a9f2bdc 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -53,15 +53,14 @@ static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
if (tmp & isr)
writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
- } while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
+ } while ((isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
return IRQ_HANDLED;
}
static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
{
- u32 link_config = path_to_path_plat(overlay->path)->link_config;
- u32 rbswap, uvswap = 0, yuvswap = 0,
+ u32 rbswap = 0, uvswap = 0, yuvswap = 0,
csc_en = 0, val = 0,
vid = overlay_is_vid(overlay);
@@ -71,27 +70,23 @@ static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
case PIXFMT_RGB888PACK:
case PIXFMT_RGB888UNPACK:
case PIXFMT_RGBA888:
- rbswap = !(link_config & 0x1);
+ rbswap = 1;
break;
case PIXFMT_VYUY:
case PIXFMT_YVU422P:
case PIXFMT_YVU420P:
- rbswap = link_config & 0x1;
uvswap = 1;
break;
case PIXFMT_YUYV:
- rbswap = link_config & 0x1;
yuvswap = 1;
break;
default:
- rbswap = link_config & 0x1;
break;
}
switch (pix_fmt) {
case PIXFMT_RGB565:
case PIXFMT_BGR565:
- val = 0;
break;
case PIXFMT_RGB1555:
case PIXFMT_BGR1555:
@@ -147,17 +142,27 @@ static void dmafetch_set_fmt(struct mmp_overlay *overlay)
static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
{
struct lcd_regs *regs = path_regs(overlay->path);
- u32 pitch;
/* assert win supported */
memcpy(&overlay->win, win, sizeof(struct mmp_win));
mutex_lock(&overlay->access_ok);
- pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
- writel_relaxed(pitch, &regs->g_pitch);
- writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
- writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
- writel_relaxed(0, &regs->g_start);
+
+ if (overlay_is_vid(overlay)) {
+ writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
+ writel_relaxed(win->pitch[2] << 16 |
+ win->pitch[1], &regs->v_pitch_uv);
+
+ writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
+ writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
+ writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
+ } else {
+ writel_relaxed(win->pitch[0], &regs->g_pitch);
+
+ writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
+ writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
+ writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
+ }
dmafetch_set_fmt(overlay);
mutex_unlock(&overlay->access_ok);
@@ -239,7 +244,13 @@ static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
/* FIXME: assert addr supported */
memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
- writel(addr->phys[0], &regs->g_0);
+
+ if (overlay_is_vid(overlay)) {
+ writel_relaxed(addr->phys[0], &regs->v_y0);
+ writel_relaxed(addr->phys[1], &regs->v_u0);
+ writel_relaxed(addr->phys[2], &regs->v_v0);
+ } else
+ writel_relaxed(addr->phys[0], &regs->g_0);
return overlay->addr.phys[0];
}
@@ -248,7 +259,8 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
{
struct lcd_regs *regs = path_regs(path);
u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
- link_config = path_to_path_plat(path)->link_config;
+ link_config = path_to_path_plat(path)->link_config,
+ dsi_rbswap = path_to_path_plat(path)->link_config;
/* FIXME: assert videomode supported */
memcpy(&path->mode, mode, sizeof(struct mmp_mode));
@@ -263,6 +275,12 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
tmp |= CFG_DUMB_ENA(1);
writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));
+ /* interface rb_swap setting */
+ tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) &
+ (~(CFG_INTFRBSWAP_MASK));
+ tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
+ writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));
+
writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
writel_relaxed((mode->left_margin << 16) | mode->right_margin,
&regs->screen_h_porch);
@@ -370,20 +388,12 @@ static void path_set_default(struct mmp_path *path)
* bus arbiter for faster read if not tv path;
* 2.enable horizontal smooth filter;
*/
- if (PATH_PN == path->id) {
- mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
- | CFG_ARBFAST_ENA(1);
- tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
- tmp |= mask;
- writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
- } else if (PATH_TV == path->id) {
- mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
- | CFG_ARBFAST_ENA(1);
- tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
- tmp &= ~mask;
- tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
- writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
- }
+ mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp |= mask;
+ if (PATH_TV == path->id)
+ tmp &= ~CFG_ARBFAST_ENA(1);
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}
static int path_init(struct mmphw_path_plat *path_plat,
@@ -419,6 +429,7 @@ static int path_init(struct mmphw_path_plat *path_plat,
path_plat->path = path;
path_plat->path_config = config->path_config;
path_plat->link_config = config->link_config;
+ path_plat->dsi_rbswap = config->dsi_rbswap;
path_set_default(path);
kfree(path_info);
diff --git a/drivers/video/mmp/hw/mmp_ctrl.h b/drivers/video/mmp/hw/mmp_ctrl.h
index edd2002b0e9..53301cfdb1a 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/mmp/hw/mmp_ctrl.h
@@ -163,6 +163,8 @@ struct lcd_regs {
#define LCD_SCLK(path) ((PATH_PN == path->id) ? LCD_CFG_SCLK_DIV :\
((PATH_TV == path->id) ? LCD_TCLK_DIV : LCD_PN2_SCLK_DIV))
+#define intf_rbswap_ctrl(id) ((id) ? (((id) & 1) ? LCD_TVIF_CTRL : \
+ PN2_IOPAD_CONTROL) : LCD_TOP_CTRL)
/* dither configure */
#ifdef CONFIG_CPU_PXA988
@@ -615,6 +617,8 @@ struct lcd_regs {
#define LCD_SPU_DUMB_CTRL 0x01B8
#define CFG_DUMBMODE(mode) ((mode)<<28)
#define CFG_DUMBMODE_MASK 0xF0000000
+#define CFG_INTFRBSWAP(mode) ((mode)<<24)
+#define CFG_INTFRBSWAP_MASK 0x0F000000
#define CFG_LCDGPIO_O(data) ((data)<<20)
#define CFG_LCDGPIO_O_MASK 0x0FF00000
#define CFG_LCDGPIO_ENA(gpio) ((gpio)<<12)
@@ -1427,6 +1431,7 @@ struct mmphw_path_plat {
struct mmp_path *path;
u32 path_config;
u32 link_config;
+ u32 dsi_rbswap;
};
/* mmp ctrl describes mmp controller related info */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cfdb380ec81..804f874d32d 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1354,7 +1354,7 @@ static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
{
struct device *dev = mx3fb->dev;
- struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data;
+ struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
const char *name = mx3fb_pdata->name;
unsigned int irq;
struct fb_info *fbi;
@@ -1462,7 +1462,7 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
return false;
dev = rq->mx3fb->dev;
- mx3fb_pdata = dev->platform_data;
+ mx3fb_pdata = dev_get_platdata(dev);
return rq->id == chan->chan_id &&
mx3fb_pdata->dma_dev == chan->device->dev;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index c172a5281f9..44f99a60bb9 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2106,8 +2106,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err < 0)
goto err_reg_fb;
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
/*
* Our driver data
@@ -2148,12 +2147,6 @@ static void neofb_remove(struct pci_dev *dev)
fb_destroy_modedb(info->monspecs.modedb);
neo_unmap_mmio(info);
neo_free_fb_info(info);
-
- /*
- * Ensure that the driver data is no longer
- * valid.
- */
- pci_set_drvdata(dev, NULL);
}
}
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 796e5112cee..478f9808dee 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -91,7 +91,7 @@ static int nuc900fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
- struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
struct nuc900fb_display *display = NULL;
struct nuc900fb_display *default_display = mach_info->displays +
mach_info->default_display;
@@ -358,7 +358,7 @@ static inline void modify_gpio(void __iomem *reg,
static int nuc900fb_init_registers(struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
- struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
void __iomem *regs = fbi->io;
/*reset the display engine*/
@@ -512,7 +512,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
int size;
dev_dbg(&pdev->dev, "devinit\n");
- mach_info = pdev->dev.platform_data;
+ mach_info = dev_get_platdata(&pdev->dev);
if (mach_info == NULL) {
dev_err(&pdev->dev,
"no platform data for lcd, cannot attach\n");
@@ -647,8 +647,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
goto free_cpufreq;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- fbinfo->node, fbinfo->fix.id);
+ fb_info(fbinfo, "%s frame buffer device\n", fbinfo->fix.id);
return 0;
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index ed20a9871b3..81c80ac3c76 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -1300,7 +1300,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
break;
default:
break;
- };
+ }
NV_WR32(par->PGRAPH, 0x0b38, 0x2ffff800);
NV_WR32(par->PGRAPH, 0x0b3c, 0x00006000);
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 0c4f34311ed..9dbea222340 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -515,8 +515,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
if (register_framebuffer(info) < 0)
goto out_err;
- printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
- info->node, full_name);
+ fb_info(info, "Open Firmware frame buffer device on %s\n", full_name);
return;
out_err:
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index f349ee6f0ce..a4ee65b8f91 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -947,7 +947,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
hwa742.extif = fbdev->ext_if;
hwa742.int_ctrl = fbdev->int_ctrl;
- omapfb_conf = fbdev->dev->platform_data;
+ omapfb_conf = dev_get_platdata(fbdev->dev);
hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index d40612c31a9..e4fc6d9b537 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -1602,7 +1602,7 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
char name[17];
int i;
- conf = fbdev->dev->platform_data;
+ conf = dev_get_platdata(fbdev->dev);
fbdev->ctrl = NULL;
@@ -1674,7 +1674,7 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
- if (pdev->dev.platform_data == NULL) {
+ if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
r = -ENOENT;
goto cleanup;
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 10b25e7cd87..e6cfc38160d 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -57,6 +57,12 @@ config DISPLAY_PANEL_SHARP_LS037V7DW01
help
LCD Panel used in TI's SDP3430 and EVM boards
+config DISPLAY_PANEL_TPO_TD028TTEC1
+ tristate "TPO TD028TTEC1 LCD Panel"
+ depends on SPI
+ help
+ LCD panel used in Openmoko.
+
config DISPLAY_PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
depends on SPI
diff --git a/drivers/video/omap2/displays-new/Makefile b/drivers/video/omap2/displays-new/Makefile
index 5aeb11b8fcd..0323a8a1c68 100644
--- a/drivers/video/omap2/displays-new/Makefile
+++ b/drivers/video/omap2/displays-new/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index 63d88ee6dfe..b6c50904038 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -262,6 +262,9 @@ static int dvic_probe_pdata(struct platform_device *pdev)
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
dev_err(&pdev->dev, "Failed to find video source\n");
return -EPROBE_DEFER;
}
@@ -313,6 +316,10 @@ static int dvic_probe(struct platform_device *pdev)
err_reg:
omap_dss_put_device(ddata->in);
+
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
return r;
}
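The connector-dvi fix balances the i2c_get_adapter() reference taken earlier in dvic_probe_pdata() on the two error paths that previously leaked it. The general shape of that pattern, as a sketch with hypothetical names:

/* Sketch of the get/put balancing fixed above (rest_of_probe() is hypothetical): */
adapter = i2c_get_adapter(bus_num);	/* takes a reference on the adapter */
if (!adapter)
	return -EPROBE_DEFER;

r = rest_of_probe();
if (r) {
	i2c_put_adapter(adapter);	/* drop the reference on every failure path */
	return r;
}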
diff --git a/drivers/video/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
index 798ef200b05..d5c936cb217 100644
--- a/drivers/video/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
@@ -69,7 +69,7 @@ static int tpd_connect(struct omap_dss_device *dssdev,
dst->src = dssdev;
dssdev->dst = dst;
- INIT_COMPLETION(ddata->hpd_completion);
+ reinit_completion(&ddata->hpd_completion);
gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
/* DC-DC converter needs at max 300us to get to 90% of 5V */
diff --git a/drivers/video/omap2/displays-new/panel-dsi-cm.c b/drivers/video/omap2/displays-new/panel-dsi-cm.c
index aaaea6469cd..b7baafe83aa 100644
--- a/drivers/video/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/omap2/displays-new/panel-dsi-cm.c
@@ -599,7 +599,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r) {
dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n");
goto err0;
- };
+ }
r = in->ops.dsi->set_config(in, &dsi_config);
if (r) {
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
new file mode 100644
index 00000000000..9a08908fe99
--- /dev/null
+++ b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -0,0 +1,480 @@
+/*
+ * Toppoly TD028TTEC1 panel support
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Neo 1973 code (jbt6k74.c):
+ * Copyright (C) 2006-2007 by OpenMoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ *
+ * Ported and adapted from Neo 1973 U-Boot by:
+ * H. Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int data_lines;
+
+ struct omap_video_timings videomode;
+
+ struct spi_device *spi_dev;
+};
+
+static struct omap_video_timings td028ttec1_panel_timings = {
+ .x_res = 480,
+ .y_res = 640,
+ .pixel_clock = 22153,
+ .hfp = 24,
+ .hsw = 8,
+ .hbp = 8,
+ .vfp = 4,
+ .vsw = 2,
+ .vbp = 2,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+#define JBT_COMMAND 0x000
+#define JBT_DATA 0x100
+
+static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg)
+{
+ int rc;
+ u16 tx_buf = JBT_COMMAND | reg;
+
+ rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf,
+ 1*sizeof(u16));
+ if (rc != 0)
+ dev_err(&ddata->spi_dev->dev,
+ "jbt_ret_write_0 spi_write ret %d\n", rc);
+
+ return rc;
+}
+
+static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data)
+{
+ int rc;
+ u16 tx_buf[2];
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | data;
+ rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
+ 2*sizeof(u16));
+ if (rc != 0)
+ dev_err(&ddata->spi_dev->dev,
+ "jbt_reg_write_1 spi_write ret %d\n", rc);
+
+ return rc;
+}
+
+static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data)
+{
+ int rc;
+ u16 tx_buf[3];
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | (data >> 8);
+ tx_buf[2] = JBT_DATA | (data & 0xff);
+
+ rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
+ 3*sizeof(u16));
+
+ if (rc != 0)
+ dev_err(&ddata->spi_dev->dev,
+ "jbt_reg_write_2 spi_write ret %d\n", rc);
+
+ return rc;
+}
+
+enum jbt_register {
+ JBT_REG_SLEEP_IN = 0x10,
+ JBT_REG_SLEEP_OUT = 0x11,
+
+ JBT_REG_DISPLAY_OFF = 0x28,
+ JBT_REG_DISPLAY_ON = 0x29,
+
+ JBT_REG_RGB_FORMAT = 0x3a,
+ JBT_REG_QUAD_RATE = 0x3b,
+
+ JBT_REG_POWER_ON_OFF = 0xb0,
+ JBT_REG_BOOSTER_OP = 0xb1,
+ JBT_REG_BOOSTER_MODE = 0xb2,
+ JBT_REG_BOOSTER_FREQ = 0xb3,
+ JBT_REG_OPAMP_SYSCLK = 0xb4,
+ JBT_REG_VSC_VOLTAGE = 0xb5,
+ JBT_REG_VCOM_VOLTAGE = 0xb6,
+ JBT_REG_EXT_DISPL = 0xb7,
+ JBT_REG_OUTPUT_CONTROL = 0xb8,
+ JBT_REG_DCCLK_DCEV = 0xb9,
+ JBT_REG_DISPLAY_MODE1 = 0xba,
+ JBT_REG_DISPLAY_MODE2 = 0xbb,
+ JBT_REG_DISPLAY_MODE = 0xbc,
+ JBT_REG_ASW_SLEW = 0xbd,
+ JBT_REG_DUMMY_DISPLAY = 0xbe,
+ JBT_REG_DRIVE_SYSTEM = 0xbf,
+
+ JBT_REG_SLEEP_OUT_FR_A = 0xc0,
+ JBT_REG_SLEEP_OUT_FR_B = 0xc1,
+ JBT_REG_SLEEP_OUT_FR_C = 0xc2,
+ JBT_REG_SLEEP_IN_LCCNT_D = 0xc3,
+ JBT_REG_SLEEP_IN_LCCNT_E = 0xc4,
+ JBT_REG_SLEEP_IN_LCCNT_F = 0xc5,
+ JBT_REG_SLEEP_IN_LCCNT_G = 0xc6,
+
+ JBT_REG_GAMMA1_FINE_1 = 0xc7,
+ JBT_REG_GAMMA1_FINE_2 = 0xc8,
+ JBT_REG_GAMMA1_INCLINATION = 0xc9,
+ JBT_REG_GAMMA1_BLUE_OFFSET = 0xca,
+
+ JBT_REG_BLANK_CONTROL = 0xcf,
+ JBT_REG_BLANK_TH_TV = 0xd0,
+ JBT_REG_CKV_ON_OFF = 0xd1,
+ JBT_REG_CKV_1_2 = 0xd2,
+ JBT_REG_OEV_TIMING = 0xd3,
+ JBT_REG_ASW_TIMING_1 = 0xd4,
+ JBT_REG_ASW_TIMING_2 = 0xd5,
+
+ JBT_REG_HCLOCK_VGA = 0xec,
+ JBT_REG_HCLOCK_QVGA = 0xed,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n",
+ dssdev->state);
+
+ /* three times command zero */
+ r |= jbt_ret_write_0(ddata, 0x00);
+ usleep_range(1000, 2000);
+ r |= jbt_ret_write_0(ddata, 0x00);
+ usleep_range(1000, 2000);
+ r |= jbt_ret_write_0(ddata, 0x00);
+ usleep_range(1000, 2000);
+
+ if (r) {
+ dev_warn(dssdev->dev, "transfer error\n");
+ goto transfer_err;
+ }
+
+ /* deep standby out */
+ r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17);
+
+ /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
+ r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80);
+
+ /* Quad mode off */
+ r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00);
+
+ /* AVDD on, XVDD on */
+ r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16);
+
+ /* Output control */
+ r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9);
+
+ /* Sleep mode off */
+ r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT);
+
+ /* at this point we have like 50% grey */
+
+ /* initialize register set */
+ r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00);
+ r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02);
+ r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b);
+ r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40);
+ r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04);
+ /*
+ * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement
+ * to avoid red / blue flicker
+ */
+ r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00);
+
+ r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0);
+
+ r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533);
+ r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00);
+ r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00);
+ r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00);
+
+ r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02);
+ r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804);
+
+ r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01);
+ r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000);
+
+ r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e);
+ r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4);
+ r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e);
+
+ r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+transfer_err:
+
+ return r ? -EIO : 0;
+}
+
+static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
+
+ jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF);
+ jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
+ jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
+ jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver td028ttec1_ops = {
+ .connect = td028ttec1_panel_connect,
+ .disconnect = td028ttec1_panel_disconnect,
+
+ .enable = td028ttec1_panel_enable,
+ .disable = td028ttec1_panel_disable,
+
+ .set_timings = td028ttec1_panel_set_timings,
+ .get_timings = td028ttec1_panel_get_timings,
+ .check_timings = td028ttec1_panel_check_timings,
+};
+
+static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
+{
+ const struct panel_tpo_td028ttec1_platform_data *pdata;
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&spi->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&spi->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int td028ttec1_panel_probe(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->bits_per_word = 9;
+ spi->mode = SPI_MODE_3;
+
+ r = spi_setup(spi);
+ if (r < 0) {
+ dev_err(&spi->dev, "spi_setup failed: %d\n", r);
+ return r;
+ }
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ddata);
+
+ ddata->spi_dev = spi;
+
+ if (dev_get_platdata(&spi->dev)) {
+ r = td028ttec1_panel_probe_pdata(spi);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->videomode = td028ttec1_panel_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &spi->dev;
+ dssdev->driver = &td028ttec1_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+ dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&spi->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int td028ttec1_panel_remove(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
+
+ omapdss_unregister_display(dssdev);
+
+ td028ttec1_panel_disable(dssdev);
+ td028ttec1_panel_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct spi_driver td028ttec1_spi_driver = {
+ .probe = td028ttec1_panel_probe,
+ .remove = td028ttec1_panel_remove,
+
+ .driver = {
+ .name = "panel-tpo-td028ttec1",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(td028ttec1_spi_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
+MODULE_LICENSE("GPL");
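The new td028ttec1 panel driver above talks to its JBT6K74-style controller (cf. the jbt6k74.c heritage noted in the file header) over 9-bit SPI words, with bit 8 distinguishing command words (JBT_COMMAND, 0x000) from parameter words (JBT_DATA, 0x100) and spi->bits_per_word set to 9 accordingly. An illustration of how one register write from the init sequence is framed, reusing the constants defined in the file:

/* Illustration: jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40) sends */
u16 tx_buf[2] = {
	JBT_COMMAND | JBT_REG_VCOM_VOLTAGE,	/* 0x0b6: select register 0xb6 */
	JBT_DATA    | 0x40,			/* 0x140: one parameter byte   */
};
/* spi_write(ddata->spi_dev, (u8 *)tx_buf, 2 * sizeof(u16)) then clocks the
 * two entries out as 9-bit frames because bits_per_word == 9. */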
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 94832eb06a3..d3aa91bdd6a 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -10,5 +10,6 @@ omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o ti_hdmi_4xxx_ip.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi_common.o hdmi_wp.o hdmi_pll.o \
+ hdmi_phy.o hdmi4_core.o
ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 60d3958d04f..ffa45c894cd 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -266,7 +266,7 @@ static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
venc_init_platform_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi_init_platform_driver,
+ hdmi4_init_platform_driver,
#endif
};
@@ -287,7 +287,7 @@ static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
venc_uninit_platform_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi_uninit_platform_driver,
+ hdmi4_uninit_platform_driver,
#endif
};
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 477975009ee..4ec59ca72e5 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2352,7 +2352,7 @@ int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
{
enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
bool five_taps = true;
- bool fieldmode = 0;
+ bool fieldmode = false;
u16 in_height = oi->height;
u16 in_width = oi->width;
bool ilace = timings->interlace;
@@ -2365,7 +2365,7 @@ int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
out_height = oi->out_height == 0 ? oi->height : oi->out_height;
if (ilace && oi->height == out_height)
- fieldmode = 1;
+ fieldmode = true;
if (ilace) {
if (fieldmode)
@@ -2396,7 +2396,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
bool mem_to_mem)
{
bool five_taps = true;
- bool fieldmode = 0;
+ bool fieldmode = false;
int r, cconv = 0;
unsigned offset0, offset1;
s32 row_inc;
@@ -2417,7 +2417,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
out_height = out_height == 0 ? height : out_height;
if (ilace && height == out_height)
- fieldmode = 1;
+ fieldmode = true;
if (ilace) {
if (fieldmode)
@@ -2918,7 +2918,7 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
break;
default:
BUG();
- };
+ }
l = dispc_read_reg(DISPC_POL_FREQ(channel));
l |= FLD_VAL(onoff, 17, 17);
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index fafe7c941a6..669a81fdf58 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -266,7 +266,7 @@ void videomode_to_omap_video_timings(const struct videomode *vm,
OMAPDSS_SIG_ACTIVE_LOW;
ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_HIGH;
+ OMAPDSS_SIG_ACTIVE_LOW;
ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
OMAPDSS_DRIVE_SIG_RISING_EDGE :
OMAPDSS_DRIVE_SIG_FALLING_EDGE;
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index a598b581228..6056b27cf73 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -312,7 +312,7 @@ struct dsi_data {
struct dsi_isr_tables isr_tables_copy;
int update_channel;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
unsigned update_bytes;
#endif
@@ -334,7 +334,7 @@ struct dsi_data {
u32 errors;
spinlock_t errors_lock;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
ktime_t perf_setup_time;
ktime_t perf_start_time;
#endif
@@ -373,7 +373,7 @@ struct dsi_packet_sent_handler_data {
struct completion *completion;
};
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
@@ -497,7 +497,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
}
}
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4066,7 +4066,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
default:
r = -EINVAL;
goto err_pix_fmt;
- };
+ }
dsi_if_enable(dsidev, false);
dsi_vc_enable(dsidev, channel, false);
@@ -4277,7 +4277,7 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dw = dsi->timings.x_res;
dh = dsi->timings.y_res;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index e172531d196..f538e867c0f 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -427,8 +427,8 @@ int venc_init_platform_driver(void) __init;
void venc_uninit_platform_driver(void) __exit;
/* HDMI */
-int hdmi_init_platform_driver(void) __init;
-void hdmi_uninit_platform_driver(void) __exit;
+int hdmi4_init_platform_driver(void) __init;
+void hdmi4_uninit_platform_driver(void) __exit;
/* RFBI */
int rfbi_init_platform_driver(void) __init;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index b9cfebb378a..f8fd6dbacab 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -789,50 +789,6 @@ static const struct omap_dss_features omap5_dss_features = {
.burst_size_unit = 16,
};
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-/* HDMI OMAP4 Functions*/
-static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
-
- .video_configure = ti_hdmi_4xxx_basic_configure,
- .phy_enable = ti_hdmi_4xxx_phy_enable,
- .phy_disable = ti_hdmi_4xxx_phy_disable,
- .read_edid = ti_hdmi_4xxx_read_edid,
- .pll_enable = ti_hdmi_4xxx_pll_enable,
- .pll_disable = ti_hdmi_4xxx_pll_disable,
- .video_enable = ti_hdmi_4xxx_wp_video_start,
- .video_disable = ti_hdmi_4xxx_wp_video_stop,
- .dump_wrapper = ti_hdmi_4xxx_wp_dump,
- .dump_core = ti_hdmi_4xxx_core_dump,
- .dump_pll = ti_hdmi_4xxx_pll_dump,
- .dump_phy = ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
- .audio_enable = ti_hdmi_4xxx_wp_audio_enable,
- .audio_disable = ti_hdmi_4xxx_wp_audio_disable,
- .audio_start = ti_hdmi_4xxx_audio_start,
- .audio_stop = ti_hdmi_4xxx_audio_stop,
- .audio_config = ti_hdmi_4xxx_audio_config,
- .audio_get_dma_port = ti_hdmi_4xxx_audio_get_dma_port,
-#endif
-
-};
-
-void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data,
- enum omapdss_version version)
-{
- switch (version) {
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- ip_data->ops = &omap4_hdmi_functions;
- break;
- default:
- ip_data->ops = NULL;
- }
-
- WARN_ON(ip_data->ops == NULL);
-}
-#endif
-
/* Functions returning values related to a DSS feature */
int dss_feat_get_num_mgrs(void)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 489b9bec4a6..10b0556e135 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,10 +20,6 @@
#ifndef __OMAP2_DSS_FEATURES_H
#define __OMAP2_DSS_FEATURES_H
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-#include "ti_hdmi.h"
-#endif
-
#define MAX_DSS_MANAGERS 4
#define MAX_DSS_OVERLAYS 4
#define MAX_DSS_LCD_MANAGERS 3
@@ -117,8 +113,4 @@ bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(enum omapdss_version version);
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data,
- enum omapdss_version version);
-#endif
#endif
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
deleted file mode 100644
index 82a96407499..00000000000
--- a/drivers/video/omap2/dss/hdmi.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * hdmi.c
- *
- * HDMI interface DSS driver setting for TI's OMAP4 family of processor.
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Yong Zhi
- * Mythri pk <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "HDMI"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
-
-#include "ti_hdmi.h"
-#include "dss.h"
-#include "dss_features.h"
-
-#define HDMI_WP 0x0
-#define HDMI_CORE_SYS 0x400
-#define HDMI_CORE_AV 0x900
-#define HDMI_PLLCTRL 0x200
-#define HDMI_PHY 0x300
-
-/* HDMI EDID Length move this */
-#define HDMI_EDID_MAX_LENGTH 256
-#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
-#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
-#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
-#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
-#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
-
-#define HDMI_DEFAULT_REGN 16
-#define HDMI_DEFAULT_REGM2 1
-
-static struct {
- struct mutex lock;
- struct platform_device *pdev;
-
- struct hdmi_ip_data ip_data;
-
- struct clk *sys_clk;
- struct regulator *vdda_hdmi_dac_reg;
-
- bool core_enabled;
-
- struct omap_dss_device output;
-} hdmi;
-
-/*
- * Logic for the below structure :
- * user enters the CEA or VESA timings by specifying the HDMI/DVI code.
- * There is a correspondence between CEA/VESA timing and code, please
- * refer to section 6.3 in HDMI 1.3 specification for timing code.
- *
- * In the below structure, cea_vesa_timings corresponds to all OMAP4
- * supported CEA and VESA timing values.code_cea corresponds to the CEA
- * code, It is used to get the timing from cea_vesa_timing array.Similarly
- * with code_vesa. Code_index is used for back mapping, that is once EDID
- * is read from the TV, EDID is parsed to find the timing values and then
- * map it to corresponding CEA or VESA index.
- */
-
-static const struct hdmi_config cea_timings[] = {
- {
- { 640, 480, 25200, 96, 16, 48, 2, 10, 33,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 1, HDMI_HDMI },
- },
- {
- { 720, 480, 27027, 62, 16, 60, 6, 9, 30,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 2, HDMI_HDMI },
- },
- {
- { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 4, HDMI_HDMI },
- },
- {
- { 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- true, },
- { 5, HDMI_HDMI },
- },
- {
- { 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- true, },
- { 6, HDMI_HDMI },
- },
- {
- { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 16, HDMI_HDMI },
- },
- {
- { 720, 576, 27000, 64, 12, 68, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 17, HDMI_HDMI },
- },
- {
- { 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 19, HDMI_HDMI },
- },
- {
- { 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- true, },
- { 20, HDMI_HDMI },
- },
- {
- { 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- true, },
- { 21, HDMI_HDMI },
- },
- {
- { 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 29, HDMI_HDMI },
- },
- {
- { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 31, HDMI_HDMI },
- },
- {
- { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 32, HDMI_HDMI },
- },
- {
- { 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 35, HDMI_HDMI },
- },
- {
- { 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 37, HDMI_HDMI },
- },
-};
-
-static const struct hdmi_config vesa_timings[] = {
-/* VESA From Here */
- {
- { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 4, HDMI_DVI },
- },
- {
- { 800, 600, 40000, 128, 40, 88, 4, 1, 23,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 9, HDMI_DVI },
- },
- {
- { 848, 480, 33750, 112, 16, 112, 8, 6, 23,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0xE, HDMI_DVI },
- },
- {
- { 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x17, HDMI_DVI },
- },
- {
- { 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x1C, HDMI_DVI },
- },
- {
- { 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x27, HDMI_DVI },
- },
- {
- { 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x20, HDMI_DVI },
- },
- {
- { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x23, HDMI_DVI },
- },
- {
- { 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x10, HDMI_DVI },
- },
- {
- { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x2A, HDMI_DVI },
- },
- {
- { 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x2F, HDMI_DVI },
- },
- {
- { 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x3A, HDMI_DVI },
- },
- {
- { 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x51, HDMI_DVI },
- },
- {
- { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x52, HDMI_DVI },
- },
- {
- { 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x16, HDMI_DVI },
- },
- {
- { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x29, HDMI_DVI },
- },
- {
- { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x39, HDMI_DVI },
- },
- {
- { 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x1B, HDMI_DVI },
- },
- {
- { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x55, HDMI_DVI },
- },
- {
- { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x44, HDMI_DVI },
- },
-};
-
-static int hdmi_runtime_get(void)
-{
- int r;
-
- DSSDBG("hdmi_runtime_get\n");
-
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static void hdmi_runtime_put(void)
-{
- int r;
-
- DSSDBG("hdmi_runtime_put\n");
-
- r = pm_runtime_put_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0 && r != -ENOSYS);
-}
-
-static int hdmi_init_regulator(void)
-{
- struct regulator *reg;
-
- if (hdmi.vdda_hdmi_dac_reg != NULL)
- return 0;
-
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
-
- /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
- if (IS_ERR(reg))
- reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
-
- if (IS_ERR(reg)) {
- DSSERR("can't get VDDA_HDMI_DAC regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi.vdda_hdmi_dac_reg = reg;
-
- return 0;
-}
-
-static const struct hdmi_config *hdmi_find_timing(
- const struct hdmi_config *timings_arr,
- int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (timings_arr[i].cm.code == hdmi.ip_data.cfg.cm.code)
- return &timings_arr[i];
- }
- return NULL;
-}
-
-static const struct hdmi_config *hdmi_get_timings(void)
-{
- const struct hdmi_config *arr;
- int len;
-
- if (hdmi.ip_data.cfg.cm.mode == HDMI_DVI) {
- arr = vesa_timings;
- len = ARRAY_SIZE(vesa_timings);
- } else {
- arr = cea_timings;
- len = ARRAY_SIZE(cea_timings);
- }
-
- return hdmi_find_timing(arr, len);
-}
-
-static bool hdmi_timings_compare(struct omap_video_timings *timing1,
- const struct omap_video_timings *timing2)
-{
- int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
-
- if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
- DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
- (timing2->x_res == timing1->x_res) &&
- (timing2->y_res == timing1->y_res)) {
-
- timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
- timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
- timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
-		timing1_vsync = timing1->vfp + timing1->vsw + timing1->vbp;
-
- DSSDBG("timing1_hsync = %d timing1_vsync = %d"\
- "timing2_hsync = %d timing2_vsync = %d\n",
- timing1_hsync, timing1_vsync,
- timing2_hsync, timing2_vsync);
-
- if ((timing1_hsync == timing2_hsync) &&
- (timing1_vsync == timing2_vsync)) {
- return true;
- }
- }
- return false;
-}
-
-static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
-{
- int i;
- struct hdmi_cm cm = {-1};
- DSSDBG("hdmi_get_code\n");
-
- for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
- if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
- cm = cea_timings[i].cm;
- goto end;
- }
- }
- for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
- if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
- cm = vesa_timings[i].cm;
- goto end;
- }
- }
-
-end: return cm;
-
-}
-
-static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
- struct hdmi_pll_info *pi)
-{
- unsigned long clkin, refclk;
- u32 mf;
-
- clkin = clk_get_rate(hdmi.sys_clk) / 10000;
- /*
- * Input clock is predivided by N + 1
- * out put of which is reference clk
- */
-
- pi->regn = HDMI_DEFAULT_REGN;
-
- refclk = clkin / pi->regn;
-
- pi->regm2 = HDMI_DEFAULT_REGM2;
-
- /*
- * multiplier is pixel_clk/ref_clk
- * Multiplying by 100 to avoid fractional part removal
- */
- pi->regm = phy * pi->regm2 / refclk;
-
- /*
- * fractional multiplier is remainder of the difference between
- * multiplier and actual phy(required pixel clock thus should be
- * multiplied by 2^18(262144) divided by the reference clock
- */
- mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
- pi->regmf = pi->regm2 * mf / refclk;
-
- /*
- * Dcofreq should be set to 1 if required pixel clock
- * is greater than 1000MHz
- */
- pi->dcofreq = phy > 1000 * 100;
- pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
-
- /* Set the reference clock to sysclk reference */
- pi->refsel = HDMI_REFSEL_SYSCLK;
-
- DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
- DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
-}
-
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
-{
- int r;
-
- r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
- if (r)
- return r;
-
- r = hdmi_runtime_get();
- if (r)
- goto err_runtime_get;
-
- /* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
-
- hdmi.core_enabled = true;
-
- return 0;
-
-err_runtime_get:
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
-
- return r;
-}
-
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
-{
- hdmi.core_enabled = false;
-
- hdmi_runtime_put();
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
-}
-
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
-{
- int r;
- struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = hdmi.output.manager;
- unsigned long phy;
-
- r = hdmi_power_on_core(dssdev);
- if (r)
- return r;
-
- dss_mgr_disable(mgr);
-
- p = &hdmi.ip_data.cfg.timings;
-
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
-
- phy = p->pixel_clock;
-
- hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
-
- hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-
- /* config the PLL and PHY hdmi_set_pll_pwrfirst */
- r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
- if (r) {
- DSSDBG("Failed to lock PLL\n");
- goto err_pll_enable;
- }
-
- r = hdmi.ip_data.ops->phy_enable(&hdmi.ip_data);
- if (r) {
- DSSDBG("Failed to start PHY\n");
- goto err_phy_enable;
- }
-
- hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
-
- /* bypass TV gamma table */
- dispc_enable_gamma_table(0);
-
- /* tv size */
- dss_mgr_set_timings(mgr, p);
-
- r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
- if (r)
- goto err_vid_enable;
-
- r = dss_mgr_enable(mgr);
- if (r)
- goto err_mgr_enable;
-
- return 0;
-
-err_mgr_enable:
- hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-err_vid_enable:
- hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
-err_phy_enable:
- hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
-err_pll_enable:
- hdmi_power_off_core(dssdev);
- return -EIO;
-}
-
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
-{
- struct omap_overlay_manager *mgr = hdmi.output.manager;
-
- dss_mgr_disable(mgr);
-
- hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
- hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
- hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
-
- hdmi_power_off_core(dssdev);
-}
-
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct hdmi_cm cm;
-
- cm = hdmi_get_code(timings);
- if (cm.code == -1) {
- return -EINVAL;
- }
-
- return 0;
-
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct hdmi_cm cm;
- const struct hdmi_config *t;
-
- mutex_lock(&hdmi.lock);
-
- cm = hdmi_get_code(timings);
- hdmi.ip_data.cfg.cm = cm;
-
- t = hdmi_get_timings();
- if (t != NULL) {
- hdmi.ip_data.cfg = *t;
-
- dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
- }
-
- mutex_unlock(&hdmi.lock);
-}
-
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- const struct hdmi_config *cfg;
-
- cfg = hdmi_get_timings();
- if (cfg == NULL)
- cfg = &vesa_timings[0];
-
- memcpy(timings, &cfg->timings, sizeof(cfg->timings));
-}
-
-static void hdmi_dump_regs(struct seq_file *s)
-{
- mutex_lock(&hdmi.lock);
-
- if (hdmi_runtime_get()) {
- mutex_unlock(&hdmi.lock);
- return;
- }
-
- hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
- hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
- hdmi.ip_data.ops->dump_phy(&hdmi.ip_data, s);
- hdmi.ip_data.ops->dump_core(&hdmi.ip_data, s);
-
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
-}
-
-static int read_edid(u8 *buf, int len)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_runtime_get();
- BUG_ON(r);
-
- r = hdmi.ip_data.ops->read_edid(&hdmi.ip_data, buf, len);
-
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out = &hdmi.output;
- int r = 0;
-
- DSSDBG("ENTER hdmi_display_enable\n");
-
- mutex_lock(&hdmi.lock);
-
- if (out == NULL || out->manager == NULL) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
- r = hdmi_power_on_full(dssdev);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
-{
- DSSDBG("Enter hdmi_display_disable\n");
-
- mutex_lock(&hdmi.lock);
-
- hdmi_power_off_full(dssdev);
-
- mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_power_on_core(dssdev);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
-{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
- mutex_lock(&hdmi.lock);
-
- hdmi_power_off_core(dssdev);
-
- mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_get_clocks(struct platform_device *pdev)
-{
- struct clk *clk;
-
- clk = devm_clk_get(&pdev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
-
- hdmi.sys_clk = clk;
-
- return 0;
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
-{
- u32 deep_color;
- bool deep_color_correct = false;
- u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
-
- if (n == NULL || cts == NULL)
- return -EINVAL;
-
- /* TODO: When implemented, query deep color mode here. */
- deep_color = 100;
-
- /*
- * When using deep color, the default N value (as in the HDMI
- * specification) yields to an non-integer CTS. Hence, we
- * modify it while keeping the restrictions described in
- * section 7.2.1 of the HDMI 1.4a specification.
- */
- switch (sample_freq) {
- case 32000:
- case 48000:
- case 96000:
- case 192000:
- if (deep_color == 125)
- if (pclk == 27027 || pclk == 74250)
- deep_color_correct = true;
- if (deep_color == 150)
- if (pclk == 27027)
- deep_color_correct = true;
- break;
- case 44100:
- case 88200:
- case 176400:
- if (deep_color == 125)
- if (pclk == 27027)
- deep_color_correct = true;
- break;
- default:
- return -EINVAL;
- }
-
- if (deep_color_correct) {
- switch (sample_freq) {
- case 32000:
- *n = 8192;
- break;
- case 44100:
- *n = 12544;
- break;
- case 48000:
- *n = 8192;
- break;
- case 88200:
- *n = 25088;
- break;
- case 96000:
- *n = 16384;
- break;
- case 176400:
- *n = 50176;
- break;
- case 192000:
- *n = 32768;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (sample_freq) {
- case 32000:
- *n = 4096;
- break;
- case 44100:
- *n = 6272;
- break;
- case 48000:
- *n = 6144;
- break;
- case 88200:
- *n = 12544;
- break;
- case 96000:
- *n = 12288;
- break;
- case 176400:
- *n = 25088;
- break;
- case 192000:
- *n = 24576;
- break;
- default:
- return -EINVAL;
- }
- }
- /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
-
- return 0;
-}
-
-static bool hdmi_mode_has_audio(void)
-{
- if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
- return true;
- else
- return false;
-}
-
-#endif
-
-static int hdmi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
-{
- struct omap_overlay_manager *mgr;
- int r;
-
- dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
-
- r = hdmi_init_regulator();
- if (r)
- return r;
-
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
- if (r)
- return r;
-
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(mgr, dssdev);
- return r;
- }
-
- return 0;
-}
-
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
-{
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
-}
-
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- bool need_enable;
- int r;
-
- need_enable = hdmi.core_enabled == false;
-
- if (need_enable) {
- r = hdmi_core_enable(dssdev);
- if (r)
- return r;
- }
-
- r = read_edid(edid, len);
-
- if (need_enable)
- hdmi_core_disable(dssdev);
-
- return r;
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio()) {
- r = -EPERM;
- goto err;
- }
-
-
- r = hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
- hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
- hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- bool r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_mode_has_audio();
-
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio()) {
- r = -EPERM;
- goto err;
- }
-
- r = hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-#else
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- return false;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- return -EPERM;
-}
-#endif
-
-static const struct omapdss_hdmi_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
-
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
-
- .check_timings = hdmi_display_check_timing,
- .set_timings = hdmi_display_set_timing,
- .get_timings = hdmi_display_get_timings,
-
- .read_edid = hdmi_read_edid,
-
- .audio_enable = hdmi_audio_enable,
- .audio_disable = hdmi_audio_disable,
- .audio_start = hdmi_audio_start,
- .audio_stop = hdmi_audio_stop,
- .audio_supported = hdmi_audio_supported,
- .audio_config = hdmi_audio_config,
-};
-
-static void hdmi_init_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &hdmi.output;
-
- out->dev = &pdev->dev;
- out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
- out->name = "hdmi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.hdmi = &hdmi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void __exit hdmi_uninit_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &hdmi.output;
-
- omapdss_unregister_output(out);
-}
-
-/* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int r;
-
- hdmi.pdev = pdev;
-
- mutex_init(&hdmi.lock);
- mutex_init(&hdmi.ip_data.lock);
-
- res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
-
- /* Base address taken from platform */
- hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hdmi.ip_data.base_wp))
- return PTR_ERR(hdmi.ip_data.base_wp);
-
- hdmi.ip_data.irq = platform_get_irq(pdev, 0);
- if (hdmi.ip_data.irq < 0) {
- DSSERR("platform_get_irq failed\n");
- return -ENODEV;
- }
-
- r = hdmi_get_clocks(pdev);
- if (r) {
- DSSERR("can't get clocks\n");
- return r;
- }
-
- pm_runtime_enable(&pdev->dev);
-
- hdmi.ip_data.core_sys_offset = HDMI_CORE_SYS;
- hdmi.ip_data.core_av_offset = HDMI_CORE_AV;
- hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
- hdmi.ip_data.phy_offset = HDMI_PHY;
-
- hdmi_init_output(pdev);
-
- dss_debugfs_create_file("hdmi", hdmi_dump_regs);
-
- return 0;
-}
-
-static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
-{
- hdmi_uninit_output(pdev);
-
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
- clk_disable_unprepare(hdmi.sys_clk);
-
- dispc_runtime_put();
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- int r;
-
- r = dispc_runtime_get();
- if (r < 0)
- return r;
-
- clk_prepare_enable(hdmi.sys_clk);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
-static struct platform_driver omapdss_hdmihw_driver = {
- .probe = omapdss_hdmihw_probe,
- .remove = __exit_p(omapdss_hdmihw_remove),
- .driver = {
- .name = "omapdss_hdmi",
- .owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
- },
-};
-
-int __init hdmi_init_platform_driver(void)
-{
- return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void __exit hdmi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omapdss_hdmihw_driver);
-}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
new file mode 100644
index 00000000000..b0493768a5d
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -0,0 +1,444 @@
+/*
+ * HDMI driver definition for TI OMAP4 Processor.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDMI_H
+#define _HDMI_H
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+
+/* HDMI Wrapper */
+
+#define HDMI_WP_REVISION 0x0
+#define HDMI_WP_SYSCONFIG 0x10
+#define HDMI_WP_IRQSTATUS_RAW 0x24
+#define HDMI_WP_IRQSTATUS 0x28
+#define HDMI_WP_IRQENABLE_SET 0x2C
+#define HDMI_WP_IRQENABLE_CLR 0x30
+#define HDMI_WP_IRQWAKEEN 0x34
+#define HDMI_WP_PWR_CTRL 0x40
+#define HDMI_WP_DEBOUNCE 0x44
+#define HDMI_WP_VIDEO_CFG 0x50
+#define HDMI_WP_VIDEO_SIZE 0x60
+#define HDMI_WP_VIDEO_TIMING_H 0x68
+#define HDMI_WP_VIDEO_TIMING_V 0x6C
+#define HDMI_WP_WP_CLK 0x70
+#define HDMI_WP_AUDIO_CFG 0x80
+#define HDMI_WP_AUDIO_CFG2 0x84
+#define HDMI_WP_AUDIO_CTRL 0x88
+#define HDMI_WP_AUDIO_DATA 0x8C
+
+/* HDMI WP IRQ flags */
+
+#define HDMI_IRQ_OCP_TIMEOUT (1 << 4)
+#define HDMI_IRQ_AUDIO_FIFO_UNDERFLOW (1 << 8)
+#define HDMI_IRQ_AUDIO_FIFO_OVERFLOW (1 << 9)
+#define HDMI_IRQ_AUDIO_FIFO_SAMPLE_REQ (1 << 10)
+#define HDMI_IRQ_VIDEO_VSYNC (1 << 16)
+#define HDMI_IRQ_VIDEO_FRAME_DONE (1 << 17)
+#define HDMI_IRQ_PHY_LINE5V_ASSERT (1 << 24)
+#define HDMI_IRQ_LINK_CONNECT (1 << 25)
+#define HDMI_IRQ_LINK_DISCONNECT (1 << 26)
+#define HDMI_IRQ_PLL_LOCK (1 << 29)
+#define HDMI_IRQ_PLL_UNLOCK (1 << 30)
+#define HDMI_IRQ_PLL_RECAL (1 << 31)
+
+/* HDMI PLL */
+
+#define PLLCTRL_PLL_CONTROL 0x0
+#define PLLCTRL_PLL_STATUS 0x4
+#define PLLCTRL_PLL_GO 0x8
+#define PLLCTRL_CFG1 0xC
+#define PLLCTRL_CFG2 0x10
+#define PLLCTRL_CFG3 0x14
+#define PLLCTRL_SSC_CFG1 0x18
+#define PLLCTRL_SSC_CFG2 0x1C
+#define PLLCTRL_CFG4 0x20
+
+/* HDMI PHY */
+
+#define HDMI_TXPHY_TX_CTRL 0x0
+#define HDMI_TXPHY_DIGITAL_CTRL 0x4
+#define HDMI_TXPHY_POWER_CTRL 0x8
+#define HDMI_TXPHY_PAD_CFG_CTRL 0xC
+
+enum hdmi_pll_pwr {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_phy_pwr {
+ HDMI_PHYPWRCMD_OFF = 0,
+ HDMI_PHYPWRCMD_LDOON = 1,
+ HDMI_PHYPWRCMD_TXON = 2
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_clk_refsel {
+ HDMI_REFSEL_PCLK = 0,
+ HDMI_REFSEL_REF1 = 1,
+ HDMI_REFSEL_REF2 = 2,
+ HDMI_REFSEL_SYSCLK = 3
+};
+
+enum hdmi_packing_mode {
+ HDMI_PACK_10b_RGB_YUV444 = 0,
+ HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+ HDMI_PACK_20b_YUV422 = 2,
+ HDMI_PACK_ALREADYPACKED = 7
+};
+
+enum hdmi_stereo_channels {
+ HDMI_AUDIO_STEREO_NOCHANNELS = 0,
+ HDMI_AUDIO_STEREO_ONECHANNEL = 1,
+ HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
+ HDMI_AUDIO_STEREO_THREECHANNELS = 3,
+ HDMI_AUDIO_STEREO_FOURCHANNELS = 4
+};
+
+enum hdmi_audio_type {
+ HDMI_AUDIO_TYPE_LPCM = 0,
+ HDMI_AUDIO_TYPE_IEC = 1
+};
+
+enum hdmi_audio_justify {
+ HDMI_AUDIO_JUSTIFY_LEFT = 0,
+ HDMI_AUDIO_JUSTIFY_RIGHT = 1
+};
+
+enum hdmi_audio_sample_order {
+ HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
+ HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
+};
+
+enum hdmi_audio_samples_perword {
+ HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
+ HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_16BITS = 0,
+ HDMI_AUDIO_SAMPLE_24BITS = 1
+};
+
+enum hdmi_audio_transf_mode {
+ HDMI_AUDIO_TRANSF_DMA = 0,
+ HDMI_AUDIO_TRANSF_IRQ = 1
+};
+
+enum hdmi_audio_blk_strt_end_sig {
+ HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
+ HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
+};
+
+enum hdmi_core_audio_layout {
+ HDMI_AUDIO_LAYOUT_2CH = 0,
+ HDMI_AUDIO_LAYOUT_8CH = 1
+};
+
+enum hdmi_core_cts_mode {
+ HDMI_AUDIO_CTS_MODE_HW = 0,
+ HDMI_AUDIO_CTS_MODE_SW = 1
+};
+
+enum hdmi_audio_mclk_mode {
+ HDMI_AUDIO_MCLK_128FS = 0,
+ HDMI_AUDIO_MCLK_256FS = 1,
+ HDMI_AUDIO_MCLK_384FS = 2,
+ HDMI_AUDIO_MCLK_512FS = 3,
+ HDMI_AUDIO_MCLK_768FS = 4,
+ HDMI_AUDIO_MCLK_1024FS = 5,
+ HDMI_AUDIO_MCLK_1152FS = 6,
+ HDMI_AUDIO_MCLK_192FS = 7
+};
+
+/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
+enum hdmi_core_infoframe {
+ HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
+ HDMI_INFOFRAME_AVI_DB1B_NO = 0,
+ HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
+ HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
+ HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
+ HDMI_INFOFRAME_AVI_DB1S_0 = 0,
+ HDMI_INFOFRAME_AVI_DB1S_1 = 1,
+ HDMI_INFOFRAME_AVI_DB1S_2 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
+ HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+ HDMI_INFOFRAME_AVI_DB2M_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2M_43 = 1,
+ HDMI_INFOFRAME_AVI_DB2M_169 = 2,
+ HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
+ HDMI_INFOFRAME_AVI_DB2R_43 = 9,
+ HDMI_INFOFRAME_AVI_DB2R_169 = 10,
+ HDMI_INFOFRAME_AVI_DB2R_149 = 11,
+ HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+ HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
+ HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
+ HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
+ HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
+ HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
+ HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
+ HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
+ HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
+ HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
+ HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
+};
+
+struct hdmi_cm {
+ int code;
+ int mode;
+};
+
+struct hdmi_video_format {
+ enum hdmi_packing_mode packing_mode;
+ u32 y_res; /* Line per panel */
+ u32 x_res; /* pixel per line */
+};
+
+struct hdmi_config {
+ struct omap_video_timings timings;
+ struct hdmi_cm cm;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
+ enum hdmi_clk_refsel refsel;
+};
+
+struct hdmi_audio_format {
+ enum hdmi_stereo_channels stereo_channels;
+ u8 active_chnnls_msk;
+ enum hdmi_audio_type type;
+ enum hdmi_audio_justify justification;
+ enum hdmi_audio_sample_order sample_order;
+ enum hdmi_audio_samples_perword samples_per_word;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
+};
+
+struct hdmi_audio_dma {
+ u8 transfer_size;
+ u8 block_size;
+ enum hdmi_audio_transf_mode mode;
+ u16 fifo_threshold;
+};
+
+struct hdmi_core_audio_i2s_config {
+ u8 in_length_bits;
+ u8 justification;
+ u8 sck_edge_mode;
+ u8 vbit;
+ u8 direction;
+ u8 shift;
+ u8 active_sds;
+};
+
+struct hdmi_core_audio_config {
+ struct hdmi_core_audio_i2s_config i2s_cfg;
+ struct snd_aes_iec958 *iec60958_cfg;
+ bool fs_override;
+ u32 n;
+ u32 cts;
+ u32 aud_par_busclk;
+ enum hdmi_core_audio_layout layout;
+ enum hdmi_core_cts_mode cts_mode;
+ bool use_mclk;
+ enum hdmi_audio_mclk_mode mclk_mode;
+ bool en_acr_pkt;
+ bool en_dsd_audio;
+ bool en_parallel_aud_input;
+ bool en_spdif;
+};
+
+/*
+ * Refer to section 8.2 in HDMI 1.3 specification for
+ * details about infoframe databytes
+ */
+struct hdmi_core_infoframe_avi {
+ /* Y0, Y1 rgb,yCbCr */
+ u8 db1_format;
+ /* A0 Active information Present */
+ u8 db1_active_info;
+ /* B0, B1 Bar info data valid */
+ u8 db1_bar_info_dv;
+ /* S0, S1 scan information */
+ u8 db1_scan_info;
+ /* C0, C1 colorimetry */
+ u8 db2_colorimetry;
+ /* M0, M1 Aspect ratio (4:3, 16:9) */
+ u8 db2_aspect_ratio;
+ /* R0...R3 Active format aspect ratio */
+ u8 db2_active_fmt_ar;
+ /* ITC IT content. */
+ u8 db3_itc;
+ /* EC0, EC1, EC2 Extended colorimetry */
+ u8 db3_ec;
+ /* Q1, Q0 Quantization range */
+ u8 db3_q_range;
+ /* SC1, SC0 Non-uniform picture scaling */
+ u8 db3_nup_scaling;
+ /* VIC0..6 Video format identification */
+ u8 db4_videocode;
+ /* PR0..PR3 Pixel repetition factor */
+ u8 db5_pixel_repeat;
+ /* Line number end of top bar */
+ u16 db6_7_line_eoftop;
+ /* Line number start of bottom bar */
+ u16 db8_9_line_sofbottom;
+ /* Pixel number end of left bar */
+ u16 db10_11_pixel_eofleft;
+ /* Pixel number start of right bar */
+ u16 db12_13_pixel_sofright;
+};
+
+struct hdmi_wp_data {
+ void __iomem *base;
+};
+
+struct hdmi_pll_data {
+ void __iomem *base;
+
+ struct hdmi_pll_info info;
+};
+
+struct hdmi_phy_data {
+ void __iomem *base;
+
+ int irq;
+};
+
+struct hdmi_core_data {
+ void __iomem *base;
+
+ struct hdmi_core_infoframe_avi avi_cfg;
+};
+
+static inline void hdmi_write_reg(void __iomem *base_addr, const u16 idx,
+ u32 val)
+{
+ __raw_writel(val, base_addr + idx);
+}
+
+static inline u32 hdmi_read_reg(void __iomem *base_addr, const u16 idx)
+{
+ return __raw_readl(base_addr + idx);
+}
+
+#define REG_FLD_MOD(base, idx, val, start, end) \
+ hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
+ val, start, end))
+#define REG_GET(base, idx, start, end) \
+ FLD_GET(hdmi_read_reg(base, idx), start, end)
+
+static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
+ const u16 idx, int b2, int b1, u32 val)
+{
+ u32 t = 0;
+ while (val != REG_GET(base_addr, idx, b2, b1)) {
+ udelay(1);
+ if (t++ > 10000)
+ return !val;
+ }
+ return val;
+}
+
+/* HDMI wrapper funcs */
+int hdmi_wp_video_start(struct hdmi_wp_data *wp);
+void hdmi_wp_video_stop(struct hdmi_wp_data *wp);
+void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s);
+u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp);
+void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus);
+void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask);
+void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask);
+int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val);
+int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
+void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_video_format *video_fmt);
+void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings);
+void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings);
+void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param);
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
+
+/* HDMI PLL funcs */
+int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
+void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
+void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
+void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy);
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll);
+
+/* HDMI PHY funcs */
+int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp,
+ struct hdmi_config *cfg);
+void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp);
+void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
+
+/* HDMI common funcs */
+const struct hdmi_config *hdmi_default_timing(void);
+const struct hdmi_config *hdmi_get_timings(int mode, int code);
+struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts);
+int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable);
+int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable);
+void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_audio_format *aud_fmt);
+void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
+ struct hdmi_audio_dma *aud_dma);
+static inline bool hdmi_mode_has_audio(int mode)
+{
+ return mode == HDMI_HDMI ? true : false;
+}
+#endif
+#endif
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/omap2/dss/hdmi4.c
new file mode 100644
index 00000000000..e1400961433
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi4.c
@@ -0,0 +1,696 @@
+/*
+ * HDMI interface DSS driver for TI's OMAP4 family of SoCs.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <video/omapdss.h>
+
+#include "hdmi4_core.h"
+#include "dss.h"
+#include "dss_features.h"
+
+static struct {
+ struct mutex lock;
+ struct platform_device *pdev;
+
+ struct hdmi_wp_data wp;
+ struct hdmi_pll_data pll;
+ struct hdmi_phy_data phy;
+ struct hdmi_core_data core;
+
+ struct hdmi_config cfg;
+
+ struct clk *sys_clk;
+ struct regulator *vdda_hdmi_dac_reg;
+
+ bool core_enabled;
+
+ struct omap_dss_device output;
+} hdmi;
+
+static int hdmi_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static void hdmi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_put\n");
+
+ r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
+}
+
+static int hdmi_init_regulator(void)
+{
+ struct regulator *reg;
+
+ if (hdmi.vdda_hdmi_dac_reg != NULL)
+ return 0;
+
+ reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
+
+ /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(reg))
+ reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+
+ if (IS_ERR(reg)) {
+ DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ hdmi.vdda_hdmi_dac_reg = reg;
+
+ return 0;
+}
+
+static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
+ if (r)
+ return r;
+
+ r = hdmi_runtime_get();
+ if (r)
+ goto err_runtime_get;
+
+ /* Make selection of HDMI in DSS */
+ dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+ hdmi.core_enabled = true;
+
+ return 0;
+
+err_runtime_get:
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+
+ return r;
+}
+
+static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+{
+ hdmi.core_enabled = false;
+
+ hdmi_runtime_put();
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+}
+
+static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct omap_video_timings *p;
+ struct omap_overlay_manager *mgr = hdmi.output.manager;
+ unsigned long phy;
+
+ r = hdmi_power_on_core(dssdev);
+ if (r)
+ return r;
+
+ dss_mgr_disable(mgr);
+
+ p = &hdmi.cfg.timings;
+
+ DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+
+ phy = p->pixel_clock;
+
+ hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
+
+ hdmi_wp_video_stop(&hdmi.wp);
+
+ /* config the PLL and PHY hdmi_set_pll_pwrfirst */
+ r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
+ if (r) {
+ DSSDBG("Failed to lock PLL\n");
+ goto err_pll_enable;
+ }
+
+ r = hdmi_phy_enable(&hdmi.phy, &hdmi.wp, &hdmi.cfg);
+ if (r) {
+ DSSDBG("Failed to start PHY\n");
+ goto err_phy_enable;
+ }
+
+ hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+
+ /* bypass TV gamma table */
+ dispc_enable_gamma_table(0);
+
+ /* tv size */
+ dss_mgr_set_timings(mgr, p);
+
+ r = hdmi_wp_video_start(&hdmi.wp);
+ if (r)
+ goto err_vid_enable;
+
+ r = dss_mgr_enable(mgr);
+ if (r)
+ goto err_mgr_enable;
+
+ return 0;
+
+err_mgr_enable:
+ hdmi_wp_video_stop(&hdmi.wp);
+err_vid_enable:
+ hdmi_phy_disable(&hdmi.phy, &hdmi.wp);
+err_phy_enable:
+ hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+err_pll_enable:
+ hdmi_power_off_core(dssdev);
+ return -EIO;
+}
+
+static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+{
+ struct omap_overlay_manager *mgr = hdmi.output.manager;
+
+ dss_mgr_disable(mgr);
+
+ hdmi_wp_video_stop(&hdmi.wp);
+ hdmi_phy_disable(&hdmi.phy, &hdmi.wp);
+ hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+
+ hdmi_power_off_core(dssdev);
+}
+
+static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct hdmi_cm cm;
+
+ cm = hdmi_get_code(timings);
+ if (cm.code == -1)
+ return -EINVAL;
+
+ return 0;
+
+}
+
+static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct hdmi_cm cm;
+ const struct hdmi_config *t;
+
+ mutex_lock(&hdmi.lock);
+
+ cm = hdmi_get_code(timings);
+ hdmi.cfg.cm = cm;
+
+ t = hdmi_get_timings(cm.mode, cm.code);
+ if (t != NULL) {
+ hdmi.cfg = *t;
+
+ dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ }
+
+ mutex_unlock(&hdmi.lock);
+}
+
+static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ const struct hdmi_config *cfg;
+ struct hdmi_cm cm = hdmi.cfg.cm;
+
+ cfg = hdmi_get_timings(cm.mode, cm.code);
+ if (cfg == NULL)
+ cfg = hdmi_default_timing();
+
+ memcpy(timings, &cfg->timings, sizeof(cfg->timings));
+}
+
+static void hdmi_dump_regs(struct seq_file *s)
+{
+ mutex_lock(&hdmi.lock);
+
+ if (hdmi_runtime_get()) {
+ mutex_unlock(&hdmi.lock);
+ return;
+ }
+
+ hdmi_wp_dump(&hdmi.wp, s);
+ hdmi_pll_dump(&hdmi.pll, s);
+ hdmi_phy_dump(&hdmi.phy, s);
+ hdmi4_core_dump(&hdmi.core, s);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+}
+
+static int read_edid(u8 *buf, int len)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_runtime_get();
+ BUG_ON(r);
+
+ r = hdmi4_read_edid(&hdmi.core, buf, len);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+
+ return r;
+}
+
+static int hdmi_display_enable(struct omap_dss_device *dssdev)
+{
+ struct omap_dss_device *out = &hdmi.output;
+ int r = 0;
+
+ DSSDBG("ENTER hdmi_display_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ if (out == NULL || out->manager == NULL) {
+ DSSERR("failed to enable display: no output/manager\n");
+ r = -ENODEV;
+ goto err0;
+ }
+
+ r = hdmi_power_on_full(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_display_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter hdmi_display_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off_full(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_core_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_power_on_core(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_core_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off_core(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ hdmi.sys_clk = clk;
+
+ return 0;
+}
+
+static int hdmi_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ r = hdmi_init_regulator();
+ if (r)
+ return r;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dst->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void hdmi_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->dst);
+
+ if (dst != dssdev->dst)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static int hdmi_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ bool need_enable;
+ int r;
+
+ need_enable = hdmi.core_enabled == false;
+
+ if (need_enable) {
+ r = hdmi_core_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ r = read_edid(edid, len);
+
+ if (need_enable)
+ hdmi_core_disable(dssdev);
+
+ return r;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_wp_audio_enable(&hdmi.wp, true);
+ if (r)
+ goto err;
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+ hdmi_wp_audio_enable(&hdmi.wp, false);
+}
+
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+ return hdmi4_audio_start(&hdmi.core, &hdmi.wp);
+}
+
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+ hdmi4_audio_stop(&hdmi.core, &hdmi.wp);
+}
+
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+ bool r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_mode_has_audio(hdmi.cfg.cm.mode);
+
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ int r;
+ u32 pclk = hdmi.cfg.timings.pixel_clock;
+
+ mutex_lock(&hdmi.lock);
+
+ if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
+ if (r)
+ goto err;
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+#else
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+ return false;
+}
+
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ return -EPERM;
+}
+#endif
+
+static const struct omapdss_hdmi_ops hdmi_ops = {
+ .connect = hdmi_connect,
+ .disconnect = hdmi_disconnect,
+
+ .enable = hdmi_display_enable,
+ .disable = hdmi_display_disable,
+
+ .check_timings = hdmi_display_check_timing,
+ .set_timings = hdmi_display_set_timing,
+ .get_timings = hdmi_display_get_timings,
+
+ .read_edid = hdmi_read_edid,
+
+ .audio_enable = hdmi_audio_enable,
+ .audio_disable = hdmi_audio_disable,
+ .audio_start = hdmi_audio_start,
+ .audio_stop = hdmi_audio_stop,
+ .audio_supported = hdmi_audio_supported,
+ .audio_config = hdmi_audio_config,
+};
+
+static void hdmi_init_output(struct platform_device *pdev)
+{
+ struct omap_dss_device *out = &hdmi.output;
+
+ out->dev = &pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops.hdmi = &hdmi_ops;
+ out->owner = THIS_MODULE;
+
+ omapdss_register_output(out);
+}
+
+static void __exit hdmi_uninit_output(struct platform_device *pdev)
+{
+ struct omap_dss_device *out = &hdmi.output;
+
+ omapdss_unregister_output(out);
+}
+
+/* HDMI HW IP initialisation */
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
+{
+ int r;
+
+ hdmi.pdev = pdev;
+
+ mutex_init(&hdmi.lock);
+
+ r = hdmi_wp_init(pdev, &hdmi.wp);
+ if (r)
+ return r;
+
+ r = hdmi_pll_init(pdev, &hdmi.pll);
+ if (r)
+ return r;
+
+ r = hdmi_phy_init(pdev, &hdmi.phy);
+ if (r)
+ return r;
+
+ r = hdmi4_core_init(pdev, &hdmi.core);
+ if (r)
+ return r;
+
+ r = hdmi_get_clocks(pdev);
+ if (r) {
+ DSSERR("can't get clocks\n");
+ return r;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ hdmi_init_output(pdev);
+
+ dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+ return 0;
+}
+
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
+{
+ hdmi_uninit_output(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ clk_disable_unprepare(hdmi.sys_clk);
+
+ dispc_runtime_put();
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ return r;
+
+ clk_prepare_enable(hdmi.sys_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static struct platform_driver omapdss_hdmihw_driver = {
+ .probe = omapdss_hdmihw_probe,
+ .remove = __exit_p(omapdss_hdmihw_remove),
+ .driver = {
+ .name = "omapdss_hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+
+int __init hdmi4_init_platform_driver(void)
+{
+ return platform_driver_register(&omapdss_hdmihw_driver);
+}
+
+void __exit hdmi4_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omapdss_hdmihw_driver);
+}
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/hdmi4_core.c
index 3dfe00956a4..5dd5e5489b4 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/hdmi4_core.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
@@ -33,304 +34,19 @@
#include <sound/asoundef.h>
#endif
-#include "ti_hdmi_4xxx_ip.h"
-#include "dss.h"
+#include "hdmi4_core.h"
#include "dss_features.h"
-#define HDMI_IRQ_LINK_CONNECT (1 << 25)
-#define HDMI_IRQ_LINK_DISCONNECT (1 << 26)
+#define HDMI_CORE_AV 0x500
-static inline void hdmi_write_reg(void __iomem *base_addr,
- const u16 idx, u32 val)
+static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
{
- __raw_writel(val, base_addr + idx);
+ return core->base + HDMI_CORE_AV;
}
-static inline u32 hdmi_read_reg(void __iomem *base_addr,
- const u16 idx)
+static int hdmi_core_ddc_init(struct hdmi_core_data *core)
{
- return __raw_readl(base_addr + idx);
-}
-
-static inline void __iomem *hdmi_wp_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp;
-}
-
-static inline void __iomem *hdmi_phy_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->phy_offset;
-}
-
-static inline void __iomem *hdmi_pll_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->pll_offset;
-}
-
-static inline void __iomem *hdmi_av_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->core_av_offset;
-}
-
-static inline void __iomem *hdmi_core_sys_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->core_sys_offset;
-}
-
-static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
- const u16 idx,
- int b2, int b1, u32 val)
-{
- u32 t = 0;
- while (val != REG_GET(base_addr, idx, b2, b1)) {
- udelay(1);
- if (t++ > 10000)
- return !val;
- }
- return val;
-}
-
-static int hdmi_pll_init(struct hdmi_ip_data *ip_data)
-{
- u32 r;
- void __iomem *pll_base = hdmi_pll_base(ip_data);
- struct hdmi_pll_info *fmt = &ip_data->pll_data;
-
- /* PLL start always use manual mode */
- REG_FLD_MOD(pll_base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
-
- r = hdmi_read_reg(pll_base, PLLCTRL_CFG1);
- r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
- r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
-
- hdmi_write_reg(pll_base, PLLCTRL_CFG1, r);
-
- r = hdmi_read_reg(pll_base, PLLCTRL_CFG2);
-
- r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
- r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
- r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
- r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
-
- if (fmt->dcofreq) {
- /* divider programming for frequency beyond 1000Mhz */
- REG_FLD_MOD(pll_base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
- r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
- } else {
- r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
- }
-
- hdmi_write_reg(pll_base, PLLCTRL_CFG2, r);
-
- r = hdmi_read_reg(pll_base, PLLCTRL_CFG4);
- r = FLD_MOD(r, fmt->regm2, 24, 18);
- r = FLD_MOD(r, fmt->regmf, 17, 0);
-
- hdmi_write_reg(pll_base, PLLCTRL_CFG4, r);
-
- /* go now */
- REG_FLD_MOD(pll_base, PLLCTRL_PLL_GO, 0x1, 0, 0);
-
- /* wait for bit change */
- if (hdmi_wait_for_bit_change(pll_base, PLLCTRL_PLL_GO,
- 0, 0, 1) != 1) {
- pr_err("PLL GO bit not set\n");
- return -ETIMEDOUT;
- }
-
- /* Wait till the lock bit is set in PLL status */
- if (hdmi_wait_for_bit_change(pll_base,
- PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- pr_err("cannot lock PLL\n");
- pr_err("CFG1 0x%x\n",
- hdmi_read_reg(pll_base, PLLCTRL_CFG1));
- pr_err("CFG2 0x%x\n",
- hdmi_read_reg(pll_base, PLLCTRL_CFG2));
- pr_err("CFG4 0x%x\n",
- hdmi_read_reg(pll_base, PLLCTRL_CFG4));
- return -ETIMEDOUT;
- }
-
- pr_debug("PLL locked!\n");
-
- return 0;
-}
-
-/* PHY_PWR_CMD */
-static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val)
-{
- /* Return if already the state */
- if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val)
- return 0;
-
- /* Command for power control of HDMI PHY */
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6);
-
- /* Status of the power control of HDMI PHY */
- if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data),
- HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
- pr_err("Failed to set PHY power mode to %d\n", val);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/* PLL_PWR_CMD */
-static int hdmi_set_pll_pwr(struct hdmi_ip_data *ip_data, enum hdmi_pll_pwr val)
-{
- /* Command for power control of HDMI PLL */
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 3, 2);
-
- /* wait till PHY_PWR_STATUS is set */
- if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL,
- 1, 0, val) != val) {
- pr_err("Failed to set PLL_PWR_STATUS\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_pll_reset(struct hdmi_ip_data *ip_data)
-{
- /* SYSRESET controlled by power FSM */
- REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
-
- /* READ 0x0 reset is in progress */
- if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data),
- PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
- pr_err("Failed to sysreset PLL\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data)
-{
- u16 r = 0;
-
- r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
- if (r)
- return r;
-
- r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
- if (r)
- return r;
-
- r = hdmi_pll_reset(ip_data);
- if (r)
- return r;
-
- r = hdmi_pll_init(ip_data);
- if (r)
- return r;
-
- return 0;
-}
-
-void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
-{
- hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
-}
-
-static irqreturn_t hdmi_irq_handler(int irq, void *data)
-{
- struct hdmi_ip_data *ip_data = data;
- void __iomem *wp_base = hdmi_wp_base(ip_data);
- u32 irqstatus;
-
- irqstatus = hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
- hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS, irqstatus);
- /* flush posted write */
- hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-
- if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
- irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
- /*
- * If we get both connect and disconnect interrupts at the same
- * time, turn off the PHY, clear interrupts, and restart, which
- * raises connect interrupt if a cable is connected, or nothing
- * if cable is not connected.
- */
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-
- hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS,
- HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
- /* flush posted write */
- hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
- } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
- } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
- }
-
- return IRQ_HANDLED;
-}
-
-int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
-{
- u16 r = 0;
- void __iomem *phy_base = hdmi_phy_base(ip_data);
-
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_CLR,
- 0xffffffff);
-
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQSTATUS,
- HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-
- r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
- if (r)
- return r;
-
- /*
- * Read address 0 in order to get the SCP reset done completed
- * Dummy access performed to make sure reset is done
- */
- hdmi_read_reg(phy_base, HDMI_TXPHY_TX_CTRL);
-
- /*
- * Write to phy address 0 to configure the clock
- * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
- */
- REG_FLD_MOD(phy_base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
-
- /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
- hdmi_write_reg(phy_base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
-
- /* Setup max LDO voltage */
- REG_FLD_MOD(phy_base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
-
- /* Write to phy address 3 to change the polarity control */
- REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
-
- r = request_threaded_irq(ip_data->irq, NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", ip_data);
- if (r) {
- DSSERR("HDMI IRQ request failed\n");
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
- return r;
- }
-
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_SET,
- HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-
- return 0;
-}
-
-void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
-{
- free_irq(ip_data->irq, ip_data);
-
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-}
-
-static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
-{
- void __iomem *base = hdmi_core_sys_base(ip_data);
+ void __iomem *base = core->base;
/* Turn on CLK for DDC */
REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0);
@@ -370,10 +86,10 @@ static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
return 0;
}
-static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
+static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
u8 *pedid, int ext)
{
- void __iomem *base = hdmi_core_sys_base(ip_data);
+ void __iomem *base = core->base;
u32 i;
char checksum;
u32 offset = 0;
@@ -452,26 +168,25 @@ static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
return 0;
}
-int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data,
- u8 *edid, int len)
+int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
{
int r, l;
if (len < 128)
return -EINVAL;
- r = hdmi_core_ddc_init(ip_data);
+ r = hdmi_core_ddc_init(core);
if (r)
return r;
- r = hdmi_core_ddc_edid(ip_data, edid, 0);
+ r = hdmi_core_ddc_edid(core, edid, 0);
if (r)
return r;
l = 128;
if (len >= 128 * 2 && edid[0x7e] > 0) {
- r = hdmi_core_ddc_edid(ip_data, edid + 0x80, 1);
+ r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
if (r)
return r;
l += 128;
@@ -508,7 +223,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
avi_cfg->db3_nup_scaling = 0;
avi_cfg->db4_videocode = 0;
avi_cfg->db5_pixel_repeat = 0;
- avi_cfg->db6_7_line_eoftop = 0 ;
+ avi_cfg->db6_7_line_eoftop = 0;
avi_cfg->db8_9_line_sofbottom = 0;
avi_cfg->db10_11_pixel_eofleft = 0;
avi_cfg->db12_13_pixel_sofright = 0;
@@ -524,38 +239,39 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
repeat_cfg->generic_pkt_repeat = 0;
}
-static void hdmi_core_powerdown_disable(struct hdmi_ip_data *ip_data)
+static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
{
pr_debug("Enter hdmi_core_powerdown_disable\n");
- REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1, 0x0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
}
-static void hdmi_core_swreset_release(struct hdmi_ip_data *ip_data)
+static void hdmi_core_swreset_release(struct hdmi_core_data *core)
{
pr_debug("Enter hdmi_core_swreset_release\n");
- REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0);
}
-static void hdmi_core_swreset_assert(struct hdmi_ip_data *ip_data)
+static void hdmi_core_swreset_assert(struct hdmi_core_data *core)
{
pr_debug("Enter hdmi_core_swreset_assert\n");
- REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0);
}
/* HDMI_CORE_VIDEO_CONFIG */
-static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
+static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_video_config *cfg)
{
u32 r = 0;
- void __iomem *core_sys_base = hdmi_core_sys_base(ip_data);
+ void __iomem *core_sys_base = core->base;
+ void __iomem *core_av_base = hdmi_av_base(core);
/* sys_ctrl1 default configuration not tunable */
- r = hdmi_read_reg(core_sys_base, HDMI_CORE_CTRL1);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
- hdmi_write_reg(core_sys_base, HDMI_CORE_CTRL1, r);
+ r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS, 2, 2);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE, 1, 1);
+ hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1, r);
REG_FLD_MOD(core_sys_base,
HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
@@ -574,23 +290,23 @@ static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r);
/* HDMI_Ctrl */
- r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL);
+ r = hdmi_read_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL);
r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
- hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL, r);
+ hdmi_write_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL, r);
/* TMDS_CTRL */
REG_FLD_MOD(core_sys_base,
HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
}
-static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data)
+static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core)
{
u32 val;
char sum = 0, checksum = 0;
- void __iomem *av_base = hdmi_av_base(ip_data);
- struct hdmi_core_infoframe_avi info_avi = ip_data->avi_cfg;
+ void __iomem *av_base = hdmi_av_base(core);
+ struct hdmi_core_infoframe_avi info_avi = core->avi_cfg;
sum += 0x82 + 0x002 + 0x00D;
hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082);
@@ -661,160 +377,64 @@ static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data)
hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum);
}
-static void hdmi_core_av_packet_config(struct hdmi_ip_data *ip_data,
+static void hdmi_core_av_packet_config(struct hdmi_core_data *core,
struct hdmi_core_packet_enable_repeat repeat_cfg)
{
/* enable/repeat the infoframe */
- hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL1,
+ hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL1,
(repeat_cfg.audio_pkt << 5) |
(repeat_cfg.audio_pkt_repeat << 4) |
(repeat_cfg.avi_infoframe << 1) |
(repeat_cfg.avi_infoframe_repeat));
/* enable/repeat the packet */
- hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL2,
+ hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL2,
(repeat_cfg.gen_cntrl_pkt << 3) |
(repeat_cfg.gen_cntrl_pkt_repeat << 2) |
(repeat_cfg.generic_pkt << 1) |
(repeat_cfg.generic_pkt_repeat));
}
-static void hdmi_wp_init(struct omap_video_timings *timings,
- struct hdmi_video_format *video_fmt)
-{
- pr_debug("Enter hdmi_wp_init\n");
-
- timings->hbp = 0;
- timings->hfp = 0;
- timings->hsw = 0;
- timings->vbp = 0;
- timings->vfp = 0;
- timings->vsw = 0;
-
- video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = 0;
- video_fmt->x_res = 0;
-
-}
-
-int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
- return 0;
-}
-
-void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
-}
-
-static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
-{
- pr_debug("Enter hdmi_wp_video_init_format\n");
-
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-}
-
-static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data,
- struct hdmi_video_format *video_fmt)
-{
- u32 l = 0;
-
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG,
- video_fmt->packing_mode, 10, 8);
-
- l |= FLD_VAL(video_fmt->y_res, 31, 16);
- l |= FLD_VAL(video_fmt->x_res, 15, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_SIZE, l);
-}
-
-static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data)
-{
- u32 r;
- bool vsync_pol, hsync_pol;
- pr_debug("Enter hdmi_wp_video_config_interface\n");
-
- vsync_pol = ip_data->cfg.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
- r = FLD_MOD(r, vsync_pol, 7, 7);
- r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3);
- r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r);
-}
-
-static void hdmi_wp_video_config_timing(struct hdmi_ip_data *ip_data,
- struct omap_video_timings *timings)
-{
- u32 timing_h = 0;
- u32 timing_v = 0;
-
- pr_debug("Enter hdmi_wp_video_config_timing\n");
-
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw, 7, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_H, timing_h);
-
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_V, timing_v);
-}
-
-void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
+void hdmi4_configure(struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
struct omap_video_timings video_timing;
struct hdmi_video_format video_format;
/* HDMI core */
- struct hdmi_core_infoframe_avi *avi_cfg = &ip_data->avi_cfg;
+ struct hdmi_core_infoframe_avi *avi_cfg = &core->avi_cfg;
struct hdmi_core_video_config v_core_cfg;
struct hdmi_core_packet_enable_repeat repeat_cfg;
- struct hdmi_config *cfg = &ip_data->cfg;
-
- hdmi_wp_init(&video_timing, &video_format);
hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
- hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
- hdmi_wp_video_config_timing(ip_data, &video_timing);
+ hdmi_wp_video_config_timing(wp, &video_timing);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
- hdmi_wp_video_config_format(ip_data, &video_format);
+ hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(ip_data);
+ hdmi_wp_video_config_interface(wp, &video_timing);
/*
* configure core video part
* set software reset in the core
*/
- hdmi_core_swreset_assert(ip_data);
+ hdmi_core_swreset_assert(core);
/* power down off */
- hdmi_core_powerdown_disable(ip_data);
+ hdmi_core_powerdown_disable(core);
v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
v_core_cfg.hdmi_dvi = cfg->cm.mode;
- hdmi_core_video_config(ip_data, &v_core_cfg);
+ hdmi_core_video_config(core, &v_core_cfg);
/* release software reset in the core */
- hdmi_core_swreset_release(ip_data);
+ hdmi_core_swreset_release(core);
/*
* configure packet
@@ -839,7 +459,7 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
avi_cfg->db10_11_pixel_eofleft = 0;
avi_cfg->db12_13_pixel_sofright = 0;
- hdmi_core_aux_infoframe_avi_config(ip_data);
+ hdmi_core_aux_infoframe_avi_config(core);
/* enable/repeat the infoframe */
repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
@@ -847,65 +467,30 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
/* wakeup */
repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
- hdmi_core_av_packet_config(ip_data, repeat_cfg);
-}
-
-void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_wp_base(ip_data), r))
-
- DUMPREG(HDMI_WP_REVISION);
- DUMPREG(HDMI_WP_SYSCONFIG);
- DUMPREG(HDMI_WP_IRQSTATUS_RAW);
- DUMPREG(HDMI_WP_IRQSTATUS);
- DUMPREG(HDMI_WP_PWR_CTRL);
- DUMPREG(HDMI_WP_IRQENABLE_SET);
- DUMPREG(HDMI_WP_VIDEO_CFG);
- DUMPREG(HDMI_WP_VIDEO_SIZE);
- DUMPREG(HDMI_WP_VIDEO_TIMING_H);
- DUMPREG(HDMI_WP_VIDEO_TIMING_V);
- DUMPREG(HDMI_WP_WP_CLK);
- DUMPREG(HDMI_WP_AUDIO_CFG);
- DUMPREG(HDMI_WP_AUDIO_CFG2);
- DUMPREG(HDMI_WP_AUDIO_CTRL);
- DUMPREG(HDMI_WP_AUDIO_DATA);
-}
-
-void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_pll_base(ip_data), r))
-
- DUMPPLL(PLLCTRL_PLL_CONTROL);
- DUMPPLL(PLLCTRL_PLL_STATUS);
- DUMPPLL(PLLCTRL_PLL_GO);
- DUMPPLL(PLLCTRL_CFG1);
- DUMPPLL(PLLCTRL_CFG2);
- DUMPPLL(PLLCTRL_CFG3);
- DUMPPLL(PLLCTRL_CFG4);
+ hdmi_core_av_packet_config(core, repeat_cfg);
}
-void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
+void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
int i;
#define CORE_REG(i, name) name(i)
#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+ hdmi_read_reg(core->base, r))
#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_av_base(ip_data), r))
+ hdmi_read_reg(hdmi_av_base(core), r))
#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
(i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
- hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
+ hdmi_read_reg(hdmi_av_base(core), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDH);
DUMPCORE(HDMI_CORE_SYS_DEV_REV);
DUMPCORE(HDMI_CORE_SYS_SRST);
- DUMPCORE(HDMI_CORE_CTRL1);
+ DUMPCORE(HDMI_CORE_SYS_SYS_CTRL1);
DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+ DUMPCORE(HDMI_CORE_SYS_SYS_CTRL3);
DUMPCORE(HDMI_CORE_SYS_DE_DLY);
DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
DUMPCORE(HDMI_CORE_SYS_DE_TOP);
@@ -913,14 +498,58 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
DUMPCORE(HDMI_CORE_SYS_DE_LINL);
DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
+ DUMPCORE(HDMI_CORE_SYS_HRES_L);
+ DUMPCORE(HDMI_CORE_SYS_HRES_H);
+ DUMPCORE(HDMI_CORE_SYS_VRES_L);
+ DUMPCORE(HDMI_CORE_SYS_VRES_H);
+ DUMPCORE(HDMI_CORE_SYS_IADJUST);
+ DUMPCORE(HDMI_CORE_SYS_POLDETECT);
+ DUMPCORE(HDMI_CORE_SYS_HWIDTH1);
+ DUMPCORE(HDMI_CORE_SYS_HWIDTH2);
+ DUMPCORE(HDMI_CORE_SYS_VWIDTH);
+ DUMPCORE(HDMI_CORE_SYS_VID_CTRL);
DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
DUMPCORE(HDMI_CORE_SYS_VID_MODE);
+ DUMPCORE(HDMI_CORE_SYS_VID_BLANK1);
+ DUMPCORE(HDMI_CORE_SYS_VID_BLANK2);
+ DUMPCORE(HDMI_CORE_SYS_VID_BLANK3);
+ DUMPCORE(HDMI_CORE_SYS_DC_HEADER);
+ DUMPCORE(HDMI_CORE_SYS_VID_DITHER);
+ DUMPCORE(HDMI_CORE_SYS_RGB2XVYCC_CT);
+ DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_LOW);
+ DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_UP);
+ DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_LOW);
+ DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_UP);
+ DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_LOW);
+ DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_UP);
DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
DUMPCORE(HDMI_CORE_SYS_INTR1);
DUMPCORE(HDMI_CORE_SYS_INTR2);
DUMPCORE(HDMI_CORE_SYS_INTR3);
DUMPCORE(HDMI_CORE_SYS_INTR4);
- DUMPCORE(HDMI_CORE_SYS_UMASK1);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK1);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK2);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK3);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK4);
+ DUMPCORE(HDMI_CORE_SYS_INTR_CTRL);
DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
DUMPCORE(HDMI_CORE_DDC_ADDR);
@@ -1009,60 +638,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
}
-void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_phy_base(ip_data), r))
-
- DUMPPHY(HDMI_TXPHY_TX_CTRL);
- DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
- DUMPPHY(HDMI_TXPHY_POWER_CTRL);
- DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
-}
-
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_format *aud_fmt)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_format\n");
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG);
- r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
- r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
- r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
- r = FLD_MOD(r, aud_fmt->type, 4, 4);
- r = FLD_MOD(r, aud_fmt->justification, 3, 3);
- r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
- r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
- r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
-}
-
-static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_dma *aud_dma)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_dma\n");
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2);
- r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
- r = FLD_MOD(r, aud_dma->block_size, 7, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2, r);
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL);
- r = FLD_MOD(r, aud_dma->mode, 9, 9);
- r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
-}
-
-static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
+static void hdmi_core_audio_config(struct hdmi_core_data *core,
struct hdmi_core_audio_config *cfg)
{
u32 r;
- void __iomem *av_base = hdmi_av_base(ip_data);
+ void __iomem *av_base = hdmi_av_base(core);
/*
* Parameters for generation of Audio Clock Recovery packets
@@ -1157,11 +738,11 @@ static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
}
-static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+static void hdmi_core_audio_infoframe_cfg(struct hdmi_core_data *core,
struct snd_cea_861_aud_if *info_aud)
{
u8 sum = 0, checksum = 0;
- void __iomem *av_base = hdmi_av_base(ip_data);
+ void __iomem *av_base = hdmi_av_base(core);
/*
* Set audio info frame type, version and length as
@@ -1207,20 +788,20 @@ static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
*/
}
-int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
- struct omap_dss_audio *audio)
+int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+ struct omap_dss_audio *audio, u32 pclk)
{
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
- struct hdmi_core_audio_config core;
+ struct hdmi_core_audio_config acore;
int err, n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
- if (!audio || !audio->iec || !audio->cea || !ip_data)
+ if (!audio || !audio->iec || !audio->cea || !core)
return -EINVAL;
- core.iec60958_cfg = audio->iec;
+ acore.iec60958_cfg = audio->iec;
/*
* In the IEC-60958 status word, check if the audio sample word length
* is 16-bit as several optimizations can be performed in such case.
@@ -1231,22 +812,22 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
/* I2S configuration. See Philips' specification */
if (word_length_16b)
- core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
else
- core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
* The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
- core.i2s_cfg.in_length_bits = audio->iec->status[4]
+ acore.i2s_cfg.in_length_bits = audio->iec->status[4]
& IEC958_AES4_CON_WORDLEN;
if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
- core.i2s_cfg.in_length_bits++;
- core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
- core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
- core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
- core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+ acore.i2s_cfg.in_length_bits++;
+ acore.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+ acore.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+ acore.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+ acore.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
/* convert sample frequency to a number */
switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
@@ -1275,23 +856,23 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
return -EINVAL;
}
- err = hdmi_compute_acr(fs_nr, &n, &cts);
+ err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
- core.n = n;
- core.cts = cts;
+ acore.n = n;
+ acore.cts = cts;
if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
- core.aud_par_busclk = 0;
- core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+ acore.aud_par_busclk = 0;
+ acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+ acore.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
} else {
- core.aud_par_busclk = (((128 * 31) - 1) << 8);
- core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
- core.use_mclk = true;
+ acore.aud_par_busclk = (((128 * 31) - 1) << 8);
+ acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+ acore.use_mclk = true;
}
- if (core.use_mclk)
- core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+ if (acore.use_mclk)
+ acore.mclk_mode = HDMI_AUDIO_MCLK_128FS;
/* Audio channels settings */
channel_count = (audio->cea->db1_ct_cc &
@@ -1329,25 +910,25 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
*/
if (channel_count == 2) {
audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
- core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
- core.layout = HDMI_AUDIO_LAYOUT_2CH;
+ acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+ acore.layout = HDMI_AUDIO_LAYOUT_2CH;
} else {
audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
- core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+ acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
HDMI_AUDIO_I2S_SD3_EN;
- core.layout = HDMI_AUDIO_LAYOUT_8CH;
+ acore.layout = HDMI_AUDIO_LAYOUT_8CH;
}
- core.en_spdif = false;
+ acore.en_spdif = false;
/* use sample frequency from channel status word */
- core.fs_override = true;
+ acore.fs_override = true;
/* enable ACR packets */
- core.en_acr_pkt = true;
+ acore.en_acr_pkt = true;
/* disable direct streaming digital audio */
- core.en_dsd_audio = false;
+ acore.en_dsd_audio = false;
/* use parallel audio interface */
- core.en_parallel_aud_input = true;
+ acore.en_parallel_aud_input = true;
/* DMA settings */
if (word_length_16b)
@@ -1374,49 +955,37 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
/* configure DMA and audio FIFO format*/
- ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
- ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+ hdmi_wp_audio_config_dma(wp, &audio_dma);
+ hdmi_wp_audio_config_format(wp, &audio_format);
/* configure the core*/
- ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+ hdmi_core_audio_config(core, &acore);
/* configure CEA 861 audio infoframe*/
- ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
+ hdmi_core_audio_infoframe_cfg(core, audio->cea);
return 0;
}
-int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
{
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, true, 31, 31);
- return 0;
-}
+ REG_FLD_MOD(hdmi_av_base(core),
+ HDMI_CORE_AV_AUD_MODE, true, 0, 0);
-void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, false, 31, 31);
-}
+ hdmi_wp_audio_core_req_enable(wp, true);
-int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, true, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, true, 30, 30);
return 0;
}
-void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
{
- REG_FLD_MOD(hdmi_av_base(ip_data),
+ REG_FLD_MOD(hdmi_av_base(core),
HDMI_CORE_AV_AUD_MODE, false, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, false, 30, 30);
+
+ hdmi_wp_audio_core_req_enable(wp, false);
}
-int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
+int hdmi4_audio_get_dma_port(u32 *offset, u32 *size)
{
if (!offset || !size)
return -EINVAL;
@@ -1424,4 +993,42 @@ int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
*size = 4;
return 0;
}
+
#endif
+
+#define CORE_OFFSET 0x400
+#define CORE_SIZE 0xc00
+
+int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_core");
+ if (!res) {
+ DSSDBG("can't get CORE mem resource by name\n");
+ /*
+ * if hwmod/DT doesn't have the memory resource information
+ * split into HDMI sub blocks by name, we try again by getting
+ * the platform's first resource. this code will be removed when
+ * the driver can get the mem resources by name
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get CORE mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + CORE_OFFSET;
+ temp_res.end = temp_res.start + CORE_SIZE - 1;
+ res = &temp_res;
+ }
+
+ core->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!core->base) {
+ DSSERR("can't ioremap CORE\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/hdmi4_core.h
index 6ef2f929a76..bb646896fa8 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/hdmi4_core.h
@@ -1,7 +1,5 @@
/*
- * ti_hdmi_4xxx_ip.h
- *
- * HDMI header definition for DM81xx, DM38xx, TI OMAP4 etc processors.
+ * HDMI header definition for OMAP4 HDMI core IP
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
*
@@ -18,41 +16,22 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _HDMI_TI_4xxx_H_
-#define _HDMI_TI_4xxx_H_
-
-#include <linux/string.h>
-#include <video/omapdss.h>
-#include "ti_hdmi.h"
-
-/* HDMI Wrapper */
+#ifndef _HDMI4_CORE_H_
+#define _HDMI4_CORE_H_
-#define HDMI_WP_REVISION 0x0
-#define HDMI_WP_SYSCONFIG 0x10
-#define HDMI_WP_IRQSTATUS_RAW 0x24
-#define HDMI_WP_IRQSTATUS 0x28
-#define HDMI_WP_PWR_CTRL 0x40
-#define HDMI_WP_IRQENABLE_SET 0x2C
-#define HDMI_WP_IRQENABLE_CLR 0x30
-#define HDMI_WP_VIDEO_CFG 0x50
-#define HDMI_WP_VIDEO_SIZE 0x60
-#define HDMI_WP_VIDEO_TIMING_H 0x68
-#define HDMI_WP_VIDEO_TIMING_V 0x6C
-#define HDMI_WP_WP_CLK 0x70
-#define HDMI_WP_AUDIO_CFG 0x80
-#define HDMI_WP_AUDIO_CFG2 0x84
-#define HDMI_WP_AUDIO_CTRL 0x88
-#define HDMI_WP_AUDIO_DATA 0x8C
+#include "hdmi.h"
-/* HDMI IP Core System */
+/* OMAP4 HDMI IP Core System */
#define HDMI_CORE_SYS_VND_IDL 0x0
#define HDMI_CORE_SYS_DEV_IDL 0x8
#define HDMI_CORE_SYS_DEV_IDH 0xC
#define HDMI_CORE_SYS_DEV_REV 0x10
#define HDMI_CORE_SYS_SRST 0x14
-#define HDMI_CORE_CTRL1 0x20
+#define HDMI_CORE_SYS_SYS_CTRL1 0x20
#define HDMI_CORE_SYS_SYS_STAT 0x24
+#define HDMI_CORE_SYS_SYS_CTRL3 0x28
+#define HDMI_CORE_SYS_DCTL 0x34
#define HDMI_CORE_SYS_DE_DLY 0xC8
#define HDMI_CORE_SYS_DE_CTRL 0xCC
#define HDMI_CORE_SYS_DE_TOP 0xD0
@@ -60,20 +39,65 @@
#define HDMI_CORE_SYS_DE_CNTH 0xDC
#define HDMI_CORE_SYS_DE_LINL 0xE0
#define HDMI_CORE_SYS_DE_LINH_1 0xE4
+#define HDMI_CORE_SYS_HRES_L 0xE8
+#define HDMI_CORE_SYS_HRES_H 0xEC
+#define HDMI_CORE_SYS_VRES_L 0xF0
+#define HDMI_CORE_SYS_VRES_H 0xF4
+#define HDMI_CORE_SYS_IADJUST 0xF8
+#define HDMI_CORE_SYS_POLDETECT 0xFC
+#define HDMI_CORE_SYS_HWIDTH1 0x110
+#define HDMI_CORE_SYS_HWIDTH2 0x114
+#define HDMI_CORE_SYS_VWIDTH 0x11C
+#define HDMI_CORE_SYS_VID_CTRL 0x120
#define HDMI_CORE_SYS_VID_ACEN 0x124
#define HDMI_CORE_SYS_VID_MODE 0x128
+#define HDMI_CORE_SYS_VID_BLANK1 0x12C
+#define HDMI_CORE_SYS_VID_BLANK2 0x130
+#define HDMI_CORE_SYS_VID_BLANK3 0x134
+#define HDMI_CORE_SYS_DC_HEADER 0x138
+#define HDMI_CORE_SYS_VID_DITHER 0x13C
+#define HDMI_CORE_SYS_RGB2XVYCC_CT 0x140
+#define HDMI_CORE_SYS_R2Y_COEFF_LOW 0x144
+#define HDMI_CORE_SYS_R2Y_COEFF_UP 0x148
+#define HDMI_CORE_SYS_G2Y_COEFF_LOW 0x14C
+#define HDMI_CORE_SYS_G2Y_COEFF_UP 0x150
+#define HDMI_CORE_SYS_B2Y_COEFF_LOW 0x154
+#define HDMI_CORE_SYS_B2Y_COEFF_UP 0x158
+#define HDMI_CORE_SYS_R2CB_COEFF_LOW 0x15C
+#define HDMI_CORE_SYS_R2CB_COEFF_UP 0x160
+#define HDMI_CORE_SYS_G2CB_COEFF_LOW 0x164
+#define HDMI_CORE_SYS_G2CB_COEFF_UP 0x168
+#define HDMI_CORE_SYS_B2CB_COEFF_LOW 0x16C
+#define HDMI_CORE_SYS_B2CB_COEFF_UP 0x170
+#define HDMI_CORE_SYS_R2CR_COEFF_LOW 0x174
+#define HDMI_CORE_SYS_R2CR_COEFF_UP 0x178
+#define HDMI_CORE_SYS_G2CR_COEFF_LOW 0x17C
+#define HDMI_CORE_SYS_G2CR_COEFF_UP 0x180
+#define HDMI_CORE_SYS_B2CR_COEFF_LOW 0x184
+#define HDMI_CORE_SYS_B2CR_COEFF_UP 0x188
+#define HDMI_CORE_SYS_RGB_OFFSET_LOW 0x18C
+#define HDMI_CORE_SYS_RGB_OFFSET_UP 0x190
+#define HDMI_CORE_SYS_Y_OFFSET_LOW 0x194
+#define HDMI_CORE_SYS_Y_OFFSET_UP 0x198
+#define HDMI_CORE_SYS_CBCR_OFFSET_LOW 0x19C
+#define HDMI_CORE_SYS_CBCR_OFFSET_UP 0x1A0
#define HDMI_CORE_SYS_INTR_STATE 0x1C0
#define HDMI_CORE_SYS_INTR1 0x1C4
#define HDMI_CORE_SYS_INTR2 0x1C8
#define HDMI_CORE_SYS_INTR3 0x1CC
#define HDMI_CORE_SYS_INTR4 0x1D0
-#define HDMI_CORE_SYS_UMASK1 0x1D4
+#define HDMI_CORE_SYS_INTR_UNMASK1 0x1D4
+#define HDMI_CORE_SYS_INTR_UNMASK2 0x1D8
+#define HDMI_CORE_SYS_INTR_UNMASK3 0x1DC
+#define HDMI_CORE_SYS_INTR_UNMASK4 0x1E0
+#define HDMI_CORE_SYS_INTR_CTRL 0x1E4
#define HDMI_CORE_SYS_TMDS_CTRL 0x208
-#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
-#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
-#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
+/* value definitions for HDMI_CORE_SYS_SYS_CTRL1 fields */
+#define HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC 0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC 0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE 0x1
/* HDMI DDC E-DID */
#define HDMI_CORE_DDC_ADDR 0x3B4
@@ -158,35 +182,6 @@
#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
-/* PLL */
-
-#define PLLCTRL_PLL_CONTROL 0x0
-#define PLLCTRL_PLL_STATUS 0x4
-#define PLLCTRL_PLL_GO 0x8
-#define PLLCTRL_CFG1 0xC
-#define PLLCTRL_CFG2 0x10
-#define PLLCTRL_CFG3 0x14
-#define PLLCTRL_CFG4 0x20
-
-/* HDMI PHY */
-
-#define HDMI_TXPHY_TX_CTRL 0x0
-#define HDMI_TXPHY_DIGITAL_CTRL 0x4
-#define HDMI_TXPHY_POWER_CTRL 0x8
-#define HDMI_TXPHY_PAD_CFG_CTRL 0xC
-
-#define REG_FLD_MOD(base, idx, val, start, end) \
- hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
- val, start, end))
-#define REG_GET(base, idx, start, end) \
- FLD_GET(hdmi_read_reg(base, idx), start, end)
-
-enum hdmi_phy_pwr {
- HDMI_PHYPWRCMD_OFF = 0,
- HDMI_PHYPWRCMD_LDOON = 1,
- HDMI_PHYPWRCMD_TXON = 2
-};
-
enum hdmi_core_inputbus_width {
HDMI_INPUT_8BIT = 0,
HDMI_INPUT_10BIT = 1,
@@ -229,114 +224,6 @@ enum hdmi_core_packet_ctrl {
HDMI_PACKETREPEATOFF = 0
};
-/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
-enum hdmi_core_infoframe {
- HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
- HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
- HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
- HDMI_INFOFRAME_AVI_DB1B_NO = 0,
- HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
- HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
- HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
- HDMI_INFOFRAME_AVI_DB1S_0 = 0,
- HDMI_INFOFRAME_AVI_DB1S_1 = 1,
- HDMI_INFOFRAME_AVI_DB1S_2 = 2,
- HDMI_INFOFRAME_AVI_DB2C_NO = 0,
- HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
- HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
- HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
- HDMI_INFOFRAME_AVI_DB2M_NO = 0,
- HDMI_INFOFRAME_AVI_DB2M_43 = 1,
- HDMI_INFOFRAME_AVI_DB2M_169 = 2,
- HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
- HDMI_INFOFRAME_AVI_DB2R_43 = 9,
- HDMI_INFOFRAME_AVI_DB2R_169 = 10,
- HDMI_INFOFRAME_AVI_DB2R_149 = 11,
- HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
- HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
- HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
- HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
- HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
- HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
- HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
- HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
- HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
- HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
- HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
- HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
- HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
- HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
- HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
- HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
- HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-};
-
-enum hdmi_packing_mode {
- HDMI_PACK_10b_RGB_YUV444 = 0,
- HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
- HDMI_PACK_20b_YUV422 = 2,
- HDMI_PACK_ALREADYPACKED = 7
-};
-
-enum hdmi_core_audio_layout {
- HDMI_AUDIO_LAYOUT_2CH = 0,
- HDMI_AUDIO_LAYOUT_8CH = 1
-};
-
-enum hdmi_core_cts_mode {
- HDMI_AUDIO_CTS_MODE_HW = 0,
- HDMI_AUDIO_CTS_MODE_SW = 1
-};
-
-enum hdmi_stereo_channels {
- HDMI_AUDIO_STEREO_NOCHANNELS = 0,
- HDMI_AUDIO_STEREO_ONECHANNEL = 1,
- HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
- HDMI_AUDIO_STEREO_THREECHANNELS = 3,
- HDMI_AUDIO_STEREO_FOURCHANNELS = 4
-};
-
-enum hdmi_audio_type {
- HDMI_AUDIO_TYPE_LPCM = 0,
- HDMI_AUDIO_TYPE_IEC = 1
-};
-
-enum hdmi_audio_justify {
- HDMI_AUDIO_JUSTIFY_LEFT = 0,
- HDMI_AUDIO_JUSTIFY_RIGHT = 1
-};
-
-enum hdmi_audio_sample_order {
- HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
- HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
-};
-
-enum hdmi_audio_samples_perword {
- HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
- HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
-};
-
-enum hdmi_audio_sample_size {
- HDMI_AUDIO_SAMPLE_16BITS = 0,
- HDMI_AUDIO_SAMPLE_24BITS = 1
-};
-
-enum hdmi_audio_transf_mode {
- HDMI_AUDIO_TRANSF_DMA = 0,
- HDMI_AUDIO_TRANSF_IRQ = 1
-};
-
-enum hdmi_audio_blk_strt_end_sig {
- HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
- HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
-};
-
enum hdmi_audio_i2s_config {
HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
@@ -352,17 +239,6 @@ enum hdmi_audio_i2s_config {
HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
};
-enum hdmi_audio_mclk_mode {
- HDMI_AUDIO_MCLK_128FS = 0,
- HDMI_AUDIO_MCLK_256FS = 1,
- HDMI_AUDIO_MCLK_384FS = 2,
- HDMI_AUDIO_MCLK_512FS = 3,
- HDMI_AUDIO_MCLK_768FS = 4,
- HDMI_AUDIO_MCLK_1024FS = 5,
- HDMI_AUDIO_MCLK_1152FS = 6,
- HDMI_AUDIO_MCLK_192FS = 7
-};
-
struct hdmi_core_video_config {
enum hdmi_core_inputbus_width ip_bus_width;
enum hdmi_core_dither_trunc op_dither_truc;
@@ -383,55 +259,18 @@ struct hdmi_core_packet_enable_repeat {
u32 generic_pkt_repeat;
};
-struct hdmi_video_format {
- enum hdmi_packing_mode packing_mode;
- u32 y_res; /* Line per panel */
- u32 x_res; /* pixel per line */
-};
-
-struct hdmi_audio_format {
- enum hdmi_stereo_channels stereo_channels;
- u8 active_chnnls_msk;
- enum hdmi_audio_type type;
- enum hdmi_audio_justify justification;
- enum hdmi_audio_sample_order sample_order;
- enum hdmi_audio_samples_perword samples_per_word;
- enum hdmi_audio_sample_size sample_size;
- enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
-};
-
-struct hdmi_audio_dma {
- u8 transfer_size;
- u8 block_size;
- enum hdmi_audio_transf_mode mode;
- u16 fifo_threshold;
-};
-
-struct hdmi_core_audio_i2s_config {
- u8 in_length_bits;
- u8 justification;
- u8 sck_edge_mode;
- u8 vbit;
- u8 direction;
- u8 shift;
- u8 active_sds;
-};
-
-struct hdmi_core_audio_config {
- struct hdmi_core_audio_i2s_config i2s_cfg;
- struct snd_aes_iec958 *iec60958_cfg;
- bool fs_override;
- u32 n;
- u32 cts;
- u32 aud_par_busclk;
- enum hdmi_core_audio_layout layout;
- enum hdmi_core_cts_mode cts_mode;
- bool use_mclk;
- enum hdmi_audio_mclk_mode mclk_mode;
- bool en_acr_pkt;
- bool en_dsd_audio;
- bool en_parallel_aud_input;
- bool en_spdif;
-};
+int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+ struct hdmi_config *cfg);
+void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
+int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
+void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
+int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+ struct omap_dss_audio *audio, u32 pclk);
+int hdmi4_audio_get_dma_port(u32 *offset, u32 *size);
+#endif
#endif
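[Editor's aside, not part of the patch] The prototypes above define the new hdmi4 core interface. As a rough orientation, here is a minimal sketch of the call sequence the hdmi4.c glue is expected to follow; the function name is hypothetical, error handling is trimmed, and the wp/core/cfg setup is assumed to happen elsewhere in the driver (hdmi.h and hdmi4_core.h included).

static int hdmi4_example_enable(struct platform_device *pdev,
				struct hdmi_core_data *core,
				struct hdmi_wp_data *wp,
				struct hdmi_config *cfg)
{
	u8 edid[256];
	int r;

	/* map the core register space (hdmi_core resource, or the fallback) */
	r = hdmi4_core_init(pdev, core);
	if (r)
		return r;

	/* DDC read of EDID block 0, plus block 1 if the EDID advertises it */
	r = hdmi4_read_edid(core, edid, sizeof(edid));
	if (r < 0)
		return r;

	/* program wrapper timings and the core video/AVI-infoframe path */
	hdmi4_configure(core, wp, cfg);

	return 0;
}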
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
new file mode 100644
index 00000000000..5586aaad9d6
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -0,0 +1,423 @@
+
+/*
+ * Logic for the structures below:
+ * the user selects a CEA or VESA timing by specifying the HDMI/DVI code.
+ * There is a correspondence between CEA/VESA timings and codes; please
+ * refer to section 6.3 of the HDMI 1.3 specification for the timing codes.
+ *
+ * In the structures below, cea_vesa_timings corresponds to all OMAP4
+ * supported CEA and VESA timing values. code_cea corresponds to the CEA
+ * code and is used to get the timing from the cea_vesa_timing array.
+ * Similarly with code_vesa. Code_index is used for back mapping: once the
+ * EDID is read from the TV, it is parsed to find the timing values, which
+ * are then mapped to the corresponding CEA or VESA index.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <video/omapdss.h>
+
+#include "hdmi.h"
+
+static const struct hdmi_config cea_timings[] = {
+ {
+ { 640, 480, 25200, 96, 16, 48, 2, 10, 33,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 1, HDMI_HDMI },
+ },
+ {
+ { 720, 480, 27027, 62, 16, 60, 6, 9, 30,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 2, HDMI_HDMI },
+ },
+ {
+ { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 4, HDMI_HDMI },
+ },
+ {
+ { 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ true, },
+ { 5, HDMI_HDMI },
+ },
+ {
+ { 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ true, },
+ { 6, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 16, HDMI_HDMI },
+ },
+ {
+ { 720, 576, 27000, 64, 12, 68, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 17, HDMI_HDMI },
+ },
+ {
+ { 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 19, HDMI_HDMI },
+ },
+ {
+ { 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ true, },
+ { 20, HDMI_HDMI },
+ },
+ {
+ { 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ true, },
+ { 21, HDMI_HDMI },
+ },
+ {
+ { 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 29, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 31, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 32, HDMI_HDMI },
+ },
+ {
+ { 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 35, HDMI_HDMI },
+ },
+ {
+ { 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 37, HDMI_HDMI },
+ },
+};
+
+static const struct hdmi_config vesa_timings[] = {
+/* VESA From Here */
+ {
+ { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 4, HDMI_DVI },
+ },
+ {
+ { 800, 600, 40000, 128, 40, 88, 4, 1, 23,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 9, HDMI_DVI },
+ },
+ {
+ { 848, 480, 33750, 112, 16, 112, 8, 6, 23,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0xE, HDMI_DVI },
+ },
+ {
+ { 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x17, HDMI_DVI },
+ },
+ {
+ { 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x1C, HDMI_DVI },
+ },
+ {
+ { 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x27, HDMI_DVI },
+ },
+ {
+ { 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x20, HDMI_DVI },
+ },
+ {
+ { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x23, HDMI_DVI },
+ },
+ {
+ { 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x10, HDMI_DVI },
+ },
+ {
+ { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x2A, HDMI_DVI },
+ },
+ {
+ { 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x2F, HDMI_DVI },
+ },
+ {
+ { 1680, 1050, 146250, 176, 104, 280, 6, 3, 30,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x3A, HDMI_DVI },
+ },
+ {
+ { 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x51, HDMI_DVI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x52, HDMI_DVI },
+ },
+ {
+ { 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x16, HDMI_DVI },
+ },
+ {
+ { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x29, HDMI_DVI },
+ },
+ {
+ { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x39, HDMI_DVI },
+ },
+ {
+ { 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x1B, HDMI_DVI },
+ },
+ {
+ { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x55, HDMI_DVI },
+ },
+ {
+ { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x44, HDMI_DVI },
+ },
+};
+
+const struct hdmi_config *hdmi_default_timing(void)
+{
+ return &vesa_timings[0];
+}
+
+static const struct hdmi_config *hdmi_find_timing(int code,
+ const struct hdmi_config *timings_arr, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (timings_arr[i].cm.code == code)
+ return &timings_arr[i];
+ }
+
+ return NULL;
+}
+
+const struct hdmi_config *hdmi_get_timings(int mode, int code)
+{
+ const struct hdmi_config *arr;
+ int len;
+
+ if (mode == HDMI_DVI) {
+ arr = vesa_timings;
+ len = ARRAY_SIZE(vesa_timings);
+ } else {
+ arr = cea_timings;
+ len = ARRAY_SIZE(cea_timings);
+ }
+
+ return hdmi_find_timing(code, arr, len);
+}
+
+static bool hdmi_timings_compare(struct omap_video_timings *timing1,
+ const struct omap_video_timings *timing2)
+{
+ int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
+
+ if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
+ DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
+ (timing2->x_res == timing1->x_res) &&
+ (timing2->y_res == timing1->y_res)) {
+
+ timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
+ timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
+ timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
+ timing1_vsync = timing1->vfp + timing1->vsw + timing1->vbp;
+
+ DSSDBG("timing1_hsync = %d timing1_vsync = %d "
+ "timing2_hsync = %d timing2_vsync = %d\n",
+ timing1_hsync, timing1_vsync,
+ timing2_hsync, timing2_vsync);
+
+ if ((timing1_hsync == timing2_hsync) &&
+ (timing1_vsync == timing2_vsync)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
+{
+ int i;
+ struct hdmi_cm cm = {-1};
+ DSSDBG("hdmi_get_code\n");
+
+ for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
+ if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
+ cm = cea_timings[i].cm;
+ goto end;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
+ if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
+ cm = vesa_timings[i].cm;
+ goto end;
+ }
+ }
+
+end:
+ return cm;
+}
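[Editor's aside, not part of the patch] hdmi_get_timings() and hdmi_get_code() are the forward and backward lookups between codes and the timing tables above. A small usage sketch; the caller and the `parsed_timings` variable (timings recovered from an EDID block elsewhere) are hypothetical.

	struct omap_video_timings parsed_timings;	/* filled from EDID elsewhere */
	const struct hdmi_config *cfg;
	struct hdmi_cm cm;

	/* forward lookup: CEA code 16 (1920x1080p60 in the table above), HDMI mode */
	cfg = hdmi_get_timings(HDMI_HDMI, 16);

	/* backward lookup: map the EDID-derived timings to a CEA/VESA code */
	cm = hdmi_get_code(&parsed_timings);
	if (!cfg || cm.code == -1)
		pr_debug("timing not found in the CEA/VESA tables\n");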
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
+{
+ u32 deep_color;
+ bool deep_color_correct = false;
+
+ if (n == NULL || cts == NULL)
+ return -EINVAL;
+
+ /* TODO: When implemented, query deep color mode here. */
+ deep_color = 100;
+
+ /*
+ * When using deep color, the default N value (as in the HDMI
+ * specification) yields a non-integer CTS. Hence, we
+ * modify it while keeping the restrictions described in
+ * section 7.2.1 of the HDMI 1.4a specification.
+ */
+ switch (sample_freq) {
+ case 32000:
+ case 48000:
+ case 96000:
+ case 192000:
+ if (deep_color == 125)
+ if (pclk == 27027 || pclk == 74250)
+ deep_color_correct = true;
+ if (deep_color == 150)
+ if (pclk == 27027)
+ deep_color_correct = true;
+ break;
+ case 44100:
+ case 88200:
+ case 176400:
+ if (deep_color == 125)
+ if (pclk == 27027)
+ deep_color_correct = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (deep_color_correct) {
+ switch (sample_freq) {
+ case 32000:
+ *n = 8192;
+ break;
+ case 44100:
+ *n = 12544;
+ break;
+ case 48000:
+ *n = 8192;
+ break;
+ case 88200:
+ *n = 25088;
+ break;
+ case 96000:
+ *n = 16384;
+ break;
+ case 176400:
+ *n = 50176;
+ break;
+ case 192000:
+ *n = 32768;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_freq) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ case 88200:
+ *n = 12544;
+ break;
+ case 96000:
+ *n = 12288;
+ break;
+ case 176400:
+ *n = 25088;
+ break;
+ case 192000:
+ *n = 24576;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+ *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+
+ return 0;
+}
+#endif
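[Editor's aside, not part of the patch] To make the N/CTS arithmetic in hdmi_compute_acr() concrete, a worked example with assumed inputs: 48 kHz PCM, a 74.25 MHz pixel clock passed as pclk = 74250, and no deep color, so deep_color = 100.

	n   = 6144					/* from the non-deep-color table */
	cts = 74250 * (6144 / 128) * 100 / (48000 / 10)
	    = 74250 * 48 * 100 / 4800
	    = 74250

The result matches the CTS value the HDMI specification lists for this pixel-clock/sample-rate pair.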
diff --git a/drivers/video/omap2/dss/hdmi_phy.c b/drivers/video/omap2/dss/hdmi_phy.c
new file mode 100644
index 00000000000..45acb997ac0
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_phy.c
@@ -0,0 +1,160 @@
+/*
+ * HDMI PHY
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
+{
+#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(phy->base, r))
+
+ DUMPPHY(HDMI_TXPHY_TX_CTRL);
+ DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
+ DUMPPHY(HDMI_TXPHY_POWER_CTRL);
+ DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *data)
+{
+ struct hdmi_wp_data *wp = data;
+ u32 irqstatus;
+
+ irqstatus = hdmi_wp_get_irqstatus(wp);
+ hdmi_wp_set_irqstatus(wp, irqstatus);
+
+ if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
+ irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+ /*
+ * If we get both connect and disconnect interrupts at the same
+ * time, turn off the PHY, clear interrupts, and restart, which
+ * raises connect interrupt if a cable is connected, or nothing
+ * if cable is not connected.
+ */
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+
+ hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
+ HDMI_IRQ_LINK_DISCONNECT);
+
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+ } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
+ } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp,
+ struct hdmi_config *cfg)
+{
+ u16 r = 0;
+ u32 irqstatus;
+
+ hdmi_wp_clear_irqenable(wp, 0xffffffff);
+
+ irqstatus = hdmi_wp_get_irqstatus(wp);
+ hdmi_wp_set_irqstatus(wp, irqstatus);
+
+ r = hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+ if (r)
+ return r;
+
+ /*
+ * Read address 0 to check that the SCP reset has completed;
+ * a dummy access performed to make sure the reset is done
+ */
+ hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL);
+
+ /*
+ * Write to phy address 0 to configure the clock:
+ * use HFBITCLK by writing the HDMI_TXPHY_TX_CONTROL_FREQOUT field
+ */
+ REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+ /* Setup max LDO voltage */
+ REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ r = request_threaded_irq(phy->irq, NULL, hdmi_irq_handler,
+ IRQF_ONESHOT, "OMAP HDMI", wp);
+ if (r) {
+ DSSERR("HDMI IRQ request failed\n");
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+ return r;
+ }
+
+ hdmi_wp_set_irqenable(wp,
+ HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
+
+ return 0;
+}
+
+void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp)
+{
+ free_irq(phy->irq, wp);
+
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+}
+
+#define PHY_OFFSET 0x300
+#define PHY_SIZE 0x100
+
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_txphy");
+ if (!res) {
+ DSSDBG("can't get PHY mem resource by name\n");
+ /*
+ * if hwmod/DT doesn't have the memory resource information
+ * split into HDMI sub blocks by name, we try again by getting
+ * the platform's first resource. this code will be removed when
+ * the driver can get the mem resources by name
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get PHY mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + PHY_OFFSET;
+ temp_res.end = temp_res.start + PHY_SIZE - 1;
+ res = &temp_res;
+ }
+
+ phy->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!phy->base) {
+ DSSERR("can't ioremap TX PHY\n");
+ return -ENOMEM;
+ }
+
+ phy->irq = platform_get_irq(pdev, 0);
+ if (phy->irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi_pll.c b/drivers/video/omap2/dss/hdmi_pll.c
new file mode 100644
index 00000000000..d3e6e78c008
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_pll.c
@@ -0,0 +1,230 @@
+/*
+ * HDMI PLL
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+#define HDMI_DEFAULT_REGN 16
+#define HDMI_DEFAULT_REGM2 1
+
+void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
+{
+#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(pll->base, r))
+
+ DUMPPLL(PLLCTRL_PLL_CONTROL);
+ DUMPPLL(PLLCTRL_PLL_STATUS);
+ DUMPPLL(PLLCTRL_PLL_GO);
+ DUMPPLL(PLLCTRL_CFG1);
+ DUMPPLL(PLLCTRL_CFG2);
+ DUMPPLL(PLLCTRL_CFG3);
+ DUMPPLL(PLLCTRL_SSC_CFG1);
+ DUMPPLL(PLLCTRL_SSC_CFG2);
+ DUMPPLL(PLLCTRL_CFG4);
+}
+
+void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy)
+{
+ struct hdmi_pll_info *pi = &pll->info;
+ unsigned long refclk;
+ u32 mf;
+
+ /* use our funky units */
+ clkin /= 10000;
+
+ /*
+ * Input clock is predivided by N + 1,
+ * the output of which is the reference clock
+ */
+
+ pi->regn = HDMI_DEFAULT_REGN;
+
+ refclk = clkin / pi->regn;
+
+ pi->regm2 = HDMI_DEFAULT_REGM2;
+
+ /*
+ * multiplier is pixel_clk/ref_clk
+ * Multiplying by 100 to avoid fractional part removal
+ */
+ pi->regm = phy * pi->regm2 / refclk;
+
+ /*
+ * The fractional multiplier is the remainder of the difference between
+ * the multiplier and the actual phy (required pixel clock), scaled by
+ * 2^18 (262144) and divided by the reference clock
+ */
+ mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
+ pi->regmf = pi->regm2 * mf / refclk;
+
+ /*
+ * Dcofreq should be set to 1 if required pixel clock
+ * is greater than 1000MHz
+ */
+ pi->dcofreq = phy > 1000 * 100;
+ pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
+
+ /* Set the reference clock to sysclk reference */
+ pi->refsel = HDMI_REFSEL_SYSCLK;
+
+ DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
+ DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
+}
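[Editor's aside, not part of the patch] As a sanity check on the divider math in hdmi_pll_compute(), a worked example with assumed inputs: a 38.4 MHz sys_clk as clkin, and a 74.25 MHz pixel clock passed in kHz as phy (the units the hdmi4 glue appears to use for this helper).

	clkin   = 38400000 / 10000            = 3840	/* 10 kHz units */
	refclk  = 3840 / 16                   = 240	/* regn = 16 */
	regm    = 74250 * 1 / 240             = 309	/* regm2 = 1 */
	mf      = (74250 - 309 * 240) * 262144 = 90 * 262144
	regmf   = 1 * (90 * 262144) / 240     = 98304	/* the lost 0.375 as a 2^18 fraction */
	dcofreq = 0					/* 74250 <= 1000 * 100 */
	regsd   = ((309 * 3840 / 10) / (16 * 250) + 5) / 10 = 3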
+
+
+static int hdmi_pll_config(struct hdmi_pll_data *pll)
+{
+ u32 r;
+ struct hdmi_pll_info *fmt = &pll->info;
+
+ /* PLL start always use manual mode */
+ REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(pll->base, PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
+ r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
+ hdmi_write_reg(pll->base, PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(pll->base, PLLCTRL_CFG2);
+
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+ r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
+
+ if (fmt->dcofreq) {
+ /* divider programming for frequency beyond 1000 MHz */
+ REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else {
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+ }
+
+ hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
+ r = FLD_MOD(r, fmt->regm2, 24, 18);
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+ hdmi_write_reg(pll->base, PLLCTRL_CFG4, r);
+
+ /* go now */
+ REG_FLD_MOD(pll->base, PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+ /* wait for bit change */
+ if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
+ 0, 0, 1) != 1) {
+ pr_err("PLL GO bit not set\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait till the lock bit is set in PLL status */
+ if (hdmi_wait_for_bit_change(pll->base,
+ PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+ pr_err("cannot lock PLL\n");
+ pr_err("CFG1 0x%x\n",
+ hdmi_read_reg(pll->base, PLLCTRL_CFG1));
+ pr_err("CFG2 0x%x\n",
+ hdmi_read_reg(pll->base, PLLCTRL_CFG2));
+ pr_err("CFG4 0x%x\n",
+ hdmi_read_reg(pll->base, PLLCTRL_CFG4));
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("PLL locked!\n");
+
+ return 0;
+}
+
+static int hdmi_pll_reset(struct hdmi_pll_data *pll)
+{
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* a status read of 0x0 means the reset is still in progress */
+ if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
+ != 1) {
+ pr_err("Failed to sysreset PLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+{
+ u16 r = 0;
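+
+ /*
+ * Bring-up order: power the PLL down (ALLOFF), power it back up
+ * with all clocks (BOTHON_ALLCLKS), issue a SYSRESET, then program
+ * and lock it with the precomputed settings.
+ */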
+
+ r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+ if (r)
+ return r;
+
+ r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+ if (r)
+ return r;
+
+ r = hdmi_pll_reset(pll);
+ if (r)
+ return r;
+
+ r = hdmi_pll_config(pll);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+{
+ hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+}
+
+#define PLL_OFFSET 0x200
+#define PLL_SIZE 0x100
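+
+/*
+ * Used by the fallback below when only a single combined HDMI memory
+ * resource is available: the PLL control block is assumed to start
+ * PLL_OFFSET bytes into that region and to span PLL_SIZE bytes.
+ */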
+
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_pllctrl");
+ if (!res) {
+ DSSDBG("can't get PLL mem resource by name\n");
+ /*
+ * If hwmod/DT doesn't describe the memory resources split into
+ * HDMI sub-blocks by name, fall back to the platform's first
+ * resource. This fallback can be removed once the driver can
+ * always get the memory resources by name.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get PLL mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + PLL_OFFSET;
+ temp_res.end = temp_res.start + PLL_SIZE - 1;
+ res = &temp_res;
+ }
+
+ pll->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!pll->base) {
+ DSSERR("can't ioremap PLLCTRL\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/omap2/dss/hdmi_wp.c
new file mode 100644
index 00000000000..8151d8969a6
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_wp.c
@@ -0,0 +1,271 @@
+/*
+ * HDMI wrapper
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, hdmi_read_reg(wp->base, r))
+
+ DUMPREG(HDMI_WP_REVISION);
+ DUMPREG(HDMI_WP_SYSCONFIG);
+ DUMPREG(HDMI_WP_IRQSTATUS_RAW);
+ DUMPREG(HDMI_WP_IRQSTATUS);
+ DUMPREG(HDMI_WP_IRQENABLE_SET);
+ DUMPREG(HDMI_WP_IRQENABLE_CLR);
+ DUMPREG(HDMI_WP_IRQWAKEEN);
+ DUMPREG(HDMI_WP_PWR_CTRL);
+ DUMPREG(HDMI_WP_DEBOUNCE);
+ DUMPREG(HDMI_WP_VIDEO_CFG);
+ DUMPREG(HDMI_WP_VIDEO_SIZE);
+ DUMPREG(HDMI_WP_VIDEO_TIMING_H);
+ DUMPREG(HDMI_WP_VIDEO_TIMING_V);
+ DUMPREG(HDMI_WP_WP_CLK);
+ DUMPREG(HDMI_WP_AUDIO_CFG);
+ DUMPREG(HDMI_WP_AUDIO_CFG2);
+ DUMPREG(HDMI_WP_AUDIO_CTRL);
+ DUMPREG(HDMI_WP_AUDIO_DATA);
+}
+
+u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp)
+{
+ return hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
+}
+
+void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus)
+{
+ hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, irqstatus);
+ /* flush posted write */
+ hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
+}
+
+void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask)
+{
+ hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_SET, mask);
+}
+
+void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask)
+{
+ hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_CLR, mask);
+}
+
+/* PHY_PWR_CMD */
+int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val)
+{
+ /* Return early if the PHY is already in the requested power state */
+ if (REG_GET(wp->base, HDMI_WP_PWR_CTRL, 5, 4) == val)
+ return 0;
+
+ /* Command for power control of HDMI PHY */
+ REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ /* Status of the power control of HDMI PHY */
+ if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val)
+ != val) {
+ pr_err("Failed to set PHY power mode to %d\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val)
+{
+ /* Command for power control of HDMI PLL */
+ REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PLL_PWR_STATUS reflects the requested state */
+ if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val)
+ != val) {
+ pr_err("Failed to set PLL_PWR_STATUS\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hdmi_wp_video_start(struct hdmi_wp_data *wp)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, true, 31, 31);
+
+ return 0;
+}
+
+void hdmi_wp_video_stop(struct hdmi_wp_data *wp)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, false, 31, 31);
+}
+
+void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_video_format *video_fmt)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, video_fmt->packing_mode,
+ 10, 8);
+
+ l |= FLD_VAL(video_fmt->y_res, 31, 16);
+ l |= FLD_VAL(video_fmt->x_res, 15, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l);
+}
+
+void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings)
+{
+ u32 r;
+ bool vsync_pol, hsync_pol;
+ pr_debug("Enter hdmi_wp_video_config_interface\n");
+
+ vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, vsync_pol, 7, 7);
+ r = FLD_MOD(r, hsync_pol, 6, 6);
+ r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
+}
+
+void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ pr_debug("Enter hdmi_wp_video_config_timing\n");
+
+ timing_h |= FLD_VAL(timings->hbp, 31, 20);
+ timing_h |= FLD_VAL(timings->hfp, 19, 8);
+ timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(timings->vbp, 31, 20);
+ timing_v |= FLD_VAL(timings->vfp, 19, 8);
+ timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
+
+void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param)
+{
+ pr_debug("Enter hdmi_wp_video_init_format\n");
+
+ video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+ video_fmt->y_res = param->timings.y_res;
+ video_fmt->x_res = param->timings.x_res;
+
+ timings->hbp = param->timings.hbp;
+ timings->hfp = param->timings.hfp;
+ timings->hsw = param->timings.hsw;
+ timings->vbp = param->timings.vbp;
+ timings->vfp = param->timings.vfp;
+ timings->vsw = param->timings.vsw;
+ timings->vsync_level = param->timings.vsync_level;
+ timings->hsync_level = param->timings.hsync_level;
+ timings->interlace = param->timings.interlace;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_audio_format *aud_fmt)
+{
+ u32 r;
+
+ DSSDBG("Enter hdmi_wp_audio_config_format\n");
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
+ r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
+ r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+ r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
+ r = FLD_MOD(r, aud_fmt->type, 4, 4);
+ r = FLD_MOD(r, aud_fmt->justification, 3, 3);
+ r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
+ r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
+ r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG, r);
+}
+
+void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
+ struct hdmi_audio_dma *aud_dma)
+{
+ u32 r;
+
+ DSSDBG("Enter hdmi_wp_audio_config_dma\n");
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG2);
+ r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
+ r = FLD_MOD(r, aud_dma->block_size, 7, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG2, r);
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CTRL);
+ r = FLD_MOD(r, aud_dma->mode, 9, 9);
+ r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CTRL, r);
+}
+
+int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+
+ return 0;
+}
+
+int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+
+ return 0;
+}
+#endif
+
+#define WP_SIZE 0x200
+
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_wp");
+ if (!res) {
+ DSSDBG("can't get WP mem resource by name\n");
+ /*
+ * If hwmod/DT doesn't describe the memory resources split into
+ * HDMI sub-blocks by name, fall back to the platform's first
+ * resource. This fallback can be removed once the driver can
+ * always get the memory resources by name.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get WP mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start;
+ temp_res.end = temp_res.start + WP_SIZE - 1;
+ res = &temp_res;
+ }
+
+ wp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!wp->base) {
+ DSSERR("can't ioremap HDMI WP\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
deleted file mode 100644
index 45215f44617..00000000000
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * ti_hdmi.h
- *
- * HDMI driver definition for TI OMAP4, DM81xx, DM38xx Processor.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _TI_HDMI_H
-#define _TI_HDMI_H
-
-struct hdmi_ip_data;
-
-enum hdmi_pll_pwr {
- HDMI_PLLPWRCMD_ALLOFF = 0,
- HDMI_PLLPWRCMD_PLLONLY = 1,
- HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
- HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
-};
-
-enum hdmi_core_hdmi_dvi {
- HDMI_DVI = 0,
- HDMI_HDMI = 1
-};
-
-enum hdmi_clk_refsel {
- HDMI_REFSEL_PCLK = 0,
- HDMI_REFSEL_REF1 = 1,
- HDMI_REFSEL_REF2 = 2,
- HDMI_REFSEL_SYSCLK = 3
-};
-
-struct hdmi_cm {
- int code;
- int mode;
-};
-
-struct hdmi_config {
- struct omap_video_timings timings;
- struct hdmi_cm cm;
-};
-
-/* HDMI PLL structure */
-struct hdmi_pll_info {
- u16 regn;
- u16 regm;
- u32 regmf;
- u16 regm2;
- u16 regsd;
- u16 dcofreq;
- enum hdmi_clk_refsel refsel;
-};
-
-struct ti_hdmi_ip_ops {
-
- void (*video_configure)(struct hdmi_ip_data *ip_data);
-
- int (*phy_enable)(struct hdmi_ip_data *ip_data);
-
- void (*phy_disable)(struct hdmi_ip_data *ip_data);
-
- int (*read_edid)(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-
- int (*pll_enable)(struct hdmi_ip_data *ip_data);
-
- void (*pll_disable)(struct hdmi_ip_data *ip_data);
-
- int (*video_enable)(struct hdmi_ip_data *ip_data);
-
- void (*video_disable)(struct hdmi_ip_data *ip_data);
-
- void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
- void (*dump_core)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
- void (*dump_pll)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
- void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
- int (*audio_enable)(struct hdmi_ip_data *ip_data);
-
- void (*audio_disable)(struct hdmi_ip_data *ip_data);
-
- int (*audio_start)(struct hdmi_ip_data *ip_data);
-
- void (*audio_stop)(struct hdmi_ip_data *ip_data);
-
- int (*audio_config)(struct hdmi_ip_data *ip_data,
- struct omap_dss_audio *audio);
-
- int (*audio_get_dma_port)(u32 *offset, u32 *size);
-#endif
-
-};
-
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_avi {
- /* Y0, Y1 rgb,yCbCr */
- u8 db1_format;
- /* A0 Active information Present */
- u8 db1_active_info;
- /* B0, B1 Bar info data valid */
- u8 db1_bar_info_dv;
- /* S0, S1 scan information */
- u8 db1_scan_info;
- /* C0, C1 colorimetry */
- u8 db2_colorimetry;
- /* M0, M1 Aspect ratio (4:3, 16:9) */
- u8 db2_aspect_ratio;
- /* R0...R3 Active format aspect ratio */
- u8 db2_active_fmt_ar;
- /* ITC IT content. */
- u8 db3_itc;
- /* EC0, EC1, EC2 Extended colorimetry */
- u8 db3_ec;
- /* Q1, Q0 Quantization range */
- u8 db3_q_range;
- /* SC1, SC0 Non-uniform picture scaling */
- u8 db3_nup_scaling;
- /* VIC0..6 Video format identification */
- u8 db4_videocode;
- /* PR0..PR3 Pixel repetition factor */
- u8 db5_pixel_repeat;
- /* Line number end of top bar */
- u16 db6_7_line_eoftop;
- /* Line number start of bottom bar */
- u16 db8_9_line_sofbottom;
- /* Pixel number end of left bar */
- u16 db10_11_pixel_eofleft;
- /* Pixel number start of right bar */
- u16 db12_13_pixel_sofright;
-};
-
-struct hdmi_ip_data {
- void __iomem *base_wp; /* HDMI wrapper */
- unsigned long core_sys_offset;
- unsigned long core_av_offset;
- unsigned long pll_offset;
- unsigned long phy_offset;
- int irq;
- const struct ti_hdmi_ip_ops *ops;
- struct hdmi_config cfg;
- struct hdmi_pll_info pll_data;
- struct hdmi_core_infoframe_avi avi_cfg;
-
- /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
- struct mutex lock;
-};
-int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
-int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
- struct omap_dss_audio *audio);
-int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size);
-#endif
-#endif
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 4b23af6e5c2..367cea8f43f 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -339,8 +339,6 @@ static int p9100_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 3d86bac62d3..4c929957682 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -403,7 +403,7 @@ try_again:
if (rc < 0)
return rc;
- printk(KERN_INFO "fb%d: Apple Platinum frame buffer device\n", info->node);
+ fb_info(info, "Apple Platinum frame buffer device\n");
return 0;
}
@@ -639,7 +639,6 @@ static int platinumfb_probe(struct platform_device* odev)
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
iounmap(pinfo->cmap_regs);
- dev_set_drvdata(&odev->dev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 81354eeab02..3b85b647bc1 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1694,8 +1694,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (retval < 0)
goto err_exit_all;
- printk(KERN_INFO "fb%d: %s frame buffer device, memory = %dK.\n",
- info->node, info->fix.id, pm2fb_fix.smem_len / 1024);
+ fb_info(info, "%s frame buffer device, memory = %dK\n",
+ info->fix.id, pm2fb_fix.smem_len / 1024);
/*
* Our driver data
@@ -1744,7 +1744,6 @@ static void pm2fb_remove(struct pci_dev *pdev)
iounmap(par->v_regs);
release_mem_region(fix->mmio_start, fix->mmio_len);
- pci_set_drvdata(pdev, NULL);
fb_dealloc_cmap(&info->cmap);
kfree(info->pixmap.addr);
framebuffer_release(info);
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 7718faa4a73..4bf3273d043 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -1445,8 +1445,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
retval = -EINVAL;
goto err_exit_all;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info);
return 0;
@@ -1489,7 +1488,6 @@ static void pm3fb_remove(struct pci_dev *dev)
iounmap(par->v_regs);
release_mem_region(fix->mmio_start, fix->mmio_len);
- pci_set_drvdata(dev, NULL);
kfree(info->pixmap.addr);
framebuffer_release(info);
}
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index d1e46cedb1f..914a52ba847 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -212,8 +212,8 @@ static int pmagbafb_probe(struct device *dev)
get_device(dev);
- pr_info("fb%d: %s frame buffer device at %s\n",
- info->node, info->fix.id, dev_name(dev));
+ fb_info(info, "%s frame buffer device at %s\n",
+ info->fix.id, dev_name(dev));
return 0;
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 0e131740032..0822b6f8ddd 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -328,11 +328,10 @@ static int pmagbbfb_probe(struct device *dev)
snprintf(freq1, sizeof(freq1), "%u.%03uMHz",
par->osc1 / 1000, par->osc1 % 1000);
- pr_info("fb%d: %s frame buffer device at %s\n",
- info->node, info->fix.id, dev_name(dev));
- pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n",
- info->node, freq0, par->osc1 ? freq1 : "disabled",
- par->osc1 != 0);
+ fb_info(info, "%s frame buffer device at %s\n",
+ info->fix.id, dev_name(dev));
+ fb_info(info, "Osc0: %s, Osc1: %s, Osc%u selected\n",
+ freq0, par->osc1 ? freq1 : "disabled", par->osc1 != 0);
return 0;
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index df07860563e..167cffff3d4 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -817,24 +817,25 @@ static int pvr2fb_common_init(void)
rev = fb_readl(par->mmio_base + 0x04);
- printk("fb%d: %s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
- fb_info->node, fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
- modememused >> 10, (unsigned long)(fb_info->fix.smem_len >> 10));
- printk("fb%d: Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
- fb_info->node, fb_info->var.xres, fb_info->var.yres,
- fb_info->var.bits_per_pixel,
- get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
- (char *)pvr2_get_param(cables, NULL, cable_type, 3),
- (char *)pvr2_get_param(outputs, NULL, video_output, 3));
+ fb_info(fb_info, "%s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
+ fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
+ modememused >> 10,
+ (unsigned long)(fb_info->fix.smem_len >> 10));
+ fb_info(fb_info, "Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
+ fb_info->var.xres, fb_info->var.yres,
+ fb_info->var.bits_per_pixel,
+ get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
+ (char *)pvr2_get_param(cables, NULL, cable_type, 3),
+ (char *)pvr2_get_param(outputs, NULL, video_output, 3));
#ifdef CONFIG_SH_STORE_QUEUES
- printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
+ fb_notice(fb_info, "registering with SQ API\n");
pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
fb_info->fix.id, PAGE_SHARED);
- printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
- fb_info->node, pvr2fb_map);
+ fb_notice(fb_info, "Mapped video memory to SQ addr 0x%lx\n",
+ pvr2fb_map);
#endif
return 0;
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index aa9bd1f76d6..c95b9e46d48 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -364,7 +364,7 @@ static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset)
static void set_dumb_panel_control(struct fb_info *info)
{
struct pxa168fb_info *fbi = info->par;
- struct pxa168fb_mach_info *mi = fbi->dev->platform_data;
+ struct pxa168fb_mach_info *mi = dev_get_platdata(fbi->dev);
u32 x;
/*
@@ -407,7 +407,7 @@ static int pxa168fb_set_par(struct fb_info *info)
u32 x;
struct pxa168fb_mach_info *mi;
- mi = fbi->dev->platform_data;
+ mi = dev_get_platdata(fbi->dev);
/*
* Set additional mode info.
@@ -609,7 +609,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
struct clk *clk;
int irq, ret;
- mi = pdev->dev.platform_data;
+ mi = dev_get_platdata(&pdev->dev);
if (mi == NULL) {
dev_err(&pdev->dev, "no platform data defined\n");
return -EINVAL;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index eca2de45f7a..1ecd9cec292 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -457,7 +457,7 @@ static int pxafb_adjust_timing(struct pxafb_info *fbi,
static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct pxafb_info *fbi = (struct pxafb_info *)info;
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
int err;
if (inf->fixed_modes) {
@@ -1230,7 +1230,7 @@ static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk)
static void setup_smart_timing(struct pxafb_info *fbi,
struct fb_var_screeninfo *var)
{
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
struct pxafb_mode_info *mode = &inf->modes[0];
unsigned long lclk = clk_get_rate(fbi->clk);
unsigned t1, t2, t3, t4;
@@ -1258,14 +1258,14 @@ static void setup_smart_timing(struct pxafb_info *fbi,
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
if (!inf->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;
}
- inf = fbi->dev->platform_data;
+ inf = dev_get_platdata(fbi->dev);
pr_debug("%s(): task starting\n", __func__);
@@ -1793,7 +1793,7 @@ static struct pxafb_info *pxafb_init_fbinfo(struct device *dev)
{
struct pxafb_info *fbi;
void *addr;
- struct pxafb_mach_info *inf = dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(dev);
/* Alloc the pxafb_info and pseudo_palette in one step */
fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL);
@@ -1855,7 +1855,7 @@ static struct pxafb_info *pxafb_init_fbinfo(struct device *dev)
#ifdef CONFIG_FB_PXA_PARAMETERS
static int parse_opt_mode(struct device *dev, const char *this_opt)
{
- struct pxafb_mach_info *inf = dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(dev);
const char *name = this_opt+5;
unsigned int namelen = strlen(name);
@@ -1914,7 +1914,7 @@ done:
static int parse_opt(struct device *dev, char *this_opt)
{
- struct pxafb_mach_info *inf = dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(dev);
struct pxafb_mode_info *mode = &inf->modes[0];
char s[64];
@@ -2102,7 +2102,7 @@ static int pxafb_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "pxafb_probe\n");
- inf = dev->dev.platform_data;
+ inf = dev_get_platdata(&dev->dev);
ret = -ENOMEM;
fbi = NULL;
if (!inf)
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index d44c7351de0..7487f76f627 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -119,8 +119,7 @@ static int q40fb_probe(struct platform_device *dev)
return -EINVAL;
}
- printk(KERN_INFO "fb%d: Q40 frame buffer alive and kicking !\n",
- info->node);
+ fb_info(info, "Q40 frame buffer alive and kicking !\n");
return 0;
}
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 9536715b5a1..a5514acd2ac 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1185,11 +1185,6 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (rivafb_do_maximize(info, var, nom, den) < 0)
return -EINVAL;
- if (var->xoffset < 0)
- var->xoffset = 0;
- if (var->yoffset < 0)
- var->yoffset = 0;
-
/* truncate xoffset and yoffset to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 05c2dc3d4bc..83433cb0dfb 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -777,8 +777,8 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
printk(KERN_INFO "Epson S1D13XXX FB Driver\n");
/* enable platform-dependent hardware glue, if any */
- if (pdev->dev.platform_data)
- pdata = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev))
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->platform_init_video)
pdata->platform_init_video();
@@ -901,8 +901,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
goto bail;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
@@ -923,8 +922,8 @@ static int s1d13xxxfb_suspend(struct platform_device *dev, pm_message_t state)
lcd_enable(s1dfb, 0);
crt_enable(s1dfb, 0);
- if (dev->dev.platform_data)
- pdata = dev->dev.platform_data;
+ if (dev_get_platdata(&dev->dev))
+ pdata = dev_get_platdata(&dev->dev);
#if 0
if (!s1dfb->disp_save)
@@ -973,8 +972,8 @@ static int s1d13xxxfb_resume(struct platform_device *dev)
while ((s1d13xxxfb_readreg(s1dfb, S1DREG_PS_STATUS) & 0x01))
udelay(10);
- if (dev->dev.platform_data)
- pdata = dev->dev.platform_data;
+ if (dev_get_platdata(&dev->dev))
+ pdata = dev_get_platdata(&dev->dev);
if (s1dfb->regs_save) {
/* will write RO regs, *should* get away with it :) */
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 2e7991c7ca0..62acae2694a 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -1378,7 +1378,7 @@ static int s3c_fb_probe(struct platform_device *pdev)
return -EINVAL;
}
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_err(dev, "no platform data specified\n");
return -EINVAL;
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 21a32adbb8e..81af5a63e9e 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -123,7 +123,7 @@ static int s3c2410fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
struct s3c2410fb_display *display = NULL;
struct s3c2410fb_display *default_display = mach_info->displays +
mach_info->default_display;
@@ -686,7 +686,7 @@ static inline void modify_gpio(void __iomem *reg,
static int s3c2410fb_init_registers(struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
unsigned long flags;
void __iomem *regs = fbi->io;
void __iomem *tpal;
@@ -833,7 +833,7 @@ static int s3c24xxfb_probe(struct platform_device *pdev,
int size;
u32 lcdcon1;
- mach_info = pdev->dev.platform_data;
+ mach_info = dev_get_platdata(&pdev->dev);
if (mach_info == NULL) {
dev_err(&pdev->dev,
"no platform data for lcd, cannot attach\n");
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d838ba82945..968b2997175 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -306,8 +306,8 @@ static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map)
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
- printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n",
- info->node, map->width, map->height, map->depth, map->length);
+ fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
+ map->width, map->height, map->depth, map->length);
return;
}
@@ -476,7 +476,7 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
rv = svga_compute_pll((par->chip == CHIP_365_TRIO3D) ? &s3_trio3d_pll : &s3_pll,
1000000000 / pixclock, &m, &n, &r, info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ fb_err(info, "cannot set requested pixclock, keeping old value\n");
return;
}
@@ -569,7 +569,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
rv = -EINVAL;
if (rv < 0) {
- printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ fb_err(info, "unsupported mode requested\n");
return rv;
}
@@ -587,22 +587,21 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
/* Check whether have enough memory */
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size) {
- printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n",
- info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ fb_err(info, "not enough framebuffer memory (%d kB requested , %u kB available)\n",
+ mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
rv = svga_check_timings (&s3_timing_regs, var, info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ fb_err(info, "invalid timings requested\n");
return rv;
}
rv = svga_compute_pll(&s3_pll, PICOS2KHZ(var->pixclock), &m, &n, &r,
info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: invalid pixclock value requested\n",
- info->node);
+ fb_err(info, "invalid pixclock value requested\n");
return rv;
}
@@ -686,7 +685,7 @@ static int s3fb_set_par(struct fb_info *info)
/* Set the offset register */
- pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
+ fb_dbg(info, "offset register : %d\n", offset_value);
svga_wcrt_multi(par->state.vgabase, s3_offset_regs, offset_value);
if (par->chip != CHIP_357_VIRGE_GX2 &&
@@ -769,7 +768,7 @@ static int s3fb_set_par(struct fb_info *info)
/* Set mode-specific register values */
switch (mode) {
case 0:
- pr_debug("fb%d: text mode\n", info->node);
+ fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
/* Set additional registers like in 8-bit mode */
@@ -780,12 +779,12 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
if (fasttext) {
- pr_debug("fb%d: high speed text mode set\n", info->node);
+ fb_dbg(info, "high speed text mode set\n");
svga_wcrt_mask(par->state.vgabase, 0x31, 0x40, 0x40);
}
break;
case 1:
- pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor\n");
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
/* Set additional registers like in 8-bit mode */
@@ -796,7 +795,7 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 2:
- pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
/* Set additional registers like in 8-bit mode */
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
@@ -806,7 +805,7 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 3:
- pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ fb_dbg(info, "8 bit pseudocolor\n");
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
if (info->var.pixclock > 20000 ||
par->chip == CHIP_357_VIRGE_GX2 ||
@@ -822,7 +821,7 @@ static int s3fb_set_par(struct fb_info *info)
}
break;
case 4:
- pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+ fb_dbg(info, "5/5/5 truecolor\n");
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
svga_wcrt_mask(par->state.vgabase, 0x67, 0x20, 0xF0);
@@ -850,7 +849,7 @@ static int s3fb_set_par(struct fb_info *info)
}
break;
case 5:
- pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ fb_dbg(info, "5/6/5 truecolor\n");
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
svga_wcrt_mask(par->state.vgabase, 0x67, 0x40, 0xF0);
@@ -879,16 +878,16 @@ static int s3fb_set_par(struct fb_info *info)
break;
case 6:
/* VIRGE VX case */
- pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8 truecolor\n");
svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
case 7:
- pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8/8 truecolor\n");
svga_wcrt_mask(par->state.vgabase, 0x50, 0x30, 0x30);
svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
default:
- printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+ fb_err(info, "unsupported mode - bug\n");
return -EINVAL;
}
@@ -991,27 +990,27 @@ static int s3fb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- pr_debug("fb%d: unblank\n", info->node);
+ fb_dbg(info, "unblank\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
- pr_debug("fb%d: blank\n", info->node);
+ fb_dbg(info, "blank\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
- pr_debug("fb%d: hsync\n", info->node);
+ fb_dbg(info, "hsync\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x02, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
- pr_debug("fb%d: vsync\n", info->node);
+ fb_dbg(info, "vsync\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x04, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
- pr_debug("fb%d: sync down\n", info->node);
+ fb_dbg(info, "sync down\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x06, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
@@ -1352,13 +1351,16 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto err_reg_fb;
}
- printk(KERN_INFO "fb%d: %s on %s, %d MB RAM, %d MHz MCLK\n", info->node, info->fix.id,
- pci_name(dev), info->fix.smem_len >> 20, (par->mclk_freq + 500) / 1000);
+ fb_info(info, "%s on %s, %d MB RAM, %d MHz MCLK\n",
+ info->fix.id, pci_name(dev),
+ info->fix.smem_len >> 20, (par->mclk_freq + 500) / 1000);
if (par->chip == CHIP_UNKNOWN)
- printk(KERN_INFO "fb%d: unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
- info->node, vga_rcrt(par->state.vgabase, 0x2d), vga_rcrt(par->state.vgabase, 0x2e),
- vga_rcrt(par->state.vgabase, 0x2f), vga_rcrt(par->state.vgabase, 0x30));
+ fb_info(info, "unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
+ vga_rcrt(par->state.vgabase, 0x2d),
+ vga_rcrt(par->state.vgabase, 0x2e),
+ vga_rcrt(par->state.vgabase, 0x2f),
+ vga_rcrt(par->state.vgabase, 0x30));
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -1424,7 +1426,6 @@ static void s3_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index de76da0c642..580c444ec30 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -1116,7 +1116,7 @@ static struct fb_monspecs monspecs = {
static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
{
- struct sa1100fb_mach_info *inf = dev->platform_data;
+ struct sa1100fb_mach_info *inf = dev_get_platdata(dev);
struct sa1100fb_info *fbi;
unsigned i;
@@ -1201,7 +1201,7 @@ static int sa1100fb_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform LCD data\n");
return -EINVAL;
}
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 741b2395d01..4dbf45f3b21 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2362,12 +2362,6 @@ static void savagefb_remove(struct pci_dev *dev)
kfree(info->pixmap.addr);
pci_release_regions(dev);
framebuffer_release(info);
-
- /*
- * Ensure that the driver data is no longer
- * valid.
- */
- pci_set_drvdata(dev, NULL);
}
}
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 296afae442f..a350209ffbd 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -186,7 +186,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
}
default:
return -EINVAL;
- };
+ }
}
EXPORT_SYMBOL(sbusfb_ioctl_helper);
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index a9ac3ce2d0e..bc74d040899 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -803,8 +803,8 @@ static int sgivwfb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
- printk(KERN_INFO "fb%d: SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
- info->node, sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
+ fb_info(info, "SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
+ sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
return 0;
fail_register_framebuffer:
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index bfe4728480f..9a33ee0413f 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -498,7 +498,7 @@ static void sh_hdmi_video_config(struct sh_hdmi *hdmi)
static void sh_hdmi_audio_config(struct sh_hdmi *hdmi)
{
u8 data;
- struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+ struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);
/*
* [7:4] L/R data swap control
@@ -815,7 +815,7 @@ static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
unsigned long *hdmi_rate, unsigned long *parent_rate)
{
unsigned long target = PICOS2KHZ(mode->pixclock) * 1000, rate_error;
- struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+ struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);
*hdmi_rate = clk_round_rate(hdmi->hdmi_clk, target);
if ((long)*hdmi_rate < 0)
@@ -1271,7 +1271,7 @@ static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
static int __init sh_hdmi_probe(struct platform_device *pdev)
{
- struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
+ struct sh_mobile_hdmi_info *pdata = dev_get_platdata(&pdev->dev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *htop1_res;
int irq = platform_get_irq(pdev, 0), ret;
@@ -1290,7 +1290,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
}
}
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
dev_err(&pdev->dev, "Cannot allocate device data\n");
return -ENOMEM;
@@ -1304,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
if (IS_ERR(hdmi->hdmi_clk)) {
ret = PTR_ERR(hdmi->hdmi_clk);
dev_err(&pdev->dev, "Unable to get clock: %d\n", ret);
- goto egetclk;
+ return ret;
}
/* select register access functions */
@@ -1326,7 +1326,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
goto erate;
}
- ret = clk_enable(hdmi->hdmi_clk);
+ ret = clk_prepare_enable(hdmi->hdmi_clk);
if (ret < 0) {
dev_err(hdmi->dev, "Cannot enable clock: %d\n", ret);
goto erate;
@@ -1404,11 +1404,9 @@ emap_htop1:
emap:
release_mem_region(res->start, resource_size(res));
ereqreg:
- clk_disable(hdmi->hdmi_clk);
+ clk_disable_unprepare(hdmi->hdmi_clk);
erate:
clk_put(hdmi->hdmi_clk);
-egetclk:
- kfree(hdmi);
return ret;
}
@@ -1427,13 +1425,12 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&hdmi->edid_work);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- clk_disable(hdmi->hdmi_clk);
+ clk_disable_unprepare(hdmi->hdmi_clk);
clk_put(hdmi->hdmi_clk);
if (hdmi->htop1)
iounmap(hdmi->htop1);
iounmap(hdmi->base);
release_mem_region(res->start, resource_size(res));
- kfree(hdmi);
return 0;
}
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 0264704a52b..ab85ad6c25e 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -344,7 +344,7 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
if (priv->dot_clk)
- clk_enable(priv->dot_clk);
+ clk_prepare_enable(priv->dot_clk);
pm_runtime_get_sync(priv->dev);
if (priv->meram_dev && priv->meram_dev->pdev)
pm_runtime_get_sync(&priv->meram_dev->pdev->dev);
@@ -358,7 +358,7 @@ static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv)
pm_runtime_put_sync(&priv->meram_dev->pdev->dev);
pm_runtime_put(priv->dev);
if (priv->dot_clk)
- clk_disable(priv->dot_clk);
+ clk_disable_unprepare(priv->dot_clk);
}
}
@@ -574,8 +574,9 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
switch (event) {
case SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT:
/* HDMI plug in */
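+ /* take console_lock() ahead of lock_fb_info(); taking them in the
+ * opposite order risks deadlocking against paths that already hold
+ * the console lock */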
+ console_lock();
if (lock_fb_info(info)) {
- console_lock();
+
ch->display.width = monspec->max_x * 10;
ch->display.height = monspec->max_y * 10;
@@ -594,19 +595,20 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
fb_set_suspend(info, 0);
}
- console_unlock();
+
unlock_fb_info(info);
}
+ console_unlock();
break;
case SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT:
/* HDMI disconnect */
+ console_lock();
if (lock_fb_info(info)) {
- console_lock();
fb_set_suspend(info, 1);
- console_unlock();
unlock_fb_info(info);
}
+ console_unlock();
break;
case SH_MOBILE_LCDC_EVENT_DISPLAY_MODE:
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
index 8d781061305..210f3a02121 100644
--- a/drivers/video/simplefb.c
+++ b/drivers/video/simplefb.c
@@ -66,8 +66,15 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
}
+static void simplefb_destroy(struct fb_info *info)
+{
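+ /*
+ * Called when the last reference to the fb_info goes away; probe
+ * below now maps the framebuffer with ioremap_wc() instead of a
+ * devm helper, so it is unmapped here.
+ */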
+ if (info->screen_base)
+ iounmap(info->screen_base);
+}
+
static struct fb_ops simplefb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = simplefb_destroy,
.fb_setcolreg = simplefb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -132,7 +139,7 @@ static int simplefb_parse_dt(struct platform_device *pdev,
static int simplefb_parse_pd(struct platform_device *pdev,
struct simplefb_params *params)
{
- struct simplefb_platform_data *pd = pdev->dev.platform_data;
+ struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
int i;
params->width = pd->width;
@@ -167,7 +174,7 @@ static int simplefb_probe(struct platform_device *pdev)
return -ENODEV;
ret = -ENODEV;
- if (pdev->dev.platform_data)
+ if (dev_get_platdata(&pdev->dev))
ret = simplefb_parse_pd(pdev, &params);
else if (pdev->dev.of_node)
ret = simplefb_parse_dt(pdev, &params);
@@ -212,17 +219,26 @@ static int simplefb_probe(struct platform_device *pdev)
info->fbops = &simplefb_ops;
info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE;
- info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
- info->fix.smem_len);
+ info->screen_base = ioremap_wc(info->fix.smem_start,
+ info->fix.smem_len);
if (!info->screen_base) {
framebuffer_release(info);
return -ENODEV;
}
info->pseudo_palette = (void *)(info + 1);
+ dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
+ info->fix.smem_start, info->fix.smem_len,
+ info->screen_base);
+ dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n",
+ params.format->name,
+ info->var.xres, info->var.yres,
+ info->var.bits_per_pixel, info->fix.line_length);
+
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+ iounmap(info->screen_base);
framebuffer_release(info);
return ret;
}
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c
index f082ae55c0c..4f26bc28e60 100644
--- a/drivers/video/sis/init.c
+++ b/drivers/video/sis/init.c
@@ -3320,9 +3320,8 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
}
#ifndef GETBITSTR
-#define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l))
-#define GENMASK(mask) BITMASK(1?mask,0?mask)
-#define GETBITS(var,mask) (((var) & GENMASK(mask)) >> (0?mask))
+#define GENBITSMASK(mask) GENMASK(1?mask,0?mask)
+#define GETBITS(var,mask) (((var) & GENBITSMASK(mask)) >> (0?mask))
#define GETBITSTR(val,from,to) ((GETBITS(val,from)) << (0?to))
#endif
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 977e27927a2..22ad028bf12 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -5994,7 +5994,6 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if(!ivideo->sisvga_enabled) {
if(pci_enable_device(pdev)) {
if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(sis_fb_info);
return -EIO;
}
@@ -6211,7 +6210,6 @@ error_3: vfree(ivideo->bios_abase);
pci_dev_put(ivideo->lpcdev);
if(ivideo->nbridge)
pci_dev_put(ivideo->nbridge);
- pci_set_drvdata(pdev, NULL);
if(!ivideo->sisvga_enabled)
pci_disable_device(pdev);
framebuffer_release(sis_fb_info);
@@ -6480,8 +6478,8 @@ error_3: vfree(ivideo->bios_abase);
"disabled");
- printk(KERN_INFO "fb%d: %s frame buffer device version %d.%d.%d\n",
- sis_fb_info->node, ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
+ fb_info(sis_fb_info, "%s frame buffer device version %d.%d.%d\n",
+ ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
printk(KERN_INFO "sisfb: Copyright (C) 2001-2005 Thomas Winischhofer\n");
@@ -6523,8 +6521,6 @@ static void sisfb_remove(struct pci_dev *pdev)
mtrr_del(ivideo->mtrr, ivideo->video_base, ivideo->video_size);
#endif
- pci_set_drvdata(pdev, NULL);
-
/* If device was disabled when starting, disable
* it when quitting.
*/
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 2d4694c6b9e..fefde7c6add 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -824,8 +824,7 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info); /* or platform_set_drvdata(pdev, info) */
return 0;
}
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index e188ada2ffd..d513ed6a49f 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -1147,7 +1147,7 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
fb_destroy_modelist(&info->modelist);
- dev->info = 0;
+ dev->info = NULL;
/* Assume info structure is freed after this point */
framebuffer_release(info);
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
index 44967c8fef2..f4daa59f0a8 100644
--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/ssd1307fb.c
@@ -569,7 +569,7 @@ static struct i2c_driver ssd1307fb_driver = {
.id_table = ssd1307fb_i2c_id,
.driver = {
.name = "ssd1307fb",
- .of_match_table = of_match_ptr(ssd1307fb_of_match),
+ .of_match_table = ssd1307fb_of_match,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 9c00026e3ae..f0cb279ef33 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -706,10 +706,10 @@ static void sstfb_setvgapass( struct fb_info *info, int enable )
fbiinit0 = sst_read (FBIINIT0);
if (par->vgapass) {
sst_write(FBIINIT0, fbiinit0 & ~DIS_VGA_PASSTHROUGH);
- printk(KERN_INFO "fb%d: Enabling VGA pass-through\n", info->node );
+ fb_info(info, "Enabling VGA pass-through\n");
} else {
sst_write(FBIINIT0, fbiinit0 | DIS_VGA_PASSTHROUGH);
- printk(KERN_INFO "fb%d: Disabling VGA pass-through\n", info->node );
+ fb_info(info, "Disabling VGA pass-through\n");
}
pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
}
@@ -1437,8 +1437,8 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_WARNING "sstfb: can't create sysfs entry.\n");
- printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n",
- info->node, fix->id, info->screen_base);
+ fb_info(info, "%s frame buffer device at 0x%p\n",
+ fix->id, info->screen_base);
return 0;
diff --git a/drivers/video/sticore.h b/drivers/video/sticore.h
index addf7b615ef..af1619536ac 100644
--- a/drivers/video/sticore.h
+++ b/drivers/video/sticore.h
@@ -18,6 +18,9 @@
#define STI_FONT_HPROMAN8 1
#define STI_FONT_KANA8 2
+#define ALT_CODE_TYPE_UNKNOWN 0x00 /* alt code type values */
+#define ALT_CODE_TYPE_PA_RISC_64 0x01
+
/* The latency of the STI functions cannot really be reduced by setting
* this to 0; STI doesn't seem to be designed to allow calling a different
* function (or the same function with different arguments) after a
@@ -40,14 +43,6 @@
#define STI_PTR(p) ( virt_to_phys(p) )
#define PTR_STI(p) ( phys_to_virt((unsigned long)p) )
-#define STI_CALL(func, flags, inptr, outptr, glob_cfg) \
- ({ \
- pdc_sti_call( func, STI_PTR(flags), \
- STI_PTR(inptr), \
- STI_PTR(outptr), \
- STI_PTR(glob_cfg)); \
- })
-
#define sti_onscreen_x(sti) (sti->glob_cfg->onscreen_x)
#define sti_onscreen_y(sti) (sti->glob_cfg->onscreen_y)
@@ -56,6 +51,12 @@
#define sti_font_x(sti) (PTR_STI(sti->font)->width)
#define sti_font_y(sti) (PTR_STI(sti->font)->height)
+#ifdef CONFIG_64BIT
+#define STI_LOWMEM (GFP_KERNEL | GFP_DMA)
+#else
+#define STI_LOWMEM (GFP_KERNEL)
+#endif
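+
+/*
+ * STI_LOWMEM is meant as the GFP mask for allocating struct sti_all_data
+ * (declared below), so that its buffers land below 4 GB when a 32-bit
+ * STI ROM is driven from a 64-bit kernel; hence GFP_DMA on 64-bit.
+ */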
+
/* STI function configuration structs */
@@ -306,6 +307,34 @@ struct sti_blkmv_outptr {
};
+/* sti_all_data is an internal struct which needs to be allocated in
+ * low memory (< 4GB) if a 32-bit STI ROM is used on a 64-bit kernel */
+
+struct sti_all_data {
+ struct sti_glob_cfg glob_cfg;
+ struct sti_glob_cfg_ext glob_cfg_ext;
+
+ struct sti_conf_inptr inq_inptr;
+ struct sti_conf_outptr inq_outptr; /* configuration */
+ struct sti_conf_outptr_ext inq_outptr_ext;
+
+ struct sti_init_inptr_ext init_inptr_ext;
+ struct sti_init_inptr init_inptr;
+ struct sti_init_outptr init_outptr;
+
+ struct sti_blkmv_inptr blkmv_inptr;
+ struct sti_blkmv_outptr blkmv_outptr;
+
+ struct sti_font_inptr font_inptr;
+ struct sti_font_outptr font_outptr;
+
+ /* leave as last entries */
+ unsigned long save_addr[1024 / sizeof(unsigned long)];
+ /* min 256 bytes which is STI default, max sti->sti_mem_request */
+ unsigned long sti_mem_addr[256 / sizeof(unsigned long)];
+ /* do not add something below here ! */
+};
+
/* internal generic STI struct */
struct sti_struct {
@@ -330,11 +359,9 @@ struct sti_struct {
region_t regions[STI_REGION_MAX];
unsigned long regions_phys[STI_REGION_MAX];
- struct sti_glob_cfg *glob_cfg;
- struct sti_cooked_font *font; /* ptr to selected font (cooked) */
+ struct sti_glob_cfg *glob_cfg; /* points into sti_all_data */
- struct sti_conf_outptr outptr; /* configuration */
- struct sti_conf_outptr_ext outptr_ext;
+ struct sti_cooked_font *font; /* ptr to selected font (cooked) */
struct pci_dev *pd;
@@ -343,6 +370,9 @@ struct sti_struct {
/* pointer to the fb_info where this STI device is used */
struct fb_info *info;
+
+ /* pointer to all internal data */
+ struct sti_all_data *sti_data;
};
@@ -350,6 +380,14 @@ struct sti_struct {
struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */
+
+/* sticore main function to call STI firmware */
+
+int sti_call(const struct sti_struct *sti, unsigned long func,
+ const void *flags, void *inptr, void *outptr,
+ struct sti_glob_cfg *glob_cfg);
+
+
/* functions to call the STI ROM directly */
void sti_putc(struct sti_struct *sti, int c, int y, int x);
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index 876648e15e9..cfe8a2f905c 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1101,6 +1101,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
var = &info->var;
fb->sti = sti;
+ dev_name = sti->sti_data->inq_outptr.dev_name;
/* store upper 32bits of the graphics id */
fb->id = fb->sti->graphics_id[0];
@@ -1114,11 +1115,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
Since this driver only supports standard mode, we check
if the device name contains the string "DX" and tell the
user how to reconfigure the card. */
- if (strstr(sti->outptr.dev_name, "DX")) {
+ if (strstr(dev_name, "DX")) {
printk(KERN_WARNING
"WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n"
"WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n",
- sti->outptr.dev_name);
+ dev_name);
goto out_err0;
}
/* fall though */
@@ -1130,7 +1131,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
break;
default:
printk(KERN_WARNING "stifb: '%s' (id: 0x%08x) not supported.\n",
- sti->outptr.dev_name, fb->id);
+ dev_name, fb->id);
goto out_err0;
}
@@ -1154,7 +1155,6 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fb->id = S9000_ID_A1659A;
break;
case S9000_ID_TIMBER: /* HP9000/710 Any (may be a grayscale device) */
- dev_name = fb->sti->outptr.dev_name;
if (strstr(dev_name, "GRAYSCALE") ||
strstr(dev_name, "Grayscale") ||
strstr(dev_name, "grayscale"))
@@ -1283,14 +1283,12 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
sti->info = info; /* save for unregister_framebuffer() */
- printk(KERN_INFO
- "fb%d: %s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
- fb->info.node,
+ fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
var->yres,
var->bits_per_pixel,
- sti->outptr.dev_name,
+ dev_name,
fb->id,
fix->mmio_start);
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index cc6f48bba36..58241b47a96 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -186,8 +186,6 @@ static int gfb_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 33df9ec9179..9e01322fabe 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -198,8 +198,8 @@ void svga_settile(struct fb_info *info, struct fb_tilemap *map)
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
- printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n",
- info->node, map->width, map->height, map->depth, map->length);
+ fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
+ map->width, map->height, map->depth, map->length);
return;
}
diff --git a/drivers/video/sysimgblt.c b/drivers/video/sysimgblt.c
index 186c6f607be..a4d05b1b17d 100644
--- a/drivers/video/sysimgblt.c
+++ b/drivers/video/sysimgblt.c
@@ -152,7 +152,7 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
}
shift += bpp;
shift &= (32 - 1);
- if (!l) { l = 8; s++; };
+ if (!l) { l = 8; s++; }
}
/* write trailing bits */
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index c000852500a..7fb2d696fac 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -232,7 +232,7 @@ tcx_blank(int blank, struct fb_info *info)
case FB_BLANK_POWERDOWN: /* Poweroff */
break;
- };
+ }
sbus_writel(val, &thc->thc_misc);
@@ -434,7 +434,7 @@ static int tcx_probe(struct platform_device *op)
default:
j = i;
break;
- };
+ }
par->mmap_map[i].poff = op->resource[j].start;
}
@@ -498,8 +498,6 @@ static int tcx_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 64bc28ba403..f761fe375f5 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1646,7 +1646,6 @@ static void tdfxfb_remove(struct pci_dev *pdev)
pci_resource_len(pdev, 1));
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
- pci_set_drvdata(pdev, NULL);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index c9c8e5a1fde..f28674fea90 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1671,8 +1671,8 @@ static int tgafb_register(struct device *dev)
if (tga_bus_tc)
pr_info("tgafb: SFB+ detected, rev=0x%02x\n",
par->tga_chip_rev);
- pr_info("fb%d: %s frame buffer device at 0x%lx\n",
- info->node, info->fix.id, (long)bar0_start);
+ fb_info(info, "%s frame buffer device at 0x%lx\n",
+ info->fix.id, (long)bar0_start);
return 0;
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index deb8733f3c7..7fb4e321a43 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,7 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
*/
static int tmiofb_hw_stop(struct platform_device *dev)
{
- struct tmio_fb_data *data = dev->dev.platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
@@ -311,7 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
*/
static void tmiofb_hw_mode(struct platform_device *dev)
{
- struct tmio_fb_data *data = dev->dev.platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
struct fb_info *info = platform_get_drvdata(dev);
struct fb_videomode *mode = info->mode;
struct tmiofb_par *par = info->par;
@@ -557,7 +557,7 @@ static int tmiofb_ioctl(struct fb_info *fbi,
static struct fb_videomode *
tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct tmio_fb_data *data = info->device->platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(info->device);
struct fb_videomode *best = NULL;
int i;
@@ -577,7 +577,7 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
- struct tmio_fb_data *data = info->device->platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(info->device);
mode = tmiofb_find_mode(info, var);
if (!mode || var->bits_per_pixel > 16)
@@ -678,7 +678,7 @@ static struct fb_ops tmiofb_ops = {
static int tmiofb_probe(struct platform_device *dev)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
- struct tmio_fb_data *data = dev->dev.platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -781,8 +781,7 @@ static int tmiofb_probe(struct platform_device *dev)
if (retval < 0)
goto err_register_framebuffer;
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
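
Several hunks in this patch swap direct dev.platform_data dereferences for dev_get_platdata(). The accessor is a trivial inline in include/linux/device.h; the conversion is purely about going through the official getter:

    /* the accessor being adopted throughout this series */
    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }
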
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index ab57d387d6b..7ed9a227f5e 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -1553,7 +1553,6 @@ static void trident_pci_remove(struct pci_dev *dev)
iounmap(info->screen_base);
release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
- pci_set_drvdata(dev, NULL);
kfree(info->pixmap.addr);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index d2e5bc3cf96..025f14e30ee 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1166,7 +1166,7 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
int new_len;
unsigned char *old_fb = info->screen_base;
unsigned char *new_fb;
- unsigned char *new_back = 0;
+ unsigned char *new_back = NULL;
pr_warn("Reallocating framebuffer. Addresses will change!\n");
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7aec6f39fdd..256fba7f464 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -233,8 +233,7 @@ out:
static void uvesafb_free(struct uvesafb_ktask *task)
{
if (task) {
- if (task->done)
- kfree(task->done);
+ kfree(task->done);
kfree(task);
}
}
@@ -1332,8 +1331,8 @@ setmode:
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
info->fix.line_length = mode->bytes_per_scan_line;
-out: if (crtc != NULL)
- kfree(crtc);
+out:
+ kfree(crtc);
uvesafb_free(task);
return err;
@@ -1771,13 +1770,11 @@ static int uvesafb_probe(struct platform_device *dev)
"using %dk, total %dk\n", info->fix.smem_start,
info->screen_base, info->fix.smem_len/1024,
par->vbe_ib.total_memory * 64);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
if (err != 0)
- printk(KERN_WARNING "fb%d: failed to register attributes\n",
- info->node);
+ fb_warn(info, "failed to register attributes\n");
return 0;
@@ -1793,8 +1790,7 @@ out_mode:
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
out:
- if (par->vbe_modes)
- kfree(par->vbe_modes);
+ kfree(par->vbe_modes);
framebuffer_release(info);
return err;
@@ -1817,12 +1813,9 @@ static int uvesafb_remove(struct platform_device *dev)
fb_dealloc_cmap(&info->cmap);
if (par) {
- if (par->vbe_modes)
- kfree(par->vbe_modes);
- if (par->vbe_state_orig)
- kfree(par->vbe_state_orig);
- if (par->vbe_state_saved)
- kfree(par->vbe_state_saved);
+ kfree(par->vbe_modes);
+ kfree(par->vbe_state_orig);
+ kfree(par->vbe_state_saved);
}
framebuffer_release(info);
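
The uvesafb cleanups above depend on kfree() being a documented no-op when passed NULL, so the "if (ptr) kfree(ptr);" guards can be collapsed. For example:

    /* before: redundant NULL check */
    if (par->vbe_modes)
            kfree(par->vbe_modes);

    /* after: kfree(NULL) does nothing, so the guard is unnecessary */
    kfree(par->vbe_modes);
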
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index 3f5a041601d..e287ebc4781 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -392,7 +392,7 @@ int __init valkyriefb_init(void)
if ((err = register_framebuffer(&p->info)) != 0)
goto out_cmap_free;
- printk(KERN_INFO "fb%d: valkyrie frame buffer device\n", p->info.node);
+ fb_info(&p->info, "valkyrie frame buffer device\n");
return 0;
out_cmap_free:
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index bd83233ec22..1c7da3b098d 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -489,8 +489,7 @@ static int vesafb_probe(struct platform_device *dev)
fb_dealloc_cmap(&info->cmap);
goto err;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
if (info->screen_base)
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index ee5985efa15..70a897b1e45 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -390,9 +390,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0
- || var->yoffset >= info->var.yres_virtual
- || var->xoffset)
+ if (var->yoffset >= info->var.yres_virtual ||
+ var->xoffset)
return -EINVAL;
} else {
if (var->xoffset + info->var.xres > info->var.xres_virtual ||
@@ -527,9 +526,8 @@ static int vfb_probe(struct platform_device *dev)
goto err2;
platform_set_drvdata(dev, info);
- printk(KERN_INFO
- "fb%d: Virtual frame buffer device, using %ldK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
+ videomemorysize >> 10);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
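
In the vfb_pan_display() hunk, the dropped test is var->yoffset < 0. Since fb_var_screeninfo.yoffset is a __u32, that comparison can never be true (and compilers warn about it); only the upper-bound check carries information:

    /* yoffset is unsigned, so "yoffset < 0" is always false; only the
     * upper bound and the xoffset test are real checks */
    if (var->yoffset >= info->var.yres_virtual || var->xoffset)
            return -EINVAL;
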
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 2827333703d..283d335a759 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1377,8 +1377,7 @@ static int vga16fb_probe(struct platform_device *dev)
goto err_check_var;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
platform_set_drvdata(dev, info);
return 0;
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 897484903c3..b30e5a439d1 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -365,7 +365,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
if (!fb_mem_virt) {
pr_err("%s: Failed to allocate framebuffer\n", __func__);
return -ENOMEM;
- };
+ }
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index e9557fa014e..8bc6e0958a0 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -266,7 +266,7 @@ static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ fb_err(info, "cannot set requested pixclock, keeping old value\n");
return;
}
@@ -335,7 +335,7 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
rv = svga_match_format (vt8623fb_formats, var, NULL);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ fb_err(info, "unsupported mode requested\n");
return rv;
}
@@ -354,21 +354,23 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size)
{
- printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ fb_err(info, "not enough framebuffer memory (%d kB requested, %d kB available)\n",
+ mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
/* Text mode is limited to 256 kB of memory */
if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
{
- printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
+ fb_err(info, "text framebuffer size too large (%d kB requested, 256 kB possible)\n",
+ mem >> 10);
return -EINVAL;
}
rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ fb_err(info, "invalid timings requested\n");
return rv;
}
@@ -474,32 +476,32 @@ static int vt8623fb_set_par(struct fb_info *info)
mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
switch (mode) {
case 0:
- pr_debug("fb%d: text mode\n", info->node);
+ fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x60, 0x70);
break;
case 1:
- pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor\n");
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
svga_wseq_mask(par->state.vgabase, 0x15, 0x20, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 2:
- pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 3:
- pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ fb_dbg(info, "8 bit pseudocolor\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0x22, 0xFE);
break;
case 4:
- pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ fb_dbg(info, "5/6/5 truecolor\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0xB6, 0xFE);
break;
case 5:
- pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8 truecolor\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0xAE, 0xFE);
break;
default:
@@ -584,27 +586,27 @@ static int vt8623fb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- pr_debug("fb%d: unblank\n", info->node);
+ fb_dbg(info, "unblank\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
- pr_debug("fb%d: blank\n", info->node);
+ fb_dbg(info, "blank\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
- pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
+ fb_dbg(info, "DPMS standby (hsync off)\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x10, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
- pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
+ fb_dbg(info, "DPMS suspend (vsync off)\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x20, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
- pr_debug("fb%d: DPMS off (no sync)\n", info->node);
+ fb_dbg(info, "DPMS off (no sync)\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
@@ -769,12 +771,12 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(info->device, "cannot register framebugger\n");
+ dev_err(info->device, "cannot register framebuffer\n");
goto err_reg_fb;
}
- printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
- pci_name(dev), info->fix.smem_len >> 20);
+ fb_info(info, "%s on %s, %d MB RAM\n",
+ info->fix.id, pci_name(dev), info->fix.smem_len >> 20);
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -829,7 +831,6 @@ static void vt8623_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 7a299e951f7..10951c82f6e 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -680,7 +680,7 @@ int w100fb_probe(struct platform_device *pdev)
par = info->par;
platform_set_drvdata(pdev, info);
- inf = pdev->dev.platform_data;
+ inf = dev_get_platdata(&pdev->dev);
par->chip_id = chip_id;
par->mach = inf;
par->fastpll_mode = 0;
@@ -761,10 +761,9 @@ int w100fb_probe(struct platform_device *pdev)
err |= device_create_file(&pdev->dev, &dev_attr_flip);
if (err != 0)
- printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
- info->node, err);
+ fb_warn(info, "failed to register attributes (%d)\n", err);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
out:
if (info) {
diff --git a/drivers/video/wm8505fb.c b/drivers/video/wm8505fb.c
index 3072f30cad1..537d199612a 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/wm8505fb.c
@@ -372,14 +372,12 @@ static int wm8505fb_probe(struct platform_device *pdev)
}
ret = device_create_file(&pdev->dev, &dev_attr_contrast);
- if (ret < 0) {
- printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
- fbi->fb.node, ret);
- }
+ if (ret < 0)
+ fb_warn(&fbi->fb, "failed to register attributes (%d)\n", ret);
- printk(KERN_INFO "fb%d: %s frame buffer at 0x%lx-0x%lx\n",
- fbi->fb.node, fbi->fb.fix.id, fbi->fb.fix.smem_start,
- fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
+ fb_info(&fbi->fb, "%s frame buffer at 0x%lx-0x%lx\n",
+ fbi->fb.fix.id, fbi->fb.fix.smem_start,
+ fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
return 0;
}
@@ -411,7 +409,7 @@ static struct platform_driver wm8505fb_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(wmt_dt_ids),
+ .of_match_table = wmt_dt_ids,
},
};
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/wmt_ge_rops.c
index 4aaeb18223b..b0a9f34b2e0 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/wmt_ge_rops.c
@@ -169,13 +169,13 @@ static struct platform_driver wmt_ge_rops_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "wmt_ge_rops",
- .of_match_table = of_match_ptr(wmt_dt_ids),
+ .of_match_table = wmt_dt_ids,
},
};
module_platform_driver(wmt_ge_rops_driver);
-MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com");
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("Accelerators for raster operations using "
"WonderMedia Graphics Engine");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 84c664ea8eb..6ff1a91e9df 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -260,10 +260,9 @@ static int xilinxfb_assign(struct platform_device *pdev,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drvdata->regs)) {
- rc = PTR_ERR(drvdata->regs);
- goto err_region;
- }
+ if (IS_ERR(drvdata->regs))
+ return PTR_ERR(drvdata->regs);
+
drvdata->regs_phys = res->start;
}
@@ -279,11 +278,7 @@ static int xilinxfb_assign(struct platform_device *pdev,
if (!drvdata->fb_virt) {
dev_err(dev, "Could not allocate frame buffer memory\n");
- rc = -ENOMEM;
- if (drvdata->flags & BUS_ACCESS_FLAG)
- goto err_fbmem;
- else
- goto err_region;
+ return -ENOMEM;
}
/* Clear (turn to black) the framebuffer */
@@ -363,14 +358,6 @@ err_cmap:
/* Turn off the display */
xilinx_fb_out32(drvdata, REG_CTRL, 0);
-err_fbmem:
- if (drvdata->flags & BUS_ACCESS_FLAG)
- devm_iounmap(dev, drvdata->regs);
-
-err_region:
- kfree(drvdata);
- dev_set_drvdata(dev, NULL);
-
return rc;
}
@@ -395,17 +382,12 @@ static int xilinxfb_release(struct device *dev)
/* Turn off the display */
xilinx_fb_out32(drvdata, REG_CTRL, 0);
- /* Release the resources, as allocated based on interface */
- if (drvdata->flags & BUS_ACCESS_FLAG)
- devm_iounmap(dev, drvdata->regs);
#ifdef CONFIG_PPC_DCR
- else
+ /* Release the resources, as allocated based on interface */
+ if (!(drvdata->flags & BUS_ACCESS_FLAG))
dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
#endif
- kfree(drvdata);
- dev_set_drvdata(dev, NULL);
-
return 0;
}
@@ -413,7 +395,7 @@ static int xilinxfb_release(struct device *dev)
* OF bus binding
*/
-static int xilinxfb_of_probe(struct platform_device *op)
+static int xilinxfb_of_probe(struct platform_device *pdev)
{
const u32 *prop;
u32 tft_access = 0;
@@ -425,17 +407,15 @@ static int xilinxfb_of_probe(struct platform_device *op)
pdata = xilinx_fb_default_pdata;
/* Allocate the driver data region */
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata) {
- dev_err(&op->dev, "Couldn't allocate device private record\n");
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
return -ENOMEM;
- }
/*
* To check whether the core is connected directly to DCR or BUS
* interface and initialize the tft_access accordingly.
*/
- of_property_read_u32(op->dev.of_node, "xlnx,dcr-splb-slave-if",
+ of_property_read_u32(pdev->dev.of_node, "xlnx,dcr-splb-slave-if",
&tft_access);
/*
@@ -448,40 +428,39 @@ static int xilinxfb_of_probe(struct platform_device *op)
#ifdef CONFIG_PPC_DCR
else {
int start;
- start = dcr_resource_start(op->dev.of_node, 0);
- drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
- drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
+ start = dcr_resource_start(pdev->dev.of_node, 0);
+ drvdata->dcr_len = dcr_resource_len(pdev->dev.of_node, 0);
+ drvdata->dcr_host = dcr_map(pdev->dev.of_node, start, drvdata->dcr_len);
if (!DCR_MAP_OK(drvdata->dcr_host)) {
- dev_err(&op->dev, "invalid DCR address\n");
- kfree(drvdata);
+ dev_err(&pdev->dev, "invalid DCR address\n");
return -ENODEV;
}
}
#endif
- prop = of_get_property(op->dev.of_node, "phys-size", &size);
+ prop = of_get_property(pdev->dev.of_node, "phys-size", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.screen_width_mm = prop[0];
pdata.screen_height_mm = prop[1];
}
- prop = of_get_property(op->dev.of_node, "resolution", &size);
+ prop = of_get_property(pdev->dev.of_node, "resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xres = prop[0];
pdata.yres = prop[1];
}
- prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
+ prop = of_get_property(pdev->dev.of_node, "virtual-resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xvirt = prop[0];
pdata.yvirt = prop[1];
}
- if (of_find_property(op->dev.of_node, "rotate-display", NULL))
+ if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
pdata.rotate_screen = 1;
- dev_set_drvdata(&op->dev, drvdata);
- return xilinxfb_assign(op, drvdata, &pdata);
+ dev_set_drvdata(&pdev->dev, drvdata);
+ return xilinxfb_assign(pdev, drvdata, &pdata);
}
static int xilinxfb_of_remove(struct platform_device *op)
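
The xilinxfb conversion above replaces kzalloc()/kfree() and manual iounmap with managed (devm_*) allocations, which is why the err_fbmem/err_region labels disappear: resources obtained through the device are released automatically when probe fails or the device is unbound. A condensed sketch of the pattern, taken from the probe path above with error handling trimmed:

    /* devm_ resources need no matching free/iounmap in remove() or on error */
    drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
    if (!drvdata)
            return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(drvdata->regs))
            return PTR_ERR(drvdata->regs);
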
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d294f67d6f8..32c8fc5f7a5 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/notifier.h>
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ee59b74768d..fed0ce198ae 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -13,18 +13,24 @@ static ssize_t device_show(struct device *_d,
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
+static DEVICE_ATTR_RO(device);
+
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
+static DEVICE_ATTR_RO(vendor);
+
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
+static DEVICE_ATTR_RO(status);
+
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -32,6 +38,8 @@ static ssize_t modalias_show(struct device *_d,
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
+static DEVICE_ATTR_RO(modalias);
+
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -47,14 +55,17 @@ static ssize_t features_show(struct device *_d,
len += sprintf(buf+len, "\n");
return len;
}
-static struct device_attribute virtio_dev_attrs[] = {
- __ATTR_RO(device),
- __ATTR_RO(vendor),
- __ATTR_RO(status),
- __ATTR_RO(modalias),
- __ATTR_RO(features),
- __ATTR_NULL
+static DEVICE_ATTR_RO(features);
+
+static struct attribute *virtio_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_status.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_features.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(virtio_dev);
static inline int virtio_id_match(const struct virtio_device *dev,
const struct virtio_device_id *id)
@@ -165,7 +176,7 @@ static int virtio_dev_remove(struct device *_d)
static struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
- .dev_attrs = virtio_dev_attrs,
+ .dev_groups = virtio_dev_groups,
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f572c00a1b..c444654fc33 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -275,9 +275,8 @@ static inline s64 towards_target(struct virtio_balloon *vb)
__le32 v;
s64 target;
- vb->vdev->config->get(vb->vdev,
- offsetof(struct virtio_balloon_config, num_pages),
- &v, sizeof(v));
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+
target = le32_to_cpu(v);
return target - vb->num_pages;
}
@@ -286,9 +285,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- vb->vdev->config->set(vb->vdev,
- offsetof(struct virtio_balloon_config, actual),
- &actual, sizeof(actual));
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
+ &actual);
}
static int balloon(void *_vballoon)
@@ -513,7 +511,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -556,7 +554,7 @@ static struct virtio_driver virtio_balloon_driver = {
.probe = virtballoon_probe,
.remove = virtballoon_remove,
.config_changed = virtballoon_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtballoon_freeze,
.restore = virtballoon_restore,
#endif
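
virtio_cread()/virtio_cwrite() are the config-space helpers this series migrates to; conceptually they wrap the same ->get()/->set() calls shown in the removed lines, computing the offset and size from the named struct field. A simplified model only, ignoring the extra type checking the real macros in include/linux/virtio_config.h perform:

    /* simplified sketch, not the real definitions */
    #define virtio_cread(vdev, structname, member, ptr)                 \
            (vdev)->config->get((vdev), offsetof(structname, member),   \
                                (ptr), sizeof(*(ptr)))

    #define virtio_cwrite(vdev, structname, member, ptr)                \
            (vdev)->config->set((vdev), offsetof(structname, member),   \
                                (ptr), sizeof(*(ptr)))
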
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 1ba0d683101..c600ccfd692 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -219,13 +219,14 @@ static void vm_reset(struct virtio_device *vdev)
/* Transport interface */
/* the notify function used when creating a virt queue */
-static void vm_notify(struct virtqueue *vq)
+static bool vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
/* We write the queue's selector into the notification register to
* signal the other end */
writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ return true;
}
/* Notify all virtqueues on an interrupt. */
@@ -470,7 +471,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
- if (memcmp(&magic, "virt", 4) != 0) {
+ if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
return -ENODEV;
}
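
The virtio-mmio magic check stops memcmp()-ing against the string "virt" and compares the register value directly. Reading the ASCII bytes 'v' 'i' 'r' 't' as a little-endian 32-bit word gives the constant the expression builds:

    /* 'v'=0x76, 'i'=0x69, 'r'=0x72, 't'=0x74 */
    u32 magic = 'v' | 'i' << 8 | 'r' << 16 | 't' << 24;   /* == 0x74726976 */
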
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 98917fc872a..a37c69941d3 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -197,13 +197,14 @@ static void vp_reset(struct virtio_device *vdev)
}
/* the notify function used when creating a virt queue */
-static void vp_notify(struct virtqueue *vq)
+static bool vp_notify(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
/* we write the queue's selector into the notification register to
* signal the other end */
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ return true;
}
/* Handle a configuration change: Tell driver if it wants to know. */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6b4a4db4404..28b5338fff7 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -81,7 +81,7 @@ struct vring_virtqueue
u16 last_used_idx;
/* How to notify other side. FIXME: commonalize hcalls! */
- void (*notify)(struct virtqueue *vq);
+ bool (*notify)(struct virtqueue *vq);
#ifdef DEBUG
/* They're supposed to lock for us. */
@@ -173,6 +173,8 @@ static inline int vring_add_indirect(struct vring_virtqueue *vq,
head = vq->free_head;
vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
vq->vring.desc[head].addr = virt_to_phys(desc);
+ /* kmemleak gives a false positive, as it's hidden by virt_to_phys */
+ kmemleak_ignore(desc);
vq->vring.desc[head].len = i * sizeof(struct vring_desc);
/* Update free pointer */
@@ -428,13 +430,22 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
* @vq: the struct virtqueue
*
* This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
*/
-void virtqueue_notify(struct virtqueue *_vq)
+bool virtqueue_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
/* Prod other side to tell it about changes. */
- vq->notify(_vq);
+ if (!vq->notify(_vq)) {
+ vq->broken = true;
+ return false;
+ }
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
@@ -447,11 +458,14 @@ EXPORT_SYMBOL_GPL(virtqueue_notify);
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
*/
-void virtqueue_kick(struct virtqueue *vq)
+bool virtqueue_kick(struct virtqueue *vq)
{
if (virtqueue_kick_prepare(vq))
- virtqueue_notify(vq);
+ return virtqueue_notify(vq);
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
@@ -742,7 +756,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
- void (*notify)(struct virtqueue *),
+ bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
@@ -837,4 +851,12 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+bool virtqueue_is_broken(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->broken;
+}
+EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 96cab6ac2b4..02df3b1381d 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -255,17 +255,17 @@ static int ds1wm_find_divisor(int gclk)
static void ds1wm_up(struct ds1wm_data *ds1wm_data)
{
int divisor;
- struct ds1wm_driver_data *plat = ds1wm_data->pdev->dev.platform_data;
+ struct device *dev = &ds1wm_data->pdev->dev;
+ struct ds1wm_driver_data *plat = dev_get_platdata(dev);
if (ds1wm_data->cell->enable)
ds1wm_data->cell->enable(ds1wm_data->pdev);
divisor = ds1wm_find_divisor(plat->clock_rate);
- dev_dbg(&ds1wm_data->pdev->dev,
- "found divisor 0x%x for clock %d\n", divisor, plat->clock_rate);
+ dev_dbg(dev, "found divisor 0x%x for clock %d\n",
+ divisor, plat->clock_rate);
if (divisor == 0) {
- dev_err(&ds1wm_data->pdev->dev,
- "no suitable divisor for %dHz clock\n",
+ dev_err(dev, "no suitable divisor for %dHz clock\n",
plat->clock_rate);
return;
}
@@ -481,7 +481,7 @@ static int ds1wm_probe(struct platform_device *pdev)
ds1wm_data->cell = mfd_get_cell(pdev);
if (!ds1wm_data->cell)
return -ENODEV;
- plat = pdev->dev.platform_data;
+ plat = dev_get_platdata(&pdev->dev);
if (!plat)
return -ENODEV;
@@ -498,7 +498,7 @@ static int ds1wm_probe(struct platform_device *pdev)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
- IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
+ IRQF_SHARED, "ds1wm", ds1wm_data);
if (ret)
return ret;
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 6e94d8dd3d0..9900e8ec739 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -577,8 +577,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
goto err_irq;
}
- ret = devm_request_irq(dev, irq, hdq_isr, IRQF_DISABLED,
- "omap_hdq", hdq_data);
+ ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
if (ret < 0) {
dev_dbg(&pdev->dev, "could not request irq\n");
goto err_irq;
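
The ds1wm and omap_hdq hunks drop IRQF_DISABLED from the request_irq flags. That flag has been a no-op for some time, since handlers already run with local interrupts disabled, so passing 0 (or just the remaining IRQF_SHARED) is behaviourally identical:

    /* equivalent behaviour, without the obsolete flag */
    ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
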
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index f54ece268c9..e36b18b2817 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -56,8 +56,9 @@ MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
static int w1_gpio_probe_dt(struct platform_device *pdev)
{
- struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
+ int gpio;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -66,7 +67,11 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
if (of_get_property(np, "linux,open-drain", NULL))
pdata->is_open_drain = 1;
- pdata->pin = of_get_gpio(np, 0);
+ gpio = of_get_gpio(np, 0);
+ if (gpio < 0)
+ return gpio;
+ pdata->pin = gpio;
+
pdata->ext_pullup_enable_pin = of_get_gpio(np, 1);
pdev->dev.platform_data = pdata;
@@ -87,32 +92,34 @@ static int w1_gpio_probe(struct platform_device *pdev)
}
}
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No configuration data\n");
return -ENXIO;
}
- master = kzalloc(sizeof(struct w1_bus_master), GFP_KERNEL);
+ master = devm_kzalloc(&pdev->dev, sizeof(struct w1_bus_master),
+ GFP_KERNEL);
if (!master) {
dev_err(&pdev->dev, "Out of memory\n");
return -ENOMEM;
}
- err = gpio_request(pdata->pin, "w1");
+ err = devm_gpio_request(&pdev->dev, pdata->pin, "w1");
if (err) {
dev_err(&pdev->dev, "gpio_request (pin) failed\n");
- goto free_master;
+ return err;
}
if (gpio_is_valid(pdata->ext_pullup_enable_pin)) {
- err = gpio_request_one(pdata->ext_pullup_enable_pin,
- GPIOF_INIT_LOW, "w1 pullup");
+ err = devm_gpio_request_one(&pdev->dev,
+ pdata->ext_pullup_enable_pin, GPIOF_INIT_LOW,
+ "w1 pullup");
if (err < 0) {
dev_err(&pdev->dev, "gpio_request_one "
"(ext_pullup_enable_pin) failed\n");
- goto free_gpio;
+ return err;
}
}
@@ -130,7 +137,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
err = w1_add_master_device(master);
if (err) {
dev_err(&pdev->dev, "w1_add_master device failed\n");
- goto free_gpio_ext_pu;
+ return err;
}
if (pdata->enable_external_pullup)
@@ -142,22 +149,12 @@ static int w1_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
return 0;
-
- free_gpio_ext_pu:
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_free(pdata->ext_pullup_enable_pin);
- free_gpio:
- gpio_free(pdata->pin);
- free_master:
- kfree(master);
-
- return err;
}
static int w1_gpio_remove(struct platform_device *pdev)
{
struct w1_bus_master *master = platform_get_drvdata(pdev);
- struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
@@ -166,8 +163,6 @@ static int w1_gpio_remove(struct platform_device *pdev)
gpio_set_value(pdata->ext_pullup_enable_pin, 0);
w1_remove_master_device(master);
- gpio_free(pdata->pin);
- kfree(master);
return 0;
}
@@ -176,7 +171,7 @@ static int w1_gpio_remove(struct platform_device *pdev)
static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
@@ -186,7 +181,7 @@ static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
static int w1_gpio_resume(struct platform_device *pdev)
{
- struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
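
The w1-gpio probe rework above swaps kzalloc/gpio_request for their managed counterparts, which is what allows the free_gpio*/free_master error labels to be deleted. The shape of the managed calls, taken from the hunks above:

    /* managed resources: released automatically on probe failure or unbind */
    master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
    if (!master)
            return -ENOMEM;

    err = devm_gpio_request(&pdev->dev, pdata->pin, "w1");
    if (err)
            return err;
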
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index fa932c2f7d9..66efa96c460 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -709,7 +709,7 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
sl->owner = THIS_MODULE;
sl->master = dev;
- set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
+ set_bit(W1_SLAVE_ACTIVE, &sl->flags);
memset(&msg, 0, sizeof(msg));
memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
@@ -866,7 +866,7 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
sl = w1_slave_search_device(dev, tmp);
if (sl) {
- set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
+ set_bit(W1_SLAVE_ACTIVE, &sl->flags);
} else {
if (rn && tmp->crc == w1_calc_crc8((u8 *)&rn_le, 7))
w1_attach_slave_device(dev, tmp);
@@ -984,14 +984,14 @@ void w1_search_process_cb(struct w1_master *dev, u8 search_type,
struct w1_slave *sl, *sln;
list_for_each_entry(sl, &dev->slist, w1_slave_entry)
- clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
+ clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
w1_search_devices(dev, search_type, cb);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
- if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl)
+ if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl)
w1_slave_detach(sl);
- else if (test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags))
+ else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
sl->ttl = dev->slave_ttl;
}
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 45908e56c2f..ca8081a101d 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -67,8 +67,8 @@ struct w1_slave
struct w1_reg_num reg_num;
atomic_t refcnt;
u8 rom[9];
- u32 flags;
int ttl;
+ unsigned long flags;
struct w1_master *master;
struct w1_family *family;
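
The w1.h change makes the slave flags field an unsigned long because the bitops (set_bit/clear_bit/test_bit) operate on unsigned long words; the (long *) casts removed in w1.c above were papering over the type mismatch. With the field typed correctly the calls are simply:

    unsigned long flags = 0;

    set_bit(W1_SLAVE_ACTIVE, &flags);           /* no cast needed */
    if (test_bit(W1_SLAVE_ACTIVE, &flags))
            clear_bit(W1_SLAVE_ACTIVE, &flags);
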
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d1d53f301de..5be6e919f78 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -392,6 +392,25 @@ config RETU_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called retu_wdt.
+config MOXART_WDT
+ tristate "MOXART watchdog"
+ depends on ARCH_MOXART
+ help
+ Say Y here to include Watchdog timer support for the watchdog
+ existing on the MOXA ART SoC series platforms.
+
+ To compile this driver as a module, choose M here: the
+ module will be called moxart_wdt.
+
+config SIRFSOC_WATCHDOG
+ tristate "SiRFSOC watchdog"
+ depends on ARCH_SIRF
+ select WATCHDOG_CORE
+ default y
+ help
+ Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When
+ the watchdog triggers the system will be reset.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -418,8 +437,6 @@ config BFIN_WDT
# FRV Architecture
-# H8300 Architecture
-
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -868,6 +885,7 @@ config VIA_WDT
config W83627HF_WDT
tristate "W83627HF/W83627DHG Watchdog Timer"
depends on X86
+ select WATCHDOG_CORE
---help---
This is the driver for the hardware watchdog on the W83627HF chipset
as used in Advantech PC-9578 and Tyan S2721-533 motherboards
@@ -1127,6 +1145,13 @@ config LANTIQ_WDT
help
Hardware driver for the Lantiq SoC Watchdog Timer.
+config RALINK_WDT
+ tristate "Ralink SoC watchdog"
+ select WATCHDOG_CORE
+ depends on RALINK
+ help
+ Hardware driver for the Ralink SoC Watchdog Timer.
+
# PARISC Architecture
# POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 6c5bb274d3c..91bd95a64ba 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -55,6 +55,8 @@ obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
+obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
+obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -66,8 +68,6 @@ obj-$(CONFIG_BFIN_WDT) += bfin_wdt.o
# FRV Architecture
-# H8300 Architecture
-
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
@@ -136,6 +136,7 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
+obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
# PARISC Architecture
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 24a517777fa..5cf1621def9 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -60,8 +60,7 @@
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/fs.h> /* For file operations */
#include <linux/ioport.h> /* For io-port access */
@@ -337,4 +336,3 @@ module_exit(acq_exit);
MODULE_AUTHOR("David Woodhouse");
MODULE_DESCRIPTION("Acquire Inc. Single Board Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
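
The long run of MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) deletions that starts here removes a module alias of the form char-major-10-130. Many watchdog drivers carried the identical alias, so it was of little use for autoloading, and the drivers are loaded via their platform/PCI/OF aliases instead. For reference, the macro expands approximately to:

    /* approximate expansion, from include/linux/miscdevice.h */
    #define MODULE_ALIAS_MISCDEV(minor)                          \
            MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR)   \
                         "-" __stringify(minor))
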
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index cc6702fc526..a8961addc59 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -345,4 +345,3 @@ module_exit(advwdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marek Michalkiewicz <marekm@linux.org.pl>");
MODULE_DESCRIPTION("Advantech Single Board Computer WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 41b84936a52..fbb7b94cabf 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -452,4 +452,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("ALi M1535 PMU Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 5eee55012e3..12f0b762b52 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -425,4 +425,3 @@ MODULE_DEVICE_TABLE(pci, alim7101_pci_tbl);
MODULE_AUTHOR("Steve Hill");
MODULE_DESCRIPTION("ALi M7101 PMU Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b3709f9cf5b..3a996576343 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -46,7 +46,6 @@
MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>");
MODULE_DESCRIPTION(LONGNAME);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int margin = 60;
module_param(margin, int, 0);
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index b178e717ef0..afe7d17e677 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -434,4 +434,3 @@ module_platform_driver_probe(at32_wdt_driver, at32_wdt_probe);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 1c75260b987..dee6cc21d27 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -269,7 +269,7 @@ static struct platform_driver at91wdt_driver = {
.driver = {
.name = "at91_wdt",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(at91_wdt_dt_ids),
+ .of_match_table = at91_wdt_dt_ids,
},
};
@@ -297,5 +297,4 @@ module_exit(at91_wdt_exit);
MODULE_AUTHOR("Andrew Victor");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:at91_wdt");
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 37cb09b27b6..9fa1f69dac1 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -329,4 +329,3 @@ MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 61566fc47f8..a6a2cebb258 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -186,4 +186,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index a14a58d9d11..4eb188b87f8 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -317,5 +317,4 @@ MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:bcm63xx-wdt");
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 5d36d6fb496..a3b6a5b30f9 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -465,7 +465,6 @@ module_exit(bfin_wdt_exit);
MODULE_AUTHOR("Michele d'Amico, Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("Blackfin Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index f270bb7bc45..f7ae49edb51 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -289,7 +289,6 @@ MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>");
MODULE_DESCRIPTION("sma cpu5 watchdog driver");
MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(port, int, 0);
MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index bead7740c86..dd625cca1ae 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -267,5 +267,4 @@ MODULE_PARM_DESC(heartbeat,
__MODULE_STRING(DEFAULT_HEARTBEAT));
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:watchdog");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index e621098bf66..a46f5c7ee7f 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -29,6 +29,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -203,12 +204,12 @@ static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident,
+ return copy_to_user((void __user *)arg, &dw_wdt_ident,
sizeof(dw_wdt_ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int *)arg);
+ return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
dw_wdt_set_next_heartbeat();
@@ -252,17 +253,17 @@ static int dw_wdt_release(struct inode *inode, struct file *filp)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
- clk_disable(dw_wdt.clk);
+ clk_disable_unprepare(dw_wdt.clk);
return 0;
}
static int dw_wdt_resume(struct device *dev)
{
- int err = clk_enable(dw_wdt.clk);
+ int err = clk_prepare_enable(dw_wdt.clk);
if (err)
return err;
@@ -271,12 +272,9 @@ static int dw_wdt_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
-static const struct dev_pm_ops dw_wdt_pm_ops = {
- .suspend = dw_wdt_suspend,
- .resume = dw_wdt_resume,
-};
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
@@ -309,7 +307,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
if (IS_ERR(dw_wdt.clk))
return PTR_ERR(dw_wdt.clk);
- ret = clk_enable(dw_wdt.clk);
+ ret = clk_prepare_enable(dw_wdt.clk);
if (ret)
return ret;
@@ -326,7 +324,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
return 0;
out_disable_clk:
- clk_disable(dw_wdt.clk);
+ clk_disable_unprepare(dw_wdt.clk);
return ret;
}
@@ -335,20 +333,27 @@ static int dw_wdt_drv_remove(struct platform_device *pdev)
{
misc_deregister(&dw_wdt_miscdev);
- clk_disable(dw_wdt.clk);
+ clk_disable_unprepare(dw_wdt.clk);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dw_wdt_of_match[] = {
+ { .compatible = "snps,dw-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw_wdt_of_match);
+#endif
+
static struct platform_driver dw_wdt_driver = {
.probe = dw_wdt_drv_probe,
.remove = dw_wdt_drv_remove,
.driver = {
.name = "dw_wdt",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = of_match_ptr(dw_wdt_of_match),
.pm = &dw_wdt_pm_ops,
-#endif /* CONFIG_PM */
},
};
@@ -357,4 +362,3 @@ module_platform_driver(dw_wdt_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
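
The dw_wdt PM rework switches to CONFIG_PM_SLEEP guards plus SIMPLE_DEV_PM_OPS(), which defines the dev_pm_ops structure unconditionally and only wires in the callbacks when sleep support is built, so the driver no longer needs the #ifdef around the .pm assignment. Roughly, the macro produces:

    /* approximate expansion of SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, ...);
     * SET_SYSTEM_SLEEP_PM_OPS() is empty when CONFIG_PM_SLEEP is off */
    static const struct dev_pm_ops dw_wdt_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(dw_wdt_suspend, dw_wdt_resume)
    };
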
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index e0574844c31..833e8131184 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -179,4 +179,3 @@ MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>,"
MODULE_DESCRIPTION("EP93xx Watchdog");
MODULE_LICENSE("GPL");
MODULE_VERSION(WDT_VERSION);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index cd31b8a2a72..23ee53240c4 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -477,4 +477,3 @@ module_exit(eurwdt_exit);
MODULE_AUTHOR("Rodolfo Giometti");
MODULE_DESCRIPTION("Driver for Eurotech CPU-1220/1410 on board watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 257cfbad21d..25beb30878d 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -34,6 +34,7 @@
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -330,5 +331,4 @@ module_exit(gef_wdt_exit);
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_DESCRIPTION("GE watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:gef_wdt");
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index fcd599d4e22..4a6ae84b42b 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -297,4 +297,3 @@ module_exit(geodewdt_exit);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 19f3c3fc65f..45b979d9dd1 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -881,7 +881,6 @@ MODULE_AUTHOR("Tom Mingarelli");
MODULE_DESCRIPTION("hp watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HPWDT_VERSION);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 2b2ea13d03e..a72fe9361dd 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -497,4 +497,3 @@ module_pci_driver(esb_driver);
MODULE_AUTHOR("Ross Biro and David Härdeman");
MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 6130321da38..04f8af65acf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -56,8 +56,6 @@
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
@@ -394,7 +392,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
{
int ret = -ENODEV;
unsigned long val32;
- struct lpc_ich_info *ich_info = dev->dev.platform_data;
+ struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev);
if (!ich_info)
goto out;
@@ -582,5 +580,4 @@ MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index eb6b5cc98ec..7ae36690c44 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -382,6 +382,5 @@ module_exit(ibwdt_exit);
MODULE_AUTHOR("Charles Howes <chowes@vsol.net>");
MODULE_DESCRIPTION("IB700 SBC watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of ib700wdt.c */
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index bc3fb8fe89a..db0a34460e5 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -419,4 +419,3 @@ MODULE_PARM_DESC(nowayout,
MODULE_DESCRIPTION("IBM Automatic Server Restart driver");
MODULE_AUTHOR("Andrey Panin");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index e24ef6a6e06..70a240297c6 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -344,5 +344,4 @@ module_exit(ie6xx_wdt_exit);
MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
MODULE_DESCRIPTION("Intel Atom E6xx Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 693ac3f4de5..b4786bccc42 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -322,6 +322,7 @@ static const struct of_device_id imx2_wdt_dt_ids[] = {
{ .compatible = "fsl,imx21-wdt", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
static struct platform_driver imx2_wdt_driver = {
.remove = __exit_p(imx2_wdt_remove),
@@ -338,5 +339,4 @@ module_platform_driver_probe(imx2_wdt_driver, imx2_wdt_probe);
MODULE_AUTHOR("Wolfram Sang");
MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 6d90f7a2ce2..1b5c25a47b8 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -214,4 +214,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
MODULE_DESCRIPTION("Hardware Watchdog Device for SGI IP22");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 9dda2d08af9..e13e65e996a 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -48,7 +48,7 @@
#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include "intel_scu_watchdog.h"
@@ -445,7 +445,7 @@ static int __init intel_scu_watchdog_init(void)
*
* If it isn't an intel MID device then it doesn't have this watchdog
*/
- if (!mrst_identify_cpu())
+ if (!intel_mid_identify_cpu())
return -ENODEV;
/* Check boot parameters to verify that their initial values */
@@ -564,5 +564,4 @@ module_exit(intel_scu_watchdog_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index d964faf1a25..b16013ffacc 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -259,4 +259,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("Curt E Bruns <curt.e.bruns@intel.com>");
MODULE_DESCRIPTION("iop watchdog timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index f4cce6d66a5..41b3979a9d8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -41,7 +41,6 @@
MODULE_AUTHOR("Jorge Boncompte - DTI2 <jorge@dti2.net>");
MODULE_DESCRIPTION("IT8712F Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int max_units = 255;
static int margin = 60; /* in seconds */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index d3dcc6988b5..e2bba68ae71 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -772,4 +772,3 @@ module_exit(it87_wdt_exit);
MODULE_AUTHOR("Oliver Schuster");
MODULE_DESCRIPTION("Hardware Watchdog Device Driver for IT87xx EC-LPC I/O");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 5580b4fff7f..f20cc53ff71 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -208,5 +208,3 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index d1afdf684c1..2de486a7eea 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -222,5 +222,4 @@ module_platform_driver(jz4740_wdt_driver);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("jz4740 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:jz4740-wdt");
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 5c3d4df63e6..a1a3638c579 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -67,7 +67,7 @@ enum {
PRESCALER_12,
};
-const u32 kempld_prescaler[] = {
+static const u32 kempld_prescaler[] = {
[PRESCALER_21] = (1 << 21) - 1,
[PRESCALER_17] = (1 << 17) - 1,
[PRESCALER_12] = (1 << 12) - 1,
@@ -361,7 +361,7 @@ static long kempld_wdt_ioctl(struct watchdog_device *wdd, unsigned int cmd,
ret = kempld_wdt_keepalive(wdd);
break;
case WDIOC_GETPRETIMEOUT:
- ret = put_user(wdt_data->pretimeout, (int *)arg);
+ ret = put_user(wdt_data->pretimeout, (int __user *)arg);
break;
}
@@ -578,4 +578,3 @@ module_platform_driver(kempld_wdt_driver);
MODULE_DESCRIPTION("KEM PLD Watchdog Driver");
MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index dce9ecffd44..40ca5594a33 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -323,5 +323,4 @@ module_exit(ks8695_wdt_exit);
MODULE_AUTHOR("Andrew Victor");
MODULE_DESCRIPTION("Watchdog driver for KS8695");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:ks8695_wdt");
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 088fd0c9d88..3b3148c764a 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -249,4 +249,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 173494a681e..da6fa2b6807 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -223,4 +223,3 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index bf84f788e59..9826b59ef73 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -92,7 +92,6 @@ static unsigned short zf_readw(unsigned char port)
MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>");
MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index cc9d328086e..6d4f3998e1f 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -258,4 +258,3 @@ MODULE_PARM_DESC(nodelay,
"(max6373/74 only, default=0)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index 97d62ee5034..be86ea359ee 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -315,4 +315,3 @@ MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>");
MODULE_DESCRIPTION("MixCom Watchdog driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
new file mode 100644
index 00000000000..4166e4d116a
--- /dev/null
+++ b/drivers/watchdog/moxart_wdt.c
@@ -0,0 +1,165 @@
+/*
+ * MOXA ART SoCs watchdog driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/moduleparam.h>
+
+#define REG_COUNT 0x4
+#define REG_MODE 0x8
+#define REG_ENABLE 0xC
+
+struct moxart_wdt_dev {
+ struct watchdog_device dev;
+ void __iomem *base;
+ unsigned int clock_frequency;
+};
+
+static int heartbeat;
+
+static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
+
+ writel(0, moxart_wdt->base + REG_ENABLE);
+
+ return 0;
+}
+
+static int moxart_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
+
+ writel(moxart_wdt->clock_frequency * wdt_dev->timeout,
+ moxart_wdt->base + REG_COUNT);
+ writel(0x5ab9, moxart_wdt->base + REG_MODE);
+ writel(0x03, moxart_wdt->base + REG_ENABLE);
+
+ return 0;
+}
+
+static int moxart_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ wdt_dev->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info moxart_wdt_info = {
+ .identity = "moxart-wdt",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops moxart_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = moxart_wdt_start,
+ .stop = moxart_wdt_stop,
+ .set_timeout = moxart_wdt_set_timeout,
+};
+
+static int moxart_wdt_probe(struct platform_device *pdev)
+{
+ struct moxart_wdt_dev *moxart_wdt;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+ unsigned int max_timeout;
+ bool nowayout = WATCHDOG_NOWAYOUT;
+
+ moxart_wdt = devm_kzalloc(dev, sizeof(*moxart_wdt), GFP_KERNEL);
+ if (!moxart_wdt)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, moxart_wdt);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ moxart_wdt->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(moxart_wdt->base))
+ return PTR_ERR(moxart_wdt->base);
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%s: of_clk_get failed\n", __func__);
+ return PTR_ERR(clk);
+ }
+
+ moxart_wdt->clock_frequency = clk_get_rate(clk);
+ if (moxart_wdt->clock_frequency == 0) {
+ pr_err("%s: incorrect clock frequency\n", __func__);
+ return -EINVAL;
+ }
+
+ max_timeout = UINT_MAX / moxart_wdt->clock_frequency;
+
+ moxart_wdt->dev.info = &moxart_wdt_info;
+ moxart_wdt->dev.ops = &moxart_wdt_ops;
+ moxart_wdt->dev.timeout = max_timeout;
+ moxart_wdt->dev.min_timeout = 1;
+ moxart_wdt->dev.max_timeout = max_timeout;
+ moxart_wdt->dev.parent = dev;
+
+ watchdog_init_timeout(&moxart_wdt->dev, heartbeat, dev);
+ watchdog_set_nowayout(&moxart_wdt->dev, nowayout);
+
+ watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt);
+
+ err = watchdog_register_device(&moxart_wdt->dev);
+ if (err)
+ return err;
+
+ dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
+ moxart_wdt->dev.timeout, nowayout);
+
+ return 0;
+}
+
+static int moxart_wdt_remove(struct platform_device *pdev)
+{
+ struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
+
+ moxart_wdt_stop(&moxart_wdt->dev);
+ watchdog_unregister_device(&moxart_wdt->dev);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_watchdog_match[] = {
+ { .compatible = "moxa,moxart-watchdog" },
+ { },
+};
+
+static struct platform_driver moxart_wdt_driver = {
+ .probe = moxart_wdt_probe,
+ .remove = moxart_wdt_remove,
+ .driver = {
+ .name = "moxart-watchdog",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_watchdog_match,
+ },
+};
+module_platform_driver(moxart_wdt_driver);
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds");
+
+MODULE_DESCRIPTION("MOXART watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index da2752063bb..d82152077fd 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/miscdevice.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/watchdog.h>
@@ -329,4 +330,3 @@ MODULE_AUTHOR("Dave Updegraff, Kumar Gala");
MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx "
"uProcessors");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index b4341110ad4..edb31ffd792 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -257,5 +257,4 @@ module_platform_driver(mtx1_wdt_driver);
MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:mtx1-wdt");
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index e4cf9801926..f9fa5840939 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -255,7 +255,7 @@ static struct miscdevice mv64x60_wdt_miscdev = {
static int mv64x60_wdt_probe(struct platform_device *dev)
{
- struct mv64x60_wdt_pdata *pdata = dev->dev.platform_data;
+ struct mv64x60_wdt_pdata *pdata = dev_get_platdata(&dev->dev);
struct resource *r;
int timeout = 10;
@@ -323,5 +323,4 @@ module_exit(mv64x60_wdt_exit);
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("MV64x60 watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" MV64x60_WDT_NAME);
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index b15b6efd91a..a0d893b0930 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -307,5 +307,4 @@ module_platform_driver(nuc900wdt_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("Watchdog driver for NUC900");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:nuc900-wdt");
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 59cf19eeea0..231e5b9d5c8 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -513,4 +513,3 @@ module_exit(nv_tco_cleanup_module);
MODULE_AUTHOR("Mike Waychison");
MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 4dd281f2c33..fb57103c8eb 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -405,4 +405,3 @@ module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index af88ffd1068..09cf0135e8a 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -68,14 +68,14 @@ static void omap_wdt_reload(struct omap_wdt_dev *wdev)
void __iomem *base = wdev->base;
/* wait for posted write to complete */
- while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08)
+ while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08)
cpu_relax();
wdev->wdt_trgr_pattern = ~wdev->wdt_trgr_pattern;
- __raw_writel(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
+ writel_relaxed(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
/* wait for posted write to complete */
- while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08)
+ while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08)
cpu_relax();
/* reloaded WCRR from WLDR */
}
@@ -85,12 +85,12 @@ static void omap_wdt_enable(struct omap_wdt_dev *wdev)
void __iomem *base = wdev->base;
/* Sequence to enable the watchdog */
- __raw_writel(0xBBBB, base + OMAP_WATCHDOG_SPR);
- while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x10)
+ writel_relaxed(0xBBBB, base + OMAP_WATCHDOG_SPR);
+ while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10)
cpu_relax();
- __raw_writel(0x4444, base + OMAP_WATCHDOG_SPR);
- while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x10)
+ writel_relaxed(0x4444, base + OMAP_WATCHDOG_SPR);
+ while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10)
cpu_relax();
}
@@ -99,12 +99,12 @@ static void omap_wdt_disable(struct omap_wdt_dev *wdev)
void __iomem *base = wdev->base;
/* sequence required to disable watchdog */
- __raw_writel(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */
- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x10)
+ writel_relaxed(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */
+ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10)
cpu_relax();
- __raw_writel(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */
- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x10)
+ writel_relaxed(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */
+ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10)
cpu_relax();
}
@@ -115,11 +115,11 @@ static void omap_wdt_set_timer(struct omap_wdt_dev *wdev,
void __iomem *base = wdev->base;
/* just count up at 32 KHz */
- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
+ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
- __raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR);
- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
+ writel_relaxed(pre_margin, base + OMAP_WATCHDOG_LDR);
+ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
}
@@ -135,11 +135,11 @@ static int omap_wdt_start(struct watchdog_device *wdog)
pm_runtime_get_sync(wdev->dev);
/* initialize prescaler */
- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
+ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
- __raw_writel((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL);
- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
+ writel_relaxed((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL);
+ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
omap_wdt_set_timer(wdev, wdog->timeout);
@@ -205,7 +205,7 @@ static const struct watchdog_ops omap_wdt_ops = {
static int omap_wdt_probe(struct platform_device *pdev)
{
- struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct watchdog_device *omap_wdt;
struct resource *res, *mem;
struct omap_wdt_dev *wdev;
@@ -275,7 +275,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
}
pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
- __raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
+ readl_relaxed(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
omap_wdt->timeout);
pm_runtime_put_sync(wdev->dev);
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 4ea5fcccac0..44edca66d56 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -207,7 +207,7 @@ static struct platform_driver orion_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "orion_wdt",
- .of_match_table = of_match_ptr(orion_wdt_of_match_table),
+ .of_match_table = orion_wdt_of_match_table,
},
};
@@ -225,4 +225,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:orion_wdt");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 5afb89b4865..5211d56b368 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -580,8 +580,6 @@ MODULE_AUTHOR("Sven Anders <anders@anduras.de>, "
MODULE_DESCRIPTION("PC87413 WDT driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
module_param(io, int, 0);
MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
__MODULE_STRING(IO_DEFAULT) ").");
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 33e49a7f889..e936f15dc7c 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -61,7 +61,7 @@
#include <linux/delay.h> /* For mdelay function */
#include <linux/timer.h> /* For timer related operations */
#include <linux/jiffies.h> /* For jiffies stuff */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/reboot.h> /* For kernel_power_off() */
#include <linux/init.h> /* For __init/__exit/... */
@@ -1011,5 +1011,3 @@ MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, "
MODULE_DESCRIPTION("Berkshire ISA-PC Watchdog driver");
MODULE_VERSION(WATCHDOG_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 7890f84edf7..b4864f254b4 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -40,7 +40,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
@@ -820,5 +820,3 @@ module_pci_driver(pcipcwd_driver);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 7b14d184792..b731b5d129b 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -32,7 +32,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
@@ -72,8 +72,6 @@ do { \
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
/* Module Parameters */
module_param(debug, int, 0);
@@ -235,13 +233,17 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
unsigned char cmd, unsigned char *msb, unsigned char *lsb)
{
int got_response, count;
- unsigned char buf[6];
+ unsigned char *buf;
/* We will not send any commands if the USB PCWD device does
* not exist */
if ((!usb_pcwd) || (!usb_pcwd->exists))
return -1;
+ buf = kmalloc(6, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
/* The USB PC Watchdog uses a 6 byte report format.
* The board currently uses only 3 of the six bytes of the report. */
buf[0] = cmd; /* Byte 0 = CMD */
@@ -256,8 +258,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0),
HID_REQ_SET_REPORT, HID_DT_REPORT,
- 0x0200, usb_pcwd->interface_number, buf, sizeof(buf),
- USB_COMMAND_TIMEOUT) != sizeof(buf)) {
+ 0x0200, usb_pcwd->interface_number, buf, 6,
+ USB_COMMAND_TIMEOUT) != 6) {
dbg("usb_pcwd_send_command: error in usb_control_msg for "
"cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb);
}
@@ -277,6 +279,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
*lsb = usb_pcwd->cmd_data_lsb;
}
+ kfree(buf);
+
return got_response;
}
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 7d3d471f810..0cdfee26669 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#define DRV_NAME "PIKA-WDT"
@@ -298,5 +299,3 @@ module_exit(pikawdt_exit);
MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>");
MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index b30bd430f59..1bdcc313e1d 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -233,5 +233,4 @@ MODULE_PARM_DESC(nowayout,
"Set to 1 to keep watchdog running after device release");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:pnx4008-watchdog");
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index 1b62a7dfcc9..882fdcb46ad 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -278,4 +278,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Daniel Laird/Andre McCurdy");
MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 9cf6bc7a234..71e78ef4b73 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -25,8 +25,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/fs.h> /* For file operations */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/platform_device.h> /* For platform_driver framework */
@@ -329,4 +328,3 @@ MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
"Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index b0f116c2fd5..082d0626295 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -231,7 +231,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
struct resource *r;
struct rdc321x_wdt_pdata *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
@@ -298,4 +298,3 @@ module_platform_driver(rdc321x_wdt_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
new file mode 100644
index 00000000000..53d37fea183
--- /dev/null
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -0,0 +1,207 @@
+/*
+ * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ *
+ * This driver was based on: drivers/watchdog/softdog.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define SYSC_RSTSTAT 0x38
+#define WDT_RST_CAUSE BIT(1)
+
+#define RALINK_WDT_TIMEOUT 30
+#define RALINK_WDT_PRESCALE 65536
+
+#define TIMER_REG_TMR1LOAD 0x00
+#define TIMER_REG_TMR1CTL 0x08
+
+#define TMRSTAT_TMR1RST BIT(5)
+
+#define TMR1CTL_ENABLE BIT(7)
+#define TMR1CTL_MODE_SHIFT 4
+#define TMR1CTL_MODE_MASK 0x3
+#define TMR1CTL_MODE_FREE_RUNNING 0x0
+#define TMR1CTL_MODE_PERIODIC 0x1
+#define TMR1CTL_MODE_TIMEOUT 0x2
+#define TMR1CTL_MODE_WDT 0x3
+#define TMR1CTL_PRESCALE_MASK 0xf
+#define TMR1CTL_PRESCALE_65536 0xf
+
+static struct clk *rt288x_wdt_clk;
+static unsigned long rt288x_wdt_freq;
+static void __iomem *rt288x_wdt_base;
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static inline void rt_wdt_w32(unsigned reg, u32 val)
+{
+ iowrite32(val, rt288x_wdt_base + reg);
+}
+
+static inline u32 rt_wdt_r32(unsigned reg)
+{
+ return ioread32(rt288x_wdt_base + reg);
+}
+
+static int rt288x_wdt_ping(struct watchdog_device *w)
+{
+ rt_wdt_w32(TIMER_REG_TMR1LOAD, w->timeout * rt288x_wdt_freq);
+
+ return 0;
+}
+
+static int rt288x_wdt_start(struct watchdog_device *w)
+{
+ u32 t;
+
+ t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t &= ~(TMR1CTL_MODE_MASK << TMR1CTL_MODE_SHIFT |
+ TMR1CTL_PRESCALE_MASK);
+ t |= (TMR1CTL_MODE_WDT << TMR1CTL_MODE_SHIFT |
+ TMR1CTL_PRESCALE_65536);
+ rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+
+ rt288x_wdt_ping(w);
+
+ t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t |= TMR1CTL_ENABLE;
+ rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+
+ return 0;
+}
+
+static int rt288x_wdt_stop(struct watchdog_device *w)
+{
+ u32 t;
+
+ rt288x_wdt_ping(w);
+
+ t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t &= ~TMR1CTL_ENABLE;
+ rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+
+ return 0;
+}
+
+static int rt288x_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
+{
+ w->timeout = t;
+ rt288x_wdt_ping(w);
+
+ return 0;
+}
+
+static int rt288x_wdt_bootcause(void)
+{
+ if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE)
+ return WDIOF_CARDRESET;
+
+ return 0;
+}
+
+static struct watchdog_info rt288x_wdt_info = {
+ .identity = "Ralink Watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static struct watchdog_ops rt288x_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rt288x_wdt_start,
+ .stop = rt288x_wdt_stop,
+ .ping = rt288x_wdt_ping,
+ .set_timeout = rt288x_wdt_set_timeout,
+};
+
+static struct watchdog_device rt288x_wdt_dev = {
+ .info = &rt288x_wdt_info,
+ .ops = &rt288x_wdt_ops,
+ .min_timeout = 1,
+};
+
+static int rt288x_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rt288x_wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rt288x_wdt_base))
+ return PTR_ERR(rt288x_wdt_base);
+
+ rt288x_wdt_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rt288x_wdt_clk))
+ return PTR_ERR(rt288x_wdt_clk);
+
+ device_reset(&pdev->dev);
+
+ rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
+
+ rt288x_wdt_dev.dev = &pdev->dev;
+ rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
+
+ rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
+ rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout;
+
+ watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
+
+ ret = watchdog_register_device(&rt288x_wdt_dev);
+ if (!ret)
+ dev_info(&pdev->dev, "Initialized\n");
+
+ return 0;
+}
+
+static int rt288x_wdt_remove(struct platform_device *pdev)
+{
+ watchdog_unregister_device(&rt288x_wdt_dev);
+
+ return 0;
+}
+
+static void rt288x_wdt_shutdown(struct platform_device *pdev)
+{
+ rt288x_wdt_stop(&rt288x_wdt_dev);
+}
+
+static const struct of_device_id rt288x_wdt_match[] = {
+ { .compatible = "ralink,rt2880-wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt288x_wdt_match);
+
+static struct platform_driver rt288x_wdt_driver = {
+ .probe = rt288x_wdt_probe,
+ .remove = rt288x_wdt_remove,
+ .shutdown = rt288x_wdt_shutdown,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rt288x_wdt_match,
+ },
+};
+
+module_platform_driver(rt288x_wdt_driver);
+
+MODULE_DESCRIPTION("MediaTek/Ralink RT288x/RT3xxx hardware watchdog driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 23aad7c6bf5..7d8fd041ee2 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -29,7 +29,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -539,5 +538,4 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:s3c2410-wdt");
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index ccd6b29e21b..e1d39a1e962 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -193,4 +193,3 @@ module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index ea5d84a1fda..3abae50773b 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -341,7 +341,6 @@ MODULE_PARM_DESC(timeout,
"Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/*
* example code that can be put in a platform code area to utilize the
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 63632ec87c7..2eef58a0cf0 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -387,4 +387,3 @@ module_exit(sbc60xxwdt_unload);
MODULE_AUTHOR("Jakob Oestergaard <jakob@unthought.net>");
MODULE_DESCRIPTION("60xx Single Board Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 719edc8fdeb..5f268add17c 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -309,5 +309,3 @@ MODULE_AUTHOR("Gilles Gigan");
MODULE_DESCRIPTION("Watchdog device driver for single board"
" computers EPIC Nano 7240 from iEi");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index d4781e05f01..da60560ca44 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -404,6 +404,5 @@ MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>");
MODULE_DESCRIPTION("SBC8360 watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.01");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of sbc8360.c */
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 0c3e9f66ef7..a1c502e0d8e 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -220,4 +220,3 @@ MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. "
"so only use it if you are *sure* you are running on this specific "
"SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 90d5527ca88..a517d8bae75 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -263,5 +263,3 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 3fb83b0c28c..3b9fff9dcf6 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -476,4 +476,3 @@ MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION(
"Driver for National Semiconductor PC87307/PC97307 watchdog component");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 707e027e500..f353e18b1a8 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -433,4 +433,3 @@ MODULE_AUTHOR("Scott and Bill Jennings");
MODULE_DESCRIPTION(
"Driver for watchdog timer in AMD \"Elan\" SC520 uProcessor");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index af7b136b187..b96127ea3de 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -26,8 +26,7 @@
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/... */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
@@ -545,5 +544,3 @@ module_exit(sch311x_wdt_exit);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("SMSC SCH311x WatchDog Timer Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 8ae7c282d46..836377cf927 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -37,7 +37,6 @@
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int margin = 60; /* in seconds */
module_param(margin, int, 0);
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 5bca7945776..f9b8e06f355 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -343,7 +343,6 @@ MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("SuperH watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
MODULE_PARM_DESC(clock_division_ratio,
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
new file mode 100644
index 00000000000..ced3edc9595
--- /dev/null
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -0,0 +1,226 @@
+/*
+ * Watchdog driver for CSR SiRFprimaII and SiRFatlasVI
+ *
+ * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#define CLOCK_FREQ 1000000
+
+#define SIRFSOC_TIMER_COUNTER_LO 0x0000
+#define SIRFSOC_TIMER_MATCH_0 0x0008
+#define SIRFSOC_TIMER_INT_EN 0x0024
+#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028
+#define SIRFSOC_TIMER_LATCH 0x0030
+#define SIRFSOC_TIMER_LATCHED_LO 0x0034
+
+#define SIRFSOC_TIMER_WDT_INDEX 5
+
+#define SIRFSOC_WDT_MIN_TIMEOUT 30 /* 30 secs */
+#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */
+#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */
+
+static unsigned int timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(timeout, uint, 0);
+module_param(nowayout, bool, 0);
+
+MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd)
+{
+ u32 counter, match;
+ void __iomem *wdt_base;
+ int time_left;
+
+ wdt_base = watchdog_get_drvdata(wdd);
+ counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO);
+ match = readl(wdt_base +
+ SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
+
+ time_left = match - counter;
+
+ return time_left / CLOCK_FREQ;
+}
+
+static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
+{
+ u32 counter, timeout_ticks;
+ void __iomem *wdt_base;
+
+ timeout_ticks = wdd->timeout * CLOCK_FREQ;
+ wdt_base = watchdog_get_drvdata(wdd);
+
+ /* Enable the latch before reading the LATCH_LO register */
+ writel(1, wdt_base + SIRFSOC_TIMER_LATCH);
+
+ /* Set the TO value */
+ counter = readl(wdt_base + SIRFSOC_TIMER_LATCHED_LO);
+
+ counter += timeout_ticks;
+
+ writel(counter, wdt_base +
+ SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
+
+ return 0;
+}
+
+static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
+{
+ void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+ sirfsoc_wdt_updatetimeout(wdd);
+
+ /*
+ * NOTE: If the interrupt is not enabled,
+ * the WD reset is not generated at all.
+ */
+ writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
+ | (1 << SIRFSOC_TIMER_WDT_INDEX),
+ wdt_base + SIRFSOC_TIMER_INT_EN);
+ writel(1, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
+
+ return 0;
+}
+
+static int sirfsoc_wdt_disable(struct watchdog_device *wdd)
+{
+ void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+
+ writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
+ writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
+ & (~(1 << SIRFSOC_TIMER_WDT_INDEX)),
+ wdt_base + SIRFSOC_TIMER_INT_EN);
+
+ return 0;
+}
+
+static int sirfsoc_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
+{
+ wdd->timeout = to;
+ sirfsoc_wdt_updatetimeout(wdd);
+
+ return 0;
+}
+
+#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
+
+static const struct watchdog_info sirfsoc_wdt_ident = {
+ .options = OPTIONS,
+ .firmware_version = 0,
+ .identity = "SiRFSOC Watchdog",
+};
+
+static struct watchdog_ops sirfsoc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sirfsoc_wdt_enable,
+ .stop = sirfsoc_wdt_disable,
+ .get_timeleft = sirfsoc_wdt_gettimeleft,
+ .ping = sirfsoc_wdt_updatetimeout,
+ .set_timeout = sirfsoc_wdt_settimeout,
+};
+
+static struct watchdog_device sirfsoc_wdd = {
+ .info = &sirfsoc_wdt_ident,
+ .ops = &sirfsoc_wdt_ops,
+ .timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT,
+ .min_timeout = SIRFSOC_WDT_MIN_TIMEOUT,
+ .max_timeout = SIRFSOC_WDT_MAX_TIMEOUT,
+};
+
+static int sirfsoc_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ watchdog_set_drvdata(&sirfsoc_wdd, base);
+
+ watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
+ watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
+
+ ret = watchdog_register_device(&sirfsoc_wdd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, &sirfsoc_wdd);
+
+ return 0;
+}
+
+static void sirfsoc_wdt_shutdown(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+
+ sirfsoc_wdt_disable(wdd);
+}
+
+static int sirfsoc_wdt_remove(struct platform_device *pdev)
+{
+ sirfsoc_wdt_shutdown(pdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sirfsoc_wdt_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sirfsoc_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ /*
+ * NOTE: The timer controller register settings are saved and
+ * restored by timer-prima2.c, so we need not update the WD
+ * settings here except for refreshing the timeout.
+ */
+ sirfsoc_wdt_updatetimeout(wdd);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sirfsoc_wdt_pm_ops,
+ sirfsoc_wdt_suspend, sirfsoc_wdt_resume);
+
+static const struct of_device_id sirfsoc_wdt_of_match[] = {
+ { .compatible = "sirf,prima2-tick"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_wdt_of_match);
+
+static struct platform_driver sirfsoc_wdt_driver = {
+ .driver = {
+ .name = "sirfsoc-wdt",
+ .owner = THIS_MODULE,
+ .pm = &sirfsoc_wdt_pm_ops,
+ .of_match_table = of_match_ptr(sirfsoc_wdt_of_match),
+ },
+ .probe = sirfsoc_wdt_probe,
+ .remove = sirfsoc_wdt_remove,
+ .shutdown = sirfsoc_wdt_shutdown,
+};
+module_platform_driver(sirfsoc_wdt_driver);
+
+MODULE_DESCRIPTION("SiRF SoC watchdog driver");
+MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sirfsoc-wdt");
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 6d665f9c1d5..445ea1ad1fa 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -603,8 +603,6 @@ MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version "
VERSION ")");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
#ifdef SMSC_SUPPORT_MINUTES
module_param(unit, int, 0);
MODULE_PARM_DESC(unit,
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index b68b1e519d5..ef2638fee4a 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -207,4 +207,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Software Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 0e9d8c479c3..ce63a1bbf39 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -580,4 +580,3 @@ module_exit(sp5100_tco_cleanup_module);
MODULE_AUTHOR("Priyanka Gupta");
MODULE_DESCRIPTION("TCO timer driver for SP5100/SB800 chipset");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 58df98aec12..3f786ce0a6f 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -268,7 +268,6 @@ static int sp805_wdt_remove(struct amba_device *adev)
struct sp805_wdt *wdt = amba_get_drvdata(adev);
watchdog_unregister_device(&wdt->wdd);
- amba_set_drvdata(adev, NULL);
watchdog_set_drvdata(&wdt->wdd, NULL);
return 0;
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index c97e98dcde6..d667f6b51d3 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to "
static int wdt_start(struct watchdog_device *wdd)
{
struct device *dev = watchdog_get_drvdata(wdd);
- struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+ struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev);
pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE);
return 0;
@@ -39,7 +39,7 @@ static int wdt_start(struct watchdog_device *wdd)
static int wdt_stop(struct watchdog_device *wdd)
{
struct device *dev = watchdog_get_drvdata(wdd);
- struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+ struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev);
pdata->wdt_set_timeout(dev->parent, 0);
return 0;
@@ -108,4 +108,3 @@ module_platform_driver(stmp3xxx_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index f6caa77151c..76332d893e1 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -217,7 +217,7 @@ static struct platform_driver sunxi_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
- .of_match_table = of_match_ptr(sunxi_wdt_dt_ids)
+ .of_match_table = sunxi_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index c9b0c627fe7..09d4831aa61 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -192,7 +192,7 @@ static int ts72xx_wdt_open(struct inode *inode, struct file *file)
dev_err(&wdt->pdev->dev,
"failed to convert timeout (%d) to register value\n",
timeout);
- return -EINVAL;
+ return regval;
}
if (mutex_lock_interruptible(&wdt->lock))
@@ -305,7 +305,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case WDIOC_GETSUPPORT:
- error = copy_to_user(argp, &winfo, sizeof(winfo));
+ if (copy_to_user(argp, &winfo, sizeof(winfo)))
+ error = -EFAULT;
break;
case WDIOC_GETSTATUS:
@@ -320,10 +321,9 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_SETOPTIONS: {
int options;
- if (get_user(options, p)) {
- error = -EFAULT;
+ error = get_user(options, p);
+ if (error)
break;
- }
error = -EINVAL;
@@ -341,30 +341,26 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_SETTIMEOUT: {
int new_timeout;
+ int regval;
- if (get_user(new_timeout, p)) {
- error = -EFAULT;
- } else {
- int regval;
-
- regval = timeout_to_regval(new_timeout);
- if (regval < 0) {
- error = -EINVAL;
- } else {
- ts72xx_wdt_stop(wdt);
- wdt->regval = regval;
- ts72xx_wdt_start(wdt);
- }
- }
+ error = get_user(new_timeout, p);
if (error)
break;
+ regval = timeout_to_regval(new_timeout);
+ if (regval < 0) {
+ error = regval;
+ break;
+ }
+ ts72xx_wdt_stop(wdt);
+ wdt->regval = regval;
+ ts72xx_wdt_start(wdt);
+
/*FALLTHROUGH*/
}
case WDIOC_GETTIMEOUT:
- if (put_user(regval_to_timeout(wdt->regval), p))
- error = -EFAULT;
+ error = put_user(regval_to_timeout(wdt->regval), p);
break;
default:
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 88f23c5cfdd..0fd0e8ae62a 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -176,5 +176,4 @@ module_platform_driver_probe(txx9wdt_driver, txx9wdt_probe);
MODULE_DESCRIPTION("TXx9 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:txx9wdt");
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index a614d84121c..e029b5768f2 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -88,7 +88,7 @@ static struct watchdog_device ux500_wdt = {
static int ux500_wdt_probe(struct platform_device *pdev)
{
int ret;
- struct ux500_wdt_data *pdata = pdev->dev.platform_data;
+ struct ux500_wdt_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
if (pdata->timeout > 0)
@@ -167,5 +167,4 @@ module_platform_driver(ux500_wdt_driver);
MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
MODULE_DESCRIPTION("Ux500 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:ux500_wdt");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 92f1326f0cf..e24b2108287 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -1,6 +1,9 @@
/*
* w83627hf/thf WDT driver
*
+ * (c) Copyright 2013 Guenter Roeck
+ * converted to watchdog infrastructure
+ *
* (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com>
* added support for W83627THF.
*
@@ -31,31 +34,22 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
-#include <linux/spinlock.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
-
#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
-static unsigned long wdt_is_open;
-static char expect_close;
-static DEFINE_SPINLOCK(io_lock);
-
/* You must set this - there is no sane way to probe for this board. */
static int wdt_io = 0x2E;
module_param(wdt_io, int, 0);
MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)");
-static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
+static int timeout; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1 <= timeout <= 255, default="
@@ -76,236 +70,147 @@ MODULE_PARM_DESC(nowayout,
(same as EFER) */
#define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */
-static void w83627hf_select_wd_register(void)
+#define W83627HF_LD_WDT 0x08
+
+static void superio_outb(int reg, int val)
{
- unsigned char c;
+ outb(reg, WDT_EFER);
+ outb(val, WDT_EFDR);
+}
+
+static inline int superio_inb(int reg)
+{
+ outb(reg, WDT_EFER);
+ return inb(WDT_EFDR);
+}
+
+static int superio_enter(void)
+{
+ if (!request_muxed_region(wdt_io, 2, WATCHDOG_NAME))
+ return -EBUSY;
+
outb_p(0x87, WDT_EFER); /* Enter extended function mode */
outb_p(0x87, WDT_EFER); /* Again according to manual */
- outb(0x20, WDT_EFER); /* check chip version */
- c = inb(WDT_EFDR);
- if (c == 0x82) { /* W83627THF */
- outb_p(0x2b, WDT_EFER); /* select GPIO3 */
- c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
- outb_p(0x2b, WDT_EFER);
- outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */
- } else if (c == 0x88 || c == 0xa0) { /* W83627EHF / W83627DHG */
- outb_p(0x2d, WDT_EFER); /* select GPIO5 */
- c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
- outb_p(0x2d, WDT_EFER);
- outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */
- }
+ return 0;
+}
- outb_p(0x07, WDT_EFER); /* point to logical device number reg */
- outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
- outb_p(0x30, WDT_EFER); /* select CR30 */
- outb_p(0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
+static void superio_select(int ld)
+{
+ superio_outb(0x07, ld);
}
-static void w83627hf_unselect_wd_register(void)
+static void superio_exit(void)
{
outb_p(0xAA, WDT_EFER); /* Leave extended function mode */
+ release_region(wdt_io, 2);
}
/* tyan motherboards seem to set F5 to 0x4C ?
* So explicitly init to appropriate value. */
-static void w83627hf_init(void)
+static int w83627hf_init(struct watchdog_device *wdog)
{
+ int ret;
unsigned char t;
- w83627hf_select_wd_register();
+ ret = superio_enter();
+ if (ret)
+ return ret;
+
+ superio_select(W83627HF_LD_WDT);
+ t = superio_inb(0x20); /* check chip version */
+ if (t == 0x82) { /* W83627THF */
+ t = (superio_inb(0x2b) & 0xf7);
+ superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */
+ } else if (t == 0x88 || t == 0xa0) { /* W83627EHF / W83627DHG */
+ t = superio_inb(0x2d);
+ superio_outb(0x2d, t & ~0x01); /* set GPIO5 to WDT0 */
+ }
+
+ /* set CR30 bit 0 to activate GPIO2 */
+ t = superio_inb(0x30);
+ if (!(t & 0x01))
+ superio_outb(0x30, t | 0x01);
- outb_p(0xF6, WDT_EFER); /* Select CRF6 */
- t = inb_p(WDT_EFDR); /* read CRF6 */
+ t = superio_inb(0xF6);
if (t != 0) {
pr_info("Watchdog already running. Resetting timeout to %d sec\n",
- timeout);
- outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */
+ wdog->timeout);
+ superio_outb(0xF6, wdog->timeout);
}
- outb_p(0xF5, WDT_EFER); /* Select CRF5 */
- t = inb_p(WDT_EFDR); /* read CRF5 */
- t &= ~0x0C; /* set second mode & disable keyboard
- turning off watchdog */
- t |= 0x02; /* enable the WDTO# output low pulse
- to the KBRST# pin (PIN60) */
- outb_p(t, WDT_EFDR); /* Write back to CRF5 */
-
- outb_p(0xF7, WDT_EFER); /* Select CRF7 */
- t = inb_p(WDT_EFDR); /* read CRF7 */
- t &= ~0xC0; /* disable keyboard & mouse turning off
- watchdog */
- outb_p(t, WDT_EFDR); /* Write back to CRF7 */
-
- w83627hf_unselect_wd_register();
-}
-
-static void wdt_set_time(int timeout)
-{
- spin_lock(&io_lock);
-
- w83627hf_select_wd_register();
+ /* set second mode & disable keyboard turning off watchdog */
+ t = superio_inb(0xF5) & ~0x0C;
+ /* enable the WDTO# output low pulse to the KBRST# pin */
+ t |= 0x02;
+ superio_outb(0xF5, t);
- outb_p(0xF6, WDT_EFER); /* Select CRF6 */
- outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF6 */
+ /* disable keyboard & mouse turning off watchdog */
+ t = superio_inb(0xF7) & ~0xC0;
+ superio_outb(0xF7, t);
- w83627hf_unselect_wd_register();
+ superio_exit();
- spin_unlock(&io_lock);
-}
-
-static int wdt_ping(void)
-{
- wdt_set_time(timeout);
return 0;
}
-static int wdt_disable(void)
+static int wdt_set_time(unsigned int timeout)
{
- wdt_set_time(0);
- return 0;
-}
+ int ret;
+
+ ret = superio_enter();
+ if (ret)
+ return ret;
+
+ superio_select(W83627HF_LD_WDT);
+ superio_outb(0xF6, timeout);
+ superio_exit();
-static int wdt_set_heartbeat(int t)
-{
- if (t < 1 || t > 255)
- return -EINVAL;
- timeout = t;
return 0;
}
-static int wdt_get_time(void)
+static int wdt_start(struct watchdog_device *wdog)
{
- int timeleft;
-
- spin_lock(&io_lock);
-
- w83627hf_select_wd_register();
-
- outb_p(0xF6, WDT_EFER); /* Select CRF6 */
- timeleft = inb_p(WDT_EFDR); /* Read Timeout counter to CRF6 */
-
- w83627hf_unselect_wd_register();
-
- spin_unlock(&io_lock);
-
- return timeleft;
+ return wdt_set_time(wdog->timeout);
}
-static ssize_t wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static int wdt_stop(struct watchdog_device *wdog)
{
- if (count) {
- if (!nowayout) {
- size_t i;
-
- expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- wdt_ping();
- }
- return count;
+ return wdt_set_time(0);
}
-static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout)
{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int timeval;
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
- WDIOF_MAGICCLOSE,
- .firmware_version = 1,
- .identity = "W83627HF WDT",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user(argp, &ident, sizeof(ident)))
- return -EFAULT;
- break;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_SETOPTIONS:
- {
- int options, retval = -EINVAL;
-
- if (get_user(options, p))
- return -EFAULT;
- if (options & WDIOS_DISABLECARD) {
- wdt_disable();
- retval = 0;
- }
- if (options & WDIOS_ENABLECARD) {
- wdt_ping();
- retval = 0;
- }
- return retval;
- }
- case WDIOC_KEEPALIVE:
- wdt_ping();
- break;
- case WDIOC_SETTIMEOUT:
- if (get_user(timeval, p))
- return -EFAULT;
- if (wdt_set_heartbeat(timeval))
- return -EINVAL;
- wdt_ping();
- /* Fall */
- case WDIOC_GETTIMEOUT:
- return put_user(timeout, p);
- case WDIOC_GETTIMELEFT:
- timeval = wdt_get_time();
- return put_user(timeval, p);
- default:
- return -ENOTTY;
- }
+ wdog->timeout = timeout;
+
return 0;
}
-static int wdt_open(struct inode *inode, struct file *file)
+static unsigned int wdt_get_time(struct watchdog_device *wdog)
{
- if (test_and_set_bit(0, &wdt_is_open))
- return -EBUSY;
- /*
- * Activate
- */
+ unsigned int timeleft;
+ int ret;
- wdt_ping();
- return nonseekable_open(inode, file);
-}
+ ret = superio_enter();
+ if (ret)
+ return 0;
-static int wdt_close(struct inode *inode, struct file *file)
-{
- if (expect_close == 42)
- wdt_disable();
- else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- wdt_ping();
- }
- expect_close = 0;
- clear_bit(0, &wdt_is_open);
- return 0;
+ superio_select(W83627HF_LD_WDT);
+ timeleft = superio_inb(0xF6);
+ superio_exit();
+
+ return timeleft;
}
/*
* Notifier for system down
*/
-
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
- wdt_disable(); /* Turn the WDT off */
+ wdt_set_time(0); /* Turn the WDT off */
return NOTIFY_DONE;
}
@@ -314,19 +219,25 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
* Kernel Interfaces
*/
-static const struct file_operations wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = wdt_write,
- .unlocked_ioctl = wdt_ioctl,
- .open = wdt_open,
- .release = wdt_close,
+static struct watchdog_info wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "W83627HF Watchdog",
};
-static struct miscdevice wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &wdt_fops,
+static struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .set_timeout = wdt_set_timeout,
+ .get_timeleft = wdt_get_time,
+};
+
+static struct watchdog_device wdt_dev = {
+ .info = &wdt_info,
+ .ops = &wdt_ops,
+ .timeout = WATCHDOG_TIMEOUT,
+ .min_timeout = 1,
+ .max_timeout = 255,
};
/*
@@ -344,50 +255,39 @@ static int __init wdt_init(void)
pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n");
- if (wdt_set_heartbeat(timeout)) {
- wdt_set_heartbeat(WATCHDOG_TIMEOUT);
- pr_info("timeout value must be 1 <= timeout <= 255, using %d\n",
- WATCHDOG_TIMEOUT);
- }
+ watchdog_init_timeout(&wdt_dev, timeout, NULL);
+ watchdog_set_nowayout(&wdt_dev, nowayout);
- if (!request_region(wdt_io, 1, WATCHDOG_NAME)) {
- pr_err("I/O address 0x%04x already in use\n", wdt_io);
- ret = -EIO;
- goto out;
+ ret = w83627hf_init(&wdt_dev);
+ if (ret) {
+ pr_err("failed to initialize watchdog (err=%d)\n", ret);
+ return ret;
}
- w83627hf_init();
-
ret = register_reboot_notifier(&wdt_notifier);
if (ret != 0) {
pr_err("cannot register reboot notifier (err=%d)\n", ret);
- goto unreg_regions;
+ return ret;
}
- ret = misc_register(&wdt_miscdev);
- if (ret != 0) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ ret = watchdog_register_device(&wdt_dev);
+ if (ret)
goto unreg_reboot;
- }
pr_info("initialized. timeout=%d sec (nowayout=%d)\n",
- timeout, nowayout);
+ wdt_dev.timeout, nowayout);
-out:
return ret;
+
unreg_reboot:
unregister_reboot_notifier(&wdt_notifier);
-unreg_regions:
- release_region(wdt_io, 1);
- goto out;
+ return ret;
}
static void __exit wdt_exit(void)
{
- misc_deregister(&wdt_miscdev);
+ watchdog_unregister_device(&wdt_dev);
unregister_reboot_notifier(&wdt_notifier);
- release_region(wdt_io, 1);
}
module_init(wdt_init);
@@ -396,4 +296,3 @@ module_exit(wdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>");
MODULE_DESCRIPTION("w83627hf/thf WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index cd9f3c1e1af..aaf2995d37f 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -458,4 +458,3 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, "
"Samuel Tardieu <sam@rfc1149.net>");
MODULE_DESCRIPTION("w83697hf/hg WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index 274be0bfaf2..ff58cb74671 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -395,4 +395,3 @@ module_exit(wdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Flemming Frandsen <ff@nrvissing.net>");
MODULE_DESCRIPTION("w83697ug/uf WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 7874ae06232..f0483c75ed3 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -406,4 +406,3 @@ module_exit(w83877f_wdt_unload);
MODULE_AUTHOR("Scott and Bill Jennings");
MODULE_DESCRIPTION("Driver for watchdog timer in w83877f chip");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 5d2c902825c..91bf55a2002 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -527,4 +527,3 @@ module_exit(w83977f_wdt_exit);
MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>");
MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 25aba6e00a2..db0da7ea4fd 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -322,6 +322,5 @@ module_exit(wafwdt_exit);
MODULE_AUTHOR("Justin Cormack");
MODULE_DESCRIPTION("ICP Wafer 5823 Single Board Computer WDT driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of wafer5823wdt.c */
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 05d18b4c661..461336c4519 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -77,7 +77,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
watchdog_check_min_max_timeout(wdd);
- /* try to get the tiemout module parameter first */
+ /* try to get the timeout module parameter first */
if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
wdd->timeout = timeout_parm;
return ret;
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 3045debd541..0240c60d14e 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -48,8 +48,6 @@
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
MODULE_DESCRIPTION("RTAS watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
static bool wdrtas_nowayout = WATCHDOG_NOWAYOUT;
static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index ee4333c0110..e0206b5b7d8 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -664,6 +664,4 @@ module_exit(wdt_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Driver for ISA ICS watchdog cards (WDT500/501)");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 5eec7405388..7355ddd0b20 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -224,7 +224,6 @@ static void __exit footbridge_watchdog_exit(void)
MODULE_AUTHOR("Phil Blundell <pb@nexus.co.uk>");
MODULE_DESCRIPTION("Footbridge watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 65a40234493..a8e6f87f60c 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -507,4 +507,3 @@ module_exit(wd977_exit);
MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>");
MODULE_DESCRIPTION("W83977AF Watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 36a54c0e32d..ee89ba4dea6 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -744,5 +744,3 @@ module_pci_driver(wdtpci_driver);
MODULE_AUTHOR("JP Nollmann, Alan Cox");
MODULE_DESCRIPTION("Driver for the ICS PCI-WDT500/501 watchdog cards");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index d4e47eda418..e243bd01c77 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -184,7 +184,7 @@ static const struct watchdog_ops wm831x_wdt_ops = {
static int wm831x_wdt_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *chip_pdata;
+ struct wm831x_pdata *chip_pdata = dev_get_platdata(pdev->dev.parent);
struct wm831x_watchdog_pdata *pdata;
struct wm831x_wdt_drvdata *driver_data;
struct watchdog_device *wm831x_wdt;
@@ -231,12 +231,10 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time;
/* Apply any configuration */
- if (pdev->dev.parent->platform_data) {
- chip_pdata = pdev->dev.parent->platform_data;
+ if (chip_pdata)
pdata = chip_pdata->watchdog;
- } else {
+ else
pdata = NULL;
- }
if (pdata) {
reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK |
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
index 92ad33d0cb7..7a42dffd39e 100644
--- a/drivers/watchdog/xen_wdt.c
+++ b/drivers/watchdog/xen_wdt.c
@@ -362,4 +362,3 @@ MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>");
MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 23eae5cb69c..c794ea18214 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -140,7 +140,6 @@ config XEN_GRANT_DEV_ALLOC
config SWIOTLB_XEN
def_bool y
- depends on PCI && X86
select SWIOTLB
config XEN_TMEM
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b232908a619..55ea73f7c70 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -596,7 +596,7 @@ static void __init balloon_add_region(unsigned long start_pfn,
}
}
-static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+static int balloon_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
@@ -616,7 +616,7 @@ static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+static struct notifier_block balloon_cpu_notifier = {
.notifier_call = balloon_cpu_notify,
};
@@ -641,7 +641,7 @@ static int __init balloon_init(void)
balloon_stats.current_pages = xen_pv_domain()
? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
- : max_pfn;
+ : get_num_physpages();
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8b3a69a06c3..5de2063e16d 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -305,7 +305,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
if (rc < 0)
goto err;
- rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
+ rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
u->name, evtchn);
if (rc < 0)
goto err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c4d2298893b..62ccf5424ba 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -898,8 +899,16 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ /* this is basically a nop on x86 */
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ for (i = 0; i < count; i++) {
+ if (map_ops[i].status)
+ continue;
+ set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+ map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+ }
return ret;
+ }
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
@@ -942,8 +951,14 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ /* this is basically a nop on x86 */
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ for (i = 0; i < count; i++) {
+ set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+ INVALID_P2M_ENTRY);
+ }
return ret;
+ }
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 18fff88254e..188825122aa 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -26,6 +26,7 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
+#include <asm/pci_x86.h>
static bool __read_mostly pci_seg_supported = true;
@@ -58,12 +59,12 @@ static int xen_add_device(struct device *dev)
add.flags = XEN_PCI_DEV_EXTFN;
#ifdef CONFIG_ACPI
- handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
+ handle = ACPI_HANDLE(&pci_dev->dev);
if (!handle && pci_dev->bus->bridge)
- handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
+ handle = ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
if (!handle && pci_dev->is_virtfn)
- handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
+ handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
if (handle) {
acpi_status status;
@@ -192,3 +193,49 @@ static int __init register_xen_pci_notifier(void)
}
arch_initcall(register_xen_pci_notifier);
+
+#ifdef CONFIG_PCI_MMCONFIG
+static int __init xen_mcfg_late(void)
+{
+ struct pci_mmcfg_region *cfg;
+ int rc;
+
+ if (!xen_initial_domain())
+ return 0;
+
+ if ((pci_probe & PCI_PROBE_MMCONF) == 0)
+ return 0;
+
+ if (list_empty(&pci_mmcfg_list))
+ return 0;
+
+ /* Check whether they are in the right area. */
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ struct physdev_pci_mmcfg_reserved r;
+
+ r.address = cfg->address;
+ r.segment = cfg->segment;
+ r.start_bus = cfg->start_bus;
+ r.end_bus = cfg->end_bus;
+ r.flags = XEN_PCI_MMCFG_RESERVED;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
+ switch (rc) {
+ case 0:
+ case -ENOSYS:
+ continue;
+
+ default:
+ pr_warn("Failed to report MMCONFIG reservation"
+ " state for %s to hypervisor"
+ " (%d)\n",
+ cfg->name, rc);
+ }
+ }
+ return 0;
+}
+/*
+ * Needs to be done after acpi_init, which is a subsys_initcall.
+ */
+subsys_initcall_sync(xen_mcfg_late);
+#endif
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 99db9e1eb8b..2f3528e93cb 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -84,7 +84,7 @@ static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
static int xen_allocate_irq(struct pci_dev *pdev)
{
return request_irq(pdev->irq, do_hvm_evtchn_intr,
- IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
"xen-platform-pci", pdev);
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1b2277c311d..a224bc74b6b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -42,12 +42,31 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
+
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
+#include <trace/events/swiotlb.h>
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+ gfp_t gfp)
+{
+ unsigned long dma_mask = 0;
+
+ dma_mask = dev->coherent_dma_mask;
+ if (!dma_mask)
+ dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+ return dma_mask;
+}
+#endif
+
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
@@ -56,17 +75,17 @@ static unsigned long xen_io_tlb_nslabs;
static u64 start_dma_addr;
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
return phys_to_machine(XPADDR(paddr)).maddr;
}
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
return machine_to_phys(XMADDR(baddr)).paddr;
}
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
{
return xen_phys_to_bus(virt_to_phys(address));
}
@@ -89,7 +108,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
return 1;
}
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long pfn = PFN_DOWN(p);
unsigned int offset = p & ~PAGE_MASK;
@@ -126,6 +145,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
int i, rc;
int dma_bits;
+ dma_addr_t dma_handle;
+ phys_addr_t p = virt_to_phys(buf);
dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
@@ -135,9 +156,9 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
do {
rc = xen_create_contiguous_region(
- (unsigned long)buf + (i << IO_TLB_SHIFT),
+ p + (i << IO_TLB_SHIFT),
get_order(slabs << IO_TLB_SHIFT),
- dma_bits);
+ dma_bits, &dma_handle);
} while (rc && dma_bits++ < max_dma_bits);
if (rc)
return rc;
@@ -263,7 +284,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
- unsigned long vstart;
phys_addr_t phys;
dma_addr_t dev_addr;
@@ -278,8 +298,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
return ret;
- vstart = __get_free_pages(flags, order);
- ret = (void *)vstart;
+ /* On ARM this function returns an ioremap'ped virtual address for
+ * which virt_to_phys doesn't return the corresponding physical
+ * address. In fact on ARM virt_to_phys only works for kernel direct
+ * mapped RAM memory. Also see comment below.
+ */
+ ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
if (!ret)
return ret;
@@ -287,18 +311,21 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = dma_alloc_coherent_mask(hwdev, flags);
- phys = virt_to_phys(ret);
+ /* At this point dma_handle is the physical address, next we are
+ * going to set it to the machine address.
+ * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+ * to *dma_handle. */
+ phys = *dma_handle;
dev_addr = xen_phys_to_bus(phys);
if (((dev_addr + size - 1 <= dma_mask)) &&
!range_straddles_page_boundary(phys, size))
*dma_handle = dev_addr;
else {
- if (xen_create_contiguous_region(vstart, order,
- fls64(dma_mask)) != 0) {
- free_pages(vstart, order);
+ if (xen_create_contiguous_region(phys, order,
+ fls64(dma_mask), dma_handle) != 0) {
+ xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
- *dma_handle = virt_to_machine(ret).maddr;
}
memset(ret, 0, size);
return ret;
@@ -319,13 +346,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
- phys = virt_to_phys(vaddr);
+ /* do not use virt_to_phys because on ARM it doesn't return the
+ * physical address */
+ phys = xen_bus_to_phys(dev_addr);
if (((dev_addr + size - 1 > dma_mask)) ||
range_straddles_page_boundary(phys, size))
- xen_destroy_contiguous_region((unsigned long)vaddr, order);
+ xen_destroy_contiguous_region(phys, order);
- free_pages((unsigned long)vaddr, order);
+ xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -352,16 +381,25 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) &&
- !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+ !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+ /* we are not interested in the dma_addr returned by
+ * xen_dma_map_page, only in the potential cache flushes executed
+ * by the function. */
+ xen_dma_map_page(dev, page, offset, size, dir, attrs);
return dev_addr;
+ }
/*
* Oh well, have to allocate and map a bounce buffer.
*/
+ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+ map & ~PAGE_MASK, size, dir, attrs);
dev_addr = xen_phys_to_bus(map);
/*
@@ -384,12 +422,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
* whatever the device wrote there.
*/
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
BUG_ON(dir == DMA_NONE);
+ xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -412,7 +453,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- xen_unmap_single(hwdev, dev_addr, size, dir);
+ xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
@@ -435,11 +476,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
+ if (target == SYNC_FOR_CPU)
+ xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
/* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr)) {
+ if (is_xen_swiotlb_buffer(dev_addr))
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
- return;
- }
+
+ if (target == SYNC_FOR_DEVICE)
+ xen_dma_sync_single_for_device(hwdev, paddr, size, dir);
if (dir != DMA_FROM_DEVICE)
return;
@@ -502,16 +547,26 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg->length,
dir);
if (map == SWIOTLB_MAP_ERROR) {
+ dev_warn(hwdev, "swiotlb buffer is full\n");
/* Don't panic here, we expect map_sg users
to do proper error handling. */
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
- return DMA_ERROR_CODE;
+ return 0;
}
sg->dma_address = xen_phys_to_bus(map);
- } else
+ } else {
+ /* we are not interested in the dma_addr returned by
+ * xen_dma_map_page, only in the potential cache flushes executed
+ * by the function. */
+ xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+ paddr & ~PAGE_MASK,
+ sg->length,
+ dir,
+ attrs);
sg->dma_address = dev_addr;
+ }
sg_dma_len(sg) = sg->length;
}
return nelems;
@@ -533,7 +588,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+ xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -593,3 +648,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 38e92b770e9..3c0a74b3e9b 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -384,12 +384,14 @@ static ssize_t nodename_show(struct device *dev,
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
+static DEVICE_ATTR_RO(nodename);
static ssize_t devtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
+static DEVICE_ATTR_RO(devtype);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -397,14 +399,24 @@ static ssize_t modalias_show(struct device *dev,
return sprintf(buf, "%s:%s\n", dev->bus->name,
to_xenbus_device(dev)->devicetype);
}
+static DEVICE_ATTR_RO(modalias);
-struct device_attribute xenbus_dev_attrs[] = {
- __ATTR_RO(nodename),
- __ATTR_RO(devtype),
- __ATTR_RO(modalias),
- __ATTR_NULL
+static struct attribute *xenbus_dev_attrs[] = {
+ &dev_attr_nodename.attr,
+ &dev_attr_devtype.attr,
+ &dev_attr_modalias.attr,
+ NULL,
};
-EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
+
+static const struct attribute_group xenbus_dev_group = {
+ .attrs = xenbus_dev_attrs,
+};
+
+const struct attribute_group *xenbus_dev_groups[] = {
+ &xenbus_dev_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(xenbus_dev_groups);
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 146f857a36f..1085ec294a1 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -54,7 +54,7 @@ enum xenstore_init {
XS_LOCAL,
};
-extern struct device_attribute xenbus_dev_attrs[];
+extern const struct attribute_group *xenbus_dev_groups[];
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 998bbbab816..5125dce11a6 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -200,7 +200,7 @@ static struct xen_bus_type xenbus_backend = {
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_dev_attrs,
+ .dev_groups = xenbus_dev_groups,
},
};
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 34b20bfa4e8..129bf84c19e 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -154,7 +154,7 @@ static struct xen_bus_type xenbus_frontend = {
.probe = xenbus_frontend_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_dev_attrs,
+ .dev_groups = xenbus_dev_groups,
.pm = &xenbus_pm_ops,
},